diff --git a/.clang-format b/.clang-format new file mode 100644 index 0000000000000000000000000000000000000000..28771e1ae591700be3fe27c3acd668ef8cee77ad --- /dev/null +++ b/.clang-format @@ -0,0 +1,195 @@ +--- +Language: Cpp +# BasedOnStyle: Google +AccessModifierOffset: -1 +AlignAfterOpenBracket: Align +AlignConsecutiveMacros: None +AlignConsecutiveAssignments: None +AlignConsecutiveBitFields: None +AlignConsecutiveDeclarations: None +AlignEscapedNewlines: Left +AlignOperands: Align +AlignTrailingComments: true +AllowAllArgumentsOnNextLine: true +AllowAllConstructorInitializersOnNextLine: true +AllowAllParametersOfDeclarationOnNextLine: true +AllowShortEnumsOnASingleLine: false +AllowShortBlocksOnASingleLine: Never +AllowShortCaseLabelsOnASingleLine: false +AllowShortFunctionsOnASingleLine: Empty +AllowShortLambdasOnASingleLine: All +AllowShortIfStatementsOnASingleLine: WithoutElse +AllowShortLoopsOnASingleLine: true +AlwaysBreakAfterDefinitionReturnType: None +AlwaysBreakAfterReturnType: None +AlwaysBreakBeforeMultilineStrings: true +AlwaysBreakTemplateDeclarations: Yes +AttributeMacros: + - __capability +BinPackArguments: true +BinPackParameters: true +BraceWrapping: + AfterCaseLabel: false + AfterClass: false + AfterControlStatement: Never + AfterEnum: false + AfterFunction: false + AfterNamespace: false + AfterObjCDeclaration: false + AfterStruct: false + AfterUnion: false + AfterExternBlock: false + BeforeCatch: false + BeforeElse: false + BeforeLambdaBody: false + BeforeWhile: false + IndentBraces: false + SplitEmptyFunction: true + SplitEmptyRecord: true + SplitEmptyNamespace: true +BreakBeforeBinaryOperators: None +BreakBeforeConceptDeclarations: true +BreakBeforeBraces: Attach +BreakBeforeInheritanceComma: false +BreakInheritanceList: BeforeColon +BreakBeforeTernaryOperators: true +BreakConstructorInitializersBeforeComma: false +BreakConstructorInitializers: BeforeColon +BreakAfterJavaFieldAnnotations: false +BreakStringLiterals: true +ColumnLimit: 120 +CommentPragmas: '^ IWYU pragma:' +CompactNamespaces: false +ConstructorInitializerAllOnOneLineOrOnePerLine: true +ConstructorInitializerIndentWidth: 4 +ContinuationIndentWidth: 4 +Cpp11BracedListStyle: true +DeriveLineEnding: true +DerivePointerAlignment: false +DisableFormat: false +EmptyLineBeforeAccessModifier: LogicalBlock +ExperimentalAutoDetectBinPacking: false +FixNamespaceComments: true +ForEachMacros: + - foreach + - Q_FOREACH + - BOOST_FOREACH +StatementAttributeLikeMacros: + - Q_EMIT +IncludeBlocks: Regroup +IncludeCategories: + - Regex: '^' + Priority: 2 + SortPriority: 0 + CaseSensitive: false + - Regex: '^<.*\.h>' + Priority: 1 + SortPriority: 0 + CaseSensitive: false + - Regex: '^<.*' + Priority: 2 + SortPriority: 0 + CaseSensitive: false + - Regex: '.*' + Priority: 3 + SortPriority: 0 + CaseSensitive: false +IncludeIsMainRegex: '([-_](test|unittest))?$' +IncludeIsMainSourceRegex: '' +IndentCaseLabels: true +IndentCaseBlocks: false +IndentGotoLabels: true +IndentPPDirectives: None +IndentExternBlock: AfterExternBlock +IndentRequires: false +IndentWidth: 2 +IndentWrappedFunctionNames: false +InsertTrailingCommas: None +JavaScriptQuotes: Leave +JavaScriptWrapImports: true +KeepEmptyLinesAtTheStartOfBlocks: false +MacroBlockBegin: '' +MacroBlockEnd: '' +MaxEmptyLinesToKeep: 1 +NamespaceIndentation: None +PenaltyBreakAssignment: 2 +PenaltyBreakBeforeFirstCallParameter: 1 +PenaltyBreakComment: 300 +PenaltyBreakFirstLessLess: 120 +PenaltyBreakString: 1000 +PenaltyBreakTemplateDeclaration: 10 +PenaltyExcessCharacter: 
1000000 +PenaltyReturnTypeOnItsOwnLine: 200 +PenaltyIndentedWhitespace: 0 +PointerAlignment: Right +RawStringFormats: + - Language: Cpp + Delimiters: + - cc + - CC + - cpp + - Cpp + - CPP + - 'c++' + - 'C++' + CanonicalDelimiter: '' + BasedOnStyle: google + - Language: TextProto + Delimiters: + - pb + - PB + - proto + - PROTO + EnclosingFunctions: + - EqualsProto + - EquivToProto + - PARSE_PARTIAL_TEXT_PROTO + - PARSE_TEST_PROTO + - PARSE_TEXT_PROTO + - ParseTextOrDie + - ParseTextProtoOrDie + - ParseTestProto + - ParsePartialTestProto + CanonicalDelimiter: '' + BasedOnStyle: google +ReflowComments: false +SortIncludes: true +SortJavaStaticImport: Before +SortUsingDeclarations: true +SpaceAfterCStyleCast: false +SpaceAfterLogicalNot: false +SpaceAfterTemplateKeyword: true +SpaceBeforeAssignmentOperators: true +SpaceBeforeCaseColon: false +SpaceBeforeCpp11BracedList: false +SpaceBeforeCtorInitializerColon: true +SpaceBeforeInheritanceColon: true +SpaceBeforeParens: ControlStatements +SpaceAroundPointerQualifiers: Default +SpaceBeforeRangeBasedForLoopColon: true +SpaceInEmptyBlock: false +SpaceInEmptyParentheses: false +SpacesBeforeTrailingComments: 2 +SpacesInAngles: false +SpacesInConditionalStatement: false +SpacesInContainerLiterals: true +SpacesInCStyleCastParentheses: false +SpacesInParentheses: false +SpacesInSquareBrackets: false +SpaceBeforeSquareBrackets: false +BitFieldColonSpacing: Both +Standard: Auto +StatementMacros: + - Q_UNUSED + - QT_REQUIRE_VERSION +TabWidth: 2 +UseCRLF: false +UseTab: Never +WhitespaceSensitiveMacros: + - STRINGIZE + - PP_STRINGIZE + - BOOST_PP_STRINGIZE + - NS_SWIFT_NAME + - CF_SWIFT_NAME +... + diff --git a/.clang-tidy b/.clang-tidy new file mode 100644 index 0000000000000000000000000000000000000000..b41ec6b295a3e650b762617712ebadebe0383cfe --- /dev/null +++ b/.clang-tidy @@ -0,0 +1,31 @@ +Checks: '-*,readability-identifier-naming,readability-function-size, + readability-braces-around-statements,readability-magic-numbers, + misc-unused-parameters,modernize-use-nullptr,modernize-replace-auto-ptr, + modernize-use-noexcept,modernize-use-override,performance-move-const-arg, + cppcoreguidelines-pro-type-cstyle-cast,cppcoreguidelines-pro-type-reinterpret-cast, + cppcoreguidelines-pro-type-const-cast' +CheckOptions: + - key: readability-identifier-naming.ClassCase + value: CamelCase + - key: readability-identifier-naming.StructCase + value: CamelCase + - key: readability-identifier-naming.TypedefCase + value: CamelCase + - key: readability-identifier-naming.EnumCase + value: CamelCase + - key: readability-identifier-naming.EnumConstantCase + value: camelBack + - key: readability-identifier-naming.UnionCase + value: CamelCase + - key: readability-identifier-naming.NamespaceCase + value: lower_case + - key: readability-identifier-naming.FunctionCase + value: CamelCase + - key: readability-identifier-naming.VariableCase + value: camelBack + - key: readability-identifier-naming.ConstantCase + value: camelBack + - key: readability-function-size.StatementThreshold + value: 50 + - key: readability-function-size.ParameterThreshold + value: 5 diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..dcf0dec08f06bb5391c8935caae1a02ce71b0c3c --- /dev/null +++ b/.gitignore @@ -0,0 +1,30 @@ +android/* +third_party/d8* +third_party/icu* +third_party/libdex* +third_party/aosp_10.0.0_r35* +third_party/aosp_modified* +third_party/ctorture* +third_party/llvm_modified* +third_party/llvm-* +tools/lib +tools/bin* +tools/android* 
+tools/aosp* +tools/clang* +tools/gcc* +tools/gn* +tools/icu* +tools/libz* +tools/zlib* +tools/ninja* +tools/qemu* +tools/r8* +tools/release* +tools/sysroot-glibc* +build/logs* +libjava-core +output +compile_commands.json +testsuite/tools* +*__pycache__* diff --git a/.gn b/.gn new file mode 100644 index 0000000000000000000000000000000000000000..c00e581063d8ce84c72d56d979a105c8d833a701 --- /dev/null +++ b/.gn @@ -0,0 +1,15 @@ +# +# Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# +buildconfig = "//build/config/BUILDCONFIG.gn" diff --git a/BUILD.gn b/BUILD.gn new file mode 100644 index 0000000000000000000000000000000000000000..40e691379fdb548eb9a6292775b15262bfa1c51d --- /dev/null +++ b/BUILD.gn @@ -0,0 +1,68 @@ +# +# Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
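The .clang-format and .clang-tidy files added above can be exercised locally before sending a change. A minimal sketch, assuming the clang tools are on PATH and a compile_commands.json has already been exported into the GN output directory (output/aarch64-clang-release is only an example output directory and path/to/file.cpp is a placeholder):

    # reformat a file in place using the repository .clang-format (Google base, 120 columns, 2-space indent)
    clang-format -i --style=file path/to/file.cpp
    # run the naming, function-size and cast checks declared in .clang-tidy
    clang-tidy -p output/aarch64-clang-release path/to/file.cpp

Both tools search upward from the source file for the nearest .clang-format/.clang-tidy, so they can be run from any directory inside the repository.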
+# +group("maple") { + deps = [ "${MAPLEALL_ROOT}:maple" ] +} + +group("irbuild") { + deps = [ "${MAPLEALL_ROOT}:irbuild" ] +} + +group("hir2mpl") { + deps = [ "${HIR2MPL_ROOT}:hir2mpl" ] +} + +group("hir2mplUT") { + deps = [ "${HIR2MPL_ROOT}/test:hir2mplUT" ] +} + +group("maple-rt") { + deps = [ "${MAPLE_MRT_ROOT}:maple-rt" ] +} + +group("ast2mpl") { + deps = [] + if (IS_AST2MPL_EXISTS == "1") { + deps = [ "${AST2MPL_ROOT}/src:ast2mpl" ] + } +} + +group("mapleallUT") { + deps = [ + "${MAPLEALL_ROOT}/test:mapleallUT", + ] +} + +group("maplegendef") { + exeTool = "-e" + rebase_path("${GN_BINARY_OUTPUT_DIRECTORY}/maplegen", root_build_dir) + mdDir = "-m" + rebase_path("${MAPLEALL_ROOT}/maple_be/include/ad/cortex_a55", root_build_dir) + outDir = "-o" + rebase_path("${MAPLE_BUILD_OUTPUT}/common/target", root_build_dir) + if (ASAN == 1) { + exec_script("${MAPLEALL_ROOT}/maple_be/mdgen/gendef.py", + [ + "-aLD_PRELOAD=${LLVMLIBDIR}/libclang_rt.asan-x86_64.so", + exeTool, + mdDir, + outDir, + ]) + } else { + exec_script("${MAPLEALL_ROOT}/maple_be/mdgen/gendef.py", + [ + exeTool, + mdDir, + outDir + ]) + } +} diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..f1e5b20f8152395af6c06f294c0275611b1abc89 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,51 @@ +FROM ubuntu:16.04 AS build-env +MAINTAINER https://www.openarkcompiler.cn + +# Setting up the build environment +RUN sed -i 's/archive.ubuntu.com/mirrors.163.com/g' /etc/apt/sources.list && \ + dpkg --add-architecture i386 && \ + apt-get -y update && \ + apt-get -y dist-upgrade && \ + apt-get -y install openjdk-8-jdk git-core build-essential zlib1g-dev libc6-dev-i386 g++-multilib gcc-multilib linux-libc-dev:i386 && \ + apt-get -y install gcc-5-aarch64-linux-gnu g++-5-aarch64-linux-gnu unzip tar curl && \ + apt-get -y install python3-paramiko python-paramiko python-requests && \ + mkdir -p /tools/ninja /tools/gn + +# 在国内请反注释下行, 因为容器也是个单独的系统,所以别用127.0.0.1 +#ENV http_proxy=http://192.168.3.81:1081 \ +# https_proxy=http://192.168.3.81:1081 + +RUN cd /tools && \ + curl -C - -LO http://releases.llvm.org/8.0.0/clang+llvm-8.0.0-x86_64-linux-gnu-ubuntu-16.04.tar.xz && \ + curl -LO https://github.com/ninja-build/ninja/releases/download/v1.9.0/ninja-linux.zip && \ + curl -LO http://tools.harmonyos.com/mirrors/gn/1523/linux/gn.1523.tar && \ + tar Jvxf /tools/clang+llvm-8.0.0-x86_64-linux-gnu-ubuntu-16.04.tar.xz -C /tools/ && \ + unzip /tools/ninja-linux.zip -d /tools/ninja/ && \ + tar xvf /tools/gn.1523.tar && \ + chmod a+x /tools/gn/gn && \ + rm /tools/clang+llvm-8.0.0-x86_64-linux-gnu-ubuntu-16.04.tar.xz /tools/ninja-linux.zip && \ + rm -rf /var/cache/apt/archives + +# copy source +COPY . 
/OpenArkCompiler +WORKDIR /OpenArkCompiler + +# create symbolic link +RUN mkdir -p /OpenArkCompiler/tools /OpenArkCompiler/tools/gn && \ + ln -s /tools/ninja /OpenArkCompiler/tools/ninja_1.9.0 && \ + ln -s /tools/gn/gn /OpenArkCompiler/tools/gn/gn && \ + ln -s /tools/clang+llvm-8.0.0-x86_64-linux-gnu-ubuntu-16.04 /OpenArkCompiler/tools/clang+llvm-8.0.0-x86_64-linux-gnu-ubuntu-16.04 + +# compile +RUN ["/bin/bash", "-c", "source build/envsetup.sh && make && ls -al "] + +# build final docker image +FROM ubuntu:16.04 +RUN sed -i 's/archive.ubuntu.com/mirrors.163.com/g' /etc/apt/sources.list && \ + apt-get -y update && \ + apt-get install -y openjdk-8-jdk curl vim && \ + rm -rf /var/cache/apt/archives +COPY --from=build-env /OpenArkCompiler/output /OpenArkCompiler +VOLUME /OpenArkCompiler +ENV PATH=/OpenArkCompiler/bin:$PATH +CMD maple -h diff --git a/Makefile b/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..e3a7e3bc88374fd545074c6ad755af9a98538cf7 --- /dev/null +++ b/Makefile @@ -0,0 +1,207 @@ +# +# Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# +# Makefile for OpenArkCompiler +OPT := O2 +DEBUG := $(MAPLE_DEBUG) +INSTALL_DIR := $(MAPLE_BUILD_OUTPUT) +LIB_CORE_PATH := $(MAPLE_BUILD_OUTPUT)/libjava-core/host-x86_64-$(OPT) +MAPLE_BIN_DIR := $(MAPLE_ROOT)/src/mapleall/bin +MRT_ROOT := $(MAPLE_ROOT)/src/mrt +ANDROID_ROOT := $(MAPLE_ROOT)/android +MAJOR_VERSION := $(MAPLE_MAJOR_VERSION) +MINOR_VERSION := $(MAPLE_MINOR_VERSION) +RELEASE_VERSION := $(MAPLE_RELEASE_VERSION) +BUILD_VERSION := $(MAPLE_BUILD_VERSION) +GIT_REVISION := $(shell git log --pretty=format:"%H" -1) +MAST := 0 +ASAN := 0 +ONLY_C := 0 +COV := 0 +GPROF := 0 +ifeq ($(DEBUG),0) + BUILD_TYPE := RELEASE +else + BUILD_TYPE := DEBUG +endif +HOST_ARCH := 64 +MIR_JAVA := 1 +GN := $(MAPLE_ROOT)/tools/gn/gn +NINJA := $(shell ls $(MAPLE_ROOT)/tools/ninja*/ninja | tail -1) +ifneq ($(findstring GC,$(OPT)),) + GCONLY := 1 +else + GCONLY := 0 +endif + +GN_OPTIONS := \ + GN_INSTALL_PREFIX="$(MAPLE_ROOT)" \ + GN_BUILD_TYPE="$(BUILD_TYPE)" \ + HOST_ARCH=$(HOST_ARCH) \ + MIR_JAVA=$(MIR_JAVA) \ + OPT="$(OPT)" \ + GCONLY=$(GCONLY) \ + TARGET="$(TARGET_PROCESSOR)" \ + MAJOR_VERSION="$(MAJOR_VERSION)" \ + MINOR_VERSION="$(MINOR_VERSION)" \ + RELEASE_VERSION="$(RELEASE_VERSION)" \ + BUILD_VERSION="$(BUILD_VERSION)" \ + GIT_REVISION="$(GIT_REVISION)" \ + MAST=$(MAST) \ + ASAN=$(ASAN) \ + ONLY_C=$(ONLY_C) \ + COV=$(COV) \ + GPROF=$(GPROF) + +.PHONY: default +default: install + +.PHONY: directory +directory: + $(shell mkdir -p $(INSTALL_DIR)/bin;) + +.PHONY: install_patch +install_patch: + @bash build/third_party/patch.sh patch + +.PHONY: uninstall_patch +uninstall_patch: + @bash build/third_party/patch.sh unpatch + +.PHONY: maplegen +maplegen:install_patch + $(call build_gn, $(GN_OPTIONS), maplegen) + +.PHONY: maplegendef +maplegendef: maplegen + $(call build_gn, $(GN_OPTIONS), maplegendef) + +.PHONY: maple +maple: maplegendef + $(call build_gn, $(GN_OPTIONS), maple) + +.PHONY: irbuild +irbuild: install_patch + $(call 
build_gn, $(GN_OPTIONS), irbuild) + +.PHONY: mpldbg +mpldbg: + $(call build_gn, $(GN_OPTIONS), mpldbg) + +.PHONY: ast2mpl +ast2mpl: + $(call build_gn, $(GN_OPTIONS), ast2mpl) + +.PHONY: hir2mpl +hir2mpl: install_patch + $(call build_gn, $(GN_OPTIONS), hir2mpl) + +.PHONY: clang2mpl +clang2mpl: maple + (cd tools/clang2mpl; make setup; make; make install) + +.PHONY: hir2mplUT +hir2mplUT: + $(call build_gn, $(GN_OPTIONS) COV_CHECK=1, hir2mplUT) + +.PHONY: libcore +libcore: maple-rt + cd $(LIB_CORE_PATH); \ + $(MAKE) install OPT=$(OPT) DEBUG=$(DEBUG) + +.PHONY: maple-rt +maple-rt: java-core-def + $(call build_gn, $(GN_OPTIONS), maple-rt) + +.PHONY: mapleallUT +mapleallUT: install_patch + $(call build_gn, $(GN_OPTIONS), mapleallUT) + +.PHONY: java-core-def +java-core-def: install + mkdir -p $(LIB_CORE_PATH); \ + cp -rp $(MAPLE_ROOT)/libjava-core/* $(LIB_CORE_PATH)/; \ + cd $(LIB_CORE_PATH); \ + ln -f -s $(MAPLE_ROOT)/build/core/libcore.mk ./makefile; \ + $(MAKE) gen-def OPT=$(OPT) DEBUG=$(DEBUG) + +.PHONY: install +install: maple dex2mpl_install irbuild hir2mpl + $(shell mkdir -p $(INSTALL_DIR)/ops/linker/; \ + rsync -a -L $(MRT_ROOT)/maplert/linker/maplelld.so.lds $(INSTALL_DIR)/ops/linker/; \ + rsync -a -L $(MAPLE_ROOT)/build/java2d8 $(INSTALL_DIR)/bin; \ + rsync -a -L $(MAPLE_BIN_DIR)/java2jar $(INSTALL_DIR)/bin/; \ + cp -rf $(MAPLE_ROOT)/tools $(INSTALL_DIR)/../; \ + rsync -a -L $(MAPLE_ROOT)/src/hir2mpl/ast_input/clang/lib/sys/ $(INSTALL_DIR)/lib/include/;) + +.PHONY: all +all: install libcore + +.PHONY: dex2mpl_install +dex2mpl_install: directory + $(shell rsync -a -L $(MAPLE_BIN_DIR)/dex2mpl $(INSTALL_DIR)/bin/;) + +.PHONY: setup +setup: + (cd tools; ./setup_tools.sh) + +.PHONY: demo +demo: + test/maple_aarch64_with_hir2mpl.sh test/c_demo printHuawei 1 1 + test/maple_aarch64_with_clang2mpl.sh test/c_demo printHuawei 1 1 + +.PHONY: ctorture-ci +ctorture-ci: + (cd third_party/ctorture; git checkout .; git pull; ./ci.sh) + +.PHONY: ctorture +ctorture: + (cd third_party/ctorture; git checkout .; git pull; ./run.sh work.list) + +.PHONY: ctorture2 +ctorture2: + (cd third_party/ctorture; git checkout .; git pull; ./run.sh work.list hir2mpl) + +THREADS := 50 +ifneq ($(findstring test,$(MAKECMDGOALS)),) +TESTTARGET := $(MAKECMDGOALS) +ifdef TARGET +REALTARGET := $(TARGET) +else +REALTARGET := $(TESTTARGET) +endif +.PHONY: $(TESTTARGET) +${TESTTARGET}: + @python3 $(MAPLE_ROOT)/testsuite/driver/src/driver.py --target=$(REALTARGET) --run-path=$(MAPLE_ROOT)/output/$(MAPLE_BUILD_TYPE)/testsuite $(if $(MOD), --mod=$(MOD),) --j=$(THREADS) +endif + +.PHONY: cleanrsd +cleanrsd:uninstall_patch + @rm -rf libjava-core/libcore-all.* libjava-core/m* libjava-core/comb.* + +.PHONY: clean +clean: cleanrsd + @rm -rf $(MAPLE_BUILD_OUTPUT)/ + @rm -rf $(MAPLE_ROOT)/report.txt + +.PHONY: clobber +clobber: cleanrsd + @rm -rf output + +define build_gn + mkdir -p $(INSTALL_DIR); \ + $(GN) gen $(INSTALL_DIR) --args='$(1)' --export-compile-commands; \ + cd $(INSTALL_DIR); \ + $(NINJA) -v $(2); +endef diff --git a/Readme.md b/Readme.md new file mode 100644 index 0000000000000000000000000000000000000000..870f27a720326ec28ca5afd48ac9a87b4d16265f --- /dev/null +++ b/Readme.md @@ -0,0 +1,82 @@ +# OpenArkCompiler + +## Overview +----------------- +> Unified programming platform supporting multiple devices and languages + +OpenArkCompiler is Huawei's open source project. 
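Most build targets in the Makefile above funnel through the build_gn macro, which generates a GN output directory and then runs ninja inside it. A rough manual equivalent of $(call build_gn, $(GN_OPTIONS), maple), with a placeholder output directory and a trimmed-down --args string, would be:

    gn gen output/aarch64-clang-release --args='GN_BUILD_TYPE="RELEASE" OPT="O2"' --export-compile-commands
    cd output/aarch64-clang-release && ninja -v maple

The maplegen -> maplegendef -> maple target chain near the top of the Makefile repeats this pattern with different ninja targets.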
+ + ### Four Technical Features of OpenArkCompiler ### + +Compiles code written in different languages into a single set of executable files and executes them efficiently in the runtime environment: +- Supports joint optimization of multiple languages and eliminates cross-language calling overhead. +- Achieves a lightweight language runtime. +- Coordinates hardware and software to maximize hardware energy efficiency. +- Supports diversified terminal platforms. + +## Open Source Plan +**Open source compiler framework code** +- Time: August 2019 +- Scope: Compiler IR and middle-end language implementation +- Benefits: + - Provides an open source framework for understanding the architecture and framework code of OpenArkCompiler. + - Allows developers to build a complete compiler toolchain that supports the compilation of Java sample programs (not full applications). + +**Subsequent open source scope** +Subsequent releases will open up the compiler front end, back end, and compilation optimizations, and will fully support compiling Java programs and JavaScript applications. + +|Open Source Scope|March 2020 |May 2020 | +| ------------ | -------------------|--------------------- | +|Front end| Jbc front-end basic framework | Front-end full open source | +|Middle end |Weekly open source optimization phase | | +|Back end |Backend open source (O0) (aarch64)|Weekly open source optimization (O2) (aarch64)| +|Test framework|Test framework and basic cases| | + +**Updating...** + +## How to use +- source build/envsetup.sh arm release + or +- source build/envsetup.sh arm debug +- make setup (In this step, you may need to configure the proxy or VPN to download all dependencies.) +- make +- make libcore +- make testall + +C example: +- make demo + +## OpenArkCompiler Incubator +- FutureWei Mapleall https://gitee.com/openarkcompiler-incubator/mapleall +- Maple Engine https://gitee.com/openarkcompiler-incubator/maple_engine +- Maple FrontEnd https://gitee.com/openarkcompiler-incubator/MapleFE +- Maple clang2mpl https://gitee.com/openarkcompiler-incubator/clang2mpl + +## Related Documents + +- Architecture Design Principles + - [MAPLE IR Design](doc/en/MapleIRDesign.md) + - [RC API](doc/en/RcApi.md) + - [Naive RC Insertion Principle](doc/en/NaiveRcInsertionDescription.md) + - [Virtual Table and Interface Table Design](doc/en/VtableItableDescription.md) + - [Phase Design](doc/en/CompilerPhaseDescription.md) + - [Application Manual of Maple General Modules](doc/en/DeveloperGuide4Utility.md) + + +- [Environment Configuration](doc/en/DevelopmentPreparation.md) + +- [Developer Guide](doc/en/DeveloperGuide.md) + +- [Programming Specifications](doc/en/ProgrammingSpecifications.md) + +- [Discussion of Pointer in C++ Programming](doc/en/CPPCodingTalkAboutPointer.md) + +## License +- [LICENSE](license/LICENSE) + +## Real-time Chat Channel +- [Discord-OpenArkCompiler](https://discord.gg/CJeJWQXxMP) +- 9:00 am - 10:30 am every day, developers can discuss community issues in the voice channel "panel discussion" + +- [Discord-MapleFE](https://discord.gg/sBj3cc4GhM) +- We hold a weekly meeting about projects related to MapleFE diff --git a/Readme_zh.md b/Readme_zh.md new file mode 100644 index 0000000000000000000000000000000000000000..a2aab5783752ad526e2a212a7374fd72ca03abbd --- /dev/null +++ b/Readme_zh.md @@ -0,0 +1,76 @@ +# OpenArkCompiler + +## 概述 +----------------- +> 面向多设备、支持多语言的统一编程平台。 + +OpenArkCompiler是来自华为方舟编译器的开源项目。 + +### OpenArkCompiler 四个技术特点 ### + +能够将不同语言代码编译成一套可执行文件,在运行环境中高效执行: +- 支持多语言联合优化、消除跨语言调用开销; +- 更轻量的语言运行时; +- 软硬协同充分发挥硬件能效; +- 支持多样化的终端设备平台 + +## 开源计划 
+**编译器框架代码开源** +- 时间:2019年8月 +- 开源范围:编译器IR+中端语言实现 +- 开放能力: + - 框架开源供参考学习,了解方舟编译器架构及框架代码 + - 开发者可构建出完整编译器工具链,支持Java Sample程序编译(非应用) + +**后续开源范围** +陆续开源编译器前端、后端、编译优化;完整支持Java程序编译、JavaScript语言应用的编译等。 + +|开源范围 |2020年3月 |2020年5月 | +| ------------ | -------------------|--------------------- | +|编译器前端 |jbc前端基础框架 |前端全量开源 | +|编译器中端 |独立优化Phase每周持续开源 || +|编译器后端 |后端开源(O0) (aarch64)|独立优化按周开源(O2) (aarch64)| +|测试框架 |测试框架+基础用例开源| | + +**计划持续更新...** + +## 如何使用 +- source build/envsetup.sh arm release + 或 +- source build/envsetup.sh arm debug +- make setup (这一步可能需要配置代理或者vpn,才能将所有依赖下载下来) +- make +- make libcore +- make testall + +## 孵化器项目 +- FutureWei编译器分支 https://gitee.com/openarkcompiler-incubator/mapleall +- Maple Engine https://gitee.com/openarkcompiler-incubator/maple_engine + +## 相关文档 + +- 架构设计原理 + - [MAPLE IR Design](doc/en/MapleIRDesign.md) + - [RC API](doc/cn/RcApi.md) + - [Naive RC操作插入原理](doc/cn/NaiveRcInsertionDescription.md) + - [虚函数表和接口函数表设计介绍](doc/cn/VtableItableDescription.md) + - [Phase设计介绍](doc/cn/CompilerPhaseDescription.md) + - [Maple通用模块应用手册](doc/cn/DeveloperGuide4Utility.md) + +- [环境配置](doc/cn/DevelopmentPreparation.md) + +- [开发者指南](doc/cn/DeveloperGuide.md) + +- [编程规范](doc/cn/ProgrammingSpecifications.md) + +- [C++编程探讨之指针](doc/cn/CPPCodingTalkAboutPointer.md) + +## 许可证 +- [LICENSE](license/LICENSE) + +## 开发者交流频道 +- [Discord-OpenArkCompiler](https://discord.gg/CJeJWQXxMP) +- 每天上午9:00~10:30不定时在语音频道“panel disscussion”交流社区议题。 + +## 如何提交代码 +- [wiki](https://gitee.com/openarkcompiler/OpenArkCompiler/wikis/%E4%BB%A3%E7%A0%81%E6%8F%90%E4%BA%A4%E6%B5%81%E7%A8%8B?sort_id=2447213) diff --git a/Release.md b/Release.md new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/src/MapleFE/recdetect/build.sh b/build/build.sh similarity index 36% rename from src/MapleFE/recdetect/build.sh rename to build/build.sh index aa92897ffaa1b044f1d04b1656f09453074e554e..c88052b81a918ee150c5fe13d447f6d9964b1553 100755 --- a/src/MapleFE/recdetect/build.sh +++ b/build/build.sh @@ -1,39 +1,30 @@ -# Copyright (C) [2020] Futurewei Technologies, Inc. All rights reverved. -# -# OpenArkFE is licensed under the Mulan PSL v2. +#!/bin/bash +# +# Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. # You can use this software according to the terms and conditions of the Mulan PSL v2. # You may obtain a copy of Mulan PSL v2 at: -# -# http://license.coscl.org.cn/MulanPSL2 -# +# +# http://license.coscl.org.cn/MulanPSL2 +# # THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR # FIT FOR A PARTICULAR PURPOSE. # See the Mulan PSL v2 for more details. -# - -#!/bin/bash - -# Usage: -# ./build.sh [LANG] # -# eg. ./build.sh java - -rm -rf $BUILDDIR/recdetect -rm -rf $BUILDDIR/recdetect/$1 - -rm -rf $1 -mkdir -p $1 - -cp ../$1/include/gen_*.h $1/ -cp ../$1/src/gen_*.cpp $1/ - -# The four generated files shouldn't be taken in. 
-rm -f $1/gen_recursion.h -rm -f $1/gen_recursion.cpp -rm -f $1/gen_lookahead.h -rm -f $1/gen_lookahead.cpp - -mkdir -p $BUILDDIR/recdetect -mkdir -p $BUILDDIR/recdetect/$1 -make LANG=$1 +set -e +source build/envsetup.sh arm release +make clean +option=$@ +model=$1 +if [ -z "$model" ];then + model="libcore" +fi +logfile_name=$(date +"%Y-%m-%d-%H-%M-%S") +logfile_path=${MAPLE_ROOT}/build/logs +date_str=$(date "+%Y-%m-%d %H:%M:%S") +echo "${date_str} $model INFO special log start" | tee ${logfile_path}/${logfile_name}.log +make ${option} | tee -a ${logfile_path}/${logfile_name}.log +date_str=$(date "+%Y-%m-%d %H:%M:%S") +echo "${date_str} $model INFO special log end" | tee -a ${logfile_path}/${logfile_name}.log diff --git a/build/config.gni b/build/config.gni new file mode 100644 index 0000000000000000000000000000000000000000..5db3bf2ce137c9fb04aad98a231669bab0537452 --- /dev/null +++ b/build/config.gni @@ -0,0 +1,42 @@ +# +# Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# +# Toolchain setup + +CLANG_PATH = "${MAPLE_ROOT}/tools/clang+llvm-12.0.0-x86_64-linux-gnu-ubuntu-18.04" + +LLVMINC = "${CLANG_PATH}/include" +LLVMLIBDIR = "${CLANG_PATH}/lib" +CFE_SRC_DIR = "${MAPLE_ROOT}/../third-party/clang-10.0.0.src" +CFE_SRC_PATH = "${MAPLE_ROOT}/third_party/llvm-12.0.0.src/clang" + +# MapleFE path +MAPLE_PARSER_PATH = "" + +GCC_LINARO_PATH = "${MAPLE_ROOT}/tools/gcc-linaro-7.5.0" +NDK_PATH = "${MAPLE_ROOT}/tools/android-ndk-r21" + +GN_C_COMPILER = "${TOOL_BIN_PATH}/clang" +GN_CXX_COMPILER = "${TOOL_BIN_PATH}/clang++" +GN_AR_COMPILER = "${TOOL_BIN_PATH}/llvm-ar" +GN_RANLIB_COMPILER = "${TOOL_BIN_PATH}/llvm-ranlib" + +target_toolchain = "//build/toolchain:clang" +set_default_toolchain(target_toolchain) + +# Cross compile +GN_C_CROSS_COMPILER = "${GCC_LINARO_PATH}/bin/aarch64-linux-gnu-gcc" +GN_CXX_CROSS_COMPILER = "${GCC_LINARO_PATH}/bin/aarch64-linux-gnu-g++" +GN_AR_CROSS_COMPILER = "${GCC_LINARO_PATH}/bin/aarch64-linux-gnu-ar" +GN_RANLIB_CROSS_COMPILER = "${GCC_LINARO_PATH}/bin/aarch64-linux-gnu-ranlib" diff --git a/build/config/BUILDCONFIG.gn b/build/config/BUILDCONFIG.gn new file mode 100755 index 0000000000000000000000000000000000000000..675724a3a732318314e6e0bdd3c8b57f62eeee75 --- /dev/null +++ b/build/config/BUILDCONFIG.gn @@ -0,0 +1,188 @@ +# This file is the master GN build configuration, all variables +# declare here will be implicitly global. +# +# Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
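The rewritten build/build.sh above is a thin logging wrapper: it sources build/envsetup.sh for an arm release configuration, runs make clean, forwards its arguments to make, and tees everything into a timestamped log. A usage sketch, assuming it is run from the repository root and that build/logs already exists (the script writes there without creating it):

    mkdir -p build/logs               # the directory is git-ignored but must exist
    bash build/build.sh               # no arguments: plain make, log labelled "libcore"
    bash build/build.sh maple irbuild # arguments are passed straight through to make
    ls build/logs/                    # one <timestamp>.log file per invocation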
+# + +# List all the input args +declare_args() { + GN_INSTALL_PREFIX = "" + GN_BUILD_TYPE = "" + HOST_ARCH = 64 + MIR_JAVA = 1 + COV_CHECK = 0 + GCONLY = 0 + OPT = "O2" + TARGET = "" + MAJOR_VERSION = "" + MINOR_VERSION = "" + RELEASE_VERSION = "" + BUILD_VERSION = "" + GIT_REVISION = "" + MAST = 0 + ASAN = 0 + ONLY_C = 0 + COV = 0 + GPROF = 0 +} + +# Define global args +MAPLE_ROOT = getenv("MAPLE_ROOT") +ANDROID_ROOT = "${MAPLE_ROOT}/android" +AOSP_ROOT = "${MAPLE_ROOT}/third_party/aosp_modified" +MAPLE_BUILD_TYPE = getenv("MAPLE_BUILD_TYPE") +MAPLE_BUILD_OUTPUT = getenv("MAPLE_BUILD_OUTPUT") +TOOL_BIN_PATH = getenv("TOOL_BIN_PATH") +QEMU_PATH = getenv("TOOL_BIN_PATH") +OLD_OS = getenv("OLD_OS") +if (OLD_OS == "1") { + DESIGNATOR="-Wno-gnu-designator" +} else { + DESIGNATOR="-Wno-c99-designator" +} + +DYNAMICLANG = true +RC_V2 = true +TEST_BENCHMARK = false +MEMORY_LEAK_CHECK = false +MARK_CYCLE_ROOTS = false + +OPENSOURCE_DEPS = "${MAPLE_ROOT}/src/mapleall/deplibs" +OPENSOURCE_OUTPUT = "${MAPLE_BUILD_OUTPUT}" +AST2MPL_ROOT = "${MAPLE_ROOT}/src/ast2mpl" +IS_AST2MPL_EXISTS = getenv("IS_AST2MPL_EXISTS") +MAPLEALL_ROOT = "${MAPLE_ROOT}/src" +MAPLEALL_ROOT = "${MAPLE_ROOT}/src/mapleall" +HIR2MPL_ROOT = "${MAPLE_ROOT}/src/hir2mpl" +MAPLE_MRT_ROOT = "${MAPLE_ROOT}/src/mrt" +THIRD_PARTY_ROOT = "${MAPLE_ROOT}/third_party" + +# Put all built library files under lib +GN_ARCHIVE_OUTPUT_DIRECTORY = "${MAPLE_BUILD_OUTPUT}/ar/host-x86_64-${OPT}" +GN_LIBRARY_OUTPUT_DIRECTORY = "${MAPLE_BUILD_OUTPUT}/lib" + +# Put all built binary files under bin +GN_BINARY_OUTPUT_DIRECTORY = "${MAPLE_BUILD_OUTPUT}/bin" + +import("${MAPLE_ROOT}/build/config.gni") + +# C/CXX Build flags +cflags = [] +cflags_cc = [] +cflags_c = [] +if (GN_BUILD_TYPE == "RELEASE") { + cflags_cc += [ + "-O2", + "-fno-strict-aliasing", + "-fvisibility=hidden", + "-D_FORTIFY_SOURCE=2", + ] + cflags_c += [ + "-O2", + "-fno-strict-aliasing", + "-fvisibility=hidden", + "-D_FORTIFY_SOURCE=2", + ] +} else if (GN_BUILD_TYPE == "DEBUG") { + cflags_cc += [ + "-O0", + "-g3", + "-ftrapv", + "-fstack-check", + ] + cflags_c += [ + "-O0", + "-g3", + "-ftrapv", + "-fstack-check", + ] + if (target_toolchain == "//build/toolchain:clang") { + cflags_c += ["-fno-limit-debug-info"] + cflags_cc += ["-fno-limit-debug-info"] + } +} else { + cflags_cc += [ + "-O2", + "-fno-strict-aliasing", + "-g", + ] + cflags_c += [ + "-O2", + "-fno-strict-aliasing", + "-g", + ] +} + +cflags_c += [ + "-Wall", + "-fstack-protector-strong", + "-fPIC", + "-fPIE", + "-pipe", + "-Werror", + "-Wdate-time", + "-Wfloat-equal", + "${DESIGNATOR}", +] + +cflags_cc += [ + "-Wall", + "-fstack-protector-strong", + "-fPIC", + "-fPIE", + "-pipe", + "-Wno-c99-designator", + "-Wno-range-loop-construct", + "-Werror", + "-Wdate-time", + "-Wfloat-equal", + "${DESIGNATOR}", +] + +if (HOST_ARCH == 64) { + cflags_c += [ "-m64" ] + cflags_cc += [ "-m64" ] +} else { + cflags_c += [ "-m32" ] + cflags_cc += [ "-m32" ] +} + +if (DYNAMICLANG) { + cflags_cc += [ "-DDYNAMICLANG" ] +} + +if (RC_V2) { + cflags_cc += [ "-DRC_NO_MMAP" ] +} + +if (TEST_BENCHMARK) { + cflags_cc += [ "-DTEST_BENCHMARK" ] +} + +if (MEMORY_LEAK_CHECK) { + cflags_cc += [ "-DMEMORY_LEAK_CHECK" ] +} + +if (MARK_CYCLE_ROOTS) { + cflags_cc += [ "-DMARK_CYCLE_ROOTS" ] +} + +cflags_cc += [ "-DMIR_FEATURE_FULL=1" ] + +if (MIR_JAVA == 1) { + cflags_cc += [ "-DMIR_JAVA=1" ] +} else { + TARGET = "vm" + cflags_cc += [ "-DMIR_JAVA=0" ] +} diff --git a/build/core/extra.mk b/build/core/extra.mk new file mode 100644 index 
0000000000000000000000000000000000000000..2c8551bb9d37ec6804fdd10fc93fe96041a5a890 --- /dev/null +++ b/build/core/extra.mk @@ -0,0 +1,24 @@ +# +# Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# +$(INIT_CXX_SRC): $(ADD_OBJS) + cp -f $< $@ + +ifeq ($(DEBUG), 0) +$(INIT_CXX_O) : $(INIT_CXX_SRC) + $(QEMU_CLANG_CPP) -O2 $(QEMU_CLANG_FLAGS) -c $^ -o $@ +else +$(INIT_CXX_O) : $(INIT_CXX_SRC) + $(QEMU_CLANG_CPP) -O0 -g3 $(QEMU_CLANG_FLAGS) -c $^ -o $@ +endif diff --git a/build/core/genmplt.mk b/build/core/genmplt.mk new file mode 100644 index 0000000000000000000000000000000000000000..89ffab5337c56eaa0627544a447f829ad793a60a --- /dev/null +++ b/build/core/genmplt.mk @@ -0,0 +1,16 @@ +# +# Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# +$(LIB_CORE_MPLT): $(JBC2MPL_BIN) $(LIB_CORE_JAR) + $(JBC2MPL_BIN) -injar $(LIB_CORE_JAR) -output $(LIB_CORE_PATH) diff --git a/build/core/hir2mpl.mk b/build/core/hir2mpl.mk new file mode 100644 index 0000000000000000000000000000000000000000..e45502f68de5ae349e36d54b7787e27b6bca2506 --- /dev/null +++ b/build/core/hir2mpl.mk @@ -0,0 +1,17 @@ +# +# Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# +$(APP_MPL): %.mpl : %.dex $(HIR2MPL_BIN) + $(HIR2MPL_BIN) $(HIR2MPL_FLAGS) $< + diff --git a/build/core/hir2mpl_test.mk b/build/core/hir2mpl_test.mk new file mode 100644 index 0000000000000000000000000000000000000000..de06e1b828e6ae748cf2d42be13b2c225249d0ef --- /dev/null +++ b/build/core/hir2mpl_test.mk @@ -0,0 +1,16 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. 
+# See the Mulan PSL v2 for more details. +# +$(APP_MPL): %.mpl : %.dex $(HIR2MPL_BIN) + $(HIR2MPL_BIN) $(HIR2MPL_APP_FLAGS) $< diff --git a/build/core/java2dex.mk b/build/core/java2dex.mk new file mode 100644 index 0000000000000000000000000000000000000000..20dbe3122425e58a963a168f1e291e4706e23696 --- /dev/null +++ b/build/core/java2dex.mk @@ -0,0 +1,2 @@ +$(APP_DEX): %.dex : %.java $(wildcard $(JAR_LIBS_PATH)/*.java) + @bash $(JAVA2DEX) -o $(APP_DEX) $(JAVA2DEX_FLAGS) -i $(APP_JAVA):${EXTRA_JAVA_FILE} $< diff --git a/build/core/libcore.mk b/build/core/libcore.mk new file mode 100644 index 0000000000000000000000000000000000000000..84354068e26247d7c3f09e1fb1f75bad5313cd29 --- /dev/null +++ b/build/core/libcore.mk @@ -0,0 +1,88 @@ +# +# Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# +APP := libcore-all +include $(MAPLE_BUILD_CORE)/maple_variables.mk +ifeq ($(DEBUG), 1) + MPLCG_FLAGS := $(MPLCG_FLAGS) --add-debug-trace +endif +include $(MAPLE_BUILD_CORE)/qemu_ar.mk + +ifeq ($(DEBUG), 0) + QEMU_CXX_FLAGS := -O2 -s +else + QEMU_CXX_FLAGS := -O0 -g3 +endif + +ifeq ($(DISABLE_RC_DUPLICATE), 1) + RC_FLAGS = -DDISABLE_RC_DUPLICATE=1 +else + RC_FLAGS = -DDISABLE_RC_DUPLICATE=0 +endif + +LINKER_QEMU_OPT := -fuse-ld=lld -rdynamic -Wl,-Bsymbolic -lpthread -ldl -L$(MAPLE_OUT)/lib/ -L$(MAPLE_ROOT)/third_party/icu/lib/aarch64-linux-gnu -licuuc + +$(APP_O): %.VtableImpl.o : %.VtableImpl.s + $(QEMU_CLANG_CPP) $(QEMU_CXX_FLAGS) $(QEMU_CLANG_FLAGS) $(RC_FLAGS) -c -o $@ $< + +$(APP_QEMU_SO): %.VtableImpl.qemu.so : %.VtableImpl.o $(INIT_CXX_O) $(qemu) + $(QEMU_CLANG_CPP) $(QEMU_CXX_FLAGS) $(QEMU_CLANG_FLAGS) $(RC_FLAGS) -shared -o $@ $(INIT_CXX_O) -Wl,--whole-archive $(qemu) -Wl,--no-whole-archive $(LINKER_QEMU_OPT) $< -Wl,-T $(LDS) + +include $(MAPLE_BUILD_CORE)/extra.mk +include $(MAPLE_BUILD_CORE)/mplcomb_dex.mk +include $(MAPLE_BUILD_CORE)/hir2mpl.mk + +$(APP_DEX): %.dex : $(D8) $(LIB_CORE_JAR) + $(D8) --min-api 39 --output . 
$(LIB_CORE_JAR) + mv classes.dex $(APP_DEX) + +.PHONY: gen-def +gen-def: $(APP_DEF) +$(APP_DEF): $(APP_S) + rsync -a -L $(APP_DEF) $(MAPLE_ROOT)/src/mrt/unified.macros.def + +.PHONY: install +install: libcore_so deplibs + $(shell mkdir -p $(MAPLE_OUT)/ops/host-x86_64-$(OPT); \ + mkdir -p $(MAPLE_OUT)/ops/third_party; \ + rsync -a -L ${MAPLE_ROOT}/src/mrt/codetricks ${MAPLE_ROOT}/output/tools/; \ + rsync -a -L $(MAPLE_OUT)/lib/$(OPT)/libcore-all.so $(MAPLE_OUT)/ops/host-x86_64-$(OPT); \ + rsync -a -L $(LIB_CORE_PATH)/mrt_module_init.o $(MAPLE_OUT)/ops/; \ + rsync -a -L $(LIB_CORE_PATH)/libcore-all.mplt $(MAPLE_OUT)/ops/; \ + rsync -a -L $(MAPLE_ROOT)/third_party/aosp_10.0.0_r35/libnativehelper $(MAPLE_OUT)/ops/; \ + rsync -a -L $(MAPLE_ROOT)/android/out/target/common/obj/JAVA_LIBRARIES $(MAPLE_OUT)/ops/third_party; \ + rsync -a -L $(MAPLE_ROOT)/third_party/libdex/prebuilts/aarch64-linux-gnu/libz.so.1.2.8 $(MAPLE_OUT)/ops/third_party/libz.so.1; \ + rsync -a -L $(MAPLE_ROOT)/third_party/icu/lib/aarch64-linux-gnu/* $(MAPLE_OUT)/ops/third_party/; \ + ) + +.PHONY: deplibs +deplibs: + $(shell rsync -a -L $(MAPLE_ROOT)/src/mrt/deplibs/*.so $(MAPLE_OUT)/ops/host-x86_64-$(OPT); \ + rsync -a -L $(MAPLE_ROOT)/src/mrt/bin/mplsh $(MAPLE_OUT)/ops/;) + +.PHONY: libcore_so +libcore_so: $(LIBCORE_SO_QEMU) +$(LIBCORE_SO_QEMU): $(APP_QEMU_SO) + mkdir -p $(MAPLE_OUT)/lib/$(OPT)/ + rsync -a -L $< $@ +clean: + @rm -f libcore-all.* + @rm -f *.mpl + @rm -f *.mplt + @rm -f *.groots.txt + @rm -f *.primordials.txt + @rm -rf comb.log + @rm -rf *.muid + @rm -f *.s + @rm -rf *.o diff --git a/build/core/link.mk b/build/core/link.mk new file mode 100644 index 0000000000000000000000000000000000000000..785af47bc93f0bcfd850de57c2b1bd888ac09b68 --- /dev/null +++ b/build/core/link.mk @@ -0,0 +1,2 @@ +${APP_SO}: %.so : %.VtableImpl.s $(CLANG_BIN) + ${TOOL_BIN_PATH}/clang++ -g3 -O2 -x assembler-with-cpp -march=armv8-a -target aarch64-linux-gnu -c $(APP_S) && ${TOOL_BIN_PATH}/clang++ $(APP_O) -L${OUT_ROOT}/${MAPLE_BUILD_TYPE}/ops/host-x86_64-O2 -g3 -O2 -march=armv8-a -target aarch64-linux-gnu -fPIC -shared -o $(APP_SO) ${OUT_ROOT}/${MAPLE_BUILD_TYPE}/ops/mrt_module_init.o -fuse-ld=lld -rdynamic -lcore-all -lcommon-bridge -Wl,-z,notext -Wl,-T${OUT_ROOT}/${MAPLE_BUILD_TYPE}/ops/linker/maplelld.so.lds diff --git a/build/core/maple_test.mk b/build/core/maple_test.mk new file mode 100644 index 0000000000000000000000000000000000000000..0e148680c16512659331a93f30851a6af97405c7 --- /dev/null +++ b/build/core/maple_test.mk @@ -0,0 +1,39 @@ +# +# Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
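The one-line recipe in build/core/link.mk above performs two distinct steps. Split apart for readability, with APP standing in for a concrete test name and the variables coming from the environment set up by build/envsetup.sh, it is roughly:

    # 1) assemble the generated aarch64 assembly (run through the C preprocessor first)
    ${TOOL_BIN_PATH}/clang++ -g3 -O2 -x assembler-with-cpp -march=armv8-a -target aarch64-linux-gnu -c APP.VtableImpl.s
    # 2) link the object into a shared library against the prebuilt runtime, using lld and the maple linker script
    ${TOOL_BIN_PATH}/clang++ APP.VtableImpl.o ${OUT_ROOT}/${MAPLE_BUILD_TYPE}/ops/mrt_module_init.o \
        -g3 -O2 -march=armv8-a -target aarch64-linux-gnu -fPIC -shared -o APP.so \
        -L${OUT_ROOT}/${MAPLE_BUILD_TYPE}/ops/host-x86_64-O2 \
        -fuse-ld=lld -rdynamic -lcore-all -lcommon-bridge \
        -Wl,-z,notext -Wl,-T${OUT_ROOT}/${MAPLE_BUILD_TYPE}/ops/linker/maplelld.so.lds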
+# +include $(MAPLE_BUILD_CORE)/maple_variables.mk + +test: APP_RUN +include $(MAPLE_BUILD_CORE)/qemu_run.mk +include $(MAPLE_BUILD_CORE)/link.mk +include $(MAPLE_BUILD_CORE)/mplcomb_dex.mk +include $(MAPLE_BUILD_CORE)/genmplt.mk +include $(MAPLE_BUILD_CORE)/hir2mpl_test.mk +include $(MAPLE_BUILD_CORE)/java2dex.mk + +.PHONY: clean +clean: + @rm -rf *.jar + @rm -f *.class + @rm -f *.mpl + @rm -f *.mplt + @rm -f *.s + @rm -f *.groots.txt + @rm -f *.primordials.txt + @rm -rf comb.log + @rm -rf *.muid + @rm -rf *.dex + @rm -rf *.def + @rm -rf *.so + @rm -rf *.o diff --git a/build/core/maple_variables.mk b/build/core/maple_variables.mk new file mode 100644 index 0000000000000000000000000000000000000000..87c8372d90c848b67f1bca0dfd5149fa7727aab2 --- /dev/null +++ b/build/core/maple_variables.mk @@ -0,0 +1,98 @@ +# +# Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# +OPT := O2 +DEBUG := 0 +LIB_CORE_PATH := $(MAPLE_BUILD_OUTPUT)/libjava-core/host-x86_64-$(OPT) +LIB_CORE_JAR := $(LIB_CORE_PATH)/java-core.jar +LIB_CORE_MPLT := $(LIB_CORE_PATH)/java-core.mplt + +GCC_LINARO_PATH := $(MAPLE_ROOT)/tools/gcc-linaro-7.5.0 + +TARGETS := $(APP) +APP_JAVA := $(foreach APP, $(TARGETS), $(APP).java) +APP_DEX := $(foreach APP, $(TARGETS), $(APP).dex) +APP_CLASS := $(foreach APP, $(TARGETS), $(APP).class) +APP_JAR := $(foreach APP, $(TARGETS), $(APP).jar) +APP_MPL := $(foreach APP, $(TARGETS), $(APP).mpl) +APP_MPLT:=$(foreach APP, $(TARGETS), $(APP).mplt) +APP_S := $(foreach APP, $(TARGETS), $(APP).VtableImpl.s) +APP_DEF := $(foreach APP, $(TARGETS), $(APP).VtableImpl.macros.def) +APP_O := $(foreach APP, $(TARGETS), $(APP).VtableImpl.o) +APP_SO := $(foreach APP, $(TARGETS), $(APP).so) +APP_QEMU_SO := $(foreach APP, $(TARGETS), $(APP).VtableImpl.qemu.so) +APP_VTABLEIMPL_MPL := $(foreach APP, $(TARGETS), $(APP).VtableImpl.mpl) + +MAPLE_OUT := $(MAPLE_BUILD_OUTPUT) +JAVA2JAR := $(MAPLE_OUT)/bin/java2jar +JBC2MPL_BIN := $(MAPLE_OUT)/bin/jbc2mpl +MAPLE_BIN := $(MAPLE_OUT)/bin/maple +MPLCG_BIN := $(MAPLE_OUT)/bin/mplcg +JAVA2D8 := $(MAPLE_OUT)/bin/java2d8 +HIR2MPL_BIN := $(MAPLE_OUT)/bin/hir2mpl +JAVA2DEX := ${MAPLE_ROOT}/build/java2dex + +D8 := $(MAPLE_ROOT)/build/d8 +ADD_OBJS := $(MAPLE_ROOT)/src/mrt/maplert/src/mrt_module_init.c__ +INIT_CXX_SRC := $(LIB_CORE_PATH)/mrt_module_init.cpp +INIT_CXX_O := $(LIB_CORE_PATH)/mrt_module_init.o + +LDS := $(MAPLE_ROOT)/src/mrt/maplert/linker/maplelld.so.lds +DUPLICATE_DIR := $(MAPLE_ROOT)/src/mrt/codetricks/arch/arm64 + +QEMU_CLANG_CPP := $(TOOL_BIN_PATH)/clang++ + +QEMU_CLANG_FLAGS := -Wall -W -Werror -Wno-unused-command-line-argument -Wl,-z,now -fPIC -fstack-protector-strong \ + -fvisibility=hidden -std=c++14 -march=armv8-a + +QEMU_CLANG_FLAGS += -nostdlibinc \ + --gcc-toolchain=$(GCC_LINARO_PATH) \ + --sysroot=$(GCC_LINARO_PATH)/aarch64-linux-gnu/libc \ + -isystem $(GCC_LINARO_PATH)/aarch64-linux-gnu/include/c++/7.5.0 \ + -isystem $(GCC_LINARO_PATH)/aarch64-linux-gnu/include/c++/7.5.0/aarch64-linux-gnu \ + -isystem 
$(GCC_LINARO_PATH)/aarch64-linux-gnu/include/c++/7.5.0/backward \ + -isystem $(GCC_LINARO_PATH)/lib/gcc/aarch64-linux-gnu/7.5.0/include \ + -isystem $(GCC_LINARO_PATH)/lib/gcc/aarch64-linux-gnu/7.5.0/include-fixed \ + -isystem $(GCC_LINARO_PATH)/aarch64-linux-gnu/include \ + -isystem $(GCC_LINARO_PATH)/aarch64-linux-gnu/libc/usr/include \ + -target aarch64-linux-gnu + +ifeq ($(OPT),O2) + HIR2MPL_FLAGS := --rc + MPLME_FLAGS := --O2 --quiet + MPL2MPL_FLAGS := --O2 --quiet --regnativefunc --no-nativeopt --maplelinker + MPLCG_FLAGS := --O2 --quiet --no-pie --verbose-asm --gen-c-macro-def --maplelinker --duplicate_asm_list=$(DUPLICATE_DIR)/duplicateFunc.s + MPLCG_SO_FLAGS := --fpic +else ifeq ($(OPT),O0) + HIR2MPL_FLAGS := --rc + MPLME_FLAGS := --quiet + MPL2MPL_FLAGS := --quiet --regnativefunc --maplelinker + MPLCG_FLAGS := --quiet --no-pie --verbose-asm --gen-c-macro-def --maplelinker --duplicate_asm_list=$(DUPLICATE_DIR)/duplicateFunc.s + MPLCG_SO_FLAGS := --fpic +else ifeq ($(OPT),GC_O2) + HIR2MPL_FLAGS := + MPLME_FLAGS := --O2 --quiet --gconly + MPL2MPL_FLAGS := --O2 --quiet --regnativefunc --no-nativeopt --maplelinker + MPLCG_FLAGS := --O2 --quiet --no-pie --verbose-asm --gen-c-macro-def --maplelinker --duplicate_asm_list=$(DUPLICATE_DIR)/duplicateFunc.s --gconly + MPLCG_SO_FLAGS := --fpic +else ifeq ($(OPT),GC_O0) + HIR2MPL_FLAGS := + MPLME_FLAGS := --quiet --gconly + MPL2MPL_FLAGS := --quiet --regnativefunc --maplelinker + MPLCG_FLAGS := --quiet --no-pie --verbose-asm --gen-c-macro-def --maplelinker --duplicate_asm_list=$(DUPLICATE_DIR)/duplicateFunc.s --gconly + MPLCG_SO_FLAGS := --fpic +endif +HIR2MPL_APP_FLAGS := -mplt=${LIB_CORE_PATH}/libcore-all.mplt +MPLCOMBO_FLAGS := --run=me:mpl2mpl:mplcg --option="$(MPLME_FLAGS):$(MPL2MPL_FLAGS):$(MPLCG_FLAGS) $(MPLCG_SO_FLAGS)" +JAVA2DEX_FLAGS := -p ${OUT_ROOT}/${MAPLE_BUILD_TYPE}/ops/third_party/JAVA_LIBRARIES/core-oj_intermediates/classes.jar:${OUT_ROOT}/${MAPLE_BUILD_TYPE}/ops/third_party/JAVA_LIBRARIES/core-libart_intermediates/classes.jar diff --git a/build/core/mplcomb.mk b/build/core/mplcomb.mk new file mode 100644 index 0000000000000000000000000000000000000000..b0891618fb791f176635ee966b65b1155f5ebab6 --- /dev/null +++ b/build/core/mplcomb.mk @@ -0,0 +1,16 @@ +# +# Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# +$(APP_S): %.VtableImpl.s : %.jar $(MAPLE_BIN) $(JBC2MPL_BIN) $(LIB_CORE_MPLT) + $(MAPLE_BIN) -$(OPT) --mplt $(LIB_CORE_MPLT) $< diff --git a/build/core/mplcomb_dex.mk b/build/core/mplcomb_dex.mk new file mode 100644 index 0000000000000000000000000000000000000000..94f477d038f4ca162e059afe5bbea7691989b042 --- /dev/null +++ b/build/core/mplcomb_dex.mk @@ -0,0 +1,16 @@ +# +# Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. 
+# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# +$(APP_S): %.VtableImpl.s : %.mpl $(MAPLE_BIN) $(DUPLICATE_DIR)/duplicateFunc.s + $(MAPLE_BIN) --infile $< $(MPLCOMBO_FLAGS) --save-temps > comb.log 2>&1 diff --git a/build/core/qemu_ar.mk b/build/core/qemu_ar.mk new file mode 100644 index 0000000000000000000000000000000000000000..323357f226cc43f9229b53205b5a4cfbc583892e --- /dev/null +++ b/build/core/qemu_ar.mk @@ -0,0 +1,29 @@ +# +# Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# +LIB_AR_PATH=$(MAPLE_OUT)/ar/host-x86_64-$(OPT) +LIB_RUNTIME=$(LIB_AR_PATH)/libmplcompiler-rt.a +LIB_ZTERP=$(LIB_AR_PATH)/libzterp.a +LIB_MRT=$(LIB_AR_PATH)/libmaplert.a +LIB_HUAWEISECUREC=$(LIB_AR_PATH)/libhuawei_secure_c.a +LIB_CORENATIVE=$(LIB_AR_PATH)/libcore-static-binding-jni.a +LIBDEXINTERFACE=$(LIB_AR_PATH)/libdexinterface.a +LIBCORE_SO_QEMU := $(MAPLE_OUT)/lib/$(OPT)/libcore-all.so +ifneq ($(findstring GC,$(OPT)),) + LIB_CORENATIVE_QEMU=$(MAPLE_ROOT)/src/mrt/deplibs/libcore-static-binding-jni-qemu.a +else + LIB_CORENATIVE_QEMU=$(MAPLE_ROOT)/src/mrt/deplibs/gc/libcore-static-binding-jni-qemu.a +endif + +qemu := $(LIB_RUNTIME) $(LIB_MRT) $(LIB_HUAWEISECUREC) $(LIB_CORENATIVE_QEMU) $(LIB_CORENATIVE) $(LIB_ZTERP) $(LIBDEXINTERFACE) diff --git a/build/core/qemu_run.mk b/build/core/qemu_run.mk new file mode 100644 index 0000000000000000000000000000000000000000..3572fea91d5ac8eeb3f6f27773b42f2fb49c0271 --- /dev/null +++ b/build/core/qemu_run.mk @@ -0,0 +1,2 @@ +APP_RUN: $(APP_SO) + ${TOOL_BIN_PATH}/qemu-aarch64 -L /usr/aarch64-linux-gnu -E LD_LIBRARY_PATH=${OUT_ROOT}/${MAPLE_BUILD_TYPE}/ops/third_party:${OUT_ROOT}/${MAPLE_BUILD_TYPE}/ops/host-x86_64-O2:./ ${OUT_ROOT}/${MAPLE_BUILD_TYPE}/ops/mplsh -Xbootclasspath:libcore-all.so -cp $(APP_SO) $(APP) diff --git a/build/d8 b/build/d8 new file mode 100755 index 0000000000000000000000000000000000000000..a3c0c08c419accdd314a53b80d6b59a2d6486fc0 --- /dev/null +++ b/build/d8 @@ -0,0 +1,24 @@ +#!/bin/bash +# +# Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under the Mulan PSL v1. +# You can use this software according to the terms and conditions of the Mulan PSL v1. +# You may obtain a copy of Mulan PSL v1 at: +# +# http://license.coscl.org.cn/MulanPSL +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v1 for more details. 
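To make the flag variables above concrete: with OPT=O2, the rule in build/core/mplcomb_dex.mk expands to a single maple driver invocation that chains the me, mpl2mpl and mplcg phases (APP.mpl below is a placeholder input):

    maple --infile APP.mpl \
        --run=me:mpl2mpl:mplcg \
        --option="--O2 --quiet:--O2 --quiet --regnativefunc --no-nativeopt --maplelinker:--O2 --quiet --no-pie --verbose-asm --gen-c-macro-def --maplelinker --duplicate_asm_list=${MAPLE_ROOT}/src/mrt/codetricks/arch/arm64/duplicateFunc.s --fpic" \
        --save-temps > comb.log 2>&1

The resulting APP.VtableImpl.s is what link.mk turns into APP.so and qemu_run.mk then executes under qemu-aarch64 through mplsh.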
+# +jarfile=d8.jar +libdir=${MAPLE_ROOT}/third_party/d8/lib +jarpath="$libdir/$jarfile" + +# a max heap size of dx, can be overwrite +defaultHeapMax="-Xmx1024M" +javaOpts="${javaOpts} ${defaultHeapMax}" + +java $javaOpts -jar "$jarpath" "$@" diff --git a/build/envsetup.sh b/build/envsetup.sh new file mode 100644 index 0000000000000000000000000000000000000000..0a363eac85fc4c1c4776cafca438cc11696381f5 --- /dev/null +++ b/build/envsetup.sh @@ -0,0 +1,216 @@ +#!/bin/bash +# +# Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# + +function print_usage { + echo " " + echo "usage: source envsetup.sh arm/ark/engine/riscv/x86_64 release/debug" + echo " " +} + +if [ "$#" -lt 2 ]; then + print_usage +# return +fi + +curdir=$(pwd) +export MAPLE_ROOT=${curdir} +export SPEC=${MAPLE_ROOT}/testsuite/c_test/spec_test +export LD_LIBRARY_PATH=${MAPLE_ROOT}/tools/gcc-linaro-7.5.0/aarch64-linux-gnu/libc/lib:${MAPLE_ROOT}/tools/clang+llvm-12.0.0-x86_64-linux-gnu-ubuntu-18.04/lib:${LD_LIBRARY_PATH} +export SPECPERLLIB=${SPEC}/bin/lib:${SPEC}/bin:${SPEC}/SPEC500-perlbench_r/data/all/input/lib:${SPEC}/SPEC500-perlbench_r/t/lib +export CASE_ROOT=${curdir}/testsuite +export OUT_ROOT=${curdir}/output +export ANDROID_ROOT=${curdir}/android +export MAPLE_BUILD_CORE=${MAPLE_ROOT}/build/core +if [ -d ${MAPLE_ROOT}/src/ast2mpl ]; then + export IS_AST2MPL_EXISTS=1 +else + export IS_AST2MPL_EXISTS=0 +fi +export GCOV_PREFIX=${MAPLE_ROOT}/report/gcda +export GCOV_PREFIX_STRIP=7 + +# display OS version +lsb_release -d + +export TOOL_BIN_PATH=${MAPLE_ROOT}/tools/bin +if [ -d ${MAPLE_ROOT}/testsuite/driver/.config ];then + rm -rf ${MAPLE_ROOT}/testsuite/driver/config + rm -rf ${MAPLE_ROOT}/testsuite/driver/src/api + rm -rf ${MAPLE_ROOT}/testsuite/driver/src/mode + cd ${MAPLE_ROOT}/testsuite/driver + ln -s -f .config config + cd ${MAPLE_ROOT}/testsuite/driver/src + ln -s -f .api api + ln -s -f .mode mode +fi + +cd ${MAPLE_ROOT} + +OS_VERSION=`lsb_release -r | sed -e "s/^[^0-9]*//" -e "s/\..*//"` +if [ "$OS_VERSION" = "16" ] || [ "$OS_VERSION" = "18" ]; then + export OLD_OS=1 +else + export OLD_OS=0 +fi + +# support multiple ARCH and BUILD_TYPE + +if [ $1 = "arm" ]; then + PLATFORM=aarch64 + USEOJ=0 +elif [ $1 = "riscv" ]; then + PLATFORM=riscv64 + USEOJ=0 +elif [ $1 = "x86_64" ] ; then + PLATFORM=x86_64 + USEOJ=0 +elif [ $1 = "engine" ]; then + PLATFORM=ark + USEOJ=1 +elif [ $1 = "ark" ]; then + PLATFORM=ark + USEOJ=1 +else + print_usage + return +fi + +if [ "$2" = "release" ]; then + TYPE=release + DEBUG=0 +elif [ "$2" = "debug" ]; then + TYPE=debug + DEBUG=1 +else + print_usage + return +fi + +export MAPLE_DEBUG=${DEBUG} +export TARGET_PROCESSOR=${PLATFORM} +export TARGET_SCOPE=${TYPE} +export USE_OJ_LIBCORE=${USEOJ} +export TARGET_TOOLCHAIN=clang +export MAPLE_BUILD_TYPE=${TARGET_PROCESSOR}-${TARGET_TOOLCHAIN}-${TARGET_SCOPE} +echo "Build: $MAPLE_BUILD_TYPE" +export MAPLE_BUILD_OUTPUT=${MAPLE_ROOT}/output/${MAPLE_BUILD_TYPE} +export MAPLE_EXECUTE_BIN=${MAPLE_ROOT}/output/${MAPLE_BUILD_TYPE}/bin +export 
TEST_BIN=${CASE_ROOT}/driver/script +export PATH=$PATH:${MAPLE_EXECUTE_BIN}:${TEST_BIN} + +# Enable Autocompletion for maple driver +if [ -f $MAPLE_ROOT/tools/maple_autocompletion.sh ]; then + source ${MAPLE_ROOT}/tools/maple_autocompletion.sh +fi + +if [ ! -f $MAPLE_ROOT/tools/qemu/usr/bin/qemu-aarch64 ] && [ "$OLD_OS" = "0" ]; then + echo " " + echo "!!! please run \"make setup\" to get proper qemu-aarch64" + echo " " +fi + +function mm +{ + THREADS=$(cat /proc/cpuinfo| grep "processor"| wc -l) + PWD=$(pwd) + num=${#CASE_ROOT} + let num++ + ALL_MODE_LIST=$(cd ${CASE_ROOT}/driver/src/mode; find -name "*.py" | xargs basename -s .py;) + TARGET=${PWD:${num}} + MODE= + + #mm MODE=O0 + if [ $# -lt 3 ] && [[ "x${1^^}" =~ ^xMODE=.* ]]; then + MODE=${1#*=} + MODE=${MODE^^} + python3 ${CASE_ROOT}/driver/src/driver.py --target=${TARGET} --mode=${MODE} --detail + elif [ $# -lt 3 ] && [[ `echo ${ALL_MODE_LIST[@]} | grep -w ${1^^}` ]] ; then + MODE=${1^^} + python3 ${CASE_ROOT}/driver/src/driver.py --target=${TARGET} --mode=${MODE} --detail + + #mm clean + elif [ $# -lt 3 ] && [ "x${1}" = "xclean" ]; then + python3 ${CASE_ROOT}/driver/src/driver.py --target=${TARGET} --clean --detail + + #mm save + elif [ $# = 1 ] && [ "x${1}" = "xsave" ]; then + python3 ${CASE_ROOT}/driver/src/driver.py --target=${TARGET} --save + + #mm testall + elif [ $# = 1 ] && [ -f ${CASE_ROOT}/driver/config/${1}.conf ]; then + TARGET=${1} + python3 ${CASE_ROOT}/driver/src/driver.py --target=${TARGET} --run-path=${OUT_ROOT}/host/test --j=${THREADS} + + #mm testall MODE=O0 + elif [ $# = 2 ] && [ -f ${CASE_ROOT}/driver/config/${1}.conf ]; then + if [[ "x${2^^}" =~ ^xMODE=.* ]]; then + MODE=${2#*=} + MODE=${MODE^^} + elif [[ `echo ${ALL_MODE_LIST[@]} | grep -w ${2^^}` ]] ; then + MODE=${2^^} + fi + TARGET=${1} + python3 ${CASE_ROOT}/driver/src/driver.py --target=${TARGET} --run-path=${OUT_ROOT}/host/test --mode=${MODE} --j=${THREADS} + + #mm app_test + elif [ $# = 1 ] && [ -d ${CASE_ROOT}/${1} ] + then + TARGET=${1} + python3 ${CASE_ROOT}/driver/src/driver.py --target=${TARGET} --run-path=${OUT_ROOT}/host/test --j=${THREADS} + + #mm app_test MODE=O2 + elif [ $# = 2 ] && [ -d ${CASE_ROOT}/${1} ]; then + if [[ "x${2^^}" =~ ^xMODE=.* ]]; then + MODE=${2#*=} + MODE=${MODE^^} + elif [[ `echo ${ALL_MODE_LIST[@]} | grep -w ${2^^}` ]] ; then + MODE=${2^^} + fi + TARGET=${1} + python3 ${CASE_ROOT}/driver/src/driver.py --target=${TARGET} --run-path=${OUT_ROOT}/host/test --mode=${MODE} --j=${THREADS} + + elif [ $# = 1 ] && [ "x${1,,}" = "x-h" -o "x${1,,}" = "x--help" ]; + then + cat < + + namespace art { + +@@ -115,11 +116,23 @@ + static_assert(IsAligned(kX86StackOverflowReservedBytes), "X86 gap not page aligned"); + static_assert(IsAligned(kX86_64StackOverflowReservedBytes), + "X86_64 gap not page aligned"); +- ++/* ++cflags += [ ++ "-DART_STACK_OVERFLOW_GAP_arm=8192", ++ "-DART_STACK_OVERFLOW_GAP_arm64=8192", ++ "-DART_STACK_OVERFLOW_GAP_mips=16384", ++ "-DART_STACK_OVERFLOW_GAP_mips64=16384", ++ "-DART_STACK_OVERFLOW_GAP_x86=16384", ++ "-DART_STACK_OVERFLOW_GAP_x86_64=20480", ++ "-DART_FRAME_SIZE_LIMIT=7400", ++ ] ++ */ ++/* + #if !defined(ART_FRAME_SIZE_LIMIT) + #error "ART frame size limit missing" + #endif +- ++*/ ++const uint32_t ART_FRAME_SIZE_LIMIT = 7400; + // TODO: Should we require an extra page (RoundUp(SIZE) + kPageSize)? 
+ static_assert(ART_FRAME_SIZE_LIMIT < kArmStackOverflowReservedBytes, "Frame size limit too large"); + static_assert(ART_FRAME_SIZE_LIMIT < kArm64StackOverflowReservedBytes, +diff -ur art_aosp/libartbase/arch/instruction_set.h art/libartbase/arch/instruction_set.h +--- art_aosp/libartbase/arch/instruction_set.h 2021-01-29 11:00:02.202266487 +0800 ++++ art/libartbase/arch/instruction_set.h 2021-01-29 10:34:50.012986865 +0800 +@@ -227,19 +227,30 @@ + } + + namespace instruction_set_details { +- ++/* ++cflags += [ ++ "-DART_STACK_OVERFLOW_GAP_arm=8192", ++ "-DART_STACK_OVERFLOW_GAP_arm64=8192", ++ "-DART_STACK_OVERFLOW_GAP_mips=16384", ++ "-DART_STACK_OVERFLOW_GAP_mips64=16384", ++ "-DART_STACK_OVERFLOW_GAP_x86=16384", ++ "-DART_STACK_OVERFLOW_GAP_x86_64=20480", ++ "-DART_FRAME_SIZE_LIMIT=7400", ++ ] ++ */ ++ /* + #if !defined(ART_STACK_OVERFLOW_GAP_arm) || !defined(ART_STACK_OVERFLOW_GAP_arm64) || \ + !defined(ART_STACK_OVERFLOW_GAP_mips) || !defined(ART_STACK_OVERFLOW_GAP_mips64) || \ + !defined(ART_STACK_OVERFLOW_GAP_x86) || !defined(ART_STACK_OVERFLOW_GAP_x86_64) + #error "Missing defines for stack overflow gap" + #endif +- +-static constexpr size_t kArmStackOverflowReservedBytes = ART_STACK_OVERFLOW_GAP_arm; +-static constexpr size_t kArm64StackOverflowReservedBytes = ART_STACK_OVERFLOW_GAP_arm64; +-static constexpr size_t kMipsStackOverflowReservedBytes = ART_STACK_OVERFLOW_GAP_mips; +-static constexpr size_t kMips64StackOverflowReservedBytes = ART_STACK_OVERFLOW_GAP_mips64; +-static constexpr size_t kX86StackOverflowReservedBytes = ART_STACK_OVERFLOW_GAP_x86; +-static constexpr size_t kX86_64StackOverflowReservedBytes = ART_STACK_OVERFLOW_GAP_x86_64; ++*/ ++static constexpr size_t kArmStackOverflowReservedBytes = 8192; ++static constexpr size_t kArm64StackOverflowReservedBytes = 8192; ++static constexpr size_t kMipsStackOverflowReservedBytes = 16384; ++static constexpr size_t kMips64StackOverflowReservedBytes = 16384; ++static constexpr size_t kX86StackOverflowReservedBytes = 16384; ++static constexpr size_t kX86_64StackOverflowReservedBytes = 20480; + + NO_RETURN void GetStackOverflowReservedBytesFailure(const char* error_msg); + +diff -ur art_aosp/libartbase/base/arena_allocator.h art/libartbase/base/arena_allocator.h +--- art_aosp/libartbase/base/arena_allocator.h 2021-01-29 11:00:02.198266452 +0800 ++++ art/libartbase/base/arena_allocator.h 2021-01-29 10:34:50.012986865 +0800 +@@ -25,6 +25,7 @@ + #include "dchecked_vector.h" + #include "macros.h" + #include "memory_tool.h" ++#include + + namespace art { + +diff -ur art_aosp/libartbase/base/bit_vector.h art/libartbase/base/bit_vector.h +--- art_aosp/libartbase/base/bit_vector.h 2021-01-29 11:00:02.202266487 +0800 ++++ art/libartbase/base/bit_vector.h 2021-01-29 10:34:50.012986865 +0800 +@@ -19,7 +19,7 @@ + + #include + #include +- ++#include + #include "bit_utils.h" + #include "globals.h" + +diff -ur art_aosp/libartbase/base/file_magic.cc art/libartbase/base/file_magic.cc +--- art_aosp/libartbase/base/file_magic.cc 2021-01-29 11:00:02.198266452 +0800 ++++ art/libartbase/base/file_magic.cc 2021-01-29 10:34:50.012986865 +0800 +@@ -19,7 +19,7 @@ + #include + #include + #include +- ++#include + #include + #include + +diff -ur art_aosp/libartbase/base/file_utils.cc art/libartbase/base/file_utils.cc +--- art_aosp/libartbase/base/file_utils.cc 2021-01-29 11:00:02.198266452 +0800 ++++ art/libartbase/base/file_utils.cc 2021-01-29 10:34:50.012986865 +0800 +@@ -40,7 +40,7 @@ + + + #include +- ++#include + #include 
"android-base/stringprintf.h" + #include "android-base/strings.h" + +@@ -401,7 +401,7 @@ + &error_msg); + return (android_root != nullptr) + && (runtime_root != nullptr) +- && (std::string_view(android_root) != std::string_view(runtime_root)); ++ && (StringView(android_root) != StringView(runtime_root)); + } + + int DupCloexec(int fd) { +diff -ur art_aosp/libartbase/base/hiddenapi_stubs.h art/libartbase/base/hiddenapi_stubs.h +--- art_aosp/libartbase/base/hiddenapi_stubs.h 2021-01-29 11:00:02.198266452 +0800 ++++ art/libartbase/base/hiddenapi_stubs.h 2021-01-29 10:34:50.012986865 +0800 +@@ -18,11 +18,16 @@ + #define ART_LIBARTBASE_BASE_HIDDENAPI_STUBS_H_ + + #include +-#include ++#include "string_view_format.h" + + namespace art { + namespace hiddenapi { + ++const std::string kPublicApiStr = "public-api"; ++const std::string kSystemApiStr = "system-api"; ++const std::string kTestApiStr = "test-api"; ++const std::string kCorePlatformApiStr = "core-platform-api"; ++ + class ApiStubs { + public: + enum class Kind { +@@ -32,7 +37,7 @@ + kCorePlatformApi, + }; + +- static const std::string_view ToString(Kind api) { ++ static std::string ToString(Kind api) { + switch (api) { + case Kind::kPublicApi: + return kPublicApiStr; +@@ -45,16 +50,10 @@ + } + } + +- static bool IsStubsFlag(const std::string_view& api_flag_name) { ++ static bool IsStubsFlag(const std::string& api_flag_name) { + return api_flag_name == kPublicApiStr || api_flag_name == kSystemApiStr || + api_flag_name == kTestApiStr || api_flag_name == kCorePlatformApiStr; + } +- +- private: +- static constexpr std::string_view kPublicApiStr{"public-api"}; +- static constexpr std::string_view kSystemApiStr{"system-api"}; +- static constexpr std::string_view kTestApiStr{"test-api"}; +- static constexpr std::string_view kCorePlatformApiStr{"core-platform-api"}; + }; + + } // namespace hiddenapi +diff -ur art_aosp/libartbase/base/logging.cc art/libartbase/base/logging.cc +--- art_aosp/libartbase/base/logging.cc 2021-01-29 11:00:02.202266487 +0800 ++++ art/libartbase/base/logging.cc 2021-01-29 10:34:50.012986865 +0800 +@@ -23,6 +23,7 @@ + #include "aborting.h" + #include "os.h" + #include "unix_file/fd_file.h" ++#include + + // Headers for LogMessage::LogLine. + #ifdef ART_TARGET_ANDROID +diff -ur art_aosp/libartbase/base/memfd.cc art/libartbase/base/memfd.cc +--- art_aosp/libartbase/base/memfd.cc 2021-01-29 11:00:02.198266452 +0800 ++++ art/libartbase/base/memfd.cc 2021-01-29 10:34:50.016986900 +0800 +@@ -25,6 +25,7 @@ + #endif + + #include "macros.h" ++#include + + // When building for linux host, glibc in prebuilts does not include memfd_create system call + // number. As a temporary testing measure, we add the definition here. 
+diff -ur art_aosp/libartbase/base/os_linux.cc art/libartbase/base/os_linux.cc +--- art_aosp/libartbase/base/os_linux.cc 2021-01-29 11:00:02.202266487 +0800 ++++ art/libartbase/base/os_linux.cc 2021-01-29 10:34:50.016986900 +0800 +@@ -62,8 +62,7 @@ + CHECK(name != nullptr); + bool read_only = ((flags & O_ACCMODE) == O_RDONLY); + bool check_usage = !read_only && auto_flush; +- std::unique_ptr file( +- new File(name, flags, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH, check_usage)); ++ std::unique_ptr file(new File(name, flags, 0666, check_usage)); + if (!file->IsOpened()) { + return nullptr; + } +diff -ur art_aosp/libartbase/base/safe_copy.cc art/libartbase/base/safe_copy.cc +--- art_aosp/libartbase/base/safe_copy.cc 2021-01-29 11:00:02.202266487 +0800 ++++ art/libartbase/base/safe_copy.cc 2021-01-29 10:34:50.016986900 +0800 +@@ -56,10 +56,10 @@ + } + + src_iovs[iovecs_used].iov_base = const_cast(cur); +- if (!IsAlignedParam(cur, PAGE_SIZE)) { +- src_iovs[iovecs_used].iov_len = AlignUp(cur, PAGE_SIZE) - cur; ++ if (!IsAlignedParam(cur, sysconf(_SC_PAGE_SIZE))) { ++ src_iovs[iovecs_used].iov_len = AlignUp(cur, sysconf(_SC_PAGE_SIZE)) - cur; + } else { +- src_iovs[iovecs_used].iov_len = PAGE_SIZE; ++ src_iovs[iovecs_used].iov_len = sysconf(_SC_PAGE_SIZE); + } + + src_iovs[iovecs_used].iov_len = std::min(src_iovs[iovecs_used].iov_len, len); +diff -ur art_aosp/libartbase/base/scoped_flock.cc art/libartbase/base/scoped_flock.cc +--- art_aosp/libartbase/base/scoped_flock.cc 2021-01-29 11:00:02.202266487 +0800 ++++ art/libartbase/base/scoped_flock.cc 2021-01-29 10:34:50.016986900 +0800 +@@ -24,6 +24,7 @@ + + #include "file_utils.h" + #include "unix_file/fd_file.h" ++#include + + namespace art { + +diff -ur art_aosp/libartbase/base/string_view_cpp20.h art/libartbase/base/string_view_cpp20.h +--- art_aosp/libartbase/base/string_view_cpp20.h 2021-01-29 11:00:02.198266452 +0800 ++++ art/libartbase/base/string_view_cpp20.h 2021-01-29 10:34:50.016986900 +0800 +@@ -17,21 +17,21 @@ + #ifndef ART_LIBARTBASE_BASE_STRING_VIEW_CPP20_H_ + #define ART_LIBARTBASE_BASE_STRING_VIEW_CPP20_H_ + +-#include ++#include "string_view_format.h" + + namespace art { + +-// Replacement functions for std::string_view::starts_with(), ends_with() ++// Replacement functions for StringView::starts_with(), ends_with() + // which shall be available in C++20. + #if __cplusplus >= 202000L + #error "When upgrading to C++20, remove this error and file a bug to remove this workaround." 
+ #endif + +-inline bool StartsWith(std::string_view sv, std::string_view prefix) { ++inline bool StartsWith(StringView sv, StringView prefix) { + return sv.substr(0u, prefix.size()) == prefix; + } + +-inline bool EndsWith(std::string_view sv, std::string_view suffix) { ++inline bool EndsWith(StringView sv, StringView suffix) { + return sv.size() >= suffix.size() && sv.substr(sv.size() - suffix.size()) == suffix; + } + +diff -ur art_aosp/libartbase/base/unix_file/fd_file.cc art/libartbase/base/unix_file/fd_file.cc +--- art_aosp/libartbase/base/unix_file/fd_file.cc 2021-01-29 11:00:02.202266487 +0800 ++++ art/libartbase/base/unix_file/fd_file.cc 2021-01-29 10:34:50.016986900 +0800 +@@ -20,6 +20,7 @@ + #include + #include + #include ++#include + + #if defined(__BIONIC__) + #include +diff -ur art_aosp/libartbase/base/utils.cc art/libartbase/base/utils.cc +--- art_aosp/libartbase/base/utils.cc 2021-01-29 11:00:02.202266487 +0800 ++++ art/libartbase/base/utils.cc 2021-01-29 10:34:50.016986900 +0800 +@@ -29,7 +29,6 @@ + #include "android-base/stringprintf.h" + #include "android-base/strings.h" + +-#include "bit_utils.h" + #include "os.h" + + #if defined(__APPLE__) +@@ -63,98 +62,6 @@ + using android::base::ReadFileToString; + using android::base::StringPrintf; + +-#if defined(__arm__) +- +-namespace { +- +-// Bitmap of caches to flush for cacheflush(2). Must be zero for ARM. +-static constexpr int kCacheFlushFlags = 0x0; +- +-// Number of retry attempts when flushing cache ranges. +-static constexpr size_t kMaxFlushAttempts = 4; +- +-int CacheFlush(uintptr_t start, uintptr_t limit) { +- // The signature of cacheflush(2) seems to vary by source. On ARM the system call wrapper +- // (bionic/SYSCALLS.TXT) has the form: int cacheflush(long start, long end, long flags); +- int r = cacheflush(start, limit, kCacheFlushFlags); +- if (r == -1) { +- CHECK_NE(errno, EINVAL); +- } +- return r; +-} +- +-bool TouchAndFlushCacheLinesWithinPage(uintptr_t start, uintptr_t limit, size_t attempts) { +- CHECK_LT(start, limit); +- CHECK_EQ(RoundDown(start, kPageSize), RoundDown(limit - 1, kPageSize)) << "range spans pages"; +- // Declare a volatile variable so the compiler does not elide reads from the page being touched. +- volatile uint8_t v = 0; +- for (size_t i = 0; i < attempts; ++i) { +- // Touch page to maximize chance page is resident. +- v = *reinterpret_cast(start); +- +- if (LIKELY(CacheFlush(start, limit) == 0)) { +- return true; +- } +- } +- return false; +-} +- +-} // namespace +- +-bool FlushCpuCaches(void* begin, void* end) { +- // This method is specialized for ARM as the generic implementation below uses the +- // __builtin___clear_cache() intrinsic which is declared as void. On ARMv7 flushing the CPU +- // caches is a privileged operation. The Linux kernel allows these operations to fail when they +- // trigger a fault (e.g. page not resident). We use a wrapper for the ARM specific cacheflush() +- // system call to detect the failure and potential erroneous state of the data and instruction +- // caches. +- // +- // The Android bug for this is b/132205399 and there's a similar discussion on +- // https://reviews.llvm.org/D37788. This is primarily an issue for the dual view JIT where the +- // pages where code is executed are only ever RX and never RWX. 
When attempting to invalidate +- // instruction cache lines in the RX mapping after writing fresh code in the RW mapping, the +- // page may not be resident (due to memory pressure), and this means that a fault is raised in +- // the midst of a cacheflush() call and the instruction cache lines are not invalidated and so +- // have stale code. +- // +- // Other architectures fair better for reasons such as: +- // +- // (1) stronger coherence between the data and instruction caches. +- // +- // (2) fault handling that allows flushing/invalidation to continue after +- // a missing page has been faulted in. +- +- // In the common case, this flush of the complete range succeeds. +- uintptr_t start = reinterpret_cast(begin); +- const uintptr_t limit = reinterpret_cast(end); +- if (LIKELY(CacheFlush(start, limit) == 0)) { +- return true; +- } +- +- // A rare failure has occurred implying that part of the range (begin, end] has been swapped +- // out. Retry flushing but this time grouping cache-line flushes on individual pages and +- // touching each page before flushing. +- uintptr_t next_page = RoundUp(start + 1, kPageSize); +- while (start < limit) { +- uintptr_t boundary = std::min(next_page, limit); +- if (!TouchAndFlushCacheLinesWithinPage(start, boundary, kMaxFlushAttempts)) { +- return false; +- } +- start = boundary; +- next_page += kPageSize; +- } +- return true; +-} +- +-#else +- +-bool FlushCpuCaches(void* begin, void* end) { +- __builtin___clear_cache(reinterpret_cast(begin), reinterpret_cast(end)); +- return true; +-} +- +-#endif +- + pid_t GetTid() { + #if defined(__APPLE__) + uint64_t owner; +diff -ur art_aosp/libartbase/base/utils.h art/libartbase/base/utils.h +--- art_aosp/libartbase/base/utils.h 2021-01-29 11:00:02.198266452 +0800 ++++ art/libartbase/base/utils.h 2021-01-29 10:34:50.016986900 +0800 +@@ -113,8 +113,15 @@ + // Sleep forever and never come back. + NO_RETURN void SleepForever(); + +-// Flush CPU caches. Returns true on success, false if flush failed. +-WARN_UNUSED bool FlushCpuCaches(void* begin, void* end); ++inline void FlushDataCache(void* begin, void* end) { ++ __builtin___clear_cache(reinterpret_cast(begin), reinterpret_cast(end)); ++} ++ ++inline void FlushInstructionCache(void* begin, void* end) { ++ // Same as FlushInstructionCache for lack of other builtin. __builtin___clear_cache ++ // flushes both caches. 
++ __builtin___clear_cache(reinterpret_cast(begin), reinterpret_cast(end)); ++} + + template + constexpr PointerSize ConvertToPointerSize(T any) { +diff -ur art_aosp/libartpalette/system/palette_fake.cc art/libartpalette/system/palette_fake.cc +--- art_aosp/libartpalette/system/palette_fake.cc 2021-01-29 11:01:59.615297526 +0800 ++++ art/libartpalette/system/palette_fake.cc 2021-01-29 10:34:50.016986900 +0800 +@@ -21,7 +21,7 @@ + + #include + #include // For ATTRIBUTE_UNUSED +- ++#include "string_view_format.h" + #include "palette_system.h" + + enum PaletteStatus PaletteGetVersion(int32_t* version) { +@@ -54,7 +54,7 @@ + } + + enum PaletteStatus PaletteWriteCrashThreadStacks(/*in*/ const char* stacks, size_t stacks_len) { +- LOG(INFO) << std::string_view(stacks, stacks_len); ++ LOG(INFO) << StringView(stacks, stacks_len); + return PaletteStatus::kOkay; + } + +diff -ur art_aosp/libdexfile/dex/compact_dex_file.cc art/libdexfile/dex/compact_dex_file.cc +--- art_aosp/libdexfile/dex/compact_dex_file.cc 2021-01-29 11:02:31.435577410 +0800 ++++ art/libdexfile/dex/compact_dex_file.cc 2021-01-29 10:34:50.016986900 +0800 +@@ -19,6 +19,8 @@ + #include "base/leb128.h" + #include "code_item_accessors-inl.h" + #include "dex_file-inl.h" ++#include ++#include + + namespace art { + +diff -ur art_aosp/libdexfile/dex/compact_offset_table.h art/libdexfile/dex/compact_offset_table.h +--- art_aosp/libdexfile/dex/compact_offset_table.h 2021-01-29 11:02:32.119583426 +0800 ++++ art/libdexfile/dex/compact_offset_table.h 2021-01-29 10:34:50.016986900 +0800 +@@ -28,7 +28,7 @@ + public: + // This value is coupled with the leb chunk bitmask. That logic must also be adjusted when the + // integer is modified. +- static constexpr size_t kElementsPerIndex = 16; ++ static constexpr std::size_t kElementsPerIndex = 16; + + // Leb block format: + // [uint16_t] 16 bit mask for what indexes actually have a non zero offset for the chunk. +@@ -61,7 +61,7 @@ + uint32_t* out_table_offset); + + // 32 bit aligned for the offset table. 
+- static constexpr size_t kAlignment = sizeof(uint32_t); ++ static constexpr std::size_t kAlignment = sizeof(uint32_t); + }; + + } // namespace art +diff -ur art_aosp/libdexfile/dex/descriptors_names.cc art/libdexfile/dex/descriptors_names.cc +--- art_aosp/libdexfile/dex/descriptors_names.cc 2021-01-29 11:02:30.463568859 +0800 ++++ art/libdexfile/dex/descriptors_names.cc 2021-01-29 10:34:50.016986900 +0800 +@@ -21,6 +21,8 @@ + + #include "base/macros.h" + #include "dex/utf-inl.h" ++#include ++#include + + namespace art { + +diff -ur art_aosp/libdexfile/dex/dex_file.cc art/libdexfile/dex/dex_file.cc +--- art_aosp/libdexfile/dex/dex_file.cc 2021-01-29 11:02:29.831563300 +0800 ++++ art/libdexfile/dex/dex_file.cc 2021-01-29 10:34:50.016986900 +0800 +@@ -400,7 +400,7 @@ + } + + // Given a signature place the type ids into the given vector +-bool DexFile::CreateTypeList(std::string_view signature, ++bool DexFile::CreateTypeList(StringView signature, + dex::TypeIndex* return_type_idx, + std::vector* param_type_idxs) const { + if (signature[0] != '(') { +diff -ur art_aosp/libdexfile/dex/dex_file.h art/libdexfile/dex/dex_file.h +--- art_aosp/libdexfile/dex/dex_file.h 2021-01-29 11:02:29.947564321 +0800 ++++ art/libdexfile/dex/dex_file.h 2021-01-29 10:34:50.016986900 +0800 +@@ -19,7 +19,7 @@ + + #include + #include +-#include ++#include "string_view_format.h" + #include + + #include +@@ -262,7 +262,7 @@ + const char* StringDataAndUtf16LengthByIdx(dex::StringIndex idx, uint32_t* utf16_length) const; + + const char* StringDataByIdx(dex::StringIndex idx) const; +- std::string_view StringViewByIdx(dex::StringIndex idx) const; ++ StringView StringViewByIdx(dex::StringIndex idx) const; + + // Looks up a string id for a given modified utf8 string. + const dex::StringId* FindStringId(const char* string) const; +@@ -480,7 +480,7 @@ + } + + // Given a signature place the type ids into the given vector, returns true on success +- bool CreateTypeList(std::string_view signature, ++ bool CreateTypeList(StringView signature, + dex::TypeIndex* return_type_idx, + std::vector* param_type_idxs) const; + +diff -ur art_aosp/libdexfile/dex/dex_file-inl.h art/libdexfile/dex/dex_file-inl.h +--- art_aosp/libdexfile/dex/dex_file-inl.h 2021-01-29 11:02:30.975573364 +0800 ++++ art/libdexfile/dex/dex_file-inl.h 2021-01-29 10:34:50.016986900 +0800 +@@ -28,14 +28,15 @@ + #include "dex_instruction_iterator.h" + #include "invoke_type.h" + #include "standard_dex_file.h" ++#include "string_view_format.h" + + namespace art { + +-inline std::string_view StringViewFromUtf16Length(const char* utf8_data, size_t utf16_length) { ++inline StringView StringViewFromUtf16Length(const char* utf8_data, size_t utf16_length) { + size_t utf8_length = LIKELY(utf8_data[utf16_length] == 0) // Is ASCII? + ? utf16_length + : utf16_length + strlen(utf8_data + utf16_length); +- return std::string_view(utf8_data, utf8_length); ++ return StringView(utf8_data, utf8_length); + } + + inline int32_t DexFile::GetStringLength(const dex::StringId& string_id) const { +@@ -71,10 +72,10 @@ + return StringDataAndUtf16LengthByIdx(idx, &unicode_length); + } + +-inline std::string_view DexFile::StringViewByIdx(dex::StringIndex idx) const { ++inline StringView DexFile::StringViewByIdx(dex::StringIndex idx) const { + uint32_t unicode_length; + const char* data = StringDataAndUtf16LengthByIdx(idx, &unicode_length); +- return data != nullptr ? StringViewFromUtf16Length(data, unicode_length) : std::string_view(""); ++ return data != nullptr ? 
StringViewFromUtf16Length(data, unicode_length) : StringView(""); + } + + inline const char* DexFile::StringByTypeIdx(dex::TypeIndex idx, uint32_t* unicode_length) const { +diff -ur art_aosp/libdexfile/dex/dex_file_loader.h art/libdexfile/dex/dex_file_loader.h +--- art_aosp/libdexfile/dex/dex_file_loader.h 2021-01-29 11:02:31.547578394 +0800 ++++ art/libdexfile/dex/dex_file_loader.h 2021-01-29 10:34:50.016986900 +0800 +@@ -19,6 +19,7 @@ + + #include + #include ++#include + #include + #include + +diff -ur art_aosp/libdexfile/dex/dex_file_verifier.cc art/libdexfile/dex/dex_file_verifier.cc +--- art_aosp/libdexfile/dex/dex_file_verifier.cc 2021-01-29 11:02:29.603561296 +0800 ++++ art/libdexfile/dex/dex_file_verifier.cc 2021-01-29 10:34:50.016986900 +0800 +@@ -17,7 +17,7 @@ + #include "dex_file_verifier.h" + + #include +- ++#include + #include + + #include "android-base/stringprintf.h" +diff -ur art_aosp/libdexfile/dex/signature.cc art/libdexfile/dex/signature.cc +--- art_aosp/libdexfile/dex/signature.cc 2021-01-29 11:02:32.579587471 +0800 ++++ art/libdexfile/dex/signature.cc 2021-01-29 10:34:50.020986935 +0800 +@@ -57,11 +57,11 @@ + return strcmp(return_type, "V") == 0; + } + +-bool Signature::operator==(std::string_view rhs) const { ++bool Signature::operator==(StringView rhs) const { + if (dex_file_ == nullptr) { + return false; + } +- std::string_view tail(rhs); ++ StringView tail(rhs); + if (!StartsWith(tail, "(")) { + return false; // Invalid signature + } +@@ -69,7 +69,7 @@ + const TypeList* params = dex_file_->GetProtoParameters(*proto_id_); + if (params != nullptr) { + for (uint32_t i = 0; i < params->Size(); ++i) { +- std::string_view param(dex_file_->StringByTypeIdx(params->GetTypeItem(i).type_idx_)); ++ StringView param(dex_file_->StringByTypeIdx(params->GetTypeItem(i).type_idx_)); + if (!StartsWith(tail, param)) { + return false; + } +diff -ur art_aosp/libdexfile/dex/signature.h art/libdexfile/dex/signature.h +--- art_aosp/libdexfile/dex/signature.h 2021-01-29 11:02:30.347567837 +0800 ++++ art/libdexfile/dex/signature.h 2021-01-29 10:34:50.020986935 +0800 +@@ -19,7 +19,7 @@ + + #include + #include +-#include ++#include "string_view_format.h" + + #include + +@@ -49,7 +49,7 @@ + return !(*this == rhs); + } + +- bool operator==(std::string_view rhs) const; ++ bool operator==(StringView rhs) const; + + private: + Signature(const DexFile* dex, const dex::ProtoId& proto) : dex_file_(dex), proto_id_(&proto) { +diff -ur art_aosp/libdexfile/dex/signature-inl.h art/libdexfile/dex/signature-inl.h +--- art_aosp/libdexfile/dex/signature-inl.h 2021-01-29 11:02:32.063582932 +0800 ++++ art/libdexfile/dex/signature-inl.h 2021-01-29 10:34:50.020986935 +0800 +@@ -36,13 +36,13 @@ + uint32_t lhs_shorty_len; // For a shorty utf16 length == mutf8 length. + const char* lhs_shorty_data = dex_file_->StringDataAndUtf16LengthByIdx(proto_id_->shorty_idx_, + &lhs_shorty_len); +- std::string_view lhs_shorty(lhs_shorty_data, lhs_shorty_len); ++ StringView lhs_shorty(lhs_shorty_data, lhs_shorty_len); + { + uint32_t rhs_shorty_len; + const char* rhs_shorty_data = + rhs.dex_file_->StringDataAndUtf16LengthByIdx(rhs.proto_id_->shorty_idx_, + &rhs_shorty_len); +- std::string_view rhs_shorty(rhs_shorty_data, rhs_shorty_len); ++ StringView rhs_shorty(rhs_shorty_data, rhs_shorty_len); + if (lhs_shorty != rhs_shorty) { + return false; // Shorty mismatch. + } +@@ -56,7 +56,7 @@ + return false; // Return type mismatch. 
+ } + } +- if (lhs_shorty.find('L', 1) != std::string_view::npos) { ++ if (lhs_shorty.find('L', 1) != StringView::npos) { + const dex::TypeList* params = dex_file_->GetProtoParameters(*proto_id_); + const dex::TypeList* rhs_params = rhs.dex_file_->GetProtoParameters(*rhs.proto_id_); + // We found a reference parameter in the matching shorty, so both lists must be non-empty. +diff -ur art_aosp/libdexfile/dex/standard_dex_file.cc art/libdexfile/dex/standard_dex_file.cc +--- art_aosp/libdexfile/dex/standard_dex_file.cc 2021-01-29 11:02:31.491577900 +0800 ++++ art/libdexfile/dex/standard_dex_file.cc 2021-01-29 10:34:50.020986935 +0800 +@@ -20,6 +20,8 @@ + #include "base/leb128.h" + #include "code_item_accessors-inl.h" + #include "dex_file-inl.h" ++#include ++#include + + namespace art { + +diff -ur art_aosp/libdexfile/external/include/art_api/dex_file_support.h art/libdexfile/external/include/art_api/dex_file_support.h +--- art_aosp/libdexfile/external/include/art_api/dex_file_support.h 2021-01-29 11:02:33.379594509 +0800 ++++ art/libdexfile/external/include/art_api/dex_file_support.h 2021-01-29 10:34:50.020986935 +0800 +@@ -22,7 +22,7 @@ + #include + #include + #include +-#include ++#include "string_view_format.h" + #include + #include + +@@ -46,7 +46,7 @@ + } + explicit DexString(const char* str = "") + : ext_string_(MakeExtDexFileString(str, std::strlen(str))) {} +- explicit DexString(std::string_view str) ++ explicit DexString(StringView str) + : ext_string_(MakeExtDexFileString(str.data(), str.size())) {} + ~DexString() { g_ExtDexFileFreeString(ext_string_); } + +@@ -68,10 +68,10 @@ + } + size_t length() const { return size(); } + +- operator std::string_view() const { ++ operator StringView() const { + size_t len; + const char* chars = g_ExtDexFileGetString(ext_string_, &len); +- return std::string_view(chars, len); ++ return StringView(chars, len); + } + + private: diff --git a/build/third_party/llvm_001.patch b/build/third_party/llvm_001.patch new file mode 100644 index 0000000000000000000000000000000000000000..d09e4adbb13564ddd32431836209f7e8241a190c --- /dev/null +++ b/build/third_party/llvm_001.patch @@ -0,0 +1,659 @@ +diff -ur llvm/include/llvm/BinaryFormat/Dwarf.def llvm_mod/include/llvm/BinaryFormat/Dwarf.def +--- llvm/include/llvm/BinaryFormat/Dwarf.def 2021-08-05 10:44:22.675885701 +0800 ++++ llvm_mod/include/llvm/BinaryFormat/Dwarf.def 2021-08-05 10:53:17.888077533 +0800 +@@ -17,7 +17,7 @@ + defined HANDLE_DW_VIRTUALITY || defined HANDLE_DW_DEFAULTED || \ + defined HANDLE_DW_CC || defined HANDLE_DW_LNS || defined HANDLE_DW_LNE || \ + defined HANDLE_DW_LNCT || defined HANDLE_DW_MACRO || \ +- defined HANDLE_DW_MACRO_GNU || defined HANDLE_MACRO_FLAG || \ ++ defined HANDLE_MACRO_FLAG || \ + defined HANDLE_DW_RLE || defined HANDLE_DW_LLE || \ + (defined HANDLE_DW_CFA && defined HANDLE_DW_CFA_PRED) || \ + defined HANDLE_DW_APPLE_PROPERTY || defined HANDLE_DW_UT || \ +@@ -88,10 +88,6 @@ + #define HANDLE_DW_MACRO(ID, NAME) + #endif + +-#ifndef HANDLE_DW_MACRO_GNU +-#define HANDLE_DW_MACRO_GNU(ID, NAME) +-#endif +- + #ifndef HANDLE_MACRO_FLAG + #define HANDLE_MACRO_FLAG(ID, NAME) + #endif +@@ -841,18 +837,6 @@ + HANDLE_DW_MACRO(0x0b, define_strx) + HANDLE_DW_MACRO(0x0c, undef_strx) + +-// GNU .debug_macro extension. 
+-HANDLE_DW_MACRO_GNU(0x01, define) +-HANDLE_DW_MACRO_GNU(0x02, undef) +-HANDLE_DW_MACRO_GNU(0x03, start_file) +-HANDLE_DW_MACRO_GNU(0x04, end_file) +-HANDLE_DW_MACRO_GNU(0x05, define_indirect) +-HANDLE_DW_MACRO_GNU(0x06, undef_indirect) +-HANDLE_DW_MACRO_GNU(0x07, transparent_include) +-HANDLE_DW_MACRO_GNU(0x08, define_indirect_alt) +-HANDLE_DW_MACRO_GNU(0x09, undef_indirect_alt) +-HANDLE_DW_MACRO_GNU(0x0a, transparent_include_alt) +- + // DWARF v5 Macro header flags. + HANDLE_MACRO_FLAG(0x01, OFFSET_SIZE) + HANDLE_MACRO_FLAG(0x02, DEBUG_LINE_OFFSET) +@@ -909,7 +893,7 @@ + HANDLE_DW_CFA(0x16, val_expression) + // Vendor extensions: + HANDLE_DW_CFA_PRED(0x1d, MIPS_advance_loc8, SELECT_MIPS64) +-HANDLE_DW_CFA_PRED(0x2d, GNU_window_save, SELECT_SPARC) ++//HANDLE_DW_CFA_PRED(0x2d, GNU_window_save, SELECT_SPARC) + HANDLE_DW_CFA_PRED(0x2d, AARCH64_negate_ra_state, SELECT_AARCH64) + HANDLE_DW_CFA_PRED(0x2e, GNU_args_size, SELECT_X86) + +@@ -1002,7 +986,6 @@ + #undef HANDLE_DW_LNE + #undef HANDLE_DW_LNCT + #undef HANDLE_DW_MACRO +-#undef HANDLE_DW_MACRO_GNU + #undef HANDLE_MACRO_FLAG + #undef HANDLE_DW_RLE + #undef HANDLE_DW_LLE +diff -ur llvm/include/llvm/BinaryFormat/Dwarf.h llvm_mod/include/llvm/BinaryFormat/Dwarf.h +--- llvm/include/llvm/BinaryFormat/Dwarf.h 2021-08-05 10:44:22.675885701 +0800 ++++ llvm_mod/include/llvm/BinaryFormat/Dwarf.h 2021-08-05 10:53:21.308129313 +0800 +@@ -1,4 +1,4 @@ +-//===-- llvm/BinaryFormat/Dwarf.h ---Dwarf Constants-------------*- C++ -*-===// ++//===-- Dwarf.h ---Dwarf Constants-------------*- C++ -*-===// + // + // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. + // See https://llvm.org/LICENSE.txt for license information. +@@ -19,21 +19,6 @@ + #ifndef LLVM_BINARYFORMAT_DWARF_H + #define LLVM_BINARYFORMAT_DWARF_H + +-#include "llvm/ADT/Optional.h" +-#include "llvm/Support/Compiler.h" +-#include "llvm/Support/DataTypes.h" +-#include "llvm/Support/ErrorHandling.h" +-#include "llvm/Support/Format.h" +-#include "llvm/Support/FormatVariadicDetails.h" +-#include "llvm/ADT/Triple.h" +- +-#include +- +-namespace llvm { +-class StringRef; +- +-namespace dwarf { +- + //===----------------------------------------------------------------------===// + // DWARF constants as gleaned from the DWARF Debugging Information Format V.5 + // reference manual http://www.dwarfstd.org/. +@@ -43,7 +28,7 @@ + // enumeration base type. + + enum LLVMConstants : uint32_t { +- // LLVM mock tags (see also llvm/BinaryFormat/Dwarf.def). ++ // LLVM mock tags (see also Dwarf.def). + DW_TAG_invalid = ~0U, // Tag for invalid results. + DW_VIRTUALITY_invalid = ~0U, // Virtuality for invalid results. + DW_MACINFO_invalid = ~0U, // Macinfo type for invalid results. +@@ -84,7 +69,7 @@ + + enum Tag : uint16_t { + #define HANDLE_DW_TAG(ID, NAME, VERSION, VENDOR, KIND) DW_TAG_##NAME = ID, +-#include "llvm/BinaryFormat/Dwarf.def" ++#include "Dwarf.def" + DW_TAG_lo_user = 0x4080, + DW_TAG_hi_user = 0xffff, + DW_TAG_user_base = 0x1000 ///< Recommended base for user tags. +@@ -97,39 +82,38 @@ + #define HANDLE_DW_TAG(ID, NAME, VERSION, VENDOR, KIND) \ + case DW_TAG_##NAME: \ + return (KIND == DW_KIND_TYPE); +-#include "llvm/BinaryFormat/Dwarf.def" ++#include "Dwarf.def" + } + } + + /// Attributes. 
+ enum Attribute : uint16_t { + #define HANDLE_DW_AT(ID, NAME, VERSION, VENDOR) DW_AT_##NAME = ID, +-#include "llvm/BinaryFormat/Dwarf.def" ++#include "Dwarf.def" + DW_AT_lo_user = 0x2000, + DW_AT_hi_user = 0x3fff, + }; + + enum Form : uint16_t { + #define HANDLE_DW_FORM(ID, NAME, VERSION, VENDOR) DW_FORM_##NAME = ID, +-#include "llvm/BinaryFormat/Dwarf.def" ++#include "Dwarf.def" + DW_FORM_lo_user = 0x1f00, ///< Not specified by DWARF. + }; + + enum LocationAtom { + #define HANDLE_DW_OP(ID, NAME, VERSION, VENDOR) DW_OP_##NAME = ID, +-#include "llvm/BinaryFormat/Dwarf.def" ++#include "Dwarf.def" + DW_OP_lo_user = 0xe0, + DW_OP_hi_user = 0xff, +- DW_OP_LLVM_fragment = 0x1000, ///< Only used in LLVM metadata. +- DW_OP_LLVM_convert = 0x1001, ///< Only used in LLVM metadata. +- DW_OP_LLVM_tag_offset = 0x1002, ///< Only used in LLVM metadata. +- DW_OP_LLVM_entry_value = 0x1003, ///< Only used in LLVM metadata. +- DW_OP_LLVM_implicit_pointer = 0x1004, ///< Only used in LLVM metadata. ++ DW_OP_LLVM_fragment = 0x1000, ///< Only used in LLVM metadata. ++ DW_OP_LLVM_convert = 0x1001, ///< Only used in LLVM metadata. ++ DW_OP_LLVM_tag_offset = 0x1002, ///< Only used in LLVM metadata. ++ DW_OP_LLVM_entry_value = 0x1003, ///< Only used in LLVM metadata. + }; + + enum TypeKind : uint8_t { + #define HANDLE_DW_ATE(ID, NAME, VERSION, VENDOR) DW_ATE_##NAME = ID, +-#include "llvm/BinaryFormat/Dwarf.def" ++#include "Dwarf.def" + DW_ATE_lo_user = 0x80, + DW_ATE_hi_user = 0xff + }; +@@ -146,7 +130,7 @@ + enum EndianityEncoding { + // Endianity attribute values + #define HANDLE_DW_END(ID, NAME) DW_END_##NAME = ID, +-#include "llvm/BinaryFormat/Dwarf.def" ++#include "Dwarf.def" + DW_END_lo_user = 0x40, + DW_END_hi_user = 0xff + }; +@@ -167,26 +151,25 @@ + + enum VirtualityAttribute { + #define HANDLE_DW_VIRTUALITY(ID, NAME) DW_VIRTUALITY_##NAME = ID, +-#include "llvm/BinaryFormat/Dwarf.def" ++#include "Dwarf.def" + DW_VIRTUALITY_max = 0x02 + }; + + enum DefaultedMemberAttribute { + #define HANDLE_DW_DEFAULTED(ID, NAME) DW_DEFAULTED_##NAME = ID, +-#include "llvm/BinaryFormat/Dwarf.def" ++#include "Dwarf.def" + DW_DEFAULTED_max = 0x02 + }; + + enum SourceLanguage { + #define HANDLE_DW_LANG(ID, NAME, LOWER_BOUND, VERSION, VENDOR) \ + DW_LANG_##NAME = ID, +-#include "llvm/BinaryFormat/Dwarf.def" ++#include "Dwarf.def" + DW_LANG_lo_user = 0x8000, + DW_LANG_hi_user = 0xffff + }; + + inline bool isCPlusPlus(SourceLanguage S) { +- bool result = false; + // Deliberately enumerate all the language options so we get a warning when + // new language options are added (-Wswitch) that'll hopefully help keep this + // switch up-to-date when new C++ versions are added. +@@ -195,8 +178,7 @@ + case DW_LANG_C_plus_plus_03: + case DW_LANG_C_plus_plus_11: + case DW_LANG_C_plus_plus_14: +- result = true; +- break; ++ return true; + case DW_LANG_C89: + case DW_LANG_C: + case DW_LANG_Ada83: +@@ -235,68 +217,8 @@ + case DW_LANG_BORLAND_Delphi: + case DW_LANG_lo_user: + case DW_LANG_hi_user: +- result = false; +- break; +- } +- +- return result; +-} +- +-inline bool isFortran(SourceLanguage S) { +- bool result = false; +- // Deliberately enumerate all the language options so we get a warning when +- // new language options are added (-Wswitch) that'll hopefully help keep this +- // switch up-to-date when new Fortran versions are added. 
+- switch (S) { +- case DW_LANG_Fortran77: +- case DW_LANG_Fortran90: +- case DW_LANG_Fortran95: +- case DW_LANG_Fortran03: +- case DW_LANG_Fortran08: +- result = true; +- break; +- case DW_LANG_C89: +- case DW_LANG_C: +- case DW_LANG_Ada83: +- case DW_LANG_C_plus_plus: +- case DW_LANG_Cobol74: +- case DW_LANG_Cobol85: +- case DW_LANG_Pascal83: +- case DW_LANG_Modula2: +- case DW_LANG_Java: +- case DW_LANG_C99: +- case DW_LANG_Ada95: +- case DW_LANG_PLI: +- case DW_LANG_ObjC: +- case DW_LANG_ObjC_plus_plus: +- case DW_LANG_UPC: +- case DW_LANG_D: +- case DW_LANG_Python: +- case DW_LANG_OpenCL: +- case DW_LANG_Go: +- case DW_LANG_Modula3: +- case DW_LANG_Haskell: +- case DW_LANG_C_plus_plus_03: +- case DW_LANG_C_plus_plus_11: +- case DW_LANG_OCaml: +- case DW_LANG_Rust: +- case DW_LANG_C11: +- case DW_LANG_Swift: +- case DW_LANG_Julia: +- case DW_LANG_Dylan: +- case DW_LANG_C_plus_plus_14: +- case DW_LANG_RenderScript: +- case DW_LANG_BLISS: +- case DW_LANG_Mips_Assembler: +- case DW_LANG_GOOGLE_RenderScript: +- case DW_LANG_BORLAND_Delphi: +- case DW_LANG_lo_user: +- case DW_LANG_hi_user: +- result = false; +- break; ++ return false; + } +- +- return result; + } + + enum CaseSensitivity { +@@ -310,7 +232,7 @@ + enum CallingConvention { + // Calling convention codes + #define HANDLE_DW_CC(ID, NAME) DW_CC_##NAME = ID, +-#include "llvm/BinaryFormat/Dwarf.def" ++#include "Dwarf.def" + DW_CC_lo_user = 0x40, + DW_CC_hi_user = 0xff + }; +@@ -338,20 +260,20 @@ + /// Line Number Standard Opcode Encodings. + enum LineNumberOps : uint8_t { + #define HANDLE_DW_LNS(ID, NAME) DW_LNS_##NAME = ID, +-#include "llvm/BinaryFormat/Dwarf.def" ++#include "Dwarf.def" + }; + + /// Line Number Extended Opcode Encodings. + enum LineNumberExtendedOps { + #define HANDLE_DW_LNE(ID, NAME) DW_LNE_##NAME = ID, +-#include "llvm/BinaryFormat/Dwarf.def" ++#include "Dwarf.def" + DW_LNE_lo_user = 0x80, + DW_LNE_hi_user = 0xff + }; + + enum LineNumberEntryFormat { + #define HANDLE_DW_LNCT(ID, NAME) DW_LNCT_##NAME = ID, +-#include "llvm/BinaryFormat/Dwarf.def" ++#include "Dwarf.def" + DW_LNCT_lo_user = 0x2000, + DW_LNCT_hi_user = 0x3fff, + }; +@@ -368,36 +290,28 @@ + /// DWARF v5 macro information entry type encodings. + enum MacroEntryType { + #define HANDLE_DW_MACRO(ID, NAME) DW_MACRO_##NAME = ID, +-#include "llvm/BinaryFormat/Dwarf.def" ++#include "Dwarf.def" + DW_MACRO_lo_user = 0xe0, + DW_MACRO_hi_user = 0xff + }; + +-/// GNU .debug_macro macro information entry type encodings. +-enum GnuMacroEntryType { +-#define HANDLE_DW_MACRO_GNU(ID, NAME) DW_MACRO_GNU_##NAME = ID, +-#include "llvm/BinaryFormat/Dwarf.def" +- DW_MACRO_GNU_lo_user = 0xe0, +- DW_MACRO_GNU_hi_user = 0xff +-}; +- + /// DWARF v5 range list entry encoding values. + enum RnglistEntries { + #define HANDLE_DW_RLE(ID, NAME) DW_RLE_##NAME = ID, +-#include "llvm/BinaryFormat/Dwarf.def" ++#include "Dwarf.def" + }; + + /// DWARF v5 loc list entry encoding values. + enum LoclistEntries { + #define HANDLE_DW_LLE(ID, NAME) DW_LLE_##NAME = ID, +-#include "llvm/BinaryFormat/Dwarf.def" ++#include "Dwarf.def" + }; + + /// Call frame instruction encodings. + enum CallFrameInfo { + #define HANDLE_DW_CFA(ID, NAME) DW_CFA_##NAME = ID, + #define HANDLE_DW_CFA_PRED(ID, NAME, ARCH) DW_CFA_##NAME = ID, +-#include "llvm/BinaryFormat/Dwarf.def" ++#include "Dwarf.def" + DW_CFA_extended = 0x00, + + DW_CFA_lo_user = 0x1c, +@@ -433,20 +347,20 @@ + /// ObjCPropertyAttribute::Kind! 
+ enum ApplePropertyAttributes { + #define HANDLE_DW_APPLE_PROPERTY(ID, NAME) DW_APPLE_PROPERTY_##NAME = ID, +-#include "llvm/BinaryFormat/Dwarf.def" ++#include "Dwarf.def" + }; + + /// Constants for unit types in DWARF v5. + enum UnitType : unsigned char { + #define HANDLE_DW_UT(ID, NAME) DW_UT_##NAME = ID, +-#include "llvm/BinaryFormat/Dwarf.def" ++#include "Dwarf.def" + DW_UT_lo_user = 0x80, + DW_UT_hi_user = 0xff + }; + + enum Index { + #define HANDLE_DW_IDX(ID, NAME) DW_IDX_##NAME = ID, +-#include "llvm/BinaryFormat/Dwarf.def" ++#include "Dwarf.def" + DW_IDX_lo_user = 0x2000, + DW_IDX_hi_user = 0x3fff + }; +@@ -465,7 +379,7 @@ + } + } + +-inline bool isUnitType(dwarf::Tag T) { ++inline bool isUnitType(Tag T) { + switch (T) { + case DW_TAG_compile_unit: + case DW_TAG_type_unit: +@@ -502,275 +416,6 @@ + DW_hash_function_djb = 0u + }; + +-// Constants for the GNU pubnames/pubtypes extensions supporting gdb index. +-enum GDBIndexEntryKind { +- GIEK_NONE, +- GIEK_TYPE, +- GIEK_VARIABLE, +- GIEK_FUNCTION, +- GIEK_OTHER, +- GIEK_UNUSED5, +- GIEK_UNUSED6, +- GIEK_UNUSED7 +-}; +- + enum GDBIndexEntryLinkage { GIEL_EXTERNAL, GIEL_STATIC }; + +-/// \defgroup DwarfConstantsDumping Dwarf constants dumping functions +-/// +-/// All these functions map their argument's value back to the +-/// corresponding enumerator name or return an empty StringRef if the value +-/// isn't known. +-/// +-/// @{ +-StringRef TagString(unsigned Tag); +-StringRef ChildrenString(unsigned Children); +-StringRef AttributeString(unsigned Attribute); +-StringRef FormEncodingString(unsigned Encoding); +-StringRef OperationEncodingString(unsigned Encoding); +-StringRef AttributeEncodingString(unsigned Encoding); +-StringRef DecimalSignString(unsigned Sign); +-StringRef EndianityString(unsigned Endian); +-StringRef AccessibilityString(unsigned Access); +-StringRef DefaultedMemberString(unsigned DefaultedEncodings); +-StringRef VisibilityString(unsigned Visibility); +-StringRef VirtualityString(unsigned Virtuality); +-StringRef LanguageString(unsigned Language); +-StringRef CaseString(unsigned Case); +-StringRef ConventionString(unsigned Convention); +-StringRef InlineCodeString(unsigned Code); +-StringRef ArrayOrderString(unsigned Order); +-StringRef LNStandardString(unsigned Standard); +-StringRef LNExtendedString(unsigned Encoding); +-StringRef MacinfoString(unsigned Encoding); +-StringRef MacroString(unsigned Encoding); +-StringRef GnuMacroString(unsigned Encoding); +-StringRef RangeListEncodingString(unsigned Encoding); +-StringRef LocListEncodingString(unsigned Encoding); +-StringRef CallFrameString(unsigned Encoding, Triple::ArchType Arch); +-StringRef ApplePropertyString(unsigned); +-StringRef UnitTypeString(unsigned); +-StringRef AtomTypeString(unsigned Atom); +-StringRef GDBIndexEntryKindString(GDBIndexEntryKind Kind); +-StringRef GDBIndexEntryLinkageString(GDBIndexEntryLinkage Linkage); +-StringRef IndexString(unsigned Idx); +-StringRef FormatString(DwarfFormat Format); +-StringRef FormatString(bool IsDWARF64); +-StringRef RLEString(unsigned RLE); +-/// @} +- +-/// \defgroup DwarfConstantsParsing Dwarf constants parsing functions +-/// +-/// These functions map their strings back to the corresponding enumeration +-/// value or return 0 if there is none, except for these exceptions: +-/// +-/// \li \a getTag() returns \a DW_TAG_invalid on invalid input. +-/// \li \a getVirtuality() returns \a DW_VIRTUALITY_invalid on invalid input. +-/// \li \a getMacinfo() returns \a DW_MACINFO_invalid on invalid input. 
+-/// +-/// @{ +-unsigned getTag(StringRef TagString); +-unsigned getOperationEncoding(StringRef OperationEncodingString); +-unsigned getVirtuality(StringRef VirtualityString); +-unsigned getLanguage(StringRef LanguageString); +-unsigned getCallingConvention(StringRef LanguageString); +-unsigned getAttributeEncoding(StringRef EncodingString); +-unsigned getMacinfo(StringRef MacinfoString); +-unsigned getMacro(StringRef MacroString); +-/// @} +- +-/// \defgroup DwarfConstantsVersioning Dwarf version for constants +-/// +-/// For constants defined by DWARF, returns the DWARF version when the constant +-/// was first defined. For vendor extensions, if there is a version-related +-/// policy for when to emit it, returns a version number for that policy. +-/// Otherwise returns 0. +-/// +-/// @{ +-unsigned TagVersion(Tag T); +-unsigned AttributeVersion(Attribute A); +-unsigned FormVersion(Form F); +-unsigned OperationVersion(LocationAtom O); +-unsigned AttributeEncodingVersion(TypeKind E); +-unsigned LanguageVersion(SourceLanguage L); +-/// @} +- +-/// \defgroup DwarfConstantsVendor Dwarf "vendor" for constants +-/// +-/// These functions return an identifier describing "who" defined the constant, +-/// either the DWARF standard itself or the vendor who defined the extension. +-/// +-/// @{ +-unsigned TagVendor(Tag T); +-unsigned AttributeVendor(Attribute A); +-unsigned FormVendor(Form F); +-unsigned OperationVendor(LocationAtom O); +-unsigned AttributeEncodingVendor(TypeKind E); +-unsigned LanguageVendor(SourceLanguage L); +-/// @} +- +-Optional LanguageLowerBound(SourceLanguage L); +- +-/// The size of a reference determined by the DWARF 32/64-bit format. +-inline uint8_t getDwarfOffsetByteSize(DwarfFormat Format) { +- switch (Format) { +- case DwarfFormat::DWARF32: +- return 4; +- case DwarfFormat::DWARF64: +- return 8; +- } +- llvm_unreachable("Invalid Format value"); +-} +- +-/// A helper struct providing information about the byte size of DW_FORM +-/// values that vary in size depending on the DWARF version, address byte +-/// size, or DWARF32/DWARF64. +-struct FormParams { +- uint16_t Version; +- uint8_t AddrSize; +- DwarfFormat Format; +- +- /// The definition of the size of form DW_FORM_ref_addr depends on the +- /// version. In DWARF v2 it's the size of an address; after that, it's the +- /// size of a reference. +- uint8_t getRefAddrByteSize() const { +- if (Version == 2) +- return AddrSize; +- return getDwarfOffsetByteSize(); +- } +- +- /// The size of a reference is determined by the DWARF 32/64-bit format. +- uint8_t getDwarfOffsetByteSize() const { +- return dwarf::getDwarfOffsetByteSize(Format); +- } +- +- explicit operator bool() const { return Version && AddrSize; } +-}; +- +-/// Get the byte size of the unit length field depending on the DWARF format. +-inline uint8_t getUnitLengthFieldByteSize(DwarfFormat Format) { +- switch (Format) { +- case DwarfFormat::DWARF32: +- return 4; +- case DwarfFormat::DWARF64: +- return 12; +- } +- llvm_unreachable("Invalid Format value"); +-} +- +-/// Get the fixed byte size for a given form. +-/// +-/// If the form has a fixed byte size, then an Optional with a value will be +-/// returned. If the form is always encoded using a variable length storage +-/// format (ULEB or SLEB numbers or blocks) then None will be returned. +-/// +-/// \param Form DWARF form to get the fixed byte size for. +-/// \param Params DWARF parameters to help interpret forms. 
+-/// \returns Optional value with the fixed byte size or None if +-/// \p Form doesn't have a fixed byte size. +-Optional getFixedFormByteSize(dwarf::Form Form, FormParams Params); +- +-/// Tells whether the specified form is defined in the specified version, +-/// or is an extension if extensions are allowed. +-bool isValidFormForVersion(Form F, unsigned Version, bool ExtensionsOk = true); +- +-/// Returns the symbolic string representing Val when used as a value +-/// for attribute Attr. +-StringRef AttributeValueString(uint16_t Attr, unsigned Val); +- +-/// Returns the symbolic string representing Val when used as a value +-/// for atom Atom. +-StringRef AtomValueString(uint16_t Atom, unsigned Val); +- +-/// Describes an entry of the various gnu_pub* debug sections. +-/// +-/// The gnu_pub* kind looks like: +-/// +-/// 0-3 reserved +-/// 4-6 symbol kind +-/// 7 0 == global, 1 == static +-/// +-/// A gdb_index descriptor includes the above kind, shifted 24 bits up with the +-/// offset of the cu within the debug_info section stored in those 24 bits. +-struct PubIndexEntryDescriptor { +- GDBIndexEntryKind Kind; +- GDBIndexEntryLinkage Linkage; +- PubIndexEntryDescriptor(GDBIndexEntryKind Kind, GDBIndexEntryLinkage Linkage) +- : Kind(Kind), Linkage(Linkage) {} +- /* implicit */ PubIndexEntryDescriptor(GDBIndexEntryKind Kind) +- : Kind(Kind), Linkage(GIEL_EXTERNAL) {} +- explicit PubIndexEntryDescriptor(uint8_t Value) +- : Kind( +- static_cast((Value & KIND_MASK) >> KIND_OFFSET)), +- Linkage(static_cast((Value & LINKAGE_MASK) >> +- LINKAGE_OFFSET)) {} +- uint8_t toBits() const { +- return Kind << KIND_OFFSET | Linkage << LINKAGE_OFFSET; +- } +- +-private: +- enum { +- KIND_OFFSET = 4, +- KIND_MASK = 7 << KIND_OFFSET, +- LINKAGE_OFFSET = 7, +- LINKAGE_MASK = 1 << LINKAGE_OFFSET +- }; +-}; +- +-template struct EnumTraits : public std::false_type {}; +- +-template <> struct EnumTraits : public std::true_type { +- static constexpr char Type[3] = "AT"; +- static constexpr StringRef (*StringFn)(unsigned) = &AttributeString; +-}; +- +-template <> struct EnumTraits
: public std::true_type { +- static constexpr char Type[5] = "FORM"; +- static constexpr StringRef (*StringFn)(unsigned) = &FormEncodingString; +-}; +- +-template <> struct EnumTraits : public std::true_type { +- static constexpr char Type[4] = "IDX"; +- static constexpr StringRef (*StringFn)(unsigned) = &IndexString; +-}; +- +-template <> struct EnumTraits : public std::true_type { +- static constexpr char Type[4] = "TAG"; +- static constexpr StringRef (*StringFn)(unsigned) = &TagString; +-}; +- +-template <> struct EnumTraits : public std::true_type { +- static constexpr char Type[4] = "LNS"; +- static constexpr StringRef (*StringFn)(unsigned) = &LNStandardString; +-}; +- +-template <> struct EnumTraits : public std::true_type { +- static constexpr char Type[3] = "OP"; +- static constexpr StringRef (*StringFn)(unsigned) = &OperationEncodingString; +-}; +- +-inline uint64_t computeTombstoneAddress(uint8_t AddressByteSize) { +- return std::numeric_limits::max() >> (8 - AddressByteSize) * 8; +-} +- +-} // End of namespace dwarf +- +-/// Dwarf constants format_provider +-/// +-/// Specialization of the format_provider template for dwarf enums. Unlike the +-/// dumping functions above, these format unknown enumerator values as +-/// DW_TYPE_unknown_1234 (e.g. DW_TAG_unknown_ffff). +-template +-struct format_provider::value>> { +- static void format(const Enum &E, raw_ostream &OS, StringRef Style) { +- StringRef Str = dwarf::EnumTraits::StringFn(E); +- if (Str.empty()) { +- OS << "DW_" << dwarf::EnumTraits::Type << "_unknown_" +- << llvm::format("%x", E); +- } else +- OS << Str; +- } +-}; +-} // End of namespace llvm +- + #endif diff --git a/build/third_party/patch.sh b/build/third_party/patch.sh new file mode 100644 index 0000000000000000000000000000000000000000..647ba0f34807bfaa40c99d440c44170c4f04cb95 --- /dev/null +++ b/build/third_party/patch.sh @@ -0,0 +1,71 @@ +#!/bin/bash +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# + +THIRD_PARTY_PATH=$MAPLE_ROOT/third_party +TOOLS_PATH=$MAPLE_ROOT/build/third_party +AOSP_PATH=$THIRD_PARTY_PATH/aosp_10.0.0_r35 +AOSP_GN_PATH=$TOOLS_PATH/aosp_gn +LLVM_PATH=$THIRD_PARTY_PATH/llvm-12.0.0.src +MODIFIED_AOSP_PATH=$THIRD_PARTY_PATH/aosp_modified +MODIFIED_LLVM_PATH=$THIRD_PARTY_PATH/llvm_modified + +function install_patch { + if [ -d $MODIFIED_AOSP_PATH ];then + echo "Already Patched." + exit 0 + fi + + echo "Preparing the build environment..." 
+ + #backup source code + cd $THIRD_PARTY_PATH + cp -rH $AOSP_PATH $MODIFIED_AOSP_PATH + cp -rH $LLVM_PATH $MODIFIED_LLVM_PATH + + #patch + cd $MODIFIED_AOSP_PATH + patch -p0 < $TOOLS_PATH/system_001.patch + patch -p0 < $TOOLS_PATH/art_001.patch + mkdir -p include/ + cp -r ${MAPLE_ROOT}/src/hir2mpl/bytecode_input/dex/include/string_view_format.h include/ + + cd $MODIFIED_LLVM_PATH + patch -p0 < $TOOLS_PATH/llvm_001.patch + + #add third_party gn + cp -f $AOSP_GN_PATH/art/libdexfile/BUILD.gn $MODIFIED_AOSP_PATH/art/libdexfile/ + cp -f $AOSP_GN_PATH/system/core/libziparchive/BUILD.gn $MODIFIED_AOSP_PATH/system/core/libziparchive/ + cp -f $AOSP_GN_PATH/system/core/base/BUILD.gn $MODIFIED_AOSP_PATH/system/core/base/ +} + + +function uninstall_patch { + rm -rf $MODIFIED_AOSP_PATH $MODIFIED_LLVM_PATH +} + +function main { + if [ "x$1" == "xpatch" ]; then + install_patch + fi + + if [ "x$1" == "xunpatch" ]; then + uninstall_patch + fi + cd $MAPLE_ROOT +} + + +main $@ diff --git a/build/third_party/system_001.patch b/build/third_party/system_001.patch new file mode 100644 index 0000000000000000000000000000000000000000..a9c5aa77f2859f45158071c39b3f98980dcaa0ce --- /dev/null +++ b/build/third_party/system_001.patch @@ -0,0 +1,476 @@ +diff -ur system_aosp/core/base/cmsg.cpp system/core/base/cmsg.cpp +--- system_aosp/core/base/cmsg.cpp 2021-01-29 10:43:34.145594722 +0800 ++++ system/core/base/cmsg.cpp 2021-01-29 10:36:40.905961768 +0800 +@@ -21,7 +21,7 @@ + #include + #include + #include +- ++#include + #include + + #include +@@ -33,7 +33,7 @@ + const std::vector& fds) { + size_t cmsg_space = CMSG_SPACE(sizeof(int) * fds.size()); + size_t cmsg_len = CMSG_LEN(sizeof(int) * fds.size()); +- if (cmsg_space >= PAGE_SIZE) { ++ if (cmsg_space >= sysconf(_SC_PAGE_SIZE)) { + errno = ENOMEM; + return -1; + } +@@ -75,7 +75,7 @@ + fds->clear(); + + size_t cmsg_space = CMSG_SPACE(sizeof(int) * max_fds); +- if (cmsg_space >= PAGE_SIZE) { ++ if (cmsg_space >= sysconf(_SC_PAGE_SIZE)) { + errno = ENOMEM; + return -1; + } +diff -ur system_aosp/core/base/include/android-base/logging.h system/core/base/include/android-base/logging.h +--- system_aosp/core/base/include/android-base/logging.h 2021-01-29 10:43:34.145594722 +0800 ++++ system/core/base/include/android-base/logging.h 2021-01-29 10:36:40.905961768 +0800 +@@ -81,7 +81,7 @@ + + enum LogSeverity { + VERBOSE, +- DEBUG, ++ DEBUG_S, + INFO, + WARNING, + ERROR, +@@ -181,7 +181,7 @@ + // Note: DO NOT USE DIRECTLY. This is an implementation detail. + #define SEVERITY_LAMBDA(severity) ([&]() { \ + using ::android::base::VERBOSE; \ +- using ::android::base::DEBUG; \ ++ using ::android::base::DEBUG_S; \ + using ::android::base::INFO; \ + using ::android::base::WARNING; \ + using ::android::base::ERROR; \ +@@ -248,7 +248,7 @@ + // Logs a message to logcat with the specified log ID on Android otherwise to + // stderr. If the severity is FATAL it also causes an abort. + // Use an expression here so we can support the << operator following the macro, +-// like "LOG(DEBUG) << xxx;". ++// like "LOG(DEBUG_S) << xxx;". + #define LOG_TO(dest, severity) LOGGING_PREAMBLE(severity) && LOG_STREAM_TO(dest, severity) + + // A variant of LOG that also logs the current errno value. 
To be used when +diff -ur system_aosp/core/base/include/android-base/strings.h system/core/base/include/android-base/strings.h +--- system_aosp/core/base/include/android-base/strings.h 2021-01-29 10:43:34.145594722 +0800 ++++ system/core/base/include/android-base/strings.h 2021-01-29 10:36:40.905961768 +0800 +@@ -19,6 +19,7 @@ + #include + #include + #include ++#include "string_view_format.h" + + namespace android { + namespace base { +@@ -56,17 +57,17 @@ + extern template std::string Join(const std::vector&, const std::string&); + + // Tests whether 's' starts with 'prefix'. +-bool StartsWith(std::string_view s, std::string_view prefix); +-bool StartsWith(std::string_view s, char prefix); +-bool StartsWithIgnoreCase(std::string_view s, std::string_view prefix); ++bool StartsWith(StringView s, StringView prefix); ++bool StartsWith(StringView s, char prefix); ++bool StartsWithIgnoreCase(StringView s, StringView prefix); + + // Tests whether 's' ends with 'suffix'. +-bool EndsWith(std::string_view s, std::string_view suffix); +-bool EndsWith(std::string_view s, char suffix); +-bool EndsWithIgnoreCase(std::string_view s, std::string_view suffix); ++bool EndsWith(StringView s, StringView suffix); ++bool EndsWith(StringView s, char suffix); ++bool EndsWithIgnoreCase(StringView s, StringView suffix); + + // Tests whether 'lhs' equals 'rhs', ignoring case. +-bool EqualsIgnoreCase(std::string_view lhs, std::string_view rhs); ++bool EqualsIgnoreCase(StringView lhs, StringView rhs); + + } // namespace base + } // namespace android +diff -ur system_aosp/core/base/logging.cpp system/core/base/logging.cpp +--- system_aosp/core/base/logging.cpp 2021-01-29 10:43:34.145594722 +0800 ++++ system/core/base/logging.cpp 2021-01-29 10:36:40.905961768 +0800 +@@ -16,10 +16,11 @@ + + #if defined(_WIN32) + #include ++#include "android-base/threads.h" + #endif + + #include "android-base/logging.h" +- ++#include + #include + #include + #include +@@ -167,7 +168,7 @@ + static constexpr int kLogSeverityToKernelLogLevel[] = { + [android::base::VERBOSE] = 7, // KERN_DEBUG (there is no verbose kernel log + // level) +- [android::base::DEBUG] = 7, // KERN_DEBUG ++ [android::base::DEBUG_S] = 7, // KERN_DEBUG_S + [android::base::INFO] = 6, // KERN_INFO + [android::base::WARNING] = 4, // KERN_WARNING + [android::base::ERROR] = 3, // KERN_ERROR +@@ -310,7 +311,7 @@ + gMinimumLogSeverity = VERBOSE; + continue; + case 'd': +- gMinimumLogSeverity = DEBUG; ++ gMinimumLogSeverity = DEBUG_S; + continue; + case 'i': + gMinimumLogSeverity = INFO; +diff -ur system_aosp/core/base/mapped_file.cpp system/core/base/mapped_file.cpp +--- system_aosp/core/base/mapped_file.cpp 2021-01-29 10:43:34.145594722 +0800 ++++ system/core/base/mapped_file.cpp 2021-01-29 10:36:40.905961768 +0800 +@@ -76,7 +76,7 @@ + if (base_ != nullptr) UnmapViewOfFile(base_); + if (handle_ != nullptr) CloseHandle(handle_); + #else +- if (base_ != nullptr) munmap(base_, size_ + offset_); ++ if (base_ != nullptr) munmap(base_, size_); + #endif + + base_ = nullptr; +diff -ur system_aosp/core/base/strings.cpp system/core/base/strings.cpp +--- system_aosp/core/base/strings.cpp 2021-01-29 10:43:34.145594722 +0800 ++++ system/core/base/strings.cpp 2021-01-29 10:36:40.905961768 +0800 +@@ -87,32 +87,32 @@ + template std::string Join(const std::vector&, const std::string&); + template std::string Join(const std::vector&, const std::string&); + +-bool StartsWith(std::string_view s, std::string_view prefix) { ++bool StartsWith(StringView s, StringView prefix) { + return s.substr(0, 
prefix.size()) == prefix; + } + +-bool StartsWith(std::string_view s, char prefix) { ++bool StartsWith(StringView s, char prefix) { + return !s.empty() && s.front() == prefix; + } + +-bool StartsWithIgnoreCase(std::string_view s, std::string_view prefix) { ++bool StartsWithIgnoreCase(StringView s, StringView prefix) { + return s.size() >= prefix.size() && strncasecmp(s.data(), prefix.data(), prefix.size()) == 0; + } + +-bool EndsWith(std::string_view s, std::string_view suffix) { ++bool EndsWith(StringView s, StringView suffix) { + return s.size() >= suffix.size() && s.substr(s.size() - suffix.size(), suffix.size()) == suffix; + } + +-bool EndsWith(std::string_view s, char suffix) { ++bool EndsWith(StringView s, char suffix) { + return !s.empty() && s.back() == suffix; + } + +-bool EndsWithIgnoreCase(std::string_view s, std::string_view suffix) { ++bool EndsWithIgnoreCase(StringView s, StringView suffix) { + return s.size() >= suffix.size() && + strncasecmp(s.data() + (s.size() - suffix.size()), suffix.data(), suffix.size()) == 0; + } + +-bool EqualsIgnoreCase(std::string_view lhs, std::string_view rhs) { ++bool EqualsIgnoreCase(StringView lhs, StringView rhs) { + return lhs.size() == rhs.size() && strncasecmp(lhs.data(), rhs.data(), lhs.size()) == 0; + } + +diff -ur system_aosp/core/include/cutils/android_filesystem_config.h system/core/include/cutils/android_filesystem_config.h +--- system_aosp/core/include/cutils/android_filesystem_config.h 2021-01-29 10:43:42.185665405 +0800 ++++ system/core/include/cutils/android_filesystem_config.h 2021-01-29 10:36:40.909961804 +0800 +@@ -144,6 +144,7 @@ + /* The range 2900-2999 is reserved for OEM, and must never be + * used here */ + #define AID_OEM_RESERVED_START 2900 ++#define AID_HDB 2901 /* access hdbservice */ + #define AID_OEM_RESERVED_END 2999 + + /* The 3000 series are intended for use as supplemental group id's only. +@@ -161,6 +162,38 @@ + + /* The range 5000-5999 is also reserved for OEM, and must never be used here. */ + #define AID_OEM_RESERVED_2_START 5000 ++ ++/* Huawei Extend AID */ ++/* ++ * 1. ALL huawei extend AID should add VENDOR prefix,e.g. AID_VENDOR_XXXX ++ * 2. If the added AID was used in vendor partition only, Add it to config.fs ++ * vendor/huawei/chipset_common/config/common/config.fs ++ * 3. Huawei AID range: ++ * AID used in system partition: 5501-5900 ++ * AID used in vendor partiton only: 5900-5999 ++ * 4. 
wiki: http://3ms.huawei.com/hi/group/2844405/wiki_5160709.html?for_statistic_from=creation_group_wiki ++*/ ++ ++#define AID_VENDOR_HDB 5501 /* access hdbservice*/ ++ ++#define AID_VENDOR_DSM 5502 /* dsm access */ ++ ++#define AID_VENDOR_HWHFD 5503 /* Huawei kernel hot fix daemon */ ++ ++#define AID_VENDOR_SKYTONE 5504 /* access skytone */ ++ ++#define AID_VENDOR_ACT_RCS 5505 /* access device actr */ ++ ++#define AID_VENDOR_ODMF 5506 /* access AI model files */ ++ ++#define AID_VENDOR_INSTALLER 5507 /* access installer files */ ++ ++#define AID_VENDOR_HBS 5508 /* access hbs data */ ++ ++#define AID_DSM 5509 /* dsm access */ ++ ++#define AID_VENDOR_FACEID 5510 /* acess faceid */ ++ + #define AID_OEM_RESERVED_2_END 5999 + + #define AID_EVERYBODY 9997 /* shared between all apps in the same profile */ +diff -ur system_aosp/core/include/cutils/fs.h system/core/include/cutils/fs.h +--- system_aosp/core/include/cutils/fs.h 2021-01-29 10:43:42.185665405 +0800 ++++ system/core/include/cutils/fs.h 2021-01-29 10:36:40.909961804 +0800 +@@ -45,6 +45,14 @@ + */ + extern int fs_prepare_dir(const char* path, mode_t mode, uid_t uid, gid_t gid); + ++/* DTS2016051401335 AR000485VM FixUid l00214442 20160514 begin */ ++/* ++ * Ensure that directory exists with given mode and owners. If it exists ++ * with a different mode or owners, they are fixed to match the given values recursively. ++ */ ++extern int fs_prepare_dir_fixup_recursive(const char* path, mode_t mode, uid_t uid, gid_t gid, int allow_fixup); ++/* DTS2016051401335 AR000485VM FixUid l00214442 20160514 end */ ++ + /* + * Ensure that directory exists with given mode and owners. If it exists + * with different owners, they are not fixed and -1 is returned. +diff -ur system_aosp/core/include/cutils/trace.h system/core/include/cutils/trace.h +--- system_aosp/core/include/cutils/trace.h 2021-01-29 10:43:42.185665405 +0800 ++++ system/core/include/cutils/trace.h 2021-01-29 10:36:40.909961804 +0800 +@@ -18,7 +18,6 @@ + #define _LIBS_CUTILS_TRACE_H + + #include +-#include + #include + #include + #include +@@ -88,7 +87,7 @@ + #elif ATRACE_TAG > ATRACE_TAG_VALID_MASK + #error ATRACE_TAG must be defined to be one of the tags defined in cutils/trace.h + #endif +- ++using namespace std; + /** + * Opens the trace file for writing and reads the property for initial tags. + * The atrace.tags.enableflags property sets the tags to trace. +diff -ur system_aosp/core/include/log/log_id.h system/core/include/log/log_id.h +--- system_aosp/core/include/log/log_id.h 2021-01-29 10:43:42.185665405 +0800 ++++ system/core/include/log/log_id.h 2021-01-29 10:36:40.909961804 +0800 +@@ -58,6 +58,8 @@ + log_id_t android_name_to_log_id(const char* logName); + const char* android_log_id_to_name(log_id_t log_id); + ++int __hwlog_setparam(int paramid, const char *val); ++ + #ifdef __cplusplus + } + #endif +diff -ur system_aosp/core/include/private/android_filesystem_config.h system/core/include/private/android_filesystem_config.h +--- system_aosp/core/include/private/android_filesystem_config.h 2021-01-29 10:43:42.185665405 +0800 ++++ system/core/include/private/android_filesystem_config.h 2021-01-29 10:36:40.909961804 +0800 +@@ -144,6 +144,7 @@ + /* The range 2900-2999 is reserved for OEM, and must never be + * used here */ + #define AID_OEM_RESERVED_START 2900 ++#define AID_HDB 2901 /* access hdbservice */ + #define AID_OEM_RESERVED_END 2999 + + /* The 3000 series are intended for use as supplemental group id's only. 
+@@ -161,6 +162,38 @@ + + /* The range 5000-5999 is also reserved for OEM, and must never be used here. */ + #define AID_OEM_RESERVED_2_START 5000 ++ ++/* Huawei Extend AID */ ++/* ++ * 1. ALL huawei extend AID should add VENDOR prefix,e.g. AID_VENDOR_XXXX ++ * 2. If the added AID was used in vendor partition only, Add it to config.fs ++ * vendor/huawei/chipset_common/config/common/config.fs ++ * 3. Huawei AID range: ++ * AID used in system partition: 5501-5900 ++ * AID used in vendor partiton only: 5900-5999 ++ * 4. wiki: http://3ms.huawei.com/hi/group/2844405/wiki_5160709.html?for_statistic_from=creation_group_wiki ++*/ ++ ++#define AID_VENDOR_HDB 5501 /* access hdbservice*/ ++ ++#define AID_VENDOR_DSM 5502 /* dsm access */ ++ ++#define AID_VENDOR_HWHFD 5503 /* Huawei kernel hot fix daemon */ ++ ++#define AID_VENDOR_SKYTONE 5504 /* access skytone */ ++ ++#define AID_VENDOR_ACT_RCS 5505 /* access device actr */ ++ ++#define AID_VENDOR_ODMF 5506 /* access AI model files */ ++ ++#define AID_VENDOR_INSTALLER 5507 /* access installer files */ ++ ++#define AID_VENDOR_HBS 5508 /* access hbs data */ ++ ++#define AID_DSM 5509 /* dsm access */ ++ ++#define AID_VENDOR_FACEID 5510 /* acess faceid */ ++ + #define AID_OEM_RESERVED_2_END 5999 + + #define AID_EVERYBODY 9997 /* shared between all apps in the same profile */ +diff -ur system_aosp/core/include/utils/Flattenable.h system/core/include/utils/Flattenable.h +--- system_aosp/core/include/utils/Flattenable.h 2021-01-29 10:43:42.189665440 +0800 ++++ system/core/include/utils/Flattenable.h 2021-01-29 10:36:40.909961804 +0800 +@@ -47,12 +47,7 @@ + + template + static size_t align(void*& buffer) { +- static_assert(!(N & (N - 1)), "Can only align to a power of 2."); +- void* b = buffer; +- buffer = reinterpret_cast((uintptr_t(buffer) + (N-1)) & ~(N-1)); +- size_t delta = size_t(uintptr_t(buffer) - uintptr_t(b)); +- memset(b, 0, delta); +- return delta; ++ return align( const_cast(buffer) ); + } + + static void advance(void*& buffer, size_t& size, size_t offset) { +diff -ur system_aosp/core/include/utils/String8.h system/core/include/utils/String8.h +--- system_aosp/core/include/utils/String8.h 2021-01-29 10:43:42.189665440 +0800 ++++ system/core/include/utils/String8.h 2021-01-29 10:36:40.909961804 +0800 +@@ -58,6 +58,9 @@ + explicit String8(const char16_t* o, size_t numChars); + explicit String8(const char32_t* o); + explicit String8(const char32_t* o, size_t numChars); ++ //fix bug of sogou input method ++ explicit String8(unsigned short const* o); ++ //fix bug of sogou input method + ~String8(); + + static inline const String8 empty(); +diff -ur system_aosp/core/liblog/include/log/log_id.h system/core/liblog/include/log/log_id.h +--- system_aosp/core/liblog/include/log/log_id.h 2021-01-29 10:43:56.097787709 +0800 ++++ system/core/liblog/include/log/log_id.h 2021-01-29 10:36:40.909961804 +0800 +@@ -58,6 +58,8 @@ + log_id_t android_name_to_log_id(const char* logName); + const char* android_log_id_to_name(log_id_t log_id); + ++int __hwlog_setparam(int paramid, const char *val); ++ + #ifdef __cplusplus + } + #endif +diff -ur system_aosp/core/liblog/include_vndk/log/log_id.h system/core/liblog/include_vndk/log/log_id.h +--- system_aosp/core/liblog/include_vndk/log/log_id.h 2021-01-29 10:43:56.097787709 +0800 ++++ system/core/liblog/include_vndk/log/log_id.h 2021-01-29 10:36:40.909961804 +0800 +@@ -58,6 +58,8 @@ + log_id_t android_name_to_log_id(const char* logName); + const char* android_log_id_to_name(log_id_t log_id); + ++int __hwlog_setparam(int 
paramid, const char *val); ++ + #ifdef __cplusplus + } + #endif +diff -ur system_aosp/core/liblog/liblog.map.txt system/core/liblog/liblog.map.txt +--- system_aosp/core/liblog/liblog.map.txt 2021-01-29 10:43:56.097787709 +0800 ++++ system/core/liblog/liblog.map.txt 2021-01-29 10:36:40.909961804 +0800 +@@ -63,6 +63,7 @@ + __android_log_security; # apex + android_log_reset; #vndk + android_log_parser_reset; #vndk ++ __hwlog_setparam; + }; + + LIBLOG_PRIVATE { +diff -ur system_aosp/core/libutils/include/utils/Flattenable.h system/core/libutils/include/utils/Flattenable.h +--- system_aosp/core/libutils/include/utils/Flattenable.h 2021-01-29 10:44:02.421843310 +0800 ++++ system/core/libutils/include/utils/Flattenable.h 2021-01-29 10:36:40.909961804 +0800 +@@ -47,12 +47,7 @@ + + template + static size_t align(void*& buffer) { +- static_assert(!(N & (N - 1)), "Can only align to a power of 2."); +- void* b = buffer; +- buffer = reinterpret_cast((uintptr_t(buffer) + (N-1)) & ~(N-1)); +- size_t delta = size_t(uintptr_t(buffer) - uintptr_t(b)); +- memset(b, 0, delta); +- return delta; ++ return align( const_cast(buffer) ); + } + + static void advance(void*& buffer, size_t& size, size_t offset) { +diff -ur system_aosp/core/libutils/include/utils/String8.h system/core/libutils/include/utils/String8.h +--- system_aosp/core/libutils/include/utils/String8.h 2021-01-29 10:44:02.421843310 +0800 ++++ system/core/libutils/include/utils/String8.h 2021-01-29 10:36:40.909961804 +0800 +@@ -58,6 +58,9 @@ + explicit String8(const char16_t* o, size_t numChars); + explicit String8(const char32_t* o); + explicit String8(const char32_t* o, size_t numChars); ++ //fix bug of sogou input method ++ explicit String8(unsigned short const* o); ++ //fix bug of sogou input method + ~String8(); + + static inline const String8 empty(); +diff -ur system_aosp/core/libziparchive/zip_archive.cc system/core/libziparchive/zip_archive.cc +--- system_aosp/core/libziparchive/zip_archive.cc 2021-01-29 10:44:10.001909933 +0800 ++++ system/core/libziparchive/zip_archive.cc 2021-01-29 10:36:40.913961839 +0800 +@@ -33,6 +33,7 @@ + + #include + #include ++#include "string_view_format.h" + + #if defined(__APPLE__) + #define lseek64 lseek +@@ -103,8 +104,8 @@ + + static uint32_t ComputeHash(const ZipString& name) { + #if !defined(_WIN32) +- return std::hash{}( +- std::string_view(reinterpret_cast(name.name), name.name_length)); ++ return std::hash{}( ++ StringView(reinterpret_cast(name.name), name.name_length)); + #else + // Remove this code path once the windows compiler knows how to compile the above statement. + uint32_t hash = 0; +diff -ur system_aosp/core/libziparchive/zip_archive_private.h system/core/libziparchive/zip_archive_private.h +--- system_aosp/core/libziparchive/zip_archive_private.h 2021-01-29 10:44:10.001909933 +0800 ++++ system/core/libziparchive/zip_archive_private.h 2021-01-29 10:36:40.913961839 +0800 +@@ -138,7 +138,7 @@ + + /** + * More space efficient string representation of strings in an mmaped zipped file than +- * std::string_view or ZipString. Using ZipString as an entry in the ZipArchive hashtable wastes ++ * StringView or ZipString. Using ZipString as an entry in the ZipArchive hashtable wastes + * space. ZipString stores a pointer to a string (on 64 bit, 8 bytes) and the length to read from + * that pointer, 2 bytes. Because of alignment, the structure consumes 16 bytes, wasting 6 bytes. 
+ * ZipStringOffset stores a 4 byte offset from a fixed location in the memory mapped file instead +diff -ur system_aosp/core/libziparchive/zip_writer.cc system/core/libziparchive/zip_writer.cc +--- system_aosp/core/libziparchive/zip_writer.cc 2021-01-29 10:44:10.001909933 +0800 ++++ system/core/libziparchive/zip_writer.cc 2021-01-29 10:36:40.913961839 +0800 +@@ -358,7 +358,7 @@ + CHECK(z_stream_->avail_out != 0); + + // Prepare the input. +- z_stream_->next_in = reinterpret_cast(data); ++ z_stream_->next_in = (unsigned char *)(data); + z_stream_->avail_in = len; + + while (z_stream_->avail_in > 0) { diff --git a/build/toolchain/BUILD.gn b/build/toolchain/BUILD.gn new file mode 100755 index 0000000000000000000000000000000000000000..d45cde7be74dbf4cc33faff3ebd9af95e833d9f0 --- /dev/null +++ b/build/toolchain/BUILD.gn @@ -0,0 +1,205 @@ +# +# Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# +toolchain("clang") { + tool("cc") { + depfile = "{{output}}.d" + command = "${GN_C_COMPILER} {{defines}} {{include_dirs}} {{cflags}} {{cflags_c}} -MD -MT {{output}} -MF $depfile -o {{output}} -c {{source}}" + depsformat = "gcc" + description = "Building C object {{output}}" + outputs = [ + "{{source_out_dir}}/{{target_output_name}}.{{source_name_part}}.o", + ] + } + + tool("cxx") { + depfile = "{{output}}.d" + command = "${GN_CXX_COMPILER} {{defines}} {{include_dirs}} {{cflags}} {{cflags_cc}} -MD -MT {{output}} -MF $depfile -o {{output}} -c {{source}}" + depsformat = "gcc" + description = "Building CXX object {{output}}" + outputs = [ + "{{source_out_dir}}/{{target_output_name}}.{{source_name_part}}.o", + ] + } + + tool("asm") { + depfile = "{{output}}.d" + + #command = "${GN_C_COMPILER} -MMD -MF $depfile ${rebuild_string}{{defines}} {{include_dirs}} {{asmflags}}${extra_asmflags} -c {{source}} -o {{output}}" + command = "${GN_CXX_COMPILER} {{defines}} {{include_dirs}} {{asmflags}} -MD -MT {{output}} -MF $depfile -c {{source}} -o {{output}}" + depsformat = "gcc" + description = "ASM {{output}}" + outputs = [ + "{{source_out_dir}}/{{target_output_name}}.{{source_name_part}}.o", + ] + } + + tool("alink") { + rspfile = "{{output}}.rsp" + rspfile_content = "{{inputs}}" + command = "rm -f {{output}} && ${GN_AR_COMPILER} qc {{output}} ${rspfile_content} && ${GN_RANLIB_COMPILER} {{output}}" + description = "AR {{target_output_name}}{{output_extension}}" + outputs = [ + "{{output_dir}}/{{target_output_name}}{{output_extension}}", + ] + default_output_extension = ".a" + default_output_dir = "${GN_ARCHIVE_OUTPUT_DIRECTORY}" + } + + tool("solink") { + soname = "{{target_output_name}}{{output_extension}}" + sofile = "{{output_dir}}/$soname" + rspfile = soname + ".rsp" + + #command = "${GN_CXX_COMPILER} -shared {{ldflags}} -o $soname -Wl,-soname=$soname @$rspfile {{libs}}" + rspfile_content = "{{inputs}} {{solibs}} {{libs}}" + command = "${GN_CXX_COMPILER} -fuse-ld=lld {{ldflags}} -o $sofile -shared -Wl,-soname=$soname ${rspfile_content}" + description = "SOLINK $soname" + + # Use this for {{output_extension}} 
expansions unless a target manually + # overrides it (in which case {{output_extension}} will be what the target + # specifies). + default_output_extension = ".so" + outputs = [ + sofile, + ] + link_output = sofile + depend_output = sofile + + #output_prefix = "" + default_output_dir = "${GN_LIBRARY_OUTPUT_DIRECTORY}/${OPT}" + restat = true + } + + tool("link") { + outfile = "${GN_BINARY_OUTPUT_DIRECTORY}/{{target_output_name}}{{output_extension}}" + rspfile = "$outfile.rsp" + rspfile_content = "{{inputs}}" + + strip_flag = "" + if (GN_BUILD_TYPE == "RELEASE" && GPROF == 0) { + strip_flag = "-s" + } + + command = "${GN_CXX_COMPILER} ${strip_flag} -fuse-ld=lld {{ldflags}} -o $outfile -Wl,--start-group ${rspfile_content} {{libs}} -Wl,--end-group {{solibs}}" + + description = "LINK $outfile" + rspfile_content = "{{inputs}}" + outputs = [ + outfile, + ] + } + + tool("stamp") { + command = "touch {{output}}" + description = "STAMP {{output}}" + } + + tool("copy") { + command = "cp -af {{source}} {{output}}" + description = "COPY {{source}} {{output}}" + } +} + +toolchain("cross_compile") { + tool("cc") { + depfile = "{{output}}.d" + command = "${GN_C_CROSS_COMPILER} {{defines}} {{include_dirs}} {{cflags}} {{cflags_c}} -MD -MT {{output}} -MF $depfile -o {{output}} -c {{source}}" + depsformat = "gcc" + description = "Building C object {{output}}" + outputs = [ + "{{source_out_dir}}/{{target_output_name}}.{{source_name_part}}.o", + ] + } + + tool("cxx") { + depfile = "{{output}}.d" + command = "${GN_CXX_CROSS_COMPILER} {{defines}} {{include_dirs}} {{cflags}} {{cflags_cc}} -MD -MT {{output}} -MF $depfile -o {{output}} -c {{source}}" + depsformat = "gcc" + description = "Building CXX object {{output}}" + outputs = [ + "{{source_out_dir}}/{{target_output_name}}.{{source_name_part}}.o", + ] + } + + tool("asm") { + depfile = "{{output}}.d" + command = "${GN_CXX_CROSS_COMPILER} {{defines}} {{include_dirs}} {{asmflags}} -MD -MT {{output}} -MF $depfile -c {{source}} -o {{output}}" + depsformat = "gcc" + description = "ASM {{output}}" + outputs = [ + "{{source_out_dir}}/{{target_output_name}}.{{source_name_part}}.o", + ] + } + + tool("alink") { + rspfile = "{{output}}.rsp" + rspfile_content = "{{inputs}}" + command = "rm -f {{output}} && ${GN_AR_CROSS_COMPILER} qc {{output}} ${rspfile_content} && ${GN_RANLIB_CROSS_COMPILER} {{output}}" + description = "AR {{target_output_name}}{{output_extension}}" + rspfile_content = "{{inputs}}" + outputs = [ + "{{output_dir}}/{{target_output_name}}{{output_extension}}", + ] + default_output_extension = ".a" + default_output_dir = "${GN_ARCHIVE_OUTPUT_DIRECTORY}" + } + + tool("solink") { + soname = "{{target_output_name}}{{output_extension}}" + sofile = "{{output_dir}}/$soname" + rspfile = soname + ".rsp" + rspfile_content = "{{inputs}} {{solibs}} {{libs}}" + command = "${GN_CXX_CROSS_COMPILER} {{ldflags}} -o $sofile -shared -Wl,-soname=$soname ${rspfile_content}" + description = "SOLINK $soname" + + # Use this for {{output_extension}} expansions unless a target manually + # overrides it (in which case {{output_extension}} will be what the target + # specifies). 
+ default_output_extension = ".so" + outputs = [ + sofile, + ] + link_output = sofile + depend_output = sofile + + #output_prefix = "" + default_output_dir = "${GN_LIBRARY_OUTPUT_DIRECTORY}/${OPT}" + restat = true + } + + tool("link") { + outfile = "${GN_BINARY_OUTPUT_DIRECTORY}/{{target_output_name}}{{output_extension}}" + rspfile = "$outfile.rsp" + rspfile_content = "{{inputs}}" + + #command = "${GN_CXX_CROSS_COMPILER} {{ldflags}} -o $outfile @$rspfile {{solibs}} {{libs}}" + command = "${GN_CXX_CROSS_COMPILER} {{ldflags}} -o $outfile ${rspfile_content} {{solibs}} {{libs}}" + description = "LINK $outfile" + rspfile_content = "{{inputs}}" + outputs = [ + outfile, + ] + } + + tool("stamp") { + command = "touch {{output}}" + description = "STAMP {{output}}" + } + + tool("copy") { + command = "cp -af {{source}} {{output}}" + description = "COPY {{source}} {{output}}" + } +} diff --git a/build/tools/common/bis.sh b/build/tools/common/bis.sh new file mode 100755 index 0000000000000000000000000000000000000000..644ff36e9bda6025369a3547bcf65c7bfbfbf640 --- /dev/null +++ b/build/tools/common/bis.sh @@ -0,0 +1,75 @@ + +good_dir=$1 +bad_dir=$2 +src_list=src_list.log +pwd=$PWD +cd $bad_dir +sed 's/ /\n/g' obj.list > $pwd/$src_list +cd - +file_lenth=`sed -n '$=' $src_list` + +function get_src_list() { + min=$1 + max=$2 + line_number=1 + src=" " + cat $src_list | while read line + do + if [ "${line_number}" -ge "${min}" ]&&[ "${line_number}" -le "${max}" ]; + then + src=${src}${bad_dir}"/"${line}" " + else + src=${src}${good_dir}"/"${line}" " + fi + if [ $line_number -eq $file_lenth ];then + echo "${src}" + fi + line_number=`expr $line_number + 1` + done +} + +cur_max=`sed -n '$=' $src_list` +cur_min=1 +last_max=$cur_max +while true +do +cur_src=`echo $(get_src_list $cur_min $cur_max)` +echo $file_lenth +bash run.sh $cur_src + +if [ $? -eq 0 ];then + +if [ $cur_min -eq $cur_max ];then +cur_min=$last_max +cur_max=$last_max +cur_src=`echo $(get_src_list $cur_min $cur_max)` +break +fi + +cur_min=$cur_max +cur_max=${last_max} + +else +if [ $cur_min -eq $cur_max ];then +cur_src=`echo $(get_src_list $cur_min $cur_max)` +break +fi +last_max=$cur_max +cur_max=$(((${cur_max}+${cur_min})/2)) +fi +echo "===================================" +echo "cur_min" $cur_min +echo "cur_max" $cur_max +echo "===================================" + +done + +bash run.sh $cur_src +if [ $? 
-eq 0 ];then + echo "bad src not found" +else + echo "==================================" + echo "bad src found:" + head -$cur_min $src_list | tail -1 + echo "==================================" +fi diff --git a/build/tools/common/bisfunc.sh b/build/tools/common/bisfunc.sh new file mode 100755 index 0000000000000000000000000000000000000000..8ad515db2dd2e18e671da73c64fca2fbe3cd9258 --- /dev/null +++ b/build/tools/common/bisfunc.sh @@ -0,0 +1,132 @@ +#!/bin/bash +bad_mpl="" +good_dir="" +bad_dir="" +candidate_type="object" +src_list=src_list.log +pwd=${PWD} +twd=${pwd}/temp_dir +LINARO=${MAPLE_ROOT}/tools/gcc-linaro-7.5.0 +fileName="" + +function parse_args() { + while [ $# -ne 0 ]; do + case "$1" in + *.mpl) + bad_mpl=$1 + candidate_type="mpl" + fileName=${bad_mpl%\.mpl*} + ;; + *) + if [ -d "$1" ]; then + good_dir=$1 + shift + bad_dir=$1 + else + echo "no such directory ""$1" + exit 1 + fi + ;; + esac + shift + done +} + +function get_cur_func_list() { + echo > ${twd}/temp_func.list + min=$1 + max=$2 + line_number=1 + src=" " + cat ${twd}/$src_list | while read line + do + if [ "${line_number}" -ge "${min}" ]&&[ "${line_number}" -le "${max}" ]; + then + echo ${line} >> ${twd}/temp_func.list + fi + line_number=`expr $line_number + 1` + done +} + +function compileMpl() { + #echo "cur parto2list:" + #cat temp_func.list | lolcat + $MAPLE_ROOT/output/aarch64-clang-release/bin/maple --partO2=${twd}/temp_func.list --run=me:mpl2mpl:mplcg --option="--O2 --quiet: --O2 --quiet: --O2 --quiet --no-pie --verbose-asm" ${twd}/$bad_mpl &> comb.log + $LINARO/bin/aarch64-linux-gnu-gcc -O2 -std=c99 -o ${twd}/${fileName}.o -c ${twd}/${fileName}.s +} + +function asmMpl() { + cat temp_func.list + python3 replace_func.py good.s bad.s ${twd}/temp_func.list > ${twd}/newgood.s + $LINARO/bin/aarch64-linux-gnu-gcc -O2 -std=c99 -o ${twd}/${fileName}.o -c ${twd}/newgood.s +} + +function main() { + rm -rf ${twd} + cp -r $good_dir ${twd} + cp run.sh ${twd}/ + #cp replace_func.py ${twd}/ + #cp good.s ${twd}/ + #cp bad.s ${twd}/ + if [ "$candidate_type" == "mpl" ]; then + grep -E "func &.*{$" $good_dir/$bad_mpl | awk '{print $2}' | sed 's/&//g' > ${twd}/$src_list + else + sed 's/ /\n/g' $good_dir/obj.list > $twd/$src_list + fi + + obj_list=`sed 's/ /\n/g' $good_dir/obj.list` + cd ${twd} + file_length=`sed -n '$=' $src_list` + + cur_min=1 + cur_max=$file_length + last_max=$cur_max + while true + do + get_cur_func_list $cur_min $cur_max + #echo $file_length + + echo "===================================" + echo "cur_min" $cur_min + echo "cur_max" $cur_max + + #asmMpl + compileMpl + bash run.sh $obj_list + + if [ $? -eq 0 ];then + echo -e "\033[32mSUCCESS \033[0m" + if [ $cur_min -eq $cur_max ];then + cur_min=$last_max + cur_max=$last_max + get_cur_func_list $cur_min $cur_max + break + fi + cur_min=$cur_max + cur_max=${last_max} + else + echo -e "\033[31mFAILED \033[0m" + if [ $cur_min -eq $cur_max ];then + get_cur_func_list $cur_min $cur_max + break + fi + last_max=$cur_max + cur_max=$(((${cur_max}+${cur_min})/2)) + fi + echo "===================================" + done + + #asmMpl + compileMpl + echo "===================================" + bash run.sh $obj_list + if [ $? 
-eq 0 ];then + echo "bad func not found" + else + echo "bad func found:" + head -$cur_min $src_list | tail -1 + fi +} + +parse_args $@ +main \ No newline at end of file diff --git a/build/tools/common/maplec b/build/tools/common/maplec new file mode 100755 index 0000000000000000000000000000000000000000..6d0ff340990039bb46fce6644349ed53a70d9b58 --- /dev/null +++ b/build/tools/common/maplec @@ -0,0 +1,260 @@ +#!/bin/bash +src_file_path="" +target_name="" +file_name="" +option="" +obj_list="" +src_list="" +pre_process=true +compile=true +assemble=true +link=true +out="" +std="-std=gnu99" +list_mode="false" + +# config fe compiler +use_hir2mpl=true +use_clang2mpl=false +# config maple optimization level +use_O2=true +# config mpl or bpl +use_bpl=false + +if [ "$use_bpl" == "true" ];then + WHIRL_OPT="" + suffix="bpl" +else + WHIRL_OPT="-a" + suffix="mpl" +fi + +# preifx directories +OPENSOURCE_OUT=$MAPLE_ROOT/output +MAPLE_BIN=$OPENSOURCE_OUT/aarch64-clang-release/bin +TOOLS_ROOT=$MAPLE_ROOT/tools +TOOLS_BIN=$TOOLS_ROOT/bin +LINARO=$TOOLS_ROOT/gcc-linaro-7.5.0 +GCC=$LINARO/bin/aarch64-linux-gnu-gcc + +# compile flags +ISYSTEM_FLAGS="-isystem $LINARO/aarch64-linux-gnu/libc/usr/include -isystem $LINARO/lib/gcc/aarch64-linux-gnu/7.5.0/include" + +# maple options +O2="--O2 --quiet:--O2 --quiet:--O2 --quiet --verbose-asm --verbose-cg" +O0="--O0 --quiet --verbose-asm --verbose-cg" + +# whole cmd of maplec.sh +SELF_CMD="${0} ${@}" + +while [ $# -ne 0 ]; do + case "$1" in + -O2) + use_O2=true + ;; + -O0) + use_O2=false + ;; + -hir2mpl) + use_hir2mpl=true + use_clang2mpl=false + ;; + -clang2mpl) + use_hir2mpl=false + use_clang2mpl=true + ;; + --list) + shift + list_mode=true + src_list="$1" + ;; + -v) + exit 0 + ;; + -b) + WHIRL_OPT="" + suffix="bpl" + ;; + -o) + shift + out="-o $1" + ;; + -I) + shift + option=$option"-I $1 " + ;; + -isystem) + shift + ISYSTEM_FLAGS="-isystem $1 "$ISYSTEM_FLAGS + ;; + -c) + link=false + ;; + -S) + assemble=false + link=false + ;; + -E) + compile=false + assemble=false + link=false + ;; + -include) + shift + option=$option"-include $1 " + ;; + -gcc) + shift + GCC=$1 + ;; + -Werror) + ;; + -fno-pic) + O2=$O2" --no-fpic" + O0=$O0" --no-fpic" + ;; + -ffunction-sections) + O2=$O2" --function-sections" + O0=$O0" --function-sections" + ;; + -mgeneral-regs-only) + O2=$O2" --general-reg-only" + O0=$O0" --general-reg-only" + ;; + -f*) + ;; + -*) + option=$option"$1"" " + ;; + *.c) + src_file_path="$1" + ;; + *.mpl) + src_file_path="$1" + pre_process=false + ;; + *.s) + src_file_path="$1" + pre_process=false + compile=false + ;; + *.o) + obj_list=$obj_list"$1"" " + pre_process=false + compile=false + assemble=false + link=true + ;; + esac + shift +done + +# flags for clang +CLANG_COMMON_FLAGS="-U __SIZEOF_INT128__ $ISYSTEM_FLAGS" +CLANGFE_FLAGS="-cc1 ${std} -emit-llvm -triple aarch64-linux-gnu -D__clang__ -D__BLOCKS__ $CLANG_COMMON_FLAGS -fgnu89-inline" +CLANG2MPL_FLAGS="--target=aarch64-linux-elf -Wno-return-type -U__SIZEOF_INT128__" +CLANG_FLAGS="--target=aarch64 $CLANG_COMMON_FLAGS -emit-ast" + +function color_print() { + # default -S 1050 -F 0.01 + echo -e $1 #| lolcat -S 1050 -F 0.01 +} + +function excecute() { + outputFile=$target_name + echo "${@}" && echo "" + "${@}" 1>/dev/null 2>$outputFile || { + echo "" >> "$outputFile" && cat $outputFile + echo -e "FAILED:" "$@" | tee -a $outputFile + echo "" | tee -a "$outputFile" + echo -e "[whole compile cmd]: "$SELF_CMD >> "$outputFile" + echo "" >> "$outputFile" && echo "" >> "$outputFile" + exit 1; + } + echo "" +} + +function 
hir2mpl() { + # generate .ast + color_print "Starting ast for $src_file_path:" + excecute $TOOLS_BIN/clang $CLANG_FLAGS $option $src_file_path -o $file_name.ast + # generate .mpl + color_print "Starting hir2mpl for $src_file_path:" + excecute $MAPLE_BIN/hir2mpl $file_name.ast --enable-variable-array +} + +function clangfe() { + # generate .B + color_print "Starting clangfe for $src_file_path:" + excecute $OPEN64BIN/clangfe $CLANGFE_FLAGS $option $src_file_path + # generate .mpl + color_print "Starting whirl2mpl for $src_file_path:" + excecute $OPEN64BIN/whirl2mpl $WHIRL_OPT $file_name.B +} + +function clang2mpl() { + # generate .mpl + color_print "Starting clang2mpl for $src_file_path:" + excecute $MAPLE_BIN/clang2mpl --ascii "$src_file_path" -- $CLANG2MPL_FLAGS $option +} + +function c_to_mpl() { + [ "$use_clang2mpl" == "true" ] && clang2mpl && return + [ "$use_hir2mpl" == "true" ] && hir2mpl && return + clangfe +} + +function maple_to_asm() { + color_print "Starting maplecomb for $src_file_path:" + run_exe=mplcg + maple_option=$O0 + [ "$use_O2" == "true" ] && run_exe="me:mpl2mpl:mplcg" && maple_option=$O2 + excecute $MAPLE_BIN/maple "--run=$run_exe" "--option=$maple_option" "${file_name}.${suffix}" --save-temps +} + +function asm_to_obj() { + color_print "Starting asm for $src_file_path:" + excecute $GCC -std=c99 $option -c "${file_name}.s" $out + echo "" +} + +function init() { + src_file_path=$(realpath $src_file_path) + target_name=${src_file_path##*/}".err" + file_name=${src_file_path%\.*} +} + +function c_to_asm() { + init + # generate .mpl + c_to_mpl + # generate .s + maple_to_asm + # generate .o + asm_to_obj +} + +function link() { + target_name=${out##* }".err" + echo -e $obj_list > obj.list + echo -e $GCC ${std} $option $obj_list $out > link.cmd + color_print "Starting Link ${out##* }:" + excecute $GCC ${std} $obj_list $option $out +} + +function asm_list() { + cat $src_list | grep "\.c" | while read line + do + src_file_path=`echo ${line} | awk '{print $NF}'` + c_to_asm + done +} + +init +[ "$pre_process" == "true" ] && c_to_mpl +[ "$compile" == "true" ] && maple_to_asm +[ "$assemble" == "true" ] && asm_to_obj +[ "$link" == "true" ] && link + +rm $target_name +exit 0 diff --git a/build/tools/common/replace_func.py b/build/tools/common/replace_func.py new file mode 100755 index 0000000000000000000000000000000000000000..32dd095547c529cbb898068a4649618a5230f379 --- /dev/null +++ b/build/tools/common/replace_func.py @@ -0,0 +1,151 @@ +#!/usr/bin/python3 +import sys +import os +import re + +funclist=[] +funcMap={} +buffer='' + +class AsmFunc: + funcName = '' + start = 0 + end = 0 + funcId = 0 + totalLine = 0 + realLine = 0 + blankLine = 0 + varRegLine = 0 + aliasLine = 0 + commentLine = 0 + funcInfo = 7 + locLine = 0 + otherLine = 0 + source = '' + locList =[] + + def __str__(self): + return '\n'.join(['%s:%s' % item for item in self.__dict__.items()]) + def calc(self): + self.totalLine = self.end - self.start - 1 + self.realLine = self.totalLine - 1 - self.funcInfo - self.blankLine - self.varRegLine - self.aliasLine - self.locLine - self.commentLine + + +def binarySearch (arr, l, r, x): + while l <= r: + mid = int(l + (r - l)/2) + # 元素整好的中间位置 + if arr[mid] == x: + return mid + # 元素小于中间位置的元素,只需要再比较左边的元素 + elif arr[mid] > x: + r = mid-1 + # 元素大于中间位置的元素,只需要再比较右边的元素 + else: + l = mid+1 + # 不存在 + return -1 + +def readFunclist(file): + global funclist + with open(file, "r") as f: + for func in f: + funclist.append(func.strip()) + funclist.sort() + +def isMatch(name): + global funclist + 
index = binarySearch(funclist, 0, len(funclist) - 1, name) + if index != -1: + return True + return False + +def exportToFile(list, file): + f = open(file, "a+") + i = 0 + while i < len(list): + f.write(list[i].source) + i += 1 + f.close() + +def processAsm(mpl): + global funcMap + with open(mpl, "r") as f: + infunc = False + match = False + flag = False + curfunc = AsmFunc() + for ori_line in f: + line = ori_line.strip() + if not match: + if line.endswith("%function"): + funcName = line.split()[1][:-1] + match=isMatch(funcName) + if match: + func = AsmFunc() + func.funcName = funcName + funcMap[funcName] = func + curfunc = func + continue + else: + if not infunc: + if line == curfunc.funcName + ":": + infunc = True + curfunc.source += ori_line + else: + curfunc.source += ori_line + if line.endswith(curfunc.funcName + ", .-" + curfunc.funcName): + infunc = False + match = False + +def repaceGood(mpl): + global funcMap + with open(mpl, "r") as f: + infunc = False + match = False + curfunc = AsmFunc() + for ori_line in f: + line = ori_line.strip() + if not match: + print(ori_line, end='') + if line.endswith("%function"): + funcName = line.split()[1][:-1] + match=isMatch(funcName) + if match: + func = AsmFunc() + func.funcName = funcName + curfunc = func + continue + else: + if not infunc: + if line == curfunc.funcName + ":": + print(funcMap[curfunc.funcName].source, end='') + infunc = True + else: + print(ori_line, end='') + else: + if line.endswith(curfunc.funcName + ", .-" + curfunc.funcName): + infunc = False + match = False + +def main(): + if len(sys.argv) < 4 : + print("args count less than 4") + return + global file1 + good = sys.argv[1] + bad = sys.argv[2] + readFunclist(sys.argv[3]) + if os.path.isfile(bad): + processAsm(bad) + else: + print("file not found") + if os.path.isfile(good): + repaceGood(good) + else: + print("file not found") + + +if __name__ == "__main__": + main() + diff --git a/build/tools/spec/bis.sh b/build/tools/spec/bis.sh new file mode 120000 index 0000000000000000000000000000000000000000..5eaf823ba64478e16393288c6c6e72c1b9269495 --- /dev/null +++ b/build/tools/spec/bis.sh @@ -0,0 +1 @@ +../common/bis.sh \ No newline at end of file diff --git a/build/tools/spec/bisfunc.sh b/build/tools/spec/bisfunc.sh new file mode 120000 index 0000000000000000000000000000000000000000..49fcfec73350cbd8807e54fbcc38dcf05a97e2cd --- /dev/null +++ b/build/tools/spec/bisfunc.sh @@ -0,0 +1 @@ +../common/bisfunc.sh \ No newline at end of file diff --git a/build/tools/spec/clang2mpl.cfg b/build/tools/spec/clang2mpl.cfg new file mode 100644 index 0000000000000000000000000000000000000000..76d8c154208451f5e42ea9817a237ba0f7c12c9d --- /dev/null +++ b/build/tools/spec/clang2mpl.cfg @@ -0,0 +1,687 @@ +#------------------------------------------------------------------------------ +# SPEC CPU2017 config file for: gcc / g++ / gfortran on Linux x86 +#------------------------------------------------------------------------------ +# +# Usage: (1) Copy this to a new name +# cd $SPEC/config +# cp Example-x.cfg myname.cfg +# (2) Change items that are marked 'EDIT' (search for it) +# +# SPEC tested this config file with: +# Compiler version(s): 4.4.7, 4.9.2, 5.2.0, 6.3.0, 7.2.1, 8.1.0 +# Operating system(s): Oracle Linux Server 6.5 and 7.4 / +# Red Hat Enterprise Linux Server 6.5 and 7.4 +# Hardware: Xeon +# +# If your system differs, this config file might not work. 
+# You might find a better config file at http://www.spec.org/cpu2017/results +# +# Known Limitations with GCC 4 +# +# (1) Possible problem: compile time messages +# error: unrecognized command line option '...' +# Recommendation: Use a newer version of the compiler. +# If that is not possible, remove the unrecognized +# option from this config file. +# +# (2) Possible problem: run time errors messages +# 527.cam4_r or 627.cam4_s *** Miscompare of cam4_validate.txt +# Recommendation: Use a newer version of the compiler. +# If that is not possible, try reducing the optimization. +# +# +# Compiler issues: Contact your compiler vendor, not SPEC. +# For SPEC help: http://www.spec.org/cpu2017/Docs/techsupport.html +#------------------------------------------------------------------------------ + + +#--------- Label -------------------------------------------------------------- +# Arbitrary string to tag binaries (no spaces allowed) +# Two Suggestions: # (1) EDIT this label as you try new ideas. +%define label maplec # (2) Use a label meaningful to *you*. + + +#--------- Preprocessor ------------------------------------------------------- +%ifndef %{bits} # EDIT to control 32 or 64 bit compilation. Or, +% define bits 64 # you can set it on the command line using: +%endif # 'runcpu --define bits=nn' + +%ifndef %{build_ncpus} # EDIT to adjust number of simultaneous compiles. +% define build_ncpus 8 # Or, you can set it on the command line: +%endif # 'runcpu --define build_ncpus=nn' + +# Don't change this part. +%define os LINUX +%if %{bits} == 64 +% define model -m64 +%elif %{bits} == 32 +% define model -m32 +%else +% error Please define number of bits - see instructions in config file +%endif +%if %{label} =~ m/ / +% error Your label "%{label}" contains spaces. Please try underscores instead. +%endif +%if %{label} !~ m/^[a-zA-Z0-9._-]+$/ +% error Illegal character in label "%{label}". Please use only alphanumerics, underscore, hyphen, and period. +%endif + + + +#--------- Global Settings ---------------------------------------------------- +# For info, see: +# https://www.spec.org/cpu2017/Docs/config.html#fieldname +# Example: https://www.spec.org/cpu2017/Docs/config.html#tune + +command_add_redirect = 1 +flagsurl = $[top]/config/flags/gcc.xml +ignore_errors = 1 +iterations = 1 +label = %{label}-m%{bits} +line_width = 1020 +log_line_width = 1020 +makeflags = --jobs=%{build_ncpus} +mean_anyway = 1 +output_format = txt,html,cfg,pdf,csv +preenv = 1 +reportable = 0 +tune = base + + +#--------- How Many CPUs? ----------------------------------------------------- +# Both SPECrate and SPECspeed can test multiple chips / cores / hw threads +# - For SPECrate, you set the number of copies. +# - For SPECspeed, you set the number of threads. +# See: https://www.spec.org/cpu2017/Docs/system-requirements.html#MultipleCPUs +# +# q. How many should I set? +# a. Unknown, you will have to try it and see! +# +# To get you started, some suggestions: +# +# copies - This config file defaults to testing only 1 copy. You might +# try changing it to match the number of cores on your system, +# or perhaps the number of virtual CPUs as reported by: +# grep -c processor /proc/cpuinfo +# Be sure you have enough memory. See: +# https://www.spec.org/cpu2017/Docs/system-requirements.html#memory +# +# threads - This config file sets a starting point. You could try raising +# it. A higher thread count is much more likely to be useful for +# fpspeed than for intspeed. 
+# +intrate,fprate: + copies = 1 # EDIT to change number of copies (see above) +intspeed,fpspeed: + threads = 4 # EDIT to change number of OpenMP threads (see above) + +%ifndef %{gcc_dir} +% define gcc_dir /gcc_dir-is-not-used/tools/gcc-linaro-7.5.0 # EDIT (see above) +%endif + +#------- Compilers ------------------------------------------------------------ +default: +# EDIT: The parent directory for your compiler. +# Do not include the trailing /bin/ +# Do not include a trailing slash +# Examples: +# 1 On a Red Hat system, you said +# 'yum install devtoolset-7' +# Use: % define gcc_dir /opt/rh/devtoolset-7/root/usr +# +# 2 You built GCC in: /disk1/mybuild/gcc-8.1.0/bin/gcc +# Use: % define gcc_dir /disk1/mybuild/gcc-8.1.0 +# +# 3 You want: /usr/bin/gcc +# Use: % define gcc_dir /usr +# WARNING: See section +# "Known Limitations with GCC 4" +# + LINARO = $MAPLE_ROOT/tools/gcc-linaro-7.5.0 + CC = $(MAPLE_ROOT)/build/tools/spec/maplec -clang2mpl + CXX = $(LINARO)/bin/aarch64-linux-gnu-g++ -std=c++03 + FC = $(LINARO)/bin/aarch64-linux-gnu-gfortran + # How to say "Show me your version, please" + CC_VERSION_OPTION = -v + CXX_VERSION_OPTION = -v + FC_VERSION_OPTION = -v + + QEMU_RUN = $MAPLE_ROOT/tools/bin/qemu-aarch64 -L $LINARO/aarch64-linux-gnu/libc + +default: +%if %{bits} == 64 + sw_base_ptrsize = 64-bit + sw_peak_ptrsize = 64-bit +%else + sw_base_ptrsize = 32-bit + sw_peak_ptrsize = 32-bit +%endif + + +#--------- Portability -------------------------------------------------------- +default: # data model applies to all benchmarks +%if %{bits} == 32 + # Strongly recommended because at run-time, operations using modern file + # systems may fail spectacularly and frequently (or, worse, quietly and + # randomly) if a program does not accommodate 64-bit metadata. + EXTRA_PORTABILITY = -D_FILE_OFFSET_BITS=64 +%else + EXTRA_PORTABILITY = -DSPEC_LP64 +%endif + +# Benchmark-specific portability (ordered by last 2 digits of bmark number) + +500.perlbench_r,600.perlbench_s: #lang='C' +%if %{bits} == 32 +% define suffix IA32 +%else +% define suffix X64 +%endif + PORTABILITY = -DSPEC_%{os}_%{suffix} + +502.gcc_r: #lang='c' + CPORTABILITY = -DHAVE_ALLOCA_H + +521.wrf_r,621.wrf_s: #lang='F,C' + CPORTABILITY = -DSPEC_CASE_FLAG + FPORTABILITY = -fconvert=big-endian + +523.xalancbmk_r,623.xalancbmk_s: #lang='CXX' + PORTABILITY = -DSPEC_%{os} + +526.blender_r: #lang='CXX,C' + PORTABILITY = -funsigned-char -DSPEC_LINUX + +527.cam4_r,627.cam4_s: #lang='F,C' + PORTABILITY = -DSPEC_CASE_FLAG + +628.pop2_s: #lang='F,C' + CPORTABILITY = -DSPEC_CASE_FLAG + FPORTABILITY = -fconvert=big-endian + + +#-------- Tuning Flags common to Base and Peak -------------------------------- + +# +# Speed (OpenMP and Autopar allowed) +# +%if %{bits} == 32 + intspeed,fpspeed: + # + # Many of the speed benchmarks (6nn.benchmark_s) do not fit in 32 bits + # If you wish to run SPECint2017_speed or SPECfp2017_speed, please use + # + # runcpu --define bits=64 + # + fail_build = 1 +%else + intspeed,fpspeed: + EXTRA_OPTIMIZE = -fopenmp -lgomp -DSPEC_OPENMP + fpspeed: + # + # 627.cam4 needs a big stack; the preENV will apply it to all + # benchmarks in the set, as required by the rules. + # + preENV_OMP_STACKSIZE = 120M +%endif + + +#-------- Baseline Tuning Flags ---------------------------------------------- +# +# EDIT if needed -- Older GCC might not support some of the optimization +# switches here. See also 'About the -fno switches' below. 
+# +default=base: # flags for all base + OPTIMIZE = + +intrate,intspeed=base: # flags for integer base + EXTRA_COPTIMIZE = +# Notes about the above +# - 500.perlbench_r/600.perlbench_s needs -fno-strict-aliasing. +# - 502.gcc_r/602.gcc_s needs -fgnu89-inline or -z muldefs +# - For 'base', all benchmarks in a set must use the same options. +# - Therefore, all base benchmarks get the above. See: +# www.spec.org/cpu2017/Docs/runrules.html#BaseFlags +# www.spec.org/cpu2017/Docs/benchmarks/500.perlbench_r.html +# www.spec.org/cpu2017/Docs/benchmarks/502.gcc_r.html + + +#-------- Peak Tuning Flags ---------------------------------------------- +default=peak: + basepeak = yes # if you develop some peak tuning, remove this line. + # + # ----------------------- + # About the -fno switches + # ----------------------- + # + # For 'base', this config file (conservatively) disables some optimizations. + # You might want to try turning some of them back on, by creating a 'peak' + # section here, with individualized benchmark options: + # + # 500.perlbench_r=peak: + # OPTIMIZE = this + # 502.gcc_r=peak: + # OPTIMIZE = that + # 503.bwaves_r=peak: + # OPTIMIZE = other .....(and so forth) + # + # If you try it: + # - You must remove the 'basepeak' option, above. + # - You will need time and patience, to diagnose and avoid any errors. + # - perlbench is unlikely to work with strict aliasing + # - Some floating point benchmarks may get wrong answers, depending on: + # the particular chip + # the version of GCC + # other optimizations enabled + # -m32 vs. -m64 + # - See: http://www.spec.org/cpu2017/Docs/config.html + # - and: http://www.spec.org/cpu2017/Docs/runrules.html + + +#------------------------------------------------------------------------------ +# Tester and System Descriptions - EDIT all sections below this point +#------------------------------------------------------------------------------ +# For info about any field, see +# https://www.spec.org/cpu2017/Docs/config.html#fieldname +# Example: https://www.spec.org/cpu2017/Docs/config.html#hw_memory +#------------------------------------------------------------------------------- + +#--------- EDIT to match your version ----------------------------------------- +default: + sw_compiler001 = C/C++/Fortran: Version 7.2.1 of GCC, the + sw_compiler002 = GNU Compiler Collection + +#--------- EDIT info about you ------------------------------------------------ +# To understand the difference between hw_vendor/sponsor/tester, see: +# https://www.spec.org/cpu2017/Docs/config.html#test_sponsor +intrate,intspeed,fprate,fpspeed: # Important: keep this line + hw_vendor = My Corporation + tester = My Corporation + test_sponsor = My Corporation + license_num = nnn (Your SPEC license number) +# prepared_by = # Ima Pseudonym # Whatever you like: is never output + + +#--------- EDIT system availability dates ------------------------------------- +intrate,intspeed,fprate,fpspeed: # Important: keep this line + # Example # Brief info about field + hw_avail = # Nov-2099 # Date of LAST hardware component to ship + sw_avail = # Nov-2099 # Date of LAST software component to ship + +#--------- EDIT system information -------------------------------------------- +intrate,intspeed,fprate,fpspeed: # Important: keep this line + # Example # Brief info about field +# hw_cpu_name = # Intel Xeon E9-9999 v9 # chip name + hw_cpu_nominal_mhz = # 9999 # Nominal chip frequency, in MHz + hw_cpu_max_mhz = # 9999 # Max chip frequency, in MHz +# hw_disk = # 9 x 9 TB SATA III 9999 RPM # 
Size, type, other perf-relevant info + hw_model = # TurboBlaster 3000 # system model name +# hw_nchips = # 99 # number chips enabled + hw_ncores = # 9999 # number cores enabled + hw_ncpuorder = # 1-9 chips # Ordering options + hw_nthreadspercore = # 9 # number threads enabled per core + hw_other = # TurboNUMA Router 10 Gb # Other perf-relevant hw, or "None" + +# hw_memory001 = # 999 GB (99 x 9 GB 2Rx4 PC4-2133P-R, # The 'PCn-etc' is from the JEDEC +# hw_memory002 = # running at 1600 MHz) # label on the DIMM. + + hw_pcache = # 99 KB I + 99 KB D on chip per core # Primary cache size, type, location + hw_scache = # 99 KB I+D on chip per 9 cores # Second cache or "None" + hw_tcache = # 9 MB I+D on chip per chip # Third cache or "None" + hw_ocache = # 9 GB I+D off chip per system board # Other cache or "None" + + fw_bios = # American Megatrends 39030100 02/29/2016 # Firmware information +# sw_file = # ext99 # File system +# sw_os001 = # Linux Sailboat # Operating system +# sw_os002 = # Distribution 7.2 SP1 # and version + sw_other = # TurboHeap Library V8.1 # Other perf-relevant sw, or "None" +# sw_state = # Run level 99 # Software state. + +# Note: Some commented-out fields above are automatically set to preliminary +# values by sysinfo +# https://www.spec.org/cpu2017/Docs/config.html#sysinfo +# Uncomment lines for which you already know a better answer than sysinfo + +__HASH__ +500.perlbench_r=base=maple_test-m64: +# Last updated 2021-05-10 10:36:40 +opthash=1b55f44e78c515229834a282ce9d3013fee214077130f5cc6c8d469cce53274c +baggage= +compiler_version=\ +@eNp9VE1v2zAMvftX+NYNteK26ZIigA9Z6hYD0qZI3e0YyBLtqJUlTx+J08N++2Qnjr202MWC+Cjy\ +kXy0P5utfsbL5x+Lx9XiKXHHxHvRTOR+ahk3iAlfl0D0wJst5vN4lqzuZ7MotFqFKRNhTsgRmCeL\ +1a/l9OkpXu4dOEtrh7C6Ga1G14gzYSuUCxuOQ24k2ipclqC8xd3dfDG9XSXT5X2crB6nD/FzJDal\ +qZCQAk7x2/hu+jJPoksvwSoHM/FP43szKTKWWwXU3zKznviDQagVCUlr99HGR6jGUPmWb0BpJkV0\ +9pJaYaw/HnwbXKChbW6Xfy5vBhfXZ61/al0AHmWMwyQMmzr1GisIqSR1tWgcLuPp7UM8+G5z7V6B\ +wCkHxLHILc5BRyTAFAfk/DzIZZAqlgc0yKQyCotApq+k/iAHu7elgoxVTTfb/HWKAr9KhQ60kRR8\ +1/jKXOECaZvVb9C4ZzvEOW0U6ug1NdAeXSbeQKFaBBSxGnDThAoIZeo43QMnaWudEG4pUOQmYqAy\ +XSSzVoCpjkqpWbWPcxLj4Ch4r12plEa7lpSdiXBJMIfIEe/zTLWhpKoQBTeazwDDCoh2oNsOUsiw\ +ddLuPHDKIgHb7nHdGyvYbwvIDQNIXQ5lusE2Zn+AYtnun3xFWXX3ktvcLc/x3mYtGbRE9E4bKNB7\ +10lkGkmfIIcItTbc+CNsjeyshQvKsCLrHsUtKCWPiqlBNLyK2OhmdLS5iovRdXttgrhkrgptomJ4\ +FTgwKKrh1UmiPlUr3CxAuD6QHsks4xLTQyG6t8c9sRBLMaKKuR72xrsG8uZ+PJECDljX/o36PqjW\ +AWvpaH5i32f9gHhJo0G/kBT4xG+U6LlF8g8rtN94/8t/9v+r7/0FuibFEg== +compile_options=\ +@eNrVU21r2zAQ/u5fIfxdccdGWUJdsB0l8eZEwrIh2xfhOkqqLZaD5IS1v34n56UtDFpGv1RgfLq3\ +R3fP3aLVuKl+y7XaStTuOtVqO/JsZ1TdCbPXK2XEQRq1fgj9T74HogWX0L8aDK+ufc9L6JyNkB/s\ +rQnulA42dY3cwbZbhfVw6MTm+gvCNcLtGWDQIjzmjCTwW4xJXE5BYCTPREJzgnA6gG+lbBekFKR6\ +V+mgUI3EM5VLe9bMinmGWWWsNKCSf7rASBDsTtam0itIOaZlnBHBs4jPCBcOMY2y8OqELqKyoIKX\ +jOWEc0EZWcwZ2EQW5VMiJmlG+OXqboLTMk+I0/VXOplwUog4LXgINSK8QZh+hoIrU9+HuurUAV60\ +1i3ea1utJRi6e+ya0KjHqm8FOto7IyXetu0OH2TdtUY9SnQ6p7dm6aJciqWDOcUcScLVVlVWaYBe\ +b/T+6xArvVVaPotkLujGtntTy1vfS0YoScJXKTs60vgbZUXov+DP94DzhJWTLJpysL3ksrfGZJHM\ +xMXhw5Pre3SEoBPpPP1JoKL3ZLpvGKN5EcVplhY/zh194rxHJ8sij6CJT494fQr61MfAfwIwlxtg\ +vr9lh4/Kd5/x/x3wm/buFySyt85/26DLwlzmFKF+iLPxW6b9g/IL9c2jYga5Yrdp2+ZUMi3d1j5b\ +2b8rRMvz +exehash=1c69320bf26365e65e5b19b87fd09bbfdf73b3d6257845446d6001048f92fd40 + +619.lbm_s=base=maplec-m64: +# Last updated 2021-05-12 16:29:02 +opthash=7affc07e1d7c5ca4633093daa05c20102ce1dec866120559a921160cddd58e76 +baggage= +compiler_version=\ +*IENDX1ZFUlNJT05fT1BUSU9OOgo= +compile_options=\ 
+@eNqVUG1rgzAY/J5fEfLd2kEZTGpBY2ZdowlW2csXaV1kbmpE7WD/ftG+Ci1sIZCH3OVyd4GstHLz\ +JbK8EFDWXS6r1gBt1+RplzS76j1vkm/R5NmPie4QUGOrKCaaTh6mMwQAZj43INI/ZCn0dlfp27zS\ +y01diBRqasuj6ERCzVlzgtUROMSOXTVQK3RJ8szCFWWWAy+XlslaVGV9eJUwTgKfD8j+gvL7GYTz\ +Vu6aVCwQwAbE2LzqZA8y+4nxyEQjWwgo+5jHj9Ry1wobWxxQmwR4mZwIY88IMAOSlyi0lMPI8703\ +okhXvQ9ieypnYWTZHvWi1+OfQx4EqBesbvX5p37mcvsp0q5d9JyiPPNPkSEc+qDOrbL+kUjp+Fa0\ +TKhn9+UU5UGaxX3RFy3/AtPTsPg= +exehash=4796f3d722b9339855ae459928ddcee0650c458340a971fc905fa5338976c68c + +505.mcf_r=base=maplec-m64: +# Last updated 2021-05-12 17:04:02 +opthash=cd00cc44466c7c00fcb0a07a84b8bdec510af969b5da706b722633229c2d2efc +baggage= +compiler_version=\ +*IENDX1ZFUlNJT05fT1BUSU9OOgo= +compile_options=\ +@eNqNkF1PgzAYhe/7K5reM2aiJpKxBApuaKENH4leNRt2EQWKFEz89xYEdYsa3zRpk3PanvNEsjaq\ +3bM4FKWAsukKWSsLqK4t8o63ff1QtPxVtMXhzUZnCOij0hYbLRdXywsEAKYhsyAyH2UlTNXX5r6o\ +zWrXlCKHhl5yfnQhoeElzMd6izzfzTYQGoFqRM5flGy7SeVOllKeZIzFfpJwyvwoZPBkJithl+cQ\ +rpTs21ysEcAWxNj+McuHSN0bylIbHQVDQBfALLsmzibR2nHIUXX9CG85nh3/TD1e9e/S2OGMxqnj\ +BiRI7+cfxvQIkCC6/Y3fyazk/knknVoPBMrqi8ZnFQjHnsT7A0LopFtOAncoUlaTn2YDlW9I3gG2\ +95r+ +exehash=806db0e68b5994ed49999cc86262dbea2e13c31853d3ba087eb2f180c940ee06 + +502.gcc_r=base=maplec-m64: +# Last updated 2021-05-12 17:24:36 +opthash=2839b1967320158040c55387a370c4661288143e5e404dc55ea5be68a750538d +baggage= +compiler_version=\ +*IENDX1ZFUlNJT05fT1BUSU9OOgo= +compile_options=\ +@eNq1kV9PgzAUxd/5FE3fGdM4E5exBLoOqqxtBhh9ajbWxSrQScHEby/g8M+ivnnTpjc5J7f9nVJd\ +2sXmSe5VLoE+1EqXZmqZulJZLaqm3KlKvMhK7V9deAattjWtxYXj0dV4Ai0LsRWfAug86EI6pimd\ +rSqdYnPIZQbsdulh6EgDexFzjNqDLrCfBsAmo247qszyZif73hxkJp6NruqjXUzG50PrpQkTccr5\ +GsexYBzTFW81QkWAurmhd4sFYnRJAhGC0zoOifjlBQAzo5sqk3NooSlAyP2R4F1k/jXjiQu/4UCr\ +xUY8XUZeELfad7Re9TFFoRgM/8ra34fvkrUnOFsnnk8iktwPz+qRoRURevPbV53UTG8fZVabeRdb\ +XnxG+MEPQB9OtPgjuZWXhCIifoefF0c/S7sov+T4BmRTt5k= +exehash=0 + +520.omnetpp_r=base=maplec-m64: +# Last updated 2021-05-12 17:24:36 +opthash=be6e4dd67b5187b3cefa0f68d2b9d0de0da20ab650fd3c3ee25c7e2e09cbff32 +baggage= +compiler_version=\ +*IENYWENfVkVSU0lPTl9PUFRJT046Ci9iaW4vc2g6IENDOiBjb21tYW5kIG5vdCBmb3VuZAo= +compile_options=\ +@eNq1UNFOgzAUfe9XNH1nzkRNJGMJlLqhHW2gjfjUTOgSFCgBZuLfW3E45rv3pTfnnt57zolN49T7\ +d30oKw1NO5Sm6V3QD12ZD6o7NkXZqQ/dlYdPD10jYNveUjy0XNwvbxEAmO24CxHG0MmhY6YVCwOd\ +MOXEwmEckkBuoBP1ZX2s9oPprlr7FLqdY7avTaEr++E5ElsVExHIiIYkOW1SvhRMpZLzhKSpYpzE\ +Ow7PdWJRfncD4ao3xy7XawSwC3GWeVbh1LPgkXHhoQvBCFgbmMsH6m9SO7sUP04DEuOtmgj/4WY8\ +QzKR+IqzRPhBRCPxMqkZrSFAo/jpJ/E/tTKvbzof+vUcPDuEcAyAhr9Z0JDJ7xxmIXwBBSCXiw== +exehash=0 + +523.xalancbmk_r=base=maplec-m64: +# Last updated 2021-05-12 17:24:36 +opthash=a5f92e3f29c9a9dc5714dc66815a936b3270df34bf15c51d6007f825c19a3bfd +baggage= +compiler_version=\ +*IENYWENfVkVSU0lPTl9PUFRJT046Ci9iaW4vc2g6IENDOiBjb21tYW5kIG5vdCBmb3VuZAo= +compile_options=\ +@eNrtU9FumzAUfecrLL8TuqmbtKip5ICX0tnYwmaiTxZ1aEVHcIVJ1f59HZhTsk1969ssIZ17z8G6\ +3HPITBfuql/1XdPWwDwOjensMrBD3+hB9ftu2/Tqqe6bu5cV/AQDB62TrODZ4tvZFxgEMaN8CWAc\ +g1CD0PgrFgaEieDYtZMswetiAxxCnKuMKXmVY5QI1ygRQZlKM4qpomKjCEMJzkGYLtzzXPe6tvoN\ +RVuzO62iZvfYzlq2ep5V+6FpI2rviam2bu4o7Wi9M/3LnxLZV53VZtJo0z0dBFVbdTpqOt3ut7Ub\ +lefsWpWUcJSLw4jHRiFT4suJFL4UqDyPZ8VnjxNGPfyJSJogyca33HWqENhvBAuBNtgvZdynQoVk\ +ShSc545VjOOMcnA4v3mSZkV50uBfzwG4sGbvPvkSBvESxGW5cpZ5zNbXjMsVPHEQBs7XmBffCdoI\ +x526ObJrnMVXyl1w1Pw3+GMMHtfNWS7ROiWpvPF+TG6PLC5ljtQ/NS4AMHDKH9OP+nb+isyFuX2o\ +9WAvwVx1zAQAY2RIMqXn/ZlGISsOuZqF6hW+xlPZ +exehash=0 + +525.x264_r=base=maplec-m64: +# Last updated 2021-05-12 17:24:36 
+opthash=17555907b69668487c703e8ad30fdca935346ca79afa0bfdc1ae288920733ee3 +baggage= +compiler_version=\ +*IENDX1ZFUlNJT05fT1BUSU9OOgo= +compile_options=\ +@eNrtkt9vgjAQx9/5K5q+I+rUbUZMAJmyIW34kcynBrFu3YCagsb990PEDc22V/dA06TX3l179+nX\ +4amchO90zWIK+CZnPM2GUpYLFuVEbNMVE2RHBVt/qLADpcLMihAVtlv37T6U0DEDrLkALAlf6C6M\ +2SrMKel3+0NJMtAcDwFUXnlClWybKkuWKkm4iWkE5GLy05stDuSJh02jWJyJqQdTIFvxikZ8RTIR\ +KSwtoq19d9A7bGumQve5CLP6ScSThKfVfUQLfES8AGPX9DyCsOnM8ZlPX/gmciemq7b3ne5Nrz+4\ +vQOXo0qw8aAHwCjjWxHRMZSMITAM9cf+jk6kPyLsq/CsWSgVUAwcPNja1Ct8542XXt10jBk5BVyR\ +RFmN+ey7GsHI9TXdsi1/cSq6BAIl23KefvvmizHiyzca5dn4ADVOvgF/0QGgRGdP/uA61/wZsS39\ +ACdOqngUHEDXKNfVWREUjSgbUf4bUZZMGkk2kryuJD8BghRXuw== +exehash=0 + +531.deepsjeng_r=base=maplec-m64: +# Last updated 2021-05-12 17:24:36 +opthash=c9d27d3cf5376f585e57a3f2a29ee65eb5a8ddb7a1042d47749d036bb8f42043 +baggage= +compiler_version=\ +*IENYWENfVkVSU0lPTl9PUFRJT046Ci9iaW4vc2g6IENDOiBjb21tYW5kIG5vdCBmb3VuZAo= +compile_options=\ +@eNqNkF9PgzAUxd/7KW76zjKTxUQylkBbJ1poQyFhT41il9Q/1AAz8dvb4ZDNJ+/T7T2nvb/T3LXB\ +++Or2ds3A+5jsK7tQ9QPnW0G3R3aZ9vpT9PZ/VeErzDybe8tEV4ubpYrjBARmQwBEwJBA4Gbnlg4\ +CKiSzI9pTllSbY/nLOZcZywTxe4k67gqhVaVlAVTSgvJ8kzCXCcXl9crgHXvDl1jNhiREEhdR37t\ +1IvkXsgywhcUGHk2IqtbHm+V1y6JRjVhObnTs+GfiONdVpdFrKUoyjhJeVruphUjL0Y8zR9+/uZP\ +rd3Ti2mGfnM+nLEBxlSc/gbkVFTHcGfJvgF3B3iC +exehash=0 + +541.leela_r=base=maplec-m64: +# Last updated 2021-05-12 17:24:36 +opthash=c715a7e7b712428302fadbd8205a3f5ff13d2a99dbcb03cfef4de14ebb0dbb76 +baggage= +compiler_version=\ +*IENYWENfVkVSU0lPTl9PUFRJT046Ci9iaW4vc2g6IENDOiBjb21tYW5kIG5vdCBmb3VuZAo= +compile_options=\ +@eNp9kE9LxDAQxe/5FEPuKSuoYNkutElcq9kktCnUU9CahfqnkbYr+O2NdevuenBOw5uXmd+L9B15\ +e3hx2/bVgX8fW98NMRrGvm1G2++6p7a3H65vt58JPsMotEOwJHgRXS0uMEJUbXQMmFIgDRA/r4g8\ +EFZqHmQmGc+qNZA82ms2rYyyZaV1wcvSKs3lRsOh9i6hL88BloPf9Y1bYURjoHWdhFtzr7JbpU2C\ +T05jFICorq5Fui7D7BRjmmZc0hs7G/7nmh7w2hSp1aowaZaL3NzPeydIjEQu735+4U8t/eOza8Zh\ +dSweWAGmKIL9phJMVd+JjuJ8ASVmcYc= +exehash=0 + +548.exchange2_r=base=maplec-m64: +# Last updated 2021-05-12 17:24:36 +opthash=7c95017cd71c6e94420e5cb70d3cc0a48a81eb16a8246cb031329e6470ab6a5c +baggage= +compiler_version=\ +*IEZDX1ZFUlNJT05fT1BUSU9OOgovYmluL3NoOiBmOTA6IGNvbW1hbmQgbm90IGZvdW5kCg== +compile_options=\ +@eNq9kEtPhDAUhff8iqZ7yJioCWSYhEcZ0Q5teCS6apQpSX3QhjJG/fUWBhmcuLab3vae2/OdZrK1\ +3x5feCNeOZCqF7LVnqX7TtQ96w7tXnTsnXei+fThBbRMqY3EhyvHXV1By0oo9QDUitdKATsuKIrM\ +lsUorLbTmQVVSVhRUZqjomCEomxHf3qYXl+CtZaHruYbYMu5dhqlxBffO437AS1jEtEqwcG28OGZ\ +zdgNURbdsF+CP31HMbov84BRkpdBmOK0fJhHBhxoRWQ3pGrcFbDrAWr6GEeCmc+oPJBE/qA61iS8\ +JbQ0Ty1H/hsdp9ndhH621vLpmde93iwvT6AAjClwfEqEY1INeRZhvgFuXpvb +exehash=0 + +557.xz_r=base=maplec-m64: +# Last updated 2021-05-12 17:24:36 +opthash=34d1fd17d5aa193e0a6a2c6e3f3472c24b4e41ed9de1aafdefcd23008f8b6564 +baggage= +compiler_version=\ +*IENDX1ZFUlNJT05fT1BUSU9OOgo= +compile_options=\ +@eNrtk1tPwyAUx9/7KQjvXTcvUxdr0gtu1a6QXsz0hXQMHVrK0nZG/fSy6Uw7L59AAhz+nMNJzvmF\ +SJWmzJ/4vSg4UKtGqLIeGXVTCdbQal0uREWfeSXuX204gIY+1jrEhv3eWX8IDcPDUzIC0Foqya16\ +XVpzUVoyXxWcAVNPtUvaU8D0E4I8bSIfudn4U1MnSzF1b1OEYx/Fdv9lcHB4dDw8OdUBE+cGUQ9H\ +l8GYTuzB7skUTWmAd2p210mVZITEKEkoJiiaEmAGPb3qFWdUckmF2qhlbh5oy5SUqtSHQsyLN5lb\ ++Uq01GZryW/RbMnZU0vXQurKWxcLXjR5J2FLVHn5wJla8Arsj896QjI8AuC8VuuK8QtoeCPgefaP\ +3f5wYvcKk9SGndZDQyPySHYZOuNE+7oYtl4XRd6EdgL+ufBq2xs0S2OHEhynjhuEQXr71aENHmiE\ +QXT92xfYG+dq/shZU190WH+BAmBLMfT/QBz6ONsAbtF9B8AAJ6o= +exehash=0 + +999.specrand_ir=base=maplec-m64: +# Last updated 2021-05-12 17:24:36 +opthash=2b6f45155529ef0ee94c19b9445375b5551207252ee5fb82fdcb9fe4f6f19a4c +baggage= 
+compiler_version=\ +*IENDX1ZFUlNJT05fT1BUSU9OOgo= +compile_options=\ +@eNp9kFFLwzAUhd/zK0Leu04UwbIO2rTOamzC1oI+lS7LMLomJWkF/72xo2KLermQwDm5+c7NtfKa\ ++k0c5UlA3XZSKxsA2xnJu8r06iBN9S6MPH6E6AIBd7XOEqLl4mZ5iQDA9JEFEPkvuhG+7ZW/l8pv\ +6vYkOPRc63HoQkMv2bEUuyNP0rjcQC+zreCmVgeP66bRCs7r/KIi7PoKwpXVveFijQAOIMbhr5+e\ +RRrfU1aEaEKAgCPFrLwl0WbntCnNoMZpju+q0TDHGyzpU7GNKka3RRRnJCuex0kDJQIkyx/+Wsis\ +Vnr/Knhn15PI37wQDmFI8k9SktDyK+ePkJ/hnoeM +exehash=0 + +519.lbm_r=base=maplec-m64: +# Last updated 2021-05-12 17:29:14 +opthash=55e637b326098960c2f25abcffd8e22854b4464b6ae2619ebf92ce74d672f662 +baggage= +compiler_version=\ +*IENDX1ZFUlNJT05fT1BUSU9OOgo= +compile_options=\ +@eNp9kF9PgzAUxd/7KZq+M2aiJpKxBApuaKENlESfmg27WAW68MfEb2/ZcMoyvWnSNufc9nduomur\ +2rzLnSol1PtO6bp1QNs1quhE09cvqhEfslG7TxddIWCOrbG4aD67m98gADCNmQOR/aorabd9bW9V\ +bVebfSkLaJmlvx+daWgFGQux2ZIg9PPVeBdezqnIcsbSMMsEZWESM3heo5ew22sIF63um0IuEcAO\ +xNi9+P1RpP4DZdxFExYEDDNm+T3xVpnRplwH1Q8TvBYTw0XQgzl84qknGE2550ck4s+nloEXARIl\ +j38N6awWevsmi65dDpnL6if/CR7CQzIS/BM79vhakMgf2Mtq9NN8mMOvIXwB6zCRCQ== +exehash=95e543602be6b0de89f9afd90aacb45d06bb7b32f0f2295d9571e3c9a893f901 + +511.povray_r=base=maplec-m64: +# Last updated 2021-05-19 14:10:44 +opthash=6681316d90fcef5a8ec9d57d1ec3590b7d32c00621a8f461b30ed5b709bb4dc3 +baggage= +compiler_version=\ +@eNq9VUtv2zAMvvtX6NYNqaKmj7QJ4MNgBNuAYCnabOstkGXGViNLmiQ3bg/77ZPtuPaaDANWrJc4\ +FB8f+YmkUHR3F62+zW5uPy++rBbXS/+ZBijaP/tquUxRXHDhMJfIamB2GESL+XwWLVcfoygkmcqB\ +PGY5yakWsFIapFWFYUB6f51SwpKUMSy4pEbhy+HF8ITEXBJKDcvG55WiKHEqC5wOBs8Q8+Vi9f3m\ +w/X17Ob1UMMhETyGElil30cmjalwCm8N1RpMsKQmBTdFe7ZBpOSap4WBBG25y6boqMnPsW2KK8YS\ +K+gDkK0yG6sp22lyugFsQAC1sBoRK6m2mXJ1xsOUu59d1vj0ZDQZjk4Ja6GO0O2n2Xwe1tXE1GYI\ +4woc55qF/wK/qi1JY08SsC7hhvDx1Rhr1hXbwazNW+CkuX4TGN9s1PYlkXgp4ZbGArx9nBfJWlDt\ +D0E2Z051gs1odf2NvyqqEWGiSCDBvmcclK4zleIZxz5aBzl+8uF7YPZe3GMoGWjHlbSdZ5VWIfmP\ +ArCK74H1gvqCNmAaejB/kbqvnZWlrzvrHNhk0hOEYlRA2BDyHLJ1TCAu0p5CybT+acvw7ioNpWpl\ +rUVP4nYntRnlhXC8KXkXcc1LzJTxPGF6cYavzi4ux5M/q8/PzkeTNnw1jSE1+cMVpp2LywzQxIZa\ +WV52xzV05XGoTMdzCB/hBeFc+sbxXON1IVl1IS1ww7W/QqOUe02P7kLY/S3U65PXgvxtEA6sQM8M\ +6/VIBmzjn4BwF7p3oXGVvTO/DQeVaUFTsCE7ZoPB8dpfn6HyuBmaOpnw0DBmyrqDCldv3/AQRdqA\ +75D/SU6wrNsJ5SoBMUV1UwV+S6MHMLZqifq1QO/m9cJG/jFEvaX9HgW/AHQUm+k= +compile_options=\ +@eNrtlFFPwjAQx9/3KZq+Lt1mRI2EkcA2ER1bw7YEn5pRCkyhxXYY+fZuAxTUxAcToskuWXrJXde7\ +37X/QHC0TJ/YNFswIFZ5JrhqaiqXGc2JXPNJJskLk9l0Y8MzqBWuKlJsaBnXVgNqmhMOcBNAcy6W\ +zNzMl+YyXS0YESvGlVhLyswDNxdiocwZpWiR8VQKdGVcGJY5zriZppLOLxtlYP2KZnyNZroOSkMq\ +n9hU161zgChAYl+lIQByI+w5xRK4XjfpAdSfSsFzxieFO04VKxaj+NSKUfKshMx3W0gniUMSJRgP\ +vSgiIfaCAQYftsvy8WUDgNa2+jbUnCZwRiP7NN3ujwu7dyGObXjUPNQK6g5ObvxOLypixyCqaNcL\ +nFuyT/gVmep/3igedggOh3Gn2/f78cP+2AoT/HoTykYrPvR0g/txfI79bYXb4H9B7feD+5M9uk/W\ +EuNHRnPVPgL+TguACqXvnvKV+G6YlGM7mFktTLUw1cJUC9OfE6Y3nDbadw== +exehash=0f4659ece5065638b9268e65a56d231636b83a05ae00d17df9bf1a2ae7112f8b + +538.imagick_r=base=maplec-m64: +# Last updated 2021-05-31 19:30:05 +opthash=54a10f343892c95817762d047800af2eccb783c0e3ff864fcef7d0ae46bd6135 +baggage= +compiler_version=\ +*IENDX1ZFUlNJT05fT1BUSU9OOgo= +compile_options=\ +@eNrtkdFqgzAUhu/zFCH3agdlMKkFja51iyZohO0qtC5lbmqK0cHeftbqtpatDzAWAgk5fw7f/59Y\ +1Ua1eZW7opRQ7dtC1doGum2KvBVNVz8VjXiTTbF7d9AVAv1V9xIHzcyb2RwBgGnEbIisZ1VJS3e1\ +tS1qq9rsS5lDo99qamoqaPgpC3B/xH7gZStohOb4JtyMU5FmjCVBmgrKgjhi8HyNWsKu5xAutOqa\ +XC4RwDbE2PkR4Vik3h1l3EEnPAj03Jhlt8RdpX3tlG2oekGM12ISXIYdPgQPPHEFowl3vZCE/HHq\ +OzAjQML4/rewztZCbV9k3urlwXdZfWXwaQDCwR3xL1iPXL4WJPQO/GU16ml2yOJbEP9D/AND/ABB\ +Sw9g 
+exehash=facef3555a82b36cc7e8d92e422f6bffa9ea772e21800b4dd646cd8aa6e6eb36 + +500.perlbench_r=base=maplec-m64: +# Last updated 2021-06-02 10:59:30 +opthash=907b382deeffec61ac5b9a27df38c7435aa5d83bcabdd7e546b4199881462251 +baggage= +compiler_version=\ +*IENDX1ZFUlNJT05fT1BUSU9OOgo= +compile_options=\ +@eNrVUtFqgzAUffcrgu/WDkZhZQ40TatbaoKJ0D0Fa1OWrWpJ7Nj+ftG1doWOPU+U3HvO8YZ77k2b\ +2quKN7lVOwmafaua2kwd02pVtkIf6o3S4l1qtf0M3BvXsaGxksAdj+7GE9dxIFnSKXD9l6aSvjnU\ +/lrVflXsd7IEnn2bU9FRA7wZowjaI52hKF/YgKIMC0gyBLxkZL+NMq2fEBuV+6L2uaqkF6tMmhMS\ +8yX2aKGN1BaSH62vpQ3MXpa6qDe25IzkEUaC4ZDFiInuxiTEwfh4uwhzTgTLKc0QY4JQlC6p5QQO\ +swUS8wQjNqRdJhjJM4g6rE/JfM4QF1HCWTC5BcNzLI+TNF+J1cCcYNoh96Y56FI+uA6cAgiDq7Z9\ +kyR6JJQH7oWHrmO9hjSf43DBLHfpZ89GKIWxGAT/3uC+KUoyHkYJTvjzqeuz0b0CrXgWiqs62kms\ ++um3Nf1jhvfN+lWWrXnoBLvqLB7GAkA/Mzz7baB/t2B/X4Y8tlDUzW1XHSuSvNuBHwvwBanrEMs= +exehash=51357adb393d1e80ce358878f264dc9f58399d0c7c71acd7c4834ae0d4c6d3e4 + +600.perlbench_s=base=maplec-m64: +# Last updated 2021-07-08 13:59:25 +opthash=4a435f7f3d851ba39439680b2f6c3962466eabafd26d686251cc537e642df2bf +baggage= +compiler_version=\ +@eNo9T0FugzAQvOcVfIC6h6qHSjmAWYIlB1s2VLlZBLaqVcCWoVWfX7ttcljtzOxoZzej1LyC0ky0\ +RsgutpcDeXcLkmuwI5Jl8DMS4XEtwgd1i7czBnL9tPNEdufmjWwexz/bmOWVlkBjayso+1MEEhQ3\ +VCjIcvYQa7LbTpiIaPTDSjq7YN5YhdtNabozz+UQNgxRwu+dBIwgpYRhneLKSvQlB6N5oRvQJiWy\ +gh8f48jwQp3A1IyDvtPEjBa9opC0XyrqWkNnStbp4/PT/92Gs7a/mEsS3lx8efG3iZDQnuXdJ5Pl\ +6/ADa+5hBw== +compile_options=\ +@eNrVUk2PmzAQvfMrLO6EVKp6iJaVwHGCWxNbYKR0LxYhjkoXMLJJ1f77DqTJ7kp72u1lEYj5eH4z\ +fjM70wdd9ahPTauRGcbG9G7ludE29ajsuT82Vv3Stjn9ifxPvgemAwjYi+Vy6Xse5plYIT/8YTod\ +HuCUDrtqaHXIB93H9hGbbgBqGx7OTXsMR2NaF7pB1xdYjQJ4zbXywqBgXQiC4bdbk6TcgiFIzhTm\ +OUEBXcB3bNwYUg5WPVR9KJtOB2mTa3eNpDJjgais0xZC+vcYWg3GVNVW/REo17xMGFEFi4uUFGqq\ +SGMWLSGlWJxvidpQRoqbO3mq4GWOyRSbXb7ZFESqhMoi+vIZ3Z7LBRSju3Kv9nMmOBlQoxuuOS7I\ +LhPPwWLC3TlztrW+9z28QhhHb1f1wsCTr1zIyH8hse/BvLAoNyzeFpB7KfecTcgOp+oG+BD6z40L\ +nss4oYzK79ebPc3B9/gKkb3MY9Bf0ow+EAC9OpmZ7AJ9lVJMbED87V2b/6aVuTOHn7oe3f2Eabsn\ +ituAEZqnz9bv2p//qia0k8UyhdPJtFFt969DXk7b+Ww1/wKvBF+R +exehash=d2d00cc9f02ff885cbbaa84d0ca4c454e358751556a1d406920eb0d884eddc13 + +602.gcc_s=base=maplec-m64: +# Last updated 2021-07-08 14:13:46 +opthash=2b044efddeb93a2f639d7b416bfe4d7dffc256240a57f0bb58e0d756048efe18 +baggage= +compiler_version=\ +@eNo1jVEPgiAAhN/9FfwBorXmQ2+GpGwFLMtXlkjLBUKo/f6w8uF2d9t3O4CxrMm5opxJLi7Rdgl6\ +OKtREzqlkb15oxH3us/CEzvrO6MDaqbOtGh0zgxo8Fr9MAVgXgmCo7Gc7K8FgHQ1C3W9MlOrv3nm\ +5WtwYfzjMl1vYqRMFnjelllNJObsQAtZAnh38dz6BeaCsJNY2lGkWwDfyQcMLD2L +compile_options=\ +@eNqlUdFOgzAUfe9XNH1nTGP2QDYT6BhUgZKNGfWl2VgXcUCxBRP/3gJDt8QHdU2b3vSec2/vOZEo\ +jWJz4Pss51BUdSZKZQFVyyytmWzKXSbZO5fZ/mOGrhDQodIQHY/G4zECANMwtiAyX0TBza1mcbPY\ +VDk3acVLWx6wKCpdWprbJst3Zi1ErkxV8bSHpdDQWwydRwIa81XsYn1Fc9dZe9Ago/aYWZnmzY53\ +cctnb0rI+ghnk/G1DknEPNxyffvBZZhGC+IxH54uYy/0x4pqINLYjcK4y/QPQTy5gXCqRCNTfosA\ +tiDGs/8P2Fegzh2Nkxk6mxYBLR2O14vA9lY6dz55l3XcCPtsAFwsBQLUgu5jsrT14AkJybOry/4o\ +Sde+h8Z0mdgOCUjyNPyykwmBgET3F7n/K2+mYvvK01rdtpi8+MZ/KQlhJ3Mwv8ioP2ijm4V24rOA\ +OK0xeXHsT9etyScOfwKElQZx +exehash=fbf53d23cd71f67cdb1656653f9f52d792b67aecc01a767deb20944e23e43b6a + +625.x264_s=base=maplec-m64: +# Last updated 2021-07-08 14:14:34 +opthash=d40a7799bcf65d95bfb5aa310bda263a121e6e86a0a7c7cbe07f1d48ebad6dd9 +baggage= +compiler_version=\ +@eNpNjkkKwjAYhfeeIhcIv0OtIrjoEKSgTahVcBXaNGIwaUJapce3ooKrN/DxeChJ+JkUx4zmnLJy\ +lM0EbtZIqL0SEkzltATqZBv5e2KNU1p6qB9KN9BbqzvonBQfTCCcHhlJRslTEp92CGe6kcI2vPMC\ +VDsC2TAPg3f8syCH3lfdfyOsMbb97vHoVFIeX0pCi5QU2+kwmy+CZbhaI3y14zXjfiRlJD+wX9qz\ +MED4OXkB41NIsA== 
+compile_options=\ +@eNrtU9FugjAUfecrmr5j1Tm3EDUBZNoNKFFM5l6IYs2YlJIWF/f3K6ibJnvYZvawhIakl97Te9tz\ +enye6WyxoeskpYDnRcIzaWiyEElcRGKbrRIRvVKRrN/6sAU1FUoFUXGj2WxCTbOJFxgAomfOKFqq\ +XRSxRZ5SRHKamWJjc5ar0gItt0m6QgXnqUQyp/EeFgNdffzYucGBPpwGjq0mf+hYsxHQcbqiMV9F\ +UsQoyRQa79rdTvl7EiK6K8RCnq7EnDGeHepF5iwkkTUPHTIZOpN+c9dqX3Wuuze34HToa66OzfLj\ +LhI4vhdUmf2CG3Q7APQk34qYDqBmG8C2+7+//r4Cse5JEPbhGRdQU8TawezONUdTlTvnpcpajm+P\ +oyPgj4mCGjGA8xhOTEVLiD385KimXxJWHW4PDcgkNC3s4nB+vENFItRc7D9c9HK+pVyPL19oXMhB\ +iUnZJ/6DZwAqEdzhRTL+gBvVzDPDceRiq5QtZYf+ZFY+gRP9a3PV5qrNVZurNldtrv9lrnet0yqr +exehash=49c2c4c7a717f1d6c11fb7bb9ebeeebca2e5dafe6288d3b0f1112ec42afbf549 + +605.mcf_s=base=maplec-m64: +# Last updated 2021-07-08 14:27:49 +opthash=5a9bcfb51f44556d7b0f1d91dc12b533c8271e4d495bdde702718e8c126d2e5c +baggage= +compiler_version=\ +@eNo1jdEKwiAYhe/3FL6A/BDRRXfLSQxKZW7dSjNHkqbp6vkz1q4O55wPPkSIutBOtpwpLvoS+wru\ +wRsYk9UG/DU6AzyaZ50eJPhonUkwvq27wRyCy5Cj0QumEcZ64xFupKCkBGvoYTgi3P4Y9cohzf9T\ +1UPPlRyE6KiUxUzZWSA8hSLycYXWeWknsdsi/Km+tk04/Q== +compile_options=\ +@eNqtUV1vgjAUfe+vuOk74pZlD0RNoDJlQ9oIJNteGsWaMYGyFpfs36+gRE32sGX2pTf3nvtxzolk\ +ZZWrndjmhQBZN7mstIN0o/Ks4WpfbXLFP4XKt19jfIORCbWBmHgwHA4xQoQumAPYfpOlsNemS9jl\ +qi6ETWtRuWpHZFmb0cpe7/NiYzdSFtrWtcgOsAwsK7stwTKB7PcPJFjTmPnEfNHU99IZgBW0TfxD\ +S9Ucq9xNE8rjlLGlH8ecMj9aMDh71laaI8q6x58Qx0TI7u8ARlruVSYmGBEHCBn/l8xhDvUeKUvG\ ++IIZRkYswtKH0J3FpnbJsqt6fkTmnPSIX9LGiDrgPydL1ySSYBG8+qb5R/7dlgOU0WXiekEYJC/9\ +MZ0mGIVB9HQFW+E3dozk+l1kjZ60mOLUdJINoNM0nF7Bmz/oZFYu3GTOw8BrrSjK4xU0bX09M/Ub\ +hGoAkA== +exehash=92eb0a99ad04eb8d3409b9a0638b2e87a9e31d34990aac67a88b4a3bcd2accb3 + diff --git a/build/tools/spec/cross-gcc.cfg b/build/tools/spec/cross-gcc.cfg new file mode 100644 index 0000000000000000000000000000000000000000..361d657c6ff5ca93312d99121afd3271bfdd4d67 --- /dev/null +++ b/build/tools/spec/cross-gcc.cfg @@ -0,0 +1,496 @@ +#------------------------------------------------------------------------------ +# SPEC CPU2017 config file for: gcc / g++ / gfortran on Linux x86 +#------------------------------------------------------------------------------ +# +# Usage: (1) Copy this to a new name +# cd $SPEC/config +# cp Example-x.cfg myname.cfg +# (2) Change items that are marked 'EDIT' (search for it) +# +# SPEC tested this config file with: +# Compiler version(s): 4.4.7, 4.9.2, 5.2.0, 6.3.0, 7.2.1, 8.1.0 +# Operating system(s): Oracle Linux Server 6.5 and 7.4 / +# Red Hat Enterprise Linux Server 6.5 and 7.4 +# Hardware: Xeon +# +# If your system differs, this config file might not work. +# You might find a better config file at http://www.spec.org/cpu2017/results +# +# Known Limitations with GCC 4 +# +# (1) Possible problem: compile time messages +# error: unrecognized command line option '...' +# Recommendation: Use a newer version of the compiler. +# If that is not possible, remove the unrecognized +# option from this config file. +# +# (2) Possible problem: run time errors messages +# 527.cam4_r or 627.cam4_s *** Miscompare of cam4_validate.txt +# Recommendation: Use a newer version of the compiler. +# If that is not possible, try reducing the optimization. +# +# +# Compiler issues: Contact your compiler vendor, not SPEC. +# For SPEC help: http://www.spec.org/cpu2017/Docs/techsupport.html +#------------------------------------------------------------------------------ + + +#--------- Label -------------------------------------------------------------- +# Arbitrary string to tag binaries (no spaces allowed) +# Two Suggestions: # (1) EDIT this label as you try new ideas. 
+%define label gcc # (2) Use a label meaningful to *you*. + + +#--------- Preprocessor ------------------------------------------------------- +%ifndef %{bits} # EDIT to control 32 or 64 bit compilation. Or, +% define bits 64 # you can set it on the command line using: +%endif # 'runcpu --define bits=nn' + +%ifndef %{build_ncpus} # EDIT to adjust number of simultaneous compiles. +% define build_ncpus 8 # Or, you can set it on the command line: +%endif # 'runcpu --define build_ncpus=nn' + +# Don't change this part. +%define os LINUX +%if %{bits} == 64 +% define model -m64 +%elif %{bits} == 32 +% define model -m32 +%else +% error Please define number of bits - see instructions in config file +%endif +%if %{label} =~ m/ / +% error Your label "%{label}" contains spaces. Please try underscores instead. +%endif +%if %{label} !~ m/^[a-zA-Z0-9._-]+$/ +% error Illegal character in label "%{label}". Please use only alphanumerics, underscore, hyphen, and period. +%endif + + + +#--------- Global Settings ---------------------------------------------------- +# For info, see: +# https://www.spec.org/cpu2017/Docs/config.html#fieldname +# Example: https://www.spec.org/cpu2017/Docs/config.html#tune + +command_add_redirect = 1 +flagsurl = $[top]/config/flags/gcc.xml +ignore_errors = 1 +iterations = 1 +label = %{label}-m%{bits} +line_width = 1020 +log_line_width = 1020 +makeflags = --jobs=%{build_ncpus} +mean_anyway = 1 +output_format = txt,html,cfg,pdf,csv +preenv = 1 +reportable = 0 +tune = base + + +#--------- How Many CPUs? ----------------------------------------------------- +# Both SPECrate and SPECspeed can test multiple chips / cores / hw threads +# - For SPECrate, you set the number of copies. +# - For SPECspeed, you set the number of threads. +# See: https://www.spec.org/cpu2017/Docs/system-requirements.html#MultipleCPUs +# +# q. How many should I set? +# a. Unknown, you will have to try it and see! +# +# To get you started, some suggestions: +# +# copies - This config file defaults to testing only 1 copy. You might +# try changing it to match the number of cores on your system, +# or perhaps the number of virtual CPUs as reported by: +# grep -c processor /proc/cpuinfo +# Be sure you have enough memory. See: +# https://www.spec.org/cpu2017/Docs/system-requirements.html#memory +# +# threads - This config file sets a starting point. You could try raising +# it. A higher thread count is much more likely to be useful for +# fpspeed than for intspeed. +# +intrate,fprate: + copies = 1 # EDIT to change number of copies (see above) +intspeed,fpspeed: + threads = 4 # EDIT to change number of OpenMP threads (see above) + + +#------- Compilers ------------------------------------------------------------ +default: +# EDIT: The parent directory for your compiler. +# Do not include the trailing /bin/ +# Do not include a trailing slash +# Examples: +# 1 On a Red Hat system, you said +# 'yum install devtoolset-7' +# Use: % define gcc_dir /opt/rh/devtoolset-7/root/usr +# +# 2 You built GCC in: /disk1/mybuild/gcc-8.1.0/bin/gcc +# Use: % define gcc_dir /disk1/mybuild/gcc-8.1.0 +# +# 3 You want: /usr/bin/gcc +# Use: % define gcc_dir /usr +# WARNING: See section +# "Known Limitations with GCC 4" +# +%ifndef %{gcc_dir} +% define gcc_dir /home/yhm/maple_opensource/opensource/tools/gcc-linaro-7.5.0 # EDIT (see above) +%endif + +# EDIT if needed: the preENV line adds library directories to the runtime +# path. You can adjust it, or add lines for other environment variables. 
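+# For example, an added preENV line could look like the following sketch
+# (the library paths here are an assumption; adjust them to your toolchain layout):
+#   preENV_LD_LIBRARY_PATH  = %{gcc_dir}/lib64/:%{gcc_dir}/lib/:/lib64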
+# See: https://www.spec.org/cpu2017/Docs/config.html#preenv +# and: https://gcc.gnu.org/onlinedocs/gcc/Environment-Variables.html + SPECLANG = %{gcc_dir}/bin/ + CC = $(SPECLANG)aarch64-linux-gnu-gcc -std=c99 + CXX = $(SPECLANG)aarch64-linux-gnu-g++ -std=c++03 + FC = /usr/bin/aarch64-linux-gnu-gfortran-7 + # How to say "Show me your version, please" + CC_VERSION_OPTION = -v + CXX_VERSION_OPTION = -v + FC_VERSION_OPTION = -v + +default: +%if %{bits} == 64 + sw_base_ptrsize = 64-bit + sw_peak_ptrsize = 64-bit +%else + sw_base_ptrsize = 32-bit + sw_peak_ptrsize = 32-bit +%endif + + +#--------- Portability -------------------------------------------------------- +default: # data model applies to all benchmarks +%if %{bits} == 32 + # Strongly recommended because at run-time, operations using modern file + # systems may fail spectacularly and frequently (or, worse, quietly and + # randomly) if a program does not accommodate 64-bit metadata. + EXTRA_PORTABILITY = -D_FILE_OFFSET_BITS=64 +%else + EXTRA_PORTABILITY = -DSPEC_LP64 +%endif + +# Benchmark-specific portability (ordered by last 2 digits of bmark number) + +500.perlbench_r,600.perlbench_s: #lang='C' +%if %{bits} == 32 +% define suffix IA32 +%else +% define suffix X64 +%endif + PORTABILITY = -DSPEC_%{os}_%{suffix} + +521.wrf_r,621.wrf_s: #lang='F,C' + CPORTABILITY = -DSPEC_CASE_FLAG + FPORTABILITY = -fconvert=big-endian + +523.xalancbmk_r,623.xalancbmk_s: #lang='CXX' + PORTABILITY = -DSPEC_%{os} + +526.blender_r: #lang='CXX,C' + PORTABILITY = -funsigned-char -DSPEC_LINUX + +527.cam4_r,627.cam4_s: #lang='F,C' + PORTABILITY = -DSPEC_CASE_FLAG + +628.pop2_s: #lang='F,C' + CPORTABILITY = -DSPEC_CASE_FLAG + FPORTABILITY = -fconvert=big-endian + + +#-------- Tuning Flags common to Base and Peak -------------------------------- + +# +# Speed (OpenMP and Autopar allowed) +# +%if %{bits} == 32 + intspeed,fpspeed: + # + # Many of the speed benchmarks (6nn.benchmark_s) do not fit in 32 bits + # If you wish to run SPECint2017_speed or SPECfp2017_speed, please use + # + # runcpu --define bits=64 + # + fail_build = 1 +%else + intspeed,fpspeed: + EXTRA_OPTIMIZE = -fopenmp -DSPEC_OPENMP + fpspeed: + # + # 627.cam4 needs a big stack; the preENV will apply it to all + # benchmarks in the set, as required by the rules. + # + preENV_OMP_STACKSIZE = 120M +%endif + + +#-------- Baseline Tuning Flags ---------------------------------------------- +# +# EDIT if needed -- Older GCC might not support some of the optimization +# switches here. See also 'About the -fno switches' below. +# +default=base: # flags for all base + OPTIMIZE = -g -O3 -fno-pie -fno-unsafe-math-optimizations -fno-tree-loop-vectorize + +intrate,intspeed=base: # flags for integer base + EXTRA_COPTIMIZE = -fno-strict-aliasing -fgnu89-inline +# Notes about the above +# - 500.perlbench_r/600.perlbench_s needs -fno-strict-aliasing. +# - 502.gcc_r/602.gcc_s needs -fgnu89-inline or -z muldefs +# - For 'base', all benchmarks in a set must use the same options. +# - Therefore, all base benchmarks get the above. See: +# www.spec.org/cpu2017/Docs/runrules.html#BaseFlags +# www.spec.org/cpu2017/Docs/benchmarks/500.perlbench_r.html +# www.spec.org/cpu2017/Docs/benchmarks/502.gcc_r.html + + +#-------- Peak Tuning Flags ---------------------------------------------- +default=peak: + basepeak = yes # if you develop some peak tuning, remove this line. 
+ # + # ----------------------- + # About the -fno switches + # ----------------------- + # + # For 'base', this config file (conservatively) disables some optimizations. + # You might want to try turning some of them back on, by creating a 'peak' + # section here, with individualized benchmark options: + # + # 500.perlbench_r=peak: + # OPTIMIZE = this + # 502.gcc_r=peak: + # OPTIMIZE = that + # 503.bwaves_r=peak: + # OPTIMIZE = other .....(and so forth) + # + # If you try it: + # - You must remove the 'basepeak' option, above. + # - You will need time and patience, to diagnose and avoid any errors. + # - perlbench is unlikely to work with strict aliasing + # - Some floating point benchmarks may get wrong answers, depending on: + # the particular chip + # the version of GCC + # other optimizations enabled + # -m32 vs. -m64 + # - See: http://www.spec.org/cpu2017/Docs/config.html + # - and: http://www.spec.org/cpu2017/Docs/runrules.html + + +#------------------------------------------------------------------------------ +# Tester and System Descriptions - EDIT all sections below this point +#------------------------------------------------------------------------------ +# For info about any field, see +# https://www.spec.org/cpu2017/Docs/config.html#fieldname +# Example: https://www.spec.org/cpu2017/Docs/config.html#hw_memory +#------------------------------------------------------------------------------- + +#--------- EDIT to match your version ----------------------------------------- +default: + sw_compiler001 = C/C++/Fortran: Version 7.2.1 of GCC, the + sw_compiler002 = GNU Compiler Collection + +#--------- EDIT info about you ------------------------------------------------ +# To understand the difference between hw_vendor/sponsor/tester, see: +# https://www.spec.org/cpu2017/Docs/config.html#test_sponsor +intrate,intspeed,fprate,fpspeed: # Important: keep this line + hw_vendor = My Corporation + tester = My Corporation + test_sponsor = My Corporation + license_num = nnn (Your SPEC license number) +# prepared_by = # Ima Pseudonym # Whatever you like: is never output + + +#--------- EDIT system availability dates ------------------------------------- +intrate,intspeed,fprate,fpspeed: # Important: keep this line + # Example # Brief info about field + hw_avail = # Nov-2099 # Date of LAST hardware component to ship + sw_avail = # Nov-2099 # Date of LAST software component to ship + +#--------- EDIT system information -------------------------------------------- +intrate,intspeed,fprate,fpspeed: # Important: keep this line + # Example # Brief info about field +# hw_cpu_name = # Intel Xeon E9-9999 v9 # chip name + hw_cpu_nominal_mhz = # 9999 # Nominal chip frequency, in MHz + hw_cpu_max_mhz = # 9999 # Max chip frequency, in MHz +# hw_disk = # 9 x 9 TB SATA III 9999 RPM # Size, type, other perf-relevant info + hw_model = # TurboBlaster 3000 # system model name +# hw_nchips = # 99 # number chips enabled + hw_ncores = # 9999 # number cores enabled + hw_ncpuorder = # 1-9 chips # Ordering options + hw_nthreadspercore = # 9 # number threads enabled per core + hw_other = # TurboNUMA Router 10 Gb # Other perf-relevant hw, or "None" + +# hw_memory001 = # 999 GB (99 x 9 GB 2Rx4 PC4-2133P-R, # The 'PCn-etc' is from the JEDEC +# hw_memory002 = # running at 1600 MHz) # label on the DIMM. 
+ + hw_pcache = # 99 KB I + 99 KB D on chip per core # Primary cache size, type, location + hw_scache = # 99 KB I+D on chip per 9 cores # Second cache or "None" + hw_tcache = # 9 MB I+D on chip per chip # Third cache or "None" + hw_ocache = # 9 GB I+D off chip per system board # Other cache or "None" + + fw_bios = # American Megatrends 39030100 02/29/2016 # Firmware information +# sw_file = # ext99 # File system +# sw_os001 = # Linux Sailboat # Operating system +# sw_os002 = # Distribution 7.2 SP1 # and version + sw_other = # TurboHeap Library V8.1 # Other perf-relevant sw, or "None" +# sw_state = # Run level 99 # Software state. + +# Note: Some commented-out fields above are automatically set to preliminary +# values by sysinfo +# https://www.spec.org/cpu2017/Docs/config.html#sysinfo +# Uncomment lines for which you already know a better answer than sysinfo + +__HASH__ +500.perlbench_r=base=maple_test-m64: +# Last updated 2021-05-10 10:36:40 +opthash=1b55f44e78c515229834a282ce9d3013fee214077130f5cc6c8d469cce53274c +baggage= +compiler_version=\ +@eNp9VE1v2zAMvftX+NYNteK26ZIigA9Z6hYD0qZI3e0YyBLtqJUlTx+J08N++2Qnjr202MWC+Cjy\ +kXy0P5utfsbL5x+Lx9XiKXHHxHvRTOR+ahk3iAlfl0D0wJst5vN4lqzuZ7MotFqFKRNhTsgRmCeL\ +1a/l9OkpXu4dOEtrh7C6Ga1G14gzYSuUCxuOQ24k2ipclqC8xd3dfDG9XSXT5X2crB6nD/FzJDal\ +qZCQAk7x2/hu+jJPoksvwSoHM/FP43szKTKWWwXU3zKznviDQagVCUlr99HGR6jGUPmWb0BpJkV0\ +9pJaYaw/HnwbXKChbW6Xfy5vBhfXZ61/al0AHmWMwyQMmzr1GisIqSR1tWgcLuPp7UM8+G5z7V6B\ +wCkHxLHILc5BRyTAFAfk/DzIZZAqlgc0yKQyCotApq+k/iAHu7elgoxVTTfb/HWKAr9KhQ60kRR8\ +1/jKXOECaZvVb9C4ZzvEOW0U6ug1NdAeXSbeQKFaBBSxGnDThAoIZeo43QMnaWudEG4pUOQmYqAy\ +XSSzVoCpjkqpWbWPcxLj4Ch4r12plEa7lpSdiXBJMIfIEe/zTLWhpKoQBTeazwDDCoh2oNsOUsiw\ +ddLuPHDKIgHb7nHdGyvYbwvIDQNIXQ5lusE2Zn+AYtnun3xFWXX3ktvcLc/x3mYtGbRE9E4bKNB7\ +10lkGkmfIIcItTbc+CNsjeyshQvKsCLrHsUtKCWPiqlBNLyK2OhmdLS5iovRdXttgrhkrgptomJ4\ +FTgwKKrh1UmiPlUr3CxAuD6QHsks4xLTQyG6t8c9sRBLMaKKuR72xrsG8uZ+PJECDljX/o36PqjW\ +AWvpaH5i32f9gHhJo0G/kBT4xG+U6LlF8g8rtN94/8t/9v+r7/0FuibFEg== +compile_options=\ +@eNrVU21r2zAQ/u5fIfxdccdGWUJdsB0l8eZEwrIh2xfhOkqqLZaD5IS1v34n56UtDFpGv1RgfLq3\ +R3fP3aLVuKl+y7XaStTuOtVqO/JsZ1TdCbPXK2XEQRq1fgj9T74HogWX0L8aDK+ufc9L6JyNkB/s\ +rQnulA42dY3cwbZbhfVw6MTm+gvCNcLtGWDQIjzmjCTwW4xJXE5BYCTPREJzgnA6gG+lbBekFKR6\ +V+mgUI3EM5VLe9bMinmGWWWsNKCSf7rASBDsTtam0itIOaZlnBHBs4jPCBcOMY2y8OqELqKyoIKX\ +jOWEc0EZWcwZ2EQW5VMiJmlG+OXqboLTMk+I0/VXOplwUog4LXgINSK8QZh+hoIrU9+HuurUAV60\ +1i3ea1utJRi6e+ya0KjHqm8FOto7IyXetu0OH2TdtUY9SnQ6p7dm6aJciqWDOcUcScLVVlVWaYBe\ +b/T+6xArvVVaPotkLujGtntTy1vfS0YoScJXKTs60vgbZUXov+DP94DzhJWTLJpysL3ksrfGZJHM\ +xMXhw5Pre3SEoBPpPP1JoKL3ZLpvGKN5EcVplhY/zh194rxHJ8sij6CJT494fQr61MfAfwIwlxtg\ +vr9lh4/Kd5/x/x3wm/buFySyt85/26DLwlzmFKF+iLPxW6b9g/IL9c2jYga5Yrdp2+ZUMi3d1j5b\ +2b8rRMvz +exehash=1c69320bf26365e65e5b19b87fd09bbfdf73b3d6257845446d6001048f92fd40 + +500.perlbench_r=base=gcc-m64: +# Last updated 2021-05-13 10:18:46 +opthash=4a1fc46ed4b7dc5c387a83a4c8727837cdc03d41f8bddf80c915370e9e6911bd +baggage= +compiler_version=\ +@eNq9VUtv2zAMvudX6NYNraKmj7QJ4MMQBNuAYAnabDsasszYamRJ06Nxd9hvn2zHtddmGLBuvcSh\ ++PjITySFZrP4y/zm9uPyU7xcrcNnOvhsucxQ4rlwmEtkNTA7HMyWi8V8to7fz2YRyVUBxHpJoPCc\ +FFQLiJUGaZU3DEjvr1NKWJIxhgWX1Ch8NbwcnpKES0KpYfn4olL4EmfS42D2iLNYL+OvN+9Wq/nN\ +P8IbDongCZTAKv1zeNKYCqfwzlCtwQzW1GTgpuiZ7WCm5IZn3kCKdtzlU3TUJOnYLsMVd6kV9B7I\ +Tpmt1ZTtNQXdAjYggFqIR8RKqm2uXJ3xMOPuR5c1PjsdTYajM8JaqCN0+2G+WER1NQm1OcK4AseF\ +ZtHfwMe1JWnsSQrWpdwQPr4eY826YjuYjXkNnKzQrwITOo7aviTSIKXc0kRAsE8Kn24E1eEQZHPm\ +VCfYnFbX3/grXw0LEz6FFIeecVC6zlSKRxz7YB0U+HsI3wOzd+IOQ8lAO66k7TyrtLzk3zxgldwB\ 
+6wUNBW3BNPRg/iT1UDsry1B33jmwyaQnCMWogKgh5DFk65hC4rOeQsms/mnLCO4qi6RqZa1FT+J2\ +L7UZFV443pS8j7jhJWbKBJ4wvTzH1+eXV+PJ79UX5xejSRu+msaImuL+GtPOxeUGaGojrSwvu+Ma\ +uvI4VKbjBUQP8IRwLkPjBK7xxktWXUgL3HAdrtAo5V7So/sQ9vkW6vXJS0H+NAgHVmBghvV6JAe2\ +DY9BtA/du9Ckyt6ZX4aDyszTDGzETtjx8ckmXJ+h8qQZmjqZ6NAw5sq6gwpXb9/oEEXaQOiQ/0nO\ +YF23EypUCmKK6qYahC2N7sHYqiXq1wK9WdQLG4VnEfWW9ls0+Alw/ZnQ +compile_options=\ +@eNrVVG1r2zAQ/u5fIfxdcca6bAl1wXaUxJsTCb9Ati/CdZREmy0ZyS5df/1kO0lTGAxGKVRgfKd7\ +eaS757SRAlb5L7bnJQOybrgUembpRvGioaoVO67oA1N8/9u1P9iWEbVxce3xaDqe2JYV4DWZAds5\ +yoo5uhUOq1ruVHldMiprJrRsVcGcK7GRstTOoShgyUWuJPw8+jQaO/dcOHmuiuPkpjO0j/AgWmjc\ +QLegbnZuMZ0CWAAozwcdSQDnCUGB+W3myM+WRiAojmiAYwRgODLfjuvGCbGRijoXTsorBlc8Zvq8\ +s0rXESS50kyZLfbYOIoZQdesULnYmZRznPkRoknkJSuU0A4x9CJ3fEKnXpZimmSExChJKCZosybG\ +RiMvXiK6CCOUXNROownO4gB1e72KF4sEpdQP08Sd3JjbHgDEHwHcCwlrzgahFTrfM9Os5gi7+1f8\ +Ke+rAAZ7oxiDpZQ1fGBFIxV/YuC0TseMwk22pdsO4RQz9BnmJc81FwZ1b4r+ZQq5MC1gV5GkC7od\ +OnhnW8EMBIH7Zl0fALH/FZPUtV9QwLYM/QKSLSJvmRjbSzr0Vh9tghW9OLx7ftgWngFTiXAd/kDm\ +Rq9Elr5WBMep54dRmH4/F/OZNj0w2qaxZ+r3jP9vIvWph8C/ApAut4H59pYvyaC95qT975jdyvuf\ +JpG+6/zLClzG9sJyAPoRiOZvOXPvj2WmRGsvXZlcfjfqZXWqGs66Z+PqzfgDFjQx9g== +exehash=6baceb9eb267d444c46c4c9f978e50063408522981011f9ac78e7db729fc8926 + +519.lbm_r=base=gcc-m64: +# Last updated 2021-05-13 10:35:31 +opthash=d0595b41acba249b4b15e2950cf3b949e9504a149adac9800b3de173849eacd9 +baggage= +compiler_version=\ +@eNq9VUtv2zAMvudX6NYNraKmj7QJ4MMQBNuAYAnabDsasszYamRJ06Nxd9hvn2zHtddmGLBuvcSh\ ++PjITySFZrP4y/zm9uPyU7xcrcNnOvhsucxQ4rlwmEtkNTA7HMyWi8V8to7fz2YRyVUBxHpJoPCc\ +FFQLiJUGaZU3DEjvr1NKWJIxhgWX1Ch8NbwcnpKES0KpYfn4olL4EmfS42D2iLNYL+OvN+9Wq/nN\ +P8IbDongCZTAKv1zeNKYCqfwzlCtwQzW1GTgpuiZ7WCm5IZn3kCKdtzlU3TUJOnYLsMVd6kV9B7I\ +Tpmt1ZTtNQXdAjYggFqIR8RKqm2uXJ3xMOPuR5c1PjsdTYajM8JaqCN0+2G+WER1NQm1OcK4AseF\ +ZtHfwMe1JWnsSQrWpdwQPr4eY826YjuYjXkNnKzQrwITOo7aviTSIKXc0kRAsE8Kn24E1eEQZHPm\ +VCfYnFbX3/grXw0LEz6FFIeecVC6zlSKRxz7YB0U+HsI3wOzd+IOQ8lAO66k7TyrtLzk3zxgldwB\ +6wUNBW3BNPRg/iT1UDsry1B33jmwyaQnCMWogKgh5DFk65hC4rOeQsms/mnLCO4qi6RqZa1FT+J2\ +L7UZFV443pS8j7jhJWbKBJ4wvTzH1+eXV+PJ79UX5xejSRu+msaImuL+GtPOxeUGaGojrSwvu+Ma\ +uvI4VKbjBUQP8IRwLkPjBK7xxktWXUgL3HAdrtAo5V7So/sQ9vkW6vXJS0H+NAgHVmBghvV6JAe2\ +DY9BtA/du9Ckyt6ZX4aDyszTDGzETtjx8ckmXJ+h8qQZmjqZ6NAw5sq6gwpXb9/oEEXaQOiQ/0nO\ +YF23EypUCmKK6qYahC2N7sHYqiXq1wK9WdQLG4VnEfWW9ls0+Alw/ZnQ +compile_options=\ +@eNq9UlFPgzAQfudXNH3vmNFptsgSYKgoo82ARH1pkHVbFVrSglF/vWXM6RIfdffSu97lu7vvvlgK\ +VOUvbMVLBmTdcCn0xNKN4kVDVSuWXNFXpvjq3YEn0DKuNiUOHA7GwxG0LB/PyQRAeyMrZutW2Kxq\ +uV3ldcmorJnQslUFs3+4jZSlttdFgUouciXRxWA0GNpPXNh5rorN+VmXaN/QWrTIlIHOkG6WTjEe\ +A1QAJL8GHUiAZgkJfPPEs8DLrncxdbMU0yQjZBEkCcUkiOfEoKwBwqcArYRENWe90wqdr5ghodmg\ +DrfiH/kWHfT5RjGGSilr9MqKRir+wcCh7XpG5PwMgMt+zSm0/Anwfedo1PQNsXeLSerAA56gZW7k\ +k+wqcq8TkzvkbJv1gti/oQcFv5IILTwBpkM4Dx8DU/lHjG5nCO7ThUsJXqSuF0Zh+rCfpKMWWlEY\ +3x1Ta33055q5lE/P5ldPO/Sy+pbR/l4AbI8ZzY6pnn+6q9lj7qY3NAq9TllltVsNZ51Kf0j0E2qG\ +c4o= +exehash=b17469e48480b17716b457cef173dcb01bb9f937e33f10bfda3c01e7cce63d3b + +538.imagick_r=base=gcc-m64: +# Last updated 2021-05-13 15:27:38 +opthash=2f48a899d3d456bed03183b03581d9905efa460c559864a9e038739267957e43 +baggage= +compiler_version=\ +@eNq9VUtv2zAMvudX6NYNraKmj7QJ4MMQBNuAYAnabDsasszYamRJ06Nxd9hvn2zHtddmGLBuvcSh\ ++PjITySFZrP4y/zm9uPyU7xcrcNnOvhsucxQ4rlwmEtkNTA7HMyWi8V8to7fz2YRyVUBxHpJoPCc\ +FFQLiJUGaZU3DEjvr1NKWJIxhgWX1Ch8NbwcnpKES0KpYfn4olL4EmfS42D2iLNYL+OvN+9Wq/nN\ +P8IbDongCZTAKv1zeNKYCqfwzlCtwQzW1GTgpuiZ7WCm5IZn3kCKdtzlU3TUJOnYLsMVd6kV9B7I\ 
+Tpmt1ZTtNQXdAjYggFqIR8RKqm2uXJ3xMOPuR5c1PjsdTYajM8JaqCN0+2G+WER1NQm1OcK4AseF\ +ZtHfwMe1JWnsSQrWpdwQPr4eY826YjuYjXkNnKzQrwITOo7aviTSIKXc0kRAsE8Kn24E1eEQZHPm\ +VCfYnFbX3/grXw0LEz6FFIeecVC6zlSKRxz7YB0U+HsI3wOzd+IOQ8lAO66k7TyrtLzk3zxgldwB\ +6wUNBW3BNPRg/iT1UDsry1B33jmwyaQnCMWogKgh5DFk65hC4rOeQsms/mnLCO4qi6RqZa1FT+J2\ +L7UZFV443pS8j7jhJWbKBJ4wvTzH1+eXV+PJ79UX5xejSRu+msaImuL+GtPOxeUGaGojrSwvu+Ma\ +uvI4VKbjBUQP8IRwLkPjBK7xxktWXUgL3HAdrtAo5V7So/sQ9vkW6vXJS0H+NAgHVmBghvV6JAe2\ +DY9BtA/du9Ckyt6ZX4aDyszTDGzETtjx8ckmXJ+h8qQZmjqZ6NAw5sq6gwpXb9/oEEXaQOiQ/0nO\ +YF23EypUCmKK6qYahC2N7sHYqiXq1wK9WdQLG4VnEfWW9ls0+Alw/ZnQ +compile_options=\ +@eNrtVE1vozAQvfMrLN8NWTXtKlGpBIS27BJsBZDaXixKncRdsJENVbe/fg0ku81lT2lO8cVjz2g+\ +3jy9RApUF7/YmlcMyKblUui5pVvFy5aqTrxwRd+Y4uvfLvwGLWNqE+LCiT2bTKFlBXhJ5gA6W1kz\ +R3fCYXXHnbpoKkZlw4SWnSqZ88lspay0sylLVHFRKIm+25f2xHnmwikKVW6vpr2je0cb0SETBvqD\ +dPvilrMZQCVAct+oLQFapCQMzJUsQj+/Ayiyd3/UyzNM05yQVZimFJMwWRKTaQMQvgBoLSRqOBuN\ +TuhizQwQ7Rb1uWv+UQwVwOhvFWOokrJBb6xspeIfDByeXc2YXE0BuB5HvYFWMAdB4J4MnrEg9n9g\ +krnwACtomT0FJL+NvbvU+A5xG7x+mAT3dB/wfyChhefAVImW0VNooo+E6tBH+JCtPErwKvP8KI6y\ +x327A7zQiqPk5yk5N76Ozptr+fxqfvVNn72q/1Hp784AGBYaL07JoC/aq5lj6WX3NI78nl1VvRsN\ +5z1TP9H0LClnSTlLyllSjigpfwDZpdRT +exehash=eee9c4547d70bbcff53dd938e8d24d3492bdaa0d41abdbc0912925aae18bf1d5 + +507.cactuBSSN_r=base=gcc-m64: +# Last updated 2021-05-19 10:35:36 +opthash=89a6bfc2a18191e165ebb9999593cbcfd3b1e3234d8c08585230a52bdafd0e27 +baggage= +compiler_version=\ +@eNrtVltv2zYYffev0Fs6JLTiJnFiA3roPO8CeEuQulvfDIr6LDGmSJaX2NlDf/soSrJUW06LBOuA\ +wS+2SX73yzkOJh8/ThZ/Tu/f/3b7x+L2bu6+xr1g0nH38/7dB015GsSWMoMoD7QEovu9ye1sNp3M\ +F79MJlGYiRzCpywPcywZLIQEroVVBMLWTyME02FKCGKUYyXQdf+qfx7GlIcYK5INL4sHu0Eptyg9\ +Pd26mM1vF3/dv7u7m96/3lW/HzIawwZI8b7vOSxFmRForbCUoHpzrFIw42BPtjcRfElTqyAJ1tRk\ +4+CkjM+QdYqKiiWa4UcI10KttMSkesnxCpACBljDYhBqjqXOhPER91NqPjdRo7fng1F/8DYktauT\ +4P2v09ks8tnEWGcBQoVzlEsSvcT9wkuGpXyYgDYJVSEd3gyRJE2yjZul+h5+0lx+Fzdu2LBun1ji\ +TgnVOGbg5OPcJkuGpbsEXt4Z0Rx0hov2l/rCFitCmE0gQW5mDGxMI8rZ1o9+0gZy9Lcz33KmH9gD\ +gg0BaajgutEswrKcfrKARPwApGXUJbQCVZYH0Z3QXe5ks3F5Z40CGY1aByYIZhCVBdmarBUTiG3a\ +ehA89R91Gk5dpBEX9VlK1jpRXZ3qiHLLDC1Triwu6QYRoVydEL66QDcXV9fD0eHny4vLwag2X2xj\ +hFX+eINwo2IyBTjRkRSabppr77rQ6ErT0ByiJ9gpOOVucFyt0dJyUjSkdlzW2rVQCWFeM6OVCb2P\ +Qq05ea2Try1CBwS6ypDWjGRAVo4Cosp0q6FxEb1RXywH5qnFKeiInJHT07Ola5/C/KxcGh9M1LWM\ +mdCm88F49I26SiQVuAn5N4vTm/txCnKRABsHfqh6DqWDR1C6GAnPFsGbmQfswJFh0ALtH4L/iDwJ\ +OZLnkTyP5HkkzyN5Hsnz/0yeVqtDJFgVD113c2Gh6XrliY4ooXUXnb2UyhxRakUa3gnQ43bNV2mV\ +f3TyIbbc2LAqgC8HurD+cvB5cNM/vzxp9sbZYdGSMhiHoY/eo2eYCE/G6Dq8n7776fdp/0eb6u6R\ +wgn2Y5WKs2Q7Ww4TSfGB3EtrLJyDLa476zl+EApVgSPB2ZOXFanCOdJ2Weig6w5kP4i01R8JNznb\ +XnwTEexilNPbsdHJGCUSvBi8u+DOm05giR08tiRwTCMO6+cJqMUznyxOcmyy7ltXXCldqxpzktmU\ +8uZcRyApdDPkYRQ/RBF1HGtQSqhn0awEpc3NcLGDLx6TOu6fhaRynqoZ3N9pJ1MPRt3y/aWtJL4V\ +fb6+gw6J/gGx5MUB +compile_options=\ +@eNrNVd1umzAYvecpLG4jh1TLWiUqlfhLx0oABZCy3VjUMYk3sCMD1dqnn4GmC1qjdVLK5hvbfAd9\ +x+cc2T5nsEi/k4zmBPB9RTkr50pZCYorJGq2oQI9EEGzR129UBW5LCVEVyfj2WSqKooVLMM5ULUd\ +L4j2uCu0It3nBPE9YSWvBSba0bLiPC+1LcYwpywVHF6NP44n2j1lWpoKvLucNoX6B9yyGm5HI9AM\ +WFYbHY9Gkw8AYgD5geWYA2hHoWPJybcdM7l93iMjiQMUJWG4cqIIBaHjL0MAXcpwXm+IRFlWYDvt\ +HN8h240M03OQBMcr14r1CwC3AAayXcY43FPSLWpWphmRYlU72FAo6FPaEgFdvRKEwJzzPXwguOKC\ +PhFwGM/EvPByCsB1p8aNqlhzYK3X+jDqHdoF5ucgjHW1J6aqSBetMFl4xm0ka31h26rp+NYn1AO8\ +qvQRWHY74N+uvqoEcyAZukv3qyP/PJMXLS1nHa8MFAar2DBdz42/vJyksUZ9zzRjfOTHbHbuLJ87\ +sX/MraUPIVPXa8jIngrs/xDLuhSndMu4qETK4NXvwfp1kr/jfDIjvSAsmiC8hViHHtDKxStWLv61\ +lZ7r3w32Xnb7s18N1/z+m/xa3vSuihc3AWit9uzBHrZ3srM9RJA0aT2K6k+WW+Ko 
+exehash=63bc323d25366ddc5092f5d594b9e1caa8da1a2fac4e5ece4e8311cc4282a03a + diff --git a/build/tools/spec/gcc.cfg b/build/tools/spec/gcc.cfg new file mode 100644 index 0000000000000000000000000000000000000000..f264f2b95e3cf4d79cdeafe417ca1637635b522a --- /dev/null +++ b/build/tools/spec/gcc.cfg @@ -0,0 +1,461 @@ +#------------------------------------------------------------------------------ +# SPEC CPU2017 config file for: gcc / g++ / gfortran on Linux x86 +#------------------------------------------------------------------------------ +# +# Usage: (1) Copy this to a new name +# cd $SPEC/config +# cp Example-x.cfg myname.cfg +# (2) Change items that are marked 'EDIT' (search for it) +# +# SPEC tested this config file with: +# Compiler version(s): 4.4.7, 4.9.2, 5.2.0, 6.3.0, 7.2.1, 8.1.0 +# Operating system(s): Oracle Linux Server 6.5 and 7.4 / +# Red Hat Enterprise Linux Server 6.5 and 7.4 +# Hardware: Xeon +# +# If your system differs, this config file might not work. +# You might find a better config file at http://www.spec.org/cpu2017/results +# +# Known Limitations with GCC 4 +# +# (1) Possible problem: compile time messages +# error: unrecognized command line option '...' +# Recommendation: Use a newer version of the compiler. +# If that is not possible, remove the unrecognized +# option from this config file. +# +# (2) Possible problem: run time errors messages +# 527.cam4_r or 627.cam4_s *** Miscompare of cam4_validate.txt +# Recommendation: Use a newer version of the compiler. +# If that is not possible, try reducing the optimization. +# +# +# Compiler issues: Contact your compiler vendor, not SPEC. +# For SPEC help: http://www.spec.org/cpu2017/Docs/techsupport.html +#------------------------------------------------------------------------------ + + +#--------- Label -------------------------------------------------------------- +# Arbitrary string to tag binaries (no spaces allowed) +# Two Suggestions: # (1) EDIT this label as you try new ideas. +%define label gcc # (2) Use a label meaningful to *you*. + + +#--------- Preprocessor ------------------------------------------------------- +%ifndef %{bits} # EDIT to control 32 or 64 bit compilation. Or, +% define bits 64 # you can set it on the command line using: +%endif # 'runcpu --define bits=nn' + +%ifndef %{build_ncpus} # EDIT to adjust number of simultaneous compiles. +% define build_ncpus 8 # Or, you can set it on the command line: +%endif # 'runcpu --define build_ncpus=nn' + +# Don't change this part. +%define os LINUX +%if %{bits} == 64 +% define model -m64 +%elif %{bits} == 32 +% define model -m32 +%else +% error Please define number of bits - see instructions in config file +%endif +%if %{label} =~ m/ / +% error Your label "%{label}" contains spaces. Please try underscores instead. +%endif +%if %{label} !~ m/^[a-zA-Z0-9._-]+$/ +% error Illegal character in label "%{label}". Please use only alphanumerics, underscore, hyphen, and period. +%endif + + + +#--------- Global Settings ---------------------------------------------------- +# For info, see: +# https://www.spec.org/cpu2017/Docs/config.html#fieldname +# Example: https://www.spec.org/cpu2017/Docs/config.html#tune + +command_add_redirect = 1 +flagsurl = $[top]/config/flags/gcc.xml +ignore_errors = 1 +iterations = 1 +label = %{label}-m%{bits} +line_width = 1020 +log_line_width = 1020 +makeflags = --jobs=%{build_ncpus} +mean_anyway = 1 +output_format = txt,html,cfg,pdf,csv +preenv = 1 +reportable = 0 +tune = base + + +#--------- How Many CPUs? 
----------------------------------------------------- +# Both SPECrate and SPECspeed can test multiple chips / cores / hw threads +# - For SPECrate, you set the number of copies. +# - For SPECspeed, you set the number of threads. +# See: https://www.spec.org/cpu2017/Docs/system-requirements.html#MultipleCPUs +# +# q. How many should I set? +# a. Unknown, you will have to try it and see! +# +# To get you started, some suggestions: +# +# copies - This config file defaults to testing only 1 copy. You might +# try changing it to match the number of cores on your system, +# or perhaps the number of virtual CPUs as reported by: +# grep -c processor /proc/cpuinfo +# Be sure you have enough memory. See: +# https://www.spec.org/cpu2017/Docs/system-requirements.html#memory +# +# threads - This config file sets a starting point. You could try raising +# it. A higher thread count is much more likely to be useful for +# fpspeed than for intspeed. +# +intrate,fprate: + copies = 1 # EDIT to change number of copies (see above) +intspeed,fpspeed: + threads = 4 # EDIT to change number of OpenMP threads (see above) + + +#------- Compilers ------------------------------------------------------------ +default: +# EDIT: The parent directory for your compiler. +# Do not include the trailing /bin/ +# Do not include a trailing slash +# Examples: +# 1 On a Red Hat system, you said +# 'yum install devtoolset-7' +# Use: % define gcc_dir /opt/rh/devtoolset-7/root/usr +# +# 2 You built GCC in: /disk1/mybuild/gcc-8.1.0/bin/gcc +# Use: % define gcc_dir /disk1/mybuild/gcc-8.1.0 +# +# 3 You want: /usr/bin/gcc +# Use: % define gcc_dir /usr +# WARNING: See section +# "Known Limitations with GCC 4" +# +%ifndef %{gcc_dir} +% define gcc_dir /home/sun/emui/maple_opensource/opensource/tools/gcc-linaro-7.5.0 # EDIT (see above) +%endif + +# EDIT if needed: the preENV line adds library directories to the runtime +# path. You can adjust it, or add lines for other environment variables. +# See: https://www.spec.org/cpu2017/Docs/config.html#preenv +# and: https://gcc.gnu.org/onlinedocs/gcc/Environment-Variables.html + SPECLANG = %{gcc_dir}/bin/ + CC = $(SPECLANG)aarch64-linux-gnu-gcc -std=c99 + CXX = $(SPECLANG)g++ -std=c++03 %{model} + FC = $(SPECLANG)gfortran %{model} + # How to say "Show me your version, please" + CC_VERSION_OPTION = -v + CXX_VERSION_OPTION = -v + FC_VERSION_OPTION = -v + +default: +%if %{bits} == 64 + sw_base_ptrsize = 64-bit + sw_peak_ptrsize = 64-bit +%else + sw_base_ptrsize = 32-bit + sw_peak_ptrsize = 32-bit +%endif + + +#--------- Portability -------------------------------------------------------- +default: # data model applies to all benchmarks +%if %{bits} == 32 + # Strongly recommended because at run-time, operations using modern file + # systems may fail spectacularly and frequently (or, worse, quietly and + # randomly) if a program does not accommodate 64-bit metadata. 
+ EXTRA_PORTABILITY = -D_FILE_OFFSET_BITS=64 +%else + EXTRA_PORTABILITY = -DSPEC_LP64 +%endif + +# Benchmark-specific portability (ordered by last 2 digits of bmark number) + +500.perlbench_r,600.perlbench_s: #lang='C' +%if %{bits} == 32 +% define suffix IA32 +%else +% define suffix X64 +%endif + PORTABILITY = -DSPEC_%{os}_%{suffix} + +521.wrf_r,621.wrf_s: #lang='F,C' + CPORTABILITY = -DSPEC_CASE_FLAG + FPORTABILITY = -fconvert=big-endian + +523.xalancbmk_r,623.xalancbmk_s: #lang='CXX' + PORTABILITY = -DSPEC_%{os} + +526.blender_r: #lang='CXX,C' + PORTABILITY = -funsigned-char -DSPEC_LINUX + +527.cam4_r,627.cam4_s: #lang='F,C' + PORTABILITY = -DSPEC_CASE_FLAG + +628.pop2_s: #lang='F,C' + CPORTABILITY = -DSPEC_CASE_FLAG + FPORTABILITY = -fconvert=big-endian + + +#-------- Tuning Flags common to Base and Peak -------------------------------- + +# +# Speed (OpenMP and Autopar allowed) +# +%if %{bits} == 32 + intspeed,fpspeed: + # + # Many of the speed benchmarks (6nn.benchmark_s) do not fit in 32 bits + # If you wish to run SPECint2017_speed or SPECfp2017_speed, please use + # + # runcpu --define bits=64 + # + fail_build = 1 +%else + intspeed,fpspeed: + EXTRA_OPTIMIZE = -fopenmp -DSPEC_OPENMP + fpspeed: + # + # 627.cam4 needs a big stack; the preENV will apply it to all + # benchmarks in the set, as required by the rules. + # + preENV_OMP_STACKSIZE = 120M +%endif + + +#-------- Baseline Tuning Flags ---------------------------------------------- +# +# EDIT if needed -- Older GCC might not support some of the optimization +# switches here. See also 'About the -fno switches' below. +# +default=base: # flags for all base + OPTIMIZE = -O2 -fno-pie -fno-unsafe-math-optimizations -fno-tree-loop-vectorize + +intrate,intspeed=base: # flags for integer base + EXTRA_COPTIMIZE = -fno-strict-aliasing -fgnu89-inline +# Notes about the above +# - 500.perlbench_r/600.perlbench_s needs -fno-strict-aliasing. +# - 502.gcc_r/602.gcc_s needs -fgnu89-inline or -z muldefs +# - For 'base', all benchmarks in a set must use the same options. +# - Therefore, all base benchmarks get the above. See: +# www.spec.org/cpu2017/Docs/runrules.html#BaseFlags +# www.spec.org/cpu2017/Docs/benchmarks/500.perlbench_r.html +# www.spec.org/cpu2017/Docs/benchmarks/502.gcc_r.html + + +#-------- Peak Tuning Flags ---------------------------------------------- +default=peak: + basepeak = yes # if you develop some peak tuning, remove this line. + # + # ----------------------- + # About the -fno switches + # ----------------------- + # + # For 'base', this config file (conservatively) disables some optimizations. + # You might want to try turning some of them back on, by creating a 'peak' + # section here, with individualized benchmark options: + # + # 500.perlbench_r=peak: + # OPTIMIZE = this + # 502.gcc_r=peak: + # OPTIMIZE = that + # 503.bwaves_r=peak: + # OPTIMIZE = other .....(and so forth) + # + # If you try it: + # - You must remove the 'basepeak' option, above. + # - You will need time and patience, to diagnose and avoid any errors. + # - perlbench is unlikely to work with strict aliasing + # - Some floating point benchmarks may get wrong answers, depending on: + # the particular chip + # the version of GCC + # other optimizations enabled + # -m32 vs. 
-m64 + # - See: http://www.spec.org/cpu2017/Docs/config.html + # - and: http://www.spec.org/cpu2017/Docs/runrules.html + + +#------------------------------------------------------------------------------ +# Tester and System Descriptions - EDIT all sections below this point +#------------------------------------------------------------------------------ +# For info about any field, see +# https://www.spec.org/cpu2017/Docs/config.html#fieldname +# Example: https://www.spec.org/cpu2017/Docs/config.html#hw_memory +#------------------------------------------------------------------------------- + +#--------- EDIT to match your version ----------------------------------------- +default: + sw_compiler001 = C/C++/Fortran: Version 7.2.1 of GCC, the + sw_compiler002 = GNU Compiler Collection + +#--------- EDIT info about you ------------------------------------------------ +# To understand the difference between hw_vendor/sponsor/tester, see: +# https://www.spec.org/cpu2017/Docs/config.html#test_sponsor +intrate,intspeed,fprate,fpspeed: # Important: keep this line + hw_vendor = My Corporation + tester = My Corporation + test_sponsor = My Corporation + license_num = nnn (Your SPEC license number) +# prepared_by = # Ima Pseudonym # Whatever you like: is never output + + +#--------- EDIT system availability dates ------------------------------------- +intrate,intspeed,fprate,fpspeed: # Important: keep this line + # Example # Brief info about field + hw_avail = # Nov-2099 # Date of LAST hardware component to ship + sw_avail = # Nov-2099 # Date of LAST software component to ship + +#--------- EDIT system information -------------------------------------------- +intrate,intspeed,fprate,fpspeed: # Important: keep this line + # Example # Brief info about field +# hw_cpu_name = # Intel Xeon E9-9999 v9 # chip name + hw_cpu_nominal_mhz = # 9999 # Nominal chip frequency, in MHz + hw_cpu_max_mhz = # 9999 # Max chip frequency, in MHz +# hw_disk = # 9 x 9 TB SATA III 9999 RPM # Size, type, other perf-relevant info + hw_model = # TurboBlaster 3000 # system model name +# hw_nchips = # 99 # number chips enabled + hw_ncores = # 9999 # number cores enabled + hw_ncpuorder = # 1-9 chips # Ordering options + hw_nthreadspercore = # 9 # number threads enabled per core + hw_other = # TurboNUMA Router 10 Gb # Other perf-relevant hw, or "None" + +# hw_memory001 = # 999 GB (99 x 9 GB 2Rx4 PC4-2133P-R, # The 'PCn-etc' is from the JEDEC +# hw_memory002 = # running at 1600 MHz) # label on the DIMM. + + hw_pcache = # 99 KB I + 99 KB D on chip per core # Primary cache size, type, location + hw_scache = # 99 KB I+D on chip per 9 cores # Second cache or "None" + hw_tcache = # 9 MB I+D on chip per chip # Third cache or "None" + hw_ocache = # 9 GB I+D off chip per system board # Other cache or "None" + + fw_bios = # American Megatrends 39030100 02/29/2016 # Firmware information +# sw_file = # ext99 # File system +# sw_os001 = # Linux Sailboat # Operating system +# sw_os002 = # Distribution 7.2 SP1 # and version + sw_other = # TurboHeap Library V8.1 # Other perf-relevant sw, or "None" +# sw_state = # Run level 99 # Software state. 
+ +# Note: Some commented-out fields above are automatically set to preliminary +# values by sysinfo +# https://www.spec.org/cpu2017/Docs/config.html#sysinfo +# Uncomment lines for which you already know a better answer than sysinfo + +__HASH__ +500.perlbench_r=base=maple_test-m64: +# Last updated 2021-05-10 10:36:40 +opthash=1b55f44e78c515229834a282ce9d3013fee214077130f5cc6c8d469cce53274c +baggage= +compiler_version=\ +@eNp9VE1v2zAMvftX+NYNteK26ZIigA9Z6hYD0qZI3e0YyBLtqJUlTx+J08N++2Qnjr202MWC+Cjy\ +kXy0P5utfsbL5x+Lx9XiKXHHxHvRTOR+ahk3iAlfl0D0wJst5vN4lqzuZ7MotFqFKRNhTsgRmCeL\ +1a/l9OkpXu4dOEtrh7C6Ga1G14gzYSuUCxuOQ24k2ipclqC8xd3dfDG9XSXT5X2crB6nD/FzJDal\ +qZCQAk7x2/hu+jJPoksvwSoHM/FP43szKTKWWwXU3zKznviDQagVCUlr99HGR6jGUPmWb0BpJkV0\ +9pJaYaw/HnwbXKChbW6Xfy5vBhfXZ61/al0AHmWMwyQMmzr1GisIqSR1tWgcLuPp7UM8+G5z7V6B\ +wCkHxLHILc5BRyTAFAfk/DzIZZAqlgc0yKQyCotApq+k/iAHu7elgoxVTTfb/HWKAr9KhQ60kRR8\ +1/jKXOECaZvVb9C4ZzvEOW0U6ug1NdAeXSbeQKFaBBSxGnDThAoIZeo43QMnaWudEG4pUOQmYqAy\ +XSSzVoCpjkqpWbWPcxLj4Ch4r12plEa7lpSdiXBJMIfIEe/zTLWhpKoQBTeazwDDCoh2oNsOUsiw\ +ddLuPHDKIgHb7nHdGyvYbwvIDQNIXQ5lusE2Zn+AYtnun3xFWXX3ktvcLc/x3mYtGbRE9E4bKNB7\ +10lkGkmfIIcItTbc+CNsjeyshQvKsCLrHsUtKCWPiqlBNLyK2OhmdLS5iovRdXttgrhkrgptomJ4\ +FTgwKKrh1UmiPlUr3CxAuD6QHsks4xLTQyG6t8c9sRBLMaKKuR72xrsG8uZ+PJECDljX/o36PqjW\ +AWvpaH5i32f9gHhJo0G/kBT4xG+U6LlF8g8rtN94/8t/9v+r7/0FuibFEg== +compile_options=\ +@eNrVU21r2zAQ/u5fIfxdccdGWUJdsB0l8eZEwrIh2xfhOkqqLZaD5IS1v34n56UtDFpGv1RgfLq3\ +R3fP3aLVuKl+y7XaStTuOtVqO/JsZ1TdCbPXK2XEQRq1fgj9T74HogWX0L8aDK+ufc9L6JyNkB/s\ +rQnulA42dY3cwbZbhfVw6MTm+gvCNcLtGWDQIjzmjCTwW4xJXE5BYCTPREJzgnA6gG+lbBekFKR6\ +V+mgUI3EM5VLe9bMinmGWWWsNKCSf7rASBDsTtam0itIOaZlnBHBs4jPCBcOMY2y8OqELqKyoIKX\ +jOWEc0EZWcwZ2EQW5VMiJmlG+OXqboLTMk+I0/VXOplwUog4LXgINSK8QZh+hoIrU9+HuurUAV60\ +1i3ea1utJRi6e+ya0KjHqm8FOto7IyXetu0OH2TdtUY9SnQ6p7dm6aJciqWDOcUcScLVVlVWaYBe\ +b/T+6xArvVVaPotkLujGtntTy1vfS0YoScJXKTs60vgbZUXov+DP94DzhJWTLJpysL3ksrfGZJHM\ +xMXhw5Pre3SEoBPpPP1JoKL3ZLpvGKN5EcVplhY/zh194rxHJ8sij6CJT494fQr61MfAfwIwlxtg\ +vr9lh4/Kd5/x/x3wm/buFySyt85/26DLwlzmFKF+iLPxW6b9g/IL9c2jYga5Yrdp2+ZUMi3d1j5b\ +2b8rRMvz +exehash=1c69320bf26365e65e5b19b87fd09bbfdf73b3d6257845446d6001048f92fd40 + +500.perlbench_r=base=gcc-m64: +# Last updated 2021-05-13 10:18:46 +opthash=4a1fc46ed4b7dc5c387a83a4c8727837cdc03d41f8bddf80c915370e9e6911bd +baggage= +compiler_version=\ +@eNq9VUtv2zAMvudX6NYNraKmj7QJ4MMQBNuAYAnabDsasszYamRJ06Nxd9hvn2zHtddmGLBuvcSh\ ++PjITySFZrP4y/zm9uPyU7xcrcNnOvhsucxQ4rlwmEtkNTA7HMyWi8V8to7fz2YRyVUBxHpJoPCc\ +FFQLiJUGaZU3DEjvr1NKWJIxhgWX1Ch8NbwcnpKES0KpYfn4olL4EmfS42D2iLNYL+OvN+9Wq/nN\ +P8IbDongCZTAKv1zeNKYCqfwzlCtwQzW1GTgpuiZ7WCm5IZn3kCKdtzlU3TUJOnYLsMVd6kV9B7I\ +Tpmt1ZTtNQXdAjYggFqIR8RKqm2uXJ3xMOPuR5c1PjsdTYajM8JaqCN0+2G+WER1NQm1OcK4AseF\ +ZtHfwMe1JWnsSQrWpdwQPr4eY826YjuYjXkNnKzQrwITOo7aviTSIKXc0kRAsE8Kn24E1eEQZHPm\ +VCfYnFbX3/grXw0LEz6FFIeecVC6zlSKRxz7YB0U+HsI3wOzd+IOQ8lAO66k7TyrtLzk3zxgldwB\ +6wUNBW3BNPRg/iT1UDsry1B33jmwyaQnCMWogKgh5DFk65hC4rOeQsms/mnLCO4qi6RqZa1FT+J2\ +L7UZFV443pS8j7jhJWbKBJ4wvTzH1+eXV+PJ79UX5xejSRu+msaImuL+GtPOxeUGaGojrSwvu+Ma\ +uvI4VKbjBUQP8IRwLkPjBK7xxktWXUgL3HAdrtAo5V7So/sQ9vkW6vXJS0H+NAgHVmBghvV6JAe2\ +DY9BtA/du9Ckyt6ZX4aDyszTDGzETtjx8ckmXJ+h8qQZmjqZ6NAw5sq6gwpXb9/oEEXaQOiQ/0nO\ +YF23EypUCmKK6qYahC2N7sHYqiXq1wK9WdQLG4VnEfWW9ls0+Alw/ZnQ +compile_options=\ +@eNrVVG1r2zAQ/u5fIfxdcca6bAl1wXaUxJsTCb9Ati/CdZREmy0ZyS5df/1kO0lTGAxGKVRgfKd7\ +eaS757SRAlb5L7bnJQOybrgUembpRvGioaoVO67oA1N8/9u1P9iWEbVxce3xaDqe2JYV4DWZAds5\ +yoo5uhUOq1ruVHldMiprJrRsVcGcK7GRstTOoShgyUWuJPw8+jQaO/dcOHmuiuPkpjO0j/AgWmjc\ 
+QLegbnZuMZ0CWAAozwcdSQDnCUGB+W3myM+WRiAojmiAYwRgODLfjuvGCbGRijoXTsorBlc8Zvq8\ +s0rXESS50kyZLfbYOIoZQdesULnYmZRznPkRoknkJSuU0A4x9CJ3fEKnXpZimmSExChJKCZosybG\ +RiMvXiK6CCOUXNROownO4gB1e72KF4sEpdQP08Sd3JjbHgDEHwHcCwlrzgahFTrfM9Os5gi7+1f8\ +Ke+rAAZ7oxiDpZQ1fGBFIxV/YuC0TseMwk22pdsO4RQz9BnmJc81FwZ1b4r+ZQq5MC1gV5GkC7od\ +OnhnW8EMBIH7Zl0fALH/FZPUtV9QwLYM/QKSLSJvmRjbSzr0Vh9tghW9OLx7ftgWngFTiXAd/kDm\ +Rq9Elr5WBMep54dRmH4/F/OZNj0w2qaxZ+r3jP9vIvWph8C/ApAut4H59pYvyaC95qT975jdyvuf\ +JpG+6/zLClzG9sJyAPoRiOZvOXPvj2WmRGsvXZlcfjfqZXWqGs66Z+PqzfgDFjQx9g== +exehash=6baceb9eb267d444c46c4c9f978e50063408522981011f9ac78e7db729fc8926 + +519.lbm_r=base=gcc-m64: +# Last updated 2021-05-13 10:35:31 +opthash=d0595b41acba249b4b15e2950cf3b949e9504a149adac9800b3de173849eacd9 +baggage= +compiler_version=\ +@eNq9VUtv2zAMvudX6NYNraKmj7QJ4MMQBNuAYAnabDsasszYamRJ06Nxd9hvn2zHtddmGLBuvcSh\ ++PjITySFZrP4y/zm9uPyU7xcrcNnOvhsucxQ4rlwmEtkNTA7HMyWi8V8to7fz2YRyVUBxHpJoPCc\ +FFQLiJUGaZU3DEjvr1NKWJIxhgWX1Ch8NbwcnpKES0KpYfn4olL4EmfS42D2iLNYL+OvN+9Wq/nN\ +P8IbDongCZTAKv1zeNKYCqfwzlCtwQzW1GTgpuiZ7WCm5IZn3kCKdtzlU3TUJOnYLsMVd6kV9B7I\ +Tpmt1ZTtNQXdAjYggFqIR8RKqm2uXJ3xMOPuR5c1PjsdTYajM8JaqCN0+2G+WER1NQm1OcK4AseF\ +ZtHfwMe1JWnsSQrWpdwQPr4eY826YjuYjXkNnKzQrwITOo7aviTSIKXc0kRAsE8Kn24E1eEQZHPm\ +VCfYnFbX3/grXw0LEz6FFIeecVC6zlSKRxz7YB0U+HsI3wOzd+IOQ8lAO66k7TyrtLzk3zxgldwB\ +6wUNBW3BNPRg/iT1UDsry1B33jmwyaQnCMWogKgh5DFk65hC4rOeQsms/mnLCO4qi6RqZa1FT+J2\ +L7UZFV443pS8j7jhJWbKBJ4wvTzH1+eXV+PJ79UX5xejSRu+msaImuL+GtPOxeUGaGojrSwvu+Ma\ +uvI4VKbjBUQP8IRwLkPjBK7xxktWXUgL3HAdrtAo5V7So/sQ9vkW6vXJS0H+NAgHVmBghvV6JAe2\ +DY9BtA/du9Ckyt6ZX4aDyszTDGzETtjx8ckmXJ+h8qQZmjqZ6NAw5sq6gwpXb9/oEEXaQOiQ/0nO\ +YF23EypUCmKK6qYahC2N7sHYqiXq1wK9WdQLG4VnEfWW9ls0+Alw/ZnQ +compile_options=\ +@eNq9UlFPgzAQfudXNH3vmNFptsgSYKgoo82ARH1pkHVbFVrSglF/vWXM6RIfdffSu97lu7vvvlgK\ +VOUvbMVLBmTdcCn0xNKN4kVDVSuWXNFXpvjq3YEn0DKuNiUOHA7GwxG0LB/PyQRAeyMrZutW2Kxq\ +uV3ldcmorJnQslUFs3+4jZSlttdFgUouciXRxWA0GNpPXNh5rorN+VmXaN/QWrTIlIHOkG6WTjEe\ +A1QAJL8GHUiAZgkJfPPEs8DLrncxdbMU0yQjZBEkCcUkiOfEoKwBwqcArYRENWe90wqdr5ghodmg\ +DrfiH/kWHfT5RjGGSilr9MqKRir+wcCh7XpG5PwMgMt+zSm0/Anwfedo1PQNsXeLSerAA56gZW7k\ +k+wqcq8TkzvkbJv1gti/oQcFv5IILTwBpkM4Dx8DU/lHjG5nCO7ThUsJXqSuF0Zh+rCfpKMWWlEY\ +3x1Ta33055q5lE/P5ldPO/Sy+pbR/l4AbI8ZzY6pnn+6q9lj7qY3NAq9TllltVsNZ51Kf0j0E2qG\ +c4o= +exehash=b17469e48480b17716b457cef173dcb01bb9f937e33f10bfda3c01e7cce63d3b + +538.imagick_r=base=gcc-m64: +# Last updated 2021-06-01 19:42:39 +opthash=f82ded290c3df58b7c2eebd89f6f9377bf6eeeea69047f4bcaf68dd02039245c +baggage= +compiler_version=\ +@eNq9VUtv2zAMvudX6NYNraKmj7QJ4MMQBNuAYAnabDsasszYamRJ06Nxd9hvn2zHtddmGLBuvcSh\ ++PjITySFZrP4y/zm9uPyU7xcrcNnOvhsucxQ4rlwmEtkNTA7HMyWi8V8to7fz2YRyVUBxHpJoPCc\ +FFQLiJUGaZU3DEjvr1NKWJIxhgWX1Ch8NbwcnpKES0KpYfn4olL4EmfS42D2iLNYL+OvN+9Wq/nN\ +P8IbDongCZTAKv1zeNKYCqfwzlCtwQzW1GTgpuiZ7WCm5IZn3kCKdtzlU3TUJOnYLsMVd6kV9B7I\ +Tpmt1ZTtNQXdAjYggFqIR8RKqm2uXJ3xMOPuR5c1PjsdTYajM8JaqCN0+2G+WER1NQm1OcK4AseF\ +ZtHfwMe1JWnsSQrWpdwQPr4eY826YjuYjXkNnKzQrwITOo7aviTSIKXc0kRAsE8Kn24E1eEQZHPm\ +VCfYnFbX3/grXw0LEz6FFIeecVC6zlSKRxz7YB0U+HsI3wOzd+IOQ8lAO66k7TyrtLzk3zxgldwB\ +6wUNBW3BNPRg/iT1UDsry1B33jmwyaQnCMWogKgh5DFk65hC4rOeQsms/mnLCO4qi6RqZa1FT+J2\ +L7UZFV443pS8j7jhJWbKBJ4wvTzH1+eXV+PJ79UX5xejSRu+msaImuL+GtPOxeUGaGojrSwvu+Ma\ +uvI4VKbjBUQP8IRwLkPjBK7xxktWXUgL3HAdrtAo5V7So/sQ9vkW6vXJS0H+NAgHVmBghvV6JAe2\ +DY9BtA/du9Ckyt6ZX4aDyszTDGzETtjx8ckmXJ+h8qQZmjqZ6NAw5sq6gwpXb9/oEEXaQOiQ/0nO\ +YF23EypUCmKK6qYahC2N7sHYqiXq1wK9WdQLG4VnEfWW9ls0+Alw/ZnQ +compile_options=\ 
+@eNrtVE1vozAQvfMrLN8N6SrbKlGpBIS27BJsBZC6e7EodRq3YCMbqm5+/RpI2ubSU5tTfPHgGebj\ +zdNLpEB18czWvGJANi2XQs8t3SpetlR14oEr+sIUX/9z4Rm0jKlNiAsn9mwyhZYV4CWZA+hsZM0c\ +3QmH1R136qKpGJUNE1p2qmTOB7OVstLOY1miiotCSXRh/7Qnzj0XTlGocnM+7R3dK3oUHTJhoD9I\ +tw9uOZsBVAIk943aEqBFSsLAXMki9PMbgCJ790a9PMM0zQlZhWlKMQmTJTGZ8A+A1kKihrPR6IQu\ +1syg0G5Qn7jm22JID0Z/qxhDlZQNemFlKxXf7n7kwjTKwOHZFY/J+RSAy3HmK2gFcxAE7tFwGgti\ +/xcmmQsPQIOWWVhA8uvYu0mN7xDAweuHSXBL9wGfIwotPAemSrSM/oYm+qvhHRoK77KVRwleZZ4f\ +xVH2Z9/3gDO04ij5fUwWjl/fx6RLef9kIvRVX6aq38n1tkUAhhXHi2Ny6rs3bQZaetktjSO/J15V\ +72bEeU/iDww+yc5Jdk6yc5KdI8vOf2Dz8Is= +exehash=5fecfa5494e0b35756916f9e524fafee9ba36469de705f9e33ee6082bdf1d8bc + diff --git a/build/tools/spec/maplec b/build/tools/spec/maplec new file mode 120000 index 0000000000000000000000000000000000000000..0d46bbe86784244a11ac16fcbfe9e9b01a5f94da --- /dev/null +++ b/build/tools/spec/maplec @@ -0,0 +1 @@ +../common/maplec \ No newline at end of file diff --git a/build/tools/spec/mplfe.cfg b/build/tools/spec/mplfe.cfg new file mode 100644 index 0000000000000000000000000000000000000000..523eccea9e626bf2d1bf86c003f9ee2498354621 --- /dev/null +++ b/build/tools/spec/mplfe.cfg @@ -0,0 +1,643 @@ +#------------------------------------------------------------------------------ +# SPEC CPU2017 config file for: gcc / g++ / gfortran on Linux x86 +#------------------------------------------------------------------------------ +# +# Usage: (1) Copy this to a new name +# cd $SPEC/config +# cp Example-x.cfg myname.cfg +# (2) Change items that are marked 'EDIT' (search for it) +# +# SPEC tested this config file with: +# Compiler version(s): 4.4.7, 4.9.2, 5.2.0, 6.3.0, 7.2.1, 8.1.0 +# Operating system(s): Oracle Linux Server 6.5 and 7.4 / +# Red Hat Enterprise Linux Server 6.5 and 7.4 +# Hardware: Xeon +# +# If your system differs, this config file might not work. +# You might find a better config file at http://www.spec.org/cpu2017/results +# +# Known Limitations with GCC 4 +# +# (1) Possible problem: compile time messages +# error: unrecognized command line option '...' +# Recommendation: Use a newer version of the compiler. +# If that is not possible, remove the unrecognized +# option from this config file. +# +# (2) Possible problem: run time errors messages +# 527.cam4_r or 627.cam4_s *** Miscompare of cam4_validate.txt +# Recommendation: Use a newer version of the compiler. +# If that is not possible, try reducing the optimization. +# +# +# Compiler issues: Contact your compiler vendor, not SPEC. +# For SPEC help: http://www.spec.org/cpu2017/Docs/techsupport.html +#------------------------------------------------------------------------------ + + +#--------- Label -------------------------------------------------------------- +# Arbitrary string to tag binaries (no spaces allowed) +# Two Suggestions: # (1) EDIT this label as you try new ideas. +%define label maplec # (2) Use a label meaningful to *you*. + + +#--------- Preprocessor ------------------------------------------------------- +%ifndef %{bits} # EDIT to control 32 or 64 bit compilation. Or, +% define bits 64 # you can set it on the command line using: +%endif # 'runcpu --define bits=nn' + +%ifndef %{build_ncpus} # EDIT to adjust number of simultaneous compiles. +% define build_ncpus 8 # Or, you can set it on the command line: +%endif # 'runcpu --define build_ncpus=nn' + +# Don't change this part. 
+%define os LINUX +%if %{bits} == 64 +% define model -m64 +%elif %{bits} == 32 +% define model -m32 +%else +% error Please define number of bits - see instructions in config file +%endif +%if %{label} =~ m/ / +% error Your label "%{label}" contains spaces. Please try underscores instead. +%endif +%if %{label} !~ m/^[a-zA-Z0-9._-]+$/ +% error Illegal character in label "%{label}". Please use only alphanumerics, underscore, hyphen, and period. +%endif + + + +#--------- Global Settings ---------------------------------------------------- +# For info, see: +# https://www.spec.org/cpu2017/Docs/config.html#fieldname +# Example: https://www.spec.org/cpu2017/Docs/config.html#tune + +command_add_redirect = 1 +flagsurl = $[top]/config/flags/gcc.xml +ignore_errors = 1 +iterations = 1 +label = %{label}-m%{bits} +line_width = 1020 +log_line_width = 1020 +makeflags = --jobs=%{build_ncpus} +mean_anyway = 1 +output_format = txt,html,cfg,pdf,csv +preenv = 1 +reportable = 0 +tune = base + + +#--------- How Many CPUs? ----------------------------------------------------- +# Both SPECrate and SPECspeed can test multiple chips / cores / hw threads +# - For SPECrate, you set the number of copies. +# - For SPECspeed, you set the number of threads. +# See: https://www.spec.org/cpu2017/Docs/system-requirements.html#MultipleCPUs +# +# q. How many should I set? +# a. Unknown, you will have to try it and see! +# +# To get you started, some suggestions: +# +# copies - This config file defaults to testing only 1 copy. You might +# try changing it to match the number of cores on your system, +# or perhaps the number of virtual CPUs as reported by: +# grep -c processor /proc/cpuinfo +# Be sure you have enough memory. See: +# https://www.spec.org/cpu2017/Docs/system-requirements.html#memory +# +# threads - This config file sets a starting point. You could try raising +# it. A higher thread count is much more likely to be useful for +# fpspeed than for intspeed. +# +intrate,fprate: + copies = 1 # EDIT to change number of copies (see above) +intspeed,fpspeed: + threads = 4 # EDIT to change number of OpenMP threads (see above) + +%ifndef %{gcc_dir} +% define gcc_dir /gcc_dir-is-not-used/tools/gcc-linaro-7.5.0 # EDIT (see above) +%endif + +#------- Compilers ------------------------------------------------------------ +default: +# EDIT: The parent directory for your compiler. 
+# Do not include the trailing /bin/ +# Do not include a trailing slash +# Examples: +# 1 On a Red Hat system, you said +# 'yum install devtoolset-7' +# Use: % define gcc_dir /opt/rh/devtoolset-7/root/usr +# +# 2 You built GCC in: /disk1/mybuild/gcc-8.1.0/bin/gcc +# Use: % define gcc_dir /disk1/mybuild/gcc-8.1.0 +# +# 3 You want: /usr/bin/gcc +# Use: % define gcc_dir /usr +# WARNING: See section +# "Known Limitations with GCC 4" +# + LINARO = $MAPLE_ROOT/tools/gcc-linaro-7.5.0 + CC = $(MAPLE_ROOT)/build/tools/spec/maplec + CXX = $(LINARO)/bin/aarch64-linux-gnu-g++ -std=c++03 + FC = $(LINARO)/bin/aarch64-linux-gnu-gfortran + # How to say "Show me your version, please" + CC_VERSION_OPTION = -v + CXX_VERSION_OPTION = -v + FC_VERSION_OPTION = -v + + QEMU_RUN = $MAPLE_ROOT/tools/bin/qemu-aarch64 -L $LINARO/aarch64-linux-gnu/libc + +default: +%if %{bits} == 64 + sw_base_ptrsize = 64-bit + sw_peak_ptrsize = 64-bit +%else + sw_base_ptrsize = 32-bit + sw_peak_ptrsize = 32-bit +%endif + + +#--------- Portability -------------------------------------------------------- +default: # data model applies to all benchmarks +%if %{bits} == 32 + # Strongly recommended because at run-time, operations using modern file + # systems may fail spectacularly and frequently (or, worse, quietly and + # randomly) if a program does not accommodate 64-bit metadata. + EXTRA_PORTABILITY = -D_FILE_OFFSET_BITS=64 +%else + EXTRA_PORTABILITY = -DSPEC_LP64 +%endif + +# Benchmark-specific portability (ordered by last 2 digits of bmark number) + +500.perlbench_r,600.perlbench_s: #lang='C' +%if %{bits} == 32 +% define suffix IA32 +%else +% define suffix X64 +%endif + PORTABILITY = -DSPEC_%{os}_%{suffix} + +502.gcc_r: #lang='c' + CPORTABILITY = -DHAVE_ALLOCA_H + +521.wrf_r,621.wrf_s: #lang='F,C' + CPORTABILITY = -DSPEC_CASE_FLAG + FPORTABILITY = -fconvert=big-endian + +523.xalancbmk_r,623.xalancbmk_s: #lang='CXX' + PORTABILITY = -DSPEC_%{os} + +526.blender_r: #lang='CXX,C' + PORTABILITY = -funsigned-char -DSPEC_LINUX + +527.cam4_r,627.cam4_s: #lang='F,C' + PORTABILITY = -DSPEC_CASE_FLAG + +628.pop2_s: #lang='F,C' + CPORTABILITY = -DSPEC_CASE_FLAG + FPORTABILITY = -fconvert=big-endian + + +#-------- Tuning Flags common to Base and Peak -------------------------------- + +# +# Speed (OpenMP and Autopar allowed) +# +%if %{bits} == 32 + intspeed,fpspeed: + # + # Many of the speed benchmarks (6nn.benchmark_s) do not fit in 32 bits + # If you wish to run SPECint2017_speed or SPECfp2017_speed, please use + # + # runcpu --define bits=64 + # + fail_build = 1 +%else + intspeed,fpspeed: + EXTRA_OPTIMIZE = -fopenmp -lgomp -DSPEC_OPENMP + fpspeed: + # + # 627.cam4 needs a big stack; the preENV will apply it to all + # benchmarks in the set, as required by the rules. + # + preENV_OMP_STACKSIZE = 120M +%endif + + +#-------- Baseline Tuning Flags ---------------------------------------------- +# +# EDIT if needed -- Older GCC might not support some of the optimization +# switches here. See also 'About the -fno switches' below. +# +default=base: # flags for all base + OPTIMIZE = + +intrate,intspeed=base: # flags for integer base + EXTRA_COPTIMIZE = +# Notes about the above +# - 500.perlbench_r/600.perlbench_s needs -fno-strict-aliasing. +# - 502.gcc_r/602.gcc_s needs -fgnu89-inline or -z muldefs +# - For 'base', all benchmarks in a set must use the same options. +# - Therefore, all base benchmarks get the above. 
See: +# www.spec.org/cpu2017/Docs/runrules.html#BaseFlags +# www.spec.org/cpu2017/Docs/benchmarks/500.perlbench_r.html +# www.spec.org/cpu2017/Docs/benchmarks/502.gcc_r.html + + +#-------- Peak Tuning Flags ---------------------------------------------- +default=peak: + basepeak = yes # if you develop some peak tuning, remove this line. + # + # ----------------------- + # About the -fno switches + # ----------------------- + # + # For 'base', this config file (conservatively) disables some optimizations. + # You might want to try turning some of them back on, by creating a 'peak' + # section here, with individualized benchmark options: + # + # 500.perlbench_r=peak: + # OPTIMIZE = this + # 502.gcc_r=peak: + # OPTIMIZE = that + # 503.bwaves_r=peak: + # OPTIMIZE = other .....(and so forth) + # + # If you try it: + # - You must remove the 'basepeak' option, above. + # - You will need time and patience, to diagnose and avoid any errors. + # - perlbench is unlikely to work with strict aliasing + # - Some floating point benchmarks may get wrong answers, depending on: + # the particular chip + # the version of GCC + # other optimizations enabled + # -m32 vs. -m64 + # - See: http://www.spec.org/cpu2017/Docs/config.html + # - and: http://www.spec.org/cpu2017/Docs/runrules.html + + +#------------------------------------------------------------------------------ +# Tester and System Descriptions - EDIT all sections below this point +#------------------------------------------------------------------------------ +# For info about any field, see +# https://www.spec.org/cpu2017/Docs/config.html#fieldname +# Example: https://www.spec.org/cpu2017/Docs/config.html#hw_memory +#------------------------------------------------------------------------------- + +#--------- EDIT to match your version ----------------------------------------- +default: + sw_compiler001 = C/C++/Fortran: Version 7.2.1 of GCC, the + sw_compiler002 = GNU Compiler Collection + +#--------- EDIT info about you ------------------------------------------------ +# To understand the difference between hw_vendor/sponsor/tester, see: +# https://www.spec.org/cpu2017/Docs/config.html#test_sponsor +intrate,intspeed,fprate,fpspeed: # Important: keep this line + hw_vendor = My Corporation + tester = My Corporation + test_sponsor = My Corporation + license_num = nnn (Your SPEC license number) +# prepared_by = # Ima Pseudonym # Whatever you like: is never output + + +#--------- EDIT system availability dates ------------------------------------- +intrate,intspeed,fprate,fpspeed: # Important: keep this line + # Example # Brief info about field + hw_avail = # Nov-2099 # Date of LAST hardware component to ship + sw_avail = # Nov-2099 # Date of LAST software component to ship + +#--------- EDIT system information -------------------------------------------- +intrate,intspeed,fprate,fpspeed: # Important: keep this line + # Example # Brief info about field +# hw_cpu_name = # Intel Xeon E9-9999 v9 # chip name + hw_cpu_nominal_mhz = # 9999 # Nominal chip frequency, in MHz + hw_cpu_max_mhz = # 9999 # Max chip frequency, in MHz +# hw_disk = # 9 x 9 TB SATA III 9999 RPM # Size, type, other perf-relevant info + hw_model = # TurboBlaster 3000 # system model name +# hw_nchips = # 99 # number chips enabled + hw_ncores = # 9999 # number cores enabled + hw_ncpuorder = # 1-9 chips # Ordering options + hw_nthreadspercore = # 9 # number threads enabled per core + hw_other = # TurboNUMA Router 10 Gb # Other perf-relevant hw, or "None" + +# hw_memory001 = # 999 GB (99 x 9 
GB 2Rx4 PC4-2133P-R, # The 'PCn-etc' is from the JEDEC +# hw_memory002 = # running at 1600 MHz) # label on the DIMM. + + hw_pcache = # 99 KB I + 99 KB D on chip per core # Primary cache size, type, location + hw_scache = # 99 KB I+D on chip per 9 cores # Second cache or "None" + hw_tcache = # 9 MB I+D on chip per chip # Third cache or "None" + hw_ocache = # 9 GB I+D off chip per system board # Other cache or "None" + + fw_bios = # American Megatrends 39030100 02/29/2016 # Firmware information +# sw_file = # ext99 # File system +# sw_os001 = # Linux Sailboat # Operating system +# sw_os002 = # Distribution 7.2 SP1 # and version + sw_other = # TurboHeap Library V8.1 # Other perf-relevant sw, or "None" +# sw_state = # Run level 99 # Software state. + +# Note: Some commented-out fields above are automatically set to preliminary +# values by sysinfo +# https://www.spec.org/cpu2017/Docs/config.html#sysinfo +# Uncomment lines for which you already know a better answer than sysinfo + +__HASH__ +500.perlbench_r=base=maple_test-m64: +# Last updated 2021-05-10 10:36:40 +opthash=1b55f44e78c515229834a282ce9d3013fee214077130f5cc6c8d469cce53274c +baggage= +compiler_version=\ +@eNp9VE1v2zAMvftX+NYNteK26ZIigA9Z6hYD0qZI3e0YyBLtqJUlTx+J08N++2Qnjr202MWC+Cjy\ +kXy0P5utfsbL5x+Lx9XiKXHHxHvRTOR+ahk3iAlfl0D0wJst5vN4lqzuZ7MotFqFKRNhTsgRmCeL\ +1a/l9OkpXu4dOEtrh7C6Ga1G14gzYSuUCxuOQ24k2ipclqC8xd3dfDG9XSXT5X2crB6nD/FzJDal\ +qZCQAk7x2/hu+jJPoksvwSoHM/FP43szKTKWWwXU3zKznviDQagVCUlr99HGR6jGUPmWb0BpJkV0\ +9pJaYaw/HnwbXKChbW6Xfy5vBhfXZ61/al0AHmWMwyQMmzr1GisIqSR1tWgcLuPp7UM8+G5z7V6B\ +wCkHxLHILc5BRyTAFAfk/DzIZZAqlgc0yKQyCotApq+k/iAHu7elgoxVTTfb/HWKAr9KhQ60kRR8\ +1/jKXOECaZvVb9C4ZzvEOW0U6ug1NdAeXSbeQKFaBBSxGnDThAoIZeo43QMnaWudEG4pUOQmYqAy\ +XSSzVoCpjkqpWbWPcxLj4Ch4r12plEa7lpSdiXBJMIfIEe/zTLWhpKoQBTeazwDDCoh2oNsOUsiw\ +ddLuPHDKIgHb7nHdGyvYbwvIDQNIXQ5lusE2Zn+AYtnun3xFWXX3ktvcLc/x3mYtGbRE9E4bKNB7\ +10lkGkmfIIcItTbc+CNsjeyshQvKsCLrHsUtKCWPiqlBNLyK2OhmdLS5iovRdXttgrhkrgptomJ4\ +FTgwKKrh1UmiPlUr3CxAuD6QHsks4xLTQyG6t8c9sRBLMaKKuR72xrsG8uZ+PJECDljX/o36PqjW\ +AWvpaH5i32f9gHhJo0G/kBT4xG+U6LlF8g8rtN94/8t/9v+r7/0FuibFEg== +compile_options=\ +@eNrVU21r2zAQ/u5fIfxdccdGWUJdsB0l8eZEwrIh2xfhOkqqLZaD5IS1v34n56UtDFpGv1RgfLq3\ +R3fP3aLVuKl+y7XaStTuOtVqO/JsZ1TdCbPXK2XEQRq1fgj9T74HogWX0L8aDK+ufc9L6JyNkB/s\ +rQnulA42dY3cwbZbhfVw6MTm+gvCNcLtGWDQIjzmjCTwW4xJXE5BYCTPREJzgnA6gG+lbBekFKR6\ +V+mgUI3EM5VLe9bMinmGWWWsNKCSf7rASBDsTtam0itIOaZlnBHBs4jPCBcOMY2y8OqELqKyoIKX\ +jOWEc0EZWcwZ2EQW5VMiJmlG+OXqboLTMk+I0/VXOplwUog4LXgINSK8QZh+hoIrU9+HuurUAV60\ +1i3ea1utJRi6e+ya0KjHqm8FOto7IyXetu0OH2TdtUY9SnQ6p7dm6aJciqWDOcUcScLVVlVWaYBe\ +b/T+6xArvVVaPotkLujGtntTy1vfS0YoScJXKTs60vgbZUXov+DP94DzhJWTLJpysL3ksrfGZJHM\ +xMXhw5Pre3SEoBPpPP1JoKL3ZLpvGKN5EcVplhY/zh194rxHJ8sij6CJT494fQr61MfAfwIwlxtg\ +vr9lh4/Kd5/x/x3wm/buFySyt85/26DLwlzmFKF+iLPxW6b9g/IL9c2jYga5Yrdp2+ZUMi3d1j5b\ +2b8rRMvz +exehash=1c69320bf26365e65e5b19b87fd09bbfdf73b3d6257845446d6001048f92fd40 + +600.perlbench_s=base=maplec-m64: +# Last updated 2021-05-12 15:35:30 +opthash=3cf993256ffc5aaeca4f7b92e3300c53cdd5a2bdb530ca54cfd558466978ed44 +baggage= +compiler_version=\ +*IENDX1ZFUlNJT05fT1BUSU9OOgo= +compile_options=\ +@eNrVUsGOmzAQvfMVlu+EVKpWarSsBMQJbg22sJHSXiyWOKrbgJFNqvbva0iT3ZWyl2ovi0DMzHu8\ +wW+mNH3YNT/VQR8VMMOoTe9WgRutbkdpT/1eW/lLWX34E8MPMPCh85QYLheflncwCDJasBWA0XfT\ +qcid+uhR91HXDEfVgtDf5iK6MCBcc4Yy/yrXKK23PmCoIjKjFQIhXvhnr90YYeqjdmj6SOhOhbmu\ +lLtUclGQkDXWKetL6vcYWeUDN6jWNv3eS65pnRIkOUl4jricOuKExEsPSZJUWyQ3mCB+TadMclpX\ +GZpqc0o3G46ETLHg8d1HcL3OB5AEl/VO7mYkPJhB9d1wwShDZcGek9nEu3fmZFv1AINsBbIsvmnY\ 
+GaTpZ8pEDF+4BwPvcsbqDUm23GMvnZzRFJVZLq+Ed2Ht/OOMViJJMcHi6+VkTxbDgK4A2okq8dYK\ +XOBvyJNumj6Lnak3Jdmk5oW/vLav/zXoe/P4Q7Wje5g4x+5J4jo7AObBkvVrU39TD3ynIhG5/zqd\ +9uDY/WtO62mnni3UX8u2Jz8= +exehash=2d8e7986845a1a99ef89f93cd339b6da23b7cfe2fc85ca3e8925db6f58260583 + +619.lbm_s=base=maplec-m64: +# Last updated 2021-05-12 16:29:02 +opthash=7affc07e1d7c5ca4633093daa05c20102ce1dec866120559a921160cddd58e76 +baggage= +compiler_version=\ +*IENDX1ZFUlNJT05fT1BUSU9OOgo= +compile_options=\ +@eNqVUG1rgzAY/J5fEfLd2kEZTGpBY2ZdowlW2csXaV1kbmpE7WD/ftG+Ci1sIZCH3OVyd4GstHLz\ +JbK8EFDWXS6r1gBt1+RplzS76j1vkm/R5NmPie4QUGOrKCaaTh6mMwQAZj43INI/ZCn0dlfp27zS\ +y01diBRqasuj6ERCzVlzgtUROMSOXTVQK3RJ8szCFWWWAy+XlslaVGV9eJUwTgKfD8j+gvL7GYTz\ +Vu6aVCwQwAbE2LzqZA8y+4nxyEQjWwgo+5jHj9Ry1wobWxxQmwR4mZwIY88IMAOSlyi0lMPI8703\ +okhXvQ9ieypnYWTZHvWi1+OfQx4EqBesbvX5p37mcvsp0q5d9JyiPPNPkSEc+qDOrbL+kUjp+Fa0\ +TKhn9+UU5UGaxX3RFy3/AtPTsPg= +exehash=4796f3d722b9339855ae459928ddcee0650c458340a971fc905fa5338976c68c + +605.mcf_s=base=maplec-m64: +# Last updated 2021-05-12 17:02:05 +opthash=f8c9b30b6e77a4a59a777237ddaf5842ad1c2a8ae410864ef1a31d5c27b9ec03 +baggage= +compiler_version=\ +*IENDX1ZFUlNJT05fT1BUSU9OOgo= +compile_options=\ +@eNqVUV1rgzAUfc+vCHm3drANJrWgada6qQk1wraX0LqUualxxg7275fa2g9ooQuBhHtObs45N1aV\ +VS6+5CovJFR1m6tKO0C3TZ61ollX73kjfmSTr35ddIOAuWpDcdFw8DC8QwBgGjEHIvtDldLW68pe\ +5pVdLupCZtAyW/VNBwpak4QRbI54Qvx0CqEV6Fpm4lurpt2hwks5FUnK2JwkiaCMxBGDR8taqVpW\ +Zd3zD4xdIWT3txCOtFo3mRwjgB2IsXtW4Rak/hNl3EUnchEwtjBLH0NvmhjsVHqH+iTGM4F7xpVe\ +EKAOJC987pkCD6LgjZjHZ011v2ypjM655wdhwF97MZ1RBMIgfr40AHhNcCO1/JRZq8cbTlEe+Pss\ +IOyCCieXUvyHI9Mn8vhMhIG/Ca0od61pupnAUfx/9bTCPg== +exehash=c4863fbc60ab23589893d59f3b7f8b19f77ca751d2a775ef1905cd68a80904ea + +505.mcf_r=base=maplec-m64: +# Last updated 2021-05-12 17:04:02 +opthash=cd00cc44466c7c00fcb0a07a84b8bdec510af969b5da706b722633229c2d2efc +baggage= +compiler_version=\ +*IENDX1ZFUlNJT05fT1BUSU9OOgo= +compile_options=\ +@eNqNkF1PgzAYhe/7K5reM2aiJpKxBApuaKENH4leNRt2EQWKFEz89xYEdYsa3zRpk3PanvNEsjaq\ +3bM4FKWAsukKWSsLqK4t8o63ff1QtPxVtMXhzUZnCOij0hYbLRdXywsEAKYhsyAyH2UlTNXX5r6o\ +zWrXlCKHhl5yfnQhoeElzMd6izzfzTYQGoFqRM5flGy7SeVOllKeZIzFfpJwyvwoZPBkJithl+cQ\ +rpTs21ysEcAWxNj+McuHSN0bylIbHQVDQBfALLsmzibR2nHIUXX9CG85nh3/TD1e9e/S2OGMxqnj\ +BiRI7+cfxvQIkCC6/Y3fyazk/knknVoPBMrqi8ZnFQjHnsT7A0LopFtOAncoUlaTn2YDlW9I3gG2\ +95r+ +exehash=806db0e68b5994ed49999cc86262dbea2e13c31853d3ba087eb2f180c940ee06 + +502.gcc_r=base=maplec-m64: +# Last updated 2021-05-12 17:24:36 +opthash=2839b1967320158040c55387a370c4661288143e5e404dc55ea5be68a750538d +baggage= +compiler_version=\ +*IENDX1ZFUlNJT05fT1BUSU9OOgo= +compile_options=\ +@eNq1kV9PgzAUxd/5FE3fGdM4E5exBLoOqqxtBhh9ajbWxSrQScHEby/g8M+ivnnTpjc5J7f9nVJd\ +2sXmSe5VLoE+1EqXZmqZulJZLaqm3KlKvMhK7V9deAattjWtxYXj0dV4Ai0LsRWfAug86EI6pimd\ +rSqdYnPIZQbsdulh6EgDexFzjNqDLrCfBsAmo247qszyZif73hxkJp6NruqjXUzG50PrpQkTccr5\ +GsexYBzTFW81QkWAurmhd4sFYnRJAhGC0zoOifjlBQAzo5sqk3NooSlAyP2R4F1k/jXjiQu/4UCr\ +xUY8XUZeELfad7Re9TFFoRgM/8ra34fvkrUnOFsnnk8iktwPz+qRoRURevPbV53UTG8fZVabeRdb\ +XnxG+MEPQB9OtPgjuZWXhCIifoefF0c/S7sov+T4BmRTt5k= +exehash=0 + +520.omnetpp_r=base=maplec-m64: +# Last updated 2021-05-12 17:24:36 +opthash=be6e4dd67b5187b3cefa0f68d2b9d0de0da20ab650fd3c3ee25c7e2e09cbff32 +baggage= +compiler_version=\ +*IENYWENfVkVSU0lPTl9PUFRJT046Ci9iaW4vc2g6IENDOiBjb21tYW5kIG5vdCBmb3VuZAo= +compile_options=\ +@eNq1UNFOgzAUfe9XNH1nzkRNJGMJlLqhHW2gjfjUTOgSFCgBZuLfW3E45rv3pTfnnt57zolN49T7\ +d30oKw1NO5Sm6V3QD12ZD6o7NkXZqQ/dlYdPD10jYNveUjy0XNwvbxEAmO24CxHG0MmhY6YVCwOd\ 
+MOXEwmEckkBuoBP1ZX2s9oPprlr7FLqdY7avTaEr++E5ElsVExHIiIYkOW1SvhRMpZLzhKSpYpzE\ +Ow7PdWJRfncD4ao3xy7XawSwC3GWeVbh1LPgkXHhoQvBCFgbmMsH6m9SO7sUP04DEuOtmgj/4WY8\ +QzKR+IqzRPhBRCPxMqkZrSFAo/jpJ/E/tTKvbzof+vUcPDuEcAyAhr9Z0JDJ7xxmIXwBBSCXiw== +exehash=0 + +523.xalancbmk_r=base=maplec-m64: +# Last updated 2021-05-12 17:24:36 +opthash=a5f92e3f29c9a9dc5714dc66815a936b3270df34bf15c51d6007f825c19a3bfd +baggage= +compiler_version=\ +*IENYWENfVkVSU0lPTl9PUFRJT046Ci9iaW4vc2g6IENDOiBjb21tYW5kIG5vdCBmb3VuZAo= +compile_options=\ +@eNrtU9FumzAUfecrLL8TuqmbtKip5ICX0tnYwmaiTxZ1aEVHcIVJ1f59HZhTsk1969ssIZ17z8G6\ +3HPITBfuql/1XdPWwDwOjensMrBD3+hB9ftu2/Tqqe6bu5cV/AQDB62TrODZ4tvZFxgEMaN8CWAc\ +g1CD0PgrFgaEieDYtZMswetiAxxCnKuMKXmVY5QI1ygRQZlKM4qpomKjCEMJzkGYLtzzXPe6tvoN\ +RVuzO62iZvfYzlq2ep5V+6FpI2rviam2bu4o7Wi9M/3LnxLZV53VZtJo0z0dBFVbdTpqOt3ut7Ub\ +lefsWpWUcJSLw4jHRiFT4suJFL4UqDyPZ8VnjxNGPfyJSJogyca33HWqENhvBAuBNtgvZdynQoVk\ +ShSc545VjOOMcnA4v3mSZkV50uBfzwG4sGbvPvkSBvESxGW5cpZ5zNbXjMsVPHEQBs7XmBffCdoI\ +x526ObJrnMVXyl1w1Pw3+GMMHtfNWS7ROiWpvPF+TG6PLC5ljtQ/NS4AMHDKH9OP+nb+isyFuX2o\ +9WAvwVx1zAQAY2RIMqXn/ZlGISsOuZqF6hW+xlPZ +exehash=0 + +525.x264_r=base=maplec-m64: +# Last updated 2021-05-12 17:24:36 +opthash=17555907b69668487c703e8ad30fdca935346ca79afa0bfdc1ae288920733ee3 +baggage= +compiler_version=\ +*IENDX1ZFUlNJT05fT1BUSU9OOgo= +compile_options=\ +@eNrtkt9vgjAQx9/5K5q+I+rUbUZMAJmyIW34kcynBrFu3YCagsb990PEDc22V/dA06TX3l179+nX\ +4amchO90zWIK+CZnPM2GUpYLFuVEbNMVE2RHBVt/qLADpcLMihAVtlv37T6U0DEDrLkALAlf6C6M\ +2SrMKel3+0NJMtAcDwFUXnlClWybKkuWKkm4iWkE5GLy05stDuSJh02jWJyJqQdTIFvxikZ8RTIR\ +KSwtoq19d9A7bGumQve5CLP6ScSThKfVfUQLfES8AGPX9DyCsOnM8ZlPX/gmciemq7b3ne5Nrz+4\ +vQOXo0qw8aAHwCjjWxHRMZSMITAM9cf+jk6kPyLsq/CsWSgVUAwcPNja1Ct8542XXt10jBk5BVyR\ +RFmN+ey7GsHI9TXdsi1/cSq6BAIl23KefvvmizHiyzca5dn4ADVOvgF/0QGgRGdP/uA61/wZsS39\ +ACdOqngUHEDXKNfVWREUjSgbUf4bUZZMGkk2kryuJD8BghRXuw== +exehash=0 + +531.deepsjeng_r=base=maplec-m64: +# Last updated 2021-05-12 17:24:36 +opthash=c9d27d3cf5376f585e57a3f2a29ee65eb5a8ddb7a1042d47749d036bb8f42043 +baggage= +compiler_version=\ +*IENYWENfVkVSU0lPTl9PUFRJT046Ci9iaW4vc2g6IENDOiBjb21tYW5kIG5vdCBmb3VuZAo= +compile_options=\ +@eNqNkF9PgzAUxd/7KW76zjKTxUQylkBbJ1poQyFhT41il9Q/1AAz8dvb4ZDNJ+/T7T2nvb/T3LXB\ +++Or2ds3A+5jsK7tQ9QPnW0G3R3aZ9vpT9PZ/VeErzDybe8tEV4ubpYrjBARmQwBEwJBA4Gbnlg4\ +CKiSzI9pTllSbY/nLOZcZywTxe4k67gqhVaVlAVTSgvJ8kzCXCcXl9crgHXvDl1jNhiREEhdR37t\ +1IvkXsgywhcUGHk2IqtbHm+V1y6JRjVhObnTs+GfiONdVpdFrKUoyjhJeVruphUjL0Y8zR9+/uZP\ +rd3Ti2mGfnM+nLEBxlSc/gbkVFTHcGfJvgF3B3iC +exehash=0 + +541.leela_r=base=maplec-m64: +# Last updated 2021-05-12 17:24:36 +opthash=c715a7e7b712428302fadbd8205a3f5ff13d2a99dbcb03cfef4de14ebb0dbb76 +baggage= +compiler_version=\ +*IENYWENfVkVSU0lPTl9PUFRJT046Ci9iaW4vc2g6IENDOiBjb21tYW5kIG5vdCBmb3VuZAo= +compile_options=\ +@eNp9kE9LxDAQxe/5FEPuKSuoYNkutElcq9kktCnUU9CahfqnkbYr+O2NdevuenBOw5uXmd+L9B15\ +e3hx2/bVgX8fW98NMRrGvm1G2++6p7a3H65vt58JPsMotEOwJHgRXS0uMEJUbXQMmFIgDRA/r4g8\ +EFZqHmQmGc+qNZA82ms2rYyyZaV1wcvSKs3lRsOh9i6hL88BloPf9Y1bYURjoHWdhFtzr7JbpU2C\ +T05jFICorq5Fui7D7BRjmmZc0hs7G/7nmh7w2hSp1aowaZaL3NzPeydIjEQu735+4U8t/eOza8Zh\ +dSweWAGmKIL9phJMVd+JjuJ8ASVmcYc= +exehash=0 + +548.exchange2_r=base=maplec-m64: +# Last updated 2021-05-12 17:24:36 +opthash=7c95017cd71c6e94420e5cb70d3cc0a48a81eb16a8246cb031329e6470ab6a5c +baggage= +compiler_version=\ +*IEZDX1ZFUlNJT05fT1BUSU9OOgovYmluL3NoOiBmOTA6IGNvbW1hbmQgbm90IGZvdW5kCg== +compile_options=\ +@eNq9kEtPhDAUhff8iqZ7yJioCWSYhEcZ0Q5teCS6apQpSX3QhjJG/fUWBhmcuLab3vae2/OdZrK1\ 
+3x5feCNeOZCqF7LVnqX7TtQ96w7tXnTsnXei+fThBbRMqY3EhyvHXV1By0oo9QDUitdKATsuKIrM\ +lsUorLbTmQVVSVhRUZqjomCEomxHf3qYXl+CtZaHruYbYMu5dhqlxBffO437AS1jEtEqwcG28OGZ\ +zdgNURbdsF+CP31HMbov84BRkpdBmOK0fJhHBhxoRWQ3pGrcFbDrAWr6GEeCmc+oPJBE/qA61iS8\ +JbQ0Ty1H/hsdp9ndhH621vLpmde93iwvT6AAjClwfEqEY1INeRZhvgFuXpvb +exehash=0 + +557.xz_r=base=maplec-m64: +# Last updated 2021-05-12 17:24:36 +opthash=34d1fd17d5aa193e0a6a2c6e3f3472c24b4e41ed9de1aafdefcd23008f8b6564 +baggage= +compiler_version=\ +*IENDX1ZFUlNJT05fT1BUSU9OOgo= +compile_options=\ +@eNrtk1tPwyAUx9/7KQjvXTcvUxdr0gtu1a6QXsz0hXQMHVrK0nZG/fSy6Uw7L59AAhz+nMNJzvmF\ +SJWmzJ/4vSg4UKtGqLIeGXVTCdbQal0uREWfeSXuX204gIY+1jrEhv3eWX8IDcPDUzIC0Foqya16\ +XVpzUVoyXxWcAVNPtUvaU8D0E4I8bSIfudn4U1MnSzF1b1OEYx/Fdv9lcHB4dDw8OdUBE+cGUQ9H\ +l8GYTuzB7skUTWmAd2p210mVZITEKEkoJiiaEmAGPb3qFWdUckmF2qhlbh5oy5SUqtSHQsyLN5lb\ ++Uq01GZryW/RbMnZU0vXQurKWxcLXjR5J2FLVHn5wJla8Arsj896QjI8AuC8VuuK8QtoeCPgefaP\ +3f5wYvcKk9SGndZDQyPySHYZOuNE+7oYtl4XRd6EdgL+ufBq2xs0S2OHEhynjhuEQXr71aENHmiE\ +QXT92xfYG+dq/shZU190WH+BAmBLMfT/QBz6ONsAbtF9B8AAJ6o= +exehash=0 + +999.specrand_ir=base=maplec-m64: +# Last updated 2021-05-12 17:24:36 +opthash=2b6f45155529ef0ee94c19b9445375b5551207252ee5fb82fdcb9fe4f6f19a4c +baggage= +compiler_version=\ +*IENDX1ZFUlNJT05fT1BUSU9OOgo= +compile_options=\ +@eNp9kFFLwzAUhd/zK0Leu04UwbIO2rTOamzC1oI+lS7LMLomJWkF/72xo2KLermQwDm5+c7NtfKa\ ++k0c5UlA3XZSKxsA2xnJu8r06iBN9S6MPH6E6AIBd7XOEqLl4mZ5iQDA9JEFEPkvuhG+7ZW/l8pv\ +6vYkOPRc63HoQkMv2bEUuyNP0rjcQC+zreCmVgeP66bRCs7r/KIi7PoKwpXVveFijQAOIMbhr5+e\ +RRrfU1aEaEKAgCPFrLwl0WbntCnNoMZpju+q0TDHGyzpU7GNKka3RRRnJCuex0kDJQIkyx/+Wsis\ +Vnr/Knhn15PI37wQDmFI8k9SktDyK+ePkJ/hnoeM +exehash=0 + +519.lbm_r=base=maplec-m64: +# Last updated 2021-05-12 17:29:14 +opthash=55e637b326098960c2f25abcffd8e22854b4464b6ae2619ebf92ce74d672f662 +baggage= +compiler_version=\ +*IENDX1ZFUlNJT05fT1BUSU9OOgo= +compile_options=\ +@eNp9kF9PgzAUxd/7KZq+M2aiJpKxBApuaKENlESfmg27WAW68MfEb2/ZcMoyvWnSNufc9nduomur\ +2rzLnSol1PtO6bp1QNs1quhE09cvqhEfslG7TxddIWCOrbG4aD67m98gADCNmQOR/aorabd9bW9V\ +bVebfSkLaJmlvx+daWgFGQux2ZIg9PPVeBdezqnIcsbSMMsEZWESM3heo5ew22sIF63um0IuEcAO\ +xNi9+P1RpP4DZdxFExYEDDNm+T3xVpnRplwH1Q8TvBYTw0XQgzl84qknGE2550ck4s+nloEXARIl\ +j38N6awWevsmi65dDpnL6if/CR7CQzIS/BM79vhakMgf2Mtq9NN8mMOvIXwB6zCRCQ== +exehash=95e543602be6b0de89f9afd90aacb45d06bb7b32f0f2295d9571e3c9a893f901 + +511.povray_r=base=maplec-m64: +# Last updated 2021-05-19 14:10:44 +opthash=6681316d90fcef5a8ec9d57d1ec3590b7d32c00621a8f461b30ed5b709bb4dc3 +baggage= +compiler_version=\ +@eNq9VUtv2zAMvvtX6NYNqaKmj7QJ4MNgBNuAYCnabOstkGXGViNLmiQ3bg/77ZPtuPaaDANWrJc4\ +FB8f+YmkUHR3F62+zW5uPy++rBbXS/+ZBijaP/tquUxRXHDhMJfIamB2GESL+XwWLVcfoygkmcqB\ +PGY5yakWsFIapFWFYUB6f51SwpKUMSy4pEbhy+HF8ITEXBJKDcvG55WiKHEqC5wOBs8Q8+Vi9f3m\ +w/X17Ob1UMMhETyGElil30cmjalwCm8N1RpMsKQmBTdFe7ZBpOSap4WBBG25y6boqMnPsW2KK8YS\ +K+gDkK0yG6sp22lyugFsQAC1sBoRK6m2mXJ1xsOUu59d1vj0ZDQZjk4Ja6GO0O2n2Xwe1tXE1GYI\ +4woc55qF/wK/qi1JY08SsC7hhvDx1Rhr1hXbwazNW+CkuX4TGN9s1PYlkXgp4ZbGArx9nBfJWlDt\ +D0E2Z051gs1odf2NvyqqEWGiSCDBvmcclK4zleIZxz5aBzl+8uF7YPZe3GMoGWjHlbSdZ5VWIfmP\ +ArCK74H1gvqCNmAaejB/kbqvnZWlrzvrHNhk0hOEYlRA2BDyHLJ1TCAu0p5CybT+acvw7ioNpWpl\ +rUVP4nYntRnlhXC8KXkXcc1LzJTxPGF6cYavzi4ux5M/q8/PzkeTNnw1jSE1+cMVpp2LywzQxIZa\ +WV52xzV05XGoTMdzCB/hBeFc+sbxXON1IVl1IS1ww7W/QqOUe02P7kLY/S3U65PXgvxtEA6sQM8M\ +6/VIBmzjn4BwF7p3oXGVvTO/DQeVaUFTsCE7ZoPB8dpfn6HyuBmaOpnw0DBmyrqDCldv3/AQRdqA\ +75D/SU6wrNsJ5SoBMUV1UwV+S6MHMLZqifq1QO/m9cJG/jFEvaX9HgW/AHQUm+k= +compile_options=\ +@eNrtlFFPwjAQx9/3KZq+Lt1mRI2EkcA2ER1bw7YEn5pRCkyhxXYY+fZuAxTUxAcToskuWXrJXde7\ 
+37X/QHC0TJ/YNFswIFZ5JrhqaiqXGc2JXPNJJskLk9l0Y8MzqBWuKlJsaBnXVgNqmhMOcBNAcy6W\ +zNzMl+YyXS0YESvGlVhLyswDNxdiocwZpWiR8VQKdGVcGJY5zriZppLOLxtlYP2KZnyNZroOSkMq\ +n9hU161zgChAYl+lIQByI+w5xRK4XjfpAdSfSsFzxieFO04VKxaj+NSKUfKshMx3W0gniUMSJRgP\ +vSgiIfaCAQYftsvy8WUDgNa2+jbUnCZwRiP7NN3ujwu7dyGObXjUPNQK6g5ObvxOLypixyCqaNcL\ +nFuyT/gVmep/3igedggOh3Gn2/f78cP+2AoT/HoTykYrPvR0g/txfI79bYXb4H9B7feD+5M9uk/W\ +EuNHRnPVPgL+TguACqXvnvKV+G6YlGM7mFktTLUw1cJUC9OfE6Y3nDbadw== +exehash=0f4659ece5065638b9268e65a56d231636b83a05ae00d17df9bf1a2ae7112f8b + +538.imagick_r=base=maplec-m64: +# Last updated 2021-05-31 19:30:05 +opthash=54a10f343892c95817762d047800af2eccb783c0e3ff864fcef7d0ae46bd6135 +baggage= +compiler_version=\ +*IENDX1ZFUlNJT05fT1BUSU9OOgo= +compile_options=\ +@eNrtkdFqgzAUhu/zFCH3agdlMKkFja51iyZohO0qtC5lbmqK0cHeftbqtpatDzAWAgk5fw7f/59Y\ +1Ua1eZW7opRQ7dtC1doGum2KvBVNVz8VjXiTTbF7d9AVAv1V9xIHzcyb2RwBgGnEbIisZ1VJS3e1\ +tS1qq9rsS5lDo99qamoqaPgpC3B/xH7gZStohOb4JtyMU5FmjCVBmgrKgjhi8HyNWsKu5xAutOqa\ +XC4RwDbE2PkR4Vik3h1l3EEnPAj03Jhlt8RdpX3tlG2oekGM12ISXIYdPgQPPHEFowl3vZCE/HHq\ +OzAjQML4/rewztZCbV9k3urlwXdZfWXwaQDCwR3xL1iPXL4WJPQO/GU16ml2yOJbEP9D/AND/ABB\ +Sw9g +exehash=facef3555a82b36cc7e8d92e422f6bffa9ea772e21800b4dd646cd8aa6e6eb36 + +500.perlbench_r=base=maplec-m64: +# Last updated 2021-06-02 10:59:30 +opthash=907b382deeffec61ac5b9a27df38c7435aa5d83bcabdd7e546b4199881462251 +baggage= +compiler_version=\ +*IENDX1ZFUlNJT05fT1BUSU9OOgo= +compile_options=\ +@eNrVUtFqgzAUffcrgu/WDkZhZQ40TatbaoKJ0D0Fa1OWrWpJ7Nj+ftG1doWOPU+U3HvO8YZ77k2b\ +2quKN7lVOwmafaua2kwd02pVtkIf6o3S4l1qtf0M3BvXsaGxksAdj+7GE9dxIFnSKXD9l6aSvjnU\ +/lrVflXsd7IEnn2bU9FRA7wZowjaI52hKF/YgKIMC0gyBLxkZL+NMq2fEBuV+6L2uaqkF6tMmhMS\ +8yX2aKGN1BaSH62vpQ3MXpa6qDe25IzkEUaC4ZDFiInuxiTEwfh4uwhzTgTLKc0QY4JQlC6p5QQO\ +swUS8wQjNqRdJhjJM4g6rE/JfM4QF1HCWTC5BcNzLI+TNF+J1cCcYNoh96Y56FI+uA6cAgiDq7Z9\ +kyR6JJQH7oWHrmO9hjSf43DBLHfpZ89GKIWxGAT/3uC+KUoyHkYJTvjzqeuz0b0CrXgWiqs62kms\ ++um3Nf1jhvfN+lWWrXnoBLvqLB7GAkA/Mzz7baB/t2B/X4Y8tlDUzW1XHSuSvNuBHwvwBanrEMs= +exehash=51357adb393d1e80ce358878f264dc9f58399d0c7c71acd7c4834ae0d4c6d3e4 + diff --git a/build/tools/spec/replace_func.py b/build/tools/spec/replace_func.py new file mode 120000 index 0000000000000000000000000000000000000000..8da5f589b021dd54b47a6da164e7197c75f79024 --- /dev/null +++ b/build/tools/spec/replace_func.py @@ -0,0 +1 @@ +../common/replace_func.py \ No newline at end of file diff --git a/doc/cn/CPPCodingTalkAboutPointer.md b/doc/cn/CPPCodingTalkAboutPointer.md new file mode 100644 index 0000000000000000000000000000000000000000..d8be304b8ec099fbd7f614cb0a300b5028f16dd1 --- /dev/null +++ b/doc/cn/CPPCodingTalkAboutPointer.md @@ -0,0 +1,266 @@ +# C++编程探讨之指针 + +# 背景 + +`C/C++`中指针的使用具有极大的灵活性,伴随着的是更多的安全风险,同时这也对程序员提出了更高的要求。本文将讨论裸指针在`C/C++`中当如何被使用,乃至最终确立一种编码范式。 + +# 裸指针vs引用 + +## 成员访问 + +当访问对象成员时,裸指针存在为空的场景(指针的有效性由闭合对象或函数从逻辑上自保证),所以必须检查非法指针。而引用必定非空。 + +## 做容器成员 + +引用从`C++`语义中,表达的是别名关系,理论上不占内存(实际中规中矩的编译器对于引用的内部实现是指针)。引用本身不是对象,这点与指针不同,指针可以作为各容器成员,而引用不行。 + +# 裸指针vs智能指针 + +## 堆对象销毁 + +```c++ +class Int { + ... + private: + int data; +} + +void test(int* in) { + Int* tmp = new Int(); + ... + goto LABEL; + ... 
+ + delete tmp; +LABEL: +} +``` + +对于资源(堆对象、栈对象、文件资源等)的使用,遵循**“谁申请,谁释放”**的原则(RAII),这样可以最大限度的降低资源泄露的可能。 + +裸指针的`new`与`delete`之间往往会包含一段处理逻辑以及子函数调用,中间的处理逻辑可能发生异常、跳转等动作(中间的处理逻辑的行为不会由当前对象越权限制,超出`new`行为的管辖范围),而跳过资源的释放,从而造成资源泄露(如示例中`test`函数中`tmp`对象)。 + +智能指针改造为`auto tmp = std::make_unique();`,构造对象`tmp`时,即绑定其`delete`行为,退出当前作用域销毁,而避免了资源泄露的可能。 + +## 管理权vs使用权 + +```c++ +int* delete(int* in); +``` + +管理权:拥有销毁、重建对象的权利 + +使用权:拥有访问、修改对象的权利 + +如上示例所示,当使用裸指针传递参数时,由于其隐含了转移所有权的属性(可能转移所有权,亦可能没有),入参`in`以及出参均无法确定行使了**管理权**还是**使用权**。调用此函数将需要额外补充信息:`in`是否会被`delete`函数销毁?返回值是否需要调用者销毁? + +```c++ +std::unique_ptr delete(std::unique_ptr& in); +``` + +使用智能指针将在接口中明确表达参数的角色,如`std::unique_ptr& in`代表`delete`函数享有其**使用权**,函数返回值代表`delete`函数转移所有权。 + +# 指针使用范式 + +## `new`创建的对象,必须立即绑定其销毁方式 + +错误示例: + +```c++ +Object* obj = new Object(); +... +delete obj; +``` + +正确示例: + +```c++ +std::unique_ptr obj = std::make_unique(new Object()); +``` + +## 申请的资源,必须立即绑定其释放方式 + +错误示例: + +```c++ +FILE* file = open("xxx.txt"); +... +file->close(); +``` + +正确示例(本例比较通用,最佳方式应用类封装`open`): + +```c++ +template +class ResourceGuard { + public: + ResourceGuard(T* _obj, Func _func) : obj(_obj), func(_func) {} + + ~ResourceGuard() { obj.func(); } + private: + T* obj; + Func func; +} + +FILE* file = open("xxx.txt"); +auto fileGuard = ResourceGuard>(file, FILE::close); +... +``` + +## 确定不为空的场景,使用引用而非指针 + +错误示例: + +```c++ +void func1(int* in) { + if (in == nullptr) return; + ... +} + +void func2() { + int* p = nullptr; + ... + if (p != nullptr) { + func1(p); + } +} +``` + +正确示例: + +```c++ +void func1(int& in) { + ... +} + +void func2() { + int* p = nullptr; + ... + if (p != nullptr) { + func1(*p); + } +} +``` + +## 作为容器成员(不具管理权),确定不为空时,使用封装的引用容器,而非指针 + +错误示例: + +```c++ +void func(std::vector& in) { + for (auto *p : in) { + if (p == nullptr) { + continue; + } + ... + } +} +``` + +正确示例: + +```c++ +template +class Ref { + public: + Ref() = delete; + Ref(T& ref) : data(&ref) {} + + ... + + operator T() const noexcept { + return *data; + } + + private: + T* data; +} + +template +using ref_vector = std::vector>; +void func(ref_vector& in) { + for (auto p : in) { + int& data = p; + ... + } +} +``` + +## 作为容器成员(具备管理权),使用具有管理生命周期的容器,而非指针容器 + +错误示例: + +```c++ +std::vector data; +... +for (auto *p : data) { + delete p; +} +``` + +正确示例: + +```c++ +template +class ptr_vector { + public: + ~ptr_vector() { + for (auto *p : data) { + delete p; + } + } + + private: + std::vector data; +} + +ptr_vector data; +... +``` + +## 显示转移对象管理权,明确对象使用权 + +`C++11`新增了`move`语义,并废弃`auto_ptr`而使用需显示转移所有权的`unique_ptr`,使得栈对象和堆对象的生命周期管理方式可以进行统一。 + +栈对象转移示例: + +```c++ +std::vector func() { + std::vector data; + data.push_back(0); + return std::move(data); +} +``` + +模糊的堆对象转移示例: + +```c++ +Object* func() { + std::unique_ptr data = std::make_unique(new Object); + Object& rData = ToRef(data); + rData.push_back(0); + return data.release(); +} +``` + +明晰的的堆对象转移示例: + +```c++ +std::unique_ptr func() { + std::unique_ptr data = std::make_unique(new Object); + Object& rData = ToRef(data); + rData.push_back(0); + return std::move(data); +} +``` + +## 应当使用指针场景 + +1. 第三方库函数传入或传出指针,但必须在调用前一刻使用`unique_ptr.get()`或`unique_ptr.release()`构建入参,出参也必须在拿到后立即使用`unique_ptr`接住或判空并转引用。 +2. 
作为容器成员(不具管理权),使用场景中有空指针设计,但必须在使用前立即判空并转引用,不支持指针扩散。
+
+# 备注
+
+上述的`Ref`、`ref_vector`已开发完成,`Ref`由于`operator.`无法被重载,所以定义为`SafePtr`。
+
+上述的`ResourceGuard`、`ptr_vector`正在开发中,文中主要为示意。
diff --git a/doc/cn/CompilerPhaseDescription.md b/doc/cn/CompilerPhaseDescription.md
new file mode 100644
index 0000000000000000000000000000000000000000..3bd1244dac0613fb660ec67930f0caa46e0bf328
--- /dev/null
+++ b/doc/cn/CompilerPhaseDescription.md
@@ -0,0 +1,155 @@
+### 方舟编译器phase层次结构
+
+目前phase主要有两类:module phase和function phase。在其他层次的IR上的phase可以通过继承自MaplePhase完成。MaplePhase在特定的IR层次上完成优化或者进行程序分析。
+
+##### MapleModulePhase
+
+如果一个phase继承自MapleModulePhase,说明这个phase需要在整个module上进行转换;module phase也可以调用更低层次的MaplePhase。
+
+##### MapleFunctionPhase
+
+```c++
+template <typename IRType>
+class MapleFunctionPhase : public MaplePhase
+```
+
+不同于MapleModulePhase,MapleFunctionPhase是一个模板类,主要是因为Maple中有不同层次的函数级IR。中端的优化phase和后端的优化phase都是该类的派生类。
+
+### phase的内存管理
+
+方舟编译器的phasemanager可以对内存进行有效的管理,以便每个phase可以保留可能被其他phase依赖的分析结果;以及丢弃失效的结果。每个phasemanager中提供一个AnalysisDataManager类(多线程时,每个线程对应一个AnalysisDataManager)用来存储分析结果。为了实现这个功能,每个phase需要实现GetAnalysisDependence函数。如下:
+
+```c++
+void <当前phase类名>::GetAnalysisDependence(maple::AnalysisDep &aDep) const {
+  aDep.AddRequired<所依赖的phase>();    // 配置当前phase需要依赖的phase。
+  aDep.AddPreserved<需要保留的phase>();  // 配置当前phase执行完成后,需要保留的分析结果。
+  aDep.SetPreservedAll();              // 保留所有的分析结果。
+}
+```
+
+#### 分析phase
+
+分析phase中的GetPhasesMempool函数返回的mempool来自于AnalysisDataManager。分析phase的内存池中分配的对象,只有在被其他phase设置为需要丢弃;或者当前phasemanager结束时,才会被释放。否则会一直存在于当前phasemanager中。如果有些信息仅仅是在分析phase内部局部使用,那么可以将其分配在临时内存池中,通过ApplyTempMemPool函数获取一个临时的内存池。
+
+#### 转化phase
+
+对于转化phase,GetPhasesMempool和ApplyTempMemPool获取的内存池类似,都会在该phase结束后进行释放。**如果一个转化phase不实现GetAnalysisDependence,则意味着phase结束后会把当前phase所在的phasemanager中的所有现存分析phase的结果删掉**。
+
+
+**If a transform phase does not implement the GetAnalysisDependence method, it defaults to not having any prerequisite phases and invalidates all analysis information in the analysisDataManager.** In other words, a transform phase is by default assumed to invalidate all analysis results.
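+
+A minimal sketch of the pattern described above (the names `MyTransformPhase` and `MEDominance` are illustrative only, echoing the quick-start example below): a transform phase that requires dominance information and explicitly preserves it, so the phasemanager does not discard that result when the phase finishes.
+
+```c++
+// Illustrative sketch: require dominance before this transform phase runs,
+// and preserve it so later phases can reuse the result without recomputing it.
+void MyTransformPhase::GetAnalysisDependence(maple::AnalysisDep &aDep) const {
+  aDep.AddRequired<MEDominance>();   // dominance must be available before this phase
+  aDep.AddPreserved<MEDominance>();  // this phase does not invalidate the dominance result
+}
+```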
+
+### 快速开始
+
+假设我们要在中端实现一个叫做“MEHello”的`转化`phase。
+
+- 1 新增两个文件
+
+me_hello_opt.h:
+
+```c++
+#ifndef MAPLE_ME_INCLUDE_ME_HELLO_OPT_H
+#define MAPLE_ME_INCLUDE_ME_HELLO_OPT_H
+#include "me_function.h"  // 使用MeFunction
+#include "maple_phase.h"  // 使用MAPLE_FUNC_PHASE_DECLARE宏
+namespace maple {
+
+// 对于转化phase,尽量使用这个宏。
+MAPLE_FUNC_PHASE_DECLARE(MEHello, MeFunction)
+}  // namespace maple
+#endif  // MAPLE_ME_INCLUDE_ME_HELLO_OPT_H
+```
+
+me_hello_opt.cpp:
+
+```c++
+#include "me_hello_opt.h"
+// 将该phase对应的phasemanager头文件引入进来,可以方便地使用其他phase的分析结果。
+#include "me_phase_manager.h"
+
+namespace maple {
+
+// 您需要清楚地知道,当前的phase依赖什么分析结果,以及会破坏什么分析结果。
+void MEHello::GetAnalysisDependence(maple::AnalysisDep &aDep) const {
+  aDep.AddRequired<MEDominance>();  // phasemanager会保证MEDominance这个phase的分析结果在当前phase中是可用的。
+  aDep.SetPreservedAll();  // 表明了当前phase不会破坏任何分析结果。
+}
+
+// 这个函数的返回值表明了当前phase是否修改了IR,目前该返回值并未使用。
+bool MEHello::PhaseRun(maple::MeFunction &f) {
+  // 您可以通过这个宏来获取想要的分析结果,前提是在GetAnalysisDependence配置过,否则会报错。
+  auto *dom = GET_ANALYSIS(MEDominance);
+  // 使用dom信息,做一些事情。。。
+  LogInfo::MapleLogger() << "hello opt on function: " << f.GetName() << '\n';
+  return false;
+}
+
+}  // namespace maple
+```
+
+- 2 告知phasemanager,有新的phase加入了。
+  - 2.1 将新phase的头文件(me_hello_opt.h) 添加至me_phase_manager.h
+  - 2.2 在me_phase_manager.cpp注册新的phase,如下:
+
+```c++
+MAPLE_TRANSFORM_PHASE_REGISTER_CANSKIP(MEHdse, hdse)
+// 后缀CANSKIP的宏表明了这个phase可以跳过。第一个参数是phase的实现类,第二个参数是phase的名字。
+// phase的名字可以用在多个选项中,如skip-phases,dump-phase(s),skip-after等等;同时配置哪些phase需要运行,也是用这个名字。
++MAPLE_TRANSFORM_PHASE_REGISTER_CANSKIP(MEHello, hello)
+MAPLE_TRANSFORM_PHASE_REGISTER_CANSKIP(MELfoIVCanon, ivcanon)
+```
+
+- 3 配置需要运行的phase列表。
+  修改phase.def文件,如下:
+
+```c++
+...
+ADDMAPLEMEPHASE("dse", MeOption::optLevel >= 2)
+ADDMAPLEMEPHASE("hello", MeOption::optLevel >= 2)
+ADDMAPLEMEPHASE("analyzector", JAVALANG)
+...
+```
+
+- 4 把cpp文件加入到对应的build.gn中。
+- 5 编译工具链测试
+  maple --run=me:mpl2mpl:mplcg --option="--O2 :--O2 --quiet:--O2 --quiet" test.mpl
+  可以看到,我们新增的phase在dse之后正确地输出了预期的内容!
+ +``` +>>>>>>>>>>>>>>>>>>>>>>>>>>>>> Optimizing Function < VEC_invariant_p_base_space id=5471 >--- +---Preparing Function < VEC_invariant_p_base_space > [1] --- +---Run Phase [ mecfgbuild ]--- +---Run Phase [ cfgOpt ]--- +---Run Phase [ loopcanon ]--- + ++ trigger phase [ dominance ] + ++ trigger phase [ identloops ] +---Run Phase [ splitcriticaledge ]--- +---Run Phase [ ssatab ]--- +---Run Phase [ aliasclass ]--- +---Run Phase [ ssa ]--- + ++ trigger phase [ dominance ] +---Run Phase [ dse ]--- + ++ trigger phase [ fsaa ] +---Run Phase [ hello ]--- +hello opt on function: VEC_invariant_p_base_space +---Run Phase [ hprop ]--- + ++ trigger phase [ irmapbuild ] +---Run Phase [ valueRangePropagation ]--- + ++ trigger phase [ identloops ] +---Run Phase [ hdse ]--- +---Run Phase [ epre ]--- + ++ trigger phase [ dominance ] + ++ trigger phase [ identloops ] + == epre invokes [ hdse ] == +---Run Phase [ rename2preg ]--- +---Run Phase [ lpre ]--- +---Run Phase [ storepre ]--- +---Run Phase [ copyprop ]--- +---Run Phase [ hdse ]--- +---Run Phase [ pregrename ]--- +---Run Phase [ bblayout ]--- +---Run Phase [ meemit ]--- + +``` diff --git a/doc/cn/DeveloperGuide.md b/doc/cn/DeveloperGuide.md new file mode 100644 index 0000000000000000000000000000000000000000..969f52b08816f29a9a796d8852ce78f9b60395ee --- /dev/null +++ b/doc/cn/DeveloperGuide.md @@ -0,0 +1,103 @@ +# 开发者指南 + +通过参考本文档,您可以下载编译器源码编译出OpenArkCompiler。同时,本文档也为开发者提供了源码静态检查指南。 + +## 源码下载 + +下载地址:,可以通过`Clone` or `Download`的方式下载openarkcompiler源码。 + + > 注:默认源码下载目录为openarkcompiler。 + +之后请按照《环境配置》文档完成您的开发环境准备。 + + +## 源码编译 + +在openarkcompiler目录下执行以下命令,编译出OpenArkCompiler,默认输出路径 openarkcompiler/output/TYPE/bin, TYPE: aarch64-clang-release。 + +``` +source build/envsetup.sh arm release +make setup +make +``` + +命令说明: + +- `source build/envsetup.sh arm release` 初始化环境,将OpenArkCompiler工具链路径openarkcompiler/output/TYPE/bin设置到环境变量中; +- `make` 编译OpenArkCompiler的Release版本; +- `make BUILD_TYPE=DEBUG` 编译OpenArkCompiler的Debug版本。 + +在openarkcompiler目录下执行以下命令,编译出OpenArkCompiler及maple runtime部分,默认输出路径 openarkcompiler/output/TYPE, TYPE: aarch64-clang-release。 + +``` +source build/envsetup.sh arm release +make setup +make libcore +``` + +命令说明: + +- `make libcore` 编译OpenArkCompiler及maple runtime部分的Release版本; +- `make libcore BUILD_TYPE=DEBUG` 编译OpenArkCompiler及maple runtime部分的Debug版本; + +此外,方舟编译器还提供了源码编译脚本,开发者也可以通过在openarkcompiler目录下执行该脚本,默认编译出OpenArkCompiler及maple runtime部分的Release版本。执行命令如下: + +``` +source build/build.sh +``` + +## Sample示例编译 + +当前编译方舟编译器Sample应用需要使用到Java基础库,我们以Android系统提供的Java基础库为例,展示Sample样例的编译过程。 + +**基础库准备** + +环境准备阶段已经通过AOSP获取到需要的libcore的jar文件。 + +**生成libjava-core.mplt文件** + +编译前,请先在openarkcompiler目录下创建libjava-core目录,拷贝java-core.jar到此目录下,在openarkcompiler目录执行以下命令: + +``` +source build/envsetup.sh arm release +make +cd libjava-core +jbc2mpl -injar java-core.jar -out libjava-core +``` + +执行完成后会在此目录下生成libjava-core.mplt文件。 + +**示例代码快速编译** + +示例代码位于openarkcompiler/samples目录。 + +以samples/helloworld/代码为例,在openarkcompiler/目录下执行以下命令: + +``` +source build/envsetup.sh arm release +make +cd samples/helloworld/ +make +``` + +## 源码静态检查 + +本部分内容将指导您使用clang-tidy进行源码静态检查。在对源码进行修改之后,对源码进行静态检查,可以检查源码是否符合编程规范,有效的提高代码质量。 + +静态源码检查之前,需要先编译出OpenArkCompiler。此后,以检查src/maple_driver源码为例,在openarkcompiler目录下执行以下命令: + +``` +cp output/TYPE/compile_commands.json ./ +./tools/clang+llvm-8.0.0-x86_64-linux-gnu-ubuntu-16.04/share/clang/run-clang-tidy.py -clang-tidy-binary='./tools/clang+llvm-8.0.0-x86_64-linux-gnu-ubuntu-16.04/bin/clang-tidy' 
-clang-apply-replacements-binary='./tools/clang+llvm-8.0.0-x86_64-linux-gnu-ubuntu-16.04/bin/clang-apply-replacements' src/maple_driver/ +``` + +命令说明: + +- `cp output/TYPE/compile_commands.json ./` 将output/TYPE目录之下的compile_commands.json复制到当前目录之下,它是clang-tidy运行所需要的编译命令; + +- `./tools/clang+llvm-8.0.0-x86_64-linux-gnu-ubuntu-16.04/share/clang/run-clang-tidy.py` 调用clang-tidy进行批量检查的脚本run-clang-tidy.py,其中 `./tools/clang+llvm-8.0.0-x86_64-linux-gnu-ubuntu-16.04/`目录是之前配置的clang编译器的发行包主目录; `-clang-tidy-binary` 是指明clang-tidy的具体位置; `-clang-apply-replacements-binary` 是指明run-clang-tidy.py所依赖的clang-apply-replacements的位置; `src/maple_driver/` 是要进行源码检查的目录。 + +## 编译器注意事项 + +- 方舟编译器前端暂时不支持字节码校验,未符合dex、jbc语义的字节码输入可能造成编译器Crash。 +- 方舟编译器中后端暂时不支持IR中间件语法校验,未符合MapleIR语义的中间件输入可能造成编译器Crash。 diff --git a/doc/cn/DeveloperGuide4Utility.md b/doc/cn/DeveloperGuide4Utility.md new file mode 100644 index 0000000000000000000000000000000000000000..08045fc06352bcdc7acac72aac6e0796a4d9368d --- /dev/null +++ b/doc/cn/DeveloperGuide4Utility.md @@ -0,0 +1,431 @@ +# Maple通用模块应用手册 + +# Cast + +## `instance_of`与`safe_cast` + +`maple`对于`C++`的使用,原则上需要禁用`RTTI`,即禁用`dynamic_cast`。而由于编译器系统的复杂性,完全通过类设计来破除父类向子类转换的情况,反而会使得对象关系变得更加复杂,得不偿失。所以`maple`代码实现中便有了众多如下设计: + +```c++ +SubClass *sub = nullptr; +if (base.Type() == SubType) { + sub = static_cast(base); +} +``` + +通过设计某属性字段来实现父类与子类之间的关系绑定,从而达到与`dynamic_cast`相同的效果。 + +但此种写法仍有一些不足,首先`SubType`与`SubClass`之间隐藏有静态绑定关系,这由设计者决定,却需要调用者将两者关系显化,这会产生强依赖;其次,并不是所有场景都如类型比较这样直观,复杂的场景对于调用者来说更容易出错;最后,散落在各地的转换,将会埋下散弹式修改的问题。所以我们设计出了`safe_cast`,由设计者注册转换关系,调用者只需以`dynamic_cast`的方式调用即可 + +### 注册方式 + +通过`REGISTER_SAFE_CAST`宏来达成注册,声明如下: + +```c++ +#define REGISTER_SAFE_CAST(type, condition) +``` + +其中`type`为子类类型(假设为`B`),`condition`为匹配到`B`以及其子类所有场景的布尔表达式。示例如下: + +```c++ +class A; +class B : A; +class C : B; +REGISTER_SAFE_CAST(B, from.Kind() == B || from.Kind() == C); +REGISTER_SAFE_CAST(C, from.Kind() == C); +``` + +`from`为表达式传入的类型的形参名。 + +*注意:* + +*- 注册同时持了子类向父类转换以及父类向子类转换。* + +*- `condition`可以是任意的布尔表达式,但设计者应尽量使其符合继承关系,避免非继承关系的转换。* + +*- 对于复杂的`condition`使用`kind ==`表示一棵继承关系树是不明智的,需考虑优化,如:组织`kind`的范围、使用特定位标记等方式来达成快速匹配。* + +### 使用场景 + +1. 针对只做单个类型匹配的场景 + +```c++ +SubClass *sub = safe_cast(base); +if (sub == nullptr) { + // TODO +} +``` + +2. 针对多个类型匹配的场景 + +```c++ +if (instanceof(base)) { + auto *sub = static_cast(base); + // TODO +} else if (instanceof(base)) { + auto *sub = static_cast(base); + // TODO +} +... 
+``` + +*注意:* + +*- 对于`switch case`等已经正确识别类型的场景,使用`static_cast`即可。* + +*- `safe_cast`返回值永远是指针,外部识别是否转换成功。* + +*- 输入若为指针,`instance_of`与`safe_cast`都会做判空,所以`base`非空时优先传入引用* + +# Container + +## `Ptr` + +`Ptr`模拟了原生指针的行为,但切除了对数组操作的支持。 + +其通过在构造、赋值的操作中校验数据,使得指针对象仅在构造时需验证满足某特征,而传递和使用时均无需再次验证该特征,降低重复检查的开销。 + +```c++ +template +using PtrCheckerType = void (*)(const T*); + +template +constexpr void CheckNothing(const T*) {} + +template Check = CheckNothing> +class Ptr; +``` + +如上所示,`Ptr`的默认校验行为不做任何检查,通过 + +```c++ +template < typename T> +using XXXPtr = utils::Ptr>; +``` + +即可定义不同场景下的已经校验过的指针对象,统称安全指针。 + +*注:其由`safe_ptr`的诉求而扩展而来,但`safe_ptr`相对通用的`Ptr`场景更为复杂,因为其针对的是指针本身的合法性而非指针对象的特征。* + +## `safe_ptr` + +参见《CPPCodingTalkAboutPointer》中`Ref`的定义。由于`c++`中`operator.`无法进行重载,所以无法构建`Ref`对象,但可以定义与其等效的非空指针对象,即`safe_ptr`。 + +### 场景:数组、字典等容器成员 + +示例可见于《CPPCodingTalkAboutPointer》。 + +为了弥补`operator.`无法重载的问题,后续将扩展`Utility`中`ToRef`小工具的能力,保证指针无开销但安全转换为引用。其可能特化如下: + +```c++ +template +constexpr T &ToRef(safe_ptr ptr) { + return *ptr; +} +``` + +*注:对于使用频繁的容器,后续将封装`ref_xxx`系列的容器以取代`safe_ptr`这类使用场景。* + +### 场景:作为对象成员 + +此为`safe_ptr`开发出来后意外发掘的场景。 + +由于存在引用数据成员的类,编译器无法为其生成默认拷贝和转移,但很多场景下需要支持拷贝和转移能力,此时通常的做法是存储为指针成员。但指针成员将带来两个问题: + +1. 针对指针成员访问时需要去识别指针的合法性。 +2. 重构和演进都有可能从定义成员的角色方来改变其行为可空,而使用成员的角色方不一定会被告知,这可能会导致进一步的隐藏bug。 + +而使用`safe_ptr`代替裸指针,`safe_ptr`解引用处都使用`ToRef`来转引用(裸指针的`ToRef`有额外的开销),将会避免这样的问题。 + +*注意:`safe_ptr`设计为容器或对象成员,而不应用于函数传参,函数传参应使用`&`。* + +## `mpl_iterator` + +`mpl_iterator`原名为`iterator`,但由于和容器中`using iterator`重名,所以添加`mpl`前缀。 + +由于`ref_xxx`容器的设计,以及未来可能扩展small talk系列,重复的迭代器设计是个比较麻烦的问题,所以抽象出持续演进的统一迭代器容器,通过`mpl_iterator_traits`进行少量配置而快速实现新容器的迭代器。 + +当前基于`ref_vector`的迭代器诉求,设计`mpl_iterator_traits`的成员如下: + +```c++ +template +struct mpl_iterator_traits { + using iterator_category = typename std::iterator_traits::iterator_category; + using value_type = typename std::iterator_traits::value_type; + using difference_type = typename std::iterator_traits::difference_type; + using pointer = typename std::iterator_traits::pointer; + using reference = typename std::iterator_traits::reference; + + static reference operator_dereference(Iterator iter) { + return *iter; + }; + + static Iterator operator_arrow(Iterator iter) { + return iter; + } + + static reference operator_bracket(Iterator iter, difference_type n) { + return iter[n]; + } +}; + +``` + +## `ref_vector` + +参见《CPPCodingTalkAboutPointer》中`ref_vector`的定义。 + +使用指针数组:`std::vector` + +```c++ + int a = 0, b = 1; + + std::vector data; + data.push_back(&a); + data.push_back(&b); + ASSERT_EQ(*data[0], 0); + ASSERT_EQ(*data[1], 1); +``` + +重构为引用数组:`utils::ref_vector` +```c++ + int a = 0, b = 1; + + ref_vector data; + data.push_back(a); + data.push_back(b); + ASSERT_EQ(data[0], 0); + ASSERT_EQ(data[1], 1); +``` + +## `Index` + +`Index`的设计初衷为满足基础类型的静态安全。 + +如`GStrIdx`、`UStrIdx`、`U16StrIdx`三者在业务代码中会共同参与计算,包括其提供的接口也有比较高的相似性。它们底层均为`uint32`,若均定义为`uint32`那将是个灾难,调用者不得不得小心翼翼,但也很难避免传错数据。所以使静态类型互斥,由编译器来检查使用的正确性,将大大降低出错的几率。 + +定义一个静态类型方式非常简单,只要定义不同的Tag即可,如下: + +```c++ +class GStrTag; +using GStrIdx = utils::Index; + +class UStrTag; +using UStrIdx = utils::Index; + +class U16StrTag; +using U16StrIdx = utils::Index; +``` + +# Generalize Pattern + +## `ObjectFactory` + +`ObjectFactory`为针对抽象工厂的封装,用于解决以下问题: + +1. 消除代码中由`switch ... case`、`if ... else if ...`等构成的具有高圈复杂度的大函数,这类函数难以维护和扩展,且灵活度低。*(注意:应避免机械的性解决问题,需分析语义做好设计,如配合模板模式等,实现设计隔离而非仅仅代码隔离。)* + +2. 
采用工厂将依赖反转,更容易将调用者和设计者、设计者和设计者之间隔离。 + +其应用方式如下示例: + +```c++ +// 定义Key,用于标记工厂将创建的产品类别 +enum class ObjectType { + kPlus, + kMinus +}; + +// 定义接口,所有产品应实现的接口协议 +class Base { + public: + virtual ~Base() = default; + virtual int32_t DoIt(int32_t lhs, int32_t rhs) const = 0; +}; + +// 定义工厂,使用ObjectType作为查找关键字,Base为接口协议,int32_t为所有产品构造函数的参数类型 +using TestObjectFactory = ObjectFactory; + +// Key,接口,工厂需要对注册者和调用者均可见,所以可能在.h文件中,可能在.cpp文件汇总 +// 产品只要保证能注册入工厂,可以在不同的.cpp文件中 +// 定义产品 +class ObjectPlus : public Base { + public: + // 定义工厂时所约定的构造函数 + explicit ObjectPlus(int32_t base) : base(base) {} + virtual ~ObjectPlus() = default; + + // 定义接口时的约定 + virtual int32_t DoIt(int32_t lhs, int32_t rhs) const override { + return base + lhs + rhs; + } + + private: + int32_t base; +}; + +// 定义产品,可能在另外的.cpp文件中 +class ObjectMinus : public Base { + public: + explicit ObjectMinus(int32_t base) : base(base) {} + virtual ~ObjectMinus() = default; + + virtual int32_t DoIt(int32_t lhs, int32_t rhs) const override { + return base + lhs - rhs; + } + + private: + int32_t base; +}; + +// 注册产品,注册产品的方式随着Key,接口,工厂,产品之间的分布以及加载可灵活处理,保证注册成功即可 +// 此处利用static变量初始化来确保注册 +bool RegisterObject() { + RegisterFactoryObject(); + RegisterFactoryObject(); +} +static auto testObjectFactory = RegisterObject(); + +TEST(TestFactory, ObjectFactory) { + // 获取产品对象,需要判空,本示例略 + auto obj = CreateProductObject(ObjectType::kPlus, 10); + ASSERT_EQ(obj->DoIt(1, 2), 13); + obj = CreateProductObject(ObjectType::kMinus, 10); + ASSERT_EQ(obj->DoIt(1, 2), 9); +} +``` + +## `FunctionFactory` + +`FunctionFactory`解决的问题与`ObjectFactory`类似,其主要为了简化抽象工厂的复杂性,对于大多数场景,`FunctionFactory`更容易编写和上手。 + +其应用方式如下示例: + +```c++ +// 定义Key,用于标记工厂将创建的产品类别 +enum class FunctionType { + kPlus, + kMinus +}; + +// 定义接口和工厂,使用FunctionType作为查找关键字,int32_t(int32_t, int32_t)为函数协议 +using TestFunctionFactory = FunctionFactory; + +// 定义产品 +int32_t Plus(int32_t lhs, int32_t rhs) { + return lhs + rhs; +} + +// 定义产品,可能在另外的.cpp文件中 +int32_t Minus(int32_t lhs, int32_t rhs) { + return lhs - rhs; +} + +// 注册产品,注册产品的方式随着Key,接口,工厂,产品之间的分布以及加载可灵活处理,保证注册成功即可 +// 此处利用单件模式来确保注册 +bool RegisterFunction() { + RegisterFactoryFunction(FunctionType::kPlus, Plus); + RegisterFactoryFunction(FunctionType::kMinus, Minus); +} +void AutoFunctionLoader() { + static auto testObjectFactor = RegisterFunction(); +} + +TEST(TestFactory, TestAll) { + // 加载产品 + AutoFunctionLoader(); + + // 获取产品对象,需要判空,本示例略 + auto func = CreateProductFunction(FunctionType::kPlus); + ASSERT_EQ(func(1, 2), 3); + func = CreateProductFunction(FunctionType::kMinus); + ASSERT_EQ(func(1, 2), -1); +} +``` + + +# Utility + +## `ToRef` + +针对方舟新增代码和重构代码中指针传参的场景,期望以引用的方式来替代,即确保所有指针均已经过校验,再以引用的方式传递给被调用的函数,被调用函数多数情况下不应承担函数参数中指针为空的风险以及判断的开销。 + +通常的写法为(示例中`DoIt`和`Run`可当做第三方接口,无法更改): + +```c++ +A *DoIt(B &b); +void Run(B *b) { + CHECK_NULL_FATAL(b); + // ... + A *a = DoIt(*b); + CHECK_NULL_FATAL(a); + a->Do; +} +``` + +期望多数指针获取即引用: + +```c++ +A *DoIt(B &b); +void Run(B *b) { + B &bRef = utils::ToRef(b); + // ... + A &a = utils::ToRef(DoIt(bRef)); + a.Do; +} +``` + +对于`b`仅单次使用,亦可以调整为: + +```c++ +A *DoIt(B &b); +void Run(B *b) { + // ... 
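+  // b is used only once here, so it is checked and converted to a reference directly at the call site via utils::ToRef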
+ A &a = utils::ToRef(DoIt(utils::ToRef(b))); + a.Do; +} +``` + +## `bit_field_v`&`lbit_field_v` + +使用bit位来标记状态开关组合,是一种既节约内存又能高效编码的设计方法。通常,在枚举定义或常量定义时,会有如下写法: + +```c++ +enum BBAttr : uint32 { + kBBAttrIsEntry = 0x02, + kBBAttrIsExit = 0x04, + kBBAttrWontExit = 0x08, + kBBAttrIsTry = 0x10, + kBBAttrIsTryEnd = 0x20, + kBBAttrIsJSCatch = 0x40, + kBBAttrIsJSFinally = 0x80, + kBBAttrIsCatch = 0x0100, + kBBAttrIsJavaFinally = 0x0200, + kBBAttrArtificial = 0x0400, + kBBAttrIsInLoop = 0x0800, + kBBAttrIsInLoopForEA = 0x1000 +}; +``` + +此设计很明显的欲用位来记录某些属性信息,但位信息比较隐晦,难以维护与阅读。 + +所以需要更加清晰的设计: + +```c++ +enum BBAttr : uint32 { + kBBAttrIsEntry = utils::bit_field_v<1>, + kBBAttrIsExit = utils::bit_field_v<2>, + kBBAttrWontExit = utils::bit_field_v<3>, + kBBAttrIsTry = utils::bit_field_v<4>, + kBBAttrIsTryEnd = utils::bit_field_v<5>, + kBBAttrIsJSCatch = utils::bit_field_v<6>, + kBBAttrIsJSFinally = utils::bit_field_v<7>, + kBBAttrIsCatch = utils::bit_field_v<8>, + kBBAttrIsJavaFinally = utils::bit_field_v<9>, + kBBAttrArtificial = utils::bit_field_v<10>, + kBBAttrIsInLoop = utils::bit_field_v<11>, + kBBAttrIsInLoopForEA = utils::bit_field_v<12> +}; +``` + +其中`bit_field_v`:`uint32`,`lbit_field_v`:`uint64`,未来将按需添加`sbit_field_v`:`uint16`以及`bbit_field_v`:`uint8`。 diff --git a/doc/cn/DevelopmentPreparation.md b/doc/cn/DevelopmentPreparation.md new file mode 100644 index 0000000000000000000000000000000000000000..8420fc48fdb4cef88e31c202e272cfb393c8449a --- /dev/null +++ b/doc/cn/DevelopmentPreparation.md @@ -0,0 +1,171 @@ +# 环境配置 + +## 硬件推荐配置 + +- 2 GHz 双核处理器或者更高等级 CPU + +- 2 GB 系统内存及以上 + +- 200GB 可用磁盘空间 + +## 开发环境推荐 + +您需要安装一个 64 位版本的 Ubuntu(Ubuntu 16.04,18.04,20.04 皆可) + +``` +sudo apt-get -y install clang llvm lld libelf-dev libssl-dev python qemu openjdk-8-jre-headless openjdk-8-jdk-headless cmake +sudo apt-get -y install git build-essential zlib1g-dev libc6-dev-i386 g++-multilib gcc-multilib linux-libc-dev:i386 + +Ubuntu 16.04: +sudo apt-get -y install gcc-5-aarch64-linux-gnu g++-5-aarch64-linux-gnu + +Ubuntu 18.04: +sudo apt-get -y install gcc-7-aarch64-linux-gnu g++-7-aarch64-linux-gnu + +Ubuntu 20.04: +sudo apt-get -y install gcc-9-aarch64-linux-gnu g++-9-aarch64-linux-gnu libncurses5 +``` + +## 自动安装工具 + +``` +source build/envsetup.sh arm release +make setup + +以下的步骤只是作为参考,需要的工具都已经在 "make setup" 一步自动安装完成。 +``` + +## 安装 Clang 编译器并完成配置(用于编译方舟编译器代码,20.04 已改为使用系统安装的 Clang) + +下载**clang+llvm-10.0.0-x86_64-linux-gnu-ubuntu-18.04** (具体版本请根据系统版本确定) + +LLVM 下载地址:http://releases.llvm.org/download.html#10.0.0 + +解压并放置到`openarkcompiler/tools`目录 + +- 修改`openarkcompiler/build/envsetup.sh`文件,将`CLANG_PATH`变量配置为 clang 编译器所在路径,例如: + +``` +CLANG_PATH = "${MAPLE_ROOT}/tools/clang+llvm-10.0.0-x86_64-linux-gnu-ubuntu-18.04/bin" +``` + +其中${MAPLE_ROOT}为 openarkcompiler 源码根目录。 + +## 安装 Ninja、GN 并完成配置 + +下载**Ninja(v1.10.0)**及**GN(Linux Version)** + +Ninja 下载地址:https://github.com/ninja-build/ninja/releases + +GN 下载地址:https://gitee.com/xlnb/gn_binary + +将 GN 和 Ninja 可执行程序放置到`openarkcompiler/tools`目录,然后修改这两个文件为可执行: + +``` +cd openarkcompiler/tools +chmod 775 gn +chmod 775 ninja +``` + +打开`openarkcompiler/Makefile`文件,将 GN 和 NINJA 两个变量配置为 GN 和 Ninja 可执行程序所在路径。例如: + +``` +GN := ${MAPLE_ROOT}/tools/gn/gn +NINJA := ${MAPLE_ROOT}/tools/ninja/ninja +``` + +## 安装 gcc-linaro 并完成配置(用于交叉编译方舟编译器代码) + +下载**gcc-linaro-7.5.0-2019.12-i686_aarch64-linux-gnu** + +gcc-linaro-7.5.0 下载地址:https://releases.linaro.org/components/toolchain/binaries/latest-7/aarch64-linux-gnu/ + +解压并放置到`openarkcompiler/tools`目录,并将文件夹更名为`gcc-linaro-7.5.0`。 + +- 
修改`openarkcompiler/build/config.gni`文件,将`GCC_LINARO_PATH`变量配置为 gcc-linaro-7.5.0 所在路径,例如: + +``` +GCC_LINARO_PATH = "${MAPLE_ROOT}/tools/gcc-linaro-7.5.0" +``` + +- 修改`openarkcompiler/build/core/maple_variables.mk`文件,将`GCC_LINARO_PATH`配置为 gcc-linaro-7.5.0 所在路径,例如: + +``` +GCC_LINARO_PATH := ${MAPLE_ROOT}/tools/gcc-linaro-7.5.0 +``` + +## 安装 android-ndk 并完成配置(用于编译方舟编译器代码) + +下载**android-ndk-r21b-linux-x86_64.zip** + +android-ndk-r21 下载地址:https://developer.android.google.cn/ndk/downloads/ + +解压并放置到 openarkcompiler/tools 目录,并将文件夹更名为`android-ndk-r21`。 + +- 修改`openarkcompiler/build/config.gni`文件,将`NDK_PATH`变量配置为 android-ndk-r21 所在路径,例如: + +``` +NDK_PATH = "${MAPLE_ROOT}/tools/android-ndk-r21" +``` + +- 修改`openarkcompiler/build/core/maple_variables.mk`文件,将`NDK_PATH`配置为 android-ndk-r21 所在路径,例如: + +``` +NDK_PATH := ${MAPLE_ROOT}/tools/android-ndk-r21 +``` + +## AOSP 运行环境依赖 + +当前编译方舟编译器 Sample 应用需要使用到 Java 基础库,我们通过 AOSP 来获取,请使用 Android-10.0.0_r35 版本,暂不支持 Android11 版本。 + +AOSP 下载地址:https://source.android.com/source/downloading/ + +下载 AOSP 并编译完成。 + +- 在 openarkcompiler 目录下新建链接`android/`,并链接到 AOSP 的根目录; +- 将`openarkcompiler/android/out/target/product/generic_arm64/obj/JAVA_LIBRARIES/core-all_intermediates/javalib.jar`拷贝到`openarkcompiler/libjava-core`目录,并命名为`java-core.jar`,同时码云上也提供了编译好的 libcore 的 jar 文件,你可以下载直接使用,下载链接`https://gitee.com/xlnb/aosp_core_bin`; +- 在 openarkcompiler/tools 下新建链接 gcc,并链接到 AOSP 的`openarkcompiler/android/prebuilts/gcc`; +- 在 openarkcompiler/tools 下新建链接 clang-r353983c,并链接到 AOSP 的`openarkcompiler/android/prebuilts/clang/host/linux-x86/clang-r353983c`; +- 修改`openarkcompiler/build/config.gni`和`openarkcompiler/build/core/maple_variables.mk`中`ANDROID_GCC_PATH`和`ANDROID_CLANG_PATH`两个变量,配置为上述 gcc 和 clang-r353982c 的所在路径,例如: + +config.gni + +``` +ANDROID_GCC_PATH = "${MAPLE_ROOT}/tools/gcc" +ANDROID_CLANG_PATH = "${MAPLE_ROOT}/tools/clang-r353983c" +``` + +maple_variables.mk + +``` +ANDROID_GCC_PATH := ${MAPLE_ROOT}/tools/gcc +ANDROID_GLANG_PATH := ${MAPLE_ROOT}/tools/clang-r353983c +``` + +## 构建工具依赖下载 + +### icu 下载并编译 + +当前用例编译需要 icu 动态库支持,请使用 icu56.1 版本。 + +icu 下载地址:http://site.icu-project.org/home + +下载 56.1 版本的 icu4c 并编译完成,生成`libicuuc.so`和`libicudata.so`,将两者放置到`openarkcompiler/third_party/icu/lib/aarch64-linux-gnu`路径下,并重命名为`libicuuc.so.56`和`libicudata.so.56`。 + +### libz 下载并编译 + +当前用例编译需要 libz.so 支持,请使用 1.2.8 版本。 + +libz 下载地址:https://zlib.net + +下载 1.2.8 版本的 libz.so,将其放置到`openarkcompiler/third_party/libdex/prebuilts/aarch64-linux-gnu/`路径下,并重命名为`libz.so.1.2.8`。 + +### r8 下载并编译 + +当前用例编译需要 d8.jar 支持,请使用 d8-1.5.13 版本。 + +r8 社区地址:https://r8.googlesource.com/r8 + +已经编译后的二进制:https://gitee.com/xlnb/r8-d81513/tree/master/d8/lib/d8.jar + +将 d8.jar 放置到`openarkcompiler/third_party/d8/lib/`目录 diff --git a/doc/cn/MapleDriverOverview.md b/doc/cn/MapleDriverOverview.md new file mode 100644 index 0000000000000000000000000000000000000000..ca6fbbe172b833f9314178a8d43f6a47421107ac --- /dev/null +++ b/doc/cn/MapleDriverOverview.md @@ -0,0 +1,46 @@ +# maple驱动程序概述 +## 简介 +本文档描述了maple驱动程序的当前状态。这包括设计、使用、目标和内部实施。 + +## 目标 +maple驱动程序旨在满足优秀编译器(如clang)的要求。换句话说,它应该是: +-灵活支持新功能 +-高效,开销低 +-易于使用 + +驱动程序开发的最终目标是完全支持gcc选项,并直接集成到CMake构建系统中。 + +## 设计和实现 + +### 设计概述 + +下图显示了maple驱动程序体系结构的重要组件以及它们之间的关系。红色组件表示基本的驱动程序部分(类、方法),蓝色组件是输出/输入数据结构,绿色组件是重要的帮助器类。 + +![](media/MapleDriverStructure.png) + +### 驱动程序阶段 + +驱动程序功能可分为五个阶段: + +**1.解析选项** + +首先检查输入命令行参数字符串的格式正确性,并将其转换为键值对,然后检查键是否在`OptionParser`的`usage`多映射(以前从帮助数据结构创建)中匹配。Option类还包含Descriptor数据结构,用一些附加数据描述选项参数所需的解析细节。然后解析参数。驱动程序希望了解所有可用选项。然后,结果将写入`OptionParser`类的`options`向量中。 
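+
+The following is a small, generic sketch of that key/value splitting step. It is not the actual maple driver code; the function name and types are illustrative assumptions only.
+
+```cpp
+#include <map>
+#include <string>
+#include <vector>
+
+// Hypothetical helper: split "--key=value" style argument strings into key/value
+// pairs; arguments without '=' are stored as flags with an empty value.
+std::map<std::string, std::string> SplitKeyValueArgs(const std::vector<std::string> &args) {
+  std::map<std::string, std::string> options;
+  for (const auto &arg : args) {
+    std::string::size_type pos = arg.find('=');
+    if (pos == std::string::npos) {
+      options[arg] = "";                                  // e.g. "--quiet"
+    } else {
+      options[arg.substr(0, pos)] = arg.substr(pos + 1);  // e.g. "--run=me:mplcg"
+    }
+  }
+  return options;
+}
+```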
+ + +**2.填充MplOptions** + +解析输入后,根据结果填充`MplOptions`:首先确定运行类型为自动或者自定义(取决于`--run`选项),然后初始化输入文件并检查有效性。如果运行类型为自动maple驱动程序还将自行配置代码生成管道(您将接收汇编程序文件作为最终输出),具体取决于第一个输入文件的扩展名。然后处理其他选项,包括`--option`,其值必须包含所有编译阶段的选项,使用解析通用选项时相同方法和数据结构来解析值,结果将存放到`MplOptions`类的`exeOptions`映射中。然后,maple驱动程序尝试打开输入文件,如果成功,将进入下一阶段。 + +**3.编译器选择** + +上一阶段完成后,maple驱动程序触发`CompilerFactory`类构造函数,该构造函数创建受支持编译器的类,并将指向所有编译器的指针保存在`supportedCompilers`数据结构中。 + +**4.阶段特定选项构建** + +`CompilerFactory`调用选定编译器的`Compile`方法,该方法构造默认和自定义的选项并写入字符串中。主要的问题是命令从一种风格转换到另一种风格,一些驱动程序组件需要自己的方法正确运行,如`MplcgCompiler`;而另一些组件只使用几个选项,它们的主要目的是确定要调用的可执行文件的路径和传递它们的输入和输出参数,如`AsCompiler`。 + +但是,`MaplecombCompiler`和`MplcgCompiler`需要特殊的管道,它们不调用可执行文件给它们传递命令行,而是使用`MIRModule`、`mirParser`和`DriverRunner`类与输入文件交互。`MIRModule`是一种数据结构,其功能类似于`MplOptions`,`MaplecombCompiler`和`MplcgCompiler`会在其中存储关键数据(输入文件的名称、源语言等);`MIRParser`是为了解析maple IR; `DriverRunner`是一个协调器,它可以与以前的两个数据结构一起工作,同时还存储其负责的阶段的选项和编译中所需的其他数据。 + +**5.执行** + +之后,可执行文件的命令行和完整路径将重定向到`SafeExe`类的`Exe`方法,在那里它通过子进程处理和执行。如果是`MaplecombCompiler`和`MplcgCompiler`,则调用`DriverRunner`的`Run`方法,并将作业发布到阶段管理器。 \ No newline at end of file diff --git a/doc/cn/NaiveRcInsertionDescription.md b/doc/cn/NaiveRcInsertionDescription.md new file mode 100644 index 0000000000000000000000000000000000000000..cfd65561644fd7a017b5d22db865e4b8cbf7c80c --- /dev/null +++ b/doc/cn/NaiveRcInsertionDescription.md @@ -0,0 +1,174 @@ +# 朴素版RC操作插入原理 + +引用计数(Reference Counting, RC)是计算机编程语言中的一种内存管理技术,是指将资源(可以是对象、内存或磁盘空间等等)的被引用次数保存起来,当被引用次数变为零时就将其释放的过程。使用引用计数技术可以实现自动资源管理的目的。同时引用计数还可以指使用引用计数技术回收未使用资源的垃圾回收算法。朴素版RC(Naive RC)是一种简单直接的RC插入操作。 + + +Naive RC插入的基本原理 +====================== + +- 对象的引用计数的来源: + + - 堆内对象(其它对象、本身)的引用 + + - 栈上的引用(包含寄存器) + + - 静态、全局变量 + +- 引用计数操作的插入规则(编译器和运行时),对应上面的: + + - Object field的赋值,需要将field指向的新对象+1,原对象计数-1 + + - 读取对象到栈上局部变量(含寄存器),需要对读取的对象引用计数+1 + + - 局部变量Last Use后引用计数-1 + + - 返回对象,引用计数+1,补偿局部变量Last Use后-1 + +- 简单示例 + + - 插入前 + + ```cpp + class A { + static Object static_field; + Object instance_field; + A() { + static_field = new Object(); + } + } + Object foo(){ + A a = new A(); + bar(a, new Object()) + return a.instance_field; + } + void bar(A a, Object o) { + a.instance_field = o; + } + ``` + + - 插入后 + + ```cpp + class A { + A() { + local_var t = new Object(); // t是赋值给static_field过程中使用的临时变量 + old = static_field; + static_field = t; + IncRef(t); DecRef(old); // 更新堆上RC + DecRef(t); // 函数退出释放栈上RC + } + } + Object foo(){ + A a = new A(); + bar(a, new Object()); + locl_var t = a.instance_field; + IncRef(t) // 栈上变量引用RC+1 + IncRef(t) // 函数返回,返回值RC+1 + DecRef(a) // 函数退出释放栈上RC,释放a + DecRef(t) // 函数退出释放栈上RC + return t; + } + void bar(A a, Object o) { + old = a.instance_field + a.instance_field = o; + IncRef(o); DecRef(old); + } + ``` + + +- 引用计数函数MIntrinsicsId: + + - 基础函数 + 使用方法:使用IrMap中的CreateIntrinsicCallMeStmt创建IntrinsiccallMeStmt语句,并插入到RC需要加减之处。 + + - INTRN_MCCIncRef + + - INTRN_MCCDecRef + + - Load/Write函数 + 使用方法:Write函数使用IrMap中的CreateIntrinsicCallMeStmt创建IntrinsiccallMeStmt语句替换左值具有static、global、volatile等属性的iassign语句、Load函数使用CreateIntrinsicCallAssignedMeStmt创建IntrinsiccallMeStmt语句,并替换右值具有static、global、volatile等属性的dassign语句,Load/Write函数均具有IncRef操作。 + + - INTRN_MCCLoadRef + + - INTRN_MCCLoadRefS + + - INTRN_MCCLoadRefVol + + - INTRN_MCCLoadRefSVol + + - INTRN_MCCWrite + + - INTRN_MCCWriteS + + - INTRN_MCCWriteVol + + - INTRN_MCCWriteSVol + +RefVar IncRef 处理规则: +======================== + +- 赋值语句处理: + + - 按照左值(赋值语句中被赋值的变量)、右值(赋值语句中被引用的值)表达式不同处理 + + - 先处理右值:比如选择什么Load接口;不需要+1的右值(New、Call) + + - Global属性选择INTRN_MCCLoadRef + + - 
Static属性选择INTRN_MCCLoadRefS + + - Volatile属性选择INTRN_MCCLoadRefVol + + - 再处理左值:选择什么Write接口;是否需要保存老值等等 + + - Global属性选择INTRN_MCCWriteRef + + - Static属性选择INTRN_MCCWriteRefS + + - Volatile属性选择INTRN_MCCWriteRefVol + +- 返回值处理: + + - 返回值+1 + +- 局部变量处理: + + - 在当前函数退出前(异常、正常退出)引用计数减一 + +Rclowering处理流程: +==================== + +- 设置rclowering处理标志位 +- 标记localrefvar + +- rclowering预处理 + + - 标记需要RC操作的变量 + + - 需要标记DecRef的左值(赋值语句中被赋值的变量): + + - Ref变量 + + - 需要标记IncRef的右值(赋值语句中被引用的值或返回值等): + + - Return值 + + - Ref变量 + + - hrow Value的寄存器 + + - 清理栈变量 + +- rclowering处理 + + - 处理包含Ref类型变量的赋值语句 + + - DecRef原值 + + - IncRef新值,参照RefVar IncRef 处理规则 + +- rclowering后处理 + + - 在函数入口对参数做IncRef并标记为LocalRefVar属性 + + - 处理函数返回值, 属性为LocalRefVar,则InRef,其他参照RefVar IncRef处理规则右值部分 diff --git a/doc/cn/ProgrammingSpecifications.md b/doc/cn/ProgrammingSpecifications.md new file mode 100644 index 0000000000000000000000000000000000000000..2e80d28632387a41cc4d317cef5fa39ee27feecc --- /dev/null +++ b/doc/cn/ProgrammingSpecifications.md @@ -0,0 +1,3089 @@ + + 方舟编译器C++语言编程规范  + + + + + + + + + + + +| 章节 | 内容 | +| ------------------ | ---------------------------------------- | +| [0 前言](#c0) | [目的](#c0-1) [重点关注](#c0-2) [约定](#c0-3) [例外](#c0-4) | +| [1 原则](#c1) | [好代码的原则](#c1-1) [类和函数设计指导原则](#c1-2) [遵循C++ ISO标准](#c1-4)
[优先编译时检查错误](#c1-5) [使用命名空间来限定作用域](#c1-6) [优先使用C++特性而不是C特性](#c1-7) | +| [2 命名](#c2) | [通用命名](#c2-1) [文件命名](#c2-2) [函数命名](#c2-3) [类型命名](#c2-4) [变量命名](#c2-5) [宏、常量、枚举命名](#c2-6) | +| [3 格式](#c3) | [行宽](#c3-1) [缩进](#c3-2) [大括号](#c3-3) [函数声明和定义](#c3-4) [函数调用](#c3-5) [if语句](#c3-6) [循环语句](#c3-7) [switch语句](#c3-8) [表达式](#c3-9) [变量赋值](#c3-10)
[初始化](#c3-11) [指针和引用](#c3-12) [编译预处理](#c3-13) [空格和空行](#c3-14) [类](#c3-15) | +| [4 注释](#c4) | [注释风格](#c4-1) [文件头注释](#c4-2) [函数头注释](#c4-3) [代码注释](#c4-4) | +| [5 头文件](#c5) | [头文件职责](#c5-1) [头文件依赖](#c5-2) | +| [6 作用域](#c6) | [命名空间](#c6-1) [全局函数和静态成员函数](#c6-2) [全局变量](#c6-3) [全局常量和静态成员常量](#c6-4) | +| [7 类](#c7) | [构造、拷贝构造、赋值和析构函数](#c7-1) [继承](#c7-2) [多重继承](#c7-3) [重载](#c7-4) | +| [8 函数](#c8) | [函数设计](#c8-1) [内联函数](#c8-2) [函数参数](#c8-3) | +| [9 C++其他特性](#c9) | [常量与初始化](#c9-1) [表达式](#c9-2) [类型转换](#c9-3) [资源分配和释放](#c9-4) [标准库](#c9-5) [const的用法](#c9-6) [模板](#c9-7) [宏](#c9-8) [其他](#c9-9)| +| [10 现代C++特性](#c10) | [代码简洁性和安全性提升](#c10-1) [智能指针](#c10-2) [Lambda](#c10-3) [接口](#c10-4) | +| [11 安全编码规范](#c11) | [基本原则](#c11-1) [变量](#c11-2) [断言](#c11-3) [异常机制](#c11-4) [内存](#c11-5) [危险函数](#c11-6) | + +# 0 前言 + +## 目的 + +规则并不是完美的,通过禁止在特定情况下有用的特性,可能会对代码实现造成影响。但是我们制定规则的目的__“为了大多数程序员可以得到更多的好处”__, 如果在团队运作中认为某个规则无法遵循,希望可以共同改进该规则。 + +参考该规范之前,希望您具有相应的C++基础能力,而不是通过该文档来学习C++。 +1. 了解C++的ISO标准; +2. 熟知C++的基本语言特性,包括C++ 03/11/14/17相关特性; +3. 了解C++的标准库; + + +## 重点关注 +1. 约定C++的编程风格,比如命名,排版等。 +2. C++的模块化设计,如何设计头文件,类,接口和函数。 +3. C++相关特性的优秀实践,比如常量,类型转换,资源管理,模板等。 +4. 现代C++的优秀实践,包括C++11/14/17中可以提高代码可维护性,提高代码可靠性的相关约定。 + + +## 约定 +**规则**:编程时必须遵守的约定(must) + +**建议**:编程时应该遵守的约定(should) + +本规范适用通用C++标准, 如果没有特定的标准版本,适用所有的版本(C++03/11/14/17)。 + +## 例外 +无论是'规则'还是'建议',都必须理解该条目这么规定的原因,并努力遵守。 +但是,有些规则和建议可能会有例外。 + +在不违背总体原则,经过充分考虑,有充足的理由的前提下,可以适当违背规范中约定。 +例外破坏了代码的一致性,请尽量避免。'规则'的例外应该是极少的。 + +下列情况,应风格一致性原则优先: +**修改外部开源代码、第三方代码时,应该遵守开源代码、第三方代码已有规范,保持风格统一。** +**某些特定领域,优先参考其行业规范。** + +# 1 原则 + +## 好代码的原则 +我们参考Kent Beck的简单设计四原则来指导我们的如何写出优秀的代码,如何有效地判断我们的代码是优秀的。 +1. 通过所有测试(Passes its tests) +2. 尽可能消除重复 (Minimizes duplication) +3. 尽可能清晰表达 (Maximizes clarity) +4. 更少代码元素 (Has fewer elements) +5. 以上四个原则的重要程度依次降低。 + 这组定义被称做简单设计原则。 + +第一条强调的是外部需求,这是代码实现最重要的;第二点就是代码的模块架构设计,保证代码的正交性,保证代码更容易修改;第三点是代码的可阅读性,保证代码是容易阅读的;最后一点才是保证代码是简洁的,在简洁和表达力之间,我们更看重表达力。 + +## 类和函数设计指导原则 +C++是典型的面向对象编程语言,软件工程界已经有很多OOP原则来指导我们编写大规模的,高可扩展的,可维护性的代码: +- 高内聚,低耦合的基本原则:使程序模块的可重用性、移植性大大增强 +- SOLID原则:分别是单一原则、开闭原则、里氏替换原则、接口隔离原则、依赖倒置原则,遵循五大原则可以使程序低耦合,更加健壮 +- 迪米特法则:降低类之间的耦合 +- “Tell,Don’t ask”原则:一个对象应该命令其它对象做什么,而不是去查询其它对象的状态来决定做什么 +- 组合/聚合复用原则:尽量使用合成/聚合,不要使用类继承 + +## 遵循C++ ISO标准 +希望通过使用ISO C++标准的特性来编写C++代码,对于ISO标准中未定义的或者编译器实现的特性要谨慎使用,对于GCC等编译器的提供的扩展特性也需要谨慎使用,这些特性会导致代码的可移植性比较差。 + +注意:如果模块中需要使用相关的扩展特性来,那么尽可能将这些特性封装成独立的接口,并且可以通过编译选项关闭或者编译这些特性。对于这些扩展特性的使用,请模块制定特性编程指南来指导这些特性的使用。 + +## 优先编译时检查错误 +通过编译器来优先保证代码健壮性,而不是通过编写错误处理代码来处理编译就可以发现的异常,比如: + +- 通过const来保证数据的不变性,防止数据被无意修改。 +- 通过static_assert来进行编译时检查。 + +## 使用命名空间来限定作用域 +全局变量,全局常量和全局类型定义由于都属于全局作用域,在项目中,使用第三方库中容易出现冲突。 + +命名空间将作用域细分为独立的,具名的作用域,可有效地防止全局作用域的命名冲突。 +1. class,struct等都具有自己的类作用域。 +2. 具名的namespace可以实现类作用域更上层的作用域。 +3. 匿名namespace和static可以实现文件作用域。 + +对于没有作用域的宏变量,宏函数强烈建议不使用。 + +作用域的一些缺点: +1. 虽然可以通过作用域来区分两个命名相同的类型,但是还是具有迷惑性。 +2. 内联命名空间会让命名空间内部的成员摆脱限制,让人迷惑。 +3. 
通过多重嵌套来定义namespace,会让完整的命名空间比较冗长。 + +所以,我们使用命名空间的建议如下: +- 对于变量,常量和类型定义尽可能使用namespace,减少全局作用域的冲突 +- 不要在头文件中使用using namespace +- 不要使用内联命名空间 +- 鼓励在.cpp文件中通过匿名namespace或者static来封装,防止不必要的定义通过API暴露出去。 + + +## 优先使用C++特性而不是C特性 +C++比起C语言更加类型安全,更加抽象。我们更推荐使用C++的语言特性来编程,比如使用string而不是`char*`, 使用vector而不是原生数组,使用namespace而不是static。 + + +# 2 命名 +## 通用命名 +常见命名风格有: +__驼峰风格(CamelCase)__ +大小写字母混用,单词连在一起,不同单词间通过单词首字母大写来分开。 +按连接后的首字母是否大写,又分: 大驼峰(UpperCamelCase)和小驼峰(lowerCamelCase) + +__内核风格(unix_like)__ +单词全小写,用下划线分割。 +如:'test_result' + +__匈牙利风格__ +在‘大驼峰’的基础上,加上前缀;前缀用于表达类型或用途。 +如:'uiSavedCount', 'bTested' + +### 规则2.1.1 标识符命名使用驼峰风格 +不考虑匈牙利命名,在内核风格与驼峰风格之间,根据存量代码的情况,我们选择驼峰风格。 + +| 类型 | 命名风格 | +| ------------------------------------------------------------ | ---------------------------------------- | +| 类类型,结构体类型,枚举类型,联合体类型等类型定义 | 大驼峰 | +| 函数(包括全局函数,作用域函数,成员函数) | 大驼峰(接口部分可加前缀,如XXX_函数名) | +| 全局变量(包括全局和命名空间域下的变量,类静态变量),局部变量,函数参数,类、结构体和联合体中的成员变量 | 小驼峰 | +| 常量(const),枚举值 | k+大小写混合 | +| 宏 | 大写+下划线 | +| 命名空间 | 全小写 | + +注意: +上表中__常量__是指全局作用域、namespace域、类的静态成员域下,以 const或constexpr 修饰的基本数据类型、枚举、字符串类型的变量。 +上表中__变量__是指除常量定义以外的其他变量,均使用小驼峰风格。 + +## 文件命名 +### 建议2.2.1 C++文件以.cpp结尾,头文件以.h结尾。文件名使用下划线小写风格。 + +目前业界还有一些其他的后缀的表示方法: + +- 头文件: .hh, .hpp, .hxx +- cpp文件:.cc, .cxx, .C + +对于本文档,我们默认使用.h和.cpp作为后缀。 + +文件名如下: +- database_connection.h +- database_connection.cpp + + +## 函数命名 +函数命名统一使用大驼峰风格,一般采用动词或者动宾结构。接口部分可加前缀,如XXX_函数名。 +```cpp +class List { + public: + void AddElement(const Element& element); + Element GetElement(const unsigned int index) const; + bool IsEmpty() const; + bool MCC_GetClass(); +}; + +namespace utils { +void DeleteUser(); +} +``` + +## 类型命名 + +类型命名采用大驼峰命名风格。 +所有类型命名——类、结构体、联合体、类型定义(typedef)、枚举——使用相同约定,例如: + +```cpp +// classes, structs and unions +class UrlTable { ... +class UrlTableTester { ... +struct UrlTableProperties { ... +union Packet { ... + +// typedefs +typedef std::map PropertiesMap; + +// enums +enum UrlTableErrors { ... +``` + +对于命名空间的命名,建议全小写: +```cpp +// namespace +namespace osutils { + +namespace fileutils { + +} + +} +``` + + +## 变量命名 +通用变量命名采用小驼峰,包括全局变量,函数形参,局部变量,成员变量。 +```cpp +std::string tableName; // Good: 推荐此风格 +std::string tablename; // Bad: 禁止此风格 +std::string path; // Good: 只有一个单词时,小驼峰为全小写 + +class Foo { + private: + std::string fileName; // 不添加任何作用域前缀或者后缀 +}; +``` + +## 宏、常量、枚举命名 +宏采用全大写,下划线连接的格式。常量、枚举值使用k+大小写混合。 +函数局部 const 常量和类的普通const成员变量,使用小驼峰命名风格。 + +```cpp +#define MAX(a, b) (((a) < (b)) ? (b) : (a)) // 仅对宏命名举例,并不推荐用宏实现此类功能 + +enum TintColor { // 注意,枚举类型名用大驼峰,其下面的取值是k+大小写混合 + kRed, + kDarkRed, + kGreen, + kLightGreen +}; + +int Func(...) { + const unsigned int bufferSize = 100; // 函数局部常量 + char *p = new char[bufferSize]; + ... +} + +namespace utils { +const unsigned int kFileSize = 200; // 全局常量 +} + +``` + +# 3 格式 +尽管有些编程的排版风格因人而异,但是我们强烈建议和要求使用统一的编码风格,以便所有人都能够轻松的阅读和理解代码,增强代码的可维护性。 + +## 行宽 + +### 建议3.1.1 行宽不超过 120 个字符 +建议每行字符数不要超过 120 个。如果超过120个字符,请选择合理的方式进行换行。 + +例外: +- 如果一行注释包含了超过120 个字符的命令或URL,则可以保持一行,以方便复制、粘贴和通过grep查找; +- 包含长路径的 #include 语句可以超出120 个字符,但是也需要尽量避免; +- 编译预处理中的error信息可以超出一行。 +预处理的 error 信息在一行便于阅读和理解,即使超过 120 个字符。 + +```cpp +#ifndef XXX_YYY_ZZZ +#error Header aaaa/bbbb/cccc/abc.h must only be included after xxxx/yyyy/zzzz/xyz.h, because xxxxxxxxxxxxxxxxxxxxxxxxxxxxx +#endif +``` + +## 缩进 + +### 规则3.2.1 使用空格进行缩进,每次缩进2个空格 +只允许使用空格(space)进行缩进,每次缩进为 2 个空格。 + + + + +## 大括号 +### 规则3.3.1 除函数外,使用 K&R 缩进风格 +函数左大括号跟随语句放行末。 +右大括号独占一行,除非后面跟着同一语句的剩余部分,如 do 语句中的 while,或者 if 语句的 else/else if,或者逗号、分号。 + +如: +```cpp +struct MyType { // 跟随语句放行末,前置1空格 + ... 
+}; + +int Foo(int a) { // 函数左大括号跟随语句放行末 + if (...) { + ... + } else { + ... + } +} +``` +推荐这种风格的理由: + +- 代码更紧凑; +- 相比另起一行,放行末使代码阅读节奏感上更连续; +- 符合后来语言的习惯,符合业界主流习惯; +- 现代集成开发环境(IDE)都具有代码缩进对齐显示的辅助功能,大括号放在行尾并不会对缩进和范围产生理解上的影响。 + + +对于空函数体,可以将大括号放在同一行: +```cpp +class MyClass { + public: + MyClass() : value(0) {} + + private: + int value; +}; +``` + +## 函数声明和定义 + +### 规则3.4.1 函数声明和定义的返回类型和函数名在同一行;函数参数列表超出行宽时要换行并合理对齐 +在声明和定义函数的时候,函数的返回值类型应该和函数名在同一行;如果行宽度允许,函数参数也应该放在一行;否则,函数参数应该换行,并进行合理对齐。 +参数列表的左圆括号总是和函数名在同一行,不要单独一行;右圆括号总是跟随最后一个参数。 + +换行举例: +```cpp +ReturnType FunctionName(ArgType paramName1, ArgType paramName2) { // Good:全在同一行 + ... +} + +ReturnType VeryVeryVeryLongFunctionName(ArgType paramName1, // 行宽不满足所有参数,进行换行 + ArgType paramName2, // Good:和上一行参数对齐 + ArgType paramName3) { + ... +} + +ReturnType LongFunctionName(ArgType paramName1, ArgType paramName2, // 行宽限制,进行换行 + ArgType paramName3, ArgType paramName4, ArgType paramName5) { // Good: 换行后 4 空格缩进 + ... +} + +ReturnType ReallyReallyReallyReallyLongFunctionName( // 行宽不满足第1个参数,直接换行 + ArgType paramName1, ArgType paramName2, ArgType paramName3) { // Good: 换行后 4 空格缩进 + ... +} +``` + +## 函数调用 +### 规则3.5.1 函数调用入参列表应放在一行,超出行宽换行时,保持参数进行合理对齐 +函数调用时,函数参数列表放在一行。参数列表如果超过行宽,需要换行并进行合理的参数对齐。 +左圆括号总是跟函数名,右圆括号总是跟最后一个参数。 + +换行举例: +```cpp +ReturnType result = FunctionName(paramName1, paramName2); // Good:函数参数放在一行 + +ReturnType result = FunctionName(paramName1, + paramName2, // Good:保持与上方参数对齐 + paramName3); + +ReturnType result = FunctionName(paramName1, paramName2, + paramName3, paramName4, paramName5); // Good:参数换行,4 空格缩进 + +ReturnType result = VeryVeryVeryLongFunctionName( // 行宽不满足第1个参数,直接换行 + paramName1, paramName2, paramName3); // 换行后,4 空格缩进 +``` + +如果函数调用的参数存在内在关联性,按照可理解性优先于格式排版要求,对参数进行合理分组换行。 +```cpp +// Good:每行的参数代表一组相关性较强的数据结构,放在一行便于理解 +int result = DealWithStructureLikeParams(left.x, left.y, // 表示一组相关参数 + right.x, right.y); // 表示另外一组相关参数 +``` + +## if语句 + +### 规则3.6.1 if语句必须要使用大括号 +我们要求if语句都需要使用大括号,即便只有一条语句。 + +理由: +- 代码逻辑直观,易读; +- 在已有条件语句代码上增加新代码时不容易出错; +- 对于在if语句中使用函数式宏时,有大括号保护不易出错(如果宏定义时遗漏了大括号)。 + +```cpp +if (objectIsNotExist) { // Good:单行条件语句也加大括号 + return CreateNewObject(); +} +``` +### 规则3.6.2 禁止 if/else/else if 写在同一行 +条件语句中,若有多个分支,应该写在不同行。 + +如下是正确的写法: + +```cpp +if (someConditions) { + DoSomething(); + ... +} else { // Good: else 与 if 在不同行 + ... +} +``` + +下面是不符合规范的案例: + +```cpp +if (someConditions) { ... } else { ... 
} // Bad: else 与 if 在同一行 +``` + +## 循环语句 +### 规则3.7.1 循环语句要求使用大括号 +和if语句类似,我们要求for/while循环语句必须加上的大括号,即使循环体是空的,或者循环语句只有一条。 + +```cpp +for (int i = 0; i < someRange; i++) { + DoSomething(); +} +``` + +如果循环体是空的,应该使用空的大括号,而不是使用单个分号。 单个分号容易被遗漏,也容易被误认为是循环语句中的一部分。 + +```cpp +for (int i = 0; i < someRange; i++) { } // Good: for循环体是空,使用大括号,而不是使用分号 + +while (someCondition) { } // Good:while循环体是空,使用大括号,而不是使用分号 + +while (someCondition) { + continue; // Good:continue表示空逻辑,可以使用大括号也可以不使用 +} + +``` + +坏的例子: +```cpp +for (int i = 0; i < someRange; i++) ; // Bad: for循环体是空,也不要只使用分号,要使用大括号 + +while (someCondition) ; // Bad:使用分号容易让人误解是while语句中的一部分 +``` + +## switch语句 +### 规则3.8.1 switch 语句的 case/default 要缩进一层 +switch 语句的缩进风格如下: +```cpp +switch (var) { + case 0: // Good: 缩进 + DoSomething1(); // Good: 缩进 + break; + case 1: { // Good: 带大括号格式 + DoSomething2(); + break; + } + default: + break; +} +``` + +```cpp +switch (var) { +case 0: // Bad: case 未缩进 + DoSomething(); + break; +default: // Bad: default 未缩进 + break; +} +``` + +## 表达式 + +### 建议3.9.1 表达式换行要保持换行的一致性 +较长的表达式,不满足行宽要求的时候,需要在适当的地方换行。 +例: + +// 假设下面第一行已经不满足行宽要求 +```cpp +if (currentValue > threshold && + someConditionsion) { + DoSomething(); + ... +} + +int result = reallyReallyLongVariableName1 + // Good + reallyReallyLongVariableName2; +``` +表达式换行后,注意保持合理对齐,或者4空格缩进。参考下面例子 + +```cpp +int sum = longVaribleName1 + longVaribleName2 + longVaribleName3 + + longVaribleName4 + longVaribleName5 + longVaribleName6; // Good: 4空格缩进 + +int sum = longVaribleName1 + longVaribleName2 + longVaribleName3 + + longVaribleName4 + longVaribleName5 + longVaribleName6; // Good: 保持对齐 +``` +## 变量赋值 + +### 规则3.10.1 多个变量定义和赋值语句不允许写在一行 +每行只有一个变量初始化的语句,更容易阅读和理解。 + +```cpp +int maxCount = 10; +bool isCompleted = false; +``` + +下面是不符合规范的示例: + +```cpp +int maxCount = 10; bool isCompleted = false; // Bad:多个变量初始化需要分开放在多行,每行一个变量初始化 +int x, y = 0; // Bad:多个变量定义需要分行,每行一个 + +int pointX; +int pointY; +... +pointX = 1; pointY = 2; // Bad:多个变量赋值语句放同一行 +``` +例外:for 循环头、if 初始化语句(C++17)、结构化绑定语句(C++17)中可以声明和初始化多个变量。这些语句中的多个变量声明有较强关联,如果强行分成多行会带来作用域不一致,声明和初始化割裂等问题。 + +## 初始化 +初始化包括结构体、联合体、及数组的初始化 + +### 规则3.11.1 初始化换行时要有缩进,并进行合理对齐 +结构体或数组初始化时,如果换行应保持4空格缩进。 +从可读性角度出发,选择换行点和对齐位置。 + +```cpp +const int rank[] = { + 16, 16, 16, 16, 32, 32, 32, 32, + 64, 64, 64, 64, 32, 32, 32, 32 +}; +``` + +## 指针与引用 +### 建议3.12.1 指针类型"* "跟随变量名或者类型,不要两边都留有或者都没有空格 + +指针命名: * 靠左靠右都可以,但是不要两边都有或者都没有空格。 + +```cpp +int* p = NULL; // Good +int *p = NULL; // Good +int*p = NULL; // Bad +int * p = NULL; // Bad +``` + +例外:当变量被 const 修饰时,"`*`" 无法跟随变量,此时也不要跟随类型。 +```cpp +char * const VERSION = "V100"; +``` + +例外:对于如` static_cast(somePointer)/(char*)/vector `中的场景,*与类型之间不加空格。 + +### 建议3.12.2 引用类型"& "跟随变量名或者类型,不要两边都留有或者都没有空格 + +引用命名: & 靠左靠右都可以,但是不要两边都有或者都没有空格。 + +```cpp +int i = 8; +int& p = i; // Good +int &p = i; // Good +int & p = i; // Bad +int&p = i; // Bad +``` + + +## 编译预处理 +### 规则3.13.1 编译预处理的"#"统一放在行首,嵌套编译预处理语句时,"#"不缩进 +编译预处理的"#"统一放在行首,即使编译预处理的代码是嵌入在函数体中的,"#"也应该放在行首。 + +```cpp +#if defined(__x86_64__) && defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16) // Good:"#"放在行首 +#define ATOMIC_X86_HAS_CMPXCHG16B 1 // Good:"#"放在行首 +#else +#define ATOMIC_X86_HAS_CMPXCHG16B 0 +#endif + + +int FunctionName() { + if (someThingError) { + ... 
+#ifdef HAS_SYSLOG // Good:即便在函数内部,"#"也放在行首 + WriteToSysLog(); +#else + WriteToFileLog(); +#endif + } +} +``` +内嵌的预处理语句"#"不缩进 + +```cpp +#if defined(__x86_64__) && defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16) +#define ATOMIC_X86_HAS_CMPXCHG16B 1 // Good:区分层次,便于阅读 +#else +#define ATOMIC_X86_HAS_CMPXCHG16B 0 +#endif +``` + +## 空格和空行 +### 建议3.14.1 水平空格应该突出关键字和重要信息,避免不必要的留白 +水平空格应该突出关键字和重要信息,每行代码尾部不要加空格。总体规则如下: + +- if, switch, case, do, while, for等关键字之后加空格; +- 小括号内部的两侧,不要加空格; +- 大括号内部两侧有无空格,左右必须保持一致; +- 一元操作符(& * + ‐ ~ !)之后不要加空格; +- 二元操作符(= + ‐ < > * / % | & ^ <= >= == != )左右两侧加空格 +- 三目运算符(? :)符号两侧均需要空格 +- 前置和后置的自增、自减(++ --)和变量之间不加空格 +- 结构体成员操作符(. ->)前后不加空格 +- 逗号(,)前面不加空格,后面增加空格 +- 对于模板和类型转换(<>)和类型之间不要添加空格 +- 域操作符(::)前后不要添加空格 +- 冒号(:)前后根据情况来判断是否要添加空格 + +常规情况: +```cpp +void Foo(int b) { // Good:大括号前应该留空格 + +int i = 0; // Good:变量初始化时,=前后应该有空格,分号前面不要留空格 + +int buf[kBufSize] = {0}; // Good:大括号内两侧都无空格 +``` + +函数定义和函数调用: +```cpp +int result = Foo(arg1,arg2); + ^ // Bad: 逗号后面需要增加空格 + +int result = Foo( arg1, arg2 ); + ^ ^ // Bad: 函数参数列表的左括号后面不应该有空格,右括号前面不应该有空格 +``` + +指针和取地址 +```cpp +x = *p; // Good:*操作符和指针p之间不加空格 +p = &x; // Good:&操作符和变量x之间不加空格 +x = r.y; // Good:通过.访问成员变量时不加空格 +x = r->y; // Good:通过->访问成员变量时不加空格 +``` + +操作符: +```cpp +x = 0; // Good:赋值操作的=前后都要加空格 +x = -5; // Good:负数的符号和数值之前不要加空格 +++x; // Good:前置和后置的++/--和变量之间不要加空格 +x--; + +if (x && !y) // Good:布尔操作符前后要加上空格,!操作和变量之间不要空格 +v = w * x + y / z; // Good:二元操作符前后要加空格 +v = w * (x + z); // Good:括号内的表达式前后不需要加空格 + +int a = (x < y) ? x : y; // Good: 三目运算符, ?和:前后需要添加空格 +``` + +循环和条件语句: +```cpp +if (condition) { // Good:if关键字和括号之间加空格,括号内条件语句前后不加空格 + ... +} else { // Good:else关键字和大括号之间加空格 + ... +} + +while (condition) {} // Good:while关键字和括号之间加空格,括号内条件语句前后不加空格 + +for (int i = 0; i < someRange; ++i) { // Good:for关键字和括号之间加空格,分号之后加空格 + ... +} + +switch (condition) { // Good: switch 关键字后面有1空格 + case 0: // Good:case语句条件和冒号之间不加空格 + ... + break; + ... + default: + ... + break; +} +``` + +模板和转换 +```cpp +// 尖括号(< and >) 不与空格紧邻, < 前没有空格, > 和 ( 之间也没有. +vector x; +y = static_cast(x); + +// 在类型与指针操作符之间留空格也可以, 但要保持一致. +vector x; +``` + +域操作符 +```cpp +std::cout; // Good: 命名空间访问,不要留空格 + +int MyClass::GetValue() const {} // Good: 对于成员函数定义,不要留空格 +``` + +冒号 +```cpp +// 添加空格的场景 + +// Good: 类的派生需要留有空格 +class Sub : public Base { + +}; + +// 构造函数初始化列表需要留有空格 +MyClass::MyClass(int var) : someVar(var) { + DoSomething(); +} + +// 位域表示也留有空格 +struct XX { + char a : 4; + char b : 5; + char c : 4; +}; +``` + +```cpp +// 不添加空格的场景 + +// Good: 对于public:, private:这种类访问权限的冒号不用添加空格 +class MyClass { + public: + MyClass(int var); + private: + int someVar; +}; + +// 对于switch-case的case和default后面的冒号不用添加空格 +switch (value) { + case 1: + DoSomething(); + break; + default: + break; +} +``` + +注意:当前的集成开发环境(IDE)可以设置删除行尾的空格,请正确配置。 + +### 建议3.14.2 合理安排空行,保持代码紧凑 + +减少不必要的空行,可以显示更多的代码,方便代码阅读。下面有一些建议遵守的规则: +- 根据上下内容的相关程度,合理安排空行; +- 函数内部、类型定义内部、宏内部、初始化表达式内部,不使用连续空行 +- 不使用连续 **3** 个空行,或更多 +- 大括号内的代码块行首之前和行尾之后不要加空行。 + +```cpp +int Foo() { + ... +} + + +// Bad:两个函数定义间超过了一个空行 +int Bar() { + ... +} + + +if (...) { + // Bad:大括号内的代码块行首不要加入空行 + ... + // Bad:大括号内的代码块行尾不要加入空行 +} + +int Foo(...) { + // Bad:函数体内行首不要加空行 + ... 
+} +``` + +## 类 +### 规则3.15.1 类访问控制块的声明依次序是 public:, protected:, private:,每个都缩进 1 个空格 +```cpp +class MyClass : public BaseClass { + public: // 1空格缩进 + MyClass(); // 标准的2空格缩进 + explicit MyClass(int var); + ~MyClass() {} + + void SomeFunction(); + void SomeFunctionThatDoesNothing() { + } + + void SetVar(int var) { + someVar = var; + } + + int GetVar() const { + return someVar; + } + + private: + bool SomeInternalFunction(); + + int someVar; + int someOtherVar; +}; +``` + +在各个部分中,建议将类似的声明放在一起, 并且建议以如下的顺序: 类型 (包括 typedef, using 和嵌套的结构体与类), 常量, 工厂函数, 构造函数, 赋值运算符, 析构函数, 其它成员函数, 数据成员。 + + +### 规则3.15.2 构造函数初始化列表放在同一行或按四格缩进并排多行 +```cpp +// 如果所有变量能放在同一行: +MyClass::MyClass(int var) : someVar(var) { + DoSomething(); +} + +// 如果不能放在同一行, +// 必须置于冒号后, 并缩进4个空格 +MyClass::MyClass(int var) + : someVar(var), someOtherVar(var + 1) { // Good: 逗号后面留有空格 + DoSomething(); +} + +// 如果初始化列表需要置于多行, 需要逐行对齐 +MyClass::MyClass(int var) + : someVar(var), // 缩进4个空格 + someOtherVar(var + 1) { + DoSomething(); +} +``` + +# 4 注释 +一般的,尽量通过清晰的架构逻辑,好的符号命名来提高代码可读性;需要的时候,才辅以注释说明。 +注释是为了帮助阅读者快速读懂代码,所以要从读者的角度出发,__按需注释__。 + +注释内容要简洁、明了、无二义性,信息全面且不冗余。 + +__注释跟代码一样重要。__ +写注释时要换位思考,用注释去表达此时读者真正需要的信息。在代码的功能、意图层次上进行注释,即注释解释代码难以表达的意图,不要重复代码信息。 +修改代码时,也要保证其相关注释的一致性。只改代码,不改注释是一种不文明行为,破坏了代码与注释的一致性,让阅读者迷惑、费解,甚至误解。 + +## 注释风格 +在 C++ 代码中,使用` /* */`和` // `都是可以的。 +按注释的目的和位置,注释可分为不同的类型,如文件头注释、函数头注释、代码注释等等; +同一类型的注释应该保持统一的风格,建议: +1)文件头注释使用` /* */ `。 +2)同一文件内函数头注释、代码注释要使用相同的注释符,不可混用。 + +注意:__本文示例代码中,大量使用 '//' 后置注释只是为了更精确的描述问题,并不代表这种注释风格更好。__ + +## 文件头注释 +### 规则4.2.1 文件头注释必须包含版权许可 +```cpp +/* + * Copyright (c) [2019] [name of copyright holder] + * [Software Name] is licensed under the Mulan PSL v1. + * You can use this software according to the terms and conditions of the Mulan PSL v1. + * You may obtain a copy of Mulan PSL v1 at: + * http://license.coscl.org.cn/MulanPSL + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR + * PURPOSE. + * See the Mulan PSL v1 for more details. 
+ */ +``` + + + +## 函数头注释 +### 规则4.3.1 禁止空有格式的函数头注释 +并不是所有的函数都需要函数头注释; +函数签名无法表达的信息,加函数头注释辅助说明; + +函数头注释统一放在函数声明或定义上方,使用如下风格之一: +使用`//`写函数头 + +```cpp +// 单行函数头 +int Func1(void); + +// 多行函数头 +// 第二行 +int Func2(void); +``` + +使用`/* */`写函数头 +```cpp +/* 单行函数头 */ +int Func1(void); + +/* + * 另一种单行函数头 + */ +int Func2(void); + +/* + * 多行函数头 + * 第二行 + */ +int Func3(void); +``` +函数尽量通过函数名自注释,按需写函数头注释。 +不要写无用、信息冗余的函数头;不要写空有格式的函数头。 + +函数头注释内容可选,但不限于:功能说明、返回值,性能约束、用法、内存约定、算法实现、可重入的要求等等。 +模块对外头文件中的函数接口声明,其函数头注释,应当将重要、有用的信息表达清楚。 + +例: + +```cpp +/* + * 返回实际写入的字节数,-1表示写入失败 + * 注意,内存 buf 由调用者负责释放 + */ +int WriteString(const char *buf, int len); +``` + +坏的例子: +```cpp +/* + * 函数名:WriteString + * 功能:写入字符串 + * 参数: + * 返回值: + */ +int WriteString(const char *buf, int len); +``` +上面例子中的问题: + +- 参数、返回值,空有格式没内容 +- 函数名信息冗余 +- 关键的 buf 由谁释放没有说清楚 + +## 代码注释 +### 规则4.4.1 代码注释放于对应代码的上方或右边 +### 规则4.4.2 注释符与注释内容间要有1空格;右置注释与前面代码至少1空格 +代码上方的注释,应该保持对应代码一样的缩进。 +选择并统一使用如下风格之一: +使用`//` +```cpp + +// 这是单行注释 +DoSomething(); + +// 这是多行注释 +// 第二行 +DoSomething(); +``` + +使用`/*' '*/` +```cpp +/* 这是单行注释 */ +DoSomething(); + +/* + * 另一种方式的多行注释 + * 第二行 + */ +DoSomething(); +``` +代码右边的注释,与代码之间,至少留1空格,建议不超过4空格。 +通常使用扩展后的 TAB 键即可实现 1-4 空格的缩进。 + +选择并统一使用如下风格之一: + +```cpp +int foo = 100; // 放右边的注释 +int bar = 200; /* 放右边的注释 */ +``` +右置格式在适当的时候,上下对齐会更美观。 +对齐后的注释,离左边代码最近的那一行,保证1-4空格的间隔。 +例: + +```cpp +const int kConst = 100; /* 相关的同类注释,可以考虑上下对齐 */ +const int kAnotherConst = 200; /* 上下对齐时,与左侧代码保持间隔*/ +``` +当右置的注释超过行宽时,请考虑将注释置于代码上方。 + +### 规则4.4.3 不用的代码段直接删除,不要注释掉 +被注释掉的代码,无法被正常维护;当企图恢复使用这段代码时,极有可能引入易被忽略的缺陷。 +正确的做法是,不需要的代码直接删除掉。若再需要时,考虑移植或重写这段代码。 + +这里说的注释掉代码,包括用 /* */ 和 //,还包括 #if 0, #ifdef NEVER_DEFINED 等等。 + +### 建议4.4.1 代码尽量不要包含 TODO/TBD/FIXME 注释 +TODO/TBD 注释一般用来描述已知待改进、待补充的修改点 +FIXME 注释一般用来描述已知缺陷 +它们都应该有统一风格,方便文本搜索统一处理。如: + +```cpp +// TODO(): 补充XX处理 +// FIXME: XX缺陷 +``` + + +# 5 头文件 +## 头文件职责 +头文件是模块或文件的对外接口,头文件的设计体现了大部分的系统设计。 +头文件中适合放置接口的声明,不适合放置实现(内联函数除外)。对于cpp文件中内部才需要使用的函数、宏、枚举、结构定义等不要放在头文件中。 +头文件应当职责单一。头文件过于复杂,依赖过于复杂还是导致编译时间过长的主要原因。 + +### 建议5.1.1 每一个.cpp文件应有一个对应的.h文件,用于声明需要对外公开的类与接口 +通常情况下,每个.cpp文件都有一个相应的.h,用于放置对外提供的函数声明、宏定义、类型定义等。另外,可根据实际情况添加对应的.inline.h文件优化代码。 +如果一个.cpp文件不需要对外公布任何接口,则其就不应当存在。 +例外:__程序的入口(如main函数所在的文件),单元测试代码,动态库代码。__ + +示例: +```cpp +// Foo.h + +#ifndef FOO_H +#define FOO_H + +class Foo { + public: + Foo(); + void Fun(); + + private: + int value; +}; + +#endif +``` + +```cpp +// Foo.cpp +#include "Foo.h" + +namespace { // Good: 对内函数的声明放在.cpp文件的头部,并声明为匿名namespace或者static限制其作用域 +void Bar() { +} +} + +... + +void Foo::Fun() { + Bar(); +} +``` + +## 头文件依赖 +### 规则5.2.1 禁止头文件循环依赖 +头文件循环依赖,指 a.h 包含 b.h,b.h 包含 c.h,c.h 包含 a.h, 导致任何一个头文件修改,都导致所有包含了a.h/b.h/c.h的代码全部重新编译一遍。 +而如果是单向依赖,如a.h包含b.h,b.h包含c.h,而c.h不包含任何头文件,则修改a.h不会导致包含了b.h/c.h的源代码重新编译。 + +头文件循环依赖直接体现了架构设计上的不合理,可通过优化架构去避免。 + +### 规则5.2.2 禁止包含用不到的头文件 +用不到的头文件被包含的同时引入了不必要的依赖,增加了模块或单元之间的耦合度,只要该头文件被修改,代码就要重新编译。 + +很多系统中头文件包含关系复杂,开发人员为了省事起见,直接包含一切想到的头文件,甚至发布了一个god.h,其中包含了所有头文件,然后发布给各个项目组使用,这种只图一时省事的做法,导致整个系统的编译时间进一步恶化,并对后来人的维护造成了巨大的麻烦。 + +### 规则5.2.3 头文件应当自包含 +简单的说,自包含就是任意一个头文件均可独立编译。如果一个文件包含某个头文件,还要包含另外一个头文件才能工作的话,给这个头文件的用户增添不必要的负担。 + +示例: +如果a.h不是自包含的,需要包含b.h才能编译,会带来的危害: +每个使用a.h头文件的.cpp文件,为了让引入的a.h的内容编译通过,都要包含额外的头文件b.h。 +额外的头文件b.h必须在a.h之前进行包含,这在包含顺序上产生了依赖。 + + +### 规则5.2.4 头文件必须编写`#define`保护,防止重复包含 +为防止头文件被重复包含,所有头文件都应当使用 #define 保护;不要使用 #pragma once + +定义包含保护符时,应该遵守如下规则: +1)保护符使用唯一名称; +2)不要在受保护部分的前后放置代码或者注释,文件头注释除外。 + +示例:假定VOS工程的timer模块的timer.h,其目录为VOS/include/timer/Timer.h,应按如下方式保护: + +```cpp +#ifndef VOS_INCLUDE_TIMER_TIMER_H +#define VOS_INCLUDE_TIMER_TIMER_H +... 
+#endif +``` + +也可以不用像上面添加路径,但是要保证当前工程内宏是唯一的。 +```cpp +#ifndef TIMER_H +#define TIMER_H +... +#endif +``` + +### 建议5.2.1 禁止通过声明的方式引用外部函数接口、变量 +只能通过包含头文件的方式使用其他模块或文件提供的接口。 +通过 extern 声明的方式使用外部函数接口、变量,容易在外部接口改变时可能导致声明和定义不一致。 +同时这种隐式依赖,容易导致架构腐化。 + +不符合规范的案例: + +// a.cpp内容 +```cpp +extern int Fun(); // Bad: 通过extern的方式使用外部函数 + +void Bar() { + int i = Fun(); + ... +} +``` + +// b.cpp内容 +```cpp +int Fun() { + // Do something +} +``` +应该改为: + +// a.cpp内容 +```cpp +#include "b.h" // Good: 通过包含头文件的方式使用其他.cpp提供的接口 + +void Bar() { + int i = Fun(); + ... +} +``` + +// b.h内容 +```cpp +int Fun(); +``` + +// b.cpp内容 +```cpp +int Fun() { + // Do something +} +``` +例外,有些场景需要引用其内部函数,但并不想侵入代码时,可以 extern 声明方式引用。 +如: +针对某一内部函数进行单元测试时,可以通过 extern 声明来引用被测函数; +当需要对某一函数进行打桩、打补丁处理时,允许 extern 声明该函数。 + +### 规则5.2.5 禁止在extern "C"中包含头文件 +在 extern "C" 中包含头文件,有可能会导致 extern "C" 嵌套,部分编译器对 extern "C" 嵌套层次有限制,嵌套层次太多会编译错误。 + +在C,C++混合编程的情况下,在extern "C"中包含头文件,可能会导致被包含头文件的原有意图遭到破坏,比如链接规范被不正确地更改。 + +示例,存在a.h和b.h两个头文件: + +// a.h内容 +```cpp +... +#ifdef __cplusplus +void Foo(int); +#define A(value) Foo(value) +#else +void A(int) +#endif +``` +// b.h内容 +```cpp +... +#ifdef __cplusplus +extern "C" { +#endif + +#include "a.h" +void B(); + +#ifdef __cplusplus +} +#endif +``` + +使用C++预处理器展开b.h,将会得到 +```cpp +extern "C" { + void Foo(int); + void B(); +} +``` + +按照 a.h 作者的本意,函数 Foo 是一个 C++ 自由函数,其链接规范为 "C++"。 +但在 b.h 中,由于 `#include "a.h"` 被放到了 `extern "C"` 的内部,函数 Foo 的链接规范被不正确地更改了。 + +例外: +如果在 C++ 编译环境中,想引用纯C的头文件,这些C头文件并没有` extern "C"` 修饰。非侵入式的做法是,在 `extern "C"` 中去包含C头文件。 + +### 建议5.2.2尽量避免使用前置声明,而是通过`#include`来包含头文件 +前置声明(forward declaration)是类、函数和模板的纯粹声明,没伴随着其定义。 + +- 优点: + 1. 前置声明能够节省编译时间,多余的 #include 会迫使编译器展开更多的文件,处理更多的输入。 + 2. 前置声明能够节省不必要的重新编译的时间。 #include 使代码因为头文件中无关的改动而被重新编译多次。 +- 缺点: + 1. 前置声明隐藏了依赖关系,头文件改动时,用户的代码会跳过必要的重新编译过程。 + 2. 前置声明可能会被库的后续更改所破坏。前置声明函数或模板有时会妨碍头文件开发者变动其 API. 例如扩大形参类型,加个自带默认参数的模板形参等等。 + 3. 前置声明来自命名空间` std::` 的 symbol 时,其行为未定义(在C++11标准规范中明确说明)。 + 4. 前置声明了不少来自头文件的 symbol 时,就会比单单一行的 include 冗长。 + 5. 仅仅为了能前置声明而重构代码(比如用指针成员代替对象成员)会使代码变得更慢更复杂。 + 6. 很难判断什么时候该用前置声明,什么时候该用`#include`,某些场景下面前置声明和`#include`互换以后会导致意想不到的结果。 + +所以我们尽可能避免使用前置声明,而是使用#include头文件来保证依赖关系。 + +### 建议5.2.3 头文件包含顺序:首先是.cpp相应的.h文件,其它头文件按照稳定度排序 +使用标准的头文件包含顺序可增强可读性, 避免隐藏依赖,建议按照稳定度排序:cpp对应的头文件, C/C++标准库, 系统库的.h, 其他库的.h, 本项目内其他的.h。 + +举例,Foo.cpp中包含头文件的次序如下: +```cpp +#include "Foo/Foo.h" + +#include +#include + +#include +#include + +#include "platform/Base.h" +#include "platform/Framework.h" + +#include "project/public/Log.h" +``` +将Foo.h放在最前面可以保证当Foo.h遗漏某些必要的库,或者有错误时,Foo.cpp的构建会立刻中止,减少编译时间。 对于头文件中包含顺序也参照此建议。 + +例外: +平台特定代码需要条件编译,这些代码可以放到其它 includes 之后。 +```cpp +#include "foo/public/FooServer.h" + +#include "base/Port.h" // For LANG_CXX11. + +#ifdef LANG_CXX11 +#include +#endif // LANG_CXX11 +``` + +# 6 作用域 + +## 命名空间 +命名空间里的内容不缩进。 + +### 建议6.1.1 对于cpp文件中不需要导出的变量,常量或者函数,请使用匿名namespace封装或者用static修饰 +在C++ 2003标准规范中,使用static修饰文件作用域的变量,函数等被标记为deprecated特性,所以更推荐使用匿名namespace。 + +主要原因如下: +1. static在C++中已经赋予了太多的含义,静态函数成员变量,静态成员函数,静态全局变量,静态函数局部变量,每一种都有特殊的处理。 +2. static只能保证变量,常量和函数的文件作用域,但是namespace还可以封装类型等。 +3. 统一namespace来处理C++的作用域,而不需要同时使用static和namespace来管理。 +4. 
static修饰的函数不能用来实例化模板,而匿名namespace可以。 + +但是不要在 .h 中使用中使用匿名namespace或者static。 + +```cpp +// Foo.cpp + +namespace { +const int kMaxCount = 20; +void InternalFun(){}; +} + +void Foo::Fun() { + int i = kMaxCount; + + InternalFun(); +} + +``` + +### 规则6.1.1 不要在头文件中或者#include之前使用using导入命名空间 +说明:使用using导入命名空间会影响后续代码,易造成符号冲突,所以不要在头文件以及源文件中的#include之前使用using导入命名空间。 +示例: + +```cpp +// 头文件a.h +namespace namespacea { +int Fun(int); +} +``` + +```cpp +// 头文件b.h +namespace namespaceb { +int Fun(int); +} + +using namespace namespaceb; + +void G() { + Fun(1); +} +``` + +```cpp +// 源代码a.cpp +#include "a.h" +using namespace namespacea; +#include "b.h" + +void main() { + G(); // using namespace namespacea在#include “b.h”之前,引发歧义:namespacea::Fun,namespaceb::Fun调用不明确 +} +``` + +对于在头文件中使用using导入单个符号或定义别名,允许在模块自定义名字空间中使用,但禁止在全局名字空间中使用。 +```cpp +// foo.h + +#include +using fancy::string; // Bad,禁止向全局名字空间导入符号 + +namespace foo { +using fancy::string; // Good,可以在模块自定义名字空间中导入符号 +using MyVector = fancy::vector; // Good,C++11可在自定义名字空间中定义别名 +} +``` + + +### 规则6.1.2 禁止using namespace std; +说明:使用std前缀让代码更清楚,并且可以防止名字冲突。 + + +## 全局函数和静态成员函数 + +### 建议6.2.1 优先使用命名空间来管理全局函数,如果和某个class有直接关系的,可以使用静态成员函数 +说明:非成员函数放在名字空间内可避免污染全局作用域, 也不要用类+静态成员方法来简单管理全局函数。 如果某个全局函数和某个类有紧密联系, 那么可以作为类的静态成员函数。 + +如果你需要定义一些全局函数,给某个cpp文件使用,那么请使用匿名namespace来管理。 +```cpp +namespace mynamespace { +int Add(int a, int b); +} + +class File { + public: + static File CreateTempFile(const std::string& fileName); +}; +``` + +## 全局常量和静态成员常量 + +### 建议6.3.1 优先使用命名空间来管理全局常量,如果和某个class有直接关系的,可以使用静态成员常量 +说明:全局常量放在命名空间内可避免污染全局作用域, 也不要用类+静态成员常量来简单管理全局常量。 如果某个全局常量和某个类有紧密联系, 那么可以作为类的静态成员常量。 + +如果你需要定义一些全局常量,只给某个cpp文件使用,那么请使用匿名namespace来管理。 +```cpp +namespace mynamespace { +const int kMaxSize = 100; +} + +class File { + public: + static const std::string kName; +}; +``` + +## 全局变量 + +### 建议6.4.1 尽量避免使用全局变量,考虑使用单例模式 +说明:全局变量是可以修改和读取的,那么这样会导致业务代码和这个全局变量产生数据耦合。 +```cpp +int counter = 0; + +// a.cpp +counter++; + +// b.cpp +counter++; + +// c.cpp +cout << counter << endl; +``` + +使用单实例模式 +```cpp +class Counter { + public: + static Counter& GetInstance() { + static Counter counter; + return counter; + } // 单实例实现简单举例 + + void Increase() { + value++; + } + + void Print() const { + std::cout << value << std::endl; + } + + private: + Counter() : value(0) {} + + private: + int value; +}; + +// a.cpp +Counter::GetInstance().Increase(); + +// b.cpp +Counter::GetInstance().Increase(); + +// c.cpp +Counter::GetInstance().Print(); +``` + +实现单例模式以后,实现了全局唯一一个实例,和全局变量同样的效果,并且单实例提供了更好的封装性。 + +例外:有的时候全局变量的作用域仅仅是模块内部,这样进程空间里面就会有多个全局变量实例,每个模块持有一份,这种场景下是无法使用单例模式解决的。 + +# 7 类 + +如果仅有数据成员,使用结构体,其他使用类 + +## 构造,拷贝构造,赋值和析构函数 +构造,拷贝,移动和析构函数提供了对象的生命周期管理方法: +- 构造函数(constructor): `X()` +- 拷贝构造函数(copy constructor):`X(const X&)` +- 拷贝赋值操作符(copy assignment):`operator=(const X&)` +- 移动构造函数(move constructor):`X(X&&)` *C++11以后提供* +- 移动赋值操作符(move assignment):`operator=(X&&)` *C++11以后提供* +- 析构函数(destructor):`~X()` + +### 规则7.1.1 类的成员变量必须显式初始化 +说明:如果类有成员变量,没有定义构造函数,又没有定义默认构造函数,编译器将自动生成一个构造函数,但编译器生成的构造函数并不会对成员变量进行初始化,对象状态处于一种不确定性。 + +例外: +- 如果类的成员变量具有默认构造函数,那么可以不需要显式初始化。 + +示例:如下代码没有构造函数,私有数据成员无法初始化: +```cpp +class Message { + public: + void ProcessOutMsg() { + //… + } + private: + unsigned int msgID; + unsigned int msgLength; + unsigned char* msgBuffer; + std::string someIdentifier; +}; + +Message message; // message成员变量没有初始化 +message.ProcessOutMsg(); // 后续使用存在隐患 + +// 因此,有必要定义默认构造函数,如下: +class Message { + public: + Message() : msgID(0), msgLength(0) { + } + + void ProcessOutMsg() { + // … + } + + private: + unsigned 
int msgID; + unsigned int msgLength; + unsigned char* msgBuffer; + std::string someIdentifier; //具有默认构造函数,不需要显式初始化 +}; +``` + +### 建议7.1.1 成员变量优先使用声明时初始化(C++11)和构造函数初始化列表初始化 +说明:C++11的声明时初始化可以一目了然的看出成员初始值,应当优先使用。如果成员初始化值和构造函数相关,或者不支持C++11,则应当优先使用构造函数初始化列表来初始化成员。相比起在构造函数体中对成员赋值,初始化列表的代码更简洁,执行性能更好,而且可以对const成员和引用成员初始化。 + +```cpp +class Message { + public: + Message() : msgLength(0) { // Good,优先使用初始化列表 + msgBuffer = NULL; // Bad,不推荐在构造函数中赋值 + } + + private: + unsigned int msgID{0}; // Good,C++11中使用 + unsigned int msgLength; + unsigned char* msgBuffer; +}; +``` + +### 规则7.1.2 为避免隐式转换,将单参数构造函数声明为explicit +说明:单参数构造函数如果没有用explicit声明,则会成为隐式转换函数。 +示例: + +```cpp +class Foo { + public: + explicit Foo(const string& name): name(name) { + } + private: + string name; +}; + + +void ProcessFoo(const Foo& foo){} + +int main(void) { + std::string test = "test"; + ProcessFoo(test); // 编译不通过 + return 0; +} +``` + +上面的代码编译不通过,因为`ProcessFoo`需要的参数是Foo类型,传入的string类型不匹配。 + +如果将Foo构造函数的explicit关键字移除,那么调用`ProcessFoo`传入的string就会触发隐式转换,生成一个临时的Foo对象。往往这种隐式转换是让人迷惑的,并且容易隐藏Bug,得到了一个不期望的类型转换。所以对于单参数的构造函数是要求explicit声明。 + +### 规则7.1.3 如果不需要拷贝构造函数、赋值操作符 / 移动构造函数、赋值操作符,请明确禁止 +说明:如果用户不定义,编译器默认会生成拷贝构造函数和拷贝赋值操作符, 移动构造和移动赋值操作符(移动语义的函数C++11以后才有)。 +如果我们不要使用拷贝构造函数,或者赋值操作符,请明确拒绝: + +1.将拷贝构造函数或者赋值操作符设置为private,并且不实现: + +```cpp +class Foo { + private: + Foo(const Foo&); + Foo& operator=(const Foo&); +}; +``` +2.使用C++11提供的delete: + +```cpp +// 同时禁止, 使用C++11的delete +class Foo { + public: + Foo(Foo&&) = delete; + Foo& operator=(Foo&&) = delete; +}; +``` +3.静态方法类,禁用构造函数,防止创建实例 + +```cpp +class Helper { + public: + static bool DoSomething(); + + private: + Helper(); +}; +``` +4.单例类,禁用构造函数,拷贝构造函数,防止创建实例 + +```cpp +class Foo { + private: + static Foo *instance; + Foo() {} + Foo(const Foo &a); + Foo& operator=(const Foo &a); + public: + static Foo &Instance() { + if (!instance) { + instance = new Foo(); + } + return *instance; + } +}; +``` + +5.析构函数通过裸指针释放资源的,禁用拷贝构造、拷贝赋值,防止重复释放 + +```cpp +class Foo { + private: + FILE *fp; + Foo(const Foo &a); + Foo& operator=(const Foo &a); + public: + Foo() : fp(nullptr) {} + ~Foo() { + if (fp != nullptr) { + fclose(fp); + fp = nullptr; + } + } +}; + +Foo* Foo::instance = nullptr; +``` + +### 规则7.1.4 拷贝构造和拷贝赋值操作符应该是成对出现或者禁止 +拷贝构造函数和拷贝赋值操作符都是具有拷贝语义的,应该同时出现或者禁止。 + +```cpp +// 同时出现 +class Foo { + public: + ... + Foo(const Foo&); + Foo& operator=(const Foo&); + ... +}; + +// 同时default, C++11支持 +class Foo { + public: + Foo(const Foo&) = default; + Foo& operator=(const Foo&) = default; +}; + +// 同时禁止, C++11可以使用delete +class Foo { + private: + Foo(const Foo&); + Foo& operator=(const Foo&); +}; +``` + +### 规则7.1.5 移动构造和移动赋值操作符应该是成对出现或者禁止 +在C++11中增加了move操作,如果需要某个类支持移动操作,那么需要实现移动构造和移动赋值操作符。 + +移动构造函数和移动赋值操作符都是具有移动语义的,应该同时出现或者禁止。 +```cpp +// 同时出现 +class Foo { + public: + ... + Foo(Foo&&); + Foo& operator=(Foo&&); + ... 
+}; + +// 同时default, C++11支持 +class Foo { + public: + Foo(Foo&&) = default; + Foo& operator=(Foo&&) = default; +}; + +// 同时禁止, 使用C++11的delete +class Foo { + public: + Foo(Foo&&) = delete; + Foo& operator=(Foo&&) = delete; +}; +``` + +### 规则7.1.6 禁止在构造函数和析构函数中调用虚函数 +说明:在构造函数和析构函数中调用当前对象的虚函数,会导致未实现多态的行为。 +在C++中,一个基类一次只构造一个完整的对象。 + +示例:类Base是基类,Sub是派生类 +```cpp +class Base { + public: + Base(); + virtual void Log() = 0; // 不同的派生类调用不同的日志文件 +}; + +Base::Base() { // 基类构造函数 + Log(); // 调用虚函数Log +} + +class Sub : public Base { + public: + virtual void Log(); +}; +``` + +当执行如下语句: +`Sub sub;` +会先执行Sub的构造函数,但首先调用Base的构造函数,由于Base的构造函数调用虚函数Log,此时Log还是基类的版本,只有基类构造完成后,才会完成派生类的构造,从而导致未实现多态的行为。 +同样的道理也适用于析构函数。 + +### 建议7.1.2 类定义中的函数不要添加inline关键字 +说明:类定义中的函数默认是inline的。 + + +## 继承 + +### 规则7.2.1 基类的析构函数应该声明为virtual +说明:只有基类析构函数是virtual,通过多态调用的时候才能保证派生类的析构函数被调用。 + +示例:基类的析构函数没有声明为virtual导致了内存泄漏。 +```cpp +class Base { + public: + virtual std::string getVersion() = 0; + + ~Base() { + std::cout << "~Base" << std::endl; + } +}; +``` + +```cpp +class Sub : public Base { + public: + Sub() : numbers(nullptr) { + } + + ~Sub() { + delete[] numbers; + std::cout << "~Sub" << std::endl; + } + + int Init() { + const size_t numberCount = 100; + numbers = new (std::nothrow) int[numberCount]; + if (numbers == nullptr) { + return -1; + } + + ... + } + + std::string getVersion() { + return std::string("hello!"); + } +private: + int* numbers; +}; +``` + +```cpp +int main(int argc, char* args[]) { + Base* b = new Sub(); + + delete b; + return 0; +} +``` +由于基类Base的析构函数没有声明为virtual,当对象被销毁时,只会调用基类的析构函数,不会调用派生类Sub的析构函数,导致内存泄漏。 + + +### 规则7.2.2 禁止虚函数使用缺省参数值 +说明:在C++中,虚函数是动态绑定的,但函数的缺省参数却是在编译时就静态绑定的。这意味着你最终执行的函数是一个定义在派生类,但使用了基类中的缺省参数值的虚函数。为了避免虚函数重载时,因参数声明不一致给使用者带来的困惑和由此导致的问题,规定所有虚函数均不允许声明缺省参数值。 +示例:虚函数display缺省参数值text是由编译时刻决定的,而非运行时刻,没有达到多态的目的: +```cpp +class Base { + public: + virtual void Display(const std::string& text = "Base!") { + std::cout << text << std::endl; + } + + virtual ~Base(){} +}; + +class Sub : public Base { + public: + virtual void Display(const std::string& text = "Sub!") { + std::cout << text << std::endl; + } + + virtual ~Sub(){} +}; + +int main() { + Base* base = new Sub(); + Sub* sub = new Sub(); + + ... + + base->Display(); // 程序输出结果: Base! 而期望输出:Sub! + sub->Display(); // 程序输出结果: Sub! + + delete base; + delete sub; + return 0; +}; +``` + +### 规则7.2.3 禁止重新定义继承而来的非虚函数 +说明:因为非虚函数无法实现动态绑定,只有虚函数才能实现动态绑定:只要操作基类的指针,即可获得正确的结果。 + +示例: +```cpp +class Base { + public: + void Fun(); +}; + +class Sub : public Base { + public: + void Fun(); +}; + +Sub* sub = new Sub(); +Base* base = sub; + +sub->Fun(); // 调用子类的Fun +base->Fun(); // 调用父类的Fun +//... + +``` + +## 多重继承 +在实际开发过程中使用多重继承的场景是比较少的,因为多重继承使用过程中有下面的典型问题: +1. 菱形继承所带来的数据重复,以及名字二义性。因此,C++引入了virtual继承来解决这类问题; +2. 即便不是菱形继承,多个父类之间的名字也可能存在冲突,从而导致的二义性; +3. 如果子类需要扩展或改写多个父类的方法时,造成子类的职责不明,语义混乱; +4. 相对于委托,继承是一种白盒复用,即子类可以访问父类的protected成员, 这会导致更强的耦合。而多重继承,由于耦合了多个父类,相对于单根继承,这会产生更强的耦合关系。 + +多重继承具有下面的优点: +多重继承提供了一种更简单的组合来实现多种接口或者类的组装与复用。 + +所以,对于多重继承的只有下面几种情况下面才允许使用多重继承。 + +### 建议7.3.1 使用多重继承来实现接口分离与多角色组合 +如果某个类需要实现多重接口,可以通过多重继承把多个分离的接口组合起来,类似 scala 语言的 traits 混入。 + +```cpp +class Role1 {}; +class Role2 {}; +class Role3 {}; + +class Object1 : public Role1, public Role2 { + // ... +}; + +class Object2 : public Role2, public Role3 { + // ... 
+}; + +``` + +在C++标准库中也有类似的实现样例: +```cpp +class basic_istream {}; +class basic_ostream {}; + +class basic_iostream : public basic_istream, public basic_ostream { + +}; +``` + +## 重载 + +重载操作符要有充分理由,而且不要改变操作符原有语义,例如不要使用 ‘+’ 操作符来做减运算。 +操作符重载令代码更加直观,但也有一些不足: +- 混淆直觉,误以为该操作和内建类型一样是高性能的,忽略了性能降低的可能; +- 问题定位时不够直观,按函数名查找比按操作符显然更方便。 +- 重载操作符如果行为定义不直观(例如将‘+’ 操作符来做减运算),会让代码产生混淆。 +- 赋值操作符的重载引入的隐式转换会隐藏很深的bug。可以定义类似Equals()、CopyFrom()等函数来替代=,==操作符。 + + + +# 8 函数 +## 函数设计 +### 建议8.1.1 避免函数过长,函数不超过50行(非空非注释) +函数应该可以一屏显示完 (50行以内),只做一件事情,而且把它做好。 + +过长的函数往往意味着函数功能不单一,过于复杂,或过分呈现细节,未进行进一步抽象。 + +例外:某些实现算法的函数,由于算法的聚合性与功能的全面性,可能会超过50行。 + +即使一个长函数现在工作的非常好, 一旦有人对其修改, 有可能出现新的问题, 甚至导致难以发现的bug。 +建议将其拆分为更加简短并易于管理的若干函数,以便于他人阅读和修改代码。 + +## 内联函数 + +### 建议8.2.1 内联函数不超过10行(非空非注释) +**说明**:内联函数具有一般函数的特性,它与一般函数不同之处只在于函数调用的处理。一般函数进行调用时,要将程序执行权转到被调用函数中,然后再返回到调用它的函数中;而内联函数在调用时,是将调用表达式用内联函数体来替换。 + +内联函数只适合于只有 1~10 行的小函数。对一个含有许多语句的大函数,函数调用和返回的开销相对来说微不足道,也没有必要用内联函数实现,一般的编译器会放弃内联方式,而采用普通的方式调用函数。 + +如果内联函数包含复杂的控制结构,如循环、分支(switch)、try-catch 等语句,一般编译器将该函数视同普通函数。 +**虚函数、递归函数不能被用来做内联函数**。 + +## 函数参数 + +### 建议8.3.1 函数参数使用引用取代指针 + +**说明**:引用比指针更安全,因为它一定非空,且一定不会再指向其他目标;引用不需要检查非法的NULL指针。 + +选择 const 避免参数被修改,让代码阅读者清晰地知道该参数不被修改,可大大增强代码可读性。 + +### 建议8.3.2 使用强类型参数,避免使用void* +尽管不同的语言对待强类型和弱类型有自己的观点,但是一般认为c/c++是强类型语言,既然我们使用的语言是强类型的,就应该保持这样的风格。 +好处是尽量让编译器在编译阶段就检查出类型不匹配的问题。 + +使用强类型便于编译器帮我们发现错误,如下代码中注意函数 FooListAddNode 的使用: +```cpp +struct FooNode { + struct List link; + int foo; +}; + +struct BarNode { + struct List link; + int bar; +} + +void FooListAddNode(void *node) { // Bad: 这里用 void * 类型传递参数 + FooNode *foo = (FooNode *)node; + ListAppend(&fooList, &foo->link); +} + +void MakeTheList() { + FooNode *foo = nullptr; + BarNode *bar = nullptr; + ... + + FooListAddNode(bar); // Wrong: 这里本意是想传递参数 foo,但错传了 bar,却没有报错 +} +``` + +1. 可以使用模板函数来实现参数类型的变化。 +2. 
可以使用基类指针来实现多态。 + +### 建议8.3.3 函数的参数个数不超过5个 +函数的参数过多,会使得该函数易于受外部变化的影响,从而影响维护工作。函数的参数过多同时也会增大测试的工作量。 + +如果超过可以考虑: +- 看能否拆分函数 +- 看能否将相关参数合在一起,定义结构体 + +# 9 C++其他特性 + +## 常量与初始化 + +不变的值更易于理解、跟踪和分析,所以应该尽可能地使用常量代替变量,定义值的时候,应该把const作为默认的选项。 + +### 建议9.1.1 不允许使用宏来表示常量 + +**说明**:宏是简单的文本替换,在预处理阶段时完成,运行报错时直接报相应的值;跟踪调试时也是显示值,而不是宏名;宏没有类型检查,不安全;宏没有作用域。 + +```cpp +#define MAX_MSISDN_LEN 20 // 不好 + +// C++请使用const常量 +const int kMaxMsisdnLen = 20; // 好 + +// 对于C++11以上版本,可以使用constexpr +constexpr int kMaxMsisdnLen = 20; +``` + +### 建议9.1.2 一组相关的整型常量应定义为枚举 + +**说明**:枚举比`#define`或`const int`更安全。编译器会检查参数值是否位于枚举取值范围内,避免错误发生。 + +```cpp +// 好的例子: +enum Week { + kSunday, + kMonday, + kTuesday, + kWednesday, + kThursday, + kFriday, + kSaturday +}; + +enum Color { + kRed, + kBlack, + kBlue +}; + +void ColorizeCalendar(Week today, Color color); + +ColorizeCalendar(kBlue, kSunday); // 编译报错,参数类型错误 + +// 不好的例子: +const int kSunday = 0; +const int kMonday = 1; + +const int kRed = 0; +const int kBlack = 1; + +bool ColorizeCalendar(int today, int color); +ColorizeCalendar(kBlue, kSunday); // 不会报错 +``` + +当枚举值需要对应到具体数值时,须在声明时显式赋值。否则不需要显式赋值,以避免重复赋值,降低维护(增加、删除成员)工作量。 + +```cpp +// 好的例子:S协议里定义的设备ID值,用于标识设备类型 +enum DeviceType { + kUnknown = -1, + kDsmp = 0, + kIsmg = 1, + kWapportal = 2 +}; +``` + + + +### 建议9.1.3 不允许使用魔鬼数字 +所谓魔鬼数字即看不懂、难以理解的数字。 + +魔鬼数字并非一个非黑即白的概念,看不懂也有程度,需要自行判断。 +例如数字 12,在不同的上下文中情况是不一样的: +type = 12; 就看不懂,但 `month = year * 12`; 就能看懂。 +数字 0 有时候也是魔鬼数字,比如 `status = 0`; 并不能表达是什么状态。 + +解决途径: +对于局部使用的数字,可以增加注释说明 +对于多处使用的数字,必须定义 const 常量,并通过符号命名自注释。 + +禁止出现下列情况: +没有通过符号来解释数字含义,如` const int kZero = 0` +符号命名限制了其取值,如 `const int kXxTimerInterval = 300`,直接使用`kXxTimerInterval `来表示该常量是定时器的时间间隔。 + +### 规则9.1.1 常量应该保证单一职责 + +**说明**:一个常量只用来表示一个特定功能,即一个常量不能有多种用途。 + +```cpp +// 好的例子:协议A和协议B,手机号(MSISDN)的长度都是20。 +const unsigned int kAMaxMsisdnLen = 20; +const unsigned int kBMaxMsisdnLen = 20; + +// 或者使用不同的名字空间: +namespace namespace1 { +const unsigned int kMaxMsisdnLen = 20; +} + +namespace namespace2 { +const unsigned int kMaxMsisdnLen = 20; +} +``` + +### 建议9.1.4 禁止用memcpy_s、memset_s初始化非POD对象 + +**说明**:`POD`全称是`Plain Old Data`,是C++ 98标准(ISO/IEC 14882, first edition, 1998-09-01)中引入的一个概念,`POD`类型主要包括`int`, `char`, `float`,`double`,`enumeration`,`void`,指针等原始类型以及聚合类型,不能使用封装和面向对象特性(如用户定义的构造/赋值/析构函数、基类、虚函数等)。 + +由于非POD类型比如非聚合类型的class对象,可能存在虚函数,内存布局不确定,跟编译器有关,滥用内存拷贝可能会导致严重的问题。 + +即使对聚合类型的class,使用直接的内存拷贝和比较,破坏了信息隐蔽和数据保护的作用,也不提倡`memcpy_s`、`memset_s`操作。 + +对于POD类型的详细说明请参见附录。 + + +## 表达式 + +### 规则9.2.1 switch语句要有default分支 +大部分情况下,switch语句中要有default分支,保证在遗漏case标签处理时能够有一个缺省的处理行为。 + +特例: +如果switch条件变量是枚举类型,并且 case 分支覆盖了所有取值,则加上default分支处理有些多余。 +现代编译器都具备检查是否在switch语句中遗漏了某些枚举值的case分支的能力,会有相应的warning提示。 + +```cpp +enum Color { + kRed = 0, + kBlue +}; + +// 因为switch条件变量是枚举值,这里可以不用加default处理分支 +switch (color) { + case kRed: + DoRedThing(); + break; + case kBlue: + DoBlueThing(); + ... 
+ break; +} +``` + +### 建议9.2.1 表达式的比较,应当遵循左侧倾向于变化、右侧倾向于不变的原则 +当变量与常量比较时,如果常量放左边,如 if (MAX == v) 不符合阅读习惯,而 if (MAX > v) 更是难于理解。 +应当按人的正常阅读、表达习惯,将常量放右边。写成如下方式: +```cpp +if (value == MAX) { + +} + +if (value < MAX) { + +} +``` +也有特殊情况,如:`if (MIN < value && value < MAX)` 用来描述区间时,前半段是常量在左的。 + +不用担心将 '==' 误写成 '=',因为` if (value = MAX)` 会有编译告警,其他静态检查工具也会报错。让工具去解决笔误问题,代码要符合可读性第一。 + + +## 类型转换 + +避免使用类型分支来定制行为:类型分支来定制行为容易出错,是企图用C++编写C代码的明显标志。这是一种很不灵活的技术,要添加新类型时,如果忘记修改所有分支,编译器也不会告知。使用模板和虚函数,让类型自己而不是调用它们的代码来决定行为。 + +建议避免类型转换,我们在代码的类型设计上应该考虑到每种数据的数据类型是什么,而不是应该过度使用类型转换来解决问题。在设计某个基本类型的时候,请考虑: +- 是无符号还是有符号的 +- 是适合float还是double +- 是使用int8,int16,int32还是int64,确定整形的长度 + +但是我们无法禁止使用类型转换,因为C++语言是一门面向机器编程的语言,涉及到指针地址,并且我们会与各种第三方或者底层API交互,他们的类型设计不一定是合理的,在这个适配的过程中很容易出现类型转换。 + +例外:在调用某个函数的时候,如果我们不想处理函数结果,首先要考虑这个是否是你的最好的选择。如果确实不想处理函数的返回值,那么可以使用(void)转换来解决。 + +### 规则9.3.1 如果确定要使用类型转换,请使用有C++提供的类型转换,而不是C风格的类型转换 + +**说明**: + +C++提供的类型转换操作比C风格更有针对性,更易读,也更加安全,C++提供的转换有: +- 类型转换: +1. `dynamic_cast`:主要用于继承体系下行转换,`dynamic_cast`具有类型检查的功能,请做好基类和派生类的设计,避免使用dynamic_cast来进行转换。 +2. `static_cast`:和C风格转换相似可做值的强制转换,或上行转换(把派生类的指针或引用转换成基类的指针或引用)。该转换经常用于消除多重继承带来的类型歧义,是相对安全的。如果是纯粹的算数转换,那么请使用后面的大括号转换方式。 +3. `reinterpret_cast`:用于转换不相关的类型。`reinterpret_cast`强制编译器将某个类型对象的内存重新解释成另一种类型,这是一种不安全的转换,建议尽可能少用`reinterpret_cast`。 +4. `const_cast`:用于移除对象的`const`属性,使对象变得可修改,这样会破坏数据的不变性,建议尽可能少用。 + +- 算数转换: (C++11开始支持) + 对于那种算数转换,并且类型信息没有丢失的,比如float到double, int32到int64的转换,推荐使用大括号的初始方式。 +```cpp + double d{ someFloat }; + int64_t i{ someInt32 }; +``` + +### 建议9.3.1 避免使用`dynamic_cast` +1. `dynamic_cast`依赖于C++的RTTI, 让程序员在运行时识别C++类对象的类型。 +2. `dynamic_cast`的出现一般说明我们的基类和派生类设计出现了问题,派生类破坏了基类的契约,不得不通过`dynamic_cast`转换到子类进行特殊处理,这个时候更希望来改善类的设计,而不是通过`dynamic_cast`来解决问题。 + +### 建议9.3.2 避免使用`reinterpret_cast` + +**说明**:`reinterpret_cast`用于转换不相关类型。尝试用`reinterpret_cast`将一种类型强制转换另一种类型,这破坏了类型的安全性与可靠性,是一种不安全的转换。不同类型之间尽量避免转换。 + +### 建议9.3.3 避免使用`const_cast` + +**说明**:`const_cast`用于移除对象的`const`和`volatile`性质。 + +使用const_cast转换后的指针或者引用来修改const对象,行为是未定义的。 + +```cpp +// 不好的例子 +const int i = 1024; +int* p = const_cast(&i); +*p = 2048; // 未定义行为 +``` + +```cpp +// 不好的例子 +class Foo { + public: + Foo() : i(3) {} + + void Fun(int v) { + i = v; + } + + private: + int i; +}; + +int main(void) { + const Foo f; + Foo* p = const_cast(&f); + p->Fun(8); // 未定义行为 +} + +``` + + +## 资源分配和释放 + +### 规则9.4.1 单个对象释放使用delete,数组对象释放使用delete [] +说明:单个对象删除使用delete, 数组对象删除使用delete [],原因: + +- 调用new所包含的动作:从系统中申请一块内存,并调用此类型的构造函数。 +- 调用new[n]所包含的动作:申请可容纳n个对象的内存,并且对每一个对象调用其构造函数。 +- 调用delete所包含的动作:先调用相应的析构函数,再将内存归还系统。 +- 调用delete[]所包含的动作:对每一个对象调用析构函数,再释放所有内存 + +如果new和delete的格式不匹配,结果是未知的。对于非class类型, new和delete不会调用构造与析构函数。 + +错误写法: +```cpp +const int KMaxArraySize = 100; +int* numberArray = new int[KMaxArraySize]; +... +delete numberArray; +numberArray = NULL; +``` + +正确写法: +```cpp +const int KMaxArraySize = 100; +int* numberArray = new int[KMaxArraySize]; +... 
+delete[] numberArray; +numberArray = NULL; +``` + +## 标准库 + +STL标准模板库在不同模块使用程度不同,这里列出一些基本规则和建议。 + +### 规则9.5.1 不要保存std::string的c_str()返回的指针 + +说明:在C++标准中并未规定string::c_str()指针持久有效,因此特定STL实现完全可以在调用string::c_str()时返回一个临时存储区并很快释放。所以为了保证程序的可移植性,不要保存string::c_str()的结果,而是在每次需要时直接调用。 + +示例: + +```cpp +void Fun1() { + std::string name = "demo"; + const char* text = name.c_str(); // 表达式结束以后,name的生命周期还在,指针有效 + + // 如果中间调用了string的非const成员函数,导致string被修改,比如operator[], begin()等 + // 可能会导致text的内容不可用,或者不是原来的字符串 + name = "test"; + name[1] = '2'; + + // 后续使用text指针,其字符串内容不再是"demo" +} + +void Fun2() { + std::string name = "demo"; + std::string test = "test"; + const char* text = (name + test).c_str(); // 表达式结束以后,+号产生的临时对象被销毁,指针无效 + + // 后续使用text指针,其已不再指向合法内存空间 +} +``` +例外:在少数对性能要求非常高的代码中,为了适配已有的只接受const char*类型入参的函数,可以临时保存string::c_str()返回的指针。但是必须严格保证string对象的生命周期长于所保存指针的生命周期,并且保证在所保存指针的生命周期内,string对象不会被修改。 + + +### 建议9.5.1 使用std::string代替char* + +说明:使用string代替`char*`有很多优势,比如: +1. 不用考虑结尾的’\0’; +2. 可以直接使用+, =, ==等运算符以及其它字符串操作函数; +3. 不需要考虑内存分配操作,避免了显式的new/delete,以及由此导致的错误; + +需要注意的是某些stl实现中string是基于写时复制策略的,这会带来2个问题,一是某些版本的写时复制策略没有实现线程安全,在多线程环境下会引起程序崩溃;二是当与动态链接库相互传递基于写时复制策略的string时,由于引用计数在动态链接库被卸载时无法减少可能导致悬挂指针。因此,慎重选择一个可靠的stl实现对于保证程序稳定是很重要的。 + +例外: +当调用系统或者其它第三方库的API时,针对已经定义好的接口,只能使用`char*`。但是在调用接口之前都可以使用string,在调用接口时使用string::c_str()获得字符指针。 +当在栈上分配字符数组当作缓冲区使用时,可以直接定义字符数组,不要使用string,也没有必要使用类似`vector`等容器。 + +### 规则9.5.2 禁止使用auto_ptr +说明:在stl库中的std::auto_ptr具有一个隐式的所有权转移行为,如下代码: +```cpp +auto_ptr p1(new T); +auto_ptr p2 = p1; +``` +当执行完第2行语句后,p1已经不再指向第1行中分配的对象,而是变为NULL。正因为如此,auto_ptr不能被置于各种标准容器中。 +转移所有权的行为通常不是期望的结果。对于必须转移所有权的场景,也不应该使用隐式转移的方式。这往往需要程序员对使用auto_ptr的代码保持额外的谨慎,否则出现对空指针的访问。 +使用auto_ptr常见的有两种场景,一是作为智能指针传递到产生auto_ptr的函数外部,二是使用auto_ptr作为RAII管理类,在超出auto_ptr的生命周期时自动释放资源。 +对于第1种场景,可以使用std::shared_ptr来代替。 +对于第2种场景,可以使用C++11标准中的std::unique_ptr来代替。其中std::unique_ptr是std::auto_ptr的代替品,支持显式的所有权转移。 + +例外: +在C++11标准得到普遍使用之前,在一定需要对所有权进行转移的场景下,可以使用std::auto_ptr,但是建议对std::auto_ptr进行封装,并禁用封装类的拷贝构造函数和赋值运算符,以使该封装类无法用于标准容器。 + + +### 建议9.5.2 使用新的标准头文件 + +说明: +使用C++的标准头文件时,请使用``这样的,而不是``这种的。 + +## const的用法 +在声明的变量或参数前加上关键字 const 用于指明变量值不可被篡改 (如 `const int foo` ). 为类中的函数加上 const 限定符表明该函数不会修改类成员变量的状态 (如 `class Foo { int Bar(char c) const; };`)。 const 变量, 数据成员, 函数和参数为编译时类型检测增加了一层保障, 便于尽早发现错误。因此, 我们强烈建议在任何可能的情况下使用 const。 +有时候,使用C++11的constexpr来定义真正的常量可能更好。 + +### 规则9.6.1 对于指针和引用类型的形参,如果是不需要修改的,请使用const +不变的值更易于理解/跟踪和分析,把const作为默认选项,在编译时会对其进行检查,使代码更牢固/更安全。 +```cpp +class Foo; + +void PrintFoo(const Foo& foo); +``` + +### 规则9.6.2 对于不会修改成员变量的成员函数请使用const修饰 +尽可能将成员函数声明为 const。 访问函数应该总是 const。只要不修改数据成员的成员函数,都声明为const。 + +```cpp +class Foo { + public: + + // ... + + int PrintValue() const { // const修饰成员函数,不会修改成员变量 + std::cout << value << std::endl; + } + + int GetValue() const { // const修饰成员函数,不会修改成员变量 + return value; + } + + private: + int value; +}; +``` + +### 建议9.6.1 初始化后不会再修改的成员变量定义为const + +```cpp +class Foo { + public: + Foo(int length) : dataLength(length) {} + private: + const int dataLength; +}; +``` + +## 模板 + +模板能够实现非常灵活简洁的类型安全的接口,实现类型不同但是行为相同的代码复用。 + +模板编程的缺点: + +1. 模板编程所使用的技巧对于使用c++不是很熟练的人是比较晦涩难懂的。在复杂的地方使用模板的代码让人更不容易读懂,并且debug 和维护起来都很麻烦。 +2. 模板编程经常会导致编译出错的信息非常不友好: 在代码出错的时候, 即使这个接口非常的简单, 模板内部复杂的实现细节也会在出错信息显示. 导致这个编译出错信息看起来非常难以理解。 +3. 模板如果使用不当,会导致运行时代码过度膨胀。 +4. 
模板代码难以修改和重构。模板的代码会在很多上下文里面扩展开来, 所以很难确认重构对所有的这些展开的代码有用。 + +所以, 建议__模板编程最好只用在少量的基础组件,基础数据结构上面__。并且使用模板编程的时候尽可能把__复杂度最小化__,尽量__不要让模板对外暴露__。最好只在实现里面使用模板, 然后给用户暴露的接口里面并不使用模板, 这样能提高你的接口的可读性。 并且你应该在这些使用模板的代码上写尽可能详细的注释。 + + +## 宏 +在C++语言中,我们强烈建议尽可能少使用复杂的宏 +- 对于常量定义,请按照前面章节所述,使用const或者枚举; +- 对于宏函数,尽可能简单,并且遵循下面的原则,并且优先使用内联函数,模板函数等进行替换。 + +```cpp +// 不推荐使用宏函数 +#define SQUARE(a, b) ((a) * (b)) + +// 请使用模板函数,内联函数等来替换。 +template T Square(T a, T b) { return a * b; } +``` + +如果需要使用宏,请参考C语言规范的相关章节。 +**例外**:一些通用且成熟的应用,如:对 new, delete 的封装处理,可以保留对宏的使用。 + + +## 其他 + +### 建议9.9.1 输出到文件时,尽量使用'\n'代替std::endl; +说明:std::endl会将缓冲区的内容flush到文件中,可能会影响性能。 + +# 10 现代C++特性 + +随着 ISO 在2011年发布 C++11 语言标准,以及2017年3月发布 C++17 ,现代C++(C++11/14/17等)增加了大量提高编程效率、代码质量的新语言特性和标准库。 +本章节描述了一些可以帮助团队更有效率的使用现代C++,规避语言陷阱的指导意见。 + +## 代码简洁性和安全性提升 +### 建议10.1.1 合理使用`auto` +**理由** + +* `auto`可以避免编写冗长、重复的类型名,也可以保证定义变量时初始化。 +* `auto`类型推导规则复杂,需要仔细理解。 +* 如果能够使代码更清晰,继续使用明确的类型,且只在局部变量使用`auto`。 + +**示例** + +```cpp +// 避免冗长的类型名 +std::map::iterator iter = m.find(val); +auto iter = m.find(val); + +// 避免重复类型名 +class Foo {...}; +Foo* p = new Foo; +auto p = new Foo; + +// 保证初始化 +int x; // 编译正确,没有初始化 +auto x; // 编译失败,必须初始化 +``` + +auto 的类型推导可能导致困惑: + +```cpp +auto a = 3; // int +const auto ca = a; // const int +const auto& ra = a; // const int& +auto aa = ca; // int, 忽略 const 和 reference +auto ila1 = { 10 }; // std::initializer_list +auto ila2{ 10 }; // std::initializer_list + +auto&& ura1 = x; // int& +auto&& ura2 = ca; // const int& +auto&& ura3 = 10; // int&& + +const int b[10]; +auto arr1 = b; // const int* +auto& arr2 = b; // const int(&)[10] +``` + +如果没有注意 `auto` 类型推导时忽略引用,可能引入难以发现的性能问题: + +```cpp +std::vector v; +auto s1 = v[0]; // auto 推导为 std::string,拷贝 v[0] +``` + +如果使用`auto`定义接口,如头文件中的常量,可能因为开发人员修改了值,而导致类型发生变化。 + +在循环中,考虑使用auto &和auto *去遍历复杂对象,以提升性能。 + +```cpp +for (auto &stmt : bb->GetStmtNodes()) { +... +} +``` + +### 规则10.1.1 在重写虚函数时请使用`override`关键字 +**理由** +`override`关键字保证函数是虚函数,且重写了基类的虚函数。如果子类函数与基类函数原型不一致,则产生编译告警。 + +如果修改了基类虚函数原型,但忘记修改子类重写的虚函数,在编译期就可以发现。也可以避免有多个子类时,重写函数的修改遗漏。 + +**示例** + +```cpp +class Base { + public: + virtual void Foo(); + void Bar(); +}; + +class Derived : public Base { + public: + void Foo() const override; // 编译失败: derived::Foo 和 base::Foo 原型不一致,不是重写 + void Foo() override; // 正确: derived::Foo 重写 base::Foo + void Bar() override; // 编译失败: base::Bar 不是虚函数 +}; +``` + +**总结** +1. 基类首次定义虚函数,使用`virtual`关键字 +2. 子类重写基类虚函数,使用`override`关键字 +3. 非虚函数,`virtual`和`override`都不使用 + +### 规则10.1.2 使用`delete`关键字删除函数 +**理由** +相比于将类成员函数声明为`private`但不实现,`delete`关键字更明确,且适用范围更广。 + +**示例** + +```cpp +class Foo { + private: + // 只看头文件不知道拷贝构造是否被删除 + Foo(const Foo&); +}; + +class Foo { + public: + // 明确删除拷贝赋值函数 + Foo& operator=(const Foo&) = delete; +}; +``` + +`delete`关键字还支持删除非成员函数 + +```cpp +template +void Process(T value); + +template<> +void Process(void) = delete; +``` + +### 规则10.1.3 使用`nullptr`,而不是`NULL`或`0` +**理由** +长期以来,C++没有一个代表空指针的关键字,这是一件很尴尬的事: + +```cpp +#define NULL ((void *)0) + +char* str = NULL; // 错误: void* 不能自动转换为 char* + +void(C::*pmf)() = &C::Func; +if (pmf == NULL) {} // 错误: void* 不能自动转换为指向成员函数的指针 +``` + +如果把`NULL`被定义为`0`或`0L`。可以解决上面的问题。 + +或者在需要空指针的地方直接使用`0`。但这引入另一个问题,代码不清晰,特别是使用`auto`自动推导: + +```cpp +auto result = Find(id); +if (result == 0) { // Find() 返回的是 指针 还是 整数? 
+ // do something +} +``` + +`0`字面上是`int`类型(`0L`是`long`),所以`NULL`和`0`都不是指针类型。 +当重载指针和整数类型的函数时,传递`NULL`或`0`都调用到整数类型重载的函数: + +```cpp +void F(int); +void F(int*); + +F(0); // 调用 F(int),而非 F(int*) +F(NULL); // 调用 F(int),而非 F(int*) +``` + +另外,`sizeof(NULL) == sizeof(void*)`并不一定总是成立的,这也是一个潜在的风险。 + +总结: 直接使用`0`或`0L`,代码不清晰,且无法做到类型安全;使用`NULL`无法做到类型安全。这些都是潜在的风险。 + +`nullptr`的优势不仅仅是在字面上代表了空指针,使代码清晰,而且它不再是一个整数类型。 + +`nullptr`是`std::nullptr_t`类型,而`std::nullptr_t`可以隐式的转换为所有的原始指针类型,这使得`nullptr`可以表现成指向任意类型的空指针。 + +```cpp +void F(int); +void F(int*); +F(nullptr); // 调用 F(int*) + +auto result = Find(id); +if (result == nullptr) { // Find() 返回的是 指针 + // do something +} +``` + +### 建议10.1.2 使用`using`而非`typedef` +在`C++11`之前,可以通过`typedef`定义类型的别名。没人愿意多次重复`std::map>`这样的代码。 + +```cpp +typedef std::map> SomeType; +``` + +类型的别名实际是对类型的封装。而通过封装,可以让代码更清晰,同时在很大程度上避免类型变化带来的散弹式修改。 +在`C++11`之后,提供`using`,实现`声明别名(alias declarations)`: + +```cpp +using SomeType = std::map>; +``` + +对比两者的格式: + +```cpp +typedef Type Alias; // Type 在前,还是 Alias 在前 +using Alias = Type; // 符合'赋值'的用法,容易理解,不易出错 +``` + +如果觉得这点还不足以切换到`using`,我们接着看看`模板别名(alias template)`: + +```cpp +// 定义模板的别名,一行代码 +template +using MyAllocatorVector = std::vector>; + +MyAllocatorVector data; // 使用 using 定义的别名 + +template +class MyClass { + private: + MyAllocatorVector data_; // 模板类中使用 using 定义的别名 +}; +``` + +而`typedef`不支持带模板参数的别名,只能"曲线救国": + +```cpp +// 通过模板包装 typedef,需要实现一个模板类 +template +struct MyAllocatorVector { + typedef std::vector> type; +}; + +MyAllocatorVector::type data; // 使用 typedef 定义的别名,多写 ::type + +template +class MyClass { + private: + typename MyAllocatorVector::type data_; // 模板类中使用,除了 ::type,还需要加上 typename +}; +``` + +### 规则10.1.4 禁止使用std::move操作const对象 +从字面上看,`std::move`的意思是要移动一个对象。而const对象是不允许修改的,自然也无法移动。因此用`std::move`操作const对象会给代码阅读者带来困惑。 +在实际功能上,`std::move`会把对象转换成右值引用类型;对于const对象,会将其转换成const的右值引用。由于极少有类型会定义以const右值引用为参数的移动构造函数和移动赋值操作符,因此代码实际功能往往退化成了对象拷贝而不是对象移动,带来了性能上的损失。 + +**错误示例:** +```cpp +std::string gString; +std::vector gStringList; + +void func() { + const std::string myString = "String content"; + gString = std::move(myString); // bad:并没有移动myString,而是进行了复制 + const std::string anotherString = "Another string content"; + gStringList.push_back(std::move(anotherString)); // bad:并没有移动anotherString,而是进行了复制 +} +``` + +## 智能指针 +### 建议10.2.1 优先使用智能指针而不是原始指针管理资源 +**理由** +避免资源泄露。 + +**示例** + +```cpp +void Use(int i) { + auto p = new int {7}; // 不好: 通过 new 初始化局部指针 + auto q = std::make_unique(9); // 好: 保证释放内存 + if (i > 0) { + return; // 可能 return,导致内存泄露 + } + delete p; // 太晚了 +} +``` + +**例外** +在性能敏感、兼容性等场景可以使用原始指针。 + +### 规则10.2.1 优先使用`unique_ptr`而不是`shared_ptr` +**理由** +1. `shared_ptr`引用计数的原子操作存在可测量的开销,大量使用`shared_ptr`影响性能。 +2. 共享所有权在某些情况(如循环依赖)可能导致对象永远得不到释放。 +3. 相比于谨慎设计所有权,共享所有权是一种诱人的替代方案,但它可能使系统变得混乱。 + +### 规则10.2.2 使用`std::make_unique`而不是`new`创建`unique_ptr` +**理由** +1. `make_unique`提供了更简洁的创建方式 +2. 保证了复杂表达式的异常安全 + +**示例** + +```cpp +// 不好:两次出现 MyClass,重复导致不一致风险 +std::unique_ptr ptr(new MyClass(0, 1)); +// 好:只出现一次 MyClass,不存在不一致的可能 +auto ptr = std::make_unique(0, 1); +``` + +重复出现类型可能导致非常严重的问题,且很难发现: + +```cpp +// 编译正确,但new和delete不配套 +std::unique_ptr ptr(new uint8_t[10]); +std::unique_ptr ptr(new uint8_t); +// 非异常安全: 编译器可能按如下顺序计算参数: +// 1. 分配 Foo 的内存, +// 2. 构造 Foo, +// 3. 调用 Bar, +// 4. 构造 unique_ptr. +// 如果 Bar 抛出异常, Foo 不会被销毁,产生内存泄露。 +F(unique_ptr(new Foo()), Bar()); + +// 异常安全: 调用函数不会被打断. 
+F(make_unique(), Bar()); +``` + +**例外** +`std::make_unique`不支持自定义`deleter`。 +在需要自定义`deleter`的场景,建议在自己的命名空间实现定制版本的`make_unique`。 +使用`new`创建自定义`deleter`的`unique_ptr`是最后的选择。 + +### 规则10.2.3 使用`std::make_shared`而不是`new`创建`shared_ptr` +**理由** +使用`std::make_shared`除了类似`std::make_unique`一致性等原因外,还有性能的因素。 +`std::shared_ptr`管理两个实体: +* 控制块(存储引用计数,`deleter`等) +* 管理对象 + +`std::make_shared`创建`std::shared_ptr`,会一次性在堆上分配足够容纳控制块和管理对象的内存。而使用`std::shared_ptr(new MyClass)`创建`std::shared_ptr`,除了`new MyClass`会触发一次堆分配外,`std::shard_ptr`的构造函数还会触发第二次堆分配,产生额外的开销。 + +**例外** +类似`std::make_unique`,`std::make_shared`不支持定制`deleter` + +## Lambda +### 建议10.3.1 当函数不能工作时选择使用`lambda`(捕获局部变量,或编写局部函数) +**理由** +函数无法捕获局部变量或在局部范围内声明;如果需要这些东西,尽可能选择`lambda`,而不是手写的`functor`。 +另一方面,`lambda`和`functor`不会重载;如果需要重载,则使用函数。 +如果`lambda`和函数都可以的场景,则优先使用函数;尽可能使用最简单的工具。 + +**示例** + +```cpp +// 编写一个只接受 int 或 string 的函数 +// -- 重载是自然的选择 +void F(int); +void F(const string&); + +// 需要捕获局部状态,或出现在语句或表达式范围 +// -- lambda 是自然的选择 +vector v = LotsOfWork(); +for (int taskNum = 0; taskNum < max; ++taskNum) { + pool.Run([=, &v] {...}); +} +pool.Join(); +``` + +### 规则10.3.1 非局部范围使用`lambdas`,避免使用按引用捕获 +**理由** +非局部范围使用`lambdas`包括返回值,存储在堆上,或者传递给其它线程。局部的指针和引用不应该在它们的范围外存在。`lambdas`按引用捕获就是把局部对象的引用存储起来。如果这会导致超过局部变量生命周期的引用存在,则不应该按引用捕获。 + +**示例** + +```cpp +// 不好 +void Foo() { + int local = 42; + // 按引用捕获 local. + // 当函数返回后,local 不再存在, + // 因此 Process() 的行为未定义! + threadPool.QueueWork([&]{ Process(local); }); +} + +// 好 +void Foo() { + int local = 42; + // 按值捕获 local。 + // 因为拷贝,Process() 调用过程中,local 总是有效的 + threadPool.QueueWork([=]{ Process(local); }); +} +``` + +### 建议10.3.2 如果捕获`this`,则显式捕获所有变量 +**理由** +在成员函数中的`[=]`看起来是按值捕获。但因为是隐式的按值获取了`this`指针,并能够操作所有成员变量,数据成员实际是按引用捕获的,一般情况下建议避免。如果的确需要这样做,明确写出对`this`的捕获。 + +**示例** + +```cpp +class MyClass { + public: + void Foo() { + int i = 0; + + auto Lambda = [=]() { Use(i, data_); }; // 不好: 看起来像是拷贝/按值捕获,成员变量实际上是按引用捕获 + + data_ = 42; + Lambda(); // 调用 use(42); + data_ = 43; + Lambda(); // 调用 use(43); + + auto Lambda2 = [i, this]() { Use(i, data_); }; // 好,显式指定按值捕获,最明确,最少的混淆 + } + + private: + int data_ = 0; +}; +``` + +### 建议10.3.3 避免使用默认捕获模式 +**理由** +lambda表达式提供了两种默认捕获模式:按引用(&)和按值(=)。 +默认按引用捕获会隐式的捕获所有局部变量的引用,容易导致访问悬空引用。相比之下,显式的写出需要捕获的变量可以更容易的检查对象生命周期,减小犯错可能。 +默认按值捕获会隐式的捕获this指针,且难以看出lambda函数所依赖的变量是哪些。如果存在静态变量,还会让阅读者误以为lambda拷贝了一份静态变量。 +因此,通常应当明确写出lambda需要捕获的变量,而不是使用默认捕获模式。 + +**错误示例** +```cpp +auto func() { + int addend = 5; + static int baseValue = 3; + + return [=]() { // 实际上只复制了addend + ++baseValue; // 修改会影响静态变量的值 + return baseValue + addend; + }; +} +``` + +**正确示例** +```cpp +auto func() { + int addend = 5; + static int baseValue = 3; + + return [addend, baseValue = baseValue]() mutable { // 使用C++14的捕获初始化拷贝一份变量 + ++baseValue; // 修改自己的拷贝,不会影响静态变量的值 + return baseValue + addend; + }; +} +``` + +参考:《Effective Modern C++》:Item 31: Avoid default capture modes. + +## 接口 +### 建议10.4.1 不涉及所有权的场景,使用`T*`或`T&`作为参数,而不是智能指针 +**理由** +1. 只在需要明确所有权机制时,才通过智能指针转移或共享所有权. +2. 通过智能指针传递,限制了函数调用者必须使用智能指针(如调用者希望传递`this`)。 +3. 传递共享所有权的智能指针存在运行时的开销。 + +**示例** + +```cpp +// 接受任何 int* +void F(int*); + +// 只能接受希望转移所有权的 int +void G(unique_ptr); + +// 只能接受希望共享所有权的 int +void G(shared_ptr); + +// 不改变所有权,但需要特定所有权的调用者 +void H(const unique_ptr&); + +// 接受任何 int +void H(int&); + +// 不好 +void F(shared_ptr& w) { + // ... + Use(*w); // 只使用 w -- 完全不涉及生命周期管理 + // ... +}; +``` + + +# 11 安全编码规范 + +## 基本原则 + +1. 程序在处理外部数据时必须经过严格的合法性校验。编程人员在处理外部数据过程中必须时刻保持这种思维意 +识,不能做出任何外部数据符合预期的假设,外部数据必须经过严格判断后才能使用。编码人员必须在这种严 +酷的攻击环境下通过遵守这一原则保证程序的执行过程符合预期结果。 +2. 
尽量减少代码的攻击面,代码的实现应该尽量简单,避免与外部环境做多余的数据交互,过多的攻击面增加了 +被攻击的概率,尽量避免将程序内部的数据处理过程暴露到外部环境。 +3. 通过防御性的编码策略来弥补潜在的编码人员的疏忽。粗心是人类的天性。由于外部环境的不确定性,以及编 +码人员的经验、习惯的差异,代码的执行过程很难达到完全符合预期设想的情况。因此在编码过程中必须采取 +防御性的策略,尽量缓解由于编码人员疏忽导致的缺陷。这些措施包括: + +- 变量声明应该赋予初值 +- 谨慎使用全局变量 +- 避免使用功能复杂、易用错的函数 +- 禁用易用错的编译器/操作系统的机制 +- 小心处理资源访问过程 +- 不要改变操作系统的运行环境(创建临时文件、修改环境变量、创建进程等) +- 严格的错误处理 +- 合理使用调试断言(ASSERT) + +## 变量 + +### 规则11.2.1 指针变量、表示资源描述符的变量、BOOL变量声明必须赋予初值 + +说明:变量声明赋予初值,可以避免由于编程人员的疏忽导致的变量未初始化引用。 + +正确示例: + +```cpp +SOCKET s = INVALID_SOCKET; +unsigned char *msg = nullptr; +int fd = -1; +``` + +错误示例:以下代码,由于变量声明未赋予初值,在最后free的时候出错。 + +```cpp +char *message; // 错误!必须声明为 char *message = nullptr; + +if (condition) { + message = (char *)malloc(len); +} + +if (message != nullptr) { + free(message); //如果condition未满足,会造成free未初始化的内存。 +} +``` + +### 规则11.2.2 指向资源句柄或描述符的变量,在资源释放后立即赋予新值 + +说明:资源释放后,对应的变量应该立即赋予新值,防止后续又被重新引用。如果释放语句刚好在变量作用域的最后一句,可以不进行赋值。 + +正确示例: + +```cpp +SOCKET s = INVALID_SOCKET; +... +closesocket(s); +s = INVALID_SOCKET; + +unsigned char *msg = nullptr; +... +free(msg); +msg = nullptr; +``` + +### 规则11.2.3 同一个函数内,局部变量所占用的空间不要过大 + +程序在运行期间,函数内的局部变量保存在栈中,栈的大小是有限的。如果申请过大的静态数组,可能导致出现运行 +出错。 建议在申请静态数组的时候,大小不超过0x1000。 + +下面的代码,buff申请过大,导致栈空间不够,程序发生stackoverflow异常。 + +```c++ +constexpr int MAX_BUF = 0x1000000; +int Foo() { + char buff[MAX_BUFF] = {0}; // Bad + ... +} +``` + +## 断言 + +### 断言使用原则 + +目前代码中有两种断言:一种是ASSERT,用于在DEBUG模式进行条件判定,条件不满足时程序直接退出;一种是CHECK_FATAL,用于检测运行时异常,条件不满足时程序直接退出。 + +适用用CHECK_FATAL的情况:主要是输入、资源申请等不受控情况。示例: + +```cpp +CHECK_FATAL(mplName.rfind(kMplSuffix) != std::string::npos, "File name %s does not contain .mpl", mplName.c_str()); // 文件名不符合要求 + +CHECK_FATAL(intrinCall->GetReturnVec().size() == 1, "INTRN_JAVA_FILL_NEW_ARRAY should have 1 return value"); // 逻辑约束不满足 + +CHECK_FATAL(func->GetParamSize() <= 0xffff, "Error:the argsize is too large"); // 输入合法性较验 + +void *MemPool::Malloc(size_t size) { + ... 
+ CHECK_FATAL(b != nullptr, "ERROR: Malloc error"); // 内存申请失败 +} +``` + +适合用ASSERT的情况,用于bug定位,防御性编程。示例: + +```cpp +ASSERT(false, "should not be here"); + +ASSERT(false, "Unknown opcode for FoldIntConstComparison"); +``` + +### 建议11.3.1 上下文安全的指针,不用使用断言较验其是否为nullptr + +说明:编译器是离线编译工具,进程崩溃的影响相对在线服务要小很多,可以适当减少防御性编程方式。因此,并不是所有入参都需要较验空指针,而是通过上下文逻辑来判断是否需要较验空指针。逻辑上不可能为nullptr的入参,可以不用较验。确定需要较验的,参考断言使用原则。 + +### 建议11.3.2 上下文安全的数组下标,不用使用断言较验数组是否越界 + +说明:同空指针原则一样,通过逻辑来判断是否需要用断言进行数组越界较验。确定需要添加的,参考断言使用原则。 + +### 建议11.3.3 上下文安全的情况,不用使用断言较验整数溢出、截断、回绕 + +说明:加法或乘法导致的整数溢出,如果逻辑保证的,可以不用较验。整数类型转换也可能导致截断、回绕,如果是逻辑保证的,可以不用较验。确定需要较验的,参考断言使用原则。 + +如果希望容错,逻辑继续运行的,可以用条件语句进行较验。 + +### 规则11.3.1 运行时可能会导致的错误,严禁使用ASSERT断言 + +错误示例: + +```cpp +FILE *fp = fopen(path, "r"); +ASSERT(fp != nullptr, "nullptr check"); //错误用法:文件有可能打开失败 + +char *str = (char *)malloc(MAX_LINE); +ASSERT(str != nullptr, "nullptr check"); //错误用法:内存有可能分配失败 +ReadLine(fp, str); +``` + +### 规则11.3.2 严禁在断言内改变运行环境 + +说明:在程序正式发布阶段,断言不会被编译进去,为了确保调试版和正式版的功能一致性,严禁在断言中使用任何赋值、 +修改变量、资源操作、内存申请等操作。 + +例如,以下的断言方式是错误的: + +```cpp +ASSERT(i++ > 1000); // p1被修改 +ASSERT(close(fd) == 0); // fd被关闭 +``` + +## 异常机制 + +### 规则11.4.1 禁用C++异常机制 + +说明:严禁使用C++的异常机制,所有的错误都应该通过错误值在函数之间传递并做相应的判断, 而不应该通过异常机制进行错误处理。 + +编码人员必须完全掌控整个编码过程,建立攻击者思维,增强安全编码意识,主动把握有可能出错的环节。而使用C++异常机制进行错误处理,会削弱编码人员的安全意识。 + +异常机制会打乱程序的正常执行流程,使程序结构更加复杂,原先申请的资源可能会得不到有效清理。 + +异常机制导致代码的复用性降低,使用了异常机制的代码,不能直接给不使用异常机制的代码复用。 + +异常机制在实现上依赖于编译器、操作系统、处理器,使用异常机制,导致程序执行性能降低。 + +在二进制层面,程序被加载后,异常处理函数增加了程序的被攻击面,攻击者可以通过覆盖异常处理函数地址,达到 +攻击的效果。 + +## 内存 + +### 规则11.5.1 内存申请前,必须对申请内存大小进行合法性校验 + +内存申请的大小可能来自于外部数据,必须检查其合法性,防止过多地、非法地申请内存。不能申请0长度的内存。 +例如: +```cpp +int Foo(int size) { + if (size <= 0) { + //error + ... + } + ... + char *msg = (char *)malloc(size); + ... +} +``` + +### 规则11.5.2 内存分配后必须判断是否成功 + +```cpp +char *msg = (char *)malloc(size); +if (msg != nullptr) { + ... +} +``` + +## 危险函数 + +### 规则11.6.1 禁止使用内存操作类危险函数 +C标准的许多函数,没有将目标缓冲区的大小作为参数,并且未考虑到内存重叠、非法指针的情况,在使用中很容易引入缓冲区溢出等安全漏洞。 + +基于历史缓冲区溢出漏洞触发的情况统计,有很大一部分是因为调用了这些内存操作类函数但未考虑目标缓冲区大小而导致。 +以下列出了部分内存操作类危险函数: + +内存拷贝函数:memcpy(), wmemcpy(), memmove(), wmemmove() + +内存初始化函数:memset() + +字符串拷贝函数:strcpy(), wcscpy(),strncpy(), wcsncpy() + +字符串拼接函数:strcat(), wcscat(),strncat(), wcsncat() + +字符串格式化输出函数:sprintf(), swprintf(), vsprintf(), vswprintf(), snprintf(), vsnprintf() + +字符串格式化输入函数:scanf(), wscanf(), vscanf(), vwscanf(), fscanf(),fwscanf(),vfscanf(),vfwscanf(),sscanf(), swscanf(), vsscanf(), vswscanf() + +stdin流输入函数:gets() +请使用对应的安全函数(详细信息请参考huawei_secure_c)。 + +例外:在下列情况下,由于未涉及到外部数据处理,不存在被攻击的场景,内存操作完全在本函数内完成,不存在因外部控制而失败的可能性。 +如果使用安全函数反而造成代码的冗余,可以留用危险函数: + +(1)对固定长度的数组进行初始化,或对固定长度的结构体进行内存初始化: +```cpp +BYTE array[ARRAY_SIZE]; + +void Foo() { + char destBuff[BUFF_SIZE]; + ... + memset(array, c1, sizeof(array)); //对全局固定长度的数据赋值 + ... + memset(destBuff, c2, sizeof(destBuff)); //对局部固定长度的数据赋值 + ... +} + +typedef struct { + int type; + int data; +} Tag; + +Tag g_tag = {1, 2}; + +void Foo() { + Tag dest; + ... + memcpy((void *)&dest, (const void *)&g_tag, sizeof(Tag)); //对固定长度结构体赋值 + ... +} +``` + +(2)函数参数中有表示内存的参数,对该内存进行初始化: +```cpp +void Foo(BYTE *buff1, size_t len1, BYTE *buff2, size_t len2) { + ... + memset(buff1, 0, len1); //对buff1清0 + memset(buff2, 0, len2); //对buff2清0 + ... +} +``` + +(3)从堆中分配内存后,赋予初值: +```cpp +size_t len = ... +char *str = (char *)malloc(len); +if (str != nullptr) { + memset(str, 0, len); + ... +} +``` + +(4)根据源内存的大小进行同等大小的内存复制: +以下代码基于srcSize分配了一块相同大小的内存,并复制过去: +```cpp +BYTE *src = ... +size_t srcSize = ... 
+BYTE *destBuff = new BYTE[srcSize]; +memcpy(destBuff, src, srcSize); +``` + +以下代码根据源字符串的大小分配一块相同的内存,并复制过去: +```cpp +char *src = ... +size_t len = strlen(src); +if (len > BUFF_SIZE) { + ... +} +char *destBuff = new char[len + 1]; +strcpy(destBuff, src); +``` + +(5)源内存全部是静态字符串常量(编码时需要检查目标内存是否足够的存储空间): +以下代码直接将字符串常量“hello”复制到数组中: +```cpp +char destBuff[BUFF_SIZE]; +strcpy(destBuff, "hello"); +``` +以下代码对静态字符串常量进行拼接: +```cpp +const char *list[] = {"red","green","blue"}; +char destBuff[BUFF_SIZE]; +sprintf(destBuff, "hello %s", list[i]); +``` diff --git a/doc/cn/RcApi.md b/doc/cn/RcApi.md new file mode 100644 index 0000000000000000000000000000000000000000..7a3777a7c277ecef8771dea1b3f9048ee35ea8fa --- /dev/null +++ b/doc/cn/RcApi.md @@ -0,0 +1,524 @@ +RC API +---- + +引用计数(Reference Counting, RC)是计算机编程语言中的一种内存管理技术,是指将资源(可以是对象、内存或磁盘空间等等)的被引用次数保存起来,当被引用次数变为零时就将其释放的过程。使用引用计数技术可以实现自动资源管理的目的。同时引用计数还可以指使用引用计数技术回收未使用资源的垃圾回收算法。 + +由于需要支持RC操作,运行时为方舟编译器提供了如下API,以便其更好的生成相关代码。 + +## void MCC\_IncRef\_NaiveRCFast(address\_t obj) + +**功能说明:** + +将对象的RC值原子性的加一。 + +**入参说明:** + +obj, 堆对象指针。 + +**返回值:** + +无 + +## void MCC\_DecRef\_NaiveRCFast(address\_t obj) + +**功能说明:** + +将对象的RC值原子性的减一。 + +**入参说明:** + +obj, 堆对象指针。 + +**返回值:** + +无 + +## void MCC\_ClearLocalStackRef(address\_t \*addr) + +**功能说明:** + +用来清理线程栈上的局部引用(Local Reference)。当前编译框架下为支持异常和GC,此函数可以将对应栈地址上的局部引用清零,并将其老值的RC原子性的减一。 + +**入参说明:** + +addr, 栈上局部引用的地址。 + +**返回值:** + +无 + +## void MCC\_IncDecRef\_NaiveRCFast(address\_t incObj, address\_t decObj) + +**功能说明:** + +用来将incObj指向的对象的RC值原子性的加一和将decObj指向的对象的RC原子性的减一。 + +**入参说明:** + +incObj,需要将RC原子性加一的对象地址。 + +decObj,需要将RC原子性减一的对象地址。 + +**返回值:** + +无 + +## void MCC\_IncDecRefReset(address\_t incObj, address\_t \*decAddr) + +**功能说明:** + +将incObj指向的对象的RC值原子性的加一,并将栈地址指针decAddr存放的局部变量对象的RC原子性的减一,同时将栈地址指针decAddr指向的内存清零。 + +**入参说明:** + +incObj,需要将RC原子性加一的堆对象。 + +decAddr,栈上局部引用的地址。 + +**返回值:** + +无 + +## void MCC\_DecRefResetPair(address\_t \*decAddr0, address\_t \*decAddr1) + +**功能说明:** + +将所有参数指向的栈地址空间清零,并将对应包含的局部变量老值的RC原子性的减一。 + +**入参说明:** + +decAddr0和decAddr1,栈上局部引用的地址。 + +**返回值:** + +无 + +## void MCC\_SetObjectPermanent(address\_t obj) + +**功能说明:** + +将堆对象设置成永久存活的对象,调用之后obj对应的对象的RC会变成极大值。 + +**入参说明:** + +obj的堆对象地址。 + +**返回值:** + +无 + +## address\_t MCC\_LoadVolatileStaticField(address\_t \*fieldAddr) + +**功能说明:** + +从易失性(Volatile)静态变量中取值,并且将取到的堆对象的RC原子性的加一。 + +**入参说明:** + +fieldAddr,Volatile静态变量的地址。 + +**返回值:** + +返回Volatile静态变量的值。 + +## address\_t MCC\_LoadRefStatic(address\_t \*fieldAddr) + +**功能说明:** + +从静态变量中取值,并且将取到的堆对象的RC原子性的加一。 + +**入参说明:** + +fieldAddr,静态变量的地址。 + +**返回值:** + +返回静态变量的值。 + +## address\_t MCC\_LoadVolatileWeakField(address\_t obj, address\_t \*fieldAddr) + +**功能说明:** + +从Weak的Volatile字段中取值, 如果取到非空的堆对象,会将该对象的RC原子性的加一。 + +**入参说明:** + +obj,堆对象地址。 + +fieldAddr,标记为Weak的Volatile字段的地址 + +**返回值:** + +返回标记为Weak的Volatile字段中的值,有可能返回空对象指针。 + +## address\_t MCC\_LoadWeakField(address\_t obj, address\_t \*field\_addr) + +**功能说明:** + +从Weak的字段中取值,如果取到非空的堆对象,会将该对象的RC原子性的加一。 + +**入参说明:** + +obj,堆对象地址。 + +fieldAddr,标记为Weak的字段的地址 + +**返回值:** + +返回标记为Weak的字段中的值,有可能返回空对象指针。 + +## address\_t MCC\_LoadRefField\_NaiveRCFast(address\_t obj, address\_t \*fieldAddr) + +**功能说明:** + +从fieldAddr字段取值,并将取到的堆对象的RC原子性的加一。 + +**入参说明:** + +obj,堆对象地址。 + +fieldAddr,对象对应字段的地址 + +**返回值:** + +返回对象字段中存储的值。 + +## address\_t MCC\_LoadVolatileField(address\_t obj, address\_t \*fieldAddr) + +**功能说明:** + +从Volatile字段中取值,并将取到的堆对象的RC原子性的加一。 + +**入参说明:** + +obj,堆对象地址。 + +fieldAddr,对象对应Volatile字段的地址 + +**返回值:** + +返回对象Volatile字段中存储的值。 + +## 
void MCC\_WriteReferent(address\_t obj, address\_t value) + +**功能说明:** + +写入java.lang.ref.Reference的referent字段,如果取到非空的堆对象,会将该对象的RC原子性的加一。 + +**入参说明:** + +obj,java.lang.ref.Reference对象地址。 + +value,作为referent的堆对象地址 + +**返回值:** + +无。 + +## void MCC\_WriteVolatileStaticFieldNoInc(address\_t \*fieldAddr, address\_t value) + +**功能说明:** + +向Volatile静态变量中写入堆对象,此调用不改变写入的堆对象的RC,但是会将静态变量存储的老值的RC原子性的减一。 + +**入参说明:** + +fieldAddr,Volatile静态变量的地址。 + +value,待写入堆对象地址 + +**返回值:** + +无。 + +## void MCC\_WriteVolatileStaticFieldNoDec(address\_t \*fieldAddr, address\_t value) + +**功能说明:** + +向Volatile静态变量中写入堆对象,此调用会将新写入的堆对象value的RC原子性的加一,但是不将静态变量的老值减一。 + +**入参说明:** + +fieldAddr,Volatile静态变量的地址。 + +value,待写入堆对象地址 + +**返回值:** + +无。 + +## void MCC\_WriteVolatileStaticFieldNoRC(address\_t \*fieldAddr, address\_t value) + +**功能说明:** + +向Volatile静态变量中写入堆对象,此调用不会改变写入的新值(value)的RC,也不会改变老值(fieldAddr存放的值)的RC。 + +**入参说明:** + +fieldAddr,Volatile静态变量的地址。 + +value,待写入堆对象地址 + +**返回值:** + +无。 + +## void MCC\_WriteVolatileStaticField(address\_t \*fieldAddr, address\_t value) + +**功能说明:** + +向Volatile静态变量中写入堆对象,此调用会将新写入的堆对象value的RC原子性的加一,但是并将静态变量的老值的RC减一。 + +**入参说明:** + +fieldAddr,Volatile静态变量的地址。 + +value,待写入堆对象地址 + +**返回值:** + +无。 + +## void MCC\_WriteRefFieldStaticNoInc(address\_t \*fieldAddr, address\_t value) + +**功能说明:** + +向对象普通静态变量字段中写入堆对象value,此调用不会将新写入的堆对象value的RC原子性的加一,但是会将其存储的老值的RC减一。 + +**入参说明:** + +fieldAddr,对象静态变量字段的地址。 + +value,待写入堆对象地址 + +**返回值:** + +无。 + +## void MCC\_WriteRefFieldStaticNoDec(address\_t \*fieldAddr, address\_t value) + +**功能说明:** + +向对象普通静态变量字段中写入堆对象value,此调用会将新写入的堆对象value的RC原子性的加一,但是不会将其存储的老值的RC减一。 + +**入参说明:** + +fieldAddr,对象静态变量字段的地址。 + +value,待写入堆对象地址 + +**返回值:** + +无。 + +## void MCC\_WriteRefFieldStaticNoRC(address\_t \*fieldAddr, address\_t value) + +**功能说明:** + +向对象普通静态变量字段中写入堆对象value,此调用不会将新写入的堆对象value的RC原子性的加一,也不会将其存储的老值的RC减一。 + +**入参说明:** + +fieldAddr,对象静态变量字段的地址。 + +value,待写入堆对象地址 + +**返回值:** + +无。 + +## void MCC\_WriteRefFieldStatic(address\_t \*fieldAddr, address\_t value) + +**功能说明:** + +向对象普通静态变量字段中写入堆对象value,此调用会将新写入的堆对象value的RC原子性的加一,同时会将其存储的老值的RC减一。 + +**入参说明:** + +fieldAddr,对象静态变量字段的地址。 + +value,待写入堆对象地址 + +**返回值:** + +无。 + +## void MCC\_WriteVolatileFieldNoInc(address\_t obj, address\_t \*fieldAddr, address\_t value) + +**功能说明:** + +向对象Volatile变量字段中写入堆对象value,此调用不会将新写入的堆对象value的RC原子性的加一,但是会将其存储的老值的RC减一。 + +**入参说明:** + +obj,对象的地址 + +fieldAddr,对象volatile变量字段的地址。 + +value,待写入堆对象地址 + +**返回值:** + +无。 + +## void MCC\_WriteVolatileFieldNoDec(address\_t obj, address\_t \*fieldAddr, address\_t value) + +**功能说明:** + +向对象Volatile变量字段中写入堆对象value,此调用会将新写入的堆对象value的RC原子性的加一,但是不会并将其存储的老值的RC减一。 + +**入参说明:** + +obj,对象的地址 + +fieldAddr,对象volatile变量字段的地址。 + +value,待写入堆对象地址 + +**返回值:** + +无。 + +## void MCC\_WriteVolatileFieldNoRC(address\_t obj, address\_t \*fieldAddr, address\_t value) + +**功能说明:** + +向对象Volatile变量字段中写入堆对象value,此调用不会将新写入的堆对象value的RC原子性的加一,也不会将其存储的老值的RC减一。 + +**入参说明:** + +obj,对象的地址 + +fieldAddr,对象volatile变量字段的地址。 + +value,待写入堆对象地址 + +**返回值:** + +无。 + +## void MCC\_WriteVolatileField(address\_t obj, address\_t \*fieldAddr, address\_t value) + +**功能说明:** + +向对象Volatile变量字段中写入堆对象value,此调用会将新写入的堆对象value的RC原子性的加一,也会将其存储的老值的RC减一。 + +**入参说明:** + +obj,对象的地址 + +fieldAddr,对象volatile变量字段的地址。 + +value,待写入堆对象地址 + +**返回值:** + +无。 + +## void MCC\_WriteRefFieldNoInc(address\_t obj, address\_t \*fieldAddr, address\_t value) + +**功能说明:** + +向对象普通变量字段中写入堆对象value,此调用不会将新写入的堆对象value的RC原子性的加一,但是会将其存储的老值的RC减一。 + +**入参说明:** + +obj,对象的地址 + +fieldAddr,对象普通变量字段的地址。 + +value,待写入堆对象地址 + +**返回值:** + +无。 + 
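
为帮助理解上述 Write 系列接口中「加一/减一」以及 NoInc/NoDec/NoRC 等变体之间的差异,下面给出一段仅作示意的 C++ 代码。其中 MockObject、IncRef、DecRef、WriteRefFieldSketch 等名称均为本文假设,并非运行时的真实实现:

```cpp
#include <atomic>
#include <cstdint>

// 示意用的简化对象模型:假设堆对象头部带有一个原子引用计数字段。
struct MockObject {
  std::atomic<int32_t> rc{1};
};

using address_t = MockObject*;  // 仅为示意,真实的 address_t 是地址类型

static void IncRef(address_t obj) {
  if (obj != nullptr) {
    obj->rc.fetch_add(1, std::memory_order_relaxed);
  }
}

static void DecRef(address_t obj) {
  if (obj != nullptr) {
    obj->rc.fetch_sub(1, std::memory_order_relaxed);
    // 真实实现会在 RC 减到 0 时回收对象,这里省略。
  }
}

// 示意 MCC_WriteRefField 一类写屏障的语义:对新值 RC 加一,写入字段,再对老值 RC 减一。
// NoInc 变体省略对新值的加一,NoDec 变体省略对老值的减一,NoRC 变体两者都省略。
void WriteRefFieldSketch(address_t *fieldAddr, address_t value) {
  address_t old = *fieldAddr;
  IncRef(value);
  *fieldAddr = value;
  DecRef(old);
}
```

真实的运行时实现还需要处理 RC 归零后的对象回收、Weak RC 以及 volatile 字段的内存序等问题,此处从略。
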
+## void MCC\_WriteRefFieldNoDec(address\_t obj, address\_t \*fieldAddr, address\_t value) + +**功能说明:** + +向对象普通变量字段中写入堆对象value,此调用会将新写入的堆对象value的RC原子性的加一,但是不会将其存储的老值的RC减一。 + +**入参说明:** + +obj,对象的地址 + +fieldAddr,对象普通变量字段的地址。 + +value,待写入堆对象地址 + +**返回值:** + +无。 + +## void MCC\_WriteRefFieldNoRC(address\_t obj, address\_t \*fieldAddr, address\_t value) + +**功能说明:** + +向对象普通变量字段中写入堆对象value,此调用不会将新写入的堆对象value的RC原子性的加一,也不会将其存储的老值的RC减一。 + +**入参说明:** + +obj,对象的地址 + +fieldAddr,对象普通变量字段的地址。 + +value,待写入堆对象地址 + +**返回值:** + +无。 + +## void MCC\_WriteRefField(address\_t obj, address\_t \*fieldAddr, address\_t value) + +**功能说明:** + +向对象普通变量字段中写入堆对象value,此调用会将新写入的堆对象value的RC原子性的加一,也会将其存储的老值的RC减一。 + +**入参说明:** + +obj,对象的地址 + +fieldAddr,对象普通变量字段的地址。 + +value,待写入堆对象地址 + +**返回值:** + +无。 + +## void MCC\_WriteVolatileWeakField(address\_t obj, address\_t \*fieldAddr, address\_t value) + +**功能说明:** + +向对象标记为Weak的volatile变量字段中写入堆对象value,此调用会将新写入的堆对象value的Weak RC原子性的加一,也会将其存储的老值的Weak RC减一。 + +**入参说明:** + +obj,对象的地址 + +fieldAddr,对象标记为Weak的volatile变量字段的地址。 + +value,待写入堆对象地址 + +**返回值:** + +无。 + +## void MCC\_WriteWeakField(address\_t obj, address\_t \*fieldAddr, address\_t value) + +**功能说明:** + +向对象标记为Weak的普通变量字段中写入堆对象value,此调用会将新写入的堆对象value的Weak RC原子性的加一,也会将其存储的老值的Weak RC减一。 + +**入参说明:** + +obj,对象的地址 + +fieldAddr,对象标记为Weak的普通变量字段的地址。 + +value,待写入堆对象地址 + +**返回值:** + +无。 diff --git a/doc/cn/VtableItableDescription.md b/doc/cn/VtableItableDescription.md new file mode 100644 index 0000000000000000000000000000000000000000..a7092d0f45a7042919897e16f92af2c3e576e575 --- /dev/null +++ b/doc/cn/VtableItableDescription.md @@ -0,0 +1,401 @@ +# 虚函数表和接口函数表设计介绍 + +## 虚函数表 + +方舟编译器会为每一个类生成一个虚方法表。在这个表中,会存储父类的虚方法,再加上子类的虚方法以及实现的接口类的Default方法。如果子类重载了父类的实现,那么在虚方法表中同样的位置,则会覆盖掉父类的方法。 + +下面,展示一个具体 的例子: + +```java +class A { + public int first() { + return 0; + } +} + +class B extends A { + public void foo() { + } + public int first() { + return 1; + } +} + +class C extends A { + public void bar() { + } + public int first() { + return 2; + } +} + +public class IsEmpty { + public static void main(String [] args) { + A x = new B(); + x.first(); + A y = new C() + y.first(); + } + + public void add(A x) { + x.first(); + } +} +``` + + + +方舟编译器生成的虚函数表的结构如下: + +A: +``` + _vtb_LA_3B: + .quad Ljava_2Flang_2FObject_3B_7Cclone_7C_28_29Ljava_2Flang_2FObject_3B - . + .quad Ljava_2Flang_2FObject_3B_7Cequals_7C_28Ljava_2Flang_2FObject_3B_29Z - . + .quad Ljava_2Flang_2FObject_3B_7Cfinalize_7C_28_29V - . + .quad Ljava_2Flang_2FObject_3B_7CgetClass_7C_28_29Ljava_2Flang_2FClass_3B - . + .quad Ljava_2Flang_2FObject_3B_7ChashCode_7C_28_29I - . + .quad Ljava_2Flang_2FObject_3B_7Cnotify_7C_28_29V - . + .quad Ljava_2Flang_2FObject_3B_7CnotifyAll_7C_28_29V - . + .quad Ljava_2Flang_2FObject_3B_7CtoString_7C_28_29Ljava_2Flang_2FString_3B - . + .quad Ljava_2Flang_2FObject_3B_7Cwait_7C_28_29V - . + .quad Ljava_2Flang_2FObject_3B_7Cwait_7C_28J_29V - . + .quad Ljava_2Flang_2FObject_3B_7Cwait_7C_28JI_29V - . + .quad LA_3B_7Cfirst_7C_28_29I - . +``` + +B: + +``` + __vtb_LB_3B: + .quad Ljava_2Flang_2FObject_3B_7Cclone_7C_28_29Ljava_2Flang_2FObject_3B - . + .quad Ljava_2Flang_2FObject_3B_7Cequals_7C_28Ljava_2Flang_2FObject_3B_29Z - . + .quad Ljava_2Flang_2FObject_3B_7Cfinalize_7C_28_29V - . + .quad Ljava_2Flang_2FObject_3B_7CgetClass_7C_28_29Ljava_2Flang_2FClass_3B - . + .quad Ljava_2Flang_2FObject_3B_7ChashCode_7C_28_29I - . + .quad Ljava_2Flang_2FObject_3B_7Cnotify_7C_28_29V - . + .quad Ljava_2Flang_2FObject_3B_7CnotifyAll_7C_28_29V - . 
+ .quad Ljava_2Flang_2FObject_3B_7CtoString_7C_28_29Ljava_2Flang_2FString_3B - . + .quad Ljava_2Flang_2FObject_3B_7Cwait_7C_28_29V - . + .quad Ljava_2Flang_2FObject_3B_7Cwait_7C_28J_29V - . + .quad Ljava_2Flang_2FObject_3B_7Cwait_7C_28JI_29V - . + .quad LB_3B_7Cfirst_7C_28_29I - . + .quad LB_3B_7Cfoo_7C_28_29V - . +``` +C: +``` +__vtb_LC_3B: +前面11个和A和B一样 + … … + .quad LC_3B_7Cfirst_7C_28_29I - . + .quad LC_3B_7Cbar_7C_28_29V - . + +``` + +对比一下,我们可以发现: + +1. 所有的类都是继承自Object类,所以虚函数表的前面11个函数都是继承自Object,且和父类Object布局保持一致。 +2. 对于第12个函数,子类B覆盖了父类A,所以在同样的位置,父类是LA_3B_7Cfirst_7C_28_29I,子类是LB_3B_7Cfirst_7C_28_29I。类C继承至类A并重写了first函数,且实现了接口iD,所以12的位置为 LC_3B_7Cfirst_7C_28_29I,接口函数LiD_3B_7Csecond_7C_28_29I被放在了13的位置。 + +## 虚函数访问处理(编译时) + +因为要保持多态性,所以在编译时,不能确定具体调用是哪个函数。在当前例子里面体现在这个位置: + +```java +public class IsEmpty { + public static void main(String [] args) { + A x = new B(); + x.first(); + A y = new C(); + y.first(); + } + + public void add(A x) { + x.first(); + } +} +``` + + +像这种情况,我们编译时不能知道运行时调用的是B里面的first函数,还是C里面的first函数。 + +但是,因为first函数在A,B,C中布局一致,而这个例子当中first函数在vtab中的偏移是12,因此我们可以生成这样的访问代码:从相应的对象中拿到vtable指针,然后加上offset12。这样我们就能访问到正确的函数。 + +## 虚函数调用处理(运行时) + +在程序执行过程中,以(图1)为例,当遇到虚拟函数调用的时候,我们执行如下步骤: +1. 判断对象(this_指针)是哪个类的实例(在图1中,this是类C的实例); +2. 使用函数索引在对应类的虚拟函数表中查找; +3. 返回实际调用的函数指针。 + +![](media/javavmt.png) +
图1: Java虚拟函数调用的静态化
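
下面用一段可独立编译的 C++ 代码示意上述「对象 → 虚函数表 → 固定槽位 → 间接调用」的过程。其中 ClassMeta、ObjectHeader 等结构体与字段名均为本文假设,并非方舟运行时的真实数据结构;槽位 0 仅用来代表例子中 first 所在的第 12 项:

```cpp
#include <cstdio>

using VFunc = int (*)();

// 示意:每个类有一张函数指针表,对象头部保存所属类的元数据指针。
struct ClassMeta {
  const VFunc *vtable;
};

struct ObjectHeader {
  const ClassMeta *klass;
};

int FirstOfB() { return 1; }
int FirstOfC() { return 2; }

// B 和 C 的虚函数表:first 位于同一个固定槽位,布局保持一致。
const VFunc vtableB[] = { FirstOfB };
const VFunc vtableC[] = { FirstOfC };
const ClassMeta classB = { vtableB };
const ClassMeta classC = { vtableC };

// 编译器为 x.first() 生成的访问逻辑:取出 vtable 指针,按固定偏移取函数地址,间接调用。
int CallFirst(const ObjectHeader &obj) {
  constexpr int kFirstSlot = 0;
  return obj.klass->vtable[kFirstSlot]();
}

int main() {
  ObjectHeader b{&classB};
  ObjectHeader c{&classC};
  std::printf("%d %d\n", CallFirst(b), CallFirst(c));  // 输出:1 2
  return 0;
}
```

由于 first 在 A、B、C 的虚函数表中槽位一致,调用点不需要知道对象的具体类型,这正是上文"从对象取得 vtable 指针再加上偏移"的含义。
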
+ +## 接口函数表 + +Interface call类似于多重继承,比java单继承要复杂。多继承无法天然地确定唯一一个继承顺序。 + +在一个封闭的环境下,可以通过拓扑排序的方法确定一个顺序(iA,iB,iC)。好处是可以实现像虚函数表一样的访问方式来处理interface call。但是这样得到的一个类的itable会非常大,而且大部分是空洞。考虑到性能和代码大小,这种方法是不可用的。 + +![](media/Topology.png) + + +对于开放的环境,编译时刻是无法通过拓扑排序来确定一个顺序的,导致接口函数表里的方法顺序不固定,所以实际实现中没办法像虚函数调用一样实现一个顺序一致的方法表和访问形式为 offset 的访问机制。在编译时刻,可以确定一个类实现的所有接口,以及接口的继承关系。运行时对函数签名进行比较来确定需要调用的函数。因为字符串比较开销比较大,所以方舟编译器会在编译时刻对函数名字和签名进行哈希计算得出哈希值。运行时首先比较哈希值;如果哈希值相同,且不存在哈希冲突,则调用这个函数。如果存在哈希冲突,则进行函数名字和签名的比较,取得函数指针。 + +同时,考虑到运行时效率和 ROM 空间的原因,我们把 Itable 分成了两级表,第一级表为真正的 hash ,考虑到ROM空间的压力,我们把 hash 值设为23,然后,依据 hash 得到的值把函数地址填到这个值对应的位置上去(0~22)。如果我们一级 hash 表不发生冲突的话,那么在最后一个包含函数地址的位置之后的表项都是空白的,这样后面的表项可以被去掉而不需要占用空间。如果一级 hash 表发生冲突了,则在发生冲突的位置填0,然后在第23个 slot 的位置填上二级表的地址。 + +一级函数表结构如下: + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
| 表序 | 接口函数表项 | 说明 |
| ---- | ------------ | ---- |
| 0 | &Func 或 0 | 对应Hash值的函数地址,没有则为0;如果出现冲突,那么冲突的位置也为0,并且在第23个表项填上二级表的地址;如果没有冲突,那么将最后一个有对应函数的hash值(n)之后的表项删除 |
| 1 | &Func 或 0 | |
| 2 | &Func 或 0 | |
| ... | &Func 或 0 | |
| n | &Func | |
| 23 | &itbC | 二级表的地址。如果一级表没有出现冲突,则该项不存在 |
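
结合上表与前文描述的查找流程,下面给出一段仅作示意的 C++ 伪实现(二级表的具体布局见下文)。其中 Itab、SecondItabEntry、ResolveInterfaceCall 等名称均为本文假设,并非运行时的真实接口:

```cpp
#include <cstdint>
#include <cstring>

constexpr int kHashSize = 23;  // 一级表的 hash 槽位数

// 示意用的二级表表项:真实布局还包含大小、对齐项与冲突标记,见下文。
struct SecondItabEntry {
  uint64_t sigHash;       // 函数签名的哈希值
  const void *func;       // 函数地址
  const char *signature;  // 签名字符串
};

struct Itab {
  const void *firstTable[kHashSize];   // 一级表:无冲突时直接存函数地址,否则为 0
  const SecondItabEntry *secondTable;  // 冲突时由第 23 项指向的二级表
  int secondTableSize;
};

// 一级表槽位非 0 则直接命中;为 0 时退化到二级表,用签名哈希匹配。
// 真实实现只在哈希冲突时才比较签名字符串,这里为简化总是再比对一次。
const void *ResolveInterfaceCall(const Itab &itab, uint32_t nameHash,
                                 uint64_t sigHash, const char *signature) {
  const void *func = itab.firstTable[nameHash % kHashSize];
  if (func != nullptr) {
    return func;
  }
  for (int i = 0; i < itab.secondTableSize; ++i) {
    const SecondItabEntry &e = itab.secondTable[i];
    if (e.sigHash == sigHash && std::strcmp(e.signature, signature) == 0) {
      return e.func;
    }
  }
  return nullptr;  // 正常情况下不会走到这里
}
```

真实的查找由运行时函数完成并带有更多细节(如冲突标记与对齐项),这里仅示意控制流。
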
+ + +二级函数表的结构如下: + +| 接口函数表项 | 说明 | +|----------------------|--------------------------------------------------| +| Size | 不冲突的接口函数表的大小 | +| 1 | 对齐站位作用,无实际意义 | +| Func1_sig | Func1签名的哈希值 | +| &Func1 | Fun1的地址 | +| Func2_sig | Func2签名的哈希值 | +| &Func2 | Func2的地址 | +| ...... | | +| Func3_sig和Func4_sig | Func3签名的哈希值和Func4签名的哈希值,两个值相同 | +| 1 | 表示冲突,因为func3和func4签名的哈希值一样 | +| …… | | +| Func3_sig | Func3签名 | +| &Func3 | Func3的地址 | +| Func4_sig | Func4签名 | +| &Func4 | Func4的地址 | +| …… | | + +## 接口函数调用: + +对于声明为如下形式的接口函数调用的情况,我们的调用过程如图2所示: + +```java +interface B { funcB(); } +interface C { funcC(); } +interface D { funcD(); } +Class A implements B, C, D {} +``` + +![](media/interface1.jpg) + +
图2:Java接口函数调用的静态化
+ + +如图2所示,在程序执行过程中,我们执行如下步骤: + +1. 判断对象(obj)是哪个类的实例,当前为类A的实例; +2. 根据hash值,在一级表中查找,存在则返回函数指针,如果对应位置为0,则通过二级表查找。在二级表中,使用函数签名的哈希值查找,如果找到就返回函数指针,否则用函数名查找; +3. 间接调用函数指针,并把相关的参数(args)传给间接调用。 + +下面,举一个具体的例子: + +这个IsEmpty类实现了接口A和B,每个接口中声明有两个方法。 + +```java +interface A{ + public int add(); + public int minus(); +} + +interface B{ + public int mult(); + public int div(); +} + +public class IsEmpty implements A, B { + public static void main(String[]args) { + } + + public void test(B x) { + x.mult(); + } + + public int add() { + return 6 + 3; + } + + public int minus() { + return 6 - 3; + } + + public int mult() { + return 6 * 3; + } + + public int div() { + return 6 / 3; + } +} +``` + +首先,我们来看一下 IsEmpty 的 itable 在 maple 代码里面是怎么样的: + +``` +var $__itb_LIsEmpty_3B fstatic <[24] <* void>> = [0, 0, 0, 0, 0, 0, 0, 0, addroffunc ptr &LIsEmpty_3B_7Cdiv_7C_28_29I, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, addroffunc ptr &LIsEmpty_3B_7Cadd_7C_28_29I, 0, 0, addrof ptr $__itabC_LIsEmpty_3B] + +var $__itbC_LIsEmpty_3B fstatic <[6] <* void>> = [2, 1, 0xb97, addroffunc ptr &LIsEmpty_3B_7Cmult_7C_28_29I, 0x1f7f, addroffunc ptr &LIsEmpty_3B_7Cminus_7C_28_29I] +``` + +对应的汇编结构: + +``` +__itb_LIsEmpty_3B: + .quad 0 + .quad 0 + .quad 0 + .quad 0 + .quad 0 + .quad 0 + .quad 0 + .quad 0 + .quad LIsEmpty_3B_7Cdiv_7C_28_29I - . + .quad 0 + .quad 0 + .quad 0 + .quad 0 + .quad 0 + .quad 0 + .quad 0 + .quad 0 + .quad 0 + .quad 0 + .quad 0 + .quad LIsEmpty_3B_7Cadd_7C_28_29I - . + .quad 0 + .quad 0 + .quad __itabC_LIsEmpty_3B - . +``` +``` +__itbC_LIsEmpty_3B: + .quad 2 + .quad 1 + .quad 2967 + .quad LIsEmpty_3B_7Cmult_7C_28_29I - . + .quad 8063 + .quad LIsEmpty_3B_7Cminus_7C_28_29I - . +``` + +其中表项内容如下: + +1. 一级表中(__itb_LIsEmpty_3B),共23项,其中第9项和第20项为函数地址,第23项为二级表地址,由此可见一级表发生了冲突,从而需要二级表来确认具体的函数地址; + +2. 
二级表中第一项为2,表示有2个不冲突的函数,第二项为1,起到对齐占位的作用,而后面4项分别为函数签名产生的hash值和对应的函数地址。 + +接下来我们看到这个例子里面,源码中test函数中会产生一个interface-call,对应的maple代码如下: + +``` +if (eq u1 u64 (regread u64 %4, constval u64 0)) { + callassigned &MCC_getFuncPtrFromItabSecondHash64 (regread ptr %3, constval u64 0xb97, conststr ptr "mult|()I") { regassign u64 %4} +} +icallassigned (regread u64 %4, regread ref %2) {} +``` + +可以看出调用逻辑是这样的: + +首先判断一级itable表当中hash值对应位置表项是否为空,如果不空则直接使用该地址;如果为空,则调用 getFuncPtrFromItabSecondHash64 函数。 + +getFuncPtrFromItabSecondHash64 函数有三个参数,itable 地址,函数 basename 对应的 hash 值,和函数的签名。完整的调用过程是先通过 classinfo 找到对应的 itable 地址,然后进行 hash 值的比对,如果比对成功且不冲突就能得到正确的地址;如果比对冲突,则直接使用 signature name 进行比对(字符串比对)。 + +这里所访问的 itable 和上面列出的 IsEmpty 的itable表项形式一致。 + +## Interface override + +Java 8中引入了 `Default` 函数。父类中的实现会覆盖接口中的Default函数;接口之间的Default函数需要根据接口的继承关系来确定Override。如下所示,类cA继承类cB实现了接口iD,在cB和iD中都有foo的实现。对于cA来说,foo的实现来自父类cB而不是接口iD。 + +```java + +interface iD { + public default void foo(){System.out.println("iD foo");} +} + +class cB { + public void foo(){System.out.println("cB foo");} +} + +class cA extends cB implements iD { +} + +public class IsEmpty { + public static void main(String [] args) { + iD obj = new cA(); + obj.foo(); + } +} +``` + + +如下所示,接口Parent和Son都定义了getValue。对于类Sson来说,getValue的实现来自Son,而不是来自Parent。 + +```java +interface Parent{ + default void getValue(){ + System.out.println("Parent getVatue……"); + } +} + +interface Son extends Parent{ + default void getValue(){ + System.out.println("OfInt getValue……") + } +} + +abstract class OfPrimitive implements Parent{ +} + +class SSon extends OfPrimitive implements Son{ +} + +public class Main { + static int get() { + return 1; + } + + public static void main(String[] args) { + Parent son = (Parent)new SSon(); + son.getValue(); + + SSon son2; + if(get()==1) { + son2 = new SSon(); + } + else son2 = new SSon(); + son2.getValue(); + } +} +``` diff --git a/doc/cn/media/MapleDriverStructure.png b/doc/cn/media/MapleDriverStructure.png new file mode 100644 index 0000000000000000000000000000000000000000..adc5f98ba54d4ffb4430cb3bbff37e8e45d06214 Binary files /dev/null and b/doc/cn/media/MapleDriverStructure.png differ diff --git a/doc/cn/media/Topology.png b/doc/cn/media/Topology.png new file mode 100644 index 0000000000000000000000000000000000000000..6967780a1ac7addf7811b4271bbf805c6d659027 Binary files /dev/null and b/doc/cn/media/Topology.png differ diff --git a/doc/cn/media/addphase.png b/doc/cn/media/addphase.png new file mode 100644 index 0000000000000000000000000000000000000000..ed9b5fbfccb5a30ba1ac877952c6b5372a43dafa Binary files /dev/null and b/doc/cn/media/addphase.png differ diff --git a/doc/cn/media/interface1.jpg b/doc/cn/media/interface1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7377546fc36b03b282e5d85d0a89fd274288f831 Binary files /dev/null and b/doc/cn/media/interface1.jpg differ diff --git a/doc/cn/media/javavmt.png b/doc/cn/media/javavmt.png new file mode 100644 index 0000000000000000000000000000000000000000..ef93f36923559321d0512a862ca9a1ff9d36a6b6 Binary files /dev/null and b/doc/cn/media/javavmt.png differ diff --git a/doc/en/CPPCodingTalkAboutPointer.md b/doc/en/CPPCodingTalkAboutPointer.md new file mode 100644 index 0000000000000000000000000000000000000000..2e822efaa531cc7c8b7785c8a9f07748ce6a08d5 --- /dev/null +++ b/doc/en/CPPCodingTalkAboutPointer.md @@ -0,0 +1,266 @@ +# Discussion of Pointer in C++ Programming + +# Background + +The use of pointers in `C/C++` is flexible and accompanied by many security risks, which poses 
higher requirements on programmers. This document will discuss how to use raw pointers in `C/C++` and ultimately establish a coding paradigm. + +# Raw Pointer vs Reference + +## Member access. + +When an object member is accessed, the raw pointer may be null (the validity of a pointer is logically ensured by a closed object or function). Therefore, invalid pointers must be checked, whereas the reference cannot be null and does not need to be checked. + +## Container members. + +In the semantics of `C++`, references express alias relationships, which do not occupy memory theoretically. (In practice, references are implemented internally as pointers in a compiler.) A reference is not an object in itself, which is different from a pointer. A pointer can be a container member, but a reference cannot. + +# Raw Pointer vs Smart Pointer + +## Destroy a heap object. + +```c++ +class Int { + ... +private: + int data; +} + +void test(int *in) { + Int* tmp = new Int(); + ... + goto LABEL; + ... + + delete tmp; +LABEL: +} +``` + +The use of resources (heap objects, stack objects, and file resources) complies with the principle that **"resources that are released in the same scope as they are acquired"** in Resource Acquisition Is Initialization (RAII), which minimizes the possibility of resource leakage. + +A segment of processing logic and sub-function calling are usually involved between `new` and `delete` of a raw pointer. The intermediate processing logic may encounter exceptions or jumps. (The current object will not go beyond authority to restrict the behavior of the intermediate processing logic, which exceeds the management scope of `new`.) The resource release is skipped due to exceptions or jumps, causing resource leakage (for example, the `tmp` object in the `test` function in the preceding example). + +The smart pointer is reconstructed to `auto tmp = std::make_unique();`. When the `tmp` object is constructed, the `delete` behavior is bound and the current scope is destroyed, preventing resource leakage. + +## Management permission vs use permission. + +```c++ +int *delete(int *in); +``` + +Management permission: Destroy and rebuild objects. + +Use permission: Access and modify objects. + +As shown in the preceding example, when a raw pointer is used to transfer parameters, the use of the **management permission** or **use permission** cannot be determined by the input parameter `in` and output parameter because the raw pointer implies an attribute of transferring the ownership (possibly or not). Additional information is required when this function is called: Will the `in` parameter be destroyed by the `delete` function? Does the return value need to be destroyed by the caller? + +```c++ +std::unique_ptr delete(std::unique_ptr &in); +``` + +A smart pointer is used to specify a role of a parameter in an interface. For example, `std::unique_ptr& in` indicates that the `delete` function has the **use permission**, and the return value indicates that the `delete` function transfers the ownership. + +# Pointer Normalization + +## A destruction method must be immediately bound to an object created by `new`. + +Bad example: + +```c++ +Object *obj = new Object(); +... +delete obj; +``` + +Good example: + +```c++ +std::unique_ptr obj = std::make_unique(); +``` + +## A release method must be immediately bound to the applied resources. + +Bad example: + +```c++ +FILE *file = open("xxx.txt"); +... +file->close(); +``` + +Good example: (This example is commonly used. 
The best way is to encapsulate an application class `open`.) + +```c++ +template +class ResourceGuard { + public: + ResourceGuard(T *_obj, Func _func) : obj(_obj), func(_func) {} + + ~ResourceGuard() { obj.func(); } + private: + T *obj; + Func func; +} + +FILE* file = open("xxx.txt"); +auto fileGuard = ResourceGuard>(file, FILE::close); +... +``` + +## Use a reference instead of a pointer when the value is not null. + +Bad example: + +```c++ +void func1(int *in) { + if (in == nullptr) return; + ... +} + +void func2() { + int *p = nullptr; + ... + if (p != nullptr) { + func1(p); + } +} +``` + +Good example: + +```c++ +void func1(int &in) { + ... +} + +void func2() { + int *p = nullptr; + ... + if (p != nullptr) { + func1(*p); + } +} +``` + +## As a container member (without the management permission), use an encapsulated reference container instead of a pointer when the value is not null. + +Bad example: + +```c++ +void func(std::vector &in) { + for (auto *p : in) { + if (p == nullptr) { + continue; + } + ... + } +} +``` + +Good example: + +```c++ +template +class Ref { + public: + Ref() = delete; + Ref(T &ref) : data(&ref) {} + + ... + + operator T() const noexcept { + return *data; + } + + private: + T *data; +} + +template +using ref_vector = std::vector>; +void func(ref_vector &in) { + for (auto p : in) { + int &data = p; + ... + } +} +``` + +## As a container member (with the management permission), use a container that has the lifecycle management permission instead of a pointer container. + +Bad example: + +```c++ +std::vector data; +... +for (auto *p : data) { + delete p; +} +``` + +Good example: + +```c++ +template +class ptr_vector { +public: + ~ptr_vector() { + for (auto *p : data) { + delete p; + } + } + +private: + std::vector data; +} + +ptr_vector data; +... +``` + +## Explicitly transfer an object management permission and specify an object use permission. + +`move` semantics is added to `C++11`, and `auto_ptr` is discarded. `unique_ptr` is used to explicitly transfer the ownership so that the lifecycle management methods of stack objects and heap objects can be unified. + +Example of stack object transfer: + +```c++ +std::vector func() { + std::vector data; + data.push_back(0); + return std::move(data); +} +``` + +Example of fuzzy heap object transfer: + +```c++ +Object *func() { + std::unique_ptr data = std::make_unique(); + Object &rData = ToRef(data); + rData.push_back(0); + return data.release(); +} +``` + +Example of clear heap object transfer: + +```c++ +std::unique_ptr func() { + std::unique_ptr data = std::make_unique(); + Object &rData = ToRef(data); + rData.push_back(0); + return std::move(data); +} +``` + +## Scenarios where pointers should be used. + +1. When a third-party library function transfers in or out a pointer, `unique_ptr.get()` or `unique_ptr.release()` must be used to construct input parameters before the function is called. After output parameters are obtained, `unique_ptr` must be used to catch or check whether the output parameters are null and convert the output parameters to references. +2. As a container member (without the management permission), a null pointer is designed in the application scenario. However, the null pointer must be checked and converted to a reference immediately before use. Pointer diffusion is not supported. + +# Remarks + +`Ref` and `ref_vector` have been developed. `Ref` is defined as `SafePtr` because `operator.` cannot be reloaded. 

The `ResourceGuard` and `ptr_vector` are being developed and are mainly used as examples in this document.
diff --git a/doc/en/CompilerPhaseDescription.md b/doc/en/CompilerPhaseDescription.md
new file mode 100644
index 0000000000000000000000000000000000000000..1d9870b21b9e96684973c1d9707298760fe9aae3
--- /dev/null
+++ b/doc/en/CompilerPhaseDescription.md
@@ -0,0 +1,152 @@
### Hierarchy in maple phase
At the current stage, two phase classes are provided for inheritance. Phases for other IR levels can be designed as subclasses of the MaplePhase class. They perform optimizations or generate analysis results on their specific IR level.

##### The MapleModulePhase class

If a phase is derived from MapleModulePhase, it performs its transformation on the module. It can also run lower-level IR phase managers.

##### The MapleFunctionPhase class

```c++
template <typename IRType>  // the template parameter name here is illustrative
class MapleFunctionPhase : public MaplePhase
```

In contrast to MapleModulePhase, this is a template class because Maple has different function-level IRs. Both the CodeGen function-level IR and the MidEnd function-level IR derive from this class.

### Memory management for maple phase

Maple phase management is able to manage memory so that each phase can keep the information required by other phases and discard useless information. Each phase manager provides an analysisDataManager (in multithreading, each thread provides its own analysisDataManager) which takes responsibility for storing analysis data. To implement this functionality, the GetAnalysisDependence method is required to be implemented by each phase.
```c++
// PhaseName, RequiredPhase and PreservedPhase are placeholders for concrete phase classes.
void PhaseName::GetAnalysisDependence(maple::AnalysisDep &aDep) const {
  aDep.AddRequired<RequiredPhase>();    // If a previous phase is required to be executed
  aDep.AddPreserved<PreservedPhase>();  // preserve specific previous phase information
  aDep.SetPreservedAll();               // preserve all previous phase information in analysisDataManager
}
```
#### Analysis phase

GetPhasesMempool() in an analysis phase provides a mempool from the analysisDataManager. Any data that is put in the analysis phase mempool is not deleted until it is declared to be discarded by other phases or until the end of the phase manager. If information generated during an analysis phase needs to be deleted after the phase, it can be put in the temp mempool in MaplePhase.

#### Transform phase

GetPhasesMempool() in a transform phase provides a mempool which lives until the transformation finishes. **If a transform phase does not implement the GetAnalysisDependence method, it defaults to having no prerequisite phases and invalidates all phase information in the analysisDataManager.** In other words, a transform phase is by default assumed to invalidate all analysis results.


### Quick Start -- basic code

An example of a function-level `transform` phase called 'MEHello' in me.
- 1 add two files: me_hello_opt.h and me_hello_opt.cpp

content in me_hello_opt.h
```c++
#ifndef MAPLE_ME_INCLUDE_ME_HELLO_OPT_H
#define MAPLE_ME_INCLUDE_ME_HELLO_OPT_H
#include "me_function.h"  // in order to use MeFunction
#include "maple_phase.h"  // in order to use the macro
namespace maple {

// always use this macro when you work with a transform phase.
MAPLE_FUNC_PHASE_DECLARE(MEHello, MeFunction)
}  // namespace maple
#endif  // MAPLE_ME_INCLUDE_ME_HELLO_OPT_H

```
content in me_hello_opt.cpp
```c++
#include "me_hello_opt.h"
// include the corresponding phase manager in order to use information from other phases.
+#include "me_phase_manager.h" + +namespace maple { + +// you need always keep in mind that which analysis results are needed and which results will be destroyed. +void MEHello::GetAnalysisDependence(maple::AnalysisDep &aDep) const { + aDep.AddRequired(); // it is guaranteed that MEDominance's result is available in this phase. + aDep.SetPreservedAll(); // it means that this phase will not destroy any analysis results. +} + +// the return value of this function indicates that whether this phase +// has modified the IR of this function, for now we do not use this value. +bool MEHello::PhaseRun(maple::MeFunction &f) { + // you can use this macro to get the result which is configured as Required + // in GetAnalysisDependence; or an error will be reported. + auto *dom = GET_ANALYSIS(MEDominance); + // do something using dom info. + LogInfo::MapleLogger() << "hello opt on function: " << f.GetName() << '\n'; + return false; +} + +} // namespace maple + +``` +- 2 tell the phase manager to welcome a new phase. + - 2.1 add the header file(me_hello_opt.h) to me_phase_manager.h + - 2.2 register the phase in me_phase_manager.cpp, like this: +```c++ +MAPLE_TRANSFORM_PHASE_REGISTER_CANSKIP(MEHdse, hdse) +// the macro suffix CANSKIP indicates that this phase can be skipped. +// first parameter tells the implement class and the second parameter is +// the phase name, which could be used in option like: dump-phase(s), +// skip-phases, skip-after; and it is used to configure the phase list that will run in phasemanager. ++MAPLE_TRANSFORM_PHASE_REGISTER_CANSKIP(MEHello, hello) +MAPLE_TRANSFORM_PHASE_REGISTER_CANSKIP(MELfoIVCanon, ivcanon) +``` + +- 3 add the new phase into run list. +modify the phase.def file, like this: +```c++ +... +ADDMAPLEMEPHASE("dse", MeOption::optLevel >= 2) +ADDMAPLEMEPHASE("hello", MeOption::optLevel >= 2) +ADDMAPLEMEPHASE("analyzector", JAVALANG) +... +``` +- 4 add the cpp file into corresponding build.gn file. +- 5 compile and test new phase. +maple --run=me:mpl2mpl:mplcg --option="--O2 :--O2 --quiet:--O2 --quiet" test.mpl +we can see that our new hello phase is performed after dse successfully. 
+``` +>>>>>>>>>>>>>>>>>>>>>>>>>>>>> Optimizing Function < VEC_invariant_p_base_space id=5471 >--- +---Preparing Function < VEC_invariant_p_base_space > [1] --- +---Run Phase [ mecfgbuild ]--- +---Run Phase [ cfgOpt ]--- +---Run Phase [ loopcanon ]--- + ++ trigger phase [ dominance ] + ++ trigger phase [ identloops ] +---Run Phase [ splitcriticaledge ]--- +---Run Phase [ ssatab ]--- +---Run Phase [ aliasclass ]--- +---Run Phase [ ssa ]--- + ++ trigger phase [ dominance ] +---Run Phase [ dse ]--- + ++ trigger phase [ fsaa ] +---Run Phase [ hello ]--- +hello opt on function: VEC_invariant_p_base_space +---Run Phase [ hprop ]--- + ++ trigger phase [ irmapbuild ] +---Run Phase [ valueRangePropagation ]--- + ++ trigger phase [ identloops ] +---Run Phase [ hdse ]--- +---Run Phase [ epre ]--- + ++ trigger phase [ dominance ] + ++ trigger phase [ identloops ] + == epre invokes [ hdse ] == +---Run Phase [ rename2preg ]--- +---Run Phase [ lpre ]--- +---Run Phase [ storepre ]--- +---Run Phase [ copyprop ]--- +---Run Phase [ hdse ]--- +---Run Phase [ pregrename ]--- +---Run Phase [ bblayout ]--- +---Run Phase [ meemit ]--- + +``` \ No newline at end of file diff --git a/doc/en/DeveloperGuide.md b/doc/en/DeveloperGuide.md new file mode 100644 index 0000000000000000000000000000000000000000..7b604e2540d76f5c04f236e94ee23ebed3a2717e --- /dev/null +++ b/doc/en/DeveloperGuide.md @@ -0,0 +1,102 @@ +# Developer Guide + +By referring to this document, you can download the OpenArkCompiler source code to compile it. At the same time, this document also provide the guide of static code analysis to developers. + +## Prerequisites + +Prepare development environment by referring to Environment Configuration. + + +## Downloading Source Code + +Download address: . +You can download the OpenArkCompiler source code in `Clone` or `Download` mode. +> Note: The download directory of the source code is openarkcompiler by default. + + +## Compiling Source Code + + +Run the following command in the openarkcompiler directory to compile OpenArkCompiler. The output path is openarkcompiler/output/TYPE/bin, TYPE: aarch64-clang-release by default. + +``` +source build/envsetup.sh +make setup +make +``` +Command description: + +- `source build/envsetup.sh`: Initialize the environment and add the toolchain path openarkcompiler/output/bin of OpenArkCompiler to environment variables. +- `make`: Compile the release version of OpenArkCompiler. +- `make BUILD_TYPE=DEBUG`: Compile the debug version of OpenArkCompiler. + +Run the following command in the openarkcompiler directory to compile OpenArkCompiler and maple runtime. The output path is openarkcompiler/output/TYPE, TYPE: aarch64-clang-release by default. + +``` +source build/envsetup.sh arm release +make setup +make libcore +``` + +命令说明: + +- `make libcore` Compile the release version of OpenArkCompiler; +- `make libcore BUILD_TYPE=DEBUG` Compile the debug version of OpenArkCompiler; + +In addition, the OpenArkCompiler also provides a shell script which contains the command to compile OpenArkCompiler. The developer can run the script in the openarkcompiler directory to compile OpenArkCompiler. The command to run the script: + +``` +source build/build.sh +``` + +## Compiling Sample + +The Java basic library is required for OpenArkCompiler to compile a sample. The following uses the Java basic library provided by the Android OS to describe the sample compilation process. 
+ +**Preparing basic library** + +- Download the Android code and compile it locally to obtain the libcore JAR package. The Android 9.0.0_r45 version is recommended. + +- Gitee also provides the compiled libcore JAR file. Download address: https://gitee.com/mirrors/java-core/ . + +**Generating the libjava-core.mplt file** + +Before compilation, create the libjava-core directory in the openarkcompiler directory, copy the java-core.jar file to the libjava-core directory, and run the following commands in the openarkcompiler directory: + +``` +source build/envsetup.sh +make +cd libjava-core +jbc2mpl -injar java-core.jar -out libjava-core +``` + +After the execution is complete, the libjava-core.mplt file is generated in the directory. + +**Compiling sample code** + +The sample code is in the openarkcompiler/samples directory. + +Take the samples/helloworld/ code as an example. Run the following command in the openarkcompiler/ directory: + +``` +source build/envsetup.sh +make +cd samples/helloworld/ +make +``` + +## Static code analysis + +This part will guide you to do the static code analysis by using the clang-tidy. After the code is changed, the static code analysis will check the coding specifications to improve the code quality. + +Before the static code analysis, we need compiled the OpenArkCompiler. After that, using the code of src/maple_driver as the tested directory, run the following commands in the openarkcompiler directory: + +``` +cp output/compile_commands.json ./ +./tools/clang+llvm-8.0.0-x86_64-linux-gnu-ubuntu-16.04/share/clang/run-clang-tidy.py -clang-tidy-binary='./tools/clang+llvm-8.0.0-x86_64-linux-gnu-ubuntu-16.04/bin/clang-tidy' -clang-apply-replacements-binary='./tools/clang+llvm-8.0.0-x86_64-linux-gnu-ubuntu-16.04/bin/clang-apply-replacements' src/maple_driver/ +``` +Command description: + +- `cp output/compile_commands.json ./`: Copy the compile_commands.json in the output directory to the openarkcompiler directory, it is required by the clang-tidy, it contains the compile commands of OpenArkCompiler. + +- `./tools/clang+llvm-8.0.0-x86_64-linux-gnu-ubuntu-16.04/share/clang/run-clang-tidy.py`: Call the run-clang-tidy.py which is the parallel clang-tidy runner. The `./tools/clang+llvm-8.0.0-x86_64-linux-gnu-ubuntu-16.04/` directory is the directory of the release package of clang compiler. The `-clang-tidy-binary` set the path of clang-tidy binary. The `-clang-apply-replacements-binary` set the path of the clang-apply-replacements binary which is requried by the run-clang-tidy.py. The `src/maple_driver/` is the tested code directory. diff --git a/doc/en/DeveloperGuide4Utility.md b/doc/en/DeveloperGuide4Utility.md new file mode 100644 index 0000000000000000000000000000000000000000..d1262234c1b994a87719154a4f44881422282683 --- /dev/null +++ b/doc/en/DeveloperGuide4Utility.md @@ -0,0 +1,431 @@ +# Application Manual of Maple General Modules + +# Cast + +## `instance_of` and `safe_cast`. + +In principle, `RTTI`, that is, `dynamic_cast`, must be disabled for the use of `C++` in `maple`. The compiler system is complex. It will make the object relationship more complex to cast from a class object to a subclass object by class design. Therefore, the `maple` code implementation introduces the following designs: + +```c++ +SubClass *sub = nullptr; +if (base.Type() == SubType) { + sub = static_cast(base); +} +``` + +A property field is designed to implement the binding between a class and its subclass, achieving the same effect as `dynamic_cast`. 

However, this method has some disadvantages. First, whereas the binding relationship between `SubType` and `SubClass` is static and determined by the designer, the caller needs to make the relationship explicit, resulting in strong dependency. Second, not all scenarios are as intuitive as type comparison. Callers are prone to make mistakes in complex scenarios, causing shotgun surgery in later code rectification. Therefore, `safe_cast` is designed: the designer registers the casting relationship, and a caller only needs to call the `safe_cast` method.

### Registration method.

Use the `REGISTER_SAFE_CAST` macro to complete the registration. The declaration is as follows:

```c++
#define REGISTER_SAFE_CAST(type, condition)
```

`type` is a subclass type (assumed as `B`), and `condition` is a Boolean expression that matches `B` and all its subclasses. Example:

```c++
class A;
class B : A;
class C : B;
REGISTER_SAFE_CAST(B, from.Kind() == B || from.Kind() == C);
REGISTER_SAFE_CAST(C, from.Kind() == C);
```

`from` is a formal parameter name of a type transferred by an expression.

*Note:*

*- The registration supports the casting from a subclass to a class and from a class to a subclass.*

*- `condition` can be any Boolean expression. However, the designer must ensure that it complies with the inheritance relationship to avoid casting of non-inheritance relationships.*

*- For a complex `condition`, using `kind ==` to indicate an inheritance relationship tree is not recommended. Optimization needs to be considered. For example, the range of `kind` needs to be organized, and a specific bit flag needs to be used to achieve quick matching.*

#### Application scenarios.

1. For scenarios where only one type is matched:

```c++
SubClass *sub = safe_cast<SubClass>(base);
if (sub == nullptr) {
  // TODO
}
```

2. For scenarios where multiple types are matched:

```c++
if (instance_of<SubClass1>(base)) {
  auto *sub = static_cast<SubClass1*>(base);
  // TODO
} else if (instance_of<SubClass2>(base)) {
  auto *sub = static_cast<SubClass2*>(base);
  // TODO
}
...
```

*Note:*

*- In scenarios where types have been correctly identified, such as `switch case`, use `static_cast`.*

*- The return value of `safe_cast` is always a pointer, indicating whether the casting is successful.*

*- If the input is a pointer, both `instance_of` and `safe_cast` check whether the input is null. Therefore, if `base` is not null, the reference is preferentially transferred.*

# Container

## `Ptr`

`Ptr` simulates the behavior of a native pointer, but removes support for array operations.

In this method, data is validated in the construction and assignment operations, so that a feature of a pointer object needs to be validated only during construction and does not need to be validated again during transfer and use, thereby reducing the overhead of repeated checks.

```c++
template <typename T>
using PtrCheckerType = void (*)(const T*);

template <typename T>
constexpr void CheckNothing(const T*) {}

template <typename T, PtrCheckerType<T> Check = CheckNothing<T>>
class Ptr;
```

As shown in the preceding example, the default validation behavior of `Ptr` is to perform no check.

```c++
template <typename T>
using XXXPtr = utils::Ptr<T, XXXCheckFunc<T>>;  // XXXCheckFunc stands for a scenario-specific check function
```

In the preceding example, validated pointer objects for different scenarios can be defined, which are called security pointers.

*Note: The preceding behavior is extended based on the requirements of `safe_ptr`.
However, `safe_ptr` is more complex than the common `Ptr` scenario because it is specific to the validity of a pointer rather than features of a pointer object.* + +## `safe_ptr` + +For details, see the definition of `Ref` in the CPPCodingTalkAboutPointer. The `operator.` in `c++` cannot be overloaded. As a result, the `Ref` object cannot be built. However, the `safe_ptr` object, which is a non-null pointer object equivalent to the `Ref` object, can be defined. + +### Scenario: Container members such as arrays and dictionaries. + +For details, see the CPPCodingTalkAboutPointer. + +To solve the problem that `operator.` cannot be overloaded, the `ToRef` tool capability in `Utility` will be extended, ensuring that the pointer does not have overhead and is securely casted into a reference. It may have the following features: + +```c++ +template +constexpr T &ToRef(safe_ptr ptr) { + return *ptr; +} +``` + +*Note: For containers that are frequently used, containers of the `ref_xxx` series will be encapsulated to replace the `safe_ptr` application scenarios.* + +### Scenario: Object members. + +This is an unexpected scenario discovered after `safe_ptr` is developed. + +The compiler cannot generate default copies and transfers for classes of reference data members. However, the copy and transfer capabilities are required in many scenarios. In this case, the common method is to store the classes as pointer members. However, pointer members bring the following problems: + +1. When a pointer member is accessed, the validity of the pointer needs to be identified. +2. The behavior can be controllable by defining member roles during refactoring and evolution. However, the role that uses the member may not be notified, which may further cause hidden bugs. + +If `safe_ptr` is used to replace a raw pointer and `ToRef` is used to convert the reference at the `safe_ptr` dereference position (`ToRef` of a raw pointer has extra overheads), the preceding problems can be avoided. + +*Note: `safe_ptr` is designed as a container or object member. `&` is used for transferring function parameters.* + +## `mpl_iterator` + +The original name of `mpl_iterator` is `iterator`, but the name is the same as that of `using iterator` in the container. Therefore, the `mpl` prefix is added. + +Taking the design of the `ref_xxx` container and possible expansion of the small talk series in the future into consideration, repeated iterator design is troublesome. Therefore, a unified iterator container that continuously evolves is abstracted, which uses `mpl_iterator_traits` to quickly implement the iterator of the new container with a few configurations. + +Based on the iterator requirements of `ref_vector`, members of `mpl_iterator_traits` are designed as follows: + +```c++ +template +struct mpl_iterator_traits { + using iterator_category = typename std::iterator_traits::iterator_category; + using value_type = typename std::iterator_traits::value_type; + using difference_type = typename std::iterator_traits::difference_type; + using pointer = typename std::iterator_traits::pointer; + using reference = typename std::iterator_traits::reference; + + static reference operator_dereference(Iterator iter) { + return *iter; + }; + + static Iterator operator_arrow(Iterator iter) { + return iter; + } + + static reference operator_bracket(Iterator iter, difference_type n) { + return iter[n]; + } +}; + +``` + +## `ref_vector` + +For details, see the definition of `ref_vector` in the CPPCodingTalkAboutPointer. 
+ +Use the pointer array: `std::vector`. + +```c++ + int a = 0, b = 1; + + std::vector data; + data.push_back(&a); + data.push_back(&b); + ASSERT_EQ(*data[0], 0); + ASSERT_EQ(*data[1], 1); +``` + +Refactored as a reference array: `utils::ref_vector`. +```c++ + int a = 0, b = 1; + + ref_vector data; + data.push_back(a); + data.push_back(b); + ASSERT_EQ(data[0], 0); + ASSERT_EQ(data[1], 1); +``` + +## `Index` + +`Index` is designed to meet the static security requirements of basic types. + +For example, `GStrIdx`, `UStrIdx`, and `U16StrIdx` are involved in calculation in service code, and their interfaces are similar. Their bottom layers are `uint32`. If all formal parameters are defined as `uint32`, it will be a disaster. The caller must be careful, but it is difficult to avoid transferring incorrect data. So static type conflict is used and the compiler checks the correctness of the use, which will greatly reduce the errors. + +To define a static type, you only need to define different tags, as shown in the following example: + +```c++ +class GStrTag; +using GStrIdx = utils::Index; + +class UStrTag; +using UStrIdx = utils::Index; + +class U16StrTag; +using U16StrIdx = utils::Index; +``` + +# Generalize Pattern + +## `ObjectFactory` + +The `ObjectFactory` is an encapsulation for an abstract factory and is used to solve the following problems: + +1. Eliminate large functions with high cyclomatic complexity in code, such as `switch... case` and `if... else if...`. These functions are difficult to maintain and extend and have low flexibility.* (Note: Analyze semantics and design well instead of rigidly solving problems, for example, use template mode to implement design isolation instead of code isolation.)* + +2. The use of a factory will invert the dependency, making it easier to isolate the caller from the designer and the designer from each other. + +Example: + +```c++ +// Defines a key to mark the product type to be created by the factory. +enum class ObjectType { + kPlus, + kMinus +}; + +//Defines interfaces and interface protocols that all products must implement. +class Base { + public: + virtual ~Base() = default; + virtual int32_t DoIt(int32_t lhs, int32_t rhs) const = 0; +}; + +// Defines a factory by using ObjectType as the search keyword. Base indicates the interface protocol, and int32_t indicates the parameter type of all product constructor functions. +using TestObjectFactory = ObjectFactory; + +// Key, interface, and factory must be visible to both the registrant and caller. Therefore, the key may be in the .h file or summarized in the .cpp file. +// Products can be registered in different .cpp files as long as they can be registered in the factory. +// Defines a product. +class ObjectPlus : public Base { + public: + // Constructor function agreed upon during factory definition. + explicit ObjectPlus(int32_t base) : base(base) {} + virtual ~ObjectPlus() = default; + + // Conventions for defining interfaces. + virtual int32_t DoIt(int32_t lhs, int32_t rhs) const override { + return base + lhs + rhs; + } + + private: + int32_t base; +}; + +// Defines the product, which may be in another .cpp file. +class ObjectMinus : public Base { + public: + explicit ObjectMinus(int32_t base) : base(base) {} + virtual ~ObjectMinus() = default; + + virtual int32_t DoIt(int32_t lhs, int32_t rhs) const override { + return base + lhs - rhs; + } + + private: + int32_t base; +}; + +// Registers a product. 
The product registration mode varies depending on the distribution and loading of keys, interfaces, factories, and products. Ensure that the registration is successful. +// Initializes the static variable to ensure registration. +bool RegisterObject() { + RegisterFactoryObject(); + RegisterFactoryObject(); +} +static auto testObjectFactory = RegisterObject(); + +TEST(TestFactory, ObjectFactory) { + // Obtains a product object. Check whether the value is null. The example is omitted. + auto obj = CreateProductObject(ObjectType::kPlus, 10); + ASSERT_EQ(obj->DoIt(1, 2), 13); + obj = CreateProductObject(ObjectType::kMinus, 10); + ASSERT_EQ(obj->DoIt(1, 2), 9); +} +``` + +## `FunctionFactory` + +Similar to `ObjectFactory`, `FunctionFactory` is used to simplify the complexity of abstract factories. In most scenarios, `FunctionFactory` is easier to compile and use. + +Example: + +```c++ +// Defines a key to mark the product type to be created by the factory. +enum class FunctionType { + kPlus, + kMinus +}; + +// Defines the interface and factory by using the FunctionType as the search keyword and the int32_t(int32_t, int32_t) as the function protocol. +using TestFunctionFactory = FunctionFactory; + +// Defines a product. +int32_t Plus(int32_t lhs, int32_t rhs) { + return lhs + rhs; +} + +// Defines the product, which may be in another .cpp file. +int32_t Minus(int32_t lhs, int32_t rhs) { + return lhs - rhs; +} + +// Registers a product. The product registration mode varies depending on the distribution and loading of keys, interfaces, factories, and products. Ensure that the registration is successful. +// Uses the singleton pattern to ensure registration. +bool RegisterFunction() { + RegisterFactoryFunction(FunctionType::kPlus, Plus); + RegisterFactoryFunction(FunctionType::kMinus, Minus); +} +void AutoFunctionLoader() { + static auto testObjectFactor = RegisterFunction(); +} + +TEST(TestFactory, TestAll) { + // Loads the product. + AutoFunctionLoader(); + + // Obtains a product object. Check whether the value is null. The example is omitted. + auto func = CreateProductFunction(FunctionType::kPlus); + ASSERT_EQ(func(1, 2), 3); + func = CreateProductFunction(FunctionType::kMinus); + ASSERT_EQ(func(1, 2), -1); +} +``` + + +# Utility + +## `ToRef` + +In the scenario where pointers transfer parameters in the new code and refactored code of Ark Compiler, it is expected that all pointers are validated and then transferred to the called function in reference mode. In most cases, the called function should not bear the risk of null pointers in function parameters and the overhead of judgment. + +The common format is as follows (In the example, `DoIt` and `Run` can be used as third-party interfaces and cannot be changed): + +```c++ +A *DoIt(B &b); +void Run(B *b) { + CHECK_NULL_FATAL(b); + // ... + A *a = DoIt(*b); + CHECK_NULL_FATAL(a); + a->Do; +} +``` + +If most pointers are transferred into a reference type once obtained: + +```c++ +A *DoIt(B &b); +void Run(B *b) { + B &bRef = utils::ToRef(b); + // ... + A &a = utils::ToRef(DoIt(bRef)); + a.Do; +} +``` + +If `b` is used only once: + +```c++ +A *DoIt(B &b); +void Run(B *b) { + // ... + A &a = utils::ToRef(DoIt(utils::ToRef(b))); + a.Do; +} +``` + +## `bit_field_v`&`lbit_field_v` + +Using a bit to mark a state switch combination is a design method that saves memory and can be efficiently encoded. 
Generally, the enumeration definition or constant definition is written as follows: + +```c++ +enum BBAttr : uint32 { + kBBAttrIsEntry = 0x02, + kBBAttrIsExit = 0x04, + kBBAttrWontExit = 0x08, + kBBAttrIsTry = 0x10, + kBBAttrIsTryEnd = 0x20, + kBBAttrIsJSCatch = 0x40, + kBBAttrIsJSFinally = 0x80, + kBBAttrIsCatch = 0x0100, + kBBAttrIsJavaFinally = 0x0200, + kBBAttrArtificial = 0x0400, + kBBAttrIsInLoop = 0x0800, + kBBAttrIsInLoopForEA = 0x1000 +}; +``` + +This design is obviously intended to use bits to record some attribute information, but the bits are obscure and difficult to maintain and read. + +Therefore, a clearer design is required. + +```c++ +enum BBAttr : uint32 { + kBBAttrIsEntry = utils::bit_field_v<1>, + kBBAttrIsExit = utils::bit_field_v<2>, + kBBAttrWontExit = utils::bit_field_v<3>, + kBBAttrIsTry = utils::bit_field_v<4>, + kBBAttrIsTryEnd = utils::bit_field_v<5>, + kBBAttrIsJSCatch = utils::bit_field_v<6>, + kBBAttrIsJSFinally = utils::bit_field_v<7>, + kBBAttrIsCatch = utils::bit_field_v<8>, + kBBAttrIsJavaFinally = utils::bit_field_v<9>, + kBBAttrArtificial = utils::bit_field_v<10>, + kBBAttrIsInLoop = utils::bit_field_v<11>, + kBBAttrIsInLoopForEA = utils::bit_field_v<12> +}; +``` + +`bit_field_v`:`uint32` and `lbit_field_v`:`uint64` are supported. In the future, `sbit_field_v`:`uint16` and `bbit_field_v`:`uint8` will be added as required. diff --git a/doc/en/DevelopmentPreparation.md b/doc/en/DevelopmentPreparation.md new file mode 100644 index 0000000000000000000000000000000000000000..912e9653876f608fe98634ac787ffcdd36dd1aef --- /dev/null +++ b/doc/en/DevelopmentPreparation.md @@ -0,0 +1,72 @@ +## Environment Configuration + +## Recommended Hardware: + +- 2 GHz dual-core processor or higher + +- 2 GB system memory or higher + +- 200 GB available disk space + +## Recommended Development Environment + +Install a 64-bit Ubuntu (Ubuntu 16.04, 18.04 or 20.04 is required). + +``` +sudo apt-get -y install clang llvm lld libelf-dev libssl-dev python qemu openjdk-8-jre-headless openjdk-8-jdk-headless cmake +sudo apt-get -y install git build-essential zlib1g-dev libc6-dev-i386 g++-multilib gcc-multilib linux-libc-dev:i386 + +Ubuntu 16.04: +sudo apt-get -y install gcc-5-aarch64-linux-gnu g++-5-aarch64-linux-gnu + +Ubuntu 18.04: +sudo apt-get -y install gcc-7-aarch64-linux-gnu g++-7-aarch64-linux-gnu + +Ubuntu 20.04: +sudo apt-get -y install gcc-9-aarch64-linux-gnu g++-9-aarch64-linux-gnu libncurses5 +``` + +## Auto Installation of Tools + +``` +source build/envsetup.sh arm release +make setup + +Note: the following steps are for reference only. All required tools are installed during above "make setup" +``` + +## Installing and Configuring Clang (for Compiling the OpenArkCompiler Code) + +Download **clang+llvm-10.0.0-x86_64-linux-gnu-ubuntu-18.04** +LLVM download address: http://releases.llvm.org/download.html#10.0.0 + +Place the downloaded files in the `openarkcompiler/tools` directory, open the `openarkcompiler/build/config.gni` file, and set the three variables `GN_C_COMPILER`, `GN_CXX_COMPILER`, and `GN_AR_COMPILER` to the path where Clang is located. For example: + +``` +GN_C_COMPILER = "${MAPLE_ROOT}/tools/clang+llvm-10.0.0-x86_64-linux-gnu-ubuntu-18.04/bin/clang" +GN_CXX_COMPILER = "${MAPLE_ROOT}/tools/clang+llvm-10.0.0-x86_64-linux-gnu-ubuntu-18.04/bin/clang++" +GN_AR_COMPILER = "${MAPLE_ROOT}/tools/clang+llvm-10.0.0-x86_64-linux-gnu-ubuntu-18.04/bin/llvm-ar" +``` + +${MAPLE_ROOT} is the root directory of the OpenArkCompiler source code. 
+ +## Installing and configuring Ninja and GN + +Download **Ninja(v1.10.0)** and **GN(Linux Version)** +Ninja download address: https://github.com/ninja-build/ninja/releases +GN download address: https://gitee.com/xlnb/gn_binary + +Place the executable programs of GN and Ninja in the openarkcompiler/tools directory, modify these two files to be executable. + +``` +cd openarkcompiler/tools +chmod 775 gn +chmod 775 ninja +``` + +Open the openarkcompiler/Makefile file, and set the two variables GN and NINJA to the path where the executable programs of GN and Ninja are located. For example, + +``` +GN := ${MAPLE_ROOT}/tools/gn/gn +NINJA := ${MAPLE_ROOT}/tools/ninja/ninja +``` diff --git a/doc/en/MapleDriverOverview.md b/doc/en/MapleDriverOverview.md new file mode 100644 index 0000000000000000000000000000000000000000..6d29efbd0fdbcded50fc5ba54aec3b378f5bf417 --- /dev/null +++ b/doc/en/MapleDriverOverview.md @@ -0,0 +1,48 @@ +# Maple Driver Overview +## Introduction +This document describes current state of Maple Driver. This includes design, usage, goals and internal implementation. + +## Goals +The Maple driver is intended to meet requirements of a good compiler driver such as Clang. In other words it should be: +- Flexible to support new features +- Efficient with low overhead +- Easy to use + +The end goal of driver development is full support of gcc options with direct integration into the CMake build system. + +## Design and Implementation + +### Design overview + +The diagram below shows significant components of the Maple Driver architecture as well as their realtions to one another. The red components represent essential driver parts(classes, methods), the blue components are output/input data structures, the green components are important helper classes. + +![](media/MapleDriverStructure.png) + +### Driver stages + +The driver functionality can be divded into five stages: + +**1. Parsing options** + +The input command-line argument strings are firstly checked for correctness of format and transformed into pairs of key-value, keys are then checked for a match in `usages` multimap (previously created from a helper data structure) of `OptionParser`. The `Option` class also contains `Descriptor` data structure, describing required parsing details for option argument with some additional data. Then the argument is parsed. The driver expects to understand all available options. The result is then written in the `options` vector of the `OptionParser` class. + + +**2. Filling MplOptions** + +After parsing input, `MplOptions` is filled depending on the results: firstly the run type is decided to be either automatic or custom (depends on the presence of `--run` option), then input files are initialized and checked for validity. If the run type is automatic Maple Driver will also configure the code generation pipeline by itself(you will recieve assembler file as the final output), depending on the extension of the first input file. Then other options are handled, including `--option`, value of which must contain options for all phases of compilation, the value is parsed using the same methods and data structures that were used in parsing of general options, the results are pushed in map `exeOptions` of the `MplOptions` class. Then the Maple Driver attempts to open input file and in case of success moves on to the next stage. + +**3. 
Compiler Selection** + +Upon the completion of the previous stage Maple Driver triggers `CompilerFactory` class constructor, which creates classes of supported compilers and saves pointers to all of them in `supportedCompilers` data structure. + +**4. Phase specific option construction** + +`CompilerFactory` calls `Compile` methods of selected Compilers, during which default and user-determined options are constructed and written in a string. The main problem is the translation of commands from one style to another, some driver components, like `MplcgCompiler` require their own methods to work correctly, while others, like `AsCompiler` work with just a handful of options and their main purpose is to determine the path of the executable to call and pass on their input and output arguments. + +However, `MaplecombCompiler` and `MplcgCompiler` require special pipeline, they do not call executables and pass on command-line to them, but interact the with input file using `MIRModule`, `MIRParser` and `DriverRunner` classes. `MIRModule` is a data structure, purpose of which is similar to `MplOptions`, two previously mentioned compilers store crucial data in it (name of the input file, source language, etc.); `MIRParser` as the name implies exists to parse Maple IR; `DriverRunner` is an orchestrator that works with two previous data structures and also stores options for phases it is responsible for and other data, required in compilation. + +**5. Execute** + +After that the command-line and full path to executable is redirected to the `Exe` method of the `SafeExe` class, where it is handled and executed via child process. In case of `MaplecombCompiler` and `MplcgCompiler` the `Run` method of the `DriverRunner` is called and issues job to the Phase Manger. + + diff --git a/doc/en/MapleIRDesign.md b/doc/en/MapleIRDesign.md new file mode 100644 index 0000000000000000000000000000000000000000..ac0ca70085cb5c81e02c52c78786515f52f150f4 --- /dev/null +++ b/doc/en/MapleIRDesign.md @@ -0,0 +1,2616 @@ +``` +# +# Copyright (C) [2020-2021] Futurewei Technologies, Inc. All rights reverved. +# +# Licensed under the Mulan Permissive Software License v2. +# You can use this software according to the terms and conditions of the MulanPSL - 2.0. +# You may obtain a copy of MulanPSL - 2.0 at: +# +# https://opensource.org/licenses/MulanPSL-2.0 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the MulanPSL - 2.0 for more details. 
+# +``` + +MAPLE IR Specification +====================== + +Contents {#contents .TOC-Heading} +======== + +[1 Introduction 7](#introduction) + +[2 Program Representation in MAPLE IR 8](#program-representation-in-maple-ir) + +[2.1 Symbol Tables 9](#symbol-tables) + +[2.2 Primitive Types 9](#primitive-types) + +[2.3 Constants 10](#constants) + +[2.4 Identifiers (\$ % &) 10](#identifiers) + +[2.5 Pseudo-registers (%) 11](#pseudo-registers) + +[2.6 Special Registers (%%) 11](#special-registers) + +[2.7 Statement Labels (@) 11](#statement-labels) + +[2.8 Storage Accesses 11](#storage-accesses) + +[2.9 Aggregates 12](#aggregates) + +[2.9.1 Arrays 12](#arrays) + +[2.9.2 Structures 12](#structures) + +[3 Instruction Specification 13](#instruction-specification) + +[3.1 Storage Access Instructions 13](#storage-access-instructions) + +[3.1.1 dassign 13](#dassign) + +[3.1.2 dread 14](#dread) + +[3.1.3 iassign 14](#iassign) + +[3.1.4 iread 14](#iread) + +[3.1.5 iassignoff 14](#iassignoff) + +[3.1.6 iassignfpoff 14](#iassignfpoff) + +[3.1.7 iassignpcoff 14](#iassignpcoff) + +[3.1.8 ireadoff 15](#ireadoff) + +[3.1.9 ireadfpoff 15](#ireadfpoff) + +[3.1.10 ireadpcoff 15](#ireadpcoff) + +[3.1.11 regassign 15](#regassign) + +[3.1.12 regread 15](#regread) + +[3.2 Leaf Opcodes 15](#leaf-opcodes) + +[3.2.1 addrof 15](#addrof) + +[3.2.2 addroflabel 16](#addroflabel) + +[3.2.3 addroffunc 16](#addroffunc) + +[3.2.4 addroffpc 16](#addroffpc) + +[3.2.5 conststr 16](#conststr) + +[3.2.6 conststr16 16](#conststr16) + +[3.2.7 constval 16](#constval) + +[3.2.8 sizeoftype 16](#sizeoftype) + +[3.3 Unary Expression Opcodes 16](#unary-expression-opcodes) + +[3.3.1 abs 16](#abs) + +[3.3.2 bnot 17](#bnot) + +[3.3.3 extractbits 17](#extractbits) + +[3.3.4 iaddrof 17](#iaddrof) + +[3.3.5 lnot 17](#lnot) + +[3.3.6 neg 17](#neg) + +[3.3.7 recip 17](#recip) + +[3.3.8 sext 17](#sext) + +[3.3.9 sqrt 17](#sqrt) + +[3.3.10 zext 17](#zext) + +[3.4 Type Conversion Expression Opcodes 18](#type-conversion-expression-opcodes) + +[3.4.1 ceil 18](#ceil) + +[3.4.2 cvt 18](#cvt) + +[3.4.3 floor 18](#floor) + +[3.4.4 retype 18](#retype) + +[3.4.5 round 18](#round) + +[3.4.6 trunc 18](#trunc) + +[3.5 Binary Expression Opcodes 19](#binary-expression-opcodes) + +[3.5.1 add 19](#add) + +[3.5.2 ashr 19](#ashr) + +[3.5.3 band 19](#band) + +[3.5.4 bior 19](#bior) + +[3.5.5 bxor 19](#bxor) + +[3.5.6 cand 19](#cand) + +[3.5.7 cior 19](#cior) + +[3.5.8 cmp 19](#cmp) + +[3.5.9 cmpg 19](#cmpg) + +[3.5.10 cmpl 20](#cmpl) + +[3.5.11 depositbits 20](#depositbits) + +[3.5.12 div 20](#div) + +[3.5.13 eq 20](#eq) + +[3.5.14 ge 20](#ge) + +[3.5.15 gt 20](#gt) + +[3.5.16 land 20](#land) + +[3.5.17 lior 20](#lior) + +[3.5.18 le 20](#le) + +[3.5.19 lshr 20](#lshr) + +[3.5.20 lt 21](#lt) + +[3.5.21 max 21](#max) + +[3.5.22 min 21](#min) + +[3.5.23 mul 21](#mul) + +[3.5.24 ne 21](#ne) + +[3.5.25 rem 21](#rem) + +[3.5.26 shl 21](#shl) + +[3.5.27 sub 21](#sub) + +[3.6 Ternary Expression Opcodes 21](#ternary-expression-opcodes) + +[3.6.1 select 21](#select) + +[3.7 N-ary Expression Opcodes 22](#n-ary-expression-opcodes) + +[3.7.1 array 22](#array) + +[3.7.2 intrinsicop 22](#intrinsicop) + +[3.7.3 intrinsicopwithtype 22](#intrinsicopwithtype) + +[3.8 Control Flow Statements 22](#control-flow-statements) + +[3.8.1 Hierarchical control flow statements 22](#hierarchical-control-flow-statements) + +[3.8.1.1 doloop 23](#doloop) + +[3.8.1.2 dowhile 23](#dowhile) + +[3.8.1.3 foreachelem 23](#foreachelem) + +[3.8.1.4 if 23](#if) + +[3.8.1.5 while 23](#while) + +[3.8.2 Flat 
control flow statements 24](#flat-control-flow-statements) + +[3.8.2.1 brfalse 24](#brfalse) + +[3.8.2.2 brtrue 24](#brtrue) + +[3.8.2.3 goto 24](#goto) + +[3.8.2.4 igoto 24](#igoto) + +[3.8.2.5 multiway 24](#multiway) + +[3.8.2.6 return 24](#return) + +[3.8.2.7 switch 24](#switch) + +[3.8.2.8 rangegoto 25](#rangegoto) + +[3.8.2.9 indexgoto 25](#indexgoto) + +[3.9 Call Statements 25](#call-statements) + +[3.9.1 call 25](#call) + +[3.9.2 callinstant 26](#callinstant) + +[3.9.3 icall 26](#icall) + +[3.9.4 intrinsiccall 26](#intrinsiccall) + +[3.9.5 intrinsiccallwithtype 26](#intrinsiccallwithtype) + +[3.9.6 xintrinsiccall 26](#xintrinsiccall) + +[3.10 Java Call Statements 26](#java-call-statements) + +[3.10.1 virtualcall 26](#virtualcall) + +[3.10.2 superclasscall 26](#superclasscall) + +[3.10.3 interfacecall 26](#interfacecall) + +[3.11 Calls with Return Values Assigned 27](#calls-with-return-values-assigned) + +[3.11.1 callassigned 27](#callassigned) + +[3.12 Exceptions Handling 27](#exceptions-handling) + +[3.12.1 jstry 28](#jstry) + +[3.12.2 javatry 28](#javatry) + +[3.12.3 cpptry 29](#cpptry) + +[3.12.4 throw 29](#throw) + +[3.12.5 jscatch 29](#jscatch) + +[3.12.6 javacatch 29](#javacatch) + +[3.12.7 cppcatch 29](#cppcatch) + +[3.12.8 finally 29](#finally) + +[3.12.9 cleanuptry 30](#cleanuptry) + +[3.12.10 endtry 30](#endtry) + +[3.12.11 gosub 30](#gosub) + +[3.12.12 retsub 30](#retsub) + +[3.13 Memory Allocation and Deallocation 30](#memory-allocation-and-deallocation) + +[3.13.1 alloca 30](#alloca) + +[3.13.2 decref 31](#decref) + +[3.13.3 decrefreset 31](#decrefreset) + +[3.13.4 free 31](#free) + +[3.13.5 gcmalloc 31](#gcmalloc) + +[3.13.6 gcmallocjarray 31](#gcmallocjarray) + +[3.13.7 gcpermalloc 31](#gcpermalloc) + +[3.13.8 incref 32](#incref) + +[3.13.9 malloc 32](#malloc) + +[3.13.10 stackmalloc 32](#stackmalloc) + +[3.13.11 stackmallocjarray 32](#stackmallocjarray) + +[3.14 Other Statements 32](#other-statements) + +[3.14.1 assertge 32](#assertge) + +[3.14.2 assertlt 32](#assertlt) + +[3.14.3 assertnonnull 32](#assertnonnull) + +[3.14.4 eval 33](#eval) + +[3.14.5 checkpoint 33](#checkpoint) + +[3.14.6 membaracquire 33](#membaracquire) + +[3.14.7 membarrelease 33](#membarrelease) + +[3.14.8 membarfull 33](#membarfull) + +[3.14.9 syncenter 33](#syncenter) + +[3.14.10 syncexit 33](#syncexit) + +[4 Declaration Specification 34](#declaration-specification) + +[4.1 Module Declaration 34](#module-declaration) + +[4.1.1 entryfunc 34](#entryfunc) + +[4.1.2 flavor 34](#flavor) + +[4.1.3 globalmemmap 34](#globalmemmap) + +[4.1.4 globalmemsize 34](#globalmemsize) + +[4.1.5 globalwordstypetagged 34](#globalwordstypetagged) + +[4.1.6 globalwordsrefcounted 35](#globalwordsrefcounted) + +[4.1.7 id 35](#id) + +[4.1.8 import 35](#import) + +[4.1.9 importpath 35](#importpath) + +[4.1.10 numfuncs 35](#numfuncs) + +[4.1.11 srclang 35](#srclang) + +[4.2 Variable Declaration 35](#variable-declaration) + +[4.3 Pseudo-register Declarations 36](#pseudo-register-declarations) + +[4.4 Type Specification 36](#type-specification) + +[4.4.1 Incomplete Type Specification 37](#incomplete-type-specification) + +[4.5 Type Declaration 37](#type-declaration) + +[4.6 Java Class and Interface Declaration 38](#java-class-and-interface-declaration) + +[4.7 Function Declaration 39](#function-declaration) + +[4.7.1 funcsize 39](#funcsize) + +[4.7.2 framesize 39](#framesize) + +[4.7.3 moduleid 39](#moduleid) + +[4.7.4 upformalsize 39](#upformalsize) + +[4.7.5 formalwordstypetagged 40](#formalwordstypetagged) + +[4.7.6 
formalwordsrefcounted 40](#formalwordsrefcounted) + +[4.7.7 localwordstypetagged 40](#localwordstypetagged) + +[4.7.8 localwordsrefcounted 40](#localwordsrefcounted) + +[4.8 Initializations 40](#initializations) + +[4.9 Type Parameters 41](#type-parameters) + +Introduction +============ + +MAPLE IR is an internal program representation to support program +compilation, analysis, optimization and execution. The name MAPLE comes +from the acronym "Multiple Architecture and Programming Language +Environment". Because information in the source program may be useful +for program analysis and optimization, MAPLE IR aims to provide +information about the source program that is as complete as possible. + +Program information is represented in two parts: the declaration part +for defining the program constructs and the execution part specifying +the program code. The former is often collectively referred to as the +symbol table, though there can be different types of tables. + +MAPLE IR is target-independent. It is not pre-disposed towards any +specific target processor or processor characteristic. + +MAPLE IR can be regarded as the ISA of a virtual machine (VM). The MAPLE +VM can be regarded as a general purpose processor that takes MAPLE IR as +input and directly executes the program portion of the MAPLE IR. + +MAPLE IR is the common representation for programs compiled from +different programming languages, which include general purpose languages +like C, C++, Java and Javascript. MAPLE IR is extensible. As additional +languages, including domain-specific languages, are compiled into MAPLE +IR, more constructs will be added to represent constructs unique to each +language. + +The compilation process can be viewed as a gradual lowering of the +program representation from high level human perceivable form to low +level machine executable form. MAPLE IR is capable of supporting all +known program analyses and optimizations, owing to its flexibility of +being able to represent program code at multiple semantic levels. At the +highest level, MAPLE IR exhibits more variety of program constructs, +corresponding to abstract language operations or constructs unique to +the language it is translated from. As a result, the code sequences at +the higher levels are shorter. Language or domain specific optimizations +are best performed at the higher levels. At the higher levels, there are +also constructs that are hierarchical in nature, like nested blocks and +expression trees. Nearly all program information is retained at the +highest level. + +As compilation proceeds, MAPLE IR is gradually lowered so that the +granularity of its operations corresponds more closely to the +instructions of a general purpose processor. The code sequences become +longer as many high-level constructs are disallowed. At the same time, +the program constructs become less hierarchical. It is at the lower +levels where general purpose optimizations are performed. In particular, +at the lowest level, MAPLE IR instructions map nearly one-to-one to the +machine instructions of the mainstream processor ISAs. This is where the +effects of optimizations at the IR level are maximized, as each +eliminated operation will have the corresponding effect on the target +machine. At the lowest level, all operations, including type conversion +operations, are explicitly expressed at the IR level so that they can be +optimized by the compiler. 
The lowest level of MAPLE IR is also its +executable form, where the program structure is flat to allow the +sequential form of program execution. Expression trees are laid out in +prefix form and they are evaluated by the execution engine using the +stack machine model. + +Program Representation in MAPLE IR +================================== + +The internal representation of MAPLE IR consists of tables for the +declaration part and tree nodes for the execution part. In the execution +part, each tree node represents one MAPLE IR instruction, and is just +large enough to store the instruction's contents. Externally, MAPLE IR +can exist in either binary or ASCII formats. The binary format is a +straightforward dump of the byte contents of the internal program data +structures. The ASCII form is editable, which implies that it is +possible to program in MAPLE IR directly. The ASCII form of MAPLE IR has +a layout similar to the C language. Declarations are followed by +executable code. Expressions are displayed in in-fix notation, using +parentheses to explicitly indicate the nesting relationships. + +The language front-end compiles a source file into a MAPLE IR file. +Thus, each MAPLE IR file corresponds to a compilation unit, referred to +as a *module*. A module is made up of declarations at the global scope. +Among the declarations are functions. Inside each function are +declarations at the local scope followed by the executable code of the +function. + +There are three kinds of executable nodes in MAPLE IR: + +1. Leaf nodes - Also called terminal nodes, these nodes denote a value + at execution time, which may be a constant or the value of a storage + unit. + +2. Expression nodes - An expression node performs an operation on its + operands to compute a result. Its result is a function of the values + of its operands and nothing else. Each operand can be either a leaf + node or another expression node. Expression nodes are the internal + nodes of expression trees. + +3. Statement nodes - These represent the flow of control. Execution + starts at the entry of the function and continues sequentially + statement by statement until a control flow statement is executed. + Apart from modifying control flow, statements can also modify data + storage in the program. A statement node has operands that can be + leaf, expression or statement nodes. + +In all the executable nodes, the opcode field specifies the operation of +the node, followed by additional field specification relevant to the +opcode. The operands for the node are specified inside parentheses +separated by commas. The general form is: + +``` + opcode fields (opnd0, opnd1, opnd2) +``` + +For example, the C statement \"a = b\" is translated to the **dassign** +node that assigns the right hand side operand b to a . + +``` + dassign $a (dread i32 $b) +``` + +In the declaration part, each declaration is a statement. Each +declaration or execution statement must start a new line, and each line +cannot contain more than one statement node. A statement node can occupy +more than one line. The character \'\#\' can appear anywhere to indicate +that the contents from the \'\#\' to the end of the line are comments. + +Symbol Tables +------------- + +Program information that is of declarative nature is stored in the +symbol table portion of the IR. Having the executable instructions refer +to the symbol tables reduces the amount of information that needs to be +stored in the executable instructions. 
For each declaration scope, there +is a main table called the Symbol Table that manages all the +user-defined names in that scope. This implies one global Symbol Table +and a local Symbol Table for each function declared in the file. The +various types of symbol entries correspond to the kinds of constructs +that can be assigned names, including: + +1. Types + +2. Variables + +3. Functions (either prototypes or with function bodies) + +In the ASCII format, the IR instructions refer to the various symbols by +their names. In the binary representation, the symbols are referred to +via their table indices.. + +Primitive Types +--------------- + +Primitive types can be regarded as pre-defined types supported by the +execution engine such that they can be directly operated on. They also +play a part in conveying the semantics of operations, as addresses are +distinct from unsigned integers. The number in the primitive type name +indicates the storage size in bits. + +The primitive types are: + +``` + no type - void + signed integers - i8, i16, i32, i64 + unsigned integers - u8, u16, u32, u64 + booleans- u1 + addresses - ptr, ref, a32, a64 + floating point numbers - f32, f64 + complex numbers - c64, c128 + aggregates - agg + javascript types: + dynany + dynu32 + dyni32 + dynundef + dynnull + dynhole + dynbool + dynptr + dynf64 + dynf32 + dynstr + dynob + SIMD types -- v2i64, v4i32, v8i16, v16i8, v2f64, v4f32 + unknown +``` + +An instruction that produces or operates on values must specify the +primitive type in the instruction, as the type is not necessarily +implied by the opcode. There is the distinction between result type and +operand type. Result type can be regarded as the type of the value as it +resides in a machine register, because arithmetic operations in the +mainstream processor architectures are mostly register-based. When an +instruction only specifies a single primitive type, the type specified +applies to both the operands and the result. In the case of instructions +where the operand and result types may differ, the primitive type +specified is the result type, and a second field is added to specify the +operand type. + +Some opcodes are applicable to non-primitive (or derived) types, as in +an aggregate assignment. When the type is derived, agg can be used. In +such cases, the data size can be found by looking up the type of the +symbol . + +The primitive types *ptr* and *ref* are the target-independent types for +addresses. *ref* conveys the additional semantics that the address is a +reference to a run-time managed block of memory or object in the heap. +Uses of ptr or ref instead of a32 or a64 allow the IR to be independent +of the target machine by not manifesting the size of addresses until the +later target-dependent compilation phases. + +The primitive type unknown is used by the language front-end when the +type of a field in an object has not been fully resolved because the +full definition resides in a different compilation unit. + +Constants +--------- + +Constants in MAPLE IR are always of one of the primitive types. + +Integer and address (pointer) types can be specified in decimal or in +hexadecimal using the 0x prefix. + +Floating point types can be specified in hexadecimal or as floating +point literal as in standard C. + +Single characters enclosed in single quotes can be used for i8 and u8 +constants. + +String literals are enclosed in double quotes. + +For the complex and SIMD types, the group of values are enclosed in +square brackets separated by commas. 
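To make the notation concrete, here is a small sketch of how constants of the different primitive types might be written in ASCII MAPLE IR (the variable names $i, $f, $c and $v are hypothetical; dassign and constval are described later in this document):

```
  dassign $i (constval i32 0x1F)            # integer constant in hexadecimal
  dassign $f (constval f64 3.25)            # floating point literal as in C
  dassign $c (constval u8 'a')              # single character in single quotes
  dassign $v (constval v4i32 [1, 2, 3, 4])  # SIMD constant grouped in square brackets
```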
+ +Identifiers (\$ % &) +-------------------- + +In ASCII MAPLE IR, standalone identifier names are regarded as keywords +of the MAPLE IR language. To refer to entries in the symbol tables, +identifier names must be prefixed. + +**\$** - Identifiers prefixed with \'\$\' are global variables and will +be looked up in the global Symbol Table. + +**%** - Identifiers prefixed with \'%\' are local variables and will be +looked up in the local Symbol Table. + +**&** - Identifiers prefixed with \'&\' are function or method names and +will be looked up in the Functions Table. The major purpose of these +prefixes is to avoid name clash with the keywords (opcode names, etc.) +in the IR. + +Pseudo-registers (%) +-------------------- + +Pseudo-registers can be regarded as local variables of a primitive type +whose addresses are never taken. They can be declared implicitly by +their appearances. The primitive type associated with a pseudo-register +is sticky. + +Because pseudo-registers can only be created to store primitive types, +the use of field IDs does not apply to them. Pseudo-registers are +referred to by the \'%\' prefix followed by a number. This distinguishes +them from other local variables that are not pseudo-registers, as their +names cannot start with a number. + +The compiler will promote variables to pseudo-registers. To avoid the +loss of high level type information when a variable is promoted to +pseudo-registers, reg declarations are used to provide the type +information associated with the pseudo-registers. + +Special Registers (%%) +---------------------- + +Special registers are registers with special meaning. They are all +specified using %% as prefix. **%%SP** is the stack pointer and **%%FP** +the frame pointer in referencing the stack frame of the current +function. **%%GP** is the global pointer used for addressing global +variables. **%%PC** is the program counter. **%%thrownval** stores the +thrown value after throwing an exception. + +Special registers **%%retval0**, **%%retval1**, **%%retval2**, etc. are +used for fetching the multiple values returned by a call. They are +overwritten by each call, and should only be read at most once after +each call. They can assume whatever is the type of the return value. + +Statement Labels (@) +-------------------- + +Label names are prefixed with \'@\' which serves to identify them. Any +statement beginning with a label name defines that label as referring to +that text position. Labels are only referred to locally by goto and +branch statements. + +Storage Accesses +---------------- + +Since MAPLE IR is target-independent, it does not exhibit any +pre-disposition as to how storage are allocated for the program +variables. It only applies rules defined by the language regarding +storage. + +In general, there are two distinct kinds of storage accesses: direct and +indirect. Direct accesses do not require any run-time computation to +determine the exact address location. Indirect accesses require address +arithmetic before the location can be determined. Indirect accesses are +associated with pointer dereferences and arrays. Direct accesses are +associated with scalar variables and fixed fields inside structures. + +Direct accesses can be mapped to pseudo-register if the variable or +field has no alias. Indirect accesses cannot be mapped to +pseudo-registers unless the computed address does not change. 
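As a rough illustration of this distinction, using the access opcodes described in the following paragraphs (the symbols $g, %p and %x are hypothetical):

```
  # direct accesses: the locations of $g and %x are known without run-time address computation
  dassign $g (dread i32 %x)
  # indirect access: the address must first be computed by evaluating the pointer %p
  iassign <* i32> (dread ptr %p, dread i32 %x)
```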
+ +In MAPLE IR, **dassign** and **dread** are the opcodes for direct +assignments and direct references; **iassign** and **iread** are the +opcodes for indirect assignments and indirect references. + +Aggregates +---------- + +Aggregates (or composites) are either structures or arrays. They both +designate a grouping of storage elements. In structures, the storage +elements are designated by field names and can be of different types and +sizes. Classes and objects are special kinds of structures in +object-oriented programming languages. In arrays, the same storage +element is repeated a number of times and the elements are accessed via +index (or subscript). + +### Arrays + +Array subscripting designates address computation. Since making the +subscripts stand out facilitate data dependency analysis and other loop +oriented optimizations, MAPLE IR represents array subscripting using the +special **array** opcode, which returns the address resulting from the +subscripting operation. For example, \"a\[i\] = i\" is: +``` + # <* [10] i32> is pointer to array of 10 ints + iassign <* i32> ( + array a32 <* [10] i32> (addrof a32 $a, dread i32 $i), + dread i32 \$i) +``` + +and \"x = a\[i,j\]\" is: + + +``` + # <* [10] [10] i32 indicates pointer to a 10x10 matrix of ints + dassign $x ( + iread i32 <* i32> ( + array a32 <* [10] [10] i32> (addrof a32 $a, dread i32 $i, dread i32 $j))) +``` + +### Structures + +Fields in a structure can be accessed directly, but use of **dassign** +or **dread** on a structure would refer to the entire structure as an +aggregate. Thus, we extend **dassign**, **dread**, **iassign** and +**iread** to take an additional parameter called field-ID. + +In general, for a top level structure, unique field-IDs can be assigned +to all the fields contained inside the structure. Field-ID 0 is assigned +to the top level structure (the entire structure). Field-ID is also 0 if +it is not a structure. As each field is visited, the field-ID is +incremented by 1. If a field is a structure, that structure is assigned +a unique field-ID, and then field-ID assignments continue with the +fields inside the nested structure. If a field is an array, the array is +assigned only one field-ID. + +Note that if a structure exists both standalone and nested inside +another structure, the same field inside the structure will be assigned +different field-IDs because field-ID assignment always starts from the +top level structure. + +Three kinds of structures are supported: **struct**, **class** and +**interface**. + +A **struct** corresponds to the struct type in C, and is specified by +the **struct** keyword followed by a list of field declarations enclosed +by braces, as in: +``` + struct { + @f1 i32, + @f2 <$structz> } # $structz is the type name of another struct +``` +A **class** corresponds to the class type in Java, to provide single +inheritances. The syntax is the same as struct except for an additional +type name specified after the class keyword that specifies the class it +inherits from. Fields in the parent class are also referred to via +field-IDs, as if the first field of the derived class is the parent +class. In other words, the parent class is treated like a sub-structure. +``` + class <$classz> { # $classz is the parent of this class being defined + @f1 i32, + @f2 f32 } +``` +Unrelated to storage, structures can contain member function prototypes. +The list of member function prototypes must appear after all the fields +have been specified. 
Each member function name starts with &, which +indicates that it is a function prototype. The prototype specification +follows the same syntax as ordinary function prototypes. + +An **interface** corresponds to the interface type in Java, and has the +same form as class, except that it cannot be instantiated via a var +declaration, and fields declared inside it are always statically +allocated. More details are provided later in this document. + +Instruction Specification +========================= + +In MAPLE IR expression trees, we use parentheses or braces to +distinguish operands from the other fields of an instruction, to +facilitate visualization of the nested structure of MAPLE IR. The +expression operands of each instruction are always enclosed by +parentheses, using commas to separate the operands. Statement operands +are indicated via braces. + +Storage Access Instructions +--------------------------- + +A memory access instruction either loads a memory location to a register +for further processing, or store a value from register to memory. For +load instructions, the result type given in the instruction is the type +of the loaded value residing in register. If the memory location is of +size smaller than the register size, the value being loaded must be of +integer type, and there will be implicit zero- or sign-extension +depending on the signedness of the result type. + +### dassign + +syntax: dassign \ \ (\) + +\ is computed to return a value, which is then assigned to +variable \. If \ is not 0, then the variable must +be a structure, and the assignment only applies to the specified field. +If \ is of type agg, then the size of the structure must +match. If \ is a primitive integer type, the assigned +variable may be smaller, resulting in a truncation. If\ is +not specified, it is assumed to be 0. + +### dread + +syntax: dread \ \ \ + +Variable \ is read from its storage location. If the variable +is a structure, then \ should specify agg. If \ +is not 0, then the variable must be a structure, and instead of reading +the entire variable, only the specified field is read. If the field +itself is also a structure, then \ should also specify agg. +If \ is not specified, it is assumed to be 0. + +### iassign + +syntax: iassign \ \ (\, \) + +\ is computed to return an address. \ gives the high +level type of \ and must be a pointer type. \ is +computed to return a value, which is then assigned to the location +specified by \. If \ is not 0, then the computed +address must correspond to a structure, and the assignment only applies +to the specified field. If \ is of type agg, then the size of +the structure must match. The size of the location affected by the +assignment is determined by what \ points to. If \ is +a primitive integer type, the assigned location (according to what +\ points to) may be smaller, resulting in a truncation. If +\ is not specified, it is assumed to be 0. + +### iread + +syntax: iread \ \ \ (\) + +The content of the location specified by the address computed from the +address expression \ is read (dereferenced) as the given +primitive type. \ gives the high level type of \ and +must be a pointer type. If the content dereferenced is a structure (as +given by what \ points to), then \ should specify +agg. If \ is not 0, then \ must specify pointer to a +structure, and instead of reading the entire structure, only the +specified field is read. If the field itself is also a structure, then +\ should also specify agg. 
If \ is not specified, +it is assumed to be 0. + +### iassignoff + +syntax: iassignoff \ \ (\, +\) + +\ is computed to return a scalar value, which is then +assigned to the memory location formed by the addition of \ in +bytes to \. \ gives the type of the stored-to +location, which also specifies the size of the affected memory location. + +### iassignfpoff + +syntax: iassignfpoff \ \ (\) + +\ is computed to return a scalar value, which is then +assigned to the memory location formed by the addition of \ in +bytes to %%FP. \ gives the type of the stored-to location, +which also specifies the size of the affected memory location. This is +the same as iassignoff where its \ is %%FP. + +### iassignpcoff + +syntax: iassignpcoff \ \ (\) + +\ is computed to return a scalar value, which is then +assigned to the memory location formed by the addition of \ in +bytes to %%PC.  \ gives the type of the stored-to location, +which also specifies the size of the affected memory location + +### ireadoff + +syntax: ireadoff \ \ (\) + +\ must be of scalar type. \ in bytes is added to +\ to form the address of the memory location to be read as +the specified scalar type. + +### ireadfpoff + +syntax: ireadfpoff \ \ + +\ must be of scalar type. \ in bytes is added to +%%FP to form the address of the memory location to be read as the +specified scalar type. This is the same as ireadoff where its +\ is %%FP. + +### ireadpcoff + +syntax: ireadpcoff \ \ + +\ must be of scalar type.  \ in bytes is added to +%%PC to form the address location to be read as the specified scalar +type. + +### regassign + +syntax: regassign \ \ (\) + +\ is computed to return a scalar value, which is then +assigned to the pseudo or special register given by \. +\ gives the type of the register, which also specifies the +size of the value being assigned. + +### regread + +syntax: regread \ \ + +The given pseudo or special register is read in the scalar type +specified by \. + +Leaf Opcodes +------------ + +dread and regread are leaf opcodes for reading the contents of +variables. The following are additional leaf opcodes: + +### addrof + +syntax: addrof \ \ \ + +The address of the variable \ is returned. \ must +be either ptr, a32 or a64. If \ is not 0, then the variable +must be a structure, and the address of the specified field is returned +instead. + +### addroflabel + +syntax: addroflabel \ \ + +The text address of the label is returned. \ must be either +a32 or a64. + +### addroffunc + +syntax: addroffunc \ \ + +The text address of the function is returned. \ must be +either a32 or a64. + +### addroffpc + +syntax: addroffpc \ \ + +\ must be either a32 or a64.  \ in bytes is added +to %%PC to form the address value being returned. + +### conststr + +syntax: conststr \ \ + +The address of the string literal is returned. \ must be +either ptr, a32 or a64. The string must be stored in read-only memory. + +### conststr16 + +syntax: conststr16 \ \ + +The address of the string literal composed of 16-bit wide characters is +returned. \ must be either ptr, a32 or a64. The string must +be stored in read-only memory. + +### constval + +syntax: constval \ \ + +The specified constant value of the given primitive type is returned. +Since floating point values cannot be represented in ASCII without loss +of accuracy, they can be specified in hexadecimal form, in which case +\ indicates the floating point type. + +### sizeoftype + +syntax: sizeoftype \ \ + +The size in bytes of \ is returned as an integer constant value. 
Since type size is in general target-dependent, use of this opcode
preserves the target independence of the program code.

Unary Expression Opcodes
------------------------

These are opcodes with a single operand.

### abs

syntax: abs \<prim-type\> (\<opnd0\>)

The absolute value of the operand is returned.

### bnot

syntax: bnot \<prim-type\> (\<opnd0\>)

Each bit in the operand is reversed (one's complement) and the resulting
value is returned.

### extractbits

syntax: extractbits \<prim-type\> \<boffset\> \<bsize\> (\<opnd0\>)

The bitfield starting at bit position \<boffset\> with \<bsize\> number of
bits is extracted and then sign- or zero-extended to form the primitive
integer given by \<prim-type\>. The operand must be of integer type and must
be large enough to contain the specified bitfield.

### iaddrof

syntax: iaddrof \<prim-type\> \<type\> \<field-id\> (\<addr-expr\>)

\<type\> gives the high level type of \<addr-expr\> and must be a pointer
type. The address of the pointed-to item is returned. \<prim-type\> must be
either ptr, a32 or a64. If \<field-id\> is not 0, then \<type\> must specify
a pointer to a structure, and the address of the specified field in the
structure is returned instead. This operation is of no utility if
\<field-id\> is 0, as it will just return the value of \<addr-expr\>.

### lnot

syntax: lnot \<prim-type\> (\<opnd0\>)

If the operand is not 0, 0 is returned. If the operand is 0, 1 is
returned.

### neg

syntax: neg \<prim-type\> (\<opnd0\>)

The operand value is negated and returned.

### recip

syntax: recip \<prim-type\> (\<opnd0\>)

The reciprocal of the operand is returned. \<prim-type\> must be a
floating-point type.

### sext

syntax: sext \<prim-type\> \<bsize\> (\<opnd0\>)

Sign-extend the integer by treating the integer size as being \<bsize\>
bits. This can be regarded as a special case of extractbits where the
bitfield is in the lowest bits. The primitive type \<prim-type\> stays the
same.

### sqrt

syntax: sqrt \<prim-type\> (\<opnd0\>)

The square root of the operand is returned. \<prim-type\> must be a
floating-point type.

### zext

syntax: zext \<prim-type\> \<bsize\> (\<opnd0\>)

Zero-extend the integer by treating the integer size as being \<bsize\>
bits. This can be regarded as a special case of extractbits where the
bitfield is in the lowest bits. The primitive type \<prim-type\> stays the
same.

Type Conversion Expression Opcodes
----------------------------------

Type conversion opcodes are unary in nature. With the exception of
retype, they all require specifying both the from and to types in the
instruction. Conversions between integer types of different sizes
require the cvt opcode.

Conversion between signed and unsigned integers of the same size does
not require any operation, not even retype.

### ceil

syntax: ceil \<to-type\> \<from-type\> (\<opnd0\>)

The floating point value is rounded towards positive infinity.

### cvt

syntax: cvt \<to-type\> \<from-type\> (\<opnd0\>)

Convert the operand's value from \<from-type\> to \<to-type\>. This
instruction must not be used if the sizes of the two types are the same
and the conversion does not result in altering the bit contents.

### floor

syntax: floor \<to-type\> \<from-type\> (\<opnd0\>)

The floating point value is rounded towards negative infinity.

### retype

syntax: retype \<prim-type\> \<type\> (\<opnd0\>)

\<opnd0\> is converted to \<prim-type\> which has derived type \<type\>
without changing any bits. The size of \<opnd0\> and \<prim-type\> must be
the same. \<opnd0\> may be of aggregate type.

### round

syntax: round \<to-type\> \<from-type\> (\<opnd0\>)

The floating point value is rounded to the nearest integer.

### trunc

syntax: trunc \<to-type\> \<from-type\> (\<opnd0\>)

The floating point value is rounded towards zero.

Binary Expression Opcodes
-------------------------

These are opcodes with two operands.

### add

syntax: add \<prim-type\> (\<opnd0\>, \<opnd1\>)

Perform the addition of the two operands.

### ashr

syntax: ashr \<prim-type\> (\<opnd0\>, \<opnd1\>)

Return \<opnd0\> with its bits shifted to the right by \<opnd1\> bits.
The high order bits shifted in are set according to the original sign
bit.
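For example, an arithmetic shift right might be written as follows (a sketch; %r and %n are hypothetical local variables):

```
  # if %n holds -8, the result is -4: the vacated high bits copy the sign bit
  dassign %r (ashr i32 (dread i32 %n, constval i32 1))
```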
+ +### band + +syntax: band \ (\, \) + +Perform the bitwise AND of the two operands. + +### bior + +syntax: bior \ (\, \) + +Perform the bitwise inclusive OR of the two operands. + +### bxor + +syntax: bxor \ (\, \) + +Perform the bitwise exclusive OR of the two operands. + +### cand + +syntax: cand \ (\, \) + +Perform the logical AND of the two operands via short-circuiting. If +\ yields 0, \ is not to be evaluated. The result is +either 0 or 1. + +### cior + +syntax: cior \ (\, \) + +Perform the logical inclusive OR of the two operands via +short-circuiting. If \ yields 1, \ is not to be +evaluated. The result is either 0 or 1. + +### cmp + +syntax: cmp \ \ (\, \) + +Performs a comparison between \ and \. If the two +operands are equal, return 0. If \ is less than \, +return -1. Otherwise, return +1. + +### cmpg + +syntax: cmpg \ \ (\, \) + +Same as cmp, except 1 is returned if any operand is NaN. + +### cmpl + +syntax: cmpl \ \ (\, \) + +Same as cmp, except -1 is returned if any operand is NaN. + +### depositbits + +syntax: depositbits \ \ \ (\, +\) + +Creates a new integer value by depositing the value of \ into +the range of bits in \ that starts at bit position \ +and runs for \ bits. \ must be large enough to contain +the specified bitfield. + +Depending on the size of \ relative to the bitfield, there may +be truncation. The rest of the bits in \ remains unchanged. + +### div + +syntax: div \ (\, \) + +Perform \ divided by \ and return the result. + +### eq + +syntax: eq \ \ (\, \) + +If the two operands are equal, return 1. Otherwise, return 0. + +### ge + +syntax: ge \ \ (\, \) + +If \ is greater than or equal to \, return 1. Otherwise, +return 0. + +### gt + +syntax: ge \ \ (\, \) + +If \ is greater than \, return 1. Otherwise, return 0. + +### land + +syntax: land \ (\, \) + +Perform the logical AND of the two operands. The result is either 0 or +1. + +### lior + +syntax: lior \ (\, \) + +Perform the logical inclusive OR of the two operands. The result is +either 0 or 1. + +### le + +syntax: le \ \ (\, \) + +If \ is less than or equal to \, return 1. Otherwise, +return 0. + +### lshr + +syntax: lshr \ (\, \) + +Return \ with its bits shifted to the right by \ bits. +The high order bits shifted in are set to 0. + +### lt + +syntax: lt \ \ (\, \) + +If \ is less than \, return 1. Otherwise, return 0. + +### max + +syntax: max \ (\, \) + +Return the maximum of \ and \. + +### min + +syntax: min \ (\, \) + +Return the minimum of \ and \. + +### mul + +syntax: mul \ (\, \) + +Perform the multiplication of the two operands. + +### ne + +syntax: ne \ \ (\, \) + +If the two operands are not equal, return 1. Otherwise, return 0. + +### rem + +syntax: rem \ (\, \) + +Return the remainder when \ is divided by \. + +### shl + +syntax: shl \ (\, \) + +Return \ with its bits shifted to the left by \ bits. +The low order bits shifted in are set to 0. + +### sub + +syntax: sub \ (\, \) + +Subtract \ from \ and return the result. + +Ternary Expression Opcodes +-------------------------- + +These are opcodes with three operands. + +### select + +syntax: select \ (\, \, \) + +\ must be of integer type. \ and \ must be of +the type given by \. If \ is not 0, return +\. Otherwise, return \. + +N-ary Expression Opcodes +------------------------ + +These are opcodes that can have any number of operands. + +### array + +syntax: array \ \ \ (\, +\, . . . , \) + +\ is the base address of an array in memory. 
\ is +either 0 or 1, indicating bounds-checking needs not be performed or +needs to be performed respectively. \ gives the high-level +type of a pointer to the array. Return the address resulting from +row-major order multi-dimentional indexing operation, with the indices +represented by \ onwards. Since arrays must have at least one +dimension, this opcode must have at least two operands. + +### intrinsicop + +syntax: intrinsicop \ \ (\, \..., +\) + +\ indicates an intrinsic function that has no side effect +whose return value depends only on the arguments (a pure function), and +thus can be represented as an expression opcode. + +### intrinsicopwithtype + +syntax: intrinsicopwithtype \ \ \ +(\, \..., \) + +This is the same as intrinsicop except that it takes on an additional +high level type argument specified by \. + +Control Flow Statements +----------------------- + +Program control flows can be represented by either hierarchical +statement structures or a flat list of statements. Hierarchical +statement structures are mostly derived from constructs in the source +language. Flat control flow statements correspond more closely to +processor instructions. Thus, hierarchical statements exist only in high +level MAPLE IR. They are lowered to the flat control flow statements in +the course of compilation. + +A statement block is indicated by multiple statements enclosed inside +the braces \'{\' and \'}\'. Such statement blocks can appear any where +that a statement is allowed. In hierarchical statements, nested +statements are specified by such statement blocks. Statement blocks +should only associated with hierarchical control flow statements. + +In MAPLE IR, each statement must start on a new line. The use of +semicolon is not needed, or even allowed, to indicate the end of +statements. + +### Hierarchical control flow statements + +#### doloop + +syntax: +``` + doloop (, , ) { + } +``` + +\ specifies a local integer scalar variable with no alias. +\ must be an integer expression. \ is initialized +with \. \ must be a single comparison operation +representing the termination test. The loop body is represented by +\, which specifies the list of statements to be executed as +long as \ evaluates to true. After each execution of the +loop body, \ is incremented by \ and the loop is +tested again for termination. + +#### dowhile + +syntax: +``` + dowhile { + + } () +``` +Execute the loop body represented by \, and while +\ evaluates to non-zero, continue to execute \. +Since \ is tested at the end of the loop body, the loop body +is executed at least once. + +#### foreachelem + +syntax: +``` + foreach { + } +``` + +This is an abstract loop form where \ is an array-like +variable representing a collection of uniform elements, and \ +specifies a variable whose type is the element type of +\. The loop body is represented by \, +which specifies the list of statements to be repeated for each element +of \ expressed via \. This statement will be +lowered to a more concrete loop form based on the type of +\. + +#### if + +syntax: +``` + if () { + } + else { + } +``` +If \ evaluates to non-zero, control flow passes to the +\ statements. Otherwise, control flow passes to the +\ statements. If there is no else part, \"else { +\ }\" can be omitted. + +#### while + +syntax: +``` + while () { + } +``` +This implements the while loop. While \ evaluates to +non-zero, the list of statements represented by \ are +repeatedly executed. 
Since \ is tested before the first +execution, the loop body may execute zero times. + +### Flat control flow statements + +#### brfalse + +syntax: brfalse \ (\) + +If \ evaluates to 0, branch to \. Otherwise, fall +through. + +#### brtrue + +syntax: brtrue \ (\) + +If \ evaluates to non-0, branch to \. Otherwise, fall +through. + +#### goto + +syntax: goto \ + +Transfer control unconditionally to \. + +#### igoto + +syntax: igoto (\) + +\ must evaluate to the address of a label. Transfer control unconditionally to the evaluated label address. + +#### multiway + +syntax: +``` + multiway () { + (): goto + (): goto + ... + (): goto } +``` + +\ must be of type convertible to an integer or a string. +Following \ is a table of pairs of expression tags and +labels. When executed, it evaluates \ and then searches the +table for a match of the evaluated value of \ with the evaluated +value of each expression tag \ in the listed order. On a match, +control is transferred to the corresponding label. If no match is found, +control is transferred to \. The evaluation of the +expression tags must not incur side effect. Depending on the resolved +type of \, this statement will be lowered to either the switch +statement or a cascade of if statements. + +#### return + +syntax: return (\, . . ., \) + +Return from the current PU with the multiple return values given by the +operands. The list of operands can be empty, which corresponds to no +return value. The types of \ must be compatible with the list of +return types according to the declaration or prototype of the PU. + +#### switch + +syntax: +``` + switch () { + : goto + : goto + ... + : goto } +``` + +\ must be of integer type. After \, it specifies +a table of pairs of constant integer values (tags) and labels. When +executed, it searches the table for a match with the evaluated value of +\ and transfers control to the corresponding label. If no match +is found, control is transferred to \. There must not be +duplicate entries for the constant integer values. It is up to the +compiler backend to decide how to actually generate code for this +statement after analyzing the tag distribution in the table. + +#### rangegoto + +syntax: +``` + rangegoto ( { + : goto + : goto + ... + : goto } +``` + +This is the lowered form of switch that explicitly designates its +execution to be handled by the jump table mechanism. \ must be +of integer type. After \ follows a table of pairs of +constant integer values and labels. In searching the table for a match +during execution, the evaluated value of \ minus \ +is used during execution so as to cause transfer of control to the +corresponding label. There must be no gap in the constant integer values +specified, and a match is guaranteed within the range of specified +constant integer values, which means the code generator can omit +generation of out-of-range checks. There must be no duplicated entries +for the constant integer values. + +#### indexgoto + +syntax: indexgoto (\ \ + +This is only generated by the compiler as a result of lowering the +switch statement. \ is the name of a compiler-generated +symbol designating a static array, or jump table, which is statically +initialized to store labels. + +Each stored label marks the code corresponding to a switch case. +Execution of this instruction uses the evaluated value of \ to +index into this array and then transfers control to the label stored at +that array element. 
If the evaluated value of \ is less than 0 +or exceeds the number of entries in the jump table, the behavior is +undefined. + +Call Statements +--------------- + +There are various flavors of procedure invocation. Intrinsics are +library functions known to the compiler. + +### call + +syntax: call \ (\, \..., \) + +Invoke the function while passing the parameters given by the operands. + +### callinstant + +syntax: callinstant \ \ (\, +\..., \) + +Instantiate the given generic function according to instantiation vector +\ and then invoke the function while passing the +parameters given by the operands. + +### icall + +syntax: icall (\, \, \..., \) + +Invoke the function specified indirectly by \, passing the +parameters given by \ onwards. + +### intrinsiccall + +syntax: intrinsiccall \ (\, \..., \) + +Invoke the specified intrinsic defined by the compiler while passing the +parameters given by the operands. + +### intrinsiccallwithtype + +syntax: intrinsiccallwithtype \ \ (\, \..., +\) + +This is the same as intrinsiccall except that it takes on an additional +high level type argument specified by \. + +### xintrinsiccall + +syntax: xintrinsiccall \ (\, \..., +\) + +Invoke the intrinsic specified as an index into a user-defined intrinsic +table while passing the parameters given by the operands. + +Java Call Statements +-------------------- + +The following statements are used to represent Java member function +calls that are not yet resolved. + +### virtualcall + +syntax: virtualcall \ (\, \, \..., +\) + +\ is a pointer to an instance of a class. The class +hierarchy is searched using the specified \ to find the +appropriate virtual method to invoke. The invocation will pass the +remaining operands as parameters. + +### superclasscall + +syntax: superclasscall \ (\, \, \..., +\) + +This is the same as virtualcall except it will not use the class\'s own +virtual method, but the one in its closest superclass that defines the +virtual method. + +### interfacecall + +syntax: interfacecall \ (\, \, \..., +\) + +\ is a method defined in an interface. \ is a +pointer to an instance of a class the implements the interface. The +class is searched using the \ to find the corresponding +method to invoke. The invocation will pass the remaining operands as +parameters. + +There are also virtualcallinstant, superclasscallinstant and +interfacecallinstant for calling generic versions of the methods after +instantiating with the specified instantiation vector. The instantiation +vector is specified after \, as in the callinstant +instruction. + +Calls with Return Values Assigned +--------------------------------- + +MAPLE IR supports calls with any number of return values. All the +various call operations have a corresponding higher-level abstracted +variant such that a single call operation also specify how the multiple +function return values are assigned, without relying on separate +statements to read the %%retval registers. Only assignments to local +scalar variables, fields in local aggregates or pseudo-registers are +allowed. These operations have the same names with the suffix +\"assigned\" added. They are callassigned, callinstantassigned, +icallassigned, intrinsiccallassigned, intrinsiccallwithtypeassigned, +xintrinsiccallassigned, virtualcallassigned, virtualcallinstantassigned, +superclasscallassigned, superclasscallinstantassigned, +interfacecallassigned and interfacecallinstantassigned. Only +callassigned is defined here. 
The same extension applies to the +definitions of the rest of these call operations. + +### callassigned + +syntax: +``` + callassigned (, ..., ) { + dassign + dassign + ... + dassign } +``` + +Invoke the function passing the parameters given by the operands. After +returning from the call, the multiple return values are assigned to the +scalar variables listed in order. If a specified field-id is not 0, then +the corresponding variable must be an aggregate, and the assignment is +to the field corresponding to the field-id. If a field-id is absent, 0 +is implied. If \ is absent, it means the corresponding return +value is ignored by the caller. If a call has no return value, no +dassign should be listed. + +In the course of compilation, these call instructions may be lowered to +use the special registers %%retval0, retval1, %%retval2, etc. to +indicate how their return values are fetched and used. These special +registers are overwritten by each call. The same special register can +assume whatever is the type of the return value. Each special register +can be read only once after each call. + +Exceptions Handling +------------------- + +Described in this section are the various exception handling constructs +and operations. The try statement marks the entrance to a try block. The +catch statement marks the entrance to a catch block. The finally +statement marks the entrance to a finally block. The endtry statement +marks the end of the composite exception handling constructs that began +with the try. In addition, there are two special types of labels. +Handler labels are placed before catch statements, and finally labels +are placed before finally statements. Handler labels are distinguished +from ordinary labels via the prefix \"\@h@\", while finally labels use +the prefix \"\@f@\". These special labels explicitly shows the +correspondence of try, catch and finally to each other in each +try-catch-finally composite, without relying on block nesting. The +special register %%thrownval contains the value being thrown, which is +the operand of the throw operation that raised the current exception. + +Since different languages have exhibit different semantics or behavior +related to the exception handling constructs, the try and catch opcodes +have different variants distinguished by their language prefices. + +### jstry + +syntax: jstry \ \ + +Executing this statement indicates entry into a Javascript try block. +\ is 0 when there is no catch block associated with the +try. \ is 0 when there is no finally block associated +with the try. Any exception thrown inside this try block will transfer +control to these labels, unless another nested try block is entered. A +finally block if present must be executed to conclude the execution of +the try composite constructs regardless of whether exception is thrown +or not. + +There are three possible scenarios based on the way the +jstry-jscatch-finally composite is written: + +1. jstry-jscatch + +2. jstry-finally + +3. jstry-jscatch-finally + +For case 1, if an exception is thrown inside the try block, control is +transferred to the handler label that marks the catch statement and the +exception is regarded as having been handled. Program flow eventually +exits the try block with a goto statement to the label that marks the +endtry statement. If no exception is thrown, the try block is eventually +exited via a goto statement to the label that marks the endtry +statement. 
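As a minimal sketch of the jstry-jscatch shape described for case 1 (all label names here are hypothetical):

```
  jstry @h@hdlr 0        # no finally block, so the finally label is 0
    # ... try block body ...
    goto @exit           # normal exit branches to the label marking endtry
  @h@hdlr jscatch        # handler label marks the catch statement
    # ... the exception is regarded as handled here ...
    goto @exit
  @exit endtry
```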
+ +For case 2, if an exception is thrown inside the try block, control is +transferred to the finally label that marks the finally statement. But +the exception is regarded as not having been handled yet, and the search +for the throw\'s upper level handler starts. If no exception is thrown +in try block, program flow eventually exits the try block with a gosub +statement to the finally block. Execution in the finally block ends with +a retsub, which returns to the try block and the try block is then +exited via a goto statement to the label that marks the endtry +statement. + +For case 3, if an exception is thrown inside the try block, control is +transferred to the handler label that marks the catch statement as in +case 1. Execution in the catch block ends with a gosub statement to the +finally block. Execution in the finally block ends with a retsub, which +returns to the catch block and the catch block is then exited via a goto +statement to the label that marks the endtry statement. If no exception +is thrown in the try block, program flow eventually exits the try block +with a gosub statement to the finally block, and execution will continue +in the finally block until it executes a retsub, at which time it +returns to the try block and the try block is then exited via a goto +statement to the label that marks the endtry statement. + +### javatry + +syntax: javatry {\ \ . . . +\} + +This is the Java flavor of try, which can have more than one catch block +but no finally block. The entry labels for the catch blocks are listed +in order and enclosed in braces. + +### cpptry + +syntax: cpptry {\ \ . . . +\} + +This is the C++ flavor of try, which can have more than one catch block +but no finally block. The entry labels for the catch blocks are listed +in order and enclosed in braces. + +### throw + +syntax: throw (\) + +Raise a user-defined exception with the given exception value. If this +statement is nested within a try block, control is then transferred to +the label associated with the try, which is either a catch statement or +a finally statement. If this throw statement is nested within a catch +block, control is first transferred to the finally associated with the +catch if any, in which case the finally block will be executed. After it +finishes executing the finally block, the search for the throw\'s upper +level handler starts. If this throw statement is nested within a finally +block, the search for the throw\'s upper level handler starts right +away. If the throw is not nested inside any try block within a function, +the system will look for the first enclosing try block by unwinding the +call stack. If no try block is found, the program will terminate. Inside +the catch block, the thrown exception value can be accessed using the +special register %%thrownval. + +### jscatch + +syntax: \ jscatch + +This marks the start of the catch block associated with a try in +Javascript. The try block associated with this catch block is regarded +to have been exited and the exception is regarded as being handled. If +no exception is thrown inside the catch block, exit from the catch block +is effected by either a gosub statement to a finally label, if there is +a finally block, or a goto statement to endtry. + +### javacatch + +syntax: \ javacatch {\ \ \... \} + +This marks the start of a catch block in Java. The possible types of +thrown value that match this catch block are listed. 
If none of the +specified types corresponds to the type of the thrown value, control +will pass to the next javacatch. + +### cppcatch + +syntax: \ cppcatch \ + +This marks the start of a catch block in C++, in which each catch block +can only be matched by one type of thrown value. If specified type does +not correspond to the type of the thrown value, control will pass to the +next cppcatch. + +### finally + +syntax: \ finally + +This marks the start of the finally block in Javascript. The finally +block can be entered either via the execution of a gosub statement, or a +throw statement the finally\'s corresponding try block that has no catch +block, or a throw statement in the finally\'s corresponding catch block. +The exit from the finally block can be effected by the execution of +either a retsub or a throw statement in the finally block. If the exit +is via a retsub and if there is outstanding throw yet to be handled, the +search for the throw\'s upper level handler continues. + +### cleanuptry + +syntax: cleanuptry + +This statement is generated in situations where the control is going to +leave the try-catch-finally composite prematurely via jumps unrelated to +exception handling. This statement effects the cleanup work related to +exception handling for the current try-catch-finally composite in +Javascript. + +### endtry + +syntax: \ endtry + +This marks either the end of each try-catch-finally composite or the end +of each javatry block. + +### gosub + +syntax: gosub \ + +Transfer control to the finally block marked by \. This +also has the effect of exiting the try block or catch block which this +statement belongs. It is like a goto, except that the next instruction +is saved. Execution will transfer back to the next instruction when a +retsub statement is executed. This can also be thought of as a call, +except it uses label name instead of function name, and no passing of +parameter or return value is implied. This opcode is only generated from +Javascript. + +### retsub + +syntax: retsub + +This must only occur as the last instruction inside a finally block. If +there is no outstanding throw, control is transferred back to the +instruction following the last gosub executed. Otherwise the search for +the upper level exception handler continues. This opcode is only +generated from Javascript. + +Memory Allocation and Deallocation +---------------------------------- + +The following instructions are related to the allocation and +de-allocation of dynamic memory during program execution. The +instructions with \"gc\" as prefix are associated with languages with +managed runtime environments. + +### alloca + +syntax: alloca \ (\) + +This returns a pointer to the block of uninitialized memory allocated by +adjusting the function stack pointer %%SP, with size in bytes given by +\. This instruction must only appear as the right hand side of +an assignment operation. + +### decref + +syntax: decref (\) + +\ must be a dread or iread of a pointer that refers to an object +allocated in the run-time-managed part of the heap. It decrements the +reference count of the pointed-to object by 1. \ must be of +primitive type ref. + +### decrefreset + +syntax: decrefreset(\) + +\ must be the address of a pointer that refers to an object +allocated in the run-time-managed part of the heap. It decrements the +reference count of the pointed-to object by 1, and then reset the value +of the pointer to null by zeroing its memory location. 
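As a rough sketch of these reference-counting operations (the names %obj and $p are hypothetical):

```
  # drop one reference to the heap object that %obj refers to
  decref (dread ref %obj)
  # drop one reference through $p, then zero out the pointer variable $p itself
  decrefreset (addrof ptr $p)
```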
+ +### free + +syntax: free (\) + +The block of memory pointed to by \ is released so it can be +re-allocated by the system for other uses. + +### gcmalloc + +syntax: gcmalloc \ \ + +This requests the memory manager to allocate an object of type \ +with associated meta-data according to the requirements of the managed +runtime. The size of the object must be fixed at compilation time. As +this returns the pointer to the allocated block, this instruction must +only appear as the right hand side of an assignment operation. + +The managed runtime is responsible for its eventual deallocation. + +### gcmallocjarray + +syntax: gcmallocjarray \ \ +(\) + +This requests the memory manager to allocate a java array object as +given by \. The allocated storage must be large enough +to store the number of array elements as given by \. As this +returns the pointer to the allocated block, this instruction must only +appear as the right hand side of an assignment operation. + +The managed runtime is responsible for its eventual deallocation, and +the block size must remain fixed during its life time. + +### gcpermalloc + +syntax: gcpermalloc \ \ + +This requests the memory manager to allocate an object of type \ +in the permanent area of the heap which is not subject to deallocation. +The size of the object must be fixed at compilation time. As this +returns the pointer to the allocated block, this instruction must only +appear as the right hand side of an assignment operation + +### incref + +syntax: incref (\) + +\ must be a dread or iread of a pointer that refers to an object +allocated in the run-time managed part of the heap. It increments the +reference count of the pointed-to object\'s by 1. \ must be of +primitive type ref. + +### malloc + +syntax: malloc \ (\) + +This requests the system to allocate a block of uninitialized memory +with size in bytes given by \. As this returns the pointer to +the allocated block, this instruction must only appear as the right hand +side of an assignment operation. The block of memory remains unavailable +for re-use until it is explicitly freed via the free instruction. + +### stackmalloc + +syntax: stackmalloc \ \ + +This allocates an object of type \ on the function stack frame by +decrementing %%SP. The size of the object must be fixed at compilation +time. As this returns the pointer to the allocated block, this +instruction must only appear as the right hand side of an assignment +operation. + +### stackmallocjarray + +syntax: stackmallocjarray \ \ +(\) + +This allocates a java array object as given by \ on +the function stack frame by decrementing %%SP. The allocated storage +must be large enough to store the number of array elements as given by +\. As this returns the pointer to the allocated block, this +instruction must only appear as the right hand side of an assignment +operation. + +Other Statements +---------------- + +### assertge + +syntax: assertge (\, \) + +Raise an exception if \ is not greater than or equal to +\. This is used for checking if an array index is within range +during execution. \ and \ must be of the same type. + +### assertlt + +syntax: assertlt (\, \) + +Raise an exception if \ is not less than \. This is used +for checking if an array index is within range during execution. +\ and \ must be of the same type. + +### assertnonnull + +syntax: assertnonnull (\) + +Raise an exception if \ is a null pointer, corresponding to the +value 0. + +### eval + +syntax: eval (\) + +\ is evaluated but the result is thrown away. 
If \ +contains volatile references, this statement cannot be optimized away. + +### checkpoint + +syntax: checkpoint \ + +This instruction serves as a check point, such that when execution +reaches this instruction, it will trigger the indicated action. + +### membaracquire + +syntax: membaracquire + +This instruction acts as both a Load-to-Load barrier and a Load-to-Store +barrier: the order between any load instruction before it and any load +instruction after it must be strictly followed, and the order between +any load instruction before it and any store instruction after it must +be strictly followed. + +### membarrelease + +syntax: membarrelease + +This instruction acts as both a Load-to-Store barrier and a +Store-to-Store barrier: the order between any load instruction before it +and any store instruction after it must be strictly followed, and the +order between any store instruction before it and any store instruction +after it must be strictly followed. + +### membarfull + +syntax: membarfull + +This instruction acts as a barrier to any load or store instruction +before it and any load or store instruction after it. + +### syncenter + +syntax: syncenter (\) + +This instruction indicates entry to a region where the object pointed to +by the pointer \ needs to be synchronized for Java +multi-threading. This means at any time, there cannot be more than one +thread executing in a synchronized region of the same object. Any other +thread attempting to enter a synchronized region of the same object will +be blocked. For the compiler, it implies a barrier to the backward +movement (against the flow of control) of any operation that accesses +the object. + +### syncexit + +syntax: syncexit (\) + +This instruction indicates exit from a region where the object pointed +to by the pointer \ needs to be synchronized for Java +multi-threading. For the compiler, it implies a barrier to the forward +movement (along the flow of control) of any operation that accesses the +object. + +Declaration Specification +========================= + +In this section, we describes the various kinds of statements in the +declaration part of Maple IR. Internally, they are represented by data +structures organized into different kinds of tables. + +Type declarations can be huge and they are often shared by different +modules, MAPLE IR provides the *import* facility to avoid duplicating +type declarations in each MAPLE IR file. MAPLE IR files that store only +type information have .mplt as file suffix. They can then be imported +into each MAPLE IR files that need them via the **import** statement. + +Module Declaration +------------------ + +Each Maple IR file represents a program module, also called compilation +unit, that consists of various declarations at the global scope. The +following directives appear at the start of the Maple IR file and +provide information about the module: + +### entryfunc + +syntax: entryfunc \ + +This gives the name of the function defined in the module that will +serve as the single entry point for the module. + +### flavor + +syntax: flavor \ + +The IR flavor gives information as to how the IR was produced, which in +turn indicates the state of the compilation process. + +### globalmemmap + +syntax: globalmemmap = \[ \ \] + +This specifies the static initialization values of the global memory +block as a list of space-separated 32-bit integer constants. The amount +of initializations should correspond to the memory size given by +globalmemsize. 
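+
+As a rough, hypothetical sketch (the real loader and memory layout are not
+specified by this document), a runtime could materialize globalmemsize and
+globalmemmap along the following lines; kGlobalMemSize, kGlobalMemMap and
+InitGlobalBlock are made-up names used only for illustration.
+```cpp
+#include <cstddef>
+#include <cstdint>
+#include <cstring>
+#include <vector>
+
+// Assume the module declared: globalmemsize 16, globalmemmap = [ 1 2 3 4 ]
+constexpr std::size_t kGlobalMemSize = 16;                       // bytes, from globalmemsize
+const std::vector<std::uint32_t> kGlobalMemMap = {1, 2, 3, 4};   // words, from globalmemmap
+
+std::vector<std::uint8_t> InitGlobalBlock() {
+    std::vector<std::uint8_t> block(kGlobalMemSize, 0);
+    // Each 32-bit constant statically initializes one word of the global memory block.
+    for (std::size_t i = 0; i < kGlobalMemMap.size(); ++i) {
+        std::memcpy(&block[i * sizeof(std::uint32_t)], &kGlobalMemMap[i], sizeof(std::uint32_t));
+    }
+    return block;
+}
+```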
+ +### globalmemsize + +syntax: globalmemsize \ + +This gives the size of the global memory block for storing all global +static variables. + +### globalwordstypetagged + +syntax: globalwordstypetagged = \[ \ \] + +This specifies a bitvector initialized to the value specified by the +list of space-separated 32-bit integer constants. The Nth bit in this +bitvector is set to 1 if the Nth word in globalmemmap has a type tag, in +which case the type tag is at the (N+1)th word. + +### globalwordsrefcounted + +syntax: globalwordsrefcounted = \[ \ \] + +This specifies a bitvector initialized to the value specified by the +list of space-separated 32-bit integer constants. The Nth bit in this +bitvector is set to 1 if the Nth word in globalmemmap is a pointer to a +reference-counted dynamically allocated memory block. + +### id + +syntax: id \ + +This gives the unique module id assigned to the module. This id enables +the Maple virtual machine to handle the execution of program code +originating from multiple Maple IR modules. + +### import + +syntax: import \"\\" + +\ is the path name for a MAPLE IR file with suffix .mplt that +only stores type declarations. The contents of this file are imported. +This allows the same type declarations to be shared across multiple +files, and large volumes of type declarations to be organized by files. +Only one level of import is allowed, as .mplt files are not allowed to +have import statements. + +### importpath + +syntax: importpath \"\\" + +This specifies a directory path for the compiler to look for the +imported MAPLE IR files required to complete the compilation. This is +used only in the early compilation phases, before all types and symbol +names have been fully resolved. Each appearance only specifies one +specific path. + +### numfuncs + +syntax: numfuncs \ + +This gives the number of function definitions in the module, excluding +function prototypes. + +### srclang + +syntax: srclang \ + +This gives the source language that produces the Maple IR module. + +Variable Declaration +-------------------- + +syntax: var \ \ \ \ + +The keyword \'var\' designates a declaration statement for a variable. +\ specifies its name, prefixed by \'\$\' or \'%\' based on +whether its scope is global or local respectively. \ is +optional and can be extern, fstatic or pstatic. \ is the type +specification. \ is optional and specifies additional +attributes like volatile, const, alignment, etc. Examples: + +var \$x extern f32 volatile static + +syntax: tempvar \ \ \ +\ + +This is the same as **var** except that it conveys the additional +information that it is a compiler-generated temporary. + +Pseudo-register Declarations +---------------------------- + +syntax: reg \ \ + +The keyword \'reg\' designates a declaration statement for a +pseudo-register. \ specifies the pseudo-register prefixed by +\'%\'. \ is the high level type information. If a pseudo-register +is only of primitive type, its declaration is optional. This statement +is only allowed inside functions as pseudo-registers are of local scope. + +Type Specification +------------------ + +Types are either primitive or derived. Derived types are specified using +C-like tokens, except that the specification is always +right-associative, following a strictly left to right order. Derived +types are distinguished from primitive types by being enclosed in +angular brackets. Derived types can also be thought of as high-level +types. 
Examples: +``` + var %p <* i32> # pointer to a 32-bit integer + + var %a <[10] i32> # an array of 10 32-bit integers +``` +var %foo \ \# a pointer to a function that takes one i32 +parameter and returns an i32 value (func is a keyword) + +Additional nested angular brackets are not required, as there is no +ambiguity due to the right-associative rule. But the nested angular +brackets can be optionally inserted to aid readability. Thus, the +following two examples are equivalent: +``` + var %q <* <[10] i32>> # pointer to an array of 10 32-bit integers + + var %q <* [10] i32> # pointer to an array of 10 32-bit integers +``` +Inside a struct declaration, field names are prefixed by @. Though label +names also use @ as prefix, there is no ambiguity due to the usage +context between struct field declaration and label usage being distinct. +Example: +``` + var %s # a bitfield of 3 bits +``` +A union declaration has the same syntax as struct. In a union, all the +fields overlap with each other. + +The last field of a struct can be a flexible array member along the line +of the C99 standard, which is an array with variable number of elements. +It is specified with empty square brackets, as in: +``` + var %p <* struct{@f1 i32, + @f2 <[] u16>}> # a flexible array member with unsigned 16-bit integers as elements +``` +Structs with flexible array member as its last field can only be +dynamically allocated. Its actual size is fixed only at its allocation +during execution time, and cannot change during its life time. A struct +with flexible array member cannot be nested inside another aggregate. +During compilation, the flexible array member is regarded as having size +zero. + +Because its use is usually associated with managed runtime, a language +processor may introduce additional meta-data associated with the array. +In particular, there must be some language-dependent scheme to keep +track of the size of the array during execution time. + +When a type needs to be qualified by additional attributes for const, +volatile, restrict and various alignments, they follow the type that +they qualify. These attributes are not regarded as part of the type. If +these attributes are applied to a derived type, they must follow outside +the type angular brackets. Examples: +``` + var %x f64 volatile align(16) # %s is a f64 value that is volatile and + # aligned on 16 byte boundary + var %p <* f32> const volatile # %p is a pointer to a f32 value, and + # %p itself is const and volatile +``` +Alignments are specified in units of bytes and must be power of 2. +Alignment attributes must only be used to increase the natural +alignments of the types, to make the alignments more stringent. For +decreasing alignments, the generator of MAPLE IR must use smaller types +to achieve the effect of packing instead of relying on the align +attribute. + +### Incomplete Type Specification + +Languages like Java allow contents of any object to be referenced +without full definition of the object being available. Their full +contents are to be resolved from additional input files in later +compilation phases. MAPLE IR allows structs, classes and interfaces to +be declared incompletely so their specific contents can be referenced. +Instead of the struct, class and interface keywords, structincomplete, +classincomplete and interfaceincomplete should be used instead. 
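+
+For readers coming from C++, this is loosely analogous to forward declarations,
+where an incomplete type can be referred to through pointers and references
+before its full definition is available (analogy only; in MAPLE IR the full
+contents are resolved from additional input files in later compilation phases).
+```cpp
+// C++ analogy, not Maple IR syntax: using a type before its full definition.
+class Widget;                   // incomplete ("forward") declaration
+
+Widget *MakeWidget();           // OK: only a pointer to the incomplete type is needed
+void Inspect(const Widget &w);  // OK: references to the incomplete type are also allowed
+
+// sizeof(Widget) or member access would require the complete definition,
+// which is supplied later, e.g. by another header or input file.
+```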
+ +Type Declaration +---------------- + +The purpose of type declaration is to associate type names with types so +they can be referred to via their names, thus avoiding repeated +specification of the details of the types. + +syntax: type \ \ + +Type names are also prefixed with either \'\$\' or \'%\' based on +whether the scope is global or local. Example: +``` + type $i32ptr <* i32> # the type $i32ptr is a pointer to i32 +``` +Primitive types are not allowed to be given a different type name. + +Attributes are not allowed in type declaration. + +Once a type name is defined, specifying the type name is equivalent to +specifying the derived type that it stands for. Thus, the use of a type +name should always be enclosed in angular brackets. + +Java Class and Interface Declaration +------------------------------------ + +A Java class designates more than a type, because the class name also +carries the attributes declared for the class. Thus, we support +declaration of Java classes via: + +syntax: javaclass \ \ \ + +\ must have \'\$\' as prefix as class names always have global +scope. For example: +``` + # a java class named "Puppy" with a single field "color" and attributes public and final + javaclass $Puppy public final +``` +A **javaclass** name should not be regarded as a type name as it +contains additional attribute information. It cannot be enclosed in +angular brackets as it cannot be referred to as a type. + +A java interface has the same form as the class type, being able to +extend another interface, but unlike class, an interface can extend +multiple interfaces. Another difference from class is that an interface +cannot be instantiated. Without instantiation, the data fields in +interfaces are always allocated statically. For example, +``` + interface <$interfaceA> { # this interface extends interfaceA + @s1 int32, # data fields inside interfaces always statically allocated + &method1(int32) f32 } # a method declaration +``` +MAPLE IR handles an interface declaration as a type declaration. Thus, +the above can be specified after the type keyword to be associated with +a type name. Separately, the **javainterface** keyword declares the +symbol associated with the interface: + +syntax: javainterface \ \ \ + +\ must have \'\$\' as prefix as interface names always have +global scope. For example: +``` + # $IFA is an interface with a single method &amethod + javainterface $IFA public static +``` +Again, a **javainterface** name should not be regarded as a type name, +as it is a symbol name. When a class implements an interface, it +specifies the **javainterface** name as part of its comma-separated +contents, as in: +``` + class <$PP> { &amethod(void) int32, # this class extends the $PP class, and + # &amethod is a member function of this class + $IFA } # this class implements the $IFA interface +``` +Function Declaration +-------------------- + +syntax: +``` + func ( + var , + ... + var ) , . . . { + + ... + } + +``` +\ provides various attributes about the function, like +static, extern, etc. The opening parentheses starts the parameter +declarations, which can be empty. Each \ is of the form of a +**var** or **reg** declaration declaring each incoming parameter. If the +last parameter is specified as \"\...\", it indicates the start of the +variable part of the arguments as in C. Following the parameter +declarations is a list of the multiple return types separated by commas. +If there is no return value, \ should specify void. Each +\ can be either primitive or derived. 
If no statement +follows the parentheses, then it is just a prototype declaration. + +### funcsize + +syntax: funcsize \ + +This directive appears inside the function block to give the Maple IR +code size of the function. + +### framesize + +syntax: framesize \ + +This directive appears inside the function block to give the stack frame +size of the function. + +### moduleid + +syntax: moduleid \ + +This directive appears inside the function to give the unique id of the +module which the function belongs to. + +### upformalsize + +syntax: upformalsize \ + +This directive appears inside the function block to give the size of +upformal segment that stores the formal parameters being passed above +the frame pointer %%FP. + +### formalwordstypetagged + +syntax: formalwordstypetagged = \[ \ \] + +This specifies a bitvector initialized to the value specified by the +list of space-separated 32-bit integer constants. The Nth bit in this +bitvector is set to 1 if the Nth word in the upformal segment has a type +tag, in which case the type tag is at the (N+1)th word. + +### formalwordsrefcounted + +syntax: formalwordsrefcounted = \[ \ \] + +This specifies a bitvector initialized to the value specified by the +list of space-separated 32-bit integer constants. The Nth bit in this +bitvector is set to 1 if the Nth word in the upformal segment is a +pointer to a reference-counted dynamically allocated memory block. + +### localwordstypetagged + +syntax: localwordstypetagged = \[ \ \] + +This specifies a bitvector initialized to the value specified by the +list of space-separated 32-bit integer constants. The Nth bit in this +bitvector is set to 1 if the -Nth word in the local stack frame has a +type tag, in which case the type tag is at the (-N+1)th word. + +### localwordsrefcounted + +syntax: localwordsrefcounted = \[ \ \] + +This specifies a bitvector initialized to the value specified by the +list of space-separated 32-bit integer constants. The Nth bit in this +bitvector is set to 1 if the -Nth word in the local stack frame is a +pointer to a reference-counted dynamically allocated memory block. + +Initializations +--------------- + +When there are initializations associated with a **var** declaration, +there is \'=\' after the **var** declaration, followed by the +initialization value. For aggregates, the list of initialization values +are enclosed by brackets \'\[\' and \'\]\', with the values separated by +commas. In arrays, the initialization values for the array elements are +listed one by one, and nested brackets must be used to correspond to +elements in each lower-order dimension. + +In specifying the initializations for a struct, inside the brackets, +field number followed by \'=\' must be used to specify the value for each +field explicitly. The fields\' initialization values can be listed in +arbitrary order. For nested structs, nested brackets must be used according +to the nesting relationship. Because a bracket is used for each sub-struct in +the nesting, the field number usage is relative to the sub-struct, and starts +at 1 for the first field of the sub-struct. 
Example:
+```
+ type %SS <struct {@g1 f32, @g2 f32}>
+ var %s struct{@f1 i32,
+               @f2 <%SS>,
+               @f3:4 i32} = [
+     1 = 99,                    # field f1 is initialized to 99
+     2 = [1 = 10.0f, 2 = 22.2f],
+                                # fields f2.g1 and f2.g2 initialized to
+                                # 10.0f and 22.2f respectively
+     3 = 15 ]                   # field f3 (4 bits in size) has field number 3 in
+                                # struct %s and is initialized to 15
+```
+Type Parameters
+---------------
+
+Also called generics or templates, type parameters allow derived types
+and functions to be written without specifying the exact types in parts
+of their contents. The type parameters can be instantiated to different
+specific types later, thus enabling the code to be more widely
+applicable, promoting software re-use.
+
+Type parameters and their instantiation can be handled completely by the
+language front-ends. MAPLE IR provides representation for generic types
+and generic functions and their instantiation so as to reduce the amount
+of work in the language front-ends. A MAPLE IR file with type parameters
+requires a front-end lowering phase to de-genericize the IR before the
+rest of the MAPLE IR components can process the code.
+
+Type parameters are symbol names prefixed with \"!\", and can appear
+anywhere that a type can appear. Each type or function definition can
+have multiple type parameters, and each type parameter can appear more
+than once. Since type parameters are also types, they can only
+appear inside the angular brackets \"\<\" and \"\>\", e.g. \<!T\>. When
+the definition of a derived type contains any type parameter, the type
+becomes a generic type. When the definition of a function contains any
+type parameter, the function becomes a generic function. A function
+prototype cannot contain a generic type.
+
+A generic type or generic function is marked with the generic attribute
+to make them easier to identify. A generic type or function is
+instantiated by assigning specific non-generic types to each of its type
+parameters. The instantiation is specified by a list of such assignments
+separated by commas. We refer to this as an instantiation vector, which
+is specified inside braces \"{\" and \"}\". In the case of the
+instantiation of a generic type, the type name is immediately
+followed by the instantiation vector. Example:
+```
+ type $apair <struct {@f1 <!T>, @f2 <!T>}>  # $apair is a generic type
+
+ var $x <$apair{!T=f32}>  # the type of $x is $apair instantiated with
+                          # f32 being assigned to the type parameter !T
+```
+A generic function is instantiated by invoking it with an instantiation
+vector. The instantiation vector immediately follows the name of the
+generic function. Since the instantiation vector is regarded as type
+information, it is further enclosed inside the angular brackets \"\<\"
+and \"\>\". Invocation of generic functions must be via the opcodes
+callinstant and callinstantassigned, which correspond to call and
+callassigned respectively. Example:
+```
+ # &swap is a generic function to swap two parameters
+ func &swap (var %x <!UU>, var %y <!UU>) void {
+   var %z <!UU>
+   dassign %z (dread agg %x)
+   dassign %x (dread agg %y)
+   dassign %y (dread agg %z)
+   return
+ }
+
+ ...
+ + # &swap is instantiated with type argument <$apair{!T=i32}>, + # itself an instantiated type + callinstant &swap<{!UU=<$apair{!T=i32}>}> ( + dread agg %a, + dread agg %b) +``` diff --git a/doc/en/NaiveRcInsertionDescription.md b/doc/en/NaiveRcInsertionDescription.md new file mode 100644 index 0000000000000000000000000000000000000000..f3d19b343de320220a26244510027991f6ee377e --- /dev/null +++ b/doc/en/NaiveRcInsertionDescription.md @@ -0,0 +1,175 @@ +# Naive RC Insertion Principle + +Reference Counting (RC) is a programming technique of storing the number of references to a resource, such as an object, a block of memory, disk space, and others, and releasing the resource when the number of references becomes 0. RC is used to achieve automatic resource management. RC also refers to a garbage collection algorithm that deallocates objects which are no longer referenced. Naive RC is a simplified RC. + + +Principle +====================== + +- RC sources: + + - Heap reference (other objects or itself) + + - Stack-based reference (including registers) + + - Static variable and global variable + +- RC Insertion rules (compiler and runtime): + + - If a value is assigned to a field, RC for the new object to which the field points is incremented and RC for the original object is decremented. + + - To read a local variable (including registers) on the stack, RC for the object to be read is incremented. + + - After last use, RC for the local variable is decremented. + + - If an object is returned, RC is incremented. After last use, RC for the compensated local variable is decremented. + +- Example + + - Before insertion + + ```cpp + class A { + static Object static_field; + Object instance_field; + A() { + static_field = new Object(); + } + } + Object foo(){ + A a = new A(); + bar(a, new Object()) + return a.instance_field; + } + void bar(A a, Object o) { + a.instance_field = o; + } + ``` + + - After insertion + + ```cpp + class A { + A() { + local_var t = new Object(); // t is a temporary variable assigned to the static_field process. + old = static_field; + static_field = t; + IncRef(t); DecRef(old); // RC is updated on the heap. + The DecRef(t); // The function exits and RC on the stack is released. + } + } + Object foo(){ + A a = new A(); + bar(a, new Object()); + locl_var t = a.instance_field; + IncRef(t) // RC for the variable on the stack is incremented. + The IncRef(t) // The function returns and RC for the returned value is incremented. + The DecRef(a) // The function exits and RC on the stack is released. + The DecRef(t) // The function exits and RC on the stack is released. + return t; + } + void bar(A a, Object o) { + old = a.instance_field + a.instance_field = o; + IncRef(o); DecRef(old); + } + ``` + + +- RC intrinsics: + + - Basic function + Method: The basic function uses CreateIntrinsicCallMeStmt in IrMap to create the IntrinsiccallMeStmt statement and insert it to the position where RC needs increment or decrement. + + - INTRN_MCCIncRef + + - INTRN_MCCDecRef + + - Load/Write function + Method: The Write function uses CreateIntrinsicCallMeStmt in IrMap to create the IntrinsiccallMeStmt statement and replace the iassign statement whose lvalue is static, global, or volatile. The Load function uses CreateIntrinsicCallAssignedMeStmt to create the IntrinsiccallMeStmt statement and replace the dassign statement whose rvalue is static, global, or volatile. The Load/Write function supports the IncRef operation. 
+ + - INTRN_MCCLoadRef + + - INTRN_MCCLoadRefS + + - INTRN_MCCLoadRefVol + + - INTRN_MCCLoadRefSVol + + - INTRN_MCCWrite + + - INTRN_MCCWriteS + + - INTRN_MCCWriteVol + + - INTRN_MCCWriteSVol + +RefVar IncRef Processing Rules: +======================== + +- Assignment statement processing: + + - Process the statement based on the lvalue (variable value in the assignment statement) and rvalue (referenced value in the assignment statement) expressions. + + - First, process the rvalue. For example, select a load interface, or the rvalue (New, Call) that does not need RC increment. + + - Set Global to INTRN_MCCLoadRef. + + - Set Static to INTRN_MCCLoadRefS. + + - Set Volatile to INTRN_MCCLoadRefVol. + + - Then process the lvalue. For example, select a write interface, or save the old value or not. + + - Set Global to INTRN_MCCWriteRef. + + - Set Static to INTRN_MCCWriteRefS. + + - Set Volatile to INTRN_MCCWriteRefVol. + +- Return value processing: + + - Increment RC for the return value. + +- Local variable processing: + + - Decrement RC before the current function exits (normal or abnormal) + +Rclowering Processing Procedure: +==================== + +- Set the rclowering processing flag. +- Mark localrefvar. + +- Rclowering pre-processing + + - Mark variables that need RC. + + - Mark the lvalue as DecRef, that is, variable value in the assignment statement. + + - Ref variable + + - Mark the rvalue as IncRef, that is, referenced or return value in the assignment statement. + + - Return value + + - Ref variable + + - hrow Value register + + - Clear stack variables. + +- Rclowering processing + + - Process the assignment statement that contains the Ref variable. + + - DecRef original value + + - IncRef new value. For details, see RefVar IncRef processing rules. + +- Rclowering post-processing + + - Perform the IncRef operation on the parameter at the function entry and mark the parameter as LocalRefVar. + + - Process the return value of the function. If the attribute is LocalRefVar, then the InRef operation is performed. If not, see RefVar IncRef processing rules. + diff --git a/doc/en/ProgrammingSpecifications.md b/doc/en/ProgrammingSpecifications.md new file mode 100644 index 0000000000000000000000000000000000000000..be602470dc602a958c8fab045b8d849be108e80e --- /dev/null +++ b/doc/en/ProgrammingSpecifications.md @@ -0,0 +1,3077 @@ + + Ark Compiler C++ Coding Style Guide  + + + + + + + + + + + +| Chapter | Content | +| ------------------ | ---------------------------------------- | +| [0 About This Document](#c0) | [Purpose](#c0-1) [Key Points](#c0-2) [Conventions](#c0-3) [Exceptions](#c0-4) | +| [1 Principles](#c1) | [Principles of Good Code](#c1-1) [Class and Function Design Guidelines](#c1-2) [Follow C++ ISO Standards](#c1-4)
[Check Errors During Compilation](#c1-5) [Use Namespaces for Scoping](#c1-6) [Use C++ Features over C Features](#c1-7) | +| [2 Naming](#c2) | [General Naming Rules](#c2-1) [File Names](#c2-2) [Function Names](#c2-3) [Type Names](#c2-4) [Variable Names](#c2-5) [Macro, Constant, and Enumeration Names](#c2-6) | +| [3 Formatting](#c3) | [Line Length](#c3-1) [Indentation](#c3-2) [Braces](#c3-3) [Function Declarations and Definitions](#c3-4) [Function Calls](#c3-5) [if Statements](#c3-6) [Loop Statements](#c3-7) [Switch Statements](#c3-8) [Expressions](#c3-9) [Variable Assignment](#c3-10)
[Initialization](#c3-11) [Pointers and References](#c3-12) [Preprocessor Directives](#c3-13) [Whitespace](#c3-14) [Classes](#c3-15) | +| [4 Comments](#c4) | [Comment Style](#c4-1) [File Header Comments](#c4-2) [Function Header Comments](#c4-3) [Code Comments](#c4-4) | +| [5 Header Files](#c5) | [Header File Responsibility](#c5-1) [Header File Dependency](#c5-2) | +| [6 Scopes](#c6) | [Namespaces](#c6-1) [Global Functions and Static Member Functions](#c6-2) [Global Variables](#c6-3) [Global Constants and Static Member Constants](#c6-4) | +| [7 Classes](#c7) | [Constructors, Copy/Move Constructors, Copy/Move Assignment Operators, and Destructors](#c7-1) [Inheritance](#c7-2) [Multiple Inheritance](#c7-3) [Overloading](#c7-4) | +| [8 Functions](#c8) | [Function Design](#c8-1) [Inline Functions](#c8-2) [Function Parameters](#c8-3) | +| [9 Other C++ Features](#c9) | [Constants and Initialization](#c9-1) [Expressions](#c9-2) [Type Casting](#c9-3) [Resource Allocation and Release](#c9-4) [Standard Template Library](#c9-5) [Usage of const](#c9-6) [Templates](#c9-7) [Macros](#c9-8) [Others](#c9-9)| +| [10 Modern C++ Features](#c10) | [Code Simplicity and Security Improvement](#c10-1) [Smart Pointers](#c10-2) [Lambda](#c10-3) [Interfaces](#c10-4) | +| [11 Security Coding Specifications](#c11) | [Basic Principles](#c11-1) [Variables](#c11-2) [Assertions](#c11-3) [Exception Mechanisms](#c11-4) [Memory](#c11-5) [Dangerous Functions](#c11-6) | + +# 0 About This Document + +## Purpose + +Rules are not perfect. Disabling useful features in specific situations may affect implementation. However, the rules are formulated "to benefit most programmers". If a rule is found unhelpful or difficult to follow during team coding, please send feedback so we can improve it accordingly. + +Before referring to this guide, you are expected to have the following basic capabilities for C++. It is not for a beginner that wants to learn about C++. +1. Have a general knowledge of ISO standards for C++. +2. Be familiar with the basic features of C++, including those of C++ 03/11/14/17. +3. Have a general knowledge of the C++ Standard Library. + + +## Key Points +1. C++ programming style, such as naming and typesetting. +2. C++ modular design, including how to design header files, classes, interfaces, and functions. +3. Best practices of C++ features, including constants, type casting, resource management, and templates. +4. Best practices of modern C++, including conventions that can improve code maintainability and reliability in C++ 11/14/17. + + +## Conventions +**Rule**: a regulating principle that must be followed during programming. + +**Recommendation**: a guideline that must be considered during programming. + +This document is applicable to standard C++ versions (03/11/14/17) unless otherwise specified in the rule. + +## Exceptions +It is necessary to understand the reason for each 'rule' or 'recommendation' and to try and comply with them. +However, some rules and recommendations have exceptions. + +The only acceptable exceptions are those that do not violate the general principles and provide appropriate reasons for the exception. +Try to avoid exceptions because they affect the code consistency. Exceptions to 'Rules' should be very rare. 
+
+The style consistency principle is preferred in the following cases:
+**When you modify external open-source or third-party code, the existing code specifications prevail.**
+**For specific domains, the domain specifications prevail.**
+
+# 1 Principles
+
+## Principles of Good Code
+We refer to Kent Beck's four rules of simple design to guide our coding and to identify good code.
+1. Passes its tests
+2. Minimizes duplication
+3. Maximizes clarity
+4. Has fewer elements
+
+The importance of these four rules decreases in sequence; together they are referred to as the rules of simple design.
+
+The first point is the most important as it stresses external requirements. The second point refers to the modular design of code to ensure orthogonality and maintainability. The third point refers to code readability. The fourth point is code simplicity. Of course, we still emphasize expressiveness over simplicity.
+
+## Class and Function Design Guidelines
+C++ is a typical object-oriented programming (OOP) language. The software engineering industry has developed many OOP principles to guide programmers in writing large-scale, highly scalable, and maintainable code:
+- Basic rule of high cohesion and low coupling: improves the reuse and portability of program modules.
+- SOLID principles: single responsibility, open-closed, Liskov substitution, interface segregation, and dependency inversion. The SOLID principles make a program less coupled and more robust.
+- Law of Demeter: reduces coupling between classes.
+- "Tell, Don't ask": it is better to issue an object a command to perform some operation or logic than to query its state and then take some action as a result.
+- Composite/Aggregate Reuse Principle (CARP): favor composition/aggregation over class inheritance.
+
+## Follow C++ ISO Standards
+C++ code should be written using features defined by the ISO C++ standard. Features that are not defined by the ISO standard, or that are specific to particular compilers, must be used with caution. Extended features provided by compilers such as GCC must also be used with caution, because they lead to poor portability.
+
+Note: If extended features are required by the product, encapsulate them behind independent interfaces and enable or disable them through options on those interfaces. Develop programming manuals to instruct programmers on the use of these extended features.
+
+## Check Errors During Compilation
+Let the compiler ensure code robustness instead of writing run-time error-handling code for problems that could have been caught at compile time.
+
+- Use const to ensure data consistency and prevent data from being modified unexpectedly.
+- Use static_assert to check for errors at compile time.
+
+## Use Namespaces for Scoping
+Global variables, global constants, and global type definitions belong to the global scope. Conflicts may occur when a third-party library is used in a project.
+
+Namespaces divide the global scope into independent, named scopes that effectively prevent name conflicts within the global scope.
+1. Classes and structs have their own scopes.
+2. A named namespace can implement an upper-level scope, higher than a class scope.
+3. Unnamed namespaces and the static keyword can be used to implement a file scope.
+
+We strongly recommend that programmers not use global macros, variables, and functions, and instead place them inside a more restrictive scope.
+
+However, namespaces have the following disadvantages:
+1. 
Although two types of the same name can be distinguished in different scopes, they are still confusing to readers. +2. An inline namespace allows its members to be treated as if they are members of the enclosing namespace, which is also confusing to readers. +3. A nested namespace definition can make names lengthy when the namespace needs to be referenced. + +Therefore, we recommended: +- For variables, constants, and type definitions, use namespaces as much as possible to reduce conflicts within the global scope. +- Do not use "using namespace" in header files. +- Do not use inline namespaces. +- Encapsulate definitions using unnamed namespaces or the static keyword in .cpp files to prevent leaking through APIs. + + +## Use C++ Features over C Features +C++ is more type safe and more abstract than C. It is recommended that you use C++ features for programming. For example, use strings instead of `char*`, use vectors instead of native arrays, and use namespaces instead of statically defined members. + + +# 2 Naming +## General Naming Rules +General naming styles include the following: +__CamelCase__ +CamelCase is the practice of writing compound words or phrases so that each word or abbreviation in the phrase begins with a capital letter, with no intervening spaces or punctuation. +There are two conventions: UpperCamelCase and lowerCamelCase. + +__Kernel Style (Unix-like)__ +Words are in lowercase and are separated with underscores (_). +'test_result' + +__Hungarian Style__ +Add a prefix to UpperCamelCase. The prefix indicates the type or usage. +'uiSavedCount ', or ' bTested' + +### Rule 2.1.1 Use the CamelCase style for identifier names. +The Hungarian style is not considered for identifier names, and we choose the CamelCase style over the Kernel style. + +| Type | Naming Style | +| ------------------------------------------------------------ | ---------------------------------------- | +| Class Type, Struct Type, Enumeration Type, and Union Type Definitions | UpperCamelCase | +| Functions (Including Global Functions, Scope Functions, and Member Functions) | UpperCamelCase (You can add a prefix to an interface. XXX_FunctionName) | +| Global Variables (Including Variables of the Global and Namespace Scopes, Namespace Variables, and Class Static Variables), Local Variables, Function Parameters, and Class, Struct, and Union Member Variables | lowerCamelCase | +| Constant, Enumerated Value | k+CamelCase | +| Macro | All caps, separated with underscores (_) | +| Namespace | All in lowercase | + +Note: +__Constant__ indicates the variables of the basic, enumeration, or character string type modified by const or constexpr in the global scope, the namespace scope, and the scope of a static member of a class. +__Variable__ indicates the variables excluding those defined in Constant. These variables use the lowerCamelCase style. + +## File Names +### Recommendation 2.2.1 Use .cpp as the C++ file name extension and .h as the header file name extension. Use Kernel style for file names. + +At present, there are some other file name extensions used by programmers: + +- Header files: .hh, .hpp, .hxx +- Implementation files: .cc, .cxx, .C + +This document uses .h and .cpp extensions. + +File names are as follows: +- database_connection.h +- database_connection.cpp + + +## Function Names +Functions are named in UpperCamelCase. Generally, the verb or verb-object structure is used. You can add a prefix to an interface. 
XXX_FunctionName +```cpp +class List { + public: + void AddElement(const Element& element); + Element GetElement(const unsigned int index) const; + bool IsEmpty() const; + bool MCC_GetClass(); +}; + +namespace utils { +void DeleteUser(); +} +``` + +## Type Names + +Types are named in the UpperCamelCase style. +All types, such as classes, structs, unions, typedefs, and enumerations, use the same conventions. + +```cpp +// classes, structs and unions +class UrlTable { ... +class UrlTableTester { ... +struct UrlTableProperties { ... +union Packet { ... + +// typedefs +typedef std::map PropertiesMap; + +// enums +enum UrlTableErrors { ... +``` + +For namespace naming, UpperCamelCase is recommended. +```cpp +// namespace +namespace osutils { + +namespace fileutils { + +} + +} +``` + + +## Variable Names +General variables are named in lowerCamelCase, including global variables, function parameters, local variables, and member variables. +```cpp +std::string tableName; // Good: Recommended style. +std::string tablename; // Bad: Forbidden style. +std::string path; // Good: When there is only one word, lowerCamelCase (all lowercase) is used. + +class Foo { + private: + std::string fileName; // Do not add a prefix or suffix that identifies the scope. +}; +``` + +## Macro, Constant, and Enumeration Names +For macros, use all caps separated with underscores (_). For constants and enumerated values, use k+CamelCase. +Local constants and ordinary const member variables use the lowerCamelCase naming style. + +```cpp +#define MAX(a, b) (((a) < (b)) ? (b) : (a)) // Example of naming a macro only. + +enum TintColor { // Note: Enumerated types are named in the UpperCamelCase style, while their values are in k+CamelCase style. + kRed, + kDarkRed, + kGreen, + kLightGreen +}; + +int Func(...) { + const unsigned int bufferSize = 100; // Local variable + char *p = new char[bufferSize]; + ... +} + +namespace utils { +const unsigned int kFileSize = 200; // Global variable +} + +``` + +# 3 Formatting +While programming styles coexist to meet different requirements, we strongly recommend that you use a standardized coding style in the same project so that everyone can easily read and understand the code and the code can be easily maintained. + +## Line Length + +### Recommendation 3.1.1 Each line of code should contain a maximum of 120 characters. +It is recommended that the number of characters in each line not exceed 120. If the line of code exceeds the permitted length, wrap the line appropriately. + +Exception: +- If a one-line comment contains a command or URL of more than 120 characters, you can keep the line for ease in using copy, paste, and search using the grep command. +- The length of an #include statement can contain a long path exceeding 120 characters, but this should be avoided if possible. +- The error information in preprocessor directives can exceed the permitted length. +Put the error information of preprocessor directives in one line to facilitate reading and understanding even if the line contains more than 120 characters. + +```cpp +#ifndef XXX_YYY_ZZZ +#error Header aaaa/bbbb/cccc/abc.h must only be included after xxxx/yyyy/zzzz/xyz.h, because xxxxxxxxxxxxxxxxxxxxxxxxxxxxx +#endif +``` + +## Indentation + +### Rule 3.2.1 Use spaces to indent and indent two spaces at a time. +Only spaces can be used for indentation. Two spaces are indented each time. + + + + +## Braces +### Rule 3.3.1 Use the K&R indentation writing style except for functions. 
+The left brace of the function is placed at the end of the statement. +The right brace starts a new line and nothing else is placed on the line, unless it is followed by the remaining part of the same statement, for example, "while" in the do statement, "else" or "else if" in the if statement, a comma, and a semicolon. + + +```cpp +struct MyType { // Follow the statement to the end, and indent one space. + ... +}; + +int Foo(int a) { // The left brace of the function is placed at the end of the statement. + if (...) { + ... + } else { + ... + } +} +``` + + +- Code is more compact. +- Placing the brace at the end of the statement makes the code more continuous in reading rhythm than starting a new line. +- This style complies with mainstream norms and habits of programming languages. +- Most modern IDEs have an automatic code indentation, alignment and display. Placing the brace at the end of a line does not impact understanding. + + +If no function body is inside the braces, the braces can be put on the same line. +```cpp +class MyClass { + public: + MyClass() : value(0) {} + + private: + int value; +}; +``` + +## Function Declarations and Definitions + +### Rule 3.4.1 The return type and the function name of a function declaration or definition must be on the same line. When the length of the function parameter list exceeds the permitted length, a line break is required and parameters must be aligned appropriately. +When a function is declared or defined, the return value type of the function should be on the same line as the function name. If the line length permits, the function parameters should be placed on the same line. Otherwise, the function parameters should be wrapped and properly aligned. +The left parenthesis of a parameter list should always be on the same line as the function name. The right parenthesis always follows the last parameter. + +The following is an example of line breaks: +```cpp +ReturnType FunctionName(ArgType paramName1, ArgType paramName2) { // Good: All are on the same line. + ... +} + +ReturnType VeryVeryVeryLongFunctionName(ArgType paramName1, // Each added parameter starts on a new line because the line length limit is exceeded. + ArgType paramName2, // Good: Aligned with the previous parameter + ArgType paramName3) { + ... +} + +ReturnType LongFunctionName(ArgType paramName1, ArgType paramName2, // The parameters are wrapped because the line length limit is exceeded. + ArgType paramName3, ArgType paramName4, ArgType paramName5) { // Good: After the line break, 4 spaces are used for indentation. + ... +} + +ReturnType ReallyReallyReallyReallyLongFunctionName( // The line length cannot accommodate even the first parameter, and a line break is required. + ArgType paramName1, ArgType paramName2, ArgType paramName3) { // Good: After the line break, 4 spaces are used for indentation. + ... +} +``` + +## Function Calls +### Rule 3.5.1 A function call parameter list should be placed on one line. When the parameter list exceeds the line length and requires a line break, the parameters should be properly aligned. +The left parenthesis always follows the function name, and the right parenthesis always follows the last parameter. + + +The following is an example of line breaks: +```cpp +ReturnType result = FunctionName(paramName1, paramName2); // Good: All function parameters are on one line. + +ReturnType result = FunctionName(paramName1, + paramName2, // Good: Aligned with the previous parameter. 
+ paramName3); + +ReturnType result = FunctionName(paramName1, paramName2, + paramName3, paramName4, paramName5); // Good: Parameters are wrapped. After the line break, 4 spaces are used for indentation. + +ReturnType result = VeryVeryVeryLongFunctionName( // The line length cannot accommodate even the first parameter, and a line break is required. + paramName1, paramName2, paramName3); // After the line break, 4 spaces are used for indentation. +``` + +If some of the parameters called by a function are associated with each other, you can group them for better understanding. +```cpp +// Good: The parameters in each line represent a group of data structures with a strong correlation. They are placed on one line for ease of understanding. +int result = DealWithStructureLikeParams(left.x, left.y, // A group of related parameters. + right.x, right.y); // Another group of related parameters. +``` + +## if Statements + +### Rule 3.6.1 Use braces to include an if statement. +We require that all if statements use braces, even if there is only one statement. + + +- The logic is intuitive and easy to read. +- It is less prone to mistakes when new code is added to the existing if statement. +- If function-like macros are used in a conditional statement, it is less prone to mistakes (in case the braces are missing when macros are defined). + +```cpp +if (objectIsNotExist) { // Good: Braces are added to a single-line conditional statement. + return CreateNewObject(); +} +``` +### Rule 3.6.2 Place if, else, and else if keywords on separate lines. +If there are multiple branches in a conditional statement, they should be placed on separate lines. + +Good example: + +```cpp +if (someConditions) { + DoSomething(); + ... +} else { // Good: Put the if and else keywords on separate lines. + ... +} +``` + +Bad example: + +```cpp +if (someConditions) { ... } else { ... } // Bad: The if and else keywords are put on the same line. +``` + +## Loop Statements +### Rule 3.7.1 Use braces after loop statements. +Similar to if statements, we require that the for and while loop statements contain braces, even if the loop body is empty or there is only one loop statement. + +```cpp +for (int i = 0; i < someRange; i++) { + DoSomething(); +} +``` + +If the loop body is empty, use empty braces instead of a single semicolon. A single semicolon is easy to miss or incorrectly regarded as a part of the loop statement. + +```cpp +for (int i = 0; i < someRange; i++) { } // Good: The for loop body is empty. Braces should be used, instead of semicolons (;). + +while (someCondition) { } // Good: The while loop body is empty. Braces should be used, instead of semicolons (;). + +while (someCondition) { + continue; // Good: The continue keyword highlights the end of the empty loop. Braces are optional in this case. +} + +``` + +Bad example: +```cpp +for (int i = 0; i < someRange; i++) ; // Bad: The for loop body is empty. Braces are mandatory. + +while (someCondition) ; // Bad: Using a semicolon here will make people misunderstand that it is a part of the while statement and not the end to it. +``` + +## Switch Statements +### Rule 3.8.1 Indent case and default in a switch statement with four spaces. +This rule includes the requirement to further indent all content encased by a case or the default case. +```cpp +switch (var) { + case 0: // Good: Indented + DoSomething1(); // Good: Indented + break; + case 1: { // Good: Braces are added. 
+ DoSomething2(); + break; + } + default: + break; +} +``` + +```cpp +switch (var) { +case 0: // Bad: case is not indented. + DoSomething(); + break; +default: // Bad: default is not indented. + break; +} +``` + +## Expressions + +### Recommendation 3.9.1 Keep a consistent line break style for expressions. +A long expression that does not meet the line length requirement must be wrapped appropriately. + + +// Assume that the first line exceeds the length limit. +```cpp +if (currentValue > threshold && + someConditionsion) { + DoSomething(); + ... +} + +int result = reallyReallyLongVariableName1 + // Good + reallyReallyLongVariableName2; +``` +After an expression is wrapped, ensure that the lines are aligned appropriately or indented with 4 spaces. See the following example. + +```cpp +int sum = longVaribleName1 + longVaribleName2 + longVaribleName3 + + longVaribleName4 + longVaribleName5 + longVaribleName6; // Good: Indented with 4 spaces. + +int sum = longVaribleName1 + longVaribleName2 + longVaribleName3 + + longVaribleName4 + longVaribleName5 + longVaribleName6; // Good: The lines are aligned. +``` +## Variable Assignment + +### Rule 3.10.1 Multiple variable definitions and assignment statements cannot be written on one line. +Each line should have only one variable initialization statement. It is easier to read and understand. + +```cpp +int maxCount = 10; +bool isCompleted = false; +``` + +Bad example: + +```cpp +int maxCount = 10; bool isCompleted = false; // Bad: Multiple variable initialization statements must be separated on different lines. Each variable initialization statement occupies one line. +int x, y = 0; // Bad: Multiple variable definitions must be separated on different lines. Each definition occupies one line. + +int pointX; +int pointY; +... +pointX = 1; pointY = 2; // Bad: Multiple variable assignment statements must be separated on different lines. +``` +Exception: Multiple variables can be declared and initialized in the for loop header, if initialization statement (C++17), and structured binding statement (C++17). Multiple variable declarations in these statements have strong associations. Forcible division into multiple lines may cause problems such as scope inconsistency and separation of declaration from initialization. + +## Initialization +Initialization is applicable to structs, unions, and arrays. + +### Rule 3.11.1 When an initialization list is wrapped, ensure that the line after the break is indented and aligned properly. +If a structure or array initialization list is wrapped, the line after the break is indented with four spaces. +Choose the wrap location and alignment style for best comprehension. + +```cpp +const int rank[] = { + 16, 16, 16, 16, 32, 32, 32, 32, + 64, 64, 64, 64, 32, 32, 32, 32 +}; +``` + +## Pointers and References +### Recommendation 3.12.1 The pointer type "`*`" follows a variable name. There is one space between variable name and type. + +```cpp +int *p = nullptr; // Good +``` + +Exception: When a variable is modified by const or restrict, "`*`" cannot follow the variable or type. +```cpp +char * const VERSION = "V100"; +``` + +### Recommendation 3.12.2 The reference type "`&`" follows a variable name. There is one space between variable name and type. +```cpp +int i = 8; + +int &p = i; // Good +``` + +## Preprocessor Directives +### Rule 3.13.1 The number sign "#" that starts a preprocessor directive must be at the beginning of the line and is not indented in nested preprocessor directives. 
+The number sign "#" that starts a preprocessor directive must be at the beginning of the line even through the preprocessor directive is inside a function. + +```cpp +#if defined(__x86_64__) && defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16) // Good: "#" is at the beginning of the line. +#define ATOMIC_X86_HAS_CMPXCHG16B 1 // Good: "#" is at the beginning of the line. +#else +#define ATOMIC_X86_HAS_CMPXCHG16B 0 +#endif + + +int FunctionName() { + if (someThingError) { + ... +#ifdef HAS_SYSLOG // Good: Even in the function body, "#" is at the beginning of the line. + WriteToSysLog(); +#else + WriteToFileLog(); +#endif + } +} +``` +The nested preprocessor directives starting with "#" is not indented. + +```cpp +#if defined(__x86_64__) && defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16) +#define ATOMIC_X86_HAS_CMPXCHG16B 1 // Good: Wrapped for easier comprehension. +#else +#define ATOMIC_X86_HAS_CMPXCHG16B 0 +#endif +``` + +## Whitespace +### Rule 3.14.1 Ensure that horizontal spaces are used to highlight keywords and important information, and avoid unnecessary whitespace. +Horizontal spaces are used to highlight keywords and important information. Spaces are not allowed at the end of each code line. The general rules are as follows: + +- Add spaces after keywords such as if, switch, case, do, while, and for. +- Do not add spaces after the left parenthesis or before the right parenthesis. +- For expressions enclosed by braces, either add a space on either side or avoid a space on either side. +- Do not add spaces after unary operators (& * + - ~ !). +- Add a space to the left and right sides of each binary operator (= + - < > * / % | & ^ <= >= == !=). +- Add spaces to the left and right sides of a ternary operator (? :). +- Do not add spaces between a prefix or suffix increment (++) or decrement (--) operator and a variable. +- Do not add spaces before or after a struct member operator (. ->). +- Do not add spaces before commas. Add spaces after commas. +- Do not add spaces between a template or type conversion operator (<>) and a type. +- Do not add spaces before or after a domain operator (::). +- Determine whether to add spaces before and after a colon (:) based on the situation. + +In normal cases: +```cpp +void Foo(int b) { // Good: A space is before the left brace. + +int i = 0; // Good: During variable initialization, there should be spaces before and after =. Do not leave a space before the semicolon. + +int buf[kBufSize] = {0}; // Good: Spaces are not allowed in braces. +``` + +Function definition and call: +```cpp +int result = Foo(arg1,arg2); + ^ // Bad: Function arguments must be separated by spaces for explicit display. + +int result = Foo( arg1, arg2 ); + ^ ^ // Bad: There cannot be spaces after the left parenthesis or before the right parenthesis. +``` + +Pointer and Address Operator +```cpp +x = *p; // Good: There is no space between the operator * and the pointer p. +p = &x; // Good: There is no space between the operator & and the variable x. +x = r.y; // Good: When a member variable is accessed through the operator (.), no space is added. +x = r->y; // Good: When a member variable is accessed through the operator (->), no space is added. +``` + +Other Operators: +```cpp +x = 0; // Good: There is a space before and after the assignment operator (=). +x = -5; // Good: There is no space between the minus sign (–) and the number. +++x; // Good: Do not add spaces between a prefix or suffix increment (++) or decrement (--) operator and a variable. 
+x--; + +if (x && !y) // Good: There is a space before and after the Boolean operator. There is no space between the ! operator and the variable. +v = w * x + y / z; // Good: There is a space before and after the binary operator. +v = w * (x + z); // Good: There is no space before or after the expression in the parentheses. + +int a = (x < y) ? x : y; // Good: Ternary operator. There is a space before and after ? and : +``` + +Loops and Conditional Statements: +```cpp +if (condition) { // Good: There is a space between the if keyword and the left parenthesis, and no space before or after the conditional statement in the parentheses. + ... +} else { // Good: There is a space between the else keyword and the left brace. + ... +} + +while (condition) {} // Good: There is a space between the while keyword and the left parenthesis. There is no space before or after the conditional statement in the parentheses. + +for (int i = 0; i < someRange; ++i) { // Good: There is a space between the for keyword and the left parenthesis, and after the semicolons. + ... +} + +switch (condition) { // Good: There is a space after the switch keyword. + case 0: // Good: There is no space between the case condition and the colon. + ... + break; + ... + default: + ... + break; +} +``` + +Templates and Conversions +```cpp +// Angle brackets (< and >) are not adjacent to space. There is no space before < or between > and (. +vector x; +y = static_cast(x); + +// There can be a space between the type and the pointer operator. Keep the spacing style consistent. +vector x; +``` + +Scope Operators +```cpp +std::cout; // Good: Namespace access. Do not leave spaces. + +int MyClass::GetValue() const {} // Good: Do not leave spaces in the definition of member functions. +``` + +Colons +```cpp +// Scenarios when space is required. + +// Good: Add a space before or after the colon in a derived class definition. +class Sub : public Base { + +}; + +// Add a space before and after the colon for the initialization list of a constructor function. +MyClass::MyClass(int var) : someVar(var) { + DoSomething(); +} + +// Add a space before and after the colon in a bit-field. +struct XX { + char a : 4; + char b : 5; + char c : 4; +}; +``` + +```cpp +// Scenarios when space is not required. + +// Good: // No space is added before or after the colon next to a class access permission (public or private). +class MyClass { + public: + MyClass(int var); + private: + int someVar; +}; + +// No space is added before or after the colon in a switch statement. +switch (value) { + case 1: + DoSomething(); + break; + default: + break; +} +``` + +Note: Currently, all IDEs support automatic deletion of spaces at the end of a line. Please configure your IDE correctly. + +### Recommendation 3.14.2 Use blank lines only when necessary to keep code compact. + +There must be as few blank lines as possible so that more code can be displayed for easy reading. Recommendations: +- Add blank lines according to the correlation between lines. +- Consecutive blank lines are not allowed inside functions, type definitions, macros, and initialization expressions. +- A maximum of **two** consecutive blank lines can be used. +-.Do not add blank lines on the first and last lines of a code block. + +```cpp +int Foo() { + ... +} + + +// Bad: More than one blank lines are used between two function definitions. +int Bar() { + ... +} + + +if (...) { + // Bad: Do not add blank lines on the first and last lines of a code block. + ... 
+ // Bad: Do not add blank lines on the first and last lines of a code block. +} + +int Foo(...) { + // Bad: Do not add blank lines before the first statement in a function body. + ... +} +``` + +## Classes +### Rule 3.15.1 Class access specifier declarations are in the sequence: public, protected, private. Indent each specifier with one space. +```cpp +class MyClass : public BaseClass { + public: // Indented with 1 space. + MyClass(); // Indented with 2 spaces. + explicit MyClass(int var); + ~MyClass() {} + + void SomeFunction(); + void SomeFunctionThatDoesNothing() { + } + + void SetVar(int var) { + someVar = var; + } + + int GetVar() const { + return someVar; + } + + private: + bool SomeInternalFunction(); + + int someVar; + int someOtherVar; +}; +``` + +In each part, it is recommended that similar statements be put together in the following order: Type (including typedef, using, nested structs and classes), Constant, Factory Function, Constructor, Assignment Operator, Destructor, Other Member Function, and Data Member. + + +### Rule 3.15.2 The constructor initialization list must be on the same line or wrapped and aligned with four spaces of indentation. +```cpp +// If all variables can be placed on the same line: +MyClass::MyClass(int var) : someVar(var) { + DoSomething(); +} + +// If the variables cannot be placed on the same line: +// Wrapped at the colon and indented with four spaces. +MyClass::MyClass(int var) + : someVar(var), someOtherVar(var + 1) { // Good: Add a space after the comma. + DoSomething(); +} + +// If an initialization list needs to be placed in multiple lines, put each member on a separate line and align between lines. +MyClass::MyClass(int var) + : someVar(var), // Indented with 4 spaces. + someOtherVar(var + 1) { + DoSomething(); +} +``` + +# 4 Comments +Generally, clear architecture and good naming are recommended to improve code readability, and comments are provided only when necessary. +Comments are used to help readers quickly understand code. Therefore, comments should be provided __for the sake of readers__. + +Comments must be concise, clear, and unambiguous, ensuring that information is complete and not redundant. + +__Comments are as important as code.__ +When writing a comment, you need to step into the reader's shoes and use comments to express what the reader really needs. Comments are used to express the function and intention of code, rather than repeating the code. +When modifying the code, ensure that the comments are consistent with the modified code. It is not polite to modify only code and keep the old comments, which will undermine the consistency between code and comments, and may confuse or even mislead readers. + +## Comment Style +In C++ code, both ` /* */` and ` // ` can be used for commenting. +Comments can be classified into different types, such as file header comments, function header comments, and code comments. This is based on their purposes and positions. +Comments of the same type must keep a consistent style. +(1) Use ` /* */ ` for file header comments. +(2) The style of function header comments and code comments in the same file must be consistent. + +Note: __Example code in this document uses comments in the '//' format only to better describe the rules and recommendations. This does not mean this comment format is better.__ + +## File Header Comments +### Rule 4.2.1 File header comments must contain the copyright notice. 
+```cpp +/* + * Copyright (c) [2019] [name of copyright holder] + * [Software Name] is licensed under the Mulan PSL v1. + * You can use this software according to the terms and conditions of the Mulan PSL v1. + * You may obtain a copy of Mulan PSL v1 at: + * http://license.coscl.org.cn/MulanPSL + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR + * PURPOSE. + * See the Mulan PSL v1 for more details. + */ +``` + + + +## Function Header Comments +### Rule 4.3.1 Function header comments with no content are forbidden. +Not all functions need function header comments. +For information that cannot be described by function signatures, add function header comments. + +Function header comments are placed above the function declaration or definition. Use one of the following styles: +Use `//` to start the function header. + +```cpp +// Single-line function header +int Func1(void); + +// Multi-line function header +// Second line +int Func2(void); +``` + +Use `/* */` to start the function header. +```cpp +/* single-line function header */ +int Func1(void); + +/* + * Another single-line function header + */ +int Func2(void); + +/* + * Multi-line function header + * Second line + */ +int Func3(void); +``` +Use function names to describe functions, and add function header comments if necessary. +Do not write useless or redundant function headers. Do not write empty function headers with no content. + +The function header comment content will depend on the function and includes but is not limited to: a function description, return value, performance constraints, usage comments, memory conventions, algorithm implementation, reentering requirements. +In the function interface declaration in the external header file, the function header comment should clearly describe important and useful information. + +Good example: + +```cpp +/* + * The number of written bytes is returned. If -1 is returned, the write operation failed. + * Note that, the memory buffer should be released by the caller. + */ +int WriteString(const char *buf, int len); +``` + +Bad example: +```cpp +/* + * Function name: WriteString + * Function: Write a character string. + * Parameters: + * Return value: + */ +int WriteString(const char *buf, int len); +``` +Problems: + +- The 'Parameters' and 'Return value' have no content. +- The function name comment is redundant. +- The most important thing, that is, who needs to release the buffer, is not clearly stated. + +## Code Comments +### Rule 4.4.1 Code comments are placed above or to the right of the corresponding code. +### Rule 4.4.2 There must be a space between the comment character and the comment content. At least one space is required between the comment and code if the comment is placed to the right of code. +Comments placed above the code should be indented the same as that of the code. +Use one of the following styles: +Use `//`. +```cpp + +// Single-line comment +DoSomething(); + +// Multi-line comment +// Second line +DoSomething(); +``` + +Use `/*' '*/`. +```cpp +/* Single-line comment */ +DoSomething(); + +/* + * Multi-line comment in another mode + * Second line + */ +DoSomething(); +``` +Leave at least one space between the code and the comment on the right. It is recommended that no more than four spaces be left. +You can use the Tab key to indent 1–4 spaces, set this in your IDE or editor. 
Use one of the following styles:

```cpp
int foo = 100;  // Comment on the right
int bar = 200;  /* Comment on the right */
```
Sometimes it is more appealing when comments placed on the right of code are aligned vertically.
After the alignment, ensure that the comment is still 1–4 spaces away from the widest line of code.
Example:

```cpp
const int kConst = 100;         /* Related comments of the same type can be aligned vertically. */
const int kAnotherConst = 200;  /* Leave spaces after code to align comments vertically. */
```
When the comment on the right exceeds the line width, consider placing the comment above the code.

### Rule 4.4.3 Delete unused code segments. Do not comment them out.
Code that is commented out cannot be maintained normally. When you attempt to restore the code, it is very likely to introduce defects that are easy to overlook.
The correct method is to delete unnecessary code directly. If necessary, consider porting or rewriting the code.

Here, commenting out means removing code from compilation without actually deleting it, by using /* */, //, #if 0, #ifdef NEVER_DEFINED, and so on.

### Recommendation 4.4.1 Try not to leave TODO/TBD/FIXME comments in code.
TODO/TBD comments are used to describe required improvements and supplements.
FIXME comments are used to describe defects that need fixing.
They should have a standardized style, which facilitates text search.

```cpp
// TODO(): XX
// FIXME: XX
```


# 5 Header Files
## Header File Responsibility
A header file is an external interface of a module or file. The design of a header file shows most of the system design.
The interface declarations of most functions are more suitably placed in the header file, but implementations (except inline functions) cannot be placed in the header file. Functions, macros, enumerations, and structure definitions that are used only inside a .cpp file should not be placed in the header file.
The header responsibility should be simple. An overly complex header file will make dependencies complex and increase compilation time.

### Recommendation 5.1.1 Each .cpp file should have a .h file with the same name. The file is used to declare the classes and interfaces that need to be exposed externally.
Generally, each .cpp file has a corresponding .h file. This .h file is used to store the function declarations, macro definitions, and class definitions that are to be disclosed externally. In addition, corresponding .inline.h files can be added as required to optimize code.
If a .cpp file does not need to open any interface externally, it should not exist.
Exception: __An entry point (for example, the file where the main function is located), unit tests, and dynamic library code.__

Example:
```cpp
// Foo.h

#ifndef FOO_H
#define FOO_H

class Foo {
 public:
  Foo();
  void Fun();

 private:
  int value;
};

#endif
```

```cpp
// Foo.cpp
#include "Foo.h"

namespace {  // Good: The declaration of the internal function is placed at the top of the .cpp file and is limited to the unnamed namespace or static scope.
void Bar() {
}
}

...

void Foo::Fun() {
  Bar();
}
```

## Header File Dependency
### Rule 5.2.1 Header file cyclic dependency is forbidden.
Cyclic dependency of header files means that a.h contains b.h, b.h contains c.h, and c.h contains a.h. If any header file is modified, all code containing a.h, b.h, and c.h needs to be recompiled.
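For illustration, a minimal sketch of such a cycle, using the same file names as above (the include guards are incidental):

```cpp
// a.h
#ifndef A_H
#define A_H
#include "b.h"  // a.h depends on b.h
#endif

// b.h
#ifndef B_H
#define B_H
#include "c.h"  // b.h depends on c.h
#endif

// c.h
#ifndef C_H
#define C_H
#include "a.h"  // c.h depends on a.h, which closes the cycle
#endif
```

Even with include guards in place, any file that includes one of these headers transitively depends on all three of them.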
+For a unidirectional dependency, for example, a.h contains b.h, b.h contains c.h, and c.h does not contain any header file, modifying a.h does not mean that we need to recompile the source code for b.h or c.h. + +The cyclic dependency of header files directly reflects the unreasonable architecture design, which can be avoided by optimizing the architecture. + +### Rule 5.2.2 Do not include unnecessary header files. +The inclusion of header files that are not used will cause unnecessary dependency, which increases the coupling between modules or units. As long as a header file is modified, the code needs to be recompiled. + +In many systems, the inclusion relationships of header files are complex. To save time, developers may directly include all header files in their files, or even release a god.h file that contains all header files to project teams. This will cause a great time in compilation and great trouble in maintenance. + +### Rule 5.2.3 Header files should be self-contained. +Simply, self-containing means that any header file can be compiled independently. For a file containing a header file, unnecessary burdens are added to users if the file cannot work unless the header file contains another header file. + +For example, if the a.h header file is not self-contained, but must contain b.h, it will cause: + +Each .cpp file that uses the a.h header file must include the additional b.h header file to ensure that the a.h content can be compiled. +The additional b.h header file must be included before a.h, which has a dependency in the inclusion order. + + +### Rule 5.2.4 Header files must have `#define` guards to prevent multiple inclusion. +To prevent header files from being included multiple times, all header files should be protected by #define. Do not use #pragma once. + +When defining a protection character, comply with the following rules: +(1) The protection character uses a unique name. +(2) Do not place code or comments (except for file header comments) before or after the protected part. + +Example: Assume that the timer.h file of the timer module of the VOS project is in the VOS/include/timer/Timer.h directory. Perform the following operations to protect the timer.h file: + +```cpp +#ifndef VOS_INCLUDE_TIMER_TIMER_H +#define VOS_INCLUDE_TIMER_TIMER_H +... +#endif +``` + +You do not need to add a path as shown in the preceding example, but you need to ensure that the macro in the current project is unique. +```cpp +#ifndef TIMER_H +#define TIMER_H +... +#endif +``` + +### Recommendation 5.2.1 It is prohibited to reference external function interfaces and variables in declaration mode. +Interfaces provided by other modules or files can be used only by including header files. +Using external function interfaces and variables in extern declaration mode may cause inconsistency between declarations and definitions when external interfaces are changed. +In addition, this implicit dependency may cause architecture corruption. + +Cases that do not comply with specifications: + +// a.cpp content +```cpp +extern int Fun(); // Bad: Use external functions in extern mode. + +void Bar() { + int i = Fun(); + ... +} +``` + +// b.cpp content +```cpp +int Fun() { + // Do something +} +``` +It should be changed to: + +// a.cpp content +```cpp +#include "b.h" // Good: Use the interface provided by other .cpp by including its corresponding header file. + +void Bar() { + int i = Fun(); + ... 
+} +``` + +// b.h content +```cpp +int Fun(); +``` + +// b.cpp content +```cpp +int Fun() { + // Do something +} +``` +In some scenarios, if the internal functions need to be referenced with no intrusion to the code, the extern declaration mode can be used. +For example: +When performing unit testing on an internal function, you can use the extern declaration to reference the function to be tested. +When a function needs to be stubbed or patched, the function can be declared using extern. + +### Rule 5.2.5 Do not include header files in extern "C". +If a header file is included in extern "C", extern "C" may be nested. Some compilers restrict the nesting level of extern "C". If there are too many nested layers, compilation errors may occur. + +When C and C++ programmings are used together and if extern "C" includes a header file, the original intent behind the header file may be hindered. For example, when the link specifications are modified incorrectly. + +Example: Assume that there are two header files, a.h and b.h. + +// a.h content +```cpp +... +#ifdef __cplusplus +void Foo(int); +#define A(value) Foo(value) +#else +void A(int) +#endif +``` +// b.h content +```cpp +... +#ifdef __cplusplus +extern "C" { +#endif + +#include "a.h" +void B(); + +#ifdef __cplusplus +} +#endif +``` + +Use the C++ preprocessor to expand b.h. The following information is displayed: +```cpp +extern "C" { + void Foo(int); + void B(); +} +``` + +According to the author of a.h, the function Foo is a C++ free function following the "C++" link specification. +However, because `#include "a.h"` is placed inside `extern "C"` in b.h, the link specification of function Foo is changed incorrectly. + +Exception: +In the C++ compilation environment, if you want to reference the header file of pure C, the C header files must not include `extern "C"`. The non-intrusive approach is to include the C header file in `extern "C"`. + +### Recommendation 5.2.2 Use `#include` instead of a forward declaration to include header files. +A forward declaration is for the declaration of classes, functions, and templates and is not meant for its definition. + +- Pros: + 1. Forward declarations can save compilation time. Unnecessary #includes force the compiler to open more files and process more input. + 2. Forward declarations can save unnecessary recompilation time. The use of #include will force your code to be recompiled for multiple times due to unrelated changes in header files. +- Cons: + 1. Forward declarations hide dependency relationship. When a header file is modified, user code will skip the necessary recompilation process. + 2. A forward declaration may be broken by subsequent changes to the library. Forward declarations of functions and templates sometimes prevent header file developers from changing APIs. For example, widening a formal parameter type, adding a formal template parameter with a default value, and so on. + 3. Forward declaration of symbols from the namespace `std::` is seen as undefined behavior (as specified in the C++ 11 standard specification). + 4. Forward declaration of multiple symbols from a header file can be more verbose than simply including (#include) the header. + 5. Structuring code only for forward declaration (for example, using pointer members instead of object members) can make the code more complex and slower. + 6. It is difficult to determine whether a forward declaration or `#include` is needed. In some scenarios, replacing `#include` with a forward declaration may cause unexpected results. 
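As a hypothetical illustration of the last point (the class B and the function Destroy are invented for this sketch): deleting an object through a pointer to a type that is only forward-declared may still compile, but the behavior is undefined when the type has a non-trivial destructor, whereas including the header makes the destructor visible.

```cpp
// b.h
class B {
 public:
  ~B();  // Non-trivial destructor that releases resources.
};

// a.cpp
class B;  // Forward declaration only: B is an incomplete type in this file.

void Destroy(B *b) {
  delete b;  // Undefined behavior: the destructor of B is not visible here; at best the compiler warns.
}

// Replacing the forward declaration with #include "b.h" makes the delete well defined.
```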
+ +Therefore, we should avoid using forward declarations as much as possible. Instead, we use the #include statement to include a header file and ensure dependency. + +### Recommendation 5.2.3 Include headers in the following sequence: .h file corresponding to the .cpp file > other header files according to their stability. +Using standard header file inclusion sequence can enhance readability and avoid hidden dependencies. The recommended header file inclusion priority is: the header file corresponding to the .cpp file > C/C++ standard libraries > .h files from used system libraries > .h files from other libraries > other .h files in the project. + +For example, the sequence of the header files in Foo.cpp is as follows: +```cpp +#include "Foo/Foo.h" + +#include +#include + +#include +#include + +#include "platform/Base.h" +#include "platform/Framework.h" + +#include "project/public/Log.h" +``` +Placing the Foo.h file at the top ensures that when the Foo.h file misses some necessary libraries or an error occurs, the Foo.cpp build is terminated immediately, reducing the compilation time. For the sequence of header files, refer to this suggestion. + +Exception: +Platform-specific code requires conditional compilation. The code can be placed after other "includes". +```cpp +#include "foo/public/FooServer.h" + +#include "base/Port.h" // For LANG_CXX11. + +#ifdef LANG_CXX11 +#include +#endif // LANG_CXX11 +``` + +# 6 Scopes + +## Namespaces +The content of a namespace is not indented. + +### Recommendation 6.1.1 Use an unnamed namespace to encapsulate or use static to modify variables, constants, or functions that do not need to be exported from the .cpp file. +In the C++ 2003 standard, using static to modify the external availability of functions and variables was marked as deprecated. Therefore, unnamed namespaces are the recommended method. + +The main reasons are as follows: +1. There are too many meanings for static in C++: static function member variable, static member function, static global variable, and static function local variable. Each of them has special processing. +2. Static can only be used to define variables, constants, and functions that are not referenced outside the current .cpp file, while namespaces can also be used to encapsulate types. +3. Use a namespace to process the scope of C++ instead of using both static and namespaces. +4. Unnamed namespaces rather than functions modified by static can be used to instantiate templates. + +Do not use unnamed namespaces or static in header files. + +```cpp +// Foo.cpp + +namespace { +const int kMaxCount = 20; +void InternalFun(){}; +} + +void Foo::Fun() { + int i = kMaxCount; + + InternalFun(); +} + +``` + +### Rule 6.1.1 Do not use "using" to import a namespace in header files or before #include statements. +Note: Using "using" to import a namespace will affect subsequent code and may cause symbol conflicts. Therefore, do not use "using" to import a namespace in a header file or before #include in a source file. +Example: + +```cpp +// Header file a.h +namespace namespacea { +int Fun(int); +} +``` + +```cpp +// Header file b.h +namespace namespaceb { +int Fun(int); +} + +using namespace namespaceb; + +void G() { + Fun(1); +} +``` + +```cpp +// Source code a.cpp +#include "a.h" +using namespace namespacea; +#include "b.h" + +void main() { + G(); // using namespace namespacea is before #include "b.h", which will cause the following issues: The calling of namespacea::Fun and namespaceb::Fun is not clear. 
+} +``` + +Using "using" to import a symbol or define an alias in a header file is allowed in customized namespaces of modules, but is prohibited in the global namespace. +```cpp +// foo.h + +#include +using fancy::string; // Bad: It is prohibited to import symbols to global namespaces. + +namespace foo { +using fancy::string; // Good: Symbols can be imported in customized namespaces of modules. +using MyVector = fancy::vector; // Good: In C++11, aliases can be defined in customized namespaces. +} +``` + + +### Rule 6.1.2 Do not use "using namespace std". +Note: The std:: prefix can make code clear and avoid naming conflicts. + + +## Global Functions and Static Member Functions + +### Recommendation 6.2.1 Preferentially use namespaces to manage global functions. If global functions are closely related to a class, you can use static member functions. +Note: Placing non-member functions in a namespace avoids polluting the global scope. Do not use "class + static member function" to simply manage global functions. If a global function is closely tied to a class, it can be used as a static member function of the class. + +If you need to define some global functions for a .cpp file, use unnamed namespaces for management. +```cpp +namespace mynamespace { +int Add(int a, int b); +} + +class File { + public: + static File CreateTempFile(const std::string& fileName); +}; +``` + +## Global Constants and Static Member Constants + +### Recommendation 6.3.1 Preferentially use namespaces to manage global constants. If global constants are closely related to a class, you can use static member constants. +Note: Placing global constants in a namespace avoids polluting the global scope. Do not use "class + static member constant" to simply manage global constants. If a global constant is closely tied to a class, it can be used as a static member constant of the class. + +If you need to define some global constants only for a .cpp file, use unnamed namespaces for management. +```cpp +namespace mynamespace { +const int kMaxSize = 100; +} + +class File { + public: + static const std::string kName; +}; +``` + +## Global Variables + +### Recommendation 6.4.1 Do not use global variables. Use the singleton pattern instead. +Note: Global variables can be modified and read, which causes data coupling between the business code and the global variable. +```cpp +int counter = 0; + +// a.cpp +counter++; + +// b.cpp +counter++; + +// c.cpp +cout << counter << endl; +``` + +Singleton +```cpp +class Counter { + public: + static Counter& GetInstance() { + static Counter counter; + return counter; + } // Simple example of a singleton implementation + + void Increase() { + value++; + } + + void Print() const { + std::cout << value << std::endl; + } + + private: + Counter() : value(0) {} + + private: + int value; +}; + +// a.cpp +Counter::GetInstance().Increase(); + +// b.cpp +Counter::GetInstance().Increase(); + +// c.cpp +Counter::GetInstance().Print(); +``` + +After the singleton is implemented, there is a unique global instance, which can functions as a global variable. In addition, singleton provides better encapsulation. + +Exception: In some cases, the scope of a global variable is only inside a module. Multiple instances of the same global variable may exist in the process space, and each module holds one copy. In this case, a singleton cannot be used as it is limited to one instance. + +# 7 Classes + +Use a struct only for passive objects that carry data; everything else is a class. 
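A brief illustration of this distinction (the names Point and Circle are invented for this sketch):

```cpp
// A struct only carries data passively and has no invariants to protect.
struct Point {
  int x;
  int y;
};

// Anything that owns behavior or invariants is a class, with its data kept private.
class Circle {
 public:
  Circle(Point center, double radius) : center(center), radius(radius) {}

  double Radius() const {
    return radius;
  }

 private:
  Point center;
  double radius;
};
```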
+ +## Constructors, Copy/Move Constructors, Copy/Move Assignment Operators, and Destructors +Constructors, copy/move constructors, copy/move assignment operators, and destructors provide lifetime management methods for objects. +- Constructor: `X()` +- Copy constructor: `X(const X&)` +- Copy assignment operator: `operator=(const X&)` +- Move constructor: `X (X&&)` *Provided in versions later than C++ 11*. +- Move assignment operator: `operator=(X&&)` *Provided in versions later than C++ 11*. +- Destructor: `~X()` + +### Rule 7.1.1 The member variables of a class must be initialized explicitly. +Note: If a class has member variables but no constructors and default constructors are defined, the compiler will automatically generate a constructor, which will not initialize member variables. The object status is uncertain. + +Exception: +- If the member variables of a class have a default constructor, explicit initialization is not required. + +Example: The following code has no constructor, and private data members cannot be initialized: +```cpp +class Message { + public: + void ProcessOutMsg() { + //… + } + private: + unsigned int msgID; + unsigned int msgLength; + unsigned char* msgBuffer; + std::string someIdentifier; +}; + +Message message; // The message member variable is not initialized. +message.ProcessOutMsg(); // Potential risks exist in subsequent use. + +// Therefore, it is necessary to define the default constructor as follows: +class Message { + public: + Message() : msgID(0), msgLength(0) { + } + + void ProcessOutMsg() { + // … + } + + private: + unsigned int msgID; + unsigned int msgLength; + unsigned char* msgBuffer; + std::string someIdentifier; // The member variable has a default constructor. Therefore, explicit initialization is not required. +}; +``` + +### Recommendation 7.1.1 Initialization during declaration (C++ 11) and initialization using the constructor initialization list are preferred for member variables. +Note: Initialization during declaration (C++11) is preferred because initialized values of member variables can be easily understood. If initialized values of certain member variables are relevant to constructors, or C++ 11 is not supported, the constructor initialization list should be used preferentially to initialize these member variables. Compared with the assignment statements in constructors, code of the constructor initialization list is simpler and has higher performance, and can be used to initialize constant and reference members. + +```cpp +class Message { + public: + Message() : msgLength(0) { // Good: The constructor initialization list is preferred. + msgBuffer = NULL; // Bad: Values cannot be assigned in constructors. + } + + private: + unsigned int msgID{0}; // Good: used in C++11. + unsigned int msgLength; + unsigned char* msgBuffer; +}; +``` + +### Rule 7.1.2 Declare single-parameter constructors as explicit to prevent implicit conversion. +Note: If a single-parameter constructor is not declared as explicit, it will become an implicit conversion function. +Example: + +```cpp +class Foo { + public: + explicit Foo(const string& name): name(name) { + } + private: + string name; +}; + + +void ProcessFoo(const Foo& foo){} + +int main(void) { + std::string test = "test"; + ProcessFoo(test); // Compiling failed. + return 0; +} +``` + +The preceding code fails to be compiled because the parameter required by `ProcessFoo` is of the Foo type, which mismatch with the input string type. 
If the explicit keyword of the Foo constructor is removed, implicit conversion is triggered and a temporary Foo object is generated when `ProcessFoo` is called with the string parameter. Usually, this implicit conversion is confusing and bugs are apt to be hidden, due to unexpected type conversion. Therefore, single-parameter constructors require explicit declaration.

### Rule 7.1.3 If copy/move constructors and copy/move assignment operators are not needed, clearly prohibit them.
Note: If users do not define them, the compiler will generate copy/move constructors and copy/move assignment operators (move semantic functions are available in C++ 11 and later versions).
If we do not use copy constructors or copy assignment operators, explicitly delete them.

1. Set copy constructors or copy assignment operators to private and do not implement them.

```cpp
class Foo {
 private:
  Foo(const Foo&);
  Foo& operator=(const Foo&);
};
```
2. Use delete provided by C++ 11.

```cpp
// Copy constructors and copy assignment operators are forbidden together. Use delete provided by C++ 11.
class Foo {
 public:
  Foo(const Foo&) = delete;
  Foo& operator=(const Foo&) = delete;
};
```
3. For a class that provides only static methods, disable constructors to prevent instances from being created.

```cpp
class Helper {
 public:
  static bool DoSomething();

 private:
  Helper();
};
```
4. For a singleton class, disable constructors and copy constructors to prevent instances from being created.

```cpp
class Foo {
 private:
  static Foo *instance;
  Foo() {}
  Foo(const Foo &a);
  Foo& operator=(const Foo &a);
 public:
  static Foo &Instance() {
    if (!instance) {
      instance = new Foo();
    }
    return *instance;
  }
};

Foo* Foo::instance = nullptr;
```
5. For destructors that release resources through raw pointers, disable copy constructors and copy assignment operators to prevent repeated release.

```cpp
class Foo {
 private:
  FILE *fp;
  Foo(const Foo &a);
  Foo& operator=(const Foo &a);
 public:
  Foo() : fp(nullptr) {}
  ~Foo() {
    if (fp != nullptr) {
      fclose(fp);
      fp = nullptr;
    }
  }
};
```

### Rule 7.1.4 Copy constructors and copy assignment operators should be implemented or forbidden together.
Both copy constructors and copy assignment operators provide copy semantics. They should be implemented or forbidden together.

```cpp
// Copy constructors and copy assignment operators are implemented together.
class Foo {
 public:
  ...
  Foo(const Foo&);
  Foo& operator=(const Foo&);
  ...
};

// Copy constructors and copy assignment operators are both set to default, as supported by C++ 11.
class Foo {
 public:
  Foo(const Foo&) = default;
  Foo& operator=(const Foo&) = default;
};

// Copy constructors and copy assignment operators are forbidden together, either by private declaration without implementation or by using delete provided by C++ 11.
class Foo {
 private:
  Foo(const Foo&);
  Foo& operator=(const Foo&);
};
```

### Rule 7.1.5 Move constructors and move assignment operators should be implemented or forbidden together.
The move operation is added in C++ 11. If a class is required to support the move operation, move constructors and move assignment operators need to be implemented.

Both move constructors and move assignment operators provide move semantics. They should be implemented or forbidden together.
```cpp
// Move constructors and move assignment operators are implemented together.
class Foo {
 public:
  ...
  Foo(Foo&&);
  Foo& operator=(Foo&&);
  ...
+}; + +// Move constructors and move assignment operators are both set to default, as supported by C++ 11. +class Foo { + public: + Foo(Foo&&) = default; + Foo& operator=(Foo&&) = default; +}; + +// Move constructors and move assignment operators are forbidden together. Use delete provided by C++ 11. +class Foo { + public: + Foo(Foo&&) = delete; + Foo& operator=(Foo&&) = delete; +}; +``` + +### Rule 7.1.6 It is prohibited to call virtual functions in constructors and destructors. +Note: Calling a virtual function of the current object in a constructor or destructor will cause behavior of non-polymorphism. +In C++, a base class constructs only one complete object at a time. + +Example: Base indicates the base class, and Sub indicates the derived class. +```cpp +class Base { + public: + Base(); + virtual void Log() = 0; // Different derived classes call different log files. +}; + +Base::Base() { // Base class constructor + Log(); // Call the virtual function log. +} + +class Sub : public Base { + public: + virtual void Log(); +}; +``` + +When running the following statement: +`Sub sub;` +The constructor of the derived class is executed first. However, the constructor of the base class is called first. Because the constructor of the base class calls the virtual function log, the log is in the base class version. The derived class is constructed only after the base class is constructed. As a result, behavior of non-polymorphism are caused. +This also applies to destructors. + +### Recommendation 7.1.2 Do not add the inline keyword to functions in the class definition. +Note: By default, functions in the class definition are inline. + + +## Inheritance + +### Rule 7.2.1 Destructors of the base class should be declared as virtual. +Note: Destructors of the derived class can be called during polymorphism invocation only when destructors of the base class are virtual. + +Example: There will be memory leak if destructors of the base class are not declared as virtual. +```cpp +class Base { + public: + virtual std::string getVersion() = 0; + + ~Base() { + std::cout << "~Base" << std::endl; + } +}; +``` + +```cpp +class Sub : public Base { + public: + Sub() : numbers(nullptr) { + } + + ~Sub() { + delete[] numbers; + std::cout << "~Sub" << std::endl; + } + + int Init() { + const size_t numberCount = 100; + numbers = new (std::nothrow) int[numberCount]; + if (numbers == nullptr) { + return -1; + } + + ... + } + + std::string getVersion() { + return std::string("hello!"); + } +private: + int* numbers; +}; +``` + +```cpp +int main(int argc, char* args[]) { + Base* b = new Sub(); + + delete b; + return 0; +} +``` +Because destructors of the base class are not declared as virtual, only destructors of the base class are called when an object is destroyed. Destructors of the derived class Sub are not called. As a result, a memory leak occurs. + + +### Rule 7.2.2 Do not use default parameter values for virtual functions. +Note: In C++, virtual functions are dynamically bound, but the default parameters of functions are statically bound during compilation. This means that the function you finally execute is a virtual function that is defined in the derived class but uses the default parameter value in the base class. To avoid confusion and other problems caused by inconsistent default parameter declarations during overriding of virtual functions, it is prohibited to declare default parameter values for all virtual functions. 
+Example: The default value of parameter "text" of the virtual function "Display" is determined at compilation time instead of runtime, which does not fit with polymorphism. +```cpp +class Base { + public: + virtual void Display(const std::string& text = "Base!") { + std::cout << text << std::endl; + } + + virtual ~Base(){} +}; + +class Sub : public Base { + public: + virtual void Display(const std::string& text = "Sub!") { + std::cout << text << std::endl; + } + + virtual ~Sub(){} +}; + +int main() { + Base* base = new Sub(); + Sub* sub = new Sub(); + + ... + + base->Display(); // The program output is as follows: Base! The expected output is as follows: Sub! + sub->Display(); // The program output is as follows: Sub! + + delete base; + delete sub; + return 0; +}; +``` + +### Rule 7.2.3 Do not redefine inherited non-virtual functions. +Note: Non-virtual functions cannot be dynamically bound (only virtual functions can be dynamically bound). You can obtain the correct result by operating the pointer of the base class. + +Example: +```cpp +class Base { + public: + void Fun(); +}; + +class Sub : public Base { + public: + void Fun(); +}; + +Sub* sub = new Sub(); +Base* base = sub; + +sub->Fun(); // Call Fun of the derived class. +base->Fun(); // Call Fun of the base class. +//... + +``` + +## Multiple Inheritance +In the actual development process, multiple inheritance scenarios are seldom used because the following typical problems may occur: +1. Data duplication and name ambiguity caused by "diamond" inheritance. Therefore, C++ introduces virtual inheritance to solve these problems. +2. In addition to "diamond" inheritance, names of multiple base classes may also conflict with each other, resulting in name ambiguity. +3. If a derived class needs to be extended or needs to override methods of multiple base classes, the responsibilities of the derived classes are unclear and semantics are muddled. +4. Compared with delegation, inheritance is seen as white box reuse, that is, a derived class can access the protected members of the base class, which leads to more coupling. Multiple inheritance, due to the coupling of multiple base classes, leads to even more coupling. + +Multiple inheritance has the following advantages: +Multiple inheritance provides a simpler method for assembling and reusing multiple interfaces or classes. + +Therefore, multiple inheritance can be used only in the following cases: + +### Recommendation 7.3.1 Use multiple inheritance to implement interface separation and multi-role combination. +If a class requires multiple interfaces, combine multiple separated interfaces by using multiple inheritance. This is similar to the Traits mixin of the Scala language. + +```cpp +class Role1 {}; +class Role2 {}; +class Role3 {}; + +class Object1 : public Role1, public Role2 { + // ... +}; + +class Object2 : public Role2, public Role3 { + // ... +}; + +``` + +The C++ standard library has a similar implementation example: +```cpp +class basic_istream {}; +class basic_ostream {}; + +class basic_iostream : public basic_istream, public basic_ostream { + +}; +``` + +## Overloading + +Overload operators should be used when there are sufficient reasons, and they do not change the original perception of the operators. For example, do not use the plus sign (+) to perform subtraction. +Operator overloading can make code more intuitive but has some disadvantages: +- It is often mistaken that the operation is as fast as a built-in operator, which has no performance degradation. 
+- There is no naming to aid debugging. It is more convenient to search by function name than by operator. +- Overloading operators can cause confusion if behavior definitions are not intuitive (for example, if the "+" operator is used for subtraction). +- The implicit conversion caused by the overloading of assignment operators may lead to entrenched bugs. Functions such as Equals () and CopyFrom () can be defined to replace the = and == operators. + + + +# 8 Functions +## Function Design +### Recommendation 8.1.1 Avoid long functions and ensure that each function contains no more than 50 lines (non-null and non-comment). +A function should be displayed on one screen (no longer than 50 lines). It should do only one thing, and do it well. + +Long functions often mean that the functions are too complex to implement more than one function, or overly detailed but not further abstracted. + +Exception: Some implementation algorithm functions may be longer than 50 lines due to algorithm convergence and functional comprehensiveness. + +Even if a long function works very well now, once someone modifies it, new problems may occur, even causing bugs that are difficult to discover. +It is recommended that you split a long function into several functions that are simpler and easier to manage, facilitating code comprehension and modification. + +## Inline Functions + +### Recommendation 8.2.1 An inline function cannot exceed 10 lines. +**Note**: An inline function has the same characteristics of a normal function. The difference between an inline function and a normal function lies in the processing of function calls. When a general function is called, the program execution right is transferred to the called function, and then returned to the function that calls it. When an inline function is called, the invocation expression is replaced with an inline function body. + +Inline functions are only suitable for small functions with only 1-10 lines. For a large function that contains many statements, the function call and return overheads are relatively trivial and do not need to be implemented by an inline function. Most compilers may abandon the inline mode and use the common method to call the function. + +If an inline function contains complex control structures, such as loop, branch (switch), and try-catch statements, the compiler may regard the function as a common function. +**Virtual functions and recursive functions cannot be used as inline functions**. + +## Function Parameters + +### Recommendation 8.3.1 Use a reference instead of a pointer for function parameters. + +**Note**: A reference is more secure than a pointer because it is not empty and does not point to other targets. Using a reference stops the need to check for illegal null pointers. + +Use const to avoid parameter modification, so that readers can clearly know that a parameter is not going to be modified. This greatly enhances code readability. + +### Recommendation 8.3.2 Use strongly typed parameters. Do not use void*. +While different languages have their own views on strong typing and weak typing, it is generally believed that C/C++ is a strongly typed language. Since we use such a strongly typed language, we should keep this style. +An advantage of this is the compiler can find type mismatch problems at the compilation stage. + +Using strong typing helps the compiler find more errors for us. 
Pay attention to the usage of the FooListAddNode function in the following code: +```cpp +struct FooNode { + struct List link; + int foo; +}; + +struct BarNode { + struct List link; + int bar; +} + +void FooListAddNode(void *node) { // Bad: Here, the void * type is used to transfer parameters. + FooNode *foo = (FooNode *)node; + ListAppend(&fooList, &foo->link); +} + +void MakeTheList() { + FooNode *foo = nullptr; + BarNode *bar = nullptr; + ... + + FooListAddNode(bar); // Wrong: In this example, the foo parameter was supposed to be transferred, but the bar parameter is accidentally transferred instead. However, no error is reported. +} +``` + +1. You can use the template function to change the parameter type. +2. A base class pointer can be used to implement polymorphism. + +### Recommendation 8.3.3 A function can have a maximum of five parameters. +If a function has too many parameters, it is apt to be affected by external changes, and therefore maintenance is affected. Too many parameters will also increase the testing workload. + +If a function has more than five parameters, you can: +- Split the function. +- Combine related parameters into a struct. + +# 9 Other C++ Features + +## Constants and Initialization + +Unchanged values are easier to understand, trace, and analyze. Therefore, use constants instead of variables as much as possible. When defining values, use const as a default. + +### Recommendation 9.1.1 Do not use macros to replace constants. + +**Note**: Macros are a simple text replacement that is completed in the preprocessing phase. When an error is reported, the corresponding value is reported. During tracing and debugging, the value is also displayed instead of the macro name. A macro does not support type checking and is insecure. A macro has no scope. + +```cpp +#define MAX_MSISDN_LEN 20 // Bad + +// Use the const constant in C++. +const int kMaxMsisdnLen = 20; // Good + +// In versions later than C++ 11, constexpr can be used. +constexpr int kMaxMsisdnLen = 20; +``` + +### Recommendation 9.1.2 A group of related integer constants must be defined as an enumeration. + +**Note**: Enumerations are more secure than `#define` or `const int`. The compiler checks whether a parameter value is within the enumerated value range to avoid errors. + +```cpp +// Good example: +enum Week { + kSunday, + kMonday, + kTuesday, + kWednesday, + kThursday, + kFriday, + kSaturday +}; + +enum Color { + kRed, + kBlack, + kBlue +}; + +void ColorizeCalendar(Week today, Color color); + +ColorizeCalendar(kBlue, kSunday); // Compilation error. The parameter type is incorrect. + +// Bad example: +const int kSunday = 0; +const int kMonday = 1; + +const int kRed = 0; +const int kBlack = 1; + +bool ColorizeCalendar(int today, int color); +ColorizeCalendar(kBlue, kSunday); // No error is reported. +``` + +When an enumeration value needs to correspond to a specific value, explicit value assignment is required during declaration. Otherwise, do not assign explicit values. This will prevent repeated assignment and reduce the maintenance workload (when adding and deleting members). + +```cpp +// Good example: Device ID defined in the S protocol. It is used to identify a device type. +enum DeviceType { + kUnknown = -1, + kDsmp = 0, + kIsmg = 1, + kWapportal = 2 +}; +``` + + + +### Recommendation 9.1.3 Magic numbers cannot be used. +So-called magic numbers are the numbers that are unintelligible and difficult to understand. + +Some numbers can be understood based on context. 
For example, you may understand the number 12 in certain contexts.
`type = 12;` is not intelligible (it is a magic number), but `month = year * 12;` can be understood, so we would not really class this as a magic number.
The number 0 is often seen as a magic number. For example, `status = 0;` cannot truly express any status information.

Solution:
Comments can be added for numbers that are used locally.
For the numbers that are used multiple times, you must define them as constants and give them descriptive names.

The following cases are forbidden:
No symbol is used to explain the meaning of a number, for example, `const int kZero = 0`.
The symbol name restricts the value, for example, `const int kXxTimerInterval300Ms = 300`; use `kXxTimerInterval` instead.

### Rule 9.1.1 Ensure that a constant has only one responsibility.

**Note**: A constant is used for only a specific function, that is, a constant cannot be used for multiple purposes.

```cpp
// Good example: For protocol A and protocol B, the length of the MSISDN is 20.
const unsigned int kAMaxMsisdnLen = 20;
const unsigned int kBMaxMsisdnLen = 20;

// Or use different namespaces:
namespace namespace1 {
const unsigned int kMaxMsisdnLen = 20;
}

namespace namespace2 {
const unsigned int kMaxMsisdnLen = 20;
}
```

### Recommendation 9.1.4 Do not use memcpy_s or memset_s to initialize non-POD objects.

**Note**: `POD` is short for `Plain Old Data`, which is a concept introduced in the C++ 98 standard (ISO/IEC 14882, first edition, 1998-09-01). The `POD` types include primitive types and aggregate types such as `int`, `char`, `float`, `double`, `enumeration`, `void`, and pointer. Encapsulation and object-oriented features cannot be used (for example, user-defined constructors, assignment operators, destructors, base classes, and virtual functions).

For non-POD classes, such as class objects of non-aggregate types, virtual functions may exist. The memory layout is uncertain and depends on the compiler. Misuse of memory copies may cause serious problems.

Even for a class of the aggregate type, directly copying and comparing its memory bypasses information hiding and data protection, so the `memcpy_s` and `memset_s` operations are not recommended.

For details about the POD type, see the appendix.


## Expressions

### Rule 9.2.1 A switch statement must have a default branch.
In most cases, a switch statement requires a default branch to ensure that there is a default action when the case tag is missing.

Exception:
If the switch condition variables are enumerated and the case branches cover all values, the default branch is redundant.
Modern compilers can check which case branches are missing in the switch statement and provide a warning.

```cpp
enum Color {
  kRed = 0,
  kBlue
};

// The switch condition variables are enumerated. Therefore, you do not need to add a default branch.
switch (color) {
  case kRed:
    DoRedThing();
    break;
  case kBlue:
    DoBlueThing();
    ...
    break;
}
```

### Recommendation 9.2.1 When comparing expressions, follow the principle that the left side tends to change and the right side tends to remain unchanged.
When a variable is compared with a constant, placing the constant on the left, as in `if (MAX == v)`, does not comply with reading habits, and `if (MAX > v)` is even more difficult to understand.
The constant should be placed on the right according to common reading and expression conventions.
The expression is written as follows:
```cpp
if (value == MAX) {

}

if (value < MAX) {

}
```
There are special cases: for example, if the expression `if (MIN < value && value < MAX)` is used to describe a range, the first half, as a constant, should be placed on the left.

You do not need to worry about writing '==' as '=' because the compiler generates a warning for `if (value = MAX)` and other static check tools report an error. Use the tools to catch such writing errors, and keep the code readable.


## Type Casting

Do not use type branches to customize behavior. Customizing behavior through type branches is error-prone and is an obvious sign of attempting to compile C code using C++. This is a very inflexible technique. If you forget to modify all branches when adding a new type, you will not be notified. Use templates and virtual functions to let the type define itself rather than letting the calling side determine behavior.

It is recommended that type casting be avoided. We should consider the data type of each type of data in the code design instead of overusing type casting to solve type conflicts. When designing a basic type, consider the following:
- Should it be unsigned or signed?
- Is it suitable for float or double?
- Should you use an int8, int16, int32, or int64 bit length?

However, we cannot prohibit the use of type casting because the C++ language is a machine-oriented programming language, involving pointer addresses, and we interact with various third-party or underlying APIs. Their type design may not be reasonable and type casting tends to occur in the adaptation process.

Exception: When calling a function, if we do not want to process the result of the function, first consider whether this is your best choice. If you do not want to process the return value of the function, cast it to void.

### Rule 9.3.1 If type casting is required, use the type casting provided by C++ instead of the C style.

**Note**:

The type casting provided by C++ is more targeted, easier to read, and more secure than the C style. C++ provides the following types of casting:
- Type casting:
1. `dynamic_cast`: It is used for downcasting within an inheritance hierarchy and performs a runtime type check. Design the base class and derived class properly to avoid using dynamic_cast for casting.
2. `static_cast`: It is similar to the C style casting, which can be used to convert a value, or to convert the pointer or reference of a derived class into a base class pointer or reference. This casting is often used to eliminate type ambiguity brought on by multiple inheritance, which is relatively safe. If it is a pure arithmetic conversion, use the braces as stated in the following text.
3. `reinterpret_cast`: It is used to convert irrelevant types. `reinterpret_cast` forces the compiler to reinterpret the memory of a certain type of objects into another type, which is an unsafe conversion. It is recommended that `reinterpret_cast` be used as little as possible.
4. `const_cast`: It is used to remove the `const` attribute of an object so that the object can be modified. It is recommended that `const_cast` be used as little as possible.

- Arithmetic conversion: (Supported by C++ 11 and later versions)
  If the type information is not lost, for example, the casting from float to double, or from int32 to int64, brace initialization is recommended.
```cpp
  double d{ someFloat };
  int64_t i{ someInt32 };
```

### Recommendation 9.3.1 Avoid using `dynamic_cast`.
1. `dynamic_cast` depends on the RTTI of C++ so that the programmer can identify the type of an object at run time.
2. `dynamic_cast` indicates that a problem occurs in the design of the base class and derived class. The derived class destroys the contract of the base class and it is necessary to use `dynamic_cast` to convert the class to a subclass for special processing. In this case, it is more desirable to improve the design of the class, instead of using `dynamic_cast` to solve the problem.

### Recommendation 9.3.2 Avoid using `reinterpret_cast`.

**Note**: `reinterpret_cast` is used to convert irrelevant types. Trying to use `reinterpret_cast` to force a type to another type destroys the security and reliability of the type and is an insecure casting method. Avoid casting between irrelevant types.

### Recommendation 9.3.3 Avoid using `const_cast`.

**Note**: The `const_cast` operator is used to remove the `const` and `volatile` properties of an object.

Modifying a const object through a pointer or reference obtained from a const_cast conversion results in undefined behavior.

```cpp
// Bad example:
const int i = 1024;
int* p = const_cast<int*>(&i);
*p = 2048;  // The behavior is undefined.
```

```cpp
// Bad example:
class Foo {
 public:
  Foo() : i(3) {}

  void Fun(int v) {
    i = v;
  }

 private:
  int i;
};

int main(void) {
  const Foo f;
  Foo* p = const_cast<Foo*>(&f);
  p->Fun(8);  // The behavior is undefined.
}
```


## Resource Allocation and Release

### Rule 9.4.1 When a single object is released, delete is used. When an array object is released, delete[] is used.
Note: delete is used to delete a single object, and delete[] is used to delete an array object. Reason:

- new: Apply for memory from the system and call the corresponding constructor to initialize an object.
- new[n]: Apply for memory for n objects and call the constructor n times for each object to initialize them.
- delete: Call the corresponding destructor first and release the memory of an object.
- delete[]: Call the corresponding destructor for each object and release their memory.

If the usage of new and delete does not match this format, the results are unknown. For a non-class type, new and delete will not call the constructor or destructor.

The incorrect format is as follows:
```cpp
const int kMaxArraySize = 100;
int* numberArray = new int[kMaxArraySize];
...
delete numberArray;
numberArray = NULL;
```

The correct format is as follows:
```cpp
const int kMaxArraySize = 100;
int* numberArray = new int[kMaxArraySize];
...
delete[] numberArray;
numberArray = NULL;
```

## Standard Template Library

The use of the standard template library (STL) varies between modules. The following are some basic rules and suggestions.

### Rule 9.5.1 Do not save the pointer returned by c_str() of std::string.

Note: The C++ standard does not specify that the string::c_str() pointer is permanently valid. Therefore, the STL implementation used can return a temporary storage area and release it quickly when calling string::c_str(). Therefore, to ensure the portability of the program, do not save the result of string::c_str(). Instead, call it directly.
+ +Example: + +```cpp +void Fun1() { + std::string name = "demo"; + const char* text = name.c_str(); // After the expression ends, the life cycle of name is still in use and the pointer is valid. + + // If a non-const member function (such as operator[] and begin()) of the string type is invoked and the string is therefore modified, + // the text content may become unavailable or may not be the original character string. + name = "test"; + name[1] = '2'; + + // When the text pointer is used next time, the string is no longer "demo". +} + +void Fun2() { + std::string name = "demo"; + std::string test = "test"; + const char* text = (name + test).c_str(); // After the expression ends, the temporary object generated by the + operator may be destroyed, and the pointer may be invalid. + + // When the text pointer is used next time, it no longer points to the valid memory space. +} +``` +Exception: In rare cases where high performance coding is required , you can temporarily save the pointer returned by string::c_str() to match the existing functions which support only the input parameters of the const char* type. However, you should ensure that the life cycle of the string object is longer than that of the saved pointer, and that the string object is not modified within the life cycle of the saved pointer. + + +### Recommendation 9.5.1 Use std::string instead of char*. + +Note: Using string instead of `char*` has the following advantages: +1. There is no need to consider the null character '\0' at the end. +2. You can directly use operators such as +, =, and ==, and other character string operation functions. +3. No need to consider memory allocation operations. This helps avoid explicit usage of new and delete and the resulting errors. + +Note that in some STL implementations, string is based on the copy-on-write policy, which causes two problems. One is that the copy-on-write policy of some versions does not implement thread security, and the program breaks down in multi-threaded environments. Second, dangling pointers may be caused when a dynamic link library transfers the string based on the copy-on-write policy, due to the fact that reference count cannot be reduced when the library is unloaded. Therefore, it is important to select a reliable STL implementation to ensure the stability of the program. + +Exceptions: +When an API of a system or other third-party libraries is called, only `char*` can be used for defined interfaces. However, before calling the interfaces, you can use string. When calling the interfaces, you can use string::c_str () to obtain the character pointer. +When a character array is allocated as a buffer on the stack, you can directly define the character array without using string or containers such as `vector`. + +### Rule 9.5.2 Do not use auto_ptr. +Note: The std::auto_ptr in the STL library has an implicit ownership transfer behavior. The code is as follows: +```cpp +auto_ptr p1(new T); +auto_ptr p2 = p1; +``` +After the second line of statements is executed, p1 does not point to the object allocated in line 1 and becomes NULL. Therefore, auto_ptr cannot be placed in any standard containers. +This ownership transfer behavior is not expected. In scenarios where ownership must be transferred, implicit transfer should not be used. This often requires the programmer to keep extra attention on code that uses auto_ptr , otherwise access to a null pointer will occur. +There are two common scenarios for using auto_ptr . 
One is to transfer it as a smart pointer out of the function that creates it, and the other is to use auto_ptr as an RAII management class whose resources are released automatically when the auto_ptr's lifecycle ends.
+In the first scenario, you can use std::shared_ptr instead.
+In the second scenario, you can use std::unique_ptr from the C++ 11 standard. std::unique_ptr is the substitute for std::auto_ptr and supports explicit ownership transfer.
+
+Exceptions:
+Until the C++ 11 standard is widely adopted, std::auto_ptr may be used in scenarios where ownership needs to be transferred. However, it is recommended that std::auto_ptr be wrapped, and that the copy constructor and assignment operator of the wrapper class be made unusable, so that the wrapper cannot be placed in a standard container.
+
+
+### Recommendation 9.5.2 Use the new standard header files.
+
+Note:
+When including C++ standard library headers, use the `<cname>` form (for example, `<cstring>`) instead of the C-style `<name.h>` form (for example, `<string.h>`).
+
+## Usage of const
+Add the keyword const to a declared variable or parameter (example: `const int foo`) to prevent it from being modified. Add the const qualifier to member functions (example: `class Foo { int Bar(char c) const; };`) to guarantee that the function does not modify the state of the class's member variables. const variables, data members, functions, and parameters let the compiler check types precisely and catch errors as early as possible. Therefore, we strongly recommend that const be used wherever possible.
+Sometimes it is better to use constexpr from C++ 11 to define true constants.
+
+### Rule 9.6.1 For formal parameters of pointer and reference types, if the parameters do not need to be modified, use const.
+Unchanged values are easier to understand, trace, and analyze. Using const as the default lets the compiler check this intent, making the code more secure and reliable.
+```cpp
+class Foo;
+
+void PrintFoo(const Foo& foo);
+```
+
+### Rule 9.6.2 For member functions that do not modify member variables, use const.
+Declare member functions as const whenever possible. Accessor functions should always be const. As long as a member function does not modify member variables, declare it const.
+
+```cpp
+class Foo {
+ public:
+
+  // ...
+
+  void PrintValue() const {  // Here const qualifies the member function: it does not modify member variables.
+    std::cout << value << std::endl;
+  }
+
+  int GetValue() const {  // And again here.
+    return value;
+  }
+
+ private:
+  int value;
+};
+```
+
+### Recommendation 9.6.1 Member variables that will not be modified after initialization should be defined as constants.
+
+```cpp
+class Foo {
+ public:
+  Foo(int length) : dataLength(length) {}
+ private:
+  const int dataLength;
+};
+```
+
+## Templates
+
+Template programming allows for extremely flexible, type-safe, and high-performance interfaces, enabling reuse of code for different types that share the same behavior.
+
+The disadvantages of template programming are as follows:
+
+1. The techniques used in template programming are often obscure to anyone but language experts. Code that uses templates in complicated ways is often unreadable, and is hard to debug or maintain.
+2. Template programming often leads to extremely poor compile-time error messages: even if an interface is simple, complicated implementation details become visible when the user does something wrong.
+3. 
If the template is not properly used, the code will be over expanded during runtime. +4. It is difficult to modify or refactor the template code. The template code is expanded in multiple contexts, and it is hard to verify that the transformation makes sense in all of them. + +Therefore, it is recommended that __template programming be used only in a small number of basic components and basic data structure__. When using the template programming, minimize the __complexity as much as possible__, and __avoid exposing the template__. It is better to hide programming as an implementation detail whenever possible, so that user-facing headers are readable. And you should write sufficiently detailed comments for code that uses templates. + + +## Macros +In the C++ language, it is strongly recommended that complex macros be used as little as possible. +- For constant definitions, use `const` or `enum` as stated in the preceding sections. +- For macro functions, try to be as simple as possible, comply with the following principles, and use inline functions and template functions for replacement. + +```cpp +// The macro function is not recommended. +#define SQUARE(a, b) ((a) * (b)) + +// Use the template function and inline function as a replacement. +template T Square(T a, T b) { return a * b; } +``` + +For details about how to use macros, see the related chapters about the C language specifications. +**Exception**: For some common and mature applications, for example, encapsulation for new and delete, the use of macros can be retained. + + +## Others + +### Recommendation 9.9.1 Use '\n' instead of std::endl when exporting objects to a file. +Note: std::endl flushes content in the buffer to a file, which may affect the performance. + +# 10 Modern C++ Features + +As the ISO released the C++ 11 language standard in 2011 and released the C++ 17 in March 2017, the modern C++ (C++ 11/14/17) adds a large number of new language features and standard libraries that improve programming efficiency and code quality. +This chapter describes some guidelines for modern C++ use, to avoid language pitfalls. + +## Code Simplicity and Security Improvement +### Recommendation 10.1.1 Use `auto` properly. + + +* `auto` can help you avoid writing verbose, repeated type names, and can also ensure initialization when variables are defined. +* The `auto` type deduction rules are complex and need to be read carefully. +* If using `auto` makes the code clearer, use a specific type of it and use it only for local variables. + +**Example** + +```cpp +// Avoid verbose type names. +std::map::iterator iter = m.find(val); +auto iter = m.find(val); + +// Avoid duplicate type names. +class Foo {...}; +Foo* p = new Foo; +auto p = new Foo; + +// Ensure that the initialization is successful. +int x; // The compilation is correct but the variable is not initialized. +auto x; // The compilation failed. Initialization is needed. +``` + +`auto` type deduction may cause the following problems: + +```cpp +auto a = 3; // int +const auto ca = a; // const int +const auto& ra = a; // const int& +auto aa = ca; // int, const and reference are neglected. +auto ila1 = { 10 }; // std::initializer_list +auto ila2{ 10 }; // std::initializer_list + +auto&& ura1 = x; // int& +auto&& ura2 = ca; // const int& +auto&& ura3 = 10; // int&& + +const int b[10]; +auto arr1 = b; // const int* +auto& arr2 = b; // const int(&)[10] +``` + +If you do not pay attention to `auto` type deduction and ignore the reference, hard-to-find performance problems may be created. 
+ +```cpp +std::vector v; +auto s1 = v[0]; // auto deduction changes s1 to std::string in order to copy v[0]. +``` + +If the `auto` is used to define an interface, such as a constant in the header file, it may be possible that the type has changed because the developer has modified the value. + +In a loop, consider using auto & and auto * to traverse complex objects to improve performance. + +```cpp +for (auto &stmt : bb->GetStmtNodes()) { +... +} +``` + +### Rule 10.1.1 Use the keyword `override` when rewriting virtual functions. + +The keyword `override` ensures that the function is a virtual function and an overridden virtual function of the base class. If the subclass function is different from the base class function prototype, a compilation alarm is generated. + +If you modify the prototype of a base class virtual function but forget to modify the virtual function overridden by the subclass, you can find inconsistency during compilation. You can also avoid forgetting to modify the overridden function when there are multiple subclasses. + +**Example** + +```cpp +class Base { + public: + virtual void Foo(); + void Bar(); +}; + +class Derived : public Base { + public: + void Foo() const override; // Compilation failed: derived::Foo is different from that of the prototype of base::Foo and is not overridden. + void Foo() override; // Compilation successful: derived::Foo overrode base::Foo. + void Bar() override; // Compilation failed: base::Bar is not a virtual function. +}; +``` + +**Summary** +1. When defining the virtual function for the first time based on the base class, use the keyword `virtual`. +2. When the subclass overrides the base class’ virtual function, use the keyword `virtual`. +3. For the non-virtual function, do not use `virtual` or `override`. + +### Rule: 10.1.2 Use the keyword delete to `delete` functions. + +The `delete` keyword is clearer and the application scope is wider than a class member function that is declared as private and not implemented. + +**Example** + +```cpp +class Foo { + private: + // Whether the copy structure is deleted or not is unknown because usually only the header file is checked. + Foo(const Foo&); +}; + +class Foo { + public: + // Explicitly delete the copy assignment operator. + Foo& operator=(const Foo&) = delete; +}; +``` + +The `delete` keyword can also be used to delete non-member functions. + +```cpp +template +void Process(T value); + +template<> +void Process(void) = delete; +``` + +### Rule 10.1.3 Use `nullptr` instead of `NULL` or `0`. + +For a long time, C++ has not had a keyword that represents a null pointer, which is embarrassing: + +```cpp +#define NULL ((void *)0) + +char* str = NULL; // Error: void* cannot be automatically converted to char*. + +void(C::*pmf)() = &C::Func; +if (pmf == NULL) {} // Error: void* cannot be automatically converted to the pointer that points to the member function. +``` + +If `NULL` is defined as `0` or `0L`, the above problems can be solved. + +Alternatively, use `0` directly in places where null pointers are required. However, another problem occurs. The code is not clear, especially when the `auto` is used for automatic deduction. + +```cpp +auto result = Find(id); +if (result == 0) { // Does Find() return a pointer or an integer? + // do something +} +``` + +Literally `0` is of the `int` type (`0L` is the `long` type). Therefore, neither `NULL` nor `0` is a pointer type. +When a function of the pointer or integer type is overloaded, `NULL` or `0` calls only the overloaded pointer function. 
+ +```cpp +void F(int); +void F(int*); + +F(0); // Call F(int) instead of F(int*). +F(NULL); // Call F(int) instead of F(int*). +``` + +In addition, `sizeof(NULL) == sizeof(void*)` does not always make sense, which is a potential risk. + +Summary: If `0` or `0L` is directly used, the code is not clear and type security cannot be ensured. If `NULL` is used, the type security cannot be ensured. These are all potential risks. + +`nullptr` has many advantages. It literally represents the null pointer and makes the code clearer. More to the point, it is no longer an integer type. + +`nullptr` is of the `std::nullptr_t` type. `std::nullptr_t` can be implicitly converted into all original pointer types, so that `nullptr` can represent a null pointer that points to any type. + +```cpp +void F(int); +void F(int*); +F(nullptr); // Call F(int*). + +auto result = Find(id); +if (result == nullptr) { // Find() returns a pointer. + // do something +} +``` + +### Recommendation 10.1.2 Use `using` instead of `typedef`. +For versions earlier than `C++11`, you can define the alias of the type by using `typedef`. No one wants to repeat code like `std::map>`. + +```cpp +typedef std::map> SomeType; +``` + +Using alias for the type is actually encapsulating the type. This encapsulation makes the code clearer, and to a large extent avoids the bulk modification caused by the type change. +For versions later than `C++ 11`, `using` is provided to implement `alias declarations`: + +```cpp +using SomeType = std::map>; +``` + +Compare the two formats: + +```cpp +typedef Type Alias; // It cannot be told whether Type or Alias is at the front. +using Alias = Type; // The format confirms to the assignment rule. It is easy to understand and helps reduce errors. +``` + +If this is not enough to prove the advantages of `using`, the alias template may be a better example: + +```cpp +//: Only one line of code is need to define an alias for a template. +template +using MyAllocatorVector = std::vector>; + +MyAllocatorVector data; // An alias for a template defined with "using". + +template +class MyClass { + private: + MyAllocatorVector data_; // Another. +}; +``` + +`typedef` does not support alias templates and they have to be hacked in. + +```cpp +// A template is used for packaging typedef. Therefore, a template class is needed. +template +struct MyAllocatorVector { + typedef std::vector> type; +}; + +MyAllocatorVector::type data; // ::type needs to be added when using typedef to define an alias. + +template +class MyClass { + private: + typename MyAllocatorVector::type data_; // For a template class, typename is also needed in addition to ::type. +}; +``` + +### Rule 10.1.4 Do not use std::move to operate the const object. +Literally, `std::move` means moving an object. The const object cannot be modified and cannot be moved. Therefore, using `std::move` to operate the const object may confuse code readers. +Regarding actual functions, `std::move` converts an object to the rvalue reference type. It can convert the const object to the rvalue reference of const. Because few types define the move constructor and the move assignment operator that use the const rvalue reference as the parameter, the actual function of code is often degraded to object copy instead of object movement, which brings performance loss. + +**Bad example:** +```cpp +std::string gString; +std::vector gStringList; + +void func() { + const std::string myString = "String content"; + gString = std::move(myString); // Bad: myString is not moved. 
Instead, it is copied.
+  const std::string anotherString = "Another string content";
+  gStringList.push_back(std::move(anotherString)); // Bad: anotherString is not moved. Instead, it is copied.
+}
+```
+
+## Smart Pointers
+### Recommendation 10.2.1 Preferentially use smart pointers instead of raw pointers to manage resources.
+
+Avoid resource leakage.
+
+**Example**:
+
+```cpp
+void Use(int i) {
+  auto p = new int {7};                // Bad: Initializes a local pointer with new.
+  auto q = std::make_unique<int>(9);   // Good: Guarantees that the memory is released.
+  if (i > 0) {
+    return;                            // Returns early and possibly leaks.
+  }
+  delete p;                            // Too late to salvage.
+}
+```
+
+**Exception:**
+Raw pointers can be used in scenarios such as performance-sensitive code and compatibility with existing interfaces.
+
+### Rule 10.2.1 Use `unique_ptr` instead of `shared_ptr`.
+
+1. Using `shared_ptr` widely has an overhead (atomic operations on the `shared_ptr`'s reference count have a measurable cost).
+2. Shared ownership in some cases (such as circular dependency) may create objects that can never be released.
+3. Shared ownership can be an attractive alternative to careful ownership design, but it may obfuscate the design of a system.
+
+### Rule 10.2.2 Use `std::make_unique` instead of `new` to create a `unique_ptr`.
+
+1. `make_unique` provides a simpler creation method.
+2. `make_unique` ensures the exception safety of complex expressions.
+
+**Example**
+
+```cpp
+// Bad: MyClass appears twice, which carries a risk of inconsistency.
+std::unique_ptr<MyClass> ptr(new MyClass(0, 1));
+// Good: MyClass appears once and there is no possibility of inconsistency.
+auto ptr = std::make_unique<MyClass>(0, 1);
+```
+
+Repeating the type may cause serious problems that are difficult to find:
+
+```cpp
+// The code compiles, but the new and delete usage does not match.
+std::unique_ptr<uint8_t> ptr(new uint8_t[10]);
+std::unique_ptr<uint8_t[]> ptr(new uint8_t);
+// No exception safety: The compiler may evaluate the parameters in the following order:
+// 1. Allocate the memory of Foo.
+// 2. Construct Foo.
+// 3. Call Bar.
+// 4. Construct unique_ptr.
+// If Bar throws an exception, Foo is not destroyed and a memory leak occurs.
+F(unique_ptr<Foo>(new Foo()), Bar());
+
+// Exception safety: the allocation and construction of Foo cannot be interleaved with a throwing call.
+F(make_unique<Foo>(), Bar());
+```
+
+**Exception:**
+`std::make_unique` does not support a user-defined `deleter`.
+In scenarios where the `deleter` needs to be customized, it is recommended that a customized version of `make_unique` be implemented in your own namespace.
+Using `new` to create a `unique_ptr` with a user-defined `deleter` is the last choice.
+
+### Rule 10.2.3 Create `shared_ptr` by using `std::make_shared` instead of `new`.
+
+In addition to the consistency benefit described for `std::make_unique`, performance is another reason to prefer `std::make_shared`.
+`std::shared_ptr` manages two entities:
+* Control block (storing the reference count, the `deleter`, etc.)
+* Managed object
+
+When `std::make_shared` creates a `std::shared_ptr`, it allocates, in a single heap allocation, enough memory to store both the control block and the managed object. When `std::shared_ptr<MyClass>(new MyClass)` is used instead, `new MyClass` triggers one heap allocation and the `std::shared_ptr` constructor triggers a second one, resulting in extra overhead.
+
+**Exception:**
+Similar to `std::make_unique`, `std::make_shared` does not support `deleter` customization.
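+
+For the custom `deleter` exception mentioned above, one possible shape is to spell the deleter out in the smart pointer type. The following is only an illustrative sketch; the `FileCloser` and `OpenForRead` names are made up for this example and are not part of any existing code:
+
+```cpp
+#include <cstdio>
+#include <memory>
+
+// Sketch of the custom-deleter case that std::make_unique cannot cover:
+// a FILE* whose cleanup action is fclose rather than delete.
+struct FileCloser {
+  void operator()(std::FILE *file) const {
+    if (file != nullptr) {
+      (void)std::fclose(file);
+    }
+  }
+};
+
+using FileHandle = std::unique_ptr<std::FILE, FileCloser>;
+
+FileHandle OpenForRead(const char *path) {
+  return FileHandle(std::fopen(path, "r"));  // Closed automatically when the handle goes out of scope.
+}
+```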
+
+## Lambda
+### Recommendation 10.3.1 Use `lambda` to capture local variables or to write local functions when normal functions do not work.
+
+Functions cannot capture local variables and cannot be declared at local scope. If you need those things, choose a `lambda` instead of a handwritten `functor`.
+On the other hand, `lambda`s and `functor` objects do not support overloading. If overloading is required, use a function.
+If both a `lambda` and a function would work, prefer a function. Use the simplest tool.
+
+**Example**
+
+```cpp
+// Write a function that accepts only an int or a string.
+// -- Overloading is natural.
+void F(int);
+void F(const string&);
+
+// The local state needs to be captured, or the function needs to appear at statement or expression scope.
+// -- A lambda is natural.
+vector<Work> v = LotsOfWork();
+for (int taskNum = 0; taskNum < max; ++taskNum) {
+  pool.Run([=, &v] {...});
+}
+pool.Join();
+```
+
+### Rule 10.3.1 Avoid capturing by reference in lambdas that will be used nonlocally.
+
+Using a lambda non-locally includes returning it from the enclosing function, storing it on the heap, or passing it to another thread. Local pointers and references should not outlive their scope. Capturing by reference in a lambda means storing a reference to a local object, so if the lambda can outlive that local variable, capturing by reference must not be used.
+
+**Example**
+
+```cpp
+// Bad
+void Foo() {
+  int local = 42;
+  // Captures a reference to a local variable.
+  // After the function returns, local no longer exists,
+  // so the Process() call will have undefined behavior.
+  threadPool.QueueWork([&]{ Process(local); });
+}
+
+// Good
+void Foo() {
+  int local = 42;
+  // Captures a copy of local.
+  // Since a copy of local is made, it will always be available for the call.
+  threadPool.QueueWork([=]{ Process(local); });
+}
+```
+
+### Recommendation 10.3.2 Capture all variables explicitly if `this` is captured.
+
+A `[=]` in a member function seems to indicate capturing by value, but it actually captures data members by reference, because it captures the invisible `this` pointer by value. Generally, this implicit capture should be avoided. If it is really needed, write `this` explicitly.
+
+**Example**
+
+```cpp
+class MyClass {
+ public:
+  void Foo() {
+    int i = 0;
+
+    auto Lambda = [=]() { Use(i, data_); };   // Bad: It looks like copying or capturing by value, but member variables are actually captured by reference.
+
+    data_ = 42;
+    Lambda(); // Calls Use(0, 42);
+    data_ = 43;
+    Lambda(); // Calls Use(0, 43);
+
+    auto Lambda2 = [i, this]() { Use(i, data_); };  // Good: the most explicit and least confusing form.
+  }
+
+ private:
+  int data_ = 0;
+};
+```
+
+### Recommendation 10.3.3 Avoid default capture modes.
+
+The lambda expression provides two default capture modes: by-reference (&) and by-value (=).
+The default "by-reference" capture mode implicitly captures references to all local variables that are used, which easily leads to dangling references. By contrast, explicitly listing the variables to capture makes it easier to check object lifecycles and reduces the chance of a mistake.
+The default "by-value" capture mode implicitly captures the `this` pointer, and it makes it hard to see which variables the lambda depends on. If a static variable is involved, the reader may mistakenly assume that the lambda has copied it.
+Therefore, it is required to clearly state the variables that lambda needs to capture, instead of using the default capture mode. + +**Bad example:** +```cpp +auto func() { + int addend = 5; + static int baseValue = 3; + + return [=]() { // Only addend is actually copied. + ++baseValue; // The modification will affect the value of the static variable. + return baseValue + addend; + }; +} +``` + +**Good example:** +```cpp +auto func() { + int addend = 5; + static int baseValue = 3; + + return [addend, baseValue = baseValue]() mutable { // Uses the C++14 capture initialization to copy a variable. + ++baseValue; // Modifying the copy of a static variable does not affect the value of the static variable. + return baseValue + addend; + }; +} +``` + +Reference: Effective Modern C++: Item 31: Avoid default capture modes. + +## Interfaces +### Recommendation 10.4.1 Use `T*` or `T&` arguments instead of a smart pointer in scenarios where ownership is not involved. + +1. Passing a smart pointer to transfer or share ownership should only be used when the ownership mechanism is explicitly required. +2. Passing a smart pointer (for example, passing the `this` smart pointer) restricts the use of a function to callers using smart pointers. +3. Passing a shared smart pointer adds a runtime performance cost. + +**Example**: + +```cpp +// Accept any int*. +void F(int*); + +// Accept only integers for which you want to transfer ownership. +void G(unique_ptr); + +// Accept only integers for which you want to share ownership. +void G(shared_ptr); + +// Does not need to change the ownership but requires ownership of the caller. +void H(const unique_ptr&); + +// Accept any int. +void H(int&); + +//Bad example +void F(shared_ptr& w) { + // ... + Use(*w); // When only w is used, lifecycle management is not required. + // ... +}; +``` + + +# 11 Secure Coding Standard + +## Basic Principles + +1. Programs must strictly verify external data. During external data processing, programmers must keep this in mind and not make any assumption that external data meets expectations. External data must be strictly checked before being used. + Programmers must abide by this principle in the complex attack environment to ensure that the program execution process is in line with expected results. + +2. The attack surface of code must be minimized. The code implementation should be as simple as possible to avoid unnecessary data exchange with external environments. Excess attack surfaces will increase the attack probability. Therefore, avoid exposing internal data processing of programs to external environments. + +3. Defensive coding strategies must be used to compensate for potential negligence of programmers. Every man is liable to error. Due to uncertainties of external environments and the differences in the experience and habits of programmers, it is hard for the code execution process to fully meet expectations. + Therefore, defensive strategies must be adopted in the coding process to minimize the defects caused by the negligence of programmers. + The measures include: + +- Defining an initial value for the declaration of variables. +- Exercise caution in using global variables. +- Avoid using complex and error-prone functions. +- Do not use error-prone mechanisms of compilers/operating systems. +- Deal with the resource access process carefully. +- Do not change the runtime environment of the operating system. For example, do not create temporary files, modify environment variables, or create processes. 
+- Rectify errors strictly. +- Use the debugging assertion (ASSERT) properly. + +## Variables + +### Rule 11.2.1: Define an initial value for the declaration of pointer variables, variables indicating resource descriptors, or BOOL variables. + +Note: Defining an initial value for the declaration of variables can prevent programmers from referencing uninitialized variables. + +Good example: + +```cpp +SOCKET s = INVALID_SOCKET; +unsigned char *msg = nullptr; +int fd = -1; +``` + +Bad example: In the following code, no initial value is defined for the declaration of variables. As a result, an error occurs in the free step. + +```cpp +char *message; // Error! char *message = nullptr; is required. + +if (condition) { + message = (char *)malloc(len); +} + +if (message != nullptr) { + free(message); //If the condition is not met, the uninitialized memory will be freed. +} +``` + +### Rule 11.2.2: Assign a new value to the variable pointing to a resource handle or descriptor immediately after the resource is freed. + +Note: After a resource is freed, a new value must be immediately assigned to the corresponding variable to prevent the re-reference of the variable. If the release statement is in the last line of the scope, you do not need to assign a new value. + +Good example: + +```cpp +SOCKET s = INVALID_SOCKET; +... +closesocket(s); +s = INVALID_SOCKET; + +unsigned char *msg = nullptr; +... +free(msg); +msg = nullptr; +``` + +### Rule 11.2.3: Ensure that local variables in a function do not take up too much space. + +When a program is running, the local variables in the function are stored in the stack, and the stack size is limited. If a large static array is requested, an error may occur. + It is recommended that the size of the static array not exceed 0x1000. + +In the following code, buff requests a large stack but the stack space is insufficient. As a result, stack overflow occurs in the program. + +```c++ +constexpr int MAX_BUF = 0x1000000; +int Foo() { + char buff[MAX_BUFF] = {0}; // Bad + ... +} +``` + +## Assertions + +### Principles + +Assertions in code consist of ASSERT and CHECK_FATAL. ASSERT is used to determine conditions in DEBUG mode. If conditions are not met, the program exits directly. CHECK_FATAL is used to detect exceptions during program running. If the conditions are not met, the program exits. + +CHECK_FATAL is applicable to scenarios where the input and resource application are not under control. Example: + +```cpp +CHECK_FATAL(mplName.rfind(kMplSuffix) != std::string::npos, "File name %s does not contain .mpl", mplName.c_str()); // The file name does not meet the requirements. + +CHECK_FATAL(intrinCall->GetReturnVec().size() == 1, "INTRN_JAVA_FILL_NEW_ARRAY should have 1 return value"); // The logic restriction is not met. + +CHECK_FATAL(func->GetParamSize() <= 0xffff, "Error:the argsize is too large"); // The validity is verified. + +void *MemPool::Malloc(size_t size) { + ... + CHECK_FATAL(b != nullptr, "ERROR: Malloc error"); // Failed to apply for memory. +} +``` + +ASSERT is applicable to scenarios where you want to locate bugs in the defensive programming mode. Example: + +```cpp +ASSERT(false, "should not be here"); + +ASSERT(false, "Unknown opcode for FoldIntConstComparison"); +``` + +### Recommendation 11.3.1 Do not use ASSERT to verify whether a pointer with security context is nullptr. + +Note: The compiler is an offline compilation tool. The impact of process breakdown is much less than that of online services. 
Therefore, the defensive programming mode should be reduced. Not all input parameters require null pointer verification. Instead, the context logic is used to determine whether null pointer verification is required. An input parameter without the nullptr logic does not need to be verified. For details, see the assertion usage principles. + +### Recommendation 11.3.2 Do not use ASSERT to verify whether a data array with security context exceeds the threshold. + +Note: Similar to the null pointer rule, the context logic is used to determine whether to use assertions for out-of-threshold array verification. For details, see the assertion usage principles. + +### Recommendation 11.3.3 Do not use ASSERT to verify integer overflow, truncation, or wraparound in the case of context security. + +Note: In terms of integer overflow caused by addition or multiplication, verification is not required with the context logic guaranteed. In terms of integer truncation and wraparound caused by type conversion, verification is not required with the context logic guaranteed. For details, see the assertion usage principles. + +To ensure that fault tolerance and logic continue to run, you can use conditional statements for verification. + +### Rule 11.3.1 Do not use ASSERT to verify errors that may occur during program runtime. + +Bad example: + +```cpp +FILE *fp = fopen(path, "r"); +ASSERT(fp != nullptr, "nullptr check"); //Incorrect code: Opening the file may fail. + +char *str = (char *)malloc(MAX_LINE); +ASSERT(str != nullptr, "nullptr check"); //Incorrect code: Memory allocation may fail. +ReadLine(fp, str); +``` + +### Rule 11.3.2 Do not modify the runtime environment in ASSERT. + +Note: In the formal release stage of a program, ASSERT is not compiled. To ensure the function consistency between the debugging version and formal version, do not perform any operation, such as value assignment, variable modification, resource operation, or memory application, in ASSERT. + + +In the following code, ASSERT configuration is incorrect. + +```cpp +ASSERT(i++ > 1000); // p1 is modified. +ASSERT(close(fd) == 0); // fd is closed. +``` + +## Exception Mechanisms + +### Rule 11.4.1 Do not use the C++ exception mechanism. + +Note: Do not use the exception mechanism of C++. All errors must be transferred between functions and judged using error values, but not be handled using the exception mechanism. + +Programmers must fully control the entire coding process, build the attacker mindset, enhance secure coding awareness, and attach importance to procedures with potential errors. Using the C++ exception mechanism to handle errors, however, will weaken the security awareness of programmers because it will: + +Disrupt program execution, making the program structure more complex and used resources not cleared. + +Reduce the reusability of code. The code that uses the exception mechanism cannot be reused by the code that does not use the exception mechanism. + +Depend on the compiler, operating system, and processor. The execution performance of the program will deteriorate if the exception mechanism is used. + +Increase the attack surface of a program in the binary layer after the program is loaded. The attacker can overwrite the abnormal processing function address to launch an attack. + + +## Memory + +### Rule 11.5.1: Verify the requested memory size before requesting memory. + +The requested memory size may come from external data and must be verified to prevent memory abuse. The requested memory size must not be 0. 
+Example: +```cpp +int Foo(int size) { + if (size <= 0) { + //error + ... + } + ... + char *msg = (char *)malloc(size); + ... +} +``` + +### Rule 11.5.2: Check whether memory allocation is successful. + +```cpp +char *msg = (char *)malloc(size); +if (msg != nullptr) { + ... +} +``` + +## Dangerous Functions + +### Rule 11.6.1: Do not use dangerous functions related to memory operations. +Many C functions do not use the destination buffer size as a parameter or consider memory overlapping and invalid pointers. As a result, security vulnerabilities such as buffer overflow may be caused. + +The historical statistics about buffer overflow vulnerabilities show that a majority of the vulnerabilities are caused by memory operation functions that do not consider the destination buffer size. +The following lists the dangerous functions related to memory operations: + +Memory copy functions: memcpy(), wmemcpy(), memmove(), wmemmove() + +Memory initialization function: memset() + +String copy functions: strcpy(), wcscpy(),strncpy(), wcsncpy() + +String concatenation functions: strcat(), wcscat(),strncat(), wcsncat() + +Formatted string output functions: sprintf(), swprintf(), vsprintf(), vswprintf(), snprintf(), vsnprintf() + +Formatted string input functions: scanf(), wscanf(), vscanf(), vwscanf(), fscanf(),fwscanf(),vfscanf(),vfwscanf(),sscanf(), swscanf(), vsscanf(), vswscanf() + +stdin stream-input function: gets() +Use safe functions. For details, see huawei_secure_c. + +Exceptions: In the following cases, external data processing is not involved, and no attack risks exist. Memory operations are complete in this function, and there is no possibility of failure. +Using safe functions causes redundant code, and therefore dangerous functions can be used in these cases. + +(1) Initialize a fixed-length array, or initialize the memory of the structure with a fixed length: +```cpp +BYTE array[ARRAY_SIZE]; + +void Foo() { + char destBuff[BUFF_SIZE]; + ... + memset(array, c1, sizeof(array)); //Assign values to global fixed-length data. + ... + memset(destBuff, c2, sizeof(destBuff)); //Assign values to partial fixed-length data. + ... +} + +typedef struct { + int type; + int data; +} Tag; + +Tag g_tag = {1, 2}; + +void Foo() { + Tag dest; + ... + memcpy((void *)&dest, (const void *)&g_tag, sizeof(Tag)); //Assign values to fixed-length structure. + ... +} +``` + +(2) Initialize memory if function parameters include memory parameters. +```cpp +void Foo(BYTE *buff1, size_t len1, BYTE *buff2, size_t len2) { + ... + memset(buff1, 0, len1); //Clear buff1. + memset(buff2, 0, len2); //Clear buff2. + ... +} +``` + +(3) Assign an initial value after allocating memory from the heap. +```cpp +size_t len = ... +char *str = (char *)malloc(len); +if (str != nullptr) { + memset(str, 0, len); + ... +} +``` + +(4) Copy memory with the same size as the source memory size. +The following code copies a memory block with the same size as srcSize: +```cpp +BYTE *src = ... +size_t srcSize = ... +BYTE *destBuff = new BYTE[srcSize]; +memcpy(destBuff, src, srcSize); +``` + +The following code copies a memory block with the same size as the source character string: +```cpp +char *src = ... +size_t len = strlen(src); +if (len > BUFF_SIZE) { + ... +} +char *destBuff = new char[len + 1]; +strcpy(destBuff, src); +``` + +(5) The source memory stores static character string constants only. (Check whether the destination memory is sufficient during encoding.) 
+The following code directly copies the string constant "hello" to the array: +```cpp +char destBuff[BUFF_SIZE]; +strcpy(destBuff, "hello"); +``` +The following code concatenates static character string constants: +```cpp +const char *list[] = {"red","green","blue"}; +char destBuff[BUFF_SIZE]; +sprintf(destBuff, "hello %s", list[i]); +``` + diff --git a/doc/en/RcApi.md b/doc/en/RcApi.md new file mode 100644 index 0000000000000000000000000000000000000000..c3568ac28b1e4ab326e3d2b1d00b43507968f8bb --- /dev/null +++ b/doc/en/RcApi.md @@ -0,0 +1,525 @@ +RC API +---- + +Reference counting (RC) is a programming technique of storing the number of references to a resource, such as an object, a block of memory, disk space, and others, and releasing the resource when the number of references becomes 0. RC is used to achieve automatic resource management. RC also refers to a garbage collection algorithm that deallocates objects which are no longer referenced. + +To support RC, OpenArkCompiler provides the following APIs for better code generation. + +## void MCC\_IncRef\_NaiveRCFast(address\_t obj) + +**Function:** + +Increments RC of the object. + +**Input parameter:** + +obj: pointer of the heap object + +**Return value:** + +None + +## void MCC\_DecRef\_NaiveRCFast(address\_t obj) + +**Function:** + +Decrements RC of the object. + +**Input parameter:** + +obj: pointer of the heap object + +**Return value:** + +None + +## void MCC\_ClearLocalStackRef(address\_t \*addr) + +**Function:** + +Clears local reference on the thread stack and decrements RC for the stored reference. + +**Input parameter:** + +addr: address of the local reference on the thread stack + +**Return value:** + +None + +## void MCC\_IncDecRef\_NaiveRCFast(address\_t incObj, address\_t decObj) + +**Function:** + +Increments RC for the object to which incObj points and decrements RC for the object to which decObj points. + +**Input parameter:** + +incObj: address of the object whose RC needs increment + +incObj: address of the object whose RC needs decrement + +**Return value:** + +None + +## void MCC\_IncDecRefReset(address\_t incObj, address\_t \*decAddr) + +**Function:** + +Increments RC for the object to which incObj points, decrements RC for the local variable object stored on the stack address pointer decAddr, and clears the memory to which the stack address pointer decAddr points. + +**Input parameter:** + +incObj: heap object whose RC needs increment + +decAddr: address of the local reference on the stack + +**Return value:** + +None + +## void MCC\_DecRefResetPair(address\_t \*decAddr0, address\_t \*decAddr1) + +**Function:** + +Clears the stack address space to which all parameters point, and decrements RC for the old value of the local variable. + +**Input parameter:** + +decAddr0 and decAddr1: addresses of the local reference on the stack + +**Return value:** + +None + +## void MCC\_SetObjectPermanent(address\_t obj) + +**Function:** + +Sets a heap object to be permanently valid. After being invoked, RC for the object reaches the maximum value. + +**Input parameter:** + +obj: address of the heap object + +**Return value:** + +None + +## address\_t MCC\_LoadVolatileStaticField(address\_t \*fieldAddr) + +**Function:** + +Obtains the value of the volatile static variable and increments RC for the fetched heap object. + +**Input parameter:** + +fieldAddr: address of the volatile static variable + +**Return value:** + +Returns the value of the volatile static variable. 
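+
+As a rough illustration of how the entries above fit together, the following sketch shows a hand-written sequence that balances the RC increment performed by MCC_LoadVolatileStaticField. It is conceptual only: `g_someStaticField` and `UseStaticRef` are hypothetical names, the declarations of address_t and the MCC functions are assumed to come from the runtime headers, and in practice such call sequences are emitted by the compiler rather than written by hand.
+
+```cpp
+extern address_t g_someStaticField;  // Hypothetical volatile static reference field.
+
+void UseStaticRef() {
+  // The load returns the referenced object with its RC already incremented.
+  address_t obj = MCC_LoadVolatileStaticField(&g_someStaticField);
+  // ... use obj ...
+  // Release the reference taken by the load once the local is no longer needed.
+  MCC_DecRef_NaiveRCFast(obj);
+}
+```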
+ +## address\_t MCC\_LoadRefStatic(address\_t \*fieldAddr) + +**Function:** + +Obtains the value of the static variable and increments RC for the fetched heap object. + +**Input parameter:** + +fieldAddr: address of the static variable + +**Return value** + +Returns the value of the static variable. + +## address\_t MCC\_LoadVolatileWeakField(address\_t obj, address\_t \*fieldAddr) + +**Function:** + +Obtains the value of the volatile variable marked by the weak annotation. If a non-null heap object is obtained, RC for the object will be incremented. + +**Input parameter:** + +obj: address of the heap object + +fieldAddr: address of the volatile variable marked as weak + +**Return value:** + +Returns the value of the volatile variable marked as weak. A null object pointer may be returned. + +## address\_t MCC\_LoadWeakField(address\_t obj, address\_t \*field\_addr) + +**Function:** + +Obtains the value of the variable marked by the weak annotation. If a non-null heap object is obtained, RC for the object will be incremented. + +**Input parameter:** + +obj: address of the heap object + +fieldAddr: address of the variable marked as weak + +**Return value:** + +Returns the value of the variable marked as weak. A null object pointer may be returned. + +## address\_t MCC\_LoadRefField\_NaiveRCFast(address\_t obj, address\_t \*fieldAddr) + +**Function:** + +Obtains the value of the fieldAddr variable, and increments RC for the obtained heap object. + +**Input parameter:** + +obj: address of the heap object + +fieldAddr: address of the variable + +**Return value:** + +Returns the value of the variable. + +## address\_t MCC\_LoadVolatileField(address\_t obj, address\_t \*fieldAddr) + +**Function:** + +Obtains the value of the volatile variable, and increments RC for the fetched heap object. + +**Input parameter:** + +obj: address of the heap object + +fieldAddr: address of the volatile variable + +**Return value:** + +Returns the value of the volatile variable. + +## void MCC\_WriteReferent(address\_t obj, address\_t value) + +**Function:** + +Store an object to the referent field of a java.lang.ref.Reference object. If a non-null heap object is obtained, RC for the object is incremented. + +**Input parameter:** + +obj: address of java.lang.ref.Reference + +value: address of the heap object + +**Return value:** + +None + +## void MCC\_WriteVolatileStaticFieldNoInc(address\_t \*fieldAddr, address\_t value) + +**Function:** + +Writes a heap object to the volatile static variable. This does not change RC for the heap object, but decrements RC for the old value of the static variable. + +**Input parameter:** + +fieldAddr: address of the volatile static variable + +value: address of the heap object to be written + +**Return value:** + +None + +## void MCC\_WriteVolatileStaticFieldNoDec(address\_t \*fieldAddr, address\_t value) + +**Function:** + +Writes a heap object to the volatile static variable. This increments RC for the heap object, but does not decrement RC for the old value of the static variable. + +**Input parameter:** + +fieldAddr: address of the volatile static variable + +value: address of the heap object to be written + +**Return value:** + +None + +## void MCC\_WriteVolatileStaticFieldNoRC(address\_t \*fieldAddr, address\_t value) + +**Function:** + +Writes a heap object to the volatile static variable. This does not change RC for the new value (value) or the old value (value of fieldAddr). 
+ +**Input parameter:** + +fieldAddr: address of the volatile static variable + +value: address of the heap object to be written + +**Return value:** + +None + +## void MCC\_WriteVolatileStaticField(address\_t \*fieldAddr, address\_t value) + +**Function:** + +Writes a heap object to the volatile static variable. This increments RC for the heap object, and decrements RC for the old value of the static variable. + +**Input parameter:** + +fieldAddr: address of the volatile static variable + +value: address of the heap object to be written + +**Return value:** + +None + +## void MCC\_WriteRefFieldStaticNoInc(address\_t \*fieldAddr, address\_t value) + +**Function:** + +Writes a heap object to the static variable. This does not increment RC for the heap object, but decrements RC for the old value of the static variable. + +**Input parameter:** + +fieldAddr: address of the static variable + +value: address of the heap object to be written + +**Return value:** + +None + +## void MCC\_WriteRefFieldStaticNoDec(address\_t \*fieldAddr, address\_t value) + +**Function:** + +Writes a heap object to the static variable. This increments RC for the heap object, but does not decrement RC for the old value of the static variable. + +**Input parameter:** + +fieldAddr: address of the static variable + +value: address of the heap object to be written + +**Return value:** + +None + +## void MCC\_WriteRefFieldStaticNoRC(address\_t \*fieldAddr, address\_t value) + +**Function:** + +Writes a heap object to the static variable. This does not increment RC for the heap object or decrement RC for the old value of the static variable. + +**Input parameter:** + +fieldAddr: address of the static variable + +value: address of the heap object to be written + +**Return value:** + +None + +## void MCC\_WriteRefFieldStatic(address\_t \*fieldAddr, address\_t value) + +**Function:** + +Writes a heap object to the static variable. This increments RC for the heap object, and decrements RC for the old value of the static variable. + +**Input parameter:** + +fieldAddr: address of the static variable + +value: address of the heap object to be written + +**Return value:** + +None + +## void MCC\_WriteVolatileFieldNoInc(address\_t obj, address\_t \*fieldAddr, address\_t value) + +**Function:** + +Writes a heap object to the volatile variable. This does not increment RC for the heap object, but decrements RC for the old value of the volatile variable. + +**Input parameter:** + +obj: address of the object + +fieldAddr: address of the volatile variable + +value: address of the heap object to be written + +**Return value:** + +None + +## void MCC\_WriteVolatileFieldNoDec(address\_t obj, address\_t \*fieldAddr, address\_t value) + +**Function:** + +Writes a heap object to the volatile variable. This increments RC for the heap object, but does not decrement RC for the old value of the volatile variable. + +**Input parameter:** + +obj: address of the object + +fieldAddr: address of the volatile variable + +value: address of the heap object to be written + +**Return value:** + +None + +## void MCC\_WriteVolatileFieldNoRC(address\_t obj, address\_t \*fieldAddr, address\_t value) + +**Function:** + +Writes a heap object to the volatile variable. This does not increment RC for the heap object or decrement RC for the old value of the volatile variable. 
+ +**Input parameter:** + +obj: address of the object + +fieldAddr: address of the volatile variable + +value: address of the heap object to be written + +**Return value:** + +None + +## void MCC\_WriteVolatileField(address\_t obj, address\_t \*fieldAddr, address\_t value) + +**Function:** + +Writes a heap object to the volatile variable. This increments RC for the heap object, and decrements RC for the old value of the volatile variable. + +**Input parameter:** + +obj: address of the object + +fieldAddr: address of the volatile variable + +value: address of the heap object to be written + +**Return value:** + +None + +## void MCC\_WriteRefFieldNoInc(address\_t obj, address\_t \*fieldAddr, address\_t value) + +**Function:** + +Writes a heap object to the variable. This does not increment RC for the heap object, but decrements RC for the old value of the variable. + +**Input parameter:** + +obj: address of the object + +fieldAddr: address of the variable + +value: address of the heap object to be written + +**Return value:** + +None + +## void MCC\_WriteRefFieldNoDec(address\_t obj, address\_t \*fieldAddr, address\_t value) + +**Function:** + +Writes a heap object to the variable. This increments RC for the heap object, but does not decrement RC for the old value of the variable. + +**Input parameter:** + +obj: address of the object + +fieldAddr: address of the variable + +value: address of the heap object to be written + +**Return value:** + +None + +## void MCC\_WriteRefFieldNoRC(address\_t obj, address\_t \*fieldAddr, address\_t value) + +**Function:** + +Writes a heap object to the variable. This does not increment RC for the heap object or decrement RC for the old value of the variable. + +**Input parameter:** + +obj: address of the object + +fieldAddr: address of the variable + +value: address of the heap object to be written + +**Return value:** + +None + +## void MCC\_WriteRefField(address\_t obj, address\_t \*fieldAddr, address\_t value) + +**Function:** + +Writes a heap object to the variable. This increments RC for the heap object, and decrements RC for the old value of the variable. + +**Input parameter:** + +obj: address of the object + +fieldAddr: address of the variable + +value: address of the heap object to be written + +**Return value:** + +None + +## void MCC\_WriteVolatileWeakField(address\_t obj, address\_t \*fieldAddr, address\_t value) + +**Function:** + +Writes a heap object to the volatile variable marked by Weak annotation. This increments RC for the heap object, and decrements RC for the old value of the volatile variable. + +**Input parameter:** + +obj: address of the object + +fieldAddr: field of the volatile variable marked by the weak annotation + +value: address of the heap object to be written + +**Return value:** + +None + +## void MCC\_WriteWeakField(address\_t obj, address\_t \*fieldAddr, address\_t value) + +**Function:** + +Writes a heap object to the variable marked by the weak annotation. This increments RC for the heap object, and decrements RC for the old value of the variable. 
+ +**Input parameter:** + +obj: address of the object + +fieldAddr: field of the variable which marked by weak annotation + +value: address of the heap object to be written + +**Return value:** + +None + diff --git a/doc/en/TargetConstants.md b/doc/en/TargetConstants.md new file mode 100644 index 0000000000000000000000000000000000000000..4be1770113c4d96ab7d45d7cfd760b8b9abe2c07 --- /dev/null +++ b/doc/en/TargetConstants.md @@ -0,0 +1,136 @@ +### Introduction +This document describes the way to work with target constants in maple IR. +It's very important to distinguish `host` constants from the `target` ones. +`Host` constants represent values on the machine where the compiler runs. +`Target` constants represent values that will be present in the target code generated by the compiler. +So, in the common case there is no one-to-one mapping between `host` constants and `target` ones. +For example, a host machine has `one's complement` representation for integers, but the `target` machine +has `two's complement` integers representation. Or more realistic case when the target machine supports `int128_t` and can +hold 128-bit constants directly, but the `host` machine does not. +So, we need to find a safe and convenient way how to represent and operate on the `target` constants in maple IR +while compiling a user program. + +### Working with constants in maple IR +Let's show how to work with constants on `Constant Folding` optimization as an example. +Consider the following Maple IR: +``` +func &foo static used () i8 { + return (add i8 (constval i8 1, constval i8 2)) +} +``` +we want to fold this `add` to perform calculation at compile time. +We could do something like this: +```c++ +MIRConst *ConstantFold::FoldIntConstBinaryMIRConst(Opcode opcode, PrimType resultType, + const MIRIntConst *intConst0, + const MIRIntConst *intConst1) const { + int64 intValueOfConst0 = intConst0->GetValue(); + int64 intValueOfConst1 = intConst1->GetValue(); + + uint64 result64 = 0; + uint32 result32 = 0; + uint16 result16 = 0; + uint8 result8 = 0; + + bool useResult64 = (GetPrimTypeSize(resultType) == 8); + bool useResult32 = (GetPrimTypeSize(resultType) == 4); + bool useResult16 = (GetPrimTypeSize(resultType) == 2); + bool useResult8 = (GetPrimTypeSize(resultType) == 1); + + switch (opcode) { + case OP_add: { + if (useResult64) { + result64 = static_cast(intValueOfConst0) + static_cast(intValueOfConst1); + } else if (useResult32) { + result32 = static_cast(intValueOfConst0) + static_cast(intValueOfConst1); + } else if (useResult16) { + result16 = static_cast(intValueOfConst0) + static_cast(intValueOfConst1); + } else if (useResult8) { + result8 = static_cast(intValueOfConst0) + static_cast(intValueOfConst1); + } + break; + } + ... +``` +As can be seen, there is a lot of boilerplate code depending on the type size of the result of operation. +In a more general case, the sign of the result type is also has to be taken into account, doubling the code size (e.g. div) +Also, if we manipulate with 128-bit types it's possible that there is no `int128_t` type on the host machine and in this +case we need to perform such calculation manually. For example, we need to hold the value in two `int64_t` variables. +So, we need to be able to work with target constants in a safe and convenient way. To achieve this goal we can use +a special class that's called `IntVal`. It provides a convenient interface for manipulation with target constants. 
+For example, the above case can be managed as follows: +```c++ +MIRConst *ConstantFold::FoldIntConstBinaryMIRConst(Opcode opcode, PrimType resultType, + const MIRIntConst *intConst0, + const MIRIntConst *intConst1) const { + IntVal intVal0 = intConst0->GetValue(); + IntVal intVal1 = intConst1->GetValue(); + + IntVal result(0, resultType); + + switch (opcode) { + case OP_add: { + result = intVal0.Add(intVal1, resultType); + // or + // result = intVal0 + intVal1; + // if resultType is equal to type of operands + break; + } + ... +``` +Let's take a look at `IntVal` class in more detail. + +##### The IntVal class +This class represents a target integer constant in `two's complement` representation. +It's able to hold signed and unsigned integers with arbitrary bit-width (currently, no more than 64 bits. 128-bit support is in progress) +that can be created using the following constructors: +```c++ +/// Creates IntVal object from uint64 value with bit-width defined by bitWidth parameter +/// and interpreted as signed value if isSigned parameter is true +IntVal(uint64 val, uint8 bitWidth, bool isSigned); + +/// The same as above, but bit-width and signedness are obtained from the given PrimType +IntVal(uint64 val, PrimType type); +``` +Also, this class provides an interface to perform arithmetic, bitwise, comparison and other operations on the target constants. +For example: +```c++ +/// perform an addition: *this + val. Bit-width and signedness of values must be the same +IntVal operator+(const IntVal &val) const; + +/// the same as above, but performs '+' in terms of the given integer PrimType +IntVal Add(const IntVal &val, PrimType pType) const; + +/// perform a binary 'and': *this & val. Bit-width and signedness of values must be the same +IntVal operator&(const IntVal &val) const; + +/// the same as above, but performs '&' in terms of the given integer PrimType +IntVal And(const IntVal &val, PrimType pType) const; + +/// perform a comparison: *this < val. Bit-width and signedness of values must be the same +bool operator<(const IntVal &rhs) const; + +/// the same as above, but performs '<' in terms of the given integer PrimType +bool Less(const IntVal &rhs, PrimType pType) const; +``` + +There are `Extend`, `Trunc` and `TruncOrExtend` functions that allow truncating or extending (zero or sign extension) +depending on the given integer PrimType. These functions return new `IntVal` object that has bit-width and sign obtained +from the given PrimType and has a value obtained from the original value by truncation or extension (zero of sign). +```c++ +IntVal TruncOrExtend(PrimType newType) cosnt; +IntVal Extend(PrimType newType) cosnt; +IntVal Trunc(PrimType newType) cosnt; +``` +It's possible to get a host constant from the target one (in case the value can fit into the host constant) using the following +interfaces: +```c++ +/// perform zero extension of the value +uint64 GetZXTValue(uint8 size = 0) const; + +/// perform sign extension of the value +int64 GetSXTValue(uint8 size = 0) const; + +/// perform sign or zero extension of the value depending on its sign +int64 GetExtValue(uint8 size = 0) const; +``` diff --git a/doc/en/VtableItableDescription.md b/doc/en/VtableItableDescription.md new file mode 100644 index 0000000000000000000000000000000000000000..aa01bca9736393114700781a35bb5150b9009ebe --- /dev/null +++ b/doc/en/VtableItableDescription.md @@ -0,0 +1,376 @@ +# Virtual Table and Interface Table Design + +## Virtual Table + +OpenArkCompiler generates a virtual table for each class. 
diff --git a/doc/en/VtableItableDescription.md b/doc/en/VtableItableDescription.md
new file mode 100644
index 0000000000000000000000000000000000000000..aa01bca9736393114700781a35bb5150b9009ebe
--- /dev/null
+++ b/doc/en/VtableItableDescription.md
@@ -0,0 +1,376 @@
+# Virtual Table and Interface Table Design
+
+## Virtual Table
+
+OpenArkCompiler generates a virtual table for each class. A virtual table of one class consists of the virtual functions of its superclass, the virtual functions of the current class, and the default functions of the interfaces that the current class implements. If a function of the current class overrides a function of its superclass, the superclass function is replaced by the current class's function in the virtual table of the current class.
+
+The following is a specific example:
+
+```java
+class A {
+  public int first() {
+    return 0;
+  }
+}
+
+class B extends A {
+  public void foo() {
+  }
+  public int first() {
+    return 1;
+  }
+}
+
+class C extends A {
+  public void bar() {
+  }
+  public int first() {
+    return 2;
+  }
+}
+
+public class IsEmpty {
+  public static void main(String [] args) {
+    A x = new B();
+    x.first();
+    A y = new C();
+    y.first();
+  }
+
+  public void add(A x) {
+    x.first();
+  }
+}
+```
+
+The structure of the virtual table generated by OpenArkCompiler is as follows:
+
+A:
+```
+__vtb_LA_3B:
+	.quad	Ljava_2Flang_2FObject_3B_7Cclone_7C_28_29Ljava_2Flang_2FObject_3B - .
+	.quad	Ljava_2Flang_2FObject_3B_7Cequals_7C_28Ljava_2Flang_2FObject_3B_29Z - .
+	.quad	Ljava_2Flang_2FObject_3B_7Cfinalize_7C_28_29V - .
+	.quad	Ljava_2Flang_2FObject_3B_7CgetClass_7C_28_29Ljava_2Flang_2FClass_3B - .
+	.quad	Ljava_2Flang_2FObject_3B_7ChashCode_7C_28_29I - .
+	.quad	Ljava_2Flang_2FObject_3B_7Cnotify_7C_28_29V - .
+	.quad	Ljava_2Flang_2FObject_3B_7CnotifyAll_7C_28_29V - .
+	.quad	Ljava_2Flang_2FObject_3B_7CtoString_7C_28_29Ljava_2Flang_2FString_3B - .
+	.quad	Ljava_2Flang_2FObject_3B_7Cwait_7C_28_29V - .
+	.quad	Ljava_2Flang_2FObject_3B_7Cwait_7C_28J_29V - .
+	.quad	Ljava_2Flang_2FObject_3B_7Cwait_7C_28JI_29V - .
+	.quad	LA_3B_7Cfirst_7C_28_29I - .
+```
+
+B:
+
+```
+__vtb_LB_3B:
+	.quad	Ljava_2Flang_2FObject_3B_7Cclone_7C_28_29Ljava_2Flang_2FObject_3B - .
+	.quad	Ljava_2Flang_2FObject_3B_7Cequals_7C_28Ljava_2Flang_2FObject_3B_29Z - .
+	.quad	Ljava_2Flang_2FObject_3B_7Cfinalize_7C_28_29V - .
+	.quad	Ljava_2Flang_2FObject_3B_7CgetClass_7C_28_29Ljava_2Flang_2FClass_3B - .
+	.quad	Ljava_2Flang_2FObject_3B_7ChashCode_7C_28_29I - .
+	.quad	Ljava_2Flang_2FObject_3B_7Cnotify_7C_28_29V - .
+	.quad	Ljava_2Flang_2FObject_3B_7CnotifyAll_7C_28_29V - .
+	.quad	Ljava_2Flang_2FObject_3B_7CtoString_7C_28_29Ljava_2Flang_2FString_3B - .
+	.quad	Ljava_2Flang_2FObject_3B_7Cwait_7C_28_29V - .
+	.quad	Ljava_2Flang_2FObject_3B_7Cwait_7C_28J_29V - .
+	.quad	Ljava_2Flang_2FObject_3B_7Cwait_7C_28JI_29V - .
+	.quad	LB_3B_7Cfirst_7C_28_29I - .
+	.quad	LB_3B_7Cfoo_7C_28_29V - .
+```
+C:
+```
+__vtb_LC_3B:
+The first 11 functions are the same as those of A and B.
+  … …
+	.quad	LC_3B_7Cfirst_7C_28_29I - .
+	.quad	LC_3B_7Cbar_7C_28_29V - .
+
+```
+
+Comparison shows that:
+
+1. All classes inherit from the Object class. Therefore, the first 11 functions in each virtual table are inherited from Object, and their layout is the same as that of Object's virtual table.
+2. Entry 12 differs. The subclass B overrides first() of the superclass A, so at the same position the virtual table of class A holds LA_3B_7Cfirst_7C_28_29I while the virtual table of class B holds LB_3B_7Cfirst_7C_28_29I. Class C also inherits from class A, overrides first(), and additionally defines bar(). Therefore, LC_3B_7Cfirst_7C_28_29I lies at position 12, and LC_3B_7Cbar_7C_28_29V lies at position 13.
+
+## Virtual Function Invocation (During Compilation)
+
+For a virtual call, we cannot determine which function is called during compilation.
The following is an example:
+
+```java
+public class IsEmpty {
+  public static void main(String [] args) {
+    A x = new B();
+    x.first();
+    A y = new C();
+    y.first();
+  }
+
+  public void add(A x) {
+    x.first();
+  }
+}
+```
+
+In this case, we are not sure at compile time whether the first() function of B or of C is called.
+
+However, the layout of the first() function in A, B, and C is the same. In this example, the offset of the first() function in the virtual table is 12. Therefore, we can access the function by obtaining the virtual table pointer from the corresponding object and adding offset 12 to that pointer.
+
+## Virtual Function Invocation (Running)
+
+In a program execution process, as shown in Figure 1, if a virtual function is called, we perform the following steps (a minimal sketch of this dispatch follows Figure 1):
+1. Determine the instance class of the object (the `this` pointer). In Figure 1, this is an instance of class C.
+2. Search the virtual table of the corresponding class using the function index.
+3. Return the function pointer that is actually called.
+
+![](media/javavmt.png)
+
Figure 1: Static calling of Java virtual function
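+
+The dispatch itself is just an indexed load from the object's virtual table followed by an indirect call. Below is a
+minimal, self-contained C++ sketch of that idea (illustrative only; the names are invented and this is not the table
+layout actually emitted by OpenArkCompiler):
+
+```c++
+#include <cstdio>
+
+// Each class gets a table of function pointers; an overriding class reuses the same slot index.
+using VFunc = int (*)();
+
+int AFirst() { return 0; }
+int BFirst() { return 1; }
+int CFirst() { return 2; }
+
+// Slot 0 here stands for the fixed slot of first() (slot 12 in the real layout, after the Object methods).
+VFunc vtbA[] = {AFirst};
+VFunc vtbB[] = {BFirst};
+VFunc vtbC[] = {CFirst};
+
+struct Object {
+  VFunc *vtb;  // every object carries a pointer to the virtual table of its class
+};
+
+// A virtual call: load the vtable from the object, index the fixed slot, and call indirectly.
+int CallFirst(const Object &obj) { return obj.vtb[0](); }
+
+int main() {
+  Object a{vtbA}, b{vtbB}, c{vtbC};
+  std::printf("%d %d %d\n", CallFirst(a), CallFirst(b), CallFirst(c));  // prints "0 1 2"
+  return 0;
+}
+```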
+
+## Interface Table
+
+Interface calls are similar to multiple inheritance and are more complex than Java's single inheritance. With multiple inheritance, a unique inheritance order cannot be determined naturally.
+
+In a closed environment, a sequence (iA, iB, iC) can be determined through topological sorting. The advantage is that interface calls can then be processed in a way similar to a virtual table. However, the interface table of a class obtained in this way would be very large and sparsely populated. This approach is not feasible considering performance and code size.
+
+![](media/Topology.png)
+
+
+In an open environment, a sequence cannot be determined through topological sorting during compilation. As a result, the order of functions in the interface table is not fixed. Therefore, in an actual implementation, it is impractical to build a function table with a consistent order that is accessed by offset. During compilation, we can determine all interfaces implemented by a class and the inheritance relationships of those interfaces. At run time, the function signatures can be compared to determine which function needs to be called. As string comparison causes overhead, OpenArkCompiler calculates a hash value of the function name and signature during compilation. First, the hash values are compared. If they are equal and no hash conflict exists, the function is called. If a hash conflict occurs, OpenArkCompiler compares the function name and signature to obtain the function pointer.
+
+In addition, considering running efficiency and ROM size, we divide the interface table into two levels of tables. Level 1 is the real hash table. The hash table size is set to 23, and each function address is stored in the slot corresponding to its hash value (0-22). If no conflict occurs in the level-1 hash table, the entries following the last slot that contains a function address are blank, so these trailing entries can be removed without occupying space. If a conflict occurs in the level-1 hash table, 0 is stored in the conflicting slot, and the address of the level-2 hash table is stored in entry 23.
+
+The structure of the level-1 table is as follows:
+
+| Sequence | Interface Table Entry | Description |
+|----|------------|---------------------------------------------|
+| 0 | &Func or 0 | Function address corresponding to the hash value. If no function hashes to this slot, the value is 0. If a conflict occurs, the conflicting slot is also 0, and the level-2 table address is stored in entry 23. If no conflict occurs, the entries after the last slot (n) that holds a function address are removed. |
+| 1 | &Func or 0 | |
+| 2 | &Func or 0 | |
+| ... | &Func or 0 | |
+| n | &Func | |
+| 23 | &itbC | Address of the level-2 table. If no conflict occurs in the level-1 table, this entry does not exist. |
+
+
+The structure of the level-2 function table is as follows:
+
+| Interface Table Entry | Description |
+|----------------------|--------------------------------------------------|
+| Size | Size of the non-conflicting part of the table |
+| 1 | Placeholder used for alignment; has no meaning |
+| Func1_sig | Func1 signature hash value |
+| &Func1 | Func1 address |
+| Func2_sig | Func2 signature hash value |
+| &Func2 | Func2 address |
+| ...... | |
+| Func3_sig and Func4_sig | Func3 signature and Func4 signature hash values. The two values are the same. |
+| 1 | Indicates a conflict, as the Func3 and Func4 signature hash values are the same. |
+| ...... | |
+| Func3_sig | Func3 signature |
+| &Func3 | Func3 address |
+| Func4_sig | Func4 signature |
+| &Func4 | Func4 address |
+| ...... | |
+
+## Interface Function Calling
+
+Figure 2 shows the process of calling an interface function that is declared as follows:
+
+```java
+interface B { void funcB(); }
+interface C { void funcC(); }
+interface D { void funcD(); }
+class A implements B, C, D {}
+```
+
+![](media/interface1.jpg)
+
Figure 2: Static calling of Java interface function
+
+As shown in Figure 2, in a program execution process, we perform the following steps:
+
+1. Determine the class of the object instance. Here, the object is an instance of class A.
+2. In the level-1 table, search for the address based on the hash value. If an address exists, the function pointer is returned. If the slot value is 0, search the level-2 table: look up the address using the signature hash value. If the address is found, the function pointer is returned; otherwise, use the function name to search the table.
+3. Indirectly invoke the function pointer, passing the related parameters (args) to the indirect call.
+
+The following is a specific example:
+
+The IsEmpty class implements interfaces A and B. Each interface declares two functions.
+
+```java
+interface A {
+  public int add();
+  public int minus();
+}
+
+interface B {
+  public int mult();
+  public int div();
+}
+
+public class IsEmpty implements A, B {
+  public static void main(String[] args) {
+  }
+
+  public void test(B x) {
+    x.mult();
+  }
+
+  public int add() {
+    return 6 + 3;
+  }
+
+  public int minus() {
+    return 6 - 3;
+  }
+
+  public int mult() {
+    return 6 * 3;
+  }
+
+  public int div() {
+    return 6 / 3;
+  }
+}
+```
+
+The interface table of IsEmpty in the maple code is as follows:
+
+```
+var $__itb_LIsEmpty_3B fstatic <[24] <* void>> = [0, 0, 0, 0, 0, 0, 0, 0, addroffunc ptr &LIsEmpty_3B_7Cdiv_7C_28_29I, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, addroffunc ptr &LIsEmpty_3B_7Cadd_7C_28_29I, 0, 0, addrof ptr $__itbC_LIsEmpty_3B]
+
+var $__itbC_LIsEmpty_3B fstatic <[6] <* void>> = [2, 1, 0xb97, addroffunc ptr &LIsEmpty_3B_7Cmult_7C_28_29I, 0x1f7f, addroffunc ptr &LIsEmpty_3B_7Cminus_7C_28_29I]
+```
+
+Corresponding assembly structure:
+
+```
+__itb_LIsEmpty_3B:
+	.quad 0
+	.quad 0
+	.quad 0
+	.quad 0
+	.quad 0
+	.quad 0
+	.quad 0
+	.quad 0
+	.quad LIsEmpty_3B_7Cdiv_7C_28_29I - .
+	.quad 0
+	.quad 0
+	.quad 0
+	.quad 0
+	.quad 0
+	.quad 0
+	.quad 0
+	.quad 0
+	.quad 0
+	.quad 0
+	.quad 0
+	.quad LIsEmpty_3B_7Cadd_7C_28_29I - .
+	.quad 0
+	.quad 0
+	.quad __itbC_LIsEmpty_3B - .
+```
+```
+__itbC_LIsEmpty_3B:
+	.quad 2
+	.quad 1
+	.quad 2967
+	.quad LIsEmpty_3B_7Cmult_7C_28_29I - .
+	.quad 8063
+	.quad LIsEmpty_3B_7Cminus_7C_28_29I - .
+```
+
+The entry content is as follows:
+
+1. The level-1 table (__itb_LIsEmpty_3B) has 24 entries in total: slots 0-22 are indexed by the hash value, and slot 23 holds the level-2 table address. Here, slot 8 and slot 20 hold function addresses, and slot 23 holds the level-2 table address. Therefore, a conflict occurs in the level-1 table, and the specific function address needs to be resolved in the level-2 table.
+
+2. In the level-2 table, the first entry is 2, indicating that there are two functions that do not conflict with each other. The second entry is 1, which is a placeholder used for alignment. The last four entries are the hash values generated from the function signatures and the corresponding function addresses.
+
+In this example, the test function in the source code generates an interface call. The corresponding maple code is as follows:
+
+```
+if (eq u1 u64 (regread u64 %4, constval u64 0)) {
+  callassigned &MCC_getFuncPtrFromItabSecondHash64 (regread ptr %3, constval u64 0xb97, conststr ptr "mult|()I") { regassign u64 %4}
+}
+icallassigned (regread u64 %4, regread ref %2) {}
+```
+
+The calling logic is as follows:
+
+Check whether the entry corresponding to the hash value in the level-1 interface table is null. If it is not null, use that address directly. If it is null, call the getFuncPtrFromItabSecondHash64 function.
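+
+The overall two-level lookup can be sketched as follows. This is a simplified, self-contained illustration: the
+structure names, field layout, and hash handling are invented for the sketch and do not reproduce the runtime's real
+interface table layout or the actual MCC_getFuncPtrFromItabSecondHash64 implementation.
+
+```c++
+#include <cstdint>
+#include <cstring>
+#include <iostream>
+
+using Fn = int (*)();
+constexpr unsigned kHashSlots = 23;  // level-1 slots 0-22; one extra slot points to the conflict table
+
+struct ConflictEntry {
+  uint64_t sigHash;     // hash of "name|signature"
+  const char *nameSig;  // full "name|signature", compared only when hashes collide
+  Fn func;
+};
+
+struct ConflictTable {
+  unsigned count;
+  const ConflictEntry *entries;
+};
+
+struct Itab {
+  Fn slots[kHashSlots];           // level-1: function address or nullptr
+  const ConflictTable *conflict;  // "slot 23": level-2 table, or nullptr if there is no conflict
+};
+
+Fn ResolveInterfaceCall(const Itab &itab, uint64_t sigHash, const char *nameSig) {
+  if (Fn addr = itab.slots[sigHash % kHashSlots]) {
+    return addr;  // fast path: unique hash, the address is stored directly in the level-1 slot
+  }
+  if (itab.conflict == nullptr) {
+    return nullptr;  // the slot is empty and there is no conflict table: no such function
+  }
+  // Slow path: compare signature hashes in the level-2 table, then strings in case of a collision.
+  for (unsigned i = 0; i < itab.conflict->count; ++i) {
+    const ConflictEntry &e = itab.conflict->entries[i];
+    if (e.sigHash == sigHash && std::strcmp(e.nameSig, nameSig) == 0) {
+      return e.func;
+    }
+  }
+  return nullptr;
+}
+
+int Mult() { return 6 * 3; }
+int Minus() { return 6 - 3; }
+
+int main() {
+  // Pretend mult|()I and minus|()I conflicted in level 1, as in the IsEmpty example above.
+  const ConflictEntry entries[] = {{0xb97, "mult|()I", Mult}, {0x1f7f, "minus|()I", Minus}};
+  const ConflictTable conflict{2, entries};
+  Itab itab{};  // all level-1 slots empty
+  itab.conflict = &conflict;
+  std::cout << ResolveInterfaceCall(itab, 0xb97, "mult|()I")() << std::endl;  // prints 18
+  return 0;
+}
+```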
+
+The getFuncPtrFromItabSecondHash64 function has three parameters: the interface table address, the hash value of the function signature, and the function name and signature string (for example, "mult|()I"). The complete calling process is as follows: find the corresponding interface table address based on the classinfo and compare the hash values. If the comparison succeeds and no conflict occurs, the correct address is obtained. If a conflict occurs, the signature names are compared (a character string comparison).
+
+The format of the accessed interface table is the same as that of IsEmpty.
+
+## Interface Override
+
+`default` functions were introduced in Java 8. An implementation in the superclass overrides the default function of an interface, and overriding also depends on the inheritance relationship between interfaces. As shown in the following example, the cA class extends the cB class and implements the iD interface. The foo function is implemented in both the cB class and the iD interface. For the cA class, the implementation of the foo function comes from the superclass cB rather than the iD interface.
+
+```java
+interface iD {
+  public default void foo(){System.out.println("iD foo");}
+}
+
+class cB {
+  public void foo(){System.out.println("cB foo");}
+}
+
+class cA extends cB implements iD {
+}
+
+public class IsEmpty {
+  public static void main(String [] args) {
+    iD obj = new cA();
+    obj.foo();
+  }
+}
+```
+
+
+As shown in the following example, the getValue function is defined in both the Parent and Son interfaces. For the SSon class, the implementation of the getValue function comes from the Son interface rather than the Parent interface.
+
+```java
+interface Parent{
+  default void getValue(){
+    System.out.println("Parent getValue......");
+  }
+}
+
+interface Son extends Parent{
+  default void getValue(){
+    System.out.println("Son getValue......");
+  }
+}
+
+abstract class OfPrimitive implements Parent{
+}
+
+class SSon extends OfPrimitive implements Son{
+}
+
+public class Main {
+  static int get() {
+    return 1;
+  }
+
+  public static void main(String[] args) {
+    Parent son = (Parent)new SSon();
+    son.getValue();
+
+    SSon son2;
+    if(get()==1) {
+      son2 = new SSon();
+    }
+    else son2 = new SSon();
+    son2.getValue();
+  }
+}
+```
+
diff --git a/doc/en/media/MapleDriverStructure.png b/doc/en/media/MapleDriverStructure.png
new file mode 100644
index 0000000000000000000000000000000000000000..adc5f98ba54d4ffb4430cb3bbff37e8e45d06214
Binary files /dev/null and b/doc/en/media/MapleDriverStructure.png differ
diff --git a/doc/en/media/Topology.png b/doc/en/media/Topology.png
new file mode 100644
index 0000000000000000000000000000000000000000..6967780a1ac7addf7811b4271bbf805c6d659027
Binary files /dev/null and b/doc/en/media/Topology.png differ
diff --git a/doc/en/media/addphase.png b/doc/en/media/addphase.png
new file mode 100644
index 0000000000000000000000000000000000000000..049560035bd8b661454107d4be2498f3bc9d06b9
Binary files /dev/null and b/doc/en/media/addphase.png differ
diff --git a/doc/en/media/interface1.jpg b/doc/en/media/interface1.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..7377546fc36b03b282e5d85d0a89fd274288f831
Binary files /dev/null and b/doc/en/media/interface1.jpg differ
diff --git a/doc/en/media/javavmt.png b/doc/en/media/javavmt.png
new file mode 100644
index 0000000000000000000000000000000000000000..ef93f36923559321d0512a862ca9a1ff9d36a6b6
Binary files /dev/null and b/doc/en/media/javavmt.png differ
diff --git a/format.sh b/format.sh
new file mode 100755
index
0000000000000000000000000000000000000000..1189a7868479d16a67d2db24b67a054fca1fdaba --- /dev/null +++ b/format.sh @@ -0,0 +1,8 @@ +#!/bin/bash + +# usage: in OpenArkCompiler dir, ./format.sh xxx.cpp +CLANG_FORMAT=$MAPLE_ROOT/tools/clang+llvm-12.0.0-x86_64-linux-gnu-ubuntu-18.04/bin/clang-format + +$CLANG_FORMAT -style=file -i $1 +sed -i -e 's/ \*,/\*,/g' -e 's/ \*>/\*>/g' -e 's/ \*)/\*)/g' -e 's/ \&,/\&,/g' -e 's/ \&>/\&>/g' -e 's/ \&)/\&)/g' $1 +sed -i ":a;$!N;s/enum\(.*\)\n{/enum\1 {/g;ba" $1 diff --git a/license/LICENSE b/license/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..9e32cdef1625daed25cf365c865f01050877cff3 --- /dev/null +++ b/license/LICENSE @@ -0,0 +1,127 @@ + 木兰宽松许可证, 第2版 + + 木兰宽松许可证, 第2版 + 2020年1月 http://license.coscl.org.cn/MulanPSL2 + + + 您对“软件”的复制、使用、修改及分发受木兰宽松许可证,第2版(“本许可证”)的如下条款的约束: + + 0. 定义 + + “软件”是指由“贡献”构成的许可在“本许可证”下的程序和相关文档的集合。 + + “贡献”是指由任一“贡献者”许可在“本许可证”下的受版权法保护的作品。 + + “贡献者”是指将受版权法保护的作品许可在“本许可证”下的自然人或“法人实体”。 + + “法人实体”是指提交贡献的机构及其“关联实体”。 + + “关联实体”是指,对“本许可证”下的行为方而言,控制、受控制或与其共同受控制的机构,此处的控制是指有受控方或共同受控方至少50%直接或间接的投票权、资金或其他有价证券。 + + 1. 授予版权许可 + + 每个“贡献者”根据“本许可证”授予您永久性的、全球性的、免费的、非独占的、不可撤销的版权许可,您可以复制、使用、修改、分发其“贡献”,不论修改与否。 + + 2. 授予专利许可 + + 每个“贡献者”根据“本许可证”授予您永久性的、全球性的、免费的、非独占的、不可撤销的(根据本条规定撤销除外)专利许可,供您制造、委托制造、使用、许诺销售、销售、进口其“贡献”或以其他方式转移其“贡献”。前述专利许可仅限于“贡献者”现在或将来拥有或控制的其“贡献”本身或其“贡献”与许可“贡献”时的“软件”结合而将必然会侵犯的专利权利要求,不包括对“贡献”的修改或包含“贡献”的其他结合。如果您或您的“关联实体”直接或间接地,就“软件”或其中的“贡献”对任何人发起专利侵权诉讼(包括反诉或交叉诉讼)或其他专利维权行动,指控其侵犯专利权,则“本许可证”授予您对“软件”的专利许可自您提起诉讼或发起维权行动之日终止。 + + 3. 无商标许可 + + “本许可证”不提供对“贡献者”的商品名称、商标、服务标志或产品名称的商标许可,但您为满足第4条规定的声明义务而必须使用除外。 + + 4. 分发限制 + + 您可以在任何媒介中将“软件”以源程序形式或可执行形式重新分发,不论修改与否,但您必须向接收者提供“本许可证”的副本,并保留“软件”中的版权、商标、专利及免责声明。 + + 5. 免责声明与责任限制 + + “软件”及其中的“贡献”在提供时不带任何明示或默示的担保。在任何情况下,“贡献者”或版权所有者不对任何人因使用“软件”或其中的“贡献”而引发的任何直接或间接损失承担责任,不论因何种原因导致或者基于何种法律理论,即使其曾被建议有此种损失的可能性。 + + 6. 语言 + “本许可证”以中英文双语表述,中英文版本具有同等法律效力。如果中英文版本存在任何冲突不一致,以中文版为准。 + + 条款结束 + + 如何将木兰宽松许可证,第2版,应用到您的软件 + + 如果您希望将木兰宽松许可证,第2版,应用到您的新软件,为了方便接收者查阅,建议您完成如下三步: + + 1, 请您补充如下声明中的空白,包括软件名、软件的首次发表年份以及您作为版权人的名字; + + 2, 请您在软件包的一级目录下创建以“LICENSE”为名的文件,将整个许可证文本放入该文件中; + + 3, 请将如下声明文本放入每个源文件的头部注释中。 + + Copyright (c) [Year] [name of copyright holder] + [Software Name] is licensed under Mulan PSL v2. + You can use this software according to the terms and conditions of the Mulan PSL v2. + You may obtain a copy of Mulan PSL v2 at: + http://license.coscl.org.cn/MulanPSL2 + THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + See the Mulan PSL v2 for more details. + + + Mulan Permissive Software License,Version 2 + + Mulan Permissive Software License,Version 2 (Mulan PSL v2) + January 2020 http://license.coscl.org.cn/MulanPSL2 + + Your reproduction, use, modification and distribution of the Software shall be subject to Mulan PSL v2 (this License) with the following terms and conditions: + + 0. Definition + + Software means the program and related documents which are licensed under this License and comprise all Contribution(s). + + Contribution means the copyrightable work licensed by a particular Contributor under this License. + + Contributor means the Individual or Legal Entity who licenses its copyrightable work under this License. + + Legal Entity means the entity making a Contribution and all its Affiliates. 
+ + Affiliates means entities that control, are controlled by, or are under common control with the acting entity under this License, ‘control’ means direct or indirect ownership of at least fifty percent (50%) of the voting power, capital or other securities of controlled or commonly controlled entity. + + 1. Grant of Copyright License + + Subject to the terms and conditions of this License, each Contributor hereby grants to you a perpetual, worldwide, royalty-free, non-exclusive, irrevocable copyright license to reproduce, use, modify, or distribute its Contribution, with modification or not. + + 2. Grant of Patent License + + Subject to the terms and conditions of this License, each Contributor hereby grants to you a perpetual, worldwide, royalty-free, non-exclusive, irrevocable (except for revocation under this Section) patent license to make, have made, use, offer for sale, sell, import or otherwise transfer its Contribution, where such patent license is only limited to the patent claims owned or controlled by such Contributor now or in future which will be necessarily infringed by its Contribution alone, or by combination of the Contribution with the Software to which the Contribution was contributed. The patent license shall not apply to any modification of the Contribution, and any other combination which includes the Contribution. If you or your Affiliates directly or indirectly institute patent litigation (including a cross claim or counterclaim in a litigation) or other patent enforcement activities against any individual or entity by alleging that the Software or any Contribution in it infringes patents, then any patent license granted to you under this License for the Software shall terminate as of the date such litigation or activity is filed or taken. + + 3. No Trademark License + + No trademark license is granted to use the trade names, trademarks, service marks, or product names of Contributor, except as required to fulfill notice requirements in Section 4. + + 4. Distribution Restriction + + You may distribute the Software in any medium with or without modification, whether in source or executable forms, provided that you provide recipients with a copy of this License and retain copyright, patent, trademark and disclaimer statements in the Software. + + 5. Disclaimer of Warranty and Limitation of Liability + + THE SOFTWARE AND CONTRIBUTION IN IT ARE PROVIDED WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED. IN NO EVENT SHALL ANY CONTRIBUTOR OR COPYRIGHT HOLDER BE LIABLE TO YOU FOR ANY DAMAGES, INCLUDING, BUT NOT LIMITED TO ANY DIRECT, OR INDIRECT, SPECIAL OR CONSEQUENTIAL DAMAGES ARISING FROM YOUR USE OR INABILITY TO USE THE SOFTWARE OR THE CONTRIBUTION IN IT, NO MATTER HOW IT’S CAUSED OR BASED ON WHICH LEGAL THEORY, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. + + 6. Language + + THIS LICENSE IS WRITTEN IN BOTH CHINESE AND ENGLISH, AND THE CHINESE VERSION AND ENGLISH VERSION SHALL HAVE THE SAME LEGAL EFFECT. IN THE CASE OF DIVERGENCE BETWEEN THE CHINESE AND ENGLISH VERSIONS, THE CHINESE VERSION SHALL PREVAIL. 
+ + END OF THE TERMS AND CONDITIONS + + How to Apply the Mulan Permissive Software License,Version 2 (Mulan PSL v2) to Your Software + + To apply the Mulan PSL v2 to your work, for easy identification by recipients, you are suggested to complete following three steps: + + i Fill in the blanks in following statement, including insert your software name, the year of the first publication of your software, and your name identified as the copyright owner; + + ii Create a file named “LICENSE” which contains the whole context of this License in the first directory of your software package; + + iii Attach the statement to the appropriate annotated syntax at the beginning of each source file. + + + Copyright (c) [Year] [name of copyright holder] + [Software Name] is licensed under Mulan PSL v2. + You can use this software according to the terms and conditions of the Mulan PSL v2. + You may obtain a copy of Mulan PSL v2 at: + http://license.coscl.org.cn/MulanPSL2 + THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + See the Mulan PSL v2 for more details. diff --git a/license/ThirdPartyOpenSourceSoftwareNotice b/license/ThirdPartyOpenSourceSoftwareNotice new file mode 100644 index 0000000000000000000000000000000000000000..ab3019730241e2616919a552300ecb8205d64aba --- /dev/null +++ b/license/ThirdPartyOpenSourceSoftwareNotice @@ -0,0 +1,483 @@ +OPEN SOURCE SOFTWARE NOTICE + +Please note we provide an open source software notice along with this product and/or this product firmware (in the following just “this product”). The open source software licenses are granted by the respective right holders. And the open source licenses prevail all other license information with regard to the respective open source software contained in the product, including but not limited to End User Software Licensing Agreement. This notice is provided on behalf of Huawei Technologies Co. Ltd. and any of its local subsidiaries which may have provided this product to you in your local country. + +Warranty Disclaimer + +THE OPEN SOURCE SOFTWARE IN THIS PRODUCT IS DISTRIBUTED IN THE HOPE THAT IT WILL BE USEFUL, BUT WITHOUT ANY WARRANTY, WITHOUT EVEN THE IMPLIED WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. SEE THE APPLICABLE LICENSES FOR MORE DETAILS. + +Copyright Notice and License Texts + +Software: LLVM 10.0.0 + +Copyright notice: +Copyright 2015 Google Inc. All rights reserved. +Copyright 2018 Google Inc. All rights reserved. +Copyright 2016 Ismael Jimenez Martinez. All rights reserved. +Copyright 2017 Roman Lebedev. All rights reserved. +Copyright 2011 The Go Authors. All rights reserved. +Copyright (c) 2012 Alexandre K. I. de Mendonca ,Paulo Pizarro +Copyright (c) 2012 Alexandre K. I. de Mendonca +Copyright (c) 2013 Imagination Technologies +Copyright (c) 2013 Imagination Technologies Ltd. +Copyright (c) 2014,2015 Advanced Micro Devices, Inc. +Copyright (c) 1998 Cygnus Solutions +Copyright (c) 2004 Simon Posnjak +Copyright (c) 2005 Axis Communications AB +Copyright (C) 2007 Free Software Foundation, Inc. +Copyright (c) 2012 Anthony Green +Copyright (c) 1996-2003 Red Hat, Inc. +Copyright (c) 2011-2019 by the contributors listed in CREDITS.TXT +Copyright 2008, Google Inc. +Copyright 2005, Google Inc. +Copyright 2007, Google Inc. +Copyright 2008 Google Inc. 
+Copyright (c) 2000, 2007 Software AG +Copyright (c) 2008 Red Hat, Inc +Copyright (c) 2015 Advanced Micro Devices, Inc. +Copyright (c) 2009-2019 by the contributors listed in CREDITS.TXT +Copyright (c) 2009-2015 by the contributors listed in CREDITS.TXT +Copyright (c) 2019 The LLVM Developers +Copyright 2008-2010 Apple, Inc. Permission is hereby granted, free of charge,to any person obtaining a copy of this software and associated documentation +Copyright (c) 2014 Advanced Micro Devices, Inc. +Copyright (c) 2014, 2015 Advanced Micro Devices, Inc. +Copyright (c) 2013 Victor Oliveira +Copyright (c) 2013 Jesse Towner +Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved. +Copyright (c) 2016 Aaron Watry +Copyright (c) 2016 Aaron Watry +Copyright (c) 2011-2014 by the contributors listed in CREDITS.TXT +Copyright © 2012 Peter Harris +Copyright (c) 2007, 2009, 2010 Red Hat, Inc. +Copyright (C) 2007, 2009, 2010 Free Software Foundation, Inc +Copyright (c) 2011 Plausible Labs Cooperative, Inc. +Copyright (c) 1996 Red Hat, Inc. +Copyright (c) 1999, 2007, 2008 Red Hat, Inc. +Copyright (c) 2011, 2012 Anthony Green +Copyright (c) 1996, 1998, 2007 Red Hat, Inc. +Copyright (c) 1999, 2008 Red Hat, Inc. +Copyright (c) 1996, 1998 Red Hat, Inc. +Copyright (c) 2012 Anthony Green +Copyright (c) 1998, 2001, 2007, 2008 Red Hat, Inc. +Copyright (C) 2012-2015 Free Software Foundation, Inc. +Copyright (C) 2013-2015 Free Software Foundation, Inc. +Copyright (c) 2009-2019 Polly Team +Copyright (c) 1996-2014 Anthony Green, Red Hat, Inc and others. +Copyright (c) 2013 Mentor Graphics. +Copyright (c) 2007-2019 University of Illinois at Urbana-Champaign. +Copyright (c) 2014 Red Hat, Inc. +Copyright (C) 2011, 2012, 2013 Anthony Green +Copyright (C) 2007 Free Software Foundation, Inc +Copyright (c) 1996 Red Hat, Inc. +Copyright (c) 2009, 2010, 2011, 2012 ARM Ltd. +Copyright (c) 2013 Synopsys, Inc. (www.synopsys.com) +Copyright (c) 2013 Synopsys, Inc. (www.synopsys.com) +Copyright (c) 2011 Timothy Wall +Copyright (c) 2011 Anthony Green +Copyright (c) 2011 Free Software Foundation +Copyright (c) 1998, 2008, 2011 Red Hat, Inc. +Copyright (c) 2010 CodeSourcery +Copyright (c) 2011 Anthony Green +Copyright (c) 2009 Bradley Smith +Copyright (C) 2004 Anthony Green +Copyright (C) 2007 Free Software Foundation, Inc. +Copyright (C) 2008 Red Hat, Inc. +Copyright (c) 1996-2004 Red Hat, Inc. +Copyright (c) 1998, 2007, 2008, 2012 Red Hat, Inc. +Copyright (c) 2000 Hewlett Packard Company +Copyright (c) 2000 Hewlett Packard Company +Copyright (c) 2004 Renesas Technology +Copyright (c) 2008 Red Hat, Inc. +Copyright (c) 2004 Renesas Technology. +Copyright (c) 2013 Miodrag Vallat. +Copyright (c) 2012, 2013 Xilinx, Inc +Copyright (c) 2012, 2013 Xilinx, Inc +Copyright (c) 2008 David Daney +Copyright (c) 1996, 2007, 2008, 2011 Red Hat, Inc. +Copyright (C) 2012, 2013 Anthony Green +Copyright (c) 2012, 2013 Anthony Green +Copyright (c) 2014 Sebastian Macke +Copyright (c) 2014 Sebastian Macke +(c) 2011 Anthony Green +(c) 2008 Red Hat, Inc. +(c) 2006 Free Software Foundation, Inc. 
+(c) 2003-2004 Randolph Chung +Copyright (c) 1998 Geoffrey Keating +Copyright (C) 2013 IBM +Copyright (C) 2011 Anthony Green +Copyright (C) 2011 Kyle Moffett +Copyright (C) 2008 Red Hat, Inc +Copyright (C) 2007, 2008 Free Software Foundation, Inc +Copyright (c) 1998 Geoffrey Keating +Copyright (C) 2007, 2008, 2010 Free Software Foundation, Inc +Copyright (C) 1998 Geoffrey Keating +Copyright (C) 2001 John Hornkvist +Copyright (C) 2002, 2006, 2007, 2009, 2010 Free Software Foundation, Inc. +Copyright (C) 2013 IBM +Copyright (c) 2002-2008, 2012 Kaz Kojima +Copyright (c) 2008 Red Hat, Inc. +Copyright (c) 2012 Anthony Green +Copyright (c) 2003, 2004, 2006, 2007, 2012 Kaz Kojima +Copyright (c) 2008 Anthony Green +Copyright (c) 2011, 2013 Anthony Green +Copyright (c) 1996, 2003-2004, 2007-2008 Red Hat, Inc. +Copyright (c) 2012 Tilera Corp. +Copyright (c) 2012 Tilera Corp. +Copyright (c) 20011 Anthony Green +Copyright (c) 2008, 2010 Red Hat, Inc. +Copyright (c) 2002, 2007 Bo Thorsen +Copyright (c) 1996, 1998, 1999, 2001, 2007, 2008 Red Hat, Inc. +Copyright (c) 2002 Ranjit Mathew +Copyright (c) 2002 Bo Thorsen +Copyright (c) 2002 Roger Sayle +Copyright (C) 2008, 2010 Free Software Foundation, Inc. +Copyright (c) 2013 The Written Word, Inc. +Copyright (c) 2011 Anthony Green +Copyright (c) 2012, 2014 Anthony Green +Copyright (c) 1996-2003, 2010 Red Hat, Inc. +Copyright (C) 2008 Free Software Foundation, Inc. +Copyright (c) 2014 Red Hat, Inc. +Copyright (c) 2013 Tensilica, Inc. +Copyright (c) 2013 Tensilica, Inc. +Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004, 2005, 2006,2007, 2008, 2009 Free Software Foundation, Inc. +Copyright 2009 The Go Authors. All rights reserved. +Copyright 2012 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. All rights reserved. +Copyright 2014 The Go Authors. All rights reserved. +Copyright 2013 The Go Authors. All rights reserved. +Copyright 2010 The Go Authors. All rights reserved. +Copyright 2012 The Go Authors. All rights reserved. +Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2014 The Go Authors. All rights reserved. +Copyright 2015 The Go Authors. All rights reserved. +Copyright 2009, 2010 The Go Authors. All rights reserved. +Copyright 2013 The Go Authors. All rights reserved. +Copyright 2001-2004 Unicode, Inc. +Copyright (C) 2004 eXtensible Systems, Inc. +Copyright (c) 2001 Alexander Peslyak and it is hereby released to the general public under the following terms: +Copyright (C) 2012-2016, Yann Collet. +Copyright (c) 2003-2019 University of Illinois at Urbana-Champaign. +Copyright (c) 2013-2014, Pexpect development team +Copyright (c) 2012, Noah Spurrier +Copyright (c) 2009-2016 by the contributors listed in CREDITS.TXT +Copyright 2015, Google Inc. +Copyright (c) 1999-2003 Steve Purcell +Copyright (c) 2003-2010 Python Software Foundation +Copyright (c) 1999-2007 Apple Inc. All rights reserved. +Copyright (c) 2017-2019 by the contributors listed in CREDITS.TXT +Copyright (c) 2010 Apple Inc. +Copyright (c) 2013-2016, Pexpect development team +Copyright (c) 2010-2015 Benjamin Peterson +Copyright 2004 Free Software Foundation, Inc. +Copyright 2006, Google Inc. +Copyright (c) 2007-2018 University of Illinois at Urbana-Champaign. +Copyright 2013, Google Inc. +Copyright (c) 2009 Google Inc. All rights reserved. +Copyright (c) 2006 Kirill Simonov +Copyright © 2006-2009 Steven J. Bethard . 
+Copyright 2008-2009 Katholieke Universiteit Leuven +Copyright 2006-2007 Universiteit Leiden +Copyright 2012,2014 Ecole Normale Superieure +Copyright 2013 Ecole Normale Superieure +Copyright 2017 Sven Verdoolaege +Copyright 2011 INRIA Saclay +Copyright 2011 Sven Verdoolaege +Copyright 2012-2014 Ecole Normale Superieure +Copyright 2014 INRIA Rocquencourt +Copyright 2010 INRIA Saclay +Copyright 2012 Ecole Normale Superieure +Copyright 2012-2013 Ecole Normale Superieure +Copyright 2016 Sven Verdoolaege +Copyright 2010-2011 INRIA Saclay +Copyright 2016 INRIA Paris +Copyright 2005-2007 Universiteit Leiden +Copyright 2012 Universiteit Leiden +Copyright 2014 Ecole Normale Superieure +Copyright 2015 INRIA Paris-Rocquencourt +Copyright 2014-2015 INRIA Rocquencourt +Copyright 2015-2016 Sven Verdoolaege +Copyright 2013-2014 Ecole Normale Superieure +Copyright 2016-2017 Sven Verdoolaege +Copyright 2018 Sven Verdoolaege +Copyright 2016-2017 Tobias Grosser +Copyright 2015 INRIA Paris-Rocquencourt +Copyright (C) 1996-2015 Free Software Foundation, Inc. +Copyright (c) 2012 Qualcomm Innovation Center, Inc. All rights reserved. +Copyright (C) 2002-2007 Michael J. Fromberger, All Rights Reserved. +Copyright (c) 1992, 1993 UNIX International, Inc. +Copyright 1992, 1993, 1994 Henry Spencer. All rights reserved. +Copyright (c) 1994 +Copyright (c) 1992, 1993, 1994 Henry Spencer. +Copyright (c) 1992, 1993, 1994 +Copyright (c) 1992 Henry Spencer. +Copyright (c) 1992, 1993 +Copyright (c) 1998 Todd C. Miller +Copyright (C) 2012-2016, Yann Collet +Copyright (c) 1997-2019 Intel Corporation +Copyright (c) 2009-2014 by the contributors listed in CREDITS.TXT +Copyright 2012 INRIA Paris-Rocquencourt +Copyright 2012 Ecole Normale Superieure +Copyright 2015 Sven Verdoolaege +Copyright (C) Microsoft Corporation. All rights reserved. +Copyright 2003 Google Inc. +Copyright 2009 Google Inc. +Copyright 2016, 2017 Tobias Grosser. All rights reserved. +Copyright 2011 Sven Verdoolaege. All rights reserved. +Copyright 2011,2015 Sven Verdoolaege. All rights reserved. + +License: Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION +1. Definitions. +"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. +"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. +"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. +"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. +"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. +"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. 
+"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). +"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. +"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. +2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. +3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. +4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: +You must give any other recipients of the Work or Derivative Works a copy of this License; and +You must cause any modified files to carry prominent notices stating that You changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. + +You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. +5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. +6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. +7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. +8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. +9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. +END OF TERMS AND CONDITIONS + +License:University of Illinois/NCSA Open Source License +Copyright (c) All rights reserved. +Developed by: + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal with the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: +• Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimers. +• Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimers in the documentation and/or other materials provided with the distribution. +• Neither the names of , nor the names of its contributors may be used to endorse or promote products derived from this Software without specific prior written permission. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE SOFTWARE. + +License:The MIT License +Copyright (c) +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + + + +Software: Android 10.0.0_r2 + +Copyright notice: +Copyright (c) 2005-2020, The Android Open Source Project + +License: Apache License V2.0 + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + +To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); + +you may not use this file except in compliance with the License. + +You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +THIS OFFER IS VALID FOR THREE YEARS FROM THE MOMENT WE DISTRIBUTED THE PRODUCT OR FIRMWARE. diff --git a/samples/exceptiontest/Arith.java b/samples/exceptiontest/Arith.java new file mode 100644 index 0000000000000000000000000000000000000000..893f452074a96f9251f8ab6b01bcd83050f361ce --- /dev/null +++ b/samples/exceptiontest/Arith.java @@ -0,0 +1,39 @@ +/* + * Copyright (c) [2019] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +public class Arith { + private static native void raise_sigfpe(); + + public static int TestMain( int c ) { + int r = 0; + try { + r += 1; + if( c > 0 ) + raise_sigfpe(); + else if ( c == 0 ) { + r += 103; + throw new ArithmeticException(); + } + r += 3; + } catch( ArithmeticException e ) { + r += 100; + } + return r; + } + + public static void main(String[] args) { + System.out.println(TestMain(-5)); + System.out.println(TestMain(0)); + } +} diff --git a/samples/exceptiontest/Makefile b/samples/exceptiontest/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..061f116d0a5cfb94e95955f707fea94fe505daf0 --- /dev/null +++ b/samples/exceptiontest/Makefile @@ -0,0 +1,16 @@ +# +# Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# +APP = Arith +include $(MAPLE_BUILD_CORE)/maple_test.mk diff --git a/samples/helloworld/HelloWorld.java b/samples/helloworld/HelloWorld.java new file mode 100644 index 0000000000000000000000000000000000000000..a002960377e6d7728decb9f111625bc301e864e4 --- /dev/null +++ b/samples/helloworld/HelloWorld.java @@ -0,0 +1,20 @@ +/* + * Copyright (c) [2019] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +public class HelloWorld { + public static void main(String[] args) { + System.out.println("Hello World!"); + } +} + diff --git a/samples/helloworld/Makefile b/samples/helloworld/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..77580165d82941ec9b348ed15fed0ce311721b58 --- /dev/null +++ b/samples/helloworld/Makefile @@ -0,0 +1,16 @@ +# +# Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# +APP = HelloWorld +include $(MAPLE_BUILD_CORE)/maple_test.mk diff --git a/samples/iteratorandtemplate/IteratorAndTemplateTest.java b/samples/iteratorandtemplate/IteratorAndTemplateTest.java new file mode 100644 index 0000000000000000000000000000000000000000..bb0cd3f3b6186b0414e4585cbbb91c568dacc0fb --- /dev/null +++ b/samples/iteratorandtemplate/IteratorAndTemplateTest.java @@ -0,0 +1,39 @@ +/* + * Copyright (c) [2019] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +import java.util.*; +public class IteratorAndTemplateTest { + public static void main(String args[]) { + testIterator(); + testTemplate(); + } + + public static void testIterator(){ + AbstractCollection l = new ArrayList(); + l.add(new String("Hello")); + l.add(new String(" World")); + System.out.println(l.size()); + System.out.println(l.toString()); + } + + public static void testTemplate(){ + List wholeChain = new ArrayList(); + wholeChain.add(1); + wholeChain.add(2); + for (Integer i : wholeChain) { + System.out.println(i); + } + } +} + diff --git a/samples/iteratorandtemplate/Makefile b/samples/iteratorandtemplate/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..93802205e8ac460bb8d55d927d681b8e8620b023 --- /dev/null +++ b/samples/iteratorandtemplate/Makefile @@ -0,0 +1,16 @@ +# +# Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# +APP = IteratorAndTemplateTest +include $(MAPLE_BUILD_CORE)/maple_test.mk diff --git a/samples/polymorphismtest/InterfaceTest.java b/samples/polymorphismtest/InterfaceTest.java new file mode 100644 index 0000000000000000000000000000000000000000..2df75fe2d722d335775ac194bff17f836b138e7b --- /dev/null +++ b/samples/polymorphismtest/InterfaceTest.java @@ -0,0 +1,42 @@ +/* + * Copyright (c) [2019] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +interface Inter { + public default void foo() { + System.out.println("Inter.foo()"); + } +} + +class Base implements Inter { + public void foo() { + System.out.println("Base.foo()"); + } +} + +class Derived extends Base { + public void foo() { + System.out.println("Derived.foo()"); + } +} + +public class InterfaceTest { + public static void main(String[] args) { + Derived o1 = new Derived(); + o1.foo(); + Base o2 = new Derived(); + o2.foo(); + Inter o3 = new Derived(); + o3.foo(); + } +} diff --git a/samples/polymorphismtest/Makefile b/samples/polymorphismtest/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..aa9a53434063a40b1f813d3b244521f25c6afbbe --- /dev/null +++ b/samples/polymorphismtest/Makefile @@ -0,0 +1,16 @@ +# +# Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. 
+# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# +APP = InterfaceTest +include $(MAPLE_BUILD_CORE)/maple_test.mk diff --git a/samples/rccycletest/Makefile b/samples/rccycletest/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..c93d70d8f68ec451b48f8eb0e5c6355106cefe81 --- /dev/null +++ b/samples/rccycletest/Makefile @@ -0,0 +1,16 @@ +# +# Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# +APP = RCCycleTest +include $(MAPLE_BUILD_CORE)/maple_test.mk diff --git a/samples/rccycletest/RCCycleTest.java b/samples/rccycletest/RCCycleTest.java new file mode 100644 index 0000000000000000000000000000000000000000..c8794429e207e58c5ac14247c567492414da7a16 --- /dev/null +++ b/samples/rccycletest/RCCycleTest.java @@ -0,0 +1,38 @@ +/* + * Copyright (c) [2019] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +class Test_A { + Test_B bb; + void genCycle() { + Test_B b = new Test_B(); + bb = b; + b.aa = this; + } +} + +class Test_B { + Test_A aa; +} + +public class RCCycleTest { + public static void main (String []args) { + Test_A a = new Test_A(); + a.genCycle(); + if (a.bb == null) { + System.out.println("class B is collected"); + } else { + System.out.println("class B is not collected"); + } + } +} diff --git a/samples/threadtest/Makefile b/samples/threadtest/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..7b98e1ec3fd25777e2a39abdb6aced671a2e6688 --- /dev/null +++ b/samples/threadtest/Makefile @@ -0,0 +1,16 @@ +# +# Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# +APP = ThreadTest +include $(MAPLE_BUILD_CORE)/maple_test.mk diff --git a/samples/threadtest/ThreadTest.java b/samples/threadtest/ThreadTest.java new file mode 100644 index 0000000000000000000000000000000000000000..522947b67468088295faba0c455f1e284d05fd02 --- /dev/null +++ b/samples/threadtest/ThreadTest.java @@ -0,0 +1,45 @@ +/* + * Copyright (c) [2019] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +public class ThreadTest { + static volatile long flag = 0L; + static int number = 0; + static class FirstThread extends Thread { + public void run() { + while(flag != Long.MAX_VALUE) { + } + System.out.println(number); + } + } + static class SecondThread extends Thread { + public void run() { + number = 42; + flag = Long.MAX_VALUE; + } + } + + public static void main(String args[]) { + FirstThread t1 = new FirstThread(); + SecondThread t2 = new SecondThread(); + t1.start(); + t2.start(); + try { + t1.join(); + t2.join(); + } catch (InterruptedException e) { + System.out.println("INTERRUPTED_MESSAGE"); + } + System.out.println("OK"); + } +} diff --git a/src/MapleFE/.gitignore b/src/MapleFE/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..533695ff01a419af9567d36b8d2cf41970296906 --- /dev/null +++ b/src/MapleFE/.gitignore @@ -0,0 +1,25 @@ +output/ +*.output +*.out +*.bak +*.swo +*.swp +*.mpl +*.js +*.dot +*.png +*.out.ts +*.ts.ast +*.java.ast +gdbfile +test/typescript/**/*.cpp +test/typescript/**/*.h +test/typescript/**/*.log +test/TypeScript/ +test/msts_failed.txt +test/msts_passed.txt +test/typescript/ms_tests/*.ts +test/typescript/ms_tests/LICENSE.txt +test/typescript/unit_tests/temp.ts +test/typescript/unit_tests/perf.data +test/typescript/unit_tests/perf.data.old diff --git a/src/MapleFE/Makefile b/src/MapleFE/Makefile index c3c89b4319bc3779fa06f6319ae45d34f165095a..f1dbcac61dd5bed54e947fda4a6c6be58a2144c1 100644 --- a/src/MapleFE/Makefile +++ b/src/MapleFE/Makefile @@ -1,4 +1,4 @@ -# Copyright (C) [2020] Futurewei Technologies, Inc. All rights reverved. +# Copyright (C) [2020-2021] Futurewei Technologies, Inc. All rights reverved. # # OpenArkFE is licensed under the Mulan PSL v2. # You can use this software according to the terms and conditions of the Mulan PSL v2. 
@@ -14,37 +14,64 @@ include Makefile.in -TARGS = autogen shared recdetect ladetect java2mpl +TARGS = autogen shared recdetect ladetect astopt java2mpl ast2mpl ts2ast ast2cpp c2ast obfuscate # create BUILDDIR first $(shell $(MKDIR_P) $(BUILDDIR)) -java2mpl: autogen recdetect ladetect shared - $(MAKE) LANG=java -C java +ifeq ($(SRCLANG),java) + TARGET := java2ast ast2mpl +else ifeq ($(SRCLANG),typescript) + TARGET := ts2ast ast2cpp obfuscate +else ifeq ($(SRCLANG),c) + TARGET := c2ast +endif -recdetect: autogen shared - (cd recdetect; ./build.sh java) - (cd $(BUILDDIR)/recdetect; ./recdetect) +all: $(TARGET) + +java2ast: autogen recdetect ladetect shared + $(MAKE) LANG=$(SRCLANG) -C $(SRCLANG) + +ts2ast: autogen recdetect ladetect shared + $(MAKE) LANG=$(SRCLANG) -C $(SRCLANG) + +c2ast: autogen recdetect ladetect shared astopt + $(MAKE) LANG=$(SRCLANG) -C $(SRCLANG) + +recdetect: autogen shared ladetect + $(MAKE) LANG=$(SRCLANG) -C recdetect ladetect: autogen shared - (cd ladetect; ./build.sh java) - (cd $(BUILDDIR)/ladetect; ./ladetect) + $(MAKE) LANG=$(SRCLANG) -C ladetect + +ast2mpl: astopt java2ast + $(MAKE) -C ast2mpl + +astopt: shared recdetect ladetect + $(MAKE) -C astopt + +ast2cpp: astopt ts2ast + $(MAKE) -C ast2cpp + +obfuscate: astopt ts2ast ast2cpp + $(MAKE) -C tools/obfuscate shared: autogen - $(MAKE) LANG=java -C shared + $(MAKE) -C shared autogen: - $(MAKE) LANG=java -C autogen - (cd $(BUILDDIR)/autogen; ./autogen) + $(MAKE) -C autogen mapleall: ./scripts/build_mapleall.sh -test: autogen - $(MAKE) LANG=java -C test +test: + $(MAKE) LANG=$(SRCLANG) -C test -testall: - (cd test; ./runtests.pl all) +testms: + $(MAKE) LANG=$(SRCLANG) -C test testms + +testall: test test1: @cp test/java2mpl/t1.java . @@ -55,12 +82,14 @@ test1: clean: rm -rf $(BUILDDIR) -clobber: clean - rm -rf java/include/gen_*.h java/src/gen_*.cpp ladetect/java recdetect/java +clobber: + rm -rf output + +r: rebuild rebuild: make clobber - make -j8 + make -j -.PHONY: $(TARGS) +.PHONY: $(TARGS) test diff --git a/src/MapleFE/Makefile.in b/src/MapleFE/Makefile.in index 0902fbd1540ecc1bbcf4887df41e44c3f94f50d2..9ec2c1ca5240a2d01be9d271ec8a04b1c6abbcb3 100644 --- a/src/MapleFE/Makefile.in +++ b/src/MapleFE/Makefile.in @@ -1,17 +1,17 @@ CXX = clang++ -CC = gcc +CC = clang AR = ar rcs FLAVOR = gnu LD = clang++ -CXXFLAGS = -O0 -g3 -Wall -std=c++17 -DDEBUG +CXXFLAGS = -O0 -g3 -Wall -std=c++17 -DDEBUG -fPIC LFLAGS=-std=c++17 +LANG=java MKDIR_P = mkdir -p MAPLELIBPATH:=$(MAPLE_ROOT)/OpenArkCompiler/output/aarch64-clang-debug/lib/64 -MAPLELIBPATH1:=$(MAPLE_ROOT)/OpenArkCompiler/output/aarch64-clang-debug/ar -MAPLELIBS := -L $(MAPLELIBPATH) -L $(MAPLELIBPATH1) -lmplir -loption_parser -lmpl2mpl -lmplutil -lmempool -lHWSecureC -ldriver_option +MAPLELIBS := -L $(MAPLELIBPATH) -lmplir -loption_parser -lmplphase -lmplutil -lmempool -lmpl2mpl -lHWSecureC -ldriver_option -lmplir -loption_parser -ldriver_option MAPLEALL_INC = -I $(MAPLEALL_SRC)/maple_ir/include \ -I $(MAPLEALL_SRC)/mempool/include \ diff --git a/src/MapleFE/README b/src/MapleFE/README index 5a495962a12a54b48f9929ffbba1e88b224f33e3..9446b57ff2979767e8ab7b069dae94f732af269f 100644 --- a/src/MapleFE/README +++ b/src/MapleFE/README @@ -28,32 +28,35 @@ have our own frontend, which could be easier to handle new languages. [Preparation] -Before start building MapleFE, please download mapleall from the open ark compiler incubator, -https://gitee.com/openarkcompiler-incubator/mapleall -Follow the instruction of mapleall and build the necessary libraries. 
-Once you are done, please update the lib and include paths in MapleFE/shared/src/Makefile accordingly. +https://gitee.com/openarkcompiler/OpenArkCompiler/blob/master/doc/en/DevelopmentPreparation.md +Follow the instructions in the "Recommended Development Environment" section to get the tools installed. + +The following packages also need to be installed: +```bash +sudo apt install -y clang-tools-10 clang-format-10 python3 python3-pip libyaml-cpp-dev nodejs npm +pip3 install pyyaml +sudo npm install -g typescript@latest +``` [How to build] -1. source envsetup.sh -2. make mapleall. +1. source envsetup.sh [java|typescript] +2. make mapleall This step is to build mapleall (Maple IR related libraries). If you have done it once, you don't need to do it again unless you change the code in mapleall. 3. make -4. If you are working at Java frontend, you will see output/java/java2mpl. This is the executable +4. If you are working on the Java frontend, you will see output/java/java/java2mpl. This is the executable frontend. +5. If you are working on the TypeScript frontend, you will see output/typescript/bin/ts2ast. This is the executable + frontend. You can also see output/typescript/bin/ast2cpp, which translates the AST to C++ source code. [How to test] -1. cd test -2. ./runtests.pl all - -or -1. make testall +1. make test To run a single test, say t1.java: 1. cd test 2. make t1 -You can find all Java test cases in test/java2mpl. Right now only java test cases -are used. +You can find all Java test cases in test/java. +You can find all TypeScript test cases in test/typescript. diff --git a/src/MapleFE/TODO b/src/MapleFE/TODO new file mode 100644 index 0000000000000000000000000000000000000000..a24e602d81cb144fd26df0dec6da3ddad3914cc7 --- /dev/null +++ b/src/MapleFE/TODO @@ -0,0 +1,4 @@ +1. Implement a language specific lexer through overriding. Right now some functions are still in the shared lexer, e.g. + the template literal related code +2. Implement a language specific parser through overriding. Right now semicolon skipping is in the shared parser. + See the code in Parser::TraverseToken() diff --git a/src/MapleFE/ast2cpp/Makefile b/src/MapleFE/ast2cpp/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..34428253a1698e6d7154d16969f3816729268ebf --- /dev/null +++ b/src/MapleFE/ast2cpp/Makefile @@ -0,0 +1,27 @@ +# Copyright (C) [2021] Futurewei Technologies, Inc. All rights reverved. +# +# OpenArkFE is licensed under the Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# + +include ../Makefile.in + +all: + $(MAKE) -C src + +clean: + rm -rf $(BUILDDIR)/ast2cpp + +test: + $(MAKE) -C ../test p + +.PHONY: $(TARGS) + diff --git a/src/MapleFE/ast2cpp/README.md b/src/MapleFE/ast2cpp/README.md new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/src/MapleFE/ast2cpp/include/a2c_util.h b/src/MapleFE/ast2cpp/include/a2c_util.h new file mode 100644 index 0000000000000000000000000000000000000000..9ec86c37a25dcc7ed52239d67e1fea36dad3227f --- /dev/null +++ b/src/MapleFE/ast2cpp/include/a2c_util.h @@ -0,0 +1,45 @@ +/* +* Copyright (C) [2021] Futurewei Technologies, Inc. All rights reverved.
+* +* OpenArkFE is licensed under the Mulan PSL v2. +* You can use this software according to the terms and conditions of the Mulan PSL v2. +* You may obtain a copy of Mulan PSL v2 at: +* +* http://license.coscl.org.cn/MulanPSL2 +* +* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +* FIT FOR A PARTICULAR PURPOSE. +* See the Mulan PSL v2 for more details. +*/ + +////////////////////////////////////////////////////////////////////////////////////////////// +// This is the interface to translate AST to C++ +////////////////////////////////////////////////////////////////////////////////////////////// + +#ifndef __A2C_UTIL_HEADER__ +#define __A2C_UTIL_HEADER__ + +#include +#include +#include "gen_astvisitor.h" + +namespace maplefe { + +// To collect all filenames for imported modules +class ImportedFiles : public AstVisitor { + public: + ModuleNode *mModule; + std::vector mFilenames; + public: + ImportedFiles(ModuleNode *m) : mModule(m) {} + + // A helper function to get the target filename of an ImportNode + std::string GetTargetFilename(TreeNode *node); + + ImportNode *VisitImportNode(ImportNode *node); + ExportNode *VisitExportNode(ExportNode *node); +}; + +} +#endif diff --git a/src/MapleFE/ast2cpp/include/ast2cpp.h b/src/MapleFE/ast2cpp/include/ast2cpp.h new file mode 100644 index 0000000000000000000000000000000000000000..9687592f296f1a5c117f3dab2a54a4164bff7b09 --- /dev/null +++ b/src/MapleFE/ast2cpp/include/ast2cpp.h @@ -0,0 +1,62 @@ +/* +* Copyright (C) [2020] Futurewei Technologies, Inc. All rights reverved. +* +* OpenArkFE is licensed under the Mulan PSL v2. +* You can use this software according to the terms and conditions of the Mulan PSL v2. +* You may obtain a copy of Mulan PSL v2 at: +* +* http://license.coscl.org.cn/MulanPSL2 +* +* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +* FIT FOR A PARTICULAR PURPOSE. +* See the Mulan PSL v2 for more details. +*/ + +////////////////////////////////////////////////////////////////////////////////////////////// +// This is the interface to translate AST to C++ +////////////////////////////////////////////////////////////////////////////////////////////// + +#ifndef __AST2CPP_HEADER__ +#define __AST2CPP_HEADER__ + +#include "astopt.h" +#include "ast_handler.h" +#include "ast_module.h" + +namespace maplefe { + +class A2C : public AstOpt { +private: + AST_Handler *mASTHandler; + unsigned mFlags; + unsigned mIndexImported; + +public: + explicit A2C(AST_Handler *h, unsigned flags) : + AstOpt(h, flags), + mASTHandler(h), + mFlags(flags), + mIndexImported(0) {} + ~A2C() = default; + + void EmitTS(); + bool LoadImportedModules(); + + // return 0 if successful + // return non-zero if failed + int ProcessAST(); +}; + +class CppHandler { +private: + AST_Handler *mASTHandler; + unsigned mFlags; + +public: + CppHandler(AST_Handler *h, unsigned f) : mASTHandler(h), mFlags(f) {} + bool EmitCxxFiles(); +}; + +} +#endif diff --git a/src/MapleFE/ast2cpp/include/cpp_declaration.h b/src/MapleFE/ast2cpp/include/cpp_declaration.h new file mode 100644 index 0000000000000000000000000000000000000000..352a0006bc51c858d06c97b5bd1a24562cb4e327 --- /dev/null +++ b/src/MapleFE/ast2cpp/include/cpp_declaration.h @@ -0,0 +1,105 @@ +/* + * Copyright (C) [2021-2022] Futurewei Technologies, Inc. All rights reverved. 
+ * + * OpenArkFE is licensed under the Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ + +#ifndef __CPPDECL_HEADER__ +#define __CPPDECL_HEADER__ + +#include +#include "ast_handler.h" +#include "cpp_emitter.h" + +namespace maplefe { + +class CppDecl : public CppEmitter { +private: + std::set mImportedModules; + std::string mDefinitions; + std::string mInits; + +public: + CppDecl(Module_Handler *h) : CppEmitter(h) {} + CppDecl() : CppDecl(nullptr) {} + + std::string Emit() { + return EmitTreeNode(GetASTModule()); + } + + void AddImportedModule(const std::string& module); + bool IsImportedModule(const std::string& module); + + void AddDefinition(const std::string& def) { mDefinitions += def; } + std::string GetDefinitions() { return mDefinitions; } + void AddInit(const std::string& init) { mInits += init; } + std::string GetInits() { return mInits; } + + std::string EmitUserTypeNode(UserTypeNode *node) override; + std::string EmitBinOperatorNode(BinOperatorNode *node) override; + std::string EmitIdentifierNode(IdentifierNode *node) override; + std::string EmitDeclNode(DeclNode *node) override; + std::string EmitFieldNode(FieldNode *node) override; + std::string EmitArrayLiteralNode(ArrayLiteralNode *node) override; + std::string EmitCondBranchNode(CondBranchNode *node) override; + std::string EmitForLoopNode(ForLoopNode *node) override; + std::string EmitWhileLoopNode(WhileLoopNode *node) override; + std::string EmitDoLoopNode(DoLoopNode *node) override; + std::string EmitAssertNode(AssertNode *node) override; + std::string EmitCallNode(CallNode *node) override; + std::string EmitFunctionNode(FunctionNode *node) override; + std::string EmitPrimTypeNode(PrimTypeNode *node) override; + std::string EmitModuleNode(ModuleNode *node) override; + std::string EmitClassNode(ClassNode *node) override; + + std::string EmitNumIndexSigNode(NumIndexSigNode *node) override; + std::string EmitStrIndexSigNode(StrIndexSigNode *node) override; + + std::string EmitNewNode(NewNode *node) override; + std::string EmitStructNode(StructNode *node) override; + std::string EmitTypeAliasNode(TypeAliasNode* node) override; + std::string EmitLiteralNode(LiteralNode* node) override; + std::string EmitArrayTypeNode(ArrayTypeNode *node) override; + + std::string GetTypeString(TreeNode *node, TreeNode *child = nullptr); + std::string EmitTSEnum(StructNode *node); + std::string EmitInterface(StructNode *node); + + void CollectFuncArgInfo(TreeNode* node); + std::string ConstructArray(ArrayLiteralNode* node, int dim, std::string type); + std::string ConstructArrayAny(ArrayLiteralNode* node); +}; + +inline bool IsVarInitStructLiteral(DeclNode* node) { + return node->GetInit() && + node->GetInit()->IsTypeIdClass() && + node->GetInit()->IsStructLiteral(); +} + +inline bool IsVarInitClass(DeclNode* node) { + return node->GetInit() && + node->GetInit()->IsTypeIdClass() && + node->GetInit()->IsIdentifier(); +} + +template +bool HasAttrStatic(T* node) { + for (unsigned i = 0; i < node->GetAttrsNum(); ++i) { + std::string s = Emitter::GetEnumAttrId(node->GetAttrAtIndex(i)); + if (s.compare("static ") == 0) + return true; + } + 
return false; +} + +} // namespace maplefe +#endif diff --git a/src/MapleFE/ast2cpp/include/cpp_definition.h b/src/MapleFE/ast2cpp/include/cpp_definition.h new file mode 100644 index 0000000000000000000000000000000000000000..ab4c1b077356d1a8b6d1ac7f80797a6e58a25583 --- /dev/null +++ b/src/MapleFE/ast2cpp/include/cpp_definition.h @@ -0,0 +1,93 @@ +/* + * Copyright (C) [2021-2022] Futurewei Technologies, Inc. All rights reverved. + * + * OpenArkFE is licensed under the Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ + +#ifndef __CPPDEFINITIONEMITTER_HEADER__ +#define __CPPDEFINITIONEMITTER_HEADER__ + +#include "ast_handler.h" +#include "cpp_emitter.h" +#include "cpp_declaration.h" + +namespace maplefe { + +class CppDef : public CppEmitter { +public: + CppDecl &mCppDecl; + bool mIsInit; + bool mIsGenerator; + + CppDef(Module_Handler *h, CppDecl &d) : CppEmitter(h), mCppDecl(d), mIsInit(false), mIsGenerator(false) {} + + std::string Emit() { + return EmitTreeNode(GetASTModule()); + } + + std::string EmitIdentifierNode(IdentifierNode *node) override; + std::string EmitImportNode(ImportNode *node) override; + std::string EmitXXportAsPairNode(XXportAsPairNode *node) override; + std::string EmitExportNode(ExportNode *node) override; + std::string EmitUnaOperatorNode(UnaOperatorNode *node) override; + std::string EmitBinOperatorNode(BinOperatorNode *node) override; + std::string EmitBlockNode(BlockNode *node) override; + std::string EmitDeclNode(DeclNode *node) override; + std::string EmitFieldNode(FieldNode *node) override; + std::string EmitArrayLiteralNode(ArrayLiteralNode *node) override; + std::string EmitTemplateLiteralNode(TemplateLiteralNode *node) override; + std::string EmitLiteralNode(LiteralNode *node) override; + std::string EmitCondBranchNode(CondBranchNode *node) override; + std::string EmitBreakNode(BreakNode *node) override; + std::string EmitContinueNode(ContinueNode *node) override; + std::string EmitForLoopNode(ForLoopNode *node) override; + std::string EmitSwitchNode(SwitchNode *node) override; + std::string EmitCallNode(CallNode *node) override; + std::string EmitFunctionNode(FunctionNode *node) override; + std::string EmitTypeOfNode(TypeOfNode *node) override; + std::string EmitModuleNode(ModuleNode *node) override; + std::string EmitPrimTypeNode(PrimTypeNode *node) override; + std::string EmitPrimArrayTypeNode(PrimArrayTypeNode *node) override; + std::string EmitNewNode(NewNode *node) override; + std::string EmitArrayElementNode(ArrayElementNode *node) override; + std::string EmitTypeAliasNode(TypeAliasNode* node) override; + std::string EmitInstanceOfNode(InstanceOfNode *node) override; + std::string EmitDeclareNode(DeclareNode *node) override; + std::string EmitAsTypeNode(AsTypeNode *node) override; + std::string EmitNamespaceNode(NamespaceNode *node) override; + std::string EmitRegExprNode(RegExprNode *node); + std::string EmitStructNode(StructNode *node) override; + std::string EmitStructLiteralNode(StructLiteralNode* node) override; + std::string EmitWhileLoopNode(WhileLoopNode *node) override; + std::string EmitYieldNode(YieldNode *node) override; + std::string& 
HandleTreeNode(std::string &str, TreeNode *node) override; + + std::string EmitClassProps(TreeNode *node); + std::string EmitFuncScopeVarDecls(FunctionNode *node); + std::string EmitCppCtor(ClassNode* node); + std::string EmitCtorInstance(ClassNode *c); + std::string EmitDefaultCtor(ClassNode *c); + std::string EmitBracketNotationProp(ArrayElementNode* ae, OprId binOpId, bool isLhs, bool& isDynProp); + TypeId GetTypeIdFromDecl(TreeNode* id); + bool IsClassField(ArrayElementNode* node, std::string propKey); + std::string GetTypeForTemplateArg(TreeNode* node); + TreeNode* FindDeclType(TreeNode* node); + std::string GetThisParamObjType(TreeNode *node); + + std::string ConstructArray(ArrayLiteralNode* node, int dim, std::string type); + std::string ConstructArrayAny(ArrayLiteralNode* node); + std::string GenObjectLiteral(TreeNode* var, std::string varName, TreeNode* idType, StructLiteralNode* n); + std::string GenDirectFieldInit(std::string varName, StructLiteralNode* node); +}; + +} // namespace maplefe +#endif diff --git a/src/MapleFE/ast2cpp/include/cpp_emitter.h b/src/MapleFE/ast2cpp/include/cpp_emitter.h new file mode 100644 index 0000000000000000000000000000000000000000..ec2f269dba259ae718c2db3d18ca98b74b05c500 --- /dev/null +++ b/src/MapleFE/ast2cpp/include/cpp_emitter.h @@ -0,0 +1,45 @@ +/* + * Copyright (C) [2021-2022] Futurewei Technologies, Inc. All rights reverved. + * + * OpenArkFE is licensed under the Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ + +#ifndef __CPPEMITTER_HEADER__ +#define __CPPEMITTER_HEADER__ + +#include "ast_module.h" +#include "emitter.h" + +namespace maplefe { + +// Class CppEmitter includes all functionalities which are common for Cpp definition and declaration +class CppEmitter : public Emitter { + +public: + CppEmitter(Module_Handler *h) : Emitter(h) {} + + std::string GetIdentifierName(TreeNode *node); + bool IsInNamespace(TreeNode *node); + std::string GetNamespace(TreeNode *node); + std::string GetQualifiedName(IdentifierNode *node); + bool IsClassId(TreeNode *node); + bool IsVarTypeClass(TreeNode* var); + void InsertEscapes(std::string& str); + bool IsGenerator(TreeNode *node); + FunctionNode* GetGeneratorFunc(TreeNode *node); + void GetArrayTypeInfo(ArrayLiteralNode* node, int& numDim, std::string& type); + std::string FunctionHeader(FunctionNode* node, std::string retType); + std::string GetClassName(TreeNode* node); +}; + +} // namespace maplefe +#endif diff --git a/src/MapleFE/ast2cpp/include/emitter.h b/src/MapleFE/ast2cpp/include/emitter.h new file mode 100644 index 0000000000000000000000000000000000000000..f8447db75616fe3886cdee9d83e863082a074955 --- /dev/null +++ b/src/MapleFE/ast2cpp/include/emitter.h @@ -0,0 +1,152 @@ +/* + * Copyright (C) [2021-2022] Futurewei Technologies, Inc. All rights reverved. + * + * OpenArkFE is licensed under the Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ + +#ifndef __EMITTER_HEADER__ +#define __EMITTER_HEADER__ + +#include "ast.h" +#include "ast_attr.h" +#include "ast_module.h" +#include "ast_type.h" + +#include "ast_handler.h" +#include "gen_astdump.h" +using namespace std::string_literals; + +namespace maplefe { + +class Emitter { + +protected: + using Precedence = unsigned char; + Precedence mPrecedence; + + Module_Handler *mHandler; + +public: + Emitter(Module_Handler *h) : mHandler(h) {} + + std::string Emit(const char *title); + std::string GetEnding(TreeNode *n); + std::string Clean(std::string &s); + std::string GetBaseFilename(); + std::string GetModuleName(const char *p = nullptr); + std::string GetModuleName(TreeNode *node); + + Module_Handler *GetModuleHandler() { return mHandler; } + ModuleNode *GetASTModule() { return mHandler->GetASTModule(); } + + virtual std::string EmitAnnotationNode(AnnotationNode *node); + virtual std::string EmitAsTypeNode(AsTypeNode *node); + virtual std::string EmitIdentifierNode(IdentifierNode *node); + virtual std::string EmitFunctionNode(FunctionNode *node); + virtual std::string EmitUserTypeNode(UserTypeNode *node); + virtual std::string EmitComputedNameNode(ComputedNameNode *node); + virtual std::string EmitPackageNode(PackageNode *node); + virtual std::string EmitXXportAsPairNode(XXportAsPairNode *node); + virtual std::string EmitDeclareNode(DeclareNode *node); + virtual std::string EmitExportNode(ExportNode *node); + virtual std::string EmitImportNode(ImportNode *node); + virtual std::string EmitUnaOperatorNode(UnaOperatorNode *node); + virtual std::string EmitBinOperatorNode(BinOperatorNode *node); + virtual std::string EmitTerOperatorNode(TerOperatorNode *node); + virtual std::string EmitTypeAliasNode(TypeAliasNode *node); + virtual std::string EmitConditionalTypeNode(ConditionalTypeNode *node); + virtual std::string EmitTypeParameterNode(TypeParameterNode *node); + virtual std::string EmitBlockNode(BlockNode *node); + virtual std::string EmitNewNode(NewNode *node); + virtual std::string EmitDeleteNode(DeleteNode *node); + virtual std::string EmitAnnotationTypeNode(AnnotationTypeNode *node); + virtual std::string EmitDimensionNode(DimensionNode *node); + virtual std::string EmitDeclNode(DeclNode *node); + virtual std::string EmitCastNode(CastNode *node); + virtual std::string EmitParenthesisNode(ParenthesisNode *node); + virtual std::string EmitFieldNode(FieldNode *node); + virtual std::string EmitArrayElementNode(ArrayElementNode *node); + virtual std::string EmitArrayLiteralNode(ArrayLiteralNode *node); + virtual std::string EmitBindingElementNode(BindingElementNode *node); + virtual std::string EmitBindingPatternNode(BindingPatternNode *node); + virtual std::string EmitNumIndexSigNode(NumIndexSigNode *node); + virtual std::string EmitStrIndexSigNode(StrIndexSigNode *node); + virtual std::string EmitStructNode(StructNode *node); + virtual std::string EmitFieldLiteralNode(FieldLiteralNode *node); + virtual std::string EmitStructLiteralNode(StructLiteralNode *node); + virtual std::string EmitNamespaceNode(NamespaceNode *node); + virtual std::string EmitVarListNode(VarListNode *node); + virtual std::string EmitExprListNode(ExprListNode 
*node); + virtual std::string EmitTemplateLiteralNode(TemplateLiteralNode *node); + virtual std::string EmitLiteralNode(LiteralNode *node); + virtual std::string EmitRegExprNode(RegExprNode *node); + virtual std::string EmitThrowNode(ThrowNode *node); + virtual std::string EmitCatchNode(CatchNode *node); + virtual std::string EmitFinallyNode(FinallyNode *node); + virtual std::string EmitTryNode(TryNode *node); + virtual std::string EmitExceptionNode(ExceptionNode *node); + virtual std::string EmitReturnNode(ReturnNode *node); + virtual std::string EmitYieldNode(YieldNode *node); + virtual std::string EmitCondBranchNode(CondBranchNode *node); + virtual std::string EmitBreakNode(BreakNode *node); + virtual std::string EmitContinueNode(ContinueNode *node); + virtual std::string EmitForLoopNode(ForLoopNode *node); + virtual std::string EmitWhileLoopNode(WhileLoopNode *node); + virtual std::string EmitDoLoopNode(DoLoopNode *node); + virtual std::string EmitSwitchLabelNode(SwitchLabelNode *node); + virtual std::string EmitSwitchCaseNode(SwitchCaseNode *node); + virtual std::string EmitSwitchNode(SwitchNode *node); + virtual std::string EmitAssertNode(AssertNode *node); + virtual std::string EmitCallNode(CallNode *node); + virtual std::string EmitInterfaceNode(InterfaceNode *node); + virtual std::string EmitClassNode(ClassNode *node); + virtual std::string EmitPassNode(PassNode *node); + virtual std::string EmitLambdaNode(LambdaNode *node); + virtual std::string EmitInstanceOfNode(InstanceOfNode *node); + virtual std::string EmitTypeOfNode(TypeOfNode *node); + virtual std::string EmitKeyOfNode(KeyOfNode *node); + virtual std::string EmitInferNode(InferNode *node); + virtual std::string EmitInNode(InNode *node); + virtual std::string EmitIsNode(IsNode *node); + virtual std::string EmitAwaitNode(AwaitNode *node); + virtual std::string EmitNameTypePairNode(NameTypePairNode *node); + virtual std::string EmitTupleTypeNode(TupleTypeNode *node); + virtual std::string EmitTripleSlashNode(TripleSlashNode *node); + virtual std::string EmitModuleNode(ModuleNode *node); + virtual std::string EmitAttrNode(AttrNode *node); + virtual std::string EmitArrayTypeNode(ArrayTypeNode *node); + virtual std::string EmitFunctionTypeNode(FunctionTypeNode *node); + virtual std::string EmitPrimTypeNode(PrimTypeNode *node); + virtual std::string EmitPrimArrayTypeNode(PrimArrayTypeNode *node); + + virtual std::string EmitTreeNode(TreeNode *node); + virtual std::string& HandleTreeNode(std::string &str, TreeNode *node); + + static void Replace(std::string &str, const char *o, const char *n, int cnt = 0); + static std::string GetEnumTypeId(TypeId k); + //static const char *GetEnumSepId(SepId k); + static const char *GetEnumOprId(OprId k); + //static const char *GetEnumLitId(LitId k); + static std::string GetEnumAttrId(AttrId k); + //static const char *GetEnumImportProperty(ImportProperty k); + //static const char *GetEnumOperatorProperty(OperatorProperty k); + static std::string GetEnumDeclProp(DeclProp k); + //static const char *GetEnumStructProp(StructProp k); + //static const char *GetEnumForLoopProp(ForLoopProp k); + //static const char *GetEnumLambdaProperty(LambdaProperty k); + const char *GetEnumTripleSlashProp(TripleSlashProp k); + std::string &AddParentheses(std::string &str, TreeNode *node); + +}; + +} // namespace maplefe +#endif diff --git a/src/MapleFE/ast2cpp/include/helper.h b/src/MapleFE/ast2cpp/include/helper.h new file mode 100644 index 
0000000000000000000000000000000000000000..0eb8e89843ecb23e65507e68955a116bbc319deb --- /dev/null +++ b/src/MapleFE/ast2cpp/include/helper.h @@ -0,0 +1,143 @@ +/* +* Copyright (C) [2021-2022] Futurewei Technologies, Inc. All rights reverved. +* +* OpenArkFE is licensed under the Mulan PSL v2. +* You can use this software according to the terms and conditions of the Mulan PSL v2. +* You may obtain a copy of Mulan PSL v2 at: +* +* http://license.coscl.org.cn/MulanPSL2 +* +* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +* FIT FOR A PARTICULAR PURPOSE. +* See the Mulan PSL v2 for more details. +*/ + +#ifndef __HELPER_H__ +#define __HELPER_H__ + +#include +#include +#include +#include +#include +#include "massert.h" +#include "ast.h" +#include "typetable.h" +#include "emitter.h" + +using namespace std::string_literals; + +namespace maplefe { +extern std::string GeneratorFn_start; +extern std::string GeneratorFn_return; + +extern std::unordered_mapTypeIdToJSType; +extern std::unordered_mapTypeIdToJSTypeCXX; +extern TypeId hlpGetTypeId(TreeNode* node); +extern std::string GenClassFldAddProp(std::string, std::string, std::string, std::string, std::string); +extern std::string FunctionClassDecl(std::string retType, std::string funcName, unsigned nodeId); +extern std::string GeneratorClassDecl(std::string funcName, unsigned nodeId); +extern std::string GeneratorClassDef(std::string ns, std::string funcName, unsigned nodeId); +extern std::string tab(int n); +extern bool IsClassMethod(TreeNode* node); +extern std::string GetClassOfAssignedFunc(TreeNode* node); +extern std::string GenAnonFuncName(TreeNode* node); +inline std::string ClsName(std::string func) { return "Cls_"s + func; } +inline std::string GeneratorName(std::string func) { return "Generator_"s + func; } +inline std::string GeneratorFuncName(std::string func) { return "GeneratorFunc_"s + func; } +extern std::string hlpGetJSValTypeStr(TypeId typeId); +extern std::string ArrayCtorName(int dim, std::string type); +extern bool IsBuiltinObj(std::string name); +extern std::string ObjectTypeStr(std::string name); +extern std::string GeneratorFuncHeader(std::string cls, unsigned nodeId); +extern std::string FunctionParams(unsigned nodeId, bool handleThis, bool argsOnly = false, bool byRef = false, bool fdInit = false, bool capture = false); + +class GeneratorLabels { +private: + unsigned GenLoopId = 0; + unsigned GenYieldId= 0; +public: + std::string NextLoopLabel(void) { + std::string label = "_loop_" + std::to_string(++GenLoopId); + return label; + } + std::string NextYieldLabel(void) { + std::string label = "_yield_" + std::to_string(++GenYieldId); + return label; + } + void ResetLabels(void) { + GenLoopId = 0; + GenYieldId = 0; + } +}; + +class FuncTable { +private: + std::unordered_map TopLevelFunc; // map nodeId to TreeNode* + std::set TopLevelFuncNm; // name of top level func or name of var assigned top level func + std::set ImportedFields; + std::set StaticMembers; + + // map of FunctionNode node id to vector of function arg info (pair of arg type and name) + std::unordered_map>> args; + +public: + FuncTable() {} + ~FuncTable() {} + + // Check if node is top level function + void AddTopLevelFunc(TreeNode* node) { + assert(node->IsFunction()); + if (!static_cast(node)->IsConstructor()) + TopLevelFunc[node->GetNodeId()] = node; + } + bool IsTopLevelFunc(TreeNode* node) { + assert(node->IsFunction()); + 
std::unordered_map::iterator it; + it = TopLevelFunc.find(node->GetNodeId()); + return(it != TopLevelFunc.end()); + } + + // Check if name is top level func + void AddNameIsTopLevelFunc(std::string name) { + TopLevelFuncNm.insert(name); // name can be 1st level func, or func typed var + } + bool IsTopLevelFuncName(std::string& name) { + return(TopLevelFuncNm.find(name) != TopLevelFuncNm.end()); + } + + // Check if string (xxx::yyy) is an Imported field + std::string& AddFieldIsImported(std::string field) { + ImportedFields.insert(field); + return(field); + } + bool IsImportedField(std::string& field) { + return(ImportedFields.find(field) != ImportedFields.end()); + } + + // Check if a class member (field or method) is static + std::string& AddMemberIsStatic(std::string field) { + StaticMembers.insert(field); + return(field); + } + bool IsStaticMember(std::string& field) { + return(StaticMembers.find(field) != StaticMembers.end()); + } + + // Function arg info + void AddArgInfo(unsigned nodeId, std::string type, std::string name) { + args[nodeId].push_back(std::pair(type, name)); + } + std::vector> GetArgInfo(unsigned nodeId) { + return args[nodeId]; + } + +}; + +extern FuncTable hFuncTable; +extern GeneratorLabels GenFnLabels; + +} +#endif // __HELPER_H__ + diff --git a/src/MapleFE/ast2cpp/runtime/include/builtins.h b/src/MapleFE/ast2cpp/runtime/include/builtins.h new file mode 100644 index 0000000000000000000000000000000000000000..af0fe8f50dd34f39cf5912d7ed54593be4ce5748 --- /dev/null +++ b/src/MapleFE/ast2cpp/runtime/include/builtins.h @@ -0,0 +1,233 @@ +/* + * Copyright (C) [2021-2022] Futurewei Technologies, Inc. All rights reverved. + * + * OpenArkFE is licensed under the Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ + +#ifndef __BUILTINS_H__ +#define __BUILTINS_H__ + +#include + +namespace t2crt { + +template +class Record : public Object { + public: + std::unordered_map records; + Record() {} + ~Record() {} + Record(Function* ctor, Object* proto) : Object(ctor, proto) {} + Record(Function* ctor, Object* proto, std::vector props) : Object(ctor, proto, props) {} +}; + +template +class Array : public Object { + public: + std::vector elements; + Array(Function* ctor, Object* proto): Object(ctor, proto) {} + Array(Function* ctor, Object* proto, std::initializer_list l): Object(ctor, proto), elements(l) {} + + T& operator[](int i) {return elements[i];} + void operator = (const std::vector &v) { elements = v; } + long size() { return elements.size(); } + + // Output array to string (recurses if multi-dim array via ostream output operator overload in t2cpp.cpp) + std::string Dump (void) override { + std::stringstream ss; + std::streambuf* old = std::cout.rdbuf(ss.rdbuf()); + if (elements.empty()) + std::cout << "[]"; + else { + std::cout << "[ "; + auto i = elements.begin(), e = elements.end(); + std::cout << *i++; + for (; i != e; ++i) + std::cout << ", " << *i; + std::cout << " ]"; + } + std::cout.rdbuf(old); + return ss.str(); + } + + // Put JS Array.prototype props as static fields and methods in this class + // and add to proplist of Array_ctor.prototype object on system init. 
+ + class Ctor: public Function { + public: + Ctor(Function* ctor, Object* proto, Object* prototype_proto) : Function(ctor, proto, prototype_proto) {} + Array* _new() { + return new Array(this, this->prototype); + } + Array* _new(std::initializer_listl) { + return new Array(this, this->prototype, l); + } + }; + static Ctor ctor; +}; + +// Create ctor func for 1,2,3 dimension array of given type +// note: must be in sync with format generated by ArrayCtorName in helper.h +#define ARR_CTOR_DEF(type) \ + template <> \ + Array::Ctor Array::ctor = Array::Ctor(&Function::ctor, Function::ctor.prototype, Object::ctor.prototype); \ + template <> \ + Array*>::Ctor Array*>::ctor = Array*>::Ctor(&Function::ctor, Function::ctor.prototype, Object::ctor.prototype); \ + template <> \ + Array*>*>::Ctor Array*>*>::ctor = Array*>*>::Ctor(&Function::ctor, Function::ctor.prototype, Object::ctor.prototype); + +class JSON : public Object { + // TODO +}; + +class RegExp : public Object { + // TODO +public: + RegExp(Function* ctor, Object* proto): Object(ctor, proto) { } + RegExp(Function* ctor, Object* proto, std::string src): Object(ctor, proto) { source = src; } + ~RegExp(){} + std::string source; // text of the pattern + std::string Dump(void) override { return source; } + + class Ctor : public Function { + public: + Ctor(Function* ctor, Object* proto, Object* prototype_proto) : Function(ctor, proto, prototype_proto) { } + RegExp* _new(std::string src) {return new RegExp(this, this->prototype, src);} + virtual const char* __GetClassName() const {return "RegExp ";} + }; + static Ctor ctor; +}; + +class Number : public Object { +public: + // TODO + class Ctor : public Function { + public: + Ctor(Function* ctor, Object* proto, Object* prototype_proto) : Function(ctor, proto, prototype_proto) { } + virtual const char* __GetClassName() const {return "Number ";} + }; + static Ctor ctor; +}; + + +// 20.5 Error objects for execptions +class Error : public Object { + // TODO +}; + +// JavaScript generators and generator functions +// - The builtin GeneratorFunction is the constructor for all generator functions. +// - Generator functions are called directly to return generators (with closure). +// - Generators are iterators that calls corresponding generator function with +// data captured in closure to iterate for results. + +// ecma-262 section references are based on ecma-262 edition 12.0 + +// ecma262 27.1.1.5 IteratorResult interface: +struct IteratorResult : public Object { + bool done; // status of iterator next() call + JS_Val value; // done=false: current iteration element value + // done=true: return value of the iterator, undefined if none returned + IteratorResult() : done(true), value(undefined) { + this->AddProp("done", t2crt::ClassFld(&IteratorResult::done).NewProp(this, t2crt::TY_CXX_Bool)); + this->AddProp("value", t2crt::ClassFld(&IteratorResult::value).NewProp(this, t2crt::TY_CXX_Any)); + } + IteratorResult(bool done, JS_Val val) : done(done), value(val) { } + ~IteratorResult() { } +}; + +// ecma262 27.1.1.1 Iterable interface: +// To be iterable, an object or one of the objects up its prototype chain must +// have a property with a @@iterator key ([Symbol.iterator], the value of +// which is a function that returns iterators (i.e objects with Iterator interace +// methods next/return/throw). +// +// Note: For iterable objects such as arrays and strings, [Symbol.iterator]() +// returns a new iteraor object. 
But for the intrinsic object %IteratorPrototype% +// (27.1.2.1) it returns the current iterator instance, which +// means for all iterators, [Sumbol.iterator]() returns itself. + +// ecma262 27.1.2.1 %IteratorPrototype%: +// 1) All objects that implement iterator interface also inherit from %IteratorPrototype% +// 2) %IteratorPrototype% provides shared props for all iterator objects +// 3) %IteratorPrototype%[Symbol.iterator]() = this (current iterator instance) - used in for loops +class IteratorProto : public Object { +public: + IteratorResult _res; + IteratorProto(Function* ctor, Object* proto) : Object(ctor, proto) { } + ~IteratorProto() { } + // note: the arg on an iterator's 1st next() call is ignored per spec 27.5.1.2 + virtual IteratorResult* next (JS_Val* arg = nullptr) { return &_res; } + virtual IteratorResult* _return(JS_Val* val = nullptr) { return &_res; } + virtual IteratorResult* _throw(Error exception) { return &_res; } + + // TODO: %IteratorPrototype%[Symbol.iterator]() = this (current iterator instance) +}; + +// 27.5.1 Generator Prototype Object +// - in ecma edition 11: named %GeneratorPrototype% (25.4.1) +// - in ecma edition 12: named %GeneratorFunction.prototype.prototype% (27.5.1) but +// labelled as %GeneratoPrototype% in 27.3 Figure 5. +// Label corrected in version at tc39. +class GeneratorProto : public IteratorProto { +public: + GeneratorProto(Function* ctor, Object* proto) : IteratorProto(ctor, proto) { } + ~GeneratorProto() { } + void* _yield = nullptr; // pointer to yield label to resume execution + bool _finished = false; // flag if generator is in finished state + bool _firstNext = true; // flag if first next has been called on iterator (27.5.1.2) + + IteratorResult* _return(JS_Val* arg = nullptr) override { + _finished = true; + if (arg != nullptr) { + _res.value = *arg; + } + return &_res; + } +}; + +// 27.3.1 GeneratorFunction Constructor +class GeneratorFunc : public Function::Ctor { +public: + GeneratorFunc(Function* ctor, Object* proto, Object* prototype_proto, Function* prototype_obj) : Function::Ctor(ctor, proto, prototype_proto, prototype_obj) { } + ~GeneratorFunc() {} +}; + +// 27.3.3 GeneratorFunction Prototype Obejct +// - in ecma edition 11: named %Generator% (25.2.3) +// - in ecma edition 12: named %GeneratorFunction.prorotype% (27.3.3) but +// labelled as %Generator% in 27.3 Figure 5. +// Label corrected in tc39 version. +class GeneratorFuncPrototype : public Function { +public: + GeneratorFuncPrototype(Function* ctor, Object* proto, Object* prototype_proto) : Function(ctor, proto, prototype_proto) { } +}; + +// Generator related intrinsic objects. (ecma 27.3) +// IteratorPrototype: It is not a prototype object of any constructor func, but holds shared properties for iterators +// GeneratorFunction: A builtin function used as the constructor for generator functions. +// Generator: (GeneratorFuncion.prototype in edition 12.0) is the prototype object of GeneratorFunction, +// It is a special object used as both prototype object and constructor - as prototype for sharing +// properties between generator functions, and as constructor whose prototype object (GeneratorPrototype +// in edition 11) holds shared properties for generators (i.e. instances returned by generator functions. 
+extern IteratorProto IteratorPrototype; +extern GeneratorFunc GeneratorFunction; +extern GeneratorFuncPrototype Generator; +extern Object* GeneratorPrototype; + +} // namespace t2crt + + +using t2crt::Record; +using t2crt::JSON; +using t2crt::RegExp; +#endif // __BUILTINS_H__ diff --git a/src/MapleFE/ast2cpp/runtime/include/ts2cpp.h b/src/MapleFE/ast2cpp/runtime/include/ts2cpp.h new file mode 100644 index 0000000000000000000000000000000000000000..cdf48d2d55cc3ac65f17f6585192b2efbcd8fb25 --- /dev/null +++ b/src/MapleFE/ast2cpp/runtime/include/ts2cpp.h @@ -0,0 +1,510 @@ +/* + * Copyright (C) [2021-2022] Futurewei Technologies, Inc. All rights reverved. + * + * OpenArkFE is licensed under the Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ + +#ifndef __TS2CPP_RT_HEADER__ +#define __TS2CPP_RT_HEADER__ + +#include +#include +#include +#include +#include +#include +#include + +using namespace std::string_literals; + +namespace t2crt { + +using std::to_string; + +inline std::string to_string(std::string t) {return t;} + +class Object; +class Function; +template +class Array; + +// JS types for props +#define TY_CXX 0x20 +typedef enum JS_Type : uint8_t { + TY_None = 0, // Placeholder for non-existing property + TY_Undef, // "undefined" + TY_Null, // "object" + TY_Bool, // "boolean" + TY_Long, // "number" + TY_Double, // "number" + TY_BigInt, // "bigint" + TY_String, // "string" + TY_Symbol, // "symbol" + TY_Function, // "function" + TY_Object, // "object" + TY_Array, + TY_Any, // JS_Val::x.field points to a JS_Val + TY_LAST, + TY_CXX_Undef = TY_Undef | TY_CXX, + TY_CXX_Null, + TY_CXX_Bool, + TY_CXX_Long, + TY_CXX_Double, + TY_CXX_BigInt, + TY_CXX_String, + TY_CXX_Symbol, + TY_CXX_Function, + TY_CXX_Object, + TY_CXX_Array, + TY_CXX_Any, // JS_Val::x.field points to a JS_Val + TY_CXX_LAST, +} JS_Type; + +struct JS_Val { + union { + void* field; // used by compiler genereted fields only + bool val_bool; + int64_t val_long; + double val_double; + void* val_bigint; + std::string* val_string; // JS string primitive (not JS String object) + Object* val_obj; // for function, object (incl. 
String objects) + Function* val_func; // for function + } x; + JS_Type type; + + bool IsCxxProp() { return type & TY_CXX; } // true if a cxx field + bool IsNone() { return type == TY_None; } + bool IsNull() { return type & TY_Null; } + bool IsUndef() { return type & TY_Undef; } + + JS_Val() { x.val_long = 0l; type = TY_Undef; } + JS_Val(int64_t l, JS_Type t, bool c) { x.val_long = l; type = t; } + JS_Val(bool b) { x.val_bool = b; type = TY_Bool; } + JS_Val(int64_t l) { x.val_long = l; type = TY_Long; } + JS_Val(double d) { x.val_double = d; type = TY_Double; } + JS_Val(Object* o) { x.val_obj = o; type = TY_Object; } + JS_Val(Object* o, JS_Type t) { x.val_obj = o; type = t; } + JS_Val(Function* o){ x.val_func = o; type = TY_Function; } + JS_Val(std::string* s) { x.val_string = s; type = TY_String; } + JS_Val(std::string s) { x.val_string = new std::string(s); type = TY_String; } + JS_Val(const char* s) { x.val_string = new std::string(s); type = TY_String; } + JS_Val(int i) { x.val_long = i; type = TY_Long; } + JS_Val(JS_Type jstype, bool v) { x.val_long = (int64_t)v; type = jstype; } + // Prop directly generated as class fields when TS is compiled into CPP + JS_Val(JS_Type jstype, void* field) { x.field = field; type = static_cast(jstype|TY_CXX); } + +#define OPERATORS(op) \ + JS_Val operator op(const JS_Val &v) { \ + JS_Val res; \ + if(type == v.type) \ + switch(type) { \ + case TY_Long: return { x.val_long op v.x.val_long }; \ + case TY_Double: return { x.val_double op v.x.val_double }; \ + } \ + else { \ + if(type == TY_Long && v.type == TY_Double) \ + return { (double)x.val_long op v.x.val_double }; \ + if(type == TY_Double && v.type == TY_Long) \ + return { x.val_double op (double)v.x.val_long }; \ + } \ + return res; \ + } + + OPERATORS(+) + OPERATORS(-) + OPERATORS(*) + + // Handle assigning value to a JS_Val obj +#define ASSIGN_OPR(ctype, jstype, jscxxtype) \ + JS_Val& operator=(ctype val) { \ + if (IsCxxProp()) { \ + type = jscxxtype; \ + *(ctype *)x.field = val; \ + } else { \ + type = jstype; \ + x.val_##ctype = val; \ + } \ + return *this; \ + } + + ASSIGN_OPR(double, TY_Double, TY_CXX_Double) + ASSIGN_OPR(long, TY_Long, TY_CXX_Long) + ASSIGN_OPR(bool, TY_Bool, TY_CXX_Bool) + + // assign Object ptr to JS_Val + JS_Val& operator=(Object* val) { + if (IsCxxProp()) { + type = TY_CXX_Object; + *(Object **)x.field = val; + } else { + type = TY_Object; + x.val_obj = val; + } + return *this; + } + + // convert int to numeric type in target JS_Val + JS_Val& operator=(int val) { + if (IsCxxProp()) { + if (type == TY_CXX_Long) + *(long*)x.field = (long)val; + else if (type == TY_CXX_Double) + *(double *)x.field = (double)val; + } else { + if (type == TY_Long) + x.val_long = (long)val; + else if (type == TY_Double) + x.val_double = (double)val; + else if (type == TY_Undef) { + // if target JS_Val is undef, convert to long + type = TY_Long; + x.val_long = (long)val; + } + } + return *this; + } + + JS_Val& operator=(std::string &val) { + if (IsCxxProp()) { + type = TY_CXX_String; + // x.field already pt to a str fd in a c++ obj + *(std::string *)x.field = val; + } else { + type = TY_String; + x.val_string = new std::string(val); + } + return *this; + } + + JS_Val& operator=(char* val) { + if (IsCxxProp()) { + type = TY_CXX_String; + *(std::string *)x.field = std::string(val); + } else { + type = TY_String; + x.val_string = new std::string(val); + } + return *this; + } + + +}; + +typedef std::unordered_map JS_PropList; +typedef std::pair ObjectProp; + +class Object { + public: + JS_PropList 
propList; + Object* __proto__; // prototype chain + Function* constructor; // constructor of object + + public: + Object(): __proto__(nullptr) {} + Object(Function* ctor, Object* proto): constructor(ctor), __proto__(proto) {} + Object(Function* ctor, Object* proto, std::vector props): constructor(ctor), __proto__(proto) + { + for (int i=0; iAddProp(props[i].first, props[i].second); + } + virtual ~Object() {} + class Ctor; + static Ctor ctor; + virtual std::string Dump(void); + + JS_Val& operator[] (std::string key) + { + if (!HasOwnProp(key)) AddProp(key, JS_Val()); + return GetPropVal(key); + } + + + bool HasOwnProp(std::string key) { + JS_PropList::iterator it; + it = propList.find(key); + return (it != propList.end()); + } + + void AddProp(std::string key, JS_Val val) { + propList[key] = val; + } + + JS_Val GetProp(std::string key) { + return propList[key]; + } + + JS_Val& GetPropVal(std::string key) { + return propList[key]; + } + + void* GetPropField(std::string key) { + return propList[key].x.field; + } + + bool GetPropBool(std::string key) { + if (propList[key].IsCxxProp()) + return *(bool*)GetPropField(key); + return propList[key].x.val_bool; + } + long GetPropLong(std::string key) { + if (propList[key].IsCxxProp()) + return *(long*)GetPropField(key); + return propList[key].x.val_long; + } + double GetPropDouble(std::string key) { + if (propList[key].IsCxxProp()) + return *(double*)GetPropField(key); + return propList[key].x.val_double; + } + void* GetPropBigInt(std::string key) { + if (propList[key].IsCxxProp()) + return *(void**)GetPropField(key); + return propList[key].x.val_bigint; + } + std::string GetPropStr(std::string key) { + if (propList[key].IsCxxProp()) + return *(*(std::string**)GetPropField(key)); + return *propList[key].x.val_string; + } + Object* GetPropObj(std::string key) { + if (propList[key].IsCxxProp()) + return *(Object**)GetPropField(key); + return propList[key].x.val_obj; + } + Function* GetPropFunc(std::string key) { + if (propList[key].IsCxxProp()) + return *(Function**)GetPropField(key); + return propList[key].x.val_func; + } + + virtual bool IsFuncObj() { + return false; + } + + bool IsEmpty() const { + return propList.empty(); + } + + virtual const char* __GetClassName() const { + return ""; + } + + // Put code for JS Object.prototype props as static fields and methods in this class + // and add to propList of Object_ctor.prototype object on system init. + + virtual std::string TypeId() { + return "object"s; + } +}; + + +using ArgsT = Array; + +class Function : public Object { + public: + Object* prototype; // prototype property + Object* _thisArg; // from bind() + ArgsT* _args; // from bind() + + Function(Function* ctor, Object* proto, Object* prototype_proto) : Object(ctor, proto) { + JS_Val val(this); + prototype = new Object(this, prototype_proto); + prototype->AddProp("constructor", val); + } + // Special constructor for creating builtin constructor function "GeneratorFunction" see builtins.h + Function(Function* ctor, Object* proto, Object* prototype_proto, Object* prototype_obj) : Object(ctor, proto) { + JS_Val val(this); + prototype = prototype_obj; + prototype->AddProp("constructor", val); + } + + class Ctor; + static Ctor ctor; + + bool IsFuncObj() { + return true; + } + + // Put code for JS Function.prototype props as static fields and methods in this class + // and add to propList of Function_ctor.prototype object on system init. 
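+  // Note: the defaults below are stubs: bind() returns nullptr and call()/apply()
+  // return a default-constructed JS_Val (TY_Undef). Generated function classes are
+  // presumably expected to override these virtuals with the real behavior.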
+ virtual Object* bind (Object* obj, ArgsT* argv) { return nullptr; } + virtual JS_Val call (Object* obj, ArgsT* argv) { JS_Val res; return res; } + virtual JS_Val apply(Object* obj, ArgsT* argv) { JS_Val res; return res; } + + std::string TypeId() override { + return "function"s; + } +}; + + + +class Object::Ctor : public Function { + public: + Ctor(Function* ctor, Object* proto) : Function(ctor, proto, nullptr) {} + + Object* operator()(Object* obj) { + return(obj); + } + Object* _new() { + return new Object(this, this->prototype); + } + Object* _new(std::vector props) { + return new Object(this, this->prototype, props); + } +}; + +class Function::Ctor : public Function { + public: + Ctor(Function* ctor, Object* proto, Object* prototype_proto) : Function(ctor, proto, prototype_proto) {} + Ctor(Function* ctor, Object* proto, Object* prototype_proto, Object* prototype_obj) : Function(ctor, proto, prototype_proto, prototype_obj) {} +}; + +template +class ClassFld { + // Helper class for converting between class field offset, void ptr and integer val + // NewProp() creates a JS_Val of type TY_CXX_ with pointer to objet member field. + typedef union { +// void* addr; + T offset; + int fld_offset; + } FldAddr; + + public: + FldAddr field; + public: +// JS_Val NewProp(JS_Type type) {return JS_Val(type, field.addr);} + ClassFld(T offset) {field.offset = offset;} + T Offset() {return field.offset;} + JS_Val NewProp(void* obj, JS_Type type) {return JS_Val(type, (void*)((char*)obj+field.fld_offset));} +}; + +template std::string __js_typeof(T* v) { + return v->TypeId(); +} + +template std::string __js_typeof(T v) { + if (std::is_signed::value) + return "number"s; + if (std::is_integral::value) + return "boolean"s; + return "unknown"s; +} + +template <> inline std::string __js_typeof(std::string v) { + return "string"s; +} + +template <> inline std::string __js_typeof(t2crt::JS_Val v) { + static std::string names[t2crt::TY_LAST] = { + [t2crt::TY_None] = "none"s, + [t2crt::TY_Undef] = "undefined"s, + [t2crt::TY_Null] = "object"s, + [t2crt::TY_Bool] = "boolean"s, + [t2crt::TY_Long] = "number"s, + [t2crt::TY_Double] = "number"s, + [t2crt::TY_BigInt] = "bigint"s, + [t2crt::TY_String] = "string"s, + [t2crt::TY_Symbol] = "symbol"s, + [t2crt::TY_Function] = "function"s, + [t2crt::TY_Object] = "object"s, + }; + return names[v.type]; +} + +// TSC restricts Lhs of instanceof operator to either type any or an object type. +bool InstanceOf(JS_Val val, Function* ctor); + +// Our implementation returns true if the prototype property of the func/class +// constructor appers in the proto chain of the object. 
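+// Illustrative sketch (hypothetical generated class A with no superclass, not part
+// of this header): assuming the emitter produces `A::ctor(A::ctor._new())` for
+// `new A()`, the resulting object's __proto__ is A::ctor.prototype, so
+//   InstanceOf(a, &A::ctor)      // true on the first hop of the proto chain
+//   InstanceOf(a, &Object::ctor) // true via A::ctor.prototype->__proto__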
+template +bool InstanceOf(T* val, Function* ctor) { + if (ctor == nullptr) + return false; + + Object* p = val->__proto__; + while (p) { + if (p == ctor->prototype) + return true; + else + p = p->__proto__; + } + return false; +} + +// TODO: To be implemented +inline bool StrictEqu(JS_Val lhs, JS_Val rhs) { return false; } +inline bool StrictNotEqu(JS_Val lhs, JS_Val rhs) { return true; } + +void GenerateDOTGraph( std::vector&obj, std::vector&name); + +} // namespace t2crt + +extern std::ostream& operator<< (std::ostream& out, const t2crt::JS_Val& v); +extern std::ostream& operator<< (std::ostream& out, t2crt::Object* obj); +extern const t2crt::JS_Val undefined; +extern const t2crt::JS_Val null; + +#include "builtins.h" + +template +std::ostream& operator<< (std::ostream& out, const std::vector& v) { + if(v.empty()) + out << "[]"; + else { + out << "[ "; + auto i = v.begin(), e = v.end(); + out << *i++; + for (; i != e; ++i) + std::cout << ", " << *i; + out << " ]"; + } + return out; +} + +template +std::ostream& operator<< (std::ostream& out, const t2crt::Array* v) { + if(v->elements.empty()) + out << "[]"; + else { + out << "[ "; + auto i = v->elements.begin(), e = v->elements.end(); + out << *i++; + for (; i != e; ++i) + std::cout << ", " << *i; + out << " ]"; + } + return out; +} + +template +std::ostream& operator<< (std::ostream& out, const t2crt::Array& v) { + if(v.elements.empty()) + out << "[]"; + else { + out << "[ "; + auto i = v.elements.begin(), e = v.elements.end(); + out << *i++; + for (; i != e; ++i) + std::cout << ", " << *i; + out << " ]"; + } + return out; +} + +#define debugger (0) + +using t2crt::Object; +using t2crt::Function; +using t2crt::Array; + +#endif diff --git a/src/MapleFE/ast2cpp/runtime/src/builtins.cpp b/src/MapleFE/ast2cpp/runtime/src/builtins.cpp new file mode 100644 index 0000000000000000000000000000000000000000..43f6dd65fbd1bf433a3576824b3745121c340ef2 --- /dev/null +++ b/src/MapleFE/ast2cpp/runtime/src/builtins.cpp @@ -0,0 +1,23 @@ +#include "../include/ts2cpp.h" + +namespace t2crt { + +Object::Ctor Object::ctor (&Function::ctor, Function::ctor.prototype); +Function::Ctor Function::ctor(&Function::ctor, Function::ctor.prototype, Object::ctor.prototype); +Number::Ctor Number::ctor (&Function::ctor, Function::ctor.prototype, Object::ctor.prototype); +RegExp::Ctor RegExp::ctor (&Function::ctor, Function::ctor.prototype, Object::ctor.prototype); + +IteratorProto IteratorPrototype(&Object::ctor, Object::ctor.prototype); +GeneratorFuncPrototype Generator(&GeneratorFunction, Function::ctor.prototype, &IteratorPrototype); +GeneratorFunc GeneratorFunction(&Function::ctor, &Function::ctor, Function::ctor.prototype, &Generator); +Object* GeneratorPrototype = Generator.prototype; + +ARR_CTOR_DEF(int) +ARR_CTOR_DEF(long) +ARR_CTOR_DEF(double) +ARR_CTOR_DEF(JS_Val) +ARR_CTOR_DEF(Object) +ARR_CTOR_DEF(Object*) + +} // namepsace t2crt + diff --git a/src/MapleFE/ast2cpp/runtime/src/ts2cpp.cpp b/src/MapleFE/ast2cpp/runtime/src/ts2cpp.cpp new file mode 100644 index 0000000000000000000000000000000000000000..50fecb38bc70c6b4c71e517d5bf98f7392e4178e --- /dev/null +++ b/src/MapleFE/ast2cpp/runtime/src/ts2cpp.cpp @@ -0,0 +1,151 @@ +#include +#include +#include "../include/ts2cpp.h" +#include "ast_common.h" + +std::ostream& operator<< (std::ostream& out, const t2crt::JS_Val& v) { + switch(v.type) { + case t2crt::TY_None: out << "None"; break; + case t2crt::TY_Undef: out << "undefined"; break; + case t2crt::TY_Null: out << "null"; break; + case t2crt::TY_Bool: out << 
v.x.val_bool; break; + case t2crt::TY_Long: out << v.x.val_long; break; + case t2crt::TY_Double: out << v.x.val_double; break; + case t2crt::TY_BigInt: out << "bigint"; break; + case t2crt::TY_String: out << *v.x.val_string; break; + case t2crt::TY_Symbol: out << "symbol"; break; + case t2crt::TY_Function: out << "function"; break; + case t2crt::TY_Object: out << v.x.val_obj; break; + case t2crt::TY_Any: out << *(t2crt::JS_Val*)v.x.field; break; + + case t2crt::TY_CXX_Undef: out << "undefined"; break; + case t2crt::TY_CXX_Null: out << "null"; break; + case t2crt::TY_CXX_Bool: out << *(bool*)v.x.field; break; + case t2crt::TY_CXX_Long: out << *(int64_t*)v.x.field; break; + case t2crt::TY_CXX_Double: out << *(double *)v.x.field; break; + case t2crt::TY_CXX_BigInt: out << "bigint"; break; + case t2crt::TY_CXX_String: out << *(std::string*)v.x.field; break; + case t2crt::TY_CXX_Symbol: out << "symbol"; break; + case t2crt::TY_CXX_Function: out << "function"; break; + case t2crt::TY_CXX_Object: out << *(Object**)v.x.field; break; + case t2crt::TY_CXX_Any: out << *(t2crt::JS_Val*)v.x.field; break; + } + return out; +} + +std::ostream& operator<< (std::ostream& out, t2crt::Object *obj) { + if (obj == nullptr) + return out; + out << obj->Dump(); + return out; +} + +const t2crt::JS_Val undefined = { 0, t2crt::TY_Undef, false }; +const t2crt::JS_Val null = { 0, t2crt::TY_Null, false }; + +namespace t2crt { + +std::string Object::Dump(void) { + std::string str; + str += this->constructor->__GetClassName(); + if (this->IsEmpty()) + str += "{}"; + else { + std::vector vec; + unsigned cnt = 0; + std::stringstream buf; + buf << std::boolalpha; + // non-object fields go first + for (bool flag: { false, true }) { + for (auto it = this->propList.begin(); it != this->propList.end(); it++) { + auto k = it->second.type; + auto b = k == t2crt::TY_Object || k == t2crt::TY_CXX_Object; + if (b == flag) { + buf.str(std::string()); + const std::string &prop = it->first; + if (isdigit(prop.front())) + buf << '\'' << prop << "': "; + else { + auto len = prop.length(); + constexpr auto suffixlen = sizeof(RENAMINGSUFFIX) - 1; + if (len > suffixlen && prop.substr(len - suffixlen) == RENAMINGSUFFIX) + buf << prop.substr(0, len - suffixlen) << ": "; + else + buf << prop << ": "; + } + if (k == t2crt::TY_String || k == t2crt::TY_CXX_String) + buf << '\'' << it->second << '\''; + else + buf << it->second; + vec.push_back(buf.str()); + } + } + std::sort(vec.begin() + cnt, vec.end()); + cnt = vec.size(); + } + const char *p = "{ "; + for (auto prop: vec) { + str += p + prop; + p = ", "; + } + str += " }"; + } + return str; +} + +bool InstanceOf(JS_Val val, Function* ctor) { + if (val.type != TY_Object || val.x.val_obj == nullptr || ctor == nullptr) + return false; + + Object* p = val.x.val_obj->__proto__; + while (p) { + if (p == ctor->prototype) + return true; + else + p = p->__proto__; + } + return false; +} + +// Generate DOT graph output to show object inheritance with +// constructor, prototype chain and prototype property linkages +void GenerateDOTGraph( std::vector&obj, std::vector&name) { + for(int g = 0; g < 2; ++g) { + std::vector objs = obj; + std::vector names = name; + std::cout << "digraph JS" << g << " {\nranksep=0.6;\nnodesep=0.6;\n" << (g == 0 ? 
"newrank=true;\n" : "") << std::endl; + int num = objs.size(); + int k = num; + + for (int i=0; iIsFuncObj(); + if (isFuncObj) { + // only function objects have prototype prop + objs.push_back(((Function *)objs[i])->prototype); + names.push_back(names[i] + "_prototype"); + if (g == 0) { + std::cout << "subgraph cluster_" << names[i] << " {\nrank=same;\ncolor=white;\n" + << names[i] << ";\n" << names[k] << "[label=\"" << names[i] << ".prototype\", shape=box];\n }" << std::endl; + } + else { + std::cout << names[k] << "[label=\"" << names[i] << ".prototype\", shape=box];" << std::endl; + } + std::cout << names[i] << " -> " << names[k] << " [label=\"prototype\", color=blue, fontcolor=blue];" << std::endl; + k++; + } + } + num = objs.size(); + for (int i=0; i 0 && objs[i]->constructor == objs[j]) + std::cout << names[i] << " -> " << names[j] << " [label=\"ctor\", color=darkgreen, fontcolor=darkgreen];" << std::endl; + + if(objs[i]->__proto__ == objs[j]) + std::cout << names[i] << " -> " << names[j] << " [label=\"__proto__\", color=red, fontcolor=red];" << std::endl; + } + } + std::cout << "} // digraph JS" << g << std::endl; + } +} + +} // namespace t2crt diff --git a/src/MapleFE/ast2cpp/src/Makefile b/src/MapleFE/ast2cpp/src/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..084bcad114a3f5728966433d2aaf28c3569731ba --- /dev/null +++ b/src/MapleFE/ast2cpp/src/Makefile @@ -0,0 +1,93 @@ +# Copyright (C) [2021] Futurewei Technologies, Inc. All rights reverved. +# +# OpenArkFE is licensed under the Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# + +include ../../Makefile.in +BUILDBIN=$(BUILDDIR)/bin +BUILD=$(BUILDDIR)/ast2cpp +BUILDGEN=$(BUILDDIR)/gen +BUILDASTGEN=$(BUILDDIR)/ast_gen/shared +$(shell $(MKDIR_P) $(BUILD)) + +SRC=$(wildcard *.cpp) +OBJ :=$(patsubst %.cpp,%.o,$(SRC)) +DEP :=$(patsubst %.cpp,%.d,$(SRC)) + +SRCG := $(wildcard $(BUILDGEN)/gen*.cpp) +OBJG := $(patsubst %.cpp, %.o, $(SRCG)) +DEPG := $(patsubst %.cpp, %.d, $(SRCG)) + +LOCALOBJS :=$(foreach obj,$(OBJ), $(BUILD)/$(obj)) +LOCALDEPS :=$(foreach dep,$(DEP), $(BUILD)/$(dep)) +OBJS :=$(LOCALOBJS) $(OBJG) +DEPS :=$(LOCALDEPS) $(DEPG) + +LIBOBJS :=$(patsubst $(BUILD)/main.o,,$(LOCALOBJS)) + +GENDIR:=${BUILDDIR}/ast_gen/shared + +INCLUDES := -I $(MAPLEFE_ROOT)/shared/include \ + -I $(MAPLEFE_ROOT)/astopt/include \ + -I $(MAPLEFE_ROOT)/ast2cpp/include \ + -I $(MAPLEFE_ROOT)/autogen/include \ + -I $(MAPLEFE_ROOT)/shared/include \ + -I $(MAPLEFE_ROOT)/typescript/include \ + $(MAPLEALL_INC) -I ${GENDIR} + +INCLUDEGEN := -I $(MAPLEFE_ROOT)/shared/include -I $(BUILDDIR)/gen -I $(BUILDASTGEN) + +TARGET=ast2cpp +TARGET_A=ast2cpp.a + +SHAREDLIB = $(BUILDDIR)/astopt/astopt.a $(BUILDDIR)/shared/shared.a $(BUILDASTGEN)/genast.a +LANGSPEC=$(BUILDDIR)/typescript/lang_spec.o + +.PHONY: all +all: $(BUILDBIN)/$(TARGET) + +-include $(DEPS) +.PHONY: clean + +vpath %.o $(BUILD) +vpath %.d $(BUILD) + +#Pattern Rules +$(BUILD)/%.o : %.cpp + $(CXX) $(CXXFLAGS) -fpermissive $(INCLUDES) -w -c $< -o $@ + +$(BUILD)/%.d : %.cpp + @$(CXX) $(CXXFLAGS) -MM $(INCLUDES) $< > $@ + @mv -f $(BUILD)/$*.d $(BUILD)/$*.d.tmp + @sed -e 's|.*:|$(BUILD)/$*.o:|' < $(BUILD)/$*.d.tmp > $(BUILD)/$*.d + @rm -f $(BUILD)/$*.d.tmp + +$(BUILDGEN)/%.o : $(BUILDGEN)/%.cpp $(BUILDGEN)/%.d + $(CXX) $(CXXFLAGS) -fpermissive $(INCLUDEGEN) -w -c $< -o $@ + +$(BUILDGEN)/%.d : $(BUILDGEN)/%.cpp + @$(CXX) $(CXXFLAGS) -std=c++11 -MM $(INCLUDEGEN) $< > $@ + @mv -f $(BUILDGEN)/$*.d $(BUILDGEN)/$*.d.tmp + @sed -e 's|.*:|$(BUILDGEN)/$*.o:|' < $(BUILDGEN)/$*.d.tmp > $(BUILDGEN)/$*.d + @rm -f $(BUILDGEN)/$*.d.tmp + +# TARGET depends on OBJS and shared OBJS from shared directory +# as well as mapleall libraries +$(BUILD)/$(TARGET_A): $(LIBOBJS) + /usr/bin/ar rcs $(BUILD)/$(TARGET_A) $(LIBOBJS) + +$(BUILDBIN)/$(TARGET): $(BUILD)/$(TARGET_A) $(OBJS) $(SHAREDLIB) + @mkdir -p $(BUILDBIN) + $(LD) -o $(BUILDBIN)/$(TARGET) $(BUILD)/main.o $(BUILD)/$(TARGET_A) $(OBJG) $(LANGSPEC) $(SHAREDLIB) + +clean: + rm -rf $(BUILD) diff --git a/src/MapleFE/ast2cpp/src/a2c_util.cpp b/src/MapleFE/ast2cpp/src/a2c_util.cpp new file mode 100644 index 0000000000000000000000000000000000000000..f9bb62cadc557da43df4949d5b5d2e3702303830 --- /dev/null +++ b/src/MapleFE/ast2cpp/src/a2c_util.cpp @@ -0,0 +1,73 @@ +/* +* Copyright (C) [2021-2022] Futurewei Technologies, Inc. All rights reverved. +* +* OpenArkFE is licensed under the Mulan PSL v2. +* You can use this software according to the terms and conditions of the Mulan PSL v2. +* You may obtain a copy of Mulan PSL v2 at: +* +* http://license.coscl.org.cn/MulanPSL2 +* +* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +* FIT FOR A PARTICULAR PURPOSE. +* See the Mulan PSL v2 for more details. 
+*/ + +#include +#include "a2c_util.h" +#include "gen_astdump.h" + +namespace maplefe { + +std::string ImportedFiles::GetTargetFilename(TreeNode *node) { + std::string filename; + if (node && node->IsLiteral()) { + LiteralNode *lit = static_cast(node); + LitData data = lit->GetData(); + filename = AstDump::GetEnumLitData(data); + filename += ".ts.ast"s; + if(filename.front() != '/') { + std::filesystem::path p = mModule->GetFilename(); + try { + p = std::filesystem::canonical(p.parent_path() / filename); + filename = p.string(); + } + catch(std::filesystem::filesystem_error const& ex) { + // Ignore std::filesystem::filesystem_error exception + // keep filename without converting it to a cannonical path + } + } + } + return filename; +} + +ImportNode *ImportedFiles::VisitImportNode(ImportNode *node) { + std::string name = GetTargetFilename(node->GetTarget()); + if (!name.empty()) { + mFilenames.push_back(name); + } + for (unsigned i = 0; i < node->GetPairsNum(); ++i) { + if (auto x = node->GetPair(i); x->IsSingle()) { + std::string s = GetTargetFilename(x->GetBefore()); + if (!s.empty()) + mFilenames.push_back(s); + } + } + for (unsigned i = 0; i < node->GetPairsNum(); i++) { + XXportAsPairNode *pair = node->GetPair(i); + (void) AstVisitor::VisitTreeNode(pair); + } + return node; +} + +ExportNode *ImportedFiles::VisitExportNode(ExportNode *node) { + std::string name = GetTargetFilename(node->GetTarget()); + if (!name.empty()) + mFilenames.push_back(name); + for (unsigned i = 0; i < node->GetPairsNum(); i++) { + XXportAsPairNode *pair = node->GetPair(i); + (void) AstVisitor::VisitTreeNode(pair); + } + return node; +} +} diff --git a/src/MapleFE/ast2cpp/src/ast2cpp.cpp b/src/MapleFE/ast2cpp/src/ast2cpp.cpp new file mode 100644 index 0000000000000000000000000000000000000000..449e4b9c17caeb49da0feb1e037cf51a99b2ade2 --- /dev/null +++ b/src/MapleFE/ast2cpp/src/ast2cpp.cpp @@ -0,0 +1,243 @@ +/* +* Copyright (C) [2020] Futurewei Technologies, Inc. All rights reverved. +* +* OpenArkFE is licensed under the Mulan PSL v2. +* You can use this software according to the terms and conditions of the Mulan PSL v2. +* You may obtain a copy of Mulan PSL v2 at: +* +* http://license.coscl.org.cn/MulanPSL2 +* +* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +* FIT FOR A PARTICULAR PURPOSE. +* See the Mulan PSL v2 for more details. 
+*/ + +#include +#include +#include +#include +#include + +#include "ast2cpp.h" +#include "ast_handler.h" +#include "gen_astdump.h" +#include "gen_astgraph.h" +#include "gen_aststore.h" +#include "gen_astload.h" +#include "cpp_definition.h" +#include "cpp_declaration.h" +#include "a2c_util.h" + +namespace maplefe { + +void A2C::EmitTS() { + for (HandlerIndex i = 0; i < GetModuleNum(); i++) { + Module_Handler *handler = mASTHandler->GetModuleHandler(i); + + // build CFG + handler->BuildCFG(); + + ModuleNode *module = handler->GetASTModule(); + std::cout << "============= AstDump ===========" << std::endl; + AstDump astdump(module); + astdump.Dump("After BuildCFG()", &std::cout); + std::cout << "============= AstGraph ===========" << std::endl; + AstGraph graph(module); + graph.DumpGraph("After BuildCFG()", &std::cout); + std::cout << "============= Emitter ===========" << std::endl; + maplefe::Emitter emitter(handler); + std::string code = emitter.Emit("Convert AST to TypeScript code"); + std::cout << code; + } +} + +bool A2C::LoadImportedModules() { + std::queue queue; + for (HandlerIndex i = 0; i < GetModuleNum(); i++) { + Module_Handler *handler = mASTHandler->GetModuleHandler(i); + ModuleNode *module = handler->GetASTModule(); + ImportedFiles imported(module); + imported.VisitTreeNode(module); + for(const auto &e: imported.mFilenames) + queue.push(e); + } + + bool err = false; + while(!queue.empty()) { + std::string filename = queue.front(); + queue.pop(); + if(mASTHandler->GetHandlerIndex(filename.c_str()) == HandlerNotFound) { + std::ifstream input(filename, std::ifstream::binary); + if(input.fail()) { + std::cerr << "Error: File " << filename << " not found for imported module" << std::endl; + err = true; + continue; + } + input >> std::noskipws; + std::istream_iterator s(input), e; + maplefe::AstBuffer vec(s, e); + maplefe::AstLoad loadAst; + maplefe::ModuleNode *mod = loadAst.LoadFromAstBuf(vec); + // add mod to the vector + while(mod) { + mASTHandler->AddModule(mod); + ImportedFiles imported(mod); + imported.VisitTreeNode(mod); + for(const auto &e: imported.mFilenames) + queue.push(e); + mod = loadAst.Next(); + } + } + } + return err; +} + +// starting point of AST +int A2C::ProcessAST() { + mIndexImported = GetModuleNum(); + + // used for FE verification + if (mFlags & FLG_emit_ts_only) { + EmitTS(); + return 0; + } + + // load all imported modules + if (!(mFlags & FLG_no_imported)) + if (LoadImportedModules()) + return 1; + + // loop through module handlers + for (HandlerIndex i = 0; i < GetModuleNum(); i++) { + Module_Handler *handler = mASTHandler->GetModuleHandler(i); + ModuleNode *module = handler->GetASTModule(); + + if (mFlags & FLG_trace_1) { + std::cout << "============= in ProcessAST ===========" << std::endl; + std::cout << "srcLang : " << module->GetSrcLangString() << std::endl; + + for(unsigned k = 0; k < module->GetTreesNum(); k++) { + TreeNode *tnode = module->GetTree(k); + if (mFlags & FLG_trace_1) { + tnode->Dump(0); + std::cout << std::endl; + } + } + } + + if (mFlags & FLG_trace_2) { + std::cout << "============= AstGraph ===========" << std::endl; + AstGraph graph(module); + graph.DumpGraph("After LoadFromAstBuf()", &std::cout); + } + } + + // build dependency of modules + PreprocessModules(); + + // loop through module handlers in import/export dependency order + for (auto handler: mHandlersInOrder) { + ModuleNode *module = handler->GetASTModule(); + + // basic analysis + handler->BasicAnalysis(); + + if (mFlags & FLG_trace_2) { + std::cout << "============= 
After AdjustAST ===========" << std::endl; + for(unsigned k = 0; k < module->GetTreesNum(); k++) { + TreeNode *tnode = module->GetTree(k); + if (mFlags & FLG_trace_1) { + tnode->Dump(0); + std::cout << std::endl; + } + } + AstGraph graph(module); + graph.DumpGraph("After AdjustAST()", &std::cout); + } + + // build CFG + handler->BuildCFG(); + + if (mFlags & FLG_trace_2) { + handler->Dump("After BuildCFG()"); + } + + // control flow analysis + handler->ControlFlowAnalysis(); + + // type inference + handler->TypeInference(); + + if (mFlags & FLG_trace_2) { + std::cout << "============= AstGraph ===========" << std::endl; + AstGraph graph(module); + graph.DumpGraph("After TypeInference()", &std::cout); + } + + if (mFlags & FLG_trace_2) { + std::cout << "============= AstDump ===========" << std::endl; + AstDump astdump(module); + astdump.Dump("After TypeInference()", &std::cout); + } + + // data flow analysis + handler->DataFlowAnalysis(); + + if (mFlags & FLG_trace_2) { + handler->Dump("After DataFlowAnalysis()"); + } + } + + for (auto handler: mHandlersInOrder) { + ModuleNode *module = handler->GetASTModule(); + + if (mFlags & FLG_emit_ts) { + std::cout << "============= Emitter ===========" << std::endl; + maplefe::Emitter emitter(handler); + std::string code = emitter.Emit("Convert AST to TypeScript code"); + std::cout << code; + } + } + + if (mFlags & FLG_trace_2) { + std::cout << "============= CppHandler ===========" << std::endl; + } + maplefe::CppHandler cppHandler(mASTHandler, mFlags); + cppHandler.EmitCxxFiles(); + return 0; +} + +bool CppHandler::EmitCxxFiles() { + unsigned size = mASTHandler->GetSize(); + for (int i = 0; i < size; i++) { + Module_Handler *handler = mASTHandler->GetModuleHandler(i); + CppDecl decl(handler); + { // Emit C++ header file + std::string decl_code = decl.Emit(); + std::string fn = decl.GetBaseFilename() + ".h"s; + std::ofstream out(fn.c_str(), std::ofstream::out); + out << decl_code; + out.close(); + if (mFlags & FLG_format_cpp) { + std::string cmd = "clang-format-10 -i --sort-includes=0 "s + fn; + std::system(cmd.c_str()); + } + } + { // Emit C++ implementation file + CppDef def(handler, decl); + std::string def_code = def.Emit(); + std::string fn = def.GetBaseFilename() + ".cpp"s; + std::ofstream out(fn.c_str(), std::ofstream::out); + out << def_code; + out.close(); + if (mFlags & FLG_format_cpp) { + std::string cmd = "clang-format-10 -i --sort-includes=0 "s + fn; + std::system(cmd.c_str()); + } + } + } + return true; +} + +} // namespace maplefe diff --git a/src/MapleFE/ast2cpp/src/cpp_declaration.cpp b/src/MapleFE/ast2cpp/src/cpp_declaration.cpp new file mode 100644 index 0000000000000000000000000000000000000000..315c8624b6a528f0959b5770b4a3d2642a03470b --- /dev/null +++ b/src/MapleFE/ast2cpp/src/cpp_declaration.cpp @@ -0,0 +1,1128 @@ +/* + * Copyright (C) [2021-2022] Futurewei Technologies, Inc. All rights reverved. + * + * OpenArkFE is licensed under the Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ + +#include "cpp_declaration.h" +#include "gen_astvisitor.h" +#include +#include +#include "helper.h" +#include "ast_common.h" + +namespace maplefe { + +class ImportExportModules : public AstVisitor { + private: + CppDecl *mCppDecl; + Emitter *mEmitter; + std::string mIncludes; + std::string mImports; + std::string mExports; + bool mExFlag; + + public: + ImportExportModules(CppDecl *c) : mCppDecl(c), + mIncludes("// include directives\n"), + mImports("// imports\n"), + mExports("// exports\n"), + mExFlag(false) { + mEmitter = new Emitter(c->GetModuleHandler()); + } + ~ImportExportModules() { delete mEmitter; } + + std::string GetIncludes() { return mIncludes; } + std::string GetImports() { return mImports; } + std::string GetExports() { return mExports; } + + std::string AddIncludes(TreeNode *node) { + std::string filename; + if (node) { + filename = mEmitter->EmitTreeNode(node); + auto len = filename.size(); + filename = len >= 2 && filename.back() == '"' ? filename.substr(1, len - 2) : std::string(); + // may have some duplicated include directives which do not hurt + if (!filename.empty()) { + std::string incl = "#include \""s + filename + ".h\"\n"s; + std::size_t found = mIncludes.find(incl); + if (found == std::string::npos) { + mIncludes += "#include \""s + filename + ".h\"\n"s; + mCppDecl->AddInit(tab(1) + mCppDecl->GetModuleName(filename.c_str()) + "::__init_func__();\n"s); + } + } + } + return filename; + } + + std::string Comment(TreeNode *node) { + std::string s = mEmitter->EmitTreeNode(node); + return s.empty() ? s : "//--- "s + s.substr(0, s.find('\n')) + '\n'; + } + + ImportNode *VisitImportNode(ImportNode *node) { + std::string filename = AddIncludes(node->GetTarget()); + std::string module = mCppDecl->GetModuleName(filename.c_str()); + for (unsigned i = 0; i < node->GetPairsNum(); ++i) { + if (auto x = node->GetPair(i)) { + mImports += Comment(node); + std::string str; + if (x->IsDefault()) { + if (auto b = x->GetBefore()) { + std::string v = module + "::__export::__default"s; + std::string s = mEmitter->EmitTreeNode(b); + if (b->GetTypeId() == TY_Class) + mImports += "using "s + s + " = "s + v + ";\n"s; + else + mImports += "inline const decltype("s + v + ") &"s + s + " = "s + v + ";\n"s; + } + } else if (x->IsSingle()) { + if (auto b = x->GetBefore(); b->IsLiteral()) { + if (auto a = x->GetAfter()) { + // import X = require("./module"); + std::string after = mEmitter->EmitTreeNode(a); + filename = AddIncludes(b); + module = mCppDecl->GetModuleName(filename.c_str()); + std::string v = module + "::__export::__default"s; + if(a->GetTypeId() == TY_Module || a->GetTypeId() == TY_Namespace) + mImports += "namespace "s + after + " = "s + module + v + ";\n"s; + else if (a->GetTypeId() == TY_Class) + mImports += "using "s + after + " = "s + v + ";\n"s; + else + mImports += "inline const decltype("s + v + ") &"s + after + " = "s + v + ";\n"s; + } + } + } else if (x->IsEverything()) { + if (auto n = x->GetBefore()) { + std::string s = mEmitter->EmitTreeNode(n); + mImports += "namespace "s + s + " = " + module + "::__export;\n"s; + mCppDecl->AddImportedModule(s); + } + } else { + if (auto n = x->GetBefore()) { + std::string v = mEmitter->EmitTreeNode(n); + if (auto a = x->GetAfter()) { + std::string after = mEmitter->EmitTreeNode(a); + if (node->GetTarget()) { + v = "::__export::"s + (v == "default" ? 
"__"s + v : v); + if (node->IsImportType()) + mImports += "using "s + after + " = "s + module + v + ";\n"s; + else if (a->GetTypeId() == TY_Module || a->GetTypeId() == TY_Namespace) + mImports += "namespace "s + after + " = "s + module + v + ";\n"s; + else + mImports += "inline const decltype("s + module + v + ") &"s + after + + " = "s + module + v + ";\n"s; + } else { + mEmitter->Replace(v, ".", "::"); + mImports += "inline const decltype("s + v + ") &"s + after + " = "s + v + ";\n"s; + } + if (mExFlag) + mExports += "namespace __export { using "s + mCppDecl->GetModuleName() + "::"s + after + "; }\n"s; + } else { + auto u = module + "::__export::"s + v; + if (node->IsImportType()) + mImports += "using "s + v + " = "s + u + ";\n"s; + else + mImports += "inline const decltype("s + u + ") &"s + v + " = "s + u + ";\n"s; + } + } + } + } + } + return node; + } + + ExportNode *VisitExportNode(ExportNode *node) { + if (mCppDecl->IsInNamespace(node)) + return node; + // 'export *' does not re-export a default, it re-exports only named exports + // Multiple 'export *'s fails with tsc if they export multiple exports with same name + std::string filename = AddIncludes(node->GetTarget()); + std::string module = mCppDecl->GetModuleName(filename.c_str()); + for (unsigned i = 0; i < node->GetPairsNum(); ++i) { + if (auto x = node->GetPair(i)) { + mExports += Comment(node); + if (x->IsDefault()) { + if (x->IsRef()) { + auto b = x->GetBefore(); + std::string target = mCppDecl->GetIdentifierName(b); + bool emit = true; + if (target == "default" RENAMINGSUFFIX) { + target = module + "::__export::__default"; + mExports += "namespace __export { inline const decltype("s + target + ") &"s + + "__"s + "default" + " = "s + target + "; }\n"s; + emit = false; + } + if (emit) { + mExports += "namespace __export { inline const decltype("s + target + ") &__default = "s + target + "; }\n"s; + emit = false; + } + if (emit) + mExports += "namespace __export { using "s + module + "::"s + "default" RENAMINGSUFFIX "; }\n"s; + } else { + if (auto n = x->GetBefore()) { + std::string v = mEmitter->EmitTreeNode(n); + mExports += "namespace __export { inline decltype("s + v + ") __default; }\n"s; + } + } + } else if (x->IsSingle()) { + std::string str; + if (auto a = x->GetAfter()) + mExports += "TODO: " + mEmitter->EmitTreeNode(a) + '\n'; + else if (auto b = x->GetBefore()) { + std::string s = mEmitter->EmitTreeNode(b); + if (b->GetTypeId() == TY_Class) + mExports += "namespace __export { using __default = "s + module + "::"s + s + "; }\n"s; + else if (b->GetTypeId() == TY_Namespace) + mExports += "namespace __export { namespace __default = "s + module + "::"s + s + "; }\n"s; + else + mExports += "namespace __export { inline const decltype("s + module + "::"s + s + ") &__default = "s + + module + "::"s + s + "; }\n"s; + } + } else if (x->IsEverything()) { + if (auto b = x->GetBefore()) + mExports += "namespace __export { namespace "s + mCppDecl->GetIdentifierName(b) + + " = " + module + "::__export; }\n"s; + else + mExports += "namespace __export { using namespace "s + module + "::__export; }\n"s; + } else if (x->GetAsNamespace()) { + if (auto b = x->GetBefore()) + mExports += "namespace __export { namespace "s + mCppDecl->GetIdentifierName(b) + + " = " + module + "; }\n"s; + } else { + if (auto b = x->GetBefore()) { + if (b->IsDeclare()) { + DeclareNode *decl = static_cast(b); + if (decl->GetDeclsNum() == 1) + b = decl->GetDeclAtIndex(0); + } + if (b->IsImport()) { + mExFlag = true; + VisitImportNode(static_cast(b)); + mExFlag 
= false; + continue; + } + std::string target = mCppDecl->GetIdentifierName(b); + bool emit = true; + if (auto a = x->GetAfter()) { + std::string after = mCppDecl->GetIdentifierName(a); + if (target == "default" RENAMINGSUFFIX) { + target = module + "::__export::__default"; + mExports += "namespace __export { inline const decltype("s + target + ") &"s + + (after == "default" RENAMINGSUFFIX ? "__"s + after : after) + " = "s + target + "; }\n"s; + emit = false; + } + else if (target != after) { + auto t = a->GetTypeId(); + if (t == TY_Namespace) + mExports += "namespace __export { namespace "s + after + " = "s + module + "::"s + target + "; }\n"s; + else if (t == TY_Function) + mExports += "namespace __export { inline const decltype("s + target + ") &"s + after + " = "s + + module + "::"s + target + "; }\n"s; + else + mExports += "namespace __export { using "s + after + " = "s + module + "::"s + target + "; }\n"s; + emit = false; + } + } + if (emit) + if (b->IsNamespace()) + mExports += "namespace __export { namespace "s + target + " = "s + module + "::"s + target + "; }\n"s; + else + mExports += "namespace __export { using "s + module + "::"s + target + "; }\n"s; + } + } + } + } + return node; + } +}; + +class ClassDecls : public AstVisitor { + private: + CppDecl *mCppDecl; + std::string mDecls; + + public: + ClassDecls(CppDecl *c) : mCppDecl(c), mDecls("// class decls\n") {} + + ClassNode *VisitClassNode(ClassNode *node) { + std::string ns = mCppDecl->GetNamespace(node); + if (ns.empty()) + mDecls += mCppDecl->EmitTreeNode(node) + ";\n"s; + else + mDecls += "namespace "s + ns + " {\n"s + mCppDecl->EmitTreeNode(node) + ";\n}\n"s; + return node; + } + + StructNode *VisitStructNode(StructNode *node) { + std::string ns = mCppDecl->GetNamespace(node); + if (ns.empty()) + mDecls += mCppDecl->EmitStructNode(node); + else + mDecls += "namespace "s + ns + " {\n"s + mCppDecl->EmitTreeNode(node) + "}\n"s; + return node; + } + + TypeAliasNode *VisitTypeAliasNode(TypeAliasNode* node) { + std::string ns = mCppDecl->GetNamespace(node); + if (ns.empty()) + mDecls += mCppDecl->EmitTypeAliasNode(node); + else + mDecls += "namespace "s + ns + " {\n"s + mCppDecl->EmitTypeAliasNode(node) + "}\n"s; + return node; + } + + std::string GetDecls() { return mDecls; } +}; + +class CollectDecls : public AstVisitor { + private: + CppDecl *mCppDecl; + std::string mDecls; + + public: + CollectDecls(CppDecl *c) : mCppDecl(c), mDecls("// var decls\n") {} + + FunctionNode *VisitFunctionNode(FunctionNode *node) { + return node; + } + + LambdaNode *VisitLambdaNode(LambdaNode *node) { + return node; + } + + DeclNode *VisitDeclNode(DeclNode *node) { + std::string def = mCppDecl->EmitTreeNode(node); + std::string var = mCppDecl->EmitTreeNode(node->GetVar()); + std::string ns = mCppDecl->GetNamespace(node); + std::string ext = "extern "s + def.substr(0, def.find('=')) + ";\n"s; + if (ns.empty()) + mDecls += ext; + else { + mDecls += "namespace "s + ns + " {\n"s + ext + "}\n"s; + def = "namespace "s + ns + " {\n"s + def + ";\n}\n"s; + } + mCppDecl->AddDefinition(def + ";\n"s); + return node; + } + + std::string GetDecls() { return mDecls; } +}; + +void CppDecl::AddImportedModule(const std::string& module) { + mImportedModules.insert(module); +} + +bool CppDecl::IsImportedModule(const std::string& module) { + auto res = mImportedModules.find(module); + return res != mImportedModules.end(); +} + +void CppDecl::CollectFuncArgInfo(TreeNode* node) { + if (!node->IsFunction()) + return; + + FunctionNode* func = static_cast(node); + for 
(unsigned i = 0; i < func->GetParamsNum(); ++i) { + if (auto n = func->GetParam(i)) { + // build vector of string pairs of argument types and names + std::string name = GetIdentifierName(n); + std::string type = GetTypeString(n, n->IsIdentifier()? static_cast(n)->GetType(): nullptr); + type.erase(type.find_last_not_of(' ')+1); // strip trailing spaces + hFuncTable.AddArgInfo(func->GetNodeId(), type, name); + } + } +} + +std::string CppDecl::EmitModuleNode(ModuleNode *node) { + if (node == nullptr) + return std::string(); + std::string module = GetModuleName(); + std::string header("__"); + for(auto &c : module) + header += std::toupper(c); + header += "__HEADER__\n"; + std::string str("// TypeScript filename: "s + node->GetFilename() + "\n"s); + str += "#ifndef "s + header + "#define "s + header; + str += R"""( +#include "ts2cpp.h" +)"""; + + ImportExportModules xxportModules(this); + xxportModules.Visit(node); + // All include directived from import/export statements + str += xxportModules.GetIncludes(); + + // Generate the namespace of current module + str += R"""( +namespace )""" + module + R"""( { +)"""; + + // Generate code for all imports + str += xxportModules.GetImports(); + + ClassDecls clsDecls(this); + clsDecls.VisitTreeNode(node); + // declarations of user defined classes + str += clsDecls.GetDecls(); + + // declarations of all top level functions + CfgFunc *mod = mHandler->GetCfgFunc(); + auto num = mod->GetNestedFuncsNum(); + for(unsigned i = 0; i < num; ++i) { + CfgFunc *func = mod->GetNestedFuncAtIndex(i); + TreeNode *node = func->GetFuncNode(); + std::string funcName = GetIdentifierName(node); + + CollectFuncArgInfo(node); + if (!IsClassMethod(node)) { + std::string ns = GetNamespace(node); + if (!ns.empty()) + str += "namespace "s + ns + " {\n"s; + bool isGenerator = static_cast(node)->IsGenerator(); + std::string generatorClassDef; + if (isGenerator) { + str += GeneratorClassDecl(funcName, node->GetNodeId()); + generatorClassDef = GeneratorClassDef(ns, funcName, node->GetNodeId()); + AddDefinition(generatorClassDef); + } + else { + // gen function class for each top level function + str += FunctionClassDecl(GetTypeString(static_cast(node)->GetRetType(), nullptr), GetIdentifierName(node), node->GetNodeId()); + } + if (!mHandler->IsFromLambda(node)) { + // top level funcs instantiated here as function objects from their func class + // top level lamda funcs instantiated later in assignment stmts + std::string typeName = isGenerator? 
GeneratorFuncName(funcName): ClsName(funcName); + std::string funcinit = typeName + "* "s + funcName + " = new "s + typeName + "();\n"s; + if (ns.empty()) + AddDefinition(funcinit); + else + AddDefinition("namespace "s + ns + " {\n"s + funcinit + "\n}\n"s); + str += "extern "s + typeName + "* "s + funcName + ";\n"s; + } + if (!ns.empty()) + str += "\n} // namespace " + ns + '\n'; + } + } + + CollectDecls decls(this); + decls.VisitTreeNode(node); + // declarations of all variables + str += decls.GetDecls(); + + // Generate code for all exports + str += xxportModules.GetExports() + "\nnamespace __export {}\n"s; + + // init function and an object for dynamic properties + str += R"""( + // init function for current module + void __init_func__(); + + // all dynamic properties of current module + extern t2crt::Object __module; + +} // namespace of current module + +#endif +)"""; + return str; +} + +std::string CppDecl::EmitFunctionNode(FunctionNode *node) { + if (node == nullptr) + return std::string(); + std::string str(GetTypeString(node->GetRetType(), node->GetRetType())); + if(node->GetStrIdx()) + str += " "s + node->GetName(); + str += "("s; + for (unsigned i = 0; i < node->GetParamsNum(); ++i) { + if (i) + str += ", "s; + if (auto n = node->GetParam(i)) { + str += EmitTreeNode(n); + } + } + str += ");\n"s; + + if (HasAttrStatic(node)) + str = "static "s + str; + return str; +} + +std::string CppDecl::EmitBinOperatorNode(BinOperatorNode *node) { + const char *op = Emitter::GetEnumOprId(node->GetOprId()); + const Precedence precd = *op & 0x3f; + const bool rl_assoc = *op >> 6; // false: left-to-right, true: right-to-left + std::string lhs, rhs; + if (auto n = node->GetOpndA()) { + lhs = EmitTreeNode(n); + if(precd > mPrecedence || (precd == mPrecedence && rl_assoc)) + lhs = "("s + lhs + ")"s; + } + else + lhs = "(NIL) "s; + if (auto n = node->GetOpndB()) { + rhs = EmitTreeNode(n); + if(precd > mPrecedence || (precd == mPrecedence && !rl_assoc)) + rhs = "("s + rhs + ")"s; + } + else + rhs = " (NIL)"s; + OprId k = node->GetOprId(); + std::string str; + if(k == OPR_Exp) { + str = "std::pow("s + lhs + ", "s + rhs + ")"; + } else { + switch(k) { + case OPR_Band: + case OPR_Bor: + case OPR_Bxor: + case OPR_Shl: + case OPR_Shr: + lhs = "static_cast("s + lhs + ")"s; + break; + case OPR_Zext: + lhs = "static_cast("s + lhs + ")"s; + op = "\015>>"; + break; + } + str = lhs + " "s + std::string(op + 1) + " "s + rhs; + } + mPrecedence = precd; + if (node->IsStmt()) + str += ";\n"s; + return str; + +} + +std::string CppDecl::EmitIdentifierNode(IdentifierNode *node) { + if (node == nullptr) + return std::string(); + std::string str(GetTypeString(node, node->GetType())); + str += " "s + node->GetName(); + + if (HasAttrStatic(node)) + str = "static "s + str; + else if (auto n = node->GetInit()) { + // emit init for non static class field + if (node->GetParent() && node->GetParent()->IsClass()) + str += " = "s + EmitTreeNode(n); + } + return str; +} + +std::string CppDecl::EmitPrimTypeNode(PrimTypeNode *node) { + if (node == nullptr) + return std::string(); + return GetTypeString(node); +} + +std::string CppDecl::EmitDeclNode(DeclNode *node) { + if (node == nullptr) + return std::string(); + std::string str; + if (auto n = node->GetVar()) { + str += " "s + EmitTreeNode(n); + } + return str; +} + +std::string CppDecl::EmitCallNode(CallNode *node) { + return std::string(); +} + +std::string CppDecl::EmitCondBranchNode(CondBranchNode *node) { + return std::string(); +} + +std::string 
CppDecl::EmitForLoopNode(ForLoopNode *node) { + + return std::string(); +} + +std::string CppDecl::EmitWhileLoopNode(WhileLoopNode *node) { + return std::string(); +} + +std::string CppDecl::EmitDoLoopNode(DoLoopNode *node) { + return std::string(); +} + +std::string CppDecl::EmitAssertNode(AssertNode *node) { + return std::string(); +} + +// Generate code to construct an array of type any from an ArrayLiteral. TODO: merge with similar in cppdef +std::string CppDecl::ConstructArrayAny(ArrayLiteralNode *node) { + if (node == nullptr || !node->IsArrayLiteral()) + return std::string(); + + // Generate array ctor call to instantiate array + std::string literals; + for (unsigned i = 0; i < node->GetLiteralsNum(); ++i) { + if (i) + literals += ", "s; + if (auto n = node->GetLiteral(i)) { + if (n->IsArrayLiteral()) + // Recurse to handle array elements that are arrays + literals += ConstructArrayAny(static_cast(n)); + else { + // Wrap element in JS_Val. C++ class constructor of JS_Val + // will set tupe tag in JS_Val according to element type. + literals += "t2crt::JS_Val("s + EmitTreeNode(n) + ")"s; + } + } + } + std::string str = ArrayCtorName(1, "t2crt::JS_Val") + "._new({"s + literals + "})"s; + return str; +} + +// Generate code to construct an array object with brace-enclosed initializer list TODO: merge with similar in cppdef +std::string CppDecl::ConstructArray(ArrayLiteralNode *node, int dim, std::string type) { + if (type.empty()) { + return ConstructArrayAny(node); // proceed as array of type any if no type info + } + // Generate array ctor call to instantiate array + std::string str = ArrayCtorName(dim, type) + "._new({"s; + for (unsigned i = 0; i < node->GetLiteralsNum(); ++i) { + if (i) + str += ", "s; + if (auto n = node->GetLiteral(i)) { + if (n->IsArrayLiteral()) + str += ConstructArray(static_cast(n), dim-1, type); + else + str += EmitTreeNode(n); + } + } + str += "})"s; + return str; +} + +std::string CppDecl::EmitArrayLiteralNode(ArrayLiteralNode *node) { // TODO: merge with similar in cppdef + if (node == nullptr) + return std::string(); + if (node->GetParent() && + node->GetParent()->IsDecl() || // for var decl init + node->GetParent()->IsIdentifier() || // for default val init in class field decl + node->GetParent()->IsFieldLiteral()) { // for obj decl with struct literal init + // emit code to construct array object with brace-enclosed initializer list + int dim; + std::string str, type; + GetArrayTypeInfo(node, dim, type); + str = ConstructArray(node, dim, type); + return str; + } + + // emit code to build a brace-enclosed intializer list (for rhs of array var assignment op) + std::string str("{"s); + for (unsigned i = 0; i < node->GetLiteralsNum(); ++i) { + if (i) + str += ", "s; + if (auto n = node->GetLiteral(i)) { + str += EmitTreeNode(n); + } + } + str += "}"s; + return str; +} + +std::string BuildArrayType(int dim, std::string typeStr) { + std::string str; + str = "t2crt::Array<"s + typeStr + ">*"s;; + for (unsigned i = 1; i < dim; ++i) { + str = "t2crt::Array<"s + str + ">*"s;; + } + return str; +} + +std::string CppDecl::EmitArrayTypeNode(ArrayTypeNode *node) { + if (node == nullptr) + return std::string(); + std::string str; + + if (node->GetElemType() && node->GetDims()) { + str = BuildArrayType( + node->GetDims()->GetDimensionsNum(), + EmitTreeNode(node->GetElemType())); + } + return str; +} + +std::string CppDecl::EmitFieldNode(FieldNode *node) { + return std::string(); +} + +std::string CppDecl::GetTypeString(TreeNode *node, TreeNode *child) { + std::string 
str; + if (node) { + if (IsGenerator(node)) { // check generator type + if (auto func = GetGeneratorFunc(node)) + return GeneratorName(GetIdentifierName(func)) + "*"s; + } + TypeId k = node->GetTypeId(); + if (k == TY_None || k == TY_Class) { + switch(node->GetKind()) { + case NK_PrimType: + k = static_cast(node)->GetPrimType(); + break; + case NK_Identifier: + case NK_Function: + if (child && child->IsUserType()) + return EmitTreeNode(child); + break; + case NK_UserType: + return EmitTreeNode(node); + } + } + switch(k) { + case TY_Object: + return "t2crt::Object* "s; + case TY_Function: // Need to handle class constructor type: Ctor_* + { + std::string funcName = GetClassOfAssignedFunc(node); + if (!funcName.empty()) + return funcName + "* "; + else + return "t2crt::Function* "s; + } + case TY_Boolean: + return "bool "s; + case TY_Int: + return "long "s; + case TY_String: + return "std::string "s; + case TY_Number: + case TY_Double: + return "double "s; + case TY_Class: + return "t2crt::Object* "s; + case TY_Any: + return "t2crt::JS_Val "s; + } + { + if (child && child->IsStruct() && static_cast(child)->GetProp() == SProp_NA) { + // This will change pending solution for issue #69. + return "t2crt::Object * "s; // object literal type - dynamic-import.ts + } + str = child ? EmitTreeNode(child) : (k == TY_Array ? "t2crt::Array* "s : Emitter::GetEnumTypeId(k)); + if (str != "none"s) + return str + " "s; + } + } + return "t2crt::JS_Val "s; +} + +std::string CppDecl::EmitUserTypeNode(UserTypeNode *node) { + if (node == nullptr) + return std::string(); + { + auto k = node->GetType(); + if(k == UT_Union || k == UT_Inter) + // Generate both vars and arrays of union/intersect type as t2crt::JS_Val of type TY_Object. + return "t2crt::JS_Val"s; + } + std::string str, usrType; + + if (auto n = node->GetId()) { + if (n->IsTypeIdClass()) { + if (mHandler->IsGeneratorUsed(n->GetNodeId())) { + // Check if a generator type : TODO: this needs TI + auto func = mHandler->GetGeneratorUsed(n->GetNodeId()); + usrType = GetIdentifierName(func) + "*"s; + } else + usrType = n->GetName() + "*"s; + } + else if (IsBuiltinObj(n->GetName())) + usrType = "t2crt::"s + n->GetName() + "*"s; + else // TypeAlias Id gets returned here + usrType = n->GetName(); + + str = usrType; // note: array dimension now come from ArrayTypeNode + auto num = node->GetTypeGenericsNum(); + if(num) { + std::string lastChar = ""; + if (str.back() == '*') { + str.pop_back(); + lastChar = "*"; + } + str += "<"s; + for (unsigned i = 0; i < num; ++i) { + if (i) + str += ", "s; + if (auto n = node->GetTypeGeneric(i)) { + str += EmitTreeNode(n); + } + } + str += ">"s; + str += lastChar; + } + } +#if 0 + auto k = node->GetType(); + if(k != UT_Regular) { + if(!str.empty()) + str = "using "s + str + " = "s; + std::string op = k == UT_Union ? " | "s : " & "s; + for (unsigned i = 0; i < node->GetUnionInterTypesNum(); ++i) { + if(i) + str += op; + str += EmitTreeNode(node->GetUnionInterType(i)); + } + } +#endif + return str; +} + +std::string CppDecl::EmitClassNode(ClassNode *node) { + std::string str; + std::string base; + std::string staticProps; + + if (node == nullptr) + return std::string(); + + std::string clsName = node->GetName(); + // 1. c++ class for JS object + base = (node->GetSuperClassesNum() != 0)? 
node->GetSuperClass(0)->GetName() : "t2crt::Object"; + str += "class "s + clsName + " : public "s + base + " {\n"s; + + str += "public:\n"; + + // constructor decl + str += " "s + clsName + "(t2crt::Function* ctor, t2crt::Object* proto);\n"s; + str += " ~"s + clsName + "(){}\n"; + + // class field decl and init. TODO: handle private, protected attrs. + for (unsigned i = 0; i < node->GetFieldsNum(); ++i) { + auto n = node->GetField(i); + str += " "s + EmitTreeNode(n); + + if (n->IsIdentifier()) { + if (HasAttrStatic(static_cast(n))) { + // static field - add field to ctor prop and init later at field def in cpp + staticProps += tab(3) + "this->AddProp(\""s + clsName + "\", t2crt::JS_Val("s + + TypeIdToJSTypeCXX[n->GetTypeId()] + ", &"s + clsName + "::"s + GetIdentifierName(n) + "));\n"s; + } + } + str += ";\n"; + } + for (unsigned i = 0; i < node->GetMethodsNum(); ++i) { + str += tab(1) + EmitFunctionNode(node->GetMethod(i)); + } + + // 2. c++ class for the JS object's JS constructor + std::string indent = tab(1); + if (!staticProps.empty()) staticProps = "\n"s + staticProps; + base = (node->GetSuperClassesNum() != 0)? (node->GetSuperClass(0)->GetName()+"::Ctor"s) : "t2crt::Function"; + str += indent + "class Ctor : public "s + base + " {\n"s; + str += indent + "public:\n"; + str += indent + " Ctor(t2crt::Function* ctor, t2crt::Object* proto, t2crt::Object* prototype_proto) : "s + + base + "(ctor, proto, prototype_proto) {"s + staticProps + tab(2) + "}\n"s; + + // constructor function + for (unsigned i = 0; i < node->GetConstructorsNum(); ++i) { + std::string ctor; + if (auto c = node->GetConstructor(i)) { + ctor = indent + " "s + clsName + "* operator()("s + clsName + "* obj"s; + for (unsigned k = 0; k < c->GetParamsNum(); ++k) { + ctor += ", "s; + if (auto n = c->GetParam(k)) { + ctor += EmitTreeNode(n); + } + } + ctor += ");\n"; + str += ctor; + } + } + + // Generate decl for default constructor function if none declared for class + if (node->GetConstructorsNum() == 0) + str += indent + " "s + clsName + "* operator()("s + clsName + "* obj);\n"s; + + // Generate new() function + str += indent + " "s+clsName+"* _new() {return new "s+clsName+"(this, this->prototype);}\n"s; + str += indent + " virtual const char* __GetClassName() const {return \""s + clsName + " \";}\n"s; + str += indent + "};\n"; + str += indent + "static Ctor ctor;\n"s; + str += "};\n"; + return str; +} + +std::string CppDecl::EmitNewNode(NewNode *node) { + if (node == nullptr || node->GetAttrsNum() > 0) + return std::string(); + + std::string str; + MASSERT(node->GetId() && "No mId on NewNode"); + if (node->GetId() && node->GetId()->IsTypeIdClass()) { + // Generate code to create new obj and call constructor + str = node->GetId()->GetName() + "::ctor("s + node->GetId()->GetName() + "::ctor._new("s; + } else if (IsBuiltinObj(node->GetId()->GetName())) { + // Check for builtin obejcts: t2crt::Object, t2crt::Function, etc. 
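+    // e.g. `new Object()` in the TS source would be lowered by this branch to
+    // `Object::ctor._new(...)` (illustrative example, not verified emitter output)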
+ str = node->GetId()->GetName() + "::ctor._new("s; + } else { + str = "new "s + EmitTreeNode(node->GetId()); + str += "("s; + } + + auto num = node->GetArgsNum(); + for (unsigned i = 0; i < num; ++i) { + if (i || node->GetId()->IsTypeIdClass()) + str += ", "s; + if (auto n = node->GetArg(i)) { + str += EmitTreeNode(n); + } + } + str += ")"s; + mPrecedence = '\024'; + return str; +} + +std::string CppDecl::EmitInterface(StructNode *node) { + std::string str, ifName; + std::string def; + + if (node == nullptr) + return std::string(); + + std::string superClass = "t2crt::Object"; + if (node->GetSupersNum() > 0) { + auto n = node->GetSuper(0); + superClass = EmitTreeNode(n); + if (superClass.back() == '*') + superClass.pop_back(); + } + ifName = GetIdentifierName(node); + str = "class "s + ifName + " : public "s + superClass + " {\n"s; + str += " public:\n"s; + + // Generate code to add prop in class constructor + for (unsigned i = 0; i < node->GetFieldsNum(); ++i) { + if (auto n = node->GetField(i)) { + str += " "s + EmitTreeNode(n) + ";\n"s; + if (n->IsIdentifier()) { + def += tab(1) + GenClassFldAddProp("this", ifName, n->GetName(), + GetTypeString(n, static_cast(n)->GetType()), + TypeIdToJSTypeCXX[hlpGetTypeId(n)]) + ";\n"s; + } + } + } + if (!def.empty()) + def = "\n"+def; + + str += " "s + ifName + "() {};\n"s; + str += " ~"s + ifName + "() {};\n"s; + str += " "s + ifName + "(t2crt::Function* ctor, t2crt::Object* proto);\n"s; + str += " "s + ifName + "(t2crt::Function* ctor, t2crt::Object* proto, std::vector props): "s + superClass + "(ctor, proto, props) {}\n"s; + str += "};\n"s; + + def = ifName + "::"s + ifName + "(t2crt::Function* ctor, t2crt::Object* proto): "s + superClass + "(ctor, proto) {" + def + "}\n"; + AddDefinition(def); + return str; +} + +// Generate C++ class def for the TS num type here and instance in CppDef EmitStructNode. +// TS enum member field can be either Identifier or Literal string (of any character). +// TS enum member value can be either long, double, or string. +// Numeric enum member with no assigned value default to val of preceding member plus 1. +std::string CppDecl::EmitTSEnum(StructNode *node) { + std::string str, init; + TypeId memberValType = TY_None; + + if (node == nullptr) + return std::string(); + + for (unsigned i = 0; i < node->GetFieldsNum(); ++i) { + // Enum member field default to TY_Int if not specified - should this be set by FE? 
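+    // Illustrative (hypothetical TS input): for `enum E { A, B = 5, C }`, members
+    // without a type id default to TY_Int here, and the emitter below produces
+    //   const long A = 0; const long B = 5; const long C = B+1;
+    // as fields of the generated Enum_E class.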
+ if (node->GetField(i)->IsTypeIdNone()) + node->GetField(i)->SetTypeId(TY_Int); + } + + if (node->GetFieldsNum() > 0) { + memberValType = node->GetField(0)->GetTypeId(); + MASSERT(memberValType == TY_String || + memberValType == TY_Double || + memberValType == TY_Int && "Unsupported Enum type"); + } + + str = "class "s; + std::string enumClsName, enumName; + if (auto n = node->GetStructId()) { + enumName = GetIdentifierName(n); + enumClsName = "Enum_"s + enumName; + str += enumClsName + " : public t2crt::Object {\n"s; + } + str += " public:\n"s; + for (unsigned i = 0; i < node->GetFieldsNum(); ++i) { + if (auto n = node->GetField(i)) { + if (n->IsLiteral()) { + MWARNING("Literal enum field not supported yet"); + continue; + } + IdentifierNode* id = static_cast(n); + if (id->GetInit()) { + init = EmitTreeNode(id->GetInit()); + } else { + if (memberValType == TY_Int || memberValType == TY_Double) { + // if numeric member and no initialzer, set to 0 or last field + 1 + if (i == 0) + init = "0"; + else + init = GetIdentifierName(node->GetField(i-1)) + "+1"s; + } + } + str += " const "s + EmitTreeNode(n) + " = "s + init + ";\n"s; + } + } + str += " "s + enumClsName + "() {};\n"s; + str += " ~"s + enumClsName + "() {};\n"s; + + std::string def = enumClsName + "* "s + enumName + ";\n"s; + AddDefinition(def); + str += "};\nextern "s + def; + return str; +} + +std::string CppDecl::EmitStructNode(StructNode *node) { + if (node == nullptr) + return std::string(); + std::string str; + const char *suffix = ";\n"; + switch(node->GetProp()) { +#if 0 + case SProp_CStruct: + str = "struct "s; + break; + case SProp_NA: + str = ""s; + break; +#endif + case SProp_TSEnum: + str = EmitTSEnum(node); + return str; + case SProp_TSInterface: + case SProp_NA: // for classes generated by TI + str = EmitInterface(node); + return str; + default: + return std::string(); + MASSERT(0 && "Unexpected enumerator"); + } +#if 0 + if (auto n = node->GetStructId()) { + str += EmitIdentifierNode(n); + } + + auto num = node->GetTypeParamsNum(); + if(num) { + str += "<"s; + for (unsigned i = 0; i < num; ++i) { + if (i) + str += ", "s; + if (auto n = node->GetTypeParamAtIndex(i)) + str += EmitTreeNode(n); + } + str += ">"s; + } + + for (unsigned i = 0; i < node->GetSupersNum(); ++i) { + str += i ? ", "s : " extends "s; + if (auto n = node->GetSuper(i)) + str += EmitTreeNode(n); + } + str += " {\n"s; + for (unsigned i = 0; i < node->GetFieldsNum(); ++i) { + if (auto n = node->GetField(i)) { + str += EmitTreeNode(n) + suffix; + } + } + + if (auto n = node->GetNumIndexSig()) + str += EmitNumIndexSigNode(n) + "\n"s;; + } + if (auto n = node->GetStrIndexSig()) { + str += EmitStrIndexSigNode(n) + "\n"; + } + + for (unsigned i = 0; i < node->GetMethodsNum(); ++i) { + if (auto n = node->GetMethod(i)) { + std::string func = EmitFunctionNode(n); + if (func.substr(0, 9) == "function ") + func = func.substr(9); + size_t index = func.rfind(')'); + if (index != std::string::npos) { + std::string t = func.substr(index); + Replace(t, "=>", ":"); + func = func.substr(0, index) + t; + } + str += func.length() > 2 && func.substr(func.length() - 2) == ";\n" ? 
func : func + ";\n"s; + } + } + str += "}\n"s; +#endif + return HandleTreeNode(str, node); +} + +std::string CppDecl::EmitNumIndexSigNode(NumIndexSigNode *node) { + return std::string(); +} + +std::string CppDecl::EmitStrIndexSigNode(StrIndexSigNode *node) { + return std::string(); +} + +std::string CppDecl::EmitTypeAliasNode(TypeAliasNode* node) { + if (node == nullptr) + return std::string(); + std::string str, alias; + + if (auto n = node->GetId()) { + if (n->IsUserType()) { + str = EmitTreeNode(n); + if (str.back() == '*') + str.pop_back(); + } + } + if (auto m = node->GetAlias()) { + if (m->IsUserType()) { + alias = EmitTreeNode(m); + if (alias.back() == '*') + alias.pop_back(); + str = "using "s + str + " = "s + alias + ";\n"; + } else { // if (m->IsStruct()) { + // todo + str = "// type alias for "s + str + '\n';; + } + } + return str; +} + +std::string CppDecl::EmitLiteralNode(LiteralNode *node) { + if (node == nullptr) + return std::string(); + LitData lit = node->GetData(); + std::string str(AstDump::GetEnumLitData(lit)); + if(lit.mType == LT_StringLiteral || lit.mType == LT_CharacterLiteral) + str = '"' + str + '"'; + mPrecedence = '\030'; + str = HandleTreeNode(str, node); + if (auto n = node->GetType()) { + str += ": "s + EmitTreeNode(n); + } + if (auto n = node->GetInit()) { + str += " = "s + EmitTreeNode(n); + } + return str; +} + +} // namespace maplefe diff --git a/src/MapleFE/ast2cpp/src/cpp_definition.cpp b/src/MapleFE/ast2cpp/src/cpp_definition.cpp new file mode 100644 index 0000000000000000000000000000000000000000..d6b1935446c577ae09fecea01366394615c1d2df --- /dev/null +++ b/src/MapleFE/ast2cpp/src/cpp_definition.cpp @@ -0,0 +1,1584 @@ +/* + * Copyright (C) [2021-2022] Futurewei Technologies, Inc. All rights reverved. + * + * OpenArkFE is licensed under the Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ + +#include "cpp_definition.h" +#include "helper.h" + +namespace maplefe { + +std::string CppDef::EmitCtorInstance(ClassNode *c) { + std::string str, thisClass, ctor, proto, prototypeProto; + ctor = "&t2crt::Function::ctor"; + thisClass = c->GetName(); + if (c->GetSuperClassesNum() == 0) { + proto = "t2crt::Function::ctor.prototype"; + prototypeProto = "t2crt::Object::ctor.prototype"; + } else { + proto = c->GetSuperClass(0)->GetName() + "::ctor"s; + prototypeProto = proto + ".prototype"s; + proto.insert(0, "&"s, 0, std::string::npos); + } + str = "\n// Init class ctor as a static class field for "+ thisClass+ "\n"s; + str += thisClass + "::Ctor "s + thisClass+"::ctor("s +ctor+","s+proto+","+prototypeProto+");\n\n"s; + + // piggy back generation of static field definition + for (unsigned i = 0; i < c->GetFieldsNum(); ++i) { + auto n = c->GetField(i); + if (n->IsIdentifier() && + HasAttrStatic(static_cast(n))) { + str += mCppDecl.GetTypeString(n, n) + " "s + thisClass + "::"s + EmitTreeNode(n); + str += ";\n"; + } + } + + return str; +} + +// Emit default constructor func def and instance +std::string CppDef::EmitDefaultCtor(ClassNode *c) { + if (c == nullptr) + return std::string(); + + std::string str, className; + className = c->GetName(); + str = "\n"s; + str += className + "* "s + className + "::Ctor::operator()("s + className + "* obj)"s; + str += "{ return obj; }\n"s; + str += EmitCtorInstance(c); + + return str; +} + +std::string CppDef::EmitCppCtor(ClassNode* node) { + std::string str, base, props; + props = EmitClassProps(node); + if (!props.empty()) + props = "\n"s + props; + base = (node->GetSuperClassesNum() != 0)? node->GetSuperClass(0)->GetName() : "t2crt::Object"; + str += node->GetName() + "::"s + node->GetName() + "(t2crt::Function* ctor, t2crt::Object* proto): "s + + base + "(ctor, proto)" + " {"s + props +"}\n"s; + return str; +} + +std::string CppDef::EmitModuleNode(ModuleNode *node) { + if (node == nullptr) + return std::string(); + std::string module = GetModuleName(); + + // include directives + std::string str("// TypeScript filename: "s + node->GetFilename() + "\n"s); + str += "#include \n#include \""s + GetBaseFilename() + ".h\"\n\n"s; + + // start a namespace for this module + str += "namespace "s + module + " {\n"s; + + // definitions of default class constructors + for (unsigned i = 0; i < node->GetTreesNum(); ++i) { + if (auto n = node->GetTree(i)) + if (n->IsClass()) { + str += EmitCppCtor(static_cast(n)); + if (static_cast(n)->GetConstructorsNum() == 0) + str += EmitDefaultCtor(static_cast(n)); + } + } + + // definitions + str += mCppDecl.GetDefinitions(); + + // definitions of all top level functions in current module + CfgFunc *mod = mHandler->GetCfgFunc(); + auto num = mod->GetNestedFuncsNum(); + for(unsigned i = 0; i < num; ++i) { + CfgFunc *func = mod->GetNestedFuncAtIndex(i); + TreeNode *node = func->GetFuncNode(); + if (!IsClassMethod(node)) { + hFuncTable.AddTopLevelFunc(node); + hFuncTable.AddNameIsTopLevelFunc(GetIdentifierName(node)); + } + std::string s = EmitTreeNode(node) + GetEnding(node); + str += s; + } + + // definition of init function of current module + str += R"""(void __init_func__() { + // bind "this" to current module + static bool __init_once = false; + if (__init_once) return; + __init_once = true; +)""" + mCppDecl.GetInits(); + mIsInit = true; + for (unsigned i = 0; i < node->GetTreesNum(); ++i) { + if (auto n = node->GetTree(i)) { + if (!n->IsClass()) { + std::string s = EmitTreeNode(n); + if (!s.empty()) + str += tab(1) + s 
+ (s.back()=='\n'? "": ";\n"); + } + } + } + str += R"""(} + + t2crt::Object __module; +} // namespace of current module +)"""; + + AST_Handler *handler = mHandler->GetASTHandler(); + HandlerIndex idx = handler->GetHandlerIndex(node->GetFilename()); + // If the program starts from this module, generate the main function + if (idx == 0) { + str += R"""( +int main(int argc, char **argv) { + std::cout << std::boolalpha; +)""" + " "s + module + R"""(::__init_func__(); // call its __init_func__() + return 0; +} +)"""; + } + return str; +} + +std::string CppDef::EmitExportNode(ExportNode *node) { + if (node == nullptr) + return std::string(); + std::string str; + auto num = node->GetPairsNum(); + for (unsigned i = 0; i < node->GetPairsNum(); ++i) { + if (auto x = node->GetPair(i)) { + if (x->IsDefault() && !x->IsRef()) { + if (auto n = x->GetBefore()) { + std::string v = EmitTreeNode(n); + str += "__export::__default = "s + v + ";\n"s; + } + } + if(node->GetTarget() == nullptr && + !x->IsDefault() && + !x->IsEverything() && + !x->GetAsNamespace() && + !x->IsSingle()) { + //str += EmitXXportAsPairNode(x); + if (auto n = x->GetBefore()) + if ((!n->IsIdentifier() || static_cast(n)->GetInit() != nullptr) && + !n->IsStruct() && + !n->IsClass() && + !n->IsUserType()) + str += EmitTreeNode(n); + } + } + } + return HandleTreeNode(str, node); +} + +std::string CppDef::EmitImportNode(ImportNode *node) { + return std::string(); +} + +std::string CppDef::EmitXXportAsPairNode(XXportAsPairNode *node) { + return std::string(); +} + +inline bool IsClassMethod(FunctionNode* f) { + return (f && f->GetParent() && f->GetParent()->IsClass()); +} + +std::string CppDef::EmitClassProps(TreeNode* node) { + std::string clsFd, addProp; + MASSERT(node->IsClass() && "Not NK_Class node"); + ClassNode* c = static_cast(node); + for (unsigned i = 0; i < c->GetFieldsNum(); ++i) { + auto node = c->GetField(i); + if (!node->IsIdentifier()) + // StrIndexSig, NumIndexSig, ComputedName have to be handled at run time + continue; + IdentifierNode* id = static_cast(node); + if (HasAttrStatic(id)) // static props are added to class ctor + continue; + std::string fdName = node->GetName(); + std::string fdType = mCppDecl.GetTypeString(node, id->GetType()); + TypeId typeId = node->GetTypeId(); + if (typeId == TY_None) { + if (auto n = id->GetType()) { + if (n->IsPrimType()) + typeId = static_cast(n)->GetPrimType(); + else if (n->IsUserType()) + typeId = static_cast(n)->GetTypeId(); + } + } + addProp += " "s + GenClassFldAddProp("this", c->GetName(), fdName, fdType, TypeIdToJSTypeCXX[typeId]) + ";\n"s; + } + return " // Add class fields to obj prop list\n"s + clsFd + addProp; +} + +// "var" declarations in TS/JS functions are function scope. +// Duplicate decls may appear in different blocks within +// function but should all refer to the same function scope var. +// +// So we scan for JS_Var decls with dup names in a function and emit +// a decl list with unique names for insert into function definition. 
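+//
+// e.g. (illustrative) for "function f() { var x = 1; { var x = 2; } }" a single
+// declaration of x is emitted once at the top of the C++ function body; the
+// individual DeclNodes then only emit the assignments.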
+// +// The var may be initialized to different values in different +// blocks which will be done in the individual DeclNodes (re: var-dup.ts) +std::string CppDef::EmitFuncScopeVarDecls(FunctionNode *node) { + std::unordered_mapvarDeclsInScope; + ASTScope* s = node->GetScope(); + for (int i = 0; i < s->GetDeclNum(); i++) { + // build list of var decls (no dups) in function scope + TreeNode* n = s->GetDecl(i); + if (!n->IsDecl()) + continue; + DeclNode* d = static_cast(n); + if (d->GetProp() == JS_Var) { + // skip var decl name duplicates - same name but diff + // type is illegal in typescript so not checked here + std::unordered_map::iterator it; + it = varDeclsInScope.find(d->GetVar()->GetName()); + if (it == varDeclsInScope.end()) { + varDeclsInScope[d->GetVar()->GetName()] = d; + } + } + } + std::string str; + for (auto const&[key, val] : varDeclsInScope) { + // Emit decl for the var (just type and name). The init part + // to be emitted when corresponding DeclNode is processed + str += tab(1) + mCppDecl.EmitTreeNode(val->GetVar()) + ";\n"s; + } + return str; +} + +std::string CppDef::EmitYieldNode(YieldNode *node) { + if (node == nullptr) + return std::string(); + //std::string str(node->IsTransfer() ? "yield* " : "yield "); + std::string str, res; + if (auto n = node->GetResult()) + res = EmitTreeNode(n); + else + res = "undefined"; + + std::string yieldLabel = GenFnLabels.NextYieldLabel(); + str += " yield = &&" + yieldLabel + ";\n"; // save yp + str += " res.value = t2crt::JS_Val(" +res+ ");\n"; // init value and return + str += " res.done = false;\n"; + str += " return;\n"; + str += yieldLabel + ":\n"; // label for this yp + + mPrecedence = '\024'; + return str; +} + +std::string CppDef::EmitWhileLoopNode(WhileLoopNode *node) { +// return(Emitter::EmitWhileLoopNode(node)); + + if (node == nullptr) + return std::string(); + std::string str; + std::string loopLabel; + + if(auto n = node->GetLabel()) { + str = EmitTreeNode(n) + ":\n"s; + } + + if (mIsGenerator) { // insert label and loop cond check + loopLabel = GenFnLabels.NextLoopLabel(); + str += loopLabel + ":\n"; + if (auto n = node->GetCond()) { + std::string cond = EmitTreeNode(n); + str += " if (!(" +cond+ "))\n"; + str += " goto " +loopLabel+ "_exit;\n"; + } + } else { // normal while loop + str += "while("s; + if (auto n = node->GetCond()) { + str += EmitTreeNode(n); + } + str += ')'; + } + + if (auto n = node->GetBody()) { + str += EmitTreeNode(n) + GetEnding(n); + if (mIsGenerator) { + str.insert(str.find_first_of("{"), " "); + str.insert(str.find_last_of("}"), " "); + } + } + + if (mIsGenerator) { // insert loop back and label at loop exit + str += " goto " +loopLabel+ ";\n"; + str += loopLabel + "_exit:"; + } + + return HandleTreeNode(str, node); +} + + +std::string CppDef::EmitFunctionNode(FunctionNode *node) { + if (mIsInit || node == nullptr) + return std::string(); + + bool isTopLevel = hFuncTable.IsTopLevelFunc(node); + std::string str; + str += "\n"; + str += FunctionHeader(node, mCppDecl.GetTypeString(node->GetRetType(), node->GetRetType())); + mIsGenerator = node->IsGenerator(); + + int bodyPos = str.size(); + if (auto n = node->GetBody()) { + auto varDecls = EmitFuncScopeVarDecls(node); + auto s = EmitBlockNode(n); + if (isTopLevel) + Emitter::Replace(s, "this", "_this"); + if(s.empty() || s.front() != '{') + str += "{\n"s + s + "}\n"s; + else + str += s; + str.insert(bodyPos+2, varDecls); // skip over leading "{\n" of generated block + } else + str += "{}\n"s; + + if (mIsGenerator) { + 
str.insert(str.find_first_of("{")+1, GeneratorFn_start); + str.insert(str.find_last_of("}"), GeneratorFn_return); + } + + if (node->IsConstructor()) { + Emitter::Replace(str, "this->", "obj->", 0); + std::string ctorBody; + ctorBody += " return obj;\n"s; + str.insert(str.size()-2, ctorBody, 0, std::string::npos); + str += EmitCtorInstance(static_cast(node->GetParent())); + } + + if (mIsGenerator) { + mIsGenerator = false; + GenFnLabels.ResetLabels(); + } + return str; +} + +std::string CppDef::EmitIdentifierNode(IdentifierNode *node) { + if (node == nullptr) + return std::string(); + std::string str = GetQualifiedName(node); + if (auto n = node->GetInit()) { + str += " = "s + EmitTreeNode(n); + } + mPrecedence = '\030'; + return str; +} + +// Generate code to create object instance that was declared using +// an object literal. First process the object literals in the +// StructLiteralNode argument to build the proplist list (vector of +// type ObjectProp) to be used as initializer, then generate call to +// the builtin Object constructor with initializer as parameter. +std::string CppDef::EmitStructLiteralNode(StructLiteralNode* node) { + std::string str; + int stops = 2; + // Build proplist to be used as initializer + str += "\n"s + tab(stops) + "std::vector({\n"s; + for (unsigned i = 0; i < node->GetFieldsNum(); ++i) { + if (i) + str += ",\n"s; + if (auto field = node->GetField(i)) { + auto lit = field->GetLiteral(); + std::string fieldName = EmitTreeNode(field->GetFieldName()); + TypeId typId = lit->GetTypeId(); + std::string fieldVal = EmitTreeNode(lit); + str += tab(stops+1); + switch(typId) { + case TY_Object: + break; + case TY_Function: + break; + case TY_Array: + fieldVal = EmitTreeNode(lit); // ArrayLiteralNode + str += "std::make_pair(\""s + fieldName + "\", t2crt::JS_Val("s + fieldVal + "))"s; + break; + case TY_Boolean: + str += "std::make_pair(\""s + fieldName + "\", t2crt::JS_Val(bool("s + fieldVal + ")))"s; + break; + case TY_None: + if (fieldVal.compare("true") == 0 || fieldVal.compare("false") == 0) + str += "std::make_pair(\""s + fieldName + "\", t2crt::JS_Val(bool("s + fieldVal + ")))"s; + else + // if no type info, use type any (JS_Val) + str += "std::make_pair(\""s + fieldName + "\", t2crt::JS_Val("s + fieldVal + "))"s; + break; + case TY_Int: + str += "std::make_pair(\""s + fieldName + "\", t2crt::JS_Val(int64_t("s + fieldVal + ")))"s; + break; + case TY_String: + str += "std::make_pair(\""s + fieldName + "\", t2crt::JS_Val("s + fieldVal + "))"s; + break; + case TY_Number: + case TY_Double: + str += "std::make_pair(\""s + fieldName + "\", t2crt::JS_Val(double("s + fieldVal + ")))"s; + break; + case TY_Class: + // Handle embedded t2crt::ObjectLiterals recursively + if (lit->IsStructLiteral()) { + std::string props = EmitStructLiteralNode(static_cast(lit)); + str += "std::make_pair(\""s + fieldName + "\", t2crt::JS_Val("s + props + "))"s; + } + break; + } + } + } + str += " })"s; + // Generate code to call builtin Object constructor with the initializer proplist. 
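+  // e.g. (illustrative) the TS literal {n: 1, s: "x"} yields roughly
+  //   t2crt::Object::ctor._new({ std::make_pair("n", t2crt::JS_Val(int64_t(1))),
+  //                              std::make_pair("s", t2crt::JS_Val("x")) })
+  // with the pairs wrapped in the ObjectProp vector built above.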
+ str = "t2crt::Object::ctor._new("s + str + ")"s; + return str; +} + +std::string CppDef::GenDirectFieldInit(std::string varName, StructLiteralNode* node) { + std::string str; + //str += ";\n"s; + for (unsigned i = 0; i < node->GetFieldsNum(); ++i) { + if (auto field = node->GetField(i)) { + auto lit = field->GetLiteral(); + std::string fieldName = EmitTreeNode(field->GetFieldName()); + std::string fieldVal = EmitTreeNode(lit); + if (mHandler->IsCppField(field)) // Check if it accesses a Cxx class field + str += tab(1) + varName + "->"s + fieldName + " = "s + fieldVal + ";\n"s; + else + str += tab(1) + "(*"s + varName + ")[\""s + fieldName + "\"] = "s + fieldVal + ";\n"s; + } + } + return str; +} + +std::string CppDef::GenObjectLiteral(TreeNode* var, std::string varName, TreeNode* varIdType, StructLiteralNode* node) { + if (varName.empty()) + return std::string(); + + std::string str; + // UserType can be TS interface, class, type alias, builtin (t2crt::Object, t2crt::Record..) + UserTypeNode* userType = (varIdType && varIdType->IsUserType())? (UserTypeNode*)varIdType: nullptr; + + if (userType == nullptr) { + // no type info - create instance of builtin t2crt::Object with proplist + str = varName+ " = "s + EmitTreeNode(node); + } else if (IsVarTypeClass(var)) { + // init var of type TS class + // - create obj instance of user defined class and do direct field access init + // - todo: handle class with generics + str = varName+ " = "s +userType->GetId()->GetName()+ "::ctor._new();\n"s; + str += GenDirectFieldInit(varName, node); + } else { + // type is builtin (e.g. t2crt::Record) and StructNode types (e.g. TSInterface) + // create instance of type but set constructor to the builtin t2crt::Object. + str = varName+ " = new "s +EmitUserTypeNode(userType)+ "(&t2crt::Object::ctor, t2crt::Object::ctor.prototype);\n"s; + auto n = mHandler->FindDecl(static_cast(userType->GetId())); + if (n && n->IsStruct() && static_cast(n)->GetProp() == SProp_TSInterface) { + str += GenDirectFieldInit(varName, node); // do direct field init + } + } + return str; +} + +// Generate code to construct an array of type any from an ArrayLiteral. +std::string CppDef::ConstructArrayAny(ArrayLiteralNode *node) { + if (node == nullptr || !node->IsArrayLiteral()) + return std::string(); + + // Generate array ctor call to instantiate array + std::string literals; + for (unsigned i = 0; i < node->GetLiteralsNum(); ++i) { + if (i) + literals += ", "s; + if (auto n = node->GetLiteral(i)) { + if (n->IsArrayLiteral()) + // Recurse to handle array elements that are arrays + literals += ConstructArrayAny(static_cast(n)); + else { + // Wrap element in JS_Val. C++ class constructor of JS_Val + // will set tupe tag in JS_Val according to element type. 
+ literals += "t2crt::JS_Val("s + EmitTreeNode(n) + ")"s; + } + } + } + std::string str = ArrayCtorName(1, "t2crt::JS_Val") + "._new({"s + literals + "})"s; + return str; +} + +// Generate code to construct an array object with brace-enclosed initializer list +std::string CppDef::ConstructArray(ArrayLiteralNode *node, int dim, std::string type) { + if (type.empty()) { + return ConstructArrayAny(node); // proceed as array of type any if no type info + } + // Generate array ctor call to instantiate array + std::string str = ArrayCtorName(dim, type) + "._new({"s; + for (unsigned i = 0; i < node->GetLiteralsNum(); ++i) { + if (i) + str += ", "s; + if (auto n = node->GetLiteral(i)) { + if (n->IsArrayLiteral()) + str += ConstructArray(static_cast(n), dim-1, type); + else + str += EmitTreeNode(n); + } + } + str += "})"s; + return str; +} + +// decl of global var is handled by EmitDeclNode in cpp_declaration +// decl of function vars of type JS_Var is handled in EmitFuncSCopeVarDecls +// This function handles init of global/func var, and decl/init of func let/const. +// +// Declaration of Javascript "var", "let" an "const" variables: +// - "var" decls are function/global scoped +// - "let" and "const" are block scoped +// TS/JS allows duplicate "var" declarations in global scope as well as +// function scope. Duplicate global scope var decls are resolved +// by the front end which make only 1 decl for the dup and changes any inits +// in the dup decls to assigments. Duplicate function scope var decls are +// handled in CppDef::EmitFuncScopeVarDecls. +// +std::string CppDef::EmitDeclNode(DeclNode *node) { + if (node == nullptr) + return std::string(); + + std::string str, varStr; + TreeNode* idType = nullptr; + TypeId varType = TY_None; + + //std::string str(Emitter::GetEnumDeclProp(node->GetProp())); + + // For func var of JS_Var and global vars, emit var name + // For func var of JS_Let/JS_Const, emit both var type & name + if (auto n = node->GetVar()) { + varType = n->GetTypeId(); + if (mIsInit || node->GetProp() == JS_Var) { + // handle declnode inside for-of/for-in (uses GetSet() and has null GetInit()) + if (!node->GetInit() && node->GetParent() && !node->GetParent()->IsForLoop()) + return std::string(); + varStr = EmitTreeNode(n); // emit var name only + } else { + varStr = mCppDecl.EmitTreeNode(n); // emit both var type and name + } + if (n->IsIdentifier()) { + idType = static_cast(n)->GetType(); + } + } + if (auto n = node->GetInit()) { + if (n->IsStructLiteral()) + str += GenObjectLiteral(node->GetVar(), varStr, idType, static_cast(n)); + else if (node->GetVar()->IsIdentifier() && n->IsIdentifier() && n->IsTypeIdClass()) + str += varStr + "= &"s + n->GetName() + "::ctor"s; // init with ctor address + else if (n->IsFunction()) { + if (hFuncTable.IsTopLevelFunc(n)) { + str += varStr + " = new "s + "Cls_" + n->GetName() + "()"s; + hFuncTable.AddNameIsTopLevelFunc(varStr); + } + } else { + str += varStr + " = "; + if (varType == TY_None) // no type info. 
assume TY_Any and wrap val in JS_Val + str += "t2crt::JS_Val("s + EmitTreeNode(n) + ")"s; + else + str += EmitTreeNode(n); + } + } else { + str = varStr; + } + return str; +} + +static bool QuoteStringLiteral(std::string &s) { + if(s.front() != '"' || s.back() != '"') + return false; + s = s.substr(1, s.length() - 2); + Emitter::Replace(s, "\"", "\\\"", 0); + s = "\"" + s + "\""; + return true; +} + +std::string EmitSuperCtorCall(TreeNode* node) { + while (node->GetKind() && !node->IsClass()) + node = node->GetParent(); + if (node && node->IsClass()) { + std::string base, str; + base = (static_cast(node)->GetSuperClassesNum() != 0)? static_cast(node)->GetSuperClass(0)->GetName() : "t2crt::Object"; + str = " "s + base + "::ctor"s; + return str; + } + return ""s; +} + +std::string CppDef::EmitCallNode(CallNode *node) { + if (node == nullptr) + return std::string(); + bool log = false; + bool isSuper = false; + std::string str; + if (auto n = node->GetMethod()) { + if(n->IsFunction()) { + str += static_cast(n)->GetName(); + } else { + auto s = EmitTreeNode(n); + if(s.compare(0, 12, "console->log") == 0) { + str += "std::cout"s; + log = true; + } else if (s.compare("super") == 0) { + isSuper = true; + str += EmitSuperCtorCall(node); + } else if (hFuncTable.IsTopLevelFuncName(s)) { + // s can be either ts function or ts var of function type + str += "(*"s + s + ")"s; + } else if (hFuncTable.IsImportedField(s)) { + str += "(*"s + s + ")"s; + } else if (hFuncTable.IsStaticMember(s)) { + str += "("s + s +")"s; + } else if (s.find("->") != std::string::npos) + str += s; + else + str += "(*("s + s + "))"s; + } + } + if(!log) + str += isSuper? "(obj"s : "("s; + unsigned num = node->GetArgsNum(); + for (unsigned i = 0; i < num; ++i) { + if(log) { + std::string s = EmitTreeNode(node->GetArg(i)); + if(QuoteStringLiteral(s)) { + //if(num > 1) + // s = "\"'\""s + s + "\"'\""s; + } else if(mPrecedence <= 13) // '\015' + s = "("s + s + ")"s; + if (i) + str += " << ' ' "s; + str += " << "s + s; + } else { + if (i || isSuper) + str += ", "s; + if (auto n = node->GetArg(i)) + str += EmitTreeNode(n); + } + } + if(!log) + str += ")"s; + else + str += " << std::endl;"; + mPrecedence = '\024'; + return str; +} + +std::string CppDef::EmitPrimTypeNode(PrimTypeNode *node) { + return mCppDecl.EmitPrimTypeNode(node); +} + +std::string CppDef::EmitPrimArrayTypeNode(PrimArrayTypeNode *node) { + return std::string(); +} + +inline bool IsBracketNotationProp(TreeNode *node) { + return node->IsArrayElement() && + static_cast(node)->GetArray()->IsTypeIdClass(); +} + +std::string CppDef::EmitArrayElementNode(ArrayElementNode *node) { + if (node == nullptr) + return std::string(); + std::string str; + if (IsBracketNotationProp(node)) { + bool unused; + str = EmitBracketNotationProp(node, OPR_Arrow, false, unused); + return str; + } + if (auto n = node->GetArray()) { + std::string s = EmitTreeNode(n); + if (mIsInit && s == "this") + str = "__module"s; + else + str = "(*"s + s + ")"s; + if(mPrecedence < '\024') + str = "("s + str + ")"s; + } + + + for (unsigned i = 0; i < node->GetExprsNum(); ++i) { + if (auto n = node->GetExprAtIndex(i)) { + std::string expr; + expr = "["s + EmitTreeNode(n) + "]"s; + if (i < node->GetExprsNum()-1) + str = "(*"s + str + expr + ")"s; + else + str += expr; + } + } + + mPrecedence = '\030'; + return HandleTreeNode(str, node); +} + + +std::string CppDef::EmitArrayLiteralNode(ArrayLiteralNode *node) { + if (node == nullptr) + return std::string(); + if (node->GetParent() && + 
node->GetParent()->IsDecl() || // for var decl init + node->GetParent()->IsIdentifier() || // for default val init in class field decl + node->GetParent()->IsFieldLiteral()) { // for obj decl with struct literal init + // emit code to construct array object with brace-enclosed initializer list + int dim; + std::string str, type; + GetArrayTypeInfo(node, dim, type); + str = ConstructArray(node, dim, type); + return str; + } + + // emit code to build a brace-enclosed intializer list (for rhs of array var assignment op) + std::string str("{"s); + for (unsigned i = 0; i < node->GetLiteralsNum(); ++i) { + if (i) + str += ", "s; + if (auto n = node->GetLiteral(i)) { + str += EmitTreeNode(n); + } + } + str += "}"s; + return str; +} + +std::string CppDef::EmitFieldNode(FieldNode *node) { + if (node == nullptr) + return std::string(); + std::string upper, field, propType; + bool isRhs = false; // indicate if field is rhs (val) or lhs (ref) + auto upnode = node->GetUpper(); + bool upperIsGenerator = false; + if (upnode) { + upper = EmitTreeNode(upnode); + isRhs = !mHandler->IsDef(upnode); + upperIsGenerator = IsGenerator(upnode); // TODO: await TI fix for generator3.ts + } + if (auto n = node->GetField()) { + if (isRhs) { + propType = hlpGetJSValTypeStr(hlpGetTypeId(n)); + } + field = EmitTreeNode(n); + if (n->IsIdentifier()) { // check for static class field or method + if (auto decl = mHandler->FindDecl(static_cast(n))) { + if ((decl->IsFunction() && HasAttrStatic(static_cast(decl))) || + (decl->IsIdentifier() && HasAttrStatic(static_cast(decl)))) { + std::string fdStr = GetClassName(decl) + "::"s + field; + hFuncTable.AddMemberIsStatic(fdStr); + return fdStr; + } + } + } + } + if (upper.empty() || field.empty()) // Error if either is empty + return "%%%Empty%%%"; + if (field == "length") // for length property + return upper + "->size()"s; + if (mCppDecl.IsImportedModule(upper) || upnode->GetTypeId() == TY_Module || upnode->GetTypeId() == TY_Namespace) // for imported module + return hFuncTable.AddFieldIsImported(upper + "::"s + field); + if (mHandler->IsCppField(node->GetField()) || // check if it accesses a Cxx class field + node->IsTypeIdFunction()) + return upper + "->"s + field; + if (isRhs) + return "(*"s + upper + ").GetProp"s + propType + "(\""s + field + "\")"s; + return "(*"s + upper + ")[\""s + field + "\"]"s; +} + +std::string CppDef::EmitCondBranchNode(CondBranchNode *node) { + if (node == nullptr) + return std::string(); + std::string str("if("s); + if (auto n = node->GetCond()) { + auto cond = EmitTreeNode(n); + str += Clean(cond); + } + str += ")"s; + if (auto n = node->GetTrueBranch()) { + str += EmitTreeNode(n) + GetEnding(n); + } + if (auto n = node->GetFalseBranch()) { + str += "else"s + EmitTreeNode(n) + GetEnding(n); + } + if(auto n = node->GetLabel()) { + str += "__label_break_"s + EmitTreeNode(n) + ":;\n"s; + } + return str; +} + +std::string CppDef::EmitBlockNode(BlockNode *node) { + if (node == nullptr) + return std::string(); + std::string str("{\n"); + for (unsigned i = 0; i < node->GetChildrenNum(); ++i) { + if (auto n = node->GetChildAtIndex(i)) { + std::string s = EmitTreeNode(n); + if (n->IsYield()) { + str += s; + continue; + } + if (!s.empty()) + str += " "s + s + GetEnding(n); + } + } + str += "}\n"s; + if(auto n = node->GetLabel()) { + str += "__label_break_"s + EmitTreeNode(n) + ":;\n"s; + } + mPrecedence = '\030'; + return str; +} + +std::string CppDef::EmitForLoopNode(ForLoopNode *node) { + if (node == nullptr) + return std::string(); + std::string str; + 
str += "for("s; + switch(node->GetProp()) { + case FLP_Regular: + { + for (unsigned i = 0; i < node->GetInitsNum(); ++i) + if (auto n = node->GetInitAtIndex(i)) { + if (i) + str += ", "s; + str += EmitTreeNode(n); + } + str += "; "s; + if (auto n = node->GetCond()) { + str += EmitTreeNode(n); + } + str += "; "s; + for (unsigned i = 0; i < node->GetUpdatesNum(); ++i) + if (auto n = node->GetUpdateAtIndex(i)) { + if (i) + str += ", "s; + str += EmitTreeNode(n); + } + break; + } + case FLP_JSIn: + { + if (auto n = node->GetVariable()) { + str += EmitTreeNode(n); + } + str += " in "s; + if (auto n = node->GetSet()) { + str += EmitTreeNode(n); + } + break; + } + case FLP_JSOf: + { + if (auto n = node->GetVariable()) { + std::string s = EmitTreeNode(n); + str += "auto "s + Clean(s); + } + str += " : "s; + if (auto n = node->GetSet()) { + str += EmitTreeNode(n); + if (n->IsIdentifier() && static_cast(n)->IsTypeIdArray()) { + str += "->elements"s; + } + } + break; + } + case FLP_NA: + return "FLP_NA"s; + default: + MASSERT(0 && "Unexpected enumerator"); + } + str += ")"s; + + auto label = node->GetLabel(); + std::string lstr; + if(label) { + lstr = EmitTreeNode(label); + str += "{\n"s; + } + if (auto n = node->GetBody()) { + str += EmitTreeNode(n) + GetEnding(n); + } + if(label) + str += "__label_cont_"s + lstr + ":;\n}\n"s + "__label_break_"s + lstr + ":;\n"s; + return str; +} + +std::string CppDef::EmitBreakNode(BreakNode *node) { + if (node == nullptr) + return std::string(); + auto target = node->GetTarget(); + std::string str = target ? "goto __label_break_"s + EmitTreeNode(target) : "break"s; + return str; +} + +std::string CppDef::EmitContinueNode(ContinueNode *node) { + if (node == nullptr) + return std::string(); + auto target = node->GetTarget(); + std::string str = target ? "goto __label_cont_"s + EmitTreeNode(target) : "continue"s; + return str; +} + +TypeId CppDef::GetTypeIdFromDecl(TreeNode* node) { + TypeId typeId = TY_None; + + if (auto typ = FindDeclType(node)) { + if (typ->IsPrimType()) + typeId = static_cast(typ)->GetPrimType(); + } + return typeId; +} + +// Check if a bracket notation property is a class member field +bool CppDef::IsClassField(ArrayElementNode* node, std::string propKey) { + if (!node->GetArray()->IsIdentifier()) { + return false; + } + // find declared type of bracket notation obj; if class type + // if class type, lookup class decl and check if prop is a class member fd + if (auto typ = FindDeclType(node->GetArray())) + if (typ->IsUserType() && static_cast(typ)->IsTypeIdClass()) + if (auto classId = static_cast(typ)->GetId()) + if (auto n = mHandler->FindDecl(static_cast(classId))) + if (n->IsClass()) + for (unsigned i = 0; i < static_cast(n)->GetFieldsNum(); ++i) { + auto fd = static_cast(n)->GetField(i); + if (fd->IsIdentifier()) + // skip leading and trailing quote in propKey when comparing + if (propKey.compare(1, propKey.length()-2, static_cast(fd)->GetName()) == 0) + return true; + } + return false; +} + +// +// For property access using bracket notation (e.g. bar["prop"]): +// 1) If the property is a member field in the object's class, emit: bar->prop +// 2) Otherwise it is a property created dynamically at runtime: +// - If it is a lvalue, emit: (*bar)["prop"] - [] operator overloaded in ts2cpp.h +// - If it is a rvalue, emit: bar->GetPropXX("prop") - XX is one of union types in t2crt::JS_Val +// 3) For OP_Assign, if lvalue on lhs is dynamic prop, wrap rhs with t2crt::JS_Val() macro. +// e.g. 
(*bar)["p1"] = t2crt::JS_Val(0xa); +// (*bar)["p2"] = t2crt::JS_Val(bar->f2); +// (*bar)["p2"] = t2crt::JS_Val(bar->GetPropLong("p1")); +// (*bar)["p2"] = t2crt::JS_Val((uint32_t)(bar->GetPropLong("p2") >> bar->GetPropLong("p1"))); +// +// *note: to do 1), the property key must be a string literal. if the property key +// is an identfier, then we have to do 2) because the identfier can be +// a var or a TS symobol resolvable only at runtime. +// Also, the object may be an expression, in which case, it can only be evaluated at runtime +// +std::string CppDef::EmitBracketNotationProp(ArrayElementNode* ae, OprId binOpId, bool isLhs, bool& isDynProp) { + if (ae == nullptr) + return std::string(); + + isDynProp = true; + std::string str, propKey; + std::string objName; + + if (ae->GetArray()->IsIdentifier()) { + objName = ae->GetArray()->GetName(); + } else { + // case where the object is an expression - re: call-func.ts + objName = EmitTreeNode(ae->GetArray()); + } + + TypeId propKeyType = ae->GetExprAtIndex(0)->GetTypeId(); + if (propKeyType == TY_String && ae->GetExprAtIndex(0)->IsLiteral()) { + propKey = EmitTreeNode(ae->GetExprAtIndex(0)); + if (IsClassField(ae, propKey)) { + // property is class member field + str = objName + "->"s + propKey.substr(1, propKey.length()-2); + isDynProp = false; + return str; + } + } + if (propKeyType == TY_None) { + propKeyType = GetTypeIdFromDecl(ae->GetExprAtIndex(0)); + } + // resolve propKey at runtime + switch (propKeyType) { + case TY_Int: + propKey = "t2crt::to_string("s + EmitTreeNode(ae->GetExprAtIndex(0)) + ")"s; + break; + case TY_String: + propKey = EmitTreeNode(ae->GetExprAtIndex(0)); + break; + case TY_Symbol: + propKey = "t2crt::to_string("s + EmitTreeNode(ae->GetExprAtIndex(0)) + ")"s; + break; + default: + MASSERT(0 && "Encounter unsupported prop key type in bracket notation"); + break; + } + + if (binOpId == OPR_Assign && isLhs) { + // prop is lvalue + str = "(*"s + objName + ")["s + propKey + "]"s; + } else { + switch(ae->GetTypeId()) { + case TY_Long: + case TY_Int: + str = objName + "->GetPropLong("s + propKey + ")"s; + break; + case TY_Double: + str = objName + "->GetPropDouble("s + propKey + ")"s; + break; + case TY_String: + str = objName + "->GetPropString("s + propKey + ")"s; + break; + case TY_Boolean: + str = objName + "->GetPropBool("s + propKey + ")"s; + break; + case TY_Function: + case TY_Object: + str = objName + "->GetPropObj("s + propKey + ")"s; + break; + case TY_Any: + str = objName + "->GetProp("s + propKey + ")"s; + break; + default: + str = "(*"s + objName + ")["s + propKey + ']'; + } + + // prop is rvalue + // emit: bar->GetPropXX("prop") + // Need type info for each property + } + return str; +} + +std::string CppDef::EmitBinOperatorNode(BinOperatorNode *node) { + if (node == nullptr) + return std::string(); + const char *op = Emitter::GetEnumOprId(node->GetOprId()); + const Precedence precd = *op & 0x3f; + const bool rl_assoc = *op >> 6; // false: left-to-right, true: right-to-left + std::string lhs, rhs; + bool lhsIsDynProp = false; + bool rhsIsDynProp = false; + + if (auto n = node->GetOpndA()) { + if (IsBracketNotationProp(n)) { + lhs = EmitBracketNotationProp(static_cast(n), node->GetOprId(), true, lhsIsDynProp); + } else { + lhs = EmitTreeNode(n); + if (n->IsIdentifier() && n->IsTypeIdArray()) + lhs = "*"s + lhs; + } + if(precd > mPrecedence || (precd == mPrecedence && rl_assoc)) + lhs = "("s + lhs + ")"s; + } + else + lhs = "(NIL) "s; + + if (auto n = node->GetOpndB()) { + if (IsBracketNotationProp(n)) { + 
rhs = EmitBracketNotationProp(static_cast(n), node->GetOprId(), false, rhsIsDynProp); + } else if (IsClassId(n)) { + rhs = "&"s + n->GetName() + "::ctor"s; + } else + rhs = EmitTreeNode(n); + if(precd > mPrecedence || (precd == mPrecedence && !rl_assoc)) + rhs = "("s + rhs + ")"s; + } + else + rhs = " (NIL)"s; + + OprId k = node->GetOprId(); + std::string str; + switch(k) { + case OPR_Exp: + str = "std::pow("s + lhs + ", "s + rhs + ")"; + break; + case OPR_StEq: + str = "t2crt::StrictEqu("s + lhs + ',' + rhs + ')'; + break; + case OPR_StNe: + str = "t2crt::StrictNotEqu("s + lhs + ',' + rhs + ')'; + break; + default: + switch(k) { + case OPR_Band: + case OPR_Bor: + case OPR_Bxor: + case OPR_Shl: + case OPR_Shr: + lhs = "static_cast(static_cast("s + lhs + "))"s; + break; + case OPR_Zext: + lhs = "static_cast(static_cast("s + lhs + "))"s; + op = "\015>>"; + break; + } + if (k == OPR_Assign && lhsIsDynProp) + rhs = "t2crt::JS_Val("s + rhs + ")"s; + str = lhs + " "s + std::string(op + 1) + " "s + rhs; + } + mPrecedence = precd; + return str; +} + +std::string CppDef::EmitUnaOperatorNode(UnaOperatorNode *node) { + if (node == nullptr) + return std::string(); + bool isPost = node->IsPost(); + const char *op = Emitter::GetEnumOprId(node->GetOprId()); + const Precedence precd = *op & 0x3f; + const bool rl_assoc = *op >> 6; // false: left-to-right, true: right-to-left + std::string opr; + if (auto n = node->GetOpnd()) { + opr = EmitTreeNode(n); + if(precd > mPrecedence || (precd == mPrecedence && (rl_assoc && isPost || !rl_assoc && !isPost))) + opr = "("s + opr + ")"s; + } + else + opr = "(NIL)"s; + if(node->GetOprId() == OPR_Bcomp) + opr = "static_cast("s + opr + ")"s; + std::string str; + if(node->IsPost()) + str = opr + std::string(op + 1) + " "s; + else + str = " "s + std::string(op + 1) + opr; + mPrecedence = precd; + return str; +} + +std::string CppDef::EmitTemplateLiteralNode(TemplateLiteralNode *node) { + if (node == nullptr) + return std::string(); + auto num = node->GetTreesNum(); + std::string str; + for (unsigned i = 0; i < num; ++i) { + if (auto n = node->GetTreeAtIndex(i)) { + if (!std::empty(str)) + str += " + "s; + std::string s(EmitTreeNode(n)); + if(i & 0x1) + str += "t2crt::to_string("s + s+ ")"s; + else { + QuoteStringLiteral(s); + str += s; + } + } + } + mPrecedence = '\016'; + return str; +} + +std::string CppDef::EmitLiteralNode(LiteralNode *node) { + if (node == nullptr) + return std::string(); + LitData lit = node->GetData(); + if(lit.mType == LT_VoidLiteral) + return "undefined"; + std::string str = Emitter::EmitLiteralNode(node); + return str; +} + +std::string CppDef::EmitSwitchNode(SwitchNode *node) { + if (node == nullptr) + return std::string(); + bool doable = true; + for (unsigned i = 0; i < node->GetCasesNum(); ++i) + if (SwitchCaseNode* c = node->GetCaseAtIndex(i)) + for (unsigned j = 0; j < c->GetLabelsNum(); ++j) { + auto l = c->GetLabelAtIndex(j); + if (l && l->IsSwitchLabel()) { + auto ln = static_cast(l); + if (auto v = ln->GetValue()) + if(!v->IsLiteral() || !v->IsTypeIdInt()) { + doable = false; + goto out_of_loops; + } + } + } +out_of_loops: + std::string label; + TreeNode* lab = node->GetLabel(); + if(lab) + label = "__label_break_"s + EmitTreeNode(lab); + else + label = "__label_switch_" + std::to_string(node->GetNodeId()); + std::string str; + if(doable) { + str = "switch("s; + if (TreeNode* n = node->GetExpr()) { + std::string expr = EmitTreeNode(n); + str += Clean(expr); + } + str += "){\n"s; + for (unsigned i = 0; i < node->GetCasesNum(); ++i) { + 
if(SwitchCaseNode* n = node->GetCaseAtIndex(i)) + str += EmitTreeNode(n); + } + str += "}\n"s; + } else { + std::string tmp = "__tmp_"s + std::to_string(node->GetNodeId()); + str = "do { // switch\nauto "s + tmp + " = "s; + if (TreeNode* n = node->GetExpr()) { + std::string expr = EmitTreeNode(n); + str += Clean(expr); + } + str += ";\n"s; + std::string body; + std::string other = "goto "s + label + ";\n"s;; + for (unsigned i = 0; i < node->GetCasesNum(); ++i) + if (SwitchCaseNode* cn = node->GetCaseAtIndex(i)) { + for (unsigned j = 0; j < cn->GetLabelsNum(); ++j) + if (SwitchLabelNode* ln = cn->GetLabelAtIndex(j)) { + if(ln->IsDefault()) + other = "goto __case_"s + std::to_string(cn->GetNodeId()) + ";\n"s; + else { + std::string le = EmitTreeNode(ln->GetValue()); + str += "if("s + tmp + " == ("s + Clean(le) + + "))\ngoto __case_"s + std::to_string(cn->GetNodeId()) + ";\n"s; + } + } + body += "__case_"s + std::to_string(cn->GetNodeId()) + ":\n"s; + for (unsigned s = 0; s < cn->GetStmtsNum(); ++s) + if (TreeNode* t = cn->GetStmtAtIndex(s)) + body += EmitTreeNode(t) + ";\n"s; + } + str += other + body; + str += "} while(0);\n"s; + } + if(!doable || lab) + str += label + ":;\n"s; + return str; +} + +std::string CppDef::EmitTypeOfNode(TypeOfNode *node) { + if (node == nullptr) + return std::string(); + std::string str("t2crt::__js_typeof("s), rhs; + if (auto n = node->GetExpr()) + rhs = EmitTreeNode(n); + str += rhs + ")"s; + return HandleTreeNode(str, node); +} + +// Return C++ object type of "this" parameter in function param declaration +std::string CppDef::GetThisParamObjType(TreeNode *node) { + if (node && !node->IsFunction()) + return std::string(); + + std::string str = "t2crt::Object"; + if (static_cast(node)->GetParamsNum()) { + auto n = static_cast(node)->GetParam(0); + if (n->IsThis()) { + TreeNode* tn = static_cast(n)->GetType(); + str = mCppDecl.GetTypeString(tn, nullptr); + if (str.back() == '*') + str.pop_back(); + if (!str.compare("t2crt::JS_Val ") || !str.compare("t2crt::JS_Val")) { + str = "t2crt::Object"; + } + } + } + return str; +} + +std::string CppDef::EmitNewNode(NewNode *node) { + if (node == nullptr) + return std::string(); + + std::string str; + MASSERT(node->GetId() && "No mId on NewNode"); + + if (auto id = node->GetId()) { + if (id->IsTypeIdClass()) { + // Generate code to create new class obj and call class constructor + std::string clsName = EmitTreeNode(node->GetId()); + if (IsBuiltinObj(clsName)) + clsName = "t2crt::"s + clsName; + str = clsName + "::ctor("s + clsName + "::ctor._new()"s; + + } else if (id->IsTypeIdFunction()) { // TS: new () + // When calling TS new() on constructor function: + // A new object is created and bound to "this" of ctor func which is then invoked. + // The object's proto chain is linked to ctor func prototype, and constructor set + // to the consturctor object. The object is then returned. + // note: When calling new() on functions, TSC only allows void functions that + // reference 'this' or non void function that do not reference 'this'. + // TSC strict mode requires all funcs that refs "this" to declare it as 1st parm. 
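+      // e.g. (illustrative) for "new Bar(1)" where Bar is a constructor function
+      // declared with "this: Foo", the code below emits roughly
+      //   Bar->ctor(new Foo(Bar, Bar->prototype), 1)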
+ if (auto decl = mHandler->FindDecl(static_cast(id))) { + std::string objClass = GetThisParamObjType(decl); // "t2crt::Object" , "Foo" etc + std::string fnName = GetIdentifierName(id); + // create new obj with proto chain and ctor init'd : new (, ->prototype) + std::string newObj = "new "s + objClass + "("s + fnName + ", "s + fnName + "->prototype)"s; + str = fnName + "->ctor("s + newObj + ", "s; // call ctor function with new obj as this arg + } + } else { + // for builtins + str = "new "s + EmitTreeNode(node->GetId()); + str += "("s; + } + } + + auto num = node->GetArgsNum(); + for (unsigned i = 0; i < num; ++i) { + if (i || node->GetId()->IsTypeIdClass()) + str += ", "s; + if (auto n = node->GetArg(i)) { + str += EmitTreeNode(n); + } + } + str += ")"s; + if (auto n = node->GetBody()) { + str += " "s + EmitBlockNode(n); + } + mPrecedence = '\024'; + return HandleTreeNode(str, node); +} + +static std::string MethodString(std::string &func) { + size_t s = func.substr(0, 9) == "function " ? 9 : 0; + return func.back() == '}' ? func.substr(s) + "\n"s : func.substr(s) + ";\n"s; +} + +std::string CppDef::EmitStructNode(StructNode *node) { + if (node == nullptr) + return std::string(); + std::string str; + const char *suffix = ";\n"; + switch(node->GetProp()) { + case SProp_CStruct: + str = "struct "s; + break; + case SProp_TSInterface: + return std::string(); // decl already generation by CppDecl + case SProp_TSEnum: { + // Create the enum type object + std::string enumClsName; + if (auto n = node->GetStructId()) { + enumClsName = "Enum_"s + n->GetName(); + str += n->GetName() + " = new "s + enumClsName + "();\n"s; + return str; + } + break; + } + case SProp_NA: + str = ""s; + return str; // todo: handle anonymous struct created for untyped object literals. + break; + default: + MASSERT(0 && "Unexpected enumerator"); + } + + if (auto n = node->GetStructId()) { + str += EmitIdentifierNode(n); + } + + auto num = node->GetTypeParamsNum(); + if(num) { + str += "<"s; + for (unsigned i = 0; i < num; ++i) { + if (i) + str += ", "s; + if (auto n = node->GetTypeParamAtIndex(i)) + str += EmitTreeNode(n); + } + str += ">"s; + } + + for (unsigned i = 0; i < node->GetSupersNum(); ++i) { + str += i ? ", "s : " extends "s; + if (auto n = node->GetSuper(i)) + str += EmitTreeNode(n); + } + str += " {\n"s; + for (unsigned i = 0; i < node->GetFieldsNum(); ++i) { + if (auto n = node->GetField(i)) { + str += EmitTreeNode(n) + suffix; + } + } + + if (auto n = node->GetNumIndexSig()) { + str += EmitNumIndexSigNode(n) + "\n"s;; + } + if (auto n = node->GetStrIndexSig()) { + str += EmitStrIndexSigNode(n) + "\n"; + } + + for (unsigned i = 0; i < node->GetMethodsNum(); ++i) { + if (auto n = node->GetMethod(i)) { + std::string func = EmitFunctionNode(n); + func = Clean(func); + str += MethodString(func); + } + } + + str += "}\n"s; + return HandleTreeNode(str, node); +} + +std::string CppDef::EmitTypeAliasNode(TypeAliasNode *node) { + return std::string(); +} + +// Return the declared type for an identifier +TreeNode* CppDef::FindDeclType(TreeNode* node) { + if (node == nullptr || !node->IsIdentifier()) + return nullptr; + + if (auto n = mHandler->FindDecl(static_cast(node))) + if (n->IsDecl()) + if (auto var = static_cast(n)->GetVar()) + if (var->IsIdentifier()) + if (auto type = static_cast(var)->GetType()) + return type; + + return nullptr; +} + +// Get template argument for calling InstanceOf template func. 
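+// e.g. (illustrative) for "x instanceof Foo" the declared type of x becomes the
+// template argument of the emitted t2crt::InstanceOf call; when no type info is
+// found, t2crt::JS_Val is returned and EmitInstanceOfNode wraps the lhs instead.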
+std::string CppDef::GetTypeForTemplateArg(TreeNode* node) { + if (node == nullptr) + return std::string(); + + std::string str; + if (auto n = FindDeclType(node)) { + // lhs type of instanceof operator is either object or ANY + switch(n->GetKind()) { + case NK_UserType: + str = EmitTreeNode(n); + break; + case NK_PrimType: + str = EmitTreeNode(n); + if (str.find("t2crt::JS_Val") != std::string::npos) + str = "t2crt::JS_Val"; + break; + default: + MASSERT(0 && "Unexpected node type"); + } + } else if (node->IsField()) { + // Lookup declared type of the obj in mUpper, then find + // mField from the obj field list to get the field type. + // + // The result is used as template argument for the InstanceOf + // template func. However, without this information + // the compiler does template argument deduction + // to call the template func, and has work ok for testcases. + // So implementation of this part is deferred until needed. + } else { + // No info - return ANY for now... + str = "t2crt::JS_Val"s; + //MASSERT(0 && "Unexpected node type"); + } + return str; +} + +std::string CppDef::EmitInstanceOfNode(InstanceOfNode *node) { + if (node == nullptr) + return std::string(); + const Precedence precd = '\014'; + const bool rl_assoc = false; // false: left-to-right + std::string lhs, rhs, typ; + if (auto n = node->GetLeft()) { + lhs = EmitTreeNode(n); + typ = GetTypeForTemplateArg(n); + if (typ.compare("t2crt::JS_Val") == 0) { + lhs = "t2crt::JS_Val("s + lhs + ")"s; + typ = ""; + } + else if (!typ.empty()) + typ = "<"s + typ + ">"s; // InstanceOf + + if(precd > mPrecedence) + lhs = '(' + lhs + ')'; + } + else + lhs = "(NIL) "s; + + if (auto n = node->GetRight()) { + if (IsClassId(n) || IsBuiltinObj(n->GetName())) + rhs = "&t2crt::"s + n->GetName() + "::ctor"s; + else + rhs = EmitTreeNode(n); + if(precd > mPrecedence || (precd == mPrecedence && !rl_assoc)) + rhs = '(' + rhs + ')'; + } + else + rhs = " (NIL)"s; + + std::string str("t2crt::InstanceOf"s + typ + "("s + lhs + ", "s + rhs + ")"s); + mPrecedence = precd; + return HandleTreeNode(str, node); +} + +std::string CppDef::EmitDeclareNode(DeclareNode *node) { + return std::string(); +} + +std::string CppDef::EmitAsTypeNode(AsTypeNode *node) { + if (node == nullptr) + return std::string(); + std::string str; + if (auto n = node->GetType()) + str = EmitTreeNode(n); + if (!str.empty()) + str = '(' + str + ')'; + return str; +} + +std::string CppDef::EmitNamespaceNode(NamespaceNode *node) { + if (node == nullptr) + return std::string(); + // emit namespace in each statement inside of current namespace + std::string str; + for (unsigned i = 0; i < node->GetElementsNum(); ++i) { + if (auto n = node->GetElementAtIndex(i)) { + str += EmitTreeNode(n) + GetEnding(n); + } + } + return str; +} + +std::string &CppDef::HandleTreeNode(std::string &str, TreeNode *node) { + auto num = node->GetAsTypesNum(); + if(num > 0) { + std::string as; + for (unsigned i = 0; i < num; ++i) + if (auto t = node->GetAsTypeAtIndex(i)) + as = EmitAsTypeNode(t) + as; + str = as + '(' + str + ')'; + } + /* + if(node->IsOptional()) + str = AddParentheses(str, node) + '?'; + if(node->IsNonNull()) + str = AddParentheses(str, node) + '!'; + */ + if(node->IsRest()) + str = "..."s; // + AddParentheses(str, node); + /* + if(node->IsConst()) + if(node->IsField()) + str += " as const"s; + else + str = AddParentheses(str, node) + " as const"s; + */ + return str; +} + +std::string CppDef::EmitRegExprNode(RegExprNode *node) { + if (node == nullptr) + return std::string(); + std::string 
source = Emitter::EmitRegExprNode(node); + InsertEscapes(source); + return "RegExp::ctor._new(\""s + source + "\")"s; +} + +} // namespace maplefe diff --git a/src/MapleFE/ast2cpp/src/cpp_emitter.cpp b/src/MapleFE/ast2cpp/src/cpp_emitter.cpp new file mode 100644 index 0000000000000000000000000000000000000000..c5c92c0b0409646a598f2cc081f2b3d50197f638 --- /dev/null +++ b/src/MapleFE/ast2cpp/src/cpp_emitter.cpp @@ -0,0 +1,245 @@ +/* + * Copyright (C) [2021-2022] Futurewei Technologies, Inc. All rights reverved. + * + * OpenArkFE is licensed under the Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "cpp_emitter.h" +#include "helper.h" + +namespace maplefe { + +std::string CppEmitter::GetIdentifierName(TreeNode *node) { + if (node == nullptr) + return std::string(); + switch (node->GetKind()) { + case NK_Identifier: + return std::string(static_cast(node)->GetName()); + case NK_Decl: + return GetIdentifierName(static_cast(node)->GetVar()); + case NK_Struct: + // Named StructNode has name in StructId. Unamed StructNode is assigned + // anonymous name by frontend and can be accessed using node mStrIdx + // through node GetName() interface. + if (auto n = static_cast(node)->GetStructId()) + return GetIdentifierName(n); + else + return node->GetName(); // for anonomyous name + case NK_Function: + if (static_cast(node)->GetFuncName()) + return GetIdentifierName(static_cast(node)->GetFuncName()); + else + return GenAnonFuncName(node); + case NK_Class: + return std::string(static_cast(node)->GetName()); + case NK_Interface: + return std::string(static_cast(node)->GetName()); + case NK_UserType: + return GetIdentifierName(static_cast(node)->GetId()); + case NK_TypeAlias: + return GetIdentifierName(static_cast(node)->GetId()); + case NK_Namespace: + return GetIdentifierName(static_cast(node)->GetId()); + case NK_Module: + return GetModuleName(static_cast(node)->GetFilename()); + case NK_Literal: + return AstDump::GetEnumLitData(static_cast(node)->GetData()); + case NK_Declare: + { auto n = static_cast(node); + auto num = n->GetDeclsNum(); + if (num == 1) + return GetIdentifierName(n->GetDeclAtIndex(0)); + return "Failed: one decl is expected"s; + } + default: + return "Failed to get the name of "s + AstDump::GetEnumNodeKind(node->GetKind()); + } +} + +bool CppEmitter::IsInNamespace(TreeNode *node) { + while (node) { + if (node->IsNamespace()) + return true; + node = node->GetParent(); + } + return false; +} + +std::string CppEmitter::GetNamespace(TreeNode *node) { + std::string ns; + while (node) { + if (node->IsNamespace()) { + TreeNode *id = static_cast(node)->GetId(); + if (id->IsIdentifier()) { + std::string s = Emitter::EmitIdentifierNode(static_cast(id)); + ns = ns.empty() ? 
s : s + "::"s + ns; + } + } + node = node->GetParent(); + } + return ns; +} + +std::string CppEmitter::GetQualifiedName(IdentifierNode *node) { + std::string name; + if (node == nullptr) + return name; + name = node->GetName(); + TreeNode *parent = node->GetParent(); + if (parent->IsField()) + return name; + Module_Handler *handler = GetModuleHandler(); + TreeNode *decl = handler->FindDecl(node); + if (decl == nullptr) + return name; + std::string ns = GetNamespace(decl); + return ns.empty() ? name : ns + "::"s + name; +} + +// Returns true if identifier is a class +bool CppEmitter::IsClassId(TreeNode* node) { + if (node == nullptr || !node->IsIdentifier()) + return false; + if (auto decl = mHandler->FindDecl(static_cast(node), true)) { // deep, cross module lookup + if (decl->IsClass()) + return true; + // TODO: handle type alias + } + return false; +} + +// Returns true if the declared type of a var is a TS class +bool CppEmitter::IsVarTypeClass(TreeNode* var) { + if (var == nullptr) + return false; + if (auto n = gTypeTable.GetTypeFromTypeIdx(var->GetTypeIdx())) { + if (n->IsClass()) + return true; + } + return false; +} + +void CppEmitter::InsertEscapes(std::string& str) { + Emitter::Replace(str, "\\", "\\\\", 0); + Emitter::Replace(str, "\"", "\\\"", 0); +} + +bool CppEmitter::IsGenerator(TreeNode* node) { + return mHandler->IsGeneratorUsed(node->GetNodeId()); +} + +FunctionNode* CppEmitter::GetGeneratorFunc(TreeNode* node) { + return mHandler->GetGeneratorUsed(node->GetNodeId()); +} + +// +// Interface to get array type and dimension interface for an ArrayLiteral +// (should be just a wrapper to call TI interfaces GetArrayElemTypeId() +// and GetArrayDim(), but until the usage of those 2 interface can cover all +// use caes, this interface encaps any additional work to get array type info. +// +void CppEmitter::GetArrayTypeInfo(ArrayLiteralNode* node, int& numDim, std::string& type) { + TypeId typeId = mHandler->GetArrayElemTypeId(node->GetNodeId()); + DimensionNode* dim = mHandler->GetArrayDim(node->GetNodeId()); + if (dim) + numDim = dim->GetDimensionsNum(); + switch(typeId) { + case TY_Class: { + unsigned tIdx = mHandler->GetArrayElemTypeIdx(node->GetNodeId()); + TreeNode* tp = gTypeTable.GetTypeFromTypeIdx(tIdx); + type = ObjectTypeStr(tp->GetName()); + break; + } + case TY_Int: + type = "long"; + break; + case TY_String: + type = "std::string"; + break; + case TY_Double: + type = "double"; + break; + case TY_None: + type = "t2crt::JS_Val"; + break; +#if 0 + case TY_Array: + type = "t2crt::Array*"; + break; +#endif + case TY_Function: + default: + // TODO + dim = 0; + type = "TBD"; + break; + } + return; + +#if 0 + if (!node->GetParent()) + return; + + switch(node->GetParent()->GetKind()) { + case NK_Decl: + // e.g. var arr: number[]=[1,2,3]; + //GetArrInfoByVarId(node, dim, type); + break; + case NK_Identifier: + // e.g. class Foo { arr: number[]=[1,2,3]; } + //GetArrInfoByClassFieldId(node, dim, type); + break; + case NK_FieldLiteral: + // e.g. var: {arr:number[]} = { n:[1,2,3] }; + //GetArrInfoByObjLiteralClassField(node, dim, type); + break; + } +#endif +} + +// C++ function header for different TS function types: +// Generator: t2crt::IteratorResult [::]GeneratorFunc_::_body(t2crt::Object* _this, void*& yield[, &]...) +// Class ctor: [::]* ::Ctor::operator()(* obj[, ]...) +// Class method: [::]::([params]...) +// Function: [::]Cls_::_body(t2crt::Object|* _this[, params]...) 
+std::string CppEmitter::FunctionHeader(FunctionNode* node, std::string retType) { + std::string str; + std::string ns = GetNamespace(node).empty() ? ""s : GetNamespace(node)+"::"; + std::string funcName = GetIdentifierName(node); + std::string className= ns + GetClassName(node); + bool isTopLevel = hFuncTable.IsTopLevelFunc(node); + retType = retType + " "s + ns; + + if (node->IsGenerator()) // generator + str += GeneratorFuncHeader(ns+GeneratorFuncName(funcName)+"::", node->GetNodeId()); + else if (node->IsConstructor()) { // class constructor + std::string param = FunctionParams(node->GetNodeId(), false); + param = param.empty() ? ""s : (", "s+param); + str += className + "* "s + className + "::Ctor::operator()" + "(" +className+ "* obj" +param+ ") "; + } + else if (IsClassMethod(node)) // class method + str += retType + GetClassName(node) + "::" + funcName + "(" + FunctionParams(node->GetNodeId(), false) + ") "; + else if (isTopLevel) // top level function + str += retType + "Cls_" + funcName + "::_body" + "(" + FunctionParams(node->GetNodeId(), true) + ") "; + else + str += retType + funcName + "(" + FunctionParams(node->GetNodeId(), false) + ") "; + return str; +} + +// Return class name from class method or class field +std::string CppEmitter::GetClassName(TreeNode* node) { + TreeNode* n = node->GetParent(); + if (n && n->IsClass()) + return n->GetName(); + return ""s; +} + +} // namespace maplefe diff --git a/src/MapleFE/ast2cpp/src/emitter.cpp b/src/MapleFE/ast2cpp/src/emitter.cpp new file mode 100644 index 0000000000000000000000000000000000000000..fbf695857eaf577226ec8854914a7f84f48dba2c --- /dev/null +++ b/src/MapleFE/ast2cpp/src/emitter.cpp @@ -0,0 +1,2453 @@ +/* + * Copyright (C) [2021-2022] Futurewei Technologies, Inc. All rights reverved. + * + * OpenArkFE is licensed under the Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ + +#include "emitter.h" +#include +#include +#include + +namespace maplefe { + +std::string Emitter::Emit(const char *title) { + std::string code; + code = "// [Beginning of Emitter: "s + title + "\n"s; + code += EmitTreeNode(GetASTModule()); + code += "// End of Emitter]\n"s; + return code; +} + +std::string Emitter::GetEnding(TreeNode *n) { + if (n->IsExport()) { + ExportNode *ex = static_cast(n); + if (ex->GetPairsNum() == 1) { + if (auto p = ex->GetPair(0)->GetBefore()) + n = p; + } + } + if (n->IsDeclare()) { + DeclareNode *d = static_cast(n); + if (d->GetDeclsNum() == 1 && !d->IsGlobal()) + if (auto p = d->GetDeclAtIndex(0)) + n = p; + } + std::string str; + switch(n->GetKind()) { + case NK_Function: + case NK_TripleSlash: + str += '\n'; + break; + default: + str += ';'; + case NK_Block: + case NK_Switch: + case NK_ForLoop: + case NK_WhileLoop: + case NK_DoLoop: + case NK_CondBranch: + case NK_Class: + case NK_Struct: + case NK_Namespace: + case NK_Declare: + case NK_Module: + str += '\n'; + } + return str; +} + +std::string Emitter::Clean(std::string &s) { + auto len = s.length(); + if(len >= 1 && s.back() == '\n') + s = s.erase(len - 1); + if(len >= 2 && s.back() == ';') + return s.erase(len - 2); + return s; +} + +std::string Emitter::GetBaseFilename() { + std::string str(GetASTModule()->GetFilename()); + auto len = str.length(); + if(len >= 3 && str.substr(len - 3) == ".ts") + return str.erase(len - 3); + return str; +} + +std::string Emitter::GetModuleName(const char *p) { + std::string str = p && *p ? p : GetBaseFilename(); + size_t pos = str.rfind("/", std::string::npos); + str = pos == std::string::npos ? str : str.substr(pos); + for (auto &c : str) + if(std::ispunct(c)) + c = '_'; + return "M_"s + str; +} + +std::string Emitter::GetModuleName(TreeNode *node) { + if (node == nullptr) + return std::string(); + std::string str = EmitTreeNode(node); + auto len = str.length(); + if (len <= 2 || str.back() != '"' || str.front() != '"') + return std::string(); + str = str.substr(1, len - 2); + return GetModuleName(str.c_str()); +} + +std::string Emitter::GetEnumAttrId(AttrId k) { + std::string str(AstDump::GetEnumAttrId(k) + 5); + Emitter::Replace(str, "etter", "et"); + str += ' '; + return str; +} + +void Emitter::Replace(std::string &str, const char *o, const char *n, int cnt) { + size_t len = std::strlen(o); + size_t nlen = std::strlen(n); + if(cnt > 0) { + size_t index = 0; + int num = cnt; + do { + index = str.find(o, index); + if (index == std::string::npos) break; + str.replace(index, len, n); + index += nlen; + } while(--num && index < str.size()); + } else { + size_t index = std::string::npos; + int num = cnt ? 
-cnt : std::numeric_limits::max(); + do { + index = str.rfind(o, index); + if (index == std::string::npos) break; + str.replace(index, len, n); + index -= len; + } while(--num); + } +} + +std::string Emitter::EmitAnnotationNode(AnnotationNode *node) { + if (node == nullptr) + return std::string(); + std::string str; + if (auto n = node->GetId()) { + str += EmitTreeNode(n); + } + if (auto num = node->GetArgsNum()) { + str += '('; + for (unsigned i = 0; i < num; ++i) { + if (i) + str += ", "s; + if (auto n = node->GetArgAtIndex(i)) + str += EmitTreeNode(n); + } + str += ')'; + } + if (auto n = node->GetType()) { + str += ": "s + EmitAnnotationTypeNode(n); + } + return HandleTreeNode(str, node); +} + +std::string Emitter::EmitAsTypeNode(AsTypeNode *node) { + if (node == nullptr) + return std::string(); + std::string str; + if (auto n = node->GetType()) { + str += " as "s + EmitTreeNode(n); + } + mPrecedence = '\023'; + return HandleTreeNode(str, node); +} + +std::string Emitter::EmitIdentifierNode(IdentifierNode *node) { + if (node == nullptr) + return std::string(); + std::string str; + + for (unsigned i = 0; i < node->GetAnnotationsNum(); ++i) + if (auto n = node->GetAnnotationAtIndex(i)) + str += '@' + EmitTreeNode(n) + "\n"s; + + std::string accessor1, accessor2; + for (unsigned i = 0; i < node->GetAttrsNum(); ++i) { + std::string s = GetEnumAttrId(node->GetAttrAtIndex(i)); + if (s == "get "s || s == "set "s) + accessor2 += s; + else + accessor1 += s; + } + std::string name(node->GetName()); + if (accessor1 == "private "s && name == "private"s) + str += "#private"s; + else + str += accessor1 + accessor2 + name; + mPrecedence = '\030'; + str = HandleTreeNode(str, node); + //if (auto n = node->GetDims()) { + // str += ' ' + EmitDimensionNode(n); + //} + + if (auto n = node->GetType()) { + std::string s = EmitTreeNode(n); + if(s.length() > 9 && s.substr(0, 9) == "function ") { + std::size_t loc = s.find("("); + if(loc != std::string::npos) + s = s.substr(loc); + } + str += ": "s + s; + } + if (auto n = node->GetInit()) { + str += " = "s + EmitTreeNode(n); + } + return str; +} + +std::string Emitter::EmitFunctionNode(FunctionNode *node) { + if (node == nullptr) + return std::string(); + std::string pre; + for (unsigned i = 0; i < node->GetAnnotationsNum(); ++i) + if (auto n = node->GetAnnotationAtIndex(i)) + pre += '@' + EmitTreeNode(n) + "\n"s; + + auto p = node->GetParent(); + NodeKind k = p ? p->GetKind() : NK_Null; + bool inside = k == NK_Class || k == NK_Struct || k == NK_Interface; + bool func = !inside; + std::string accessor; + for (unsigned i = 0; i < node->GetAttrsNum(); ++i) { + std::string s = GetEnumAttrId(node->GetAttrAtIndex(i)); + if (s == "get "s || s == "set "s) { + func = false; + accessor += s; + } else + pre += s; + } + pre += accessor; + + std::string str; + if (node->IsConstructor()) + str = "constructor "s; + bool has_name; + if(TreeNode* name = node->GetFuncName()) { + std::string s = EmitTreeNode(name); + has_name = s.substr(0, 9) != "__lambda_"; + if (has_name) + str += node->IsIterator() ? 
"* ["s + s + ']' : s; + } else + has_name = k == NK_XXportAsPair; + + if (node->IsConstructSignature()) + str += "new"s; + + auto num = node->GetTypeParamsNum(); + if(num) { + str += '<'; + for (unsigned i = 0; i < num; ++i) { + if (i) + str += ", "s; + if (auto n = node->GetTypeParamAtIndex(i)) { + str += EmitTreeNode(n); + } + } + str += '>'; + } + + /* + for (unsigned i = 0; i < node->GetThrowsNum(); ++i) { + if (auto n = node->GetThrowAtIndex(i)) { + str += ' ' + EmitExceptionNode(n); + } + } + */ + + str += '('; + for (unsigned i = 0; i < node->GetParamsNum(); ++i) { + if (i) + str += ", "s; + if (auto n = node->GetParam(i)) { + str += EmitTreeNode(n); + } + } + str += ')'; + + if (auto n = node->GetAssert()) + str += " : asserts "s + EmitTreeNode(n); + + auto body = node->GetBody(); + if (auto n = node->GetRetType()) { + std::string s = EmitTreeNode(n); + if(!s.empty()) { + str += (body || has_name || inside ? " : "s : " => "s) + s; + if (!body && !has_name) + func = false; + } + } + + str = pre + (func && !node->IsIterator() ? (node->IsGenerator() ? "function* "s : "function "s) : ""s) + str; + + if (body) { + auto s = EmitBlockNode(body); + if(s.empty() || s.front() != '{') + str += '{' + s + "}\n"s; + else + str += s; + } + else + if (k == NK_Block || k == NK_Struct || k == NK_Class) + str += ";\n"s; + /* + if (auto n = node->GetDims()) { + str += ' ' + EmitDimensionNode(n); + } + str += ' ' + std::to_string(node->IsConstructor()); + */ + mPrecedence = '\004'; + return HandleTreeNode(str, node); +} + +std::string Emitter::EmitUserTypeNode(UserTypeNode *node) { + if (node == nullptr) + return std::string(); + auto k = node->GetType(); + Precedence precd; + switch (k) { + case UT_Union: + precd = '\010'; + break; + case UT_Inter: + precd = '\012'; + break; + default: + precd = '\030'; + } + std::string attrs, accessor, str; + for (unsigned i = 0; i < node->GetAttrsNum(); ++i) { + std::string s = GetEnumAttrId(node->GetAttrAtIndex(i)); + if (s == "get "s || s == "set "s) + accessor += s; + else + attrs += s; + } + attrs += accessor; + if (auto n = node->GetId()) { + str += EmitTreeNode(n); + auto num = node->GetTypeGenericsNum(); + if(num) { + str += '<'; + for (unsigned i = 0; i < num; ++i) { + if (i) + str += ", "s; + if (auto n = node->GetTypeGeneric(i)) { + str += EmitTreeNode(n); + } + } + str += '>'; + } + precd = mPrecedence; + } + + if(k != UT_Regular) { + if(!str.empty()) + str = "type "s + str + " = "s; + std::string op = k == UT_Union ? 
" | "s : " & "s; + for (unsigned i = 0; i < node->GetUnionInterTypesNum(); ++i) { + if(i) + str += op; + std::string s = EmitTreeNode(node->GetUnionInterType(i)); + if (precd >= mPrecedence) + s = '(' + s + ')'; + str += s; + } + mPrecedence = precd; + } + + if (auto n = node->GetDims()) { + std::string s = EmitDimensionNode(n); + if (precd <= mPrecedence) + str = '(' + str + ')'; + str += s; + } + str = attrs + str; + return HandleTreeNode(str, node); +} + +std::string Emitter::EmitComputedNameNode(ComputedNameNode *node) { + if (node == nullptr) + return std::string(); + std::string str; + for (unsigned i = 0; i < node->GetAttrsNum(); ++i) { + std::string s = GetEnumAttrId(node->GetAttrAtIndex(i)); + str += s; + } + str += " ["s; + if (auto n = node->GetExpr()) { + str += EmitTreeNode(n); + } + str += "] "s; + + if (auto prop = node->GetProp()) { + if (prop & CNP_Rem_ReadOnly) + str = "-readonly "s + str; + if (prop & CNP_Add_ReadOnly) + str = "readonly "s + str; + if (prop & CNP_Rem_Optional) + str += "-?"s; + if (prop & CNP_Add_Optional) + str += '?'; + } + + str = HandleTreeNode(str, node); + if (auto n = node->GetExtendType()) { + str += ": "s + EmitTreeNode(n); + } + if (auto n = node->GetInit()) { + str += " = "s + EmitTreeNode(n); + } + return str; +} + +std::string Emitter::EmitPackageNode(PackageNode *node) { + if (node == nullptr) + return std::string(); + std::string str = "package"; + if (auto n = node->GetPackage()) { + str += ' ' + EmitTreeNode(n); + } + return HandleTreeNode(str, node); +} + +std::string Emitter::EmitXXportAsPairNode(XXportAsPairNode *node) { + if (node == nullptr) + return std::string(); + std::string str; + if (node->IsDefault()) { + if (auto n = node->GetBefore()) + str += "default "s + EmitTreeNode(n); + } else if (node->IsEverything()) { + str += " * "s; + if (auto n = node->GetBefore()) + str += "as "s + EmitTreeNode(n); + } else if (node->IsSingle()) { + if (auto a = node->GetAfter()) + str += EmitTreeNode(a); + if (auto n = node->GetBefore()) { + str += " = "s; + std::string s = EmitTreeNode(n); + str += n->IsLiteral() ? 
"require("s + s + ')' : s; + } + } else if (node->GetAsNamespace()) { + if (auto n = node->GetBefore()) + str += " as namespace "s + EmitTreeNode(n); + } else { + if (auto n = node->GetBefore()) { + std::string s = EmitTreeNode(n); + if (auto a = node->GetAfter()) + s += " as "s + EmitTreeNode(a); + if (n->IsIdentifier()) + s = "{ "s + s + " }"s; + str += s; + } + } + return HandleTreeNode(str, node); +} + +std::string Emitter::EmitDeclareNode(DeclareNode *node) { + if (node == nullptr) + return std::string(); + std::string str, accessor; + for (unsigned i = 0; i < node->GetAttrsNum(); ++i) { + std::string s = GetEnumAttrId(node->GetAttrAtIndex(i)); + if (s == "get "s || s == "set "s) + accessor += s; + else + str += s; + } + str += accessor; + + unsigned num = node->GetDeclsNum(); + if (node->IsGlobal() || num != 1) { + str += "declare "s; + if (node->IsGlobal()) { + TreeNode *n = node; + while(n = n->GetParent()) + if (n->IsDeclare()) { + str = std::string(); + break; + } + str += "global "s; + } + str += "{\n"s; + for (unsigned i = 0; i < num; ++i) { + if (auto n = node->GetDeclAtIndex(i)) + str += EmitTreeNode(n) + GetEnding(n); + } + str += "}\n"s; + } else { + if (auto n = node->GetDeclAtIndex(0)) { + std::string s = EmitTreeNode(n); + str += "declare "s + s; + } + } + return HandleTreeNode(str, node); +} + +std::string Emitter::EmitExportNode(ExportNode *node) { + if (node == nullptr) + return std::string(); + std::string deco; + for (unsigned i = 0; i < node->GetAnnotationsNum(); ++i) + if (auto n = node->GetAnnotationAtIndex(i)) + deco += '@' + EmitTreeNode(n) + "\n"s; + + std::string str; + auto num = node->GetPairsNum(); + if (num == 1 && node->GetTarget() == nullptr) + if (XXportAsPairNode *pair = node->GetPair(0)) + if (auto b = pair->GetBefore()) + if (b->IsModule() && pair->GetAfter() == nullptr) + return "export "s + EmitTreeNode(b); + for (unsigned i = 0; i < num; ++i) { + if (auto n = node->GetPair(i)) { + std::string s = EmitXXportAsPairNode(n); + if (!s.empty() && s.front() == '{' && !str.empty() && str.back() == '}') { + str.pop_back(); + s.erase(0, 1); + } + str += i ? ", "s + s : s; + } + } + if (auto n = node->GetTarget()) { + str += " from "s + EmitTreeNode(n); + } + str = Clean(str); + if (str.empty()) + str = "{}"s; + str = deco + (node->IsExportType() ? 
"export type "s : "export "s) + str; + mPrecedence = '\030'; + return HandleTreeNode(str, node); +} + +std::string Emitter::EmitImportNode(ImportNode *node) { + if (node == nullptr) + return std::string(); + std::string str("import "); + if (node->IsImportType()) + str += "type "s; + /* + switch (node->GetProperty()) { + case ImpNone: + break; + case ImpType: + break; + case ImpStatic: + break; + case ImpSingle: + break; + case ImpAll: + break; + case ImpLocal: + break; + case ImpSystem: + break; + default: + MASSERT(0 && "Unexpected enumerator"); + } + */ + auto num = node->GetPairsNum(); + if (num == 1 && node->GetTarget() == nullptr) + if (XXportAsPairNode *pair = node->GetPair(0)) + if (auto a = pair->GetAfter()) + if (auto b = pair->GetBefore()) + if (a->IsIdentifier() && !b->IsLiteral()) { + str += EmitTreeNode(a) + " = "s + EmitTreeNode(b); + return HandleTreeNode(str, node); + } + + for (unsigned i = 0; i < node->GetPairsNum(); ++i) { + if (auto n = node->GetPair(i)) { + std::string s = EmitXXportAsPairNode(n); + auto len = s.length(); + if(len > 13 && s.substr(len - 13) == " as default }"s) + s = s.substr(1, len - 13); // default export from a module + if(len > 8 && s.substr(0, 8) == "default "s) + s = s.substr(8, len - 1); + if (!s.empty() && s.front() == '{' && !str.empty() && str.back() == '}') { + str.pop_back(); + s.erase(0, 1); + } + str += i ? ", "s + s : s; + } + } + if (auto n = node->GetTarget()) { + std::string s = EmitTreeNode(n); + if (num) + str += " from "s + s; + else { + auto p = node->GetParent(); + if (p && (p->IsField() || p->IsTypeOf() || p->IsAwait())) + str += '(' + s + ')'; + else + str += s; + } + } + return HandleTreeNode(str, node); +} + +std::string Emitter::EmitUnaOperatorNode(UnaOperatorNode *node) { + if (node == nullptr) + return std::string(); + bool isPost = node->IsPost(); + const char *op = Emitter::GetEnumOprId(node->GetOprId()); + const Precedence precd = *op & 0x3f; + const bool rl_assoc = *op >> 6; // false: left-to-right, true: right-to-left + std::string opr; + if (auto n = node->GetOpnd()) { + opr = EmitTreeNode(n); + if(precd > mPrecedence || (precd == mPrecedence && (rl_assoc && isPost || !rl_assoc && !isPost))) + opr = '(' + opr + ')'; + } + else + opr = "(NIL)"s; + std::string str; + if(node->IsPost()) + str = opr + std::string(op + 1) + ' '; + else + str = ' ' + std::string(op + 1) + opr; + mPrecedence = precd; + return HandleTreeNode(str, node); +} + +std::string Emitter::EmitBinOperatorNode(BinOperatorNode *node) { + if (node == nullptr) + return std::string(); + const char *op = Emitter::GetEnumOprId(node->GetOprId()); + const Precedence precd = *op & 0x3f; + const bool rl_assoc = *op >> 6; // false: left-to-right, true: right-to-left + std::string lhs, rhs; + if (auto n = node->GetOpndA()) { + lhs = EmitTreeNode(n); + if(precd > mPrecedence || (precd == mPrecedence && rl_assoc)) + lhs = '(' + lhs + ')'; + } + else + lhs = "(NIL) "s; + if (auto n = node->GetOpndB()) { + rhs = EmitTreeNode(n); + if(precd > mPrecedence || (precd == mPrecedence && !rl_assoc)) + rhs = '(' + rhs + ')'; + } + else + rhs = " (NIL)"s; + std::string str(lhs + ' ' + std::string(op + 1) + ' ' + rhs); + mPrecedence = precd; + return HandleTreeNode(str, node); +} + +std::string Emitter::EmitTerOperatorNode(TerOperatorNode *node) { + if (node == nullptr) + return std::string(); + const Precedence precd = '\004'; + const bool rl_assoc = true; // true: right-to-left + std::string str; + if (auto n = node->GetOpndA()) { + str = EmitTreeNode(n); + if(precd > 
mPrecedence || (precd == mPrecedence && rl_assoc)) + str = '(' + str + ')'; + } + str += " ? "s; + if (auto n = node->GetOpndB()) { + str += EmitTreeNode(n); + } + str += " : "s; + if (auto n = node->GetOpndC()) { + auto s = EmitTreeNode(n); + if(precd > mPrecedence) + s = '(' + s + ')'; + str += s; + } + mPrecedence = '\004'; + return HandleTreeNode(str, node); +} + +std::string Emitter::EmitTypeAliasNode(TypeAliasNode *node) { + if (node == nullptr) + return std::string(); + std::string str("type "); + if (auto n = node->GetId()) { + str += EmitUserTypeNode(n); + } + if (auto n = node->GetAlias()) { + str += " = "s + EmitTreeNode(n); + } + mPrecedence = '\030'; + return str; +} + +std::string Emitter::EmitConditionalTypeNode(ConditionalTypeNode *node) { + if (node == nullptr) + return std::string(); + std::string str; + Precedence precd = '\030'; + if (auto n = node->GetTypeA()) { + str = EmitTreeNode(n); + precd = mPrecedence; + } + if (auto n = node->GetTypeB()) { + str = Clean(str); + if (precd < '\024') + str = '(' + str + ')'; + str += " extends "s + EmitTreeNode(n); + } + if (auto n = node->GetTypeC()) { + str += " ? "s + EmitTreeNode(n); + } + if (auto n = node->GetTypeD()) { + str += " : "s + EmitTreeNode(n); + } + mPrecedence = '\004'; + return str; +} + +std::string Emitter::EmitTypeParameterNode(TypeParameterNode *node) { + if (node == nullptr) + return std::string(); + std::string str; + if (auto n = node->GetId()) { + str = EmitTreeNode(n); + } + if (auto n = node->GetExtends()) { + str += " extends "s + EmitTreeNode(n); + } + if (auto n = node->GetDefault()) { + str += " = "s + EmitTreeNode(n); + } + return HandleTreeNode(str, node); +} + +std::string Emitter::EmitBlockNode(BlockNode *node) { + if (node == nullptr) + return std::string(); + std::string str; + if(auto n = node->GetLabel()) { + str = EmitTreeNode(n) + ":\n"s; + } + str += "{\n"s; + for (unsigned i = 0; i < node->GetChildrenNum(); ++i) { + if (auto n = node->GetChildAtIndex(i)) { + str += EmitTreeNode(n) + GetEnding(n); + } + } + str += "}\n"s; + mPrecedence = '\030'; + return HandleTreeNode(str, node); +} + +std::string Emitter::EmitNewNode(NewNode *node) { + if (node == nullptr) + return std::string(); + std::string str; + for (unsigned i = 0; i < node->GetAttrsNum(); ++i) + str += GetEnumAttrId(node->GetAttrAtIndex(i)); + str += "new"s; + if (auto id = node->GetId()) { + std::string idstr = EmitTreeNode(id); + if (mPrecedence <= '\024' + && !id->IsUserType() + && !id->IsLambda() + && !id->IsFunction()) + idstr = '(' + idstr + ')'; + str += ' ' + idstr; + if(!id->IsFunction() && !id->IsLambda() && !id->IsClass()) { + auto num = node->GetArgsNum(); + str += '('; + for (unsigned i = 0; i < num; ++i) { + if (i) + str += ", "s; + if (auto n = node->GetArg(i)) { + str += EmitTreeNode(n); + } + } + str += ')'; + } + } + if (auto n = node->GetBody()) { + str += ' ' + EmitBlockNode(n); + } + // Set mPrecedence, e.g. 
type AType = (new () => Error)|Error; + mPrecedence = '\004'; + return HandleTreeNode(str, node); +} + +std::string Emitter::EmitDeleteNode(DeleteNode *node) { + if (node == nullptr) + return std::string(); + std::string str("delete "s); + if (auto n = node->GetExpr()) { + str += EmitTreeNode(n); + } + mPrecedence = '\021'; + return HandleTreeNode(str, node); +} + +std::string Emitter::EmitAnnotationTypeNode(AnnotationTypeNode *node) { + if (node == nullptr) + return std::string(); + std::string str; + if (auto n = node->GetId()) { + str += ' ' + EmitIdentifierNode(n); + } + mPrecedence = '\030'; + return HandleTreeNode(str, node); +} + +std::string Emitter::EmitDimensionNode(DimensionNode *node) { + if (node == nullptr) + return std::string(); + std::string str; + for (unsigned i = 0; i < node->GetDimensionsNum(); ++i) { + auto n = node->GetDimension(i); + std::string d(n ? std::to_string(n) : ""s); + str += '[' + d + ']'; + } + mPrecedence = '\024'; + return HandleTreeNode(str, node); +} + +std::string Emitter::EmitDeclNode(DeclNode *node) { + if (node == nullptr) + return std::string(); + std::string str(Emitter::GetEnumDeclProp(node->GetProp())); + if (auto n = node->GetVar()) { + str += ' ' + EmitTreeNode(n); + } + if (auto n = node->GetInit()) { + str += " = "s + EmitTreeNode(n); + } + mPrecedence = '\030'; + return HandleTreeNode(str, node); +} + +std::string Emitter::EmitCastNode(CastNode *node) { + if (node == nullptr) + return std::string(); + std::string str; + if (auto n = node->GetDestType()) { + str += '<' + EmitTreeNode(n) + ">"; + } + if (auto n = node->GetExpr()) { + str += EmitTreeNode(n); + } + mPrecedence = '\021'; + return HandleTreeNode(str, node); +} + +std::string Emitter::EmitParenthesisNode(ParenthesisNode *node) { + if (node == nullptr) + return std::string(); + std::string str; + if (auto n = node->GetExpr()) { + str += '(' + EmitTreeNode(n) + ')'; + } + mPrecedence = '\025'; + return HandleTreeNode(str, node); +} + +std::string Emitter::EmitFieldNode(FieldNode *node) { + if (node == nullptr) + return std::string(); + std::string str; + const Precedence precd = '\024'; + if (auto n = node->GetUpper()) { + str = EmitTreeNode(n); + if (precd > mPrecedence) + str = '(' + str + ')'; + } + if (auto n = node->GetField()) { + str += '.' 
+ EmitTreeNode(n); + } + mPrecedence = precd; + return HandleTreeNode(str, node); +} + +std::string Emitter::EmitArrayElementNode(ArrayElementNode *node) { + if (node == nullptr) + return std::string(); + std::string str; + if (auto n = node->GetArray()) { + str = EmitTreeNode(n); + if(mPrecedence < '\024') + str = '(' + str + ')'; + } + str = Clean(str); + for (unsigned i = 0; i < node->GetExprsNum(); ++i) { + if (auto n = node->GetExprAtIndex(i)) { + if (str.back() == '?') + str += '.'; + str += '[' + EmitTreeNode(n) + ']'; + } + } + mPrecedence = '\030'; + return HandleTreeNode(str, node); +} + +std::string Emitter::EmitArrayLiteralNode(ArrayLiteralNode *node) { + if (node == nullptr) + return std::string(); + std::string str("["); + for (unsigned i = 0; i < node->GetLiteralsNum(); ++i) { + if (i) + str += ", "s; + if (auto n = node->GetLiteral(i)) { + str += EmitTreeNode(n); + } + } + str += ']'; + mPrecedence = '\030'; + return HandleTreeNode(str, node); +} + +std::string Emitter::EmitBindingElementNode(BindingElementNode *node) { + if (node == nullptr) + return std::string(); + std::string str; + if (auto n = node->GetVariable()) { + str += EmitTreeNode(n); + } + if (auto n = node->GetElement()) { + if (!str.empty()) + str += ": "s; + str += EmitTreeNode(n); + } + mPrecedence = '\030'; + return HandleTreeNode(str, node); +} + +std::string Emitter::EmitBindingPatternNode(BindingPatternNode *node) { + if (node == nullptr) + return std::string(); + // Needs a flag to distinguish between array destructuring and object destructuring + // Object destructuring: optional-prop.ts + // Array destructuring: trailing-commas.ts + std::string str; + + for (unsigned i = 0; i < node->GetElementsNum(); ++i) { + if (i) + str += ", "s; + if (auto n = node->GetElement(i)) { + str += EmitTreeNode(n); + } + } + + if (node->GetProp() == BPP_ArrayBinding) + str = '[' + str + ']'; + else + str = '{' + str + '}'; + str = HandleTreeNode(str, node); + + if (auto n = node->GetType()) { + str += ": "s + EmitTreeNode(n); + } + if (auto n = node->GetInit()) { + str += " = "s + EmitTreeNode(n); + } + mPrecedence = '\030'; + return str; +} + +std::string Emitter::EmitNumIndexSigNode(NumIndexSigNode *node) { + if (node == nullptr) + return std::string(); + std::string str; + if (auto n = node->GetKey()) + str += "[ "s + EmitTreeNode(n) + " : number ]"; + if (auto n = node->GetDataType()) { + str += " : "s + EmitTreeNode(n); + } + mPrecedence = '\030'; + return HandleTreeNode(str, node); +} + +std::string Emitter::EmitStrIndexSigNode(StrIndexSigNode *node) { + if (node == nullptr) + return std::string(); + std::string str; + if (auto n = node->GetKey()) + str += "[ "s + EmitTreeNode(n) + " : string ]"; + if (auto n = node->GetDataType()) { + str += " : "s + EmitTreeNode(n); + } + mPrecedence = '\030'; + return HandleTreeNode(str, node); +} + +static std::string MethodString(std::string &func) { + size_t s = func.substr(0, 9) == "function " ? 9 : 0; + return func.back() == '}' ? 
func.substr(s) + "\n"s : func.substr(s) + ";\n"s; +} + +std::string Emitter::EmitStructNode(StructNode *node) { + if (node == nullptr) + return std::string(); + std::string str; + const char *suffix = ";\n"; + switch(node->GetProp()) { + case SProp_CStruct: + str = "struct "s; + break; + case SProp_TSInterface: + str = "interface "s; + break; + case SProp_TSEnum: + str = "enum "s; + suffix = ",\n"; + break; + case SProp_NA: + str = ""s; + break; + default: + MASSERT(0 && "Unexpected enumerator"); + } + + if (auto n = node->GetStructId()) { + str += EmitIdentifierNode(n); + if (str.substr(0,16) == "AnonymousStruct_") + str = "class "s + str; + } + + auto num = node->GetTypeParamsNum(); + if(num) { + str += '<'; + for (unsigned i = 0; i < num; ++i) { + if (i) + str += ", "s; + if (auto n = node->GetTypeParamAtIndex(i)) + str += EmitTreeNode(n); + } + str += '>'; + } + + for (unsigned i = 0; i < node->GetSupersNum(); ++i) { + str += i ? ", "s : " extends "s; + if (auto n = node->GetSuper(i)) + str += EmitTreeNode(n); + } + str += " {\n"s; + for (unsigned i = 0; i < node->GetFieldsNum(); ++i) { + if (auto n = node->GetField(i)) { + str += EmitTreeNode(n) + suffix; + } + } + + if (auto n = node->GetNumIndexSig()) { + str += EmitNumIndexSigNode(n) + "\n"s;; + } + if (auto n = node->GetStrIndexSig()) { + str += EmitStrIndexSigNode(n) + "\n"; + } + + for (unsigned i = 0; i < node->GetMethodsNum(); ++i) { + if (auto n = node->GetMethod(i)) { + std::string func = EmitFunctionNode(n); + func = Clean(func); + str += MethodString(func); + } + } + + str += "}\n"s; + mPrecedence = '\030'; + return HandleTreeNode(str, node); +} + +std::string Emitter::EmitFieldLiteralNode(FieldLiteralNode *node) { + if (node == nullptr) + return std::string(); + std::string str; + auto lit = node->GetLiteral(); + if (auto n = node->GetFieldName()) { + if(lit && lit->IsFunction() && + static_cast(lit)->GetFuncName() == n) { + str = EmitTreeNode(lit); + if (str.substr(0, 9) == "function ") + str = str.substr(9); + lit = nullptr; + } else { + str = EmitTreeNode(n); + if(lit) + str += ": "s; + } + } + if(lit) { + auto s = EmitTreeNode(lit); + if(s.size() > 4 && (s[0] == 's' || s[0] == 'g') && !s.compare(1, 3, "et ")) + str = s; + else + str += s; + } + mPrecedence = '\030'; + return HandleTreeNode(str, node); +} + +std::string Emitter::EmitStructLiteralNode(StructLiteralNode *node) { + if (node == nullptr) + return std::string(); + std::string str("{"); + auto num = node->GetFieldsNum(); + for (unsigned i = 0; i < num; ++i) { + if (i) + str += ", "s; + if (auto n = node->GetField(i)) { + str += EmitFieldLiteralNode(n); + } + } + + // Workaround for an identifier issue + if (num > 1 && str.length() > 6 && + (str.substr(str.length() - 6) == ": true" || + str.substr(str.length() - 7) == ": false")) + str += ','; + + str += '}'; + mPrecedence = '\030'; + return HandleTreeNode(str, node); +} + +std::string Emitter::EmitNamespaceNode(NamespaceNode *node) { + if (node == nullptr) + return std::string(); + std::string str("namespace "); + if (auto n = node->GetId()) { + std::string s = EmitTreeNode(n); + str += Clean(s); + } + str += " {\n"s; + for (unsigned i = 0; i < node->GetElementsNum(); ++i) { + if (auto n = node->GetElementAtIndex(i)) { + str += EmitTreeNode(n) + GetEnding(n); + } + } + str += "}\n"s; + return HandleTreeNode(str, node); +} + +std::string Emitter::EmitVarListNode(VarListNode *node) { + if (node == nullptr) + return std::string(); + std::string str; + + for (unsigned i = 0; i < node->GetVarsNum(); ++i) { + if 
(auto n = node->GetVarAtIndex(i)) { + str += ' ' + EmitIdentifierNode(n); + } + } + + mPrecedence = '\030'; + return HandleTreeNode(str, node); +} + +std::string Emitter::EmitExprListNode(ExprListNode *node) { + if (node == nullptr) + return std::string(); + std::string str; + + for (unsigned i = 0; i < node->GetExprsNum(); ++i) { + if (auto n = node->GetExprAtIndex(i)) { + str += ' ' + EmitTreeNode(n); + } + } + + mPrecedence = '\030'; + return HandleTreeNode(str, node); +} + +std::string Emitter::EmitTemplateLiteralNode(TemplateLiteralNode *node) { + if (node == nullptr) + return std::string(); + std::string str("`"); + auto num = node->GetTreesNum(); + for (unsigned i = 0; i < num; ++i) { + if (auto n = node->GetTreeAtIndex(i)) { + std::string s(EmitTreeNode(n)); + if (i & 0x1) + str += "${"s + s+ '}'; + else + str += s.front() == '"' && s.back() == '"' && s.size() >= 2 ? s.substr(1, s.size() - 2) : s; + } + } + str += '`'; + mPrecedence = '\030'; + return HandleTreeNode(str, node); +} + +std::string Emitter::EmitLiteralNode(LiteralNode *node) { + if (node == nullptr) + return std::string(); + LitData lit = node->GetData(); + std::string str(AstDump::GetEnumLitData(lit)); + if(lit.mType == LT_StringLiteral || lit.mType == LT_CharacterLiteral) + str = '"' + str + '"'; + mPrecedence = '\030'; + str = HandleTreeNode(str, node); + if (auto n = node->GetType()) { + str += ": "s + EmitTreeNode(n); + } + if (auto n = node->GetInit()) { + str += " = "s + EmitTreeNode(n); + } + return str; +} + +std::string Emitter::EmitRegExprNode(RegExprNode *node) { + if (node == nullptr) + return std::string(); + std::string str; + if (const char* e = node->GetData().mExpr) + str = "/"s + e + '/'; + if (const char* f = node->GetData().mFlags) + str += f; + return str; +} + +std::string Emitter::EmitThrowNode(ThrowNode *node) { + if (node == nullptr) + return std::string(); + std::string str("throw "); + for (unsigned i = 0; i < node->GetExceptionsNum(); ++i) { + if (auto n = node->GetExceptionAtIndex(i)) { + str += EmitTreeNode(n); + } + } + return HandleTreeNode(str, node); +} + +std::string Emitter::EmitCatchNode(CatchNode *node) { + if (node == nullptr) + return std::string(); + std::string str("catch"); + unsigned num = node->GetParamsNum(); + if (num > 0) { + str += '('; + for (unsigned i = 0; i < num; ++i) { + if (i) + str += ", "s; + if (auto n = node->GetParamAtIndex(i)) { + str += EmitTreeNode(n); + } + } + str += ')'; + } + if (auto n = node->GetBlock()) { + str += EmitBlockNode(n); + } + return HandleTreeNode(str, node); +} + +std::string Emitter::EmitFinallyNode(FinallyNode *node) { + if (node == nullptr) + return std::string(); + std::string str("finally "); + if (auto n = node->GetBlock()) { + str += EmitBlockNode(n); + } + else + str += "{}\n"s; + return HandleTreeNode(str, node); +} + +std::string Emitter::EmitTryNode(TryNode *node) { + if (node == nullptr) + return std::string(); + std::string str("try "); + if (auto n = node->GetBlock()) { + str += EmitBlockNode(n); + } + for (unsigned i = 0; i < node->GetCatchesNum(); ++i) { + if (auto n = node->GetCatchAtIndex(i)) { + str += EmitCatchNode(n); + } + } + if (auto n = node->GetFinally()) { + str += EmitFinallyNode(n); + } + return HandleTreeNode(str, node); +} + +std::string Emitter::EmitExceptionNode(ExceptionNode *node) { + if (node == nullptr) + return std::string(); + std::string str; + if (auto n = node->GetException()) { + str += ' ' + EmitIdentifierNode(n); + } + mPrecedence = '\030'; + return HandleTreeNode(str, node); +} + 
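+// Worked example for the convention used by EmitTemplateLiteralNode above (illustrative
+// input): for the TS literal `sum is ${a + b}!`, the child trees alternate between string
+// chunks (even indices, emitted with their surrounding quotes stripped) and embedded
+// expressions (odd indices, wrapped as ${...}), so the emitted text is again
+//   `sum is ${a + b}!`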
+std::string Emitter::EmitReturnNode(ReturnNode *node) { + if (node == nullptr) + return std::string(); + std::string str("return"); + if (auto n = node->GetResult()) { + str += ' ' + EmitTreeNode(n); + } + return HandleTreeNode(str, node); +} + +std::string Emitter::EmitYieldNode(YieldNode *node) { + if (node == nullptr) + return std::string(); + std::string str(node->IsTransfer() ? "yield* " : "yield "); + if (auto n = node->GetResult()) { + str += EmitTreeNode(n); + } + mPrecedence = '\024'; + return str; +} + +std::string Emitter::EmitCondBranchNode(CondBranchNode *node) { + if (node == nullptr) + return std::string(); + std::string str; + if(auto n = node->GetLabel()) { + str = EmitTreeNode(n) + ":\n"s; + } + str += "if("s; + if (auto n = node->GetCond()) { + auto cond = EmitTreeNode(n); + str += Clean(cond); + } + str += ")\n"s; + if (auto n = node->GetTrueBranch()) { + str += EmitTreeNode(n) + GetEnding(n); + } + if (auto n = node->GetFalseBranch()) { + str += "else\n"s + EmitTreeNode(n) + GetEnding(n); + } + return HandleTreeNode(str, node); +} + +std::string Emitter::EmitBreakNode(BreakNode *node) { + if (node == nullptr) + return std::string(); + std::string str("break"); + if (auto n = node->GetTarget()) { + str += ' ' + EmitTreeNode(n); + } + return HandleTreeNode(str, node); +} + +std::string Emitter::EmitContinueNode(ContinueNode *node) { + if (node == nullptr) + return std::string(); + std::string str("continue"); + if (auto n = node->GetTarget()) { + str += ' ' + EmitTreeNode(n); + } + return HandleTreeNode(str, node); +} + +std::string Emitter::EmitForLoopNode(ForLoopNode *node) { + if (node == nullptr) + return std::string(); + std::string str; + if(auto n = node->GetLabel()) { + str = EmitTreeNode(n) + ":\n"s; + } + str += "for("s; + switch(node->GetProp()) { + case FLP_Regular: + { + for (unsigned i = 0; i < node->GetInitsNum(); ++i) + if (auto n = node->GetInitAtIndex(i)) { + std::string init = EmitTreeNode(n); + if (i) { + str += ", "s; + if(init.substr(0, 4) == "let " || init.substr(0, 4) == "var ") + init = init.substr(4); + else if(init.substr(0, 6) == "const ") + init = init.substr(6); + } + str += init; + } + str += "; "s; + if (auto n = node->GetCond()) { + str += EmitTreeNode(n); + } + str += "; "s; + for (unsigned i = 0; i < node->GetUpdatesNum(); ++i) + if (auto n = node->GetUpdateAtIndex(i)) { + if (i) + str += ", "s; + str += EmitTreeNode(n); + } + break; + } + case FLP_JSIn: + { + if (auto n = node->GetVariable()) { + str += EmitTreeNode(n); + } + str += " in "s; + if (auto n = node->GetSet()) { + str += EmitTreeNode(n); + } + break; + } + case FLP_JSOf: + { + if (auto n = node->GetVariable()) { + str += EmitTreeNode(n); + } + str += " of "s; + if (auto n = node->GetSet()) { + str += EmitTreeNode(n); + } + break; + } + case FLP_NA: + return "FLP_NA"s; + default: + MASSERT(0 && "Unexpected enumerator"); + } + str += ')'; + + if (auto n = node->GetBody()) { + str += EmitTreeNode(n) + GetEnding(n); + } + return HandleTreeNode(str, node); +} + +std::string Emitter::EmitWhileLoopNode(WhileLoopNode *node) { + if (node == nullptr) + return std::string(); + std::string str; + if(auto n = node->GetLabel()) { + str = EmitTreeNode(n) + ":\n"s; + } + str += "while("s; + if (auto n = node->GetCond()) { + str += EmitTreeNode(n); + } + str += ')'; + if (auto n = node->GetBody()) { + str += EmitTreeNode(n) + GetEnding(n); + } + return HandleTreeNode(str, node); +} + +std::string Emitter::EmitDoLoopNode(DoLoopNode *node) { + if (node == nullptr) + return std::string(); + 
std::string str; + if(auto n = node->GetLabel()) { + str = EmitTreeNode(n) + ":\n"s; + } + str += "do "s; + if (auto n = node->GetBody()) { + str += EmitTreeNode(n); + } + str += "while("s; + if (auto n = node->GetCond()) { + str += EmitTreeNode(n); + } + str += ");\n"s; + return HandleTreeNode(str, node); +} + +std::string Emitter::EmitSwitchLabelNode(SwitchLabelNode *node) { + if (node == nullptr) + return std::string(); + std::string str; + if(node->IsDefault()) + str += "default:\n"s; + if(auto n = node->GetValue()) { + auto ce = EmitTreeNode(n); + str += "case "s + Clean(ce) + ":\n"s; + } + return HandleTreeNode(str, node); +} + +std::string Emitter::EmitSwitchCaseNode(SwitchCaseNode *node) { + if (node == nullptr) + return std::string(); + std::string str; + for (unsigned i = 0; i < node->GetLabelsNum(); ++i) { + if (auto n = node->GetLabelAtIndex(i)) { + str += EmitTreeNode(n); + } + } + for (unsigned i = 0; i < node->GetStmtsNum(); ++i) { + if (auto n = node->GetStmtAtIndex(i)) + str += EmitTreeNode(n) + GetEnding(n); + } + return HandleTreeNode(str, node); +} + +std::string Emitter::EmitSwitchNode(SwitchNode *node) { + if (node == nullptr) + return std::string(); + std::string str; + if(auto n = node->GetLabel()) { + str = EmitTreeNode(n) + ":\n"s; + } + str += "switch("s; + if (auto n = node->GetExpr()) { + auto expr = EmitTreeNode(n); + str += Clean(expr); + } + str += "){\n"s; + for (unsigned i = 0; i < node->GetCasesNum(); ++i) { + if(auto n = node->GetCaseAtIndex(i)) + str += EmitTreeNode(n); + } + str += "}\n"s; + return HandleTreeNode(str, node); +} + +std::string Emitter::EmitAssertNode(AssertNode *node) { + if (node == nullptr) + return std::string(); + std::string str; + if (auto n = node->GetExpr()) { + str += ' ' + EmitTreeNode(n); + } + if (auto n = node->GetMsg()) { + str += ' ' + EmitTreeNode(n); + } + mPrecedence = '\030'; + return HandleTreeNode(str, node); +} + +std::string Emitter::EmitCallNode(CallNode *node) { + if (node == nullptr) + return std::string(); + // Function call: left-to-right, precedence = 20 + std::string str; + if (auto n = node->GetMethod()) { + std::string s = EmitTreeNode(n); + bool optional = n->IsOptional(); + if (optional && !s.empty() && s.back() == '?') + s.pop_back(); + if(n->IsFunction() || n->IsLambda() || n->IsTerOperator()) + str += '(' + s + ')'; + else + str += s; + if (optional) + str += "?."s; // for optional chaining + } + if(auto num = node->GetTypeArgumentsNum()) { + str += '<'; + for (unsigned i = 0; i < num; ++i) { + if (i) + str += ", "s; + if (auto n = node->GetTypeArgumentAtIndex(i)) { + str += EmitTreeNode(n); + } + } + str += '>'; + } + if (auto tagged = node->GetTaggedTemplate()) { + str += EmitTreeNode(tagged); + } else { + str += '('; + for (unsigned i = 0; i < node->GetArgsNum(); ++i) { + if (i) + str += ", "s; + if (auto n = node->GetArg(i)) + str += EmitTreeNode(n); + } + str += ')'; + } + mPrecedence = '\024'; + return HandleTreeNode(str, node); +} + +std::string Emitter::EmitInterfaceNode(InterfaceNode *node) { + if (node == nullptr) + return std::string(); + std::string str = "interface "s + node->GetName(); + + /* + auto num = node->GetTypeParamsNum(); + if(num) { + str += '<'; + for (unsigned i = 0; i < num; ++i) { + if (i) + str += ", "s; + if (auto n = node->GetTypeParamAtIndex(i)) + str += EmitTreeNode(n); + } + str += '>'; + } + */ + + for (unsigned i = 0; i < node->GetSuperInterfacesNum(); ++i) { + str += i ? 
", "s : " implements "s; + if (auto n = node->GetSuperInterfaceAtIndex(i)) + str += EmitTreeNode(n); + } + str += " {\n"s; + + for (unsigned i = 0; i < node->GetFieldsNum(); ++i) { + if (auto n = node->GetField(i)) { + str += EmitIdentifierNode(n) + ";\n"s; + } + } + + for (unsigned i = 0; i < node->GetMethodsNum(); ++i) { + if (auto n = node->GetMethod(i)) { + std::string func = EmitFunctionNode(n); + func = Clean(func); + str += MethodString(func); + } + } + + str += "}\n"s; + return HandleTreeNode(str, node); +} + +std::string Emitter::EmitClassNode(ClassNode *node) { + if (node == nullptr) + return std::string(); + std::string str; + for (unsigned i = 0; i < node->GetAnnotationsNum(); ++i) + if (auto n = node->GetAnnotationAtIndex(i)) + str += '@' + EmitTreeNode(n) + "\n"s; + + for (unsigned i = 0; i < node->GetAttributesNum(); ++i) + str += GetEnumAttrId(node->GetAttribute(i)); + + str += "class "s + node->GetName(); + + auto num = node->GetTypeParamsNum(); + if(num) { + str += '<'; + for (unsigned i = 0; i < num; ++i) { + if (i) + str += ", "s; + if (auto n = node->GetTypeParamAtIndex(i)) + str += EmitTreeNode(n); + } + str += '>'; + } + + auto classNum = node->GetSuperClassesNum(); + for (unsigned i = 0; i < classNum; ++i) { + str += i ? ", "s : " extends "s; + if (auto n = node->GetSuperClass(i)) { + std::string s = EmitTreeNode(n); + str += mPrecedence > '\023' ? s : '(' + s + ')'; + } + } + for (unsigned i = 0; i < node->GetSuperInterfacesNum(); ++i) { + str += i ? ", "s : " implements "s; + if (auto n = node->GetSuperInterface(i)) + str += EmitTreeNode(n); + } + str += " {\n"s; + + for (unsigned i = 0; i < node->GetDeclaresNum(); ++i) { + if (auto n = node->GetDeclare(i)) { + std::string s = EmitTreeNode(n); + Replace(s, " var ", " "); // TODO: JS_Var for field + str += s + ";\n"s; + } + } + + for (unsigned i = 0; i < node->GetFieldsNum(); ++i) { + if (auto n = node->GetField(i)) { + str += EmitTreeNode(n) + ";\n"s; + } + } + + for (unsigned i = 0; i < node->GetConstructorsNum(); ++i) { + if (auto n = node->GetConstructor(i)) { + std::string func = EmitFunctionNode(n); + if (func.substr(0, 9) == "function ") + func = func.substr(9); + func = Clean(func); + str += func.back() == '}' ? 
func + "\n"s : func + ";\n"s; + } + } + + for (unsigned i = 0; i < node->GetMethodsNum(); ++i) { + if (FunctionNode *n = node->GetMethod(i)) { + std::string func = EmitFunctionNode(n); + func = Clean(func); + str += MethodString(func); + } + } + + for (unsigned i = 0; i < node->GetInstInitsNum(); ++i) { + if (auto n = node->GetInstInit(i)) { + str += EmitBlockNode(n) + "\n"s; + } + } + + for (unsigned i = 0; i < node->GetLocalClassesNum(); ++i) { + if (auto n = node->GetLocalClass(i)) { + str += EmitClassNode(n) + "\n"s; + } + } + + for (unsigned i = 0; i < node->GetLocalInterfacesNum(); ++i) { + if (auto n = node->GetLocalInterface(i)) { + str += EmitInterfaceNode(n) + "\n"s; + } + } + + str += "}\n"; + mPrecedence = '\020'; + return HandleTreeNode(str, node); +} + +std::string Emitter::EmitPassNode(PassNode *node) { + if (node == nullptr) + return std::string(); + std::string str("PassNode {"); + + for (unsigned i = 0; i < node->GetChildrenNum(); ++i) { + if (auto n = node->GetChild(i)) { + str += ' ' + EmitTreeNode(n); + } + } + + str += '}'; + mPrecedence = '\030'; + return HandleTreeNode(str, node); +} + +std::string Emitter::EmitLambdaNode(LambdaNode *node) { + if (node == nullptr) + return std::string(); + switch (node->GetProperty()) { + case LP_JSArrowFunction: + break; + case LP_JavaLambda: + case LP_NA: + default: + MASSERT(0 && "Unexpected enumerator"); + } + + std::string str; + for (unsigned i = 0; i < node->GetAttrsNum(); ++i) + str += GetEnumAttrId(node->GetAttrAtIndex(i)); + auto num = node->GetTypeParamsNum(); + if(num) { + str += '<'; + for (unsigned i = 0; i < num; ++i) { + if (i) + str += ", "s; + if (auto n = node->GetTypeParamAtIndex(i)) { + str += EmitTreeNode(n); + } + } + str += '>'; + } + + str += '('; + for (unsigned i = 0; i < node->GetParamsNum(); ++i) { + if (i) + str += ", "s; + if (auto n = node->GetParam(i)) { + str += EmitTreeNode(n); + } + } + str += ')'; + + if (auto n = node->GetBody()) { + if (auto t = node->GetRetType()) { + str += ": "s + EmitTreeNode(t); + } + std::string s = EmitTreeNode(n); + s = Clean(s); + if (n->IsStructLiteral()) + s = '(' + s + ')'; + str += " => "s + s; + } + else { + if (auto t = node->GetRetType()) { + str += " => "s + EmitTreeNode(t); + } + } + + mPrecedence = '\004'; + return HandleTreeNode(str, node); +} + +std::string Emitter::EmitInstanceOfNode(InstanceOfNode *node) { + if (node == nullptr) + return std::string(); + const Precedence precd = '\014'; + const bool rl_assoc = false; // false: left-to-right + std::string lhs, rhs; + if (auto n = node->GetLeft()) { + lhs = EmitTreeNode(n); + if(precd > mPrecedence) + lhs = '(' + lhs + ')'; + } + else + lhs = "(NIL) "s; + if (auto n = node->GetRight()) { + rhs = EmitTreeNode(n); + if(precd > mPrecedence || (precd == mPrecedence && !rl_assoc)) + rhs = '(' + rhs + ')'; + } + else + rhs = " (NIL)"s; + std::string str(lhs + " instanceof "s + rhs); + mPrecedence = precd; + return HandleTreeNode(str, node); +} + +std::string Emitter::EmitTypeOfNode(TypeOfNode *node) { + if (node == nullptr) + return std::string(); + const Precedence precd = '\121' & 0x3f; + std::string str("typeof "s), rhs; + if (auto n = node->GetExpr()) { + rhs = EmitTreeNode(n); + if(precd > mPrecedence && !n->IsConditionalType()) // right-to-left + rhs = '(' + rhs + ')'; + } + else + rhs = " (NIL)"s; + str += rhs; + mPrecedence = precd; + return HandleTreeNode(str, node); +} + +std::string Emitter::EmitKeyOfNode(KeyOfNode *node) { + if (node == nullptr) + return std::string(); + const Precedence precd = 
'\121' & 0x3f; + std::string str("keyof "s), rhs; + if (auto n = node->GetExpr()) { + rhs = EmitTreeNode(n); + if(precd > mPrecedence) + rhs = '(' + rhs + ')'; + } + else + rhs = " (NIL)"s; + str += rhs; + mPrecedence = precd; + return HandleTreeNode(str, node); +} + +std::string Emitter::EmitInferNode(InferNode *node) { + if (node == nullptr) + return std::string(); + std::string str("infer "); + if (auto n = node->GetExpr()) { + str += EmitTreeNode(n); + } + mPrecedence = '\024'; + return HandleTreeNode(str, node); +} + +std::string Emitter::EmitInNode(InNode *node) { + if (node == nullptr) + return std::string(); + std::string str; + if (auto n = node->GetLeft()) { + str += EmitTreeNode(n); + } + str += " in "s; + if (auto n = node->GetRight()) { + str += EmitTreeNode(n); + } + mPrecedence = '\014'; + return HandleTreeNode(str, node); +} + +std::string Emitter::EmitIsNode(IsNode *node) { + if (node == nullptr) + return std::string(); + std::string str; + if (auto n = node->GetLeft()) { + str = EmitTreeNode(n); + } + str += " is "s; + if (auto n = node->GetRight()) { + str += EmitTreeNode(n); + } + return str; +} + +std::string Emitter::EmitAwaitNode(AwaitNode *node) { + if (node == nullptr) + return std::string(); + std::string str("await "s); + if (auto n = node->GetExpr()) { + str += EmitTreeNode(n); + } + mPrecedence = '\023'; + if (node->IsStmt()) + str += ";\n"s; + return str; +} + +std::string Emitter::EmitNameTypePairNode(NameTypePairNode *node) { + if (node == nullptr) + return std::string(); + std::string str; + if (auto n = node->GetVar()) { + str += EmitTreeNode(n) + ": "s; + } + if (auto n = node->GetType()) { + str += EmitTreeNode(n); + } + return str; +} + +std::string Emitter::EmitTupleTypeNode(TupleTypeNode *node) { + if (node == nullptr) + return std::string(); + std::string str("[ "); + + for (unsigned i = 0; i < node->GetFieldsNum(); ++i) { + if (i) + str += ", "s; + if (auto n = node->GetField(i)) { + str += EmitNameTypePairNode(n); + } + } + str += " ]"s; + + mPrecedence = '\030'; + return str; +} + +std::string Emitter::EmitTripleSlashNode(TripleSlashNode *node) { + if (node == nullptr) + return std::string(); + std::string str; + str += "/// GetProp()); + if (auto n = node->GetValue()) { + str += '=' + EmitTreeNode(n); + } + str += " />"s; + return str; +} + +std::string Emitter::EmitModuleNode(ModuleNode *node) { + if (node == nullptr) + return std::string(); + + std::string str; + for (unsigned i = 0; i < node->GetTreesNum(); ++i) { + if (auto n = node->GetTree(i)) { + str += EmitTreeNode(n) + GetEnding(n); + } + } + + std::string name = node->GetFilename(); + if (auto p = node->GetParent()) { + if (node->IsAmbient()) + name = '"' + name + '"'; + str = "module "s + name + " {\n"s + str + "}\n"s; + } else + str = "// Filename: "s + name + "\n"s + str; + return HandleTreeNode(str, node); +} + +std::string Emitter::EmitAttrNode(AttrNode *node) { + if (node == nullptr) + return std::string(); + std::string str(GetEnumAttrId(node->GetId())); + return HandleTreeNode(str, node); +} + +std::string Emitter::EmitArrayTypeNode(ArrayTypeNode *node) { + // TODO + std::string str = ""; + return str; +} + +std::string Emitter::EmitFunctionTypeNode(FunctionTypeNode *node) { + // TODO + std::string str = ""; + return str; +} + +std::string Emitter::EmitPrimTypeNode(PrimTypeNode *node) { + if (node == nullptr) + return std::string(); + auto k = node->GetPrimType(); + std::string str = k == TY_None ? 
std::string() : Emitter::GetEnumTypeId(k); + if (node->IsUnique()) + str = "unique "s + str; + mPrecedence = '\030'; + return str; +} + +std::string Emitter::EmitPrimArrayTypeNode(PrimArrayTypeNode *node) { + if (node == nullptr) + return std::string(); + std::string str, accessor; + for (unsigned i = 0; i < node->GetAttrsNum(); ++i) { + std::string s = GetEnumAttrId(node->GetAttrAtIndex(i)); + if (s == "get "s || s == "set "s) + accessor += s; + else + str += s; + } + str += accessor; + if (auto n = node->GetPrim()) { + str += EmitPrimTypeNode(n); + } + if (auto n = node->GetDims()) { + str += EmitDimensionNode(n); + Replace(str, "never[", "["); + } + return HandleTreeNode(str, node); +} + +std::string Emitter::EmitTreeNode(TreeNode *node) { + if (node == nullptr) + return std::string(); + switch (node->GetKind()) { + case NK_Module: + return EmitModuleNode(static_cast(node)); + break; + case NK_Package: + return EmitPackageNode(static_cast(node)); + break; + case NK_XXportAsPair: + return EmitXXportAsPairNode(static_cast(node)); + break; + case NK_Import: + return EmitImportNode(static_cast(node)); + break; + case NK_Export: + return EmitExportNode(static_cast(node)); + break; + case NK_Declare: + return EmitDeclareNode(static_cast(node)); + break; + case NK_Decl: + return EmitDeclNode(static_cast(node)); + break; + case NK_Identifier: + return EmitIdentifierNode(static_cast(node)); + break; + case NK_Field: + return EmitFieldNode(static_cast(node)); + break; + case NK_Dimension: + return EmitDimensionNode(static_cast(node)); + break; + case NK_Attr: + return EmitAttrNode(static_cast(node)); + break; + case NK_NameTypePair: + return EmitNameTypePairNode(static_cast(node)); + break; + case NK_PrimType: + return EmitPrimTypeNode(static_cast(node)); + break; + case NK_PrimArrayType: + return EmitPrimArrayTypeNode(static_cast(node)); + break; + case NK_ArrayType: + return EmitArrayTypeNode(static_cast(node)); + break; + case NK_FunctionType: + return EmitFunctionTypeNode(static_cast(node)); + break; + case NK_UserType: + return EmitUserTypeNode(static_cast(node)); + break; + case NK_TypeParameter: + return EmitTypeParameterNode(static_cast(node)); + break; + case NK_AsType: + return EmitAsTypeNode(static_cast(node)); + break; + case NK_TypeAlias: + return EmitTypeAliasNode(static_cast(node)); + break; + case NK_ConditionalType: + return EmitConditionalTypeNode(static_cast(node)); + break; + case NK_TupleType: + return EmitTupleTypeNode(static_cast(node)); + break; + case NK_Cast: + return EmitCastNode(static_cast(node)); + break; + case NK_Parenthesis: + return EmitParenthesisNode(static_cast(node)); + break; + case NK_BindingElement: + return EmitBindingElementNode(static_cast(node)); + break; + case NK_BindingPattern: + return EmitBindingPatternNode(static_cast(node)); + break; + case NK_Struct: + return EmitStructNode(static_cast(node)); + break; + case NK_StructLiteral: + return EmitStructLiteralNode(static_cast(node)); + break; + case NK_FieldLiteral: + return EmitFieldLiteralNode(static_cast(node)); + break; + case NK_NumIndexSig: + return EmitNumIndexSigNode(static_cast(node)); + break; + case NK_StrIndexSig: + return EmitStrIndexSigNode(static_cast(node)); + break; + case NK_ComputedName: + return EmitComputedNameNode(static_cast(node)); + break; + case NK_ArrayElement: + return EmitArrayElementNode(static_cast(node)); + break; + case NK_ArrayLiteral: + return EmitArrayLiteralNode(static_cast(node)); + break; + case NK_VarList: + return EmitVarListNode(static_cast(node)); + break; + 
case NK_ExprList: + return EmitExprListNode(static_cast(node)); + break; + case NK_TemplateLiteral: + return EmitTemplateLiteralNode(static_cast(node)); + break; + case NK_RegExpr: + return EmitRegExprNode(static_cast(node)); + break; + case NK_Literal: + return EmitLiteralNode(static_cast(node)); + break; + case NK_UnaOperator: + return EmitUnaOperatorNode(static_cast(node)); + break; + case NK_BinOperator: + return EmitBinOperatorNode(static_cast(node)); + break; + case NK_TerOperator: + return EmitTerOperatorNode(static_cast(node)); + break; + case NK_Lambda: + return EmitLambdaNode(static_cast(node)); + break; + case NK_InstanceOf: + return EmitInstanceOfNode(static_cast(node)); + break; + case NK_TypeOf: + return EmitTypeOfNode(static_cast(node)); + break; + case NK_KeyOf: + return EmitKeyOfNode(static_cast(node)); + break; + case NK_In: + return EmitInNode(static_cast(node)); + break; + case NK_Is: + return EmitIsNode(static_cast(node)); + break; + case NK_Infer: + return EmitInferNode(static_cast(node)); + break; + case NK_TripleSlash: + return EmitTripleSlashNode(static_cast(node)); + break; + case NK_Block: + return EmitBlockNode(static_cast(node)); + break; + case NK_Function: + return EmitFunctionNode(static_cast(node)); + break; + case NK_Class: + return EmitClassNode(static_cast(node)); + break; + case NK_Interface: + return EmitInterfaceNode(static_cast(node)); + break; + case NK_Namespace: + return EmitNamespaceNode(static_cast(node)); + break; + case NK_AnnotationType: + return EmitAnnotationTypeNode(static_cast(node)); + break; + case NK_Annotation: + return EmitAnnotationNode(static_cast(node)); + break; + case NK_Try: + return EmitTryNode(static_cast(node)); + break; + case NK_Catch: + return EmitCatchNode(static_cast(node)); + break; + case NK_Finally: + return EmitFinallyNode(static_cast(node)); + break; + case NK_Exception: + return EmitExceptionNode(static_cast(node)); + break; + case NK_Throw: + return EmitThrowNode(static_cast(node)); + break; + case NK_Await: + return EmitAwaitNode(static_cast(node)); + break; + case NK_Return: + return EmitReturnNode(static_cast(node)); + break; + case NK_Yield: + return EmitYieldNode(static_cast(node)); + break; + case NK_CondBranch: + return EmitCondBranchNode(static_cast(node)); + break; + case NK_Break: + return EmitBreakNode(static_cast(node)); + break; + case NK_Continue: + return EmitContinueNode(static_cast(node)); + break; + case NK_ForLoop: + return EmitForLoopNode(static_cast(node)); + break; + case NK_WhileLoop: + return EmitWhileLoopNode(static_cast(node)); + break; + case NK_DoLoop: + return EmitDoLoopNode(static_cast(node)); + break; + case NK_New: + return EmitNewNode(static_cast(node)); + break; + case NK_Delete: + return EmitDeleteNode(static_cast(node)); + break; + case NK_Call: + return EmitCallNode(static_cast(node)); + break; + case NK_Assert: + return EmitAssertNode(static_cast(node)); + break; + case NK_SwitchLabel: + return EmitSwitchLabelNode(static_cast(node)); + break; + case NK_SwitchCase: + return EmitSwitchCaseNode(static_cast(node)); + break; + case NK_Switch: + return EmitSwitchNode(static_cast(node)); + break; + case NK_Pass: + return EmitPassNode(static_cast(node)); + break; + case NK_Null: + // Ignore NullNode + break; + default: + MASSERT(0 && "Unexpected node kind"); + } + return std::string(); +} + +std::string &Emitter::AddParentheses(std::string &str, TreeNode *node) { + if (mPrecedence < '\024' || + (!node->IsIdentifier() && + !node->IsField() && + !node->IsInfer() && + !node->IsLiteral() 
&& + !node->IsArrayLiteral() && + !node->IsBindingPattern())) { + str = '(' + str + ')'; + mPrecedence = '\030'; + } + return str; +} + +std::string &Emitter::HandleTreeNode(std::string &str, TreeNode *node) { + auto num = node->GetAsTypesNum(); + if(num > 0) { + if (node->IsBinOperator() || + node->IsUnaOperator() || + node->IsTerOperator() || + node->IsNew()) { + str = '(' + str + ')'; + mPrecedence = '\030'; + } + for (unsigned i = 0; i < num; ++i) + if (auto t = node->GetAsTypeAtIndex(i)) { + str += EmitAsTypeNode(t); + mPrecedence = '\003'; + } + } + if(node->IsOptional()) + str = AddParentheses(str, node) + '?'; + if(node->IsNonNull()) + str = AddParentheses(str, node) + '!'; + if(node->IsRest()) + str = "..."s + AddParentheses(str, node); + if(node->IsConst()) { + str = AddParentheses(str, node) + " as const"s; + mPrecedence = '\003'; + } + return str; +} + +std::string Emitter::GetEnumTypeId(TypeId k) { + std::string str(AstDump::GetEnumTypeId(k) + 3); + if (k != TY_Function && k != TY_Object) + str[0] = std::tolower(str[0]); + return str; +} + +std::string Emitter::GetEnumDeclProp(DeclProp k) { + std::string str(AstDump::GetEnumDeclProp(k) + 3); + if(str != "NA") + str[0] = std::tolower(str[0]); + return str; +} + +const char *Emitter::GetEnumOprId(OprId k) { + // The first char in the returned string includes operator precedence and associativity info + // + // bits 7 6 5 4 3 2 1 0 + // 0 ^ ^---^---^-+-^---^---^ + // | |__ operator precedence + // | + // |__ associativity, 0: left-to-right, 1: right-to-left + // + switch (k) { + case OPR_Plus: + return "\121+"; + case OPR_Add: + return "\016+"; + case OPR_Minus: + return "\121-"; + case OPR_Sub: + return "\016-"; + case OPR_Mul: + return "\017*"; + case OPR_Div: + return "\017/"; + case OPR_Mod: + return "\017%"; + case OPR_PreInc: + return "\121++"; + case OPR_Inc: + return "\022++"; + case OPR_PreDec: + return "\121--"; + case OPR_Dec: + return "\022--"; + case OPR_Exp: + return "\120**"; + case OPR_EQ: + return "\013=="; + case OPR_NE: + return "\013!="; + case OPR_GT: + return "\014>"; + case OPR_LT: + return "\014<"; + case OPR_GE: + return "\014>="; + case OPR_LE: + return "\014<="; + case OPR_Band: + return "\012&"; + case OPR_Bor: + return "\010|"; + case OPR_Bxor: + return "\011^"; + case OPR_Bcomp: + return "\121~"; + case OPR_Shl: + return "\015<<"; + case OPR_Shr: + return "\015>>"; + case OPR_Zext: + return "\015>>>"; + case OPR_Land: + return "\007&&"; + case OPR_Lor: + return "\006||"; + case OPR_Not: + return "\121!"; + case OPR_Assign: + return "\103="; + case OPR_AddAssign: + return "\103+="; + case OPR_SubAssign: + return "\103-="; + case OPR_MulAssign: + return "\103*="; + case OPR_DivAssign: + return "\103/="; + case OPR_ModAssign: + return "\103%="; + case OPR_ShlAssign: + return "\103<<="; + case OPR_ShrAssign: + return "\103>>="; + case OPR_BandAssign: + return "\103&="; + case OPR_BorAssign: + return "\103|="; + case OPR_BxorAssign: + return "\103^="; + case OPR_ZextAssign: + return "\103>>>="; + case OPR_NullAssign: + return "\103??="; + case OPR_Arrow: + return "\030 OPR_Arrow"; + case OPR_Diamond: + return "\030 OPR_Diamond"; + case OPR_StEq: + return "\013==="; + case OPR_StNe: + return "\013!=="; + case OPR_ArrowFunction: + return "\030 OPR_ArrowFunction"; + case OPR_NullCoalesce: + return "\005??"; + case OPR_NA: + return "\030 OPR_NA"; + default: + MASSERT(0 && "Unexpected enumerator"); + } + return "UNEXPECTED OprId"; +} + +const char *Emitter::GetEnumTripleSlashProp(TripleSlashProp k) { + switch 
(k) { + case TSP_Path: + return "path"; + case TSP_Types: + return "types"; + case TSP_Lib: + return "lib"; + case TSP_NoDefaultLib: + return "no-default-lib"; + case TSP_NA: + return "TSP_NA"; + default: + MASSERT(0 && "Unexpected enumerator"); + } + return "UNEXPECTED TripleSlashProp"; +} + +} // namespace maplefe diff --git a/src/MapleFE/ast2cpp/src/helper.cpp b/src/MapleFE/ast2cpp/src/helper.cpp new file mode 100644 index 0000000000000000000000000000000000000000..1222f2341802e47bf06284101f033cd65dd21376 --- /dev/null +++ b/src/MapleFE/ast2cpp/src/helper.cpp @@ -0,0 +1,355 @@ +/* +* Copyright (C) [2021-2022] Futurewei Technologies, Inc. All rights reverved. +* +* OpenArkFE is licensed under the Mulan PSL v2. +* You can use this software according to the terms and conditions of the Mulan PSL v2. +* You may obtain a copy of Mulan PSL v2 at: +* +* http://license.coscl.org.cn/MulanPSL2 +* +* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +* FIT FOR A PARTICULAR PURPOSE. +* See the Mulan PSL v2 for more details. +*/ +#include "helper.h" + +namespace maplefe { + +FuncTable hFuncTable; +GeneratorLabels GenFnLabels; + +std::unordered_mapTypeIdToJSTypeCXX = { + // AST TypeId to t2crt JS_Type mapping for JS_Val type of obj props that pts to CXX class fields + {TY_Object, "t2crt::TY_CXX_Object"}, + {TY_Function,"t2crt::TY_CXX_Function"}, + {TY_Boolean, "t2crt::TY_CXX_Bool"}, + {TY_Int, "t2crt::TY_CXX_Long"}, + {TY_String, "t2crt::TY_CXX_String"}, + {TY_Number, "t2crt::TY_CXX_Double"}, + {TY_Double, "t2crt::TY_CXX_Double"}, + {TY_Array, "t2crt::TY_CXX_Object"}, + {TY_Class, "t2crt::TY_CXX_Object"}, + {TY_Any, "t2crt::TY_CXX_Any"}, +}; + +std::string GeneratorFn_start = R"""( + if (yield != nullptr) + goto *yield; +)"""; + +std::string GeneratorFn_return = R"""( + res.value = undefined; + res.done = true; + return; +)"""; + +// Used to build GetProp for calls to get Object (class Object in ts2cpp.h) property +std::string hlpGetJSValTypeStr(TypeId typeId) { + switch(typeId) { + case TY_Object: + case TY_Class: + return "Obj"; + case TY_Any: + return ""; + case TY_Function: + return "Func"; + case TY_Boolean: + return "bool"; + case TY_Number: + return "Double"; + case TY_String: + return "Str"; + } + return std::string(); +} + +// Get TypeId info for a treenode. +TypeId hlpGetTypeId(TreeNode* node) { + // lookup typetable + if (node->GetTypeIdx()) { + TypeEntry* te = gTypeTable.GetTypeEntryFromTypeIdx(node->GetTypeIdx()); + if (te) + return te->GetTypeId(); + } + + // lookup ast node mTypeId (will be deprecated) + if (node->GetTypeId() != TY_None) { + return node->GetTypeId(); + } + return TY_Any; +} + +// Temp. workaround to get func info for var of type function. TODO: replace with TI API when avail. +std::string GetClassOfAssignedFunc(TreeNode* node) { + if (node->IsIdentifier() && + node->GetParent() && + node->GetParent()->IsDecl() && + static_cast(node->GetParent())->GetInit() && + static_cast(node->GetParent())->GetInit()->IsFunction()) { + auto n = static_cast(node->GetParent())->GetInit(); + return("Cls_"s + static_cast(n)->GetName()); + } + return std::string(); +} + +// Generate call to create obj prop with ptr to c++ class fld member +// e.g. 
obj->AddProp("fdLong", t2crt::ClassFld(&Foo::fdLong).NewProp(this, t2crt::TY_CXX_Long)) +// obj->AddProp("fdAny", t2crt::ClassFld(&Foo::fdAny).NewProp(this, t2crt::TY_CXX_Any)) +std::string GenClassFldAddProp(std::string objName, + std::string clsName, + std::string fldName, + std::string fldCType, + std::string fldJSType) { + std::string str; + str = objName + "->AddProp(\"" + fldName + "\", t2crt::ClassFld<" + + fldCType + " " + clsName + "::*>(&" + + clsName + "::" + fldName + ").NewProp(this, " + fldJSType + "))"; + return str; +} + +// From TS func param info, generate param and arg list for corresponding mapped C++ func. +// +// Different formats of arg list as needed by C++ mapping of function/class/generators +// - args for function class functor and generation class constructor +// () - generator class constructor field init list +// & - args passed by reference to generation function _body method +// ; - generator class fields for capturing closure +// +std::string FunctionParams(unsigned nodeId, bool handleThis, bool argsOnly, bool byRef, bool fdInit, bool capture) { + std::vector> funcParams = hFuncTable.GetArgInfo(nodeId); + std::string ObjT = "t2crt::Object*"; + std::string str; + + // "this" in TS function paramter mapping to C++: + // + // TS2cpp's C++ mapping for TS func has a "this" obj in the c++ func param list + // which will be generated from AST if "this" is declared as a TS func parameter + // as required by TS strict mode. However TS funcs that do not reference 'this' + // are not required to declare it, in which case emitter has to insert one. + // + // Cases: + // if TS func has no param + // - insert param "ts2crt::Object* _this" + // if 1st TS func param is not "this" + // - insert param "ts2crt::Object* _this" + // if 1st TS func param is "this" + // - rename to "_this" + // - if type is Any (JS_Val), change to "ts2crt::Object*" + // + if (handleThis) { + if (funcParams.size() == 0) // func has no param + return argsOnly ? "_this"s : (ObjT + " _this"); + } + + for (bool first=true; auto elem : funcParams) { + std::string type = elem.first, name = elem.second; + if (!first) + str += ", "s; + else { // 1st param of TS func + if (handleThis) { + if (name.compare("this") != 0) // if not "this", insert _this + str += argsOnly? ("_this, "s): (ObjT + " _this, "s); + else { // if "this" + name = "_this"; // rename to "_this" + if (type.compare("t2crt::JS_Val") == 0) + type = ObjT; // change type Any to Object* + } + } + first = false; + } + if (fdInit) + str += name + "(" + name + ")"; + else if (capture) + str += type + " " + name + ";\n"; + else + str += argsOnly? 
name: (type + (byRef?"\&":"") + " "s + name); + } + return str; +} + +// Each first level function is instantiated from a corresponding class generated with interfaces below: +// Body - user defined function code +// () - functor for OrdinaryCallEvaluteBody [9.2.1.3] +// Ctor - Call user defined code as constructor (with object from new() op) +// Call - Call user defined code with designated 'this' +// Apply - Call user defined code with designated 'this' and array of argument +// Bind - Create and return function object binded to designated this and optional args +// note: the parameter args is expected to be a string that start with "_this" +// TODO: apply and bind may be moved to ts2cpp.h Fuction class as virtual +// note: TSC prohibits calling non-void constructor func with new(), so in the code generated below +// for ctor(), it calls _body() but ignores return val from _body(), and instead returns _this +// per TS/JS spec. + +std::string FunctionClassDecl(std::string retType, std::string funcName, unsigned nodeId) { + std::string str, args, params, thisType; + + std::string clsName = ClsName(funcName); + params = FunctionParams(nodeId, true, false); + args = FunctionParams(nodeId, true, true); + thisType = params.substr(0, params.find(" ")); // extract return type of "this" parameter + + std::string functorParams = params; + std::string functorArgs = args; + functorArgs.replace(0, 5, "_thisArg"); // replace _this with _thisArg + size_t pos; + if ((pos = functorParams.find("_this, ")) != std::string::npos) + functorParams.erase(0, pos+7); + else if ((pos = functorParams.find("_this")) != std::string::npos) + functorParams.erase(0, pos+5); + + str = R"""( +class )""" + clsName + R"""( : public t2crt::Function { + public: + )""" + clsName + R"""(() : t2crt::Function(&t2crt::Function::ctor,t2crt::Function::ctor.prototype,t2crt::Object::ctor.prototype) {} + ~)""" + clsName + R"""(() {} + + )""" + retType + R"""( _body()""" + params + R"""(); + )""" + retType + R"""( operator()()""" + functorParams + R"""() { return _body(()""" + thisType + R"""())""" + functorArgs + R"""(); } + t2crt::Object* ctor ()""" + params + R"""() { _body()""" + args + R"""(); return(_this); } + )""" + retType + R"""( call ()""" + params + R"""() { return _body()""" + args + R"""( ); } + )""" + retType + R"""( apply(t2crt::Object* _this, t2crt::ArgsT& args) { /* TODO: call _body wtih flatten args */ } + )""" + clsName + R"""(* bind (t2crt::Object* _this, t2crt::ArgsT* args) { + )""" + clsName + R"""(* func = new )""" + clsName + R"""((); + func->_thisArg = _this; + func->_args = args; + return(func); + } + virtual const char* __GetClassName() const {return ")""" + funcName + R"""( ";} +}; + +)"""; + return str; +} + +// build generator function header for _body +std::string GeneratorFuncHeader(std::string cls, unsigned nodeId) { + std::string params = FunctionParams(nodeId, false, false, true); // pass params by ref into _body() + if (!params.empty()) + params = ", " + params; + return "void " + cls + "_body(t2crt::Object* _this, void*& yield, t2crt::IteratorResult& res" + params + ")"; +} + +// Generating Generators and Generator Functions: +// For each TS generator function, 2 C++ classes: generator and generator function are emitted. +// The generator function has only a single instance. It is called to create generator instances. 
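+//
+// Illustrative sketch only (class names and the `start` parameter are hypothetical;
+// the real names come from GeneratorName()/GeneratorFuncName() and the captured
+// params from hFuncTable): for a TS generator such as
+//   function* counter(start: number) { ... }
+// GeneratorClassDecl() emits roughly
+//   class Counter_gen : public t2crt::GeneratorProto {             // generator instances
+//     double start;                                                // closure capture field
+//     t2crt::IteratorResult* next(t2crt::JS_Val* arg = nullptr) override;
+//   };
+//   class Counter_genFunc : public t2crt::GeneratorFuncPrototype { // generator function
+//     Counter_gen* operator()(double start);                       // returns new generator instances
+//     void _body(t2crt::Object* _this, void*& yield, t2crt::IteratorResult& res, double& start);
+//   };
+// and GeneratorClassDef() then defines next() and operator() out of line.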
+std::string GeneratorClassDecl(std::string funcName, unsigned nodeId) { + std::string str; + std::string generatorName = GeneratorName(funcName); + std::string generatorFuncName = GeneratorFuncName(funcName); + + std::string functorArgs = FunctionParams(nodeId, false, false); + std::string initList = FunctionParams(nodeId, false, false, false, true) ; + std::string captureFields = FunctionParams(nodeId, false, false, false, false, true); + std::string ctorArgs = functorArgs.empty()? std::string(): (", "s + functorArgs); + initList = initList.empty()? "": (", "s + initList); + + std::string genClsDecl[] = { +"// " +funcName+ " generators", +"class " +generatorName+ " : public t2crt::GeneratorProto {", +"public:", +" " +generatorName+ "(t2crt::Function* ctor, t2crt::Object* proto" +ctorArgs+ ") : t2crt::GeneratorProto(ctor, proto)" +initList+ " {}", +" ~" +generatorName+ "() {}", +" // closure capture fields", +" " +captureFields, +" // iterator interface (override _return and _throw when needed)", +" t2crt::IteratorResult* next(t2crt::JS_Val* arg = nullptr) override;", +"};", +"// " +funcName+ " generator function", +"class " +generatorFuncName+ " : public t2crt::GeneratorFuncPrototype {", +"public:", +" " +generatorFuncName+ "() : t2crt::GeneratorFuncPrototype(&t2crt::GeneratorFunction, &t2crt::Generator, t2crt::GeneratorPrototype) {}", +" ~" +generatorFuncName+ "() {}", +" // call operator returns generator instances", +" " +generatorName+ "* operator()(" +functorArgs+ ");", +" // generator function body", +" " +GeneratorFuncHeader("", nodeId)+ ";", +"};", +"" + }; + + str += "\n"; + for (auto elem : genClsDecl) + str += elem + "\n"; + return str; +} + +std::string GeneratorClassDef(std::string ns, std::string funcName, unsigned nodeId) { + std::string str; + std::string generatorName = ns + GeneratorName(funcName); + std::string generatorFuncName = ns + GeneratorFuncName(funcName); + + if (!ns.empty()) + funcName = ns + "::" + funcName; + + std::string params = FunctionParams(nodeId, false, false); + std::string args = FunctionParams(nodeId, false, true); + if (!args.empty()) + args = ", " + args; + + str = R"""( +t2crt::IteratorResult* )""" + generatorName + R"""(::next(t2crt::JS_Val* arg) { + if (_finished) { + _res.done = true; + return &_res; + } + // iterate by calling generation function with captures in generator + )""" + funcName + R"""(->_body(this, _yield, _res)""" + args + R"""(); + if (_res.done == true) + _finished = true; + return &_res; +} + +)""" + generatorName + "* "s + generatorFuncName + R"""(::operator()()""" + params + R"""() { + return new )""" + generatorName + R"""((&t2crt::Generator, foo->prototype)""" + args + R"""(); +} + +)"""; + return str; +} + +bool IsClassMethod(TreeNode* node) { + return(node->IsFunction() && node->GetParent() && node->GetParent()->IsClass()); +} + +std::string tab(int n) { + return std::string(n*2,' '); +} + +std::string GenAnonFuncName(TreeNode* node) { + return "_anon_func_"s + std::to_string(node->GetNodeId()); +} + +// return array constructor name of given type +// format: +// 1D array: t2crt::Array::ctor +// 2D array: t2crt::Array*>::ctor +// 3D array: t2crt::Array*>*>::ctor +// ... +// note: must be in sycn with format generated by ARR_CTOR_DEF in builtins.h +std::string ArrayCtorName(int dim, std::string type) { + if (!dim) + return std::string(); + std::string str = "t2crt::Array<"s + type + ">"s; + for (int i=1; i"s; + } + str = str + "::ctor"s; + return str; +} + +// note: entries below are to match values from ast nodes. 
Do not prepend with "t2crt::" +std::vectorbuiltins = {"Object", "Function", "Number", "Array", "Record"}; + +bool IsBuiltinObj(std::string name) { + return std::find(builtins.begin(), builtins.end(), name) != builtins.end(); +} + +std::string ObjectTypeStr(std::string name) { + if (IsBuiltinObj(name)) + return "t2crt::" + name + "*"; + else + return name + "*"; +} + +} // namespace maplefe diff --git a/src/MapleFE/ast2cpp/src/main.cpp b/src/MapleFE/ast2cpp/src/main.cpp new file mode 100644 index 0000000000000000000000000000000000000000..6062f4228f538d04148f62a5fa909b7bacb08819 --- /dev/null +++ b/src/MapleFE/ast2cpp/src/main.cpp @@ -0,0 +1,108 @@ +/* +* Copyright (C) [2020] Futurewei Technologies, Inc. All rights reverved. +* +* OpenArkFE is licensed under the Mulan PSL v2. +* You can use this software according to the terms and conditions of the Mulan PSL v2. +* You may obtain a copy of Mulan PSL v2 at: +* +* http://license.coscl.org.cn/MulanPSL2 +* +* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +* FIT FOR A PARTICULAR PURPOSE. +* See the Mulan PSL v2 for more details. +*/ + +#include +#include +#include +#include "gen_astload.h" +#include "ast_handler.h" +#include "ast2cpp.h" + +static void help() { + std::cout << "ast2cpp a.ast[,b.ast] [options]:" << std::endl; + std::cout << " --out=x.cpp : cpp output file" << std::endl; + std::cout << " --help : print this help" << std::endl; + std::cout << " --trace=n : Emit trace with 4-bit combo levels 1...15" << std::endl; + std::cout << " 1 : Emit ast tree visits" << std::endl; + std::cout << " 2 : Emit graph" << std::endl; + std::cout << " --emit-ts-only : Emit ts code only" << std::endl; + std::cout << " --emit-ts : Emit ts code" << std::endl; + std::cout << " --format-cpp : Format cpp" << std::endl; + std::cout << " --no-imported : Do not process the imported modules" << std::endl; + std::cout << "default out name uses the first input name: a.cpp" << std::endl; +} + +int main (int argc, char *argv[]) { + if (argc == 1 || (!strncmp(argv[1], "--help", 6) && (strlen(argv[1]) == 6))) { + help(); + exit(-1); + } + + unsigned flags; + // one or more input .ast files separated by ',' + const char *inputname = argv[1]; + // output .cpp file + const char *outputname = nullptr; + + // Parse the argument + for (unsigned i = 2; i < argc; i++) { + if (!strncmp(argv[i], "--trace=", 8)) { + int val = atoi(argv[i] + 8); + if (val < 1 || val > 15) { + help(); + exit(-1); + } + flags |= val; + } else if (!strncmp(argv[i], "--emit-ts-only", 14)) { + flags |= maplefe::FLG_emit_ts_only; + } else if (!strncmp(argv[i], "--emit-ts", 9)) { + flags |= maplefe::FLG_emit_ts; + } else if (!strncmp(argv[i], "--format-cpp", 12)) { + flags |= maplefe::FLG_format_cpp; + } else if (!strncmp(argv[i], "--no-imported", 13)) { + flags |= maplefe::FLG_no_imported; + } else if (!strncmp(argv[i], "--in=", 5)) { + inputname = argv[i]+5; + } else if (!strncmp(argv[i], "--out=", 6)) { + outputname = argv[i]+6; + } else { + std::cerr << "unknown option " << argv[i] << std::endl; + exit(-1); + } + } + + // input ast files + std::vector inputfiles; + if (inputname) { + std::stringstream ss; + ss.str(inputname); + std::string item; + while (std::getline(ss, item, ',')) { + // std::cout << "item " << item << " xxx"<< std::endl; + inputfiles.push_back(item); + } + } + + unsigned trace = (flags & maplefe::FLG_trace); + maplefe::AST_Handler handler(trace); + for 
(auto astfile: inputfiles) { + std::ifstream input(astfile, std::ifstream::binary); + input >> std::noskipws; + std::istream_iterator s(input), e; + maplefe::AstBuffer vec(s, e); + maplefe::AstLoad loadAst; + maplefe::ModuleNode *mod = loadAst.LoadFromAstBuf(vec); + // add mod to the vector + while(mod) { + handler.AddModule(mod); + mod = loadAst.Next(); + } + } + + maplefe::A2C *a2c = new maplefe::A2C(&handler, flags); + int res = a2c->ProcessAST(); + + return res; +} diff --git a/src/MapleFE/test/testall b/src/MapleFE/ast2mpl/Makefile old mode 100755 new mode 100644 similarity index 71% rename from src/MapleFE/test/testall rename to src/MapleFE/ast2mpl/Makefile index 5de40cd76a41b0fbc4a25cd8d8a55707cfeecb14..da0321c1b14cbbfc75d469ee5abcdb42e6d29b15 --- a/src/MapleFE/test/testall +++ b/src/MapleFE/ast2mpl/Makefile @@ -1,4 +1,4 @@ -# Copyright (C) [2020] Futurewei Technologies, Inc. All rights reverved. +# Copyright (C) [2021] Futurewei Technologies, Inc. All rights reverved. # # OpenArkFE is licensed under the Mulan PSL v2. # You can use this software according to the terms and conditions of the Mulan PSL v2. @@ -12,5 +12,16 @@ # See the Mulan PSL v2 for more details. # -./java2mpl_runtests.pl java2mpl -./sharedfe_runtests.pl sharedfe +include ../Makefile.in + +all: + $(MAKE) -C src + +clean: + rm -rf $(BUILDDIR)/shared + +test: + $(MAKE) -C ../test p + +.PHONY: $(TARGS) + diff --git a/src/MapleFE/java/include/ast2mpl_java.h b/src/MapleFE/ast2mpl/include/ast2mpl.h similarity index 53% rename from src/MapleFE/java/include/ast2mpl_java.h rename to src/MapleFE/ast2mpl/include/ast2mpl.h index cec63ef9035608a2e8ab8eddf821b02b6d0fc2ae..a9283482498005a40f1666a3bbaace49a85e4557 100644 --- a/src/MapleFE/java/include/ast2mpl_java.h +++ b/src/MapleFE/ast2mpl/include/ast2mpl.h @@ -13,25 +13,38 @@ * See the Mulan PSL v2 for more details. */ -///////////////////////////////////////////////////////////////////////////////// -// Java Specific AST2MPL // -///////////////////////////////////////////////////////////////////////////////// +////////////////////////////////////////////////////////////////////////////////////////////// +// This is the interface to translate AST to MapleIR. 
+////////////////////////////////////////////////////////////////////////////////////////////// -#ifndef __AST2MPL_JAVA_H__ -#define __AST2MPL_JAVA_H__ +#ifndef __AST2MPL_HEADER__ +#define __AST2MPL_HEADER__ -#include "ast2mpl.h" +#include "astopt.h" +#include "ast_handler.h" + +#include "mir_module.h" +#include "maplefe_mir_builder.h" namespace maplefe { -class A2MJava : public A2M { +class A2M : public AstOpt { private: -public: - A2MJava(const char *filename) : A2M(filename) { } + AST_Handler *mASTHandler; + unsigned mFlags; + unsigned mIndexImported; - const char *Type2Label(const maple::MIRType *type); - - maple::MIRType *MapPrimType(PrimTypeNode *ptnode); +public: + explicit A2M(AST_Handler *h, unsigned flags) : + AstOpt(h, flags), + mASTHandler(h), + mFlags(flags), + mIndexImported(0) {} + ~A2M() = default; + + // return 0 if successful + // return non-zero if failed + int ProcessAST(); }; } diff --git a/src/MapleFE/shared/include/ast2mpl.h b/src/MapleFE/ast2mpl/include/ast2mpl_builder.h similarity index 75% rename from src/MapleFE/shared/include/ast2mpl.h rename to src/MapleFE/ast2mpl/include/ast2mpl_builder.h index 837c7ee30229625f1a58893e3f424b26a858f48a..1270063fde7b185f7c04293502898921f6174215 100644 --- a/src/MapleFE/shared/include/ast2mpl.h +++ b/src/MapleFE/ast2mpl/include/ast2mpl_builder.h @@ -17,14 +17,14 @@ // This is the interface to translate AST to MapleIR. ////////////////////////////////////////////////////////////////////////////////////////////// -#ifndef __AST2MPL_HEADER__ -#define __AST2MPL_HEADER__ +#ifndef __AST2MPL_BUILDER_HEADER__ +#define __AST2MPL_BUILDER_HEADER__ -#include "ast_module.h" #include "ast.h" -#include "ast_type.h" +#include "ast_handler.h" #include "mir_module.h" +#include "generic_attrs.h" #include "maplefe_mir_builder.h" namespace maplefe { @@ -40,54 +40,72 @@ enum StmtExprKind { SK_Expr }; -class A2M { +enum bool3 { + false3, + true3, + maybe3 +}; + +class Ast2MplBuilder { private: - const char *mFileName; - bool mTraceA2m; - FEMIRBuilder *mMirBuilder; + AST_Handler *mASTHandler; + const char *mFilename; + bool mTraceA2m; + FEMIRBuilder *mMirBuilder; maple::MIRType *mDefaultType; - FieldData *mFieldData; + FieldData *mFieldData; + unsigned mUniqNum; + unsigned mFlags; + public: maple::MIRModule *mMirModule; // use type's uniq name as key - std::map mNodeTypeMap; - std::map> mNameFuncMap; + std::map mNodeTypeMap; + std::map> mNameFuncMap; std::map mBlockNodeMap; std::map mBlockFuncMap; std::map mFuncMap; - std::map, maple::MIRSymbol*> mNameBlockVarMap; + std::map, maple::MIRSymbol*> mNameBlockVarMap; + + Ast2MplBuilder(AST_Handler *h, unsigned f); + ~Ast2MplBuilder(); - A2M(const char *filename); - ~A2M(); + void Build(); void Init(); bool IsStmt(TreeNode *tnode); + bool3 IsCompatibleTo(maple::PrimType expected, maple::PrimType prim); void UpdateFuncName(maple::MIRFunction *func); virtual const char *Type2Label(const maple::MIRType *type); void Type2Name(std::string &str, const maple::MIRType *type); + ClassNode *GetSuperClass(ClassNode *klass); BlockNode *GetSuperBlock(BlockNode *block); maple::MIRSymbol *GetSymbol(TreeNode *tnode, BlockNode *block); maple::MIRSymbol *CreateSymbol(TreeNode *tnode, BlockNode *block); maple::MIRSymbol *CreateTempVar(const char *prefix, maple::MIRType *type); - maple::MIRFunction *GetFunc(BlockNode *block); - maple::MIRFunction *SearchFunc(const char *name, const maple::MapleVector &args); + maple::MIRFunction *GetCurrFunc(BlockNode *block); + maple::MIRFunction *SearchFunc(unsigned idx, maple::MapleVector 
&args); + maple::MIRFunction *SearchFunc(TreeNode *method, maple::MapleVector &args, BlockNode *block); maple::MIRClassType *GetClass(BlockNode *block); void UpdateUniqName(std::string &str); - virtual maple::MIRType *MapPrimType(PrimTypeNode *tnode)=0; + maple::PrimType MapPrim(TypeId id); + maple::MIRType *MapPrimType(TypeId id); + maple::MIRType *MapPrimType(PrimTypeNode *tnode); maple::MIRType *MapType(TreeNode *tnode); - void MapAttr(maple::GenericAttrs &attr, const IdentifierNode *inode); + void MapAttr(maple::GenericAttrs &attr, AttrId id); + void MapAttr(maple::GenericAttrs &attr, IdentifierNode *inode); + void MapAttr(maple::GenericAttrs &attr, FunctionNode *fnode); maple::Opcode MapUnaOpcode(OprId); maple::Opcode MapBinOpcode(OprId); maple::Opcode MapBinCmpOpcode(OprId); maple::Opcode MapBinComboOpcode(OprId); - void ProcessAST(bool trace_a2m); maple::BaseNode *ProcessNodeDecl(StmtExprKind, TreeNode *tnode, BlockNode *); maple::BaseNode *ProcessNode(StmtExprKind, TreeNode *tnode, BlockNode *); @@ -123,6 +141,7 @@ public: BlockNode *block); maple::BaseNode *ProcessLoopCondBody(StmtExprKind skind, TreeNode *cond, TreeNode *body, BlockNode *block); + maple::BaseNode *GetNewNodeLhs(NewNode *node, BlockNode *block); }; } diff --git a/src/MapleFE/autogen/src/expr_gen.cpp b/src/MapleFE/ast2mpl/include/cvt_block.h similarity index 39% rename from src/MapleFE/autogen/src/expr_gen.cpp rename to src/MapleFE/ast2mpl/include/cvt_block.h index 1a5af43a85bb0328fc87483e9a1d188989ac93c4..99ac1da878fed8b78a11a0465c6d7aec4a56bd33 100644 --- a/src/MapleFE/autogen/src/expr_gen.cpp +++ b/src/MapleFE/ast2mpl/include/cvt_block.h @@ -1,5 +1,5 @@ /* -* Copyright (C) [2020] Futurewei Technologies, Inc. All rights reverved. +* Copyright (C) [2021] Futurewei Technologies, Inc. All rights reverved. * * OpenArkFE is licensed under the Mulan PSL v2. * You can use this software according to the terms and conditions of the Mulan PSL v2. @@ -12,36 +12,30 @@ * FIT FOR A PARTICULAR PURPOSE. * See the Mulan PSL v2 for more details. 
*/ -#include "expr_gen.h" -namespace maplefe { - -void ExprGen::Generate() { - GenRuleTables(); - GenHeaderFile(); - GenCppFile(); -} +#ifndef __AST_CVT_BLOCK_H__ +#define __AST_CVT_BLOCK_H__ -void ExprGen::GenHeaderFile() { - mHeaderFile.WriteOneLine("#ifndef __EXPR_GEN_H__", 22); - mHeaderFile.WriteOneLine("#define __EXPR_GEN_H__", 22); - mHeaderFile.WriteOneLine("namespace maplefe {", 19); +#include "ast_module.h" +#include "ast.h" +#include "gen_astvisitor.h" - // generate the rule tables - mHeaderFile.WriteFormattedBuffer(&mRuleTableHeader); +namespace maplefe { - mHeaderFile.WriteOneLine("}", 1); - mHeaderFile.WriteOneLine("#endif", 6); -} +// CvtBlockVisitor is to fix up some tree nodes after the AST is created +class CvtToBlockVisitor : public AstVisitor { + private: + ModuleNode *mASTModule; + bool mUpdated; -void ExprGen::GenCppFile() { - mCppFile.WriteOneLine("#include \"common_header_autogen.h\"", 34); - mCppFile.WriteOneLine("namespace maplefe {", 19); + public: + CvtToBlockVisitor(ModuleNode *m) : mASTModule(m), mUpdated(false) {} - // generate the rule tables - mCppFile.WriteFormattedBuffer(&mRuleTableCpp); - mCppFile.WriteOneLine("}", 1); -} -} + bool CvtToBlock(); + CondBranchNode *VisitCondBranchNode(CondBranchNode *node); + ForLoopNode *VisitForLoopNode(ForLoopNode *node); +}; +} +#endif diff --git a/src/MapleFE/ast2mpl/include/generic_attrs.h b/src/MapleFE/ast2mpl/include/generic_attrs.h new file mode 100644 index 0000000000000000000000000000000000000000..ecab7c62d180bec7218941b55af3167d9d8eefcf --- /dev/null +++ b/src/MapleFE/ast2mpl/include/generic_attrs.h @@ -0,0 +1,65 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef GENERIC_ATTRS_H +#define GENERIC_ATTRS_H +#include +#include "mir_type.h" + +namespace maple { +// only for internal use, not emitted +enum GenericAttrKind { +#define FUNC_ATTR +#define TYPE_ATTR +#define FIELD_ATTR +#define ATTR(STR) GENATTR_##STR, +#include "all_attributes.def" +#undef ATTR +#undef FUNC_ATTR +#undef TYPE_ATTR +#undef FIELD_ATTR +}; + +class GenericAttrs { + public: + GenericAttrs() = default; + GenericAttrs(const GenericAttrs &ta) = default; + GenericAttrs &operator=(const GenericAttrs &p) = default; + ~GenericAttrs() = default; + + void SetAttr(GenericAttrKind x) { + attrFlag.set(x); + } + + bool GetAttr(GenericAttrKind x) const { + return attrFlag[x]; + } + + bool operator==(const GenericAttrs &tA) const { + return attrFlag == tA.attrFlag; + } + + bool operator!=(const GenericAttrs &tA) const { + return !(*this == tA); + } + + FieldAttrs ConvertToFieldAttrs(); + TypeAttrs ConvertToTypeAttrs(); + FuncAttrs ConvertToFuncAttrs(); + + private: + std::bitset<128> attrFlag = 0; +}; +} +#endif // GENERIC_ATTRS_H \ No newline at end of file diff --git a/src/MapleFE/shared/include/maplefe_mir_builder.h b/src/MapleFE/ast2mpl/include/maplefe_mir_builder.h similarity index 89% rename from src/MapleFE/shared/include/maplefe_mir_builder.h rename to src/MapleFE/ast2mpl/include/maplefe_mir_builder.h index d4fa730156948804ba3add28fa1162b3ec61b3cf..52261130d603806cea5c925440facd161cf15718 100644 --- a/src/MapleFE/shared/include/maplefe_mir_builder.h +++ b/src/MapleFE/ast2mpl/include/maplefe_mir_builder.h @@ -77,6 +77,11 @@ class FEMIRBuilder : public maple::MIRBuilder { bool TraverseToNamedField(maple::MIRStructType *structType, unsigned &fieldID, FieldData *fieldData); maple::BaseNode *CreateExprDread(const maple::MIRSymbol *symbol, maple::FieldID fieldID = maple::FieldID(0)); + + // use maple::PTY_ref for pointer types + maple::MIRType *GetOrCreatePointerType(const maple::MIRType *pointTo) { + return maple::GlobalTables::GetTypeTable().GetOrCreatePointerType(*pointTo, maple::PTY_ref); + } }; } diff --git a/src/MapleFE/ast2mpl/src/Makefile b/src/MapleFE/ast2mpl/src/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..51ab1dcd3e1b7c95dcda06ba6a8a02e8df10d433 --- /dev/null +++ b/src/MapleFE/ast2mpl/src/Makefile @@ -0,0 +1,88 @@ +# Copyright (C) [2021] Futurewei Technologies, Inc. All rights reverved. +# +# OpenArkFE is licensed under the Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# + +include ../../Makefile.in +BUILDBIN=$(BUILDDIR)/bin +BUILD=$(BUILDDIR)/ast2mpl +BUILDGEN=$(BUILDDIR)/gen +BUILDASTGEN=$(BUILDDIR)/ast_gen/shared +$(shell $(MKDIR_P) $(BUILD)) + +SRC=$(wildcard *.cpp) +OBJ :=$(patsubst %.cpp,%.o,$(SRC)) +DEP :=$(patsubst %.cpp,%.d,$(SRC)) + +SRCG := $(wildcard $(BUILDGEN)/gen*.cpp) +OBJG := $(patsubst %.cpp, %.o, $(SRCG)) +DEPG := $(patsubst %.cpp, %.d, $(SRCG)) + +OBJS :=$(foreach obj,$(OBJ), $(BUILD)/$(obj)) $(OBJG) +DEPS :=$(foreach dep,$(DEP), $(BUILD)/$(dep)) $(DEPG) + +LIBOBJS :=$(patsubst $(BUILD)/main.o,,$(OBJS)) + +GENDIR:=${BUILDDIR}/ast_gen/shared + +INCLUDES := -I $(MAPLEFE_ROOT)/shared/include \ + -I $(MAPLEFE_ROOT)/astopt/include \ + -I $(MAPLEFE_ROOT)/ast2mpl/include \ + -I $(MAPLEFE_ROOT)/autogen/include \ + -I $(MAPLEFE_ROOT)/shared/include \ + -I $(MAPLEFE_ROOT)/java/include \ + $(MAPLEALL_INC) -I ${GENDIR} + +AST2MPLLIB = ast2mpl.a + +INCLUDEGEN := -I $(MAPLEFE_ROOT)/shared/include -I $(BUILDDIR)/gen -I $(BUILDASTGEN) + +TARGET=ast2mpl +SHAREDLIB = $(BUILDDIR)/astopt/astopt.a $(BUILDDIR)/shared/shared.a $(BUILDASTGEN)/genast.a +LANGSPEC=$(BUILDDIR)/java/lang_spec.o + +.PHONY: all +all: $(BUILDBIN)/$(TARGET) + +-include $(DEPS) +.PHONY: clean + +vpath %.o $(BUILD) +vpath %.d $(BUILD) + +#Pattern Rules +$(BUILD)/%.o : %.cpp + $(CXX) $(CXXFLAGS) -fpermissive $(INCLUDES) -w -c $< -o $@ + +$(BUILD)/%.d : %.cpp + @$(CXX) $(CXXFLAGS) -MM $(INCLUDES) $< > $@ + @mv -f $(BUILD)/$*.d $(BUILD)/$*.d.tmp + @sed -e 's|.*:|$(BUILD)/$*.o:|' < $(BUILD)/$*.d.tmp > $(BUILD)/$*.d + @rm -f $(BUILD)/$*.d.tmp + +$(BUILDGEN)/%.o : $(BUILDGEN)/%.cpp $(BUILDGEN)/%.d + $(CXX) $(CXXFLAGS) -fpermissive $(INCLUDEGEN) -w -c $< -o $@ + +$(BUILDGEN)/%.d : $(BUILDGEN)/%.cpp + @$(CXX) $(CXXFLAGS) -std=c++11 -MM $(INCLUDEGEN) $< > $@ + @mv -f $(BUILDGEN)/$*.d $(BUILDGEN)/$*.d.tmp + @sed -e 's|.*:|$(BUILDGEN)/$*.o:|' < $(BUILDGEN)/$*.d.tmp > $(BUILDGEN)/$*.d + @rm -f $(BUILDGEN)/$*.d.tmp + +# TARGET depends on OBJS and shared OBJS from shared directory +# as well as mapleall libraries +$(BUILDBIN)/$(TARGET): $(OBJS) $(SHAREDLIB) + @mkdir -p $(BUILDBIN) + $(LD) -o $(BUILDBIN)/$(TARGET) $(OBJS) $(LANGSPEC) $(SHAREDLIB) $(MAPLELIBS) + +clean: + rm -rf $(BUILD) diff --git a/src/MapleFE/ast2mpl/src/ast2mpl.cpp b/src/MapleFE/ast2mpl/src/ast2mpl.cpp new file mode 100644 index 0000000000000000000000000000000000000000..e6ac6c2b44641dfdd3b96313037b5dadfe55ecbb --- /dev/null +++ b/src/MapleFE/ast2mpl/src/ast2mpl.cpp @@ -0,0 +1,129 @@ +/* +* Copyright (C) [2020] Futurewei Technologies, Inc. All rights reverved. +* +* OpenArkFE is licensed under the Mulan PSL v2. +* You can use this software according to the terms and conditions of the Mulan PSL v2. +* You may obtain a copy of Mulan PSL v2 at: +* +* http://license.coscl.org.cn/MulanPSL2 +* +* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +* FIT FOR A PARTICULAR PURPOSE. +* See the Mulan PSL v2 for more details. 
+*/ + +#include +#include +#include +#include + +#include "ast2mpl.h" +#include "ast_handler.h" +#include "gen_astdump.h" +#include "gen_astgraph.h" +#include "gen_aststore.h" +#include "gen_astload.h" + +#include "mir_function.h" +#include "ast2mpl_builder.h" + +namespace maplefe { + +// starting point of AST +int A2M::ProcessAST() { + mIndexImported = GetModuleNum(); + + // loop through module handlers + for (HandlerIndex i = 0; i < GetModuleNum(); i++) { + Module_Handler *handler = mASTHandler->GetModuleHandler(i); + ModuleNode *module = handler->GetASTModule(); + + if (mFlags & FLG_trace_1) { + std::cout << "============= in ProcessAST ===========" << std::endl; + std::cout << "srcLang : " << module->GetSrcLangString() << std::endl; + + for(unsigned k = 0; k < module->GetTreesNum(); k++) { + TreeNode *tnode = module->GetTree(k); + if (mFlags & FLG_trace_1) { + tnode->Dump(0); + std::cout << std::endl; + } + } + } + + if (mFlags & FLG_trace_2) { + std::cout << "============= AstGraph ===========" << std::endl; + AstGraph graph(module); + graph.DumpGraph("After LoadFromAstBuf()", &std::cout); + } + } + + // build dependency of modules + PreprocessModules(); + + // loop through module handlers in import/export dependency order + for (auto handler: mHandlersInOrder) { + ModuleNode *module = handler->GetASTModule(); + + // basic analysis + handler->BasicAnalysis(); + + if (mFlags & FLG_trace_2) { + std::cout << "============= After AdjustAST ===========" << std::endl; + for(unsigned k = 0; k < module->GetTreesNum(); k++) { + TreeNode *tnode = module->GetTree(k); + if (mFlags & FLG_trace_1) { + tnode->Dump(0); + std::cout << std::endl; + } + } + AstGraph graph(module); + graph.DumpGraph("After AdjustAST()", &std::cout); + } + + // build CFG + handler->BuildCFG(); + + if (mFlags & FLG_trace_2) { + handler->Dump("After BuildCFG()"); + } + + // control flow analysis + handler->ControlFlowAnalysis(); + + // type inference + handler->TypeInference(); + + if (mFlags & FLG_trace_2) { + std::cout << "============= AstGraph ===========" << std::endl; + AstGraph graph(module); + graph.DumpGraph("After BuildCFG()", &std::cout); + } + + if (mFlags & FLG_trace_2) { + std::cout << "============= AstDump ===========" << std::endl; + AstDump astdump(module); + astdump.Dump("After BuildCFG()", &std::cout); + } + + // data flow analysis + handler->DataFlowAnalysis(); + + if (mFlags & FLG_trace_2) { + handler->Dump("After DataFlowAnalysis()"); + } + } + + // build mpl + if (mFlags & FLG_trace_2) { + std::cout << "============= Ast2Mpl Build ===========" << std::endl; + } + maplefe::Ast2MplBuilder ast2mpl_builder(mASTHandler, mFlags); + ast2mpl_builder.Build(); + + ast2mpl_builder.mMirModule->OutputAsciiMpl("", ".mpl"); + return 0; +} + +} diff --git a/src/MapleFE/shared/src/ast2mpl.cpp b/src/MapleFE/ast2mpl/src/ast2mpl_builder.cpp similarity index 50% rename from src/MapleFE/shared/src/ast2mpl.cpp rename to src/MapleFE/ast2mpl/src/ast2mpl_builder.cpp index f980347848ae9f737588bf3a6dc768ea8ca57a01..2c8c3546f32498b5d14b0b4639585a97e747164d 100644 --- a/src/MapleFE/shared/src/ast2mpl.cpp +++ b/src/MapleFE/ast2mpl/src/ast2mpl_builder.cpp @@ -13,80 +13,127 @@ * See the Mulan PSL v2 for more details. 
*/ -#include "ast2mpl.h" +#include "ast2mpl_builder.h" +#include "gen_astdump.h" +#include "mir_module.h" #include "mir_function.h" #include "maplefe_mir_builder.h" +#include "cvt_block.h" namespace maplefe { -static unsigned mVarUniqNum; - -A2M::A2M(const char *filename) : mFileName(filename) { - mMirModule = new maple::MIRModule(mFileName); +Ast2MplBuilder::Ast2MplBuilder(AST_Handler *h, unsigned f) : mASTHandler(h), mFlags(f) { + mFilename = h->GetModuleHandler((unsigned)0)->GetASTModule()->GetFilename(); + mMirModule = new maple::MIRModule(mFilename); maple::theMIRModule = mMirModule; mMirBuilder = new FEMIRBuilder(mMirModule); mFieldData = new FieldData(); - mVarUniqNum = 1; Init(); } -A2M::~A2M() { +Ast2MplBuilder::~Ast2MplBuilder() { delete mMirModule; delete mMirBuilder; delete mFieldData; mNodeTypeMap.clear(); } -void A2M::Init() { +void Ast2MplBuilder::Init() { // create mDefaultType maple::MIRType *type = maple::GlobalTables::GetTypeTable().GetOrCreateClassType("DEFAULT_TYPE", *mMirModule); type->SetMIRTypeKind(maple::kTypeClass); - mDefaultType = maple::GlobalTables::GetTypeTable().GetOrCreatePointerType(*type); + mDefaultType = mMirBuilder->GetOrCreatePointerType(type); // setup flavor and srclang mMirModule->SetFlavor(maple::kFeProduced); mMirModule->SetSrcLang(maple::kSrcLangJava); // setup INFO_filename - maple::GStrIdx idx = maple::GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(mFileName); + maple::GStrIdx idx = maple::GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(mFilename); SET_INFO_PAIR(mMirModule, "INFO_filename", idx.GetIdx(), true); - // add to java src file list - std::string str(mFileName); + // add to src file list + std::string str(mFilename); size_t pos = str.rfind('/'); if (pos != std::string::npos) { idx = maple::GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(str.substr(pos+1)); } mMirModule->PushbackFileInfo(maple::MIRInfoPair(idx, 2)); + + // initialize unique serial number for temporary variables and inner classes + mUniqNum = 1; } // starting point of AST to MPL process -void A2M::ProcessAST(bool trace_a2m) { - mTraceA2m = trace_a2m; - if (mTraceA2m) std::cout << "============= in ProcessAST ===========" << std::endl; - // pass 1: collect class/interface/function decl - for(auto it: gModule.mTrees) { - TreeNode *tnode = it->mRootNode; - ProcessNodeDecl(SK_Stmt, tnode, nullptr); +void Ast2MplBuilder::Build() { + mTraceA2m = mFlags & FLG_trace_2; + for (HandlerIndex i = 0; i < mASTHandler->GetSize(); i++) { + Module_Handler *handler = mASTHandler->GetModuleHandler(i); + ModuleNode *module = handler->GetASTModule(); + + AstDump astdump(module); + + if (mTraceA2m) { + std::cout << "============= in ProcessAST ===========" << std::endl; + std::cout << "srcLang : " << module->GetSrcLangString() << std::endl; + } + // pass 0: convert to use BlockNode for if-then-else and loop bodies + CvtToBlockVisitor visitor(module); + visitor.CvtToBlock(); + + // pass 1: collect class/interface/function decl + for (unsigned i = 0; i < module->GetTreesNum(); i++) { + TreeNode *tnode = module->GetTree(i); + ProcessNodeDecl(SK_Stmt, tnode, nullptr); + } + + // pass 2: handle function def + for (unsigned i = 0; i < module->GetTreesNum(); i++) { + TreeNode *tnode = module->GetTree(i); + ProcessNode(SK_Stmt, tnode, nullptr); + } + if (mTraceA2m) { astdump.Dump("Build", &std::cout); } } - // pass 2: handle function def - for(auto it: gModule.mTrees) { - TreeNode *tnode = it->mRootNode; - if (mTraceA2m) { tnode->Dump(0); fflush(0); } - ProcessNode(SK_Stmt, tnode, 
nullptr); +} + +maple::PrimType Ast2MplBuilder::MapPrim(TypeId id) { + maple::PrimType prim; + switch (id) { + case TY_Boolean: prim = maple::PTY_u1; break; + case TY_Byte: prim = maple::PTY_u8; break; + case TY_Short: prim = maple::PTY_i16; break; + case TY_Int: prim = maple::PTY_i32; break; + case TY_Long: prim = maple::PTY_i64; break; + case TY_Char: prim = maple::PTY_u16; break; + case TY_Float: prim = maple::PTY_f32; break; + case TY_Double: prim = maple::PTY_f64; break; + case TY_Void: prim = maple::PTY_void; break; + case TY_Null: prim = maple::PTY_void; break; + default: MASSERT("Unsupported PrimType"); break; } + return prim; +} + +maple::MIRType *Ast2MplBuilder::MapPrimType(TypeId id) { + maple::PrimType prim = MapPrim(id); + maple::TyIdx tid(prim); + return maple::GlobalTables::GetTypeTable().GetTypeFromTyIdx(tid); +} + +maple::MIRType *Ast2MplBuilder::MapPrimType(PrimTypeNode *ptnode) { + return MapPrimType(ptnode->GetPrimType()); } -maple::MIRType *A2M::MapType(TreeNode *type) { +maple::MIRType *Ast2MplBuilder::MapType(TreeNode *type) { if (!type) { return maple::GlobalTables::GetTypeTable().GetVoid(); } maple::MIRType *mir_type = mDefaultType; - const char *name = type->GetName(); - if (mNodeTypeMap.find(name) != mNodeTypeMap.end()) { - return mNodeTypeMap[name]; + unsigned idx = type->GetStrIdx(); + if (mNodeTypeMap.find(idx) != mNodeTypeMap.end()) { + return mNodeTypeMap[idx]; } if (type->IsPrimType()) { @@ -94,7 +141,7 @@ maple::MIRType *A2M::MapType(TreeNode *type) { mir_type = MapPrimType(ptnode); // update mNodeTypeMap - mNodeTypeMap[name] = mir_type; + mNodeTypeMap[idx] = mir_type; } else if (type->IsUserType()) { if (type->IsIdentifier()) { IdentifierNode *inode = static_cast(type); @@ -106,14 +153,20 @@ maple::MIRType *A2M::MapType(TreeNode *type) { } // DimensionNode *mDims // unsigned dnum = inode->GetDimsNum(); - mNodeTypeMap[name] = mir_type; + mNodeTypeMap[idx] = mir_type; + } else if (idx) { + AST2MPLMSG("MapType add a class type by idx", idx); + mir_type = maple::GlobalTables::GetTypeTable().GetOrCreateClassType(type->GetName(), *mMirModule); + mir_type->SetMIRTypeKind(maple::kTypeClass); + mir_type = mMirBuilder->GetOrCreatePointerType(mir_type); + mNodeTypeMap[idx] = mir_type; } else { - NOTYETIMPL("MapType Unknown"); + NOTYETIMPL("MapType unknown type"); } return mir_type; } -maple::Opcode A2M::MapUnaOpcode(OprId ast_op) { +maple::Opcode Ast2MplBuilder::MapUnaOpcode(OprId ast_op) { maple::Opcode op = maple::OP_undef; switch (ast_op) { case OPR_Add: op = maple::OP_add; break; @@ -127,7 +180,7 @@ maple::Opcode A2M::MapUnaOpcode(OprId ast_op) { return op; } -maple::Opcode A2M::MapBinOpcode(OprId ast_op) { +maple::Opcode Ast2MplBuilder::MapBinOpcode(OprId ast_op) { maple::Opcode op = maple::OP_undef; switch (ast_op) { case OPR_Add: op = maple::OP_add; break; @@ -148,7 +201,7 @@ maple::Opcode A2M::MapBinOpcode(OprId ast_op) { return op; } -maple::Opcode A2M::MapBinCmpOpcode(OprId ast_op) { +maple::Opcode Ast2MplBuilder::MapBinCmpOpcode(OprId ast_op) { maple::Opcode op = maple::OP_undef; switch (ast_op) { case OPR_EQ: op = maple::OP_eq; break; @@ -162,7 +215,7 @@ maple::Opcode A2M::MapBinCmpOpcode(OprId ast_op) { return op; } -maple::Opcode A2M::MapBinComboOpcode(OprId ast_op) { +maple::Opcode Ast2MplBuilder::MapBinComboOpcode(OprId ast_op) { maple::Opcode op = maple::OP_undef; switch (ast_op) { case OPR_AddAssign: op = maple::OP_add; break; @@ -182,7 +235,25 @@ maple::Opcode A2M::MapBinComboOpcode(OprId ast_op) { return op; } -const char *A2M::Type2Label(const 
maple::MIRType *type) { +#if 1 +const char *Ast2MplBuilder::Type2Label(const maple::MIRType *type) { + maple::PrimType pty = type->GetPrimType(); + switch (pty) { + case maple::PTY_u1: return "Z"; + case maple::PTY_u8: return "B"; + case maple::PTY_i16: return "S"; + case maple::PTY_u16: return "C"; + case maple::PTY_i32: return "I"; + case maple::PTY_i64: return "J"; + case maple::PTY_f32: return "F"; + case maple::PTY_f64: return "D"; + case maple::PTY_void: return "V"; + default: return "L"; + } +} +#else + +const char *Ast2MplBuilder::Type2Label(const maple::MIRType *type) { maple::PrimType pty = type->GetPrimType(); switch (pty) { case maple::PTY_u1: return "Z"; @@ -200,16 +271,13 @@ const char *A2M::Type2Label(const maple::MIRType *type) { default: return "L"; } } +#endif -bool A2M::IsStmt(TreeNode *tnode) { +bool Ast2MplBuilder::IsStmt(TreeNode *tnode) { bool status = true; if (!tnode) return false; switch (tnode->GetKind()) { - case NK_Literal: - case NK_Identifier: - status = false; - break; case NK_Parenthesis: { ParenthesisNode *node = static_cast(tnode); status = IsStmt(node->GetExpr()); @@ -221,16 +289,57 @@ bool A2M::IsStmt(TreeNode *tnode) { if (ast_op != OPR_Inc && ast_op != OPR_Dec) { status = false; } + break; } case NK_BinOperator: { BinOperatorNode *bon = static_cast(tnode); - maple::Opcode op = MapBinComboOpcode(bon->mOprId); - if (bon->mOprId != OPR_Assign && op == maple::OP_undef) { + maple::Opcode op = MapBinComboOpcode(bon->GetOprId()); + if (bon->GetOprId() != OPR_Assign && op == maple::OP_undef) { status = false; } break; } + case NK_Block: + case NK_Break: + case NK_Call: + case NK_CondBranch: + case NK_Delete: + case NK_DoLoop: + case NK_ExprList: + case NK_ForLoop: + case NK_Function: + case NK_Interface: + case NK_Lambda: + case NK_New: + case NK_Return: + case NK_Switch: + case NK_SwitchCase: + case NK_VarList: + case NK_WhileLoop: + status = true; + break; + case NK_Annotation: + case NK_AnnotationType: + case NK_Assert: + case NK_Attr: + case NK_Cast: + case NK_Class: + case NK_Dimension: + case NK_Exception: + case NK_Field: + case NK_Identifier: + case NK_Import: + case NK_InstanceOf: + case NK_Literal: + case NK_Package: + case NK_Pass: + case NK_PrimArrayType: + case NK_PrimType: + case NK_SwitchLabel: + case NK_TerOperator: + case NK_UserType: default: + status = false; break; } return status; @@ -251,7 +360,7 @@ bool A2M::IsStmt(TreeNode *tnode) { #define RARG "_29" // ")" #endif -void A2M::Type2Name(std::string &str, const maple::MIRType *type) { +void Ast2MplBuilder::Type2Name(std::string &str, const maple::MIRType *type) { maple::PrimType pty = type->GetPrimType(); const char *n = Type2Label(type); str.append(n); @@ -265,16 +374,15 @@ void A2M::Type2Name(std::string &str, const maple::MIRType *type) { } } -// update to use uniq name: str --> str|mVarUniqNum -void A2M::UpdateUniqName(std::string &str) { +// update to use uniq name: str --> str|mUniqNum +void Ast2MplBuilder::UpdateUniqName(std::string &str) { str.append(SEP); - str.append(std::to_string(mVarUniqNum)); - mVarUniqNum++; + str.append(std::to_string(mUniqNum++)); return; } // update to use mangled name: className|funcName|(argTypes)retType -void A2M::UpdateFuncName(maple::MIRFunction *func) { +void Ast2MplBuilder::UpdateFuncName(maple::MIRFunction *func) { std::string str; maple::TyIdx tyIdx = func->GetClassTyIdx(); maple::MIRType *type; @@ -308,21 +416,29 @@ void A2M::UpdateFuncName(maple::MIRFunction *func) { maple::GlobalTables::GetGsymTable().AddToStringSymbolMap(*funcst); } 
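+// Illustrative example (hypothetical class/method; assumes the hex-escaped
+// separators selected above, SEP "_7C" for '|', LARG "_28" for '(', RARG "_29"
+// for ')'): a method `void bar(int)` of class Foo is renamed to roughly
+//   Foo_7Cbar_7C_28I_29V
+// following the className|funcName|(argTypes)retType scheme, with Type2Label()
+// supplying "I" for the i32 argument and "V" for the void return type.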
-BlockNode *A2M::GetSuperBlock(BlockNode *block) { - TreeNode *blk = block->GetParent(); - while (blk && !blk->IsBlock()) { - blk = blk->GetParent(); +ClassNode *Ast2MplBuilder::GetSuperClass(ClassNode *klass) { + TreeNode *tnode = klass->GetParent(); + while (tnode && !tnode->IsClass()) { + tnode = tnode->GetParent(); } - return (BlockNode*)blk; + return static_cast(tnode); } -maple::MIRSymbol *A2M::GetSymbol(TreeNode *tnode, BlockNode *block) { - const char *name = tnode->GetName(); +BlockNode *Ast2MplBuilder::GetSuperBlock(BlockNode *block) { + TreeNode *tnode = block->GetParent(); + while (tnode && !tnode->IsBlock()) { + tnode = tnode->GetParent(); + } + return static_cast(tnode); +} + +maple::MIRSymbol *Ast2MplBuilder::GetSymbol(TreeNode *tnode, BlockNode *block) { + unsigned idx = tnode->GetStrIdx(); maple::MIRSymbol *symbol = nullptr; // global symbol if (!block) { - std::pair P(tnode->GetName(), block); + std::pair P(idx, block); symbol = mNameBlockVarMap[P]; return symbol; } @@ -330,7 +446,7 @@ maple::MIRSymbol *A2M::GetSymbol(TreeNode *tnode, BlockNode *block) { // trace block hirachy for defined symbol BlockNode *blk = block; do { - std::pair P(tnode->GetName(), blk); + std::pair P(idx, blk); symbol = mNameBlockVarMap[P]; if (symbol) { return symbol; @@ -339,12 +455,12 @@ maple::MIRSymbol *A2M::GetSymbol(TreeNode *tnode, BlockNode *block) { } while (blk); // check parameters - maple::MIRFunction *func = GetFunc(blk); + maple::MIRFunction *func = GetCurrFunc(blk); if (!func) { NOTYETIMPL("Block parent hirachy"); return symbol; } - maple::GStrIdx stridx = maple::GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(name); + maple::GStrIdx stridx = maple::GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(tnode->GetName()); if (func->IsAFormalName(stridx)) { maple::FormalDef def = func->GetFormalFromName(stridx); return def.formalSym; @@ -352,21 +468,20 @@ maple::MIRSymbol *A2M::GetSymbol(TreeNode *tnode, BlockNode *block) { return symbol; } -maple::MIRSymbol *A2M::CreateTempVar(const char *prefix, maple::MIRType *type) { +maple::MIRSymbol *Ast2MplBuilder::CreateTempVar(const char *prefix, maple::MIRType *type) { if (!type) { return nullptr; } std::string str(prefix); str.append(SEP); - str.append(std::to_string(mVarUniqNum)); - mVarUniqNum++; + str.append(std::to_string(mUniqNum++)); maple::MIRFunction *func = mMirModule->CurFunction(); maple::MIRSymbol *var = mMirBuilder->CreateLocalDecl(str, *type); return var; } -maple::MIRSymbol *A2M::CreateSymbol(TreeNode *tnode, BlockNode *block) { - const char *name = tnode->GetName(); +maple::MIRSymbol *Ast2MplBuilder::CreateSymbol(TreeNode *tnode, BlockNode *block) { + std::string name = tnode->GetName(); maple::MIRType *mir_type; if (tnode->IsIdentifier()) { @@ -381,12 +496,12 @@ maple::MIRSymbol *A2M::CreateSymbol(TreeNode *tnode, BlockNode *block) { maple::MIRTypeKind kind = mir_type->GetKind(); if (kind == maple::kTypeClass || kind == maple::kTypeClassIncomplete || kind == maple::kTypeInterface || kind == maple::kTypeInterfaceIncomplete) { - mir_type = maple::GlobalTables::GetTypeTable().GetOrCreatePointerType(*mir_type, maple::PTY_ref); + mir_type = mMirBuilder->GetOrCreatePointerType(mir_type); } maple::MIRSymbol *symbol = nullptr; if (block) { - maple::MIRFunction *func = GetFunc(block); + maple::MIRFunction *func = GetCurrFunc(block); symbol = mMirBuilder->GetLocalDecl(name); std::string str(name); // symbol with same name already exist, use a uniq new name @@ -404,54 +519,197 @@ maple::MIRSymbol *A2M::CreateSymbol(TreeNode 
*tnode, BlockNode *block) { symbol = mMirBuilder->CreateGlobalDecl(str, *mir_type, maple::kScGlobal); } - std::pair P(name, block); + std::pair P(tnode->GetStrIdx(), block); mNameBlockVarMap[P] = symbol; return symbol; } -maple::MIRFunction *A2M::GetFunc(BlockNode *block) { +maple::MIRFunction *Ast2MplBuilder::GetCurrFunc(BlockNode *block) { maple::MIRFunction *func = nullptr; // func = mBlockFuncMap[block]; func = mMirModule->CurFunction(); return func; } -maple::MIRClassType *A2M::GetClass(BlockNode *block) { - maple::TyIdx tyidx = GetFunc(block)->GetClassTyIdx(); +maple::MIRClassType *Ast2MplBuilder::GetClass(BlockNode *block) { + maple::TyIdx tyidx = GetCurrFunc(block)->GetClassTyIdx(); return (maple::MIRClassType*)maple::GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyidx); } -maple::MIRFunction *A2M::SearchFunc(const char *name, const maple::MapleVector &args) { - if (mNameFuncMap.find(name) == mNameFuncMap.end()) { +bool3 Ast2MplBuilder::IsCompatibleTo(maple::PrimType expected, maple::PrimType prim) { + if (expected == prim) + return true3; + + maple::PrimitiveType type(prim); + bool3 comp = false3; + switch (expected) { + case maple::PTY_i8: + case maple::PTY_i16: + case maple::PTY_i32: + case maple::PTY_i64: + case maple::PTY_u8: + case maple::PTY_u16: + case maple::PTY_u32: + case maple::PTY_u64: + case maple::PTY_u1: + if (type.IsInteger()) { + comp = true3; + } + break; + case maple::PTY_ptr: + case maple::PTY_ref: + if (type.IsPointer()) { + comp = true3; + } + if (type.IsInteger()) { + comp = maybe3; + } + break; + case maple::PTY_a32: + case maple::PTY_a64: + if (type.IsAddress()) { + comp = true3; + } + break; + case maple::PTY_f32: + case maple::PTY_f64: + case maple::PTY_f128: + if (type.IsFloat()) { + comp = true3; + } + break; + case maple::PTY_c64: + case maple::PTY_c128: + if (type.IsInteger()) { + comp = true3; + } + break; + case maple::PTY_constStr: + case maple::PTY_gen: + case maple::PTY_agg: + case maple::PTY_unknown: + case maple::PTY_v2i64: + case maple::PTY_v4i32: + case maple::PTY_v8i16: + case maple::PTY_v16i8: + case maple::PTY_v2f64: + case maple::PTY_v4f32: + case maple::PTY_void: + default: + break; + } + return comp; +} + +maple::MIRFunction *Ast2MplBuilder::SearchFunc(unsigned idx, maple::MapleVector &args) { + if (mNameFuncMap.find(idx) == mNameFuncMap.end()) { return nullptr; } - for (auto it: mNameFuncMap[name]) { + std::vector candidates; + for (auto it: mNameFuncMap[idx]) { if (it->GetFormalCount() != args.size()) { continue; } bool matched = true; + bool3 mightmatched = true3; for (int i = 0; i < it->GetFormalCount(); i++) { - // TODO: allow compatible types maple::MIRType *type = maple::GlobalTables::GetTypeTable().GetTypeFromTyIdx(it->GetFormalDefAt(i).formalTyIdx); - if (type->GetPrimType() != args[i]->GetPrimType()) { + bool3 comp = IsCompatibleTo(type->GetPrimType(), args[i]->GetPrimType()); + if (comp == false3) { matched = false; + mightmatched = false3; break; + } else if (comp == maybe3) { + matched = false; } } - if (!matched) { - continue; + if (matched) { + return it; } - return it; + if (mightmatched != false3) { + candidates.push_back(it); + } + } + if (candidates.size()) { + return candidates[0]; } return nullptr; } -void A2M::MapAttr(maple::GenericAttrs &attr, const IdentifierNode *inode) { +maple::MIRFunction *Ast2MplBuilder::SearchFunc(TreeNode *method, maple::MapleVector &args, BlockNode *block) { + maple::MIRFunction *func = nullptr; + switch (method->GetKind()) { + case NK_Function: { + func = SearchFunc(method->GetStrIdx(), 
args); + break; + } + case NK_Identifier: { + IdentifierNode *imethod = static_cast(method); + func = SearchFunc(imethod->GetStrIdx(), args); + break; + } + case NK_Field: { + FieldNode *node = static_cast(method); + TreeNode *upper = node->GetUpper(); + TreeNode *field = node->GetField(); + if (field->IsIdentifier()) { + // pass upper as this + maple::BaseNode *bn = ProcessNode(SK_Expr, upper, block); + if (bn) { + args[0] = bn; + } + } + func = SearchFunc(field, args, block); + break; + } + default: + NOTYETIMPL("GetFuncName() method to be handled"); + } + + return func; +} + +void Ast2MplBuilder::MapAttr(maple::GenericAttrs &attr, AttrId id) { + switch (id) { +#undef ATTRIBUTE +#define ATTRIBUTE(X) case ATTR_##X: attr.SetAttr(maple::GENATTR_##X); break; +// #include "supported_attributes.def" +ATTRIBUTE(abstract) +ATTRIBUTE(const) +ATTRIBUTE(volatile) +ATTRIBUTE(final) +ATTRIBUTE(native) +ATTRIBUTE(private) +ATTRIBUTE(protected) +ATTRIBUTE(public) +ATTRIBUTE(static) +ATTRIBUTE(default) +ATTRIBUTE(synchronized) + +// ATTRIBUTE(strictfp) + case ATTR_strictfp: attr.SetAttr(maple::GENATTR_strict); break; + + default: + break; + } +} + +void Ast2MplBuilder::MapAttr(maple::GenericAttrs &attr, IdentifierNode *inode) { // SmallVector mAttrs unsigned anum = inode->GetAttrsNum(); for (int i = 0; i < anum; i++) { + const AttrId ast_attr = inode->GetAttrAtIndex(i); + MapAttr(attr, ast_attr); } } + +void Ast2MplBuilder::MapAttr(maple::GenericAttrs &attr, FunctionNode *fnode) { + unsigned anum = fnode->GetAttrsNum(); + for (int i = 0; i < anum; i++) { + const AttrId ast_attr = fnode->GetAttrAtIndex(i); + MapAttr(attr, ast_attr); + } } +} diff --git a/src/MapleFE/ast2mpl/src/cvt_block.cpp b/src/MapleFE/ast2mpl/src/cvt_block.cpp new file mode 100644 index 0000000000000000000000000000000000000000..419d1c54bd72c4aa5d0db2ac80bb82b0b40f713a --- /dev/null +++ b/src/MapleFE/ast2mpl/src/cvt_block.cpp @@ -0,0 +1,62 @@ +/* +* Copyright (C) [2021] Futurewei Technologies, Inc. All rights reverved. +* +* OpenArkFE is licensed under the Mulan PSL v2. +* You can use this software according to the terms and conditions of the Mulan PSL v2. +* You may obtain a copy of Mulan PSL v2 at: +* +* http://license.coscl.org.cn/MulanPSL2 +* +* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +* FIT FOR A PARTICULAR PURPOSE. +* See the Mulan PSL v2 for more details. 
+*/ + +#include "cvt_block.h" + +namespace maplefe { + +bool CvtToBlockVisitor::CvtToBlock() { + for (unsigned i = 0; i < mASTModule->GetTreesNum(); i++ ) { + TreeNode *it = mASTModule->GetTree(i); + Visit(it); + } + return mUpdated; +} + +// if-then-else +CondBranchNode *CvtToBlockVisitor::VisitCondBranchNode(CondBranchNode *node) { + TreeNode *tn = VisitTreeNode(node->GetTrueBranch()); + if (tn && !tn->IsBlock()) { + BlockNode *blk = (BlockNode*)gTreePool.NewTreeNode(sizeof(BlockNode)); + new (blk) BlockNode(); + blk->AddChild(tn); + node->SetTrueBranch(blk); + mUpdated = true; + } + tn = VisitTreeNode(node->GetFalseBranch()); + if (tn && !tn->IsBlock()) { + BlockNode *blk = (BlockNode*)gTreePool.NewTreeNode(sizeof(BlockNode)); + new (blk) BlockNode(); + blk->AddChild(tn); + node->SetFalseBranch(blk); + mUpdated = true; + } + return node; +} + +// for +ForLoopNode *CvtToBlockVisitor::VisitForLoopNode(ForLoopNode *node) { + TreeNode *tn = VisitTreeNode(node->GetBody()); + if (tn && !tn->IsBlock()) { + BlockNode *blk = (BlockNode*)gTreePool.NewTreeNode(sizeof(BlockNode)); + new (blk) BlockNode(); + blk->AddChild(tn); + node->SetBody(blk); + mUpdated = true; + } + return node; +} + +} diff --git a/src/MapleFE/ast2mpl/src/generic_attrs.cpp b/src/MapleFE/ast2mpl/src/generic_attrs.cpp new file mode 100644 index 0000000000000000000000000000000000000000..185d327387249f9ceea60c5fd288970ac463c739 --- /dev/null +++ b/src/MapleFE/ast2mpl/src/generic_attrs.cpp @@ -0,0 +1,92 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */
+#include "generic_attrs.h"
+
+namespace maple {
+TypeAttrs GenericAttrs::ConvertToTypeAttrs() {
+  TypeAttrs attr;
+  constexpr uint32 maxAttrNum = 128;
+  for (uint32 i = 0; i < maxAttrNum; ++i) {
+    if (attrFlag[i] == 0) {
+      continue;
+    }
+    auto tA = static_cast<GenericAttrKind>(i);
+    switch (tA) {
+#define TYPE_ATTR
+#define ATTR(STR) \
+      case GENATTR_##STR: \
+        attr.SetAttr(ATTR_##STR); \
+        break;
+#include "all_attributes.def"
+#undef ATTR
+#undef TYPE_ATTR
+      default:
+        ASSERT(false, "unknown TypeAttrs");
+        break;
+    }
+  }
+  return attr;
+}
+
+FuncAttrs GenericAttrs::ConvertToFuncAttrs() {
+  FuncAttrs attr;
+  constexpr uint32 maxAttrNum = 128;
+  for (uint32 i = 0; i < maxAttrNum; ++i) {
+    if (attrFlag[i] == 0) {
+      continue;
+    }
+    auto tA = static_cast<GenericAttrKind>(i);
+    switch (tA) {
+#define FUNC_ATTR
+#define ATTR(STR) \
+      case GENATTR_##STR: \
+        attr.SetAttr(FUNCATTR_##STR); \
+        break;
+#include "all_attributes.def"
+#undef ATTR
+#undef FUNC_ATTR
+      default:
+        ASSERT(false, "unknown FuncAttrs");
+        break;
+    }
+  }
+  return attr;
+}
+
+FieldAttrs GenericAttrs::ConvertToFieldAttrs() {
+  FieldAttrs attr;
+  constexpr uint32 maxAttrNum = 128;
+  for (uint32 i = 0; i < maxAttrNum; ++i) {
+    if (attrFlag[i] == 0) {
+      continue;
+    }
+    auto tA = static_cast<GenericAttrKind>(i);
+    switch (tA) {
+#define FIELD_ATTR
+#define ATTR(STR) \
+      case GENATTR_##STR: \
+        attr.SetAttr(FLDATTR_##STR); \
+        break;
+#include "all_attributes.def"
+#undef ATTR
+#undef FIELD_ATTR
+      default:
+        ASSERT(false, "unknown FieldAttrs");
+        break;
+    }
+  }
+  return attr;
+}
+}
\ No newline at end of file
diff --git a/src/MapleFE/ast2mpl/src/main.cpp b/src/MapleFE/ast2mpl/src/main.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..a0f89a9114ac926688e5a8b2f6e0d4ff6c9176d8
--- /dev/null
+++ b/src/MapleFE/ast2mpl/src/main.cpp
@@ -0,0 +1,96 @@
+/*
+* Copyright (C) [2020] Futurewei Technologies, Inc. All rights reverved.
+*
+* OpenArkFE is licensed under the Mulan PSL v2.
+* You can use this software according to the terms and conditions of the Mulan PSL v2.
+* You may obtain a copy of Mulan PSL v2 at:
+*
+* http://license.coscl.org.cn/MulanPSL2
+*
+* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+* FIT FOR A PARTICULAR PURPOSE.
+* See the Mulan PSL v2 for more details.
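Note on generic_attrs.cpp above: each conversion expands all_attributes.def under a different guard (TYPE_ATTR, FUNC_ATTR, FIELD_ATTR), so a single attribute list generates the enum and all three switches, keeping GENATTR_*, ATTR_*, FUNCATTR_* and FLDATTR_* in sync. A compact, self-contained illustration of that X-macro technique; the attribute list is inlined here as a macro instead of a .def file, and the attribute names are made up for the example:

#include <bitset>
#include <iostream>

// Stand-in for all_attributes.def: one ATTR(name) entry per attribute.
#define ALL_ATTRIBUTES \
  ATTR(static)         \
  ATTR(final)          \
  ATTR(abstract)

// First expansion: build the generic attribute enum, one enumerator per entry.
enum GenericAttrKind {
#define ATTR(STR) GENATTR_##STR,
  ALL_ATTRIBUTES
#undef ATTR
  GENATTR_count
};

// Second expansion: the same list drives a switch over the generic kind.
const char *Name(GenericAttrKind k) {
  switch (k) {
#define ATTR(STR) case GENATTR_##STR: return #STR;
    ALL_ATTRIBUTES
#undef ATTR
    default: return "unknown";
  }
}

int main() {
  std::bitset<GENATTR_count> flags;  // plays the role of GenericAttrs::attrFlag
  flags.set(GENATTR_static);
  flags.set(GENATTR_abstract);
  for (int i = 0; i < GENATTR_count; ++i) {
    if (flags[i]) std::cout << Name(static_cast<GenericAttrKind>(i)) << "\n";
  }
  return 0;
}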
+*/ + +#include +#include +#include +#include "gen_astload.h" +#include "ast_handler.h" +#include "ast2mpl.h" + +static void help() { + std::cout << "ast2cpp a.ast[,b.ast] [options]:" << std::endl; + std::cout << " --out=x.cpp : cpp output file" << std::endl; + std::cout << " --help : print this help" << std::endl; + std::cout << " --trace=n : Emit trace with 4-bit combo levels 1...15" << std::endl; + std::cout << " 1 : Emit ast tree visits" << std::endl; + std::cout << " 2 : Emit graph" << std::endl; + std::cout << " --emit-ts-only : Emit ts code only" << std::endl; + std::cout << " --emit-ts : Emit ts code" << std::endl; + std::cout << " --format-cpp : Format cpp" << std::endl; + std::cout << " --no-imported : Do not process the imported modules" << std::endl; + std::cout << "default out name uses the first input name: a.cpp" << std::endl; +} + +int main (int argc, char *argv[]) { + if (argc == 1 || (!strncmp(argv[1], "--help", 6) && (strlen(argv[1]) == 6))) { + help(); + exit(-1); + } + + unsigned flags; + // one or more input .ast files separated by ',' + const char *inputname = argv[1]; + + // Parse the argument + for (unsigned i = 2; i < argc; i++) { + if (!strncmp(argv[i], "--trace=", 8)) { + int val = atoi(argv[i] + 8); + if (val < 1 || val > 15) { + help(); + exit(-1); + } + flags |= val; + } else { + std::cerr << "unknown option " << argv[i] << std::endl; + exit(-1); + } + } + + // input ast files + std::vector inputfiles; + if (inputname) { + std::stringstream ss; + ss.str(inputname); + std::string item; + while (std::getline(ss, item, ',')) { + // std::cout << "item " << item << " xxx"<< std::endl; + inputfiles.push_back(item); + } + } + + unsigned trace = (flags & maplefe::FLG_trace); + maplefe::AST_Handler handler(trace); + for (auto astfile: inputfiles) { + std::ifstream input(astfile, std::ifstream::binary); + input >> std::noskipws; + std::istream_iterator s(input), e; + maplefe::AstBuffer vec(s, e); + maplefe::AstLoad loadAst; + maplefe::ModuleNode *mod = loadAst.LoadFromAstBuf(vec); + // add mod to the vector + while(mod) { + handler.AddModule(mod); + mod = loadAst.Next(); + } + } + + maplefe::A2M *a2m = new maplefe::A2M(&handler, flags); + int res = a2m->ProcessAST(); + + delete a2m; + + return 0; +} diff --git a/src/MapleFE/shared/src/maplefe_mir_builder.cpp b/src/MapleFE/ast2mpl/src/maplefe_mir_builder.cpp similarity index 94% rename from src/MapleFE/shared/src/maplefe_mir_builder.cpp rename to src/MapleFE/ast2mpl/src/maplefe_mir_builder.cpp index adae368a639ea461a274e7ba7d5df2e55b5fcbc3..680635f744443258f5713f5ebaab85fe54fc1278 100644 --- a/src/MapleFE/shared/src/maplefe_mir_builder.cpp +++ b/src/MapleFE/ast2mpl/src/maplefe_mir_builder.cpp @@ -59,7 +59,8 @@ bool FEMIRBuilder::TraverseToNamedField(maple::MIRStructType *structType, unsign } maple::BaseNode *FEMIRBuilder::CreateExprDread(const maple::MIRSymbol *symbol, maple::FieldID fieldID) { - maple::BaseNode *nd = new maple::AddrofNode(maple::OP_dread, maple::kPtyInvalid, symbol->GetStIdx(), fieldID); + maple::PrimType prim = symbol->GetType()->GetPrimType(); + maple::BaseNode *nd = new maple::AddrofNode(maple::OP_dread, prim, symbol->GetStIdx(), fieldID); return nd; } diff --git a/src/MapleFE/shared/src/mpl_processor.cpp b/src/MapleFE/ast2mpl/src/mpl_processor.cpp similarity index 50% rename from src/MapleFE/shared/src/mpl_processor.cpp rename to src/MapleFE/ast2mpl/src/mpl_processor.cpp index 62e2e29d0f242febba626ffefd240c4fceb98544..378c18860ff09647cbe0d32e5dd21e526a94b0f3 100644 --- 
a/src/MapleFE/shared/src/mpl_processor.cpp +++ b/src/MapleFE/ast2mpl/src/mpl_processor.cpp @@ -13,11 +13,12 @@ * See the Mulan PSL v2 for more details. */ -#include "ast2mpl.h" +#include "ast2mpl_builder.h" +#include "stringpool.h" namespace maplefe { -maple::BaseNode *A2M::ProcessNodeDecl(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { +maple::BaseNode *Ast2MplBuilder::ProcessNodeDecl(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { if (!tnode) { return nullptr; } @@ -25,19 +26,19 @@ maple::BaseNode *A2M::ProcessNodeDecl(StmtExprKind skind, TreeNode *tnode, Block maple::BaseNode *mpl_node = nullptr; switch (tnode->GetKind()) { case NK_Class: { - mpl_node =ProcessClassDecl(SK_Stmt, tnode, nullptr); + mpl_node =ProcessClassDecl(SK_Stmt, tnode, block); break; } case NK_Interface: { - mpl_node = ProcessInterfaceDecl(SK_Stmt, tnode, nullptr); + mpl_node = ProcessInterfaceDecl(SK_Stmt, tnode, block); break; } case NK_Function: { - mpl_node = ProcessFuncDecl(SK_Stmt, tnode, nullptr); + mpl_node = ProcessFuncDecl(SK_Stmt, tnode, block); break; } case NK_Block: { - mpl_node = ProcessBlockDecl(SK_Stmt, tnode, nullptr); + mpl_node = ProcessBlockDecl(SK_Stmt, tnode, block); break; } default: { @@ -47,7 +48,7 @@ maple::BaseNode *A2M::ProcessNodeDecl(StmtExprKind skind, TreeNode *tnode, Block return mpl_node; } -maple::BaseNode *A2M::ProcessNode(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { +maple::BaseNode *Ast2MplBuilder::ProcessNode(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { if (!tnode) { return nullptr; } @@ -64,21 +65,43 @@ maple::BaseNode *A2M::ProcessNode(StmtExprKind skind, TreeNode *tnode, BlockNode return mpl_node; } -maple::BaseNode *A2M::ProcessPackage(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { +maple::BaseNode *Ast2MplBuilder::ProcessModule(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { + NOTYETIMPL("ProcessModule()"); + ModuleNode *node = static_cast(tnode); + return nullptr; +} + +maple::BaseNode *Ast2MplBuilder::ProcessPackage(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { NOTYETIMPL("ProcessPackage()"); PackageNode *node = static_cast(tnode); return nullptr; } -maple::BaseNode *A2M::ProcessImport(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { +maple::BaseNode *Ast2MplBuilder::ProcessDeclare(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { + return nullptr; +} + +maple::BaseNode *Ast2MplBuilder::ProcessImport(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { NOTYETIMPL("ProcessImport()"); ImportNode *node = static_cast(tnode); return nullptr; } -maple::BaseNode *A2M::ProcessIdentifier(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { +maple::BaseNode *Ast2MplBuilder::ProcessExport(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { + NOTYETIMPL("ProcessExport()"); + ExportNode *node = static_cast(tnode); + return nullptr; +} + +maple::BaseNode *Ast2MplBuilder::ProcessXXportAsPair(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { + NOTYETIMPL("ProcessXXportAsPair()"); + XXportAsPairNode *node = static_cast(tnode); + return nullptr; +} + +maple::BaseNode *Ast2MplBuilder::ProcessIdentifier(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { IdentifierNode *node = static_cast(tnode); - const char *name = node->GetName(); + std::string name = node->GetName(); if (skind == SK_Stmt) { AST2MPLMSG("ProcessIdentifier() is a decl", name); @@ -94,7 +117,7 @@ maple::BaseNode *A2M::ProcessIdentifier(StmtExprKind skind, TreeNode *tnode, Blo } maple::GStrIdx stridx = 
maple::GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(name); - maple::MIRFunction *func = GetFunc(block); + maple::MIRFunction *func = GetCurrFunc(block); // check parameters if (func->IsAFormalName(stridx)) { @@ -118,7 +141,7 @@ maple::BaseNode *A2M::ProcessIdentifier(StmtExprKind skind, TreeNode *tnode, Blo maple::BaseNode *bn = mMirBuilder->CreateExprDread(sym); maple::MIRType *ftype = maple::GlobalTables::GetTypeTable().GetTypeFromTyIdx(mFieldData->GetTyIdx()); AST2MPLMSG("ProcessIdentifier() found match field", name); - return new maple::IreadNode(maple::OP_iread, ftype->GetPrimType(), mFieldData->GetTyIdx(), maple::FieldID(fid), bn); + return new maple::IreadNode(maple::OP_iread, ftype->GetPrimType(), sym->GetTyIdx(), maple::FieldID(fid), bn); } // check global var @@ -129,11 +152,12 @@ maple::BaseNode *A2M::ProcessIdentifier(StmtExprKind skind, TreeNode *tnode, Blo } AST2MPLMSG("ProcessIdentifier() unknown identifier", name); + // create a dummy var with name and mDefaultType symbol = mMirBuilder->GetOrCreateLocalDecl(name, *mDefaultType); return mMirBuilder->CreateExprDread(symbol); } -maple::BaseNode *A2M::ProcessField(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { +maple::BaseNode *Ast2MplBuilder::ProcessField(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { FieldNode *node = static_cast(tnode); maple::BaseNode *bn = nullptr; @@ -148,40 +172,81 @@ maple::BaseNode *A2M::ProcessField(StmtExprKind skind, TreeNode *tnode, BlockNod maple::MIRType *ctype = nullptr; maple::TyIdx cptyidx(0); - maple::BaseNode *dr = nullptr; + maple::BaseNode *nd = nullptr; + nd = ProcessNode(SK_Expr, upper, block); if (upper->IsLiteral()) { LiteralNode *lt = static_cast(upper); if (lt->GetData().mType == LT_ThisLiteral) { - maple::MIRFunction *func = GetFunc(block); + maple::MIRFunction *func = GetCurrFunc(block); maple::MIRSymbol *sym = func->GetFormal(0); // this cptyidx = sym->GetTyIdx(); ctype = GetClass(block); - dr = new maple::DreadNode(maple::OP_dread, maple::PTY_ptr, sym->GetStIdx(), 0); + nd = new maple::DreadNode(maple::OP_dread, maple::PTY_ptr, sym->GetStIdx(), 0); } else { NOTYETIMPL("ProcessField() not this literal"); } + } else if (nd) { + maple::TyIdx tyidx(0); + switch (nd->GetOpCode()) { + case maple::OP_dread: { + maple::AddrofNode *dr = static_cast(nd); + maple::StIdx stidx = dr->GetStIdx(); + maple::MIRSymbol *sym = nullptr; + if (stidx.Islocal()) { + sym = mMirModule->CurFunction()->GetSymTab()->GetSymbolFromStIdx(stidx.Idx()); + } else { + sym = maple::GlobalTables::GetGsymTable().GetSymbolFromStidx(stidx.Idx()); + } + tyidx = sym->GetTyIdx(); + break; + } + case maple::OP_iread: { + maple::IreadNode *ir = static_cast(nd); + tyidx = ir->GetTyIdx(); + break; + } + default: + NOTYETIMPL("ProcessField() supper"); + break; + } + maple::MIRType *type = maple::GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyidx); + cptyidx = type->GetTypeIndex(); + while (type->GetKind() == maple::kTypePointer) { + maple::MIRPtrType *ptype = static_cast(type); + type = ptype->GetPointedType(); + } + ctype = type; } else { - NOTYETIMPL("ProcessField() upper not literal"); + NOTYETIMPL("ProcessField() nullptr nd"); } - if (!ctype) { - NOTYETIMPL("ProcessField() null class type"); + if (!(ctype && ctype->IsInstanceOfMIRStructType())) { + NOTYETIMPL("ProcessField() null or non structure/class/interface type"); return bn; } - const char *fname = field->GetName(); + std::string fname = field->GetName(); maple::GStrIdx stridx = 
maple::GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(fname); mFieldData->ResetStrIdx(stridx); maple::uint32 fid = 0; bool status = mMirBuilder->TraverseToNamedField((maple::MIRStructType*)ctype, fid, mFieldData); + maple::MIRType *ftype = maple::GlobalTables::GetTypeTable().GetTypeFromTyIdx(mFieldData->GetTyIdx()); if (status) { - maple::MIRType *ftype = maple::GlobalTables::GetTypeTable().GetTypeFromTyIdx(mFieldData->GetTyIdx()); - bn = new maple::IreadNode(maple::OP_iread, ftype->GetPrimType(), cptyidx, fid, dr); + bn = new maple::IreadNode(maple::OP_iread, ftype->GetPrimType(), cptyidx, fid, nd); + } else { + NOTYETIMPL("ProcessField() can not find field"); + // insert a dummy field with fname and mDefaultType + const maple::FieldAttrs attr; + maple::TyIdxFieldAttrPair P0(mDefaultType->GetTypeIndex(), attr); + maple::FieldPair P1(stridx, P0); + maple::MIRStructType *stype = static_cast(ctype); + stype->GetFields().push_back(P1); + bn = new maple::IreadNode(maple::OP_iread, maple::PTY_begin, cptyidx, fid+1, nd); } return bn; } -maple::BaseNode *A2M::ProcessFieldDecl(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { +maple::BaseNode *Ast2MplBuilder::ProcessFieldDecl(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { FieldNode *node = static_cast(tnode); maple::BaseNode *bn = nullptr; @@ -192,88 +257,133 @@ maple::BaseNode *A2M::ProcessFieldDecl(StmtExprKind skind, TreeNode *tnode, Bloc TreeNode *parent = tnode->GetParent(); MASSERT((parent->IsClass() || parent->IsInterface()) && "Not class or interface"); - maple::MIRType *ptype = mNodeTypeMap[parent->GetName()]; + maple::MIRType *ptype = mNodeTypeMap[parent->GetStrIdx()]; + if (ptype->IsMIRPtrType()) { + maple::MIRPtrType * ptrtype = static_cast(ptype); + ptype = ptrtype->GetPointedType(); + } maple::MIRStructType *stype = static_cast(ptype); MASSERT(stype && "struct type not valid"); IdentifierNode *inode = static_cast(tnode); - const char *name = inode->GetName(); + std::string name = inode->GetName(); TreeNode *type = inode->GetType(); // PrimTypeNode or UserTypeNode TreeNode *init = inode->GetInit(); // Init value maple::GenericAttrs genAttrs; MapAttr(genAttrs, inode); + maple::FieldAttrs fAttrs = genAttrs.ConvertToFieldAttrs(); + bool isStatic = fAttrs.GetAttr(maple::FLDATTR_static); - maple::GStrIdx stridx = maple::GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(name); maple::MIRType *mir_type = MapType(type); - // always use pointer type for classes, with PTY_ref - if (mir_type->GetKind() == maple::kTypeClass || mir_type->GetKind() == maple::kTypeClassIncomplete || - mir_type->GetKind() == maple::kTypeInterface || mir_type->GetKind() == maple::kTypeInterfaceIncomplete) { - mir_type = maple::GlobalTables::GetTypeTable().GetOrCreatePointerType(*mir_type, maple::PTY_ref); + if (!mir_type) { + NOTYETIMPL("ProcessFieldSetup() unknown field type"); } - if (mir_type) { - maple::TyIdxFieldAttrPair P0(mir_type->GetTypeIndex(), genAttrs.ConvertToFieldAttrs()); - maple::FieldPair P1(stridx, P0); + + // use mangled name for static fields as they will be global + std::string str(name); + if (isStatic) { + str.insert(0, "|"); + str.insert(0, parent->GetName()); + } + + maple::GStrIdx stridx = maple::GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(str); + maple::TyIdxFieldAttrPair P0(mir_type->GetTypeIndex(), fAttrs); + maple::FieldPair P1(stridx, P0); + maple::MIRSymbol *symbol = nullptr; + if (isStatic) { + stype->GetStaticFields().push_back(P1); + symbol = mMirBuilder->CreateGlobalDecl(str, *mir_type, 
maple::kScGlobal); + symbol->SetAttrs(genAttrs.ConvertToTypeAttrs()); + } else { stype->GetFields().push_back(P1); } + if (init) { + if (isStatic) { + if (init->IsLiteral()) { + maple::BaseNode *val = ProcessLiteral(SK_Expr, init, nullptr); + if (val->op == maple::OP_constval) { + maple::ConstvalNode *cval = static_cast(val); + symbol->SetKonst(cval->GetConstVal()); + } else { + NOTYETIMPL("ProcessFieldSetup() not constval Init"); + } + } + } else { + NOTYETIMPL("ProcessFieldSetup() non-static Init"); + } + } + return bn; } -maple::BaseNode *A2M::ProcessAssert(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { +maple::BaseNode *Ast2MplBuilder::ProcessDecl(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { + DeclNode *node = static_cast(tnode); + TreeNode *vars = node->GetVar(); + return ProcessNode(skind, vars, block); +} + +maple::BaseNode *Ast2MplBuilder::ProcessAssert(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { NOTYETIMPL("ProcessAssert()"); AssertNode *node = static_cast(tnode); return nullptr; } -maple::BaseNode *A2M::ProcessDimension(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { +maple::BaseNode *Ast2MplBuilder::ProcessDimension(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { NOTYETIMPL("ProcessDimension()"); DimensionNode *node = static_cast(tnode); return nullptr; } -maple::BaseNode *A2M::ProcessAttr(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { +maple::BaseNode *Ast2MplBuilder::ProcessAttr(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { NOTYETIMPL("ProcessAttr()"); // AttrNode *node = static_cast(tnode); return nullptr; } -maple::BaseNode *A2M::ProcessPrimType(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { +maple::BaseNode *Ast2MplBuilder::ProcessPrimType(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { NOTYETIMPL("ProcessPrimType()"); PrimTypeNode *node = static_cast(tnode); return nullptr; } -maple::BaseNode *A2M::ProcessPrimArrayType(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { +maple::BaseNode *Ast2MplBuilder::ProcessPrimArrayType(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { NOTYETIMPL("ProcessPrimArrayType()"); PrimArrayTypeNode *node = static_cast(tnode); return nullptr; } -maple::BaseNode *A2M::ProcessUserType(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { +maple::BaseNode *Ast2MplBuilder::ProcessUserType(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { NOTYETIMPL("ProcessUserType()"); UserTypeNode *node = static_cast(tnode); return nullptr; } -maple::BaseNode *A2M::ProcessCast(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { +maple::BaseNode *Ast2MplBuilder::ProcessCast(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { NOTYETIMPL("ProcessCast()"); CastNode *node = static_cast(tnode); return nullptr; } -maple::BaseNode *A2M::ProcessParenthesis(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { +maple::BaseNode *Ast2MplBuilder::ProcessParenthesis(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { ParenthesisNode *node = static_cast(tnode); return ProcessNode(skind, node->GetExpr(), block); } -maple::BaseNode *A2M::ProcessVarList(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { +maple::BaseNode *Ast2MplBuilder::ProcessVarList(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { VarListNode *node = static_cast(tnode); - for (int i = 0; i < node->GetNum(); i++) { - TreeNode *n = node->VarAtIndex(i); + for (int i = 0; i < node->GetVarsNum(); i++) { + TreeNode *n = node->GetVarAtIndex(i); IdentifierNode *inode = 
static_cast(n); AST2MPLMSG("ProcessVarList() decl", inode->GetName()); maple::MIRSymbol *symbol = CreateSymbol(inode, block); + + maple::GenericAttrs genAttrs; + MapAttr(genAttrs, inode); + maple::TypeAttrs tAttrs = genAttrs.ConvertToTypeAttrs(); + symbol->SetAttrs(tAttrs); + TreeNode *init = inode->GetInit(); // Init value if (init) { maple::BaseNode *bn = ProcessNode(SK_Expr, init, block); @@ -288,35 +398,78 @@ maple::BaseNode *A2M::ProcessVarList(StmtExprKind skind, TreeNode *tnode, BlockN return nullptr; } -maple::BaseNode *A2M::ProcessExprList(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { +maple::BaseNode *Ast2MplBuilder::ProcessExprList(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { NOTYETIMPL("ProcessExprList()"); ExprListNode *node = static_cast(tnode); return nullptr; } -maple::BaseNode *A2M::ProcessLiteral(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { +maple::BaseNode *Ast2MplBuilder::ProcessNamespace(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { + return nullptr; +} + +maple::BaseNode *Ast2MplBuilder::ProcessLiteral(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { LiteralNode *node = static_cast(tnode); LitData data = node->GetData(); + maple::MIRType *type = nullptr; + maple::PrimType prim = maple::PTY_unknown; maple::BaseNode *bn = nullptr; switch (data.mType) { case LT_IntegerLiteral: { - maple::MIRType *typeI32 = maple::GlobalTables::GetTypeTable().GetInt32(); - maple::MIRIntConst *cst = new maple::MIRIntConst(data.mData.mInt, *typeI32); - bn = new maple::ConstvalNode(maple::PTY_i32, cst); + type = MapPrimType(TY_Int); + prim = MapPrim(TY_Int); + maple::MIRIntConst *cst = new maple::MIRIntConst(data.mData.mInt, *type); + bn = new maple::ConstvalNode(prim, cst); break; } case LT_BooleanLiteral: { - int val = (data.mData.mBool == true) ? 1 : 0; - maple::MIRType *typeU1 = maple::GlobalTables::GetTypeTable().GetUInt1(); - bn = new maple::ConstvalNode(maple::PTY_u1, new maple::MIRIntConst(val, *typeU1)); + type = MapPrimType(TY_Boolean); + prim = MapPrim(TY_Boolean); + maple::MIRIntConst *cst = new maple::MIRIntConst((data.mData.mBool == true) ? 
1 : 0, *type); + bn = new maple::ConstvalNode(prim, cst); + break; + } + case LT_CharacterLiteral: { + type = MapPrimType(TY_Char); + prim = MapPrim(TY_Char); + maple::MIRIntConst *cst = new maple::MIRIntConst((int)data.mData.mChar.mData.mChar, *type); + bn = new maple::ConstvalNode(prim, cst); + break; + } + case LT_FPLiteral: { + type = MapPrimType(TY_Float); + prim = MapPrim(TY_Float); + maple::MIRFloatConst *cst = new maple::MIRFloatConst(data.mData.mFloat, *type); + bn = new maple::ConstvalNode(prim, cst); + break; + } + case LT_DoubleLiteral: { + type = MapPrimType(TY_Double); + prim = MapPrim(TY_Double); + maple::MIRDoubleConst *cst = new maple::MIRDoubleConst(data.mData.mDouble, *type); + bn = new maple::ConstvalNode(prim, cst); + break; + } + case LT_StringLiteral: { + const std::string str(gStringPool.GetStringFromStrIdx(data.mData.mStrIdx)); + maple::UStrIdx strIdx = maple::GlobalTables::GetUStrTable().GetOrCreateStrIdxFromName(str); + bn = new maple::ConststrNode(strIdx); + bn->SetPrimType(maple::PTY_ptr); + break; + } + case LT_NullLiteral: { + type = MapPrimType(TY_Null); + prim = MapPrim(TY_Null); + maple::MIRIntConst *cst = new maple::MIRIntConst(0, *type); + bn = new maple::ConstvalNode(prim, cst); + break; + } + case LT_ThisLiteral: { + maple::MIRFunction *func = GetCurrFunc(block); + maple::MIRSymbol *sym = func->GetFormal(0); + bn = mMirBuilder->CreateExprDread(sym); break; } - case LT_FPLiteral: - case LT_DoubleLiteral: - case LT_CharacterLiteral: - case LT_StringLiteral: - case LT_NullLiteral: - case LT_ThisLiteral: default: { NOTYETIMPL("ProcessLiteral() need support"); break; @@ -325,7 +478,7 @@ maple::BaseNode *A2M::ProcessLiteral(StmtExprKind skind, TreeNode *tnode, BlockN return bn; } -maple::BaseNode *A2M::ProcessUnaOperator(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { +maple::BaseNode *Ast2MplBuilder::ProcessUnaOperator(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { UnaOperatorNode *node = static_cast(tnode); OprId ast_op = node->GetOprId(); TreeNode *ast_rhs = node->GetOpnd(); @@ -347,11 +500,11 @@ maple::BaseNode *A2M::ProcessUnaOperator(StmtExprKind skind, TreeNode *tnode, Bl return mpl_node; } -maple::BaseNode *A2M::ProcessBinOperator(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { +maple::BaseNode *Ast2MplBuilder::ProcessBinOperator(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { BinOperatorNode *bon = static_cast(tnode); - OprId ast_op = bon->mOprId; - TreeNode *ast_lhs = bon->mOpndA; - TreeNode *ast_rhs = bon->mOpndB; + OprId ast_op = bon->GetOprId(); + TreeNode *ast_lhs = bon->GetOpndA(); + TreeNode *ast_rhs = bon->GetOpndB(); maple::BaseNode *lhs = ProcessNode(SK_Expr, ast_lhs, block); maple::BaseNode *rhs = ProcessNode(SK_Expr, ast_rhs, block); maple::BaseNode *mpl_node = nullptr; @@ -394,35 +547,98 @@ maple::BaseNode *A2M::ProcessBinOperator(StmtExprKind skind, TreeNode *tnode, Bl return mpl_node; } -maple::BaseNode *A2M::ProcessTerOperator(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { +maple::BaseNode *Ast2MplBuilder::ProcessTerOperator(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { NOTYETIMPL("ProcessTerOperator()"); TerOperatorNode *node = static_cast(tnode); return nullptr; } -maple::BaseNode *A2M::ProcessLambda(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { +maple::BaseNode *Ast2MplBuilder::ProcessLambda(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { NOTYETIMPL("ProcessLambda()"); LambdaNode *node = static_cast(tnode); return nullptr; } -maple::BaseNode 
*A2M::ProcessBlockDecl(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { +maple::BaseNode *Ast2MplBuilder::ProcessInstanceOf(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { + NOTYETIMPL("ProcessInstanceOf()"); + InstanceOfNode *node = static_cast(tnode); + return nullptr; +} + +maple::BaseNode *Ast2MplBuilder::ProcessTemplateLiteral(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { + NOTYETIMPL("ProcessTemplateLiteral()"); + TemplateLiteralNode *node = static_cast(tnode); + return nullptr; +} + +maple::BaseNode *Ast2MplBuilder::ProcessRegExpr(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { + return nullptr; +} + +maple::BaseNode *Ast2MplBuilder::ProcessIn(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { + NOTYETIMPL("ProcessIn()"); + InNode *node = static_cast(tnode); + return nullptr; +} + +maple::BaseNode *Ast2MplBuilder::ProcessComputedName(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { + return nullptr; +} + +maple::BaseNode *Ast2MplBuilder::ProcessIs(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { + NOTYETIMPL("ProcessIs()"); + IsNode *node = static_cast(tnode); + return nullptr; +} + +maple::BaseNode *Ast2MplBuilder::ProcessTypeOf(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { + NOTYETIMPL("ProcessTypeOf()"); + TypeOfNode *node = static_cast(tnode); + return nullptr; +} + +maple::BaseNode *Ast2MplBuilder::ProcessAwait(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { + return nullptr; +} + +maple::BaseNode *Ast2MplBuilder::ProcessKeyOf(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { + return nullptr; +} + +maple::BaseNode *Ast2MplBuilder::ProcessInfer(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { + return nullptr; +} + +maple::BaseNode *Ast2MplBuilder::ProcessTripleSlash(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { + return nullptr; +} + +maple::BaseNode *Ast2MplBuilder::ProcessFunctionType(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { + return nullptr; +} + +maple::BaseNode *Ast2MplBuilder::ProcessBlockDecl(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { BlockNode *ast_block = static_cast(tnode); - maple::BlockNode *blk = mBlockNodeMap[block]; for (int i = 0; i < ast_block->GetChildrenNum(); i++) { TreeNode *child = ast_block->GetChildAtIndex(i); - maple::BaseNode *stmt = ProcessNodeDecl(skind, child, block); + maple::BaseNode *stmt = ProcessNodeDecl(skind, child, ast_block); } return nullptr; } -maple::BaseNode *A2M::ProcessBlock(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { +maple::BaseNode *Ast2MplBuilder::ProcessBlock(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { BlockNode *ast_block = static_cast(tnode); - maple::BlockNode *blk = mBlockNodeMap[block]; + if (block) { + // promote statements to parent + mBlockNodeMap[ast_block] = mBlockNodeMap[block]; + } else { + mBlockNodeMap[ast_block] = new maple::BlockNode(); + } + maple::BlockNode *blk = mBlockNodeMap[ast_block]; for (int i = 0; i < ast_block->GetChildrenNum(); i++) { TreeNode *child = ast_block->GetChildAtIndex(i); - maple::BaseNode *stmt = ProcessNode(skind, child, block); - if (stmt) { + maple::BaseNode *stmt = ProcessNode(skind, child, ast_block); + if (stmt && IsStmt(child)) { blk->AddStatement((maple::StmtNode*)stmt); if (mTraceA2m) stmt->Dump(0); } @@ -430,19 +646,19 @@ maple::BaseNode *A2M::ProcessBlock(StmtExprKind skind, TreeNode *tnode, BlockNod return nullptr; } -maple::BaseNode *A2M::ProcessFunction(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { 
+maple::BaseNode *Ast2MplBuilder::ProcessFunction(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { MASSERT(tnode->IsFunction() && "it is not an FunctionNode"); NOTYETIMPL("ProcessFunction()"); return nullptr; } -maple::BaseNode *A2M::ProcessFuncDecl(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { +maple::BaseNode *Ast2MplBuilder::ProcessFuncDecl(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { FunctionNode *ast_func = static_cast(tnode); - const char *name = ast_func->GetName(); + std::string name = ast_func->GetName(); // SmallVector mAttrs; // SmallVector mAnnotations; //annotation or pragma // SmallVector mThrows; // exceptions it can throw - TreeNode *ast_rettype = ast_func->GetType(); // return type + TreeNode *ast_rettype = ast_func->GetRetType(); // return type // SmallVector mParams; // BlockNode *ast_body = ast_func->GetBody(); // DimensionNode *mDims; @@ -452,7 +668,11 @@ maple::BaseNode *A2M::ProcessFuncDecl(StmtExprKind skind, TreeNode *tnode, Block TreeNode *parent = tnode->GetParent(); maple::MIRStructType *stype = nullptr; if (parent->IsClass() || parent->IsInterface()) { - maple::MIRType *ptype = mNodeTypeMap[parent->GetName()]; + maple::MIRType *ptype = mNodeTypeMap[parent->GetStrIdx()]; + if (ptype->IsMIRPtrType()) { + maple::MIRPtrType * ptrtype = static_cast(ptype); + ptype = ptrtype->GetPointedType(); + } stype = static_cast(ptype); MASSERT(stype && "struct type not valid"); } @@ -461,8 +681,12 @@ maple::BaseNode *A2M::ProcessFuncDecl(StmtExprKind skind, TreeNode *tnode, Block maple::MIRFunction *func = mMirBuilder->GetOrCreateFunction(name, rettype->GetTypeIndex()); // init function fields - func->SetBody(func->GetCodeMemPool()->New()); + maple::BlockNode *funcbody = func->GetCodeMemPool()->New(); + func->SetBody(funcbody); func->AllocSymTab(); + if (ast_func->IsConstructor()) { + func->SetAttr(maple::FUNCATTR_constructor); + } mMirModule->AddFunction(func); mMirModule->SetCurFunction(func); @@ -480,7 +704,7 @@ maple::BaseNode *A2M::ProcessFuncDecl(StmtExprKind skind, TreeNode *tnode, Block if (stype) { maple::GStrIdx stridx = maple::GlobalTables::GetStrTable().GetOrCreateStrIdxFromName("this"); maple::TypeAttrs attr = maple::TypeAttrs(); - maple::MIRType *sptype = maple::GlobalTables::GetTypeTable().GetOrCreatePointerType(*stype, maple::PTY_ref); + maple::MIRType *sptype = mMirBuilder->GetOrCreatePointerType(stype); maple::MIRSymbol *sym = mMirBuilder->GetOrCreateLocalDecl("this", *sptype); sym->SetStorageClass(maple::kScFormal); func->AddArgument(sym); @@ -512,10 +736,11 @@ maple::BaseNode *A2M::ProcessFuncDecl(StmtExprKind skind, TreeNode *tnode, Block // use className|funcName|_argTypes_retType as function name UpdateFuncName(func); mFuncMap[ast_func] = func; - mNameFuncMap[name].push_back(func); + mNameFuncMap[ast_func->GetStrIdx()].push_back(func); // create function type - maple::MIRFuncType *functype = (maple::MIRFuncType*)maple::GlobalTables::GetTypeTable().GetOrCreateFunctionType(*mMirModule, rettype->GetTypeIndex(), funcvectype, funcvecattr, /*isvarg*/ false, false); + maple::MIRFuncType *functype = (maple::MIRFuncType*)maple::GlobalTables::GetTypeTable().GetOrCreateFunctionType( + rettype->GetTypeIndex(), funcvectype, funcvecattr, /*isvarg*/ false); func->SetMIRFuncType(functype); // update function symbol's type @@ -531,11 +756,10 @@ maple::BaseNode *A2M::ProcessFuncDecl(StmtExprKind skind, TreeNode *tnode, Block stype->GetMethods().push_back(P1); } - if (ast_func->GetBody()) { - BlockNode *ast_block = 
static_cast(ast_func->GetBody()); - for (int i = 0; i < ast_block->GetChildrenNum(); i++) { - TreeNode *child = ast_block->GetChildAtIndex(i); - maple::BaseNode *stmt = ProcessNodeDecl(skind, child, block); + if (ast_body) { + for (int i = 0; i < ast_body->GetChildrenNum(); i++) { + TreeNode *child = ast_body->GetChildAtIndex(i); + maple::BaseNode *stmt = ProcessNodeDecl(skind, child, ast_body); } } @@ -543,7 +767,7 @@ maple::BaseNode *A2M::ProcessFuncDecl(StmtExprKind skind, TreeNode *tnode, Block return nullptr; } -maple::BaseNode *A2M::ProcessFuncSetup(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { +maple::BaseNode *Ast2MplBuilder::ProcessFuncSetup(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { FunctionNode *ast_func = static_cast(tnode); maple::MIRFunction *func = mFuncMap[ast_func]; @@ -572,11 +796,27 @@ maple::BaseNode *A2M::ProcessFuncSetup(StmtExprKind skind, TreeNode *tnode, Bloc return nullptr; } -maple::BaseNode *A2M::ProcessClassDecl(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { +maple::BaseNode *Ast2MplBuilder::ProcessClassDecl(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { ClassNode *classnode = static_cast(tnode); - const char *name = classnode->GetName(); + std::string name = classnode->GetName(); + TreeNode *parent = GetSuperClass(classnode); + // mangle the name for inner classes + if (parent) { + if (parent->IsClass()) { + std::string str = parent->GetName(); + str.append("$"); // indicate inner class name + str.append(std::to_string(mUniqNum++)); + str.append(name); + name = strdup(str.c_str()); + classnode->SetStrIdx(name); + } + } maple::MIRType *type = maple::GlobalTables::GetTypeTable().GetOrCreateClassType(name, *mMirModule); - mNodeTypeMap[name] = type; + type->SetMIRTypeKind(maple::kTypeClass); + // always use pointer type for classes, with PTY_ref + type = mMirBuilder->GetOrCreatePointerType(type); + unsigned idx = gStringPool.GetStrIdx(name); + mNodeTypeMap[idx] = type; AST2MPLMSG("\n================== class =====================", name); for (int i=0; i < classnode->GetLocalClassesNum(); i++) { @@ -591,7 +831,7 @@ maple::BaseNode *A2M::ProcessClassDecl(StmtExprKind skind, TreeNode *tnode, Bloc ProcessFieldDecl(skind, classnode->GetField(i), block); } - for (int i=0; i < classnode->GetConstructorNum(); i++) { + for (int i=0; i < classnode->GetConstructorsNum(); i++) { ProcessFuncDecl(skind, classnode->GetConstructor(i), block); } @@ -599,16 +839,13 @@ maple::BaseNode *A2M::ProcessClassDecl(StmtExprKind skind, TreeNode *tnode, Bloc ProcessFuncDecl(skind, classnode->GetMethod(i), block); } - // set kind to kTypeClass from kTypeClassIncomplete - type->SetMIRTypeKind(maple::kTypeClass); return nullptr; } -maple::BaseNode *A2M::ProcessClass(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { +maple::BaseNode *Ast2MplBuilder::ProcessClass(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { ClassNode *classnode = static_cast(tnode); - const char *name = classnode->GetName(); - maple::MIRType *type = maple::GlobalTables::GetTypeTable().GetOrCreateClassType(name, *mMirModule); - mNodeTypeMap[name] = type; + std::string name = classnode->GetName(); + maple::MIRType *type = mNodeTypeMap[classnode->GetStrIdx()]; AST2MPLMSG("\n================== class =====================", name); for (int i=0; i < classnode->GetLocalClassesNum(); i++) { @@ -619,7 +856,7 @@ maple::BaseNode *A2M::ProcessClass(StmtExprKind skind, TreeNode *tnode, BlockNod ProcessInterface(skind, classnode->GetLocalInterface(i), block); } - for (int i=0; i < 
classnode->GetConstructorNum(); i++) { + for (int i=0; i < classnode->GetConstructorsNum(); i++) { ProcessFuncSetup(skind, classnode->GetConstructor(i), block); } @@ -627,49 +864,149 @@ maple::BaseNode *A2M::ProcessClass(StmtExprKind skind, TreeNode *tnode, BlockNod ProcessFuncSetup(skind, classnode->GetMethod(i), block); } - // set kind to kTypeClass from kTypeClassIncomplete - type->SetMIRTypeKind(maple::kTypeClass); return nullptr; } -maple::BaseNode *A2M::ProcessInterfaceDecl(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { +maple::BaseNode *Ast2MplBuilder::ProcessInterfaceDecl(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { NOTYETIMPL("ProcessInterfaceDecl()"); InterfaceNode *node = static_cast(tnode); return nullptr; } -maple::BaseNode *A2M::ProcessInterface(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { +maple::BaseNode *Ast2MplBuilder::ProcessInterface(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { NOTYETIMPL("ProcessInterface()"); InterfaceNode *node = static_cast(tnode); return nullptr; } -maple::BaseNode *A2M::ProcessAnnotationType(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { +maple::BaseNode *Ast2MplBuilder::ProcessArrayElement(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { + NOTYETIMPL("ProcessArrayElement()"); + ArrayElementNode *node = static_cast(tnode); + return nullptr; +} + +maple::BaseNode *Ast2MplBuilder::ProcessArrayLiteral(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { + NOTYETIMPL("ProcessArrayLiteral()"); + ArrayLiteralNode *node = static_cast(tnode); + return nullptr; +} + +maple::BaseNode *Ast2MplBuilder::ProcessNumIndexSig(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { + return nullptr; +} + +maple::BaseNode *Ast2MplBuilder::ProcessStrIndexSig(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { + return nullptr; +} + +maple::BaseNode *Ast2MplBuilder::ProcessBindingElement(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { + return nullptr; +} + +maple::BaseNode *Ast2MplBuilder::ProcessBindingPattern(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { + return nullptr; +} + +maple::BaseNode *Ast2MplBuilder::ProcessTypeAlias(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { + return nullptr; +} + +maple::BaseNode *Ast2MplBuilder::ProcessAsType(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { + return nullptr; +} + +maple::BaseNode *Ast2MplBuilder::ProcessConditionalType(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { + return nullptr; +} + +maple::BaseNode *Ast2MplBuilder::ProcessTypeParameter(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { + return nullptr; +} + +maple::BaseNode *Ast2MplBuilder::ProcessNameTypePair(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { + return nullptr; +} + +maple::BaseNode *Ast2MplBuilder::ProcessTupleType(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { + return nullptr; +} + +maple::BaseNode *Ast2MplBuilder::ProcessStruct(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { + NOTYETIMPL("ProcessStruct()"); + StructNode *node = static_cast(tnode); + return nullptr; +} + +maple::BaseNode *Ast2MplBuilder::ProcessStructLiteral(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { + NOTYETIMPL("ProcessStructLiteral()"); + StructLiteralNode *node = static_cast(tnode); + return nullptr; +} + +maple::BaseNode *Ast2MplBuilder::ProcessFieldLiteral(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { + NOTYETIMPL("ProcessFieldLiteral()"); + FieldLiteralNode *node = static_cast(tnode); + 
return nullptr; +} + +maple::BaseNode *Ast2MplBuilder::ProcessAnnotationType(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { NOTYETIMPL("ProcessAnnotationType()"); AnnotationTypeNode *node = static_cast(tnode); return nullptr; } -maple::BaseNode *A2M::ProcessAnnotation(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { +maple::BaseNode *Ast2MplBuilder::ProcessAnnotation(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { NOTYETIMPL("ProcessAnnotation()"); AnnotationNode *node = static_cast(tnode); return nullptr; } -maple::BaseNode *A2M::ProcessException(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { +maple::BaseNode *Ast2MplBuilder::ProcessTry(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { + NOTYETIMPL("ProcessTry()"); + TryNode *node = static_cast(tnode); + return nullptr; +} + +maple::BaseNode *Ast2MplBuilder::ProcessCatch(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { + NOTYETIMPL("ProcessCatch()"); + CatchNode *node = static_cast(tnode); + return nullptr; +} + +maple::BaseNode *Ast2MplBuilder::ProcessFinally(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { + NOTYETIMPL("ProcessFinally()"); + FinallyNode *node = static_cast(tnode); + return nullptr; +} + +maple::BaseNode *Ast2MplBuilder::ProcessThrow(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { + NOTYETIMPL("ProcessThrow()"); + ThrowNode *node = static_cast(tnode); + return nullptr; +} + +maple::BaseNode *Ast2MplBuilder::ProcessException(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { NOTYETIMPL("ProcessException()"); ExceptionNode *node = static_cast(tnode); return nullptr; } -maple::BaseNode *A2M::ProcessReturn(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { +maple::BaseNode *Ast2MplBuilder::ProcessReturn(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { ReturnNode *node = static_cast(tnode); maple::BaseNode *val = ProcessNode(SK_Expr, node->GetResult(), block); maple::NaryStmtNode *stmt = mMirBuilder->CreateStmtReturn(val); return stmt; } -maple::BaseNode *A2M::ProcessCondBranch(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { +maple::BaseNode *Ast2MplBuilder::ProcessYield(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { + return nullptr; +} + +maple::BaseNode *Ast2MplBuilder::ProcessArrayType(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { + return nullptr; +} + +maple::BaseNode *Ast2MplBuilder::ProcessCondBranch(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { CondBranchNode *node = static_cast(tnode); maple::BaseNode *cond = ProcessNode(SK_Expr, node->GetCond(), block); if (!cond) { @@ -700,13 +1037,19 @@ maple::BaseNode *A2M::ProcessCondBranch(StmtExprKind skind, TreeNode *tnode, Blo return ifnode; } -maple::BaseNode *A2M::ProcessBreak(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { +maple::BaseNode *Ast2MplBuilder::ProcessBreak(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { NOTYETIMPL("ProcessBreak()"); BreakNode *node = static_cast(tnode); return nullptr; } -maple::BaseNode *A2M::ProcessLoopCondBody(StmtExprKind skind, TreeNode *cond, TreeNode *body, BlockNode *block) { +maple::BaseNode *Ast2MplBuilder::ProcessContinue(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { + NOTYETIMPL("ProcessContinue()"); + ContinueNode *node = static_cast(tnode); + return nullptr; +} + +maple::BaseNode *Ast2MplBuilder::ProcessLoopCondBody(StmtExprKind skind, TreeNode *cond, TreeNode *body, BlockNode *block) { maple::BaseNode *mircond = ProcessNode(SK_Expr, cond, block); if (!mircond) { 
NOTYETIMPL("ProcessLoopCondBody() condition"); @@ -727,14 +1070,14 @@ maple::BaseNode *A2M::ProcessLoopCondBody(StmtExprKind skind, TreeNode *cond, Tr return nullptr; } -maple::BaseNode *A2M::ProcessForLoop(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { +maple::BaseNode *Ast2MplBuilder::ProcessForLoop(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { ForLoopNode *node = static_cast(tnode); maple::BlockNode *mblock = mBlockNodeMap[block]; maple::BaseNode *bn = nullptr; // init - for (int i = 0; i < node->GetInitNum(); i++) { - bn = ProcessNode(SK_Stmt, node->InitAtIndex(i), block); + for (int i = 0; i < node->GetInitsNum(); i++) { + bn = ProcessNode(SK_Stmt, node->GetInitAtIndex(i), block); if (bn) { mblock->AddStatement((maple::StmtNode*)bn); if (mTraceA2m) bn->Dump(0); @@ -748,8 +1091,8 @@ maple::BaseNode *A2M::ProcessForLoop(StmtExprKind skind, TreeNode *tnode, BlockN maple::BlockNode *mbody = mBlockNodeMap[static_cast(astbody)]; // update stmts are added into loop mbody - for (int i = 0; i < node->GetUpdateNum(); i++) { - bn = ProcessNode(SK_Stmt, node->UpdateAtIndex(i), (maplefe::BlockNode*)astbody); + for (int i = 0; i < node->GetUpdatesNum(); i++) { + bn = ProcessNode(SK_Stmt, node->GetUpdateAtIndex(i), (maplefe::BlockNode*)astbody); if (bn) { mbody->AddStatement((maple::StmtNode*)bn); if (mTraceA2m) bn->Dump(0); @@ -759,33 +1102,81 @@ maple::BaseNode *A2M::ProcessForLoop(StmtExprKind skind, TreeNode *tnode, BlockN return nullptr; } -maple::BaseNode *A2M::ProcessWhileLoop(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { +maple::BaseNode *Ast2MplBuilder::ProcessWhileLoop(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { WhileLoopNode *node = static_cast(tnode); return ProcessLoopCondBody(skind, node->GetCond(), node->GetBody(), block); } -maple::BaseNode *A2M::ProcessDoLoop(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { +maple::BaseNode *Ast2MplBuilder::ProcessDoLoop(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { DoLoopNode *node = static_cast(tnode); return ProcessLoopCondBody(skind, node->GetCond(), node->GetBody(), block); } -maple::BaseNode *A2M::ProcessNew(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { - NOTYETIMPL("ProcessNew()"); +maple::BaseNode *Ast2MplBuilder::ProcessNew(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { NewNode *node = static_cast(tnode); - return nullptr; + maple::BaseNode *bn = nullptr; + + // search for constructor to call + TreeNode *id = node->GetId(); + if (!id->IsClass()) { + NOTYETIMPL("ProcessNew() mId not class"); + return bn; + } + ClassNode *classnode = static_cast(id); + + FunctionNode *func = nullptr; + for (int i=0; i < classnode->GetConstructorsNum(); i++) { + func = classnode->GetConstructor(i); + if (func->GetParamsNum() == node->GetArgsNum()) { + break; + } + } + if (!func) { + NOTYETIMPL("ProcessNew() null ast constructor"); + return bn; + } + + maple::BaseNode *obj = GetNewNodeLhs(node, block); + if (!obj) { + NOTYETIMPL("ProcessNew() null lhs"); + return bn; + } + + maple::MapleVector args(mMirModule->CurFuncCodeMemPoolAllocator()->Adapter()); + args.push_back(obj); + // pass arg + for (int i = 0; i < node->GetArgsNum(); i++) { + maple::BaseNode *arg = ProcessNode(SK_Expr, node->GetArg(i), block); + if (arg) { + args.push_back(arg); + } else { + NOTYETIMPL("ProcessCall() null arg"); + } + } + + maple::MIRFunction *callfunc = SearchFunc(func, args, block); + if (!callfunc) { + NOTYETIMPL("ProcessNew() null maple constructor"); + return bn; + } + maple::PUIdx puIdx = 
callfunc->GetPuidx(); + maple::Opcode callop = maple::OP_virtualcall; + maple::StmtNode *stmt = mMirBuilder->CreateStmtCallAssigned(puIdx, args, nullptr, callop); + maple::BlockNode *blk = mBlockNodeMap[block]; + blk->AddStatement(stmt); + return bn; } -maple::BaseNode *A2M::ProcessDelete(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { +maple::BaseNode *Ast2MplBuilder::ProcessDelete(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { NOTYETIMPL("ProcessDelete()"); DeleteNode *node = static_cast(tnode); return nullptr; } -maple::BaseNode *A2M::ProcessCall(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { - NOTYETIMPL("ProcessCall()"); +maple::BaseNode *Ast2MplBuilder::ProcessCall(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { CallNode *node = static_cast(tnode); maple::MapleVector args(mMirModule->CurFuncCodeMemPoolAllocator()->Adapter()); - maple::MIRFunction *func = GetFunc(block); + maple::MIRFunction *func = GetCurrFunc(block); // pass this maple::MIRSymbol *sym = func->GetFormal(0); @@ -801,24 +1192,20 @@ maple::BaseNode *A2M::ProcessCall(StmtExprKind skind, TreeNode *tnode, BlockNode } TreeNode *method = node->GetMethod(); - if (!method->IsIdentifier()) { - NOTYETIMPL("ProcessCall() method not an identifier"); - } - IdentifierNode *imethod = static_cast(method); - func = SearchFunc(imethod->GetName(), args); - if (!func) { + maple::MIRFunction *callfunc = SearchFunc(method, args, block); + if (!callfunc) { NOTYETIMPL("ProcessCall() method not found"); return nullptr; } - maple::PUIdx puIdx = func->GetPuidx(); + maple::PUIdx puIdx = callfunc->GetPuidx(); - maple::MIRType *returnType = func->GetReturnType(); + maple::MIRType *returnType = callfunc->GetReturnType(); maple::MIRSymbol *rv = nullptr; - maple::Opcode callop = maple::OP_call; + maple::Opcode callop = func->IsJava() ? maple::OP_virtualcall : maple::OP_call; if (returnType->GetPrimType() != maple::PTY_void) { - NOTYETIMPL("ProcessCall() OP_callassigned"); - // rv = CreateTempVar("retvar", returnType); - // callop = maple::OP_callassigned; + AST2MPLMSG0("ProcessCall() OP_[virtual]callassigned"); + rv = CreateTempVar("retvar", returnType); + callop = func->IsJava() ? 
maple::OP_virtualcallassigned : maple::OP_callassigned; } maple::StmtNode *stmt = mMirBuilder->CreateStmtCallAssigned(puIdx, args, rv, callop); @@ -832,25 +1219,25 @@ maple::BaseNode *A2M::ProcessCall(StmtExprKind skind, TreeNode *tnode, BlockNode } } -maple::BaseNode *A2M::ProcessSwitchLabel(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { +maple::BaseNode *Ast2MplBuilder::ProcessSwitchLabel(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { NOTYETIMPL("ProcessSwitchLabel()"); SwitchLabelNode *node = static_cast(tnode); return nullptr; } -maple::BaseNode *A2M::ProcessSwitchCase(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { +maple::BaseNode *Ast2MplBuilder::ProcessSwitchCase(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { NOTYETIMPL("ProcessSwitchCase()"); SwitchCaseNode *node = static_cast(tnode); return nullptr; } -maple::BaseNode *A2M::ProcessSwitch(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { +maple::BaseNode *Ast2MplBuilder::ProcessSwitch(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { NOTYETIMPL("ProcessSwitch()"); SwitchNode *node = static_cast(tnode); return nullptr; } -maple::BaseNode *A2M::ProcessPass(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { +maple::BaseNode *Ast2MplBuilder::ProcessPass(StmtExprKind skind, TreeNode *tnode, BlockNode *block) { PassNode *node = static_cast(tnode); maple::BlockNode *blk = mBlockNodeMap[block]; maple::BaseNode *stmt = nullptr; @@ -860,12 +1247,15 @@ maple::BaseNode *A2M::ProcessPass(StmtExprKind skind, TreeNode *tnode, BlockNode if (stmt && IsStmt(child)) { blk->AddStatement((maple::StmtNode*)stmt); if (mTraceA2m) stmt->Dump(0); + } else { + NOTYETIMPL("ProcessPass() having unhandled stmt"); + break; } } return nullptr; } -maple::BaseNode *A2M::ProcessUnaOperatorMpl(StmtExprKind skind, +maple::BaseNode *Ast2MplBuilder::ProcessUnaOperatorMpl(StmtExprKind skind, maple::Opcode op, maple::BaseNode *bn, BlockNode *block) { @@ -910,7 +1300,7 @@ maple::BaseNode *A2M::ProcessUnaOperatorMpl(StmtExprKind skind, return node; } -maple::BaseNode *A2M::ProcessBinOperatorMplAssign(StmtExprKind skind, +maple::BaseNode *Ast2MplBuilder::ProcessBinOperatorMplAssign(StmtExprKind skind, maple::BaseNode *lhs, maple::BaseNode *rhs, BlockNode *block) { @@ -939,7 +1329,7 @@ maple::BaseNode *A2M::ProcessBinOperatorMplAssign(StmtExprKind skind, return node; } -maple::BaseNode *A2M::ProcessBinOperatorMplComboAssign(StmtExprKind skind, +maple::BaseNode *Ast2MplBuilder::ProcessBinOperatorMplComboAssign(StmtExprKind skind, maple::Opcode op, maple::BaseNode *lhs, maple::BaseNode *rhs, @@ -949,7 +1339,7 @@ maple::BaseNode *A2M::ProcessBinOperatorMplComboAssign(StmtExprKind skind, return assign; } -maple::BaseNode *A2M::ProcessBinOperatorMplArror(StmtExprKind skind, +maple::BaseNode *Ast2MplBuilder::ProcessBinOperatorMplArror(StmtExprKind skind, maple::BaseNode *lhs, maple::BaseNode *rhs, BlockNode *block) { @@ -957,5 +1347,51 @@ maple::BaseNode *A2M::ProcessBinOperatorMplArror(StmtExprKind skind, return nullptr; } +// Lhs = new ... +maple::BaseNode *Ast2MplBuilder::GetNewNodeLhs(NewNode *node, BlockNode *block) { + maple::BaseNode *obj = nullptr; + TreeNode *id = node->GetId(); + + TreeNode *parent = node->GetParent(); + if (!parent) { + NOTYETIMPL("GetNewNodeLhs() null parent"); + return obj; + } + maple::BaseNode *bn = nullptr; + switch (parent->GetKind()) { + // Lhs = new ... 
+ case NK_BinOperator: { + BinOperatorNode *biop = static_cast(parent); + switch (biop->GetOprId()) { + case OPR_Assign: { + obj = ProcessNode(SK_Expr, biop->GetOpndA(), block); + break; + } + default: { + NOTYETIMPL("GetNewNodeLhs() not BinOperator"); + break; + } + } + break; + } + // decl init: Type Lhs = new ... + case NK_Identifier: { + obj = ProcessNode(SK_Expr, parent, block); + break; + } + default: + NOTYETIMPL("GetNewNodeLhs() other kind"); + break; + } + + if (!obj) { + AST2MPLMSG0("ProcessNew() null lhs"); + maple::MIRType *type = MapType(id); + maple::MIRSymbol *var = CreateTempVar("dummy_new", type); + obj = mMirBuilder->CreateExprDread(var); + } + return obj; +} + } diff --git a/src/MapleFE/astopt/Makefile b/src/MapleFE/astopt/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..0a16203b38c8275b85df68e1d5e3fb7ad56fac63 --- /dev/null +++ b/src/MapleFE/astopt/Makefile @@ -0,0 +1,27 @@ +# Copyright (C) [2021] Futurewei Technologies, Inc. All rights reverved. +# +# OpenArkFE is licensed under the Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# + +include ../Makefile.in + +all: + $(MAKE) -C src + +clean: + rm -rf $(BUILDDIR)/astopt + +test: + $(MAKE) -C ../test p + +.PHONY: $(TARGS) + diff --git a/src/MapleFE/astopt/include/ast_adj.h b/src/MapleFE/astopt/include/ast_adj.h new file mode 100644 index 0000000000000000000000000000000000000000..36675d91eb0d6004051f5d04c2518db280fe7337 --- /dev/null +++ b/src/MapleFE/astopt/include/ast_adj.h @@ -0,0 +1,85 @@ +/* +* Copyright (C) [2021-2022] Futurewei Technologies, Inc. All rights reverved. +* +* OpenArkFE is licensed under the Mulan PSL v2. +* You can use this software according to the terms and conditions of the Mulan PSL v2. +* You may obtain a copy of Mulan PSL v2 at: +* +* http://license.coscl.org.cn/MulanPSL2 +* +* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +* FIT FOR A PARTICULAR PURPOSE. +* See the Mulan PSL v2 for more details. 
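Note on GetNewNodeLhs at the end of mpl_processor.cpp above: the destination of a `new` expression is derived from its parent node, either the left-hand side of an enclosing assignment, the identifier of a declaration initializer, or a compiler-generated temporary ("dummy_new") as a fallback. A toy sketch of that parent-kind dispatch (the kinds and names are illustrative only, not the real MapleFE NodeKind enum):

#include <iostream>
#include <string>

// Illustrative parent kinds for a `new` expression.
enum class ParentKind { Assign, DeclInit, Other };

// Pick the variable receiving the new object; fall back to a temporary.
std::string NewTarget(ParentKind parent, const std::string &lhsName) {
  switch (parent) {
    case ParentKind::Assign:   return lhsName;      // x = new C(...)
    case ParentKind::DeclInit: return lhsName;      // C x = new C(...)
    default:                   return "dummy_new";  // temporary receiver
  }
}

int main() {
  std::cout << NewTarget(ParentKind::Assign, "obj") << "\n";  // obj
  std::cout << NewTarget(ParentKind::Other, "") << "\n";      // dummy_new
  return 0;
}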
+*/ + +#ifndef __AST_ADJ_HEADER__ +#define __AST_ADJ_HEADER__ + +#include +#include +#include +#include +#include "ast_module.h" +#include "ast.h" +#include "ast_type.h" +#include "gen_astvisitor.h" +#include "ast_info.h" + +namespace maplefe { + +class AST_ADJ { + private: + Module_Handler *mHandler; + unsigned mFlags; + + public: + explicit AST_ADJ(Module_Handler *h, unsigned f) : mHandler(h), mFlags(f) {} + ~AST_ADJ() {} + + void AdjustAST(); +}; + +class AdjustASTVisitor : public AstVisitor { + private: + Module_Handler *mHandler; + AST_INFO *mInfo; + AST_Util *mUtil; + unsigned mFlags; + bool mUpdated; + + public: + explicit AdjustASTVisitor(Module_Handler *h, unsigned f, bool base = false) + : AstVisitor((f & FLG_trace_1) && base), mHandler(h), mFlags(f), mUpdated(false) { + mInfo = h->GetINFO(); + mInfo->SetNameAnonyStruct(true); + mUtil = h->GetUtil(); + } + ~AdjustASTVisitor() = default; + + std::unordered_map mRenameMap; + void CheckAndRenameCppKeywords(TreeNode *node); + void AssignPseudoName(TreeNode *node); + + DeclNode *VisitDeclNode(DeclNode *node); + ImportNode *VisitImportNode(ImportNode *node); + ExportNode *VisitExportNode(ExportNode *node); + CondBranchNode *VisitCondBranchNode(CondBranchNode *node); + ForLoopNode *VisitForLoopNode(ForLoopNode *node); + LambdaNode *VisitLambdaNode(LambdaNode *node); + IdentifierNode *VisitIdentifierNode(IdentifierNode *node); + NamespaceNode *VisitNamespaceNode(NamespaceNode *node); + StructNode *VisitStructNode(StructNode *node); + StructLiteralNode *VisitStructLiteralNode(StructLiteralNode *node); + ClassNode *VisitClassNode(ClassNode *node); + InterfaceNode *VisitInterfaceNode(InterfaceNode *node); + FunctionNode *VisitFunctionNode(FunctionNode *node); + UserTypeNode *VisitUserTypeNode(UserTypeNode *node); + TypeAliasNode *VisitTypeAliasNode(TypeAliasNode *node); + LiteralNode *VisitLiteralNode(LiteralNode *node); + UnaOperatorNode *VisitUnaOperatorNode(UnaOperatorNode *node); + PrimArrayTypeNode *VisitPrimArrayTypeNode(PrimArrayTypeNode *node); +}; + +} +#endif diff --git a/src/MapleFE/astopt/include/ast_cfa.h b/src/MapleFE/astopt/include/ast_cfa.h new file mode 100644 index 0000000000000000000000000000000000000000..b4b65773184350b795ac3f891a43421fa315f854 --- /dev/null +++ b/src/MapleFE/astopt/include/ast_cfa.h @@ -0,0 +1,45 @@ +/* +* Copyright (C) [2021] Futurewei Technologies, Inc. All rights reverved. +* +* OpenArkFE is licensed under the Mulan PSL v2. +* You can use this software according to the terms and conditions of the Mulan PSL v2. +* You may obtain a copy of Mulan PSL v2 at: +* +* http://license.coscl.org.cn/MulanPSL2 +* +* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +* FIT FOR A PARTICULAR PURPOSE. +* See the Mulan PSL v2 for more details. 
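Note on ast_adj.h above: it follows the pass shape used throughout astopt, a thin driver class (AST_ADJ) that owns the Module_Handler and trace flags, plus an AstVisitor subclass that rewrites nodes and records in mUpdated whether anything changed, for example renaming identifiers that collide with C++ keywords (CheckAndRenameCppKeywords with mRenameMap). A reduced sketch of that shape with placeholder classes; the real gen_astvisitor interface is richer than this:

#include <iostream>
#include <string>
#include <vector>

// Simplified stand-ins for the MapleFE classes.
struct TreeNode { const char *name; };

struct ToyVisitor {                      // plays the role of AstVisitor
  bool mUpdated = false;
  virtual TreeNode *VisitTreeNode(TreeNode *node) { return node; }
  virtual ~ToyVisitor() = default;
};

struct RenameCppKeywords : ToyVisitor {  // plays the role of AdjustASTVisitor
  TreeNode *VisitTreeNode(TreeNode *node) override {
    if (std::string(node->name) == "delete") {  // a C++ keyword used as an identifier
      node->name = "delete__";                  // the renaming scheme here is made up
      mUpdated = true;
    }
    return node;
  }
};

// Driver: visit every top-level tree of the module, like AST_ADJ::AdjustAST().
bool AdjustAST(std::vector<TreeNode> &moduleTrees) {
  RenameCppKeywords visitor;
  for (auto &t : moduleTrees) visitor.VisitTreeNode(&t);
  return visitor.mUpdated;
}

int main() {
  std::vector<TreeNode> trees = {{"foo"}, {"delete"}};
  std::cout << "updated=" << AdjustAST(trees) << " second=" << trees[1].name << "\n";
  return 0;
}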
+*/
+
+#ifndef __AST_CFA_HEADER__
+#define __AST_CFA_HEADER__
+
+#include
+#include
+#include "ast_module.h"
+#include "ast.h"
+#include "ast_type.h"
+#include "gen_astvisitor.h"
+
+namespace maplefe {
+
+class AST_CFA {
+ private:
+  Module_Handler *mHandler;
+  unsigned        mFlags;
+  std::unordered_set<unsigned> mReachableBbIdx;
+
+ public:
+  explicit AST_CFA(Module_Handler *h, unsigned f) : mHandler(h), mFlags(f) {}
+  ~AST_CFA() {}
+
+  void ControlFlowAnalysis();
+  void CollectReachableBB();
+  void RemoveUnreachableBB();
+  void Dump();
+};
+
+}
+#endif
diff --git a/src/MapleFE/astopt/include/ast_cfg.h b/src/MapleFE/astopt/include/ast_cfg.h
new file mode 100644
index 0000000000000000000000000000000000000000..40fd2e9c4af8fafe3e10683b1d725625aff95cc4
--- /dev/null
+++ b/src/MapleFE/astopt/include/ast_cfg.h
@@ -0,0 +1,272 @@
+/*
+* Copyright (C) [2021] Futurewei Technologies, Inc. All rights reserved.
+*
+* OpenArkFE is licensed under the Mulan PSL v2.
+* You can use this software according to the terms and conditions of the Mulan PSL v2.
+* You may obtain a copy of Mulan PSL v2 at:
+*
+*  http://license.coscl.org.cn/MulanPSL2
+*
+* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+* FIT FOR A PARTICULAR PURPOSE.
+* See the Mulan PSL v2 for more details.
+*/
+
+#ifndef __AST_CFG_HEADER__
+#define __AST_CFG_HEADER__
+
+#include
+#include "ast_module.h"
+#include "ast.h"
+#include "ast_type.h"
+#include "gen_astvisitor.h"
+
+namespace maplefe {
+
+enum BBKind {
+  BK_Unknown,     // Uninitialized
+  BK_Uncond,      // BB for unconditional branch
+  BK_Block,       // BB for a block/compound statement
+  BK_Branch,      // BB ends up with a predicate for true/false branches
+  BK_LoopHeader,  // BB for a loop header of a for, for/in, for/of, while, or do/while statement
+  BK_Switch,      // BB for a switch statement
+  BK_Case,        // BB for a case in switch statement
+  BK_Try,         // BB for a try block
+  BK_Catch,       // BB for a catch block
+  BK_Finally,     // BB for a finally block
+  BK_Yield,       // Yield BB ended with a yield statement
+  BK_Terminated,  // Return BB ended with a return/break/continue statement
+  BK_Join,        // BB at join point for loops and switch
+  BK_Join2,       // BB at join point for if-stmt and block
+};
+
+enum BBAttribute : unsigned {
+  AK_None    = 0,
+  AK_Entry   = 1 << 0,
+  AK_Exit    = 1 << 1,
+  AK_Break   = 1 << 2,
+  AK_Return  = 1 << 3,
+  AK_Throw   = 1 << 4,
+  AK_Cont    = 1 << 5,
+  AK_InLoop  = 1 << 6,
+  AK_HasCall = 1 << 7,
+  AK_ALL     = 0xffffffff
+};
+
+inline BBAttribute operator|(BBAttribute x, BBAttribute y) {
+  return static_cast<BBAttribute>(static_cast<unsigned>(x) | static_cast<unsigned>(y));
+}
+inline BBAttribute operator&(BBAttribute x, BBAttribute y) {
+  return static_cast<BBAttribute>(static_cast<unsigned>(x) & static_cast<unsigned>(y));
+}
+
+using BBIndex = unsigned;
+class Module_Handler;
+class AST_AST;
+
+class CfgBB {
+ private:
+  BBKind      mKind;
+  BBAttribute mAttr;
+  BBIndex     mId;                    // unique BB id
+  TreeNode   *mPredicate;             // a predicate for true/false branches
+  TreeNode   *mAuxNode;               // the auxiliary node of current BB
+  SmallList<TreeNode *> mStatements;  // all statement nodes
+  SmallList<CfgBB *>    mSuccessors;  // for BK_Branch: [0] true branch, [1] false branch
+  SmallList<CfgBB *>    mPredecessors;
+
+  friend class AST_AST;
+  friend class AST_CFA;
+
+ public:
+  explicit CfgBB(BBKind k)
+    : mKind(k), mAttr(AK_None), mId(GetNextId()), mPredicate(nullptr), mAuxNode(nullptr) {}
+  ~CfgBB() {mStatements.Release(); mSuccessors.Release(); mPredecessors.Release();}
+
+  void SetKind(BBKind k) {mKind = k;}
+  BBKind
GetKind() {return mKind;} + + void SetAttr(BBAttribute a) {mAttr = mAttr | a;} + BBIndex GetAttr() {return mAttr;} + bool TestAttr(BBAttribute a) {return mAttr & a;} + + void SetId(BBIndex id) {mId = id;} + BBIndex GetId() {return mId;} + + void SetPredicate(TreeNode *node) {mPredicate = node;} + TreeNode *GetPredicate() {return mPredicate;} + + void SetAuxNode(TreeNode *node) {mAuxNode = node;} + TreeNode *GetAuxNode() {return mAuxNode;} + + unsigned GetStatementsNum() {return mStatements.GetNum();} + TreeNode* GetStatementAtIndex(unsigned i) {return mStatements.ValueAtIndex(i);} + + void AddStatement(TreeNode *stmt) { + if(mKind != BK_Terminated) { + mStatements.PushBack(stmt); + stmt->SetIsStmt(); + } + } + + void InsertStmtAfter(TreeNode *new_stmt, TreeNode *exist_stmt) { + mStatements.LocateValue(exist_stmt); + mStatements.InsertAfter(new_stmt); + } + + void InsertStmtBefore(TreeNode *new_stmt, TreeNode *exist_stmt) { + mStatements.LocateValue(exist_stmt); + mStatements.InsertBefore(new_stmt); + } + + void AddSuccessor(CfgBB *succ) { + if(mKind == BK_Terminated) { + return; + } + mSuccessors.PushBack(succ); + succ->mPredecessors.PushBack(this); + } + unsigned GetSuccessorsNum() {return mSuccessors.GetNum();} + CfgBB *GetSuccessorAtIndex(unsigned i) {return mSuccessors.ValueAtIndex(i);} + unsigned GetPredecessorsNum() {return mPredecessors.GetNum();} + CfgBB *GetPredecessorAtIndex(unsigned i) {return mPredecessors.ValueAtIndex(i);} + + static BBIndex GetLastId() {return GetNextId(false);} + + void Dump(); + + private: + static BBIndex GetNextId(bool inc = true) {static BBIndex id = 0; return inc ? ++id : id; } +}; + +class CfgFunc { + private: + TreeNode *mFuncNode; // ModuleNode, FunctionNode or LambdaNode + SmallList mNestedFuncs; // nested functions + CfgFunc *mParent; + CfgBB *mEntryBB; + CfgBB *mExitBB; + BBIndex mLastBBId; + + public: + explicit CfgFunc() : mFuncNode(nullptr), mParent(nullptr), mEntryBB(nullptr), mExitBB(nullptr) {} + ~CfgFunc() {mNestedFuncs.Release();} + + void SetFuncNode(TreeNode *func) {mFuncNode = func;} + TreeNode *GetFuncNode() {return mFuncNode;} + + const char *GetName() { + return mFuncNode->IsModule() ? "_init_" : + (mFuncNode->GetStrIdx() ? 
mFuncNode->GetName() : "_anonymous_"); + } + + void AddNestedFunc(CfgFunc *func) {mNestedFuncs.PushBack(func); func->SetParent(this);} + unsigned GetNestedFuncsNum() {return mNestedFuncs.GetNum();} + CfgFunc *GetNestedFuncAtIndex(unsigned i) {return mNestedFuncs.ValueAtIndex(i);} + + void SetParent(CfgFunc *func) {mParent = func;} + CfgFunc *GetParent() {return mParent;} + + void SetEntryBB(CfgBB *bb) {mEntryBB = bb; bb->SetAttr(AK_Entry);} + CfgBB *GetEntryBB() {return mEntryBB;} + + void SetExitBB(CfgBB *bb) {mExitBB = bb; bb->SetAttr(AK_Exit);} + CfgBB *GetExitBB() {return mExitBB;} + + void SetLastBBId(BBIndex id) {mLastBBId = id;} + BBIndex GetLastBBId() {return mLastBBId;} + + void Dump(); +}; + +class CfgBuilder : public AstVisitor { + + using TargetLabel = unsigned; + using TargetBB = std::pair; + using TargetBBStack = std::vector; + + private: + Module_Handler *mHandler; + unsigned mFlags; + + CfgFunc *mCurrentFunction; + CfgBB *mCurrentBB; + + TargetBBStack mBreakBBs; + TargetBBStack mContinueBBs; + TargetBBStack mThrowBBs; + + public: + explicit CfgBuilder(Module_Handler *h, unsigned f) + : AstVisitor(false), mHandler(h), mFlags(f) {} + ~CfgBuilder() = default; + + void Build(); + + // Create CfgFunc nodes for a module + CfgFunc *InitCfgFunc(ModuleNode *module); + + CfgFunc *NewFunction(TreeNode *); + CfgBB *NewBB(BBKind k); + + static void Push(TargetBBStack &stack, CfgBB* bb, TreeNode *label); + static CfgBB *LookUp(TargetBBStack &stack, TreeNode *label); + static void Pop(TargetBBStack &stack); + + void InitializeFunction(CfgFunc *func); + void FinalizeFunction(); + + // For function and lambda + FunctionNode *VisitFunctionNode(FunctionNode *node); + LambdaNode *VisitLambdaNode(LambdaNode *node); + + // For class and interface + ClassNode *VisitClassNode(ClassNode *node); + InterfaceNode *VisitInterfaceNode(InterfaceNode *node); + StructNode *VisitStructNode(StructNode *node); + + // For statements of control flow + ReturnNode *VisitReturnNode(ReturnNode *node); + CondBranchNode *VisitCondBranchNode(CondBranchNode *node); + ForLoopNode *VisitForLoopNode(ForLoopNode *node); + WhileLoopNode *VisitWhileLoopNode(WhileLoopNode *node); + DoLoopNode *VisitDoLoopNode(DoLoopNode *node); + ContinueNode *VisitContinueNode(ContinueNode *node); + BreakNode *VisitBreakNode(BreakNode *node); + SwitchNode *VisitSwitchNode(SwitchNode *node); + TryNode *VisitTryNode(TryNode *node); + ThrowNode *VisitThrowNode(ThrowNode *node); + BlockNode *VisitBlockNode(BlockNode *node); + NamespaceNode *VisitNamespaceNode(NamespaceNode *node); + + // For statements of a BB + PassNode *VisitPassNode(PassNode *node); + TemplateLiteralNode *VisitTemplateLiteralNode(TemplateLiteralNode *node); + ImportNode *VisitImportNode(ImportNode *node); + ExportNode *VisitExportNode(ExportNode *node); + DeclNode *VisitDeclNode(DeclNode *node); + ParenthesisNode *VisitParenthesisNode(ParenthesisNode *node); + CastNode *VisitCastNode(CastNode *node); + ArrayElementNode *VisitArrayElementNode(ArrayElementNode *node); + VarListNode *VisitVarListNode(VarListNode *node); + ExprListNode *VisitExprListNode(ExprListNode *node); + UnaOperatorNode *VisitUnaOperatorNode(UnaOperatorNode *node); + BinOperatorNode *VisitBinOperatorNode(BinOperatorNode *node); + TerOperatorNode *VisitTerOperatorNode(TerOperatorNode *node); + InstanceOfNode *VisitInstanceOfNode(InstanceOfNode *node); + TypeOfNode *VisitTypeOfNode(TypeOfNode *node); + NewNode *VisitNewNode(NewNode *node); + DeleteNode *VisitDeleteNode(DeleteNode *node); + CallNode 
*VisitCallNode(CallNode *node); + AssertNode *VisitAssertNode(AssertNode *node); + UserTypeNode *VisitUserTypeNode(UserTypeNode *node); + IdentifierNode *VisitIdentifierNode(IdentifierNode *node); + LiteralNode *VisitLiteralNode(LiteralNode *node); + TypeAliasNode *VisitTypeAliasNode(TypeAliasNode *node); + FieldNode *VisitFieldNode(FieldNode *node); + + TreeNode *BaseTreeNode(TreeNode *node); +}; +} +#endif diff --git a/src/MapleFE/astopt/include/ast_common.h b/src/MapleFE/astopt/include/ast_common.h new file mode 100644 index 0000000000000000000000000000000000000000..7c25f1ca9f1e7553c799aefcba22a8f93d64c638 --- /dev/null +++ b/src/MapleFE/astopt/include/ast_common.h @@ -0,0 +1,42 @@ +/* +* Copyright (C) [2021] Futurewei Technologies, Inc. All rights reverved. +* +* OpenArkFE is licensed under the Mulan PSL v2. +* You can use this software according to the terms and conditions of the Mulan PSL v2. +* You may obtain a copy of Mulan PSL v2 at: +* +* http://license.coscl.org.cn/MulanPSL2 +* +* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +* FIT FOR A PARTICULAR PURPOSE. +* See the Mulan PSL v2 for more details. +*/ + +#ifndef __AST_COMMON_HEADER__ +#define __AST_COMMON_HEADER__ + +namespace maplefe { + +#define DEFAULTVALUE 0xdeadbeef +#define RENAMINGSUFFIX "__RENAMED" + +#define NOTYETIMPL(M) { if (mFlags & FLG_trace) { MNYI(M); }} +#define MSGNOLOC0(M) { if (mFlags & FLG_trace_3) { MMSGNOLOC0(M); }} +#define MSGNOLOC(M,v) { if (mFlags & FLG_trace_3) { MMSGNOLOC(M,v); }} + +enum AST_Flags { + FLG_trace_1 = 0x00000001, + FLG_trace_2 = 0x00000002, + FLG_trace_3 = 0x00000004, + FLG_trace_4 = 0x00000008, + FLG_trace = 0x0000000f, + + FLG_emit_ts = 0x00000010, + FLG_emit_ts_only = 0x00000020, + FLG_format_cpp = 0x00000040, + FLG_no_imported = 0x00000080, +}; + +} +#endif diff --git a/src/MapleFE/astopt/include/ast_dfa.h b/src/MapleFE/astopt/include/ast_dfa.h new file mode 100644 index 0000000000000000000000000000000000000000..5e232449f729595439d87b1c02c3d65e96cece52 --- /dev/null +++ b/src/MapleFE/astopt/include/ast_dfa.h @@ -0,0 +1,155 @@ +/* +* Copyright (C) [2021] Futurewei Technologies, Inc. All rights reverved. +* +* OpenArkFE is licensed under the Mulan PSL v2. +* You can use this software according to the terms and conditions of the Mulan PSL v2. +* You may obtain a copy of Mulan PSL v2 at: +* +* http://license.coscl.org.cn/MulanPSL2 +* +* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +* FIT FOR A PARTICULAR PURPOSE. +* See the Mulan PSL v2 for more details. 
+*/ + +#ifndef __AST_DFA_HEADER__ +#define __AST_DFA_HEADER__ + +#include +#include + +#include "stringpool.h" +#include "ast_module.h" +#include "ast.h" +#include "ast_type.h" +#include "ast_handler.h" +#include "gen_astvisitor.h" + +namespace maplefe { + +// def positions: +typedef std::pair DefPosition; +// map +typedef std::unordered_map BVMap; + +class AST_DFA { + private: + Module_Handler *mHandler; + unsigned mFlags; + std::unordered_map mVar2DeclMap; // var to decl, both NodeId + + // stmt id + SmallVector mStmtIdVec; + std::unordered_map mStmtId2StmtMap; + std::unordered_map mEntryBbId2FuncMap; + + // def node id set + std::unordered_set mDefNodeIdSet; + // def and use id set: i in i++; i+=j; + std::unordered_set mDefUseNodeIdSet; + // def positions, def index + SmallVector mDefPositionVec; + // use stridx to set of node id + std::unordered_map> mUsePositionMap; + + // followint maps with key BB id + BVMap mPrsvMap; + BVMap mGenMap; + BVMap mRchInMap; // reaching definition bit vector entering bb + BVMap mRchOutMap; + + // def/use nid --> stmtid + std::unordered_map mNodeId2StmtIdMap; + // stmtid --> bbid + std::unordered_map mStmtId2BbIdMap; + + // def stridx set + std::unordered_set mDefStrIdxSet; + // def-use : key is def node id to a set of use node id + std::unordered_map> mDefUseMap; + + friend class DefUseChainVisitor; + + public: + explicit AST_DFA(Module_Handler *h, unsigned f) : mHandler(h), mFlags(f) {} + ~AST_DFA(); + + void DataFlowAnalysis(); + + void CollectInfo(); + void CollectDefNodes(); + void BuildBitVectors(); + void BuildDefUseChain(); + + bool IsDef(unsigned nid) { return mDefNodeIdSet.find(nid) != mDefNodeIdSet.end();} + bool IsDefUse(unsigned nid) { return mDefUseNodeIdSet.find(nid) != mDefUseNodeIdSet.end();} + // return def stridx, return 0 if no def + unsigned GetDefStrIdx(TreeNode *node); + // return def nodeId, return 0 if no def + unsigned AddDef(TreeNode *node, unsigned &bitnum, unsigned bbid); + + void SetNodeId2StmtId(unsigned nid, unsigned sid) { mNodeId2StmtIdMap[nid] = sid; } + unsigned GetStmtIdFromNodeId(unsigned id) { return mNodeId2StmtIdMap[id]; } + unsigned GetBbIdFromStmtId(unsigned id) { return mStmtId2BbIdMap[id]; } + TreeNode *GetStmtFromStmtId(unsigned id) { return mStmtId2StmtMap[id]; } + CfgBB *GetBbFromBbId(unsigned id) { return mHandler->GetBbFromBbId(id); } + + void DumpDefPosition(unsigned idx, DefPosition pos); + void DumpDefPositionVec(); + void DumpReachDefIn(); + + void DumpBV(BitVector *bv); + void DumpBVMap(BVMap &bvmap); + void DumpAllBVMaps(); + void DumpDefUse(); + void TestBV(); + void Clear(); +}; + +class CollectInfoVisitor : public AstVisitor { + private: + AST_DFA *mDFA; + unsigned mStmtIdx; + unsigned mBbId; + + public: + explicit CollectInfoVisitor(Module_Handler *h, unsigned f, bool base = false) + : AstVisitor((f & FLG_trace_1) && base), mDFA(h->GetDFA()) {} + ~CollectInfoVisitor() = default; + + void SetStmtIdx(unsigned id) { mStmtIdx = id; } + void SetBbId(unsigned id) { mBbId = id; } + + IdentifierNode *VisitIdentifierNode(IdentifierNode *node); +}; + +class DefUseChainVisitor : public AstVisitor { + private: + Module_Handler *mHandler; + AST_DFA *mDFA; + unsigned mFlags; + unsigned mStmtIdx; + unsigned mBbId; + + public: + unsigned mDefNodeId; + unsigned mDefStrIdx; + unsigned mReachDef; + unsigned mReachNewDef; + + public: + explicit DefUseChainVisitor(Module_Handler *h, unsigned f, bool base = false) + : AstVisitor((f & FLG_trace_1) && base), mHandler(h), mDFA(h->GetDFA()), mFlags(f) {} + 
~DefUseChainVisitor() = default; + + void SetStmtIdx(unsigned id) { mStmtIdx = id; } + void SetBbId(unsigned id) { mBbId = id; } + void VisitBB(unsigned bbid); + + IdentifierNode *VisitIdentifierNode(IdentifierNode *node); + BinOperatorNode *VisitBinOperatorNode(BinOperatorNode *node); +}; + +} +#endif diff --git a/src/MapleFE/astopt/include/ast_handler.h b/src/MapleFE/astopt/include/ast_handler.h new file mode 100644 index 0000000000000000000000000000000000000000..42153f013fb46b771480e7cba2c63a86480d7f76 --- /dev/null +++ b/src/MapleFE/astopt/include/ast_handler.h @@ -0,0 +1,267 @@ +/* +* Copyright (C) [2021-2022] Futurewei Technologies, Inc. All rights reverved. +* +* OpenArkFE is licensed under the Mulan PSL v2. +* You can use this software according to the terms and conditions of the Mulan PSL v2. +* You may obtain a copy of Mulan PSL v2 at: +* +* http://license.coscl.org.cn/MulanPSL2 +* +* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +* FIT FOR A PARTICULAR PURPOSE. +* See the Mulan PSL v2 for more details. +*/ + +#ifndef __AST_HANDLER_HEADER__ +#define __AST_HANDLER_HEADER__ + +#include +#include +#include +#include +#include +#include +#include +#include "ast_module.h" +#include "ast.h" +#include "astopt.h" +#include "ast_cfg.h" +#include "ast_type.h" +#include "ast_common.h" +#include "gen_astvisitor.h" + +namespace maplefe { + +class CfgBB; +class CfgFunc; +class AST_INFO; +class AST_ADJ; +class AST_CFA; +class AST_DFA; +class AST_SCP; +class AST_Util; +class AST_XXport;; +class Module_Handler; +class TypeInfer; +class TypeTable; +class AstOpt; + +using HandlerIndex = unsigned; +const HandlerIndex HandlerNotFound = UINT_MAX; + +struct StrLess { + bool operator()(const char *p, const char *q) const { + return std::strcmp(p, q) < 0; + } +}; + +class AST_Handler { + private: + MemPool mMemPool; // Memory pool for all CfgFunc, CfgBB, etc. 
+ AstOpt *mAstOpt; + unsigned mSize; + unsigned mFlags; + + // vector of all AST modules + SmallVector mModuleHandlers; + Module_Handler *GetModuleHandler(ModuleNode *module); + + public: + // mapping of mModuleHandlers index with its corresponding filename as its key + std::map mModuleHandlerMap; + + explicit AST_Handler(unsigned f) : mSize(0), mFlags(f) {} + ~AST_Handler() {mMemPool.Release();} + + MemPool *GetMemPool() {return &mMemPool;} + + AstOpt *GetAstOpt() {return mAstOpt;} + void SetAstOpt(AstOpt *opt) {mAstOpt = opt;} + + Module_Handler *GetModuleHandler(unsigned i) {return mModuleHandlers.ValueAtIndex(i);} + Module_Handler *GetModuleHandler(TreeNode *node); + + unsigned GetSize() {return mSize;} + + // If m does not exist in mModuleHandlerMap, + // create an object of Module_Handler for module m + // add this object to mModuleHandlers + // map its corresponding filename and the index of this object in mModuleHandlers in mModuleHandlerMap + // return true + // Otherwise, + // return false + bool AddModule(ModuleNode *m); + + // Return an index of mModuleHandlers if filename exists in mModuleHandlerMap, otherwise return HandlerNotFound + HandlerIndex GetHandlerIndex(const char *filename); +}; + +// Each source file is a module +class Module_Handler { + private: + AST_Handler *mASTHandler; + ModuleNode *mASTModule; // for an AST module + CfgFunc *mCfgFunc; // initial CfgFunc in module scope + AST_INFO *mINFO; + AST_ADJ *mADJ; + AST_SCP *mSCP; + TypeInfer *mTI; + AST_CFA *mCFA; + AST_DFA *mDFA; + AST_Util *mUtil; + const char *mOutputFilename; + unsigned mHidx; // handler index in AST_Handler + + unsigned mFlags; + bool mIsTS; + + std::unordered_map mNodeId2BbMap; + + public: + // module's ast function vector + std::vector mModuleFuncs; + // all BBs + std::unordered_map mBbId2BbMap; + // bbid vec - only reachable BBs + std::vector mBbIdVec; + // identifier node id to decl + std::unordered_map mNodeId2Decl; + // array's element type: decl node id to typeid + std::unordered_map mArrayDeclId2EleTypeIdMap; + // array's element typeidx: decl node id to typeidx + std::unordered_map mArrayDeclId2EleTypeIdxMap; + // array literal's dim: decl node id to dim + std::unordered_map mArrayDeclId2DimMap; + // nodeid to used generator + std::unordered_map mGeneratorUsedMap; + // fields' nodeid set + std::unordered_set mDirectFieldSet; + // alias type, identifier node id + std::unordered_set mAliasTypeSet; + + public: + explicit Module_Handler(unsigned f) : + mCfgFunc(nullptr), + mINFO(nullptr), + mADJ(nullptr), + mSCP(nullptr), + mTI(nullptr), + mCFA(nullptr), + mDFA(nullptr), + mUtil(nullptr), + mFlags(f) {} + ~Module_Handler(); + + void BasicAnalysis(); + void CollectInfo(); + void AdjustAST(); + void ScopeAnalysis(); + void TypeInference(); + void BuildCFG(); + void ControlFlowAnalysis(); + void DataFlowAnalysis(); + + const char *GetOutputFilename() {return mOutputFilename;} + void SetOutputFilename(const char *name) {mOutputFilename = name;} + + void SetASTHandler(AST_Handler *h) {mASTHandler = h;} + AST_Handler *GetASTHandler() {return mASTHandler;} + + void SetASTModule(ModuleNode *mod) {mASTModule = mod;} + ModuleNode *GetASTModule() {return mASTModule;} + + MemPool *GetMemPool(); + + void SetCfgFunc(CfgFunc *func) {mCfgFunc = func;} + CfgFunc *GetCfgFunc() {return mCfgFunc;} + + void SetBbFromNodeId(unsigned id, CfgBB *bb) { mNodeId2BbMap[id] = bb; } + CfgBB *GetBbFromNodeId(unsigned id) { return mNodeId2BbMap[id]; } + + void SetBbFromBbId(unsigned id, CfgBB *bb) { mBbId2BbMap[id] = bb; } 
+ CfgBB *GetBbFromBbId(unsigned id) { return mBbId2BbMap[id]; } + + unsigned GetFlags() {return mFlags;} + unsigned GetHidx() {return mHidx;} + AST_INFO *GetINFO() {return mINFO;} + AST_ADJ *GetADJ() {return mADJ;} + AST_CFA *GetCFA() {return mCFA;} + AST_DFA *GetDFA() {return mDFA;} + AST_SCP *GetSCP() {return mSCP;} + TypeInfer *GetTI() {return mTI;} + AST_Util *GetUtil() {return mUtil;} + AstOpt *GetAstOpt(); + AST_XXport *GetASTXXport(); + + void SetHidx(unsigned idx) {mHidx = idx;} + void SetINFO(AST_INFO *p) {mINFO = p;} + void SetADJ(AST_ADJ *p) {mADJ = p;} + void SetCFA(AST_CFA *p) {mCFA = p;} + void SetDFA(AST_DFA *p) {mDFA = p;} + void SetSCP(AST_SCP *p) {mSCP = p;} + void SetTI(TypeInfer *p) {mTI = p;} + void SetUtil(AST_Util *p) {mUtil = p;} + void SetIsTS(bool b) {mIsTS = b;} + + // deep true : find Decl in imported module as well + // false : find Decl in current module only + TreeNode *FindDecl(IdentifierNode *node, bool deep = false); + + TreeNode *FindType(IdentifierNode *node); + TreeNode *FindFunc(TreeNode *node); + + void AddDirectField(TreeNode *node); + bool IsDirectField(TreeNode *node); + + bool IsFromLambda(TreeNode *node); + bool IsDef(TreeNode *node); + bool IsTS() {return mIsTS;} + + void AddNodeId2DeclMap(unsigned nid, TreeNode *node) { + mNodeId2Decl[nid] = node; + } + + void AddAliasType(unsigned nid) { mAliasTypeSet.insert(nid); } + bool isAliasType(unsigned nid) { + return mAliasTypeSet.find(nid) != mAliasTypeSet.end(); + } + bool isAliasType(TreeNode *node) { return isAliasType(node->GetNodeId()); } + + template + T *NewTreeNode() { + T *node = (T*)gTreePool.NewTreeNode(sizeof(T)); + new (node) T(); + AstOpt *opt = mASTHandler->GetAstOpt(); + opt->AddNodeId2NodeMap(node); + return node; + } + + Module_Handler *GetModuleHandler(unsigned i) {return mASTHandler->GetModuleHandler(i);} + Module_Handler *GetModuleHandler(TreeNode *node) {return mASTHandler->GetModuleHandler(node);} + + // array's element typeid + TypeId GetArrayElemTypeId(unsigned nid); + void SetArrayElemTypeId(unsigned nid, TypeId tid); + unsigned GetArrayElemTypeIdx(unsigned nid); + void SetArrayElemTypeIdx(unsigned nid, unsigned tidx); + DimensionNode *GetArrayDim(unsigned nid); + void SetArrayDim(unsigned nid, DimensionNode *dim); + + // used generator + void AddGeneratorUsed(unsigned nid, FunctionNode *func); + bool IsGeneratorUsed(unsigned nid); + FunctionNode *GetGeneratorUsed(unsigned nid); + bool UpdateGeneratorUsed(unsigned target, unsigned src); + + // API to check a node is c++ field which satisfy both: + // 1. direct field + // 2. its name is valid in c++ + bool IsCppField(TreeNode *node); + + void Dump(char *msg); + void DumpArrayElemTypeIdMap(); +}; + +} +#endif diff --git a/src/MapleFE/astopt/include/ast_info.h b/src/MapleFE/astopt/include/ast_info.h new file mode 100644 index 0000000000000000000000000000000000000000..83a0fc37122e10231fdde2f6d7050bd0eaf908ff --- /dev/null +++ b/src/MapleFE/astopt/include/ast_info.h @@ -0,0 +1,197 @@ +/* +* Copyright (C) [2021-2022] Futurewei Technologies, Inc. All rights reverved. +* +* OpenArkFE is licensed under the Mulan PSL v2. +* You can use this software according to the terms and conditions of the Mulan PSL v2. +* You may obtain a copy of Mulan PSL v2 at: +* +* http://license.coscl.org.cn/MulanPSL2 +* +* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +* FIT FOR A PARTICULAR PURPOSE. 
+* See the Mulan PSL v2 for more details. +*/ + +#ifndef __AST_INFO_HEADER__ +#define __AST_INFO_HEADER__ + +#include +#include +#include +#include +#include "ast_module.h" +#include "ast.h" +#include "ast_type.h" +#include "gen_astvisitor.h" + +namespace maplefe { + +class FindStrIdxVisitor; + +class AST_INFO { + private: + Module_Handler *mHandler; + unsigned mFlags; + unsigned mNum; + bool mNameAnonyStruct; + unsigned mPass; + FindStrIdxVisitor *mStrIdxVisitor; + + std::unordered_set mReachableBbIdx;; + std::unordered_map> mFieldNum2StructNodeMap; + std::unordered_map> mStructId2FieldsMap; + std::unordered_map mStrIdx2StructMap; + std::unordered_set mTypeParamStrIdxSet; + std::unordered_set mWithTypeParamNodeSet; + std::unordered_set mWithThisFuncSet;; + std::unordered_set mFromLambda; + std::unordered_map mStrIdx2TypeIdxMap; + + void AddField(unsigned nid, TreeNode *node); + + public: + explicit AST_INFO(Module_Handler *h, unsigned f) : mHandler(h), mFlags(f), mNum(1), + mNameAnonyStruct(false) {} + ~AST_INFO() {} + + void CollectInfo(); + + unsigned GetPass() { return mPass; } + TypeId GetTypeId(TreeNode *node); + unsigned GetFieldsSize(TreeNode *node, bool native = false); + TreeNode *GetField(TreeNode *node, unsigned i, bool native = false); + void AddField(TreeNode *container, TreeNode *node); + TreeNode *GetField(unsigned nid, unsigned stridx); + unsigned GetSuperSize(TreeNode *node, unsigned idx); + TreeNode *GetSuper(TreeNode *node, unsigned i, unsigned idx); + + void SetStrIdx2Struct(unsigned stridx, TreeNode *node) { mStrIdx2StructMap[stridx] = node; } + TreeNode *GetStructFromStrIdx(unsigned stridx) { return mStrIdx2StructMap[stridx]; } + + TreeNode *GetCanonicStructNode(TreeNode *node); + + IdentifierNode *CreateIdentifierNode(unsigned stridx); + UserTypeNode *CreateUserTypeNode(unsigned stridx, ASTScope *scope = NULL); + UserTypeNode *CreateUserTypeNode(IdentifierNode *node); + TypeAliasNode *CreateTypeAliasNode(TreeNode *to, TreeNode *from); + StructNode *CreateStructFromStructLiteral(StructLiteralNode *node); + + unsigned GetAnonymousName(); + TreeNode *GetAnonymousStruct(TreeNode *node); + + bool IsInterface(TreeNode *node); + bool IsTypeIdCompatibleTo(TypeId field, TypeId target); + bool IsTypeCompatible(TreeNode *node1, TreeNode *node2); + bool IsFieldCompatibleTo(TreeNode *from, TreeNode *to); + + void SetNameAnonyStruct(bool b) { mNameAnonyStruct = b; } + bool GetNameAnonyStruct() { return mNameAnonyStruct; } + + template void SortFields(T1 *node); + template void ExtendFields(T1 *node, TreeNode *sup); + + bool WithStrIdx(TreeNode *node, unsigned stridx); + bool WithTypeParam(TreeNode *node); + bool WithTypeParamFast(TreeNode *node); + void InsertTypeParamStrIdx(unsigned stridx) { mTypeParamStrIdxSet.insert(stridx); } + void InsertWithTypeParamNode(TreeNode *node) { mWithTypeParamNodeSet.insert(node->GetNodeId()); } + + bool WithThis(TreeNode *node); + void InsertWithThisFunc(TreeNode *node) { mWithThisFuncSet.insert(node->GetNodeId()); } + bool IsFuncBodyUseThis(TreeNode *node) { return mWithThisFuncSet.find(node->GetNodeId())!= mWithThisFuncSet.end(); } + + bool WithSuper(TreeNode *node); + + void SetTypeId(TreeNode *node, TypeId tid); + void SetTypeIdx(TreeNode *node, unsigned tidx); + + void AddFromLambda(unsigned nid) { mFromLambda.insert(nid); } + bool IsFromLambda(unsigned nid) { return mFromLambda.find(nid) != mFromLambda.end(); } + + void AddBuiltInTypes(); + bool IsBuiltInType(TreeNode *node); + unsigned GetBuiltInTypeIdx(unsigned stridx); + unsigned 
GetBuiltInTypeIdx(TreeNode *node); +}; + +class FillNodeInfoVisitor : public AstVisitor { + private: + Module_Handler *mHandler; + AST_INFO *mInfo; + + public: + explicit FillNodeInfoVisitor(Module_Handler *h, unsigned f, bool base = false) + : AstVisitor((f & FLG_trace_1) && base), mHandler(h) { + mInfo= mHandler->GetINFO(); + } + ~FillNodeInfoVisitor() = default; + + LiteralNode *VisitLiteralNode(LiteralNode *node); + PrimTypeNode *VisitPrimTypeNode(PrimTypeNode *node); + UserTypeNode *VisitUserTypeNode(UserTypeNode *node); + IdentifierNode *VisitIdentifierNode(IdentifierNode *node); + FunctionNode *VisitFunctionNode(FunctionNode *node); +}; + +class ClassStructVisitor : public AstVisitor { + private: + Module_Handler *mHandler; + AST_INFO *mInfo; + + public: + explicit ClassStructVisitor(Module_Handler *h, unsigned f, bool base = false) + : AstVisitor((f & FLG_trace_1) && base), mHandler(h) { + mInfo= mHandler->GetINFO(); + } + ~ClassStructVisitor() = default; + + StructLiteralNode *VisitStructLiteralNode(StructLiteralNode *node); + StructNode *VisitStructNode(StructNode *node); + ClassNode *VisitClassNode(ClassNode *node); + InterfaceNode *VisitInterfaceNode(InterfaceNode *node); + TypeParameterNode *VisitTypeParameterNode(TypeParameterNode *node); + FunctionNode *VisitFunctionNode(FunctionNode *node); +}; + +class FunctionVisitor : public AstVisitor { + private: + Module_Handler *mHandler; + AST_INFO *mInfo; + + public: + explicit FunctionVisitor(Module_Handler *h, unsigned f, bool base = false) + : AstVisitor((f & FLG_trace_1) && base), mHandler(h) { + mInfo= mHandler->GetINFO(); + } + ~FunctionVisitor() = default; + + FunctionNode *VisitFunctionNode(FunctionNode *node); +}; + +class FindStrIdxVisitor : public AstVisitor { + private: + Module_Handler *mHandler; + AST_INFO *mInfo; + unsigned mStrIdx; + bool mCheckThis; + bool mFound; + + public: + explicit FindStrIdxVisitor(Module_Handler *h, unsigned f, bool base = false) + : AstVisitor((f & FLG_trace_1) && base), mHandler(h), mStrIdx(0), + mCheckThis(false), mFound(false) { + mInfo = mHandler->GetINFO(); + } + ~FindStrIdxVisitor() = default; + + void ResetFound() { mFound = false; } + void SetStrIdx(unsigned stridx) { mStrIdx = stridx; } + void SetCheckThis(bool b = true) { mCheckThis = b; } + bool GetFound() { return mFound; } + IdentifierNode *VisitIdentifierNode(IdentifierNode *node); + LiteralNode *VisitLiteralNode(LiteralNode *node); +}; + +} +#endif diff --git a/src/MapleFE/astopt/include/ast_scp.h b/src/MapleFE/astopt/include/ast_scp.h new file mode 100644 index 0000000000000000000000000000000000000000..1a9a7124a2adbd4d89b4da617fa21fbb57763f6a --- /dev/null +++ b/src/MapleFE/astopt/include/ast_scp.h @@ -0,0 +1,173 @@ +/* +* Copyright (C) [2021-2022] Futurewei Technologies, Inc. All rights reverved. +* +* OpenArkFE is licensed under the Mulan PSL v2. +* You can use this software according to the terms and conditions of the Mulan PSL v2. +* You may obtain a copy of Mulan PSL v2 at: +* +* http://license.coscl.org.cn/MulanPSL2 +* +* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +* FIT FOR A PARTICULAR PURPOSE. +* See the Mulan PSL v2 for more details. 
+*/ + +#ifndef __AST_SCP_HEADER__ +#define __AST_SCP_HEADER__ + +#include +#include +#include + +#include "stringpool.h" +#include "ast_module.h" +#include "ast.h" +#include "ast_type.h" +#include "ast_handler.h" +#include "gen_astvisitor.h" + +namespace maplefe { + +class AST_SCP { + private: + Module_Handler *mHandler; + unsigned mFlags; + + public: + explicit AST_SCP(Module_Handler *h, unsigned f) : mHandler(h), mFlags(f) {} + ~AST_SCP() {}; + + void ScopeAnalysis(); + + void BuildScope(); + void RenameVar(); + void AdjustASTWithScope(); +}; + +class BuildScopeBaseVisitor : public AstVisitor { + public: + std::stack mScopeStack; + std::stack mUserScopeStack; + + public: + explicit BuildScopeBaseVisitor(unsigned f, bool base = false) + : AstVisitor((f & FLG_trace_1) && base) {} + ~BuildScopeBaseVisitor() = default; + +#undef NODEKIND +#define NODEKIND(K) virtual K##Node *Visit##K##Node(K##Node *node) {\ + ASTScope *scope = mScopeStack.top(); \ + node->SetScope(scope); \ + (void) AstVisitor::Visit##K##Node(node); \ + return node; \ +} +#include "ast_nk.def" +}; + +class BuildScopeVisitor : public BuildScopeBaseVisitor { + private: + Module_Handler *mHandler; + ModuleNode *mASTModule; + unsigned mFlags; + bool mRunIt; + AST_XXport *mXXport; + + // stridx to scope map for struct/class + std::unordered_map mStrIdx2ScopeMap;; + + std::unordered_map> mScope2DeclsMap; + std::unordered_map> mScope2ImportedDeclsMap; + std::unordered_map> mScope2ExportedDeclsMap; + std::unordered_map> mScope2TypesMap; + + public: + explicit BuildScopeVisitor(Module_Handler *h, unsigned f, bool base = false) + : BuildScopeBaseVisitor(f, base), mHandler(h), mFlags(f) { + mASTModule = mHandler->GetASTModule(); + mXXport = h->GetASTXXport(); + } + ~BuildScopeVisitor() = default; + + bool GetRunIt() { return mRunIt; } + void SetRunIt(bool b) { mRunIt = b; } + + void InitInternalTypes(); + ClassNode *AddClass(unsigned stridx, unsigned tyidx = 0); + FunctionNode *AddFunction(std::string name); + + void AddType(ASTScope *scope, TreeNode *node); + void AddImportedDecl(ASTScope *scope, TreeNode *node); + void AddExportedDecl(ASTScope *scope, TreeNode *node); + void AddDecl(ASTScope *scope, TreeNode *node); + void AddTypeAndDecl(ASTScope *scope, TreeNode *node); + ASTScope *NewScope(ASTScope *parent, TreeNode *node); + + void AddScopeMap(unsigned stridx, ASTScope *scope) { mStrIdx2ScopeMap[stridx] = scope; } + + // scope nodes + BlockNode *VisitBlockNode(BlockNode *node); + FunctionNode *VisitFunctionNode(FunctionNode *node); + LambdaNode *VisitLambdaNode(LambdaNode *node); + ClassNode *VisitClassNode(ClassNode *node); + StructNode *VisitStructNode(StructNode *node); + StructLiteralNode *VisitStructLiteralNode(StructLiteralNode *node); + InterfaceNode *VisitInterfaceNode(InterfaceNode *node); + NamespaceNode *VisitNamespaceNode(NamespaceNode *node); + ForLoopNode *VisitForLoopNode(ForLoopNode *node); + TryNode *VisitTryNode(TryNode *node); + CatchNode *VisitCatchNode(CatchNode *node); + FinallyNode *VisitFinallyNode(FinallyNode *node); + + FieldNode *VisitFieldNode(FieldNode *node); + TypeParameterNode *VisitTypeParameterNode(TypeParameterNode *node); + + // related node with scope : decl, type + DeclNode *VisitDeclNode(DeclNode *node); + UserTypeNode *VisitUserTypeNode(UserTypeNode *node); + TypeAliasNode *VisitTypeAliasNode(TypeAliasNode *node); + ImportNode *VisitImportNode(ImportNode *node); + ExportNode *VisitExportNode(ExportNode *node); +}; + +class RenameVarVisitor : public AstVisitor { + private: + Module_Handler 
*mHandler; + ModuleNode *mASTModule; + AstOpt *mAstOpt; + unsigned mFlags; + + public: + unsigned mPass; + unsigned mOldStrIdx; + unsigned mNewStrIdx; + std::unordered_map> mStridx2DeclIdMap; + + public: + explicit RenameVarVisitor(Module_Handler *h, unsigned f, bool base = false) + : AstVisitor((f & FLG_trace_1) && base), mHandler(h), mFlags(f) { + mASTModule = mHandler->GetASTModule(); + mAstOpt = mHandler->GetASTHandler()->GetAstOpt(); + } + ~RenameVarVisitor() = default; + + bool SkipRename(IdentifierNode *node); + bool IsFuncArg(FunctionNode *func, IdentifierNode *node); + void InsertToStridx2DeclIdMap(unsigned stridx, IdentifierNode *node); + IdentifierNode *VisitIdentifierNode(IdentifierNode *node); +}; + +class AdjustASTWithScopeVisitor : public AstVisitor { + private: + Module_Handler *mHandler; + + public: + explicit AdjustASTWithScopeVisitor(Module_Handler *h, unsigned f, bool base = false) + : AstVisitor((f & FLG_trace_1) && base), mHandler(h) {} + ~AdjustASTWithScopeVisitor() = default; + + IdentifierNode *VisitIdentifierNode(IdentifierNode *node); +}; + +} +#endif diff --git a/src/MapleFE/astopt/include/ast_ti.h b/src/MapleFE/astopt/include/ast_ti.h new file mode 100644 index 0000000000000000000000000000000000000000..ece38d68dcbe7e6257562c68bfc23f37383b231b --- /dev/null +++ b/src/MapleFE/astopt/include/ast_ti.h @@ -0,0 +1,232 @@ +/* +* Copyright (C) [2021-2022] Futurewei Technologies, Inc. All rights reverved. +* +* OpenArkFE is licensed under the Mulan PSL v2. +* You can use this software according to the terms and conditions of the Mulan PSL v2. +* You may obtain a copy of Mulan PSL v2 at: +* +* http://license.coscl.org.cn/MulanPSL2 +* +* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +* FIT FOR A PARTICULAR PURPOSE. +* See the Mulan PSL v2 for more details. 
+*/ + +#ifndef __AST_TYPE_INFERENCE_HEADER__ +#define __AST_TYPE_INFERENCE_HEADER__ + +#include +#include +#include "ast_module.h" +#include "ast.h" +#include "ast_type.h" +#include "gen_astvisitor.h" +#include "ast_common.h" + +namespace maplefe { + +class Module_Handler; + +class TypeInfer { + private: + Module_Handler *mHandler; + unsigned mFlags; + + public: + explicit TypeInfer(Module_Handler *h, unsigned f) : mHandler(h), mFlags(f) {} + ~TypeInfer() {} + + void TypeInference(); + void CheckType(); +}; + +class BuildIdNodeToDeclVisitor : public AstVisitor { + Module_Handler *mHandler; + + public: + explicit BuildIdNodeToDeclVisitor(Module_Handler *h, unsigned f, bool base = false) + : AstVisitor((f & FLG_trace_1) && base), mHandler(h) {} + ~BuildIdNodeToDeclVisitor() = default; + + IdentifierNode *VisitIdentifierNode(IdentifierNode *node); +}; + +class BuildIdDirectFieldVisitor : public AstVisitor { + Module_Handler *mHandler; + unsigned mFlags; + + public: + explicit BuildIdDirectFieldVisitor(Module_Handler *h, unsigned f, bool base = false) + : AstVisitor((f & FLG_trace_1) && base), mHandler(h), mFlags(f) {} + ~BuildIdDirectFieldVisitor() = default; + + TreeNode *GetParentVarClass(TreeNode *node); + Module_Handler *GetHandler(TreeNode *node); + + FieldNode *VisitFieldNode(FieldNode *node); + FieldLiteralNode *VisitFieldLiteralNode(FieldLiteralNode *node); + ArrayElementNode *VisitArrayElementNode(ArrayElementNode *node); + void Dump(); +}; + +class TypeInferBaseVisitor : public AstVisitor { + public: + explicit TypeInferBaseVisitor(unsigned f, bool base = false) + : AstVisitor((f & FLG_trace_1) && base) {} + ~TypeInferBaseVisitor() = default; + +#undef NODEKIND +#define NODEKIND(K) virtual K##Node *Visit##K##Node(K##Node *node) { \ + (void) AstVisitor::Visit##K##Node(node); \ + return node; \ +} +#include "ast_nk.def" +}; + +class ChangeTypeIdxVisitor : public AstVisitor { + private: + Module_Handler *mHandler; + unsigned mStrIdx; + unsigned mTypeIdx; + + public: + explicit ChangeTypeIdxVisitor(Module_Handler *h, unsigned f, bool base = false) + : AstVisitor((f & FLG_trace_1) && base), mHandler(h) {} + ~ChangeTypeIdxVisitor() = default; + + void Setup(unsigned stridx, unsigned tidx) { mStrIdx = stridx; mTypeIdx = tidx;} + + IdentifierNode *VisitIdentifierNode(IdentifierNode *node); +}; + +class TypeInferVisitor : public TypeInferBaseVisitor { + private: + Module_Handler *mHandler; + unsigned mFlags; + bool mUpdated; + AST_INFO *mInfo; + AST_XXport *mXXport; + AstOpt *mAstOpt; + + ChangeTypeIdxVisitor *mChangeTypeIdxVisitor; + + std::unordered_map> mParam2ArgArrayDeclMap;; + + // func nodeid to typeidx + std::unordered_map mFuncIsNodeMap;; + std::unordered_map> mCbFuncIsDone; + + public: + explicit TypeInferVisitor(Module_Handler *h, unsigned f, bool base = false) + : TypeInferBaseVisitor(f, base), mHandler(h), mFlags(f) { + mChangeTypeIdxVisitor = new ChangeTypeIdxVisitor(h, f, true); + mInfo = h->GetINFO(); + mXXport = h->GetASTXXport(); + mAstOpt = h->GetAstOpt(); + } + + ~TypeInferVisitor() = default; + + bool IsPrimTypeId(TypeId tid); + + bool GetUpdated() {return mUpdated;} + void SetUpdated(bool b = true) {mUpdated = b;} + + void SetTypeId(TreeNode *node, TypeId tid); + void SetTypeId(TreeNode *node1, TreeNode *node2); + void UpdateTypeId(TreeNode *node, TypeId tid); + void UpdateTypeId(TreeNode *node1, TreeNode *node2); + + void SetTypeIdx(TreeNode *node, unsigned tidx); + void SetTypeIdx(TreeNode *node1, TreeNode *node2); + void UpdateTypeIdx(TreeNode *node, unsigned tidx); 
+ void UpdateTypeIdx(TreeNode *node1, TreeNode *node2); + + void UpdateFuncRetTypeId(FunctionNode *node, TypeId tid, unsigned tidx); + void UpdateTypeUseNode(TreeNode *target, TreeNode *input); + void UpdateArgArrayDecls(unsigned nid, TypeId tid); + void UpdateArrayElemTypeMap(TreeNode *node, TypeId tid, unsigned tidx); + void UpdateArrayDimMap(TreeNode *node, DimensionNode *dim); + bool UpdateVarTypeWithInit(TreeNode *var, TreeNode *init); + TypeId GetArrayElemTypeId(TreeNode *node); + unsigned GetArrayElemTypeIdx(TreeNode *node); + + TypeId MergeTypeId(TypeId tia, TypeId tib); + unsigned MergeTypeIdx(unsigned tia, unsigned tib); + + bool IsArray(TreeNode *node); + // refer to shared/include/supported_types.def + bool IsPrimTypeIdx(unsigned tidx) { return tidx > 0 && tidx < TY_Void; } + + PrimTypeNode *GetOrClonePrimTypeNode(PrimTypeNode *node, TypeId tid); + + TreeNode *VisitClassField(TreeNode *node); + + ArrayElementNode *VisitArrayElementNode(ArrayElementNode *node); + ArrayLiteralNode *VisitArrayLiteralNode(ArrayLiteralNode *node); + AsTypeNode *VisitAsTypeNode(AsTypeNode *node); + BinOperatorNode *VisitBinOperatorNode(BinOperatorNode *node); + CallNode *VisitCallNode(CallNode *node); + CastNode *VisitCastNode(CastNode *node); + ClassNode *VisitClassNode(ClassNode *node); + CondBranchNode *VisitCondBranchNode(CondBranchNode *node); + DeclNode *VisitDeclNode(DeclNode *node); + ExportNode *VisitExportNode(ExportNode *node); + FieldLiteralNode *VisitFieldLiteralNode(FieldLiteralNode *node); + FieldNode *VisitFieldNode(FieldNode *node); + ForLoopNode *VisitForLoopNode(ForLoopNode *node); + FunctionNode *VisitFunctionNode(FunctionNode *node); + IdentifierNode *VisitIdentifierNode(IdentifierNode *node); + ImportNode *VisitImportNode(ImportNode *node); + InterfaceNode *VisitInterfaceNode(InterfaceNode *node); + IsNode *VisitIsNode(IsNode *node); + LambdaNode *VisitLambdaNode(LambdaNode *node); + LiteralNode *VisitLiteralNode(LiteralNode *node); + NewNode *VisitNewNode(NewNode *node); + ReturnNode *VisitReturnNode(ReturnNode *node); + StructLiteralNode *VisitStructLiteralNode(StructLiteralNode *node); + StructNode *VisitStructNode(StructNode *node); + TemplateLiteralNode *VisitTemplateLiteralNode(TemplateLiteralNode *node); + TerOperatorNode *VisitTerOperatorNode(TerOperatorNode *node); + TypeAliasNode *VisitTypeAliasNode(TypeAliasNode *node); + TypeOfNode *VisitTypeOfNode(TypeOfNode *node); + UnaOperatorNode *VisitUnaOperatorNode(UnaOperatorNode *node); + UserTypeNode *VisitUserTypeNode(UserTypeNode *node); +}; + +class ShareUTVisitor : public AstVisitor { + private: + std::stack mScopeStack; + + public: + explicit ShareUTVisitor(Module_Handler *h, unsigned f, bool base = false) + : AstVisitor((f & FLG_trace_1) && base) {} + ~ShareUTVisitor() = default; + + void Push(ASTScope *scope) { mScopeStack.push(scope); } + void Pop() { mScopeStack.pop(); } + + UserTypeNode *VisitUserTypeNode(UserTypeNode *node); +}; + +class CheckTypeVisitor : public AstVisitor { + private: + Module_Handler *mHandler; + unsigned mFlags; + + std::stack mScopeStack; + + public: + explicit CheckTypeVisitor(Module_Handler *h, unsigned f, bool base = false) + : AstVisitor((f & FLG_trace_1) && base), mHandler(h), mFlags(f) {} + ~CheckTypeVisitor() = default; + + // check if typeid "tid" is compatible with "target" + bool IsCompatible(TypeId tid, TypeId target); + + IdentifierNode *VisitIdentifierNode(IdentifierNode *node); +}; + +} +#endif diff --git a/src/MapleFE/astopt/include/ast_util.h 
b/src/MapleFE/astopt/include/ast_util.h new file mode 100644 index 0000000000000000000000000000000000000000..c2a39d40d31ce275deb9bcf829424acbaca230bd --- /dev/null +++ b/src/MapleFE/astopt/include/ast_util.h @@ -0,0 +1,55 @@ +/* +* Copyright (C) [2021] Futurewei Technologies, Inc. All rights reverved. +* +* OpenArkFE is licensed under the Mulan PSL v2. +* You can use this software according to the terms and conditions of the Mulan PSL v2. +* You may obtain a copy of Mulan PSL v2 at: +* +* http://license.coscl.org.cn/MulanPSL2 +* +* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +* FIT FOR A PARTICULAR PURPOSE. +* See the Mulan PSL v2 for more details. +*/ + +////////////////////////////////////////////////////////////////////////////////////////////// +// This is the interface to translate AST to C++ +////////////////////////////////////////////////////////////////////////////////////////////// + +#ifndef __AST_UTIL_HEADER__ +#define __AST_UTIL_HEADER__ + +#include + +namespace maplefe { + +class Module_Handler; + +class AST_Util { + private: + Module_Handler *mHandler; + unsigned mFlags; + + std::unordered_set mCppKeywords; + + void BuildCppKeyWordSet(); + + public: + explicit AST_Util(Module_Handler *h, unsigned f) : mHandler(h), mFlags(f) { + BuildCppKeyWordSet(); + } + ~AST_Util() {} + + bool IsDirectField(TreeNode *node); + bool IsCppKeyWord(unsigned stridx); + bool IsCppKeyWord(std::string name); + bool IsCppName(std::string name); + bool IsCppField(TreeNode *node); + + void SetTypeId(TreeNode *node, TypeId tid); + void SetTypeIdx(TreeNode *node, unsigned tidx); +}; + +} +#endif diff --git a/src/MapleFE/astopt/include/ast_xxport.h b/src/MapleFE/astopt/include/ast_xxport.h new file mode 100644 index 0000000000000000000000000000000000000000..f5f99e7352f721b88d1f4db931fd762acdd18be5 --- /dev/null +++ b/src/MapleFE/astopt/include/ast_xxport.h @@ -0,0 +1,166 @@ +/* +* Copyright (C) [2021] Futurewei Technologies, Inc. All rights reverved. +* +* OpenArkFE is licensed under the Mulan PSL v2. +* You can use this software according to the terms and conditions of the Mulan PSL v2. +* You may obtain a copy of Mulan PSL v2 at: +* +* http://license.coscl.org.cn/MulanPSL2 +* +* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +* FIT FOR A PARTICULAR PURPOSE. +* See the Mulan PSL v2 for more details. 
+*/ + +#ifndef __AST_XXPORT_HEADER__ +#define __AST_XXPORT_HEADER__ + +#include +#include +#include +#include +#include +#include +#include "ast_module.h" +#include "ast.h" +#include "gen_astvisitor.h" + +namespace maplefe { + +class AstOpt; +class AST_Handler; +class Module_Handler; + +class XXportInfo { + public: + unsigned mModuleStrIdx; + unsigned mXXportNodeId; + unsigned mDefaultNodeId; + bool mEverything; + std::set> mNodeIdPairs; + + public: + explicit XXportInfo(unsigned stridx, unsigned nid) : + mModuleStrIdx(stridx), mXXportNodeId(nid), mDefaultNodeId(0), mEverything(false) {} + ~XXportInfo() = default; + + void SetEverything() {mEverything = true;} + void Dump(); +}; + +class AST_XXport { + private: + AstOpt *mAstOpt; + AST_Handler *mASTHandler; + unsigned mFlags; + + std::list mHandlersIdxInOrder; + std::unordered_map> mHandlerIdx2DependentHandlerIdxMap; + std::unordered_map mStrIdx2HandlerIdxMap; + + std::unordered_map mIdStrIdx2ModuleStrIdxMap; + + public: + // module handler idx to set of XXportInfo + std::unordered_map> mImports; + std::unordered_map> mExports; + + // module handler idx to import/export nodes in the module + std::unordered_map> mImportNodeSets; + std::unordered_map> mExportNodeSets; + + std::unordered_map> mImportedDeclIds; + std::unordered_map> mExportedDeclIds; + + public: + explicit AST_XXport(AstOpt *o, unsigned f); + ~AST_XXport() {} + + unsigned GetModuleNum(); + + void BuildModuleOrder(); + + void SetModuleStrIdx(); + void CollectXXportNodes(); + + TreeNode *GetTarget(TreeNode *node); + std::string GetTargetFilename(unsigned hidx, TreeNode *node); + void UpdateDependency(unsigned hidx, TreeNode *node); + void AddHandler(); + + void SortHandler(); + + void CollectXXportInfo(unsigned hidx); + void CollectImportInfo(unsigned hidx); + void CollectExportInfo(unsigned hidx); + + void AddHandlerIdx2DependentHandlerIdxMap(unsigned hdlIdx, unsigned depHdlIdx) { + mHandlerIdx2DependentHandlerIdxMap[hdlIdx].insert(depHdlIdx); + } + + unsigned GetHandleIdxFromStrIdx(unsigned stridx); + + // check if node with id is imported decl + bool IsImportedDeclId(unsigned hidx, unsigned id) { + return (std::find(mImportedDeclIds[hidx].begin(), + mImportedDeclIds[hidx].end(), id) != mImportedDeclIds[hidx].end()); + } + + // check if node with id is exported decl + bool IsExportedDeclId(unsigned hidx, unsigned id) { + return (std::find(mExportedDeclIds[hidx].begin(), + mExportedDeclIds[hidx].end(), id) != mExportedDeclIds[hidx].end()); + } + + bool IsImportedExportedDeclId(unsigned hidx, unsigned id) { + return IsImportedDeclId(hidx, id) || IsExportedDeclId(hidx, id); + } + + void AddImportedDeclIds(unsigned hidx, unsigned nid) {mImportedDeclIds[hidx].push_back(nid);} + void AddExportedDeclIds(unsigned hidx, unsigned nid) {mExportedDeclIds[hidx].push_back(nid);} + + TreeNode *FindExportedDecl(unsigned hidx, unsigned stridx); + + // get stridx of M from M.get() + unsigned ExtractTargetStrIdx(TreeNode *node); + + // get/set full module file name stridx of M.ts from an identifier of that module M + unsigned GetModuleStrIdxFromIdStrIdx(unsigned stridx) {return mIdStrIdx2ModuleStrIdxMap[stridx];} + void SetIdStrIdx2ModuleStrIdx(unsigned id, unsigned mod) {mIdStrIdx2ModuleStrIdxMap[id] = mod;} + + // find default exported node in handler + TreeNode *GetExportedDefault(unsigned hstridx); + + // find exported node of given name in handler hidx + TreeNode *GetExportedNamedNode(unsigned hidx, unsigned stridx); + + // find the exported node in exporting module given imported node + // 
hidx is the handler index of node with index nid + TreeNode *GetExportedNodeFromImportedNode(unsigned hidx, unsigned nid); + + // get identifier from node + TreeNode *GetIdentifier(TreeNode *node); + + void Dump(); +}; + +class XXportBasicVisitor : public AstVisitor { + private: + AST_XXport *mASTXXport; + unsigned mHandlerIdx; + + public: + std::unordered_set mImported; + + public: + explicit XXportBasicVisitor(AST_XXport *xx, Module_Handler *h, unsigned i, unsigned f, bool base = false) + : AstVisitor((f & FLG_trace_1) && base), mASTXXport(xx), mHandlerIdx(i) {} + ~XXportBasicVisitor() = default; + + ImportNode *VisitImportNode(ImportNode *node); + ExportNode *VisitExportNode(ExportNode *node); +}; + +} +#endif diff --git a/src/MapleFE/astopt/include/astopt.h b/src/MapleFE/astopt/include/astopt.h new file mode 100644 index 0000000000000000000000000000000000000000..6a5fd6e144fbc8c99b0547c7910bfb0b6e87698b --- /dev/null +++ b/src/MapleFE/astopt/include/astopt.h @@ -0,0 +1,110 @@ +/* +* Copyright (C) [2020] Futurewei Technologies, Inc. All rights reverved. +* +* OpenArkFE is licensed under the Mulan PSL v2. +* You can use this software according to the terms and conditions of the Mulan PSL v2. +* You may obtain a copy of Mulan PSL v2 at: +* +* http://license.coscl.org.cn/MulanPSL2 +* +* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +* FIT FOR A PARTICULAR PURPOSE. +* See the Mulan PSL v2 for more details. +*/ + +////////////////////////////////////////////////////////////////////////////////////////////// +// This is the interface to translate AST to C++ +////////////////////////////////////////////////////////////////////////////////////////////// + +#ifndef __ASTOPT_HEADER__ +#define __ASTOPT_HEADER__ + +#include +#include +#include +#include "ast_module.h" +#include "ast.h" +#include "gen_astvisitor.h" +#include "ast_common.h" + +namespace maplefe { + +class AST_Handler; +class Module_Handler; +class AST_XXport; + +class AstOpt { +private: + AST_Handler *mASTHandler; + AST_XXport *mASTXXport; + unsigned mFlags; + + // nodeid to node map for all nodes in all modules + std::unordered_map mNodeId2NodeMap; + + // language keywords + std::unordered_set mLangKeywords; + + // nodeid to handler map for all nodes in all modules + std::unordered_map mNodeId2HandlerMap; + +public: + // module handlers in mASTHandler sorted by import/export dependency + std::vector mHandlersInOrder; + +public: + explicit AstOpt(AST_Handler *h, unsigned f); + ~AstOpt() {} + + AST_Handler *GetASTHandler() {return mASTHandler;} + AST_XXport *GetASTXXport() {return mASTXXport;} + unsigned GetModuleNum(); + Module_Handler *GetModuleHandler(unsigned i) { return mHandlersInOrder[i]; } + void AddModuleHandler(Module_Handler *h) { mHandlersInOrder.push_back(h); } + + void PreprocessModules(); + virtual void ProcessAST(unsigned trace); + + TreeNode *GetNodeFromNodeId(unsigned nid) { return mNodeId2NodeMap[nid]; } + void AddNodeId2NodeMap(TreeNode *node) { mNodeId2NodeMap[node->GetNodeId()] = node; } + + Module_Handler *GetHandlerFromNodeId(unsigned nid) { return mNodeId2HandlerMap[nid]; } + void AddNodeId2HandlerMap(unsigned nid, Module_Handler *h) { mNodeId2HandlerMap[nid] = h; } + + bool IsLangKeyword(TreeNode *node) { + return mLangKeywords.find(node->GetStrIdx()) != mLangKeywords.end(); + } +}; + +class BuildNodeIdToNodeVisitor : public AstVisitor { + AstOpt *mAstOpt; + unsigned mFlags; + 
Module_Handler *mHandler; + + public: + explicit BuildNodeIdToNodeVisitor(AstOpt *opt, unsigned f, bool base = false) + : AstVisitor((f & FLG_trace_1) && base), mAstOpt(opt), mFlags(f) {} + ~BuildNodeIdToNodeVisitor() = default; + + void SetHandler(Module_Handler *handler) { mHandler = handler; } + + TreeNode *VisitTreeNode(TreeNode *node) { + if (mFlags & FLG_trace_3) std::cout << "nodeid2node: " << node->GetNodeId() << std::endl; + (void) AstVisitor::VisitTreeNode(node); + mAstOpt->AddNodeId2NodeMap(node); + mAstOpt->AddNodeId2HandlerMap(node->GetNodeId(), mHandler); + return node; + } + + TreeNode *BaseTreeNode(TreeNode *node) { + if (mFlags & FLG_trace_3) std::cout << "nodeid2node: b " << node->GetNodeId() << std::endl; + (void) AstVisitor::BaseTreeNode(node); + mAstOpt->AddNodeId2NodeMap(node); + mAstOpt->AddNodeId2HandlerMap(node->GetNodeId(), mHandler); + return node; + } +}; + +} +#endif diff --git a/src/MapleFE/astopt/include/cpp_keywords.def b/src/MapleFE/astopt/include/cpp_keywords.def new file mode 100644 index 0000000000000000000000000000000000000000..f2fc30566654fbdca92a9621b6324dd4b46be091 --- /dev/null +++ b/src/MapleFE/astopt/include/cpp_keywords.def @@ -0,0 +1,112 @@ +/* +* Copyright (C) [2021] Futurewei Technologies, Inc. All rights reverved. +* +* OpenArkFE is licensed under the Mulan PSL v2. +* You can use this software according to the terms and conditions of the Mulan PSL v2. +* You may obtain a copy of Mulan PSL v2 at: +* +* http://license.coscl.org.cn/MulanPSL2 +* +* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +* FIT FOR A PARTICULAR PURPOSE. +* See the Mulan PSL v2 for more details. +*/ + +CPPKEYWORD(alignas) +CPPKEYWORD(alignof) +CPPKEYWORD(and) +CPPKEYWORD(and_eq) +CPPKEYWORD(asm) +CPPKEYWORD(atomic_cancel) +CPPKEYWORD(atomic_commit) +CPPKEYWORD(atomic_noexcept) +CPPKEYWORD(auto) +CPPKEYWORD(bitand) +CPPKEYWORD(bitor) +CPPKEYWORD(bool) +CPPKEYWORD(break) +CPPKEYWORD(case) +CPPKEYWORD(catch) +CPPKEYWORD(char) +CPPKEYWORD(char8_t) +CPPKEYWORD(char16_t) +CPPKEYWORD(char32_t) +CPPKEYWORD(class) +CPPKEYWORD(compl) +CPPKEYWORD(concept) +CPPKEYWORD(const) +CPPKEYWORD(consteval) +CPPKEYWORD(constexpr) +CPPKEYWORD(constinit) +CPPKEYWORD(const_cast) +CPPKEYWORD(continue) +CPPKEYWORD(co_await) +CPPKEYWORD(co_return) +CPPKEYWORD(co_yield) +CPPKEYWORD(decltype) +CPPKEYWORD(default) +CPPKEYWORD(delete) +CPPKEYWORD(do) +CPPKEYWORD(double) +CPPKEYWORD(dynamic_cast) +CPPKEYWORD(else) +CPPKEYWORD(enum) +CPPKEYWORD(explicit) +CPPKEYWORD(export) +CPPKEYWORD(extern) +CPPKEYWORD(false) +CPPKEYWORD(float) +CPPKEYWORD(for) +CPPKEYWORD(friend) +CPPKEYWORD(goto) +CPPKEYWORD(if) +CPPKEYWORD(inline) +CPPKEYWORD(int) +CPPKEYWORD(long) +CPPKEYWORD(mutable) +CPPKEYWORD(namespace) +CPPKEYWORD(new) +CPPKEYWORD(noexcept) +CPPKEYWORD(not) +CPPKEYWORD(not_eq) +CPPKEYWORD(nullptr) +CPPKEYWORD(operator) +CPPKEYWORD(or) +CPPKEYWORD(or_eq) +CPPKEYWORD(private) +CPPKEYWORD(protected) +CPPKEYWORD(public) +CPPKEYWORD(reflexpr) +CPPKEYWORD(register) +CPPKEYWORD(reinterpret_cast) +CPPKEYWORD(requires) +CPPKEYWORD(return) +CPPKEYWORD(short) +CPPKEYWORD(signed) +CPPKEYWORD(sizeof) +CPPKEYWORD(static) +CPPKEYWORD(static_assert) +CPPKEYWORD(static_cast) +CPPKEYWORD(struct) +CPPKEYWORD(switch) +CPPKEYWORD(synchronized) +CPPKEYWORD(template) +CPPKEYWORD(this) +CPPKEYWORD(thread_local) +CPPKEYWORD(throw) +CPPKEYWORD(true) +CPPKEYWORD(try) +CPPKEYWORD(typedef) +CPPKEYWORD(typeid) 
+CPPKEYWORD(typename) +CPPKEYWORD(union) +CPPKEYWORD(unsigned) +CPPKEYWORD(using) +CPPKEYWORD(virtual) +CPPKEYWORD(void) +CPPKEYWORD(volatile) +CPPKEYWORD(wchar_t) +CPPKEYWORD(while) +CPPKEYWORD(xor) +CPPKEYWORD(xor_eq) diff --git a/src/MapleFE/astopt/src/Makefile b/src/MapleFE/astopt/src/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..3d61edb58810ed11d2f63a25a32eba9c5b225f77 --- /dev/null +++ b/src/MapleFE/astopt/src/Makefile @@ -0,0 +1,61 @@ +# Copyright (C) [2021] Futurewei Technologies, Inc. All rights reverved. +# +# OpenArkFE is licensed under the Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# + +include ../../Makefile.in +BUILD=$(BUILDDIR)/astopt +$(shell $(MKDIR_P) $(BUILD)) + +SRC=$(wildcard *.cpp) +OBJ :=$(patsubst %.cpp,%.o,$(SRC)) +DEP :=$(patsubst %.cpp,%.d,$(SRC)) + +OBJS :=$(foreach obj,$(OBJ), $(BUILD)/$(obj)) +DEPS :=$(foreach dep,$(DEP), $(BUILD)/$(dep)) + +LIBOBJS :=$(patsubst $(BUILD)/main.o,,$(OBJS)) + + +INCLUDES := -I $(MAPLEFE_ROOT)/shared/include \ + -I $(MAPLEFE_ROOT)/astopt/include \ + -I $(MAPLEFE_ROOT)/$(SRCLANG)/include \ + -I ${BUILDDIR}/ast_gen/shared + +TARGET=astopt.a + +.PHONY: all +all: $(BUILD)/$(TARGET) + +-include $(DEPS) +.PHONY: clean + +vpath %.o $(BUILD) +vpath %.d $(BUILD) + +#Pattern Rules +$(BUILD)/%.o : %.cpp + $(CXX) $(CXXFLAGS) -fpermissive $(INCLUDES) -w -c $< -o $@ + +$(BUILD)/%.d : %.cpp + @$(CXX) $(CXXFLAGS) -MM $(INCLUDES) $< > $@ + @mv -f $(BUILD)/$*.d $(BUILD)/$*.d.tmp + @sed -e 's|.*:|$(BUILD)/$*.o:|' < $(BUILD)/$*.d.tmp > $(BUILD)/$*.d + @rm -f $(BUILD)/$*.d.tmp + +# TARGET depends on OBJS and shared OBJS from shared directory +# as well as mapleall libraries +$(BUILD)/$(TARGET): $(OBJS) + /usr/bin/ar rcs $(BUILD)/$(TARGET) $(OBJS) + +clean: + rm -rf $(BUILD) diff --git a/src/MapleFE/astopt/src/ast_adj.cpp b/src/MapleFE/astopt/src/ast_adj.cpp new file mode 100644 index 0000000000000000000000000000000000000000..64b8e82bf1f0de9ea9ecc9c1e7f3ef79609bd954 --- /dev/null +++ b/src/MapleFE/astopt/src/ast_adj.cpp @@ -0,0 +1,607 @@ +/* +* Copyright (C) [2021-2022] Futurewei Technologies, Inc. All rights reverved. +* +* OpenArkFE is licensed under the Mulan PSL v2. +* You can use this software according to the terms and conditions of the Mulan PSL v2. +* You may obtain a copy of Mulan PSL v2 at: +* +* http://license.coscl.org.cn/MulanPSL2 +* +* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +* FIT FOR A PARTICULAR PURPOSE. +* See the Mulan PSL v2 for more details. 
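// A minimal, standalone sketch of how an X-macro keyword table such as
// cpp_keywords.def is typically consumed: the consumer defines CPPKEYWORD,
// includes the .def file, and collects every entry into a lookup set. The
// inline entries below stand in for the real #include "cpp_keywords.def",
// and the helper names are illustrative, not the actual MapleFE API.
#include <string>
#include <unordered_set>

static const std::unordered_set<std::string> &CppKeywordSet() {
  static const std::unordered_set<std::string> keywords = {
#define CPPKEYWORD(K) #K,
    // in the real build this block would simply be: #include "cpp_keywords.def"
    CPPKEYWORD(class) CPPKEYWORD(namespace) CPPKEYWORD(operator) CPPKEYWORD(template)
#undef CPPKEYWORD
  };
  return keywords;
}

// Returns true if the given identifier collides with a C++ keyword.
bool IsCppKeyWordName(const std::string &name) {
  return CppKeywordSet().count(name) != 0;
}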
+*/ + +#include +#include +#include +#include "ast_handler.h" +#include "typetable.h" +#include "ast_info.h" +#include "ast_adj.h" +#include "ast_util.h" + +namespace maplefe { + +void AST_ADJ::AdjustAST() { + MSGNOLOC0("============== AdjustAST =============="); + ModuleNode *module = mHandler->GetASTModule(); + for(unsigned i = 0; i < module->GetTreesNum(); i++) { + TreeNode *it = module->GetTree(i); + it->SetParent(module); + } + + // adjust ast + AdjustASTVisitor adjust_visitor(mHandler, mFlags, true); + adjust_visitor.Visit(module); +} + +// set parent for some identifier's type +IdentifierNode *AdjustASTVisitor::VisitIdentifierNode(IdentifierNode *node) { + (void) AstVisitor::VisitIdentifierNode(node); + CheckAndRenameCppKeywords(node); + + TreeNode *type = node->GetType(); + if (type && type->IsUserType()) { + type->SetParent(node); + } + return node; +} + +ClassNode *AdjustASTVisitor::VisitClassNode(ClassNode *node) { + (void) AstVisitor::VisitClassNode(node); + CheckAndRenameCppKeywords(node); + AssignPseudoName(node); + + // record names + gStringPool.AddAltStrIdx(node->GetStrIdx()); + for (unsigned i = 0; i < node->GetFieldsNum(); i++) { + TreeNode *n = node->GetField(i); + gStringPool.AddAltStrIdx(n->GetStrIdx()); + } + for (unsigned i = 0; i < node->GetMethodsNum(); i++) { + TreeNode *n = node->GetMethod(i); + gStringPool.AddAltStrIdx(n->GetStrIdx()); + } + + // skip getting canonical type if not only fields + if (node->GetMethodsNum() || node->GetSuperClassesNum() || node->GetSuperInterfacesNum() || + node->GetSuperClassesNum() || node->GetTypeParamsNum()) { + return node; + } + + TreeNode *parent = node->GetParent(); + TreeNode *newnode = mInfo->GetCanonicStructNode(node); + if (newnode == node) { + return node; + } + + // create a TypeAlias for duplicated type if top level + if (parent && parent->IsModule()) { + newnode = (ClassNode*)(mInfo->CreateTypeAliasNode(newnode, node)); + } + + return (ClassNode*)newnode; +} + +InterfaceNode *AdjustASTVisitor::VisitInterfaceNode(InterfaceNode *node) { + (void) AstVisitor::VisitInterfaceNode(node); + CheckAndRenameCppKeywords(node); + + // record names + gStringPool.AddAltStrIdx(node->GetStrIdx()); + for (unsigned i = 0; i < node->GetFieldsNum(); i++) { + TreeNode *n = node->GetField(i); + gStringPool.AddAltStrIdx(n->GetStrIdx()); + } + for (unsigned i = 0; i < node->GetMethodsNum(); i++) { + TreeNode *n = node->GetMethod(i); + gStringPool.AddAltStrIdx(n->GetStrIdx()); + } + + // skip getting canonical type if not only fields + if (node->GetMethodsNum() || node->GetSuperInterfacesNum()) { + return node; + } + + TreeNode *parent = node->GetParent(); + TreeNode *newnode = mInfo->GetCanonicStructNode(node); + if (newnode == node) { + return node; + } + + // create a TypeAlias for duplicated type if top level + if (parent && parent->IsModule()) { + newnode = (InterfaceNode*)(mInfo->CreateTypeAliasNode(newnode, node)); + } + return (InterfaceNode*)newnode; +} + +StructLiteralNode *AdjustASTVisitor::VisitStructLiteralNode(StructLiteralNode *node) { + (void) AstVisitor::VisitStructLiteralNode(node); + + unsigned size = node->GetFieldsNum(); + for (unsigned fid = 0; fid < size; fid++) { + FieldLiteralNode *field = node->GetField(fid); + TreeNode *name = field->GetFieldName(); + TreeNode *lit = field->GetLiteral(); + if (!name || !lit || !lit->IsLiteral()) { + return node; + } + } + + TreeNode *newnode = mInfo->GetCanonicStructNode(node); + gStringPool.AddAltStrIdx(newnode->GetStrIdx()); + if (newnode != node) { + 
node->SetTypeIdx(newnode->GetTypeIdx()); + } + + return node; +} + +StructNode *AdjustASTVisitor::VisitStructNode(StructNode *node) { + (void) AstVisitor::VisitStructNode(node); + CheckAndRenameCppKeywords(node); + + // record names + gStringPool.AddAltStrIdx(node->GetStrIdx()); + for (unsigned i = 0; i < node->GetFieldsNum(); i++) { + TreeNode *n = node->GetField(i); + gStringPool.AddAltStrIdx(n->GetStrIdx()); + } + for (unsigned i = 0; i < node->GetMethodsNum(); i++) { + TreeNode *n = node->GetMethod(i); + gStringPool.AddAltStrIdx(n->GetStrIdx()); + } + + // skip getting canonical type for TypeAlias + TreeNode *parent_orig = node->GetParent(); + TreeNode *p = parent_orig; + while (p) { + if (p->IsTypeAlias()) { + return node; + } + p = p->GetParent(); + } + + // skip getting canonical type if not only fields + if (node->GetMethodsNum() || node->GetSupersNum() || node->GetTypeParamsNum()) { + return node; + } + + TreeNode *newnode = mInfo->GetCanonicStructNode(node); + + // if returned itself that means it should be added to the moudule if not yet + if (newnode == node) { + ModuleNode *module = mHandler->GetASTModule(); + bool found = false; + for (unsigned i = 0; i < module->GetTreesNum(); ++i) { + if (newnode == module->GetTree(i)) { + found = true; + break; + } + } + if (!found) { + module->AddTreeFront(newnode); + } + } + + // create a TypeAlias for duplicated type if top level + // except newly added anonymous type which has updated parent + TreeNode *parent = node->GetParent(); + if (newnode != node && parent_orig && parent && parent == parent_orig && parent->IsModule()) { + newnode = mInfo->CreateTypeAliasNode(newnode, node); + } + + // for non top level tree node, replace it with a UserType node + if (!parent_orig || !parent_orig->IsModule()) { + if (newnode->GetStrIdx()) { + // for anonymous class, check it is given a name + // not skipped due to type parameters + newnode = mInfo->CreateUserTypeNode(newnode->GetStrIdx()); + } + } + + return (StructNode*)newnode; +} + +// convert prim array node to array type node +PrimArrayTypeNode *AdjustASTVisitor::VisitPrimArrayTypeNode(PrimArrayTypeNode *node) { + (void) AstVisitor::VisitPrimArrayTypeNode(node); + ArrayTypeNode *arr = mHandler->NewTreeNode(); + DimensionNode *dim = node->GetDims(); + arr->SetDims(dim); + arr->SetElemType(node->GetPrim()); + + return (PrimArrayTypeNode *)arr; +} + +// set UserTypeNode's mStrIdx to be its mId's +UserTypeNode *AdjustASTVisitor::VisitUserTypeNode(UserTypeNode *node) { + (void) AstVisitor::VisitUserTypeNode(node); + TreeNode *id = node->GetId(); + if (id) { + node->SetStrIdx(id->GetStrIdx()); + } + + // use array type node + DimensionNode *dim = node->GetDims(); + bool isarr = (dim != NULL); + // element type + TreeNode *etype = NULL; + + if (isarr) { + etype = node; + } else if (id && id->IsIdentifier()) { + IdentifierNode *idnode = static_cast(id); + isarr = (idnode->GetStrIdx() == gStringPool.GetStrIdx("Array")); + if (isarr) { + if (unsigned s = node->GetTypeGenericsNum()) { + if (s == 1) { + etype = node->GetTypeGeneric(0); + } else { + NOTYETIMPL("array usertype with multiple generic type"); + } + } + } + } + + if (isarr) { + ArrayTypeNode *arr = mHandler->NewTreeNode(); + arr->SetDims(dim); + arr->SetElemType(etype); + node->SetDims(NULL); + node = (UserTypeNode *)arr; + } + + return node; +} + +// set TypeAliasNode's mStrIdx to be its mId's +TypeAliasNode *AdjustASTVisitor::VisitTypeAliasNode(TypeAliasNode *node) { + (void) AstVisitor::VisitTypeAliasNode(node); + UserTypeNode *id = 
node->GetId(); + if (id && id->GetId()) { + node->SetStrIdx(id->GetId()->GetStrIdx()); + } + return node; +} + +LiteralNode *AdjustASTVisitor::VisitLiteralNode(LiteralNode *node) { + (void) AstVisitor::VisitLiteralNode(node); + if (node->IsThis()) { + unsigned stridx = gStringPool.GetStrIdx("this"); + IdentifierNode *id = mInfo->CreateIdentifierNode(stridx); + TreeNode *type = node->GetType(); + if (type) { + id->SetType(type); + id->SetTypeId(type->GetTypeId()); + id->SetTypeIdx(type->GetTypeIdx()); + } + node = (LiteralNode *)id; + } + return node; +} + +UnaOperatorNode *AdjustASTVisitor::VisitUnaOperatorNode(UnaOperatorNode *node) { + (void) AstVisitor::VisitUnaOperatorNode(node); + TreeNode *opnd = node->GetOpnd(); + OprId op = node->GetOprId(); + if (op == OPR_Plus || op == OPR_Minus) { + if (opnd->IsLiteral()) { + LiteralNode *lit = static_cast(opnd); + if (lit->GetData().mType == LT_StringLiteral) { + unsigned stridx = lit->GetData().mData.mStrIdx; + std::string str = gStringPool.GetStringFromStrIdx(stridx); + double d = std::stod(str); + if (op == OPR_Minus) { + d = -d; + } + long l = (long)d; + LitData data; + if (d == l) { + data.mType = LT_IntegerLiteral; + data.mData.mInt = l; + } else { + data.mType = LT_DoubleLiteral; + data.mData.mDouble = d; + } + LiteralNode *n = mHandler->NewTreeNode(); + n->SetData(data); + n->SetParent(node->GetParent()); + return (UnaOperatorNode*)n; + } + } + } + return node; +} + +FunctionNode *AdjustASTVisitor::VisitFunctionNode(FunctionNode *node) { + (void) AstVisitor::VisitFunctionNode(node); + CheckAndRenameCppKeywords(node); + + gStringPool.AddAltStrIdx(node->GetStrIdx()); + + for(unsigned i = 0; i < node->GetParamsNum(); i++) { + TreeNode *it = node->GetParam(i); + gStringPool.AddAltStrIdx(it->GetStrIdx()); + } + + TreeNode *type = node->GetRetType(); + if (type && type->IsUserType()) { + type->SetParent(node); + } + + // Refine function TypeParames + for(unsigned i = 0; i < node->GetTypeParamsNum(); i++) { + type = node->GetTypeParamAtIndex(i); + if (type->IsIdentifier()) { + IdentifierNode *inode = static_cast(type); + TreeNode *newtype = mInfo->CreateUserTypeNode(inode); + node->SetTypeParamAtIndex(i, newtype); + newtype->SetParent(node); + } + } + return node; +} + +// move init from identifier to decl +// copy stridx to decl +DeclNode *AdjustASTVisitor::VisitDeclNode(DeclNode *node) { + TreeNode *var = node->GetVar(); + if (var->IsIdentifier()) { + IdentifierNode *inode = static_cast(var); + (void) AstVisitor::VisitIdentifierNode(inode); + + // copy stridx from Identifier to Decl + unsigned stridx = inode->GetStrIdx(); + if (stridx) { + node->SetStrIdx(stridx); + gStringPool.AddAltStrIdx(stridx); + mUpdated = true; + } + + // move init from Identifier to Decl + TreeNode *init = inode->GetInit(); + if (init) { + node->SetInit(init); + inode->ClearInit(); + mUpdated = true; + } + } else if (var->IsBindingPattern()) { + BindingPatternNode *bind = static_cast(var); + VisitBindingPatternNode(bind); + } else { + NOTYETIMPL("decl not idenfier or bind pattern"); + } + + // reorg before processing init + (void) AstVisitor::VisitDeclNode(node); + return node; +} + +ImportNode *AdjustASTVisitor::VisitImportNode(ImportNode *node) { + (void) AstVisitor::VisitImportNode(node); + + return node; +} + +// if-then-else : use BlockNode for then and else bodies +// split export decl and body +// export {func add(y)} ==> export {add}; func add(y) +ExportNode *AdjustASTVisitor::VisitExportNode(ExportNode *node) { + (void) AstVisitor::VisitExportNode(node); + 
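// Standalone sketch of the numeric folding done in VisitUnaOperatorNode above:
// a string literal under unary +/- is parsed as a number, negated for OPR_Minus,
// and stored as an integer literal when the value is integral, otherwise as a
// double literal. LitValue is a simplified stand-in for the LitData structure.
#include <string>

struct LitValue {
  bool isInt;
  long intVal;
  double dblVal;
};

LitValue FoldSignedStringLiteral(const std::string &str, bool isMinus) {
  double d = std::stod(str);          // may throw std::invalid_argument on bad input
  if (isMinus) {
    d = -d;
  }
  long l = static_cast<long>(d);
  LitValue v{};
  if (static_cast<double>(l) == d) {  // integral value: keep it as an integer literal
    v.isInt = true;
    v.intVal = l;
  } else {
    v.isInt = false;
    v.dblVal = d;
  }
  return v;
}

// Example: FoldSignedStringLiteral("3.0", true) yields the integer literal -3,
// while FoldSignedStringLiteral("3.14", false) yields the double literal 3.14.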
TreeNode *parent = node->GetParent(); + if (!parent || parent->IsNamespace()) { + // Export declarations are not permitted in a namespace + return node; + } + if (node->GetPairsNum() == 1) { + XXportAsPairNode *p = node->GetPair(0); + TreeNode *bfnode = p->GetBefore(); + if (bfnode) { + if (!bfnode->IsIdentifier()) { + switch (bfnode->GetKind()) { + case NK_Function: { + FunctionNode *func = static_cast(bfnode); + IdentifierNode *n = mInfo->CreateIdentifierNode(func->GetStrIdx()); + // update p + p->SetBefore(n); + n->SetParent(p); + mUpdated = true; + // insert func into AST after node + if (parent->IsBlock()) { + static_cast(parent)->InsertStmtAfter(func, node); + } else if (parent->IsModule()) { + static_cast(parent)->InsertAfter(func, node); + } + // cp annotation from node to func + for (unsigned i = 0; i < node->GetAnnotationsNum(); i++) { + func->AddAnnotation(node->GetAnnotationAtIndex(i)); + } + node->ClearAnnotation(); + break; + } + case NK_Class: { + ClassNode *classnode = static_cast(bfnode); + IdentifierNode *n = mInfo->CreateIdentifierNode(classnode->GetStrIdx()); + // update p + p->SetBefore(n); + n->SetParent(p); + mUpdated = true; + // insert classnode into AST after node + if (parent->IsBlock()) { + static_cast(parent)->InsertStmtAfter(classnode, node); + } else if (parent->IsModule()) { + static_cast(parent)->InsertAfter(classnode, node); + } + // cp annotation from node to classnode + for (unsigned i = 0; i < node->GetAnnotationsNum(); i++) { + classnode->AddAnnotation(node->GetAnnotationAtIndex(i)); + } + node->ClearAnnotation(); + break; + } + default: + NOTYETIMPL("VisitExportNode neither function nor class"); + break; + } + } + } + } + return node; +} + +NamespaceNode *AdjustASTVisitor::VisitNamespaceNode(NamespaceNode *node) { + (void) AstVisitor::VisitNamespaceNode(node); + TreeNode *id = node->GetId(); + if (id) { + // for namespace with nested id, split into namespaces + if (id->IsField()) { + FieldNode *fld = static_cast(id); + TreeNode *upper = fld->GetUpper(); + TreeNode *field = fld->GetField(); + // rename node with upper + node->SetId(upper); + + NamespaceNode *ns = mHandler->NewTreeNode(); + // name ns with field + ns->SetId(field); + // move elements of node to ns + for (unsigned i = 0; i < node->GetElementsNum(); i++) { + ns->AddElement(node->GetElementAtIndex(i)); + } + node->Release(); + // add ns as element of node + node->AddElement(ns); + + // recursive if needed + if (!upper->IsIdentifier()) { + node = VisitNamespaceNode(static_cast(node)); + } + } + } + return node; +} + +CondBranchNode *AdjustASTVisitor::VisitCondBranchNode(CondBranchNode *node) { + TreeNode *tn = VisitTreeNode(node->GetCond()); + tn = VisitTreeNode(node->GetTrueBranch()); + if (tn && !tn->IsBlock()) { + BlockNode *blk = mHandler->NewTreeNode(); + blk->AddChild(tn); + node->SetTrueBranch(blk); + mUpdated = true; + } + tn = VisitTreeNode(node->GetFalseBranch()); + if (tn && !tn->IsBlock()) { + BlockNode *blk = mHandler->NewTreeNode(); + blk->AddChild(tn); + node->SetFalseBranch(blk); + mUpdated = true; + } + return node; +} + +// for : use BlockNode for body +ForLoopNode *AdjustASTVisitor::VisitForLoopNode(ForLoopNode *node) { + (void) AstVisitor::VisitForLoopNode(node); + TreeNode *tn = node->GetBody(); + if (tn && !tn->IsBlock()) { + BlockNode *blk = mHandler->NewTreeNode(); + blk->AddChild(tn); + node->SetBody(blk); + mUpdated = true; + } + return node; +} + +static unsigned uniq_number = 1; + +// lamda : create a FunctionNode for it +// use BlockNode for body, add a 
ReturnNode +LambdaNode *AdjustASTVisitor::VisitLambdaNode(LambdaNode *node) { + FunctionNode *func = mHandler->NewTreeNode(); + + // func name + std::string str("__lambda_"); + str += std::to_string(uniq_number++); + str += "__"; + unsigned stridx = gStringPool.GetStrIdx(str); + IdentifierNode *name = mInfo->CreateIdentifierNode(stridx); + func->SetStrIdx(stridx); + func->SetFuncName(name); + mInfo->AddFromLambda(func->GetNodeId()); + + // func parameters + for (int i = 0; i < node->GetParamsNum(); i++) { + func->AddParam(node->GetParam(i)); + } + + // func Attributes + for (int i = 0; i < node->GetAttrsNum(); i++) { + func->AddAttr(node->GetAttrAtIndex(i)); + } + + // func type parameters + for (int i = 0; i < node->GetTypeParamsNum(); i++) { + func->AddTypeParam(node->GetTypeParamAtIndex(i)); + } + + // func body + TreeNode *tn = VisitTreeNode(node->GetBody()); + if (tn) { + if (tn->IsBlock()) { + func->SetBody(static_cast(tn)); + func->SetRetType(node->GetRetType()); + } else { + BlockNode *blk = mHandler->NewTreeNode(); + ReturnNode *ret = mHandler->NewTreeNode(); + ret->SetResult(tn); + blk->AddChild(ret); + + func->SetBody(blk); + } + } + + // func return type + if (node->GetRetType()) { + func->SetRetType(node->GetRetType()); + } + + mUpdated = true; + // note: the following conversion is only for the visitor to notice the node is updated + return (LambdaNode*)func; +} + +void AdjustASTVisitor::CheckAndRenameCppKeywords(TreeNode *node) { + if (!mHandler->IsTS() || node->GetStrIdx() == 0) { + return; + } + + unsigned stridx = node->GetStrIdx(); + if (mRenameMap.find(stridx) != mRenameMap.end()) { + node->SetStrIdx(mRenameMap[stridx]); + return; + } + + if (mUtil->IsCppKeyWord(stridx)) { + std::string name = gStringPool.GetStringFromStrIdx(stridx); + unsigned newidx = gStringPool.GetStrIdx(name + RENAMINGSUFFIX); + node->SetStrIdx(newidx); + mRenameMap[stridx] = newidx; + } +} + +void AdjustASTVisitor::AssignPseudoName(TreeNode *node) { + if (!mHandler->IsTS() || node->GetStrIdx() != 0) { + return; + } + static int pseudo = 0; + // Set a pseudo name + unsigned newidx = gStringPool.GetStrIdx("__Pseudo_" + std::to_string(++pseudo)); + node->SetStrIdx(newidx); +} + +} diff --git a/src/MapleFE/astopt/src/ast_cfa.cpp b/src/MapleFE/astopt/src/ast_cfa.cpp new file mode 100644 index 0000000000000000000000000000000000000000..57dda93f78017323662bb905e3dbf68c1410094d --- /dev/null +++ b/src/MapleFE/astopt/src/ast_cfa.cpp @@ -0,0 +1,107 @@ +/* +* Copyright (C) [2021] Futurewei Technologies, Inc. All rights reverved. +* +* OpenArkFE is licensed under the Mulan PSL v2. +* You can use this software according to the terms and conditions of the Mulan PSL v2. +* You may obtain a copy of Mulan PSL v2 at: +* +* http://license.coscl.org.cn/MulanPSL2 +* +* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +* FIT FOR A PARTICULAR PURPOSE. +* See the Mulan PSL v2 for more details. 
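// Standalone sketch of the renaming scheme used by CheckAndRenameCppKeywords
// above: a TypeScript identifier that collides with a C++ keyword is given a
// fixed suffix, and a cache guarantees the same original name always maps to
// the same new name. The suffix value and class name here are illustrative
// assumptions, not the real RENAMINGSUFFIX or string-pool based implementation.
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <utility>

class KeywordRenamer {
 public:
  explicit KeywordRenamer(std::unordered_set<std::string> keywords)
      : mKeywords(std::move(keywords)) {}

  // Returns the (possibly renamed) identifier.
  const std::string &Rename(const std::string &name) {
    auto it = mRenameMap.find(name);
    if (it != mRenameMap.end()) {
      return it->second;               // renamed before, reuse the same result
    }
    if (mKeywords.count(name) == 0) {
      return mRenameMap[name] = name;  // not a keyword, keep it unchanged
    }
    return mRenameMap[name] = name + "__ts2cpp";  // assumed suffix
  }

 private:
  std::unordered_set<std::string> mKeywords;
  std::unordered_map<std::string, std::string> mRenameMap;
};

// Example: KeywordRenamer r({"class", "template"}); r.Rename("class") returns
// "class__ts2cpp", while r.Rename("foo") returns "foo" unchanged.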
+*/ + +#include +#include +#include "ast_handler.h" +#include "ast_cfa.h" + +namespace maplefe { + +void AST_CFA::ControlFlowAnalysis() { + MSGNOLOC0("============== ControlFlowAnalysis =============="); + CollectReachableBB(); + RemoveUnreachableBB(); + + if (mFlags & FLG_trace_3) { + Dump(); + } +} + +void AST_CFA::Dump() { + ModuleNode *module = mHandler->GetASTModule(); + for(unsigned i = 0; i < module->GetTreesNum(); i++) { + TreeNode *it = module->GetTree(i); + it->Dump(0); + std::cout << std::endl; + } +} + +// this calcuates mNodeId2BbMap +void AST_CFA::CollectReachableBB() { + MSGNOLOC0("============== CollectReachableBB =============="); + mReachableBbIdx.clear(); + std::deque working_list; + + // process each functions + for (auto func: mHandler->mModuleFuncs) { + // initialisze work list with all entry BB + working_list.push_back(func->GetEntryBB()); + + while(working_list.size()) { + CfgBB *bb = working_list.front(); + MASSERT(bb && "null BB"); + unsigned bbid = bb->GetId(); + + // skip bb already visited + if (mReachableBbIdx.find(bbid) != mReachableBbIdx.end()) { + working_list.pop_front(); + continue; + } + + for (int i = 0; i < bb->GetSuccessorsNum(); i++) { + working_list.push_back(bb->GetSuccessorAtIndex(i)); + } + + for (int i = 0; i < bb->GetStatementsNum(); i++) { + TreeNode *node = bb->GetStatementAtIndex(i); + mHandler->SetBbFromNodeId(node->GetNodeId(), bb); + } + + mHandler->SetBbFromBbId(bbid, bb); + mHandler->mBbIdVec.push_back(bbid); + + mReachableBbIdx.insert(bbid); + working_list.pop_front(); + } + } +} + +void AST_CFA::RemoveUnreachableBB() { + std::set deadBb; + CfgBB *bb = nullptr; + for (auto id: mReachableBbIdx) { + bb = mHandler->mBbId2BbMap[id]; + for (int i = 0; i < bb->GetPredecessorsNum(); i++) { + CfgBB *pred = bb->GetPredecessorAtIndex(i); + unsigned pid = pred->GetId(); + if (mHandler->mBbId2BbMap.find(pid) == mHandler->mBbId2BbMap.end()) { + deadBb.insert(pred); + } + } + } + for (auto it: deadBb) { + if (mFlags & FLG_trace_3) std::cout << "deleted BB :"; + for (int i = 0; i < it->GetSuccessorsNum(); i++) { + bb = it->GetSuccessorAtIndex(i); + bb->mPredecessors.Remove(it); + } + if (mFlags & FLG_trace_3) std::cout << " BB" << it->GetId(); + it->~CfgBB(); + } + if (mFlags & FLG_trace_3) std::cout << std::endl; +} + +} diff --git a/src/MapleFE/astopt/src/ast_cfg.cpp b/src/MapleFE/astopt/src/ast_cfg.cpp new file mode 100644 index 0000000000000000000000000000000000000000..63bda48be94433bf8c45d6ca52e3f6235734da2f --- /dev/null +++ b/src/MapleFE/astopt/src/ast_cfg.cpp @@ -0,0 +1,882 @@ +/* +* Copyright (C) [2021] Futurewei Technologies, Inc. All rights reverved. +* +* OpenArkFE is licensed under the Mulan PSL v2. +* You can use this software according to the terms and conditions of the Mulan PSL v2. +* You may obtain a copy of Mulan PSL v2 at: +* +* http://license.coscl.org.cn/MulanPSL2 +* +* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +* FIT FOR A PARTICULAR PURPOSE. +* See the Mulan PSL v2 for more details. 
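// Standalone sketch of the reachability pass in CollectReachableBB above: a
// breadth-first worklist over the CFG, seeded with the entry block, marks every
// block that can be reached; anything left unmarked is a candidate for
// RemoveUnreachableBB. Blocks are represented by ids with an adjacency list
// here instead of the CfgBB objects used in the real pass.
#include <deque>
#include <unordered_set>
#include <vector>

std::unordered_set<unsigned> CollectReachable(
    const std::vector<std::vector<unsigned>> &successors, unsigned entry) {
  std::unordered_set<unsigned> reachable;
  std::deque<unsigned> working_list{entry};
  while (!working_list.empty()) {
    unsigned bb = working_list.front();
    working_list.pop_front();
    if (!reachable.insert(bb).second) {
      continue;  // already visited
    }
    for (unsigned succ : successors[bb]) {
      working_list.push_back(succ);
    }
  }
  return reachable;
}

// Example: with successors = {{1, 2}, {3}, {3}, {}} and entry 0, all of
// {0, 1, 2, 3} are reachable; a block with no path from the entry stays absent.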
+*/ + +#include +#include +#include +#include +#include "ast_cfg.h" +#include "ast_dfa.h" +#include "ast_handler.h" +#include "gen_astdump.h" + +namespace maplefe { + +class CollectNestedFuncs : public AstVisitor { + private: + CfgBuilder *mBuilder; + CfgFunc *mFunc; + + public: + CollectNestedFuncs(CfgBuilder *b, CfgFunc *f) : mBuilder(b), mFunc(f) {} + + FunctionNode *VisitFunctionNode(FunctionNode *node) { + if(TreeNode *body = node->GetBody()) + HandleNestedFunc(node, body); + return node; + } + + LambdaNode *VisitLambdaNode(LambdaNode *node) { + if(TreeNode *body = node->GetBody()) + HandleNestedFunc(node, body); + return node; + } + + void HandleNestedFunc(TreeNode *func, TreeNode *body) { + CfgFunc *current = mFunc; + mFunc = mBuilder->NewFunction(func); + current->AddNestedFunc(mFunc); + AstVisitor::VisitTreeNode(body); + mFunc = current; + } +}; + +// Create CfgFunc nodes for a module +CfgFunc *CfgBuilder::InitCfgFunc(ModuleNode *module) { + CfgFunc *module_func = NewFunction(module); + CollectNestedFuncs collector(this, module_func); + collector.Visit(module); + return module_func; +} + +// Initialize a CfgFunc node +void CfgBuilder::InitializeFunction(CfgFunc *func) { + // Create the entry BB and exit BB of current function + CfgBB *entry = NewBB(BK_Uncond); + func->SetEntryBB(entry); + CfgBB *exit = NewBB(BK_Join); + func->SetExitBB(exit); + CfgBuilder::Push(mThrowBBs, exit, nullptr); + // Initialize the working function and BB + mCurrentFunction = func; + mCurrentBB = NewBB(BK_Uncond); + entry->AddSuccessor(mCurrentBB); +} + +// Finalize a CfgFunc node +void CfgBuilder::FinalizeFunction() { + CfgBuilder::Pop(mThrowBBs); + CfgBB *exit = mCurrentFunction->GetExitBB(); + mCurrentBB->AddSuccessor(exit); + mCurrentFunction->SetLastBBId(CfgBB::GetLastId()); + mCurrentFunction = nullptr; + mCurrentBB = nullptr; +} + +// Push a BB to target BB stack +void CfgBuilder::Push(TargetBBStack &stack, CfgBB* bb, TreeNode *label) { + unsigned idx = 0; + if(label && label->IsIdentifier()) + idx = static_cast(label)->GetStrIdx(); + stack.push_back(TargetBB{bb, idx}); +} + +// Look up a target BB +CfgBB *CfgBuilder::LookUp(TargetBBStack &stack, TreeNode *label) { + unsigned idx = 0; + if(label && label->IsIdentifier()) + idx = static_cast(label)->GetStrIdx(); + if(idx == 0) { + for(auto it = stack.rbegin(); it != stack.rend(); ++it) + if(it->first->GetKind() != BK_Join2) + return it->first; + } else { + for(auto it = stack.rbegin(); it != stack.rend(); ++it) + if(it->second == idx) + return it->first; + } + MASSERT(0 && "Unexpected: Target not found."); + return nullptr; +} + +// Pop from a target BB stack +void CfgBuilder::Pop(TargetBBStack &stack) { + stack.pop_back(); +} + +// Handle a function +FunctionNode *CfgBuilder::VisitFunctionNode(FunctionNode *node) { + mCurrentBB->AddStatement(node); + return node; +} + +// Handle a lambda +LambdaNode *CfgBuilder::VisitLambdaNode(LambdaNode *node) { + mCurrentBB->AddStatement(node); + return node; +} + +ClassNode *CfgBuilder::VisitClassNode(ClassNode *node) { + mCurrentBB->AddStatement(node); + return node; +} + +InterfaceNode *CfgBuilder::VisitInterfaceNode(InterfaceNode *node) { + mCurrentBB->AddStatement(node); + return node; +} + +StructNode *CfgBuilder::VisitStructNode(StructNode *node) { + mCurrentBB->AddStatement(node); + return node; +} + +// For control flow +ReturnNode *CfgBuilder::VisitReturnNode(ReturnNode *node) { + mCurrentBB->AddStatement(node); + CfgBB *exit = mCurrentFunction->GetExitBB(); + mCurrentBB->AddSuccessor(exit); + 
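// Standalone sketch of the labeled-target stack used by Push/LookUp/Pop above
// to resolve 'break' and 'continue': every enclosing loop or switch pushes its
// target block together with an optional label id; an unlabeled jump picks the
// innermost eligible frame, a labeled jump picks the frame whose label matches.
// TargetFrame and TargetStack are simplified stand-ins for the TargetBBStack
// of (CfgBB*, label string index) pairs used in the real builder.
#include <cassert>
#include <vector>

struct TargetFrame {
  int targetBlockId;          // id of the BB a break/continue should jump to
  unsigned labelIdx;          // 0 means "no label"
  bool eligibleForUnlabeled;  // e.g. BK_Join2 frames are skipped for a plain break
};

class TargetStack {
 public:
  void Push(int blockId, unsigned labelIdx, bool eligible = true) {
    mFrames.push_back({blockId, labelIdx, eligible});
  }
  void Pop() { mFrames.pop_back(); }

  int LookUp(unsigned labelIdx) const {
    // walk from the innermost frame outward
    for (auto it = mFrames.rbegin(); it != mFrames.rend(); ++it) {
      if (labelIdx == 0 ? it->eligibleForUnlabeled : it->labelIdx == labelIdx) {
        return it->targetBlockId;
      }
    }
    assert(false && "Unexpected: Target not found.");
    return -1;
  }

 private:
  std::vector<TargetFrame> mFrames;
};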
mCurrentBB->SetKind(BK_Terminated); + mCurrentBB->SetAttr(AK_Return); + return node; +} + +// For control flow +CondBranchNode *CfgBuilder::VisitCondBranchNode(CondBranchNode *node) { + mCurrentBB->SetKind(BK_Branch); + //mCurrentBB->AddStatement(node); + mCurrentBB->SetAuxNode(node); + + if(TreeNode *cond = node->GetCond()) { + // Set predicate of current BB + mCurrentBB->SetPredicate(cond); + mCurrentBB->AddStatement(cond); + } + + // Save current BB + CfgBB *current_bb = mCurrentBB; + + // Create a new BB for true branch + mCurrentBB = NewBB(BK_Uncond); + current_bb->AddSuccessor(mCurrentBB); + + // Create a BB for the join point + CfgBB *join = NewBB(BK_Join2); + + // Handle the label of current if-statement + TreeNode *label = node->GetLabel(); + if(label) + CfgBuilder::Push(mBreakBBs, join, label); + + // Visit true branch first + VisitTreeNode(node->GetTrueBranch()); + mCurrentBB->AddSuccessor(join); + + TreeNode *false_branch = node->GetFalseBranch(); + if(false_branch == nullptr) { + current_bb->AddSuccessor(join); + } else { + mCurrentBB = NewBB(BK_Uncond); + current_bb->AddSuccessor(mCurrentBB); + // Visit false branch if it exists + VisitTreeNode(false_branch); + mCurrentBB->AddSuccessor(join); + } + + if(label) + CfgBuilder::Pop(mBreakBBs); + + // Keep going with the BB at the join point + mCurrentBB = join; + return node; +} + +// For control flow +// ForLoopProp: FLP_Regular, FLP_JSIn, FLP_JSOf +ForLoopNode *CfgBuilder::VisitForLoopNode(ForLoopNode *node) { + // Visit all inits + for (unsigned i = 0; i < node->GetInitsNum(); ++i) { + VisitTreeNode(node->GetInitAtIndex(i)); + } + + CfgBB *current_bb = mCurrentBB; + // Create a new BB for loop header + mCurrentBB = NewBB(BK_LoopHeader); + + // Add current node to the loop header BB + //mCurrentBB->AddStatement(node); + mCurrentBB->SetAuxNode(node); + current_bb->AddSuccessor(mCurrentBB); + // Set current_bb to be loop header + current_bb = mCurrentBB; + + if(node->GetProp() == FLP_Regular) { + if(TreeNode *cond = node->GetCond()) { + // Set predicate of current BB + mCurrentBB->SetPredicate(cond); + mCurrentBB->AddStatement(cond); + } + } else + // Set predicate to be current ForLoopNode when it is FLP_JSIn or FLP_JSOf + mCurrentBB->SetPredicate(node); + + // Create a BB for loop body + mCurrentBB = NewBB(BK_Uncond); + current_bb->AddSuccessor(mCurrentBB); + // Create a new BB for getting out of the loop + CfgBB *loop_exit = NewBB(BK_Join); + + // Push loop_exit and current_bb to stacks for 'break' and 'continue' + CfgBuilder::Push(mBreakBBs, loop_exit, node->GetLabel()); + CfgBuilder::Push(mContinueBBs, current_bb, node->GetLabel()); + // Visit loop body + VisitTreeNode(node->GetBody()); + CfgBuilder::Pop(mContinueBBs); + CfgBuilder::Pop(mBreakBBs); + + // Visit all updates + for (unsigned i = 0; i < node->GetUpdatesNum(); ++i) { + VisitTreeNode(node->GetUpdateAtIndex(i)); + } + // Add a back edge to loop header + mCurrentBB->AddSuccessor(current_bb); + current_bb->AddSuccessor(loop_exit); + mCurrentBB = loop_exit; + return node; +} + +// For control flow +WhileLoopNode *CfgBuilder::VisitWhileLoopNode(WhileLoopNode *node) { + CfgBB *current_bb = mCurrentBB; + // Create a new BB for loop header + mCurrentBB = NewBB(BK_LoopHeader); + // Add current node to the loop header BB + //mCurrentBB->AddStatement(node); + mCurrentBB->SetAuxNode(node); + current_bb->AddSuccessor(mCurrentBB); + // Set current_bb to be loop header + current_bb = mCurrentBB; + + if(TreeNode *cond = node->GetCond()) { + // Set predicate of current BB + 
mCurrentBB->SetPredicate(cond); + mCurrentBB->AddStatement(cond); + } + + // Create a BB for loop body + mCurrentBB = NewBB(BK_Uncond); + current_bb->AddSuccessor(mCurrentBB); + // Create a new BB for getting out of the loop + CfgBB *loop_exit = NewBB(BK_Join); + + // Push loop_exit and current_bb to stacks for 'break' and 'continue' + CfgBuilder::Push(mBreakBBs, loop_exit, node->GetLabel()); + CfgBuilder::Push(mContinueBBs, current_bb, node->GetLabel()); + // Visit loop body + VisitTreeNode(node->GetBody()); + CfgBuilder::Pop(mContinueBBs); + CfgBuilder::Pop(mBreakBBs); + + // Add a back edge to loop header + mCurrentBB->AddSuccessor(current_bb); + current_bb->AddSuccessor(loop_exit); + mCurrentBB = loop_exit; + return node; +} + +// For control flow +DoLoopNode *CfgBuilder::VisitDoLoopNode(DoLoopNode *node) { + CfgBB *current_bb = mCurrentBB; + // Create a new BB for loop header + mCurrentBB = NewBB(BK_LoopHeader); + // Add current node to the loop header BB + //mCurrentBB->AddStatement(node); + mCurrentBB->SetAuxNode(node); + current_bb->AddSuccessor(mCurrentBB); + // Set current_bb to be loop header + current_bb = mCurrentBB; + + // Create a BB for loop body + mCurrentBB = NewBB(BK_Uncond); + current_bb->AddSuccessor(mCurrentBB); + // Create a new BB for getting out of the loop + CfgBB *loop_exit = NewBB(BK_Join); + + // Push loop_exit and current_bb to stacks for 'break' and 'continue' + CfgBuilder::Push(mBreakBBs, loop_exit, node->GetLabel()); + CfgBuilder::Push(mContinueBBs, current_bb, node->GetLabel()); + // Visit loop body + VisitTreeNode(node->GetBody()); + CfgBuilder::Pop(mContinueBBs); + CfgBuilder::Pop(mBreakBBs); + + if(TreeNode *cond = node->GetCond()) { + // Set predicate of current BB + mCurrentBB->SetPredicate(cond); + mCurrentBB->AddStatement(cond); + } + + // Add a back edge to loop header + mCurrentBB->AddSuccessor(current_bb); + mCurrentBB->AddSuccessor(loop_exit); + mCurrentBB = loop_exit; + return node; +} + +// For control flow +ContinueNode *CfgBuilder::VisitContinueNode(ContinueNode *node) { + mCurrentBB->AddStatement(node); + // Get the loop header + CfgBB *loop_header = CfgBuilder::LookUp(mContinueBBs, node->GetTarget()); + // Add the loop header as a successor of current BB + mCurrentBB->AddSuccessor(loop_header); + mCurrentBB->SetKind(BK_Terminated); + mCurrentBB->SetAttr(AK_Cont); + return node; +} + +// For control flow +BreakNode *CfgBuilder::VisitBreakNode(BreakNode *node) { + mCurrentBB->AddStatement(node); + // Get the target BB for a loop or switch statement + CfgBB *exit = CfgBuilder::LookUp(mBreakBBs, node->GetTarget()); + // Add the target as a successor of current BB + mCurrentBB->AddSuccessor(exit); + mCurrentBB->SetKind(BK_Terminated); + mCurrentBB->SetAttr(AK_Break); + return node; +} + +// For control flow +SwitchNode *CfgBuilder::VisitSwitchNode(SwitchNode *node) { + mCurrentBB->SetKind(BK_Switch); + mCurrentBB->AddStatement(node); + // Set the root node of current BB + mCurrentBB->SetAuxNode(node); + + // Save current BB + CfgBB *current_bb = mCurrentBB; + + // Create a new BB for getting out of the switch block + CfgBB *exit = NewBB(BK_Join); + CfgBuilder::Push(mBreakBBs, exit, node->GetLabel()); + CfgBB *prev_block = nullptr; + TreeNode *switch_expr = node->GetExpr(); + for (unsigned i = 0; i < node->GetCasesNum(); ++i) { + CfgBB *case_bb = NewBB(BK_Case); + current_bb->AddSuccessor(case_bb); + + TreeNode *case_node = node->GetCaseAtIndex(i); + // Set the auxiliary node and predicate for current case BB + case_bb->SetAuxNode(case_node); + 
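// Standalone sketch of the CFG shape built for a while-loop above: the
// predecessor falls into a loop-header block, the header branches to the body
// and to the loop exit, and the end of the body has a back edge to the header.
// Block is a minimal stand-in for CfgBB, not the real class.
#include <memory>
#include <vector>

struct Block {
  unsigned id;
  std::vector<Block *> succs;
  void AddSuccessor(Block *b) { succs.push_back(b); }
};

struct WhileLoopCfg {
  Block *header;
  Block *bodyEntry;
  Block *exit;
};

// Wires the three blocks of a while-loop into 'pred' and returns them.
WhileLoopCfg BuildWhileLoop(Block *pred, std::vector<std::unique_ptr<Block>> &pool,
                            unsigned &nextId) {
  auto newBlock = [&]() {
    pool.emplace_back(new Block{nextId++, {}});
    return pool.back().get();
  };
  Block *header = newBlock();
  Block *body = newBlock();
  Block *exit = newBlock();
  pred->AddSuccessor(header);   // fall into the loop test
  header->AddSuccessor(body);   // condition true: enter the body
  header->AddSuccessor(exit);   // condition false: leave the loop
  // ... the body statements would be emitted into 'body' here ...
  body->AddSuccessor(header);   // back edge after the body
  return {header, body, exit};
}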
case_bb->SetPredicate(switch_expr); + case_bb->AddStatement(switch_expr); + + bool is_default = false; + TreeNode *case_expr = nullptr; + if(case_node->IsSwitchCase()) { + // Use the first label node of current SwitchCaseNode + TreeNode *label_node = static_cast(case_node)->GetLabelAtIndex(0); + if(label_node->IsSwitchLabel()) { + is_default = static_cast(label_node)->IsDefault(); + case_expr = static_cast(label_node)->GetValue(); + } + } + + // Optimize for default case + if(is_default) { + mCurrentBB = case_bb; + case_bb->SetKind(BK_Uncond); + } else { + mCurrentBB = NewBB(BK_Uncond); + case_bb->AddSuccessor(mCurrentBB); + } + + // Add a fall-through edge if needed + if(prev_block) { + prev_block->AddSuccessor(mCurrentBB); + } + + // Visit all statements of current case + if(case_node->IsSwitchCase()) { + SwitchCaseNode *cnode = static_cast(case_node); + for (unsigned i = 0; i < cnode->GetStmtsNum(); ++i) { + if (auto t = cnode->GetStmtAtIndex(i)) + VisitTreeNode(t); + } + } + + // Prepare for next case + prev_block = mCurrentBB; + current_bb = case_bb; + } + CfgBuilder::Pop(mBreakBBs); + + // Connect to the exit BB of this switch statement + prev_block->AddSuccessor(exit); + if(prev_block != current_bb) { + current_bb->AddSuccessor(exit); + } + mCurrentBB = exit; + return node; +} + +// For control flow +TryNode *CfgBuilder::VisitTryNode(TryNode *node) { + mCurrentBB->SetKind(BK_Try); + //mCurrentBB->AddStatement(node); + mCurrentBB->SetAuxNode(node); + + auto try_block_node = node->GetBlock(); + //mCurrentBB->AddStatement(try_block_node); + + unsigned num = node->GetCatchesNum(); + CfgBB *catch_bb = num ? NewBB(BK_Catch) : nullptr; + + auto finally_node = node->GetFinally(); + // Create a BB for the join point + CfgBB *join = finally_node ? NewBB(BK_Finally) : NewBB(BK_Join); + + // Save current BB + CfgBB *current_bb = mCurrentBB; + // Create a new BB for current block node + mCurrentBB = NewBB(BK_Uncond); + current_bb->AddSuccessor(mCurrentBB); + + // Add an edge for exception + current_bb->AddSuccessor(num ? 
catch_bb : join); + + // Visit try block + if(num) { + CfgBuilder::Push(mThrowBBs, catch_bb, nullptr); + AstVisitor::VisitBlockNode(try_block_node); + CfgBuilder::Pop(mThrowBBs); + } else + AstVisitor::VisitBlockNode(try_block_node); + + // Add an edge to join point + mCurrentBB->AddSuccessor(join); + + // JavaScript can have one catch block, or one finally block without catch block + // Other languages, such as C++ and Java, may have multiple catch blocks + CfgBB *curr_bb = mCurrentBB; + for (unsigned i = 0; i < num; ++i) { + if(i > 0) + catch_bb = NewBB(BK_Catch); + // Add an edge to catch bb + curr_bb->AddSuccessor(catch_bb); + mCurrentBB = catch_bb; + auto catch_node = node->GetCatchAtIndex(i); + catch_bb->AddStatement(catch_node); + + auto catch_block = catch_node->GetBlock(); + catch_bb->AddStatement(catch_block); + AstVisitor::VisitBlockNode(catch_block); + + mCurrentBB->AddSuccessor(join); + curr_bb = catch_bb; + } + + mCurrentBB = join; + if(finally_node) { + // For finally block + //mCurrentBB->AddStatement(finally_node); + mCurrentBB->SetAuxNode(finally_node); + AstVisitor::VisitTreeNode(finally_node); + curr_bb = NewBB(BK_Join); + mCurrentBB->AddSuccessor(curr_bb); + // Add an edge to recent catch BB or exit BB + mCurrentBB->AddSuccessor(CfgBuilder::LookUp(mThrowBBs, nullptr)); + mCurrentBB = curr_bb; + } + return node; +} + +// For control flow +ThrowNode *CfgBuilder::VisitThrowNode(ThrowNode *node) { + mCurrentBB->AddStatement(node); + // Get the catch/exit bb for this throw statement + CfgBB *catch_bb = CfgBuilder::LookUp(mThrowBBs, nullptr); + // Add the loop header as a successor of current BB + mCurrentBB->AddSuccessor(catch_bb); + mCurrentBB->SetKind(BK_Terminated); + mCurrentBB->SetAttr(AK_Throw); + return node; +} + +// For control flow +BlockNode *CfgBuilder::VisitBlockNode(BlockNode *node) { + //mCurrentBB->AddStatement(node); + mCurrentBB->SetAuxNode(node); + // Check if current block constains any JS_Let or JS_Const DeclNode + unsigned i, num = node->GetChildrenNum(); + for (i = 0; i < num; ++i) { + TreeNode *child = node->GetChildAtIndex(i); + if(child == nullptr || !child->IsDecl()) { + continue; + } + DeclNode *decl = static_cast(child); + if(decl->GetProp() == JS_Let || decl->GetProp() == JS_Const) { + break; + } + } + + // Handle the label of current block statement + TreeNode *label = node->GetLabel(); + + if(i >= num && label == nullptr) { + // Do not create BB for current block when no JS_Let or JS_Const DeclNode inside + // Visit all child nodes + AstVisitor::VisitBlockNode(node); + } else { + mCurrentBB->SetKind(BK_Block); + // Set the auxiliary node of this BB + mCurrentBB->SetAuxNode(node); + + // Create a BB for the join point + CfgBB *join = NewBB(BK_Join2); + + if(label) + CfgBuilder::Push(mBreakBBs, join, label); + + // Needs BBs for current block + // Save current BB + CfgBB *current_bb = mCurrentBB; + // Create a new BB for current block node + mCurrentBB = NewBB(BK_Uncond); + current_bb->AddSuccessor(mCurrentBB); + + // Visit all child nodes + AstVisitor::VisitBlockNode(node); + + mCurrentBB->AddSuccessor(join); + // This edge is to determine the block range for JS_Let or JS_Const DeclNode + //current_bb->AddSuccessor(join); + + if(label) + CfgBuilder::Pop(mBreakBBs); + + mCurrentBB = join; + } + return node; +} + +// For control flow +NamespaceNode *CfgBuilder::VisitNamespaceNode(NamespaceNode *node) { + //mCurrentBB->AddStatement(node); + mCurrentBB->SetKind(BK_Block); + // Set the auxiliary node of this BB + mCurrentBB->SetAuxNode(node); + + 
// Create a BB for the join point + CfgBB *join = NewBB(BK_Join2); + + // Save current BB + CfgBB *current_bb = mCurrentBB; + // Create a new BB for current block node + mCurrentBB = NewBB(BK_Uncond); + current_bb->AddSuccessor(mCurrentBB); + + // Visit all child nodes + AstVisitor::VisitNamespaceNode(node); + + mCurrentBB->AddSuccessor(join); + // This edge is to determine the namespace range + //current_bb->AddSuccessor(join); + + mCurrentBB = join; + return node; +} + +// For PassNode +PassNode *CfgBuilder::VisitPassNode(PassNode *node) { + mCurrentBB->AddStatement(node); + return node; +} + +// For statement of current BB +TemplateLiteralNode *CfgBuilder::VisitTemplateLiteralNode(TemplateLiteralNode *node) { + mCurrentBB->AddStatement(node); + return node; +} + +// For statement of current BB +ImportNode *CfgBuilder::VisitImportNode(ImportNode *node) { + mCurrentBB->AddStatement(node); + return node; +} + +// For statement of current BB +ExportNode *CfgBuilder::VisitExportNode(ExportNode *node) { + mCurrentBB->AddStatement(node); + return node; +} + +// For statement of current BB +DeclNode *CfgBuilder::VisitDeclNode(DeclNode *node) { + mCurrentBB->AddStatement(node); + return node; +} + +// For statement of current BB +ParenthesisNode *CfgBuilder::VisitParenthesisNode(ParenthesisNode *node) { + mCurrentBB->AddStatement(node); + return node; +} + +// For statement of current BB +CastNode *CfgBuilder::VisitCastNode(CastNode *node) { + mCurrentBB->AddStatement(node); + return node; +} + +// For statement of current BB +ArrayElementNode *CfgBuilder::VisitArrayElementNode(ArrayElementNode *node) { + mCurrentBB->AddStatement(node); + return node; +} + +// For statement of current BB +VarListNode *CfgBuilder::VisitVarListNode(VarListNode *node) { + mCurrentBB->AddStatement(node); + return node; +} + +// For statement of current BB +ExprListNode *CfgBuilder::VisitExprListNode(ExprListNode *node) { + mCurrentBB->AddStatement(node); + return node; +} + +// For statement of current BB +UnaOperatorNode *CfgBuilder::VisitUnaOperatorNode(UnaOperatorNode *node) { + mCurrentBB->AddStatement(node); + return node; +} + +// For statement of current BB +BinOperatorNode *CfgBuilder::VisitBinOperatorNode(BinOperatorNode *node) { + mCurrentBB->AddStatement(node); + return node; +} + +// For statement of current BB +TerOperatorNode *CfgBuilder::VisitTerOperatorNode(TerOperatorNode *node) { + mCurrentBB->AddStatement(node); + return node; +} + +// For statement of current BB +InstanceOfNode *CfgBuilder::VisitInstanceOfNode(InstanceOfNode *node) { + mCurrentBB->AddStatement(node); + return node; +} + +// For statement of current BB +TypeOfNode *CfgBuilder::VisitTypeOfNode(TypeOfNode *node) { + mCurrentBB->AddStatement(node); + return node; +} + +// For statement of current BB +NewNode *CfgBuilder::VisitNewNode(NewNode *node) { + mCurrentBB->AddStatement(node); + return node; +} + +// For statement of current BB +DeleteNode *CfgBuilder::VisitDeleteNode(DeleteNode *node) { + mCurrentBB->AddStatement(node); + return node; +} + +// For statement of current BB +CallNode *CfgBuilder::VisitCallNode(CallNode *node) { + mCurrentBB->AddStatement(node); + mCurrentBB->SetAttr(AK_HasCall); + return node; +} + +// For statement of current BB +AssertNode *CfgBuilder::VisitAssertNode(AssertNode *node) { + mCurrentBB->AddStatement(node); + return node; +} + +// For statement of current BB +UserTypeNode *CfgBuilder::VisitUserTypeNode(UserTypeNode *node) { + mCurrentBB->AddStatement(node); + return node; +} + +// For statement 
of current BB +IdentifierNode *CfgBuilder::VisitIdentifierNode(IdentifierNode *node) { + mCurrentBB->AddStatement(node); + return node; +} + +// For statement of current BB +LiteralNode *CfgBuilder::VisitLiteralNode(LiteralNode *node) { + mCurrentBB->AddStatement(node); + return node; +} + +// For statement of current BB +TypeAliasNode *CfgBuilder::VisitTypeAliasNode(TypeAliasNode *node) { + mCurrentBB->AddStatement(node); + return node; +} + +// For statement of current BB +FieldNode *CfgBuilder::VisitFieldNode(FieldNode *node) { + mCurrentBB->AddStatement(node); + return node; +} + +TreeNode *CfgBuilder::BaseTreeNode(TreeNode *node) { + return node; +} + +// Allocate a new CfgFunc node +CfgFunc *CfgBuilder::NewFunction(TreeNode *node) { + CfgFunc *func = new(mHandler->GetMemPool()->Alloc(sizeof(CfgFunc))) CfgFunc; + func->SetFuncNode(node); + return func; +} + +// Allocate a new CfgBB node +CfgBB *CfgBuilder::NewBB(BBKind k) { + return new(mHandler->GetMemPool()->Alloc(sizeof(CfgBB))) CfgBB(k); +} + +// Helper for a node in dot graph +static std::string BBLabelStr(CfgBB *bb, const char *shape = nullptr, const char *fn = nullptr) { + static const char* const kBBNames[] = + { "unknown", "uncond", "block", "branch", "loop", "switch", "case", "try", "catch", "finally", + "yield", "term", "join", "join2" }; + if(shape == nullptr) + return kBBNames[bb->GetKind()]; + std::string str("BB" + std::to_string(bb->GetId())); + str += " [label=\"" + str + (shape[0] == 'e' ? std::string("\\n") + kBBNames[bb->GetKind()] : "") + + (fn ? std::string("\\n\\\"") + fn + "\\\"" : "") + "\", shape=" + shape + "];\n"; + return str; +} + +// Dump current CfgFunc node +void CfgFunc::Dump() { + const char *func_name = GetName(); + std::cout << "Function " << func_name << " {" << std::endl; + unsigned num = GetNestedFuncsNum(); + if(num > 0) { + std::cout << "Nested Functions: " << num << " [" << std::endl; + for(unsigned i = 0; i < num; ++i) { + CfgFunc *afunc = GetNestedFuncAtIndex(i); + const char *fname = afunc->GetName(); + std::cout << "Function: " << i + 1 << " " << fname << std::endl; + afunc->Dump(); + } + std::cout << "] // Nested Functions" << std::endl; + } + std::cout << "BBs: [" << std::endl; + + std::stack bb_stack; + CfgBB *entry = GetEntryBB(), *exit = GetExitBB(); + bb_stack.push(exit); + bb_stack.push(entry); + std::set visited; + visited.insert(exit); + visited.insert(entry); + // Dump CFG in dot format + std::string dot("---\ndigraph CFG_"); + dot = dot + func_name + " {\n" + BBLabelStr(entry, "box", func_name) + BBLabelStr(exit, "doubleoctagon"); + const char* scoped = " [style=dashed color=grey];"; + while(!bb_stack.empty()) { + CfgBB *bb = bb_stack.top(); + bb_stack.pop(); + unsigned succ_num = bb->GetSuccessorsNum(); + std::cout << "BB" << bb->GetId() << ", " << BBLabelStr(bb) << (succ_num ? " ( succ: " : " ( Exit "); + for(unsigned i = 0; i < succ_num; ++i) { + CfgBB *curr = bb->GetSuccessorAtIndex(i); + std::cout << "BB" << curr->GetId() << " "; + dot += "BB" + std::to_string(bb->GetId()) + " -> BB" + std::to_string(curr->GetId()) + + (bb == entry ? (curr == exit ? scoped : ";") : + succ_num == 1 ? ";" : i ? bb->GetKind() == BK_Block ? 
scoped : + " [color=darkred];" : " [color=darkgreen];") + "\n"; + if(visited.find(curr) == visited.end()) { + bb_stack.push(curr); + visited.insert(curr); + dot += BBLabelStr(curr, "ellipse"); + } + } + std::cout << ")" << std::endl; + unsigned stmt_num = bb->GetStatementsNum(); + if(stmt_num) { + for(unsigned i = 0; i < stmt_num; ++i) { + TreeNode *stmt = bb->GetStatementAtIndex(i); + std::cout << " " << i + 1 << ". NodeId: " << stmt->GetNodeId() << ", " + << AstDump::GetEnumNodeKind(stmt->GetKind()) << " : "; + stmt->Dump(0); + std::cout << std::endl; + } + } + } + std::cout << dot << "} // CFG in dot format" << std::endl; + std::cout << "] // BBs\nLastBBId" << (num ? " (Including nested functions)" : "") << ": " + << GetLastBBId() << "\n} // Function" << std::endl; +} + +void CfgBuilder::Build() { + MSGNOLOC0("============== BuildCFG =============="); + + ModuleNode *module = mHandler->GetASTModule(); + + // Set the init function for current module + CfgFunc *func = InitCfgFunc(module); + mHandler->mModuleFuncs.push_back(func); + + mHandler->SetCfgFunc(func); + + std::queue funcQueue; + funcQueue.push(func); + while(!funcQueue.empty()) { + func = funcQueue.front(); + funcQueue.pop(); + // Start to build CFG for current function + InitializeFunction(func); + TreeNode *node = func->GetFuncNode(); + switch(node->GetKind()) { + case NK_Function: + Visit(static_cast(node)->GetBody());; + break; + case NK_Lambda: + { + auto n = static_cast(node)->GetBody(); + if(n->IsBlock()) + Visit(n); + } + break; + default: + Visit(node); + } + FinalizeFunction(); + for(unsigned i = 0; i < func->GetNestedFuncsNum(); ++i) { + CfgFunc *f = func->GetNestedFuncAtIndex(i); + funcQueue.push(f); + mHandler->mModuleFuncs.push_back(f); + } + } +} + +} diff --git a/src/MapleFE/astopt/src/ast_dfa.cpp b/src/MapleFE/astopt/src/ast_dfa.cpp new file mode 100644 index 0000000000000000000000000000000000000000..3e6918b17001c4d523858a3a637f438307762009 --- /dev/null +++ b/src/MapleFE/astopt/src/ast_dfa.cpp @@ -0,0 +1,711 @@ +/* +* Copyright (C) [2021] Futurewei Technologies, Inc. All rights reverved. +* +* OpenArkFE is licensed under the Mulan PSL v2. +* You can use this software according to the terms and conditions of the Mulan PSL v2. +* You may obtain a copy of Mulan PSL v2 at: +* +* http://license.coscl.org.cn/MulanPSL2 +* +* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +* FIT FOR A PARTICULAR PURPOSE. +* See the Mulan PSL v2 for more details. 
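// Standalone sketch of the reaching-definitions propagation implemented by the
// data-flow pass that follows (BuildBitVectors): each block has a GEN set (defs
// created in the block) and a PRSV set (defs it does not kill), and the IN set
// of a block is the union over its predecessors of (IN & PRSV) | GEN, iterated
// to a fixed point. std::bitset and the struct below stand in for the BitVector
// and per-BB maps used in the real pass; the bound kMaxDefs is an assumption.
#include <bitset>
#include <cstddef>
#include <vector>

constexpr std::size_t kMaxDefs = 256;  // assumed upper bound for this sketch
using DefSet = std::bitset<kMaxDefs>;

struct BlockSets {
  std::vector<unsigned> preds;  // predecessor block ids
  DefSet gen;                   // definitions created in this block
  DefSet prsv;                  // definitions this block preserves (does not kill)
  DefSet in;                    // definitions reaching the block entry (computed)
};

void ComputeReachIn(std::vector<BlockSets> &blocks) {
  bool changed = true;
  while (changed) {  // iterate until no IN set changes
    changed = false;
    for (BlockSets &bb : blocks) {
      DefSet in;
      for (unsigned p : bb.preds) {
        // what reaches the end of predecessor p: (IN(p) & PRSV(p)) | GEN(p)
        in |= (blocks[p].in & blocks[p].prsv) | blocks[p].gen;
      }
      if (in != bb.in) {
        bb.in = in;
        changed = true;
      }
    }
  }
}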
+*/ + +#include +#include +#include +#include "stringpool.h" +#include "ast_cfg.h" +#include "ast_dfa.h" + +namespace maplefe { + +AST_DFA::~AST_DFA() { + mVar2DeclMap.clear(); + for (auto it: mStmtId2StmtMap) { + delete it.second; + } + mStmtId2StmtMap.clear(); + for (auto it: mPrsvMap) { + delete it.second; + } + mPrsvMap.clear(); + for (auto it: mGenMap) { + delete it.second; + } + mGenMap.clear(); + for (auto it: mRchInMap) { + delete it.second; + } + mRchInMap.clear(); + for (auto it: mRchOutMap) { + delete it.second; + } +} + +void AST_DFA::TestBV() { + unsigned size = 300; + BitVector *bv1 = new BitVector(size); + bv1->WipeOff(0xab); + + BitVector *bv2 = new BitVector(size); + bv2->WipeOff(0x12); + + DumpBV(bv1); + DumpBV(bv2); + + bv1->Or(bv2); + DumpBV(bv1); + + bv1->And(bv2); + DumpBV(bv1); + + free(bv1); + free(bv2); +} + +void AST_DFA::DataFlowAnalysis() { + Clear(); + // TestBV(); + CollectInfo(); + CollectDefNodes(); + BuildBitVectors(); + BuildDefUseChain(); + if (mFlags & FLG_trace_3) DumpDefUse(); +} + +void AST_DFA::Clear() { + mVar2DeclMap.clear(); + mStmtIdVec.Clear(); + mStmtId2StmtMap.clear(); + mDefNodeIdSet.clear(); + mDefPositionVec.Clear(); + mUsePositionMap.clear(); + for (auto it: mStmtId2StmtMap) { + delete it.second; + } + mStmtId2StmtMap.clear(); + for (auto it: mPrsvMap) { + delete it.second; + } + mPrsvMap.clear(); + for (auto it: mGenMap) { + delete it.second; + } + mGenMap.clear(); + for (auto it: mRchInMap) { + delete it.second; + } + mRchInMap.clear(); + for (auto it: mRchOutMap) { + delete it.second; + } + for (auto it: mPrsvMap) { + delete it.second; + } + mNodeId2StmtIdMap.clear(); + mStmtId2BbIdMap.clear(); + mDefStrIdxSet.clear(); + mDefUseMap.clear(); +} + +void AST_DFA::DumpDefPosition(unsigned idx, DefPosition pos) { + std::cout << " DefPos: " << idx; + unsigned stridx = pos.first; + std::cout << " stridx: " << stridx << " " << gStringPool.GetStringFromStrIdx(stridx) << "\t"; + unsigned nid = pos.second; + std::cout << "nodeid: " << nid << "\t"; + unsigned sid = GetStmtIdFromNodeId(nid); + std::cout << "stmtid: " << sid << "\t"; + unsigned bbid = GetBbIdFromStmtId(sid); + std::cout << " bbid: " << bbid << "\t"; + std::cout << std::endl; +} + +void AST_DFA::DumpDefPositionVec() { + MSGNOLOC0("============== DefPositionVec =============="); + for (unsigned i = 0; i < mDefPositionVec.GetNum(); i++) { + DefPosition pos = mDefPositionVec.ValueAtIndex(i); + DumpDefPosition(i, pos); + } +} + +#define DUMMY_BBID 0xffffffff + +unsigned AST_DFA::GetDefStrIdx(TreeNode *node) { + unsigned stridx = 0; + // pass DUMMY_BBID to indicate to get stridx only + AddDef(node, stridx, DUMMY_BBID); + return stridx; +} + +// return def-node id defined in node +// return 0 if node has no def +unsigned AST_DFA::AddDef(TreeNode *node, unsigned &bitnum, unsigned bbid) { + unsigned stridx = 0; + unsigned nodeid = 0; + bool isDefUse = false; + switch (node->GetKind()) { + case NK_Decl: { + DeclNode *decl = static_cast(node); + if (decl->GetInit()) { + stridx = decl->GetStrIdx(); + nodeid = decl->GetVar()->GetNodeId(); + } + break; + } + case NK_BinOperator: { + BinOperatorNode *bon = static_cast(node); + OprId op = bon->GetOprId(); + switch (op) { + case OPR_Assign: { + TreeNode *lhs = bon->GetOpndA(); + if (lhs->IsField()) { + lhs = static_cast(lhs)->GetUpper(); + } + stridx = lhs->GetStrIdx(); + nodeid = lhs->GetNodeId(); + break; + } + case OPR_AddAssign: + case OPR_SubAssign: + case OPR_MulAssign: + case OPR_DivAssign: + case OPR_ModAssign: + case OPR_ShlAssign: + case 
OPR_ShrAssign: + case OPR_BandAssign: + case OPR_BorAssign: + case OPR_BxorAssign: + case OPR_ZextAssign: { + TreeNode *lhs = bon->GetOpndA(); + if (lhs->IsField()) { + lhs = static_cast(lhs)->GetUpper(); + } + stridx = lhs->GetStrIdx(); + nodeid = lhs->GetNodeId(); + isDefUse = true; + break; + } + default: + break; + } + break; + } + case NK_UnaOperator: { + UnaOperatorNode *uon = static_cast(node); + OprId op = uon->GetOprId(); + if (op == OPR_Inc || op == OPR_Dec || op == OPR_PreInc || op == OPR_PreDec) { + TreeNode *lhs = uon->GetOpnd(); + stridx = lhs->GetStrIdx(); + nodeid = lhs->GetNodeId(); + isDefUse = true; + } + break; + } + default: + break; + } + + // update mDefPositionVec + if (stridx) { + if (bbid == DUMMY_BBID) { + // special usage for GetDefStrIdx(): use bitnum to return stridx + bitnum = stridx; + } else { + DefPosition pos(stridx, nodeid); + bitnum++; + mDefStrIdxSet.insert(stridx); + mDefNodeIdSet.insert(nodeid); + if (isDefUse) { + mDefUseNodeIdSet.insert(nodeid); + } + mDefPositionVec.PushBack(pos); + return nodeid; + } + } + + return nodeid; +} + +// this calcuates mDefPositionVec +void AST_DFA::CollectDefNodes() { + MSGNOLOC0("============== CollectDefNodes =============="); + std::unordered_set done_list; + std::deque working_list; + + unsigned bitnum = 0; + + // process each functions + for (auto func: mHandler->mModuleFuncs) { + // add arguments as def + TreeNode *f = static_cast(func)->GetFuncNode(); + if (f->IsFunction()) { + FunctionNode *fn = static_cast(f); + for (unsigned i = 0; i < fn->GetParamsNum(); i++) { + TreeNode *arg = fn->GetParam(i); + if (arg->IsIdentifier()) { + unsigned stridx = arg->GetStrIdx(); + unsigned nodeid = arg->GetNodeId(); + DefPosition pos(stridx, nodeid); + bitnum++; + mDefStrIdxSet.insert(stridx); + mDefNodeIdSet.insert(nodeid); + mDefPositionVec.PushBack(pos); + SetNodeId2StmtId(nodeid, fn->GetNodeId()); + } + } + } + + CfgBB *bb = func->GetEntryBB(); + MASSERT(bb && "null BB"); + unsigned bbid = bb->GetId(); + + working_list.push_back(bb); + + while(working_list.size()) { + bb = working_list.front(); + MASSERT(bb && "null BB"); + bbid = bb->GetId(); + + // process bb not visited + if (done_list.find(bbid) == done_list.end()) { + if (mFlags & FLG_trace_3) std::cout << "working_list work " << bbid << std::endl; + for (int i = 0; i < bb->GetStatementsNum(); i++) { + TreeNode *stmt = bb->GetStatementAtIndex(i); + (void) AddDef(stmt, bitnum, bbid); + } + + for (int i = 0; i < bb->GetSuccessorsNum(); i++) { + working_list.push_back(bb->GetSuccessorAtIndex(i)); + } + + done_list.insert(bbid); + } + + working_list.pop_front(); + } + } + + if (mFlags & FLG_trace_3) DumpDefPositionVec(); +} + +void AST_DFA::BuildBitVectors() { + MSGNOLOC0("============== BuildBitVectors =============="); + std::unordered_set done_list; + std::deque working_list; + + // init bit vectors + unsigned bvsize = mDefPositionVec.GetNum(); + for (auto bbid: mHandler->mBbIdVec) { + BitVector *bv1 = new BitVector(bvsize); + bv1->WipeOff(0xff); // init with all 1 + mPrsvMap[bbid] = bv1; + + BitVector *bv2 = new BitVector(bvsize); + bv2->WipeOff(0); + mGenMap[bbid] = bv2; + + working_list.push_back(mHandler->mBbId2BbMap[bbid]); + } + + while(working_list.size()) { + CfgBB *bb = working_list.front(); + MASSERT(bb && "null BB"); + unsigned bbid = bb->GetId(); + + // skip bb already visited + if (done_list.find(bbid) != done_list.end()) { + working_list.pop_front(); + continue; + } + + for (int it = 0; it < bb->GetSuccessorsNum(); it++) { + 
working_list.push_back(bb->GetSuccessorAtIndex(it)); + } + + // function parameters are considered defined at function entry bb + if (bb->GetAttr() == AK_Entry) { + CfgFunc *func = mEntryBbId2FuncMap[bbid]; + if(func) { + FunctionNode *fn = static_cast(func->GetFuncNode()); + for (unsigned i = 0; i < fn->GetParamsNum(); i++) { + TreeNode *arg = fn->GetParam(i); + if (arg->IsIdentifier()) { + for (int i = 0; i < mDefPositionVec.GetNum(); i++) { + // set bits for matching bbid + unsigned nid = mDefPositionVec.ValueAtIndex(i).second; + unsigned sid = GetStmtIdFromNodeId(nid); + unsigned bid = GetBbIdFromStmtId(sid); + if (bid == bbid) { + mGenMap[bbid]->SetBit(i); + } + } + } + } + } + } + + for (int it = 0; it < bb->GetStatementsNum(); it++) { + TreeNode *node = bb->GetStatementAtIndex(it); + unsigned stridx = GetDefStrIdx(node); + if (stridx != 0) { + // now loop through all the definition positions + // mPrsvMap + for (int i = 0; i < mDefPositionVec.GetNum(); i++) { + // clear bits for matching stridx + if (mDefPositionVec.ValueAtIndex(i).first == stridx) { + mPrsvMap[bbid]->ClearBit(i); + } + } + + // mGenMap + for (int i = 0; i < mDefPositionVec.GetNum(); i++) { + // set bits for matching bbid + unsigned nid = mDefPositionVec.ValueAtIndex(i).second; + unsigned sid = GetStmtIdFromNodeId(nid); + unsigned bid = GetBbIdFromStmtId(sid); + if (bid == bbid) { + mGenMap[bbid]->SetBit(i); + } + } + } + } + + done_list.insert(bbid); + working_list.pop_front(); + } + + // mRchInMap + for (auto bbid: mHandler->mBbIdVec) { + BitVector *bv = new BitVector(bvsize); + bv->Alloc(bvsize); + bv->WipeOff(0); + mRchInMap[bbid] = bv; + } + + working_list.clear(); + // initialize work list with all reachable BB + for (auto it: done_list) { + CfgBB *bb = mHandler->mBbId2BbMap[it]; + working_list.push_back(bb); + } + + BitVector *old_bv = new BitVector(bvsize); + BitVector *tmp_bv = new BitVector(bvsize); + while (working_list.size()) { + CfgBB *bb = working_list.front(); + unsigned bbid = bb->GetId(); + + tmp_bv->WipeOff(0); + old_bv->WipeOff(0); + old_bv->Or(mRchInMap[bbid]); + mRchInMap[bbid]->WipeOff(0); + for (int i = 0; i < bb->GetPredecessorsNum(); i++){ + CfgBB *pred = bb->GetPredecessorAtIndex(i); + unsigned pid = pred->GetId(); + tmp_bv->WipeOff(0); + tmp_bv->Or(mRchInMap[pid]); + tmp_bv->And(mPrsvMap[pid]); + tmp_bv->Or(mGenMap[pid]); + mRchInMap[bbid]->Or(tmp_bv); + } + + working_list.pop_front(); + if (!mRchInMap[bbid]->Equal(old_bv)) { + for (int i = 0; i < bb->GetSuccessorsNum(); i++) { + working_list.push_back(bb->GetSuccessorAtIndex(i)); + } + } + } + + bool buildOutMap = false; + if (buildOutMap) { + for (auto bbid: mHandler->mBbIdVec) { + BitVector *bv = new BitVector(bvsize); + bv->Alloc(bvsize); + bv->WipeOff(0); + mRchOutMap[bbid] = bv; + } + } + + delete old_bv; + delete tmp_bv; + if (mFlags & FLG_trace_3) DumpAllBVMaps(); +} + +void AST_DFA::DumpAllBVMaps() { + MSGNOLOC0("=== mPrsvMap ==="); + DumpBVMap(mPrsvMap); + MSGNOLOC0("=== mGenMap ==="); + DumpBVMap(mGenMap); + MSGNOLOC0("=== mRchInMap ==="); + DumpBVMap(mRchInMap); +} + +void AST_DFA::DumpBVMap(BVMap &map) { + if (!map.size()) { return; } + std::set ordered(mHandler->mBbIdVec.begin(), mHandler->mBbIdVec.end()); + for (auto bbid: ordered) { + std::cout << "BB" << bbid << " : "; + DumpBV(map[bbid]); + } + std::cout << std::endl; +} + +void AST_DFA::DumpBV(BitVector *bv) { + std::cout << "BitVector: "; + for (int i = 0; i < mDefPositionVec.GetNum(); i++) { + std::cout << bv->GetBit(i); + if (i%8 == 7) std::cout << " "; + if (i%64 == 
63) { + std::cout << std::endl; + std::cout << " "; + } + } + std::cout << std::endl; +} + +void AST_DFA::DumpDefUse() { + MSGNOLOC0("============== Dump DefUse =============="); + for (unsigned i = 0; i < mDefPositionVec.GetNum(); i++) { + DefPosition pos = mDefPositionVec.ValueAtIndex(i); + DumpDefPosition(i, pos); + + std::cout << "Use: "; + for (auto nid: mDefUseMap[pos.second]) { + std::cout << nid << " "; + } + std::cout << std::endl; + } +} + +void AST_DFA::CollectInfo() { + MSGNOLOC0("============== CollectInfo =============="); + // process each functions for arguments + for (auto func: mHandler->mModuleFuncs) { + // add arguments as def + TreeNode *f = static_cast(func)->GetFuncNode(); + if (f && f->IsFunction()) { + unsigned sid = f->GetNodeId(); + mStmtIdVec.PushBack(sid); + + // use entry bbid for function and its arguments + unsigned bbid = func->GetEntryBB()->GetId(); + mStmtId2BbIdMap[sid] = bbid; + mEntryBbId2FuncMap[bbid] = func; + + FunctionNode *fn = static_cast(f); + for (unsigned i = 0; i < fn->GetParamsNum(); i++) { + TreeNode *arg = fn->GetParam(i); + if (arg->IsIdentifier()) { + SetNodeId2StmtId(arg->GetNodeId(), sid); + } + } + } + } + + // loop through each BB and each statement + CollectInfoVisitor visitor(mHandler, mFlags, true); + for (auto bbid: mHandler->mBbIdVec) { + visitor.SetBbId(bbid); + if (mFlags & FLG_trace_3) std::cout << " == bbid " << bbid << std::endl; + CfgBB *bb = mHandler->mBbId2BbMap[bbid]; + for (int i = 0; i < bb->GetStatementsNum(); i++) { + TreeNode *stmt = bb->GetStatementAtIndex(i); + // function nodes are handled above + // so do not override with real bbid + if (stmt->IsFunction()) { + continue; + } + unsigned sid = stmt->GetNodeId(); + mStmtIdVec.PushBack(sid); + mStmtId2StmtMap[sid] = stmt; + mStmtId2BbIdMap[sid] = bbid; + visitor.SetStmtIdx(stmt->GetNodeId()); + visitor.SetBbId(bbid); + visitor.Visit(stmt); + } + } +} + +IdentifierNode *CollectInfoVisitor::VisitIdentifierNode(IdentifierNode *node) { + AstVisitor::VisitIdentifierNode(node); + mDFA->SetNodeId2StmtId(node->GetNodeId(), mStmtIdx); + return node; +} + +void AST_DFA::BuildDefUseChain() { + MSGNOLOC0("============== BuildDefUseChain =============="); + DefUseChainVisitor visitor(mHandler, mFlags, true); + std::unordered_set defStrIdxs; + std::unordered_set done_list; + CfgBB *bb; + + // loop through each variable def + for (int i = 0; i < mDefPositionVec.GetNum(); i++) { + DefPosition pos = mDefPositionVec.ValueAtIndex(i); + if (mFlags & FLG_trace_3) DumpDefPosition(i, pos); + std::deque working_list; + + // def stridx + visitor.mDefStrIdx = pos.first; + // def nodeid + visitor.mDefNodeId = pos.second; + visitor.mReachNewDef = false; + unsigned sid = GetStmtIdFromNodeId(visitor.mDefNodeId); + unsigned bid = GetBbIdFromStmtId(sid); + // check if func entry bb + if (mEntryBbId2FuncMap.find(bid) != mEntryBbId2FuncMap.end()) { + // func arguments are defined in entry bb, mark defined + visitor.mReachDef = true; + } else { + visitor.mReachDef = false; + } + + bb = mHandler->mBbId2BbMap[bid]; + + // loop through each BB and each statement + working_list.push_back(bb); + done_list.clear(); + + while(working_list.size()) { + bb = working_list.front(); + MASSERT(bb && "null BB"); + unsigned bbid = bb->GetId(); + MSGNOLOC("BB", bbid); + + // check if def is either alive at bb entry or created in bb + bool alive = mRchInMap[bbid]->GetBit(i); + bool gen = mGenMap[bbid]->GetBit(i); + if (!(alive || gen)) { + done_list.insert(bbid); + working_list.pop_front(); + continue; + } + + // 
process bb + visitor.VisitBB(bbid); + + // add successors to working_list if not in done_list + if (done_list.find(bbid) == done_list.end()) { + // if new def in bb, then no need to visit successors for the current def + if (!visitor.mReachNewDef) { + for (int i = 0; i < bb->GetSuccessorsNum(); i++) { + working_list.push_back(bb->GetSuccessorAtIndex(i)); + } + } + } + + done_list.insert(bbid); + working_list.pop_front(); + } + } +} + +void DefUseChainVisitor::VisitBB(unsigned bbid) { + SetBbId(bbid); + CfgBB *bb = mHandler->mBbId2BbMap[bbid]; + for (int i = 0; i < bb->GetStatementsNum(); i++) { + TreeNode *node = bb->GetStatementAtIndex(i); + SetStmtIdx(node->GetNodeId()); + if (node->IsFunction()) { + FunctionNode *func = static_cast(node); + for (unsigned i = 0; i < func->GetParamsNum(); i++) { + TreeNode *arg = func->GetParam(i); + Visit(arg); + } + } else { + Visit(node); + } + if (mReachNewDef) { + return; + } + } + return; +} + +IdentifierNode *DefUseChainVisitor::VisitIdentifierNode(IdentifierNode *node) { + if (mFlags & FLG_trace_1) std::cout << "Visiting IdentifierNode, id=" << node->GetNodeId() << "..." << std::endl; + + // only deal with use with same stridx of current def + unsigned stridx = node->GetStrIdx(); + if (stridx != mDefStrIdx) { + return node; + } + + // new def, quit + unsigned nid = node->GetNodeId(); + bool isDef = mHandler->GetDFA()->IsDef(nid); + bool isUse = mHandler->GetDFA()->IsDefUse(nid); + if (isDef) { + if (mReachDef) { + mReachNewDef = true; + if (!isUse) { + return node; + } + } else { + mReachDef = true; + isUse = false; + } + } else if (mReachDef) { + isUse = true; + } + + // exclude its own decl + TreeNode *p = node->GetParent(); + if (p && p->IsDecl()) { + DeclNode *dn = static_cast(p); + if (dn->GetVar() == node) { + return node; + } + } + + // add to mDefUseMap + if (isUse) { + mDFA->mDefUseMap[mDefNodeId].insert(nid); + } + return node; +} + +BinOperatorNode *DefUseChainVisitor::VisitBinOperatorNode(BinOperatorNode *node) { + if (mFlags & FLG_trace_1) std::cout << "Visiting BinOperatorNode, id=" << node->GetNodeId() << "..." << std::endl; + + BinOperatorNode *bon = static_cast(node); + OprId op = bon->GetOprId(); + switch (op) { + case OPR_Assign: + case OPR_AddAssign: + case OPR_SubAssign: + case OPR_MulAssign: + case OPR_DivAssign: + case OPR_ModAssign: + case OPR_ShlAssign: + case OPR_ShrAssign: + case OPR_BandAssign: + case OPR_BorAssign: + case OPR_BxorAssign: + case OPR_ZextAssign: { + // visit rhs first due to computation use/def order + TreeNode *rhs = bon->GetOpndB(); + (void) AstVisitor::VisitTreeNode(rhs); + + TreeNode *lhs = bon->GetOpndA(); + (void) AstVisitor::VisitTreeNode(lhs); + break; + } + default: + TreeNode *lhs = bon->GetOpndA(); + (void) AstVisitor::VisitTreeNode(lhs); + + TreeNode *rhs = bon->GetOpndB(); + (void) AstVisitor::VisitTreeNode(rhs); + break; + } + return node; +} + + +} diff --git a/src/MapleFE/astopt/src/ast_handler.cpp b/src/MapleFE/astopt/src/ast_handler.cpp new file mode 100644 index 0000000000000000000000000000000000000000..d08f7415070c5c5908d0d876d002fec026d97733 --- /dev/null +++ b/src/MapleFE/astopt/src/ast_handler.cpp @@ -0,0 +1,343 @@ +/* +* Copyright (C) [2021-2022] Futurewei Technologies, Inc. All rights reverved. +* +* OpenArkFE is licensed under the Mulan PSL v2. +* You can use this software according to the terms and conditions of the Mulan PSL v2. 
+* You may obtain a copy of Mulan PSL v2 at: +* +* http://license.coscl.org.cn/MulanPSL2 +* +* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +* FIT FOR A PARTICULAR PURPOSE. +* See the Mulan PSL v2 for more details. +*/ + +#include +#include +#include "ast_handler.h" +#include "ast_cfg.h" +#include "ast_info.h" +#include "ast_adj.h" +#include "ast_scp.h" +#include "ast_ti.h" +#include "ast_cfa.h" +#include "ast_dfa.h" +#include "ast_util.h" +#include "ast_xxport.h" +#include "astopt.h" +#include "typetable.h" +#include "gen_astdump.h" + +namespace maplefe { + +Module_Handler::~Module_Handler() { + mNodeId2BbMap.clear(); + mModuleFuncs.clear(); + mBbId2BbMap.clear(); + mBbIdVec.clear(); + mNodeId2Decl.clear(); + mArrayDeclId2EleTypeIdMap.clear(); + delete mCfgFunc; + delete mINFO; + delete mADJ; + delete mSCP; + delete mTI; + delete mCFA; + delete mDFA; + delete mUtil; +} + +MemPool *Module_Handler::GetMemPool() { + return mASTHandler->GetMemPool(); +} + +Module_Handler *AST_Handler::GetModuleHandler(ModuleNode *module) { + for (unsigned i = 0; i < mModuleHandlers.GetNum(); i++) { + Module_Handler *h = mModuleHandlers.ValueAtIndex(i); + if (h->GetASTModule() == module) { + return h; + } + } + return NULL; +} + +Module_Handler *AST_Handler::GetModuleHandler(TreeNode *node) { + ASTScope *scp = node->GetScope(); + while (!scp->GetTree()->IsModule()) { + scp = scp->GetParent(); + } + TreeNode *mod = scp->GetTree(); + Module_Handler *h = NULL; + if (mod && mod->IsModule()) { + h = GetModuleHandler(static_cast(mod)); + } + + return h; +} + +HandlerIndex AST_Handler::GetHandlerIndex(const char *filename) { + if (mModuleHandlerMap.find(filename) == mModuleHandlerMap.end()) + return HandlerNotFound; + return mModuleHandlerMap.at(filename); +} + +bool AST_Handler::AddModule(ModuleNode *m) { + const char *filename = m->GetFilename(); + if (mModuleHandlerMap.find(filename) != mModuleHandlerMap.end()) + return false; + mModuleHandlerMap[filename] = mSize; + + Module_Handler *handler = new(mMemPool.Alloc(sizeof(Module_Handler))) Module_Handler(mFlags); + handler->SetASTModule(m); + handler->SetIsTS(m->GetSrcLang() == SrcLangTypeScript); + handler->SetASTHandler(this); + mModuleHandlers.PushBack(handler); + mSize++; + return true; +} + +void Module_Handler::BasicAnalysis() { + // collect AST info + CollectInfo(); + + // rewirte some AST nodes + AdjustAST(); + + // scope analysis + ScopeAnalysis(); +} + +void Module_Handler::CollectInfo() { + if (!mUtil) { + mUtil = new(GetMemPool()->Alloc(sizeof(AST_Util))) AST_Util(this, mFlags); + } + if (!mINFO) { + mINFO = new(GetMemPool()->Alloc(sizeof(AST_INFO))) AST_INFO(this, mFlags); + } + mINFO->CollectInfo(); +} + +void Module_Handler::AdjustAST() { + if (!mADJ) { + mADJ = new(GetMemPool()->Alloc(sizeof(AST_ADJ))) AST_ADJ(this, mFlags); + } + mADJ->AdjustAST(); +} + +void Module_Handler::ScopeAnalysis() { + if (!mSCP) { + mSCP = new(GetMemPool()->Alloc(sizeof(AST_SCP))) AST_SCP(this, mFlags); + } + mSCP->ScopeAnalysis(); + + if (mFlags & FLG_trace_2) { + std::cout << "============== Dump Scope ==============" << std::endl; + mASTModule->GetRootScope()->Dump(0); + } +} + +void Module_Handler::TypeInference() { + if (!mTI) { + mTI = new(GetMemPool()->Alloc(sizeof(TypeInfer))) TypeInfer(this, mFlags); + } + mTI->TypeInference(); +} + +void Module_Handler::BuildCFG() { + CfgBuilder builder(this, mFlags); + builder.Build(); +} + +void 
Module_Handler::ControlFlowAnalysis() {
+  if (!mCFA) {
+    mCFA = new(GetMemPool()->Alloc(sizeof(AST_CFA))) AST_CFA(this, mFlags);
+  }
+  mCFA->ControlFlowAnalysis();
+}
+
+void Module_Handler::DataFlowAnalysis() {
+  if (!mDFA) {
+    mDFA = new(GetMemPool()->Alloc(sizeof(AST_DFA))) AST_DFA(this, mFlags);
+  }
+  mDFA->DataFlowAnalysis();
+}
+
+AstOpt *Module_Handler::GetAstOpt() {
+  return mASTHandler->GetAstOpt();
+}
+
+AST_XXport *Module_Handler::GetASTXXport() {
+  return mASTHandler->GetAstOpt()->GetASTXXport();
+}
+
+// input an identifier ==> return the decl node with the same name
+TreeNode *Module_Handler::FindDecl(IdentifierNode *node, bool deep) {
+  TreeNode *decl = NULL;
+  if (!node) {
+    return decl;
+  }
+  unsigned nid = node->GetNodeId();
+
+  // search the existing map mNodeId2Decl first
+  if (mNodeId2Decl.find(nid) != mNodeId2Decl.end()) {
+    decl = mNodeId2Decl[nid];
+    // deep search for imported decl, chase down
+    if (deep && decl && GetASTXXport()->IsImportedDeclId(mHidx, decl->GetNodeId())) {
+      decl = GetASTXXport()->GetExportedNodeFromImportedNode(mHidx, decl->GetNodeId());
+    }
+    return decl;
+  }
+
+  ASTScope *scope = node->GetScope();
+  MASSERT(scope && "null scope");
+
+  TreeNode *tree = scope->GetTree();
+  unsigned stridx = node->GetStrIdx();
+
+  decl = mINFO->GetField(tree->GetNodeId(), stridx);
+
+  if (!decl) {
+    decl = scope->FindDeclOf(stridx);
+  }
+
+  if (!decl && deep) {
+    decl = scope->FindExportedDeclOf(stridx);
+  }
+
+  if (decl) {
+    AddNodeId2DeclMap(node->GetNodeId(), decl);
+  }
+
+  if (deep && decl && GetASTXXport()->IsImportedDeclId(mHidx, decl->GetNodeId())) {
+    decl = GetASTXXport()->GetExportedNodeFromImportedNode(mHidx, decl->GetNodeId());
+  }
+
+  return decl;
+}
+
+TreeNode *Module_Handler::FindType(IdentifierNode *node) {
+  if (!node) {
+    return NULL;
+  }
+  ASTScope *scope = node->GetScope();
+  MASSERT(scope && "null scope");
+  TreeNode *type = scope->FindTypeOf(node->GetStrIdx());
+
+  return type;
+}
+
+// input a node ==> return the function node that contains it
+TreeNode *Module_Handler::FindFunc(TreeNode *node) {
+  ASTScope *scope = node->GetScope();
+  MASSERT(scope && "null scope");
+  while (scope) {
+    TreeNode *sn = scope->GetTree();
+    if (sn->IsFunction()) {
+      return sn;
+    }
+    scope = scope->GetParent();
+  }
+  return NULL;
+}
+
+void Module_Handler::AddDirectField(TreeNode *node) {
+  mDirectFieldSet.insert(node->GetNodeId());
+}
+
+bool Module_Handler::IsDirectField(TreeNode *node) {
+  return mDirectFieldSet.find(node->GetNodeId()) != mDirectFieldSet.end();
+}
+
+// array's element typeid
+TypeId Module_Handler::GetArrayElemTypeId(unsigned nid) {
+  TypeId tid = TY_None;
+  if (mArrayDeclId2EleTypeIdMap.find(nid) != mArrayDeclId2EleTypeIdMap.end()) {
+    tid = mArrayDeclId2EleTypeIdMap[nid];
+  }
+  return tid;
+}
+
+void Module_Handler::SetArrayElemTypeId(unsigned nid, TypeId tid) {
+  mArrayDeclId2EleTypeIdMap[nid] = tid;
+}
+
+// array's element typeidx
+unsigned Module_Handler::GetArrayElemTypeIdx(unsigned nid) {
+  unsigned tidx = 0;
+  if (mArrayDeclId2EleTypeIdxMap.find(nid) != mArrayDeclId2EleTypeIdxMap.end()) {
+    tidx = mArrayDeclId2EleTypeIdxMap[nid];
+  }
+  return tidx;
+}
+
+void Module_Handler::SetArrayElemTypeIdx(unsigned nid, unsigned tidx) {
+  mArrayDeclId2EleTypeIdxMap[nid] = tidx;
+}
+
+DimensionNode *Module_Handler::GetArrayDim(unsigned nid) {
+  DimensionNode *dim = NULL;
+  if (mArrayDeclId2DimMap.find(nid) != mArrayDeclId2DimMap.end()) {
+    dim = mArrayDeclId2DimMap[nid];
+  }
+  return dim;
+}
+
+void Module_Handler::SetArrayDim(unsigned nid,
DimensionNode *dim) { + mArrayDeclId2DimMap[nid] = dim; +} + +void Module_Handler::AddGeneratorUsed(unsigned nid, FunctionNode *func) { + mGeneratorUsedMap[nid] = func; +} + +bool Module_Handler::IsGeneratorUsed(unsigned nid) { + return (mGeneratorUsedMap.find(nid) != mGeneratorUsedMap.end()); +} + +FunctionNode *Module_Handler::GetGeneratorUsed(unsigned nid) { + if (mGeneratorUsedMap.find(nid) != mGeneratorUsedMap.end()) { + return mGeneratorUsedMap[nid]; + } + return NULL; +} + +bool Module_Handler::UpdateGeneratorUsed(unsigned target, unsigned src) { + if (mGeneratorUsedMap.find(src) != mGeneratorUsedMap.end()) { + mGeneratorUsedMap[target] = mGeneratorUsedMap[src]; + return true; + } + return false; +} + +bool Module_Handler::IsFromLambda(TreeNode *node) { + if (!node) { + return false; + } + unsigned nid = node->GetNodeId(); + return mINFO->IsFromLambda(nid); +} + +bool Module_Handler::IsDef(TreeNode *node) { + return mDFA->IsDef(node->GetNodeId()); +} + +bool Module_Handler::IsCppField(TreeNode *node) { + return mUtil->IsCppField(node); +} + +void Module_Handler::Dump(char *msg) { + std::cout << msg << " : " << std::endl; + CfgFunc *func = GetCfgFunc(); + func->Dump(); +} + +void Module_Handler::DumpArrayElemTypeIdMap() { + std::cout << "================= ArrayDeclId2EleTypeIdMap ==========" << std::endl; + for (auto it : mArrayDeclId2EleTypeIdMap) { + std::cout << "nodeid : " << it.first << " " + << AstDump::GetEnumTypeId(it.second) << std::endl; + } +} + +} diff --git a/src/MapleFE/astopt/src/ast_info.cpp b/src/MapleFE/astopt/src/ast_info.cpp new file mode 100644 index 0000000000000000000000000000000000000000..5c8ef881ec5d39d5ac2f54f6a6cae737fbb8cb82 --- /dev/null +++ b/src/MapleFE/astopt/src/ast_info.cpp @@ -0,0 +1,977 @@ +/* +* Copyright (C) [2021-2022] Futurewei Technologies, Inc. All rights reverved. +* +* OpenArkFE is licensed under the Mulan PSL v2. +* You can use this software according to the terms and conditions of the Mulan PSL v2. +* You may obtain a copy of Mulan PSL v2 at: +* +* http://license.coscl.org.cn/MulanPSL2 +* +* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +* FIT FOR A PARTICULAR PURPOSE. +* See the Mulan PSL v2 for more details. 
+*/ + +#include +#include +#include +#include "ast_handler.h" +#include "typetable.h" +#include "ast_util.h" +#include "ast_info.h" +#include "ast_xxport.h" + +namespace maplefe { + +void AST_INFO::CollectInfo() { + MSGNOLOC0("============== ASTInfo =============="); + ModuleNode *module = mHandler->GetASTModule(); + for(unsigned i = 0; i < module->GetTreesNum(); i++) { + TreeNode *it = module->GetTree(i); + it->SetParent(module); + } + + AddBuiltInTypes(); + + // collect import/export info + MSGNOLOC0("============== XXport info =============="); + mHandler->GetASTXXport()->CollectXXportInfo(mHandler->GetHidx()); + + MSGNOLOC0("============== Fill node info =============="); + FillNodeInfoVisitor visitor_node(mHandler, mFlags, true); + visitor_node.Visit(module); + + mStrIdxVisitor = new FindStrIdxVisitor(mHandler, mFlags, true); + + // collect class/interface/struct decl + mPass = 0; + MSGNOLOC0("============== Collect class/interface/struct =============="); + ClassStructVisitor visitor(mHandler, mFlags, true); + visitor.Visit(module); + + if (module->GetSrcLang() != SrcLangTypeScript) { + return; + } + + // collect type parameters + // sort fields according to the field name stridx + // it also include first visit to named types to reduce + // creation of unnecessary anonymous struct + mPass = 1; + MSGNOLOC0("============== type parameter, super fields and Sort fields =============="); + visitor.Visit(module); + + // merge class/interface/struct decl first to reduce + // introducing of unnecessary anonymous structs + mPass = 2; + MSGNOLOC0("============== merge class/interface/struct =============="); + visitor.Visit(module); + + // collect function types + FunctionVisitor func_visitor(mHandler, mFlags, true); + func_visitor.Visit(module); +} + +void AST_INFO::AddBuiltInTypes() { + unsigned size = gTypeTable.size(); + for (unsigned idx = 1; idx < size; idx++) { + TreeNode *node = gTypeTable.GetTypeFromTypeIdx(idx); + if (node->IsUserType()) { + mStrIdx2TypeIdxMap[node->GetStrIdx()] = node->GetTypeIdx(); + } + } + + // add language builtin types + TreeNode *node = NULL; + unsigned stridx = 0; +#define BUILTIN(T) \ + stridx = gStringPool.GetStrIdx(#T);\ + if (mStrIdx2TypeIdxMap.find(stridx) == mStrIdx2TypeIdxMap.end()) {\ + node = gTypeTable.CreateBuiltinType(#T, TY_Class);\ + gTypeTable.AddType(node);\ + mStrIdx2TypeIdxMap[stridx] = node->GetTypeIdx();\ + } +#include "lang_builtin.def" +} + +bool AST_INFO::IsBuiltInType(TreeNode *node) { + return mStrIdx2TypeIdxMap.find(node->GetStrIdx()) != mStrIdx2TypeIdxMap.end(); +} + +unsigned AST_INFO::GetBuiltInTypeIdx(unsigned stridx) { + if (mStrIdx2TypeIdxMap.find(stridx) != mStrIdx2TypeIdxMap.end()) { + return mStrIdx2TypeIdxMap[stridx]; + } + return 0; +} + +unsigned AST_INFO::GetBuiltInTypeIdx(TreeNode *node) { + unsigned stridx = node->GetStrIdx(); + return GetBuiltInTypeIdx(stridx); +} + +TypeId AST_INFO::GetTypeId(TreeNode *node) { + return node->GetTypeId(); +} + +unsigned AST_INFO::GetFieldsSize(TreeNode *node, bool native) { + unsigned size = 0; + unsigned nid = node->GetNodeId(); + if (!native && mStructId2FieldsMap.find(nid) != mStructId2FieldsMap.end()) { + size = mStructId2FieldsMap[nid].GetNum(); + return size; + } + switch (node->GetKind()) { + case NK_StructLiteral: + size = static_cast(node)->GetFieldsNum(); + break; + case NK_Struct: + size = static_cast(node)->GetFieldsNum(); + break; + case NK_Class: + size = static_cast(node)->GetFieldsNum(); + break; + case NK_Interface: + size = static_cast(node)->GetFieldsNum(); + 
break; + default: + break; + } + return size; +} + +TreeNode *AST_INFO::GetField(TreeNode *node, unsigned i, bool native) { + TreeNode *fld = NULL; + unsigned nid = node->GetNodeId(); + if (!native && mStructId2FieldsMap.find(nid) != mStructId2FieldsMap.end()) { + fld = mStructId2FieldsMap[nid].ValueAtIndex(i); + return fld; + } + switch (node->GetKind()) { + case NK_StructLiteral: + fld = static_cast(node)->GetField(i); + break; + case NK_Struct: + fld = static_cast(node)->GetField(i); + break; + case NK_Class: + fld = static_cast(node)->GetField(i); + break; + case NK_Interface: + fld = static_cast(node)->GetField(i); + break; + default: + break; + } + return fld; +} + +void AST_INFO::AddField(TreeNode *container, TreeNode *node) { + unsigned nid = container->GetNodeId(); + if (mStructId2FieldsMap.find(nid) == mStructId2FieldsMap.end()) { + for (unsigned i = 0; i < GetFieldsSize(container, true); i++) { + TreeNode *fld = GetField(container, i, true); + mStructId2FieldsMap[nid].PushBack(fld); + } + } + AddField(nid, node); +} + +void AST_INFO::AddField(unsigned nid, TreeNode *node) { + mStructId2FieldsMap[nid].PushBack(node); +} + +// get the filed in nid, including its super, with name stridx +// return NULL if not found +TreeNode *AST_INFO::GetField(unsigned nid, unsigned stridx) { + if (mStructId2FieldsMap.find(nid) != mStructId2FieldsMap.end()) { + unsigned size = mStructId2FieldsMap[nid].GetNum(); + for (unsigned i = 0; i < size; i++) { + TreeNode *fld = mStructId2FieldsMap[nid].ValueAtIndex(i); + if (fld->GetStrIdx() == stridx) { + return fld; + } + } + } + return NULL; +} + +unsigned AST_INFO::GetSuperSize(TreeNode *node, unsigned idx) { + unsigned size1 = 0; + unsigned size2 = 0; + switch (node->GetKind()) { + case NK_Struct: { + size1 = static_cast(node)->GetSupersNum(); + size2 = 0; + break; + } + case NK_Class: { + size1 = static_cast(node)->GetSuperClassesNum(); + size2 = static_cast(node)->GetSuperInterfacesNum(); + break; + } + case NK_Interface: { + size1 = static_cast(node)->GetSuperInterfacesNum(); + size2 = 0; + break; + } + default: + break; + } + return (idx == 1) ? size1 : size2; +} + +TreeNode *AST_INFO::GetSuper(TreeNode *node, unsigned i, unsigned idx) { + TreeNode *fld1 = NULL; + TreeNode *fld2 = NULL; + switch (node->GetKind()) { + case NK_Struct: + fld1 = static_cast(node)->GetSuper(i); + fld2 = NULL; + break; + case NK_Class: { + fld1 = (idx == 1) ? static_cast(node)->GetSuperClass(i) : NULL; + fld2 = (idx == 2) ? static_cast(node)->GetSuperInterface(i) : NULL; + break; + } + case NK_Interface: + fld1 = static_cast(node)->GetSuperInterfaceAtIndex(i); + fld2 = NULL; + break; + default: + break; + } + return (idx == 1) ? 
fld1 : fld2;
+}
+
+bool AST_INFO::IsInterface(TreeNode *node) {
+  bool isI = node->IsInterface();
+  if (node->IsStruct()) {
+    isI = isI || (static_cast<StructNode *>(node)->GetProp() == SProp_TSInterface);
+  }
+  return isI;
+}
+
+bool AST_INFO::IsTypeCompatible(TreeNode *node1, TreeNode *node2) {
+  if (node1 == node2) {
+    return true;
+  }
+  // only one is NULL
+  if ((!node1 && node2) || (node1 && !node2)) {
+    return false;
+  }
+  // not same kind
+  if (node1->GetKind() != node2->GetKind()) {
+    return false;
+  }
+  // at least one is prim
+  if (node1->IsPrimType() || node2->IsPrimType()) {
+    TypeId tid_field = GetTypeId(node2);
+    TypeId tid_target = GetTypeId(node1);
+    return (tid_field == tid_target);
+  }
+  bool result = false;
+  // same kind
+  NodeKind nk = node1->GetKind();
+  switch (nk) {
+    case NK_UserType: {
+      UserTypeNode *ut1 = static_cast<UserTypeNode *>(node1);
+      UserTypeNode *ut2 = static_cast<UserTypeNode *>(node2);
+      result = IsTypeCompatible(ut1->GetId(), ut2->GetId());
+      break;
+    }
+    case NK_PrimType: {
+      PrimTypeNode *pt1 = static_cast<PrimTypeNode *>(node1);
+      PrimTypeNode *pt2 = static_cast<PrimTypeNode *>(node2);
+      result = (pt1->GetPrimType() == pt2->GetPrimType());
+      break;
+    }
+    case NK_PrimArrayType: {
+      PrimArrayTypeNode *pat1 = static_cast<PrimArrayTypeNode *>(node1);
+      PrimArrayTypeNode *pat2 = static_cast<PrimArrayTypeNode *>(node2);
+      result = IsTypeCompatible(pat1->GetPrim(), pat2->GetPrim());
+      if (!result) {
+        break;
+      }
+      result = IsTypeCompatible(pat1->GetDims(), pat2->GetDims());
+      break;
+    }
+    case NK_Dimension: {
+      DimensionNode *dim1 = static_cast<DimensionNode *>(node1);
+      DimensionNode *dim2 = static_cast<DimensionNode *>(node2);
+      result = (dim1->GetDimensionsNum() == dim2->GetDimensionsNum());
+      if (!result) {
+        break;
+      }
+      for (unsigned i = 0; i < dim1->GetDimensionsNum(); i++) {
+        result = result && (dim1->GetDimension(i) == dim2->GetDimension(i));
+      }
+      break;
+    }
+    default: {
+      break;
+    }
+  }
+  return result;
+}
+
+bool AST_INFO::IsTypeIdCompatibleTo(TypeId field, TypeId target) {
+  if (target == TY_None) {
+    return true;
+  } else if (target == TY_Number) {
+    return field == TY_Int || field == TY_Long || field == TY_Float || field == TY_Double;
+  } else {
+    return field == target;
+  }
+  return false;
+}
+
+// check if "field" is compatible to "target"
+bool AST_INFO::IsFieldCompatibleTo(TreeNode *field, TreeNode *target) {
+  if (!target->IsIdentifier()) {
+    return false;
+  }
+  unsigned stridx_target = target->GetStrIdx();
+  IdentifierNode *id_target = static_cast<IdentifierNode *>(target);
+
+  unsigned stridx_field = 0;
+  bool result = false;
+
+  // is identifier
+  if (field->IsIdentifier()) {
+    stridx_field = field->GetStrIdx();
+    if (stridx_field != stridx_target) {
+      return false;
+    }
+    IdentifierNode *id_field = static_cast<IdentifierNode *>(field);
+    TreeNode *type_target = id_target->GetType();
+    TreeNode *type_field = id_field->GetType();
+    result = IsTypeCompatible(type_field, type_target);
+  }
+  // field literal
+  else if (field->IsFieldLiteral()) {
+    FieldLiteralNode *fln = static_cast<FieldLiteralNode *>(field);
+    TreeNode *name = fln->GetFieldName();
+    if (name && name->IsIdentifier()) {
+      stridx_field = name->GetStrIdx();
+    }
+    if (stridx_field != stridx_target) {
+      return false;
+    }
+    TreeNode *lit = fln->GetLiteral();
+    if (!lit->IsLiteral()) {
+      return false;
+    }
+    LiteralNode *ln = static_cast<LiteralNode *>(lit);
+    TypeId tid_field = GetTypeId(ln);
+    TypeId tid_target = GetTypeId(id_target->GetType());
+    result = IsTypeIdCompatibleTo(tid_field, tid_target);
+  }
+
+  return result;
+}
+
+TreeNode *AST_INFO::GetCanonicStructNode(TreeNode *node) {
+  // node with super does not map to others
+  if (WithSuper(node)) {
+    return node;
+  }
+
+  unsigned size = GetFieldsSize(node);
+  bool
isI0 = IsInterface(node);
+
+  for (auto s: mFieldNum2StructNodeMap[size]) {
+    // found itself
+    if (node == s) {
+      return s;
+    }
+
+    bool isI = IsInterface(s);
+    if (!node->IsStructLiteral()) {
+      // skip if one is interface but other is not
+      if ((isI0 && !isI) || (!isI0 && isI)) {
+        continue;
+      }
+    }
+    bool match = true;
+    // check fields
+    for (unsigned fid = 0; fid < size; fid++) {
+      TreeNode *f0 = GetField(node, fid);
+      TreeNode *f1 = GetField(s, fid);
+      if (!IsFieldCompatibleTo(f0, f1)) {
+        match = false;
+        break;
+      }
+    }
+
+    if (match) {
+      node->SetTypeIdx(s->GetTypeIdx());
+      return s;
+    }
+  }
+
+  // do not proceed if node contains type parameter
+  if (WithTypeParamFast(node)) {
+    return node;
+  }
+
+  // no match found
+  unsigned stridx = node->GetStrIdx();
+
+  // node as anonymous struct will be added to module scope
+  if (GetNameAnonyStruct() && stridx == 0) {
+    TreeNode *anony = GetAnonymousStruct(node);
+    if (!anony) {
+      return node;
+    } else {
+      node->SetTypeIdx(anony->GetTypeIdx());
+      node = anony;
+    }
+  }
+  mFieldNum2StructNodeMap[size].insert(node);
+
+  return node;
+}
+
+IdentifierNode *AST_INFO::CreateIdentifierNode(unsigned stridx) {
+  IdentifierNode *node = mHandler->NewTreeNode<IdentifierNode>();
+  node->SetStrIdx(stridx);
+  return node;
+}
+
+UserTypeNode *AST_INFO::CreateUserTypeNode(unsigned stridx, ASTScope *scope) {
+  unsigned tidx = GetBuiltInTypeIdx(stridx);
+  IdentifierNode *node = CreateIdentifierNode(stridx);
+  SetTypeId(node, TY_Class);
+  SetTypeIdx(node, tidx);
+  if (scope) {
+    node->SetScope(scope);
+  }
+
+  UserTypeNode *utype = mHandler->NewTreeNode<UserTypeNode>();
+  utype->SetId(node);
+  utype->SetStrIdx(stridx);
+  SetTypeId(utype, TY_Class);
+  SetTypeIdx(utype, tidx);
+  node->SetParent(utype);
+  return utype;
+}
+
+UserTypeNode *AST_INFO::CreateUserTypeNode(IdentifierNode *node) {
+  SetTypeId(node, TY_Class);
+  UserTypeNode *utype = mHandler->NewTreeNode<UserTypeNode>();
+  utype->SetId(node);
+  utype->SetStrIdx(node->GetStrIdx());
+  SetTypeId(utype, TY_Class);
+  node->SetParent(utype);
+  return utype;
+}
+
+TypeAliasNode *AST_INFO::CreateTypeAliasNode(TreeNode *to, TreeNode *from) {
+  UserTypeNode *utype1 = CreateUserTypeNode(from->GetStrIdx());
+  UserTypeNode *utype2 = CreateUserTypeNode(to->GetStrIdx());
+
+  TypeAliasNode *alias = mHandler->NewTreeNode<TypeAliasNode>();
+  alias->SetId(utype1);
+  alias->SetStrIdx(from->GetStrIdx());
+  alias->SetAlias(utype2);
+
+  return alias;
+}
+
+StructNode *AST_INFO::CreateStructFromStructLiteral(StructLiteralNode *node) {
+  StructNode *newnode = mHandler->NewTreeNode<StructNode>();
+  SetTypeId(newnode, TY_Class);
+
+  for (unsigned i = 0; i < node->GetFieldsNum(); i++) {
+    FieldLiteralNode *fl = node->GetField(i);
+    TreeNode *name = fl->GetFieldName();
+    if (!name || !name->IsIdentifier()) {
+      newnode->Release();
+      return NULL;
+    }
+
+    IdentifierNode *fid = CreateIdentifierNode(name->GetStrIdx());
+    newnode->AddField(fid);
+    mHandler->AddDirectField(fid);
+
+    TreeNode *lit = fl->GetLiteral();
+    if (lit && lit->IsLiteral()) {
+      TypeId tid = GetTypeId(lit);
+      PrimTypeNode *type = mHandler->NewTreeNode<PrimTypeNode>();
+      type->SetPrimType(tid);
+      fid->SetType(type);
+    } else {
+      NOTYETIMPL("StructLiteralNode literal field kind");
+    }
+  }
+  return newnode;
+}
+
+unsigned AST_INFO::GetAnonymousName() {
+  std::string str("AnonymousStruct__");
+  str += std::to_string(mNum++);
+  unsigned stridx = gStringPool.GetStrIdx(str);
+  return stridx;
+}
+
+TreeNode *AST_INFO::GetAnonymousStruct(TreeNode *node) {
+  unsigned stridx = GetAnonymousName();
+  TreeNode *newnode = node;
+  if (newnode->IsStructLiteral()) {
+
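    // a struct literal is first converted into a StructNode so it can be named and registered as a type
+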
StructLiteralNode *sl = static_cast(node); + newnode = CreateStructFromStructLiteral(sl); + if (!newnode) { + return NULL; + } + } + newnode->SetStrIdx(stridx); + + // for struct node, set structid + if (newnode->IsStruct()) { + StructNode *snode = static_cast(newnode); + IdentifierNode *id = snode->GetStructId(); + if (!id) { + id = CreateIdentifierNode(0); + id->SetScope(snode->GetScope()); + snode->SetStructId(id); + } + + // set stridx for structid + id->SetStrIdx(stridx); + } + + ModuleNode *module = mHandler->GetASTModule(); + gTypeTable.AddType(newnode); + module->GetScope()->AddType(newnode); + module->AddTreeFront(newnode); + return newnode; +} + +bool AST_INFO::WithStrIdx(TreeNode *node, unsigned stridx) { + mStrIdxVisitor->ResetFound(); + mStrIdxVisitor->SetStrIdx(stridx); + mStrIdxVisitor->Visit(node); + return mStrIdxVisitor->GetFound(); +} + +bool AST_INFO::WithTypeParam(TreeNode *node) { + for (auto idx: mTypeParamStrIdxSet) { + if (WithStrIdx(node, idx)) { + return true; + } + } + return false; +} + +bool AST_INFO::WithThis(TreeNode *node) { + unsigned idx = gStringPool.GetStrIdx("this"); + mStrIdxVisitor->SetCheckThis(true); + if (WithStrIdx(node, idx)) { + return true; + } + return false; +} + +bool AST_INFO::WithTypeParamFast(TreeNode *node) { + return (mWithTypeParamNodeSet.find(node->GetNodeId()) != mWithTypeParamNodeSet.end()); +} + +bool AST_INFO::WithSuper(TreeNode *node) { + unsigned supernum = 0; + switch (node->GetKind()) { + case NK_Class: + supernum = static_cast(node)->GetSuperClassesNum(); + break; + case NK_Interface: + supernum = static_cast(node)->GetSuperInterfacesNum(); + break; + default: + break; + } + return (supernum != 0); +} + +template +void AST_INFO::ExtendFields(T1 *node, TreeNode *sup) { + if (sup == NULL) { + // let sup be node itself + sup = node; + } + unsigned nid = node->GetNodeId(); + if (sup && sup->IsUserType()) { + sup = static_cast(sup)->GetId(); + } + if (sup && sup->IsIdentifier()) { + sup = GetStructFromStrIdx(sup->GetStrIdx()); + } + if (!sup) { + return; + } + for (unsigned i = 0; i < GetFieldsSize(sup, true); i++) { + TreeNode *fld = GetField(sup, i, true); + AddField(nid, fld); + } + for (unsigned i = 0; i < GetSuperSize(sup, 1); i++) { + TreeNode *s = GetSuper(sup, i, 1); + ExtendFields(node, s); + } + for (unsigned i = 0; i < GetSuperSize(sup, 2); i++) { + TreeNode *s = GetSuper(sup, i, 2); + ExtendFields(node, s); + } +} + +template +static void DumpVec(std::vector> vec) { + unsigned size = vec.size(); + std::cout << "================ Dump Vec ================" << std::endl; + for (unsigned i = 0; i < size; i++) { + std::cout << "item #" << i + << " nodeid " << vec[i].second->GetNodeId() + << " stridx " << vec[i].second->GetStrIdx() + << std::endl; + } +} + +template +void AST_INFO::SortFields(T1 *node) { + std::vector> vec; + unsigned size = GetFieldsSize(node, true); + if (size) { + for (unsigned i = 0; i < size; i++) { + T2 *fld = node->GetField(i); + unsigned stridx = fld->GetStrIdx(); + std::pair p(stridx, fld); + vec.push_back(p); + } + std::sort(vec.begin(), vec.end()); + for (unsigned i = 0; i < size; i++) { + node->SetField(i, vec[i].second); + } + } + + unsigned nid = node->GetNodeId(); + if (mStructId2FieldsMap.find(nid) != mStructId2FieldsMap.end()) { + std::vector> vec; + unsigned size = mStructId2FieldsMap[nid].GetNum(); + for (unsigned i = 0; i < size; i++) { + TreeNode *fld = mStructId2FieldsMap[nid].ValueAtIndex(i); + unsigned stridx = fld->GetStrIdx(); + std::pair p(stridx, fld); + vec.push_back(p); + } + 
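// sort the cached (stridx, field) pairs by field-name stridx and write them back in order
+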
std::sort(vec.begin(), vec.end()); + for (unsigned i = 0; i < size; i++) { + *(mStructId2FieldsMap[nid].RefAtIndex(i)) = vec[i].second; + } + } +} + +void AST_INFO::SetTypeId(TreeNode *node, TypeId tid) { + mHandler->GetUtil()->SetTypeId(node, tid); +} + +void AST_INFO::SetTypeIdx(TreeNode *node, unsigned tidx) { + mHandler->GetUtil()->SetTypeIdx(node, tidx); +} + +IdentifierNode *FillNodeInfoVisitor::VisitIdentifierNode(IdentifierNode *node) { + (void) AstVisitor::VisitIdentifierNode(node); + TreeNode *type = node->GetType(); + if (type) { + mInfo->SetTypeId(node, type->GetTypeId()); + mInfo->SetTypeIdx(node, type->GetTypeIdx()); + } + return node; +} + +FunctionNode *FillNodeInfoVisitor::VisitFunctionNode(FunctionNode *node) { + (void) AstVisitor::VisitFunctionNode(node); + TreeNode *type = node->GetRetType(); + if (type) { + mInfo->SetTypeId(node, type->GetTypeId()); + mInfo->SetTypeIdx(node, type->GetTypeIdx()); + } else if (node->IsGenerator()) { + unsigned stridx = gStringPool.GetStrIdx("Generator"); + unsigned tidx = mInfo->GetBuiltInTypeIdx(stridx); + UserTypeNode *ut = mInfo->CreateUserTypeNode(stridx); + node->SetRetType(ut); + } + return node; +} + +LiteralNode *FillNodeInfoVisitor::VisitLiteralNode(LiteralNode *node) { + (void) AstVisitor::VisitLiteralNode(node); + LitData data = node->GetData(); + LitId id = data.mType; + switch (id) { + case LT_IntegerLiteral: + mInfo->SetTypeId(node, TY_Int); + mInfo->SetTypeIdx(node, TY_Int); + break; + case LT_FPLiteral: + mInfo->SetTypeId(node, TY_Float); + mInfo->SetTypeIdx(node, TY_Float); + break; + case LT_DoubleLiteral: + mInfo->SetTypeId(node, TY_Double); + mInfo->SetTypeIdx(node, TY_Double); + break; + case LT_BooleanLiteral: + mInfo->SetTypeId(node, TY_Boolean); + mInfo->SetTypeIdx(node, TY_Boolean); + break; + case LT_CharacterLiteral: + mInfo->SetTypeId(node, TY_Char); + mInfo->SetTypeIdx(node, TY_Char); + break; + case LT_StringLiteral: + mInfo->SetTypeId(node, TY_String); + mInfo->SetTypeIdx(node, TY_String); + if (node->GetStrIdx() == 0) { + node->SetStrIdx(data.mData.mStrIdx); + } + break; + case LT_NullLiteral: + mInfo->SetTypeId(node, TY_Null); + break; + case LT_SuperLiteral: + mInfo->SetTypeId(node, TY_Object); + break; + case LT_VoidLiteral: + mInfo->SetTypeId(node, TY_Void); + break; + case LT_ThisLiteral: + break; + default: + break; + } + return node; +} + +PrimTypeNode *FillNodeInfoVisitor::VisitPrimTypeNode(PrimTypeNode *node) { + (void) AstVisitor::VisitPrimTypeNode(node); + TypeId prim = node->GetPrimType(); + bool isprim = gTypeTable.IsPrimTypeId(prim); + + if (isprim) { + mInfo->SetTypeIdx(node, prim); + } else { + TreeNode *type = gTypeTable.GetTypeFromTypeId(prim); + mInfo->SetTypeIdx(node, type->GetTypeIdx()); + } + return node; +} + +UserTypeNode *FillNodeInfoVisitor::VisitUserTypeNode(UserTypeNode *node) { + (void) AstVisitor::VisitUserTypeNode(node); + TreeNode *id = node->GetId(); + if (id) { + unsigned tidx = mInfo->GetBuiltInTypeIdx(id); + if (tidx) { + mInfo->SetTypeIdx(id, tidx); + } + if (!id->IsTypeIdNone()) { + mInfo->SetTypeId(node, id->GetTypeId()); + mInfo->SetTypeIdx(node, id->GetTypeIdx()); + } + } + return node; +} + +StructLiteralNode *ClassStructVisitor::VisitStructLiteralNode(StructLiteralNode *node) { + mInfo->SetTypeId(node, TY_Class); + (void) AstVisitor::VisitStructLiteralNode(node); + if (mInfo->GetPass() == 0) { + // field literal stridx to its ids' + for (unsigned i = 0; i < node->GetFieldsNum(); i++) { + FieldLiteralNode *fln = node->GetField(i); + TreeNode *name = 
fln->GetFieldName(); + if (name) { + fln->SetStrIdx(name->GetStrIdx()); + } + } + } else if (mInfo->GetPass() == 1) { + if (mInfo->WithTypeParam(node)) { + mInfo->InsertWithTypeParamNode(node); + } + // sort fields + mInfo->SortFields(node); + } else if (mInfo->GetPass() == 2) { + // create a anonymous struct for it + mInfo->SetNameAnonyStruct(true); + TreeNode *csn = mInfo->GetCanonicStructNode(node); + if (csn && csn != node) { + VisitTreeNode(csn); + } + } + return node; +} + +StructNode *ClassStructVisitor::VisitStructNode(StructNode *node) { + if (node->GetProp() != SProp_TSEnum) { + mInfo->SetTypeId(node, TY_Class); + if (node->GetStructId()) { + mInfo->SetTypeId(node->GetStructId(), TY_Class); + } + } + (void) AstVisitor::VisitStructNode(node); + if (mInfo->GetPass() == 0) { + gTypeTable.AddType(node); + IdentifierNode *id = node->GetStructId(); + if (id && node->GetStrIdx() == 0) { + node->SetStrIdx(id->GetStrIdx()); + } + if (node->GetStrIdx() == 0) { + node->SetStrIdx(mInfo->GetAnonymousName()); + } + mInfo->SetStrIdx2Struct(node->GetStrIdx(), node); + for (unsigned i = 0; i < node->GetFieldsNum(); ++i) { + if (TreeNode *t = node->GetField(i)) { + if (!t->IsStrIndexSig()) { + mHandler->AddDirectField(t); + } + } + } + } else if (mInfo->GetPass() == 1) { + if (mInfo->WithTypeParam(node)) { + mInfo->InsertWithTypeParamNode(node); + } + // extends + mInfo->ExtendFields(node, NULL); + // sort fields + mInfo->SortFields(node); + } else if (mInfo->GetPass() == 2) { + // skip getting canonical type if not only fields + if (node->GetMethodsNum() || node->GetStrIdx() == 0) { + return node; + } + mInfo->GetCanonicStructNode(node); + } + return node; +} + +ClassNode *ClassStructVisitor::VisitClassNode(ClassNode *node) { + mInfo->SetTypeId(node, TY_Class); + (void) AstVisitor::VisitClassNode(node); + if (mInfo->GetPass() == 0) { + if (node->GetStrIdx() == 0) { + node->SetStrIdx(mInfo->GetAnonymousName()); + } + gTypeTable.AddType(node); + mInfo->SetStrIdx2Struct(node->GetStrIdx(), node); + for (unsigned i = 0; i < node->GetFieldsNum(); ++i) { + if (TreeNode *t = node->GetField(i)) { + if (!t->IsStrIndexSig()) { + mHandler->AddDirectField(t); + } + } + } + } else if (mInfo->GetPass() == 1) { + if (mInfo->WithTypeParam(node)) { + mInfo->InsertWithTypeParamNode(node); + } + // include super class fields + mInfo->ExtendFields(node, NULL); + // sort fields + mInfo->SortFields(node); + } else if (mInfo->GetPass() == 2) { + // skip getting canonical type if not only fields + if (node->GetMethodsNum() || node->GetTypeParamsNum()) { + return node; + } + mInfo->GetCanonicStructNode(node); + } + return node; +} + +InterfaceNode *ClassStructVisitor::VisitInterfaceNode(InterfaceNode *node) { + mInfo->SetTypeId(node, TY_Class); + (void) AstVisitor::VisitInterfaceNode(node); + if (mInfo->GetPass() == 0) { + if (node->GetStrIdx() == 0) { + node->SetStrIdx(mInfo->GetAnonymousName()); + } + gTypeTable.AddType(node); + mInfo->SetStrIdx2Struct(node->GetStrIdx(), node); + for (unsigned i = 0; i < node->GetFieldsNum(); ++i) { + if (TreeNode *t = node->GetField(i)) { + if (!t->IsStrIndexSig()) { + mHandler->AddDirectField(t); + } + } + } + } else if (mInfo->GetPass() == 1) { + if (mInfo->WithTypeParam(node)) { + mInfo->InsertWithTypeParamNode(node); + } + // sort fields + mInfo->SortFields(node); + } else if (mInfo->GetPass() == 2) { + // skip getting canonical type if not only fields + if (node->GetMethodsNum() || node->GetSuperInterfacesNum()) { + return node; + } + mInfo->GetCanonicStructNode(node); + } + 
return node; +} + +TypeParameterNode *ClassStructVisitor::VisitTypeParameterNode(TypeParameterNode *node) { + (void) AstVisitor::VisitTypeParameterNode(node); + if (mInfo->GetPass() == 0) { + TreeNode *id = node->GetId(); + if (id) { + unsigned stridx = id->GetStrIdx(); + mInfo->InsertTypeParamStrIdx(stridx); + node->SetStrIdx(stridx); + } + } + return node; +} + +FunctionNode *ClassStructVisitor::VisitFunctionNode(FunctionNode *node) { + (void) AstVisitor::VisitFunctionNode(node); + if (mInfo->GetPass() == 1) { + TreeNode *body = node->GetBody(); + if (body && mInfo->WithThis(body)) { + mInfo->InsertWithThisFunc(node); + } + } + return node; +} + +FunctionNode *FunctionVisitor::VisitFunctionNode(FunctionNode *node) { + FunctionTypeNode *functype = mHandler->NewTreeNode(); + TreeNode *n = NULL; + for (unsigned i = 0; i < node->GetParamsNum(); i++) { + n = node->GetParam(i); + functype->AddParam(n ? n->GetTypeIdx() : 0); + } + + // add return + n = node->GetRetType(); + functype->AddParam(n ? n->GetTypeIdx() : 0); + + unsigned tidx = gTypeTable.GetOrCreateFunctionTypeIdx(functype); + node->SetTypeIdx(tidx); + + return node; +} + +IdentifierNode *FindStrIdxVisitor::VisitIdentifierNode(IdentifierNode *node) { + (void) AstVisitor::VisitIdentifierNode(node); + if (node->GetStrIdx() == mStrIdx) { + mFound = true; + } + return node; +} + +LiteralNode *FindStrIdxVisitor::VisitLiteralNode(LiteralNode *node) { + (void) AstVisitor::VisitLiteralNode(node); + if (mCheckThis && node->IsThis()) { + mFound = true; + } + return node; +} + +} diff --git a/src/MapleFE/astopt/src/ast_scp.cpp b/src/MapleFE/astopt/src/ast_scp.cpp new file mode 100644 index 0000000000000000000000000000000000000000..fe472ccd5efd3289ee290da38b51789e456a4f01 --- /dev/null +++ b/src/MapleFE/astopt/src/ast_scp.cpp @@ -0,0 +1,911 @@ +/* +* Copyright (C) [2021-2022] Futurewei Technologies, Inc. All rights reverved. +* +* OpenArkFE is licensed under the Mulan PSL v2. +* You can use this software according to the terms and conditions of the Mulan PSL v2. +* You may obtain a copy of Mulan PSL v2 at: +* +* http://license.coscl.org.cn/MulanPSL2 +* +* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +* FIT FOR A PARTICULAR PURPOSE. +* See the Mulan PSL v2 for more details. 
+*/ + +#include +#include +#include +#include "stringpool.h" +#include "astopt.h" +#include "ast_cfg.h" +#include "ast_scp.h" +#include "ast_xxport.h" +#include "typetable.h" + +namespace maplefe { + +void AST_SCP::ScopeAnalysis() { + MSGNOLOC0("============== ScopeAnalysis =============="); + BuildScope(); + RenameVar(); + AdjustASTWithScope(); +} + +void AST_SCP::BuildScope() { + MSGNOLOC0("============== BuildScope =============="); + BuildScopeVisitor visitor(mHandler, mFlags, true); + + // add all module themselves to the stridx to scope map + AST_Handler *asthandler = mHandler->GetASTHandler(); + for (int i = 0; i < asthandler->GetSize(); i++) { + Module_Handler *handler = asthandler->GetModuleHandler(i); + ModuleNode *module = handler->GetASTModule(); + ASTScope *scope = module->GetRootScope(); + module->SetScope(scope); + visitor.AddScopeMap(module->GetStrIdx(), scope); + } + + ModuleNode *module = mHandler->GetASTModule(); + ASTScope *scope = module->GetRootScope(); + + visitor.InitInternalTypes(); + + visitor.SetRunIt(true); + unsigned count = 0; + // run twice if necessary in case struct's scope is used before set + while (visitor.GetRunIt() && count < 2) { + visitor.SetRunIt(false); + while(!visitor.mScopeStack.empty()) { + visitor.mScopeStack.pop(); + } + visitor.mScopeStack.push(scope); + + while(!visitor.mUserScopeStack.empty()) { + visitor.mUserScopeStack.pop(); + } + visitor.mUserScopeStack.push(scope); + + visitor.Visit(module); + count++; + } + + if (mFlags & FLG_trace_3) { + gTypeTable.Dump(); + } +} + +void BuildScopeVisitor::AddDecl(ASTScope *scope, TreeNode *node) { + unsigned sid = scope->GetTree()->GetNodeId(); + if (mScope2DeclsMap[sid].find(node->GetNodeId()) == + mScope2DeclsMap[sid].end()) { + scope->AddDecl(node); + mScope2DeclsMap[sid].insert(node->GetNodeId()); + } +} + +void BuildScopeVisitor::AddImportedDecl(ASTScope *scope, TreeNode *node) { + unsigned sid = scope->GetTree()->GetNodeId(); + unsigned nid = node->GetNodeId(); + if (mScope2ImportedDeclsMap[sid].find(nid) == + mScope2ImportedDeclsMap[sid].end()) { + scope->AddImportDecl(node); + mScope2ImportedDeclsMap[sid].insert(nid); + mHandler->AddNodeId2DeclMap(nid, node); + } +} + +void BuildScopeVisitor::AddExportedDecl(ASTScope *scope, TreeNode *node) { + unsigned sid = scope->GetTree()->GetNodeId(); + unsigned nid = node->GetNodeId(); + if (mScope2ExportedDeclsMap[sid].find(nid) == + mScope2ExportedDeclsMap[sid].end()) { + scope->AddExportDecl(node); + mScope2ExportedDeclsMap[sid].insert(nid); + } +} + +void BuildScopeVisitor::AddType(ASTScope *scope, TreeNode *node) { + unsigned sid = scope->GetTree()->GetNodeId(); + if (mScope2TypesMap[sid].find(node->GetNodeId()) == + mScope2TypesMap[sid].end()) { + scope->AddType(node); + gTypeTable.AddType(node); + mScope2TypesMap[sid].insert(node->GetNodeId()); + } +} + +void BuildScopeVisitor::AddTypeAndDecl(ASTScope *scope, TreeNode *node) { + AddType(scope, node); + AddDecl(scope, node); +} + +void BuildScopeVisitor::InitInternalTypes() { + // add primitive and builtin types to root scope + ModuleNode *module = mHandler->GetASTModule(); + ASTScope *scope = module->GetRootScope(); + for (unsigned i = 1; i < gTypeTable.GetPreBuildSize(); i++) { + TreeNode *node = gTypeTable.GetTypeFromTypeIdx(i); + node->SetScope(scope); + if (node->IsUserType()) { + UserTypeNode *ut = static_cast(node); + TreeNode *id = ut->GetId(); + id->SetScope(scope); + AddType(scope, ut); + // id as a decl + AddDecl(scope, id); + } else { + AddType(scope, node); + } + } + + // add dummpy 
console.log() + unsigned size = gStringPool.GetSize(); + unsigned stridx = gStringPool.GetStrIdx("console"); + TreeNode *type = gTypeTable.GetTypeFromStrIdx(stridx); + if (!type) { + ClassNode *console = AddClass(stridx); + ASTScope *scp = NewScope(scope, console); + mStrIdx2ScopeMap[console->GetStrIdx()] = scp; + FunctionNode *log = AddFunction("log"); + log->SetTypeIdx(TY_Void); + console->AddMethod(log); + log->SetScope(scp); + AddDecl(scp, log); + } +} + +ClassNode *BuildScopeVisitor::AddClass(unsigned stridx, unsigned tyidx) { + ClassNode *node = mHandler->NewTreeNode(); + node->SetStrIdx(stridx); + node->SetTypeIdx(tyidx); + + ModuleNode *module = mHandler->GetASTModule(); + ASTScope *scope = module->GetRootScope(); + AddTypeAndDecl(scope, node); + return node; +} + +FunctionNode *BuildScopeVisitor::AddFunction(std::string name) { + FunctionNode *func = mHandler->NewTreeNode(); + unsigned idx = gStringPool.GetStrIdx(name); + func->SetStrIdx(idx); + + IdentifierNode *id = mHandler->NewTreeNode(); + id->SetStrIdx(idx); + func->SetFuncName(id); + + // add func to module scope + ModuleNode *module = mHandler->GetASTModule(); + ASTScope *scope = module->GetRootScope(); + AddDecl(scope, func); + id->SetScope(scope); + return func; +} + +ASTScope *BuildScopeVisitor::NewScope(ASTScope *parent, TreeNode *node) { + MASSERT(parent && "parent scope NULL"); + ASTScope *scope = node->GetScope(); + if (!scope || (scope && scope->GetTree() != node)) { + scope = mASTModule->NewScope(parent, node); + } + return scope; +} + +BlockNode *BuildScopeVisitor::VisitBlockNode(BlockNode *node) { + ASTScope *parent = mScopeStack.top(); + ASTScope *scope = NewScope(parent, node); + mScopeStack.push(scope); + BuildScopeBaseVisitor::VisitBlockNode(node); + mScopeStack.pop(); + return node; +} + +FunctionNode *BuildScopeVisitor::VisitFunctionNode(FunctionNode *node) { + ASTScope *parent = mScopeStack.top(); + // function is a decl + AddDecl(parent, node); + ASTScope *scope = NewScope(parent, node); + mScopeStack.push(scope); + mUserScopeStack.push(scope); + + // add parameters as decl + for(unsigned i = 0; i < node->GetParamsNum(); i++) { + TreeNode *it = node->GetParam(i); + AddDecl(scope, it); + + // added extra this is the parent with typeid TY_Class + if (it->GetStrIdx() == gStringPool.GetStrIdx("this") && it->GetTypeIdx() == 0) { + ASTScope *scp = scope; + while (scp && scp->GetTree()->GetTypeId() != TY_Class) { + scp = scp->GetParent(); + } + if (scp) { + it->SetTypeId(TY_Object); + it->SetTypeIdx(scp->GetTree()->GetTypeIdx()); + } + } + } + + for(unsigned i = 0; i < node->GetTypeParamsNum(); i++) { + TreeNode *it = node->GetTypeParamAtIndex(i); + + // add type parameter as decl + if (it->IsTypeParameter()) { + VisitTreeNode(it); + continue; + } + + // add it to scope's mTypes only if it is a new type + TreeNode *tn = it; + if (it->IsUserType()) { + UserTypeNode *ut = static_cast(it); + tn = ut->GetId(); + } + TreeNode *decl = NULL; + if (tn->IsIdentifier()) { + IdentifierNode *id = static_cast(tn); + // check if it is a known type + decl = scope->FindTypeOf(id->GetStrIdx()); + } + // add it if not found + if (!decl) { + AddType(scope, it); + } + } + BuildScopeBaseVisitor::VisitFunctionNode(node); + mUserScopeStack.pop(); + mScopeStack.pop(); + return node; +} + +LambdaNode *BuildScopeVisitor::VisitLambdaNode(LambdaNode *node) { + ASTScope *parent = mScopeStack.top(); + ASTScope *scope = NewScope(parent, node); + + // add parameters as decl + for(unsigned i = 0; i < node->GetParamsNum(); i++) { + TreeNode *it 
= node->GetParam(i); + AddDecl(scope, it); + } + mScopeStack.push(scope); + mUserScopeStack.push(scope); + BuildScopeBaseVisitor::VisitLambdaNode(node); + mUserScopeStack.pop(); + mScopeStack.pop(); + return node; +} + +ClassNode *BuildScopeVisitor::VisitClassNode(ClassNode *node) { + ASTScope *parent = mScopeStack.top(); + // inner class is a decl + if (parent) { + AddDecl(parent, node); + AddType(parent, node); + } + + ASTScope *scope = NewScope(parent, node); + if (node->GetStrIdx()) { + mStrIdx2ScopeMap[node->GetStrIdx()] = scope; + } + + // add fields as decl + for(unsigned i = 0; i < node->GetFieldsNum(); i++) { + TreeNode *fld = node->GetField(i); + if (fld->IsStrIndexSig()) { + StrIndexSigNode *sis = static_cast(fld); + TreeNode *key = sis->GetKey(); + if (key) { + AddDecl(scope, key); + } + } else if (fld->IsIdentifier()) { + AddDecl(scope, fld); + } else { + NOTYETIMPL("new type of class field"); + } + } + mScopeStack.push(scope); + BuildScopeBaseVisitor::VisitClassNode(node); + mScopeStack.pop(); + return node; +} + +InterfaceNode *BuildScopeVisitor::VisitInterfaceNode(InterfaceNode *node) { + ASTScope *parent = mScopeStack.top(); + // inner interface is a decl + if (parent) { + AddDecl(parent, node); + AddType(parent, node); + } + + ASTScope *scope = NewScope(parent, node); + if (node->GetStrIdx()) { + mStrIdx2ScopeMap[node->GetStrIdx()] = scope; + } + + // add fields as decl + for(unsigned i = 0; i < node->GetFieldsNum(); i++) { + TreeNode *fld = node->GetField(i); + if (fld->IsStrIndexSig()) { + continue; + } + if (fld->IsIdentifier()) { + AddDecl(scope, fld); + } + } + mScopeStack.push(scope); + BuildScopeBaseVisitor::VisitInterfaceNode(node); + mScopeStack.pop(); + return node; +} + +StructNode *BuildScopeVisitor::VisitStructNode(StructNode *node) { + ASTScope *parent = mScopeStack.top(); + // struct is a decl + if (parent) { + AddDecl(parent, node); + AddType(parent, node); + } + + ASTScope *scope = NewScope(parent, node); + mScopeStack.push(scope); + if (node->GetStructId() && node->GetStructId()->GetStrIdx()) { + mStrIdx2ScopeMap[node->GetStructId()->GetStrIdx()] = scope; + } + + // add fields as decl + for(unsigned i = 0; i < node->GetFieldsNum(); i++) { + TreeNode *fld = node->GetField(i); + if (fld->IsStrIndexSig()) { + continue; + } + if (fld && fld->IsIdentifier()) { + AddDecl(scope, fld); + } + } + + // add type parameter as decl + for(unsigned i = 0; i < node->GetTypeParamsNum(); i++) { + VisitTreeNode(node->GetTypeParamAtIndex(i)); + continue; + } + + // add string indexed field as decl + StrIndexSigNode *sig = node->GetStrIndexSig(); + if (sig) { + TreeNode *fld = sig->GetKey(); + sig->SetStrIdx(fld->GetStrIdx()); + if (fld && fld->IsIdentifier()) { + AddDecl(scope, fld); + } + } + + BuildScopeBaseVisitor::VisitStructNode(node); + mScopeStack.pop(); + return node; +} + +StructLiteralNode *BuildScopeVisitor::VisitStructLiteralNode(StructLiteralNode *node) { + ASTScope *parent = mScopeStack.top(); + + ASTScope *scope = NewScope(parent, node); + + // add fields as decl + for(unsigned i = 0; i < node->GetFieldsNum(); i++) { + FieldLiteralNode *fld = node->GetField(i); + TreeNode *name = fld->GetFieldName(); + if (name && name->IsIdentifier()) { + AddDecl(scope, name); + } + } + mScopeStack.push(scope); + BuildScopeBaseVisitor::VisitStructLiteralNode(node); + mScopeStack.pop(); + return node; +} + +NamespaceNode *BuildScopeVisitor::VisitNamespaceNode(NamespaceNode *node) { + node->SetTypeId(TY_Namespace); + TreeNode *id = node->GetId(); + unsigned stridx = 0; + if 
(id && id->GetStrIdx()) { + stridx = id->GetStrIdx(); + node->SetStrIdx(stridx); + } + + ASTScope *parent = mScopeStack.top(); + // inner namespace is a decl + if (parent) { + AddTypeAndDecl(parent, node); + } + + ASTScope *scope = NewScope(parent, node); + if (stridx != 0) { + mStrIdx2ScopeMap[stridx] = scope; + } + + mScopeStack.push(scope); + mUserScopeStack.push(scope); + BuildScopeBaseVisitor::VisitNamespaceNode(node); + mUserScopeStack.pop(); + mScopeStack.pop(); + return node; +} + +DeclNode *BuildScopeVisitor::VisitDeclNode(DeclNode *node) { + BuildScopeBaseVisitor::VisitDeclNode(node); + ASTScope *scope = NULL; + bool deep = true; + if (node->GetProp() == JS_Var) { + // promote to use function or module scope + scope = mUserScopeStack.top(); + + // update scope + node->SetScope(scope); + if (node->GetVar()) { + node->GetVar()->SetScope(scope); + } + } else { + scope = mScopeStack.top(); + // for body of function use function scope instead of body scope + TreeNode *b = node->GetParent(); + if (b && b->IsBlock()) { + TreeNode *f = b->GetParent(); + if (f && f->IsFunction()) { + scope = mUserScopeStack.top(); + } + } + // restrict to current scope + deep = false; + } + // check if it is already a decl in the scope + unsigned stridx = node->GetStrIdx(); + TreeNode *decl = scope->FindDeclOf(stridx, deep); + if (decl) { + if (decl != node) { + // replace with an assignment if apply + if (node->GetInit()) { + BinOperatorNode *bop = mHandler->NewTreeNode(); + bop->SetOprId(OPR_Assign); + IdentifierNode *id = mHandler->NewTreeNode(); + id->SetStrIdx(stridx); + id->SetScope(scope); + + bop->SetOpndA(id); + bop->SetOpndB(node->GetInit()); + node = (DeclNode *)bop; + } else { + node = NULL; + } + } + } else { + AddDecl(scope, node); + } + return node; +} + +UserTypeNode *BuildScopeVisitor::VisitUserTypeNode(UserTypeNode *node) { + BuildScopeBaseVisitor::VisitUserTypeNode(node); + ASTScope *scope = mScopeStack.top(); + TreeNode *p = node->GetParent(); + if (p) { + if (p->IsFunction()) { + // exclude function return type + FunctionNode *f = static_cast(p); + if (f->GetRetType() == node) { + return node; + } + } else if (p->IsTypeAlias()) { + // handled by typealias node + return node; + } + + if (p->IsScope()) { + // normal type decl + // check if it is already in typetable + TreeNode *id = node->GetId(); + if (id) { + TreeNode *decl = scope->FindTypeOf(id->GetStrIdx()); + if (!decl) { + AddType(scope, node); + } + } + } + } + return node; +} + +TypeAliasNode *BuildScopeVisitor::VisitTypeAliasNode(TypeAliasNode *node) { + ASTScope *scope = mScopeStack.top(); + BuildScopeBaseVisitor::VisitTypeAliasNode(node); + TreeNode *ut = node->GetId(); + if (ut->IsUserType()) { + TreeNode *id = static_cast(ut)->GetId(); + AddDecl(scope, id); + + // add to Alias type + mHandler->AddAliasType(id->GetNodeId()); + } + return node; +} + +ForLoopNode *BuildScopeVisitor::VisitForLoopNode(ForLoopNode *node) { + ASTScope *parent = mScopeStack.top(); + ASTScope *scope = parent; + if (node->GetProp() == FLP_JSIn) { + scope = NewScope(parent, node); + TreeNode *var = node->GetVariable(); + if (var) { + if (var->IsDecl()) { + AddDecl(scope, var); + } else { + NOTYETIMPL("VisitForLoopNode() FLP_JSIn var not decl"); + } + } + mScopeStack.push(scope); + } + + BuildScopeBaseVisitor::VisitForLoopNode(node); + + if (scope != parent) { + mScopeStack.pop(); + } + return node; +} + +FieldNode *BuildScopeVisitor::VisitFieldNode(FieldNode *node) { + BuildScopeBaseVisitor::VisitFieldNode(node); + + TreeNode *upper = 
node->GetUpper(); + TreeNode *field = node->GetField(); + + if (upper && upper->GetStrIdx()) { + ASTScope *scope = mStrIdx2ScopeMap[upper->GetStrIdx()]; + if (!scope && upper->IsIdentifier()) { + TreeNode *decl = mHandler->FindDecl(static_cast(upper)); + if (decl && decl->IsDecl()) { + decl = static_cast(decl)->GetVar(); + } + if (decl && decl->IsIdentifier()) { + IdentifierNode *id = static_cast(decl); + TreeNode *type = id->GetType(); + if (type && type->IsUserType()) { + TreeNode *id = static_cast(type)->GetId(); + if (id) { + scope = mStrIdx2ScopeMap[id->GetStrIdx()]; + } + } else if (id->GetParent() && id->GetParent()->IsXXportAsPair()) { + scope = id->GetScope(); + } + } + } + if (scope) { + mScopeStack.push(scope); + BuildScopeBaseVisitor::Visit(field); + mScopeStack.pop(); + } else { + mRunIt = true; + } + } + + return node; +} + +TypeParameterNode *BuildScopeVisitor::VisitTypeParameterNode(TypeParameterNode *node) { + BuildScopeBaseVisitor::VisitTypeParameterNode(node); + TreeNode *id = node->GetId(); + if (id && id->IsIdentifier()) { + ASTScope *scope = mScopeStack.top(); + id->SetScope(scope); + AddDecl(scope, id); + } + return node; +} + +ImportNode *BuildScopeVisitor::VisitImportNode(ImportNode *node) { + (void) AstVisitor::VisitImportNode(node); + ASTScope *scope = mScopeStack.top(); + + Module_Handler *targetHandler = NULL; + TreeNode *target = node->GetTarget(); + if (target) { + unsigned hstridx = target->GetStrIdx(); + unsigned hidx = mXXport->GetHandleIdxFromStrIdx(hstridx); + targetHandler = mHandler->GetASTHandler()->GetModuleHandler(hidx); + } + + for (unsigned i = 0; i < node->GetPairsNum(); i++) { + XXportAsPairNode *p = node->GetPair(i); + TreeNode *bfnode = p->GetBefore(); + TreeNode *afnode = p->GetAfter(); + if (p->IsDefault()) { + if (bfnode) { + bfnode->SetScope(scope); + AddImportedDecl(scope, bfnode); + } + } else if (p->IsEverything()) { + if (bfnode) { + AddImportedDecl(scope, bfnode); + // imported scope + scope = targetHandler->GetASTModule()->GetScope(); + bfnode->SetScope(scope); + } + } else if (afnode) { + afnode->SetScope(scope); + AddImportedDecl(scope, afnode); + + if (bfnode && targetHandler) { + ModuleNode *mod = targetHandler->GetASTModule(); + ASTScope *modscp = mod->GetScope(); + bfnode->SetScope(modscp); + } + } else if (bfnode) { + bfnode->SetScope(scope); + AddImportedDecl(scope, bfnode); + } + } + + return node; +} + +ExportNode *BuildScopeVisitor::VisitExportNode(ExportNode *node) { + (void) AstVisitor::VisitExportNode(node); + ASTScope *scope = mScopeStack.top(); + + Module_Handler *targetHandler = NULL; + TreeNode *target = node->GetTarget(); + // re-export + if (target) { + unsigned hstridx = target->GetStrIdx(); + unsigned hidx = mXXport->GetHandleIdxFromStrIdx(hstridx); + targetHandler = mHandler->GetASTHandler()->GetModuleHandler(hidx); + } + + for (unsigned i = 0; i < node->GetPairsNum(); i++) { + XXportAsPairNode *p = node->GetPair(i); + TreeNode *bfnode = p->GetBefore(); + TreeNode *afnode = p->GetAfter(); + if (targetHandler) { + ModuleNode *mod = targetHandler->GetASTModule(); + ASTScope *modscp = mod->GetScope(); + + if (bfnode) { + bfnode->SetScope(modscp); + // reexported bfnode is treated as a decl, add directly into map + if (bfnode->IsIdentifier()) { + mHandler->AddNodeId2DeclMap(bfnode->GetNodeId(), bfnode); + } + } else if (!afnode) { + // reexport everything + for (unsigned j = 0; j < modscp->GetExportedDeclNum(); j++) { + AddExportedDecl(scope, modscp->GetExportedDecl(j)); + } + } + } else { + if (!p->IsDefault() && 
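// Illustrative sketch of the a.b handling in VisitFieldNode above: scopes of
// classes, interfaces and namespaces are remembered by the owner's name
// (mStrIdx2ScopeMap in the code), and the field name is resolved against that
// remembered scope instead of the lexical scope of the use site.  Simplified
// stand-ins, not the MapleFE API.
#include <string>
#include <unordered_map>
#include <vector>

struct MemberScope {
  std::vector<std::string> members;
  bool Declares(const std::string &name) const {
    for (const std::string &m : members)
      if (m == name) return true;
    return false;
  }
};

// owner name (class/namespace) -> scope holding its members
using ScopeByOwner = std::unordered_map<std::string, MemberScope>;

inline bool ResolveField(const ScopeByOwner &scopes,
                         const std::string &ownerName,
                         const std::string &fieldName) {
  auto it = scopes.find(ownerName);
  return it != scopes.end() && it->second.Declares(fieldName);
}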
bfnode && !afnode) { + AddExportedDecl(scope, bfnode); + } + } + + if (afnode && afnode->IsIdentifier()) { + // exported afnode is treated as a decl, add directly into map + mHandler->AddNodeId2DeclMap(afnode->GetNodeId(), afnode); + AddExportedDecl(scope, afnode); + } + } + + return node; +} + +TryNode *BuildScopeVisitor::VisitTryNode(TryNode *node) { + ASTScope *parent = mScopeStack.top(); + ASTScope *scope = NewScope(parent, node); + mScopeStack.push(scope); + BuildScopeBaseVisitor::VisitTryNode(node); + mScopeStack.pop(); + return node; +} + +CatchNode *BuildScopeVisitor::VisitCatchNode(CatchNode *node) { + ASTScope *parent = mScopeStack.top(); + ASTScope *scope = NewScope(parent, node); + + // add params as decl + for (unsigned i = 0; i < node->GetParamsNum(); i++) { + TreeNode *n = node->GetParamAtIndex(i); + AddDecl(scope, n); + } + + mScopeStack.push(scope); + BuildScopeBaseVisitor::VisitCatchNode(node); + mScopeStack.pop(); + return node; +} + +FinallyNode *BuildScopeVisitor::VisitFinallyNode(FinallyNode *node) { + ASTScope *parent = mScopeStack.top(); + ASTScope *scope = NewScope(parent, node); + mScopeStack.push(scope); + BuildScopeBaseVisitor::VisitFinallyNode(node); + mScopeStack.pop(); + return node; +} + +// rename var with same name, i --> i__vN where N is 1, 2, 3 ... +void AST_SCP::RenameVar() { + MSGNOLOC0("============== RenameVar =============="); + RenameVarVisitor visitor(mHandler, mFlags, true); + ModuleNode *module = mHandler->GetASTModule(); + visitor.mPass = 0; + visitor.Visit(module); + + visitor.mPass = 1; + for (auto it: visitor.mStridx2DeclIdMap) { + unsigned stridx = it.first; + unsigned size = it.second.size(); + if (size > 1) { + const char *name = gStringPool.GetStringFromStrIdx(stridx); + + if (mFlags & FLG_trace_3) { + std::cout << "\nstridx: " << stridx << " " << name << std::endl; + std::cout << "decl nid : "; + for (auto i : visitor.mStridx2DeclIdMap[stridx]) { + std::cout << " " << i; + } + std::cout << std::endl; + } + + // variable renaming is in reverse order starting from smaller scope variables + std::deque::reverse_iterator rit = visitor.mStridx2DeclIdMap[stridx].rbegin(); + size--; + for (; size && rit!= visitor.mStridx2DeclIdMap[stridx].rend(); --size, ++rit) { + unsigned nid = *rit; + std::string str(name); + str += "__v"; + str += std::to_string(size); + visitor.mOldStrIdx = stridx; + visitor.mNewStrIdx = gStringPool.GetStrIdx(str); + gStringPool.AddAltStrIdx(visitor.mNewStrIdx); + TreeNode *tn = mHandler->GetAstOpt()->GetNodeFromNodeId(nid); + ASTScope *scope = tn->GetScope(); + tn = scope->GetTree(); + if (mFlags & FLG_trace_3) { + std::cout << "\nupdate name : " + << gStringPool.GetStringFromStrIdx(visitor.mOldStrIdx) + << " --> " + << gStringPool.GetStringFromStrIdx(visitor.mNewStrIdx) + << std::endl; + } + visitor.Visit(tn); + } + } + } +} + +// fields are not renamed +bool RenameVarVisitor::SkipRename(IdentifierNode *node) { + TreeNode *parent = node->GetParent(); + if (parent) { + switch (parent->GetKind()) { + case NK_Struct: + case NK_Class: + case NK_Interface: + return true; + case NK_Field: { + FieldNode *f = static_cast(parent); + // skip for mField + return node == f->GetField();; + } + default: + return false; + } + } + return true; +} + +// check if node is of same name as a parameter of func +bool RenameVarVisitor::IsFuncArg(FunctionNode *func, IdentifierNode *node) { + for (unsigned i = 0; i < func->GetParamsNum(); i++) { + if (func->GetParam(i)->GetStrIdx() == node->GetStrIdx()) { + return true; + } + } + return false; +} 
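// Illustrative sketch of the naming scheme implemented by RenameVar above: when
// several scopes declare the same name, the decls are kept ordered from the
// outermost scope to the innermost one, the outermost keeps the original name,
// and each inner one is renamed to `name__vN`.  Simplified stand-ins, not the
// MapleFE API.
#include <iostream>
#include <string>
#include <vector>

// declCount decls of one name, ordered outermost scope first (the order that
// InsertToStridx2DeclIdMap maintains); returns the name each decl ends up with.
inline std::vector<std::string> VersionedNames(const std::string &name, unsigned declCount) {
  std::vector<std::string> out;
  for (unsigned i = 0; i < declCount; ++i)
    out.push_back(i == 0 ? name : name + "__v" + std::to_string(i));
  return out;
}

int main() {
  for (const std::string &n : VersionedNames("i", 3))
    std::cout << n << "\n";   // prints: i, i__v1, i__v2
}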
+ +// insert decl in lattice order of scopes hierachy to ensure proper name versions. +// +// the entries of node id in mStridx2DeclIdMap are inserted to a list from larger scopes to smaller scopes +// later variable renaming is performed in reverse from bottom up of AST within the scope of the variable +// +void RenameVarVisitor::InsertToStridx2DeclIdMap(unsigned stridx, IdentifierNode *node) { + ASTScope *s0 = node->GetScope(); + std::deque::iterator it; + unsigned i = 0; + for (it = mStridx2DeclIdMap[stridx].begin(); it != mStridx2DeclIdMap[stridx].end(); ++it) { + i = *it; + TreeNode *node1 = mHandler->GetAstOpt()->GetNodeFromNodeId(i); + ASTScope *s1 = node1->GetScope(); + // decl at same scope already exist + if (s1 == s0) { + return; + } + // do not insert node after node1 if node's scope s0 is an ancestor of node1's scope s1 + if (s1->IsAncestor(s0)) { + mStridx2DeclIdMap[stridx].insert(it, node->GetNodeId()); + return; + } + } + mStridx2DeclIdMap[stridx].push_back(node->GetNodeId()); +} + +IdentifierNode *RenameVarVisitor::VisitIdentifierNode(IdentifierNode *node) { + AstVisitor::VisitIdentifierNode(node); + // fields are not renamed + if (SkipRename(node)) { + return node; + } + if (mPass == 0) { + unsigned stridx = node->GetStrIdx(); + if (stridx) { + TreeNode *parent = node->GetParent(); + if (parent) { + // insert in order according to scopes hierachy to ensure proper name version + if ((parent->IsDecl() && parent->GetStrIdx() == stridx)) { + // decl + InsertToStridx2DeclIdMap(stridx, node); + } else if (parent->IsFunction() && IsFuncArg(static_cast(parent), node)) { + // func parameters + InsertToStridx2DeclIdMap(stridx, node); + } + } + } else { + NOTYETIMPL("Unexpected - decl without name stridx"); + } + } else if (mPass == 1) { + if (node->GetStrIdx() == mOldStrIdx) { + node->SetStrIdx(mNewStrIdx); + MSGNOLOC0(" name updated"); + TreeNode *parent = node->GetParent(); + if (parent && parent->IsDecl()) { + parent->SetStrIdx(mNewStrIdx); + } + } + } + return node; +} + +void AST_SCP::AdjustASTWithScope() { + MSGNOLOC0("============== AdjustASTWithScope =============="); + AdjustASTWithScopeVisitor visitor(mHandler, mFlags, true); + ModuleNode *module = mHandler->GetASTModule(); + visitor.Visit(module); +} + +IdentifierNode *AdjustASTWithScopeVisitor::VisitIdentifierNode(IdentifierNode *node) { + TreeNode *decl = mHandler->FindDecl(node); + if (!decl) { + LitData data; + bool change = false; + // handle literals true false + unsigned stridx = node->GetStrIdx(); + if (stridx == gStringPool.GetStrIdx("true")) { + data.mType = LT_BooleanLiteral; + data.mData.mBool = true; + change = true; + } else if (stridx == gStringPool.GetStrIdx("false")) { + data.mType = LT_BooleanLiteral; + data.mData.mBool = false; + change = true; + } + + if (change) { + LiteralNode *lit = mHandler->NewTreeNode(); + lit->SetData(data); + return (IdentifierNode*)(lit); + } + } + return node; +} + +} diff --git a/src/MapleFE/astopt/src/ast_ti.cpp b/src/MapleFE/astopt/src/ast_ti.cpp new file mode 100644 index 0000000000000000000000000000000000000000..99439b59de694b757e30393cd8db4d3a3456d115 --- /dev/null +++ b/src/MapleFE/astopt/src/ast_ti.cpp @@ -0,0 +1,2042 @@ +/* +* Copyright (C) [2021-2022] Futurewei Technologies, Inc. All rights reverved. +* +* OpenArkFE is licensed under the Mulan PSL v2. +* You can use this software according to the terms and conditions of the Mulan PSL v2. 
+* You may obtain a copy of Mulan PSL v2 at: +* +* http://license.coscl.org.cn/MulanPSL2 +* +* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +* FIT FOR A PARTICULAR PURPOSE. +* See the Mulan PSL v2 for more details. +*/ + +#include +#include +#include "ast_handler.h" +#include "ast_info.h" +#include "ast_util.h" +#include "ast_xxport.h" +#include "ast_ti.h" +#include "typetable.h" +#include "gen_astdump.h" + +#define ITERATEMAX 10 + +namespace maplefe { + +void TypeInfer::TypeInference() { + ModuleNode *module = mHandler->GetASTModule(); + + if (mFlags & FLG_trace_3) { + gStringPool.Dump(); + gTypeTable.Dump(); + } + + // build mNodeId2Decl + MSGNOLOC0("============== Build NodeId2Decl =============="); + BuildIdNodeToDeclVisitor visitor_build(mHandler, mFlags, true); + visitor_build.Visit(module); + + // type inference + MSGNOLOC0("============== TypeInfer =============="); + TypeInferVisitor visitor_ti(mHandler, mFlags, true); + visitor_ti.SetUpdated(true); + int count = 0; + while (visitor_ti.GetUpdated() && count++ <= ITERATEMAX) { + MSGNOLOC("\n TypeInference iterate ", count); + visitor_ti.SetUpdated(false); + visitor_ti.Visit(module); + } + + // build mDirectFieldSet + MSGNOLOC0("============== Build DirectFieldSet =============="); + BuildIdDirectFieldVisitor visitor_field(mHandler, mFlags, true); + visitor_field.Visit(module); + if (mFlags & FLG_trace_3) { + visitor_field.Dump(); + } + + if (mFlags & FLG_trace_3) std::cout << "\n>>>>>> TypeInference() iterated " << count << " times\n" << std::endl; + + // share UserType + MSGNOLOC0("============== Share UserType =============="); + ShareUTVisitor visitor_ut(mHandler, mFlags, true); + visitor_ut.Push(module->GetRootScope()); + visitor_ut.Visit(module); + + // Check Type + MSGNOLOC0("============== Check Type =============="); + CheckTypeVisitor visitor_check(mHandler, mFlags, true); + visitor_check.Visit(module); + + if (mFlags & FLG_trace_3) { + mHandler->DumpArrayElemTypeIdMap(); + } +} + +// build up mNodeId2Decl by visiting each Identifier +IdentifierNode *BuildIdNodeToDeclVisitor::VisitIdentifierNode(IdentifierNode *node) { + if (mHandler->GetAstOpt()->IsLangKeyword(node)) { + return node; + } + (void) AstVisitor::VisitIdentifierNode(node); + // mHandler->FindDecl() will use/add entries to mNodeId2Decl + TreeNode *decl = mHandler->FindDecl(node); + if (decl && decl != node) { + mHandler->GetUtil()->SetTypeId(node, decl->GetTypeId()); + mHandler->GetUtil()->SetTypeIdx(node, decl->GetTypeIdx()); + } + TreeNode *type = node->GetType(); + if (type && type->IsPrimType()) { + PrimTypeNode *ptn = static_cast(type); + TypeId tid = ptn->GetPrimType(); + if (gTypeTable.IsPrimTypeId(tid)) { + // mHandler->GetUtil()->SetTypeId(node, tid); + mHandler->GetUtil()->SetTypeIdx(node, tid); + } + } + return node; +} + +FieldNode *BuildIdDirectFieldVisitor::VisitFieldNode(FieldNode *node) { + (void) AstVisitor::VisitFieldNode(node); + IdentifierNode *field = static_cast(node->GetField()); + TreeNode *decl = NULL; + decl = mHandler->FindDecl(field); + if (decl) { + mHandler->AddDirectField(field); + mHandler->AddDirectField(node); + } + return node; +} + +TreeNode *BuildIdDirectFieldVisitor::GetParentVarClass(TreeNode *node) { + TreeNode *n = node; + while (n && !n->IsModule()) { + unsigned tyidx = 0; + if (n->IsDecl()) { + tyidx = n->GetTypeIdx(); + } else if (n->IsBinOperator()) { + tyidx = n->GetTypeIdx(); + } + if 
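// Illustrative sketch of the fixed-point driver used by TypeInference() above:
// each pass of the type-inference visitor reports whether it changed any type
// information, and passes are repeated until nothing changes or the ITERATEMAX
// cap is reached.  Simplified stand-ins, not the MapleFE API.
#include <functional>

inline int RunToFixedPoint(const std::function<bool()> &onePass, int maxIterations) {
  int count = 0;
  bool updated = true;
  while (updated && count < maxIterations) {
    ++count;
    updated = onePass();   // true if this pass refined any type id
  }
  return count;            // number of passes actually performed
}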
(tyidx) { + return gTypeTable.GetTypeFromTypeIdx(tyidx); + } + n = n->GetParent(); + } + return NULL; +} + +FieldLiteralNode *BuildIdDirectFieldVisitor::VisitFieldLiteralNode(FieldLiteralNode *node) { + (void) AstVisitor::VisitFieldLiteralNode(node); + TreeNode *name = node->GetFieldName(); + IdentifierNode *field = static_cast(name); + TreeNode *decl = mHandler->FindDecl(field); + TreeNode *vtype = GetParentVarClass(decl); + if (vtype && !mHandler->GetINFO()->IsBuiltInType(vtype)) { + // check if decl is a field of vtype + // note: vtype could be in different module + Module_Handler *h = mHandler->GetModuleHandler(vtype); + TreeNode *fld = h->GetINFO()->GetField(vtype->GetNodeId(), decl->GetStrIdx()); + if (fld) { + mHandler->AddDirectField(field); + mHandler->AddDirectField(node); + } + } + return node; +} + +ArrayElementNode *BuildIdDirectFieldVisitor::VisitArrayElementNode(ArrayElementNode *node) { + (void) AstVisitor::VisitArrayElementNode(node); + TreeNode *array = node->GetArray(); + if (!array || !array->IsIdentifier()) { + return node; + } + + TreeNode *decl = mHandler->FindDecl(static_cast(array)); + if (decl && decl->IsTypeIdClass()) { + // indexed access of types + TreeNode *exp = node->GetExprAtIndex(0); + unsigned stridx = exp->GetStrIdx(); + if (exp->IsLiteral() && exp->IsTypeIdString()) { + stridx = (static_cast(exp))->GetData().mData.mStrIdx; + } + if (decl->IsDecl()) { + TreeNode *var = static_cast(decl)->GetVar(); + TreeNode * type = static_cast(var)->GetType(); + if (type && type->IsUserType()) { + UserTypeNode *ut = static_cast(type); + decl = mHandler->FindDecl(static_cast(ut->GetId())); + } + } + if (decl->IsStruct() || decl->IsClass()) { + for (int i = 0; i < mHandler->GetINFO()->GetFieldsSize(decl); i++) { + TreeNode *f = mHandler->GetINFO()->GetField(decl, i); + if (f->GetStrIdx() == stridx) { + mHandler->AddDirectField(exp); + mHandler->AddDirectField(node); + return node; + } + } + } + } + return node; +} + +void BuildIdDirectFieldVisitor::Dump() { + MSGNOLOC0("============== Direct Field NodeIds =============="); + for (auto i: mHandler->mDirectFieldSet) { + std::cout << " " << i; + } + std::cout << std::endl; +} + +#undef TYPE +#undef PRIMTYPE +#define TYPE(T) +#define PRIMTYPE(T) case TY_##T: +bool TypeInferVisitor::IsPrimTypeId(TypeId tid) { + bool result = false; + switch (tid) { +#include "supported_types.def" + result = true; + break; + default: + break; + } + return result; +} + +TypeId TypeInferVisitor::MergeTypeId(TypeId tia, TypeId tib) { + if (tia == tib || tib == TY_None) { + return tia; + } + + if (tib == TY_Object || tib == TY_User) { + return tib; + } + + if ((tia == TY_Function && tib == TY_Class) || (tib == TY_Function && tia == TY_Class)) { + return TY_None; + } + + // tia != tib && tib != TY_None + TypeId result = TY_None; + switch (tia) { + case TY_None: result = tib; break; + + case TY_Class: + case TY_Object: + case TY_User: result = tia; break; + + case TY_Merge: + case TY_Undefined: + case TY_String: + case TY_Function: + case TY_Array: result = TY_Merge; break; + + case TY_Number: { + switch (tib) { + case TY_Int: + case TY_Long: + case TY_Float: + case TY_Double: result = tib; break; + default: result = TY_Merge; break; + } + break; + } + + case TY_Boolean: { + switch (tib) { + case TY_Int: + case TY_Long: + case TY_Float: + case TY_Double: result = tib; break; + case TY_Number: result = tia; break; + default: result = TY_Merge; break; + } + break; + } + case TY_Int: { + switch (tib) { + case TY_Number: + case TY_Boolean: result = tia; 
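// Illustrative sketch of the numeric rules in MergeTypeId(): Number gives way
// to a concrete numeric width, Int widens to Long/Float/Double, and mixing Long
// with Float (or anything with Double) widens to Double; combinations outside
// the lattice collapse to a "Merge" marker.  Boolean and the non-numeric ids
// are left out here.  Simplified stand-ins, not the MapleFE API.
enum class Ty { None, Number, Int, Long, Float, Double, Merge };

inline bool IsConcreteNumeric(Ty t) {
  return t == Ty::Int || t == Ty::Long || t == Ty::Float || t == Ty::Double;
}

inline Ty MergeNumeric(Ty a, Ty b) {
  if (a == b || b == Ty::None) return a;
  if (a == Ty::None) return b;
  if (a == Ty::Number) return IsConcreteNumeric(b) ? b : Ty::Merge;
  if (b == Ty::Number) return IsConcreteNumeric(a) ? a : Ty::Merge;
  if (!IsConcreteNumeric(a) || !IsConcreteNumeric(b)) return Ty::Merge;
  if (a == Ty::Int) return b;       // Int widens to the other concrete type
  if (b == Ty::Int) return a;
  return Ty::Double;                // Long/Float/Double mixes widen to Double
}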
break; + case TY_Long: + case TY_Float: + case TY_Double: result = tib; break; + default: result = TY_Merge; break; + } + break; + } + case TY_Long: { + switch (tib) { + case TY_Number: + case TY_Boolean: + case TY_Int: result = tia; break; + case TY_Float: + case TY_Double: result = TY_Double; break; + default: result = TY_Merge; break; + } + break; + } + case TY_Float: { + switch (tib) { + case TY_Number: + case TY_Boolean: + case TY_Int: result = tia; break; + case TY_Long: + case TY_Double: result = TY_Double; break; + default: result = TY_Merge; break; + } + break; + } + case TY_Double: { + switch (tib) { + case TY_Number: + case TY_Boolean: + case TY_Int: + case TY_Long: + case TY_Double: result = tia; break; + default: result = TY_Merge; break; + } + break; + } + default: + break; + } + if (result == TY_None) { + NOTYETIMPL("MergeTypeId()"); + } + if (mFlags & FLG_trace_3) { + std::cout << " Type Merge: " + << AstDump::GetEnumTypeId(tia) << " " + << AstDump::GetEnumTypeId(tib) << " --> " + << AstDump::GetEnumTypeId(result) << std::endl; + } + return result; +} + +unsigned TypeInferVisitor::MergeTypeIdx(unsigned tia, unsigned tib) { + if (tia == tib || tib <= 1) { + return tia; + } + + if (tia <= 1) { + return tib; + } + + unsigned result = 0; + + TreeNode *ta = gTypeTable.GetTypeFromTypeIdx(tia); + TreeNode *tb = gTypeTable.GetTypeFromTypeIdx(tib); + if (ta->IsPrimType() && tb->IsPrimType()) { + TypeId tid = MergeTypeId(ta->GetTypeId(), tb->GetTypeId()); + result = (unsigned)tid; + } else { + TreeNode *type = gTypeTable.GetTypeFromTypeId(TY_Merge); + result = type->GetTypeIdx(); + } + + if (mFlags & FLG_trace_3) { + std::cout << " Type idx Merge: " + << tia << " " + << tib << " --> " + << result << std::endl; + } + return result; +} + +void TypeInferVisitor::SetTypeId(TreeNode *node, TypeId tid) { + TypeId id = node->GetTypeId(); + mHandler->GetUtil()->SetTypeId(node, tid); + if (tid != id) { + id = node->GetTypeId(); + if (IsPrimTypeIdx(id) && node->GetTypeIdx() == 0) { + SetTypeIdx(node, id); + } + SetUpdated(); + } +} + +void TypeInferVisitor::SetTypeIdx(TreeNode *node, unsigned tidx) { + mHandler->GetUtil()->SetTypeIdx(node, tidx); + if (node && node->GetTypeIdx() != tidx) { + SetUpdated(); + } +} + +void TypeInferVisitor::SetTypeId(TreeNode *node1, TreeNode *node2) { + SetTypeId(node1, node2->GetTypeId()); + SetTypeIdx(node1, node2->GetTypeIdx()); +} + +void TypeInferVisitor::SetTypeIdx(TreeNode *node1, TreeNode *node2) { + SetTypeIdx(node1, node2->GetTypeIdx()); +} + +void TypeInferVisitor::UpdateTypeId(TreeNode *node, TypeId tid) { + if (tid == TY_None || !node || node->IsLiteral()) { + return; + } + tid = MergeTypeId(node->GetTypeId(), tid); + SetTypeId(node, tid); +} + +void TypeInferVisitor::UpdateTypeId(TreeNode *node1, TreeNode *node2) { + if (!node1 || !node2 || node1 == node2) { + return; + } + TypeId tid = MergeTypeId(node1->GetTypeId(), node2->GetTypeId()); + if (!node1->IsLiteral()) { + SetTypeId(node1, tid); + } + if (!node2->IsLiteral()) { + SetTypeId(node2, tid); + } + + // update type idx as well + UpdateTypeIdx(node1, node2); +} + +void TypeInferVisitor::UpdateTypeIdx(TreeNode *node, unsigned tidx) { + if (tidx == 0 || !node || node->IsLiteral()) { + return; + } + tidx = MergeTypeIdx(node->GetTypeIdx(), tidx); + SetTypeIdx(node, tidx); +} + +void TypeInferVisitor::UpdateTypeIdx(TreeNode *node1, TreeNode *node2) { + if (!node1 || !node2 || node1 == node2) { + return; + } + unsigned tidx = MergeTypeIdx(node1->GetTypeIdx(), node2->GetTypeIdx()); + if (tidx != 0) { 
+ SetTypeIdx(node1, tidx); + SetTypeIdx(node2, tidx); + } +} + +PrimTypeNode *TypeInferVisitor::GetOrClonePrimTypeNode(PrimTypeNode *pt, TypeId tid) { + PrimTypeNode *new_pt = pt; + TypeId oldtid = pt->GetTypeId(); + // merge tids + tid = MergeTypeId(oldtid, tid); + // check if we need update + if (tid != oldtid) { + // check if we need clone PrimTypeNode to avoid using the shared one + if (oldtid == TY_None) { + new_pt = mHandler->NewTreeNode(); + new_pt->SetPrimType(pt->GetPrimType()); + } + SetTypeId(new_pt, tid); + if (IsPrimTypeId(tid)) { + SetTypeIdx(new_pt, tid); + } else { + SetTypeIdx(new_pt, gTypeTable.GetTypeFromTypeId(tid)->GetTypeIdx()); + } + SetUpdated(); + } + return new_pt; +} + +bool static IsScalar(TypeId tid) { + switch (tid) { + case TY_None: + case TY_Int: + case TY_Long: + case TY_Float: + case TY_Double: + return true; + default: + return false; + } + return false; +} + +// when function arry type parameter is updated, need update all +// caller arguments to be consistent with the array type parameter +void TypeInferVisitor::UpdateArgArrayDecls(unsigned nid, TypeId tid) { + for (auto id: mParam2ArgArrayDeclMap[nid]) { + mHandler->SetArrayElemTypeId(nid, tid); + if (id && id->IsDecl()) { + id = static_cast(id)->GetVar(); + } + if (id && id->IsIdentifier()) { + IdentifierNode *inode = static_cast(id); + TreeNode *type = inode->GetType(); + if (type && type->IsPrimArrayType()) { + PrimArrayTypeNode *pat = static_cast(type); + SetTypeId(pat->GetPrim(), tid); + SetUpdated(); + } + } + } +} + +// use input node's type info to update target node's type info +void TypeInferVisitor::UpdateTypeUseNode(TreeNode *target, TreeNode *input) { + // this functionality is reserved for typescript + if (!mHandler->IsTS()) { + return; + } + TypeId tid = target->GetTypeId(); + TypeId iid = input->GetTypeId(); + if ((tid == iid && IsScalar(tid)) || (iid == TY_Array && tid != iid)) { + return; + } + switch (iid) { + case TY_None: + break; + case TY_Array: { + // function's formals with corresponding calls' parameters passed in + if (input->IsIdentifier()) { + TreeNode *decl = mHandler->FindDecl(static_cast(input)); + TypeId old_elemTypeId = GetArrayElemTypeId(target); + unsigned old_elemTypeIdx = GetArrayElemTypeIdx(target); + TypeId inid = GetArrayElemTypeId(decl); + unsigned inidx = GetArrayElemTypeIdx(decl); + if (old_elemTypeId != inid || old_elemTypeIdx != inidx) { + UpdateArrayElemTypeMap(target, inid, inidx); + } + TypeId new_elemTypeId = GetArrayElemTypeId(target); + TreeNode *type = static_cast(target)->GetType(); + MASSERT(target->IsIdentifier() && "target node not identifier"); + if (type && type->IsPrimArrayType()) { + unsigned nid = target->GetNodeId(); + mParam2ArgArrayDeclMap[nid].insert(decl); + if (old_elemTypeId != new_elemTypeId) { + PrimArrayTypeNode *pat = static_cast(type); + PrimTypeNode *pt = pat->GetPrim(); + PrimTypeNode *new_pt = GetOrClonePrimTypeNode(pt, new_elemTypeId); + pat->SetPrim(new_pt); + SetUpdated(); + + UpdateArgArrayDecls(nid, new_elemTypeId); + } + } + } + // function's return type with return statement + else if (input->IsArrayLiteral()) { + TypeId old_elemTypeId = GetArrayElemTypeId(target); + unsigned old_elemTypeIdx = GetArrayElemTypeIdx(target); + TypeId inid = GetArrayElemTypeId(input); + unsigned inidx = GetArrayElemTypeIdx(input); + if (old_elemTypeId != inid || old_elemTypeIdx != inidx) { + UpdateArrayElemTypeMap(target, inid, inidx); + } + TypeId new_elemTypeId = GetArrayElemTypeId(target); + if (target->IsPrimArrayType()) { + unsigned 
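// Illustrative sketch of the clone-on-write idea behind GetOrClonePrimTypeNode()
// above: an untyped prim-type node may be shared between declarations, so before
// refining its type the visitor makes a private clone and mutates only that
// clone.  Simplified stand-ins, not the MapleFE API.
#include <memory>
#include <vector>

enum class TyId { None, Int, Double };

struct PrimTy { TyId id = TyId::None; };

struct NodePool {
  std::vector<std::unique_ptr<PrimTy>> nodes;
  PrimTy *Clone(const PrimTy &src) {
    nodes.push_back(std::make_unique<PrimTy>(src));
    return nodes.back().get();
  }
};

// Returns the node that now carries `wanted`; a shared untyped node is never mutated.
inline PrimTy *GetOrClone(NodePool &pool, PrimTy *maybeShared, TyId wanted) {
  if (maybeShared->id == wanted) return maybeShared;   // nothing to refine
  PrimTy *own = (maybeShared->id == TyId::None) ? pool.Clone(*maybeShared) : maybeShared;
  own->id = wanted;                                    // refine the private copy only
  return own;
}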
nid = target->GetNodeId(); + if (old_elemTypeId != new_elemTypeId) { + PrimArrayTypeNode *pat = static_cast(target); + PrimTypeNode *pt = pat->GetPrim(); + PrimTypeNode *new_pt = GetOrClonePrimTypeNode(pt, new_elemTypeId); + pat->SetPrim(new_pt); + SetUpdated(); + + UpdateArgArrayDecls(nid, new_elemTypeId); + } + } + } else { + NOTYETIMPL("parameter not identifier"); + } + break; + } + case TY_Int: + case TY_Long: + case TY_Float: + case TY_Double: + case TY_String: + case TY_Class: + case TY_Object: + case TY_Function: { + TypeId merged = MergeTypeId(tid, iid); + if (merged != tid && merged != TY_None) { + SetTypeId(target, merged); + SetUpdated(); + } + break; + } + case TY_User: + break; + default: + NOTYETIMPL("TypeId not handled"); + break; + } + return; +} + +void TypeInferVisitor::UpdateFuncRetTypeId(FunctionNode *node, TypeId tid, unsigned tidx) { + if (!node || (node->GetTypeId() == tid && node->GetTypeIdx() == tidx)) { + return; + } + TreeNode *type = node->GetRetType(); + // create new return type node if it was shared + + if (type) { + if (type->IsPrimType() && type->IsTypeIdNone()) { + type = GetOrClonePrimTypeNode((PrimTypeNode *)type, tid); + node->SetRetType(type); + } + tid = MergeTypeId(type->GetTypeId(), tid); + SetTypeId(type, tid); + tidx = MergeTypeIdx(type->GetTypeIdx(), tidx); + SetTypeIdx(type, tidx); + } +} + +TypeId TypeInferVisitor::GetArrayElemTypeId(TreeNode *node) { + unsigned nid = node->GetNodeId(); + return mHandler->GetArrayElemTypeId(nid); +} + +unsigned TypeInferVisitor::GetArrayElemTypeIdx(TreeNode *node) { + unsigned nid = node->GetNodeId(); + return mHandler->GetArrayElemTypeIdx(nid); +} + +void TypeInferVisitor::UpdateArrayElemTypeMap(TreeNode *node, TypeId tid, unsigned tidx) { + if (!node || tid == TY_None || !IsArray(node)) { + return; + } + unsigned nodeid = node->GetNodeId(); + TypeId currtid = mHandler->GetArrayElemTypeId(nodeid); + tid = MergeTypeId(tid, currtid); + + unsigned currtidx = mHandler->GetArrayElemTypeIdx(nodeid); + tidx = MergeTypeIdx(tidx, currtidx); + + if (currtid != tid || currtidx != tidx) { + mHandler->SetArrayElemTypeId(node->GetNodeId(), tid); + mHandler->SetArrayElemTypeIdx(node->GetNodeId(), tidx); + SetUpdated(); + + // update array's PrimType node with a new node + if (node->IsDecl()) { + DeclNode *decl = static_cast(node); + node = decl->GetVar(); + } + if (node->IsIdentifier()) { + IdentifierNode *in = static_cast(node); + node = in->GetType(); + } + + if (node->IsPrimArrayType()) { + PrimArrayTypeNode *pat = static_cast(node); + PrimTypeNode *pt = pat->GetPrim(); + PrimTypeNode *new_pt = GetOrClonePrimTypeNode(pt, tid); + pat->SetPrim(new_pt); + } + } +} + +void TypeInferVisitor::UpdateArrayDimMap(TreeNode *node, DimensionNode *dim) { + mHandler->SetArrayDim(node->GetNodeId(), dim); +} + +// return true if identifier is constructor +bool TypeInferVisitor::UpdateVarTypeWithInit(TreeNode *var, TreeNode *init) { + bool result = var->IsFunction(); + if (!var->IsIdentifier()) { + return result; + } + IdentifierNode *idnode = static_cast(var); + TreeNode *type = idnode->GetType(); + // use init NewNode to set decl type + if (init) { + if (init->IsNew()) { + NewNode *n = static_cast(init); + if (n->GetId()) { + TreeNode *id = n->GetId(); + if (id->IsIdentifier() && id->IsTypeIdClass()) { + UserTypeNode *utype = mInfo->CreateUserTypeNode(id->GetStrIdx(), var->GetScope()); + utype->SetParent(idnode); + idnode->SetType(utype); + SetUpdated(); + } + } + } else if (init->IsIdentifier()) { + TreeNode *decl = 
mHandler->FindDecl(static_cast(init)); + if (decl) { + unsigned tidx = decl->GetTypeIdx(); + if ((decl->IsClass() || (0 < tidx && tidx < (unsigned)TY_Max))) { + SetTypeId(idnode, TY_Function); + SetUpdated(); + result = true; + } + } + } else if (init->IsStructLiteral()) { + if (!type && init->GetTypeIdx() != 0) { + type = gTypeTable.GetTypeFromTypeIdx(init->GetTypeIdx()); + UserTypeNode *utype = mInfo->CreateUserTypeNode(type->GetStrIdx(), var->GetScope()); + utype->SetParent(idnode); + idnode->SetType(utype); + SetUpdated(); + } + } else if (init->IsArrayLiteral()) { + TypeId tid = GetArrayElemTypeId(init); + unsigned tidx = GetArrayElemTypeIdx(init); + if (type) { + if (type->IsArrayType()) { + ArrayTypeNode *pat = static_cast(type); + // update array element type + SetTypeId(pat->GetElemType(), tid); + SetTypeIdx(pat->GetElemType(), tidx); + SetUpdated(); + } else { + NOTYETIMPL("array type not ArrayTypeNode"); + } + return result; + } + + TreeNode *elemtype = NULL; + if (IsPrimTypeId(tid)) { + elemtype = gTypeTable.GetTypeFromTypeId(tid); + } else if (tidx != 0) { + elemtype = gTypeTable.GetTypeFromTypeIdx(tidx); + } + if (elemtype) { + ArrayTypeNode *pat = mHandler->NewTreeNode(); + pat->SetElemType(elemtype); + + DimensionNode *dims = mHandler->GetArrayDim(init->GetNodeId()); + pat->SetDims(dims); + + pat->SetParent(idnode); + idnode->SetType(pat); + SetUpdated(); + } + } + } + return result; +} + +bool TypeInferVisitor::IsArray(TreeNode *node) { + if (!node) { + return false; + } + if (node->IsArrayLiteral() || node->IsPrimArrayType()) { + return true; + } + TreeNode *tn = node; + if (node->IsDecl()) { + DeclNode *decl = static_cast(node); + tn = decl->GetVar(); + } + if (tn->IsIdentifier()) { + IdentifierNode *idnode = static_cast(tn); + if (idnode && idnode->GetType() && idnode->GetType()->IsPrimArrayType()) { + return true; + } + } else if (tn->IsBindingPattern()) { + // could be either object or array destructuring + return false; + } else { + NOTYETIMPL("array not identifier or bind pattern"); + } + return false; +} + +TreeNode *TypeInferVisitor::VisitClassField(TreeNode *node) { + (void) AstVisitor::VisitTreeNode(node); + if (node->IsIdentifier()) { + IdentifierNode *idnode = static_cast(node); + TreeNode *type = idnode->GetType(); + if (type) { + TypeId tid = type->GetTypeId(); + if (type->IsPrimType()) { + PrimTypeNode *ptn = static_cast(type); + tid = ptn->GetPrimType(); + } + // use non TY_Number + if (tid != TY_Number) { + SetTypeId(node, tid); + } + } + TreeNode *init = idnode->GetInit(); + if (init) { + VisitTreeNode(init); + UpdateTypeId(node, init->GetTypeId()); + } + } else if (node->IsLiteral()) { + MSGNOLOC0("field is Literal"); + } else if (node->IsComputedName()) { + MSGNOLOC0("field is ComputedName"); + } else if (node->IsStrIndexSig()) { + MSGNOLOC0("field is StrIndexSig"); + } else { + NOTYETIMPL("field new kind"); + } + return node; +} + +// ArrayElementNode are for +// 1. array access +// 2. 
indexed access of class/structure for fields, field types +ArrayElementNode *TypeInferVisitor::VisitArrayElementNode(ArrayElementNode *node) { + (void) AstVisitor::VisitArrayElementNode(node); + TreeNode *array = node->GetArray(); + if (array) { + if (array->IsIdentifier()) { + TreeNode *decl = mHandler->FindDecl(static_cast(array)); + if (decl) { + // indexed access of class fields or types + if (decl->IsTypeIdClass()) { + SetTypeId(array, TY_Class); + TreeNode *exp = node->GetExprAtIndex(0); + if (exp->IsLiteral()) { + if (exp->IsTypeIdString()) { + // indexed access of types + unsigned stridx = (static_cast(exp))->GetData().mData.mStrIdx; + if (decl->IsDecl()) { + TreeNode *var = static_cast(decl)->GetVar(); + TreeNode * type = static_cast(var)->GetType(); + if (type && type->IsUserType()) { + UserTypeNode *ut = static_cast(type); + decl = mHandler->FindDecl(static_cast(ut->GetId())); + } + } + if (decl->IsStruct() || decl->IsClass()) { + bool found = false; + for (int i = 0; i < mInfo->GetFieldsSize(decl); i++) { + TreeNode *f = mInfo->GetField(decl, i); + if (f->GetStrIdx() == stridx) { + UpdateTypeId(node, f); + found = true; + break; + } + } + // new field + if (!found) { + IdentifierNode *id = mInfo->CreateIdentifierNode(stridx); + mInfo->AddField(decl, id); + } + } + } else if (exp->IsTypeIdInt()) { + // indexed access of fields + // unsigned i = (static_cast(exp))->GetData().mData.mInt64; + NOTYETIMPL("indexed access with literal field id"); + } else { + AstVisitor::VisitTreeNode(exp); + NOTYETIMPL("indexed access not literal"); + } + } else { + AstVisitor::VisitTreeNode(exp); + } + } else { + // default + UpdateTypeId(array, TY_Array); + UpdateTypeId(decl, array); + UpdateArrayElemTypeMap(decl, node->GetTypeId(), node->GetTypeIdx()); + UpdateTypeId(node, mHandler->GetArrayElemTypeId(decl->GetNodeId())); + } + } else { + NOTYETIMPL("array not declared"); + } + } else if (array->IsArrayElement()) { + NOTYETIMPL("array in ArrayElementNode IsArrayElement"); + } else if (array->IsField()) { + NOTYETIMPL("array in ArrayElementNode IsField"); + } else if (array->IsUserType()) { + NOTYETIMPL("array in ArrayElementNode IsUserType"); + } else if (array->IsBinOperator()) { + NOTYETIMPL("array in ArrayElementNode IsBinOperator"); + } else if (array->IsLiteral() && ((LiteralNode*)array)->IsThis()) { + NOTYETIMPL("array in ArrayElementNode IsLiteral"); + } else if (array->IsPrimType()) { + NOTYETIMPL("array in ArrayElementNode IsPrimType"); + } else { + NOTYETIMPL("array in ArrayElementNode unknown"); + } + } + return node; +} + +FieldLiteralNode *TypeInferVisitor::VisitFieldLiteralNode(FieldLiteralNode *node) { + (void) AstVisitor::VisitFieldLiteralNode(node); + TreeNode *name = node->GetFieldName(); + TreeNode *lit = node->GetLiteral(); + UpdateTypeId(name, lit->GetTypeId()); + UpdateTypeId(node, name); + return node; +} + +ArrayLiteralNode *TypeInferVisitor::VisitArrayLiteralNode(ArrayLiteralNode *node) { + UpdateTypeId(node, TY_Array); + (void) AstVisitor::VisitArrayLiteralNode(node); + if (node->IsArrayLiteral()) { + unsigned size = node->GetLiteralsNum(); + TypeId tid = TY_None; + unsigned tidx = 0; + bool allElemArray = true; + for (unsigned i = 0; i < size; i++) { + TreeNode *n = node->GetLiteral(i); + TypeId id = n->GetTypeId(); + unsigned idx = n->GetTypeIdx(); + tid = MergeTypeId(tid, id); + tidx = MergeTypeIdx(tidx, idx); + if (tid != TY_Array) { + allElemArray = false; + } + } + + DimensionNode *dim = mHandler->NewTreeNode(); + dim->AddDimension(size); + + // n-D array: elements 
are all arrays + if (allElemArray) { + unsigned elemdim = DEFAULTVALUE; + // recalculate element typeid + tid = TY_None; + tidx = 0; + for (unsigned i = 0; i < size; i++) { + TreeNode *n = node->GetLiteral(i); + if (n->IsArrayLiteral()) { + DimensionNode * dn = mHandler->GetArrayDim(n->GetNodeId()); + unsigned currdim = dn ? dn->GetDimensionsNum() : 0; + // find min dim of all elements + if (elemdim == DEFAULTVALUE) { + elemdim = currdim; + tid = mHandler->GetArrayElemTypeId(n->GetNodeId()); + tidx = mHandler->GetArrayElemTypeIdx(n->GetNodeId()); + } else if (currdim < elemdim) { + elemdim = currdim; + tid = TY_Merge; + tidx = 0; + } else if (currdim > elemdim) { + tid = TY_Merge; + tidx = 0; + } else { + tid = MergeTypeId(tid, mHandler->GetArrayElemTypeId(n->GetNodeId())); + tidx = MergeTypeIdx(tidx, mHandler->GetArrayElemTypeIdx(n->GetNodeId())); + } + } + } + if (elemdim != DEFAULTVALUE) { + for (unsigned i = 0; i < elemdim; i++) { + // with unspecified length, can add details later + dim->AddDimension(0); + } + } + } + + UpdateArrayElemTypeMap(node, tid, tidx); + UpdateArrayDimMap(node, dim); + } + + return node; +} + +BinOperatorNode *TypeInferVisitor::VisitBinOperatorNode(BinOperatorNode *node) { + if (mFlags & FLG_trace_1) std::cout << "Visiting BinOperatorNode, id=" << node->GetNodeId() << "..." << std::endl; + // (void) AstVisitor::VisitBinOperatorNode(node); + OprId op = node->GetOprId(); + TreeNode *ta = node->GetOpndA(); + TreeNode *tb = node->GetOpndB(); + (void) VisitTreeNode(tb); + (void) VisitTreeNode(ta); + // modified operand + TreeNode *mod = NULL; + TypeId tia = ta->GetTypeId(); + TypeId tib = tb->GetTypeId(); + switch (op) { + case OPR_StEq: + case OPR_EQ: + case OPR_NE: + case OPR_GT: + case OPR_LT: + case OPR_GE: + case OPR_LE: { + if (tia != TY_None && tib == TY_None) { + UpdateTypeId(tb, tia); + mod = tb; + } else if (tia == TY_None && tib != TY_None) { + UpdateTypeId(ta, tib); + mod = ta; + } + SetTypeId(node, TY_Boolean); + SetTypeIdx(node, TY_Boolean); + break; + } + case OPR_Assign: + case OPR_AddAssign: + case OPR_SubAssign: + case OPR_MulAssign: + case OPR_DivAssign: + case OPR_ModAssign: + case OPR_ShlAssign: + case OPR_ShrAssign: + case OPR_BandAssign: + case OPR_BorAssign: + case OPR_BxorAssign: + case OPR_ZextAssign: { + TypeId ti = MergeTypeId(tia, tib); + if (tia == TY_None || (ta->IsIdentifier() && tia != ti)) { + UpdateTypeId(ta, ti); + mod = ta; + } else if (tib == TY_None) { + UpdateTypeId(tb, ti); + mod = tb; + } + UpdateTypeId(node, ti); + unsigned tix = MergeTypeIdx(ta->GetTypeIdx(), tb->GetTypeIdx()); + UpdateTypeIdx(ta, tix); + UpdateTypeIdx(tb, tix); + UpdateTypeIdx(node, tix); + break; + } + case OPR_Add: + case OPR_Sub: + case OPR_Mul: + case OPR_Div: { + if (tia != TY_None && tib == TY_None) { + UpdateTypeId(tb, tia); + mod = tb; + } else if (tia == TY_None && tib != TY_None) { + UpdateTypeId(ta, tib); + mod = ta; + } + TypeId ti = MergeTypeId(tia, tib); + UpdateTypeId(node, ti); + unsigned tix = MergeTypeIdx(ta->GetTypeIdx(), tb->GetTypeIdx()); + UpdateTypeIdx(ta, tix); + UpdateTypeIdx(tb, tix); + UpdateTypeIdx(node, tix); + break; + } + case OPR_Mod: + case OPR_Band: + case OPR_Bor: + case OPR_Bxor: + case OPR_Shl: + case OPR_Shr: + case OPR_Zext: { + SetTypeId(ta, TY_Int); + SetTypeId(tb, TY_Int); + SetTypeId(node, TY_Int); + SetTypeIdx(ta, TY_Int); + SetTypeIdx(tb, TY_Int); + SetTypeIdx(node, TY_Int); + break; + } + case OPR_Land: + case OPR_Lor: { + SetTypeId(ta, TY_Boolean); + SetTypeId(tb, TY_Boolean); + SetTypeId(node, TY_Boolean); 
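// Illustrative sketch of the per-operator rules in VisitBinOperatorNode:
// comparisons and logical operators yield Boolean, bitwise/shift/modulo
// operators force Int on the operands and the result, and arithmetic and
// assignment operators take the merge of the two operand types.  Simplified
// stand-ins, not the MapleFE API.
enum class OpKind { Compare, Logical, Bitwise, Arith, Assign };
enum class TId { None, Boolean, Int, Long, Float, Double, Merge };

// Minimal stand-in for the fuller MergeTypeId() lattice sketched earlier.
inline TId MergeIds(TId a, TId b) {
  if (a == b || b == TId::None) return a;
  if (a == TId::None) return b;
  return TId::Merge;
}

inline TId ResultType(OpKind op, TId a, TId b) {
  switch (op) {
    case OpKind::Compare:
    case OpKind::Logical: return TId::Boolean;     // ==, <, &&, || ...
    case OpKind::Bitwise: return TId::Int;         // %, &, |, ^, <<, >> ...
    case OpKind::Arith:
    case OpKind::Assign:  return MergeIds(a, b);   // +, -, *, /, =, += ...
  }
  return TId::None;
}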
+ SetTypeIdx(ta, TY_Boolean); + SetTypeIdx(tb, TY_Boolean); + SetTypeIdx(node, TY_Boolean); + break; + } + case OPR_Exp: { + if (tia == TY_Int && tib == TY_Int) { + SetTypeId(node, TY_Int); + SetTypeIdx(node, TY_Int); + } + break; + } + case OPR_NullCoalesce: + break; + default: { + NOTYETIMPL("VisitBinOperatorNode()"); + break; + } + } + // visit mod to update its content + (void) VisitTreeNode(mod); + return node; +} + +CallNode *TypeInferVisitor::VisitCallNode(CallNode *node) { + if (mFlags & FLG_trace_1) std::cout << "Visiting CallNode, id=" << node->GetNodeId() << "..." << std::endl; + TreeNode *method = node->GetMethod(); + Module_Handler *handler = mHandler; + UpdateTypeId(method, TY_Function); + if (method) { + if (method->IsField()) { + FieldNode *field = static_cast(method); + method = field->GetField(); + TreeNode *upper = field->GetUpper(); + handler = mAstOpt->GetHandlerFromNodeId(upper->GetNodeId()); + } + if (method->IsIdentifier()) { + IdentifierNode *mid = static_cast(method); + TreeNode *decl = mHandler->FindDecl(mid); + if (decl) { + SetTypeIdx(node, decl->GetTypeIdx()); + SetTypeIdx(mid, decl->GetTypeIdx()); + + if (decl->IsDecl()) { + DeclNode *d = static_cast(decl); + if (d->GetInit()) { + decl = d->GetInit(); + } + } + if (decl && decl->IsIdentifier()) { + IdentifierNode *id = static_cast(decl); + if (id->GetType()) { + decl = id->GetType(); + } else if (id->IsTypeIdFunction()) { + NOTYETIMPL("VisitCallNode TY_Function"); + } + } + if (decl) { + if (decl->IsFunction()) { + FunctionNode *func = static_cast(decl); + // check if called a generator + if (func->IsGenerator()) { + mHandler->AddGeneratorUsed(node->GetNodeId(), func); + } + // update call's return type + if (func->GetRetType()) { + UpdateTypeId(node, func->GetRetType()->GetTypeId()); + } + // skip imported and exported functions as they are generic + // so should not restrict their types + if (!mXXport->IsImportedExportedDeclId(mHandler->GetHidx(), decl->GetNodeId())) { + unsigned min = func->GetParamsNum(); + if (func->GetParamsNum() != node->GetArgsNum()) { + // count minimun number of args need to be passed + min = 0; + // check arg about whether it is optional or has default value + for (unsigned i = 0; i < func->GetParamsNum(); i++) { + TreeNode *arg = func->GetParam(i); + if (arg->IsOptional()) { + continue; + } else if(arg->IsIdentifier()) { + IdentifierNode *id = static_cast(arg); + TreeNode *d = mHandler->FindDecl(id); + if (d) { + SetTypeId(id, d->GetTypeId()); + SetTypeIdx(id, d->GetTypeIdx()); + } + if (!id->GetInit()) { + min++; + } + } else { + min++; + } + } + if (min > node->GetArgsNum()) { + NOTYETIMPL("call and func number of arguments not compatible"); + return node; + } + } + // update function's argument types + for (unsigned i = 0; i < min; i++) { + UpdateTypeUseNode(func->GetParam(i), node->GetArg(i)); + } + + // dummy functions like console.log + if (func->IsTypeIdNone()) { + for (unsigned i = 0; i < node->GetArgsNum(); i++) { + TreeNode *arg = node->GetArg(i); + if(arg->IsIdentifier()) { + IdentifierNode *id = static_cast(arg); + TreeNode *d = mHandler->FindDecl(id); + if (d) { + SetTypeId(id, d->GetTypeId()); + SetTypeIdx(id, d->GetTypeIdx()); + } + } + } + } + } + } else if (decl->IsCall()) { + (void) VisitCallNode(static_cast(decl)); + } else if (decl->IsDecl()) { + DeclNode *d = static_cast(decl); + if (d->GetInit()) { + NOTYETIMPL("VisitCallNode decl init"); + } + } else if (decl->IsLiteral()) { + NOTYETIMPL("VisitCallNode literal node"); + } else if (decl->IsTypeIdClass()) { + 
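// Illustrative sketch of the arity check in VisitCallNode above: a call may pass
// fewer arguments than the callee has parameters, as long as every parameter
// that is neither optional nor given a default value is still covered.
// Simplified stand-ins, not the MapleFE API.
#include <vector>

struct ParamInfo {
  bool optional = false;     // declared as `p?: T`
  bool hasDefault = false;   // declared as `p = expr`
};

inline unsigned MinRequiredArgs(const std::vector<ParamInfo> &params) {
  unsigned min = 0;
  for (const ParamInfo &p : params)
    if (!p.optional && !p.hasDefault) ++min;
  return min;
}

inline bool CallArityOk(const std::vector<ParamInfo> &params, unsigned argCount) {
  return argCount >= MinRequiredArgs(params);
}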
// object + if (node->GetArgsNum()) { + TreeNode *arg = node->GetArg(0); + SetTypeId(arg, TY_Object); + SetTypeIdx(arg, decl->GetTypeIdx()); + } + } else { + NOTYETIMPL("VisitCallNode not function node"); + } + } else { + NOTYETIMPL("VisitCallNode null decl"); + } + } else if (mAstOpt->IsLangKeyword(mid)) { + // known calls + } else { + // calling constructor like Array(...) could also end up here + TreeNode *type = mHandler->FindType(mid); + if (type) { + NOTYETIMPL("VisitCallNode type"); + } else { + NOTYETIMPL("VisitCallNode null decl and null type"); + } + } + } + } + (void) AstVisitor::VisitCallNode(node); + return node; +} + +CastNode *TypeInferVisitor::VisitCastNode(CastNode *node) { + (void) AstVisitor::VisitCastNode(node); + TreeNode *dest = node->GetDestType(); + SetTypeId(node, dest); + return node; +} + +AsTypeNode *TypeInferVisitor::VisitAsTypeNode(AsTypeNode *node) { + (void) AstVisitor::VisitAsTypeNode(node); + TreeNode *dest = node->GetType(); + if (node->GetTypeIdx() == 0) { + SetTypeId(node, dest); + } + + TreeNode *parent = node->GetParent(); + if (parent) { + // pass to parent, need refine if multiple AsTypeNode + if (parent->GetAsTypesNum() == 1 && parent->GetAsTypeAtIndex(0) == node) { + if (parent->GetTypeIdx() == 0) { + SetTypeId(parent, dest); + } + } + } + return node; +} + +ClassNode *TypeInferVisitor::VisitClassNode(ClassNode *node) { + if (mFlags & FLG_trace_1) std::cout << "Visiting ClassNode, id=" << node->GetNodeId() << "..." << std::endl; + UpdateTypeId(node, TY_Class); + for (unsigned i = 0; i < node->GetFieldsNum(); ++i) { + TreeNode *t = node->GetField(i); + (void) VisitClassField(t); + } + (void) AstVisitor::VisitClassNode(node); + return node; +} + +CondBranchNode *TypeInferVisitor::VisitCondBranchNode(CondBranchNode *node) { + (void) AstVisitor::VisitCondBranchNode(node); + TreeNode *cond = node->GetCond(); + TreeNode *blockT = NULL; + TreeNode *blockF = NULL; + if (cond->IsUnaOperator()) { + cond = static_cast(cond)->GetOpnd(); + blockT = node->GetFalseBranch(); + blockF = node->GetTrueBranch(); + } else { + blockT = node->GetTrueBranch(); + blockF = node->GetFalseBranch(); + } + + if (cond->IsCall()) { + CallNode *call = static_cast(cond); + TreeNode *method = call->GetMethod(); + if (method && method->IsIdentifier()) { + IdentifierNode *id = static_cast(method); + unsigned mid = id->GetStrIdx(); + unsigned nid = node->GetNodeId(); + // check if already visited for mid and nodeid + if (mCbFuncIsDone.find(mid) != mCbFuncIsDone.end() && + mCbFuncIsDone[mid].find(nid) != mCbFuncIsDone[mid].end()) { + return node; + } + TreeNode *decl = mHandler->FindDecl(id); + if (decl && decl->IsFunction()) { + unsigned fid = decl->GetNodeId(); + if (mFuncIsNodeMap.find(fid) != mFuncIsNodeMap.end()) { + unsigned tidx = mFuncIsNodeMap[fid]; + TreeNode *arg = call->GetArg(0); + if (arg->IsIdentifier()) { + unsigned stridx = arg->GetStrIdx(); + mChangeTypeIdxVisitor->Setup(stridx, tidx); + mChangeTypeIdxVisitor->Visit(blockT); + mCbFuncIsDone[mid].insert(nid); + SetUpdated(); + + // if union of 2 types, update other branch with other type + TreeNode *argdecl = mHandler->FindDecl(static_cast(arg)); + if (argdecl && argdecl->IsIdentifier()) { + TreeNode *type = static_cast(argdecl)->GetType(); + if (type->IsUserType()) { + UserTypeNode *ut = static_cast(type); + if (ut->GetType() == UT_Union && ut->GetUnionInterTypesNum() == 2) { + TreeNode *u0 = ut->GetUnionInterType(0); + TreeNode *u1 = ut->GetUnionInterType(1); + tidx = (u0->GetTypeIdx() == tidx) ? 
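// Illustrative sketch of the narrowing performed at this point in
// VisitCondBranchNode: when the condition is a user-defined type guard (a
// function whose return type is `x is T`) and the guarded variable is declared
// as a union of exactly two types, the true branch is retyped to T and the
// false branch to the other union member.  Simplified stand-ins, not the
// MapleFE API.
#include <array>
#include <optional>

using TypeIdx = unsigned;

// Returns the type for the false branch, if it can be determined.
inline std::optional<TypeIdx> FalseBranchType(const std::array<TypeIdx, 2> &unionMembers,
                                              TypeIdx guardedType) {
  if (unionMembers[0] == guardedType) return unionMembers[1];
  if (unionMembers[1] == guardedType) return unionMembers[0];
  return std::nullopt;   // the guard's type is not one of the two members
}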
u1->GetTypeIdx() : u0->GetTypeIdx(); + mChangeTypeIdxVisitor->Setup(stridx, tidx); + mChangeTypeIdxVisitor->Visit(blockF); + } + } + } + } + } + } + + } else { + NOTYETIMPL("mentod null or not identifier"); + } + } + + return node; +} + +DeclNode *TypeInferVisitor::VisitDeclNode(DeclNode *node) { + if (mFlags & FLG_trace_1) std::cout << "Visiting DeclNode, id=" << node->GetNodeId() << "..." << std::endl; + (void) AstVisitor::VisitDeclNode(node); + TreeNode *init = node->GetInit(); + TreeNode *var = node->GetVar(); + TypeId merged = node->GetTypeId(); + unsigned mergedtidx = node->GetTypeIdx(); + TypeId elemTypeId = TY_None; + unsigned elemTypeIdx = 0; + bool isArray = false; + bool isFromGenerator = false; + if (init) { + merged = MergeTypeId(merged, init->GetTypeId()); + mergedtidx = MergeTypeIdx(mergedtidx, init->GetTypeIdx()); + // collect array element typeid if any + elemTypeId = GetArrayElemTypeId(init); + elemTypeIdx = GetArrayElemTypeIdx(init); + isArray = (elemTypeId != TY_None); + // pass IsGeneratorUsed + isFromGenerator = mHandler->UpdateGeneratorUsed(node->GetNodeId(), init->GetNodeId()); + if (var && isFromGenerator) { + mHandler->UpdateGeneratorUsed(var->GetNodeId(), init->GetNodeId()); + } + } + if (var) { + // normal cases + if(var->IsIdentifier()) { + IdentifierNode *idvar = static_cast(var); + if (isFromGenerator && !idvar->GetType()) { + unsigned stridx = gStringPool.GetStrIdx("Generator"); + UserTypeNode *ut = mInfo->CreateUserTypeNode(stridx, var->GetScope()); + idvar->SetType(ut); + } + merged = MergeTypeId(merged, var->GetTypeId()); + mergedtidx = MergeTypeIdx(mergedtidx, var->GetTypeIdx()); + bool isFunc = UpdateVarTypeWithInit(var, init); + if (isFunc) { + UpdateTypeId(node, var->GetTypeId()); + UpdateTypeIdx(node, mergedtidx); + UpdateTypeIdx(var, mergedtidx); + return node; + } + } else { + // BindingPatternNode + } + } else { + MASSERT("var null"); + } + // override TypeId for array + if (isArray) { + merged = TY_Array; + SetTypeId(node, merged); + SetTypeId(init, merged); + SetTypeId(var, merged); + } else { + UpdateTypeId(node, merged); + UpdateTypeId(init, merged); + UpdateTypeId(var, merged); + if (mergedtidx > 0) { + UpdateTypeIdx(node, mergedtidx); + UpdateTypeIdx(init, mergedtidx); + UpdateTypeIdx(var, mergedtidx); + } + } + SetTypeIdx(node, var->GetTypeIdx()); + if (isArray || IsArray(node)) { + UpdateArrayElemTypeMap(node, elemTypeId, elemTypeIdx); + } + return node; +} + +ImportNode *TypeInferVisitor::VisitImportNode(ImportNode *node) { + //(void) AstVisitor::VisitImportNode(node); + TreeNode *target = node->GetTarget(); + unsigned hidx = DEFAULTVALUE; + unsigned hstridx = 0; + if (target) { + std::string name = mXXport->GetTargetFilename(mHandler->GetHidx(), target); + // store name's string index in node + hstridx = gStringPool.GetStrIdx(name); + hidx = mXXport->GetHandleIdxFromStrIdx(hstridx); + } + for (unsigned i = 0; i < node->GetPairsNum(); i++) { + XXportAsPairNode *p = node->GetPair(i); + TreeNode *bfnode = p->GetBefore(); + TreeNode *afnode = p->GetAfter(); + if (bfnode) { + if (hidx == DEFAULTVALUE) { + hstridx = mXXport->ExtractTargetStrIdx(bfnode); + if (hstridx) { + hidx = mXXport->GetHandleIdxFromStrIdx(hstridx); + } else { + NOTYETIMPL("can not find import target"); + return node; + } + } + if (p->IsDefault()) { + TreeNode *dflt = mXXport->GetExportedDefault(hstridx); + if (dflt) { + UpdateTypeId(bfnode, dflt->GetTypeId()); + UpdateTypeIdx(bfnode, dflt->GetTypeIdx()); + } else { + NOTYETIMPL("can not find exported default"); + } + } 
else if (!bfnode->IsTypeIdModule()) { + TreeNode *exported = NULL; + if (bfnode->IsField()) { + FieldNode *field = static_cast(bfnode); + TreeNode *upper = field->GetUpper(); + TreeNode *fld = field->GetField(); + if (upper->IsTypeIdModule()) { + TreeNode *type = gTypeTable.GetTypeFromTypeIdx(upper->GetTypeIdx()); + Module_Handler *h = mHandler->GetModuleHandler(type); + exported = mXXport->GetExportedNamedNode(h->GetHidx(), fld->GetStrIdx()); + if (exported) { + UpdateTypeId(bfnode, exported->GetTypeId()); + UpdateTypeIdx(bfnode, exported->GetTypeIdx()); + } + } + } else { + exported = mXXport->GetExportedNamedNode(hidx, bfnode->GetStrIdx()); + if (exported) { + SetTypeId(bfnode, exported); + } + } + if (!exported) { + NOTYETIMPL("can not find exported node"); + } + } + + SetTypeId(p, bfnode); + if (afnode) { + SetTypeId(afnode, bfnode); + } + } + } + return node; +} + +// check if node is identifier with name "default"+RENAMINGSUFFIX +static bool IsDefault(TreeNode *node) { + return node->GetStrIdx() == gStringPool.GetStrIdx(std::string("default") + RENAMINGSUFFIX); +} + +ExportNode *TypeInferVisitor::VisitExportNode(ExportNode *node) { + (void) AstVisitor::VisitExportNode(node); + unsigned hidx = mHandler->GetHidx(); + for (unsigned i = 0; i < node->GetPairsNum(); i++) { + XXportAsPairNode *p = node->GetPair(i); + TreeNode *bfnode = p->GetBefore(); + TreeNode *afnode = p->GetAfter(); + if (bfnode) { + switch (bfnode->GetKind()) { + case NK_Struct: + case NK_Function: + case NK_Decl: { + mXXport->AddExportedDeclIds(hidx, bfnode->GetNodeId()); + break; + } + case NK_Declare: { + DeclareNode *declare = static_cast(bfnode); + for (unsigned i = 0; i < declare-> GetDeclsNum(); i++) { + TreeNode *decl = declare->GetDeclAtIndex(i); + if (decl) { + mXXport->AddExportedDeclIds(hidx, decl->GetNodeId()); + } + } + break; + } + case NK_Identifier: { + IdentifierNode *idnode = static_cast(bfnode); + TreeNode *decl = mHandler->FindDecl(idnode); + if (decl) { + mXXport->AddExportedDeclIds(hidx, decl->GetNodeId()); + } + if (IsDefault(bfnode)) { + unsigned stridx = node->GetStrIdx(); + TreeNode *dflt = mXXport->GetExportedDefault(stridx); + if (dflt) { + UpdateTypeId(bfnode, dflt->GetTypeId()); + UpdateTypeIdx(bfnode, dflt->GetTypeIdx()); + } else { + NOTYETIMPL("can not find exported default"); + } + } + break; + } + case NK_TypeAlias: + case NK_UserType: + case NK_Import: + break; + default: { + NOTYETIMPL("new export node kind"); + break; + } + } + + UpdateTypeId(p, bfnode->GetTypeId()); + UpdateTypeIdx(p, bfnode->GetTypeIdx()); + if (afnode) { + UpdateTypeId(afnode, bfnode->GetTypeId()); + UpdateTypeIdx(afnode, bfnode->GetTypeIdx()); + } + } + } + return node; +} + +FieldNode *TypeInferVisitor::VisitFieldNode(FieldNode *node) { + (void) AstVisitor::VisitFieldNode(node); + TreeNode *upper = node->GetUpper(); + IdentifierNode *field = static_cast(node->GetField()); + TreeNode *decl = NULL; + if (!upper) { + decl = mHandler->FindDecl(field); + } else { + if (upper->IsLiteral()) { + LiteralNode *ln = static_cast(upper); + // this.f + if (ln->GetData().mType == LT_ThisLiteral) { + decl = mHandler->FindDecl(field); + } + } + if (!decl) { + unsigned tidx = upper->GetTypeIdx(); + if (tidx) { + TreeNode *n = gTypeTable.GetTypeFromTypeIdx(tidx); + ASTScope *scope = n->GetScope(); + decl = scope->FindDeclOf(field->GetStrIdx()); + } + } + } + if (decl) { + UpdateTypeId(node, decl); + } + UpdateTypeId(field, node); + return node; +} + +ForLoopNode *TypeInferVisitor::VisitForLoopNode(ForLoopNode *node) { + if 
(mFlags & FLG_trace_1) std::cout << "Visiting ForLoopNode, id=" << node->GetNodeId() << "..." << std::endl; + if (node->GetProp() == FLP_JSIn) { + TreeNode *var = node->GetVariable(); + if (var) { + SetTypeId(var, TY_Int); + } + } + (void) AstVisitor::VisitForLoopNode(node); + return node; +} + +FunctionNode *TypeInferVisitor::VisitFunctionNode(FunctionNode *node) { + if (mFlags & FLG_trace_1) std::cout << "Visiting FunctionNode, id=" << node->GetNodeId() << "..." << std::endl; + UpdateTypeId(node, node->IsArray() ? TY_Object : TY_Function); + (void) AstVisitor::VisitFunctionNode(node); + if (node->GetFuncName()) { + SetTypeId(node->GetFuncName(), node->GetTypeId()); + } + return node; +} + +IdentifierNode *TypeInferVisitor::VisitIdentifierNode(IdentifierNode *node) { + if (mFlags & FLG_trace_1) std::cout << "Visiting IdentifierNode, id=" << node->GetNodeId() << "..." << std::endl; + if (mAstOpt->IsLangKeyword(node)) { + return node; + } + TreeNode *type = node->GetType(); + if (type) { + if (type->IsPrimArrayType()) { + SetTypeId(node, TY_Array); + SetUpdated(); + } + } + (void) AstVisitor::VisitIdentifierNode(node); + if (type) { + unsigned tidx = type->GetTypeIdx(); + if (tidx && node->GetTypeIdx() != tidx) { + UpdateTypeId(node, type->GetTypeId()); + UpdateTypeIdx(node, tidx); + SetUpdated(); + } + } + TreeNode *init = node->GetInit(); + if (init) { + if (node->GetTypeId() == TY_None) { + SetTypeId(node, init->GetTypeId()); + } + if (node->GetTypeIdx() == 0) { + SetTypeIdx(node, init->GetTypeIdx()); + } + SetUpdated(); + if (init->IsArrayLiteral()) { + // pass array element info + TypeId tid = mHandler->GetArrayElemTypeId(init->GetNodeId()); + unsigned tidx = mHandler->GetArrayElemTypeIdx(init->GetNodeId()); + UpdateArrayElemTypeMap(node, tid, tidx); + if (type && type->IsArrayType()) { + TreeNode *et = static_cast(type)->GetElemType(); + et->SetTypeId(tid); + et->SetTypeIdx(tidx); + } + } + return node; + } + TreeNode *parent = node->GetParent(); + TreeNode *decl = NULL; + if (parent && parent->IsField()) { + FieldNode *field = static_cast(parent); + TreeNode *upper = field->GetUpper(); + TreeNode *fld = field->GetField(); + ASTScope *scope = NULL; + if (node == upper) { + if (upper->IsThis()) { + // this.f + scope = upper->GetScope(); + // this is the parent with typeid TY_Class + while (scope && scope->GetTree()->GetTypeId() != TY_Class) { + scope = scope->GetParent(); + } + if (scope) { + upper->SetTypeId(scope->GetTree()->GetTypeId()); + upper->SetTypeIdx(scope->GetTree()->GetTypeIdx()); + } + decl = upper; + } else { + decl = mHandler->FindDecl(node, true); + } + } else if (node == fld) { + TreeNode *uptype = gTypeTable.GetTypeFromTypeIdx(upper->GetTypeIdx()); + if (uptype) { + scope = uptype->GetScope(); + if (scope) { + node->SetScope(scope); + decl = mHandler->FindDecl(node, true); + } + } + } else { + NOTYETIMPL("node not in field"); + } + } else { + decl = mHandler->FindDecl(node); + } + + if (decl) { + // check node itself is part of decl + if (decl != parent) { + UpdateTypeId(node, decl); + UpdateTypeIdx(node, decl); + } + // pass IsGeneratorUsed + mHandler->UpdateGeneratorUsed(node->GetNodeId(), decl->GetNodeId()); + } else { + NOTYETIMPL("node not declared"); + MSGNOLOC0(node->GetName()); + } + + return node; +} + +InterfaceNode *TypeInferVisitor::VisitInterfaceNode(InterfaceNode *node) { + UpdateTypeId(node, TY_Class); + for (unsigned i = 0; i < node->GetFieldsNum(); ++i) { + TreeNode *t = node->GetField(i); + (void) VisitClassField(t); + } + (void) 
AstVisitor::VisitInterfaceNode(node); + return node; +} + +IsNode *TypeInferVisitor::VisitIsNode(IsNode *node) { + (void) AstVisitor::VisitIsNode(node); + SetTypeIdx(node, TY_Boolean); + TreeNode *parent = node->GetParent(); + if (parent->IsFunction()) { + FunctionNode *func = static_cast(parent); + if (func->GetRetType() == node) { + TreeNode *right = node->GetRight(); + if (right->IsUserType()) { + TreeNode *id = static_cast(right)->GetId(); + SetTypeIdx(right, id->GetTypeIdx()); + mFuncIsNodeMap[func->GetNodeId()] = id->GetTypeIdx(); + } else { + NOTYETIMPL("isnode right not user type"); + } + } + } + + return node; +} + +NewNode *TypeInferVisitor::VisitNewNode(NewNode *node) { + (void) AstVisitor::VisitNewNode(node); + TreeNode *id = node->GetId(); + if (id) { + UpdateTypeId(node, TY_Class); + if (id->GetTypeIdx() == 0) { + if (id->IsIdentifier()) { + IdentifierNode *idn = static_cast(id); + TreeNode *decl = mHandler->FindDecl(idn); + if (decl && decl->GetTypeIdx() != 0) { + SetTypeIdx(node, decl->GetTypeIdx()); + } + } + } else { + SetTypeIdx(node, id->GetTypeIdx()); + } + } + return node; +} + +StructNode *TypeInferVisitor::VisitStructNode(StructNode *node) { + if (node->GetProp() != SProp_TSEnum) { + SetTypeId(node, TY_Class); + } + for (unsigned i = 0; i < node->GetFieldsNum(); ++i) { + TreeNode *t = node->GetField(i); + (void) VisitClassField(t); + } + if (node->GetProp() == SProp_TSEnum) { + TypeId tid = TY_None; + unsigned tidx = 0; + for (unsigned i = 0; i < node->GetFieldsNum(); ++i) { + TreeNode *t = node->GetField(i); + tid = MergeTypeId(tid, t->GetTypeId()); + tidx = MergeTypeIdx(tidx, t->GetTypeIdx()); + } + if (tid == TY_None) { + tid = TY_Int; + tidx = (unsigned)TY_Int; + } + for (unsigned i = 0; i < node->GetFieldsNum(); ++i) { + TreeNode *t = node->GetField(i); + SetTypeId(t, tid); + SetTypeIdx(t, tidx); + } + TreeNode *id = node->GetStructId(); + if (id) { + SetTypeId(id, node->GetTypeId()); + SetTypeIdx(id, node->GetTypeIdx()); + } + } + (void) AstVisitor::VisitStructNode(node); + return node; +} + +LambdaNode *TypeInferVisitor::VisitLambdaNode(LambdaNode *node) { + if (mFlags & FLG_trace_1) std::cout << "Visiting LambdaNode, id=" << node->GetNodeId() << "..." 
<< std::endl; + UpdateTypeId(node, TY_Function); + (void) AstVisitor::VisitLambdaNode(node); + return node; +} + +LiteralNode *TypeInferVisitor::VisitLiteralNode(LiteralNode *node) { + (void) AstVisitor::VisitLiteralNode(node); + LitId id = node->GetData().mType; + switch (id) { + case LT_IntegerLiteral: + SetTypeId(node, TY_Int); + SetTypeIdx(node, TY_Int); + break; + case LT_FPLiteral: + SetTypeId(node, TY_Float); + SetTypeIdx(node, TY_Float); + break; + case LT_DoubleLiteral: + SetTypeId(node, TY_Double); + SetTypeIdx(node, TY_Double); + break; + case LT_StringLiteral: + SetTypeId(node, TY_String); + SetTypeIdx(node, TY_String); + break; + case LT_VoidLiteral: + SetTypeId(node, TY_Undefined); + break; + default: + break; + } + return node; +} + +ReturnNode *TypeInferVisitor::VisitReturnNode(ReturnNode *node) { + (void) AstVisitor::VisitReturnNode(node); + TreeNode *res = node->GetResult(); + if (res) { + UpdateTypeId(node, res->GetTypeId()); + UpdateTypeIdx(node, res->GetTypeIdx()); + } + TreeNode *tn = mHandler->FindFunc(node); + if (tn) { + FunctionNode *func = static_cast(tn); + // use dummy PrimTypeNode as return type of function if not set to carry return TypeId + if (!func->GetRetType()) { + PrimTypeNode *type = mHandler->NewTreeNode(); + type->SetPrimType(TY_None); + func->SetRetType(type); + } + if (!func->IsGenerator() && !func->IsIterator()) { + UpdateFuncRetTypeId(func, node->GetTypeId(), node->GetTypeIdx()); + if (res) { + // use res to update function's return type + UpdateTypeUseNode(func->GetRetType(), res); + } + } + } + return node; +} + +StructLiteralNode *TypeInferVisitor::VisitStructLiteralNode(StructLiteralNode *node) { + SetTypeId(node, TY_Class); + for (unsigned i = 0; i < node->GetFieldsNum(); ++i) { + FieldLiteralNode *t = node->GetField(i); + (void) VisitFieldLiteralNode(t); + } + (void) AstVisitor::VisitStructLiteralNode(node); + return node; +} + +TemplateLiteralNode *TypeInferVisitor::VisitTemplateLiteralNode(TemplateLiteralNode *node) { + UpdateTypeId(node, TY_String); + (void) AstVisitor::VisitTemplateLiteralNode(node); + return node; +} + +TerOperatorNode *TypeInferVisitor::VisitTerOperatorNode(TerOperatorNode *node) { + TreeNode *ta = node->GetOpndA(); + TreeNode *tb = node->GetOpndB(); + TreeNode *tc = node->GetOpndC(); + (void) VisitTreeNode(ta); + (void) VisitTreeNode(tb); + (void) VisitTreeNode(tc); + UpdateTypeId(node, tb); + return node; +} + +TypeOfNode *TypeInferVisitor::VisitTypeOfNode(TypeOfNode *node) { + UpdateTypeId(node, TY_String); + (void) AstVisitor::VisitTypeOfNode(node); + return node; +} + +TypeAliasNode *TypeInferVisitor::VisitTypeAliasNode(TypeAliasNode *node) { + (void) AstVisitor::VisitTypeAliasNode(node); + UserTypeNode *id = node->GetId(); + TreeNode *alias = node->GetAlias(); + UpdateTypeId(id, alias); + return node; +} + +UnaOperatorNode *TypeInferVisitor::VisitUnaOperatorNode(UnaOperatorNode *node) { + (void) AstVisitor::VisitUnaOperatorNode(node); + OprId op = node->GetOprId(); + TreeNode *ta = node->GetOpnd(); + switch (op) { + case OPR_Plus: + case OPR_Minus: + UpdateTypeId(node, ta->GetTypeId()); + break; + case OPR_PreInc: + case OPR_Inc: + case OPR_PreDec: + case OPR_Dec: + UpdateTypeId(ta, TY_Int); + UpdateTypeId(node, TY_Int); + break; + case OPR_Bcomp: + UpdateTypeId(ta, TY_Int); + UpdateTypeId(node, TY_Int); + break; + case OPR_Not: + UpdateTypeId(ta, TY_Boolean); + UpdateTypeId(node, TY_Boolean); + break; + default: { + NOTYETIMPL("VisitUnaOperatorNode()"); + break; + } + } + return node; +} + +UserTypeNode 
*TypeInferVisitor::VisitUserTypeNode(UserTypeNode *node) { + (void) AstVisitor::VisitUserTypeNode(node); + if (node->GetDims()) { + SetTypeId(node, TY_Array); + SetTypeIdx(node, TY_Array); + } else if (node->GetId()) { + // non-enum user type which keep TY_None + if (node->GetId()->GetTypeId() != TY_None) { + SetTypeId(node, TY_Class); + } + UpdateTypeIdx(node, node->GetId()); + } + TreeNode *parent = node->GetParent(); + if (parent && parent->IsIdentifier()) { + // typeid: merge -> user + if (parent->IsTypeIdMerge()) { + SetTypeId(parent, TY_User); + SetUpdated(); + } else if (parent->IsTypeIdArray()) { + TreeNode *idnode = node->GetId(); + if (idnode && idnode->IsIdentifier()) { + // number, string could have been used + // as usertype identifier instand of primtype by parser + IdentifierNode *id = static_cast(idnode); + TypeId tid = TY_None; + unsigned stridx = id->GetStrIdx(); + if (stridx == gStringPool.GetStrIdx("number")) { + tid = TY_Number; + } else if (stridx == gStringPool.GetStrIdx("string")) { + tid = TY_String; + } + if (tid != TY_None) { + PrimArrayTypeNode *type = mHandler->NewTreeNode(); + PrimTypeNode *pt = mHandler->NewTreeNode(); + pt->SetPrimType(tid); + type->SetPrim(pt); + type->SetDims(node->GetDims()); + IdentifierNode *parentid = static_cast(parent); + parentid->SetType(type); + + // clean up parent info + id->SetParent(NULL); + node->SetParent(NULL); + + // set updated + SetUpdated(); + } + } + } + } + return node; +} + +UserTypeNode *ShareUTVisitor::VisitUserTypeNode(UserTypeNode *node) { + // skip it + return node; + + (void) AstVisitor::VisitUserTypeNode(node); + + // skip for array + if (node->GetDims()) { + return node; + } + + TreeNode *idnode = node->GetId(); + if (idnode && idnode->IsIdentifier()) { + IdentifierNode *id = static_cast(idnode); + ASTScope *scope = id->GetScope(); + TreeNode *type = scope->FindTypeOf(id->GetStrIdx()); + if (type && type != node && type->IsUserType()) { + UserTypeNode *ut = static_cast(type); + if (node->GetType() == ut->GetType()) { + // do not share if there are generics + if (ut->GetTypeGenericsNum() == 0) { + return ut; + } + } + } + } + return node; +} + +bool CheckTypeVisitor::IsCompatible(TypeId tid, TypeId target) { + if (tid == target) { + return true; + } + + bool result = false; + switch (target) { + case TY_Number: { + switch (tid) { + case TY_None: + case TY_Int: + case TY_Long: + case TY_Float: + case TY_Double: + result = true; + break; + default: + result = false; + break; + } + break; + } + case TY_Any: + // TY_Any or unspecified matches everything + result = true; + break; + default: + // ok if same typeid + result = (target == tid); + break; + } + + // tid being TY_None means untouched + result = result || (tid == TY_None); + + return result; +} + +IdentifierNode *CheckTypeVisitor::VisitIdentifierNode(IdentifierNode *node) { + (void) AstVisitor::VisitIdentifierNode(node); + + TreeNode *d = mHandler->FindDecl(node); + if (d && d->IsDecl()) { + DeclNode *decl = static_cast(d); + TreeNode *var = decl->GetVar(); + if (var && var != node && var->IsIdentifier()) { + IdentifierNode *id = static_cast(var); + bool result = false; + TreeNode *type = id->GetType(); + if (type) { + TypeId id = node->GetTypeId(); + TypeId target = TY_None; + switch (type->GetKind()) { + case NK_PrimType: { + PrimTypeNode *ptn = static_cast(type); + target = ptn->GetPrimType(); + break; + } + case NK_UserType: { + target = var->GetTypeId(); + break; + } + default: { + target = var->GetTypeId(); + break; + } + } + result = 
IsCompatible(id, target); + if (!result || (mFlags & FLG_trace_3)) { + std::cout << " Type Compatiblity : " << result << " : " + << AstDump::GetEnumTypeId(target) << " " + << AstDump::GetEnumTypeId(id) << std::endl; + } + } + } + } + return node; +} + +IdentifierNode *ChangeTypeIdxVisitor::VisitIdentifierNode(IdentifierNode *node) { + (void) AstVisitor::VisitIdentifierNode(node); + if (node->GetStrIdx() == mStrIdx) { + mHandler->GetUtil()->SetTypeIdx(node, mTypeIdx); + } + return node; +} + +} diff --git a/src/MapleFE/astopt/src/ast_util.cpp b/src/MapleFE/astopt/src/ast_util.cpp new file mode 100644 index 0000000000000000000000000000000000000000..84faa11a789b3d1396105073d4d9c3d55b59e194 --- /dev/null +++ b/src/MapleFE/astopt/src/ast_util.cpp @@ -0,0 +1,102 @@ +/* +* Copyright (C) [2021] Futurewei Technologies, Inc. All rights reverved. +* +* OpenArkFE is licensed under the Mulan PSL v2. +* You can use this software according to the terms and conditions of the Mulan PSL v2. +* You may obtain a copy of Mulan PSL v2 at: +* +* http://license.coscl.org.cn/MulanPSL2 +* +* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +* FIT FOR A PARTICULAR PURPOSE. +* See the Mulan PSL v2 for more details. +*/ + +#include "ast.h" +#include "ast_util.h" +#include "ast_handler.h" +#include "gen_astdump.h" + +namespace maplefe { + +#define CPPKEYWORD(K) stridx = gStringPool.GetStrIdx(#K); mCppKeywords.insert(stridx); +void AST_Util::BuildCppKeyWordSet() { + unsigned stridx; +#include "cpp_keywords.def" +} + +bool AST_Util::IsDirectField(TreeNode *node) { + return mHandler->IsDirectField(node); +} + +bool AST_Util::IsCppKeyWord(unsigned stridx) { + return mCppKeywords.find(stridx) != mCppKeywords.end(); +} + +bool AST_Util::IsCppKeyWord(std::string name) { + unsigned stridx = gStringPool.GetStrIdx(name); + return mCppKeywords.find(stridx) != mCppKeywords.end(); +} + +// +bool AST_Util::IsCppName(std::string name) { + // check first char [a-z][A-Z]_ + char c = name[0]; + if (!((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || c == '_')) { + return false; + } + + // check char [a-z][A-Z][0-9]_ + for (int i = 1; i < name.length(); i++) { + char c = name[i]; + if (!((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || (c >= '0' && c <= '9') || c == '_')) { + return false; + } + } + + return true; +} + +bool AST_Util::IsCppField(TreeNode *node) { + // check if it is a direct field + if (!IsDirectField(node)) { + return false; + } + + // check if it's name is a C++ keyworld + unsigned stridx = node->GetStrIdx(); + if (IsCppKeyWord(stridx)) { + return false; + } + + // check if it's name is a valid C++ name + std::string name = gStringPool.GetStringFromStrIdx(stridx); + if (!IsCppName(name)) { + return false; + } + return true; +} + +void AST_Util::SetTypeId(TreeNode *node, TypeId tid) { + if (tid != TY_None && node && node->GetTypeId() != tid) { + if (mFlags & FLG_trace_3) { + std::cout << " NodeId : " << node->GetNodeId() << " Set TypeId : " + << AstDump::GetEnumTypeId(node->GetTypeId()) << " --> " + << AstDump::GetEnumTypeId(tid) << std::endl; + } + node->SetTypeId(tid); + } +} + +void AST_Util::SetTypeIdx(TreeNode *node, unsigned tidx) { + if (node && node->GetTypeIdx() != tidx) { + if (mFlags & FLG_trace_3) { + std::cout << " NodeId : " << node->GetNodeId() << " Set TypeIdx : " + << node->GetTypeIdx() << " --> " << tidx << std::endl; + } + node->SetTypeIdx(tidx); + } +} + +} diff --git 
a/src/MapleFE/astopt/src/ast_xxport.cpp b/src/MapleFE/astopt/src/ast_xxport.cpp new file mode 100644 index 0000000000000000000000000000000000000000..3b828851ba70dbec2137c069b8d0b647e1f34ca0 --- /dev/null +++ b/src/MapleFE/astopt/src/ast_xxport.cpp @@ -0,0 +1,546 @@ +/* +* Copyright (C) [2021-2022] Futurewei Technologies, Inc. All rights reverved. +* +* OpenArkFE is licensed under the Mulan PSL v2. +* You can use this software according to the terms and conditions of the Mulan PSL v2. +* You may obtain a copy of Mulan PSL v2 at: +* +* http://license.coscl.org.cn/MulanPSL2 +* +* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +* FIT FOR A PARTICULAR PURPOSE. +* See the Mulan PSL v2 for more details. +*/ + +#include +#include +#include +#include +#include "ast_handler.h" +#include "ast_util.h" +#include "astopt.h" +#include "ast_xxport.h" +#include "gen_astdump.h" + +namespace maplefe { + +AST_XXport::AST_XXport(AstOpt *o, unsigned f) { + mAstOpt = o; + mASTHandler = o->GetASTHandler(); + mFlags = f; +} + +unsigned AST_XXport::GetModuleNum() { + return mASTHandler->GetSize(); +} + +void AST_XXport::BuildModuleOrder() { + // setup module stridx + SetModuleStrIdx(); + + // collect dependent info + CollectXXportNodes(); + + // collect dependent info + AddHandler(); + + // sort handlers with dependency + SortHandler(); +} + +void AST_XXport::SetModuleStrIdx() { + for (int i = 0; i < GetModuleNum(); i++) { + Module_Handler *handler = mASTHandler->GetModuleHandler(i); + ModuleNode *module = handler->GetASTModule(); + + // setup module node stridx with module file name + unsigned stridx = gStringPool.GetStrIdx(module->GetFilename()); + module->SetStrIdx(stridx); + mStrIdx2HandlerIdxMap[stridx] = i; + } +} + +TreeNode *AST_XXport::FindExportedDecl(unsigned hidx, unsigned stridx) { + for (auto nid : mExportedDeclIds[hidx]) { + TreeNode *node = mAstOpt->GetNodeFromNodeId(nid); + if (node->GetStrIdx() == stridx) { + return node; + } + } + return NULL; +} + +void AST_XXport::CollectXXportNodes() { + for (int i = 0; i < GetModuleNum(); i++) { + Module_Handler *handler = mASTHandler->GetModuleHandler(i); + ModuleNode *module = handler->GetASTModule(); + + XXportBasicVisitor visitor(this, handler, i, mFlags); + visitor.Visit(module); + } +} + +void AST_XXport::AddHandler() { + for (unsigned hidx = 0; hidx < GetModuleNum(); hidx++) { + for (auto it : mImportNodeSets[hidx]) { + ImportNode *node = it; + UpdateDependency(hidx, node); + } + for (auto it : mExportNodeSets[hidx]) { + ExportNode *node = it; + UpdateDependency(hidx, node); + } + } +} + +void AST_XXport::SortHandler() { + for (int i = 0; i < GetModuleNum(); i++) { + if (mHandlersIdxInOrder.size() == 0) { + mHandlersIdxInOrder.push_back(i); + continue; + } + + bool added = false; + std::list::iterator it = mHandlersIdxInOrder.begin(); + for (; it != mHandlersIdxInOrder.end(); it++) { + unsigned idx = *it; + // check if handler idx has dependency on i + if (mHandlerIdx2DependentHandlerIdxMap[idx].find(i) != mHandlerIdx2DependentHandlerIdxMap[i].end()) { + mHandlersIdxInOrder.insert(it, i); + added = true; + break; + } + } + + if (!added) { + mHandlersIdxInOrder.push_back(i); + } + } + + // copy result to AstOpt + std::list::iterator it = mHandlersIdxInOrder.begin(); + for (; it != mHandlersIdxInOrder.end(); it++) { + unsigned idx = *it; + Module_Handler *h = mASTHandler->GetModuleHandler(idx); + mAstOpt->AddModuleHandler(h); + 
} + + if (mFlags & FLG_trace_2) { + std::cout << "============== Module Order ==============" << std::endl; + for (auto hidx: mHandlersIdxInOrder) { + Module_Handler *handler = mASTHandler->GetModuleHandler(hidx); + ModuleNode *module = handler->GetASTModule(); + std::cout << "module : " << gStringPool.GetStringFromStrIdx(module->GetStrIdx()) << std::endl; + for (auto nid: mExportedDeclIds[hidx]) { + TreeNode * node = mAstOpt->GetNodeFromNodeId(nid); + std::cout << " export : " << gStringPool.GetStringFromStrIdx(node->GetStrIdx()) << " " << nid << std::endl; + } + } + } +} + +unsigned AST_XXport::GetHandleIdxFromStrIdx(unsigned stridx) { + if (mStrIdx2HandlerIdxMap.find(stridx) != mStrIdx2HandlerIdxMap.end()) { + return mStrIdx2HandlerIdxMap[stridx]; + } + return FLG_no_imported ? 0 : DEFAULTVALUE; +} + +void AST_XXport::CollectXXportInfo(unsigned hidx) { + CollectImportInfo(hidx); + CollectExportInfo(hidx); +} + +// check if node is identifier with name "default" +static bool IsDefault(TreeNode *node) { + return node->GetStrIdx() == gStringPool.GetStrIdx("default"); +} + +void AST_XXport::CollectImportInfo(unsigned hidx) { + Module_Handler *handler = mASTHandler->GetModuleHandler(hidx); + ModuleNode *module = handler->GetASTModule(); + + for (auto it : mImportNodeSets[hidx]) { + ImportNode *node = it; + TreeNode *target = GetTarget(node); + unsigned stridx = (target && target->GetStrIdx()) ? target->GetStrIdx() : module->GetStrIdx(); + XXportInfo *info = new XXportInfo(stridx, node->GetNodeId());; + + unsigned targethidx = DEFAULTVALUE;; + Module_Handler *targethandler = NULL; + ModuleNode *targetmodule = NULL; + + if (target) { + targethidx = GetHandleIdxFromStrIdx(target->GetStrIdx()); + targethandler = mASTHandler->GetModuleHandler(targethidx); + targetmodule = targethandler->GetASTModule(); + } + + for (unsigned i = 0; i < node->GetPairsNum(); i++) { + XXportAsPairNode *p = node->GetPair(i); + TreeNode *bfnode = p->GetBefore(); + TreeNode *afnode = p->GetAfter(); + MASSERT(bfnode && "before node NULL for default"); + + // import * as MM from "./M"; + // bfnode represents a module + if (p->IsEverything()) { + info->SetEverything(); + + MASSERT(target && "everything export no target"); + SetIdStrIdx2ModuleStrIdx(bfnode->GetStrIdx(), target->GetStrIdx()); + + bfnode->SetTypeId(TY_Module); + bfnode->SetTypeIdx(targetmodule->GetTypeIdx()); + + std::pair pnid(bfnode->GetNodeId(), 0); + info->mNodeIdPairs.insert(pnid); + } else { + // reformat default import + if (!p->IsDefault()) { + if (IsDefault(bfnode) ) { + p->SetIsDefault(true); + p->SetBefore(afnode); + p->SetAfter(NULL); + } + } + + bfnode = p->GetBefore(); + afnode = p->GetAfter(); + if (p->IsDefault()) { + TreeNode *exported = GetExportedDefault(targetmodule->GetStrIdx()); + if (exported) { + std::pair pnid(exported->GetNodeId(), bfnode->GetNodeId()); + info->mNodeIdPairs.insert(pnid); + TypeId tid = exported->GetTypeId(); + bfnode->SetTypeId(tid); + unsigned tidx = exported->GetTypeIdx(); + bfnode->SetTypeIdx(tidx); + } else { + NOTYETIMPL("failed to find the exported - default"); + } + } else if (afnode) { + // import bfnode as afnode + TreeNode *exported = FindExportedDecl(targethidx, bfnode->GetStrIdx()); + if (!exported) { + NOTYETIMPL("need to extract exported - bfnode M.x"); + exported = bfnode; + } + std::pair pnid(exported->GetNodeId(), afnode->GetNodeId()); + info->mNodeIdPairs.insert(pnid); + TypeId tid = exported->GetTypeId(); + unsigned tidx = exported->GetTypeIdx(); + bfnode->SetTypeId(tid); + 
bfnode->SetTypeIdx(tidx); + afnode->SetTypeId(tid); + afnode->SetTypeIdx(tidx); + } else if (bfnode) { + // import bfnode + TreeNode *exported = FindExportedDecl(targethidx, bfnode->GetStrIdx()); + if (!exported) { + NOTYETIMPL("need to extract exported - bfnode M.x"); + exported = bfnode; + } + std::pair pnid(exported->GetNodeId(), bfnode->GetNodeId()); + info->mNodeIdPairs.insert(pnid); + TypeId tid = exported->GetTypeId(); + unsigned tidx = exported->GetTypeIdx(); + bfnode->SetTypeId(tid); + bfnode->SetTypeIdx(tidx); + } else { + NOTYETIMPL("failed to find the exported"); + } + } + // add afnode, bfnode as a decl + if (afnode) { + AddImportedDeclIds(handler->GetHidx(), afnode->GetNodeId()); + handler->AddNodeId2DeclMap(afnode->GetNodeId(), afnode); + } + AddImportedDeclIds(handler->GetHidx(), bfnode->GetNodeId()); + handler->AddNodeId2DeclMap(bfnode->GetNodeId(), bfnode); + } + + mImports[hidx].insert(info); + } +} + +TreeNode *AST_XXport::GetIdentifier(TreeNode *node) { + switch (node->GetKind()) { + case NK_Decl: { + DeclNode *decl = static_cast(node); + node = GetIdentifier(decl->GetVar()); + break; + } + case NK_Identifier: + break; + case NK_TypeAlias: { + TypeAliasNode *ta = static_cast(node); + node = GetIdentifier(ta->GetId()); + break; + } + case NK_UserType: { + UserTypeNode *ut = static_cast(node); + node = GetIdentifier(ut->GetId()); + break; + } + default: + NOTYETIMPL("need to extract identifier"); + break; + } + return node; +} + +void AST_XXport::CollectExportInfo(unsigned hidx) { + Module_Handler *handler = mASTHandler->GetModuleHandler(hidx); + ModuleNode *module = handler->GetASTModule(); + + for (auto it : mExportNodeSets[hidx]) { + ExportNode *node = it; + TreeNode *target = GetTarget(node); + unsigned stridx = (target && target->GetStrIdx()) ? 
target->GetStrIdx() : module->GetStrIdx(); + XXportInfo *info = new XXportInfo(stridx, node->GetNodeId());; + + unsigned targethidx = DEFAULTVALUE; + Module_Handler *targethandler = NULL; + ModuleNode *targetmodule = NULL; + + if (target) { + targethidx = GetHandleIdxFromStrIdx(target->GetStrIdx()); + targethandler = mASTHandler->GetModuleHandler(targethidx); + targetmodule = targethandler->GetASTModule(); + } + + for (unsigned i = 0; i < node->GetPairsNum(); i++) { + XXportAsPairNode *p = node->GetPair(i); + TreeNode *bfnode = p->GetBefore(); + TreeNode *afnode = p->GetAfter(); + + // export import a = M.a + if (bfnode && bfnode->IsImport()) { + ImportNode *imp = static_cast(bfnode); + for (unsigned j = 0; j < imp->GetPairsNum(); j++) { + XXportAsPairNode *q = imp->GetPair(i); + TreeNode *bf = q->GetBefore(); + TreeNode *af = q->GetAfter(); + std::pair pnid(af->GetNodeId(), bf->GetNodeId()); + info->mNodeIdPairs.insert(pnid); + } + continue; + } + + if (p->IsEverything()) { + info->SetEverything(); + if (bfnode) { + // export * as MM from "./M"; + // bfnode represents a module + MASSERT(target && "everything export no target"); + SetIdStrIdx2ModuleStrIdx(bfnode->GetStrIdx(), target->GetStrIdx()); + + bfnode->SetTypeId(TY_Module); + bfnode->SetTypeIdx(targetmodule->GetTypeIdx()); + AddExportedDeclIds(hidx, bfnode->GetNodeId()); + } else { + // export * from "./M" + for (auto k : mExportedDeclIds[targethidx]) { + AddExportedDeclIds(hidx, k); + } + continue; + } + } + + // reformat default export + if (p->IsDefault()) { + p->SetIsRef(false); + } else if (afnode && IsDefault(afnode)) { + p->SetIsDefault(true); + p->SetBefore(bfnode); + p->SetAfter(NULL); + } + + afnode = p->GetAfter(); + bfnode = p->GetBefore(); + if (!bfnode->IsIdentifier()) { + bfnode = GetIdentifier(bfnode); + } + + unsigned exportednid = (afnode ? 
afnode->GetNodeId(): bfnode->GetNodeId()); + if (p->IsDefault()) { + info->mDefaultNodeId = exportednid; + } else if (mExportNodeSets[hidx].size() == 1 && node->GetPairsNum() == 1) { + info->mDefaultNodeId = bfnode->GetNodeId(); + std::pair pnid(bfnode->GetNodeId(), exportednid); + info->mNodeIdPairs.insert(pnid); + } else { + std::pair pnid(bfnode->GetNodeId(), exportednid); + info->mNodeIdPairs.insert(pnid); + } + AddExportedDeclIds(hidx, exportednid); + } + + if (info->mDefaultNodeId || info->mNodeIdPairs.size()) { + mExports[hidx].insert(info); + } else { + delete info; + } + } +} + +TreeNode *AST_XXport::GetTarget(TreeNode *node) { + TreeNode *tree = NULL; + if (node->IsImport()) { + tree = static_cast(node)->GetTarget(); + } else if (node->IsExport()) { + tree = static_cast(node)->GetTarget(); + } + return tree; +} + +// borrowed from ast2cpp +std::string AST_XXport::GetTargetFilename(unsigned hidx, TreeNode *node) { + std::string filename; + if (node && node->IsLiteral()) { + LiteralNode *lit = static_cast(node); + LitData data = lit->GetData(); + filename = AstDump::GetEnumLitData(data); + filename += ".ts"s; + if(filename.front() != '/') { + Module_Handler *handler = mASTHandler->GetModuleHandler(hidx); + ModuleNode *module = handler->GetASTModule(); + std::filesystem::path p = module->GetFilename(); + try { + p = std::filesystem::canonical(p.parent_path() / filename); + filename = p.string(); + } + catch(std::filesystem::filesystem_error const& ex) { + // Ignore std::filesystem::filesystem_error exception + // keep filename without converting it to a cannonical path + } + } + } + return filename; +} + +// set up import/export node stridx with target module file name stridx +void AST_XXport::UpdateDependency(unsigned hidx, TreeNode *node) { + TreeNode *target = GetTarget(node); + if (target) { + std::string name = GetTargetFilename(hidx, target); + + // store name's string index in node + unsigned stridx = gStringPool.GetStrIdx(name); + node->SetStrIdx(stridx); + target->SetStrIdx(stridx); + + // update handler dependency map + unsigned dep = GetHandleIdxFromStrIdx(stridx); + mHandlerIdx2DependentHandlerIdxMap[hidx].insert(dep); + + if (node->IsImport()) { + Module_Handler *handler = mASTHandler->GetModuleHandler(dep); + ModuleNode *module = handler->GetASTModule(); + + node->SetTypeId(module->GetTypeId()); + node->SetTypeIdx(module->GetTypeIdx()); + } + } +} + +unsigned AST_XXport::ExtractTargetStrIdx(TreeNode *node) { + unsigned stridx = 0; + if (node->IsField()) { + FieldNode *fld = static_cast(node); + TreeNode *upper = fld->GetUpper(); + stridx = GetModuleStrIdxFromIdStrIdx(upper->GetStrIdx()); + if (stridx) { + unsigned hidx = GetHandleIdxFromStrIdx(stridx); + Module_Handler *handler = mASTHandler->GetModuleHandler(hidx); + ModuleNode *module = handler->GetASTModule(); + upper->SetTypeId(TY_Module); + upper->SetTypeIdx(module->GetTypeIdx()); + } + } + return stridx; +} + +// hstridx is the string index of handler/module name with full path +TreeNode *AST_XXport::GetExportedDefault(unsigned hstridx) { + if (mStrIdx2HandlerIdxMap.find(hstridx) != mStrIdx2HandlerIdxMap.end()) { + unsigned hidx = GetHandleIdxFromStrIdx(hstridx); + for (auto it : mExports[hidx]) { + if (it->mDefaultNodeId) { + return mAstOpt->GetNodeFromNodeId(it->mDefaultNodeId); + } + } + } + return NULL; +} + +// hidx is the index of handler, string is the string index of identifier +TreeNode *AST_XXport::GetExportedNamedNode(unsigned hidx, unsigned stridx) { + for (auto it : mExports[hidx]) { + for (auto it1 
: it->mNodeIdPairs) { + unsigned nid = it1.first; + TreeNode *node = mAstOpt->GetNodeFromNodeId(nid); + if (node->GetStrIdx() == stridx ) { + return node; + } + } + } + return NULL; +} + +// hidx is the index of handler, string is the string index of identifier +TreeNode *AST_XXport::GetExportedNodeFromImportedNode(unsigned hidx, unsigned nid) { + TreeNode *node = mAstOpt->GetNodeFromNodeId(nid); + + for (auto it : mImports[hidx]) { + if (it->mDefaultNodeId == nid) { + node = GetExportedDefault(it->mModuleStrIdx); + return node; + } + for (auto it1 : it->mNodeIdPairs) { + unsigned nid2 = it1.second; + if (nid2 == nid) { + unsigned nid1 = it1.first; + node = mAstOpt->GetNodeFromNodeId(nid1); + return node; + } + } + } + return node; +} + +ImportNode *XXportBasicVisitor::VisitImportNode(ImportNode *node) { + (void) AstVisitor::VisitImportNode(node); + mASTXXport->mImportNodeSets[mHandlerIdx].push_back(node); + + TreeNode *target = mASTXXport->GetTarget(node); + if (!target) { + // extract target info for + // import Bar = require("./Foo"); + for (unsigned i = 0; i < node->GetPairsNum(); i++) { + XXportAsPairNode *p = node->GetPair(i); + TreeNode *bfnode = p->GetBefore(); + if (bfnode && bfnode->IsLiteral()) { + LiteralNode *lit = static_cast(bfnode); + LitId id = lit->GetData().mType; + if (id == LT_StringLiteral) { + node->SetTarget(bfnode); + p->SetBefore(p->GetAfter()); + p->SetAfter(NULL); + p->SetIsDefault(true); + } + } + } + } + return node; +} + +ExportNode *XXportBasicVisitor::VisitExportNode(ExportNode *node) { + (void) AstVisitor::VisitExportNode(node); + mASTXXport->mExportNodeSets[mHandlerIdx].push_back(node); + return node; +} + +} diff --git a/src/MapleFE/astopt/src/astopt.cpp b/src/MapleFE/astopt/src/astopt.cpp new file mode 100644 index 0000000000000000000000000000000000000000..bd2ab47d290cfe4fc2577c821046c192246c9ba6 --- /dev/null +++ b/src/MapleFE/astopt/src/astopt.cpp @@ -0,0 +1,119 @@ +/* +* Copyright (C) [2020] Futurewei Technologies, Inc. All rights reverved. +* +* OpenArkFE is licensed under the Mulan PSL v2. +* You can use this software according to the terms and conditions of the Mulan PSL v2. +* You may obtain a copy of Mulan PSL v2 at: +* +* http://license.coscl.org.cn/MulanPSL2 +* +* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +* FIT FOR A PARTICULAR PURPOSE. +* See the Mulan PSL v2 for more details. 
+*/ + +#include "astopt.h" +#include "typetable.h" +#include "ast_handler.h" +#include "ast_xxport.h" +#include "gen_astgraph.h" +#include "gen_aststore.h" +#include "gen_astload.h" + +namespace maplefe { + +class ImportedFiles; + +AstOpt::AstOpt(AST_Handler *h, unsigned f) { + mASTHandler = h; + h->SetAstOpt(this); + mASTXXport = new AST_XXport(this, f); + mFlags = f; +} + +unsigned AstOpt::GetModuleNum() { + return mASTHandler->GetSize(); +} + +// starting point of AST +void AstOpt::ProcessAST(unsigned flags) { + // loop through module handlers + for (int i = 0; i < GetModuleNum(); i++) { + Module_Handler *handler = mASTHandler->GetModuleHandler(i); + ModuleNode *module = handler->GetASTModule(); + + mFlags = flags; + if (mFlags & FLG_trace_3) { + std::cout << "============= in ProcessAST ===========" << std::endl; + std::cout << "srcLang : " << module->GetSrcLangString() << std::endl; + + for(unsigned i = 0; i < module->GetTreesNum(); i++) { + TreeNode *tnode = module->GetTree(i); + tnode->Dump(0); + std::cout << std::endl; + } + } + } + + // build dependency of modules + PreprocessModules(); + + for (auto handler: mHandlersInOrder) { + // basic analysis + handler->BasicAnalysis(); + + // build CFG + handler->BuildCFG(); + + // control flow analysis + handler->ControlFlowAnalysis(); + + // type inference + handler->TypeInference(); + + // data flow analysis + handler->DataFlowAnalysis(); + } + + for (auto handler: mHandlersInOrder) { + ModuleNode *module = handler->GetASTModule(); + + AstStore saveAst(module); + saveAst.StoreInAstBuf(); + } + + return; +} + +void AstOpt::PreprocessModules() { + // initialize gTypeTable with builtin types + gTypeTable.AddPrimAndBuiltinTypes(); + + // collect language keywords +#undef LANGKEYWORD +#define LANGKEYWORD(K) mLangKeywords.insert(gStringPool.GetStrIdx(#K)); +#include "lang_keywords.def" + + // scan through modules to setup mNodeId2NodeMap + BuildNodeIdToNodeVisitor visitor(this, mFlags); + + for (int i = 0; i < GetModuleNum(); i++) { + Module_Handler *handler = mASTHandler->GetModuleHandler(i); + // fill handler index + handler->SetHidx(i); + + ModuleNode *module = handler->GetASTModule(); + // add module as type for import/export purpose + module->SetTypeId(TY_Module); + gTypeTable.AddType(module); + + visitor.SetHandler(handler); + visitor.Visit(module); + } + + // list modules according to dependency + mASTXXport->BuildModuleOrder(); +} + +} diff --git a/src/MapleFE/java/alt_tokens.spec b/src/MapleFE/autogen/alt_tokens.spec similarity index 95% rename from src/MapleFE/java/alt_tokens.spec rename to src/MapleFE/autogen/alt_tokens.spec index 831eb7a829637f416ba53a862d6cf60a54dd5bc9..26dc069662815249b01beb67ee04558a1d88bc48 100644 --- a/src/MapleFE/java/alt_tokens.spec +++ b/src/MapleFE/autogen/alt_tokens.spec @@ -17,6 +17,8 @@ // This file will be included in token_gen.cpp. // This file defines the alternative tokens of some special tokens. Please refer // shared/include/token.h for the definition of alternative tokens. 
+// +// Right now this is language independent {">>", 2, ">"}, {">>>", 3, ">"} diff --git a/src/MapleFE/autogen/include/auto_gen.h b/src/MapleFE/autogen/include/auto_gen.h index fc5269ec5ad24f74ddb66f6fcfb1db24ea02dd50..40da81fb2e91567b32fc3cf14c1aa1015eec459c 100644 --- a/src/MapleFE/autogen/include/auto_gen.h +++ b/src/MapleFE/autogen/include/auto_gen.h @@ -25,10 +25,8 @@ #include "iden_gen.h" #include "literal_gen.h" #include "type_gen.h" -#include "block_gen.h" #include "separator_gen.h" #include "operator_gen.h" -#include "expr_gen.h" #include "stmt_gen.h" #include "keyword_gen.h" #include "attr_gen.h" @@ -43,21 +41,23 @@ private: LiteralGen *mLitGen; TypeGen *mTypeGen; AttrGen *mAttrGen; - BlockGen *mBlockGen; SeparatorGen *mSeparatorGen; OperatorGen *mOperatorGen; KeywordGen *mKeywordGen; - ExprGen *mExprGen; StmtGen *mStmtGen; TokenGen *mTokenGen; std::vector mGenArray; SPECParser *mParser; + std::string mLang; // the language(i.e. directory) + public: AutoGen(SPECParser *p) : mParser(p) {} ~AutoGen(); + void SetLang(std::string s) {mLang = s;} + void Init(); void Run(); void BackPatch(); diff --git a/src/MapleFE/autogen/include/base_gen.h b/src/MapleFE/autogen/include/base_gen.h index 07c08121cf0da65352cb3aa6dac3d1a659c1e37f..fa8a76229fb18f00d118a955cc1836c72a657ad4 100644 --- a/src/MapleFE/autogen/include/base_gen.h +++ b/src/MapleFE/autogen/include/base_gen.h @@ -50,7 +50,7 @@ class SPECParser;; // the current type of element being read. This helps to read ')' and '+'. // // Possibly new syntax in the future also need this help. -typedef enum { +typedef enum ST_status { ST_Set, // in ( , , ) ST_Concatenate, // in E + E + E ST_Null @@ -67,7 +67,7 @@ public: FormattedBuffer mRuleTableCpp; FormattedBuffer mRuleTableHeader; - std::string mSpecFile; + std::string mSpecFile; FileWriter mHeaderFile; FileWriter mCppFile; StringPool *mStringPool; diff --git a/src/MapleFE/autogen/include/base_struct.h b/src/MapleFE/autogen/include/base_struct.h index 957810021afa54fdd7b561281947651e4cb38644..63e49b4444d3247ce8bcc7087a63338aae98ed74 100644 --- a/src/MapleFE/autogen/include/base_struct.h +++ b/src/MapleFE/autogen/include/base_struct.h @@ -37,7 +37,7 @@ class StringPool; // 2-tuple or 3 tuple. The data in each StructElem could be number, string, // string literal (those quoted with "). // -// Each type of .spec has its own definition of STRUCT and so its data in +// Each type of .spec has its own definition of STRUCT and so its data in // StructElem. So the parsing of a StructElem is left to each .spec parser. // But we do provide some common functions of parsing certain StructElem. // @@ -114,7 +114,7 @@ public: void SetName(const char *s) { mName = s; } bool Empty() {return mStructElems.size() == 0;} void Dump(); - void Sort(unsigned i); // sort by the length of i-th element which is astring + void Sort(unsigned i); // sort by the length of i-th element which is astring }; } diff --git a/src/MapleFE/autogen/include/buffer2write.h b/src/MapleFE/autogen/include/buffer2write.h index c954eda99ae061ed0b48b555877845cf42d23ff2..dcf0cf2b16686159477cabc6b9abbffb2484c32f 100644 --- a/src/MapleFE/autogen/include/buffer2write.h +++ b/src/MapleFE/autogen/include/buffer2write.h @@ -53,12 +53,12 @@ class BaseGen; // default construction of buffers. 
// ////////////////////////////////////////////////////////////////////////// -typedef enum { +typedef enum WriteStatus { WR_GOOD, WR_OVERSIZE }WriteStatus; -typedef enum { +typedef enum SimpleBufferType { SB_Line, SB_Rect }SimpleBufferType; @@ -75,7 +75,7 @@ public: public: SimpleBuffer(unsigned ind) : mIndentation(ind) {} - ~SimpleBuffer() {} + virtual ~SimpleBuffer() = default; virtual WriteStatus AddChar(const char) = 0; virtual WriteStatus AddString(const char*) = 0; @@ -86,7 +86,7 @@ public: virtual bool CurrentLineEmpty() = 0; }; - + // A single line buffer, over MAX_LINE_LIMIT. Theoritically the LineBuffer // can extend to unlimited length. class LineBuffer : public SimpleBuffer { @@ -127,7 +127,7 @@ class RectBuffer : public SimpleBuffer { public: unsigned mUsed; // a LINES_PER_BLOCK bitmap telling which lines are used. char mData[MAX_LINE_LIMIT * LINES_PER_BLOCK]; - char *mCurrLine; + char *mCurrLine; unsigned mCurrLineSize; // how many char-s in the line public: @@ -161,7 +161,7 @@ public: union { SimpleBuffer *mSimple; FormattedBuffer *mFormatted; - }mData; + }mData; bool mIsSimple; public: @@ -194,10 +194,10 @@ public: ///////////////////////////////////////////////////////////////////////////// // [FormattedBuffer] // -// A formatted buffer is composed of a set of SimpleBuffer or nested +// A formatted buffer is composed of a set of SimpleBuffer or nested // formatted buffer. Its goal is to provide interface for buffer users. // The inside details of the OneBuffer is protected. -// +// // The nested FormattedBuffer is never modified by the parent FormattedBuffer. // So the children should be fixed before added into the parent. @@ -216,7 +216,7 @@ public: FormattedBuffer(unsigned ind = 0, bool iscomment = false); ~FormattedBuffer(); -public: +public: void AddNestedBuffer(FormattedBuffer *); char* NewLine(); @@ -312,7 +312,7 @@ public: // FunctionBuffer // // A function has three buffers. // // 1) Declaration in .h file // -// 2) Header in .cpp file // +// 2) Header in .cpp file // // 3) Body in .cpp file // // Here is an example: // // void foo (int a) <-- Header // @@ -332,7 +332,7 @@ public: void AddReturnType(const char*); void AddFunctionName(const char*); - void AddParameter(const char* type, const char *name, bool end = false); + void AddParameter(const char* type, const char *name, bool end = false); ScopedBuffer* GetBody() { return &mBody; } }; diff --git a/src/MapleFE/autogen/include/exprbuffer.h b/src/MapleFE/autogen/include/exprbuffer.h index 6fd37327fac5701ac0c9efe4869d7e84a983b036..da0b7133769f3e62486563c43c6bb10a0a612f32 100644 --- a/src/MapleFE/autogen/include/exprbuffer.h +++ b/src/MapleFE/autogen/include/exprbuffer.h @@ -99,8 +99,8 @@ public: // the following functions are used for un-determined number of children. 
void AllocChildren(unsigned i, ExprBuffer *buf); ExprNode* GetChild(unsigned index); -}; - +}; + class ExprBuffer { private: char *mData; @@ -126,7 +126,7 @@ public: public: ExprBuffer(); - ~ExprBuffer(); + ~ExprBuffer(); // write the expr to the buffer, mData, in text format void Write2Buffer(); diff --git a/src/MapleFE/autogen/include/operator_gen.h b/src/MapleFE/autogen/include/operator_gen.h index 3d3e902359db79540ad00c1c62e172812deb3811..943106cfa475ab754641a0bb972aea16611fa0ec 100644 --- a/src/MapleFE/autogen/include/operator_gen.h +++ b/src/MapleFE/autogen/include/operator_gen.h @@ -14,7 +14,7 @@ */ //////////////////////////////////////////////////////////////////////// // Operator Generation -// The output of this Operator Generation is a table in gen_operator.cpp +// The output of this Operator Generation is a table in gen_operator.cpp // // OprTableEntry OprTable[OPR_Null] = { // {"xxx", OPR_Xxx}, @@ -37,7 +37,7 @@ namespace maplefe { // For each operator, it has three parts involved in the generation. // 1. OprId: Used inside autogen, connection between LANGUAGE and // shared/supported.h files -// 2. Name: Name of OPR ID, to be generated in gen_operator.cpp +// 2. Name: Name of OPR ID, to be generated in gen_operator.cpp // 4. Text: LANGUAGE syntax text, to be in gen_operator.cpp // The SUPPORTED operator and their name. diff --git a/src/MapleFE/autogen/include/rule.h b/src/MapleFE/autogen/include/rule.h index 607d8c9369aa116bbc5ec45d62cb08b72962ea73..5c23c5ee007c4da4f34ee3fee521d30941e9ecf7 100644 --- a/src/MapleFE/autogen/include/rule.h +++ b/src/MapleFE/autogen/include/rule.h @@ -30,15 +30,16 @@ namespace maplefe { class Rule; class StructData; -typedef enum { +typedef enum RuleOp { RO_Oneof, // one of (...) RO_Zeroormore, // zero or more of (...) RO_Zeroorone, // zero or one ( ... ) RO_Concatenate,// Elem + Elem + Elem + RO_ASI, // Typescript/Javascript semicolon checking RO_Null } RuleOp; -typedef enum { +typedef enum ElemType { ET_Char, // It's a literal elements, char, 'c'. ET_String, // It's a literal elements, string "abc". ET_Rule, // It's rule diff --git a/src/MapleFE/autogen/include/rule_gen.h b/src/MapleFE/autogen/include/rule_gen.h index da0cd43cdb485f470b88582dce370eaa3609709b..c68109a69bfcc881aee807046389d0a426b5310c 100644 --- a/src/MapleFE/autogen/include/rule_gen.h +++ b/src/MapleFE/autogen/include/rule_gen.h @@ -66,7 +66,7 @@ public: void PatchTokenOnElem(RuleElem*); void PatchToken(); - void Generate(); + void Generate(); }; } diff --git a/src/MapleFE/autogen/include/separator_gen.h b/src/MapleFE/autogen/include/separator_gen.h index 31597f55668e43e87462935c64fd624223dde7bc..e5b4e3b77a525fe97550e3ce02cafbbf3e3907d3 100644 --- a/src/MapleFE/autogen/include/separator_gen.h +++ b/src/MapleFE/autogen/include/separator_gen.h @@ -14,7 +14,7 @@ */ //////////////////////////////////////////////////////////////////////// // Separator Generation -// The output of this Separator Generation is a table in gen_separator.cpp +// The output of this Separator Generation is a table in gen_separator.cpp // // SepTableEntry SepTable[SEP_Null] = { // {"xxx", SEP_Xxx}, @@ -45,7 +45,7 @@ namespace maplefe { // For each separator, it has three parts involved in the generation. // 1. SepId: Used inside autogen, connection between LANGUAGE and SUPPORTED // .spec files -// 2. Name: Name of SEP_ID, to be generated in gen_separator.cpp +// 2. Name: Name of SEP_ID, to be generated in gen_separator.cpp // 4. 
Keyword: LANGUAGE syntax text, to be in gen_separator.cpp // The SUPPORTED separator and their name. diff --git a/src/MapleFE/autogen/include/spec_keywords.h b/src/MapleFE/autogen/include/spec_keywords.h index 9ede40d1238ab3899900a16802a5a9fa522a5d27..8d944cf257d7e764a79f19eb0b6d0fec9d9dc634 100644 --- a/src/MapleFE/autogen/include/spec_keywords.h +++ b/src/MapleFE/autogen/include/spec_keywords.h @@ -16,6 +16,7 @@ KEYWORD(rule, Rule) KEYWORD(ONEOF, Oneof) KEYWORD(ZEROORONE, Zeroorone) KEYWORD(ZEROORMORE, Zeroormore) +KEYWORD(ASI, ASI) KEYWORD(STRUCT, Struct) KEYWORD(func, Func) KEYWORD(attr, Attr) diff --git a/src/MapleFE/autogen/include/spec_lexer.h b/src/MapleFE/autogen/include/spec_lexer.h index cd5d2fbdefedb9f6e5df976546704304c47ffa67..db32e526eb897155b06cb3d7c317a39612aa5daf 100644 --- a/src/MapleFE/autogen/include/spec_lexer.h +++ b/src/MapleFE/autogen/include/spec_lexer.h @@ -107,7 +107,7 @@ class SPECLexer { return _thekind; } - char *GetLine() const { return line; } + char *GetLine() const { return line; } int GetLineNum() const { return _linenum; } int GetCuridx() const { return curidx; } const std::string &GetTheName() const { return thename; } diff --git a/src/MapleFE/autogen/include/spec_parser.h b/src/MapleFE/autogen/include/spec_parser.h index cc2ac3ebc668874d1205f9f135f5bee4aacb6ad7..af1754b1311d87c0dea4b954adb8f1d094b9b1dc 100644 --- a/src/MapleFE/autogen/include/spec_parser.h +++ b/src/MapleFE/autogen/include/spec_parser.h @@ -69,7 +69,7 @@ public: bool ParseStruct(); bool ParseStructElements(); bool ParseElemData(StructElem *elem); - + bool ParseType(); bool ParseAttr(); diff --git a/src/MapleFE/autogen/include/spec_tokens.h b/src/MapleFE/autogen/include/spec_tokens.h index 666e52677eca583ecd50132776a6bfbe24893f23..8d8cb19cfbf3ceab0653be4a6de226e2bcf47e6d 100644 --- a/src/MapleFE/autogen/include/spec_tokens.h +++ b/src/MapleFE/autogen/include/spec_tokens.h @@ -14,7 +14,7 @@ */ #ifndef SPERC_TOKENS_H #define SPERC_TOKENS_H -typedef enum { +typedef enum SPECTokenKind { SPECTK_Invalid, // keywords #define KEYWORD(S,T) SPECTK_##T, diff --git a/src/MapleFE/autogen/reserved.spec b/src/MapleFE/autogen/reserved.spec index b3bb95f7aaa8715527a8f3e76c4d3b8141018e5d..cd258684dc2ad9b79fe06683a1d021930fe3fa98 100644 --- a/src/MapleFE/autogen/reserved.spec +++ b/src/MapleFE/autogen/reserved.spec @@ -22,19 +22,21 @@ rule CHAR : ONEOF('a','b','c','d','e','f','g','h','i','j','k','l','m','n','o',' # DIGIT refers to the 10 digits rule DIGIT : ONEOF('0', '1', '2', '3', '4', '5', '6', '7', '8', '9') -# The ASCII character exclude ", ', and \ -# -# [NOTE] Becareful of ' over here. It's duplicated in ESCAPE as '\' + '''. There is a reason. -# When the lexer read a string "doesn't", which is wrong since Java request ' be escaped but -# many code does NOT escape, the string in memory is "doesn't" too. The system Reading function -# which is in C doesn't escape '. So I duplicate here to catch this case. -# -# Please see test case java2mpl/literal-string-2.java for example. 
-# -rule ASCII : ONEOF(' ', '!', '#', '$', '%', ''', '&', '(', ')', '*', '+', ',', '-', '.', '/', ':', ';', '<', '=', '>', '?', '@', '[', ']', '^', '_', '`', '{', '|', '}', '~', CHAR, DIGIT) +# The ASCII character exclude ", ', \, and \n +rule ASCII : ONEOF(' ', '!', '#', '$', '%', '&', '(', ')', '*', '+', ',', '-', '.', '/', ':', ';', '<', '=', '>', '?', '@', '[', ']', '^', '_', '`', '{', '|', '}', '~', CHAR, DIGIT) # About the handling of escape character in autogen, xx_gen.cpp/h, and stringutil.cpp # please refer to the comments in StringToValue::StringToString() in stringutil.cpp rule ESCAPE : ONEOF('\' + 'b', '\' + 't', '\' + 'n', '\' + 'f', '\' + 'r', '\' + '"', '\' + ''', '\' + '\') rule HEXDIGIT : ONEOF(DIGIT, 'a', 'b', 'c', 'd', 'e', 'f', 'A', 'B', 'C', 'D', 'E', 'F') + +# irregular char like \n, \, DEL, etc. will be handled in lexer.cpp if some language allows them in string literal. +rule IRREGULAR_CHAR : "this_is_for_fake_rule" + +# Below are special rules handled in lexer.cpp. Since it'll be in lexer code, it means +# it's a shared rule of all languages. It has to be in reserved.spec. +rule UTF8 : "this_is_for_fake_rule" +rule TemplateLiteral : "this_is_for_fake_rule" +rule RegularExpression : "this_is_for_fake_rule" +rule NoLineTerminator : "this_is_for_fake_rule" diff --git a/src/MapleFE/autogen/src/Makefile b/src/MapleFE/autogen/src/Makefile index 86927f0b36c18b1eb363aeebd544dea2587d4499..7297ffdb8d3decb0a97a0bb9d936a02390535a3d 100644 --- a/src/MapleFE/autogen/src/Makefile +++ b/src/MapleFE/autogen/src/Makefile @@ -1,8 +1,22 @@ +# Copyright (C) [2020-2021] Futurewei Technologies, Inc. All rights reverved. +# +# OpenArkFE is licensed under the Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# + include ../../Makefile.in # create build first BUILD=$(BUILDDIR)/autogen -$(shell $(MKDIR_P) $(BUILD)) +$(shell $(MKDIR_P) $(BUILD) $(BUILDDIR)/$(SRCLANG) $(BUILDDIR)/gen) SHAREDSRC := token.cpp mempool.cpp stringmap.cpp stringpool.cpp write2file.cpp SRC := $(wildcard *.cpp) $(SHAREDSRC) @@ -14,33 +28,24 @@ LIBOBJS :=$(patsubst $(BUILD)/main.o,,$(OBJS)) DEPS := $(foreach dep, $(DEP), $(BUILD)/$(dep)) INCLUDES := -I $(MAPLEFE_ROOT)/autogen/include \ - -I $(MAPLEFE_ROOT)/shared/include \ - -I $(MAPLEFE_ROOT)/java/include \ - -I $(MAPLEFE_ROOT)/java + -I $(MAPLEFE_ROOT)/autogen \ + -I $(MAPLEFE_ROOT)/shared/include + +SPECS := $(wildcard $(MAPLEFE_ROOT)/$(SRCLANG)/*.spec) TARGET = autogen AUTOGENLIB = autogen.a .PHONY: all +all: $(BUILD)/$(TARGET) -all: $(TARGET) - -$(TARGET) : $(AUTOGENLIB) +$(BUILD)/$(TARGET) : $(BUILD)/$(AUTOGENLIB) $(LD) -o $(BUILD)/$(TARGET) $(BUILD)/main.o $(BUILD)/$(AUTOGENLIB) - @ln -sf $(MAPLEFE_ROOT)/java/identifier.spec $(BUILD)/identifier.spec - @ln -sf $(MAPLEFE_ROOT)/java/literal.spec $(BUILD)/literal.spec - @ln -sf $(MAPLEFE_ROOT)/java/operator.spec $(BUILD)/operator.spec - @ln -sf $(MAPLEFE_ROOT)/java/separator.spec $(BUILD)/separator.spec - @ln -sf $(MAPLEFE_ROOT)/java/keyword.spec $(BUILD)/keyword.spec - @ln -sf $(MAPLEFE_ROOT)/java/type.spec $(BUILD)/type.spec - @ln -sf $(MAPLEFE_ROOT)/java/attr.spec $(BUILD)/attr.spec - @ln -sf $(MAPLEFE_ROOT)/java/block.spec $(BUILD)/block.spec - @ln -sf $(MAPLEFE_ROOT)/java/expr.spec $(BUILD)/expr.spec - @ln -sf $(MAPLEFE_ROOT)/java/stmt.spec $(BUILD)/stmt.spec - @ln -sf $(MAPLEFE_ROOT)/autogen/reserved.spec $(BUILD)/reserved.spec - -$(AUTOGENLIB) : $(OBJS) + (cd $(BUILD); ./$(TARGET) $(SRCLANG)) + (cd $(MAPLEFE_ROOT); ./scripts/maplefe-autogen.py) + +$(BUILD)/$(AUTOGENLIB) : $(OBJS) /usr/bin/ar rcs $(BUILD)/$(AUTOGENLIB) $(LIBOBJS) -include $(DEPS) @@ -50,24 +55,15 @@ vpath %.cpp $(MAPLEFE_ROOT)/shared/src vpath %.o $(BUILD) vpath %.d $(BUILD) -ifeq ($(LANG), java) -CXXFLAGS := $(CXXFLAGS) -I ../java/ -endif - #Pattern Rules $(BUILD)/%.o : %.cpp $(BUILD)/%.d $(CXX) $(CXXFLAGS) -fpermissive $(INCLUDES) -w -c $< -o $@ -$(BUILD)/%.d : %.cpp +$(BUILD)/%.d : %.cpp $(SPECS) @$(CXX) $(CXXFLAGS) -std=c++11 -MM $(INCLUDES) $< > $@ @mv -f $(BUILD)/$*.d $(BUILD)/$*.d.tmp @sed -e 's|.*:|$(BUILD)/$*.o:|' < $(BUILD)/$*.d.tmp > $(BUILD)/$*.d @rm -f $(BUILD)/$*.d.tmp - -#.cpp.o: -# $(CXX) $(CXXFLAGS) -fpermissive $(INCLUDES) -w -c $*.cpp -o $(BUILD)/$*.o -# $(CXX) $(CXXFLAGS) -std=c++11 -MM $(INCLUDES) $*.cpp > $(BUILD)/$*.d - clean: rm -rf $(BUILD) diff --git a/src/MapleFE/autogen/src/all_supported.cpp b/src/MapleFE/autogen/src/all_supported.cpp index 66ebbdbf2f11dd1d7544189c7a7143964eb40cb1..5ef17f9564e15a043cb16ddd60c1b3567d6cfcf3 100644 --- a/src/MapleFE/autogen/src/all_supported.cpp +++ b/src/MapleFE/autogen/src/all_supported.cpp @@ -22,7 +22,9 @@ namespace maplefe { ////////////////////////////////////////////////////////////////////// #undef TYPE +#undef PRIMTYPE #define TYPE(T) {#T, TY_##T}, +#define PRIMTYPE(T) {#T, TY_##T}, TypeMapping TypesSupported[TY_NA] = { #include "supported_types.def" }; @@ -36,7 +38,9 @@ TypeId FindTypeIdLangIndep(const std::string &s) { } #undef TYPE +#undef PRIMTYPE #define TYPE(T) case TY_##T: return #T; +#define PRIMTYPE(T) case TY_##T: return #T; char *GetTypeString(TypeId tid) { switch (tid) { #include "supported_types.def" @@ -83,7 +87,7 @@ LiteralSuppStruct LiteralsSupported[LT_NA] = { #include "supported_literals.def" }; -// s : the name +// s : the name // return the LitId LitId 
FindLiteralId(const std::string &s) { for (unsigned u = 0; u < LT_NA; u++) { diff --git a/src/MapleFE/autogen/src/attr_gen.cpp b/src/MapleFE/autogen/src/attr_gen.cpp index a2c2cc963f35afa55e0aecac3f5d0c013710fdef..a311fddab876c1337d348ad42880acdfcb2b9b1e 100644 --- a/src/MapleFE/autogen/src/attr_gen.cpp +++ b/src/MapleFE/autogen/src/attr_gen.cpp @@ -66,29 +66,28 @@ const std::string AttrGen::EnumNextElem(){ ///////////////////////////////////////////////////////////////////// void AttrGen::Generate() { - GenHeaderFile(); GenCppFile(); } -void AttrGen::GenHeaderFile() { - mHeaderFile.WriteOneLine("#ifndef __ATTR_GEN_H__", 22); - mHeaderFile.WriteOneLine("#define __ATTR_GEN_H__", 22); - mHeaderFile.WriteOneLine("namespace maplefe {", 19); - mHeaderFile.WriteOneLine("extern AttrKeyword AttrKeywordTable[ATTR_NA];", 45); - mHeaderFile.WriteOneLine("}", 1); - mHeaderFile.WriteOneLine("#endif", 6); -} - void AttrGen::GenCppFile() { mCppFile.WriteOneLine("#include \"common_header_autogen.h\"", 34); mCppFile.WriteOneLine("namespace maplefe {", 19); TableBuffer tb; - tb.Generate(this, "AttrKeyword AttrKeywordTable[ATTR_NA] = {"); + std::string s = "AttrKeyword AttrKeywordTable["; + std::string num = std::to_string(mAttrs.size()); + s += num; + s += "] = {"; + tb.Generate(this, s); mCppFile.WriteFormattedBuffer(&tb); mCppFile.WriteOneLine("};", 2); + + // generate the table size + s = "unsigned AttrKeywordTableSize = "; + s += num; + s += ";"; + mCppFile.WriteOneLine(s.c_str(), s.size()); + mCppFile.WriteOneLine("}", 1); } } - - diff --git a/src/MapleFE/autogen/src/auto_gen.cpp b/src/MapleFE/autogen/src/auto_gen.cpp index e53abeb62264e6f43588277634b417167e3b8661..d2baba06127bf93fbbfc9cd9d1a60b500a4a6542 100644 --- a/src/MapleFE/autogen/src/auto_gen.cpp +++ b/src/MapleFE/autogen/src/auto_gen.cpp @@ -62,52 +62,9 @@ FileWriter *gSummaryCppFile; unsigned gRuleTableNum; std::vector gTopRules; -static void WriteSummaryHFile() { - gSummaryHFile->WriteOneLine("#ifndef __DEBUG_GEN_H__", 23); - gSummaryHFile->WriteOneLine("#define __DEBUG_GEN_H__", 23); - gSummaryHFile->WriteOneLine("#include \"ruletable.h\"", 22); - gSummaryHFile->WriteOneLine("#include \"succ_match.h\"", 23); - gSummaryHFile->WriteOneLine("#include ", 17); - gSummaryHFile->WriteOneLine("namespace maplefe {", 19); - gSummaryHFile->WriteOneLine("typedef struct {", 16); - gSummaryHFile->WriteOneLine(" const RuleTable *mAddr;", 25); - gSummaryHFile->WriteOneLine(" const char *mName;", 25); - gSummaryHFile->WriteOneLine(" unsigned mIndex;", 26); - gSummaryHFile->WriteOneLine("}RuleTableSummary;", 18); - gSummaryHFile->WriteOneLine("extern RuleTableSummary gRuleTableSummarys[];", 45); - gSummaryHFile->WriteOneLine("extern unsigned RuleTableNum;", 29); - gSummaryHFile->WriteOneLine("extern const char* GetRuleTableName(const RuleTable*);", 54); - - std::string s = "extern std::vector gFailed["; - s += std::to_string(gRuleTableNum); - s += "];"; - gSummaryHFile->WriteOneLine(s.c_str(), s.size()); - - // Write SuccMatch array - s = "class SuccMatch;"; - gSummaryHFile->WriteOneLine(s.c_str(), s.size()); - - s = "extern SuccMatch gSucc["; - s += std::to_string(gRuleTableNum); - s += "];"; - gSummaryHFile->WriteOneLine(s.c_str(), s.size()); - - // Write Top rules - s = "extern unsigned gTopRulesNum;"; - gSummaryHFile->WriteOneLine(s.c_str(), s.size()); - - s = "extern RuleTable* gTopRules["; - s += std::to_string(gTopRules.size()); - s += "];"; - gSummaryHFile->WriteOneLine(s.c_str(), s.size()); - - gSummaryHFile->WriteOneLine("}", 1); - 
gSummaryHFile->WriteOneLine("#endif", 6); -} - // write the beginning part of summary file static void PrepareSummaryCppFile() { - gSummaryCppFile->WriteOneLine("#include \"gen_summary.h\"", 24); + gSummaryCppFile->WriteOneLine("#include \"rule_summary.h\"", 25); gSummaryCppFile->WriteOneLine("#include \"common_header_autogen.h\"", 34); gSummaryCppFile->WriteOneLine("namespace maplefe {", 19); gSummaryCppFile->WriteOneLine("RuleTableSummary gRuleTableSummarys[] = {", 41); @@ -130,7 +87,7 @@ static void FinishSummaryCppFile() { gSummaryCppFile->WriteOneLine(" return NULL;", 14); gSummaryCppFile->WriteOneLine("}", 1); - std::string s = "std::vector gFailed["; + std::string s = "BitVector gFailed["; s += std::to_string(gRuleTableNum); s += "];"; gSummaryCppFile->WriteOneLine(s.c_str(), s.size()); @@ -167,85 +124,96 @@ static void FinishSummaryCppFile() { /////////////////////////////////////////////////////////////////////////////////////// void AutoGen::Init() { - std::string lang_path_header("../../java/include/"); - std::string lang_path_cpp("../../java/src/"); + std::string lang_path("../gen/"); - std::string summary_file_name = lang_path_cpp + "gen_summary.cpp"; + std::string summary_file_name = lang_path + "gen_summary.cpp"; gSummaryCppFile = new FileWriter(summary_file_name); - summary_file_name = lang_path_header + "gen_summary.h"; + summary_file_name = lang_path + "gen_summary.h"; gSummaryHFile = new FileWriter(summary_file_name); gRuleTableNum = 0; PrepareSummaryCppFile(); - std::string hFile = lang_path_header + "gen_reserved.h"; - std::string cppFile = lang_path_cpp + "gen_reserved.cpp"; - mReservedGen = new ReservedGen("reserved.spec", hFile.c_str(), cppFile.c_str()); + std::string hFile = lang_path + "gen_reserved.h"; + std::string cppFile = lang_path + "gen_reserved.cpp"; + mReservedGen = new ReservedGen("../../../autogen/reserved.spec", hFile.c_str(), cppFile.c_str()); mReservedGen->SetReserved(mReservedGen); mGenArray.push_back(mReservedGen); - hFile = lang_path_header + "gen_iden.h"; - cppFile = lang_path_cpp + "gen_iden.cpp"; - mIdenGen = new IdenGen("identifier.spec", hFile.c_str(), cppFile.c_str()); + hFile = lang_path + "gen_iden.h"; + cppFile = lang_path + "gen_iden.cpp"; + std::string specFile = "../../../"; + specFile += mLang; + specFile += "/identifier.spec"; + mIdenGen = new IdenGen(specFile.c_str(), hFile.c_str(), cppFile.c_str()); mIdenGen->SetReserved(mReservedGen); mGenArray.push_back(mIdenGen); - hFile = lang_path_header + "gen_literal.h"; - cppFile = lang_path_cpp + "gen_literal.cpp"; - mLitGen = new LiteralGen("literal.spec", hFile.c_str(), cppFile.c_str()); + hFile = lang_path + "gen_literal.h"; + cppFile = lang_path + "gen_literal.cpp"; + specFile = "../../../"; + specFile += mLang; + specFile += "/literal.spec"; + mLitGen = new LiteralGen(specFile.c_str(), hFile.c_str(), cppFile.c_str()); mLitGen->SetReserved(mReservedGen); mGenArray.push_back(mLitGen); - hFile = lang_path_header + "gen_type.h"; - cppFile = lang_path_cpp + "gen_type.cpp"; - mTypeGen = new TypeGen("type.spec", hFile.c_str(), cppFile.c_str()); + hFile = lang_path + "gen_type.h"; + cppFile = lang_path + "gen_type.cpp"; + specFile = "../../../"; + specFile += mLang; + specFile += "/type.spec"; + mTypeGen = new TypeGen(specFile.c_str(), hFile.c_str(), cppFile.c_str()); mTypeGen->SetReserved(mReservedGen); mGenArray.push_back(mTypeGen); - hFile = lang_path_header + "gen_attr.h"; - cppFile = lang_path_cpp + "gen_attr.cpp"; - mAttrGen = new AttrGen("attr.spec", hFile.c_str(), cppFile.c_str()); + 
hFile = lang_path + "gen_attr.h"; + cppFile = lang_path + "gen_attr.cpp"; + specFile = "../../../"; + specFile += mLang; + specFile += "/attr.spec"; + mAttrGen = new AttrGen(specFile.c_str(), hFile.c_str(), cppFile.c_str()); mAttrGen->SetReserved(mReservedGen); mGenArray.push_back(mAttrGen); - hFile = lang_path_header + "gen_block.h"; - cppFile = lang_path_cpp + "gen_block.cpp"; - mBlockGen = new BlockGen("block.spec", hFile.c_str(), cppFile.c_str()); - mBlockGen->SetReserved(mReservedGen); - mGenArray.push_back(mBlockGen); - - hFile = lang_path_header + "gen_separator.h"; - cppFile = lang_path_cpp + "gen_separator.cpp"; - mSeparatorGen = new SeparatorGen("separator.spec", hFile.c_str(), cppFile.c_str()); + hFile = lang_path + "gen_separator.h"; + cppFile = lang_path + "gen_separator.cpp"; + specFile = "../../../"; + specFile += mLang; + specFile += "/separator.spec"; + mSeparatorGen = new SeparatorGen(specFile.c_str(), hFile.c_str(), cppFile.c_str()); mSeparatorGen->SetReserved(mReservedGen); mGenArray.push_back(mSeparatorGen); - hFile = lang_path_header + "gen_operator.h"; - cppFile = lang_path_cpp + "gen_operator.cpp"; - mOperatorGen = new OperatorGen("operator.spec", hFile.c_str(), cppFile.c_str()); + hFile = lang_path + "gen_operator.h"; + cppFile = lang_path + "gen_operator.cpp"; + specFile = "../../../"; + specFile += mLang; + specFile += "/operator.spec"; + mOperatorGen = new OperatorGen(specFile.c_str(), hFile.c_str(), cppFile.c_str()); mOperatorGen->SetReserved(mReservedGen); mGenArray.push_back(mOperatorGen); - hFile = lang_path_header + "gen_keyword.h"; - cppFile = lang_path_cpp + "gen_keyword.cpp"; - mKeywordGen = new KeywordGen("keyword.spec", hFile.c_str(), cppFile.c_str()); + hFile = lang_path + "gen_keyword.h"; + cppFile = lang_path + "gen_keyword.cpp"; + specFile = "../../../"; + specFile += mLang; + specFile += "/keyword.spec"; + mKeywordGen = new KeywordGen(specFile.c_str(), hFile.c_str(), cppFile.c_str()); mKeywordGen->SetReserved(mReservedGen); mGenArray.push_back(mKeywordGen); - hFile = lang_path_header + "gen_expr.h"; - cppFile = lang_path_cpp + "gen_expr.cpp"; - mExprGen = new ExprGen("expr.spec", hFile.c_str(), cppFile.c_str()); - mExprGen->SetReserved(mReservedGen); - mGenArray.push_back(mExprGen); - - hFile = lang_path_header + "gen_stmt.h"; - cppFile = lang_path_cpp + "gen_stmt.cpp"; - mStmtGen = new StmtGen("stmt.spec", hFile.c_str(), cppFile.c_str()); + hFile = lang_path + "gen_stmt.h"; + cppFile = lang_path + "gen_stmt.cpp"; + specFile = "../../../"; + specFile += mLang; + specFile += "/stmt.spec"; + mStmtGen = new StmtGen(specFile.c_str(), hFile.c_str(), cppFile.c_str()); mStmtGen->SetReserved(mReservedGen); mGenArray.push_back(mStmtGen); - hFile = lang_path_header + "gen_token.h"; - cppFile = lang_path_cpp + "gen_token.cpp"; + hFile = lang_path + "gen_token.h"; + cppFile = lang_path + "gen_token.cpp"; mTokenGen = new TokenGen(hFile.c_str(), cppFile.c_str()); mTokenGen->SetReserved(mReservedGen); mGenArray.push_back(mTokenGen); @@ -304,7 +272,6 @@ void AutoGen::Gen() { gen->Generate(); } - WriteSummaryHFile(); FinishSummaryCppFile(); } diff --git a/src/MapleFE/autogen/src/base_gen.cpp b/src/MapleFE/autogen/src/base_gen.cpp index 2bad09f17e5d1bc9b9b94e847464b308fd6bcf2a..23f00799aaf9469b78a149295a79ae3073a116a3 100644 --- a/src/MapleFE/autogen/src/base_gen.cpp +++ b/src/MapleFE/autogen/src/base_gen.cpp @@ -81,8 +81,8 @@ BaseGen::~BaseGen() { } Rule *BaseGen::AddLiteralRule(std::string rulename) { - Rule *rule = NULL; - if (rule = FindRule(rulename)) { + Rule 
*rule = FindRule(rulename); + if (rule) { return rule; } diff --git a/src/MapleFE/autogen/src/block_gen.cpp b/src/MapleFE/autogen/src/block_gen.cpp deleted file mode 100644 index 365f093ad106fb1db90569ae08993eb7be8508bd..0000000000000000000000000000000000000000 --- a/src/MapleFE/autogen/src/block_gen.cpp +++ /dev/null @@ -1,46 +0,0 @@ -/* -* Copyright (C) [2020] Futurewei Technologies, Inc. All rights reverved. -* -* OpenArkFE is licensed under the Mulan PSL v2. -* You can use this software according to the terms and conditions of the Mulan PSL v2. -* You may obtain a copy of Mulan PSL v2 at: -* -* http://license.coscl.org.cn/MulanPSL2 -* -* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER -* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR -* FIT FOR A PARTICULAR PURPOSE. -* See the Mulan PSL v2 for more details. -*/ -#include "block_gen.h" - -namespace maplefe { - -void BlockGen::Generate() { - GenRuleTables(); - GenHeaderFile(); - GenCppFile(); -} - -void BlockGen::GenHeaderFile() { - mHeaderFile.WriteOneLine("#ifndef __BLOCK_GEN_H__", 23); - mHeaderFile.WriteOneLine("#define __BLOCK_GEN_H__", 23); - mHeaderFile.WriteOneLine("namespace maplefe {", 19); - - // generate the rule tables - mHeaderFile.WriteFormattedBuffer(&mRuleTableHeader); - - mHeaderFile.WriteOneLine("}", 1); - mHeaderFile.WriteOneLine("#endif", 6); -} - -void BlockGen::GenCppFile() { - mCppFile.WriteOneLine("#include \"common_header_autogen.h\"", 34); - mCppFile.WriteOneLine("namespace maplefe {", 19); - // generate the rule tables - mCppFile.WriteFormattedBuffer(&mRuleTableCpp); - mCppFile.WriteOneLine("}", 1); -} -} - - diff --git a/src/MapleFE/autogen/src/buffer2write.cpp b/src/MapleFE/autogen/src/buffer2write.cpp index 7381a69168343728306b0d16597effe67009cfa3..17d7bb7be28966ac0755a67eda9f9b526b4335e1 100644 --- a/src/MapleFE/autogen/src/buffer2write.cpp +++ b/src/MapleFE/autogen/src/buffer2write.cpp @@ -102,7 +102,7 @@ WriteStatus LineBuffer::AddInteger(int i) { unsigned digits = 0; if (i < 0) { digits = 1; - } + } digits += NumDigits(i); @@ -166,12 +166,12 @@ char* RectBuffer::GetNextAvailLine() { if (CurrentLineEmpty()) return mCurrLine; - + unsigned mask = (1 << LINES_PER_BLOCK) - 1; unsigned line = (mUsed & mask) + 1; unsigned log2line = (unsigned)log2(line); char *addr = mData + MAX_LINE_LIMIT * log2line; - mUsed = mUsed | line; + mUsed = mUsed | line; mCurrLine = addr; mCurrLineSize = 0; @@ -212,7 +212,7 @@ WriteStatus RectBuffer::AddInteger(int i) { // take '-' into account. if (i < 0) { digits = 1; - } + } digits += NumDigits(i); left -= digits; @@ -238,7 +238,7 @@ WriteStatus RectBuffer::AddString(const char *str){ // std::string data(indentation); // data = data + str; // str = data.c_str(); - + char *pos = mCurrLine + mCurrLineSize; unsigned left = MAX_LINE_LIMIT - mCurrLineSize; unsigned len = strlen(str); @@ -404,7 +404,7 @@ char* FormattedBuffer::NewLine() { // default constructor of OneBuffer will allocate a new RectBuffer ob = NewOneBuffer(); } - return ob->GetNextAvailLine(); + return ob->GetNextAvailLine(); } // Open a new line for comment, return the addr of the line. @@ -473,7 +473,7 @@ void FormattedBuffer::AddStringWholeLine(const char *str){ void FormattedBuffer::AddNestedBuffer(FormattedBuffer *nested) { OneBuffer *ob = new OneBuffer(nested); mBuffers.push_back(ob); - return; + return; } // wipe existing data, fill with whitespace. 
@@ -490,7 +490,7 @@ void FormattedBuffer::ClearLine(char *line) { // the enum structure will have the following pattern. The following is // for Separator. -// typedef enum { +// typedef enum SepId { // SEP_Xxx, // ... // SEP_Null @@ -508,21 +508,23 @@ void EnumBuffer::Generate(BaseGen *bg, const char *last_item){ while(!bg->EnumEnd()) { NewLine(); std::string elem = bg->EnumNextElem(); - if (!bg->EnumEnd() || last_item) - if (!elem.size()) + if (!bg->EnumEnd() || last_item) { + if (!elem.size()) { elem = ","; - else + } else { elem = elem + ","; + } + } AddString(elem); - } + } NewLine(); AddString(last_item); - + DecIndent(); const std::string name = bg->EnumName(); std::string s = "}" + name + ";"; NewOneBuffer(s.size(), true); // a separate LineBuffer - AddString(s); + AddString(s); return; } @@ -596,13 +598,15 @@ void TableBuffer::Generate(BaseGen *bg, const std::string decl){ while(!bg->EnumEnd()) { NewLine(); std::string elem = bg->EnumNextElem(); - if (!bg->EnumEnd()) - if (!elem.size()) + if (!bg->EnumEnd()) { + if (!elem.size()) { elem = ","; - else + } else { elem = elem + ","; + } + } AddString(elem); - } + } DecIndent(); return; diff --git a/src/MapleFE/autogen/src/exprbuffer.cpp b/src/MapleFE/autogen/src/exprbuffer.cpp index 4425cc7d0a178b4193ec69b8f44a97060bc9ff49..507ad149840db83a11387fb5c97d6ff5682384e3 100644 --- a/src/MapleFE/autogen/src/exprbuffer.cpp +++ b/src/MapleFE/autogen/src/exprbuffer.cpp @@ -21,7 +21,7 @@ namespace maplefe { // Note: This must be in the order as in 'enum OprCode', and this is the // reason I duplicate OprCode here in order to make the code easy -// to maintain. +// to maintain. OprInfo gOprInfo[OPC_Null] = { {OPC_Add, OPT_Bin, "+"}, {OPC_Sub, OPT_Bin, "-"}, @@ -71,21 +71,21 @@ void ExprNode::SetCall(const char *s) { unsigned len = strlen(s); MASSERT(len < NAME_SIZE); strncpy(mName, s, len); - mOpc = OPC_Call; + mOpc = OPC_Call; } void ExprNode::SetString(const char *s) { unsigned len = strlen(s); MASSERT(len < NAME_SIZE); strncpy(mName, s, len); - mOpc = OPC_Str; + mOpc = OPC_Str; } void ExprNode::SetLiteral(const char *s) { unsigned len = strlen(s); MASSERT(len < NAME_SIZE); strncpy(mName, s, len); - mOpc = OPC_Lit; + mOpc = OPC_Lit; } ////////////////////////////////////////////////////////////////////////// @@ -94,7 +94,7 @@ void ExprNode::SetLiteral(const char *s) { ExprBuffer::ExprBuffer() { mSize = EXPR_SIZE; - mData = (char*)malloc(mSize); + mData = (char*)malloc(mSize); MASSERT(mData && "Cannot malloc buffer for ExprBuffer"); mPos = 0; mRoot = (ExprNode*)mMemPool.Alloc(sizeof(ExprNode)); @@ -104,8 +104,8 @@ ExprBuffer::ExprBuffer() { // Depth-first search to free string memory which is allocated by // the caller of ExprBuffer. 
ExprBuffer::~ExprBuffer() { - if (mData); - free(mData); + if (mData) + free(mData); } // Extend the size of mData @@ -134,7 +134,7 @@ ExprNode* ExprBuffer::NewExprNodes(unsigned num) { // In-Order void ExprBuffer::Write2Buffer(){ MASSERT(mRoot && "Root of expression is NULL!"); - W2BRecursive(mRoot); + W2BRecursive(mRoot); } void ExprBuffer::WriteStringNode(const ExprNode *n){ @@ -143,7 +143,7 @@ void ExprBuffer::WriteStringNode(const ExprNode *n){ Extend(mPos + len); strncpy(mData+mPos, n->mName, len); - mPos += len; + mPos += len; } void ExprBuffer::WriteLiteralNode(const ExprNode *n){ @@ -153,7 +153,7 @@ void ExprBuffer::WriteLiteralNode(const ExprNode *n){ WriteChar('\"'); strncpy(mData+mPos, n->mName, len); - mPos += len; + mPos += len; WriteChar('\"'); } @@ -170,7 +170,7 @@ void ExprBuffer::WriteOperatorNode(const ExprNode *n){ if (mPos + len > mSize) Extend(mPos + len); strncpy(mData + mPos, text, len); - mPos += len; + mPos += len; } void ExprBuffer::WriteChar(const char c){ diff --git a/src/MapleFE/autogen/src/file_write.cpp b/src/MapleFE/autogen/src/file_write.cpp index 752ffcf699dcf793435793d3d13ae8cd1604f9eb..ed301faaa6ad1d1344580131c198b512b4bd1eee 100644 --- a/src/MapleFE/autogen/src/file_write.cpp +++ b/src/MapleFE/autogen/src/file_write.cpp @@ -47,7 +47,7 @@ void FileWriter::WriteSimpleBuffers(const FormattedBuffer *fb) { std::vector::const_iterator it = fb->mBuffers.begin(); for (; it != fb->mBuffers.end(); it++) { OneBuffer *one = *it; - if (one->mIsSimple) { + if (one->mIsSimple) { RectBuffer *rect = one->GetRectBuffer(); if (rect) { mIndentation = rect->mIndentation; diff --git a/src/MapleFE/autogen/src/keyword_gen.cpp b/src/MapleFE/autogen/src/keyword_gen.cpp index 36d4afd4b96e715b1b2d84db823bba59ef7f6d90..74bb18dee8aa70f76811f9220c9e6d7337475bc9 100644 --- a/src/MapleFE/autogen/src/keyword_gen.cpp +++ b/src/MapleFE/autogen/src/keyword_gen.cpp @@ -36,7 +36,7 @@ const bool KeywordGen::EnumEnd(){ const std::string KeywordGen::EnumNextElem(){ std::string keyword = "\"" + *mEnumIter + "\""; mEnumIter++; - return keyword; + return keyword; } ///////////////////////////////////////////////////////////////////// diff --git a/src/MapleFE/autogen/src/main.cpp b/src/MapleFE/autogen/src/main.cpp index c8d04677a3682776771ff2bb23ae76f4f00ecbda..02343c71a819dc50bcf12400399643a468330701 100644 --- a/src/MapleFE/autogen/src/main.cpp +++ b/src/MapleFE/autogen/src/main.cpp @@ -31,11 +31,19 @@ int main(int argc, char *argv[]) { int verbose = 0; int fileIndex = 2; + std::string lang; + if (argc >=2) { for (int i = 1; i < argc; i++) { int len = strlen(argv[i]); if (!strncmp(argv[i], "-verbose=", 9)) { verbose = atoi(argv[i]+9); + } else if (!strncmp(argv[i], "java", 4)) { + lang = "java"; + } else if (!strncmp(argv[i], "typescript", 10)) { + lang = "typescript"; + } else if (!strncmp(argv[i], "c", 1)) { + lang = "c"; } else if (strcmp(argv[i], "-p") == 0) { checkParserOnly = true; } else { @@ -57,6 +65,7 @@ int main(int argc, char *argv[]) { } maplefe::AutoGen ag(parser); + ag.SetLang(lang); ag.Gen(); return 0; diff --git a/src/MapleFE/autogen/src/operator_gen.cpp b/src/MapleFE/autogen/src/operator_gen.cpp index eb9ea3f20c87cd9f71f520ff7a564c6166ab06a0..5113524011919ca0954b09f5c02709465130122a 100644 --- a/src/MapleFE/autogen/src/operator_gen.cpp +++ b/src/MapleFE/autogen/src/operator_gen.cpp @@ -27,11 +27,12 @@ OperatorId OprsSupported[OPR_NA] = { #include "supported_operators.def" }; -// s : the literal name +// s : the literal name // return the OprId OprId FindOperatorId(const 
std::string &s) { for (unsigned u = 0; u < OPR_NA; u++) { - if (!OprsSupported[u].mName.compare(0, s.length(), s)) + if (!OprsSupported[u].mName.compare(0, s.length(), s) && + s.length() == OprsSupported[u].mName.size()) return OprsSupported[u].mOprId; } return OPR_NA; @@ -74,7 +75,7 @@ const std::string OperatorGen::EnumNextElem(){ enum_item = enum_item + "\"" + opr.mText + "\", OPR_" + FindOperatorName(opr.mID); enum_item = enum_item + "}"; mEnumIter++; - return enum_item; + return enum_item; } ///////////////////////////////////////////////////////////////////// @@ -111,7 +112,7 @@ void OperatorGen::GenHeaderFile() { mHeaderFile.WriteOneLine("#ifndef __OPERATOR_GEN_H__", 26); mHeaderFile.WriteOneLine("#define __OPERATOR_GEN_H__", 26); mHeaderFile.WriteOneLine("namespace maplefe {", 19); - mHeaderFile.WriteOneLine("extern OprTableEntry OprTable[OPR_NA];", 38); + mHeaderFile.WriteOneLine("extern OprTableEntry OprTable[];", 32); mHeaderFile.WriteOneLine("}", 1); mHeaderFile.WriteOneLine("#endif", 6); } @@ -120,9 +121,20 @@ void OperatorGen::GenCppFile() { mCppFile.WriteOneLine("#include \"ruletable.h\"", 22); mCppFile.WriteOneLine("namespace maplefe {", 19); TableBuffer tb; - tb.Generate(this, "OprTableEntry OprTable[] = {"); + std::string s = "OprTableEntry OprTable["; + std::string num = std::to_string(mOperators.size()); + s += num; + s += "] = {"; + tb.Generate(this, s); mCppFile.WriteFormattedBuffer(&tb); mCppFile.WriteOneLine("};", 2); + + // generate the table size + s = "unsigned OprTableSize = "; + s += num; + s += ";"; + mCppFile.WriteOneLine(s.c_str(), s.size()); + mCppFile.WriteOneLine("}", 1); } } diff --git a/src/MapleFE/autogen/src/reserved_gen.cpp b/src/MapleFE/autogen/src/reserved_gen.cpp index 06a60db950802d6cb4f75da7071989436100eff4..5920a7094ee5cb5f15bde22caceb56f8f292f7c7 100644 --- a/src/MapleFE/autogen/src/reserved_gen.cpp +++ b/src/MapleFE/autogen/src/reserved_gen.cpp @@ -25,10 +25,12 @@ ReservedGen::ReservedGen(const char *dfile, const char *hf, const char *cppf) ReservedOp oneof = {"ONEOF", RO_Oneof}; ReservedOp zeroplus = {"ZEROORMORE", RO_Zeroormore}; ReservedOp zeroorone = {"ZEROORONE", RO_Zeroorone}; + ReservedOp asi = {"ASI", RO_ASI}; mOps.push_back(oneof); mOps.push_back(zeroplus); mOps.push_back(zeroorone); + mOps.push_back(asi); } // Needs to override BaseGen::Run, since we dont need process diff --git a/src/MapleFE/autogen/src/rule.cpp b/src/MapleFE/autogen/src/rule.cpp index 259485306ffa7c12c2ce1dee180ddcdf763e254f..bde0e42ca0d00c440410947c983f3220653ce479 100644 --- a/src/MapleFE/autogen/src/rule.cpp +++ b/src/MapleFE/autogen/src/rule.cpp @@ -132,6 +132,7 @@ const char *RuleElem::GetRuleOpName() { case RO_Oneof: return "ONEOF"; case RO_Zeroormore: return "ZEROORMORE"; case RO_Zeroorone: return "ZEROORONE"; + case RO_ASI: return "ASI"; case RO_Concatenate: case RO_Null: default: return ""; @@ -170,7 +171,10 @@ void RuleElem::Dump(bool newline) { case ET_Op: { std::cout << GetRuleOpName(); - if (mData.mOp == RO_Oneof || mData.mOp == RO_Zeroormore || mData.mOp == RO_Zeroorone) + if (mData.mOp == RO_Oneof + || mData.mOp == RO_Zeroormore + || mData.mOp == RO_Zeroorone + || mData.mOp == RO_ASI ) std::cout << "("; std::vector::iterator it = mSubElems.begin(); @@ -197,6 +201,7 @@ void RuleElem::Dump(bool newline) { } case RO_Zeroormore: case RO_Zeroorone: + case RO_ASI: { (*it)->Dump(); break; @@ -205,7 +210,10 @@ void RuleElem::Dump(bool newline) { break; } - if (mData.mOp == RO_Oneof || mData.mOp == RO_Zeroormore || mData.mOp == RO_Zeroorone) + if (mData.mOp 
== RO_Oneof + || mData.mOp == RO_Zeroormore + || mData.mOp == RO_Zeroorone + || mData.mOp == RO_ASI) std::cout << ")"; break; } diff --git a/src/MapleFE/autogen/src/rule_gen.cpp b/src/MapleFE/autogen/src/rule_gen.cpp index fdf811afd28ff6f9531cd4116ded4b930425b982..33269487078102ae13291a040dde0def0ca12a0d 100644 --- a/src/MapleFE/autogen/src/rule_gen.cpp +++ b/src/MapleFE/autogen/src/rule_gen.cpp @@ -77,14 +77,14 @@ namespace maplefe { // Generate the table name for mRule std::string RuleGen::GetTblName(const Rule *rule) { std::string tn = "Tbl" + rule->mName; - return tn; + return tn; } // Generate the table name for sub rules in mRule // The table id will be introduced here std::string RuleGen::GetSubTblName() { std::string tn = "Tbl" + mRule->mName + "_sub" + std::to_string(mSubTblNum); - return tn; + return tn; } std::string RuleGen::GetPropertyName(const RuleAttr *attr) { @@ -122,7 +122,7 @@ std::string RuleGen::GetEntryTypeName(ElemType type, RuleOp op) { name = "ET_Data"; break; case ET_Op: { - switch(op) { + switch(op) { case RO_Oneof: name = "ET_Oneof"; break; @@ -135,6 +135,9 @@ std::string RuleGen::GetEntryTypeName(ElemType type, RuleOp op) { case RO_Concatenate: name = "ET_Concatenate"; break; + case RO_ASI: + name = "ET_ASI"; + break; default: MERROR("unknown RuleOp"); break; @@ -190,7 +193,7 @@ std::string RuleGen::Gen4RuleElem(const RuleElem *elem) { // Note: The table could be defined in other files. Need include them. data += "DT_Subtable, &"; data += GetTblName(elem->mData.mRule); - break; + break; case ET_Op: { // Each Op will be generated as a new sub table mSubTblNum++; @@ -248,7 +251,7 @@ void RuleGen::GenDebug(const std::string &rule_table_name) { addr_name_mapping += rule_table_name; addr_name_mapping += "\", "; addr_name_mapping += std::to_string(gRuleTableNum); - addr_name_mapping += "}\,"; + addr_name_mapping += "},"; gSummaryCppFile->WriteOneLine(addr_name_mapping.c_str(), addr_name_mapping.size()); gRuleTableNum++; } @@ -336,7 +339,7 @@ void RuleGen::Gen4Table(const Rule *rule, const RuleElem *elem){ } } } - + Gen4RuleAttr(rule_table_name, attr); Gen4TableHeader(rule_table_name); unsigned index = gRuleTableNum; @@ -347,7 +350,7 @@ void RuleGen::Gen4Table(const Rule *rule, const RuleElem *elem){ std::string rule_table; // 1. Add the LHS of table decl - rule_table = "RuleTable " + rule_table_name + " ="; + rule_table = "RuleTable " + rule_table_name + " ="; rule_table_data = "TableData " + rule_table_data_name + "["; std::string elemnum = std::to_string(elem->mSubElems.size()); @@ -358,7 +361,7 @@ void RuleGen::Gen4Table(const Rule *rule, const RuleElem *elem){ if (elem && (elem->mSubElems.size() == 0)) elemnum = "1"; - rule_table_data += elemnum + "] ="; + rule_table_data += elemnum + "] ="; // 2. Add the beginning '{' rule_table += '{'; @@ -389,7 +392,7 @@ void RuleGen::Gen4Table(const Rule *rule, const RuleElem *elem){ rule_table += "};"; - + // 4. go through the rule elements, generate rule table data std::string data = Gen4TableData(elem); rule_table_data += '{'; @@ -405,7 +408,7 @@ void RuleGen::Gen4Table(const Rule *rule, const RuleElem *elem){ // The structure of rule and its sub-rule can be viewed as a tree. // We generate tables for rule and its sub-rules in depth first order. 
-// +// void RuleGen::Generate() { bool need_patch = true; if ((mRule->mName.compare("CHAR") == 0) || diff --git a/src/MapleFE/autogen/src/separator_gen.cpp b/src/MapleFE/autogen/src/separator_gen.cpp index 0afaa8ed75662b63cd5aebcc4b716faddd090141..39cb2318d175e1e5bd151c022fb84d97b0476227 100644 --- a/src/MapleFE/autogen/src/separator_gen.cpp +++ b/src/MapleFE/autogen/src/separator_gen.cpp @@ -28,7 +28,7 @@ SuppSepId SepsSupported[SEP_NA] = { #include "supported_separators.def" }; -// s : the literal name +// s : the literal name // return the SepId SepId FindSeparatorId(const std::string &s) { for (unsigned u = 0; u < SEP_NA; u++) { @@ -75,7 +75,7 @@ const std::string SeparatorGen::EnumNextElem(){ enum_item = enum_item + "\"" + sep.mKeyword + "\", SEP_" + FindSeparatorName(sep.mID); enum_item = enum_item + "}"; mEnumIter++; - return enum_item; + return enum_item; } ///////////////////////////////////////////////////////////////////// @@ -112,7 +112,7 @@ void SeparatorGen::GenHeaderFile() { mHeaderFile.WriteOneLine("#ifndef __SEPARATOR_GEN_H__", 27); mHeaderFile.WriteOneLine("#define __SEPARATOR_GEN_H__", 27); mHeaderFile.WriteOneLine("namespace maplefe {", 19); - mHeaderFile.WriteOneLine("extern SepTableEntry SepTable[SEP_NA];", 38); + mHeaderFile.WriteOneLine("extern SepTableEntry SepTable[];", 32); mHeaderFile.WriteOneLine("}", 1); mHeaderFile.WriteOneLine("#endif", 6); } @@ -121,9 +121,20 @@ void SeparatorGen::GenCppFile() { mCppFile.WriteOneLine("#include \"ruletable.h\"", 22); mCppFile.WriteOneLine("namespace maplefe {", 19); TableBuffer tb; - tb.Generate(this, "SepTableEntry SepTable[SEP_NA] = {"); + std::string s = "SepTableEntry SepTable["; + std::string num = std::to_string(mSeparators.size()); + s += num; + s += "] = {"; + tb.Generate(this, s); mCppFile.WriteFormattedBuffer(&tb); mCppFile.WriteOneLine("};", 2); + + // generate the table size + s = "unsigned SepTableSize = "; + s += num; + s += ";"; + mCppFile.WriteOneLine(s.c_str(), s.size()); + mCppFile.WriteOneLine("}", 1); } } diff --git a/src/MapleFE/autogen/src/spec_lexer.cpp b/src/MapleFE/autogen/src/spec_lexer.cpp index 8aea7c1db0900a123d1cda9f9f38ea4516bc287a..bb581c3469e7d71ed6283c55152f073a3e21ce9b 100644 --- a/src/MapleFE/autogen/src/spec_lexer.cpp +++ b/src/MapleFE/autogen/src/spec_lexer.cpp @@ -600,6 +600,10 @@ std::string SPECLexer::GetTokenString(SPECTokenKind thekind) { temp = "ZEROORMORE"; break; } + case SPECTK_ASI: { + temp = "ASI"; + break; + } case SPECTK_Concat: { temp = "+"; break; diff --git a/src/MapleFE/autogen/src/spec_parser.cpp b/src/MapleFE/autogen/src/spec_parser.cpp index f8af4546aac1688a239c475278118eda729593d3..aa0108fad0b41435fc4835ebeb17010aaeb9e647 100644 --- a/src/MapleFE/autogen/src/spec_parser.cpp +++ b/src/MapleFE/autogen/src/spec_parser.cpp @@ -31,7 +31,7 @@ namespace maplefe { ////////////////////////////////////////////////////////////////////////////// -void SPECParser::ParserError(std::string msg, std::string str) { +void SPECParser::ParserError(std::string msg, std::string str) { std::cout << "\n================================= spec file syntax error ============================" << std::endl; std::cout << "file " << mFilename << std::endl; std::cout << "line " << mLexer->GetLineNum() << std::endl; @@ -194,6 +194,10 @@ bool SPECParser::ParseElement(RuleElem *&elem, bool allowConcat) { elem = mBaseGen->NewRuleElem(RO_Zeroormore); status = ParseElementSet(elem); break; + case SPECTK_ASI: + elem = mBaseGen->NewRuleElem(RO_ASI); + status = ParseElementSet(elem); + break; case 
SPECTK_Char: { elem = mBaseGen->GetOrCreateRuleElemFromChar(mLexer->thechar); @@ -299,8 +303,11 @@ RuleAction *SPECParser::GetAction() { bool SPECParser::ParseElementSet(RuleElem *elem) { SPECTokenKind optk = mLexer->GetToken(); - if (!(optk == SPECTK_Oneof || optk == SPECTK_Zeroorone || optk == SPECTK_Zeroormore)) - ParserError("expect ONEOF/ZEROORONE/ZEROORMORE but get ", mLexer->GetTokenString()); + if (!(optk == SPECTK_Oneof + || optk == SPECTK_Zeroorone + || optk == SPECTK_Zeroormore + || optk == SPECTK_ASI)) + ParserError("expect ONEOF/ZEROORONE/ZEROORMORE/ASI but get ", mLexer->GetTokenString()); SPECTokenKind tk = mLexer->NextToken(); if (tk != SPECTK_Lparen) @@ -318,8 +325,8 @@ bool SPECParser::ParseElementSet(RuleElem *elem) { tk = mLexer->GetToken(); if (optk == SPECTK_Oneof && tk == SPECTK_Coma) tk = mLexer->NextToken(); - else if (optk == SPECTK_Zeroorone || optk == SPECTK_Zeroormore) - // SPECTK_Zeroorone and SPECTK_Zeroormore only allow one element + else if (optk == SPECTK_Zeroorone || optk == SPECTK_Zeroormore || optk == SPECTK_ASI) + // SPECTK_Zeroorone, SPECTK_Zeroormore and ASI only allow one element break; } diff --git a/src/MapleFE/autogen/src/token_gen.cpp b/src/MapleFE/autogen/src/token_gen.cpp index 725192414d5cbdd64d937aa63a9a9cfb504affbc..45840e7e312e0b3052e20d8e0d3feb49136effe0 100644 --- a/src/MapleFE/autogen/src/token_gen.cpp +++ b/src/MapleFE/autogen/src/token_gen.cpp @@ -65,11 +65,12 @@ void TokenGen::ProcessAltTokens() { for (unsigned i = 0; i < mAltTokensNum; i++) { AlternativeToken at = alt_tokens[i]; unsigned orig_id; - bool found = gTokenTable.FindStringTokenId(at.mName, orig_id); - MASSERT(found); + bool orig_found = gTokenTable.FindStringTokenId(at.mName, orig_id); unsigned alt_id; - found = gTokenTable.FindStringTokenId(at.mAltName, alt_id); - MASSERT(found); + bool alt_found = gTokenTable.FindStringTokenId(at.mAltName, alt_id); + if (!orig_found || !alt_found) { + continue; + } ProcessedAltToken pat; pat.mId = orig_id; @@ -155,7 +156,7 @@ void TokenGen::GenCppFile() { std::list::iterator oit = gTokenTable.mOperators->begin(); for (; oit != gTokenTable.mOperators->end(); oit++, overall_index++) { - std::string output = " {.mTkType = TT_OP, {.mOprId = "; + std::string output = " {.mTkType = TT_OP, .mLineNum = 0, .mColNum = 0, .mLineBegin = false, .mLineEnd = false, {.mOprId = "; Operator opr = *oit; std::string opr_name = "OPR_"; opr_name += FindOperatorName(opr.mID); @@ -186,7 +187,7 @@ void TokenGen::GenCppFile() { std::list::iterator sit = gTokenTable.mSeparators->begin(); for (; sit != gTokenTable.mSeparators->end(); sit++, overall_index++) { - std::string output = " {.mTkType = TT_SP, {.mSepId = "; + std::string output = " {.mTkType = TT_SP, .mLineNum = 0, .mColNum = 0, .mLineBegin = false, .mLineEnd = false, {.mSepId = "; Separator sep = *sit; std::string sep_name = "SEP_"; sep_name += FindSeparatorName(sep.mID); @@ -217,7 +218,7 @@ void TokenGen::GenCppFile() { unsigned kw_size = gTokenTable.mKeywords.size(); for (unsigned index = 0; index < kw_size; index++, overall_index++) { - std::string output = " {.mTkType = TT_KW, {.mName = "; + std::string output = " {.mTkType = TT_KW, .mLineNum = 0, .mColNum = 0, .mLineBegin = false, .mLineEnd = false, {.mName = "; std::string keyword = gTokenTable.mKeywords[index]; output += "\""; output += keyword; @@ -247,7 +248,7 @@ void TokenGen::GenCppFile() { } // Write the comment token - std::string output = " {.mTkType = TT_CM, {.mName = NULL}, .mAltTokens = NULL}"; + std::string output = " {.mTkType = TT_CM, 
.mLineNum = 0, .mColNum = 0, .mLineBegin = false, .mLineEnd = false, {.mName = NULL}, .mAltTokens = NULL}"; mCppFile.WriteOneLine(output.c_str(), output.size()); mCppFile.WriteOneLine("};", 2); mCppFile.WriteOneLine("}", 1); diff --git a/src/MapleFE/autogen/src/type_gen.cpp b/src/MapleFE/autogen/src/type_gen.cpp index 786a3d57618abcf6b2bebdbc53a65632572b422e..57214ec7fd0f9f5a32c12df874e048e5b06b24d6 100644 --- a/src/MapleFE/autogen/src/type_gen.cpp +++ b/src/MapleFE/autogen/src/type_gen.cpp @@ -76,7 +76,7 @@ void TypeGen::GenHeaderFile() { mHeaderFile.WriteOneLine("namespace maplefe {", 19); // generate the keyword table - mHeaderFile.WriteOneLine("extern TypeKeyword TypeKeywordTable[TY_NA];", 43); + mHeaderFile.WriteOneLine("extern TypeKeyword TypeKeywordTable[];", 38); // generate the rule tables mHeaderFile.WriteFormattedBuffer(&mRuleTableHeader); @@ -90,12 +90,23 @@ void TypeGen::GenCppFile() { // generate the keyword table TableBuffer tb; - tb.Generate(this, "TypeKeyword TypeKeywordTable[TY_NA] = {"); + std::string s = "TypeKeyword TypeKeywordTable["; + std::string num = std::to_string(mTypes.size()); + s += num; + s += "] = {"; + tb.Generate(this, s); mCppFile.WriteFormattedBuffer(&tb); mCppFile.WriteOneLine("};", 2); + // generate the table size + s = "unsigned TypeKeywordTableSize = "; + s += num; + s += ";"; + mCppFile.WriteOneLine(s.c_str(), s.size()); + // generate the rule tables mCppFile.WriteFormattedBuffer(&mRuleTableCpp); + mCppFile.WriteOneLine("}", 1); } diff --git a/src/MapleFE/c/Makefile b/src/MapleFE/c/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..6ff0312533c771b903d1686b33384d7da06eff4b --- /dev/null +++ b/src/MapleFE/c/Makefile @@ -0,0 +1,24 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +include ../Makefile.in + +TARGS = c2ast + +$(TARGS): + $(MAKE) -C src + +clean: + rm -rf $(BUILDDIR)/c + +.PHONY: $(TARGS) diff --git a/src/MapleFE/c/attr.spec b/src/MapleFE/c/attr.spec new file mode 100644 index 0000000000000000000000000000000000000000..7211303cd96c3d2ceb5755398e12958832394a0a --- /dev/null +++ b/src/MapleFE/c/attr.spec @@ -0,0 +1,18 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+ +STRUCT Attribute : (("const", const), + ("static", static), + ("volatile", volatile), + ("restrict", restrict)) \ No newline at end of file diff --git a/src/MapleFE/c/identifier.spec b/src/MapleFE/c/identifier.spec new file mode 100644 index 0000000000000000000000000000000000000000..ff6f96fdb1c0fa797e70f876d0d2f53f41342f8c --- /dev/null +++ b/src/MapleFE/c/identifier.spec @@ -0,0 +1,17 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. + +rule CChar : ONEOF(CHAR, '_') +rule CharOrDigit : ONEOF(CChar, DIGIT) +rule Identifier : CChar + ZEROORMORE(CharOrDigit) \ No newline at end of file diff --git a/src/MapleFE/c/include/lang_builtin.def b/src/MapleFE/c/include/lang_builtin.def new file mode 100644 index 0000000000000000000000000000000000000000..8c127c72a426bb6113253cdf59a103a11785e8fb --- /dev/null +++ b/src/MapleFE/c/include/lang_builtin.def @@ -0,0 +1,14 @@ +// Copyright (C) [2022] Futurewei Technologies, Inc. All rights reverved. +// +// OpenArkFE is licensed under the Mulan PSL v2. +// You can use this software according to the terms and conditions of the Mulan PSL v2. +// You may obtain a copy of Mulan PSL v2 at: +// +// http://license.coscl.org.cn/MulanPSL2 +// +// THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +// FIT FOR A PARTICULAR PURPOSE. +// See the Mulan PSL v2 for more details. +// + diff --git a/src/MapleFE/c/include/lang_keywords.def b/src/MapleFE/c/include/lang_keywords.def new file mode 100644 index 0000000000000000000000000000000000000000..fc60a43491ab2c16f99198453f2543f60947aad0 --- /dev/null +++ b/src/MapleFE/c/include/lang_keywords.def @@ -0,0 +1,52 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ + +LANGKEYWORD(auto) +LANGKEYWORD(break) +LANGKEYWORD(case) +LANGKEYWORD(char) +LANGKEYWORD(const) +LANGKEYWORD(continue) +LANGKEYWORD(default) +LANGKEYWORD(do) +LANGKEYWORD(double) +LANGKEYWORD(else) +LANGKEYWORD(enum) +LANGKEYWORD(extern) +LANGKEYWORD(float) +LANGKEYWORD(for) +LANGKEYWORD(goto) +LANGKEYWORD(if) +LANGKEYWORD(inline) +LANGKEYWORD(int) +LANGKEYWORD(long) +LANGKEYWORD(register) +LANGKEYWORD(restrict) +LANGKEYWORD(return) +LANGKEYWORD(short) +LANGKEYWORD(signed) +LANGKEYWORD(sizeof) +LANGKEYWORD(static) +LANGKEYWORD(struct) +LANGKEYWORD(switch) +LANGKEYWORD(typedef) +LANGKEYWORD(union) +LANGKEYWORD(unsigned) +LANGKEYWORD(void) +LANGKEYWORD(volatile) +LANGKEYWORD(while) +LANGKEYWORD(_Bool) +LANGKEYWORD(_Complex) +LANGKEYWORD(_Imaginary) \ No newline at end of file diff --git a/src/MapleFE/c/include/lang_spec.h b/src/MapleFE/c/include/lang_spec.h new file mode 100644 index 0000000000000000000000000000000000000000..bae3daba4ee9dc8df6b59334d68e9df4b9f84798 --- /dev/null +++ b/src/MapleFE/c/include/lang_spec.h @@ -0,0 +1,38 @@ +/* + * Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEFE_LANG_SPEC_H +#define MAPLEFE_LANG_SPEC_H +#include "stringutil.h" +#include "token.h" +#include "lexer.h" + +namespace maplefe { + +class StringToValueImpl : public StringToValue { +public: + float StringToFloat(std::string &s); + double StringToDouble(std::string &s); + bool StringToBool(std::string &s); + Char StringToChar(std::string &s); + bool StringIsNull(std::string &s); +}; + +extern LitData ProcessLiteral(LitId type, const char *str); + +class CLexer : public Lexer { +}; + +} +#endif //MAPLEFE_LANG_SPEC_H diff --git a/src/MapleFE/c/keyword.spec b/src/MapleFE/c/keyword.spec new file mode 100644 index 0000000000000000000000000000000000000000..2e62b807aaaf736015788008e0c36e2b1843bc51 --- /dev/null +++ b/src/MapleFE/c/keyword.spec @@ -0,0 +1,51 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+ +STRUCT KeyWord : ((auto), + (break), + (case), + (char), + (const), + (continue), + (default), + (do), + (double), + (else), + (enum), + (extern), + (float), + (for), + (goto), + (if), + (inline), + (int), + (long), + (register), + (restrict), + (return), + (short), + (signed), + (sizeof), + (static), + (struct), + (switch), + (typedef), + (union), + (unsigned), + (void), + (volatile), + (while), + (_Bool), + (_Complex), + (_Imaginary)) diff --git a/src/MapleFE/c/literal.spec b/src/MapleFE/c/literal.spec new file mode 100644 index 0000000000000000000000000000000000000000..013f438418239a299bcd63ca4285bc00efd532c6 --- /dev/null +++ b/src/MapleFE/c/literal.spec @@ -0,0 +1,118 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. + +######################################################################### +## Integer ## +######################################################################### + +### Decimal rules + +rule NonZeroDigit : ONEOF('1', '2', '3', '4', '5', '6', '7', '8', '9') +rule Digit : ONEOF('0', NonZeroDigit) +rule DecimalNumeral : ONEOF('0', NonZeroDigit + ZEROORONE(Digit)) + +### Hexadecimal rules + +rule HexDigit : ONEOF('0', '1', '2', '3', '4', '5', '6', '7', '8', '9', + 'a', 'b', 'c', 'd', 'e', 'f', + 'A', 'B', 'C', 'D', 'E', 'F') +rule HexNumeral : ONEOF("0x" + HexDigit, "0X" + HexDigit) + +### Octal rules + +rule OctalDigit : ONEOF('0', '1', '2', '3', '4', '5', '6', '7') +rule OctalNumeral : ONEOF('0' + OctalDigit) + +rule IntegerTypeSuffix : ONEOF('L', 'l') +rule DecimalIntegerLiteral: DecimalNumeral + ZEROORONE(IntegerTypeSuffix) +rule HexIntegerLiteral : HexNumeral + ZEROORONE(IntegerTypeSuffix) +rule OctalIntegerLiteral : OctalNumeral + ZEROORONE(IntegerTypeSuffix) + +rule IntegerLiteral: ONEOF(DecimalIntegerLiteral, + HexIntegerLiteral, + OctalIntegerLiteral) + +######################################################################### +## Floating Point ## +######################################################################### + +##### Decimal floating point literal + +rule Sign : ONEOF('+', '-') +rule FloatTypeSuffix : ONEOF('f', 'F') +rule ExponentIndicator : ONEOF('e', 'E') +rule SignedInteger : ZEROORONE(Sign) + Digit +rule ExponentPart : ExponentIndicator + SignedInteger + +rule DecFPLiteral : ONEOF(Digit + '.' + ZEROORONE(Digit) + ZEROORONE(ExponentPart) + ZEROORONE(FloatTypeSuffix), + '.'+Digit + ZEROORONE(ExponentPart) + ZEROORONE(FloatTypeSuffix), + Digit + ExponentPart + ZEROORONE(FloatTypeSuffix), + Digit + ZEROORONE(ExponentPart)) + +####### Hex floating point literal + +rule BinaryExponentIndicator : ONEOF('p', 'P') +rule BinaryExponent : BinaryExponentIndicator + SignedInteger +rule HexSignificand : ONEOF(HexNumeral + ZEROORONE('.'), + "0x" + ZEROORONE(HexDigit) + '.' + HexDigit, + "0X" + ZEROORONE(HexDigit) + '.' 
+ HexDigit) +rule HexFPLiteral: HexSignificand + BinaryExponent + ZEROORONE(FloatTypeSuffix) + +###### Floating Point Literal + +rule FPLiteral : ONEOF(DecFPLiteral, HexFPLiteral) + +######################################################################### +## Boolean ## +######################################################################### + +rule BooleanLiteral : ONEOF ("true", "false") + +######################################################################### +## Character ## +## ESCAPE is a reserved rule in reserved.spec. ## +######################################################################### + +rule UnicodeEscape: '\' + 'u' + HEXDIGIT + HEXDIGIT + HEXDIGIT + HEXDIGIT +rule RawInputCharacter : ONEOF(ASCII, ''', ESCAPE) +rule SingleCharacter: ONEOF(UnicodeEscape, RawInputCharacter) + +rule OctalEscape : ONEOF('\' + '0', '\' + '1') +rule EscapeSequence : ONEOF(ESCAPE, OctalEscape) +rule CharacterLiteral : ''' + ONEOF(SingleCharacter, EscapeSequence) + ''' + +######################################################################### +## String ## +######################################################################### +# The UnicodeEscape is limited from \u0000 to \u00ff. +rule StringUnicodeEscape: '\' + 'u' + '0' + '0' + HEXDIGIT + HEXDIGIT +rule StringCharater: ONEOF(StringUnicodeEscape, RawInputCharacter) +rule StringLiteral : '"' + ZEROORMORE(StringCharater) + '"' + +######################################################################### +## Null ## +######################################################################### + +rule NullLiteral : "NULL" + +######################################################################### +## Literal ## +######################################################################### + +rule Literal : ONEOF(IntegerLiteral, + FPLiteral, + BooleanLiteral, + CharacterLiteral, + StringLiteral, + NullLiteral) \ No newline at end of file diff --git a/src/MapleFE/c/operator.spec b/src/MapleFE/c/operator.spec new file mode 100644 index 0000000000000000000000000000000000000000..eac023f63e58c861984945d34559c511e2c45daf --- /dev/null +++ b/src/MapleFE/c/operator.spec @@ -0,0 +1,54 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+ +STRUCT Operator : ONEOF( + # Arithmetic + ("+", Add), + ("-", Sub), + ("*", Mul), + ("/", Div), + ("%", Mod), + ("++", Inc), + ("--", Dec), + # Relation + ("==", EQ), + ("!=", NE), + (">", GT), + ("<", LT), + (">=", GE), + ("<=", LE), + # Bitwise + ("&", Band), + ("|", Bor), + ("^", Bxor), + ("~", Bcomp), + # Shift + ("<<", Shl), + (">>", Shr), + # Logical + ("&&", Land), + ("||", Lor), + ("!", Not), + # Assign + ("=", Assign), + ("+=", AddAssign), + ("-=", SubAssign), + ("*=", MulAssign), + ("/=", DivAssign), + ("%=", ModAssign), + ("<<=", ShlAssign), + (">>=", ShrAssign), + ("&=", BandAssign), + ("|=", BorAssign), + ("^=", BxorAssign)) \ No newline at end of file diff --git a/src/MapleFE/c/separator.spec b/src/MapleFE/c/separator.spec new file mode 100644 index 0000000000000000000000000000000000000000..dd37454e81425f720a84c45ab4e96d2a0f4f311f --- /dev/null +++ b/src/MapleFE/c/separator.spec @@ -0,0 +1,29 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. + +STRUCT Separator : ((" ", Whitespace), + ("(", Lparen), + (")", Rparen), + ("{", Lbrace), + ("}", Rbrace), + ("[", Lbrack), + ("]", Rbrack), + (";", Semicolon), + (",", Comma), + (".", Dot), + ("...", Dotdotdot), + (":", Colon), + ("?", Select), + ("::", Of), + ("#", Pound)) \ No newline at end of file diff --git a/src/MapleFE/c/src/Makefile b/src/MapleFE/c/src/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..c6cb1db128a3572f42d7043f1a9bf8d4fdf58bfc --- /dev/null +++ b/src/MapleFE/c/src/Makefile @@ -0,0 +1,80 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+ +include ../../Makefile.in +BUILDBIN=$(BUILDDIR)/bin +BUILD=$(BUILDDIR)/c +BUILDGEN=$(BUILDDIR)/gen +BUILDASTGEN=$(BUILDDIR)/ast_gen/shared +$(shell $(MKDIR_P) $(BUILD) $(BUILDGEN)) + +SRC=$(wildcard *.cpp) +OBJ :=$(patsubst %.cpp,%.o,$(SRC)) +DEP :=$(patsubst %.cpp,%.d,$(SRC)) + +SRCG := $(wildcard $(BUILDGEN)/gen*.cpp) +OBJG := $(patsubst %.cpp, %.o, $(SRCG)) +DEPG := $(patsubst %.cpp, %.d, $(SRCG)) + +OBJS :=$(foreach obj,$(OBJ), $(BUILD)/$(obj)) $(OBJG) +DEPS :=$(foreach dep,$(DEP), $(BUILD)/$(dep)) $(DEPG) + +INCLUDES := -I $(MAPLEFE_ROOT)/shared/include \ + -I $(MAPLEFE_ROOT)/c/include \ + -I $(MAPLEFE_ROOT)/autogen/include \ + -I ${BUILDDIR}/ast_gen/shared \ + -I $(BUILDGEN) + +INCLUDEGEN := -I $(BUILDGEN) -I $(MAPLEFE_ROOT)/shared/include + +TARGET=c2ast + +SHAREDLIB = $(BUILDDIR)/shared/shared.a $(BUILDASTGEN)/genast.a + +.PHONY: all +all: $(BUILDBIN)/$(TARGET) + +-include $(DEPS) +.PHONY: clean + +vpath %.o $(BUILD) +vpath %.d $(BUILD) + +# Pattern Rules +$(BUILD)/%.o : %.cpp + $(CXX) $(CXXFLAGS) -fpermissive $(INCLUDES) -w -c $< -o $@ + +$(BUILD)/%.d : %.cpp + @$(CXX) $(CXXFLAGS) -std=c++11 -MM $(INCLUDES) $< > $@ + @mv -f $(BUILD)/$*.d $(BUILD)/$*.d.tmp + @sed -e 's|.*:|$(BUILD)/$*.o:|' < $(BUILD)/$*.d.tmp > $(BUILD)/$*.d + @rm -f $(BUILD)/$*.d.tmp + +$(BUILDGEN)/%.o : $(BUILDGEN)/%.cpp $(BUILDGEN)/%.d + $(CXX) $(CXXFLAGS) -fpermissive $(INCLUDEGEN) -w -c $< -o $@ + +$(BUILDGEN)/%.d : $(BUILDGEN)/%.cpp + @$(CXX) $(CXXFLAGS) -std=c++11 -MM $(INCLUDEGEN) $< > $@ + @mv -f $(BUILDGEN)/$*.d $(BUILDGEN)/$*.d.tmp + @sed -e 's|.*:|$(BUILDGEN)/$*.o:|' < $(BUILDGEN)/$*.d.tmp > $(BUILDGEN)/$*.d + @rm -f $(BUILDGEN)/$*.d.tmp + +# TARGET depends on OBJS and shared OBJS from shared directory +# as well as mapleall libraries +$(BUILDBIN)/$(TARGET): $(OBJS) $(SHAREDLIB) + @mkdir -p $(BUILDBIN) + $(LD) -o $(BUILDBIN)/$(TARGET) $(OBJS) $(SHAREDLIB) -lstdc++fs + +clean: + rm -rf $(BUILD) diff --git a/src/MapleFE/c/src/lang_spec.cpp b/src/MapleFE/c/src/lang_spec.cpp new file mode 100644 index 0000000000000000000000000000000000000000..d3fd8113c8b85c1770822f1b9b324b860407c410 --- /dev/null +++ b/src/MapleFE/c/src/lang_spec.cpp @@ -0,0 +1,160 @@ +/* + * Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "lang_spec.h" +#include "stringpool.h" + +namespace maplefe { +float StringToValueImpl::StringToFloat(std::string &s) { + return stof(s); +} + +double StringToValueImpl::StringToDouble(std::string &s) { + std::string str = s; + char suffix = str[str.length() - 1]; + if (suffix == 'l' || suffix == 'L') + str[str.length() - 1] = 'L'; + return stod(str); +} + +bool StringToValueImpl::StringToBool(std::string &s) { + if ((s.size() == 4) && (s.compare("true") == 0)) + return true; + else if ((s.size() == 5) && (s.compare("false") == 0)) + return false; + else + MERROR("unknown bool literal"); +} + +bool StringToValueImpl::StringIsNull(std::string &s) {return false;} + +static char DeEscape(char c) { + switch(c) { + case 'b': + return '\b'; + case 't': + return '\t'; + case 'n': + return '\n'; + case 'f': + return '\f'; + case 'r': + return '\r'; + case '"': + return '\"'; + case '\'': + return '\''; + case '\\': + return '\\'; + case '0': + return '\0'; + default: + MERROR("Unsupported in DeEscape()."); + } +} + +static int char2int(char c) { + if (c >= '0' && c <= '9') + return c - '0'; + else if (c >= 'a' && c <= 'f') + return c - 'a' + 10; + else if (c >= 'A' && c <= 'F') + return c - 'A' + 10; + else + MERROR("Unsupported char in char2int()."); +} + +Char StringToValueImpl::StringToChar(std::string &s) { + Char ret_char; + ret_char.mIsUnicode = false; + MASSERT (s[0] == '\''); + if (s[1] == '\\') { + if (s[2] == 'u') { + ret_char.mIsUnicode = true; + int first = char2int(s[3]); + int second = char2int(s[4]); + int third = char2int(s[5]); + int forth = char2int(s[6]); + MASSERT(s[7] == '\''); + ret_char.mData.mUniValue = (first << 12) + (second << 8) + + (third << 4) + forth; + } else { + ret_char.mData.mChar = DeEscape(s[2]); + } + } else { + MASSERT(s[2] == '\''); + ret_char.mData.mChar = s[1]; + } + return ret_char; +} + +LitData ProcessLiteral(LitId id, const char *str) { + LitData data; + std::string value_text(str); + StringToValueImpl s2v; + + switch (id) { + case LT_IntegerLiteral: { + int i = s2v.StringToInt(value_text); + data.mType = LT_IntegerLiteral; + data.mData.mInt = i; + break; + } + case LT_FPLiteral: { + char suffix = value_text[value_text.length() - 1]; + if (suffix == 'f' || suffix == 'F') { + float f = s2v.StringToFloat(value_text); + data.mType = LT_FPLiteral; + data.mData.mFloat = f; + } else { + double d = s2v.StringToDouble(value_text); + data.mType = LT_DoubleLiteral; + data.mData.mDouble = d; + } + break; + } + case LT_BooleanLiteral: { + bool b = s2v.StringToBool(value_text); + data.mType = LT_BooleanLiteral; + data.mData.mBool = b; + break; } + case LT_CharacterLiteral: { + Char c = s2v.StringToChar(value_text); + data.mType = LT_CharacterLiteral; + data.mData.mChar = c; + break; } + case LT_StringLiteral: { + const char *s = s2v.StringToString(value_text); + data.mType = LT_StringLiteral; + data.mData.mStrIdx = gStringPool.GetStrIdx(s); + break; } + case LT_NullLiteral: { + // Just need set the id + data.mType = LT_NullLiteral; + break; } + case LT_NA: // N/A, + default: + data.mType = LT_NA; + break; + } + + return data; +} + +Lexer* CreateLexer() { + Lexer *lexer = new CLexer(); + return lexer; +} + +} diff --git a/src/MapleFE/c/src/main.cpp b/src/MapleFE/c/src/main.cpp new file mode 100644 index 0000000000000000000000000000000000000000..b055675ccd5e23deec5d5292167d7cea2d3a57a6 --- /dev/null +++ b/src/MapleFE/c/src/main.cpp @@ -0,0 +1,123 @@ +/* + * Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. 
+ * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include +#include "parser.h" +#include "token.h" +#include "common_header_autogen.h" +#include "ruletable_util.h" +#include "gen_summary.h" +#include "gen_aststore.h" +#include "gen_astdump.h" +#include "gen_astgraph.h" + +static void help() { + std::cout << "c sourcefile [options]:\n" << std::endl; + std::cout << " --help : print this help" << std::endl; + std::cout << " --trace-lexer : Trace lexing" << std::endl; + std::cout << " --trace-table : Trace rule table when entering and exiting" << std::endl; + std::cout << " --trace-left-rec : Trace left recursion parsing" << std::endl; + std::cout << " --trace-appeal : Trace appeal process" << std::endl; + std::cout << " --trace-failed : Trace failed tokens of table" << std::endl; + std::cout << " --trace-timing : Trace parsing time" << std::endl; + std::cout << " --trace-stack : Trace visited-token stack of table" << std::endl; + std::cout << " --trace-sortout : Trace SortOut" << std::endl; + std::cout << " --trace-ast-build : Trace AST Builder" << std::endl; + std::cout << " --trace-patch-was-succ : Trace Patching of WasSucc nodes" << std::endl; + std::cout << " --trace-warning : Print Warning" << std::endl; + std::cout << " --dump-ast : Dump AST in text format" << std::endl; + std::cout << " --dump-dot : Dump AST in dot format" << std::endl; +} + +int main (int argc, char *argv[]) { + if (argc == 1 || (!strncmp(argv[1], "--help", 6) && (strlen(argv[1]) == 6))) { + help(); + exit(-1); + } + + maplefe::Parser *parser = new maplefe::Parser(argv[1]); + + bool dump_ast = false; + bool dump_dot = false; + bool succ; + + // Parse the argument + for (unsigned i = 2; i < argc; i++) { + if (!strncmp(argv[i], "--trace-lexer", 13) && (strlen(argv[i]) == 13)) { + parser->SetLexerTrace(); + } else if (!strncmp(argv[i], "--trace-table", 13) && (strlen(argv[i]) == 13)) { + parser->mTraceTable = true; + } else if (!strncmp(argv[i], "--trace-left-rec", 16) && (strlen(argv[i]) == 16)) { + parser->mTraceLeftRec = true; + } else if (!strncmp(argv[i], "--trace-appeal", 14) && (strlen(argv[i]) == 14)) { + parser->mTraceAppeal = true; + } else if (!strncmp(argv[i], "--trace-stack", 13) && (strlen(argv[i]) == 13)) { + parser->mTraceVisited = true; + } else if (!strncmp(argv[i], "--trace-failed", 14) && (strlen(argv[i]) == 14)) { + parser->mTraceFailed = true; + } else if (!strncmp(argv[i], "--trace-timing", 14) && (strlen(argv[i]) == 14)) { + parser->mTraceTiming = true; + } else if (!strncmp(argv[i], "--trace-sortout", 15) && (strlen(argv[i]) == 15)) { + parser->mTraceSortOut = true; + } else if (!strncmp(argv[i], "--trace-ast-build", 17) && (strlen(argv[i]) == 17)) { + parser->mTraceAstBuild = true; + } else if (!strncmp(argv[i], "--trace-patch-was-succ", 22) && (strlen(argv[i]) == 22)) { + parser->mTracePatchWasSucc = true; + } else if (!strncmp(argv[i], "--trace-warning", 15) && (strlen(argv[i]) == 15)) { + parser->mTraceWarning = true; + } else if (!strncmp(argv[i], "--dump-ast", 10) && (strlen(argv[i]) == 10)) { + dump_ast = true; + } else if (!strncmp(argv[i], 
"--dump-dot", 10) && (strlen(argv[i]) == 10)) { + dump_dot = true; + } else { + std::cerr << "unknown option " << argv[i] << std::endl; + exit(-1); + } + } + + parser->InitRecursion(); + succ = parser->Parse(); + if (!succ) { + delete parser; + return 1; + } + + // the module from parser + maplefe::ModuleNode *module = parser->GetModule(); + + if(dump_ast) { + maplefe::AstDump astdump(module); + astdump.Dump("c2ast: Initial AST", &std::cout); + } + + if(dump_dot) { + maplefe::AstGraph graph(module); + graph.DumpGraph("c2ast: Initial AST", &std::cout); + } + + maplefe::AstStore saveAst(module); + saveAst.StoreInAstBuf(); + maplefe::AstBuffer &ast_buf = saveAst.GetAstBuf(); + + std::ofstream ofs; + std::string fname(module->GetFilename()); + fname = fname.replace(fname.find(".c"), 2, ".mast"); + ofs.open(fname, std::ofstream::out); + const char *addr = (const char *)(&(ast_buf[0])); + ofs.write(addr, ast_buf.size()); + ofs.close(); + + delete parser; + return 0; +} diff --git a/src/MapleFE/c/stmt.spec b/src/MapleFE/c/stmt.spec new file mode 100644 index 0000000000000000000000000000000000000000..f71d36b960266b2d4a5e42e799f73bf60a0223ff --- /dev/null +++ b/src/MapleFE/c/stmt.spec @@ -0,0 +1,163 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. + +###################################################################### +# Expression # +###################################################################### + +rule PrimaryExpression : ONEOF( + Literal, + Identifier +) + +rule DimExprs : DimExpr + ZEROORMORE(DimExpr) + +rule DimExpr : '[' + Expression + ']' + +rule Expression : ONEOF( + PrimaryExpression, + UnaryExpression) + +rule UnaryExpression : ONEOF( + PreIncrementExpression, + PreDecrementExpression, + PostIncrementExpression, + PostDecrementExpression) + +rule PreIncrementExpression : "++" + PrimaryExpression + attr.action : BuildUnaryOperation(%1, %2) + +rule PreDecrementExpression : "--" + PrimaryExpression + attr.action : BuildUnaryOperation(%1, %2) + +rule PostIncrementExpression : PrimaryExpression + "++" + attr.action : BuildPostfixOperation(%2, %1) + +rule PostDecrementExpression : PrimaryExpression + "--" + attr.action : BuildPostfixOperation(%2, %1) + +###################################################################### +# Variable # +###################################################################### + +rule GlobalVariableDeclarationStatement : VariableDeclaration + ';' + attr.property : Top + +rule LocalVariableDeclarationStatement : VariableDeclaration + ';' + +rule VariableDeclaration : ZEROORMORE(VariableModifier) + Type + VariableDeclaratorList + attr.action: BuildDecl(%2, %3) + attr.action: AddModifier(%1) + +rule VariableModifier : ONEOF( + "static", + "const", + "volatile", + "restrict") + +rule VariableDeclaratorList : VariableDeclarator + ZEROORMORE(',' + VariableDeclarator) + attr.action: BuildVarList(%1, %2) + +rule VariableDeclarator : VariableDeclaratorId + ZEROORONE('=' + VariableInitializer) + attr.action: AddInitTo(%1, %2) + +rule VariableDeclaratorId : 
Identifier + ZEROORONE(Dims) + attr.action: AddDimsTo(%1, %2) + +rule VariableInitializer : ONEOF( + Expression, + ArrayInitializer) + +rule ArrayInitializer : '{' + ZEROORONE(VariableInitializerList) + ZEROORONE(',') + '}' + +rule VariableInitializerList: VariableInitializer + ZEROORMORE(',' + VariableInitializer) + +rule Dims : Dim + ZEROORMORE(Dim) + attr.action: BuildDims(%1, %2) + +rule Dim : '[' + ']' + attr.action: BuildDim(%1) + +###################################################################### +# statement # +###################################################################### + +rule Statement : ONEOF(LocalVariableDeclarationStatement, + ExpressionStatement, + ReturnStatement) + attr.property: Single + +rule ExpressionStatement : StatementExpression + ';' + +rule StatementExpression : ONEOF( + PreIncrementExpression, + PreDecrementExpression, + PostIncrementExpression, + PostDecrementExpression, + ) + attr.property: Single + +rule ReturnStatement : "return" + ZEROORONE(Expression) + ';' + attr.action : BuildReturn(%2) + + +###################################################################### +# Function # +###################################################################### + +rule GlobalFuncDeclaration : FuncDeclaration + attr.property : Top + +rule FuncDeclaration : ZEROORMORE(FuncModifier) + FuncHeader + FuncBody + attr.action: AddModifierTo(%2, %1) + attr.action: AddFunctionBodyTo(%2, %3) + +rule FuncBody : ONEOF(Block, ';') + attr.property : Single + +rule FuncHeader : ONEOF(Result + FuncDeclarator) + attr.action.%1: AddType(%2, %1) + attr.property : Single + +rule Result : ONEOF(Type, "void") + attr.property : Single + +rule FuncDeclarator : Identifier + '(' + ZEROORONE(FormalParameters) + ')' + attr.action: BuildFunction(%1) + attr.action: AddParams(%3) + +rule FuncAttr : ONEOF("const", "static") + attr.property : Single + +rule FuncModifier : ONEOF(FuncAttr) + attr.property : Single + +rule FormalParameters : ONEOF(FormalParameter + ZEROORMORE(',' + FormalParameter)) + attr.property : Single + +rule FormalParameter : ZEROORMORE(VariableModifier) + Type + VariableDeclaratorId + attr.action: BuildDecl(%2, %3) + attr.action: AddModifier(%1) + +###################################################################### +# Block # +###################################################################### + +rule BlockStatement : ONEOF(LocalVariableDeclarationStatement, Statement) + attr.property : Single + +rule BlockStatements : BlockStatement + ZEROORMORE(BlockStatement) + +rule Block : '{' + ZEROORONE(BlockStatements) + '}' + attr.action: BuildBlock(%2) diff --git a/src/MapleFE/c/type.spec b/src/MapleFE/c/type.spec new file mode 100644 index 0000000000000000000000000000000000000000..af2d02bbb43ecb326acb4a3722ef4c981d76285a --- /dev/null +++ b/src/MapleFE/c/type.spec @@ -0,0 +1,37 @@ +# +# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+ +STRUCT Keyword : (("char", Char), + ("short", Short), + ("int", Int), + ("long", Long), + ("float", Float), + ("double", Double), + ("void", Void), + ("_Bool", Boolean)) + +rule BooleanType : "_Bool" +rule IntType : ONEOF("char", "unsigned" + "char", "signed" + "char", + "short", "unsigned" + "short", "signed" + "short", + "int", "unsigned" + "int", "signed" + "int", "unsigned", "signed", + "long", "unsigned" + "long", "signed" + "long") +rule FPType : ONEOF("float", "double") +rule NumericType : ONEOF(IntType, FPType) + +rule PrimitiveType : ONEOF(NumericType, BooleanType) +rule TypeVariable : Identifier + +rule NonePointerType : ONEOF(PrimitiveType, TypeVariable) +rule PointerType : NonePointerType + '*' + ZEROORMORE('*') +rule Type : ONEOF(NonePointerType, PointerType) \ No newline at end of file diff --git a/src/MapleFE/docs/astdump.md b/src/MapleFE/docs/astdump.md new file mode 100644 index 0000000000000000000000000000000000000000..7f68eeff72608372e0ba4d81e8a7296497013c37 --- /dev/null +++ b/src/MapleFE/docs/astdump.md @@ -0,0 +1,45 @@ +## Overview + +The `astdump.sh` tool is a bash script which executes the `ts2cpp` binary executable with option +`--trace-a2c` to dump the AST and CFG graphs of a TypeScript program. + +It also dumps the TypeScript code converted from the corresponding AST with the AST emitter. + +## Usage of astdump.sh + +```bash +Usage: astdump.sh [-dot] [-f|--fullscreen] [-p <prefix>|--pre <prefix>] [-a|--ast] [-c|--cfg] [-A|--all] [-C|--clean] [<file>.ts ...] + + -d | --dot Use Graphviz dot to generate the graph and view it with viewnior + -f | --fullscreen View the generated graph in fullscreen mode. It implies option -dot + -p | --pre <prefix> Filter graphs with the specified <prefix>, e.g. -p "CFG_" + -a | --ast Show AST graph. It is equivalent to options "-dot -p AST" + -c | --cfg Show CFG graph. It is equivalent to options "-dot -p CFG" + -s | --syntax Syntax highlight the generated TypeScript code + -A | --all Process all .ts files in the current directory excluding *.ts-[0-9]*.out.ts + -C | --clean Clean up generated files (*.ts-[0-9]*.out.ts) + [<file>.ts ...] Specify one or more TypeScript files to be processed +``` +## Example with binary-search.ts + +You can execute the following commands to get the CFG graph of the test case `binary-search.ts`, and the +TypeScript code converted from the corresponding AST. + +### 1. Command lines +```bash +cd MapleFE/test/typescript/unit_tests +../../astdump.sh --cfg --syntax binary-search.ts +``` + +### 2. CFG graph of function "binarySearch" + +This is the CFG graph of function `binarySearch`. + + + +### 3. TypeScript code converted from the corresponding AST + +This is the TypeScript code generated from the AST. + + + diff --git a/src/MapleFE/docs/astvisitor.md b/src/MapleFE/docs/astvisitor.md new file mode 100644 index 0000000000000000000000000000000000000000..6a579350701c0463ca903e31834acd571ad792f6 --- /dev/null +++ b/src/MapleFE/docs/astvisitor.md @@ -0,0 +1,86 @@ +## Overview of AstVisitor + +The `AstVisitor` class lets you traverse an AST in a depth-first manner, calling a visitor +function for each node in the AST. Each visitor function for a tree node can be overridden by a customized +visitor in a derived class of `AstVisitor`. + +A customized visitor enables you to gather any information you need from a tree node and to replace that tree +node with another one which the customized visitor returns.
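+
+For orientation, here is a minimal sketch of driving a visitor over a subtree. It assumes you have
+derived a visitor class (like the `MyVisitor` class shown in the next section) and that `root` is a
+`TreeNode*` you already hold for the subtree to be processed:
+
+```c
+// Sketch only: run a customized visitor over an existing AST subtree.
+MyVisitor visitor;                       // derived from AstVisitor, see the next section
+TreeNode *result = visitor.Visit(root);  // Visit() forwards to VisitTreeNode()
+if (result != root) {
+  // The visitor returned a replacement subtree; re-attach it wherever `root` was linked.
+}
+```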
+ +## Creat your own visitors in a derived class of AstVisitor + +First of all, you need to derive a class from `AstVisitor` and implement your own visitors for any nodes to +be processed. + +Here is an example. The class `MyVisitor` is derived from `AstVisitor` and has two customized visitors +implemented for `CondBranchNode`. + + +```c +class MyVisitor : public AstVisitor { + ... ... + CondBranchNode *VisitCondBranchNode(CondBranchNode *node); +} +``` + +```c +CondBranchNode *MyVisitor::VisitCondBranchNode(CondBranchNode *node) { + // Do something as needed + ... + // Call AstVisitor::VisitCondBranchNode to traverse subtrees of this node if needed + AstVisitor::VisitCondBranchNode(node); + return node; +} +``` + +## Replace a tree node with another one + +You may replace a tree node with another one with your customized visitor. + +```c +CondBranchNode *MyVisitor::VisitCondBranchNode(CondBranchNode *node) { + // Do something as needed + ... + // Create a new CondBranchNode node to replace the current one + CondBranchNode *new_node = ...; + return new_node; +} +``` + +The customized visitor `MyVisitor::VisitCondBranchNode()` visits a node which is the root of a subtree, +and replace this node with `new_node` which is the root of another subtree. This provides a way to +transform a subtree into a new subtree when needed. + +## How does the replacement work? + +The `AstVisitor` class is declared and defined in `MapleFE/output/typescript/ast_gen/shared/gen_astvisitor.{h,cpp}`. + +```c +BlockNode *AstVisitor::VisitBlockNode(BlockNode *node) { + if (node != nullptr) { + if (mTrace) { + std::cout << "Visiting BlockNode, id=" << node->GetNodeId() << "..." << std::endl; + } + + for (unsigned i = 0; i < node->GetChildrenNum(); ++i) { + if (auto t = node->GetChildAtIndex(i)) { + auto n = VisitTreeNode(t); + if (n != t) { // If the returned node 'n' is not the same as 't' + node->SetChildAtIndex(i, n); // the current child node `t` of BlockNode is replaced with 'n' + } + } + } + + if (auto t = node->GetSync()) { + auto n = VisitTreeNode(t); + if (n != t) { // Similar as mentioned above + node->SetSync(n); + } + } + } + return node; +} +``` + +if a `CondBranchNode` node is a child of a `BlockNode` node, and `MyVisitor::VisitCondBranchNode()` returns +a new node, this child node of the `BlockNode` node will be replaced with the one returned. diff --git a/src/MapleFE/docs/builtin-constructors.md b/src/MapleFE/docs/builtin-constructors.md new file mode 100644 index 0000000000000000000000000000000000000000..aa844737710707a03f7a67d1b7751fb919b9b24c --- /dev/null +++ b/src/MapleFE/docs/builtin-constructors.md @@ -0,0 +1,123 @@ + +## JavaScript built-in objects + +JavaScript built-in object info is available at: + +### 1. ECMA-262 standard +https://262.ecma-international.org/12.0/#sec-ecmascript-standard-built-in-objects +``` +Under sections: + 19 The Global Object + 20 Fundamental Objects + 21 Numbers and Dates + 22 Text Processing + 23 Indexed Collections + 24 Keyed Collections + 25 Structured Data +``` + +### 2. Mozilla Developer docs +https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference under Built-in objects + +## JavaScript built-in object constructors + +Not all built-in objects work as object constructors. The following is a list of +JavaScript built-in objects that works as object constructors to create objects +of corresponding built-in type: + +### 1. 
List of JavaScript built-in object constructors +``` + 1 AggregateError + 2 Array + 3 ArrayBuffer + 4 AsyncFunction + 5 BigInt64Array + 6 BigUint64Array + 7 Boolean + 8 DataView + 9 Date + 10 Error + 11 EvalError + 12 FinalizationRegistry + 13 Float32Array + 14 Float64Array + 15 Function + 16 Generator + 17 GeneratorFunction + 18 Int16Array + 19 Int32Array + 20 Int8Array + 21 InternalError (Mozilla only) + 22 Map + 23 Number + 24 Object + 25 Promise + 26 Proxy + 27 RangeError + 28 ReferenceError + 29 RegExp + 30 Set + 31 SharedArrayBuffer + 32 String + 33 Symbol + 34 SyntaxError + 35 TypeError + 36 Uint16Array + 37 Uint32Array + 38 Uint8Array + 39 Uint8ClampedArray + 40 URIError + 41 WeakMap + 42 WeakRef + 43 WeakSet +``` + +### 2. JavaScript builtin String/Number/Boolean object constructors and string/number/boolean primitives +https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/String#string_primitives_and_string_objects + +"Note that JavaScript distinguishes between String objects and primitive string values. (The same is true of Boolean and Numbers.) + +String literals (denoted by double or single quotes) and strings returned from String calls in a non-constructor context (that is, called without using the new keyword) are primitive strings. JavaScript automatically converts primitives to String objects, so that it's possible to use String object methods for primitive strings. In contexts where a method is to be invoked on a primitive string or a property lookup occurs, JavaScript will automatically wrap the string primitive and call the method or perform the property lookup." +``` + 1 var s1 : string = "test"; // string literal + 2 var s2 : String = "test"; // string literal + 3 var s3 : string = String("test"); // string literal + 4 var s4 : String = String("test"); // string literal + 5 var s5 : String = new String("test"); // String object + 6 console.log(typeof(s1)); // string + 7 console.log(s1.slice(1,2)); // string literal s1 wrapped/converted to String object for call + 8 console.log(typeof(s2)); // string + 9 console.log(typeof(s3)); // string + 10 console.log(typeof(s4)); // string + 11 console.log(typeof(s5)); // object +``` + +For the TypeScript to C++ mapping, the string primitive maps to std::string, and String objects map to the builtin String object t2crt::String (the same holds for Boolean and Number). + +The type returned by the JavaScript/TypeScript String/Number/Boolean builtin/constructor function depends on the usage: +- when used as a function, it is a type converter (converting between literal types) and returns the primitive/literal type string/number/boolean +- when used with the new operator, it is a constructor and returns an object +- A variable declared as the primitive type string/number/boolean will be wrapped/converted to a String/Number/Boolean object if any object property/method is referenced. + For TypeScript to C++, this conversion can be done by the runtime, but there is an opportunity for optimization: if it can be determined at compile time that a primitive will be used as an object, the primitive literal can be generated as an object instead. + + +## TypeScript types + +Additionally, these TypeScript types will be treated as built-in object types as well: +- Record +- Tuple +- Iterable +- Iterator + +### 1. Record and Tuple types +Record and Tuple are currently TypeScript-only types. They are +not part of the ECMA-262 standard yet, but have been proposed and are undergoing standardization.
+ +- https://tc39.es/proposal-record-tuple +- https://www.typescriptlang.org/docs/handbook/2/objects.html#tuple-types +- https://www.typescriptlang.org/docs/handbook/utility-types.html#recordkeys-type + +### 2. Iterable and Iterator types +These are TypeScript types +- https://www.typescriptlang.org/docs/handbook/release-notes/typescript-3-6.html#stricter-generators +- https://www.typescriptlang.org/docs/handbook/iterators-and-generators.html#iterable-interface diff --git a/src/MapleFE/docs/maplefe-autogen.md b/src/MapleFE/docs/maplefe-autogen.md new file mode 100644 index 0000000000000000000000000000000000000000..843f2b6a7a7f2f506037b0e0624821a8a97568d7 --- /dev/null +++ b/src/MapleFE/docs/maplefe-autogen.md @@ -0,0 +1,138 @@ +## Overview +The Python script `maplefe-autogen.py` is a tool to generate the code for class `AstVisitor`, `AstDump`, `AstGraph`, +etc. for AST tree. + +Whenever a new kind of AST tree node is introduced for a new language feature in the future, or any changes are made +to the existing AST tree node, these classes will be updated automatically. It reduces the maintenance effort and +ensures the consistency and completeness of these classes. + +## How does it work? + +It makes use of `clang-doc-10` to parse source file `ast_builder.cpp` and get its documentation files in YAML format. +These YAML files are fed into `maplefe-autogen.py` to generate C++ header and source files as described below. + +## What should I do to generate a new C++ class? + +Let's use `AstVisitor` as an example to explain it. + +### 1. Set filename, class name, prefix of function name and extra include directives + +```python +gen_args = [ + "gen_astvisitor", # Filename + "AstVisitor", # Class name + "Visit", # Prefix of function name + "", # Extra include directives + ] +``` +The list `gen_args` contains the filename, class name, prefix of function name and extra include directives for +generating C++ header and source files. You have to use the list name `gen_args` for them. + +### 2. Define the specific content in header file + +```python +astvisitor_init = [ +""" +private: +bool mTrace; + +public: +{gen_args1}(bool t = false) : mTrace(t) {{}} + +TreeNode* {gen_args2}(TreeNode* node) {{ + return {gen_args2}TreeNode(node); +}} +""".format(gen_args1=gen_args[1], gen_args2=gen_args[2]) +] # astvisitor_init +``` +The list `astvisitor_init` contains the specific content in C++ header file. The list name can be customized. + +### 3. Set callback functions for each part of a C++ function for a AST node + +The following callback functions are defined for generating the code for `AstVisitor`. + +```python +gen_call_handle_values = lambda: False +``` +This function returns true or false. +There are two kinds of fields in an AST node. One is a pointer to another AST node, and another is +a value with a non-AST-node type, such as enum, int, etc. +If this function returns false, all non-AST-node values will be ignored. + + +```python +gen_func_declaration = lambda dictionary, node_name: ... +``` +Function `gen_func_declaration` returns a string for the declaration of a function for an AST node `node_name`. The result will be in +the C++ header file. + + +```python +gen_func_definition = lambda dictionary, node_name: ... +``` +Function `gen_func_definition` returns a string for the definition of a function for an AST node `node_name`. The result will be in +the C++ source file. Any code which occurs at the beginning of its function body can be placed here. 
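+
+As a concrete illustration, for a node class such as `BlockNode` these two callbacks are expected to
+produce a declaration in the header and the skeleton of the definition in the source file, along the
+lines of the generated code quoted in `astvisitor.md` (sketch only; the exact layout is left to clang-format):
+
+```cpp
+// From gen_func_declaration, placed in gen_astvisitor.h:
+BlockNode *VisitBlockNode(BlockNode *node);
+
+// From gen_func_definition, the start of the body in gen_astvisitor.cpp;
+// the per-field code is inserted by the gen_call_* callbacks described next,
+// and gen_func_definition_end closes the function:
+BlockNode *AstVisitor::VisitBlockNode(BlockNode *node) {
+  if (node != nullptr) {
+    if (mTrace) {
+      std::cout << "Visiting BlockNode, id=" << node->GetNodeId() << "..." << std::endl;
+    }
+    // ... per-field visiting code ...
+  }
+  return node;
+}
+```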
+ + +```python +gen_call_child_node = lambda dictionary, node_name, field_name, node_type, accessor: ... +``` +Function `gen_call_child_node` returns a string with the statements to handle a child AST node `field_name`. The child node has +`node_type` and you can use `accessor` to get its pointer value. + + +```python +gen_call_children_node = lambda dictionary, node_name, field_name, node_type, accessor: '' +``` +Function `gen_call_children_node` returns a string with the statements to handle a SmallVector or SmallList node `field_name` with +`node_type`. The `accessor` can be used to get its pointer value. It returns an empty string for `AstVisitor`. + + +```python +gen_call_children_node_end = lambda dictionary, node_name, field_name, node_type, accessor: '' +``` +Function `gen_call_children_node_end` returns a string with the statements following the for-loop for each value stored in the +SmallVector or SmallList node `field_name`. It returns an empty string for `AstVisitor`. + + +```python +gen_func_definition_end = lambda dictionary, node_name: '}\nreturn node;\n}' +``` +Function `gen_func_definition_end` returns a string with the statements at the end of the function body. + +Since it does not need to handle any non-AST-node values, therefore some functions for values are missing for `AstVisitor`. + +### 4. Generate the code in C++ header and source files + +Here is the code block for generating the C++ code. +```python +handle_src_include_files(Initialization) +append(include_file, astvisitor_init) +handle_yaml(initial_yaml, gen_handler) +handle_src_include_files(Finalization) +``` + +It has to call `handle_src_include_files(Initialization)` and `handle_src_include_files(Finalization)` at the beginning and end of the block as shown above. + +```python +append(include_file, astvisitor_init) +``` +This is to append the code in list `astvisitor_init` to the C++ header file. + +```python +handle_yaml(initial_yaml, gen_handler) +``` +This function call `handle_yaml(initial_yaml, gen_handler)` is to handle all related YAML file, call the `gen_func_*` and `gen_call_*` callback functions defined at step 3, and generate the code in C++ header and source files with them. + +## Where are the generated files located? + +They can be found under directory `MapleFE/output/typescript/ast_gen/shared/`. +```bash +MapleFE/output/typescript/ast_gen/shared/gen_astvisitor.h +MapleFE/output/typescript/ast_gen/shared/gen_astvisitor.cpp +``` + +## Format of the generated C++ header and source files + +The generated C++ header and source files will be formatted with clang-format-10. You do not +have to format the generated code manually. diff --git a/src/MapleFE/docs/readme b/src/MapleFE/docs/readme new file mode 100644 index 0000000000000000000000000000000000000000..9ff4bbefc70014338c1bd442785b82389db6b262 --- /dev/null +++ b/src/MapleFE/docs/readme @@ -0,0 +1,192 @@ +1. SOURCE CODE +============== + +git clone https://gitee.com/openarkcompiler/OpenArkCompiler -b dev_MapleFE + +2. 
ENVIRONMENT +============== +System software versions used: + Ubuntu: 20.04 + Typescript: 4.5.5 + Nodejs: 14.7.0 + +export MFE=~/OpenArkCompiler/src/MapleFE +export tsbin=$MFE/output/typescript/bin +export rtsrc=$MFE/ast2cpp/runtime/src +export unit=$MFE/test/typescript/unit_tests +alias runbld='cd $MFE;source envsetup.sh typescript;make -j16;cd -' +alias runtest='rm cxx.log; ../ts2cxx-test.sh' +alias goutil='cd $MFE/test/typescript/utils' +alias gofe='cd $MFE' +alias gocpp='cd $MFE/ast2cpp/src' +alias gort='cd $MFE/ast2cpp/runtime/include' +alias gounit='cd $unit' + +- To build everything: runbld +- To run unit tests: cd $unit; runtest +- To run a single test: cd $unit; ../ts2cxx-test.sh +- To run only the TSC compile and nodejs execution of the JS for a specific testcase: cd $unit; ../tsc.sh +- CPP emitter source code: $MFE/ast2cpp/src +- MapleFE compiler tools for TSC: + o xxx.ts -> [ts2ast] -> xxx.ts.ast -> [ast2cpp] -> xxx.h xxx.cpp + o $tsbin/ts2ast - Compiles .ts into .ts.ast + o $tsbin/ast2cpp - Compiles .ts.ast into .cpp or back into .ts (for verification) + o $MFE/test/astdump.sh - Dumps the AST tree from .ts.ast. Output can be text or graph. + o run each of the above cmds to see parameters and options, e.g. $tsbin/ast2cpp will display: + ast2cpp a.ast[,b.ast] [options]: + --out=x.cpp : cpp output file + --help : print this help + --trace=n : Emit trace with 4-bit combo levels 1...15 + 1 : Emit ast tree visits + 2 : Emit graph + --emit-ts-only : Emit ts code only + --emit-ts : Emit ts code + --format-cpp : Format cpp + ... +- For a graphic view of JavaScript object inheritance relations: + o cd $MFE/docs/util; node proto.js | ./viewdot.sh + +3. Run a single case from .ts + 1) ~/OpenArkCompiler/src/MapleFE/output/typescript/bin/ts2ast while-stmt.ts + This creates while-stmt.ts.ast in the same directory + 2) ~/OpenArkCompiler/src/MapleFE/output/typescript/bin/ast2cpp while-stmt.ts.ast + This generates while-stmt.cpp + 3) g++ -g -o run -I/home/ubuntu/OpenArkCompiler/src/MapleFE/ast2cpp/runtime/include -I/home/ubuntu/OpenArkCompiler/src/MapleFE/astopt/include -std=c++17 while-stmt.cpp /home/ubuntu/OpenArkCompiler/src/MapleFE/ast2cpp/runtime/src/*.cpp + This generates the executable run. + 4) ./run to test the result. + +4. TypeScript/JavaScript inheritance modeling in C++ +==================================================== + +4.1 JavaScript Object Properties + +JavaScript objects have both instance properties and inherited +properties. Instance properties are held in the object's own property +list. Inherited properties are held in an object pointed to by the +instance property __proto__. + +The chain of __proto__ between objects forms a hierarchical prototype +chain used in the lookup for an object's inherited properties. Inherited +properties have only a single copy and are shared by all objects that +inherit the property. + +In JavaScript, the object property lookup order is: +- first look up the instance property (from the property list of the current object) +- then look up the inherited property (from the property lists of the objects pointed to + by the chain of __proto__ starting from the __proto__ of the current object) + +4.2 TypeScript/JavaScript inheritance modeling in C++ + +The inheritance relationship of TS/JS objects is mapped to C++ as +classes derived hierarchically along the __proto__ chain. This allows +accessing inherited properties directly as C++ fields and methods of +parent classes, instead of having to traverse the __proto__ chain.
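+
+For example, docs/utils/proto.cpp (added under docs/utils in this patch) models
+the chain mycar.__proto__ -> MyCar.prototype -> Car.prototype -> Vehicle.prototype
+roughly as follows (sketch only; see proto.cpp for the full code):
+
+    class Vehicle : public Object  { public: std::string name; /* instance prop */ };
+    class Car     : public Vehicle { /* ... */ };
+    class MyCar   : public Car     { /* ... */ };
+
+so a property introduced by Vehicle (e.g. name) is reached from a MyCar object as
+an ordinary C++ member access, and prototype properties become static members of
+the corresponding class, instead of requiring a __proto__ chain walk.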
+ +To maintain continued support of property lookup by __proto__ chain +traversal, each object has a proplist which is a C++ map of the object's +property name and value pairs. + +Proplist entries for runtime created instance properties holds the +actual property value. Proplist entries for properties generated as +C++ class fields and methods at compile time holds pointers to class +fields and methods. + +This guarantees continued functioning of run time property lookup +using prop list and __proto__ chain traversal. This is necessary not +only because of compatibility but also because TS objects can have +instance properties created at runtime which can only be accessed +through prop list because they cannot be generated as C++ class fields +at compile time. + +To ensure inherited properties have only a single copy, all properties +in prototype objects are generated at compile time as C++ static class +fields and methods and accessed either via direct C++ object field +reference, or via pointers from the prototype object's proplist. + +Note that where __proto__ points to a JavaScript function constructor +instead of a prototype object, there is still only a single copy of +inheited properties, because in JavaScript, there is only 1 single +instance of each function constructor. + +4.3 Property inheritance with __proto__ chain + +See environment section in readme for instruction to view graphic +display of JavaScript object inheritance relationship. The following +graphs will be displayed: + + Graph Id. Graph Name + 1. Class Graph with Constructor Edges + 2. Generator Graph with Constructor Edges + 3. Builtin Graph with Constructor Edges + 4. Closure Graph with Constructor Edges + 5. Iterator Graph with Constructor Edges + 6. Async Graph with Constructor Edges + 7. Class Graph + 8. Generator Graph + 9. Builtin Graph + 10. Closure Graph + 11. Iterator Graph + 12. Async Graph + +The CPP emitter optimises property access with static compilation by: +- Generate C++ class fields and methods for JS properties of an object and +- Generate code to access JS properties directly as C++ object fields/method +- Disallow runtime modification of __proto__ property. +- Disallow runtime modification of inheritable properties in prototype objects. + +All JavaScript objects have a __proto__ property. Depending on what the +kind of JS object, its __proto__ may point to different kinds of JS objects: +note: (graph x: node x) cross refs example objects by id of graph above and node within graph. + +JS object What the object's __proto__ is set to +========= ===================================== +1) Object instances (What .__proto__ is set to) + a) Created by user defined or + builtin JS ctor functions, Prototype of the object's ctor function + e.g. 
myCar, car (graph 1: node 7, 14) (graph 1: node 8, 9) + including builtin objects: + - JSON (graph 3: node 7) (graph 3: node 1) + - Math (graph 3: node 6) + b) Special object instances: + - user defined generators The builtin "Generator" (renamed GeneratorFunction.prototype in 2022 spec) + (graph 2: node 4) (graph 2: node 8) + - generator instances returned by Prototype of user defined generator that returned the instance + user defined generators (graph 2: node 5) + (graph 2: node 10) + +2) Prototype object of ctor function which is: (What .prototype.__proto__ is set to) + a) ctor of a JS class Prototype of ctor of class it extends + (graph 1: node 8) (graph 1: node 9) + b) ctor of top level func or class Prototype of the builtin "Object" (Object.prototype) + (graph 1: node 10) (graph 1: node 1) + includes builtin functions + - Symbol (graph 3: node 5) (graph 3: node 1) + - Promise (graph 3: node 9) (graph 3: node 1) + +3) Special Prototype objects + a) prototype of user defined generators The builtin "GeneratorPrototype" + (graph 2: node 5) (graph 2: node 6) + b) the builtin "Generator" Prototype of the builtin "Function" (Function.prototype) + (graph 2: node 8) (graph 2: node 2) + (GeneratorFunction.prototype in 2022 spec) + c) the builtin "GeneratorPrototype" The builtin "IteratorPrototype" + (graph 2: node 6) (graph 2: node 7) + (renamed GeneratorFunction.prototype.prototype in 2022 spec) + d) the builtin "IteratorPrototype" Prototype of the builtin "Object" (Object.prototype) + (graph 2: node 7) (graph 2: node 1) + note: A prototype object is usaually the prototype object of its constructor + but IteraorPrototype is an exception. Its constructor is "Object" but + it is not the prototype object of "Object", or any other ctor function. + It's an independent prototype object that does not belong to any constructor + +4) Ctor function (What .__proto__ is set to) + a) ctor for a JS class Ctor of class it extends (i.e parent class) + (graph 1: node 13) (graph 1: node 12) + b) top level func or class Prototype of the builtin "Function" (Function.prototype) + (graph 1: node 11) (graph 1: node 2) + includes builtin functions + - Symbol (graph 3: node 4) (graph 3: node 2) + - Promise (graph 3: node 8) + c) the builtin "GeneratorFunction" Ctor of "Function" ("GeneratorFunction extends Function" - so use rule a) + (graph 2: node 9) (graph 2: node 3) diff --git a/src/MapleFE/docs/utils/README b/src/MapleFE/docs/utils/README new file mode 100644 index 0000000000000000000000000000000000000000..1364eab75456cecfec2e7c9c6fa34a87720e5a50 --- /dev/null +++ b/src/MapleFE/docs/utils/README @@ -0,0 +1,13 @@ +Contents in this directory: + +o proto.js + - Sample Javascript program to demonstrate object inheritance with constructors, prototype chain and prototype properties. + - Run with following commands: + node proto.js | ./viewdot.sh +o proto.cpp + - Sample C++ program based on proto.js to demonstrate how JavaSCript object inheritance can be modeled in C++. + - Run with following commands: + g++ ../../ast2cpp/runtime/src/builtins.cpp ../../ast2cpp/runtime/src/ts2cpp.cpp proto.cpp; + ./a.out | ./viewdot.sh + +note: requires dot and viewnior for graph generation and display. 
diff --git a/src/MapleFE/docs/utils/proto-ctor.png b/src/MapleFE/docs/utils/proto-ctor.png new file mode 100644 index 0000000000000000000000000000000000000000..d6fa7cff21607b69e556bb167f278d20c3d31c9b Binary files /dev/null and b/src/MapleFE/docs/utils/proto-ctor.png differ diff --git a/src/MapleFE/docs/utils/proto.cpp b/src/MapleFE/docs/utils/proto.cpp new file mode 100644 index 0000000000000000000000000000000000000000..e57e11232bc7eae40cc3b99903edfd781acbde1b --- /dev/null +++ b/src/MapleFE/docs/utils/proto.cpp @@ -0,0 +1,112 @@ +#include "../../ast2cpp/runtime/include/ts2cpp.h" + +using namespace t2crt; + +class Ctor_Vehicle; +class Ctor_Car; +class Ctor_MyCar; + +class Vehicle; +class Car; +class MyCar; + +extern Ctor_Vehicle Vehicle_ctor; +extern Ctor_Car Car_ctor; +extern Ctor_MyCar MyCar_ctor; + +class Vehicle : public Object { + public: + Vehicle(Function* ctor, Object* proto): Object(ctor, proto) {} + // Vehicle.prototype props (use static) + + public: + // Vehicle instance props + std::string name; +}; + +class Car : public Vehicle { + public: + Car(Function* ctor, Object* proto): Vehicle(ctor, proto) {} + // Car.prototype props (use static) + + public: + // Car instance props +}; + +// C++ Class def for instance props and prototype props +class MyCar : public Car { + public: + MyCar(Function* ctor, Object* proto): Car(ctor, proto) {} + // MyCar.prototype props (use static) + + public: + // MyCar instance props +}; + +// Class def for function constructors + +class Ctor_Vehicle : public Function { + public: + Ctor_Vehicle(Function* ctor, Object* proto, Object* prototype) : Function(ctor, proto, prototype) {} + + Vehicle* _new() { + return new Vehicle(this, this->prototype); + } + + void operator()(Vehicle* obj , std::string name) { + // add instance props to instance prop list + ClassFld field(&Vehicle::name); + obj->AddProp("name", field.NewProp(TY_String)); + // init instance props + obj->name = name; + } +}; + +class Ctor_Car : public Ctor_Vehicle { + public: + Ctor_Car(Function* ctor, Object* proto, Object* prototype) : Ctor_Vehicle(ctor, proto, prototype) {} + + Car* _new() { + return new Car(this, this->prototype); + } + + void operator()(Car* obj , std::string name) { + Vehicle_ctor(obj, name); + } +}; + +class Ctor_MyCar : public Ctor_Car { + public: + Ctor_MyCar(Function* ctor, Object* proto, Object* prototype) : Ctor_Car(ctor, proto, prototype) {} + + MyCar* _new() { + return new MyCar(this, this->prototype); + } + + void operator()(MyCar* obj , std::string name) { + Car_ctor(obj, name); + } +}; + +// Function constructors +Ctor_MyCar MyCar_ctor (&Function_ctor, &Car_ctor, Car_ctor.prototype); +Ctor_Car Car_ctor (&Function_ctor, &Vehicle_ctor, Vehicle_ctor.prototype); +Ctor_Vehicle Vehicle_ctor (&Function_ctor, Function_ctor.prototype, Object_ctor.prototype); + +// Object instances +Car* car = Car_ctor._new(); +MyCar* mycar= MyCar_ctor._new(); +Array* arr = Array_ctor._new(); +Object* obj = Object_ctor._new(); + +int main(int argc, char* argv[]) { + // InitAllProtoTypeProps(); + + Car_ctor(car, "Tesla"); + MyCar_ctor(mycar, "Tesla Model X"); + std::vector objs = {obj, arr, &Array_ctor, &Vehicle_ctor, &Car_ctor, &MyCar_ctor, car, mycar, &Function_ctor, &Object_ctor}; + std::vector names = {"obj", "arr", "Array", "Vehicle", "Car", "MyCar", "car", "mycar", "Function", "Object"}; + GenerateDOTGraph(objs, names); + return 0; +} + diff --git a/src/MapleFE/docs/utils/proto.js b/src/MapleFE/docs/utils/proto.js new file mode 100644 index 
0000000000000000000000000000000000000000..6115e6d550e9b98a8657ae0ac7a7482e2d9ebcec --- /dev/null +++ b/src/MapleFE/docs/utils/proto.js @@ -0,0 +1,98 @@ +// Command line to get graphs: nodejs proto.js | ./viewdot.sh +class Vehicle { + constructor (name) { this.name = name; } +} +class Car extends Vehicle { + constructor (name) { super(name); } +} +class MyCar extends Car { + constructor (name) { super(name); } +} + +let car = new Car("A car"); +let myCar = new MyCar("My car"); +let arr = [1, 2, 3] +let regexpr = /ab+c/i + +function* generator() { yield 1; } +const gpt = generator.prototype.__proto__; + +function makeClosure(a) { + return function (b) { + return a + b; + } +} +const closure = makeClosure(1); + +let myMap = new Map(); +let myMapIterator = myMap[Symbol.iterator](); +let MapIteratorPrototype = Object.getPrototypeOf(new Map()[Symbol.iterator]()); + +async function asyncFunction() {} +async function* asyncGenerator() {} +const agpt = asyncGenerator.prototype.__proto__; + +// All data for generating graphs +let graphData = { + Class : ["Array", "arr", "myCar", "car"], + Generator: ["generator", [generator(), "generator_instance"], [gpt, "GeneratorPrototype"], + [gpt.__proto__, "IteratorPrototype"], [generator.__proto__, "Generator"]], + Builtin : ["Symbol", "Math", "JSON", "Promise", "RegExp", "regexpr"], + Closure : ["makeClosure", "closure"], + Iterator : ["myMap", "myMapIterator", "MapIteratorPrototype", [gpt.__proto__, "IteratorPrototype"]], + Async : ["asyncFunction", "asyncGenerator", [asyncGenerator(), "asyncGenerator_instance"], [agpt, "AsyncGeneratorPrototype"], + [agpt.__proto__, "AsyncIteratorPrototype"], [asyncGenerator.__proto__, "AsyncGenerator"]], +}; + +generateGraph(graphData); + +function generateGraph(data) { + // Gather all reachable objects from their prototype, __proto__ and constructor properties + function insert(g, depth, ...args) { + for (let arg of args) { + let [o, name] = typeof arg === "string" ? [eval(arg), arg] : arg; + if (typeof o !== "undefined" && o !== null) + if (!g.has(o)) { + g.set(o, name !== null || typeof o !== "function" ? name : o.toString().split(" ")[1].replace(/[^a-zA-Z0-9+]/g, "")); + insert(g, depth + 1, [o.prototype, g.get(o) === null ? null : g.get(o) + "Prototype"], [o.__proto__, null], [o.constructor, null]); + } else if (name !== null) + g.set(o, name); + } + if (depth === 0) { + let visited = new Set(); + for (let [index, [key, val]] of Array.from(g).entries()) { + val = val === null || val === "" ? "Object_" + index : val.replace(/[^A-Za-z0-9]+/g, "_"); + if (visited.has(val)) val += "__" + index; + visited.add(val); + g.set(key, val); + } + } + } + + // Dump graphs with edges for prototype, __proto__ and constructor properties of each object + let nodejs = (typeof process !== 'undefined') && (process.release.name === 'node') + for (let prop in data) { + let graph = new Map(); + insert(graph, 0, "Function", "Object", ...data[prop]); + for (let ctor of ["", "_with_Constructor_Edges"]) { + console.log("digraph JS_" + prop + ctor + " {\nlabel=\"\\n" + prop + " Graph" + ctor.replace(/_/g, " ") + "\\n(Node in gray: function)" + + "\";\nrankdir = TB;\nranksep=0.6;\nnodesep=0.6;\n" + (ctor != "" ? 
"" : "newrank=true;")); + for (let [index, [key, val]] of Array.from(graph).entries()) { + let func = typeof key === "function"; + // Add comments with detailed information of keys + if (nodejs) + console.log("\n/* key =", key, "\nObject.getOwnPropertyNames(" + val + "):\n", Object.getOwnPropertyNames(key), + "\n" + val + ".toString(): " + (func ? key.toString().replace(/\s+/g, " ") : "-") + "\n*/"); + console.log(val + " [label=\"" + val + " " + (index < 4 ? 3 - index : index) + "\", shape=" + + (val.includes("Prototype") ? "box" : "oval") + (func ? ", style=filled" : "") + "];"); + // Add edges for prototype, constructor and __proto__ properties of objects + for (let [f, c] of [["prototype", "blue"], ["constructor", "darkgreen"], ["__proto__", "red"]]) + if (typeof key[f] !== "undefined" && key[f] !== null && graph.has(key[f]) && (ctor != "" || f !== "constructor")) + console.log((ctor != "" || f !== "prototype" ? "" : "subgraph cluster_" + val + " {\nlabel=\"\";rank=same;color=white;\n" + + val + ";\n" + graph.get(key.prototype) + " [shape=box];\n}\n") + val + " -> " + graph.get(key[f]) + + " [label=\"" + (f === "constructor" ? "ctor" : f) + "\", color=" + c + ", fontcolor=" + c + "];"); + } + console.log("} // digraph JS_" + prop + ctor); + } + } +} // generateGraph diff --git a/src/MapleFE/docs/utils/proto.png b/src/MapleFE/docs/utils/proto.png new file mode 100644 index 0000000000000000000000000000000000000000..b938c09f413a08ffcc3f90c994bf171a8320244b Binary files /dev/null and b/src/MapleFE/docs/utils/proto.png differ diff --git a/src/MapleFE/docs/utils/proto2.ts b/src/MapleFE/docs/utils/proto2.ts new file mode 100644 index 0000000000000000000000000000000000000000..d7d0c406520b346a16a93236e401f4debff57ace --- /dev/null +++ b/src/MapleFE/docs/utils/proto2.ts @@ -0,0 +1,54 @@ +interface IPrototype { prototype: any; } +type TPrototype = IPrototype & Function; +interface IProto { __proto__: any; } +type TProto = IProto & Object; + +// Command line to get graphs: tsc -t es6 proto2.ts; nodejs proto2.js | ./viewdot.sh +class Vehicle { +} +class Car implements Vehicle { + name: string; + constructor (name) { this.name = name; } +} +class MyCar extends Car { + constructor (name) { super(name); } +} + +let car = new Car("A car"); +let mycar = new MyCar("My car"); +let arr = [] + +// Dump graphs with edges for prototype, __proto__ and constructor properties of each objects +for(let g = 0; g < 2; ++g) { + let objs = [ arr, Array, Vehicle, Car, MyCar, car, mycar, Function, Object, ]; + let names = [ "arr", "Array", "Vehicle", "Car", "MyCar", "car", "mycar", "Function", "Object", ]; + console.log("digraph JS" + g + " {\nranksep=0.6;\nnodesep=0.6;\n" + (g == 0 ? "newrank=true;\n" : "")); + let num = objs.length; + let k = num; + // Add prototype objects and edges for them + for(let i = 0; i < num; ++i) { + let x = typeof (objs[i] as unknown as TPrototype).prototype; + if(x === "function" || x === "object") { + objs[k] = (objs[i] as unknown as TPrototype).prototype; + names[k] = names[i] + "_prototype"; + console.log(g == 0 ? 
"subgraph cluster_" + names[i] + " {\nrank=same;\ncolor=white;\n" + + names[i] + ";\n" + names[k] + "[label=\"" + names[i] + ".prototype\", shape=box];\n }" + : names[k] + "[label=\"" + names[i] + ".prototype\", shape=box];"); + console.log(names[i] + " -> " + names[k] + " [label=\"prototype\", color=blue, fontcolor=blue];"); + k++; + } + } + // Add edges for __proto__ and constructor properties of each objects + num = objs.length; + for(let i = 0; i < num; ++i) { + for(let j = 0; j < num; ++j) { + // Edges for constructor properties in the second graph only + if(g > 0 && objs[i].constructor === objs[j]) + console.log(names[i] + " -> " + names[j] + " [label=\"ctor\", color=darkgreen, fontcolor=darkgreen];"); + // Edges for __proto__ properties + if((objs[i] as unknown as TProto).__proto__ === objs[j]) + console.log(names[i] + " -> " + names[j] + " [label=\"__proto__\", color=red, fontcolor=red];"); + } + } + console.log("}"); +} diff --git a/src/MapleFE/docs/utils/viewdot.sh b/src/MapleFE/docs/utils/viewdot.sh new file mode 100755 index 0000000000000000000000000000000000000000..f9252d7c0ae07e7fb98cfdfa058bcfa6bcd6a583 --- /dev/null +++ b/src/MapleFE/docs/utils/viewdot.sh @@ -0,0 +1,23 @@ +#!/bin/bash +if [ $# -lt 1 ]; then + out=$(cat) + tmpdir=$(mktemp -dt viewdot-XXXXXX) + trap "rm -rf $tmpdir" SIGINT SIGQUIT SIGKILL + grep -n -e "digraph [^{]* {" -e "^} // digraph JS" <<< "$out" | grep -A1 "digraph [^{]* {" | + grep -v ^-- | sed 'N;s/\n/ /' | sed -e 's/:.*digraph [^{]* { */,/' -e 's/:.*/p/g' | + { while read cmd; do + name=$(sed -n $cmd <<< "$out" | head -1 | sed 's/.*digraph \([^{]*\) {.*/\1/') + echo $$-$name + sed -n $cmd <<< "$out" > "$tmpdir"/$$-$name.dot + dot -Tpng -o "$tmpdir"/$$-$name.png "$tmpdir"/$$-$name.dot + env LC_ALL=C viewnior "$tmpdir"/$$-$name.png & + done + wait + rm -rf "$tmpdir"; } + exit +fi +for f; do + dot -Tpng -o $f.png $f + viewnior $f.png + rm -f $f.png +done diff --git a/src/MapleFE/envsetup.sh b/src/MapleFE/envsetup.sh index 1895cbccb164fd281ea3a071a2fc425bec9b3281..bc75a54707313c7edd50cad8b64557d54b67b914 100755 --- a/src/MapleFE/envsetup.sh +++ b/src/MapleFE/envsetup.sh @@ -12,26 +12,35 @@ # FIT FOR A PARTICULAR PURPOSE. # See the Mulan PSL v2 for more details. 
# +if [ "${BASH_SOURCE[0]}" -ef "$0" ]; then + echo "This script should be sourced in a bash shell, not executed directly" + exit 1 +fi function print_usage { echo " " - echo "usage: source envsetup.sh" + echo "usage: source envsetup.sh java/typescript/c" echo " " } -if [ "$#" -ne 0 ]; then +if [ "$#" -gt 1 ]; then echo $# print_usage return fi -pdir=$(cd ..; pwd) -unset MAPLE_ROOT -export MAPLE_ROOT=${pdir} +LANGSRC=java +if [ "$#" -eq 1 ]; then + if [ $1 = "typescript" ]; then + LANGSRC=typescript + elif [ $1 = "c" ]; then + LANGSRC=c + fi +fi +export SRCLANG=$LANGSRC -curdir=$(pwd) -unset MAPLEFE_ROOT -export MAPLEFE_ROOT=${curdir} +export MAPLEFE_ROOT=$(cd $(dirname ${BASH_SOURCE[0]}); pwd) +export MAPLE_ROOT=$(cd ${MAPLEFE_ROOT}/../../..; pwd) unset MAPLEALL_ROOT export MAPLEALL_ROOT=${MAPLE_ROOT}/OpenArkCompiler @@ -40,4 +49,4 @@ unset MAPLEALL_SRC export MAPLEALL_SRC=${MAPLEALL_ROOT}/src/mapleall unset BUILDDIR -export BUILDDIR=${MAPLEFE_ROOT}/output +export BUILDDIR=${MAPLEFE_ROOT}/output/${SRCLANG} diff --git a/src/MapleFE/java/attr.spec b/src/MapleFE/java/attr.spec index daa682e84c9fcd5953172872560e37cdf0922498..3f098f9c9ab505abc952ee48b66ba263220b9529 100644 --- a/src/MapleFE/java/attr.spec +++ b/src/MapleFE/java/attr.spec @@ -31,5 +31,6 @@ STRUCT Attribute : (("abstract", abstract), ("public", public), ("static", static), ("strictfp", strictfp), - ("default", default)) + ("default", default), + ("synchronized", synchronized)) diff --git a/src/MapleFE/java/block.spec b/src/MapleFE/java/block.spec deleted file mode 100644 index 905f252c80befab9080c03548ad4c8b87453940e..0000000000000000000000000000000000000000 --- a/src/MapleFE/java/block.spec +++ /dev/null @@ -1,346 +0,0 @@ -# Copyright (C) [2020] Futurewei Technologies, Inc. All rights reverved. -# -# OpenArkFE is licensed under the Mulan PSL v2. -# You can use this software according to the terms and conditions of the Mulan PSL v2. -# You may obtain a copy of Mulan PSL v2 at: -# -# http://license.coscl.org.cn/MulanPSL2 -# -# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER -# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR -# FIT FOR A PARTICULAR PURPOSE. -# See the Mulan PSL v2 for more details. -# -################################################################################### -# This file defines the Java Block/Class/Interface statement. -################################################################################### - -rule ClassDeclaration : ONEOF(NormalClassDeclaration, EnumDeclaration) - attr.property : Single, Top -rule NormalClassDeclaration : ZEROORMORE(ClassModifier) + "class" + Identifier + - ZEROORONE(TypeParameters) + ZEROORONE(Superclass) + - ZEROORONE(Superinterfaces) + ClassBody - attr.action : BuildClass(%3) - attr.action : AddModifier(%1) - attr.action : AddClassBody(%7) - attr.action : AddSuperClass(%5) - attr.action : AddSuperInterface(%6) - attr.property.%1,%4,%5,%6 : ZomFast - -rule ClassAttr : ONEOF("public", "protected", "private", "abstract", "static", "final", "strictfp") - attr.property : Single -rule ClassModifier : ONEOF(Annotation, ClassAttr) - attr.property : Single - -# 1. Generic class -# 2. 
TypeParameter will be defined in type.spec -rule TypeParameters : '<' + TypeParameterList + '>' -rule TypeParameterList : TypeParameter + ZEROORMORE(',' + TypeParameter) -rule TypeParameter : ZEROORMORE(TypeParameterModifier) + Identifier + ZEROORONE(TypeBound) -rule TypeParameterModifier : Annotation -rule TypeBound : ONEOF("extends" + TypeVariable, - "extends" + ClassOrInterfaceType + ZEROORMORE(AdditionalBound)) -rule AdditionalBound : '&' + InterfaceType - -# ClassType and InterfaceType are defined in type.spec -rule Superclass : "extends" + ClassType -rule Superinterfaces : "implements" + InterfaceTypeList -rule InterfaceTypeList : InterfaceType + ZEROORMORE(',' + InterfaceType) - -# class body -rule ClassBody : "{" + ZEROORMORE(ClassBodyDeclaration) + "}" - attr.action: BuildBlock(%2) - attr.property.%2 : ZomFast - -rule ClassBodyDeclaration : ONEOF(ClassMemberDeclaration, - InstanceInitializer, - StaticInitializer, - ConstructorDeclaration) - attr.property : Single - -rule InstanceInitializer : Block - attr.action: BuildInstInit(%1) -rule StaticInitializer : "static" + Block - attr.action: BuildInstInit(%2) - attr.action: AddModifierTo(%2, %1) - -rule ClassMemberDeclaration : ONEOF(FieldDeclaration, - MethodDeclaration, - ClassDeclaration, - InterfaceDeclaration, - ';') - attr.property : Single - -rule FieldDeclaration : ZEROORMORE(FieldModifier) + UnannType + VariableDeclaratorList + ';' - attr.action: BuildDecl(%2, %3) - attr.action: AddModifier(%1) - -rule MethodDeclaration : ZEROORMORE(MethodModifier) + MethodHeader + MethodBody - attr.action: AddModifierTo(%2, %1) - attr.action: AddFunctionBodyTo(%2, %3) - -rule MethodBody : ONEOF(Block, ';') - attr.property : Single -rule MethodHeader : ONEOF(Result + MethodDeclarator + ZEROORONE(Throws), - TypeParameters + ZEROORMORE(Annotation) + Result + MethodDeclarator + - ZEROORONE(Throws)) - attr.action.%1: AddTypeTo(%2, %1) - attr.action.%1: AddThrowsTo(%2, %3) - attr.action.%2: AddTypeTo(%4, %3) - attr.action.%2: AddThrowsTo(%4, %5) - attr.property : Single - -rule Result : ONEOF(UnannType, "void") - attr.property : Single -rule MethodDeclarator : Identifier + '(' + ZEROORONE(FormalParameterList) + ')' + ZEROORONE(Dims) - attr.action: BuildFunction(%1) - attr.action: AddParams(%3) - attr.action: AddDims(%5) - -rule Throws : "throws" + ExceptionTypeList - attr.action: BuildThrows(%2) - -rule ExceptionTypeList : ExceptionType + ZEROORMORE(',' + ExceptionType) -rule ExceptionType : ONEOF(ClassType, TypeVariable) - -rule MethodAttr : ONEOF("public", "protected", "private", "abstract", "static", - "final", "synchronized", "native", "strictfp") - attr.property : Single - -rule MethodModifier : ONEOF(Annotation, MethodAttr) - attr.property : Single - -rule FormalParameterListNoReceiver : ONEOF(FormalParameters + ',' + LastFormalParameter, - LastFormalParameter) - attr.action.%1: BuildVarList(%1, %3) - -# ReceiverParameter and FormalParameterListNoReceiver could match at the same -# but with different num of tokens. Here is an example -# foo(T T.this) -# The NoReceiver could match "T T", while Receiver match "T T.this". -# Although later it figures out NoReceiver is wrong, but at this rule, both rule work. -# If we put NoReceiver as the 1st child and set property 'Single', we will miss -# Receiver which is the correct one. -# -# So I move Receiver to be the 1st child, since NoReceiver is not correct matching -# if Receiver works. 
-rule FormalParameterList : ONEOF(ReceiverParameter, FormalParameterListNoReceiver) - attr.property : Single - -rule FormalParameters : ONEOF(FormalParameter + ZEROORMORE(',' + FormalParameter), - ReceiverParameter + ZEROORMORE(',' + FormalParameter)) - attr.action.%1: BuildVarList(%1, %2) - attr.property : Single - -rule FormalParameter : ZEROORMORE(VariableModifier) + UnannType + VariableDeclaratorId - attr.action: BuildDecl(%2, %3) - attr.action: AddModifier(%1) -rule ReceiverParameter : ZEROORMORE(Annotation) + UnannType + ZEROORONE(Identifier + '.') + "this" - -rule LastFormalParameter : ONEOF(ZEROORMORE(VariableModifier) + UnannType + ZEROORMORE(Annotation) + - "..." + VariableDeclaratorId, - FormalParameter) - attr.property : Single - - -rule FieldAttr : ONEOF("public", "protected", "private", "static", "final", "transient", "volatile") - attr.property : Single -rule FieldModifier : ONEOF(Annotation, FieldAttr) - attr.property : Single - -################################################################ -# Constructor # -################################################################ -rule ConstructorDeclaration : ZEROORMORE(ConstructorModifier) + ConstructorDeclarator + - ZEROORONE(Throws) + ConstructorBody - attr.action : AddFunctionBodyTo(%2, %4) - -rule ConstructorAttr : ONEOF("public", "protected", "private") - attr.property : Single -rule ConstructorModifier : ONEOF(Annotation, ConstructorAttr) - attr.property : Single -rule ConstructorDeclarator : ZEROORONE(TypeParameters) + SimpleTypeName + '(' + - ZEROORONE(FormalParameterList) + ')' - attr.action : BuildConstructor(%2) -rule SimpleTypeName : Identifier -rule ConstructorBody : '{' + ZEROORONE(ExplicitConstructorInvocation) + - ZEROORONE(BlockStatements) + '}' - attr.action : BuildBlock(%3) - -# Although ExpressionName and Primary are not excluding each other, given the rest -# of the concatenate elements this is a 'Single' rule. -rule ExplicitConstructorInvocation : ONEOF( - ZEROORONE(TypeArguments) + "this" + '(' + ZEROORONE(ArgumentList) + ')' + ';', - ZEROORONE(TypeArguments) + "super" + '(' + ZEROORONE(ArgumentList) + ')' + ';', - ExpressionName + '.' + ZEROORONE(TypeArguments) + "super" + '(' + ZEROORONE(ArgumentList) + ')' + ';', - Primary + '.' + ZEROORONE(TypeArguments) + "super" + '(' + ZEROORONE(ArgumentList) + ')' + ';') - attr.property : Single - -###################################################################### -# Enum # -###################################################################### -rule EnumDeclaration: ZEROORMORE(ClassModifier) + "enum" + Identifier + - ZEROORONE(Superinterfaces) + EnumBody - attr.action : BuildClass(%3) - attr.action : SetClassIsJavaEnum() - attr.action : AddModifier(%1) - attr.action : AddSuperInterface(%4) - attr.action : AddClassBody(%5) - -# This returns a PassNode with some ConstantNode and BlockNode. The -# different between ConstantNode and LiteralNode can be found in their definition. 
-rule EnumBody: '{' + ZEROORONE(EnumConstantList) + ZEROORONE(',') + - ZEROORONE(EnumBodyDeclarations) + '}' - attr.action: BuildBlock(%2) - attr.action: AddToBlock(%4) - -# This returns a PassNode -rule EnumConstantList: EnumConstant + ZEROORMORE(',' + EnumConstant) - -# AddInitTo() will handle this complicated JavaEnum style initial value of identifier -rule EnumConstant: ZEROORMORE(EnumConstantModifier) + Identifier + - ZEROORONE('(' + ZEROORONE(ArgumentList) + ')') + ZEROORONE(ClassBody) - attr.action : AddInitTo(%2, %3) - attr.action : AddInitTo(%2, %4) - -rule EnumConstantModifier: Annotation - -# This returns a PassNode with a set of BlockNode -rule EnumBodyDeclarations: ';' + ZEROORMORE(ClassBodyDeclaration) - -###################################################################### -# Block # -###################################################################### - -# 1st and 3rd children don't exclude each other. However, if both of them -# match, they will match the same sequence of tokens, being a -# LocalVariableDeclarationStatement. So don't need traverse 3rd any more. -rule BlockStatement : ONEOF(LocalVariableDeclarationStatement, - ClassDeclaration, - Statement) - attr.property : Single - -rule BlockStatements : BlockStatement + ZEROORMORE(BlockStatement) - attr.property.%2 : ZomFast -rule Block : '{' + ZEROORONE(BlockStatements) + '}' - attr.action: BuildBlock(%2) - - -###################################################################### -# Interface # -###################################################################### -rule InterfaceDeclaration : ONEOF(NormalInterfaceDeclaration, AnnotationTypeDeclaration) - attr.property : Single, Top - -rule NormalInterfaceDeclaration : ZEROORMORE(InterfaceModifier) + "interface" + Identifier + - ZEROORONE(TypeParameters) + ZEROORONE(ExtendsInterfaces) + InterfaceBody - attr.action : BuildInterface(%3) - attr.action : AddInterfaceBody(%6) - -rule InterfaceAttr : ONEOF("public", "protected", "private", "abstract", "static", "strictfp") - attr.property : Single -rule InterfaceModifier : ONEOF(Annotation, InterfaceAttr) - attr.property : Single -rule ExtendsInterfaces : "extends" + InterfaceTypeList -rule InterfaceBody : '{' + ZEROORMORE(InterfaceMemberDeclaration) + '}' - attr.action : BuildBlock(%2) - attr.property.%2 : ZomFast - -rule InterfaceMemberDeclaration : ONEOF(ConstantDeclaration, - InterfaceMethodDeclaration, - ClassDeclaration, - InterfaceDeclaration, - ';') - attr.property : Single - -# constant decl is also called field decl. In interface, field must have a variable initializer -# However, the rules below don't tell this limitation. 
-rule ConstantDeclaration : ZEROORMORE(ConstantModifier) + UnannType + VariableDeclaratorList + ';' -rule ConstantAttr : ONEOF("public", "static", "final") - attr.property : Single -rule ConstantModifier : ONEOF(Annotation, ConstantAttr) - attr.property : Single - -rule InterfaceMethodDeclaration : ZEROORMORE(InterfaceMethodModifier) + MethodHeader + MethodBody - attr.action: AddModifierTo(%2, %1) - attr.action: AddFunctionBodyTo(%2, %3) -rule InterfaceMethodAttr : ONEOF("public", "abstract", "default", "static", "strictfp") - attr.property : Single -rule InterfaceMethodModifier : ONEOF(Annotation, InterfaceMethodAttr) - attr.property : Single - -###################################################################### -# Annotation Type # -###################################################################### -rule AnnotationTypeDeclaration : ZEROORMORE(InterfaceModifier) + '@' + "interface" + - Identifier + AnnotationTypeBody - attr.action : BuildAnnotationType(%4) - attr.action : AddModifier(%1) - attr.action : AddAnnotationTypeBody(%5) - -rule AnnotationTypeBody : '{' + ZEROORMORE(AnnotationTypeMemberDeclaration) + '}' - attr.action : BuildBlock(%2) -rule AnnotationTypeMemberDeclaration : ONEOF(AnnotationTypeElementDeclaration, - ConstantDeclaration, - ClassDeclaration, - InterfaceDeclaration, - ';') - attr.property : Single -rule AnnotationTypeElementDeclaration : ZEROORMORE(AnnotationTypeElementModifier) + UnannType + - Identifier + '(' + ')' + ZEROORONE(Dims) + - ZEROORONE(DefaultValue) + ';' -rule AnnotationTypeElementAttr : ONEOF("public", "abstract") - attr.property : Single -rule AnnotationTypeElementModifier : ONEOF(Annotation, AnnotationTypeElementAttr) - attr.property : Single -rule DefaultValue : "default" + ElementValue - -###################################################################### -# Annotation # -###################################################################### -rule Annotation : ONEOF(NormalAnnotation, - MarkerAnnotation, - SingleElementAnnotation) - -rule NormalAnnotation : '@' + TypeName + '(' + ZEROORONE(ElementValuePairList) + ')' - attr.action : BuildAnnotation(%2) - -rule MarkerAnnotation : '@' + TypeName - attr.action : BuildAnnotation(%2) - -rule SingleElementAnnotation : '@' + TypeName + '(' + ElementValue + ')' - attr.action : BuildAnnotation(%2) - -rule ElementValuePairList : ElementValuePair + ZEROORMORE(',' + ElementValuePair) -rule ElementValuePair : Identifier + '=' + ElementValue -rule ElementValue : ONEOF(ConditionalExpression, - ElementValueArrayInitializer, - Annotation) -rule ElementValueArrayInitializer : '{' + ZEROORONE(ElementValueList) + ZEROORONE(',') + '}' -rule ElementValueList : ElementValue + ZEROORMORE(',' + ElementValue) - -###################################################################### -# Package # -###################################################################### -rule PackageModifier: Annotation -rule PackageDeclaration: ZEROORMORE(PackageModifier) + "package" + Identifier + ZEROORMORE('.' + Identifier) + ';' - attr.action : BuildField(%3, %4) - attr.action : BuildPackageName() - attr.property : Top - attr.property.%1,%4 : ZomFast - -rule ImportDeclaration: ONEOF(SingleTypeImportDeclaration, - TypeImportOnDemandDeclaration, - SingleStaticImportDeclaration, - StaticImportOnDemandDeclaration) - attr.property : Top - -rule SingleTypeImportDeclaration: "import" + TypeName + ';' - attr.action : BuildSingleTypeImport(%2) -rule TypeImportOnDemandDeclaration: "import" + PackageOrTypeName + '.' 
+ '*' + ';' - attr.action : BuildAllTypeImport(%2) -rule SingleStaticImportDeclaration: "import" + "static" + TypeName + '.' + Identifier + ';' - attr.action : BuildField(%3, %4) - attr.action : BuildSingleStaticImport() -rule StaticImportOnDemandDeclaration: "import" + "static" + TypeName + '.' + '*' + ';' - attr.action : BuildAllStaticImport(%3) diff --git a/src/MapleFE/java/expr.spec b/src/MapleFE/java/expr.spec deleted file mode 100644 index 6ac6ad72eb76dcb9f28715a0ff5125c6ab09b6a2..0000000000000000000000000000000000000000 --- a/src/MapleFE/java/expr.spec +++ /dev/null @@ -1,293 +0,0 @@ -# Copyright (C) [2020] Futurewei Technologies, Inc. All rights reverved. -# -# OpenArkFE is licensed under the Mulan PSL v2. -# You can use this software according to the terms and conditions of the Mulan PSL v2. -# You may obtain a copy of Mulan PSL v2 at: -# -# http://license.coscl.org.cn/MulanPSL2 -# -# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER -# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR -# FIT FOR A PARTICULAR PURPOSE. -# See the Mulan PSL v2 for more details. -# - - -rule PackageName : ONEOF(Identifier, PackageName + '.' + Identifier) - attr.action.%2 : BuildField(%1, %3) - -rule TypeName : ONEOF(Identifier, PackageOrTypeName + '.' + Identifier) - attr.action.%2 : BuildField(%1, %3) - -rule PackageOrTypeName : ONEOF(Identifier, PackageOrTypeName + '.' + Identifier) - attr.action.%2 : BuildField(%1, %3) - -rule ExpressionName : ONEOF(Identifier, AmbiguousName + '.' + Identifier) - attr.action.%2 : BuildField(%1, %3) - -rule MethodName : Identifier - -rule AmbiguousName : ONEOF(Identifier, AmbiguousName + '.' + Identifier) - attr.action.%2 : BuildField(%1, %3) - -rule Class : "class" -rule ClassLiteral : ONEOF(TypeName + ZEROORMORE('[' + ']') + '.' + Class, - NumericType + ZEROORMORE('[' + ']') + '.' + Class, - "boolean" + ZEROORMORE('[' + ']') + '.' + Class, - "void" + '.' + Class) - attr.property : Single - -rule PrimaryNoNewArray_single : ONEOF( - Literal, - ClassLiteral, - "this", - TypeName + '.' + "this", - '(' + Expression + ')', - ClassInstanceCreationExpression) - attr.action.%4 : BuildField(%1, %3) - attr.action.%5 : BuildParenthesis(%2) - attr.property : Single - -rule PrimaryNoNewArray : ONEOF( - PrimaryNoNewArray_single, - FieldAccess, - ArrayAccess, - MethodInvocation, - MethodReference) - -# There was a child rule. -# ExpressionName + '.' + UnqualifiedClassInstanceCreationExpression, -# But Primary contains ExpressionName. It's a duplication, so I removed it. -rule ClassInstanceCreationExpression : ONEOF( - UnqualifiedClassInstanceCreationExpression, - Primary + '.' + UnqualifiedClassInstanceCreationExpression) - -rule UnqualifiedClassInstanceCreationExpression : - "new" + ZEROORONE(TypeArguments) + ClassOrInterfaceTypeToInstantiate + - '(' + ZEROORONE(ArgumentList) + ')' + ZEROORONE(ClassBody) - attr.action : BuildNewOperation(%3, %5, %7) - -rule ClassOrInterfaceTypeToInstantiate : - ZEROORMORE(Annotation) + Identifier + ZEROORMORE('.' 
+ ZEROORMORE(Annotation) + Identifier) + - ZEROORONE(TypeArgumentsOrDiamond) - attr.action : BuildUserType(%2) - attr.action : AddTypeArgument(%4) - -rule TypeArgumentsOrDiamond : ONEOF( - TypeArguments, - "<>") - -rule ArgumentList : Expression + ZEROORMORE(',' + Expression) - -rule ArrayInitializer : '{' + ZEROORONE(VariableInitializerList) + ZEROORONE(',') + '}' -rule ArrayCreationExpression : ONEOF( - "new" + PrimitiveType + DimExprs + ZEROORONE(Dims), - "new" + ClassOrInterfaceType + DimExprs + ZEROORONE(Dims), - "new" + PrimitiveType + Dims + ArrayInitializer, - "new" + ClassOrInterfaceType + Dims + ArrayInitializer) - -rule DimExprs : DimExpr + ZEROORMORE(DimExpr) - -rule DimExpr : ZEROORMORE(Annotation) + '[' + Expression + ']' - -rule ArrayAccess : ONEOF( - ExpressionName + '[' + Expression + ']', - PrimaryNoNewArray + '[' + Expression + ']') - -rule FieldAccess : ONEOF( - Primary + '.' + Identifier, - "super" + '.' + Identifier, - TypeName + '.' + "super" + '.' + Identifier) - attr.action.%1 : BuildField(%1, %3) - -# It's possible MethodInvocation includes a MethodReference, like -# A::B(a,b) -rule MethodInvocation : ONEOF( - MethodName + '(' + ZEROORONE(ArgumentList) + ')', - TypeName + '.' + ZEROORONE(TypeArguments) + Identifier + '(' + ZEROORONE(ArgumentList) + ')', - ExpressionName + '.' + ZEROORONE(TypeArguments) + Identifier + '(' + ZEROORONE(ArgumentList) + ')', - Primary + '.' + ZEROORONE(TypeArguments) + Identifier + '(' + ZEROORONE(ArgumentList) + ')', - "super" + '.' + ZEROORONE(TypeArguments) + Identifier + '(' + ZEROORONE(ArgumentList) + ')', - TypeName + '.' + "super" + '.' + ZEROORONE(TypeArguments) + Identifier + '(' + ZEROORONE(ArgumentList) + ')') - attr.action.%1 : BuildCall(%1) - attr.action.%1 : AddArguments(%3) - attr.action.%2 : BuildField(%1, %4) - attr.action.%2 : BuildCall() - attr.action.%2 : AddArguments(%6) - -rule ArgumentList : Expression + ZEROORMORE(',' + Expression) - attr.action.%1: BuildExprList(%1, %2) - -rule MethodReference : ONEOF( - ExpressionName + "::" + ZEROORONE(TypeArguments) + Identifier, - Primary + "::" + ZEROORONE(TypeArguments) + Identifier, - ReferenceType + "::" + ZEROORONE(TypeArguments) + Identifier, - "super" + "::" + ZEROORONE(TypeArguments) + Identifier, - TypeName + '.' + "super" + "::" + ZEROORONE(TypeArguments) + Identifier, - ClassType + "::" + ZEROORONE(TypeArguments) "new", - ArrayType + "::" + "new") - -rule PostfixExpression : ONEOF( - Primary, - ExpressionName, - PostIncrementExpression, - PostDecrementExpression) - -rule PostIncrementExpression : PostfixExpression + "++" - attr.action : BuildPostfixOperation(%2, %1) -rule PostDecrementExpression : PostfixExpression + "--" - attr.action : BuildPostfixOperation(%2, %1) - -rule UnaryExpression : ONEOF( - PreIncrementExpression, - PreDecrementExpression, - '+' + UnaryExpression, - '-' + UnaryExpression, - UnaryExpressionNotPlusMinus) - attr.action.%3,%4 : BuildUnaryOperation(%1, %2) - -rule PreIncrementExpression : "++" + UnaryExpression - attr.action : BuildUnaryOperation(%1, %2) - -rule PreDecrementExpression : "--" + UnaryExpression - attr.action : BuildUnaryOperation(%1, %2) - -rule UnaryExpressionNotPlusMinus : ONEOF( - PostfixExpression, - '~' + UnaryExpression, - '!' 
+ UnaryExpression, - CastExpression) - -rule CastExpression : ONEOF( - '(' + PrimitiveType + ')' + UnaryExpression, - '(' + ReferenceType + ZEROORMORE(AdditionalBound) + ')' + UnaryExpressionNotPlusMinus, - '(' + ReferenceType + ZEROORMORE(AdditionalBound) + ')' + LambdaExpression) - attr.action.%1 : BuildCast(%2, %4) - attr.action.%2,%3 : BuildCast(%2, %5) - -rule MultiplicativeExpression : ONEOF( - UnaryExpression, - MultiplicativeExpression + '*' + UnaryExpression, - MultiplicativeExpression + '/' + UnaryExpression, - MultiplicativeExpression + '%' + UnaryExpression) - -rule AdditiveExpression : ONEOF( - MultiplicativeExpression, - AdditiveExpression + '+' + MultiplicativeExpression, - AdditiveExpression + '-' + MultiplicativeExpression) - attr.action.%2,%3 : BuildBinaryOperation(%1, %2, %3) - -rule ShiftExpression : ONEOF( - AdditiveExpression, - ShiftExpression + "<<" + AdditiveExpression, - ShiftExpression + ">>" + AdditiveExpression, - ShiftExpression + ">>>" + AdditiveExpression) - attr.action.%2,%3,%4 : BuildBinaryOperation(%1, %2, %3) - -rule RelationalExpression : ONEOF( - ShiftExpression, - RelationalExpression + '<' + ShiftExpression, - RelationalExpression + '>' + ShiftExpression, - RelationalExpression + "<=" + ShiftExpression, - RelationalExpression + ">=" + ShiftExpression, - RelationalExpression + "instanceof" + ReferenceType) - attr.action.%2,%3,%4,%5 : BuildBinaryOperation(%1, %2, %3) - -rule EqualityExpression : ONEOF( - RelationalExpression, - EqualityExpression + "==" + RelationalExpression, - EqualityExpression + "!=" + RelationalExpression) - attr.action.%2,%3 : BuildBinaryOperation(%1, %2, %3) - -rule AndExpression : ONEOF( - EqualityExpression, - AndExpression + '&' + EqualityExpression) - attr.action.%2 : BuildBinaryOperation(%1, %2, %3) - -rule ExclusiveOrExpression : ONEOF( - AndExpression, - ExclusiveOrExpression + '^' + AndExpression) - attr.action.%2 : BuildBinaryOperation(%1, %2, %3) - -rule InclusiveOrExpression : ONEOF( - ExclusiveOrExpression, - InclusiveOrExpression + '|' + ExclusiveOrExpression) - attr.action.%2 : BuildBinaryOperation(%1, %2, %3) - -rule ConditionalAndExpression : ONEOF( - InclusiveOrExpression, - ConditionalAndExpression + "&&" + InclusiveOrExpression) - attr.action.%2 : BuildBinaryOperation(%1, %2, %3) - -rule ConditionalOrExpression : ONEOF( - ConditionalAndExpression, - ConditionalOrExpression + "||" + ConditionalAndExpression) - attr.action.%2 : BuildBinaryOperation(%1, %2, %3) - -rule ConditionalExpression : ONEOF( - ConditionalOrExpression, - ConditionalOrExpression + '?' + Expression + ':' + ConditionalExpression, - ConditionalOrExpression + '?' 
+ Expression + ':' + LambdaExpression) - -rule AssignmentExpression : ONEOF( - ConditionalExpression, - Assignment) - -rule Assignment : LeftHandSide + AssignmentOperator + Expression - attr.action : BuildAssignment(%1, %2, %3) - -rule LeftHandSide : ONEOF( - ExpressionName, - FieldAccess, - ArrayAccess) - -rule AssignmentOperator : ONEOF('=', "*=", "/=", "%=", "+=", "-=", "<<=", ">>=", ">>>=", "&=", "^=", "|=") - -rule LambdaExpression : LambdaParameters + "->" + LambdaBody - attr.action : BuildLambda(%1, %3) - -rule LambdaParameters : ONEOF( - '(' + ZEROORONE(LambdaParameterList) + ')', - Identifier) - -rule LambdaParameterList : ONEOF( - LambdaParameter + ZEROORMORE(',' + LambdaParameter), - Identifier + ZEROORMORE(',' + Identifier)) - -rule LambdaParameter : ONEOF( - ZEROORMORE(VariableModifier) + LambdaParameterType + VariableDeclaratorId, - VariableArityParameter) - -rule LambdaParameterType : ONEOF(UnannType, "var") - -rule VariableArityParameter : ZEROORMORE(VariableModifier) + UnannType + ZEROORMORE(Annotation) + "..." + Identifier - -rule LambdaBody : ONEOF(Expression, Block) - -rule ConstantExpression : Expression - -rule Primary : ONEOF( - PrimaryNoNewArray, - ArrayCreationExpression) - -rule Expression : ONEOF( - ExpressionName, - Primary, - UnaryExpression, - BinaryExpression, - ConditionalExpression, - LambdaExpression, - AssignmentExpression) - -rule BinaryExpression : ONEOF ( - MultiplicativeExpression, - AdditiveExpression, - ShiftExpression, - RelationalExpression, - EqualityExpression, - AndExpression, - ExclusiveOrExpression, - InclusiveOrExpression, - ConditionalAndExpression, - ConditionalOrExpression) - diff --git a/src/MapleFE/java/identifier.spec b/src/MapleFE/java/identifier.spec index a482d95b78ac4e237121b90f6677d208c2333b5c..6cfe3baf6e1b7b01004cdd387139257599dd0990 100644 --- a/src/MapleFE/java/identifier.spec +++ b/src/MapleFE/java/identifier.spec @@ -14,8 +14,6 @@ # An identifier is an unlimited-length sequence of Java letters and Java digits, the # first of which must be a Java letter # -# TODO: So far we dont support unicode which are not major goal right now. - rule JavaChar : ONEOF(CHAR, '_' , '$') rule CharOrDigit : ONEOF(JavaChar, DIGIT) rule Identifier : JavaChar + ZEROORMORE(CharOrDigit) diff --git a/src/MapleFE/java/include/lang_builtin.def b/src/MapleFE/java/include/lang_builtin.def new file mode 100644 index 0000000000000000000000000000000000000000..8c127c72a426bb6113253cdf59a103a11785e8fb --- /dev/null +++ b/src/MapleFE/java/include/lang_builtin.def @@ -0,0 +1,14 @@ +// Copyright (C) [2022] Futurewei Technologies, Inc. All rights reverved. +// +// OpenArkFE is licensed under the Mulan PSL v2. +// You can use this software according to the terms and conditions of the Mulan PSL v2. +// You may obtain a copy of Mulan PSL v2 at: +// +// http://license.coscl.org.cn/MulanPSL2 +// +// THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +// FIT FOR A PARTICULAR PURPOSE. +// See the Mulan PSL v2 for more details. +// + diff --git a/src/MapleFE/java/include/lang_keywords.def b/src/MapleFE/java/include/lang_keywords.def new file mode 100644 index 0000000000000000000000000000000000000000..cc571c8c9d42f70245f634ca76206bf0196ab253 --- /dev/null +++ b/src/MapleFE/java/include/lang_keywords.def @@ -0,0 +1,71 @@ +// Copyright (C) [2021] Futurewei Technologies Inc. All rights reverved. +// +// OpenArkFE is licensed under the Mulan PSL v2. 
+// You can use this software according to the terms and conditions of the Mulan PSL v2. +// You may obtain a copy of Mulan PSL v2 at: +// +// http://license.coscl.org.cn/MulanPSL2 +// +// THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS WITHOUT WARRANTIES OF ANY KIND, EITHER +// EXPRESS OR IMPLIED INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +// FIT FOR A PARTICULAR PURPOSE. +// See the Mulan PSL v2 for more details. +// + +LANGKEYWORD(boolean) +LANGKEYWORD(byte) +LANGKEYWORD(char) +LANGKEYWORD(class) +LANGKEYWORD(double) +LANGKEYWORD(enum) +LANGKEYWORD(float) +LANGKEYWORD(int) +LANGKEYWORD(interface) +LANGKEYWORD(long) +LANGKEYWORD(package) +LANGKEYWORD(short) +LANGKEYWORD(void) + +LANGKEYWORD(var) + +LANGKEYWORD(break) +LANGKEYWORD(case) +LANGKEYWORD(catch) +LANGKEYWORD(continue) +LANGKEYWORD(default) +LANGKEYWORD(do) +LANGKEYWORD(else) +LANGKEYWORD(finally) +LANGKEYWORD(for) +LANGKEYWORD(goto) +LANGKEYWORD(if) +LANGKEYWORD(return) +LANGKEYWORD(switch) +LANGKEYWORD(try) +LANGKEYWORD(while) + +LANGKEYWORD(abstract) +LANGKEYWORD(const) +LANGKEYWORD(volatile) + +LANGKEYWORD(assert) +LANGKEYWORD(new) + +LANGKEYWORD(instanceof) +LANGKEYWORD(extends) +LANGKEYWORD(implements) +LANGKEYWORD(import) +LANGKEYWORD(super) +LANGKEYWORD(synchronized) +// LANGKEYWORD(this) +LANGKEYWORD(throw) +LANGKEYWORD(throws) +LANGKEYWORD(transient) + +LANGKEYWORD(final) +LANGKEYWORD(native) +LANGKEYWORD(private) +LANGKEYWORD(protected) +LANGKEYWORD(public) +LANGKEYWORD(static) +LANGKEYWORD(strictfp) diff --git a/src/MapleFE/java/include/lang_spec.h b/src/MapleFE/java/include/lang_spec.h index b461f71a033dd0257f66e5f00f5f6b73a52bda48..8f02be06ff2494d211b5641e9846c506592c6185 100644 --- a/src/MapleFE/java/include/lang_spec.h +++ b/src/MapleFE/java/include/lang_spec.h @@ -21,6 +21,7 @@ #include "stringutil.h" #include "token.h" +#include "lexer.h" namespace maplefe { @@ -34,5 +35,11 @@ public: }; extern LitData ProcessLiteral(LitId type, const char *str); + +// +class JavaLexer : public Lexer { +}; + } + #endif diff --git a/src/MapleFE/java/include/vfy_java.h b/src/MapleFE/java/include/vfy_java.h index ba1fbaeb9e0a4101d442a184fcca674f91a444ff..89ab484fe50ea29420ba4fd72a3f00513f7fc057 100644 --- a/src/MapleFE/java/include/vfy_java.h +++ b/src/MapleFE/java/include/vfy_java.h @@ -26,7 +26,7 @@ namespace maplefe { class VerifierJava : public Verifier { private: public: - VerifierJava(){} + VerifierJava(ModuleNode *m) : Verifier(m) {} ~VerifierJava(){} void VerifyGlobalScope(); diff --git a/src/MapleFE/java/literal.spec b/src/MapleFE/java/literal.spec index 87f8a49652cbc3bea76b89b52b505c008f6e0981..68eae0bdf7f3a0718916e1d537c690b58d569fc9 100644 --- a/src/MapleFE/java/literal.spec +++ b/src/MapleFE/java/literal.spec @@ -124,7 +124,15 @@ rule BooleanLiteral : ONEOF ("true", "false") # I decided to simplify the unicode escape a little bit. I don't want to # handle all odd cases. rule UnicodeEscape: '\' + 'u' + HEXDIGIT + HEXDIGIT + HEXDIGIT + HEXDIGIT -rule RawInputCharacter : ONEOF(ASCII, ESCAPE) + +# [NOTE] Becareful of ' over here. It's duplicated in ESCAPE as '\' + '''. There is a reason. +# When the lexer read a string "doesn't", which is wrong since Java request ' be escaped but +# many code does NOT escape, the string in memory is "doesn't" too. The system Reading function +# which is in C doesn't escape '. So I duplicate here to catch this case. +# +# Please see test case java2mpl/literal-string-2.java for example. 
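# For illustration, a small made-up Java snippet in the spirit of
# java2mpl/literal-string-2.java, showing the case described above: an apostrophe that
# appears unescaped inside a string literal, which the ''' alternative added to
# RawInputCharacter just below is meant to accept.
#
#   class ApostropheExample {
#     String raw     = "doesn't";     // apostrophe left unescaped inside the string
#     String escaped = "doesn\'t";    // the escaped form is covered by ESCAPE instead
#   }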
+# +rule RawInputCharacter : ONEOF(ASCII, ''', ESCAPE) rule SingleCharacter: ONEOF(UnicodeEscape, RawInputCharacter) rule OctalEscape : ONEOF('\' + '0', '\' + '1') @@ -135,7 +143,10 @@ rule CharacterLiteral : ''' + ONEOF(SingleCharacter, EscapeSequence) + ''' ######################################################################### ## String ## ######################################################################### -rule StringLiteral : '"' + ZEROORMORE(RawInputCharacter) + '"' +# The UnicodeEscape is limited from \u0000 to \u00ff. +rule StringUnicodeEscape: '\' + 'u' + '0' + '0' + HEXDIGIT + HEXDIGIT +rule StringCharater: ONEOF(StringUnicodeEscape, RawInputCharacter) +rule StringLiteral : '"' + ZEROORMORE(StringCharater) + '"' ######################################################################### ## Null ## diff --git a/src/MapleFE/java/operator.spec b/src/MapleFE/java/operator.spec index b5c472571074f25f96b1b5203fde4dc7c842734f..5f561f909f2bb78e7ce8a15fd7178bfb7c845c47 100644 --- a/src/MapleFE/java/operator.spec +++ b/src/MapleFE/java/operator.spec @@ -20,6 +20,11 @@ # 2. The rule part, defining the language restrictions of each operator. ########################################################################## +# NOTE +# Some languages could have one synatx belonging to both separator and operators. +# eg., ':' in Java 8, it's both a separator colon and operator select. +# We need avoid such duplication in .spec files. + STRUCT Operator : ONEOF( # Arithmetic ("+", Add), @@ -64,6 +69,4 @@ STRUCT Operator : ONEOF( (">>>=", ZextAssign), # ("->", Arrow), - (":", Select), - ("?", Cond), ("<>", Diamond)) diff --git a/src/MapleFE/java/separator.spec b/src/MapleFE/java/separator.spec index 1363b2d7b0e21c862e3384129d652f50b5c48b7d..db9df6f5cad9161df16d0ad6ecbff01866f9dc9d 100644 --- a/src/MapleFE/java/separator.spec +++ b/src/MapleFE/java/separator.spec @@ -31,6 +31,7 @@ STRUCT Separator : ((" ", Whitespace), (".", Dot), ("...", Dotdotdot), (":", Colon), + ("?", Select), ("::", Of), ("@", At), ("#", Pound)) diff --git a/src/MapleFE/java/src/Makefile b/src/MapleFE/java/src/Makefile index 5c8aae7fcff64d30beae6ba5ead94393877700e2..7715078eb86192d2422d3f29aec0b0c0a0a5c5aa 100644 --- a/src/MapleFE/java/src/Makefile +++ b/src/MapleFE/java/src/Makefile @@ -1,4 +1,4 @@ -# Copyright (C) [2020] Futurewei Technologies, Inc. All rights reverved. +# Copyright (C) [2020-2021] Futurewei Technologies, Inc. All rights reverved. # # OpenArkFE is licensed under the Mulan PSL v2. # You can use this software according to the terms and conditions of the Mulan PSL v2. @@ -13,27 +13,37 @@ # include ../../Makefile.in +BUILDBIN=$(BUILDDIR)/bin BUILD=$(BUILDDIR)/java -$(shell $(MKDIR_P) $(BUILD)) +BUILDGEN=$(BUILDDIR)/gen +BUILDASTGEN=$(BUILDDIR)/ast_gen/shared +$(shell $(MKDIR_P) $(BUILD) $(BUILDGEN)) SRC=$(wildcard *.cpp) OBJ :=$(patsubst %.cpp,%.o,$(SRC)) DEP :=$(patsubst %.cpp,%.d,$(SRC)) -OBJS :=$(foreach obj,$(OBJ), $(BUILD)/$(obj)) -DEPS :=$(foreach dep,$(DEP), $(BUILD)/$(dep)) +SRCG := $(wildcard $(BUILDGEN)/gen*.cpp) +OBJG := $(patsubst %.cpp, %.o, $(SRCG)) +DEPG := $(patsubst %.cpp, %.d, $(SRCG)) + +OBJS :=$(foreach obj,$(OBJ), $(BUILD)/$(obj)) $(OBJG) +DEPS :=$(foreach dep,$(DEP), $(BUILD)/$(dep)) $(DEPG) INCLUDES := -I $(MAPLEFE_ROOT)/shared/include \ -I $(MAPLEFE_ROOT)/java/include \ -I $(MAPLEFE_ROOT)/autogen/include \ - -I . 
$(MAPLEALL_INC) + -I ${BUILDDIR}/ast_gen/shared \ + -I $(BUILDGEN) $(MAPLEALL_INC) + +INCLUDEGEN := -I $(BUILDGEN) -I $(MAPLEFE_ROOT)/shared/include -TARGET=java2mpl +TARGET=java2ast -SHAREDLIB = $(BUILDDIR)/shared/shared.a +SHAREDLIB = $(BUILDDIR)/shared/shared.a $(BUILDASTGEN)/genast.a -.PHONY: all $(TARGET) -all: $(TARGET) +.PHONY: all +all: $(BUILD)/$(TARGET) -include $(DEPS) .PHONY: clean @@ -43,7 +53,7 @@ vpath %.d $(BUILD) # Pattern Rules $(BUILD)/%.o : %.cpp - $(CXX) $(CXXFLAGS) -fpermissive $(INCLUDES) -w -c $< -o $@ + $(CXX) $(CXXFLAGS) -fpermissive $(INCLUDES) -w -c $< -o $@ $(BUILD)/%.d : %.cpp @$(CXX) $(CXXFLAGS) -std=c++11 -MM $(INCLUDES) $< > $@ @@ -51,14 +61,20 @@ $(BUILD)/%.d : %.cpp @sed -e 's|.*:|$(BUILD)/$*.o:|' < $(BUILD)/$*.d.tmp > $(BUILD)/$*.d @rm -f $(BUILD)/$*.d.tmp +$(BUILDGEN)/%.o : $(BUILDGEN)/%.cpp $(BUILDGEN)/%.d + $(CXX) $(CXXFLAGS) -fpermissive $(INCLUDEGEN) -w -c $< -o $@ + +$(BUILDGEN)/%.d : $(BUILDGEN)/%.cpp + @$(CXX) $(CXXFLAGS) -std=c++11 -MM $(INCLUDEGEN) $< > $@ + @mv -f $(BUILDGEN)/$*.d $(BUILDGEN)/$*.d.tmp + @sed -e 's|.*:|$(BUILDGEN)/$*.o:|' < $(BUILDGEN)/$*.d.tmp > $(BUILDGEN)/$*.d + @rm -f $(BUILDGEN)/$*.d.tmp + # TARGET depends on OBJS and shared OBJS from shared directory # as well as mapleall libraries -$(TARGET): $(OBJS) $(SHAREDLIB) - $(LD) -o $(BUILD)/$(TARGET) $(OBJS) $(SHAREDLIB) $(MAPLELIBS) - -#.cpp.o: -# $(CXX) $(CXXFLAGS) -fpermissive $(INCLUDES) -w -c $*.cpp -o $(BUILD)/$*.o -# $(CXX) $(CXXFLAGS) -std=c++11 -MM $(INCLUDES) $*.cpp > $(BUILD)/$*.d +$(BUILD)/$(TARGET): $(OBJS) $(SHAREDLIB) + @mkdir -p $(BUILDBIN) + $(LD) -o $(BUILDBIN)/$(TARGET) $(OBJS) $(SHAREDLIB) $(MAPLELIBS) clean: rm -rf $(BUILD) diff --git a/src/MapleFE/java/src/ast2mpl_java.cpp b/src/MapleFE/java/src/ast2mpl_java.cpp deleted file mode 100644 index ca77eaaa32c95629b3a6b89eb088e175ee9c78f3..0000000000000000000000000000000000000000 --- a/src/MapleFE/java/src/ast2mpl_java.cpp +++ /dev/null @@ -1,56 +0,0 @@ -/* -* Copyright (C) [2020] Futurewei Technologies, Inc. All rights reverved. -* -* OpenArkFE is licensed under the Mulan PSL v2. -* You can use this software according to the terms and conditions of the Mulan PSL v2. -* You may obtain a copy of Mulan PSL v2 at: -* -* http://license.coscl.org.cn/MulanPSL2 -* -* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER -* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR -* FIT FOR A PARTICULAR PURPOSE. -* See the Mulan PSL v2 for more details. 
-*/ - -#include "ast2mpl_java.h" - -namespace maplefe { - -maple::MIRType *A2MJava::MapPrimType(PrimTypeNode *ptnode) { - maple::PrimType prim; - switch (ptnode->GetPrimType()) { - case TY_Boolean: prim = maple::PTY_u1; break; - case TY_Byte: prim = maple::PTY_u8; break; - case TY_Short: prim = maple::PTY_i16; break; - case TY_Int: prim = maple::PTY_i32; break; - case TY_Long: prim = maple::PTY_i64; break; - case TY_Char: prim = maple::PTY_u16; break; - case TY_Float: prim = maple::PTY_f32; break; - case TY_Double: prim = maple::PTY_f64; break; - case TY_Void: prim = maple::PTY_void; break; - case TY_Null: prim = maple::PTY_void; break; - default: MASSERT("Unsupported PrimType"); break; - } - - maple::TyIdx tid(prim); - return maple::GlobalTables::GetTypeTable().GetTypeFromTyIdx(tid); -} - -const char *A2MJava::Type2Label(const maple::MIRType *type) { - maple::PrimType pty = type->GetPrimType(); - switch (pty) { - case maple::PTY_u1: return "Z"; - case maple::PTY_u8: return "B"; - case maple::PTY_i16: return "S"; - case maple::PTY_u16: return "C"; - case maple::PTY_i32: return "I"; - case maple::PTY_i64: return "J"; - case maple::PTY_f32: return "F"; - case maple::PTY_f64: return "D"; - case maple::PTY_void: return "V"; - default: return "L"; - } -} - -} diff --git a/src/MapleFE/java/src/lang_spec.cpp b/src/MapleFE/java/src/lang_spec.cpp index 522e67ad1630e3b14304cf89b34d97ac49bd3e77..7585961fd230765c5e5504e0f9598465fbd7ee2c 100644 --- a/src/MapleFE/java/src/lang_spec.cpp +++ b/src/MapleFE/java/src/lang_spec.cpp @@ -13,6 +13,7 @@ * See the Mulan PSL v2 for more details. */ #include "lang_spec.h" +#include "stringpool.h" namespace maplefe { @@ -149,7 +150,7 @@ LitData ProcessLiteral(LitId id, const char *str) { case LT_StringLiteral: { const char *s = s2v.StringToString(value_text); data.mType = LT_StringLiteral; - data.mData.mStr = s; + data.mData.mStrIdx = gStringPool.GetStrIdx(s); break; } case LT_NullLiteral: { // Just need set the id @@ -164,4 +165,14 @@ LitData ProcessLiteral(LitId id, const char *str) { return data; } +///////////////////////////////////////////////////////////////////////////////////// +// Implementation of Java Lexer +///////////////////////////////////////////////////////////////////////////////////// + +Lexer* CreateLexer() { + Lexer *lexer = new JavaLexer(); + return lexer; +} + + } diff --git a/src/MapleFE/java/src/main.cpp b/src/MapleFE/java/src/main.cpp index e5a0e5092add3d5b21454be0f8674d9b6b22637d..e90b50d01f5137deaea432f63fb09184bf69b3b2 100644 --- a/src/MapleFE/java/src/main.cpp +++ b/src/MapleFE/java/src/main.cpp @@ -17,8 +17,10 @@ #include "common_header_autogen.h" #include "ruletable_util.h" #include "gen_summary.h" +#include "gen_aststore.h" +#include "gen_astdump.h" +#include "gen_astgraph.h" #include "vfy_java.h" -#include "ast2mpl_java.h" static void help() { std::cout << "java2mpl sourcefile [options]:\n" << std::endl; @@ -35,6 +37,8 @@ static void help() { std::cout << " --trace-patch-was-succ : Trace Patching of WasSucc nodes" << std::endl; std::cout << " --trace-warning : Print Warning" << std::endl; std::cout << " --trace-a2m : Trace MPL Builder" << std::endl; + std::cout << " --dump-ast : Dump AST in text format" << std::endl; + std::cout << " --dump-dot : Dump AST in dot format" << std::endl; } int main (int argc, char *argv[]) { @@ -45,7 +49,9 @@ int main (int argc, char *argv[]) { maplefe::Parser *parser = new maplefe::Parser(argv[1]); - bool trace_a2m = false; + bool dump_ast = false; + bool dump_dot = false; + bool succ; // Parse the 
argument for (unsigned i = 2; i < argc; i++) { @@ -71,8 +77,10 @@ int main (int argc, char *argv[]) { parser->mTracePatchWasSucc = true; } else if (!strncmp(argv[i], "--trace-warning", 15) && (strlen(argv[i]) == 15)) { parser->mTraceWarning = true; - } else if (!strncmp(argv[i], "--trace-a2m", 11) && (strlen(argv[i]) == 11)) { - trace_a2m = true; + } else if (!strncmp(argv[i], "--dump-ast", 10) && (strlen(argv[i]) == 10)) { + dump_ast = true; + } else if (!strncmp(argv[i], "--dump-dot", 10) && (strlen(argv[i]) == 10)) { + dump_dot = true; } else { std::cerr << "unknown option " << argv[i] << std::endl; exit(-1); @@ -80,18 +88,40 @@ int main (int argc, char *argv[]) { } parser->InitRecursion(); - parser->Parse(); + succ = parser->Parse(); + if (!succ) { + delete parser; + return 1; + } + + // the module from parser + maplefe::ModuleNode *module = parser->GetModule(); - maplefe::VerifierJava vfy_java; + maplefe::VerifierJava vfy_java(module); vfy_java.Do(); - maplefe::A2MJava *a2m = new maplefe::A2MJava(maplefe::gModule.mFileName); - a2m->ProcessAST(trace_a2m); + if(dump_ast) { + maplefe::AstDump astdump(module); + astdump.Dump("ts2ast: Initial AST", &std::cout); + } - a2m->mMirModule->OutputAsciiMpl("", ".mpl"); + if(dump_dot) { + maplefe::AstGraph graph(module); + graph.DumpGraph("ts2ast: Initial AST", &std::cout); + } - delete parser; - delete a2m; + maplefe::AstStore saveAst(module); + saveAst.StoreInAstBuf(); + maplefe::AstBuffer &ast_buf = saveAst.GetAstBuf(); + + std::ofstream ofs; + std::string fname(module->GetFilename()); + fname += ".ast"; + ofs.open(fname, std::ofstream::out); + const char *addr = (const char *)(&(ast_buf[0])); + ofs.write(addr, ast_buf.size()); + ofs.close(); + delete parser; return 0; } diff --git a/src/MapleFE/java/src/vfy_java.cpp b/src/MapleFE/java/src/vfy_java.cpp index e77e3b6bff5177a52f307303d3f540fe21fb8b19..53efd9c35ca3c4fc47ac81a45d759d7b0ade4587 100644 --- a/src/MapleFE/java/src/vfy_java.cpp +++ b/src/MapleFE/java/src/vfy_java.cpp @@ -24,19 +24,15 @@ namespace maplefe { // Collect all types, decls of global scope all at once. void VerifierJava::VerifyGlobalScope() { - mCurrScope = gModule.mRootScope; - std::vector::iterator tree_it = gModule.mTrees.begin(); - for (; tree_it != gModule.mTrees.end(); tree_it++) { - ASTTree *asttree = *tree_it; - TreeNode *tree = asttree->mRootNode; + mCurrScope = mASTModule->mRootScope; + for (unsigned i = 0; i < mASTModule->GetTreesNum(); i++) { + TreeNode *tree = mASTModule->GetTree(i); mCurrScope->TryAddDecl(tree); mCurrScope->TryAddType(tree); - } + } - tree_it = gModule.mTrees.begin(); - for (; tree_it != gModule.mTrees.end(); tree_it++) { - ASTTree *asttree = *tree_it; - TreeNode *tree = asttree->mRootNode; + for (unsigned i = 0; i < mASTModule->GetTreesNum(); i++) { + TreeNode *tree = mASTModule->GetTree(i); VerifyTree(tree); } } diff --git a/src/MapleFE/java/stmt.spec b/src/MapleFE/java/stmt.spec index 4e4922f23196015f1a6aa75527cbdd8e44dc19fc..ea54785ffea55213637b31e913ea1544959965c9 100644 --- a/src/MapleFE/java/stmt.spec +++ b/src/MapleFE/java/stmt.spec @@ -12,6 +12,289 @@ # See the Mulan PSL v2 for more details. # +rule PackageName : ONEOF(Identifier, PackageName + '.' + Identifier) + attr.action.%2 : BuildField(%1, %3) + +rule TypeName : ONEOF(Identifier, PackageOrTypeName + '.' + Identifier) + attr.action.%2 : BuildField(%1, %3) + +rule PackageOrTypeName : ONEOF(Identifier, PackageOrTypeName + '.' 
+ Identifier) + attr.action.%2 : BuildField(%1, %3) + +rule ExpressionName : ONEOF(Identifier, AmbiguousName + '.' + Identifier) + attr.action.%2 : BuildField(%1, %3) + +rule MethodName : Identifier + +rule AmbiguousName : ONEOF(Identifier, AmbiguousName + '.' + Identifier) + attr.action.%2 : BuildField(%1, %3) + +rule Class : "class" +rule ClassLiteral : ONEOF(TypeName + ZEROORMORE('[' + ']') + '.' + Class, + NumericType + ZEROORMORE('[' + ']') + '.' + Class, + "boolean" + ZEROORMORE('[' + ']') + '.' + Class, + "void" + '.' + Class) + attr.property : Single + +rule PrimaryNoNewArray_single : ONEOF( + Literal, + ClassLiteral, + "this", + TypeName + '.' + "this", + '(' + Expression + ')', + ClassInstanceCreationExpression) + attr.action.%4 : BuildField(%1, %3) + attr.action.%5 : BuildParenthesis(%2) + attr.property : Single + +rule PrimaryNoNewArray : ONEOF( + PrimaryNoNewArray_single, + FieldAccess, + ArrayAccess, + MethodInvocation, + MethodReference) + +# There was a child rule. +# ExpressionName + '.' + UnqualifiedClassInstanceCreationExpression, +# But Primary contains ExpressionName. It's a duplication, so I removed it. +rule ClassInstanceCreationExpression : ONEOF( + UnqualifiedClassInstanceCreationExpression, + Primary + '.' + UnqualifiedClassInstanceCreationExpression) + +rule UnqualifiedClassInstanceCreationExpression : + "new" + ZEROORONE(TypeArguments) + ClassOrInterfaceTypeToInstantiate + + '(' + ZEROORONE(ArgumentList) + ')' + ZEROORONE(ClassBody) + attr.action : BuildNewOperation(%3, %5, %7) + +rule ClassOrInterfaceTypeToInstantiate : + ZEROORMORE(Annotation) + Identifier + ZEROORMORE('.' + ZEROORMORE(Annotation) + Identifier) + + ZEROORONE(TypeArgumentsOrDiamond) + attr.action : BuildUserType(%2) + attr.action : AddTypeGenerics(%4) + +rule TypeArgumentsOrDiamond : ONEOF( + TypeArguments, + "<>") + +rule ArrayInitializer : '{' + ZEROORONE(VariableInitializerList) + ZEROORONE(',') + '}' +rule ArrayCreationExpression : ONEOF( + "new" + PrimitiveType + DimExprs + ZEROORONE(Dims), + "new" + ClassOrInterfaceType + DimExprs + ZEROORONE(Dims), + "new" + PrimitiveType + Dims + ArrayInitializer, + "new" + ClassOrInterfaceType + Dims + ArrayInitializer) + +rule DimExprs : DimExpr + ZEROORMORE(DimExpr) + +rule DimExpr : ZEROORMORE(Annotation) + '[' + Expression + ']' + +rule ArrayAccess : ONEOF( + ExpressionName + '[' + Expression + ']', + PrimaryNoNewArray + '[' + Expression + ']') + +rule FieldAccess : ONEOF( + Primary + '.' + Identifier, + "super" + '.' + Identifier, + TypeName + '.' + "super" + '.' + Identifier) + attr.action.%1 : BuildField(%1, %3) + +# It's possible MethodInvocation includes a MethodReference, like +# A::B(a,b) +rule MethodInvocation : ONEOF( + MethodName + '(' + ZEROORONE(ArgumentList) + ')', + TypeName + '.' + ZEROORONE(TypeArguments) + Identifier + '(' + ZEROORONE(ArgumentList) + ')', + ExpressionName + '.' + ZEROORONE(TypeArguments) + Identifier + '(' + ZEROORONE(ArgumentList) + ')', + Primary + '.' + ZEROORONE(TypeArguments) + Identifier + '(' + ZEROORONE(ArgumentList) + ')', + "super" + '.' + ZEROORONE(TypeArguments) + Identifier + '(' + ZEROORONE(ArgumentList) + ')', + TypeName + '.' + "super" + '.' 
+ ZEROORONE(TypeArguments) + Identifier + '(' + ZEROORONE(ArgumentList) + ')') + attr.action.%1 : BuildCall(%1) + attr.action.%1 : AddArguments(%3) + attr.action.%2 : BuildField(%1, %4) + attr.action.%2 : BuildCall() + attr.action.%2 : AddArguments(%6) + attr.action.%3,%4,%5 : BuildField(%1, %4) + attr.action.%3,%4,%5 : BuildCall() + attr.action.%3,%4,%5 : AddArguments(%6) + +rule ArgumentList : Expression + ZEROORMORE(',' + Expression) + attr.action.%1: BuildExprList(%1, %2) + +rule MethodReference : ONEOF( + ExpressionName + "::" + ZEROORONE(TypeArguments) + Identifier, + Primary + "::" + ZEROORONE(TypeArguments) + Identifier, + ReferenceType + "::" + ZEROORONE(TypeArguments) + Identifier, + "super" + "::" + ZEROORONE(TypeArguments) + Identifier, + TypeName + '.' + "super" + "::" + ZEROORONE(TypeArguments) + Identifier, + ClassType + "::" + ZEROORONE(TypeArguments) "new", + ArrayType + "::" + "new") + +rule PostfixExpression : ONEOF( + Primary, + ExpressionName, + PostIncrementExpression, + PostDecrementExpression) + +rule PostIncrementExpression : PostfixExpression + "++" + attr.action : BuildPostfixOperation(%2, %1) +rule PostDecrementExpression : PostfixExpression + "--" + attr.action : BuildPostfixOperation(%2, %1) + +rule UnaryExpression : ONEOF( + PreIncrementExpression, + PreDecrementExpression, + '+' + UnaryExpression, + '-' + UnaryExpression, + UnaryExpressionNotPlusMinus) + attr.action.%3,%4 : BuildUnaryOperation(%1, %2) + +rule PreIncrementExpression : "++" + UnaryExpression + attr.action : BuildUnaryOperation(%1, %2) + +rule PreDecrementExpression : "--" + UnaryExpression + attr.action : BuildUnaryOperation(%1, %2) + +rule UnaryExpressionNotPlusMinus : ONEOF( + PostfixExpression, + '~' + UnaryExpression, + '!' + UnaryExpression, + CastExpression) + +rule CastExpression : ONEOF( + '(' + PrimitiveType + ')' + UnaryExpression, + '(' + ReferenceType + ZEROORMORE(AdditionalBound) + ')' + UnaryExpressionNotPlusMinus, + '(' + ReferenceType + ZEROORMORE(AdditionalBound) + ')' + LambdaExpression) + attr.action.%1 : BuildCast(%2, %4) + attr.action.%2,%3 : BuildCast(%2, %5) + +rule MultiplicativeExpression : ONEOF( + UnaryExpression, + MultiplicativeExpression + '*' + UnaryExpression, + MultiplicativeExpression + '/' + UnaryExpression, + MultiplicativeExpression + '%' + UnaryExpression) + attr.action.%2,%3,%4 : BuildBinaryOperation(%1, %2, %3) + +rule AdditiveExpression : ONEOF( + MultiplicativeExpression, + AdditiveExpression + '+' + MultiplicativeExpression, + AdditiveExpression + '-' + MultiplicativeExpression) + attr.action.%2,%3 : BuildBinaryOperation(%1, %2, %3) + +rule ShiftExpression : ONEOF( + AdditiveExpression, + ShiftExpression + "<<" + AdditiveExpression, + ShiftExpression + ">>" + AdditiveExpression, + ShiftExpression + ">>>" + AdditiveExpression) + attr.action.%2,%3,%4 : BuildBinaryOperation(%1, %2, %3) + +rule RelationalExpression : ONEOF( + ShiftExpression, + RelationalExpression + '<' + ShiftExpression, + RelationalExpression + '>' + ShiftExpression, + RelationalExpression + "<=" + ShiftExpression, + RelationalExpression + ">=" + ShiftExpression, + RelationalExpression + "instanceof" + ReferenceType) + attr.action.%2,%3,%4,%5 : BuildBinaryOperation(%1, %2, %3) + attr.action.%6 : BuildInstanceOf(%1, %3) + +rule EqualityExpression : ONEOF( + RelationalExpression, + EqualityExpression + "==" + RelationalExpression, + EqualityExpression + "!=" + RelationalExpression) + attr.action.%2,%3 : BuildBinaryOperation(%1, %2, %3) + +rule AndExpression : ONEOF( + EqualityExpression, 
+ AndExpression + '&' + EqualityExpression) + attr.action.%2 : BuildBinaryOperation(%1, %2, %3) + +rule ExclusiveOrExpression : ONEOF( + AndExpression, + ExclusiveOrExpression + '^' + AndExpression) + attr.action.%2 : BuildBinaryOperation(%1, %2, %3) + +rule InclusiveOrExpression : ONEOF( + ExclusiveOrExpression, + InclusiveOrExpression + '|' + ExclusiveOrExpression) + attr.action.%2 : BuildBinaryOperation(%1, %2, %3) + +rule ConditionalAndExpression : ONEOF( + InclusiveOrExpression, + ConditionalAndExpression + "&&" + InclusiveOrExpression) + attr.action.%2 : BuildBinaryOperation(%1, %2, %3) + +rule ConditionalOrExpression : ONEOF( + ConditionalAndExpression, + ConditionalOrExpression + "||" + ConditionalAndExpression) + attr.action.%2 : BuildBinaryOperation(%1, %2, %3) + +rule ConditionalExpression : ONEOF( + ConditionalOrExpression, + ConditionalOrExpression + '?' + Expression + ':' + ConditionalExpression, + ConditionalOrExpression + '?' + Expression + ':' + LambdaExpression) + +rule AssignmentExpression : ONEOF( + ConditionalExpression, + Assignment) + +rule Assignment : LeftHandSide + AssignmentOperator + Expression + attr.action : BuildAssignment(%1, %2, %3) + +rule LeftHandSide : ONEOF( + ExpressionName, + FieldAccess, + ArrayAccess) + +rule AssignmentOperator : ONEOF('=', "*=", "/=", "%=", "+=", "-=", "<<=", ">>=", ">>>=", "&=", "^=", "|=") + +rule LambdaExpression : LambdaParameters + "->" + LambdaBody + attr.action : BuildLambda(%1, %3) + +rule LambdaParameters : ONEOF( + '(' + ZEROORONE(LambdaParameterList) + ')', + Identifier) + +rule LambdaParameterList : ONEOF( + LambdaParameter + ZEROORMORE(',' + LambdaParameter), + Identifier + ZEROORMORE(',' + Identifier)) + +rule LambdaParameter : ONEOF( + ZEROORMORE(VariableModifier) + LambdaParameterType + VariableDeclaratorId, + VariableArityParameter) + attr.action.%1: BuildDecl(%2, %3) + +rule LambdaParameterType : ONEOF(UnannType, "var") + +rule VariableArityParameter : ZEROORMORE(VariableModifier) + UnannType + ZEROORMORE(Annotation) + "..." + Identifier + +rule LambdaBody : ONEOF(Expression, Block) + +rule ConstantExpression : Expression + +rule Primary : ONEOF( + PrimaryNoNewArray, + ArrayCreationExpression) + +rule Expression : ONEOF( + ExpressionName, + Primary, + UnaryExpression, + BinaryExpression, + ConditionalExpression, + LambdaExpression, + AssignmentExpression) + +rule BinaryExpression : ONEOF ( + MultiplicativeExpression, + AdditiveExpression, + ShiftExpression, + RelationalExpression, + EqualityExpression, + AndExpression, + ExclusiveOrExpression, + InclusiveOrExpression, + ConditionalAndExpression, + ConditionalOrExpression) + + rule LocalVariableDeclarationStatement : LocalVariableDeclaration + ';' rule LocalVariableDeclaration : ZEROORMORE(VariableModifier) + UnannType + VariableDeclaratorList @@ -86,6 +369,8 @@ rule IfThenStatement : "if" + '(' + Expression + ')' + Statement attr.action: BuildCondBranch(%3) attr.action: AddCondBranchTrueStatement(%5) +## " This line is to make my vim show right color for the below contents. 
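# A made-up Java sketch of what the LambdaExpression / LambdaParameters rules above are
# meant to match: the bare Identifier form, the parenthesized parameter-list form, and a
# Block used as the LambdaBody.
#
#   class LambdaSketch {
#     java.util.function.IntUnaryOperator inc  = x -> x + 1;        // Identifier "->" Expression
#     java.util.function.IntBinaryOperator add = (a, b) -> a + b;   // '(' LambdaParameterList ')' "->" Expression
#     Runnable run = () -> { System.out.println("run"); };          // Block as LambdaBody
#   }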
+ rule IfThenElseStatement : "if" + '(' + Expression + ')' + StatementNoShortIf + "else" + Statement attr.action: BuildCondBranch(%3) attr.action: AddCondBranchTrueStatement(%5) @@ -125,7 +410,6 @@ rule SwitchStatement : "switch" + '(' + Expression + ')' + SwitchBlock attr.action : BuildSwitch(%3, %5) rule SwitchBlock : '{' + ZEROORMORE(ZEROORMORE(SwitchBlockStatementGroup) + ZEROORMORE(SwitchLabel)) + '}' - attr.action : BuildAllCases(%2) rule SwitchBlockStatementGroup : SwitchLabels + BlockStatements attr.action : BuildOneCase(%1, %2) @@ -187,6 +471,7 @@ rule ReturnStatement : "return" + ZEROORONE(Expression) + ';' rule ThrowStatement : "throw" + Expression + ';' rule SynchronizedStatement : "synchronized" + '(' + Expression + ')' + Block + attr.action : AddSyncToBlock(%3, %5) rule TryStatement : ONEOF( "try" + Block + Catches, @@ -217,3 +502,339 @@ rule VariableAccess : ONEOF( ExpressionName, FieldAccess) +################################################################################### +# This file defines the Java Block/Class/Interface statement. +################################################################################### + +rule ClassDeclaration : ONEOF(NormalClassDeclaration, EnumDeclaration) + attr.property : Single, Top +rule NormalClassDeclaration : ZEROORMORE(ClassModifier) + "class" + Identifier + + ZEROORONE(TypeParameters) + ZEROORONE(Superclass) + + ZEROORONE(Superinterfaces) + ClassBody + attr.action : BuildClass(%3) + attr.action : AddModifier(%1) + attr.action : AddClassBody(%7) + attr.action : AddSuperClass(%5) + attr.action : AddSuperInterface(%6) + attr.property.%1,%4,%5,%6 : ZomFast + +rule ClassAttr : ONEOF("public", "protected", "private", "abstract", "static", "final", "strictfp") + attr.property : Single +rule ClassModifier : ONEOF(Annotation, ClassAttr) + attr.property : Single + +# 1. Generic class +# 2. 
TypeParameter will be defined in type.spec +rule TypeParameters : '<' + TypeParameterList + '>' +rule TypeParameterList : TypeParameter + ZEROORMORE(',' + TypeParameter) +rule TypeParameter : ZEROORMORE(TypeParameterModifier) + Identifier + ZEROORONE(TypeBound) +rule TypeParameterModifier : Annotation +rule TypeBound : ONEOF("extends" + TypeVariable, + "extends" + ClassOrInterfaceType + ZEROORMORE(AdditionalBound)) +rule AdditionalBound : '&' + InterfaceType + +# ClassType and InterfaceType are defined in type.spec +rule Superclass : "extends" + ClassType +rule Superinterfaces : "implements" + InterfaceTypeList +rule InterfaceTypeList : InterfaceType + ZEROORMORE(',' + InterfaceType) + +# class body +rule ClassBody : "{" + ZEROORMORE(ClassBodyDeclaration) + "}" + attr.action: BuildBlock(%2) + attr.property.%2 : ZomFast + +rule ClassBodyDeclaration : ONEOF(ClassMemberDeclaration, + InstanceInitializer, + StaticInitializer, + ConstructorDeclaration) + attr.property : Single + +rule InstanceInitializer : Block + attr.action: BuildInstInit(%1) +rule StaticInitializer : "static" + Block + attr.action: BuildInstInit(%2) + attr.action: AddModifierTo(%2, %1) + +rule ClassMemberDeclaration : ONEOF(FieldDeclaration, + MethodDeclaration, + ClassDeclaration, + InterfaceDeclaration, + ';') + attr.property : Single + +rule FieldDeclaration : ZEROORMORE(FieldModifier) + UnannType + VariableDeclaratorList + ';' + attr.action: BuildDecl(%2, %3) + attr.action: AddModifier(%1) + +rule MethodDeclaration : ZEROORMORE(MethodModifier) + MethodHeader + MethodBody + attr.action: AddModifierTo(%2, %1) + attr.action: AddFunctionBodyTo(%2, %3) + +rule MethodBody : ONEOF(Block, ';') + attr.property : Single +rule MethodHeader : ONEOF(Result + MethodDeclarator + ZEROORONE(Throws), + TypeParameters + ZEROORMORE(Annotation) + Result + MethodDeclarator + + ZEROORONE(Throws)) + attr.action.%1: AddType(%2, %1) + attr.action.%1: AddThrowsTo(%2, %3) + attr.action.%2: AddType(%4, %3) + attr.action.%2: AddThrowsTo(%4, %5) + attr.property : Single + +rule Result : ONEOF(UnannType, "void") + attr.property : Single +rule MethodDeclarator : Identifier + '(' + ZEROORONE(FormalParameterList) + ')' + ZEROORONE(Dims) + attr.action: BuildFunction(%1) + attr.action: AddParams(%3) + attr.action: AddDims(%5) + +rule Throws : "throws" + ExceptionTypeList + attr.action: PassChild(%2) + +rule ExceptionTypeList : ExceptionType + ZEROORMORE(',' + ExceptionType) +rule ExceptionType : ONEOF(ClassType, TypeVariable) + +rule MethodAttr : ONEOF("public", "protected", "private", "abstract", "static", + "final", "synchronized", "native", "strictfp") + attr.property : Single + +rule MethodModifier : ONEOF(Annotation, MethodAttr) + attr.property : Single + +rule FormalParameterListNoReceiver : ONEOF(FormalParameters + ',' + LastFormalParameter, + LastFormalParameter) + +# ReceiverParameter and FormalParameterListNoReceiver could match at the same +# but with different num of tokens. Here is an example +# foo(T T.this) +# The NoReceiver could match "T T", while Receiver match "T T.this". +# Although later it figures out NoReceiver is wrong, but at this rule, both rule work. +# If we put NoReceiver as the 1st child and set property 'Single', we will miss +# Receiver which is the correct one. +# +# So I move Receiver to be the 1st child, since NoReceiver is not correct matching +# if Receiver works. +rule FormalParameterList : ONEOF(ReceiverParameter, FormalParameterListNoReceiver) + attr.property : Single + +# We don't do any action. 
Just let it pass a PassNode +rule FormalParameters : ONEOF(FormalParameter + ZEROORMORE(',' + FormalParameter), + ReceiverParameter + ZEROORMORE(',' + FormalParameter)) + attr.property : Single + +rule FormalParameter : ZEROORMORE(VariableModifier) + UnannType + VariableDeclaratorId + attr.action: BuildDecl(%2, %3) + attr.action: AddModifier(%1) +rule ReceiverParameter : ZEROORMORE(Annotation) + UnannType + ZEROORONE(Identifier + '.') + "this" + attr.action: BuildDecl(%2, %3) + +rule LastFormalParameter : ONEOF(ZEROORMORE(VariableModifier) + UnannType + ZEROORMORE(Annotation) + + "..." + VariableDeclaratorId, + FormalParameter) + attr.action.%1: BuildDecl(%2, %5) + attr.action.%1: AddModifier(%1) + attr.property : Single + + +rule FieldAttr : ONEOF("public", "protected", "private", "static", "final", "transient", "volatile") + attr.property : Single +rule FieldModifier : ONEOF(Annotation, FieldAttr) + attr.property : Single + +################################################################ +# Constructor # +################################################################ +rule ConstructorDeclaration : ZEROORMORE(ConstructorModifier) + ConstructorDeclarator + + ZEROORONE(Throws) + ConstructorBody + attr.action : AddFunctionBodyTo(%2, %4) + +rule ConstructorAttr : ONEOF("public", "protected", "private") + attr.property : Single +rule ConstructorModifier : ONEOF(Annotation, ConstructorAttr) + attr.property : Single +rule ConstructorDeclarator : ZEROORONE(TypeParameters) + SimpleTypeName + '(' + + ZEROORONE(FormalParameterList) + ')' + attr.action : BuildConstructor(%2) + attr.action: AddParams(%4) +rule SimpleTypeName : Identifier +rule ConstructorBody : '{' + ZEROORONE(ExplicitConstructorInvocation) + + ZEROORONE(BlockStatements) + '}' + attr.action : BuildBlock(%3) + +# Although ExpressionName and Primary are not excluding each other, given the rest +# of the concatenate elements this is a 'Single' rule. +rule ExplicitConstructorInvocation : ONEOF( + ZEROORONE(TypeArguments) + "this" + '(' + ZEROORONE(ArgumentList) + ')' + ';', + ZEROORONE(TypeArguments) + "super" + '(' + ZEROORONE(ArgumentList) + ')' + ';', + ExpressionName + '.' + ZEROORONE(TypeArguments) + "super" + '(' + ZEROORONE(ArgumentList) + ')' + ';', + Primary + '.' + ZEROORONE(TypeArguments) + "super" + '(' + ZEROORONE(ArgumentList) + ')' + ';') + attr.property : Single + +###################################################################### +# Enum # +###################################################################### +rule EnumDeclaration: ZEROORMORE(ClassModifier) + "enum" + Identifier + + ZEROORONE(Superinterfaces) + EnumBody + attr.action : BuildClass(%3) + attr.action : SetClassIsJavaEnum() + attr.action : AddModifier(%1) + attr.action : AddSuperInterface(%4) + attr.action : AddClassBody(%5) + +# This returns a PassNode with some ConstantNode and BlockNode. The +# different between ConstantNode and LiteralNode can be found in their definition. 
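# A made-up Java sketch of the parameter rules above: a plain receiver parameter, a
# qualified receiver parameter in an inner-class constructor, and a variable-arity last
# parameter.
#
#   class Outer {
#     void log(Outer this, String msg) {}        // ReceiverParameter: UnannType + "this"
#     class Inner {
#       Inner(Outer Outer.this) {}               // ReceiverParameter: UnannType + Identifier + '.' + "this"
#     }
#     void bar(String first, int... rest) {}     // LastFormalParameter with "..."
#   }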
+rule EnumBody: '{' + ZEROORONE(EnumConstantList) + ZEROORONE(',') + + ZEROORONE(EnumBodyDeclarations) + '}' + attr.action: BuildBlock(%2) + attr.action: AddToBlock(%4) + +# This returns a PassNode +rule EnumConstantList: EnumConstant + ZEROORMORE(',' + EnumConstant) + +# AddInitTo() will handle this complicated JavaEnum style initial value of identifier +rule EnumConstant: ZEROORMORE(EnumConstantModifier) + Identifier + + ZEROORONE('(' + ZEROORONE(ArgumentList) + ')') + ZEROORONE(ClassBody) + attr.action : AddInitTo(%2, %3) + attr.action : AddInitTo(%2, %4) + +rule EnumConstantModifier: Annotation + +# This returns a PassNode with a set of BlockNode +rule EnumBodyDeclarations: ';' + ZEROORMORE(ClassBodyDeclaration) + +###################################################################### +# Block # +###################################################################### + +# 1st and 3rd children don't exclude each other. However, if both of them +# match, they will match the same sequence of tokens, being a +# LocalVariableDeclarationStatement. So don't need traverse 3rd any more. +rule BlockStatement : ONEOF(LocalVariableDeclarationStatement, + ClassDeclaration, + Statement) + attr.property : Single + +rule BlockStatements : BlockStatement + ZEROORMORE(BlockStatement) + attr.property.%2 : ZomFast +rule Block : '{' + ZEROORONE(BlockStatements) + '}' + attr.action: BuildBlock(%2) + + +###################################################################### +# Interface # +###################################################################### +rule InterfaceDeclaration : ONEOF(NormalInterfaceDeclaration, AnnotationTypeDeclaration) + attr.property : Single, Top + +rule NormalInterfaceDeclaration : ZEROORMORE(InterfaceModifier) + "interface" + Identifier + + ZEROORONE(TypeParameters) + ZEROORONE(ExtendsInterfaces) + InterfaceBody + attr.action : BuildInterface(%3) + attr.action : AddInterfaceBody(%6) + +rule InterfaceAttr : ONEOF("public", "protected", "private", "abstract", "static", "strictfp") + attr.property : Single +rule InterfaceModifier : ONEOF(Annotation, InterfaceAttr) + attr.property : Single +rule ExtendsInterfaces : "extends" + InterfaceTypeList +rule InterfaceBody : '{' + ZEROORMORE(InterfaceMemberDeclaration) + '}' + attr.action : BuildBlock(%2) + attr.property.%2 : ZomFast + +rule InterfaceMemberDeclaration : ONEOF(ConstantDeclaration, + InterfaceMethodDeclaration, + ClassDeclaration, + InterfaceDeclaration, + ';') + attr.property : Single + +# constant decl is also called field decl. In interface, field must have a variable initializer +# However, the rules below don't tell this limitation. 
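# A made-up Java enum matching the EnumConstant rule above: constants carrying an
# argument list (picked up by AddInitTo) and one constant with its own ClassBody.
#
#   enum Planet {
#     EARTH(5.97e24),
#     MARS(6.42e23) { @Override public String toString() { return "the red planet"; } };
#     private final double mass;
#     Planet(double mass) { this.mass = mass; }
#   }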
+rule ConstantDeclaration : ZEROORMORE(ConstantModifier) + UnannType + VariableDeclaratorList + ';' +rule ConstantAttr : ONEOF("public", "static", "final") + attr.property : Single +rule ConstantModifier : ONEOF(Annotation, ConstantAttr) + attr.property : Single + +rule InterfaceMethodDeclaration : ZEROORMORE(InterfaceMethodModifier) + MethodHeader + MethodBody + attr.action: AddModifierTo(%2, %1) + attr.action: AddFunctionBodyTo(%2, %3) +rule InterfaceMethodAttr : ONEOF("public", "abstract", "default", "static", "strictfp") + attr.property : Single +rule InterfaceMethodModifier : ONEOF(Annotation, InterfaceMethodAttr) + attr.property : Single + +###################################################################### +# Annotation Type # +###################################################################### +rule AnnotationTypeDeclaration : ZEROORMORE(InterfaceModifier) + '@' + "interface" + + Identifier + AnnotationTypeBody + attr.action : BuildAnnotationType(%4) + attr.action : AddModifier(%1) + attr.action : AddAnnotationTypeBody(%5) + +rule AnnotationTypeBody : '{' + ZEROORMORE(AnnotationTypeMemberDeclaration) + '}' + attr.action : BuildBlock(%2) +rule AnnotationTypeMemberDeclaration : ONEOF(AnnotationTypeElementDeclaration, + ConstantDeclaration, + ClassDeclaration, + InterfaceDeclaration, + ';') + attr.property : Single +rule AnnotationTypeElementDeclaration : ZEROORMORE(AnnotationTypeElementModifier) + UnannType + + Identifier + '(' + ')' + ZEROORONE(Dims) + + ZEROORONE(DefaultValue) + ';' +rule AnnotationTypeElementAttr : ONEOF("public", "abstract") + attr.property : Single +rule AnnotationTypeElementModifier : ONEOF(Annotation, AnnotationTypeElementAttr) + attr.property : Single +rule DefaultValue : "default" + ElementValue + +###################################################################### +# Annotation # +###################################################################### +rule Annotation : ONEOF(NormalAnnotation, + MarkerAnnotation, + SingleElementAnnotation) + +rule NormalAnnotation : '@' + TypeName + '(' + ZEROORONE(ElementValuePairList) + ')' + attr.action : BuildAnnotation(%2) + +rule MarkerAnnotation : '@' + TypeName + attr.action : BuildAnnotation(%2) + +rule SingleElementAnnotation : '@' + TypeName + '(' + ElementValue + ')' + attr.action : BuildAnnotation(%2) + +rule ElementValuePairList : ElementValuePair + ZEROORMORE(',' + ElementValuePair) +rule ElementValuePair : Identifier + '=' + ElementValue +rule ElementValue : ONEOF(ConditionalExpression, + ElementValueArrayInitializer, + Annotation) +rule ElementValueArrayInitializer : '{' + ZEROORONE(ElementValueList) + ZEROORONE(',') + '}' +rule ElementValueList : ElementValue + ZEROORMORE(',' + ElementValue) + +###################################################################### +# Package # +###################################################################### +rule PackageModifier: Annotation +rule PackageDeclaration: ZEROORMORE(PackageModifier) + "package" + Identifier + ZEROORMORE('.' + Identifier) + ';' + attr.action : BuildField(%3, %4) + attr.action : BuildPackageName() + attr.property : Top + attr.property.%1,%4 : ZomFast + +rule ImportDeclaration: ONEOF(SingleTypeImportDeclaration, + TypeImportOnDemandDeclaration, + SingleStaticImportDeclaration, + StaticImportOnDemandDeclaration) + attr.property : Top + +rule SingleTypeImportDeclaration: "import" + TypeName + ';' + attr.action : BuildSingleTypeImport(%2) +rule TypeImportOnDemandDeclaration: "import" + PackageOrTypeName + '.' 
+ '*' + ';' + attr.action : BuildAllTypeImport(%2) +rule SingleStaticImportDeclaration: "import" + "static" + TypeName + '.' + Identifier + ';' + attr.action : BuildField(%3, %5) + attr.action : BuildSingleStaticImport() +rule StaticImportOnDemandDeclaration: "import" + "static" + TypeName + '.' + '*' + ';' + attr.action : BuildAllStaticImport(%3) diff --git a/src/MapleFE/java/type.spec b/src/MapleFE/java/type.spec index 6a8990b563038b3e2f1256dd137ed226386f8351..d0339d802c7f076b7b25e5b1861000747331ef07 100644 --- a/src/MapleFE/java/type.spec +++ b/src/MapleFE/java/type.spec @@ -118,6 +118,7 @@ rule ReferenceType : ONEOF(ClassOrInterfaceType, TypeVariable, ArrayType) # Final one ########################### rule TYPE: ONEOF(PrimitiveType, ReferenceType, NullType) +rule Type : TYPE ##################################################################################### # Abnormal types # diff --git a/src/MapleFE/java/type_conv.spec b/src/MapleFE/java/type_conv.spec deleted file mode 100644 index aa140d750f2be14c30d1a82688fceb9cf866dc44..0000000000000000000000000000000000000000 --- a/src/MapleFE/java/type_conv.spec +++ /dev/null @@ -1,121 +0,0 @@ -# Copyright (C) [2020] Futurewei Technologies, Inc. All rights reverved. -# -# OpenArkFE is licensed under the Mulan PSL v2. -# You can use this software according to the terms and conditions of the Mulan PSL v2. -# You may obtain a copy of Mulan PSL v2 at: -# -# http://license.coscl.org.cn/MulanPSL2 -# -# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER -# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR -# FIT FOR A PARTICULAR PURPOSE. -# See the Mulan PSL v2 for more details. -# -####################################################################### -# There are over 10 kinds of type conversion in Java. This file # -# defines the rules of type conversion in different categories. # -# # -# There are many details putting many flavors in the semantics. # -# They can be implemented in java/src/*.cpp. Such as when convert # -# 'float' to 'Float', it diverges when the value is 'NaN'. # -# Situations like this will be handled in specified functions. 
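# A made-up Java fragment showing the kinds of conversion this spec covered, e.g.
# boxing, unboxing and widening between primitive and reference types.
#
#   class ConvSketch {
#     Float boxed  = 3.14f;     // boxing conversion: float -> Float
#     float raw    = boxed;     // unboxing conversion: Float -> float
#     long widened = 42;        // widening primitive conversion: int -> long
#   }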
# -####################################################################### - - -####################################################################### -# Identity Conversion # -####################################################################### - - -####################################################################### -# Widening Primitive Conversion # -####################################################################### - - -####################################################################### -# Narrowing Primitive Conversion # -####################################################################### - - -####################################################################### -# Widening Reference Conversion # -####################################################################### - - -####################################################################### -# Narrowing Reference Conversion # -####################################################################### - - -####################################################################### -# Boxing Conversion # -####################################################################### - - -####################################################################### -# Unboxing Conversion # -####################################################################### - - -####################################################################### -# Unchecked Conversion # -####################################################################### - - -####################################################################### -# Capture Conversion # -####################################################################### - - -####################################################################### -# String Conversion # -####################################################################### - - -####################################################################### -# Forbidden Conversion # -####################################################################### - - -####################################################################### -# Value Set Conversion # -####################################################################### - - -####################################################################### -# Boxing/Unboxing conversion is conversion between primitive types # -# and their corresponding Java reference types. # -# This could also happen in other similar lanaguages. # -# # -# 1. Since reference types are not part of Autogen, they are not # -# handled in the supported_types.spec. # -# 2. Reference types here will be only recognized through keyword, # -# The generated files will handle the conversion. 
# -# # -# TODO: There is one thing I will figure out later, i.e., see if we # -# need generate the 'new' operation in the Maple IR # -####################################################################### - -# The syntax of Boxing is a duplex -# PrimType: Use the types supported in autogen/supported_types.spec -# RefType: Use the keyword of reference types -# - -STRUCT Boxing : ONEOF((Boolean, "Boolean"), - (Byte, "Byte"), - (Short, "Short"), - (Char, "Character"), - (Int, "Integer"), - (Long, "Long"), - (Float, "Float"), - (Double, "Double")) - -STRUCT UnBoxing : ONEOF(("Boolean", Boolean), - ("Byte", Byte), - ("Short", Short), - ("Char", Char), - ("Int", Int), - ("Long", Long), - ("Float", Float), - ("Double", Double)) - diff --git a/src/MapleFE/ladetect/Makefile b/src/MapleFE/ladetect/Makefile index ac28904e9a9334189058b9c9d33c477270ca9fc1..3badd03259474113eedfb80b0f9aed5609b63823 100644 --- a/src/MapleFE/ladetect/Makefile +++ b/src/MapleFE/ladetect/Makefile @@ -1,46 +1,70 @@ +# Copyright (C) [2020-2021] Futurewei Technologies, Inc. All rights reverved. +# +# OpenArkFE is licensed under the Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# + include ../Makefile.in # create build first BUILD=$(BUILDDIR)/ladetect -$(shell $(MKDIR_P) $(BUILD)) +BUILDGEN=$(BUILDDIR)/gen +$(shell $(MKDIR_P) $(BUILD) $(BUILDGEN)) -# TEMPVAR=$(shell rm -rf $(LANG)) -# TEMPVAR=$(shell mkdir -p $(LANG)) -# TEMPVAR=$(shell cp ../$(LANG)/include/gen_*.h $(LANG)/) -# TEMPVAR=$(shell cp ../$(LANG)/src/gen_*.cpp $(LANG)/) -# TEMPVAR=$(shell mkdir -p $(BUILD)) -# TEMPVAR=$(shell mkdir -p $(BUILD)/$(LANG)) +SRCG := $(wildcard $(BUILDGEN)/gen_*.cpp) +OBJG := $(patsubst %.cpp, %.o, $(SRCG)) +DEPG := $(patsubst %.cpp, %.d, $(SRCG)) -LANGSRC=$(shell ls $(LANG)/*.cpp) -SRC := $(LANGSRC) $(wildcard *.cpp) +SRC := $(wildcard *.cpp) OBJ := $(patsubst %.cpp, %.o, $(SRC)) +OBJL := $(foreach obj, $(OBJ), $(BUILD)/$(obj)) DEP := $(patsubst %.cpp, %.d, $(SRC)) +DEPL := $(foreach dep, $(DEP), $(BUILD)/$(dep)) -OBJS := $(foreach obj, $(OBJ), $(BUILD)/$(obj)) -DEPS := $(foreach dep, $(DEP), $(BUILD)/$(dep)) +OBJS := $(OBJG) $(OBJL) +DEPS := $(DEPG) $(DEPL) INCLUDES := -I $(MAPLEFE_ROOT)/ladetect \ - -I $(MAPLEFE_ROOT)/ladetect/$(LANG) \ - -I $(MAPLEFE_ROOT)/shared/include + -I $(MAPLEFE_ROOT)/shared/include \ + -I $(MAPLEFE_ROOT)/$(SRCLANG)/include \ + -I $(BUILDGEN) + +INCLUDEGEN := -I $(BUILDGEN) -I $(MAPLEFE_ROOT)/shared/include + SHAREDLIB = $(BUILDDIR)/shared/shared.a TARGET = ladetect .PHONY: all -all: $(TARGET) +all: $(BUILD)/$(TARGET) -$(TARGET) : $(OBJS) $(SHAREDLIB) +$(BUILD)/$(TARGET) : $(OBJS) $(SHAREDLIB) $(LD) -o $(BUILD)/$(TARGET) $(OBJS) $(SHAREDLIB) + (cd $(BUILD); ./$(TARGET)) -include $(DEPS) .PHONY: clean -vpath %.cpp $(MAPLEFE_ROOT)/shared/src vpath %.o $(BUILD) vpath %.d $(BUILD) -CXXFLAGS := $(CXXFLAGS) -I ../$(LANG)/ +$(BUILDGEN)/%.o : $(BUILDGEN)/%.cpp $(BUILDGEN)/%.d + $(CXX) $(CXXFLAGS) -fpermissive $(INCLUDEGEN) -w -c $< -o $@ + +$(BUILDGEN)/%.d : $(BUILDGEN)/%.cpp + @$(CXX) $(CXXFLAGS) -std=c++11 -MM $(INCLUDEGEN) $< > $@ + @mv -f $(BUILDGEN)/$*.d $(BUILDGEN)/$*.d.tmp + @sed -e 's|.*:|$(BUILDGEN)/$*.o:|' < 
$(BUILDGEN)/$*.d.tmp > $(BUILDGEN)/$*.d + @rm -f $(BUILDGEN)/$*.d.tmp #Pattern Rules $(BUILD)/%.o : %.cpp $(BUILD)/%.d @@ -52,10 +76,5 @@ $(BUILD)/%.d : %.cpp @sed -e 's|.*:|$(BUILD)/$*.o:|' < $(BUILD)/$*.d.tmp > $(BUILD)/$*.d @rm -f $(BUILD)/$*.d.tmp - -#.cpp.o: -# $(CXX) $(CXXFLAGS) -fpermissive $(INCLUDES) -w -c $*.cpp -o $(BUILD)/$*.o -# $(CXX) $(CXXFLAGS) -std=c++11 -MM $(INCLUDES) $*.cpp > $(BUILD)/$*.d - clean: - rm -rf $(BUILD) + rm -rf $(BUILD) $(OBJG) $(DEPG) diff --git a/src/MapleFE/ladetect/la_detect.cpp b/src/MapleFE/ladetect/la_detect.cpp index c1eba89defe7c1517e0548ebf3f891381a92bf1c..49757bab478a9dc1923ba914f572f824646fe25b 100644 --- a/src/MapleFE/ladetect/la_detect.cpp +++ b/src/MapleFE/ladetect/la_detect.cpp @@ -15,7 +15,7 @@ #include "common_header_autogen.h" #include "ruletable_util.h" -#include "gen_summary.h" +#include "rule_summary.h" #include "la_detect.h" #include "container.h" @@ -266,6 +266,7 @@ TResult LADetector::DetectRuleTable(RuleTable *rt, ContTreeNode *p) break; case ET_Zeroorone: case ET_Zeroormore: + case ET_ASI: res = DetectZeroorXXX(rt, node); break; case ET_Concatenate: @@ -351,7 +352,7 @@ TResult LADetector::DetectOneof(RuleTable *rule_table, ContTreeNode return result; } -// Zeroormore and Zeroorone has the same way to handle. +// Zeroormore and Zeroorone and ASI has the same way to handle. TResult LADetector::DetectZeroorXXX(RuleTable *rule_table, ContTreeNode *tree_node) { TResult result = TRS_NA; MASSERT((rule_table->mNum == 1) && "zeroormore node has more than one elements?"); @@ -488,7 +489,7 @@ void LADetector::Release() { } // To write the external decl of all LookAheadTable of each rule. -// +// // extern LookAheadTable TblStatementLookAheadTable; // extern LookAhead *TblStatementLookAhead; // ... @@ -597,14 +598,13 @@ void LADetector::WriteCppFile() { mCppFile->WriteOneLine("}", 1); } -// Write the recursion to java/gen_recursion.h and java/gen_recursion.cpp +// Write the recursion to gen/genmore_lookahead.h and gen/genmore_lookahead.cpp void LADetector::Write() { - std::string lang_path_header("../../java/include/"); - std::string lang_path_cpp("../../java/src/"); + std::string lang_path("../gen/"); - std::string file_name = lang_path_cpp + "gen_lookahead.cpp"; + std::string file_name = lang_path + "genmore_lookahead.cpp"; mCppFile = new Write2File(file_name); - file_name = lang_path_header + "gen_lookahead.h"; + file_name = lang_path + "gen_lookahead.h"; mHeaderFile = new Write2File(file_name); WriteHeaderFile(); diff --git a/src/MapleFE/ladetect/la_detect.h b/src/MapleFE/ladetect/la_detect.h index 18a1bcbd8a8fd2e4f4cfad834fed7a53fdf4f76e..46fe89d1a9677b41d3b3c5df319fd28946d7a4c5 100644 --- a/src/MapleFE/ladetect/la_detect.h +++ b/src/MapleFE/ladetect/la_detect.h @@ -62,7 +62,7 @@ public: void AddDependent(RuleTable *rt) {mDependents.PushBack(rt);} void Release() {mDependents.Release();} -}; +}; // Return result of most detect functions. enum TResult { diff --git a/src/MapleFE/recdetect/Makefile b/src/MapleFE/recdetect/Makefile index 6d436ee84cde07134c4d5222729a46aaa40b283e..025b900d399d63e2cab3d3bb9e35a7a9eedb3ca9 100644 --- a/src/MapleFE/recdetect/Makefile +++ b/src/MapleFE/recdetect/Makefile @@ -1,46 +1,67 @@ +# Copyright (C) [2020-2021] Futurewei Technologies, Inc. All rights reverved. +# +# OpenArkFE is licensed under the Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. 
+# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# + include ../Makefile.in # create build first BUILD=$(BUILDDIR)/recdetect -$(shell $(MKDIR_P) $(BUILD)) +BUILDGEN=$(BUILDDIR)/gen +$(shell $(MKDIR_P) $(BUILD) $(BUILDGEN)) -# TEMPVAR=$(shell rm -rf $(LANG)) -# TEMPVAR=$(shell mkdir -p $(LANG)) -# TEMPVAR=$(shell cp ../$(LANG)/include/gen_*.h $(LANG)/) -# TEMPVAR=$(shell cp ../$(LANG)/src/gen_*.cpp $(LANG)/) -# TEMPVAR=$(shell mkdir -p $(BUILD)) -# TEMPVAR=$(shell mkdir -p $(BUILD)/$(LANG)) +SRCG := $(wildcard $(BUILDGEN)/gen_*.cpp) +OBJG := $(patsubst %.cpp, %.o, $(SRCG)) +DEPG := $(patsubst %.cpp, %.d, $(SRCG)) -LANGSRC=$(shell ls $(LANG)/*.cpp) -SRC := $(LANGSRC) $(wildcard *.cpp) +SRC := $(wildcard *.cpp) OBJ := $(patsubst %.cpp, %.o, $(SRC)) +OBJL := $(foreach obj, $(OBJ), $(BUILD)/$(obj)) DEP := $(patsubst %.cpp, %.d, $(SRC)) +DEPL := $(foreach dep, $(DEP), $(BUILD)/$(dep)) + +OBJS := $(OBJG) $(OBJL) +DEPS := $(DEPG) $(DEPL) -OBJS := $(foreach obj, $(OBJ), $(BUILD)/$(obj)) -DEPS := $(foreach dep, $(DEP), $(BUILD)/$(dep)) +INCLUDES := -I $(MAPLEFE_ROOT)/recdetect \ + -I $(MAPLEFE_ROOT)/shared/include \ + -I $(BUILDGEN) -INCLUDES := -I $(MAPLEFE_ROOT)/recdetect/include \ - -I $(MAPLEFE_ROOT)/recdetect/$(LANG) \ - -I $(MAPLEFE_ROOT)/shared/include SHAREDLIB = $(BUILDDIR)/shared/shared.a TARGET = recdetect .PHONY: all -all: $(TARGET) +all: $(BUILD)/$(TARGET) -$(TARGET) : $(OBJS) $(SHAREDLIB) +$(BUILD)/$(TARGET) : $(OBJS) $(SHAREDLIB) $(LD) -o $(BUILD)/$(TARGET) $(OBJS) $(SHAREDLIB) + (cd $(BUILD); ./$(TARGET)) -include $(DEPS) .PHONY: clean -vpath %.cpp $(MAPLEFE_ROOT)/shared/src vpath %.o $(BUILD) vpath %.d $(BUILD) -CXXFLAGS := $(CXXFLAGS) -I ../$(LANG)/ +$(BUILDGEN)/%.o : $(BUILDGEN)/%.cpp $(BUILDGEN)/%.d + $(CXX) $(CXXFLAGS) -fpermissive -I $(BUILDGEN) -w -c $< -o $@ + +$(BUILDGEN)/%.d : $(BUILDGEN)/%.cpp + @$(CXX) $(CXXFLAGS) -std=c++11 -MM $(INCLUDES) $< > $@ + @mv -f $(BUILDGEN)/$*.d $(BUILDGEN)/$*.d.tmp + @sed -e 's|.*:|$(BUILDGEN)/$*.o:|' < $(BUILDGEN)/$*.d.tmp > $(BUILDGEN)/$*.d + @rm -f $(BUILDGEN)/$*.d.tmp #Pattern Rules $(BUILD)/%.o : %.cpp $(BUILD)/%.d @@ -52,10 +73,5 @@ $(BUILD)/%.d : %.cpp @sed -e 's|.*:|$(BUILD)/$*.o:|' < $(BUILD)/$*.d.tmp > $(BUILD)/$*.d @rm -f $(BUILD)/$*.d.tmp - -#.cpp.o: -# $(CXX) $(CXXFLAGS) -fpermissive $(INCLUDES) -w -c $*.cpp -o $(BUILD)/$*.o -# $(CXX) $(CXXFLAGS) -std=c++11 -MM $(INCLUDES) $*.cpp > $(BUILD)/$*.d - clean: - rm -rf $(BUILD) + rm -rf $(BUILD) $(OBJG) $(DEPG) diff --git a/src/MapleFE/recdetect/rec_detect.cpp b/src/MapleFE/recdetect/rec_detect.cpp index 8de48d23fff2b8abc31c11d9ef14c6b4df1dc342..8193ebd46c6978641d2dcd2c91896cdb838f4f33 100644 --- a/src/MapleFE/recdetect/rec_detect.cpp +++ b/src/MapleFE/recdetect/rec_detect.cpp @@ -16,7 +16,7 @@ #include "common_header_autogen.h" #include "ruletable_util.h" #include "rec_detect.h" -#include "gen_summary.h" +#include "rule_summary.h" namespace maplefe { @@ -27,7 +27,7 @@ namespace maplefe { // the recursions. We differentiate a recursion using the first node, ie, the topmost // node in the tree in this recursion. // 2) Each node (ie rule table) could have multiple recursions. -// 3) Recursions could include children recursions inside. +// 3) Recursions could include children recursions inside. 
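Note on the ET_ASI hunks in la_detect.cpp above and rec_detect.cpp below: an automatically inserted semicolon is an element that may match nothing, so both detectors route it through the same DetectZeroorXXX handler as ET_Zeroorone and ET_Zeroormore. The fragment below is only a simplified, self-contained restatement of that dispatch; the real EntryType/TResult enums and handlers live in the shared rule-table code.

#include <cassert>

// Minimal stand-ins for the shared rule-table enums (illustration only).
enum EntryType { ET_Oneof, ET_Zeroorone, ET_Zeroormore, ET_ASI, ET_Concatenate };
enum TResult   { TRS_NA, TRS_MaybeZero, TRS_Error };

static TResult DetectZeroorXXX()   { return TRS_MaybeZero; }  // possibly-empty element
static TResult DetectConcatenate() { return TRS_NA; }

static TResult Dispatch(EntryType et) {
  switch (et) {
    case ET_Zeroorone:
    case ET_Zeroormore:
    case ET_ASI:                 // ASI is analyzed like an optional element
      return DetectZeroorXXX();
    case ET_Concatenate:
      return DetectConcatenate();
    default:
      return TRS_NA;
  }
}

int main() {
  assert(Dispatch(ET_ASI) == Dispatch(ET_Zeroorone));  // same result as zero-or-one
  return 0;
}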
// // [NOTE] The key point in recursion detector is to make sure for each loop, there // should be one and only one recursion counted, even if there are multiple @@ -346,6 +346,7 @@ TResult RecDetector::DetectRuleTable(RuleTable *rt, ContTreeNode *p) break; case ET_Zeroorone: case ET_Zeroormore: + case ET_ASI: res = DetectZeroorXXX(rt, node); break; case ET_Concatenate: @@ -422,7 +423,7 @@ TResult RecDetector::DetectOneof(RuleTable *rule_table, ContTreeNode return result; } -// Zeroormore and Zeroorone has the same way to handle. +// Zeroormore and Zeroorone and ASI has the same way to handle. TResult RecDetector::DetectZeroorXXX(RuleTable *rule_table, ContTreeNode *p) { TResult result = TRS_NA; MASSERT((rule_table->mNum == 1) && "zeroormore node has more than one elements?"); @@ -548,7 +549,7 @@ TResult RecDetector::DetectData(RuleTable *rule_table, ContTreeNode // At any point when we want to return, we need take the remaining elements // to ToDo list, if they are not in any of InProcess, Done and ToDo. - + TResult RecDetector::DetectConcatenate(RuleTable *rule_table, ContTreeNode *p) { // We use 'res' to record the status of the leading children. TRS_MaybeZero is good // for the beginning as it means it's empty right now. @@ -571,7 +572,7 @@ TResult RecDetector::DetectConcatenate(RuleTable *rule_table, ContTreeNodemNum == 1); TableData *data = rt->mData; @@ -1123,7 +1125,7 @@ void RecDetector::WriteRecursionGroups() { } group += "};"; mCppFile->WriteOneLine(group.c_str(), group.size()); - } + } // LeftRecursion **recursion_groups[N] = {RecursionGroup_1, RecursionGroup_2...}; // LeftRecursion ***gRecursionGroups = recursion_groups; @@ -1146,7 +1148,7 @@ void RecDetector::WriteRecursionGroups() { // We want to dump like below. // unsigned gRule2GroupNum = X; -// +// void RecDetector::WriteRule2Group() { std::string comment = "// Rule2Group mapping"; @@ -1357,12 +1359,11 @@ void RecDetector::WriteRule2Recursion() { // Write the recursion to java/gen_recursion.h and java/gen_recursion.cpp void RecDetector::Write() { - std::string lang_path_header("../../java/include/"); - std::string lang_path_cpp("../../java/src/"); + std::string lang_path("../gen/"); - std::string file_name = lang_path_cpp + "gen_recursion.cpp"; + std::string file_name = lang_path + "genmore_recursion.cpp"; mCppFile = new Write2File(file_name); - file_name = lang_path_header + "gen_recursion.h"; + file_name = lang_path + "gen_recursion.h"; mHeaderFile = new Write2File(file_name); WriteHeaderFile(); diff --git a/src/MapleFE/scripts/build_mapleall.sh b/src/MapleFE/scripts/build_mapleall.sh index a37d1634def7e2988d4bc89a29ee234cfb97e03e..c1c5d7375cdb59f496a9c361166f40eab0b2a456 100755 --- a/src/MapleFE/scripts/build_mapleall.sh +++ b/src/MapleFE/scripts/build_mapleall.sh @@ -13,13 +13,18 @@ # See the Mulan PSL v2 for more details. # +set -e + if [ ! 
-d $MAPLEALL_ROOT ]; then cd $MAPLE_ROOT - git clone https://gitee.com/openarkcompiler/OpenArkCompiler.git + git clone https://gitee.com/openarkcompiler/OpenArkCompiler.git -b dev_MapleFE fi cd $MAPLEALL_ROOT +git checkout dev_MapleFE git pull source build/envsetup.sh arm debug make setup -make +make clobber +make maple +make irbuild diff --git a/src/MapleFE/scripts/maplefe-autogen.py b/src/MapleFE/scripts/maplefe-autogen.py new file mode 100755 index 0000000000000000000000000000000000000000..f0ffce0cf1f8723de889853653e340c32e058df9 --- /dev/null +++ b/src/MapleFE/scripts/maplefe-autogen.py @@ -0,0 +1,1375 @@ +#!/usr/bin/env python3 +from os import path, environ +import subprocess +import hashlib +import yaml + +# +# Needs to install the following packages on Ubuntu 18.04 or 20.04 +# sudo apt install -y clang-tools-10 clang-format-10 libyaml-cpp-dev +# + +root_dir = path.dirname(path.dirname(path.realpath(__file__))) + '/' +builddir = environ.get('BUILDDIR') +output_dir = builddir + '/ast_gen/' if builddir != None else root_dir + "output/typescript/ast_gen/" +maplefe_dir = root_dir + 'shared/' +# initial_yaml = output_dir + 'maplefe/index.yaml' # For higher version of clang-doc +initial_yaml = output_dir + 'maplefe.yaml' # For version 10 +treenode_yaml = output_dir + 'maplefe/TreeNode.yaml' + +if not hasattr(yaml, "cyaml"): + print("Note: You may install package 'libyaml-cpp-dev' to speed up YAML parsing.") + +license_notice = [ +"""/* +* Copyright (C) [2021] Futurewei Technologies, Inc. All rights reverved. +* +* OpenArkFE is licensed under the Mulan PSL v2. +* You can use this software according to the terms and conditions of the Mulan PSL v2. +* You may obtain a copy of Mulan PSL v2 at: +* +* http://license.coscl.org.cn/MulanPSL2 +* +* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +* FIT FOR A PARTICULAR PURPOSE. +* See the Mulan PSL v2 for more details. 
+*/ + +// Generated by maplefe-autogen.py +""" +] # license_notice + +compile_commands = [ +f""" +[ + {{ "directory": "{maplefe_dir}src" + "command": "clang++ -std=c++17 -DDEBUG -fpermissive -I {maplefe_dir}include -w -c ast_builder.cpp", + "file": "ast_builder.cpp", + "output": "{output_dir}" + }} +] +""" +] # compile_commands + +bash_commands = [ +f""" +cd {maplefe_dir}src || exit 1 +rm -f {output_dir}yaml.log +clang-doc-10 ast_builder.cpp -p {output_dir} --format=yaml -output={output_dir} +""" +] # bash_commands + +def exec_command(cmd): + subprocess.call(cmd, shell=True) + +def create(filename, lines): + with open(filename, "w") as f: + for line in lines: + f.write(line + "\n") + +def append(filename, lines): + with open(filename, "a") as f: + for line in lines: + f.write(line + "\n") + +def finalize(filename, lines): + append(filename, lines) + exec_command('clang-format-10 -i --style="{ColumnLimit: 120}" ' + filename) + print("Generated " + filename) + +exec_command('bash -c "mkdir -p ' + output_dir + 'shared"') +create(output_dir + 'compile_commands.json', compile_commands) +create(output_dir + 'ast.sh', bash_commands) +exec_command('bash ' + output_dir + 'ast.sh') + +################################################################################ +# # +# Common code to handle YAML files # +# # +################################################################################ + +# Dump all content in a dictionary to ast_gen/yaml.log +def log(dictionary, indent, msg = ""): + global log_buf + if indent == 0: log_buf = [msg] + indstr = " . " * indent + for key, value in dictionary.items(): + if key == "USR": continue + prefix = indstr + key + ' : ' + if isinstance(value, dict): + log_buf.append(prefix + "{") + log(value, indent + 1) + log_buf.append(indstr+ " }") + elif isinstance(value, list): + log_buf.append(prefix + "[") + for elem in value: + if isinstance(elem, dict): + log(elem, indent + 1) + else: + log_buf.append(indstr + " " + str(elem)) + log_buf.append(indstr+ " ]") + else: + log_buf.append(prefix + str(value)) + log_buf.append(indstr + "---") + if indent == 0: + append(output_dir + 'yaml.log', log_buf) + +# Handle a YAML file with a callback +def handle_yaml(filename, callback, saved_yaml = {}): + if filename not in saved_yaml: + print(str(len(saved_yaml) + 1) + ": Processing " + filename + " ...") + with open(filename) as stream: + loader = yaml.cyaml.CLoader if hasattr(yaml, "cyaml") else yaml.Loader + yaml_data = yaml.load(stream, Loader=loader) + saved_yaml[filename] = yaml_data + log(yaml_data, 0, "YAML file: " + filename) + else: + yaml_data = saved_yaml[filename] + callback(yaml_data) + +# Get the pointed-to type, e.g. FunctionNode of "class maplefe::FunctionNode *" +def get_pointed(mtype): + loc = mtype.find("class maplefe::") + return mtype[loc + 15:-2] if loc >= 0 and mtype[-6:] == "Node *" else None + +# Get enum type, e.g. ImportProperty of "enum maplefe::ImportProperty" +def get_enum_type(mtype): + loc = mtype.find("maplefe::") + return mtype[loc + 9:] if loc >= 0 else None + +# Get the enum list for given enum name +def get_enum_list(dictionary, enum_name): + assert dictionary != None + enums = dictionary["ChildEnums"] + for e in enums: + for key, value in e.items(): + if key == "Name" and value == enum_name: + return e["Members"] + return [] + +# Generate functions for enum types, e.g. 
"const char *GetEnumOprId(OprId k);" for enum OprId +def gen_enum_func(dictionary): + global include_file, src_file, gen_args + hcode = [''] + xcode = [''] + for each in dictionary["ChildEnums"]: + name = each["Name"] + hcode.append("static const char* GetEnum" + name + "(" + name + " k);") + xcode.extend(["const char* " + gen_args[1] + "::GetEnum" + name + "(" + name + " k) {", + "switch(k) {"]) + for e in get_enum_list(dictionary, name): + xcode.append('case ' + e + ': return "' + e + '";') + xcode.extend(['default: MASSERT(0 && "Unexpected enumerator");', + '}', + 'return "UNEXPECTED ' + name + '";', + '}\n']) + append(src_file, xcode) + append(include_file, hcode) + +# Generate code for class node which is derived from TreeNode +def gen_handler_ast_node(dictionary): + global include_file, src_file, gen_args + code = [''] + node_name = dictionary["Name"]; + assert dictionary["TagType"] == "Class" + + member_functions = {} + child_functions = dictionary.get("ChildFunctions") + if child_functions != None: + for c in child_functions: + name = c.get("Name") + member_functions[name] = "R-" + str(c.get("ReturnType").get("Type").get("Name")) + + # gen_func_definition() for the code at the beginning of current function body + code.append(gen_func_definition(dictionary, node_name)) + members = dictionary.get("Members") + if members != None: + declloc = dictionary.get("DefLocation") + if gen_func_decl_location() and declloc != None and isinstance(declloc, dict): + fname = declloc.get("Filename") + floc = fname.find("shared/") + code.append("// Declared at " + fname[floc:] + ":" + str(declloc.get("LineNumber"))) + + for m in members: + name = m.get("Name") + assert name[0:1] == "m" + otype = m.get("Type").get("Name") + if otype == "_Bool": otype = "bool" + + plural = "Get" + name[1:] + + # m*Children to GetChild*() + if name[-8:] == "Children": + singular = "Get" + name[1:-3] + + # m*Catches to GetCatch*(), m*Classes to GetClass*() + elif name[-4:] == "ches" or name[-4:] == "shes" or name[-4:] == "sses" or name[-4:] == "xes" : + singular = "Get" + name[1:-2] + + # mIs* to Is*() for boolean type + elif name[:3] == "mIs" and otype == "bool": + plural = name[1:] + singular = name[1:] + + # Default singular without the endding 's' + else: + singular = "Get" + name[1:-1] + + ntype = get_pointed(otype) + access = m.get("Access") + accessstr = access if access != None else "" + if ntype != None: + if member_functions.get(plural) != None: + # gen_call_child_node() for child node in current function body + code.append(gen_call_child_node(dictionary, node_name, name, ntype, "node->" + plural + "()")) + else: + # It is an ERROR if no member function for the child node + code.append("Error!; // " + gen_call_child_node(dictionary, node_name, name, ntype, "node->" + plural + "()")) + elif ((otype == "SmallVector" or otype == "SmallList" or otype == "ExprListNode") + and member_functions.get(plural + "Num") != None + and (member_functions.get(singular) != None or member_functions.get(singular + "AtIndex") != None)): + func_name = singular if member_functions.get(singular) != None else singular + "AtIndex" + rtype = member_functions[func_name][2:] + if rtype == "_Bool": rtype = "bool" + ntype = get_pointed(rtype) + if (ntype != None or gen_call_handle_values()) and gen_children_num(plural) != None: + # gen_call_children_node() for list or vector of nodes before entering the loop + code.append(gen_call_children_node(dictionary, node_name, name, otype + "<" + rtype + ">", "node->" + plural + "Num()")) + 
code.append("for(unsigned i = 0; i < " + gen_children_num(plural) + "; ++i) {") + if ntype != None: + # gen_call_nth_child_node() for the nth child node in the loop for the list or vector + code.append(gen_call_nth_child_node(dictionary, node_name, name, ntype, "node->" + func_name + "(i)")) + else: + # gen_call_nth_child_value() for the nth child value in the loop for the list or vector + code.append(gen_call_nth_child_value(dictionary, node_name, name, rtype, "node->" + func_name + "(i)")) + code.append("}") + code.append(gen_call_children_node_end(dictionary, node_name, name, otype + "<" + rtype + ">", "node->" + plural + "Num()")) + elif gen_call_handle_values(): + if member_functions.get(plural) != None: + # gen_call_child_value() for child value in current function body + code.append(gen_call_child_value(dictionary, node_name, name, otype, "node->" + plural + "()")) + else: + # It is an ERROR if no member function for the child value + code.append("Error!; // " + gen_call_child_value(dictionary, node_name, name, otype, "node->" + plural + "()")) + + # gen_func_definition_end() for the code at the end of current function body + code.append(gen_func_definition_end(dictionary, node_name)) + append(src_file, code) + + code = [] + code.append(gen_func_declaration(dictionary, node_name)) + append(include_file, code) + +# Generate handler for TreeNode +def gen_handler_ast_TreeNode(dictionary): + global include_file, src_file, gen_args + code = [''] + code.append(gen_func_declaration(dictionary, "TreeNode")) + append(include_file, code) + + code = [''] + code.append(gen_func_definition(dictionary, "TreeNode")) + + code.append("switch(" + gen_switch_expr() + ") {") + for flag in get_enum_list(dictionary, "NodeKind"): + code.append("case " + flag + ":"); + node_name = flag[3:] + "Node" + filename = output_dir + 'maplefe/' + node_name + '.yaml' + if path.exists(filename): + # gen_call_child_node() for visiting child node + code.append(gen_call_child_node(dictionary, node_name, "", node_name, "static_cast<" + node_name + "*>(node)")) + elif node_name == "NullNode": + code.append("// Ignore NullNode") + else: + # it is an ERROR if the node kind is out of range + code.append("Error!!! 
// " + gen_call_child_node(dictionary, node_name, "", node_name, "static_cast<" + node_name + "*>(node)")) + code.append("break;"); + code.append('default: MASSERT(0 && "Unexpected node kind");') + code.append("}") + code.append(gen_func_definition_end(dictionary, "TreeNode")) + append(src_file, code) + +# Handle each node which has TreeNode as its base +def gen_handler_ast_node_file(dictionary): + base = dictionary.get("Bases") + if base != None: + basename = base[0].get("Name") + if basename == "TreeNode": + gen_handler_ast_node(dictionary) + +# Check each child records +def gen_handler(dictionary): + child_records = dictionary["ChildRecords"] + for child in child_records: + value = child["Name"] + filename = output_dir + 'maplefe/' + value + '.yaml' + if path.exists(filename): + handle_yaml(filename, gen_handler_ast_node_file) + # Generate handler for TreeNode + gen_handler_ast_TreeNode(dictionary) + +################################################################################ +# # +# Signature of TreeNodes # +# # +################################################################################ + +def gen_signature_of_ast_node(dictionary): + global tn_signature + tn_signature += '\n^' + dictionary["Name"]; + members = dictionary.get("Members") + if members != None: + for m in members: + tn_signature += '\n' + m.get("Name") + ':' + m.get("Type").get("Name") + +def gen_signature_of_ast_nodes(dictionary): + base = dictionary.get("Bases") + basename = base[0].get("Name") if base != None else '' + if basename == "TreeNode": + gen_signature_of_ast_node(dictionary) + +def gen_signature(dictionary): + for child in dictionary["ChildRecords"]: + filename = output_dir + 'maplefe/' + child["Name"] + '.yaml' + if path.exists(filename): + handle_yaml(filename, gen_signature_of_ast_nodes) + +tn_signature = 'Signature:' +handle_yaml(initial_yaml, gen_signature) +handle_yaml(treenode_yaml, gen_signature_of_ast_node) +signature = int(hashlib.sha256(tn_signature.encode('utf-8')).hexdigest()[-15:], 16) +append(output_dir + 'yaml.log', [tn_signature, str(signature)]) + +################################################################################ +# # +# Initialize/finalize include_file and src_file with gen_args # +# # +################################################################################ + +Initialization = 1 +Finalization = 2 +def handle_src_include_files(phase): + global include_file, src_file, gen_args + include_file = output_dir + "shared/" + gen_args[0] + ".h" + src_file = output_dir + "shared/" + gen_args[0] + ".cpp" + + include_start = [ +""" +#ifndef __{gen_args1upper}_HEADER__ +#define __{gen_args1upper}_HEADER__ + +#include "ast_module.h" +#include "ast.h" +#include "ast_type.h" +#include "ast_attr.h" +{gen_args3} + +namespace maplefe {{ + +class {gen_args1} {gen_args4} {{ +""".format(gen_args1upper=gen_args[1].upper(), gen_args1=gen_args[1], gen_args3=gen_args[3], gen_args4=gen_args[4]) +] # include_start + + include_end = [ +""" +}}; + +}} +#endif +""".format() # Use format() to match each pair of "{{" and "}}" +] # include_end + + src_start = [ +""" +#include "{gen_args0}.h" + +namespace maplefe {{ +""".format(gen_args0=gen_args[0]) +] # src_start + + src_end = [ +""" +}} +""".format() # Use format() to match each pair of "{{" and "}}" +] + if phase == Initialization: + create(include_file, license_notice + include_start) + create(src_file, license_notice + src_start) + elif phase == Finalization: + finalize(include_file, include_end) + finalize(src_file, src_end) + 
+################################################################################ +# # +# AstDump # +# # +################################################################################ + +def get_data_based_on_type(val_type, accessor): + if val_type[-10:] == "ASTScope *" or val_type[-12:] == "ASTScopePool": + return val_type + ': skipped"' + e = get_enum_type(val_type) + if e != None: + return e + ': " + GetEnum' + e + '(' + accessor + ')' + elif val_type == "LitData": + return 'LitData: LitId, " + GetEnumLitId(' + accessor + '.mType) + ", " + GetEnumLitData(' + accessor + ')' + elif val_type == "bool": + return val_type + ', ", ' + accessor + elif val_type == 'unsigned int' or val_type == 'uint32_t' or val_type == 'uint64_t' \ + or val_type == 'unsigned' or val_type == 'int' or val_type == 'int32_t' or val_type == 'int64_t' : + if accessor.find("GetStrIdx()") >= 0: + return val_type + ', " + std::to_string(' + accessor + ') + " => " + (' + accessor \ + + '? "\\""s + gStringPool.GetStringFromStrIdx(' + accessor + ') + "\\""s : "null"s)' + return val_type + ', " + std::to_string(' + accessor + ')' + elif val_type == 'const char *': + return 'const char*, " + (' + accessor + ' ? "\\""s + EncodeLiteral(' + accessor + ') + "\\""s : "null"s)' + elif val_type == 'RegExprData': + return 'RegExprData, Expr: " + "\\""s + ' + accessor + '.mExpr + "\\", Flags: \\""s + (' \ + + accessor + '.mFlags ? ' + accessor + '.mFlags : ""s) + "\\""s' + return val_type + ', " + "value" /* Warning: failed to get value */' + +def short_name(node_type): + return node_type.replace('class ', '').replace('maplefe::', '').replace(' *', '*') + +gen_padding = '' +def padding_name(name): + return gen_padding + name.ljust(7) + +# The follwoing gen_func_* and gen_call* functions are for AstDump +gen_children_num = lambda pl: 'node->' + pl + 'Num()' +gen_switch_expr = lambda: 'node->GetKind()' +gen_func_decl_location = lambda: True +gen_call_handle_values = lambda: True +gen_func_declaration = lambda dictionary, node_name: \ + "void " + gen_args[2] + node_name + "(" + node_name + "* node);" +gen_func_definition = lambda dictionary, node_name: \ + "void " + gen_args[1] + "::" + gen_args[2] + node_name + "(" + node_name + "* node) {" \ + + ('if (node == nullptr){return;}' if node_name == "TreeNode" else '\nif(DumpFB("' + node_name \ + + '", node)) { MASSERT(node->Is' + node_name.replace('Node', '()') + ');') +gen_call_child_node = lambda dictionary, node_name, field_name, node_type, accessor: \ + ('Dump("' + padding_name(field_name) + ': ' + short_name(node_type) + '*", ' + accessor + ');\n' \ + if field_name != '' else '') + gen_args[2] + short_name(node_type) + '(' + accessor + ');' +gen_call_child_value = lambda dictionary, node_name, field_name, val_type, accessor: \ + 'Dump("' + padding_name(field_name) + ': "s + "' + get_data_based_on_type(val_type, accessor) + ');' +gen_call_children_node = lambda dictionary, node_name, field_name, node_type, accessor: \ + 'DumpLB("' + padding_name(field_name) + ': ' + short_name(node_type) + ', size=", ' + accessor+ ');' +gen_call_children_node_end = lambda dictionary, node_name, field_name, node_type, accessor: 'DumpLE(' + accessor + ');' +gen_call_nth_child_node = lambda dictionary, node_name, field_name, node_type, accessor: \ + 'Dump(std::to_string(i + 1) + ": ' + short_name(node_type) + '*", ' + accessor + ');\n' \ + + gen_args[2] + short_name(node_type) + '(' + accessor + ');' +gen_call_nth_child_value = lambda dictionary, node_name, field_name, val_type, accessor: \ + 
'Dump(std::to_string(i) + ". ' + get_data_based_on_type(val_type, accessor) + ');' +gen_func_definition_end = lambda dictionary, node_name: \ + 'return;\n}' if node_name == "TreeNode" else 'DumpFE();\n}\nreturn;\n}' + +# +# Generate source files for dumping AST +# +gen_args = [ + "gen_astdump", # Filename + "AstDump", # Class name + "AstDump", # Prefix of function name + """ +#include +using namespace std::string_literals; +""", # Extra include directives + "", # Base class + ] +astdump = gen_args[0] +astdumpclass = gen_args[1] +prefixfuncname = gen_args[2] + +astdump_init = [ +""" +private: +ModuleNode *mASTModule; +std::ostream *mOs; +int indent; +std::string indstr; + +public: +{gen_args1}(ModuleNode *m) : mASTModule(m), mOs(nullptr), indent(0) {{ +indstr = std::string(256, \' \'); +for(int i = 2; i < 256; i += 4) +indstr.at(i) = \'.\'; +}} + +void Dump(const char *title, std::ostream *os) {{ + mOs = os; + *mOs << "{gen_args1}: " << title << " {{\\n"; + {gen_args2}TreeNode(mASTModule); + *mOs << "}}\\n"; +}} + +static std::string EncodeLiteral(std::string str) {{ + std::string enc; + bool esc = false; + for (auto&c : str) {{ + if(esc) {{ + switch(c) {{ + //case 'a': c = '\\a'; break; '\a' is 'a' in Javascript + case 'b': c = '\\b'; break; + case 'f': c = '\\f'; break; + case 'n': c = '\\n'; break; + case 'r': c = '\\r'; break; + case 't': c = '\\t'; break; + case 'v': c = '\\v'; break; + case '\\'': c = '\\''; break; + case '\\"': c = '"'; break; + default: enc += '\\\\'; + }} + esc = false; + }} else if(c == '\\\\') {{ + esc = true; + continue; + }} + switch(c) {{ + case '"': enc += "\\\\\\""; break; + case '\\b': enc += "\\\\b"; break; + case '\\f': enc += "\\\\f"; break; + case '\\n': enc += "\\\\n"; break; + case '\\r': enc += "\\\\r"; break; + case '\\t': enc += "\\\\t"; break; + case '\\v': enc += "\\\\v"; break; + default: enc += c; // TODO: Unicode support + }} + }} + return enc; +}} + +static std::string GetEnumLitData(LitData lit) {{ + switch (lit.mType) {{ + case LT_IntegerLiteral: + {{ std::stringstream s; + s << lit.mData.mInt; + return s.str(); + }} + case LT_FPLiteral: + return std::to_string(lit.mData.mFloat); + case LT_DoubleLiteral: + return std::to_string(lit.mData.mDouble); + case LT_BooleanLiteral: + return lit.mData.mBool ? "true" : "false"; + case LT_CharacterLiteral: + {{ std::string s = std::string(1, lit.mData.mChar.mData.mChar); + return EncodeLiteral(s); + }} + case LT_StringLiteral: + {{ std::string s = std::string(gStringPool.GetStringFromStrIdx(lit.mData.mStrIdx)); + return EncodeLiteral(s); + }} + case LT_NullLiteral: + return "null"; + case LT_ThisLiteral: + return "this"; + case LT_SuperLiteral: + return "super"; + case LT_VoidLiteral: + return "void 0"; + case LT_NA: + return "NA"; + default: + MASSERT(0 && "Unexpected LitData"); + }} + return "Unexpected"; +}} + +private: +void Dump(const std::string& msg) {{ + *mOs << indstr.substr(0, indent) << msg << std::endl; +}} + +void Dump(const std::string& msg, TreeNode *node) {{ + *mOs << indstr.substr(0, indent) << msg << (node ? "" : ", null") << std::endl; +}} + +void Dump(const std::string& msg, bool val) {{ + *mOs << indstr.substr(0, indent) << msg << (val ? 
"true" : "false") << std::endl; +}} + +TreeNode* DumpFB(const std::string& msg, TreeNode* node) {{ + if (node != nullptr) {{ + *mOs << indstr.substr(0, indent + 2) << msg; + indent += 4; + *mOs << " {{" << std::endl; + DumpTreeNode(node); + }} + return node; +}} + +void DumpFE() {{ + indent -= 4; + *mOs << indstr.substr(0, indent + 2) << "}}" << std::endl; +}} + +void DumpLB(const std::string& msg, unsigned size) {{ + *mOs << indstr.substr(0, indent) << msg << size << (size ? " [" : "") << std::endl; + indent += 4; +}} + +void DumpLE(unsigned size) {{ + indent -= 4; + if(size) + *mOs << indstr.substr(0, indent + 2) << "]" << std::endl; +}} +""".format(gen_args1=gen_args[1], gen_args2=gen_args[2]) +] # astdump_init + +handle_src_include_files(Initialization) +append(include_file, astdump_init) +handle_yaml(initial_yaml, gen_handler) +append(include_file, ['','public:']) +handle_yaml(initial_yaml, gen_enum_func) +gen_args[2] = "Dump" +gen_padding = "^ " +gen_call_child_node = lambda dictionary, node_name, field_name, node_type, accessor: \ + ('Dump("' + padding_name(field_name) + ': ' + short_name(node_type) \ + + '*, " + (' + accessor + ' ? "NodeId=" + std::to_string(' + accessor \ + + '->GetNodeId()) : "null"s));\n' if field_name == "mParent" else \ + 'Dump("' + padding_name(field_name) + ': ' + short_name(node_type) + '*", ' + accessor + ');\n' \ + + prefixfuncname + short_name(node_type) + '(' + accessor + ');') if field_name != '' else '' +gen_call_nth_child_node = lambda dictionary, node_name, field_name, node_type, accessor: \ + 'Dump(std::to_string(i + 1) + ": ' + short_name(node_type) + '*", ' + accessor + ');\n' \ + + "Ast" + gen_args[2] + short_name(node_type) + '(' + accessor + ');' +handle_yaml(treenode_yaml, gen_handler_ast_node) +handle_src_include_files(Finalization) + +################################################################################ +# # +# AstVisitor # +# # +################################################################################ + +def gen_setter(accessor): + return accessor.replace("Get", "Set").replace("()", "(n)").replace("(i)", "(i,n)") + +# The follwoing gen_func_* and gen_call* functions are for AstVisitor +gen_call_handle_values = lambda: False +gen_func_declaration = lambda dictionary, node_name: \ + 'virtual ' + node_name + '* ' + gen_args[2] + node_name + '(' + node_name + '* node);' +gen_func_definition = lambda dictionary, node_name: \ + node_name + '* ' + gen_args[1] + '::' + gen_args[2] + node_name + '(' + node_name \ + + '* node) {\nif(node != nullptr' + (' && !IsVisited(node)) {' \ + + '\nif(mTrace){std::cout << "Visiting ' + node_name + ', id=" << node->GetNodeId() << "..." 
<< std::endl;}' \ + + '\nBaseTreeNode(node);' if node_name != 'TreeNode' else ') {') +gen_call_child_node = lambda dictionary, node_name, field_name, node_type, accessor: \ + ('if(auto t = ' + accessor + ') {' + 'auto n = ' + gen_args[5] + node_type + '(t);' \ + + 'if(n != t){' + gen_setter(accessor) + ';}}' if field_name != "mParent" else '') \ + if field_name != '' else 'return ' + gen_args[5] + node_type + '(' + accessor + ');\n' +gen_call_children_node = lambda dictionary, node_name, field_name, node_type, accessor: '' +gen_call_children_node_end = lambda dictionary, node_name, field_name, node_type, accessor: '' +gen_call_nth_child_node = lambda dictionary, node_name, field_name, node_type, accessor: \ + 'if(auto t = ' + accessor + ') { auto n = ' + gen_args[5] + node_type + '(t);' \ + + 'if(n != t) {' + gen_setter(accessor) + ';}}' +gen_func_definition_end = lambda dictionary, node_name: '}\nreturn node;\n}' + +# ------------------------------------------------------- +gen_args = [ + "gen_astvisitor", # Filename + "AstVisitor", # Class name + "Visit", # Prefix of function name + "", # Extra include directives + "", # Base class + "Visit", # In body + ] +astvisitor = gen_args[0] +astvisitorclass = gen_args[1] + +astvisitor_init = [ +""" +private: +bool mTrace; +BitVector mVisited; + +public: +{gen_args1}(bool t = false) : mTrace(t) {{}} + +TreeNode* {gen_args2}(TreeNode* node) {{ + mVisited.ClearAll(); + return {gen_args2}TreeNode(node); +}} + +virtual bool IsVisited(TreeNode* node) {{ + if(mVisited.GetBit(node->GetNodeId())) + return true; + mVisited.SetBit(node->GetNodeId()); + return false; +}} +""".format(gen_args1=gen_args[1], gen_args2=gen_args[2]) +] # astvisitor_init + +# Example to extract code pieces starting from initial_yaml +handle_src_include_files(Initialization) +append(include_file, astvisitor_init) +handle_yaml(initial_yaml, gen_handler) +gen_args[2] = "Base" +handle_yaml(treenode_yaml, gen_handler_ast_node) +handle_src_include_files(Finalization) + +################################################################################ +# # +# AstGraph # +# # +################################################################################ + +# The follwoing gen_func_* and gen_call* functions are for AstGraph +gen_func_declaration = lambda dictionary, node_name: \ + 'void ' + gen_args[2] + node_name + '(' + node_name + '* node);' +gen_func_definition = lambda dictionary, node_name: \ + 'void ' + gen_args[1] + '::' + gen_args[2] + node_name + '(' + node_name + '* node) {' \ + + '\nif(node != nullptr' + (' && PutNode(node)) {\nHandleTreeNode(node);' \ + if node_name != "TreeNode" else ') {') +gen_call_child_node = lambda dictionary, node_name, field_name, node_type, accessor: \ + 'if(auto t = ' + accessor + ') {' + ('PutEdge(node, t, "' + field_name[1:] + \ + '", NK_' + node_type.replace('Node', '').replace('Tree', 'Null') + ');' \ + if field_name != '' else '') + gen_args[2] + node_type + '(t);}' +gen_call_nth_child_node = lambda dictionary, node_name, field_name, node_type, accessor: \ + 'if(auto t = ' + accessor + ') { PutChildEdge(node, t, "' + field_name[1:] \ + + '", i, NK_' + node_type.replace('Node', '').replace('Tree', 'Null') \ + + '); ' + gen_args[2] + node_type + '(t);}' +gen_func_definition_end = lambda dictionary, node_name: '}\n}' + +# ------------------------------------------------------- +gen_args = [ + "gen_astgraph", # Filename + "AstGraph", # Class name + "DumpGraph", # Prefix of function name + """ +#include "stringpool.h" +#include "{astdump}.h" 
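The generated AstDump and AstVisitor classes defined above are meant to be driven from frontend code: AstDump takes the ModuleNode and streams an indented dump, while AstVisitor walks the tree once (revisits are blocked by the node-id BitVector) and lets subclasses override individual Visit<Node>Node hooks. The sketch below shows one plausible way to use them; the subclass, function name and printing are illustrative and not part of this patch.

// Illustrative usage of the generated helpers (assumes the emitted
// gen_astdump.h / gen_astvisitor.h plus the existing AST headers).
#include <iostream>
#include "gen_astdump.h"
#include "gen_astvisitor.h"

namespace maplefe {

// Count identifiers by overriding a single generated hook; traversal,
// child rewriting and the visited-set are inherited from AstVisitor.
class IdentifierCounter : public AstVisitor {
public:
  unsigned mCount = 0;
  IdentifierNode *VisitIdentifierNode(IdentifierNode *node) override {
    ++mCount;
    return AstVisitor::VisitIdentifierNode(node);
  }
};

void InspectModule(ModuleNode *module) {
  AstDump dump(module);
  dump.Dump("after parsing", &std::cout);   // indented textual dump of the tree

  IdentifierCounter counter;
  counter.Visit(module);                    // single pass over the whole module
  std::cout << "identifiers: " << counter.mCount << std::endl;
}

}  // namespace maplefe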
+#include +#include +#include +#include """.format(astdump = astdump), # Extra include directives + "", # Base class + ] + +astgraph_init = [ +""" +public: +{gen_args1}(TreeNode *m) : mRoot(m), mOs(nullptr) {{}} + +#define NodeName(n,s) ({astdumpclass}::GetEnumNodeKind((n)->GetKind()) + 3) << s << n->GetNodeId() +#define EnumVal(t,e,m) {astdumpclass}::GetEnum##e((static_cast(n))->Get##m()) +#define NodeColor(c) "\\",style=filled,color=white,fillcolor=\\""#c + +void {gen_args2}(const char *title, std::ostream *os) {{ + mNodes.clear(); + mOs = os; + mTitle = title; + *mOs << "digraph AST_Module {{\\nrankdir=LR;\\n"; + {gen_args2}TreeNode(mRoot); + *mOs << "}}\\n"; +}} + +bool PutNode(TreeNode *n) {{ + if(n && mNodes.find(n) == mNodes.end()) {{ + mNodes.insert(n); + *mOs << NodeName(n,\'_\') << " [label=\\"" << NodeName(n,',') << "\\\\n"; + std::string tid(EnumVal(TreeNode, TypeId, TypeId)); + if(tid != "TY_None" || n->GetTypeIdx() != 0) *mOs << "<" << tid << " " << n->GetTypeIdx() << ">\\\\n"; + + switch(n->GetKind()) {{ + case NK_Module: {{ + auto fn = static_cast(n)->GetFilename(); + if(auto p = std::strrchr(fn, '/')) + fn = p + 1; + *mOs << fn << "\\\\n" << mTitle << "\\",shape=\\"box"; break; + }} + case NK_Function: *mOs << (n->GetStrIdx() ? n->GetName() : "_anonymous_") << NodeColor(lightcoral); break; + case NK_Lambda: *mOs << NodeColor(pink); break; + case NK_Call: *mOs << NodeColor(burlywood); break; + case NK_Class: *mOs << (n->GetStrIdx() ? n->GetName() : ""); break; + case NK_Block: *mOs << NodeColor(lightcyan); break; + case NK_CondBranch: *mOs << NodeColor(lightblue); break; + case NK_Return: *mOs << NodeColor(tan); break; + case NK_Break: *mOs << NodeColor(peachpuff); break; + case NK_Continue: *mOs << NodeColor(paleturquoise); break; + case NK_SwitchCase: + case NK_SwitchLabel: + case NK_Switch: *mOs << NodeColor(powderblue); break; + case NK_ForLoop: *mOs << EnumVal(ForLoopNode, ForLoopProp, Prop); + case NK_WhileLoop: + case NK_DoLoop: *mOs << NodeColor(lightskyblue); break; + case NK_Identifier: *mOs << "\\\\\\"" << n->GetName() << "\\\\\\"" << NodeColor(wheat); break; + case NK_Decl: *mOs << EnumVal(DeclNode, DeclProp, Prop) << NodeColor(palegoldenrod); break; + case NK_PrimType: *mOs << EnumVal(PrimTypeNode, TypeId, PrimType) << NodeColor(lemonchiffon); break; + case NK_BinOperator: *mOs << EnumVal(BinOperatorNode, OprId, OprId); + case NK_TerOperator: + *mOs << NodeColor(palegreen); break; + case NK_UnaOperator: *mOs << EnumVal(UnaOperatorNode, OprId, OprId); + case NK_InstanceOf: + case NK_TypeOf: *mOs << NodeColor(lightgreen); break; + case NK_Literal: {{ + std::string s({astdumpclass}::GetEnumLitData(static_cast(n)->GetData())); + std::replace(s.begin(), s.end(), '"', ':'); + *mOs << s; + break; + }} + case NK_Pass: *mOs << NodeColor(darkgrey); break; + case NK_AsType: *mOs << NodeColor(bisque); break; + case NK_New: *mOs << NodeColor(khaki); break; + case NK_Try: *mOs << NodeColor(plum); break; + case NK_Catch: *mOs << NodeColor(thistle); break; + case NK_Finally: *mOs << NodeColor(thistle); break; + case NK_Throw: *mOs << NodeColor(plum); break; + case NK_Dimension: *mOs << static_cast(n)->GetDimensionsNum() << " dim(s)"; break; + case NK_UserType: *mOs << EnumVal(UserTypeNode, UT_Type, Type); break; + case NK_XXportAsPair: *mOs << (static_cast(n)->IsDefault() ? "default" : ""); + *mOs << (static_cast(n)->IsRef() ? 
" ref" : " copy"); break; + case NK_Struct: *mOs << EnumVal(StructNode, StructProp, Prop); *mOs << "\\\\n" << n->GetName(); break; + }} + if(n->IsStmt()) + *mOs << "\\",penwidth=2,color=\\"tomato"; + *mOs << "\\"];\\n"; + return true; + }} + return false; +}} + +void PutEdge(TreeNode *from, TreeNode *to, const char *field, NodeKind k) {{ + if(to) + *mOs << NodeName(from,\'_\') << " -> " << NodeName(to,\'_\') << "[label=" << field + << (to->GetParent() == from ? ",arrowhead=diamond" : "") << "];\\n"; +}} + +void PutChildEdge(TreeNode *from, TreeNode *to, const char *field, unsigned idx, NodeKind k) {{ + if(to) + *mOs << NodeName(from,\'_\') << " -> " << NodeName(to,\'_\') << "[label=\\"" << field + << "[" << idx << "]\\"" << (to->GetParent() == from ? ",arrowhead=diamond" : "") << "];\\n"; +}} + +void HandleTreeNode(TreeNode *node) {{ + if (auto t = node->GetLabel()) {{ + PutEdge(node, t, "Label", NK_Null); + DumpGraphTreeNode(t); + }} + for (unsigned i = 0; i < node->GetAsTypesNum(); ++i) + if (auto t = node->GetAsTypeAtIndex(i)) {{ + PutChildEdge(node, t, "AsTypes", i, NK_AsType); + DumpGraphAsTypeNode(t); + }} +}} + +private: +TreeNode *mRoot; +std::ostream *mOs; +std::set mNodes; +const char *mTitle; +""".format(gen_args1=gen_args[1], gen_args2=gen_args[2], astdumpclass=astdumpclass) +] # astgraph_init + +handle_src_include_files(Initialization) +append(include_file, astgraph_init) +handle_yaml(initial_yaml, gen_handler) +handle_src_include_files(Finalization) + +################################################################################ +# # +# Emitter # +# # +################################################################################ + +def get_data_based_on_type(val_type, accessor): + e = get_enum_type(val_type) + if e == "ASTScope *": + return e + ': " + "' + accessor + '");' + elif e != None: + return astdumpclass + '::GetEnum' + e + '(' + accessor + ')' + elif val_type == "LitData": + return astdumpclass + '::GetEnumLitData(' + accessor + ')' + elif val_type == "bool": + return 'std::to_string(' + accessor + ')' + elif val_type == 'unsigned int' or val_type == 'uint32_t' or val_type == 'uint64_t' \ + or val_type == 'unsigned' or val_type == 'int' or val_type == 'int32_t' or val_type == 'int64_t' : + return 'std::to_string(' + accessor + ')' + elif val_type == 'const char *': + return 'std::to_string(' + accessor + ' ? 
std::string("\\"") + ' + accessor + ' + "\\"" : "null")' + return 'Warning: failed to get value with ' + val_type + ", " + accessor + +def short_name(node_type): + return node_type.replace('class ', '').replace('maplefe::', '').replace(' *', '*') + +# The follwoing gen_func_* and gen_call* functions are for Emitter +gen_func_decl_location = lambda: False +gen_call_handle_values = lambda: True +gen_func_declaration = lambda dictionary, node_name: \ + "virtual std::string " + gen_args[2] + node_name + "(" + node_name + "* node);" +gen_func_definition = lambda dictionary, node_name: \ + "std::string " + gen_args[1] + "::" + gen_args[2] + node_name + "(" + node_name + "* node) {" \ + + 'if (node == nullptr) \nreturn std::string();' \ + + ('' if node_name == "TreeNode" else \ + 'std::string str;') +gen_call_child_node = lambda dictionary, node_name, field_name, node_type, accessor: \ + 'if(auto n = ' + accessor + ') {str += " "s + ' + gen_args[2] + short_name(node_type) + '(n);}' \ + if field_name != '' else \ + 'return ' + gen_args[2] + short_name(node_type) + '(' + accessor + ');' +gen_call_child_value = lambda dictionary, node_name, field_name, val_type, accessor: \ + 'str += " "s + ' + get_data_based_on_type(val_type, accessor) + ';' +gen_call_nth_child_node = lambda dictionary, node_name, field_name, node_type, accessor: \ + 'if(i)str+= ", "s; if(auto n = ' + accessor + ') {str += " "s + ' + gen_args[2] + short_name(node_type) + '(n);}' +gen_call_nth_child_value = lambda dictionary, node_name, field_name, val_type, accessor: \ + 'str += " "s + ' + get_data_based_on_type(val_type, accessor) + ';' +gen_func_definition_end = lambda dictionary, node_name: \ + 'mPrecedence = \'\\030\'; if(node->IsStmt()) str += ";\\n"s;' \ + + 'return str;}' if node_name != "TreeNode" else 'return std::string();}' + +# +gen_args = [ + "gen_emitter", # Filename + "Emitter", # Class name + "Emit", # Prefix of function name + """ +#include "ast_handler.h" +#include "{astdump}.h" +using namespace std::string_literals; +""".format(astdump = astdump), # Extra include directives + "" + "", # Base class + ] + +astemit_init = [ +""" +protected: +using Precedence = unsigned char; +Precedence mPrecedence; + +Module_Handler *mHandler; + +public: +{gen_args1}(Module_Handler *h) : mHandler(h) {{}} + +std::string {gen_args2}(const char *title); +std::string GetEnding(TreeNode *n); +std::string Clean(std::string &s); +std::string GetBaseFilename(); +std::string GetModuleName(const char *p = nullptr); + +ModuleNode *GetASTModule() {{ return mHandler->GetASTModule(); }} + +""".format(gen_args1=gen_args[1], gen_args2=gen_args[2]) +] # astemit_init + +if False: + handle_src_include_files(Initialization) + append(src_file, ['using namespace std::string_literals;']) + append(include_file, astemit_init) + handle_yaml(initial_yaml, gen_handler) + handle_src_include_files(Finalization) + +################################################################################ +# # +# AstStore # +# # +################################################################################ + +def get_data_based_on_type(val_type, accessor): + if val_type[-10:] == "ASTScope *" or val_type[-12:] == "ASTScopePool": + return '; // Skip ' + val_type + e = get_enum_type(val_type) + if e != None: + return 'WriteValue(static_cast(' + accessor + '));' + elif val_type == "bool": + return 'WriteValue(static_cast(' + accessor + '));' + elif val_type == 'unsigned int' or val_type == 'uint32_t' or val_type == 'uint64_t' \ + or val_type == 'unsigned' or val_type == 
'int' or val_type == 'int32_t' or val_type == 'int64_t' : + return ('AddStrIdx(' + accessor + ');' if accessor.find("GetStrIdx()") >= 0 else '') \ + + 'WriteValue(static_cast(' + accessor + '));' + elif val_type == "LitData": + return 'if(' + accessor + '.mType == LT_StringLiteral) AddStrIdx(' + accessor + '.mData.mStrIdx);' \ + + 'WriteValue(static_cast(' + accessor + '.mType));' \ + + 'WriteValue(' + accessor + '.mData.mInt64);' + elif val_type == 'const char *': + return 'WriteString(' + accessor + ');' + elif val_type == 'RegExprData': + return 'WriteString(' + accessor + '.mExpr);\nWriteString(' + accessor + '.mFlags);' + return 'Failed to get value with ' + val_type + ", " + accessor + ';' + +def short_name(node_type): + return node_type.replace('class ', '').replace('maplefe::', '').replace(' *', '*') + +# The follwoing gen_func_* and gen_call* functions are for AstStore +# +gen_func_decl_location = lambda: False +gen_call_handle_values = lambda: True +gen_func_declaration = lambda dictionary, node_name: \ + "void " + gen_args[2] + node_name + "(" + node_name + "* node);" +gen_func_definition = lambda dictionary, node_name: \ + "void " + gen_args[1] + "::" + gen_args[2] + node_name + "(" + node_name + "* node) {" \ + + ('' if node_name == "TreeNode" else 'WriteNode(node);') +gen_call_child_node = lambda dictionary, node_name, field_name, node_type, accessor: \ + 'WriteAddress(' + accessor + '); // ' + field_name + ': ' + node_type if field_name != '' else \ + gen_args[2] + short_name(node_type) + '(' + accessor + ');' +gen_call_child_value = lambda dictionary, node_name, field_name, val_type, accessor: \ + get_data_based_on_type(val_type, accessor) + ' // ' + field_name + ': ' + val_type +gen_call_children_node = lambda dictionary, node_name, field_name, node_type, accessor: \ + 'WriteLength(' + accessor + '); // ' + field_name + ': ' + node_type +gen_call_nth_child_node = lambda dictionary, node_name, field_name, node_type, accessor: \ + 'WriteAddress(' + accessor + '); // ' + field_name + ': ' + node_type +gen_call_nth_child_value = lambda dictionary, node_name, field_name, val_type, accessor: \ + get_data_based_on_type(val_type, accessor) + ' // ' + field_name + ': ' + val_type +gen_func_definition_end = lambda dictionary, node_name: '}' +# +gen_args = [ + "gen_aststore", # Filename + "AstStore", # Class name + "Store", # Prefix of function name + """ +#include "stringpool.h" +#include "{astvisitor}.h" +#include +#include +#include +namespace maplefe {{ +using AstBuffer = std::vector; +using AstNodeVec = std::vector; +}} +""".format(astvisitor=astvisitor), + ": public " + astvisitorclass, # Base class + ] + +#tag_control = 'true' +tag_control = "tag != 'A' && tag != 'V'" + +aststore_init = [ +""" +private: +ModuleNode *mASTModule; +AstBuffer mAstBuf {{'M', 'P'}}; +AstBuffer *mBufPtr; +std::set mStrIdxSet; + +public: +{gen_args1}(ModuleNode *m) : mASTModule(m) {{}} + +AstBuffer& GetAstBuf() {{return mAstBuf;}} + +bool {gen_args2}InAstBuf() {{ + AstBuffer node_buf; + mAstBuf.erase(mAstBuf.begin() + 2, mAstBuf.end()); + mBufPtr = &mAstBuf; + WriteNum('L', {signature}LL); + mAstBuf.reserve(32768); // For performance + node_buf.reserve(32768); + mBufPtr = &node_buf; + VisitTreeNode(mASTModule); + mBufPtr = &mAstBuf; + WriteStrIdxTable(); + mAstBuf.insert(mAstBuf.end(), node_buf.begin(), node_buf.end()); + mStrIdxSet.clear(); + return true; +}} + +bool IsVisited(TreeNode* node) {{ + if({astvisitorclass}::IsVisited(node)) + return true; + {gen_args2}TreeNode(node); + return false; +}} + 
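AstStore, being assembled above, serializes a ModuleNode into a byte buffer: StoreInAstBuf writes the 'M','P' magic, an 'L' record carrying the signature hashed from the TreeNode field layout (so AstLoad can reject a stale .ast buffer), then the string-index table followed by the per-node records. Persisting that buffer could look roughly like the sketch below; the file handling and the SaveModule name are illustrative, not defined by this patch.

// Illustrative only: write the buffer produced by the generated AstStore to disk.
#include <fstream>
#include "gen_aststore.h"

namespace maplefe {

bool SaveModule(ModuleNode *module, const char *path /* e.g. "foo.ast", hypothetical */) {
  AstStore store(module);
  if (!store.StoreInAstBuf())
    return false;
  AstBuffer &buf = store.GetAstBuf();   // 'M','P', signature, StrIdx table, node records
  std::ofstream out(path, std::ios::binary);
  out.write(reinterpret_cast<const char *>(buf.data()), buf.size());
  return out.good();
}

}  // namespace maplefe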
+// Tags: +// 'N': Beginning of a tree node +// 'A': address of a child tree node +// 'V': value of a field in a tree node +// 'L': list/vector of chrildren in a tree node +// 'S': char string of a field in a tree node +// 'T': StrIdx Table +// The initial version will keep all tags, and some of them can be optimized out + +// LEB128, same as for MapleIR +void WriteNum(uint8_t tag, int64_t x) {{ + if({tag_control}) + mBufPtr->push_back(tag); + while (x < -0x40 || x >= 0x40) {{ + mBufPtr->push_back(static_cast((static_cast(x) & 0x7F) + 0x80)); + x = x >> 7; + }} + mBufPtr->push_back(static_cast(static_cast(x) & 0x7F)); +}} + +void WriteNode(TreeNode *node) {{ + AstBuffer *tmp = mBufPtr; + mBufPtr = &mAstBuf; + WriteNum('N', static_cast(node->GetKind())); + WriteNum('V', static_cast(node->GetNodeId())); + mBufPtr = tmp; + //WriteNum('N', static_cast(node->GetKind())); + WriteTreeNode(node); // Base TreeNode +}} + +void WriteAddress(TreeNode *node) {{ + if(node) + WriteNum('A', static_cast(node->GetNodeId())); + else + WriteNum('A', -1); +}} + +void WriteValue(int64_t val) {{ + WriteNum('V', val); +}} + +void WriteLength(unsigned len) {{ + WriteNum('L', static_cast(len)); +}} + +void WriteString(const char *str) {{ + if(const char *p = str) {{ + WriteNum('S', static_cast(std::strlen(p) + 1)); + do {{ + mBufPtr->push_back(static_cast(*p)); + }} while(*p++); + }} else + WriteNum('S', 0); +}} + +void WriteStrIdxTable() {{ + WriteNum('T', static_cast(mStrIdxSet.size())); + for(auto s: mStrIdxSet) {{ + WriteValue(s); + WriteString(gStringPool.GetStringFromStrIdx(s)); + }} +}} + +void AddStrIdx(unsigned idx) {{ + if(idx) + mStrIdxSet.insert(idx); +}} + +""".format(signature=signature, gen_args1=gen_args[1], gen_args2=gen_args[2], \ + astvisitorclass=astvisitorclass, tag_control=tag_control) +] # aststore_init + +handle_src_include_files(Initialization) +append(src_file, ['using namespace std::string_literals;']) +append(include_file, aststore_init) +handle_yaml(initial_yaml, gen_handler) +gen_args[2] = "Write" +handle_yaml(treenode_yaml, gen_handler_ast_node) +handle_src_include_files(Finalization) + +################################################################################ +# # +# AstLoad # +# # +################################################################################ + +def gen_setter(accessor): + return accessor.replace("Get", "Set").replace("()", "(n)").replace("(i)", "(i,n)").replace("Is", "SetIs") + +def gen_add_setter(accessor): + return accessor.replace("Get", "Add").replace("AtIndex", "").replace("(i)", "(n)") + +def short_name(node_type): + return node_type.replace('class ', '').replace('maplefe::', '').replace(' *', '*') + +def set_data_based_on_type(val_type, accessor, setter): + if val_type[-10:] == "ASTScope *" or val_type[-12:] == "ASTScopePool": + return '; /* Skip ' + val_type + ' */' + e = get_enum_type(val_type) + if e != None: + return e + ' n = static_cast<' + e + '>(ReadValue());' + setter(accessor) + ';' + elif val_type == "bool": + return val_type + ' n = static_cast<' + val_type + '>(ReadValue());' + setter(accessor) + ';' + elif val_type == 'unsigned int' or val_type == 'uint32_t' or val_type == 'uint64_t' \ + or val_type == 'unsigned' or val_type == 'int' or val_type == 'int32_t' or val_type == 'int64_t' : + return (val_type + ' n = static_cast<' + val_type + '>(ReadValue());' if accessor.find("GetStrIdx()") < 0 \ + else val_type + ' n = static_cast<' + val_type + '>(mStrMap[ReadValue()]);') \ + + (setter(accessor) + ';' if accessor.find("GetNodeId()") < 0 
else '/* ' + setter(accessor) + '; */') + elif val_type == "LitData": + return val_type + ' n; n.mType = static_cast(ReadValue());if(n.mType == LT_StringLiteral)' \ + + 'n.mData.mInt64 = mStrMap[ReadValue()]; else n.mData.mInt64 = ReadValue();' + setter(accessor) + ';' + elif val_type == 'const char *': + return val_type + ' n = ReadString();' + setter(accessor) + ';' + elif val_type == 'RegExprData': + return val_type + ' n; n.mExpr = ReadString(); n.mFlags = ReadString();' + setter(accessor) + ';' + return 'Failed to get value with ' + val_type + ", " + accessor + ';' + +# The follwoing gen_func_* and gen_call* functions are for AstLoad +# +gen_children_num = lambda pl: 'num' +gen_func_decl_location = lambda: False +gen_call_handle_values = lambda: True +gen_func_declaration = lambda dictionary, node_name: \ + "void " + gen_args[2] + node_name + '(' + node_name + ' *node);' +gen_func_definition = lambda dictionary, node_name: \ + "void " + gen_args[1] + "::" + gen_args[2] + node_name + '(' + node_name + ' *node) {\n' \ + + ('' if node_name == "TreeNode" else 'InitTreeNode(node);') +gen_call_child_node = lambda dictionary, node_name, field_name, node_type, accessor: \ + '{' + node_type + '* n = static_cast<' + node_type + '*>(ReadAddress()); ' + gen_setter(accessor) \ + + '; } // ' + field_name + ': ' + node_type \ + if field_name != '' else gen_args[2] + short_name(node_type) + '(static_cast<' + short_name(node_name) + '*>(node));' +gen_call_child_value = lambda dictionary, node_name, field_name, val_type, accessor: \ + '{' + set_data_based_on_type(val_type, accessor, gen_setter) + '} // ' + field_name + ': ' + val_type +gen_call_children_node = lambda dictionary, node_name, field_name, node_type, accessor: \ + '{unsigned num = ReadLength(); // ' + field_name + ': ' + node_type +gen_call_nth_child_node = lambda dictionary, node_name, field_name, node_type, accessor: \ + node_type + '* n = static_cast<' + node_type + '*>(ReadAddress()); ' + gen_add_setter(accessor) \ + + '; // ' + field_name + ': ' + node_type +gen_call_nth_child_value = lambda dictionary, node_name, field_name, val_type, accessor: \ + '{' + set_data_based_on_type(val_type, accessor, gen_add_setter) + '} // ' + field_name + ': ' + val_type +gen_call_children_node_end = lambda dictionary, node_name, field_name, node_type, accessor: '}' +gen_func_definition_end = lambda dictionary, node_name: 'return;}' +# +gen_args = [ + "gen_astload", # Filename + "AstLoad", # Class name + "Load", # Prefix of function name + """ +#include "stringpool.h" +#include "ast_mempool.h" +#include "{astvisitor}.h" +#include +#include +#include +#include +namespace maplefe {{ +using AstBuffer = std::vector; +using AstBufIter = std::vector::iterator; +using AstNodeVec = std::vector; +using AstNodeMap = std::unordered_map; +using AstStrMap = std::unordered_map; +}} +""".format(astvisitor=astvisitor), + "" # Base class + ] + +astload_init = [ +""" +private: +AstBufIter it; +AstBufIter end; +AstStrMap mStrMap; // key: previous str id, val: new str id +AstNodeMap mNodeMap; // key: previous node id, val: TreeNode* + +public: +ModuleNode *{gen_args2}FromAstBuf(AstBuffer &buf) {{ + it = buf.begin(); + end = buf.end(); + return Next(); +}} + +ModuleNode *Next() {{ + if(it == end) + return nullptr; + bool check = *it++ == 'M'; + check &= *it++ == 'P'; + if(!check) {{ + std::cerr << "Error: Unknown file type." 
<< std::endl; + return nullptr; + }} + int64_t sig = ReadNum('L'); + if(sig != {signature}LL) {{ + std::cerr << "Error: Unknown signature " << sig << ". Expected {signature}." << std::endl; + return nullptr; + }} + AstNodeVec node_vec; + while(*it != 'T') + node_vec.push_back(CreateNode()); + ModuleNode *module = static_cast(node_vec.front()); + ReadStrIdxTable(); + for(auto iter = node_vec.begin(); iter != node_vec.end(); ++iter) + ReadNode(*iter); + mStrMap.clear(); + mNodeMap.clear(); + return module; +}} + +TreeNode *CreateNode() {{ + NodeKind k = static_cast(ReadNum('N')); + unsigned id = static_cast(ReadNum('V')); + TreeNode *node = CreateTreeNode(k); + mNodeMap[id] = node; + return node; +}} + +// LEB128, same as for MapleIR +int64_t ReadNum(uint8_t tag) {{ + if({tag_control}) {{ + bool check = tag == *it++; + MASSERT(check); + }} + uint64_t n = 0; + int64_t y = 0; + uint64_t b = static_cast(*it++); + while (b >= 0x80) {{ + y += ((b - 0x80) << n); + n += 7; + b = static_cast(*it++); + }} + b = (b & 0x3F) - (b & 0x40); + return y + (b << n); +}} + +void ReadNode(TreeNode *node) {{ + //NodeKind k = static_cast(ReadNum('N')); + //MASSERT(k == node->GetKind()); + LoadTreeNode(node); +}} + +TreeNode *ReadAddress() {{ + int64_t n = ReadNum('A'); + return n != -1 ? mNodeMap[static_cast(n)] : nullptr; +}} + +int64_t ReadValue() {{ + return ReadNum('V'); +}} + +int64_t ReadLength() {{ + return ReadNum('L'); +}} + +const char *ReadString() {{ + int64_t len = ReadNum('S'); + if(len) {{ + const char *res = gStringPool.FindString(reinterpret_cast(&(*it))); + it += len; + return res; + }} else + return nullptr; +}} + +void ReadStrIdxTable() {{ + int64_t num = ReadNum('T'); + for(int64_t i = 0; i < num; ++i) {{ + unsigned id = static_cast(ReadValue()); + const char *s = ReadString(); + unsigned nid = gStringPool.GetStrIdx(s); + mStrMap[id] = nid; + }} +}} + +""".format(signature=signature, gen_args1=gen_args[1], gen_args2=gen_args[2], \ + astvisitorclass=astvisitorclass, tag_control=tag_control) +] # astload_init + +handle_src_include_files(Initialization) +append(src_file, ['using namespace std::string_literals;']) +append(include_file, astload_init) +handle_yaml(initial_yaml, gen_handler) +gen_args[2] = "Init" +gen_func_declaration = lambda dictionary, node_name: \ + 'void ' + gen_args[2] + node_name + '(TreeNode *node);' +gen_func_definition = lambda dictionary, node_name: \ + 'void ' + gen_args[1] + "::" + gen_args[2] + node_name + '(TreeNode *node) {' +gen_func_definition_end = lambda dictionary, node_name: '}' +handle_yaml(treenode_yaml, gen_handler_ast_node) +gen_args[2] = "Create" +gen_switch_expr = lambda: 'k' +gen_children_num = lambda pl: None +gen_call_handle_values = lambda: False +gen_func_declaration = lambda dictionary, node_name: \ + node_name + "* " + gen_args[2] + node_name + "(NodeKind k);" +gen_func_definition = lambda dictionary, node_name: \ + node_name + "* " + gen_args[1] + "::" + gen_args[2] + node_name + "(NodeKind k) {\n" \ + + (node_name + ' *node;' if node_name == "TreeNode" else \ + node_name + ' *node = new (gTreePool.NewTreeNode(sizeof(' + node_name + '))) ' + node_name + '();') +gen_call_child_node = lambda dictionary, node_name, field_name, node_type, accessor: \ + '' if field_name != '' else 'node = ' + gen_args[2] + short_name(node_type) + '(k);' +gen_call_children_node = lambda dictionary, node_name, field_name, node_type, accessor: '' +gen_call_nth_child_node = lambda dictionary, node_name, field_name, node_type, accessor: '' +gen_call_children_node_end = 
lambda dictionary, node_name, field_name, node_type, accessor: '' +gen_func_definition_end = lambda dictionary, node_name: 'return node;}' +handle_yaml(initial_yaml, gen_handler) +handle_src_include_files(Finalization) diff --git a/src/MapleFE/scripts/perf-java.sh b/src/MapleFE/scripts/perf-java.sh new file mode 100755 index 0000000000000000000000000000000000000000..b838fd3675cf0b76f7ba0a6bc9aee3d477e59d70 --- /dev/null +++ b/src/MapleFE/scripts/perf-java.sh @@ -0,0 +1,18 @@ +#!/bin/bash +# This script is to measure the runtime performance of java2ast and other executables +# If perf is not installed yet, please install the package linux-tools-common with all dependencies +TS2AST=$(dirname $0)/../output/java/bin/java2ast +CMD="sudo perf record -e cpu-cycles,cache-misses --call-graph fp -F 10000 -o perf.data" +if [ $# -eq 0 ]; then + echo "Usage: $0 " + echo " $0 " + exit 1 +elif [ $# -eq 1 -a "$(basename $1)" != "$(basename $1 .ts)" ]; then + echo $CMD $TS2AST "$@" + $CMD $TS2AST "$@" +else + echo $CMD "$@" + $CMD "$@" +fi +echo sudo perf report +sudo perf report diff --git a/src/MapleFE/scripts/perf.sh b/src/MapleFE/scripts/perf.sh new file mode 100755 index 0000000000000000000000000000000000000000..7d3c9bbbd695f5b0d3896bd344bc96d72839da3b --- /dev/null +++ b/src/MapleFE/scripts/perf.sh @@ -0,0 +1,18 @@ +#!/bin/bash +# This script is to measure the runtime performance of ts2ast and other executables +# If perf is not installed yet, please install the package linux-tools-common with all dependencies +TS2AST=$(dirname $0)/../output/typescript/bin/ts2ast +CMD="sudo perf record -e cpu-cycles,cache-misses --call-graph fp -F 10000 -o perf.data" +if [ $# -eq 0 ]; then + echo "Usage: $0 " + echo " $0 " + exit 1 +elif [ $# -eq 1 -a "$(basename $1)" != "$(basename $1 .ts)" ]; then + echo $CMD $TS2AST "$@" + $CMD $TS2AST "$@" +else + echo $CMD "$@" + $CMD "$@" +fi +echo sudo perf report +sudo perf report diff --git a/src/MapleFE/shared/Makefile b/src/MapleFE/shared/Makefile index 23e7e196bb2a0b8fc522c57cf1eff8239f2bbef3..a36bf4408e58eba9d2274287221a13d0f1b632a0 100644 --- a/src/MapleFE/shared/Makefile +++ b/src/MapleFE/shared/Makefile @@ -1,10 +1,24 @@ +# Copyright (C) [2020] Futurewei Technologies, Inc. All rights reverved. +# +# OpenArkFE is licensed under the Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# + include ../Makefile.in all: $(MAKE) -C src clean: - rm -rf $(MAPLEFE_ROOT)/$(BUILDDIR)/shared + rm -rf $(BUILDDIR)/shared test: $(MAKE) -C ../test p diff --git a/src/MapleFE/shared/include/appnode_pool.h b/src/MapleFE/shared/include/appnode_pool.h new file mode 100644 index 0000000000000000000000000000000000000000..70ed2959f21e195c79841fe5e62039dd866d3ad9 --- /dev/null +++ b/src/MapleFE/shared/include/appnode_pool.h @@ -0,0 +1,49 @@ +/* +* Copyright (C) [2020] Futurewei Technologies, Inc. All rights reverved. +* +* OpenArkFE is licensed under the Mulan PSL v2. +* You can use this software according to the terms and conditions of the Mulan PSL v2. 
+* You may obtain a copy of Mulan PSL v2 at: +* +* http://license.coscl.org.cn/MulanPSL2 +* +* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +* FIT FOR A PARTICULAR PURPOSE. +* See the Mulan PSL v2 for more details. +*/ +////////////////////////////////////////////////////////////////////////////// +// This file contains the Memory Pool for AppealNode which are dynamically // +// allocated. // +////////////////////////////////////////////////////////////////////////////// + +#ifndef __APPNODE_POOL_H__ +#define __APPNODE_POOL_H__ + +#include "mempool.h" + +namespace maplefe { + +class AppealNode; + +// this Pool contains two types of dynamic memory. +// (1) Those managed by mMP. This is where all the AppealNode come from. +// (2) Those managed by some containers in some AppealNode. For example, the +// SmallVector of children nodes. These are maintained by containers. +// +class AppealNodePool { +private: + MemPool mMP; +public: + AppealNodePool(){} + ~AppealNodePool() {mMP.Release();} + + void SetBlockSize(unsigned s) {mMP.SetBlockSize(s);} + AppealNode* NewAppealNode(); + + // Clear all data, keep the memory + void Clear() {mMP.Clear();} +}; + +} +#endif diff --git a/src/MapleFE/shared/include/ast.h b/src/MapleFE/shared/include/ast.h index 43c2c003e0633608e04028c107833e4a391f670d..347f8d443493ec5bb3bf048c33a8f7d29e0328a6 100644 --- a/src/MapleFE/shared/include/ast.h +++ b/src/MapleFE/shared/include/ast.h @@ -1,5 +1,5 @@ /* -* Copyright (C) [2020] Futurewei Technologies, Inc. All rights reverved. +* Copyright (C) [2020-2022] Futurewei Technologies, Inc. All rights reverved. * * OpenArkFE is licensed under the Mulan PSL v2. * You can use this software according to the terms and conditions of the Mulan PSL v2. @@ -46,6 +46,7 @@ // A Function node have its arguments as children node. The return value is not counted. // +#include "stringpool.h" #include "ast_mempool.h" #include "container.h" @@ -54,6 +55,8 @@ namespace maplefe { +#define SETPARENT(n) if(n && !n->IsPrimType()) n->SetParent(this) + enum NodeKind { #undef NODEKIND #define NODEKIND(K) NK_##K, @@ -69,36 +72,102 @@ enum NodeKind { // if needed in order to invoke the derived class destructor. class AnnotationNode; +class AsTypeNode; +class IdentifierNode; +class FunctionNode; +class UserTypeNode; +class ComputedNameNode; +class ASTScope; + class TreeNode { protected: NodeKind mKind; + unsigned mNodeId; TreeNode *mParent; TreeNode *mLabel; // label of a statement, or expression. - const char *mName; + unsigned mStrIdx; + unsigned mTypeIdx; // typetable index + TypeId mTypeId; // typeId of the node + ASTScope *mScope; + + bool mIsStmt; // if a node is a statement + bool mIsOptional; // if a node is optionally existing during runtime. + // This design is first coming from Javascript. + bool mIsNonNull; // if a node is asserted to be non-null. + // This design is first coming from Typescript. + bool mIsRest; // A spread or rest syntax in Javascript. + bool mIsConst; // A constant node. Readonly. + + // This is a feature coming from TypeScript. Almost every expression in Typescript has + // this information. So it's here. 
+ SmallVector mAsTypes; + public: - TreeNode() {mKind = NK_Null; mLabel = NULL; mParent = NULL; mName = NULL;} + TreeNode(NodeKind k, unsigned i) + : mKind(k), mLabel(nullptr), mParent(nullptr), mStrIdx(i), mIsStmt(false), mTypeId(TY_None), mTypeIdx(0), + mScope(nullptr), mIsOptional(false), mIsNonNull(false), mIsRest(false), mIsConst(false) {} + TreeNode(NodeKind k) : TreeNode(k, 0) {} + //TreeNode() : TreeNode(NK_Null, 0) {} virtual ~TreeNode() {} #undef NODEKIND #define NODEKIND(K) bool Is##K() const {return mKind == NK_##K;} #include "ast_nk.def" - bool IsScope() {return IsBlock() || IsClass() || IsFunction() || IsInterface();} +#undef TYPE +#undef PRIMTYPE +#define TYPE(K) bool IsTypeId##K() const {return mTypeId == TY_##K;} +#define PRIMTYPE(K) bool IsTypeId##K() const {return mTypeId == TY_##K;} +#include "supported_types.def" + + bool IsScope() {return IsBlock() || IsClass() || IsFunction() || IsInterface() || IsModule();} bool TypeEquivalent(TreeNode*); - NodeKind GetKind() {return mKind;} + void SetKind(NodeKind k) {} // Not allowed to change its kind + void SetNodeId(unsigned id) {mNodeId = id;} void SetParent(TreeNode *p) {mParent = p;} - void SetLabel (TreeNode *p) {mLabel = p;} + void SetLabel (TreeNode *p) {mLabel = p; SETPARENT(p);} + void SetTypeId(TypeId id) {mTypeId = id;} + void SetTypeIdx(unsigned id) {mTypeIdx = id;} + void SetScope(ASTScope *s) {mScope = s;} + + NodeKind GetKind() {return mKind;} + unsigned GetNodeId() {return mNodeId;} TreeNode* GetParent() {return mParent;} TreeNode* GetLabel() {return mLabel;} - - virtual const char* GetName() {return mName;} - virtual void SetName(const char *s) {mName = s;} + TypeId GetTypeId() {return mTypeId;} + unsigned GetTypeIdx() {return mTypeIdx;} + ASTScope *GetScope() {return mScope;} + + bool IsStmt() {return mIsStmt;} + void SetIsStmt(bool b = true) {mIsStmt = b;} + bool IsOptional() {return mIsOptional;} + void SetIsOptional(bool b = true){mIsOptional = b;} + bool IsNonNull() {return mIsNonNull;} + void SetIsNonNull(bool b = true) {mIsNonNull = b;} + bool IsRest() {return mIsRest;} + void SetIsRest(bool b = true) {mIsRest = b;} + bool IsConst() {return mIsConst;} + void SetIsConst(bool b = true) {mIsConst = b;} + bool IsThis() {return mStrIdx == gStringPool.GetStrIdx("this");} + + virtual unsigned GetStrIdx() {return mStrIdx;} + virtual void SetStrIdx(unsigned id) {mStrIdx = id;} + virtual const char *GetName() {return gStringPool.GetStringFromStrIdx(mStrIdx);} + virtual void SetStrIdx(std::string str) {mStrIdx = gStringPool.GetStrIdx(str);} virtual void ReplaceChild(TreeNode *oldchild, TreeNode *newchild){} virtual void AddAttr(AttrId) {} virtual void AddAnnotation(AnnotationNode *n){} - virtual void Dump(unsigned){} + // AsType related + unsigned GetAsTypesNum() {return mAsTypes.GetNum();} + void AddAsType(AsTypeNode *n) {mAsTypes.PushBack(n);} + void AddAsTypes(TreeNode *n); + AsTypeNode* GetAsTypeAtIndex(unsigned i) {return mAsTypes.ValueAtIndex(i);} + void SetAsTypeAtIndex(unsigned i, AsTypeNode* n) {*(mAsTypes.RefAtIndex(i)) = n;} + + virtual void Dump(unsigned){} + virtual void D(){Dump(0); std::cout << std::endl;} void DumpIndentation(unsigned); void DumpLabel(unsigned); @@ -112,15 +181,171 @@ public: class PackageNode : public TreeNode { private: + TreeNode *mPackage; public: - PackageNode(){mKind = NK_Package;} - PackageNode(const char *s) {mKind = NK_Package; mName = s;} + PackageNode() : TreeNode(NK_Package) {} + PackageNode(unsigned id) : TreeNode(NK_Package, id) {} ~PackageNode() {} - void SetName(const 
char *s) {mName = s;} + TreeNode* GetPackage() {return mPackage;} + void SetPackage(TreeNode *t) {mPackage = t;} + + void SetName(unsigned id) {mStrIdx = id;} + void Dump(unsigned indent); +}; + +////////////////////////////////////////////////////////////////////////// +// XXportAsPair Node +// In JS, the import or export support: xxport {x as y} +// Its kind of like a mapping of internal name to extern name. +// +// Regarding 'default' and 'everything' (*), there are some rules of saving +// in the XXportAsPairNode. First, let's look at the variaties of combinations +// of default and *. +// +// 1) import * as x +// mIsEverything = true +// mAfter = x +// 2) import default as x +// 3) export x as default +// 4) export * +// mIsEverything = true +// mAfter = nullptr, mBefore = nullptr +// 5) export default declaration +// 6) export = x // This exports a single object. +// mIsSingle = true +// mBefore = x +// 7) import x = require("y") // The counterpart of export = xxx +// mIsSingle = true; +// mBefore = y +// mAfter = x +// For all these cases, we set mIsDefault or mIsEverything, and save the +// possible 'x' to mBefore. +////////////////////////////////////////////////////////////////////////// + +class XXportAsPairNode : public TreeNode { +private: + bool mIsDefault; // import or export 'default' + bool mIsEverything; // import or export '*', which is everything + bool mIsSingle; // export = xxx + bool mIsRef; // export is reference, otherwise is copy + bool mAsNamespace; // export as namespace xxx; + TreeNode *mBefore; // In usual cases, name before 'as' + TreeNode *mAfter; // In usual cases, name after 'as' + +public: + XXportAsPairNode() : TreeNode(NK_XXportAsPair), + mIsDefault(false), mIsEverything(false), mIsSingle(false), mIsRef(true), mAsNamespace(false), + mBefore(nullptr), mAfter(nullptr) {} + ~XXportAsPairNode() {} + + bool IsDefault() {return mIsDefault;} + void SetIsDefault(bool b = true) {mIsDefault = b;} + + bool IsEverything() {return mIsEverything;} + void SetIsEverything(bool b = true) {mIsEverything = b;} + + bool IsSingle() {return mIsSingle;} + void SetIsSingle(bool b = true) {mIsSingle = b;} + + bool IsRef() {return mIsRef;} + void SetIsRef(bool b = true) {mIsRef = b;} + + bool GetAsNamespace() {return mAsNamespace;} + void SetAsNamespace(bool b = true) {mAsNamespace = b;} + + TreeNode* GetBefore() {return mBefore;} + void SetBefore(TreeNode *t) {mBefore = t; SETPARENT(t);} + + TreeNode* GetAfter() {return mAfter;} + void SetAfter(TreeNode *t) {mAfter = t; SETPARENT(t);} + + void Dump(unsigned indent); +}; + +////////////////////////////////////////////////////////////////////////// +// Declare Nodes +// C/C++ extern decl, +// Typescript declare. +// +// A declare node could declare more than one declarations, like +// declare global { +// interface a {} +// var b; +// .. 
+// } +////////////////////////////////////////////////////////////////////////// + +class DeclareNode : public TreeNode { +private: + SmallVector mDecls; + SmallVector mAttrs; + bool mIsGlobal;// +public: + DeclareNode() : TreeNode(NK_Declare), mIsGlobal(false) {} + ~DeclareNode(){mAttrs.Release(); mDecls.Release();} + + bool IsGlobal() {return mIsGlobal;} + void SetIsGlobal(bool b = true) {mIsGlobal = b;} + + // Attributes related + unsigned GetAttrsNum() const {return mAttrs.GetNum();} + void AddAttr(AttrId a) {mAttrs.PushBack(a);} + AttrId GetAttrAtIndex(unsigned i) {return mAttrs.ValueAtIndex(i);} + void SetAttrAtIndex(unsigned i, AttrId n) {*(mAttrs.RefAtIndex(i)) = n;} + + // Declares + unsigned GetDeclsNum() const {return mDecls.GetNum();} + void AddDecl(TreeNode*); + TreeNode* GetDeclAtIndex(unsigned i) {return mDecls.ValueAtIndex(i);} + void SetDeclAtIndex(unsigned i, TreeNode *n) {*(mDecls.RefAtIndex(i)) = n;} + + void Dump(unsigned indent); +}; + +////////////////////////////////////////////////////////////////////////// +// Export Nodes +// export first comes from Javascript. +// +// If Export only exports a decl or a statement, it will be saved in +// mPairs as the only pair. This is the same in ImportNode. +////////////////////////////////////////////////////////////////////////// + +class ExportNode : public TreeNode { +private: + TreeNode *mTarget; // the exported package in Java or module in JS + SmallVector mPairs; + SmallVector mAnnotations; //annotation or pragma + bool mIsExportType; + +public: + ExportNode() : TreeNode(NK_Export), mTarget(nullptr), mIsExportType(false) {} + ~ExportNode(){} + + void SetTarget(TreeNode *t) {mTarget = t; SETPARENT(t);} + TreeNode* GetTarget() {return mTarget;} + + bool IsExportType() {return mIsExportType;} + void SetIsExportType(bool b = true) {mIsExportType = b;} + + unsigned GetPairsNum() {return mPairs.GetNum();} + XXportAsPairNode* GetPair(unsigned i) {return mPairs.ValueAtIndex(i);} + void SetPair(unsigned i, XXportAsPairNode* n) {*(mPairs.RefAtIndex(i)) = n; SETPARENT(n);} + void AddPair(TreeNode *p); + void AddDefaultPair(TreeNode *p); + void AddSinglePair(TreeNode *before, TreeNode *after); + + // Annotation/Pragma related + unsigned GetAnnotationsNum() {return mAnnotations.GetNum();} + void AddAnnotation(AnnotationNode *n) {mAnnotations.PushBack(n);} + AnnotationNode* GetAnnotationAtIndex(unsigned i) {return mAnnotations.ValueAtIndex(i);} + void SetAnnotationAtIndex(unsigned i, AnnotationNode* n) {*(mAnnotations.RefAtIndex(i)) = n;} + void ClearAnnotation() {mAnnotations.Clear();} + void Dump(unsigned indent); }; + ////////////////////////////////////////////////////////////////////////// // Import Node // Java import, c/c++ include, are the same scenarios. We just save the @@ -129,7 +354,9 @@ public: // We also borrow the idea of system directory vs. local directory from c/c++. ////////////////////////////////////////////////////////////////////////// +// The property is useful in Java right now. Javascript use the XXportAsPair. enum ImportProperty { + ImpNone = 0, ImpType = 1, // Java like, import type ImpStatic = 1 << 1, // Java like, import static field. 
// If we don't specify the data type of imported, it's @@ -141,15 +368,33 @@ enum ImportProperty { ImpSystem = 1 << 5 }; +inline ImportProperty& operator|=(ImportProperty& t, ImportProperty p) { + return t = static_cast(static_cast(t) | static_cast(p)); +} + +inline ImportProperty operator&(ImportProperty p, ImportProperty q) { + return static_cast(static_cast(p) & static_cast(q)); +} + class ImportNode : public TreeNode { private: - unsigned mProperty; + ImportProperty mProperty; + + // Solely for javascript right now. + // In many languages, mPairs could be empty. In such case it import the whole module. + SmallVector mPairs; + + // the imported target, a package in Java, or a module in JS + TreeNode *mTarget; + public: - ImportNode() {mName = NULL; mProperty = 0; mKind = NK_Import;} - ~ImportNode(){} + ImportNode() : TreeNode(NK_Import), mProperty(ImpNone), mTarget(nullptr) {} + ~ImportNode(){mPairs.Release();} - void SetName(const char *s) {mName = s;} - const char* GetName() {return mName;} + void SetProperty(ImportProperty p) {mProperty = p;} + ImportProperty GetProperty() {return mProperty;} + void SetTarget(TreeNode *n) {mTarget = n; SETPARENT(n);} + TreeNode* GetTarget() {return mTarget;} void SetImportType() {mProperty |= ImpType;} void SetImportStatic() {mProperty |= ImpStatic;} @@ -164,6 +409,13 @@ public: bool IsImportLocal() {return mProperty & ImpLocal;} bool IsImportSystem() {return mProperty & ImpSystem;} + unsigned GetPairsNum() {return mPairs.GetNum();} + XXportAsPairNode* GetPair(unsigned i) {return mPairs.ValueAtIndex(i);} + void SetPair(unsigned i, XXportAsPairNode* n) {*(mPairs.RefAtIndex(i)) = n;} + void AddPair(TreeNode *p); + void AddDefaultPair(TreeNode *p); + void AddSinglePair(TreeNode *before, TreeNode *after); + void Dump(unsigned indent); }; @@ -195,13 +447,13 @@ private: OprId mOprId; TreeNode *mOpnd; public: - UnaOperatorNode(OprId id) : mOprId(id), mOpnd(NULL), mIsPost(false) - {mKind = NK_UnaOperator;} - UnaOperatorNode() : mOpnd(NULL), mIsPost(false) {mKind = NK_UnaOperator;} + UnaOperatorNode(OprId id) : TreeNode(NK_UnaOperator), + mOprId(id), mOpnd(nullptr), mIsPost(false) {} + UnaOperatorNode() : UnaOperatorNode(OPR_NA) {} ~UnaOperatorNode() {} void SetIsPost(bool b) {mIsPost = b;} - void SetOpnd(TreeNode* t) {mOpnd = t; t->SetParent(this);} + void SetOpnd(TreeNode* t) {mOpnd = t; SETPARENT(t);} void SetOprId(OprId o) {mOprId = o;} bool IsPost() {return mIsPost;} @@ -214,30 +466,143 @@ public: }; class BinOperatorNode : public TreeNode { -public: +private: OprId mOprId; TreeNode *mOpndA; TreeNode *mOpndB; public: - BinOperatorNode(OprId id) : mOprId(id) {mKind = NK_BinOperator;} - BinOperatorNode() {mKind = NK_BinOperator;} + BinOperatorNode(OprId id) : TreeNode(NK_BinOperator), mOprId(id) {} + BinOperatorNode() : BinOperatorNode(OPR_NA) {} ~BinOperatorNode() {} + OprId GetOprId() {return mOprId;} + TreeNode* GetOpndA() {return mOpndA;} + TreeNode* GetOpndB() {return mOpndB;} + void SetOprId(OprId o) {mOprId = o;} + void SetOpndA(TreeNode* t) {mOpndA = t; SETPARENT(t);} + void SetOpndB(TreeNode* t) {mOpndB = t; SETPARENT(t);} + void ReplaceChild(TreeNode*, TreeNode*); void Dump(unsigned); }; +// TerOperatorNode is for an expression like +// a > b ? 
c : d class TerOperatorNode : public TreeNode { -public: - OprId mOprId; +private: TreeNode *mOpndA; TreeNode *mOpndB; TreeNode *mOpndC; public: - TerOperatorNode(OprId id) : mOprId(id) {mKind = NK_TerOperator;} - TerOperatorNode() {mKind = NK_TerOperator;} + TerOperatorNode() : TreeNode(NK_TerOperator) {} ~TerOperatorNode() {} + TreeNode* GetOpndA() {return mOpndA;} + TreeNode* GetOpndB() {return mOpndB;} + TreeNode* GetOpndC() {return mOpndC;} + void SetOpndA(TreeNode* t) {mOpndA = t; SETPARENT(t);} + void SetOpndB(TreeNode* t) {mOpndB = t; SETPARENT(t);} + void SetOpndC(TreeNode* t) {mOpndC = t; SETPARENT(t);} + + void Dump(unsigned); +}; + +////////////////////////////////////////////////////////////////////////// +// TypeAliasNode +// The syntax is type alias in Typescript, typedef in c/c++ +////////////////////////////////////////////////////////////////////////// + +class TypeAliasNode : public TreeNode { +private: + UserTypeNode *mId; + TreeNode *mAlias; +public: + TypeAliasNode() : TreeNode(NK_TypeAlias), mId(nullptr), mAlias(nullptr){} + ~TypeAliasNode() {} + + UserTypeNode* GetId() {return mId;} + void SetId(UserTypeNode *id); + + TreeNode* GetAlias() {return mAlias;} + void SetAlias(TreeNode *n); + + void Dump(unsigned); +}; + +////////////////////////////////////////////////////////////////////////// +// ConditionalType +// The syntax is n Typescript, +// type-a extends type-b ? type-c : type-d +////////////////////////////////////////////////////////////////////////// + +class ConditionalTypeNode : public TreeNode { +private: + TreeNode *mTypeA; + TreeNode *mTypeB; + TreeNode *mTypeC; + TreeNode *mTypeD; +public: + ConditionalTypeNode() : TreeNode(NK_ConditionalType), + mTypeA(nullptr), mTypeB(nullptr), mTypeC(nullptr), mTypeD(nullptr){} + ~ConditionalTypeNode() {} + + TreeNode* GetTypeA() {return mTypeA;} + TreeNode* GetTypeB() {return mTypeB;} + TreeNode* GetTypeC() {return mTypeC;} + TreeNode* GetTypeD() {return mTypeD;} + void SetTypeA(TreeNode *n) {mTypeA = n; SETPARENT(n);} + void SetTypeB(TreeNode *n) {mTypeB = n; SETPARENT(n);} + void SetTypeC(TreeNode *n) {mTypeC = n; SETPARENT(n);} + void SetTypeD(TreeNode *n) {mTypeD = n; SETPARENT(n);} + + void Dump(unsigned); +}; + +////////////////////////////////////////////////////////////////////////// +// AstType +// The syntax is like: variable as type-a as type-b ... +// It tells what type is the 'variable' +////////////////////////////////////////////////////////////////////////// + +class AsTypeNode : public TreeNode { +private: + TreeNode *mType; +public: + AsTypeNode() : TreeNode(NK_AsType), mType(nullptr) {} + ~AsTypeNode() {} + + TreeNode* GetType() {return mType;} + void SetType(TreeNode *t) {mType = t; SETPARENT(t);} + + void Dump(unsigned indent); +}; + +////////////////////////////////////////////////////////////////////////// +// TypeParameter +////////////////////////////////////////////////////////////////////////// + +class TypeParameterNode : public TreeNode { +private: + TreeNode *mId; // The name of the type parameter + TreeNode *mDefault; // The default value of this type parameter. + // some languages support default value. + TreeNode *mExtends; // The constraint of this type parameter. 
+ // In Typescript, the syntax is like: T + +public: + TypeParameterNode() : TreeNode(NK_TypeParameter), mId(nullptr), mDefault(nullptr), + mExtends(nullptr) {} + ~TypeParameterNode() {} + + TreeNode* GetId() {return mId;} + void SetId(TreeNode* t) {mId = t; SETPARENT(t);} + + TreeNode* GetDefault() {return mDefault;} + void SetDefault(TreeNode* t) {mDefault = t; SETPARENT(t);} + + TreeNode* GetExtends() {return mExtends;} + void SetExtends(TreeNode* t) {mExtends = t; SETPARENT(t);} + void Dump(unsigned); }; @@ -255,27 +620,100 @@ private: TreeNode *mId; // A name could be like Outer.Inner // Hard to give a const char* as name. // So give it an id. - SmallVector mParams; // + // In Typescript, it could be a lambda: + // new (...) => Type + // in which mArgs and mBody are not used. + SmallVector mArgs; // BlockNode *mBody; // When body is not empty, it's an // anonymous class. + SmallVector mAttrs; public: - NewNode() : mId(NULL), mBody(NULL) {mKind = NK_New;} - ~NewNode() {mParams.Release();} + NewNode() : TreeNode(NK_New), mId(nullptr), mBody(nullptr) {} + ~NewNode() {mArgs.Release();} TreeNode* GetId() {return mId;} - void SetId(TreeNode *n) {mId = n;} + void SetId(TreeNode *n) {mId = n; SETPARENT(n);} BlockNode* GetBody() {return mBody;} void SetBody(BlockNode *n) {mBody = n;} - unsigned GetParamsNum() {return mParams.GetNum();} - TreeNode* GetParam(unsigned i) {return mParams.ValueAtIndex(i);} - void AddParam(TreeNode *t) {mParams.PushBack(t);} + unsigned GetArgsNum() {return mArgs.GetNum();} + TreeNode* GetArg(unsigned i) {return mArgs.ValueAtIndex(i);} + void SetArg(unsigned i, TreeNode* n) {*(mArgs.RefAtIndex(i)) = n; SETPARENT(n);} + void AddArg(TreeNode *t) {mArgs.PushBack(t); SETPARENT(t);} + + // Attributes related + unsigned GetAttrsNum() {return mAttrs.GetNum();} + void AddAttr(AttrId a) {mAttrs.PushBack(a);} + AttrId GetAttrAtIndex(unsigned i) {return mAttrs.ValueAtIndex(i);} + void SetAttrAtIndex(unsigned i, AttrId n) {*(mAttrs.RefAtIndex(i)) = n;} void ReplaceChild(TreeNode *oldone, TreeNode *newone); void Dump(unsigned); }; class DeleteNode : public TreeNode { +private: + TreeNode *mExpr; +public: + DeleteNode() : TreeNode(NK_Delete), mExpr(nullptr) {} + ~DeleteNode(){} + + TreeNode* GetExpr() {return mExpr;} + void SetExpr(TreeNode *t) {mExpr = t; SETPARENT(t);} + + void Dump(unsigned); +}; + +////////////////////////////////////////////////////////////////////////// +// AnnotationNode +// +// Pragma or Annotation are language constructs to help (1) compiler +// (2) runtime (3) other tools, to better analyze or execute the program. +// It doesn't change the program behaviour, but may be improve static +// and/or dynamic analysis and/or performance. +// +// We have a dedicated AnnotationNode here. The program/annotation syntax +// could be complicated, eg. c99 introduce function call like pragma. +// +// The difference between Java annotation and C/c++ pragma is annotation +// is user defined and pragma is compiler/system defined. In another word +// Java annoation has unlimited number while pragmas are limited. 
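+// Illustrative examples (not from the original source): a user-defined Java annotation like
+//     @MyAnnotation(value = 1)      // user defined, unlimited in number
+// and a compiler-defined pragma like
+//     #pragma pack(1)               // compiler/system defined, limited set
+// are both represented by an AnnotationNode; call-like arguments go into its argument list.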
+////////////////////////////////////////////////////////////////////////// + +// AnnotationTypeNode defines a new Annotation +class AnnotationTypeNode : public TreeNode { +private: + IdentifierNode *mId; +public: + AnnotationTypeNode() : TreeNode(NK_AnnotationType) {} + ~AnnotationTypeNode() {} + IdentifierNode* GetId() {return mId;} + void SetId(IdentifierNode *n) {mId = n;} + void Dump(unsigned); +}; + +// Annotation/Pragma is complicated, but everything starts from the +// name. So at construction, we will take in the name at first. +// Later we will initialize mType, mExpr based on the 'name' and other +// expression. +class AnnotationNode : public TreeNode { +private: + TreeNode *mId; + AnnotationTypeNode *mType; + SmallVector mArgs; +public: + AnnotationNode() : TreeNode(NK_Annotation), mId(nullptr), mType(nullptr) {} + ~AnnotationNode(){mArgs.Release();} + + TreeNode* GetId() {return mId;} + void SetId(TreeNode *n) {mId = n; SETPARENT(n);} + AnnotationTypeNode* GetType() {return mType;} + void SetType(AnnotationTypeNode *n) {mType = n; SETPARENT(n);} + + unsigned GetArgsNum() {return mArgs.GetNum();} + TreeNode* GetArgAtIndex(unsigned i) {return mArgs.ValueAtIndex(i);} + void SetArgAtIndex(unsigned i, TreeNode* n) {*(mArgs.RefAtIndex(i)) = n;} + void AddArg(TreeNode *n){mArgs.PushBack(n); SETPARENT(n);} }; ////////////////////////////////////////////////////////////////////////// @@ -307,16 +745,13 @@ class DimensionNode : public TreeNode { private: SmallVector mDimensions; public: - DimensionNode() {mKind = NK_Dimension;} + DimensionNode() : TreeNode(NK_Dimension) {} ~DimensionNode(){Release();} - unsigned GetDimsNum() {return mDimensions.GetNum();} - unsigned GetNthDim(unsigned n) {return mDimensions.ValueAtIndex(n);} // 0 means unspecified. - void SetNthDim(unsigned n, unsigned i) { - unsigned *addr = mDimensions.RefAtIndex(n); - *addr = i; - } - void AddDim(unsigned i = 0) {mDimensions.PushBack(i);} + unsigned GetDimensionsNum() {return mDimensions.GetNum();} + unsigned GetDimension(unsigned i) {return mDimensions.ValueAtIndex(i);} // 0 means unspecified. 
+ void SetDimension(unsigned i, unsigned n) {*(mDimensions.RefAtIndex(i)) = n;} + void AddDimension(unsigned i = 0) {mDimensions.PushBack(i);} void Merge(const TreeNode*); void Release() {mDimensions.Release();} @@ -331,77 +766,84 @@ private: TreeNode *mType; // PrimTypeNode or UserTypeNode TreeNode *mInit; // Init value DimensionNode *mDims; + + SmallVector mAnnotations; //annotation or pragma + public: - IdentifierNode(const char *s) : mType(NULL), mInit(NULL), mDims(NULL){ - mKind = NK_Identifier; - SetName(s); } - IdentifierNode(const char *s, TreeNode *t) : mType(t), mInit(NULL), mDims(NULL) { - mKind = NK_Identifier; - SetName(s);} - ~IdentifierNode(){} + IdentifierNode(unsigned id, TreeNode *t) : TreeNode(NK_Identifier, id), + mType(t), mInit(nullptr), mDims(nullptr) {} + IdentifierNode(unsigned id) : IdentifierNode(id, nullptr) {} + IdentifierNode() : IdentifierNode(0, nullptr) {} + ~IdentifierNode(){Release();} TreeNode* GetType() {return mType;} TreeNode* GetInit() {return mInit;} + DimensionNode* GetDims() {return mDims;} - void SetType(TreeNode *t) {mType = t;} - void SetInit(TreeNode *t) {mInit = t;} - void SetDims(DimensionNode *t) {mDims = t;} + void SetType(TreeNode *t) {mType = t; SETPARENT(t);} + void SetInit(TreeNode *t) {mInit = t; SETPARENT(t);} + void ClearInit() {mInit = nullptr;} + void SetDims(DimensionNode *t) {mDims = t; SETPARENT(t);} - unsigned GetDimsNum() {return mDims->GetDimsNum();} + unsigned GetDimsNum() {return mDims->GetDimensionsNum();} + unsigned GetDim(unsigned n) {return mDims->GetDimension(n);} // 0 means unspecified. bool IsArray() {return mDims && GetDimsNum() > 0;} - unsigned AddDim(unsigned i = 0){mDims->AddDim(i);} // 0 means unspecified - unsigned GetNthNum(unsigned n) {return mDims->GetNthDim(n);} // 0 means unspecified. - void SetNthNum(unsigned n, unsigned i) {mDims->SetNthDim(n, i);} + void AddDim(unsigned i = 0){mDims->AddDimension(i);} // 0 means unspecified + unsigned GetNthNum(unsigned n) {return mDims->GetDimension(n);} // 0 means unspecified. + void SetNthNum(unsigned n, unsigned i) {mDims->SetDimension(n, i);} // Attributes related - unsigned GetAttrsNum() const {return mAttrs.GetNum();} - void AddAttr(AttrId a) {mAttrs.PushBack(a);} - AttrId AttrAtIndex(unsigned i) {return mAttrs.ValueAtIndex(i);} + unsigned GetAttrsNum() const {return mAttrs.GetNum();} + void AddAttr(AttrId a) {mAttrs.PushBack(a);} + AttrId GetAttrAtIndex(unsigned i) {return mAttrs.ValueAtIndex(i);} + void SetAttrAtIndex(unsigned i, AttrId n) {*(mAttrs.RefAtIndex(i)) = n;} + + // Annotation/Pragma related + unsigned GetAnnotationsNum() {return mAnnotations.GetNum();} + void AddAnnotation(AnnotationNode *n) {mAnnotations.PushBack(n); SETPARENT(n);} + AnnotationNode* GetAnnotationAtIndex(unsigned i) {return mAnnotations.ValueAtIndex(i);} + void SetAnnotationAtIndex(unsigned i, AnnotationNode* n) {*(mAnnotations.RefAtIndex(i)) = n;} - void Release() { if (mDims) mDims->Release();} + void Release(); void Dump(unsigned); }; ////////////////////////////////////////////////////////////////////////// -// AnnotationNode -// -// Pragma or Annotation are language constructs to help (1) compiler -// (2) runtime (3) other tools, to better analyze or execute the program. -// It doesn't change the program behaviour, but may be improve static -// and/or dynamic analysis and/or performance. -// -// We have a dedicated AnnotationNode here. The program/annotation syntax -// could be complicated, eg. c99 introduce function call like pragma. 
-// -// The difference between Java annotation and C/c++ pragma is annotation -// is user defined and pragma is compiler/system defined. In another word -// Java annoation has unlimited number while pragmas are limited. +// DeclNode +// A DeclNode defines one single variable or a VarList. +// The type info and init expr are both inside the IdentifierNode. +// DeclNode only tells this is a declaration. ////////////////////////////////////////////////////////////////////////// -// AnnotationTypeNode defines a new Annotation -class AnnotationTypeNode : public TreeNode { -private: - IdentifierNode *mId; -public: - void SetId(IdentifierNode *n) {mId = n;} - void Dump(unsigned); +// special property of Javascript +enum DeclProp { + JS_Var, + JS_Let, + JS_Const, + DP_NA }; -// Annotation/Pragma is complicated, but everything starts from the -// name. So at construction, we will take in the name at first. -// Later we will initialize mType, mExpr based on the 'name' and other -// expression. -class AnnotationNode : public TreeNode { +class DeclNode : public TreeNode { private: - IdentifierNode *mId; - AnnotationTypeNode *mType; - TreeNode *mExpr; + TreeNode *mVar; + TreeNode *mInit; // Init value + DeclProp mProp; public: - AnnotationNode() : mId(NULL), mType(NULL), mExpr(NULL) { - mKind = NK_Annotation;} - ~AnnotationNode(){} + DeclNode(TreeNode *t) : TreeNode(NK_Decl), + mVar(t), mInit(nullptr), mProp(DP_NA) {SETPARENT(t);} + DeclNode() : DeclNode(nullptr) {} + ~DeclNode(){} - void SetId(IdentifierNode *n) {mId = n;} + TreeNode* GetVar() {return mVar;} + TreeNode* GetInit() {return mInit;} + + void SetVar(TreeNode *t) {mVar = t; SETPARENT(t);} + void SetInit(TreeNode *t) {mInit = t; SETPARENT(t);} + + DeclProp GetProp() {return mProp;} + void SetProp(DeclProp p) {mProp = p;} + + void Dump(unsigned); }; ////////////////////////////////////////////////////////////////////////// @@ -414,16 +856,16 @@ private: TreeNode *mDestType; TreeNode *mExpr; public: - CastNode() : mDestType(NULL), mExpr(NULL) {mKind = NK_Cast;} + CastNode() : TreeNode(NK_Cast), mDestType(nullptr), mExpr(nullptr) {} ~CastNode(){} TreeNode* GetDestType() {return mDestType;} void SetDestType(TreeNode *t) {mDestType = t;} TreeNode* GetExpr() {return mExpr;} - void SetExpr(TreeNode *t) {mExpr = t;} + void SetExpr(TreeNode *t) {mExpr = t; SETPARENT(t);} - const char* GetName(); + const char* GetDumpName(); void Dump(unsigned); }; @@ -443,44 +885,328 @@ class ParenthesisNode : public TreeNode { private: TreeNode *mExpr; public: - ParenthesisNode() : mExpr(NULL) {mKind = NK_Parenthesis;} + ParenthesisNode() : TreeNode(NK_Parenthesis), mExpr(nullptr) {} ~ParenthesisNode(){} - TreeNode* GetExpr() {return mExpr;} - void SetExpr(TreeNode *t) {mExpr = t; t->SetParent(this);} + TreeNode* GetExpr() {return mExpr;} + void SetExpr(TreeNode *t) {mExpr = t; SETPARENT(t);} + + void Dump(unsigned); +}; + +////////////////////////////////////////////////////////////////////////// +// FieldNode +// This is used for field reference. The field could be a member field or +// a member function. 
+////////////////////////////////////////////////////////////////////////// + +class FieldNode : public TreeNode { +private: + TreeNode *mUpper; // The upper enclosing structure + TreeNode *mField; +public: + FieldNode() : TreeNode(NK_Field), mField(nullptr), mUpper(nullptr) {} + ~FieldNode(){} + + TreeNode* GetField() {return mField;} + void SetField(TreeNode *f) {mField = f; SETPARENT(f);} + + TreeNode *GetUpper() {return mUpper;} + void SetUpper(TreeNode *n) { + TreeNode *up = n; + while (up->IsParenthesis()) { + ParenthesisNode *pn = (ParenthesisNode*)up; + up = pn->GetExpr(); + } + mUpper = up; + SETPARENT(up); + } + + void Dump(unsigned); +}; + +////////////////////////////////////////////////////////////////////////// +// Array Related Nodes +// ArrayElementNode, ArrayLiteralNode. +////////////////////////////////////////////////////////////////////////// + +// Array element is a[b][c]. +class ArrayElementNode : public TreeNode { +private: + TreeNode *mArray; + SmallVector mExprs; // index expressions. +public: + ArrayElementNode() : TreeNode(NK_ArrayElement), mArray(nullptr) {} + ~ArrayElementNode() {Release();} + + TreeNode* GetArray() {return mArray;} + void SetArray(TreeNode *n) {mArray = n; SETPARENT(n);} + + unsigned GetExprsNum() {return mExprs.GetNum();} + TreeNode* GetExprAtIndex(unsigned i) {return mExprs.ValueAtIndex(i);} + void SetExprAtIndex(unsigned i, TreeNode* n) {*(mExprs.RefAtIndex(i)) = n; SETPARENT(n);} + void AddExpr(TreeNode *n){mExprs.PushBack(n); SETPARENT(n);} + + void Release() {mExprs.Release();} + void Dump(unsigned); +}; + + +// Array literal is [1, 2 , 0, -4]. It's an arrya of literals. +// It could also be multi-dim array literal like [[1,2],[2,3]] +class ArrayLiteralNode : public TreeNode { +private: + SmallVector mLiterals; +public: + ArrayLiteralNode() : TreeNode(NK_ArrayLiteral) {} + ~ArrayLiteralNode() {Release();} + + unsigned GetLiteralsNum() {return mLiterals.GetNum();} + TreeNode* GetLiteral(unsigned i) {return mLiterals.ValueAtIndex(i);} + void SetLiteral(unsigned i, TreeNode* n) {*(mLiterals.RefAtIndex(i)) = n; SETPARENT(n);} + void AddLiteral(TreeNode *n){mLiterals.PushBack(n); SETPARENT(n);} + + void Release() {mLiterals.Release();} + void Dump(unsigned); +}; + +////////////////////////////////////////////////////////////////////////// +// BindingPattern +// It's used in Destructuring scenarios. It comes from Javascript. +// It takes out elements from structs or arrays and form a new one. +// It may bind elements to some new variables. I believe this is the reason +// it's called BindingXXX +////////////////////////////////////////////////////////////////////////// + +class BindingElementNode : public TreeNode { +private: + TreeNode *mVariable; // The new variable to bind element to + TreeNode *mElement; // the elements in the source struct or array +public: + BindingElementNode() : TreeNode(NK_BindingElement), + mVariable(nullptr), mElement(nullptr) {} + ~BindingElementNode() {} + + TreeNode* GetVariable() {return mVariable;} + void SetVariable(TreeNode* n) {mVariable = n; SETPARENT(n);} + TreeNode* GetElement() {return mElement;} + void SetElement(TreeNode* n) {mElement = n; SETPARENT(n);} + + void Dump(unsigned); +}; + +enum BindPattProp { + BPP_ArrayBinding, + BPP_ObjectBinding, + BPP_NA +}; + +class BindingPatternNode : public TreeNode { +private: + BindPattProp mProp; + SmallVector mElements; // mostly BindingElementNode, also could be + // a nested BindingPatternNode. 
+ TreeNode *mType; // The type + TreeNode *mInit; // An initializer +public: + BindingPatternNode() : + TreeNode(NK_BindingPattern), mInit(nullptr), mType(nullptr), mProp(BPP_NA) {} + ~BindingPatternNode() {Release();} + + BindPattProp GetProp() {return mProp;} + void SetProp(BindPattProp n) {mProp = n;} + + unsigned GetElementsNum() {return mElements.GetNum();} + TreeNode* GetElement(unsigned i) {return mElements.ValueAtIndex(i);} + void SetElement(unsigned i, TreeNode* n) {*(mElements.RefAtIndex(i)) = n;} + void AddElement(TreeNode *n); + + TreeNode* GetType() {return mType;} + void SetType(TreeNode* n) {mType = n; SETPARENT(n);} + TreeNode* GetInit() {return mInit;} + void SetInit(TreeNode* n) {mInit = n; SETPARENT(n);} + + void Release() {mElements.Release();} + void Dump(unsigned); +}; + + +////////////////////////////////////////////////////////////////////////// +// Struct Node +// This is first coming from C struct. Typescript 'interface' has the +// similar structure. +// +// Index signature of Typescript make it complicated. Here is an example. +// interface Foo{ +// [key: string]: number; +// } +// +// let bar: Foo = {}; +// bar['key1'] = 1; +// +////////////////////////////////////////////////////////////////////////// + +enum StructProp { + SProp_CStruct, + SProp_TSInterface, + SProp_TSEnum, + SProp_NA +}; + +class NumIndexSigNode : public TreeNode{ +public: + TreeNode *mKey; + TreeNode *mDataType; + + void SetKey(TreeNode *t) {mKey = t;} + TreeNode* GetKey() {return mKey;} + void SetDataType(TreeNode *t) {mDataType = t;} + TreeNode* GetDataType() {return mDataType;} + + NumIndexSigNode() : TreeNode(NK_NumIndexSig), mDataType(nullptr), mKey(nullptr) {} + ~NumIndexSigNode(){} + void Dump(unsigned); +}; + +class StrIndexSigNode : public TreeNode{ +public: + TreeNode *mKey; + TreeNode *mDataType; + + void SetKey(TreeNode *t) {mKey = t; SETPARENT(t);} + TreeNode* GetKey() {return mKey;} + void SetDataType(TreeNode *t) {mDataType = t; SETPARENT(t);} + TreeNode* GetDataType() {return mDataType;} + + StrIndexSigNode() : TreeNode(NK_StrIndexSig), mDataType(nullptr), mKey(nullptr) {} + ~StrIndexSigNode(){} + void Dump(unsigned); +}; + +// C++ struct or Typescript interface. +// The methods in Typescript interface has no function body. 
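+// Illustrative mapping (not from the original source):
+//   C/C++:      struct Point { int x; int y; };          -> SProp_CStruct
+//   TypeScript: interface Foo { [key: string]: number; } -> SProp_TSInterface,
+//               with the index signature kept in the string index-signature field
+//   TypeScript: enum Color { Red, Green }                 -> SProp_TSEnum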
+class StructNode : public TreeNode { +private: + StructProp mProp; + IdentifierNode *mStructId; + SmallVector mTypeParams; + SmallVector mFields; + SmallVector mMethods; + SmallVector mSupers; + + // These are for 'number' or 'string' index data type + NumIndexSigNode *mNumIndexSig; + StrIndexSigNode *mStrIndexSig; + +public: + StructNode(IdentifierNode *n) : TreeNode(NK_Struct), mStructId(n), mProp(SProp_NA), + mNumIndexSig(nullptr), mStrIndexSig(nullptr) {SETPARENT(n);} + StructNode() : StructNode(nullptr) {} + ~StructNode() {Release();} + + StructProp GetProp() {return mProp;} + IdentifierNode* GetStructId() {return mStructId;} + void SetProp(StructProp p) {mProp = p;} + void SetStructId(IdentifierNode *n) {mStructId = n; SETPARENT(n);} + + NumIndexSigNode* GetNumIndexSig() {return mNumIndexSig;} + StrIndexSigNode* GetStrIndexSig() {return mStrIndexSig;} + void SetNumIndexSig(NumIndexSigNode *t) {mNumIndexSig = t;} + void SetStrIndexSig(StrIndexSigNode *t) {mStrIndexSig = t;} + + // TypeParameter + unsigned GetTypeParamsNum() {return mTypeParams.GetNum();} + void AddTypeParam(TreeNode *n); + TypeParameterNode* GetTypeParamAtIndex(unsigned i) {return mTypeParams.ValueAtIndex(i);} + void SetTypeParamAtIndex(unsigned i, TypeParameterNode* n) {*(mTypeParams.RefAtIndex(i)) = n;} + + unsigned GetFieldsNum() {return mFields.GetNum();} + TreeNode* GetField(unsigned i) {return mFields.ValueAtIndex(i);} + void SetField(unsigned i, TreeNode* n) {*(mFields.RefAtIndex(i)) = n; SETPARENT(n);} + void AddField(TreeNode *n) {mFields.PushBack(n); SETPARENT(n);} + + unsigned GetSupersNum() {return mSupers.GetNum();} + TreeNode* GetSuper(unsigned i) {return mSupers.ValueAtIndex(i);} + void SetSuper(unsigned i, TreeNode* n) {*(mSupers.RefAtIndex(i)) = n;} + void AddSuper(TreeNode *n); + + unsigned GetMethodsNum() {return mMethods.GetNum();} + FunctionNode* GetMethod(unsigned i) {return mMethods.ValueAtIndex(i);} + void SetMethod(unsigned i, FunctionNode* n) {*(mMethods.RefAtIndex(i)) = n;} + void AddMethod(FunctionNode *n) {mMethods.PushBack(n);} + + void AddChild(TreeNode *); + + void Release() {mFields.Release(); mMethods.Release(); mSupers.Release(); mTypeParams.Release();} + void Dump(unsigned); +}; + +// We define StructLiteral for C/C++ struct literal, TS/JS object literal. +// It contains a list of duple +// +// In Javascript, the GetAccessor/SetAccessor makes it complicated. +// We save the XetAccessor as a field literal with fieldname being func +// name and literal being function node itself. + +// mFieldName could be nullptr, like {3, 4} or {a, b}, 3, 4, a and b are literals without +// a name. but mLiteral may not be nullptr. +class FieldLiteralNode : public TreeNode{ +public: + TreeNode *mFieldName; // Generally a field is an identifier. However, in JS/TS + // it could be a literal string or numeric. 
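+  // Illustrative example (not from the original source): in the TS/JS object literal
+  //   {name: "x", 3: true}
+  // each pair becomes one FieldLiteralNode: the field name is 'name' or 3,
+  // and the literal is "x" or true.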
+ TreeNode *mLiteral; + + void SetFieldName(TreeNode *id) {mFieldName = id; SETPARENT(id);} + void SetLiteral(TreeNode *id) {mLiteral = id; SETPARENT(id);} + + TreeNode* GetFieldName() {return mFieldName;} + TreeNode* GetLiteral() {return mLiteral;} + + FieldLiteralNode() : mFieldName(nullptr), TreeNode(NK_FieldLiteral) {} + ~FieldLiteralNode(){} +}; + +class StructLiteralNode : public TreeNode { +private: + SmallVector mFields; +public: + StructLiteralNode() : TreeNode(NK_StructLiteral) {} + ~StructLiteralNode(){Release();} + + unsigned GetFieldsNum() {return mFields.GetNum();} + FieldLiteralNode* GetField(unsigned i) {return mFields.ValueAtIndex(i);} + void SetField(unsigned i, FieldLiteralNode* n) {*(mFields.RefAtIndex(i)) = n; SETPARENT(n);} + void AddField(TreeNode *n); void Dump(unsigned); }; ////////////////////////////////////////////////////////////////////////// -// FieldNode -// This is used for field reference. It includes both member field and -// member function. +// Namespace Node +// Typescript namespace has only a list of children which could be any +// kind of declaration or statement. So I simply keep their original +// nodes. ////////////////////////////////////////////////////////////////////////// -class FieldNode : public TreeNode { +class NamespaceNode : public TreeNode { private: - TreeNode *mUpper; // The upper enclosing structure - IdentifierNode *mField; + SmallVector mElements; + TreeNode *mId; // the name of namespace public: - FieldNode() : TreeNode(), mField(NULL), mUpper(NULL) {mKind = NK_Field;} - ~FieldNode(){} + NamespaceNode() : TreeNode(NK_Namespace), mId(nullptr) {} + ~NamespaceNode() {Release();} - void Init(); + void SetId(TreeNode *id) {mId = id; SETPARENT(id);} + TreeNode* GetId() {return mId;} - IdentifierNode* GetField() {return mField;} - void SetField(IdentifierNode *f) {mField = f;} + unsigned GetElementsNum() {return mElements.GetNum();} + TreeNode* GetElementAtIndex(unsigned i) {return mElements.ValueAtIndex(i);} + void SetElementAtIndex(unsigned i, TreeNode* n) {*(mElements.RefAtIndex(i)) = n;} + void AddElement(TreeNode* n) {mElements.PushBack(n); SETPARENT(n);} - TreeNode *GetUpper() {return mUpper;} - void SetUpper(TreeNode *n) { - TreeNode *up = n; - while (up->IsParenthesis()) { - ParenthesisNode *pn = (ParenthesisNode*)up; - up = pn->GetExpr(); - } - mUpper = up; - } + void AddBody(TreeNode *); + void Release() {mElements.Release();} void Dump(unsigned); }; @@ -494,11 +1220,12 @@ class VarListNode : public TreeNode { private: SmallVector mVars; public: - VarListNode() {mKind = NK_VarList;} - ~VarListNode() {} + VarListNode() : TreeNode(NK_VarList) {} + ~VarListNode() {Release();} - unsigned GetNum() {return mVars.GetNum();} - IdentifierNode* VarAtIndex(unsigned i) {return mVars.ValueAtIndex(i);} + unsigned GetVarsNum() {return mVars.GetNum();} + IdentifierNode* GetVarAtIndex(unsigned i) {return mVars.ValueAtIndex(i);} + void SetVarAtIndex(unsigned i, IdentifierNode* n) {*(mVars.RefAtIndex(i)) = n;} void AddVar(IdentifierNode *n); void Merge(TreeNode*); @@ -516,11 +1243,12 @@ class ExprListNode : public TreeNode { private: SmallVector mExprs; public: - ExprListNode() {mKind = NK_ExprList;} + ExprListNode() : TreeNode(NK_ExprList) {} ~ExprListNode() {} - unsigned GetNum() {return mExprs.GetNum();} - TreeNode* ExprAtIndex(unsigned i) {return mExprs.ValueAtIndex(i);} + unsigned GetExprsNum() {return mExprs.GetNum();} + TreeNode* GetExprAtIndex(unsigned i) {return mExprs.ValueAtIndex(i);} + void SetExprAtIndex(unsigned i, TreeNode* n) 
{*(mExprs.RefAtIndex(i)) = n;} void AddExpr(TreeNode *n) {mExprs.PushBack(n);} void Merge(TreeNode*); @@ -528,23 +1256,182 @@ public: void Dump(unsigned); }; +////////////////////////////////////////////////////////////////////////// +// TemplateLiteral Nodes +// TemplateLiteral node is created from the corresponding TempLit Token, +// copying the raw mStrings from token. +// Later on, call parser special API to create AST nodes for the patterns. +// After that, only mStrings and mTrees are used. +////////////////////////////////////////////////////////////////////////// + +class TemplateLiteralNode : public TreeNode { +private: + // mStrings save pairs. + SmallVector mStrings; + + // It's tree nodes of pairs of . So it would be pairs + // of , For any missing element, a nullptr is saved + // in its position. + // Even index elements are for formats, Odd index elements are for placeholder. + SmallVector mTrees; + +public: + TemplateLiteralNode() : TreeNode(NK_TemplateLiteral) {} + ~TemplateLiteralNode(){mStrings.Release(); mTrees.Release();} + + unsigned GetStringsNum() {return mStrings.GetNum();} + const char* GetStringAtIndex(unsigned i) {return mStrings.ValueAtIndex(i);} + void SetStringAtIndex(unsigned i, const char* n) {*(mStrings.RefAtIndex(i)) = n;} + void AddString(const char *n) {mStrings.PushBack(n);} + + unsigned GetTreesNum() {return mTrees.GetNum();} + TreeNode* GetTreeAtIndex(unsigned i) {return mTrees.ValueAtIndex(i);} + void SetTreeAtIndex(unsigned i, TreeNode *n) {*(mTrees.RefAtIndex(i)) = n; SETPARENT(n);} + void AddTree(TreeNode *n) {mTrees.PushBack(n); SETPARENT(n);} + + void Dump(unsigned); +}; + +// We define a global vector for TemplateLiteralNode created after all parsing. +extern SmallVector gTemplateLiteralNodes; + ////////////////////////////////////////////////////////////////////////// // Literal Nodes ////////////////////////////////////////////////////////////////////////// class LiteralNode : public TreeNode { private: - LitData mData; + LitData mData; + + // The regular type information is stored in LitData which is common practice. + // However, in languages like Typescript, it does allow special literals like 'this' + // to have a dedicated type. So here comes 'mType'. + TreeNode *mType; + + // Typescript allows a string literal to be used as an Identifier, so it allows + // an init value. 
+ TreeNode *mInit; + private: void InitName(); public: - LiteralNode(LitData d) : mData(d) { - mKind = NK_Literal; InitName(); - } + LiteralNode(LitData d) : TreeNode(NK_Literal), mData(d), mType(nullptr), mInit(nullptr) {} + LiteralNode() : LiteralNode({.mType = LT_NA, .mData.mInt = 0}) {} ~LiteralNode(){} LitData GetData() {return mData;} + void SetData(LitData d) {mData = d;} + + TreeNode* GetType() {return mType;} + void SetType(TreeNode *t) {mType = t; SETPARENT(t);} + TreeNode* GetInit() {return mInit;} + void SetInit(TreeNode *t) {mInit = t; SETPARENT(t);} + + bool IsThis() {return mData.mType == LT_ThisLiteral;} + + void Dump(unsigned); +}; + +////////////////////////////////////////////////////////////////////////// +// RegExpr Nodes +////////////////////////////////////////////////////////////////////////// + +class RegExprNode : public TreeNode { +private: + RegExprData mData; +public: + RegExprNode() : TreeNode(NK_RegExpr) {mData.mExpr = nullptr; mData.mFlags = nullptr;} + ~RegExprNode(){} + + RegExprData GetData() {return mData;} + void SetData(RegExprData d) {mData = d;} + + void Dump(unsigned); +}; + +////////////////////////////////////////////////////////////////////////// +// ThrowNode +// This is the throw statement. +// In Java some functions throw exception in declaration, and they are +// saved in FunctionNode::mThrows. +////////////////////////////////////////////////////////////////////////// + +class ThrowNode : public TreeNode { +private: + SmallVector mExceptions; +public: + ThrowNode() : TreeNode(NK_Throw) {} + ~ThrowNode(){} + + unsigned GetExceptionsNum() {return mExceptions.GetNum();} + TreeNode* GetExceptionAtIndex(unsigned i) {return mExceptions.ValueAtIndex(i);} + void SetExceptionAtIndex(unsigned i, TreeNode* n) {*(mExceptions.RefAtIndex(i)) = n;} + void AddException(TreeNode *n); + + void Dump(unsigned); +}; + +////////////////////////////////////////////////////////////////////////// +// Try, Catch, Finally +// I can build one single TryNode to contain all the information. However, +// I built all three types of nodes, since some language could have complex +// syntax of catch or finally. +////////////////////////////////////////////////////////////////////////// + +class CatchNode : public TreeNode { +private: + SmallVector mParams; // In Java, this sould be exception node. + BlockNode *mBlock; +public: + CatchNode() : TreeNode(NK_Catch), mBlock(nullptr) {} + ~CatchNode(){} + + BlockNode* GetBlock() {return mBlock;} + void SetBlock(BlockNode *n) {mBlock = n;} + + unsigned GetParamsNum() {return mParams.GetNum();} + TreeNode* GetParamAtIndex(unsigned i) {return mParams.ValueAtIndex(i);} + void SetParamAtIndex(unsigned i, TreeNode* n) {*(mParams.RefAtIndex(i)) = n; SETPARENT(n);} + void AddParam(TreeNode *n); + + void Dump(unsigned); +}; + +class FinallyNode : public TreeNode { +private: + BlockNode *mBlock; +public: + FinallyNode() : TreeNode(NK_Finally), mBlock(nullptr) {} + ~FinallyNode(){} + + BlockNode* GetBlock() {return mBlock;} + void SetBlock(BlockNode *n) {mBlock = n;} + + void Dump(unsigned); +}; + +class TryNode : public TreeNode { +private: + BlockNode *mBlock; + FinallyNode *mFinally; + SmallVector mCatches; // There could be >1 catches. 
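+  // Illustrative example (not from the original source): for
+  //   try { ... } catch (e1) { ... } catch (e2) { ... } finally { ... }
+  // the try body goes to the block field, each catch clause becomes a CatchNode
+  // in the catch list, and the finally clause becomes the FinallyNode.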
+ +public: + TryNode() : TreeNode(NK_Try), mBlock(nullptr), mFinally(nullptr) {} + ~TryNode(){} + + BlockNode* GetBlock() {return mBlock;} + void SetBlock(BlockNode *n) {mBlock = n;} + + FinallyNode* GetFinally() {return mFinally;} + void SetFinally(FinallyNode *n) {mFinally = n;} + + unsigned GetCatchesNum() {return mCatches.GetNum();} + CatchNode* GetCatchAtIndex(unsigned i) {return mCatches.ValueAtIndex(i);} + void SetCatchAtIndex(unsigned i, CatchNode* n) {*(mCatches.RefAtIndex(i)) = n;} + void AddCatch(TreeNode *n); + void Dump(unsigned); }; @@ -557,8 +1444,8 @@ private: // right now I only put a name for it. It could have more properties. IdentifierNode *mException; public: - ExceptionNode() : mException(NULL) {mKind = NK_Exception;} - ExceptionNode(IdentifierNode *inode) : mException(inode) {mKind = NK_Exception;} + ExceptionNode(IdentifierNode *inode) : TreeNode(NK_Exception), mException(inode) {} + ExceptionNode() : ExceptionNode(nullptr) {} ~ExceptionNode(){} IdentifierNode* GetException() {return mException;} @@ -575,10 +1462,26 @@ class ReturnNode : public TreeNode { private: TreeNode *mResult; public: - ReturnNode() : mResult(NULL) {mKind = NK_Return;} + ReturnNode() : TreeNode(NK_Return), mResult(nullptr) {} ~ReturnNode(){} - void SetResult(TreeNode *t) {mResult = t;} + void SetResult(TreeNode *t) {mResult = t; SETPARENT(t);} + TreeNode* GetResult() { return mResult; } + void Dump(unsigned); +}; + +class YieldNode : public TreeNode { +private: + TreeNode *mResult; + bool mIsTransfer; // In TS, yeild* tranfers to another generator +public: + YieldNode() : TreeNode(NK_Yield), mResult(nullptr), mIsTransfer(false) {} + ~YieldNode(){} + + void SetIsTransfer(bool b = true) {mIsTransfer = b;} + bool IsTransfer() {return mIsTransfer;} + + void SetResult(TreeNode *t) {mResult = t; SETPARENT(t);} TreeNode* GetResult() { return mResult; } void Dump(unsigned); }; @@ -592,9 +1495,9 @@ public: CondBranchNode(); ~CondBranchNode(){} - void SetCond(TreeNode *t) {mCond = t; t->SetParent(this);} - void SetTrueBranch(TreeNode *t) {mTrueBranch = t; t->SetParent(this);} - void SetFalseBranch(TreeNode *t){mFalseBranch = t; t->SetParent(this);} + void SetCond(TreeNode *t) {mCond = t; SETPARENT(t);} + void SetTrueBranch(TreeNode *t) {mTrueBranch = t; SETPARENT(t);} + void SetFalseBranch(TreeNode *t){mFalseBranch = t; SETPARENT(t);} TreeNode* GetCond() {return mCond;} TreeNode* GetTrueBranch() {return mTrueBranch;} @@ -608,50 +1511,97 @@ class BreakNode : public TreeNode { private: TreeNode* mTarget; public: - BreakNode() {mKind = NK_Break; mTarget = NULL;} + BreakNode() : TreeNode(NK_Break), mTarget(nullptr) {} ~BreakNode(){} TreeNode* GetTarget() {return mTarget;} - void SetTarget(TreeNode* t){mTarget = t;} + void SetTarget(TreeNode* t){mTarget = t; SETPARENT(t);} + void Dump(unsigned); +}; + + +// Continue statement. Continue targets could be one identifier or empty. +class ContinueNode : public TreeNode { +private: + TreeNode* mTarget; +public: + ContinueNode() : TreeNode(NK_Continue), mTarget(nullptr) {} + ~ContinueNode(){} + + TreeNode* GetTarget() {return mTarget;} + void SetTarget(TreeNode* t){mTarget = t; SETPARENT(t);} void Dump(unsigned); }; +// Javascript makes for loop complicated. It creates two special syntax as below +// for (var in set) {...} +// for (var of set) {...} +// We use FL_Prop to differentiate them with regular for loop. 
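+// Illustrative mapping (not from the original source):
+//   for (i = 0; i < n; i++) { ... }   -> FLP_Regular, using the init/cond/update lists
+//   for (var k in obj)      { ... }   -> FLP_JSIn,    using the variable/set fields
+//   for (var v of arr)      { ... }   -> FLP_JSOf,    using the variable/set fields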
+enum ForLoopProp { + FLP_Regular, // this is the default property + FLP_JSIn, + FLP_JSOf, + FLP_NA +}; + class ForLoopNode : public TreeNode { private: - SmallVector mInit; + ForLoopProp mProp; + + // Regular for loop + SmallVector mInits; TreeNode *mCond; - SmallVector mUpdate; - TreeNode *mBody; // This is a block node + SmallVector mUpdates; + + // JS In or JS Of + TreeNode *mVariable; + TreeNode *mSet; + + // shared by all kinds + TreeNode *mBody; // This could be a single statement, or a block node + public: - ForLoopNode() {mCond = NULL; mBody = NULL; mKind = NK_ForLoop;} + ForLoopNode() : TreeNode(NK_ForLoop), + mCond(nullptr), mBody(nullptr), mVariable(nullptr), mSet(nullptr), mProp(FLP_Regular) {} ~ForLoopNode() {Release();} - void AddInit(TreeNode *t) {mInit.PushBack(t);} - void AddUpdate(TreeNode *t) {mUpdate.PushBack(t);} - void SetCond(TreeNode *t) {mCond = t;} - void SetBody(TreeNode *t) {mBody = t;} - - unsigned GetInitNum() {return mInit.GetNum();} - unsigned GetUpdateNum() {return mUpdate.GetNum();} - TreeNode* InitAtIndex(unsigned i) {return mInit.ValueAtIndex(i);} - TreeNode* UpdateAtIndex(unsigned i) {return mUpdate.ValueAtIndex(i);} + void AddInit(TreeNode *t); + void AddUpdate(TreeNode *t); + void SetCond(TreeNode *t) {mCond = t; SETPARENT(t);} + void SetBody(TreeNode *t) {mBody = t; SETPARENT(t);} + + unsigned GetInitsNum() {return mInits.GetNum();} + unsigned GetUpdatesNum() {return mUpdates.GetNum();} + TreeNode* GetInitAtIndex(unsigned i) {return mInits.ValueAtIndex(i);} + void SetInitAtIndex(unsigned i, TreeNode* n) {*(mInits.RefAtIndex(i)) = n;} + TreeNode* GetUpdateAtIndex(unsigned i) {return mUpdates.ValueAtIndex(i);} + void SetUpdateAtIndex(unsigned i, TreeNode* n) {*(mUpdates.RefAtIndex(i)) = n;} TreeNode* GetCond() {return mCond;} TreeNode* GetBody() {return mBody;} - void Release() {mInit.Release(); mUpdate.Release();} + ForLoopProp GetProp() {return mProp;} + void SetProp(ForLoopProp p) {mProp = p;} + + TreeNode* GetVariable() {return mVariable;} + void SetVariable(TreeNode *t) {mVariable = t; SETPARENT(t);} + + TreeNode* GetSet() {return mSet;} + void SetSet(TreeNode *t) {mSet = t; SETPARENT(t);} + + void Release() {mInits.Release(); mUpdates.Release();} void Dump(unsigned); }; class WhileLoopNode : public TreeNode { private: TreeNode *mCond; - TreeNode *mBody; // This is a block node + TreeNode *mBody; // This could be a single statement, or a block node public: - WhileLoopNode() {mCond = NULL; mBody = NULL; mKind = NK_WhileLoop;} + WhileLoopNode() : TreeNode(NK_WhileLoop), mCond(nullptr), mBody(nullptr) {} ~WhileLoopNode() {Release();} - void SetCond(TreeNode *t) {mCond = t;} - void SetBody(TreeNode *t) {mBody = t;} + void SetCond(TreeNode *t) {mCond = t; SETPARENT(t);} + void SetBody(TreeNode *t) {mBody = t; SETPARENT(t);} TreeNode* GetCond() {return mCond;} TreeNode* GetBody() {return mBody;} @@ -662,13 +1612,13 @@ public: class DoLoopNode : public TreeNode { private: TreeNode *mCond; - TreeNode *mBody; // This is a block node + TreeNode *mBody; // This could be a single statement, or a block node public: - DoLoopNode() {mCond = NULL; mBody = NULL; mKind = NK_DoLoop;} + DoLoopNode() : TreeNode(NK_DoLoop), mCond(nullptr), mBody(nullptr) {} ~DoLoopNode(){Release();} - void SetCond(TreeNode *t) {mCond = t;} - void SetBody(TreeNode *t) {mBody = t;} + void SetCond(TreeNode *t) {mCond = t; SETPARENT(t);} + void SetBody(TreeNode *t) {mBody = t; SETPARENT(t);} TreeNode* GetCond() {return mCond;} TreeNode* GetBody() {return mBody;} @@ -689,11 +1639,11 @@ 
private: bool mIsDefault; // default lable TreeNode *mValue; // the constant expression public: - SwitchLabelNode() : mIsDefault(false), mValue(NULL) {mKind = NK_SwitchLabel;} + SwitchLabelNode() : TreeNode(NK_SwitchLabel), mIsDefault(false), mValue(nullptr) {} ~SwitchLabelNode(){} void SetIsDefault(bool b) {mIsDefault = b;} - void SetValue(TreeNode *t){mValue = t;} + void SetValue(TreeNode *t){mValue = t; SETPARENT(t);} bool IsDefault() {return mIsDefault;} TreeNode* GetValue() {return mValue;} @@ -709,16 +1659,19 @@ private: SmallVector mStmts; public: - SwitchCaseNode() {mKind = NK_SwitchCase;} + SwitchCaseNode() :TreeNode(NK_SwitchCase) {} ~SwitchCaseNode() {Release();} unsigned GetLabelsNum() {return mLabels.GetNum();} - TreeNode* GetLabelAtIndex(unsigned i) {return mLabels.ValueAtIndex(i);} void AddLabel(TreeNode*); + SwitchLabelNode* GetLabelAtIndex(unsigned i) {return mLabels.ValueAtIndex(i);} + void SetLabelAtIndex(unsigned i, SwitchLabelNode* n) {*(mLabels.RefAtIndex(i)) = n;} unsigned GetStmtsNum() {return mStmts.GetNum();} TreeNode* GetStmtAtIndex(unsigned i) {return mStmts.ValueAtIndex(i);} + void SetStmtAtIndex(unsigned i, TreeNode* n) {*(mStmts.RefAtIndex(i)) = n; SETPARENT(n);} void AddStmt(TreeNode*); + void PopStmt() {mStmts.PopBack();} void Release() {mStmts.Release(); mLabels.Release();} void Dump(unsigned); @@ -726,18 +1679,22 @@ public: class SwitchNode : public TreeNode { private: - TreeNode *mCond; + TreeNode *mExpr; SmallVector mCases; public: - SwitchNode() : mCond(NULL) {mKind = NK_Switch;} + SwitchNode() : TreeNode(NK_Switch), mExpr(nullptr) {} ~SwitchNode() {Release();} - TreeNode* GetCond() {return mCond;} - void SetCond(TreeNode *c) {mCond = c;} + TreeNode* GetExpr() {return mExpr;} + void SetExpr(TreeNode *c) {mExpr = c; SETPARENT(c);} unsigned GetCasesNum() {return mCases.GetNum();} - TreeNode* GetCaseAtIndex(unsigned i) {return mCases.ValueAtIndex(i);} - void AddCase(TreeNode *c); + SwitchCaseNode* GetCaseAtIndex(unsigned i) {return mCases.ValueAtIndex(i);} + void SetCaseAtIndex(unsigned i, SwitchCaseNode* n) {*(mCases.RefAtIndex(i)) = n;} + void AddCase(SwitchCaseNode* n) {mCases.PushBack(n); SETPARENT(n);} + + void AddSwitchCase(TreeNode *t); + SwitchCaseNode* SwitchLabelToCase(SwitchLabelNode*); void Release() {mCases.Release();} void Dump(unsigned); @@ -754,13 +1711,13 @@ private: TreeNode *mExpr; TreeNode *mMsg; public: - AssertNode() : mExpr(NULL), mMsg(NULL) {mKind = NK_Assert;} + AssertNode() : TreeNode(NK_Assert), mExpr(nullptr), mMsg(nullptr) {} ~AssertNode(){} TreeNode* GetExpr() {return mExpr;} TreeNode* GetMsg() {return mMsg;} - void SetExpr(TreeNode *t) {mExpr = t;} - void SetMsg(TreeNode *t) {mMsg = t;} + void SetExpr(TreeNode *t) {mExpr = t; SETPARENT(t);} + void SetMsg(TreeNode *t) {mMsg = t; SETPARENT(t);} void Dump(unsigned); }; @@ -773,20 +1730,31 @@ class CallNode : public TreeNode { private: TreeNode *mMethod; ExprListNode mArgs; + SmallVector mTypeArguments; + TemplateLiteralNode *mTaggedTemplate; public: - CallNode() : mMethod(NULL){mKind = NK_Call;} - ~CallNode(){} + CallNode() : TreeNode(NK_Call), mMethod(nullptr), mTaggedTemplate(nullptr) {} + ~CallNode(){Release();} void Init(); TreeNode* GetMethod() {return mMethod;} - void SetMethod(TreeNode *t) {mMethod = t;} + void SetMethod(TreeNode *t) {mMethod = t; SETPARENT(t);} - void AddArg(TreeNode *t); - unsigned GetArgsNum() {return mArgs.GetNum();} - TreeNode* GetArg(unsigned index) {return mArgs.ExprAtIndex(index);} + TemplateLiteralNode* GetTaggedTemplate() {return 
mTaggedTemplate;} + void SetTaggedTemplate(TemplateLiteralNode *t) {mTaggedTemplate = t; SETPARENT(t);} - void Release() {mArgs.Release();} + void AddArg(TreeNode *t) {mArgs.Merge(t); SETPARENT(t);} + unsigned GetArgsNum() {return mArgs.GetExprsNum();} + TreeNode* GetArg(unsigned index) {return mArgs.GetExprAtIndex(index);} + void SetArg(unsigned i, TreeNode* n) {mArgs.SetExprAtIndex(i, n); SETPARENT(n);} + + unsigned GetTypeArgumentsNum() {return mTypeArguments.GetNum();} + TreeNode* GetTypeArgumentAtIndex(unsigned i) {return mTypeArguments.ValueAtIndex(i);} + void SetTypeArgumentAtIndex(unsigned i, TreeNode* n) {*(mTypeArguments.RefAtIndex(i)) = n; SETPARENT(n);} + void AddTypeArgument(TreeNode *); + + void Release() {mArgs.Release(); mTypeArguments.Release();} void Dump(unsigned); }; @@ -817,22 +1785,32 @@ public: bool mIsInstInit; // Instance Initializer SmallVector mAttrs; + TreeNode *mSync; // Java allows a sync object on a Block. + public: - BlockNode(){mKind = NK_Block; mIsInstInit = false;} + BlockNode() : TreeNode(NK_Block), mIsInstInit(false), mSync(nullptr) {} ~BlockNode() {Release();} // Instance Initializer and Attributes related bool IsInstInit() {return mIsInstInit;} - void SetIsInstInit() {mIsInstInit = true;} - unsigned GetAttrsNum() {return mAttrs.GetNum();} - void AddAttr(AttrId a) {mAttrs.PushBack(a);} - AttrId AttrAtIndex(unsigned i) {return mAttrs.ValueAtIndex(i);} + void SetIsInstInit(bool b = true) {mIsInstInit = b;} + unsigned GetAttrsNum() {return mAttrs.GetNum();} + void AddAttr(AttrId a) {mAttrs.PushBack(a);} + AttrId GetAttrAtIndex(unsigned i) {return mAttrs.ValueAtIndex(i);} + void SetAttrAtIndex(unsigned i, AttrId n) {*(mAttrs.RefAtIndex(i)) = n;} + + void SetSync(TreeNode *n) {mSync = n;} + TreeNode* GetSync() {return mSync;} unsigned GetChildrenNum() {return mChildren.GetNum();} TreeNode* GetChildAtIndex(unsigned i) {return mChildren.ValueAtIndex(i);} - void AddChild(TreeNode *c) {mChildren.PushBack(c); c->SetParent(this);} + void SetChildAtIndex(unsigned i, TreeNode* n) {*(mChildren.RefAtIndex(i)) = n; SETPARENT(n);} + void AddChild(TreeNode *c); void ClearChildren() {mChildren.Clear();} + void InsertStmtAfter(TreeNode *new_stmt, TreeNode *exist_stmt); + void InsertStmtBefore(TreeNode *new_stmt, TreeNode *exist_stmt); + void Release() {mChildren.Release();} void Dump(unsigned); }; @@ -850,11 +1828,25 @@ private: SmallVector mAttrs; SmallVector mAnnotations; //annotation or pragma SmallVector mThrows; // exceptions it can throw - TreeNode *mType; // return type + SmallVector mTypeParams; + TreeNode *mFuncName; // function name, usually an identifier + TreeNode *mRetType; // return type SmallVector mParams; // BlockNode *mBody; DimensionNode *mDims; - bool mIsConstructor; + TreeNode *mAssert; // In typescript, a function could have asserts + // like: func () : asserts v is string + // or a type predicate signature, like + // func() : v is string + // So mAssert could be either an AssertNode or + // an IsNode. + bool mIsConstructor; + bool mIsGetAccessor; + bool mIsSetAccessor; + bool mIsGenerator; // JS/TS generator + bool mIsIterator; // JS/TS iterator + bool mIsCallSignature; // no func name, no func body + bool mIsConstructSignature; // no func name, no func body, and is a construct sig in TS public: FunctionNode(); @@ -864,40 +1856,69 @@ public: // the PassNode in the tree. 
void CleanUp(); - BlockNode* GetBody() {return mBody;} - void AddBody(BlockNode *b) {mBody = b; b->SetParent(this); CleanUp();} + TreeNode* GetFuncName() {return mFuncName;} + void SetFuncName(TreeNode *n) {mFuncName = n; SETPARENT(n);} - bool IsConstructor() {return mIsConstructor;} - void SetIsConstructor() {mIsConstructor = true;} + BlockNode* GetBody() {return mBody;} + void SetBody(BlockNode *b) {mBody = b; SETPARENT(b); if(b) CleanUp();} + + TreeNode* GetAssert() {return mAssert;} + void SetAssert(TreeNode *b) {mAssert = b; SETPARENT(b);} + + bool IsConstructor() {return mIsConstructor;} + void SetIsConstructor(bool b = true) {mIsConstructor = b;} + bool IsGenerator() {return mIsGenerator;} + void SetIsGenerator(bool b = true) {mIsGenerator = b;} + bool IsIterator() {return mIsIterator;} + void SetIsIterator(bool b = true) {mIsIterator = b;} + bool IsGetAccessor() {return mIsGetAccessor;} + void SetIsGetAccessor(bool b = true) {mIsGetAccessor = b;} + bool IsSetAccessor() {return mIsSetAccessor;} + void SetIsSetAccessor(bool b = true) {mIsSetAccessor = b;} + bool IsCallSignature() {return mIsCallSignature;} + void SetIsCallSignature(bool b = true) {mIsCallSignature = b;} + bool IsConstructSignature() {return mIsConstructSignature;} + void SetIsConstructSignature(bool b = true) {mIsConstructSignature = b;} unsigned GetParamsNum() {return mParams.GetNum();} TreeNode* GetParam(unsigned i) {return mParams.ValueAtIndex(i);} - void AddParam(TreeNode *t) {mParams.PushBack(t); t->SetParent(this);} - + void SetParam(unsigned i, TreeNode* n) {*(mParams.RefAtIndex(i)) = n; SETPARENT(n);} + void AddParam(TreeNode *t) {mParams.PushBack(t); SETPARENT(t);} + void ClearParam() {mParams.Clear();} + // Attributes related - unsigned GetAttrsNum() {return mAttrs.GetNum();} - void AddAttr(AttrId a) {mAttrs.PushBack(a);} - AttrId AttrAtIndex(unsigned i) {return mAttrs.ValueAtIndex(i);} + unsigned GetAttrsNum() {return mAttrs.GetNum();} + void AddAttr(AttrId a) {mAttrs.PushBack(a);} + AttrId GetAttrAtIndex(unsigned i) {return mAttrs.ValueAtIndex(i);} + void SetAttrAtIndex(unsigned i, AttrId n) {*(mAttrs.RefAtIndex(i)) = n;} // Annotation/Pragma related unsigned GetAnnotationsNum() {return mAnnotations.GetNum();} void AddAnnotation(AnnotationNode *n) {mAnnotations.PushBack(n);} - AnnotationNode* AnnotationAtIndex(unsigned i) {return mAnnotations.ValueAtIndex(i);} + AnnotationNode* GetAnnotationAtIndex(unsigned i) {return mAnnotations.ValueAtIndex(i);} + void SetAnnotationAtIndex(unsigned i, AnnotationNode* n) {*(mAnnotations.RefAtIndex(i)) = n;} // Exception/throw related - unsigned GetThrowNum() {return mThrows.GetNum();} - void AddThrow(ExceptionNode *n){mThrows.PushBack(n);} - ExceptionNode* ThrowAtIndex(unsigned i) {return mThrows.ValueAtIndex(i);} + unsigned GetThrowsNum() {return mThrows.GetNum();} + void AddThrow(ExceptionNode *n) {mThrows.PushBack(n);} + ExceptionNode* GetThrowAtIndex(unsigned i) {return mThrows.ValueAtIndex(i);} + void SetThrowAtIndex(unsigned i, ExceptionNode* n) {*(mThrows.RefAtIndex(i)) = n;} - void SetType(TreeNode *t) {mType = t;} - TreeNode* GetType(){return mType;} + unsigned GetTypeParamsNum() {return mTypeParams.GetNum();} + TreeNode* GetTypeParamAtIndex(unsigned i) {return mTypeParams.ValueAtIndex(i);} + void SetTypeParamAtIndex(unsigned i, TreeNode* n) {*(mTypeParams.RefAtIndex(i)) = n; SETPARENT(n);} + void AddTypeParam(TreeNode *); + void SetRetType(TreeNode *t) {mRetType = t; SETPARENT(t);} + TreeNode* GetRetType(){return mRetType;} + + DimensionNode* GetDims() {return 
mDims;} void SetDims(DimensionNode *t) {mDims = t;} - unsigned GetDimsNum() {return mDims->GetDimsNum();} - bool IsArray() {return mDims && GetDimsNum() > 0;} - unsigned AddDim(unsigned i = 0){mDims->AddDim(i);} // 0 means unspecified - unsigned GetNthNum(unsigned n) {return mDims->GetNthDim(n);} // 0 means unspecified. - void SetNthNum(unsigned n, unsigned i) {mDims->SetNthDim(n, i);} + unsigned GetDimsNum() {return mDims->GetDimensionsNum();} + bool IsArray() {return mDims && mDims->GetDimensionsNum() > 0;} + void AddDim(unsigned i = 0){mDims->AddDimension(i);} // 0 means unspecified + unsigned GetNthDim(unsigned n) {return mDims->GetDimension(n);} // 0 means unspecified. + void SetNthDim(unsigned n, unsigned i) {mDims->SetDimension(n, i);} // Override equivalent. bool OverrideEquivalent(FunctionNode*); @@ -922,10 +1943,26 @@ private: SmallVector mFields; SmallVector mMethods; public: - InterfaceNode() : mIsAnnotation(false) {mKind = NK_Interface;} + InterfaceNode() : TreeNode(NK_Interface), mIsAnnotation(false) {} ~InterfaceNode() {} + unsigned GetSuperInterfacesNum() {return mSuperInterfaces.GetNum();} + void AddSuperInterface(InterfaceNode* a) {mSuperInterfaces.PushBack(a);} + InterfaceNode* GetSuperInterfaceAtIndex(unsigned i) {return mSuperInterfaces.ValueAtIndex(i);} + void SetSuperInterfaceAtIndex(unsigned i, InterfaceNode* n) {*(mSuperInterfaces.RefAtIndex(i)) = n;} + + unsigned GetFieldsNum() {return mFields.GetNum();} + void AddField(IdentifierNode* n) {mFields.PushBack(n); SETPARENT(n);} + IdentifierNode* GetField(unsigned i) {return mFields.ValueAtIndex(i);} + void SetField(unsigned i, IdentifierNode* n) {*(mFields.RefAtIndex(i)) = n; SETPARENT(n);} + + unsigned GetMethodsNum() {return mMethods.GetNum();} + void AddMethod(FunctionNode* a) {mMethods.PushBack(a);} + FunctionNode* GetMethod(unsigned i) {return mMethods.ValueAtIndex(i);} + void SetMethod(unsigned i, FunctionNode* n) {*(mMethods.RefAtIndex(i)) = n;} + void SetIsAnnotation(bool b) {mIsAnnotation = b;} + bool IsAnnotation() {return mIsAnnotation;} void Construct(BlockNode *); void Dump(unsigned); @@ -933,25 +1970,6 @@ public: ////////////////////////////////////////////////////////////////////////// // Class Nodes -// & -// ClassBody -->BlockNode -// In reality there is no such thing as ClassBody, since this 'body' will -// eventually become fields and methods of a class. However, during parsing -// the children are processed before parents, which means we need to have -// all fields and members ready before the class. So we come up with -// this ClassBody to temporarily hold these subtrees, and let the class -// to interpret it in the future. Once the class is done, this ClassBody -// is useless. In the real implementation, ClassBody is actually a BlockNode. -// -// NOTE. There is one important thing to know is This design is following -// the common rules of Java/C++, where a declaration of field or -// method has the scope of the whole class. This means it can be -// used before the declaratioin. There is no order of first decl then -// use. So we can do a Construct() which collect all Fields & Methods. -// -// However, we still keep mBody, which actually tells the order of -// the original program. Any language requiring order could use mBody -// to implement. ////////////////////////////////////////////////////////////////////////// class ClassNode : public TreeNode { @@ -960,46 +1978,101 @@ private: // and special semantic rules. We define JavaEnum here too. 
For other // languages like C/C++ which have very simply Enum, we will have a // dedicated EnumNode for them. - bool mJavaEnum; + bool mIsJavaEnum; - SmallVector mSuperClasses; - SmallVector mSuperInterfaces; - SmallVector mAttributes; - BlockNode *mBody; + SmallVector mSuperClasses; + SmallVector mSuperInterfaces; + SmallVector mAttributes; + SmallVector mAnnotations; //annotation or pragma + SmallVector mTypeParams; - SmallVector mFields; // aka Field + SmallVector mFields; // a Field could be identifier or computed name SmallVector mMethods; SmallVector mConstructors; SmallVector mInstInits; // instance initializer SmallVector mLocalClasses; SmallVector mLocalInterfaces; + SmallVector mImports; + SmallVector mExports; + + SmallVector mDeclares; // First coming from TS. public: - ClassNode(){mKind = NK_Class; mJavaEnum = false; mBody = NULL;} + ClassNode() : TreeNode(NK_Class), mIsJavaEnum(false) {} ~ClassNode() {Release();} - bool IsJavaEnum() {return mJavaEnum;} - void SetJavaEnum(){mJavaEnum = true;} - - void AddSuperClass(ClassNode *n) {mSuperClasses.PushBack(n);} - void AddSuperInterface(InterfaceNode *n) {mSuperInterfaces.PushBack(n);} - void AddAttr(AttrId a) {mAttributes.PushBack(a);} - void AddBody(BlockNode *b) {mBody = b; b->SetParent(this);} - - unsigned GetFieldsNum() {return mFields.GetNum();} - unsigned GetMethodsNum() {return mMethods.GetNum();} - unsigned GetConstructorNum() {return mConstructors.GetNum();} - unsigned GetInstInitsNum() {return mInstInits.GetNum();} - unsigned GetLocalClassesNum() {return mLocalClasses.GetNum();} - unsigned GetLocalInterfacesNum(){return mLocalInterfaces.GetNum();} - IdentifierNode* GetField(unsigned i) {return mFields.ValueAtIndex(i);} - FunctionNode* GetMethod(unsigned i) {return mMethods.ValueAtIndex(i);} - FunctionNode* GetConstructor(unsigned i) {return mConstructors.ValueAtIndex(i);} + bool IsJavaEnum() {return mIsJavaEnum;} + void SetIsJavaEnum(bool b = true){mIsJavaEnum = b;} + + // Annotation/Pragma related + unsigned GetAnnotationsNum() {return mAnnotations.GetNum();} + void AddAnnotation(AnnotationNode *n) {mAnnotations.PushBack(n);} + AnnotationNode* GetAnnotationAtIndex(unsigned i) {return mAnnotations.ValueAtIndex(i);} + void SetAnnotationAtIndex(unsigned i, AnnotationNode* n) {*(mAnnotations.RefAtIndex(i)) = n;} + + // TypeParameter + unsigned GetTypeParamsNum() {return mTypeParams.GetNum();} + void AddTypeParam(TreeNode *n); + TypeParameterNode* GetTypeParamAtIndex(unsigned i) {return mTypeParams.ValueAtIndex(i);} + void SetTypeParamAtIndex(unsigned i, TypeParameterNode* n) {*(mTypeParams.RefAtIndex(i)) = n;} + + void AddSuperClass(TreeNode *n); + unsigned GetSuperClassesNum() {return mSuperClasses.GetNum();} + TreeNode* GetSuperClass(unsigned i) {return mSuperClasses.ValueAtIndex(i);} + void SetSuperClass(unsigned i, TreeNode* n) {*(mSuperClasses.RefAtIndex(i)) = n;} + + void AddSuperInterface(TreeNode *n); + unsigned GetSuperInterfacesNum() {return mSuperInterfaces.GetNum();} + TreeNode* GetSuperInterface(unsigned i) {return mSuperInterfaces.ValueAtIndex(i);} + void SetSuperInterface(unsigned i, TreeNode* n) {*(mSuperInterfaces.RefAtIndex(i)) = n;} + + void AddAttr(AttrId a) {mAttributes.PushBack(a);} + void AddAttribute(AttrId a) {mAttributes.PushBack(a);} + + void AddField(TreeNode *n) {mFields.PushBack(n); SETPARENT(n);} + void AddMethod(FunctionNode *n) {mMethods.PushBack(n);} + void AddConstructor(FunctionNode *n) {mConstructors.PushBack(n);} + void AddInstInit(BlockNode *n) {mInstInits.PushBack(n);} + void 
AddLocalClass(ClassNode *n) {mLocalClasses.PushBack(n);} + void AddLocalInterface(InterfaceNode *n) {mLocalInterfaces.PushBack(n);} + void AddImport(ImportNode *n) {mImports.PushBack(n);} + void AddExport(ExportNode *n) {mExports.PushBack(n);} + void AddDeclare(DeclareNode *n) {mDeclares.PushBack(n);} + + + unsigned GetAttributesNum() {return mAttributes.GetNum();} + unsigned GetFieldsNum() {return mFields.GetNum();} + unsigned GetMethodsNum() {return mMethods.GetNum();} + unsigned GetConstructorsNum() {return mConstructors.GetNum();} + unsigned GetInstInitsNum() {return mInstInits.GetNum();} + unsigned GetLocalClassesNum() {return mLocalClasses.GetNum();} + unsigned GetLocalInterfacesNum() {return mLocalInterfaces.GetNum();} + unsigned GetImportsNum() {return mImports.GetNum();} + unsigned GetExportsNum() {return mExports.GetNum();} + unsigned GetDeclaresNum() {return mDeclares.GetNum();} + + AttrId GetAttribute(unsigned i) {return mAttributes.ValueAtIndex(i);} + void SetAttribute(unsigned i, AttrId n) {*(mAttributes.RefAtIndex(i)) = n;} + TreeNode* GetField(unsigned i) {return mFields.ValueAtIndex(i);} + void SetField(unsigned i, TreeNode* n) {*(mFields.RefAtIndex(i)) = n; SETPARENT(n);} + FunctionNode* GetMethod(unsigned i) {return mMethods.ValueAtIndex(i);} + void SetMethod(unsigned i, FunctionNode* n) {*(mMethods.RefAtIndex(i)) = n;} + FunctionNode* GetConstructor(unsigned i) {return mConstructors.ValueAtIndex(i);} + void SetConstructor(unsigned i, FunctionNode* n) {*(mConstructors.RefAtIndex(i)) = n;} BlockNode* GetInstInit(unsigned i) {return mInstInits.ValueAtIndex(i);} + void SetInstInit(unsigned i, BlockNode* n) {*(mInstInits.RefAtIndex(i)) = n;} ClassNode* GetLocalClass(unsigned i) {return mLocalClasses.ValueAtIndex(i);} - InterfaceNode* GetLocalInterface(unsigned i) {return mLocalInterfaces.ValueAtIndex(i);} - - void Construct(); + void SetLocalClass(unsigned i, ClassNode* n) {*(mLocalClasses.RefAtIndex(i)) = n;} + InterfaceNode* GetLocalInterface(unsigned i) {return mLocalInterfaces.ValueAtIndex(i);} + void SetLocalInterface(unsigned i, InterfaceNode* n) {*(mLocalInterfaces.RefAtIndex(i)) = n;} + ImportNode* GetImport(unsigned i) {return mImports.ValueAtIndex(i);} + void SetImport(unsigned i, ImportNode* n) {*(mImports.RefAtIndex(i)) = n;} + ExportNode* GetExport(unsigned i) {return mExports.ValueAtIndex(i);} + void SetExport(unsigned i, ExportNode* n) {*(mExports.RefAtIndex(i)) = n;} + DeclareNode* GetDeclare(unsigned i) {return mDeclares.ValueAtIndex(i);} + void SetDeclare(unsigned i, DeclareNode* n) {*(mDeclares.RefAtIndex(i)) = n;} + + void Construct(BlockNode*); void Release(); void Dump(unsigned); }; @@ -1018,69 +2091,332 @@ class PassNode : public TreeNode { private: SmallVector mChildren; public: - PassNode() {mKind = NK_Pass;} + PassNode() : TreeNode(NK_Pass) {} ~PassNode() {Release();} unsigned GetChildrenNum() {return mChildren.GetNum();} - TreeNode* GetChild(unsigned idx) {return mChildren.ValueAtIndex(idx);} - void SetChild(unsigned idx, TreeNode *t) {*(mChildren.RefAtIndex(idx)) = t;} + TreeNode* GetChild(unsigned i) {return mChildren.ValueAtIndex(i);} + void SetChild(unsigned i, TreeNode* n) {*(mChildren.RefAtIndex(i)) = n;} - void AddChild(TreeNode *c) {mChildren.PushBack(c); c->SetParent(this);} + void AddChild(TreeNode *c) {mChildren.PushBack(c); SETPARENT(c);} void Dump(unsigned); void Release() {mChildren.Release();} }; //////////////////////////////////////////////////////////////////////////// // Lambda Expression +// Java Lambda expression and JS arrow 
function have similar syntax. +// Also in Typescript, FunctionType and Constructor Type have the similar syntax. +// We put them in the same node. //////////////////////////////////////////////////////////////////////////// +// This property tells categories of LambdaNode +enum LambdaProperty { + LP_JavaLambda, + LP_JSArrowFunction, + LP_NA +}; + class LambdaNode : public TreeNode { private: - SmallVector mParams; - TreeNode *mBody; // the body is block. + LambdaProperty mProperty; + TreeNode *mRetType; // The return type. nullptr as Java Lambda. + SmallVector mParams; // A param could be an IdentifierNode or DeclNode. + TreeNode *mBody; // the body could be an expression, or block. + // nullptr as TS FunctionType and ConstructorType + SmallVector mTypeParams; + SmallVector mAttrs; public: - LambdaNode() {mBody = NULL; mKind = NK_Lambda;} + LambdaNode() : TreeNode(NK_Lambda), + mBody(nullptr), mProperty(LP_JSArrowFunction), mRetType(nullptr) {} ~LambdaNode(){Release();} - void AddParam(IdentifierNode *n) {mParams.PushBack(n); n->SetParent(this);} - void SetBody(TreeNode *n) {mBody = n; n->SetParent(this);} + + TreeNode* GetBody() {return mBody;} + void SetBody(TreeNode *n) {mBody = n; SETPARENT(n);} + + LambdaProperty GetProperty() {return mProperty;} + void SetProperty(LambdaProperty p) {mProperty = p;} + + TreeNode* GetRetType() {return mRetType;} + void SetRetType(TreeNode* t) {mRetType = t; SETPARENT(t);} + + unsigned GetParamsNum() {return mParams.GetNum();} + TreeNode* GetParam(unsigned i) {return mParams.ValueAtIndex(i);} + void SetParam(unsigned i, TreeNode* n) {*(mParams.RefAtIndex(i)) = n; SETPARENT(n);} + void AddParam(TreeNode *n) {mParams.PushBack(n); SETPARENT(n);} + + // TypeParameter + unsigned GetTypeParamsNum() {return mTypeParams.GetNum();} + void AddTypeParam(TreeNode *n); + TypeParameterNode* GetTypeParamAtIndex(unsigned i) {return mTypeParams.ValueAtIndex(i);} + void SetTypeParamAtIndex(unsigned i, TypeParameterNode* n) {*(mTypeParams.RefAtIndex(i)) = n;} + + // Attributes related + unsigned GetAttrsNum() const {return mAttrs.GetNum();} + void AddAttr(AttrId a) {mAttrs.PushBack(a);} + AttrId GetAttrAtIndex(unsigned i) {return mAttrs.ValueAtIndex(i);} + void SetAttrAtIndex(unsigned i, AttrId n) {*(mAttrs.RefAtIndex(i)) = n;} void Release() {mParams.Release();} void Dump(unsigned); }; //////////////////////////////////////////////////////////////////////////// -// The AST Tree +// InstanceOf Expression +// This is first created for Java's instanceof operation. It has the form of +// left instanceof right. +//////////////////////////////////////////////////////////////////////////// + +class InstanceOfNode : public TreeNode { +private: + TreeNode *mLeft; + TreeNode *mRight; +public: + InstanceOfNode() : TreeNode(NK_InstanceOf), mLeft(nullptr), mRight(nullptr) {} + ~InstanceOfNode(){Release();} + + TreeNode* GetLeft() {return mLeft;} + void SetLeft(TreeNode *n) {mLeft = n; SETPARENT(n);} + TreeNode* GetRight() {return mRight;} + void SetRight(TreeNode *n){mRight = n; SETPARENT(n);} + + void Dump(unsigned); +}; + +//////////////////////////////////////////////////////////////////////////// +// TypeOf Expression +// First coming from typescript. 
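+// Reviewer note (illustrative): for an expression like "typeof x", mExpr is
+// expected to point to the tree node built for 'x'.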
+//////////////////////////////////////////////////////////////////////////// + +class TypeOfNode : public TreeNode { +private: + TreeNode *mExpr; +public: + TypeOfNode() : TreeNode(NK_TypeOf), mExpr(nullptr) {} + ~TypeOfNode(){Release();} + + TreeNode* GetExpr() {return mExpr;} + void SetExpr(TreeNode *n) {mExpr = n; SETPARENT(n);} + + void Dump(unsigned); +}; + +//////////////////////////////////////////////////////////////////////////// +// KeyOf Expression +// First coming from typescript. +//////////////////////////////////////////////////////////////////////////// + +class KeyOfNode : public TreeNode { +private: + TreeNode *mExpr; +public: + KeyOfNode() : TreeNode(NK_KeyOf), mExpr(nullptr) {} + ~KeyOfNode(){Release();} + + TreeNode* GetExpr() {return mExpr;} + void SetExpr(TreeNode *n) {mExpr = n; SETPARENT(n);} + + void Dump(unsigned); +}; + +//////////////////////////////////////////////////////////////////////////// +// Infer Expression +// First coming from typescript. +//////////////////////////////////////////////////////////////////////////// + +class InferNode : public TreeNode { +private: + TreeNode *mExpr; +public: + InferNode() : TreeNode(NK_Infer), mExpr(nullptr) {} + ~InferNode(){Release();} + + TreeNode* GetExpr() {return mExpr;} + void SetExpr(TreeNode *n) {mExpr = n; SETPARENT(n);} + + void Dump(unsigned); +}; + +//////////////////////////////////////////////////////////////////////////// +// In Expression +// First coming from Javascript. It's like +// A is IN B. +// B is a set of properties. A is one of the properties. +//////////////////////////////////////////////////////////////////////////// + +class InNode : public TreeNode { +private: + TreeNode *mLeft; + TreeNode *mRight; +public: + InNode() : TreeNode(NK_In), mLeft(nullptr), mRight(nullptr) {} + ~InNode(){Release();} + + TreeNode* GetLeft() {return mLeft;} + void SetLeft(TreeNode *n) {mLeft = n; SETPARENT(n);} + TreeNode* GetRight() {return mRight;} + void SetRight(TreeNode *n){mRight = n; SETPARENT(n);} + + void Dump(unsigned); +}; + +//////////////////////////////////////////////////////////////////////////// +// ComputedName Expression +// First coming from Javascript. 
It's like +// [ xxx ] //////////////////////////////////////////////////////////////////////////// -class AppealNode; -class ASTBuilder; +enum CompNameProp { + CNP_NA = 0, + CNP_Rem_ReadOnly = 1, + CNP_Add_ReadOnly = 1 << 1, + CNP_Rem_Optional = 1 << 2, + CNP_Add_Optional = 1 << 3, +}; + +class ComputedNameNode : public TreeNode { +private: + unsigned mProp; + TreeNode *mExpr; + TreeNode *mInit; + TreeNode *mExtendType; // This is the type extending expression + // of the mapped property + SmallVector mAttrs; +public: + ComputedNameNode() : TreeNode(NK_ComputedName), + mProp(CNP_NA), mExpr(nullptr), mExtendType(nullptr), mInit(nullptr) {} + ~ComputedNameNode(){Release();} + + void Release() {mAttrs.Release();} + + unsigned GetProp() {return mProp;} + void SetProp(unsigned p) {mProp = mProp | p;} + + TreeNode* GetExpr() {return mExpr;} + void SetExpr(TreeNode *n) {mExpr = n; SETPARENT(n);} + TreeNode* GetInit() {return mInit;} + void SetInit(TreeNode *n) {mInit = n; SETPARENT(n);} + TreeNode* GetExtendType() {return mExtendType;} + void SetExtendType(TreeNode *n) {mExtendType = n; SETPARENT(n);} + + // Attributes related + unsigned GetAttrsNum() const {return mAttrs.GetNum();} + void AddAttr(AttrId a) {mAttrs.PushBack(a);} + AttrId GetAttrAtIndex(unsigned i) {return mAttrs.ValueAtIndex(i);} + void SetAttrAtIndex(unsigned i, AttrId n) {*(mAttrs.RefAtIndex(i)) = n;} + + void Dump(unsigned); +}; + +//////////////////////////////////////////////////////////////////////////// +// Is Expression +// First coming from Typescript. It's like +// A is B. +// B is usually a type. +//////////////////////////////////////////////////////////////////////////// -class ASTTree { +class IsNode : public TreeNode { +private: + TreeNode *mLeft; + TreeNode *mRight; public: - TreePool mTreePool; - TreeNode *mRootNode; + IsNode() : TreeNode(NK_Is), mLeft(nullptr), mRight(nullptr) {} + ~IsNode(){Release();} + + TreeNode* GetLeft() {return mLeft;} + void SetLeft(TreeNode *n) {mLeft = n; SETPARENT(n);} + TreeNode* GetRight() {return mRight;} + void SetRight(TreeNode *n){mRight = n; SETPARENT(n);} + + void Dump(unsigned); +}; +//////////////////////////////////////////////////////////////////////////// +// Await node +// 'await' is waiting on some expression to finish. +//////////////////////////////////////////////////////////////////////////// + +class AwaitNode : public TreeNode { private: - // We need a set of functions to deal with some common manipulations of - // most languages during AST Building. You can disable it if some functions - // are not what you want. - TreeNode* Manipulate(AppealNode*); - TreeNode* Manipulate2Binary(TreeNode*, TreeNode*); - TreeNode* Manipulate2Cast(TreeNode*, TreeNode*); + TreeNode *mExpr; +public: + AwaitNode() : TreeNode(NK_Await), mExpr(nullptr) {} + ~AwaitNode() {} + + TreeNode* GetExpr() {return mExpr;} + void SetExpr(TreeNode *n) {mExpr = n; SETPARENT(n);} + + void Dump(unsigned); +}; + +//////////////////////////////////////////////////////////////////////////// +// Tuple Type +// [label : type, label : type, ...] 
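+// Reviewer sketch (illustrative): the Typescript tuple type "[x: number, y: string]"
+// would become a TupleTypeNode with two NameTypePairNode fields, one pairing 'x'
+// with number and the other pairing 'y' with string.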
+//////////////////////////////////////////////////////////////////////////// +class NameTypePairNode : public TreeNode { +private: + TreeNode *mVar; + TreeNode *mType; public: - ASTTree(); - ~ASTTree(); + NameTypePairNode() : TreeNode(NK_NameTypePair), mVar(nullptr), mType(nullptr) {} + ~NameTypePairNode() {} + + TreeNode* GetVar() {return mVar;} + TreeNode* GetType() {return mType;} + void SetVar(TreeNode* t) {mVar = t; SETPARENT(t);} + void SetType(TreeNode* t) {mType = t; SETPARENT(t);} + + void Dump(unsigned); +}; - TreeNode* NewTreeNode(AppealNode*); +class TupleTypeNode : public TreeNode { +private: + SmallVector mFields; +public: + TupleTypeNode() : TreeNode(NK_TupleType) {} + ~TupleTypeNode() {Release();} - TreeNode* BuildBinaryOperation(TreeNode *, TreeNode *, OprId); - TreeNode* BuildPassNode(); + unsigned GetFieldsNum() {return mFields.GetNum();} + NameTypePairNode* GetField(unsigned i) {return mFields.ValueAtIndex(i);} + void SetField(unsigned i, NameTypePairNode* n) {*(mFields.RefAtIndex(i)) = n; SETPARENT(n);} + void AddField(NameTypePairNode *n) {mFields.PushBack(n); SETPARENT(n);} + void AddChild(TreeNode *n); + void Release() {mFields.Release();} void Dump(unsigned); +}; + +////////////////////////////////////////////////////// +// Triple Slash Directive +////////////////////////////////////////////////////// + +enum TripleSlashProp { + TSP_Path, + TSP_Types, + TSP_Lib, + TSP_NoDefaultLib, + TSP_NA +}; +class TripleSlashNode : public TreeNode { +private: + TripleSlashProp mProp; + TreeNode *mValue; +public: + TripleSlashNode() : TreeNode(NK_TripleSlash) {mValue = NULL; mProp = TSP_NA;} + ~TripleSlashNode() {} + + TreeNode* GetValue() {return mValue;} + void SetValue(TreeNode *n) {mValue = n; SETPARENT(n);} + + TripleSlashProp GetProp() {return mProp;} + void SetProp(TripleSlashProp p) {mProp = p;} + + void Dump(unsigned); }; + } #endif diff --git a/src/MapleFE/shared/include/ast_attr.h b/src/MapleFE/shared/include/ast_attr.h index 0678308c2835bc11074831bed84f2d47ac2a8624..326a9444cc24a4896c2dc072172b953776bb96aa 100644 --- a/src/MapleFE/shared/include/ast_attr.h +++ b/src/MapleFE/shared/include/ast_attr.h @@ -40,13 +40,16 @@ class AttrNode : public TreeNode { private: AttrId mId; public: - AttrNode() {mKind = NK_Attr; mId = ATTR_NA;} + AttrNode() : TreeNode(NK_Attr), mId(ATTR_NA) {} ~AttrNode(){} AttrId GetId() {return mId;} void SetId(AttrId id) {mId = id;} }; +extern const char* FindAttrKeyword(AttrId id); +extern AttrId FindAttrId(const char *keyword); + /////////////////////////////////////////////////////////////////////////////// // AttrPool // The size of AttrNode is fixed, so it's good to use MemPool for the storage. diff --git a/src/MapleFE/shared/include/ast_builder.h b/src/MapleFE/shared/include/ast_builder.h index 7890fd3c700bb4b29ac8ed8234aef5aeaa0ca56c..74ab70396f1a38e847fe91b4cacc9c21d4dec524 100644 --- a/src/MapleFE/shared/include/ast_builder.h +++ b/src/MapleFE/shared/include/ast_builder.h @@ -17,6 +17,7 @@ #define __AST_BUILDER_HEADER__ #include "ast.h" +#include "ast_module.h" #include "ast_mempool.h" namespace maplefe { @@ -49,24 +50,29 @@ class ASTScope; class ASTBuilder { private: - bool mTrace; + bool mTrace; + ModuleNode *mASTModule; // The last created node. It will be referenced by the // following AddModifier() or other functions. - TreeNode *mLastTreeNode; + TreeNode *mLastTreeNode; + + // Sometimes we call BuildIdentifier() to build an identifier node + // from keywords or any reserved words. This is allowed in Typescript. 
+ // We save the Name of the keyword for BuildIdentifier(). + const char *mNameForBuildIdentifier; public: // information for a single action unsigned mActionId; std::vector mParams; - TreePool *mTreePool; public: - ASTBuilder() : mTreePool(NULL), mTrace(false) {} + ASTBuilder(ModuleNode *m) : mASTModule(m), mTrace(false), mLastTreeNode(NULL), mNameForBuildIdentifier(NULL) {} ~ASTBuilder() {} void SetTrace(bool b) {mTrace = b;} - void SetTreePool(TreePool *p) {mTreePool = p;} + void SetModule(ModuleNode *m) {mASTModule = m;} void AddParam(Param p) {mParams.push_back(p);} void ClearParams() {mParams.clear();} @@ -74,104 +80,18 @@ public: // Create Functions for Token TreeNode* CreateTokenTreeNode(const Token*); + TreeNode* BuildIdentifier(const Token*); + TreeNode* BuildIdentifier(const TreeNode*); TreeNode* Build(); - TreeNode* BuildPackageName(); - - TreeNode* BuildSingleTypeImport(); - TreeNode* BuildAllTypeImport(); - TreeNode* BuildSingleStaticImport(); - TreeNode* BuildAllStaticImport(); - TreeNode* BuildAllImport(); - - TreeNode* BuildBlock(); - TreeNode* AddToBlock(); - TreeNode* CvtToBlock(TreeNode *tnode); - - TreeNode* BuildUnaryOperation(); - TreeNode* BuildPostfixOperation(); - TreeNode* BuildBinaryOperation(); - TreeNode* BuildCast(); - TreeNode* BuildParenthesis(); - TreeNode* BuildLambda(); - - TreeNode* BuildDecl(); - TreeNode* BuildField(); - TreeNode* BuildVarList(); - - TreeNode* AddInitTo(); - TreeNode* AddTypeTo(); - TreeNode* AddModifier(); - TreeNode* AddModifierTo(); - - // Callsite - TreeNode* AddArguments(); - TreeNode* BuildCall(); - TreeNode* BuildExprList(); +#undef ACTION +#define ACTION(K) TreeNode* K(); +#include "supported_actions.def" - // Function related + void AddArguments(TreeNode *call, TreeNode *args); void AddParams(TreeNode *func, TreeNode *params); - TreeNode* AddParams(); - TreeNode* BuildFunction(); - TreeNode* BuildConstructor(); - TreeNode* AddFunctionBody(); - TreeNode* AddFunctionBodyTo(); - - TreeNode* BuildClass(); - TreeNode* SetClassIsJavaEnum(); - TreeNode* AddClassBody(); - TreeNode* AddSuperClass(); - TreeNode* AddSuperInterface(); - TreeNode* BuildInstInit(); - - // Annotation related - TreeNode* BuildAnnotationType(); - TreeNode* BuildAnnotation(); - TreeNode* AddAnnotationTypeBody(); - - // Interface related - TreeNode* BuildInterface(); - TreeNode* AddInterfaceBody(); - - // Dimension Related - TreeNode* BuildDim(); - TreeNode* BuildDims(); - TreeNode* AddDims(); - TreeNode* AddDimsTo(); - - // Statements, Control Flow - TreeNode* BuildAssignment(); - TreeNode* BuildReturn(); - TreeNode* BuildCondBranch(); - TreeNode* AddCondBranchTrueStatement(); - TreeNode* AddCondBranchFalseStatement(); - TreeNode* AddLabel(); - TreeNode* BuildBreak(); - TreeNode* BuildForLoop(); - TreeNode* BuildWhileLoop(); - TreeNode* BuildDoLoop(); - TreeNode* BuildNewOperation(); - TreeNode* BuildDeleteOperation(); - TreeNode* BuildAssert(); - - SwitchCaseNode* SwitchLabelToCase(SwitchLabelNode*); - TreeNode* BuildSwitchLabel(); - TreeNode* BuildDefaultSwitchLabel(); - TreeNode* BuildOneCase(); - TreeNode* BuildAllCases(); - TreeNode* BuildSwitch(); - - // Exception, throw - TreeNode* BuildThrows(); - TreeNode* AddThrowsTo(); - - // User Type related - TreeNode* BuildUserType(); - TreeNode* AddTypeArgument(); }; -// A global builder is good enough. 
-extern ASTBuilder gASTBuilder; } #endif diff --git a/src/MapleFE/shared/include/ast_fixup.h b/src/MapleFE/shared/include/ast_fixup.h new file mode 100644 index 0000000000000000000000000000000000000000..af36113470ee6ddd9d5ab4115d554b5a2a1eaeea --- /dev/null +++ b/src/MapleFE/shared/include/ast_fixup.h @@ -0,0 +1,59 @@ +/* +* Copyright (C) [2021] Futurewei Technologies, Inc. All rights reverved. +* +* OpenArkFE is licensed under the Mulan PSL v2. +* You can use this software according to the terms and conditions of the Mulan PSL v2. +* You may obtain a copy of Mulan PSL v2 at: +* +* http://license.coscl.org.cn/MulanPSL2 +* +* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +* FIT FOR A PARTICULAR PURPOSE. +* See the Mulan PSL v2 for more details. +*/ + +#ifndef __AST_FIXUP_H__ +#define __AST_FIXUP_H__ + +#include "ast_module.h" +#include "ast.h" +#include "gen_astvisitor.h" + +namespace maplefe { + +// FixUpVisitor is to fix up some tree nodes after the AST is created +class FixUpVisitor : public AstVisitor { + private: + ModuleNode *mASTModule; + bool mUpdated; + + public: + FixUpVisitor(ModuleNode *m) : mASTModule(m), mUpdated(false) {} + + bool FixUp(); + + // Fix up OprId of a UnaOperatorNode + // OPR_Add --> OPR_Plus + // OPR_Sub --> OPR_Minus + // OPR_Inc && !IsPost() --> OPR_PreInc + // OPR_Dec && !IsPost() --> OPR_DecInc + // + UnaOperatorNode *VisitUnaOperatorNode(UnaOperatorNode *node); + + // Fix up literal boolean 'true' or 'false' as a type + UserTypeNode *VisitUserTypeNode(UserTypeNode *node); + + // Fix up the name string of a UserTypeNode + // Fix up literal boolean 'true' or 'false' + IdentifierNode *VisitIdentifierNode(IdentifierNode *node); + + // Update mFilename of a ModuleNode with a connonical absolute path + ModuleNode *VisitModuleNode(ModuleNode *node); + + // Replace a PassNode with a CallNode for tagged template literal + PassNode *VisitPassNode(PassNode *node); +}; + +} +#endif diff --git a/src/MapleFE/shared/include/ast_mempool.h b/src/MapleFE/shared/include/ast_mempool.h index f9a5c32e4958b1067ffc5b8628a9069cfcff22ef..62db391d1624da5da6d9f04376d5b78268756b54 100644 --- a/src/MapleFE/shared/include/ast_mempool.h +++ b/src/MapleFE/shared/include/ast_mempool.h @@ -54,5 +54,6 @@ public: // is out of the control of mMP. }; +extern TreePool gTreePool; } #endif diff --git a/src/MapleFE/shared/include/ast_module.h b/src/MapleFE/shared/include/ast_module.h index bc4de35a93412d29ab3afe0697899e677052f15a..58e6b19e1a0289949de47e4911f4ccc071203648 100644 --- a/src/MapleFE/shared/include/ast_module.h +++ b/src/MapleFE/shared/include/ast_module.h @@ -1,5 +1,5 @@ /* -* Copyright (C) [2020] Futurewei Technologies, Inc. All rights reverved. +* Copyright (C) [2020-2022] Futurewei Technologies, Inc. All rights reverved. * * OpenArkFE is licensed under the Mulan PSL v2. * You can use this software according to the terms and conditions of the Mulan PSL v2. @@ -23,42 +23,74 @@ namespace maplefe { -class ASTTree; -class ASTScope; +enum SrcLang { + SrcLangUnknown, + SrcLangJava, + SrcLangJavaScript, + SrcLangTypeScript, + SrcLangC, +}; // The module is a member of class Parser. -class ASTModule { +class ModuleNode : public TreeNode { public: - const char *mFileName; + const char *mFilename; PackageNode *mPackage; - SmallVector mImports; public: - std::vector mTrees; // All trees in the module. There is no root tree - // which covers all the others. 
- // Everything else will be treated as a TreeNode, not a tree, - // even if it's a local class. - // Memory is released in ~ASTModule(); + SmallList mTrees; // All trees in the module. ASTScope *mRootScope; // the scope corresponding to a module. All other scopes // are children of mRootScope. ASTScopePool mScopePool; // All the scopes are store in this pool. It also contains - // a vector of ASTScope pointer for traversal. + // a vector of ASTScope pointer for traversal. + SrcLang mSrcLang; + + bool mIsAmbient; // In Typescript there is an ambient module containing + // only declarations. public: - ASTModule(); - ~ASTModule(); + ModuleNode(); + ~ModuleNode(); + + bool IsAmbient() {return mIsAmbient;} + void SetIsAmbient(bool b = true) {mIsAmbient = b;} + + void SetFilename(const char *f) {mFilename = f;} + const char *GetFilename() {return mFilename;} + + void SetPackage(PackageNode *p); + PackageNode *GetPackage() {return mPackage;}; - void SetFileName(const char *f) {mFileName = f;} - void SetPackage(PackageNode *p); - void AddImport(ImportNode *imp) {mImports.PushBack(imp);} + void SetSrcLang(SrcLang l); + SrcLang GetSrcLang(); + std::string GetSrcLangString(); - void AddTree(ASTTree* t) { mTrees.push_back(t); } + unsigned GetTreesNum() {return mTrees.GetNum();} + TreeNode* GetTree(unsigned i) {return mTrees.ValueAtIndex(i);} + void SetTree(unsigned i, TreeNode* t) {*(mTrees.RefAtIndex(i)) = t;} + void AddTree(TreeNode* t); + void AddTreeFront(TreeNode* t); + + void InsertAfter(TreeNode *new_stmt, TreeNode *exist_stmt) { + mTrees.LocateValue(exist_stmt); + mTrees.InsertAfter(new_stmt); + if(new_stmt) new_stmt->SetParent(this); + } + void InsertBefore(TreeNode *new_stmt, TreeNode *exist_stmt) { + mTrees.LocateValue(exist_stmt); + mTrees.InsertBefore(new_stmt); + if(new_stmt) new_stmt->SetParent(this); + } + + ASTScope* GetRootScope() {return mRootScope;} + void SetRootScope(ASTScope *s) {mRootScope = s;} + + ASTScopePool& GetScopePool() {return mScopePool;} + void SetScopePool(ASTScopePool &s) {mScopePool = s;} ASTScope* NewScope(ASTScope *p); + ASTScope* NewScope(ASTScope *p, TreeNode *t); - void Dump(); + void Dump(unsigned); }; -// Assume currently only one global module is being processed. -extern ASTModule gModule; - } #endif diff --git a/src/MapleFE/shared/include/ast_nk.def b/src/MapleFE/shared/include/ast_nk.def index 7ce9f893576a8bf8ce4965f593ab37b38c6d246c..091b6931090a7a6a88b7c0b802624032fbf30ddc 100644 --- a/src/MapleFE/shared/include/ast_nk.def +++ b/src/MapleFE/shared/include/ast_nk.def @@ -1,5 +1,5 @@ /* -* Copyright (C) [2020] Futurewei Technologies, Inc. All rights reverved. +* Copyright (C) [2020-2022] Futurewei Technologies, Inc. All rights reverved. * * OpenArkFE is licensed under the Mulan PSL v2. * You can use this software according to the terms and conditions of the Mulan PSL v2. @@ -16,35 +16,86 @@ // This file defines the AST Node Kind list. It is shared nodes among different languages. // Each language can have its own unique set of noke kind list, and be included too. 
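+// Reviewer sketch (an assumption about how this list is consumed; the real
+// consumer lives outside this file): ast_nk.def is an X-macro list, typically
+// expanded along the lines of
+//   #define NODEKIND(K) NK_##K,
+//   enum NodeKind {
+//     #include "ast_nk.def"
+//   };
+// so that each NODEKIND(X) entry below yields the corresponding NK_X constant
+// passed to the TreeNode constructors elsewhere in this patch (e.g. NK_Yield,
+// NK_Continue).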
+NODEKIND(Module) NODEKIND(Package) + +NODEKIND(XXportAsPair) // JS has: import {x as y}, and export {x as y} NODEKIND(Import) +NODEKIND(Export) +NODEKIND(Declare) // c/c++ extern decl, or typescript 'declare' +NODEKIND(Decl) NODEKIND(Identifier) NODEKIND(Field) NODEKIND(Dimension) NODEKIND(Attr) + +// A pair of +NODEKIND(NameTypePair) + +// Type related NODEKIND(PrimType) NODEKIND(PrimArrayType) +NODEKIND(ArrayType) NODEKIND(UserType) +NODEKIND(TypeParameter) +NODEKIND(AsType) +NODEKIND(TypeAlias) +NODEKIND(ConditionalType) // Conditional types in Typescript +NODEKIND(TupleType) // tuple types in Typescript +NODEKIND(FunctionType) +// NODEKIND(Cast) NODEKIND(Parenthesis) +// For de-structuring syntax, first in JS +NODEKIND(BindingElement) +NODEKIND(BindingPattern) + +// Struct +NODEKIND(Struct) +NODEKIND(StructLiteral) +NODEKIND(FieldLiteral) + +// define two spcial nodes for typescript index signature +NODEKIND(NumIndexSig) +NODEKIND(StrIndexSig) + +// Typescript, computed name +NODEKIND(ComputedName) + +// Array related +NODEKIND(ArrayElement) +NODEKIND(ArrayLiteral) + // VarList is for identifier list only. // ExprList is for all kinds of node. NODEKIND(VarList) NODEKIND(ExprList) +NODEKIND(TemplateLiteral) +NODEKIND(RegExpr) + NODEKIND(Literal) NODEKIND(UnaOperator) NODEKIND(BinOperator) NODEKIND(TerOperator) NODEKIND(Lambda) +NODEKIND(InstanceOf) +NODEKIND(TypeOf) +NODEKIND(KeyOf) +NODEKIND(In) +NODEKIND(Is) +NODEKIND(Infer) + +NODEKIND(TripleSlash) // TS triple-slash directive NODEKIND(Block) NODEKIND(Function) NODEKIND(Class) NODEKIND(Interface) +NODEKIND(Namespace) // First come from Typescript namespace. // Annotation, Pragma in c/c++, has no effects for execution, but has meanings // for compiler or runtime. @@ -55,13 +106,23 @@ NODEKIND(Interface) NODEKIND(AnnotationType) NODEKIND(Annotation) +NODEKIND(Try) +NODEKIND(Catch) +NODEKIND(Finally) NODEKIND(Exception) +// This is for the throw statement. Not the "exceptions thrown by functions". +NODEKIND(Throw) + +NODEKIND(Await) + // These are statement nodes, or control flow related nodes. They are // common in most languages. NODEKIND(Return) +NODEKIND(Yield) NODEKIND(CondBranch) NODEKIND(Break) +NODEKIND(Continue) NODEKIND(ForLoop) NODEKIND(WhileLoop) NODEKIND(DoLoop) diff --git a/src/MapleFE/shared/include/ast_scope.h b/src/MapleFE/shared/include/ast_scope.h index c74c2f2c52ac9a34e6a86602b6c8506411724dd1..088072c362f6c492203110f4d0689223a2281428 100644 --- a/src/MapleFE/shared/include/ast_scope.h +++ b/src/MapleFE/shared/include/ast_scope.h @@ -40,7 +40,7 @@ private: // tree with it. It actually related to the module. Local scope always // has a local TreeNode with it. TreeNode *mTree; - + SmallVector mChildren; // Local User types like local class,etc. @@ -51,33 +51,53 @@ private: // So TreeNode is the only choice for it. 
SmallVector mDecls; + // Imported Decles + SmallVector mImportedDecls; + + // Exported Decles + SmallVector mExportedDecls; + public: ASTScope() : mParent(NULL), mTree(NULL) {} - ASTScope(ASTScope *p); + ASTScope(ASTScope *p) : mTree(NULL) { SetParent(p); } + ASTScope(ASTScope *p, TreeNode *t) { SetParent(p); SetTree(t); } ~ASTScope() {Release();} // It's the caller's duty to make sure p is not NULL - void SetParent(ASTScope *p) {mParent = p; p->AddChild(this);} + void SetParent(ASTScope *p) {mParent = p; if(p) p->AddChild(this);} ASTScope* GetParent() {return mParent;} TreeNode* GetTree() {return mTree;} - void SetTree(TreeNode* t) {mTree = t;} + void SetTree(TreeNode* t) {mTree = t; if(t) t->SetScope(this);} void AddChild(ASTScope*); + unsigned GetChildrenNum() {return mChildren.GetNum();} unsigned GetDeclNum() {return mDecls.GetNum();} + unsigned GetImportedDeclNum() {return mImportedDecls.GetNum();} + unsigned GetExportedDeclNum() {return mExportedDecls.GetNum();} unsigned GetTypeNum() {return mTypes.GetNum();} + ASTScope* GetChild(unsigned i) {return mChildren.ValueAtIndex(i);} TreeNode* GetDecl(unsigned i) {return mDecls.ValueAtIndex(i);} + TreeNode* GetImportedDecl(unsigned i) {return mImportedDecls.ValueAtIndex(i);} + TreeNode* GetExportedDecl(unsigned i) {return mExportedDecls.ValueAtIndex(i);} TreeNode* GetType(unsigned i) {return mTypes.ValueAtIndex(i);} - TreeNode* FindDeclOf(IdentifierNode*); - TreeNode* FindTypeOf(IdentifierNode*); + TreeNode* FindDeclOf(unsigned stridx, bool deep = true); + TreeNode* FindExportedDeclOf(unsigned stridx); + TreeNode* FindTypeOf(unsigned stridx); void AddDecl(TreeNode *n) {mDecls.PushBack(n);} + void AddImportDecl(TreeNode *n) {mImportedDecls.PushBack(n);} + void AddExportDecl(TreeNode *n) {mExportedDecls.PushBack(n);} void AddType(TreeNode *n) {mTypes.PushBack(n);} void TryAddDecl(TreeNode *n); void TryAddType(TreeNode *n); + bool IsAncestor(ASTScope *ancestor); + + void Dump(unsigned indent = 0); + virtual void Release(); }; @@ -94,7 +114,7 @@ private: public: ASTScopePool() {} ~ASTScopePool(); - + ASTScope* NewScope(ASTScope *parent); }; diff --git a/src/MapleFE/shared/include/ast_type.h b/src/MapleFE/shared/include/ast_type.h index 66c0b296680a5e7ee38ac468a7684b630aa0c3b8..0f4afe5ee73d61dff5d91d629ab2d40b51d7d471 100644 --- a/src/MapleFE/shared/include/ast_type.h +++ b/src/MapleFE/shared/include/ast_type.h @@ -1,5 +1,5 @@ /* -* Copyright (C) [2020] Futurewei Technologies, Inc. All rights reverved. +* Copyright (C) [2020-2022] Futurewei Technologies, Inc. All rights reverved. * * OpenArkFE is licensed under the Mulan PSL v2. * You can use this software according to the terms and conditions of the Mulan PSL v2. @@ -36,21 +36,20 @@ // Right now, I just let each instance represent a separate type, and will // come back to this. // -// So we a TreeNode of the type identifier can be a type, and we don't have -// to give any special data struct for it. A user type is created as a treenode -// (IdentifierNode) at the beginning, but later we will do consolidation, -// and it may be turned into a function, struct, etc. So a TreeNode is good here. -// -// // We define 3 different types. // 1. UserType // It's an identifier which defines a class, interface, struct, etc. +// It also includes DimensionNode to tell if it's array. // 2. PrimType // This is coming from language's type keyword which are primitive types. // PrimType-s have limited number, and we pre-created AST nodes for them. // All same prim type nodes are pointing to the same one. // 3. 
PrimArrayType // This is a special case. e.g. int[] +// +// The reason we split primary types into PrimType and PrimArrayType is to +// share the same PrimType since they can be predefined in the Prim Pool. +// ////////////////////////////////////////////////////////////////////////// #ifndef __AST_TYPE_H__ @@ -65,32 +64,130 @@ namespace maplefe { /////////////////////////////////////////////////////////////////////////////// // UserTypeNode +// User type is complicated in Typescript. It could a union or intersection +// of other types. /////////////////////////////////////////////////////////////////////////////// +enum UT_Type { + UT_Regular, // the normal user type, it could be just a name. + UT_Union, // Union of two other types. + UT_Inter, // Intersection of other types. +}; + class UserTypeNode : public TreeNode { private: - IdentifierNode *mId; - SmallVector mTypeArguments; + // A regular UT always has an Id (or name), or lambda, etc. + // A union or intersection UT may or may not have an ID. + TreeNode *mId; + + UT_Type mType; + DimensionNode *mDims; + + // the set of types in union or intersection. + SmallVector mUnionInterTypes; + + // There are two scenarios type generic info are used. + // 1. It's a type argument + // 2. It's a type parameter. Type parameter may have default value. + SmallVector mTypeGenerics; + + SmallVector mAttrs; + public: - UserTypeNode() : mId(NULL) {mKind = NK_UserType;} - UserTypeNode(IdentifierNode *n) : mId(n) {mKind = NK_UserType;} + UserTypeNode(TreeNode *n) : TreeNode(NK_UserType), + mId(n), mType(UT_Regular), mDims(NULL) { SETPARENT(n); } + UserTypeNode() : UserTypeNode(NULL) {} ~UserTypeNode(){Release();} - IdentifierNode* GetId() {return mId;} - void SetId(IdentifierNode *n) {mId = n;} + TreeNode* GetId() {return mId;} + void SetId(TreeNode *n) {mId = n; SETPARENT(n);} - const char* GetName() {return mId->GetName();} + unsigned GetUnionInterTypesNum() {return mUnionInterTypes.GetNum();} + void AddUnionInterType(TreeNode *n, bool front = false); + TreeNode* GetUnionInterType(unsigned i) {return mUnionInterTypes.ValueAtIndex(i);} + void SetUnionInterType(unsigned i, TreeNode* n) {*(mUnionInterTypes.RefAtIndex(i)) = n; SETPARENT(n);} - unsigned TypeArgsNum() {return mTypeArguments.GetNum();} - void AddTypeArg(IdentifierNode *n) {mTypeArguments.PushBack(n);} - void AddTypeArgs(TreeNode *n); + unsigned GetTypeGenericsNum() {return mTypeGenerics.GetNum();} + void AddTypeGeneric(TreeNode *n); + TreeNode* GetTypeGeneric(unsigned i) {return mTypeGenerics.ValueAtIndex(i);} + void SetTypeGeneric(unsigned i, TreeNode* n) {*(mTypeGenerics.RefAtIndex(i)) = n; SETPARENT(n);} + + UT_Type GetType() {return mType;} + void SetType(UT_Type t) {mType = t;} + + DimensionNode* GetDims() {return mDims;} + void SetDims(DimensionNode *d) {mDims = d;} + + unsigned GetDimsNum() {return mDims->GetDimensionsNum();} + bool IsArray() {return mDims && GetDimsNum() > 0;} + void AddDim(unsigned i = 0){mDims->AddDimension(i);} // 0 means unspecified + unsigned GetNthNum(unsigned n) {return mDims->GetDimension(n);} // 0 means unspecified. 
+ void SetNthNum(unsigned n, unsigned i) {mDims->SetDimension(n, i);} + + // Attributes related + unsigned GetAttrsNum() const {return mAttrs.GetNum();} + void AddAttr(AttrId a) {mAttrs.PushBack(a);} + AttrId GetAttrAtIndex(unsigned i) {return mAttrs.ValueAtIndex(i);} + void SetAttrAtIndex(unsigned i, AttrId n) {*(mAttrs.RefAtIndex(i)) = n;} bool TypeEquivalent(UserTypeNode *); - void Release() {mTypeArguments.Release();} + void Release() {mTypeGenerics.Release(); mUnionInterTypes.Release();} void Dump(unsigned); }; +/////////////////////////////////////////////////////////////////////////////// +// ArrayTypeNode +// It is used to specify Array types, including element type and dimensions +/////////////////////////////////////////////////////////////////////////////// + +class ArrayTypeNode : public TreeNode { +private: + TreeNode *mElemType; + DimensionNode *mDims; + SmallVector mAttrs; + +public: + ArrayTypeNode() : TreeNode(NK_ArrayType), mElemType(NULL), mDims(NULL) {} + ~ArrayTypeNode(){} + + void SetElemType(TreeNode *n) {mElemType = n; SETPARENT(n);} + void SetDims(DimensionNode *d) {mDims = d; SETPARENT(d);} + TreeNode* GetElemType() {return mElemType;} + DimensionNode* GetDims() {return mDims;} + + // Attributes related + unsigned GetAttrsNum() const {return mAttrs.GetNum();} + void AddAttr(AttrId a) {mAttrs.PushBack(a);} + AttrId GetAttrAtIndex(unsigned i) {return mAttrs.ValueAtIndex(i);} + void SetAttrAtIndex(unsigned i, AttrId n) {*(mAttrs.RefAtIndex(i)) = n;} + + void Dump(unsigned); +}; + +/////////////////////////////////////////////////////////////////////////////// +// FunctionTypeNode +// It is used to specify function types with its parameters and return type +/////////////////////////////////////////////////////////////////////////////// +class FunctionTypeNode : public TreeNode { +private: + SmallVector mParams; // type index of formal parameters + // and return which is the last one + +public: + FunctionTypeNode() : TreeNode(NK_FunctionType) {} + ~FunctionTypeNode(){} + + unsigned GetParamsNum() {return mParams.GetNum();} + unsigned GetParam(unsigned i) {return mParams.ValueAtIndex(i);} + void SetParam(unsigned i, unsigned n) {*(mParams.RefAtIndex(i)) = n;} + void AddParam(unsigned i) {mParams.PushBack(i);} + void ClearParam() {mParams.Clear();} + + bool IsEqual(FunctionTypeNode *f); + void Dump(unsigned); +}; + /////////////////////////////////////////////////////////////////////////////// // PrimTypeNode & PrimTypePool // The size of PrimTypeNode is fixed, so it's good to use container for the storage. @@ -100,13 +197,18 @@ public: class PrimTypeNode : public TreeNode { private: TypeId mPrimType; // primitive type + bool mIsUnique; // This is specifically for TS "unique symbol". TS creates many + // opaque syntax. 
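+                     // Reviewer note (illustrative): a Typescript declaration
+                     // such as "declare const tag: unique symbol;" is the kind
+                     // of type this flag is meant to mark.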
public: - PrimTypeNode() {mKind = NK_PrimType;} + PrimTypeNode() : TreeNode(NK_PrimType), mIsUnique(false) {} ~PrimTypeNode(){} + bool IsUnique() {return mIsUnique;} + void SetIsUnique(bool b = true) {mIsUnique = b;} + TypeId GetPrimType() {return mPrimType;} void SetPrimType(TypeId id) {mPrimType = id; } - const char* GetName(); // type name + const char* GetTypeName(); // type name void Dump(unsigned); }; @@ -115,29 +217,34 @@ class PrimArrayTypeNode : public TreeNode { private: PrimTypeNode *mPrim; DimensionNode *mDims; + SmallVector mAttrs; + public: - PrimArrayTypeNode() : mPrim(NULL), mDims(NULL) {mKind = NK_PrimArrayType;} + PrimArrayTypeNode() : TreeNode(NK_PrimArrayType), mPrim(NULL), mDims(NULL) {} ~PrimArrayTypeNode(){} - void SetPrim(PrimTypeNode *p) {mPrim = p;} - void SetDims(DimensionNode *d) {mDims = d;} + void SetPrim(PrimTypeNode *p) {mPrim = p; SETPARENT(p);} + void SetDims(DimensionNode *d) {mDims = d; SETPARENT(d);} PrimTypeNode* GetPrim() {return mPrim;} DimensionNode* GetDims(){return mDims;} + // Attributes related + unsigned GetAttrsNum() const {return mAttrs.GetNum();} + void AddAttr(AttrId a) {mAttrs.PushBack(a);} + AttrId GetAttrAtIndex(unsigned i) {return mAttrs.ValueAtIndex(i);} + void SetAttrAtIndex(unsigned i, AttrId n) {*(mAttrs.RefAtIndex(i)) = n;} + void Dump(unsigned); }; class PrimTypePool { private: - TreePool mTreePool; SmallVector mTypes; - - void Init(); - public: PrimTypePool(); ~PrimTypePool(); + void Init(); PrimTypeNode* FindType(const char *keyword); PrimTypeNode* FindType(TypeId id); }; diff --git a/src/MapleFE/shared/include/comment.h b/src/MapleFE/shared/include/comment.h index a468e1050557464db7c7f6ff1b223ec4c363daf2..d0ee240ad04a1104eb8a441438c1cefe1f28fc29 100644 --- a/src/MapleFE/shared/include/comment.h +++ b/src/MapleFE/shared/include/comment.h @@ -21,7 +21,7 @@ // |-->literals // |-->separators // |-->operators -// +// // This categorization is shared among all languages. [NOTE] If anything // in a new language is exceptional, please add to this. // @@ -36,7 +36,7 @@ namespace maplefe { -typedef enum { +typedef enum COMM_Type { COMM_EOL, //End of Line, // COMM_TRA //Traditional, /* ... */ }COMM_Type; @@ -46,7 +46,7 @@ private: COMM_Type CommType; public: Comment(COMM_Type ct) : CommType(ct) {EType = ET_CM;} - + bool IsEndOfLine() {return CommType == COMM_EOL;} bool IsTraditional() {return CommType == COMM_TRA;} }; diff --git a/src/MapleFE/shared/include/common_header_autogen.h b/src/MapleFE/shared/include/common_header_autogen.h index febf115539c59beeaf07f53a8b18f414cf3e1a5e..221b853d73d383f0147b0ce9c8a90358a84e1f8f 100644 --- a/src/MapleFE/shared/include/common_header_autogen.h +++ b/src/MapleFE/shared/include/common_header_autogen.h @@ -27,12 +27,9 @@ #include "gen_literal.h" #include "gen_iden.h" #include "gen_type.h" -#include "gen_expr.h" #include "gen_stmt.h" -#include "gen_block.h" #include "gen_separator.h" #include "gen_operator.h" #include "gen_keyword.h" -#include "gen_summary.h" #endif diff --git a/src/MapleFE/shared/include/container.h b/src/MapleFE/shared/include/container.h index 4859a7af047fd098ba2fcae063dde1fc64e1e159..828a8379e00034339b1eeca24fbc55045b9a79f9 100644 --- a/src/MapleFE/shared/include/container.h +++ b/src/MapleFE/shared/include/container.h @@ -1,5 +1,6 @@ /* * Copyright (C) [2020] Futurewei Technologies, Inc. All rights reverved. +* Copyright (C) 2022 Tencent. All rights reverved. * * OpenArkFE is licensed under the Mulan PSL v2. 
* You can use this software according to the terms and conditions of the Mulan PSL v2. @@ -32,8 +33,11 @@ #ifndef __CONTAINER_H__ #define __CONTAINER_H__ +#include +#include #include "mempool.h" #include "massert.h" +#include "macros.h" namespace maplefe { @@ -45,9 +49,13 @@ namespace maplefe { class ContainerMemPool : public MemPool { public: unsigned mElemSize; + unsigned mElemNumPerBlock; public: + ContainerMemPool() : mElemSize(1) {} + + void SetBlockSize(unsigned i) {mBlockSize = i; mElemNumPerBlock = mBlockSize / mElemSize;} char* AddrOfIndex(unsigned index); - void SetElemSize(unsigned i) {mElemSize = i;} + void SetElemSize(unsigned i) {mElemSize = i; mElemNumPerBlock = mBlockSize/mElemSize;} char* AllocElem() {return Alloc(mElemSize);} }; @@ -77,7 +85,10 @@ public: void PushBack(T t) { char *addr = mMemPool.AllocElem(); - *(T*)addr = t; + if(std::is_class::value) + new (addr) T(t); + else + *(T*)addr = t; mNum++; } @@ -107,7 +118,10 @@ public: // It's the caller's duty to make sure ith element is valid. void SetElem(unsigned i, T t) { char *addr = mMemPool.AddrOfIndex(i); - *(T*)addr = t; + if(std::is_class::value) + new (addr) T(t); + else + *(T*)addr = t; } bool Find(T t) { @@ -472,7 +486,7 @@ private: // Add more elements, on the knob. // This is common scenario. To implement, it requires a temporary // pointer to the located knob. This temp knob is used ONLY when - // paired operations, PairedFindOrCreateKnob() and PairedAddElem() + // paired operations, PairedFindOrCreateKnob() and PairedAddElem() Knob *mTempKnob; private: @@ -765,6 +779,338 @@ public: } }; +//////////////////////////////////////////////////////////////////////// +// GuamianFast uses unordered map to store the Knob-s in order to +// speed up the searching with big number of data. +//////////////////////////////////////////////////////////////////////// + +template class GuamianFast { +private: + struct Elem{ + E mData; + Elem *mNext; + }; + + // Sometimes people need save certain additional information to + // each knob. So we define mData. + struct Knob{ + D mData; + Elem *mChildren; // pointing to the first child + }; + + MemPool mMemPool; + std::unordered_map mKnobs; + + // allocate a new knob + Knob* NewKnob() { + Knob *knob = (Knob*)mMemPool.Alloc(sizeof(Knob)); + knob->mData = 0; + knob->mChildren = NULL; + return knob; + } + + // allocate a new element + Elem* NewElem() { + Elem *elem = (Elem*)mMemPool.Alloc(sizeof(Elem)); + elem->mNext = NULL; + elem->mData = 0; + return elem; + } + + // Sometimes people want to have a sequence of operations like, + // Get the knob, + // Add one element, on the knob + // Add more elements, on the knob. + // This is common scenario. To implement, it requires a temporary + // pointer to the located knob. This temp knob is used ONLY when + // paired operations, PairedFindOrCreateKnob() and PairedAddElem() + struct { + Knob *mKnob; + K mKey; + }mTempKnob; + +private: + // Just try to find the Knob. + // return NULL if fails. + Knob* FindKnob(K key) { + Knob *result = NULL; + auto search = mKnobs.find(key); + if (search != mKnobs.end()) + result = search->second; + return result; + } + + // Try to find the Knob. Create one if failed. + Knob* FindOrCreateKnob(K key) { + Knob *knob = FindKnob(key); + if (!knob) { + knob = NewKnob(); + mKnobs.insert(std::make_pair(key, knob)); + } + return knob; + } + + // Add an element to knob. It's the caller's duty to assure + // knob is not NULL. 
+ void AddElem(Knob *knob, E data) { + Elem *elem = knob->mChildren; + Elem *found = NULL; + while (elem) { + if (elem->mData == data) { + found = elem; + break; + } + elem = elem->mNext; + } + + if (!found) { + Elem *e = NewElem(); + e->mData = data; + e->mNext = knob->mChildren; + knob->mChildren = e; + } + } + + // return true : if find the element + // false : if fail + bool FindElem(Knob *knob, E data) { + Elem *elem = knob->mChildren; + while (elem) { + if (elem->mData == data) + return true; + elem = elem->mNext; + } + return false; + } + + // Remove elem from the list. If elem doesn't exist, exit quietly. + // It's caller's duty to assure elem exists. + void RemoveElem(Knob *knob, E data) { + Elem *elem = knob->mChildren; + Elem *elem_prev = NULL; + Elem *target = NULL; + while (elem) { + if (elem->mData == data) { + target = elem; + break; + } + elem_prev = elem; + elem = elem->mNext; + } + + if (target) { + if (target == knob->mChildren) + knob->mChildren = target->mNext; + else + elem_prev->mNext = target->mNext; + } + } + + // Move the element to be the first child of knob. + // It's the caller's duty to make sure 'data' does exist + // in knob's children. + void MoveElemToHead(Knob *knob, E data) { + Elem *target_elem = NULL; + Elem *elem = knob->mChildren; + Elem *elem_prev = NULL; + while (elem) { + if (elem->mData == data) { + target_elem = elem; + break; + } + elem_prev = elem; + elem = elem->mNext; + } + + if (target_elem && (target_elem != knob->mChildren)) { + elem_prev->mNext = target_elem->mNext; + target_elem->mNext = knob->mChildren; + knob->mChildren = target_elem; + } + } + + // Try to find the first child of Knob k. Return the data. + // found is set to false if fails, or true. + // [NOTE] It's the user's responsibilty to make sure the Knob + // of 'key' exists. + E FindFirstElem(Knob *knob, bool &found) { + Elem *e = knob->mChildren; + if (!e) { + found = false; + return 0; + } + found = true; + return e->mData; + } + + // return num of elements in knob. + // It's caller's duty to assure knob is not NULL. + unsigned NumOfElem(Knob *knob) { + Elem *e = knob->mChildren; + unsigned c = 0; + while(e) { + c++; + e = e->mNext; + } + return c; + } + + // Return the idx-th element in knob. + // It's caller's duty to assure the validity of return value. + // It doesn't check validity here. + // Index starts from 0. + E GetElemAtIndex(Knob *knob, unsigned idx) { + Elem *e = knob->mChildren; + unsigned c = 0; + E data; + while(e) { + if (c == idx) { + data = e->mData; + break; + } + c++; + e = e->mNext; + } + return data; + } + +public: + GuamianFast() {mTempKnob.mKnob = NULL;} + ~GuamianFast(){Release();} + + void AddElem(K key, E data) { + Knob *knob = FindOrCreateKnob(key); + AddElem(knob, data); + } + + // If 'data' doesn't exist, it ends quietly + void RemoveElem(K key, E data) { + Knob *knob = FindOrCreateKnob(key); + RemoveElem(knob, data); + } + + // Try to find the first child of Knob k. Return the data. + // found is set to false if fails, or true. + // [NOTE] It's the user's responsibilty to make sure the Knob + // of 'key' exists. + E FindFirstElem(K key, bool &found) { + Knob *knob = FindKnob(key); + if (!knob) { + found = false; + return 0; // return value doesn't matter when fails. 
+ } + E data = FindFirstElem(knob, found); + return data; + } + + // return true : if find the element + // false : if fail + bool FindElem(K key, E data) { + Knob *knob = FindKnob(key); + if (!knob) + return false; + return FindElem(knob, data); + } + + // Move element to be the header + // If 'data' doesn't exist, it ends quietly. + void MoveElemToHead(K key, E data) { + Knob *knob = FindKnob(key); + if (!knob) + return; + MoveElemToHead(knob, data); + } + + ///////////////////////////////////////////////////////// + // Paired operations start with finding a knob. It can + // be either PairedFindKnob() or PairedFindOrCreateKnob() + // Following that, there could be any number of operations + // like searching, adding, moving an element. + ///////////////////////////////////////////////////////// + + void PairedFindOrCreateKnob(K key) { + mTempKnob.mKnob = FindOrCreateKnob(key); + mTempKnob.mKey = key; + } + + bool PairedFindKnob(K key) { + mTempKnob.mKnob = FindKnob(key); + mTempKnob.mKey = key; + if (mTempKnob.mKnob) + return true; + else + return false; + } + + void PairedAddElem(E data) { + AddElem(mTempKnob.mKnob, data); + } + + // If 'data' doesn't exist, it ends quietly + void PairedRemoveElem(E data) { + RemoveElem(mTempKnob.mKnob, data); + } + + bool PairedFindElem(E data) { + return FindElem(mTempKnob.mKnob, data); + } + + // If 'data' doesn't exist, it ends quietly. + void PairedMoveElemToHead(E data) { + MoveElemToHead(mTempKnob.mKnob, data); + } + + E PairedFindFirstElem(bool &found) { + return FindFirstElem(mTempKnob.mKnob, found); + } + + // return num of elements in current temp knob. + // It's caller's duty to assure knob is not NULL. + unsigned PairedNumOfElem() { + return NumOfElem(mTempKnob.mKnob); + } + + // Return the idx-th element in knob. + // It's caller's duty to assure the validity of return value. + // It doesn't check validity here. + // Index starts from 0. + E PairedGetElemAtIndex(unsigned idx) { + return GetElemAtIndex(mTempKnob.mKnob, idx); + } + + // Reduce the element at index exc_idx. + // It's caller's duty to assure the element exists. + void PairedReduceElems(unsigned exc_idx) { + ReduceElems(mTempKnob.mKnob, exc_idx); + } + + void PairedSetKnobData(D d) { + mTempKnob.mKnob->mData = d; + } + + D PairedGetKnobData() { + return mTempKnob.mKnob->mData; + } + + K PairedGetKnobKey() { + return mTempKnob.mKey; + } + + ///////////////////////////////////////////////////////// + // Other functions + ///////////////////////////////////////////////////////// + + void Clear(){ + mTempKnob.mKnob = NULL; + mKnobs.clear(); + mMemPool.Clear(); + } + + void Release(){ + mMemPool.Release(); + } +}; + ////////////////////////////////////////////////////////////////////////////////////// // Tree // This is a regular tree. It simply maintains the basic operations of a tree, like @@ -779,7 +1125,7 @@ public: // We first give each node certain children. If it needs more, an allocation from // memory pool is needed. 
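Reviewer note: a small usage sketch of the new GuamianFast container, covering both the direct key-based calls and the paired knob pattern declared above. The template parameter list is elided in this hunk, so the order <key K, knob data D, element E> is an assumption here; illustration only, not part of the patch.

    GuamianFast<unsigned, unsigned, unsigned> succ;  // assumed parameter order
    succ.AddElem(10, 42);                     // direct style: key 10, element 42
    bool hit = succ.FindElem(10, 42);         // true

    succ.PairedFindOrCreateKnob(10);          // paired style: locate the knob once,
    succ.PairedAddElem(43);                   // then operate on it repeatedly
    unsigned n = succ.PairedNumOfElem();      // 2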
-#define TREE_MAX_CHILDREN_NUM 12 +#define TREE_MAX_CHILDREN_NUM 20 #define TREE_CHILDREN_NUM 4 template class ContTreeNode { @@ -916,5 +1262,29 @@ public: } }; +///////////////////////////////////////////////////////////////////////////// +// Bit Vector +///////////////////////////////////////////////////////////////////////////// + +class BitVector : public MemPool { +private: + unsigned mBVSize; // number of bits + char* GetAddr(unsigned); // return the address of bit +public: + BitVector(); + BitVector(unsigned); + ~BitVector(){} + + void ClearAll() {WipeOff();} + void ClearBit(unsigned); + void SetBit(unsigned); + bool GetBit(unsigned); + void SetBVSize(unsigned i) {mBVSize = i;} + unsigned GetBVSize() {return mBVSize;} + bool Equal(BitVector *bv); + void And(BitVector *bv); + void Or(BitVector *bv); +}; + } #endif diff --git a/src/MapleFE/shared/include/diagnose.h b/src/MapleFE/shared/include/diagnose.h index a554325c11e705e317739040ebb29f84c6b4318b..5f769bc9621a18e37320e1e49cb27b8d0831d635 100644 --- a/src/MapleFE/shared/include/diagnose.h +++ b/src/MapleFE/shared/include/diagnose.h @@ -31,7 +31,7 @@ public: unsigned Col; char Msg[256]; public: - DiagMessage(const char*, unsigned, unsigned, const char*); + DiagMessage(const char*, unsigned, unsigned, const char*); } // We save OBJECT not POINTER of DiagMessage in Diagnose. diff --git a/src/MapleFE/shared/include/element.h b/src/MapleFE/shared/include/element.h index be10ef628f11f42fa58b495c8891261e60ca1cde..27b68a46e0c4cb3da2cc168de1c33498bdad356f 100644 --- a/src/MapleFE/shared/include/element.h +++ b/src/MapleFE/shared/include/element.h @@ -33,7 +33,7 @@ namespace maplefe { -typedef enum { +typedef enum ELMT_Type { ET_WS, // White Space ET_CM, // Comment ET_TK, // Token diff --git a/src/MapleFE/shared/include/fileread.h b/src/MapleFE/shared/include/fileread.h index dd106348fd3d9e18a5952bdb41f04dd87c449694..e71d2ba6cadf296303e9c9d1b0098be096f77c42 100644 --- a/src/MapleFE/shared/include/fileread.h +++ b/src/MapleFE/shared/include/fileread.h @@ -49,7 +49,7 @@ public: void MoveCursor(int i) {mCurChar += i; mPos += i;} // i can be <0. void MoveToEndOfLine() {mPos = mCurLine.size();} - bool MoveUntil(const char*); + bool MoveUntil(const char*); bool Good() { return mDefFile.good(); } bool EndOfLine() {return mPos == mCurLine.size();} diff --git a/src/MapleFE/shared/include/lexer.h b/src/MapleFE/shared/include/lexer.h index 4dfd0e1abc052b006f5856f9ad3e247c8614812d..00a3f708629d9e8e8d347b56e6e942b241c768fb 100644 --- a/src/MapleFE/shared/include/lexer.h +++ b/src/MapleFE/shared/include/lexer.h @@ -1,5 +1,5 @@ /* -* Copyright (C) [2020] Futurewei Technologies, Inc. All rights reverved. +* Copyright (C) [2020-2022] Futurewei Technologies, Inc. All rights reverved. * * OpenArkFE is licensed under the Mulan PSL v2. * You can use this software according to the terms and conditions of the Mulan PSL v2. @@ -46,6 +46,7 @@ public: TokenPool mTokenPool; unsigned mPredefinedTokenNum; // number of predefined tokens. bool mTrace; + bool mLineMode; // Lex just one line public: FILE *srcfile; @@ -54,6 +55,7 @@ public: ssize_t current_line_size; uint32_t curidx; uint32_t _linenum; + uint32_t _total_linenum; // total line number of the file bool endoffile; int ReadALine(); // read a line from def file. 
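Reviewer note: stepping back to the BitVector class added to container.h above, a minimal usage sketch. It assumes the unsigned constructor argument is the bit count (matching mBVSize); that detail is not visible in this header. Illustration only, not part of the patch.

    BitVector visited(128);          // assumption: 128 = number of bits
    visited.SetBit(5);
    bool b = visited.GetBit(5);      // true
    visited.ClearBit(5);
    visited.ClearAll();              // wipes the whole underlying pool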
@@ -73,7 +75,11 @@ public: } } - void SetTrace() {mTrace = true;} + void SetTrace() {mTrace = true;} + bool GetTrace() {return mTrace;} + + void SetLineMode() {mLineMode = true;} + void ResetLineMode(){mLineMode = false;} bool EndOfLine() { return curidx == current_line_size; } bool EndOfFile() { return endoffile; } @@ -81,7 +87,7 @@ public: void ResetPos(); void PrepareForFile(const std::string filename); - void PrepareForString(const std::string &src); + void PrepareForString(const char *); int GetCuridx() const { return curidx; } void SetCuridx(int i) { curidx = i; } @@ -98,6 +104,11 @@ public: friend class Parser; + // These two functions are both due to weird literal or template literal + // in script language which allows \n directly in their literals. + void ClearLeadingNewLine(); // clear the leading \n in line. + void AddEndingNewLine(); // add the ending \n in a line. + // These are for autogen table testing Token* LexToken(); // always return token until end of file. Token* LexTokenNoNewLine(); // try to get token untile end of line. @@ -108,24 +119,29 @@ public: // They won't move 'curidx' if target is not hit. /////////////////////////////////////////////////////////////////////////////////// - SepId GetSeparator(); - OprId GetOperator(); - LitData GetLiteral(); - const char* GetKeyword(); - const char* GetIdentifier(); - bool GetComment(); + SepId GetSeparator(); + OprId GetOperator(); + LitData GetLiteral(); + const char* GetKeyword(); + const char* GetIdentifier(); + bool GetComment(); + + Token* FindRegExprToken(); + + // For most languages, this does nothing. TS/JS are doing something. + virtual bool CharIsSeparator(const char c) {return false;} + + virtual TempLitData* GetTempLit() {return NULL;} + virtual bool FindNextTLFormat(unsigned start, std::string& s, unsigned& end) {return false;} + virtual bool FindNextTLPlaceHolder(unsigned start, std::string& s, unsigned& end) {return false;} + + virtual bool FindTripleSlash() {return false;} // replace keyword/opr/sep... with tokens //void PlantTokens(); //void PlantTraverseRuleTable(RuleTable*); //void PlantTraverseTableData(TableData*); - // - Token* FindSeparatorToken(SepId id); - Token* FindOperatorToken(OprId id); - Token* FindKeywordToken(const char *key); - Token* FindCommentToken(); - // When we start walk a rule table to find a token, do we need check if // the following data is a separator? bool mCheckSeparator; diff --git a/src/MapleFE/shared/include/log.h b/src/MapleFE/shared/include/log.h index 9e5cb2f8aab714adfd4c7a53f94111f2950a55b3..1130cfc96091d4d06010da5dbb6f10fcf70ba8a7 100644 --- a/src/MapleFE/shared/include/log.h +++ b/src/MapleFE/shared/include/log.h @@ -13,7 +13,7 @@ * See the Mulan PSL v2 for more details. 
*/ ///////////////////////////////////////////////////////////////////// -// Log file +// Log file // ///////////////////////////////////////////////////////////////////// @@ -34,7 +34,7 @@ private: std::ofstream mFile; public: - typedef enum { + typedef enum Level { LOG_NONE, LOG_DEBUG, LOG_INFO, @@ -42,10 +42,10 @@ public: LOG_ERROR, LOG_FATAL }Level; - + Log() {mFile.open("log");} ~Log(){mFile.close();} - + Log& operator<<(float f); Log& operator<<(int i); diff --git a/src/MapleFE/shared/include/mempool.h b/src/MapleFE/shared/include/mempool.h index c7a85ff0de8f4988468fb3645053cb7eb6119680..3112dafdc3b22f7882cee0daa02d74ca10f25cff 100644 --- a/src/MapleFE/shared/include/mempool.h +++ b/src/MapleFE/shared/include/mempool.h @@ -50,9 +50,6 @@ struct Block { Block *prev; // prev block }; -// So far there is nothing like free list. Everything will be released when -// StaticMemPool is destructed. -// class MemPool{ protected: Block *mCurrBlock; // Currently available block @@ -67,7 +64,8 @@ public: char* Alloc(unsigned); void Release(unsigned i); // release the last occupied i bytes. - void Clear(); // remove all data, but keep memory. + void WipeOff(int c = 0); // wipe off all data with c + void Clear(); // free up all block, dont wipe data, Keep memory. void Release(); // Allow users to free memory explicitly. }; diff --git a/src/MapleFE/shared/include/parser.h b/src/MapleFE/shared/include/parser.h index da040f8e7478857dc490779cc8051c133af7f8f2..5b4a1072bc48fd4bce6da0d79d0ffe37756fbfdb 100644 --- a/src/MapleFE/shared/include/parser.h +++ b/src/MapleFE/shared/include/parser.h @@ -1,7 +1,8 @@ /* -* Copyright (C) [2020] Futurewei Technologies, Inc. All rights reverved. +* Copyright (C) [2020-2022] Futurewei Technologies, Inc. All rights reverved. +* Copyright 2022 Tencent. All rights reverved. * -* OpenArkFE is licensed under the Mulan PSL v2. +* MapleFE is licensed under the Mulan PSL v2. * You can use this software according to the terms and conditions of the Mulan PSL v2. * You may obtain a copy of Mulan PSL v2 at: * @@ -23,10 +24,12 @@ #include "lexer.h" #include "ast_module.h" +#include "ast_builder.h" #include "container.h" #include "recursion.h" #include "succ_match.h" -#include "gen_summary.h" +#include "rule_summary.h" +#include "appnode_pool.h" namespace maplefe { @@ -38,14 +41,17 @@ class TableData; class ASTTree; class TreeNode; -typedef enum { +typedef enum AppealStatus { FailWasFailed, FailNotRightToken, + FailNotRightString, FailNotIdentifier, FailNotLiteral, + FailNotRegExpr, FailChildrenFailed, Fail2ndOf1st, FailLookAhead, + FailASI, // Succ : Really does the matching, will be saved in SuccMatch // SuccWasSucc : Was matched, not tried traversal for a second timewill, @@ -54,13 +60,21 @@ typedef enum { // in RecurionNodes where it does multiple instances of // traversal. But it doesn't make any change compared // to the last real Succ. It will NOT be saved in SuccMatch + // SuccASI: TS/JS auto-semicolon-insert Succ, SuccWasSucc, SuccStillWasSucc, + SuccASI, AppealStatus_NA }AppealStatus; +typedef enum { + ParseSucc, + ParseFail, + ParseEOF +} ParseStatus; + // As in Left Recursion scenario, a rule can have multiple matches on a start token. // Each AppealNode represents an instance in the recursion, and it matches different // number of tokens. However, truth is the parent nodes matches more than children @@ -68,14 +82,21 @@ typedef enum { class AppealNode{ private: - // In theory a tree shouldn't merge. But we do allow merge in the recursion - // parsing. 
mParent is the first level parent. mSecondParents are second level, and - // they are used during manipulation at certain phases like connecting instances - // of recursion. However, after SortOut, only mParent is valid. AppealNode *mParent; - SmallVector mSecondParents; + + // We do allow the tree to merge in the recursion parsing. + // they are used during manipulation at certain phases like connecting instances + // of recursion, if a recursion have multiple cycles with one same leading node. + // The leading node will be connected multiple times. See ConnectPrevious(). + // + // However, in sort out, we traverse from parent to child, and these 'secondary' + // parents are never used. So, I decided not to have dedicated data structure to + // record these 'second' parents. + // + // SmallVector mSecondParents; unsigned mStartIndex; // index of start matching token + unsigned mChildIndex; // index as a child in the parent rule table. bool mSorted; // already sorted out? bool mIsPseudo; // A pseudo node, mainly used for sub trees connection // It has no real program meaning, but can be used @@ -92,15 +113,14 @@ private: TreeNode *mAstTreeNode; // The AST tree node of this AppealNode. public: - unsigned GetSecondParentsNum() {return mSecondParents.GetNum();} - AppealNode* GetSecondParent(unsigned i) {return mSecondParents.ValueAtIndex(i);} - void ClearSecondParents() {mSecondParents.Clear();} AppealNode* GetParent() {return mParent;} void SetParent(AppealNode *n) {mParent = n;} void AddParent(AppealNode *n); - unsigned GetStartIndex() {return mStartIndex;} - void SetStartIndex(unsigned i){mStartIndex = i;} + unsigned GetStartIndex() {return mStartIndex;} + void SetStartIndex(unsigned i){mStartIndex = i;} + unsigned GetChildIndex() {return mChildIndex;} + void SetChildIndex(unsigned i){mChildIndex = i;} bool IsPseudo() {return mIsPseudo;} void SetIsPseudo(){mIsPseudo = true;} @@ -117,6 +137,7 @@ public: unsigned GetMatchNum() {return mMatches.GetNum();} unsigned GetMatch(unsigned i) {return mMatches.ValueAtIndex(i);} + void ClearMatch() {mMatches.Clear();} void AddMatch(unsigned i); unsigned LongestMatch(); // find the longest match. bool FindMatch(unsigned m); // if 'm' exists? @@ -125,12 +146,6 @@ public: public: bool mIsTable; // A AppealNode could relate to either rule table or token. - unsigned int mSimplifiedIndex; // After SimplifyShrinkEdges, a node could be moved to - // connect to a new 'parent' node, replacing its ancestor. - // To make AST building work, it needs to inherit ancestor's - // index in the rule table. - -public: union { RuleTable *mTable; Token *mToken; @@ -143,7 +158,7 @@ public: Token *mAltToken; // The alt token it matches. - std::vector mChildren; + SmallVector mChildren; // I use an additional vector for the sorted out children. Why do we have two duplicated // children vectors? The reason is coming from sortout. After SortOut we need remove some @@ -154,28 +169,30 @@ public: // During AST tree generation, for the SuccWasSucc child we need find the original matching // tree. That means the original mChildren vector needs to be traversed to locate that tree. // So we keep mChildren untouched and define a second vector for the SortOut-ed children. - std::vector mSortedChildren; + SmallVector mSortedChildren; // A Succ mResult doesn't mean 'really' matching tokens. e.g. Zeroorxxx rules could // match nothing, but it is succ. 
AppealStatus mResult; AppealNode() {mData.mTable=NULL; mParent = NULL; - mResult = AppealStatus_NA; mSimplifiedIndex = 0; mIsTable = true; + mResult = AppealStatus_NA; mIsTable = true; mStartIndex = 0; mSorted = false; mFinalMatch = 0; m1stAltTokenMatched = false; mAltToken = NULL; - mIsPseudo = false; mAstTreeNode = NULL; mAstCreated = false;} - ~AppealNode(){mMatches.Release();} + mIsPseudo = false; mAstTreeNode = NULL; mAstCreated = false; + mChildIndex = 0; + // These two don't need big memory. So set block size to 128. + mChildren.SetBlockSize(128); mSortedChildren.SetBlockSize(128); } + ~AppealNode() {Release();} + void Release(){mMatches.Release(); mChildren.Release(); mSortedChildren.Release();} - void AddChild(AppealNode *n) { mChildren.push_back(n); } - void RemoveChild(AppealNode *n); - void ClearChildren() { mChildren.clear(); } + void AddChild(AppealNode *n) { mChildren.PushBack(n); } + void ClearChildren() { mChildren.Clear(); } void ReplaceSortedChild(AppealNode *existing, AppealNode *replacement); - void AddSortedChild(AppealNode *n) { mSortedChildren.push_back(n); } - bool GetSortedChildIndex(AppealNode*, unsigned &); - AppealNode* GetSortedChildByIndex(unsigned idx); - AppealNode* FindSpecChild(TableData *tdata, unsigned match); + void AddSortedChild(AppealNode *n) { mSortedChildren.PushBack(n); } + AppealNode* GetSortedChild(unsigned idx); + AppealNode* FindIndexedChild(unsigned match, unsigned index); bool IsSucc() { return (mResult == Succ) || (mResult == SuccWasSucc) || @@ -195,11 +212,6 @@ public: void SetToken(Token *t) { mIsTable = false; mData.mToken = t; } RuleTable* GetTable() { return mData.mTable; } Token* GetToken() { return mData.mToken; } - - bool SuccEqualTo(AppealNode*); - - // If 'this' is a descendant of 'p'. - bool DescendantOf(AppealNode *p); }; class RecursionTraversal; @@ -215,14 +227,22 @@ struct RecStackEntry { } }; +//////////////////////////////////////////////////////////////////////////// +// +//////////////////////////////////////////////////////////////////////////// + class Parser { -private: +protected: friend class RecursionTraversal; // Matching on alternative tokens needs a state machine. - bool mInAltTokensMatching; // once it's true, mCurToken is frozen. - unsigned mNextAltTokenIndex; // index of next alt token to be matched. - unsigned mATMToken; // the current input token being processed. + bool mInAltTokensMatching; // once it's true, mCurToken is frozen. + unsigned mNextAltTokenIndex; // index of next alt token to be matched. + unsigned mATMToken; // the current input token being processed. + ModuleNode *mASTModule; // the AST Module + ASTBuilder *mASTBuilder; // the AST Builder + + AppealNodePool mAppealNodePool; public: Lexer *mLexer; @@ -242,16 +262,24 @@ public: bool mTracePatchWasSucc; // trace patching was succ node. bool mTraceWarning; // print the warning. + TreeNode *mNormalModeRoot;// For NormalMode, the root node after BuildAST. + + TreeNode *mLineModeRoot; // For LineMode, the root node after BuildAST. + bool mLineMode; // LineMode is for parsing a single line of source code. + // It could be from a string in memory, or read from URL. + // It's common in dynamic loading of code in web application. 
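Reviewer note: the line-mode additions above suggest a driver roughly like the sketch below, using the new ParseStatus result and the lexer's line-mode switches from this patch. How the Parser and Lexer instances are constructed and wired together is assumed, since those constructors are not shown here; illustration only, not part of the patch.

    parser->mLineMode = true;                 // presumably paired with the lexer switch
    parser->mLexer->SetLineMode();
    parser->mLexer->PrepareForString(src);    // src: hypothetical const char*
    if (parser->ParseStmt() == ParseSucc) {
      TreeNode *root = parser->mLineModeRoot; // AST of the single parsed line
      // ... consume root ...
    }
    parser->mLexer->ResetLineMode();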
+ void SetLexerTrace() {mLexer->SetTrace();} void DumpIndentation(); void DumpEnterTable(const char *tablename, unsigned indent); void DumpExitTable(const char *tablename, unsigned indent, AppealNode*); + void DumpExitTable(const char *tablename, unsigned indent, AppealStatus, AppealNode *n = NULL); void DumpAppeal(RuleTable *table, unsigned token); void DumpSuccTokens(AppealNode*); void DumpSortOut(AppealNode *root, const char * /*hint*/); void DumpSortOutNode(AppealNode*); -private: +public: SmallVector mActiveTokens; // vector for tokens during matching. unsigned mCurToken; // index in mActiveTokens, the next token to be matched. unsigned mPending; // index in mActiveTokens, the first pending token. @@ -262,22 +290,26 @@ private: void UpdateSuccInfo(unsigned, AppealNode*); bool TraverseStmt(); + bool TraverseTempLiteral(); bool TraverseRuleTable(RuleTable*, AppealNode*, AppealNode *&); bool TraverseRuleTableRegular(RuleTable*, AppealNode*); - bool TraverseRuleTablePre(AppealNode*); bool TraverseTableData(TableData*, AppealNode*, AppealNode *&); bool TraverseConcatenate(RuleTable*, AppealNode*); bool TraverseOneof(RuleTable*, AppealNode*); bool TraverseZeroormore(RuleTable*, AppealNode*); bool TraverseZeroorone(RuleTable*, AppealNode*); + virtual bool TraverseASI(RuleTable*, AppealNode*, AppealNode *&) {return false;} // There are some special cases we can speed up the traversal. // 1. If the target is a token, we just need compare mCurToken with it. // 2. If the target is a special rule table, like literal, identifier, we just // need check the type of mCurToken. + bool TraverseStringSucc(Token*, AppealNode*, AppealNode *&); bool TraverseToken(Token*, AppealNode*, AppealNode *&); bool TraverseLiteral(RuleTable*, AppealNode*); bool TraverseIdentifier(RuleTable*, AppealNode*); + bool TraverseTemplateLiteral(RuleTable*, AppealNode*); + bool TraverseRegularExpression(RuleTable*, AppealNode*); void TraverseSpecialTableSucc(RuleTable*, AppealNode*); bool IsVisited(RuleTable*); @@ -295,6 +327,7 @@ private: bool MoveCurToken(); // move mCurToken one step. Token* GetActiveToken(unsigned); // Get an active token. + void InsertToken(unsigned, Token*); // // Appealing System std::vector mAppealNodes; @@ -324,8 +357,19 @@ private: void SupplementalSortOut(AppealNode *root, AppealNode *target); // Build AST, for each top level construct. - ASTTree* BuildAST(); - + TreeNode* BuildAST(); + // We need a set of functions to deal with some common manipulations of + // most languages during AST Building. You can disable it if some functions + // are not what you want. + TreeNode* NewTreeNode(AppealNode*); + TreeNode* Manipulate(AppealNode*); + TreeNode* Manipulate2Binary(TreeNode*, TreeNode*); + TreeNode* Manipulate2Cast(TreeNode*, TreeNode*); + TreeNode* BuildBinaryOperation(TreeNode *, TreeNode *, OprId); + TreeNode* BuildPassNode(); + + // Handle TemplateLiteralNodes + void ParseTemplateLiterals(); ////////////////////////////////////////////////////////////// // The following section is all about left recursion parsing @@ -348,15 +392,30 @@ public: ~Parser(); void SetVerbose(int i) { mLexer->SetVerbose(i); } - int GetVerbose() { mLexer->GetVerbose(); } + int GetVerbose() { return mLexer->GetVerbose(); } + + ModuleNode* GetModule() {return mASTModule;} void Dump(); bool Parse(); - bool ParseStmt(); + ParseStatus ParseStmt(); + void InitRecursion(); unsigned LexOneLine(); + + bool TokenMerge(Token *); + + // These are language specific. 
+ virtual bool TokenSplit(Token *) {return false;} + virtual Token* GetRegExpr(Token *t) {return t;} }; +// Each language will have its own implementation of lexer. Most of lexer +// are shared with some special functions being language specific. +// +// The implementation of this function is in lang/src/lang_spec.cpp. +extern Lexer* CreateLexer(); + } #endif diff --git a/src/MapleFE/shared/include/parser_rec.h b/src/MapleFE/shared/include/parser_rec.h index bc64dfb455cbb1ebc16ce066ce49bc3750488883..bd79654e3d31e1ad1d2f15b31613052c9df37b69 100644 --- a/src/MapleFE/shared/include/parser_rec.h +++ b/src/MapleFE/shared/include/parser_rec.h @@ -42,7 +42,7 @@ public: public: RecPath(){}; ~RecPath() {mPath.Release();} - + }; enum RecTraInstance { @@ -82,7 +82,7 @@ private: // // In each iteration, the first time a LeadNode is visited, it will be saved // in this vector. The second time it's visited, it should go to connect - // with the node in the previous instance. + // with the node in the previous instance or set as Failed2ndOf1st. SmallVector mVisitedLeadNodes; // Visited Recursion Node. This is a per-iteration data too. diff --git a/src/MapleFE/shared/include/recursion.h b/src/MapleFE/shared/include/recursion.h index c60575600ddfe6ebc0ec4d2d112e7ece575edf91..20e19e421fae6838f12601b36da880ab50c9e039 100644 --- a/src/MapleFE/shared/include/recursion.h +++ b/src/MapleFE/shared/include/recursion.h @@ -32,7 +32,7 @@ namespace maplefe { struct LeftRecursion { RuleTable *mRuleTable; unsigned mNum; // How many recursions - unsigned **mCircles; // + unsigned **mCircles; // }; extern LeftRecursion **gLeftRecursions; // diff --git a/src/MapleFE/shared/include/rule_summary.h b/src/MapleFE/shared/include/rule_summary.h new file mode 100644 index 0000000000000000000000000000000000000000..e6803bcd73b5f35eb5b870cb2e33d32d6c272942 --- /dev/null +++ b/src/MapleFE/shared/include/rule_summary.h @@ -0,0 +1,73 @@ +/* +* Copyright (C) [2020-2022] Futurewei Technologies, Inc. All rights reverved. +* Copyright 2022 Tencent. All rights reverved. +* +* MapleFE is licensed under the Mulan PSL v2. +* You can use this software according to the terms and conditions of the Mulan PSL v2. +* You may obtain a copy of Mulan PSL v2 at: +* +* http://license.coscl.org.cn/MulanPSL2 +* +* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +* FIT FOR A PARTICULAR PURPOSE. +* See the Mulan PSL v2 for more details. +*/ + +#ifndef __RULE_SUMMARY_H__ +#define __RULE_SUMMARY_H__ +#include "ruletable.h" +#include "succ_match.h" +#include "token.h" +namespace maplefe { +typedef struct { + const RuleTable *mAddr; + const char *mName; + unsigned mIndex; +}RuleTableSummary; +extern RuleTableSummary gRuleTableSummarys[]; +extern unsigned RuleTableNum; +extern const char* GetRuleTableName(const RuleTable*); +class BitVector; +extern BitVector gFailed[]; +class SuccMatch; +extern SuccMatch gSucc[]; +extern unsigned gTopRulesNum; +extern RuleTable* gTopRules[]; + +// The rule tables of autogen reserved rules. 
+extern RuleTable TblCHAR; +extern RuleTable TblDIGIT; +extern RuleTable TblASCII; +extern RuleTable TblESCAPE; +extern RuleTable TblHEXDIGIT; +extern RuleTable TblUTF8; +extern RuleTable TblIRREGULAR_CHAR; + +extern RuleTable TblNoLineTerminator; +extern RuleTable TblTemplateLiteral; +extern RuleTable TblRegularExpression; +extern RuleTable TblExpression; +extern RuleTable TblType; + +// +extern RuleTable TblIdentifier; +extern RuleTable TblLiteral; +extern RuleTable TblIntegerLiteral; +extern RuleTable TblFPLiteral; +extern RuleTable TblBooleanLiteral; +extern RuleTable TblCharacterLiteral; +extern RuleTable TblStringLiteral; +extern RuleTable TblNullLiteral; + +// The tokens defined by system +extern unsigned gSystemTokensNum; +extern unsigned gOperatorTokensNum; +extern unsigned gSeparatorTokensNum; +extern unsigned gKeywordTokensNum; +extern Token gSystemTokens[]; +extern unsigned gAltTokensNum; +extern AltToken gAltTokens[]; + +} +#endif diff --git a/src/MapleFE/shared/include/ruletable.h b/src/MapleFE/shared/include/ruletable.h index b671bc95f73671792d056b72931f931250b234e9..2464e7c6eaaafaf414c2420b5780d9f3a72e63e7 100644 --- a/src/MapleFE/shared/include/ruletable.h +++ b/src/MapleFE/shared/include/ruletable.h @@ -1,5 +1,5 @@ /* -* Copyright (C) [2020] Futurewei Technologies, Inc. All rights reverved. +* Copyright (C) [2020-2022] Futurewei Technologies, Inc. All rights reverved. * * OpenArkFE is licensed under the Mulan PSL v2. * You can use this software according to the terms and conditions of the Mulan PSL v2. @@ -14,7 +14,7 @@ */ ////////////////////////////////////////////////////////////////////////// // This file contains all the information to describe the tables that -// autogen creates. +// autogen creates. ////////////////////////////////////////////////////////////////////////// #ifndef __RULE_TABLE_H__ @@ -36,21 +36,23 @@ namespace maplefe { // keyword, separator, operator with tokens. This happens in memory. // The reason we need token is to save the time of matching a rule. Lexer // returns a set of tokens, so it's faster if parts of a rule are tokens -// to compare. +// to compare. /////////////////////////////////////////////////////////////////////////// // The list of RuleTable Entry types -typedef enum { +typedef enum EntryType { ET_Oneof, // one of (...) ET_Zeroormore, // zero or more of (...) ET_Zeroorone, // zero or one ( ... ) ET_Concatenate,// Elem + Elem + Elem + ET_ASI, // Typescript Auto-Semicolon-Insertion. This is to + // parse the missing simicolon of TS/JS ET_Data, // data, further categorized into DT_Char, DT_String, ... ET_Null }EntryType; // List of data types -typedef enum { +typedef enum DataType { DT_Char, // It's a literal elements, char, 'c'. DT_String, // It's a literal elements, string "abc". DT_Type, // It's a type id @@ -97,6 +99,8 @@ enum RuleProp { // match, and it's ok. However, some concatenate rules do require // certain sub-rules NOT to match the longest so that the later // sub-rules can match, and so the whole rule. + RP_NoAltToken = 16, // don't do alternative token matching for some special tokens + // inside the current rule. }; // A rule has a limited set of beginning tokens. These are called LookAhead. 
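Reviewer note: the rule_summary.h interface above is mostly consumed by generated code, but a typical debugging use is simple enough to sketch, assuming the autogen'd table definitions are linked in (needs <cstdio>); illustration only, not part of the patch.

    for (unsigned i = 0; i < gTopRulesNum; i++) {
      RuleTable *top = gTopRules[i];
      printf("top rule %u: %s\n", i, GetRuleTableName(top));
    }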
@@ -117,7 +121,7 @@ enum LAType { struct LookAhead { LAType mType; // the type of look ahead union{ - char mChar; + unsigned char mChar; const char *mString; unsigned mTokenId; }mData; @@ -153,6 +157,8 @@ struct TypeKeyword { const char *mText; TypeId mId; }; +extern TypeKeyword TypeKeywordTable[]; +extern unsigned TypeKeywordTableSize; ////////////////////////////////////////////////////////////////////// // Attribute Table // @@ -163,6 +169,8 @@ struct AttrKeyword { const char *mText; AttrId mId; }; +extern AttrKeyword AttrKeywordTable[]; +extern unsigned AttrKeywordTableSize; ////////////////////////////////////////////////////////////////////// // Separator Table // @@ -174,6 +182,8 @@ struct SepTableEntry { const char *mText; SepId mId; }; +extern SepTableEntry SepTable[]; +extern unsigned SepTableSize; ////////////////////////////////////////////////////////////////////// // Operator Table // @@ -185,6 +195,8 @@ struct OprTableEntry { const char *mText; OprId mId; }; +extern OprTableEntry OprTable[]; +extern unsigned OprTableSize; ////////////////////////////////////////////////////////////////////// // Keyword Table // @@ -195,6 +207,8 @@ struct OprTableEntry { struct KeywordTableEntry { const char *mText; }; +extern KeywordTableEntry KeywordTable[]; +extern unsigned KeywordTableSize; } #endif diff --git a/src/MapleFE/shared/include/ruletable_util.h b/src/MapleFE/shared/include/ruletable_util.h index 618b9ea6f53f8e982bef2058265978fca359e8bd..851feb3ea37a4e98ab90486910d5a96b2c8cc6ee 100644 --- a/src/MapleFE/shared/include/ruletable_util.h +++ b/src/MapleFE/shared/include/ruletable_util.h @@ -46,7 +46,7 @@ namespace maplefe { // The most straightforward idea is to put the function pointers in the rule table // with additional information. However, the signatures of a function makes the idea // complicated. -// +// // So I finally decided to have an Id for each check function, and Id is written into // the rule tables by Autogen. diff --git a/src/MapleFE/shared/include/stringmap.h b/src/MapleFE/shared/include/stringmap.h index cbfc6ca0f3264346552073612246bd06090fefa2..66fe263e81b040fe88d8663bc47dd3937044b7ac 100644 --- a/src/MapleFE/shared/include/stringmap.h +++ b/src/MapleFE/shared/include/stringmap.h @@ -16,7 +16,7 @@ // // StringMap is heavily used in the StringPool. It's used to locate the address // of string in the StringPool. 
Here is how it works: -// (1) std::string ---> Hash value, --> Locate the Bucket in StringMap +// (1) std::string ---> Hash value, --> Locate the Bucket in StringMap // --> get the addr from StringMapEntry // (2) If conflicted in the Bucket, iterate to find the right StringMapEntry // (3) If not found, a) Add std::string to the StringPool @@ -38,13 +38,17 @@ class StringPool; class StringMapEntry { public: char *Addr; // Addr in the string pool - StringMapEntry *Next; + unsigned StrIdx; // String index + StringMapEntry *Next; public: - StringMapEntry() { Addr = NULL; Next = NULL; } - StringMapEntry(char *A) { Addr = A; Next = NULL; } - StringMapEntry(char *A, StringMapEntry *E) { Addr = A; Next = E; } + StringMapEntry() { Addr = NULL; StrIdx = 0; Next = NULL; } + StringMapEntry(char *A, unsigned idx) { Addr = A; StrIdx = idx; Next = NULL; } + StringMapEntry(char *A, unsigned idx, StringMapEntry *E) { Addr = A; StrIdx = idx; Next = E; } ~StringMapEntry() {} + + char *GetAddr() { return Addr; } + unsigned GetStrIdx() { return StrIdx; } }; class StringMap { @@ -62,8 +66,8 @@ public: void SetPool(StringPool *p) {mPool = p;} unsigned BucketNoFor(const std::string &s); - char* LookupAddrFor(const std::string &s); - void InsertEntry(char *, unsigned); + StringMapEntry *LookupEntryFor(const std::string &s); + StringMapEntry *InsertEntry(char *, unsigned, unsigned); }; } diff --git a/src/MapleFE/shared/include/stringpool.h b/src/MapleFE/shared/include/stringpool.h index da076c9df949007d46dc8bd6427d3591fd9ae007..fd8dfee8ab8012e29a2bf5586db57f588e7d1721 100644 --- a/src/MapleFE/shared/include/stringpool.h +++ b/src/MapleFE/shared/include/stringpool.h @@ -1,5 +1,5 @@ /* -* Copyright (C) [2020] Futurewei Technologies, Inc. All rights reverved. +* Copyright (C) [2020-2022] Futurewei Technologies, Inc. All rights reverved. * * OpenArkFE is licensed under the Mulan PSL v2. * You can use this software according to the terms and conditions of the Mulan PSL v2. @@ -23,7 +23,10 @@ #include #include +#include +#include #include +#include "massert.h" namespace maplefe { @@ -44,21 +47,52 @@ private: StringMap *mMap; std::vector mBlocks; int mFirstAvail; // -1 means no available. + bool mUseAltStr; // use alter string + + std::vector mLongStrings; // for strings longer than block size, + // we allocate them by malloc. 
+ + std::vector mStringTable; + + // alternate string which can be used for obfuscation + std::unordered_set mAltStrIdxSet; + std::unordered_map mAltStrIdxMap; + + friend class StringMap; public: + StringPool(); + ~StringPool(); + + void SetUseAltStr(bool b) { mUseAltStr = b; } + void AddAltStrIdx(unsigned idx) { mAltStrIdxSet.insert(idx); } + unsigned GetAltStrSize() { return mAltStrIdxSet.size(); } + bool IsAltStrIdx(unsigned idx) { + return mAltStrIdxSet.find(idx) != mAltStrIdxSet.end(); + } + void AddAltStrIdxMap(unsigned orig, unsigned alt) { mAltStrIdxMap[orig] = alt; } + void SetAltStrIdxMap(); + char* AllocBlock(); char* Alloc(const size_t); char* Alloc(const std::string&); char* Alloc(const char*); -public: - StringPool(); - ~StringPool(); - // If not found, add into StringPool const char* FindString(const std::string&); const char* FindString(const char*); const char* FindString(const char*, size_t); + + unsigned GetStrIdx(const std::string&); + unsigned GetStrIdx(const char*); + unsigned GetStrIdx(const char*, size_t); + + unsigned GetSize() {return mStringTable.size();} + + const char *GetStringFromStrIdx(unsigned idx); + + void Dump(); + void DumpAlt(); }; // Lexing, Parsing, AST Building and IR Building all share one global diff --git a/src/MapleFE/shared/include/stringutil.h b/src/MapleFE/shared/include/stringutil.h index b0b66a32775d6401c8986e2e9be95c4533ea1880..83b5000f7ab44e2a760291119308688d2b950485 100644 --- a/src/MapleFE/shared/include/stringutil.h +++ b/src/MapleFE/shared/include/stringutil.h @@ -31,7 +31,7 @@ namespace maplefe { // http://license.coscl.org.cn/MulanPSL2 static inline unsigned HashString(const std::string &s) { unsigned Result = 0; - for (size_t i = 0; i < s.size(); i++) + for (size_t i = 0; i < s.size(); i++) Result = Result * 33 + (unsigned char)s[i]; return Result; } diff --git a/src/MapleFE/shared/include/succ_match.h b/src/MapleFE/shared/include/succ_match.h index bfe143e66d32dd57ffed591c059e0b9b6eb7879d..648dc0be294fdb3a04afba79679b5ecb3596c819 100644 --- a/src/MapleFE/shared/include/succ_match.h +++ b/src/MapleFE/shared/include/succ_match.h @@ -1,7 +1,8 @@ /* * Copyright (C) [2020] Futurewei Technologies, Inc. All rights reverved. +* Copyright 2022 Tencent. All rights reverved. * -* OpenArkFE is licensed under the Mulan PSL v2. +* MapleFE is licensed under the Mulan PSL v2. * You can use this software according to the terms and conditions of the Mulan PSL v2. * You may obtain a copy of Mulan PSL v2 at: * @@ -37,7 +38,7 @@ namespace maplefe { // on every recursion group in a wavefront manner. Although after each iteration of // the wavefront we got succ/fail info, but it's not complete yet. This field tells // if we have reached the fixed point or not. -// +// // The second 'unsigned' in mMatches is not used, since putting IsDone in mNodes // is enough. // @@ -47,8 +48,8 @@ namespace maplefe { class AppealNode; class SuccMatch { private: - Guamian mNodes; - Guamian mMatches; + GuamianFast mNodes; + GuamianFast mMatches; public: SuccMatch(){} diff --git a/src/MapleFE/shared/include/supported.h b/src/MapleFE/shared/include/supported.h index 9ed0f51d2601ccfb2dac1a7a58bf83367c737218..2b1e6ad41d92056dc07bdb83cfd66f1e5428be2c 100644 --- a/src/MapleFE/shared/include/supported.h +++ b/src/MapleFE/shared/include/supported.h @@ -24,8 +24,10 @@ namespace maplefe { // The list of all supported types. This covers all the languages. 
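Reviewer note: a sketch of the index-based StringPool interface above, which the new StrIdx field in StringMapEntry feeds. The obfuscation (alt-string) calls are left out, and GetStrIdx is assumed to intern the string on first use, mirroring FindString; illustration only, not part of the patch.

    StringPool pool;
    unsigned idx = pool.GetStrIdx("foo");           // index of "foo" in mStringTable
    const char *s = pool.GetStringFromStrIdx(idx);  // same characters as "foo"
    const char *t = pool.FindString("foo");         // address-based lookup still works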
// NOTE: autogen also relies on this set of supported separators #undef TYPE +#undef PRIMTYPE #define TYPE(T) TY_##T, -typedef enum { +#define PRIMTYPE(T) TY_##T, +typedef enum TypeId { #include "supported_types.def" TY_NA }TypeId; @@ -34,7 +36,7 @@ TY_NA // NOTE: autogen also relies on this set of supported separators #undef SEPARATOR #define SEPARATOR(T) SEP_##T, -typedef enum { +typedef enum SepId { #include "supported_separators.def" SEP_NA }SepId; @@ -43,16 +45,16 @@ SEP_NA // NOTE: autogen also relies on this set of supported operators. #undef OPERATOR #define OPERATOR(T, D) OPR_##T, -typedef enum { +typedef enum OprId { #include "supported_operators.def" OPR_NA }OprId; #define LITERAL(T) LT_##T, -typedef enum { +typedef enum LitId { #include "supported_literals.def" LT_NA // N/A, in java, Null is legal type with only one value 'null' - // reference, a literal. So LT_Null is actually legal. + // reference, a literal. So LT_Null is actually legal. // So I put LT_NA for the illegal literal }LitId; @@ -64,7 +66,6 @@ enum AttrId { ATTR_NA }; - // TODO: The action id will come from both the shared part and language specific part. // Some language may have its own special action to build AST. // For now I just put everything together in order to expediate the overall diff --git a/src/MapleFE/shared/include/supported_actions.def b/src/MapleFE/shared/include/supported_actions.def index 30ca48756cfeb342538e299d4c8b04bbccc4537b..ef4710d3862514ff281d00f5f277cbf93d708a66 100644 --- a/src/MapleFE/shared/include/supported_actions.def +++ b/src/MapleFE/shared/include/supported_actions.def @@ -28,29 +28,96 @@ // It also shows the internal parameters when parser creates the AST tree node. // Please refer autogen/README.ACTIONS for details. +ACTION(BuildModule) +ACTION(AddModuleBody) +ACTION(SetIsAmbient) + ACTION(BuildPackageName) // These are about import/include ACTION(BuildSingleTypeImport) // style like java single type import ACTION(BuildAllTypeImport) // style like java ondemand type import ACTION(BuildSingleStaticImport) // style like java single static import -ACTION(BuildAllStaticImport) // style like java ondemand static import +ACTION(BuildAllStaticImport) // style like java on-demand static import ACTION(BuildAllImport) // style like c/c++ include all in .h file +ACTION(BuildExternalDeclaration) // c/c++ external decl, typescript declare. +ACTION(BuildGlobalExternalDeclaration) // typescript global declare. 
+ +// Special actions for JS import/export +ACTION(BuildImport) +ACTION(BuildExport) +ACTION(SetIsXXportType) +ACTION(SetPairs) +ACTION(SetDefaultPairs) +ACTION(SetSinglePairs) +ACTION(SetFromModule) +ACTION(SetIsEverything) // Apply to all pairs in import/export +ACTION(SetAsNamespace) // Apply to all pairs in import/export + +ACTION(BuildXXportAsPair) // normal pair +ACTION(BuildXXportAsPairEverything) // '*' +ACTION(BuildXXportAsPairDefault) // 'default' + + ACTION(BuildBlock) ACTION(AddToBlock) +ACTION(AddSyncToBlock) // Java allows a sync obj to block + +ACTION(BuildAwait) ACTION(BuildCast) ACTION(BuildParenthesis) ACTION(BuildBinaryOperation) ACTION(BuildUnaryOperation) +ACTION(BuildTernaryOperation) ACTION(BuildPostfixOperation) +ACTION(BuildInstanceOf) +ACTION(BuildIn) +ACTION(BuildIs) +ACTION(BuildTypeOf) +ACTION(BuildKeyOf) +ACTION(BuildInfer) + ACTION(BuildLambda) +ACTION(SetJavaLambda) // java lambda +ACTION(SetArrowFunction) // JS arrow function + +// This is very special function, it changes other +// keyword, attr node or anything into a new identifier node +ACTION(BuildIdentifier) + +ACTION(BuildLiteral) // For variable declaration, expression ACTION(BuildDecl) +ACTION(SetJSVar) +ACTION(SetJSLet) +ACTION(SetJSConst) + ACTION(BuildField) ACTION(BuildVarList) +ACTION(BuildComputedName) + +ACTION(BuildBindingElement) +ACTION(BuildBindingPattern) +ACTION(SetObjectBinding) +ACTION(SetArrayBinding) + +// For struct +ACTION(BuildStruct) +ACTION(SetTSInterface) +ACTION(SetTSEnum) +ACTION(AddStructField) +ACTION(BuildFieldLiteral) +ACTION(BuildStructLiteral) + +ACTION(BuildStrIndexSig) +ACTION(BuildNumIndexSig) + +// For Array +ACTION(BuildArrayElement) +ACTION(BuildArrayLiteral) // For function ACTION(AddParams) @@ -58,6 +125,13 @@ ACTION(BuildFunction) ACTION(BuildConstructor) ACTION(AddFunctionBody) ACTION(AddFunctionBodyTo) +ACTION(SetGetAccessor) // TS 'get' accessor +ACTION(SetSetAccessor) // TS 'set' accessor +ACTION(SetCallSignature) // TS call signature +ACTION(SetConstructSignature) // TS construct signature +ACTION(SetIsGenerator) // JS generator +ACTION(SetIsIterator) // JS iterator +ACTION(AddAssert) // For callsite ACTION(BuildCall) @@ -72,10 +146,15 @@ ACTION(AddSuperInterface) ACTION(AddClassBody) ACTION(BuildInstInit) +ACTION(BuildNamespace) +ACTION(AddNamespaceBody) + ACTION(AddModifier) ACTION(AddModifierTo) +ACTION(AddInit) ACTION(AddInitTo) -ACTION(AddTypeTo) +ACTION(AddType) +ACTION(AddAsType) // Annotation. ACTION(BuildAnnotationType) @@ -92,6 +171,17 @@ ACTION(BuildDims) ACTION(AddDims) ACTION(AddDimsTo) +// Set a tree node as ... properties +ACTION(SetIsStmt) +ACTION(SetIsOptional) +ACTION(SetIsNonNull) +ACTION(SetIsRest) +ACTION(SetIsConst) +ACTION(SetIsUnique) + +ACTION(BuildYield) +ACTION(SetIsTransfer) + // statment, control flow ACTION(BuildAssignment) ACTION(BuildAssert) @@ -101,7 +191,10 @@ ACTION(AddCondBranchTrueStatement) ACTION(AddCondBranchFalseStatement) ACTION(AddLabel) ACTION(BuildBreak) +ACTION(BuildContinue) ACTION(BuildForLoop) +ACTION(BuildForLoop_In) //This is Javascript, for (.. in ..){} +ACTION(BuildForLoop_Of) //This is Javascript, for (.. 
of ..){} ACTION(BuildWhileLoop) ACTION(BuildDoLoop) ACTION(BuildNewOperation) @@ -110,13 +203,41 @@ ACTION(BuildDeleteOperation) ACTION(BuildSwitchLabel) ACTION(BuildDefaultSwitchLabel) ACTION(BuildOneCase) -ACTION(BuildAllCases) ACTION(BuildSwitch) // Exceptions, Throws ACTION(BuildThrows) ACTION(AddThrowsTo) +// Try, Catch, Finally +ACTION(BuildTry) +ACTION(BuildCatch) +ACTION(BuildFinally) +ACTION(AddCatch) +ACTION(AddFinally) + // User Types ACTION(BuildUserType) -ACTION(AddTypeArgument) +ACTION(BuildTupleType) // Comes from Typescript. +ACTION(AddTypeGenerics) // add type arguments or type parameters or type generics +ACTION(BuildUnionUserType) // build union type +ACTION(BuildInterUserType) // build intersect type +ACTION(BuildArrayType) // From Spec point of view, + // In languages like TS, dimension info is attached to type. + // In C/C++, dimension info is attached to identifier. + // However, the AST design is up to each implementation. +ACTION(BuildNeverArrayType) // Special 'never' array type. + +ACTION(BuildTypeAlias) +ACTION(BuildAsType) +ACTION(BuildConditionalType) + +ACTION(BuildTypeParameter) +ACTION(AddTypeParameterExtends) + +ACTION(BuildNameTypePair) + +ACTION(BuildTripleSlash) // TS triple-slash directive. + +// This is a special action to pass a child to parent +ACTION(PassChild) diff --git a/src/MapleFE/shared/include/supported_attributes.def b/src/MapleFE/shared/include/supported_attributes.def index 789de310f700b762f4ad4767e933e4a266aefe1b..86cc52600ea92b50476cd939557b8f7b454150ac 100644 --- a/src/MapleFE/shared/include/supported_attributes.def +++ b/src/MapleFE/shared/include/supported_attributes.def @@ -27,3 +27,8 @@ ATTRIBUTE(public) ATTRIBUTE(static) ATTRIBUTE(strictfp) ATTRIBUTE(default) +ATTRIBUTE(synchronized) +ATTRIBUTE(async) +ATTRIBUTE(readonly) // Typescript +ATTRIBUTE(getter) // Javascript getter function +ATTRIBUTE(setter) // Javascript setter function diff --git a/src/MapleFE/shared/include/supported_literals.def b/src/MapleFE/shared/include/supported_literals.def index 5bcf1a4f1353113fa837622cd83be46873a8554c..7ad1d28105b9ce2485cb6a90e5850ea2554fa430 100644 --- a/src/MapleFE/shared/include/supported_literals.def +++ b/src/MapleFE/shared/include/supported_literals.def @@ -26,4 +26,5 @@ LITERAL(CharacterLiteral) LITERAL(StringLiteral) LITERAL(NullLiteral) LITERAL(ThisLiteral) - +LITERAL(SuperLiteral) +LITERAL(VoidLiteral) diff --git a/src/MapleFE/shared/include/supported_operators.def b/src/MapleFE/shared/include/supported_operators.def index 7b96ce06c84e8e4bb3075af322ac198f9214569f..2ffa34dd44fa1f53b361757b863c4d9ae861e8b4 100644 --- a/src/MapleFE/shared/include/supported_operators.def +++ b/src/MapleFE/shared/include/supported_operators.def @@ -15,6 +15,12 @@ // This file defines the operators supported by AutoGen and MapleFE +// NOTE +// 1. Add/Sub will be used as binary only after AST building. +// Plus/Minus will be the unary +// 2. Inc/Dec will be used as post after AST building. +// PreInc/PreDec will be pre operators. 
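Reviewer note: the NOTE above implies the AST builder rewrites the lexed Add/Sub/Inc/Dec operators into their unary or prefix counterparts. A hypothetical helper, shown only to capture that mapping and not part of this patch:

    static OprId UnaryCounterpart(OprId id) {
      switch (id) {
      case OPR_Add: return OPR_Plus;    // '+x'
      case OPR_Sub: return OPR_Minus;   // '-x'
      case OPR_Inc: return OPR_PreInc;  // '++x'
      case OPR_Dec: return OPR_PreDec;  // '--x'
      default:      return id;
      }
    }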
+ OPERATOR(Add, Binary|Unary) OPERATOR(Sub, Binary|Unary) OPERATOR(Mul, Binary) @@ -23,6 +29,13 @@ OPERATOR(Mod, Binary) OPERATOR(Inc, Pre|Post) OPERATOR(Dec, Pre|Post) +OPERATOR(Exp, Binary) + +OPERATOR(Plus, Unary) +OPERATOR(Minus, Unary) +OPERATOR(PreInc, Pre) +OPERATOR(PreDec, Pre) + OPERATOR(EQ, Binary) OPERATOR(NE, Binary) OPERATOR(GT, Binary) @@ -57,10 +70,16 @@ OPERATOR(BxorAssign, Binary) OPERATOR(ZextAssign, Binary) OPERATOR(Arrow, Binary) -OPERATOR(Select, Ternary) -OPERATOR(Cond, Ternary) - // In Java, there is a special type argument, <>, meaning no arguments. // It could appear in other languages. So I define Diamond. OPERATOR(Diamond, Unary) + +// This part comes from JS/TS +OPERATOR(StEq, Binary) +OPERATOR(StNe, Binary) +OPERATOR(ArrowFunction, Binary) +OPERATOR(NullCoalesce, Binary) +OPERATOR(NullAssign, Binary) +OPERATOR(TripleSlash, Binary) // triple slash directive + diff --git a/src/MapleFE/shared/include/supported_separators.def b/src/MapleFE/shared/include/supported_separators.def index a179c40679834dda7a12331f1e6fc44c0829b9d2..40298b4bb0afe66cac276efaf21fae8da10eff0d 100644 --- a/src/MapleFE/shared/include/supported_separators.def +++ b/src/MapleFE/shared/include/supported_separators.def @@ -25,8 +25,12 @@ SEPARATOR(Semicolon) SEPARATOR(Comma) SEPARATOR(Dot) SEPARATOR(Dotdotdot) -SEPARATOR(Colon) +SEPARATOR(Select) // ? +SEPARATOR(Colon) // : SEPARATOR(Of) SEPARATOR(At) SEPARATOR(Pound) SEPARATOR(Whitespace) +SEPARATOR(Tab) // Horizontal Tab, 0x09 +SEPARATOR(ArrowFunction) // first coming from JS, => +SEPARATOR(Optional) // first coming from JS, ?. diff --git a/src/MapleFE/shared/include/supported_types.def b/src/MapleFE/shared/include/supported_types.def index df5a396d5641b0055c2cf84ca54f63d8276310fd..c16bd4a934d49783017babb74231ece08d02a74d 100644 --- a/src/MapleFE/shared/include/supported_types.def +++ b/src/MapleFE/shared/include/supported_types.def @@ -1,5 +1,5 @@ /* -* Copyright (C) [2020] Futurewei Technologies, Inc. All rights reverved. +* Copyright (C) [2020-2022] Futurewei Technologies, Inc. All rights reverved. * * OpenArkFE is licensed under the Mulan PSL v2. * You can use this software according to the terms and conditions of the Mulan PSL v2. @@ -16,14 +16,40 @@ // This file defines the complete set of primitive types supported in Autogen // and the parser. -TYPE(Boolean) -TYPE(Byte) -TYPE(Short) -TYPE(Int) -TYPE(Long) -TYPE(Char) -TYPE(Float) -TYPE(Double) +TYPE(None) + +PRIMTYPE(Boolean) +PRIMTYPE(Byte) +PRIMTYPE(Short) +PRIMTYPE(Int) +PRIMTYPE(Long) +PRIMTYPE(Char) +PRIMTYPE(Float) +PRIMTYPE(Double) + +// keep these two after primitive types as we are going to create +// primitive types for them as well +TYPE(Number) // First come from JS +TYPE(String) // First come from JS + TYPE(Void) // Although this is not a type, it does categorize the types. TYPE(Null) // The Null type. It is a type. 
+TYPE(Unknown) // First come from TS 3.0 +TYPE(Never) // First come from TS +TYPE(Undefined) // First come from JS +TYPE(Symbol) // First come from JS +TYPE(Any) // First come from JS +TYPE(Date) + +TYPE(Array) +TYPE(Object) +TYPE(Function) // function/lambda decl +TYPE(Class) // class/interface decl +TYPE(Module) // import/export module +TYPE(Namespace) // namespace +TYPE(User) // user types +TYPE(Merge) // merge of two types, unexpected + +// last entry +TYPE(Max) diff --git a/src/MapleFE/shared/include/token.h b/src/MapleFE/shared/include/token.h index 71e9119486fbdf2448ff1f218ed36b3c04984ec9..f2406ee14ead46ea4581bf5a584ed6531026a84a 100644 --- a/src/MapleFE/shared/include/token.h +++ b/src/MapleFE/shared/include/token.h @@ -1,5 +1,5 @@ /* -* Copyright (C) [2020] Futurewei Technologies, Inc. All rights reverved. +* Copyright (C) [2020-2022] Futurewei Technologies, Inc. All rights reverved. * * OpenArkFE is licensed under the Mulan PSL v2. * You can use this software according to the terms and conditions of the Mulan PSL v2. @@ -30,20 +30,24 @@ #ifndef __Token_H__ #define __Token_H__ +#include #include #include "char.h" #include "stringutil.h" #include "supported.h" +#include "container.h" namespace maplefe { -typedef enum { +typedef enum TK_Type { TT_ID, // Identifier TT_KW, // Keyword TT_LT, // Literal TT_SP, // separator TT_OP, // operator TT_CM, // comment + TT_TL, // template literal. coming from Javascript first. + TT_RE, // regular expression, coming from most script languages. TT_NA // N.A. }TK_Type; @@ -59,12 +63,13 @@ typedef enum { struct LitData { LitId mType; union { - int mInt; + long mInt; float mFloat; double mDouble; bool mBool; Char mChar; - const char *mStr; // the string is allocated in gStringPool + unsigned mStrIdx; // the string is allocated in gStringPool + int64_t mInt64; // for serialization }mData; }; @@ -90,13 +95,36 @@ struct AltToken { unsigned mAltTokenId; }; +// We define the data of template literal token. +// TemplateLiteral data contains: formate and placeholder. +// They are saved as pair . If either is missing, NULL +// is saved in its position. +struct TempLitData { + SmallVector mStrings; +}; + +// Regular expressions have two data. One is the expression, +// the other is the flags. Both are saved as strings. +struct RegExprData { + const char *mExpr; + const char *mFlags; +}; + struct Token { TK_Type mTkType; + + unsigned mLineNum; // line num + unsigned mColNum; // column num + bool mLineBegin; // first token of line? + bool mLineEnd; // last token of line? + union { const char *mName; // Identifier, Keyword. 
In the gStringPool LitData mLitData; SepId mSepId; OprId mOprId; + TempLitData *mTempLitData; + RegExprData mRegExprData; }mData; AltToken *mAltTokens; @@ -107,17 +135,34 @@ struct Token { bool IsLiteral() const { return mTkType == TT_LT; } bool IsKeyword() const { return mTkType == TT_KW; } bool IsComment() const { return mTkType == TT_CM; } + bool IsTempLit() const { return mTkType == TT_TL; } + bool IsRegExpr() const { return mTkType == TT_RE; } void SetIdentifier(const char *name) {mTkType = TT_ID; mData.mName = name;} void SetLiteral(LitData data) {mTkType = TT_LT; mData.mLitData = data;} + void SetTempLit(TempLitData *data) {mTkType = TT_TL; mData.mTempLitData = data;} + void SetRegExpr(RegExprData data) {mTkType = TT_RE; mData.mRegExprData = data;} + + const char* GetName() const; + LitData GetLitData() const {return mData.mLitData;} + OprId GetOprId() const {return mData.mOprId;} + SepId GetSepId() const {return mData.mSepId;} + bool IsWhiteSpace() const {return mData.mSepId == SEP_Whitespace;} + bool IsTab() const {return mData.mSepId == SEP_Tab;} + TempLitData* GetTempLitData() const {return mData.mTempLitData;} + RegExprData GetRegExpr() const {return mData.mRegExprData;} + + // This handles only Operator, Separator, and Keyword. All others return false. + bool Equal(Token *); - const char* GetName() const; - LitData GetLitData() const {return mData.mLitData;} - OprId GetOprId() const {return mData.mOprId;} - SepId GetSepId() const {return mData.mSepId;} - bool IsWhiteSpace() const {return mData.mSepId == SEP_Whitespace;} void Dump(); }; + // + Token* FindSeparatorToken(SepId id); + Token* FindOperatorToken(OprId id); + Token* FindKeywordToken(const char *key); + Token* FindCommentToken(); + } #endif diff --git a/src/MapleFE/shared/include/typetable.h b/src/MapleFE/shared/include/typetable.h new file mode 100644 index 0000000000000000000000000000000000000000..d53b17719156f086fefca6fbc7d2f9ec02844e92 --- /dev/null +++ b/src/MapleFE/shared/include/typetable.h @@ -0,0 +1,91 @@ +/* +* Copyright (C) [2021-2022] Futurewei Technologies, Inc. All rights reverved. +* +* OpenArkFE is licensed under the Mulan PSL v2. +* You can use this software according to the terms and conditions of the Mulan PSL v2. +* You may obtain a copy of Mulan PSL v2 at: +* +* http://license.coscl.org.cn/MulanPSL2 +* +* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +* FIT FOR A PARTICULAR PURPOSE. +* See the Mulan PSL v2 for more details. +*/ +////////////////////////////////////////////////////////////////////////////// +// This file contains the TypeTable, which keeps track of all the types // +// // +// created during parsing and AST building, including pre-built primitive types.
// +////////////////////////////////////////////////////////////////////////////// + +#ifndef __TYPETABLE_H__ +#define __TYPETABLE_H__ + +#include +#include +#include +#include +#include "massert.h" +#include "ast.h" +#include "ast_type.h" + +namespace maplefe { + +class TypeEntry { + private: + TreeNode *mType; + TypeId mTypeId; + + public: + TypeEntry() : mType(NULL), mTypeId(TY_None) {} + TypeEntry(TreeNode *node); + ~TypeEntry(){}; + + TypeId GetTypeId() { return mTypeId; } + TreeNode *GetType() { return mType; } + + void SetTypeId(TypeId i) { mTypeId = i; } + void SetType(TreeNode *n) { mType = n; } +}; + +class TypeTable { +private: + std::vector mTypeTable; + std::unordered_map mNodeId2TypeIdxMap; + std::unordered_map mTypeId2TypeMap; + std::unordered_set mPrimTypeId; + std::unordered_set mFuncTypeIdx; + unsigned mPrimSize; + unsigned mPreBuildSize; + +public: + TypeTable() {}; + ~TypeTable() { mTypeTable.clear(); }; + + unsigned size() { return mTypeTable.size(); } + unsigned GetPreBuildSize() { return mPreBuildSize; } + + bool IsPrimTypeId(TypeId tid) { return mPrimTypeId.find(tid) != mPrimTypeId.end(); } + unsigned GetPrimSize() { return mPrimSize; } + TreeNode *CreatePrimType(std::string name, TypeId tid); + TreeNode *CreateBuiltinType(std::string name, TypeId tid); + + void AddPrimTypeId(TypeId tid); + void AddPrimAndBuiltinTypes(); + bool AddType(TreeNode *node); + + TypeEntry *GetTypeEntryFromTypeIdx(unsigned tidx); + TreeNode *GetTypeFromTypeIdx(unsigned tidx); + TreeNode *GetTypeFromTypeId(TypeId tid) { return mTypeId2TypeMap[tid]; } + TreeNode *GetTypeFromStrIdx(unsigned strid); + + unsigned GetOrCreateFunctionTypeIdx(FunctionTypeNode *type); + + void Dump(); +}; + +extern TypeTable gTypeTable; + + +} +#endif // __TYPETABLE_H__ diff --git a/src/MapleFE/shared/include/vfy.h b/src/MapleFE/shared/include/vfy.h index 208fbac48e56d9d79723eab468e09b3ab29761b9..279052d38eae4bf06f6a70f8b8bf7a5f37921741 100644 --- a/src/MapleFE/shared/include/vfy.h +++ b/src/MapleFE/shared/include/vfy.h @@ -18,7 +18,7 @@ // which takes an ASTModule recently generated. At this point we have a complete // module with AST trees created by ASTBuilder. // -// The verification is a top-down traversal on the AST trees. +// The verification is a top-down traversal on the AST trees. // // It carries on more than one jobs. // Verification : Checks the validity of semanteme @@ -41,6 +41,7 @@ #include "ast.h" #include "ast_attr.h" #include "ast_type.h" +#include "ast_module.h" #include "container.h" #include "vfy_log.h" @@ -51,7 +52,8 @@ class TreeNode; class Verifier { protected: - VfyLog mLog; + VfyLog mLog; + ModuleNode *mASTModule; ASTScope *mCurrScope; @@ -72,7 +74,7 @@ protected: virtual void VerifyType(IdentifierNode*); public: - Verifier(); + Verifier(ModuleNode *m); ~Verifier(); void Do(); diff --git a/src/MapleFE/shared/src/Makefile b/src/MapleFE/shared/src/Makefile index 0ee1fc974c86b0f241dedbf40cd28ac587716001..e09a8359da282bcbcfa5eb587cfb226eff3a9939 100644 --- a/src/MapleFE/shared/src/Makefile +++ b/src/MapleFE/shared/src/Makefile @@ -1,4 +1,4 @@ -# Copyright (C) [2020] Futurewei Technologies, Inc. All rights reverved. +# Copyright (C) [2020-2021] Futurewei Technologies, Inc. All rights reverved. # # OpenArkFE is licensed under the Mulan PSL v2. # You can use this software according to the terms and conditions of the Mulan PSL v2. 
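// Editor's note on the vfy.h change above: a Verifier is now constructed per
// ModuleNode instead of working on a single global AST. A minimal usage sketch,
// assuming only the public interface shown above (the function and variable names
// here are hypothetical):
#include "vfy.h"

void VerifyModule(maplefe::ModuleNode *mod) {
  maplefe::Verifier vfy(mod);  // bind the verifier to one module (new ctor in this patch)
  vfy.Do();                    // top-down traversal: verification and related checks
}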
@@ -24,24 +24,36 @@ OBJS :=$(foreach obj,$(OBJ), $(BUILD)/$(obj)) LIBOBJS :=$(patsubst $(BUILD)/main.o,,$(OBJS)) DEPS :=$(foreach dep,$(DEP), $(BUILD)/$(dep)) -INCLUDES := -I $(MAPLEFE_ROOT)/shared/include \ - -I $(MAPLEFE_ROOT)/$(LANG)/include \ - -I . $(MAPLEALL_INC) +GENDIR:=${BUILDDIR}/gen -SHAREDLIB = shared.a +SRCG:=$(wildcard ${GENDIR}/gen_*.cpp) +OBJG:=$(patsubst %.cpp,%.o,$(SRCG)) +DEPG:=$(patsubst %.cpp,%.d,$(SRCG)) -.PHONY: all -all: $(SHAREDLIB) +GENASTDIR:=${BUILDDIR}/ast_gen/shared + +ASTSRCG:=$(wildcard ${GENASTDIR}/*.cpp) +ASTOBJG:=$(patsubst %.cpp,%.o,$(ASTSRCG)) +ASTDEPG:=$(patsubst %.cpp,%.d,$(ASTSRCG)) + +INCLUDES := -I $(MAPLEFE_ROOT)/shared/include -I ${GENASTDIR} -I ${GENDIR} + +SHAREDLIB := shared.a +GENASTLIB := genast.a +GENLIB := gen.a + +.PHONY: all gen_doc clean + +all: $(BUILD)/$(SHAREDLIB) $(GENASTDIR)/$(GENASTLIB) $(GENDIR)/$(GENLIB) -include $(DEPS) -.PHONY: clean vpath %.o $(BUILD) vpath %.d $(BUILD) #Pattern Rules $(BUILD)/%.o : %.cpp - $(CXX) $(CXXFLAGS) -fpermissive $(INCLUDES) -w -c $< -o $@ + $(CXX) $(CXXFLAGS) -fpermissive $(INCLUDES) -w -c $< -o $@ $(BUILD)/%.d : %.cpp @$(CXX) $(CXXFLAGS) -std=c++11 -MM $(INCLUDES) $< > $@ @@ -49,8 +61,31 @@ $(BUILD)/%.d : %.cpp @sed -e 's|.*:|$(BUILD)/$*.o:|' < $(BUILD)/$*.d.tmp > $(BUILD)/$*.d @rm -f $(BUILD)/$*.d.tmp -$(SHAREDLIB) : $(OBJS) +$(GENASTDIR)/%.cpp : gendoc + +gen_doc: + (cd $(MAPLEFE_ROOT); ./scripts/maplefe-autogen.py) + +$(GENASTDIR)/%.o : $(GENASTDIR)/%.cpp + $(CXX) $(CXXFLAGS) -fpermissive $(INCLUDES) -w -c $< -o $@ + +$(GENASTDIR)/%.d : $(GENASTDIR)/%.cpp + @$(CXX) $(CXXFLAGS) -std=c++11 -MM $(INCLUDES) $< > $@ + +$(GENDIR)/%.o : $(GENDIR)/%.cpp + $(CXX) $(CXXFLAGS) -fpermissive $(INCLUDES) -w -c $< -o $@ + +$(GENDIR)/%.d : $(GENDIR)/%.cpp + @$(CXX) $(CXXFLAGS) -std=c++11 -MM $(INCLUDES) $< > $@ + +$(BUILD)/$(SHAREDLIB) : $(LIBOBJS) /usr/bin/ar rcs $(BUILD)/$(SHAREDLIB) $(LIBOBJS) +$(GENASTDIR)/$(GENASTLIB) : $(ASTOBJG) + /usr/bin/ar rcs $(GENASTDIR)/$(GENASTLIB) $(ASTOBJG) + +$(GENDIR)/$(GENLIB) : $(OBJG) + /usr/bin/ar rcs $(GENDIR)/$(GENLIB) $(OBJG) + clean: rm -rf $(BUILD) diff --git a/src/MapleFE/autogen/include/block_gen.h b/src/MapleFE/shared/src/appnode_pool.cpp similarity index 54% rename from src/MapleFE/autogen/include/block_gen.h rename to src/MapleFE/shared/src/appnode_pool.cpp index d2fbf1a80990fba82bf22399f9032a21c2bd280a..a2014c6148fb925f6a2f33eafa9efd9213d4af04 100644 --- a/src/MapleFE/autogen/include/block_gen.h +++ b/src/MapleFE/shared/src/appnode_pool.cpp @@ -12,28 +12,15 @@ * FIT FOR A PARTICULAR PURPOSE. * See the Mulan PSL v2 for more details. 
*/ -//////////////////////////////////////////////////////////////// -// Block Generation -//////////////////////////////////////////////////////////////// - -#ifndef __BLOCK_GEN_H__ -#define __BLOCK_GEN_H__ - -#include "base_gen.h" +#include "appnode_pool.h" +#include "parser.h" namespace maplefe { -class BlockGen : public BaseGen { -public: - BlockGen(const char *dfile, const char *hfile, const char *cfile) : - BaseGen(dfile, hfile, cfile) {} - ~BlockGen(){} - - void Generate(); - void GenCppFile(); - void GenHeaderFile(); -}; - +AppealNode* AppealNodePool::NewAppealNode() { + AppealNode *node = (AppealNode*)mMP.Alloc(sizeof(AppealNode)); + new (node) AppealNode(); + return node; } -#endif +} diff --git a/src/MapleFE/shared/src/ast.cpp b/src/MapleFE/shared/src/ast.cpp index d33f7a7669a0975e4bef3bf5acb229b8672c1140..f42e21d7ff0442a6825f5ca3dcb6e92968630f91 100644 --- a/src/MapleFE/shared/src/ast.cpp +++ b/src/MapleFE/shared/src/ast.cpp @@ -54,228 +54,26 @@ static const char* GetOperatorName(OprId opr) { }; ////////////////////////////////////////////////////////////////////////////////////// -// ASTTree -////////////////////////////////////////////////////////////////////////////////////// - -ASTTree::ASTTree() { - mRootNode = NULL; - gASTBuilder.SetTreePool(&mTreePool); -} - -ASTTree::~ASTTree() { -} - -// Create tree node. Its children have been created tree nodes. -// There are couple issueshere. -// -// 1. An sorted AppealNode could have NO tree node, because it may have NO RuleAction to -// create the sub tree. This happens if the RuleTable is just a temporary intermediate -// table created by Autogen, or its rule is just ONEOF without real syntax. Here -// is an example. -// -// The AST after BuildAST() for a simple statment: c=a+b; -// -// ======= Simplify Trees Dump SortOut ======= -// [1] Table TblExpressionStatement@0: 2,3, -// [2:1] Table TblAssignment@0: 4,5,6, -// [3] Token -// [4:1] Token -// [5:2] Token -// [6:3] Table TblArrayAccess_sub1@2: 7,8, <-- supposed to get a binary expression -// [7:1] Token <-- a -// [8:2] Table TblUnaryExpression_sub1@3: 9,10, <-- +b -// [9] Token -// [10:2] Token -// -// Node [1] won't have a tree node at all since it has no Rule Action attached. -// Node [6] won't have a tree node either. -// -// 2. A binary operation like a+b could be parsed as (1) expression: a, and (2) a -// unary operation: +b. This is because we parse them in favor to ArrayAccess before -// Binary Operation. Usually to handle this issue, in some system like ANTLR, -// they require you to list the priority, by writing rules from higher priority to -// lower priority. -// -// We are going to do a consolidation of the sub-trees, by converting smaller trees -// to a more compact bigger trees. However, to do this we want to set some rules. -// *) The parent AppealNode of these sub-trees has no tree node. So the conversion -// helps make the tree complete. 
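// Editor's note: NewAppealNode() in the new appnode_pool.cpp above is the pool +
// placement-new idiom this patch uses throughout (e.g. gTreePool.NewTreeNode(sizeof(X))
// followed by new (p) X()). A self-contained illustration of the idiom; MyPool and
// MyNode are hypothetical stand-ins, not MapleFE classes:
#include <cstddef>
#include <new>
#include <vector>

struct MyNode { int mId = 0; };   // hypothetical, trivially destructible payload

class MyPool {                    // hypothetical stand-in for MapleFE's memory pool
  std::vector<void*> mBlocks;
 public:
  void* Alloc(std::size_t sz) { void *p = ::operator new(sz); mBlocks.push_back(p); return p; }
  ~MyPool() { for (void *p : mBlocks) ::operator delete(p); }  // pool frees storage; no per-node dtor
};

MyNode* NewMyNode(MyPool &pool) {
  void *mem = pool.Alloc(sizeof(MyNode)); // raw storage from the pool
  return new (mem) MyNode();              // construct in place with placement new
}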
- -TreeNode* ASTTree::NewTreeNode(AppealNode *appeal_node) { - TreeNode *sub_tree = NULL; - - if (appeal_node->IsToken()) { - sub_tree = gASTBuilder.CreateTokenTreeNode(appeal_node->GetToken()); - return sub_tree; - } - - RuleTable *rule_table = appeal_node->GetTable(); - - for (unsigned i = 0; i < rule_table->mNumAction; i++) { - Action *action = rule_table->mActions + i; - gASTBuilder.mActionId = action->mId; - gASTBuilder.ClearParams(); - - for (unsigned j = 0; j < action->mNumElem; j++) { - // find the appeal node child - unsigned elem_idx = action->mElems[j]; - AppealNode *child = appeal_node->GetSortedChildByIndex(elem_idx); - Param p; - p.mIsEmpty = true; - // There are 3 cases to handle. - // 1. child is token, we pass the token to param. - // 2. child is a sub appeal tree, but has no legal AST tree. For example, - // a parameter list: '(' + param-lists + ')'. - // if param-list is empty, it has no AST tree. - // In this case, we sset mIsEmpty to true. - // 3. chidl is a sub appeal tree, and has a AST tree too. - if (child) { - TreeNode *tree_node = child->GetAstTreeNode(); - if (!tree_node) { - if (child->IsToken()) { - p.mIsEmpty = false; - p.mIsTreeNode = false; - p.mData.mToken = child->GetToken(); - } - } else { - p.mIsEmpty = false; - p.mIsTreeNode = true; - p.mData.mTreeNode = tree_node; - } - } - gASTBuilder.AddParam(p); - } - - // For multiple actions of a rule, there should be only action which create tree. - // The others are just for adding attribute or else, and return the same tree - // with additional attributes. - sub_tree = gASTBuilder.Build(); - } - - if (sub_tree) - return sub_tree; - - // It's possible that the Rule has no action, meaning it cannot create tree node. - // Now we have to do some manipulation. Please check if you need all of them. - sub_tree = Manipulate(appeal_node); - - // It's possible that the sub tree is actually empty. For example, in a Parameter list - // ( params ). If 'params' is empty, it returns NULL. - - return sub_tree; -} - -// It's possible that we get NULL tree. -TreeNode* ASTTree::Manipulate(AppealNode *appeal_node) { - TreeNode *sub_tree = NULL; +// TreeNode +////////////////////////////////////////////////////////////////////////////////////// - std::vector child_trees; - std::vector::iterator cit = appeal_node->mSortedChildren.begin(); - for (; cit != appeal_node->mSortedChildren.end(); cit++) { - AppealNode *a_node = *cit; - TreeNode *t_node = a_node->GetAstTreeNode(); - if (t_node) - child_trees.push_back(t_node); - } +void TreeNode::AddAsTypes(TreeNode *type) { + if (!type) + return; - // If we have one and only one child's tree node, we take it. - if (child_trees.size() == 1) { - sub_tree = child_trees[0]; - if (sub_tree) - return sub_tree; - else - MERROR("We got a broken AST tree, not connected sub tree."); - } - - // For the tree having two children, there are a few approaches to further - // manipulate them in order to obtain better AST. - // - // 1. There are cases like (type)value, but they are not recoganized as cast. - // Insteand they are seperated into two nodes, one is (type), the other value. - // So we define ParenthesisNode for (type), and build a CastNode over here. - // - // 2. There are cases like a+b could be parsed as "a" and "+b", a symbol and a - // unary operation. However, we do prefer binary operation than unary. So a - // combination is needed here, especially when the parent node is NULL. 
- if (child_trees.size() == 2) { - TreeNode *child_a = child_trees[0]; - TreeNode *child_b = child_trees[1]; - - sub_tree = Manipulate2Cast(child_a, child_b); - if (sub_tree) - return sub_tree; - - sub_tree = Manipulate2Binary(child_a, child_b); - if (sub_tree) - return sub_tree; - } - - // In the end, if we still have no suitable solution to create the tree, - // we will put subtrees into a PassNode to pass to parent. - if (child_trees.size() > 0) { - PassNode *pass = (PassNode*)BuildPassNode(); - std::vector::iterator child_it = child_trees.begin(); - for (; child_it != child_trees.end(); child_it++) - pass->AddChild(*child_it); - return pass; - } - - // It's possible that we get a Null tree. - return sub_tree; -} - -TreeNode* ASTTree::Manipulate2Cast(TreeNode *child_a, TreeNode *child_b) { - if (child_a->IsParenthesis()) { - ParenthesisNode *type = (ParenthesisNode*)child_a; - CastNode *n = (CastNode*)mTreePool.NewTreeNode(sizeof(CastNode)); - new (n) CastNode(); - n->SetDestType(type->GetExpr()); - n->SetExpr(child_b); - return n; - } - return NULL; -} - -TreeNode* ASTTree::Manipulate2Binary(TreeNode *child_a, TreeNode *child_b) { - if (child_b->IsUnaOperator()) { - UnaOperatorNode *unary = (UnaOperatorNode*)child_b; - unsigned property = GetOperatorProperty(unary->GetOprId()); - if ((property & Binary) && (property & Unary)) { - std::cout << "Convert unary --> binary" << std::endl; - TreeNode *unary_sub = unary->GetOpnd(); - TreeNode *binary = BuildBinaryOperation(child_a, unary_sub, unary->GetOprId()); - return binary; - } + if (type->IsPass()) { + PassNode *pass_node = (PassNode*)type; + for (unsigned i = 0; i < pass_node->GetChildrenNum(); i++) + AddAsTypes(pass_node->GetChild(i)); + } else if (type->IsAsType()) { + AsTypeNode *asn = (AsTypeNode*)type; + AddAsType(asn); + SETPARENT(asn); + } else { + MERROR("unsupported as-type in AddAsType."); } - return NULL; -} - -void ASTTree::Dump(unsigned indent) { - DUMP0("== Sub Tree =="); - mRootNode->Dump(indent); - std::cout << std::endl; -} - -TreeNode* ASTTree::BuildBinaryOperation(TreeNode *childA, TreeNode *childB, OprId id) { - BinOperatorNode *n = (BinOperatorNode*)mTreePool.NewTreeNode(sizeof(BinOperatorNode)); - new (n) BinOperatorNode(id); - n->mOpndA = childA; - n->mOpndB = childB; - childA->SetParent(n); - childB->SetParent(n); - return n; } -TreeNode* ASTTree::BuildPassNode() { - PassNode *n = (PassNode*)mTreePool.NewTreeNode(sizeof(PassNode)); - new (n) PassNode(); - return n; -} - -////////////////////////////////////////////////////////////////////////////////////// -// TreeNode -////////////////////////////////////////////////////////////////////////////////////// - // return true iff: // both are type nodes, either UserTypeNode or PrimTypeNode, and // they are type equal. 
@@ -318,19 +116,231 @@ void TreeNode::DumpIndentation(unsigned ind) { void PackageNode::Dump(unsigned indent) { DumpIndentation(indent); DUMP0_NORETURN("package "); - DUMP0_NORETURN(mName); + mPackage->Dump(0); +} + +////////////////////////////////////////////////////////////////////////////////////// +// DeclareNode +////////////////////////////////////////////////////////////////////////////////////// + +void DeclareNode::AddDecl(TreeNode *t) { + if (!t) + return; + + if (t->IsPass()) { + PassNode *n = (PassNode*)t; + for (unsigned i = 0; i < n->GetChildrenNum(); i++) { + TreeNode *child = n->GetChild(i); + AddDecl(child); + } + } else { + mDecls.PushBack(t); + SETPARENT(t); + } +} + +void DeclareNode::Dump(unsigned indent) { + DumpIndentation(indent); + DUMP0_NORETURN("declare "); + + for (unsigned i = 0; i < mDecls.GetNum(); i++) { + TreeNode *tree = mDecls.ValueAtIndex(i); + tree->Dump(0); + } } ////////////////////////////////////////////////////////////////////////////////////// // ImportNode ////////////////////////////////////////////////////////////////////////////////////// +void ImportNode::AddPair(TreeNode *t) { + if (t->IsPass()) { + PassNode *n = (PassNode*)t; + for (unsigned i = 0; i < n->GetChildrenNum(); i++) { + TreeNode *child = n->GetChild(i); + AddPair(child); + } + } else if (t->IsXXportAsPair()) { + mPairs.PushBack((XXportAsPairNode*)t); + SETPARENT(t); + } else { + // We create a new pair to save 't'. + XXportAsPairNode *n = (XXportAsPairNode*)gTreePool.NewTreeNode(sizeof(XXportAsPairNode)); + new (n) XXportAsPairNode(); + n->SetBefore(t); + mPairs.PushBack(n); + SETPARENT(n); + } +} + +void ImportNode::AddDefaultPair(TreeNode *t) { + if (t->IsPass()) { + PassNode *n = (PassNode*)t; + for (unsigned i = 0; i < n->GetChildrenNum(); i++) { + TreeNode *child = n->GetChild(i); + AddDefaultPair(child); + } + } else if (t->IsXXportAsPair()) { + XXportAsPairNode *p = (XXportAsPairNode*)t; + p->SetIsDefault(); + mPairs.PushBack((XXportAsPairNode*)p); + SETPARENT(p); + } else { + // We create a new pair to save 't'. 
+ XXportAsPairNode *n = (XXportAsPairNode*)gTreePool.NewTreeNode(sizeof(XXportAsPairNode)); + new (n) XXportAsPairNode(); + n->SetBefore(t); + n->SetIsDefault(); + mPairs.PushBack(n); + SETPARENT(n); + } +} + +void ImportNode::AddSinglePair(TreeNode *before, TreeNode *after) { + // We create a new pair + XXportAsPairNode *n = (XXportAsPairNode*)gTreePool.NewTreeNode(sizeof(XXportAsPairNode)); + new (n) XXportAsPairNode(); + n->SetBefore(before); + n->SetAfter(after); + n->SetIsSingle(); + mPairs.PushBack(n); + SETPARENT(n); +} + void ImportNode::Dump(unsigned indent) { DumpIndentation(indent); DUMP0_NORETURN("import "); if (IsImportStatic()) DUMP0_NORETURN("static "); - DUMP0_NORETURN(mName); + + if (mPairs.GetNum() > 0) { + DUMP0_NORETURN('{'); + for (unsigned i = 0; i < mPairs.GetNum(); i++) { + XXportAsPairNode *p = GetPair(i); + p->Dump(0); + if (i < mPairs.GetNum() - 1) + DUMP0_NORETURN(','); + } + DUMP0_NORETURN("} "); + } + + if (mTarget) + mTarget->Dump(0); +} + +////////////////////////////////////////////////////////////////////////////////////// +// ExportNode +////////////////////////////////////////////////////////////////////////////////////// + +void ExportNode::AddPair(TreeNode *t) { + if (t->IsPass()) { + PassNode *n = (PassNode*)t; + for (unsigned i = 0; i < n->GetChildrenNum(); i++) { + TreeNode *child = n->GetChild(i); + AddPair(child); + } + } else if (t->IsXXportAsPair()) { + mPairs.PushBack((XXportAsPairNode*)t); + SETPARENT(t); + } else { + // We create a new pair to save 't'. + XXportAsPairNode *n = (XXportAsPairNode*)gTreePool.NewTreeNode(sizeof(XXportAsPairNode)); + new (n) XXportAsPairNode(); + n->SetBefore(t); + mPairs.PushBack(n); + SETPARENT(n); + } +} + +void ExportNode::AddDefaultPair(TreeNode *t) { + if (t->IsPass()) { + PassNode *n = (PassNode*)t; + for (unsigned i = 0; i < n->GetChildrenNum(); i++) { + TreeNode *child = n->GetChild(i); + AddPair(child); + } + } else if (t->IsXXportAsPair()) { + XXportAsPairNode *p = (XXportAsPairNode*)t; + p->SetIsDefault(); + mPairs.PushBack(p); + SETPARENT(t); + } else { + // We create a new pair to save 't'. 
+ XXportAsPairNode *n = (XXportAsPairNode*)gTreePool.NewTreeNode(sizeof(XXportAsPairNode)); + new (n) XXportAsPairNode(); + n->SetBefore(t); + n->SetIsDefault(); + mPairs.PushBack(n); + SETPARENT(n); + } +} + +void ExportNode::AddSinglePair(TreeNode *before, TreeNode *after) { + // We create a new pair + XXportAsPairNode *n = (XXportAsPairNode*)gTreePool.NewTreeNode(sizeof(XXportAsPairNode)); + new (n) XXportAsPairNode(); + n->SetBefore(before); + n->SetAfter(after); + n->SetIsSingle(); + mPairs.PushBack(n); + SETPARENT(n); +} + +void ExportNode::Dump(unsigned indent) { + DumpIndentation(indent); + DUMP0_NORETURN("export "); + if (mIsExportType) + DUMP0_NORETURN("type "); + + if (mPairs.GetNum() > 0) { + DUMP0_NORETURN('{'); + for (unsigned i = 0; i < mPairs.GetNum(); i++) { + XXportAsPairNode *p = GetPair(i); + p->Dump(0); + if (i < mPairs.GetNum() - 1) + DUMP0_NORETURN(','); + } + DUMP0_NORETURN("} "); + } + + if (mTarget) + mTarget->Dump(0); +} + +////////////////////////////////////////////////////////////////////////////////////// +// XXportAsPair +////////////////////////////////////////////////////////////////////////////////////// + +void XXportAsPairNode::Dump(unsigned indent) { + DumpIndentation(indent); + if (IsEverything()) { + DUMP0_NORETURN(" *"); + if (mBefore) { + DUMP0_NORETURN(" as "); + mBefore->Dump(0); + } + } else if (IsDefault()) { + DUMP0_NORETURN(" default"); + if (mBefore) { + DUMP0_NORETURN(" as "); + mBefore->Dump(0); + } + } else if (IsSingle()) { + DUMP0_NORETURN(" SINGLE "); + if (mBefore) + mBefore->Dump(0); + if (mAfter) { + DUMP0_NORETURN(" as "); + mAfter->Dump(0); + } + } else { + MASSERT(mBefore); + mBefore->Dump(0); + if (mAfter) { + DUMP0_NORETURN(" as "); + mAfter->Dump(0); + } + } } ////////////////////////////////////////////////////////////////////////////////////// @@ -358,15 +368,12 @@ void AnnotationTypeNode::Dump(unsigned indent) { // CastNode ////////////////////////////////////////////////////////////////////////////////////// -const char* CastNode::GetName() { - if (mName) - return mName; +const char* CastNode::GetDumpName() { std::string name = "("; name += mDestType->GetName(); name += ")"; name += mExpr->GetName(); - mName = gStringPool.FindString(name); - return mName; + return gStringPool.FindString(name); } void CastNode::Dump(unsigned indent) { @@ -390,6 +397,19 @@ void AssertNode::Dump(unsigned indent) { mMsg->Dump(0); } +////////////////////////////////////////////////////////////////////////////////////// +// TerOpeartorNode +////////////////////////////////////////////////////////////////////////////////////// + +void TerOperatorNode::Dump(unsigned indent) { + DumpIndentation(indent); + mOpndA->Dump(0); + DUMP0_NORETURN(" ? 
"); + mOpndB->Dump(0); + DUMP0_NORETURN(" : "); + mOpndC->Dump(0); +} + ////////////////////////////////////////////////////////////////////////////////////// // BinOperatorNode ////////////////////////////////////////////////////////////////////////////////////// @@ -429,11 +449,13 @@ void UnaOperatorNode::Dump(unsigned indent) { const char *name = GetOperatorName(mOprId); DumpIndentation(indent); if (IsPost()) { - mOpnd->Dump(indent + 2); + mOpnd->Dump(0); + DUMP0_NORETURN(' '); DUMP0(name); } else { - DUMP0(name); - mOpnd->Dump(indent + 2); + DUMP0_NORETURN(name); + DUMP0_NORETURN(' '); + mOpnd->Dump(0); } } @@ -441,17 +463,73 @@ void UnaOperatorNode::Dump(unsigned indent) { // FieldNode ////////////////////////////////////////////////////////////////////////////////////// -// Right now it's major work is to init the name -void FieldNode::Init() { - std::string name = mUpper->GetName(); - name += '.'; - name += mField->GetName(); - mName = gStringPool.FindString(name); +void FieldNode::Dump(unsigned indent) { + DumpIndentation(indent); + mUpper->Dump(0); + DUMP0_NORETURN('.'); + mField->Dump(0); } -void FieldNode::Dump(unsigned indent) { +////////////////////////////////////////////////////////////////////////////////////// +// TypeAlias Node +////////////////////////////////////////////////////////////////////////////////////// + +void TypeAliasNode::SetId(UserTypeNode *id) { + mId = id; + SETPARENT(mId); +} + +void TypeAliasNode::SetAlias(TreeNode *n) { + mAlias = n; + SETPARENT(mAlias); +} + +void TypeAliasNode::Dump(unsigned indent) { DumpIndentation(indent); - DUMP0_NORETURN(mName); + DUMP0_NORETURN(" type "); + mId->Dump(0); + DUMP0_NORETURN(" = "); + mAlias->Dump(0); +} + +////////////////////////////////////////////////////////////////////////////////////// +// ConditionalType Node +////////////////////////////////////////////////////////////////////////////////////// + +void ConditionalTypeNode::Dump(unsigned indent) { + DumpIndentation(indent); + mTypeA->Dump(0); + DUMP0_NORETURN(" extends "); + mTypeB->Dump(0); + DUMP0_NORETURN(" ? 
"); + mTypeC->Dump(0); + DUMP0_NORETURN(" : "); + mTypeD->Dump(0); +} + +////////////////////////////////////////////////////////////////////////////////////// +// AsType Node +////////////////////////////////////////////////////////////////////////////////////// + +void AsTypeNode::Dump(unsigned indent) { + DumpIndentation(indent); + DUMP0_NORETURN(" as "); + TreeNode *type = GetType(); + type->Dump(0); +} + +////////////////////////////////////////////////////////////////////////////////////// +// type parameter & type argument +////////////////////////////////////////////////////////////////////////////////////// + +void TypeParameterNode::Dump(unsigned indent) { + DumpIndentation(indent); + TreeNode *id = GetId(); + DUMP0_NORETURN(id->GetName()); + if (mDefault) { + DUMP0_NORETURN("="); + DUMP0_NORETURN(mDefault->GetName()); + } } ////////////////////////////////////////////////////////////////////////////////////// @@ -464,9 +542,9 @@ void NewNode::ReplaceChild(TreeNode *old_child, TreeNode *new_child) { SetId(new_child); return; } else { - for (unsigned i = 0; i < GetParamsNum(); i++) { - if (GetParam(i) == old_child) - mParams.SetElem(i, new_child); + for (unsigned i = 0; i < GetArgsNum(); i++) { + if (GetArg(i) == old_child) + mArgs.SetElem(i, new_child); } } } @@ -475,30 +553,64 @@ void NewNode::Dump(unsigned indent) { DumpIndentation(indent); DUMP0_NORETURN("new "); TreeNode *id = GetId(); - id->Dump(0); - //DUMP0_NORETURN(id->GetName()); + if (id->IsLambda()) { + LambdaNode *lmd = (LambdaNode*)id; + lmd->Dump(0); + } else { + DUMP0_NORETURN(id->GetName()); + DUMP0_NORETURN("("); + for (unsigned i = 0; i < GetArgsNum(); i++) { + TreeNode *arg = GetArg(i); + arg->Dump(0); + if (i < GetArgsNum() - 1) + DUMP0_NORETURN(","); + } + DUMP0_NORETURN(")"); + } +} + +////////////////////////////////////////////////////////////////////////////////////// +// DeleteNode +////////////////////////////////////////////////////////////////////////////////////// + +void DeleteNode::Dump(unsigned indent) { + DumpIndentation(indent); + DUMP0_NORETURN(" delete "); + if (mExpr) + mExpr->Dump(0); } ////////////////////////////////////////////////////////////////////////////////////// // CallNode ////////////////////////////////////////////////////////////////////////////////////// -void CallNode::Init() { - // Init the mName; - if (mMethod->IsIdentifier() || mMethod->IsField()) { - mName = mMethod->GetName(); +void CallNode::AddTypeArgument(TreeNode *arg) { + if (arg->IsPass()) { + PassNode *n = (PassNode*)arg; + for (unsigned i = 0; i < n->GetChildrenNum(); i++) { + TreeNode *child = n->GetChild(i); + AddTypeArgument(child); + } } else { - MASSERT(0 && "Unsupported method type in CallNode"); + mTypeArguments.PushBack(arg); + SETPARENT(arg); } } -void CallNode::AddArg(TreeNode *arg) { - mArgs.Merge(arg); -} - void CallNode::Dump(unsigned indent) { DumpIndentation(indent); - DUMP0_NORETURN(mName); + mMethod->Dump(0); + if (GetTypeArgumentsNum() > 0) { + DUMP0_NORETURN("<"); + for (unsigned i = 0; i < GetTypeArgumentsNum(); i++) { + TreeNode *arg = GetTypeArgumentAtIndex(i); + arg->Dump(0); + if (i < GetTypeArgumentsNum() - 1) + DUMP0_NORETURN(","); + } + DUMP0_NORETURN(">"); + } + DUMP0_NORETURN("("); mArgs.Dump(0); DUMP0_NORETURN(")"); @@ -515,8 +627,8 @@ void DimensionNode::Merge(const TreeNode *node) { if (node->IsDimension()) { DimensionNode *n = (DimensionNode *)node; - for (unsigned i = 0; i < n->GetDimsNum(); i++) - AddDim(n->GetNthDim(i)); + for (unsigned i = 0; i < n->GetDimensionsNum(); i++) + 
AddDimension(n->GetDimension(i)); } else if (node->IsPass()) { PassNode *n = (PassNode*)node; for (unsigned i = 0; i < n->GetChildrenNum(); i++) { @@ -532,9 +644,22 @@ void DimensionNode::Merge(const TreeNode *node) { // IdentifierNode ////////////////////////////////////////////////////////////////////////////////////// +void IdentifierNode::Release() { + if (mDims) + mDims->Release(); + mAttrs.Release(); + mAnnotations.Release(); +} + void IdentifierNode::Dump(unsigned indent) { DumpIndentation(indent); - DUMP0_NORETURN(mName); + if (mIsRest) + DUMP0_NORETURN("..."); + DUMP0_NORETURN(GetName()); + if (mIsOptional) + DUMP0_NORETURN('?'); + if (IsNonNull()) + DUMP0_NORETURN('!'); if (mInit) { DUMP0_NORETURN('='); mInit->Dump(0); @@ -546,12 +671,283 @@ void IdentifierNode::Dump(unsigned indent) { } } +////////////////////////////////////////////////////////////////////////////////////// +// DeclNode +////////////////////////////////////////////////////////////////////////////////////// + +void DeclNode::Dump(unsigned indent) { + DumpIndentation(indent); + switch (mProp) { + case JS_Var: + DUMP0_NORETURN("js_var "); + break; + case JS_Let: + DUMP0_NORETURN("js_let "); + break; + case JS_Const: + DUMP0_NORETURN("js_const "); + break; + default: + break; + } + DUMP0_NORETURN("Decl: "); + mVar->Dump(0); +} + +////////////////////////////////////////////////////////////////////////////////////// +// ArrayElement and ArrayLiteral +////////////////////////////////////////////////////////////////////////////////////// + +void ArrayElementNode::Dump(unsigned indent) { + DumpIndentation(indent); + mArray->Dump(0); + for (unsigned i = 0; i < mExprs.GetNum(); i++) { + DUMP0_NORETURN("["); + mExprs.ValueAtIndex(i)->Dump(0); + DUMP0_NORETURN("]"); + } +} + +void ArrayLiteralNode::Dump(unsigned indent) { + DumpIndentation(indent); + DUMP0_NORETURN("["); + for (unsigned i = 0; i < GetLiteralsNum(); i++) { + GetLiteral(i)->Dump(0); + if (i < GetLiteralsNum() - 1) + DUMP0_NORETURN(","); + } + DUMP0_NORETURN("]"); +} + +////////////////////////////////////////////////////////////////////////////////////// +// BindingElement and BindingPattern +////////////////////////////////////////////////////////////////////////////////////// + +void BindingElementNode::Dump(unsigned indent) { + DumpIndentation(indent); + if (mVariable) + mVariable->Dump(0); + DUMP0_NORETURN(":"); + if (mElement) + mElement->Dump(0); +} + +void BindingPatternNode::AddElement(TreeNode *tree) { + if (tree->IsBindingElement() || tree->IsBindingPattern()) { + mElements.PushBack(tree); + SETPARENT(tree); + } else if (tree->IsPass()) { + PassNode *pass = (PassNode*)tree; + for (unsigned i = 0; i < pass->GetChildrenNum(); i++) { + TreeNode *child = pass->GetChild(i); + AddElement(child); + } + } else { + MERROR("Unsupported element of binding pattern."); + } +} + +void BindingPatternNode::Dump(unsigned indent) { + DumpIndentation(indent); + DUMP0_NORETURN("{"); + for (unsigned i = 0; i < mElements.GetNum(); i++) { + TreeNode *elem = GetElement(i); + elem->Dump(0); + if (i != mElements.GetNum()-1) + DUMP0_NORETURN(", "); + } + DUMP0_NORETURN("}"); +} + +////////////////////////////////////////////////////////////////////////////////////// +// StructNode +////////////////////////////////////////////////////////////////////////////////////// + +void StructNode::AddTypeParam(TreeNode *param) { + if (param->IsPass()) { + PassNode *n = (PassNode*)param; + for (unsigned i = 0; i < n->GetChildrenNum(); i++) { + TreeNode *child = n->GetChild(i); + 
AddTypeParam(child); + } + } else { + MASSERT(param->IsTypeParameter()); + mTypeParams.PushBack((TypeParameterNode*)param); + SETPARENT(param); + } +} + +void StructNode::AddSuper(TreeNode *the_super) { + if (!the_super) + return; + + if (the_super->IsPass()) { + PassNode *pass_node = (PassNode*)the_super; + for (unsigned i = 0; i < pass_node->GetChildrenNum(); i++) + AddSuper(pass_node->GetChild(i)); + } else { + mSupers.PushBack(the_super); + } +} + +// Child could be a field or index signature. +void StructNode::AddChild(TreeNode *field) { + if (field->IsPass()) { + PassNode *pass = (PassNode*)field; + for (unsigned i = 0; i < pass->GetChildrenNum(); i++) { + TreeNode *child = pass->GetChild(i); + AddChild(child); + } + } else if (field->IsIdentifier() || + field->IsComputedName() || + field->IsLiteral()) { + AddField(field); + SETPARENT(field); + } else if (field->IsFunction()) { + AddMethod((FunctionNode*)field); + SETPARENT(field); + } else if (field->IsNumIndexSig()) { + SetNumIndexSig((NumIndexSigNode*)field); + SETPARENT(field); + } else if (field->IsStrIndexSig()) { + SetStrIndexSig((StrIndexSigNode*)field); + SETPARENT(field); + } else + MERROR("Unsupported struct field type."); +} + +void NumIndexSigNode::Dump(unsigned indent) { + DumpIndentation(indent); + if (mDataType) + mDataType->Dump(0); +} + +void StrIndexSigNode::Dump(unsigned indent) { + DumpIndentation(indent); + if (mDataType) + mDataType->Dump(0); +} + +void StructNode::Dump(unsigned indent) { + DumpIndentation(indent); + switch (mProp) { + case SProp_CStruct: + DUMP0_NORETURN("struct: "); + break; + case SProp_TSInterface: + DUMP0_NORETURN("ts_interface: "); + break; + case SProp_TSEnum: + DUMP0_NORETURN("ts_enum: "); + break; + default: + break; + } + + if (mStructId) + mStructId->Dump(0); + + if (mTypeParams.GetNum() > 0) { + DUMP0_NORETURN("<"); + for (unsigned i = 0; i < mTypeParams.GetNum(); i++) { + TypeParameterNode *node = mTypeParams.ValueAtIndex(i); + node->Dump(0); + DUMP0_NORETURN(","); + } + DUMP0_NORETURN(">"); + } + + DUMP0_NORETURN(" {"); + + if (mNumIndexSig) { + DUMP0_NORETURN("numeric index type: "); + mNumIndexSig->Dump(0); + } + + if (mStrIndexSig) { + DUMP0_NORETURN("string index type: "); + mStrIndexSig->Dump(0); + } + + for (unsigned i = 0; i < mFields.GetNum(); i++) { + mFields.ValueAtIndex(i)->Dump(0); + if (i != mFields.GetNum()-1) + DUMP0_NORETURN(";"); + } + + for (unsigned i = 0; i < mMethods.GetNum(); i++) { + mMethods.ValueAtIndex(i)->Dump(0); + if (i != mMethods.GetNum()-1) + DUMP0_NORETURN(";"); + } + + DUMP0_NORETURN(" }"); +} + +////////////////////////////////////////////////////////////////////////////////////// +// StructLiteralNode +////////////////////////////////////////////////////////////////////////////////////// + +void StructLiteralNode::AddField(TreeNode *tree) { + if (tree->IsFieldLiteral()) { + FieldLiteralNode *fl = (FieldLiteralNode*)tree; + mFields.PushBack(fl); + SETPARENT(fl); + } else if (tree->IsFunction()) { + FunctionNode *node = (FunctionNode*)tree; + FieldLiteralNode *func_lit = (FieldLiteralNode*)gTreePool.NewTreeNode(sizeof(FieldLiteralNode)); + new (func_lit) FieldLiteralNode(); + TreeNode *func_name = node->GetFuncName(); + if (func_name) { + MASSERT(func_name->IsIdentifier() || func_name->IsComputedName()); + func_lit->SetFieldName(func_name); + } + func_lit->SetLiteral(node); + mFields.PushBack(func_lit); + SETPARENT(func_lit); + } else if (tree->IsLiteral() || + tree->IsIdentifier() || + tree->IsField() || + tree->IsCall() || + 
tree->IsArrayElement()) { + FieldLiteralNode *fln = (FieldLiteralNode*)gTreePool.NewTreeNode(sizeof(FieldLiteralNode)); + new (fln) FieldLiteralNode(); + fln->SetLiteral(tree); + mFields.PushBack(fln); + SETPARENT(fln); + } else if (tree->IsPass()) { + PassNode *pass = (PassNode*)tree; + for (unsigned i = 0; i < pass->GetChildrenNum(); i++) { + TreeNode *child = pass->GetChild(i); + AddField(child); + } + } else { + MASSERT(0 && "unsupported."); + } +} + +void StructLiteralNode::Dump(unsigned indent) { + DumpIndentation(indent); + DUMP0_NORETURN(" {"); + for (unsigned i = 0; i < mFields.GetNum(); i++) { + FieldLiteralNode *fl = GetField(i); + if (fl->mFieldName) + fl->mFieldName->Dump(0); + DUMP0_NORETURN(":"); + fl->mLiteral->Dump(0); + if (i != mFields.GetNum()-1) + DUMP0_NORETURN(", "); + } + DUMP0_NORETURN("}"); +} + ////////////////////////////////////////////////////////////////////////////////////// // VarListNode ////////////////////////////////////////////////////////////////////////////////////// void VarListNode::AddVar(IdentifierNode *n) { mVars.PushBack(n); + SETPARENT(n); } // Merge a node. @@ -576,7 +972,6 @@ void VarListNode::Merge(TreeNode *n) { void VarListNode::Dump(unsigned indent) { DumpIndentation(indent); - DUMP0_NORETURN("var:"); for (unsigned i = 0; i < mVars.GetNum(); i++) { //DUMP0_NORETURN(mVars.ValueAtIndex(i)->GetName()); mVars.ValueAtIndex(i)->Dump(0); @@ -585,6 +980,36 @@ void VarListNode::Dump(unsigned indent) { } } +////////////////////////////////////////////////////////////////////////////////////// +// NamespaceNode +////////////////////////////////////////////////////////////////////////////////////// + +void NamespaceNode::AddBody(TreeNode *tree) { + if (tree->IsPass()) { + PassNode *p = (PassNode*)tree; + for (unsigned i = 0; i < p->GetChildrenNum(); i++) { + TreeNode *child = p->GetChild(i); + AddBody(child); + } + } else { + AddElement(tree); + SETPARENT(tree); + } +} + +void NamespaceNode::Dump(unsigned indent) { + DumpIndentation(indent); + DUMP0_NORETURN("namespace "); + if (mId) + mId->Dump(0); + DUMP_RETURN(); + for (unsigned i = 0; i < mElements.GetNum(); i++) { + DumpIndentation(indent + 2); + mElements.ValueAtIndex(i)->Dump(0); + DUMP_RETURN(); + } +} + ////////////////////////////////////////////////////////////////////////////////////// // ExprListNode ////////////////////////////////////////////////////////////////////////////////////// @@ -594,8 +1019,8 @@ void VarListNode::Dump(unsigned indent) { void ExprListNode::Merge(TreeNode *n) { if (n->IsExprList()) { ExprListNode *expr_list = (ExprListNode*)n; - for (unsigned i = 0; i < expr_list->GetNum(); i++) - AddExpr(expr_list->ExprAtIndex(i)); + for (unsigned i = 0; i < expr_list->GetExprsNum(); i++) + AddExpr(expr_list->GetExprAtIndex(i)); } else if (n->IsPass()) { PassNode *p = (PassNode*)n; for (unsigned i = 0; i < p->GetChildrenNum(); i++) { @@ -606,11 +1031,29 @@ void ExprListNode::Merge(TreeNode *n) { AddExpr(n); } -void ExprListNode::Dump(unsigned indent) { +void ExprListNode::Dump(unsigned indent) { + DumpIndentation(indent); + for (unsigned i = 0; i < mExprs.GetNum(); i++) { + mExprs.ValueAtIndex(i)->Dump(0); + if (i != mExprs.GetNum()-1) + DUMP0_NORETURN(","); + } +} + +////////////////////////////////////////////////////////////////////////////////////// +// TemplateLiteralNode +////////////////////////////////////////////////////////////////////////////////////// + +void TemplateLiteralNode::Dump(unsigned indent) { DumpIndentation(indent); - for (unsigned i = 0; i < 
mExprs.GetNum(); i++) { - mExprs.ValueAtIndex(i)->Dump(0); - if (i != mExprs.GetNum()-1) + DUMP0_NORETURN(" template-literal: "); + for (unsigned i = 0; i < mTrees.GetNum(); i++) { + TreeNode *n = mTrees.ValueAtIndex(i); + if (n) + n->Dump(0); + else + DUMP0_NORETURN("NULL"); + if (i < mStrings.GetNum() - 1) DUMP0_NORETURN(","); } } @@ -624,11 +1067,11 @@ void LiteralNode::InitName() { switch (mData.mType) { case LT_NullLiteral: s = "null"; - mName = gStringPool.FindString(s); + mStrIdx = gStringPool.GetStrIdx(s); break; case LT_ThisLiteral: s = "this"; - mName = gStringPool.FindString(s); + mStrIdx = gStringPool.GetStrIdx(s); break; case LT_IntegerLiteral: case LT_DoubleLiteral: @@ -639,7 +1082,7 @@ void LiteralNode::InitName() { case LT_NA: default: s = ""; - mName = gStringPool.FindString(s); + mStrIdx = gStringPool.GetStrIdx(s); break; } } @@ -657,7 +1100,9 @@ void LiteralNode::Dump(unsigned indent) { DUMP0_NORETURN(mData.mData.mFloat); break; case LT_StringLiteral: - DUMP0_NORETURN(mData.mData.mStr); + DUMP0_NORETURN("\""); + DUMP0_NORETURN(gStringPool.GetStringFromStrIdx(mData.mData.mStrIdx)); + DUMP0_NORETURN("\""); break; case LT_BooleanLiteral: if(mData.mData.mBool == true) @@ -679,11 +1124,115 @@ void LiteralNode::Dump(unsigned indent) { case LT_ThisLiteral: DUMP0_NORETURN("this"); break; + case LT_SuperLiteral: + DUMP0_NORETURN("super"); + break; + case LT_VoidLiteral: + DUMP0_NORETURN("void"); + break; case LT_NA: default: DUMP0_NORETURN("NA Token:"); break; } + + if (mInit) { + DUMP0_NORETURN(" = "); + mInit->Dump(0); + } +} + +////////////////////////////////////////////////////////////////////////////////////// +// RegExprNode +////////////////////////////////////////////////////////////////////////////////////// + +void RegExprNode::Dump(unsigned indent) { + DumpIndentation(indent); + DUMP1_NORETURN("reg expr : ", mData.mExpr); + if (mData.mFlags) + DUMP1_NORETURN(" ", mData.mFlags); +} + +////////////////////////////////////////////////////////////////////////////////////// +// ThrowNode +////////////////////////////////////////////////////////////////////////////////////// + +void ThrowNode::AddException(TreeNode *t) { + if (t->IsPass()) { + PassNode *pass_node = (PassNode*)t; + for (unsigned i = 0; i < pass_node->GetChildrenNum(); i++) + AddException(pass_node->GetChild(i)); + } else { + mExceptions.PushBack(t); + SETPARENT(t); + } +} + +void ThrowNode::Dump(unsigned indent) { + DumpIndentation(indent); + DUMP0_NORETURN("throw "); + for (unsigned i = 0; i < GetExceptionsNum(); i++) { + TreeNode *t = GetExceptionAtIndex(i); + t->Dump(0); + if (i < GetExceptionsNum() - 1) + DUMP0_NORETURN(", "); + } + DUMP_RETURN(); +} + +////////////////////////////////////////////////////////////////////////////////////// +// Try, Catch, Finally nodes +////////////////////////////////////////////////////////////////////////////////////// + +void TryNode::AddCatch(TreeNode *t) { + if (t->IsPass()) { + PassNode *pass_node = (PassNode*)t; + for (unsigned i = 0; i < pass_node->GetChildrenNum(); i++) + AddCatch(pass_node->GetChild(i)); + } else { + MASSERT(t->IsCatch()); + mCatches.PushBack((CatchNode*)t); + SETPARENT(t); + } +} + +void TryNode::Dump(unsigned indent) { + DumpIndentation(indent); + DUMP0_NORETURN("try "); + if (mBlock) + mBlock->Dump(indent + 2); + for (unsigned i = 0; i < GetCatchesNum(); i++) { + CatchNode *c = GetCatchAtIndex(i); + c->Dump(indent); + } + if (mFinally) + mFinally->Dump(indent); +} + +void CatchNode::AddParam(TreeNode *t) { + if (t->IsPass()) { + PassNode 
*pass_node = (PassNode*)t; + for (unsigned i = 0; i < pass_node->GetChildrenNum(); i++) + AddParam(pass_node->GetChild(i)); + } else { + mParams.PushBack(t); + SETPARENT(t); + } +} + +void CatchNode::Dump(unsigned indent) { + DumpIndentation(indent); + DUMP0_NORETURN("catch("); + DUMP0_NORETURN(")"); + if (mBlock) + mBlock->Dump(indent + 2); +} + +void FinallyNode::Dump(unsigned indent) { + DumpIndentation(indent); + DUMP0_NORETURN("finally "); + if (mBlock) + mBlock->Dump(indent + 2); } ////////////////////////////////////////////////////////////////////////////////////// @@ -706,13 +1255,20 @@ void ReturnNode::Dump(unsigned ind) { GetResult()->Dump(0); } -CondBranchNode::CondBranchNode() { - mKind = NK_CondBranch; - mCond = NULL; - mTrueBranch = NULL; - mFalseBranch = NULL; +void YieldNode::Dump(unsigned ind) { + DumpLabel(ind); + DumpIndentation(ind); + if (mIsTransfer) + DUMP0_NORETURN("yield* "); + else + DUMP0_NORETURN("yield "); + if (GetResult()) + GetResult()->Dump(0); } +CondBranchNode::CondBranchNode() : TreeNode(NK_CondBranch), + mCond(NULL), mTrueBranch(NULL), mFalseBranch(NULL) {} + void CondBranchNode::Dump(unsigned ind) { DumpLabel(ind); DumpIndentation(ind); @@ -732,11 +1288,61 @@ void CondBranchNode::Dump(unsigned ind) { void BreakNode::Dump(unsigned ind) { DumpLabel(ind); DumpIndentation(ind); - DUMP0_NORETURN("break "); - GetTarget()->Dump(0); + DUMP0_NORETURN("break:"); + if (GetTarget()) + GetTarget()->Dump(0); + DUMP_RETURN(); +} + +void ContinueNode::Dump(unsigned ind) { + DumpLabel(ind); + DumpIndentation(ind); + DUMP0_NORETURN("continue:"); + if (GetTarget()) + GetTarget()->Dump(0); DUMP_RETURN(); } +// 't' could be a decl with multiple var which are contained in +// a pass node. +void ForLoopNode::AddInit(TreeNode *t) { + if (t->IsDecl()) { + DeclNode *decl = (DeclNode*)t; + TreeNode *var = decl->GetVar(); + if (var && var->IsPass()) { + PassNode *pass = (PassNode*)var; + for (unsigned i = 0; i < pass->GetChildrenNum(); i++) { + DeclNode *n = (DeclNode*)gTreePool.NewTreeNode(sizeof(DeclNode)); + new (n) DeclNode(); + n->SetVar(pass->GetChild(i)); + n->SetProp(decl->GetProp()); + AddInit(n); + } + } else { + mInits.PushBack(t); + SETPARENT(t); + } + } else if (t->IsPass()) { + PassNode *pass = (PassNode*)t; + for (unsigned i = 0; i < pass->GetChildrenNum(); i++) + AddInit(pass->GetChild(i)); + } else { + mInits.PushBack(t); + SETPARENT(t); + } +} + +void ForLoopNode::AddUpdate(TreeNode *t) { + if (t->IsPass()) { + PassNode *pass = (PassNode*)t; + for (unsigned i = 0; i < pass->GetChildrenNum(); i++) + AddUpdate(pass->GetChild(i)); + } else { + mUpdates.PushBack(t); + SETPARENT(t); + } +} + void ForLoopNode::Dump(unsigned ind) { DumpLabel(ind); DumpIndentation(ind); @@ -771,55 +1377,100 @@ void SwitchLabelNode::Dump(unsigned ind) { } void SwitchCaseNode::AddLabel(TreeNode *t) { - std::list working_list; - working_list.push_back(t); - while (!working_list.empty()) { - TreeNode *t = working_list.front(); - working_list.pop_front(); - if (t->IsPass()) { - PassNode *labels = (PassNode*)t; - for (unsigned i = 0; i < labels->GetChildrenNum(); i++) - working_list.push_back(labels->GetChild(i)); - } else { - MASSERT(t->IsSwitchLabel()); - mLabels.PushBack((SwitchLabelNode*)t); - } + if (t->IsPass()) { + PassNode *pass = (PassNode*)t; + for (unsigned i = 0; i < pass->GetChildrenNum(); i++) + AddLabel(pass->GetChild(i)); + } else { + MASSERT(t->IsSwitchLabel()); + mLabels.PushBack((SwitchLabelNode*)t); + SETPARENT(t); } } void SwitchCaseNode::AddStmt(TreeNode *t) { - 
std::list working_list; - working_list.push_back(t); - while (!working_list.empty()) { - TreeNode *t = working_list.front(); - working_list.pop_front(); - if (t->IsPass()) { - PassNode *stmts = (PassNode*)t; - for (unsigned i = 0; i < stmts->GetChildrenNum(); i++) - working_list.push_back(stmts->GetChild(i)); - } else { - mStmts.PushBack(t); - } + if (t->IsPass()) { + PassNode *pass = (PassNode*)t; + for (unsigned i = 0; i < pass->GetChildrenNum(); i++) + AddStmt(pass->GetChild(i)); + } else { + mStmts.PushBack(t); + SETPARENT(t); } } void SwitchCaseNode::Dump(unsigned ind) { } -void SwitchNode::AddCase(TreeNode *tree) { - std::list working_list; - working_list.push_back(tree); - while (!working_list.empty()) { - TreeNode *t = working_list.front(); - working_list.pop_front(); - if (t->IsPass()) { - PassNode *cases = (PassNode*)t; - for (unsigned i = 0; i < cases->GetChildrenNum(); i++) - working_list.push_back(cases->GetChild(i)); - } else { - MASSERT(t->IsSwitchCase()); - mCases.PushBack((SwitchCaseNode*)t); +SwitchCaseNode* SwitchNode::SwitchLabelToCase(SwitchLabelNode *label) { + SwitchCaseNode *case_node = + (SwitchCaseNode*)gTreePool.NewTreeNode(sizeof(SwitchCaseNode)); + new (case_node) SwitchCaseNode(); + case_node->AddLabel(label); + return case_node; +} + +void SwitchNode::AddSwitchCase(TreeNode *t) { + if (t->IsPass()) { + PassNode *cases = (PassNode*)t; + for (unsigned i = 0; i < cases->GetChildrenNum(); i++) + AddSwitchCase(cases->GetChild(i)); + } else if (t->IsSwitchCase()) { + // Need go through the statements in this case. Some stmt like + // default : xxx + // are parsed as a labeled stmt inside this case, which they + // are actually a case of switch. + SwitchCaseNode *the_case = (SwitchCaseNode*)t; + SwitchCaseNode *new_case = NULL; + unsigned stmt_num = the_case->GetStmtsNum(); + for (unsigned i = 0; i < stmt_num; i++) { + TreeNode *stmt = the_case->GetStmtAtIndex(i); + TreeNode *label = stmt->GetLabel(); + if (label) { + MASSERT(label->IsIdentifier()); + IdentifierNode *id = (IdentifierNode*)label; + const char *name = id->GetName(); + + // If it's a default case, all remaining statements belong + // to this default case. + if (!strncmp(name, "default", 7) && (strlen(name) == 7)) { + // 1. clear the label of stmt. + stmt->SetLabel(NULL); + // 2. build switch label + SwitchLabelNode *default_label = + (SwitchLabelNode*)gTreePool.NewTreeNode(sizeof(SwitchLabelNode)); + new (default_label) SwitchLabelNode(); + default_label->SetIsDefault(true); + // 3. build the switch case + new_case = (SwitchCaseNode*)gTreePool.NewTreeNode(sizeof(SwitchCaseNode)); + new (new_case) SwitchCaseNode(); + // 4. set the label and stmt for this case. + new_case->AddLabel(default_label); + new_case->AddStmt(stmt); + // 5. add all remaining stmts to new_case + for (unsigned j = i+1; j < stmt_num; j++) { + TreeNode *rem_stmt = the_case->GetStmtAtIndex(j); + new_case->AddStmt(rem_stmt); + } + // 6. 
remove the stmts added to new_case from the_case + for (unsigned j = i; j < stmt_num; j++) + the_case->PopStmt(); + break; + } + } + } + + AddCase(the_case); + SETPARENT(the_case); + if (new_case) { + AddCase(new_case); + SETPARENT(new_case); } + + } else if (t->IsSwitchLabel()) { + SwitchCaseNode *casenode = SwitchLabelToCase((SwitchLabelNode*)t); + AddCase(casenode); + SETPARENT(casenode); } } @@ -832,6 +1483,47 @@ void SwitchNode::Dump(unsigned ind) { // BlockNode ////////////////////////////////////////////////////////////////////////////////////// +void BlockNode::AddChild(TreeNode *tree) { + if (tree->IsDecl()) { + DeclNode *decl = (DeclNode*)tree; + TreeNode *var = decl->GetVar(); + if (var && var->IsPass()) { + PassNode *pass = (PassNode*)var; + for (unsigned i = 0; i < pass->GetChildrenNum(); i++) { + DeclNode *n = (DeclNode*)gTreePool.NewTreeNode(sizeof(DeclNode)); + new (n) DeclNode(); + n->SetVar(pass->GetChild(i)); + n->SetProp(decl->GetProp()); + AddChild(n); + } + } else { + mChildren.PushBack(tree); + SETPARENT(tree); + } + } else if (tree->IsPass()) { + PassNode *passnode = (PassNode*)tree; + for (unsigned j = 0; j < passnode->GetChildrenNum(); j++) { + TreeNode *child = passnode->GetChild(j); + AddChild(child); + } + } else { + mChildren.PushBack(tree); + SETPARENT(tree); + } +} + +void BlockNode::InsertStmtAfter(TreeNode *new_stmt, TreeNode *exist_stmt) { + mChildren.LocateValue(exist_stmt); + mChildren.InsertAfter(new_stmt); + SETPARENT(new_stmt); +} + +void BlockNode::InsertStmtBefore(TreeNode *new_stmt, TreeNode *exist_stmt) { + mChildren.LocateValue(exist_stmt); + mChildren.InsertBefore(new_stmt); + SETPARENT(new_stmt); +} + void BlockNode::Dump(unsigned ind) { DumpLabel(ind); for (unsigned i = 0; i < GetChildrenNum(); i++) { @@ -859,35 +1551,113 @@ void PassNode::Dump(unsigned ind) { // ClassNode ////////////////////////////////////////////////////////////////////////////////////// +void ClassNode::AddTypeParam(TreeNode *param) { + if (param->IsPass()) { + PassNode *n = (PassNode*)param; + for (unsigned i = 0; i < n->GetChildrenNum(); i++) { + TreeNode *child = n->GetChild(i); + AddTypeParam(child); + } + } else { + MASSERT(param->IsTypeParameter()); + mTypeParams.PushBack((TypeParameterNode*)param); + SETPARENT(param); + } +} + +void ClassNode::AddSuperClass(TreeNode *the_super) { + if (!the_super) + return; + + if (the_super->IsPass()) { + PassNode *pass_node = (PassNode*)the_super; + for (unsigned i = 0; i < pass_node->GetChildrenNum(); i++) + AddSuperClass(pass_node->GetChild(i)); + } else { + mSuperClasses.PushBack(the_super); + SETPARENT(the_super); + } +} + +void ClassNode::AddSuperInterface(TreeNode *the_super) { + if (!the_super) + return; + + if (the_super->IsPass()) { + PassNode *pass_node = (PassNode*)the_super; + for (unsigned i = 0; i < pass_node->GetChildrenNum(); i++) + AddSuperInterface(pass_node->GetChild(i)); + } else { + mSuperInterfaces.PushBack(the_super); + SETPARENT(the_super); + } +} + // When the class body, a BlockNode, is added to the ClassNode, we need further // categorize the subtrees into members, methods, local classes, interfaces, etc. 
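// Editor's note: with this patch Construct() takes the class body explicitly instead
// of reading a stored member block. A minimal usage sketch, assuming Construct(BlockNode*)
// stays public; the function and variable names are hypothetical:
#include "ast.h"   // assumed to declare ClassNode/BlockNode, as used by ast.cpp above

void PopulateClass(maplefe::ClassNode *klass, maplefe::BlockNode *body) {
  // Splits the block's children into fields, methods, constructors, local
  // classes/interfaces, imports, exports and declares, as implemented below.
  klass->Construct(body);
}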
-void ClassNode::Construct() { - for (unsigned i = 0; i < mBody->GetChildrenNum(); i++) { - TreeNode *tree_node = mBody->GetChildAtIndex(i); - tree_node->SetParent(this); - if (tree_node->IsVarList()) { - VarListNode *vlnode = (VarListNode*)tree_node; - for (unsigned i = 0; i < vlnode->GetNum(); i++) { - IdentifierNode *inode = vlnode->VarAtIndex(i); - inode->SetParent(this); - mFields.PushBack(inode); - } - } else if (tree_node->IsIdentifier()) - mFields.PushBack((IdentifierNode*)tree_node); - else if (tree_node->IsFunction()) { +void ClassNode::Construct(BlockNode *block) { + for (unsigned i = 0; i < block->GetChildrenNum(); i++) { + TreeNode *tree_node = block->GetChildAtIndex(i); + SETPARENT(tree_node); + if (tree_node->IsDecl()) { + DeclNode *decl = (DeclNode*)tree_node; + TreeNode *var = decl->GetVar(); + if (var->IsVarList()) { + VarListNode *vlnode = (VarListNode*)var; + for (unsigned i = 0; i < vlnode->GetVarsNum(); i++) { + IdentifierNode *inode = vlnode->GetVarAtIndex(i); + SETPARENT(inode); + mFields.PushBack(inode); + } + } else if (var->IsIdentifier() || var->IsComputedName() || var->IsLiteral()) { + // string literal is allowed to be a field. + mFields.PushBack(var); + SETPARENT(var); + } else + MERROR("Unsupported class field."); + } else if (tree_node->IsNumIndexSig() || tree_node->IsStrIndexSig()) { + mFields.PushBack(tree_node); + SETPARENT(tree_node); + } else if (tree_node->IsFunction()) { FunctionNode *f = (FunctionNode*)tree_node; + // There is an ugly case from Typescript, which use keyword 'constructor' + // as an identifier. This causes a constructor to be recoganized as normal function. + // We do adjustment here. + TreeNode *name = f->GetFuncName(); + if (name && name->IsIdentifier()) { + IdentifierNode *id = (IdentifierNode*)name; + const char *name_str = id->GetName(); + if (!strncmp(name_str, "constructor", 11) && (strlen(name_str) == 11)) { + f->SetFuncName(NULL); + f->SetStrIdx(0); + f->SetIsConstructor(); + } + } if (f->IsConstructor()) mConstructors.PushBack(f); else mMethods.PushBack(f); - } else if (tree_node->IsClass()) + SETPARENT(f); + } else if (tree_node->IsClass()) { mLocalClasses.PushBack((ClassNode*)tree_node); - else if (tree_node->IsInterface()) + SETPARENT(tree_node); + } else if (tree_node->IsInterface()) { mLocalInterfaces.PushBack((InterfaceNode*)tree_node); - else if (tree_node->IsBlock()) { + SETPARENT(tree_node); + } else if (tree_node->IsBlock()) { BlockNode *block = (BlockNode*)tree_node; MASSERT(block->IsInstInit() && "unnamed block in class is not inst init?"); mInstInits.PushBack(block); + SETPARENT(tree_node); + } else if (tree_node->IsImport()) { + mImports.PushBack((ImportNode*)tree_node); + SETPARENT(tree_node); + } else if (tree_node->IsExport()) { + mExports.PushBack((ExportNode*)tree_node); + SETPARENT(tree_node); + } else if (tree_node->IsDeclare()) { + mDeclares.PushBack((DeclareNode*)tree_node); + SETPARENT(tree_node); } else MASSERT("Unsupported tree node in class body."); } @@ -899,20 +1669,35 @@ void ClassNode::Release() { mSuperClasses.Release(); mSuperInterfaces.Release(); mAttributes.Release(); + mAnnotations.Release(); + mTypeParams.Release(); mFields.Release(); mMethods.Release(); mLocalClasses.Release(); mLocalInterfaces.Release(); + mImports.Release(); + mExports.Release(); + mDeclares.Release(); } void ClassNode::Dump(unsigned indent) { DumpIndentation(indent); if (IsJavaEnum()) - DUMP1_NORETURN("class[JavaEnum] ", mName); + DUMP1_NORETURN("class[JavaEnum] ", GetName()); else - DUMP1_NORETURN("class ", mName); 
+ DUMP1_NORETURN("class ", GetName()); DUMP_RETURN(); + if (mTypeParams.GetNum() > 0) { + DUMP0_NORETURN("<"); + for (unsigned i = 0; i < mTypeParams.GetNum(); i++) { + TypeParameterNode *node = mTypeParams.ValueAtIndex(i); + node->Dump(0); + DUMP0_NORETURN(","); + } + DUMP0_NORETURN(">"); + } + DumpIndentation(indent + 2); DUMP0("Fields: "); for (unsigned i = 0; i < mFields.GetNum(); i++) { @@ -956,28 +1741,65 @@ void ClassNode::Dump(unsigned indent) { TreeNode *node = mLocalInterfaces.ValueAtIndex(i); node->Dump(indent + 4); } + + if (mImports.GetNum() > 0) { + DumpIndentation(indent + 2); + DUMP0("Imports: "); + } + for (unsigned i = 0; i < mImports.GetNum(); i++) { + TreeNode *node = mImports.ValueAtIndex(i); + node->Dump(indent + 4); + } + + if (mExports.GetNum() > 0) { + DumpIndentation(indent + 2); + DUMP0("Exports: "); + } + for (unsigned i = 0; i < mExports.GetNum(); i++) { + TreeNode *node = mExports.ValueAtIndex(i); + node->Dump(indent + 4); + } + + if (mDeclares.GetNum() > 0) { + DumpIndentation(indent + 2); + DUMP0("Declares: "); + } + for (unsigned i = 0; i < mDeclares.GetNum(); i++) { + TreeNode *node = mDeclares.ValueAtIndex(i); + node->Dump(indent + 4); + } } ////////////////////////////////////////////////////////////////////////////////////// // FunctionNode ////////////////////////////////////////////////////////////////////////////////////// -FunctionNode::FunctionNode() { - mKind = NK_Function; - mName = NULL; - mType = NULL; - mBody = NULL; - mDims = NULL; - mIsConstructor = false; +FunctionNode::FunctionNode() : TreeNode(NK_Function), + mFuncName(NULL), mRetType(NULL), mBody(NULL), mDims(NULL), + mIsConstructor(false), mIsGenerator(false), mIsIterator(false), mIsGetAccessor(false), + mIsSetAccessor(false), mIsCallSignature(false), mIsConstructSignature(false), + mAssert(NULL) {} + +void FunctionNode::AddTypeParam(TreeNode *param) { + if (param->IsPass()) { + PassNode *n = (PassNode*)param; + for (unsigned i = 0; i < n->GetChildrenNum(); i++) { + TreeNode *child = n->GetChild(i); + AddTypeParam(child); + } + } else { + mTypeParams.PushBack(param); + SETPARENT(param); + } } // This is to tell if both FunctionNodes have same return type // and parameter types. So languages require Type Erasure at first, like Java. // Type erasure should be done earlier in language specific process. bool FunctionNode::OverrideEquivalent(FunctionNode *fun) { - if (!mType->TypeEquivalent(fun->GetType())) + if (!mRetType->TypeEquivalent(fun->GetRetType())) return false; - if (GetName() != fun->GetName()) + if (GetStrIdx() != fun->GetStrIdx()) return false; if (GetParamsNum() != fun->GetParamsNum()) return false; @@ -1021,6 +1843,8 @@ void FunctionNode::CleanUp() { } else { // If pass node is the header, insert before next. // If pass node is the last or any one else, insert after prev. 
+ next = NULL; + prev = NULL; if (i == 0) { next = mBody->GetChildAtIndex(1); } else { @@ -1061,12 +1885,46 @@ void FunctionNode::CleanUp() { void FunctionNode::Dump(unsigned indent) { DumpIndentation(indent); if (mIsConstructor) - DUMP1_NORETURN("constructor ", mName); - else - DUMP1_NORETURN("func ", mName); + DUMP0_NORETURN("constructor "); + else if (mIsGetAccessor) + DUMP0_NORETURN("get "); + else if (mIsSetAccessor) + DUMP0_NORETURN("set "); + else if (mIsGenerator) + DUMP0_NORETURN("generator "); + else + DUMP0_NORETURN("func "); + + if (mStrIdx) + DUMP0_NORETURN(GetName()); + if (mFuncName && mFuncName->IsOptional()) + DUMP0_NORETURN("?"); + + if (GetTypeParamsNum() > 0) { + DUMP0_NORETURN("<"); + for (unsigned i = 0; i < GetTypeParamsNum(); i++) { + TreeNode *arg = GetTypeParamAtIndex(i); + arg->Dump(0); + if (i < GetTypeParamsNum() - 1) + DUMP0_NORETURN(","); + } + DUMP0_NORETURN(">"); + } // dump parameters - DUMP0_NORETURN("()"); + DUMP0_NORETURN("("); + for (unsigned i = 0; i < GetParamsNum(); i++) { + TreeNode *param = GetParam(i); + param->Dump(0); + if (i < GetParamsNum() - 1) + DUMP0_NORETURN(","); + } + DUMP0_NORETURN(")"); + + if (mAssert) { + DUMP0_NORETURN(" : "); + mAssert->Dump(0); + } // dump throws DUMP0_NORETURN(" throws: "); @@ -1085,13 +1943,30 @@ void FunctionNode::Dump(unsigned indent) { // LambdaNode ////////////////////////////////////////////////////////////////////////////////////// +void LambdaNode::AddTypeParam(TreeNode *param) { + if (param->IsPass()) { + PassNode *n = (PassNode*)param; + for (unsigned i = 0; i < n->GetChildrenNum(); i++) { + TreeNode *child = n->GetChild(i); + AddTypeParam(child); + } + } else { + MASSERT(param->IsTypeParameter()); + mTypeParams.PushBack((TypeParameterNode*)param); + SETPARENT(param); + } +} + void LambdaNode::Dump(unsigned indent) { DumpIndentation(indent); std::string dump; dump += "("; for (unsigned i = 0; i < mParams.GetNum(); i++) { - IdentifierNode *in = mParams.ValueAtIndex(i); - dump += in->GetName(); + TreeNode *in = mParams.ValueAtIndex(i); + if(in->IsDecl()) + dump += static_cast(in)->GetVar()->GetName(); + else + dump += in->GetName(); if (i < mParams.GetNum() - 1) dump += ","; } @@ -1101,6 +1976,137 @@ void LambdaNode::Dump(unsigned indent) { mBody->Dump(0); } +////////////////////////////////////////////////////////////////////////////////////// +// InstanceOfNode +////////////////////////////////////////////////////////////////////////////////////// + +void InstanceOfNode::Dump(unsigned indent) { + DumpIndentation(indent); + mLeft->Dump(0); + DUMP0_NORETURN(" instanceof "); + mRight->Dump(0); +} + +////////////////////////////////////////////////////////////////////////////////////// +// InNode +////////////////////////////////////////////////////////////////////////////////////// + +void InNode::Dump(unsigned indent) { + DumpIndentation(indent); + mLeft->Dump(0); + DUMP0_NORETURN(" in "); + mRight->Dump(0); +} + +////////////////////////////////////////////////////////////////////////////////////// +// MappedPropertyNode +////////////////////////////////////////////////////////////////////////////////////// + +void ComputedNameNode::Dump(unsigned indent) { + DumpIndentation(indent); + DUMP0_NORETURN("["); + mExpr->Dump(0); + DUMP0_NORETURN("] : "); + if (mExtendType) + mExtendType->Dump(0); +} + +////////////////////////////////////////////////////////////////////////////////////// +// IsNode +////////////////////////////////////////////////////////////////////////////////////// + +void 
IsNode::Dump(unsigned indent) { + DumpIndentation(indent); + mLeft->Dump(0); + DUMP0_NORETURN(" is "); + mRight->Dump(0); +} + +////////////////////////////////////////////////////////////////////////////////////// +// AwaitNode +////////////////////////////////////////////////////////////////////////////////////// + +void AwaitNode::Dump(unsigned indent) { + DumpIndentation(indent); + DUMP0_NORETURN(" await "); + if (mExpr) + mExpr->Dump(0); +} + +////////////////////////////////////////////////////////////////////////////////////// +// TypeOfNode +////////////////////////////////////////////////////////////////////////////////////// + +void TypeOfNode::Dump(unsigned indent) { + DumpIndentation(indent); + DUMP0_NORETURN(" typeof "); + mExpr->Dump(0); +} + +////////////////////////////////////////////////////////////////////////////////////// +// KeyOfNode +////////////////////////////////////////////////////////////////////////////////////// + +void KeyOfNode::Dump(unsigned indent) { + DumpIndentation(indent); + DUMP0_NORETURN(" keyof "); + mExpr->Dump(0); +} + +////////////////////////////////////////////////////////////////////////////////////// +// InferNode +////////////////////////////////////////////////////////////////////////////////////// + +void InferNode::Dump(unsigned indent) { + DumpIndentation(indent); + DUMP0_NORETURN(" infer "); + mExpr->Dump(0); +} + +////////////////////////////////////////////////////////////////////////////////////// +// NameTypePairNode +////////////////////////////////////////////////////////////////////////////////////// + +void NameTypePairNode::Dump(unsigned indent) { + DumpIndentation(indent); + if (mVar) + mVar->Dump(0); + DUMP0_NORETURN(" : "); + if (mType) + mType->Dump(0); +} + +////////////////////////////////////////////////////////////////////////////////////// +// TupleTypeNode +////////////////////////////////////////////////////////////////////////////////////// + +// Child should be NameTypePairNode +void TupleTypeNode::AddChild(TreeNode *field) { + if (field->IsPass()) { + PassNode *pass = (PassNode*)field; + for (unsigned i = 0; i < pass->GetChildrenNum(); i++) { + TreeNode *child = pass->GetChild(i); + AddChild(child); + } + } else { + MASSERT(field->IsNameTypePair()); + NameTypePairNode *node = (NameTypePairNode*)field; + AddField(node); + SETPARENT(node); + } +} + +void TupleTypeNode::Dump(unsigned indent) { + DumpIndentation(indent); + DUMP0_NORETURN(" [ "); + for (unsigned i = 0; i < mFields.GetNum(); i++) { + NameTypePairNode *node = mFields.ValueAtIndex(i); + node->Dump(0); + DUMP0_NORETURN(" , "); + } + DUMP0_NORETURN(" ] "); +} + ////////////////////////////////////////////////////////////////////////////////////// // InterfaceNode ////////////////////////////////////////////////////////////////////////////////////// @@ -1108,12 +2114,12 @@ void LambdaNode::Dump(unsigned indent) { void InterfaceNode::Construct(BlockNode *block) { for (unsigned i = 0; i < block->GetChildrenNum(); i++) { TreeNode *tree_node = block->GetChildAtIndex(i); - tree_node->SetParent(this); + SETPARENT(tree_node); if (tree_node->IsVarList()) { VarListNode *vlnode = (VarListNode*)tree_node; - for (unsigned i = 0; i < vlnode->GetNum(); i++) { - IdentifierNode *inode = vlnode->VarAtIndex(i); - inode->SetParent(this); + for (unsigned i = 0; i < vlnode->GetVarsNum(); i++) { + IdentifierNode *inode = vlnode->GetVarAtIndex(i); + SETPARENT(inode); mFields.PushBack(inode); } } else if (tree_node->IsIdentifier()) @@ -1128,7 +2134,7 @@ void 
InterfaceNode::Construct(BlockNode *block) { void InterfaceNode::Dump(unsigned indent) { DumpIndentation(indent); - DUMP1_NORETURN("interface ", mName); + DUMP1_NORETURN("interface ", GetName()); DUMP_RETURN(); DumpIndentation(indent + 2); @@ -1146,4 +2152,31 @@ void InterfaceNode::Dump(unsigned indent) { node->Dump(indent + 4); } } + +void TripleSlashNode::Dump(unsigned indent) { + DumpIndentation(indent); + DUMP0_NORETURN("trip-slash reference "); + + switch(mProp) { + case TSP_Path: + DUMP0_NORETURN("path = "); + break; + case TSP_Types: + DUMP0_NORETURN("types = "); + break; + case TSP_NoDefaultLib: + DUMP0_NORETURN("no-default-lib = "); + break; + case TSP_Lib: + DUMP0_NORETURN("lib = "); + break; + case TSP_NA: + default: + DUMP0_NORETURN("NA = "); + break; + } + + mValue->Dump(0); +} + } diff --git a/src/MapleFE/shared/src/ast_attr.cpp b/src/MapleFE/shared/src/ast_attr.cpp index 7b4279eb920d603bfa0809ff3bf6c904f6f7b1cc..19b2124b717efe6c259591dec60a9dd85fec112c 100644 --- a/src/MapleFE/shared/src/ast_attr.cpp +++ b/src/MapleFE/shared/src/ast_attr.cpp @@ -18,14 +18,13 @@ #include "ast.h" #include "ruletable.h" #include "ast_attr.h" -#include "gen_attr.h" // for language specific attr keyword #include "massert.h" namespace maplefe { // Inquiry function for language specific attr keyword -static const char* FindAttrKeyword(AttrId id) { - for (unsigned i = 0; i < ATTR_NA; i++) { +const char* FindAttrKeyword(AttrId id) { + for (unsigned i = 0; i < AttrKeywordTableSize; i++) { if (AttrKeywordTable[i].mId == id) return AttrKeywordTable[i].mText; } @@ -33,8 +32,8 @@ static const char* FindAttrKeyword(AttrId id) { } // Inquiry function for language specific attr keyword -static AttrId FindAttrId(const char *keyword) { - for (unsigned i = 0; i < ATTR_NA; i++) { +AttrId FindAttrId(const char *keyword) { + for (unsigned i = 0; i < AttrKeywordTableSize; i++) { if (strncmp(AttrKeywordTable[i].mText, keyword, strlen(keyword)) == 0 && strlen(keyword) == strlen(AttrKeywordTable[i].mText)) return AttrKeywordTable[i].mId; diff --git a/src/MapleFE/shared/src/ast_builder.cpp b/src/MapleFE/shared/src/ast_builder.cpp index 011cafb03f24d84d32a0967d9a5faddc0ad0bc02..8f345d1fff75a00261b6690b48ebe80000387b89 100644 --- a/src/MapleFE/shared/src/ast_builder.cpp +++ b/src/MapleFE/shared/src/ast_builder.cpp @@ -17,6 +17,7 @@ #include "token.h" #include "ruletable.h" +#include "stringpool.h" #include "ast_builder.h" #include "ast_scope.h" #include "ast_attr.h" @@ -26,8 +27,6 @@ namespace maplefe { -ASTBuilder gASTBuilder; - //////////////////////////////////////////////////////////////////////////////////////// // For the time being, we simply use a big switch-case. Later on we could use a more // flexible solution. 
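// [Editor's note] A self-contained sketch of the exact-match table lookup used
// by FindAttrKeyword/FindAttrId above, now bounded by AttrKeywordTableSize.
// DemoAttrId, KeywordEntry and kTable are hypothetical stand-ins for the
// generated, language-specific AttrKeywordTable.
#include <cstring>

enum DemoAttrId { DA_Public, DA_Static, DA_NA };

struct KeywordEntry { DemoAttrId mId; const char *mText; };

static const KeywordEntry kTable[] = { {DA_Public, "public"}, {DA_Static, "static"} };
static const unsigned kTableSize = sizeof(kTable) / sizeof(kTable[0]);

// strncmp alone would accept prefixes ("pub" matches "public" for 3 chars);
// the extra length comparison turns it into an exact match, as above.
DemoAttrId DemoFindAttrId(const char *keyword) {
  for (unsigned i = 0; i < kTableSize; i++) {
    if (std::strncmp(kTable[i].mText, keyword, std::strlen(keyword)) == 0 &&
        std::strlen(keyword) == std::strlen(kTable[i].mText))
      return kTable[i].mId;
  }
  return DA_NA;
}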
@@ -51,17 +50,48 @@ TreeNode* ASTBuilder::Build() { TreeNode* ASTBuilder::CreateTokenTreeNode(const Token *token) { unsigned size = 0; if (token->IsIdentifier()) { - IdentifierNode *n = (IdentifierNode*)mTreePool->NewTreeNode(sizeof(IdentifierNode)); - new (n) IdentifierNode(token->GetName()); + IdentifierNode *n = (IdentifierNode*)gTreePool.NewTreeNode(sizeof(IdentifierNode)); + unsigned idx = gStringPool.GetStrIdx(token->GetName()); + new (n) IdentifierNode(idx); mLastTreeNode = n; return n; } else if (token->IsLiteral()) { LitData data = token->GetLitData(); - LiteralNode *n = (LiteralNode*)mTreePool->NewTreeNode(sizeof(LiteralNode)); + LiteralNode *n = (LiteralNode*)gTreePool.NewTreeNode(sizeof(LiteralNode)); new (n) LiteralNode(data); mLastTreeNode = n; return n; + } else if (token->IsTempLit()) { + TemplateLiteralNode *n = (TemplateLiteralNode*)gTreePool.NewTreeNode(sizeof(TemplateLiteralNode)); + new (n) TemplateLiteralNode(); + + // copy mStrings&mPlaceHolders to n + TempLitData *tld = token->GetTempLitData(); + for (unsigned i = 0; i < tld->mStrings.GetNum(); i++) { + const char *s = tld->mStrings.ValueAtIndex(i); + n->AddString(s); + } + + // release memeory of SmallVector of mStrings. + tld->mStrings.Release(); + delete tld; + + gTemplateLiteralNodes.PushBack(n); + mLastTreeNode = n; + return n; + + } else if (token->IsRegExpr()) { + RegExprNode *n = (RegExprNode*)gTreePool.NewTreeNode(sizeof(RegExprNode)); + new (n) RegExprNode(); + + RegExprData d = token->GetRegExpr(); + n->SetData(d); + + mLastTreeNode = n; + return n; + } else if (token->IsKeyword()) { + mNameForBuildIdentifier = NULL; const char *keyword = token->GetName(); // If it's an attribute AttrNode *n = gAttrPool.GetAttrNode(keyword); @@ -73,18 +103,29 @@ TreeNode* ASTBuilder::CreateTokenTreeNode(const Token *token) { PrimTypeNode *type = gPrimTypePool.FindType(keyword); if (type) { mLastTreeNode = type; + mNameForBuildIdentifier = keyword; return type; } - // We define special literal tree node for 'this'. + // We define special literal tree node for 'this', 'super'. if ((strlen(token->GetName()) == 4) && !strncmp(token->GetName(), "this", 4)) { LitData data; data.mType = LT_ThisLiteral; - LiteralNode *n = (LiteralNode*)mTreePool->NewTreeNode(sizeof(LiteralNode)); + LiteralNode *n = (LiteralNode*)gTreePool.NewTreeNode(sizeof(LiteralNode)); + new (n) LiteralNode(data); + mLastTreeNode = n; + return n; + } else if ((strlen(token->GetName()) == 5) && !strncmp(token->GetName(), "super", 5)) { + LitData data; + data.mType = LT_SuperLiteral; + LiteralNode *n = (LiteralNode*)gTreePool.NewTreeNode(sizeof(LiteralNode)); new (n) LiteralNode(data); mLastTreeNode = n; return n; } + // Otherwise, it doesn't create any tree node. + // But we pass the keyword name to future possible BuildIdentifier. + mNameForBuildIdentifier = keyword; } // Other tokens shouldn't be involved in the tree creation. 
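// [Editor's note] CreateTokenTreeNode above, like the rest of the builder,
// allocates every node with gTreePool.NewTreeNode() and then constructs it
// with placement new. Below is a simplified, self-contained sketch of that
// idiom; DemoArena and DemoIdentifierNode are hypothetical stand-ins, and the
// arena omits alignment/overflow handling that a real pool would need.
#include <cstddef>
#include <new>

class DemoArena {
public:
  void *NewTreeNode(std::size_t size) {
    void *p = mBuf + mUsed;   // bump-pointer allocation from a fixed buffer
    mUsed += size;
    return p;
  }
private:
  alignas(std::max_align_t) char mBuf[4096];
  std::size_t mUsed = 0;
};

struct DemoIdentifierNode {
  explicit DemoIdentifierNode(unsigned idx) : mStrIdx(idx) {}
  unsigned mStrIdx;           // index into a string pool, as with gStringPool
};

DemoIdentifierNode *MakeIdentifier(DemoArena &pool, unsigned strIdx) {
  // Raw storage from the pool, then in-place construction.
  void *mem = pool.NewTreeNode(sizeof(DemoIdentifierNode));
  return new (mem) DemoIdentifierNode(strIdx);
}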
@@ -99,24 +140,30 @@ static void add_attribute_to(TreeNode *tree, TreeNode *attr) { MASSERT(attr->IsAttr()); AttrNode *attr_node = (AttrNode*)attr; AttrId aid = attr_node->GetId(); - if (tree->IsVarList()) { + + if (tree->IsPass()) { + PassNode *pass_node = (PassNode*)tree; + for (unsigned i = 0; i < pass_node->GetChildrenNum(); i++) { + TreeNode *child = pass_node->GetChild(i); + add_attribute_to(child, attr); + } + } else if (tree->IsVarList()) { VarListNode *vl = (VarListNode*)tree; - for (unsigned i = 0; i < vl->GetNum(); i++) { - IdentifierNode *inode = vl->VarAtIndex(i); + for (unsigned i = 0; i < vl->GetVarsNum(); i++) { + IdentifierNode *inode = vl->GetVarAtIndex(i); inode->AddAttr(aid); } - return; - } else if (tree->IsBlock()){ - BlockNode *b = (BlockNode*)tree; - if (b->IsInstInit()) { - b->AddAttr(aid); - return; + } else if (tree->IsExprList()) { + ExprListNode *vl = (ExprListNode*)tree; + for (unsigned i = 0; i < vl->GetExprsNum(); i++) { + TreeNode *child = vl->GetExprAtIndex(i); + add_attribute_to(child, attr); } } else { - ClassNode *klass = (ClassNode*)tree; - klass->AddAttr(aid); - return; + // The basse TreeNode has a virtual AddAttr(). + tree->AddAttr(aid); } + return; } // It's the caller to assure tree is valid, meaning something could carry type. @@ -124,39 +171,243 @@ static void add_type_to(TreeNode *tree, TreeNode *type) { if (tree->IsIdentifier()) { IdentifierNode *in = (IdentifierNode*)tree; in->SetType(type); + } else if (tree->IsLiteral()) { + LiteralNode *lit = (LiteralNode*)tree; + lit->SetType(type); + } else if (tree->IsLambda()) { + LambdaNode *lam = (LambdaNode*)tree; + lam->SetRetType(type); } else if (tree->IsVarList()) { VarListNode *vl = (VarListNode*)tree; - for (unsigned i = 0; i < vl->GetNum(); i++) - vl->VarAtIndex(i)->SetType(type); + for (unsigned i = 0; i < vl->GetVarsNum(); i++) + vl->GetVarAtIndex(i)->SetType(type); } else if (tree->IsFunction()) { FunctionNode *func = (FunctionNode*)tree; - func->SetType(type); + func->SetRetType(type); + } else if (tree->IsBindingPattern()) { + BindingPatternNode *bp = (BindingPatternNode*)tree; + bp->SetType(type); + } else if (tree->IsComputedName()) { + ComputedNameNode *mp = (ComputedNameNode*)tree; + mp->SetExtendType(type); } else { MERROR("Unsupported tree node in add_type_to()"); } } //////////////////////////////////////////////////////////////////////////////////////// -// Major Functions to build the tree +// BuildModule +//////////////////////////////////////////////////////////////////////////////////////// + +// Take one argument, the module name +TreeNode* ASTBuilder::BuildModule() { + ModuleNode *n = (ModuleNode*)gTreePool.NewTreeNode(sizeof(ModuleNode)); + new (n) ModuleNode(); + + MASSERT(mParams.size() == 1); + Param p_a = mParams[0]; + if (!p_a.mIsEmpty && p_a.mIsTreeNode) { + TreeNode *tree = p_a.mData.mTreeNode; + if (tree->IsIdentifier()) { + const char *name = tree->GetName(); + n->SetFilename(name); + } else if (tree->IsLiteral()) { + LiteralNode *lit = (LiteralNode*)tree; + LitData data = lit->GetData(); + MASSERT(data.mType == LT_StringLiteral); + const char *name = gStringPool.GetStringFromStrIdx(data.mData.mStrIdx); + n->SetFilename(name); + } else { + MERROR("Unsupported module name."); + } + } + + mLastTreeNode = n; + return mLastTreeNode; +} + +// It takes no argument. 
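// [Editor's note] add_attribute_to above recurses into PassNode children so
// that the attribute reaches every concrete node and the pass-through wrapper
// is never stored. The same flattening pattern appears in AddStmt,
// AddSwitchCase and BlockNode::AddChild earlier in this patch. A compact,
// self-contained sketch with hypothetical node types:
#include <vector>

struct DemoNode {
  bool isPass = false;                 // pass-through wrapper, like PassNode
  std::vector<DemoNode*> children;
  std::vector<int> attrs;
};

void DemoAddAttributeTo(DemoNode *node, int attrId) {
  if (node->isPass) {
    for (DemoNode *child : node->children)
      DemoAddAttributeTo(child, attrId);   // recurse through the wrapper
  } else {
    node->attrs.push_back(attrId);         // only real nodes carry the attribute
  }
}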
+TreeNode* ASTBuilder::SetIsAmbient() { + MASSERT(mLastTreeNode->IsModule()); + ModuleNode *mod = (ModuleNode*)mLastTreeNode; + mod->SetIsAmbient(); + return mLastTreeNode; +} + +// Takes one parameter which is the tree of module body. +TreeNode* ASTBuilder::AddModuleBody() { + if (mTrace) + std::cout << "In AddModuleBody" << std::endl; + + Param p_body = mParams[0]; + if (!p_body.mIsEmpty) { + if (!p_body.mIsTreeNode) + MERROR("The module body is not a tree node."); + TreeNode *tn = p_body.mData.mTreeNode; + + MASSERT(mLastTreeNode->IsModule()); + ModuleNode *mod = (ModuleNode*)mLastTreeNode; + mod->AddTree(tn); + } + + return mLastTreeNode; +} + +//////////////////////////////////////////////////////////////////////////////////////// +// BuildIdentifier +//////////////////////////////////////////////////////////////////////////////////////// + +// 1. It takes one argument, the target to build into identifier. +// 2. It takes no argument, then mLastTreeNode is the target. +// It could be a token or tree. +TreeNode* ASTBuilder::BuildIdentifier() { + if (mParams.size() == 1) { + Param target = mParams[0]; + if (!target.mIsEmpty) { + if (target.mIsTreeNode) { + TreeNode *tn = target.mData.mTreeNode; + return BuildIdentifier(tn); + } else { + Token *tn = target.mData.mToken; + return BuildIdentifier(tn); + } + } + return NULL; + } + + if (mNameForBuildIdentifier) { + IdentifierNode *n = (IdentifierNode*)gTreePool.NewTreeNode(sizeof(IdentifierNode)); + unsigned idx = gStringPool.GetStrIdx(mNameForBuildIdentifier); + new (n) IdentifierNode(idx); + mLastTreeNode = n; + mNameForBuildIdentifier = NULL; + return n; + } else if (mLastTreeNode->IsIdentifier()) { + return mLastTreeNode; + } else if (mLastTreeNode->IsAttr()) { + AttrNode *an = (AttrNode*)mLastTreeNode; + AttrId aid = an->GetId(); + + IdentifierNode *n = (IdentifierNode*)gTreePool.NewTreeNode(sizeof(IdentifierNode)); + unsigned idx = gStringPool.GetStrIdx(FindAttrKeyword(aid)); + new (n) IdentifierNode(idx); + mLastTreeNode = n; + return n; + } else if (mLastTreeNode->IsPrimType()) { + PrimTypeNode *prim_type = (PrimTypeNode*)mLastTreeNode; + IdentifierNode *n = (IdentifierNode*)gTreePool.NewTreeNode(sizeof(IdentifierNode)); + unsigned idx = gStringPool.GetStrIdx(prim_type->GetTypeName()); + new (n) IdentifierNode(idx); + mLastTreeNode = n; + return n; + } else if (mLastTreeNode->IsLiteral()) { + LiteralNode *lit = (LiteralNode*)mLastTreeNode; + LitData data = lit->GetData(); + if (data.mType == LT_ThisLiteral) { + IdentifierNode *n = (IdentifierNode*)gTreePool.NewTreeNode(sizeof(IdentifierNode)); + unsigned idx = gStringPool.GetStrIdx("this"); + new (n) IdentifierNode(idx); + mLastTreeNode = n; + return n; + } else { + MERROR("Unsupported node type in BuildIdentifier()"); + } + } else { + MERROR("Unsupported node type in BuildIdentifier()"); + } +} + +// Build IdentifierNode from a token. +TreeNode* ASTBuilder::BuildIdentifier(const Token *token) { + const char *name = token->GetName(); + MASSERT(name); + IdentifierNode *n = (IdentifierNode*)gTreePool.NewTreeNode(sizeof(IdentifierNode)); + unsigned idx = gStringPool.GetStrIdx(name); + new (n) IdentifierNode(idx); + mLastTreeNode = n; + return n; +} + +// Build IdentifierNode from a TreeNode. 
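// [Editor's note] The BuildIdentifier overloads above store a string-pool
// index obtained from gStringPool.GetStrIdx() rather than a raw pointer.
// Below is a minimal sketch of such interning with a hypothetical pool class;
// the real gStringPool also owns the character storage, and the diff suggests
// index 0 is treated as "unnamed" (see f->SetStrIdx(0) earlier), which this
// sketch does not model.
#include <string>
#include <unordered_map>
#include <vector>

class DemoStringPool {
public:
  // Equal strings always map to the same stable index.
  unsigned GetStrIdx(const std::string &s) {
    auto it = mIndex.find(s);
    if (it != mIndex.end())
      return it->second;
    mStrings.push_back(s);
    unsigned idx = static_cast<unsigned>(mStrings.size() - 1);
    mIndex.emplace(s, idx);
    return idx;
  }
  const std::string &GetStringFromStrIdx(unsigned idx) const { return mStrings[idx]; }
private:
  std::vector<std::string> mStrings;
  std::unordered_map<std::string, unsigned> mIndex;
};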
+TreeNode* ASTBuilder::BuildIdentifier(const TreeNode *tree) { + if (!tree) + return NULL; + + if (tree->IsAttr()) { + AttrNode *an = (AttrNode*)tree; + AttrId aid = an->GetId(); + IdentifierNode *n = (IdentifierNode*)gTreePool.NewTreeNode(sizeof(IdentifierNode)); + unsigned idx = gStringPool.GetStrIdx(FindAttrKeyword(aid)); + new (n) IdentifierNode(idx); + mLastTreeNode = n; + return n; + } else if (tree->IsLiteral()) { + LiteralNode *lit = (LiteralNode*)tree; + LitData data = lit->GetData(); + if (data.mType == LT_ThisLiteral) { + IdentifierNode *n = (IdentifierNode*)gTreePool.NewTreeNode(sizeof(IdentifierNode)); + unsigned idx = gStringPool.GetStrIdx("this"); + new (n) IdentifierNode(idx); + mLastTreeNode = n; + return n; + } + } + + return NULL; +} + +//////////////////////////////////////////////////////////////////////////////////////// +// NameTypePair +//////////////////////////////////////////////////////////////////////////////////////// + +// It could takes (1) two arguments, name and type +// (2) one argument, the name +TreeNode* ASTBuilder::BuildNameTypePair() { + if (mTrace) + std::cout << "In BuildNameTypePair" << std::endl; + + NameTypePairNode *n = (NameTypePairNode*)gTreePool.NewTreeNode(sizeof(NameTypePairNode)); + new (n) NameTypePairNode(); + mLastTreeNode = n; + + if (mParams.size() == 2) { + Param p_a = mParams[0]; + Param p_b = mParams[1]; + + if (!p_a.mIsEmpty && p_a.mIsTreeNode) + n->SetVar(p_a.mData.mTreeNode); + + if (!p_b.mIsEmpty && p_b.mIsTreeNode) + n->SetType(p_b.mData.mTreeNode); + } else { + Param p_b = mParams[0]; + if (!p_b.mIsEmpty && p_b.mIsTreeNode) + n->SetType(p_b.mData.mTreeNode); + } + + return mLastTreeNode; +} + +//////////////////////////////////////////////////////////////////////////////////////// +// Interfaces for Java style package and import //////////////////////////////////////////////////////////////////////////////////////// TreeNode* ASTBuilder::BuildPackageName() { - MASSERT(!gModule.mPackage); + MASSERT(!mASTModule->mPackage); MASSERT(mLastTreeNode->IsField() || mLastTreeNode->IsIdentifier()); - PackageNode *n = (PackageNode*)mTreePool->NewTreeNode(sizeof(PackageNode)); + PackageNode *n = (PackageNode*)gTreePool.NewTreeNode(sizeof(PackageNode)); new (n) PackageNode(); - const char *name = mLastTreeNode->GetName(); - n->SetName(name); + n->SetPackage(mLastTreeNode); - gModule.SetPackage(n); + mASTModule->SetPackage(n); mLastTreeNode = n; return mLastTreeNode; } TreeNode* ASTBuilder::BuildSingleTypeImport() { - ImportNode *n = (ImportNode*)mTreePool->NewTreeNode(sizeof(ImportNode)); + ImportNode *n = (ImportNode*)gTreePool.NewTreeNode(sizeof(ImportNode)); new (n) ImportNode(); n->SetImportSingle(); n->SetImportType(); @@ -167,13 +418,13 @@ TreeNode* ASTBuilder::BuildSingleTypeImport() { TreeNode *tree = p.mData.mTreeNode; MASSERT(tree->IsIdentifier() || tree->IsField()); - n->SetName(tree->GetName()); + n->SetTarget(tree); mLastTreeNode = n; return mLastTreeNode; } TreeNode* ASTBuilder::BuildAllTypeImport() { - ImportNode *n = (ImportNode*)mTreePool->NewTreeNode(sizeof(ImportNode)); + ImportNode *n = (ImportNode*)gTreePool.NewTreeNode(sizeof(ImportNode)); new (n) ImportNode(); n->SetImportAll(); n->SetImportType(); @@ -184,20 +435,20 @@ TreeNode* ASTBuilder::BuildAllTypeImport() { TreeNode *tree = p.mData.mTreeNode; MASSERT(tree->IsIdentifier() || tree->IsField()); - n->SetName(tree->GetName()); + n->SetTarget(tree); mLastTreeNode = n; return mLastTreeNode; } // It takes the mLastTreeNode as parameter TreeNode* 
ASTBuilder::BuildSingleStaticImport() { - ImportNode *n = (ImportNode*)mTreePool->NewTreeNode(sizeof(ImportNode)); + ImportNode *n = (ImportNode*)gTreePool.NewTreeNode(sizeof(ImportNode)); new (n) ImportNode(); n->SetImportSingle(); n->SetImportType(); MASSERT(mLastTreeNode->IsIdentifier() || mLastTreeNode->IsField()); - n->SetName(mLastTreeNode->GetName()); + n->SetTarget(mLastTreeNode); n->SetImportStatic(); mLastTreeNode = n; return mLastTreeNode; @@ -211,8 +462,307 @@ TreeNode* ASTBuilder::BuildAllStaticImport() { } TreeNode* ASTBuilder::BuildAllImport() { + ImportNode *n = (ImportNode*)gTreePool.NewTreeNode(sizeof(ImportNode)); + new (n) ImportNode(); + n->SetImportAll(); + mLastTreeNode = n; + return mLastTreeNode; +} + +//////////////////////////////////////////////////////////////////////////////////////// +// Interfaces for Javascript style export and import +//////////////////////////////////////////////////////////////////////////////////////// + +// Takes no argument. +TreeNode* ASTBuilder::BuildImport() { + ImportNode *n = (ImportNode*)gTreePool.NewTreeNode(sizeof(ImportNode)); + new (n) ImportNode(); + + mLastTreeNode = n; + return mLastTreeNode; +} + +// Takes no argument. +TreeNode* ASTBuilder::BuildExport() { + ExportNode *n = (ExportNode*)gTreePool.NewTreeNode(sizeof(ExportNode)); + new (n) ExportNode(); + + mLastTreeNode = n; + return mLastTreeNode; +} + +// It set mLastTreeNode to a type import/export. +TreeNode* ASTBuilder::SetIsXXportType() { + if (mLastTreeNode->IsImport()) { + ImportNode *inode = (ImportNode*)mLastTreeNode; + inode->SetImportType(); + } else if (mLastTreeNode->IsExport()) { + ExportNode *enode = (ExportNode*)mLastTreeNode; + enode->SetIsExportType(); + } else { + MERROR("Unsupported action."); + } + + return mLastTreeNode; +} + +// It takes one argument, the pairs. +// The pairs could be complicated in Javascript. We will let ImportNode or +// ExportNode to handle by themselves. +TreeNode* ASTBuilder::SetPairs() { + TreeNode *pairs = NULL; + Param p = mParams[0]; + if (!p.mIsEmpty && p.mIsTreeNode) { + pairs = p.mData.mTreeNode; + if (mLastTreeNode->IsImport()) { + ImportNode *inode = (ImportNode*)mLastTreeNode; + inode->AddPair(pairs); + } else if (mLastTreeNode->IsExport()) { + ExportNode *enode = (ExportNode*)mLastTreeNode; + enode->AddPair(pairs); + } + } + + return mLastTreeNode; +} + +TreeNode* ASTBuilder::SetDefaultPairs() { + TreeNode *pairs = NULL; + Param p = mParams[0]; + if (!p.mIsEmpty && p.mIsTreeNode) { + pairs = p.mData.mTreeNode; + if (mLastTreeNode->IsImport()) { + ImportNode *inode = (ImportNode*)mLastTreeNode; + inode->AddDefaultPair(pairs); + } else if (mLastTreeNode->IsExport()) { + ExportNode *enode = (ExportNode*)mLastTreeNode; + enode->AddDefaultPair(pairs); + } + } + + return mLastTreeNode; +} + +// It takes +// 1. One argument: the mBefore object. Usually happens in +// export = x +// 2. Two arguments: the mBefore and mAfter. 
In TS, it has +// import x = require(y) +// [NOTE] In .spec file, put the arguments in order of +// SetSinglePairs(before, after) + +TreeNode* ASTBuilder::SetSinglePairs() { + TreeNode *before = NULL; + TreeNode *after = NULL; + + if (mParams.size() == 2) { + Param p = mParams[0]; + if (!p.mIsEmpty && p.mIsTreeNode) + before = p.mData.mTreeNode; + p = mParams[1]; + if (!p.mIsEmpty && p.mIsTreeNode) + after = p.mData.mTreeNode; + } else { + MASSERT(mParams.size() == 1); + Param p = mParams[0]; + if (!p.mIsEmpty && p.mIsTreeNode) + before = p.mData.mTreeNode; + } + + if (mLastTreeNode->IsImport()) { + ImportNode *inode = (ImportNode*)mLastTreeNode; + inode->AddSinglePair(before, after); + } else if (mLastTreeNode->IsExport()) { + ExportNode *enode = (ExportNode*)mLastTreeNode; + enode->AddSinglePair(before, after); + } + + return mLastTreeNode; +} + +// Takes one argument, the 'from' module +TreeNode* ASTBuilder::SetFromModule() { + Param p = mParams[0]; + if (!p.mIsEmpty && p.mIsTreeNode) { + TreeNode *t = p.mData.mTreeNode; + if (mLastTreeNode->IsImport()) { + ImportNode *inode = (ImportNode*)mLastTreeNode; + inode->SetTarget(t); + } else if (mLastTreeNode->IsExport()) { + ExportNode *enode = (ExportNode*)mLastTreeNode; + enode->SetTarget(t); + } + } + return mLastTreeNode; +} + +// Take no argument, or one argument. +// (1) If no argument, it applies to all pairs of import/export. +// This happens right after BuildImport or BuildExport, and there is no existing +// pairs. In this case, we create a new pair and sets it to *. +// (2) If one argument, it's the new name of '*' (aka the Everything), +// and is saved in mAfter of the pair. +// +// In either case, we need create a new and the only pair for XXport node. +TreeNode* ASTBuilder::SetIsEverything() { + XXportAsPairNode *n = (XXportAsPairNode*)gTreePool.NewTreeNode(sizeof(XXportAsPairNode)); + new (n) XXportAsPairNode(); + n->SetIsEverything(); + + if (mParams.size() == 1) { + Param p = mParams[0]; + if (!p.mIsEmpty && p.mIsTreeNode) { + TreeNode *expr = p.mData.mTreeNode; + n->SetBefore(expr); + } + } + + if (mLastTreeNode->IsImport()) { + ImportNode *inode = (ImportNode*)mLastTreeNode; + MASSERT(!inode->GetPairsNum()); + inode->AddPair(n); + } else if (mLastTreeNode->IsExport()) { + ExportNode *enode = (ExportNode*)mLastTreeNode; + MASSERT(!enode->GetPairsNum()); + enode->AddPair(n); + } + + return mLastTreeNode; +} + +// Right now it takes no argument. mLastTreeNode is the implicit argument +TreeNode* ASTBuilder::SetAsNamespace() { + MASSERT(mLastTreeNode->IsXXportAsPair()); + XXportAsPairNode *pair = (XXportAsPairNode*)mLastTreeNode; + pair->SetAsNamespace(); + return mLastTreeNode; +} + +// 1. It takes two arguments, before and after. +// 2. It takes one argument, before. +TreeNode* ASTBuilder::BuildXXportAsPair() { + + TreeNode *before = NULL; + TreeNode *after = NULL; + + Param p = mParams[0]; + if (!p.mIsEmpty && p.mIsTreeNode) { + before = p.mData.mTreeNode; + } + + if (mParams.size() == 2) { + p = mParams[1]; + if (!p.mIsEmpty) { + if (p.mIsTreeNode) { + after = p.mData.mTreeNode; + } else { + after = BuildIdentifier(p.mData.mToken); + } + } + } + + XXportAsPairNode *n = (XXportAsPairNode*)gTreePool.NewTreeNode(sizeof(XXportAsPairNode)); + new (n) XXportAsPairNode(); + + if (before) + n->SetBefore(before); + if (after) + n->SetAfter(after); + + mLastTreeNode = n; + return mLastTreeNode; +} + +// It takes one arguments, the 'x' in the '* as x'. 
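// [Editor's note] A sketch of how the before/after slots of XXportAsPairNode
// line up with the JavaScript/TypeScript source forms handled by the builders
// above. DemoPair is a hypothetical flattened view; the source-form mapping
// follows the code (note that SetIsEverything's comment mentions mAfter while
// both "everything" builders actually call SetBefore).
#include <string>

struct DemoPair {
  std::string before;          // the original name
  std::string after;           // the name after 'as' (empty if none)
  bool isEverything = false;   // the '*' form
  bool isDefault = false;      // the 'default as x' form
};

// import { a as b } from "m";  -> BuildXXportAsPair: before="a", after="b"
// export * as ns from "m";     -> BuildXXportAsPairEverything: isEverything, before="ns"
// export = x;                  -> SetSinglePairs with only the 'before' operand
// import x = require("m");     -> SetSinglePairs: before="x", after=require target
DemoPair MakeRenamePair(const std::string &orig, const std::string &alias) {
  DemoPair p;
  p.before = orig;
  p.after = alias;
  return p;
}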
+TreeNode* ASTBuilder::BuildXXportAsPairEverything() { + MASSERT(mParams.size() == 1); + + TreeNode *tree = NULL; + + Param p = mParams[0]; + if (!p.mIsEmpty && p.mIsTreeNode) { + tree = p.mData.mTreeNode; + } + + XXportAsPairNode *n = (XXportAsPairNode*)gTreePool.NewTreeNode(sizeof(XXportAsPairNode)); + new (n) XXportAsPairNode(); + n->SetIsEverything(); + + if (tree) + n->SetBefore(tree); + + mLastTreeNode = n; + return mLastTreeNode; +} + +// It takes one arguments, the name after 'as'. +TreeNode* ASTBuilder::BuildXXportAsPairDefault() { + MASSERT(mParams.size() == 1); + + TreeNode *tree = NULL; + + Param p = mParams[0]; + if (!p.mIsEmpty && p.mIsTreeNode) { + tree = p.mData.mTreeNode; + } + + XXportAsPairNode *n = (XXportAsPairNode*)gTreePool.NewTreeNode(sizeof(XXportAsPairNode)); + new (n) XXportAsPairNode(); + n->SetIsDefault(); + + if (tree) + n->SetBefore(tree); + + mLastTreeNode = n; + return mLastTreeNode; +} + +///////////////////////////////////////////////////////////////////////////////////////// +// BuildExternalDeclaration and BuildGlobalExternalDeclaration +///////////////////////////////////////////////////////////////////////////////////////// + +// It takes one arguments +TreeNode* ASTBuilder::BuildExternalDeclaration() { + MASSERT(mParams.size() == 1); + + TreeNode *tree = NULL; + + Param p = mParams[0]; + if (!p.mIsEmpty && p.mIsTreeNode) { + tree = p.mData.mTreeNode; + } + + DeclareNode *n = (DeclareNode*)gTreePool.NewTreeNode(sizeof(DeclareNode)); + new (n) DeclareNode(); + n->AddDecl(tree); + + mLastTreeNode = n; + return mLastTreeNode; +} + +// It takes one arguments +TreeNode* ASTBuilder::BuildGlobalExternalDeclaration() { + MASSERT(mParams.size() == 1); + + TreeNode *tree = NULL; + + Param p = mParams[0]; + if (!p.mIsEmpty && p.mIsTreeNode) { + tree = p.mData.mTreeNode; + } + + DeclareNode *n = (DeclareNode*)gTreePool.NewTreeNode(sizeof(DeclareNode)); + new (n) DeclareNode(); + n->AddDecl(tree); + n->SetIsGlobal(); + + mLastTreeNode = n; + return mLastTreeNode; } +///////////////////////////////////////////////////////////////////////////////////////// +///////////////////////////////////////////////////////////////////////////////////////// + // Takes one argument, the expression of the parenthesis TreeNode* ASTBuilder::BuildParenthesis() { if (mTrace) @@ -225,7 +775,7 @@ TreeNode* ASTBuilder::BuildParenthesis() { MASSERT(!p.mIsEmpty && p.mIsTreeNode); expr = p.mData.mTreeNode; - ParenthesisNode *n = (ParenthesisNode*)mTreePool->NewTreeNode(sizeof(ParenthesisNode)); + ParenthesisNode *n = (ParenthesisNode*)gTreePool.NewTreeNode(sizeof(ParenthesisNode)); new (n) ParenthesisNode(); n->SetExpr(expr); @@ -247,7 +797,7 @@ TreeNode* ASTBuilder::BuildCast() { TreeNode *desttype = p_a.mData.mTreeNode; TreeNode *expr = p_b.mData.mTreeNode; - CastNode *n = (CastNode*)mTreePool->NewTreeNode(sizeof(CastNode)); + CastNode *n = (CastNode*)gTreePool.NewTreeNode(sizeof(CastNode)); new (n) CastNode(); n->SetDestType(desttype); @@ -257,6 +807,35 @@ TreeNode* ASTBuilder::BuildCast() { return n; } +// It takes one parameter, the word tells what literal it is. 
+TreeNode* ASTBuilder::BuildLiteral() { + if (mTrace) + std::cout << "In BuildLiteral" << std::endl; + + MASSERT(mParams.size() == 1); + Param p_a = mParams[0]; + MASSERT(p_a.mIsTreeNode); + + TreeNode *tree = p_a.mData.mTreeNode; + bool is_void = false; + if (tree->IsPrimType()) { + PrimTypeNode *prim = (PrimTypeNode*)tree; + if (prim->GetPrimType() == TY_Void) + is_void = true; + } + + if (is_void) { + LitData data; + data.mType = LT_VoidLiteral; + LiteralNode *n = (LiteralNode*)gTreePool.NewTreeNode(sizeof(LiteralNode)); + new (n) LiteralNode(data); + mLastTreeNode = n; + return n; + } else { + MERROR("Unspported in BuildLiteral()."); + } +} + // For first parameter has to be an operator. TreeNode* ASTBuilder::BuildUnaryOperation() { @@ -272,7 +851,7 @@ TreeNode* ASTBuilder::BuildUnaryOperation() { MASSERT(token->IsOperator() && "First param of Unary Operator is not an operator token?"); // create the sub tree - UnaOperatorNode *n = (UnaOperatorNode*)mTreePool->NewTreeNode(sizeof(UnaOperatorNode)); + UnaOperatorNode *n = (UnaOperatorNode*)gTreePool.NewTreeNode(sizeof(UnaOperatorNode)); new (n) UnaOperatorNode(token->GetOprId()); // set 1st param @@ -282,7 +861,6 @@ TreeNode* ASTBuilder::BuildUnaryOperation() { TreeNode *tn = CreateTokenTreeNode(p_b.mData.mToken); n->SetOpnd(tn); } - n->GetOpnd()->SetParent(n); mLastTreeNode = n; return n; @@ -313,52 +891,234 @@ TreeNode* ASTBuilder::BuildBinaryOperation() { MASSERT(token->IsOperator() && "Second param of Binary Operator is not an operator token?"); // create the sub tree - BinOperatorNode *n = (BinOperatorNode*)mTreePool->NewTreeNode(sizeof(BinOperatorNode)); + BinOperatorNode *n = (BinOperatorNode*)gTreePool.NewTreeNode(sizeof(BinOperatorNode)); new (n) BinOperatorNode(token->GetOprId()); mLastTreeNode = n; // set 1st param if (p_a.mIsTreeNode) - n->mOpndA = p_a.mData.mTreeNode; + n->SetOpndA(p_a.mData.mTreeNode); else { TreeNode *tn = CreateTokenTreeNode(p_a.mData.mToken); - n->mOpndA = tn; + n->SetOpndA(tn); } - n->mOpndA->SetParent(n); // set 2nd param if (p_c.mIsTreeNode) - n->mOpndB = p_c.mData.mTreeNode; + n->SetOpndB(p_c.mData.mTreeNode); else { TreeNode *tn = CreateTokenTreeNode(p_c.mData.mToken); - n->mOpndB = tn; + n->SetOpndB(tn); } - n->mOpndB->SetParent(n); return n; } -// Assignment is actually a binary operator. -TreeNode* ASTBuilder::BuildAssignment() { +// For second parameter has to be an operator. +TreeNode* ASTBuilder::BuildTernaryOperation() { if (mTrace) - std::cout << "In assignment --> BuildBinary" << std::endl; - return BuildBinaryOperation(); -} + std::cout << "In BuildTernaryOperation" << std::endl; + + MASSERT(mParams.size() == 3 && "Ternary Operator has NO 3 params?"); + Param p_a = mParams[0]; + Param p_b = mParams[1]; + Param p_c = mParams[2]; + + // create the sub tree + TerOperatorNode *n = (TerOperatorNode*)gTreePool.NewTreeNode(sizeof(TerOperatorNode)); + new (n) TerOperatorNode(); + mLastTreeNode = n; + + MASSERT(p_a.mIsTreeNode); + n->SetOpndA(p_a.mData.mTreeNode); + + MASSERT(p_b.mIsTreeNode); + n->SetOpndB(p_b.mData.mTreeNode); + + MASSERT(p_c.mIsTreeNode); + n->SetOpndC(p_c.mData.mTreeNode); + + return n; +} + +// Takes one argument. Set the tree as a statement. +// We still return the previous mLastTreeNode. 
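// [Editor's note] BuildUnaryOperation/BuildBinaryOperation above show a
// recurring shape: each Param may carry either an already-built tree node or a
// raw token that must first be converted via CreateTokenTreeNode. A
// self-contained sketch of that normalization with hypothetical types (memory
// management is ignored here; the real code allocates from gTreePool):
#include <string>

struct DemoTree  { std::string text; };
struct DemoToken { std::string text; };

struct DemoParam {
  bool isTreeNode;
  DemoTree  *tree;
  DemoToken *token;
};

DemoTree *CreateFromToken(const DemoToken *tok) {
  return new DemoTree{tok->text};        // stands in for CreateTokenTreeNode()
}

// Normalize a Param into a tree operand, as the operator builders do.
DemoTree *NormalizeOperand(const DemoParam &p) {
  return p.isTreeNode ? p.tree : CreateFromToken(p.token);
}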
+TreeNode* ASTBuilder::SetIsStmt() { + if (mTrace) + std::cout << "In SetIsStmt" << std::endl; + + Param p_tree = mParams[0]; + if (!p_tree.mIsEmpty) { + MASSERT(p_tree.mIsTreeNode); + TreeNode *treenode = p_tree.mData.mTreeNode; + treenode->SetIsStmt(); + } + + return mLastTreeNode; +} + +// 1. Takes one argument. Set the tree as an optional node. +// 2. Takes no argument. Set mLastTreeNode as optional. +// We still return the previous mLastTreeNode. +TreeNode* ASTBuilder::SetIsOptional() { + if (mTrace) + std::cout << "In SetIsOptional" << std::endl; + + TreeNode *treenode = NULL; + if (mParams.size() > 0) { + Param p_tree = mParams[0]; + if (!p_tree.mIsEmpty) { + if (p_tree.mIsTreeNode) + treenode = p_tree.mData.mTreeNode; + else + treenode = BuildIdentifier(p_tree.mData.mToken); + } + } else { + treenode = mLastTreeNode; + } + + MASSERT(treenode); + if (treenode->IsFunction()) { + FunctionNode *f = (FunctionNode*)mLastTreeNode; + f->GetFuncName()->SetIsOptional(); + } else { + treenode->SetIsOptional(); + } + + return mLastTreeNode; +} + +// Takes one argument. Set the tree as a non null node. +// We still return the previous mLastTreeNode. +TreeNode* ASTBuilder::SetIsNonNull() { + if (mTrace) + std::cout << "In SetIsNonNull" << std::endl; + + Param p_tree = mParams[0]; + if (!p_tree.mIsEmpty) { + MASSERT(p_tree.mIsTreeNode); + TreeNode *treenode = p_tree.mData.mTreeNode; + treenode->SetIsNonNull(); + } + + return mLastTreeNode; +} + +// Takes one argument. Set the tree as a rest or spread node. +// We still return the previous mLastTreeNode. +TreeNode* ASTBuilder::SetIsRest() { + if (mTrace) + std::cout << "In SetIsRest" << std::endl; + + Param p_tree = mParams[0]; + if (!p_tree.mIsEmpty) { + MASSERT(p_tree.mIsTreeNode); + TreeNode *treenode = p_tree.mData.mTreeNode; + treenode->SetIsRest(); + } + + return mLastTreeNode; +} + +// Takes one argument. Set the tree as a constant node. +// Or takes no argument. Set mLastTreeNode as constant. +// We still return the previous mLastTreeNode. +TreeNode* ASTBuilder::SetIsConst() { + if (mTrace) + std::cout << "In SetIsConst" << std::endl; + + TreeNode *treenode = NULL; + + if (mParams.size() == 1) { + Param p_tree = mParams[0]; + if (!p_tree.mIsEmpty) { + MASSERT(p_tree.mIsTreeNode); + treenode = p_tree.mData.mTreeNode; + } + } else { + treenode = mLastTreeNode; + } + + treenode->SetIsConst(); + + mLastTreeNode = treenode; + return mLastTreeNode; +} + +// Takes one argument, which is a primary type node. Set the type as unique. +// Or takes no argument. Use mLastTreeNode as the argument. +TreeNode* ASTBuilder::SetIsUnique() { + if (mTrace) + std::cout << "In SetIsUnique" << std::endl; + + TreeNode *treenode = NULL; + + if (mParams.size() == 1) { + Param p_tree = mParams[0]; + if (!p_tree.mIsEmpty) { + MASSERT(p_tree.mIsTreeNode); + treenode = p_tree.mData.mTreeNode; + } + } else { + treenode = mLastTreeNode; + } + + MASSERT(treenode); + MASSERT(treenode->IsPrimType()); + PrimTypeNode *p = (PrimTypeNode*)treenode; + p->SetIsUnique(); + + return treenode; +} + + +// Assignment is actually a binary operator. +TreeNode* ASTBuilder::BuildAssignment() { + if (mTrace) + std::cout << "In assignment --> BuildBinary" << std::endl; + return BuildBinaryOperation(); +} // Takes one argument, the result expression +// Or takes 0 argument, and it's a simple return stmt. 
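// [Editor's note] Several setters above (SetIsOptional, SetIsConst,
// SetIsUnique) share one design: mark the explicit argument when the .spec
// rule supplies one, otherwise fall back to the implicit mLastTreeNode. A
// compact sketch of that fallback with hypothetical types:
#include <vector>

struct DemoNode2 { bool isConst = false; };

struct DemoBuilder {
  std::vector<DemoNode2*> params;   // stands in for the unwrapped mParams
  DemoNode2 *lastNode = nullptr;    // stands in for mLastTreeNode

  DemoNode2 *SetIsConst() {
    DemoNode2 *target = params.empty() ? lastNode : params[0];
    if (target)
      target->isConst = true;       // mark whichever node was selected
    return target;
  }
};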
TreeNode* ASTBuilder::BuildReturn() { if (mTrace) std::cout << "In BuildReturn" << std::endl; - ReturnNode *result = (ReturnNode*)mTreePool->NewTreeNode(sizeof(ReturnNode)); + ReturnNode *result = (ReturnNode*)gTreePool.NewTreeNode(sizeof(ReturnNode)); new (result) ReturnNode(); - Param p_result = mParams[0]; - if (!p_result.mIsEmpty) { - if (!p_result.mIsTreeNode) - MERROR("The return value is not a tree node."); - TreeNode *result_value = p_result.mData.mTreeNode; - result->SetResult(result_value); + if (mParams.size() == 1) { + Param p_result = mParams[0]; + if (!p_result.mIsEmpty) { + if (!p_result.mIsTreeNode) + MERROR("The return value is not a tree node."); + TreeNode *result_value = p_result.mData.mTreeNode; + result->SetResult(result_value); + } + } + + mLastTreeNode = result; + return mLastTreeNode; +} + +// Takes one argument, the result expression +// Or takes 0 argument, and it's a simple return stmt. +TreeNode* ASTBuilder::BuildYield() { + if (mTrace) + std::cout << "In BuildYield" << std::endl; + + YieldNode *result = (YieldNode*)gTreePool.NewTreeNode(sizeof(YieldNode)); + new (result) YieldNode(); + + if (mParams.size() == 1) { + Param p_result = mParams[0]; + if (!p_result.mIsEmpty) { + if (!p_result.mIsTreeNode) + MERROR("The return value is not a tree node."); + TreeNode *result_value = p_result.mData.mTreeNode; + result->SetResult(result_value); + } } mLastTreeNode = result; @@ -370,7 +1130,7 @@ TreeNode* ASTBuilder::BuildCondBranch() { if (mTrace) std::cout << "In BuildCondBranch" << std::endl; - CondBranchNode *cond_branch = (CondBranchNode*)mTreePool->NewTreeNode(sizeof(CondBranchNode)); + CondBranchNode *cond_branch = (CondBranchNode*)gTreePool.NewTreeNode(sizeof(CondBranchNode)); new (cond_branch) CondBranchNode(); Param p_cond = mParams[0]; @@ -397,7 +1157,7 @@ TreeNode* ASTBuilder::AddCondBranchTrueStatement() { if (!p_true.mIsEmpty) { if (!p_true.mIsTreeNode) MERROR("The condition expr is not a tree node."); - TreeNode *true_expr = CvtToBlock(p_true.mData.mTreeNode); + TreeNode *true_expr = p_true.mData.mTreeNode; cond_branch->SetTrueBranch(true_expr); } @@ -414,7 +1174,7 @@ TreeNode* ASTBuilder::AddCondBranchFalseStatement() { if (!p_false.mIsEmpty) { if (!p_false.mIsTreeNode) MERROR("The condition expr is not a tree node."); - TreeNode *false_expr = CvtToBlock(p_false.mData.mTreeNode); + TreeNode *false_expr = p_false.mData.mTreeNode; cond_branch->SetFalseBranch(false_expr); } @@ -447,27 +1207,56 @@ TreeNode* ASTBuilder::AddLabel() { return tree; } -// BuildBreak takes one argument, an identifer node of empty. 
+// BuildBreak takes 1) one argument, an identifer node +// 2) empty TreeNode* ASTBuilder::BuildBreak() { if (mTrace) std::cout << "In BuildBreak " << std::endl; - BreakNode *break_node = (BreakNode*)mTreePool->NewTreeNode(sizeof(BreakNode)); + BreakNode *break_node = (BreakNode*)gTreePool.NewTreeNode(sizeof(BreakNode)); new (break_node) BreakNode(); - MASSERT(mParams.size() == 1 && "BuildBreak has NO 1 params?"); - Param p_target = mParams[0]; - if (!p_target.mIsEmpty) { - MASSERT(p_target.mIsTreeNode && "Target in BuildBreak is not a tree."); - TreeNode *target = p_target.mData.mTreeNode; - MASSERT(target->IsIdentifier() && "Target in BuildBreak is not an identifier."); - break_node->SetTarget(target); + TreeNode *target = NULL; + + if (mParams.size() == 1) { + Param p_target = mParams[0]; + if (!p_target.mIsEmpty) { + MASSERT(p_target.mIsTreeNode && "Target in BuildBreak is not a tree."); + target = p_target.mData.mTreeNode; + MASSERT(target->IsIdentifier() && "Target in BuildBreak is not an identifier."); + break_node->SetTarget(target); + } } mLastTreeNode = break_node; return break_node; } +// BuildContinue takes 1) one argument, an identifer node +// 2) empty +TreeNode* ASTBuilder::BuildContinue() { + if (mTrace) + std::cout << "In BuildContinue " << std::endl; + + ContinueNode *continue_node = (ContinueNode*)gTreePool.NewTreeNode(sizeof(ContinueNode)); + new (continue_node) ContinueNode(); + + TreeNode *target = NULL; + + if (mParams.size() == 1) { + Param p_target = mParams[0]; + if (!p_target.mIsEmpty) { + MASSERT(p_target.mIsTreeNode && "Target in BuildContinue is not a tree."); + target = p_target.mData.mTreeNode; + MASSERT(target->IsIdentifier() && "Target in BuildContinue is not an identifier."); + continue_node->SetTarget(target); + } + } + + mLastTreeNode = continue_node; + return continue_node; +} + // BuildForLoop takes four arguments. // 1. init statement, could be a list // 2. cond expression, should be a boolean expresion. @@ -477,7 +1266,7 @@ TreeNode* ASTBuilder::BuildForLoop() { if (mTrace) std::cout << "In BuildForLoop " << std::endl; - ForLoopNode *for_loop = (ForLoopNode*)mTreePool->NewTreeNode(sizeof(ForLoopNode)); + ForLoopNode *for_loop = (ForLoopNode*)gTreePool.NewTreeNode(sizeof(ForLoopNode)); new (for_loop) ForLoopNode(); MASSERT(mParams.size() == 4 && "BuildForLoop has NO 4 params?"); @@ -518,7 +1307,7 @@ TreeNode* ASTBuilder::BuildForLoop() { Param p_body = mParams[3]; if (!p_body.mIsEmpty) { MASSERT(p_body.mIsTreeNode && "ForLoop body is not a treenode."); - TreeNode *body = CvtToBlock(p_body.mData.mTreeNode); + TreeNode *body = p_body.mData.mTreeNode; for_loop->SetBody(body); } @@ -526,11 +1315,142 @@ TreeNode* ASTBuilder::BuildForLoop() { return mLastTreeNode; } +// BuildForLoop_In takes 3 or 2 arguments. +// If 3 arguments +// 1. The decl of variable. +// 2. The first explicit arg. This is the set of data +// 3. The body. +// If 2 arguments +// 1. The implicit arg, mLastTreeNode. This is a decl of variable. +// 2. The first explicit arg. This is the set of data +// 3. The body. 
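// [Editor's note] A sketch of the data the for-in/for-of builders below fill
// in, using a hypothetical flattened DemoForLoop record; the FLP_JSIn/FLP_JSOf
// property names come from this patch, and the source-form comments show which
// JavaScript/TypeScript loops they correspond to.
#include <string>

enum DemoForLoopProp { FLP_Regular, FLP_JSIn, FLP_JSOf };

struct DemoForLoop {
  DemoForLoopProp prop = FLP_Regular;
  std::string variable;   // "k"    in: for (let k in obj) ...
  std::string set;        // "obj"  the collection being iterated
  std::string body;       // the loop body
};

// for (let k in obj) stmt;  -> BuildForLoop_In : prop = FLP_JSIn
// for (let v of arr) stmt;  -> BuildForLoop_Of : prop = FLP_JSOf
DemoForLoop MakeForIn(const std::string &var, const std::string &set,
                      const std::string &body) {
  DemoForLoop loop;
  loop.prop = FLP_JSIn;
  loop.variable = var;
  loop.set = set;
  loop.body = body;
  return loop;
}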
+TreeNode* ASTBuilder::BuildForLoop_In() { + if (mTrace) + std::cout << "In BuildForLoop_In " << std::endl; + + ForLoopNode *for_loop = (ForLoopNode*)gTreePool.NewTreeNode(sizeof(ForLoopNode)); + new (for_loop) ForLoopNode(); + for_loop->SetProp(FLP_JSIn); + + TreeNode *the_var = NULL; + TreeNode *the_set = NULL; + TreeNode *the_body = NULL; + + if (mParams.size() == 3) { + Param p_var = mParams[0]; + if (!p_var.mIsEmpty) { + MASSERT(p_var.mIsTreeNode); + the_var = p_var.mData.mTreeNode; + } + + Param p_set = mParams[1]; + if (!p_set.mIsEmpty) { + MASSERT(p_set.mIsTreeNode); + the_set = p_set.mData.mTreeNode; + } + + Param p_body = mParams[2]; + if (!p_body.mIsEmpty) { + MASSERT(p_body.mIsTreeNode); + the_body = p_body.mData.mTreeNode; + } + } else { + MASSERT(mParams.size() == 2); + + the_var = mLastTreeNode; + + Param p_set = mParams[0]; + if (!p_set.mIsEmpty) { + MASSERT(p_set.mIsTreeNode); + the_set = p_set.mData.mTreeNode; + } + + Param p_body = mParams[1]; + if (!p_body.mIsEmpty) { + MASSERT(p_body.mIsTreeNode); + the_body = p_body.mData.mTreeNode; + } + } + + for_loop->SetVariable(the_var); + for_loop->SetSet(the_set); + for_loop->SetBody(the_body); + + mLastTreeNode = for_loop; + return mLastTreeNode; +} + +// BuildForLoop_Of takes 3 or 2 arguments. +// If 3 arguments +// 1. The decl of variable. +// 2. The first explicit arg. This is the set of data +// 3. The body. +// If 2 arguments +// 1. The implicit arg, mLastTreeNode. This is a decl of variable. +// 2. The first explicit arg. This is the set of data +// 3. The body. +TreeNode* ASTBuilder::BuildForLoop_Of() { + if (mTrace) + std::cout << "In BuildForLoop_Of " << std::endl; + + ForLoopNode *for_loop = (ForLoopNode*)gTreePool.NewTreeNode(sizeof(ForLoopNode)); + new (for_loop) ForLoopNode(); + for_loop->SetProp(FLP_JSOf); + + TreeNode *the_var = NULL; + TreeNode *the_set = NULL; + TreeNode *the_body = NULL; + + if (mParams.size() == 3) { + Param p_var = mParams[0]; + if (!p_var.mIsEmpty) { + MASSERT(p_var.mIsTreeNode); + the_var = p_var.mData.mTreeNode; + } + + Param p_set = mParams[1]; + if (!p_set.mIsEmpty) { + MASSERT(p_set.mIsTreeNode); + the_set = p_set.mData.mTreeNode; + } + + Param p_body = mParams[2]; + if (!p_body.mIsEmpty) { + MASSERT(p_body.mIsTreeNode); + the_body = p_body.mData.mTreeNode; + } + } else { + MASSERT(mParams.size() == 2); + + the_var = mLastTreeNode; + + Param p_set = mParams[0]; + if (!p_set.mIsEmpty) { + MASSERT(p_set.mIsTreeNode); + the_set = p_set.mData.mTreeNode; + } + + Param p_body = mParams[1]; + if (!p_body.mIsEmpty) { + MASSERT(p_body.mIsTreeNode); + the_body = p_body.mData.mTreeNode; + } + } + + for_loop->SetVariable(the_var); + for_loop->SetSet(the_set); + for_loop->SetBody(the_body); + + mLastTreeNode = for_loop; + return mLastTreeNode; +} + + TreeNode* ASTBuilder::BuildWhileLoop() { if (mTrace) std::cout << "In BuildWhileLoop " << std::endl; - WhileLoopNode *while_loop = (WhileLoopNode*)mTreePool->NewTreeNode(sizeof(WhileLoopNode)); + WhileLoopNode *while_loop = (WhileLoopNode*)gTreePool.NewTreeNode(sizeof(WhileLoopNode)); new (while_loop) WhileLoopNode(); MASSERT(mParams.size() == 2 && "BuildWhileLoop has NO 2 params?"); @@ -545,7 +1465,7 @@ TreeNode* ASTBuilder::BuildWhileLoop() { Param p_body = mParams[1]; if (!p_body.mIsEmpty) { MASSERT(p_body.mIsTreeNode && "WhileLoop body is not a treenode."); - TreeNode *body = CvtToBlock(p_body.mData.mTreeNode); + TreeNode *body = p_body.mData.mTreeNode; while_loop->SetBody(body); } @@ -557,7 +1477,7 @@ TreeNode* ASTBuilder::BuildDoLoop() { if 
(mTrace) std::cout << "In BuildDoLoop " << std::endl; - DoLoopNode *do_loop = (DoLoopNode*)mTreePool->NewTreeNode(sizeof(DoLoopNode)); + DoLoopNode *do_loop = (DoLoopNode*)gTreePool.NewTreeNode(sizeof(DoLoopNode)); new (do_loop) DoLoopNode(); MASSERT(mParams.size() == 2 && "BuildDoLoop has NO 2 params?"); @@ -572,7 +1492,7 @@ TreeNode* ASTBuilder::BuildDoLoop() { Param p_body = mParams[1]; if (!p_body.mIsEmpty) { MASSERT(p_body.mIsTreeNode && "DoLoop body is not a treenode."); - TreeNode *body = CvtToBlock(p_body.mData.mTreeNode); + TreeNode *body = p_body.mData.mTreeNode; do_loop->SetBody(body); } @@ -586,7 +1506,7 @@ TreeNode* ASTBuilder::BuildSwitchLabel() { std::cout << "In BuildSwitchLabel " << std::endl; SwitchLabelNode *label = - (SwitchLabelNode*)mTreePool->NewTreeNode(sizeof(SwitchLabelNode)); + (SwitchLabelNode*)gTreePool.NewTreeNode(sizeof(SwitchLabelNode)); new (label) SwitchLabelNode(); MASSERT(mParams.size() == 1 && "BuildSwitchLabel has NO 1 params?"); @@ -606,110 +1526,79 @@ TreeNode* ASTBuilder::BuildDefaultSwitchLabel() { if (mTrace) std::cout << "In BuildDefaultSwitchLabel " << std::endl; SwitchLabelNode *label = - (SwitchLabelNode*)mTreePool->NewTreeNode(sizeof(SwitchLabelNode)); + (SwitchLabelNode*)gTreePool.NewTreeNode(sizeof(SwitchLabelNode)); new (label) SwitchLabelNode(); label->SetIsDefault(true); mLastTreeNode = label; return label; } -// BuildOneCase takes two arguments, the expression of a label and a -// and the statements under the label. Both the label and the statements -// could be a PassNode, and I need look into it. We don't want to carry -// PassNode into the SwitchCaseNode. +// BuildOneCase takes +// 1. two arguments, the expression of a label and the statements under the label. +// 2. One arguemnt, which is the statements. The label is mLastTreeNode. 
TreeNode* ASTBuilder::BuildOneCase() { if (mTrace) std::cout << "In BuildOneCase " << std::endl; SwitchCaseNode *case_node = - (SwitchCaseNode*)mTreePool->NewTreeNode(sizeof(SwitchCaseNode)); + (SwitchCaseNode*)gTreePool.NewTreeNode(sizeof(SwitchCaseNode)); new (case_node) SwitchCaseNode(); - MASSERT(mParams.size() == 2 && "BuildOneCase has NO 1 params?"); + TreeNode *label = NULL; + TreeNode *stmt = NULL; - Param p_label = mParams[0]; - MASSERT(!p_label.mIsEmpty); - MASSERT(p_label.mIsTreeNode && "Labels in BuildOneCase is not a tree."); - TreeNode *label = p_label.mData.mTreeNode; - case_node->AddLabel(label); + if (mParams.size() == 2) { + Param p_label = mParams[0]; + if (!p_label.mIsEmpty) { + MASSERT(p_label.mIsTreeNode && "Labels in BuildOneCase is not a tree."); + label = p_label.mData.mTreeNode; + } + Param p_stmt = mParams[1]; + if (!p_stmt.mIsEmpty) { + MASSERT(p_stmt.mIsTreeNode && "Stmts in BuildOneCase is not a tree."); + stmt = p_stmt.mData.mTreeNode; + } + } else { + label = mLastTreeNode; + Param p_stmt = mParams[0]; + if (!p_stmt.mIsEmpty) { + MASSERT(p_stmt.mIsTreeNode && "Stmts in BuildOneCase is not a tree."); + stmt = p_stmt.mData.mTreeNode; + } + } - Param p_stmt = mParams[1]; - MASSERT(!p_stmt.mIsEmpty); - MASSERT(p_stmt.mIsTreeNode && "Stmts in BuildOneCase is not a tree."); - TreeNode *stmt = p_stmt.mData.mTreeNode; - case_node->AddStmt(stmt); + if (label) + case_node->AddLabel(label); + if (stmt) + case_node->AddStmt(stmt); mLastTreeNode = case_node; return case_node; } -SwitchCaseNode* ASTBuilder::SwitchLabelToCase(SwitchLabelNode *label) { - SwitchCaseNode *case_node = - (SwitchCaseNode*)mTreePool->NewTreeNode(sizeof(SwitchCaseNode)); - new (case_node) SwitchCaseNode(); - case_node->AddLabel(label); - return case_node; -} - -// This takes one argument, which is all SwitchCaseNode-s. It could be -// a PassNode. We don't handle it at all. We simply forward this tree node -// up to the parent, which should be final Switch Node. -TreeNode* ASTBuilder::BuildAllCases() { - if (mTrace) - std::cout << "In BuildAllCases " << std::endl; - - MASSERT(mParams.size() == 1 && "BuildAllCases has NO 1 params?"); - Param p_cases = mParams[0]; - MASSERT(!p_cases.mIsEmpty); - MASSERT(p_cases.mIsTreeNode && "Cases in BuildAllCases is not a tree."); - TreeNode *cases = p_cases.mData.mTreeNode; - - // We want to make sure every tree node after BuildAllCases() is - // a SwitchCaseNode. This will ease the handling in future AST process. - // So for those SwitchLabelNode which only have no statement, will be - // converted to a SwitchCaseNode. 
- - if (cases->IsPass()) { - PassNode *pass = (PassNode*)cases; - for (unsigned i = 0; i < pass->GetChildrenNum(); i++) { - TreeNode *child = pass->GetChild(i); - if (child->IsSwitchLabel()) { - SwitchCaseNode *newcase = SwitchLabelToCase((SwitchLabelNode*)child); - pass->SetChild(i, newcase); - } - } - } else if (cases->IsSwitchLabel()) { - SwitchLabelNode *label = (SwitchLabelNode*)cases; - SwitchCaseNode *newcase = SwitchLabelToCase(label); - cases = newcase; - } - - mLastTreeNode = cases; - return cases; -} - TreeNode* ASTBuilder::BuildSwitch() { if (mTrace) std::cout << "In BuildSwitch " << std::endl; SwitchNode *switch_node = - (SwitchNode*)mTreePool->NewTreeNode(sizeof(SwitchNode)); + (SwitchNode*)gTreePool.NewTreeNode(sizeof(SwitchNode)); new (switch_node) SwitchNode(); MASSERT(mParams.size() == 2 && "BuildSwitch has NO 1 params?"); - Param p_cond = mParams[0]; - MASSERT(!p_cond.mIsEmpty); - MASSERT(p_cond.mIsTreeNode && "Condition in BuildSwitch is not a tree."); - TreeNode *cond = p_cond.mData.mTreeNode; - switch_node->SetCond(cond); + Param p_expr = mParams[0]; + MASSERT(!p_expr.mIsEmpty); + MASSERT(p_expr.mIsTreeNode && "Expression in BuildSwitch is not a tree."); + TreeNode *expr = p_expr.mData.mTreeNode; + switch_node->SetExpr(expr); Param p_cases = mParams[1]; MASSERT(!p_cases.mIsEmpty); MASSERT(p_cases.mIsTreeNode && "Cases in BuildSwitch is not a tree."); TreeNode *cases = p_cases.mData.mTreeNode; - switch_node->AddCase(cases); + + switch_node->AddSwitchCase(cases); mLastTreeNode = switch_node; return switch_node; @@ -723,118 +1612,610 @@ TreeNode* ASTBuilder::BuildSwitch() { // We need have a list of pending declarations until the scope is created. //////////////////////////////////////////////////////////////////////////////// -// AddTypeTo takes two parameters, 1) tree; 2) type -TreeNode* ASTBuilder::AddTypeTo() { +// AddType takes two parameters, 1) tree; 2) type +// or takes one parameter, type, and apply it to mLastTreeNode +TreeNode* ASTBuilder::AddType() { if (mTrace) - std::cout << "In AddTypeTo " << std::endl; + std::cout << "In AddType " << std::endl; - MASSERT(mParams.size() == 2 && "BinaryDecl has NO 2 params?"); - Param p_type = mParams[1]; - Param p_name = mParams[0]; + TreeNode *node = NULL; + TreeNode *tree_type = NULL; + + if (mParams.size() == 2) { + Param p_type = mParams[1]; + Param p_name = mParams[0]; - MASSERT(!p_type.mIsEmpty && p_type.mIsTreeNode - && "Not appropriate type node in AddTypeTo()"); - TreeNode *tree_type = p_type.mData.mTreeNode; + if(!p_type.mIsEmpty && p_type.mIsTreeNode) + tree_type = p_type.mData.mTreeNode; - if (!p_name.mIsTreeNode) - MERROR("The variable name should be a IdentifierNode already, but actually NOT?"); - TreeNode *node = p_name.mData.mTreeNode; + if (!p_name.mIsTreeNode) + MERROR("The variable name should be a IdentifierNode already, but actually NOT?"); + node = p_name.mData.mTreeNode; + + } else { + Param p_type = mParams[0]; + if(!p_type.mIsEmpty && p_type.mIsTreeNode) + tree_type = p_type.mData.mTreeNode; + node = mLastTreeNode; + } - add_type_to(node, tree_type); + if (tree_type) + add_type_to(node, tree_type); - return node; + mLastTreeNode = node; + return mLastTreeNode; } -// BuildDecl takes two parameters, 1) type; 2) name -TreeNode* ASTBuilder::BuildDecl() { +// AddType takes two parameters, 1) tree; 2) type +// or takes one parameter, type, and apply it to mLastTreeNode +TreeNode* ASTBuilder::AddAsType() { if (mTrace) - std::cout << "In BuildDecl" << std::endl; - - MASSERT(mParams.size() == 2 && 
"BinaryDecl has NO 2 params?"); - Param p_type = mParams[0]; - Param p_name = mParams[1]; + std::cout << "In AddAsType " << std::endl; - MASSERT(!p_type.mIsEmpty && p_type.mIsTreeNode - && "Not appropriate type node in BuildDecl()"); - TreeNode *tree_type = p_type.mData.mTreeNode; + TreeNode *node = NULL; + TreeNode *tree_type = NULL; - if (!p_name.mIsTreeNode) - MERROR("The variable name should be a IdentifierNode already, but actually NOT?"); - TreeNode *node = p_name.mData.mTreeNode; + if (mParams.size() == 2) { + Param p_type = mParams[1]; + Param p_name = mParams[0]; + if(!p_type.mIsEmpty) { + if(p_type.mIsTreeNode) + tree_type = p_type.mData.mTreeNode; + else + MERROR("The variable name should be a IdentifierNode already, but actually NOT?"); + } + node = p_name.mData.mTreeNode; + } else { + Param p_type = mParams[0]; + if(!p_type.mIsEmpty && p_type.mIsTreeNode) + tree_type = p_type.mData.mTreeNode; + node = mLastTreeNode; + } - add_type_to(node, tree_type); + if (tree_type) { + node->AddAsTypes(tree_type); + } mLastTreeNode = node; - return node; + return mLastTreeNode; } -// BuildField takes two parameters, -// 1) upper enclosing node, could be another field. -// 2) name of this field. -TreeNode* ASTBuilder::BuildField() { +// BuildDecl usually takes two parameters, 1) type; 2) name +// It can also take only one parameter: name. +// It can also take zero parameter, it's mLastTreeNode handled. +TreeNode* ASTBuilder::BuildDecl() { if (mTrace) - std::cout << "In BuildField" << std::endl; + std::cout << "In BuildDecl" << std::endl; - MASSERT(mParams.size() == 2 && "BuildField has NO 2 params?"); - Param p_var_a = mParams[0]; - Param p_var_b = mParams[1]; + TreeNode *tree_type = NULL; + TreeNode *var = NULL; - // Both variable should have been created as tree node. - if (!p_var_a.mIsTreeNode || !p_var_b.mIsTreeNode) { - MERROR("The param in BuildField is not a treenode"); + if (mParams.size() == 2) { + Param p_type = mParams[0]; + Param p_name = mParams[1]; + if(!p_type.mIsEmpty && p_type.mIsTreeNode) + tree_type = p_type.mData.mTreeNode; + + if (!p_name.mIsTreeNode) + MERROR("The variable name should be a IdentifierNode already, but actually NOT?"); + var = p_name.mData.mTreeNode; + + if (tree_type) + add_type_to(var, tree_type); + + } else if (mParams.size() == 1) { + Param p_name = mParams[0]; + if (!p_name.mIsTreeNode) + MERROR("The variable name should be a IdentifierNode already, but actually NOT?"); + var = p_name.mData.mTreeNode; + } else { + var = mLastTreeNode; } - // The second param should be an IdentifierNode - TreeNode *node_a = p_var_a.mIsEmpty ? NULL : p_var_a.mData.mTreeNode; - TreeNode *node_b = p_var_b.mIsEmpty ? 
NULL : p_var_b.mData.mTreeNode; + DeclNode *decl = decl = (DeclNode*)gTreePool.NewTreeNode(sizeof(DeclNode)); + new (decl) DeclNode(var); - FieldNode *field = NULL; + mLastTreeNode = decl; + return decl; +} - if (node_b->IsPass()) { - TreeNode *upper = node_a; - PassNode *pass = (PassNode*)node_b; - for (unsigned i = 0; i < pass->GetChildrenNum(); i++) { - TreeNode *child = pass->GetChild(i); - MASSERT(child->IsIdentifier()); +TreeNode* ASTBuilder::SetJSVar() { + MASSERT(mLastTreeNode->IsDecl()); + DeclNode *decl = (DeclNode*)mLastTreeNode; + decl->SetProp(JS_Var); + return mLastTreeNode; +} - field = (FieldNode*)mTreePool->NewTreeNode(sizeof(FieldNode)); - new (field) FieldNode(); - field->SetUpper(upper); - field->SetField((IdentifierNode*)child); - field->Init(); +TreeNode* ASTBuilder::SetJSLet() { + MASSERT(mLastTreeNode->IsDecl()); + DeclNode *decl = (DeclNode*)mLastTreeNode; + decl->SetProp(JS_Let); + return mLastTreeNode; +} - upper = field; - } +TreeNode* ASTBuilder::SetJSConst() { + MASSERT(mLastTreeNode->IsDecl()); + DeclNode *decl = (DeclNode*)mLastTreeNode; + decl->SetProp(JS_Const); + return mLastTreeNode; +} + +////////////////////////////////////////////////////////////////////////////////// +// ArrayElement, ArrayLiteral +////////////////////////////////////////////////////////////////////////////////// + +// It takes two or more than two params. +// The first is the array. +// The second is the first dimension expression +// So on so forth. + +TreeNode* ASTBuilder::BuildArrayElement() { + if (mTrace) + std::cout << "In BuildArrayElement" << std::endl; + + MASSERT(mParams.size() >= 2); + + Param p_array = mParams[0]; + MASSERT(p_array.mIsTreeNode); + TreeNode *array = p_array.mData.mTreeNode; + MASSERT(array->IsIdentifier() || + array->IsArrayElement() || + array->IsField() || + array->IsUserType() || + array->IsBinOperator() || + array->IsCall() || + (array->IsLiteral() && ((LiteralNode*)array)->IsThis()) || + array->IsTupleType() || + array->IsStruct() || + array->IsNew() || + array->IsTypeOf() || + array->IsCast() || + array->IsPrimType()); + + ArrayElementNode *array_element = NULL; + if (array->IsIdentifier() || + array->IsField() || + array->IsUserType() || + array->IsBinOperator() || + array->IsCall() || + array->IsTupleType() || + (array->IsLiteral() && ((LiteralNode*)array)->IsThis()) || + array->IsStruct() || + array->IsNew() || + array->IsTypeOf() || + array->IsCast() || + array->IsPrimType()) { + array_element = (ArrayElementNode*)gTreePool.NewTreeNode(sizeof(ArrayElementNode)); + new (array_element) ArrayElementNode(); + array_element->SetArray(array); } else { - MASSERT(node_b->IsIdentifier()); - field = (FieldNode*)mTreePool->NewTreeNode(sizeof(FieldNode)); - new (field) FieldNode(); - field->SetUpper(node_a); - field->SetField((IdentifierNode*)node_b); - field->Init(); + array_element = (ArrayElementNode*)array; } - mLastTreeNode = field; + unsigned num = mParams.size() - 1; + for (unsigned i = 0; i < num; i++) { + Param p_index = mParams[i+1]; + MASSERT(p_index.mIsTreeNode); + TreeNode *index = p_index.mData.mTreeNode; + array_element->AddExpr(index); + } + + mLastTreeNode = array_element; return mLastTreeNode; } -// BuildVariableList takes two parameters, var 1 and var 2 -TreeNode* ASTBuilder::BuildVarList() { +// It takes only one parameter, the literals. 
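// Editorial sketch, not part of the original patch: for a hypothetical source
// literal such as
//
//   [1, x, foo()]
//
// the single parameter is expected to arrive as an ExprListNode whose three
// expressions are copied into the new ArrayLiteralNode via AddLiteral(), while
// an empty '[]' still yields an ArrayLiteralNode, just with zero expressions,
// as described in the function below.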
+TreeNode* ASTBuilder::BuildArrayLiteral() { if (mTrace) - std::cout << "In build Variable List" << std::endl; + std::cout << "In BuildArrayLiteral" << std::endl; - MASSERT(mParams.size() == 2 && "BuildVarList has NO 2 params?"); - Param p_var_a = mParams[0]; - Param p_var_b = mParams[1]; + MASSERT(mParams.size() == 1); - // Both variable should have been created as tree node. - if (!p_var_a.mIsTreeNode || !p_var_b.mIsTreeNode) { - MERROR("The var in BuildVarList is not a treenode"); + // The parameter could be empty, meaning the literal is like: []. + // But it still is a array literal, and we create one for it with 0 expressions. + ArrayLiteralNode *array_literal = (ArrayLiteralNode*)gTreePool.NewTreeNode(sizeof(ArrayLiteralNode)); + new (array_literal) ArrayLiteralNode(); + + Param p_literals = mParams[0]; + if (!p_literals.mIsEmpty) { + MASSERT(p_literals.mIsTreeNode); + TreeNode *literals = p_literals.mData.mTreeNode; + MASSERT(literals->IsLiteral() || + literals->IsIdentifier() || + literals->IsNew() || + literals->IsExprList() || + literals->IsArrayLiteral() || + literals->IsStructLiteral() || + literals->IsFieldLiteral() || + literals->IsCall() || + literals->IsArrayElement() || + literals->IsField() || + literals->IsBinOperator() || + literals->IsUnaOperator() || + literals->IsTerOperator() || + literals->IsRegExpr() || + literals->IsFunction() || + literals->IsTemplateLiteral() || + literals->IsLambda()); + if (literals->IsExprList()) { + ExprListNode *el = (ExprListNode*)literals; + for (unsigned i = 0; i < el->GetExprsNum(); i++) { + TreeNode *expr = el->GetExprAtIndex(i); + MASSERT(expr->IsLiteral() || + expr->IsNew() || + expr->IsArrayLiteral() || + expr->IsFieldLiteral() || + expr->IsStructLiteral() || + expr->IsIdentifier() || + expr->IsCall() || + expr->IsArrayElement() || + expr->IsField() || + expr->IsBinOperator() || + expr->IsUnaOperator() || + expr->IsTerOperator() || + expr->IsRegExpr() || + expr->IsFunction() || + expr->IsTemplateLiteral() || + expr->IsLambda()); + array_literal->AddLiteral(expr); + } + } else { + array_literal->AddLiteral(literals); + } } - TreeNode *node_a = p_var_a.mIsEmpty ? NULL : p_var_a.mData.mTreeNode; - TreeNode *node_b = p_var_b.mIsEmpty ? NULL : p_var_b.mData.mTreeNode; - + mLastTreeNode = array_literal; + return mLastTreeNode; +} + +////////////////////////////////////////////////////////////////////////////////// +// BindingElement and BindingPattern +////////////////////////////////////////////////////////////////////////////////// + +// It could take: +// 1) Two arguments, 'variable' name and 'element' to bind +// 2) one argument, the 'element' +TreeNode* ASTBuilder::BuildBindingElement() { + if (mTrace) + std::cout << "In BuildBindingElement" << std::endl; + + BindingElementNode *be_node = NULL; + + if (mParams.size() == 2) { + Param p_variable = mParams[0]; + MASSERT(p_variable.mIsTreeNode); + TreeNode *variable = p_variable.mData.mTreeNode; + + Param p_element = mParams[1]; + MASSERT(p_element.mIsTreeNode); + TreeNode *element = p_element.mData.mTreeNode; + + // There are a few cases. + // 1. If element is an existing binding element, we just add the 'variable'. + // 2. If element is a binding pattern, we need create new binding element. + // 3. If element is anything else, we need create new binding element. 
+ if (element->IsBindingElement()) { + be_node = (BindingElementNode*)element; + be_node->SetVariable(variable); + } else { + be_node = (BindingElementNode*)gTreePool.NewTreeNode(sizeof(BindingElementNode)); + new (be_node) BindingElementNode(); + be_node->SetVariable(variable); + be_node->SetElement(element); + } + } else if (mParams.size() == 1) { + Param p_element = mParams[0]; + MASSERT(p_element.mIsTreeNode); + TreeNode *element = p_element.mData.mTreeNode; + + be_node = (BindingElementNode*)gTreePool.NewTreeNode(sizeof(BindingElementNode)); + new (be_node) BindingElementNode(); + be_node->SetElement(element); + } else { + MASSERT(0 && "unsupported number of arguments in BuildBindingElemnt."); + } + + mLastTreeNode = be_node; + return mLastTreeNode; +} + +// It could take: +// 1) zero arguments. it is an empty binding pattern. +// 2) one argument, the 'element' or passnode containing list of elements. +TreeNode* ASTBuilder::BuildBindingPattern() { + if (mTrace) + std::cout << "In BuildBindingPattern" << std::endl; + + BindingPatternNode *bp = NULL; + + if (mParams.size() == 1) { + Param p_element = mParams[0]; + MASSERT(p_element.mIsTreeNode); + TreeNode *element = p_element.mData.mTreeNode; + + bp = (BindingPatternNode*)gTreePool.NewTreeNode(sizeof(BindingPatternNode)); + new (bp) BindingPatternNode(); + bp->AddElement(element); + } else if (mParams.size() == 0) { + // an empty binding pattern + bp = (BindingPatternNode*)gTreePool.NewTreeNode(sizeof(BindingPatternNode)); + new (bp) BindingPatternNode(); + } else { + MASSERT(0 && "unsupported number of arguments in BuildBindingElemnt."); + } + + mLastTreeNode = bp; + return mLastTreeNode; +} + +TreeNode* ASTBuilder::SetArrayBinding() { + MASSERT(mLastTreeNode->IsBindingPattern()); + BindingPatternNode *b = (BindingPatternNode*)mLastTreeNode; + b->SetProp(BPP_ArrayBinding); + return mLastTreeNode; +} + +TreeNode* ASTBuilder::SetObjectBinding() { + MASSERT(mLastTreeNode->IsBindingPattern()); + BindingPatternNode *b = (BindingPatternNode*)mLastTreeNode; + b->SetProp(BPP_ObjectBinding); + return mLastTreeNode; +} + +////////////////////////////////////////////////////////////////////////////////// +// StructNode, StructLiteralNode, FieldLiteralNode +////////////////////////////////////////////////////////////////////////////////// + +// It takes two parameters: name of key, the data type. +TreeNode* ASTBuilder::BuildNumIndexSig() { + if (mTrace) + std::cout << "In BuildNumIndexSig" << std::endl; + + Param p_key = mParams[0]; + MASSERT(p_key.mIsTreeNode); + TreeNode *key = p_key.mData.mTreeNode; + + Param p_data = mParams[1]; + MASSERT(p_data.mIsTreeNode); + TreeNode *data = p_data.mData.mTreeNode; + + NumIndexSigNode *sig = (NumIndexSigNode*)gTreePool.NewTreeNode(sizeof(NumIndexSigNode)); + new (sig) NumIndexSigNode(); + sig->SetKey(key); + sig->SetDataType(data); + + mLastTreeNode = sig; + return mLastTreeNode; +} + +// It takes two parameters: name of key, the data type. 
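// Editorial sketch, not part of the original patch: a TypeScript string index
// signature such as
//
//   interface Dict { [key: string]: number; }
//
// would reach BuildStrIndexSig() with mParams[0] holding the 'key' identifier
// and mParams[1] holding the 'number' type, which the function below records
// through SetKey() and SetDataType(). The Dict interface is a made-up example.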
+TreeNode* ASTBuilder::BuildStrIndexSig() { + if (mTrace) + std::cout << "In BuildStrIndexSig" << std::endl; + + Param p_key = mParams[0]; + MASSERT(p_key.mIsTreeNode); + TreeNode *key = p_key.mData.mTreeNode; + + Param p_data = mParams[1]; + MASSERT(p_data.mIsTreeNode); + TreeNode *data = p_data.mData.mTreeNode; + + StrIndexSigNode *sig = (StrIndexSigNode*)gTreePool.NewTreeNode(sizeof(StrIndexSigNode)); + new (sig) StrIndexSigNode(); + sig->SetKey(key); + sig->SetDataType(data); + + mLastTreeNode = sig; + return mLastTreeNode; +} + +// It takes only one parameter: name. +// Or it take no param, meaning the name is empty. +TreeNode* ASTBuilder::BuildStruct() { + if (mTrace) + std::cout << "In BuildStruct" << std::endl; + + TreeNode *name = NULL; + + if (mParams.size() == 1) { + Param p_name = mParams[0]; + MASSERT(p_name.mIsTreeNode); + name = p_name.mData.mTreeNode; + MASSERT(name->IsIdentifier()); + } + + StructNode *struct_node = (StructNode*)gTreePool.NewTreeNode(sizeof(StructNode)); + new (struct_node) StructNode((IdentifierNode*)name); + + mLastTreeNode = struct_node; + return mLastTreeNode; +} + +// It take no param. +TreeNode* ASTBuilder::BuildTupleType() { + if (mTrace) + std::cout << "In BuildTupleType" << std::endl; + + TupleTypeNode *tuple_type = (TupleTypeNode*)gTreePool.NewTreeNode(sizeof(TupleTypeNode)); + new (tuple_type) TupleTypeNode(); + + mLastTreeNode = tuple_type; + return mLastTreeNode; +} + +// It takes only one parameter: Field. +TreeNode* ASTBuilder::AddStructField() { + if (mTrace) + std::cout << "In AddStructField" << std::endl; + Param p_field = mParams[0]; + if (!p_field.mIsEmpty) { + MASSERT(p_field.mIsTreeNode); + TreeNode *field = p_field.mData.mTreeNode; + + if (mLastTreeNode->IsStruct()) { + StructNode *struct_node = (StructNode*)mLastTreeNode; + struct_node->AddChild(field); + } else if (mLastTreeNode->IsTupleType()) { + TupleTypeNode *tt = (TupleTypeNode*)mLastTreeNode; + tt->AddChild(field); + } else { + MERROR("Unsupported in AddStructField()"); + } + } + return mLastTreeNode; +} + +TreeNode* ASTBuilder::SetTSInterface() { + MASSERT(mLastTreeNode->IsStruct()); + StructNode *s = (StructNode*)mLastTreeNode; + s->SetProp(SProp_TSInterface); + return mLastTreeNode; +} + +TreeNode* ASTBuilder::SetTSEnum() { + MASSERT(mLastTreeNode->IsStruct()); + StructNode *s = (StructNode*)mLastTreeNode; + s->SetProp(SProp_TSEnum); + return mLastTreeNode; +} + +// Build FieldLiteral +// It takes two param, field name and field value (a literal). +TreeNode* ASTBuilder::BuildFieldLiteral() { + if (mTrace) + std::cout << "In BuildFieldLiteral" << std::endl; + + TreeNode *field = NULL; + Param p_field = mParams[0]; + if (p_field.mIsTreeNode) { + field = p_field.mData.mTreeNode; + } else { + field = BuildIdentifier(p_field.mData.mToken); + } + + Param p_value = mParams[1]; + MASSERT(p_value.mIsTreeNode); + TreeNode *value = p_value.mData.mTreeNode; + + FieldLiteralNode *field_literal = (FieldLiteralNode*)gTreePool.NewTreeNode(sizeof(FieldLiteralNode)); + new (field_literal) FieldLiteralNode(); + field_literal->SetFieldName(field); + field_literal->SetLiteral(value); + + mLastTreeNode = field_literal; + return mLastTreeNode; +} + +// 1) It takes no param. We create an empty struct litreal. +// 2) It takes one param. The param could a FieldLiteralNode or +// a PassNode containing multiple FieldLiteralNode. +// +// The param could also be a GetAccessor/SetAccessor in Javascript, +// which is a function node. 
We take the name of function as field name, +// the FunctionNode as the value. +TreeNode* ASTBuilder::BuildStructLiteral() { + if (mTrace) + std::cout << "In BuildStructLiteral" << std::endl; + + TreeNode *literal = NULL; + + if (mParams.size() == 1) { + Param p_literal = mParams[0]; + MASSERT(p_literal.mIsTreeNode); + literal = p_literal.mData.mTreeNode; + } + + StructLiteralNode *struct_literal = (StructLiteralNode*)gTreePool.NewTreeNode(sizeof(StructLiteralNode)); + new (struct_literal) StructLiteralNode(); + + if (literal) + struct_literal->AddField(literal); + + mLastTreeNode = struct_literal; + return mLastTreeNode; +} + +////////////////////////////////////////////////////////////////////////////////// +////////////////////////////////////////////////////////////////////////////////// + +// BuildField takes two parameters, +// 1) upper enclosing node, could be another field. +// 2) name of this field. +TreeNode* ASTBuilder::BuildField() { + if (mTrace) + std::cout << "In BuildField" << std::endl; + + MASSERT(mParams.size() == 2 && "BuildField has NO 2 params?"); + Param p_var_a = mParams[0]; + Param p_var_b = mParams[1]; + + // Both variable should have been created as tree node. + MASSERT(p_var_a.mIsTreeNode); + + // The second param should be an IdentifierNode + TreeNode *node_a = p_var_a.mIsEmpty ? NULL : p_var_a.mData.mTreeNode; + TreeNode *node_b = NULL; + if (!p_var_b.mIsEmpty) { + if (p_var_b.mIsTreeNode) { + node_b = p_var_b.mData.mTreeNode; + if (!node_b->IsIdentifier() && !node_b->IsComputedName()) { + TreeNode *id = BuildIdentifier(node_b); + if (id) + node_b = id; + } + } else { + node_b = BuildIdentifier(p_var_b.mData.mToken); + } + } + + FieldNode *field = NULL; + + if (node_b->IsPass()) { + TreeNode *upper = node_a; + PassNode *pass = (PassNode*)node_b; + for (unsigned i = 0; i < pass->GetChildrenNum(); i++) { + TreeNode *child = pass->GetChild(i); + MASSERT(child->IsIdentifier()); + + field = (FieldNode*)gTreePool.NewTreeNode(sizeof(FieldNode)); + new (field) FieldNode(); + field->SetUpper(upper); + field->SetField((IdentifierNode*)child); + + upper = field; + } + } else { + MASSERT(node_b->IsIdentifier() || + node_b->IsCall() || + node_b->IsUserType()); + field = (FieldNode*)gTreePool.NewTreeNode(sizeof(FieldNode)); + new (field) FieldNode(); + field->SetUpper(node_a); + field->SetField(node_b); + } + + mLastTreeNode = field; + return mLastTreeNode; +} + +// BuildVariableList takes two parameters, var 1 and var 2 +TreeNode* ASTBuilder::BuildVarList() { + if (mTrace) + std::cout << "In build Variable List" << std::endl; + + MASSERT(mParams.size() == 2 && "BuildVarList has NO 2 params?"); + Param p_var_a = mParams[0]; + Param p_var_b = mParams[1]; + + // Both variable should have been created as tree node. + if (!p_var_a.mIsTreeNode || !p_var_b.mIsTreeNode) { + MERROR("The var in BuildVarList is not a treenode"); + } + + TreeNode *node_a = p_var_a.mIsEmpty ? NULL : p_var_a.mData.mTreeNode; + TreeNode *node_b = p_var_b.mIsEmpty ? NULL : p_var_b.mData.mTreeNode; + // There are a few different scenarios. 
// (1) node_a is a VarListNode, and we dont care about node_b // (2) node_a is an IdentifierNode, node_b is a VarListNode @@ -851,7 +2232,7 @@ TreeNode* ASTBuilder::BuildVarList() { node_ret->Merge(node_a); } else { // both nodes are not VarListNode - node_ret = (VarListNode*)mTreePool->NewTreeNode(sizeof(VarListNode)); + node_ret = (VarListNode*)gTreePool.NewTreeNode(sizeof(VarListNode)); new (node_ret) VarListNode(); if (node_a) node_ret->Merge(node_a); @@ -866,37 +2247,110 @@ TreeNode* ASTBuilder::BuildVarList() { } // Attach the modifier(s) to mLastTreeNode. +// It takes: +// 1. One argument, which is the solo modifier. +// 2. Two arguments, which happens mostly in Typescript, like +// + readonly +// - readonly +// + ? +// - ? TreeNode* ASTBuilder::AddModifier() { if (mTrace) std::cout << "In AddModifier" << std::endl; - Param p_mod = mParams[0]; - if (p_mod.mIsEmpty) { - if (mTrace) - std::cout << " do nothing." << std::endl; - return mLastTreeNode; - } + if (mParams.size() == 1) { + Param p_mod = mParams[0]; + if (p_mod.mIsEmpty) { + if (mTrace) + std::cout << " do nothing." << std::endl; + return mLastTreeNode; + } - if (!p_mod.mIsTreeNode) - MERROR("The modifier is not a treenode"); - TreeNode *mod= p_mod.mData.mTreeNode; + TreeNode *mod = NULL; + if (!p_mod.mIsTreeNode) { + Token *token = p_mod.mData.mToken; + if (token->IsSeparator() && token->GetSepId()==SEP_Pound) { + // This is a '#' in front of class member in Javascript. + // This is a 'private' modifier. + AttrNode *an = gAttrPool.GetAttrNode(ATTR_private); + MASSERT(an); + mod = an; + } else { + MERROR("The modifier is not a treenode"); + } + } else { + mod= p_mod.mData.mTreeNode; + } - if (mod->IsPass()) { - PassNode *pass = (PassNode*)mod; - for (unsigned i = 0; i < pass->GetChildrenNum(); i++) { - TreeNode *child = pass->GetChild(i); - if (child->IsAnnotation()) { - AnnotationNode *a = (AnnotationNode*)child; - mLastTreeNode->AddAnnotation(a); + if (mod->IsPass()) { + PassNode *pass = (PassNode*)mod; + for (unsigned i = 0; i < pass->GetChildrenNum(); i++) { + TreeNode *child = pass->GetChild(i); + if (child->IsAnnotation()) { + AnnotationNode *a = (AnnotationNode*)child; + mLastTreeNode->AddAnnotation(a); + } else { + add_attribute_to(mLastTreeNode, child); + } + } + } else if (mod->IsAnnotation()) { + AnnotationNode *a = (AnnotationNode*)mod; + mLastTreeNode->AddAnnotation(a); + } else { + add_attribute_to(mLastTreeNode, mod); + } + } else if (mParams.size() == 2) { + // the two modifiers are in fixed order, with +/- at first + // readonly/? the second. 
+ bool add = false; // add opr if true, rem if false + bool readonly = false; + bool optional = false; + Param p_opr = mParams[0]; + if (p_opr.mIsEmpty) { + add = true; + } else { + MASSERT(!p_opr.mIsTreeNode); + Token *token= p_opr.mData.mToken; + if (token->GetOprId() == OPR_Add) + add = true; + else if (token->GetOprId() == OPR_Sub) + add = false; + else + MERROR("unsupported opr id."); + } + + Param p_prop = mParams[1]; + if (!p_prop.mIsEmpty) { + if (!p_prop.mIsTreeNode) { + Token *token= p_prop.mData.mToken; + MASSERT(token->IsSeparator() && (token->GetSepId() == SEP_Select)); + optional = true; + } else { + TreeNode *tree = p_prop.mData.mTreeNode; + MASSERT(tree->IsAttr()); + AttrNode *attr = (AttrNode*)tree; + MASSERT(attr->GetId() == ATTR_readonly); + readonly = true; + } + + MASSERT(mLastTreeNode->IsComputedName()); + ComputedNameNode *cnn = (ComputedNameNode*)mLastTreeNode; + if (add) { + if (readonly) + cnn->SetProp((unsigned)CNP_Add_ReadOnly); + else if (optional) + cnn->SetProp((unsigned)CNP_Add_Optional); + else + MERROR("unsupported property."); } else { - add_attribute_to(mLastTreeNode, child); + if (readonly) + cnn->SetProp((unsigned)CNP_Rem_ReadOnly); + else if (optional) + cnn->SetProp((unsigned)CNP_Rem_Optional); + else + MERROR("unsupported property."); } } - } else if (mod->IsAnnotation()) { - AnnotationNode *a = (AnnotationNode*)mod; - mLastTreeNode->AddAnnotation(a); - } else { - add_attribute_to(mLastTreeNode, mod); } return mLastTreeNode; @@ -943,55 +2397,151 @@ TreeNode* ASTBuilder::AddModifierTo() { return tree; } -TreeNode* ASTBuilder::AddInitTo() { +// It takes one argument, the init. +// Apply init to mLastTreeNode +TreeNode* ASTBuilder::AddInit() { if (mTrace) - std::cout << "In AddInitTo" << std::endl; - Param p_decl = mParams[0]; - Param p_init; + std::cout << "In AddInit" << std::endl; - // If there is no init value, return NULL. - if (mParams.size() == 1) - return NULL; + MASSERT(mParams.size() == 1); - p_init = mParams[1]; + Param p_init = mParams[0]; if (p_init.mIsEmpty) return NULL; - // Both variable should have been created as tree node. - if (!p_decl.mIsTreeNode || !p_init.mIsTreeNode) - MERROR("The decl or init is not a treenode in AddInitTo()"); + if (!p_init.mIsTreeNode) + MERROR("The init is not a treenode in AddInit()"); - TreeNode *node_decl = p_decl.mData.mTreeNode; TreeNode *node_init = p_init.mData.mTreeNode; - if (!node_decl->IsIdentifier()) - MERROR("The target of AddInitTo should be an indentifier node. Not?"); + if (mLastTreeNode->IsIdentifier()) { + IdentifierNode *in = (IdentifierNode*)mLastTreeNode; + in->SetInit(node_init); + return in; + } else if (mLastTreeNode->IsBindingPattern()) { + BindingPatternNode *in = (BindingPatternNode*)mLastTreeNode; + in->SetInit(node_init); + return in; + } else if (mLastTreeNode->IsTypeParameter()) { + TypeParameterNode *in = (TypeParameterNode*)mLastTreeNode; + in->SetDefault(node_init); + return in; + } else { + MERROR("The target of AddInit is unsupported."); + } +} + +// It takes (1) two arguments or (2) one argument +TreeNode* ASTBuilder::AddInitTo() { + if (mTrace) + std::cout << "In AddInitTo" << std::endl; + + TreeNode *node_decl = NULL; + TreeNode *node_init = NULL; + + // If there is no init value, return NULL. 
+ if (mParams.size() == 1) { + Param p_init = mParams[0]; + if (p_init.mIsEmpty) + return NULL; + node_init = p_init.mData.mTreeNode; + node_decl = mLastTreeNode; + } else { + Param p_decl = mParams[0]; + Param p_init; + p_init = mParams[1]; + if (p_init.mIsEmpty) + return NULL; + + // Both variable should have been created as tree node. + if (!p_decl.mIsTreeNode || !p_init.mIsTreeNode) + MERROR("The decl or init is not a treenode in AddInitTo()"); + + node_decl = p_decl.mData.mTreeNode; + node_init = p_init.mData.mTreeNode; + } + + if (node_decl->IsIdentifier()) { + IdentifierNode *in = (IdentifierNode*)node_decl; + in->SetInit(node_init); + return in; + } else if (node_decl->IsBindingPattern()) { + BindingPatternNode *in = (BindingPatternNode*)node_decl; + in->SetInit(node_init); + return in; + } else if (node_decl->IsComputedName()) { + ComputedNameNode *in = (ComputedNameNode*)node_decl; + in->SetInit(node_init); + return in; + } else if (node_decl->IsLiteral()) { + LiteralNode *in = (LiteralNode*)node_decl; + in->SetInit(node_init); + return in; + } else { + MERROR("The target of AddInitTo is unsupported."); + } +} + +// This takes just one argument which is the namespace name. +TreeNode* ASTBuilder::BuildNamespace() { + if (mTrace) + std::cout << "In BuildNamespace" << std::endl; + + Param p_name = mParams[0]; + MASSERT(p_name.mIsTreeNode); + TreeNode *node_name = p_name.mData.mTreeNode; + + MASSERT(node_name->IsIdentifier() || node_name->IsField()); - IdentifierNode *in = (IdentifierNode*)node_decl; - in->SetInit(node_init); + NamespaceNode *ns = (NamespaceNode*)gTreePool.NewTreeNode(sizeof(NamespaceNode)); + new (ns) NamespaceNode(); + ns->SetId(node_name); - return in; + mLastTreeNode = ns; + return mLastTreeNode; } +// Takes one parameter which is the tree of namespace body. +TreeNode* ASTBuilder::AddNamespaceBody() { + if (mTrace) + std::cout << "In AddNamespaceBody" << std::endl; + + Param p_body = mParams[0]; + if (!p_body.mIsEmpty) { + if(!p_body.mIsTreeNode) + MERROR("The namespace body is not a tree node."); + TreeNode *tree = p_body.mData.mTreeNode; + + MASSERT(mLastTreeNode->IsNamespace()); + NamespaceNode *ns = (NamespaceNode*)mLastTreeNode; + ns->AddBody(tree); + } + + return mLastTreeNode; +} // This takes just one argument which is the class name. TreeNode* ASTBuilder::BuildClass() { if (mTrace) std::cout << "In BuildClass" << std::endl; - Param p_name = mParams[0]; - - if (!p_name.mIsTreeNode) - MERROR("The class name is not a treenode in BuildClass()"); - TreeNode *node_name = p_name.mData.mTreeNode; + IdentifierNode *in = NULL; - if (!node_name->IsIdentifier()) - MERROR("The class name should be an indentifier node. Not?"); - IdentifierNode *in = (IdentifierNode*)node_name; + Param p_name = mParams[0]; + if (!p_name.mIsEmpty) { + if (!p_name.mIsTreeNode) + MERROR("The class name is not a treenode in BuildClass()"); + TreeNode *node_name = p_name.mData.mTreeNode; + + if (!node_name->IsIdentifier()) + MERROR("The class name should be an identifier node. 
Not?"); + in = (IdentifierNode*)node_name; + } - ClassNode *node_class = (ClassNode*)mTreePool->NewTreeNode(sizeof(ClassNode)); + ClassNode *node_class = (ClassNode*)gTreePool.NewTreeNode(sizeof(ClassNode)); new (node_class) ClassNode(); - node_class->SetName(in->GetName()); + if (in) + node_class->SetStrIdx(in->GetStrIdx()); mLastTreeNode = node_class; return mLastTreeNode; @@ -999,7 +2549,7 @@ TreeNode* ASTBuilder::BuildClass() { TreeNode* ASTBuilder::SetClassIsJavaEnum() { ClassNode *klass = (ClassNode*)mLastTreeNode; - klass->SetJavaEnum(); + klass->SetIsJavaEnum(); return mLastTreeNode; } @@ -1008,7 +2558,7 @@ TreeNode* ASTBuilder::BuildBlock() { if (mTrace) std::cout << "In BuildBlock" << std::endl; - BlockNode *block = (BlockNode*)mTreePool->NewTreeNode(sizeof(BlockNode)); + BlockNode *block = (BlockNode*)gTreePool.NewTreeNode(sizeof(BlockNode)); new (block) BlockNode(); Param p_subtree = mParams[0]; @@ -1062,19 +2612,27 @@ TreeNode* ASTBuilder::AddToBlock() { return mLastTreeNode; } -// if tnode is not a BlockNode, wrap it into a BlockNode -TreeNode* ASTBuilder::CvtToBlock(TreeNode *tnode) { +// This takes just two arguments. First is the sync object, second the block +// It returns the block with sync added. +TreeNode* ASTBuilder::AddSyncToBlock() { if (mTrace) - std::cout << "In CvtToBlock" << std::endl; + std::cout << "In AddSyncToBlock" << std::endl; - if (tnode->IsBlock()) { - return tnode; - } + Param p_sync = mParams[0]; + MASSERT(!p_sync.mIsEmpty && p_sync.mIsTreeNode); + TreeNode *sync_tree = p_sync.mData.mTreeNode; - BlockNode *block = (BlockNode*)mTreePool->NewTreeNode(sizeof(BlockNode)); - new (block) BlockNode(); - block->AddChild(tnode); - return block; + Param p_block = mParams[1]; + MASSERT(!p_block.mIsEmpty && p_block.mIsTreeNode); + TreeNode *b = p_block.mData.mTreeNode; + MASSERT(b->IsBlock()); + BlockNode *block = (BlockNode*)b; + + block->SetSync(sync_tree); + + // set last tree node + mLastTreeNode = block; + return mLastTreeNode; } // This takes just one argument which either a block node, or the root of sub tree @@ -1109,14 +2667,41 @@ TreeNode* ASTBuilder::BuildInstInit() { TreeNode* ASTBuilder::AddSuperClass() { if (mTrace) std::cout << "In AddSuperClass" << std::endl; - Param p_attr = mParams[0]; + Param p_super = mParams[0]; + if (p_super.mIsEmpty) + return mLastTreeNode; + + MASSERT(p_super.mIsTreeNode); + TreeNode *t_super = p_super.mData.mTreeNode; + + if (mLastTreeNode->IsClass()) { + ClassNode *sn = (ClassNode*)mLastTreeNode; + sn->AddSuperClass(t_super); + } + return mLastTreeNode; } +// It takes one argument, the super interface. +// Add it to the mLastTreeNode. 
TreeNode* ASTBuilder::AddSuperInterface() { if (mTrace) std::cout << "In AddSuperInterface" << std::endl; - Param p_attr = mParams[0]; + Param p_super = mParams[0]; + if (p_super.mIsEmpty) + return mLastTreeNode; + + MASSERT(p_super.mIsTreeNode); + TreeNode *t_super = p_super.mData.mTreeNode; + + if (mLastTreeNode->IsStruct()) { + StructNode *sn = (StructNode*)mLastTreeNode; + sn->AddSuper(t_super); + } else if (mLastTreeNode->IsClass()) { + ClassNode *sn = (ClassNode*)mLastTreeNode; + sn->AddSuperInterface(t_super); + } + return mLastTreeNode; } @@ -1126,16 +2711,24 @@ TreeNode* ASTBuilder::AddClassBody() { std::cout << "In AddClassBody" << std::endl; Param p_body = mParams[0]; - if (!p_body.mIsTreeNode) + if (p_body.mIsEmpty) + return mLastTreeNode; + + if (!p_body.mIsTreeNode) MERROR("The class body is not a tree node."); - TreeNode *tree_node = p_body.mData.mTreeNode; - MASSERT(tree_node->IsBlock() && "Class body is not a BlockNode?"); - BlockNode *block = (BlockNode*)tree_node; + + TreeNode *tn = p_body.mData.mTreeNode; + if (!tn->IsBlock()) { + BlockNode *block = (BlockNode*)gTreePool.NewTreeNode(sizeof(BlockNode)); + new (block) BlockNode(); + block->AddChild(tn); + tn = block; + } + BlockNode *block = (BlockNode*)(tn); MASSERT(mLastTreeNode->IsClass() && "Class is not a ClassNode?"); ClassNode *klass = (ClassNode*)mLastTreeNode; - klass->AddBody(block); - klass->Construct(); + klass->Construct(block); return mLastTreeNode; } @@ -1152,10 +2745,10 @@ TreeNode* ASTBuilder::BuildAnnotationType() { TreeNode *node_name = p_name.mData.mTreeNode; if (!node_name->IsIdentifier()) - MERROR("The annotation type name should be an indentifier node. Not?"); + MERROR("The annotation type name should be an identifier node. Not?"); IdentifierNode *in = (IdentifierNode*)node_name; - AnnotationTypeNode *annon_type = (AnnotationTypeNode*)mTreePool->NewTreeNode(sizeof(AnnotationTypeNode)); + AnnotationTypeNode *annon_type = (AnnotationTypeNode*)gTreePool.NewTreeNode(sizeof(AnnotationTypeNode)); new (annon_type) AnnotationTypeNode(); annon_type->SetId(in); @@ -1180,12 +2773,9 @@ TreeNode* ASTBuilder::BuildAnnotation() { MERROR("The annotationtype name is not a treenode in BuildAnnotation()"); TreeNode *iden = p_name.mData.mTreeNode; - if (!iden->IsIdentifier()) - MERROR("The annotation name is NOT an indentifier node."); - - AnnotationNode *annot = (AnnotationNode*)mTreePool->NewTreeNode(sizeof(AnnotationNode)); + AnnotationNode *annot = (AnnotationNode*)gTreePool.NewTreeNode(sizeof(AnnotationNode)); new (annot) AnnotationNode(); - annot->SetId((IdentifierNode*)iden); + annot->SetId(iden); // set last tree node and return it. mLastTreeNode = annot; @@ -1202,12 +2792,12 @@ TreeNode* ASTBuilder::BuildInterface() { TreeNode *node_name = p_name.mData.mTreeNode; if (!node_name->IsIdentifier()) - MERROR("The name is NOT an indentifier node."); + MERROR("The name is NOT an identifier node."); IdentifierNode *in = (IdentifierNode*)node_name; - InterfaceNode *interf = (InterfaceNode*)mTreePool->NewTreeNode(sizeof(InterfaceNode)); + InterfaceNode *interf = (InterfaceNode*)gTreePool.NewTreeNode(sizeof(InterfaceNode)); new (interf) InterfaceNode(); - interf->SetName(in->GetName()); + interf->SetStrIdx(in->GetStrIdx()); // set last tree node and return it. 
mLastTreeNode = interf; @@ -1238,9 +2828,9 @@ TreeNode* ASTBuilder::BuildDim() { if (mTrace) std::cout << "In BuildDim" << std::endl; - DimensionNode *dim = (DimensionNode*)mTreePool->NewTreeNode(sizeof(DimensionNode)); + DimensionNode *dim = (DimensionNode*)gTreePool.NewTreeNode(sizeof(DimensionNode)); new (dim) DimensionNode(); - dim->AddDim(); + dim->AddDimension(); // set last tree node and return it. mLastTreeNode = dim; @@ -1309,7 +2899,7 @@ TreeNode* ASTBuilder::AddDimsTo() { mLastTreeNode = node_a; } else if (node_a->IsPrimType()) { PrimTypeNode *pt = (PrimTypeNode*)node_a; - PrimArrayTypeNode *pat = (PrimArrayTypeNode*)mTreePool->NewTreeNode(sizeof(PrimArrayTypeNode)); + PrimArrayTypeNode *pat = (PrimArrayTypeNode*)gTreePool.NewTreeNode(sizeof(PrimArrayTypeNode)); new (pat) PrimArrayTypeNode(); pat->SetPrim(pt); pat->SetDims(dim); @@ -1349,77 +2939,121 @@ TreeNode* ASTBuilder::AddDims() { return mLastTreeNode; } -//////////////////////////////////////////////////////////////////////////////// -// New & Delete operation related -//////////////////////////////////////////////////////////////////////////////// - // This is a help function which adds parameters to a function decl. // It's the caller's duty to assure 'func' and 'params' are non null. -void ASTBuilder::AddParams(TreeNode *func, TreeNode *params) { - if (params->IsIdentifier()) { - // one single parameter at call site - IdentifierNode *inode = (IdentifierNode*)params; - if (func->IsFunction()) - ((FunctionNode*)func)->AddParam(inode); - else if (func->IsNew()) - ((NewNode*)func)->AddParam(inode); - else - MERROR("Unsupported yet."); - } else if (params->IsVarList()) { - // a list of decls at function declaration - VarListNode *vl = (VarListNode*)params; - for (unsigned i = 0; i < vl->GetNum(); i++) { - IdentifierNode *inode = vl->VarAtIndex(i); +void ASTBuilder::AddParams(TreeNode *func, TreeNode *decl_params) { + if (decl_params->IsDecl()) { + DeclNode *decl = (DeclNode*)decl_params; + TreeNode *params = decl->GetVar(); + // a param could be a 'this' literal, binding pattern, etc + if (params->IsIdentifier() || params->IsLiteral() || params->IsBindingPattern()) { + // one single parameter at call site if (func->IsFunction()) - ((FunctionNode*)func)->AddParam(inode); - else if (func->IsNew()) - ((NewNode*)func)->AddParam(inode); + ((FunctionNode*)func)->AddParam(params); + else if (func->IsLambda()) + ((LambdaNode*)func)->AddParam(params); else MERROR("Unsupported yet."); + } else if (params->IsVarList()) { + // a list of decls at function declaration + VarListNode *vl = (VarListNode*)params; + for (unsigned i = 0; i < vl->GetVarsNum(); i++) { + IdentifierNode *inode = vl->GetVarAtIndex(i); + if (func->IsFunction()) + ((FunctionNode*)func)->AddParam(inode); + else if (func->IsLambda()) + ((LambdaNode*)func)->AddParam(inode); + else + MERROR("Unsupported yet."); + } + } else { + MERROR("Unsupported yet."); } - } else if (params->IsPass()) { - // a list of identifiers at call site. - PassNode *pass = (PassNode*)params; + } else if (decl_params->IsPass()) { + PassNode *pass = (PassNode*)decl_params; for (unsigned i = 0; i < pass->GetChildrenNum(); i++) { TreeNode *child = pass->GetChild(i); - if (func->IsFunction()) - ((FunctionNode*)func)->AddParam(child); - else if (func->IsNew()) - ((NewNode*)func)->AddParam(child); - else - MERROR("Unsupported yet."); + AddParams(func, child); } + } else if (decl_params->IsIdentifier()) { + // sometimes, the parameter is just an identifier in Javascript. 
+ // Like the SetAccessor + if (func->IsFunction()) + ((FunctionNode*)func)->AddParam(decl_params); + else if (func->IsLambda()) + ((LambdaNode*)func)->AddParam(decl_params); + else + MERROR("Unsupported yet."); + } else if (decl_params->IsStruct()) { + if (func->IsFunction()) + ((FunctionNode*)func)->AddParam(decl_params); + else if (func->IsLambda()) + ((LambdaNode*)func)->AddParam(decl_params); + else + MERROR("Unsupported yet."); + } else { + MERROR("Unsupported yet."); } } +// AddAssert takes one parameter, the asserts expression, and apply it to mLastTreeNode +TreeNode* ASTBuilder::AddAssert() { + if (mTrace) + std::cout << "In AddAssert " << std::endl; + + FunctionNode *f = NULL; + TreeNode *a = NULL; + + Param p_type = mParams[0]; + if(!p_type.mIsEmpty && p_type.mIsTreeNode) { + a = p_type.mData.mTreeNode; + MASSERT(mLastTreeNode->IsFunction()); + f = (FunctionNode*)mLastTreeNode; + } + + if (f && a) + f->SetAssert(a); + + return mLastTreeNode; +} +//////////////////////////////////////////////////////////////////////////////// +// New & Delete operation related +//////////////////////////////////////////////////////////////////////////////// + +// This function takes one, two or three arguments. +// 1. The id of the class/interface/function/..., or a lambda +// 2. The arguments, could be empty +// 3. In some cases there is a third argument, for function body. TreeNode* ASTBuilder::BuildNewOperation() { if (mTrace) std::cout << "In BuildNewOperation " << std::endl; - NewNode *new_node = (NewNode*)mTreePool->NewTreeNode(sizeof(NewNode)); + NewNode *new_node = (NewNode*)gTreePool.NewTreeNode(sizeof(NewNode)); new (new_node) NewNode(); - MASSERT(mParams.size() == 3 && "BuildNewOperation has NO 3 params?"); - Param p_a = mParams[0]; - Param p_b = mParams[1]; - Param p_c = mParams[2]; - // Name could not be empty + Param p_a = mParams[0]; if (p_a.mIsEmpty) MERROR("The name in BuildNewOperation() is empty?"); MASSERT(p_a.mIsTreeNode && "Name of new expression is not a tree?"); TreeNode *name = p_a.mData.mTreeNode; new_node->SetId(name); - - TreeNode *node_b = p_b.mIsEmpty ? NULL : p_b.mData.mTreeNode; - if (node_b) - AddParams(new_node, node_b); - TreeNode *node_c = p_c.mIsEmpty ? NULL : p_c.mData.mTreeNode; - if (node_c) { - MASSERT(node_c->IsBlock() && "ClassBody is not a block?"); - BlockNode *b = (BlockNode*)node_c; - new_node->SetBody(b); + if (mParams.size() > 1) { + Param p_b = mParams[1]; + TreeNode *node_b = p_b.mIsEmpty ? NULL : p_b.mData.mTreeNode; + if (node_b) + AddArguments(new_node, node_b); + } + + if (mParams.size() > 2) { + Param p_c = mParams[2]; + TreeNode *node_c = p_c.mIsEmpty ? 
NULL : p_c.mData.mTreeNode; + if (node_c) { + MASSERT(node_c->IsBlock() && "ClassBody is not a block?"); + BlockNode *b = (BlockNode*)node_c; + new_node->SetBody(b); + } } mLastTreeNode = new_node; @@ -1427,6 +3061,20 @@ TreeNode* ASTBuilder::BuildNewOperation() { } TreeNode* ASTBuilder::BuildDeleteOperation() { + if (mTrace) + std::cout << "In BuildDelete" << std::endl; + + Param l_param = mParams[0]; + MASSERT(!l_param.mIsEmpty); + MASSERT(l_param.mIsTreeNode); + TreeNode *expr = l_param.mData.mTreeNode; + + DeleteNode *d_node = (DeleteNode*)gTreePool.NewTreeNode(sizeof(DeleteNode)); + new (d_node) DeleteNode(); + d_node->SetExpr(expr); + + mLastTreeNode = d_node; + return mLastTreeNode; } //////////////////////////////////////////////////////////////////////////////// @@ -1439,7 +3087,7 @@ TreeNode* ASTBuilder::BuildAssert() { if (mTrace) std::cout << "In BuildAssert " << std::endl; - AssertNode *assert_node = (AssertNode*)mTreePool->NewTreeNode(sizeof(AssertNode)); + AssertNode *assert_node = (AssertNode*)gTreePool.NewTreeNode(sizeof(AssertNode)); new (assert_node) AssertNode(); MASSERT(mParams.size() >= 1 && "BuildAssert has NO expression?"); @@ -1475,7 +3123,7 @@ TreeNode* ASTBuilder::BuildCall() { if (mTrace) std::cout << "In BuildCall" << std::endl; - CallNode *call = (CallNode*)mTreePool->NewTreeNode(sizeof(CallNode)); + CallNode *call = (CallNode*)gTreePool.NewTreeNode(sizeof(CallNode)); new (call) CallNode(); // The default is having no param. @@ -1484,12 +3132,16 @@ TreeNode* ASTBuilder::BuildCall() { if (!ParamsEmpty()) { Param p_method = mParams[0]; if (!p_method.mIsTreeNode) - MERROR("The function name is not a treenode in BuildFunction()"); + MERROR("The function name is not a treenode in BuildCall()"); method = p_method.mData.mTreeNode; } + // In Typescript, get/set are keywords of attributes. But it also allowed to be + // function name. So we need transfer this AttrNode to IdentifierNode. + if (method && method->IsAttr()) + method = BuildIdentifier(method); + call->SetMethod(method); - call->Init(); mLastTreeNode = call; return mLastTreeNode; @@ -1499,43 +3151,67 @@ TreeNode* ASTBuilder::BuildCall() { // identifier, call, or any valid expression. // // This AddArguments can be used for CallNode, NewNode, etc. -// Right now I just support CallNode. NewNode will be moved from AddParams() -// to here. TreeNode* ASTBuilder::AddArguments() { if (mTrace) std::cout << "In AddArguments" << std::endl; Param p_params = mParams[0]; - TreeNode *params = NULL; + TreeNode *args = NULL; if (!p_params.mIsEmpty) { if (!p_params.mIsTreeNode) MERROR("The parameters is not a treenode in AddArguments()"); - params = p_params.mData.mTreeNode; + args = p_params.mData.mTreeNode; } - if (!params) + if (!args) return mLastTreeNode; - CallNode *call = (CallNode*)mLastTreeNode; + AddArguments(mLastTreeNode, args); - if (params->IsVarList()) { - VarListNode *vl = (VarListNode*)params; - for (unsigned i = 0; i < vl->GetNum(); i++) { - IdentifierNode *inode = vl->VarAtIndex(i); - call->AddArg(inode); + return mLastTreeNode; +} + +// 'call' could be a CallNode or NewNode. +// 'args' could be identifier, literal, expr, etc. 
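// Editorial sketch, not part of the original patch: for a hypothetical call
// site such as
//
//   foo(a, 1 + b, bar())
//
// 'args' would typically arrive as a PassNode with three children; the
// overload below walks each child recursively and appends it with
// CallNode::AddArg(), or with NewNode::AddArg() / AnnotationNode::AddArg()
// when 'call' is a new expression or an annotation.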
+void ASTBuilder::AddArguments(TreeNode *call, TreeNode *args) { + CallNode *callnode = NULL; + NewNode *newnode = NULL; + AnnotationNode *annotation = NULL; + if (call->IsCall()) + callnode = (CallNode*)call; + else if (call->IsNew()) + newnode = (NewNode*)call; + else if (call->IsAnnotation()) + annotation = (AnnotationNode*)call; + else + MERROR("Unsupported call node."); + + if (args->IsVarList()) { + VarListNode *vl = (VarListNode*)args; + for (unsigned i = 0; i < vl->GetVarsNum(); i++) { + IdentifierNode *inode = vl->GetVarAtIndex(i); + if (callnode) + callnode->AddArg(inode); + else if (newnode) + newnode->AddArg(inode); + else if (annotation) + annotation->AddArg(inode); } - } else if (params->IsPass()) { - PassNode *pass = (PassNode*)params; + } else if (args->IsPass()) { + PassNode *pass = (PassNode*)args; for (unsigned i = 0; i < pass->GetChildrenNum(); i++) { TreeNode *child = pass->GetChild(i); - call->AddArg(child); + AddArguments(call, child); } } else { - call->AddArg(params); + if (callnode) + callnode->AddArg(args); + else if (newnode) + newnode->AddArg(args); + else if (annotation) + annotation->AddArg(args); } - - return mLastTreeNode; } // BuildVariableList takes two parameters, var 1 and var 2 @@ -1564,7 +3240,7 @@ TreeNode* ASTBuilder::BuildExprList() { node_ret->Merge(node_a); } else { // both nodes are not ExprListNode - node_ret = (ExprListNode*)mTreePool->NewTreeNode(sizeof(ExprListNode)); + node_ret = (ExprListNode*)gTreePool.NewTreeNode(sizeof(ExprListNode)); new (node_ret) ExprListNode(); if (node_a) node_ret->Merge(node_a); @@ -1582,6 +3258,7 @@ TreeNode* ASTBuilder::BuildExprList() { // FunctionNode related //////////////////////////////////////////////////////////////////////////////// +// Takes only one argument, the params, and add it to mLastTreeNode TreeNode* ASTBuilder::AddParams() { if (mTrace) std::cout << "In AddParams" << std::endl; @@ -1598,25 +3275,41 @@ TreeNode* ASTBuilder::AddParams() { } // This takes just one argument which is the function name. +// The name could empty which is allowed in languages like JS. TreeNode* ASTBuilder::BuildFunction() { if (mTrace) std::cout << "In BuildFunction" << std::endl; - Param p_name = mParams[0]; - - if (!p_name.mIsTreeNode) - MERROR("The function name is not a treenode in BuildFunction()"); - TreeNode *node_name = p_name.mData.mTreeNode; + TreeNode *node_name = NULL; + + if (mParams.size() > 0) { + Param p_name = mParams[0]; + // In JS/TS the name could be empty. + if (!p_name.mIsEmpty) { + if (p_name.mIsTreeNode) { + node_name = p_name.mData.mTreeNode; + if (node_name->IsAttr()) { + node_name = BuildIdentifier(node_name); + } else if (!node_name->IsIdentifier() && + !node_name->IsComputedName() && + !node_name->IsLiteral() && + !node_name->IsField()) + MERROR("The function name should be an identifier node. Not?"); + } else { + node_name = BuildIdentifier(p_name.mData.mToken); + } + } + } - if (!node_name->IsIdentifier()) - MERROR("The function name should be an indentifier node. 
Not?"); - IdentifierNode *in = (IdentifierNode*)node_name; + FunctionNode *f = (FunctionNode*)gTreePool.NewTreeNode(sizeof(FunctionNode)); + new (f) FunctionNode(); - FunctionNode *function = (FunctionNode*)mTreePool->NewTreeNode(sizeof(FunctionNode)); - new (function) FunctionNode(); - function->SetName(node_name->GetName()); + if (node_name) { + f->SetFuncName(node_name); + f->SetStrIdx(node_name->GetStrIdx()); + } - mLastTreeNode = function; + mLastTreeNode = f; return mLastTreeNode; } @@ -1640,11 +3333,17 @@ TreeNode* ASTBuilder::AddFunctionBody() { // It's possible that the func body is empty, such as in the // function header declaration. Usually it's just a token ';'. Param p_body = mParams[0]; - if (p_body.mIsTreeNode) { + if (!p_body.mIsEmpty) { + MASSERT(p_body.mIsTreeNode); TreeNode *tree_node = p_body.mData.mTreeNode; MASSERT(tree_node->IsBlock() && "Class body is not a BlockNode?"); BlockNode *block = (BlockNode*)tree_node; - func->AddBody(block); + func->SetBody(block); + } else { + // It is an 'empty' function body. Not a NULL pointer of function body. + BlockNode *block = (BlockNode*)gTreePool.NewTreeNode(sizeof(BlockNode)); + new (block) BlockNode(); + func->SetBody(block); } mLastTreeNode = func; @@ -1668,24 +3367,194 @@ TreeNode* ASTBuilder::AddFunctionBodyTo() { // It's possible that the func body is empty, such as in the // function header declaration. Usually it's just a token ';'. Param p_body = mParams[1]; - if (p_body.mIsTreeNode) { + if (!p_body.mIsEmpty && p_body.mIsTreeNode) { TreeNode *tree_node = p_body.mData.mTreeNode; MASSERT(tree_node->IsBlock() && "Class body is not a BlockNode?"); BlockNode *block = (BlockNode*)tree_node; - func->AddBody(block); + func->SetBody(block); } mLastTreeNode = func; return mLastTreeNode; } +// It take no arugment. It uses mLastTreeNode which is +// a function node. +TreeNode* ASTBuilder::SetIsGenerator() { + MASSERT(mLastTreeNode->IsFunction()); + FunctionNode *node = (FunctionNode*)mLastTreeNode; + node->SetIsGenerator(); + return mLastTreeNode; +} + +// It take no arugment. It uses mLastTreeNode which is +// a function node. +TreeNode* ASTBuilder::SetIsIterator() { + MASSERT(mLastTreeNode->IsFunction()); + FunctionNode *node = (FunctionNode*)mLastTreeNode; + node->SetIsIterator(); + return mLastTreeNode; +} + +// It take no arugment. It uses mLastTreeNode which is +// a yield node. +TreeNode* ASTBuilder::SetIsTransfer() { + MASSERT(mLastTreeNode->IsYield()); + YieldNode *node = (YieldNode*)mLastTreeNode; + node->SetIsTransfer(); + return mLastTreeNode; +} + +// It take no arugment. It uses mLastTreeNode which is +// a function node. +TreeNode* ASTBuilder::SetGetAccessor() { + MASSERT(mLastTreeNode->IsFunction()); + FunctionNode *node = (FunctionNode*)mLastTreeNode; + node->SetIsGetAccessor(); + return mLastTreeNode; +} + +// It take no arugment. It uses mLastTreeNode which is +// a function node. +TreeNode* ASTBuilder::SetSetAccessor() { + MASSERT(mLastTreeNode->IsFunction()); + FunctionNode *node = (FunctionNode*)mLastTreeNode; + node->SetIsSetAccessor(); + return mLastTreeNode; +} + +// It take no arugment. It uses mLastTreeNode which is +// a function node. +TreeNode* ASTBuilder::SetCallSignature() { + MASSERT(mLastTreeNode->IsFunction()); + FunctionNode *node = (FunctionNode*)mLastTreeNode; + node->SetIsCallSignature(); + return mLastTreeNode; +} + +// It take no arugment. It uses mLastTreeNode which is +// a function node. 
+TreeNode* ASTBuilder::SetConstructSignature() { + MASSERT(mLastTreeNode->IsFunction()); + FunctionNode *node = (FunctionNode*)mLastTreeNode; + node->SetIsConstructSignature(); + return mLastTreeNode; +} + //////////////////////////////////////////////////////////////////////////////// -// Other Functions +// Try, Catch, Throw //////////////////////////////////////////////////////////////////////////////// -// This takes just one argument which is the tree passed from the -// children. It could be single IdentifierNode, or a PassNode with -// more than one tree nodes. +// Takes one argument which is the block. +TreeNode* ASTBuilder::BuildTry() { + if (mTrace) + std::cout << "In BuildTry" << std::endl; + + Param p_block = mParams[0]; + + MASSERT(p_block.mIsTreeNode); + TreeNode *block = p_block.mData.mTreeNode; + + TryNode *try_node = (TryNode*)gTreePool.NewTreeNode(sizeof(TryNode)); + new (try_node) TryNode(); + try_node->SetBlock((BlockNode*)block); + + mLastTreeNode = try_node; + return mLastTreeNode; +} + +// Takes one arguments, the catch clause +// Add to mLastTreeNode which is a TryNode. +TreeNode* ASTBuilder::AddCatch() { + if (mTrace) + std::cout << "In AddCatch " << std::endl; + + Param p_catch = mParams[0]; + MASSERT(p_catch.mIsTreeNode); + TreeNode *catch_node = p_catch.mData.mTreeNode; + + TryNode *try_node = (TryNode*)mLastTreeNode; + try_node->AddCatch(catch_node); + + return mLastTreeNode; +} + +// Takes one arguments, the finally clause +// Add to mLastTreeNode which is a TryNode. +TreeNode* ASTBuilder::AddFinally() { + if (mTrace) + std::cout << "In AddFinally " << std::endl; + + Param p_finally = mParams[0]; + MASSERT(p_finally.mIsTreeNode); + TreeNode *finally_node = p_finally.mData.mTreeNode; + MASSERT(finally_node->IsFinally()); + + TryNode *try_node = (TryNode*)mLastTreeNode; + try_node->SetFinally((FinallyNode*)finally_node); + + return mLastTreeNode; +} + +// Takes one argument which is the block. +TreeNode* ASTBuilder::BuildFinally() { + if (mTrace) + std::cout << "In BuildFinally" << std::endl; + + Param p_block = mParams[0]; + + MASSERT(p_block.mIsTreeNode); + TreeNode *block = p_block.mData.mTreeNode; + + FinallyNode *finally_node = (FinallyNode*)gTreePool.NewTreeNode(sizeof(FinallyNode)); + new (finally_node) FinallyNode(); + finally_node->SetBlock((BlockNode*)block); + + mLastTreeNode = finally_node; + return mLastTreeNode; +} + +// 1. Takes two arguments, the parameters and the block +// 2. Takes one argument, the block +TreeNode* ASTBuilder::BuildCatch() { + if (mTrace) + std::cout << "In BuildCatch" << std::endl; + + TreeNode *params = NULL; + TreeNode *block = NULL; + + if (mParams.size() == 2) { + Param p_params = mParams[0]; + Param p_block = mParams[1]; + + MASSERT(p_params.mIsTreeNode); + params = p_params.mData.mTreeNode; + + MASSERT(p_block.mIsTreeNode); + block = p_block.mData.mTreeNode; + } else { + Param p_block = mParams[0]; + MASSERT(p_block.mIsTreeNode); + block = p_block.mData.mTreeNode; + } + + CatchNode *catch_node = (CatchNode*)gTreePool.NewTreeNode(sizeof(CatchNode)); + new (catch_node) CatchNode(); + + if (params) + catch_node->AddParam(params); + catch_node->SetBlock((BlockNode*)block); + + mLastTreeNode = catch_node; + return mLastTreeNode; +} + + +//////////////////////////////////////////////////////////////////////////////// +// Throw Functions +//////////////////////////////////////////////////////////////////////////////// + +// This takes just one argument which is the exception(s) thrown. 
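// Editorial sketch, not part of the original patch: for a hypothetical
// statement such as
//
//   throw new Error("bad state");
//
// the single argument is the tree of the thrown expression (here a NewNode),
// and the function below wraps it in a ThrowNode through AddException().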
TreeNode* ASTBuilder::BuildThrows() { if (mTrace) std::cout << "In BuildThrows" << std::endl; @@ -1694,12 +3563,14 @@ TreeNode* ASTBuilder::BuildThrows() { if (!p_throws.mIsTreeNode) MERROR("The exceptions is not a treenode in BuildThrows()"); - TreeNode *node_throws = p_throws.mData.mTreeNode; + TreeNode *exceptions = p_throws.mData.mTreeNode; + + ThrowNode *throw_node = (ThrowNode*)gTreePool.NewTreeNode(sizeof(ThrowNode)); + new (throw_node) ThrowNode(); - if (!node_throws->IsIdentifier() && !node_throws->IsPass()) - MERROR("The throws should be an indentifier node or pass node. Not?"); + throw_node->AddException(exceptions); - mLastTreeNode = node_throws; + mLastTreeNode = throw_node; return mLastTreeNode; } @@ -1724,7 +3595,7 @@ TreeNode* ASTBuilder::AddThrowsTo() { TreeNode *tree_node = p_body.mData.mTreeNode; if (tree_node->IsIdentifier()) { IdentifierNode *id = (IdentifierNode*)tree_node; - ExceptionNode *exception = (ExceptionNode*)mTreePool->NewTreeNode(sizeof(ExceptionNode)); + ExceptionNode *exception = (ExceptionNode*)gTreePool.NewTreeNode(sizeof(ExceptionNode)); new (exception) ExceptionNode(id); func->AddThrow(exception); } else if (tree_node->IsPass()) { @@ -1733,7 +3604,7 @@ TreeNode* ASTBuilder::AddThrowsTo() { TreeNode *child = pass->GetChild(i); if (child->IsIdentifier()) { IdentifierNode *id = (IdentifierNode*)child; - ExceptionNode *exception = (ExceptionNode*)mTreePool->NewTreeNode(sizeof(ExceptionNode)); + ExceptionNode *exception = (ExceptionNode*)gTreePool.NewTreeNode(sizeof(ExceptionNode)); new (exception) ExceptionNode(id); func->AddThrow(exception); } else { @@ -1747,6 +3618,27 @@ TreeNode* ASTBuilder::AddThrowsTo() { return mLastTreeNode; } +//////////////////////////////////////////////////////////////////////////////// +// Pass a Child +// We only pass tree node. It should not be a token. 
+//////////////////////////////////////////////////////////////////////////////// + +TreeNode* ASTBuilder::PassChild() { + if (mTrace) + std::cout << "In PassChild" << std::endl; + + TreeNode *node = NULL; + Param p = mParams[0]; + if (!p.mIsEmpty) { + if (!p.mIsTreeNode) + MERROR("The child is not a treenode."); + node = p.mData.mTreeNode; + } + + mLastTreeNode = node; + return mLastTreeNode; +} + //////////////////////////////////////////////////////////////////////////////// // User Type Functions //////////////////////////////////////////////////////////////////////////////// @@ -1758,26 +3650,107 @@ TreeNode* ASTBuilder::BuildUserType() { Param p_id = mParams[0]; if (!p_id.mIsTreeNode) MERROR("The Identifier of user type is not a treenode."); + TreeNode *id = p_id.mData.mTreeNode; - TreeNode *node = p_id.mData.mTreeNode; - if (!node->IsIdentifier()) - MERROR("The Identifier of user type is not an identifier."); - IdentifierNode *id = (IdentifierNode*)node; - - UserTypeNode *user_type = (UserTypeNode*)mTreePool->NewTreeNode(sizeof(UserTypeNode)); + UserTypeNode *user_type = (UserTypeNode*)gTreePool.NewTreeNode(sizeof(UserTypeNode)); new (user_type) UserTypeNode(id); mLastTreeNode = user_type; return mLastTreeNode; } -TreeNode* ASTBuilder::AddTypeArgument() { +TreeNode* ASTBuilder::BuildTypeParameter() { + if (mTrace) + std::cout << "In BuildTypeParameter" << std::endl; + + Param p_id = mParams[0]; + if (!p_id.mIsTreeNode) + MERROR("The Identifier of type parameter is not a treenode."); + TreeNode *id = p_id.mData.mTreeNode; + + TypeParameterNode *tp = (TypeParameterNode*)gTreePool.NewTreeNode(sizeof(TypeParameterNode)); + new (tp) TypeParameterNode(); + tp->SetId(id); + + mLastTreeNode = tp; + return mLastTreeNode; +} + +// It takes one argument, the constraint which could be empty. +TreeNode* ASTBuilder::AddTypeParameterExtends() { if (mTrace) - std::cout << "In AddTypeArgument" << std::endl; + std::cout << "In AddTypeParameterExtends" << std::endl; + + Param p_id = mParams[0]; + if (p_id.mIsEmpty) + return mLastTreeNode; + + MASSERT(p_id.mIsTreeNode); + TreeNode *id = p_id.mData.mTreeNode; + + MASSERT(mLastTreeNode->IsTypeParameter()); + TypeParameterNode *tp = (TypeParameterNode*)mLastTreeNode; + tp->SetExtends(id); + + mLastTreeNode = tp; + return mLastTreeNode; +} + + +// Takes one argument, the as 'type'. +TreeNode* ASTBuilder::BuildAsType() { + if (mTrace) + std::cout << "In BuildAsType" << std::endl; + + Param p_id = mParams[0]; + if (!p_id.mIsTreeNode) + MERROR("The Identifier of type parameter is not a treenode."); + TreeNode *id = p_id.mData.mTreeNode; + + AsTypeNode *tp = (AsTypeNode*)gTreePool.NewTreeNode(sizeof(AsTypeNode)); + new (tp) AsTypeNode(); + tp->SetType(id); + + mLastTreeNode = tp; + return mLastTreeNode; +} + +// Takes four argument, type a, type b, type c , type d. 
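// ----------------------------------------------------------------------------
// [Editor's sketch -- not part of the patch] The builder actions above all read
// their operands from mParams, where each Param records whether it is empty and
// whether it carries a tree node, and several actions dispatch on
// mParams.size() (e.g. BuildCatch with or without a parameter list). A reduced
// standalone model of that convention; Param here keeps only the fields used in
// this file (mIsEmpty, mIsTreeNode, mData.mTreeNode) and TreeStub is illustrative.
// ----------------------------------------------------------------------------
#include <cassert>
#include <iostream>
#include <vector>

struct TreeStub { const char* mTag; };

struct Param {
  bool mIsEmpty    = true;
  bool mIsTreeNode = false;
  union { TreeStub* mTreeNode; long mToken; } mData;
};

// Mirrors the 1-vs-2 argument handling in BuildCatch(): with two params the
// first is the parameter list, the last is the block.
TreeStub* BuildCatchLike(const std::vector<Param>& params) {
  TreeStub* param_list = nullptr;
  TreeStub* block      = nullptr;
  if (params.size() == 2) {
    assert(params[0].mIsTreeNode);
    param_list = params[0].mData.mTreeNode;
    assert(params[1].mIsTreeNode);
    block = params[1].mData.mTreeNode;
  } else {
    assert(params.size() == 1 && params[0].mIsTreeNode);
    block = params[0].mData.mTreeNode;
  }
  std::cout << "catch with " << (param_list ? "a" : "no")
            << " parameter, block=" << block->mTag << "\n";
  return block;
}

int main() {
  TreeStub blk{"block"};
  Param p;
  p.mIsEmpty = false;
  p.mIsTreeNode = true;
  p.mData.mTreeNode = &blk;
  BuildCatchLike({p});            // the one-argument form
  return 0;
}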
+TreeNode* ASTBuilder::BuildConditionalType() { + if (mTrace) + std::cout << "In BuildConditionalType" << std::endl; + + Param p_a = mParams[0]; + TreeNode *type_a = p_a.mData.mTreeNode; + Param p_b = mParams[1]; + TreeNode *type_b = p_b.mData.mTreeNode; + Param p_c = mParams[2]; + TreeNode *type_c = p_c.mData.mTreeNode; + Param p_d = mParams[3]; + TreeNode *type_d = p_d.mData.mTreeNode; + + ConditionalTypeNode *tp = (ConditionalTypeNode*)gTreePool.NewTreeNode(sizeof(ConditionalTypeNode)); + new (tp) ConditionalTypeNode(); + tp->SetTypeA(type_a); + tp->SetTypeB(type_b); + tp->SetTypeC(type_c); + tp->SetTypeD(type_d); + + mLastTreeNode = tp; + return mLastTreeNode; +} + + +// It takes one argument, the type param or type arg +TreeNode* ASTBuilder::AddTypeGenerics() { + if (mTrace) + std::cout << "In AddTypeGenerics" << std::endl; if (mParams.size() == 0) return mLastTreeNode; Param p_args = mParams[0]; + if (p_args.mIsEmpty) + return mLastTreeNode; // Some language allows special syntax as type arguments, like <> in Java. // It's just a token. @@ -1787,22 +3760,273 @@ TreeNode* ASTBuilder::AddTypeArgument() { TreeNode *args = p_args.mData.mTreeNode; MASSERT(args); - UserTypeNode *type_node = (UserTypeNode*)mLastTreeNode; - type_node->AddTypeArgs(args); + if (mLastTreeNode->IsTypeAlias()) { + TypeAliasNode *type_alias = (TypeAliasNode*)mLastTreeNode; + UserTypeNode *n = type_alias->GetId(); + n->AddTypeGeneric(args); + } else if (mLastTreeNode->IsUserType()) { + UserTypeNode *type_node = (UserTypeNode*)mLastTreeNode; + type_node->AddTypeGeneric(args); + } else if (mLastTreeNode->IsCall()) { + CallNode *call = (CallNode*)mLastTreeNode; + call->AddTypeArgument(args); + } else if (mLastTreeNode->IsFunction()) { + FunctionNode *func = (FunctionNode*)mLastTreeNode; + func->AddTypeParam(args); + } else if (mLastTreeNode->IsClass()) { + ClassNode *c = (ClassNode*)mLastTreeNode; + c->AddTypeParam(args); + } else if (mLastTreeNode->IsStruct()) { + StructNode *c = (StructNode*)mLastTreeNode; + c->AddTypeParam(args); + } else if (mLastTreeNode->IsLambda()) { + LambdaNode *c = (LambdaNode*)mLastTreeNode; + c->AddTypeParam(args); + } else { + MERROR("Unsupported node in AddTypeGenerics()"); + } + + return mLastTreeNode; +} + +// It takes two arguments to build a union type, child-a and child-b +// A child could be a prim type or user type, or even a union user type. +TreeNode* ASTBuilder::BuildUnionUserType() { + if (mTrace) + std::cout << "In BuildUnionUserType" << std::endl; + + UserTypeNode *user_type = NULL; + + Param p_a = mParams[0]; + MASSERT (p_a.mIsTreeNode); + TreeNode *child_a = p_a.mData.mTreeNode; + + Param p_b = mParams[1]; + MASSERT (p_b.mIsTreeNode); + TreeNode *child_b = p_b.mData.mTreeNode; + + if (child_a->IsUserType()) { + UserTypeNode *ut = (UserTypeNode*)child_a; + // for case like : (a | b)[] | c + // We won't merge c into the array type. + if (ut->GetType() == UT_Union && !ut->GetDims()) { + user_type = ut; + user_type->AddUnionInterType(child_b); + } + } + + if (child_b->IsUserType()) { + UserTypeNode *ut = (UserTypeNode*)child_b; + if (ut->GetType() == UT_Union && !ut->GetDims()) { + // assert, both children cannot be UnionUserType at the same time. 
+ MASSERT(!user_type); + user_type = ut; + user_type->AddUnionInterType(child_a, true); + } + } + + if (!user_type) { + user_type = (UserTypeNode*)gTreePool.NewTreeNode(sizeof(UserTypeNode)); + new (user_type) UserTypeNode(); + user_type->SetType(UT_Union); + user_type->AddUnionInterType(child_a); + user_type->AddUnionInterType(child_b); + } + + mLastTreeNode = user_type; + return mLastTreeNode; +} + +// It takes two arguments to build a intersection type, child-a and child-b +// A child could be a prim type or user type. +TreeNode* ASTBuilder::BuildInterUserType() { + if (mTrace) + std::cout << "In BuildInterUserType" << std::endl; + + UserTypeNode *user_type = NULL; + + Param p_a = mParams[0]; + MASSERT (p_a.mIsTreeNode); + TreeNode *child_a = p_a.mData.mTreeNode; + + Param p_b = mParams[1]; + MASSERT (p_b.mIsTreeNode); + TreeNode *child_b = p_b.mData.mTreeNode; + + if (child_a->IsUserType()) { + UserTypeNode *ut = (UserTypeNode*)child_a; + if (ut->GetType() == UT_Inter) { + user_type = ut; + user_type->AddUnionInterType(child_b); + } + } + + if (child_b->IsUserType()) { + UserTypeNode *ut = (UserTypeNode*)child_b; + if (ut->GetType() == UT_Inter) { + // assert, both children cannot be UnionUserType at the same time. + MASSERT(!user_type); + user_type = ut; + user_type->AddUnionInterType(child_a, true); + } + } + + if (!user_type) { + user_type = (UserTypeNode*)gTreePool.NewTreeNode(sizeof(UserTypeNode)); + new (user_type) UserTypeNode(); + user_type->SetType(UT_Inter); + user_type->AddUnionInterType(child_a); + user_type->AddUnionInterType(child_b); + } + + mLastTreeNode = user_type; + return mLastTreeNode; +} + +// It takes two arguments. The alias name, and they orig type. +TreeNode* ASTBuilder::BuildTypeAlias() { + if (mTrace) + std::cout << "In BuildTypeAlias" << std::endl; + + Param p_name = mParams[0]; + MASSERT (p_name.mIsTreeNode); + TreeNode *name = p_name.mData.mTreeNode; + MASSERT(name->IsIdentifier()); + IdentifierNode *id = (IdentifierNode*)name; + + UserTypeNode *user_type = (UserTypeNode*)gTreePool.NewTreeNode(sizeof(UserTypeNode)); + new (user_type) UserTypeNode(); + user_type->SetId(id); + + Param p_orig = mParams[1]; + MASSERT (p_orig.mIsTreeNode); + TreeNode *orig = p_orig.mData.mTreeNode; + + TypeAliasNode *type_alias = (TypeAliasNode*)gTreePool.NewTreeNode(sizeof(TypeAliasNode)); + new (type_alias) TypeAliasNode(); + type_alias->SetId(user_type); + type_alias->SetAlias(orig); + + mLastTreeNode = type_alias; + return mLastTreeNode; +} + +// It takes at least one argument, the basic type. +TreeNode* ASTBuilder::BuildNeverArrayType() { + if (mTrace) + std::cout << "In BuildNeverArrayType" << std::endl; + + PrimTypeNode *prim_type = gPrimTypePool.FindType(TY_Never); + PrimArrayTypeNode *prim_array_type = (PrimArrayTypeNode*)gTreePool.NewTreeNode(sizeof(PrimArrayTypeNode)); + new (prim_array_type) PrimArrayTypeNode(); + prim_array_type->SetPrim(prim_type); + + DimensionNode *dims = (DimensionNode*)gTreePool.NewTreeNode(sizeof(DimensionNode)); + new (dims) DimensionNode(); + dims->AddDimension(0); + + prim_array_type->SetDims(dims); + mLastTreeNode = prim_array_type; + + return mLastTreeNode; +} + +// It takes at least one argument, the basic type. +// The rest argument represent the dimensions. +// +// [NOTE] For each dimension, we are using a trick. If the size of a dimension is unknown, +// we use the same tree node of 'basic type'. 
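// ----------------------------------------------------------------------------
// [Editor's sketch -- not part of the patch] BuildUnionUserType() and
// BuildInterUserType() above fold "a | b | c" into a single node: when one
// child is already a union of the same flavor (and not an array of a union),
// the other child is merged into it, inserting at the front when the existing
// union arrived as the right-hand child so that source order is preserved.
// A standalone sketch of that flattening; UnionStub is an illustrative type.
// ----------------------------------------------------------------------------
#include <iostream>
#include <string>
#include <vector>

struct UnionStub {
  bool mIsUnion = false;
  std::string mName;                       // used when it is a plain type
  std::vector<std::string> mMembers;       // used when mIsUnion
};

// Mirrors the three branches in BuildUnionUserType().
UnionStub* MergeUnion(UnionStub* a, UnionStub* b) {
  if (a->mIsUnion) {                       // (a | b) | c  ==> reuse a
    a->mMembers.push_back(b->mName);
    return a;
  }
  if (b->mIsUnion) {                       // a | (b | c)  ==> prepend a into b
    b->mMembers.insert(b->mMembers.begin(), a->mName);
    return b;
  }
  UnionStub* u = new UnionStub;            // a | b        ==> fresh union node
  u->mIsUnion = true;
  u->mMembers = { a->mName, b->mName };
  return u;
}

int main() {
  UnionStub a{false, "A", {}}, b{false, "B", {}}, c{false, "C", {}};
  UnionStub* ab  = MergeUnion(&a, &b);     // fresh union {A, B}
  UnionStub* abc = MergeUnion(ab, &c);     // reuses ab, now {A, B, C}
  for (const std::string& m : abc->mMembers) std::cout << m << ' ';
  std::cout << '\n';                       // prints: A B C
  delete ab;                               // only heap-allocated node (abc aliases it)
  return 0;
}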
+ +TreeNode* ASTBuilder::BuildArrayType() { + if (mTrace) + std::cout << "In BuildArrayType" << std::endl; + + Param p_basic = mParams[0]; + MASSERT (p_basic.mIsTreeNode); + TreeNode *basic = p_basic.mData.mTreeNode; + + UserTypeNode *user_type = NULL; // we return either user_type + PrimTypeNode *prim_type = NULL; // + PrimArrayTypeNode *prim_array_type = NULL; // or prim_array_type + + DimensionNode *dims = NULL; + + // This is a weird behavior in Typescript. A type key word can be identifier also. + // I need check here. + if (basic->IsIdentifier()) { + IdentifierNode *id = (IdentifierNode*)basic; + const char *id_name = id->GetName(); + if (id_name) { + PrimTypeNode *pt = gPrimTypePool.FindType(id_name); + if (pt) + basic = pt; + } + } + + if (basic->IsPrimArrayType()) { + prim_array_type = (PrimArrayTypeNode*)basic; + dims = prim_array_type->GetDims(); + } else if (basic->IsUserType()) { + user_type = (UserTypeNode*)basic; + dims = user_type->GetDims(); + } else if (basic->IsPrimType()) { + prim_type = (PrimTypeNode*)basic; + prim_array_type = (PrimArrayTypeNode*)gTreePool.NewTreeNode(sizeof(PrimArrayTypeNode)); + new (prim_array_type) PrimArrayTypeNode(); + prim_array_type->SetPrim(prim_type); + } else { + user_type = (UserTypeNode*)gTreePool.NewTreeNode(sizeof(UserTypeNode)); + new (user_type) UserTypeNode(); + user_type->SetId(basic); + } + + if (!dims) { + dims = (DimensionNode*)gTreePool.NewTreeNode(sizeof(DimensionNode)); + new (dims) DimensionNode(); + } + + for (unsigned i = 1; i < mParams.size(); i++) { + Param p_dim = mParams[i]; + MASSERT (p_dim.mIsTreeNode); + TreeNode *dim = p_dim.mData.mTreeNode; + // Right now we just add all 0 to dim. + if (dim == basic) + dims->AddDimension(0); + else + dims->AddDimension(0); + } + + if (user_type) { + if (!user_type->GetDims()) + user_type->SetDims(dims); + mLastTreeNode = user_type; + } else { + MASSERT(prim_array_type); + if (!prim_array_type->GetDims()) + prim_array_type->SetDims(dims); + mLastTreeNode = prim_array_type; + } return mLastTreeNode; } //////////////////////////////////////////////////////////////////////////////// -// Lambda Expression +// LambdaNode +// As stated in the ast.h, LambdaNode could be different syntax construct in +// different languages. //////////////////////////////////////////////////////////////////////////////// +// It could take +// 1) One parameter, which is the parameter list. 
+// 2) two parameters, the parameter list and the body TreeNode* ASTBuilder::BuildLambda() { if (mTrace) std::cout << "In BuildLambda" << std::endl; - Param p_params = mParams[0]; TreeNode *params_node = NULL; + TreeNode *body_node = NULL; + + Param p_params = mParams[0]; if (!p_params.mIsEmpty) { if (!p_params.mIsTreeNode) MERROR("Lambda params is not a tree node."); @@ -1810,26 +4034,294 @@ TreeNode* ASTBuilder::BuildLambda() { params_node = p_params.mData.mTreeNode; } - Param p_body = mParams[1]; - TreeNode *body_node = NULL; - if (!p_body.mIsEmpty) { - if (!p_body.mIsTreeNode) - MERROR("Lambda Body is not a tree node."); - else - body_node = CvtToBlock(p_body.mData.mTreeNode); + if (mParams.size() == 2) { + Param p_body = mParams[1]; + if (!p_body.mIsEmpty) { + if (!p_body.mIsTreeNode) + MERROR("Lambda Body is not a tree node."); + else + body_node = p_body.mData.mTreeNode; + } } - LambdaNode *lambda = (LambdaNode*)mTreePool->NewTreeNode(sizeof(LambdaNode)); + LambdaNode *lambda = (LambdaNode*)gTreePool.NewTreeNode(sizeof(LambdaNode)); new (lambda) LambdaNode(); if (params_node) { - if (params_node->IsIdentifier()) + if (params_node->IsIdentifier()) { lambda->AddParam((IdentifierNode*)params_node); + } else { + AddParams(lambda, params_node); + } } - lambda->SetBody(body_node); + if (body_node) + lambda->SetBody(body_node); mLastTreeNode = lambda; return mLastTreeNode; } + +// It take no arugment. It uses mLastTreeNode which is +// a lambda node. +TreeNode* ASTBuilder::SetJavaLambda() { + MASSERT(mLastTreeNode->IsLambda()); + LambdaNode *node = (LambdaNode*)mLastTreeNode; + node->SetProperty(LP_JavaLambda); + return mLastTreeNode; +} + +// It take no arugment. It uses mLastTreeNode which is +// a lambda node. +TreeNode* ASTBuilder::SetArrowFunction() { + MASSERT(mLastTreeNode->IsLambda()); + LambdaNode *node = (LambdaNode*)mLastTreeNode; + node->SetProperty(LP_JSArrowFunction); + return mLastTreeNode; +} + +//////////////////////////////////////////////////////////////////////////////// +// InstanceOf Expression +//////////////////////////////////////////////////////////////////////////////// + +TreeNode* ASTBuilder::BuildInstanceOf() { + if (mTrace) + std::cout << "In BuildInstanceOf" << std::endl; + + Param l_param = mParams[0]; + MASSERT(!l_param.mIsEmpty); + MASSERT(l_param.mIsTreeNode); + TreeNode *left = l_param.mData.mTreeNode; + + Param r_param = mParams[1]; + MASSERT(!r_param.mIsEmpty); + MASSERT(r_param.mIsTreeNode); + TreeNode *right = r_param.mData.mTreeNode; + + InstanceOfNode *instanceof = (InstanceOfNode*)gTreePool.NewTreeNode(sizeof(InstanceOfNode)); + new (instanceof) InstanceOfNode(); + + instanceof->SetLeft(left); + instanceof->SetRight(right); + + mLastTreeNode = instanceof; + return mLastTreeNode; +} + +//////////////////////////////////////////////////////////////////////////////// +// In Expression +//////////////////////////////////////////////////////////////////////////////// + +TreeNode* ASTBuilder::BuildIn() { + if (mTrace) + std::cout << "In BuildIn" << std::endl; + + Param l_param = mParams[0]; + MASSERT(!l_param.mIsEmpty); + MASSERT(l_param.mIsTreeNode); + TreeNode *left = l_param.mData.mTreeNode; + + Param r_param = mParams[1]; + MASSERT(!r_param.mIsEmpty); + MASSERT(r_param.mIsTreeNode); + TreeNode *right = r_param.mData.mTreeNode; + + InNode *innode = (InNode*)gTreePool.NewTreeNode(sizeof(InNode)); + new (innode) InNode(); + + innode->SetLeft(left); + innode->SetRight(right); + + mLastTreeNode = innode; + return mLastTreeNode; +} + 
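// ----------------------------------------------------------------------------
// [Editor's sketch -- not part of the patch] As noted above, one LambdaNode kind
// serves several source constructs (Java lambdas, JS/TS arrow functions): the
// grammar action first builds the node, then a follow-up action (SetJavaLambda
// or SetArrowFunction) tags it via a property setter. A standalone sketch of
// that tagging; LambdaStub and LP_Unset are illustrative, while LP_JavaLambda
// and LP_JSArrowFunction are the property names used in the patch above.
// ----------------------------------------------------------------------------
#include <iostream>

enum LambdaProperty { LP_Unset, LP_JavaLambda, LP_JSArrowFunction };

struct LambdaStub {
  LambdaProperty mProperty = LP_Unset;
  void SetProperty(LambdaProperty p) { mProperty = p; }
};

// The rule actions run Build first, then the language-specific tag.
LambdaStub* SetArrowFunction(LambdaStub* last_tree_node) {
  last_tree_node->SetProperty(LP_JSArrowFunction);
  return last_tree_node;
}

int main() {
  LambdaStub lambda;                    // outcome of a BuildLambda-like action
  SetArrowFunction(&lambda);            // a TS/JS rule then tags it
  std::cout << (lambda.mProperty == LP_JSArrowFunction ? "arrow function\n"
                                                       : "other lambda\n");
  return 0;
}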
+//////////////////////////////////////////////////////////////////////////////// +// ComputedNameNode Expression +//////////////////////////////////////////////////////////////////////////////// + +TreeNode* ASTBuilder::BuildComputedName() { + if (mTrace) + std::cout << "In BuildComputedName" << std::endl; + + Param l_param = mParams[0]; + MASSERT(!l_param.mIsEmpty); + MASSERT(l_param.mIsTreeNode); + TreeNode *in = l_param.mData.mTreeNode; + + ComputedNameNode *innode = (ComputedNameNode*)gTreePool.NewTreeNode(sizeof(ComputedNameNode)); + new (innode) ComputedNameNode(); + innode->SetExpr(in); + + mLastTreeNode = innode; + return mLastTreeNode; +} + +//////////////////////////////////////////////////////////////////////////////// +// Is Expression +//////////////////////////////////////////////////////////////////////////////// + +TreeNode* ASTBuilder::BuildIs() { + if (mTrace) + std::cout << "In BuildIs" << std::endl; + + Param l_param = mParams[0]; + MASSERT(!l_param.mIsEmpty); + MASSERT(l_param.mIsTreeNode); + TreeNode *left = l_param.mData.mTreeNode; + + Param r_param = mParams[1]; + MASSERT(!r_param.mIsEmpty); + MASSERT(r_param.mIsTreeNode); + TreeNode *right = r_param.mData.mTreeNode; + + IsNode *isnode = (IsNode*)gTreePool.NewTreeNode(sizeof(IsNode)); + new (isnode) IsNode(); + + isnode->SetLeft(left); + isnode->SetRight(right); + + mLastTreeNode = isnode; + return mLastTreeNode; +} + +//////////////////////////////////////////////////////////////////////////////// +// TypeOf Expression +//////////////////////////////////////////////////////////////////////////////// + +// It takes (1) one argument +// (2) zero argument. Use mLastTreeNode as the argument. +TreeNode* ASTBuilder::BuildTypeOf() { + if (mTrace) + std::cout << "In BuildTypeOf" << std::endl; + + TreeNode *expr = NULL; + + if (mParams.size() == 0) { + expr = mLastTreeNode; + } else { + Param l_param = mParams[0]; + MASSERT(l_param.mIsTreeNode); + expr = l_param.mData.mTreeNode; + } + + TypeOfNode *typeof = (TypeOfNode*)gTreePool.NewTreeNode(sizeof(TypeOfNode)); + new (typeof) TypeOfNode(); + + typeof->SetExpr(expr); + + mLastTreeNode = typeof; + return mLastTreeNode; +} + +//////////////////////////////////////////////////////////////////////////////// +// KeyOf Expression +//////////////////////////////////////////////////////////////////////////////// + +TreeNode* ASTBuilder::BuildKeyOf() { + if (mTrace) + std::cout << "In BuildKeyOf" << std::endl; + + Param l_param = mParams[0]; + MASSERT(!l_param.mIsEmpty); + MASSERT(l_param.mIsTreeNode); + TreeNode *expr = l_param.mData.mTreeNode; + + KeyOfNode *keyof = (KeyOfNode*)gTreePool.NewTreeNode(sizeof(KeyOfNode)); + new (keyof) KeyOfNode(); + + keyof->SetExpr(expr); + + mLastTreeNode = keyof; + return mLastTreeNode; +} + +//////////////////////////////////////////////////////////////////////////////// +// Infer Expression +//////////////////////////////////////////////////////////////////////////////// + +TreeNode* ASTBuilder::BuildInfer() { + if (mTrace) + std::cout << "In BuildInfer" << std::endl; + + Param l_param = mParams[0]; + MASSERT(!l_param.mIsEmpty); + MASSERT(l_param.mIsTreeNode); + TreeNode *expr = l_param.mData.mTreeNode; + + InferNode *infer = (InferNode*)gTreePool.NewTreeNode(sizeof(InferNode)); + new (infer) InferNode(); + + infer->SetExpr(expr); + + mLastTreeNode = infer; + return mLastTreeNode; +} + +//////////////////////////////////////////////////////////////////////////////// +// Triple Slash Directive of TypeScript 
+//////////////////////////////////////////////////////////////////////////////// + +TreeNode* ASTBuilder::BuildTripleSlash() { + if (mTrace) + std::cout << "In BuildTripleSlash" << std::endl; + + Param l_param = mParams[0]; + MASSERT(!l_param.mIsEmpty); + MASSERT(l_param.mIsTreeNode); + TreeNode *left = l_param.mData.mTreeNode; + + Param r_param = mParams[1]; + MASSERT(!r_param.mIsEmpty); + MASSERT(r_param.mIsTreeNode); + TreeNode *right = r_param.mData.mTreeNode; + + TripleSlashNode *tsnode = (TripleSlashNode*)gTreePool.NewTreeNode(sizeof(TripleSlashNode)); + new (tsnode) TripleSlashNode(); + + TripleSlashProp prop = TSP_NA; + if (left->IsIdentifier()) { + // no-default-lib + if ((strlen(left->GetName()) == 14) && !strncmp(left->GetName(), "no-default-lib", 14)) + prop = TSP_NoDefaultLib; + // lib + if ((strlen(left->GetName()) == 3) && !strncmp(left->GetName(), "lib", 3)) + prop = TSP_Lib; + // types + if ((strlen(left->GetName()) == 5) && !strncmp(left->GetName(), "types", 5)) + prop = TSP_Types; + // path + if ((strlen(left->GetName()) == 4) && !strncmp(left->GetName(), "path", 4)) + prop = TSP_Path; + } + tsnode->SetProp(prop); + + tsnode->SetValue(right); + + mLastTreeNode = tsnode; + return mLastTreeNode; +} + +//////////////////////////////////////////////////////////////////////////////// +// Await +//////////////////////////////////////////////////////////////////////////////// + +// For first parameter has to be an operator. +TreeNode* ASTBuilder::BuildAwait() { + if (mTrace) + std::cout << "In BuildAwait" << std::endl; + + MASSERT(mParams.size() == 1); + Param p_a = mParams[0]; + MASSERT(!p_a.mIsEmpty && p_a.mIsTreeNode); + TreeNode *expr = p_a.mData.mTreeNode; + + AwaitNode *n = (AwaitNode*)gTreePool.NewTreeNode(sizeof(AwaitNode)); + new (n) AwaitNode(); + n->SetExpr(expr); + + mLastTreeNode = n; + return n; +} + } diff --git a/src/MapleFE/shared/src/ast_fixup.cpp b/src/MapleFE/shared/src/ast_fixup.cpp new file mode 100644 index 0000000000000000000000000000000000000000..30e8a26cc7475faacf088f8897946ef85b0e4c1f --- /dev/null +++ b/src/MapleFE/shared/src/ast_fixup.cpp @@ -0,0 +1,169 @@ +/* +* Copyright (C) [2021] Futurewei Technologies, Inc. All rights reverved. +* +* OpenArkFE is licensed under the Mulan PSL v2. +* You can use this software according to the terms and conditions of the Mulan PSL v2. +* You may obtain a copy of Mulan PSL v2 at: +* +* http://license.coscl.org.cn/MulanPSL2 +* +* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +* FIT FOR A PARTICULAR PURPOSE. +* See the Mulan PSL v2 for more details. 
+*/ + +#include +#include "ast_fixup.h" +#include "stringpool.h" + +namespace maplefe { + +bool FixUpVisitor::FixUp() { + Visit(mASTModule); + return mUpdated; +} + +// Fix up mOprId of a UnaOperatorNode +UnaOperatorNode *FixUpVisitor::VisitUnaOperatorNode(UnaOperatorNode *node) { + switch(node->GetOprId()) { + case OPR_Add: + node->SetOprId(OPR_Plus); + mUpdated = true; + break; + case OPR_Sub: + node->SetOprId(OPR_Minus); + mUpdated = true; + case OPR_Minus: + if (TreeNode *n = node->GetOpnd(); n && n->IsLiteral()) { + LiteralNode *lit = static_cast(n); + LitData data = lit->GetData(); + switch(data.mType) { + case LT_IntegerLiteral: + data.mData.mInt = -data.mData.mInt; + break; + case LT_FPLiteral: + data.mData.mFloat = -data.mData.mFloat; + break; + case LT_DoubleLiteral: + data.mData.mDouble = -data.mData.mDouble; + break; + default: + goto skip; + } + lit->SetData(data); + lit->SetParent(node->GetParent()); + mUpdated = true; + return (UnaOperatorNode *)lit; + } + skip: + break; + case OPR_Inc: + if(!node->IsPost()) { + node->SetOprId(OPR_PreInc); + mUpdated = true; + } + break; + case OPR_Dec: + if(!node->IsPost()) { + node->SetOprId(OPR_PreDec); + mUpdated = true; + } + } + return AstVisitor::VisitUnaOperatorNode(node); +} + +// Fix up the name string of a UserTypeNode +// Fix up literal boolean 'true' or 'false' as a type +UserTypeNode *FixUpVisitor::VisitUserTypeNode(UserTypeNode *node) { + auto id = node->GetId(); + + // Java FE 'java2mpl' needs this + if(id) + if(auto n = id->GetStrIdx()) + if(node->GetStrIdx() != n) { + node->SetStrIdx(n); + mUpdated = true; + } + + if(id && id->IsIdentifier()) { + auto n = id->GetStrIdx(); + auto true_id = gStringPool.GetStrIdx("true"); + if(n == true_id || n == gStringPool.GetStrIdx("false")) { + if(node->GetType() == UT_Regular + && node->GetDims() == nullptr + && node->GetUnionInterTypesNum() == 0 + && node->GetTypeGenericsNum() == 0 + && node->GetAttrsNum() == 0 + && node->GetAsTypesNum() == 0) { + mUpdated = true; + LitData data; + data.mType = LT_BooleanLiteral; + data.mData.mBool = n == true_id; + LiteralNode *lit = new (gTreePool.NewTreeNode(sizeof(LiteralNode))) LiteralNode(data); + return (UserTypeNode*)lit; + } + } + } + return AstVisitor::VisitUserTypeNode(node); +} + +// Fix up literal 'true' or 'false' +IdentifierNode *FixUpVisitor::VisitIdentifierNode(IdentifierNode *node) { + auto p = node->GetParent(); + if(p && node->GetInit() == nullptr && + (p->IsFieldLiteral() || p->IsTerOperator() || p->IsIdentifier() || p->IsBinOperator())) { + if(auto n = node->GetStrIdx()) { + auto true_id = gStringPool.GetStrIdx("true"); + if(n == true_id || n == gStringPool.GetStrIdx("false")) { + mUpdated = true; + LitData data; + data.mType = LT_BooleanLiteral; + data.mData.mBool = n == true_id; + LiteralNode *lit = new (gTreePool.NewTreeNode(sizeof(LiteralNode))) LiteralNode(data); + return (IdentifierNode*)lit; + } + } + } + return AstVisitor::VisitIdentifierNode(node); +} + +// Fix up the filename of a ModuleNode +ModuleNode *FixUpVisitor::VisitModuleNode(ModuleNode *node) { + const char* filename = node->GetFilename(); + std::filesystem::path orig = filename; + std::filesystem::path uniq; + try { + uniq = std::filesystem::canonical(orig); + } + catch(std::filesystem::filesystem_error const& ex) { + // Use orig if std::filesystem::filesystem_error is thrown + uniq = orig; + } + std::string p = uniq.string(); + if(p != filename) { + const char *res = gStringPool.FindString(p.c_str()); + node->SetFilename(res); + mUpdated = true; + } + return 
AstVisitor::VisitModuleNode(node);; +} + +// Fux up a PassNode for tagged template literal +PassNode *FixUpVisitor::VisitPassNode(PassNode *node) { + AstVisitor::VisitPassNode(node); + unsigned num = node->GetChildrenNum(); + if (num == 2) { + TreeNode *child0 = node->GetChild(0); + TreeNode *child1 = node->GetChild(1); + if (child0 && child1 && child1->IsTemplateLiteral()) { + CallNode *call = new (gTreePool.NewTreeNode(sizeof(CallNode))) CallNode(); + call->SetMethod(child0); + call->SetTaggedTemplate(static_cast(child1)); + return (PassNode*)call; + } + } + return node; +} + +} diff --git a/src/MapleFE/shared/src/ast_mempool.cpp b/src/MapleFE/shared/src/ast_mempool.cpp index 1c9cf527a47f364bdc67cbb5abc5e0ff9ea02c05..6c52307703272dd690bcb109e88c9d62843fea55 100644 --- a/src/MapleFE/shared/src/ast_mempool.cpp +++ b/src/MapleFE/shared/src/ast_mempool.cpp @@ -17,16 +17,21 @@ namespace maplefe { +TreePool gTreePool; + TreePool::~TreePool() { Release(); } char* TreePool::NewTreeNode(unsigned size) { char *addr = mMP.Alloc(size); - mTreeNodes.push_back((TreeNode*)addr); + TreeNode *tree = (TreeNode*)addr; + mTreeNodes.push_back(tree); + unsigned id = mTreeNodes.size(); + tree->SetNodeId(id); return addr; } - + void TreePool::Release() { // step 1. Release the containers in each tree node. std::vector::iterator it = mTreeNodes.begin(); diff --git a/src/MapleFE/shared/src/ast_module.cpp b/src/MapleFE/shared/src/ast_module.cpp index 02e80f41f33b89072c5b716b6007f194af1da0a9..9cab6e95aaf3856b52ef1283fdef66b794d7a798 100644 --- a/src/MapleFE/shared/src/ast_module.cpp +++ b/src/MapleFE/shared/src/ast_module.cpp @@ -1,5 +1,5 @@ /* -* Copyright (C) [2020] Futurewei Technologies, Inc. All rights reverved. +* Copyright (C) [2020-2022] Futurewei Technologies, Inc. All rights reverved. * * OpenArkFE is licensed under the Mulan PSL v2. * You can use this software according to the terms and conditions of the Mulan PSL v2. @@ -18,45 +18,98 @@ namespace maplefe { -ASTModule gModule; - -ASTModule::ASTModule() { +ModuleNode::ModuleNode() : TreeNode(NK_Module), mPackage(NULL), mSrcLang(SrcLangUnknown), + mIsAmbient(false) { mRootScope = mScopePool.NewScope(NULL); - mPackage = NULL; + mRootScope->SetTree(this); + this->SetScope(mRootScope); } -ASTModule::~ASTModule() { - // free trees - std::vector::iterator it = mTrees.begin(); - for (; it != mTrees.end(); it++) { - ASTTree *tree = *it; - if (tree) - delete tree; - } - mTrees.clear(); - - mImports.Release(); +ModuleNode::~ModuleNode() { + mTrees.Release(); } // AFAIK, all languages allow only one package name if it allows. 
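// ----------------------------------------------------------------------------
// [Editor's sketch -- not part of the patch] VisitPassNode() above shows the
// fix-up pattern used throughout ast_fixup.cpp: a visitor inspects a node after
// parsing and, when it recognizes a shape (here a two-child PassNode whose
// second child is a template literal), returns a replacement node (a call with
// a tagged template) and records that the tree changed. A standalone sketch of
// that rewrite; the Node, Pass and Call structs are illustrative.
// ----------------------------------------------------------------------------
#include <iostream>
#include <vector>

struct Node { bool mIsTemplateLiteral = false; };

struct Pass : Node { std::vector<Node*> mChildren; };

struct Call : Node {
  Node* mMethod = nullptr;
  Node* mTaggedTemplate = nullptr;
};

struct FixUp {
  bool mUpdated = false;
  // Returns the node to keep in the tree: the original Pass or a Call.
  Node* VisitPass(Pass* node) {
    if (node->mChildren.size() == 2 && node->mChildren[1]->mIsTemplateLiteral) {
      Call* call = new Call;
      call->mMethod = node->mChildren[0];
      call->mTaggedTemplate = node->mChildren[1];
      mUpdated = true;
      return call;                       // tag`...` becomes a call on 'tag'
    }
    return node;
  }
};

int main() {
  Node tag, tmpl;
  tmpl.mIsTemplateLiteral = true;
  Pass pass;
  pass.mChildren = {&tag, &tmpl};
  FixUp fixer;
  Node* result = fixer.VisitPass(&pass);
  std::cout << "updated: " << std::boolalpha << fixer.mUpdated << "\n";
  if (result != &pass) delete static_cast<Call*>(result);
  return 0;
}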
-void ASTModule::SetPackage(PackageNode *p) { +void ModuleNode::SetPackage(PackageNode *p) { MASSERT(!mPackage); mPackage = p; } +void ModuleNode::SetSrcLang(SrcLang l) { + mSrcLang = l; +} + +SrcLang ModuleNode::GetSrcLang() { + return mSrcLang; +} + +std::string ModuleNode::GetSrcLangString() { + switch (mSrcLang) { + case SrcLangJava: return "Java"; + case SrcLangTypeScript: return "TypeScript"; + case SrcLangJavaScript: return "JavaScript"; + case SrcLangC: return "C"; + default: break; + } + return "Unknown"; +} + +void ModuleNode::AddTreeFront(TreeNode *tree) { + mTrees.PushFront(tree); + tree->SetParent(this); +} + +// The tree could be PassNode +void ModuleNode::AddTree(TreeNode *tree) { + if (tree->IsDecl()) { + DeclNode *decl = (DeclNode*)tree; + TreeNode *var = decl->GetVar(); + if (var && var->IsPass()) { + PassNode *pass = (PassNode*)var; + for (unsigned i = 0; i < pass->GetChildrenNum(); i++) { + DeclNode *n = (DeclNode*)gTreePool.NewTreeNode(sizeof(DeclNode)); + new (n) DeclNode(); + n->SetVar(pass->GetChild(i)); + n->SetProp(decl->GetProp()); + AddTree(n); + } + } else { + mTrees.PushBack(tree); + tree->SetParent(this); + } + } else if (tree->IsPass()) { + PassNode *pass_node = (PassNode*)tree; + for (unsigned i = 0; i < pass_node->GetChildrenNum(); i++) { + TreeNode *child = pass_node->GetChild(i); + AddTree(child); + } + } else { + mTrees.PushBack(tree); + tree->SetParent(this); + } +} + // Return a new scope newly created. // Set the parent<->child relation between it and p. -ASTScope* ASTModule::NewScope(ASTScope *p) { +ASTScope* ModuleNode::NewScope(ASTScope *p) { + ASTScope *newscope = mScopePool.NewScope(p); + return newscope; +} + +ASTScope* ModuleNode::NewScope(ASTScope *p, TreeNode *t) { ASTScope *newscope = mScopePool.NewScope(p); + newscope->SetTree(t); + t->SetScope(newscope); return newscope; } -void ASTModule::Dump() { +void ModuleNode::Dump(unsigned indent) { std::cout << "============= Module ===========" << std::endl; - std::vector::iterator tree_it = mTrees.begin(); - for (; tree_it != mTrees.end(); tree_it++) { - ASTTree *tree = *tree_it; + for (unsigned i = 0; i < mTrees.GetNum(); i++) { + TreeNode *tree = GetTree(i); + DUMP0("== Sub Tree =="); tree->Dump(0); + DUMP_RETURN(); } } } diff --git a/src/MapleFE/shared/src/ast_scope.cpp b/src/MapleFE/shared/src/ast_scope.cpp index 8f416244c0097928eb237ee76796721ccfe288fd..f1e5505be67a6b5e36140edc8200f9c4c4f62fd3 100644 --- a/src/MapleFE/shared/src/ast_scope.cpp +++ b/src/MapleFE/shared/src/ast_scope.cpp @@ -1,5 +1,5 @@ /* -* Copyright (C) [2020] Futurewei Technologies, Inc. All rights reverved. +* Copyright (C) [2020-2022] Futurewei Technologies, Inc. All rights reverved. * * OpenArkFE is licensed under the Mulan PSL v2. * You can use this software according to the terms and conditions of the Mulan PSL v2. @@ -14,51 +14,69 @@ */ #include "ast_scope.h" +#include "gen_astdump.h" namespace maplefe { -ASTScope::ASTScope(ASTScope *parent) { - mParent = NULL; - mTree = NULL; - if (parent) - SetParent(parent); -} - void ASTScope::AddChild(ASTScope *s) { for (unsigned i = 0; i < mChildren.GetNum(); i++) { - ASTScope *scope = mChildren.ValueAtIndex(i); - if (s == scope) + if (s == GetChild(i)) { return; + } } mChildren.PushBack(s); s->SetParent(this); } -// We are using name address to decide if two names are equal, since we have a -// string pool with any two equal strings will be at the same address. 
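// ----------------------------------------------------------------------------
// [Editor's sketch -- not part of the patch] The removed comment above relied on
// the string pool guarantee that equal strings share one address; the patch
// switches the scope queries to interned string indices (GetStrIdx), so a decl
// lookup becomes an integer compare plus a walk up the parent scopes when
// 'deep' is requested. A standalone sketch of interning plus scope-chain
// lookup; StringPool and ScopeStub are simplified stand-ins, not the MapleFE
// classes.
// ----------------------------------------------------------------------------
#include <iostream>
#include <string>
#include <unordered_map>
#include <vector>

class StringPool {                       // interns a name to a small integer
public:
  unsigned GetStrIdx(const std::string& s) {
    auto it = mIdx.find(s);
    if (it != mIdx.end()) return it->second;
    unsigned idx = static_cast<unsigned>(mIdx.size()) + 1;  // 0 means "no name"
    mIdx.emplace(s, idx);
    return idx;
  }
private:
  std::unordered_map<std::string, unsigned> mIdx;
};

struct DeclStub { unsigned mStrIdx; };

struct ScopeStub {
  ScopeStub* mParent = nullptr;
  std::vector<DeclStub> mDecls;

  // Mirrors FindDeclOf(stridx, deep): local decls first, then parent scopes.
  const DeclStub* FindDeclOf(unsigned stridx, bool deep) const {
    for (const ScopeStub* s = this; s; s = deep ? s->mParent : nullptr)
      for (const DeclStub& d : s->mDecls)
        if (d.mStrIdx == stridx) return &d;
    return nullptr;
  }
};

int main() {
  StringPool pool;
  ScopeStub global, local;
  local.mParent = &global;
  global.mDecls.push_back({pool.GetStrIdx("console")});
  std::cout << "found in parent: " << std::boolalpha
            << (local.FindDeclOf(pool.GetStrIdx("console"), true) != nullptr)
            << "\n";
  return 0;
}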
-TreeNode* ASTScope::FindDeclOf(IdentifierNode *inode) { - for (unsigned i = 0; i < GetDeclNum(); i++) { - TreeNode *tree = GetDecl(i); - if (tree->IsIdentifier()) { - IdentifierNode *id = (IdentifierNode*)tree; - MASSERT(id->GetType() && "Identifier has no type?"); +// This is to find the decl having the name as stridx +// starting from local scope +TreeNode* ASTScope::FindDeclOf(unsigned stridx, bool deep) { + ASTScope *scope = this; + while (scope) { + for (unsigned i = 0; i < scope->GetDeclNum(); i++) { + TreeNode *tree = scope->GetDecl(i); + if (tree->GetStrIdx() == stridx) { + return tree; + } } - if (tree->GetName() == inode->GetName()) + for (unsigned i = 0; i < scope->GetImportedDeclNum(); i++) { + TreeNode *tree = scope->GetImportedDecl(i); + if (tree->GetStrIdx() == stridx) { + return tree; + } + } + // search parent scope if deep is set + scope = deep ? scope->mParent : NULL; + } + return NULL; +} + +// This is to find the exported decl having the name as stridx +TreeNode* ASTScope::FindExportedDeclOf(unsigned stridx) { + for (unsigned i = 0; i < GetExportedDeclNum(); i++) { + TreeNode *tree = GetExportedDecl(i); + if (tree->GetStrIdx() == stridx) { return tree; + } } return NULL; } -// This is to find the type having the same name as 'inode'. +// This is to find the type having the name as stridx. // -// We are using name address to decide if two names are equal, since we have a -// string pool with any two equal strings will be at the same address. -TreeNode* ASTScope::FindTypeOf(IdentifierNode *inode) { - for (unsigned i = 0; i < GetTypeNum(); i++) { - TreeNode *tree = GetType(i); - if (tree->GetName() == inode->GetName()) - return tree; +// starting from local scope +TreeNode* ASTScope::FindTypeOf(unsigned stridx) { + ASTScope *scope = this; + while (scope) { + for (unsigned i = 0; i < scope->GetTypeNum(); i++) { + TreeNode *tree = scope->GetType(i); + if (tree->GetStrIdx() == stridx) { + return tree; + } + } + // search parent scope + scope = scope->mParent; } return NULL; } @@ -78,8 +96,8 @@ void ASTScope::TryAddDecl(TreeNode *tree) { mDecls.PushBack(inode); } else if (tree->IsVarList()) { VarListNode *vl = (VarListNode*)tree; - for (unsigned i = 0; i < vl->GetNum(); i++) { - IdentifierNode *inode = vl->VarAtIndex(i); + for (unsigned i = 0; i < vl->GetVarsNum(); i++) { + IdentifierNode *inode = vl->GetVarAtIndex(i); if (inode->GetType()) mDecls.PushBack(inode); } @@ -117,4 +135,86 @@ ASTScope* ASTScopePool::NewScope(ASTScope *parent) { mScopes.push_back(s); return s; } + +bool ASTScope::IsAncestor(ASTScope *ancestor) { + ASTScope *p = this; + while (p) { + if (p == ancestor) { + return true; + } + p = p->GetParent(); + } + return false; +} + +void ASTScope::Dump(unsigned indent) { + mTree->DumpIndentation(indent); + std::cout << "scope: " << AstDump::GetEnumNodeKind(mTree->GetKind()) << " " << mTree->GetName() << " " << mTree->GetNodeId() << std::endl; + for (unsigned i = 0; i < GetDeclNum(); i++) { + TreeNode *node = GetDecl(i); + std::string str = ""; + switch (node->GetKind()) { + case NK_Identifier: str = " var: "; break; + case NK_Decl: str = " decl: "; break; + case NK_Function: str = " func: "; break; + case NK_Struct: str = " struct: "; break; + case NK_Class: str = " class: "; break; + case NK_Namespace: str = " namespace: "; break; + } + if (str.length()) { + node->DumpIndentation(indent); + std::string name = node->GetStrIdx() ? 
node->GetName() : "-"; + std::cout << str << name << " " << node->GetNodeId() << std::endl; + } + } + + for (unsigned i = 0; i < GetImportedDeclNum(); i++) { + TreeNode *node = GetImportedDecl(i); + std::string str = ""; + switch (node->GetKind()) { + case NK_Identifier: str = " var: - Imported "; break; + case NK_Decl: str = " decl: - Imported "; break; + case NK_Function: str = " func: - Imported "; break; + case NK_Struct: str = " struct: - Imported "; break; + case NK_Class: str = " class: - Imported "; break; + case NK_Namespace: str = " namespace: - Imported "; break; + } + if (str.length()) { + node->DumpIndentation(indent); + std::string name = node->GetStrIdx() ? node->GetName() : "-"; + std::cout << str << name << " " << node->GetNodeId() << std::endl; + } + } + + for (unsigned i = 0; i < GetExportedDeclNum(); i++) { + TreeNode *node = GetExportedDecl(i); + std::string str = ""; + switch (node->GetKind()) { + case NK_Identifier: str = " var: - Exported "; break; + case NK_Decl: str = " decl: - Exported "; break; + case NK_Function: str = " func: - Exported "; break; + case NK_Struct: str = " struct: - Exported "; break; + case NK_Class: str = " class: - Exported "; break; + case NK_Namespace: str = " namespace: - Exported "; break; + } + if (str.length()) { + node->DumpIndentation(indent); + std::string name = node->GetStrIdx() ? node->GetName() : "-"; + std::cout << str << name << " " << node->GetNodeId() << std::endl; + } + } + + for (unsigned i = 0; i < GetTypeNum(); i++) { + TreeNode *node = GetType(i); + node->DumpIndentation(indent); + std::string name = node->GetStrIdx() ? node->GetName() : "-"; + std::cout << " type: " << name << " " << node->GetTypeIdx() << std::endl; + } + + for (unsigned i = 0; i < GetChildrenNum(); i++) { + ASTScope *scope = GetChild(i); + scope->Dump(indent + 2); + } +} + } diff --git a/src/MapleFE/shared/src/ast_type.cpp b/src/MapleFE/shared/src/ast_type.cpp index ac170c9028beaef569783c841bc1a9201326eb05..74ab86984068083463faea968862b181c685d6b0 100644 --- a/src/MapleFE/shared/src/ast_type.cpp +++ b/src/MapleFE/shared/src/ast_type.cpp @@ -1,5 +1,5 @@ /* -* Copyright (C) [2020] Futurewei Technologies, Inc. All rights reverved. +* Copyright (C) [2020-2022] Futurewei Technologies, Inc. All rights reverved. * * OpenArkFE is licensed under the Mulan PSL v2. * You can use this software according to the terms and conditions of the Mulan PSL v2. 
@@ -16,8 +16,8 @@ #include #include "ast_type.h" -#include "gen_type.h" // for language specific type keyword #include "ruletable.h" +#include "rule_summary.h" #include "ast.h" #include "massert.h" @@ -27,15 +27,65 @@ namespace maplefe { // UserTypeNode // ////////////////////////////////////////////////////////////////////////// -void UserTypeNode::AddTypeArgs(TreeNode *args) { - if (args->IsIdentifier()) { - IdentifierNode *inode = (IdentifierNode*)args; - AddTypeArg(inode); +void UserTypeNode::AddUnionInterType(TreeNode *args, bool front) { + if (args->IsIdentifier() || + args->IsPrimType() || + args->IsPrimArrayType() || + args->IsUserType() || + args->IsLiteral() || + args->IsLambda() || + args->IsTypeOf() || + args->IsTupleType() || + args->IsArrayElement() || + args->IsConditionalType() || + args->IsNew() || + args->IsKeyOf() || + args->IsImport() || + args->IsField() || + args->IsTemplateLiteral() || + args->IsStruct()) { + mUnionInterTypes.PushBack(args); + if (front) { + for(unsigned i = mUnionInterTypes.GetNum() - 1; i > 0; --i) + mUnionInterTypes.SetElem(i, mUnionInterTypes.ValueAtIndex(i-1)); + mUnionInterTypes.SetElem(0, args); + } + SETPARENT(args); + } else if (args->IsPass()) { + PassNode *p = (PassNode*)args; + for (unsigned i = 0; i < p->GetChildrenNum(); i++) { + TreeNode *a = p->GetChild(i); + AddTypeGeneric(a); + } + } else { + MASSERT(0 && "Unsupported tree node in UserTypeNode::AddUnionInterType()"); + } +} + +void UserTypeNode::AddTypeGeneric(TreeNode *args) { + if (args->IsIdentifier() || + args->IsPrimType() || + args->IsPrimArrayType() || + args->IsUserType() || + args->IsTypeParameter() || + args->IsLiteral() || + args->IsTypeOf() || + args->IsArrayElement() || + args->IsStruct() || + args->IsTupleType() || + args->IsLambda() || + args->IsKeyOf() || + args->IsField() || + args->IsConditionalType() || + args->IsTemplateLiteral() || + args->IsInfer()) { + mTypeGenerics.PushBack(args); + SETPARENT(args); } else if (args->IsPass()) { PassNode *p = (PassNode*)args; for (unsigned i = 0; i < p->GetChildrenNum(); i++) { TreeNode *a = p->GetChild(i); - AddTypeArgs(a); + AddTypeGeneric(a); } } else { MASSERT(0 && "Unsupported tree node in UserTypeNode::AddTypeArgs()"); @@ -46,25 +96,60 @@ void UserTypeNode::AddTypeArgs(TreeNode *args) { bool UserTypeNode::TypeEquivalent(UserTypeNode *type) { // For now, I just check the name. The name is in the global string pool, // so two same names should be in the same address. 
- if (GetName() == type->GetName()) + if (GetStrIdx() == type->GetStrIdx()) return true; else return false; } void UserTypeNode::Dump(unsigned ind) { - mId->Dump(0); - unsigned size = mTypeArguments.GetNum(); + if (mType == UT_Union) + DUMP0_NORETURN("union "); + else if (mType == UT_Inter) + DUMP0_NORETURN("intersect "); + + if (mId) + mId->Dump(0); + + unsigned size = mTypeGenerics.GetNum(); if (size > 0) { DUMP0_NORETURN('<'); for (unsigned i = 0; i < size; i++) { - IdentifierNode *inode = mTypeArguments.ValueAtIndex(i); + TreeNode *inode = mTypeGenerics.ValueAtIndex(i); inode->Dump(0); if (i < size - 1) DUMP0_NORETURN(','); } DUMP0_NORETURN('>'); } + + size = mUnionInterTypes.GetNum(); + if (size > 0) { + DUMP0_NORETURN(" = "); + for (unsigned i = 0; i < size; i++) { + TreeNode *inode = mUnionInterTypes.ValueAtIndex(i); + inode->Dump(0); + if (i < size - 1) { + if (mType == UT_Union) + DUMP0_NORETURN(" | "); + else if (mType == UT_Inter) + DUMP0_NORETURN(" & "); + } + } + } + + if (mDims) { + for (unsigned i = 0; i < GetDimsNum(); i++) + DUMP0_NORETURN("[]"); + } +} + +////////////////////////////////////////////////////////////////////////// +// ArrayTypeNode // +////////////////////////////////////////////////////////////////////////// + +void ArrayTypeNode::Dump(unsigned ind) { + DUMP0_NORETURN("array-TBD"); } ////////////////////////////////////////////////////////////////////////// @@ -80,7 +165,7 @@ void PrimArrayTypeNode::Dump(unsigned ind) { ////////////////////////////////////////////////////////////////////////// static const char* FindPrimTypeName(TypeId id) { - for (unsigned i = 0; i < TY_NA; i++) { + for (unsigned i = 0; i < TypeKeywordTableSize; i++) { if (TypeKeywordTable[i].mId == id) return TypeKeywordTable[i].mText; } @@ -88,7 +173,7 @@ static const char* FindPrimTypeName(TypeId id) { } static TypeId FindPrimTypeId(const char *keyword) { - for (unsigned i = 0; i < TY_NA; i++) { + for (unsigned i = 0; i < TypeKeywordTableSize; i++) { if (strncmp(TypeKeywordTable[i].mText, keyword, strlen(keyword)) == 0 && strlen(keyword) == strlen(TypeKeywordTable[i].mText)) return TypeKeywordTable[i].mId; @@ -96,42 +181,64 @@ static TypeId FindPrimTypeId(const char *keyword) { return TY_NA; } +////////////////////////////////////////////////////////////////////////// +// FunctionTypeNode // +////////////////////////////////////////////////////////////////////////// + +bool FunctionTypeNode::IsEqual(FunctionTypeNode *node) { + bool result = true; + if (node->GetParamsNum() != GetParamsNum()) { + result = false; + } else { + for (unsigned i = 0; i < GetParamsNum(); i++) { + if (node->GetParam(i) != GetParam(i)) { + result = false; + break; + } + } + } + return result; +} + +void FunctionTypeNode::Dump(unsigned ind) { + DUMP0_NORETURN("functiontype-TBD"); +} + ////////////////////////////////////////////////////////////////////////// // PrimTypeNode // ////////////////////////////////////////////////////////////////////////// -const char* PrimTypeNode::GetName() { +const char* PrimTypeNode::GetTypeName() { const char *name = FindPrimTypeName(GetPrimType()); return name; } void PrimTypeNode::Dump(unsigned indent) { + if (mIsUnique) + DUMP0_NORETURN("unique "); + DumpIndentation(indent); - DUMP0_NORETURN(GetName()); + DUMP0_NORETURN(GetTypeName()); } ////////////////////////////////////////////////////////////////////////// // PrimTypePool // ////////////////////////////////////////////////////////////////////////// -// The global Pool for +// The global Pool for PrimTypePool 
gPrimTypePool; -PrimTypePool::PrimTypePool() { - // 1024 per block could be better. - mTreePool.SetBlockSize(1024); - Init(); -} +PrimTypePool::PrimTypePool() {} PrimTypePool::~PrimTypePool() { mTypes.Release(); } void PrimTypePool::Init() { - for (unsigned i = 0; i < TY_NA; i++) { - PrimTypeNode *n = (PrimTypeNode*)mTreePool.NewTreeNode(sizeof(PrimTypeNode)); + for (unsigned i = 0; i < TypeKeywordTableSize; i++) { + PrimTypeNode *n = (PrimTypeNode*)gTreePool.NewTreeNode(sizeof(PrimTypeNode)); new (n) PrimTypeNode(); - n->SetPrimType((TypeId)i); + n->SetPrimType((TypeId)TypeKeywordTable[i].mId); mTypes.PushBack(n); } } @@ -145,7 +252,7 @@ PrimTypeNode* PrimTypePool::FindType(const char *keyword) { } PrimTypeNode* PrimTypePool::FindType(TypeId id) { - for (unsigned i = 0; i < TY_NA; i++) { + for (unsigned i = 0; i < TypeKeywordTableSize; i++) { PrimTypeNode *type_float = mTypes.ValueAtIndex(6); PrimTypeNode *type = mTypes.ValueAtIndex(i); if (type->GetPrimType() == id) diff --git a/src/MapleFE/shared/src/container.cpp b/src/MapleFE/shared/src/container.cpp index ac5de846898990cc2d4ba5e58dc6485ef9a7e757..923c67715a734ca319d914a062f616c8abffbea2 100644 --- a/src/MapleFE/shared/src/container.cpp +++ b/src/MapleFE/shared/src/container.cpp @@ -18,6 +18,7 @@ ////////////////////////////////////////////////////////////////////////////// #include +#include #include "container.h" #include "massert.h" @@ -25,9 +26,8 @@ namespace maplefe { char* ContainerMemPool::AddrOfIndex(unsigned index) { - unsigned num_in_blk = mBlockSize / mElemSize; - unsigned blk = index / num_in_blk; - unsigned index_in_blk = index % num_in_blk; + unsigned blk = index / mElemNumPerBlock; + unsigned index_in_blk = index % mElemNumPerBlock; Block *block = mBlocks; for (unsigned i = 0; i < blk; i++) { @@ -37,4 +37,140 @@ char* ContainerMemPool::AddrOfIndex(unsigned index) { char *addr = block->addr + index_in_blk * mElemSize; return addr; } + +//////////////////////////////////////////////////////////////////////////////// +// Bit Vector +//////////////////////////////////////////////////////////////////////////////// + +BitVector::BitVector() : mBVSize(0) { + SetBlockSize(1024); +} + +BitVector::BitVector(unsigned n) : mBVSize(n) { + SetBlockSize(1024); + Alloc(n); +} + +void BitVector::ClearBit(unsigned idx) { + unsigned byte_idx = idx / 8; + unsigned blk_idx = byte_idx / mBlockSize; + Block *block = mBlocks; + for (unsigned i = 0; i < blk_idx; i++) { + block = block->next; + if (!block) + MERROR("ClearBit at unknown location."); + } + + unsigned bit_idx = idx % 8; + char mask = ~(1 << bit_idx); + + char *addr = block->addr + byte_idx % mBlockSize; + *addr = (*addr) & mask; +} + +void BitVector::SetBit(unsigned idx) { + unsigned byte_idx = idx / 8; + unsigned blk_idx = byte_idx / mBlockSize; + Block *block = mBlocks; + unsigned block_num = 0; + for (; block && (block_num < blk_idx); block_num++) { + block = block->next; + } + + // Out of memory. Need to allocate. + // For each block allocated, the random data need be wiped off. + if (!block) { + unsigned blocks_to_alloc = blk_idx + 1 - block_num; + for (unsigned i = 0; i < blocks_to_alloc; i++) { + char *addr = AllocBlock(); + memset((void*)addr, 0, mBlockSize); + } + + // get the block again + block = mBlocks; + for (unsigned i = 0; i < blk_idx; i++) + block = block->next; + } + + unsigned bit_idx = idx % 8; + char *addr = block->addr + byte_idx % mBlockSize; + *addr = (*addr) | (1 << bit_idx); +} + +// return true if the bit is set, or else false. 
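// ----------------------------------------------------------------------------
// [Editor's sketch -- not part of the patch] ClearBit()/SetBit() above locate a
// bit with the usual two-step arithmetic of a flat bit vector: byte index =
// idx / 8, bit index = idx % 8, then mask that one bit in or out; the project
// version additionally walks its block list and allocates zeroed blocks on
// demand. A minimal flat-storage analogue of the index math (no block chaining):
// ----------------------------------------------------------------------------
#include <iostream>
#include <vector>

class TinyBitVector {
public:
  void SetBit(unsigned idx) {
    Grow(idx);
    mBytes[idx / 8] |= static_cast<unsigned char>(1u << (idx % 8));
  }
  void ClearBit(unsigned idx) {
    Grow(idx);
    mBytes[idx / 8] &= static_cast<unsigned char>(~(1u << (idx % 8)));
  }
  bool GetBit(unsigned idx) const {
    if (idx / 8 >= mBytes.size()) return false;   // untouched bits read as 0
    return (mBytes[idx / 8] & (1u << (idx % 8))) != 0;
  }
private:
  void Grow(unsigned idx) {                       // zero-filled, like the wiped blocks
    if (idx / 8 >= mBytes.size()) mBytes.resize(idx / 8 + 1, 0);
  }
  std::vector<unsigned char> mBytes;
};

int main() {
  TinyBitVector bv;
  bv.SetBit(3);
  bv.SetBit(1000);                                // grows on demand
  bv.ClearBit(3);
  std::cout << bv.GetBit(3) << " " << bv.GetBit(1000) << " " << bv.GetBit(7) << "\n";
  // prints: 0 1 0
  return 0;
}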
+bool BitVector::GetBit(unsigned idx) { + unsigned byte_idx = idx / 8; + unsigned blk_idx = byte_idx / mBlockSize; + Block *block = mBlocks; + unsigned block_num = 0; + for (; block && (block_num < blk_idx); block_num++) { + block = block->next; + } + + // Out of memory. Need to allocate. + // For each block allocated, the random data need be wiped off. + if (!block) { + unsigned blocks_to_alloc = blk_idx + 1 - block_num; + for (unsigned i = 0; i < blocks_to_alloc; i++) { + char *addr = AllocBlock(); + memset((void*)addr, 0, mBlockSize); + } + + // get the block again + block = mBlocks; + for (unsigned i = 0; i < blk_idx; i++) + block = block->next; + } + + unsigned bit_idx = idx % 8; + char *addr = block->addr + byte_idx % mBlockSize; + unsigned data = (*addr) & (1 << bit_idx); + if (data != 0) + return true; + else + return false; +} + +// bit wise EQUAL +bool BitVector::Equal(BitVector *bv) { + char *addr = mBlocks->addr; + char *bvaddr = bv->mBlocks->addr; + if (mBVSize != bv->mBVSize) { + return false; + } + + MASSERT(mBVSize < 1024 && "NYI: BitVector length > 1024"); + + for (int i = 0; i < (mBVSize + 3)/4; i++) { + if (*(unsigned *)(addr + i*4) != *(unsigned*)(bvaddr + i*4)) { + return false; + } + } + + return true; +} + +// bit wise AND +void BitVector::And(BitVector *bv) { + char *addr = mBlocks->addr; + char *bvaddr = bv->mBlocks->addr; + MASSERT(mBVSize == bv->mBVSize && "BitVector length not equal"); + MASSERT(mBVSize < 1024 && "NYI: BitVector length > 1024"); + + for (int i = 0; i < (mBVSize + 3)/4; i++) { + *(unsigned *)(addr + i*4) &= *(unsigned*)(bvaddr + i*4); + } +} + +// bit wise OR +void BitVector::Or(BitVector *bv) { + char *addr = mBlocks->addr; + char *bvaddr = bv->mBlocks->addr; + MASSERT(mBVSize == bv->mBVSize && "BitVector length not equal"); + MASSERT(mBVSize < 1024 && "NYI: BitVector length > 1024"); + + for (int i = 0; i < (mBVSize + 3)/4; i++) { + *(unsigned *)(addr + i*4) |= *(unsigned*)(bvaddr + i*4); + } +} } diff --git a/src/MapleFE/shared/src/fileread.cpp b/src/MapleFE/shared/src/fileread.cpp index bb038660434317ec91a529b66ed83924dccf15ea..491d6684b7df5a80b029332bf9af64d595659e9b 100644 --- a/src/MapleFE/shared/src/fileread.cpp +++ b/src/MapleFE/shared/src/fileread.cpp @@ -131,7 +131,7 @@ bool FileReader::SkipTRAComment(){ Assert(0, "No ending */ of traditional comment)"); } } - return false; + return false; } // Skip the next separator designated by 'c'. @@ -173,7 +173,7 @@ bool FileReader::ReadLineNonEmpty(){ if (mCurLine.size() > 0) return true; } - return false; + return false; } // Read line from the file, and return the number of read chars. diff --git a/src/MapleFE/shared/src/lexer.cpp b/src/MapleFE/shared/src/lexer.cpp index 87f83e3689d3e39c78954aef24ade144ddad2437..694f86a8edafedea9ff0584a140a9b0e25777951 100644 --- a/src/MapleFE/shared/src/lexer.cpp +++ b/src/MapleFE/shared/src/lexer.cpp @@ -1,5 +1,5 @@ /* -* Copyright (C) [2020] Futurewei Technologies, Inc. All rights reverved. +* Copyright (C) [2020-2022] Futurewei Technologies, Inc. All rights reverved. * * OpenArkFE is licensed under the Mulan PSL v2. * You can use this software according to the terms and conditions of the Mulan PSL v2. 
@@ -17,16 +17,12 @@ #include "massert.h" #include "lexer.h" #include "token.h" -#include "common_header_autogen.h" #include "ruletable_util.h" -#include "gen_summary.h" -#include "gen_token.h" +#include "rule_summary.h" #include "massert.h" #include #include -#include "ruletable_util.h" - namespace maplefe { #define MAX_LINE_SIZE 4096 @@ -44,18 +40,38 @@ int Lexer::ReadALine() { } current_line_size = getline(&line, &linebuf_size, srcfile); + _linenum++; + if (current_line_size <= 0) { // EOF fclose(srcfile); line[0] = '\0'; endoffile = true; } else { - if (line[current_line_size - 1] == '\n') { + // There could be \n\r or \r\n + // Handle the last escape + if ((line[current_line_size - 1] == '\n') || + (line[current_line_size - 1] == '\r')) { + line[current_line_size - 1] = '\0'; + current_line_size--; + } + // Handle the second last escape + if ((line[current_line_size - 1] == '\n') || + (line[current_line_size - 1] == '\r')) { line[current_line_size - 1] = '\0'; current_line_size--; } } curidx = 0; + + // There are some special UTF-8 encoding in the beginning of some file format, like BOM + // with \357\273\277. We skip this mark. + if ( *(line+curidx) == -17 && + *(line+curidx+1) == -69 && + *(line+curidx+2) == -65) { + curidx += 3; + } + return current_line_size; } @@ -73,6 +89,8 @@ Lexer::Lexer() endoffile(false), mPredefinedTokenNum(0), mTrace(false), + mLineMode(false), + _total_linenum(0), _linenum(0) { seencomments.clear(); mCheckSeparator = true; @@ -81,12 +99,21 @@ Lexer::Lexer() } void Lexer::PrepareForFile(const std::string filename) { - // open file + // Find the total line number in the file srcfile = fopen(filename.c_str(), "r"); if (!srcfile) { std::cerr << "cannot open file " << filename << std::endl; exit(1); } + while (getline(&line, &linebuf_size, srcfile) > 0) { + _total_linenum++; + } + + fclose(srcfile); + line[0] = '\0'; + + // open file + srcfile = fopen(filename.c_str(), "r"); // allocate line buffer. linebuf_size = (size_t)MAX_LINE_SIZE; @@ -96,114 +123,202 @@ void Lexer::PrepareForFile(const std::string filename) { } // try to read the first line - if (ReadALine() < 0) { - _linenum = 0; - } else { - _linenum = 1; - } + ReadALine(); } -/////////////////////////////////////////////////////////////////////////// -// Utilities for finding system tokens -// Remember the order of tokens are operators, separators, and keywords. -/////////////////////////////////////////////////////////////////////////// +void Lexer::PrepareForString(const char *str) { + current_line_size = strlen(str); + strncpy(line, str, current_line_size); + line[current_line_size] = '\0'; + curidx = 0; + _linenum = 1; + endoffile = false; +} -Token* Lexer::FindOperatorToken(OprId id) { - Token *token = NULL; - bool found = false; - for (unsigned i = 0; i < gOperatorTokensNum; i++) { - token = &gSystemTokens[i]; - MASSERT(token->mTkType == TT_OP); - if (token->GetOprId() == id) { - found = true; - break; +///////////////////////////////////////////////////////////////////////////// +// Both ClearLeadingNewLine() and AddEndingNewLine() will later be implemented +// as language specific, and they will be overriding functions. +///////////////////////////////////////////////////////////////////////////// + +//1. mLexer could cross the line if it's a template literal in Javascript. +//2. During some language lexing, like Typescript template literal, we +// may add \n in a place holder (the line to be lexed). This \n should +// be removed when lexing the expressions in place holder. 
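// ----------------------------------------------------------------------------
// [Editor's sketch -- not part of the patch] ReadALine() above normalizes each
// raw line: it strips up to two trailing end-of-line characters (covering both
// "\r\n" and "\n\r") and skips a UTF-8 byte-order mark (0xEF 0xBB 0xBF, which
// shows up as -17 -69 -65 when char is signed) at the start of a file. A
// standalone sketch of that normalization on a std::string:
// ----------------------------------------------------------------------------
#include <iostream>
#include <string>

// Returns the index of the first character after an optional UTF-8 BOM and
// removes trailing '\n' / '\r' characters (at most two, as in ReadALine()).
std::size_t NormalizeLine(std::string& line) {
  for (int i = 0; i < 2; ++i) {
    if (!line.empty() && (line.back() == '\n' || line.back() == '\r'))
      line.pop_back();
  }
  if (line.size() >= 3 &&
      static_cast<unsigned char>(line[0]) == 0xEF &&
      static_cast<unsigned char>(line[1]) == 0xBB &&
      static_cast<unsigned char>(line[2]) == 0xBF)
    return 3;                                 // curidx starts after the BOM
  return 0;
}

int main() {
  std::string line = "\xEF\xBB\xBFlet x = 1;\r\n";
  std::size_t curidx = NormalizeLine(line);
  std::cout << line.substr(curidx) << "\n";   // prints: let x = 1;
  return 0;
}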
+void Lexer::ClearLeadingNewLine() { + while (line[curidx] == '\n') { + curidx ++; + if (curidx == current_line_size) { + ReadALine(); + if (EndOfFile()) + return; } } - MASSERT(found && token); - return token; } -Token* Lexer::FindSeparatorToken(SepId id) { - Token *token = NULL; - bool found = false; - for (unsigned i = gOperatorTokensNum; i < gOperatorTokensNum + gSeparatorTokensNum; i++) { - token = &gSystemTokens[i]; - MASSERT(token->mTkType == TT_SP); - if (token->GetSepId() == id) { - found = true; - break; +// We are starting a new token, if current char is ' or ", +// it's the beginning of a string literal. We traverse until the end of the +// current line, if there is no ending ' or ", it means the string goes to next +// line and we add an ending \n, and concatenate the next line. +void Lexer::AddEndingNewLine() { + bool single_quote = false; + bool double_quote = false; + + if (line[curidx] == '\'') + single_quote = true; + if (line[curidx] == '\"') + double_quote = true; + + if (!single_quote && !double_quote) + return; + + unsigned working_idx = curidx + 1; + + // If we are in escape + bool in_escape = false; + + // Reading a raw data, meaning we get the data through getline() directly. + // If we do ReadALine(), it's not a raw data because the ending \n or \r are removed. + bool raw_data = false; + + while(1) { + + // We reach the end of the line, and not done yet. + // So read in a new line and add \n to the end if it's Not raw data. + if (working_idx == current_line_size) { + // Add ending NewLine + if (!raw_data) { + line[working_idx] = '\n'; + current_line_size++; + working_idx++; + } + + // Read new line. + char *new_buf = NULL; + size_t new_buf_size = 0; + ssize_t new_line_size = getline(&new_buf, &new_buf_size, srcfile); + if (new_line_size <= 0) { // EOF + fclose(srcfile); + MERROR("EOF Error reading multi-line string literal."); + } else { + // add new_buf to line + strncpy(line + working_idx, new_buf, new_line_size); + current_line_size += new_line_size; + } + + free(new_buf); + + in_escape = false; + raw_data = true; } - } - MASSERT(found && token); - return token; -} -// The caller of this function makes sure 'key' is already in the -// string pool of Lexer. -Token* Lexer::FindKeywordToken(const char *key) { - Token *token = NULL; - bool found = false; - for (unsigned i = gOperatorTokensNum + gSeparatorTokensNum; - i < gOperatorTokensNum + gSeparatorTokensNum + gKeywordTokensNum; - i++) { - token = &gSystemTokens[i]; - MASSERT(token->mTkType == TT_KW); - if (strlen(key) == strlen(token->GetName()) && - !strncmp(key, token->GetName(), strlen(key))) { - found = true; - break; + // Handle escape + if (line[working_idx] == '\\') { + if (!in_escape) { + in_escape = true; + working_idx++; + continue; + } } - } - MASSERT(found && token); - return token; -} -// CommentToken is the last predefined token -Token* Lexer::FindCommentToken() { - Token *token = &gSystemTokens[gSystemTokensNum - 1]; - MASSERT((token->mTkType == TT_CM) && "Last system token is not a comment token."); - return token; + // return if string literal end. + if (!in_escape && + ( (line[working_idx] == '\'' && single_quote) || + (line[working_idx] == '\"' && double_quote))) { + if (raw_data) { + // Need remove the ending \n or \r for the regular token reading. 
+ if ((line[current_line_size - 1] == '\n') || + (line[current_line_size - 1] == '\r')) { + line[current_line_size - 1] = '\0'; + current_line_size--; + } + // Handle the second last escape + if ((line[current_line_size - 1] == '\n') || + (line[current_line_size - 1] == '\r')) { + line[current_line_size - 1] = '\0'; + current_line_size--; + } + } + + // Finally We are done! + return; + } + + in_escape = false; + working_idx++; + } } ///////////////////////////////////////////////////////////////////////////// // ///////////////////////////////////////////////////////////////////////////// -// Read a token until end of file. -// If no remaining tokens in current line, we move to the next line. Token* Lexer::LexToken(void) { + ClearLeadingNewLine(); + AddEndingNewLine(); + if (EndOfFile()) + return NULL; + return LexTokenNoNewLine(); } // Read a token until end of line. // Return NULL if no token read. Token* Lexer::LexTokenNoNewLine(void) { + unsigned old_curidx = curidx; bool is_comment = GetComment(); if (is_comment) { - Token *t = FindCommentToken(); + Token *sys_t = FindCommentToken(); + Token *t = (Token*)mTokenPool.NewToken(sizeof(Token)); + *t = *sys_t; + t->mLineNum = _linenum; + t->mColNum = old_curidx; if (mTrace) t->Dump(); return t; } - // We try to get system tokens in the order of operator, separtor, and keyword - // This is the same as token_gen.cpp in autogen. There is a reason behind this. - // Some languages could have one synatx belonging to both separator and operators. - // eg., ':' in Java 8, it's both a separator colon and operator select. - // So the order here must be consistent with autogen where it decides ':' a colon or - // select in a rule. - OprId opr = GetOperator(); if (opr != OPR_NA) { - Token *t = FindOperatorToken(opr); + Token *sys_t = FindOperatorToken(opr); + Token *t = (Token*)mTokenPool.NewToken(sizeof(Token)); + *t = *sys_t; + t->mLineNum = _linenum; + t->mColNum = old_curidx; if (mTrace) t->Dump(); return t; } + // There is a corner case: .2 + // The dot is lexed as separator, and 2 is an integer. But actually it's a decimal. 
SepId sep = GetSeparator(); + unsigned new_curidx = curidx; + if (sep != SEP_NA) { - Token *t = FindSeparatorToken(sep); + if (sep == SEP_Dot) { + // restore curidx + curidx = old_curidx; + // try decimal literal + LitData ld = GetLiteral(); + if (ld.mType != LT_NA) { + MASSERT(ld.mType == LT_FPLiteral || ld.mType == LT_DoubleLiteral); + Token *t = (Token*)mTokenPool.NewToken(sizeof(Token)); + t->mLineNum = _linenum; + t->mColNum = old_curidx; + t->SetLiteral(ld); + if (mTrace) + t->Dump(); + return t; + } else { + curidx = new_curidx; + } + } + + Token *sys_t = FindSeparatorToken(sep); + Token *t = (Token*)mTokenPool.NewToken(sizeof(Token)); + *t = *sys_t; + t->mLineNum = _linenum; + t->mColNum = old_curidx; if (mTrace) t->Dump(); return t; @@ -211,7 +326,11 @@ Token* Lexer::LexTokenNoNewLine(void) { const char *keyword = GetKeyword(); if (keyword != NULL) { - Token *t = FindKeywordToken(keyword); + Token *sys_t = FindKeywordToken(keyword); + Token *t = (Token*)mTokenPool.NewToken(sizeof(Token)); + *t = *sys_t; + t->mLineNum = _linenum; + t->mColNum = old_curidx; if (mTrace) t->Dump(); return t; @@ -219,7 +338,9 @@ Token* Lexer::LexTokenNoNewLine(void) { LitData ld = GetLiteral(); if (ld.mType != LT_NA) { - Token *t = (Token*)mTokenPool.NewToken(sizeof(Token)); + Token *t = (Token*)mTokenPool.NewToken(sizeof(Token)); + t->mLineNum = _linenum; + t->mColNum = old_curidx; t->SetLiteral(ld); if (mTrace) t->Dump(); @@ -228,8 +349,21 @@ Token* Lexer::LexTokenNoNewLine(void) { const char *identifier = GetIdentifier(); if (identifier != NULL) { - Token *t = (Token*)mTokenPool.NewToken(sizeof(Token)); + Token *t = (Token*)mTokenPool.NewToken(sizeof(Token)); t->SetIdentifier(identifier); + t->mLineNum = _linenum; + t->mColNum = old_curidx; + if (mTrace) + t->Dump(); + return t; + } + + TempLitData* tldata = GetTempLit(); + if (tldata != NULL) { + Token *t = (Token*)mTokenPool.NewToken(sizeof(Token)); + t->mLineNum = _linenum; + t->mColNum = old_curidx; + t->SetTempLit(tldata); if (mTrace) t->Dump(); return t; @@ -238,6 +372,98 @@ Token* Lexer::LexTokenNoNewLine(void) { return NULL; } +// We only look for the reg expr ending with / and a few flags like 'g'. +// Flags include: d, g, i, m, s, u, y. +// Anything else finishes the flag. +// +// The content in the reg expr could be any character, we just allow +// all char excluding /. +// +// [NOTE] This function will later be implemented as an overriden function +// of a child class of Lexer. Each lang will have its own +// implementation of this function. +Token* Lexer::FindRegExprToken() { + + // for a regular expr, /a\b/g + // curidx is pointing to 'a' right now. + unsigned old_cur_idx = curidx; + unsigned work_idx = curidx; + unsigned expr_beg_idx = curidx; // the first char of reg expr. + unsigned expr_length = 0; + unsigned flag_beg_idx = 0; // the first char of flags. + unsigned flag_length = 0; // the number of char of flags. + + bool on_flags = false; + + // In Typescript, [ ] includes characters and the escape inside + // is defferent than outside. / is considered non-escape. + bool on_bracket = false; // + + while (work_idx < current_line_size) { + if (line[work_idx] == '[') { + on_bracket = true; + expr_length++; + } else if (on_bracket && line[work_idx] == ']') { + on_bracket = false; + expr_length++; + } else if (line[work_idx] == '/') { + if (on_bracket) { + expr_length++; + } else { + flag_beg_idx = work_idx + 1; + on_flags = true; + } + } else if (line[work_idx] == '\\') { + // An escape. 
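+ // The backslash and the character it escapes both belong to the expression
+ // body, so they are consumed together (two characters at a time).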
+ expr_length += 2; + work_idx += 2; + continue; + } else if (on_flags) { + if (line[work_idx] == 'd' || + line[work_idx] == 'g' || + line[work_idx] == 'i' || + line[work_idx] == 'm' || + line[work_idx] == 's' || + line[work_idx] == 'u' || + line[work_idx] == 'y') + flag_length++; + else + break; + } else { + expr_length++; + } + work_idx++; + } + + if (expr_length > 0) { + // set curidx + curidx = work_idx; + + const char *addr_expr = NULL; + std::string s(line + expr_beg_idx, expr_length); + addr_expr = gStringPool.FindString(s); + + const char *addr_flag = NULL; + if (flag_length > 0) { + std::string sf(line + flag_beg_idx, flag_length); + addr_flag = gStringPool.FindString(sf); + } + + RegExprData reg = {addr_expr, addr_flag}; + + Token *t = (Token*)mTokenPool.NewToken(sizeof(Token)); + t->SetRegExpr(reg); + if (mTrace) { + std::cout << "Find a reg expr: "; + t->Dump(); + } + return t; + + } else { + return NULL; + } +} + // Returen the separator ID, if it's. Or SEP_NA. SepId Lexer::GetSeparator() { return TraverseSepTable(); @@ -310,14 +536,24 @@ LitData Lexer::GetLiteral() { // This is the end of line // (2) /* .. */ // This is the traditional comments +// (3) #! +// This is the common Shebang. We takes it as a comment. // // Return true if a comment is read. The contents are ignore. bool Lexer::GetComment() { + if (FindTripleSlash()) + return false; + if (line[curidx] == '/' && line[curidx+1] == '/') { curidx = current_line_size; return true; } + if (line[curidx] == '#' && line[curidx+1] == '!') { + curidx = current_line_size; + return true; + } + // Handle comments in /* */ // If there is a /* without ending */, the rest of code until the end of the current // source file will be treated as comment. @@ -333,7 +569,6 @@ bool Lexer::GetComment() { len = ReadALine(); if (len < 0) return true; - _linenum++; // a new line read. } if ((line[curidx] == '*' && line[curidx+1] == '/')) { get_ending = true; @@ -414,14 +649,23 @@ bool Lexer::TraverseTableData(TableData *data) { case DT_String: { if( !strncmp(line + curidx, data->mData.mString, strlen(data->mData.mString))) { + bool special_need_check = false; + if (!strncmp(data->mData.mString, "false", 5) && (strlen(data->mData.mString) == 5)) + special_need_check = true; + if (!strncmp(data->mData.mString, "true", 4) && (strlen(data->mData.mString) == 4)) + special_need_check = true; // Need to make sure the following text is a separator curidx += strlen(data->mData.mString); - if (mCheckSeparator && (TraverseSepTable() != SEP_NA) && (TraverseOprTable() != OPR_NA)) { - // TraverseSepTable() moves 'curidx', need restore it - curidx = old_pos + strlen(data->mData.mString); - // Put into gStringPool - gStringPool.FindString(data->mData.mString); - found = true; + if (mCheckSeparator || special_need_check) { + if ((TraverseSepTable() != SEP_NA) || + (TraverseOprTable() != OPR_NA) || + EndOfLine()) { + // TraverseSepTable() moves 'curidx', need restore it + curidx = old_pos + strlen(data->mData.mString); + // Put into gStringPool + gStringPool.FindString(data->mData.mString); + found = true; + } } else { found = true; } @@ -472,7 +716,7 @@ bool Lexer::MatchToken(Token *token) { case TT_OP: { // Pick the longest matching operator. 
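+ // OprTable can hold more than one spelling for the same operator id, so the
+ // whole table is scanned and the longest text matching at curidx is kept.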
unsigned longest = 0; - for (unsigned i = 0; i < OPR_NA; i++) { + for (unsigned i = 0; i < OprTableSize; i++) { OprTableEntry e = OprTable[i]; if ((e.mId == token->GetOprId()) && !strncmp(line + curidx, e.mText, strlen(e.mText))) { @@ -490,7 +734,7 @@ bool Lexer::MatchToken(Token *token) { case TT_SP: { // Pick the longest matching separator. unsigned longest = 0; - for (unsigned i = 0; i < SEP_NA; i++) { + for (unsigned i = 0; i < SepTableSize; i++) { SepTableEntry e = SepTable[i]; if ((e.mId == token->GetSepId()) && !strncmp(line + curidx, e.mText, strlen(e.mText))) { @@ -656,11 +900,35 @@ bool Lexer::TraverseSecondTry(const RuleTable *rule_table) { bool Lexer::Traverse(const RuleTable *rule_table) { + if (rule_table == &TblUTF8) { + char c = *(line + curidx); + unsigned i = (unsigned)c; + if(i >= 0x80) { + curidx += 1; + return true; + } else { + return false; + } + } + // CHAR, DIGIT are reserved rules. It should NOT be changed. We can // expediate the lexing. if (rule_table == &TblCHAR) { char c = *(line + curidx); - if( (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z')) { + if((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z')) { + curidx += 1; + return true; + } else { + return false; + } + } + + // + // [NOTE] Since there is no way to describe special char in .spec files, we decided + // to handle here. + if (rule_table == &TblIRREGULAR_CHAR) { + char c = *(line + curidx); + if(c == '\n' || c == '\\' || (unsigned)c == 127) { curidx += 1; return true; } else { @@ -721,7 +989,7 @@ bool Lexer::Traverse(const RuleTable *rule_table) { } curidx = new_pos; - matched = found; + matched = found; break; } @@ -822,7 +1090,7 @@ const char* Lexer::TraverseKeywordTable() { if (addr) { unsigned saved_curidx = curidx; - // It's a keyword only if the following is a separator + // It's a keyword if the following is a separator // End of current line is a separator too. curidx += len; if ((current_line_size == curidx) || (TraverseSepTable() != SEP_NA)) { @@ -830,10 +1098,24 @@ const char* Lexer::TraverseKeywordTable() { curidx = saved_curidx + len; addr = gStringPool.FindString(addr); return addr; - } else { - // failed, restore curidx - curidx = saved_curidx; } + + // It's a keyword if the following is a operator + curidx = saved_curidx + len; + if ((TraverseOprTable() != OPR_NA)) { + curidx = saved_curidx + len; + addr = gStringPool.FindString(addr); + return addr; + } + + curidx = saved_curidx + len; + if (CharIsSeparator(line[curidx])) { + addr = gStringPool.FindString(addr); + return addr; + } + + // failed, restore curidx + curidx = saved_curidx; } return NULL; } diff --git a/src/MapleFE/shared/src/mempool.cpp b/src/MapleFE/shared/src/mempool.cpp index 7636364d10a9216ca7c0fe93c821b89fda42b8c7..9de4915a7dbefd2de3ac881863ffec0461021b2d 100644 --- a/src/MapleFE/shared/src/mempool.cpp +++ b/src/MapleFE/shared/src/mempool.cpp @@ -23,7 +23,7 @@ ////////////////////////////////////////////////////////////////////////////// #include - +#include #include "mempool.h" #include "massert.h" @@ -129,8 +129,7 @@ void MemPool::Release(unsigned num) { MERROR("Release of num bytes failed."); } -// Removes all data in the memory pool. Reset everything to the beginning -// of the pool. But we keep the memory. +// free all blocks in the memory pool. But we keep the memory. void MemPool::Clear() { mCurrBlock = mBlocks; Block *temp_block = mCurrBlock; @@ -139,5 +138,15 @@ void MemPool::Clear() { temp_block = temp_block->next; } } + +// Wipe off all data. Keep the blocks. 
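+// WipeOff(c) overwrites every byte of every allocated block with the value c,
+// e.g. WipeOff(0) zero-fills the pool without freeing or shrinking it.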
+void MemPool::WipeOff(int c) { + Block *temp_block = mBlocks; + while(temp_block) { + memset((void*)temp_block->addr, c, mBlockSize); + temp_block = temp_block->next; + } +} + } diff --git a/src/MapleFE/shared/src/parser.cpp b/src/MapleFE/shared/src/parser.cpp index 6914855dca076c9bb02dc1c48fffcc9021405fef..a3f8139a95310f5d692004b8e687b0ca5c6bbca6 100644 --- a/src/MapleFE/shared/src/parser.cpp +++ b/src/MapleFE/shared/src/parser.cpp @@ -1,5 +1,5 @@ /* -* Copyright (C) [2020] Futurewei Technologies, Inc. All rights reverved. +* Copyright (C) [2020-2022] Futurewei Technologies, Inc. All rights reverved. * * OpenArkFE is licensed under the Mulan PSL v2. * You can use this software according to the terms and conditions of the Mulan PSL v2. @@ -21,16 +21,19 @@ #include "parser.h" #include "massert.h" #include "token.h" -#include "common_header_autogen.h" #include "ruletable_util.h" -#include "gen_summary.h" -#include "gen_token.h" +#include "rule_summary.h" #include "ast.h" #include "ast_builder.h" +#include "ast_mempool.h" +#include "ast_type.h" #include "parser_rec.h" +#include "ast_fixup.h" namespace maplefe { +SmallVector gTemplateLiteralNodes; + ////////////////////////////////////////////////////////////////////////////////// // Top Issues in Parsing System // @@ -182,15 +185,44 @@ namespace maplefe { ////////////////////////////////////////////////////////////////////////////////// Parser::Parser(const char *name) : filename(name) { - mLexer = new Lexer(); + mLexer = CreateLexer(); const std::string file(name); - gModule.SetFileName(name); + mASTModule = new (gTreePool.NewTreeNode(sizeof(ModuleNode))) ModuleNode(); + mASTModule->SetFilename(name); + mASTBuilder = new ASTBuilder(mASTModule); + gPrimTypePool.Init(); + + mAppealNodePool.SetBlockSize(16*4096); + + // get source language type + std::string::size_type lastDot = file.find_last_of('.'); + if (lastDot == std::string::npos) { + std::cout << "used improper source file" << std::endl; + return; + } + std::string fileExt = file.substr(lastDot); + if (fileExt.compare(".java") == 0) { + mASTModule->mSrcLang = SrcLangJava; + } else if (fileExt.compare(".js") == 0) { + mASTModule->mSrcLang = SrcLangJavaScript; + } else if (fileExt.compare(".ts") == 0) { + mASTModule->mSrcLang = SrcLangTypeScript; + } else if (fileExt.compare(".c") == 0) { + mASTModule->mSrcLang = SrcLangC; + } else { + mASTModule->mSrcLang = SrcLangUnknown; + } + mLexer->PrepareForFile(file); mCurToken = 0; mPending = 0; mEndOfFile = false; + mNormalModeRoot = NULL; + mLineModeRoot = NULL; + mLineMode = false; + mTraceTable = false; mTraceLeftRec = false; mTraceAppeal = false; @@ -220,33 +252,97 @@ void Parser::Dump() { void Parser::ClearFailed() { for (unsigned i = 0; i < RuleTableNum; i++) - gFailed[i].clear(); + gFailed[i].ClearAll(); } // Add one fail case for the table void Parser::AddFailed(RuleTable *table, unsigned token) { - //std::cout << " push " << mCurToken << " from " << table; - gFailed[table->mIndex].push_back(token); + gFailed[table->mIndex].SetBit(token); } // Remove one fail case for the table void Parser::ResetFailed(RuleTable *table, unsigned token) { - std::vector::iterator it = gFailed[table->mIndex].begin();; - for (; it != gFailed[table->mIndex].end(); it++) { - if (*it == token) - break; - } - - if (it != gFailed[table->mIndex].end()) - gFailed[table->mIndex].erase(it); + gFailed[table->mIndex].ClearBit(token); } bool Parser::WasFailed(RuleTable *table, unsigned token) { - std::vector::iterator it = gFailed[table->mIndex].begin(); - for (; it != 
gFailed[table->mIndex].end(); it++) { - if (*it == token) - return true; + return gFailed[table->mIndex].GetBit(token); +} + +// return true if t can be merged with previous tokens. +// This happens when "-3" is lexed as operator Sub and literal 3. +// For Lexer' stance, this is the right thing to do. However, we do +// need literal -3. +bool Parser::TokenMerge(Token *t) { + if (!t->IsLiteral()) + return false; + unsigned size = mActiveTokens.GetNum(); + if (size < 2) + return false; + + // We take care of a few scenarios. + // = -1 <-- sep is an assignment operator + // [-1 <-- sep is a separtor + // + // Here is also another ugly case in Typescript. + // keyword -1 + // such as: x extends -1 + // In normal sense, if it's a keyword we can merge tokens. + // However, in TS keyword can also be an identifier which means + // keyword - 1 could be an expression. + // In this case, we further look at one more token ahead, so if it's + // identifier keyword -1 + // then we know keyword is not used an identifier and we can merge tokens. + + Token *sep = mActiveTokens.ValueAtIndex(size - 2); + bool is_sep = false; + if (sep->IsSeparator() && + (sep->GetSepId() != SEP_Rparen) && + (sep->GetSepId() != SEP_Rbrack)) + is_sep = true; + if (sep->IsOperator() && + (sep->GetOprId() == OPR_Assign || + sep->GetOprId() == OPR_Bor)) + is_sep = true; + + if (sep->IsKeyword() && mActiveTokens.GetNum() >= 3) { + Token *idn = mActiveTokens.ValueAtIndex(size - 3); + if (idn->IsIdentifier()) + is_sep = true; + } + + if (!is_sep) + return false; + + Token *opr = mActiveTokens.ValueAtIndex(size - 1); + if (!opr->IsOperator()) + return false; + + if ((opr->GetOprId() != OPR_Sub) && (opr->GetOprId() != OPR_Add)) + return false; + + LitData data = t->GetLitData(); + if ((data.mType != LT_IntegerLiteral) && + (data.mType != LT_FPLiteral) && + (data.mType != LT_DoubleLiteral)) + return false; + + if (opr->GetOprId() == OPR_Sub) { + if ((data.mType == LT_IntegerLiteral)) { + data.mData.mInt = (-1) * data.mData.mInt; + } else if (data.mType == LT_FPLiteral) { + data.mData.mFloat = (-1) * data.mData.mFloat; + } else if (data.mType == LT_DoubleLiteral) { + data.mData.mDouble = (-1) * data.mData.mDouble; + } + t->SetLiteral(data); + mActiveTokens.SetElem(size - 1, t); + return true; + } else if (opr->GetOprId() == OPR_Add) { + mActiveTokens.SetElem(size - 1, t); + return true; } + return false; } @@ -257,12 +353,15 @@ unsigned Parser::LexOneLine() { unsigned token_num = 0; Token *t = NULL; + Token *last_token = NULL; + bool line_begin = true; + // Check if there are already pending tokens. if (mCurToken < mActiveTokens.GetNum()) return mActiveTokens.GetNum() - mCurToken; while (!token_num) { - // read untile end of line + // read until end of line while (!mLexer->EndOfLine() && !mLexer->EndOfFile()) { t = mLexer->LexToken(); if (t) { @@ -271,9 +370,33 @@ unsigned Parser::LexOneLine() { if (t->IsWhiteSpace()) is_whitespace = true; } - // Put into the token storage, as Pending tokens. - if (!is_whitespace && !t->IsComment()) { + bool is_tab = false; + if (t->IsSeparator()) { + if (t->IsTab()) + is_tab = true; + } + // Put into the token storage + if (!is_whitespace && !is_tab && !t->IsComment()) { + // 1. if need to merge + if (TokenMerge(t)) + continue; + + // 2. if need split tokens + if (TokenSplit(t)) + continue; + + // 3. 
handle regular expression + t = GetRegExpr(t); + + if (line_begin) { + t->mLineBegin = true; + line_begin = false; + if (mLexer->GetTrace()) + DUMP0("Set as Line First."); + } + mActiveTokens.PushBack(t); + last_token = t; token_num++; } } else { @@ -282,7 +405,7 @@ unsigned Parser::LexOneLine() { } } // Read in the next line. - if (!token_num) { + if (!token_num && !mLineMode) { if(!mLexer->EndOfFile()) mLexer->ReadALine(); else @@ -290,6 +413,13 @@ unsigned Parser::LexOneLine() { } } + // We are done with a meaningful line + if (token_num) { + last_token->mLineEnd = true; + if (mLexer->GetTrace()) + DUMP0("Set as Line End."); + } + return token_num; } @@ -299,6 +429,11 @@ unsigned Parser::LexOneLine() { bool Parser::MoveCurToken() { mCurToken++; if (mCurToken == mActiveTokens.GetNum()) { + // In line mode, we won't read new line any more. + if (mLineMode) { + mEndOfFile = true; + return true; + } unsigned num = LexOneLine(); if (!num) { mEndOfFile = true; @@ -314,29 +449,87 @@ Token* Parser::GetActiveToken(unsigned i) { return mActiveTokens.ValueAtIndex(i); } +// insert token at position idx. +void Parser::InsertToken(unsigned idx, Token *token) { + if (idx >= mActiveTokens.GetNum()) + MASSERT(0 && "mActiveTokens OutOfBound"); + // enlarge the size by 1. + mActiveTokens.PushBack(NULL); + // Copy each of them forwards. + unsigned i = mActiveTokens.GetNum() - 2; + for (; i >= idx; i--) { + Token *move_t = mActiveTokens.ValueAtIndex(i); + mActiveTokens.SetElem(i + 1, move_t); + } + mActiveTokens.SetElem(idx, token); +} + bool Parser::Parse() { - gASTBuilder.SetTrace(mTraceAstBuild); - bool succ = false; + gTemplateLiteralNodes.Clear(); + mASTBuilder->SetTrace(mTraceAstBuild); + ParseStatus res; while (1) { - succ = ParseStmt(); - if (!succ) + res = ParseStmt(); + if (res == ParseFail || res == ParseEOF) break; } - gModule.Dump(); + if (gTemplateLiteralNodes.GetNum() > 0) + ParseTemplateLiterals(); - return succ; + FixUpVisitor worker(mASTModule); + worker.FixUp(); + + mASTModule->Dump(0); + return (res==ParseFail)? false: true; +} + +void Parser::ParseTemplateLiterals() { + + mLineMode = true; + mLexer->SetLineMode(); + for (unsigned i = 0; i < gTemplateLiteralNodes.GetNum(); i++) { + TemplateLiteralNode *tl = gTemplateLiteralNodes.ValueAtIndex(i); + for (unsigned j = 1; j < tl->GetStringsNum(); j += 2) { + // Create tree node for format + const char *fmt_str = tl->GetStringAtIndex(j-1); + if (fmt_str) { + //Create a string literal node + LitData litdata; + litdata.mType = LT_StringLiteral; + litdata.mData.mStrIdx = gStringPool.GetStrIdx(fmt_str); + LiteralNode *n = (LiteralNode*)gTreePool.NewTreeNode(sizeof(LiteralNode)); + new (n) LiteralNode(litdata); + tl->AddTree(n); + } else { + tl->AddTree(NULL); + } + + const char *ph_str = tl->GetStringAtIndex(j); + if (ph_str) { + mLexer->PrepareForString(ph_str); + // Clear some status + ParseStatus result = ParseStmt(); + MASSERT(result == ParseSucc); + MASSERT(mLineModeRoot); + tl->AddTree(mLineModeRoot); + } else { + tl->AddTree(NULL); + } + } + } + mLineMode = false; + mLexer->ResetLineMode(); } -// Right now I didn't use mempool yet, will come back. -// [TODO] Using mempool. 
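// The parser now allocates its working nodes from pools instead of 'new':
// AppealNodes come from mAppealNodePool.NewAppealNode() and tree nodes are
// built with placement new on gTreePool.NewTreeNode(), so ClearAppealNodes()
// below only releases per-node data and rewinds the pool. The following is a
// minimal standalone sketch of that allocation pattern; SimplePool and
// DemoNode are simplified stand-ins, not the real MemPool/AppealNode API.
#include <cstddef>
#include <new>
#include <vector>

class SimplePool {
public:
  explicit SimplePool(size_t block_size) : mBlockSize(block_size) {}
  ~SimplePool() { for (char *b : mBlocks) delete[] b; }

  // Bump-allocate 'bytes' out of the current block, adding a block if needed.
  void *Alloc(size_t bytes) {
    if (mCurr == mBlocks.size())
      mBlocks.push_back(new char[mBlockSize]);
    if (mUsed + bytes > mBlockSize) {
      ++mCurr;
      mUsed = 0;
      if (mCurr == mBlocks.size())
        mBlocks.push_back(new char[mBlockSize]);
    }
    void *addr = mBlocks[mCurr] + mUsed;
    mUsed += bytes;
    return addr;
  }

  // Forget all allocations but keep the blocks, mirroring MemPool::Clear().
  void Clear() { mCurr = 0; mUsed = 0; }

private:
  std::vector<char*> mBlocks;
  size_t mBlockSize;
  size_t mCurr = 0;
  size_t mUsed = 0;
};

struct DemoNode { unsigned mStartIndex = 0; };

int main() {
  SimplePool pool(16 * 4096);                                   // same block size as mAppealNodePool
  DemoNode *n = new (pool.Alloc(sizeof(DemoNode))) DemoNode();  // placement new, like NewTreeNode()
  n->mStartIndex = 42;
  pool.Clear();                                                 // nodes are gone, the memory blocks stay
  return 0;
}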
void Parser::ClearAppealNodes() { for (unsigned i = 0; i < mAppealNodes.size(); i++) { AppealNode *node = mAppealNodes[i]; if (node) - delete node; + node->Release(); } mAppealNodes.clear(); + mAppealNodePool.Clear(); } // This is for the appealing of mistaken Fail cases created during the first instance @@ -349,10 +542,7 @@ void Parser::ClearAppealNodes() { void Parser::Appeal(AppealNode *start, AppealNode *root) { MASSERT((root->IsSucc()) && "root->mResult is not Succ."); - // A recursion group could have >1 lead node. 'start' could be a different leadnode - // than 'root'. - - AppealNode *node = start->GetParent(); + AppealNode *node = start; // It's possible that this sub-tree could be separated. For example, the last // instance of RecursionTraversal, which is a Fake Succ, and is separated @@ -375,7 +565,7 @@ void Parser::Appeal(AppealNode *start, AppealNode *root) { // This is the parsing for highest level language constructs. It could be class // in Java/c++, or a function/statement in c/c++. In another word, it's the top // level constructs in a compilation unit (aka Module). -bool Parser::ParseStmt() { +ParseStatus Parser::ParseStmt() { // clear status ClearFailed(); ClearSucc(); @@ -383,21 +573,13 @@ bool Parser::ParseStmt() { mPending = 0; // set the root appealing node - mRootNode = new AppealNode(); + mRootNode = mAppealNodePool.NewAppealNode(); mAppealNodes.push_back(mRootNode); - // mActiveTokens contain some un-matched tokens from last time of TraverseStmt(), - // because at the end of every TraverseStmt() when it finishes its matching it always - // MoveCurToken() which in turn calls LexOneLine() to read new tokens of a new line. - // - // This means in LexOneLine() we also need check if there are already tokens pending. - // - // [TODO] Later on, we will move thoes pending tokens to a separate data structure. - unsigned token_num = LexOneLine(); // No more token, end of file if (!token_num) - return false; + return ParseEOF; // Match the tokens against the rule tables. // In a rule table there are : (1) separtaor, operator, keyword, are already in token @@ -409,7 +591,12 @@ bool Parser::ParseStmt() { if (mTraceTiming) gettimeofday(&start, NULL); - bool succ = TraverseStmt(); + bool succ = false; + if (mLineMode) + succ = TraverseTempLiteral(); + else + succ = TraverseStmt(); + if (mTraceTiming) { gettimeofday(&stop, NULL); std::cout << "Parse Time: " << (stop.tv_sec - start.tv_sec) * 1000000 + stop.tv_usec - start.tv_usec; @@ -421,7 +608,7 @@ bool Parser::ParseStmt() { if (mTraceTiming) gettimeofday(&start, NULL); - PatchWasSucc(mRootNode->mSortedChildren[0]); + PatchWasSucc(mRootNode->mSortedChildren.ValueAtIndex(0)); if (mTraceTiming) { gettimeofday(&stop, NULL); std::cout << "PatchWasSucc Time: " << (stop.tv_sec - start.tv_sec) * 1000000 + stop.tv_usec - start.tv_usec; @@ -439,9 +626,11 @@ bool Parser::ParseStmt() { if (mTraceTiming) gettimeofday(&start, NULL); - ASTTree *tree = BuildAST(); + TreeNode *tree = BuildAST(); if (tree) { - gModule.AddTree(tree); + if (!mLineMode) { + mASTModule->AddTree(tree); + } } if (mTraceTiming) { @@ -450,6 +639,71 @@ bool Parser::ParseStmt() { std::cout << " us" << std::endl; } } + return succ? ParseSucc: ParseFail; +} + +// return true : if all tokens in mActiveTokens are matched. +// false : if faled. +// For the place holders in Typescript Template Literal, there are usually two +// syntax, expression and type. 
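+// For example, in the literal `total: ${count + 1}` the placeholder holds an
+// expression, while in a template literal type such as `id_${Uppercase<T>}`
+// the placeholder holds a type.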
+ +bool Parser::TraverseTempLiteral() { + bool succ_expr = false; + bool succ_type = false; + unsigned saved_mCurToken = mCurToken; + unsigned new_mCurToken_expr = 0; + unsigned new_mCurToken_type = 0; + + mRootNode->ClearChildren(); + + RuleTable *t = &TblExpression; + AppealNode *child = NULL; + succ_expr = TraverseRuleTable(t, mRootNode, child); + if (succ_expr) { + MASSERT(child || t->mType == ET_ASI); + if (child) + mRootNode->CopyMatch(child); + // Need adjust the mCurToken. A rule could try multiple possible + // children rules, although there is one and only one valid child + // for a Top table. However, the mCurToken could deviate from + // the valid children and reflect the invalid children. + //MASSERT(mRootNode->mChildren.GetNum() == 1); + //AppealNode *topnode = mRootNode->mChildren.ValueAtIndex(0); + //MASSERT(topnode->IsSucc()); + new_mCurToken_expr = mCurToken; + } + + // Type TblType + mCurToken = saved_mCurToken; + t = &TblType; + child = NULL; + succ_type = TraverseRuleTable(t, mRootNode, child); + if (succ_type) { + MASSERT(child || t->mType == ET_ASI); + if (child) + mRootNode->CopyMatch(child); + // Need adjust the mCurToken. A rule could try multiple possible + // children rules, although there is one and only one valid child + // for a Top table. However, the mCurToken could deviate from + // the valid children and reflect the invalid children. + //MASSERT(mRootNode->mChildren.GetNum() == 1); + //AppealNode *topnode = mRootNode->mChildren.ValueAtIndex(0); + //MASSERT(topnode->IsSucc()); + new_mCurToken_type = mCurToken; + } + + mCurToken = new_mCurToken_expr > new_mCurToken_type ? new_mCurToken_expr : new_mCurToken_type; + + bool succ = succ_expr | succ_type; + if (succ) { + mRootNode->mResult = Succ; + SortOut(); + } + + if (!succ) + std::cout << "Illegal syntax detected!" << std::endl; + else + std::cout << "Matched " << mCurToken << " tokens." << std::endl; return succ; } @@ -467,14 +721,16 @@ bool Parser::TraverseStmt() { mRootNode->ClearChildren(); AppealNode *child = NULL; succ = TraverseRuleTable(t, mRootNode, child); - mRootNode->CopyMatch(child); if (succ) { + MASSERT(child || t->mType == ET_ASI); + if (child) + mRootNode->CopyMatch(child); // Need adjust the mCurToken. A rule could try multiple possible - // children rules, although there is one any only one valid child + // children rules, although there is one and only one valid child // for a Top table. However, the mCurToken could deviate from // the valid children and reflect the invalid children. - MASSERT(mRootNode->mChildren.size() == 1); - AppealNode *topnode = mRootNode->mChildren[0]; + MASSERT(mRootNode->mChildren.GetNum() == 1); + AppealNode *topnode = mRootNode->mChildren.ValueAtIndex(0); MASSERT(topnode->IsSucc()); // Top level table should have only one valid matching. 
Otherwise, @@ -515,39 +771,51 @@ void Parser::DumpEnterTable(const char *table_name, unsigned indent) { } void Parser::DumpExitTable(const char *table_name, unsigned indent, AppealNode *appeal) { + DumpExitTable(table_name, indent, appeal->mResult, appeal); +} + +void Parser::DumpExitTable(const char *table_name, unsigned indent, + AppealStatus reason, AppealNode *appeal) { for (unsigned i = 0; i < indent; i++) std::cout << " "; std::cout << "Exit " << table_name << "@" << mCurToken; - bool succ = appeal->IsSucc(); - AppealStatus reason = appeal->mResult; - if (succ) { - if (reason == SuccWasSucc) - std::cout << " succ@WasSucc" << "}"; - else if (reason == SuccStillWasSucc) - std::cout << " succ@StillWasSucc" << "}"; - else if (reason == Succ) - std::cout << " succ" << "}"; - + if (reason == SuccWasSucc) { + std::cout << " succ@WasSucc" << "}"; DumpSuccTokens(appeal); std::cout << std::endl; - } else { - if (reason == FailWasFailed) - std::cout << " fail@WasFailed" << "}" << std::endl; - else if (reason == FailNotRightToken) - std::cout << " fail@NotRightToken" << "}" << std::endl; - else if (reason == FailNotIdentifier) - std::cout << " fail@NotIdentifer" << "}" << std::endl; - else if (reason == FailNotLiteral) - std::cout << " fail@NotLiteral" << "}" << std::endl; - else if (reason == FailChildrenFailed) - std::cout << " fail@ChildrenFailed" << "}" << std::endl; - else if (reason == Fail2ndOf1st) - std::cout << " fail@2ndOf1st" << "}" << std::endl; - else if (reason == FailLookAhead) - std::cout << " fail@LookAhead" << "}" << std::endl; - else if (reason == AppealStatus_NA) - std::cout << " fail@NA" << "}" << std::endl; - } + } else if (reason == SuccStillWasSucc) { + std::cout << " succ@StillWasSucc" << "}"; + DumpSuccTokens(appeal); + std::cout << std::endl; + } else if (reason == Succ) { + std::cout << " succ" << "}"; + DumpSuccTokens(appeal); + std::cout << std::endl; + } else if (reason == SuccASI) { + std::cout << " succASI" << "}"; + std::cout << std::endl; + } else if (reason == FailWasFailed) + std::cout << " fail@WasFailed" << "}" << std::endl; + else if (reason == FailNotRightToken) + std::cout << " fail@NotRightToken" << "}" << std::endl; + else if (reason == FailNotRightString) + std::cout << " fail@NotRightString" << "}" << std::endl; + else if (reason == FailNotIdentifier) + std::cout << " fail@NotIdentifer" << "}" << std::endl; + else if (reason == FailNotLiteral) + std::cout << " fail@NotLiteral" << "}" << std::endl; + else if (reason == FailNotRegExpr) + std::cout << " fail@NotRegExpr" << "}" << std::endl; + else if (reason == FailChildrenFailed) + std::cout << " fail@ChildrenFailed" << "}" << std::endl; + else if (reason == Fail2ndOf1st) + std::cout << " fail@2ndOf1st" << "}" << std::endl; + else if (reason == FailLookAhead) + std::cout << " fail@LookAhead" << "}" << std::endl; + else if (reason == FailASI) + std::cout << " fail@ASI" << "}" << std::endl; + else if (reason == AppealStatus_NA) + std::cout << " fail@NA" << "}" << std::endl; } void Parser::DumpSuccTokens(AppealNode *appeal) { @@ -577,67 +845,9 @@ void Parser::RemoveSuccNode(unsigned curr_token, AppealNode *node) { succ_match->RemoveNode(node); } -// The PreProcessing of TraverseRuleTable(). -// Under the Wavefront algorithm of recursion group traversal, things are -// are a little complicated. -// 1. If a rule is failed at some token, it could be succ later. For example, -// the 2nd hit in 1st iteration of a recursion node, is failed@2ndof1st, -// but the rule could be succ match later. -// 2. 
If a rule is succ at some token, it doesn't mean it's finished, and -// there could be more matchings. -// -// Returns true : if SuccMatch is done. - -bool Parser::TraverseRuleTablePre(AppealNode *appeal) { - unsigned saved_mCurToken = mCurToken; - bool is_done = false; - RuleTable *rule_table = appeal->GetTable(); - const char *name = NULL; - if (mTraceTable) - name = GetRuleTableName(rule_table); - - // Check if it was succ. The longest matching is chosen for the next rule table to match. - SuccMatch *succ = &gSucc[rule_table->mIndex]; - if (succ) { - bool was_succ = succ->GetStartToken(mCurToken); - if (was_succ) { - // Those affected by the 1st appearance of 1st instance which returns false. - // 1stOf1st is not add to WasFail, but those affected will be added to WasFail. - // The affected can be succ later. So there is possibility both succ and fail - // exist at the same time. - // - // I still keep this assertion. We will see. Maybe we'll remove it. - MASSERT(!WasFailed(rule_table, mCurToken)); - - is_done = succ->IsDone(); - - unsigned num = succ->GetMatchNum(); - for (unsigned i = 0; i < num; i++) { - unsigned match = succ->GetOneMatch(i); - // WasSucc nodes need Match info, which will be used later - // in the sort out. - appeal->AddMatch(match); - if (match > mCurToken) - mCurToken = match; - } - appeal->mResult = SuccWasSucc; - - // In ZeroorXXX cases, it was successful and has SuccMatch. However, - // it could be a failure. In this case, we shouldn't move mCurToken. - if (num > 0) - MoveCurToken(); - } - } - - if (WasFailed(rule_table, saved_mCurToken)) { - appeal->mResult = FailWasFailed; - } - - return is_done; -} - bool Parser::LookAheadFail(RuleTable *rule_table, unsigned token) { Token *curr_token = GetActiveToken(token); + LookAheadTable latable = gLookAheadTable[rule_table->mIndex]; bool found = false; @@ -651,8 +861,20 @@ bool Parser::LookAheadFail(RuleTable *rule_table, unsigned token) { // which are not recoganized by lexer. break; case LA_Token: - if (curr_token == &gSystemTokens[la.mData.mTokenId]) + if (curr_token->Equal(&gSystemTokens[la.mData.mTokenId])) found = true; + // TemplateLiteral, Regular Expression is treated as a special keyword. + { + Token *t = &gSystemTokens[la.mData.mTokenId]; + if (t->IsKeyword() && !strncmp(t->GetName(), "this_is_for_fake_rule", 21)) { + if (curr_token->IsTempLit() || curr_token->IsRegExpr()) + found = true; + } + if (rule_table == &TblNoLineTerminator) { + if (!curr_token->mLineBegin) + found = true; + } + } break; case LA_Identifier: if (curr_token->IsIdentifier()) @@ -686,16 +908,13 @@ bool Parser::LookAheadFail(RuleTable *rule_table, unsigned token) { // 2. TraverseRuleTable will let the children's traverse to move mCurToken // if they succeeded. // 3. TraverseOneof, TraverseZeroxxxx, TraverseConcatenate follow rule 1&2. -// 4. TraverseRuleTablePre and TraverseLeadNode both exit early, so they -// need follow the rule 1&2. -// 3. TraverseRuleTablePre move mCurToken is succ, and actually it doesn't -// touch mCurToken when fail. +// 3. TraverseLeadNode exit early, so need follow the rule 1&2. // 4. TraverseLeadNode() also follows the rule 1&2. It moves mCurToken // when succ and restore it when fail. // // 'child' is the AppealNode of 'rule_table'. 
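+// In short, every Traverse* routine leaves mCurToken where it found it when it
+// fails, and moves it past the longest match when it succeeds, so callers can
+// save mCurToken before a trial and restore it only on failure.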
bool Parser::TraverseRuleTable(RuleTable *rule_table, AppealNode *parent, AppealNode *&child) { - if (mEndOfFile) + if (mEndOfFile && mCurToken >= mActiveTokens.GetNum()) return false; mIndentation += 2; @@ -705,45 +924,104 @@ bool Parser::TraverseRuleTable(RuleTable *rule_table, AppealNode *parent, Appeal DumpEnterTable(name, mIndentation); } - // set the apppeal node - AppealNode *appeal = new AppealNode(); - mAppealNodes.push_back(appeal); - appeal->SetTable(rule_table); - appeal->SetStartIndex(mCurToken); - appeal->SetParent(parent); - parent->AddChild(appeal); - child = appeal; + if (rule_table->mType == ET_ASI) { + bool found = TraverseASI(rule_table, parent, child); + if (mTraceTable) { + if (found) + DumpExitTable(name, mIndentation, SuccASI); + else + DumpExitTable(name, mIndentation, FailASI); + } + mIndentation -= 2; + return found; + } + // Lookahead fail is fast to check, even faster than check WasFailed. + if (LookAheadFail(rule_table, mCurToken) && + (rule_table->mType != ET_Zeroormore) && + (rule_table->mType != ET_Zeroorone)) { + if (mTraceTable) + DumpExitTable(name, mIndentation, FailLookAhead); + mIndentation -= 2; + return false; + } + + AppealNode *appeal = NULL; unsigned saved_mCurToken = mCurToken; - bool is_done = TraverseRuleTablePre(appeal); + bool is_done = false; + + // Check if it was succ. The longest matching is chosen for the next rule table to match. + SuccMatch *succ = &gSucc[rule_table->mIndex]; + if (succ) { + bool was_succ = succ->GetStartToken(mCurToken); + if (was_succ) { + // Those affected by the 1st appearance of 1st instance which returns false. + // 1stOf1st is not add to WasFail, but those affected will be added to WasFail. + // The affected can be succ later. So there is possibility both succ and fail + // exist at the same time. + // + // I still keep this assertion. We will see. Maybe we'll remove it. + MASSERT(!WasFailed(rule_table, mCurToken)); + + // set the apppeal node + appeal = mAppealNodePool.NewAppealNode(); + mAppealNodes.push_back(appeal); + appeal->SetTable(rule_table); + appeal->SetStartIndex(mCurToken); + appeal->SetParent(parent); + parent->AddChild(appeal); + child = appeal; + + is_done = succ->IsDone(); + + unsigned num = succ->GetMatchNum(); + for (unsigned i = 0; i < num; i++) { + unsigned match = succ->GetOneMatch(i); + // WasSucc nodes need Match info, which will be used later + // in the sort out. + appeal->AddMatch(match); + if (match > mCurToken) + mCurToken = match; + } + appeal->mResult = SuccWasSucc; + + // In ZeroorXXX cases, it was successful and has SuccMatch. However, + // it could be a failure. In this case, we shouldn't move mCurToken. + if (num > 0) + MoveCurToken(); + } + } unsigned group_id; bool in_group = FindRecursionGroup(rule_table, group_id); // 1. In a recursion, a rule could fail in the first a few instances, // but could match in a later instance. So I need check is_done. + // Here is an example. Node A is one of the circle node. + // (a) In the first recursion instance, A is failed, but luckly it + // gets appealed due to lead node is 2ndOf1st. + // (b) In the second instance, it still fail because its children + // failed. But the whole recursion actually matches tokens, and + // those matching rule tables are not related to A. + // (c) Finally, A matches because leading node goes forward and gives + // A new opportunity. // 2. For A not-in-group rule, a WasFailed is a real fail. 
- if (appeal->IsFail() && (!in_group || is_done)) { - if (mTraceTable) - DumpExitTable(name, mIndentation, appeal); - mIndentation -= 2; - return false; - } - if (LookAheadFail(rule_table, saved_mCurToken) && - (rule_table->mType != ET_Zeroormore) && - (rule_table->mType != ET_Zeroorone)) { - appeal->mResult = FailLookAhead; - AddFailed(rule_table, saved_mCurToken); + bool was_failed = WasFailed(rule_table, saved_mCurToken); + if (was_failed && (!in_group || is_done)) { if (mTraceTable) - DumpExitTable(name, mIndentation, appeal); + DumpExitTable(name, mIndentation, FailWasFailed); mIndentation -= 2; return false; } // If the rule is NOT in any recursion group, we simply return the result. // If the rule is done, we also simply return the result. - if (appeal->IsSucc()) { + + // If a rule is succ at some token, it doesn't mean it's finished, and + // there could be more matchings. + + if (appeal && appeal->IsSucc()) { if (!in_group || is_done) { if (mTraceTable) DumpExitTable(name, mIndentation, appeal); @@ -752,19 +1030,47 @@ bool Parser::TraverseRuleTable(RuleTable *rule_table, AppealNode *parent, Appeal } else { if (mTraceTable) { DumpIndentation(); - std::cout << "Traverse-Pre WasSucc, mCurToken:" << mCurToken; + std::cout << "Traverse-Pre WasSucc, mCurToken:" << saved_mCurToken; std::cout << std::endl; } } } - RecursionTraversal *rec_tra = FindRecStack(group_id, appeal->GetStartIndex()); + RecursionTraversal *rec_tra = FindRecStack(group_id, saved_mCurToken); // group_id is 0 which is the default value if rule_table is not in a group // Need to reset rec_tra; if (!in_group) rec_tra = NULL; + // This part is to handle a special case: The second appearance in the first instance + // (wave) in the Wavefront algorithm. At this moment, the first appearance in this + // instance hasn't finished its traversal, so there is no previous succ or fail case. + // + // We need to simply return false, but we cannot add them to the Fail mapping. + // A rule is AddFailed() in TraverseRuleTableRegular() which is in the end of this function. + + if (rec_tra && + rec_tra->GetInstance() == InstanceFirst && + rec_tra->LeadNodeVisited(rule_table)) { + rec_tra->AddAppealPoint(parent); + if (mTraceTable) + DumpExitTable(name, mIndentation, Fail2ndOf1st); + mIndentation -= 2; + return false; + } + + // We delay creation of AppealNode as much as possible. + if (!appeal) { + appeal = mAppealNodePool.NewAppealNode(); + mAppealNodes.push_back(appeal); + appeal->SetTable(rule_table); + appeal->SetStartIndex(saved_mCurToken); + appeal->SetParent(parent); + parent->AddChild(appeal); + child = appeal; + } + // If the rule is already traversed in this iteration(instance), we return the result. if (rec_tra && rec_tra->RecursionNodeVisited(rule_table)) { if (mTraceTable) @@ -788,7 +1094,7 @@ bool Parser::TraverseRuleTable(RuleTable *rule_table, AppealNode *parent, Appeal // wave (instance) of the Wavefront traversal, the 1st is not visited, the // 2nd is visited. - if (rec_tra->LeadNodeVisited(rule_table)) { + if (rec_tra->LeadNodeVisited(rule_table)) { if (mTraceLeftRec) { DumpIndentation(); std::cout << ": ConnectPrevious " << GetRuleTableName(rule_table) @@ -807,22 +1113,6 @@ bool Parser::TraverseRuleTable(RuleTable *rule_table, AppealNode *parent, Appeal } } - // This part is to handle a special case: The second appearance in the first instance - // (wave) in the Wavefront algorithm. At this moment, the first appearance in this - // instance hasn't finished its traversal, so there is no previous succ or fail case. 
- // - // We need to simply return false, but we cannot add them to the Fail mapping. - if (rec_tra && - rec_tra->GetInstance() == InstanceFirst && - rec_tra->LeadNodeVisited(rule_table)) { - rec_tra->AddAppealPoint(appeal); - appeal->mResult = Fail2ndOf1st; - if (mTraceTable) - DumpExitTable(name, mIndentation, appeal); - mIndentation -= 2; - return false; - } - // Restore the mCurToken since TraverseRuleTablePre() update the mCurToken // if succ. And we need use the old mCurToken. mCurToken = saved_mCurToken; @@ -874,12 +1164,24 @@ bool Parser::TraverseRuleTable(RuleTable *rule_table, AppealNode *parent, Appeal // 1. TraverseIdentifier, TraverseLiteral, TraverseOneof, TraverseZeroorXXx, etc // since we konw the relation between parent and children // 2. Or TraverseTableData in this function, because we know the relation. -// 3. Or when TraverseRuleTablePre is succ, also because we know the relation. We -// do it inside TraverseRuleTablePre. +// 3. When TraverseRuleTable pre-check was succ. Because we know the relation. // These are the only places of colleting succ match for a parent node. // bool Parser::TraverseRuleTableRegular(RuleTable *rule_table, AppealNode *appeal) { + // In TraverseToken(), alt tokens are traversed. The intermediate status of matching + // are needed. However, if a matching failed in the middle of alt token serial, the + // status is not cleared in TraverseToken(). It's hard to clear in TraverseToken() + // as it doesn't know the context of traversal. + // + // A better solution is to clear each time entering a top rule table. + if (rule_table->mProperties & RP_Top) { + mInAltTokensMatching = false; + mNextAltTokenIndex = 0; + if (mTraceTable) + std::cout << "Clear alt token status." << std::endl; + } + bool matched = false; unsigned saved_mCurToken = mCurToken; @@ -900,6 +1202,20 @@ bool Parser::TraverseRuleTableRegular(RuleTable *rule_table, AppealNode *appeal) if ((rule_table == &TblLiteral)) return TraverseLiteral(rule_table, appeal); + if ((rule_table == &TblTemplateLiteral)) + return TraverseTemplateLiteral(rule_table, appeal); + + if ((rule_table == &TblRegularExpression)) + return TraverseRegularExpression(rule_table, appeal); + + if (rule_table == &TblNoLineTerminator) { + Token *token = mActiveTokens.ValueAtIndex(mCurToken); + if (token->mLineBegin) + return false; + else + return true; + } + EntryType type = rule_table->mType; switch(type) { case ET_Oneof: @@ -914,14 +1230,22 @@ bool Parser::TraverseRuleTableRegular(RuleTable *rule_table, AppealNode *appeal) case ET_Concatenate: matched = TraverseConcatenate(rule_table, appeal); break; + case ET_ASI: { + AppealNode *child = NULL; + matched = TraverseASI(rule_table, appeal, child); + break; + } case ET_Data: { // This is a rare case where a rule table contains only table, either a token // or a single child rule. In this case, we need merge the child's match into // parent. However, we cannot do the merge in TraverseTableData() since this // function will be used in multiple places where we cannot merge. - AppealNode *child; + AppealNode *child = NULL; matched = TraverseTableData(rule_table->mData, appeal, child); - appeal->CopyMatch(child); + if (child) { + child->SetChildIndex(0); + appeal->CopyMatch(child); + } break; } case ET_Null: @@ -944,30 +1268,102 @@ bool Parser::TraverseRuleTableRegular(RuleTable *rule_table, AppealNode *appeal) // Returns 1. true if succ. // 2. child_node which represents 'token'. 
-bool Parser::TraverseToken(Token *token, AppealNode *parent, AppealNode *&child_node) { - Token *curr_token = GetActiveToken(mCurToken); - bool found = false; +bool Parser::TraverseStringSucc(Token *token, AppealNode *parent, AppealNode *&child_node) { + AppealNode *appeal = NULL; mIndentation += 2; if (mTraceTable) { - std::string name = "token:"; + std::string name = "string:"; name += token->GetName(); - name += " curr_token:"; - name += curr_token->GetName(); + name += " curr_token matches"; DumpEnterTable(name.c_str(), mIndentation); } - AppealNode *appeal = new AppealNode(); + appeal = mAppealNodePool.NewAppealNode(); child_node = appeal; mAppealNodes.push_back(appeal); - appeal->mResult = FailNotRightToken; - appeal->SetToken(curr_token); + appeal->SetToken(token); appeal->SetStartIndex(mCurToken); appeal->SetParent(parent); parent->AddChild(appeal); + appeal->mResult = Succ; + appeal->AddMatch(mCurToken); + MoveCurToken(); + + if (mTraceTable) { + std::string name; + name = "string:"; + name += token->GetName(); + DumpExitTable(name.c_str(), mIndentation, appeal); + } + + mIndentation -= 2; + return true; +} + +// Returns 1. true if succ. +// 2. child_node which represents 'token'. +bool Parser::TraverseToken(Token *token, AppealNode *parent, AppealNode *&child_node) { + Token *curr_token = GetActiveToken(mCurToken); + bool found = false; + mIndentation += 2; + + if (mTraceTable) { + std::string name = "token:"; + name += token->GetName(); + name += " curr_token:"; + name += curr_token->GetName(); + DumpEnterTable(name.c_str(), mIndentation); + } bool use_alt_token = false; - if (token == curr_token) { + AppealNode *appeal = NULL; + + // [TODO] + // We enable skipping semi-colon. Later we will implement TS specific version of parser + // which overried TraverseToken(). + // We handle one case in the following: + // The rule expects: + // { statement ;} + // But we see : + // { statement } // No ';' + // In this case we can skip the checking of ';' since '}' actually closes everything. + // There are many other cases. Will handle later. + if (token->IsSeparator() && token->GetSepId() == SEP_Semicolon) { + if (curr_token->IsSeparator() && curr_token->GetSepId() == SEP_Rbrace) { + // 1. There are rule like ZEROORMORE(';'). In this case, we don't insert + RuleTable *parent_rt = parent->GetTable(); + bool need_insert = true; + if (parent_rt->mType == ET_Zeroormore || parent_rt->mType == ET_Zeroorone) + need_insert = false; + + // We also require that '}' is the last token, at least the last in this line + // if not the end of file. + if (mActiveTokens.GetNum() > mCurToken + 1) + need_insert = false; + + // 2. we need check cases where we already have one previous ';'. + Token *prev = mActiveTokens.ValueAtIndex(mCurToken - 1); + if (prev != token && need_insert) { + // The simpliest way is to insert a semicolon token in mActiveTokens. + // Just pretend we lex a semicolon. + InsertToken(mCurToken, token); + curr_token = token; + if (mTraceTable) { + std::cout << "Auto-insert one semicolon." 
<< std::endl; + } + } + } + } + + if (token->Equal(curr_token)) { + appeal = mAppealNodePool.NewAppealNode(); + child_node = appeal; + mAppealNodes.push_back(appeal); + appeal->SetToken(curr_token); + appeal->SetStartIndex(mCurToken); + appeal->SetParent(parent); + parent->AddChild(appeal); appeal->mResult = Succ; appeal->AddMatch(mCurToken); found = true; @@ -977,13 +1373,39 @@ bool Parser::TraverseToken(Token *token, AppealNode *parent, AppealNode *&child_ if (curr_token->mAltTokens) { bool alt_found = false; AltToken *pat = curr_token->mAltTokens; - if (token == &gSystemTokens[pat->mAltTokenId]) { + + // Sometimes a rule which has literally good alt tokens doesn't want to be + // considered as alt token matching, eg. + // RelationalExpression : expr + '>' + expr + // This '>' won't be suitable for alt tokens of >> or >>>, because so far + // there is no expr ending with '>'. + bool parent_ok = true; + if (parent->GetTable()->mProperties & RP_NoAltToken) + parent_ok = false; + + if (parent_ok && (token->Equal(&gSystemTokens[pat->mAltTokenId]))) { + appeal = mAppealNodePool.NewAppealNode(); + child_node = appeal; + mAppealNodes.push_back(appeal); + appeal->SetToken(curr_token); + appeal->SetStartIndex(mCurToken); + appeal->SetParent(parent); + parent->AddChild(appeal); + found = true; alt_found = true; mATMToken = mCurToken; + + if (mTraceTable) { + std::cout << "Work on alt token, index : " << mNextAltTokenIndex << std::endl; + } + if (!mInAltTokensMatching) { mInAltTokensMatching = true; appeal->m1stAltTokenMatched = true; + if (mTraceTable) { + std::cout << "Turn On mInAltTokensMatching " << std::endl; + } } mNextAltTokenIndex++; @@ -998,6 +1420,10 @@ bool Parser::TraverseToken(Token *token, AppealNode *parent, AppealNode *&child_ MoveCurToken(); mInAltTokensMatching = false; mNextAltTokenIndex = 0; + if (mTraceTable) { + std::cout << "Work on alt token is successfully finised. Set mNextAltTokenIndex to : " << mNextAltTokenIndex << std::endl; + std::cout << "Turn Off mInAltTokensMatching " << std::endl; + } } } } @@ -1010,7 +1436,10 @@ bool Parser::TraverseToken(Token *token, AppealNode *parent, AppealNode *&child_ else name = "token:"; name += token->GetName(); - DumpExitTable(name.c_str(), mIndentation, appeal); + if (appeal) + DumpExitTable(name.c_str(), mIndentation, appeal); + else + DumpExitTable(name.c_str(), mIndentation, FailNotRightToken); } mIndentation -= 2; @@ -1048,6 +1477,44 @@ bool Parser::TraverseLiteral(RuleTable *rule_table, AppealNode *appeal) { return found; } +// We don't go into TemplateLiteral table. +// 'appeal' is the node for this rule table. This is different than TraverseOneof +// or the others where 'appeal' is actually a parent node. +bool Parser::TraverseTemplateLiteral(RuleTable *rule_table, AppealNode *appeal) { + Token *curr_token = GetActiveToken(mCurToken); + const char *name = GetRuleTableName(rule_table); + bool found = false; + + if (curr_token->IsTempLit()) { + found = true; + TraverseSpecialTableSucc(rule_table, appeal); + } else { + appeal->mResult = FailNotLiteral; + AddFailed(rule_table, mCurToken); + } + + return found; +} + +// We don't go into RegularExpressionLiteral table. +// 'appeal' is the node for this rule table. This is different than TraverseOneof +// or the others where 'appeal' is actually a parent node. 
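+// The lexer has already packaged the whole /expr/flags text into a single
+// regular-expression token (see FindRegExprToken), so checking the token kind
+// is all that is needed here.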
+bool Parser::TraverseRegularExpression(RuleTable *rule_table, AppealNode *appeal) { + Token *curr_token = GetActiveToken(mCurToken); + const char *name = GetRuleTableName(rule_table); + bool found = false; + + if (curr_token->IsRegExpr()) { + found = true; + TraverseSpecialTableSucc(rule_table, appeal); + } else { + appeal->mResult = FailNotRegExpr; + AddFailed(rule_table, mCurToken); + } + + return found; +} + // We don't go into Identifier table. // 'appeal' is the node for this rule table. bool Parser::TraverseIdentifier(RuleTable *rule_table, AppealNode *appeal) { @@ -1110,7 +1577,7 @@ bool Parser::TraverseZeroormore(RuleTable *rule_table, AppealNode *appeal) { bool temp_found = TraverseTableData(data, appeal, child); found_subtable |= temp_found; - if (temp_found) { + if (temp_found && child) { unsigned match_num = child->GetMatchNum(); for (unsigned id = 0; id < match_num; id++) { unsigned match = child->GetMatch(id); @@ -1150,13 +1617,16 @@ bool Parser::TraverseZeroormore(RuleTable *rule_table, AppealNode *appeal) { bool Parser::TraverseZeroorone(RuleTable *rule_table, AppealNode *appeal) { MASSERT((rule_table->mNum == 1) && "zeroorone node has more than one elements?"); TableData *data = rule_table->mData; - AppealNode *child; + AppealNode *child = NULL; bool found = TraverseTableData(data, appeal, child); - appeal->CopyMatch(child); + if (child) + appeal->CopyMatch(child); return true; } // 1. Save all the possible matchings from children. +// There is one exception. If the rule-table is a top rule it should +// get the longest match. // 2. As return value we choose the longest matching. // // 'appeal' is the node of 'rule_table'. @@ -1171,8 +1641,8 @@ bool Parser::TraverseOneof(RuleTable *rule_table, AppealNode *appeal) { bool temp_found = TraverseTableData(data, appeal, child); found = found | temp_found; if (temp_found) { - MASSERT(child); - appeal->CopyMatch(child); + if (child) + appeal->CopyMatch(child); if (mCurToken > new_mCurToken) new_mCurToken = mCurToken; @@ -1180,12 +1650,19 @@ bool Parser::TraverseOneof(RuleTable *rule_table, AppealNode *appeal) { mCurToken = old_mCurToken; // Some ONEOF rules can have only children matching current token seq. + // Or the language desiner just want to match the first children rule. if (rule_table->mProperties & RP_Single) { break; } } } + if (found && (rule_table->mProperties & RP_Top)) { + unsigned longest = appeal->LongestMatch(); + appeal->ClearMatch(); + appeal->AddMatch(longest); + } + // move position according to the longest matching mCurToken = new_mCurToken; return found; @@ -1202,7 +1679,7 @@ bool Parser::TraverseOneof(RuleTable *rule_table, AppealNode *appeal) { // e.g. in a rule like below // rule AA : BB + CC + ZEROORONE(xxx) // If ZEROORONE(xxx) doesn't match anything, it sets subtable_succ_tokens to 0. However -// rule AA matches multiple tokens. So final_succ_tokens needs to be calculated carefully. +// rule AA matches 'BB + CC'. So final_succ_tokens needs to be calculated carefully. // 4. We are going to take succ match info from SuccMatch, not from a specific // AppealNode. SuccMatch has the complete info. // @@ -1215,22 +1692,41 @@ bool Parser::TraverseConcatenate(RuleTable *rule_table, AppealNode *appeal) { SmallVector prev_succ_tokens; SmallVector subtable_succ_tokens; - SmallVector final_succ_tokens; unsigned saved_mCurToken = mCurToken; + // prepare the prev_succ_tokens[_num] for the 1st iteration. 
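+ // prev_succ_tokens records every token index at which the sub-rules matched
+ // so far may end; seeding it with mCurToken - 1 stands for "nothing matched yet".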
int last_matched = mCurToken - 1; + prev_succ_tokens.PushBack(last_matched); - // prepare the prev_succ_tokens[_num] for the 1st iteration. - prev_succ_tokens.PushBack(mCurToken - 1); + // This is regarding the matching of Alternative Tokens. For example, + // a> + // >> needs to be matched as two '>'. + // mInAltTokensMatching is used to control if we are in the middle of matching such tokens. + // However, there is a complicated case, that is "c>", which could be matched as part + // RelationshipExpression, and it turned on mInAltTokensMatching. But it actually fails + // to be a RelationshipExpression and mInAltTokensMatching should be turned off. + bool turned_on_AltToken = false; for (unsigned i = 0; i < rule_table->mNum; i++) { - bool is_zeroxxx = false; + bool is_zeroxxx = false; // If the table is Zeroorxxx(), or NoLineTerminator. + bool no_line_term = false; // If the table is NoLineTerminator + bool no_line_term_met = false; // If the table is NoLineTerminator and token is no line term. + bool is_asi = false; + bool is_token = false; + bool old_mInAltTokensMatching = mInAltTokensMatching; + TableData *data = rule_table->mData + i; if (data->mType == DT_Subtable) { - RuleTable *zero_rt = data->mData.mEntry; - if (zero_rt->mType == ET_Zeroormore || zero_rt->mType == ET_Zeroorone) + RuleTable *curr_rt = data->mData.mEntry; + if (curr_rt == &TblNoLineTerminator) + no_line_term = true; + if (curr_rt->mType == ET_Zeroormore || curr_rt->mType == ET_Zeroorone) is_zeroxxx = true; + if (curr_rt->mType == ET_ASI) + is_asi = true; + } else if (data->mType == DT_Token) { + is_token = true; } SmallVector carry_on_prev; @@ -1250,49 +1746,56 @@ bool Parser::TraverseConcatenate(RuleTable *rule_table, AppealNode *appeal) { AppealNode *child = NULL; bool temp_found = TraverseTableData(data, appeal, child); + if (child) + child->SetChildIndex(i); found_subtable |= temp_found; - if (temp_found) { - bool duplicated_with_prev = false; - for (unsigned id = 0; id < child->GetMatchNum(); id++) { - unsigned match = child->GetMatch(id); - if (!subtable_succ_tokens.Find(match)) - subtable_succ_tokens.PushBack(match); - if (match == prev) - duplicated_with_prev = true; + if (temp_found ) { + if (child) { + for (unsigned id = 0; id < child->GetMatchNum(); id++) { + unsigned match = child->GetMatch(id); + if (!subtable_succ_tokens.Find(match)) + subtable_succ_tokens.PushBack(match); + } + } else if (is_asi) { + // ASI succeeded, without child. It means semicolon is skipped. + // Keep prev. NO moving mCurToken. + subtable_succ_tokens.PushBack(prev); } - - // for Zeroorone/Zeroormore node it always returns true. NO matter how - // many tokens it really matches, 'zero' is also a correct match. we - // need take it into account so that the next rule table can try - // on it. [Except it's a duplication] - if (is_zeroxxx && !duplicated_with_prev) - carry_on_prev.PushBack(prev); } } - // Update the final_succ_tokens - // Please read comment 3 before this function. - if (!is_zeroxxx) - final_succ_tokens.Clear(); - - prev_succ_tokens.Clear(); + if ((prev_succ_tokens.GetNum() == 1) && no_line_term) { + unsigned prev = prev_succ_tokens.ValueAtIndex(0); + Token *t = GetActiveToken(prev + 1); + if (!t->mLineBegin) + no_line_term_met = true; + } - if (found_subtable) { + // for Zeroorone/Zeroormore node it always returns true. NO matter how + // many tokens it really matches, 'zero' is also a correct match. we + // need take it into account so that the next rule table can try + // on it. 
+ if (!is_zeroxxx && !no_line_term_met) + prev_succ_tokens.Clear(); + + // is_zeroxxx seems redundant because the traversal should always be true. + // However, it's not true. In mLineMode, mEndOfFile could be set before + // traversing this ZEROORXXX table. It will return false. + // Since we do treat this case as success, so is_zeroxxx is included in this + // condition expression. + if (found_subtable || is_zeroxxx) { for (unsigned id = 0; id < subtable_succ_tokens.GetNum(); id++) { unsigned token = subtable_succ_tokens.ValueAtIndex(id); if (!prev_succ_tokens.Find(token)) prev_succ_tokens.PushBack(token); - if (!final_succ_tokens.Find(token)) - final_succ_tokens.PushBack(token); } - for (unsigned id = 0; id < carry_on_prev.GetNum(); id++) { - unsigned token = carry_on_prev.ValueAtIndex(id); - if (!prev_succ_tokens.Find(token)) - prev_succ_tokens.PushBack(token); + // alert mInAltTokensMatching is turned on + if (is_token && mInAltTokensMatching && !old_mInAltTokensMatching) { + turned_on_AltToken = true; } } else { - // Once a single child rule fails, the 'appeal' fails. + // Once a child rule fails, the 'appeal' fails. found = false; break; } @@ -1301,9 +1804,10 @@ bool Parser::TraverseConcatenate(RuleTable *rule_table, AppealNode *appeal) { mCurToken = saved_mCurToken; if (found) { - for (unsigned id = 0; id < final_succ_tokens.GetNum(); id++) { - unsigned token = final_succ_tokens.ValueAtIndex(id); - appeal->AddMatch(token); + for (unsigned id = 0; id < prev_succ_tokens.GetNum(); id++) { + unsigned token = prev_succ_tokens.ValueAtIndex(id); + if (token != last_matched) + appeal->AddMatch(token); } // mCurToken doesn't have much meaning in current algorithm when // transfer to the next rule table, because the next rule will take @@ -1314,6 +1818,10 @@ bool Parser::TraverseConcatenate(RuleTable *rule_table, AppealNode *appeal) { appeal->mResult = Succ; if (appeal->GetMatchNum() > 0) mCurToken = appeal->LongestMatch() + 1; + } else if (turned_on_AltToken) { + mInAltTokensMatching = false; + if (mTraceTable) + std::cout << "Turned Off mInAltTokensMatching." << std::endl; } return found; @@ -1325,8 +1833,24 @@ bool Parser::TraverseConcatenate(RuleTable *rule_table, AppealNode *appeal) { // Oneof and Zeroormore should be handled differently. // 3. The mCurToken moves if found target, or restore the original location. bool Parser::TraverseTableData(TableData *data, AppealNode *appeal, AppealNode *&child_node) { - if (mEndOfFile) - return false; + // Usually mCurToken is a new token to be matched. So if it's end of file, we simply return false. + // However, (1) if mCurToken is actually an ATMToken, which means it needs to be matched + // multiple times, we are NOT at the end yet. + // (2) If we are traverse a Concatenate rule, and the previous sub-rule has multiple matches, + // and we are trying the current sub-rule, ie. 'data', using one of the matches. + // The lexer actually reaches the EndOfFile in previous matchings, but the mCurToken + // we are working on right now is not the last token. It's one of the previous matches. + // So we need check if we are matching the last token. 
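TraverseConcatenate above threads a set of "matched up to token N" positions through the sub-rules: each sub-rule is retried from every end position left by the previous one, and Zeroorxxx sub-rules also carry the previous positions forward. Here is a condensed sketch of that propagation over plain integer sets, with hypothetical matcher callbacks standing in for rule tables and SuccMatch.

#include <functional>
#include <iostream>
#include <set>
#include <vector>

// A sub-matcher takes a start token index and returns every index it can match
// up to (empty set means failure). 'optional' marks Zeroorone/Zeroormore style
// sub-rules whose previous positions stay alive as "zero tokens matched".
struct SubRule {
  std::function<std::set<int>(int)> match;
  bool optional;
};

// All end positions of the concatenation starting at 'start', or an empty set
// if a mandatory sub-rule fails from every candidate position.
std::set<int> MatchConcatenation(const std::vector<SubRule> &rules, int start) {
  std::set<int> prev_ends{start - 1};               // "matched nothing yet"
  for (const SubRule &r : rules) {
    std::set<int> new_ends;
    for (int prev : prev_ends)
      for (int e : r.match(prev + 1))               // retry from every prev end
        new_ends.insert(e);
    if (r.optional)                                 // zero tokens is also a match
      new_ends.insert(prev_ends.begin(), prev_ends.end());
    if (new_ends.empty())
      return {};                                    // mandatory sub-rule failed
    prev_ends = new_ends;
  }
  prev_ends.erase(start - 1);                       // drop the "nothing matched" seed
  return prev_ends;
}

int main() {
  // rule AA : BB + ZEROORONE(xxx), each sub-rule matching one token here.
  SubRule bb{[](int s) { return std::set<int>{s}; }, false};
  SubRule zo{[](int s) { return std::set<int>{s}; }, true};
  for (int e : MatchConcatenation({bb, zo}, 5))
    std::cout << e << " ";                          // prints "5 6"
  std::cout << std::endl;
  return 0;
}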
+ if (mEndOfFile && mCurToken >= mActiveTokens.GetNum()) { + if (!(mInAltTokensMatching && (mCurToken == mATMToken))) { + if (data->mType == DT_Subtable) { + RuleTable *t = data->mData.mEntry; + if (t->mType == ET_ASI) + return TraverseASI(t, appeal, child_node); + } + return false; + } + } unsigned old_pos = mCurToken; bool found = false; @@ -1334,10 +1858,14 @@ bool Parser::TraverseTableData(TableData *data, AppealNode *appeal, AppealNode * switch (data->mType) { case DT_Char: + MASSERT(0 && "Hit Char in TableData during matching!"); + break; case DT_String: - //MASSERT(0 && "Hit Char/String in TableData during matching!"); - //TODO: Need compare literal. But so far looks like it's impossible to - // have a literal token able to match a string/char in rules. + if (curr_token->IsIdentifier() && + !strncmp(curr_token->GetName(), data->mData.mString, strlen(data->mData.mString)) && + strlen(curr_token->GetName()) == strlen(data->mData.mString) ){ + found = TraverseStringSucc(curr_token, appeal, child_node); + } break; // separator, operator, keywords are generated as DT_Token. // just need check the pointer of token @@ -1369,18 +1897,23 @@ void Parser::SetIsDone(unsigned group_id, unsigned start_token) { bool found = succ->GetStartToken(start_token); if(found) succ->SetIsDone(); - } + } } void Parser::SetIsDone(RuleTable *rt, unsigned start_token) { // We don't save SuccMatch for TblLiteral and TblIdentifier - if((rt == &TblLiteral) || (rt == &TblIdentifier)) + if((rt == &TblLiteral) || + (rt == &TblIdentifier) || + (rt == &TblRegularExpression) || + (rt == &TblTemplateLiteral)) return; SuccMatch *succ = &gSucc[rt->mIndex]; bool found = succ->GetStartToken(start_token); - MASSERT(found); - succ->SetIsDone(); + if (rt != &TblNoLineTerminator) { + MASSERT(found); + succ->SetIsDone(); + } } ///////////////////////////////////////////////////////////////////////////// @@ -1411,19 +1944,33 @@ void Parser::SetIsDone(RuleTable *rt, unsigned start_token) { // get a single tree. ///////////////////////////////////////////////////////////////////////////// -// We don't want to use recursive. So a deque is used here. +// We don't want to use recursion. So a deque is used here. static std::deque to_be_sorted; void Parser::SortOut() { // we remove all failed children, leaving only succ child - std::vector::iterator it = mRootNode->mChildren.begin(); - for (; it != mRootNode->mChildren.end(); it++) { - AppealNode *n = *it; - if (!n->IsFail() && !n->IsNA()) - mRootNode->mSortedChildren.push_back(n); + AppealNode *root = NULL; + if (!mLineMode) { + for (unsigned i = 0; i < mRootNode->mChildren.GetNum(); i++) { + AppealNode *n = mRootNode->mChildren.ValueAtIndex(i); + if (!n->IsFail() && !n->IsNA()) + mRootNode->mSortedChildren.PushBack(n); + } + MASSERT(mRootNode->mSortedChildren.GetNum()==1); + root = mRootNode->mSortedChildren.ValueAtIndex(0); + } else { + // LineMode could have >1 matching children + // Find the longest match + unsigned longest = mRootNode->LongestMatch(); + for (unsigned i = 0; i < mRootNode->mChildren.GetNum(); i++) { + AppealNode *n = mRootNode->mChildren.ValueAtIndex(i); + if (n->LongestMatch() == longest) { + root = n; + mRootNode->mSortedChildren.PushBack(n); + break; + } + } } - MASSERT(mRootNode->mSortedChildren.size()==1); - AppealNode *root = mRootNode->mSortedChildren.front(); // First sort the root. 
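The DT_String case above combines strncmp with a length check, which amounts to an exact text comparison. A tiny equivalent helper, shown only to make that intent explicit (SameText is a hypothetical name):

#include <cassert>
#include <cstring>

// Equivalent to the strncmp + strlen check used for DT_String: true only when
// the identifier text and the rule string are identical, so "target" does not
// match the longer "targeted".
static bool SameText(const char *a, const char *b) {
  return std::strcmp(a, b) == 0;
}

int main() {
  assert(SameText("target", "target"));
  assert(!SameText("target", "targeted"));  // a matching prefix is not enough
  return 0;
}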
RuleTable *table = root->GetTable(); @@ -1432,12 +1979,22 @@ void Parser::SortOut() { MASSERT(succ && "root has no SuccMatch?"); bool found = succ->GetStartToken(root->GetStartIndex()); - // Top level tree can have only one match, otherwise, the language + // In regular parsing, Top level tree can have only one match, otherwise, the language // is ambiguous. + // In LineMode parsing, we are parsing an expression, and it could be multiple matching + // with some partial matchings. We pick the longest matching and it should be the same + // as mActiveTokens.GetNum() meaning it matches all token so far. unsigned match_num = succ->GetMatchNum(); - MASSERT(match_num == 1 && "Top level tree has >1 matches?"); - unsigned match = succ->GetOneMatch(0); + unsigned match = 0; + if (mLineMode) { + match = root->LongestMatch(); + MASSERT(match + 1 == mActiveTokens.GetNum()); + } else { + MASSERT(match_num == 1 && "Top level tree has >1 matches?"); + match = succ->GetOneMatch(0); + } root->SetFinalMatch(match); + root->SetSorted(); to_be_sorted.clear(); @@ -1468,7 +2025,7 @@ void Parser::SortOutNode(AppealNode *node) { // during matching. In SortOut, we simple return. However, when generating IR, // the children have to be created. if (node->mResult == SuccWasSucc) { - MASSERT(node->mChildren.size() == 0); + MASSERT(node->mChildren.GetNum() == 0); return; } @@ -1479,16 +2036,15 @@ void Parser::SortOutNode(AppealNode *node) { RuleTable *rule_table = node->GetTable(); // Table Identifier and Literal don't need sort. - if (rule_table == &TblIdentifier || rule_table == &TblLiteral) + if (rule_table == &TblIdentifier || rule_table == &TblLiteral || rule_table == &TblTemplateLiteral) return; // The lead node of a traversal group need special solution, if they are // simply connect to previous instance(s). if (mRecursionAll.IsLeadNode(rule_table)) { bool connect_only = true; - std::vector::iterator it = node->mChildren.begin(); - for (; it != node->mChildren.end(); it++) { - AppealNode *child = *it; + for (unsigned i = 0; i < node->mChildren.GetNum(); i++) { + AppealNode *child = node->mChildren.ValueAtIndex(i); if (!child->IsTable() || child->GetTable() != rule_table) { connect_only = false; break; @@ -1537,15 +2093,14 @@ void Parser::SortOutRecursionHead(AppealNode *parent) { unsigned parent_match = parent->GetFinalMatch(); //Find the first child having the same match as parent. 
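SortOut above deliberately avoids recursion and drains a std::deque worklist instead. The sketch below shows the same traversal pattern on a generic node type; Node and SortOutAll are simplified stand-ins for AppealNode and SortOutNode.

#include <deque>
#include <iostream>
#include <vector>

struct Node {
  int id;
  std::vector<Node*> children;
};

// Worklist walk mirroring to_be_sorted in SortOut(): pop a node, process it,
// then queue its children for later processing. No recursion, so arbitrarily
// deep appeal trees do not grow the call stack.
static void SortOutAll(Node *root) {
  std::deque<Node*> to_be_sorted;
  to_be_sorted.push_back(root);
  while (!to_be_sorted.empty()) {
    Node *n = to_be_sorted.front();
    to_be_sorted.pop_front();
    std::cout << "sorting node " << n->id << std::endl;  // stand-in for SortOutNode(n)
    for (Node *c : n->children)
      to_be_sorted.push_back(c);
  }
}

int main() {
  Node c1{1, {}}, c2{2, {}}, root{0, {&c1, &c2}};
  SortOutAll(&root);   // processes 0, then 1, then 2
  return 0;
}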
- std::vector::iterator it = parent->mChildren.begin(); - for (; it != parent->mChildren.end(); it++) { - AppealNode *child = *it; + for (unsigned i = 0; i < parent->mChildren.GetNum(); i++) { + AppealNode *child = parent->mChildren.ValueAtIndex(i); if (child->IsFail() || child->IsNA()) continue; bool found = child->FindMatch(parent_match); if (found) { to_be_sorted.push_back(child); - parent->mSortedChildren.push_back(child); + parent->mSortedChildren.PushBack(child); child->SetFinalMatch(parent_match); child->SetSorted(); child->SetParent(parent); @@ -1566,9 +2121,8 @@ void Parser::SortOutOneof(AppealNode *parent) { unsigned parent_match = parent->GetFinalMatch(); unsigned good_children = 0; - std::vector::iterator it = parent->mChildren.begin(); - for (; it != parent->mChildren.end(); it++) { - AppealNode *child = *it; + for (unsigned i = 0; i < parent->mChildren.GetNum(); i++) { + AppealNode *child = parent->mChildren.ValueAtIndex(i); if (child->IsFail() || child->IsNA()) continue; @@ -1583,14 +2137,14 @@ void Parser::SortOutOneof(AppealNode *parent) { child->SetFinalMatch(parent_match); child->SetParent(parent); good_children++; - parent->mSortedChildren.push_back(child); + parent->mSortedChildren.PushBack(child); } } else { bool found = child->FindMatch(parent_match); if (found) { good_children++; to_be_sorted.push_back(child); - parent->mSortedChildren.push_back(child); + parent->mSortedChildren.PushBack(child); child->SetFinalMatch(parent_match); child->SetSorted(); child->SetParent(parent); @@ -1630,9 +2184,8 @@ void Parser::SortOutZeroormore(AppealNode *parent) { SmallVector sorted_children; while(1) { AppealNode *good_child = NULL; - std::vector::iterator it = parent->mChildren.begin(); - for (; it != parent->mChildren.end(); it++) { - AppealNode *child = *it; + for (unsigned i = 0; i < parent->mChildren.GetNum(); i++) { + AppealNode *child = parent->mChildren.ValueAtIndex(i); if (sorted_children.Find(child)) continue; if (child->IsSucc() && child->FindMatch(last_match)) { @@ -1657,7 +2210,7 @@ void Parser::SortOutZeroormore(AppealNode *parent) { for (int i = sorted_children.GetNum() - 1; i >= 0; i--) { AppealNode *child = sorted_children.ValueAtIndex(i); - parent->mSortedChildren.push_back(child); + parent->mSortedChildren.PushBack(child); if (child->IsTable()) to_be_sorted.push_back(child); } @@ -1682,8 +2235,8 @@ void Parser::SortOutZeroorone(AppealNode *parent) { // 2. If the child is succ, the major work of this loop is to verify the child's SuccMatch is // consistent with parent's. 
- MASSERT((parent->mChildren.size() == 1) && "Zeroorone has >1 valid children?"); - AppealNode *child = parent->mChildren.front(); + MASSERT((parent->mChildren.GetNum() == 1) && "Zeroorone has >1 valid children?"); + AppealNode *child = parent->mChildren.ValueAtIndex(0); if (child->IsFail() || child->IsNA()) return; @@ -1710,7 +2263,7 @@ void Parser::SortOutZeroorone(AppealNode *parent) { } // Finally add the only successful child to mSortedChildren - parent->mSortedChildren.push_back(child); + parent->mSortedChildren.PushBack(child); child->SetParent(parent); } @@ -1742,13 +2295,15 @@ void Parser::SortOutConcatenate(AppealNode *parent) { SmallVector sorted_children; for (int i = rule_table->mNum - 1; i >= 0; i--) { TableData *data = rule_table->mData + i; - AppealNode *child = parent->FindSpecChild(data, last_match); - // It's possible that we find NO child if 'data' is a ZEROORxxx table + AppealNode *child = parent->FindIndexedChild(last_match, i); + // It's possible that we find NO child if 'data' is a ZEROORxxx table or ASI. bool good_child = false; if (!child) { if (data->mType == DT_Subtable) { RuleTable *table = data->mData.mEntry; - if (table->mType == ET_Zeroorone || table->mType == ET_Zeroormore) + if (table->mType == ET_Zeroorone || table->mType == ET_Zeroormore || table == &TblNoLineTerminator) + good_child = true; + if (table->mType == ET_ASI) good_child = true; } MASSERT(good_child); @@ -1769,7 +2324,7 @@ void Parser::SortOutConcatenate(AppealNode *parent) { for (int i = sorted_children.GetNum() - 1; i >= 0; i--) { AppealNode *child = sorted_children.ValueAtIndex(i); - parent->mSortedChildren.push_back(child); + parent->mSortedChildren.PushBack(child); if (child->IsTable()) to_be_sorted.push_back(child); } @@ -1787,21 +2342,21 @@ void Parser::SortOutData(AppealNode *parent) { case DT_Subtable: { // There should be one child node, which represents the subtable. // we just need to add the child node to working list. - MASSERT((parent->mChildren.size() == 1) && "Should have only one child?"); - AppealNode *child = parent->mChildren.front(); + MASSERT((parent->mChildren.GetNum() == 1) && "Should have only one child?"); + AppealNode *child = parent->mChildren.ValueAtIndex(0); child->SetFinalMatch(parent->GetFinalMatch()); child->SetSorted(); to_be_sorted.push_back(child); - parent->mSortedChildren.push_back(child); + parent->mSortedChildren.PushBack(child); child->SetParent(parent); break; } case DT_Token: { // token in table-data created a Child AppealNode // Just keep the child node. Don't need do anything. 
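SortOutConcatenate above reconstructs which child matched which token range by walking the rule elements right to left from the parent's final match, stepping last_match back past each chosen child. Below is a simplified sketch of that selection that ignores the Zeroorxxx/ASI elements which may legitimately have no child; Child and PickSortedChildren are hypothetical stand-ins.

#include <algorithm>
#include <iostream>
#include <vector>

// A successful child of a concatenation: which element it instantiates and
// the token range [start, end] it matched.
struct Child {
  unsigned elem_index;
  unsigned start;
  unsigned end;
};

// Pick one child per element, right to left, so the chosen ranges tile the
// matched tokens exactly, mirroring the loop over rule_table->mNum in
// SortOutConcatenate.
std::vector<Child> PickSortedChildren(const std::vector<Child> &candidates,
                                      unsigned num_elems, unsigned last_match) {
  std::vector<Child> picked;
  for (int i = num_elems - 1; i >= 0; i--) {
    for (const Child &c : candidates) {
      if (c.elem_index == (unsigned)i && c.end == last_match) {
        picked.push_back(c);
        last_match = c.start - 1;   // the element to the left must end here
        break;
      }
    }
  }
  std::reverse(picked.begin(), picked.end());
  return picked;
}

int main() {
  // rule AA : BB + CC, where BB matched tokens [1,2] and CC matched [3,5];
  // a shorter CC attempt [3,4] is present but not chosen.
  std::vector<Child> cands = {{0, 1, 2}, {1, 3, 5}, {1, 3, 4}};
  for (const Child &c : PickSortedChildren(cands, 2, 5))
    std::cout << "elem " << c.elem_index << " -> [" << c.start << "," << c.end << "]\n";
  return 0;
}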
- AppealNode *child = parent->mChildren.front(); + AppealNode *child = parent->mChildren.ValueAtIndex(0); child->SetFinalMatch(child->GetStartIndex()); - parent->mSortedChildren.push_back(child); + parent->mSortedChildren.PushBack(child); child->SetParent(parent); break; } @@ -1843,12 +2398,9 @@ void Parser::DumpSortOutNode(AppealNode *n) { unsigned dump_id = to_be_dumped_id.front(); to_be_dumped_id.pop_front(); - if (n->mSimplifiedIndex > 0) - std::cout << "[" << dump_id << ":" << n->mSimplifiedIndex<< "] "; - else - std::cout << "[" << dump_id << "] "; + std::cout << "[" << dump_id << ":" << n->GetChildIndex() << "] "; if (n->IsToken()) { - std::cout << "Token" << std::endl; + n->mData.mToken->Dump(); } else { RuleTable *t = n->GetTable(); std::cout << "Table " << GetRuleTableName(t) << "@" << n->GetStartIndex() << ": "; @@ -1856,10 +2408,9 @@ void Parser::DumpSortOutNode(AppealNode *n) { if (n->mResult == SuccWasSucc) std::cout << "WasSucc"; - std::vector::iterator it = n->mSortedChildren.begin(); - for (; it != n->mSortedChildren.end(); it++) { + for (unsigned i = 0; i < n->mSortedChildren.GetNum(); i++) { std::cout << seq_num << ","; - to_be_dumped.push_back(*it); + to_be_dumped.push_back(n->mSortedChildren.ValueAtIndex(i)); to_be_dumped_id.push_back(seq_num++); } std::cout << std::endl; @@ -1905,9 +2456,8 @@ void Parser::FindWasSucc(AppealNode *root) { std::cout << "a token?" << std::endl; } } else { - std::vector::iterator it = node->mSortedChildren.begin(); - for (; it != node->mSortedChildren.end(); it++) - working_list.push_back(*it); + for (unsigned i = 0; i < node->mSortedChildren.GetNum(); i++) + working_list.push_back(node->mSortedChildren.ValueAtIndex(i)); } } return; @@ -1950,7 +2500,7 @@ void Parser::FindPatchingNodes() { // This is another entry point of sort, similar as SortOut(). // The only difference is we use 'reference' as the refrence of final match. void Parser::SupplementalSortOut(AppealNode *root, AppealNode *reference) { - MASSERT(root->mSortedChildren.size()==0 && "root should be un-sorted."); + MASSERT(root->mSortedChildren.GetNum()==0 && "root should be un-sorted."); MASSERT(root->IsTable() && "root should be a table node."); // step 1. Find the last matching token index we want. @@ -2010,8 +2560,8 @@ void Parser::PatchWasSucc(AppealNode *root) { // it's the original tree. We don't want to mess it up. Think about it, if you // copy the mChildren to was_succ, there are duplicated tree nodes. This violates // the definition of the original tree. - for (unsigned j = 0; j < patch->mSortedChildren.size(); j++) - was_succ->AddSortedChild(patch->mSortedChildren[j]); + for (unsigned j = 0; j < patch->mSortedChildren.GetNum(); j++) + was_succ->AddSortedChild(patch->mSortedChildren.ValueAtIndex(j)); } } @@ -2031,7 +2581,7 @@ void Parser::PatchWasSucc(AppealNode *root) { void Parser::SimplifySortedTree() { // start with the only child of mRootNode. 
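SimplifySortedTree above repeatedly calls SimplifyShrinkEdges to collapse chains where a node has exactly one sorted child and no rule action needs that child's element index. Here is a small sketch of the collapsing loop on a generic tree, leaving out the recursion lead-node special case; has_action stands in for RuleActionHasElem and TNode is a hypothetical node type.

#include <functional>
#include <iostream>
#include <vector>

struct TNode {
  const char *name;
  unsigned child_index = 0;       // index of this node within its parent's rule
  std::vector<TNode*> children;   // sorted children
};

// Collapse node -> only-child edges as long as no rule action refers to the
// child's element (the index is passed 1-based, as RuleActionHasElem expects).
TNode* ShrinkEdges(TNode *node,
                   const std::function<bool(TNode*, unsigned)> &has_action) {
  while (node->children.size() == 1) {
    TNode *child = node->children[0];
    if (has_action(node, child->child_index + 1))
      break;                                   // an action needs this child, keep the edge
    child->child_index = node->child_index;    // child takes over the parent's slot
    node = child;                              // and replaces it in the chain
  }
  return node;
}

int main() {
  TNode leaf{"BinaryExpr", 1, {}};
  TNode mid{"Expression_sub1", 2, {&leaf}};
  TNode top{"ExpressionStatement", 0, {&mid}};
  // No actions anywhere, so the whole chain collapses down to the leaf.
  TNode *kept = ShrinkEdges(&top, [](TNode*, unsigned) { return false; });
  std::cout << kept->name << std::endl;   // prints "BinaryExpr"
  return 0;
}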
std::deque working_list; - working_list.push_back(mRootNode->mSortedChildren[0]); + working_list.push_back(mRootNode->mSortedChildren.ValueAtIndex(0)); while(!working_list.empty()) { AppealNode *node = working_list.front(); @@ -2043,14 +2593,13 @@ void Parser::SimplifySortedTree() { continue; node = SimplifyShrinkEdges(node); - std::vector::iterator it = node->mSortedChildren.begin(); - for (; it != node->mSortedChildren.end(); it++) { - working_list.push_back(*it); + for (unsigned i = 0; i < node->mSortedChildren.GetNum(); i++) { + working_list.push_back(node->mSortedChildren.ValueAtIndex(i)); } } if (mTraceSortOut) - DumpSortOut(mRootNode->mSortedChildren[0], "Simplify AppealNode Trees"); + DumpSortOut(mRootNode->mSortedChildren.ValueAtIndex(0), "Simplify AppealNode Trees"); } // Reduce an edge is (1) Pred has only one succ @@ -2069,31 +2618,31 @@ AppealNode* Parser::SimplifyShrinkEdges(AppealNode *node) { while(1) { // step 1. Check condition (1) (2) - if (node->mSortedChildren.size() != 1) + if (node->mSortedChildren.GetNum() != 1) break; - AppealNode *child = node->mSortedChildren[0]; + AppealNode *child = node->mSortedChildren.ValueAtIndex(0); // step 2. Find out the index of child, through looking into sub-ruletable or token. - // At this point, there is only one sorted child. - unsigned child_index; - bool found = node->GetSortedChildIndex(child, child_index); - if (!found) { - // There is one case where it cannot find child_index. In the left recursion - // parsing, each instance is connected to its previous one through the lead node. - // The connected two nodes are both lead rule table. We need remove one of them. - // - // In this case we don't worry about action since one of them is kept and the - // actions are kept actually. - RuleTable *rt_p = node->GetTable(); - RuleTable *rt_c = child->GetTable(); - MASSERT((rt_p == rt_c)); - MASSERT(mRecursionAll.IsLeadNode(rt_p)); - } else { + // There is one case where it cannot find child_index. In the left recursion + // parsing, each instance is connected to its previous one through the lead node. + // The connected two nodes are both lead rule table. We need remove one of them. + // + // In this case we don't worry about action since one of them is kept and the + // actions are kept actually. + + bool skip = false; + RuleTable *rt_p = node->GetTable(); + RuleTable *rt_c = child->GetTable(); + if (rt_p == rt_c && mRecursionAll.IsLeadNode(rt_p)) + skip = true; + + unsigned child_index = child->GetChildIndex(); + if (!skip) { // step 3. check condition (3) // [NOTE] in RuleAction, element index starts from 1. RuleTable *rt = node->GetTable(); - bool has_action = RuleActionHasElem(rt, child_index); + bool has_action = RuleActionHasElem(rt, child_index + 1); if (has_action) break; } @@ -2103,14 +2652,8 @@ AppealNode* Parser::SimplifyShrinkEdges(AppealNode *node) { AppealNode *parent = node->GetParent(); parent->ReplaceSortedChild(node, child); - // 1. mRootNode won't have RuleAction, so the index is never used. - // 2. 'index' just need be calculated once, at the first ancestor which is 'node' - // transferred into this function. - if (parent != mRootNode && index == 0) { - found = parent->GetSortedChildIndex(node, index); - MASSERT(found && "Could not find child index?"); - } - child->mSimplifiedIndex = index; + index = node->GetChildIndex(); + child->SetChildIndex(index); // step 5. 
keep going node = child; @@ -2123,20 +2666,20 @@ AppealNode* Parser::SimplifyShrinkEdges(AppealNode *node) { // Build the AST //////////////////////////////////////////////////////////////////////////////////// -ASTTree* Parser::BuildAST() { - ASTTree *tree = new ASTTree(); +TreeNode* Parser::BuildAST() { + mLineModeRoot = NULL; + mNormalModeRoot = NULL; std::stack appeal_stack; - appeal_stack.push(mRootNode->mSortedChildren[0]); + appeal_stack.push(mRootNode->mSortedChildren.ValueAtIndex(0)); // 1) If all children done. Time to create tree node for 'appeal_node' // 2) If some are done, some not. Add the first not-done child to stack while(!appeal_stack.empty()) { AppealNode *appeal_node = appeal_stack.top(); bool children_done = true; - std::vector::iterator it = appeal_node->mSortedChildren.begin(); - for (; it != appeal_node->mSortedChildren.end(); it++) { - AppealNode *child = *it; + for (unsigned i = 0; i < appeal_node->mSortedChildren.GetNum(); i++) { + AppealNode *child = appeal_node->mSortedChildren.ValueAtIndex(i); if (!child->AstCreated()) { appeal_stack.push(child); children_done = false; @@ -2147,24 +2690,219 @@ ASTTree* Parser::BuildAST() { if (children_done) { // Create tree node when there is a rule table, or meanful tokens. MASSERT(!appeal_node->GetAstTreeNode()); - TreeNode *sub_tree = tree->NewTreeNode(appeal_node); + TreeNode *sub_tree = NewTreeNode(appeal_node); if (sub_tree) { appeal_node->SetAstTreeNode(sub_tree); - // mRootNode is overwritten each time until the last one which is + // mNormalModeRoot is overwritten each time until the last one which is // the real root node. - tree->mRootNode = sub_tree; + mNormalModeRoot = sub_tree; } - // pop out the 'appeal_node' appeal_node->SetAstCreated(); appeal_stack.pop(); } } - if (!tree->mRootNode) - MERROR("We got a statement failed to create AST!"); + // The tree could be an empty statement like: ; + + if (mLineMode) + mLineModeRoot = mNormalModeRoot; + + return mNormalModeRoot; +} + +// Create tree node. Its children have been created tree nodes. +// There are couple issueshere. +// +// 1. An sorted AppealNode could have NO tree node, because it may have NO RuleAction to +// create the sub tree. This happens if the RuleTable is just a temporary intermediate +// table created by Autogen, or its rule is just ONEOF without real syntax. Here +// is an example. +// +// The AST after BuildAST() for a simple statment: c=a+b; +// +// ======= Simplify Trees Dump SortOut ======= +// [1] Table TblExpressionStatement@0: 2,3, +// [2:1] Table TblAssignment@0: 4,5,6, +// [3] Token +// [4:1] Token +// [5:2] Token +// [6:3] Table TblArrayAccess_sub1@2: 7,8, <-- supposed to get a binary expression +// [7:1] Token <-- a +// [8:2] Table TblUnaryExpression_sub1@3: 9,10, <-- +b +// [9] Token +// [10:2] Token +// +// Node [1] won't have a tree node at all since it has no Rule Action attached. +// Node [6] won't have a tree node either. +// +// 2. A binary operation like a+b could be parsed as (1) expression: a, and (2) a +// unary operation: +b. This is because we parse them in favor to ArrayAccess before +// Binary Operation. Usually to handle this issue, in some system like ANTLR, +// they require you to list the priority, by writing rules from higher priority to +// lower priority. +// +// We are going to do a consolidation of the sub-trees, by converting smaller trees +// to a more compact bigger trees. However, to do this we want to set some rules. +// *) The parent AppealNode of these sub-trees has no tree node. 
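BuildAST above creates tree nodes bottom-up with an explicit stack: a node is popped, and its TreeNode built, only after every sorted child has been handled. The sketch below shows that post-order pattern with simplified stand-ins for AppealNode and the AST builder.

#include <iostream>
#include <stack>
#include <string>
#include <vector>

struct Appeal {
  std::string name;
  std::vector<Appeal*> children;
  bool ast_created = false;
};

// Explicit-stack post-order walk: push the first not-yet-done child, or, when
// every child is done, "create the tree node" and pop.
void BuildBottomUp(Appeal *root) {
  std::stack<Appeal*> work;
  work.push(root);
  while (!work.empty()) {
    Appeal *n = work.top();
    bool children_done = true;
    for (Appeal *c : n->children) {
      if (!c->ast_created) {
        work.push(c);            // handle this child first
        children_done = false;
        break;
      }
    }
    if (children_done) {
      std::cout << "create AST node for " << n->name << std::endl;
      n->ast_created = true;     // stand-in for SetAstCreated()
      work.pop();
    }
  }
}

int main() {
  Appeal a{"a"}, b{"b"}, plus{"+", {&a, &b}}, stmt{"stmt", {&plus}};
  BuildBottomUp(&stmt);          // prints a, b, +, stmt: children before parents
  return 0;
}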
So the conversion +// helps make the tree complete. + +TreeNode* Parser::NewTreeNode(AppealNode *appeal_node) { + TreeNode *sub_tree = NULL; + + if (appeal_node->IsToken()) { + sub_tree = mASTBuilder->CreateTokenTreeNode(appeal_node->GetToken()); + return sub_tree; + } + + RuleTable *rule_table = appeal_node->GetTable(); + + for (unsigned i = 0; i < rule_table->mNumAction; i++) { + Action *action = rule_table->mActions + i; + mASTBuilder->mActionId = action->mId; + mASTBuilder->ClearParams(); + + for (unsigned j = 0; j < action->mNumElem; j++) { + // find the appeal node child + unsigned elem_idx = action->mElems[j]; + AppealNode *child = appeal_node->GetSortedChild(elem_idx - 1); + Param p; + p.mIsEmpty = true; + // There are 3 cases to handle. + // 1. child is token, we pass the token to param. + // 2. child is a sub appeal tree, but has no legal AST tree. For example, + // a parameter list: '(' + param-lists + ')'. + // if param-list is empty, it has no AST tree. + // In this case, we sset mIsEmpty to true. + // 3. chidl is a sub appeal tree, and has a AST tree too. + if (child) { + TreeNode *tree_node = child->GetAstTreeNode(); + if (!tree_node) { + if (child->IsToken()) { + p.mIsEmpty = false; + p.mIsTreeNode = false; + p.mData.mToken = child->GetToken(); + } + } else { + p.mIsEmpty = false; + p.mIsTreeNode = true; + p.mData.mTreeNode = tree_node; + } + } + mASTBuilder->AddParam(p); + } + + // For multiple actions of a rule, there should be only action which create tree. + // The others are just for adding attribute or else, and return the same tree + // with additional attributes. + sub_tree = mASTBuilder->Build(); + } + + if (sub_tree) + return sub_tree; + + // It's possible that the Rule has no action, meaning it cannot create tree node. + // Now we have to do some manipulation. Please check if you need all of them. + sub_tree = Manipulate(appeal_node); + + // It's possible that the sub tree is actually empty. For example, in a Parameter list + // ( params ). If 'params' is empty, it returns NULL. + + return sub_tree; +} + +// It's possible that we get NULL tree. +TreeNode* Parser::Manipulate(AppealNode *appeal_node) { + TreeNode *sub_tree = NULL; + + std::vector child_trees; + for (unsigned i = 0; i < appeal_node->mSortedChildren.GetNum(); i++) { + AppealNode *a_node = appeal_node->mSortedChildren.ValueAtIndex(i); + TreeNode *t_node = a_node->GetAstTreeNode(); + if (t_node) + child_trees.push_back(t_node); + } + + // If we have one and only one child's tree node, we take it. + if (child_trees.size() == 1) { + sub_tree = child_trees[0]; + if (sub_tree) + return sub_tree; + else + MERROR("We got a broken AST tree, not connected sub tree."); + } + + // For the tree having two children, there are a few approaches to further + // manipulate them in order to obtain better AST. + // + // 1. There are cases like (type)value, but they are not recoganized as cast. + // Insteand they are seperated into two nodes, one is (type), the other value. + // So we define ParenthesisNode for (type), and build a CastNode over here. + // + // 2. There are cases like a+b could be parsed as "a" and "+b", a symbol and a + // unary operation. However, we do prefer binary operation than unary. So a + // combination is needed here, especially when the parent node is NULL. 
+ if (child_trees.size() == 2) { + TreeNode *child_a = child_trees[0]; + TreeNode *child_b = child_trees[1]; + + sub_tree = Manipulate2Cast(child_a, child_b); + if (sub_tree) + return sub_tree; + } + + // In the end, if we still have no suitable solution to create the tree, + // we will put subtrees into a PassNode to pass to parent. + if (child_trees.size() > 0) { + PassNode *pass = (PassNode*)BuildPassNode(); + std::vector::iterator child_it = child_trees.begin(); + for (; child_it != child_trees.end(); child_it++) + pass->AddChild(*child_it); + return pass; + } + + // It's possible that we get a Null tree. + return sub_tree; +} - return tree; +TreeNode* Parser::Manipulate2Cast(TreeNode *child_a, TreeNode *child_b) { + if (child_a->IsParenthesis()) { + ParenthesisNode *type = (ParenthesisNode*)child_a; + CastNode *n = (CastNode*)gTreePool.NewTreeNode(sizeof(CastNode)); + new (n) CastNode(); + n->SetDestType(type->GetExpr()); + n->SetExpr(child_b); + return n; + } + return NULL; +} + +TreeNode* Parser::Manipulate2Binary(TreeNode *child_a, TreeNode *child_b) { + if (child_b->IsUnaOperator()) { + UnaOperatorNode *unary = (UnaOperatorNode*)child_b; + unsigned property = GetOperatorProperty(unary->GetOprId()); + if ((property & Binary) && (property & Unary)) { + std::cout << "Convert unary --> binary" << std::endl; + TreeNode *unary_sub = unary->GetOpnd(); + TreeNode *binary = BuildBinaryOperation(child_a, unary_sub, unary->GetOprId()); + return binary; + } + } + return NULL; +} + +TreeNode* Parser::BuildBinaryOperation(TreeNode *childA, TreeNode *childB, OprId id) { + BinOperatorNode *n = (BinOperatorNode*)gTreePool.NewTreeNode(sizeof(BinOperatorNode)); + new (n) BinOperatorNode(id); + n->SetOpndA(childA); + n->SetOpndB(childB); + return n; +} + +TreeNode* Parser::BuildPassNode() { + PassNode *n = (PassNode*)gTreePool.NewTreeNode(sizeof(PassNode)); + new (n) PassNode(); + return n; } //////////////////////////////////////////////////////////////////////////// @@ -2280,8 +3018,8 @@ bool SuccMatch::IsDone() { void AppealNode::AddParent(AppealNode *p) { if (!mParent || mParent->IsPseudo()) mParent = p; - else - mSecondParents.PushBack(p); + //else + // mSecondParents.PushBack(p); return; } @@ -2324,47 +3062,11 @@ void AppealNode::CopyMatch(AppealNode *another) { mResult = another->mResult; } -// return true if 'parent' is a parent of this. 
-bool AppealNode::DescendantOf(AppealNode *parent) { - AppealNode *node = mParent; - while (node) { - if (node == parent) - return true; - node = node->mParent; - } - return false; -} - -// Returns true, if both nodes are successful and match the same tokens -// with the same rule table -bool AppealNode::SuccEqualTo(AppealNode *other) { - if (IsSucc() && other->IsSucc() && mStartIndex == other->GetStartIndex()) { - if (IsToken() && other->IsToken()) { - return GetToken() == other->GetToken(); - } else if (IsTable() && other->IsTable()) { - return GetTable() == other->GetTable(); - } - } - return false; -} - -void AppealNode::RemoveChild(AppealNode *child) { - std::vector temp_vector; - std::vector::iterator it = mChildren.begin(); - for (; it != mChildren.end(); it++) { - if (*it != child) - temp_vector.push_back(*it); - } - - mChildren.clear(); - mChildren.assign(temp_vector.begin(), temp_vector.end()); -} - void AppealNode::ReplaceSortedChild(AppealNode *existing, AppealNode *replacement) { unsigned index; bool found = false; - for (unsigned i = 0; i < mSortedChildren.size(); i++) { - if (mSortedChildren[i] == existing) { + for (unsigned i = 0; i < mSortedChildren.GetNum(); i++) { + if (mSortedChildren.ValueAtIndex(i) == existing) { index = i; found = true; break; @@ -2372,126 +3074,32 @@ void AppealNode::ReplaceSortedChild(AppealNode *existing, AppealNode *replacemen } MASSERT(found && "ReplaceSortedChild could not find existing node?"); - mSortedChildren[index] = replacement; + *(mSortedChildren.RefAtIndex(index)) = replacement; replacement->SetParent(this); } -// Returns true : if successfully found the index. -// [NOTE] This is the index in the Rule Spec description, which are used in the -// building of AST. So remember it starts from 1. -// -// The AppealNode tree has many messy nodes generated during second try, or others. -// It's not a good idea to find the index through the tree. The final real solution -// is to go through the RuleTable and locate the child's index. -bool AppealNode::GetSortedChildIndex(AppealNode *child, unsigned &index) { - bool found = false; - MASSERT(IsTable() && "Parent node is not a RuleTable"); - RuleTable *rule_table = GetTable(); - - // In SimplifyShrinkEdge, the tree could be simplified and a node could be given an index - // to his ancestor. - if (child->mSimplifiedIndex != 0) { - index = child->mSimplifiedIndex; - return true; - } - - // If the edge is not shrinked, we just look into the rule tabls or tokens. 
- for (unsigned i = 0; i < rule_table->mNum; i++) { - TableData *data = rule_table->mData + i; - switch (data->mType) { - case DT_Token: { - Token *t = &gSystemTokens[data->mData.mTokenId]; - if (child->IsToken() && child->GetToken() == t) { - found = true; - index = i+1; - } - break; - } - case DT_Subtable: { - RuleTable *t = data->mData.mEntry; - if (t == &TblIdentifier) { - if (child->IsToken()) { - Token *token = child->GetToken(); - if (token->IsIdentifier()) { - found = true; - index = i+1; - } - } - } else if (t == &TblLiteral) { - if (child->IsToken()) { - Token *token = child->GetToken(); - if (token->IsLiteral()) { - found = true; - index = i+1; - } - } - } else if (child->IsTable() && child->GetTable() == t) { - found = true; - index = i+1; - } - break; - } - case DT_String: - case DT_Char: - break; - default: - MASSERT(0 && "Unknown entry in TableData"); - break; - } - } - - return found; -} - -AppealNode* AppealNode::GetSortedChildByIndex(unsigned index) { - std::vector::iterator it = mSortedChildren.begin(); - for (; it != mSortedChildren.end(); it++) { - AppealNode *child = *it; - unsigned id = 0; - bool found = GetSortedChildIndex(child, id); - MASSERT(found && "sorted child has no index.."); +AppealNode* AppealNode::GetSortedChild(unsigned index) { + for (unsigned i = 0; i < mSortedChildren.GetNum(); i++) { + AppealNode *child = mSortedChildren.ValueAtIndex(i); + unsigned id = child->GetChildIndex(); if (id == index) return child; } return NULL; } -// Look for a specific un-sorted child having the ruletable/token and match. -// There could be multiple, but we return the first good one. -AppealNode* AppealNode::FindSpecChild(TableData *tdata, unsigned match) { +// Look for a specific un-sorted child having the child index and match. +AppealNode* AppealNode::FindIndexedChild(unsigned match, unsigned index) { AppealNode *ret_child = NULL; - - std::vector::iterator it = mChildren.begin(); - for (; it != mChildren.end(); it++) { - AppealNode *child = *it; - if (child->IsSucc() && child->FindMatch(match)) { - switch (tdata->mType) { - case DT_Subtable: { - RuleTable *child_rule = tdata->mData.mEntry; - if (child->IsTable() && child->GetTable() == child_rule) - ret_child = child; - // Literal and Identifier are treated as token. - if (child->IsToken() && (child_rule == &TblLiteral || child_rule == &TblIdentifier)) - ret_child = child; - break; - } - case DT_Token: { - Token *token = &gSystemTokens[tdata->mData.mTokenId]; - if ( child->IsToken() && - ((child->GetToken() == token) || (child->mAltToken == token))) - ret_child = child; - break; - } - case DT_Char: - case DT_String: - case DT_Type: - case DT_Null: - default: - break; - } + for (unsigned i = 0; i < mChildren.GetNum(); i++) { + AppealNode *child = mChildren.ValueAtIndex(i); + if (child->IsSucc() && + child->FindMatch(match) && + (index == child->GetChildIndex())) { + ret_child = child; + break; } } - return ret_child; } diff --git a/src/MapleFE/shared/src/parser_rec.cpp b/src/MapleFE/shared/src/parser_rec.cpp index d5fe65357bf228b316554bd64741c7c55d291d99..615b78d63a065ed6bb1798dcee738794a1654ae6 100644 --- a/src/MapleFE/shared/src/parser_rec.cpp +++ b/src/MapleFE/shared/src/parser_rec.cpp @@ -21,7 +21,7 @@ #include "parser.h" #include "parser_rec.h" #include "ruletable_util.h" -#include "gen_summary.h" +#include "rule_summary.h" namespace maplefe { @@ -182,7 +182,7 @@ bool RecursionTraversal::ConnectPrevious(AppealNode *curr_node) { // will handle the multiple parents issue. 
curr_node->AddChild(prev_lead); prev_lead->AddParent(curr_node); - + // there should be only one match. MASSERT(!found); found = true; @@ -217,8 +217,12 @@ bool RecursionTraversal::FindInstances() { mVisitedRecursionNodes.Clear(); mLeadNodes.Clear(); - // Find the instance + // Find the instance. + // Remember to reset mEndOfFile since the prev instance could reach the end of file + // We need start from the beginning. mParser->mCurToken = saved_mCurToken; + if (mParser->mEndOfFile) + mParser->mEndOfFile = false; temp_found = FindRestInstance(); } @@ -251,9 +255,9 @@ bool RecursionTraversal::FindFirstInstance() { // Appealing of the mistaken Fail nodes. // - // This is for appealing those affected by the 1st appearance - // of 1st instance which returns false. 1stOf1st is not add to WasFail, but - // those affected will be added to WasFail. + // This is for appealing those affected by the 2nd appearance + // of 2nd instance which returns false. 2ndOf1st is not add to WasFail, but + // those affected will be AddFailed(). // // I still keep an assertion in TraverseRuleTablePre() when it has SuccMatch, // asserting !WasFail. But I believe there are still WasFail at the same time. @@ -281,7 +285,6 @@ bool RecursionTraversal::FindRestInstance() { AppealNode *lead = new AppealNode(); lead->SetStartIndex(mStartToken); lead->SetTable(mRuleTable); - mParser->mAppealNodes.push_back(lead); AddLeadNode(lead); AddVisitedLeadNode(mRuleTable); diff --git a/src/MapleFE/shared/src/recursion.cpp b/src/MapleFE/shared/src/recursion.cpp index dc08a2d1a283582ea43824d381d14ee87ed4b7dd..c67f4e88ccd95427ea286ce43368c5c4d38e1652 100644 --- a/src/MapleFE/shared/src/recursion.cpp +++ b/src/MapleFE/shared/src/recursion.cpp @@ -20,8 +20,7 @@ ///////////////////////////////////////////////////////////////////////////////////// #include "recursion.h" -#include "gen_summary.h" -#include "gen_token.h" +#include "rule_summary.h" #include "token.h" namespace maplefe { @@ -275,7 +274,7 @@ void Recursion::FindFronNodes(unsigned circle_index) { case ET_Oneof: { // Look into every childof 'prev'. If it's not 'next' and // not in 'mRecursionNodes', it's a FronNode. - // + // // [NOTE] This is a per circle algorithm. So a FronNode found here could // be a recursion node in another circle. 
// Actually if it's Recursion node, we can still include it as FronNode, @@ -290,7 +289,7 @@ void Recursion::FindFronNodes(unsigned circle_index) { fnode.mData.mToken = &gSystemTokens[data->mData.mTokenId]; fron_nodes->PushBack(fnode); //std::cout << " Token " << data->mData.mToken->GetName() << std::endl; - } else if (data->mType = DT_Subtable) { + } else if (data->mType == DT_Subtable) { RuleTable *ruletable = data->mData.mEntry; bool found = IsRecursionNode(ruletable); if (!found && (ruletable != next)) { diff --git a/src/MapleFE/shared/src/ruletable_util.cpp b/src/MapleFE/shared/src/ruletable_util.cpp index 3b6049e2430cfbd3e7cd29d0065d2c57c224a0f5..65dc85557fe634d3dece1f2f5ae82b1f5d576f18 100644 --- a/src/MapleFE/shared/src/ruletable_util.cpp +++ b/src/MapleFE/shared/src/ruletable_util.cpp @@ -16,9 +16,8 @@ #include "ruletable_util.h" #include "lexer.h" -#include "lang_spec.h" #include "massert.h" -#include "common_header_autogen.h" +#include "rule_summary.h" #include "container.h" namespace maplefe { @@ -40,7 +39,7 @@ SepId FindSeparator(const char *str, const char c, unsigned &len) { text = c; unsigned i = 0; - for (; i < SEP_NA; i++) { + for (; i < SepTableSize; i++) { SepTableEntry e = SepTable[i]; if (!strncmp(text.c_str(), e.mText, strlen(e.mText))) { len = strlen(e.mText); @@ -63,7 +62,7 @@ OprId FindOperator(const char *str, const char c, unsigned &len) { text = c; unsigned i = 0; - for (; i < OPR_NA; i++) { + for (; i < OprTableSize; i++) { OprTableEntry e = OprTable[i]; if (!strncmp(text.c_str(), e.mText, strlen(e.mText))) { len = strlen(e.mText); @@ -100,12 +99,20 @@ const char* FindKeyword(const char *str, const char c, unsigned &len) { // Returns true : The rule actions in 'table' involves i-th element // [NOTE] i starts from 1. +// +// If there is an action which has no elem as its argument. It could +// take all element, or any number of element. In this case, we think +// it HasElem. Please look at typescript's stmt.spec. +// rule JSIdentifier: is a good example. +// bool RuleActionHasElem(RuleTable *table, unsigned target_idx) { for (unsigned i = 0; i < table->mNumAction; i++) { Action *act = table->mActions + i; + if (act->mNumElem == 0) + return true; for (unsigned j = 0; j < act->mNumElem; j++) { unsigned index = act->mElems[j]; - if (index = target_idx) + if (index == target_idx) return true; } } diff --git a/src/MapleFE/shared/src/stringmap.cpp b/src/MapleFE/shared/src/stringmap.cpp index 00103b1d35155b00cd0021371cf6b1b935d18b0d..4327e40f7d99d464907d6c7d18599e14d7cca561 100644 --- a/src/MapleFE/shared/src/stringmap.cpp +++ b/src/MapleFE/shared/src/stringmap.cpp @@ -40,17 +40,17 @@ StringMap::~StringMap() { temp = entry->Next; delete entry; entry = temp; - } + } } delete [] mBuckets; } void StringMap::Init(unsigned Num) { - MASSERT((Num & (Num-1)) == 0 && - "Init Size must be a power of 2 or zero!"); - mNumBuckets = Num ? Num : DEFAULT_BUCKETS_NUM; - + MASSERT((Num & (Num-1)) == 0 && + "Init Size must be a power of 2 or zero!"); + mNumBuckets = Num ? Num : DEFAULT_BUCKETS_NUM; + mBuckets = new StringMapEntry[mNumBuckets]; StringMapEntry *E = mBuckets; for (unsigned i = 0; i < mNumBuckets; i++, E++) { @@ -60,20 +60,20 @@ void StringMap::Init(unsigned Num) { } // Get the bucket no for 'S'. -unsigned StringMap::BucketNoFor(const std::string &S) { - unsigned HTSize = mNumBuckets; - if (HTSize == 0) { // Hash table unallocated so far? 
- Init(DEFAULT_BUCKETS_NUM); - HTSize = mNumBuckets; - } - unsigned FullHashValue = HashString(S); - unsigned BucketNo = FullHashValue & (HTSize-1); - return BucketNo; -} +unsigned StringMap::BucketNoFor(const std::string &S) { + unsigned HTSize = mNumBuckets; + if (HTSize == 0) { // Hash table unallocated so far? + Init(DEFAULT_BUCKETS_NUM); + HTSize = mNumBuckets; + } + unsigned FullHashValue = HashString(S); + unsigned BucketNo = FullHashValue & (HTSize-1); + return BucketNo; +} // Look up to find the address in the string pool of 'S'. // If 'S' is not in the string pool, insert it. -char* StringMap::LookupAddrFor(const std::string &S) { +StringMapEntry *StringMap::LookupEntryFor(const std::string &S) { unsigned BucketNo = BucketNoFor(S); StringMapEntry *E = &mBuckets[BucketNo]; @@ -82,32 +82,37 @@ char* StringMap::LookupAddrFor(const std::string &S) { if (E && !E->Addr && !E->Next) { char *addr = mPool->Alloc(S); E->Addr = addr; - return addr; + E->StrIdx = mPool->mStringTable.size(); + mPool->mStringTable.push_back(addr); + return E; } - + while (E && E->Addr) { - if (S.compare(E->Addr) == 0) - return E->Addr; + if (S.compare(E->Addr) == 0) { + return E; + } E = E->Next; } - - // We cannot find an existing string for 'S'. Need to allocate - char *Addr = mPool->Alloc(S); - InsertEntry(Addr, BucketNo); - return Addr; -} + // We cannot find an existing string for 'S'. Need to allocate + char *addr = mPool->Alloc(S); + unsigned idx = mPool->mStringTable.size(); + mPool->mStringTable.push_back(addr); + E = InsertEntry(addr, idx, BucketNo); + return E; +} // Add a new entry in 'bucket'. // 'addr' is the address in the string pool -void StringMap::InsertEntry(char *addr, unsigned bucket) { +StringMapEntry *StringMap::InsertEntry(char *addr, unsigned idx, unsigned bucket) { StringMapEntry *E = &mBuckets[bucket]; while (E->Next) { E = E->Next; } - StringMapEntry *NewEnt = new StringMapEntry(addr); + StringMapEntry *NewEnt = new StringMapEntry(addr, idx); E->Next = NewEnt; + return NewEnt; } } diff --git a/src/MapleFE/shared/src/stringpool.cpp b/src/MapleFE/shared/src/stringpool.cpp index eb5d6b6861ee0531272edd233e1e58c55078ab9b..3b6e292486e4199e33c3347bc6bc941298d08b41 100644 --- a/src/MapleFE/shared/src/stringpool.cpp +++ b/src/MapleFE/shared/src/stringpool.cpp @@ -1,5 +1,5 @@ /* -* Copyright (C) [2020] Futurewei Technologies, Inc. All rights reverved. +* Copyright (C) [2020-2022] Futurewei Technologies, Inc. All rights reverved. * * OpenArkFE is licensed under the Mulan PSL v2. * You can use this software according to the terms and conditions of the Mulan PSL v2. 
@@ -20,10 +20,10 @@ #include #include +#include #include "stringpool.h" #include "stringmap.h" -#include "massert.h" namespace maplefe { @@ -32,9 +32,14 @@ namespace maplefe { StringPool gStringPool; StringPool::StringPool() { + mUseAltStr = false; mMap = new StringMap(); mMap->SetPool(this); mFirstAvail = -1; + // make string idx starting from 1 + mStringTable.push_back(""); + // empty string idx is 1 + mStringTable.push_back(""); } StringPool::~StringPool() { @@ -45,7 +50,16 @@ StringPool::~StringPool() { char *addr = block.Addr; free(addr); } - + mStringTable.clear(); + + // Release the long strings + std::vector::iterator long_it; + for (long_it = mLongStrings.begin(); long_it != mLongStrings.end(); long_it++) { + char *addr = *long_it; + free(addr); + } + mLongStrings.clear(); + // Release the StringMap delete mMap; } @@ -57,11 +71,14 @@ char* StringPool::Alloc(const std::string &s) { // 's' must guarantee to end with NULL char* StringPool::Alloc(const char *s) { size_t size = strlen(s) + 1; + char *addr = NULL; if (size > BLOCK_SIZE) { - MERROR ("Requsted size is bigger than block size"); + addr = (char*)malloc(size); + mLongStrings.push_back(addr); + } else { + addr = Alloc(size); } - char *addr = Alloc(size); MASSERT (addr && "StringPool failed to alloc for string"); strncpy(addr, s, size - 1); @@ -114,24 +131,151 @@ char* StringPool::AllocBlock() { } // This is the public interface to find a string in the pool. -// If not found, add it. +// If not found, allocate in the pool and save in the map. const char* StringPool::FindString(const std::string &s) { - return mMap->LookupAddrFor(s); + return mMap->LookupEntryFor(s)->GetAddr(); } // This is the public interface to find a string in the pool. -// If not found, add it. +// If not found, allocate in the pool and save in the map. const char* StringPool::FindString(const char *str) { std::string s(str); - return mMap->LookupAddrFor(s); + return mMap->LookupEntryFor(s)->GetAddr(); } // This is the public interface to find a string in the pool. -// If not found, add it. +// If not found, allocate in the pool and save in the map. const char* StringPool::FindString(const char *str, size_t len) { std::string s; s.assign(str, len); - return mMap->LookupAddrFor(s); + return mMap->LookupEntryFor(s)->GetAddr(); +} + +// This is the public interface to find a string in the pool. +// If not found, allocate in the pool and save in the map. +unsigned StringPool::GetStrIdx(const std::string &s) { + if (s.empty()) return 1; + return mMap->LookupEntryFor(s)->GetStrIdx(); +} + +// This is the public interface to find a string in the pool. +// If not found, allocate in the pool and save in the map. +unsigned StringPool::GetStrIdx(const char *str) { + if (strlen(str) == 0) return 1; + std::string s(str); + return mMap->LookupEntryFor(s)->GetStrIdx(); +} + +// This is the public interface to find a string in the pool. +// If not found, allocate in the pool and save in the map. 
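The new GetStrIdx interfaces above hand out a stable small integer per interned string, with index 1 reserved for the empty string. Below is a condensed sketch of such an interning table using std::unordered_map; the real pool uses its own StringMap buckets and block allocator.

#include <cassert>
#include <string>
#include <unordered_map>
#include <vector>

// Minimal interning table: every distinct string gets one stable index.
// Index 0 is never handed out and index 1 is the empty string, matching the
// convention in the patch.
class MiniStringPool {
public:
  MiniStringPool() : mTable{"", ""} {}

  unsigned GetStrIdx(const std::string &s) {
    if (s.empty()) return 1;
    auto it = mMap.find(s);
    if (it != mMap.end()) return it->second;   // already interned
    unsigned idx = mTable.size();
    mTable.push_back(s);
    mMap[s] = idx;
    return idx;
  }

  const std::string &GetStringFromStrIdx(unsigned idx) const {
    assert(idx < mTable.size() && "string index out of range");
    return mTable[idx];
  }

private:
  std::vector<std::string> mTable;
  std::unordered_map<std::string, unsigned> mMap;
};

int main() {
  MiniStringPool pool;
  unsigned a = pool.GetStrIdx("foo");
  unsigned b = pool.GetStrIdx("foo");        // same string, same index
  assert(a == b && a == 2);
  assert(pool.GetStrIdx("") == 1);           // empty string is index 1
  assert(pool.GetStringFromStrIdx(a) == "foo");
  return 0;
}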
+unsigned StringPool::GetStrIdx(const char *str, size_t len) { + if (len == 0) return 1; + std::string s; + s.assign(str, len); + return mMap->LookupEntryFor(s)->GetStrIdx(); +} + +const char *StringPool::GetStringFromStrIdx(unsigned idx) { + MASSERT(idx < mStringTable.size() && "string index out of range"); + if (mUseAltStr) { + if (mAltStrIdxMap.find(idx) != mAltStrIdxMap.end()) { + idx = mAltStrIdxMap[idx]; + } + } + return mStringTable[idx]; +} + +// This is the public interface to setup AltStrIdxMap used for obfuscation +// a name is mapped to a fixed length random unused name. +// starting from 2-letter names, [a-zA-Z] [a-zA-Z], which will cover over 2K names +// AA Aa AB Ab, ...., zz +// if not enough will extend to use 3-letter or 4-letter for over 7 million names +void StringPool::SetAltStrIdxMap() { + // starting from 2-letter names + unsigned len = 2; + bool done = false; + + // names use [A-Z] and [a-z] total 52 letters + int k = 52; + + // total number of names can be handled for len = 4, 3, 2, 1 respectively + int Size[4] = {k*k*k*k, k*k*k, k*k, k}; + + // names, trailing '\0' + char A[5] = {0, 0, 0, 0, 0}; + + // names already encounted, either existing name or new names + std::unordered_set used; + + for (auto stridx : mAltStrIdxSet) { + done = false; + while (!done) { + unsigned offset = 4 - len; + int mod = Size[offset]; + + int n = rand(); + int r = n % mod; + + // check if already encounted + if (used.find(r) != used.end()) { + // expand to use one more leter if close to limit + if (used.size() > mod - Size[offset + 1]) { + len++; + MASSERT(len < 5 && "Need more names"); + } + continue; + } + + // have un-encounted name + used.insert(r); + + int q; + bool odd; + int i = 0; + while (i < len - 1) { + mod = Size[offset + 1 + i]; + q = r / mod; + r = r % mod; + + // char, use upper case for odd number + odd = q%2; + A[i++] = (odd ? 'A' : 'a') + q/2; + } + + // last char, use upper case for odd number + odd = r%2; + A[i] = (odd ? 'A' : 'a') + r/2; + + unsigned size = GetSize(); + unsigned alt = GetStrIdx(A); + // make sure alt is a new string + if (alt == size) { + mAltStrIdxMap[stridx] = alt; + done = true; + } + } + } } + +void StringPool::Dump() { + std::cout << "===================== StringTable =====================" << std::endl; + for (unsigned idx = 1; idx < mStringTable.size(); idx++) { + std::cout << " " << idx << " : " << mStringTable[idx] << std::endl; + } +} + +void StringPool::DumpAlt() { + std::cout << "================= Alt String Map ======================" << std::endl; + unsigned count = 0; + for (auto stridx : mAltStrIdxSet) { + unsigned alt = mAltStrIdxMap[stridx]; + std::cout << "count #" << stridx + << " str " << GetStringFromStrIdx(stridx) + << " --> " + << " alt " << GetStringFromStrIdx(alt) + << std::endl; + } +} + } diff --git a/src/MapleFE/shared/src/stringutil.cpp b/src/MapleFE/shared/src/stringutil.cpp index a3d8a5163c4c2831d9cedcd050eab46ff44c4e5f..f524c99a65609df284a42e5d08f4485a56ea8297 100644 --- a/src/MapleFE/shared/src/stringutil.cpp +++ b/src/MapleFE/shared/src/stringutil.cpp @@ -175,42 +175,24 @@ Char StringToValue::StringToChar(std::string &str) { // When parser read it, it's just a plain text file. So it will see 6 characters actually. // The backslash is itself a character. // After parser read this string into buffer, you can see it in gdb is "test\\n". -// -// 4. StringToValue need convert the two characters into an escape character. -// This is what we are doing here. +// Lexer and Parser will keep what they saw. 
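SetAltStrIdxMap above maps each selected string index to a short random replacement name over [A-Za-z], retrying on collisions and growing the name length once the space gets crowded. The sketch below is a loose illustration of that idea; the sample names, the seed, and the growth threshold are illustrative choices, not the patch's exact scheme.

#include <cstdlib>
#include <iostream>
#include <string>
#include <unordered_set>

// Generate a random name of 'len' letters drawn from the 52 letters [A-Za-z].
static std::string RandomShortName(unsigned len) {
  static const char letters[] =
      "aAbBcCdDeEfFgGhHiIjJkKlLmMnNoOpPqQrRsStTuUvVwWxXyYzZ";
  std::string name;
  for (unsigned i = 0; i < len; i++)
    name += letters[std::rand() % 52];
  return name;
}

int main() {
  std::srand(42);
  std::unordered_set<std::string> used = {"max", "min"};  // names already taken
  unsigned len = 2;                                       // start with 2-letter names
  for (const std::string &name : {std::string("longVariableName"),
                                  std::string("anotherLongName")}) {
    // Grow the name length when a large share of the 52^len space is taken.
    if (used.size() * 2 > 52u * 52u) len = 3;
    std::string alt;
    do {
      alt = RandomShortName(len);
    } while (used.count(alt));                            // must be an unused name
    used.insert(alt);
    std::cout << name << " -> " << alt << std::endl;
  }
  return 0;
}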
-const char* StringToValue::StringToString(std::string &str) { +const char* StringToValue::StringToString(std::string &in_str) { std::string target; - for (unsigned i = 0; i < str.size(); i++) { - char c = str[i]; - if ((c == '\\') && (i < str.size() - 1)) { - char c_next = str[i+1]; - char c_target = 0; - if (c_next == 'n') - c_target = '\n'; - else if (c_next == '\\') - c_target = '\\'; - else if (c_next == '\'') - c_target = '\''; - else if (c_next == '\"') - c_target = '\"'; - else if (c_next == 'b') - c_target = '\b'; - else if (c_next == 'f') - c_target = '\f'; - else if (c_next == 'r') - c_target = '\r'; - - if (c_target) { - target += c_target; - i++; - } - } else { - target += c; - } + + // For most languages, the input 'in_str' still contains the leading " or ' and the + // ending " or '. They need to be removed. + std::string str; + + // If empty string literal, return the empty 'target'. + if (in_str.size() == 2) { + const char *s = gStringPool.FindString(target); + return s; + } else { + str.assign(in_str, 1, in_str.size() - 2); } - const char *s = gStringPool.FindString(target); + const char *s = gStringPool.FindString(str); return s; } diff --git a/src/MapleFE/shared/src/token.cpp b/src/MapleFE/shared/src/token.cpp index 38141cea10e3ae09302063c6687fa4ff97b742d3..39b20a15fe084e9007cf60667d4b3233f7e86ce6 100644 --- a/src/MapleFE/shared/src/token.cpp +++ b/src/MapleFE/shared/src/token.cpp @@ -1,5 +1,5 @@ /* -* Copyright (C) [2020] Futurewei Technologies, Inc. All rights reverved. +* Copyright (C) [2020-2022] Futurewei Technologies, Inc. All rights reverved. * * OpenArkFE is licensed under the Mulan PSL v2. * You can use this software according to the terms and conditions of the Mulan PSL v2. @@ -13,8 +13,13 @@ * See the Mulan PSL v2 for more details. 
*/ #include "token.h" +#include "stringpool.h" +#include "rule_summary.h" #include "massert.h" +#include +#include + namespace maplefe { #undef SEPARATOR @@ -29,7 +34,7 @@ const char* SeparatorTokenGetName(SepId id) { void SeparatorTokenDump(SepId id) { const char *name = SeparatorTokenGetName(id); - DUMP1("Separator Token: ", name); + DUMP1_NORETURN("Separator Token: ", name); return; } @@ -45,34 +50,34 @@ const char* OperatorTokenGetName(OprId id) { void OperatorTokenDump(OprId id) { const char *name = OperatorTokenGetName(id); - DUMP1("Operator Token: ", name); + DUMP1_NORETURN("Operator Token: ", name); return; } void LiteralTokenDump(LitData data) { switch (data.mType) { case LT_IntegerLiteral: - DUMP1("Integer Literal Token:", data.mData.mInt); + DUMP1_NORETURN("Integer Literal Token:", data.mData.mInt); break; case LT_FPLiteral: - DUMP1("Floating Literal Token:", data.mData.mFloat); + DUMP1_NORETURN("Floating Literal Token:", data.mData.mFloat); break; case LT_DoubleLiteral: - DUMP1("Double Literal Token:", data.mData.mDouble); + DUMP1_NORETURN("Double Literal Token:", data.mData.mDouble); break; case LT_BooleanLiteral: - DUMP1("Boolean Literal Token:", data.mData.mBool); + DUMP1_NORETURN("Boolean Literal Token:", data.mData.mBool); break; case LT_CharacterLiteral: { Char the_char = data.mData.mChar; if (the_char.mIsUnicode) - DUMP1("Char Literal Token(Unicode):", the_char.mData.mUniValue); + DUMP1_NORETURN("Char Literal Token(Unicode):", the_char.mData.mUniValue); else - DUMP1("Char Literal Token:", the_char.mData.mChar); + DUMP1_NORETURN("Char Literal Token:", the_char.mData.mChar); break; } case LT_StringLiteral: - DUMP1("String Literal Token:", data.mData.mStr); + DUMP1_NORETURN("String Literal Token:", gStringPool.GetStringFromStrIdx(data.mData.mStrIdx)); break; case LT_NullLiteral: DUMP0("Null Literal Token:"); @@ -104,19 +109,128 @@ void Token::Dump() { OperatorTokenDump(mData.mOprId); break; case TT_ID: - DUMP1("Identifier Token: ", mData.mName); + DUMP1_NORETURN("Identifier Token: ", mData.mName); break; case TT_KW: - DUMP1("Keyword Token: ", mData.mName); + DUMP1_NORETURN("Keyword Token: ", mData.mName); break; case TT_CM: DUMP0("Comment Token: "); break; + case TT_TL: + DUMP0("TemplateLiteral Token: "); + break; + case TT_RE: + DUMP1_NORETURN("RegExpr Token: ", mData.mRegExprData.mExpr); + if (mData.mRegExprData.mFlags) + DUMP1_NORETURN(" : ", mData.mRegExprData.mFlags); + break; case TT_LT: LiteralTokenDump(mData.mLitData); break; default: break; } + + DUMP1_NORETURN(" line: ", mLineNum); + DUMP1_NORETURN(" col: ", mColNum); + if (mLineBegin) + DUMP0_NORETURN(" line-first "); + if (mLineEnd) + DUMP0_NORETURN(" line-last "); + DUMP_RETURN(); } + +bool Token::Equal(Token *t) { + bool equal = false; + switch (mTkType) { + case TT_SP: + if (t->mTkType == TT_SP && + GetSepId() == t->GetSepId()) + equal = true; + break; + case TT_OP: + if (t->mTkType == TT_OP && + GetOprId() == t->GetOprId()) + equal = true; + break; + case TT_KW: + if (t->mTkType == TT_KW && + GetName() == t->GetName()) + equal = true; + break; + case TT_ID: + case TT_CM: + case TT_TL: + case TT_RE: + case TT_LT: + default: + break; + } + + return equal; +} + +/////////////////////////////////////////////////////////////////////////// +// Utilities for finding system tokens +// Remember the order of tokens are operators, separators, and keywords. 
+/////////////////////////////////////////////////////////////////////////// + +Token* FindOperatorToken(OprId id) { + Token *token = NULL; + bool found = false; + for (unsigned i = 0; i < gOperatorTokensNum; i++) { + token = &gSystemTokens[i]; + MASSERT(token->mTkType == TT_OP); + if (token->GetOprId() == id) { + found = true; + break; + } + } + MASSERT(found && token); + return token; +} + +Token* FindSeparatorToken(SepId id) { + Token *token = NULL; + bool found = false; + for (unsigned i = gOperatorTokensNum; i < gOperatorTokensNum + gSeparatorTokensNum; i++) { + token = &gSystemTokens[i]; + MASSERT(token->mTkType == TT_SP); + if (token->GetSepId() == id) { + found = true; + break; + } + } + MASSERT(found && token); + return token; +} + +// The caller of this function makes sure 'key' is already in the +// string pool of Lexer. +Token* FindKeywordToken(const char *key) { + Token *token = NULL; + bool found = false; + for (unsigned i = gOperatorTokensNum + gSeparatorTokensNum; + i < gOperatorTokensNum + gSeparatorTokensNum + gKeywordTokensNum; + i++) { + token = &gSystemTokens[i]; + MASSERT(token->mTkType == TT_KW); + if (strlen(key) == strlen(token->GetName()) && + !strncmp(key, token->GetName(), strlen(key))) { + found = true; + break; + } + } + MASSERT(found && token); + return token; +} + +// CommentToken is the last predefined token +Token* FindCommentToken() { + Token *token = &gSystemTokens[gSystemTokensNum - 1]; + MASSERT((token->mTkType == TT_CM) && "Last system token is not a comment token."); + return token; +} + } diff --git a/src/MapleFE/shared/src/tokenpool.cpp b/src/MapleFE/shared/src/tokenpool.cpp index 263e76a03ccf5e313500e6e5c9ca953c041fc817..89dfde73e94772499496049360b155e912cb9b3a 100644 --- a/src/MapleFE/shared/src/tokenpool.cpp +++ b/src/MapleFE/shared/src/tokenpool.cpp @@ -23,8 +23,15 @@ char* TokenPool::NewToken(unsigned size) { char *addr = mMemPool.Alloc(size); MASSERT(addr && "MemPool failed to alloc a token."); Token *token = (Token*)addr; + token->mAltTokens = NULL; + token->mLineNum = 0; + token->mColNum = 0; + token->mLineBegin = false; + token->mLineEnd = false; + mTokens.PushBack((Token*)addr); return addr; } + } diff --git a/src/MapleFE/shared/src/typetable.cpp b/src/MapleFE/shared/src/typetable.cpp new file mode 100644 index 0000000000000000000000000000000000000000..8a1d8a93e96c53f2c8636eafbdebf5d22e352a57 --- /dev/null +++ b/src/MapleFE/shared/src/typetable.cpp @@ -0,0 +1,204 @@ +/* +* Copyright (C) [2021-2022] Futurewei Technologies, Inc. All rights reverved. +* +* OpenArkFE is licensed under the Mulan PSL v2. +* You can use this software according to the terms and conditions of the Mulan PSL v2. +* You may obtain a copy of Mulan PSL v2 at: +* +* http://license.coscl.org.cn/MulanPSL2 +* +* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +* FIT FOR A PARTICULAR PURPOSE. +* See the Mulan PSL v2 for more details. +*/ +////////////////////////////////////////////////////////////////////////////// +// // +// This file contains the implementation of string pool. 
// +// // +////////////////////////////////////////////////////////////////////////////// + +#include +#include + +#include "typetable.h" +#include "gen_astdump.h" + +namespace maplefe { + +TypeTable gTypeTable; + +TypeEntry::TypeEntry(TreeNode *node) { + mType = node; + if (!node->IsTypeIdNone()) { + mTypeId = node->GetTypeId(); + } else { + switch (node->GetKind()) { + case NK_Struct: + case NK_StructLiteral: + case NK_Class: + case NK_Interface: + mTypeId = TY_Class; + break; + case NK_ArrayLiteral: + mTypeId = TY_Array; + break; + case NK_UserType: + mTypeId = TY_User; + break; + default: + mTypeId = TY_None; + break; + } + } +} + +TreeNode *TypeTable::CreatePrimType(std::string name, TypeId tid) { + unsigned stridx = gStringPool.GetStrIdx(name); + PrimTypeNode *ptype = (PrimTypeNode*)gTreePool.NewTreeNode(sizeof(PrimTypeNode)); + new (ptype) PrimTypeNode(); + ptype->SetStrIdx(stridx); + ptype->SetPrimType(tid); + ptype->SetTypeId(tid); + + mTypeId2TypeMap[tid] = ptype; + return ptype; +} + +TreeNode *TypeTable::CreateBuiltinType(std::string name, TypeId tid) { + unsigned stridx = gStringPool.GetStrIdx(name); + IdentifierNode *id = (IdentifierNode*)gTreePool.NewTreeNode(sizeof(IdentifierNode)); + new (id) IdentifierNode(stridx); + // use TY_Class for Object type + (tid == TY_Object) ? id->SetTypeId(TY_Class) : id->SetTypeId(tid); + + UserTypeNode *utype = (UserTypeNode*)gTreePool.NewTreeNode(sizeof(UserTypeNode)); + new (utype) UserTypeNode(id); + utype->SetStrIdx(stridx); + utype->SetTypeId(TY_Class); + id->SetParent(utype); + + mTypeId2TypeMap[tid] = utype; + return utype; +} + +bool TypeTable::AddType(TreeNode *node) { + unsigned nid = node->GetNodeId(); + if (mNodeId2TypeIdxMap.find(nid) != mNodeId2TypeIdxMap.end()) { + return false; + } + unsigned tidx = mTypeTable.size(); + mNodeId2TypeIdxMap[nid] = tidx; + node->SetTypeIdx(tidx); + if (node->IsUserType()) { + static_cast(node)->GetId()->SetTypeIdx(tidx); + } + TypeEntry *entry = new TypeEntry(node); + mTypeTable.push_back(entry); + return true; +} + +void TypeTable::AddPrimTypeId(TypeId tid) { + mPrimTypeId.insert(tid); +} + +#undef TYPE +#undef PRIMTYPE +void TypeTable::AddPrimAndBuiltinTypes() { + // only initialize once + if (mTypeTable.size() != 0) { + return; + } + + TreeNode *node; + // add a NULL entry so real typeidx starting from 1 + TypeEntry *entry = new TypeEntry(); + mTypeTable.push_back(entry); + + // first are primitive types, and their typeid TY_Xyz is their typeidx as well +#define TYPE(T) +#define PRIMTYPE(T) node = CreatePrimType(#T, TY_##T); AddType(node); AddPrimTypeId(TY_##T); +#include "supported_types.def" + // add additional primitive types for number and string + PRIMTYPE(Number); + PRIMTYPE(String); + + mPrimSize = size(); + +#define TYPE(T) node = CreateBuiltinType(#T, TY_##T); AddType(node); +#define PRIMTYPE(T) + // additional usertype Boolean + TYPE(Boolean); +#include "supported_types.def" + + mPreBuildSize = size(); + return; +} + +TypeEntry *TypeTable::GetTypeEntryFromTypeIdx(unsigned tidx) { + MASSERT(tidx < mTypeTable.size() && "type index out of range"); + return mTypeTable[tidx]; +} + +TreeNode *TypeTable::GetTypeFromTypeIdx(unsigned tidx) { + MASSERT(tidx < mTypeTable.size() && "type index out of range"); + return mTypeTable[tidx]->GetType(); +} + +TreeNode *TypeTable::GetTypeFromStrIdx(unsigned stridx) { + for (auto entry : mTypeTable) { + TreeNode *node = entry->GetType(); + if (node && node->GetStrIdx() == stridx) { + return node; + } + } + return NULL; +} + +unsigned 
TypeTable::GetOrCreateFunctionTypeIdx(FunctionTypeNode *node) { + for (auto tidx: mFuncTypeIdx) { + TreeNode *type = GetTypeFromTypeIdx(tidx); + FunctionTypeNode *functype = static_cast(type); + bool found = functype->IsEqual(node); + if (found) { + return tidx; + } + } + bool status = AddType(node); + MASSERT(status && "failed to add a functiontype"); + unsigned tidx = node->GetTypeIdx(); + mFuncTypeIdx.insert(tidx); + + std::string str("FuncType__"); + str += std::to_string(tidx); + unsigned stridx = gStringPool.GetStrIdx(str); + node->SetStrIdx(stridx); + + return tidx; +} + +void TypeTable::Dump() { + std::cout << "===================== TypeTable =====================" << std::endl; + std::cout << " tid:type-name: node-kind node-id" << std::endl; + std::cout << "--------------------------------" << std::endl; + unsigned idx = 1; + for (unsigned idx = 1; idx < mTypeTable.size(); idx++) { + TypeEntry *entry = mTypeTable[idx]; + TreeNode *node = entry->GetType(); + TypeId tid = node->GetTypeId(); + if (node->IsUserType()) { + tid = static_cast(node)->GetId()->GetTypeId(); + } + std::cout << " " << idx << " : " << node->GetName() << " : " << + AstDump::GetEnumNodeKind(node->GetKind()) << " " << + AstDump::GetEnumTypeId(tid) << " " << + "(typeid " << tid << ") " << + "(typeidx " << node->GetTypeIdx() << ") " << + "(stridx " << node->GetStrIdx() << ") " << + "(nodeid " << node->GetNodeId() << ")" << std::endl; + } + std::cout << "===================== End TypeTable =====================" << std::endl; +} + +} + diff --git a/src/MapleFE/shared/src/vfy.cpp b/src/MapleFE/shared/src/vfy.cpp index 7ed154992ed6d9486fc6c2539eae18270ac22165..07809d0f89ed58d2191117ee254c9fe0ca949449 100644 --- a/src/MapleFE/shared/src/vfy.cpp +++ b/src/MapleFE/shared/src/vfy.cpp @@ -1,5 +1,5 @@ /* -* Copyright (C) [2020] Futurewei Technologies, Inc. All rights reverved. +* Copyright (C) [2020-2022] Futurewei Technologies, Inc. All rights reverved. * * OpenArkFE is licensed under the Mulan PSL v2. * You can use this software according to the terms and conditions of the Mulan PSL v2. @@ -31,7 +31,7 @@ namespace maplefe { // resursive process. /////////////////////////////////////////////////////////////////////////////// -Verifier::Verifier() { +Verifier::Verifier(ModuleNode *m) : mASTModule(m) { mCurrScope = NULL; mTempParent = NULL; } @@ -52,11 +52,9 @@ void Verifier::Do() { // see java/vfy_java.cpp. void Verifier::VerifyGlobalScope() { - mCurrScope = gModule.mRootScope; - std::vector::iterator tree_it = gModule.mTrees.begin(); - for (; tree_it != gModule.mTrees.end(); tree_it++) { - ASTTree *asttree = *tree_it; - TreeNode *tree = asttree->mRootNode; + mCurrScope = mASTModule->mRootScope; + for (unsigned i = 0; i < mASTModule->GetTreesNum(); i++) { + TreeNode *tree = mASTModule->GetTree(i); // Step 1. Try to add decl. mCurrScope->TryAddDecl(tree); // Step 2. Try to add type. @@ -87,7 +85,7 @@ void Verifier::CollectAllDeclsTypes(ASTScope *scope) { // All fields, methods, local classes, local interfaces are decls. // All local classes/interfaces are types. 
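The vfy.cpp hunk continues below; the sketch here illustrates the pattern used by TypeTable::GetOrCreateFunctionTypeIdx above: reuse an existing entry when a structurally equal function type is already registered, and only otherwise mint a new index (and, in the real code, a FuncType__ name derived from it). This is a standalone model with a toy FunctionSig and IsEqual, not the MapleFE node classes:

```cpp
#include <cassert>
#include <string>
#include <vector>

// Toy function signature; the real code compares FunctionTypeNode objects
// with FunctionTypeNode::IsEqual.
struct FunctionSig {
  std::vector<std::string> mParams;
  std::string mRet;
  bool IsEqual(const FunctionSig &other) const {
    return mParams == other.mParams && mRet == other.mRet;
  }
};

// Registry that hands out exactly one index per distinct signature.
struct FuncTypeTable {
  std::vector<FunctionSig> mEntries;

  unsigned GetOrCreate(const FunctionSig &sig) {
    // Reuse an existing entry if a structurally equal one is registered.
    for (unsigned idx = 0; idx < mEntries.size(); idx++) {
      if (mEntries[idx].IsEqual(sig))
        return idx;
    }
    // Otherwise register the signature under a fresh index.
    mEntries.push_back(sig);
    return (unsigned)(mEntries.size() - 1);
  }
};

int main() {
  FuncTypeTable table;
  FunctionSig a{{"number", "number"}, "number"};
  FunctionSig b{{"number", "number"}, "number"};
  FunctionSig c{{"string"}, "void"};
  unsigned ia = table.GetOrCreate(a);
  unsigned ib = table.GetOrCreate(b);  // structurally equal -> same index
  unsigned ic = table.GetOrCreate(c);
  assert(ia == ib && ia != ic);
  return 0;
}
```

Deduplicating by structural equality keeps the type table small and makes "same function type" a cheap index comparison for later passes.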
for (unsigned i = 0; i < klass->GetFieldsNum(); i++) { - IdentifierNode *in = klass->GetField(i); + TreeNode *in = klass->GetField(i); scope->AddDecl(in); } for (unsigned i = 0; i < klass->GetMethodsNum(); i++) { @@ -119,7 +117,7 @@ void Verifier::VerifyIdentifier(IdentifierNode *inode) { ASTScope *scope = mCurrScope; IdentifierNode *decl = NULL; while (scope) { - if (decl = (IdentifierNode*) scope->FindDeclOf(inode)) + if ((decl = (IdentifierNode*) scope->FindDeclOf(inode->GetStrIdx()))) break; scope = scope->GetParent(); } @@ -127,18 +125,24 @@ void Verifier::VerifyIdentifier(IdentifierNode *inode) { if (!decl) { mLog.MissDecl(inode); } else { - // Replace the temp IdentifierNode with the found Decl. - // Sometimes inode and decl are the same, which happens for the declaration statement. - // We will verify decl statement as the others, so its inode is the same as decl. - if (inode != decl) { - // TODO : There are many complicated cases which we will handle in the furture. - // Right now I just put a simple check of mTempParent. - if (mTempParent) - mTempParent->ReplaceChild(inode, decl); - } + // We disabled this part to keep the integerity of TREE. We don't want + // a graph. + // + // // Replace the temp IdentifierNode with the found Decl. + // // Sometimes inode and decl are the same, which happens for the declaration statement. + // // We will verify decl statement as the others, so its inode is the same as decl. + // if (inode != decl) { + // // TODO : There are many complicated cases which we will handle in the furture. + // // Right now I just put a simple check of mTempParent. + // if (mTempParent) + // mTempParent->ReplaceChild(inode, decl); + // } } } +void Verifier::VerifyDecl(DeclNode *tree){ +} + void Verifier::VerifyDimension(DimensionNode *tree){ } @@ -160,8 +164,8 @@ void Verifier::VerifyPrimArrayType(PrimArrayTypeNode *tree){ void Verifier::VerifyVarList(VarListNode *vlnode){ TreeNode *old_temp_parent = mTempParent; mTempParent = vlnode; - for (unsigned i = 0; i < vlnode->GetNum(); i++) { - IdentifierNode *n = vlnode->VarAtIndex(i); + for (unsigned i = 0; i < vlnode->GetVarsNum(); i++) { + IdentifierNode *n = vlnode->GetVarAtIndex(i); VerifyIdentifier(n); } mTempParent = old_temp_parent; @@ -172,14 +176,22 @@ void Verifier::VerifyLiteral(LiteralNode *tree){ return; } +void Verifier::VerifyTemplateLiteral(TemplateLiteralNode *tree){ + return; +} + +void Verifier::VerifyRegExpr(RegExprNode *tree){ + return; +} + void Verifier::VerifyUnaOperator(UnaOperatorNode *tree){ } void Verifier::VerifyBinOperator(BinOperatorNode *binop){ TreeNode *old_temp_parent = mTempParent; mTempParent = binop; - VerifyTree(binop->mOpndA); - VerifyTree(binop->mOpndB); + VerifyTree(binop->GetOpndA()); + VerifyTree(binop->GetOpndB()); mTempParent = old_temp_parent; } @@ -187,7 +199,7 @@ void Verifier::VerifyTerOperator(TerOperatorNode *tree){ } void Verifier::VerifyBlock(BlockNode *block){ - mCurrScope = gModule.NewScope(mCurrScope); + mCurrScope = mASTModule->NewScope(mCurrScope); mCurrScope->SetTree(block); for (unsigned i = 0; i < block->GetChildrenNum(); i++) { @@ -204,7 +216,7 @@ void Verifier::VerifyBlock(BlockNode *block){ // Function body's block is different than a pure BlockNode. void Verifier::VerifyFunction(FunctionNode *func){ ASTScope *old_scope = mCurrScope; - mCurrScope = gModule.NewScope(mCurrScope); + mCurrScope = mASTModule->NewScope(mCurrScope); mCurrScope->SetTree(func); // Add the parameters to the decl. 
Since we search for the decl of a var from @@ -241,11 +253,11 @@ void Verifier::VerifyFunction(FunctionNode *func){ void Verifier::VerifyClassFields(ClassNode *klass) { // rule 1. No duplicated fields name with another decls. for (unsigned i = 0; i < klass->GetFieldsNum(); i++) { - IdentifierNode *na = klass->GetField(i); + TreeNode *na = klass->GetField(i); bool hit_self = false; for (unsigned j = 0; j < mCurrScope->GetDeclNum(); j++) { TreeNode *nb = mCurrScope->GetDecl(j); - if (na->GetName() == nb->GetName()) { + if (na->GetStrIdx() == nb->GetStrIdx()) { if (nb->IsIdentifier()) { if (!hit_self) hit_self = true; @@ -265,7 +277,7 @@ void Verifier::VerifyClassSuperInterfaces(ClassNode *klass) {} void Verifier::VerifyClass(ClassNode *klass){ // Step 1. Create a new scope - ASTScope *scope = gModule.NewScope(mCurrScope); + ASTScope *scope = mASTModule->NewScope(mCurrScope); mCurrScope = scope; scope->SetTree(klass); @@ -293,18 +305,36 @@ void Verifier::VerifyAnnotationType(AnnotationTypeNode *tree){ void Verifier::VerifyAnnotation(AnnotationNode *tree){ } +void Verifier::VerifyThrow(ThrowNode *tree){ +} + +void Verifier::VerifyTry(TryNode *tree){ +} + +void Verifier::VerifyCatch(CatchNode *tree){ +} + +void Verifier::VerifyFinally(FinallyNode *tree){ +} + void Verifier::VerifyException(ExceptionNode *tree){ } void Verifier::VerifyReturn(ReturnNode *tree){ } +void Verifier::VerifyYield(YieldNode *tree){ +} + void Verifier::VerifyCondBranch(CondBranchNode *tree){ } void Verifier::VerifyBreak(BreakNode *tree){ } +void Verifier::VerifyContinue(ContinueNode *tree){ +} + void Verifier::VerifyForLoop(ForLoopNode *tree){ } @@ -320,7 +350,7 @@ void Verifier::VerifyType(IdentifierNode *inode) { ASTScope *scope = mCurrScope; TreeNode *type = NULL; while (scope) { - if (type = scope->FindTypeOf(inode)) + if ((type = scope->FindTypeOf(inode->GetStrIdx()))) break; scope = scope->GetParent(); } @@ -356,8 +386,8 @@ void Verifier::VerifyNew(NewNode *new_node){ VerifyType(inode); // verify parameters. // A parameter could be any type. We have to verify type by type. 
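The VerifyNew argument checking continues below; the sketch here shows the comparison style that VerifyClassFields above switched to, matching declarations by interned string index (GetStrIdx) instead of by name string, which is cheaper and independent of where the character data lives. A standalone model with a toy string pool, not MapleFE's gStringPool:

```cpp
#include <cassert>
#include <string>
#include <unordered_map>
#include <vector>

// Toy string pool: each distinct name gets a stable index, so name equality
// reduces to equality of unsigned indices.
struct ToyStringPool {
  std::unordered_map<std::string, unsigned> mMap;
  std::vector<std::string> mStrings;

  unsigned GetStrIdx(const std::string &s) {
    auto it = mMap.find(s);
    if (it != mMap.end())
      return it->second;
    unsigned idx = (unsigned)mStrings.size();
    mStrings.push_back(s);
    mMap.emplace(s, idx);
    return idx;
  }
};

// Detect duplicate field names the way VerifyClassFields does: compare the
// interned indices rather than the character data.
bool HasDuplicateField(ToyStringPool &pool, const std::vector<std::string> &fields) {
  std::vector<unsigned> seen;
  for (const std::string &f : fields) {
    unsigned idx = pool.GetStrIdx(f);
    for (unsigned s : seen) {
      if (s == idx)
        return true;
    }
    seen.push_back(idx);
  }
  return false;
}

int main() {
  ToyStringPool pool;
  assert(!HasDuplicateField(pool, {"x", "y", "z"}));
  assert(HasDuplicateField(pool, {"x", "y", "x"}));
  return 0;
}
```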
- for (unsigned i = 0; i < new_node->GetParamsNum(); i++) { - TreeNode *p = new_node->GetParam(i); + for (unsigned i = 0; i < new_node->GetArgsNum(); i++) { + TreeNode *p = new_node->GetArg(i); if(p->IsIdentifier()) { IdentifierNode *inode = (IdentifierNode*)p; VerifyIdentifier(inode); @@ -386,6 +416,10 @@ void Verifier::VerifyExprList(ExprListNode *tree){ return; } +void Verifier::VerifyNamespace(NamespaceNode *tree){ + return; +} + void Verifier::VerifyCall(CallNode *tree){ return; } @@ -406,14 +440,30 @@ void Verifier::VerifyParenthesis(ParenthesisNode *tree){ return; } +void Verifier::VerifyModule(ModuleNode *tree){ + return; +} + void Verifier::VerifyPackage(PackageNode *tree){ return; } +void Verifier::VerifyDeclare(DeclareNode *tree){ + return; +} + void Verifier::VerifyImport(ImportNode *tree){ return; } +void Verifier::VerifyExport(ExportNode *tree){ + return; +} + +void Verifier::VerifyXXportAsPair(XXportAsPairNode *tree){ + return; +} + void Verifier::VerifyUserType(UserTypeNode *tree){ return; } @@ -421,4 +471,109 @@ void Verifier::VerifyUserType(UserTypeNode *tree){ void Verifier::VerifyLambda(LambdaNode *tree){ return; } + +void Verifier::VerifyInstanceOf(InstanceOfNode *tree){ + return; +} + +void Verifier::VerifyIn(InNode *tree){ + return; +} + +void Verifier::VerifyComputedName(ComputedNameNode *tree){ + return; +} + +void Verifier::VerifyIs(IsNode *tree){ + return; +} + +void Verifier::VerifyAwait(AwaitNode *tree){ + return; +} + +void Verifier::VerifyTypeOf(TypeOfNode *tree){ + return; +} + +void Verifier::VerifyTypeAlias(TypeAliasNode *tree){ + return; +} + +void Verifier::VerifyAsType(AsTypeNode *tree){ + return; +} + +void Verifier::VerifyConditionalType(ConditionalTypeNode *tree){ + return; +} + +void Verifier::VerifyTypeParameter(TypeParameterNode *tree){ + return; +} + +void Verifier::VerifyKeyOf(KeyOfNode *tree){ + return; +} + +void Verifier::VerifyInfer(InferNode *tree){ + return; +} + +void Verifier::VerifyArrayElement(ArrayElementNode *tree){ + return; +} + +void Verifier::VerifyArrayLiteral(ArrayLiteralNode *tree){ + return; +} + +void Verifier::VerifyNumIndexSig(NumIndexSigNode *tree){ + return; +} + +void Verifier::VerifyStrIndexSig(StrIndexSigNode *tree){ + return; +} + +void Verifier::VerifyStruct(StructNode *tree){ + return; +} + +void Verifier::VerifyNameTypePair(NameTypePairNode *tree){ + return; +} + +void Verifier::VerifyTupleType(TupleTypeNode *tree){ + return; +} + +void Verifier::VerifyBindingElement(BindingElementNode *tree){ + return; +} + +void Verifier::VerifyBindingPattern(BindingPatternNode *tree){ + return; +} + +void Verifier::VerifyStructLiteral(StructLiteralNode *tree){ + return; +} + +void Verifier::VerifyFieldLiteral(FieldLiteralNode *tree){ + return; +} + +void Verifier::VerifyArrayType(ArrayTypeNode *tree){ + return; +} + +void Verifier::VerifyTripleSlash(TripleSlashNode *tree){ + return; +} + +void Verifier::VerifyFunctionType(FunctionTypeNode *tree){ + return; +} + } diff --git a/src/MapleFE/test/Makefile b/src/MapleFE/test/Makefile index 09a42a4e3387428f41744a5e252a469072150699..aed83f9c0ad98a99312e7e2785030cbc13cd311a 100644 --- a/src/MapleFE/test/Makefile +++ b/src/MapleFE/test/Makefile @@ -1,4 +1,4 @@ -# Copyright (C) [2020] Futurewei Technologies, Inc. All rights reverved. +# Copyright (C) [2020-2021] Futurewei Technologies, Inc. All rights reverved. # # OpenArkFE is licensed under the Mulan PSL v2. # You can use this software according to the terms and conditions of the Mulan PSL v2. 
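The remaining hunks of the test Makefile follow. Before that, a sketch of the scope-chain lookup that VerifyIdentifier and VerifyType in the vfy.cpp changes above rely on: ask the current scope for a declaration by string index, and fall back to the parent scope on a miss until the chain runs out. This is a standalone model with a toy Scope type, not the MapleFE ASTScope API:

```cpp
#include <cassert>
#include <unordered_map>

// Toy declaration and scope; the real code stores TreeNode decls in ASTScope
// and looks them up with FindDeclOf(stridx).
struct ToyDecl { unsigned mStrIdx; };

struct ToyScope {
  ToyScope *mParent = nullptr;
  std::unordered_map<unsigned, ToyDecl *> mDecls;  // keyed by string index

  ToyDecl *FindDeclOf(unsigned stridx) {
    auto it = mDecls.find(stridx);
    return it == mDecls.end() ? nullptr : it->second;
  }
};

// Mirrors the VerifyIdentifier loop: try the current scope, then each
// enclosing scope; a null result is where the "has no decl" diagnostic
// would be reported.
ToyDecl *ResolveDecl(ToyScope *scope, unsigned stridx) {
  while (scope) {
    if (ToyDecl *decl = scope->FindDeclOf(stridx))
      return decl;
    scope = scope->mParent;
  }
  return nullptr;
}

int main() {
  ToyScope global, local;
  local.mParent = &global;
  ToyDecl x{7};
  global.mDecls[7] = &x;
  assert(ResolveDecl(&local, 7) == &x);     // found in the enclosing scope
  assert(ResolveDecl(&local, 8) == nullptr);
  return 0;
}
```

Keeping the lookup keyed by string index matches the verifier's switch away from name-string comparisons and makes the miss case (the MissDecl diagnostic) a simple null check.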
@@ -15,74 +15,104 @@ # for testing batch1/Arithmetic/Add.java: # make Add -build = ../output/test +build = ../output/$(SRCLANG)/test $(shell mkdir -p $(build)) -DIR = . $(wildcard * */* */*/*) +ifeq ($(SRCLANG),java) + FLAGS := --trace-a2m +else ifeq ($(SRCLANG),typescript) + FLAGS1 := --trace-lexer + FLAGS2 := --trace=3 --format-cpp +endif -vpath %.java $(DIR) -vpath %.mpl $(DIR) -vpath %.c $(DIR) -vpath %.cpp $(DIR) +DIRS := . $(shell find $(SRCLANG) -type d) -all: test +vpath %.java $(DIRS) +vpath %.ts $(DIRS) +vpath %.mpl $(DIRS) +vpath %.c $(DIRS) +vpath %.cpp $(DIRS) -autogen: - make -C ../autogen +.PHONY: $(SRCLANG) +.PHONY: mssetup -shared: autogen test - make -C ../shared +$(SRCLANG): + env LANG=en_US.UTF-8 ./new_runtests.pl $(SRCLANG) full: - make -C .. - -clean: - make -C .. clean - -clobber: - make -C .. clobber + make -C .. -j8 rebuild: - make -C .. rebuild + make -C .. rebuild -j8 -tt: - ../output/java/java2mpl tt.java --trace-a2m - @echo "\n======================== tt.mpl ========================" - @cat tt.mpl - @echo "========================================================\n" - ../../mapleall/bin/aarch64-clang-debug/irbuild tt.mpl - ../../mapleall/bin/aarch64-clang-debug/irbuild tt.irb.mpl - diff -uwb tt.irb.mpl tt.irb.irb.mpl +clean: + rm -rf $(build) + +clobber: clean dbg: - gdb --args ../output/java/java2mpl tt.java --trace-a2m + gdb --args ../output/java/java/java2mpl test.java $(FLAGS) + +ifeq ($(SRCLANG),java) -# only run autogen for now -test: - ./runtests.pl all +mssetup: % : %.java @cp $< $(build) - @echo gdb --args ../output/java/java2mpl $(build)/$@.java --trace-a2m - ../output/java/java2mpl $(build)/$@.java --trace-a2m + @echo gdb --args ../output/java/bin/java2ast $(build)/$@.java $(FLAGS1) + @echo gdb --args ../output/java/bin/ast2mpl $(build)/$@.java.ast $(FLAGS2) + ../output/java/bin/java2ast $(build)/$@.java $(FLAGS1) + ../output/java/bin/ast2mpl $(build)/$@.java.ast $(FLAGS2) + @echo "\n======================= $@.java =========================" + @cat -n $(build)/$@.java + @echo "========================================================\n" + @echo gdb --args ../output/java/bin/java2ast $(build)/$@.java $(FLAGS1) + @echo gdb --args ../output/java/bin/ast2mpl $(build)/$@.java.ast $(FLAGS2) @echo "\n======================= $@.mpl =========================" - @cat $(build)/$@.mpl + @cat -n $(build)/$@.mpl @echo "========================================================\n" - ../../mapleall/bin/aarch64-clang-debug/irbuild $(build)/$@.mpl - ../../mapleall/bin/aarch64-clang-debug/irbuild $(build)/$@.irb.mpl + @echo gdb --args ../output/java/bin/java2ast $(build)/$@.java $(FLAGS1) + @echo gdb --args ../output/java/bin/ast2mpl $(build)/$@.java.ast $(FLAGS2) + @cat -n $(build)/$@.mpl + @echo "========================================================\n" + @echo gdb --args ../output/java/java/java2mpl $(build)/$@.java $(FLAGS) + $(MAPLEALL_ROOT)/output/aarch64-clang-debug/bin/irbuild $(build)/$@.mpl + $(MAPLEALL_ROOT)/output/aarch64-clang-debug/bin/irbuild $(build)/$@.irb.mpl diff -uwb $(build)/$@.irb.mpl $(build)/$@.irb.irb.mpl - @echo gdb --args ../output/java/java2mpl $(build)/$@.java --trace-a2m - -% : %.c - cp $< $(build) - @echo "\ngdb command:\n(cd $(BUILDDIR)/autogen/; gdb -args ./sharedfe -verbose=3 ../../test/$(build)/$<)" - (cd $(BUILDDIR)/autogen; ./sharedfe -verbose=3 ../../test/$(build)/$<) - @echo "\ncommand:\n(cd $(BUILDDIR)/autogen/; ./sharedfe ../../test/$(build)/$<)" - @echo "\ngdb command:\n(cd $(BUILDDIR)/autogen/; gdb -args ./sharedfe -verbose=3 
../../test/$(build)/$<)" - -% : %.cpp - cp $< $(build) - @echo "\ngdb command:\n(cd $(BUILDDIR)/autogen/; gdb -args ./sharedfe -verbose=3 ../../test/$(build)/$<)" - (cd $(BUILDDIR)/autogen; ./sharedfe -verbose=3 ../../test/$(build)/$<) - @echo "\ncommand:\n(cd $(BUILDDIR)/autogen/; ./sharedfe ../../test/$(build)/$<)" - @echo "\ngdb command:\n(cd $(BUILDDIR)/autogen/; gdb -args ./sharedfe -verbose=3 ../../test/$(build)/$<)" + +else ifeq ($(SRCLANG),typescript) + +mssetup: + ./msts_test.sh setup + +testms: + ./msts_test.sh + +testmslocal: mssetup + env LANG=en_US.UTF-8 ./new_runtests.pl typescript TypeScript/tests/cases/compiler + +unit: + env LANG=en_US.UTF-8 ./new_runtests.pl typescript typescript/unit_tests + +ms: mssetup + env LANG=en_US.UTF-8 ./new_runtests.pl typescript typescript/ms_tests + +% : %.ts + @cp $< $(build) + @echo gdb --args ../output/typescript/bin/ts2ast $(build)/$@.ts $(FLAGS1) + @echo gdb --args ../output/typescript/bin/ast2cpp $(build)/$@.ts.ast $(FLAGS2) + ../output/typescript/bin/ts2ast $(build)/$@.ts $(FLAGS1) + ../output/typescript/bin/ast2cpp $(build)/$@.ts.ast $(FLAGS2) + @echo "\n======================= $@.ts =========================" + @cat -n $(build)/$@.ts + @echo "========================================================\n" + @echo gdb --args ../output/typescript/bin/ts2ast $(build)/$@.ts $(FLAGS1) + @echo gdb --args ../output/typescript/bin/ast2cpp $(build)/$@.ts.ast $(FLAGS2) + @echo "\n======================= $@.h =========================" + @cat -n $(build)/$@.h + @echo "\n======================= $@.cpp =========================" + @cat -n $(build)/$@.cpp + @echo "========================================================\n" + @echo gdb --args ../output/typescript/bin/ts2ast $(build)/$@.ts $(FLAGS1) + @echo gdb --args ../output/typescript/bin/ast2cpp $(build)/$@.ts.ast $(FLAGS2) +endif diff --git a/src/MapleFE/test/README b/src/MapleFE/test/README index a46d226caf1805245765cf35d1efdf3efde933be..cdbdaa9af05a75e1b81b517a346e09daf8ea2618 100644 --- a/src/MapleFE/test/README +++ b/src/MapleFE/test/README @@ -1,10 +1,43 @@ This directory contains the test framework and test cases. -The test cases are in 3 directories. -1. java2mpl - This is for the Java test cases. They are correct code. -2. errtest - This is for the Java test cases. They are incorrect code. -3. others - This is for the Java test cases. They are taken from 3rd party open source projects, - in order to have a complete coverage of the testing. If you want to reuse the code - please read the LICENSE carefully. + +Directories: +./java +./java/syntaxonly Test cases for syntax check +./java/others Other Java test cases from third-party open source projects. Included the LICENSE for them. +./java/java2mpl Java test cases for java2mpl +./java/openjdk Java test cases +./java/errtest Negative test cases + +./typescript +./typescript/ms_tests TypeScript official test cases. Included the LICENSE for them. +./typescript/unit_tests Unit test cases + +To run TypeScript tests: + 1. source envsetup typescript; cd $MAPLEFE_ROOT/test + 2. Run both unit_tests and ms_tests: + make + Run just unit_tests + make unit + Run just ms_tests + make ms + Run specific typescript test + make + * Tests should be run using make instead of using the .pl scripts directly. + +Tool to dump AST and CFG of a TypeScript test case: + + Usage: astdump.sh [-dot] [-f|--fullscreen] [-p |--pre ] [-a|--ast] [-c|--cfg] [ ...] 
+ + -d | --dot Use Graphviz dot to generate the graph and view it with viewnior + -f | --fullscreen View the generated graph in fullscreen mode. It implies option -dot + -p | --pre Filter graphs with the specified , e.g. -p "CFG_" + -a | --ast Show AST graph. It is equivalent to options "-dot -p AST" + -c | --cfg Show CFG graph. It is equivalent to options "-dot -p CFG" + -h | --help Display usage information + -s | --syntax Syntax highlighting the generated TypeScript code + [ ...] Specify one or more TypeScript files to be processed + +Examples: + cd MapleFE/test/typescript/unit_tests + ../../astdump.sh -a binary-search.ts # Show AST graph + ../../astdump.sh binary-search.ts -c # Show CFG graph diff --git a/src/MapleFE/test/astdump.sh b/src/MapleFE/test/astdump.sh new file mode 100755 index 0000000000000000000000000000000000000000..2260c58f95c703efdf91b6bb871d883f67219a3f --- /dev/null +++ b/src/MapleFE/test/astdump.sh @@ -0,0 +1,209 @@ +#!/bin/bash +function usage { +cat << EOF + +Usage: astdump.sh [-d] [-f] [-p ] [-a] [-c] [-k] [-A] [-C] [-n] [-t|-T] [ ...] + +Short/long options: + -d | --dot Use Graphviz dot to generate the graph and view it with viewnior + -f | --fullscreen View the generated graph in fullscreen mode. It implies option -dot + -p | --pre Filter graphs with the specified , e.g. -p "CFG_" + -a | --ast Show AST graph. It is equivalent to options "-d -p AST" + -c | --cfg Show CFG graph. It is equivalent to options "-d -p CFG" + -s | --syntax Syntax highlighting the generated TypeScript code + -k | --keep Keep generated files *.ts-[0-9]*.out.ts which fail to compile with tsc + -A | --all Process all .ts files in current directory excluding *.ts-[0-9]*.out.ts + -C | --clean Clean up generated files (*.ts-[0-9]*.out.ts) + -n | --name Keep original names by removing "__v[0-9]*" from generated code + -t | --treediff Compare the AST of generated TS code with the one of original TS code + -T | --Treediff Same as -t/--treediff except that it disables tsc for generated TS code + -i | --ignore-imported Ignore all imported modules + [ ...] Specify one or more TypeScript files to be processed +EOF +exit 1 +} +CMDLINE="$0 $*" +GITTS=$(git ls-files "*.ts") +DOT= PRE= LIST= VIEWOP= HIGHLIGHT="cat" TSCERR= KEEP= CLEAN= NAME= TREEDIFF= TSC=yes NOIMPORTED= +while [ $# -gt 0 ]; do + case $1 in + -d|--dot) DOT=dot;; + -f|--fullscreen) VIEWOP="--fullscreen"; DOT=fullscreen;; + -p|--pre) [ $# -ge 2 ] && { PRE="$2"; shift; } || { echo "$1 needs an argument"; exit 1; } ;; + -a|--ast) PRE="AST" ; DOT=ast ; TSCERR=">& /dev/null" ;; + -c|--cfg) PRE="CFG" ; DOT=cfg ; TSCERR=">& /dev/null" ;; + -s|--syntax) HIGHLIGHT="highlight -O xterm256 --syntax ts" ;; + -e|--tscerror) TSCERR= ;; + -k|--keep) KEEP=keep ;; + -C|--clean) CLEAN=clean ;; + -A|--all) LIST="$LIST $(echo $GITTS | xargs grep -l '^ *export ') " + LIST="$LIST $(echo $GITTS | xargs grep -L '^ *export ') " ;; + -n|--name) NAME="original" ;; + -t|--treediff) TREEDIFF="--emit-ts-only"; NAME="original"; TSC=yes ;; + -T|--Treediff) TREEDIFF="--emit-ts-only"; NAME="original"; TSC= ;; + -i|--ignore-imported) NOIMPORTED="--no-imported" ;; + -*) echo "Unknown option $1"; usage;; + *) LIST="$LIST $1" + esac + shift +done +LIST=$(echo $LIST | xargs -n1 | grep -v '\.ts-[0-9][0-9]*\.out\.[d.]*ts' | grep -vF .ts.tmp.ts) +if [ -n "$CLEAN" ]; then + echo Cleaning up generated files... 
+ find -maxdepth 1 -regex '.*\.ts-[0-9]+\.out.[ctj][ps]p*\|.*\.ts-[0-9]+\.[pd][no][gt]\|.*\.ts.[ca][ps][pt]' -exec rm '{}' \; + rm -rf *.ts.orig *.ts.gen *.ts.tmp.ts *[0-9]-dump.out ts2cxx-lock-* + for ts in $LIST $GITTS; do + rm -rf $ts.orig $ts.gen $ts.tmp.ts $ts.*[0-9]-dump.out $ts-*[0-9].out.ts $ts-*[0-9].out.d.ts + done + echo Done. +fi +[ -n "$LIST" ] || { echo Please specify one or more TypeScript files.; usage; } +[ -z "$DOT" ] || [ -x /usr/bin/dot -a -x /usr/bin/viewnior -a -x /usr/bin/highlight ] || sudo apt install graphviz viewnior highlight +TSOUT=$(cd $(dirname $(realpath $0))/../; pwd)/output/typescript +TS2AST=$TSOUT/bin/ts2ast +AST2CPP=$TSOUT/bin/ast2cpp +[ -x "$TS2AST" ] || { echo Cannot execute $TS2AST; exit 1; } +[ -x "$AST2CPP" ] || { echo Cannot execute $AST2CPP; exit 1; } + +# Acquire/release a lock +typeset -i LockVar +LockVar=1 +function AcquireLock { + while [[ $LockVar -ne 0 ]] || sleep 0.3; do + ln -s Lock_$2 $1-lock-$((LockVar=(LockVar+1)%$3)) > /dev/null 2>&1 && break + done +} +function ReleaseLock { + rm -f $1-lock-$LockVar +} +trap "{ pstree -p $$ | tr ')' '\n' | sed 's/.*(//' | xargs kill -9 2> /dev/null; rm -f ts2cxx-lock-*; }" SIGINT SIGQUIT SIGKILL SIGTERM + +PROCID=$$ +rm -rf *$PROCID-dump.out $PROCID-summary.out *$PROCID.out.ts ts2cxx-lock-* +OPT="--target es6 \ + --lib es2015,es2017,dom \ + --module commonjs \ + --downlevelIteration \ + --esModuleInterop \ + --experimentalDecorators" +cnt=0 +for ts in $LIST; do + ts=$(sed 's|^\./||' <<< "$ts") + echo $((++cnt)): $ts + AcquireLock ts2cxx for_$(basename $ts) $(nproc) + (if true; then + set -x + echo --------- + echo "$TS2AST" "$ts" + out=$("$TS2AST" "$ts") + if [ $? -ne 0 ]; then + echo "MSG: Failed, test case (ts2ast)$ts" + else + echo "$AST2CPP" "$ts".ast --trace=2 --emit-ts $TREEDIFF $NOIMPORTED + out=$("$AST2CPP" "$ts".ast --trace=2 --emit-ts $TREEDIFF $NOIMPORTED 2>&1) + [ $? -eq 0 ] || echo "MSG: Failed, test case (ast2cpp)$ts" + fi + echo "$out" + cmd=$(grep -n -e "^// .Beginning of Emitter:" -e "// End of Emitter.$" <<< "$out" | + tail -2 | sed 's/:.*//' | xargs | sed 's/\([^ ]*\) \(.*\)/sed -n \1,$((\2))p/') + if [ "x${cmd:0:4}" = "xsed " ]; then + T=$(sed -e "s/\(.*\)\(\.d\)\(\.ts-$PROCID.out\)/\1\2\3\2/" <<< "$ts-$PROCID.out.ts") + eval $cmd <<< "$out" > "$T" + [ -z "$NAME" ] || sed -i 's/__v[0-9][0-9]*//g' "$T" + echo -e "\n====== TS Reformatted ======\n" + $HIGHLIGHT "$T" + echo TREEDIFF=$TREEDIFF + E= + grep -qm1 "PassNode *{" <<< "$out" && E=",PassNode" + if [ -z "$TREEDIFF" -o -n "$TSC" ]; then + eval tsc $OPT "$T" $TSCERR + else + echo Skipping tsc for tree diff + fi + # --strict --downlevelIteration --esModuleInterop --noImplicitAny --isolatedModules "$T" $TSCERR + if [ $? -ne 0 ]; then + echo "MSG: Failed, test case (tsc-failed$E)$ts" + [ -n "$KEEP" ] || rm -f "$T" + elif [ -z $TREEDIFF ]; then + echo "MSG: Passed, test case $ts" + Passed="$Passed $ts" + [ -n "$KEEP" ] || rm -f "$T" + else + cp $ts $ts.tmp.ts + $TS2AST $ts.tmp.ts + if [ $? -eq 0 ]; then + $AST2CPP $ts.tmp.ts.ast $TREEDIFF | sed -n '/^AstDump:/,/^}/p' | sed 's/\(mStrIdx: unsigned int, \)[0-9]* =>/\1=>/' + fi > $ts.orig + $TS2AST $T + if [ $? -eq 0 ]; then + $AST2CPP $T.ast $TREEDIFF | sed -n '/^AstDump:/,/^}/p' | sed -e "s|/$T|/$ts.tmp.ts|" \ + -e 's/\(mStrIdx: unsigned int, \)[0-9]* =>/\1=>/' + else + E="$E,ts2ast" + fi > $ts.gen + diff $ts.orig $ts.gen + if [ $? -eq 0 -a -s $ts.orig -a -s $ts.gen ]; then + Passed="$Passed $ts" + echo "MSG: Passed, test case $ts" + else + diff -I '[0-9]\. 
const char\*, "' $ts.orig $ts.gen + if [ $? -eq 0 -a -s $ts.orig -a -s $ts.gen ]; then + Passed="$Passed $ts" + echo "MSG: Passed, test case (const-char*)$ts" + else + echo "MSG: Failed, test case (diff-ast$E)$ts" + fi + fi + echo === "$T"; cat "$T" + echo --- "$ts"; cat "$ts" + echo === + if [ -n "$DOT" ]; then + for tf in $ts.tmp.ts $T; do + echo "=== $tf" + cat $tf + out="$out"$'\n'"$($TS2AST $tf --dump-dot)" + done + fi + [ -n "$KEEP" ] || rm -f "$T" $ts.orig $ts.gen $ts.tmp.ts + fi + fi + if [ -n "$DOT" ]; then + echo --- "$ts"; cat "$ts" + idx=0 + grep -n -e "^digraph $PRE[^{]* {" -e "^}" <<< "$out" | grep -A1 "digraph [^{]* {" | + grep -v ^-- | sed 'N;s/\n/ /' | sed -e 's/:digraph [^{]* { */,/' -e 's/:.*/p/g' | + { while read cmd; do + idx=$((idx+1)) + sed -n $cmd <<< "$out" > "$ts"-$idx.dot + dot -Tpng -o "$ts"-$idx.png "$ts"-$idx.dot + env LC_ALL=C viewnior $VIEWOP "$ts"-$idx.png & + done + wait + rm -f "$ts"-[0-9]*.png "$ts"-[0-9]*.dot; } + fi + ReleaseLock ts2cxx + set +x + fi >& $ts.$PROCID-dump.out + grep -a "^MSG: [PF]a[si][sl]ed," $ts.$PROCID-dump.out >> $PROCID-summary.out + ) & +done +wait +echo Done. +if [ -s $PROCID-summary.out ]; then + msg=$(grep -a "^MSG: [PF]a[si][sl]ed," $PROCID-summary.out) + echo "$CMDLINE" >> $PROCID-summary.out + if true; then + echo + echo "Test case(s) passed:" + grep -a "^MSG: Passed, test case " <<< "$msg" | sed 's/MSG: Passed, test case //' | env LC_ALL=C sort -r | nl + grep -aq -m1 "^MSG: Failed, test case " <<< "$msg" + if [ $? -eq 0 ]; then + echo + echo "Test case(s) failed:" + grep -a "^MSG: Failed," <<< "$msg" | sed 's/MSG: Failed, test case //' | env LC_ALL=C sort | nl + fi + echo + echo Total: $(wc -l <<< "$msg"), Passed: $(grep -ac "^MSG: Passed," <<< "$msg"), Failed: $(grep -ac "^MSG: Failed," <<< "$msg") + grep -a "^MSG: Failed," <<< "$msg" | sed 's/MSG: Failed, test case (\([^)]*\).*/due to \1/' | sort | uniq -c + fi | tee -a $PROCID-summary.out +fi diff --git a/src/MapleFE/test/astdumpjava.sh b/src/MapleFE/test/astdumpjava.sh new file mode 100755 index 0000000000000000000000000000000000000000..a60c18ab412cbd7f81066032884f0c34efec56f3 --- /dev/null +++ b/src/MapleFE/test/astdumpjava.sh @@ -0,0 +1,164 @@ +#!/bin/bash +function usage { +cat << EOF + +Usage: astdump.sh [-d] [-f] [-p ] [-a] [-c] [-k] [-A] [-C] [-n] [-t|-T] [ ...] + +Short/long options: + -d | --dot Use Graphviz dot to generate the graph and view it with viewnior + -f | --fullscreen View the generated graph in fullscreen mode. It implies option -dot + -p | --pre Filter graphs with the specified , e.g. -p "CFG_" + -a | --ast Show AST graph. It is equivalent to options "-d -p AST" + -c | --cfg Show CFG graph. It is equivalent to options "-d -p CFG" + -s | --syntax Syntax highlighting the generated TypeScript code + -k | --keep Keep generated files *.java-[0-9]*.out.java which fail to compile with tsc + -A | --all Process all .java files in current directory excluding *.java-[0-9]*.out.java + -C | --clean Clean up generated files (*.java-[0-9]*.out.java) + -n | --name Keep original names by removing "__v[0-9]*" from generated code + -t | --treediff Compare the AST of generated TS code with the one of original TS code + -T | --Treediff Same as -t/--treediff except that it disables tsc for generated TS code + [ ...] 
Specify one or more TypeScript files to be processed +EOF +exit 1 +} +CMDLINE="$0 $*" +DOT= PRE= LIST= VIEWOP= HIGHLIGHT="cat" TSCERR= KEEP= CLEAN= NAME= TREEDIFF= TSC=yes +while [ $# -gt 0 ]; do + case $1 in + -d|--dot) DOT=dot;; + -f|--fullscreen) VIEWOP="--fullscreen"; DOT=fullscreen;; + -p|--pre) [ $# -ge 2 ] && { PRE="$2"; shift; } || { echo "$1 needs an argument"; exit 1; } ;; + -a|--ast) PRE="AST" ; DOT=ast ; TSCERR=">& /dev/null" ;; + -c|--cfg) PRE="CFG" ; DOT=cfg ; TSCERR=">& /dev/null" ;; + -s|--syntax) HIGHLIGHT="highlight -O xterm256 --syntax ts" ;; + -e|--tscerror) TSCERR= ;; + -k|--keep) KEEP=keep ;; + -C|--clean) CLEAN=clean ;; + -A|--all) LIST="$LIST $(find -maxdepth 1 -name '*.java')" ;; + -n|--name) NAME="original" ;; + -t|--treediff) TREEDIFF="--emit-ts-only"; NAME="original"; TSC=yes ;; + -T|--Treediff) TREEDIFF="--emit-ts-only"; NAME="original"; TSC= ;; + -*) echo "Unknown option $1"; usage;; + *) LIST="$LIST $1" + esac + shift +done +LIST=$(echo $LIST | xargs -n1 | grep -v '\.java-[0-9][0-9]*\.out.java' | grep -vF .java.tmp.java) +if [ -n "$CLEAN" ]; then + echo Cleaning up generated files... + find -maxdepth 1 -regex '.*\.java-[0-9]+\.out.[ctj][ps]p*\|.*\.java-[0-9]+\.[pd][no][gt]\|.*\.java.[ca][ps][pt]' -exec rm '{}' \; + rm -rf *.java.orig *.java.gen *.java.tmp.java *[0-9]-dump.out ts2cxx-lock-* + for xx in $LIST; do + rm -rf $xx.orig $xx.gen $xx.tmp.java $xx.*[0-9]-dump.out $xx-*[0-9].out.java + done + echo Done. +fi +[ -n "$LIST" ] || { echo Please specify one or more TypeScript files.; usage; } +[ -z "$DOT" ] || [ -x /usr/bin/dot -a -x /usr/bin/viewnior -a -x /usr/bin/highlight ] || sudo apt install graphviz viewnior highlight +JAVAOUT=$(cd $(dirname $(realpath $0))/../; pwd)/output/java +JAVA2AST=$JAVAOUT/bin/java2ast +AST2MPL=$JAVAOUT/bin/ast2mpl +[ -x "$JAVA2AST" ] || { echo Cannot execute $JAVA2AST; exit 1; } +[ -x "$AST2MPL" ] || { echo Cannot execute $AST2MPL; exit 1; } + +# Acquire/release a lock +typeset -i LockVar +LockVar=1 +function AcquireLock { + while [[ $LockVar -ne 0 ]] || sleep 0.3; do + ln -s Lock_$2 $1-lock-$((LockVar=(LockVar+1)%$3)) > /dev/null 2>&1 && break + done +} +function ReleaseLock { + rm -f $1-lock-$LockVar +} + +PROCID=$$ +#rm -rf *$PROCID-dump.out $PROCID-summary.out *$PROCID.out.java ts2cxx-lock-* +echo PROCID-dump.out=$PROCID-dump.out $PROCID-summary.out *$PROCID.out.java ts2cxx-lock-* +rm -f ~/tmp/* +cnt=0 +for tt in $LIST; do + echo $((++cnt)): $tt + set -x + echo --------- + echo "$JAVA2AST" "$tt" + out=$("$JAVA2AST" "$tt") + if [ $? -ne 0 ]; then + echo "MSG: Failed, test case (java2ast)$tt" + else + echo "$AST2MPL" "$tt".ast --trace=2 $TREEDIFF + out=$("$AST2MPL" "$tt".ast --trace=2 $TREEDIFF 2>&1) + [ $? -eq 0 ] || echo "MSG: Failed, test case (ast2mpl)$tt" + fi + echo "$out" + $JAVA2AST $tt.java + if [ $? -eq 0 ]; then + $AST2MPL $tt.java.ast $TREEDIFF | sed -n '/^AstDump:/,/^}/p' | sed 's/\(mStrIdx: unsigned int, \)[0-9]* =>/\1=>/' + fi > $tt.orig + $JAVA2AST $T + if [ $? -eq 0 ]; then + $AST2MPL $T.ast $TREEDIFF | sed -n '/^AstDump:/,/^}/p' | sed -e "s|$T|$tt.java|" \ + -e 's/\(mStrIdx: unsigned int, \)[0-9]* =>/\1=>/' + else + E="$E,java2ast" + fi > $tt.gen + diff $tt.orig $tt.gen + if [ $? -eq 0 -a -s $tt.orig -a -s $tt.gen ]; then + Passed="$Passed $tt" + echo "MSG: Passed, test case $tt" + else + diff -I '[0-9]\. const char\*, "' $tt.orig $tt.gen + if [ $? 
-eq 0 -a -s $tt.orig -a -s $tt.gen ]; then + Passed="$Passed $tt" + echo "MSG: Passed, test case (const-char*)$tt" + else + echo "MSG: Failed, test case (diff-ast$E)$tt" + fi + fi + echo === "$T"; cat "$T" + echo --- "$tt"; cat "$tt" + echo === + if [ -n "$DOT" ]; then + for tf in $tt.java $T; do + echo "=== $tf" + cat $tf + out="$out"$'\n'"$($JAVA2AST $tf --dump-dot)" + done + fi + [ -n "$KEEP" ] || rm -f "$T" $tt.orig $tt.gen $tt.java + if [ -n "$DOT" ]; then + echo --- "$tt"; cat "$tt" + idx=0 + grep -n -e "^digraph $PRE[^{]* {" -e "^}" <<< "$out" | grep -A1 "digraph [^{]* {" | + grep -v ^-- | sed 'N;s/\n/ /' | sed -e 's/:digraph [^{]* { */,/' -e 's/:.*/p/g' | + { while read cmd; do + idx=$((idx+1)) + sed -n $cmd <<< "$out" > "$tt"-$idx.dot + dot -Tpng -o "$tt"-$idx.png "$tt"-$idx.dot + # env LC_ALL=C viewnior $VIEWOP "$tt"-$idx.png & + done + wait + rm -f "$tt"-[0-9]*.png "$tt"-[0-9]*.dot; } + fi +done +wait +echo Done. +if [ -s $PROCID-summary.out ]; then + msg=$(grep -a "^MSG: [PF]a[si][sl]ed," $PROCID-summary.out) + echo "$CMDLINE" >> $PROCID-summary.out + if true; then + echo + echo "Test case(s) passed:" + grep -a "^MSG: Passed, test case " <<< "$msg" | sed 's/MSG: Passed, test case //' | env LC_ALL=C sort -r | nl + grep -aq -m1 "^MSG: Failed, test case " <<< "$msg" + if [ $? -eq 0 ]; then + echo + echo "Test case(s) failed:" + grep -a "^MSG: Failed," <<< "$msg" | sed 's/MSG: Failed, test case //' | env LC_ALL=C sort | nl + fi + echo + echo Total: $(wc -l <<< "$msg"), Passed: $(grep -ac "^MSG: Passed," <<< "$msg"), Failed: $(grep -ac "^MSG: Failed," <<< "$msg") + grep -a "^MSG: Failed," <<< "$msg" | sed 's/MSG: Failed, test case (\([^)]*\).*/due to \1/' | sort | uniq -c + fi | tee -a $PROCID-summary.out +fi diff --git a/src/MapleFE/test/gdbfile b/src/MapleFE/test/gdbfile deleted file mode 100644 index 31f4b07b22e30fb1524e1f07cb6c3286ba82c803..0000000000000000000000000000000000000000 --- a/src/MapleFE/test/gdbfile +++ /dev/null @@ -1,4 +0,0 @@ -file $BUILDDIR/java/java2mpl -set args t1.java -b main -r diff --git a/src/MapleFE/test/errtest/Point.class b/src/MapleFE/test/java/errtest/Point.class similarity index 100% rename from src/MapleFE/test/errtest/Point.class rename to src/MapleFE/test/java/errtest/Point.class diff --git a/src/MapleFE/test/errtest/class-field-dup-1.java b/src/MapleFE/test/java/errtest/class-field-dup-1.java similarity index 100% rename from src/MapleFE/test/errtest/class-field-dup-1.java rename to src/MapleFE/test/java/errtest/class-field-dup-1.java diff --git a/src/MapleFE/test/errtest/class-field-dup-1.java.result b/src/MapleFE/test/java/errtest/class-field-dup-1.java.result similarity index 100% rename from src/MapleFE/test/errtest/class-field-dup-1.java.result rename to src/MapleFE/test/java/errtest/class-field-dup-1.java.result diff --git a/src/MapleFE/test/errtest/class-field-dup-2.java b/src/MapleFE/test/java/errtest/class-field-dup-2.java similarity index 100% rename from src/MapleFE/test/errtest/class-field-dup-2.java rename to src/MapleFE/test/java/errtest/class-field-dup-2.java diff --git a/src/MapleFE/test/errtest/class-field-dup-2.java.result b/src/MapleFE/test/java/errtest/class-field-dup-2.java.result similarity index 100% rename from src/MapleFE/test/errtest/class-field-dup-2.java.result rename to src/MapleFE/test/java/errtest/class-field-dup-2.java.result diff --git a/src/MapleFE/test/errtest/definite-assignment-1.java b/src/MapleFE/test/java/errtest/definite-assignment-1.java similarity index 100% rename from 
src/MapleFE/test/errtest/definite-assignment-1.java rename to src/MapleFE/test/java/errtest/definite-assignment-1.java diff --git a/src/MapleFE/test/errtest/definite-assignment-1.java.result b/src/MapleFE/test/java/errtest/definite-assignment-1.java.result similarity index 100% rename from src/MapleFE/test/errtest/definite-assignment-1.java.result rename to src/MapleFE/test/java/errtest/definite-assignment-1.java.result diff --git a/src/MapleFE/test/errtest/definite-assignment-2.java b/src/MapleFE/test/java/errtest/definite-assignment-2.java similarity index 100% rename from src/MapleFE/test/errtest/definite-assignment-2.java rename to src/MapleFE/test/java/errtest/definite-assignment-2.java diff --git a/src/MapleFE/test/errtest/definite-assignment-2.java.result b/src/MapleFE/test/java/errtest/definite-assignment-2.java.result similarity index 100% rename from src/MapleFE/test/errtest/definite-assignment-2.java.result rename to src/MapleFE/test/java/errtest/definite-assignment-2.java.result diff --git a/src/MapleFE/test/errtest/definite-assignment-3.java b/src/MapleFE/test/java/errtest/definite-assignment-3.java similarity index 100% rename from src/MapleFE/test/errtest/definite-assignment-3.java rename to src/MapleFE/test/java/errtest/definite-assignment-3.java diff --git a/src/MapleFE/test/errtest/definite-assignment-3.java.result b/src/MapleFE/test/java/errtest/definite-assignment-3.java.result similarity index 100% rename from src/MapleFE/test/errtest/definite-assignment-3.java.result rename to src/MapleFE/test/java/errtest/definite-assignment-3.java.result diff --git a/src/MapleFE/test/errtest/definite-assignment-4.java b/src/MapleFE/test/java/errtest/definite-assignment-4.java similarity index 100% rename from src/MapleFE/test/errtest/definite-assignment-4.java rename to src/MapleFE/test/java/errtest/definite-assignment-4.java diff --git a/src/MapleFE/test/errtest/definite-assignment-4.java.result b/src/MapleFE/test/java/errtest/definite-assignment-4.java.result similarity index 100% rename from src/MapleFE/test/errtest/definite-assignment-4.java.result rename to src/MapleFE/test/java/errtest/definite-assignment-4.java.result diff --git a/src/MapleFE/test/errtest/definite-assignment-5.java b/src/MapleFE/test/java/errtest/definite-assignment-5.java similarity index 100% rename from src/MapleFE/test/errtest/definite-assignment-5.java rename to src/MapleFE/test/java/errtest/definite-assignment-5.java diff --git a/src/MapleFE/test/errtest/definite-assignment-5.java.result b/src/MapleFE/test/java/errtest/definite-assignment-5.java.result similarity index 96% rename from src/MapleFE/test/errtest/definite-assignment-5.java.result rename to src/MapleFE/test/java/errtest/definite-assignment-5.java.result index 46be40791da6d84464d090e3e90b9dbd84981392..122d471355a0aca9f620ab28c98ea3b1411821ac 100644 --- a/src/MapleFE/test/errtest/definite-assignment-5.java.result +++ b/src/MapleFE/test/java/errtest/definite-assignment-5.java.result @@ -9,7 +9,7 @@ class Point Constructors: Methods: func foo() throws: - var:pl + Decl: pl pl.z Assign 1 return pl.z LocalClasses: diff --git a/src/MapleFE/test/errtest/definite-assignment-6.java b/src/MapleFE/test/java/errtest/definite-assignment-6.java similarity index 100% rename from src/MapleFE/test/errtest/definite-assignment-6.java rename to src/MapleFE/test/java/errtest/definite-assignment-6.java diff --git a/src/MapleFE/test/errtest/definite-assignment-6.java.result b/src/MapleFE/test/java/errtest/definite-assignment-6.java.result similarity index 68% rename 
from src/MapleFE/test/errtest/definite-assignment-6.java.result rename to src/MapleFE/test/java/errtest/definite-assignment-6.java.result index dbd6f354215e3278573491d27b4da271d2dc8e5d..2e0cfcd43ee209ad7e789d05352ea6955746fc54 100644 --- a/src/MapleFE/test/errtest/definite-assignment-6.java.result +++ b/src/MapleFE/test/java/errtest/definite-assignment-6.java.result @@ -8,9 +8,14 @@ class Point Constructors: Methods: func foo() throws: - var:k + Decl: k while n LT 4 k Assign n + cond-branch cond:k GE 5 + true branch : + break: + false branch : + n Assign 6 System.out.println(k) LocalClasses: diff --git a/src/MapleFE/test/errtest/definite-assignment-7.java b/src/MapleFE/test/java/errtest/definite-assignment-7.java similarity index 100% rename from src/MapleFE/test/errtest/definite-assignment-7.java rename to src/MapleFE/test/java/errtest/definite-assignment-7.java diff --git a/src/MapleFE/test/errtest/definite-assignment-7.java.result b/src/MapleFE/test/java/errtest/definite-assignment-7.java.result similarity index 91% rename from src/MapleFE/test/errtest/definite-assignment-7.java.result rename to src/MapleFE/test/java/errtest/definite-assignment-7.java.result index c468a381e0c98c132717d67ad6926eb48685beed..7a0a5b310e26ed6cbc2981c2272d18fd696f3271 100644 --- a/src/MapleFE/test/errtest/definite-assignment-7.java.result +++ b/src/MapleFE/test/java/errtest/definite-assignment-7.java.result @@ -8,8 +8,8 @@ class Point Constructors: Methods: func foo() throws: - var:k - var:n=5 + Decl: k + Decl: n=5 cond-branch cond:n GT 2 true branch : k Assign 3 false branch : diff --git a/src/MapleFE/test/errtest/definite-assignment-8.java b/src/MapleFE/test/java/errtest/definite-assignment-8.java similarity index 100% rename from src/MapleFE/test/errtest/definite-assignment-8.java rename to src/MapleFE/test/java/errtest/definite-assignment-8.java diff --git a/src/MapleFE/test/errtest/definite-assignment-8.java.result b/src/MapleFE/test/java/errtest/definite-assignment-8.java.result similarity index 89% rename from src/MapleFE/test/errtest/definite-assignment-8.java.result rename to src/MapleFE/test/java/errtest/definite-assignment-8.java.result index 54716ecde467bb990ae0a8b0aa9483885785a734..e3f0e3c4409d900d538423bb4f9a6814e2fbc441 100644 --- a/src/MapleFE/test/errtest/definite-assignment-8.java.result +++ b/src/MapleFE/test/java/errtest/definite-assignment-8.java.result @@ -7,8 +7,8 @@ class Point Instance Initializer: Constructors: Methods: - func flow() throws: - var:k + func flow(flag) throws: + Decl: k cond-branch cond:flag true branch : k Assign 3 false branch : diff --git a/src/MapleFE/test/errtest/fun-param-1.java b/src/MapleFE/test/java/errtest/fun-param-1.java similarity index 100% rename from src/MapleFE/test/errtest/fun-param-1.java rename to src/MapleFE/test/java/errtest/fun-param-1.java diff --git a/src/MapleFE/test/errtest/fun-param-1.java.result b/src/MapleFE/test/java/errtest/fun-param-1.java.result similarity index 100% rename from src/MapleFE/test/errtest/fun-param-1.java.result rename to src/MapleFE/test/java/errtest/fun-param-1.java.result diff --git a/src/MapleFE/test/errtest/func-dup-1.java b/src/MapleFE/test/java/errtest/func-dup-1.java similarity index 100% rename from src/MapleFE/test/errtest/func-dup-1.java rename to src/MapleFE/test/java/errtest/func-dup-1.java diff --git a/src/MapleFE/test/errtest/func-dup-1.java.result b/src/MapleFE/test/java/errtest/func-dup-1.java.result similarity index 75% rename from src/MapleFE/test/errtest/func-dup-1.java.result rename to 
src/MapleFE/test/java/errtest/func-dup-1.java.result index 1a3b6b7077899782927dcb665f8ebf820640da8a..e15de87904fd0abaa09181c30a775871270733fd 100644 --- a/src/MapleFE/test/errtest/func-dup-1.java.result +++ b/src/MapleFE/test/java/errtest/func-dup-1.java.result @@ -7,8 +7,8 @@ class A Instance Initializer: Constructors: Methods: - func foo() throws: - func foo() throws: + func foo(c,b) throws: + func foo(i,b,c) throws: LocalClasses: LocalInterfaces: diff --git a/src/MapleFE/test/errtest/generate-result.sh b/src/MapleFE/test/java/errtest/generate-result.sh similarity index 92% rename from src/MapleFE/test/errtest/generate-result.sh rename to src/MapleFE/test/java/errtest/generate-result.sh index 9e1585b384c4e043acf8f6551a256a2ee41f7f20..08b3acc3b58dc1585fb15888064b004629e72007 100755 --- a/src/MapleFE/test/errtest/generate-result.sh +++ b/src/MapleFE/test/java/errtest/generate-result.sh @@ -17,5 +17,5 @@ FILES=$(pwd)/*.java for f in $FILES do echo "Generating result for $f ..." - $BUILDDIR/java/java2mpl $f > $f.result + ../../../output/java/java/java2mpl $f > $f.result done diff --git a/src/MapleFE/test/errtest/new-class-nodecl.java b/src/MapleFE/test/java/errtest/new-class-nodecl.java similarity index 100% rename from src/MapleFE/test/errtest/new-class-nodecl.java rename to src/MapleFE/test/java/errtest/new-class-nodecl.java diff --git a/src/MapleFE/test/errtest/new-class-nodecl.java.result b/src/MapleFE/test/java/errtest/new-class-nodecl.java.result similarity index 90% rename from src/MapleFE/test/errtest/new-class-nodecl.java.result rename to src/MapleFE/test/java/errtest/new-class-nodecl.java.result index dae33b0a3fddbf6052438231ef602b7c352b1e43..b54ce6497757b1fe6579b5dd024179151531b77d 100644 --- a/src/MapleFE/test/errtest/new-class-nodecl.java.result +++ b/src/MapleFE/test/java/errtest/new-class-nodecl.java.result @@ -8,7 +8,7 @@ class A Constructors: Methods: func foo() throws: - new Cyclic + new Cyclic() LocalClasses: LocalInterfaces: diff --git a/src/MapleFE/test/errtest/new-param-nodecl.java b/src/MapleFE/test/java/errtest/new-param-nodecl.java similarity index 100% rename from src/MapleFE/test/errtest/new-param-nodecl.java rename to src/MapleFE/test/java/errtest/new-param-nodecl.java diff --git a/src/MapleFE/test/errtest/new-param-nodecl.java.result b/src/MapleFE/test/java/errtest/new-param-nodecl.java.result similarity index 94% rename from src/MapleFE/test/errtest/new-param-nodecl.java.result rename to src/MapleFE/test/java/errtest/new-param-nodecl.java.result index 4e496b1c10557801f985647afcedf740d99497f5..452a50a7237d54b7b5146f5c8240fa0f3850a030 100644 --- a/src/MapleFE/test/errtest/new-param-nodecl.java.result +++ b/src/MapleFE/test/java/errtest/new-param-nodecl.java.result @@ -9,7 +9,7 @@ class A Constructors: Methods: func foo() throws: - new Cyclic + new Cyclic(a,b) LocalClasses: LocalInterfaces: diff --git a/src/MapleFE/test/errtest/var-no-decl-1.java b/src/MapleFE/test/java/errtest/var-no-decl-1.java similarity index 100% rename from src/MapleFE/test/errtest/var-no-decl-1.java rename to src/MapleFE/test/java/errtest/var-no-decl-1.java diff --git a/src/MapleFE/test/errtest/var-no-decl-1.java.result b/src/MapleFE/test/java/errtest/var-no-decl-1.java.result similarity index 86% rename from src/MapleFE/test/errtest/var-no-decl-1.java.result rename to src/MapleFE/test/java/errtest/var-no-decl-1.java.result index 5b6375ed5b910025aa1b7ff760e9d76a3620dfb4..8e3b5b1a9dfa0a0239a58c4669943b189ff3a5fa 100644 --- a/src/MapleFE/test/errtest/var-no-decl-1.java.result +++ 
b/src/MapleFE/test/java/errtest/var-no-decl-1.java.result @@ -8,9 +8,10 @@ class A Constructors: Methods: func foo() throws: - var:a + Decl: a a Assign b LocalClasses: LocalInterfaces: +Identifier:a has no decl. Identifier:b has no decl. diff --git a/src/MapleFE/test/errtest/var-no-decl-2.java b/src/MapleFE/test/java/errtest/var-no-decl-2.java similarity index 100% rename from src/MapleFE/test/errtest/var-no-decl-2.java rename to src/MapleFE/test/java/errtest/var-no-decl-2.java diff --git a/src/MapleFE/test/errtest/var-no-decl-2.java.result b/src/MapleFE/test/java/errtest/var-no-decl-2.java.result similarity index 100% rename from src/MapleFE/test/errtest/var-no-decl-2.java.result rename to src/MapleFE/test/java/errtest/var-no-decl-2.java.result diff --git a/src/MapleFE/test/errtest/var-no-decl-3.java b/src/MapleFE/test/java/errtest/var-no-decl-3.java similarity index 100% rename from src/MapleFE/test/errtest/var-no-decl-3.java rename to src/MapleFE/test/java/errtest/var-no-decl-3.java diff --git a/src/MapleFE/test/errtest/var-no-decl-3.java.result b/src/MapleFE/test/java/errtest/var-no-decl-3.java.result similarity index 100% rename from src/MapleFE/test/errtest/var-no-decl-3.java.result rename to src/MapleFE/test/java/errtest/var-no-decl-3.java.result diff --git a/src/MapleFE/test/java2mpl/annotation-marker.java b/src/MapleFE/test/java/java2mpl/annotation-marker.java similarity index 100% rename from src/MapleFE/test/java2mpl/annotation-marker.java rename to src/MapleFE/test/java/java2mpl/annotation-marker.java diff --git a/src/MapleFE/test/java2mpl/annotation-marker.java.result b/src/MapleFE/test/java/java2mpl/annotation-marker.java.result similarity index 100% rename from src/MapleFE/test/java2mpl/annotation-marker.java.result rename to src/MapleFE/test/java/java2mpl/annotation-marker.java.result diff --git a/src/MapleFE/test/java2mpl/annotation-single-elem.java b/src/MapleFE/test/java/java2mpl/annotation-single-elem.java similarity index 100% rename from src/MapleFE/test/java2mpl/annotation-single-elem.java rename to src/MapleFE/test/java/java2mpl/annotation-single-elem.java diff --git a/src/MapleFE/test/java2mpl/annotation-single-elem.java.result b/src/MapleFE/test/java/java2mpl/annotation-single-elem.java.result similarity index 100% rename from src/MapleFE/test/java2mpl/annotation-single-elem.java.result rename to src/MapleFE/test/java/java2mpl/annotation-single-elem.java.result diff --git a/src/MapleFE/test/java2mpl/annotation-type-decl.java b/src/MapleFE/test/java/java2mpl/annotation-type-decl.java similarity index 100% rename from src/MapleFE/test/java2mpl/annotation-type-decl.java rename to src/MapleFE/test/java/java2mpl/annotation-type-decl.java diff --git a/src/MapleFE/test/java2mpl/annotation-type-decl.java.result b/src/MapleFE/test/java/java2mpl/annotation-type-decl.java.result similarity index 100% rename from src/MapleFE/test/java2mpl/annotation-type-decl.java.result rename to src/MapleFE/test/java/java2mpl/annotation-type-decl.java.result diff --git a/src/MapleFE/test/java2mpl/annotation-useage.java b/src/MapleFE/test/java/java2mpl/annotation-useage.java similarity index 100% rename from src/MapleFE/test/java2mpl/annotation-useage.java rename to src/MapleFE/test/java/java2mpl/annotation-useage.java diff --git a/src/MapleFE/test/java2mpl/annotation-useage.java.result b/src/MapleFE/test/java/java2mpl/annotation-useage.java.result similarity index 77% rename from src/MapleFE/test/java2mpl/annotation-useage.java.result rename to 
src/MapleFE/test/java/java2mpl/annotation-useage.java.result index a3e660fd181da2ad43978b1fbd62843a66e4cdff..1f64c090b4f1460068047d9e116dc344b9478ca8 100644 --- a/src/MapleFE/test/java2mpl/annotation-useage.java.result +++ b/src/MapleFE/test/java/java2mpl/annotation-useage.java.result @@ -7,7 +7,7 @@ class A Instance Initializer: Constructors: Methods: - func travelThroughTime() throws: + func travelThroughTime(destination) throws: LocalClasses: LocalInterfaces: diff --git a/src/MapleFE/test/java2mpl/array-2d.java b/src/MapleFE/test/java/java2mpl/array-2d.java similarity index 100% rename from src/MapleFE/test/java2mpl/array-2d.java rename to src/MapleFE/test/java/java2mpl/array-2d.java diff --git a/src/MapleFE/test/java2mpl/array-2d.java.result b/src/MapleFE/test/java/java2mpl/array-2d.java.result similarity index 100% rename from src/MapleFE/test/java2mpl/array-2d.java.result rename to src/MapleFE/test/java/java2mpl/array-2d.java.result diff --git a/src/MapleFE/test/java2mpl/array-access.java b/src/MapleFE/test/java/java2mpl/array-access.java similarity index 100% rename from src/MapleFE/test/java2mpl/array-access.java rename to src/MapleFE/test/java/java2mpl/array-access.java diff --git a/src/MapleFE/test/java2mpl/array-access.java.result b/src/MapleFE/test/java/java2mpl/array-access.java.result similarity index 100% rename from src/MapleFE/test/java2mpl/array-access.java.result rename to src/MapleFE/test/java/java2mpl/array-access.java.result diff --git a/src/MapleFE/test/java2mpl/array-init.java b/src/MapleFE/test/java/java2mpl/array-init.java similarity index 100% rename from src/MapleFE/test/java2mpl/array-init.java rename to src/MapleFE/test/java/java2mpl/array-init.java diff --git a/src/MapleFE/test/java2mpl/array-init.java.result b/src/MapleFE/test/java/java2mpl/array-init.java.result similarity index 78% rename from src/MapleFE/test/java2mpl/array-init.java.result rename to src/MapleFE/test/java/java2mpl/array-init.java.result index 66f4a67617a83bcb17b0c27bcf586a77ed2f6a51..3ef43504c621e734813d2c79a03ab7040f8ac918 100644 --- a/src/MapleFE/test/java2mpl/array-init.java.result +++ b/src/MapleFE/test/java/java2mpl/array-init.java.result @@ -8,9 +8,9 @@ class A Constructors: Methods: func foo() throws: - var:ba= - var:array=[] - var:array=[] + Decl: ba= + Decl: array=[] + Decl: array=[] LocalClasses: LocalInterfaces: diff --git a/src/MapleFE/test/java2mpl/array.java b/src/MapleFE/test/java/java2mpl/array.java similarity index 100% rename from src/MapleFE/test/java2mpl/array.java rename to src/MapleFE/test/java/java2mpl/array.java diff --git a/src/MapleFE/test/java2mpl/array.java.result b/src/MapleFE/test/java/java2mpl/array.java.result similarity index 100% rename from src/MapleFE/test/java2mpl/array.java.result rename to src/MapleFE/test/java/java2mpl/array.java.result diff --git a/src/MapleFE/test/java2mpl/assert.java b/src/MapleFE/test/java/java2mpl/assert.java similarity index 100% rename from src/MapleFE/test/java2mpl/assert.java rename to src/MapleFE/test/java/java2mpl/assert.java diff --git a/src/MapleFE/test/java2mpl/assert.java.result b/src/MapleFE/test/java/java2mpl/assert.java.result similarity index 87% rename from src/MapleFE/test/java2mpl/assert.java.result rename to src/MapleFE/test/java/java2mpl/assert.java.result index 5e8b03621585d4050be1e101251f7004ddc0ffa9..16a94b50a7df4c987ace27e3a7c273f576c42806 100644 --- a/src/MapleFE/test/java2mpl/assert.java.result +++ b/src/MapleFE/test/java/java2mpl/assert.java.result @@ -7,7 +7,7 @@ class A Instance Initializer: 
Constructors: Methods: - func foo() throws: + func foo(a,b) throws: assert a LT b : LocalClasses: LocalInterfaces: diff --git a/src/MapleFE/test/java2mpl/assignment.java b/src/MapleFE/test/java/java2mpl/assignment.java similarity index 100% rename from src/MapleFE/test/java2mpl/assignment.java rename to src/MapleFE/test/java/java2mpl/assignment.java diff --git a/src/MapleFE/test/java2mpl/assignment.java.result b/src/MapleFE/test/java/java2mpl/assignment.java.result similarity index 100% rename from src/MapleFE/test/java2mpl/assignment.java.result rename to src/MapleFE/test/java/java2mpl/assignment.java.result diff --git a/src/MapleFE/test/java2mpl/binary-expr-1.java b/src/MapleFE/test/java/java2mpl/binary-expr-1.java similarity index 100% rename from src/MapleFE/test/java2mpl/binary-expr-1.java rename to src/MapleFE/test/java/java2mpl/binary-expr-1.java diff --git a/src/MapleFE/test/java2mpl/binary-expr-1.java.result b/src/MapleFE/test/java/java2mpl/binary-expr-1.java.result similarity index 100% rename from src/MapleFE/test/java2mpl/binary-expr-1.java.result rename to src/MapleFE/test/java/java2mpl/binary-expr-1.java.result diff --git a/src/MapleFE/test/java2mpl/binary-expr-2.java b/src/MapleFE/test/java/java2mpl/binary-expr-2.java similarity index 100% rename from src/MapleFE/test/java2mpl/binary-expr-2.java rename to src/MapleFE/test/java/java2mpl/binary-expr-2.java diff --git a/src/MapleFE/test/java2mpl/binary-expr-2.java.result b/src/MapleFE/test/java/java2mpl/binary-expr-2.java.result similarity index 100% rename from src/MapleFE/test/java2mpl/binary-expr-2.java.result rename to src/MapleFE/test/java/java2mpl/binary-expr-2.java.result diff --git a/src/MapleFE/test/java2mpl/binary-expr-3.java b/src/MapleFE/test/java/java2mpl/binary-expr-3.java similarity index 100% rename from src/MapleFE/test/java2mpl/binary-expr-3.java rename to src/MapleFE/test/java/java2mpl/binary-expr-3.java diff --git a/src/MapleFE/test/java2mpl/binary-expr-3.java.result b/src/MapleFE/test/java/java2mpl/binary-expr-3.java.result similarity index 100% rename from src/MapleFE/test/java2mpl/binary-expr-3.java.result rename to src/MapleFE/test/java/java2mpl/binary-expr-3.java.result diff --git a/src/MapleFE/test/java2mpl/binary-expr-4.java b/src/MapleFE/test/java/java2mpl/binary-expr-4.java similarity index 100% rename from src/MapleFE/test/java2mpl/binary-expr-4.java rename to src/MapleFE/test/java/java2mpl/binary-expr-4.java diff --git a/src/MapleFE/test/java2mpl/binary-expr-4.java.result b/src/MapleFE/test/java/java2mpl/binary-expr-4.java.result similarity index 100% rename from src/MapleFE/test/java2mpl/binary-expr-4.java.result rename to src/MapleFE/test/java/java2mpl/binary-expr-4.java.result diff --git a/src/MapleFE/test/java2mpl/binary-expr.java b/src/MapleFE/test/java/java2mpl/binary-expr.java similarity index 100% rename from src/MapleFE/test/java2mpl/binary-expr.java rename to src/MapleFE/test/java/java2mpl/binary-expr.java diff --git a/src/MapleFE/test/java2mpl/binary-expr.java.result b/src/MapleFE/test/java/java2mpl/binary-expr.java.result similarity index 36% rename from src/MapleFE/test/java2mpl/binary-expr.java.result rename to src/MapleFE/test/java/java2mpl/binary-expr.java.result index cdbfae457762078f979563e89710058d7f0507e8..c17eb9683da4b55f6b21b2ae0a5abc3504be38d1 100644 --- a/src/MapleFE/test/java2mpl/binary-expr.java.result +++ b/src/MapleFE/test/java/java2mpl/binary-expr.java.result @@ -8,7 +8,7 @@ class A Constructors: Methods: func foo() throws: - var:a,b,c + Decl: a,b,c a Assign b Add c a 
Assign b Sub c a Assign b GT c @@ -21,3 +21,30 @@ class A LocalClasses: LocalInterfaces: +Identifier:a has no decl. +Identifier:b has no decl. +Identifier:c has no decl. +Identifier:a has no decl. +Identifier:b has no decl. +Identifier:c has no decl. +Identifier:a has no decl. +Identifier:b has no decl. +Identifier:c has no decl. +Identifier:a has no decl. +Identifier:b has no decl. +Identifier:c has no decl. +Identifier:a has no decl. +Identifier:b has no decl. +Identifier:c has no decl. +Identifier:a has no decl. +Identifier:b has no decl. +Identifier:c has no decl. +Identifier:a has no decl. +Identifier:b has no decl. +Identifier:c has no decl. +Identifier:a has no decl. +Identifier:b has no decl. +Identifier:c has no decl. +Identifier:a has no decl. +Identifier:b has no decl. +Identifier:c has no decl. diff --git a/src/MapleFE/test/java2mpl/call-1.java b/src/MapleFE/test/java/java2mpl/call-1.java similarity index 94% rename from src/MapleFE/test/java2mpl/call-1.java rename to src/MapleFE/test/java/java2mpl/call-1.java index 1dab53c13ee39d0137e35fd78c56173feec84567..749296a93c6f1640b14620019a658782effd873e 100644 --- a/src/MapleFE/test/java2mpl/call-1.java +++ b/src/MapleFE/test/java/java2mpl/call-1.java @@ -13,7 +13,8 @@ //See the Mulan PSL v2 for more details. // class A { + void println() {} void foo() { - println(c); + println(); } } diff --git a/src/MapleFE/test/java2mpl/call-1.java.result b/src/MapleFE/test/java/java2mpl/call-1.java.result similarity index 74% rename from src/MapleFE/test/java2mpl/call-1.java.result rename to src/MapleFE/test/java/java2mpl/call-1.java.result index 7b1a54d690a1ddeda74cdc8d8a7ab52f58e98271..00599325f8be757af7b1762ef0a594cd1fa25d5f 100644 --- a/src/MapleFE/test/java2mpl/call-1.java.result +++ b/src/MapleFE/test/java/java2mpl/call-1.java.result @@ -1,4 +1,4 @@ -Matched 15 tokens. +Matched 20 tokens. ============= Module =========== == Sub Tree == class A @@ -7,8 +7,9 @@ class A Instance Initializer: Constructors: Methods: + func println() throws: func foo() throws: - println(c) + println() LocalClasses: LocalInterfaces: diff --git a/src/MapleFE/test/java2mpl/call-2.java b/src/MapleFE/test/java/java2mpl/call-2.java similarity index 91% rename from src/MapleFE/test/java2mpl/call-2.java rename to src/MapleFE/test/java/java2mpl/call-2.java index 5ad3b36674bf4ff32ace41498c6cde0aae1c7567..315d053548472c2681db79f45d32fb7321c9022b 100644 --- a/src/MapleFE/test/java2mpl/call-2.java +++ b/src/MapleFE/test/java/java2mpl/call-2.java @@ -13,7 +13,8 @@ //See the Mulan PSL v2 for more details. // class A { + void println(char c, int a) {} void foo() { - println(c, a); + println('a', 1); } } diff --git a/src/MapleFE/test/java2mpl/call-2.java.result b/src/MapleFE/test/java/java2mpl/call-2.java.result similarity index 72% rename from src/MapleFE/test/java2mpl/call-2.java.result rename to src/MapleFE/test/java/java2mpl/call-2.java.result index 0f2a36102d38c5a7bff60fce9016ee322e6cfae1..d93d7abeeac17023bbe34f76127160e2ddd62abc 100644 --- a/src/MapleFE/test/java2mpl/call-2.java.result +++ b/src/MapleFE/test/java/java2mpl/call-2.java.result @@ -1,4 +1,4 @@ -Matched 17 tokens. +Matched 28 tokens. 
============= Module =========== == Sub Tree == class A @@ -7,8 +7,9 @@ class A Instance Initializer: Constructors: Methods: + func println(c,a) throws: func foo() throws: - println(c,a) + println(a,1) LocalClasses: LocalInterfaces: diff --git a/src/MapleFE/test/java2mpl/call-3.java b/src/MapleFE/test/java/java2mpl/call-3.java similarity index 87% rename from src/MapleFE/test/java2mpl/call-3.java rename to src/MapleFE/test/java/java2mpl/call-3.java index f6578decf51503b17ff5d3ed4f414a0c113aeafc..2cbca58e90b3cb31eff2b3397e5868deaf32921d 100644 --- a/src/MapleFE/test/java2mpl/call-3.java +++ b/src/MapleFE/test/java/java2mpl/call-3.java @@ -13,7 +13,8 @@ //See the Mulan PSL v2 for more details. // class A { - void foo() { - println(c+1, a); + void println(char c, int a) {} + void foo(char c, int a) { + println(c, a); } } diff --git a/src/MapleFE/test/java/java2mpl/call-3.java.result b/src/MapleFE/test/java/java2mpl/call-3.java.result new file mode 100644 index 0000000000000000000000000000000000000000..e4cd31cb30e39ada338cb254b10c91ed6d41f425 --- /dev/null +++ b/src/MapleFE/test/java/java2mpl/call-3.java.result @@ -0,0 +1,15 @@ +Matched 33 tokens. +============= Module =========== +== Sub Tree == +class A + Fields: + + Instance Initializer: + Constructors: + Methods: + func println(c,a) throws: + func foo(c,a) throws: + println(c,a) + LocalClasses: + LocalInterfaces: + diff --git a/src/MapleFE/test/java2mpl/call-4.java b/src/MapleFE/test/java/java2mpl/call-4.java similarity index 100% rename from src/MapleFE/test/java2mpl/call-4.java rename to src/MapleFE/test/java/java2mpl/call-4.java diff --git a/src/MapleFE/test/java2mpl/call-4.java.result b/src/MapleFE/test/java/java2mpl/call-4.java.result similarity index 100% rename from src/MapleFE/test/java2mpl/call-4.java.result rename to src/MapleFE/test/java/java2mpl/call-4.java.result diff --git a/src/MapleFE/test/java2mpl/call-5.java b/src/MapleFE/test/java/java2mpl/call-5.java similarity index 100% rename from src/MapleFE/test/java2mpl/call-5.java rename to src/MapleFE/test/java/java2mpl/call-5.java diff --git a/src/MapleFE/test/java2mpl/call-5.java.result b/src/MapleFE/test/java/java2mpl/call-5.java.result similarity index 78% rename from src/MapleFE/test/java2mpl/call-5.java.result rename to src/MapleFE/test/java/java2mpl/call-5.java.result index 0addd443fa0633d217a13a0b62e5ed719636db3b..6f0495e478e15adaefe9afb3c1084606b10a5401 100644 --- a/src/MapleFE/test/java2mpl/call-5.java.result +++ b/src/MapleFE/test/java/java2mpl/call-5.java.result @@ -8,7 +8,7 @@ class A Constructors: Methods: func foo() throws: - var:socket=new Socket + Decl: socket=new Socket("localhost",listeningPort) LocalClasses: LocalInterfaces: diff --git a/src/MapleFE/test/java/java2mpl/call-6.java b/src/MapleFE/test/java/java2mpl/call-6.java new file mode 100644 index 0000000000000000000000000000000000000000..c50c134fc5ed1a9d614a85c117819ba0008a6904 --- /dev/null +++ b/src/MapleFE/test/java/java2mpl/call-6.java @@ -0,0 +1,21 @@ +// +//Copyright (C) [2020] Futurewei Technologies, Inc. All rights reverved. +// +//OpenArkFE is licensed under the Mulan PSL v2. +//You can use this software according to the terms and conditions of the Mulan PSL v2. +//You may obtain a copy of Mulan PSL v2 at: +// +// http://license.coscl.org.cn/MulanPSL2 +// +//THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +//EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +//FIT FOR A PARTICULAR PURPOSE. 
+//See the Mulan PSL v2 for more details. +// +class A { + int bar(char c) { return 1; } + void foo(char c) { + int i; + i = bar(c); + } +} diff --git a/src/MapleFE/test/java/java2mpl/call-6.java.result b/src/MapleFE/test/java/java2mpl/call-6.java.result new file mode 100644 index 0000000000000000000000000000000000000000..9e07cabbf805525a53b101f2ff8cc6a850e31ee0 --- /dev/null +++ b/src/MapleFE/test/java/java2mpl/call-6.java.result @@ -0,0 +1,18 @@ +Matched 33 tokens. +============= Module =========== +== Sub Tree == +class A + Fields: + + Instance Initializer: + Constructors: + Methods: + func bar(c) throws: + return 1 + func foo(c) throws: + Decl: i + i Assign bar(c) + LocalClasses: + LocalInterfaces: + +Identifier:i has no decl. diff --git a/src/MapleFE/test/java/java2mpl/call-7.java b/src/MapleFE/test/java/java2mpl/call-7.java new file mode 100644 index 0000000000000000000000000000000000000000..9d89cb0d8c5bc486c4624e11b74fec8b0e69988d --- /dev/null +++ b/src/MapleFE/test/java/java2mpl/call-7.java @@ -0,0 +1,26 @@ +// +//Copyright (C) [2020] Futurewei Technologies, Inc. All rights reverved. +// +//OpenArkFE is licensed under the Mulan PSL v2. +//You can use this software according to the terms and conditions of the Mulan PSL v2. +//You may obtain a copy of Mulan PSL v2 at: +// +// http://license.coscl.org.cn/MulanPSL2 +// +//THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +//EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +//FIT FOR A PARTICULAR PURPOSE. +//See the Mulan PSL v2 for more details. +// +class B { + int bar(int c, int a) { + c += a; + return c; + } +} + +class A { + int foo(B b) { + return b.bar(3, 4); + } +} diff --git a/src/MapleFE/test/java/java2mpl/call-7.java.result b/src/MapleFE/test/java/java2mpl/call-7.java.result new file mode 100644 index 0000000000000000000000000000000000000000..be32c9b7d483f8f204644fcd59cfd1ee6d2ed795 --- /dev/null +++ b/src/MapleFE/test/java/java2mpl/call-7.java.result @@ -0,0 +1,28 @@ +Matched 22 tokens. +Matched 44 tokens. +============= Module =========== +== Sub Tree == +class B + Fields: + + Instance Initializer: + Constructors: + Methods: + func bar(c,a) throws: + c AddAssign a + return c + LocalClasses: + LocalInterfaces: + +== Sub Tree == +class A + Fields: + + Instance Initializer: + Constructors: + Methods: + func foo(b) throws: + return b.bar(3,4) + LocalClasses: + LocalInterfaces: + diff --git a/src/MapleFE/test/java/java2mpl/call-8.java b/src/MapleFE/test/java/java2mpl/call-8.java new file mode 100644 index 0000000000000000000000000000000000000000..68ca75463eab2d199bf7282930132016ee626871 --- /dev/null +++ b/src/MapleFE/test/java/java2mpl/call-8.java @@ -0,0 +1,29 @@ +// +//Copyright (C) [2021] Futurewei Technologies, Inc. All rights reverved. +// +//OpenArkFE is licensed under the Mulan PSL v2. +//You can use this software according to the terms and conditions of the Mulan PSL v2. +//You may obtain a copy of Mulan PSL v2 at: +// +// http://license.coscl.org.cn/MulanPSL2 +// +//THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +//EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +//FIT FOR A PARTICULAR PURPOSE. +//See the Mulan PSL v2 for more details. 
+// +class A { + int f; + int bar(int c, A a) { return c; } + int bar(int c, int a) { return a; } + int bar(A a) { return f; } + int foo(A b) { + int i = 0; + int j = 3; + int k = 4; + i = bar(i); + j = bar(j, k); + k = bar(j, this); + return i + j + k; + } +} diff --git a/src/MapleFE/test/java/java2mpl/call-8.java.result b/src/MapleFE/test/java/java2mpl/call-8.java.result new file mode 100644 index 0000000000000000000000000000000000000000..1ac416e49e35d2d8b79698376c5eab2d3977cbff --- /dev/null +++ b/src/MapleFE/test/java/java2mpl/call-8.java.result @@ -0,0 +1,29 @@ +Matched 101 tokens. +============= Module =========== +== Sub Tree == +class A + Fields: + f + Instance Initializer: + Constructors: + Methods: + func bar(c,a) throws: + return c + func bar(c,a) throws: + return a + func bar(a) throws: + return f + func foo(b) throws: + Decl: i=0 + Decl: j=3 + Decl: k=4 + i Assign bar(i) + j Assign bar(j,k) + k Assign bar(j,this) + return i Add j Add k + LocalClasses: + LocalInterfaces: + +Identifier:i has no decl. +Identifier:j has no decl. +Identifier:k has no decl. diff --git a/src/MapleFE/test/sharedfe/add2.java b/src/MapleFE/test/java/java2mpl/call-param.java similarity index 90% rename from src/MapleFE/test/sharedfe/add2.java rename to src/MapleFE/test/java/java2mpl/call-param.java index 7f9c2a8f24dd8d66c4358eeab2251c8de35d71d1..eaed074e0fe533038ec2d7318ff163ed64e6b9b9 100644 --- a/src/MapleFE/test/sharedfe/add2.java +++ b/src/MapleFE/test/java/java2mpl/call-param.java @@ -12,10 +12,8 @@ //FIT FOR A PARTICULAR PURPOSE. //See the Mulan PSL v2 for more details. // - -int foo(int a, int b) { - int c; - - c = a + b; - return c; +class A { + void foo(int c) { + println(c); + } } diff --git a/src/MapleFE/test/java2mpl/call-3.java.result b/src/MapleFE/test/java/java2mpl/call-param.java.result similarity index 69% rename from src/MapleFE/test/java2mpl/call-3.java.result rename to src/MapleFE/test/java/java2mpl/call-param.java.result index 75eeeb5eba57813fd5c125aeb80afebf21dd84b5..058a3d0a1a765659f3b305ea8e88a7c2b4dd60b4 100644 --- a/src/MapleFE/test/java2mpl/call-3.java.result +++ b/src/MapleFE/test/java/java2mpl/call-param.java.result @@ -1,4 +1,4 @@ -Matched 19 tokens. +Matched 17 tokens. 
============= Module =========== == Sub Tree == class A @@ -7,8 +7,8 @@ class A Instance Initializer: Constructors: Methods: - func foo() throws: - println(c Add 1,a) + func foo(c) throws: + println(c) LocalClasses: LocalInterfaces: diff --git a/src/MapleFE/test/java2mpl/cast-1.java b/src/MapleFE/test/java/java2mpl/cast-1.java similarity index 100% rename from src/MapleFE/test/java2mpl/cast-1.java rename to src/MapleFE/test/java/java2mpl/cast-1.java diff --git a/src/MapleFE/test/java2mpl/cast-1.java.result b/src/MapleFE/test/java/java2mpl/cast-1.java.result similarity index 73% rename from src/MapleFE/test/java2mpl/cast-1.java.result rename to src/MapleFE/test/java/java2mpl/cast-1.java.result index 05d4989550b031a22b210e597570d9ba52e4368d..50dd6170c0555c65ec5dcda78983a90d7cc38fb4 100644 --- a/src/MapleFE/test/java2mpl/cast-1.java.result +++ b/src/MapleFE/test/java/java2mpl/cast-1.java.result @@ -19,7 +19,7 @@ interface Colorable Fields: Methods: - func setColor() throws: + func setColor(color) throws: == Sub Tree == class ColoredPoint @@ -28,7 +28,7 @@ class ColoredPoint Instance Initializer: Constructors: Methods: - func setColor() throws: + func setColor(color) throws: this.color Assign color LocalClasses: LocalInterfaces: @@ -50,15 +50,18 @@ class Test Instance Initializer: Constructors: Methods: - func main() throws: - var:p=new Point - var:cp=new ColoredPoint - var:c + func main(args) throws: + Decl: p=new Point() + Decl: cp=new ColoredPoint() + Decl: c cp Assign (ColoredPoint)p c Assign (Colorable)p - var:l=(Long)p - var:e=new EndPoint + Decl: l=(Long)p + Decl: e=new EndPoint() c Assign (Colorable)e LocalClasses: LocalInterfaces: +Identifier:cp has no decl. +Identifier:c has no decl. +Identifier:c has no decl. diff --git a/src/MapleFE/test/java2mpl/cast-2.java b/src/MapleFE/test/java/java2mpl/cast-2.java similarity index 100% rename from src/MapleFE/test/java2mpl/cast-2.java rename to src/MapleFE/test/java/java2mpl/cast-2.java diff --git a/src/MapleFE/test/java2mpl/cast-2.java.result b/src/MapleFE/test/java/java2mpl/cast-2.java.result similarity index 100% rename from src/MapleFE/test/java2mpl/cast-2.java.result rename to src/MapleFE/test/java/java2mpl/cast-2.java.result diff --git a/src/MapleFE/test/java2mpl/class-anonymous.java b/src/MapleFE/test/java/java2mpl/class-anonymous.java similarity index 100% rename from src/MapleFE/test/java2mpl/class-anonymous.java rename to src/MapleFE/test/java/java2mpl/class-anonymous.java diff --git a/src/MapleFE/test/java2mpl/class-anonymous.java.result b/src/MapleFE/test/java/java2mpl/class-anonymous.java.result similarity index 83% rename from src/MapleFE/test/java2mpl/class-anonymous.java.result rename to src/MapleFE/test/java/java2mpl/class-anonymous.java.result index 5dd2661dbf5ef9a1262d47cf44c7b3dfc8675c40..69dec033ca83d6d62bb51a8bf318dbe45cab967b 100644 --- a/src/MapleFE/test/java2mpl/class-anonymous.java.result +++ b/src/MapleFE/test/java/java2mpl/class-anonymous.java.result @@ -8,7 +8,7 @@ class ChatTest Constructors: Methods: func performDontReceiveMessageInNameState() throws: Exception - var:client2=new Thread + Decl: client2=new Thread(new ChatConnection()) LocalClasses: LocalInterfaces: diff --git a/src/MapleFE/test/java2mpl/class-constructor-explicit-2.java b/src/MapleFE/test/java/java2mpl/class-constructor-explicit-2.java similarity index 100% rename from src/MapleFE/test/java2mpl/class-constructor-explicit-2.java rename to src/MapleFE/test/java/java2mpl/class-constructor-explicit-2.java diff --git 
a/src/MapleFE/test/java2mpl/class-constructor-explicit-2.java.result b/src/MapleFE/test/java/java2mpl/class-constructor-explicit-2.java.result similarity index 76% rename from src/MapleFE/test/java2mpl/class-constructor-explicit-2.java.result rename to src/MapleFE/test/java/java2mpl/class-constructor-explicit-2.java.result index a2e56860a7eb31a9e86e27dd1b9929d646eb9bc8..3130fa4e825f9fa930afb2a9b417b7288aa59d8d 100644 --- a/src/MapleFE/test/java2mpl/class-constructor-explicit-2.java.result +++ b/src/MapleFE/test/java/java2mpl/class-constructor-explicit-2.java.result @@ -7,7 +7,7 @@ class Point x y Instance Initializer: Constructors: - constructor Point() throws: + constructor Point(x,y) throws: this.x Assign x this.y Assign y Methods: @@ -20,8 +20,8 @@ class ColoredPoint WHITE=0 color Instance Initializer: Constructors: - constructor ColoredPoint() throws: - constructor ColoredPoint() throws: + constructor ColoredPoint(x,y) throws: + constructor ColoredPoint(x,y,color) throws: this.color Assign color Methods: LocalClasses: diff --git a/src/MapleFE/test/java2mpl/class-constructor-explicit.java b/src/MapleFE/test/java/java2mpl/class-constructor-explicit.java similarity index 100% rename from src/MapleFE/test/java2mpl/class-constructor-explicit.java rename to src/MapleFE/test/java/java2mpl/class-constructor-explicit.java diff --git a/src/MapleFE/test/java2mpl/class-constructor-explicit.java.result b/src/MapleFE/test/java/java2mpl/class-constructor-explicit.java.result similarity index 85% rename from src/MapleFE/test/java2mpl/class-constructor-explicit.java.result rename to src/MapleFE/test/java/java2mpl/class-constructor-explicit.java.result index 5194e250689f7824d8775eef28dcc95aea99c5dd..51e2d3636b85711fb1df3909b16776e020e146f6 100644 --- a/src/MapleFE/test/java2mpl/class-constructor-explicit.java.result +++ b/src/MapleFE/test/java/java2mpl/class-constructor-explicit.java.result @@ -6,7 +6,7 @@ class Point x y Instance Initializer: Constructors: - constructor Point() throws: + constructor Point(y) throws: this.y Assign y Methods: LocalClasses: diff --git a/src/MapleFE/test/java2mpl/class-constructor.java b/src/MapleFE/test/java/java2mpl/class-constructor.java similarity index 100% rename from src/MapleFE/test/java2mpl/class-constructor.java rename to src/MapleFE/test/java/java2mpl/class-constructor.java diff --git a/src/MapleFE/test/java2mpl/class-constructor.java.result b/src/MapleFE/test/java/java2mpl/class-constructor.java.result similarity index 86% rename from src/MapleFE/test/java2mpl/class-constructor.java.result rename to src/MapleFE/test/java/java2mpl/class-constructor.java.result index d9c4a0af8eb133761c6002521e117587358765bf..ef94fd9d7618926f879363fb37540882c065f77a 100644 --- a/src/MapleFE/test/java2mpl/class-constructor.java.result +++ b/src/MapleFE/test/java/java2mpl/class-constructor.java.result @@ -6,7 +6,7 @@ class Point x y Instance Initializer: Constructors: - constructor Point() throws: + constructor Point(x,y) throws: this.x Assign x this.y Assign y Methods: diff --git a/src/MapleFE/test/java2mpl/class-instance-initializer.java b/src/MapleFE/test/java/java2mpl/class-instance-initializer.java similarity index 100% rename from src/MapleFE/test/java2mpl/class-instance-initializer.java rename to src/MapleFE/test/java/java2mpl/class-instance-initializer.java diff --git a/src/MapleFE/test/java2mpl/class-instance-initializer.java.result b/src/MapleFE/test/java/java2mpl/class-instance-initializer.java.result similarity index 100% rename from 
src/MapleFE/test/java2mpl/class-instance-initializer.java.result rename to src/MapleFE/test/java/java2mpl/class-instance-initializer.java.result diff --git a/src/MapleFE/test/java2mpl/class-method-1.java b/src/MapleFE/test/java/java2mpl/class-method-1.java similarity index 100% rename from src/MapleFE/test/java2mpl/class-method-1.java rename to src/MapleFE/test/java/java2mpl/class-method-1.java diff --git a/src/MapleFE/test/java2mpl/class-method-1.java.result b/src/MapleFE/test/java/java2mpl/class-method-1.java.result similarity index 100% rename from src/MapleFE/test/java2mpl/class-method-1.java.result rename to src/MapleFE/test/java/java2mpl/class-method-1.java.result diff --git a/src/MapleFE/test/java2mpl/class-method-2.java b/src/MapleFE/test/java/java2mpl/class-method-2.java similarity index 100% rename from src/MapleFE/test/java2mpl/class-method-2.java rename to src/MapleFE/test/java/java2mpl/class-method-2.java diff --git a/src/MapleFE/test/java2mpl/class-method-2.java.result b/src/MapleFE/test/java/java2mpl/class-method-2.java.result similarity index 87% rename from src/MapleFE/test/java2mpl/class-method-2.java.result rename to src/MapleFE/test/java/java2mpl/class-method-2.java.result index 3a49cec4d0c63bb39c2f62116642b5004d2651a6..63a9e974acf1a29c1e8253c016c96d886cfef76b 100644 --- a/src/MapleFE/test/java2mpl/class-method-2.java.result +++ b/src/MapleFE/test/java/java2mpl/class-method-2.java.result @@ -7,7 +7,7 @@ class A Instance Initializer: Constructors: Methods: - func foo() throws: + func foo(c) throws: LocalClasses: LocalInterfaces: diff --git a/src/MapleFE/test/java2mpl/class-multi-decl.java b/src/MapleFE/test/java/java2mpl/class-multi-decl.java similarity index 100% rename from src/MapleFE/test/java2mpl/class-multi-decl.java rename to src/MapleFE/test/java/java2mpl/class-multi-decl.java diff --git a/src/MapleFE/test/java2mpl/class-multi-decl.java.result b/src/MapleFE/test/java/java2mpl/class-multi-decl.java.result similarity index 100% rename from src/MapleFE/test/java2mpl/class-multi-decl.java.result rename to src/MapleFE/test/java/java2mpl/class-multi-decl.java.result diff --git a/src/MapleFE/test/java2mpl/class-multi-line.java b/src/MapleFE/test/java/java2mpl/class-multi-line.java similarity index 100% rename from src/MapleFE/test/java2mpl/class-multi-line.java rename to src/MapleFE/test/java/java2mpl/class-multi-line.java diff --git a/src/MapleFE/test/java2mpl/class-multi-line.java.result b/src/MapleFE/test/java/java2mpl/class-multi-line.java.result similarity index 100% rename from src/MapleFE/test/java2mpl/class-multi-line.java.result rename to src/MapleFE/test/java/java2mpl/class-multi-line.java.result diff --git a/src/MapleFE/test/java2mpl/class-nested.java b/src/MapleFE/test/java/java2mpl/class-nested.java similarity index 100% rename from src/MapleFE/test/java2mpl/class-nested.java rename to src/MapleFE/test/java/java2mpl/class-nested.java diff --git a/src/MapleFE/test/java2mpl/class-nested.java.result b/src/MapleFE/test/java/java2mpl/class-nested.java.result similarity index 100% rename from src/MapleFE/test/java2mpl/class-nested.java.result rename to src/MapleFE/test/java/java2mpl/class-nested.java.result diff --git a/src/MapleFE/test/java2mpl/class-static-initializer.java b/src/MapleFE/test/java/java2mpl/class-static-initializer.java similarity index 100% rename from src/MapleFE/test/java2mpl/class-static-initializer.java rename to src/MapleFE/test/java/java2mpl/class-static-initializer.java diff --git 
a/src/MapleFE/test/java2mpl/class-static-initializer.java.result b/src/MapleFE/test/java/java2mpl/class-static-initializer.java.result similarity index 100% rename from src/MapleFE/test/java2mpl/class-static-initializer.java.result rename to src/MapleFE/test/java/java2mpl/class-static-initializer.java.result diff --git a/src/MapleFE/test/java2mpl/class.java b/src/MapleFE/test/java/java2mpl/class.java similarity index 100% rename from src/MapleFE/test/java2mpl/class.java rename to src/MapleFE/test/java/java2mpl/class.java diff --git a/src/MapleFE/test/java2mpl/class.java.result b/src/MapleFE/test/java/java2mpl/class.java.result similarity index 100% rename from src/MapleFE/test/java2mpl/class.java.result rename to src/MapleFE/test/java/java2mpl/class.java.result diff --git a/src/MapleFE/test/java2mpl/comment-1.java b/src/MapleFE/test/java/java2mpl/comment-1.java similarity index 100% rename from src/MapleFE/test/java2mpl/comment-1.java rename to src/MapleFE/test/java/java2mpl/comment-1.java diff --git a/src/MapleFE/test/java2mpl/comment-1.java.result b/src/MapleFE/test/java/java2mpl/comment-1.java.result similarity index 85% rename from src/MapleFE/test/java2mpl/comment-1.java.result rename to src/MapleFE/test/java/java2mpl/comment-1.java.result index 80a92a3a9b58bfc354c4a28fd18fe06c52931e64..d45e86e510ff9949e14b2fcf8a7510269aad3893 100644 --- a/src/MapleFE/test/java2mpl/comment-1.java.result +++ b/src/MapleFE/test/java/java2mpl/comment-1.java.result @@ -6,7 +6,7 @@ class Point Instance Initializer: Constructors: - constructor Point() throws: + constructor Point(x) throws: this.x Assign x Methods: LocalClasses: diff --git a/src/MapleFE/test/java2mpl/comment-2.java b/src/MapleFE/test/java/java2mpl/comment-2.java similarity index 100% rename from src/MapleFE/test/java2mpl/comment-2.java rename to src/MapleFE/test/java/java2mpl/comment-2.java diff --git a/src/MapleFE/test/java2mpl/comment-2.java.result b/src/MapleFE/test/java/java2mpl/comment-2.java.result similarity index 100% rename from src/MapleFE/test/java2mpl/comment-2.java.result rename to src/MapleFE/test/java/java2mpl/comment-2.java.result diff --git a/src/MapleFE/test/java2mpl/comment-3.java b/src/MapleFE/test/java/java2mpl/comment-3.java similarity index 100% rename from src/MapleFE/test/java2mpl/comment-3.java rename to src/MapleFE/test/java/java2mpl/comment-3.java diff --git a/src/MapleFE/test/java2mpl/comment-3.java.result b/src/MapleFE/test/java/java2mpl/comment-3.java.result similarity index 85% rename from src/MapleFE/test/java2mpl/comment-3.java.result rename to src/MapleFE/test/java/java2mpl/comment-3.java.result index 80a92a3a9b58bfc354c4a28fd18fe06c52931e64..d45e86e510ff9949e14b2fcf8a7510269aad3893 100644 --- a/src/MapleFE/test/java2mpl/comment-3.java.result +++ b/src/MapleFE/test/java/java2mpl/comment-3.java.result @@ -6,7 +6,7 @@ class Point Instance Initializer: Constructors: - constructor Point() throws: + constructor Point(x) throws: this.x Assign x Methods: LocalClasses: diff --git a/src/MapleFE/test/java2mpl/comment-4.java b/src/MapleFE/test/java/java2mpl/comment-4.java similarity index 100% rename from src/MapleFE/test/java2mpl/comment-4.java rename to src/MapleFE/test/java/java2mpl/comment-4.java diff --git a/src/MapleFE/test/java2mpl/comment-4.java.result b/src/MapleFE/test/java/java2mpl/comment-4.java.result similarity index 100% rename from src/MapleFE/test/java2mpl/comment-4.java.result rename to src/MapleFE/test/java/java2mpl/comment-4.java.result diff --git a/src/MapleFE/test/java2mpl/conversion-1.java 
b/src/MapleFE/test/java/java2mpl/conversion-1.java similarity index 100% rename from src/MapleFE/test/java2mpl/conversion-1.java rename to src/MapleFE/test/java/java2mpl/conversion-1.java diff --git a/src/MapleFE/test/java2mpl/conversion-1.java.result b/src/MapleFE/test/java/java2mpl/conversion-1.java.result similarity index 56% rename from src/MapleFE/test/java2mpl/conversion-1.java.result rename to src/MapleFE/test/java/java2mpl/conversion-1.java.result index 823c0b8ee4045035c60b7ad6696c76c986ddf44b..486eae7281893a5a605b3b0647b4e11745186e3b 100644 --- a/src/MapleFE/test/java2mpl/conversion-1.java.result +++ b/src/MapleFE/test/java/java2mpl/conversion-1.java.result @@ -7,13 +7,16 @@ class Test Instance Initializer: Constructors: Methods: - func main() throws: - var:i=(int)12.5 + func main(args) throws: + Decl: i=(int)12.5 System.out.println("(int)12.5f==" Add i) - var:f=i + Decl: f=i System.out.print(f) - f Assign - var:d=Math.sin(f) + f Assign f Mul i + Decl: d=Math.sin(f) LocalClasses: LocalInterfaces: +Identifier:f has no decl. +Identifier:f has no decl. +Identifier:i has no decl. diff --git a/src/MapleFE/test/java2mpl/definite-assignment-1.java b/src/MapleFE/test/java/java2mpl/definite-assignment-1.java similarity index 100% rename from src/MapleFE/test/java2mpl/definite-assignment-1.java rename to src/MapleFE/test/java/java2mpl/definite-assignment-1.java diff --git a/src/MapleFE/test/java2mpl/definite-assignment-1.java.result b/src/MapleFE/test/java/java2mpl/definite-assignment-1.java.result similarity index 100% rename from src/MapleFE/test/java2mpl/definite-assignment-1.java.result rename to src/MapleFE/test/java/java2mpl/definite-assignment-1.java.result diff --git a/src/MapleFE/test/java2mpl/definite-assignment-2.java b/src/MapleFE/test/java/java2mpl/definite-assignment-2.java similarity index 100% rename from src/MapleFE/test/java2mpl/definite-assignment-2.java rename to src/MapleFE/test/java/java2mpl/definite-assignment-2.java diff --git a/src/MapleFE/test/java2mpl/definite-assignment-2.java.result b/src/MapleFE/test/java/java2mpl/definite-assignment-2.java.result similarity index 100% rename from src/MapleFE/test/java2mpl/definite-assignment-2.java.result rename to src/MapleFE/test/java/java2mpl/definite-assignment-2.java.result diff --git a/src/MapleFE/test/java2mpl/definite-assignment-3.java b/src/MapleFE/test/java/java2mpl/definite-assignment-3.java similarity index 100% rename from src/MapleFE/test/java2mpl/definite-assignment-3.java rename to src/MapleFE/test/java/java2mpl/definite-assignment-3.java diff --git a/src/MapleFE/test/java2mpl/definite-assignment-3.java.result b/src/MapleFE/test/java/java2mpl/definite-assignment-3.java.result similarity index 82% rename from src/MapleFE/test/java2mpl/definite-assignment-3.java.result rename to src/MapleFE/test/java/java2mpl/definite-assignment-3.java.result index 5066bbbf6afc1dbe2efdd8ab7cc4436d3066f89f..dacb216c91756bdea4d421503c0e1427f6afa5b1 100644 --- a/src/MapleFE/test/java2mpl/definite-assignment-3.java.result +++ b/src/MapleFE/test/java/java2mpl/definite-assignment-3.java.result @@ -8,11 +8,10 @@ class A Constructors: Methods: func foo() throws: - var:k + Decl: k cond-branch cond:v GT 0 Land (k Assign System.in.read()) GE 0 true branch : - System.out.println(k) - false branch : + System.out.println(k) false branch : LocalClasses: LocalInterfaces: diff --git a/src/MapleFE/test/java2mpl/definite-assignment-4.java b/src/MapleFE/test/java/java2mpl/definite-assignment-4.java similarity index 100% rename from 
src/MapleFE/test/java2mpl/definite-assignment-4.java rename to src/MapleFE/test/java/java2mpl/definite-assignment-4.java diff --git a/src/MapleFE/test/java2mpl/definite-assignment-4.java.result b/src/MapleFE/test/java/java2mpl/definite-assignment-4.java.result similarity index 68% rename from src/MapleFE/test/java2mpl/definite-assignment-4.java.result rename to src/MapleFE/test/java/java2mpl/definite-assignment-4.java.result index c9954779e6e336d17c4ab81a1fb7c283799057ac..b682ea540fff5f1c93296b7d850111b67ac0d24b 100644 --- a/src/MapleFE/test/java2mpl/definite-assignment-4.java.result +++ b/src/MapleFE/test/java/java2mpl/definite-assignment-4.java.result @@ -8,9 +8,14 @@ class A Constructors: Methods: func foo() throws: - var:k + Decl: k while true k Assign n + cond-branch cond:k GE 5 + true branch : + break: + false branch : + n Assign 6 System.out.println(k) LocalClasses: diff --git a/src/MapleFE/test/java2mpl/definite-assignment-5.java b/src/MapleFE/test/java/java2mpl/definite-assignment-5.java similarity index 100% rename from src/MapleFE/test/java2mpl/definite-assignment-5.java rename to src/MapleFE/test/java/java2mpl/definite-assignment-5.java diff --git a/src/MapleFE/test/java2mpl/definite-assignment-5.java.result b/src/MapleFE/test/java/java2mpl/definite-assignment-5.java.result similarity index 76% rename from src/MapleFE/test/java2mpl/definite-assignment-5.java.result rename to src/MapleFE/test/java/java2mpl/definite-assignment-5.java.result index 96fed950ed666c113bb89f747d84303bcbb74ed3..ab7ce94f419de62fe6d9ec11a847a51f6b682b47 100644 --- a/src/MapleFE/test/java2mpl/definite-assignment-5.java.result +++ b/src/MapleFE/test/java/java2mpl/definite-assignment-5.java.result @@ -7,14 +7,12 @@ class A Instance Initializer: Constructors: Methods: - func flow() throws: - var:k + func flow(flag) throws: + Decl: k cond-branch cond:flag true branch : - k Assign 3 - false branch : + k Assign 3 false branch : k Assign 4 - System.out.println(k) LocalClasses: LocalInterfaces: diff --git a/src/MapleFE/test/java2mpl/definite-assignment-6.java b/src/MapleFE/test/java/java2mpl/definite-assignment-6.java similarity index 100% rename from src/MapleFE/test/java2mpl/definite-assignment-6.java rename to src/MapleFE/test/java/java2mpl/definite-assignment-6.java diff --git a/src/MapleFE/test/java2mpl/definite-assignment-6.java.result b/src/MapleFE/test/java/java2mpl/definite-assignment-6.java.result similarity index 82% rename from src/MapleFE/test/java2mpl/definite-assignment-6.java.result rename to src/MapleFE/test/java/java2mpl/definite-assignment-6.java.result index de1ccd48d8c787674c090b95980c5f7666f87e5d..72b74d484c82e50dc48ef519aa881d158f966453 100644 --- a/src/MapleFE/test/java2mpl/definite-assignment-6.java.result +++ b/src/MapleFE/test/java/java2mpl/definite-assignment-6.java.result @@ -8,11 +8,10 @@ class A Constructors: Methods: func foo() throws: - var:k + Decl: k cond-branch cond:v GT 0 Land (k Assign System.read()) GE 0 true branch : - System.println(k) - false branch : + System.println(k) false branch : LocalClasses: LocalInterfaces: diff --git a/src/MapleFE/test/java2mpl/doloop-1.java b/src/MapleFE/test/java/java2mpl/doloop-1.java similarity index 100% rename from src/MapleFE/test/java2mpl/doloop-1.java rename to src/MapleFE/test/java/java2mpl/doloop-1.java diff --git a/src/MapleFE/test/java2mpl/doloop-1.java.result b/src/MapleFE/test/java/java2mpl/doloop-1.java.result similarity index 100% rename from src/MapleFE/test/java2mpl/doloop-1.java.result rename to 
src/MapleFE/test/java/java2mpl/doloop-1.java.result diff --git a/src/MapleFE/test/java2mpl/enum-1.java b/src/MapleFE/test/java/java2mpl/enum-1.java similarity index 100% rename from src/MapleFE/test/java2mpl/enum-1.java rename to src/MapleFE/test/java/java2mpl/enum-1.java diff --git a/src/MapleFE/test/java2mpl/enum-1.java.result b/src/MapleFE/test/java/java2mpl/enum-1.java.result similarity index 90% rename from src/MapleFE/test/java2mpl/enum-1.java.result rename to src/MapleFE/test/java/java2mpl/enum-1.java.result index 29ffe431e3bbc4d109adc343ba3963faf902856b..b1a3b3dc78218547f76750421ccb9dcc06c2931f 100644 --- a/src/MapleFE/test/java2mpl/enum-1.java.result +++ b/src/MapleFE/test/java/java2mpl/enum-1.java.result @@ -6,7 +6,7 @@ class[JavaEnum] Coin value Instance Initializer: Constructors: - constructor Coin() throws: + constructor Coin(value) throws: this.value Assign value Methods: func value() throws: diff --git a/src/MapleFE/test/java2mpl/escape-seq.java b/src/MapleFE/test/java/java2mpl/escape-seq.java similarity index 100% rename from src/MapleFE/test/java2mpl/escape-seq.java rename to src/MapleFE/test/java/java2mpl/escape-seq.java diff --git a/src/MapleFE/test/java2mpl/escape-seq.java.result b/src/MapleFE/test/java/java2mpl/escape-seq.java.result similarity index 100% rename from src/MapleFE/test/java2mpl/escape-seq.java.result rename to src/MapleFE/test/java/java2mpl/escape-seq.java.result diff --git a/src/MapleFE/test/java/java2mpl/field-1.java b/src/MapleFE/test/java/java2mpl/field-1.java new file mode 100644 index 0000000000000000000000000000000000000000..7fa020083f8c42382ac826d28be6e066aa8d148e --- /dev/null +++ b/src/MapleFE/test/java/java2mpl/field-1.java @@ -0,0 +1,5 @@ +class A { + public byte B() { + c = e.getValue().getBytes().length; + } +} diff --git a/src/MapleFE/test/java/java2mpl/field-1.java.result b/src/MapleFE/test/java/java2mpl/field-1.java.result new file mode 100644 index 0000000000000000000000000000000000000000..08b23df1739f889476828801001425adb619fe28 --- /dev/null +++ b/src/MapleFE/test/java/java2mpl/field-1.java.result @@ -0,0 +1,15 @@ +Matched 25 tokens. +============= Module =========== +== Sub Tree == +class A + Fields: + + Instance Initializer: + Constructors: + Methods: + func B() throws: + c Assign e.getValue().getBytes().length + LocalClasses: + LocalInterfaces: + +Identifier:c has no decl. diff --git a/src/MapleFE/test/java/java2mpl/field-2.java b/src/MapleFE/test/java/java2mpl/field-2.java new file mode 100644 index 0000000000000000000000000000000000000000..4f53850954577c44e65e20053be6878b3ff37f1d --- /dev/null +++ b/src/MapleFE/test/java/java2mpl/field-2.java @@ -0,0 +1,5 @@ +public class Sizing { + void main() { + println(parseInstance(bitSet).toPrintable()); + } +} diff --git a/src/MapleFE/test/java/java2mpl/field-2.java.result b/src/MapleFE/test/java/java2mpl/field-2.java.result new file mode 100644 index 0000000000000000000000000000000000000000..ad276786674345046f80573238f0691223717a24 --- /dev/null +++ b/src/MapleFE/test/java/java2mpl/field-2.java.result @@ -0,0 +1,14 @@ +Matched 23 tokens. 
+============= Module =========== +== Sub Tree == +class Sizing + Fields: + + Instance Initializer: + Constructors: + Methods: + func main() throws: + println(parseInstance(bitSet).toPrintable()) + LocalClasses: + LocalInterfaces: + diff --git a/src/MapleFE/test/java/java2mpl/field-3.java b/src/MapleFE/test/java/java2mpl/field-3.java new file mode 100644 index 0000000000000000000000000000000000000000..3a2f73bd35a290803827ebbf1da9db79af766f1d --- /dev/null +++ b/src/MapleFE/test/java/java2mpl/field-3.java @@ -0,0 +1,25 @@ +// +//Copyright (C) [2021] Futurewei Technologies, Inc. All rights reverved. +// +//OpenArkFE is licensed under the Mulan PSL v2. +//You can use this software according to the terms and conditions of the Mulan PSL v2. +//You may obtain a copy of Mulan PSL v2 at: +// +// http://license.coscl.org.cn/MulanPSL2 +// +//THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +//EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +//FIT FOR A PARTICULAR PURPOSE. +//See the Mulan PSL v2 for more details. +// + +class B { + Point p; +} + +class Point { + int x; + void foo(B q) { + q.p.x = 1; + } +} diff --git a/src/MapleFE/test/java/java2mpl/field-3.java.result b/src/MapleFE/test/java/java2mpl/field-3.java.result new file mode 100644 index 0000000000000000000000000000000000000000..5511e77c8fa6d76ebeefd6e1a0e499654fa25606 --- /dev/null +++ b/src/MapleFE/test/java/java2mpl/field-3.java.result @@ -0,0 +1,25 @@ +Matched 7 tokens. +Matched 30 tokens. +============= Module =========== +== Sub Tree == +class B + Fields: + p + Instance Initializer: + Constructors: + Methods: + LocalClasses: + LocalInterfaces: + +== Sub Tree == +class Point + Fields: + x + Instance Initializer: + Constructors: + Methods: + func foo(q) throws: + q.p.x Assign 1 + LocalClasses: + LocalInterfaces: + diff --git a/src/MapleFE/test/java2mpl/forloop-1.java b/src/MapleFE/test/java/java2mpl/forloop-1.java similarity index 100% rename from src/MapleFE/test/java2mpl/forloop-1.java rename to src/MapleFE/test/java/java2mpl/forloop-1.java diff --git a/src/MapleFE/test/java2mpl/forloop-1.java.result b/src/MapleFE/test/java/java2mpl/forloop-1.java.result similarity index 100% rename from src/MapleFE/test/java2mpl/forloop-1.java.result rename to src/MapleFE/test/java/java2mpl/forloop-1.java.result diff --git a/src/MapleFE/test/java2mpl/forloop-2.java b/src/MapleFE/test/java/java2mpl/forloop-2.java similarity index 100% rename from src/MapleFE/test/java2mpl/forloop-2.java rename to src/MapleFE/test/java/java2mpl/forloop-2.java diff --git a/src/MapleFE/test/java2mpl/forloop-2.java.result b/src/MapleFE/test/java/java2mpl/forloop-2.java.result similarity index 82% rename from src/MapleFE/test/java2mpl/forloop-2.java.result rename to src/MapleFE/test/java/java2mpl/forloop-2.java.result index 3fd419113288e4558e75e69c0adbe2bc6ae823c6..de6b83a2cec7e703feccd478dde30b3a0949d36d 100644 --- a/src/MapleFE/test/java2mpl/forloop-2.java.result +++ b/src/MapleFE/test/java/java2mpl/forloop-2.java.result @@ -9,9 +9,8 @@ class A Methods: func foo() throws: for ( ) - Inc - k - iInc + PreInc k + i Inc LocalClasses: diff --git a/src/MapleFE/test/java2mpl/forloop-3.java b/src/MapleFE/test/java/java2mpl/forloop-3.java similarity index 100% rename from src/MapleFE/test/java2mpl/forloop-3.java rename to src/MapleFE/test/java/java2mpl/forloop-3.java diff --git a/src/MapleFE/test/java2mpl/forloop-3.java.result b/src/MapleFE/test/java/java2mpl/forloop-3.java.result similarity index 82% 
rename from src/MapleFE/test/java2mpl/forloop-3.java.result rename to src/MapleFE/test/java/java2mpl/forloop-3.java.result index db96d08b6c2d35814e60db931f53007844d0077f..77669d056798d30837c930e2a698e66707ea8457 100644 --- a/src/MapleFE/test/java2mpl/forloop-3.java.result +++ b/src/MapleFE/test/java/java2mpl/forloop-3.java.result @@ -9,9 +9,8 @@ class A Methods: func foo() throws: for ( ) - Inc - k - iInc + PreInc k + i Inc LocalClasses: diff --git a/src/MapleFE/test/java2mpl/forloop-4.java b/src/MapleFE/test/java/java2mpl/forloop-4.java similarity index 100% rename from src/MapleFE/test/java2mpl/forloop-4.java rename to src/MapleFE/test/java/java2mpl/forloop-4.java diff --git a/src/MapleFE/test/java2mpl/forloop-4.java.result b/src/MapleFE/test/java/java2mpl/forloop-4.java.result similarity index 82% rename from src/MapleFE/test/java2mpl/forloop-4.java.result rename to src/MapleFE/test/java/java2mpl/forloop-4.java.result index 08c5830eb8cd2bec5ae7d1a3ac64acf5608c0ad1..54c4d8cd6fa532770b043c1bf7cc15562422a5b7 100644 --- a/src/MapleFE/test/java2mpl/forloop-4.java.result +++ b/src/MapleFE/test/java/java2mpl/forloop-4.java.result @@ -9,9 +9,9 @@ class A Methods: func foo() throws: for ( ) - kInc + k Inc - iInc + i Inc LocalClasses: diff --git a/src/MapleFE/test/java2mpl/func-body-2.java b/src/MapleFE/test/java/java2mpl/func-body-2.java similarity index 100% rename from src/MapleFE/test/java2mpl/func-body-2.java rename to src/MapleFE/test/java/java2mpl/func-body-2.java diff --git a/src/MapleFE/test/java2mpl/func-body-2.java.result b/src/MapleFE/test/java/java2mpl/func-body-2.java.result similarity index 88% rename from src/MapleFE/test/java2mpl/func-body-2.java.result rename to src/MapleFE/test/java/java2mpl/func-body-2.java.result index b95b2f11e5ce3466ae72bc353880bfe79a634f18..95a46f76b202a9ae5af20a270bedc5bfff676897 100644 --- a/src/MapleFE/test/java2mpl/func-body-2.java.result +++ b/src/MapleFE/test/java/java2mpl/func-body-2.java.result @@ -7,7 +7,7 @@ class A Instance Initializer: Constructors: Methods: - func foo() throws: + func foo(c,b) throws: c Assign 3 return c Add b LocalClasses: diff --git a/src/MapleFE/test/java2mpl/func-body-3.java b/src/MapleFE/test/java/java2mpl/func-body-3.java similarity index 100% rename from src/MapleFE/test/java2mpl/func-body-3.java rename to src/MapleFE/test/java/java2mpl/func-body-3.java diff --git a/src/MapleFE/test/java2mpl/func-body-3.java.result b/src/MapleFE/test/java/java2mpl/func-body-3.java.result similarity index 77% rename from src/MapleFE/test/java2mpl/func-body-3.java.result rename to src/MapleFE/test/java/java2mpl/func-body-3.java.result index 71fa07343af0c07a17c35b1fb4250d979231c687..27174deac163f7d3d7c34f0df14ac9959b7847cd 100644 --- a/src/MapleFE/test/java2mpl/func-body-3.java.result +++ b/src/MapleFE/test/java/java2mpl/func-body-3.java.result @@ -8,9 +8,11 @@ class Test Constructors: Methods: func main() throws: - var:i=12 + Decl: i=12 i Assign 2 i Assign 1 LocalClasses: LocalInterfaces: +Identifier:i has no decl. +Identifier:i has no decl. 
diff --git a/src/MapleFE/test/java2mpl/func-body-4.java b/src/MapleFE/test/java/java2mpl/func-body-4.java similarity index 100% rename from src/MapleFE/test/java2mpl/func-body-4.java rename to src/MapleFE/test/java/java2mpl/func-body-4.java diff --git a/src/MapleFE/test/java2mpl/func-body-4.java.result b/src/MapleFE/test/java/java2mpl/func-body-4.java.result similarity index 79% rename from src/MapleFE/test/java2mpl/func-body-4.java.result rename to src/MapleFE/test/java/java2mpl/func-body-4.java.result index dbb7ad1bb598bc5fe78c85446c5d7691f7a1ce07..def16e9fea4c4af5b4a744e39884f78b2589f3ce 100644 --- a/src/MapleFE/test/java2mpl/func-body-4.java.result +++ b/src/MapleFE/test/java/java2mpl/func-body-4.java.result @@ -7,9 +7,9 @@ class A Instance Initializer: Constructors: Methods: - func foo() throws: + func foo(c,b) throws: return c Add b - func foo() throws: + func foo(c) throws: return c LocalClasses: LocalInterfaces: diff --git a/src/MapleFE/test/java2mpl/func-body.java b/src/MapleFE/test/java/java2mpl/func-body.java similarity index 100% rename from src/MapleFE/test/java2mpl/func-body.java rename to src/MapleFE/test/java/java2mpl/func-body.java diff --git a/src/MapleFE/test/java2mpl/func-body.java.result b/src/MapleFE/test/java/java2mpl/func-body.java.result similarity index 87% rename from src/MapleFE/test/java2mpl/func-body.java.result rename to src/MapleFE/test/java/java2mpl/func-body.java.result index 94281ad6d4d6e363fef569580b34a2c2308e1709..8b87a7f5f3bf5d6f487c466bbf24c40afbe73e6a 100644 --- a/src/MapleFE/test/java2mpl/func-body.java.result +++ b/src/MapleFE/test/java/java2mpl/func-body.java.result @@ -7,7 +7,7 @@ class A Instance Initializer: Constructors: Methods: - func foo() throws: + func foo(c,b) throws: return c Add b LocalClasses: LocalInterfaces: diff --git a/src/MapleFE/test/java2mpl/func-multi-param.java b/src/MapleFE/test/java/java2mpl/func-multi-param.java similarity index 100% rename from src/MapleFE/test/java2mpl/func-multi-param.java rename to src/MapleFE/test/java/java2mpl/func-multi-param.java diff --git a/src/MapleFE/test/java2mpl/func-multi-param.java.result b/src/MapleFE/test/java/java2mpl/func-multi-param.java.result similarity index 75% rename from src/MapleFE/test/java2mpl/func-multi-param.java.result rename to src/MapleFE/test/java/java2mpl/func-multi-param.java.result index 9a8abb5a85217e09b46adae374347eb7faf7a5e5..adc37165f90e4d0093bae09fcd9350c922e5c3bb 100644 --- a/src/MapleFE/test/java2mpl/func-multi-param.java.result +++ b/src/MapleFE/test/java/java2mpl/func-multi-param.java.result @@ -7,8 +7,8 @@ class A Instance Initializer: Constructors: Methods: - func foo() throws: - func bar() throws: + func foo(c,b) throws: + func bar(i,b,c) throws: LocalClasses: LocalInterfaces: diff --git a/src/MapleFE/test/others/generate-result.sh b/src/MapleFE/test/java/java2mpl/generate-result.sh similarity index 92% rename from src/MapleFE/test/others/generate-result.sh rename to src/MapleFE/test/java/java2mpl/generate-result.sh index bbd3acd878bd41d3a31fc5123972e954d7c9fcd0..08b3acc3b58dc1585fb15888064b004629e72007 100755 --- a/src/MapleFE/test/others/generate-result.sh +++ b/src/MapleFE/test/java/java2mpl/generate-result.sh @@ -17,5 +17,5 @@ FILES=$(pwd)/*.java for f in $FILES do echo "Generating result for $f ..." 
- ../../output/java/java2mpl $f > $f.result + ../../../output/java/java/java2mpl $f > $f.result done diff --git a/src/MapleFE/test/java2mpl/if-else-1.java b/src/MapleFE/test/java/java2mpl/if-else-1.java similarity index 100% rename from src/MapleFE/test/java2mpl/if-else-1.java rename to src/MapleFE/test/java/java2mpl/if-else-1.java diff --git a/src/MapleFE/test/java2mpl/if-else-1.java.result b/src/MapleFE/test/java/java2mpl/if-else-1.java.result similarity index 100% rename from src/MapleFE/test/java2mpl/if-else-1.java.result rename to src/MapleFE/test/java/java2mpl/if-else-1.java.result diff --git a/src/MapleFE/test/java2mpl/if-else-2.java b/src/MapleFE/test/java/java2mpl/if-else-2.java similarity index 100% rename from src/MapleFE/test/java2mpl/if-else-2.java rename to src/MapleFE/test/java/java2mpl/if-else-2.java diff --git a/src/MapleFE/test/java2mpl/if-else-2.java.result b/src/MapleFE/test/java/java2mpl/if-else-2.java.result similarity index 100% rename from src/MapleFE/test/java2mpl/if-else-2.java.result rename to src/MapleFE/test/java/java2mpl/if-else-2.java.result diff --git a/src/MapleFE/test/java2mpl/if-else-3.java b/src/MapleFE/test/java/java2mpl/if-else-3.java similarity index 100% rename from src/MapleFE/test/java2mpl/if-else-3.java rename to src/MapleFE/test/java/java2mpl/if-else-3.java diff --git a/src/MapleFE/test/java2mpl/if-else-3.java.result b/src/MapleFE/test/java/java2mpl/if-else-3.java.result similarity index 89% rename from src/MapleFE/test/java2mpl/if-else-3.java.result rename to src/MapleFE/test/java/java2mpl/if-else-3.java.result index e8f85eb80675776505131652c9c166686f5a35c7..02239e703020f1071f35269c604fef2fe80369af 100644 --- a/src/MapleFE/test/java2mpl/if-else-3.java.result +++ b/src/MapleFE/test/java/java2mpl/if-else-3.java.result @@ -14,8 +14,7 @@ class A false branch : cond-branch cond:b true branch : - return 3 - false branch : + return 3 false branch : return 1 diff --git a/src/MapleFE/test/java2mpl/illegal-expr-2.java b/src/MapleFE/test/java/java2mpl/illegal-expr-2.java.disabled similarity index 100% rename from src/MapleFE/test/java2mpl/illegal-expr-2.java rename to src/MapleFE/test/java/java2mpl/illegal-expr-2.java.disabled diff --git a/src/MapleFE/test/java2mpl/illegal-expr-2.java.result b/src/MapleFE/test/java/java2mpl/illegal-expr-2.java.result similarity index 100% rename from src/MapleFE/test/java2mpl/illegal-expr-2.java.result rename to src/MapleFE/test/java/java2mpl/illegal-expr-2.java.result diff --git a/src/MapleFE/test/java2mpl/illegal-expr.java b/src/MapleFE/test/java/java2mpl/illegal-expr.java.disabled similarity index 100% rename from src/MapleFE/test/java2mpl/illegal-expr.java rename to src/MapleFE/test/java/java2mpl/illegal-expr.java.disabled diff --git a/src/MapleFE/test/java2mpl/illegal-expr.java.result b/src/MapleFE/test/java/java2mpl/illegal-expr.java.result similarity index 100% rename from src/MapleFE/test/java2mpl/illegal-expr.java.result rename to src/MapleFE/test/java/java2mpl/illegal-expr.java.result diff --git a/src/MapleFE/test/java2mpl/interface-field.java b/src/MapleFE/test/java/java2mpl/interface-field.java similarity index 100% rename from src/MapleFE/test/java2mpl/interface-field.java rename to src/MapleFE/test/java/java2mpl/interface-field.java diff --git a/src/MapleFE/test/java2mpl/interface-field.java.result b/src/MapleFE/test/java/java2mpl/interface-field.java.result similarity index 100% rename from src/MapleFE/test/java2mpl/interface-field.java.result rename to 
src/MapleFE/test/java/java2mpl/interface-field.java.result diff --git a/src/MapleFE/test/java2mpl/label-stmt.java b/src/MapleFE/test/java/java2mpl/label-stmt.java similarity index 100% rename from src/MapleFE/test/java2mpl/label-stmt.java rename to src/MapleFE/test/java/java2mpl/label-stmt.java diff --git a/src/MapleFE/test/java2mpl/label-stmt.java.result b/src/MapleFE/test/java/java2mpl/label-stmt.java.result similarity index 92% rename from src/MapleFE/test/java2mpl/label-stmt.java.result rename to src/MapleFE/test/java/java2mpl/label-stmt.java.result index bc0929a2817f385360eed4bc847c985e6f193ac3..524f7cd81391e216eaba2a56c6b8ecf37d8f14dd 100644 --- a/src/MapleFE/test/java2mpl/label-stmt.java.result +++ b/src/MapleFE/test/java/java2mpl/label-stmt.java.result @@ -9,7 +9,7 @@ class A Methods: func loseEdges() throws: search: - break search + break:search LocalClasses: diff --git a/src/MapleFE/test/java2mpl/lambda-1.java b/src/MapleFE/test/java/java2mpl/lambda-1.java similarity index 100% rename from src/MapleFE/test/java2mpl/lambda-1.java rename to src/MapleFE/test/java/java2mpl/lambda-1.java diff --git a/src/MapleFE/test/java2mpl/lambda-1.java.result b/src/MapleFE/test/java/java2mpl/lambda-1.java.result similarity index 90% rename from src/MapleFE/test/java2mpl/lambda-1.java.result rename to src/MapleFE/test/java/java2mpl/lambda-1.java.result index 44e7e9238f5c5d76810ebb0837ccc2e4e20ac85d..16ea64b335bac5e1dfc3b385d142bd4aaf2d39ed 100644 --- a/src/MapleFE/test/java2mpl/lambda-1.java.result +++ b/src/MapleFE/test/java/java2mpl/lambda-1.java.result @@ -8,8 +8,7 @@ class Point Constructors: Methods: func foo() throws: - bar(() -> 42 -) + bar(() -> 42) LocalClasses: LocalInterfaces: diff --git a/src/MapleFE/test/java2mpl/lambda-2.java b/src/MapleFE/test/java/java2mpl/lambda-2.java similarity index 100% rename from src/MapleFE/test/java2mpl/lambda-2.java rename to src/MapleFE/test/java/java2mpl/lambda-2.java diff --git a/src/MapleFE/test/java2mpl/lambda-2.java.result b/src/MapleFE/test/java/java2mpl/lambda-2.java.result similarity index 78% rename from src/MapleFE/test/java2mpl/lambda-2.java.result rename to src/MapleFE/test/java/java2mpl/lambda-2.java.result index b122cfbdb909338b687e18e76255885d36037005..5cc1b420788f5c4f7f1bc668b4227a7c2d25e22a 100644 --- a/src/MapleFE/test/java2mpl/lambda-2.java.result +++ b/src/MapleFE/test/java/java2mpl/lambda-2.java.result @@ -13,13 +13,13 @@ class Main Instance Initializer: Constructors: Methods: - func main() throws: - var:numbers=new ArrayList + func main(args) throws: + Decl: numbers=new ArrayList() numbers.add(5) numbers.add(9) numbers.add(8) numbers.add(1) - var:method=(n) -> System.out.println(n) + Decl: method=(n) -> System.out.println(n) numbers.forEach(method) LocalClasses: diff --git a/src/MapleFE/test/java2mpl/lambda-3.java b/src/MapleFE/test/java/java2mpl/lambda-3.java similarity index 100% rename from src/MapleFE/test/java2mpl/lambda-3.java rename to src/MapleFE/test/java/java2mpl/lambda-3.java diff --git a/src/MapleFE/test/java2mpl/lambda-3.java.result b/src/MapleFE/test/java/java2mpl/lambda-3.java.result similarity index 84% rename from src/MapleFE/test/java2mpl/lambda-3.java.result rename to src/MapleFE/test/java/java2mpl/lambda-3.java.result index 114e7b196e8479492d4f475732ea58d7509a1d47..0fb5dcb10fd362b5e55248f08f92321b6c1913f7 100644 --- a/src/MapleFE/test/java2mpl/lambda-3.java.result +++ b/src/MapleFE/test/java/java2mpl/lambda-3.java.result @@ -10,8 +10,8 @@ class Main Instance Initializer: Constructors: Methods: - func main() 
throws: - var:numbers=new ArrayList + func main(args) throws: + Decl: numbers=new ArrayList() numbers.add(5) numbers.add(9) numbers.add(8) diff --git a/src/MapleFE/test/java2mpl/lambda-4.java b/src/MapleFE/test/java/java2mpl/lambda-4.java similarity index 100% rename from src/MapleFE/test/java2mpl/lambda-4.java rename to src/MapleFE/test/java/java2mpl/lambda-4.java diff --git a/src/MapleFE/test/java2mpl/lambda-4.java.result b/src/MapleFE/test/java/java2mpl/lambda-4.java.result similarity index 63% rename from src/MapleFE/test/java2mpl/lambda-4.java.result rename to src/MapleFE/test/java/java2mpl/lambda-4.java.result index 7adeb147e3af0a80394bba7f21c7f4d21c7ad890..f7c61355a8bd9c2ce61c1027292f29e0a192e3b5 100644 --- a/src/MapleFE/test/java2mpl/lambda-4.java.result +++ b/src/MapleFE/test/java/java2mpl/lambda-4.java.result @@ -6,7 +6,7 @@ interface StringFunction Fields: Methods: - func run() throws: + func run(str) throws: == Sub Tree == class Main @@ -15,15 +15,13 @@ class Main Instance Initializer: Constructors: Methods: - func main() throws: - var:exclaim=(s) -> s Add "!" - - var:ask=(s) -> s Add "?" - + func main(args) throws: + Decl: exclaim=(s) -> s Add "!" + Decl: ask=(s) -> s Add "?" printFormatted("Hello",exclaim) printFormatted("Hello",ask) - func printFormatted() throws: - var:result=format.run(str) + func printFormatted(str,format) throws: + Decl: result=format.run(str) System.out.println(result) LocalClasses: LocalInterfaces: diff --git a/src/MapleFE/test/java2mpl/lambda-5.java b/src/MapleFE/test/java/java2mpl/lambda-5.java similarity index 100% rename from src/MapleFE/test/java2mpl/lambda-5.java rename to src/MapleFE/test/java/java2mpl/lambda-5.java diff --git a/src/MapleFE/test/java2mpl/lambda-5.java.result b/src/MapleFE/test/java/java2mpl/lambda-5.java.result similarity index 70% rename from src/MapleFE/test/java2mpl/lambda-5.java.result rename to src/MapleFE/test/java/java2mpl/lambda-5.java.result index 9f9aa2932015d632f7a10b0c950fe64332b145c6..e4892c90d2521fa5e2b1cc01315ec9fa5003e030 100644 --- a/src/MapleFE/test/java2mpl/lambda-5.java.result +++ b/src/MapleFE/test/java/java2mpl/lambda-5.java.result @@ -14,18 +14,21 @@ class Point Constructors: Methods: func foo() throws: - var:m + Decl: m m Assign () -> m Assign () -> 42 - m Assign () -> null - m Assign () -> return 42 m Assign () -> system.gc() - m Assign () -> x Add y - + m Assign (x,y) -> x Add y LocalClasses: LocalInterfaces: +Identifier:m has no decl. +Identifier:m has no decl. +Identifier:m has no decl. +Identifier:m has no decl. +Identifier:m has no decl. +Identifier:m has no decl. 
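The lambda-*.java.result hunks above reflect two frontend output changes: method parameters now appear in the dumped signature (func main(args) rather than func main()), and local declarations are printed as Decl: instead of var:, with the initializer kept on the same line. As rough orientation only, here is a hypothetical Java source in the shape of these lambda tests — the actual lambda-2.java is renamed but not shown in this patch, and the Main class name plus the ArrayList<Integer>/Consumer<Integer> types are assumptions reconstructed from the dumped statements:

import java.util.ArrayList;
import java.util.function.Consumer;

public class Main {
  public static void main(String[] args) {
    // Dumped as "Decl: numbers=new ArrayList()"
    ArrayList<Integer> numbers = new ArrayList<>();
    numbers.add(5);
    numbers.add(9);
    numbers.add(8);
    numbers.add(1);
    // Dumped as "Decl: method=(n) -> System.out.println(n)"
    Consumer<Integer> method = (n) -> System.out.println(n);
    numbers.forEach(method);
  }
}

Each Decl: line in a .result file corresponds to one local-variable declaration statement; lambdas stay attached to their declaration or call as a single expression instead of spilling onto a second line as in the old expected output.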
diff --git a/src/MapleFE/test/java2mpl/left-rec-1.java b/src/MapleFE/test/java/java2mpl/left-rec-1.java similarity index 100% rename from src/MapleFE/test/java2mpl/left-rec-1.java rename to src/MapleFE/test/java/java2mpl/left-rec-1.java diff --git a/src/MapleFE/test/java2mpl/left-rec-1.java.result b/src/MapleFE/test/java/java2mpl/left-rec-1.java.result similarity index 84% rename from src/MapleFE/test/java2mpl/left-rec-1.java.result rename to src/MapleFE/test/java/java2mpl/left-rec-1.java.result index 6e55437b2ccd830ae8aba352a63855e46b0b326a..bc2788a9097ee1e454a8b4b79a6d1cbad7d168da 100644 --- a/src/MapleFE/test/java2mpl/left-rec-1.java.result +++ b/src/MapleFE/test/java/java2mpl/left-rec-1.java.result @@ -10,8 +10,7 @@ class A func foo() throws: cond-branch cond:(k Assign System.in.read()) GE 0 true branch : - System.out.println(k) - false branch : + System.out.println(k) false branch : LocalClasses: LocalInterfaces: diff --git a/src/MapleFE/test/java2mpl/literal-float-double.java b/src/MapleFE/test/java/java2mpl/literal-float-double.java similarity index 100% rename from src/MapleFE/test/java2mpl/literal-float-double.java rename to src/MapleFE/test/java/java2mpl/literal-float-double.java diff --git a/src/MapleFE/test/java2mpl/literal-float-double.java.result b/src/MapleFE/test/java/java2mpl/literal-float-double.java.result similarity index 84% rename from src/MapleFE/test/java2mpl/literal-float-double.java.result rename to src/MapleFE/test/java/java2mpl/literal-float-double.java.result index 5f1bf6992b63ece0ed6f720b0efd526749804fe0..9d218a0b075eecae25ce5dc6e1f3d275ab359a2a 100644 --- a/src/MapleFE/test/java2mpl/literal-float-double.java.result +++ b/src/MapleFE/test/java/java2mpl/literal-float-double.java.result @@ -8,8 +8,8 @@ class A Constructors: Methods: func foo() throws: - var:a=11.1 - var:a=22.2 + Decl: a=11.1 + Decl: a=22.2 LocalClasses: LocalInterfaces: diff --git a/src/MapleFE/test/java2mpl/literal-integer-2.java b/src/MapleFE/test/java/java2mpl/literal-integer-2.java similarity index 100% rename from src/MapleFE/test/java2mpl/literal-integer-2.java rename to src/MapleFE/test/java/java2mpl/literal-integer-2.java diff --git a/src/MapleFE/test/java2mpl/literal-integer-2.java.result b/src/MapleFE/test/java/java2mpl/literal-integer-2.java.result similarity index 91% rename from src/MapleFE/test/java2mpl/literal-integer-2.java.result rename to src/MapleFE/test/java/java2mpl/literal-integer-2.java.result index abf79b059c13b8041cc6b659bf3ee076356d444e..1a5bb2d45d38ce0132e33e4eaabe9a42b0782b65 100644 --- a/src/MapleFE/test/java2mpl/literal-integer-2.java.result +++ b/src/MapleFE/test/java/java2mpl/literal-integer-2.java.result @@ -8,7 +8,7 @@ class A Constructors: Methods: func foo() throws: - var:a=273 + Decl: a=273 LocalClasses: LocalInterfaces: diff --git a/src/MapleFE/test/java2mpl/literal-integer-3.java b/src/MapleFE/test/java/java2mpl/literal-integer-3.java similarity index 100% rename from src/MapleFE/test/java2mpl/literal-integer-3.java rename to src/MapleFE/test/java/java2mpl/literal-integer-3.java diff --git a/src/MapleFE/test/java2mpl/literal-integer-3.java.result b/src/MapleFE/test/java/java2mpl/literal-integer-3.java.result similarity index 78% rename from src/MapleFE/test/java2mpl/literal-integer-3.java.result rename to src/MapleFE/test/java/java2mpl/literal-integer-3.java.result index 27e73a24f4e60eceba4e6b702479ef334cc5aacc..82c1284467f4d864f07fde4f01a0cd96e4f8db3f 100644 --- a/src/MapleFE/test/java2mpl/literal-integer-3.java.result +++ 
b/src/MapleFE/test/java/java2mpl/literal-integer-3.java.result @@ -8,9 +8,11 @@ class A Constructors: Methods: func foo() throws: - var:a=273 + Decl: a=273 a Assign 268435455 a Assign -1 LocalClasses: LocalInterfaces: +Identifier:a has no decl. +Identifier:a has no decl. diff --git a/src/MapleFE/test/java2mpl/literal-integer.java b/src/MapleFE/test/java/java2mpl/literal-integer.java similarity index 100% rename from src/MapleFE/test/java2mpl/literal-integer.java rename to src/MapleFE/test/java/java2mpl/literal-integer.java diff --git a/src/MapleFE/test/java2mpl/literal-integer.java.result b/src/MapleFE/test/java/java2mpl/literal-integer.java.result similarity index 100% rename from src/MapleFE/test/java2mpl/literal-integer.java.result rename to src/MapleFE/test/java/java2mpl/literal-integer.java.result diff --git a/src/MapleFE/test/java2mpl/literal-string-2.java b/src/MapleFE/test/java/java2mpl/literal-string-2.java similarity index 96% rename from src/MapleFE/test/java2mpl/literal-string-2.java rename to src/MapleFE/test/java/java2mpl/literal-string-2.java index ca47e9ef5dc4e5e1ede1446154728b82ef09bbc1..c0dfe8fc3d012602cfa29afc5d3926fa0eaa7557 100644 --- a/src/MapleFE/test/java2mpl/literal-string-2.java +++ b/src/MapleFE/test/java/java2mpl/literal-string-2.java @@ -13,6 +13,7 @@ //See the Mulan PSL v2 for more details. // class A { + void write(String s); void foo() { write("doesn't"); write("doesn\'t"); diff --git a/src/MapleFE/test/java2mpl/literal-string-2.java.result b/src/MapleFE/test/java/java2mpl/literal-string-2.java.result similarity index 74% rename from src/MapleFE/test/java2mpl/literal-string-2.java.result rename to src/MapleFE/test/java/java2mpl/literal-string-2.java.result index e08faab0e035bc782e0a08d15c571bf1ef0dc80e..7699fb51c96a52def125d2b29ff9cae1b195d9af 100644 --- a/src/MapleFE/test/java2mpl/literal-string-2.java.result +++ b/src/MapleFE/test/java/java2mpl/literal-string-2.java.result @@ -1,4 +1,4 @@ -Matched 20 tokens. +Matched 27 tokens. ============= Module =========== == Sub Tree == class A @@ -7,9 +7,10 @@ class A Instance Initializer: Constructors: Methods: + func write(s) throws: func foo() throws: write("doesn't") - write("doesn't") + write("doesn\'t") LocalClasses: LocalInterfaces: diff --git a/src/MapleFE/test/java2mpl/literal-string.java b/src/MapleFE/test/java/java2mpl/literal-string.java similarity index 96% rename from src/MapleFE/test/java2mpl/literal-string.java rename to src/MapleFE/test/java/java2mpl/literal-string.java index c3aaa6176a67e590e888242ffc60c43d86afdebb..7eb79933aa51f92fd62c9d55f5b598b2d4fded85 100644 --- a/src/MapleFE/test/java2mpl/literal-string.java +++ b/src/MapleFE/test/java/java2mpl/literal-string.java @@ -13,6 +13,7 @@ //See the Mulan PSL v2 for more details. // class A { + void write(String s); void foo() { write("test\n"); } diff --git a/src/MapleFE/test/java2mpl/literal-string.java.result b/src/MapleFE/test/java/java2mpl/literal-string.java.result similarity index 72% rename from src/MapleFE/test/java2mpl/literal-string.java.result rename to src/MapleFE/test/java/java2mpl/literal-string.java.result index 1f3dd1f0ba06163f48ca36de7a2e7c4d0e50c67d..145e330401b210f7edb9cff65e3353f4a1d87a93 100644 --- a/src/MapleFE/test/java2mpl/literal-string.java.result +++ b/src/MapleFE/test/java/java2mpl/literal-string.java.result @@ -1,4 +1,4 @@ -Matched 15 tokens. +Matched 22 tokens. 
============= Module =========== == Sub Tree == class A @@ -7,9 +7,9 @@ class A Instance Initializer: Constructors: Methods: + func write(s) throws: func foo() throws: - write("test -") + write("test\n") LocalClasses: LocalInterfaces: diff --git a/src/MapleFE/test/java2mpl/literal-this-1.java b/src/MapleFE/test/java/java2mpl/literal-this-1.java similarity index 100% rename from src/MapleFE/test/java2mpl/literal-this-1.java rename to src/MapleFE/test/java/java2mpl/literal-this-1.java diff --git a/src/MapleFE/test/java2mpl/literal-this-1.java.result b/src/MapleFE/test/java/java2mpl/literal-this-1.java.result similarity index 100% rename from src/MapleFE/test/java2mpl/literal-this-1.java.result rename to src/MapleFE/test/java/java2mpl/literal-this-1.java.result diff --git a/src/MapleFE/test/java2mpl/literal-this-2.java b/src/MapleFE/test/java/java2mpl/literal-this-2.java similarity index 100% rename from src/MapleFE/test/java2mpl/literal-this-2.java rename to src/MapleFE/test/java/java2mpl/literal-this-2.java diff --git a/src/MapleFE/test/java2mpl/literal-this-2.java.result b/src/MapleFE/test/java/java2mpl/literal-this-2.java.result similarity index 100% rename from src/MapleFE/test/java2mpl/literal-this-2.java.result rename to src/MapleFE/test/java/java2mpl/literal-this-2.java.result diff --git a/src/MapleFE/test/java2mpl/literal-unicode.java b/src/MapleFE/test/java/java2mpl/literal-unicode.java similarity index 100% rename from src/MapleFE/test/java2mpl/literal-unicode.java rename to src/MapleFE/test/java/java2mpl/literal-unicode.java diff --git a/src/MapleFE/test/java2mpl/literal-unicode.java.result b/src/MapleFE/test/java/java2mpl/literal-unicode.java.result similarity index 100% rename from src/MapleFE/test/java2mpl/literal-unicode.java.result rename to src/MapleFE/test/java/java2mpl/literal-unicode.java.result diff --git a/src/MapleFE/test/java2mpl/local-class.java b/src/MapleFE/test/java/java2mpl/local-class.java similarity index 100% rename from src/MapleFE/test/java2mpl/local-class.java rename to src/MapleFE/test/java/java2mpl/local-class.java diff --git a/src/MapleFE/test/java2mpl/local-class.java.result b/src/MapleFE/test/java/java2mpl/local-class.java.result similarity index 98% rename from src/MapleFE/test/java2mpl/local-class.java.result rename to src/MapleFE/test/java/java2mpl/local-class.java.result index 663f73158c2a3f2a351f96443fa90985342fc1ac..94a8dc2d57c2e3f92d87ded085b7bb6f95b9374e 100644 --- a/src/MapleFE/test/java2mpl/local-class.java.result +++ b/src/MapleFE/test/java/java2mpl/local-class.java.result @@ -8,7 +8,7 @@ class Global Constructors: Methods: func foo() throws: - new Cyclic + new Cyclic() class Cyclic Fields: diff --git a/src/MapleFE/test/java2mpl/new-stmt-1.java b/src/MapleFE/test/java/java2mpl/new-stmt-1.java similarity index 97% rename from src/MapleFE/test/java2mpl/new-stmt-1.java rename to src/MapleFE/test/java/java2mpl/new-stmt-1.java index 989e7968879e403c0c1d1f52b49232d501a3f61c..bdd4b5513fcf65d3fda6a20e7f27185f551cc19e 100644 --- a/src/MapleFE/test/java2mpl/new-stmt-1.java +++ b/src/MapleFE/test/java/java2mpl/new-stmt-1.java @@ -19,4 +19,5 @@ class A { } class Cyclic { + Cyclic() {} } diff --git a/src/MapleFE/test/java2mpl/new-stmt-1.java.result b/src/MapleFE/test/java/java2mpl/new-stmt-1.java.result similarity index 82% rename from src/MapleFE/test/java2mpl/new-stmt-1.java.result rename to src/MapleFE/test/java/java2mpl/new-stmt-1.java.result index 4785ac1f04b0f462d9cb8377472b482aceb337f8..38cc51684021ccf647c1be343aeff4babdfd0138 100644 --- 
a/src/MapleFE/test/java2mpl/new-stmt-1.java.result +++ b/src/MapleFE/test/java/java2mpl/new-stmt-1.java.result @@ -1,5 +1,5 @@ Matched 15 tokens. -Matched 19 tokens. +Matched 24 tokens. ============= Module =========== == Sub Tree == class A @@ -9,7 +9,7 @@ class A Constructors: Methods: func foo() throws: - new Cyclic + new Cyclic() LocalClasses: LocalInterfaces: @@ -19,6 +19,7 @@ class Cyclic Instance Initializer: Constructors: + constructor Cyclic() throws: Methods: LocalClasses: LocalInterfaces: diff --git a/src/MapleFE/test/java2mpl/new-stmt-2.java b/src/MapleFE/test/java/java2mpl/new-stmt-2.java similarity index 97% rename from src/MapleFE/test/java2mpl/new-stmt-2.java rename to src/MapleFE/test/java/java2mpl/new-stmt-2.java index a0156ccc386cc5be0dcdbe870ff686c82f05bda3..6ee5b41ae2c94a87fb25b4fddd9f7419c12e3e3d 100644 --- a/src/MapleFE/test/java2mpl/new-stmt-2.java +++ b/src/MapleFE/test/java/java2mpl/new-stmt-2.java @@ -19,4 +19,5 @@ class A { } class Cyclic { + Cyclic(int i) {} } diff --git a/src/MapleFE/test/java2mpl/new-stmt-2.java.result b/src/MapleFE/test/java/java2mpl/new-stmt-2.java.result similarity index 75% rename from src/MapleFE/test/java2mpl/new-stmt-2.java.result rename to src/MapleFE/test/java/java2mpl/new-stmt-2.java.result index 8706e6ffe58523083074c28d47d300efdfa1890d..7a792a923733e75bd9219d1c6c587ca8a68e919b 100644 --- a/src/MapleFE/test/java2mpl/new-stmt-2.java.result +++ b/src/MapleFE/test/java/java2mpl/new-stmt-2.java.result @@ -1,5 +1,5 @@ Matched 18 tokens. -Matched 22 tokens. +Matched 29 tokens. ============= Module =========== == Sub Tree == class A @@ -8,8 +8,8 @@ class A Instance Initializer: Constructors: Methods: - func foo() throws: - new Cyclic + func foo(a) throws: + new Cyclic(a) LocalClasses: LocalInterfaces: @@ -19,6 +19,7 @@ class Cyclic Instance Initializer: Constructors: + constructor Cyclic(i) throws: Methods: LocalClasses: LocalInterfaces: diff --git a/src/MapleFE/test/java2mpl/new-stmt-3.java b/src/MapleFE/test/java/java2mpl/new-stmt-3.java similarity index 100% rename from src/MapleFE/test/java2mpl/new-stmt-3.java rename to src/MapleFE/test/java/java2mpl/new-stmt-3.java diff --git a/src/MapleFE/test/java2mpl/new-stmt-3.java.result b/src/MapleFE/test/java/java2mpl/new-stmt-3.java.result similarity index 90% rename from src/MapleFE/test/java2mpl/new-stmt-3.java.result rename to src/MapleFE/test/java/java2mpl/new-stmt-3.java.result index 195cc40edeb05f2eb020078f2093a3811bb36fa0..54d6721ee9ed140d8614f17dfc776f155ceaabf8 100644 --- a/src/MapleFE/test/java2mpl/new-stmt-3.java.result +++ b/src/MapleFE/test/java/java2mpl/new-stmt-3.java.result @@ -7,7 +7,7 @@ class Outer Instance Initializer: Constructors: constructor Outer() throws: - var:i=new Inner + Decl: i=new Inner() Methods: LocalClasses: class Inner @@ -17,7 +17,7 @@ class Outer Constructors: Methods: func foo() throws: - var:i= + Decl: i= LocalClasses: class Inner2 Fields: @@ -32,7 +32,7 @@ class Outer Instance Initializer: Constructors: constructor Inner3() throws: - var:i2=new Inner2 + Decl: i2=new Inner2() Methods: LocalClasses: LocalInterfaces: diff --git a/src/MapleFE/test/sharedfe/add3.java b/src/MapleFE/test/java/java2mpl/new-stmt-4.java similarity index 88% rename from src/MapleFE/test/sharedfe/add3.java rename to src/MapleFE/test/java/java2mpl/new-stmt-4.java index 1c9b539ae78938be3d630dcaeb6e3924679dbac0..fe5428e599323109a4837de0c21d38894030b52c 100644 --- a/src/MapleFE/test/sharedfe/add3.java +++ b/src/MapleFE/test/java/java2mpl/new-stmt-4.java @@ -13,9 +13,10 @@ //See the 
Mulan PSL v2 for more details. // -int foo(int a, int b) { - int c; - - c = a + b + c; - return c; +class A { + A(int x) {} + void foo() { + A a; + a = new A(3); + } } diff --git a/src/MapleFE/test/java/java2mpl/new-stmt-4.java.result b/src/MapleFE/test/java/java2mpl/new-stmt-4.java.result new file mode 100644 index 0000000000000000000000000000000000000000..4836a70c96e244e9044b81e52801a680f1b3c411 --- /dev/null +++ b/src/MapleFE/test/java/java2mpl/new-stmt-4.java.result @@ -0,0 +1,17 @@ +Matched 28 tokens. +============= Module =========== +== Sub Tree == +class A + Fields: + + Instance Initializer: + Constructors: + constructor A(x) throws: + Methods: + func foo() throws: + Decl: a + a Assign new A(3) + LocalClasses: + LocalInterfaces: + +Identifier:a has no decl. diff --git a/src/MapleFE/test/java2mpl/point-list.java b/src/MapleFE/test/java/java2mpl/point-list.java similarity index 100% rename from src/MapleFE/test/java2mpl/point-list.java rename to src/MapleFE/test/java/java2mpl/point-list.java diff --git a/src/MapleFE/test/java2mpl/point-list.java.result b/src/MapleFE/test/java/java2mpl/point-list.java.result similarity index 92% rename from src/MapleFE/test/java2mpl/point-list.java.result rename to src/MapleFE/test/java/java2mpl/point-list.java.result index 7c9b31ffc1da5e7657ea28ff533f0b0dc5013672..b9155b43b38ab2271706493070e60f786a181a2d 100644 --- a/src/MapleFE/test/java2mpl/point-list.java.result +++ b/src/MapleFE/test/java/java2mpl/point-list.java.result @@ -9,7 +9,7 @@ class Point Constructors: Methods: func foo() throws: - var:pl=new PointList + Decl: pl=new PointList() pl.z Assign 1 return pl.z LocalClasses: diff --git a/src/MapleFE/test/java2mpl/switch-1.java b/src/MapleFE/test/java/java2mpl/switch-1.java similarity index 100% rename from src/MapleFE/test/java2mpl/switch-1.java rename to src/MapleFE/test/java/java2mpl/switch-1.java diff --git a/src/MapleFE/test/java2mpl/switch-1.java.result b/src/MapleFE/test/java/java2mpl/switch-1.java.result similarity index 100% rename from src/MapleFE/test/java2mpl/switch-1.java.result rename to src/MapleFE/test/java/java2mpl/switch-1.java.result diff --git a/src/MapleFE/test/java2mpl/switch-2.java b/src/MapleFE/test/java/java2mpl/switch-2.java similarity index 100% rename from src/MapleFE/test/java2mpl/switch-2.java rename to src/MapleFE/test/java/java2mpl/switch-2.java diff --git a/src/MapleFE/test/java2mpl/switch-2.java.result b/src/MapleFE/test/java/java2mpl/switch-2.java.result similarity index 100% rename from src/MapleFE/test/java2mpl/switch-2.java.result rename to src/MapleFE/test/java/java2mpl/switch-2.java.result diff --git a/src/MapleFE/test/java2mpl/t.java b/src/MapleFE/test/java/java2mpl/t.java similarity index 100% rename from src/MapleFE/test/java2mpl/t.java rename to src/MapleFE/test/java/java2mpl/t.java diff --git a/src/MapleFE/test/java2mpl/t.java.result b/src/MapleFE/test/java/java2mpl/t.java.result similarity index 85% rename from src/MapleFE/test/java2mpl/t.java.result rename to src/MapleFE/test/java/java2mpl/t.java.result index 80a92a3a9b58bfc354c4a28fd18fe06c52931e64..d45e86e510ff9949e14b2fcf8a7510269aad3893 100644 --- a/src/MapleFE/test/java2mpl/t.java.result +++ b/src/MapleFE/test/java/java2mpl/t.java.result @@ -6,7 +6,7 @@ class Point Instance Initializer: Constructors: - constructor Point() throws: + constructor Point(x) throws: this.x Assign x Methods: LocalClasses: diff --git a/src/MapleFE/test/java2mpl/t1.java b/src/MapleFE/test/java/java2mpl/t1.java similarity index 100% rename from 
src/MapleFE/test/java2mpl/t1.java rename to src/MapleFE/test/java/java2mpl/t1.java diff --git a/src/MapleFE/test/java2mpl/t1.java.result b/src/MapleFE/test/java/java2mpl/t1.java.result similarity index 100% rename from src/MapleFE/test/java2mpl/t1.java.result rename to src/MapleFE/test/java/java2mpl/t1.java.result diff --git a/src/MapleFE/test/java2mpl/t2.java b/src/MapleFE/test/java/java2mpl/t2.java similarity index 100% rename from src/MapleFE/test/java2mpl/t2.java rename to src/MapleFE/test/java/java2mpl/t2.java diff --git a/src/MapleFE/test/java2mpl/t2.java.result b/src/MapleFE/test/java/java2mpl/t2.java.result similarity index 100% rename from src/MapleFE/test/java2mpl/t2.java.result rename to src/MapleFE/test/java/java2mpl/t2.java.result diff --git a/src/MapleFE/test/java2mpl/t3.java b/src/MapleFE/test/java/java2mpl/t3.java similarity index 100% rename from src/MapleFE/test/java2mpl/t3.java rename to src/MapleFE/test/java/java2mpl/t3.java diff --git a/src/MapleFE/test/java2mpl/t3.java.result b/src/MapleFE/test/java/java2mpl/t3.java.result similarity index 100% rename from src/MapleFE/test/java2mpl/t3.java.result rename to src/MapleFE/test/java/java2mpl/t3.java.result diff --git a/src/MapleFE/test/java2mpl/t4.java b/src/MapleFE/test/java/java2mpl/t4.java similarity index 100% rename from src/MapleFE/test/java2mpl/t4.java rename to src/MapleFE/test/java/java2mpl/t4.java diff --git a/src/MapleFE/test/java2mpl/t4.java.result b/src/MapleFE/test/java/java2mpl/t4.java.result similarity index 100% rename from src/MapleFE/test/java2mpl/t4.java.result rename to src/MapleFE/test/java/java2mpl/t4.java.result diff --git a/src/MapleFE/test/java2mpl/t5.java b/src/MapleFE/test/java/java2mpl/t5.java similarity index 100% rename from src/MapleFE/test/java2mpl/t5.java rename to src/MapleFE/test/java/java2mpl/t5.java diff --git a/src/MapleFE/test/java2mpl/t5.java.result b/src/MapleFE/test/java/java2mpl/t5.java.result similarity index 100% rename from src/MapleFE/test/java2mpl/t5.java.result rename to src/MapleFE/test/java/java2mpl/t5.java.result diff --git a/src/MapleFE/test/java/java2mpl/ternary-operator.java b/src/MapleFE/test/java/java2mpl/ternary-operator.java new file mode 100644 index 0000000000000000000000000000000000000000..0c4abf67536d37c48435740c1f62a56ccc72fa93 --- /dev/null +++ b/src/MapleFE/test/java/java2mpl/ternary-operator.java @@ -0,0 +1,23 @@ +// +//Copyright (C) [2021] Futurewei Technologies, Inc. All rights reverved. +// +//OpenArkFE is licensed under the Mulan PSL v2. +//You can use this software according to the terms and conditions of the Mulan PSL v2. +//You may obtain a copy of Mulan PSL v2 at: +// +// http://license.coscl.org.cn/MulanPSL2 +// +//THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +//EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +//FIT FOR A PARTICULAR PURPOSE. +//See the Mulan PSL v2 for more details. +// + +class A { + public static Boolean Is3(int s) { + Boolean b; + b = (s == 3 ? true : false); + return b; + } + +} diff --git a/src/MapleFE/test/java/java2mpl/ternary-operator.java.result b/src/MapleFE/test/java/java2mpl/ternary-operator.java.result new file mode 100644 index 0000000000000000000000000000000000000000..c1cc983aa0ee0b76fa407770126d462e8a11c470 --- /dev/null +++ b/src/MapleFE/test/java/java2mpl/ternary-operator.java.result @@ -0,0 +1,17 @@ +Matched 32 tokens. 
+============= Module =========== +== Sub Tree == +class A + Fields: + + Instance Initializer: + Constructors: + Methods: + func Is3(s) throws: + Decl: b + b Assign () + return b + LocalClasses: + LocalInterfaces: + +Identifier:b has no decl. diff --git a/src/MapleFE/test/java2mpl/throw-1.java b/src/MapleFE/test/java/java2mpl/throw-1.java similarity index 100% rename from src/MapleFE/test/java2mpl/throw-1.java rename to src/MapleFE/test/java/java2mpl/throw-1.java diff --git a/src/MapleFE/test/java2mpl/throw-1.java.result b/src/MapleFE/test/java/java2mpl/throw-1.java.result similarity index 100% rename from src/MapleFE/test/java2mpl/throw-1.java.result rename to src/MapleFE/test/java/java2mpl/throw-1.java.result diff --git a/src/MapleFE/test/java2mpl/throw-2.java b/src/MapleFE/test/java/java2mpl/throw-2.java similarity index 100% rename from src/MapleFE/test/java2mpl/throw-2.java rename to src/MapleFE/test/java/java2mpl/throw-2.java diff --git a/src/MapleFE/test/java2mpl/throw-2.java.result b/src/MapleFE/test/java/java2mpl/throw-2.java.result similarity index 100% rename from src/MapleFE/test/java2mpl/throw-2.java.result rename to src/MapleFE/test/java/java2mpl/throw-2.java.result diff --git a/src/MapleFE/test/java2mpl/type-argument-1.java b/src/MapleFE/test/java/java2mpl/type-argument-1.java similarity index 100% rename from src/MapleFE/test/java2mpl/type-argument-1.java rename to src/MapleFE/test/java/java2mpl/type-argument-1.java diff --git a/src/MapleFE/test/java2mpl/type-argument-1.java.result b/src/MapleFE/test/java/java2mpl/type-argument-1.java.result similarity index 90% rename from src/MapleFE/test/java2mpl/type-argument-1.java.result rename to src/MapleFE/test/java/java2mpl/type-argument-1.java.result index 26072486913e9134b0cf558311ceb80140ee77b6..5031a406178674ab0966917f2af76f62ca7c61ea 100644 --- a/src/MapleFE/test/java2mpl/type-argument-1.java.result +++ b/src/MapleFE/test/java/java2mpl/type-argument-1.java.result @@ -3,7 +3,7 @@ Matched 14 tokens. == Sub Tree == class A Fields: - m=new HM + m=new HM(256) Instance Initializer: Constructors: Methods: diff --git a/src/MapleFE/test/java2mpl/type-argument-2.java b/src/MapleFE/test/java/java2mpl/type-argument-2.java similarity index 100% rename from src/MapleFE/test/java2mpl/type-argument-2.java rename to src/MapleFE/test/java/java2mpl/type-argument-2.java diff --git a/src/MapleFE/test/java2mpl/type-argument-2.java.result b/src/MapleFE/test/java/java2mpl/type-argument-2.java.result similarity index 91% rename from src/MapleFE/test/java2mpl/type-argument-2.java.result rename to src/MapleFE/test/java/java2mpl/type-argument-2.java.result index bfaa72f4a5a6423dd755381ddb9ef0ec2af9866d..3898da8e42cd8039913a076660fd43c24e5df300 100644 --- a/src/MapleFE/test/java2mpl/type-argument-2.java.result +++ b/src/MapleFE/test/java/java2mpl/type-argument-2.java.result @@ -3,7 +3,7 @@ Matched 23 tokens. 
== Sub Tree == class A Fields: - b=new B + b=new B(8) Instance Initializer: Constructors: Methods: diff --git a/src/MapleFE/test/java2mpl/type-argument-3.java b/src/MapleFE/test/java/java2mpl/type-argument-3.java similarity index 100% rename from src/MapleFE/test/java2mpl/type-argument-3.java rename to src/MapleFE/test/java/java2mpl/type-argument-3.java diff --git a/src/MapleFE/test/java2mpl/type-argument-3.java.result b/src/MapleFE/test/java/java2mpl/type-argument-3.java.result similarity index 78% rename from src/MapleFE/test/java2mpl/type-argument-3.java.result rename to src/MapleFE/test/java/java2mpl/type-argument-3.java.result index 05fbf45200db860a4b16d79f4922a52b86cd5c81..9bf1a3effa320225106fe83923691696a2c0ea7a 100644 --- a/src/MapleFE/test/java2mpl/type-argument-3.java.result +++ b/src/MapleFE/test/java/java2mpl/type-argument-3.java.result @@ -7,8 +7,8 @@ class C Instance Initializer: Constructors: Methods: - func getClasses() throws: - var:result=a + func getClasses(a) throws: + Decl: result=a return result LocalClasses: LocalInterfaces: diff --git a/src/MapleFE/test/java2mpl/unary-operator-1.java b/src/MapleFE/test/java/java2mpl/unary-operator-1.java similarity index 100% rename from src/MapleFE/test/java2mpl/unary-operator-1.java rename to src/MapleFE/test/java/java2mpl/unary-operator-1.java diff --git a/src/MapleFE/test/java2mpl/unary-operator-1.java.result b/src/MapleFE/test/java/java2mpl/unary-operator-1.java.result similarity index 78% rename from src/MapleFE/test/java2mpl/unary-operator-1.java.result rename to src/MapleFE/test/java/java2mpl/unary-operator-1.java.result index e493b2c3387e31f42111d2dfc125f8918c20f80f..931f5e09901bbc714327fc6fc656b6b01a04ba88 100644 --- a/src/MapleFE/test/java2mpl/unary-operator-1.java.result +++ b/src/MapleFE/test/java/java2mpl/unary-operator-1.java.result @@ -7,11 +7,10 @@ class A Instance Initializer: Constructors: Methods: - func foo() throws: + func foo(x) throws: cond-branch cond:x NE 0 true branch : - return 0 - false branch : + return 0 false branch : LocalClasses: LocalInterfaces: diff --git a/src/MapleFE/test/java2mpl/unary-operator.java b/src/MapleFE/test/java/java2mpl/unary-operator.java similarity index 100% rename from src/MapleFE/test/java2mpl/unary-operator.java rename to src/MapleFE/test/java/java2mpl/unary-operator.java diff --git a/src/MapleFE/test/java2mpl/unary-operator.java.result b/src/MapleFE/test/java/java2mpl/unary-operator.java.result similarity index 66% rename from src/MapleFE/test/java2mpl/unary-operator.java.result rename to src/MapleFE/test/java/java2mpl/unary-operator.java.result index fde76bdbe978473e1d5d2a435729d9ab8a2f68ac..2ed0a9a8e3cf5158bc710cd9148bcdaac9b5877b 100644 --- a/src/MapleFE/test/java2mpl/unary-operator.java.result +++ b/src/MapleFE/test/java/java2mpl/unary-operator.java.result @@ -7,12 +7,12 @@ class A Instance Initializer: Constructors: Methods: - func foo() throws: - var:y,z - xInc + func foo(x) throws: + Decl: y,z + x Inc y AddAssign (x Add a Add b) - aDec + a Dec b BxorAssign a z MulAssign y Add 4 @@ -20,3 +20,6 @@ class A LocalClasses: LocalInterfaces: +Identifier:y has no decl. +Identifier:z has no decl. +Identifier:y has no decl. diff --git a/src/MapleFE/test/java/java2mpl/var-scope-2.java b/src/MapleFE/test/java/java2mpl/var-scope-2.java new file mode 100644 index 0000000000000000000000000000000000000000..f5391c4be2185cfb52d8e8a25311711a1e3faf36 --- /dev/null +++ b/src/MapleFE/test/java/java2mpl/var-scope-2.java @@ -0,0 +1,36 @@ +// +//Copyright (C) [2020] Futurewei Technologies, Inc. 
All rights reverved. +// +//OpenArkFE is licensed under the Mulan PSL v2. +//You can use this software according to the terms and conditions of the Mulan PSL v2. +//You may obtain a copy of Mulan PSL v2 at: +// +// http://license.coscl.org.cn/MulanPSL2 +// +//THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +//EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +//FIT FOR A PARTICULAR PURPOSE. +//See the Mulan PSL v2 for more details. +// + +class A { + int a; + + int foo(int x) { + a = x; // field a + if (a == 1) { + int a = 2; // local variable a + a = x + this.a; + return a + 2; + } + + int a = 4; // local variable a|1 + a = x + this.a; + { + int a = 6; // local variable a|2 + x = a; + } + a += x; // local variable a|1 + return a + 4; + } +} diff --git a/src/MapleFE/test/java/java2mpl/var-scope-2.java.result b/src/MapleFE/test/java/java2mpl/var-scope-2.java.result new file mode 100644 index 0000000000000000000000000000000000000000..5a56845acdad661704058e92c9e99b6f4cdfb461 --- /dev/null +++ b/src/MapleFE/test/java/java2mpl/var-scope-2.java.result @@ -0,0 +1,28 @@ +Matched 78 tokens. +============= Module =========== +== Sub Tree == +class A + Fields: + a + Instance Initializer: + Constructors: + Methods: + func foo(x) throws: + a Assign x + cond-branch cond:a EQ 1 + true branch : + Decl: a=2 + a Assign x Add this.a + return a Add 2 + false branch : + + Decl: a=4 + a Assign x Add this.a + Decl: a=6 + x Assign a + + a AddAssign x + return a Add 4 + LocalClasses: + LocalInterfaces: + diff --git a/src/MapleFE/test/java2mpl/var-scope.java b/src/MapleFE/test/java/java2mpl/var-scope.java similarity index 100% rename from src/MapleFE/test/java2mpl/var-scope.java rename to src/MapleFE/test/java/java2mpl/var-scope.java diff --git a/src/MapleFE/test/java2mpl/var-scope.java.result b/src/MapleFE/test/java/java2mpl/var-scope.java.result similarity index 73% rename from src/MapleFE/test/java2mpl/var-scope.java.result rename to src/MapleFE/test/java/java2mpl/var-scope.java.result index 253ac3d55f5aa2c5fd80c158f084987ebcf9bdb3..85df7146e43c6edfedc9749e43b16cd3901a7e5d 100644 --- a/src/MapleFE/test/java2mpl/var-scope.java.result +++ b/src/MapleFE/test/java/java2mpl/var-scope.java.result @@ -7,15 +7,16 @@ class A Instance Initializer: Constructors: Methods: - func foo() throws: + func foo(x) throws: a Assign x cond-branch cond:a EQ 1 true branch : - var:a=2 - + Decl: a=2 + a Assign x Add this.a + return a Add 2 false branch : - var:a=4 + Decl: a=4 a Assign x Add this.a return a Add 4 LocalClasses: diff --git a/src/MapleFE/test/java2mpl/whileloop-1.java b/src/MapleFE/test/java/java2mpl/whileloop-1.java similarity index 100% rename from src/MapleFE/test/java2mpl/whileloop-1.java rename to src/MapleFE/test/java/java2mpl/whileloop-1.java diff --git a/src/MapleFE/test/java2mpl/whileloop-1.java.result b/src/MapleFE/test/java/java2mpl/whileloop-1.java.result similarity index 100% rename from src/MapleFE/test/java2mpl/whileloop-1.java.result rename to src/MapleFE/test/java/java2mpl/whileloop-1.java.result diff --git a/src/MapleFE/test/openjdk/AbstractMethodError.java b/src/MapleFE/test/java/openjdk/AbstractMethodError.java similarity index 100% rename from src/MapleFE/test/openjdk/AbstractMethodError.java rename to src/MapleFE/test/java/openjdk/AbstractMethodError.java diff --git a/src/MapleFE/test/openjdk/AbstractMethodError.java.result b/src/MapleFE/test/java/openjdk/AbstractMethodError.java.result similarity index 72% rename from 
src/MapleFE/test/openjdk/AbstractMethodError.java.result rename to src/MapleFE/test/java/openjdk/AbstractMethodError.java.result index b316609ddc8ca5d33c2ebbdf41903a71a4cf14ed..ba598d38f2e8ca72c4baf23edd11262f99d7e37d 100644 --- a/src/MapleFE/test/openjdk/AbstractMethodError.java.result +++ b/src/MapleFE/test/java/openjdk/AbstractMethodError.java.result @@ -1,17 +1,16 @@ Matched 5 tokens. -Matched 44 tokens. +Matched 43 tokens. ============= Module =========== == Sub Tree == package java.lang == Sub Tree == class AbstractMethodError Fields: - serialVersionUID=Sub - -1256039074 + serialVersionUID=1256039074 Instance Initializer: Constructors: constructor AbstractMethodError() throws: - constructor AbstractMethodError() throws: + constructor AbstractMethodError(s) throws: Methods: LocalClasses: LocalInterfaces: diff --git a/src/MapleFE/test/openjdk/AbstractStringBuilder-simplified.java b/src/MapleFE/test/java/openjdk/AbstractStringBuilder-simplified.java similarity index 100% rename from src/MapleFE/test/openjdk/AbstractStringBuilder-simplified.java rename to src/MapleFE/test/java/openjdk/AbstractStringBuilder-simplified.java diff --git a/src/MapleFE/test/java/openjdk/AbstractStringBuilder-simplified.java.result b/src/MapleFE/test/java/openjdk/AbstractStringBuilder-simplified.java.result new file mode 100644 index 0000000000000000000000000000000000000000..4ca8eed4878e7a6857f1d703c8b24ab46b62a1de --- /dev/null +++ b/src/MapleFE/test/java/openjdk/AbstractStringBuilder-simplified.java.result @@ -0,0 +1,43 @@ +Matched 329 tokens. +============= Module =========== +== Sub Tree == +class AbstractStringBuilder + Fields: + value count MAX_ARRAY_SIZE=Integer.MAX_VALUE Sub 8 + Instance Initializer: + Constructors: + constructor AbstractStringBuilder() throws: + constructor AbstractStringBuilder(capacity) throws: + Methods: + func length() throws: + func capacity() throws: + func ensureCapacity(minimumCapacity) throws: + func ensureCapacityInternal(minimumCapacity) throws: + func newCapacity(minCapacity) throws: + func hugeCapacity(minCapacity) throws: + func trimToSize() throws: + func setLength(newLength) throws: + func charAt(index) throws: + func codePointAt(index) throws: + func codePointBefore(index) throws: + func codePointCount(beginIndex,endIndex) throws: + func offsetByCodePoints(index,codePointOffset) throws: + func getChars(srcBegin,srcEnd,dst,dstBegin) throws: + func setCharAt(index,ch) throws: + func append(obj) throws: + func append(str) throws: + func append(sb) throws: + func append(asb) throws: + func append(s) throws: + func appendNull() throws: + func append(s,start,end) throws: + func append(str) throws: + func append(str[],offset,len) throws: + func append(b) throws: + func append(c) throws: + func append(i) throws: + func append(l) throws: + func append(f) throws: + LocalClasses: + LocalInterfaces: + diff --git a/src/MapleFE/test/openjdk/AbstractStringBuilder.java b/src/MapleFE/test/java/openjdk/AbstractStringBuilder.java similarity index 100% rename from src/MapleFE/test/openjdk/AbstractStringBuilder.java rename to src/MapleFE/test/java/openjdk/AbstractStringBuilder.java diff --git a/src/MapleFE/test/openjdk/AbstractStringBuilder.java.result b/src/MapleFE/test/java/openjdk/AbstractStringBuilder.java.result similarity index 57% rename from src/MapleFE/test/openjdk/AbstractStringBuilder.java.result rename to src/MapleFE/test/java/openjdk/AbstractStringBuilder.java.result index e2266a7cc41e75fd420a0e6afe758f2f10896cd7..9e9e2349c4b2ed8229f362673a4d7110a10e62ee 100644 Binary files 
a/src/MapleFE/test/openjdk/AbstractStringBuilder.java.result and b/src/MapleFE/test/java/openjdk/AbstractStringBuilder.java.result differ diff --git a/src/MapleFE/test/openjdk/Appendable.java b/src/MapleFE/test/java/openjdk/Appendable.java similarity index 100% rename from src/MapleFE/test/openjdk/Appendable.java rename to src/MapleFE/test/java/openjdk/Appendable.java diff --git a/src/MapleFE/test/openjdk/Appendable.java.result b/src/MapleFE/test/java/openjdk/Appendable.java.result similarity index 60% rename from src/MapleFE/test/openjdk/Appendable.java.result rename to src/MapleFE/test/java/openjdk/Appendable.java.result index 93789bd41e00d444dbea1751d57c1b97382d94ab..ee3a5ce7fa79c2591be93f9ab36dd50cc0a4ff15 100644 --- a/src/MapleFE/test/openjdk/Appendable.java.result +++ b/src/MapleFE/test/java/openjdk/Appendable.java.result @@ -11,7 +11,7 @@ interface Appendable Fields: Methods: - func append() throws: IOException - func append() throws: IOException - func append() throws: IOException + func append(csq) throws: IOException + func append(csq,start,end) throws: IOException + func append(c) throws: IOException diff --git a/src/MapleFE/test/openjdk/ArithmeticException.java b/src/MapleFE/test/java/openjdk/ArithmeticException.java similarity index 100% rename from src/MapleFE/test/openjdk/ArithmeticException.java rename to src/MapleFE/test/java/openjdk/ArithmeticException.java diff --git a/src/MapleFE/test/openjdk/ArithmeticException.java.result b/src/MapleFE/test/java/openjdk/ArithmeticException.java.result similarity index 86% rename from src/MapleFE/test/openjdk/ArithmeticException.java.result rename to src/MapleFE/test/java/openjdk/ArithmeticException.java.result index 9b4a3b76ead1bd4ae2341109ff7b11a765835a95..b858b7e57a8387ece48fb83667e0434f45e1f613 100644 --- a/src/MapleFE/test/openjdk/ArithmeticException.java.result +++ b/src/MapleFE/test/java/openjdk/ArithmeticException.java.result @@ -10,7 +10,7 @@ class ArithmeticException Instance Initializer: Constructors: constructor ArithmeticException() throws: - constructor ArithmeticException() throws: + constructor ArithmeticException(s) throws: Methods: LocalClasses: LocalInterfaces: diff --git a/src/MapleFE/test/openjdk/ArrayIndexOutOfBoundsException.java b/src/MapleFE/test/java/openjdk/ArrayIndexOutOfBoundsException.java similarity index 100% rename from src/MapleFE/test/openjdk/ArrayIndexOutOfBoundsException.java rename to src/MapleFE/test/java/openjdk/ArrayIndexOutOfBoundsException.java diff --git a/src/MapleFE/test/openjdk/ArrayIndexOutOfBoundsException.java.result b/src/MapleFE/test/java/openjdk/ArrayIndexOutOfBoundsException.java.result similarity index 46% rename from src/MapleFE/test/openjdk/ArrayIndexOutOfBoundsException.java.result rename to src/MapleFE/test/java/openjdk/ArrayIndexOutOfBoundsException.java.result index ddd36a4f3ca1e7977ba9191c26babd5f435be1a6..b80ef27091987537772f2e3601c2f636cabf1e82 100644 --- a/src/MapleFE/test/openjdk/ArrayIndexOutOfBoundsException.java.result +++ b/src/MapleFE/test/java/openjdk/ArrayIndexOutOfBoundsException.java.result @@ -1,20 +1,19 @@ Matched 5 tokens. -Matched 110 tokens. +Matched 109 tokens. 
============= Module =========== == Sub Tree == package java.lang == Sub Tree == class ArrayIndexOutOfBoundsException Fields: - serialVersionUID=Sub - -1467917380 + serialVersionUID=1467917380 Instance Initializer: Constructors: constructor ArrayIndexOutOfBoundsException() throws: - constructor ArrayIndexOutOfBoundsException() throws: - constructor ArrayIndexOutOfBoundsException() throws: - constructor ArrayIndexOutOfBoundsException() throws: - constructor ArrayIndexOutOfBoundsException() throws: + constructor ArrayIndexOutOfBoundsException(index) throws: + constructor ArrayIndexOutOfBoundsException(s) throws: + constructor ArrayIndexOutOfBoundsException(sourceLength,index) throws: + constructor ArrayIndexOutOfBoundsException(sourceLength,offset,count) throws: Methods: LocalClasses: LocalInterfaces: diff --git a/src/MapleFE/test/openjdk/ArrayStoreException.java b/src/MapleFE/test/java/openjdk/ArrayStoreException.java similarity index 100% rename from src/MapleFE/test/openjdk/ArrayStoreException.java rename to src/MapleFE/test/java/openjdk/ArrayStoreException.java diff --git a/src/MapleFE/test/openjdk/ArrayStoreException.java.result b/src/MapleFE/test/java/openjdk/ArrayStoreException.java.result similarity index 72% rename from src/MapleFE/test/openjdk/ArrayStoreException.java.result rename to src/MapleFE/test/java/openjdk/ArrayStoreException.java.result index eedee8eec59aa257701e5ca55d339cda3c8e64e9..9ce786e95f8d5b34264ed8cba49b6b6710357764 100644 --- a/src/MapleFE/test/openjdk/ArrayStoreException.java.result +++ b/src/MapleFE/test/java/openjdk/ArrayStoreException.java.result @@ -1,17 +1,16 @@ Matched 5 tokens. -Matched 44 tokens. +Matched 43 tokens. ============= Module =========== == Sub Tree == package java.lang == Sub Tree == class ArrayStoreException Fields: - serialVersionUID=Sub - 1085227297 + serialVersionUID=-1085227297 Instance Initializer: Constructors: constructor ArrayStoreException() throws: - constructor ArrayStoreException() throws: + constructor ArrayStoreException(s) throws: Methods: LocalClasses: LocalInterfaces: diff --git a/src/MapleFE/test/openjdk/AutoCloseable.java b/src/MapleFE/test/java/openjdk/AutoCloseable.java similarity index 100% rename from src/MapleFE/test/openjdk/AutoCloseable.java rename to src/MapleFE/test/java/openjdk/AutoCloseable.java diff --git a/src/MapleFE/test/openjdk/AutoCloseable.java.result b/src/MapleFE/test/java/openjdk/AutoCloseable.java.result similarity index 100% rename from src/MapleFE/test/openjdk/AutoCloseable.java.result rename to src/MapleFE/test/java/openjdk/AutoCloseable.java.result diff --git a/src/MapleFE/test/openjdk/Boolean.java b/src/MapleFE/test/java/openjdk/Boolean.java similarity index 100% rename from src/MapleFE/test/openjdk/Boolean.java rename to src/MapleFE/test/java/openjdk/Boolean.java diff --git a/src/MapleFE/test/openjdk/Boolean.java.result b/src/MapleFE/test/java/openjdk/Boolean.java.result similarity index 44% rename from src/MapleFE/test/openjdk/Boolean.java.result rename to src/MapleFE/test/java/openjdk/Boolean.java.result index 8e54b2f8c83bb7e8394d669aed217fc728fdaed5..a780b2d8ad1a8801a0ff204e8763f0546d141c0b 100644 --- a/src/MapleFE/test/openjdk/Boolean.java.result +++ b/src/MapleFE/test/java/openjdk/Boolean.java.result @@ -1,58 +1,64 @@ Matched 5 tokens. -Matched 442 tokens. +Matched 440 tokens. 
============= Module =========== == Sub Tree == package java.lang == Sub Tree == class Boolean Fields: - TRUE=new Boolean FALSE=new Boolean TYPE=() value serialVersionUID=Sub - 711132434 + TRUE=new Boolean(true) FALSE=new Boolean(false) TYPE=()boolean.getComponentType() value serialVersionUID=-711132434 Instance Initializer: Constructors: - constructor Boolean() throws: + constructor Boolean(value) throws: this.value Assign value - constructor Boolean() throws: + constructor Boolean(s) throws: Methods: - func parseBoolean() throws: + func parseBoolean(s) throws: return ((s NE null) Land s.equalsIgnoreCase("true")) func booleanValue() throws: return value - func valueOf() throws: + func valueOf(b) throws: return () - func valueOf() throws: + func valueOf(s) throws: return - func toString() throws: + func toString(b) throws: return func toString() throws: return func hashCode() throws: return Boolean.hashCode(value) - func hashCode() throws: + func hashCode(value) throws: return - func equals() throws: - cond-branch cond: + func equals(obj) throws: + cond-branch cond:obj instanceof Boolean true branch : - return value EQ ((Boolean)obj)booleanValue + return value EQ (Boolean)obj.booleanValue() false branch : return false - func getBoolean() throws: - var:result=false + func getBoolean(name) throws: + Decl: result=false result Assign parseBoolean(System.getProperty(name)) + IllegalArgumentException + NullPointerException + e return result - func compareTo() throws: + func compareTo(b) throws: return compare(this.value,b.value) - func compare() throws: + func compare(x,y) throws: return - func logicalAnd() throws: + func logicalAnd(a,b) throws: return a Land b - func logicalOr() throws: + func logicalOr(a,b) throws: return a Lor b - func logicalXor() throws: + func logicalXor(a,b) throws: return a Bxor b LocalClasses: LocalInterfaces: +Identifier:result has no decl. +Identifier:IllegalArgumentException has no decl. +Identifier:NullPointerException has no decl. +Identifier:e has no decl. 
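The openjdk .result updates above follow the same pattern as the java2mpl ones: constructor and method parameters are now dumped (constructor AbstractMethodError(s) rather than a second parameterless entry), and a negative constant initializer such as serialVersionUID is no longer split into a dangling Sub operator followed by its magnitude. A minimal, hypothetical class of the shape these expectation files describe — the DemoError name and the constant value are made up for illustration; the real inputs are the unmodified OpenJDK sources being renamed in this patch:

// Hypothetical stand-in for the renamed OpenJDK test inputs: an error class
// with a negative serialVersionUID and an overloaded String constructor.
public class DemoError extends Error {
  // The old dump rendered this as "serialVersionUID=Sub" plus the magnitude on
  // a separate line; the updated dump keeps the initializer together.
  private static final long serialVersionUID = -123456789L; // illustrative value only

  public DemoError() {
    super();
  }

  public DemoError(String s) { // now dumped as "constructor DemoError(s) throws:"
    super(s);
  }
}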
diff --git a/src/MapleFE/test/openjdk/BootstrapMethodError.java b/src/MapleFE/test/java/openjdk/BootstrapMethodError.java similarity index 100% rename from src/MapleFE/test/openjdk/BootstrapMethodError.java rename to src/MapleFE/test/java/openjdk/BootstrapMethodError.java diff --git a/src/MapleFE/test/openjdk/BootstrapMethodError.java.result b/src/MapleFE/test/java/openjdk/BootstrapMethodError.java.result similarity index 68% rename from src/MapleFE/test/openjdk/BootstrapMethodError.java.result rename to src/MapleFE/test/java/openjdk/BootstrapMethodError.java.result index 144c70e68044fbb458be1deb47ae88174ad43e1a..d4d4b74a471b0aaaae526add731d196d79c6dfcd 100644 --- a/src/MapleFE/test/openjdk/BootstrapMethodError.java.result +++ b/src/MapleFE/test/java/openjdk/BootstrapMethodError.java.result @@ -10,9 +10,9 @@ class BootstrapMethodError Instance Initializer: Constructors: constructor BootstrapMethodError() throws: - constructor BootstrapMethodError() throws: - constructor BootstrapMethodError() throws: - constructor BootstrapMethodError() throws: + constructor BootstrapMethodError(s) throws: + constructor BootstrapMethodError(s,cause) throws: + constructor BootstrapMethodError(cause) throws: initCause(cause) Methods: LocalClasses: diff --git a/src/MapleFE/test/openjdk/Byte.java b/src/MapleFE/test/java/openjdk/Byte.java similarity index 100% rename from src/MapleFE/test/openjdk/Byte.java rename to src/MapleFE/test/java/openjdk/Byte.java diff --git a/src/MapleFE/test/openjdk/Byte.java.result b/src/MapleFE/test/java/openjdk/Byte.java.result similarity index 53% rename from src/MapleFE/test/openjdk/Byte.java.result rename to src/MapleFE/test/java/openjdk/Byte.java.result index 2c9e8a581342384a63e3bd9d4b345eaea6a1c9ed..37e80e137922edf8e27e5f557862acf629f2eb8d 100644 --- a/src/MapleFE/test/openjdk/Byte.java.result +++ b/src/MapleFE/test/java/openjdk/Byte.java.result @@ -1,46 +1,42 @@ Matched 5 tokens. -Matched 909 tokens. +Matched 906 tokens. ============= Module =========== == Sub Tree == package java.lang == Sub Tree == class Byte Fields: - MIN_VALUE=Sub - 128 MAX_VALUE=127 TYPE=() value SIZE=8 BYTES= serialVersionUID=Sub - 296684260 DIGITS= UPPER_CASE_DIGITS= + MIN_VALUE=-128 MAX_VALUE=127 TYPE=()byte.getComponentType() value SIZE=8 BYTES=SIZE Div Byte.SIZE serialVersionUID=-296684260 DIGITS= UPPER_CASE_DIGITS= Instance Initializer: Constructors: - constructor Byte() throws: + constructor Byte(value) throws: this.value Assign value - constructor Byte() throws: + constructor Byte(s) throws: this.value Assign parseByte(s,10) Methods: - func toString() throws: + func toString(b) throws: return Integer.toString((int)b,10) - func valueOf() throws: - var:offset=128 + func valueOf(b) throws: + Decl: offset=128 return - func parseByte() throws: NumberFormatException - var:i=Integer.parseInt(s,radix) + func parseByte(s,radix) throws: NumberFormatException + Decl: i=Integer.parseInt(s,radix) cond-branch cond:i LT MIN_VALUE Lor i GT MAX_VALUE true branch : - new NumberFormatException - false branch : + new NumberFormatException("Value out of range. 
Value:\"" Add s Add "\" Radix:" Add radix) false branch : return (byte)i - func parseByte() throws: NumberFormatException + func parseByte(s) throws: NumberFormatException return parseByte(s,10) - func valueOf() throws: NumberFormatException + func valueOf(s,radix) throws: NumberFormatException return valueOf(parseByte(s,radix)) - func valueOf() throws: NumberFormatException + func valueOf(s) throws: NumberFormatException return valueOf(s,10) - func decode() throws: NumberFormatException - var:i=Integer.decode(nm) + func decode(nm) throws: NumberFormatException + Decl: i=Integer.decode(nm) cond-branch cond:i LT MIN_VALUE Lor i GT MAX_VALUE true branch : - new NumberFormatException - false branch : + new NumberFormatException("Value " Add i Add " out of range from input " Add nm) false branch : return valueOf((byte)i) func byteValue() throws: @@ -59,29 +55,29 @@ class Byte return Integer.toString((int)value) func hashCode() throws: return Byte.hashCode(value) - func hashCode() throws: + func hashCode(value) throws: return (int)value - func equals() throws: - cond-branch cond: + func equals(obj) throws: + cond-branch cond:obj instanceof Byte true branch : - return value EQ ((Byte)obj)byteValue + return value EQ (Byte)obj.byteValue() false branch : return false - func compareTo() throws: + func compareTo(anotherByte) throws: return compare(this.value,anotherByte.value) - func compare() throws: + func compare(x,y) throws: return x Sub y - func toUnsignedInt() throws: + func toUnsignedInt(x) throws: return ((int)x) Band 255 - func toUnsignedLong() throws: + func toUnsignedLong(x) throws: return ((long)x) Band 4095 - func toHexString() throws: - var:digits= - var:buf= + func toHexString(b,upperCase) throws: + Decl: digits= + Decl: buf= Assign Assign - return new String + return new String(0,2,buf) LocalClasses: class ByteCache Fields: diff --git a/src/MapleFE/test/openjdk/CharSequence.java b/src/MapleFE/test/java/openjdk/CharSequence.java similarity index 100% rename from src/MapleFE/test/openjdk/CharSequence.java rename to src/MapleFE/test/java/openjdk/CharSequence.java diff --git a/src/MapleFE/test/openjdk/CharSequence.java.result b/src/MapleFE/test/java/openjdk/CharSequence.java.result similarity index 62% rename from src/MapleFE/test/openjdk/CharSequence.java.result rename to src/MapleFE/test/java/openjdk/CharSequence.java.result index 30f6b8b351ecccc27fe23d6dd4d5a06a1bba1703..be0ea994337d1959b2b85cf4253f0ef8d0608588 100644 --- a/src/MapleFE/test/openjdk/CharSequence.java.result +++ b/src/MapleFE/test/java/openjdk/CharSequence.java.result @@ -30,8 +30,8 @@ interface CharSequence Methods: func length() throws: - func charAt() throws: - func subSequence() throws: + func charAt(index) throws: + func subSequence(start,end) throws: func toString() throws: func chars() throws: class CharIterator @@ -45,20 +45,19 @@ interface CharSequence func nextInt() throws: cond-branch cond:hasNext() true branch : - return charAt( curInc + return charAt(cur Inc ) false branch : - new NoSuchElementException + new NoSuchElementException() - func forEachRemaining() throws: + func forEachRemaining(block) throws: for ( ) block.accept(charAt(cur)) LocalClasses: LocalInterfaces: - return StreamSupport.intStream(() -> Spliterators.spliterator(new CharIterator,length(),Spliterator.ORDERED) -,Spliterator.SUBSIZED Bor Spliterator.SIZED Bor Spliterator.ORDERED,false) + return StreamSupport.intStream(() -> Spliterators.spliterator(new CharIterator(),length(),Spliterator.ORDERED),Spliterator.SUBSIZED Bor Spliterator.SIZED 
Bor Spliterator.ORDERED,false) func codePoints() throws: class CodePointIterator Fields: @@ -66,27 +65,46 @@ interface CharSequence Instance Initializer: Constructors: Methods: - func forEachRemaining() throws: - var:length=length() - var:i=cur + func forEachRemaining(block) throws: + Decl: length=length() + Decl: i=cur + while i LT length Decl: c1=charAt(i Inc +) + cond-branch cond:Character.isHighSurrogate(c1) Lor i GE length + true branch : + block.accept(c1) + false branch : + Decl: c2=charAt(i) + cond-branch cond:Character.isLowSurrogate(c2) + true branch : + i Inc + + block.accept(Character.toCodePoint(c1,c2)) + false branch : + block.accept(c1) + + + + + cur Assign i func hasNext() throws: return cur LT length() func nextInt() throws: - var:length=length() + Decl: length=length() cond-branch cond:cur GE length true branch : - new NoSuchElementException + new NoSuchElementException() false branch : - var:c1=charAt( curInc + Decl: c1=charAt(cur Inc ) cond-branch cond:Character.isHighSurrogate(c1) Land cur LT length true branch : - var:c2=charAt(cur) + Decl: c2=charAt(cur) cond-branch cond:Character.isLowSurrogate(c2) true branch : - curInc + cur Inc return Character.toCodePoint(c1,c2) false branch : @@ -97,6 +115,5 @@ interface CharSequence LocalClasses: LocalInterfaces: - return StreamSupport.intStream(() -> Spliterators.spliteratorUnknownSize(new CodePointIterator,Spliterator.ORDERED) -,Spliterator.ORDERED,false) + return StreamSupport.intStream(() -> Spliterators.spliteratorUnknownSize(new CodePointIterator(),Spliterator.ORDERED),Spliterator.ORDERED,false) diff --git a/src/MapleFE/test/openjdk/Character-1.java b/src/MapleFE/test/java/openjdk/Character-1.java similarity index 100% rename from src/MapleFE/test/openjdk/Character-1.java rename to src/MapleFE/test/java/openjdk/Character-1.java diff --git a/src/MapleFE/test/java/openjdk/Character-1.java.result b/src/MapleFE/test/java/openjdk/Character-1.java.result new file mode 100644 index 0000000000000000000000000000000000000000..b145edf69e466f864e9534a862fa50f3b9feecd1 --- /dev/null +++ b/src/MapleFE/test/java/openjdk/Character-1.java.result @@ -0,0 +1,94 @@ +Matched 5 tokens. +Matched 14 tokens. +Matched 21 tokens. +Matched 28 tokens. +Matched 35 tokens. +Matched 42 tokens. +Matched 5258 tokens. 
+============= Module =========== +== Sub Tree == +package java.lang +== Sub Tree == +import dalvik.annotation.optimization.FastNative +== Sub Tree == +import java.util.Arrays +== Sub Tree == +import java.util.HashMap +== Sub Tree == +import java.util.Locale +== Sub Tree == +import java.util.Map +== Sub Tree == +class Character + Fields: + MIN_RADIX=2 MAX_RADIX=36 MIN_VALUE=0 MAX_VALUE=65535 TYPE=()char.getComponentType() UNASSIGNED=0 UPPERCASE_LETTER=1 LOWERCASE_LETTER=2 TITLECASE_LETTER=3 MODIFIER_LETTER=4 OTHER_LETTER=5 NON_SPACING_MARK=6 ENCLOSING_MARK=7 COMBINING_SPACING_MARK=8 DECIMAL_DIGIT_NUMBER=9 LETTER_NUMBER=10 OTHER_NUMBER=11 SPACE_SEPARATOR=12 LINE_SEPARATOR=13 PARAGRAPH_SEPARATOR=14 CONTROL=15 FORMAT=16 PRIVATE_USE=18 SURROGATE=19 DASH_PUNCTUATION=20 START_PUNCTUATION=21 END_PUNCTUATION=22 CONNECTOR_PUNCTUATION=23 OTHER_PUNCTUATION=24 MATH_SYMBOL=25 CURRENCY_SYMBOL=26 MODIFIER_SYMBOL=27 OTHER_SYMBOL=28 INITIAL_QUOTE_PUNCTUATION=29 FINAL_QUOTE_PUNCTUATION=30 ERROR=-1 DIRECTIONALITY_UNDEFINED=-1 DIRECTIONALITY_LEFT_TO_RIGHT=0 DIRECTIONALITY_RIGHT_TO_LEFT=1 DIRECTIONALITY_RIGHT_TO_LEFT_ARABIC=2 DIRECTIONALITY_EUROPEAN_NUMBER=3 DIRECTIONALITY_EUROPEAN_NUMBER_SEPARATOR=4 DIRECTIONALITY_EUROPEAN_NUMBER_TERMINATOR=5 DIRECTIONALITY_ARABIC_NUMBER=6 DIRECTIONALITY_COMMON_NUMBER_SEPARATOR=7 DIRECTIONALITY_NONSPACING_MARK=8 DIRECTIONALITY_BOUNDARY_NEUTRAL=9 DIRECTIONALITY_PARAGRAPH_SEPARATOR=10 DIRECTIONALITY_SEGMENT_SEPARATOR=11 DIRECTIONALITY_WHITESPACE=12 DIRECTIONALITY_OTHER_NEUTRALS=13 DIRECTIONALITY_LEFT_TO_RIGHT_EMBEDDING=14 DIRECTIONALITY_LEFT_TO_RIGHT_OVERRIDE=15 DIRECTIONALITY_RIGHT_TO_LEFT_EMBEDDING=16 DIRECTIONALITY_RIGHT_TO_LEFT_OVERRIDE=17 DIRECTIONALITY_POP_DIRECTIONAL_FORMAT=18 MIN_HIGH_SURROGATE=55296 MAX_HIGH_SURROGATE=56319 MIN_LOW_SURROGATE=56320 MAX_LOW_SURROGATE=57343 MIN_SURROGATE=MIN_HIGH_SURROGATE MAX_SURROGATE=MAX_LOW_SURROGATE MIN_SUPPLEMENTARY_CODE_POINT=65536 MIN_CODE_POINT=0 MAX_CODE_POINT=1114111 DIRECTIONALITY= + Instance Initializer: + Constructors: + Methods: + LocalClasses: + class Subset + Fields: + name + Instance Initializer: + Constructors: + constructor Subset(name) throws: + cond-branch cond:name EQ null + true branch : + new NullPointerException("name") + false branch : + + this.name Assign name + Methods: + func equals(obj) throws: + return (this EQ obj) + func hashCode() throws: + return super.hashCode() + func toString() throws: + return name + LocalClasses: + LocalInterfaces: + class UnicodeBlock + Fields: + map=new HashMap(256) BASIC_LATIN=new UnicodeBlock("BASIC_LATIN","BASIC LATIN","BASICLATIN") LATIN_1_SUPPLEMENT=new UnicodeBlock("LATIN_1_SUPPLEMENT","LATIN-1 SUPPLEMENT","LATIN-1SUPPLEMENT") LATIN_EXTENDED_A=new UnicodeBlock("LATIN_EXTENDED_A","LATIN EXTENDED-A","LATINEXTENDED-A") LATIN_EXTENDED_B=new UnicodeBlock("LATIN_EXTENDED_B","LATIN EXTENDED-B","LATINEXTENDED-B") IPA_EXTENSIONS=new UnicodeBlock("IPA_EXTENSIONS","IPA EXTENSIONS","IPAEXTENSIONS") SPACING_MODIFIER_LETTERS=new UnicodeBlock("SPACING_MODIFIER_LETTERS","SPACING MODIFIER LETTERS","SPACINGMODIFIERLETTERS") COMBINING_DIACRITICAL_MARKS=new UnicodeBlock("COMBINING_DIACRITICAL_MARKS","COMBINING DIACRITICAL MARKS","COMBININGDIACRITICALMARKS") GREEK=new UnicodeBlock("GREEK","GREEK AND COPTIC","GREEKANDCOPTIC") CYRILLIC=new UnicodeBlock("CYRILLIC") ARMENIAN=new UnicodeBlock("ARMENIAN") HEBREW=new UnicodeBlock("HEBREW") ARABIC=new UnicodeBlock("ARABIC") DEVANAGARI=new UnicodeBlock("DEVANAGARI") BENGALI=new UnicodeBlock("BENGALI") GURMUKHI=new UnicodeBlock("GURMUKHI") GUJARATI=new 
UnicodeBlock("GUJARATI") ORIYA=new UnicodeBlock("ORIYA") TAMIL=new UnicodeBlock("TAMIL") TELUGU=new UnicodeBlock("TELUGU") KANNADA=new UnicodeBlock("KANNADA") MALAYALAM=new UnicodeBlock("MALAYALAM") THAI=new UnicodeBlock("THAI") LAO=new UnicodeBlock("LAO") TIBETAN=new UnicodeBlock("TIBETAN") GEORGIAN=new UnicodeBlock("GEORGIAN") HANGUL_JAMO=new UnicodeBlock("HANGUL_JAMO","HANGUL JAMO","HANGULJAMO") LATIN_EXTENDED_ADDITIONAL=new UnicodeBlock("LATIN_EXTENDED_ADDITIONAL","LATIN EXTENDED ADDITIONAL","LATINEXTENDEDADDITIONAL") GREEK_EXTENDED=new UnicodeBlock("GREEK_EXTENDED","GREEK EXTENDED","GREEKEXTENDED") GENERAL_PUNCTUATION=new UnicodeBlock("GENERAL_PUNCTUATION","GENERAL PUNCTUATION","GENERALPUNCTUATION") SUPERSCRIPTS_AND_SUBSCRIPTS=new UnicodeBlock("SUPERSCRIPTS_AND_SUBSCRIPTS","SUPERSCRIPTS AND SUBSCRIPTS","SUPERSCRIPTSANDSUBSCRIPTS") CURRENCY_SYMBOLS=new UnicodeBlock("CURRENCY_SYMBOLS","CURRENCY SYMBOLS","CURRENCYSYMBOLS") COMBINING_MARKS_FOR_SYMBOLS=new UnicodeBlock("COMBINING_MARKS_FOR_SYMBOLS","COMBINING DIACRITICAL MARKS FOR SYMBOLS","COMBININGDIACRITICALMARKSFORSYMBOLS","COMBINING MARKS FOR SYMBOLS","COMBININGMARKSFORSYMBOLS") LETTERLIKE_SYMBOLS=new UnicodeBlock("LETTERLIKE_SYMBOLS","LETTERLIKE SYMBOLS","LETTERLIKESYMBOLS") NUMBER_FORMS=new UnicodeBlock("NUMBER_FORMS","NUMBER FORMS","NUMBERFORMS") ARROWS=new UnicodeBlock("ARROWS") MATHEMATICAL_OPERATORS=new UnicodeBlock("MATHEMATICAL_OPERATORS","MATHEMATICAL OPERATORS","MATHEMATICALOPERATORS") MISCELLANEOUS_TECHNICAL=new UnicodeBlock("MISCELLANEOUS_TECHNICAL","MISCELLANEOUS TECHNICAL","MISCELLANEOUSTECHNICAL") CONTROL_PICTURES=new UnicodeBlock("CONTROL_PICTURES","CONTROL PICTURES","CONTROLPICTURES") OPTICAL_CHARACTER_RECOGNITION=new UnicodeBlock("OPTICAL_CHARACTER_RECOGNITION","OPTICAL CHARACTER RECOGNITION","OPTICALCHARACTERRECOGNITION") ENCLOSED_ALPHANUMERICS=new UnicodeBlock("ENCLOSED_ALPHANUMERICS","ENCLOSED ALPHANUMERICS","ENCLOSEDALPHANUMERICS") BOX_DRAWING=new UnicodeBlock("BOX_DRAWING","BOX DRAWING","BOXDRAWING") BLOCK_ELEMENTS=new UnicodeBlock("BLOCK_ELEMENTS","BLOCK ELEMENTS","BLOCKELEMENTS") GEOMETRIC_SHAPES=new UnicodeBlock("GEOMETRIC_SHAPES","GEOMETRIC SHAPES","GEOMETRICSHAPES") MISCELLANEOUS_SYMBOLS=new UnicodeBlock("MISCELLANEOUS_SYMBOLS","MISCELLANEOUS SYMBOLS","MISCELLANEOUSSYMBOLS") DINGBATS=new UnicodeBlock("DINGBATS") CJK_SYMBOLS_AND_PUNCTUATION=new UnicodeBlock("CJK_SYMBOLS_AND_PUNCTUATION","CJK SYMBOLS AND PUNCTUATION","CJKSYMBOLSANDPUNCTUATION") HIRAGANA=new UnicodeBlock("HIRAGANA") KATAKANA=new UnicodeBlock("KATAKANA") BOPOMOFO=new UnicodeBlock("BOPOMOFO") HANGUL_COMPATIBILITY_JAMO=new UnicodeBlock("HANGUL_COMPATIBILITY_JAMO","HANGUL COMPATIBILITY JAMO","HANGULCOMPATIBILITYJAMO") KANBUN=new UnicodeBlock("KANBUN") ENCLOSED_CJK_LETTERS_AND_MONTHS=new UnicodeBlock("ENCLOSED_CJK_LETTERS_AND_MONTHS","ENCLOSED CJK LETTERS AND MONTHS","ENCLOSEDCJKLETTERSANDMONTHS") CJK_COMPATIBILITY=new UnicodeBlock("CJK_COMPATIBILITY","CJK COMPATIBILITY","CJKCOMPATIBILITY") CJK_UNIFIED_IDEOGRAPHS=new UnicodeBlock("CJK_UNIFIED_IDEOGRAPHS","CJK UNIFIED IDEOGRAPHS","CJKUNIFIEDIDEOGRAPHS") HANGUL_SYLLABLES=new UnicodeBlock("HANGUL_SYLLABLES","HANGUL SYLLABLES","HANGULSYLLABLES") PRIVATE_USE_AREA=new UnicodeBlock("PRIVATE_USE_AREA","PRIVATE USE AREA","PRIVATEUSEAREA") CJK_COMPATIBILITY_IDEOGRAPHS=new UnicodeBlock("CJK_COMPATIBILITY_IDEOGRAPHS","CJK COMPATIBILITY IDEOGRAPHS","CJKCOMPATIBILITYIDEOGRAPHS") ALPHABETIC_PRESENTATION_FORMS=new UnicodeBlock("ALPHABETIC_PRESENTATION_FORMS","ALPHABETIC PRESENTATION 
FORMS","ALPHABETICPRESENTATIONFORMS") ARABIC_PRESENTATION_FORMS_A=new UnicodeBlock("ARABIC_PRESENTATION_FORMS_A","ARABIC PRESENTATION FORMS-A","ARABICPRESENTATIONFORMS-A") COMBINING_HALF_MARKS=new UnicodeBlock("COMBINING_HALF_MARKS","COMBINING HALF MARKS","COMBININGHALFMARKS") CJK_COMPATIBILITY_FORMS=new UnicodeBlock("CJK_COMPATIBILITY_FORMS","CJK COMPATIBILITY FORMS","CJKCOMPATIBILITYFORMS") SMALL_FORM_VARIANTS=new UnicodeBlock("SMALL_FORM_VARIANTS","SMALL FORM VARIANTS","SMALLFORMVARIANTS") ARABIC_PRESENTATION_FORMS_B=new UnicodeBlock("ARABIC_PRESENTATION_FORMS_B","ARABIC PRESENTATION FORMS-B","ARABICPRESENTATIONFORMS-B") HALFWIDTH_AND_FULLWIDTH_FORMS=new UnicodeBlock("HALFWIDTH_AND_FULLWIDTH_FORMS","HALFWIDTH AND FULLWIDTH FORMS","HALFWIDTHANDFULLWIDTHFORMS") SPECIALS=new UnicodeBlock("SPECIALS") SURROGATES_AREA=new UnicodeBlock("SURROGATES_AREA",false) SYRIAC=new UnicodeBlock("SYRIAC") THAANA=new UnicodeBlock("THAANA") SINHALA=new UnicodeBlock("SINHALA") MYANMAR=new UnicodeBlock("MYANMAR") ETHIOPIC=new UnicodeBlock("ETHIOPIC") CHEROKEE=new UnicodeBlock("CHEROKEE") UNIFIED_CANADIAN_ABORIGINAL_SYLLABICS=new UnicodeBlock("UNIFIED_CANADIAN_ABORIGINAL_SYLLABICS","UNIFIED CANADIAN ABORIGINAL SYLLABICS","UNIFIEDCANADIANABORIGINALSYLLABICS") OGHAM=new UnicodeBlock("OGHAM") RUNIC=new UnicodeBlock("RUNIC") KHMER=new UnicodeBlock("KHMER") MONGOLIAN=new UnicodeBlock("MONGOLIAN") BRAILLE_PATTERNS=new UnicodeBlock("BRAILLE_PATTERNS","BRAILLE PATTERNS","BRAILLEPATTERNS") CJK_RADICALS_SUPPLEMENT=new UnicodeBlock("CJK_RADICALS_SUPPLEMENT","CJK RADICALS SUPPLEMENT","CJKRADICALSSUPPLEMENT") KANGXI_RADICALS=new UnicodeBlock("KANGXI_RADICALS","KANGXI RADICALS","KANGXIRADICALS") IDEOGRAPHIC_DESCRIPTION_CHARACTERS=new UnicodeBlock("IDEOGRAPHIC_DESCRIPTION_CHARACTERS","IDEOGRAPHIC DESCRIPTION CHARACTERS","IDEOGRAPHICDESCRIPTIONCHARACTERS") BOPOMOFO_EXTENDED=new UnicodeBlock("BOPOMOFO_EXTENDED","BOPOMOFO EXTENDED","BOPOMOFOEXTENDED") CJK_UNIFIED_IDEOGRAPHS_EXTENSION_A=new UnicodeBlock("CJK_UNIFIED_IDEOGRAPHS_EXTENSION_A","CJK UNIFIED IDEOGRAPHS EXTENSION A","CJKUNIFIEDIDEOGRAPHSEXTENSIONA") YI_SYLLABLES=new UnicodeBlock("YI_SYLLABLES","YI SYLLABLES","YISYLLABLES") YI_RADICALS=new UnicodeBlock("YI_RADICALS","YI RADICALS","YIRADICALS") CYRILLIC_SUPPLEMENTARY=new UnicodeBlock("CYRILLIC_SUPPLEMENTARY","CYRILLIC SUPPLEMENTARY","CYRILLICSUPPLEMENTARY","CYRILLIC SUPPLEMENT","CYRILLICSUPPLEMENT") TAGALOG=new UnicodeBlock("TAGALOG") HANUNOO=new UnicodeBlock("HANUNOO") BUHID=new UnicodeBlock("BUHID") TAGBANWA=new UnicodeBlock("TAGBANWA") LIMBU=new UnicodeBlock("LIMBU") TAI_LE=new UnicodeBlock("TAI_LE","TAI LE","TAILE") KHMER_SYMBOLS=new UnicodeBlock("KHMER_SYMBOLS","KHMER SYMBOLS","KHMERSYMBOLS") PHONETIC_EXTENSIONS=new UnicodeBlock("PHONETIC_EXTENSIONS","PHONETIC EXTENSIONS","PHONETICEXTENSIONS") MISCELLANEOUS_MATHEMATICAL_SYMBOLS_A=new UnicodeBlock("MISCELLANEOUS_MATHEMATICAL_SYMBOLS_A","MISCELLANEOUS MATHEMATICAL SYMBOLS-A","MISCELLANEOUSMATHEMATICALSYMBOLS-A") SUPPLEMENTAL_ARROWS_A=new UnicodeBlock("SUPPLEMENTAL_ARROWS_A","SUPPLEMENTAL ARROWS-A","SUPPLEMENTALARROWS-A") SUPPLEMENTAL_ARROWS_B=new UnicodeBlock("SUPPLEMENTAL_ARROWS_B","SUPPLEMENTAL ARROWS-B","SUPPLEMENTALARROWS-B") MISCELLANEOUS_MATHEMATICAL_SYMBOLS_B=new UnicodeBlock("MISCELLANEOUS_MATHEMATICAL_SYMBOLS_B","MISCELLANEOUS MATHEMATICAL SYMBOLS-B","MISCELLANEOUSMATHEMATICALSYMBOLS-B") SUPPLEMENTAL_MATHEMATICAL_OPERATORS=new UnicodeBlock("SUPPLEMENTAL_MATHEMATICAL_OPERATORS","SUPPLEMENTAL MATHEMATICAL OPERATORS","SUPPLEMENTALMATHEMATICALOPERATORS") 
MISCELLANEOUS_SYMBOLS_AND_ARROWS=new UnicodeBlock("MISCELLANEOUS_SYMBOLS_AND_ARROWS","MISCELLANEOUS SYMBOLS AND ARROWS","MISCELLANEOUSSYMBOLSANDARROWS") KATAKANA_PHONETIC_EXTENSIONS=new UnicodeBlock("KATAKANA_PHONETIC_EXTENSIONS","KATAKANA PHONETIC EXTENSIONS","KATAKANAPHONETICEXTENSIONS") YIJING_HEXAGRAM_SYMBOLS=new UnicodeBlock("YIJING_HEXAGRAM_SYMBOLS","YIJING HEXAGRAM SYMBOLS","YIJINGHEXAGRAMSYMBOLS") VARIATION_SELECTORS=new UnicodeBlock("VARIATION_SELECTORS","VARIATION SELECTORS","VARIATIONSELECTORS") LINEAR_B_SYLLABARY=new UnicodeBlock("LINEAR_B_SYLLABARY","LINEAR B SYLLABARY","LINEARBSYLLABARY") LINEAR_B_IDEOGRAMS=new UnicodeBlock("LINEAR_B_IDEOGRAMS","LINEAR B IDEOGRAMS","LINEARBIDEOGRAMS") AEGEAN_NUMBERS=new UnicodeBlock("AEGEAN_NUMBERS","AEGEAN NUMBERS","AEGEANNUMBERS") OLD_ITALIC=new UnicodeBlock("OLD_ITALIC","OLD ITALIC","OLDITALIC") GOTHIC=new UnicodeBlock("GOTHIC") UGARITIC=new UnicodeBlock("UGARITIC") DESERET=new UnicodeBlock("DESERET") SHAVIAN=new UnicodeBlock("SHAVIAN") OSMANYA=new UnicodeBlock("OSMANYA") CYPRIOT_SYLLABARY=new UnicodeBlock("CYPRIOT_SYLLABARY","CYPRIOT SYLLABARY","CYPRIOTSYLLABARY") BYZANTINE_MUSICAL_SYMBOLS=new UnicodeBlock("BYZANTINE_MUSICAL_SYMBOLS","BYZANTINE MUSICAL SYMBOLS","BYZANTINEMUSICALSYMBOLS") MUSICAL_SYMBOLS=new UnicodeBlock("MUSICAL_SYMBOLS","MUSICAL SYMBOLS","MUSICALSYMBOLS") TAI_XUAN_JING_SYMBOLS=new UnicodeBlock("TAI_XUAN_JING_SYMBOLS","TAI XUAN JING SYMBOLS","TAIXUANJINGSYMBOLS") MATHEMATICAL_ALPHANUMERIC_SYMBOLS=new UnicodeBlock("MATHEMATICAL_ALPHANUMERIC_SYMBOLS","MATHEMATICAL ALPHANUMERIC SYMBOLS","MATHEMATICALALPHANUMERICSYMBOLS") CJK_UNIFIED_IDEOGRAPHS_EXTENSION_B=new UnicodeBlock("CJK_UNIFIED_IDEOGRAPHS_EXTENSION_B","CJK UNIFIED IDEOGRAPHS EXTENSION B","CJKUNIFIEDIDEOGRAPHSEXTENSIONB") CJK_COMPATIBILITY_IDEOGRAPHS_SUPPLEMENT=new UnicodeBlock("CJK_COMPATIBILITY_IDEOGRAPHS_SUPPLEMENT","CJK COMPATIBILITY IDEOGRAPHS SUPPLEMENT","CJKCOMPATIBILITYIDEOGRAPHSSUPPLEMENT") TAGS=new UnicodeBlock("TAGS") VARIATION_SELECTORS_SUPPLEMENT=new UnicodeBlock("VARIATION_SELECTORS_SUPPLEMENT","VARIATION SELECTORS SUPPLEMENT","VARIATIONSELECTORSSUPPLEMENT") SUPPLEMENTARY_PRIVATE_USE_AREA_A=new UnicodeBlock("SUPPLEMENTARY_PRIVATE_USE_AREA_A","SUPPLEMENTARY PRIVATE USE AREA-A","SUPPLEMENTARYPRIVATEUSEAREA-A") SUPPLEMENTARY_PRIVATE_USE_AREA_B=new UnicodeBlock("SUPPLEMENTARY_PRIVATE_USE_AREA_B","SUPPLEMENTARY PRIVATE USE AREA-B","SUPPLEMENTARYPRIVATEUSEAREA-B") HIGH_SURROGATES=new UnicodeBlock("HIGH_SURROGATES","HIGH SURROGATES","HIGHSURROGATES") HIGH_PRIVATE_USE_SURROGATES=new UnicodeBlock("HIGH_PRIVATE_USE_SURROGATES","HIGH PRIVATE USE SURROGATES","HIGHPRIVATEUSESURROGATES") LOW_SURROGATES=new UnicodeBlock("LOW_SURROGATES","LOW SURROGATES","LOWSURROGATES") ARABIC_SUPPLEMENT=new UnicodeBlock("ARABIC_SUPPLEMENT","ARABIC SUPPLEMENT","ARABICSUPPLEMENT") NKO=new UnicodeBlock("NKO") SAMARITAN=new UnicodeBlock("SAMARITAN") MANDAIC=new UnicodeBlock("MANDAIC") ETHIOPIC_SUPPLEMENT=new UnicodeBlock("ETHIOPIC_SUPPLEMENT","ETHIOPIC SUPPLEMENT","ETHIOPICSUPPLEMENT") UNIFIED_CANADIAN_ABORIGINAL_SYLLABICS_EXTENDED=new UnicodeBlock("UNIFIED_CANADIAN_ABORIGINAL_SYLLABICS_EXTENDED","UNIFIED CANADIAN ABORIGINAL SYLLABICS EXTENDED","UNIFIEDCANADIANABORIGINALSYLLABICSEXTENDED") NEW_TAI_LUE=new UnicodeBlock("NEW_TAI_LUE","NEW TAI LUE","NEWTAILUE") BUGINESE=new UnicodeBlock("BUGINESE") TAI_THAM=new UnicodeBlock("TAI_THAM","TAI THAM","TAITHAM") BALINESE=new UnicodeBlock("BALINESE") SUNDANESE=new UnicodeBlock("SUNDANESE") BATAK=new UnicodeBlock("BATAK") LEPCHA=new 
UnicodeBlock("LEPCHA") OL_CHIKI=new UnicodeBlock("OL_CHIKI","OL CHIKI","OLCHIKI") VEDIC_EXTENSIONS=new UnicodeBlock("VEDIC_EXTENSIONS","VEDIC EXTENSIONS","VEDICEXTENSIONS") PHONETIC_EXTENSIONS_SUPPLEMENT=new UnicodeBlock("PHONETIC_EXTENSIONS_SUPPLEMENT","PHONETIC EXTENSIONS SUPPLEMENT","PHONETICEXTENSIONSSUPPLEMENT") COMBINING_DIACRITICAL_MARKS_SUPPLEMENT=new UnicodeBlock("COMBINING_DIACRITICAL_MARKS_SUPPLEMENT","COMBINING DIACRITICAL MARKS SUPPLEMENT","COMBININGDIACRITICALMARKSSUPPLEMENT") GLAGOLITIC=new UnicodeBlock("GLAGOLITIC") LATIN_EXTENDED_C=new UnicodeBlock("LATIN_EXTENDED_C","LATIN EXTENDED-C","LATINEXTENDED-C") COPTIC=new UnicodeBlock("COPTIC") GEORGIAN_SUPPLEMENT=new UnicodeBlock("GEORGIAN_SUPPLEMENT","GEORGIAN SUPPLEMENT","GEORGIANSUPPLEMENT") TIFINAGH=new UnicodeBlock("TIFINAGH") ETHIOPIC_EXTENDED=new UnicodeBlock("ETHIOPIC_EXTENDED","ETHIOPIC EXTENDED","ETHIOPICEXTENDED") CYRILLIC_EXTENDED_A=new UnicodeBlock("CYRILLIC_EXTENDED_A","CYRILLIC EXTENDED-A","CYRILLICEXTENDED-A") SUPPLEMENTAL_PUNCTUATION=new UnicodeBlock("SUPPLEMENTAL_PUNCTUATION","SUPPLEMENTAL PUNCTUATION","SUPPLEMENTALPUNCTUATION") CJK_STROKES=new UnicodeBlock("CJK_STROKES","CJK STROKES","CJKSTROKES") LISU=new UnicodeBlock("LISU") VAI=new UnicodeBlock("VAI") CYRILLIC_EXTENDED_B=new UnicodeBlock("CYRILLIC_EXTENDED_B","CYRILLIC EXTENDED-B","CYRILLICEXTENDED-B") BAMUM=new UnicodeBlock("BAMUM") MODIFIER_TONE_LETTERS=new UnicodeBlock("MODIFIER_TONE_LETTERS","MODIFIER TONE LETTERS","MODIFIERTONELETTERS") LATIN_EXTENDED_D=new UnicodeBlock("LATIN_EXTENDED_D","LATIN EXTENDED-D","LATINEXTENDED-D") SYLOTI_NAGRI=new UnicodeBlock("SYLOTI_NAGRI","SYLOTI NAGRI","SYLOTINAGRI") COMMON_INDIC_NUMBER_FORMS=new UnicodeBlock("COMMON_INDIC_NUMBER_FORMS","COMMON INDIC NUMBER FORMS","COMMONINDICNUMBERFORMS") PHAGS_PA=new UnicodeBlock("PHAGS_PA","PHAGS-PA") SAURASHTRA=new UnicodeBlock("SAURASHTRA") DEVANAGARI_EXTENDED=new UnicodeBlock("DEVANAGARI_EXTENDED","DEVANAGARI EXTENDED","DEVANAGARIEXTENDED") KAYAH_LI=new UnicodeBlock("KAYAH_LI","KAYAH LI","KAYAHLI") REJANG=new UnicodeBlock("REJANG") HANGUL_JAMO_EXTENDED_A=new UnicodeBlock("HANGUL_JAMO_EXTENDED_A","HANGUL JAMO EXTENDED-A","HANGULJAMOEXTENDED-A") JAVANESE=new UnicodeBlock("JAVANESE") CHAM=new UnicodeBlock("CHAM") MYANMAR_EXTENDED_A=new UnicodeBlock("MYANMAR_EXTENDED_A","MYANMAR EXTENDED-A","MYANMAREXTENDED-A") TAI_VIET=new UnicodeBlock("TAI_VIET","TAI VIET","TAIVIET") ETHIOPIC_EXTENDED_A=new UnicodeBlock("ETHIOPIC_EXTENDED_A","ETHIOPIC EXTENDED-A","ETHIOPICEXTENDED-A") MEETEI_MAYEK=new UnicodeBlock("MEETEI_MAYEK","MEETEI MAYEK","MEETEIMAYEK") HANGUL_JAMO_EXTENDED_B=new UnicodeBlock("HANGUL_JAMO_EXTENDED_B","HANGUL JAMO EXTENDED-B","HANGULJAMOEXTENDED-B") VERTICAL_FORMS=new UnicodeBlock("VERTICAL_FORMS","VERTICAL FORMS","VERTICALFORMS") ANCIENT_GREEK_NUMBERS=new UnicodeBlock("ANCIENT_GREEK_NUMBERS","ANCIENT GREEK NUMBERS","ANCIENTGREEKNUMBERS") ANCIENT_SYMBOLS=new UnicodeBlock("ANCIENT_SYMBOLS","ANCIENT SYMBOLS","ANCIENTSYMBOLS") PHAISTOS_DISC=new UnicodeBlock("PHAISTOS_DISC","PHAISTOS DISC","PHAISTOSDISC") LYCIAN=new UnicodeBlock("LYCIAN") CARIAN=new UnicodeBlock("CARIAN") OLD_PERSIAN=new UnicodeBlock("OLD_PERSIAN","OLD PERSIAN","OLDPERSIAN") IMPERIAL_ARAMAIC=new UnicodeBlock("IMPERIAL_ARAMAIC","IMPERIAL ARAMAIC","IMPERIALARAMAIC") PHOENICIAN=new UnicodeBlock("PHOENICIAN") LYDIAN=new UnicodeBlock("LYDIAN") KHAROSHTHI=new UnicodeBlock("KHAROSHTHI") OLD_SOUTH_ARABIAN=new UnicodeBlock("OLD_SOUTH_ARABIAN","OLD SOUTH ARABIAN","OLDSOUTHARABIAN") AVESTAN=new UnicodeBlock("AVESTAN") 
INSCRIPTIONAL_PARTHIAN=new UnicodeBlock("INSCRIPTIONAL_PARTHIAN","INSCRIPTIONAL PARTHIAN","INSCRIPTIONALPARTHIAN") INSCRIPTIONAL_PAHLAVI=new UnicodeBlock("INSCRIPTIONAL_PAHLAVI","INSCRIPTIONAL PAHLAVI","INSCRIPTIONALPAHLAVI") OLD_TURKIC=new UnicodeBlock("OLD_TURKIC","OLD TURKIC","OLDTURKIC") RUMI_NUMERAL_SYMBOLS=new UnicodeBlock("RUMI_NUMERAL_SYMBOLS","RUMI NUMERAL SYMBOLS","RUMINUMERALSYMBOLS") BRAHMI=new UnicodeBlock("BRAHMI") KAITHI=new UnicodeBlock("KAITHI") CUNEIFORM=new UnicodeBlock("CUNEIFORM") CUNEIFORM_NUMBERS_AND_PUNCTUATION=new UnicodeBlock("CUNEIFORM_NUMBERS_AND_PUNCTUATION","CUNEIFORM NUMBERS AND PUNCTUATION","CUNEIFORMNUMBERSANDPUNCTUATION") EGYPTIAN_HIEROGLYPHS=new UnicodeBlock("EGYPTIAN_HIEROGLYPHS","EGYPTIAN HIEROGLYPHS","EGYPTIANHIEROGLYPHS") BAMUM_SUPPLEMENT=new UnicodeBlock("BAMUM_SUPPLEMENT","BAMUM SUPPLEMENT","BAMUMSUPPLEMENT") KANA_SUPPLEMENT=new UnicodeBlock("KANA_SUPPLEMENT","KANA SUPPLEMENT","KANASUPPLEMENT") ANCIENT_GREEK_MUSICAL_NOTATION=new UnicodeBlock("ANCIENT_GREEK_MUSICAL_NOTATION","ANCIENT GREEK MUSICAL NOTATION","ANCIENTGREEKMUSICALNOTATION") COUNTING_ROD_NUMERALS=new UnicodeBlock("COUNTING_ROD_NUMERALS","COUNTING ROD NUMERALS","COUNTINGRODNUMERALS") MAHJONG_TILES=new UnicodeBlock("MAHJONG_TILES","MAHJONG TILES","MAHJONGTILES") DOMINO_TILES=new UnicodeBlock("DOMINO_TILES","DOMINO TILES","DOMINOTILES") PLAYING_CARDS=new UnicodeBlock("PLAYING_CARDS","PLAYING CARDS","PLAYINGCARDS") ENCLOSED_ALPHANUMERIC_SUPPLEMENT=new UnicodeBlock("ENCLOSED_ALPHANUMERIC_SUPPLEMENT","ENCLOSED ALPHANUMERIC SUPPLEMENT","ENCLOSEDALPHANUMERICSUPPLEMENT") ENCLOSED_IDEOGRAPHIC_SUPPLEMENT=new UnicodeBlock("ENCLOSED_IDEOGRAPHIC_SUPPLEMENT","ENCLOSED IDEOGRAPHIC SUPPLEMENT","ENCLOSEDIDEOGRAPHICSUPPLEMENT") MISCELLANEOUS_SYMBOLS_AND_PICTOGRAPHS=new UnicodeBlock("MISCELLANEOUS_SYMBOLS_AND_PICTOGRAPHS","MISCELLANEOUS SYMBOLS AND PICTOGRAPHS","MISCELLANEOUSSYMBOLSANDPICTOGRAPHS") EMOTICONS=new UnicodeBlock("EMOTICONS") TRANSPORT_AND_MAP_SYMBOLS=new UnicodeBlock("TRANSPORT_AND_MAP_SYMBOLS","TRANSPORT AND MAP SYMBOLS","TRANSPORTANDMAPSYMBOLS") ALCHEMICAL_SYMBOLS=new UnicodeBlock("ALCHEMICAL_SYMBOLS","ALCHEMICAL SYMBOLS","ALCHEMICALSYMBOLS") CJK_UNIFIED_IDEOGRAPHS_EXTENSION_C=new UnicodeBlock("CJK_UNIFIED_IDEOGRAPHS_EXTENSION_C","CJK UNIFIED IDEOGRAPHS EXTENSION C","CJKUNIFIEDIDEOGRAPHSEXTENSIONC") CJK_UNIFIED_IDEOGRAPHS_EXTENSION_D=new UnicodeBlock("CJK_UNIFIED_IDEOGRAPHS_EXTENSION_D","CJK UNIFIED IDEOGRAPHS EXTENSION D","CJKUNIFIEDIDEOGRAPHSEXTENSIOND") ARABIC_EXTENDED_A=new UnicodeBlock("ARABIC_EXTENDED_A","ARABIC EXTENDED-A","ARABICEXTENDED-A") SUNDANESE_SUPPLEMENT=new UnicodeBlock("SUNDANESE_SUPPLEMENT","SUNDANESE SUPPLEMENT","SUNDANESESUPPLEMENT") MEETEI_MAYEK_EXTENSIONS=new UnicodeBlock("MEETEI_MAYEK_EXTENSIONS","MEETEI MAYEK EXTENSIONS","MEETEIMAYEKEXTENSIONS") MEROITIC_HIEROGLYPHS=new UnicodeBlock("MEROITIC_HIEROGLYPHS","MEROITIC HIEROGLYPHS","MEROITICHIEROGLYPHS") MEROITIC_CURSIVE=new UnicodeBlock("MEROITIC_CURSIVE","MEROITIC CURSIVE","MEROITICCURSIVE") SORA_SOMPENG=new UnicodeBlock("SORA_SOMPENG","SORA SOMPENG","SORASOMPENG") CHAKMA=new UnicodeBlock("CHAKMA") SHARADA=new UnicodeBlock("SHARADA") TAKRI=new UnicodeBlock("TAKRI") MIAO=new UnicodeBlock("MIAO") ARABIC_MATHEMATICAL_ALPHABETIC_SYMBOLS=new UnicodeBlock("ARABIC_MATHEMATICAL_ALPHABETIC_SYMBOLS","ARABIC MATHEMATICAL ALPHABETIC SYMBOLS","ARABICMATHEMATICALALPHABETICSYMBOLS") blockStarts=[] blocks= + Instance Initializer: + Constructors: + constructor UnicodeBlock(idName) throws: + constructor UnicodeBlock(idName,isMap) throws: 
+ cond-branch cond:isMap + true branch : + map.put(idName,this) + false branch : + + constructor UnicodeBlock(idName,alias) throws: + map.put(alias,this) + constructor UnicodeBlock(idName,aliases) throws: + String + alias + aliases + map.put(alias,this) + Methods: + func of(c) throws: + return of((int)c) + func of(codePoint) throws: + cond-branch cond:isValidCodePoint(codePoint) + true branch : + new IllegalArgumentException() + false branch : + + Decl: top,bottom,current + bottom Assign 0 + top Assign blockStarts.length + current Assign top Div 2 + while top Sub bottom GT 1 cond-branch cond:codePoint GE + true branch : + bottom Assign current + false branch : + top Assign current + + current Assign (top Add bottom) Div 2 + + return + LocalClasses: + LocalInterfaces: + LocalInterfaces: + diff --git a/src/MapleFE/test/openjdk/Character-2.java b/src/MapleFE/test/java/openjdk/Character-2.java similarity index 100% rename from src/MapleFE/test/openjdk/Character-2.java rename to src/MapleFE/test/java/openjdk/Character-2.java diff --git a/src/MapleFE/test/openjdk/Character-2.java.result b/src/MapleFE/test/java/openjdk/Character-2.java.result similarity index 77% rename from src/MapleFE/test/openjdk/Character-2.java.result rename to src/MapleFE/test/java/openjdk/Character-2.java.result index 3dba7f116e0ccd4f8eac2e8ff074b1fc0010531c..57633d3b59ff2a36d79c9557e9d32cf09d0b6aff 100644 Binary files a/src/MapleFE/test/openjdk/Character-2.java.result and b/src/MapleFE/test/java/openjdk/Character-2.java.result differ diff --git a/src/MapleFE/test/openjdk/Character-3.java b/src/MapleFE/test/java/openjdk/Character-3.java similarity index 100% rename from src/MapleFE/test/openjdk/Character-3.java rename to src/MapleFE/test/java/openjdk/Character-3.java diff --git a/src/MapleFE/test/openjdk/Character-3.java.result b/src/MapleFE/test/java/openjdk/Character-3.java.result similarity index 96% rename from src/MapleFE/test/openjdk/Character-3.java.result rename to src/MapleFE/test/java/openjdk/Character-3.java.result index 5e7b55c8cfbffe30ecb9973faf6c0c2d1ff48e86..96e1415cc94ad9c912b3a7ff8d4185b15f7e08d1 100644 --- a/src/MapleFE/test/openjdk/Character-3.java.result +++ b/src/MapleFE/test/java/openjdk/Character-3.java.result @@ -7,7 +7,7 @@ class Character Instance Initializer: Constructors: Methods: - func isWhitespace() throws: + func isWhitespace(codePoint) throws: cond-branch cond:(codePoint GE 28 Land codePoint LE 32) Lor (codePoint GE 9 Land codePoint LE 13) true branch : return true diff --git a/src/MapleFE/test/openjdk/Character.java b/src/MapleFE/test/java/openjdk/Character.java similarity index 100% rename from src/MapleFE/test/openjdk/Character.java rename to src/MapleFE/test/java/openjdk/Character.java diff --git a/src/MapleFE/test/openjdk/Character.java.result b/src/MapleFE/test/java/openjdk/Character.java.result similarity index 30% rename from src/MapleFE/test/openjdk/Character.java.result rename to src/MapleFE/test/java/openjdk/Character.java.result index f074c02b6c61834e1a3f55ab5542329269e33d9a..3ab414e2dfa0b84f28a4e026a1d301519307fc0f 100644 --- a/src/MapleFE/test/openjdk/Character.java.result +++ b/src/MapleFE/test/java/openjdk/Character.java.result @@ -4,7 +4,7 @@ Matched 21 tokens. Matched 28 tokens. Matched 35 tokens. Matched 42 tokens. -Matched 11999 tokens. +Matched 11996 tokens. 
============= Module =========== == Sub Tree == package java.lang @@ -21,63 +21,61 @@ import java.util.Map == Sub Tree == class Character Fields: - MIN_RADIX=2 MAX_RADIX=36 MIN_VALUE=0 MAX_VALUE=65535 TYPE=() UNASSIGNED=0 UPPERCASE_LETTER=1 LOWERCASE_LETTER=2 TITLECASE_LETTER=3 MODIFIER_LETTER=4 OTHER_LETTER=5 NON_SPACING_MARK=6 ENCLOSING_MARK=7 COMBINING_SPACING_MARK=8 DECIMAL_DIGIT_NUMBER=9 LETTER_NUMBER=10 OTHER_NUMBER=11 SPACE_SEPARATOR=12 LINE_SEPARATOR=13 PARAGRAPH_SEPARATOR=14 CONTROL=15 FORMAT=16 PRIVATE_USE=18 SURROGATE=19 DASH_PUNCTUATION=20 START_PUNCTUATION=21 END_PUNCTUATION=22 CONNECTOR_PUNCTUATION=23 OTHER_PUNCTUATION=24 MATH_SYMBOL=25 CURRENCY_SYMBOL=26 MODIFIER_SYMBOL=27 OTHER_SYMBOL=28 INITIAL_QUOTE_PUNCTUATION=29 FINAL_QUOTE_PUNCTUATION=30 ERROR=-1 DIRECTIONALITY_UNDEFINED=Sub - 1 DIRECTIONALITY_LEFT_TO_RIGHT=0 DIRECTIONALITY_RIGHT_TO_LEFT=1 DIRECTIONALITY_RIGHT_TO_LEFT_ARABIC=2 DIRECTIONALITY_EUROPEAN_NUMBER=3 DIRECTIONALITY_EUROPEAN_NUMBER_SEPARATOR=4 DIRECTIONALITY_EUROPEAN_NUMBER_TERMINATOR=5 DIRECTIONALITY_ARABIC_NUMBER=6 DIRECTIONALITY_COMMON_NUMBER_SEPARATOR=7 DIRECTIONALITY_NONSPACING_MARK=8 DIRECTIONALITY_BOUNDARY_NEUTRAL=9 DIRECTIONALITY_PARAGRAPH_SEPARATOR=10 DIRECTIONALITY_SEGMENT_SEPARATOR=11 DIRECTIONALITY_WHITESPACE=12 DIRECTIONALITY_OTHER_NEUTRALS=13 DIRECTIONALITY_LEFT_TO_RIGHT_EMBEDDING=14 DIRECTIONALITY_LEFT_TO_RIGHT_OVERRIDE=15 DIRECTIONALITY_RIGHT_TO_LEFT_EMBEDDING=16 DIRECTIONALITY_RIGHT_TO_LEFT_OVERRIDE=17 DIRECTIONALITY_POP_DIRECTIONAL_FORMAT=18 MIN_HIGH_SURROGATE=55296 MAX_HIGH_SURROGATE=56319 MIN_LOW_SURROGATE=56320 MAX_LOW_SURROGATE=57343 MIN_SURROGATE=MIN_HIGH_SURROGATE MAX_SURROGATE=MAX_LOW_SURROGATE MIN_SUPPLEMENTARY_CODE_POINT=65536 MIN_CODE_POINT=0 MAX_CODE_POINT=1114111 DIRECTIONALITY= value serialVersionUID=1796875896 SIZE=16 BYTES= + MIN_RADIX=2 MAX_RADIX=36 MIN_VALUE=0 MAX_VALUE=65535 TYPE=()char.getComponentType() UNASSIGNED=0 UPPERCASE_LETTER=1 LOWERCASE_LETTER=2 TITLECASE_LETTER=3 MODIFIER_LETTER=4 OTHER_LETTER=5 NON_SPACING_MARK=6 ENCLOSING_MARK=7 COMBINING_SPACING_MARK=8 DECIMAL_DIGIT_NUMBER=9 LETTER_NUMBER=10 OTHER_NUMBER=11 SPACE_SEPARATOR=12 LINE_SEPARATOR=13 PARAGRAPH_SEPARATOR=14 CONTROL=15 FORMAT=16 PRIVATE_USE=18 SURROGATE=19 DASH_PUNCTUATION=20 START_PUNCTUATION=21 END_PUNCTUATION=22 CONNECTOR_PUNCTUATION=23 OTHER_PUNCTUATION=24 MATH_SYMBOL=25 CURRENCY_SYMBOL=26 MODIFIER_SYMBOL=27 OTHER_SYMBOL=28 INITIAL_QUOTE_PUNCTUATION=29 FINAL_QUOTE_PUNCTUATION=30 ERROR=-1 DIRECTIONALITY_UNDEFINED=-1 DIRECTIONALITY_LEFT_TO_RIGHT=0 DIRECTIONALITY_RIGHT_TO_LEFT=1 DIRECTIONALITY_RIGHT_TO_LEFT_ARABIC=2 DIRECTIONALITY_EUROPEAN_NUMBER=3 DIRECTIONALITY_EUROPEAN_NUMBER_SEPARATOR=4 DIRECTIONALITY_EUROPEAN_NUMBER_TERMINATOR=5 DIRECTIONALITY_ARABIC_NUMBER=6 DIRECTIONALITY_COMMON_NUMBER_SEPARATOR=7 DIRECTIONALITY_NONSPACING_MARK=8 DIRECTIONALITY_BOUNDARY_NEUTRAL=9 DIRECTIONALITY_PARAGRAPH_SEPARATOR=10 DIRECTIONALITY_SEGMENT_SEPARATOR=11 DIRECTIONALITY_WHITESPACE=12 DIRECTIONALITY_OTHER_NEUTRALS=13 DIRECTIONALITY_LEFT_TO_RIGHT_EMBEDDING=14 DIRECTIONALITY_LEFT_TO_RIGHT_OVERRIDE=15 DIRECTIONALITY_RIGHT_TO_LEFT_EMBEDDING=16 DIRECTIONALITY_RIGHT_TO_LEFT_OVERRIDE=17 DIRECTIONALITY_POP_DIRECTIONAL_FORMAT=18 MIN_HIGH_SURROGATE=55296 MAX_HIGH_SURROGATE=56319 MIN_LOW_SURROGATE=56320 MAX_LOW_SURROGATE=57343 MIN_SURROGATE=MIN_HIGH_SURROGATE MAX_SURROGATE=MAX_LOW_SURROGATE MIN_SUPPLEMENTARY_CODE_POINT=65536 MIN_CODE_POINT=0 MAX_CODE_POINT=1114111 DIRECTIONALITY= value serialVersionUID=1796875896 SIZE=16 BYTES=SIZE Div Byte.SIZE Instance Initializer: 
Constructors: - constructor Character() throws: + constructor Character(value) throws: this.value Assign value Methods: - func valueOf() throws: + func valueOf(c) throws: cond-branch cond:c LE 127 true branch : return false branch : - return new Character + return new Character(c) func charValue() throws: return value func hashCode() throws: return Character.hashCode(value) - func hashCode() throws: + func hashCode(value) throws: return (int)value - func equals() throws: - cond-branch cond: + func equals(obj) throws: + cond-branch cond:obj instanceof Character true branch : - return value EQ ((Character)obj)charValue + return value EQ (Character)obj.charValue() false branch : return false func toString() throws: - var:buf=value[] + Decl: buf=value[] return String.valueOf(buf) - func toString() throws: + func toString(c) throws: return String.valueOf(c) - func isValidCodePoint() throws: - var:plane=codePoint Zext 16 + func isValidCodePoint(codePoint) throws: + Decl: plane=codePoint Zext 16 return plane LT ((MAX_CODE_POINT Add 1) Zext 16) - func isBmpCodePoint() throws: + func isBmpCodePoint(codePoint) throws: return codePoint Zext 16 EQ 0 - func isSupplementaryCodePoint() throws: + func isSupplementaryCodePoint(codePoint) throws: return codePoint GE MIN_SUPPLEMENTARY_CODE_POINT Land codePoint LT MAX_CODE_POINT Add 1 - func isHighSurrogate() throws: + func isHighSurrogate(ch) throws: return ch GE MIN_HIGH_SURROGATE Land ch LT (MAX_HIGH_SURROGATE Add 1) - func isLowSurrogate() throws: + func isLowSurrogate(ch) throws: return ch GE MIN_LOW_SURROGATE Land ch LT (MAX_LOW_SURROGATE Add 1) - func isSurrogate() throws: + func isSurrogate(ch) throws: return ch GE MIN_SURROGATE Land ch LT (MAX_SURROGATE Add 1) - func isSurrogatePair() throws: + func isSurrogatePair(high,low) throws: return isHighSurrogate(high) Land isLowSurrogate(low) - func charCount() throws: + func charCount(codePoint) throws: return - func toCodePoint() throws: + func toCodePoint(high,low) throws: return ((high Shl 10) Add low) Add (MIN_SUPPLEMENTARY_CODE_POINT Sub (MIN_HIGH_SURROGATE Shl 10) Sub MIN_LOW_SURROGATE) - func codePointAt() throws: - var:c1=seq.charAt(index) - cond-branch cond:isHighSurrogate(c1) Land Inc - index LT seq.length() + func codePointAt(seq,index) throws: + Decl: c1=seq.charAt(index) + cond-branch cond:isHighSurrogate(c1) Land PreInc index LT seq.length() true branch : - var:c2=seq.charAt(index) + Decl: c2=seq.charAt(index) cond-branch cond:isLowSurrogate(c2) true branch : return toCodePoint(c1,c2) @@ -86,21 +84,20 @@ class Character false branch : return c1 - func codePointAt() throws: + func codePointAt(a,index) throws: return codePointAtImpl(a,index,a.length) - func codePointAt() throws: + func codePointAt(a,index,limit) throws: cond-branch cond:index GE limit Lor limit LT 0 Lor limit GT a.length true branch : - new IndexOutOfBoundsException + new IndexOutOfBoundsException() false branch : return codePointAtImpl(a,index,limit) - func codePointAtImpl() throws: - var:c1= - cond-branch cond:isHighSurrogate(c1) Land Inc - index LT limit + func codePointAtImpl(a,index,limit) throws: + Decl: c1= + cond-branch cond:isHighSurrogate(c1) Land PreInc index LT limit true branch : - var:c2= + Decl: c2= cond-branch cond:isLowSurrogate(c2) true branch : return toCodePoint(c1,c2) @@ -109,13 +106,11 @@ class Character false branch : return c1 - func codePointBefore() throws: - var:c2=seq.charAt(Dec - index) + func codePointBefore(seq,index) throws: + Decl: c2=seq.charAt(PreDec index) cond-branch cond:isLowSurrogate(c2) 
Land index GT 0 true branch : - var:c1=seq.charAt(Dec - index) + Decl: c1=seq.charAt(PreDec index) cond-branch cond:isHighSurrogate(c1) true branch : return toCodePoint(c1,c2) @@ -124,20 +119,20 @@ class Character false branch : return c2 - func codePointBefore() throws: + func codePointBefore(a,index) throws: return codePointBeforeImpl(a,index,0) - func codePointBefore() throws: + func codePointBefore(a,index,start) throws: cond-branch cond:index LE start Lor start LT 0 Lor start GE a.length true branch : - new IndexOutOfBoundsException + new IndexOutOfBoundsException() false branch : return codePointBeforeImpl(a,index,start) - func codePointBeforeImpl() throws: - var:c2= + func codePointBeforeImpl(a,index,start) throws: + Decl: c2= cond-branch cond:isLowSurrogate(c2) Land index GT start true branch : - var:c1= + Decl: c1= cond-branch cond:isHighSurrogate(c1) true branch : return toCodePoint(c1,c2) @@ -146,11 +141,11 @@ class Character false branch : return c2 - func highSurrogate() throws: + func highSurrogate(codePoint) throws: return (char)((codePoint Zext 10) Add (MIN_HIGH_SURROGATE Sub (MIN_SUPPLEMENTARY_CODE_POINT Zext 10))) - func lowSurrogate() throws: + func lowSurrogate(codePoint) throws: return (char)((codePoint Band 1023) Add MIN_LOW_SURROGATE) - func toChars() throws: + func toChars(codePoint,dst,dstIndex) throws: cond-branch cond:isBmpCodePoint(codePoint) true branch : Assign (char)codePoint @@ -161,150 +156,200 @@ class Character toSurrogates(codePoint,dst,dstIndex) return 2 false branch : - new IllegalArgumentException + new IllegalArgumentException() - - func toChars() throws: + func toChars(codePoint) throws: cond-branch cond:isBmpCodePoint(codePoint) true branch : return false branch : cond-branch cond:isValidCodePoint(codePoint) true branch : - var:result= - + Decl: result= + toSurrogates(codePoint,result,0) + return result false branch : - new IllegalArgumentException + new IllegalArgumentException() - - func toSurrogates() throws: + func toSurrogates(codePoint,dst,index) throws: Assign lowSurrogate(codePoint) Assign highSurrogate(codePoint) - func codePointCount() throws: - var:length=seq.length() + func codePointCount(seq,beginIndex,endIndex) throws: + Decl: length=seq.length() cond-branch cond:beginIndex LT 0 Lor endIndex GT length Lor beginIndex GT endIndex true branch : - new IndexOutOfBoundsException + new IndexOutOfBoundsException() false branch : - var:n=endIndex Sub beginIndex + Decl: n=endIndex Sub beginIndex for ( ) - cond-branch cond:isHighSurrogate(seq.charAt( iInc + cond-branch cond:isHighSurrogate(seq.charAt(i Inc )) Land i LT endIndex Land isLowSurrogate(seq.charAt(i)) true branch : - nDec + n Dec - iInc + i Inc false branch : return n - func codePointCount() throws: + func codePointCount(a,offset,count) throws: cond-branch cond:count GT a.length Sub offset Lor offset LT 0 Lor count LT 0 true branch : - new IndexOutOfBoundsException + new IndexOutOfBoundsException() false branch : return codePointCountImpl(a,offset,count) - func codePointCountImpl() throws: - var:endIndex=offset Add count - var:n=count + func codePointCountImpl(a,offset,count) throws: + Decl: endIndex=offset Add count + Decl: n=count for ( ) - cond-branch cond:isHighSurrogate(a, iInc + cond-branch cond:isHighSurrogate(a,i Inc ) Land i LT endIndex Land isLowSurrogate(a,i) true branch : - nDec + n Dec - iInc + i Inc false branch : return n - func offsetByCodePoints() throws: - var:length=seq.length() + func offsetByCodePoints(seq,index,codePointOffset) throws: + Decl: length=seq.length() 
cond-branch cond:index LT 0 Lor index GT length true branch : - new IndexOutOfBoundsException + new IndexOutOfBoundsException() false branch : - var:x=index + Decl: x=index cond-branch cond:codePointOffset GE 0 true branch : - var:i + Decl: i + for ( ) + cond-branch cond:isHighSurrogate(seq.charAt(x Inc +)) Land x LT length Land isLowSurrogate(seq.charAt(x)) + true branch : + x Inc + + false branch : + + + cond-branch cond:i LT codePointOffset + true branch : + new IndexOutOfBoundsException() + false branch : false branch : - var:i + Decl: i + for ( ) + cond-branch cond:isLowSurrogate(seq.charAt(PreDec x)) Land x GT 0 Land isHighSurrogate(seq.charAt(x Sub 1)) + true branch : + x Dec + + false branch : + + + cond-branch cond:i LT 0 + true branch : + new IndexOutOfBoundsException() + false branch : return x - func offsetByCodePoints() throws: + func offsetByCodePoints(a,start,count,index,codePointOffset) throws: cond-branch cond:count GT a.length Sub start Lor start LT 0 Lor count LT 0 Lor index LT start Lor index GT start Add count true branch : - new IndexOutOfBoundsException + new IndexOutOfBoundsException() false branch : return offsetByCodePointsImpl(a,start,count,index,codePointOffset) - func offsetByCodePointsImpl() throws: - var:x=index + func offsetByCodePointsImpl(a,start,count,index,codePointOffset) throws: + Decl: x=index cond-branch cond:codePointOffset GE 0 true branch : - var:limit=start Add count + Decl: limit=start Add count + Decl: i + for ( ) + cond-branch cond:isHighSurrogate(a,x Inc +) Land x LT limit Land isLowSurrogate(a,x) + true branch : + x Inc + + false branch : + + + cond-branch cond:i LT codePointOffset + true branch : + new IndexOutOfBoundsException() + false branch : false branch : - var:i + Decl: i + for ( ) + cond-branch cond:isLowSurrogate(a,PreDec x) Land x GT start Land isHighSurrogate(a,x Sub 1) + true branch : + x Dec + + false branch : + + + cond-branch cond:i LT 0 + true branch : + new IndexOutOfBoundsException() + false branch : return x - func isLowerCase() throws: + func isLowerCase(ch) throws: return isLowerCase((int)ch) - func isLowerCase() throws: + func isLowerCase(codePoint) throws: return isLowerCaseImpl(codePoint) - func isLowerCaseImpl() throws: - func isUpperCase() throws: + func isLowerCaseImpl(codePoint) throws: + func isUpperCase(ch) throws: return isUpperCase((int)ch) - func isUpperCase() throws: + func isUpperCase(codePoint) throws: return isUpperCaseImpl(codePoint) - func isUpperCaseImpl() throws: - func isTitleCase() throws: + func isUpperCaseImpl(codePoint) throws: + func isTitleCase(ch) throws: return isTitleCase((int)ch) - func isTitleCase() throws: + func isTitleCase(codePoint) throws: return isTitleCaseImpl(codePoint) - func isTitleCaseImpl() throws: - func isDigit() throws: + func isTitleCaseImpl(codePoint) throws: + func isDigit(ch) throws: return isDigit((int)ch) - func isDigit() throws: + func isDigit(codePoint) throws: return isDigitImpl(codePoint) - func isDigitImpl() throws: - func isDefined() throws: + func isDigitImpl(codePoint) throws: + func isDefined(ch) throws: return isDefined((int)ch) - func isDefined() throws: + func isDefined(codePoint) throws: return isDefinedImpl(codePoint) - func isDefinedImpl() throws: - func isLetter() throws: + func isDefinedImpl(codePoint) throws: + func isLetter(ch) throws: return isLetter((int)ch) - func isLetter() throws: + func isLetter(codePoint) throws: return isLetterImpl(codePoint) - func isLetterImpl() throws: - func isLetterOrDigit() throws: + func isLetterImpl(codePoint) 
throws: + func isLetterOrDigit(ch) throws: return isLetterOrDigit((int)ch) - func isLetterOrDigit() throws: + func isLetterOrDigit(codePoint) throws: return isLetterOrDigitImpl(codePoint) - func isLetterOrDigitImpl() throws: - func isJavaLetter() throws: + func isLetterOrDigitImpl(codePoint) throws: + func isJavaLetter(ch) throws: return isJavaIdentifierStart(ch) - func isJavaLetterOrDigit() throws: + func isJavaLetterOrDigit(ch) throws: return isJavaIdentifierPart(ch) - func isAlphabetic() throws: + func isAlphabetic(codePoint) throws: return isAlphabeticImpl(codePoint) - func isAlphabeticImpl() throws: - func isIdeographic() throws: + func isAlphabeticImpl(codePoint) throws: + func isIdeographic(codePoint) throws: return isIdeographicImpl(codePoint) - func isIdeographicImpl() throws: - func isJavaIdentifierStart() throws: + func isIdeographicImpl(codePoint) throws: + func isJavaIdentifierStart(ch) throws: return isJavaIdentifierStart((int)ch) - func isJavaIdentifierStart() throws: + func isJavaIdentifierStart(codePoint) throws: cond-branch cond:codePoint LT 64 true branch : return (codePoint EQ $) @@ -314,11 +359,10 @@ class Character return (2147483630 Band (1 Shl (codePoint Sub 64))) NE 0 false branch : - return ((1 Shl getType(codePoint)) Band ((1 Shl UPPERCASE_LETTER) Bor (1 Shl LOWERCASE_LETTER) Bor (1 Shl TITLECASE_LETTER) Bor (1 Shl MODIFIER_LETTER) Bor (1 Shl OTHER_LETTER) Bor (1 Shl CURRENCY_SYMBOL) Bor (1 Shl CONNECTOR_PUNCTUATION) Bor (1 Shl LETTER_NUMBER))) NE 0 - func isJavaIdentifierPart() throws: + func isJavaIdentifierPart(ch) throws: return isJavaIdentifierPart((int)ch) - func isJavaIdentifierPart() throws: + func isJavaIdentifierPart(codePoint) throws: cond-branch cond:codePoint LT 64 true branch : return (-253953 Band (1 Shl codePoint)) NE 0 @@ -328,26 +372,25 @@ class Character return (2147483630 Band (1 Shl (codePoint Sub 64))) NE 0 false branch : - return ((1 Shl getType(codePoint)) Band ((1 Shl UPPERCASE_LETTER) Bor (1 Shl LOWERCASE_LETTER) Bor (1 Shl TITLECASE_LETTER) Bor (1 Shl MODIFIER_LETTER) Bor (1 Shl OTHER_LETTER) Bor (1 Shl CURRENCY_SYMBOL) Bor (1 Shl CONNECTOR_PUNCTUATION) Bor (1 Shl DECIMAL_DIGIT_NUMBER) Bor (1 Shl LETTER_NUMBER) Bor (1 Shl FORMAT) Bor (1 Shl COMBINING_SPACING_MARK) Bor (1 Shl NON_SPACING_MARK))) NE 0 Lor (codePoint GE 0 Land codePoint LE 8) Lor (codePoint GE 14 Land codePoint LE 27) Lor (codePoint GE 127 Land codePoint LE 159) - func isUnicodeIdentifierStart() throws: + func isUnicodeIdentifierStart(ch) throws: return isUnicodeIdentifierStart((int)ch) - func isUnicodeIdentifierStart() throws: + func isUnicodeIdentifierStart(codePoint) throws: return isUnicodeIdentifierStartImpl(codePoint) - func isUnicodeIdentifierStartImpl() throws: - func isUnicodeIdentifierPart() throws: + func isUnicodeIdentifierStartImpl(codePoint) throws: + func isUnicodeIdentifierPart(ch) throws: return isUnicodeIdentifierPart((int)ch) - func isUnicodeIdentifierPart() throws: + func isUnicodeIdentifierPart(codePoint) throws: return isUnicodeIdentifierPartImpl(codePoint) - func isUnicodeIdentifierPartImpl() throws: - func isIdentifierIgnorable() throws: + func isUnicodeIdentifierPartImpl(codePoint) throws: + func isIdentifierIgnorable(ch) throws: return isIdentifierIgnorable((int)ch) - func isIdentifierIgnorable() throws: + func isIdentifierIgnorable(codePoint) throws: return isIdentifierIgnorableImpl(codePoint) - func isIdentifierIgnorableImpl() throws: - func toLowerCase() throws: + func isIdentifierIgnorableImpl(codePoint) throws: + func toLowerCase(ch) throws: 
return (char)toLowerCase((int)ch) - func toLowerCase() throws: + func toLowerCase(codePoint) throws: cond-branch cond:codePoint GE A Land codePoint LE Z true branch : return codePoint Add (a Sub A) @@ -359,10 +402,10 @@ class Character false branch : return toLowerCaseImpl(codePoint) - func toLowerCaseImpl() throws: - func toUpperCase() throws: + func toLowerCaseImpl(codePoint) throws: + func toUpperCase(ch) throws: return (char)toUpperCase((int)ch) - func toUpperCase() throws: + func toUpperCase(codePoint) throws: cond-branch cond:codePoint GE a Land codePoint LE z true branch : return codePoint Sub (a Sub A) @@ -374,33 +417,44 @@ class Character false branch : return toUpperCaseImpl(codePoint) - func toUpperCaseImpl() throws: - func toTitleCase() throws: + func toUpperCaseImpl(codePoint) throws: + func toTitleCase(ch) throws: return (char)toTitleCase((int)ch) - func toTitleCase() throws: + func toTitleCase(codePoint) throws: return toTitleCaseImpl(codePoint) - func toTitleCaseImpl() throws: - func digit() throws: + func toTitleCaseImpl(codePoint) throws: + func digit(ch,radix) throws: return digit((int)ch,radix) - func digit() throws: + func digit(codePoint,radix) throws: cond-branch cond:radix LT MIN_RADIX Lor radix GT MAX_RADIX true branch : - return Sub - 1 + return -1 false branch : cond-branch cond:codePoint LT 128 true branch : - var:result=Sub - 1 + Decl: result=-1 + cond-branch cond:0 LE codePoint Land codePoint LE 9 + true branch : + result Assign codePoint Sub 0 + false branch : + cond-branch cond:a LE codePoint Land codePoint LE z + true branch : + result Assign 10 Add (codePoint Sub a) + false branch : + cond-branch cond:A LE codePoint Land codePoint LE Z + true branch : + result Assign 10 Add (codePoint Sub A) + false branch : + return false branch : return digitImpl(codePoint,radix) - func digitImpl() throws: - func getNumericValue() throws: + func digitImpl(codePoint,radix) throws: + func getNumericValue(ch) throws: return getNumericValue((int)ch) - func getNumericValue() throws: + func getNumericValue(codePoint) throws: cond-branch cond:codePoint LT 128 true branch : cond-branch cond:codePoint GE 0 Land codePoint LE 9 @@ -408,7 +462,17 @@ class Character return codePoint Sub 0 false branch : + cond-branch cond:codePoint GE a Land codePoint LE z + true branch : + return codePoint Sub (a Sub 10) + false branch : + cond-branch cond:codePoint GE A Land codePoint LE Z + true branch : + return codePoint Sub (A Sub 10) + false branch : + + return -1 false branch : cond-branch cond:codePoint GE 65313 Land codePoint LE 65338 @@ -422,12 +486,12 @@ class Character false branch : return getNumericValueImpl(codePoint) - func getNumericValueImpl() throws: - func isSpace() throws: + func getNumericValueImpl(codePoint) throws: + func isSpace(ch) throws: return (ch LE 32) Land (((((1 Shl 9) Bor (1 Shl 10) Bor (1 Shl 12) Bor (1 Shl 13) Bor (1 Shl 32)) Shr ch) Band 1) NE 0) - func isSpaceChar() throws: + func isSpaceChar(ch) throws: return isSpaceChar((int)ch) - func isSpaceChar() throws: + func isSpaceChar(codePoint) throws: cond-branch cond:codePoint EQ 32 Lor codePoint EQ 160 true branch : return true @@ -454,10 +518,10 @@ class Character false branch : return isSpaceCharImpl(codePoint) - func isSpaceCharImpl() throws: - func isWhitespace() throws: + func isSpaceCharImpl(codePoint) throws: + func isWhitespace(ch) throws: return isWhitespace((int)ch) - func isWhitespace() throws: + func isWhitespace(codePoint) throws: cond-branch cond:(codePoint GE 28 Land codePoint LE 32) Lor (codePoint 
GE 9 Land codePoint LE 13) true branch : return true @@ -489,23 +553,23 @@ class Character false branch : return isWhitespaceImpl(codePoint) - func isWhitespaceImpl() throws: - func isISOControl() throws: + func isWhitespaceImpl(codePoint) throws: + func isISOControl(ch) throws: return isISOControl((int)ch) - func isISOControl() throws: + func isISOControl(codePoint) throws: return codePoint LE 159 Land (codePoint GE 127 Lor (codePoint Zext 5 EQ 0)) - func getType() throws: + func getType(ch) throws: return getType((int)ch) - func getType() throws: - var:type=getTypeImpl(codePoint) + func getType(codePoint) throws: + Decl: type=getTypeImpl(codePoint) cond-branch cond:type LE Character.FORMAT true branch : return type false branch : return (type Add 1) - func getTypeImpl() throws: - func forDigit() throws: + func getTypeImpl(codePoint) throws: + func forDigit(digit,radix) throws: cond-branch cond:(digit GE radix) Lor (digit LT 0) true branch : return @@ -522,126 +586,123 @@ class Character false branch : return (char)(a Sub 10 Add digit) - func getDirectionality() throws: + func getDirectionality(ch) throws: return getDirectionality((int)ch) - func getDirectionality() throws: + func getDirectionality(codePoint) throws: cond-branch cond:getType(codePoint) EQ Character.UNASSIGNED true branch : return Character.DIRECTIONALITY_UNDEFINED false branch : - var:directionality=getDirectionalityImpl(codePoint) + Decl: directionality=getDirectionalityImpl(codePoint) cond-branch cond:directionality GE 0 Land directionality LT DIRECTIONALITY.length true branch : return false branch : return Character.DIRECTIONALITY_UNDEFINED - func getDirectionalityImpl() throws: - func isMirrored() throws: + func getDirectionalityImpl(codePoint) throws: + func isMirrored(ch) throws: return isMirrored((int)ch) - func isMirrored() throws: + func isMirrored(codePoint) throws: return isMirroredImpl(codePoint) - func isMirroredImpl() throws: - func compareTo() throws: + func isMirroredImpl(codePoint) throws: + func compareTo(anotherCharacter) throws: return compare(this.value,anotherCharacter.value) - func compare() throws: + func compare(x,y) throws: return x Sub y - func reverseBytes() throws: + func reverseBytes(ch) throws: return (char)(((ch Band 65280) Shr 8) Bor (ch Shl 8)) - func getName() throws: + func getName(codePoint) throws: cond-branch cond:isValidCodePoint(codePoint) true branch : - new IllegalArgumentException + new IllegalArgumentException() false branch : - var:name=getNameImpl(codePoint) + Decl: name=getNameImpl(codePoint) cond-branch cond:name NE null true branch : - return name - false branch : + return name false branch : cond-branch cond:getType(codePoint) EQ UNASSIGNED true branch : - return null - false branch : + return null false branch : - var:block=UnicodeBlock.of(codePoint) + Decl: block=UnicodeBlock.of(codePoint) cond-branch cond:block NE null true branch : - return Add " " Add - false branch : + return block.toString().replace(_, ) Add " " Add Integer.toHexString(codePoint).toUpperCase(Locale.ENGLISH) false branch : - return - func getNameImpl() throws: + return Integer.toHexString(codePoint).toUpperCase(Locale.ENGLISH) + func getNameImpl(codePoint) throws: LocalClasses: class Subset Fields: name Instance Initializer: Constructors: - constructor Subset() throws: + constructor Subset(name) throws: cond-branch cond:name EQ null true branch : - new NullPointerException + new NullPointerException("name") false branch : this.name Assign name Methods: - func equals() throws: + func equals(obj) 
throws: return (this EQ obj) func hashCode() throws: - return hashCode + return super.hashCode() func toString() throws: return name LocalClasses: LocalInterfaces: class UnicodeBlock Fields: - map=new HashMap BASIC_LATIN=new UnicodeBlock LATIN_1_SUPPLEMENT=new UnicodeBlock LATIN_EXTENDED_A=new UnicodeBlock LATIN_EXTENDED_B=new UnicodeBlock IPA_EXTENSIONS=new UnicodeBlock SPACING_MODIFIER_LETTERS=new UnicodeBlock COMBINING_DIACRITICAL_MARKS=new UnicodeBlock GREEK=new UnicodeBlock CYRILLIC=new UnicodeBlock ARMENIAN=new UnicodeBlock HEBREW=new UnicodeBlock ARABIC=new UnicodeBlock DEVANAGARI=new UnicodeBlock BENGALI=new UnicodeBlock GURMUKHI=new UnicodeBlock GUJARATI=new UnicodeBlock ORIYA=new UnicodeBlock TAMIL=new UnicodeBlock TELUGU=new UnicodeBlock KANNADA=new UnicodeBlock MALAYALAM=new UnicodeBlock THAI=new UnicodeBlock LAO=new UnicodeBlock TIBETAN=new UnicodeBlock GEORGIAN=new UnicodeBlock HANGUL_JAMO=new UnicodeBlock LATIN_EXTENDED_ADDITIONAL=new UnicodeBlock GREEK_EXTENDED=new UnicodeBlock GENERAL_PUNCTUATION=new UnicodeBlock SUPERSCRIPTS_AND_SUBSCRIPTS=new UnicodeBlock CURRENCY_SYMBOLS=new UnicodeBlock COMBINING_MARKS_FOR_SYMBOLS=new UnicodeBlock LETTERLIKE_SYMBOLS=new UnicodeBlock NUMBER_FORMS=new UnicodeBlock ARROWS=new UnicodeBlock MATHEMATICAL_OPERATORS=new UnicodeBlock MISCELLANEOUS_TECHNICAL=new UnicodeBlock CONTROL_PICTURES=new UnicodeBlock OPTICAL_CHARACTER_RECOGNITION=new UnicodeBlock ENCLOSED_ALPHANUMERICS=new UnicodeBlock BOX_DRAWING=new UnicodeBlock BLOCK_ELEMENTS=new UnicodeBlock GEOMETRIC_SHAPES=new UnicodeBlock MISCELLANEOUS_SYMBOLS=new UnicodeBlock DINGBATS=new UnicodeBlock CJK_SYMBOLS_AND_PUNCTUATION=new UnicodeBlock HIRAGANA=new UnicodeBlock KATAKANA=new UnicodeBlock BOPOMOFO=new UnicodeBlock HANGUL_COMPATIBILITY_JAMO=new UnicodeBlock KANBUN=new UnicodeBlock ENCLOSED_CJK_LETTERS_AND_MONTHS=new UnicodeBlock CJK_COMPATIBILITY=new UnicodeBlock CJK_UNIFIED_IDEOGRAPHS=new UnicodeBlock HANGUL_SYLLABLES=new UnicodeBlock PRIVATE_USE_AREA=new UnicodeBlock CJK_COMPATIBILITY_IDEOGRAPHS=new UnicodeBlock ALPHABETIC_PRESENTATION_FORMS=new UnicodeBlock ARABIC_PRESENTATION_FORMS_A=new UnicodeBlock COMBINING_HALF_MARKS=new UnicodeBlock CJK_COMPATIBILITY_FORMS=new UnicodeBlock SMALL_FORM_VARIANTS=new UnicodeBlock ARABIC_PRESENTATION_FORMS_B=new UnicodeBlock HALFWIDTH_AND_FULLWIDTH_FORMS=new UnicodeBlock SPECIALS=new UnicodeBlock SURROGATES_AREA=new UnicodeBlock SYRIAC=new UnicodeBlock THAANA=new UnicodeBlock SINHALA=new UnicodeBlock MYANMAR=new UnicodeBlock ETHIOPIC=new UnicodeBlock CHEROKEE=new UnicodeBlock UNIFIED_CANADIAN_ABORIGINAL_SYLLABICS=new UnicodeBlock OGHAM=new UnicodeBlock RUNIC=new UnicodeBlock KHMER=new UnicodeBlock MONGOLIAN=new UnicodeBlock BRAILLE_PATTERNS=new UnicodeBlock CJK_RADICALS_SUPPLEMENT=new UnicodeBlock KANGXI_RADICALS=new UnicodeBlock IDEOGRAPHIC_DESCRIPTION_CHARACTERS=new UnicodeBlock BOPOMOFO_EXTENDED=new UnicodeBlock CJK_UNIFIED_IDEOGRAPHS_EXTENSION_A=new UnicodeBlock YI_SYLLABLES=new UnicodeBlock YI_RADICALS=new UnicodeBlock CYRILLIC_SUPPLEMENTARY=new UnicodeBlock TAGALOG=new UnicodeBlock HANUNOO=new UnicodeBlock BUHID=new UnicodeBlock TAGBANWA=new UnicodeBlock LIMBU=new UnicodeBlock TAI_LE=new UnicodeBlock KHMER_SYMBOLS=new UnicodeBlock PHONETIC_EXTENSIONS=new UnicodeBlock MISCELLANEOUS_MATHEMATICAL_SYMBOLS_A=new UnicodeBlock SUPPLEMENTAL_ARROWS_A=new UnicodeBlock SUPPLEMENTAL_ARROWS_B=new UnicodeBlock MISCELLANEOUS_MATHEMATICAL_SYMBOLS_B=new UnicodeBlock SUPPLEMENTAL_MATHEMATICAL_OPERATORS=new UnicodeBlock MISCELLANEOUS_SYMBOLS_AND_ARROWS=new 
UnicodeBlock KATAKANA_PHONETIC_EXTENSIONS=new UnicodeBlock YIJING_HEXAGRAM_SYMBOLS=new UnicodeBlock VARIATION_SELECTORS=new UnicodeBlock LINEAR_B_SYLLABARY=new UnicodeBlock LINEAR_B_IDEOGRAMS=new UnicodeBlock AEGEAN_NUMBERS=new UnicodeBlock OLD_ITALIC=new UnicodeBlock GOTHIC=new UnicodeBlock UGARITIC=new UnicodeBlock DESERET=new UnicodeBlock SHAVIAN=new UnicodeBlock OSMANYA=new UnicodeBlock CYPRIOT_SYLLABARY=new UnicodeBlock BYZANTINE_MUSICAL_SYMBOLS=new UnicodeBlock MUSICAL_SYMBOLS=new UnicodeBlock TAI_XUAN_JING_SYMBOLS=new UnicodeBlock MATHEMATICAL_ALPHANUMERIC_SYMBOLS=new UnicodeBlock CJK_UNIFIED_IDEOGRAPHS_EXTENSION_B=new UnicodeBlock CJK_COMPATIBILITY_IDEOGRAPHS_SUPPLEMENT=new UnicodeBlock TAGS=new UnicodeBlock VARIATION_SELECTORS_SUPPLEMENT=new UnicodeBlock SUPPLEMENTARY_PRIVATE_USE_AREA_A=new UnicodeBlock SUPPLEMENTARY_PRIVATE_USE_AREA_B=new UnicodeBlock HIGH_SURROGATES=new UnicodeBlock HIGH_PRIVATE_USE_SURROGATES=new UnicodeBlock LOW_SURROGATES=new UnicodeBlock ARABIC_SUPPLEMENT=new UnicodeBlock NKO=new UnicodeBlock SAMARITAN=new UnicodeBlock MANDAIC=new UnicodeBlock ETHIOPIC_SUPPLEMENT=new UnicodeBlock UNIFIED_CANADIAN_ABORIGINAL_SYLLABICS_EXTENDED=new UnicodeBlock NEW_TAI_LUE=new UnicodeBlock BUGINESE=new UnicodeBlock TAI_THAM=new UnicodeBlock BALINESE=new UnicodeBlock SUNDANESE=new UnicodeBlock BATAK=new UnicodeBlock LEPCHA=new UnicodeBlock OL_CHIKI=new UnicodeBlock VEDIC_EXTENSIONS=new UnicodeBlock PHONETIC_EXTENSIONS_SUPPLEMENT=new UnicodeBlock COMBINING_DIACRITICAL_MARKS_SUPPLEMENT=new UnicodeBlock GLAGOLITIC=new UnicodeBlock LATIN_EXTENDED_C=new UnicodeBlock COPTIC=new UnicodeBlock GEORGIAN_SUPPLEMENT=new UnicodeBlock TIFINAGH=new UnicodeBlock ETHIOPIC_EXTENDED=new UnicodeBlock CYRILLIC_EXTENDED_A=new UnicodeBlock SUPPLEMENTAL_PUNCTUATION=new UnicodeBlock CJK_STROKES=new UnicodeBlock LISU=new UnicodeBlock VAI=new UnicodeBlock CYRILLIC_EXTENDED_B=new UnicodeBlock BAMUM=new UnicodeBlock MODIFIER_TONE_LETTERS=new UnicodeBlock LATIN_EXTENDED_D=new UnicodeBlock SYLOTI_NAGRI=new UnicodeBlock COMMON_INDIC_NUMBER_FORMS=new UnicodeBlock PHAGS_PA=new UnicodeBlock SAURASHTRA=new UnicodeBlock DEVANAGARI_EXTENDED=new UnicodeBlock KAYAH_LI=new UnicodeBlock REJANG=new UnicodeBlock HANGUL_JAMO_EXTENDED_A=new UnicodeBlock JAVANESE=new UnicodeBlock CHAM=new UnicodeBlock MYANMAR_EXTENDED_A=new UnicodeBlock TAI_VIET=new UnicodeBlock ETHIOPIC_EXTENDED_A=new UnicodeBlock MEETEI_MAYEK=new UnicodeBlock HANGUL_JAMO_EXTENDED_B=new UnicodeBlock VERTICAL_FORMS=new UnicodeBlock ANCIENT_GREEK_NUMBERS=new UnicodeBlock ANCIENT_SYMBOLS=new UnicodeBlock PHAISTOS_DISC=new UnicodeBlock LYCIAN=new UnicodeBlock CARIAN=new UnicodeBlock OLD_PERSIAN=new UnicodeBlock IMPERIAL_ARAMAIC=new UnicodeBlock PHOENICIAN=new UnicodeBlock LYDIAN=new UnicodeBlock KHAROSHTHI=new UnicodeBlock OLD_SOUTH_ARABIAN=new UnicodeBlock AVESTAN=new UnicodeBlock INSCRIPTIONAL_PARTHIAN=new UnicodeBlock INSCRIPTIONAL_PAHLAVI=new UnicodeBlock OLD_TURKIC=new UnicodeBlock RUMI_NUMERAL_SYMBOLS=new UnicodeBlock BRAHMI=new UnicodeBlock KAITHI=new UnicodeBlock CUNEIFORM=new UnicodeBlock CUNEIFORM_NUMBERS_AND_PUNCTUATION=new UnicodeBlock EGYPTIAN_HIEROGLYPHS=new UnicodeBlock BAMUM_SUPPLEMENT=new UnicodeBlock KANA_SUPPLEMENT=new UnicodeBlock ANCIENT_GREEK_MUSICAL_NOTATION=new UnicodeBlock COUNTING_ROD_NUMERALS=new UnicodeBlock MAHJONG_TILES=new UnicodeBlock DOMINO_TILES=new UnicodeBlock PLAYING_CARDS=new UnicodeBlock ENCLOSED_ALPHANUMERIC_SUPPLEMENT=new UnicodeBlock ENCLOSED_IDEOGRAPHIC_SUPPLEMENT=new UnicodeBlock 
MISCELLANEOUS_SYMBOLS_AND_PICTOGRAPHS=new UnicodeBlock EMOTICONS=new UnicodeBlock TRANSPORT_AND_MAP_SYMBOLS=new UnicodeBlock ALCHEMICAL_SYMBOLS=new UnicodeBlock CJK_UNIFIED_IDEOGRAPHS_EXTENSION_C=new UnicodeBlock CJK_UNIFIED_IDEOGRAPHS_EXTENSION_D=new UnicodeBlock ARABIC_EXTENDED_A=new UnicodeBlock SUNDANESE_SUPPLEMENT=new UnicodeBlock MEETEI_MAYEK_EXTENSIONS=new UnicodeBlock MEROITIC_HIEROGLYPHS=new UnicodeBlock MEROITIC_CURSIVE=new UnicodeBlock SORA_SOMPENG=new UnicodeBlock CHAKMA=new UnicodeBlock SHARADA=new UnicodeBlock TAKRI=new UnicodeBlock MIAO=new UnicodeBlock ARABIC_MATHEMATICAL_ALPHABETIC_SYMBOLS=new UnicodeBlock blockStarts=[] blocks= + map=new HashMap(256) BASIC_LATIN=new UnicodeBlock("BASIC_LATIN","BASIC LATIN","BASICLATIN") LATIN_1_SUPPLEMENT=new UnicodeBlock("LATIN_1_SUPPLEMENT","LATIN-1 SUPPLEMENT","LATIN-1SUPPLEMENT") LATIN_EXTENDED_A=new UnicodeBlock("LATIN_EXTENDED_A","LATIN EXTENDED-A","LATINEXTENDED-A") LATIN_EXTENDED_B=new UnicodeBlock("LATIN_EXTENDED_B","LATIN EXTENDED-B","LATINEXTENDED-B") IPA_EXTENSIONS=new UnicodeBlock("IPA_EXTENSIONS","IPA EXTENSIONS","IPAEXTENSIONS") SPACING_MODIFIER_LETTERS=new UnicodeBlock("SPACING_MODIFIER_LETTERS","SPACING MODIFIER LETTERS","SPACINGMODIFIERLETTERS") COMBINING_DIACRITICAL_MARKS=new UnicodeBlock("COMBINING_DIACRITICAL_MARKS","COMBINING DIACRITICAL MARKS","COMBININGDIACRITICALMARKS") GREEK=new UnicodeBlock("GREEK","GREEK AND COPTIC","GREEKANDCOPTIC") CYRILLIC=new UnicodeBlock("CYRILLIC") ARMENIAN=new UnicodeBlock("ARMENIAN") HEBREW=new UnicodeBlock("HEBREW") ARABIC=new UnicodeBlock("ARABIC") DEVANAGARI=new UnicodeBlock("DEVANAGARI") BENGALI=new UnicodeBlock("BENGALI") GURMUKHI=new UnicodeBlock("GURMUKHI") GUJARATI=new UnicodeBlock("GUJARATI") ORIYA=new UnicodeBlock("ORIYA") TAMIL=new UnicodeBlock("TAMIL") TELUGU=new UnicodeBlock("TELUGU") KANNADA=new UnicodeBlock("KANNADA") MALAYALAM=new UnicodeBlock("MALAYALAM") THAI=new UnicodeBlock("THAI") LAO=new UnicodeBlock("LAO") TIBETAN=new UnicodeBlock("TIBETAN") GEORGIAN=new UnicodeBlock("GEORGIAN") HANGUL_JAMO=new UnicodeBlock("HANGUL_JAMO","HANGUL JAMO","HANGULJAMO") LATIN_EXTENDED_ADDITIONAL=new UnicodeBlock("LATIN_EXTENDED_ADDITIONAL","LATIN EXTENDED ADDITIONAL","LATINEXTENDEDADDITIONAL") GREEK_EXTENDED=new UnicodeBlock("GREEK_EXTENDED","GREEK EXTENDED","GREEKEXTENDED") GENERAL_PUNCTUATION=new UnicodeBlock("GENERAL_PUNCTUATION","GENERAL PUNCTUATION","GENERALPUNCTUATION") SUPERSCRIPTS_AND_SUBSCRIPTS=new UnicodeBlock("SUPERSCRIPTS_AND_SUBSCRIPTS","SUPERSCRIPTS AND SUBSCRIPTS","SUPERSCRIPTSANDSUBSCRIPTS") CURRENCY_SYMBOLS=new UnicodeBlock("CURRENCY_SYMBOLS","CURRENCY SYMBOLS","CURRENCYSYMBOLS") COMBINING_MARKS_FOR_SYMBOLS=new UnicodeBlock("COMBINING_MARKS_FOR_SYMBOLS","COMBINING DIACRITICAL MARKS FOR SYMBOLS","COMBININGDIACRITICALMARKSFORSYMBOLS","COMBINING MARKS FOR SYMBOLS","COMBININGMARKSFORSYMBOLS") LETTERLIKE_SYMBOLS=new UnicodeBlock("LETTERLIKE_SYMBOLS","LETTERLIKE SYMBOLS","LETTERLIKESYMBOLS") NUMBER_FORMS=new UnicodeBlock("NUMBER_FORMS","NUMBER FORMS","NUMBERFORMS") ARROWS=new UnicodeBlock("ARROWS") MATHEMATICAL_OPERATORS=new UnicodeBlock("MATHEMATICAL_OPERATORS","MATHEMATICAL OPERATORS","MATHEMATICALOPERATORS") MISCELLANEOUS_TECHNICAL=new UnicodeBlock("MISCELLANEOUS_TECHNICAL","MISCELLANEOUS TECHNICAL","MISCELLANEOUSTECHNICAL") CONTROL_PICTURES=new UnicodeBlock("CONTROL_PICTURES","CONTROL PICTURES","CONTROLPICTURES") OPTICAL_CHARACTER_RECOGNITION=new UnicodeBlock("OPTICAL_CHARACTER_RECOGNITION","OPTICAL CHARACTER RECOGNITION","OPTICALCHARACTERRECOGNITION") 
ENCLOSED_ALPHANUMERICS=new UnicodeBlock("ENCLOSED_ALPHANUMERICS","ENCLOSED ALPHANUMERICS","ENCLOSEDALPHANUMERICS") BOX_DRAWING=new UnicodeBlock("BOX_DRAWING","BOX DRAWING","BOXDRAWING") BLOCK_ELEMENTS=new UnicodeBlock("BLOCK_ELEMENTS","BLOCK ELEMENTS","BLOCKELEMENTS") GEOMETRIC_SHAPES=new UnicodeBlock("GEOMETRIC_SHAPES","GEOMETRIC SHAPES","GEOMETRICSHAPES") MISCELLANEOUS_SYMBOLS=new UnicodeBlock("MISCELLANEOUS_SYMBOLS","MISCELLANEOUS SYMBOLS","MISCELLANEOUSSYMBOLS") DINGBATS=new UnicodeBlock("DINGBATS") CJK_SYMBOLS_AND_PUNCTUATION=new UnicodeBlock("CJK_SYMBOLS_AND_PUNCTUATION","CJK SYMBOLS AND PUNCTUATION","CJKSYMBOLSANDPUNCTUATION") HIRAGANA=new UnicodeBlock("HIRAGANA") KATAKANA=new UnicodeBlock("KATAKANA") BOPOMOFO=new UnicodeBlock("BOPOMOFO") HANGUL_COMPATIBILITY_JAMO=new UnicodeBlock("HANGUL_COMPATIBILITY_JAMO","HANGUL COMPATIBILITY JAMO","HANGULCOMPATIBILITYJAMO") KANBUN=new UnicodeBlock("KANBUN") ENCLOSED_CJK_LETTERS_AND_MONTHS=new UnicodeBlock("ENCLOSED_CJK_LETTERS_AND_MONTHS","ENCLOSED CJK LETTERS AND MONTHS","ENCLOSEDCJKLETTERSANDMONTHS") CJK_COMPATIBILITY=new UnicodeBlock("CJK_COMPATIBILITY","CJK COMPATIBILITY","CJKCOMPATIBILITY") CJK_UNIFIED_IDEOGRAPHS=new UnicodeBlock("CJK_UNIFIED_IDEOGRAPHS","CJK UNIFIED IDEOGRAPHS","CJKUNIFIEDIDEOGRAPHS") HANGUL_SYLLABLES=new UnicodeBlock("HANGUL_SYLLABLES","HANGUL SYLLABLES","HANGULSYLLABLES") PRIVATE_USE_AREA=new UnicodeBlock("PRIVATE_USE_AREA","PRIVATE USE AREA","PRIVATEUSEAREA") CJK_COMPATIBILITY_IDEOGRAPHS=new UnicodeBlock("CJK_COMPATIBILITY_IDEOGRAPHS","CJK COMPATIBILITY IDEOGRAPHS","CJKCOMPATIBILITYIDEOGRAPHS") ALPHABETIC_PRESENTATION_FORMS=new UnicodeBlock("ALPHABETIC_PRESENTATION_FORMS","ALPHABETIC PRESENTATION FORMS","ALPHABETICPRESENTATIONFORMS") ARABIC_PRESENTATION_FORMS_A=new UnicodeBlock("ARABIC_PRESENTATION_FORMS_A","ARABIC PRESENTATION FORMS-A","ARABICPRESENTATIONFORMS-A") COMBINING_HALF_MARKS=new UnicodeBlock("COMBINING_HALF_MARKS","COMBINING HALF MARKS","COMBININGHALFMARKS") CJK_COMPATIBILITY_FORMS=new UnicodeBlock("CJK_COMPATIBILITY_FORMS","CJK COMPATIBILITY FORMS","CJKCOMPATIBILITYFORMS") SMALL_FORM_VARIANTS=new UnicodeBlock("SMALL_FORM_VARIANTS","SMALL FORM VARIANTS","SMALLFORMVARIANTS") ARABIC_PRESENTATION_FORMS_B=new UnicodeBlock("ARABIC_PRESENTATION_FORMS_B","ARABIC PRESENTATION FORMS-B","ARABICPRESENTATIONFORMS-B") HALFWIDTH_AND_FULLWIDTH_FORMS=new UnicodeBlock("HALFWIDTH_AND_FULLWIDTH_FORMS","HALFWIDTH AND FULLWIDTH FORMS","HALFWIDTHANDFULLWIDTHFORMS") SPECIALS=new UnicodeBlock("SPECIALS") SURROGATES_AREA=new UnicodeBlock("SURROGATES_AREA",false) SYRIAC=new UnicodeBlock("SYRIAC") THAANA=new UnicodeBlock("THAANA") SINHALA=new UnicodeBlock("SINHALA") MYANMAR=new UnicodeBlock("MYANMAR") ETHIOPIC=new UnicodeBlock("ETHIOPIC") CHEROKEE=new UnicodeBlock("CHEROKEE") UNIFIED_CANADIAN_ABORIGINAL_SYLLABICS=new UnicodeBlock("UNIFIED_CANADIAN_ABORIGINAL_SYLLABICS","UNIFIED CANADIAN ABORIGINAL SYLLABICS","UNIFIEDCANADIANABORIGINALSYLLABICS") OGHAM=new UnicodeBlock("OGHAM") RUNIC=new UnicodeBlock("RUNIC") KHMER=new UnicodeBlock("KHMER") MONGOLIAN=new UnicodeBlock("MONGOLIAN") BRAILLE_PATTERNS=new UnicodeBlock("BRAILLE_PATTERNS","BRAILLE PATTERNS","BRAILLEPATTERNS") CJK_RADICALS_SUPPLEMENT=new UnicodeBlock("CJK_RADICALS_SUPPLEMENT","CJK RADICALS SUPPLEMENT","CJKRADICALSSUPPLEMENT") KANGXI_RADICALS=new UnicodeBlock("KANGXI_RADICALS","KANGXI RADICALS","KANGXIRADICALS") IDEOGRAPHIC_DESCRIPTION_CHARACTERS=new UnicodeBlock("IDEOGRAPHIC_DESCRIPTION_CHARACTERS","IDEOGRAPHIC DESCRIPTION CHARACTERS","IDEOGRAPHICDESCRIPTIONCHARACTERS") 
BOPOMOFO_EXTENDED=new UnicodeBlock("BOPOMOFO_EXTENDED","BOPOMOFO EXTENDED","BOPOMOFOEXTENDED") CJK_UNIFIED_IDEOGRAPHS_EXTENSION_A=new UnicodeBlock("CJK_UNIFIED_IDEOGRAPHS_EXTENSION_A","CJK UNIFIED IDEOGRAPHS EXTENSION A","CJKUNIFIEDIDEOGRAPHSEXTENSIONA") YI_SYLLABLES=new UnicodeBlock("YI_SYLLABLES","YI SYLLABLES","YISYLLABLES") YI_RADICALS=new UnicodeBlock("YI_RADICALS","YI RADICALS","YIRADICALS") CYRILLIC_SUPPLEMENTARY=new UnicodeBlock("CYRILLIC_SUPPLEMENTARY","CYRILLIC SUPPLEMENTARY","CYRILLICSUPPLEMENTARY","CYRILLIC SUPPLEMENT","CYRILLICSUPPLEMENT") TAGALOG=new UnicodeBlock("TAGALOG") HANUNOO=new UnicodeBlock("HANUNOO") BUHID=new UnicodeBlock("BUHID") TAGBANWA=new UnicodeBlock("TAGBANWA") LIMBU=new UnicodeBlock("LIMBU") TAI_LE=new UnicodeBlock("TAI_LE","TAI LE","TAILE") KHMER_SYMBOLS=new UnicodeBlock("KHMER_SYMBOLS","KHMER SYMBOLS","KHMERSYMBOLS") PHONETIC_EXTENSIONS=new UnicodeBlock("PHONETIC_EXTENSIONS","PHONETIC EXTENSIONS","PHONETICEXTENSIONS") MISCELLANEOUS_MATHEMATICAL_SYMBOLS_A=new UnicodeBlock("MISCELLANEOUS_MATHEMATICAL_SYMBOLS_A","MISCELLANEOUS MATHEMATICAL SYMBOLS-A","MISCELLANEOUSMATHEMATICALSYMBOLS-A") SUPPLEMENTAL_ARROWS_A=new UnicodeBlock("SUPPLEMENTAL_ARROWS_A","SUPPLEMENTAL ARROWS-A","SUPPLEMENTALARROWS-A") SUPPLEMENTAL_ARROWS_B=new UnicodeBlock("SUPPLEMENTAL_ARROWS_B","SUPPLEMENTAL ARROWS-B","SUPPLEMENTALARROWS-B") MISCELLANEOUS_MATHEMATICAL_SYMBOLS_B=new UnicodeBlock("MISCELLANEOUS_MATHEMATICAL_SYMBOLS_B","MISCELLANEOUS MATHEMATICAL SYMBOLS-B","MISCELLANEOUSMATHEMATICALSYMBOLS-B") SUPPLEMENTAL_MATHEMATICAL_OPERATORS=new UnicodeBlock("SUPPLEMENTAL_MATHEMATICAL_OPERATORS","SUPPLEMENTAL MATHEMATICAL OPERATORS","SUPPLEMENTALMATHEMATICALOPERATORS") MISCELLANEOUS_SYMBOLS_AND_ARROWS=new UnicodeBlock("MISCELLANEOUS_SYMBOLS_AND_ARROWS","MISCELLANEOUS SYMBOLS AND ARROWS","MISCELLANEOUSSYMBOLSANDARROWS") KATAKANA_PHONETIC_EXTENSIONS=new UnicodeBlock("KATAKANA_PHONETIC_EXTENSIONS","KATAKANA PHONETIC EXTENSIONS","KATAKANAPHONETICEXTENSIONS") YIJING_HEXAGRAM_SYMBOLS=new UnicodeBlock("YIJING_HEXAGRAM_SYMBOLS","YIJING HEXAGRAM SYMBOLS","YIJINGHEXAGRAMSYMBOLS") VARIATION_SELECTORS=new UnicodeBlock("VARIATION_SELECTORS","VARIATION SELECTORS","VARIATIONSELECTORS") LINEAR_B_SYLLABARY=new UnicodeBlock("LINEAR_B_SYLLABARY","LINEAR B SYLLABARY","LINEARBSYLLABARY") LINEAR_B_IDEOGRAMS=new UnicodeBlock("LINEAR_B_IDEOGRAMS","LINEAR B IDEOGRAMS","LINEARBIDEOGRAMS") AEGEAN_NUMBERS=new UnicodeBlock("AEGEAN_NUMBERS","AEGEAN NUMBERS","AEGEANNUMBERS") OLD_ITALIC=new UnicodeBlock("OLD_ITALIC","OLD ITALIC","OLDITALIC") GOTHIC=new UnicodeBlock("GOTHIC") UGARITIC=new UnicodeBlock("UGARITIC") DESERET=new UnicodeBlock("DESERET") SHAVIAN=new UnicodeBlock("SHAVIAN") OSMANYA=new UnicodeBlock("OSMANYA") CYPRIOT_SYLLABARY=new UnicodeBlock("CYPRIOT_SYLLABARY","CYPRIOT SYLLABARY","CYPRIOTSYLLABARY") BYZANTINE_MUSICAL_SYMBOLS=new UnicodeBlock("BYZANTINE_MUSICAL_SYMBOLS","BYZANTINE MUSICAL SYMBOLS","BYZANTINEMUSICALSYMBOLS") MUSICAL_SYMBOLS=new UnicodeBlock("MUSICAL_SYMBOLS","MUSICAL SYMBOLS","MUSICALSYMBOLS") TAI_XUAN_JING_SYMBOLS=new UnicodeBlock("TAI_XUAN_JING_SYMBOLS","TAI XUAN JING SYMBOLS","TAIXUANJINGSYMBOLS") MATHEMATICAL_ALPHANUMERIC_SYMBOLS=new UnicodeBlock("MATHEMATICAL_ALPHANUMERIC_SYMBOLS","MATHEMATICAL ALPHANUMERIC SYMBOLS","MATHEMATICALALPHANUMERICSYMBOLS") CJK_UNIFIED_IDEOGRAPHS_EXTENSION_B=new UnicodeBlock("CJK_UNIFIED_IDEOGRAPHS_EXTENSION_B","CJK UNIFIED IDEOGRAPHS EXTENSION B","CJKUNIFIEDIDEOGRAPHSEXTENSIONB") CJK_COMPATIBILITY_IDEOGRAPHS_SUPPLEMENT=new 
UnicodeBlock("CJK_COMPATIBILITY_IDEOGRAPHS_SUPPLEMENT","CJK COMPATIBILITY IDEOGRAPHS SUPPLEMENT","CJKCOMPATIBILITYIDEOGRAPHSSUPPLEMENT") TAGS=new UnicodeBlock("TAGS") VARIATION_SELECTORS_SUPPLEMENT=new UnicodeBlock("VARIATION_SELECTORS_SUPPLEMENT","VARIATION SELECTORS SUPPLEMENT","VARIATIONSELECTORSSUPPLEMENT") SUPPLEMENTARY_PRIVATE_USE_AREA_A=new UnicodeBlock("SUPPLEMENTARY_PRIVATE_USE_AREA_A","SUPPLEMENTARY PRIVATE USE AREA-A","SUPPLEMENTARYPRIVATEUSEAREA-A") SUPPLEMENTARY_PRIVATE_USE_AREA_B=new UnicodeBlock("SUPPLEMENTARY_PRIVATE_USE_AREA_B","SUPPLEMENTARY PRIVATE USE AREA-B","SUPPLEMENTARYPRIVATEUSEAREA-B") HIGH_SURROGATES=new UnicodeBlock("HIGH_SURROGATES","HIGH SURROGATES","HIGHSURROGATES") HIGH_PRIVATE_USE_SURROGATES=new UnicodeBlock("HIGH_PRIVATE_USE_SURROGATES","HIGH PRIVATE USE SURROGATES","HIGHPRIVATEUSESURROGATES") LOW_SURROGATES=new UnicodeBlock("LOW_SURROGATES","LOW SURROGATES","LOWSURROGATES") ARABIC_SUPPLEMENT=new UnicodeBlock("ARABIC_SUPPLEMENT","ARABIC SUPPLEMENT","ARABICSUPPLEMENT") NKO=new UnicodeBlock("NKO") SAMARITAN=new UnicodeBlock("SAMARITAN") MANDAIC=new UnicodeBlock("MANDAIC") ETHIOPIC_SUPPLEMENT=new UnicodeBlock("ETHIOPIC_SUPPLEMENT","ETHIOPIC SUPPLEMENT","ETHIOPICSUPPLEMENT") UNIFIED_CANADIAN_ABORIGINAL_SYLLABICS_EXTENDED=new UnicodeBlock("UNIFIED_CANADIAN_ABORIGINAL_SYLLABICS_EXTENDED","UNIFIED CANADIAN ABORIGINAL SYLLABICS EXTENDED","UNIFIEDCANADIANABORIGINALSYLLABICSEXTENDED") NEW_TAI_LUE=new UnicodeBlock("NEW_TAI_LUE","NEW TAI LUE","NEWTAILUE") BUGINESE=new UnicodeBlock("BUGINESE") TAI_THAM=new UnicodeBlock("TAI_THAM","TAI THAM","TAITHAM") BALINESE=new UnicodeBlock("BALINESE") SUNDANESE=new UnicodeBlock("SUNDANESE") BATAK=new UnicodeBlock("BATAK") LEPCHA=new UnicodeBlock("LEPCHA") OL_CHIKI=new UnicodeBlock("OL_CHIKI","OL CHIKI","OLCHIKI") VEDIC_EXTENSIONS=new UnicodeBlock("VEDIC_EXTENSIONS","VEDIC EXTENSIONS","VEDICEXTENSIONS") PHONETIC_EXTENSIONS_SUPPLEMENT=new UnicodeBlock("PHONETIC_EXTENSIONS_SUPPLEMENT","PHONETIC EXTENSIONS SUPPLEMENT","PHONETICEXTENSIONSSUPPLEMENT") COMBINING_DIACRITICAL_MARKS_SUPPLEMENT=new UnicodeBlock("COMBINING_DIACRITICAL_MARKS_SUPPLEMENT","COMBINING DIACRITICAL MARKS SUPPLEMENT","COMBININGDIACRITICALMARKSSUPPLEMENT") GLAGOLITIC=new UnicodeBlock("GLAGOLITIC") LATIN_EXTENDED_C=new UnicodeBlock("LATIN_EXTENDED_C","LATIN EXTENDED-C","LATINEXTENDED-C") COPTIC=new UnicodeBlock("COPTIC") GEORGIAN_SUPPLEMENT=new UnicodeBlock("GEORGIAN_SUPPLEMENT","GEORGIAN SUPPLEMENT","GEORGIANSUPPLEMENT") TIFINAGH=new UnicodeBlock("TIFINAGH") ETHIOPIC_EXTENDED=new UnicodeBlock("ETHIOPIC_EXTENDED","ETHIOPIC EXTENDED","ETHIOPICEXTENDED") CYRILLIC_EXTENDED_A=new UnicodeBlock("CYRILLIC_EXTENDED_A","CYRILLIC EXTENDED-A","CYRILLICEXTENDED-A") SUPPLEMENTAL_PUNCTUATION=new UnicodeBlock("SUPPLEMENTAL_PUNCTUATION","SUPPLEMENTAL PUNCTUATION","SUPPLEMENTALPUNCTUATION") CJK_STROKES=new UnicodeBlock("CJK_STROKES","CJK STROKES","CJKSTROKES") LISU=new UnicodeBlock("LISU") VAI=new UnicodeBlock("VAI") CYRILLIC_EXTENDED_B=new UnicodeBlock("CYRILLIC_EXTENDED_B","CYRILLIC EXTENDED-B","CYRILLICEXTENDED-B") BAMUM=new UnicodeBlock("BAMUM") MODIFIER_TONE_LETTERS=new UnicodeBlock("MODIFIER_TONE_LETTERS","MODIFIER TONE LETTERS","MODIFIERTONELETTERS") LATIN_EXTENDED_D=new UnicodeBlock("LATIN_EXTENDED_D","LATIN EXTENDED-D","LATINEXTENDED-D") SYLOTI_NAGRI=new UnicodeBlock("SYLOTI_NAGRI","SYLOTI NAGRI","SYLOTINAGRI") COMMON_INDIC_NUMBER_FORMS=new UnicodeBlock("COMMON_INDIC_NUMBER_FORMS","COMMON INDIC NUMBER FORMS","COMMONINDICNUMBERFORMS") PHAGS_PA=new 
UnicodeBlock("PHAGS_PA","PHAGS-PA") SAURASHTRA=new UnicodeBlock("SAURASHTRA") DEVANAGARI_EXTENDED=new UnicodeBlock("DEVANAGARI_EXTENDED","DEVANAGARI EXTENDED","DEVANAGARIEXTENDED") KAYAH_LI=new UnicodeBlock("KAYAH_LI","KAYAH LI","KAYAHLI") REJANG=new UnicodeBlock("REJANG") HANGUL_JAMO_EXTENDED_A=new UnicodeBlock("HANGUL_JAMO_EXTENDED_A","HANGUL JAMO EXTENDED-A","HANGULJAMOEXTENDED-A") JAVANESE=new UnicodeBlock("JAVANESE") CHAM=new UnicodeBlock("CHAM") MYANMAR_EXTENDED_A=new UnicodeBlock("MYANMAR_EXTENDED_A","MYANMAR EXTENDED-A","MYANMAREXTENDED-A") TAI_VIET=new UnicodeBlock("TAI_VIET","TAI VIET","TAIVIET") ETHIOPIC_EXTENDED_A=new UnicodeBlock("ETHIOPIC_EXTENDED_A","ETHIOPIC EXTENDED-A","ETHIOPICEXTENDED-A") MEETEI_MAYEK=new UnicodeBlock("MEETEI_MAYEK","MEETEI MAYEK","MEETEIMAYEK") HANGUL_JAMO_EXTENDED_B=new UnicodeBlock("HANGUL_JAMO_EXTENDED_B","HANGUL JAMO EXTENDED-B","HANGULJAMOEXTENDED-B") VERTICAL_FORMS=new UnicodeBlock("VERTICAL_FORMS","VERTICAL FORMS","VERTICALFORMS") ANCIENT_GREEK_NUMBERS=new UnicodeBlock("ANCIENT_GREEK_NUMBERS","ANCIENT GREEK NUMBERS","ANCIENTGREEKNUMBERS") ANCIENT_SYMBOLS=new UnicodeBlock("ANCIENT_SYMBOLS","ANCIENT SYMBOLS","ANCIENTSYMBOLS") PHAISTOS_DISC=new UnicodeBlock("PHAISTOS_DISC","PHAISTOS DISC","PHAISTOSDISC") LYCIAN=new UnicodeBlock("LYCIAN") CARIAN=new UnicodeBlock("CARIAN") OLD_PERSIAN=new UnicodeBlock("OLD_PERSIAN","OLD PERSIAN","OLDPERSIAN") IMPERIAL_ARAMAIC=new UnicodeBlock("IMPERIAL_ARAMAIC","IMPERIAL ARAMAIC","IMPERIALARAMAIC") PHOENICIAN=new UnicodeBlock("PHOENICIAN") LYDIAN=new UnicodeBlock("LYDIAN") KHAROSHTHI=new UnicodeBlock("KHAROSHTHI") OLD_SOUTH_ARABIAN=new UnicodeBlock("OLD_SOUTH_ARABIAN","OLD SOUTH ARABIAN","OLDSOUTHARABIAN") AVESTAN=new UnicodeBlock("AVESTAN") INSCRIPTIONAL_PARTHIAN=new UnicodeBlock("INSCRIPTIONAL_PARTHIAN","INSCRIPTIONAL PARTHIAN","INSCRIPTIONALPARTHIAN") INSCRIPTIONAL_PAHLAVI=new UnicodeBlock("INSCRIPTIONAL_PAHLAVI","INSCRIPTIONAL PAHLAVI","INSCRIPTIONALPAHLAVI") OLD_TURKIC=new UnicodeBlock("OLD_TURKIC","OLD TURKIC","OLDTURKIC") RUMI_NUMERAL_SYMBOLS=new UnicodeBlock("RUMI_NUMERAL_SYMBOLS","RUMI NUMERAL SYMBOLS","RUMINUMERALSYMBOLS") BRAHMI=new UnicodeBlock("BRAHMI") KAITHI=new UnicodeBlock("KAITHI") CUNEIFORM=new UnicodeBlock("CUNEIFORM") CUNEIFORM_NUMBERS_AND_PUNCTUATION=new UnicodeBlock("CUNEIFORM_NUMBERS_AND_PUNCTUATION","CUNEIFORM NUMBERS AND PUNCTUATION","CUNEIFORMNUMBERSANDPUNCTUATION") EGYPTIAN_HIEROGLYPHS=new UnicodeBlock("EGYPTIAN_HIEROGLYPHS","EGYPTIAN HIEROGLYPHS","EGYPTIANHIEROGLYPHS") BAMUM_SUPPLEMENT=new UnicodeBlock("BAMUM_SUPPLEMENT","BAMUM SUPPLEMENT","BAMUMSUPPLEMENT") KANA_SUPPLEMENT=new UnicodeBlock("KANA_SUPPLEMENT","KANA SUPPLEMENT","KANASUPPLEMENT") ANCIENT_GREEK_MUSICAL_NOTATION=new UnicodeBlock("ANCIENT_GREEK_MUSICAL_NOTATION","ANCIENT GREEK MUSICAL NOTATION","ANCIENTGREEKMUSICALNOTATION") COUNTING_ROD_NUMERALS=new UnicodeBlock("COUNTING_ROD_NUMERALS","COUNTING ROD NUMERALS","COUNTINGRODNUMERALS") MAHJONG_TILES=new UnicodeBlock("MAHJONG_TILES","MAHJONG TILES","MAHJONGTILES") DOMINO_TILES=new UnicodeBlock("DOMINO_TILES","DOMINO TILES","DOMINOTILES") PLAYING_CARDS=new UnicodeBlock("PLAYING_CARDS","PLAYING CARDS","PLAYINGCARDS") ENCLOSED_ALPHANUMERIC_SUPPLEMENT=new UnicodeBlock("ENCLOSED_ALPHANUMERIC_SUPPLEMENT","ENCLOSED ALPHANUMERIC SUPPLEMENT","ENCLOSEDALPHANUMERICSUPPLEMENT") ENCLOSED_IDEOGRAPHIC_SUPPLEMENT=new UnicodeBlock("ENCLOSED_IDEOGRAPHIC_SUPPLEMENT","ENCLOSED IDEOGRAPHIC SUPPLEMENT","ENCLOSEDIDEOGRAPHICSUPPLEMENT") MISCELLANEOUS_SYMBOLS_AND_PICTOGRAPHS=new 
UnicodeBlock("MISCELLANEOUS_SYMBOLS_AND_PICTOGRAPHS","MISCELLANEOUS SYMBOLS AND PICTOGRAPHS","MISCELLANEOUSSYMBOLSANDPICTOGRAPHS") EMOTICONS=new UnicodeBlock("EMOTICONS") TRANSPORT_AND_MAP_SYMBOLS=new UnicodeBlock("TRANSPORT_AND_MAP_SYMBOLS","TRANSPORT AND MAP SYMBOLS","TRANSPORTANDMAPSYMBOLS") ALCHEMICAL_SYMBOLS=new UnicodeBlock("ALCHEMICAL_SYMBOLS","ALCHEMICAL SYMBOLS","ALCHEMICALSYMBOLS") CJK_UNIFIED_IDEOGRAPHS_EXTENSION_C=new UnicodeBlock("CJK_UNIFIED_IDEOGRAPHS_EXTENSION_C","CJK UNIFIED IDEOGRAPHS EXTENSION C","CJKUNIFIEDIDEOGRAPHSEXTENSIONC") CJK_UNIFIED_IDEOGRAPHS_EXTENSION_D=new UnicodeBlock("CJK_UNIFIED_IDEOGRAPHS_EXTENSION_D","CJK UNIFIED IDEOGRAPHS EXTENSION D","CJKUNIFIEDIDEOGRAPHSEXTENSIOND") ARABIC_EXTENDED_A=new UnicodeBlock("ARABIC_EXTENDED_A","ARABIC EXTENDED-A","ARABICEXTENDED-A") SUNDANESE_SUPPLEMENT=new UnicodeBlock("SUNDANESE_SUPPLEMENT","SUNDANESE SUPPLEMENT","SUNDANESESUPPLEMENT") MEETEI_MAYEK_EXTENSIONS=new UnicodeBlock("MEETEI_MAYEK_EXTENSIONS","MEETEI MAYEK EXTENSIONS","MEETEIMAYEKEXTENSIONS") MEROITIC_HIEROGLYPHS=new UnicodeBlock("MEROITIC_HIEROGLYPHS","MEROITIC HIEROGLYPHS","MEROITICHIEROGLYPHS") MEROITIC_CURSIVE=new UnicodeBlock("MEROITIC_CURSIVE","MEROITIC CURSIVE","MEROITICCURSIVE") SORA_SOMPENG=new UnicodeBlock("SORA_SOMPENG","SORA SOMPENG","SORASOMPENG") CHAKMA=new UnicodeBlock("CHAKMA") SHARADA=new UnicodeBlock("SHARADA") TAKRI=new UnicodeBlock("TAKRI") MIAO=new UnicodeBlock("MIAO") ARABIC_MATHEMATICAL_ALPHABETIC_SYMBOLS=new UnicodeBlock("ARABIC_MATHEMATICAL_ALPHABETIC_SYMBOLS","ARABIC MATHEMATICAL ALPHABETIC SYMBOLS","ARABICMATHEMATICALALPHABETICSYMBOLS") blockStarts=[] blocks= Instance Initializer: Constructors: - constructor UnicodeBlock() throws: - constructor UnicodeBlock() throws: + constructor UnicodeBlock(idName) throws: + constructor UnicodeBlock(idName,isMap) throws: cond-branch cond:isMap true branch : map.put(idName,this) false branch : - constructor UnicodeBlock() throws: + constructor UnicodeBlock(idName,alias) throws: map.put(alias,this) - constructor UnicodeBlock() throws: + constructor UnicodeBlock(idName,aliases) throws: String alias aliases map.put(alias,this) Methods: - func of() throws: + func of(c) throws: return of((int)c) - func of() throws: + func of(codePoint) throws: cond-branch cond:isValidCodePoint(codePoint) true branch : - new IllegalArgumentException + new IllegalArgumentException() false branch : - var:top,bottom,current + Decl: top,bottom,current bottom Assign 0 top Assign blockStarts.length - current Assign + current Assign top Div 2 while top Sub bottom GT 1 cond-branch cond:codePoint GE true branch : bottom Assign current false branch : top Assign current - current Assign (top Add bottom)2 + current Assign (top Add bottom) Div 2 return - func forName() throws: - var:block=map.get(blockName.toUpperCase(Locale.US)) + func forName(blockName) throws: + Decl: block=map.get(blockName.toUpperCase(Locale.US)) cond-branch cond:block EQ null true branch : - new IllegalArgumentException + new IllegalArgumentException() false branch : return block @@ -649,38 +710,33 @@ class Character LocalInterfaces: class[JavaEnum] UnicodeScript Fields: - COMMON scriptStarts= scripts= aliases + scriptStarts= scripts= aliases Instance Initializer: InstInit- 0 Constructors: Methods: - func of() throws: + func of(codePoint) throws: cond-branch cond:isValidCodePoint(codePoint) true branch : - new IllegalArgumentException - false branch : + new IllegalArgumentException() false branch : - var:type=getType(codePoint) + Decl: type=getType(codePoint) 
cond-branch cond:type EQ UNASSIGNED true branch : - return UNKNOWN - false branch : + return UNKNOWN false branch : - var:index=Arrays.binarySearch(scriptStarts,codePoint) + Decl: index=Arrays.binarySearch(scriptStarts,codePoint) cond-branch cond:index LT 0 true branch : - index Assign Sub - index Sub 2 - false branch : + index Assign Minus index Sub 2 false branch : return - func forName() throws: + func forName(scriptName) throws: scriptName Assign scriptName.toUpperCase(Locale.ENGLISH) - var:sc=aliases.get(scriptName) + Decl: sc=aliases.get(scriptName) cond-branch cond:sc NE null true branch : - return sc - false branch : + return sc false branch : return valueOf(scriptName) LocalClasses: diff --git a/src/MapleFE/test/openjdk/Class-2.java b/src/MapleFE/test/java/openjdk/Class-2.java similarity index 100% rename from src/MapleFE/test/openjdk/Class-2.java rename to src/MapleFE/test/java/openjdk/Class-2.java diff --git a/src/MapleFE/test/openjdk/Class-2.java.result b/src/MapleFE/test/java/openjdk/Class-2.java.result similarity index 62% rename from src/MapleFE/test/openjdk/Class-2.java.result rename to src/MapleFE/test/java/openjdk/Class-2.java.result index a18295d54303b1ee1f0f4b9fb0638dcc2b954404..bbe92670f84638da3f5c4970793b40eb95f8b2ea 100644 --- a/src/MapleFE/test/openjdk/Class-2.java.result +++ b/src/MapleFE/test/java/openjdk/Class-2.java.result @@ -16,14 +16,13 @@ class Class return getEnclosingConstructorNative() func getEnclosingConstructorNative() throws: func classNameImpliesTopLevel() throws: - return + return getName().contains("$") func getDeclaringClass() throws: func getEnclosingClass() throws: func getSimpleName() throws: cond-branch cond:isArray() true branch : - return Add "[]" - false branch : + return getComponentType().getSimpleName() Add "[]" false branch : cond-branch cond:isAnonymousClass() true branch : @@ -35,8 +34,8 @@ class Class return getInnerClassName() false branch : - var:simpleName=getName() - var:dot=simpleName.lastIndexOf(".") + Decl: simpleName=getName() + Decl: dot=simpleName.lastIndexOf(".") cond-branch cond:dot GT 0 true branch : return simpleName.substring(simpleName.lastIndexOf(".") Add 1) @@ -46,9 +45,21 @@ class Class func getTypeName() throws: cond-branch cond:isArray() true branch : - var:cl=this + Decl: cl=this + Decl: dimensions=0 + while cl.isArray() dimensions Inc + + cl Assign cl.getComponentType() + + Decl: sb=new StringBuilder() + sb.append(cl.getName()) + for ( ) + sb.append("[]") + return sb.toString() + Throwable + e false branch : @@ -56,27 +67,28 @@ class Class func getCanonicalName() throws: cond-branch cond:isArray() true branch : - var:canonicalName= + Decl: canonicalName=getComponentType().getCanonicalName() cond-branch cond:canonicalName NE null true branch : - return canonicalName Add "[]" - false branch : + return canonicalName Add "[]" false branch : return null - false branch : cond-branch cond:isLocalOrAnonymousClass() true branch : - return null - false branch : + return null false branch : - var:enclosingClass=getEnclosingClass() + Decl: enclosingClass=getEnclosingClass() cond-branch cond:enclosingClass EQ null true branch : return getName() false branch : - var:enclosingName=enclosingClass.getCanonicalName() + Decl: enclosingName=enclosingClass.getCanonicalName() + cond-branch cond:enclosingName EQ null + true branch : + return null false branch : + return enclosingName Add "." 
Add getSimpleName() func isAnonymousClass() throws: func isLocalClass() throws: @@ -86,7 +98,7 @@ class Class func isLocalOrAnonymousClass() throws: return isLocalClass() Lor isAnonymousClass() func getClasses() throws: - var:result=new ArrayList + Decl: result=new ArrayList() for ( ) Class member @@ -100,27 +112,27 @@ class Class return result.toArray(Class,result.size()) func getFields() throws: SecurityException - var:fields=new ArrayList + Decl: fields=new ArrayList() getPublicFieldsRecursive(fields) return fields.toArray(Field,fields.size()) - func getPublicFieldsRecursive() throws: + func getPublicFieldsRecursive(result) throws: for ( ) Collections.addAll(result,c.getPublicDeclaredFields()) - var:iftable=ifTable + Decl: iftable=ifTable cond-branch cond:iftable NE null true branch : for ( ) - Collections.addAll(result,((Class))getPublicDeclaredFields) + Collections.addAll(result,(Class).getPublicDeclaredFields()) false branch : func getMethods() throws: SecurityException - var:methods=new ArrayList + Decl: methods=new ArrayList() getPublicMethodsInternal(methods) CollectionUtils.removeDuplicates(methods,Method.ORDER_BY_SIGNATURE) return methods.toArray(Method,methods.size()) - func getPublicMethodsInternal() throws: + func getPublicMethodsInternal(result) throws: Collections.addAll(result,getDeclaredMethodsUnchecked(true)) cond-branch cond:isInterface() true branch : @@ -129,40 +141,40 @@ class Class false branch : - var:iftable=ifTable + Decl: iftable=ifTable cond-branch cond:iftable NE null true branch : for ( ) - var:ifc=(Class) + Decl: ifc=(Class) Collections.addAll(result,ifc.getDeclaredMethodsUnchecked(true)) false branch : func getConstructors() throws: SecurityException return getDeclaredConstructorsInternal(true) - func getField() throws: NoSuchFieldException + func getField(name) throws: NoSuchFieldException cond-branch cond:name EQ null true branch : - new NullPointerException + new NullPointerException("name == null") false branch : - var:result=getPublicFieldRecursive(name) + Decl: result=getPublicFieldRecursive(name) cond-branch cond:result EQ null true branch : - new NoSuchFieldException + new NoSuchFieldException(name) false branch : return result - func getPublicFieldRecursive() throws: - func getMethod() throws: NoSuchMethodException SecurityException + func getPublicFieldRecursive(name) throws: + func getMethod(name,parameterTypes) throws: NoSuchMethodException SecurityException return getMethod(name,parameterTypes,true) - func getConstructor() throws: NoSuchMethodException SecurityException + func getConstructor(parameterTypes) throws: NoSuchMethodException SecurityException return getConstructor0(parameterTypes,Member.PUBLIC) func getDeclaredClasses() throws: func getDeclaredFields() throws: - func getDeclaredFieldsUnchecked() throws: + func getDeclaredFieldsUnchecked(publicOnly) throws: func getDeclaredMethods() throws: SecurityException - var:result=getDeclaredMethodsUnchecked(false) + Decl: result=getDeclaredMethodsUnchecked(false) Method m result @@ -170,18 +182,18 @@ class Class m.getParameterTypes() return result - func getDeclaredMethodsUnchecked() throws: + func getDeclaredMethodsUnchecked(publicOnly) throws: func getDeclaredConstructors() throws: SecurityException return getDeclaredConstructorsInternal(false) - func getDeclaredConstructorsInternal() throws: - func getDeclaredField() throws: NoSuchFieldException + func getDeclaredConstructorsInternal(publicOnly) throws: + func getDeclaredField(name) throws: NoSuchFieldException func 
getPublicDeclaredFields() throws: - func getDeclaredMethod() throws: NoSuchMethodException SecurityException + func getDeclaredMethod(name,parameterTypes) throws: NoSuchMethodException SecurityException return getMethod(name,parameterTypes,false) - func getMethod() throws: NoSuchMethodException + func getMethod(name,parameterTypes,recursivePublicMethods) throws: NoSuchMethodException cond-branch cond:name EQ null true branch : - new NullPointerException + new NullPointerException("name == null") false branch : cond-branch cond:parameterTypes EQ null @@ -189,17 +201,25 @@ class Class parameterTypes Assign EmptyArray.CLASS false branch : + Class + c + parameterTypes + cond-branch cond:c EQ null + true branch : + new NoSuchMethodException("parameter type is null") + false branch : + - var:result= + Decl: result= cond-branch cond:result EQ null Lor (recursivePublicMethods Land Modifier.isPublic(result.getAccessFlags())) true branch : - new NoSuchMethodException + new NoSuchMethodException(name Add " " Add Arrays.toString(parameterTypes)) false branch : return result - func getPublicMethodRecursive() throws: + func getPublicMethodRecursive(name,parameterTypes) throws: for ( ) - var:result=c.getDeclaredMethodInternal(name,parameterTypes) + Decl: result=c.getDeclaredMethodInternal(name,parameterTypes) cond-branch cond:result NE null Land Modifier.isPublic(result.getAccessFlags()) true branch : return result @@ -207,9 +227,9 @@ class Class return findInterfaceMethod(name,parameterTypes) - func getInstanceMethod() throws: NoSuchMethodException IllegalAccessException + func getInstanceMethod(name,parameterTypes) throws: NoSuchMethodException IllegalAccessException for ( ) - var:result=c.getDeclaredMethodInternal(name,parameterTypes) + Decl: result=c.getDeclaredMethodInternal(name,parameterTypes) cond-branch cond:result NE null Land Modifier.isStatic(result.getModifiers()) true branch : return result @@ -217,31 +237,36 @@ class Class return findInterfaceMethod(name,parameterTypes) - func findInterfaceMethod() throws: - var:iftable=ifTable + func findInterfaceMethod(name,parameterTypes) throws: + Decl: iftable=ifTable cond-branch cond:iftable NE null true branch : for ( ) - var:ifc=(Class) + Decl: ifc=(Class) + Decl: result=ifc.getPublicMethodRecursive(name,parameterTypes) + cond-branch cond:result NE null Land Modifier.isPublic(result.getAccessFlags()) + true branch : + return result + false branch : false branch : return null - func getDeclaredConstructor() throws: NoSuchMethodException SecurityException + func getDeclaredConstructor(parameterTypes) throws: NoSuchMethodException SecurityException return getConstructor0(parameterTypes,Member.DECLARED) - func getResourceAsStream() throws: + func getResourceAsStream(name) throws: name Assign resolveName(name) - var:cl=getClassLoader() + Decl: cl=getClassLoader() cond-branch cond:cl EQ null true branch : return ClassLoader.getSystemResourceAsStream(name) false branch : return cl.getResourceAsStream(name) - func getResource() throws: + func getResource(name) throws: name Assign resolveName(name) - var:cl=getClassLoader() + Decl: cl=getClassLoader() cond-branch cond:cl EQ null true branch : return ClassLoader.getSystemResource(name) @@ -250,7 +275,7 @@ class Class return cl.getResource(name) func getProtectionDomain() throws: return null - func resolveName() throws: + func resolveName(name) throws: cond-branch cond:name EQ null true branch : return name @@ -258,13 +283,21 @@ class Class cond-branch cond:name.startsWith("/") true branch : - var:c=this + 
Decl: c=this + while c.isArray() c Assign c.getComponentType() + + Decl: baseName=c.getName() + Decl: index=baseName.lastIndexOf(.) + cond-branch cond:index NE -1 + true branch : + name Assign baseName.substring(0,index).replace(.,/) Add "/" Add name + false branch : false branch : name Assign name.substring(1) return name - func getConstructor0() throws: NoSuchMethodException + func getConstructor0(parameterTypes,which) throws: NoSuchMethodException cond-branch cond:parameterTypes EQ null true branch : parameterTypes Assign EmptyArray.CLASS @@ -275,53 +308,49 @@ class Class parameterTypes cond-branch cond:c EQ null true branch : - new NoSuchMethodException + new NoSuchMethodException("parameter type is null") false branch : - var:result=getDeclaredConstructorInternal(parameterTypes) + Decl: result=getDeclaredConstructorInternal(parameterTypes) cond-branch cond:result EQ null Lor which EQ Member.PUBLIC Land Modifier.isPublic(result.getAccessFlags()) true branch : - new NoSuchMethodException + new NoSuchMethodException(" " Add Arrays.toString(parameterTypes)) false branch : return result - func getDeclaredConstructorInternal() throws: + func getDeclaredConstructorInternal(args) throws: func desiredAssertionStatus() throws: return false func getInnerClassName() throws: - func getInnerClassFlags() throws: + func getInnerClassFlags(defaultValue) throws: func isEnum() throws: - return ( Band ENUM) NE 0 Land EQ java.lang.Enum + return (this.getModifiers() Band ENUM) NE 0 Land this.getSuperclass() EQ java.lang.Enum func getEnumConstants() throws: - var:values=getEnumConstantsShared() + Decl: values=getEnumConstantsShared() return func getEnumConstantsShared() throws: cond-branch cond:isEnum() true branch : - return null - false branch : + return null false branch : return (T[])Enum.getSharedConstants((Class)this) - func cast() throws: + func cast(obj) throws: cond-branch cond:obj NE null Land isInstance(obj) true branch : - new ClassCastException - false branch : + new ClassCastException(cannotCastMsg(obj)) false branch : return (T)obj - func cannotCastMsg() throws: - return "Cannot cast " Add Add " to " Add getName() - func asSubclass() throws: + func cannotCastMsg(obj) throws: + return "Cannot cast " Add obj.getClass().getName() Add " to " Add getName() + func asSubclass(clazz) throws: cond-branch cond:clazz.isAssignableFrom(this) true branch : - return ()this - false branch : - new ClassCastException - - func getAnnotation() throws: + return ()this false branch : + new ClassCastException(this.toString() Add " cannot be cast to " Add clazz.getName()) + func getAnnotation(annotationClass) throws: Objects.requireNonNull(annotationClass) - var:annotation=getDeclaredAnnotation(annotationClass) + Decl: annotation=getDeclaredAnnotation(annotationClass) cond-branch cond:annotation NE null true branch : return annotation @@ -340,10 +369,10 @@ class Class false branch : return null - func isAnnotationPresent() throws: + func isAnnotationPresent(annotationClass) throws: cond-branch cond:annotationClass EQ null true branch : - new NullPointerException + new NullPointerException("annotationClass == null") false branch : cond-branch cond:isDeclaredAnnotationPresent(annotationClass) @@ -363,8 +392,8 @@ class Class false branch : return false - func getAnnotationsByType() throws: - var:annotations= + func getAnnotationsByType(annotationClass) throws: + Decl: annotations= cond-branch cond:annotations.length NE 0 true branch : return annotations @@ -372,7 +401,7 @@ class Class cond-branch 
cond:annotationClass.isDeclaredAnnotationPresent(Inherited) true branch : - var:superClass=getSuperclass() + Decl: superClass=getSuperclass() cond-branch cond:superClass NE null true branch : return superClass.getAnnotationsByType(annotationClass) @@ -382,7 +411,7 @@ class Class return (A[])Array.newInstance(annotationClass,0) func getAnnotations() throws: - var:map=new HashMap + Decl: map=new HashMap() Annotation declaredAnnotation getDeclaredAnnotations() @@ -392,7 +421,7 @@ class Class Annotation declaredAnnotation sup.getDeclaredAnnotations() - var:clazz=declaredAnnotation.annotationType() + Decl: clazz=declaredAnnotation.annotationType() cond-branch cond:map.containsKey(clazz) Land clazz.isDeclaredAnnotationPresent(Inherited) true branch : map.put(clazz,declaredAnnotation) @@ -400,19 +429,23 @@ class Class - var:coll=map.values() + Decl: coll=map.values() return coll.toArray(Annotation,coll.size()) - func getDeclaredAnnotation() throws: + func getDeclaredAnnotation(annotationClass) throws: func getDeclaredAnnotations() throws: - func isDeclaredAnnotationPresent() throws: + func isDeclaredAnnotationPresent(annotationClass) throws: func getSignatureAttribute() throws: - var:annotation=getSignatureAnnotation() + Decl: annotation=getSignatureAnnotation() cond-branch cond:annotation EQ null true branch : return null false branch : - var:result=new StringBuilder + Decl: result=new StringBuilder() + String + s + annotation + result.append(s) return result.toString() func getSignatureAnnotation() throws: @@ -420,11 +453,11 @@ class Class return (accessFlags Band 262144) NE 0 func getAccessFlags() throws: return accessFlags - func getDeclaredMethodInternal() throws: + func getDeclaredMethodInternal(name,args) throws: LocalClasses: class Caches Fields: - genericInterfaces=new BasicLruCache + genericInterfaces=new BasicLruCache(8) Instance Initializer: Constructors: Methods: @@ -434,7 +467,13 @@ class Class Identifier:Method has no decl. Identifier:m has no decl. +Identifier:result has no decl. +Identifier:Class has no decl. +Identifier:c has no decl. Identifier:Class has no decl. Identifier:c has no decl. Identifier:Annotation has no decl. Identifier:declaredAnnotation has no decl. +Identifier:String has no decl. +Identifier:s has no decl. +Identifier:annotation has no decl. diff --git a/src/MapleFE/test/openjdk/Class-3.java b/src/MapleFE/test/java/openjdk/Class-3.java similarity index 100% rename from src/MapleFE/test/openjdk/Class-3.java rename to src/MapleFE/test/java/openjdk/Class-3.java diff --git a/src/MapleFE/test/openjdk/Class-3.java.result b/src/MapleFE/test/java/openjdk/Class-3.java.result similarity index 81% rename from src/MapleFE/test/openjdk/Class-3.java.result rename to src/MapleFE/test/java/openjdk/Class-3.java.result index f9098f16aa989f15fb7789e7587fb0d2d603431b..2d303b495bbeb5dff8564b3783c1e40a45e94e90 100644 --- a/src/MapleFE/test/openjdk/Class-3.java.result +++ b/src/MapleFE/test/java/openjdk/Class-3.java.result @@ -8,7 +8,7 @@ class Class Constructors: Methods: func getDeclaredMethods() throws: - var:result=getDeclaredMethodsUnchecked(false) + Decl: result=getDeclaredMethodsUnchecked(false) Method m result @@ -20,3 +20,4 @@ class Class Identifier:Method has no decl. Identifier:m has no decl. +Identifier:result has no decl. 
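The Class-2/Class-3 result hunks above now record the reconstructed body of `Class#getTypeName()`: a loop that counts array dimensions via `getComponentType()` and then appends one `"[]"` per dimension. As a minimal, hedged sketch of that same pattern (an illustration of the logic the `.result` lines encode, not the OpenJDK/Android source itself; `TypeNameSketch` and `typeNameOf` are made-up names):

```java
// Illustrative sketch of the array-name logic captured in the Class-2.java.result hunk:
// peel off array levels, count them, then append "[]" once per dimension.
public final class TypeNameSketch {
    static String typeNameOf(Class<?> clazz) {
        Class<?> cl = clazz;
        int dimensions = 0;
        while (cl.isArray()) {              // one array level removed per iteration
            dimensions++;
            cl = cl.getComponentType();
        }
        StringBuilder sb = new StringBuilder(cl.getName());
        for (int i = 0; i < dimensions; i++) {
            sb.append("[]");
        }
        return sb.toString();
    }

    public static void main(String[] args) {
        System.out.println(typeNameOf(int[][].class));   // int[][]
        System.out.println(typeNameOf(String[].class));  // java.lang.String[]
    }
}
```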
diff --git a/src/MapleFE/test/openjdk/Class.java b/src/MapleFE/test/java/openjdk/Class.java similarity index 100% rename from src/MapleFE/test/openjdk/Class.java rename to src/MapleFE/test/java/openjdk/Class.java diff --git a/src/MapleFE/test/openjdk/Class.java.result b/src/MapleFE/test/java/openjdk/Class.java.result similarity index 62% rename from src/MapleFE/test/openjdk/Class.java.result rename to src/MapleFE/test/java/openjdk/Class.java.result index d955278969e86f51936a20429807dc3ecf5a320d..15d929fa9918f9b0f835856e0fdb4ab78a8040f6 100644 --- a/src/MapleFE/test/openjdk/Class.java.result +++ b/src/MapleFE/test/java/openjdk/Class.java.result @@ -111,30 +111,82 @@ class Class true branch : return toString() false branch : - var:sb=new StringBuilder + Decl: sb=new StringBuilder() + Decl: modifiers=getModifiers() Band Modifier.classModifiers() + cond-branch cond:modifiers NE 0 + true branch : + sb.append(Modifier.toString(modifiers)) + sb.append( ) + false branch : + cond-branch cond:isAnnotation() + true branch : + sb.append(@) + false branch : - func forName() throws: ClassNotFoundException + cond-branch cond:isInterface() + true branch : + sb.append("interface") + false branch : + cond-branch cond:isEnum() + true branch : + sb.append("enum") false branch : + sb.append("class") + + sb.append( ) + sb.append(getName()) + Decl: typeparms=getTypeParameters() + cond-branch cond:typeparms.length GT 0 + true branch : + Decl: first=true + sb.append(<) + TypeVariable + typeparm + typeparms + cond-branch cond:first + true branch : + sb.append(,) false branch : + + sb.append(typeparm.getTypeName()) + first Assign false + + sb.append(>) + false branch : + + return sb.toString() + + func forName(className) throws: ClassNotFoundException return forName(className,true,VMStack.getCallingClassLoader()) - func forName() throws: ClassNotFoundException + func forName(name,initialize,loader) throws: ClassNotFoundException cond-branch cond:loader EQ null true branch : loader Assign BootClassLoader.getInstance() false branch : - var:result + Decl: result + result Assign classForName(name,initialize,loader) + + ClassNotFoundException + e + Decl: cause=e.getCause() + cond-branch cond:cause instanceof LinkageError + true branch : + (LinkageError)cause + false branch : + + e return result - func classForName() throws: ClassNotFoundException + func classForName(className,shouldInitialize,classLoader) throws: ClassNotFoundException func newInstance() throws: InstantiationException IllegalAccessException - func isInstance() throws: + func isInstance(obj) throws: cond-branch cond:obj EQ null true branch : return false false branch : return isAssignableFrom(obj.getClass()) - func isAssignableFrom() throws: + func isAssignableFrom(cls) throws: cond-branch cond:this EQ cls true branch : return true @@ -149,8 +201,19 @@ class Class false branch : cond-branch cond:isInterface() true branch : - var:iftable=cls.ifTable + Decl: iftable=cls.ifTable + cond-branch cond:iftable NE null + true branch : + for ( ) + cond-branch cond: EQ this + true branch : + return true + false branch : + + false branch : + + return false false branch : cond-branch cond:cls.isInterface() true branch : @@ -165,9 +228,6 @@ class Class return false - - - func isInterface() throws: return (accessFlags Band Modifier.INTERFACE) NE 0 func isArray() throws: @@ -181,11 +241,10 @@ class Class func isSynthetic() throws: return (getModifiers() Band SYNTHETIC) NE 0 func getName() throws: - var:name=this.name + Decl: name=this.name cond-branch cond:name EQ null 
true branch : - this.name Assign name Assign getNameNative() - false branch : + this.name Assign name Assign getNameNative() false branch : return name func getNameNative() throws: @@ -197,13 +256,13 @@ class Class return func getTypeParameters() throws: - var:annotationSignature=getSignatureAttribute() + Decl: annotationSignature=getSignatureAttribute() cond-branch cond:annotationSignature EQ null true branch : return EmptyArray.TYPE_VARIABLE false branch : - var:parser=new GenericSignatureParser + Decl: parser=new GenericSignatureParser(getClassLoader()) parser.parseForClass(this,annotationSignature) return parser.formalTypeParameters func getSuperclass() throws: @@ -214,32 +273,33 @@ class Class return superClass func getGenericSuperclass() throws: - var:genericSuperclass=getSuperclass() + Decl: genericSuperclass=getSuperclass() cond-branch cond:genericSuperclass EQ null true branch : return null false branch : - var:annotationSignature=getSignatureAttribute() + Decl: annotationSignature=getSignatureAttribute() cond-branch cond:annotationSignature NE null true branch : - var:parser=new GenericSignatureParser - + Decl: parser=new GenericSignatureParser(getClassLoader()) + parser.parseForClass(this,annotationSignature) + genericSuperclass Assign parser.superclassType false branch : return Types.getType(genericSuperclass) func getPackage() throws: - var:loader=getClassLoader() + Decl: loader=getClassLoader() cond-branch cond:loader NE null true branch : - var:packageName=getPackageName$() + Decl: packageName=getPackageName$() return false branch : return null func getPackageName$() throws: - var:name=getName() - var:last=name.lastIndexOf(.) + Decl: name=getName() + Decl: last=name.lastIndexOf(.) return func getInterfaces() throws: cond-branch cond:isArray() @@ -247,7 +307,7 @@ class Class return false branch : - var:ifaces=getInterfacesInternal() + Decl: ifaces=getInterfacesInternal() cond-branch cond:ifaces EQ null true branch : return EmptyArray.CLASS @@ -256,13 +316,20 @@ class Class return ifaces func getInterfacesInternal() throws: func getGenericInterfaces() throws: - var:result - Caches.genericInterfaces + Decl: result result Assign Caches.genericInterfaces.get(this) cond-branch cond:result EQ null true branch : - var:annotationSignature=getSignatureAttribute() + Decl: annotationSignature=getSignatureAttribute() + cond-branch cond:annotationSignature EQ null + true branch : + result Assign getInterfaces() + false branch : + Decl: parser=new GenericSignatureParser(getClassLoader()) + parser.parseForClass(this,annotationSignature) + result Assign Types.getTypeArray(parser.interfaceTypes,false) + Caches.genericInterfaces.put(this,result) false branch : @@ -272,12 +339,17 @@ class Class func getModifiers() throws: cond-branch cond:isArray() true branch : - var:componentModifiers= + Decl: componentModifiers=getComponentType().getModifiers() + cond-branch cond:(componentModifiers Band Modifier.INTERFACE) NE 0 + true branch : + componentModifiers BandAssign (Modifier.INTERFACE Bor Modifier.STATIC) + false branch : + return Modifier.ABSTRACT Bor Modifier.FINAL Bor componentModifiers false branch : - var:JAVA_FLAGS_MASK=65535 - var:modifiers= + Decl: JAVA_FLAGS_MASK=65535 + Decl: modifiers=this.getInnerClassFlags(accessFlags Band JAVA_FLAGS_MASK) return modifiers Band JAVA_FLAGS_MASK func getSigners() throws: return null @@ -298,14 +370,13 @@ class Class return getEnclosingConstructorNative() func getEnclosingConstructorNative() throws: func classNameImpliesTopLevel() throws: - return + 
return getName().contains("$") func getDeclaringClass() throws: func getEnclosingClass() throws: func getSimpleName() throws: cond-branch cond:isArray() true branch : - return Add "[]" - false branch : + return getComponentType().getSimpleName() Add "[]" false branch : cond-branch cond:isAnonymousClass() true branch : @@ -317,8 +388,8 @@ class Class return getInnerClassName() false branch : - var:simpleName=getName() - var:dot=simpleName.lastIndexOf(".") + Decl: simpleName=getName() + Decl: dot=simpleName.lastIndexOf(".") cond-branch cond:dot GT 0 true branch : return simpleName.substring(simpleName.lastIndexOf(".") Add 1) @@ -328,9 +399,21 @@ class Class func getTypeName() throws: cond-branch cond:isArray() true branch : - var:cl=this + Decl: cl=this + Decl: dimensions=0 + while cl.isArray() dimensions Inc + + cl Assign cl.getComponentType() + + Decl: sb=new StringBuilder() + sb.append(cl.getName()) + for ( ) + sb.append("[]") + return sb.toString() + Throwable + e false branch : @@ -338,27 +421,28 @@ class Class func getCanonicalName() throws: cond-branch cond:isArray() true branch : - var:canonicalName= + Decl: canonicalName=getComponentType().getCanonicalName() cond-branch cond:canonicalName NE null true branch : - return canonicalName Add "[]" - false branch : + return canonicalName Add "[]" false branch : return null - false branch : cond-branch cond:isLocalOrAnonymousClass() true branch : - return null - false branch : + return null false branch : - var:enclosingClass=getEnclosingClass() + Decl: enclosingClass=getEnclosingClass() cond-branch cond:enclosingClass EQ null true branch : return getName() false branch : - var:enclosingName=enclosingClass.getCanonicalName() + Decl: enclosingName=enclosingClass.getCanonicalName() + cond-branch cond:enclosingName EQ null + true branch : + return null false branch : + return enclosingName Add "." 
Add getSimpleName() func isAnonymousClass() throws: func isLocalClass() throws: @@ -368,7 +452,7 @@ class Class func isLocalOrAnonymousClass() throws: return isLocalClass() Lor isAnonymousClass() func getClasses() throws: - var:result=new ArrayList + Decl: result=new ArrayList() for ( ) Class member @@ -382,27 +466,27 @@ class Class return result.toArray(Class,result.size()) func getFields() throws: SecurityException - var:fields=new ArrayList + Decl: fields=new ArrayList() getPublicFieldsRecursive(fields) return fields.toArray(Field,fields.size()) - func getPublicFieldsRecursive() throws: + func getPublicFieldsRecursive(result) throws: for ( ) Collections.addAll(result,c.getPublicDeclaredFields()) - var:iftable=ifTable + Decl: iftable=ifTable cond-branch cond:iftable NE null true branch : for ( ) - Collections.addAll(result,((Class))getPublicDeclaredFields) + Collections.addAll(result,(Class).getPublicDeclaredFields()) false branch : func getMethods() throws: SecurityException - var:methods=new ArrayList + Decl: methods=new ArrayList() getPublicMethodsInternal(methods) CollectionUtils.removeDuplicates(methods,Method.ORDER_BY_SIGNATURE) return methods.toArray(Method,methods.size()) - func getPublicMethodsInternal() throws: + func getPublicMethodsInternal(result) throws: Collections.addAll(result,getDeclaredMethodsUnchecked(true)) cond-branch cond:isInterface() true branch : @@ -411,40 +495,40 @@ class Class false branch : - var:iftable=ifTable + Decl: iftable=ifTable cond-branch cond:iftable NE null true branch : for ( ) - var:ifc=(Class) + Decl: ifc=(Class) Collections.addAll(result,ifc.getDeclaredMethodsUnchecked(true)) false branch : func getConstructors() throws: SecurityException return getDeclaredConstructorsInternal(true) - func getField() throws: NoSuchFieldException + func getField(name) throws: NoSuchFieldException cond-branch cond:name EQ null true branch : - new NullPointerException + new NullPointerException("name == null") false branch : - var:result=getPublicFieldRecursive(name) + Decl: result=getPublicFieldRecursive(name) cond-branch cond:result EQ null true branch : - new NoSuchFieldException + new NoSuchFieldException(name) false branch : return result - func getPublicFieldRecursive() throws: - func getMethod() throws: NoSuchMethodException SecurityException + func getPublicFieldRecursive(name) throws: + func getMethod(name,parameterTypes) throws: NoSuchMethodException SecurityException return getMethod(name,parameterTypes,true) - func getConstructor() throws: NoSuchMethodException SecurityException + func getConstructor(parameterTypes) throws: NoSuchMethodException SecurityException return getConstructor0(parameterTypes,Member.PUBLIC) func getDeclaredClasses() throws: func getDeclaredFields() throws: - func getDeclaredFieldsUnchecked() throws: + func getDeclaredFieldsUnchecked(publicOnly) throws: func getDeclaredMethods() throws: SecurityException - var:result=getDeclaredMethodsUnchecked(false) + Decl: result=getDeclaredMethodsUnchecked(false) Method m result @@ -452,18 +536,18 @@ class Class m.getParameterTypes() return result - func getDeclaredMethodsUnchecked() throws: + func getDeclaredMethodsUnchecked(publicOnly) throws: func getDeclaredConstructors() throws: SecurityException return getDeclaredConstructorsInternal(false) - func getDeclaredConstructorsInternal() throws: - func getDeclaredField() throws: NoSuchFieldException + func getDeclaredConstructorsInternal(publicOnly) throws: + func getDeclaredField(name) throws: NoSuchFieldException func 
getPublicDeclaredFields() throws: - func getDeclaredMethod() throws: NoSuchMethodException SecurityException + func getDeclaredMethod(name,parameterTypes) throws: NoSuchMethodException SecurityException return getMethod(name,parameterTypes,false) - func getMethod() throws: NoSuchMethodException + func getMethod(name,parameterTypes,recursivePublicMethods) throws: NoSuchMethodException cond-branch cond:name EQ null true branch : - new NullPointerException + new NullPointerException("name == null") false branch : cond-branch cond:parameterTypes EQ null @@ -471,17 +555,25 @@ class Class parameterTypes Assign EmptyArray.CLASS false branch : + Class + c + parameterTypes + cond-branch cond:c EQ null + true branch : + new NoSuchMethodException("parameter type is null") + false branch : + - var:result= + Decl: result= cond-branch cond:result EQ null Lor (recursivePublicMethods Land Modifier.isPublic(result.getAccessFlags())) true branch : - new NoSuchMethodException + new NoSuchMethodException(name Add " " Add Arrays.toString(parameterTypes)) false branch : return result - func getPublicMethodRecursive() throws: + func getPublicMethodRecursive(name,parameterTypes) throws: for ( ) - var:result=c.getDeclaredMethodInternal(name,parameterTypes) + Decl: result=c.getDeclaredMethodInternal(name,parameterTypes) cond-branch cond:result NE null Land Modifier.isPublic(result.getAccessFlags()) true branch : return result @@ -489,9 +581,9 @@ class Class return findInterfaceMethod(name,parameterTypes) - func getInstanceMethod() throws: NoSuchMethodException IllegalAccessException + func getInstanceMethod(name,parameterTypes) throws: NoSuchMethodException IllegalAccessException for ( ) - var:result=c.getDeclaredMethodInternal(name,parameterTypes) + Decl: result=c.getDeclaredMethodInternal(name,parameterTypes) cond-branch cond:result NE null Land Modifier.isStatic(result.getModifiers()) true branch : return result @@ -499,31 +591,36 @@ class Class return findInterfaceMethod(name,parameterTypes) - func findInterfaceMethod() throws: - var:iftable=ifTable + func findInterfaceMethod(name,parameterTypes) throws: + Decl: iftable=ifTable cond-branch cond:iftable NE null true branch : for ( ) - var:ifc=(Class) + Decl: ifc=(Class) + Decl: result=ifc.getPublicMethodRecursive(name,parameterTypes) + cond-branch cond:result NE null Land Modifier.isPublic(result.getAccessFlags()) + true branch : + return result + false branch : false branch : return null - func getDeclaredConstructor() throws: NoSuchMethodException SecurityException + func getDeclaredConstructor(parameterTypes) throws: NoSuchMethodException SecurityException return getConstructor0(parameterTypes,Member.DECLARED) - func getResourceAsStream() throws: + func getResourceAsStream(name) throws: name Assign resolveName(name) - var:cl=getClassLoader() + Decl: cl=getClassLoader() cond-branch cond:cl EQ null true branch : return ClassLoader.getSystemResourceAsStream(name) false branch : return cl.getResourceAsStream(name) - func getResource() throws: + func getResource(name) throws: name Assign resolveName(name) - var:cl=getClassLoader() + Decl: cl=getClassLoader() cond-branch cond:cl EQ null true branch : return ClassLoader.getSystemResource(name) @@ -532,7 +629,7 @@ class Class return cl.getResource(name) func getProtectionDomain() throws: return null - func resolveName() throws: + func resolveName(name) throws: cond-branch cond:name EQ null true branch : return name @@ -540,13 +637,21 @@ class Class cond-branch cond:name.startsWith("/") true branch : - var:c=this + 
Decl: c=this + while c.isArray() c Assign c.getComponentType() + + Decl: baseName=c.getName() + Decl: index=baseName.lastIndexOf(.) + cond-branch cond:index NE -1 + true branch : + name Assign baseName.substring(0,index).replace(.,/) Add "/" Add name + false branch : false branch : name Assign name.substring(1) return name - func getConstructor0() throws: NoSuchMethodException + func getConstructor0(parameterTypes,which) throws: NoSuchMethodException cond-branch cond:parameterTypes EQ null true branch : parameterTypes Assign EmptyArray.CLASS @@ -557,53 +662,49 @@ class Class parameterTypes cond-branch cond:c EQ null true branch : - new NoSuchMethodException + new NoSuchMethodException("parameter type is null") false branch : - var:result=getDeclaredConstructorInternal(parameterTypes) + Decl: result=getDeclaredConstructorInternal(parameterTypes) cond-branch cond:result EQ null Lor which EQ Member.PUBLIC Land Modifier.isPublic(result.getAccessFlags()) true branch : - new NoSuchMethodException + new NoSuchMethodException(" " Add Arrays.toString(parameterTypes)) false branch : return result - func getDeclaredConstructorInternal() throws: + func getDeclaredConstructorInternal(args) throws: func desiredAssertionStatus() throws: return false func getInnerClassName() throws: - func getInnerClassFlags() throws: + func getInnerClassFlags(defaultValue) throws: func isEnum() throws: - return ( Band ENUM) NE 0 Land EQ java.lang.Enum + return (this.getModifiers() Band ENUM) NE 0 Land this.getSuperclass() EQ java.lang.Enum func getEnumConstants() throws: - var:values=getEnumConstantsShared() + Decl: values=getEnumConstantsShared() return func getEnumConstantsShared() throws: cond-branch cond:isEnum() true branch : - return null - false branch : + return null false branch : return (T[])Enum.getSharedConstants((Class)this) - func cast() throws: + func cast(obj) throws: cond-branch cond:obj NE null Land isInstance(obj) true branch : - new ClassCastException - false branch : + new ClassCastException(cannotCastMsg(obj)) false branch : return (T)obj - func cannotCastMsg() throws: - return "Cannot cast " Add Add " to " Add getName() - func asSubclass() throws: + func cannotCastMsg(obj) throws: + return "Cannot cast " Add obj.getClass().getName() Add " to " Add getName() + func asSubclass(clazz) throws: cond-branch cond:clazz.isAssignableFrom(this) true branch : - return ()this - false branch : - new ClassCastException - - func getAnnotation() throws: + return ()this false branch : + new ClassCastException(this.toString() Add " cannot be cast to " Add clazz.getName()) + func getAnnotation(annotationClass) throws: Objects.requireNonNull(annotationClass) - var:annotation=getDeclaredAnnotation(annotationClass) + Decl: annotation=getDeclaredAnnotation(annotationClass) cond-branch cond:annotation NE null true branch : return annotation @@ -622,10 +723,10 @@ class Class false branch : return null - func isAnnotationPresent() throws: + func isAnnotationPresent(annotationClass) throws: cond-branch cond:annotationClass EQ null true branch : - new NullPointerException + new NullPointerException("annotationClass == null") false branch : cond-branch cond:isDeclaredAnnotationPresent(annotationClass) @@ -645,8 +746,8 @@ class Class false branch : return false - func getAnnotationsByType() throws: - var:annotations= + func getAnnotationsByType(annotationClass) throws: + Decl: annotations= cond-branch cond:annotations.length NE 0 true branch : return annotations @@ -654,7 +755,7 @@ class Class cond-branch 
cond:annotationClass.isDeclaredAnnotationPresent(Inherited) true branch : - var:superClass=getSuperclass() + Decl: superClass=getSuperclass() cond-branch cond:superClass NE null true branch : return superClass.getAnnotationsByType(annotationClass) @@ -664,7 +765,7 @@ class Class return (A[])Array.newInstance(annotationClass,0) func getAnnotations() throws: - var:map=new HashMap + Decl: map=new HashMap() Annotation declaredAnnotation getDeclaredAnnotations() @@ -674,7 +775,7 @@ class Class Annotation declaredAnnotation sup.getDeclaredAnnotations() - var:clazz=declaredAnnotation.annotationType() + Decl: clazz=declaredAnnotation.annotationType() cond-branch cond:map.containsKey(clazz) Land clazz.isDeclaredAnnotationPresent(Inherited) true branch : map.put(clazz,declaredAnnotation) @@ -682,19 +783,23 @@ class Class - var:coll=map.values() + Decl: coll=map.values() return coll.toArray(Annotation,coll.size()) - func getDeclaredAnnotation() throws: + func getDeclaredAnnotation(annotationClass) throws: func getDeclaredAnnotations() throws: - func isDeclaredAnnotationPresent() throws: + func isDeclaredAnnotationPresent(annotationClass) throws: func getSignatureAttribute() throws: - var:annotation=getSignatureAnnotation() + Decl: annotation=getSignatureAnnotation() cond-branch cond:annotation EQ null true branch : return null false branch : - var:result=new StringBuilder + Decl: result=new StringBuilder() + String + s + annotation + result.append(s) return result.toString() func getSignatureAnnotation() throws: @@ -702,11 +807,11 @@ class Class return (accessFlags Band 262144) NE 0 func getAccessFlags() throws: return accessFlags - func getDeclaredMethodInternal() throws: + func getDeclaredMethodInternal(name,args) throws: LocalClasses: class Caches Fields: - genericInterfaces=new BasicLruCache + genericInterfaces=new BasicLruCache(8) Instance Initializer: Constructors: Methods: @@ -714,9 +819,20 @@ class Class LocalInterfaces: LocalInterfaces: +Identifier:result has no decl. +Identifier:ClassNotFoundException has no decl. +Identifier:e has no decl. +Identifier:e has no decl. +Identifier:result has no decl. Identifier:Method has no decl. Identifier:m has no decl. +Identifier:result has no decl. +Identifier:Class has no decl. +Identifier:c has no decl. Identifier:Class has no decl. Identifier:c has no decl. Identifier:Annotation has no decl. Identifier:declaredAnnotation has no decl. +Identifier:String has no decl. +Identifier:s has no decl. +Identifier:annotation has no decl. diff --git a/src/MapleFE/test/openjdk/ClassCastException.java b/src/MapleFE/test/java/openjdk/ClassCastException.java similarity index 100% rename from src/MapleFE/test/openjdk/ClassCastException.java rename to src/MapleFE/test/java/openjdk/ClassCastException.java diff --git a/src/MapleFE/test/openjdk/ClassCastException.java.result b/src/MapleFE/test/java/openjdk/ClassCastException.java.result similarity index 72% rename from src/MapleFE/test/openjdk/ClassCastException.java.result rename to src/MapleFE/test/java/openjdk/ClassCastException.java.result index f6924ba6c8d0a3f64851ac35dda1cc1f8f7b0b45..51ba376909b673710ac93cfe8dd5b9797f8df885 100644 --- a/src/MapleFE/test/openjdk/ClassCastException.java.result +++ b/src/MapleFE/test/java/openjdk/ClassCastException.java.result @@ -1,17 +1,16 @@ Matched 5 tokens. -Matched 44 tokens. +Matched 43 tokens. 
============= Module =========== == Sub Tree == package java.lang == Sub Tree == class ClassCastException Fields: - serialVersionUID=Sub - 832051876 + serialVersionUID=-832051876 Instance Initializer: Constructors: constructor ClassCastException() throws: - constructor ClassCastException() throws: + constructor ClassCastException(s) throws: Methods: LocalClasses: LocalInterfaces: diff --git a/src/MapleFE/test/openjdk/ClassCircularityError.java b/src/MapleFE/test/java/openjdk/ClassCircularityError.java similarity index 100% rename from src/MapleFE/test/openjdk/ClassCircularityError.java rename to src/MapleFE/test/java/openjdk/ClassCircularityError.java diff --git a/src/MapleFE/test/openjdk/ClassCircularityError.java.result b/src/MapleFE/test/java/openjdk/ClassCircularityError.java.result similarity index 86% rename from src/MapleFE/test/openjdk/ClassCircularityError.java.result rename to src/MapleFE/test/java/openjdk/ClassCircularityError.java.result index 83db87477c031158210b810f224ec4191755f058..7982bbd80765a5f334f08732eab60e5f54215fcf 100644 --- a/src/MapleFE/test/openjdk/ClassCircularityError.java.result +++ b/src/MapleFE/test/java/openjdk/ClassCircularityError.java.result @@ -10,7 +10,7 @@ class ClassCircularityError Instance Initializer: Constructors: constructor ClassCircularityError() throws: - constructor ClassCircularityError() throws: + constructor ClassCircularityError(s) throws: Methods: LocalClasses: LocalInterfaces: diff --git a/src/MapleFE/test/openjdk/ClassFormatError.java b/src/MapleFE/test/java/openjdk/ClassFormatError.java similarity index 100% rename from src/MapleFE/test/openjdk/ClassFormatError.java rename to src/MapleFE/test/java/openjdk/ClassFormatError.java diff --git a/src/MapleFE/test/openjdk/ClassFormatError.java.result b/src/MapleFE/test/java/openjdk/ClassFormatError.java.result similarity index 72% rename from src/MapleFE/test/openjdk/ClassFormatError.java.result rename to src/MapleFE/test/java/openjdk/ClassFormatError.java.result index 865999f63df951b27a8c2a422eb5b3a14179ec5a..2a347889ce0f2ca65e193a48128bcfd04f82f4fe 100644 --- a/src/MapleFE/test/openjdk/ClassFormatError.java.result +++ b/src/MapleFE/test/java/openjdk/ClassFormatError.java.result @@ -1,17 +1,16 @@ Matched 5 tokens. -Matched 44 tokens. +Matched 43 tokens. 
============= Module =========== == Sub Tree == package java.lang == Sub Tree == class ClassFormatError Fields: - serialVersionUID=Sub - -344126837 + serialVersionUID=344126837 Instance Initializer: Constructors: constructor ClassFormatError() throws: - constructor ClassFormatError() throws: + constructor ClassFormatError(s) throws: Methods: LocalClasses: LocalInterfaces: diff --git a/src/MapleFE/test/openjdk/ClassLoader.java b/src/MapleFE/test/java/openjdk/ClassLoader.java similarity index 100% rename from src/MapleFE/test/openjdk/ClassLoader.java rename to src/MapleFE/test/java/openjdk/ClassLoader.java diff --git a/src/MapleFE/test/openjdk/ClassLoader.java.result b/src/MapleFE/test/java/openjdk/ClassLoader.java.result similarity index 57% rename from src/MapleFE/test/openjdk/ClassLoader.java.result rename to src/MapleFE/test/java/openjdk/ClassLoader.java.result index 70912ebe161e0b4b5972a9499fa7f9766efb01fe..185be8e8e980c216e0e7c7636d510e71897251bf 100644 --- a/src/MapleFE/test/openjdk/ClassLoader.java.result +++ b/src/MapleFE/test/java/openjdk/ClassLoader.java.result @@ -115,26 +115,35 @@ import sun.security.util.SecurityConstants == Sub Tree == class ClassLoader Fields: - proxyCache=new HashMap parent packages=new HashMap allocator classTable + proxyCache=new HashMap() parent packages=new HashMap() allocator classTable Instance Initializer: Constructors: - constructor ClassLoader() throws: + constructor ClassLoader(unused,parent) throws: this.parent Assign parent - constructor ClassLoader() throws: + constructor ClassLoader(parent) throws: constructor ClassLoader() throws: Methods: func createSystemClassLoader() throws: - var:classPath=System.getProperty("java.class.path",".") - var:librarySearchPath=System.getProperty("java.library.path","") - return new PathClassLoader + Decl: classPath=System.getProperty("java.class.path",".") + Decl: librarySearchPath=System.getProperty("java.library.path","") + return new PathClassLoader(classPath,librarySearchPath,BootClassLoader.getInstance()) func checkCreateClassLoader() throws: return null - func loadClass() throws: ClassNotFoundException + func loadClass(name) throws: ClassNotFoundException return loadClass(name,false) - func loadClass() throws: ClassNotFoundException - var:c=findLoadedClass(name) + func loadClass(name,resolve) throws: ClassNotFoundException + Decl: c=findLoadedClass(name) cond-branch cond:c EQ null true branch : + cond-branch cond:parent NE null + true branch : + c Assign parent.loadClass(name,false) + false branch : + c Assign findBootstrapClassOrNull(name) + + + ClassNotFoundException + e cond-branch cond:c EQ null true branch : @@ -144,33 +153,31 @@ class ClassLoader false branch : return c - func findClass() throws: ClassNotFoundException - new ClassNotFoundException - func defineClass() throws: ClassFormatError - new UnsupportedOperationException - func defineClass() throws: ClassFormatError - new UnsupportedOperationException - func defineClass() throws: ClassFormatError - new UnsupportedOperationException - func defineClass() throws: ClassFormatError - new UnsupportedOperationException - func resolveClass() throws: - func findSystemClass() throws: ClassNotFoundException + func findClass(name) throws: ClassNotFoundException + new ClassNotFoundException(name) + func defineClass(b,off,len) throws: ClassFormatError + new UnsupportedOperationException("can't load this type of class file") + func defineClass(name,b,off,len) throws: ClassFormatError + new UnsupportedOperationException("can't load this type of class file") + 
func defineClass(name,b,off,len,protectionDomain) throws: ClassFormatError + new UnsupportedOperationException("can't load this type of class file") + func defineClass(name,b,protectionDomain) throws: ClassFormatError + new UnsupportedOperationException("can't load this type of class file") + func resolveClass(c) throws: + func findSystemClass(name) throws: ClassNotFoundException return Class.forName(name,false,getSystemClassLoader()) - func findBootstrapClassOrNull() throws: + func findBootstrapClassOrNull(name) throws: return null - func findLoadedClass() throws: - var:loader + func findLoadedClass(name) throws: + Decl: loader cond-branch cond:this EQ BootClassLoader.getInstance() true branch : - loader Assign null - false branch : + loader Assign null false branch : loader Assign this - return VMClassLoader.findLoadedClass(loader,name) - func setSigners() throws: - func getResource() throws: - var:url + func setSigners(c,signers) throws: + func getResource(name) throws: + Decl: url cond-branch cond:parent NE null true branch : url Assign parent.getResource(name) @@ -183,8 +190,8 @@ class ClassLoader false branch : return url - func getResources() throws: IOException - var:tmp=() + func getResources(name) throws: IOException + Decl: tmp=() cond-branch cond:parent NE null true branch : Assign parent.getResources(name) @@ -192,70 +199,80 @@ class ClassLoader Assign getBootstrapResources(name) Assign findResources(name) - return new CompoundEnumeration - func findResource() throws: + return new CompoundEnumeration(tmp) + func findResource(name) throws: return null - func findResources() throws: IOException + func findResources(name) throws: IOException return java.util.Collections.emptyEnumeration() func registerAsParallelCapable() throws: return true - func getSystemResource() throws: - var:system=getSystemClassLoader() + func getSystemResource(name) throws: + Decl: system=getSystemClassLoader() cond-branch cond:system EQ null true branch : return getBootstrapResource(name) false branch : return system.getResource(name) - func getSystemResources() throws: IOException - var:system=getSystemClassLoader() + func getSystemResources(name) throws: IOException + Decl: system=getSystemClassLoader() cond-branch cond:system EQ null true branch : return getBootstrapResources(name) false branch : return system.getResources(name) - func getBootstrapResource() throws: + func getBootstrapResource(name) throws: return null - func getBootstrapResources() throws: IOException + func getBootstrapResources(name) throws: IOException return null - func getResourceAsStream() throws: - var:url=getResource(name) + func getResourceAsStream(name) throws: + Decl: url=getResource(name) return + IOException + e + return null - func getSystemResourceAsStream() throws: - var:url=getSystemResource(name) + func getSystemResourceAsStream(name) throws: + Decl: url=getSystemResource(name) return + IOException + e + return null func getParent() throws: return parent func getSystemClassLoader() throws: return SystemClassLoader.loader - func definePackage() throws: IllegalArgumentException - packages - var:pkg=packages.get(name) + func definePackage(name,specTitle,specVersion,specVendor,implTitle,implVersion,implVendor,sealBase) throws: IllegalArgumentException + Decl: pkg=packages.get(name) + cond-branch cond:pkg NE null + true branch : + new IllegalArgumentException(name) + false branch : + pkg Assign new Package(name,specTitle,specVersion,specVendor,implTitle,implVersion,implVendor,sealBase,this) + packages.put(name,pkg) + 
return pkg - func getPackage() throws: - var:pkg - packages + func getPackage(name) throws: + Decl: pkg pkg Assign packages.get(name) return pkg func getPackages() throws: - var:map - packages - map Assign new HashMap + Decl: map + map Assign new HashMap(packages) - var:pkgs - return - func findLibrary() throws: + Decl: pkgs + return map.values().toArray(Package,map.size()) + func findLibrary(libname) throws: return null - func setDefaultAssertionStatus() throws: - func setPackageAssertionStatus() throws: - func setClassAssertionStatus() throws: + func setDefaultAssertionStatus(enabled) throws: + func setPackageAssertionStatus(packageName,enabled) throws: + func setClassAssertionStatus(className,enabled) throws: func clearAssertionStatus() throws: LocalClasses: class SystemClassLoader @@ -279,37 +296,41 @@ class BootClassLoader func getInstance() throws: cond-branch cond:instance EQ null true branch : - instance Assign new BootClassLoader + instance Assign new BootClassLoader() false branch : return instance - func findClass() throws: ClassNotFoundException + func findClass(name) throws: ClassNotFoundException return Class.classForName(name,false,null) - func findResource() throws: + func findResource(name) throws: return VMClassLoader.getResource(name) - func findResources() throws: IOException + func findResources(resName) throws: IOException return Collections.enumeration(VMClassLoader.getResources(resName)) - func getPackage() throws: + func getPackage(name) throws: cond-branch cond:name NE null Land name.isEmpty() true branch : - this - var:pack= + Decl: pack=super.getPackage(name) + cond-branch cond:pack EQ null + true branch : + pack Assign definePackage(name,"Unknown","0.0","Unknown","Unknown","0.0","Unknown",null) + false branch : + return pack false branch : return null - func getResource() throws: + func getResource(resName) throws: return findResource(resName) - func loadClass() throws: ClassNotFoundException - var:clazz=findLoadedClass(className) + func loadClass(className,resolve) throws: ClassNotFoundException + Decl: clazz=findLoadedClass(className) cond-branch cond:clazz EQ null true branch : clazz Assign findClass(className) false branch : return clazz - func getResources() throws: IOException + func getResources(resName) throws: IOException return findResources(resName) LocalClasses: LocalInterfaces: @@ -319,4 +340,12 @@ UserType:UnsupportedOperationException has no decl. UserType:UnsupportedOperationException has no decl. UserType:UnsupportedOperationException has no decl. UserType:UnsupportedOperationException has no decl. +Identifier:IOException has no decl. +Identifier:e has no decl. +Identifier:IOException has no decl. +Identifier:e has no decl. +Identifier:pkg has no decl. +UserType:Package has no decl. +Identifier:pkg has no decl. +Identifier:map has no decl. UserType:HashMap has no decl. 
diff --git a/src/MapleFE/test/openjdk/ClassNotFoundException.java b/src/MapleFE/test/java/openjdk/ClassNotFoundException.java similarity index 100% rename from src/MapleFE/test/openjdk/ClassNotFoundException.java rename to src/MapleFE/test/java/openjdk/ClassNotFoundException.java diff --git a/src/MapleFE/test/openjdk/ClassNotFoundException.java.result b/src/MapleFE/test/java/openjdk/ClassNotFoundException.java.result similarity index 80% rename from src/MapleFE/test/openjdk/ClassNotFoundException.java.result rename to src/MapleFE/test/java/openjdk/ClassNotFoundException.java.result index 3fa59df1521e1bc42fd7830f9cb1fa2f40e2aad3..2fb746eda8dc24d8092ee275bdddd7d7cb8a4176 100644 --- a/src/MapleFE/test/openjdk/ClassNotFoundException.java.result +++ b/src/MapleFE/test/java/openjdk/ClassNotFoundException.java.result @@ -10,8 +10,8 @@ class ClassNotFoundException Instance Initializer: Constructors: constructor ClassNotFoundException() throws: - constructor ClassNotFoundException() throws: - constructor ClassNotFoundException() throws: + constructor ClassNotFoundException(s) throws: + constructor ClassNotFoundException(s,ex) throws: this.ex Assign ex Methods: func getException() throws: diff --git a/src/MapleFE/test/openjdk/CloneNotSupportedException.java b/src/MapleFE/test/java/openjdk/CloneNotSupportedException.java similarity index 100% rename from src/MapleFE/test/openjdk/CloneNotSupportedException.java rename to src/MapleFE/test/java/openjdk/CloneNotSupportedException.java diff --git a/src/MapleFE/test/openjdk/CloneNotSupportedException.java.result b/src/MapleFE/test/java/openjdk/CloneNotSupportedException.java.result similarity index 85% rename from src/MapleFE/test/openjdk/CloneNotSupportedException.java.result rename to src/MapleFE/test/java/openjdk/CloneNotSupportedException.java.result index e9dbfc2a87bc8c3893fa28cea8dd811e372d51c4..b63bfc1f9a54290068194a116ccfe561190c4612 100644 --- a/src/MapleFE/test/openjdk/CloneNotSupportedException.java.result +++ b/src/MapleFE/test/java/openjdk/CloneNotSupportedException.java.result @@ -10,7 +10,7 @@ class CloneNotSupportedException Instance Initializer: Constructors: constructor CloneNotSupportedException() throws: - constructor CloneNotSupportedException() throws: + constructor CloneNotSupportedException(s) throws: Methods: LocalClasses: LocalInterfaces: diff --git a/src/MapleFE/test/openjdk/Cloneable.java b/src/MapleFE/test/java/openjdk/Cloneable.java similarity index 100% rename from src/MapleFE/test/openjdk/Cloneable.java rename to src/MapleFE/test/java/openjdk/Cloneable.java diff --git a/src/MapleFE/test/openjdk/Cloneable.java.result b/src/MapleFE/test/java/openjdk/Cloneable.java.result similarity index 100% rename from src/MapleFE/test/openjdk/Cloneable.java.result rename to src/MapleFE/test/java/openjdk/Cloneable.java.result diff --git a/src/MapleFE/test/openjdk/Comparable.java b/src/MapleFE/test/java/openjdk/Comparable.java similarity index 100% rename from src/MapleFE/test/openjdk/Comparable.java rename to src/MapleFE/test/java/openjdk/Comparable.java diff --git a/src/MapleFE/test/openjdk/Comparable.java.result b/src/MapleFE/test/java/openjdk/Comparable.java.result similarity index 86% rename from src/MapleFE/test/openjdk/Comparable.java.result rename to src/MapleFE/test/java/openjdk/Comparable.java.result index 2d737cec543cbff17120c020edeaa87d83f005ef..1a4f72012690673b17f68a7e024fb72ffe4548e0 100644 --- a/src/MapleFE/test/openjdk/Comparable.java.result +++ b/src/MapleFE/test/java/openjdk/Comparable.java.result @@ -11,5 +11,5 @@ interface 
Comparable Fields: Methods: - func compareTo() throws: + func compareTo(o) throws: diff --git a/src/MapleFE/test/openjdk/Compiler.java b/src/MapleFE/test/java/openjdk/Compiler.java similarity index 100% rename from src/MapleFE/test/openjdk/Compiler.java rename to src/MapleFE/test/java/openjdk/Compiler.java diff --git a/src/MapleFE/test/openjdk/Compiler.java.result b/src/MapleFE/test/java/openjdk/Compiler.java.result similarity index 75% rename from src/MapleFE/test/openjdk/Compiler.java.result rename to src/MapleFE/test/java/openjdk/Compiler.java.result index d8c352271fda6a7d2612ae65351aaa16fe264269..a6b3bc5da59adcff29ccbede3941e1593d56b599 100644 --- a/src/MapleFE/test/openjdk/Compiler.java.result +++ b/src/MapleFE/test/java/openjdk/Compiler.java.result @@ -11,11 +11,11 @@ class Compiler Constructors: constructor Compiler() throws: Methods: - func compileClass() throws: + func compileClass(classToCompile) throws: return false - func compileClasses() throws: + func compileClasses(nameRoot) throws: return false - func command() throws: + func command(cmd) throws: return null func enable() throws: func disable() throws: diff --git a/src/MapleFE/test/openjdk/Deprecated.java b/src/MapleFE/test/java/openjdk/Deprecated.java similarity index 100% rename from src/MapleFE/test/openjdk/Deprecated.java rename to src/MapleFE/test/java/openjdk/Deprecated.java diff --git a/src/MapleFE/test/openjdk/Deprecated.java.result b/src/MapleFE/test/java/openjdk/Deprecated.java.result similarity index 100% rename from src/MapleFE/test/openjdk/Deprecated.java.result rename to src/MapleFE/test/java/openjdk/Deprecated.java.result diff --git a/src/MapleFE/test/openjdk/Double-2.java b/src/MapleFE/test/java/openjdk/Double-2.java similarity index 100% rename from src/MapleFE/test/openjdk/Double-2.java rename to src/MapleFE/test/java/openjdk/Double-2.java diff --git a/src/MapleFE/test/openjdk/Double-2.java.result b/src/MapleFE/test/java/openjdk/Double-2.java.result similarity index 89% rename from src/MapleFE/test/openjdk/Double-2.java.result rename to src/MapleFE/test/java/openjdk/Double-2.java.result index 8d239862b275dd1b9801953098cfd98e7a63dc9a..65efd20c0deac2928bdfa238f6a21a7b51b3f2f9 100644 --- a/src/MapleFE/test/openjdk/Double-2.java.result +++ b/src/MapleFE/test/java/openjdk/Double-2.java.result @@ -7,7 +7,7 @@ class Double Instance Initializer: Constructors: Methods: - func equals() throws: + func equals(obj) throws: return doubleToLongBits((Double)obj.value) EQ doubleToLongBits(value) LocalClasses: LocalInterfaces: diff --git a/src/MapleFE/test/openjdk/Double.java b/src/MapleFE/test/java/openjdk/Double.java similarity index 100% rename from src/MapleFE/test/openjdk/Double.java rename to src/MapleFE/test/java/openjdk/Double.java diff --git a/src/MapleFE/test/openjdk/Double.java.result b/src/MapleFE/test/java/openjdk/Double.java.result similarity index 42% rename from src/MapleFE/test/openjdk/Double.java.result rename to src/MapleFE/test/java/openjdk/Double.java.result index fecc8cc42968c1c08b816502b7b3c098d5fc2fe9..7092f2aa51d3f61a8f59e3870d43aa2cdd0cc09c 100644 --- a/src/MapleFE/test/openjdk/Double.java.result +++ b/src/MapleFE/test/java/openjdk/Double.java.result @@ -2,7 +2,7 @@ Matched 5 tokens. Matched 12 tokens. Matched 19 tokens. Matched 26 tokens. -Matched 976 tokens. +Matched 972 tokens. 
============= Module =========== == Sub Tree == package java.lang @@ -15,37 +15,52 @@ import sun.misc.DoubleConsts == Sub Tree == class Double Fields: - POSITIVE_INFINITY= NEGATIVE_INFINITY= NaN= MAX_VALUE=1.79769e+308 MIN_NORMAL=2.22507e-308 MIN_VALUE=4.94066e-324 MAX_EXPONENT=1023 MIN_EXPONENT=Sub - 1022 SIZE=64 BYTES= TYPE=() value serialVersionUID=Sub - -694942468 + POSITIVE_INFINITY=1 Div 0 NEGATIVE_INFINITY=-1 Div 0 NaN=0 Div 0 MAX_VALUE=1.79769e+308 MIN_NORMAL=2.22507e-308 MIN_VALUE=4.94066e-324 MAX_EXPONENT=1023 MIN_EXPONENT=-1022 SIZE=64 BYTES=SIZE Div Byte.SIZE TYPE=()double.getComponentType() value serialVersionUID=694942468 Instance Initializer: Constructors: - constructor Double() throws: + constructor Double(value) throws: this.value Assign value - constructor Double() throws: + constructor Double(s) throws: value Assign parseDouble(s) Methods: - func toString() throws: + func toString(d) throws: return FloatingDecimal.toJavaFormatString(d) - func toHexString() throws: + func toHexString(d) throws: cond-branch cond:isFinite(d) true branch : - return Double.toString(d) - false branch : - var:answer=new StringBuilder + return Double.toString(d) false branch : + Decl: answer=new StringBuilder(24) + cond-branch cond:Math.copySign(1,d) EQ -1 + true branch : + answer.append("-") false branch : + + answer.append("0x") + d Assign Math.abs(d) + cond-branch cond:d EQ 0 + true branch : + answer.append("0.0p0") + false branch : + Decl: subnormal=(d LT DoubleConsts.MIN_NORMAL) + Decl: signifBits=(Double.doubleToLongBits(d) Band DoubleConsts.SIGNIF_BIT_MASK) Bor 0 + answer.append(subnormal,"0.","1.") + Decl: signif=Long.toHexString(signifBits).substring(3,16) + answer.append(signif.equals("0000000000000"),"0",signif.replaceFirst("0{1,12}$","")) + answer.append(p) + answer.append(subnormal,DoubleConsts.MIN_EXPONENT,Math.getExponent(d)) + return answer.toString() - func valueOf() throws: NumberFormatException - return new Double - func valueOf() throws: - return new Double - func parseDouble() throws: NumberFormatException + func valueOf(s) throws: NumberFormatException + return new Double(parseDouble(s)) + func valueOf(d) throws: + return new Double(d) + func parseDouble(s) throws: NumberFormatException return FloatingDecimal.parseDouble(s) - func isNaN() throws: + func isNaN(v) throws: return (v NE v) - func isInfinite() throws: + func isInfinite(v) throws: return (v EQ POSITIVE_INFINITY) Lor (v EQ NEGATIVE_INFINITY) - func isFinite() throws: + func isFinite(d) throws: return Math.abs(d) LE DoubleConsts.MAX_VALUE func isNaN() throws: return isNaN(value) @@ -67,43 +82,39 @@ class Double return value func hashCode() throws: return Double.hashCode(value) - func hashCode() throws: - var:bits=doubleToLongBits(value) + func hashCode(value) throws: + Decl: bits=doubleToLongBits(value) return (int)(bits Bxor (bits Zext 32)) - func equals() throws: - return () Land (doubleToLongBits((Double)obj.value) EQ doubleToLongBits(value)) - func doubleToLongBits() throws: - var:result=doubleToRawLongBits(value) + func equals(obj) throws: + return (obj instanceof Double) Land (doubleToLongBits((Double)obj.value) EQ doubleToLongBits(value)) + func doubleToLongBits(value) throws: + Decl: result=doubleToRawLongBits(value) cond-branch cond:((result Band DoubleConsts.EXP_BIT_MASK) EQ DoubleConsts.EXP_BIT_MASK) Land (result Band DoubleConsts.SIGNIF_BIT_MASK) NE 0 true branch : - result Assign 0 - false branch : + result Assign 0 false branch : return result - func doubleToRawLongBits() throws: - func 
longBitsToDouble() throws: - func compareTo() throws: + func doubleToRawLongBits(value) throws: + func longBitsToDouble(bits) throws: + func compareTo(anotherDouble) throws: return Double.compare(value,anotherDouble.value) - func compare() throws: + func compare(d1,d2) throws: cond-branch cond:d1 LT d2 true branch : - return Sub - 1 - false branch : + return -1 false branch : cond-branch cond:d1 GT d2 true branch : - return 1 - false branch : + return 1 false branch : - var:thisBits=Double.doubleToLongBits(d1) - var:anotherBits=Double.doubleToLongBits(d2) + Decl: thisBits=Double.doubleToLongBits(d1) + Decl: anotherBits=Double.doubleToLongBits(d2) return () - func sum() throws: + func sum(a,b) throws: return a Add b - func max() throws: + func max(a,b) throws: return Math.max(a,b) - func min() throws: + func min(a,b) throws: return Math.min(a,b) LocalClasses: LocalInterfaces: diff --git a/src/MapleFE/test/openjdk/Enum.java b/src/MapleFE/test/java/openjdk/Enum.java similarity index 100% rename from src/MapleFE/test/openjdk/Enum.java rename to src/MapleFE/test/java/openjdk/Enum.java diff --git a/src/MapleFE/test/openjdk/Enum.java.result b/src/MapleFE/test/java/openjdk/Enum.java.result similarity index 68% rename from src/MapleFE/test/openjdk/Enum.java.result rename to src/MapleFE/test/java/openjdk/Enum.java.result index c3a82a8bdf1800105532533ff0d0fb9bd97fdf10..fa6cb1d2de4c2b14419e990aa4cafbcc07f16cf7 100644 --- a/src/MapleFE/test/openjdk/Enum.java.result +++ b/src/MapleFE/test/java/openjdk/Enum.java.result @@ -33,10 +33,10 @@ import libcore.util.EmptyArray == Sub Tree == class Enum Fields: - name ordinal sharedConstantsCache=new BasicLruCache + name ordinal sharedConstantsCache=new BasicLruCache(64) Instance Initializer: Constructors: - constructor Enum() throws: + constructor Enum(name,ordinal) throws: this.name Assign name this.ordinal Assign ordinal Methods: @@ -46,58 +46,57 @@ class Enum return ordinal func toString() throws: return name - func equals() throws: + func equals(other) throws: return this EQ other func hashCode() throws: - return hashCode + return super.hashCode() func clone() throws: CloneNotSupportedException - new CloneNotSupportedException - func compareTo() throws: - var:other=(Enum)o - var:self=this + new CloneNotSupportedException() + func compareTo(o) throws: + Decl: other=(Enum)o + Decl: self=this cond-branch cond:self.getClass() NE other.getClass() Land self.getDeclaringClass() NE other.getDeclaringClass() true branch : - new ClassCastException - false branch : + new ClassCastException() false branch : return self.ordinal Sub other.ordinal func getDeclaringClass() throws: - var:clazz=getClass() - var:zuper=clazz.getSuperclass() + Decl: clazz=getClass() + Decl: zuper=clazz.getSuperclass() return - func valueOf() throws: + func valueOf(enumType,name) throws: cond-branch cond:enumType EQ null true branch : - new NullPointerException + new NullPointerException("enumType == null") false branch : cond-branch cond:name EQ null true branch : - new NullPointerException + new NullPointerException("name == null") false branch : - var:values=getSharedConstants(enumType) + Decl: values=getSharedConstants(enumType) cond-branch cond:values EQ null true branch : - new IllegalArgumentException + new IllegalArgumentException(enumType.toString() Add " is not an enum type.") false branch : for ( ) - var:value= + Decl: value= cond-branch cond:name.equals(value.name()) true branch : return value false branch : - new IllegalArgumentException - func getSharedConstants() throws: + new 
IllegalArgumentException("No enum constant " Add enumType.getCanonicalName() Add "." Add name) + func getSharedConstants(enumType) throws: return (T[])sharedConstantsCache.get(enumType) func finalize() throws: - func readObject() throws: IOException ClassNotFoundException - new InvalidObjectException + func readObject(in) throws: IOException ClassNotFoundException + new InvalidObjectException("can't deserialize enum") func readObjectNoData() throws: ObjectStreamException - new InvalidObjectException + new InvalidObjectException("can't deserialize enum") LocalClasses: LocalInterfaces: diff --git a/src/MapleFE/test/openjdk/EnumConstantNotPresentException.java b/src/MapleFE/test/java/openjdk/EnumConstantNotPresentException.java similarity index 100% rename from src/MapleFE/test/openjdk/EnumConstantNotPresentException.java rename to src/MapleFE/test/java/openjdk/EnumConstantNotPresentException.java diff --git a/src/MapleFE/test/openjdk/EnumConstantNotPresentException.java.result b/src/MapleFE/test/java/openjdk/EnumConstantNotPresentException.java.result similarity index 78% rename from src/MapleFE/test/openjdk/EnumConstantNotPresentException.java.result rename to src/MapleFE/test/java/openjdk/EnumConstantNotPresentException.java.result index e1e8b1230b1af6a155ea5a8c2f4f1b74a256be72..b4d1d6a141a842aef6d533c1087756cb49f5874f 100644 --- a/src/MapleFE/test/openjdk/EnumConstantNotPresentException.java.result +++ b/src/MapleFE/test/java/openjdk/EnumConstantNotPresentException.java.result @@ -1,16 +1,15 @@ Matched 5 tokens. -Matched 105 tokens. +Matched 104 tokens. ============= Module =========== == Sub Tree == package java.lang == Sub Tree == class EnumConstantNotPresentException Fields: - serialVersionUID=Sub - 180673940 enumType constantName + serialVersionUID=-180673940 enumType constantName Instance Initializer: Constructors: - constructor EnumConstantNotPresentException() throws: + constructor EnumConstantNotPresentException(enumType,constantName) throws: this.enumType Assign enumType this.constantName Assign constantName Methods: diff --git a/src/MapleFE/test/openjdk/Error.java b/src/MapleFE/test/java/openjdk/Error.java similarity index 100% rename from src/MapleFE/test/openjdk/Error.java rename to src/MapleFE/test/java/openjdk/Error.java diff --git a/src/MapleFE/test/openjdk/Error.java.result b/src/MapleFE/test/java/openjdk/Error.java.result similarity index 58% rename from src/MapleFE/test/openjdk/Error.java.result rename to src/MapleFE/test/java/openjdk/Error.java.result index 5dd4622ea7053655bf6e1c4dcc81bba7800f9fb3..51113c2cd922e448298aebd5378e04c5c9ec02d8 100644 --- a/src/MapleFE/test/openjdk/Error.java.result +++ b/src/MapleFE/test/java/openjdk/Error.java.result @@ -10,10 +10,10 @@ class Error Instance Initializer: Constructors: constructor Error() throws: - constructor Error() throws: - constructor Error() throws: - constructor Error() throws: - constructor Error() throws: + constructor Error(message) throws: + constructor Error(message,cause) throws: + constructor Error(cause) throws: + constructor Error(message,cause,enableSuppression,writableStackTrace) throws: Methods: LocalClasses: LocalInterfaces: diff --git a/src/MapleFE/test/openjdk/Exception.java b/src/MapleFE/test/java/openjdk/Exception.java similarity index 100% rename from src/MapleFE/test/openjdk/Exception.java rename to src/MapleFE/test/java/openjdk/Exception.java diff --git a/src/MapleFE/test/openjdk/Exception.java.result b/src/MapleFE/test/java/openjdk/Exception.java.result similarity index 47% rename from 
src/MapleFE/test/openjdk/Exception.java.result rename to src/MapleFE/test/java/openjdk/Exception.java.result index be76c0a91de7ca3202633ccc22099abc4729e97c..f47c721d29f22195f9da1f72a77ebe994d9999a6 100644 --- a/src/MapleFE/test/openjdk/Exception.java.result +++ b/src/MapleFE/test/java/openjdk/Exception.java.result @@ -1,20 +1,19 @@ Matched 5 tokens. -Matched 102 tokens. +Matched 101 tokens. ============= Module =========== == Sub Tree == package java.lang == Sub Tree == class Exception Fields: - serialVersionUID=Sub - -440081604 + serialVersionUID=440081604 Instance Initializer: Constructors: constructor Exception() throws: - constructor Exception() throws: - constructor Exception() throws: - constructor Exception() throws: - constructor Exception() throws: + constructor Exception(message) throws: + constructor Exception(message,cause) throws: + constructor Exception(cause) throws: + constructor Exception(message,cause,enableSuppression,writableStackTrace) throws: Methods: LocalClasses: LocalInterfaces: diff --git a/src/MapleFE/test/openjdk/ExceptionInInitializerError.java b/src/MapleFE/test/java/openjdk/ExceptionInInitializerError.java similarity index 100% rename from src/MapleFE/test/openjdk/ExceptionInInitializerError.java rename to src/MapleFE/test/java/openjdk/ExceptionInInitializerError.java diff --git a/src/MapleFE/test/openjdk/ExceptionInInitializerError.java.result b/src/MapleFE/test/java/openjdk/ExceptionInInitializerError.java.result similarity index 82% rename from src/MapleFE/test/openjdk/ExceptionInInitializerError.java.result rename to src/MapleFE/test/java/openjdk/ExceptionInInitializerError.java.result index cafc9f7c7352ee5c569011a0e8f5d1fa3b724cb6..23df8f7d4d6f1ebc4424ebe5708fe741390d7c04 100644 --- a/src/MapleFE/test/openjdk/ExceptionInInitializerError.java.result +++ b/src/MapleFE/test/java/openjdk/ExceptionInInitializerError.java.result @@ -11,10 +11,10 @@ class ExceptionInInitializerError Constructors: constructor ExceptionInInitializerError() throws: initCause(null) - constructor ExceptionInInitializerError() throws: + constructor ExceptionInInitializerError(thrown) throws: initCause(null) this.exception Assign thrown - constructor ExceptionInInitializerError() throws: + constructor ExceptionInInitializerError(s) throws: initCause(null) Methods: func getException() throws: diff --git a/src/MapleFE/test/openjdk/Float.java b/src/MapleFE/test/java/openjdk/Float.java similarity index 100% rename from src/MapleFE/test/openjdk/Float.java rename to src/MapleFE/test/java/openjdk/Float.java diff --git a/src/MapleFE/test/openjdk/Float.java.result b/src/MapleFE/test/java/openjdk/Float.java.result similarity index 54% rename from src/MapleFE/test/openjdk/Float.java.result rename to src/MapleFE/test/java/openjdk/Float.java.result index d1727db3e9bb575fcc88b0116c9f43d413c189b3..25f8d35f633b02d35b2f9e4cdabb6647e0e56b50 100644 --- a/src/MapleFE/test/openjdk/Float.java.result +++ b/src/MapleFE/test/java/openjdk/Float.java.result @@ -2,7 +2,7 @@ Matched 5 tokens. Matched 12 tokens. Matched 19 tokens. Matched 26 tokens. -Matched 848 tokens. +Matched 844 tokens. 
============= Module =========== == Sub Tree == package java.lang @@ -15,39 +15,36 @@ import sun.misc.DoubleConsts == Sub Tree == class Float Fields: - POSITIVE_INFINITY= NEGATIVE_INFINITY= NaN= MAX_VALUE=3.40282e+38 MIN_NORMAL=1.17549e-38 MIN_VALUE=1.4013e-45 MAX_EXPONENT=127 MIN_EXPONENT=Sub - 126 SIZE=32 BYTES= TYPE=() value serialVersionUID=Sub - 616763156 + POSITIVE_INFINITY=1 Div 0 NEGATIVE_INFINITY=-1 Div 0 NaN=0 Div 0 MAX_VALUE=3.40282e+38 MIN_NORMAL=1.17549e-38 MIN_VALUE=1.4013e-45 MAX_EXPONENT=127 MIN_EXPONENT=-126 SIZE=32 BYTES=SIZE Div Byte.SIZE TYPE=()float.getComponentType() value serialVersionUID=-616763156 Instance Initializer: Constructors: - constructor Float() throws: + constructor Float(value) throws: this.value Assign value - constructor Float() throws: + constructor Float(value) throws: this.value Assign (float)value - constructor Float() throws: + constructor Float(s) throws: value Assign parseFloat(s) Methods: - func toString() throws: + func toString(f) throws: return FloatingDecimal.toJavaFormatString(f) - func toHexString() throws: + func toHexString(f) throws: cond-branch cond:Math.abs(f) LT FloatConsts.MIN_NORMAL Land f NE 0 true branch : - var:s=Double.toHexString(Math.scalb((double)f,DoubleConsts.MIN_EXPONENT Sub FloatConsts.MIN_EXPONENT)) + Decl: s=Double.toHexString(Math.scalb((double)f,DoubleConsts.MIN_EXPONENT Sub FloatConsts.MIN_EXPONENT)) return s.replaceFirst("p-1022$","p-126") false branch : return Double.toHexString(f) - - func valueOf() throws: NumberFormatException - return new Float - func valueOf() throws: - return new Float - func parseFloat() throws: NumberFormatException + func valueOf(s) throws: NumberFormatException + return new Float(parseFloat(s)) + func valueOf(f) throws: + return new Float(f) + func parseFloat(s) throws: NumberFormatException return FloatingDecimal.parseFloat(s) - func isNaN() throws: + func isNaN(v) throws: return (v NE v) - func isInfinite() throws: + func isInfinite(v) throws: return (v EQ POSITIVE_INFINITY) Lor (v EQ NEGATIVE_INFINITY) - func isFinite() throws: + func isFinite(f) throws: return Math.abs(f) LE FloatConsts.MAX_VALUE func isNaN() throws: return isNaN(value) @@ -69,42 +66,38 @@ class Float return (double)value func hashCode() throws: return Float.hashCode(value) - func hashCode() throws: + func hashCode(value) throws: return floatToIntBits(value) - func equals() throws: - return () Land (floatToIntBits((Float)obj.value) EQ floatToIntBits(value)) - func floatToIntBits() throws: - var:result=floatToRawIntBits(value) + func equals(obj) throws: + return (obj instanceof Float) Land (floatToIntBits((Float)obj.value) EQ floatToIntBits(value)) + func floatToIntBits(value) throws: + Decl: result=floatToRawIntBits(value) cond-branch cond:((result Band FloatConsts.EXP_BIT_MASK) EQ FloatConsts.EXP_BIT_MASK) Land (result Band FloatConsts.SIGNIF_BIT_MASK) NE 0 true branch : - result Assign 2143289344 - false branch : + result Assign 2143289344 false branch : return result - func floatToRawIntBits() throws: - func intBitsToFloat() throws: - func compareTo() throws: + func floatToRawIntBits(value) throws: + func intBitsToFloat(bits) throws: + func compareTo(anotherFloat) throws: return Float.compare(value,anotherFloat.value) - func compare() throws: + func compare(f1,f2) throws: cond-branch cond:f1 LT f2 true branch : - return Sub - 1 - false branch : + return -1 false branch : cond-branch cond:f1 GT f2 true branch : - return 1 - false branch : + return 1 false branch : - var:thisBits=Float.floatToIntBits(f1) - 
var:anotherBits=Float.floatToIntBits(f2) + Decl: thisBits=Float.floatToIntBits(f1) + Decl: anotherBits=Float.floatToIntBits(f2) return () - func sum() throws: + func sum(a,b) throws: return a Add b - func max() throws: + func max(a,b) throws: return Math.max(a,b) - func min() throws: + func min(a,b) throws: return Math.min(a,b) LocalClasses: LocalInterfaces: diff --git a/src/MapleFE/test/openjdk/FunctionalInterface.java b/src/MapleFE/test/java/openjdk/FunctionalInterface.java similarity index 100% rename from src/MapleFE/test/openjdk/FunctionalInterface.java rename to src/MapleFE/test/java/openjdk/FunctionalInterface.java diff --git a/src/MapleFE/test/openjdk/FunctionalInterface.java.result b/src/MapleFE/test/java/openjdk/FunctionalInterface.java.result similarity index 100% rename from src/MapleFE/test/openjdk/FunctionalInterface.java.result rename to src/MapleFE/test/java/openjdk/FunctionalInterface.java.result diff --git a/src/MapleFE/test/openjdk/IllegalAccessError.java b/src/MapleFE/test/java/openjdk/IllegalAccessError.java similarity index 100% rename from src/MapleFE/test/openjdk/IllegalAccessError.java rename to src/MapleFE/test/java/openjdk/IllegalAccessError.java diff --git a/src/MapleFE/test/openjdk/IllegalAccessError.java.result b/src/MapleFE/test/java/openjdk/IllegalAccessError.java.result similarity index 72% rename from src/MapleFE/test/openjdk/IllegalAccessError.java.result rename to src/MapleFE/test/java/openjdk/IllegalAccessError.java.result index 901d3350b0a4bfbed762b29a13590ceb71be2bdc..ee810c6fa19c6471f03a7890f4fe6eb372772f8c 100644 --- a/src/MapleFE/test/openjdk/IllegalAccessError.java.result +++ b/src/MapleFE/test/java/openjdk/IllegalAccessError.java.result @@ -1,17 +1,16 @@ Matched 5 tokens. -Matched 44 tokens. +Matched 43 tokens. 
============= Module =========== == Sub Tree == package java.lang == Sub Tree == class IllegalAccessError Fields: - serialVersionUID=Sub - -2047083421 + serialVersionUID=2047083421 Instance Initializer: Constructors: constructor IllegalAccessError() throws: - constructor IllegalAccessError() throws: + constructor IllegalAccessError(s) throws: Methods: LocalClasses: LocalInterfaces: diff --git a/src/MapleFE/test/openjdk/IllegalAccessException.java b/src/MapleFE/test/java/openjdk/IllegalAccessException.java similarity index 100% rename from src/MapleFE/test/openjdk/IllegalAccessException.java rename to src/MapleFE/test/java/openjdk/IllegalAccessException.java diff --git a/src/MapleFE/test/openjdk/IllegalAccessException.java.result b/src/MapleFE/test/java/openjdk/IllegalAccessException.java.result similarity index 86% rename from src/MapleFE/test/openjdk/IllegalAccessException.java.result rename to src/MapleFE/test/java/openjdk/IllegalAccessException.java.result index 77c6d9fc962f50bb6aa151c2d40484d8735d3355..0f015ed019a86553dfc8e3ba415a62f922910507 100644 --- a/src/MapleFE/test/openjdk/IllegalAccessException.java.result +++ b/src/MapleFE/test/java/openjdk/IllegalAccessException.java.result @@ -10,7 +10,7 @@ class IllegalAccessException Instance Initializer: Constructors: constructor IllegalAccessException() throws: - constructor IllegalAccessException() throws: + constructor IllegalAccessException(s) throws: Methods: LocalClasses: LocalInterfaces: diff --git a/src/MapleFE/test/openjdk/IllegalArgumentException.java b/src/MapleFE/test/java/openjdk/IllegalArgumentException.java similarity index 100% rename from src/MapleFE/test/openjdk/IllegalArgumentException.java rename to src/MapleFE/test/java/openjdk/IllegalArgumentException.java diff --git a/src/MapleFE/test/openjdk/IllegalArgumentException.java.result b/src/MapleFE/test/java/openjdk/IllegalArgumentException.java.result similarity index 55% rename from src/MapleFE/test/openjdk/IllegalArgumentException.java.result rename to src/MapleFE/test/java/openjdk/IllegalArgumentException.java.result index ab0c987eeb13c769b7c4f476ceefdba5a1adb147..4a49c834ef69e8256d44a33bc036570a35ac0077 100644 --- a/src/MapleFE/test/openjdk/IllegalArgumentException.java.result +++ b/src/MapleFE/test/java/openjdk/IllegalArgumentException.java.result @@ -1,19 +1,18 @@ Matched 5 tokens. -Matched 75 tokens. +Matched 74 tokens. 
============= Module =========== == Sub Tree == package java.lang == Sub Tree == class IllegalArgumentException Fields: - serialVersionUID=Sub - -2103873468 + serialVersionUID=2103873468 Instance Initializer: Constructors: constructor IllegalArgumentException() throws: - constructor IllegalArgumentException() throws: - constructor IllegalArgumentException() throws: - constructor IllegalArgumentException() throws: + constructor IllegalArgumentException(s) throws: + constructor IllegalArgumentException(message,cause) throws: + constructor IllegalArgumentException(cause) throws: Methods: LocalClasses: LocalInterfaces: diff --git a/src/MapleFE/test/openjdk/IllegalMonitorStateException.java b/src/MapleFE/test/java/openjdk/IllegalMonitorStateException.java similarity index 100% rename from src/MapleFE/test/openjdk/IllegalMonitorStateException.java rename to src/MapleFE/test/java/openjdk/IllegalMonitorStateException.java diff --git a/src/MapleFE/test/openjdk/IllegalMonitorStateException.java.result b/src/MapleFE/test/java/openjdk/IllegalMonitorStateException.java.result similarity index 85% rename from src/MapleFE/test/openjdk/IllegalMonitorStateException.java.result rename to src/MapleFE/test/java/openjdk/IllegalMonitorStateException.java.result index 40ea298ea916adf631527d36b962e3c27db9b05e..95aafffda59dce3ef79436e4408a2b565d85e670 100644 --- a/src/MapleFE/test/openjdk/IllegalMonitorStateException.java.result +++ b/src/MapleFE/test/java/openjdk/IllegalMonitorStateException.java.result @@ -10,7 +10,7 @@ class IllegalMonitorStateException Instance Initializer: Constructors: constructor IllegalMonitorStateException() throws: - constructor IllegalMonitorStateException() throws: + constructor IllegalMonitorStateException(s) throws: Methods: LocalClasses: LocalInterfaces: diff --git a/src/MapleFE/test/openjdk/IllegalStateException.java b/src/MapleFE/test/java/openjdk/IllegalStateException.java similarity index 100% rename from src/MapleFE/test/openjdk/IllegalStateException.java rename to src/MapleFE/test/java/openjdk/IllegalStateException.java diff --git a/src/MapleFE/test/openjdk/IllegalStateException.java.result b/src/MapleFE/test/java/openjdk/IllegalStateException.java.result similarity index 55% rename from src/MapleFE/test/openjdk/IllegalStateException.java.result rename to src/MapleFE/test/java/openjdk/IllegalStateException.java.result index 2e476ee0c2acf14ed9bfa1399d5202d7cbc4e9fc..783d279016e888712683788de8a8642a415974ee 100644 --- a/src/MapleFE/test/openjdk/IllegalStateException.java.result +++ b/src/MapleFE/test/java/openjdk/IllegalStateException.java.result @@ -1,19 +1,18 @@ Matched 5 tokens. -Matched 74 tokens. +Matched 73 tokens. 
============= Module =========== == Sub Tree == package java.lang == Sub Tree == class IllegalStateException Fields: - serialVersionUID=Sub - 1706626488 + serialVersionUID=-1706626488 Instance Initializer: Constructors: constructor IllegalStateException() throws: - constructor IllegalStateException() throws: - constructor IllegalStateException() throws: - constructor IllegalStateException() throws: + constructor IllegalStateException(s) throws: + constructor IllegalStateException(message,cause) throws: + constructor IllegalStateException(cause) throws: Methods: LocalClasses: LocalInterfaces: diff --git a/src/MapleFE/test/openjdk/IllegalThreadStateException.java b/src/MapleFE/test/java/openjdk/IllegalThreadStateException.java similarity index 100% rename from src/MapleFE/test/openjdk/IllegalThreadStateException.java rename to src/MapleFE/test/java/openjdk/IllegalThreadStateException.java diff --git a/src/MapleFE/test/openjdk/IllegalThreadStateException.java.result b/src/MapleFE/test/java/openjdk/IllegalThreadStateException.java.result similarity index 72% rename from src/MapleFE/test/openjdk/IllegalThreadStateException.java.result rename to src/MapleFE/test/java/openjdk/IllegalThreadStateException.java.result index e38f91928f4e02d5d561399a4f4146e84394e4c6..f6bb4fa7292cb4e106a708430caa3bd3d11b6fd6 100644 --- a/src/MapleFE/test/openjdk/IllegalThreadStateException.java.result +++ b/src/MapleFE/test/java/openjdk/IllegalThreadStateException.java.result @@ -1,17 +1,16 @@ Matched 5 tokens. -Matched 44 tokens. +Matched 43 tokens. ============= Module =========== == Sub Tree == package java.lang == Sub Tree == class IllegalThreadStateException Fields: - serialVersionUID=Sub - 944817870 + serialVersionUID=-944817870 Instance Initializer: Constructors: constructor IllegalThreadStateException() throws: - constructor IllegalThreadStateException() throws: + constructor IllegalThreadStateException(s) throws: Methods: LocalClasses: LocalInterfaces: diff --git a/src/MapleFE/test/openjdk/IncompatibleClassChangeError.java b/src/MapleFE/test/java/openjdk/IncompatibleClassChangeError.java similarity index 100% rename from src/MapleFE/test/openjdk/IncompatibleClassChangeError.java rename to src/MapleFE/test/java/openjdk/IncompatibleClassChangeError.java diff --git a/src/MapleFE/test/openjdk/IncompatibleClassChangeError.java.result b/src/MapleFE/test/java/openjdk/IncompatibleClassChangeError.java.result similarity index 72% rename from src/MapleFE/test/openjdk/IncompatibleClassChangeError.java.result rename to src/MapleFE/test/java/openjdk/IncompatibleClassChangeError.java.result index 25ca234a7efa329407ffe8986d32037f14a742c2..f60b99e0394340369de81923419ee8cf2b0be7d0 100644 --- a/src/MapleFE/test/openjdk/IncompatibleClassChangeError.java.result +++ b/src/MapleFE/test/java/openjdk/IncompatibleClassChangeError.java.result @@ -1,17 +1,16 @@ Matched 5 tokens. -Matched 44 tokens. +Matched 43 tokens. 
============= Module =========== == Sub Tree == package java.lang == Sub Tree == class IncompatibleClassChangeError Fields: - serialVersionUID=Sub - 241186759 + serialVersionUID=-241186759 Instance Initializer: Constructors: constructor IncompatibleClassChangeError() throws: - constructor IncompatibleClassChangeError() throws: + constructor IncompatibleClassChangeError(s) throws: Methods: LocalClasses: LocalInterfaces: diff --git a/src/MapleFE/test/openjdk/IndexOutOfBoundsException.java b/src/MapleFE/test/java/openjdk/IndexOutOfBoundsException.java similarity index 100% rename from src/MapleFE/test/openjdk/IndexOutOfBoundsException.java rename to src/MapleFE/test/java/openjdk/IndexOutOfBoundsException.java diff --git a/src/MapleFE/test/openjdk/IndexOutOfBoundsException.java.result b/src/MapleFE/test/java/openjdk/IndexOutOfBoundsException.java.result similarity index 85% rename from src/MapleFE/test/openjdk/IndexOutOfBoundsException.java.result rename to src/MapleFE/test/java/openjdk/IndexOutOfBoundsException.java.result index a9a24df9ace72b094554679225d98135c255e81d..e6b7f17cecc4299420eacca954aea24cfafdde28 100644 --- a/src/MapleFE/test/openjdk/IndexOutOfBoundsException.java.result +++ b/src/MapleFE/test/java/openjdk/IndexOutOfBoundsException.java.result @@ -10,7 +10,7 @@ class IndexOutOfBoundsException Instance Initializer: Constructors: constructor IndexOutOfBoundsException() throws: - constructor IndexOutOfBoundsException() throws: + constructor IndexOutOfBoundsException(s) throws: Methods: LocalClasses: LocalInterfaces: diff --git a/src/MapleFE/test/openjdk/InheritableThreadLocal.java b/src/MapleFE/test/java/openjdk/InheritableThreadLocal.java similarity index 100% rename from src/MapleFE/test/openjdk/InheritableThreadLocal.java rename to src/MapleFE/test/java/openjdk/InheritableThreadLocal.java diff --git a/src/MapleFE/test/openjdk/InheritableThreadLocal.java.result b/src/MapleFE/test/java/openjdk/InheritableThreadLocal.java.result similarity index 67% rename from src/MapleFE/test/openjdk/InheritableThreadLocal.java.result rename to src/MapleFE/test/java/openjdk/InheritableThreadLocal.java.result index 0b33682f73eef1c10daa3f17b985c1ae414280df..bc6d651cb6cdcaf8b1e8026f564c4f67cb7a9bf4 100644 --- a/src/MapleFE/test/openjdk/InheritableThreadLocal.java.result +++ b/src/MapleFE/test/java/openjdk/InheritableThreadLocal.java.result @@ -13,12 +13,12 @@ class InheritableThreadLocal Instance Initializer: Constructors: Methods: - func childValue() throws: + func childValue(parentValue) throws: return parentValue - func getMap() throws: + func getMap(t) throws: return t.inheritableThreadLocals - func createMap() throws: - t.inheritableThreadLocals Assign new ThreadLocalMap + func createMap(t,firstValue) throws: + t.inheritableThreadLocals Assign new ThreadLocalMap(this,firstValue) LocalClasses: LocalInterfaces: diff --git a/src/MapleFE/test/openjdk/InstantiationError.java b/src/MapleFE/test/java/openjdk/InstantiationError.java similarity index 100% rename from src/MapleFE/test/openjdk/InstantiationError.java rename to src/MapleFE/test/java/openjdk/InstantiationError.java diff --git a/src/MapleFE/test/openjdk/InstantiationError.java.result b/src/MapleFE/test/java/openjdk/InstantiationError.java.result similarity index 72% rename from src/MapleFE/test/openjdk/InstantiationError.java.result rename to src/MapleFE/test/java/openjdk/InstantiationError.java.result index f0d4db7ccc4b17645252db9359af8d444201ecf3..7964f147d622897c77e3113656d4a2a0d65d86be 100644 --- 
a/src/MapleFE/test/openjdk/InstantiationError.java.result +++ b/src/MapleFE/test/java/openjdk/InstantiationError.java.result @@ -1,17 +1,16 @@ Matched 5 tokens. -Matched 44 tokens. +Matched 43 tokens. ============= Module =========== == Sub Tree == package java.lang == Sub Tree == class InstantiationError Fields: - serialVersionUID=Sub - 522274964 + serialVersionUID=-522274964 Instance Initializer: Constructors: constructor InstantiationError() throws: - constructor InstantiationError() throws: + constructor InstantiationError(s) throws: Methods: LocalClasses: LocalInterfaces: diff --git a/src/MapleFE/test/openjdk/InstantiationException.java b/src/MapleFE/test/java/openjdk/InstantiationException.java similarity index 100% rename from src/MapleFE/test/openjdk/InstantiationException.java rename to src/MapleFE/test/java/openjdk/InstantiationException.java diff --git a/src/MapleFE/test/openjdk/InstantiationException.java.result b/src/MapleFE/test/java/openjdk/InstantiationException.java.result similarity index 72% rename from src/MapleFE/test/openjdk/InstantiationException.java.result rename to src/MapleFE/test/java/openjdk/InstantiationException.java.result index 318389f21526ffc3a14de3431ebfe07c80245d46..88f6bce0e263177027356f7d18a53bfcb6822430 100644 --- a/src/MapleFE/test/openjdk/InstantiationException.java.result +++ b/src/MapleFE/test/java/openjdk/InstantiationException.java.result @@ -1,17 +1,16 @@ Matched 5 tokens. -Matched 44 tokens. +Matched 43 tokens. ============= Module =========== == Sub Tree == package java.lang == Sub Tree == class InstantiationException Fields: - serialVersionUID=Sub - -1305838986 + serialVersionUID=1305838986 Instance Initializer: Constructors: constructor InstantiationException() throws: - constructor InstantiationException() throws: + constructor InstantiationException(s) throws: Methods: LocalClasses: LocalInterfaces: diff --git a/src/MapleFE/test/openjdk/Integer.java b/src/MapleFE/test/java/openjdk/Integer.java similarity index 100% rename from src/MapleFE/test/openjdk/Integer.java rename to src/MapleFE/test/java/openjdk/Integer.java diff --git a/src/MapleFE/test/openjdk/Integer.java.result b/src/MapleFE/test/java/openjdk/Integer.java.result similarity index 47% rename from src/MapleFE/test/openjdk/Integer.java.result rename to src/MapleFE/test/java/openjdk/Integer.java.result index 99b7f591bf6b48af36ed1562f11c662a67264f8b..a5effda89d9f5254c3e76dd7ecc9f00f776e0f64 100644 --- a/src/MapleFE/test/openjdk/Integer.java.result +++ b/src/MapleFE/test/java/openjdk/Integer.java.result @@ -1,8 +1,6 @@ Matched 5 tokens. Matched 14 tokens. -Matched 3758 tokens. -Convert unary --> binary -Convert unary --> binary +Matched 3756 tokens. 
============= Module =========== == Sub Tree == package java.lang @@ -11,103 +9,123 @@ import java.lang.annotation.Native == Sub Tree == class Integer Fields: - MIN_VALUE=-2147483648 MAX_VALUE=2147483647 TYPE=() digits= SMALL_NEG_VALUES= SMALL_NONNEG_VALUES= DigitTens= DigitOnes= sizeTable= value SIZE=32 BYTES= serialVersionUID=-142506184 + MIN_VALUE=-2147483648 MAX_VALUE=2147483647 TYPE=()int.getComponentType() digits= SMALL_NEG_VALUES= SMALL_NONNEG_VALUES= DigitTens= DigitOnes= sizeTable= value SIZE=32 BYTES=SIZE Div Byte.SIZE serialVersionUID=-142506184 Instance Initializer: Constructors: - constructor Integer() throws: + constructor Integer(value) throws: this.value Assign value - constructor Integer() throws: + constructor Integer(s) throws: this.value Assign parseInt(s,10) Methods: - func toString() throws: + func toString(i,radix) throws: cond-branch cond:radix LT Character.MIN_RADIX Lor radix GT Character.MAX_RADIX true branch : - radix Assign 10 - false branch : + radix Assign 10 false branch : cond-branch cond:radix EQ 10 true branch : return toString(i) false branch : - var:buf=[] - var:negative=(i LT 0) - var:charPos=32 + Decl: buf=[] + Decl: negative=(i LT 0) + Decl: charPos=32 cond-branch cond:negative true branch : - i Assign Sub - i + i Assign Minus i false branch : - while i LE Sub - radix Assign digits Sub () - i Assign + while i LE Minus radix Assign + i Assign i Div radix - Assign digits Sub i + Assign cond-branch cond:negative true branch : Assign - false branch : - return new String - func toUnsignedString() throws: + return new String(buf,charPos,(33 Sub charPos)) + func toUnsignedString(i,radix) throws: return Long.toUnsignedString(toUnsignedLong(i),radix) - func toHexString() throws: + func toHexString(i) throws: return toUnsignedString0(i,4) - func toOctalString() throws: + func toOctalString(i) throws: return toUnsignedString0(i,3) - func toBinaryString() throws: + func toBinaryString(i) throws: return toUnsignedString0(i,1) - func toUnsignedString0() throws: - var:mag=Integer.SIZE Sub Integer.numberOfLeadingZeros(val) - var:chars=Math.max(((mag Add (shift Sub 1))shift)1) - var:buf= + func toUnsignedString0(val,shift) throws: + Decl: mag=Integer.SIZE Sub Integer.numberOfLeadingZeros(val) + Decl: chars=Math.max(((mag Add (shift Sub 1)) Div shift)1) + Decl: buf= formatUnsignedInt(val,shift,buf,0,chars) - return new String - func formatUnsignedInt() throws: - var:charPos=len - var:radix=1 Shl shift - var:mask=radix Sub 1 + return new String(buf) + func formatUnsignedInt(val,shift,buf,offset,len) throws: + Decl: charPos=len + Decl: radix=1 Shl shift + Decl: mask=radix Sub 1 do Assign val ZextAssign shift while val NE 0 Land charPos GT 0 return charPos - func toString() throws: + func toString(i) throws: cond-branch cond:i EQ Integer.MIN_VALUE true branch : - return "-2147483648" - false branch : + return "-2147483648" false branch : - var:negative=i LT 0 - var:small= + Decl: negative=i LT 0 + Decl: small= cond-branch cond:small true branch : - var:smallValues= + Decl: smallValues= + cond-branch cond:negative + true branch : + i Assign Minus i + cond-branch cond: EQ null + true branch : + Assign + false branch : + false branch : + cond-branch cond: EQ null + true branch : + Assign + false branch : + + + return false branch : - var:size= - var:buf= + Decl: size= + Decl: buf= getChars(i,size,buf) - return new String - func toUnsignedString() throws: + return new String(buf) + func toUnsignedString(i) throws: return Long.toString(toUnsignedLong(i)) - func getChars() 
throws: - var:q,r - var:charPos=index - var:sign=0 + func getChars(i,index,buf) throws: + Decl: q,r + Decl: charPos=index + Decl: sign=0 cond-branch cond:i LT 0 true branch : sign Assign - - i Assign Sub - i + i Assign Minus i false branch : - while i GE 65536 q Assign - + while i GE 65536 q Assign i Div 100 + r Assign i Sub ((q Shl 6) Add (q Shl 5) Add (q Shl 2)) + i Assign q + Assign + Assign for ( ) - q Assign () Zext (16 Add 3) + q Assign (i Mul 52429) Zext (16 Add 3) + r Assign i Sub ((q Shl 3) Add (q Shl 1)) + Assign + i Assign q + cond-branch cond:i EQ 0 + true branch : + break: + false branch : cond-branch cond:sign NE 0 @@ -115,90 +133,125 @@ while val NE 0 Land charPos GT 0 Assign sign false branch : - func stringSize() throws: + func stringSize(x) throws: for ( ) cond-branch cond:x LE true branch : - return i Add 1 - false branch : - + return i Add 1 false branch : - func parseInt() throws: NumberFormatException + func parseInt(s,radix) throws: NumberFormatException cond-branch cond:s EQ null true branch : - new NumberFormatException + new NumberFormatException("s == null") false branch : cond-branch cond:radix LT Character.MIN_RADIX true branch : - new NumberFormatException + new NumberFormatException("radix " Add radix Add " less than Character.MIN_RADIX") false branch : cond-branch cond:radix GT Character.MAX_RADIX true branch : - new NumberFormatException + new NumberFormatException("radix " Add radix Add " greater than Character.MAX_RADIX") false branch : - var:result=0 - var:negative=false - var:i=0,len=s.length() - var:limit=Sub - Integer.MAX_VALUE - var:multmin - var:digit + Decl: result=0 + Decl: negative=false + Decl: i=0,len=s.length() + Decl: limit=Minus Integer.MAX_VALUE + Decl: multmin + Decl: digit cond-branch cond:len GT 0 true branch : - var:firstChar=s.charAt(0) + Decl: firstChar=s.charAt(0) + cond-branch cond:firstChar LT 0 + true branch : + cond-branch cond:firstChar EQ - + true branch : + negative Assign true + limit Assign Integer.MIN_VALUE + false branch : + cond-branch cond:firstChar NE + + true branch : + NumberFormatException.forInputString(s) false branch : + + cond-branch cond:len EQ 1 + true branch : + NumberFormatException.forInputString(s) false branch : + + i Inc + + false branch : + + multmin Assign limit Div radix + while i LT len digit Assign Character.digit(s.charAt(i Inc +),radix) + cond-branch cond:digit LT 0 + true branch : + NumberFormatException.forInputString(s) + false branch : + + cond-branch cond:result LT multmin + true branch : + NumberFormatException.forInputString(s) + false branch : + + result MulAssign radix + cond-branch cond:result LT limit Add digit + true branch : + NumberFormatException.forInputString(s) + false branch : + + result SubAssign digit false branch : NumberFormatException.forInputString(s) return - func parseInt() throws: NumberFormatException + func parseInt(s) throws: NumberFormatException return parseInt(s,10) - func parseUnsignedInt() throws: NumberFormatException + func parseUnsignedInt(s,radix) throws: NumberFormatException cond-branch cond:s EQ null true branch : - new NumberFormatException + new NumberFormatException("null") false branch : - var:len=s.length() + Decl: len=s.length() cond-branch cond:len GT 0 true branch : - var:firstChar=s.charAt(0) + Decl: firstChar=s.charAt(0) cond-branch cond:firstChar EQ - true branch : - new NumberFormatException + new NumberFormatException(String.format("Illegal leading minus sign " Add "on unsigned string %s.",s)) false branch : cond-branch cond:len LE 5 Lor 
(radix EQ 10 Land len LE 9) true branch : return parseInt(s,radix) false branch : - var:ell=Long.parseLong(s,radix) + Decl: ell=Long.parseLong(s,radix) cond-branch cond:(ell Band 0) EQ 0 true branch : return (int)ell false branch : - new NumberFormatException + new NumberFormatException(String.format("String value %s exceeds " Add "range of unsigned int.",s)) false branch : NumberFormatException.forInputString(s) - func parseUnsignedInt() throws: NumberFormatException + func parseUnsignedInt(s) throws: NumberFormatException return parseUnsignedInt(s,10) - func valueOf() throws: NumberFormatException + func valueOf(s,radix) throws: NumberFormatException return Integer.valueOf(parseInt(s,radix)) - func valueOf() throws: NumberFormatException + func valueOf(s) throws: NumberFormatException return Integer.valueOf(parseInt(s,10)) - func valueOf() throws: + func valueOf(i) throws: cond-branch cond:i GE IntegerCache.low Land i LE IntegerCache.high true branch : - return - false branch : + return false branch : - return new Integer + return new Integer(i) func byteValue() throws: return (byte)value func shortValue() throws: @@ -215,57 +268,59 @@ while val NE 0 Land charPos GT 0 return toString(value) func hashCode() throws: return Integer.hashCode(value) - func hashCode() throws: + func hashCode(value) throws: return value - func equals() throws: - cond-branch cond: + func equals(obj) throws: + cond-branch cond:obj instanceof Integer true branch : - return value EQ ((Integer)obj)intValue + return value EQ (Integer)obj.intValue() false branch : return false - func getInteger() throws: + func getInteger(nm) throws: return getInteger(nm,null) - func getInteger() throws: - var:result=getInteger(nm,null) + func getInteger(nm,val) throws: + Decl: result=getInteger(nm,null) return - func getInteger() throws: - var:v=null + func getInteger(nm,val) throws: + Decl: v=null v Assign System.getProperty(nm) + IllegalArgumentException + NullPointerException + e cond-branch cond:v NE null true branch : return Integer.decode(v) + NumberFormatException + e false branch : return val - func decode() throws: NumberFormatException - var:radix=10 - var:index=0 - var:negative=false - var:result + func decode(nm) throws: NumberFormatException + Decl: radix=10 + Decl: index=0 + Decl: negative=false + Decl: result cond-branch cond:nm.length() EQ 0 true branch : - new NumberFormatException - false branch : + new NumberFormatException("Zero length string") false branch : - var:firstChar=nm.charAt(0) + Decl: firstChar=nm.charAt(0) cond-branch cond:firstChar EQ - true branch : negative Assign true - indexInc + index Inc false branch : cond-branch cond:firstChar EQ + true branch : - indexInc - + index Inc false branch : - cond-branch cond:nm.startsWith("0x",index) Lor nm.startsWith("0X",index) true branch : index AddAssign 2 @@ -273,55 +328,57 @@ while val NE 0 Land charPos GT 0 false branch : cond-branch cond:nm.startsWith("#",index) true branch : - indexInc + index Inc radix Assign 16 false branch : cond-branch cond:nm.startsWith("0",index) Land nm.length() GT 1 Add index true branch : - indexInc + index Inc radix Assign 8 false branch : - - cond-branch cond:nm.startsWith("-",index) Lor nm.startsWith("+",index) true branch : - new NumberFormatException - false branch : + new NumberFormatException("Sign character in wrong position") false branch : + result Assign Integer.valueOf(nm.substring(index),radix) + result Assign + + NumberFormatException + e + Decl: constant= + result Assign Integer.valueOf(constant,radix) return 
result - func compareTo() throws: + func compareTo(anotherInteger) throws: return compare(this.value,anotherInteger.value) - func compare() throws: + func compare(x,y) throws: return - func compareUnsigned() throws: + func compareUnsigned(x,y) throws: return compare(x Add MIN_VALUE,y Add MIN_VALUE) - func toUnsignedLong() throws: + func toUnsignedLong(x) throws: return ((long)x) Band -1 - func divideUnsigned() throws: - return (int)() - func remainderUnsigned() throws: - return (int)() - func highestOneBit() throws: + func divideUnsigned(dividend,divisor) throws: + return (int)(toUnsignedLong(dividend) Div toUnsignedLong(divisor)) + func remainderUnsigned(dividend,divisor) throws: + return (int)(toUnsignedLong(dividend) Mod toUnsignedLong(divisor)) + func highestOneBit(i) throws: i BorAssign (i Shr 1) i BorAssign (i Shr 2) i BorAssign (i Shr 4) i BorAssign (i Shr 8) i BorAssign (i Shr 16) return i Sub (i Zext 1) - func lowestOneBit() throws: - return i Band Sub - i - func numberOfLeadingZeros() throws: + func lowestOneBit(i) throws: + return i Band Minus i + func numberOfLeadingZeros(i) throws: cond-branch cond:i EQ 0 true branch : - return 32 - false branch : + return 32 false branch : - var:n=1 + Decl: n=1 cond-branch cond:i Zext 16 EQ 0 true branch : n AddAssign 16 @@ -348,14 +405,13 @@ while val NE 0 Land charPos GT 0 n SubAssign i Zext 31 return n - func numberOfTrailingZeros() throws: - var:y + func numberOfTrailingZeros(i) throws: + Decl: y cond-branch cond:i EQ 0 true branch : - return 32 - false branch : + return 32 false branch : - var:n=31 + Decl: n=31 y Assign i Shl 16 cond-branch cond:y NE 0 true branch : @@ -385,41 +441,37 @@ while val NE 0 Land charPos GT 0 false branch : return n Sub ((i Shl 1) Zext 31) - func bitCount() throws: + func bitCount(i) throws: i Assign i Sub ((i Zext 1) Band 1431655765) i Assign (i Band 858993459) Add ((i Zext 2) Band 858993459) i Assign (i Add (i Zext 4)) Band 252645135 i Assign i Add (i Zext 8) i Assign i Add (i Zext 16) return i Band 63 - func rotateLeft() throws: - return (i Shl distance) Bor (i Zext Sub - distance) - func rotateRight() throws: - return (i Zext distance) Bor (i Shl Sub - distance) - func reverse() throws: + func rotateLeft(i,distance) throws: + return (i Shl distance) Bor (i Zext Minus distance) + func rotateRight(i,distance) throws: + return (i Zext distance) Bor (i Shl Minus distance) + func reverse(i) throws: i Assign (i Band 1431655765) Shl 1 Bor (i Zext 1) Band 1431655765 i Assign (i Band 858993459) Shl 2 Bor (i Zext 2) Band 858993459 i Assign (i Band 252645135) Shl 4 Bor (i Zext 4) Band 252645135 i Assign (i Shl 24) Bor ((i Band 65280) Shl 8) Bor ((i Zext 8) Band 65280) Bor (i Zext 24) return i - func signum() throws: - return (i Shr 31) Bor (Sub - i Zext 31) - func reverseBytes() throws: + func signum(i) throws: + return (i Shr 31) Bor (Minus i Zext 31) + func reverseBytes(i) throws: return ((i Zext 24)) Bor ((i Shr 8) Band 65280) Bor ((i Shl 8) Band 16711680) Bor ((i Shl 24)) - func sum() throws: + func sum(a,b) throws: return a Add b - func max() throws: + func max(a,b) throws: return Math.max(a,b) - func min() throws: + func min(a,b) throws: return Math.min(a,b) LocalClasses: class IntegerCache Fields: - low=Sub - 128 high cache[] + low=-128 high cache[] Instance Initializer: InstInit- 0 Constructors: @@ -429,3 +481,17 @@ while val NE 0 Land charPos GT 0 LocalInterfaces: LocalInterfaces: +Identifier:v has no decl. +Identifier:IllegalArgumentException has no decl. +Identifier:NullPointerException has no decl. 
+Identifier:e has no decl. +Identifier:result has no decl. +Identifier:result has no decl. +Identifier:NumberFormatException has no decl. +Identifier:e has no decl. +Identifier:result has no decl. +Identifier:n has no decl. +Identifier:y has no decl. +Identifier:y has no decl. +Identifier:y has no decl. +Identifier:y has no decl. diff --git a/src/MapleFE/test/openjdk/InternalError.java b/src/MapleFE/test/java/openjdk/InternalError.java similarity index 100% rename from src/MapleFE/test/openjdk/InternalError.java rename to src/MapleFE/test/java/openjdk/InternalError.java diff --git a/src/MapleFE/test/openjdk/InternalError.java.result b/src/MapleFE/test/java/openjdk/InternalError.java.result similarity index 56% rename from src/MapleFE/test/openjdk/InternalError.java.result rename to src/MapleFE/test/java/openjdk/InternalError.java.result index aa5aa18b05260e49ae3bd60fbf7b7c82c21a1211..52e5fe44bddeaf366d900e102b77ce79752153bf 100644 --- a/src/MapleFE/test/openjdk/InternalError.java.result +++ b/src/MapleFE/test/java/openjdk/InternalError.java.result @@ -1,19 +1,18 @@ Matched 5 tokens. -Matched 75 tokens. +Matched 74 tokens. ============= Module =========== == Sub Tree == package java.lang == Sub Tree == class InternalError Fields: - serialVersionUID=Sub - 1073036797 + serialVersionUID=-1073036797 Instance Initializer: Constructors: constructor InternalError() throws: - constructor InternalError() throws: - constructor InternalError() throws: - constructor InternalError() throws: + constructor InternalError(message) throws: + constructor InternalError(message,cause) throws: + constructor InternalError(cause) throws: Methods: LocalClasses: LocalInterfaces: diff --git a/src/MapleFE/test/openjdk/InterruptedException.java b/src/MapleFE/test/java/openjdk/InterruptedException.java similarity index 100% rename from src/MapleFE/test/openjdk/InterruptedException.java rename to src/MapleFE/test/java/openjdk/InterruptedException.java diff --git a/src/MapleFE/test/openjdk/InterruptedException.java.result b/src/MapleFE/test/java/openjdk/InterruptedException.java.result similarity index 86% rename from src/MapleFE/test/openjdk/InterruptedException.java.result rename to src/MapleFE/test/java/openjdk/InterruptedException.java.result index bc6668075dd302f708c1d2663feb797dd1dae816..4b5a4410f404b13956d2eb630afa274728926e7a 100644 --- a/src/MapleFE/test/openjdk/InterruptedException.java.result +++ b/src/MapleFE/test/java/openjdk/InterruptedException.java.result @@ -10,7 +10,7 @@ class InterruptedException Instance Initializer: Constructors: constructor InterruptedException() throws: - constructor InterruptedException() throws: + constructor InterruptedException(s) throws: Methods: LocalClasses: LocalInterfaces: diff --git a/src/MapleFE/test/openjdk/Iterable.java b/src/MapleFE/test/java/openjdk/Iterable.java similarity index 100% rename from src/MapleFE/test/openjdk/Iterable.java rename to src/MapleFE/test/java/openjdk/Iterable.java diff --git a/src/MapleFE/test/openjdk/Iterable.java.result b/src/MapleFE/test/java/openjdk/Iterable.java.result similarity index 95% rename from src/MapleFE/test/openjdk/Iterable.java.result rename to src/MapleFE/test/java/openjdk/Iterable.java.result index 712993fa4593bdbc490a20d453e69e13494d2fe8..79077a8567f50708cb59801ee147234b37165d22 100644 --- a/src/MapleFE/test/openjdk/Iterable.java.result +++ b/src/MapleFE/test/java/openjdk/Iterable.java.result @@ -24,7 +24,7 @@ interface Iterable Methods: func iterator() throws: - func forEach() throws: + func forEach(action) throws: 
Objects.requireNonNull(action) T t diff --git a/src/MapleFE/test/openjdk/JavaLangAccess.java b/src/MapleFE/test/java/openjdk/JavaLangAccess.java similarity index 100% rename from src/MapleFE/test/openjdk/JavaLangAccess.java rename to src/MapleFE/test/java/openjdk/JavaLangAccess.java diff --git a/src/MapleFE/test/openjdk/JavaLangAccess.java.result b/src/MapleFE/test/java/openjdk/JavaLangAccess.java.result similarity index 86% rename from src/MapleFE/test/openjdk/JavaLangAccess.java.result rename to src/MapleFE/test/java/openjdk/JavaLangAccess.java.result index 48ac6364014e58a1577560ba18036244a7e84c6e..df7b84bd64b7f464a202153cc11ab302c5b79a28 100644 --- a/src/MapleFE/test/openjdk/JavaLangAccess.java.result +++ b/src/MapleFE/test/java/openjdk/JavaLangAccess.java.result @@ -11,7 +11,7 @@ class JavaLangAccess Constructors: constructor JavaLangAccess() throws: Methods: - func getEnumConstantsShared() throws: + func getEnumConstantsShared(klass) throws: return klass.getEnumConstantsShared() LocalClasses: LocalInterfaces: diff --git a/src/MapleFE/test/openjdk/LinkageError.java b/src/MapleFE/test/java/openjdk/LinkageError.java similarity index 100% rename from src/MapleFE/test/openjdk/LinkageError.java rename to src/MapleFE/test/java/openjdk/LinkageError.java diff --git a/src/MapleFE/test/openjdk/LinkageError.java.result b/src/MapleFE/test/java/openjdk/LinkageError.java.result similarity index 77% rename from src/MapleFE/test/openjdk/LinkageError.java.result rename to src/MapleFE/test/java/openjdk/LinkageError.java.result index b6b70d10cc091620ed09dba39f5ee9c613776f18..25e4a97982edf6748707966b4502f0e63b60f305 100644 --- a/src/MapleFE/test/openjdk/LinkageError.java.result +++ b/src/MapleFE/test/java/openjdk/LinkageError.java.result @@ -10,8 +10,8 @@ class LinkageError Instance Initializer: Constructors: constructor LinkageError() throws: - constructor LinkageError() throws: - constructor LinkageError() throws: + constructor LinkageError(s) throws: + constructor LinkageError(s,cause) throws: Methods: LocalClasses: LocalInterfaces: diff --git a/src/MapleFE/test/openjdk/Long.java b/src/MapleFE/test/java/openjdk/Long.java similarity index 100% rename from src/MapleFE/test/openjdk/Long.java rename to src/MapleFE/test/java/openjdk/Long.java diff --git a/src/MapleFE/test/openjdk/Long.java.result b/src/MapleFE/test/java/openjdk/Long.java.result similarity index 47% rename from src/MapleFE/test/openjdk/Long.java.result rename to src/MapleFE/test/java/openjdk/Long.java.result index bfd8e31deed5cc374eed04142ec59f642f62aef4..883f0af4538bc7c6061d3d85e876a8a0d797c913 100644 --- a/src/MapleFE/test/openjdk/Long.java.result +++ b/src/MapleFE/test/java/openjdk/Long.java.result @@ -1,7 +1,7 @@ Matched 5 tokens. Matched 14 tokens. Matched 21 tokens. -Matched 3482 tokens. +Matched 3480 tokens. 
============= Module =========== == Sub Tree == package java.lang @@ -12,37 +12,33 @@ import java.math == Sub Tree == class Long Fields: - MIN_VALUE=0 MAX_VALUE=-1 TYPE=() value SIZE=64 BYTES= serialVersionUID=-863034401 + MIN_VALUE=0 MAX_VALUE=-1 TYPE=()long.getComponentType() value SIZE=64 BYTES=SIZE Div Byte.SIZE serialVersionUID=-863034401 Instance Initializer: Constructors: - constructor Long() throws: + constructor Long(value) throws: this.value Assign value - constructor Long() throws: + constructor Long(s) throws: this.value Assign parseLong(s,10) Methods: - func toString() throws: + func toString(i,radix) throws: cond-branch cond:radix LT Character.MIN_RADIX Lor radix GT Character.MAX_RADIX true branch : - radix Assign 10 - false branch : + radix Assign 10 false branch : cond-branch cond:radix EQ 10 true branch : - return toString(i) - false branch : + return toString(i) false branch : - var:buf= - var:charPos=64 - var:negative=(i LT 0) + Decl: buf= + Decl: charPos=64 + Decl: negative=(i LT 0) cond-branch cond:negative true branch : - i Assign Sub - i + i Assign Minus i false branch : - while i LE Sub - radix Assign - i Assign + while i LE Minus radix Assign + i Assign i Div radix Assign cond-branch cond:negative @@ -50,77 +46,87 @@ class Long Assign - false branch : - return new String - func toUnsignedString() throws: + return new String(buf,charPos,(65 Sub charPos)) + func toUnsignedString(i,radix) throws: cond-branch cond:i GE 0 true branch : - return toString(i,radix) - false branch : + return toString(i,radix) false branch : A switch - func toUnsignedBigInteger() throws: + func toUnsignedBigInteger(i) throws: cond-branch cond:i GE 0 true branch : - return BigInteger.valueOf(i) - false branch : - var:upper=(int)(i Zext 32) - + return BigInteger.valueOf(i) false branch : + Decl: upper=(int)(i Zext 32) + Decl: lower=(int)i + return BigInteger.valueOf(Integer.toUnsignedLong(upper)).shiftLeft(32).add(BigInteger.valueOf(Integer.toUnsignedLong(lower))) - func toHexString() throws: + func toHexString(i) throws: return toUnsignedString0(i,4) - func toOctalString() throws: + func toOctalString(i) throws: return toUnsignedString0(i,3) - func toBinaryString() throws: + func toBinaryString(i) throws: return toUnsignedString0(i,1) - func toUnsignedString0() throws: - var:mag=Long.SIZE Sub Long.numberOfLeadingZeros(val) - var:chars=Math.max(((mag Add (shift Sub 1))shift)1) - var:buf= + func toUnsignedString0(val,shift) throws: + Decl: mag=Long.SIZE Sub Long.numberOfLeadingZeros(val) + Decl: chars=Math.max(((mag Add (shift Sub 1)) Div shift)1) + Decl: buf= formatUnsignedLong(val,shift,buf,0,chars) - return new String - func formatUnsignedLong() throws: - var:charPos=len - var:radix=1 Shl shift - var:mask=radix Sub 1 + return new String(buf) + func formatUnsignedLong(val,shift,buf,offset,len) throws: + Decl: charPos=len + Decl: radix=1 Shl shift + Decl: mask=radix Sub 1 do Assign val ZextAssign shift while val NE 0 Land charPos GT 0 return charPos - func toString() throws: + func toString(i) throws: cond-branch cond:i EQ Long.MIN_VALUE true branch : - return "-9223372036854775808" - false branch : + return "-9223372036854775808" false branch : - var:size= - var:buf= + Decl: size= + Decl: buf= getChars(i,size,buf) - return new String - func toUnsignedString() throws: + return new String(buf) + func toUnsignedString(i) throws: return toUnsignedString(i,10) - func getChars() throws: - var:q - var:r - var:charPos=index - var:sign=0 + func getChars(i,index,buf) throws: + Decl: q + Decl: r + Decl: 
charPos=index + Decl: sign=0 cond-branch cond:i LT 0 true branch : sign Assign - - i Assign Sub - i + i Assign Minus i false branch : - while i GT Integer.MAX_VALUE q Assign - - - var:q2 - var:i2=(int)i - while i2 GE 65536 q2 Assign + while i GT Integer.MAX_VALUE q Assign i Div 100 + r Assign (int)(i Sub ((q Shl 6) Add (q Shl 5) Add (q Shl 2))) + i Assign q + Assign + Assign + Decl: q2 + Decl: i2=(int)i + while i2 GE 65536 q2 Assign i2 Div 100 + r Assign i2 Sub ((q2 Shl 6) Add (q2 Shl 5) Add (q2 Shl 2)) + i2 Assign q2 + Assign + Assign for ( ) - q2 Assign () Zext (16 Add 3) + q2 Assign (i2 Mul 52429) Zext (16 Add 3) + r Assign i2 Sub ((q2 Shl 3) Add (q2 Shl 1)) + Assign + i2 Assign q2 + cond-branch cond:i2 EQ 0 + true branch : + break: + false branch : cond-branch cond:sign NE 0 @@ -128,113 +134,159 @@ while val NE 0 Land charPos GT 0 Assign sign false branch : - func stringSize() throws: - var:p=10 + func stringSize(x) throws: + Decl: p=10 for ( ) cond-branch cond:x LT p true branch : - return i - false branch : + return i false branch : - p Assign + p Assign 10 Mul p return 19 - func parseLong() throws: NumberFormatException + func parseLong(s,radix) throws: NumberFormatException cond-branch cond:s EQ null true branch : - new NumberFormatException + new NumberFormatException("null") false branch : cond-branch cond:radix LT Character.MIN_RADIX true branch : - new NumberFormatException + new NumberFormatException("radix " Add radix Add " less than Character.MIN_RADIX") false branch : cond-branch cond:radix GT Character.MAX_RADIX true branch : - new NumberFormatException + new NumberFormatException("radix " Add radix Add " greater than Character.MAX_RADIX") false branch : - var:result=0 - var:negative=false - var:i=0,len=s.length() - var:limit=Sub - Long.MAX_VALUE - var:multmin - var:digit + Decl: result=0 + Decl: negative=false + Decl: i=0,len=s.length() + Decl: limit=Minus Long.MAX_VALUE + Decl: multmin + Decl: digit cond-branch cond:len GT 0 true branch : - var:firstChar=s.charAt(0) + Decl: firstChar=s.charAt(0) + cond-branch cond:firstChar LT 0 + true branch : + cond-branch cond:firstChar EQ - + true branch : + negative Assign true + limit Assign Long.MIN_VALUE + false branch : + cond-branch cond:firstChar NE + + true branch : + NumberFormatException.forInputString(s) false branch : + + cond-branch cond:len EQ 1 + true branch : + NumberFormatException.forInputString(s) false branch : + + i Inc + + false branch : + + multmin Assign limit Div radix + while i LT len digit Assign Character.digit(s.charAt(i Inc +),radix) + cond-branch cond:digit LT 0 + true branch : + NumberFormatException.forInputString(s) + false branch : + + cond-branch cond:result LT multmin + true branch : + NumberFormatException.forInputString(s) + false branch : + + result MulAssign radix + cond-branch cond:result LT limit Add digit + true branch : + NumberFormatException.forInputString(s) + false branch : + + result SubAssign digit false branch : NumberFormatException.forInputString(s) return - func parseLong() throws: NumberFormatException + func parseLong(s) throws: NumberFormatException return parseLong(s,10) - func parseUnsignedLong() throws: NumberFormatException + func parseUnsignedLong(s,radix) throws: NumberFormatException cond-branch cond:s EQ null true branch : - new NumberFormatException + new NumberFormatException("null") false branch : - var:len=s.length() + Decl: len=s.length() cond-branch cond:len GT 0 true branch : - var:firstChar=s.charAt(0) + Decl: firstChar=s.charAt(0) cond-branch cond:firstChar EQ 
- true branch : - new NumberFormatException + new NumberFormatException(String.format("Illegal leading minus sign " Add "on unsigned string %s.",s)) false branch : cond-branch cond:len LE 12 Lor (radix EQ 10 Land len LE 18) true branch : return parseLong(s,radix) false branch : + Decl: first=parseLong(s.substring(0,len Sub 1),radix) + Decl: second=Character.digit(s.charAt(len Sub 1),radix) + cond-branch cond:second LT 0 + true branch : + new NumberFormatException("Bad digit at end of " Add s) + false branch : + Decl: result=first Mul radix Add second + cond-branch cond:compareUnsigned(result,first) LT 0 + true branch : + new NumberFormatException(String.format("String value %s exceeds " Add "range of unsigned long.",s)) + false branch : + + return result false branch : NumberFormatException.forInputString(s) - func parseUnsignedLong() throws: NumberFormatException + func parseUnsignedLong(s) throws: NumberFormatException return parseUnsignedLong(s,10) - func valueOf() throws: NumberFormatException + func valueOf(s,radix) throws: NumberFormatException return Long.valueOf(parseLong(s,radix)) - func valueOf() throws: NumberFormatException + func valueOf(s) throws: NumberFormatException return Long.valueOf(parseLong(s,10)) - func valueOf() throws: - var:offset=128 - cond-branch cond:l GE Sub - 128 Land l LE 127 + func valueOf(l) throws: + Decl: offset=128 + cond-branch cond:l GE -128 Land l LE 127 true branch : return false branch : - return new Long - func decode() throws: NumberFormatException - var:radix=10 - var:index=0 - var:negative=false - var:result + return new Long(l) + func decode(nm) throws: NumberFormatException + Decl: radix=10 + Decl: index=0 + Decl: negative=false + Decl: result cond-branch cond:nm.length() EQ 0 true branch : - new NumberFormatException - false branch : + new NumberFormatException("Zero length string") false branch : - var:firstChar=nm.charAt(0) + Decl: firstChar=nm.charAt(0) cond-branch cond:firstChar EQ - true branch : negative Assign true - indexInc + index Inc false branch : cond-branch cond:firstChar EQ + true branch : - indexInc - + index Inc false branch : - cond-branch cond:nm.startsWith("0x",index) Lor nm.startsWith("0X",index) true branch : index AddAssign 2 @@ -242,24 +294,28 @@ while val NE 0 Land charPos GT 0 false branch : cond-branch cond:nm.startsWith("#",index) true branch : - indexInc + index Inc radix Assign 16 false branch : cond-branch cond:nm.startsWith("0",index) Land nm.length() GT 1 Add index true branch : - indexInc + index Inc radix Assign 8 false branch : - - cond-branch cond:nm.startsWith("-",index) Lor nm.startsWith("+",index) true branch : - new NumberFormatException - false branch : + new NumberFormatException("Sign character in wrong position") false branch : + result Assign Long.valueOf(nm.substring(index),radix) + result Assign + + NumberFormatException + e + Decl: constant= + result Assign Long.valueOf(constant,radix) return result func byteValue() throws: @@ -278,40 +334,45 @@ while val NE 0 Land charPos GT 0 return toString(value) func hashCode() throws: return Long.hashCode(value) - func hashCode() throws: + func hashCode(value) throws: return (int)(value Bxor (value Zext 32)) - func equals() throws: - cond-branch cond: + func equals(obj) throws: + cond-branch cond:obj instanceof Long true branch : - return value EQ ((Long)obj)longValue + return value EQ (Long)obj.longValue() false branch : return false - func getLong() throws: + func getLong(nm) throws: return getLong(nm,null) - func getLong() throws: - 
var:result=Long.getLong(nm,null) + func getLong(nm,val) throws: + Decl: result=Long.getLong(nm,null) return - func getLong() throws: - var:v=null + func getLong(nm,val) throws: + Decl: v=null v Assign System.getProperty(nm) + IllegalArgumentException + NullPointerException + e cond-branch cond:v NE null true branch : return Long.decode(v) + NumberFormatException + e false branch : return val - func compareTo() throws: + func compareTo(anotherLong) throws: return compare(this.value,anotherLong.value) - func compare() throws: + func compare(x,y) throws: return - func compareUnsigned() throws: + func compareUnsigned(x,y) throws: return compare(x Add MIN_VALUE,y Add MIN_VALUE) - func divideUnsigned() throws: + func divideUnsigned(dividend,divisor) throws: cond-branch cond:divisor LT 0 true branch : return @@ -319,23 +380,20 @@ while val NE 0 Land charPos GT 0 cond-branch cond:dividend GT 0 true branch : - return - false branch : - return + return dividend Div divisor false branch : + return toUnsignedBigInteger(dividend).divide(toUnsignedBigInteger(divisor)).longValue() - func remainderUnsigned() throws: + func remainderUnsigned(dividend,divisor) throws: cond-branch cond:dividend GT 0 Land divisor GT 0 true branch : - return + return dividend Mod divisor false branch : cond-branch cond:compareUnsigned(dividend,divisor) LT 0 true branch : - return dividend - false branch : - return - + return dividend false branch : + return toUnsignedBigInteger(dividend).remainder(toUnsignedBigInteger(divisor)).longValue() - func highestOneBit() throws: + func highestOneBit(i) throws: i BorAssign (i Shr 1) i BorAssign (i Shr 2) i BorAssign (i Shr 4) @@ -343,17 +401,15 @@ while val NE 0 Land charPos GT 0 i BorAssign (i Shr 16) i BorAssign (i Shr 32) return i Sub (i Zext 1) - func lowestOneBit() throws: - return i Band Sub - i - func numberOfLeadingZeros() throws: + func lowestOneBit(i) throws: + return i Band Minus i + func numberOfLeadingZeros(i) throws: cond-branch cond:i EQ 0 true branch : - return 64 - false branch : + return 64 false branch : - var:n=1 - var:x=(int)(i Zext 32) + Decl: n=1 + Decl: x=(int)(i Zext 32) cond-branch cond:x EQ 0 true branch : n AddAssign 32 @@ -386,14 +442,13 @@ while val NE 0 Land charPos GT 0 n SubAssign x Zext 31 return n - func numberOfTrailingZeros() throws: - var:x,y + func numberOfTrailingZeros(i) throws: + Decl: x,y cond-branch cond:i EQ 0 true branch : - return 64 - false branch : + return 64 false branch : - var:n=63 + Decl: n=63 y Assign (int)i cond-branch cond:y NE 0 true branch : @@ -401,7 +456,6 @@ while val NE 0 Land charPos GT 0 x Assign y false branch : x Assign (int)(i Zext 32) - y Assign x Shl 16 cond-branch cond:y NE 0 true branch : @@ -431,7 +485,7 @@ while val NE 0 Land charPos GT 0 false branch : return n Sub ((x Shl 1) Zext 31) - func bitCount() throws: + func bitCount(i) throws: i Assign i Sub ((i Zext 1) Band 1431655765) i Assign (i Band 858993459) Add ((i Zext 2) Band 858993459) i Assign (i Add (i Zext 4)) Band -252645121 @@ -439,30 +493,27 @@ while val NE 0 Land charPos GT 0 i Assign i Add (i Zext 16) i Assign i Add (i Zext 32) return (int)i Band 127 - func rotateLeft() throws: - return (i Shl distance) Bor (i Zext Sub - distance) - func rotateRight() throws: - return (i Zext distance) Bor (i Shl Sub - distance) - func reverse() throws: + func rotateLeft(i,distance) throws: + return (i Shl distance) Bor (i Zext Minus distance) + func rotateRight(i,distance) throws: + return (i Zext distance) Bor (i Shl Minus distance) + func reverse(i) throws: i Assign 
(i Band 1431655765) Shl 1 Bor (i Zext 1) Band 1431655765 i Assign (i Band 858993459) Shl 2 Bor (i Zext 2) Band 858993459 i Assign (i Band -252645121) Shl 4 Bor (i Zext 4) Band -252645121 i Assign (i Band 267390975) Shl 8 Bor (i Zext 8) Band 267390975 i Assign (i Shl 48) Bor ((i Band -1048576) Shl 16) Bor ((i Zext 16) Band -1048576) Bor (i Zext 48) return i - func signum() throws: - return (int)((i Shr 63) Bor (Sub - i Zext 63)) - func reverseBytes() throws: + func signum(i) throws: + return (int)((i Shr 63) Bor (Minus i Zext 63)) + func reverseBytes(i) throws: i Assign (i Band 267390975) Shl 8 Bor (i Zext 8) Band 267390975 return (i Shl 48) Bor ((i Band -1048576) Shl 16) Bor ((i Zext 16) Band -1048576) Bor (i Zext 48) - func sum() throws: + func sum(a,b) throws: return a Add b - func max() throws: + func max(a,b) throws: return Math.max(a,b) - func min() throws: + func min(a,b) throws: return Math.min(a,b) LocalClasses: class LongCache @@ -477,3 +528,23 @@ while val NE 0 Land charPos GT 0 LocalInterfaces: LocalInterfaces: +Identifier:result has no decl. +Identifier:result has no decl. +Identifier:NumberFormatException has no decl. +Identifier:e has no decl. +Identifier:result has no decl. +Identifier:v has no decl. +Identifier:IllegalArgumentException has no decl. +Identifier:NullPointerException has no decl. +Identifier:e has no decl. +Identifier:n has no decl. +Identifier:x has no decl. +Identifier:y has no decl. +Identifier:y has no decl. +Identifier:x has no decl. +Identifier:y has no decl. +Identifier:x has no decl. +Identifier:y has no decl. +Identifier:x has no decl. +Identifier:y has no decl. +Identifier:x has no decl. diff --git a/src/MapleFE/test/openjdk/Math.java b/src/MapleFE/test/java/openjdk/Math.java similarity index 100% rename from src/MapleFE/test/openjdk/Math.java rename to src/MapleFE/test/java/openjdk/Math.java diff --git a/src/MapleFE/test/openjdk/Math.java.result b/src/MapleFE/test/java/openjdk/Math.java.result similarity index 48% rename from src/MapleFE/test/openjdk/Math.java.result rename to src/MapleFE/test/java/openjdk/Math.java.result index a18f75bee0181b780ab8522c42a24c036ac53fbf..47d7b98538b0be951dfc432801ba38c220114992 100644 --- a/src/MapleFE/test/openjdk/Math.java.result +++ b/src/MapleFE/test/java/openjdk/Math.java.result @@ -3,9 +3,7 @@ Matched 14 tokens. Matched 21 tokens. Matched 28 tokens. Matched 35 tokens. -Matched 3300 tokens. -Convert unary --> binary -Convert unary --> binary +Matched 3286 tokens. 
============= Module =========== == Sub Tree == package java.lang @@ -20,213 +18,214 @@ import sun.misc.DoubleConsts == Sub Tree == class Math Fields: - E=2.71828 PI=3.14159 negativeZeroFloatBits=Float.floatToRawIntBits(Sub - 0) negativeZeroDoubleBits=Double.doubleToRawLongBits(Sub - 0) twoToTheDoubleScaleUp=powerOfTwoD(512) twoToTheDoubleScaleDown=powerOfTwoD(Sub - 512) + E=2.71828 PI=3.14159 negativeZeroFloatBits=Float.floatToRawIntBits(-0) negativeZeroDoubleBits=Double.doubleToRawLongBits(-0) twoToTheDoubleScaleUp=powerOfTwoD(512) twoToTheDoubleScaleDown=powerOfTwoD(-512) Instance Initializer: Constructors: constructor Math() throws: Methods: - func sin() throws: - func cos() throws: - func tan() throws: - func asin() throws: - func acos() throws: - func atan() throws: - func toRadians() throws: - return - func toDegrees() throws: - return - func exp() throws: - func log() throws: - func log10() throws: - func sqrt() throws: - func cbrt() throws: - func IEEEremainder() throws: - func ceil() throws: - func floor() throws: - func rint() throws: - func atan2() throws: - func pow() throws: - func round() throws: - var:intBits=Float.floatToRawIntBits(a) - var:biasedExp=(intBits Band FloatConsts.EXP_BIT_MASK) Shr (FloatConsts.SIGNIFICAND_WIDTH Sub 1) - var:shift=(FloatConsts.SIGNIFICAND_WIDTH Sub 2 Add FloatConsts.EXP_BIAS) Sub biasedExp - cond-branch cond:(shift Band Sub - 32) EQ 0 - true branch : - var:r=((intBits Band FloatConsts.SIGNIF_BIT_MASK) Bor (FloatConsts.SIGNIF_BIT_MASK Add 1)) + func sin(a) throws: + func cos(a) throws: + func tan(a) throws: + func asin(a) throws: + func acos(a) throws: + func atan(a) throws: + func toRadians(angdeg) throws: + return angdeg Div 180 Mul PI + func toDegrees(angrad) throws: + return angrad Mul 180 Div PI + func exp(a) throws: + func log(a) throws: + func log10(a) throws: + func sqrt(a) throws: + func cbrt(a) throws: + func IEEEremainder(f1,f2) throws: + func ceil(a) throws: + func floor(a) throws: + func rint(a) throws: + func atan2(y,x) throws: + func pow(a,b) throws: + func round(a) throws: + Decl: intBits=Float.floatToRawIntBits(a) + Decl: biasedExp=(intBits Band FloatConsts.EXP_BIT_MASK) Shr (FloatConsts.SIGNIFICAND_WIDTH Sub 1) + Decl: shift=(FloatConsts.SIGNIFICAND_WIDTH Sub 2 Add FloatConsts.EXP_BIAS) Sub biasedExp + cond-branch cond:(shift Band -32) EQ 0 + true branch : + Decl: r=((intBits Band FloatConsts.SIGNIF_BIT_MASK) Bor (FloatConsts.SIGNIF_BIT_MASK Add 1)) + cond-branch cond:intBits LT 0 + true branch : + r Assign Minus r + false branch : + return ((r Shr shift) Add 1) Shr 1 false branch : return (int)a - func round() throws: - var:longBits=Double.doubleToRawLongBits(a) - var:biasedExp=(longBits Band DoubleConsts.EXP_BIT_MASK) Shr (DoubleConsts.SIGNIFICAND_WIDTH Sub 1) - var:shift=(DoubleConsts.SIGNIFICAND_WIDTH Sub 2 Add DoubleConsts.EXP_BIAS) Sub biasedExp - cond-branch cond:(shift Band Sub - 64) EQ 0 + func round(a) throws: + Decl: longBits=Double.doubleToRawLongBits(a) + Decl: biasedExp=(longBits Band DoubleConsts.EXP_BIT_MASK) Shr (DoubleConsts.SIGNIFICAND_WIDTH Sub 1) + Decl: shift=(DoubleConsts.SIGNIFICAND_WIDTH Sub 2 Add DoubleConsts.EXP_BIAS) Sub biasedExp + cond-branch cond:(shift Band -64) EQ 0 true branch : - var:r=((longBits Band DoubleConsts.SIGNIF_BIT_MASK) Bor (DoubleConsts.SIGNIF_BIT_MASK Add 1)) + Decl: r=((longBits Band DoubleConsts.SIGNIF_BIT_MASK) Bor (DoubleConsts.SIGNIF_BIT_MASK Add 1)) + cond-branch cond:longBits LT 0 + true branch : + r Assign Minus r + false branch : + return ((r Shr shift) Add 1) Shr 1 false 
branch : return (long)a func random() throws: return RandomNumberGeneratorHolder.randomNumberGenerator.nextDouble() - func setRandomSeedInternal() throws: + func setRandomSeedInternal(seed) throws: RandomNumberGeneratorHolder.randomNumberGenerator.setSeed(seed) func randomIntInternal() throws: return RandomNumberGeneratorHolder.randomNumberGenerator.nextInt() func randomLongInternal() throws: return RandomNumberGeneratorHolder.randomNumberGenerator.nextLong() - func addExact() throws: - var:r=x Add y + func addExact(x,y) throws: + Decl: r=x Add y cond-branch cond:((x Bxor r) Band (y Bxor r)) LT 0 true branch : - new ArithmeticException + new ArithmeticException("integer overflow") false branch : return r - func addExact() throws: - var:r=x Add y + func addExact(x,y) throws: + Decl: r=x Add y cond-branch cond:((x Bxor r) Band (y Bxor r)) LT 0 true branch : - new ArithmeticException + new ArithmeticException("long overflow") false branch : return r - func subtractExact() throws: - var:r=x Sub y + func subtractExact(x,y) throws: + Decl: r=x Sub y cond-branch cond:((x Bxor y) Band (x Bxor r)) LT 0 true branch : - new ArithmeticException + new ArithmeticException("integer overflow") false branch : return r - func subtractExact() throws: - var:r=x Sub y + func subtractExact(x,y) throws: + Decl: r=x Sub y cond-branch cond:((x Bxor y) Band (x Bxor r)) LT 0 true branch : - new ArithmeticException + new ArithmeticException("long overflow") false branch : return r - func multiplyExact() throws: - var:r= + func multiplyExact(x,y) throws: + Decl: r=(long)x Mul (long)y cond-branch cond:(int)r NE r true branch : - new ArithmeticException + new ArithmeticException("integer overflow") false branch : return (int)r - func multiplyExact() throws: - var:r= - var:ax=Math.abs(x) - var:ay=Math.abs(y) + func multiplyExact(x,y) throws: + Decl: r=x Mul y + Decl: ax=Math.abs(x) + Decl: ay=Math.abs(y) cond-branch cond:((ax Bor ay) Zext 31 NE 0) true branch : - cond-branch cond:((y NE 0) Land ( NE x)) Lor (x EQ Long.MIN_VALUE Land y EQ Sub - 1) + cond-branch cond:((y NE 0) Land (r Div y NE x)) Lor (x EQ Long.MIN_VALUE Land y EQ -1) true branch : - new ArithmeticException + new ArithmeticException("long overflow") false branch : false branch : return r - func incrementExact() throws: + func incrementExact(a) throws: cond-branch cond:a EQ Integer.MAX_VALUE true branch : - new ArithmeticException + new ArithmeticException("integer overflow") false branch : return a Add 1 - func incrementExact() throws: + func incrementExact(a) throws: cond-branch cond:a EQ Long.MAX_VALUE true branch : - new ArithmeticException + new ArithmeticException("long overflow") false branch : return a Add 1 - func decrementExact() throws: + func decrementExact(a) throws: cond-branch cond:a EQ Integer.MIN_VALUE true branch : - new ArithmeticException + new ArithmeticException("integer overflow") false branch : return a Sub 1 - func decrementExact() throws: + func decrementExact(a) throws: cond-branch cond:a EQ Long.MIN_VALUE true branch : - new ArithmeticException + new ArithmeticException("long overflow") false branch : return a Sub 1 - func negateExact() throws: + func negateExact(a) throws: cond-branch cond:a EQ Integer.MIN_VALUE true branch : - new ArithmeticException + new ArithmeticException("integer overflow") false branch : - return Sub - a - func negateExact() throws: + return Minus a + func negateExact(a) throws: cond-branch cond:a EQ Long.MIN_VALUE true branch : - new ArithmeticException + new ArithmeticException("long overflow") false 
branch : - return Sub - a - func toIntExact() throws: + return Minus a + func toIntExact(value) throws: cond-branch cond:(int)value NE value true branch : - new ArithmeticException + new ArithmeticException("integer overflow") false branch : return (int)value - func floorDiv() throws: - var:r= - cond-branch cond:(x Bxor y) LT 0 Land ( NE x) + func floorDiv(x,y) throws: + Decl: r=x Div y + cond-branch cond:(x Bxor y) LT 0 Land (r Mul y NE x) true branch : - rDec + r Dec false branch : return r - func floorDiv() throws: - var:r= - cond-branch cond:(x Bxor y) LT 0 Land ( NE x) + func floorDiv(x,y) throws: + Decl: r=x Div y + cond-branch cond:(x Bxor y) LT 0 Land (r Mul y NE x) true branch : - rDec + r Dec false branch : return r - func floorMod() throws: - var:r=x Sub + func floorMod(x,y) throws: + Decl: r=x Sub floorDiv(x,y) Mul y return r - func floorMod() throws: - return x Sub - func abs() throws: + func floorMod(x,y) throws: + return x Sub floorDiv(x,y) Mul y + func abs(a) throws: return - func abs() throws: + func abs(a) throws: return - func abs() throws: + func abs(a) throws: return Float.intBitsToFloat(2147483647 Band Float.floatToRawIntBits(a)) - func abs() throws: + func abs(a) throws: return Double.longBitsToDouble(-1 Band Double.doubleToRawLongBits(a)) - func max() throws: + func max(a,b) throws: return - func max() throws: + func max(a,b) throws: return - func max() throws: + func max(a,b) throws: cond-branch cond:a NE a true branch : - return a - false branch : + return a false branch : cond-branch cond:(a EQ 0) Land (b EQ 0) Land (Float.floatToRawIntBits(a) EQ negativeZeroFloatBits) true branch : @@ -234,11 +233,10 @@ class Math false branch : return - func max() throws: + func max(a,b) throws: cond-branch cond:a NE a true branch : - return a - false branch : + return a false branch : cond-branch cond:(a EQ 0) Land (b EQ 0) Land (Double.doubleToRawLongBits(a) EQ negativeZeroDoubleBits) true branch : @@ -246,15 +244,14 @@ class Math false branch : return - func min() throws: + func min(a,b) throws: return - func min() throws: + func min(a,b) throws: return - func min() throws: + func min(a,b) throws: cond-branch cond:a NE a true branch : - return a - false branch : + return a false branch : cond-branch cond:(a EQ 0) Land (b EQ 0) Land (Float.floatToRawIntBits(b) EQ negativeZeroFloatBits) true branch : @@ -262,11 +259,10 @@ class Math false branch : return - func min() throws: + func min(a,b) throws: cond-branch cond:a NE a true branch : - return a - false branch : + return a false branch : cond-branch cond:(a EQ 0) Land (b EQ 0) Land (Double.doubleToRawLongBits(b) EQ negativeZeroDoubleBits) true branch : @@ -274,33 +270,33 @@ class Math false branch : return - func ulp() throws: - var:exp=getExponent(d) + func ulp(d) throws: + Decl: exp=getExponent(d) A switch - func ulp() throws: - var:exp=getExponent(f) + func ulp(f) throws: + Decl: exp=getExponent(f) A switch - func signum() throws: + func signum(d) throws: return - func signum() throws: + func signum(f) throws: return - func sinh() throws: - func cosh() throws: - func tanh() throws: - func hypot() throws: - func expm1() throws: - func log1p() throws: - func copySign() throws: + func sinh(x) throws: + func cosh(x) throws: + func tanh(x) throws: + func hypot(x,y) throws: + func expm1(x) throws: + func log1p(x) throws: + func copySign(magnitude,sign) throws: return Double.longBitsToDouble((Double.doubleToRawLongBits(sign) Band (DoubleConsts.SIGN_BIT_MASK)) Bor (Double.doubleToRawLongBits(magnitude) Band 
(DoubleConsts.EXP_BIT_MASK Bor DoubleConsts.SIGNIF_BIT_MASK))) - func copySign() throws: + func copySign(magnitude,sign) throws: return Float.intBitsToFloat((Float.floatToRawIntBits(sign) Band (FloatConsts.SIGN_BIT_MASK)) Bor (Float.floatToRawIntBits(magnitude) Band (FloatConsts.EXP_BIT_MASK Bor FloatConsts.SIGNIF_BIT_MASK))) - func getExponent() throws: + func getExponent(f) throws: return ((Float.floatToRawIntBits(f) Band FloatConsts.EXP_BIT_MASK) Shr (FloatConsts.SIGNIFICAND_WIDTH Sub 1)) Sub FloatConsts.EXP_BIAS - func getExponent() throws: + func getExponent(d) throws: return (int)(((Double.doubleToRawLongBits(d) Band DoubleConsts.EXP_BIT_MASK) Shr (DoubleConsts.SIGNIFICAND_WIDTH Sub 1)) Sub DoubleConsts.EXP_BIAS) - func nextAfter() throws: + func nextAfter(start,direction) throws: cond-branch cond:Double.isNaN(start) Lor Double.isNaN(direction) true branch : return start Add direction @@ -309,11 +305,23 @@ class Math true branch : return direction false branch : - var:transducer=Double.doubleToRawLongBits(start Add 0) - - - - func nextAfter() throws: + Decl: transducer=Double.doubleToRawLongBits(start Add 0) + cond-branch cond:direction GT start + true branch : + transducer Assign transducer Add () + false branch : + assert direction LT start : + cond-branch cond:transducer GT 0 + true branch : + PreDec transducer false branch : + cond-branch cond:transducer LT 0 + true branch : + PreInc transducer false branch : + transducer Assign DoubleConsts.SIGN_BIT_MASK Bor 1 + + return Double.longBitsToDouble(transducer) + + func nextAfter(start,direction) throws: cond-branch cond:Float.isNaN(start) Lor Double.isNaN(direction) true branch : return start Add (float)direction @@ -322,67 +330,70 @@ class Math true branch : return (float)direction false branch : - var:transducer=Float.floatToRawIntBits(start Add 0) - - - - func nextUp() throws: + Decl: transducer=Float.floatToRawIntBits(start Add 0) + cond-branch cond:direction GT start + true branch : + transducer Assign transducer Add () + false branch : + assert direction LT start : + cond-branch cond:transducer GT 0 + true branch : + PreDec transducer false branch : + cond-branch cond:transducer LT 0 + true branch : + PreInc transducer false branch : + transducer Assign FloatConsts.SIGN_BIT_MASK Bor 1 + + return Float.intBitsToFloat(transducer) + + func nextUp(d) throws: cond-branch cond:Double.isNaN(d) Lor d EQ Double.POSITIVE_INFINITY true branch : - return d - false branch : + return d false branch : d AddAssign 0 return Double.longBitsToDouble(Double.doubleToRawLongBits(d) Add ()) - func nextUp() throws: + func nextUp(f) throws: cond-branch cond:Float.isNaN(f) Lor f EQ FloatConsts.POSITIVE_INFINITY true branch : - return f - false branch : + return f false branch : f AddAssign 0 return Float.intBitsToFloat(Float.floatToRawIntBits(f) Add ()) - func nextDown() throws: + func nextDown(d) throws: cond-branch cond:Double.isNaN(d) Lor d EQ Double.NEGATIVE_INFINITY true branch : - return d - false branch : + return d false branch : cond-branch cond:d EQ 0 true branch : - return Sub - Double.MIN_VALUE - false branch : + return Minus Double.MIN_VALUE false branch : return Double.longBitsToDouble(Double.doubleToRawLongBits(d) Add ()) - - func nextDown() throws: + func nextDown(f) throws: cond-branch cond:Float.isNaN(f) Lor f EQ Float.NEGATIVE_INFINITY true branch : - return f - false branch : + return f false branch : cond-branch cond:f EQ 0 true branch : - return Sub - Float.MIN_VALUE - false branch : + return Minus Float.MIN_VALUE false branch : 
return Float.intBitsToFloat(Float.floatToRawIntBits(f) Add ()) - - func scalb() throws: - var:MAX_SCALE=DoubleConsts.MAX_EXPONENT Add Sub - DoubleConsts.MIN_EXPONENT Add DoubleConsts.SIGNIFICAND_WIDTH Add 1 - var:exp_adjust=0 - var:scale_increment=0 - var:exp_delta=Double.NaN + func scalb(d,scaleFactor) throws: + Decl: MAX_SCALE=DoubleConsts.MAX_EXPONENT Add Minus DoubleConsts.MIN_EXPONENT Add DoubleConsts.SIGNIFICAND_WIDTH Add 1 + Decl: exp_adjust=0 + Decl: scale_increment=0 + Decl: exp_delta=Double.NaN cond-branch cond:scaleFactor LT 0 true branch : - scaleFactor Assign Math.max(scaleFactor Sub MAX_SCALE) - + scaleFactor Assign Math.max(scaleFactor,Minus MAX_SCALE) + scale_increment Assign -512 + exp_delta Assign twoToTheDoubleScaleDown false branch : scaleFactor Assign Math.min(scaleFactor,MAX_SCALE) + scale_increment Assign 512 + exp_delta Assign twoToTheDoubleScaleUp - - var:t=(scaleFactor Shr 9 Sub 1) Zext 32 Sub 9 + Decl: t=(scaleFactor Shr 9 Sub 1) Zext 32 Sub 9 exp_adjust Assign ((scaleFactor Add t) Band (512 Sub 1)) Sub t d MulAssign powerOfTwoD(exp_adjust) scaleFactor SubAssign exp_adjust @@ -390,21 +401,20 @@ class Math scaleFactor SubAssign scale_increment return d - func scalb() throws: - var:MAX_SCALE=FloatConsts.MAX_EXPONENT Add Sub - FloatConsts.MIN_EXPONENT Add FloatConsts.SIGNIFICAND_WIDTH Add 1 - scaleFactor Assign Math.max(Math.min(scaleFactor,MAX_SCALE) Sub MAX_SCALE) - return (float)() - func powerOfTwoD() throws: + func scalb(f,scaleFactor) throws: + Decl: MAX_SCALE=FloatConsts.MAX_EXPONENT Add Minus FloatConsts.MIN_EXPONENT Add FloatConsts.SIGNIFICAND_WIDTH Add 1 + scaleFactor Assign Math.max(Math.min(scaleFactor,MAX_SCALE),Minus MAX_SCALE) + return (float)((double)f Mul powerOfTwoD(scaleFactor)) + func powerOfTwoD(n) throws: assert (n GE DoubleConsts.MIN_EXPONENT Land n LE DoubleConsts.MAX_EXPONENT) : return Double.longBitsToDouble((((long)n Add (long)DoubleConsts.EXP_BIAS) Shl (DoubleConsts.SIGNIFICAND_WIDTH Sub 1)) Band DoubleConsts.EXP_BIT_MASK) - func powerOfTwoF() throws: + func powerOfTwoF(n) throws: assert (n GE FloatConsts.MIN_EXPONENT Land n LE FloatConsts.MAX_EXPONENT) : return Float.intBitsToFloat(((n Add FloatConsts.EXP_BIAS) Shl (FloatConsts.SIGNIFICAND_WIDTH Sub 1)) Band FloatConsts.EXP_BIT_MASK) LocalClasses: class RandomNumberGeneratorHolder Fields: - randomNumberGenerator=new Random + randomNumberGenerator=new Random() Instance Initializer: Constructors: Methods: @@ -412,3 +422,6 @@ class Math LocalInterfaces: LocalInterfaces: +Identifier:exp_adjust has no decl. +Identifier:t has no decl. +Identifier:exp_adjust has no decl. diff --git a/src/MapleFE/test/java/openjdk/NegativeArraySizeException.java b/src/MapleFE/test/java/openjdk/NegativeArraySizeException.java new file mode 100644 index 0000000000000000000000000000000000000000..a90c8157e5ba879eebcc1498498ba1481f43fe6b --- /dev/null +++ b/src/MapleFE/test/java/openjdk/NegativeArraySizeException.java @@ -0,0 +1,55 @@ +/* + * Copyright (c) 1994, 2008, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. 
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package java.lang; + +/** + * Thrown if an application tries to create an array with negative size. + * + * @author unascribed + * @since JDK1.0 + */ +public +class NegativeArraySizeException extends RuntimeException { + private static final long serialVersionUID = -8960118058596991861L; + + /** + * Constructs a NegativeArraySizeException with no + * detail message. + */ + public NegativeArraySizeException() { + super(); + } + + /** + * Constructs a NegativeArraySizeException with the + * specified detail message. + * + * @param s the detail message. + */ + public NegativeArraySizeException(String s) { + super(s); + } +} diff --git a/src/MapleFE/test/java/openjdk/NegativeArraySizeException.java.result b/src/MapleFE/test/java/openjdk/NegativeArraySizeException.java.result new file mode 100644 index 0000000000000000000000000000000000000000..c3722843d45de39a0e0df2186fe8e19593ab976d --- /dev/null +++ b/src/MapleFE/test/java/openjdk/NegativeArraySizeException.java.result @@ -0,0 +1,17 @@ +Matched 5 tokens. +Matched 43 tokens. +============= Module =========== +== Sub Tree == +package java.lang +== Sub Tree == +class NegativeArraySizeException + Fields: + serialVersionUID=868449419 + Instance Initializer: + Constructors: + constructor NegativeArraySizeException() throws: + constructor NegativeArraySizeException(s) throws: + Methods: + LocalClasses: + LocalInterfaces: + diff --git a/src/MapleFE/test/java/openjdk/NoClassDefFoundError.java b/src/MapleFE/test/java/openjdk/NoClassDefFoundError.java new file mode 100644 index 0000000000000000000000000000000000000000..869c8d9fb2ba8ab303b3211433fd37149e0f37e9 --- /dev/null +++ b/src/MapleFE/test/java/openjdk/NoClassDefFoundError.java @@ -0,0 +1,76 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * Copyright (c) 1994, 2008, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package java.lang; + +/** + * Thrown if the Java Virtual Machine or a ClassLoader instance + * tries to load in the definition of a class (as part of a normal method call + * or as part of creating a new instance using the new expression) + * and no definition of the class could be found. + *
<p>
+ * The searched-for class definition existed when the currently + * executing class was compiled, but the definition can no longer be + * found. + * + * @author unascribed + * @since JDK1.0 + */ +public +class NoClassDefFoundError extends LinkageError { + private static final long serialVersionUID = 9095859863287012458L; + + /** + * Constructs a NoClassDefFoundError with no detail message. + */ + public NoClassDefFoundError() { + super(); + } + + /** + * Constructs a NoClassDefFoundError with the specified + * detail message. + * + * @param s the detail message. + */ + public NoClassDefFoundError(String s) { + super(s); + } + + /** + * Constructs a new {@code NoClassDefFoundError} with the current stack + * trace, the specified detail message and the specified cause. Used + * internally by the Android runtime. + * + * @param detailMessage + * the detail message for this error. + * @param throwable + * the cause of this error. + */ + private NoClassDefFoundError(String detailMessage, Throwable throwable) { + super(detailMessage, throwable); + } +} diff --git a/src/MapleFE/test/java/openjdk/NoClassDefFoundError.java.result b/src/MapleFE/test/java/openjdk/NoClassDefFoundError.java.result new file mode 100644 index 0000000000000000000000000000000000000000..cc0491c55d3f01d700fcd1ec5edfb7530fee872e --- /dev/null +++ b/src/MapleFE/test/java/openjdk/NoClassDefFoundError.java.result @@ -0,0 +1,18 @@ +Matched 5 tokens. +Matched 61 tokens. +============= Module =========== +== Sub Tree == +package java.lang +== Sub Tree == +class NoClassDefFoundError + Fields: + serialVersionUID=-1913349014 + Instance Initializer: + Constructors: + constructor NoClassDefFoundError() throws: + constructor NoClassDefFoundError(s) throws: + constructor NoClassDefFoundError(detailMessage,throwable) throws: + Methods: + LocalClasses: + LocalInterfaces: + diff --git a/src/MapleFE/test/java/openjdk/NoSuchFieldError.java b/src/MapleFE/test/java/openjdk/NoSuchFieldError.java new file mode 100644 index 0000000000000000000000000000000000000000..735adbbf0953a919fceb1bf37ccb340afbbb8ebc --- /dev/null +++ b/src/MapleFE/test/java/openjdk/NoSuchFieldError.java @@ -0,0 +1,59 @@ +/* + * Copyright (c) 1995, 2008, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +package java.lang; + +/** + * Thrown if an application tries to access or modify a specified + * field of an object, and that object no longer has that field. + *
<p>
+ * Normally, this error is caught by the compiler; this error can + * only occur at run time if the definition of a class has + * incompatibly changed. + * + * @author unascribed + * @since JDK1.0 + */ +public +class NoSuchFieldError extends IncompatibleClassChangeError { + private static final long serialVersionUID = -3456430195886129035L; + + /** + * Constructs a NoSuchFieldError with no detail message. + */ + public NoSuchFieldError() { + super(); + } + + /** + * Constructs a NoSuchFieldError with the specified + * detail message. + * + * @param s the detail message. + */ + public NoSuchFieldError(String s) { + super(s); + } +} diff --git a/src/MapleFE/test/java/openjdk/NoSuchFieldError.java.result b/src/MapleFE/test/java/openjdk/NoSuchFieldError.java.result new file mode 100644 index 0000000000000000000000000000000000000000..a0036f96ccd68048e728cac3d1f88141a434110a --- /dev/null +++ b/src/MapleFE/test/java/openjdk/NoSuchFieldError.java.result @@ -0,0 +1,17 @@ +Matched 5 tokens. +Matched 43 tokens. +============= Module =========== +== Sub Tree == +package java.lang +== Sub Tree == +class NoSuchFieldError + Fields: + serialVersionUID=-1085931403 + Instance Initializer: + Constructors: + constructor NoSuchFieldError() throws: + constructor NoSuchFieldError(s) throws: + Methods: + LocalClasses: + LocalInterfaces: + diff --git a/src/MapleFE/test/java/openjdk/NoSuchFieldException.java b/src/MapleFE/test/java/openjdk/NoSuchFieldException.java new file mode 100644 index 0000000000000000000000000000000000000000..0058264e5a719e205d66d91ae57170ae58370f78 --- /dev/null +++ b/src/MapleFE/test/java/openjdk/NoSuchFieldException.java @@ -0,0 +1,52 @@ +/* + * Copyright (c) 1996, 2008, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package java.lang; + +/** + * Signals that the class doesn't have a field of a specified name. + * + * @author unascribed + * @since JDK1.1 + */ +public class NoSuchFieldException extends ReflectiveOperationException { + private static final long serialVersionUID = -6143714805279938260L; + + /** + * Constructor. + */ + public NoSuchFieldException() { + super(); + } + + /** + * Constructor with a detail message. 
+ * + * @param s the detail message + */ + public NoSuchFieldException(String s) { + super(s); + } +} diff --git a/src/MapleFE/test/java/openjdk/NoSuchFieldException.java.result b/src/MapleFE/test/java/openjdk/NoSuchFieldException.java.result new file mode 100644 index 0000000000000000000000000000000000000000..6bb1d4f27cb61fefbd8af53f9f7d896cea44ce58 --- /dev/null +++ b/src/MapleFE/test/java/openjdk/NoSuchFieldException.java.result @@ -0,0 +1,17 @@ +Matched 5 tokens. +Matched 43 tokens. +============= Module =========== +== Sub Tree == +package java.lang +== Sub Tree == +class NoSuchFieldException + Fields: + serialVersionUID=1979394348 + Instance Initializer: + Constructors: + constructor NoSuchFieldException() throws: + constructor NoSuchFieldException(s) throws: + Methods: + LocalClasses: + LocalInterfaces: + diff --git a/src/MapleFE/test/java/openjdk/NoSuchMethodError.java b/src/MapleFE/test/java/openjdk/NoSuchMethodError.java new file mode 100644 index 0000000000000000000000000000000000000000..248de62667c4915f189bbe7af2975559f1a92815 --- /dev/null +++ b/src/MapleFE/test/java/openjdk/NoSuchMethodError.java @@ -0,0 +1,60 @@ +/* + * Copyright (c) 1994, 2008, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package java.lang; + +/** + * Thrown if an application tries to call a specified method of a + * class (either static or instance), and that class no longer has a + * definition of that method. + *

+ * Normally, this error is caught by the compiler; this error can + * only occur at run time if the definition of a class has + * incompatibly changed. + * + * @author unascribed + * @since JDK1.0 + */ +public +class NoSuchMethodError extends IncompatibleClassChangeError { + private static final long serialVersionUID = -3765521442372831335L; + + /** + * Constructs a NoSuchMethodError with no detail message. + */ + public NoSuchMethodError() { + super(); + } + + /** + * Constructs a NoSuchMethodError with the + * specified detail message. + * + * @param s the detail message. + */ + public NoSuchMethodError(String s) { + super(s); + } +} diff --git a/src/MapleFE/test/java/openjdk/NoSuchMethodError.java.result b/src/MapleFE/test/java/openjdk/NoSuchMethodError.java.result new file mode 100644 index 0000000000000000000000000000000000000000..091ac4af475856ec5f928564149b33513bff24cf --- /dev/null +++ b/src/MapleFE/test/java/openjdk/NoSuchMethodError.java.result @@ -0,0 +1,17 @@ +Matched 5 tokens. +Matched 43 tokens. +============= Module =========== +== Sub Tree == +package java.lang +== Sub Tree == +class NoSuchMethodError + Fields: + serialVersionUID=-515885159 + Instance Initializer: + Constructors: + constructor NoSuchMethodError() throws: + constructor NoSuchMethodError(s) throws: + Methods: + LocalClasses: + LocalInterfaces: + diff --git a/src/MapleFE/test/java/openjdk/NoSuchMethodException.java b/src/MapleFE/test/java/openjdk/NoSuchMethodException.java new file mode 100644 index 0000000000000000000000000000000000000000..701437c9887b2f65f9a0c3887e912ea66ba5fd18 --- /dev/null +++ b/src/MapleFE/test/java/openjdk/NoSuchMethodException.java @@ -0,0 +1,53 @@ +/* + * Copyright (c) 1995, 2008, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package java.lang; + +/** + * Thrown when a particular method cannot be found. + * + * @author unascribed + * @since JDK1.0 + */ +public +class NoSuchMethodException extends ReflectiveOperationException { + private static final long serialVersionUID = 5034388446362600923L; + + /** + * Constructs a NoSuchMethodException without a detail message. + */ + public NoSuchMethodException() { + super(); + } + + /** + * Constructs a NoSuchMethodException with a detail message. + * + * @param s the detail message. 
+ */ + public NoSuchMethodException(String s) { + super(s); + } +} diff --git a/src/MapleFE/test/java/openjdk/NoSuchMethodException.java.result b/src/MapleFE/test/java/openjdk/NoSuchMethodException.java.result new file mode 100644 index 0000000000000000000000000000000000000000..29371ab7362ed4e9940537e646b7ed999580ade4 --- /dev/null +++ b/src/MapleFE/test/java/openjdk/NoSuchMethodException.java.result @@ -0,0 +1,17 @@ +Matched 5 tokens. +Matched 43 tokens. +============= Module =========== +== Sub Tree == +package java.lang +== Sub Tree == +class NoSuchMethodException + Fields: + serialVersionUID=1590035931 + Instance Initializer: + Constructors: + constructor NoSuchMethodException() throws: + constructor NoSuchMethodException(s) throws: + Methods: + LocalClasses: + LocalInterfaces: + diff --git a/src/MapleFE/test/java/openjdk/NullPointerException.java b/src/MapleFE/test/java/openjdk/NullPointerException.java new file mode 100644 index 0000000000000000000000000000000000000000..5b87ec4fd7c96bf2f7e70e68c8d40e9806c2ca75 --- /dev/null +++ b/src/MapleFE/test/java/openjdk/NullPointerException.java @@ -0,0 +1,72 @@ +/* + * Copyright (c) 1994, 2011, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package java.lang; + +/** + * Thrown when an application attempts to use {@code null} in a + * case where an object is required. These include: + *

+ * <ul>
+ * <li>Calling the instance method of a {@code null} object.
+ * <li>Accessing or modifying the field of a {@code null} object.
+ * <li>Taking the length of {@code null} as if it were an array.
+ * <li>Accessing or modifying the slots of {@code null} as if it
+ *     were an array.
+ * <li>Throwing {@code null} as if it were a {@code Throwable}
+ *     value.
+ * </ul>
+ * <p>
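As a quick illustration of the first two cases listed above, the standalone snippet below triggers and catches the resulting NullPointerExceptions. It is only a sketch for the reader; the Box class and every other name in it are invented and are not part of the OpenJDK file being added in this diff.

```java
// Minimal sketch (not part of the diff): two of the null uses listed above.
public class NpeDemo {
    static class Box { int value; }

    public static void main(String[] args) {
        Box box = null;
        try {
            int v = box.value;                // accessing a field of a null object
            System.out.println(v);
        } catch (NullPointerException e) {
            System.out.println("field access on null: " + e);
        }
        try {
            String s = null;
            System.out.println(s.length());   // calling an instance method of a null object
        } catch (NullPointerException e) {
            System.out.println("method call on null: " + e);
        }
    }
}
```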

+ * Applications should throw instances of this class to indicate + * other illegal uses of the {@code null} object. + * + * {@code NullPointerException} objects may be constructed by the + * virtual machine as if {@linkplain Throwable#Throwable(String, + * Throwable, boolean, boolean) suppression were disabled and/or the + * stack trace was not writable}. + * + * @author unascribed + * @since JDK1.0 + */ +public +class NullPointerException extends RuntimeException { + private static final long serialVersionUID = 5162710183389028792L; + + /** + * Constructs a {@code NullPointerException} with no detail message. + */ + public NullPointerException() { + super(); + } + + /** + * Constructs a {@code NullPointerException} with the specified + * detail message. + * + * @param s the detail message. + */ + public NullPointerException(String s) { + super(s); + } +} diff --git a/src/MapleFE/test/java/openjdk/NullPointerException.java.result b/src/MapleFE/test/java/openjdk/NullPointerException.java.result new file mode 100644 index 0000000000000000000000000000000000000000..7553e10f11f1111adae6258d9fc53e9e8a21db99 --- /dev/null +++ b/src/MapleFE/test/java/openjdk/NullPointerException.java.result @@ -0,0 +1,17 @@ +Matched 5 tokens. +Matched 43 tokens. +============= Module =========== +== Sub Tree == +package java.lang +== Sub Tree == +class NullPointerException + Fields: + serialVersionUID=-13508168 + Instance Initializer: + Constructors: + constructor NullPointerException() throws: + constructor NullPointerException(s) throws: + Methods: + LocalClasses: + LocalInterfaces: + diff --git a/src/MapleFE/test/java/openjdk/Number.java b/src/MapleFE/test/java/openjdk/Number.java new file mode 100644 index 0000000000000000000000000000000000000000..d901609571b1ef1a6675bcb4caf096c858af3512 --- /dev/null +++ b/src/MapleFE/test/java/openjdk/Number.java @@ -0,0 +1,124 @@ +/* + * Copyright (c) 1994, 2011, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package java.lang; + +/** + * The abstract class {@code Number} is the superclass of platform + * classes representing numeric values that are convertible to the + * primitive types {@code byte}, {@code double}, {@code float}, {@code + * int}, {@code long}, and {@code short}. 
+ * + * The specific semantics of the conversion from the numeric value of + * a particular {@code Number} implementation to a given primitive + * type is defined by the {@code Number} implementation in question. + * + * For platform classes, the conversion is often analogous to a + * narrowing primitive conversion or a widening primitive conversion + * as defining in The Java™ Language Specification + * for converting between primitive types. Therefore, conversions may + * lose information about the overall magnitude of a numeric value, may + * lose precision, and may even return a result of a different sign + * than the input. + * + * See the documentation of a given {@code Number} implementation for + * conversion details. + * + * @author Lee Boynton + * @author Arthur van Hoff + * @jls 5.1.2 Widening Primitive Conversions + * @jls 5.1.3 Narrowing Primitive Conversions + * @since JDK1.0 + */ +public abstract class Number implements java.io.Serializable { + /** + * Returns the value of the specified number as an {@code int}, + * which may involve rounding or truncation. + * + * @return the numeric value represented by this object after conversion + * to type {@code int}. + */ + public abstract int intValue(); + + /** + * Returns the value of the specified number as a {@code long}, + * which may involve rounding or truncation. + * + * @return the numeric value represented by this object after conversion + * to type {@code long}. + */ + public abstract long longValue(); + + /** + * Returns the value of the specified number as a {@code float}, + * which may involve rounding. + * + * @return the numeric value represented by this object after conversion + * to type {@code float}. + */ + public abstract float floatValue(); + + /** + * Returns the value of the specified number as a {@code double}, + * which may involve rounding. + * + * @return the numeric value represented by this object after conversion + * to type {@code double}. + */ + public abstract double doubleValue(); + + /** + * Returns the value of the specified number as a {@code byte}, + * which may involve rounding or truncation. + * + *

This implementation returns the result of {@link #intValue} cast + * to a {@code byte}. + * + * @return the numeric value represented by this object after conversion + * to type {@code byte}. + * @since JDK1.1 + */ + public byte byteValue() { + return (byte)intValue(); + } + + /** + * Returns the value of the specified number as a {@code short}, + * which may involve rounding or truncation. + * + *

This implementation returns the result of {@link #intValue} cast + * to a {@code short}. + * + * @return the numeric value represented by this object after conversion + * to type {@code short}. + * @since JDK1.1 + */ + public short shortValue() { + return (short)intValue(); + } + + /** use serialVersionUID from JDK 1.0.2 for interoperability */ + private static final long serialVersionUID = -8742448824652078965L; +} diff --git a/src/MapleFE/test/java/openjdk/Number.java.result b/src/MapleFE/test/java/openjdk/Number.java.result new file mode 100644 index 0000000000000000000000000000000000000000..1893622d300feeefb89605521d99af6279a948cb --- /dev/null +++ b/src/MapleFE/test/java/openjdk/Number.java.result @@ -0,0 +1,23 @@ +Matched 5 tokens. +Matched 83 tokens. +============= Module =========== +== Sub Tree == +package java.lang +== Sub Tree == +class Number + Fields: + serialVersionUID=194306187 + Instance Initializer: + Constructors: + Methods: + func intValue() throws: + func longValue() throws: + func floatValue() throws: + func doubleValue() throws: + func byteValue() throws: + return (byte)intValue() + func shortValue() throws: + return (short)intValue() + LocalClasses: + LocalInterfaces: + diff --git a/src/MapleFE/test/java/openjdk/NumberFormatException.java b/src/MapleFE/test/java/openjdk/NumberFormatException.java new file mode 100644 index 0000000000000000000000000000000000000000..ea1ec9fd7714269e46a839c1810133afbbc8da6b --- /dev/null +++ b/src/MapleFE/test/java/openjdk/NumberFormatException.java @@ -0,0 +1,67 @@ +/* + * Copyright (c) 1994, 2012, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package java.lang; + +/** + * Thrown to indicate that the application has attempted to convert + * a string to one of the numeric types, but that the string does not + * have the appropriate format. + * + * @author unascribed + * @see java.lang.Integer#parseInt(String) + * @since JDK1.0 + */ +public +class NumberFormatException extends IllegalArgumentException { + static final long serialVersionUID = -2848938806368998894L; + + /** + * Constructs a NumberFormatException with no detail message. + */ + public NumberFormatException () { + super(); + } + + /** + * Constructs a NumberFormatException with the + * specified detail message. + * + * @param s the detail message. 
+ */ + public NumberFormatException (String s) { + super (s); + } + + /** + * Factory method for making a NumberFormatException + * given the specified input which caused the error. + * + * @param s the input causing the error + */ + static NumberFormatException forInputString(String s) { + return new NumberFormatException("For input string: \"" + s + "\""); + } +} diff --git a/src/MapleFE/test/java/openjdk/NumberFormatException.java.result b/src/MapleFE/test/java/openjdk/NumberFormatException.java.result new file mode 100644 index 0000000000000000000000000000000000000000..3641aa05de9998fffb8fd18a83d8408c5a14bd58 --- /dev/null +++ b/src/MapleFE/test/java/openjdk/NumberFormatException.java.result @@ -0,0 +1,19 @@ +Matched 5 tokens. +Matched 62 tokens. +============= Module =========== +== Sub Tree == +package java.lang +== Sub Tree == +class NumberFormatException + Fields: + serialVersionUID=-74651118 + Instance Initializer: + Constructors: + constructor NumberFormatException() throws: + constructor NumberFormatException(s) throws: + Methods: + func forInputString(s) throws: + return new NumberFormatException("For input string: \"" Add s Add "\"") + LocalClasses: + LocalInterfaces: + diff --git a/src/MapleFE/test/java/openjdk/Object.java b/src/MapleFE/test/java/openjdk/Object.java new file mode 100644 index 0000000000000000000000000000000000000000..e9728e08224a92e160ed2b6fe3672a8a28973ff0 --- /dev/null +++ b/src/MapleFE/test/java/openjdk/Object.java @@ -0,0 +1,582 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * Copyright (c) 1994, 2012, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package java.lang; + +import dalvik.annotation.optimization.FastNative; + +/** + * Class {@code Object} is the root of the class hierarchy. + * Every class has {@code Object} as a superclass. All objects, + * including arrays, implement the methods of this class. + * + * @author unascribed + * @see java.lang.Class + * @since JDK1.0 + */ +public class Object { + + private transient Class shadow$_klass_; + private transient int shadow$_monitor_; + + /** + * Returns the runtime class of this {@code Object}. The returned + * {@code Class} object is the object that is locked by {@code + * static synchronized} methods of the represented class. + * + *

The actual result type is {@code Class<? extends |X|>}
+ * where {@code |X|} is the erasure of the static type of the
+ * expression on which {@code getClass} is called. For
+ * example, no cast is required in this code fragment:
+ *
+ * <p>
+ * {@code Number n = 0; }<br>
+ * {@code Class<? extends Number> c = n.getClass(); }
+ * </p>

+ * + * @return The {@code Class} object that represents the runtime + * class of this object. + * @jls 15.8.2 Class Literals + */ + public final Class getClass() { + return shadow$_klass_; + } + + /** + * Returns a hash code value for the object. This method is + * supported for the benefit of hash tables such as those provided by + * {@link java.util.HashMap}. + *

+ * The general contract of {@code hashCode} is: + *

    + *
+ * <ul>
+ * <li>Whenever it is invoked on the same object more than once during
+ *     an execution of a Java application, the {@code hashCode} method
+ *     must consistently return the same integer, provided no information
+ *     used in {@code equals} comparisons on the object is modified.
+ *     This integer need not remain consistent from one execution of an
+ *     application to another execution of the same application.
+ * <li>If two objects are equal according to the {@code equals(Object)}
+ *     method, then calling the {@code hashCode} method on each of
+ *     the two objects must produce the same integer result.
+ * <li>It is not required that if two objects are unequal
+ *     according to the {@link java.lang.Object#equals(java.lang.Object)}
+ *     method, then calling the {@code hashCode} method on each of the
+ *     two objects must produce distinct integer results. However, the
+ *     programmer should be aware that producing distinct integer results
+ *     for unequal objects may improve the performance of hash tables.
+ * </ul>
+ * <p>

+ * As much as is reasonably practical, the hashCode method defined by + * class {@code Object} does return distinct integers for distinct + * objects. (This is typically implemented by converting the internal + * address of the object into an integer, but this implementation + * technique is not required by the + * Java™ programming language.) + * + * @return a hash code value for this object. + * @see java.lang.Object#equals(java.lang.Object) + * @see java.lang.System#identityHashCode + */ + public int hashCode() { + return identityHashCode(this); + } + + // Android-changed: add a local helper for identityHashCode. + // Package-private to be used by j.l.System. We do the implementation here + // to avoid Object.hashCode doing a clinit check on j.l.System, and also + // to avoid leaking shadow$_monitor_ outside of this class. + /* package-private */ static int identityHashCode(Object obj) { + int lockWord = obj.shadow$_monitor_; + final int lockWordStateMask = 0xC0000000; // Top 2 bits. + final int lockWordStateHash = 0x80000000; // Top 2 bits are value 2 (kStateHash). + final int lockWordHashMask = 0x0FFFFFFF; // Low 28 bits. + if ((lockWord & lockWordStateMask) == lockWordStateHash) { + return lockWord & lockWordHashMask; + } + return identityHashCodeNative(obj); + } + + @FastNative + private static native int identityHashCodeNative(Object obj); + + /** + * Indicates whether some other object is "equal to" this one. + *

+ * The {@code equals} method implements an equivalence relation + * on non-null object references: + *

    + *
+ * <ul>
+ * <li>It is reflexive: for any non-null reference value
+ *     {@code x}, {@code x.equals(x)} should return
+ *     {@code true}.
+ * <li>It is symmetric: for any non-null reference values
+ *     {@code x} and {@code y}, {@code x.equals(y)}
+ *     should return {@code true} if and only if
+ *     {@code y.equals(x)} returns {@code true}.
+ * <li>It is transitive: for any non-null reference values
+ *     {@code x}, {@code y}, and {@code z}, if
+ *     {@code x.equals(y)} returns {@code true} and
+ *     {@code y.equals(z)} returns {@code true}, then
+ *     {@code x.equals(z)} should return {@code true}.
+ * <li>It is consistent: for any non-null reference values
+ *     {@code x} and {@code y}, multiple invocations of
+ *     {@code x.equals(y)} consistently return {@code true}
+ *     or consistently return {@code false}, provided no
+ *     information used in {@code equals} comparisons on the
+ *     objects is modified.
+ * <li>For any non-null reference value {@code x},
+ *     {@code x.equals(null)} should return {@code false}.
+ * </ul>
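The equals and hashCode contracts quoted above are normally kept by overriding both methods from the same fields. The sketch below is illustrative only; the Point class is an invented example, not part of this diff, and it assumes only java.util.Objects from the standard library.

```java
// Illustrative sketch (not part of the diff): a value class that keeps the
// contract -- equal objects report equal hash codes.
import java.util.Objects;

public final class Point {
    private final int x;
    private final int y;

    public Point(int x, int y) { this.x = x; this.y = y; }

    @Override
    public boolean equals(Object obj) {
        if (this == obj) return true;                // reflexive
        if (!(obj instanceof Point)) return false;   // also rejects null, so x.equals(null) is false
        Point other = (Point) obj;
        return x == other.x && y == other.y;         // symmetric and transitive by construction
    }

    @Override
    public int hashCode() {
        return Objects.hash(x, y);                   // same fields as equals, so equal points agree
    }
}
```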

+ * The {@code equals} method for class {@code Object} implements + * the most discriminating possible equivalence relation on objects; + * that is, for any non-null reference values {@code x} and + * {@code y}, this method returns {@code true} if and only + * if {@code x} and {@code y} refer to the same object + * ({@code x == y} has the value {@code true}). + *

+ * Note that it is generally necessary to override the {@code hashCode} + * method whenever this method is overridden, so as to maintain the + * general contract for the {@code hashCode} method, which states + * that equal objects must have equal hash codes. + * + * @param obj the reference object with which to compare. + * @return {@code true} if this object is the same as the obj + * argument; {@code false} otherwise. + * @see #hashCode() + * @see java.util.HashMap + */ + public boolean equals(Object obj) { + return (this == obj); + } + + /** + * Creates and returns a copy of this object. The precise meaning + * of "copy" may depend on the class of the object. The general + * intent is that, for any object {@code x}, the expression: + *

+ * <blockquote>
+ * <pre>
+ * x.clone() != x</pre></blockquote>
+ * will be true, and that the expression:
+ * <blockquote>
+ * <pre>
+ * x.clone().getClass() == x.getClass()</pre></blockquote>
+ * will be {@code true}, but these are not absolute requirements.
+ * While it is typically the case that:
+ * <blockquote>
+ * <pre>
+ * x.clone().equals(x)</pre></blockquote>
+ * will be {@code true}, this is not an absolute requirement.

+ * By convention, the returned object should be obtained by calling + * {@code super.clone}. If a class and all of its superclasses (except + * {@code Object}) obey this convention, it will be the case that + * {@code x.clone().getClass() == x.getClass()}. + *

+ * By convention, the object returned by this method should be independent + * of this object (which is being cloned). To achieve this independence, + * it may be necessary to modify one or more fields of the object returned + * by {@code super.clone} before returning it. Typically, this means + * copying any mutable objects that comprise the internal "deep structure" + * of the object being cloned and replacing the references to these + * objects with references to the copies. If a class contains only + * primitive fields or references to immutable objects, then it is usually + * the case that no fields in the object returned by {@code super.clone} + * need to be modified. + *
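A minimal sketch of the convention described above: delegate to super.clone() for the shallow copy, then replace the one mutable field so the clone is independent of the original. The Roster class and its field are invented for illustration and are not part of the file being added here.

```java
// Illustrative sketch (not part of the diff): clone() built on super.clone(),
// followed by a copy of the mutable "deep structure".
import java.util.ArrayList;
import java.util.List;

public class Roster implements Cloneable {
    private List<String> names = new ArrayList<>();

    public void add(String name) { names.add(name); }

    @Override
    public Roster clone() {
        try {
            Roster copy = (Roster) super.clone();      // field-by-field shallow copy from Object.clone()
            copy.names = new ArrayList<>(this.names);  // deep-copy the mutable list
            return copy;
        } catch (CloneNotSupportedException e) {
            throw new AssertionError(e);               // cannot happen: this class implements Cloneable
        }
    }
}
```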

+ * The method {@code clone} for class {@code Object} performs a + * specific cloning operation. First, if the class of this object does + * not implement the interface {@code Cloneable}, then a + * {@code CloneNotSupportedException} is thrown. Note that all arrays + * are considered to implement the interface {@code Cloneable} and that + * the return type of the {@code clone} method of an array type {@code T[]} + * is {@code T[]} where T is any reference or primitive type. + * Otherwise, this method creates a new instance of the class of this + * object and initializes all its fields with exactly the contents of + * the corresponding fields of this object, as if by assignment; the + * contents of the fields are not themselves cloned. Thus, this method + * performs a "shallow copy" of this object, not a "deep copy" operation. + *

+ * The class {@code Object} does not itself implement the interface + * {@code Cloneable}, so calling the {@code clone} method on an object + * whose class is {@code Object} will result in throwing an + * exception at run time. + * + * @return a clone of this instance. + * @throws CloneNotSupportedException if the object's class does not + * support the {@code Cloneable} interface. Subclasses + * that override the {@code clone} method can also + * throw this exception to indicate that an instance cannot + * be cloned. + * @see java.lang.Cloneable + */ + protected Object clone() throws CloneNotSupportedException { + if (!(this instanceof Cloneable)) { + throw new CloneNotSupportedException("Class " + getClass().getName() + + " doesn't implement Cloneable"); + } + + return internalClone(); + } + + /* + * Native helper method for cloning. + */ + @FastNative + private native Object internalClone(); + + + /** + * Returns a string representation of the object. In general, the + * {@code toString} method returns a string that + * "textually represents" this object. The result should + * be a concise but informative representation that is easy for a + * person to read. + * It is recommended that all subclasses override this method. + *

+ * The {@code toString} method for class {@code Object} + * returns a string consisting of the name of the class of which the + * object is an instance, the at-sign character `{@code @}', and + * the unsigned hexadecimal representation of the hash code of the + * object. In other words, this method returns a string equal to the + * value of: + *

+ * <blockquote>
+ * <pre>
+ * getClass().getName() + '@' + Integer.toHexString(hashCode())
+ * </pre></blockquote>
+ * + * @return a string representation of the object. + */ + public String toString() { + return getClass().getName() + "@" + Integer.toHexString(hashCode()); + } + + /** + * Wakes up a single thread that is waiting on this object's + * monitor. If any threads are waiting on this object, one of them + * is chosen to be awakened. The choice is arbitrary and occurs at + * the discretion of the implementation. A thread waits on an object's + * monitor by calling one of the {@code wait} methods. + *

+ * The awakened thread will not be able to proceed until the current + * thread relinquishes the lock on this object. The awakened thread will + * compete in the usual manner with any other threads that might be + * actively competing to synchronize on this object; for example, the + * awakened thread enjoys no reliable privilege or disadvantage in being + * the next thread to lock this object. + *

+ * This method should only be called by a thread that is the owner + * of this object's monitor. A thread becomes the owner of the + * object's monitor in one of three ways: + *

    + *
+ * <ul>
+ * <li>By executing a synchronized instance method of that object.
+ * <li>By executing the body of a {@code synchronized} statement
+ *     that synchronizes on the object.
+ * <li>For objects of type {@code Class,} by executing a
+ *     synchronized static method of that class.
+ * </ul>
+ * <p>
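For reference, the three ways of acquiring a monitor listed above look like this in source form. The class and method names are made up for the sketch and are not part of this diff.

```java
// Illustrative sketch (not part of the diff): the three listed ways a thread
// can own a monitor before calling notify() or wait().
public class MonitorOwnership {
    private boolean ready;

    public synchronized void markReady() {          // 1. synchronized instance method: owns "this"
        ready = true;
        notify();
    }

    public void markReadyExplicitly() {
        synchronized (this) {                       // 2. synchronized statement on the object
            ready = true;
            notify();
        }
    }

    public static synchronized void touchClass() {  // 3. static synchronized method:
        // owns MonitorOwnership.class
    }
}
```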

+ * Only one thread at a time can own an object's monitor. + * + * @throws IllegalMonitorStateException if the current thread is not + * the owner of this object's monitor. + * @see java.lang.Object#notifyAll() + * @see java.lang.Object#wait() + */ + @FastNative + public final native void notify(); + + /** + * Wakes up all threads that are waiting on this object's monitor. A + * thread waits on an object's monitor by calling one of the + * {@code wait} methods. + *

+ * The awakened threads will not be able to proceed until the current + * thread relinquishes the lock on this object. The awakened threads + * will compete in the usual manner with any other threads that might + * be actively competing to synchronize on this object; for example, + * the awakened threads enjoy no reliable privilege or disadvantage in + * being the next thread to lock this object. + *

+ * This method should only be called by a thread that is the owner + * of this object's monitor. See the {@code notify} method for a + * description of the ways in which a thread can become the owner of + * a monitor. + * + * @throws IllegalMonitorStateException if the current thread is not + * the owner of this object's monitor. + * @see java.lang.Object#notify() + * @see java.lang.Object#wait() + */ + @FastNative + public final native void notifyAll(); + + /** + * Causes the current thread to wait until either another thread invokes the + * {@link java.lang.Object#notify()} method or the + * {@link java.lang.Object#notifyAll()} method for this object, or a + * specified amount of time has elapsed. + *

+ * The current thread must own this object's monitor. + *

+ * This method causes the current thread (call it T) to + * place itself in the wait set for this object and then to relinquish + * any and all synchronization claims on this object. Thread T + * becomes disabled for thread scheduling purposes and lies dormant + * until one of four things happens: + *

    + *
+ * <ul>
+ * <li>Some other thread invokes the {@code notify} method for this
+ *     object and thread T happens to be arbitrarily chosen as
+ *     the thread to be awakened.
+ * <li>Some other thread invokes the {@code notifyAll} method for this
+ *     object.
+ * <li>Some other thread {@linkplain Thread#interrupt() interrupts}
+ *     thread T.
+ * <li>The specified amount of real time has elapsed, more or less. If
+ *     {@code timeout} is zero, however, then real time is not taken into
+ *     consideration and the thread simply waits until notified.
+ * </ul>
+ * The thread T is then removed from the wait set for this + * object and re-enabled for thread scheduling. It then competes in the + * usual manner with other threads for the right to synchronize on the + * object; once it has gained control of the object, all its + * synchronization claims on the object are restored to the status quo + * ante - that is, to the situation as of the time that the {@code wait} + * method was invoked. Thread T then returns from the + * invocation of the {@code wait} method. Thus, on return from the + * {@code wait} method, the synchronization state of the object and of + * thread {@code T} is exactly as it was when the {@code wait} method + * was invoked. + *

+ * A thread can also wake up without being notified, interrupted, or + * timing out, a so-called spurious wakeup. While this will rarely + * occur in practice, applications must guard against it by testing for + * the condition that should have caused the thread to be awakened, and + * continuing to wait if the condition is not satisfied. In other words, + * waits should always occur in loops, like this one: + *

+ * <pre>
+ *     synchronized (obj) {
+ *         while (&lt;condition does not hold&gt;)
+ *             obj.wait(timeout);
+ *         ... // Perform action appropriate to condition
+ *     }
+ * </pre>
+ * (For more information on this topic, see Section 3.2.3 in Doug Lea's + * "Concurrent Programming in Java (Second Edition)" (Addison-Wesley, + * 2000), or Item 50 in Joshua Bloch's "Effective Java Programming + * Language Guide" (Addison-Wesley, 2001). + * + *
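Putting the loop idiom above into a complete, runnable shape, a tiny one-shot latch might look like the sketch below. It is an illustration only; SimpleLatch and its members are invented names, and the Thread.sleep call exists purely to make the ordering visible. In production code java.util.concurrent.CountDownLatch already provides this behaviour.

```java
// Illustrative sketch (not part of the diff): wait-in-a-loop with notifyAll().
public class SimpleLatch {
    private boolean released;

    public synchronized void await() throws InterruptedException {
        while (!released) {          // guard against spurious wakeups: re-check the condition
            wait();
        }
    }

    public synchronized void release() {
        released = true;
        notifyAll();                 // wake every waiter; each one re-checks the condition
    }

    public static void main(String[] args) throws InterruptedException {
        SimpleLatch latch = new SimpleLatch();
        Thread worker = new Thread(() -> {
            try {
                latch.await();
                System.out.println("released");
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        });
        worker.start();
        Thread.sleep(100);           // let the worker block first (illustration only)
        latch.release();
        worker.join();
    }
}
```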

If the current thread is {@linkplain java.lang.Thread#interrupt() + * interrupted} by any thread before or while it is waiting, then an + * {@code InterruptedException} is thrown. This exception is not + * thrown until the lock status of this object has been restored as + * described above. + * + *

+ * Note that the {@code wait} method, as it places the current thread + * into the wait set for this object, unlocks only this object; any + * other objects on which the current thread may be synchronized remain + * locked while the thread waits. + *

+ * This method should only be called by a thread that is the owner + * of this object's monitor. See the {@code notify} method for a + * description of the ways in which a thread can become the owner of + * a monitor. + * + * @param millis the maximum time to wait in milliseconds. + * @throws IllegalArgumentException if the value of timeout is + * negative. + * @throws IllegalMonitorStateException if the current thread is not + * the owner of the object's monitor. + * @throws InterruptedException if any thread interrupted the + * current thread before or while the current thread + * was waiting for a notification. The interrupted + * status of the current thread is cleared when + * this exception is thrown. + * @see java.lang.Object#notify() + * @see java.lang.Object#notifyAll() + */ + public final void wait(long millis) throws InterruptedException { + wait(millis, 0); + } + + /** + * Causes the current thread to wait until another thread invokes the + * {@link java.lang.Object#notify()} method or the + * {@link java.lang.Object#notifyAll()} method for this object, or + * some other thread interrupts the current thread, or a certain + * amount of real time has elapsed. + *

+ * This method is similar to the {@code wait} method of one + * argument, but it allows finer control over the amount of time to + * wait for a notification before giving up. The amount of real time, + * measured in nanoseconds, is given by: + *

+ * <blockquote>
+ * <pre>
+ * 1000000*timeout+nanos</pre></blockquote>

+ * In all other respects, this method does the same thing as the + * method {@link #wait(long)} of one argument. In particular, + * {@code wait(0, 0)} means the same thing as {@code wait(0)}. + *

+ * The current thread must own this object's monitor. The thread + * releases ownership of this monitor and waits until either of the + * following two conditions has occurred: + *

    + *
+ * <ul>
+ * <li>Another thread notifies threads waiting on this object's monitor
+ *     to wake up either through a call to the {@code notify} method
+ *     or the {@code notifyAll} method.
+ * <li>The timeout period, specified by {@code timeout}
+ *     milliseconds plus {@code nanos} nanoseconds arguments, has
+ *     elapsed.
+ * </ul>
+ * <p>

+ * The thread then waits until it can re-obtain ownership of the + * monitor and resumes execution. + *

+ * As in the one argument version, interrupts and spurious wakeups are + * possible, and this method should always be used in a loop: + *

+ * <pre>
+ *     synchronized (obj) {
+ *         while (&lt;condition does not hold&gt;)
+ *             obj.wait(timeout, nanos);
+ *         ... // Perform action appropriate to condition
+ *     }
+ * </pre>
+ * This method should only be called by a thread that is the owner + * of this object's monitor. See the {@code notify} method for a + * description of the ways in which a thread can become the owner of + * a monitor. + * + * @param millis the maximum time to wait in milliseconds. + * @param nanos additional time, in nanoseconds range + * 0-999999. + * @throws IllegalArgumentException if the value of timeout is + * negative or the value of nanos is + * not in the range 0-999999. + * @throws IllegalMonitorStateException if the current thread is not + * the owner of this object's monitor. + * @throws InterruptedException if any thread interrupted the + * current thread before or while the current thread + * was waiting for a notification. The interrupted + * status of the current thread is cleared when + * this exception is thrown. + */ + @FastNative + public final native void wait(long millis, int nanos) throws InterruptedException; + + /** + * Causes the current thread to wait until another thread invokes the + * {@link java.lang.Object#notify()} method or the + * {@link java.lang.Object#notifyAll()} method for this object. + * In other words, this method behaves exactly as if it simply + * performs the call {@code wait(0)}. + *

+ * The current thread must own this object's monitor. The thread + * releases ownership of this monitor and waits until another thread + * notifies threads waiting on this object's monitor to wake up + * either through a call to the {@code notify} method or the + * {@code notifyAll} method. The thread then waits until it can + * re-obtain ownership of the monitor and resumes execution. + *

+ * As in the one argument version, interrupts and spurious wakeups are + * possible, and this method should always be used in a loop: + *

+ * <pre>
+ *     synchronized (obj) {
+ *         while (&lt;condition does not hold&gt;)
+ *             obj.wait();
+ *         ... // Perform action appropriate to condition
+ *     }
+ * </pre>
+ * This method should only be called by a thread that is the owner + * of this object's monitor. See the {@code notify} method for a + * description of the ways in which a thread can become the owner of + * a monitor. + * + * @throws IllegalMonitorStateException if the current thread is not + * the owner of the object's monitor. + * @throws InterruptedException if any thread interrupted the + * current thread before or while the current thread + * was waiting for a notification. The interrupted + * status of the current thread is cleared when + * this exception is thrown. + * @see java.lang.Object#notify() + * @see java.lang.Object#notifyAll() + */ + @FastNative + public final native void wait() throws InterruptedException; + + /** + * Called by the garbage collector on an object when garbage collection + * determines that there are no more references to the object. + * A subclass overrides the {@code finalize} method to dispose of + * system resources or to perform other cleanup. + *

+ * The general contract of {@code finalize} is that it is invoked + * if and when the Java™ virtual + * machine has determined that there is no longer any + * means by which this object can be accessed by any thread that has + * not yet died, except as a result of an action taken by the + * finalization of some other object or class which is ready to be + * finalized. The {@code finalize} method may take any action, including + * making this object available again to other threads; the usual purpose + * of {@code finalize}, however, is to perform cleanup actions before + * the object is irrevocably discarded. For example, the finalize method + * for an object that represents an input/output connection might perform + * explicit I/O transactions to break the connection before the object is + * permanently discarded. + *

+ * The {@code finalize} method of class {@code Object} performs no + * special action; it simply returns normally. Subclasses of + * {@code Object} may override this definition. + *

+ * The Java programming language does not guarantee which thread will + * invoke the {@code finalize} method for any given object. It is + * guaranteed, however, that the thread that invokes finalize will not + * be holding any user-visible synchronization locks when finalize is + * invoked. If an uncaught exception is thrown by the finalize method, + * the exception is ignored and finalization of that object terminates. + *

+ * After the {@code finalize} method has been invoked for an object, no + * further action is taken until the Java virtual machine has again + * determined that there is no longer any means by which this object can + * be accessed by any thread that has not yet died, including possible + * actions by other objects or classes which are ready to be finalized, + * at which point the object may be discarded. + *

+ * The {@code finalize} method is never invoked more than once by a Java + * virtual machine for any given object. + *

+ * Any exception thrown by the {@code finalize} method causes + * the finalization of this object to be halted, but is otherwise + * ignored. + * + * @throws Throwable the {@code Exception} raised by this method + * @see java.lang.ref.WeakReference + * @see java.lang.ref.PhantomReference + * @jls 12.6 Finalization of Class Instances + */ + protected void finalize() throws Throwable { } +} diff --git a/src/MapleFE/test/java/openjdk/Object.java.result b/src/MapleFE/test/java/openjdk/Object.java.result new file mode 100644 index 0000000000000000000000000000000000000000..d30bd56472a1e3a9411caeddeb4dbd5390e95311 --- /dev/null +++ b/src/MapleFE/test/java/openjdk/Object.java.result @@ -0,0 +1,53 @@ +Matched 5 tokens. +Matched 14 tokens. +Matched 299 tokens. +============= Module =========== +== Sub Tree == +package java.lang +== Sub Tree == +import dalvik.annotation.optimization.FastNative +== Sub Tree == +class Object + Fields: + shadow$_klass_ shadow$_monitor_ + Instance Initializer: + Constructors: + Methods: + func getClass() throws: + return shadow$_klass_ + func hashCode() throws: + return identityHashCode(this) + func identityHashCode(obj) throws: + Decl: lockWord=obj.shadow$_monitor_ + Decl: lockWordStateMask=-1073741824 + Decl: lockWordStateHash=-2147483648 + Decl: lockWordHashMask=268435455 + cond-branch cond:(lockWord Band lockWordStateMask) EQ lockWordStateHash + true branch : + return lockWord Band lockWordHashMask + false branch : + + return identityHashCodeNative(obj) + func identityHashCodeNative(obj) throws: + func equals(obj) throws: + return (this EQ obj) + func clone() throws: CloneNotSupportedException + cond-branch cond:(this instanceof Cloneable) + true branch : + new CloneNotSupportedException("Class " Add getClass().getName() Add " doesn't implement Cloneable") + false branch : + + return internalClone() + func internalClone() throws: + func toString() throws: + return getClass().getName() Add "@" Add Integer.toHexString(hashCode()) + func notify() throws: + func notifyAll() throws: + func wait(millis) throws: InterruptedException + wait(millis,0) + func wait(millis,nanos) throws: InterruptedException + func wait() throws: InterruptedException + func finalize() throws: Throwable + LocalClasses: + LocalInterfaces: + diff --git a/src/MapleFE/test/java/openjdk/OutOfMemoryError.java b/src/MapleFE/test/java/openjdk/OutOfMemoryError.java new file mode 100644 index 0000000000000000000000000000000000000000..0f9df4e2f16519225e69515e7bd162d778346c76 --- /dev/null +++ b/src/MapleFE/test/java/openjdk/OutOfMemoryError.java @@ -0,0 +1,60 @@ +/* + * Copyright (c) 1994, 2011, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package java.lang; + +/** + * Thrown when the Java Virtual Machine cannot allocate an object + * because it is out of memory, and no more memory could be made + * available by the garbage collector. + * + * {@code OutOfMemoryError} objects may be constructed by the virtual + * machine as if {@linkplain Throwable#Throwable(String, Throwable, + * boolean, boolean) suppression were disabled and/or the stack trace was not + * writable}. + * + * @author unascribed + * @since JDK1.0 + */ +public class OutOfMemoryError extends VirtualMachineError { + private static final long serialVersionUID = 8228564086184010517L; + + /** + * Constructs an {@code OutOfMemoryError} with no detail message. + */ + public OutOfMemoryError() { + super(); + } + + /** + * Constructs an {@code OutOfMemoryError} with the specified + * detail message. + * + * @param s the detail message. + */ + public OutOfMemoryError(String s) { + super(s); + } +} diff --git a/src/MapleFE/test/java/openjdk/OutOfMemoryError.java.result b/src/MapleFE/test/java/openjdk/OutOfMemoryError.java.result new file mode 100644 index 0000000000000000000000000000000000000000..75397242fea0e8bb190ddc82dd91d2eb10e47c1f --- /dev/null +++ b/src/MapleFE/test/java/openjdk/OutOfMemoryError.java.result @@ -0,0 +1,17 @@ +Matched 5 tokens. +Matched 43 tokens. +============= Module =========== +== Sub Tree == +package java.lang +== Sub Tree == +class OutOfMemoryError + Fields: + serialVersionUID=-2004294891 + Instance Initializer: + Constructors: + constructor OutOfMemoryError() throws: + constructor OutOfMemoryError(s) throws: + Methods: + LocalClasses: + LocalInterfaces: + diff --git a/src/MapleFE/test/java/openjdk/Override.java b/src/MapleFE/test/java/openjdk/Override.java new file mode 100644 index 0000000000000000000000000000000000000000..bf77344296cb6f07b940fc132e5a6a3963c4650a --- /dev/null +++ b/src/MapleFE/test/java/openjdk/Override.java @@ -0,0 +1,52 @@ +/* + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +package java.lang; + +import java.lang.annotation.*; + +/** + * Indicates that a method declaration is intended to override a + * method declaration in a supertype. If a method is annotated with + * this annotation type compilers are required to generate an error + * message unless at least one of the following conditions hold: + * + *

+ * <ul><li>
+ * The method does override or implement a method declared in a
+ * supertype.
+ * </li><li>
+ * The method has a signature that is override-equivalent to that of
+ * any public method declared in {@linkplain Object}.
+ * </li></ul>
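A small illustration of the conditions above: annotating a genuine override compiles, while annotating a method that matches nothing in a supertype is rejected at compile time. The Greeter class is an invented example, not part of this diff.

```java
// Illustrative sketch (not part of the diff): @Override turns a typo that would
// silently declare a new method into a compile-time error.
public class Greeter {
    @Override
    public String toString() {          // compiles: really overrides Object.toString()
        return "Greeter";
    }

    // @Override
    // public String toStrng() { ... }  // would be rejected by the compiler if annotated
}
```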
+ * + * @author Peter von der Ahé + * @author Joshua Bloch + * @jls 9.6.1.4 @Override + * @since 1.5 + */ +@Target(ElementType.METHOD) +@Retention(RetentionPolicy.SOURCE) +public @interface Override { +} diff --git a/src/MapleFE/test/java/openjdk/Override.java.result b/src/MapleFE/test/java/openjdk/Override.java.result new file mode 100644 index 0000000000000000000000000000000000000000..6f3311685210623aa18bcd56ecac4db5821da45d --- /dev/null +++ b/src/MapleFE/test/java/openjdk/Override.java.result @@ -0,0 +1,10 @@ +Matched 5 tokens. +Matched 14 tokens. +Matched 34 tokens. +============= Module =========== +== Sub Tree == +package java.lang +== Sub Tree == +import java.lang.annotation +== Sub Tree == +annotation type : Override diff --git a/src/MapleFE/test/java/openjdk/Package.java b/src/MapleFE/test/java/openjdk/Package.java new file mode 100644 index 0000000000000000000000000000000000000000..55d2ba7df3299d9e49247e451ef828002efa2fbe --- /dev/null +++ b/src/MapleFE/test/java/openjdk/Package.java @@ -0,0 +1,657 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package java.lang; + +import java.lang.reflect.AnnotatedElement; +import java.io.InputStream; +import java.util.Enumeration; + +import java.util.StringTokenizer; +import java.io.File; +import java.io.FileInputStream; +import java.io.FileNotFoundException; +import java.io.IOException; +import java.net.URL; +import java.net.MalformedURLException; +import java.security.AccessController; +import java.security.PrivilegedAction; + +import java.util.jar.JarInputStream; +import java.util.jar.Manifest; +import java.util.jar.Attributes; +import java.util.jar.Attributes.Name; +import java.util.jar.JarException; +import java.util.Map; +import java.util.HashMap; +import java.util.Iterator; + +import sun.net.www.ParseUtil; +import sun.reflect.CallerSensitive; +import dalvik.system.VMRuntime; +import dalvik.system.VMStack; + +import java.lang.annotation.Annotation; + +/** + * {@code Package} objects contain version information + * about the implementation and specification of a Java package. + * This versioning information is retrieved and made available + * by the {@link ClassLoader} instance that + * loaded the class(es). Typically, it is stored in the manifest that is + * distributed with the classes. + * + *

The set of classes that make up the package may implement a + * particular specification and if so the specification title, version number, + * and vendor strings identify that specification. + * An application can ask if the package is + * compatible with a particular version, see the {@link + * #isCompatibleWith isCompatibleWith} + * method for details. + * + *

Specification version numbers use a syntax that consists of nonnegative + * decimal integers separated by periods ".", for example "2.0" or + * "1.2.3.4.5.6.7". This allows an extensible number to be used to represent + * major, minor, micro, etc. versions. The version specification is described + * by the following formal grammar: + *

+ * <blockquote>
+ * <dl>
+ * <dt><i>SpecificationVersion:</i>
+ * <dd><i>Digits RefinedVersion<sub>opt</sub></i>
+ *
+ * <dt><i>RefinedVersion:</i>
+ * <dd>{@code .} <i>Digits</i>
+ * <dd>{@code .} <i>Digits RefinedVersion</i>
+ *
+ * <dt><i>Digits:</i>
+ * <dd><i>Digit</i>
+ * <dd><i>Digits</i>
+ *
+ * <dt><i>Digit:</i>
+ * <dd>any character for which {@link Character#isDigit} returns {@code true},
+ * e.g. 0, 1, 2, ...
+ * </dl>
+ * </blockquote>
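The grammar above describes plain dotted integer versions; the comparison that Package.isCompatibleWith performs on them (its full body appears later in this same file) proceeds component by component, treating missing components as zero. The standalone sketch below mirrors that comparison with invented names; it is not part of the file being added, and it omits the negative-component validation the real method performs.

```java
// Illustrative sketch (not part of the diff): component-wise comparison of
// dotted specification versions, in the spirit of Package.isCompatibleWith().
public class SpecVersions {
    /** Returns true if {@code actual} is greater than or equal to {@code desired}. */
    static boolean atLeast(String actual, String desired) {
        String[] a = actual.split("\\.", -1);
        String[] d = desired.split("\\.", -1);
        int len = Math.max(a.length, d.length);
        for (int i = 0; i < len; i++) {
            int ai = i < a.length ? Integer.parseInt(a[i]) : 0;  // missing components count as 0
            int di = i < d.length ? Integer.parseInt(d[i]) : 0;
            if (ai != di) return ai > di;                        // first differing component decides
        }
        return true;                                             // all components equal
    }

    public static void main(String[] args) {
        System.out.println(atLeast("1.2.3", "1.2"));   // true
        System.out.println(atLeast("1.2", "1.2.1"));   // false
    }
}
```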

The implementation title, version, and vendor strings identify an + * implementation and are made available conveniently to enable accurate + * reporting of the packages involved when a problem occurs. The contents + * all three implementation strings are vendor specific. The + * implementation version strings have no specified syntax and should + * only be compared for equality with desired version identifiers. + * + *

Within each {@code ClassLoader} instance all classes from the same + * java package have the same Package object. The static methods allow a package + * to be found by name or the set of all packages known to the current class + * loader to be found. + * + * @see ClassLoader#definePackage + */ +public class Package implements java.lang.reflect.AnnotatedElement { + /** + * Return the name of this package. + * + * @return The fully-qualified name of this package as defined in section 6.5.3 of + * The Java™ Language Specification, + * for example, {@code java.lang} + */ + public String getName() { + return pkgName; + } + + + /** + * Return the title of the specification that this package implements. + * @return the specification title, null is returned if it is not known. + */ + public String getSpecificationTitle() { + return specTitle; + } + + /** + * Returns the version number of the specification + * that this package implements. + * This version string must be a sequence of nonnegative decimal + * integers separated by "."'s and may have leading zeros. + * When version strings are compared the most significant + * numbers are compared. + * @return the specification version, null is returned if it is not known. + */ + public String getSpecificationVersion() { + return specVersion; + } + + /** + * Return the name of the organization, vendor, + * or company that owns and maintains the specification + * of the classes that implement this package. + * @return the specification vendor, null is returned if it is not known. + */ + public String getSpecificationVendor() { + return specVendor; + } + + /** + * Return the title of this package. + * @return the title of the implementation, null is returned if it is not known. + */ + public String getImplementationTitle() { + return implTitle; + } + + /** + * Return the version of this implementation. It consists of any string + * assigned by the vendor of this implementation and does + * not have any particular syntax specified or expected by the Java + * runtime. It may be compared for equality with other + * package version strings used for this implementation + * by this vendor for this package. + * @return the version of the implementation, null is returned if it is not known. + */ + public String getImplementationVersion() { + return implVersion; + } + + /** + * Returns the name of the organization, + * vendor or company that provided this implementation. + * @return the vendor that implemented this package.. + */ + public String getImplementationVendor() { + return implVendor; + } + + /** + * Returns true if this package is sealed. + * + * @return true if the package is sealed, false otherwise + */ + public boolean isSealed() { + return sealBase != null; + } + + /** + * Returns true if this package is sealed with respect to the specified + * code source url. + * + * @param url the code source url + * @return true if this package is sealed with respect to url + */ + public boolean isSealed(URL url) { + return url.equals(sealBase); + } + + /** + * Compare this package's specification version with a + * desired version. It returns true if + * this packages specification version number is greater than or equal + * to the desired version number.

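+ * <p>For example (an illustrative sketch, assuming a {@code Package}
+ * instance {@code pkg} whose specification version is {@code "1.4"}):
+ * <pre> {@code
+ * pkg.isCompatibleWith("1.2");    // true:  1.4 >= 1.2
+ * pkg.isCompatibleWith("1.4");    // true:  equal versions are compatible
+ * pkg.isCompatibleWith("1.4.1");  // false: a missing component is treated as 0
+ * }</pre>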
+ * + * Version numbers are compared by sequentially comparing corresponding + * components of the desired and specification strings. + * Each component is converted as a decimal integer and the values + * compared. + * If the specification value is greater than the desired + * value true is returned. If the value is less false is returned. + * If the values are equal the period is skipped and the next pair of + * components is compared. + * + * @param desired the version string of the desired version. + * @return true if this package's version number is greater + * than or equal to the desired version number + * + * @exception NumberFormatException if the desired or current version + * is not of the correct dotted form. + */ + public boolean isCompatibleWith(String desired) + throws NumberFormatException + { + if (specVersion == null || specVersion.length() < 1) { + throw new NumberFormatException("Empty version string"); + } + + String [] sa = specVersion.split("\\.", -1); + int [] si = new int[sa.length]; + for (int i = 0; i < sa.length; i++) { + si[i] = Integer.parseInt(sa[i]); + if (si[i] < 0) + throw NumberFormatException.forInputString("" + si[i]); + } + + String [] da = desired.split("\\.", -1); + int [] di = new int[da.length]; + for (int i = 0; i < da.length; i++) { + di[i] = Integer.parseInt(da[i]); + if (di[i] < 0) + throw NumberFormatException.forInputString("" + di[i]); + } + + int len = Math.max(di.length, si.length); + for (int i = 0; i < len; i++) { + int d = (i < di.length ? di[i] : 0); + int s = (i < si.length ? si[i] : 0); + if (s < d) + return false; + if (s > d) + return true; + } + return true; + } + + /** + * Find a package by name in the callers {@code ClassLoader} instance. + * The callers {@code ClassLoader} instance is used to find the package + * instance corresponding to the named class. If the callers + * {@code ClassLoader} instance is null then the set of packages loaded + * by the system {@code ClassLoader} instance is searched to find the + * named package.

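+ * <p>For example (illustrative), looking up a well-known package and
+ * reading one of its optional attributes:
+ * <pre> {@code
+ * Package p = Package.getPackage("java.lang");
+ * String title = (p != null) ? p.getSpecificationTitle() : null;
+ * }</pre>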
+ * + * Packages have attributes for versions and specifications only if the class + * loader created the package instance with the appropriate attributes. Typically, + * those attributes are defined in the manifests that accompany the classes. + * + * @param name a package name, for example, java.lang. + * @return the package of the requested name. It may be null if no package + * information is available from the archive or codebase. + */ + @CallerSensitive + public static Package getPackage(String name) { + ClassLoader l = VMStack.getCallingClassLoader(); + if (l != null) { + return l.getPackage(name); + } else { + return getSystemPackage(name); + } + } + + /** + * Get all the packages currently known for the caller's {@code ClassLoader} + * instance. Those packages correspond to classes loaded via or accessible by + * name to that {@code ClassLoader} instance. If the caller's + * {@code ClassLoader} instance is the bootstrap {@code ClassLoader} + * instance, which may be represented by {@code null} in some implementations, + * only packages corresponding to classes loaded by the bootstrap + * {@code ClassLoader} instance will be returned. + * + * @return a new array of packages known to the callers {@code ClassLoader} + * instance. An zero length array is returned if none are known. + */ + @CallerSensitive + public static Package[] getPackages() { + ClassLoader l = VMStack.getCallingClassLoader(); + if (l != null) { + return l.getPackages(); + } else { + return getSystemPackages(); + } + } + + /** + * Get the package for the specified class. + * The class's class loader is used to find the package instance + * corresponding to the specified class. If the class loader + * is the bootstrap class loader, which may be represented by + * {@code null} in some implementations, then the set of packages + * loaded by the bootstrap class loader is searched to find the package. + *

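+ * <p>Callers typically reach this indirectly through
+ * {@link Class#getPackage()}; for example (illustrative),
+ * {@code String.class.getPackage()} resolves the {@code java.lang}
+ * package through the bootstrap class loader.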
+ * Packages have attributes for versions and specifications only + * if the class loader created the package + * instance with the appropriate attributes. Typically those + * attributes are defined in the manifests that accompany + * the classes. + * + * @param c the class to get the package of. + * @return the package of the class. It may be null if no package + * information is available from the archive or codebase. */ + static Package getPackage(Class c) { + String name = c.getName(); + int i = name.lastIndexOf('.'); + if (i != -1) { + name = name.substring(0, i); + ClassLoader cl = c.getClassLoader(); + if (cl != null) { + return cl.getPackage(name); + } else { + return getSystemPackage(name); + } + } else { + return null; + } + } + + /** + * Return the hash code computed from the package name. + * @return the hash code computed from the package name. + */ + public int hashCode(){ + return pkgName.hashCode(); + } + + /** + * Returns the string representation of this Package. + * Its value is the string "package " and the package name. + * If the package title is defined it is appended. + * If the package version is defined it is appended. + * @return the string representation of the package. + */ + public String toString() { + // Android-changed start + // Several apps try to parse the output of toString(). This is a really + // bad idea - especially when there's a Package.getName() function as well as a + // Class.getName() function that can be used instead. + // Starting from the API level 25 the proper output is generated. + final int targetSdkVersion = VMRuntime.getRuntime().getTargetSdkVersion(); + if (targetSdkVersion > 0 && targetSdkVersion <= 24) { + return "package " + pkgName; + } + // Android-changed end + + String spec = specTitle; + String ver = specVersion; + if (spec != null && spec.length() > 0) + spec = ", " + spec; + else + spec = ""; + if (ver != null && ver.length() > 0) + ver = ", version " + ver; + else + ver = ""; + return "package " + pkgName + spec + ver; + } + + private Class getPackageInfo() { + if (packageInfo == null) { + try { + packageInfo = Class.forName(pkgName + ".package-info", false, loader); + } catch (ClassNotFoundException ex) { + // store a proxy for the package info that has no annotations + class PackageInfoProxy {} + packageInfo = PackageInfoProxy.class; + } + } + return packageInfo; + } + + /** + * @throws NullPointerException {@inheritDoc} + * @since 1.5 + */ + public A getAnnotation(Class annotationClass) { + return getPackageInfo().getAnnotation(annotationClass); + } + + /** + * {@inheritDoc} + * @throws NullPointerException {@inheritDoc} + * @since 1.5 + */ + @Override + public boolean isAnnotationPresent(Class annotationClass) { + return AnnotatedElement.super.isAnnotationPresent(annotationClass); + } + + /** + * @throws NullPointerException {@inheritDoc} + * @since 1.8 + */ + @Override + public A[] getAnnotationsByType(Class annotationClass) { + return getPackageInfo().getAnnotationsByType(annotationClass); + } + + /** + * @since 1.5 + */ + public Annotation[] getAnnotations() { + return getPackageInfo().getAnnotations(); + } + + /** + * @throws NullPointerException {@inheritDoc} + * @since 1.8 + */ + @Override + public A getDeclaredAnnotation(Class annotationClass) { + return getPackageInfo().getDeclaredAnnotation(annotationClass); + } + + /** + * @throws NullPointerException {@inheritDoc} + * @since 1.8 + */ + @Override + public A[] getDeclaredAnnotationsByType(Class annotationClass) { + return 
getPackageInfo().getDeclaredAnnotationsByType(annotationClass); + } + + /** + * @since 1.5 + */ + public Annotation[] getDeclaredAnnotations() { + return getPackageInfo().getDeclaredAnnotations(); + } + + /** + * Construct a package instance with the specified version + * information. + * @param name the name of the package + * @param spectitle the title of the specification + * @param specversion the version of the specification + * @param specvendor the organization that maintains the specification + * @param impltitle the title of the implementation + * @param implversion the version of the implementation + * @param implvendor the organization that maintains the implementation + */ + Package(String name, + String spectitle, String specversion, String specvendor, + String impltitle, String implversion, String implvendor, + URL sealbase, ClassLoader loader) + { + pkgName = name; + implTitle = impltitle; + implVersion = implversion; + implVendor = implvendor; + specTitle = spectitle; + specVersion = specversion; + specVendor = specvendor; + sealBase = sealbase; + this.loader = loader; + } + + /* + * Construct a package using the attributes from the specified manifest. + * + * @param name the package name + * @param man the optional manifest for the package + * @param url the optional code source url for the package + */ + private Package(String name, Manifest man, URL url, ClassLoader loader) { + String path = name.replace('.', '/').concat("/"); + String sealed = null; + String specTitle= null; + String specVersion= null; + String specVendor= null; + String implTitle= null; + String implVersion= null; + String implVendor= null; + URL sealBase= null; + Attributes attr = man.getAttributes(path); + if (attr != null) { + specTitle = attr.getValue(Name.SPECIFICATION_TITLE); + specVersion = attr.getValue(Name.SPECIFICATION_VERSION); + specVendor = attr.getValue(Name.SPECIFICATION_VENDOR); + implTitle = attr.getValue(Name.IMPLEMENTATION_TITLE); + implVersion = attr.getValue(Name.IMPLEMENTATION_VERSION); + implVendor = attr.getValue(Name.IMPLEMENTATION_VENDOR); + sealed = attr.getValue(Name.SEALED); + } + attr = man.getMainAttributes(); + if (attr != null) { + if (specTitle == null) { + specTitle = attr.getValue(Name.SPECIFICATION_TITLE); + } + if (specVersion == null) { + specVersion = attr.getValue(Name.SPECIFICATION_VERSION); + } + if (specVendor == null) { + specVendor = attr.getValue(Name.SPECIFICATION_VENDOR); + } + if (implTitle == null) { + implTitle = attr.getValue(Name.IMPLEMENTATION_TITLE); + } + if (implVersion == null) { + implVersion = attr.getValue(Name.IMPLEMENTATION_VERSION); + } + if (implVendor == null) { + implVendor = attr.getValue(Name.IMPLEMENTATION_VENDOR); + } + if (sealed == null) { + sealed = attr.getValue(Name.SEALED); + } + } + if ("true".equalsIgnoreCase(sealed)) { + sealBase = url; + } + pkgName = name; + this.specTitle = specTitle; + this.specVersion = specVersion; + this.specVendor = specVendor; + this.implTitle = implTitle; + this.implVersion = implVersion; + this.implVendor = implVendor; + this.sealBase = sealBase; + this.loader = loader; + } + + /* + * Returns the loaded system package for the specified name. + */ + static Package getSystemPackage(String name) { + synchronized (pkgs) { + Package pkg = pkgs.get(name); + if (pkg == null) { + name = name.replace('.', '/').concat("/"); + String fn = getSystemPackage0(name); + if (fn != null) { + pkg = defineSystemPackage(name, fn); + } + } + return pkg; + } + } + + /* + * Return an array of loaded system packages. 
+ */ + static Package[] getSystemPackages() { + // First, update the system package map with new package names + String[] names = getSystemPackages0(); + synchronized (pkgs) { + for (int i = 0; i < names.length; i++) { + defineSystemPackage(names[i], getSystemPackage0(names[i])); + } + return pkgs.values().toArray(new Package[pkgs.size()]); + } + } + + private static Package defineSystemPackage(final String iname, + final String fn) + { + return AccessController.doPrivileged(new PrivilegedAction() { + public Package run() { + String name = iname; + // Get the cached code source url for the file name + URL url = urls.get(fn); + if (url == null) { + // URL not found, so create one + File file = new File(fn); + try { + url = ParseUtil.fileToEncodedURL(file); + } catch (MalformedURLException e) { + } + if (url != null) { + urls.put(fn, url); + // If loading a JAR file, then also cache the manifest + if (file.isFile()) { + mans.put(fn, loadManifest(fn)); + } + } + } + // Convert to "."-separated package name + name = name.substring(0, name.length() - 1).replace('/', '.'); + Package pkg; + Manifest man = mans.get(fn); + if (man != null) { + pkg = new Package(name, man, url, null); + } else { + pkg = new Package(name, null, null, null, + null, null, null, null, null); + } + pkgs.put(name, pkg); + return pkg; + } + }); + } + + /* + * Returns the Manifest for the specified JAR file name. + */ + private static Manifest loadManifest(String fn) { + try (FileInputStream fis = new FileInputStream(fn); + JarInputStream jis = new JarInputStream(fis, false)) + { + return jis.getManifest(); + } catch (IOException e) { + return null; + } + } + + // The map of loaded system packages + private static Map pkgs = new HashMap<>(31); + + // Maps each directory or zip file name to its corresponding url + private static Map urls = new HashMap<>(10); + + // Maps each code source url for a jar file to its manifest + private static Map mans = new HashMap<>(10); + + private static native String getSystemPackage0(String name); + private static native String[] getSystemPackages0(); + + /* + * Private storage for the package name and attributes. + */ + private final String pkgName; + private final String specTitle; + private final String specVersion; + private final String specVendor; + private final String implTitle; + private final String implVersion; + private final String implVendor; + private final URL sealBase; + private transient final ClassLoader loader; + private transient Class packageInfo; +} diff --git a/src/MapleFE/test/java/openjdk/Package.java.result b/src/MapleFE/test/java/openjdk/Package.java.result new file mode 100644 index 0000000000000000000000000000000000000000..018c563d1a7ccc902d87058ea27f9426d1b2a4c8 --- /dev/null +++ b/src/MapleFE/test/java/openjdk/Package.java.result @@ -0,0 +1,367 @@ +Matched 5 tokens. +Matched 14 tokens. +Matched 21 tokens. +Matched 28 tokens. +Matched 35 tokens. +Matched 42 tokens. +Matched 49 tokens. +Matched 56 tokens. +Matched 63 tokens. +Matched 70 tokens. +Matched 77 tokens. +Matched 84 tokens. +Matched 91 tokens. +Matched 100 tokens. +Matched 109 tokens. +Matched 118 tokens. +Matched 129 tokens. +Matched 138 tokens. +Matched 145 tokens. +Matched 152 tokens. +Matched 159 tokens. +Matched 168 tokens. +Matched 175 tokens. +Matched 182 tokens. +Matched 189 tokens. +Matched 198 tokens. +Matched 2146 tokens. 
+============= Module =========== +== Sub Tree == +package java.lang +== Sub Tree == +import java.lang.reflect.AnnotatedElement +== Sub Tree == +import java.io.InputStream +== Sub Tree == +import java.util.Enumeration +== Sub Tree == +import java.util.StringTokenizer +== Sub Tree == +import java.io.File +== Sub Tree == +import java.io.FileInputStream +== Sub Tree == +import java.io.FileNotFoundException +== Sub Tree == +import java.io.IOException +== Sub Tree == +import java.net.URL +== Sub Tree == +import java.net.MalformedURLException +== Sub Tree == +import java.security.AccessController +== Sub Tree == +import java.security.PrivilegedAction +== Sub Tree == +import java.util.jar.JarInputStream +== Sub Tree == +import java.util.jar.Manifest +== Sub Tree == +import java.util.jar.Attributes +== Sub Tree == +import java.util.jar.Attributes.Name +== Sub Tree == +import java.util.jar.JarException +== Sub Tree == +import java.util.Map +== Sub Tree == +import java.util.HashMap +== Sub Tree == +import java.util.Iterator +== Sub Tree == +import sun.net.www.ParseUtil +== Sub Tree == +import sun.reflect.CallerSensitive +== Sub Tree == +import dalvik.system.VMRuntime +== Sub Tree == +import dalvik.system.VMStack +== Sub Tree == +import java.lang.annotation.Annotation +== Sub Tree == +class Package + Fields: + pkgs=new HashMap(31) urls=new HashMap(10) mans=new HashMap(10) pkgName specTitle specVersion specVendor implTitle implVersion implVendor sealBase loader packageInfo + Instance Initializer: + Constructors: + constructor Package(name,spectitle,specversion,specvendor,impltitle,implversion,implvendor,sealbase,loader) throws: + pkgName Assign name + implTitle Assign impltitle + implVersion Assign implversion + implVendor Assign implvendor + specTitle Assign spectitle + specVersion Assign specversion + specVendor Assign specvendor + sealBase Assign sealbase + this.loader Assign loader + constructor Package(name,man,url,loader) throws: + Decl: path=name.replace(.,/).concat("/") + Decl: sealed=null + Decl: specTitle=null + Decl: specVersion=null + Decl: specVendor=null + Decl: implTitle=null + Decl: implVersion=null + Decl: implVendor=null + Decl: sealBase=null + Decl: attr=man.getAttributes(path) + cond-branch cond:attr NE null + true branch : + specTitle Assign attr.getValue(Name.SPECIFICATION_TITLE) + specVersion Assign attr.getValue(Name.SPECIFICATION_VERSION) + specVendor Assign attr.getValue(Name.SPECIFICATION_VENDOR) + implTitle Assign attr.getValue(Name.IMPLEMENTATION_TITLE) + implVersion Assign attr.getValue(Name.IMPLEMENTATION_VERSION) + implVendor Assign attr.getValue(Name.IMPLEMENTATION_VENDOR) + sealed Assign attr.getValue(Name.SEALED) + false branch : + + attr Assign man.getMainAttributes() + cond-branch cond:attr NE null + true branch : + cond-branch cond:specTitle EQ null + true branch : + specTitle Assign attr.getValue(Name.SPECIFICATION_TITLE) + false branch : + + cond-branch cond:specVersion EQ null + true branch : + specVersion Assign attr.getValue(Name.SPECIFICATION_VERSION) + false branch : + + cond-branch cond:specVendor EQ null + true branch : + specVendor Assign attr.getValue(Name.SPECIFICATION_VENDOR) + false branch : + + cond-branch cond:implTitle EQ null + true branch : + implTitle Assign attr.getValue(Name.IMPLEMENTATION_TITLE) + false branch : + + cond-branch cond:implVersion EQ null + true branch : + implVersion Assign attr.getValue(Name.IMPLEMENTATION_VERSION) + false branch : + + cond-branch cond:implVendor EQ null + true branch : + implVendor Assign 
attr.getValue(Name.IMPLEMENTATION_VENDOR) + false branch : + + cond-branch cond:sealed EQ null + true branch : + sealed Assign attr.getValue(Name.SEALED) + false branch : + + false branch : + + cond-branch cond:"true".equalsIgnoreCase(sealed) + true branch : + sealBase Assign url + false branch : + + pkgName Assign name + this.specTitle Assign specTitle + this.specVersion Assign specVersion + this.specVendor Assign specVendor + this.implTitle Assign implTitle + this.implVersion Assign implVersion + this.implVendor Assign implVendor + this.sealBase Assign sealBase + this.loader Assign loader + Methods: + func getName() throws: + return pkgName + func getSpecificationTitle() throws: + return specTitle + func getSpecificationVersion() throws: + return specVersion + func getSpecificationVendor() throws: + return specVendor + func getImplementationTitle() throws: + return implTitle + func getImplementationVersion() throws: + return implVersion + func getImplementationVendor() throws: + return implVendor + func isSealed() throws: + return sealBase NE null + func isSealed(url) throws: + return url.equals(sealBase) + func isCompatibleWith(desired) throws: NumberFormatException + cond-branch cond:specVersion EQ null Lor specVersion.length() LT 1 + true branch : + new NumberFormatException("Empty version string") + false branch : + + Decl: sa=specVersion.split("\\.",-1) + Decl: si= + for ( ) + Assign Integer.parseInt(sa,i) + cond-branch cond: LT 0 + true branch : + NumberFormatException.forInputString("" Add ) false branch : + + + Decl: da=desired.split("\\.",-1) + Decl: di= + for ( ) + Assign Integer.parseInt(da,i) + cond-branch cond: LT 0 + true branch : + NumberFormatException.forInputString("" Add ) false branch : + + + Decl: len=Math.max(di.length,si.length) + for ( ) + Decl: d=() + Decl: s=() + cond-branch cond:s LT d + true branch : + return false false branch : + + cond-branch cond:s GT d + true branch : + return true false branch : + + + return true + func getPackage(name) throws: + Decl: l=VMStack.getCallingClassLoader() + cond-branch cond:l NE null + true branch : + return l.getPackage(name) + false branch : + return getSystemPackage(name) + + func getPackages() throws: + Decl: l=VMStack.getCallingClassLoader() + cond-branch cond:l NE null + true branch : + return l.getPackages() + false branch : + return getSystemPackages() + + func getPackage(c) throws: + Decl: name=c.getName() + Decl: i=name.lastIndexOf(.) 
+ cond-branch cond:i NE -1 + true branch : + name Assign name.substring(0,i) + Decl: cl=c.getClassLoader() + cond-branch cond:cl NE null + true branch : + return cl.getPackage(name) + false branch : + return getSystemPackage(name) + + false branch : + return null + + func hashCode() throws: + return pkgName.hashCode() + func toString() throws: + Decl: targetSdkVersion=VMRuntime.getRuntime().getTargetSdkVersion() + cond-branch cond:targetSdkVersion GT 0 Land targetSdkVersion LE 24 + true branch : + return "package " Add pkgName + false branch : + + Decl: spec=specTitle + Decl: ver=specVersion + cond-branch cond:spec NE null Land spec.length() GT 0 + true branch : + spec Assign ", " Add spec false branch : + spec Assign "" + cond-branch cond:ver NE null Land ver.length() GT 0 + true branch : + ver Assign ", version " Add ver false branch : + ver Assign "" + return "package " Add pkgName Add spec Add ver + func getPackageInfo() throws: + cond-branch cond:packageInfo EQ null + true branch : + packageInfo Assign Class.forName(pkgName Add ".package-info",false,loader) + + ClassNotFoundException + ex + class PackageInfoProxy + Fields: + + Instance Initializer: + Constructors: + Methods: + LocalClasses: + LocalInterfaces: + + packageInfo Assign PackageInfoProxy + + false branch : + + return packageInfo + func getAnnotation(annotationClass) throws: + return getPackageInfo().getAnnotation(annotationClass) + func isAnnotationPresent(annotationClass) throws: + return + func getAnnotationsByType(annotationClass) throws: + return getPackageInfo().getAnnotationsByType(annotationClass) + func getAnnotations() throws: + return getPackageInfo().getAnnotations() + func getDeclaredAnnotation(annotationClass) throws: + return getPackageInfo().getDeclaredAnnotation(annotationClass) + func getDeclaredAnnotationsByType(annotationClass) throws: + return getPackageInfo().getDeclaredAnnotationsByType(annotationClass) + func getDeclaredAnnotations() throws: + return getPackageInfo().getDeclaredAnnotations() + func getSystemPackage(name) throws: + Decl: pkg=pkgs.get(name) + cond-branch cond:pkg EQ null + true branch : + name Assign name.replace(.,/).concat("/") + Decl: fn=getSystemPackage0(name) + cond-branch cond:fn NE null + true branch : + pkg Assign defineSystemPackage(name,fn) + false branch : + + false branch : + + return pkg + + func getSystemPackages() throws: + Decl: names=getSystemPackages0() + for ( ) + defineSystemPackage(names,i,getSystemPackage0(names,i)) + + return pkgs.values().toArray(Package,pkgs.size()) + + func defineSystemPackage(iname,fn) throws: + return AccessController.doPrivileged(new PrivilegedAction()) + func loadManifest(fn) throws: + FileInputStream + fis + new FileInputStream(fn) + JarInputStream + jis + new JarInputStream(fis,false) + return jis.getManifest() + + IOException + e + return null + + func getSystemPackage0(name) throws: + func getSystemPackages0() throws: + LocalClasses: + LocalInterfaces: + +Identifier:FileInputStream has no decl. +Identifier:fis has no decl. +UserType:FileInputStream has no decl. +Identifier:JarInputStream has no decl. +Identifier:jis has no decl. +UserType:JarInputStream has no decl. +Identifier:fis has no decl. +Identifier:IOException has no decl. +Identifier:e has no decl. 
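A note on the .result dumps: each one records what appear to be token-matching
progress lines ("Matched N tokens.") followed by a per-declaration dump of the
module. Reading the Package.java dump above, a guard from the manifest
constructor such as

    if (attr != null) {
        sealed = attr.getValue(Name.SEALED);
    }

appears as a "cond-branch cond:attr NE null" node whose true branch carries
"sealed Assign attr.getValue(Name.SEALED)" and whose false branch is empty,
while local variables show up as "Decl:" entries. The trailing
"Identifier:... has no decl." lines apparently flag names (here the
try-with-resources resources and catch parameter of loadManifest) that the
front end could not bind to a declaration when the file is parsed in
isolation.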
diff --git a/src/MapleFE/test/java/openjdk/Process.java b/src/MapleFE/test/java/openjdk/Process.java new file mode 100644 index 0000000000000000000000000000000000000000..4e94538246540920f91e8de412274c50a19b18ce --- /dev/null +++ b/src/MapleFE/test/java/openjdk/Process.java @@ -0,0 +1,265 @@ +/* + * Copyright (c) 1995, 2012, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package java.lang; + +import java.io.*; +import java.util.concurrent.TimeUnit; + +/** + * The {@link ProcessBuilder#start()} and + * {@link Runtime#exec(String[],String[],File) Runtime.exec} + * methods create a native process and return an instance of a + * subclass of {@code Process} that can be used to control the process + * and obtain information about it. The class {@code Process} + * provides methods for performing input from the process, performing + * output to the process, waiting for the process to complete, + * checking the exit status of the process, and destroying (killing) + * the process. + * + *

The methods that create processes may not work well for special + * processes on certain native platforms, such as native windowing + * processes, daemon processes, Win16/DOS processes on Microsoft + * Windows, or shell scripts. + * + *

By default, the created subprocess does not have its own terminal + * or console. All its standard I/O (i.e. stdin, stdout, stderr) + * operations will be redirected to the parent process, where they can + * be accessed via the streams obtained using the methods + * {@link #getOutputStream()}, + * {@link #getInputStream()}, and + * {@link #getErrorStream()}. + * The parent process uses these streams to feed input to and get output + * from the subprocess. Because some native platforms only provide + * limited buffer size for standard input and output streams, failure + * to promptly write the input stream or read the output stream of + * the subprocess may cause the subprocess to block, or even deadlock. + * + *

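+ * <p>For example (an illustrative sketch using a hypothetical
+ * {@code myCommand}), the merged output can be drained before waiting for
+ * termination, so a full output buffer cannot stall the subprocess:
+ * <pre> {@code
+ * ProcessBuilder pb = new ProcessBuilder("myCommand", "myArg");
+ * pb.redirectErrorStream(true);
+ * Process p = pb.start();
+ * try (BufferedReader r = new BufferedReader(
+ *          new InputStreamReader(p.getInputStream()))) {
+ *     String line;
+ *     while ((line = r.readLine()) != null) {
+ *         System.out.println(line);
+ *     }
+ * }
+ * int exitValue = p.waitFor();
+ * }</pre>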
Where desired, + * subprocess I/O can also be redirected + * using methods of the {@link ProcessBuilder} class. + * + *

The subprocess is not killed when there are no more references to + * the {@code Process} object, but rather the subprocess + * continues executing asynchronously. + * + *

There is no requirement that a process represented by a {@code + * Process} object execute asynchronously or concurrently with respect + * to the Java process that owns the {@code Process} object. + * + *

As of 1.5, {@link ProcessBuilder#start()} is the preferred way + * to create a {@code Process}. + * + * @since JDK1.0 + */ +public abstract class Process { + /** + * Returns the output stream connected to the normal input of the + * subprocess. Output to the stream is piped into the standard + * input of the process represented by this {@code Process} object. + * + *

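+ * <p>For example (illustrative, for a {@code Process} instance {@code p}),
+ * a line of input can be written and the stream closed so the subprocess
+ * sees end-of-input:
+ * <pre> {@code
+ * try (OutputStreamWriter w =
+ *          new OutputStreamWriter(p.getOutputStream())) {
+ *     w.write("some input\n");
+ * }
+ * }</pre>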
If the standard input of the subprocess has been redirected using + * {@link ProcessBuilder#redirectInput(Redirect) + * ProcessBuilder.redirectInput} + * then this method will return a + * null output stream. + * + *

Implementation note: It is a good idea for the returned + * output stream to be buffered. + * + * @return the output stream connected to the normal input of the + * subprocess + */ + public abstract OutputStream getOutputStream(); + + /** + * Returns the input stream connected to the normal output of the + * subprocess. The stream obtains data piped from the standard + * output of the process represented by this {@code Process} object. + * + *

If the standard output of the subprocess has been redirected using + * {@link ProcessBuilder#redirectOutput(Redirect) + * ProcessBuilder.redirectOutput} + * then this method will return a + * null input stream. + * + *

Otherwise, if the standard error of the subprocess has been + * redirected using + * {@link ProcessBuilder#redirectErrorStream(boolean) + * ProcessBuilder.redirectErrorStream} + * then the input stream returned by this method will receive the + * merged standard output and the standard error of the subprocess. + * + *

Implementation note: It is a good idea for the returned + * input stream to be buffered. + * + * @return the input stream connected to the normal output of the + * subprocess + */ + public abstract InputStream getInputStream(); + + /** + * Returns the input stream connected to the error output of the + * subprocess. The stream obtains data piped from the error output + * of the process represented by this {@code Process} object. + * + *

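+ * <p>For example (illustrative, for a {@code Process} instance {@code p}),
+ * the error output can be drained on a separate thread while the current
+ * thread reads the normal output:
+ * <pre> {@code
+ * new Thread(() -> {
+ *     try (BufferedReader err = new BufferedReader(
+ *              new InputStreamReader(p.getErrorStream()))) {
+ *         err.lines().forEach(System.err::println);
+ *     } catch (IOException ignored) {
+ *     }
+ * }).start();
+ * }</pre>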
If the standard error of the subprocess has been redirected using + * {@link ProcessBuilder#redirectError(Redirect) + * ProcessBuilder.redirectError} or + * {@link ProcessBuilder#redirectErrorStream(boolean) + * ProcessBuilder.redirectErrorStream} + * then this method will return a + * null input stream. + * + *

Implementation note: It is a good idea for the returned + * input stream to be buffered. + * + * @return the input stream connected to the error output of + * the subprocess + */ + public abstract InputStream getErrorStream(); + + /** + * Causes the current thread to wait, if necessary, until the + * process represented by this {@code Process} object has + * terminated. This method returns immediately if the subprocess + * has already terminated. If the subprocess has not yet + * terminated, the calling thread will be blocked until the + * subprocess exits. + * + * @return the exit value of the subprocess represented by this + * {@code Process} object. By convention, the value + * {@code 0} indicates normal termination. + * @throws InterruptedException if the current thread is + * {@linkplain Thread#interrupt() interrupted} by another + * thread while it is waiting, then the wait is ended and + * an {@link InterruptedException} is thrown. + */ + public abstract int waitFor() throws InterruptedException; + + /** + * Causes the current thread to wait, if necessary, until the + * subprocess represented by this {@code Process} object has + * terminated, or the specified waiting time elapses. + * + *

If the subprocess has already terminated then this method returns + * immediately with the value {@code true}. If the process has not + * terminated and the timeout value is less than, or equal to, zero, then + * this method returns immediately with the value {@code false}. + * + *

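+ * <p>For example (illustrative, for a {@code Process} instance {@code p}),
+ * to wait at most ten seconds and then fall back to forcible termination:
+ * <pre> {@code
+ * if (!p.waitFor(10, TimeUnit.SECONDS)) {
+ *     p.destroyForcibly();
+ * }
+ * }</pre>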
The default implementation of this methods polls the {@code exitValue} + * to check if the process has terminated. Concrete implementations of this + * class are strongly encouraged to override this method with a more + * efficient implementation. + * + * @param timeout the maximum time to wait + * @param unit the time unit of the {@code timeout} argument + * @return {@code true} if the subprocess has exited and {@code false} if + * the waiting time elapsed before the subprocess has exited. + * @throws InterruptedException if the current thread is interrupted + * while waiting. + * @throws NullPointerException if unit is null + * @since 1.8 + */ + public boolean waitFor(long timeout, TimeUnit unit) + throws InterruptedException + { + long startTime = System.nanoTime(); + long rem = unit.toNanos(timeout); + + do { + try { + exitValue(); + return true; + } catch(IllegalThreadStateException ex) { + if (rem > 0) + Thread.sleep( + Math.min(TimeUnit.NANOSECONDS.toMillis(rem) + 1, 100)); + } + rem = unit.toNanos(timeout) - (System.nanoTime() - startTime); + } while (rem > 0); + return false; + } + + /** + * Returns the exit value for the subprocess. + * + * @return the exit value of the subprocess represented by this + * {@code Process} object. By convention, the value + * {@code 0} indicates normal termination. + * @throws IllegalThreadStateException if the subprocess represented + * by this {@code Process} object has not yet terminated + */ + public abstract int exitValue(); + + /** + * Kills the subprocess. Whether the subprocess represented by this + * {@code Process} object is forcibly terminated or not is + * implementation dependent. + */ + public abstract void destroy(); + + /** + * Kills the subprocess. The subprocess represented by this + * {@code Process} object is forcibly terminated. + * + *

The default implementation of this method invokes {@link #destroy} + * and so may not forcibly terminate the process. Concrete implementations + * of this class are strongly encouraged to override this method with a + * compliant implementation. Invoking this method on {@code Process} + * objects returned by {@link ProcessBuilder#start} and + * {@link Runtime#exec} will forcibly terminate the process. + * + *

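+ * <p>For example (illustrative, for a {@code Process} instance {@code p}),
+ * forcible termination is typically followed by a wait for the subprocess
+ * to actually exit:
+ * <pre> {@code
+ * p.destroyForcibly().waitFor();
+ * }</pre>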
Note: The subprocess may not terminate immediately. + * i.e. {@code isAlive()} may return true for a brief period + * after {@code destroyForcibly()} is called. This method + * may be chained to {@code waitFor()} if needed. + * + * @return the {@code Process} object representing the + * subprocess to be forcibly destroyed. + * @since 1.8 + */ + public Process destroyForcibly() { + destroy(); + return this; + } + + /** + * Tests whether the subprocess represented by this {@code Process} is + * alive. + * + * @return {@code true} if the subprocess represented by this + * {@code Process} object has not yet terminated. + * @since 1.8 + */ + public boolean isAlive() { + try { + exitValue(); + return false; + } catch(IllegalThreadStateException e) { + return true; + } + } +} diff --git a/src/MapleFE/test/java/openjdk/Process.java.result b/src/MapleFE/test/java/openjdk/Process.java.result new file mode 100644 index 0000000000000000000000000000000000000000..e3747d710b6c90f237343ebdfc614d92287055b2 --- /dev/null +++ b/src/MapleFE/test/java/openjdk/Process.java.result @@ -0,0 +1,56 @@ +Matched 5 tokens. +Matched 12 tokens. +Matched 21 tokens. +Matched 223 tokens. +============= Module =========== +== Sub Tree == +package java.lang +== Sub Tree == +import java.io +== Sub Tree == +import java.util.concurrent.TimeUnit +== Sub Tree == +class Process + Fields: + + Instance Initializer: + Constructors: + Methods: + func getOutputStream() throws: + func getInputStream() throws: + func getErrorStream() throws: + func waitFor() throws: InterruptedException + func waitFor(timeout,unit) throws: InterruptedException + Decl: startTime=System.nanoTime() + Decl: rem=unit.toNanos(timeout) + do exitValue() + return true + + IllegalThreadStateException + ex + cond-branch cond:rem GT 0 + true branch : + Thread.sleep(Math.min(TimeUnit.NANOSECONDS.toMillis(rem) Add 1,100)) false branch : + + + rem Assign unit.toNanos(timeout) Sub (System.nanoTime() Sub startTime) +while rem GT 0 + return false + func exitValue() throws: + func destroy() throws: + func destroyForcibly() throws: + destroy() + return this + func isAlive() throws: + exitValue() + return false + + IllegalThreadStateException + e + return true + + LocalClasses: + LocalInterfaces: + +Identifier:IllegalThreadStateException has no decl. +Identifier:e has no decl. diff --git a/src/MapleFE/test/java/openjdk/ProcessBuilder.java b/src/MapleFE/test/java/openjdk/ProcessBuilder.java new file mode 100644 index 0000000000000000000000000000000000000000..0c892c2358d7474157290aa1e7e6f994987972a2 --- /dev/null +++ b/src/MapleFE/test/java/openjdk/ProcessBuilder.java @@ -0,0 +1,1055 @@ +/* + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package java.lang; + +import java.io.File; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.util.Arrays; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +/** + * This class is used to create operating system processes. + * + *

Each {@code ProcessBuilder} instance manages a collection + * of process attributes. The {@link #start()} method creates a new + * {@link Process} instance with those attributes. The {@link + * #start()} method can be invoked repeatedly from the same instance + * to create new subprocesses with identical or related attributes. + * + *

Each process builder manages these process attributes: + * + *

+ * + *

Modifying a process builder's attributes will affect processes + * subsequently started by that object's {@link #start()} method, but + * will never affect previously started processes or the Java process + * itself. + * + *

Most error checking is performed by the {@link #start()} method. + * It is possible to modify the state of an object so that {@link + * #start()} will fail. For example, setting the command attribute to + * an empty list will not throw an exception unless {@link #start()} + * is invoked. + * + *

Note that this class is not synchronized. + * If multiple threads access a {@code ProcessBuilder} instance + * concurrently, and at least one of the threads modifies one of the + * attributes structurally, it must be synchronized externally. + * + *

Starting a new process which uses the default working directory + * and environment is easy: + * + *

 {@code
+ * Process p = new ProcessBuilder("myCommand", "myArg").start();
+ * }
+ * + *

Here is an example that starts a process with a modified working + * directory and environment, and redirects standard output and error + * to be appended to a log file: + * + *

 {@code
+ * ProcessBuilder pb =
+ *   new ProcessBuilder("myCommand", "myArg1", "myArg2");
+ * Map env = pb.environment();
+ * env.put("VAR1", "myValue");
+ * env.remove("OTHERVAR");
+ * env.put("VAR2", env.get("VAR1") + "suffix");
+ * pb.directory(new File("myDir"));
+ * File log = new File("log");
+ * pb.redirectErrorStream(true);
+ * pb.redirectOutput(Redirect.appendTo(log));
+ * Process p = pb.start();
+ * assert pb.redirectInput() == Redirect.PIPE;
+ * assert pb.redirectOutput().file() == log;
+ * assert p.getInputStream().read() == -1;
+ * }
+ * + *

To start a process with an explicit set of environment + * variables, first call {@link java.util.Map#clear() Map.clear()} + * before adding environment variables. + * + * @author Martin Buchholz + * @since 1.5 + */ + +public final class ProcessBuilder +{ + private List command; + private File directory; + private Map environment; + private boolean redirectErrorStream; + private Redirect[] redirects; + + /** + * Constructs a process builder with the specified operating + * system program and arguments. This constructor does not + * make a copy of the {@code command} list. Subsequent + * updates to the list will be reflected in the state of the + * process builder. It is not checked whether + * {@code command} corresponds to a valid operating system + * command. + * + * @param command the list containing the program and its arguments + * @throws NullPointerException if the argument is null + */ + public ProcessBuilder(List command) { + if (command == null) + throw new NullPointerException(); + this.command = command; + } + + /** + * Constructs a process builder with the specified operating + * system program and arguments. This is a convenience + * constructor that sets the process builder's command to a string + * list containing the same strings as the {@code command} + * array, in the same order. It is not checked whether + * {@code command} corresponds to a valid operating system + * command. + * + * @param command a string array containing the program and its arguments + */ + public ProcessBuilder(String... command) { + this.command = new ArrayList<>(command.length); + for (String arg : command) + this.command.add(arg); + } + + /** + * Sets this process builder's operating system program and + * arguments. This method does not make a copy of the + * {@code command} list. Subsequent updates to the list will + * be reflected in the state of the process builder. It is not + * checked whether {@code command} corresponds to a valid + * operating system command. + * + * @param command the list containing the program and its arguments + * @return this process builder + * + * @throws NullPointerException if the argument is null + */ + public ProcessBuilder command(List command) { + if (command == null) + throw new NullPointerException(); + this.command = command; + return this; + } + + /** + * Sets this process builder's operating system program and + * arguments. This is a convenience method that sets the command + * to a string list containing the same strings as the + * {@code command} array, in the same order. It is not + * checked whether {@code command} corresponds to a valid + * operating system command. + * + * @param command a string array containing the program and its arguments + * @return this process builder + */ + public ProcessBuilder command(String... command) { + this.command = new ArrayList<>(command.length); + for (String arg : command) + this.command.add(arg); + return this; + } + + /** + * Returns this process builder's operating system program and + * arguments. The returned list is not a copy. Subsequent + * updates to the list will be reflected in the state of this + * process builder. + * + * @return this process builder's program and its arguments + */ + public List command() { + return command; + } + + /** + * Returns a string map view of this process builder's environment. + * + * Whenever a process builder is created, the environment is + * initialized to a copy of the current process environment (see + * {@link System#getenv()}). 
Subprocesses subsequently started by + * this object's {@link #start()} method will use this map as + * their environment. + * + *

The returned object may be modified using ordinary {@link + * java.util.Map Map} operations. These modifications will be + * visible to subprocesses started via the {@link #start()} + * method. Two {@code ProcessBuilder} instances always + * contain independent process environments, so changes to the + * returned map will never be reflected in any other + * {@code ProcessBuilder} instance or the values returned by + * {@link System#getenv System.getenv}. + * + *

If the system does not support environment variables, an + * empty map is returned. + * + *

The returned map does not permit null keys or values. + * Attempting to insert or query the presence of a null key or + * value will throw a {@link NullPointerException}. + * Attempting to query the presence of a key or value which is not + * of type {@link String} will throw a {@link ClassCastException}. + * + *

The behavior of the returned map is system-dependent. A + * system may not allow modifications to environment variables or + * may forbid certain variable names or values. For this reason, + * attempts to modify the map may fail with + * {@link UnsupportedOperationException} or + * {@link IllegalArgumentException} + * if the modification is not permitted by the operating system. + * + *

Since the external format of environment variable names and + * values is system-dependent, there may not be a one-to-one + * mapping between them and Java's Unicode strings. Nevertheless, + * the map is implemented in such a way that environment variables + * which are not modified by Java code will have an unmodified + * native representation in the subprocess. + * + *

The returned map and its collection views may not obey the + * general contract of the {@link Object#equals} and + * {@link Object#hashCode} methods. + * + *

The returned map is typically case-sensitive on all platforms. + * + *

If a security manager exists, its + * {@link SecurityManager#checkPermission checkPermission} method + * is called with a + * {@link RuntimePermission}{@code ("getenv.*")} permission. + * This may result in a {@link SecurityException} being thrown. + * + *

When passing information to a Java subprocess, + * system properties + * are generally preferred over environment variables. + * + * @return this process builder's environment + * + * @throws SecurityException + * if a security manager exists and its + * {@link SecurityManager#checkPermission checkPermission} + * method doesn't allow access to the process environment + * + * @see Runtime#exec(String[],String[],java.io.File) + * @see System#getenv() + */ + public Map environment() { + SecurityManager security = System.getSecurityManager(); + if (security != null) + security.checkPermission(new RuntimePermission("getenv.*")); + + if (environment == null) + environment = ProcessEnvironment.environment(); + + assert environment != null; + + return environment; + } + + // Only for use by Runtime.exec(...envp...) + ProcessBuilder environment(String[] envp) { + assert environment == null; + if (envp != null) { + environment = ProcessEnvironment.emptyEnvironment(envp.length); + assert environment != null; + + for (String envstring : envp) { + // Before 1.5, we blindly passed invalid envstrings + // to the child process. + // We would like to throw an exception, but do not, + // for compatibility with old broken code. + + // Silently discard any trailing junk. + if (envstring.indexOf((int) '\u0000') != -1) + envstring = envstring.replaceFirst("\u0000.*", ""); + + int eqlsign = + envstring.indexOf('=', ProcessEnvironment.MIN_NAME_LENGTH); + // Silently ignore envstrings lacking the required `='. + if (eqlsign != -1) + environment.put(envstring.substring(0,eqlsign), + envstring.substring(eqlsign+1)); + } + } + return this; + } + + /** + * Returns this process builder's working directory. + * + * Subprocesses subsequently started by this object's {@link + * #start()} method will use this as their working directory. + * The returned value may be {@code null} -- this means to use + * the working directory of the current Java process, usually the + * directory named by the system property {@code user.dir}, + * as the working directory of the child process. + * + * @return this process builder's working directory + */ + public File directory() { + return directory; + } + + /** + * Sets this process builder's working directory. + * + * Subprocesses subsequently started by this object's {@link + * #start()} method will use this as their working directory. + * The argument may be {@code null} -- this means to use the + * working directory of the current Java process, usually the + * directory named by the system property {@code user.dir}, + * as the working directory of the child process. + * + * @param directory the new working directory + * @return this process builder + */ + public ProcessBuilder directory(File directory) { + this.directory = directory; + return this; + } + + // ---------------- I/O Redirection ---------------- + + /** + * Implements a null input stream. + */ + static class NullInputStream extends InputStream { + static final NullInputStream INSTANCE = new NullInputStream(); + private NullInputStream() {} + public int read() { return -1; } + public int available() { return 0; } + } + + /** + * Implements a null output stream. + */ + static class NullOutputStream extends OutputStream { + static final NullOutputStream INSTANCE = new NullOutputStream(); + private NullOutputStream() {} + public void write(int b) throws IOException { + throw new IOException("Stream closed"); + } + } + + /** + * Represents a source of subprocess input or a destination of + * subprocess output. 
+ * + * Each {@code Redirect} instance is one of the following: + * + *

    + *
  • the special value {@link #PIPE Redirect.PIPE} + *
  • the special value {@link #INHERIT Redirect.INHERIT} + *
  • a redirection to read from a file, created by an invocation of + * {@link Redirect#from Redirect.from(File)} + *
  • a redirection to write to a file, created by an invocation of + * {@link Redirect#to Redirect.to(File)} + *
  • a redirection to append to a file, created by an invocation of + * {@link Redirect#appendTo Redirect.appendTo(File)} + *
+ * + *

Each of the above categories has an associated unique + * {@link Type Type}. + * + * @since 1.7 + */ + public static abstract class Redirect { + /** + * The type of a {@link Redirect}. + */ + public enum Type { + /** + * The type of {@link Redirect#PIPE Redirect.PIPE}. + */ + PIPE, + + /** + * The type of {@link Redirect#INHERIT Redirect.INHERIT}. + */ + INHERIT, + + /** + * The type of redirects returned from + * {@link Redirect#from Redirect.from(File)}. + */ + READ, + + /** + * The type of redirects returned from + * {@link Redirect#to Redirect.to(File)}. + */ + WRITE, + + /** + * The type of redirects returned from + * {@link Redirect#appendTo Redirect.appendTo(File)}. + */ + APPEND + }; + + /** + * Returns the type of this {@code Redirect}. + * @return the type of this {@code Redirect} + */ + public abstract Type type(); + + /** + * Indicates that subprocess I/O will be connected to the + * current Java process over a pipe. + * + * This is the default handling of subprocess standard I/O. + * + *

It will always be true that + *

 {@code
+         * Redirect.PIPE.file() == null &&
+         * Redirect.PIPE.type() == Redirect.Type.PIPE
+         * }
+ */ + public static final Redirect PIPE = new Redirect() { + public Type type() { return Type.PIPE; } + public String toString() { return type().toString(); }}; + + /** + * Indicates that subprocess I/O source or destination will be the + * same as those of the current process. This is the normal + * behavior of most operating system command interpreters (shells). + * + *

It will always be true that + *

 {@code
+         * Redirect.INHERIT.file() == null &&
+         * Redirect.INHERIT.type() == Redirect.Type.INHERIT
+         * }
+ */ + public static final Redirect INHERIT = new Redirect() { + public Type type() { return Type.INHERIT; } + public String toString() { return type().toString(); }}; + + /** + * Returns the {@link File} source or destination associated + * with this redirect, or {@code null} if there is no such file. + * + * @return the file associated with this redirect, + * or {@code null} if there is no such file + */ + public File file() { return null; } + + /** + * When redirected to a destination file, indicates if the output + * is to be written to the end of the file. + */ + boolean append() { + throw new UnsupportedOperationException(); + } + + /** + * Returns a redirect to read from the specified file. + * + *

It will always be true that + *

 {@code
+         * Redirect.from(file).file() == file &&
+         * Redirect.from(file).type() == Redirect.Type.READ
+         * }
+ * + * @param file The {@code File} for the {@code Redirect}. + * @throws NullPointerException if the specified file is null + * @return a redirect to read from the specified file + */ + public static Redirect from(final File file) { + if (file == null) + throw new NullPointerException(); + return new Redirect() { + public Type type() { return Type.READ; } + public File file() { return file; } + public String toString() { + return "redirect to read from file \"" + file + "\""; + } + }; + } + + /** + * Returns a redirect to write to the specified file. + * If the specified file exists when the subprocess is started, + * its previous contents will be discarded. + * + *

It will always be true that + *

 {@code
+         * Redirect.to(file).file() == file &&
+         * Redirect.to(file).type() == Redirect.Type.WRITE
+         * }
+ * + * @param file The {@code File} for the {@code Redirect}. + * @throws NullPointerException if the specified file is null + * @return a redirect to write to the specified file + */ + public static Redirect to(final File file) { + if (file == null) + throw new NullPointerException(); + return new Redirect() { + public Type type() { return Type.WRITE; } + public File file() { return file; } + public String toString() { + return "redirect to write to file \"" + file + "\""; + } + boolean append() { return false; } + }; + } + + /** + * Returns a redirect to append to the specified file. + * Each write operation first advances the position to the + * end of the file and then writes the requested data. + * Whether the advancement of the position and the writing + * of the data are done in a single atomic operation is + * system-dependent and therefore unspecified. + * + *

It will always be true that + *

 {@code
+         * Redirect.appendTo(file).file() == file &&
+         * Redirect.appendTo(file).type() == Redirect.Type.APPEND
+         * }
+ * + * @param file The {@code File} for the {@code Redirect}. + * @throws NullPointerException if the specified file is null + * @return a redirect to append to the specified file + */ + public static Redirect appendTo(final File file) { + if (file == null) + throw new NullPointerException(); + return new Redirect() { + public Type type() { return Type.APPEND; } + public File file() { return file; } + public String toString() { + return "redirect to append to file \"" + file + "\""; + } + boolean append() { return true; } + }; + } + + /** + * Compares the specified object with this {@code Redirect} for + * equality. Returns {@code true} if and only if the two + * objects are identical or both objects are {@code Redirect} + * instances of the same type associated with non-null equal + * {@code File} instances. + */ + public boolean equals(Object obj) { + if (obj == this) + return true; + if (! (obj instanceof Redirect)) + return false; + Redirect r = (Redirect) obj; + if (r.type() != this.type()) + return false; + assert this.file() != null; + return this.file().equals(r.file()); + } + + /** + * Returns a hash code value for this {@code Redirect}. + * @return a hash code value for this {@code Redirect} + */ + public int hashCode() { + File file = file(); + if (file == null) + return super.hashCode(); + else + return file.hashCode(); + } + + /** + * No public constructors. Clients must use predefined + * static {@code Redirect} instances or factory methods. + */ + private Redirect() {} + } + + private Redirect[] redirects() { + if (redirects == null) + redirects = new Redirect[] { + Redirect.PIPE, Redirect.PIPE, Redirect.PIPE + }; + return redirects; + } + + /** + * Sets this process builder's standard input source. + * + * Subprocesses subsequently started by this object's {@link #start()} + * method obtain their standard input from this source. + * + *

If the source is {@link Redirect#PIPE Redirect.PIPE} + * (the initial value), then the standard input of a + * subprocess can be written to using the output stream + * returned by {@link Process#getOutputStream()}. + * If the source is set to any other value, then + * {@link Process#getOutputStream()} will return a + * null output stream. + * + * @param source the new standard input source + * @return this process builder + * @throws IllegalArgumentException + * if the redirect does not correspond to a valid source + * of data, that is, has type + * {@link Redirect.Type#WRITE WRITE} or + * {@link Redirect.Type#APPEND APPEND} + * @since 1.7 + */ + public ProcessBuilder redirectInput(Redirect source) { + if (source.type() == Redirect.Type.WRITE || + source.type() == Redirect.Type.APPEND) + throw new IllegalArgumentException( + "Redirect invalid for reading: " + source); + redirects()[0] = source; + return this; + } + + /** + * Sets this process builder's standard output destination. + * + * Subprocesses subsequently started by this object's {@link #start()} + * method send their standard output to this destination. + * + *

If the destination is {@link Redirect#PIPE Redirect.PIPE} + * (the initial value), then the standard output of a subprocess + * can be read using the input stream returned by {@link + * Process#getInputStream()}. + * If the destination is set to any other value, then + * {@link Process#getInputStream()} will return a + * null input stream. + * + * @param destination the new standard output destination + * @return this process builder + * @throws IllegalArgumentException + * if the redirect does not correspond to a valid + * destination of data, that is, has type + * {@link Redirect.Type#READ READ} + * @since 1.7 + */ + public ProcessBuilder redirectOutput(Redirect destination) { + if (destination.type() == Redirect.Type.READ) + throw new IllegalArgumentException( + "Redirect invalid for writing: " + destination); + redirects()[1] = destination; + return this; + } + + /** + * Sets this process builder's standard error destination. + * + * Subprocesses subsequently started by this object's {@link #start()} + * method send their standard error to this destination. + * + *

If the destination is {@link Redirect#PIPE Redirect.PIPE} + * (the initial value), then the error output of a subprocess + * can be read using the input stream returned by {@link + * Process#getErrorStream()}. + * If the destination is set to any other value, then + * {@link Process#getErrorStream()} will return a + * null input stream. + * + *

If the {@link #redirectErrorStream redirectErrorStream} + * attribute has been set {@code true}, then the redirection set + * by this method has no effect. + * + * @param destination the new standard error destination + * @return this process builder + * @throws IllegalArgumentException + * if the redirect does not correspond to a valid + * destination of data, that is, has type + * {@link Redirect.Type#READ READ} + * @since 1.7 + */ + public ProcessBuilder redirectError(Redirect destination) { + if (destination.type() == Redirect.Type.READ) + throw new IllegalArgumentException( + "Redirect invalid for writing: " + destination); + redirects()[2] = destination; + return this; + } + + /** + * Sets this process builder's standard input source to a file. + * + *

This is a convenience method. An invocation of the form + * {@code redirectInput(file)} + * behaves in exactly the same way as the invocation + * {@link #redirectInput(Redirect) redirectInput} + * {@code (Redirect.from(file))}. + * + * @param file the new standard input source + * @return this process builder + * @since 1.7 + */ + public ProcessBuilder redirectInput(File file) { + return redirectInput(Redirect.from(file)); + } + + /** + * Sets this process builder's standard output destination to a file. + * + *

This is a convenience method. An invocation of the form + * {@code redirectOutput(file)} + * behaves in exactly the same way as the invocation + * {@link #redirectOutput(Redirect) redirectOutput} + * {@code (Redirect.to(file))}. + * + * @param file the new standard output destination + * @return this process builder + * @since 1.7 + */ + public ProcessBuilder redirectOutput(File file) { + return redirectOutput(Redirect.to(file)); + } + + /** + * Sets this process builder's standard error destination to a file. + * + *

This is a convenience method. An invocation of the form + * {@code redirectError(file)} + * behaves in exactly the same way as the invocation + * {@link #redirectError(Redirect) redirectError} + * {@code (Redirect.to(file))}. + * + * @param file the new standard error destination + * @return this process builder + * @since 1.7 + */ + public ProcessBuilder redirectError(File file) { + return redirectError(Redirect.to(file)); + } + + /** + * Returns this process builder's standard input source. + * + * Subprocesses subsequently started by this object's {@link #start()} + * method obtain their standard input from this source. + * The initial value is {@link Redirect#PIPE Redirect.PIPE}. + * + * @return this process builder's standard input source + * @since 1.7 + */ + public Redirect redirectInput() { + return (redirects == null) ? Redirect.PIPE : redirects[0]; + } + + /** + * Returns this process builder's standard output destination. + * + * Subprocesses subsequently started by this object's {@link #start()} + * method redirect their standard output to this destination. + * The initial value is {@link Redirect#PIPE Redirect.PIPE}. + * + * @return this process builder's standard output destination + * @since 1.7 + */ + public Redirect redirectOutput() { + return (redirects == null) ? Redirect.PIPE : redirects[1]; + } + + /** + * Returns this process builder's standard error destination. + * + * Subprocesses subsequently started by this object's {@link #start()} + * method redirect their standard error to this destination. + * The initial value is {@link Redirect#PIPE Redirect.PIPE}. + * + * @return this process builder's standard error destination + * @since 1.7 + */ + public Redirect redirectError() { + return (redirects == null) ? Redirect.PIPE : redirects[2]; + } + + /** + * Sets the source and destination for subprocess standard I/O + * to be the same as those of the current Java process. + * + *

+     * This is a convenience method. An invocation of the form
+     * {@code
+     * pb.inheritIO()
+     * }
+     * behaves in exactly the same way as the invocation
+     * {@code
+     * pb.redirectInput(Redirect.INHERIT)
+     *   .redirectOutput(Redirect.INHERIT)
+     *   .redirectError(Redirect.INHERIT)
+     * }
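(Aside, not part of this patch: a small sketch of the shorthand above in use; the build command is hypothetical and platform-dependent.)

import java.io.IOException;

class InheritIODemo {
    public static void main(String[] args) throws IOException, InterruptedException {
        // The child shares the parent's stdin/stdout/stderr, much like C's system().
        int exit = new ProcessBuilder("make", "all")   // hypothetical command
                .inheritIO()
                .start()
                .waitFor();
        System.out.println("child exited with " + exit);
    }
}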
+ * + * This gives behavior equivalent to most operating system + * command interpreters, or the standard C library function + * {@code system()}. + * + * @return this process builder + * @since 1.7 + */ + public ProcessBuilder inheritIO() { + Arrays.fill(redirects(), Redirect.INHERIT); + return this; + } + + /** + * Tells whether this process builder merges standard error and + * standard output. + * + *

If this property is {@code true}, then any error output + * generated by subprocesses subsequently started by this object's + * {@link #start()} method will be merged with the standard + * output, so that both can be read using the + * {@link Process#getInputStream()} method. This makes it easier + * to correlate error messages with the corresponding output. + * The initial value is {@code false}. + * + * @return this process builder's {@code redirectErrorStream} property + */ + public boolean redirectErrorStream() { + return redirectErrorStream; + } + + /** + * Sets this process builder's {@code redirectErrorStream} property. + * + *

If this property is {@code true}, then any error output + * generated by subprocesses subsequently started by this object's + * {@link #start()} method will be merged with the standard + * output, so that both can be read using the + * {@link Process#getInputStream()} method. This makes it easier + * to correlate error messages with the corresponding output. + * The initial value is {@code false}. + * + * @param redirectErrorStream the new property value + * @return this process builder + */ + public ProcessBuilder redirectErrorStream(boolean redirectErrorStream) { + this.redirectErrorStream = redirectErrorStream; + return this; + } + + /** + * Starts a new process using the attributes of this process builder. + * + *

The new process will + * invoke the command and arguments given by {@link #command()}, + * in a working directory as given by {@link #directory()}, + * with a process environment as given by {@link #environment()}. + * + *

This method checks that the command is a valid operating + * system command. Which commands are valid is system-dependent, + * but at the very least the command must be a non-empty list of + * non-null strings. + * + *

A minimal set of system dependent environment variables may + * be required to start a process on some operating systems. + * As a result, the subprocess may inherit additional environment variable + * settings beyond those in the process builder's {@link #environment()}. + * + *

If there is a security manager, its + * {@link SecurityManager#checkExec checkExec} + * method is called with the first component of this object's + * {@code command} array as its argument. This may result in + * a {@link SecurityException} being thrown. + * + *

+     * Starting an operating system process is highly system-dependent.
+     * Among the many things that can go wrong are:
+     *
+     *   • The operating system program file was not found.
+     *   • Access to the program file was denied.
+     *   • The working directory does not exist.
+     *

In such cases an exception will be thrown. The exact nature + * of the exception is system-dependent, but it will always be a + * subclass of {@link IOException}. + * + *
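(Aside, not part of this patch: a hedged sketch of calling start() and handling the failure modes listed above. The command, working directory, and output handling are illustrative assumptions.)

import java.io.BufferedReader;
import java.io.File;
import java.io.IOException;
import java.io.InputStreamReader;

class StartDemo {
    public static void main(String[] args) throws InterruptedException {
        ProcessBuilder pb = new ProcessBuilder("gradle", "build")  // hypothetical command
                .directory(new File("/tmp/project"))               // hypothetical working directory
                .redirectErrorStream(true);                        // merge stderr into stdout
        try {
            Process p = pb.start();
            try (BufferedReader r = new BufferedReader(
                    new InputStreamReader(p.getInputStream()))) {
                String line;
                while ((line = r.readLine()) != null) {
                    System.out.println(line);                      // echo the child's merged output
                }
            }
            System.out.println("exit code: " + p.waitFor());
        } catch (IOException e) {
            // Raised when the program is missing, not executable, or the directory does not exist.
            System.err.println("could not start process: " + e.getMessage());
        }
    }
}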

Subsequent modifications to this process builder will not + * affect the returned {@link Process}. + * + * @return a new {@link Process} object for managing the subprocess + * + * @throws NullPointerException + * if an element of the command list is null + * + * @throws IndexOutOfBoundsException + * if the command is an empty list (has size {@code 0}) + * + * @throws SecurityException + * if a security manager exists and + *

    + * + *
+     *   • its {@link SecurityManager#checkExec checkExec}
+     *     method doesn't allow creation of the subprocess, or
+     *
+     *   • the standard input to the subprocess was
+     *     {@linkplain #redirectInput redirected from a file}
+     *     and the security manager's
+     *     {@link SecurityManager#checkRead checkRead} method
+     *     denies read access to the file, or
+     *
+     *   • the standard output or standard error of the
+     *     subprocess was
+     *     {@linkplain #redirectOutput redirected to a file}
+     *     and the security manager's
+     *     {@link SecurityManager#checkWrite checkWrite} method
+     *     denies write access to the file
+ * + * @throws IOException if an I/O error occurs + * + * @see Runtime#exec(String[], String[], java.io.File) + */ + public Process start() throws IOException { + // Must convert to array first -- a malicious user-supplied + // list might try to circumvent the security check. + String[] cmdarray = command.toArray(new String[command.size()]); + cmdarray = cmdarray.clone(); + + for (String arg : cmdarray) + if (arg == null) + throw new NullPointerException(); + // Throws IndexOutOfBoundsException if command is empty + String prog = cmdarray[0]; + + SecurityManager security = System.getSecurityManager(); + if (security != null) + security.checkExec(prog); + + String dir = directory == null ? null : directory.toString(); + + for (int i = 1; i < cmdarray.length; i++) { + if (cmdarray[i].indexOf('\u0000') >= 0) { + throw new IOException("invalid null character in command"); + } + } + + try { + return ProcessImpl.start(cmdarray, + environment, + dir, + redirects, + redirectErrorStream); + } catch (IOException | IllegalArgumentException e) { + String exceptionInfo = ": " + e.getMessage(); + Throwable cause = e; + if ((e instanceof IOException) && security != null) { + // Can not disclose the fail reason for read-protected files. + try { + security.checkRead(prog); + } catch (SecurityException se) { + exceptionInfo = ""; + cause = se; + } + } + // It's much easier for us to create a high-quality error + // message than the low-level C code which found the problem. + throw new IOException( + "Cannot run program \"" + prog + "\"" + + (dir == null ? "" : " (in directory \"" + dir + "\")") + + exceptionInfo, + cause); + } + } +} diff --git a/src/MapleFE/test/java/openjdk/ProcessBuilder.java.result b/src/MapleFE/test/java/openjdk/ProcessBuilder.java.result new file mode 100644 index 0000000000000000000000000000000000000000..b94bef0597edd4df522c431578f4fbc3d2436a9f --- /dev/null +++ b/src/MapleFE/test/java/openjdk/ProcessBuilder.java.result @@ -0,0 +1,305 @@ +Matched 5 tokens. +Matched 12 tokens. +Matched 19 tokens. +Matched 26 tokens. +Matched 33 tokens. +Matched 40 tokens. +Matched 47 tokens. +Matched 54 tokens. +Matched 61 tokens. +Matched 1635 tokens. 
+============= Module =========== +== Sub Tree == +package java.lang +== Sub Tree == +import java.io.File +== Sub Tree == +import java.io.IOException +== Sub Tree == +import java.io.InputStream +== Sub Tree == +import java.io.OutputStream +== Sub Tree == +import java.util.Arrays +== Sub Tree == +import java.util.ArrayList +== Sub Tree == +import java.util.List +== Sub Tree == +import java.util.Map +== Sub Tree == +class ProcessBuilder + Fields: + command directory environment redirectErrorStream redirects + Instance Initializer: + Constructors: + constructor ProcessBuilder(command) throws: + cond-branch cond:command EQ null + true branch : + new NullPointerException() false branch : + + this.command Assign command + constructor ProcessBuilder(command) throws: + this.command Assign new ArrayList(command.length) + String + arg + command + this.command.add(arg) + Methods: + func command(command) throws: + cond-branch cond:command EQ null + true branch : + new NullPointerException() false branch : + + this.command Assign command + return this + func command(command) throws: + this.command Assign new ArrayList(command.length) + String + arg + command + this.command.add(arg) + return this + func command() throws: + return command + func environment() throws: + Decl: security=System.getSecurityManager() + cond-branch cond:security NE null + true branch : + security.checkPermission(new RuntimePermission("getenv.*")) false branch : + + cond-branch cond:environment EQ null + true branch : + environment Assign ProcessEnvironment.environment() false branch : + + assert environment NE null : + return environment + func environment(envp) throws: + assert environment EQ null : + cond-branch cond:envp NE null + true branch : + environment Assign ProcessEnvironment.emptyEnvironment(envp.length) + assert environment NE null : + String + envstring + envp + cond-branch cond:envstring.indexOf((int)0) NE -1 + true branch : + envstring Assign envstring.replaceFirst("\u0000.*","") false branch : + + Decl: eqlsign=envstring.indexOf(=,ProcessEnvironment.MIN_NAME_LENGTH) + cond-branch cond:eqlsign NE -1 + true branch : + environment.put(envstring.substring(0,eqlsign),envstring.substring(eqlsign Add 1)) false branch : + + + false branch : + + return this + func directory() throws: + return directory + func directory(directory) throws: + this.directory Assign directory + return this + func redirects() throws: + cond-branch cond:redirects EQ null + true branch : + redirects Assign false branch : + + return redirects + func redirectInput(source) throws: + cond-branch cond:source.type() EQ Redirect.Type.WRITE Lor source.type() EQ Redirect.Type.APPEND + true branch : + new IllegalArgumentException("Redirect invalid for reading: " Add source) false branch : + + Assign source + return this + func redirectOutput(destination) throws: + cond-branch cond:destination.type() EQ Redirect.Type.READ + true branch : + new IllegalArgumentException("Redirect invalid for writing: " Add destination) false branch : + + Assign destination + return this + func redirectError(destination) throws: + cond-branch cond:destination.type() EQ Redirect.Type.READ + true branch : + new IllegalArgumentException("Redirect invalid for writing: " Add destination) false branch : + + Assign destination + return this + func redirectInput(file) throws: + return redirectInput(Redirect.from(file)) + func redirectOutput(file) throws: + return redirectOutput(Redirect.to(file)) + func redirectError(file) throws: + return redirectError(Redirect.to(file)) + func 
redirectInput() throws: + return + func redirectOutput() throws: + return + func redirectError() throws: + return + func inheritIO() throws: + Arrays.fill(redirects(),Redirect.INHERIT) + return this + func redirectErrorStream() throws: + return redirectErrorStream + func redirectErrorStream(redirectErrorStream) throws: + this.redirectErrorStream Assign redirectErrorStream + return this + func start() throws: IOException + Decl: cmdarray=command.toArray(String,command.size()) + cmdarray Assign cmdarray.clone() + String + arg + cmdarray + cond-branch cond:arg EQ null + true branch : + new NullPointerException() false branch : + + Decl: prog= + Decl: security=System.getSecurityManager() + cond-branch cond:security NE null + true branch : + security.checkExec(prog) false branch : + + Decl: dir= + for ( ) + cond-branch cond:.indexOf(0) GE 0 + true branch : + new IOException("invalid null character in command") + false branch : + + + return ProcessImpl.start(cmdarray,environment,dir,redirects,redirectErrorStream) + + IOException + IllegalArgumentException + e + Decl: exceptionInfo=": " Add e.getMessage() + Decl: cause=e + cond-branch cond:(e instanceof IOException) Land security NE null + true branch : + security.checkRead(prog) + + SecurityException + se + exceptionInfo Assign "" + cause Assign se + + false branch : + + new IOException("Cannot run program \"" Add prog Add "\"" Add () Add exceptionInfo,cause) + + LocalClasses: + class NullInputStream + Fields: + INSTANCE=new NullInputStream() + Instance Initializer: + Constructors: + constructor NullInputStream() throws: + Methods: + func read() throws: + return -1 + func available() throws: + return 0 + LocalClasses: + LocalInterfaces: + class NullOutputStream + Fields: + INSTANCE=new NullOutputStream() + Instance Initializer: + Constructors: + constructor NullOutputStream() throws: + Methods: + func write(b) throws: IOException + new IOException("Stream closed") + LocalClasses: + LocalInterfaces: + class Redirect + Fields: + PIPE=new Redirect() INHERIT=new Redirect() + Instance Initializer: + Constructors: + constructor Redirect() throws: + Methods: + func type() throws: + func file() throws: + return null + func append() throws: + new UnsupportedOperationException() + func from(file) throws: + cond-branch cond:file EQ null + true branch : + new NullPointerException() false branch : + + return new Redirect() + func to(file) throws: + cond-branch cond:file EQ null + true branch : + new NullPointerException() false branch : + + return new Redirect() + func appendTo(file) throws: + cond-branch cond:file EQ null + true branch : + new NullPointerException() false branch : + + return new Redirect() + func equals(obj) throws: + cond-branch cond:obj EQ this + true branch : + return true false branch : + + cond-branch cond:(obj instanceof Redirect) + true branch : + return false false branch : + + Decl: r=(Redirect)obj + cond-branch cond:r.type() NE this.type() + true branch : + return false false branch : + + assert this.file() NE null : + return this.file().equals(r.file()) + func hashCode() throws: + Decl: file=file() + cond-branch cond:file EQ null + true branch : + return super.hashCode() false branch : + return file.hashCode() + LocalClasses: + class[JavaEnum] Type + Fields: + + Instance Initializer: + Constructors: + Methods: + LocalClasses: + LocalInterfaces: + LocalInterfaces: + LocalInterfaces: + +Field Decl Duplication! Identifier:command is duplicated with Function:command +Field Decl Duplication! 
Identifier:command is duplicated with Function:command +Field Decl Duplication! Identifier:command is duplicated with Function:command +Field Decl Duplication! Identifier:directory is duplicated with Function:directory +Field Decl Duplication! Identifier:directory is duplicated with Function:directory +Field Decl Duplication! Identifier:environment is duplicated with Function:environment +Field Decl Duplication! Identifier:environment is duplicated with Function:environment +Field Decl Duplication! Identifier:redirectErrorStream is duplicated with Function:redirectErrorStream +Field Decl Duplication! Identifier:redirectErrorStream is duplicated with Function:redirectErrorStream +Field Decl Duplication! Identifier:redirects is duplicated with Function:redirects +UserType:ArrayList has no decl. +Identifier:String has no decl. +Identifier:arg has no decl. +Identifier:cmdarray has no decl. +Identifier:String has no decl. +Identifier:arg has no decl. +Identifier:cmdarray has no decl. +Identifier:IOException has no decl. +Identifier:IllegalArgumentException has no decl. +Identifier:e has no decl. +UserType:IOException has no decl. +Identifier:cause has no decl. diff --git a/src/MapleFE/test/java/openjdk/ProcessEnvironment.java b/src/MapleFE/test/java/openjdk/ProcessEnvironment.java new file mode 100644 index 0000000000000000000000000000000000000000..08d260cf4a3cd3a66a11d435d99ae404b68e9d8e --- /dev/null +++ b/src/MapleFE/test/java/openjdk/ProcessEnvironment.java @@ -0,0 +1,440 @@ +/* + * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* We use APIs that access the standard Unix environ array, which + * is defined by UNIX98 to look like: + * + * char **environ; + * + * These are unsorted, case-sensitive, null-terminated arrays of bytes + * of the form FOO=BAR\000 which are usually encoded in the user's + * default encoding (file.encoding is an excellent choice for + * encoding/decoding these). However, even though the user cannot + * directly access the underlying byte representation, we take pains + * to pass on the child the exact byte representation we inherit from + * the parent process for any environment name or value not created by + * Javaland. So we keep track of all the byte representations. + * + * Internally, we define the types Variable and Value that exhibit + * String/byteArray duality. 
The internal representation of the + * environment then looks like a Map. But we don't + * expose this to the user -- we only provide a Map + * view, although we could also provide a Map view. + * + * The non-private methods in this class are not for general use even + * within this package. Instead, they are the system-dependent parts + * of the system-independent method of the same name. Don't even + * think of using this class unless your method's name appears below. + * + * @author Martin Buchholz + * @since 1.5 + */ + +package java.lang; + +import java.io.*; +import java.util.*; + + +final class ProcessEnvironment +{ + private static final HashMap theEnvironment; + private static final Map theUnmodifiableEnvironment; + static final int MIN_NAME_LENGTH = 0; + + static { + // We cache the C environment. This means that subsequent calls + // to putenv/setenv from C will not be visible from Java code. + byte[][] environ = environ(); + theEnvironment = new HashMap<>(environ.length/2 + 3); + // Read environment variables back to front, + // so that earlier variables override later ones. + for (int i = environ.length-1; i > 0; i-=2) + theEnvironment.put(Variable.valueOf(environ[i-1]), + Value.valueOf(environ[i])); + + theUnmodifiableEnvironment + = Collections.unmodifiableMap + (new StringEnvironment(theEnvironment)); + } + + /* Only for use by System.getenv(String) */ + static String getenv(String name) { + return theUnmodifiableEnvironment.get(name); + } + + /* Only for use by System.getenv() */ + static Map getenv() { + return theUnmodifiableEnvironment; + } + + /* Only for use by ProcessBuilder.environment() */ + @SuppressWarnings("unchecked") + static Map environment() { + return new StringEnvironment + ((Map)(theEnvironment.clone())); + } + + /* Only for use by Runtime.exec(...String[]envp...) */ + static Map emptyEnvironment(int capacity) { + return new StringEnvironment(new HashMap(capacity)); + } + + private static native byte[][] environ(); + + // This class is not instantiable. + private ProcessEnvironment() {} + + // Check that name is suitable for insertion into Environment map + private static void validateVariable(String name) { + if (name.indexOf('=') != -1 || + name.indexOf('\u0000') != -1) + throw new IllegalArgumentException + ("Invalid environment variable name: \"" + name + "\""); + } + + // Check that value is suitable for insertion into Environment map + private static void validateValue(String value) { + if (value.indexOf('\u0000') != -1) + throw new IllegalArgumentException + ("Invalid environment variable value: \"" + value + "\""); + } + + // A class hiding the byteArray-String duality of + // text data on Unixoid operating systems. 
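(Aside, not part of ProcessEnvironment.java: a standalone sketch of the FOO=BAR\000 environ layout described in the header comment above, roughly what StringEnvironment.toEnvironmentBlock produces further down. The UTF-8 charset is a simplifying assumption; the real code preserves the exact inherited bytes.)

import java.nio.charset.StandardCharsets;
import java.util.LinkedHashMap;
import java.util.Map;

class EnvironBlockSketch {
    // Flatten {FOO=BAR, BAZ=QUX} into the bytes of "FOO=BAR\0BAZ=QUX\0".
    static byte[] toBlock(Map<String, String> env) {
        StringBuilder sb = new StringBuilder();
        for (Map.Entry<String, String> e : env.entrySet()) {
            sb.append(e.getKey()).append('=').append(e.getValue()).append('\0');
        }
        return sb.toString().getBytes(StandardCharsets.UTF_8);
    }

    public static void main(String[] args) {
        Map<String, String> env = new LinkedHashMap<>();
        env.put("FOO", "BAR");
        env.put("BAZ", "QUX");
        System.out.println(toBlock(env).length);  // 16 bytes: two 8-byte name=value\0 entries
    }
}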
+ private static abstract class ExternalData { + protected final String str; + protected final byte[] bytes; + + protected ExternalData(String str, byte[] bytes) { + this.str = str; + this.bytes = bytes; + } + + public byte[] getBytes() { + return bytes; + } + + public String toString() { + return str; + } + + public boolean equals(Object o) { + return o instanceof ExternalData + && arrayEquals(getBytes(), ((ExternalData) o).getBytes()); + } + + public int hashCode() { + return arrayHash(getBytes()); + } + } + + private static class Variable + extends ExternalData implements Comparable + { + protected Variable(String str, byte[] bytes) { + super(str, bytes); + } + + public static Variable valueOfQueryOnly(Object str) { + return valueOfQueryOnly((String) str); + } + + public static Variable valueOfQueryOnly(String str) { + return new Variable(str, str.getBytes()); + } + + public static Variable valueOf(String str) { + validateVariable(str); + return valueOfQueryOnly(str); + } + + public static Variable valueOf(byte[] bytes) { + return new Variable(new String(bytes), bytes); + } + + public int compareTo(Variable variable) { + return arrayCompare(getBytes(), variable.getBytes()); + } + + public boolean equals(Object o) { + return o instanceof Variable && super.equals(o); + } + } + + private static class Value + extends ExternalData implements Comparable + { + protected Value(String str, byte[] bytes) { + super(str, bytes); + } + + public static Value valueOfQueryOnly(Object str) { + return valueOfQueryOnly((String) str); + } + + public static Value valueOfQueryOnly(String str) { + return new Value(str, str.getBytes()); + } + + public static Value valueOf(String str) { + validateValue(str); + return valueOfQueryOnly(str); + } + + public static Value valueOf(byte[] bytes) { + return new Value(new String(bytes), bytes); + } + + public int compareTo(Value value) { + return arrayCompare(getBytes(), value.getBytes()); + } + + public boolean equals(Object o) { + return o instanceof Value && super.equals(o); + } + } + + // This implements the String map view the user sees. + private static class StringEnvironment + extends AbstractMap + { + private Map m; + private static String toString(Value v) { + return v == null ? 
null : v.toString(); + } + public StringEnvironment(Map m) {this.m = m;} + public int size() {return m.size();} + public boolean isEmpty() {return m.isEmpty();} + public void clear() { m.clear();} + public boolean containsKey(Object key) { + return m.containsKey(Variable.valueOfQueryOnly(key)); + } + public boolean containsValue(Object value) { + return m.containsValue(Value.valueOfQueryOnly(value)); + } + public String get(Object key) { + return toString(m.get(Variable.valueOfQueryOnly(key))); + } + public String put(String key, String value) { + return toString(m.put(Variable.valueOf(key), + Value.valueOf(value))); + } + public String remove(Object key) { + return toString(m.remove(Variable.valueOfQueryOnly(key))); + } + public Set keySet() { + return new StringKeySet(m.keySet()); + } + public Set> entrySet() { + return new StringEntrySet(m.entrySet()); + } + public Collection values() { + return new StringValues(m.values()); + } + + // It is technically feasible to provide a byte-oriented view + // as follows: + // public Map asByteArrayMap() { + // return new ByteArrayEnvironment(m); + // } + + + // Convert to Unix style environ as a monolithic byte array + // inspired by the Windows Environment Block, except we work + // exclusively with bytes instead of chars, and we need only + // one trailing NUL on Unix. + // This keeps the JNI as simple and efficient as possible. + public byte[] toEnvironmentBlock(int[]envc) { + int count = m.size() * 2; // For added '=' and NUL + for (Map.Entry entry : m.entrySet()) { + count += entry.getKey().getBytes().length; + count += entry.getValue().getBytes().length; + } + + byte[] block = new byte[count]; + + int i = 0; + for (Map.Entry entry : m.entrySet()) { + byte[] key = entry.getKey ().getBytes(); + byte[] value = entry.getValue().getBytes(); + System.arraycopy(key, 0, block, i, key.length); + i+=key.length; + block[i++] = (byte) '='; + System.arraycopy(value, 0, block, i, value.length); + i+=value.length + 1; + // No need to write NUL byte explicitly + //block[i++] = (byte) '\u0000'; + } + envc[0] = m.size(); + return block; + } + } + + static byte[] toEnvironmentBlock(Map map, int[]envc) { + return map == null ? 
null : + ((StringEnvironment)map).toEnvironmentBlock(envc); + } + + + private static class StringEntry + implements Map.Entry + { + private final Map.Entry e; + public StringEntry(Map.Entry e) {this.e = e;} + public String getKey() {return e.getKey().toString();} + public String getValue() {return e.getValue().toString();} + public String setValue(String newValue) { + return e.setValue(Value.valueOf(newValue)).toString(); + } + public String toString() {return getKey() + "=" + getValue();} + public boolean equals(Object o) { + return o instanceof StringEntry + && e.equals(((StringEntry)o).e); + } + public int hashCode() {return e.hashCode();} + } + + private static class StringEntrySet + extends AbstractSet> + { + private final Set> s; + public StringEntrySet(Set> s) {this.s = s;} + public int size() {return s.size();} + public boolean isEmpty() {return s.isEmpty();} + public void clear() { s.clear();} + public Iterator> iterator() { + return new Iterator>() { + Iterator> i = s.iterator(); + public boolean hasNext() {return i.hasNext();} + public Map.Entry next() { + return new StringEntry(i.next()); + } + public void remove() {i.remove();} + }; + } + private static Map.Entry vvEntry(final Object o) { + if (o instanceof StringEntry) + return ((StringEntry)o).e; + return new Map.Entry() { + public Variable getKey() { + return Variable.valueOfQueryOnly(((Map.Entry)o).getKey()); + } + public Value getValue() { + return Value.valueOfQueryOnly(((Map.Entry)o).getValue()); + } + public Value setValue(Value value) { + throw new UnsupportedOperationException(); + } + }; + } + public boolean contains(Object o) { return s.contains(vvEntry(o)); } + public boolean remove(Object o) { return s.remove(vvEntry(o)); } + public boolean equals(Object o) { + return o instanceof StringEntrySet + && s.equals(((StringEntrySet) o).s); + } + public int hashCode() {return s.hashCode();} + } + + private static class StringValues + extends AbstractCollection + { + private final Collection c; + public StringValues(Collection c) {this.c = c;} + public int size() {return c.size();} + public boolean isEmpty() {return c.isEmpty();} + public void clear() { c.clear();} + public Iterator iterator() { + return new Iterator() { + Iterator i = c.iterator(); + public boolean hasNext() {return i.hasNext();} + public String next() {return i.next().toString();} + public void remove() {i.remove();} + }; + } + public boolean contains(Object o) { + return c.contains(Value.valueOfQueryOnly(o)); + } + public boolean remove(Object o) { + return c.remove(Value.valueOfQueryOnly(o)); + } + public boolean equals(Object o) { + return o instanceof StringValues + && c.equals(((StringValues)o).c); + } + public int hashCode() {return c.hashCode();} + } + + private static class StringKeySet extends AbstractSet { + private final Set s; + public StringKeySet(Set s) {this.s = s;} + public int size() {return s.size();} + public boolean isEmpty() {return s.isEmpty();} + public void clear() { s.clear();} + public Iterator iterator() { + return new Iterator() { + Iterator i = s.iterator(); + public boolean hasNext() {return i.hasNext();} + public String next() {return i.next().toString();} + public void remove() { i.remove();} + }; + } + public boolean contains(Object o) { + return s.contains(Variable.valueOfQueryOnly(o)); + } + public boolean remove(Object o) { + return s.remove(Variable.valueOfQueryOnly(o)); + } + } + + // Replace with general purpose method someday + private static int arrayCompare(byte[]x, byte[] y) { + int min = x.length < y.length ? 
x.length : y.length; + for (int i = 0; i < min; i++) + if (x[i] != y[i]) + return x[i] - y[i]; + return x.length - y.length; + } + + // Replace with general purpose method someday + private static boolean arrayEquals(byte[] x, byte[] y) { + if (x.length != y.length) + return false; + for (int i = 0; i < x.length; i++) + if (x[i] != y[i]) + return false; + return true; + } + + // Replace with general purpose method someday + private static int arrayHash(byte[] x) { + int hash = 0; + for (int i = 0; i < x.length; i++) + hash = 31 * hash + x[i]; + return hash; + } + +} diff --git a/src/MapleFE/test/java/openjdk/ProcessEnvironment.java.result b/src/MapleFE/test/java/openjdk/ProcessEnvironment.java.result new file mode 100644 index 0000000000000000000000000000000000000000..10edbc49196cff4198a5bfaacd3f0319fd209597 --- /dev/null +++ b/src/MapleFE/test/java/openjdk/ProcessEnvironment.java.result @@ -0,0 +1,296 @@ +Matched 5 tokens. +Matched 12 tokens. +Matched 19 tokens. +Matched 2531 tokens. +============= Module =========== +== Sub Tree == +package java.lang +== Sub Tree == +import java.io +== Sub Tree == +import java.util +== Sub Tree == +class ProcessEnvironment + Fields: + theEnvironment theUnmodifiableEnvironment MIN_NAME_LENGTH=0 + Instance Initializer: + InstInit- 0 + Constructors: + constructor ProcessEnvironment() throws: + Methods: + func getenv(name) throws: + return theUnmodifiableEnvironment.get(name) + func getenv() throws: + return theUnmodifiableEnvironment + func environment() throws: + return new StringEnvironment(()(theEnvironment.clone())) + func emptyEnvironment(capacity) throws: + return new StringEnvironment(new HashMap(capacity)) + func environ() throws: + func validateVariable(name) throws: + cond-branch cond:name.indexOf(=) NE -1 Lor name.indexOf(0) NE -1 + true branch : + new IllegalArgumentException("Invalid environment variable name: \"" Add name Add "\"") false branch : + + func validateValue(value) throws: + cond-branch cond:value.indexOf(0) NE -1 + true branch : + new IllegalArgumentException("Invalid environment variable value: \"" Add value Add "\"") false branch : + + func toEnvironmentBlock(map,envc) throws: + return + func arrayCompare(x,y) throws: + Decl: min= + for ( ) + cond-branch cond: NE + true branch : + return Sub false branch : + + return x.length Sub y.length + func arrayEquals(x,y) throws: + cond-branch cond:x.length NE y.length + true branch : + return false false branch : + + for ( ) + cond-branch cond: NE + true branch : + return false false branch : + + return true + func arrayHash(x) throws: + Decl: hash=0 + for ( ) + hash Assign 31 Mul hash Add + return hash + LocalClasses: + class ExternalData + Fields: + str bytes + Instance Initializer: + Constructors: + constructor ExternalData(str,bytes) throws: + this.str Assign str + this.bytes Assign bytes + Methods: + func getBytes() throws: + return bytes + func toString() throws: + return str + func equals(o) throws: + return o instanceof ExternalData Land arrayEquals(getBytes(),(ExternalData)o.getBytes()) + func hashCode() throws: + return arrayHash(getBytes()) + LocalClasses: + LocalInterfaces: + class Variable + Fields: + + Instance Initializer: + Constructors: + constructor Variable(str,bytes) throws: + Methods: + func valueOfQueryOnly(str) throws: + return valueOfQueryOnly((String)str) + func valueOfQueryOnly(str) throws: + return new Variable(str,str.getBytes()) + func valueOf(str) throws: + validateVariable(str) + return valueOfQueryOnly(str) + func valueOf(bytes) throws: + return new 
Variable(new String(bytes),bytes) + func compareTo(variable) throws: + return arrayCompare(getBytes(),variable.getBytes()) + func equals(o) throws: + return o instanceof Variable Land super.equals(o) + LocalClasses: + LocalInterfaces: + class Value + Fields: + + Instance Initializer: + Constructors: + constructor Value(str,bytes) throws: + Methods: + func valueOfQueryOnly(str) throws: + return valueOfQueryOnly((String)str) + func valueOfQueryOnly(str) throws: + return new Value(str,str.getBytes()) + func valueOf(str) throws: + validateValue(str) + return valueOfQueryOnly(str) + func valueOf(bytes) throws: + return new Value(new String(bytes),bytes) + func compareTo(value) throws: + return arrayCompare(getBytes(),value.getBytes()) + func equals(o) throws: + return o instanceof Value Land super.equals(o) + LocalClasses: + LocalInterfaces: + class StringEnvironment + Fields: + m + Instance Initializer: + Constructors: + constructor StringEnvironment(m) throws: + this.m Assign m + Methods: + func toString(v) throws: + return + func size() throws: + return m.size() + func isEmpty() throws: + return m.isEmpty() + func clear() throws: + m.clear() + func containsKey(key) throws: + return m.containsKey(Variable.valueOfQueryOnly(key)) + func containsValue(value) throws: + return m.containsValue(Value.valueOfQueryOnly(value)) + func get(key) throws: + return toString(m.get(Variable.valueOfQueryOnly(key))) + func put(key,value) throws: + return toString(m.put(Variable.valueOf(key),Value.valueOf(value))) + func remove(key) throws: + return toString(m.remove(Variable.valueOfQueryOnly(key))) + func keySet() throws: + return new StringKeySet(m.keySet()) + func entrySet() throws: + return new StringEntrySet(m.entrySet()) + func values() throws: + return new StringValues(m.values()) + func toEnvironmentBlock(envc) throws: + Decl: count=m.size() Mul 2 + Map + Entry + Variable + Value + entry + m.entrySet() + count AddAssign entry.getKey().getBytes().length + count AddAssign entry.getValue().getBytes().length + + Decl: block= + Decl: i=0 + Map + Entry + Variable + Value + entry + m.entrySet() + Decl: key=entry.getKey().getBytes() + Decl: value=entry.getValue().getBytes() + System.arraycopy(key,0,block,i,key.length) + i AddAssign key.length + Assign (byte)= + System.arraycopy(value,0,block,i,value.length) + i AddAssign value.length Add 1 + + Assign m.size() + return block + LocalClasses: + LocalInterfaces: + class StringEntry + Fields: + e + Instance Initializer: + Constructors: + constructor StringEntry(e) throws: + this.e Assign e + Methods: + func getKey() throws: + return e.getKey().toString() + func getValue() throws: + return e.getValue().toString() + func setValue(newValue) throws: + return e.setValue(Value.valueOf(newValue)).toString() + func toString() throws: + return getKey() Add "=" Add getValue() + func equals(o) throws: + return o instanceof StringEntry Land e.equals((StringEntry)o.e) + func hashCode() throws: + return e.hashCode() + LocalClasses: + LocalInterfaces: + class StringEntrySet + Fields: + s + Instance Initializer: + Constructors: + constructor StringEntrySet(s) throws: + this.s Assign s + Methods: + func size() throws: + return s.size() + func isEmpty() throws: + return s.isEmpty() + func clear() throws: + s.clear() + func iterator() throws: + return new Iterator() + func vvEntry(o) throws: + cond-branch cond:o instanceof StringEntry + true branch : + return (StringEntry)o.e false branch : + + return new Map() + func contains(o) throws: + return s.contains(vvEntry(o)) + func remove(o) 
throws: + return s.remove(vvEntry(o)) + func equals(o) throws: + return o instanceof StringEntrySet Land s.equals((StringEntrySet)o.s) + func hashCode() throws: + return s.hashCode() + LocalClasses: + LocalInterfaces: + class StringValues + Fields: + c + Instance Initializer: + Constructors: + constructor StringValues(c) throws: + this.c Assign c + Methods: + func size() throws: + return c.size() + func isEmpty() throws: + return c.isEmpty() + func clear() throws: + c.clear() + func iterator() throws: + return new Iterator() + func contains(o) throws: + return c.contains(Value.valueOfQueryOnly(o)) + func remove(o) throws: + return c.remove(Value.valueOfQueryOnly(o)) + func equals(o) throws: + return o instanceof StringValues Land c.equals((StringValues)o.c) + func hashCode() throws: + return c.hashCode() + LocalClasses: + LocalInterfaces: + class StringKeySet + Fields: + s + Instance Initializer: + Constructors: + constructor StringKeySet(s) throws: + this.s Assign s + Methods: + func size() throws: + return s.size() + func isEmpty() throws: + return s.isEmpty() + func clear() throws: + s.clear() + func iterator() throws: + return new Iterator() + func contains(o) throws: + return s.contains(Variable.valueOfQueryOnly(o)) + func remove(o) throws: + return s.remove(Variable.valueOfQueryOnly(o)) + LocalClasses: + LocalInterfaces: + LocalInterfaces: + diff --git a/src/MapleFE/test/java/openjdk/Runtime.java b/src/MapleFE/test/java/openjdk/Runtime.java new file mode 100644 index 0000000000000000000000000000000000000000..3d5281479e13c4825896dc4fe34a0ee19dc6f3b1 --- /dev/null +++ b/src/MapleFE/test/java/openjdk/Runtime.java @@ -0,0 +1,1161 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * Copyright (c) 1995, 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +package java.lang; + +import dalvik.annotation.optimization.FastNative; +import java.io.*; +import java.util.StringTokenizer; +import sun.reflect.CallerSensitive; +import java.lang.ref.FinalizerReference; +import java.util.ArrayList; +import java.util.List; +import dalvik.system.BaseDexClassLoader; +import dalvik.system.VMDebug; +import dalvik.system.VMStack; +import dalvik.system.VMRuntime; +import libcore.io.IoUtils; +import libcore.io.Libcore; +import libcore.util.EmptyArray; +import static android.system.OsConstants._SC_NPROCESSORS_CONF; + +/** + * Every Java application has a single instance of class + * Runtime that allows the application to interface with + * the environment in which the application is running. The current + * runtime can be obtained from the getRuntime method. + *

+ * An application cannot create its own instance of this class. + * + * @author unascribed + * @see java.lang.Runtime#getRuntime() + * @since JDK1.0 + */ + +public class Runtime { + private static Runtime currentRuntime = new Runtime(); + + /** + * Holds the list of threads to run when the VM terminates + */ + private List shutdownHooks = new ArrayList(); + + /** + * Reflects whether finalization should be run for all objects + * when the VM terminates. + */ + private static boolean finalizeOnExit; + + /** + * Reflects whether we are already shutting down the VM. + */ + private boolean shuttingDown; + + /** + * Reflects whether we are tracing method calls. + */ + private boolean tracingMethods; + + private static native void nativeExit(int code); + + /** + * Returns the runtime object associated with the current Java application. + * Most of the methods of class Runtime are instance + * methods and must be invoked with respect to the current runtime object. + * + * @return the Runtime object associated with the current + * Java application. + */ + public static Runtime getRuntime() { + return currentRuntime; + } + + /** Don't let anyone else instantiate this class */ + private Runtime() {} + + /** + * Terminates the currently running Java virtual machine by initiating its + * shutdown sequence. This method never returns normally. The argument + * serves as a status code; by convention, a nonzero status code indicates + * abnormal termination. + * + *

The virtual machine's shutdown sequence consists of two phases. In + * the first phase all registered {@link #addShutdownHook shutdown hooks}, + * if any, are started in some unspecified order and allowed to run + * concurrently until they finish. In the second phase all uninvoked + * finalizers are run if {@link #runFinalizersOnExit finalization-on-exit} + * has been enabled. Once this is done the virtual machine {@link #halt + * halts}. + * + *

If this method is invoked after the virtual machine has begun its + * shutdown sequence then if shutdown hooks are being run this method will + * block indefinitely. If shutdown hooks have already been run and on-exit + * finalization has been enabled then this method halts the virtual machine + * with the given status code if the status is nonzero; otherwise, it + * blocks indefinitely. + * + *

The {@link System#exit(int) System.exit} method is the + * conventional and convenient means of invoking this method.

+ * + * @param status + * Termination status. By convention, a nonzero status code + * indicates abnormal termination. + * + * @throws SecurityException + * If a security manager is present and its {@link + * SecurityManager#checkExit checkExit} method does not permit + * exiting with the specified status + * + * @see java.lang.SecurityException + * @see java.lang.SecurityManager#checkExit(int) + * @see #addShutdownHook + * @see #removeShutdownHook + * @see #runFinalizersOnExit + * @see #halt(int) + */ + public void exit(int status) { + // Make sure we don't try this several times + synchronized(this) { + if (!shuttingDown) { + shuttingDown = true; + + Thread[] hooks; + synchronized (shutdownHooks) { + // create a copy of the hooks + hooks = new Thread[shutdownHooks.size()]; + shutdownHooks.toArray(hooks); + } + + // Start all shutdown hooks concurrently + for (Thread hook : hooks) { + hook.start(); + } + + // Wait for all shutdown hooks to finish + for (Thread hook : hooks) { + try { + hook.join(); + } catch (InterruptedException ex) { + // Ignore, since we are at VM shutdown. + } + } + + // Ensure finalization on exit, if requested + if (finalizeOnExit) { + runFinalization(); + } + + // Get out of here finally... + nativeExit(status); + } + } + } + + /** + * Registers a new virtual-machine shutdown hook. + * + *

The Java virtual machine shuts down in response to two kinds + * of events: + * + *

    + * + *
+     *   • The program exits normally, when the last non-daemon
+     *     thread exits or when the {@link #exit exit} (equivalently,
+     *     {@link System#exit(int) System.exit}) method is invoked, or
+     *
+     *   • The virtual machine is terminated in response to a
+     *     user interrupt, such as typing ^C, or a system-wide event,
+     *     such as user logoff or system shutdown.
+ * + *

A shutdown hook is simply an initialized but unstarted + * thread. When the virtual machine begins its shutdown sequence it will + * start all registered shutdown hooks in some unspecified order and let + * them run concurrently. When all the hooks have finished it will then + * run all uninvoked finalizers if finalization-on-exit has been enabled. + * Finally, the virtual machine will halt. Note that daemon threads will + * continue to run during the shutdown sequence, as will non-daemon threads + * if shutdown was initiated by invoking the {@link #exit exit} + * method. + * + *

Once the shutdown sequence has begun it can be stopped only by + * invoking the {@link #halt halt} method, which forcibly + * terminates the virtual machine. + * + *

Once the shutdown sequence has begun it is impossible to register a + * new shutdown hook or de-register a previously-registered hook. + * Attempting either of these operations will cause an + * {@link IllegalStateException} to be thrown. + * + *

+     * Shutdown hooks run at a delicate time in the life cycle of a virtual
+     * machine and should therefore be coded defensively. They should, in
+     * particular, be written to be thread-safe and to avoid deadlocks insofar
+     * as possible. They should also not rely blindly upon services that may
+     * have registered their own shutdown hooks and therefore may themselves be
+     * in the process of shutting down. Attempts to use other thread-based
+     * services such as the AWT event-dispatch thread, for example, may lead to
+     * deadlocks.
+     *

Shutdown hooks should also finish their work quickly. When a + * program invokes {@link #exit exit} the expectation is + * that the virtual machine will promptly shut down and exit. When the + * virtual machine is terminated due to user logoff or system shutdown the + * underlying operating system may only allow a fixed amount of time in + * which to shut down and exit. It is therefore inadvisable to attempt any + * user interaction or to perform a long-running computation in a shutdown + * hook. + * + *

Uncaught exceptions are handled in shutdown hooks just as in any + * other thread, by invoking the {@link ThreadGroup#uncaughtException + * uncaughtException} method of the thread's {@link + * ThreadGroup} object. The default implementation of this method + * prints the exception's stack trace to {@link System#err} and + * terminates the thread; it does not cause the virtual machine to exit or + * halt. + * + *

In rare circumstances the virtual machine may abort, that is, + * stop running without shutting down cleanly. This occurs when the + * virtual machine is terminated externally, for example with the + * SIGKILL signal on Unix or the TerminateProcess call on + * Microsoft Windows. The virtual machine may also abort if a native + * method goes awry by, for example, corrupting internal data structures or + * attempting to access nonexistent memory. If the virtual machine aborts + * then no guarantee can be made about whether or not any shutdown hooks + * will be run.
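(Aside, not part of this patch: a minimal sketch of registering and de-registering a hook as described above; the cleanup action is hypothetical.)

class ShutdownHookDemo {
    public static void main(String[] args) {
        Thread hook = new Thread(() -> System.err.println("flushing caches"));  // hypothetical cleanup
        Runtime.getRuntime().addShutdownHook(hook);   // must be an initialized but unstarted thread

        // ... application work ...

        // Hooks may be de-registered as long as shutdown has not begun.
        boolean removed = Runtime.getRuntime().removeShutdownHook(hook);
        System.out.println("hook removed: " + removed);
    }
}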

+ * + * @param hook + * An initialized but unstarted {@link Thread} object + * + * @throws IllegalArgumentException + * If the specified hook has already been registered, + * or if it can be determined that the hook is already running or + * has already been run + * + * @throws IllegalStateException + * If the virtual machine is already in the process + * of shutting down + * + * @throws SecurityException + * If a security manager is present and it denies + * {@link RuntimePermission}("shutdownHooks") + * + * @see #removeShutdownHook + * @see #halt(int) + * @see #exit(int) + * @since 1.3 + */ + public void addShutdownHook(Thread hook) { + // Sanity checks + if (hook == null) { + throw new NullPointerException("hook == null"); + } + + if (shuttingDown) { + throw new IllegalStateException("VM already shutting down"); + } + + if (hook.started) { + throw new IllegalArgumentException("Hook has already been started"); + } + + synchronized (shutdownHooks) { + if (shutdownHooks.contains(hook)) { + throw new IllegalArgumentException("Hook already registered."); + } + + shutdownHooks.add(hook); + } + } + + /** + * De-registers a previously-registered virtual-machine shutdown hook.

+ * + * @param hook the hook to remove + * @return true if the specified hook had previously been + * registered and was successfully de-registered, false + * otherwise. + * + * @throws IllegalStateException + * If the virtual machine is already in the process of shutting + * down + * + * @throws SecurityException + * If a security manager is present and it denies + * {@link RuntimePermission}("shutdownHooks") + * + * @see #addShutdownHook + * @see #exit(int) + * @since 1.3 + */ + public boolean removeShutdownHook(Thread hook) { + // Sanity checks + if (hook == null) { + throw new NullPointerException("hook == null"); + } + + if (shuttingDown) { + throw new IllegalStateException("VM already shutting down"); + } + + synchronized (shutdownHooks) { + return shutdownHooks.remove(hook); + } + } + + /** + * Forcibly terminates the currently running Java virtual machine. This + * method never returns normally. + * + *

This method should be used with extreme caution. Unlike the + * {@link #exit exit} method, this method does not cause shutdown + * hooks to be started and does not run uninvoked finalizers if + * finalization-on-exit has been enabled. If the shutdown sequence has + * already been initiated then this method does not wait for any running + * shutdown hooks or finalizers to finish their work.

+ * + * @param status + * Termination status. By convention, a nonzero status code + * indicates abnormal termination. If the {@link Runtime#exit + * exit} (equivalently, {@link System#exit(int) + * System.exit}) method has already been invoked then this + * status code will override the status code passed to that method. + * + * @throws SecurityException + * If a security manager is present and its {@link + * SecurityManager#checkExit checkExit} method does not permit + * an exit with the specified status + * + * @see #exit + * @see #addShutdownHook + * @see #removeShutdownHook + * @since 1.3 + */ + public void halt(int status) { + nativeExit(status); + } + + /** + * Enable or disable finalization on exit; doing so specifies that the + * finalizers of all objects that have finalizers that have not yet been + * automatically invoked are to be run before the Java runtime exits. + * By default, finalization on exit is disabled. + * + *

If there is a security manager, + * its checkExit method is first called + * with 0 as its argument to ensure the exit is allowed. + * This could result in a SecurityException. + * + * @param value true to enable finalization on exit, false to disable + * @deprecated This method is inherently unsafe. It may result in + * finalizers being called on live objects while other threads are + * concurrently manipulating those objects, resulting in erratic + * behavior or deadlock. + * + * @throws SecurityException + * if a security manager exists and its checkExit + * method doesn't allow the exit. + * + * @see java.lang.Runtime#exit(int) + * @see java.lang.Runtime#gc() + * @see java.lang.SecurityManager#checkExit(int) + * @since JDK1.1 + */ + @Deprecated + public static void runFinalizersOnExit(boolean value) { + finalizeOnExit = value; + } + + /** + * Executes the specified string command in a separate process. + * + *

This is a convenience method. An invocation of the form + * exec(command) + * behaves in exactly the same way as the invocation + * {@link #exec(String, String[], File) exec}(command, null, null). + * + * @param command a specified system command. + * + * @return A new {@link Process} object for managing the subprocess + * + * @throws SecurityException + * If a security manager exists and its + * {@link SecurityManager#checkExec checkExec} + * method doesn't allow creation of the subprocess + * + * @throws IOException + * If an I/O error occurs + * + * @throws NullPointerException + * If command is null + * + * @throws IllegalArgumentException + * If command is empty + * + * @see #exec(String[], String[], File) + * @see ProcessBuilder + */ + public Process exec(String command) throws IOException { + return exec(command, null, null); + } + + /** + * Executes the specified string command in a separate process with the + * specified environment. + * + *

This is a convenience method. An invocation of the form + * exec(command, envp) + * behaves in exactly the same way as the invocation + * {@link #exec(String, String[], File) exec}(command, envp, null). + * + * @param command a specified system command. + * + * @param envp array of strings, each element of which + * has environment variable settings in the format + * name=value, or + * null if the subprocess should inherit + * the environment of the current process. + * + * @return A new {@link Process} object for managing the subprocess + * + * @throws SecurityException + * If a security manager exists and its + * {@link SecurityManager#checkExec checkExec} + * method doesn't allow creation of the subprocess + * + * @throws IOException + * If an I/O error occurs + * + * @throws NullPointerException + * If command is null, + * or one of the elements of envp is null + * + * @throws IllegalArgumentException + * If command is empty + * + * @see #exec(String[], String[], File) + * @see ProcessBuilder + */ + public Process exec(String command, String[] envp) throws IOException { + return exec(command, envp, null); + } + + /** + * Executes the specified string command in a separate process with the + * specified environment and working directory. + * + *

This is a convenience method. An invocation of the form + * exec(command, envp, dir) + * behaves in exactly the same way as the invocation + * {@link #exec(String[], String[], File) exec}(cmdarray, envp, dir), + * where cmdarray is an array of all the tokens in + * command. + * + *

More precisely, the command string is broken + * into tokens using a {@link StringTokenizer} created by the call + * new {@link StringTokenizer}(command) with no + * further modification of the character categories. The tokens + * produced by the tokenizer are then placed in the new string + * array cmdarray, in the same order. + * + * @param command a specified system command. + * + * @param envp array of strings, each element of which + * has environment variable settings in the format + * name=value, or + * null if the subprocess should inherit + * the environment of the current process. + * + * @param dir the working directory of the subprocess, or + * null if the subprocess should inherit + * the working directory of the current process. + * + * @return A new {@link Process} object for managing the subprocess + * + * @throws SecurityException + * If a security manager exists and its + * {@link SecurityManager#checkExec checkExec} + * method doesn't allow creation of the subprocess + * + * @throws IOException + * If an I/O error occurs + * + * @throws NullPointerException + * If command is null, + * or one of the elements of envp is null + * + * @throws IllegalArgumentException + * If command is empty + * + * @see ProcessBuilder + * @since 1.3 + */ + public Process exec(String command, String[] envp, File dir) + throws IOException { + if (command.length() == 0) + throw new IllegalArgumentException("Empty command"); + + StringTokenizer st = new StringTokenizer(command); + String[] cmdarray = new String[st.countTokens()]; + for (int i = 0; st.hasMoreTokens(); i++) + cmdarray[i] = st.nextToken(); + return exec(cmdarray, envp, dir); + } + + /** + * Executes the specified command and arguments in a separate process. + * + *
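Because the single-string overload above splits only on whitespace via StringTokenizer, quoting is not honored. A small usage sketch (commands and file names are illustrative and assume a POSIX-like environment, not anything in the test sources):

    import java.io.IOException;

    public class ExecTokenizing {
        public static void main(String[] args) throws IOException, InterruptedException {
            Runtime rt = Runtime.getRuntime();

            // The single-string overload is split on whitespace only:
            // "ls -l /tmp" becomes {"ls", "-l", "/tmp"} before being executed.
            Process p1 = rt.exec("ls -l /tmp");
            p1.waitFor();

            // Quotes are not interpreted by the tokenizer, so an argument that
            // contains spaces has to go through the String[] overload instead.
            Process p2 = rt.exec(new String[] {"cat", "My File.txt"});
            p2.waitFor();
        }
    }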

This is a convenience method. An invocation of the form + * exec(cmdarray) + * behaves in exactly the same way as the invocation + * {@link #exec(String[], String[], File) exec}(cmdarray, null, null). + * + * @param cmdarray array containing the command to call and + * its arguments. + * + * @return A new {@link Process} object for managing the subprocess + * + * @throws SecurityException + * If a security manager exists and its + * {@link SecurityManager#checkExec checkExec} + * method doesn't allow creation of the subprocess + * + * @throws IOException + * If an I/O error occurs + * + * @throws NullPointerException + * If cmdarray is null, + * or one of the elements of cmdarray is null + * + * @throws IndexOutOfBoundsException + * If cmdarray is an empty array + * (has length 0) + * + * @see ProcessBuilder + */ + public Process exec(String cmdarray[]) throws IOException { + return exec(cmdarray, null, null); + } + + /** + * Executes the specified command and arguments in a separate process + * with the specified environment. + * + *

This is a convenience method. An invocation of the form + * exec(cmdarray, envp) + * behaves in exactly the same way as the invocation + * {@link #exec(String[], String[], File) exec}(cmdarray, envp, null). + * + * @param cmdarray array containing the command to call and + * its arguments. + * + * @param envp array of strings, each element of which + * has environment variable settings in the format + * name=value, or + * null if the subprocess should inherit + * the environment of the current process. + * + * @return A new {@link Process} object for managing the subprocess + * + * @throws SecurityException + * If a security manager exists and its + * {@link SecurityManager#checkExec checkExec} + * method doesn't allow creation of the subprocess + * + * @throws IOException + * If an I/O error occurs + * + * @throws NullPointerException + * If cmdarray is null, + * or one of the elements of cmdarray is null, + * or one of the elements of envp is null + * + * @throws IndexOutOfBoundsException + * If cmdarray is an empty array + * (has length 0) + * + * @see ProcessBuilder + */ + public Process exec(String[] cmdarray, String[] envp) throws IOException { + return exec(cmdarray, envp, null); + } + + + /** + * Executes the specified command and arguments in a separate process with + * the specified environment and working directory. + * + *

Given an array of strings cmdarray, representing the + * tokens of a command line, and an array of strings envp, + * representing "environment" variable settings, this method creates + * a new process in which to execute the specified command. + * + *

This method checks that cmdarray is a valid operating + * system command. Which commands are valid is system-dependent, + * but at the very least the command must be a non-empty list of + * non-null strings. + * + *

If envp is null, the subprocess inherits the + * environment settings of the current process. + * + *

A minimal set of system dependent environment variables may + * be required to start a process on some operating systems. + * As a result, the subprocess may inherit additional environment variable + * settings beyond those in the specified environment. + * + *

{@link ProcessBuilder#start()} is now the preferred way to + * start a process with a modified environment. + * + *

The working directory of the new subprocess is specified by dir. + * If dir is null, the subprocess inherits the + * current working directory of the current process. + * + *

If a security manager exists, its + * {@link SecurityManager#checkExec checkExec} + * method is invoked with the first component of the array + * cmdarray as its argument. This may result in a + * {@link SecurityException} being thrown. + * + *

Starting an operating system process is highly system-dependent. + * Among the many things that can go wrong are: + *

    + *
  • The operating system program file was not found. + *
  • Access to the program file was denied. + *
  • The working directory does not exist. + *
+ * + *

In such cases an exception will be thrown. The exact nature + * of the exception is system-dependent, but it will always be a + * subclass of {@link IOException}. + * + * + * @param cmdarray array containing the command to call and + * its arguments. + * + * @param envp array of strings, each element of which + * has environment variable settings in the format + * name=value, or + * null if the subprocess should inherit + * the environment of the current process. + * + * @param dir the working directory of the subprocess, or + * null if the subprocess should inherit + * the working directory of the current process. + * + * @return A new {@link Process} object for managing the subprocess + * + * @throws SecurityException + * If a security manager exists and its + * {@link SecurityManager#checkExec checkExec} + * method doesn't allow creation of the subprocess + * + * @throws IOException + * If an I/O error occurs + * + * @throws NullPointerException + * If cmdarray is null, + * or one of the elements of cmdarray is null, + * or one of the elements of envp is null + * + * @throws IndexOutOfBoundsException + * If cmdarray is an empty array + * (has length 0) + * + * @see ProcessBuilder + * @since 1.3 + */ + public Process exec(String[] cmdarray, String[] envp, File dir) + throws IOException { + return new ProcessBuilder(cmdarray) + .environment(envp) + .directory(dir) + .start(); + } + + /** + * Returns the number of processors available to the Java virtual machine. + * + *
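Since exec(cmdarray, envp, dir) above is implemented on top of ProcessBuilder, the documented recommendation to use ProcessBuilder directly might look roughly like this (command, directory, and variable names are made up for illustration and assume a POSIX-like system):

    import java.io.File;
    import java.io.IOException;
    import java.util.Map;

    public class BuilderEquivalent {
        public static void main(String[] args) throws IOException, InterruptedException {
            // Roughly what exec(cmdarray, envp, dir) sets up, but with a typed
            // environment map instead of "name=value" strings.
            ProcessBuilder pb = new ProcessBuilder("printenv", "GREETING");
            pb.directory(new File("/tmp"));               // working directory (assumed to exist)

            Map<String, String> env = pb.environment();   // starts as a copy of the parent environment
            env.put("GREETING", "hello from the child");

            pb.inheritIO();                               // reuse the parent's stdin/stdout/stderr
            int status = pb.start().waitFor();
            System.out.println("child exited with " + status);
        }
    }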

This value may change during a particular invocation of the virtual + * machine. Applications that are sensitive to the number of available + * processors should therefore occasionally poll this property and adjust + * their resource usage appropriately.

+ * + * @return the maximum number of processors available to the virtual + * machine; never smaller than one + * @since 1.4 + */ + public int availableProcessors() { + return (int) Libcore.os.sysconf(_SC_NPROCESSORS_CONF); + } + + /** + * Returns the amount of free memory in the Java Virtual Machine. + * Calling the + * gc method may result in increasing the value returned + * by freeMemory. + * + * @return an approximation to the total amount of memory currently + * available for future allocated objects, measured in bytes. + */ + @FastNative + public native long freeMemory(); + + /** + * Returns the total amount of memory in the Java virtual machine. + * The value returned by this method may vary over time, depending on + * the host environment. + *

+ * Note that the amount of memory required to hold an object of any + * given type may be implementation-dependent. + * + * @return the total amount of memory currently available for current + * and future objects, measured in bytes. + */ + @FastNative + public native long totalMemory(); + + /** + * Returns the maximum amount of memory that the Java virtual machine will + * attempt to use. If there is no inherent limit then the value {@link + * java.lang.Long#MAX_VALUE} will be returned. + * + * @return the maximum amount of memory that the virtual machine will + * attempt to use, measured in bytes + * @since 1.4 + */ + @FastNative + public native long maxMemory(); + + /** + * Runs the garbage collector. + * Calling this method suggests that the Java virtual machine expend + * effort toward recycling unused objects in order to make the memory + * they currently occupy available for quick reuse. When control + * returns from the method call, the virtual machine has made + * its best effort to recycle all discarded objects. + *
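The memory and processor queries above are often combined into a simple report; a minimal sketch (class name is illustrative, and the "used" figure is only an approximation since freeMemory is itself approximate):

    public class MemoryReport {
        public static void main(String[] args) {
            Runtime rt = Runtime.getRuntime();

            long total = rt.totalMemory();   // heap the VM has currently reserved
            long free  = rt.freeMemory();    // unused part of that reservation
            long max   = rt.maxMemory();     // ceiling the VM will try to grow to
            long used  = total - free;       // rough size of allocated, not-yet-collected objects

            System.out.printf("cpus=%d used=%d KiB total=%d KiB max=%d KiB%n",
                    rt.availableProcessors(), used >> 10, total >> 10, max >> 10);
        }
    }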

+ * The name gc stands for "garbage + * collector". The virtual machine performs this recycling + * process automatically as needed, in a separate thread, even if the + * gc method is not invoked explicitly. + *

+ * The method {@link System#gc()} is the conventional and convenient + * means of invoking this method. + */ + public native void gc(); + + /* Wormhole for calling java.lang.ref.Finalizer.runFinalization */ + private static native void runFinalization0(); + + /** + * Runs the finalization methods of any objects pending finalization. + * Calling this method suggests that the Java virtual machine expend + * effort toward running the finalize methods of objects + * that have been found to be discarded but whose finalize + * methods have not yet been run. When control returns from the + * method call, the virtual machine has made a best effort to + * complete all outstanding finalizations. + *

+ * The virtual machine performs the finalization process + * automatically as needed, in a separate thread, if the + * runFinalization method is not invoked explicitly. + *

+ * The method {@link System#runFinalization()} is the conventional + * and convenient means of invoking this method. + * + * @see java.lang.Object#finalize() + */ + public void runFinalization() { + VMRuntime.runFinalization(0); + } + + /** + * Enables/Disables tracing of instructions. + * If the boolean argument is true, this + * method suggests that the Java virtual machine emit debugging + * information for each instruction in the virtual machine as it + * is executed. The format of this information, and the file or other + * output stream to which it is emitted, depends on the host environment. + * The virtual machine may ignore this request if it does not support + * this feature. The destination of the trace output is system + * dependent. + *

+ * If the boolean argument is false, this + * method causes the virtual machine to stop performing the + * detailed instruction trace it is performing. + * + * @param on true to enable instruction tracing; + * false to disable this feature. + */ + public void traceInstructions(boolean on) { + } + + /** + * Enables/Disables tracing of method calls. + * If the boolean argument is true, this + * method suggests that the Java virtual machine emit debugging + * information for each method in the virtual machine as it is + * called. The format of this information, and the file or other output + * stream to which it is emitted, depends on the host environment. The + * virtual machine may ignore this request if it does not support + * this feature. + *

+ * Calling this method with argument false suggests that the + * virtual machine cease emitting per-call debugging information. + *

+ * Calling this method on Android Lollipop or later (API level >= 21) + * with {@code true} argument will cause it to throw an + * {@code UnsupportedOperationException}. + * + * @param on true to enable instruction tracing; + * false to disable this feature. + */ + public void traceMethodCalls(boolean on) { + if (on != tracingMethods) { + if (on) { + VMDebug.startMethodTracing(); + } else { + VMDebug.stopMethodTracing(); + } + tracingMethods = on; + } + } + + /** + * Loads the native library specified by the filename argument. The filename + * argument must be an absolute path name. + * (for example + * Runtime.getRuntime().load("/home/avh/lib/libX11.so");). + * + * If the filename argument, when stripped of any platform-specific library + * prefix, path, and file extension, indicates a library whose name is, + * for example, L, and a native library called L is statically linked + * with the VM, then the JNI_OnLoad_L function exported by the library + * is invoked rather than attempting to load a dynamic library. + * A filename matching the argument does not have to exist in the file + * system. See the JNI Specification for more details. + * + * Otherwise, the filename argument is mapped to a native library image in + * an implementation-dependent manner. + *

+ * First, if there is a security manager, its checkLink + * method is called with the filename as its argument. + * This may result in a security exception. + *

+ * This is similar to the method {@link #loadLibrary(String)}, but it + * accepts a general file name as an argument rather than just a library + * name, allowing any file of native code to be loaded. + *

+ * The method {@link System#load(String)} is the conventional and + * convenient means of invoking this method. + * + * @param filename the file to load. + * @exception SecurityException if a security manager exists and its + * checkLink method doesn't allow + * loading of the specified dynamic library + * @exception UnsatisfiedLinkError if either the filename is not an + * absolute path name, the native library is not statically + * linked with the VM, or the library cannot be mapped to + * a native library image by the host system. + * @exception NullPointerException if filename is + * null + * @see java.lang.Runtime#getRuntime() + * @see java.lang.SecurityException + * @see java.lang.SecurityManager#checkLink(java.lang.String) + */ + @CallerSensitive + public void load(String filename) { + load0(VMStack.getStackClass1(), filename); + } + + /** Check target sdk, if it's higher than N, we throw an UnsupportedOperationException */ + private void checkTargetSdkVersionForLoad(String methodName) { + final int targetSdkVersion = VMRuntime.getRuntime().getTargetSdkVersion(); + if (targetSdkVersion > 24) { + throw new UnsupportedOperationException(methodName + " is not supported on SDK " + + targetSdkVersion); + } + } + + // Fixes b/25859957 regression. Depending on private methods is bad, mkay. + void load(String absolutePath, ClassLoader loader) { + checkTargetSdkVersionForLoad("java.lang.Runtime#load(String, ClassLoader)"); + + java.lang.System.logE("java.lang.Runtime#load(String, ClassLoader)" + + " is private and will be removed in a future Android release"); + if (absolutePath == null) { + throw new NullPointerException("absolutePath == null"); + } + String error = doLoad(absolutePath, loader); + if (error != null) { + throw new UnsatisfiedLinkError(error); + } + } + + synchronized void load0(Class fromClass, String filename) { + if (!(new File(filename).isAbsolute())) { + throw new UnsatisfiedLinkError( + "Expecting an absolute path of the library: " + filename); + } + if (filename == null) { + throw new NullPointerException("filename == null"); + } + String error = doLoad(filename, fromClass.getClassLoader()); + if (error != null) { + throw new UnsatisfiedLinkError(error); + } + } + + /** + * Loads the native library specified by the libname + * argument. The libname argument must not contain any platform + * specific prefix, file extension or path. If a native library + * called libname is statically linked with the VM, then the + * JNI_OnLoad_libname function exported by the library is invoked. + * See the JNI Specification for more details. + * + * Otherwise, the libname argument is loaded from a system library + * location and mapped to a native library image in an implementation- + * dependent manner. + *
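To make the load/loadLibrary contrast documented here concrete, a hedged sketch follows (the library path and name are hypothetical; either call throws UnsatisfiedLinkError if the library is absent):

    public class NativeLoading {
        public static void main(String[] args) {
            // load() takes an absolute path to one specific file. The path below
            // is purely illustrative.
            Runtime.getRuntime().load("/data/local/tmp/libdemo.so");

            // loadLibrary() takes a bare name; the runtime maps it to a platform
            // file name (e.g. "libdemo.so") and searches the library path for it.
            // System.load / System.loadLibrary are the usual entry points and
            // simply delegate to these two methods.
            Runtime.getRuntime().loadLibrary("demo");
        }
    }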

+ * First, if there is a security manager, its checkLink + * method is called with the libname as its argument. + * This may result in a security exception. + *

+ * The method {@link System#loadLibrary(String)} is the conventional + * and convenient means of invoking this method. If native + * methods are to be used in the implementation of a class, a standard + * strategy is to put the native code in a library file (call it + * LibFile) and then to put a static initializer: + *

+     * static { System.loadLibrary("LibFile"); }
+     * 
+ * within the class declaration. When the class is loaded and + * initialized, the necessary native code implementation for the native + * methods will then be loaded as well. + *

+ * If this method is called more than once with the same library + * name, the second and subsequent calls are ignored. + * + * @param libname the name of the library. + * @exception SecurityException if a security manager exists and its + * checkLink method doesn't allow + * loading of the specified dynamic library + * @exception UnsatisfiedLinkError if either the libname argument + * contains a file path, the native library is not statically + * linked with the VM, or the library cannot be mapped to a + * native library image by the host system. + * @exception NullPointerException if libname is + * null + * @see java.lang.SecurityException + * @see java.lang.SecurityManager#checkLink(java.lang.String) + */ + @CallerSensitive + public void loadLibrary(String libname) { + loadLibrary0(VMStack.getCallingClassLoader(), libname); + } + + /** + * Temporarily preserved for backward compatibility. Applications call this + * method using reflection. + * + * **** THIS METHOD WILL BE REMOVED IN A FUTURE ANDROID VERSION **** + * + * http://b/26217329 + * + * @hide + */ + public void loadLibrary(String libname, ClassLoader classLoader) { + checkTargetSdkVersionForLoad("java.lang.Runtime#loadLibrary(String, ClassLoader)"); + java.lang.System.logE("java.lang.Runtime#loadLibrary(String, ClassLoader)" + + " is private and will be removed in a future Android release"); + loadLibrary0(classLoader, libname); + } + + synchronized void loadLibrary0(ClassLoader loader, String libname) { + if (libname.indexOf((int)File.separatorChar) != -1) { + throw new UnsatisfiedLinkError( + "Directory separator should not appear in library name: " + libname); + } + String libraryName = libname; + if (loader != null) { + String filename = loader.findLibrary(libraryName); + if (filename == null) { + // It's not necessarily true that the ClassLoader used + // System.mapLibraryName, but the default setup does, and it's + // misleading to say we didn't find "libMyLibrary.so" when we + // actually searched for "liblibMyLibrary.so.so". + throw new UnsatisfiedLinkError(loader + " couldn't find \"" + + System.mapLibraryName(libraryName) + "\""); + } + String error = doLoad(filename, loader); + if (error != null) { + throw new UnsatisfiedLinkError(error); + } + return; + } + + String filename = System.mapLibraryName(libraryName); + List candidates = new ArrayList(); + String lastError = null; + for (String directory : getLibPaths()) { + String candidate = directory + filename; + candidates.add(candidate); + + if (IoUtils.canOpenReadOnly(candidate)) { + String error = doLoad(candidate, loader); + if (error == null) { + return; // We successfully loaded the library. Job done. + } + lastError = error; + } + } + + if (lastError != null) { + throw new UnsatisfiedLinkError(lastError); + } + throw new UnsatisfiedLinkError("Library " + libraryName + " not found; tried " + candidates); + } + + private volatile String[] mLibPaths = null; + + private String[] getLibPaths() { + if (mLibPaths == null) { + synchronized(this) { + if (mLibPaths == null) { + mLibPaths = initLibPaths(); + } + } + } + return mLibPaths; + } + + private static String[] initLibPaths() { + String javaLibraryPath = System.getProperty("java.library.path"); + if (javaLibraryPath == null) { + return EmptyArray.STRING; + } + String[] paths = javaLibraryPath.split(":"); + // Add a '/' to the end of each directory so we don't have to do it every time. 
+ for (int i = 0; i < paths.length; ++i) { + if (!paths[i].endsWith("/")) { + paths[i] += "/"; + } + } + return paths; + } + private String doLoad(String name, ClassLoader loader) { + // Android apps are forked from the zygote, so they can't have a custom LD_LIBRARY_PATH, + // which means that by default an app's shared library directory isn't on LD_LIBRARY_PATH. + + // The PathClassLoader set up by frameworks/base knows the appropriate path, so we can load + // libraries with no dependencies just fine, but an app that has multiple libraries that + // depend on each other needed to load them in most-dependent-first order. + + // We added API to Android's dynamic linker so we can update the library path used for + // the currently-running process. We pull the desired path out of the ClassLoader here + // and pass it to nativeLoad so that it can call the private dynamic linker API. + + // We didn't just change frameworks/base to update the LD_LIBRARY_PATH once at the + // beginning because multiple apks can run in the same process and third party code can + // use its own BaseDexClassLoader. + + // We didn't just add a dlopen_with_custom_LD_LIBRARY_PATH call because we wanted any + // dlopen(3) calls made from a .so's JNI_OnLoad to work too. + + // So, find out what the native library search path is for the ClassLoader in question... + String librarySearchPath = null; + if (loader != null && loader instanceof BaseDexClassLoader) { + BaseDexClassLoader dexClassLoader = (BaseDexClassLoader) loader; + librarySearchPath = dexClassLoader.getLdLibraryPath(); + } + // nativeLoad should be synchronized so there's only one LD_LIBRARY_PATH in use regardless + // of how many ClassLoaders are in the system, but dalvik doesn't support synchronized + // internal natives. + synchronized (this) { + return nativeLoad(name, loader, librarySearchPath); + } + } + + // TODO: should be synchronized, but dalvik doesn't support synchronized internal natives. + private static native String nativeLoad(String filename, ClassLoader loader, + String librarySearchPath); + + /** + * Creates a localized version of an input stream. This method takes + * an InputStream and returns an InputStream + * equivalent to the argument in all respects except that it is + * localized: as characters in the local character set are read from + * the stream, they are automatically converted from the local + * character set to Unicode. + *
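The getLibPaths/initLibPaths pair above uses the volatile double-checked-locking idiom for one-time initialization. A stand-alone sketch of the same pattern, with generic names rather than the fields of the class above, and the same Unix-style ':' separator assumption as initLibPaths:

    public class LazyPaths {
        private volatile String[] cachedPaths;   // volatile is what makes the idiom correct

        public String[] get() {
            String[] result = cachedPaths;
            if (result == null) {                 // first check, without the lock
                synchronized (this) {
                    result = cachedPaths;
                    if (result == null) {         // second check, under the lock
                        result = computePaths();
                        cachedPaths = result;
                    }
                }
            }
            return result;
        }

        private static String[] computePaths() {
            // ':' mirrors initLibPaths above; a portable version would use
            // System.getProperty("path.separator").
            String raw = System.getProperty("java.library.path", "");
            return raw.isEmpty() ? new String[0] : raw.split(":");
        }
    }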

+ * If the argument is already a localized stream, it may be returned + * as the result. + * + * @param in InputStream to localize + * @return a localized input stream + * @see java.io.InputStream + * @see java.io.BufferedReader#BufferedReader(java.io.Reader) + * @see java.io.InputStreamReader#InputStreamReader(java.io.InputStream) + * @deprecated As of JDK 1.1, the preferred way to translate a byte + * stream in the local encoding into a character stream in Unicode is via + * the InputStreamReader and BufferedReader + * classes. + */ + @Deprecated + public InputStream getLocalizedInputStream(InputStream in) { + return in; + } + + /** + * Creates a localized version of an output stream. This method + * takes an OutputStream and returns an + * OutputStream equivalent to the argument in all respects + * except that it is localized: as Unicode characters are written to + * the stream, they are automatically converted to the local + * character set. + *
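The deprecation notes for the two localized-stream methods point at the reader/writer bridges; a small sketch of the replacement they suggest (class name is illustrative, and the charset choice mirrors the "local encoding" wording):

    import java.io.BufferedReader;
    import java.io.IOException;
    import java.io.InputStreamReader;
    import java.io.OutputStreamWriter;
    import java.io.PrintWriter;
    import java.nio.charset.Charset;

    public class StreamBridging {
        public static void main(String[] args) throws IOException {
            Charset local = Charset.defaultCharset();

            // Byte stream in -> character stream, in place of getLocalizedInputStream:
            BufferedReader in = new BufferedReader(new InputStreamReader(System.in, local));

            // Character stream -> byte stream out, in place of getLocalizedOutputStream:
            PrintWriter out = new PrintWriter(new OutputStreamWriter(System.out, local), true);

            out.println("echo: " + in.readLine());
        }
    }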

+ * If the argument is already a localized stream, it may be returned + * as the result. + * + * @deprecated As of JDK 1.1, the preferred way to translate a + * Unicode character stream into a byte stream in the local encoding is via + * the OutputStreamWriter, BufferedWriter, and + * PrintWriter classes. + * + * @param out OutputStream to localize + * @return a localized output stream + * @see java.io.OutputStream + * @see java.io.BufferedWriter#BufferedWriter(java.io.Writer) + * @see java.io.OutputStreamWriter#OutputStreamWriter(java.io.OutputStream) + * @see java.io.PrintWriter#PrintWriter(java.io.OutputStream) + */ + @Deprecated + public OutputStream getLocalizedOutputStream(OutputStream out) { + return out; + } + +} diff --git a/src/MapleFE/test/java/openjdk/Runtime.java.result b/src/MapleFE/test/java/openjdk/Runtime.java.result new file mode 100644 index 0000000000000000000000000000000000000000..7e96553a7a5b9943a44f7411a2714915bfad15db --- /dev/null +++ b/src/MapleFE/test/java/openjdk/Runtime.java.result @@ -0,0 +1,319 @@ +Matched 5 tokens. +Matched 14 tokens. +Matched 21 tokens. +Matched 28 tokens. +Matched 35 tokens. +Matched 44 tokens. +Matched 51 tokens. +Matched 58 tokens. +Matched 65 tokens. +Matched 72 tokens. +Matched 79 tokens. +Matched 86 tokens. +Matched 93 tokens. +Matched 100 tokens. +Matched 107 tokens. +Matched 117 tokens. +Matched 1578 tokens. +============= Module =========== +== Sub Tree == +package java.lang +== Sub Tree == +import dalvik.annotation.optimization.FastNative +== Sub Tree == +import java.io +== Sub Tree == +import java.util.StringTokenizer +== Sub Tree == +import sun.reflect.CallerSensitive +== Sub Tree == +import java.lang.ref.FinalizerReference +== Sub Tree == +import java.util.ArrayList +== Sub Tree == +import java.util.List +== Sub Tree == +import dalvik.system.BaseDexClassLoader +== Sub Tree == +import dalvik.system.VMDebug +== Sub Tree == +import dalvik.system.VMStack +== Sub Tree == +import dalvik.system.VMRuntime +== Sub Tree == +import libcore.io.IoUtils +== Sub Tree == +import libcore.io.Libcore +== Sub Tree == +import libcore.util.EmptyArray +== Sub Tree == +import static android.system.OsConstants._SC_NPROCESSORS_CONF +== Sub Tree == +class Runtime + Fields: + currentRuntime=new Runtime() shutdownHooks=new ArrayList() finalizeOnExit shuttingDown tracingMethods mLibPaths=null + Instance Initializer: + Constructors: + constructor Runtime() throws: + Methods: + func nativeExit(code) throws: + func getRuntime() throws: + return currentRuntime + func exit(status) throws: + cond-branch cond:shuttingDown + true branch : + shuttingDown Assign true + Decl: hooks + hooks Assign + shutdownHooks.toArray(hooks) + + Thread + hook + hooks + hook.start() + + Thread + hook + hooks + hook.join() + + InterruptedException + ex + + + cond-branch cond:finalizeOnExit + true branch : + runFinalization() + false branch : + + nativeExit(status) + false branch : + + + func addShutdownHook(hook) throws: + cond-branch cond:hook EQ null + true branch : + new NullPointerException("hook == null") + false branch : + + cond-branch cond:shuttingDown + true branch : + new IllegalStateException("VM already shutting down") + false branch : + + cond-branch cond:hook.started + true branch : + new IllegalArgumentException("Hook has already been started") + false branch : + + cond-branch cond:shutdownHooks.contains(hook) + true branch : + new IllegalArgumentException("Hook already registered.") + false branch : + + shutdownHooks.add(hook) + + func removeShutdownHook(hook) throws: 
+ cond-branch cond:hook EQ null + true branch : + new NullPointerException("hook == null") + false branch : + + cond-branch cond:shuttingDown + true branch : + new IllegalStateException("VM already shutting down") + false branch : + + return shutdownHooks.remove(hook) + + func halt(status) throws: + nativeExit(status) + func runFinalizersOnExit(value) throws: + finalizeOnExit Assign value + func exec(command) throws: IOException + return exec(command,null,null) + func exec(command,envp) throws: IOException + return exec(command,envp,null) + func exec(command,envp,dir) throws: IOException + cond-branch cond:command.length() EQ 0 + true branch : + new IllegalArgumentException("Empty command") false branch : + + Decl: st=new StringTokenizer(command) + Decl: cmdarray= + for ( ) + Assign st.nextToken() + return exec(cmdarray,envp,dir) + func exec(cmdarray[]) throws: IOException + return exec(cmdarray,null,null) + func exec(cmdarray,envp) throws: IOException + return exec(cmdarray,envp,null) + func exec(cmdarray,envp,dir) throws: IOException + return new ProcessBuilder(cmdarray).environment(envp).directory(dir).start() + func availableProcessors() throws: + return (int)Libcore.os.sysconf(_SC_NPROCESSORS_CONF) + func freeMemory() throws: + func totalMemory() throws: + func maxMemory() throws: + func gc() throws: + func runFinalization0() throws: + func runFinalization() throws: + VMRuntime.runFinalization(0) + func traceInstructions(on) throws: + func traceMethodCalls(on) throws: + cond-branch cond:on NE tracingMethods + true branch : + cond-branch cond:on + true branch : + VMDebug.startMethodTracing() + false branch : + VMDebug.stopMethodTracing() + + tracingMethods Assign on + false branch : + + func load(filename) throws: + load0(VMStack.getStackClass1(),filename) + func checkTargetSdkVersionForLoad(methodName) throws: + Decl: targetSdkVersion=VMRuntime.getRuntime().getTargetSdkVersion() + cond-branch cond:targetSdkVersion GT 24 + true branch : + new UnsupportedOperationException(methodName Add " is not supported on SDK " Add targetSdkVersion) + false branch : + + func load(absolutePath,loader) throws: + checkTargetSdkVersionForLoad("java.lang.Runtime#load(String, ClassLoader)") + java.lang.System.logE("java.lang.Runtime#load(String, ClassLoader)" Add " is private and will be removed in a future Android release") + cond-branch cond:absolutePath EQ null + true branch : + new NullPointerException("absolutePath == null") + false branch : + + Decl: error=doLoad(absolutePath,loader) + cond-branch cond:error NE null + true branch : + new UnsatisfiedLinkError(error) + false branch : + + func load0(fromClass,filename) throws: + cond-branch cond:(new File(filename).isAbsolute()) + true branch : + new UnsatisfiedLinkError("Expecting an absolute path of the library: " Add filename) + false branch : + + cond-branch cond:filename EQ null + true branch : + new NullPointerException("filename == null") + false branch : + + Decl: error=doLoad(filename,fromClass.getClassLoader()) + cond-branch cond:error NE null + true branch : + new UnsatisfiedLinkError(error) + false branch : + + func loadLibrary(libname) throws: + loadLibrary0(VMStack.getCallingClassLoader(),libname) + func loadLibrary(libname,classLoader) throws: + checkTargetSdkVersionForLoad("java.lang.Runtime#loadLibrary(String, ClassLoader)") + java.lang.System.logE("java.lang.Runtime#loadLibrary(String, ClassLoader)" Add " is private and will be removed in a future Android release") + loadLibrary0(classLoader,libname) + func 
loadLibrary0(loader,libname) throws: + cond-branch cond:libname.indexOf((int)File.separatorChar) NE -1 + true branch : + new UnsatisfiedLinkError("Directory separator should not appear in library name: " Add libname) + false branch : + + Decl: libraryName=libname + cond-branch cond:loader NE null + true branch : + Decl: filename=loader.findLibrary(libraryName) + cond-branch cond:filename EQ null + true branch : + new UnsatisfiedLinkError(loader Add " couldn't find \"" Add System.mapLibraryName(libraryName) Add "\"") + false branch : + + Decl: error=doLoad(filename,loader) + cond-branch cond:error NE null + true branch : + new UnsatisfiedLinkError(error) + false branch : + + return + false branch : + + Decl: filename=System.mapLibraryName(libraryName) + Decl: candidates=new ArrayList() + Decl: lastError=null + String + directory + getLibPaths() + Decl: candidate=directory Add filename + candidates.add(candidate) + cond-branch cond:IoUtils.canOpenReadOnly(candidate) + true branch : + Decl: error=doLoad(candidate,loader) + cond-branch cond:error EQ null + true branch : + return + false branch : + + lastError Assign error + false branch : + + + cond-branch cond:lastError NE null + true branch : + new UnsatisfiedLinkError(lastError) + false branch : + + new UnsatisfiedLinkError("Library " Add libraryName Add " not found; tried " Add candidates) + func getLibPaths() throws: + cond-branch cond:mLibPaths EQ null + true branch : + cond-branch cond:mLibPaths EQ null + true branch : + mLibPaths Assign initLibPaths() + false branch : + + + false branch : + + return mLibPaths + func initLibPaths() throws: + Decl: javaLibraryPath=System.getProperty("java.library.path") + cond-branch cond:javaLibraryPath EQ null + true branch : + return EmptyArray.STRING + false branch : + + Decl: paths=javaLibraryPath.split(":") + for ( ) + cond-branch cond:.endsWith("/") + true branch : + AddAssign "/" + false branch : + + + return paths + func doLoad(name,loader) throws: + Decl: librarySearchPath=null + cond-branch cond:loader NE null Land loader instanceof BaseDexClassLoader + true branch : + Decl: dexClassLoader=(BaseDexClassLoader)loader + librarySearchPath Assign dexClassLoader.getLdLibraryPath() + false branch : + + return nativeLoad(name,loader,librarySearchPath) + + func nativeLoad(filename,loader,librarySearchPath) throws: + func getLocalizedInputStream(in) throws: + return in + func getLocalizedOutputStream(out) throws: + return out + LocalClasses: + LocalInterfaces: + +Identifier:String has no decl. +Identifier:directory has no decl. +UserType:UnsatisfiedLinkError has no decl. diff --git a/src/MapleFE/test/java/openjdk/SafeVarargs.java b/src/MapleFE/test/java/openjdk/SafeVarargs.java new file mode 100644 index 0000000000000000000000000000000000000000..243b9f1fc95c746a69ab290aeab6b377ac12f73e --- /dev/null +++ b/src/MapleFE/test/java/openjdk/SafeVarargs.java @@ -0,0 +1,33 @@ + +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package java.lang; +import java.lang.annotation.Documented; +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; +/** + * Claims to the compiler that the annotation target does nothing potentially unsafe + * to its varargs argument. + * + * @since 1.7 + */ +@Documented +@Retention(value=RetentionPolicy.RUNTIME) +@Target(value={ElementType.CONSTRUCTOR, ElementType.METHOD}) +public @interface SafeVarargs { +} diff --git a/src/MapleFE/test/java/openjdk/SafeVarargs.java.result b/src/MapleFE/test/java/openjdk/SafeVarargs.java.result new file mode 100644 index 0000000000000000000000000000000000000000..1d1ab5064a0f9d34c5e0e8c17c9a389ff739eeb6 --- /dev/null +++ b/src/MapleFE/test/java/openjdk/SafeVarargs.java.result @@ -0,0 +1,22 @@ +Matched 5 tokens. +Matched 14 tokens. +Matched 23 tokens. +Matched 32 tokens. +Matched 41 tokens. +Matched 50 tokens. +Matched 82 tokens. +============= Module =========== +== Sub Tree == +package java.lang +== Sub Tree == +import java.lang.annotation.Documented +== Sub Tree == +import java.lang.annotation.ElementType +== Sub Tree == +import java.lang.annotation.Retention +== Sub Tree == +import java.lang.annotation.RetentionPolicy +== Sub Tree == +import java.lang.annotation.Target +== Sub Tree == +annotation type : SafeVarargs diff --git a/src/MapleFE/test/java/openjdk/SecurityManager.java b/src/MapleFE/test/java/openjdk/SecurityManager.java new file mode 100644 index 0000000000000000000000000000000000000000..2182ca6e22322ce1beb9638cc301e29014d7cc8e --- /dev/null +++ b/src/MapleFE/test/java/openjdk/SecurityManager.java @@ -0,0 +1,108 @@ + +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package java.lang; +import java.io.FileDescriptor; +import java.net.InetAddress; +import java.security.Permission; +/** + * Legacy security code; do not use. + * + *
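For the SafeVarargs annotation added just above, a typical use site might look like the following sketch (class and method names are invented for illustration):

    import java.util.ArrayList;
    import java.util.List;

    public class VarargsDemo {
        // Without the annotation every call site gets an "unchecked generic array
        // creation" warning; @SafeVarargs records the promise that the body never
        // stores anything unsafe into the varargs array.
        @SafeVarargs
        public static <T> List<T> listOf(T... items) {
            List<T> out = new ArrayList<>();
            for (T item : items) {
                out.add(item);
            }
            return out;
        }

        public static void main(String[] args) {
            System.out.println(listOf("a", "b", "c"));
        }
    }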

Security managers do not provide a + * secure environment for executing untrusted code. Untrusted code cannot be + * safely isolated within the Dalvik VM. + */ +public class SecurityManager { + /** + * @deprecated Use {@link #checkPermission} + */ + @Deprecated + protected boolean inCheck; + public SecurityManager() { } + public void checkAccept(String host, int port) { } + public void checkAccess(Thread thread) { } + public void checkAccess(ThreadGroup group) { } + public void checkConnect(String host, int port) { } + public void checkConnect(String host, int port, Object context) { } + public void checkCreateClassLoader() { } + public void checkDelete(String file) { } + public void checkExec(String cmd) { } + public void checkExit(int status) { } + public void checkLink(String libName) { } + public void checkListen(int port) { } + public void checkMemberAccess(Class cls, int type) { } + public void checkMulticast(InetAddress maddr) { } + /** + * @deprecated use {@link #checkMulticast(java.net.InetAddress)} + */ + @Deprecated public void checkMulticast(InetAddress maddr, byte ttl) { } + public void checkPackageAccess(String packageName) { } + public void checkPackageDefinition(String packageName) { } + public void checkPropertiesAccess() { } + public void checkPropertyAccess(String key) { } + public void checkRead(FileDescriptor fd) { } + public void checkRead(String file) { } + public void checkRead(String file, Object context) { } + public void checkSecurityAccess(String target) { } + public void checkSetFactory() { } + public boolean checkTopLevelWindow(Object window) { return true; } + public void checkSystemClipboardAccess() { } + public void checkAwtEventQueueAccess() { } + public void checkPrintJobAccess() { } + public void checkWrite(FileDescriptor fd) { } + public void checkWrite(String file) { } + /** + * @deprecated Use {@link #checkPermission}. + */ + @Deprecated public boolean getInCheck() { return inCheck; } + protected Class[] getClassContext() { return null; } + /** + * @deprecated Use {@link #checkPermission}. + */ + @Deprecated protected ClassLoader currentClassLoader() { return null; } + /** + * @deprecated Use {@link #checkPermission}. + */ + @Deprecated protected int classLoaderDepth() { + return -1; + } + /** + * @deprecated Use {@link #checkPermission}. + */ + @Deprecated protected Class currentLoadedClass() { return null; } + /** + * @deprecated Use {@link #checkPermission}. + */ + @Deprecated protected int classDepth(String name) { return -1; } + /** + * @deprecated Use {@link #checkPermission}. + */ + @Deprecated protected boolean inClass(String name) { return false; } + /** + * @deprecated Use {@link #checkPermission} + */ + @Deprecated protected boolean inClassLoader() { return false; } + /** + * Returns the current thread's thread group. + */ + public ThreadGroup getThreadGroup() { + return Thread.currentThread().getThreadGroup(); + } + public Object getSecurityContext() { return null; } + public void checkPermission(Permission permission) { } + public void checkPermission(Permission permission, Object context) { } +} diff --git a/src/MapleFE/test/java/openjdk/SecurityManager.java.result b/src/MapleFE/test/java/openjdk/SecurityManager.java.result new file mode 100644 index 0000000000000000000000000000000000000000..fad7670e870adc19712c7ccac717013294dc6a74 --- /dev/null +++ b/src/MapleFE/test/java/openjdk/SecurityManager.java.result @@ -0,0 +1,77 @@ +Matched 5 tokens. +Matched 12 tokens. +Matched 19 tokens. +Matched 26 tokens. +Matched 475 tokens. 
+============= Module =========== +== Sub Tree == +package java.lang +== Sub Tree == +import java.io.FileDescriptor +== Sub Tree == +import java.net.InetAddress +== Sub Tree == +import java.security.Permission +== Sub Tree == +class SecurityManager + Fields: + inCheck + Instance Initializer: + Constructors: + constructor SecurityManager() throws: + Methods: + func checkAccept(host,port) throws: + func checkAccess(thread) throws: + func checkAccess(group) throws: + func checkConnect(host,port) throws: + func checkConnect(host,port,context) throws: + func checkCreateClassLoader() throws: + func checkDelete(file) throws: + func checkExec(cmd) throws: + func checkExit(status) throws: + func checkLink(libName) throws: + func checkListen(port) throws: + func checkMemberAccess(cls,type) throws: + func checkMulticast(maddr) throws: + func checkMulticast(maddr,ttl) throws: + func checkPackageAccess(packageName) throws: + func checkPackageDefinition(packageName) throws: + func checkPropertiesAccess() throws: + func checkPropertyAccess(key) throws: + func checkRead(fd) throws: + func checkRead(file) throws: + func checkRead(file,context) throws: + func checkSecurityAccess(target) throws: + func checkSetFactory() throws: + func checkTopLevelWindow(window) throws: + return true + func checkSystemClipboardAccess() throws: + func checkAwtEventQueueAccess() throws: + func checkPrintJobAccess() throws: + func checkWrite(fd) throws: + func checkWrite(file) throws: + func getInCheck() throws: + return inCheck + func getClassContext() throws: + return null + func currentClassLoader() throws: + return null + func classLoaderDepth() throws: + return -1 + func currentLoadedClass() throws: + return null + func classDepth(name) throws: + return -1 + func inClass(name) throws: + return false + func inClassLoader() throws: + return false + func getThreadGroup() throws: + return Thread.currentThread().getThreadGroup() + func getSecurityContext() throws: + return null + func checkPermission(permission) throws: + func checkPermission(permission,context) throws: + LocalClasses: + LocalInterfaces: + diff --git a/src/MapleFE/test/java/openjdk/Short.java b/src/MapleFE/test/java/openjdk/Short.java new file mode 100644 index 0000000000000000000000000000000000000000..17324fb296bcf3ca873934de606ca7da3b3b85e6 --- /dev/null +++ b/src/MapleFE/test/java/openjdk/Short.java @@ -0,0 +1,290 @@ + +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package java.lang; +/** + * The wrapper for the primitive type {@code short}. 
+ * + * @see java.lang.Number + * @since 1.1 + */ +@FindBugsSuppressWarnings("DM_NUMBER_CTOR") +public final class Short extends Number implements Comparable { + private static final long serialVersionUID = 7515723908773894738L; + /** + * The value which the receiver represents. + */ + private final short value; + /** + * Constant for the maximum {@code short} value, 215-1. + */ + public static final short MAX_VALUE = (short) 0x7FFF; + /** + * Constant for the minimum {@code short} value, -215. + */ + public static final short MIN_VALUE = (short) 0x8000; + /** + * Constant for the number of bits needed to represent a {@code short} in + * two's complement form. + * + * @since 1.5 + */ + public static final int SIZE = 16; + /** + * The {@link Class} object that represents the primitive type {@code + * short}. + */ + @SuppressWarnings("unchecked") + public static final Class TYPE + = (Class) short[].class.getComponentType(); + // Note: Short.TYPE can't be set to "short.class", since *that* is + // defined to be "java.lang.Short.TYPE"; + /** + * Constructs a new {@code Short} from the specified string. + * + * @param string + * the string representation of a short value. + * @throws NumberFormatException + * if {@code string} cannot be parsed as a short value. + * @see #parseShort(String) + */ + public Short(String string) throws NumberFormatException { + this(parseShort(string)); + } + /** + * Constructs a new {@code Short} with the specified primitive short value. + * + * @param value + * the primitive short value to store in the new instance. + */ + public Short(short value) { + this.value = value; + } + @Override + public byte byteValue() { + return (byte) value; + } + /** + * Compares this object to the specified short object to determine their + * relative order. + * + * @param object + * the short object to compare this object to. + * @return a negative value if the value of this short is less than the + * value of {@code object}; 0 if the value of this short and the + * value of {@code object} are equal; a positive value if the value + * of this short is greater than the value of {@code object}. + * @throws NullPointerException + * if {@code object} is null. + * @see java.lang.Comparable + * @since 1.2 + */ + public int compareTo(Short object) { + return compare(value, object.value); + } + /** + * Compares two {@code short} values. + * @return 0 if lhs = rhs, less than 0 if lhs < rhs, and greater than 0 if lhs > rhs. + * @since 1.7 + */ + public static int compare(short lhs, short rhs) { + return lhs > rhs ? 1 : (lhs < rhs ? -1 : 0); + } + /** + * Parses the specified string and returns a {@code Short} instance if the + * string can be decoded into a short value. The string may be an optional + * minus sign "-" followed by a hexadecimal ("0x..." or "#..."), octal + * ("0..."), or decimal ("...") representation of a short. + * + * @param string + * a string representation of a short value. + * @return a {@code Short} containing the value represented by + * {@code string}. + * @throws NumberFormatException + * if {@code string} cannot be parsed as a short value. 
+ */ + public static Short decode(String string) throws NumberFormatException { + int intValue = Integer.decode(string).intValue(); + short result = (short) intValue; + if (result == intValue) { + return valueOf(result); + } + throw new NumberFormatException("Value out of range for short: \"" + string + "\""); + } + @Override + public double doubleValue() { + return value; + } + /** + * Compares this instance with the specified object and indicates if they + * are equal. In order to be equal, {@code object} must be an instance of + * {@code Short} and have the same short value as this object. + * + * @param object + * the object to compare this short with. + * @return {@code true} if the specified object is equal to this + * {@code Short}; {@code false} otherwise. + */ + @Override + public boolean equals(Object object) { + return (object instanceof Short) && (((Short) object).value == value); + } + @Override + public float floatValue() { + return value; + } + @Override + public int hashCode() { + return value; + } + @Override + public int intValue() { + return value; + } + @Override + public long longValue() { + return value; + } + /** + * Parses the specified string as a signed decimal short value. The ASCII + * character \u002d ('-') is recognized as the minus sign. + * + * @param string + * the string representation of a short value. + * @return the primitive short value represented by {@code string}. + * @throws NumberFormatException + * if {@code string} cannot be parsed as a short value. + */ + public static short parseShort(String string) throws NumberFormatException { + return parseShort(string, 10); + } + /** + * Parses the specified string as a signed short value using the specified + * radix. The ASCII character \u002d ('-') is recognized as the minus sign. + * + * @param string + * the string representation of a short value. + * @param radix + * the radix to use when parsing. + * @return the primitive short value represented by {@code string} using + * {@code radix}. + * @throws NumberFormatException + * if {@code string} cannot be parsed as a short value, or + * {@code radix < Character.MIN_RADIX || + * radix > Character.MAX_RADIX}. + */ + public static short parseShort(String string, int radix) throws NumberFormatException { + int intValue = Integer.parseInt(string, radix); + short result = (short) intValue; + if (result == intValue) { + return result; + } + throw new NumberFormatException("Value out of range for short: \"" + string + "\""); + } + /** + * Gets the primitive value of this short. + * + * @return this object's primitive value. + */ + @Override + public short shortValue() { + return value; + } + @Override + public String toString() { + return Integer.toString(value); + } + /** + * Returns a string containing a concise, human-readable description of the + * specified short value with radix 10. + * + * @param value + * the short to convert to a string. + * @return a printable representation of {@code value}. + */ + public static String toString(short value) { + return Integer.toString(value); + } + /** + * Parses the specified string as a signed decimal short value. + * + * @param string + * the string representation of a short value. + * @return a {@code Short} instance containing the short value represented + * by {@code string}. + * @throws NumberFormatException + * if {@code string} cannot be parsed as a short value. 
+ * @see #parseShort(String) + */ + public static Short valueOf(String string) throws NumberFormatException { + return valueOf(parseShort(string)); + } + /** + * Parses the specified string as a signed short value using the specified + * radix. + * + * @param string + * the string representation of a short value. + * @param radix + * the radix to use when parsing. + * @return a {@code Short} instance containing the short value represented + * by {@code string} using {@code radix}. + * @throws NumberFormatException + * if {@code string} cannot be parsed as a short value, or + * {@code radix < Character.MIN_RADIX || + * radix > Character.MAX_RADIX}. + * @see #parseShort(String, int) + */ + public static Short valueOf(String string, int radix) throws NumberFormatException { + return valueOf(parseShort(string, radix)); + } + /** + * Reverses the bytes of the specified short. + * + * @param s + * the short value for which to reverse bytes. + * @return the reversed value. + * @since 1.5 + */ + public static short reverseBytes(short s) { + return (short) ((s << 8) | ((s >>> 8) & 0xFF)); + } + /** + * Returns a {@code Short} instance for the specified short value. + *

+ * If it is not necessary to get a new {@code Short} instance, it is + * recommended to use this method instead of the constructor, since it + * maintains a cache of instances which may result in better performance. + * + * @param s + * the short value to store in the instance. + * @return a {@code Short} instance containing {@code s}. + * @since 1.5 + */ + public static Short valueOf(short s) { + return s < -128 || s >= 128 ? new Short(s) : SMALL_VALUES[s + 128]; + } + /** + * A cache of instances used by {@link Short#valueOf(short)} and auto-boxing. + */ + private static final Short[] SMALL_VALUES = new Short[256]; + static { + for (int i = -128; i < 128; i++) { + SMALL_VALUES[i + 128] = new Short((short) i); + } + } +} diff --git a/src/MapleFE/test/java/openjdk/Short.java.result b/src/MapleFE/test/java/openjdk/Short.java.result new file mode 100644 index 0000000000000000000000000000000000000000..f2e1a293dd41b0f80a45ed23d2c3a9570974d0ad --- /dev/null +++ b/src/MapleFE/test/java/openjdk/Short.java.result @@ -0,0 +1,73 @@ +Matched 5 tokens. +Matched 627 tokens. +============= Module =========== +== Sub Tree == +package java.lang +== Sub Tree == +class Short + Fields: + serialVersionUID=878762578 value MAX_VALUE=(short)32767 MIN_VALUE=(short)32768 SIZE=16 TYPE=()short.getComponentType() SMALL_VALUES= + Instance Initializer: + InstInit- 0 + Constructors: + constructor Short(string) throws: + constructor Short(value) throws: + this.value Assign value + Methods: + func byteValue() throws: + return (byte)value + func compareTo(object) throws: + return compare(value,object.value) + func compare(lhs,rhs) throws: + return + func decode(string) throws: NumberFormatException + Decl: intValue=Integer.decode(string).intValue() + Decl: result=(short)intValue + cond-branch cond:result EQ intValue + true branch : + return valueOf(result) + false branch : + + new NumberFormatException("Value out of range for short: \"" Add string Add "\"") + func doubleValue() throws: + return value + func equals(object) throws: + return (object instanceof Short) Land ((Short)object.value EQ value) + func floatValue() throws: + return value + func hashCode() throws: + return value + func intValue() throws: + return value + func longValue() throws: + return value + func parseShort(string) throws: NumberFormatException + return parseShort(string,10) + func parseShort(string,radix) throws: NumberFormatException + Decl: intValue=Integer.parseInt(string,radix) + Decl: result=(short)intValue + cond-branch cond:result EQ intValue + true branch : + return result + false branch : + + new NumberFormatException("Value out of range for short: \"" Add string Add "\"") + func shortValue() throws: + return value + func toString() throws: + return Integer.toString(value) + func toString(value) throws: + return Integer.toString(value) + func valueOf(string) throws: NumberFormatException + return valueOf(parseShort(string)) + func valueOf(string,radix) throws: NumberFormatException + return valueOf(parseShort(string,radix)) + func reverseBytes(s) throws: + return (short)((s Shl 8) Bor ((s Zext 8) Band 255)) + func valueOf(s) throws: + return + LocalClasses: + LocalInterfaces: + +UserType:NumberFormatException has no decl. +UserType:NumberFormatException has no decl. 
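The SMALL_VALUES cache and decode() behavior of the Short class above can be observed with a short sketch (identity of boxed values outside [-128, 127] is implementation-specific in general; the comments describe the implementation shown here):

    public class ShortCaching {
        public static void main(String[] args) {
            Short a = Short.valueOf((short) 100);
            Short b = Short.valueOf((short) 100);
            Short c = Short.valueOf((short) 1000);
            Short d = Short.valueOf((short) 1000);

            System.out.println(a == b);       // true: both come from the SMALL_VALUES cache
            System.out.println(c == d);       // false here: values outside [-128, 127] are boxed fresh
            System.out.println(c.equals(d));  // true: always compare boxed values with equals()

            // decode() accepts decimal, hex ("0x..", "#..") and octal ("0..") forms
            // and range-checks the result.
            System.out.println(Short.decode("0x7f"));   // 127
            System.out.println(Short.decode("#10"));    // 16
        }
    }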
diff --git a/src/MapleFE/test/java/openjdk/StackTraceElement.java b/src/MapleFE/test/java/openjdk/StackTraceElement.java new file mode 100644 index 0000000000000000000000000000000000000000..e519fea26af9e97cba4e99e6981c10b60dd13446 --- /dev/null +++ b/src/MapleFE/test/java/openjdk/StackTraceElement.java @@ -0,0 +1,237 @@ +/* + * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package java.lang; + +import java.util.Objects; + +/** + * An element in a stack trace, as returned by {@link + * Throwable#getStackTrace()}. Each element represents a single stack frame. + * All stack frames except for the one at the top of the stack represent + * a method invocation. The frame at the top of the stack represents the + * execution point at which the stack trace was generated. Typically, + * this is the point at which the throwable corresponding to the stack trace + * was created. + * + * @since 1.4 + * @author Josh Bloch + */ +public final class StackTraceElement implements java.io.Serializable { + // Normally initialized by VM (public constructor added in 1.5) + private String declaringClass; + private String methodName; + private String fileName; + private int lineNumber; + + /** + * Creates a stack trace element representing the specified execution + * point. + * + * @param declaringClass the fully qualified name of the class containing + * the execution point represented by the stack trace element + * @param methodName the name of the method containing the execution point + * represented by the stack trace element + * @param fileName the name of the file containing the execution point + * represented by the stack trace element, or {@code null} if + * this information is unavailable + * @param lineNumber the line number of the source line containing the + * execution point represented by this stack trace element, or + * a negative number if this information is unavailable. 
A value + * of -2 indicates that the method containing the execution point + * is a native method + * @throws NullPointerException if {@code declaringClass} or + * {@code methodName} is null + * @since 1.5 + */ + public StackTraceElement(String declaringClass, String methodName, + String fileName, int lineNumber) { + this.declaringClass = Objects.requireNonNull(declaringClass, "Declaring class is null"); + this.methodName = Objects.requireNonNull(methodName, "Method name is null"); + this.fileName = fileName; + this.lineNumber = lineNumber; + } + + /** + * Returns the name of the source file containing the execution point + * represented by this stack trace element. Generally, this corresponds + * to the {@code SourceFile} attribute of the relevant {@code class} + * file (as per The Java Virtual Machine Specification, Section + * 4.7.7). In some systems, the name may refer to some source code unit + * other than a file, such as an entry in source repository. + * + * @return the name of the file containing the execution point + * represented by this stack trace element, or {@code null} if + * this information is unavailable. + */ + public String getFileName() { + return fileName; + } + + /** + * Returns the line number of the source line containing the execution + * point represented by this stack trace element. Generally, this is + * derived from the {@code LineNumberTable} attribute of the relevant + * {@code class} file (as per The Java Virtual Machine + * Specification, Section 4.7.8). + * + * @return the line number of the source line containing the execution + * point represented by this stack trace element, or a negative + * number if this information is unavailable. + */ + public int getLineNumber() { + return lineNumber; + } + + /** + * Returns the fully qualified name of the class containing the + * execution point represented by this stack trace element. + * + * @return the fully qualified name of the {@code Class} containing + * the execution point represented by this stack trace element. + */ + public String getClassName() { + return declaringClass; + } + + /** + * Returns the name of the method containing the execution point + * represented by this stack trace element. If the execution point is + * contained in an instance or class initializer, this method will return + * the appropriate special method name, {@code } or + * {@code }, as per Section 3.9 of The Java Virtual + * Machine Specification. + * + * @return the name of the method containing the execution point + * represented by this stack trace element. + */ + public String getMethodName() { + return methodName; + } + + /** + * Returns true if the method containing the execution point + * represented by this stack trace element is a native method. + * + * @return {@code true} if the method containing the execution point + * represented by this stack trace element is a native method. + */ + public boolean isNativeMethod() { + return lineNumber == -2; + } + + /** + * Returns a string representation of this stack trace element. The + * format of this string depends on the implementation, but the following + * examples may be regarded as typical: + *
+ *
  • + * {@code "MyClass.mash(MyClass.java:9)"} - Here, {@code "MyClass"} + * is the fully-qualified name of the class containing the + * execution point represented by this stack trace element, + * {@code "mash"} is the name of the method containing the execution + * point, {@code "MyClass.java"} is the source file containing the + * execution point, and {@code "9"} is the line number of the source + * line containing the execution point. + *
  • + * {@code "MyClass.mash(MyClass.java)"} - As above, but the line + * number is unavailable. + *
  • + * {@code "MyClass.mash(Unknown Source)"} - As above, but neither + * the file name nor the line number are available. + *
  • + * {@code "MyClass.mash(Native Method)"} - As above, but neither + * the file name nor the line number are available, and the method + * containing the execution point is known to be a native method. + *
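Each of the formats listed above can be produced through the public constructor added in 1.5; the sketch below (a hypothetical demo class, assuming the toString logic shown in this file) builds one element per case.

    public class StackTraceElementDemo {
        public static void main(String[] args) {
            // File name and line number available.
            System.out.println(new StackTraceElement("MyClass", "mash", "MyClass.java", 9));
            // File name available, line number unavailable (negative).
            System.out.println(new StackTraceElement("MyClass", "mash", "MyClass.java", -1));
            // Neither file name nor line number available.
            System.out.println(new StackTraceElement("MyClass", "mash", null, -1));
            // Line number -2 marks a native method.
            System.out.println(new StackTraceElement("MyClass", "mash", null, -2));
        }
    }

Expected output, in order: MyClass.mash(MyClass.java:9), MyClass.mash(MyClass.java), MyClass.mash(Unknown Source), MyClass.mash(Native Method).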
+ * @see Throwable#printStackTrace() + */ + public String toString() { + // Android-changed: When ART cannot find a line number, the lineNumber field is set + // to the dex_pc and the fileName field is set to null. + StringBuilder result = new StringBuilder(); + result.append(getClassName()).append(".").append(methodName); + if (isNativeMethod()) { + result.append("(Native Method)"); + } else if (fileName != null) { + if (lineNumber >= 0) { + result.append("(").append(fileName).append(":").append(lineNumber).append(")"); + } else { + result.append("(").append(fileName).append(")"); + } + } else { + if (lineNumber >= 0) { + // The line number is actually the dex pc. + result.append("(Unknown Source:").append(lineNumber).append(")"); + } else { + result.append("(Unknown Source)"); + } + } + return result.toString(); + } + + /** + * Returns true if the specified object is another + * {@code StackTraceElement} instance representing the same execution + * point as this instance. Two stack trace elements {@code a} and + * {@code b} are equal if and only if: + *
{@code
+     *     equals(a.getFileName(), b.getFileName()) &&
+     *     a.getLineNumber() == b.getLineNumber() &&
+     *     equals(a.getClassName(), b.getClassName()) &&
+     *     equals(a.getMethodName(), b.getMethodName())
+     * }
+ * where {@code equals} has the semantics of {@link + * java.util.Objects#equals(Object, Object) Objects.equals}. + * + * @param obj the object to be compared with this stack trace element. + * @return true if the specified object is another + * {@code StackTraceElement} instance representing the same + * execution point as this instance. + */ + public boolean equals(Object obj) { + if (obj==this) + return true; + if (!(obj instanceof StackTraceElement)) + return false; + StackTraceElement e = (StackTraceElement)obj; + return e.declaringClass.equals(declaringClass) && + e.lineNumber == lineNumber && + Objects.equals(methodName, e.methodName) && + Objects.equals(fileName, e.fileName); + } + + /** + * Returns a hash code value for this stack trace element. + */ + public int hashCode() { + int result = 31*declaringClass.hashCode() + methodName.hashCode(); + result = 31*result + Objects.hashCode(fileName); + result = 31*result + lineNumber; + return result; + } + + private static final long serialVersionUID = 6992337162326171013L; +} diff --git a/src/MapleFE/test/java/openjdk/StackTraceElement.java.result b/src/MapleFE/test/java/openjdk/StackTraceElement.java.result new file mode 100644 index 0000000000000000000000000000000000000000..610f5efb0019c2a9debe1daa437eab8762710876 --- /dev/null +++ b/src/MapleFE/test/java/openjdk/StackTraceElement.java.result @@ -0,0 +1,77 @@ +Matched 5 tokens. +Matched 12 tokens. +Matched 438 tokens. +============= Module =========== +== Sub Tree == +package java.lang +== Sub Tree == +import java.util.Objects +== Sub Tree == +class StackTraceElement + Fields: + declaringClass methodName fileName lineNumber serialVersionUID=641129861 + Instance Initializer: + Constructors: + constructor StackTraceElement(declaringClass,methodName,fileName,lineNumber) throws: + this.declaringClass Assign Objects.requireNonNull(declaringClass,"Declaring class is null") + this.methodName Assign Objects.requireNonNull(methodName,"Method name is null") + this.fileName Assign fileName + this.lineNumber Assign lineNumber + Methods: + func getFileName() throws: + return fileName + func getLineNumber() throws: + return lineNumber + func getClassName() throws: + return declaringClass + func getMethodName() throws: + return methodName + func isNativeMethod() throws: + return lineNumber EQ -2 + func toString() throws: + Decl: result=new StringBuilder() + result.append(getClassName()).append(".").append(methodName) + cond-branch cond:isNativeMethod() + true branch : + result.append("(Native Method)") + false branch : + cond-branch cond:fileName NE null + true branch : + cond-branch cond:lineNumber GE 0 + true branch : + result.append("(").append(fileName).append(":").append(lineNumber).append(")") + false branch : + result.append("(").append(fileName).append(")") + + false branch : + cond-branch cond:lineNumber GE 0 + true branch : + result.append("(Unknown Source:").append(lineNumber).append(")") + false branch : + result.append("(Unknown Source)") + + + return result.toString() + func equals(obj) throws: + cond-branch cond:obj EQ this + true branch : + return true false branch : + + cond-branch cond:(obj instanceof StackTraceElement) + true branch : + return false false branch : + + Decl: e=(StackTraceElement)obj + return e.declaringClass.equals(declaringClass) Land e.lineNumber EQ lineNumber Land Objects.equals(methodName,e.methodName) Land Objects.equals(fileName,e.fileName) + func hashCode() throws: + Decl: result=31 Mul declaringClass.hashCode() Add methodName.hashCode() + result Assign 31 
Mul result Add Objects.hashCode(fileName) + result Assign 31 Mul result Add lineNumber + return result + LocalClasses: + LocalInterfaces: + +Identifier:result has no decl. +Identifier:result has no decl. +Identifier:result has no decl. +Identifier:result has no decl. diff --git a/src/MapleFE/test/java/openjdk/StrictMath.java b/src/MapleFE/test/java/openjdk/StrictMath.java new file mode 100644 index 0000000000000000000000000000000000000000..ae4af2bcac896b7a804e2d7cef1ab3ac7cef6c38 --- /dev/null +++ b/src/MapleFE/test/java/openjdk/StrictMath.java @@ -0,0 +1,1710 @@ +/* + * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +package java.lang; +import java.util.Random; +import sun.misc.DoubleConsts; + +/** + * The class {@code StrictMath} contains methods for performing basic + * numeric operations such as the elementary exponential, logarithm, + * square root, and trigonometric functions. + * + *
To help ensure portability of Java programs, the definitions of + * some of the numeric functions in this package require that they + * produce the same results as certain published algorithms. These + * algorithms are available from the well-known network library + * {@code netlib} as the package "Freely Distributable Math + * Library," {@code fdlibm}. These + * algorithms, which are written in the C programming language, are + * then to be understood as executed with all floating-point + * operations following the rules of Java floating-point arithmetic. + * + *
The Java math library is defined with respect to + * {@code fdlibm} version 5.3. Where {@code fdlibm} provides + * more than one definition for a function (such as + * {@code acos}), use the "IEEE 754 core function" version + * (residing in a file whose name begins with the letter + * {@code e}). The methods which require {@code fdlibm} + * semantics are {@code sin}, {@code cos}, {@code tan}, + * {@code asin}, {@code acos}, {@code atan}, + * {@code exp}, {@code log}, {@code log10}, + * {@code cbrt}, {@code atan2}, {@code pow}, + * {@code sinh}, {@code cosh}, {@code tanh}, + * {@code hypot}, {@code expm1}, and {@code log1p}. + * + *
+ * The platform uses signed two's complement integer arithmetic with + * int and long primitive types. The developer should choose + * the primitive type to ensure that arithmetic operations consistently + * produce correct results, which in some cases means the operations + * will not overflow the range of values of the computation. + * The best practice is to choose the primitive type and algorithm to avoid + * overflow. In cases where the size is {@code int} or {@code long} and + * overflow errors need to be detected, the methods {@code addExact}, + * {@code subtractExact}, {@code multiplyExact}, and {@code toIntExact} + * throw an {@code ArithmeticException} when the results overflow. + * For other arithmetic operations such as divide, absolute value, + * increment, decrement, and negation overflow occurs only with + * a specific minimum or maximum value and should be checked against + * the minimum or maximum as appropriate. + * + * @author unascribed + * @author Joseph D. Darcy + * @since 1.3 + */ + +public final class StrictMath { + + /** + * Don't let anyone instantiate this class. + */ + private StrictMath() {} + + /** + * The {@code double} value that is closer than any other to + * e, the base of the natural logarithms. + */ + public static final double E = 2.7182818284590452354; + + /** + * The {@code double} value that is closer than any other to + * pi, the ratio of the circumference of a circle to its + * diameter. + */ + public static final double PI = 3.14159265358979323846; + + /** + * Returns the trigonometric sine of an angle. Special cases: + *
  • If the argument is NaN or an infinity, then the + * result is NaN. + *
  • If the argument is zero, then the result is a zero with the + * same sign as the argument.
+ * + * @param a an angle, in radians. + * @return the sine of the argument. + */ + public static native double sin(double a); + + /** + * Returns the trigonometric cosine of an angle. Special cases: + *
  • If the argument is NaN or an infinity, then the + * result is NaN.
+ * + * @param a an angle, in radians. + * @return the cosine of the argument. + */ + public static native double cos(double a); + + /** + * Returns the trigonometric tangent of an angle. Special cases: + *
  • If the argument is NaN or an infinity, then the result + * is NaN. + *
  • If the argument is zero, then the result is a zero with the + * same sign as the argument.
+ * + * @param a an angle, in radians. + * @return the tangent of the argument. + */ + public static native double tan(double a); + + /** + * Returns the arc sine of a value; the returned angle is in the + * range -pi/2 through pi/2. Special cases: + *
  • If the argument is NaN or its absolute value is greater + * than 1, then the result is NaN. + *
  • If the argument is zero, then the result is a zero with the + * same sign as the argument.
+ * + * @param a the value whose arc sine is to be returned. + * @return the arc sine of the argument. + */ + public static native double asin(double a); + + /** + * Returns the arc cosine of a value; the returned angle is in the + * range 0.0 through pi. Special case: + *
  • If the argument is NaN or its absolute value is greater + * than 1, then the result is NaN.
+ * + * @param a the value whose arc cosine is to be returned. + * @return the arc cosine of the argument. + */ + public static native double acos(double a); + + /** + * Returns the arc tangent of a value; the returned angle is in the + * range -pi/2 through pi/2. Special cases: + *
  • If the argument is NaN, then the result is NaN. + *
  • If the argument is zero, then the result is a zero with the + * same sign as the argument.
+ * + * @param a the value whose arc tangent is to be returned. + * @return the arc tangent of the argument. + */ + public static native double atan(double a); + + /** + * Converts an angle measured in degrees to an approximately + * equivalent angle measured in radians. The conversion from + * degrees to radians is generally inexact. + * + * @param angdeg an angle, in degrees + * @return the measurement of the angle {@code angdeg} + * in radians. + */ + public static strictfp double toRadians(double angdeg) { + // Do not delegate to Math.toRadians(angdeg) because + // this method has the strictfp modifier. + return angdeg / 180.0 * PI; + } + + /** + * Converts an angle measured in radians to an approximately + * equivalent angle measured in degrees. The conversion from + * radians to degrees is generally inexact; users should + * not expect {@code cos(toRadians(90.0))} to exactly + * equal {@code 0.0}. + * + * @param angrad an angle, in radians + * @return the measurement of the angle {@code angrad} + * in degrees. + */ + public static strictfp double toDegrees(double angrad) { + // Do not delegate to Math.toDegrees(angrad) because + // this method has the strictfp modifier. + return angrad * 180.0 / PI; + } + + /** + * Returns Euler's number e raised to the power of a + * {@code double} value. Special cases: + *
  • If the argument is NaN, the result is NaN. + *
  • If the argument is positive infinity, then the result is + * positive infinity. + *
  • If the argument is negative infinity, then the result is + * positive zero.
+ * + * @param a the exponent to raise e to. + * @return the value e{@code a}, + * where e is the base of the natural logarithms. + */ + public static native double exp(double a); + + /** + * Returns the natural logarithm (base e) of a {@code double} + * value. Special cases: + *
  • If the argument is NaN or less than zero, then the result + * is NaN. + *
  • If the argument is positive infinity, then the result is + * positive infinity. + *
  • If the argument is positive zero or negative zero, then the + * result is negative infinity.
+ * + * @param a a value + * @return the value ln {@code a}, the natural logarithm of + * {@code a}. + */ + public static native double log(double a); + + + /** + * Returns the base 10 logarithm of a {@code double} value. + * Special cases: + * + *
  • If the argument is NaN or less than zero, then the result + * is NaN. + *
  • If the argument is positive infinity, then the result is + * positive infinity. + *
  • If the argument is positive zero or negative zero, then the + * result is negative infinity. + *
  • If the argument is equal to 10^n for + * integer n, then the result is n. + *
+ * + * @param a a value + * @return the base 10 logarithm of {@code a}. + * @since 1.5 + */ + public static native double log10(double a); + + /** + * Returns the correctly rounded positive square root of a + * {@code double} value. + * Special cases: + *
  • If the argument is NaN or less than zero, then the result + * is NaN. + *
  • If the argument is positive infinity, then the result is positive + * infinity. + *
  • If the argument is positive zero or negative zero, then the + * result is the same as the argument.
+ * Otherwise, the result is the {@code double} value closest to + * the true mathematical square root of the argument value. + * + * @param a a value. + * @return the positive square root of {@code a}. + */ + public static native double sqrt(double a); + + /** + * Returns the cube root of a {@code double} value. For + * positive finite {@code x}, {@code cbrt(-x) == + * -cbrt(x)}; that is, the cube root of a negative value is + * the negative of the cube root of that value's magnitude. + * Special cases: + * + *
  • If the argument is NaN, then the result is NaN. + * + *
  • If the argument is infinite, then the result is an infinity + * with the same sign as the argument. + * + *
  • If the argument is zero, then the result is a zero with the + * same sign as the argument. + * + *
+ * + * @param a a value. + * @return the cube root of {@code a}. + * @since 1.5 + */ + public static native double cbrt(double a); + + /** + * Computes the remainder operation on two arguments as prescribed + * by the IEEE 754 standard. + * The remainder value is mathematically equal to + * f1 - f2 × n, + * where n is the mathematical integer closest to the exact + * mathematical value of the quotient {@code f1/f2}, and if two + * mathematical integers are equally close to {@code f1/f2}, + * then n is the integer that is even. If the remainder is + * zero, its sign is the same as the sign of the first argument. + * Special cases: + *
  • If either argument is NaN, or the first argument is infinite, + * or the second argument is positive zero or negative zero, then the + * result is NaN. + *
  • If the first argument is finite and the second argument is + * infinite, then the result is the same as the first argument.
+ * + * @param f1 the dividend. + * @param f2 the divisor. + * @return the remainder when {@code f1} is divided by + * {@code f2}. + */ + public static native double IEEEremainder(double f1, double f2); + + /** + * Returns the smallest (closest to negative infinity) + * {@code double} value that is greater than or equal to the + * argument and is equal to a mathematical integer. Special cases: + *
  • If the argument value is already equal to a + * mathematical integer, then the result is the same as the + * argument.
  • If the argument is NaN or an infinity or + * positive zero or negative zero, then the result is the same as + * the argument.
  • If the argument value is less than zero but + * greater than -1.0, then the result is negative zero.
Note + * that the value of {@code StrictMath.ceil(x)} is exactly the + * value of {@code -StrictMath.floor(-x)}. + * + * @param a a value. + * @return the smallest (closest to negative infinity) + * floating-point value that is greater than or equal to + * the argument and is equal to a mathematical integer. + */ + public static double ceil(double a) { + return floorOrCeil(a, -0.0, 1.0, 1.0); + } + + /** + * Returns the largest (closest to positive infinity) + * {@code double} value that is less than or equal to the + * argument and is equal to a mathematical integer. Special cases: + *
  • If the argument value is already equal to a + * mathematical integer, then the result is the same as the + * argument.
  • If the argument is NaN or an infinity or + * positive zero or negative zero, then the result is the same as + * the argument.
+ * + * @param a a value. + * @return the largest (closest to positive infinity) + * floating-point value that less than or equal to the argument + * and is equal to a mathematical integer. + */ + public static double floor(double a) { + return floorOrCeil(a, -1.0, 0.0, -1.0); + } + + /** + * Internal method to share logic between floor and ceil. + * + * @param a the value to be floored or ceiled + * @param negativeBoundary result for values in (-1, 0) + * @param positiveBoundary result for values in (0, 1) + * @param increment value to add when the argument is non-integral + */ + private static double floorOrCeil(double a, + double negativeBoundary, + double positiveBoundary, + double sign) { + int exponent = Math.getExponent(a); + + if (exponent < 0) { + /* + * Absolute value of argument is less than 1. + * floorOrceil(-0.0) => -0.0 + * floorOrceil(+0.0) => +0.0 + */ + return ((a == 0.0) ? a : + ( (a < 0.0) ? negativeBoundary : positiveBoundary) ); + } else if (exponent >= 52) { + /* + * Infinity, NaN, or a value so large it must be integral. + */ + return a; + } + // Else the argument is either an integral value already XOR it + // has to be rounded to one. + assert exponent >= 0 && exponent <= 51; + + long doppel = Double.doubleToRawLongBits(a); + long mask = DoubleConsts.SIGNIF_BIT_MASK >> exponent; + + if ( (mask & doppel) == 0L ) + return a; // integral value + else { + double result = Double.longBitsToDouble(doppel & (~mask)); + if (sign*a > 0.0) + result = result + sign; + return result; + } + } + + /** + * Returns the {@code double} value that is closest in value + * to the argument and is equal to a mathematical integer. If two + * {@code double} values that are mathematical integers are + * equally close to the value of the argument, the result is the + * integer value that is even. Special cases: + *
  • If the argument value is already equal to a mathematical + * integer, then the result is the same as the argument. + *
  • If the argument is NaN or an infinity or positive zero or negative + * zero, then the result is the same as the argument.
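The round-half-even behaviour described above can be checked with a few literals; the demo class below is a hypothetical sketch assuming the usual StrictMath.rint semantics.

    public class RintDemo {
        public static void main(String[] args) {
            System.out.println(StrictMath.rint(2.5));  // expected: 2.0 (tie rounds to the even neighbour)
            System.out.println(StrictMath.rint(3.5));  // expected: 4.0 (tie rounds to the even neighbour)
            System.out.println(StrictMath.rint(-1.5)); // expected: -2.0
            System.out.println(StrictMath.rint(2.7));  // expected: 3.0 (ordinary rounding)
            System.out.println(StrictMath.rint(-0.3)); // expected: -0.0 (sign of the argument is kept)
        }
    }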
+ * + * @param a a value. + * @return the closest floating-point value to {@code a} that is + * equal to a mathematical integer. + * @author Joseph D. Darcy + */ + public static double rint(double a) { + /* + * If the absolute value of a is not less than 2^52, it + * is either a finite integer (the double format does not have + * enough significand bits for a number that large to have any + * fractional portion), an infinity, or a NaN. In any of + * these cases, rint of the argument is the argument. + * + * Otherwise, the sum (twoToThe52 + a ) will properly round + * away any fractional portion of a since ulp(twoToThe52) == + * 1.0; subtracting out twoToThe52 from this sum will then be + * exact and leave the rounded integer portion of a. + * + * This method does *not* need to be declared strictfp to get + * fully reproducible results. Whether or not a method is + * declared strictfp can only make a difference in the + * returned result if some operation would overflow or + * underflow with strictfp semantics. The operation + * (twoToThe52 + a ) cannot overflow since large values of a + * are screened out; the add cannot underflow since twoToThe52 + * is too large. The subtraction ((twoToThe52 + a ) - + * twoToThe52) will be exact as discussed above and thus + * cannot overflow or meaningfully underflow. Finally, the + * last multiply in the return statement is by plus or minus + * 1.0, which is exact too. + */ + double twoToThe52 = (double)(1L << 52); // 2^52 + double sign = Math.copySign(1.0, a); // preserve sign info + a = Math.abs(a); + + if (a < twoToThe52) { // E_min <= ilogb(a) <= 51 + a = ((twoToThe52 + a ) - twoToThe52); + } + + return sign * a; // restore original sign + } + + /** + * Returns the angle theta from the conversion of rectangular + * coordinates ({@code x}, {@code y}) to polar + * coordinates (r, theta). + * This method computes the phase theta by computing an arc tangent + * of {@code y/x} in the range of -pi to pi. Special + * cases: + *
  • If either argument is NaN, then the result is NaN. + *
  • If the first argument is positive zero and the second argument + * is positive, or the first argument is positive and finite and the + * second argument is positive infinity, then the result is positive + * zero. + *
  • If the first argument is negative zero and the second argument + * is positive, or the first argument is negative and finite and the + * second argument is positive infinity, then the result is negative zero. + *
  • If the first argument is positive zero and the second argument + * is negative, or the first argument is positive and finite and the + * second argument is negative infinity, then the result is the + * {@code double} value closest to pi. + *
  • If the first argument is negative zero and the second argument + * is negative, or the first argument is negative and finite and the + * second argument is negative infinity, then the result is the + * {@code double} value closest to -pi. + *
  • If the first argument is positive and the second argument is + * positive zero or negative zero, or the first argument is positive + * infinity and the second argument is finite, then the result is the + * {@code double} value closest to pi/2. + *
  • If the first argument is negative and the second argument is + * positive zero or negative zero, or the first argument is negative + * infinity and the second argument is finite, then the result is the + * {@code double} value closest to -pi/2. + *
  • If both arguments are positive infinity, then the result is the + * {@code double} value closest to pi/4. + *
  • If the first argument is positive infinity and the second argument + * is negative infinity, then the result is the {@code double} + * value closest to 3*pi/4. + *
  • If the first argument is negative infinity and the second argument + * is positive infinity, then the result is the {@code double} value + * closest to -pi/4. + *
  • If both arguments are negative infinity, then the result is the + * {@code double} value closest to -3*pi/4.
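A handful of finite and signed-zero inputs is enough to see the quadrant rules listed above; the demo class below is a hypothetical sketch assuming the usual StrictMath.atan2 semantics.

    public class Atan2Demo {
        public static void main(String[] args) {
            System.out.println(StrictMath.atan2(1.0, 1.0));   // expected: 0.7853981633974483 (pi/4)
            System.out.println(StrictMath.atan2(1.0, -1.0));  // expected: 2.356194490192345 (3*pi/4)
            System.out.println(StrictMath.atan2(-1.0, -1.0)); // expected: -2.356194490192345 (-3*pi/4)
            System.out.println(StrictMath.atan2(0.0, -1.0));  // expected: 3.141592653589793 (closest to pi)
            System.out.println(StrictMath.atan2(-0.0, 1.0));  // expected: -0.0 (negative-zero case)
        }
    }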
+ * + * @param y the ordinate coordinate + * @param x the abscissa coordinate + * @return the theta component of the point + * (rtheta) + * in polar coordinates that corresponds to the point + * (xy) in Cartesian coordinates. + */ + public static native double atan2(double y, double x); + + + /** + * Returns the value of the first argument raised to the power of the + * second argument. Special cases: + * + *
  • If the second argument is positive or negative zero, then the + * result is 1.0. + *
  • If the second argument is 1.0, then the result is the same as the + * first argument. + *
  • If the second argument is NaN, then the result is NaN. + *
  • If the first argument is NaN and the second argument is nonzero, + * then the result is NaN. + * + *
  • If + *
    • the absolute value of the first argument is greater than 1 + * and the second argument is positive infinity, or + *
    • the absolute value of the first argument is less than 1 and + * the second argument is negative infinity, + *
    + * then the result is positive infinity. + * + *
  • If + *
    • the absolute value of the first argument is greater than 1 and + * the second argument is negative infinity, or + *
    • the absolute value of the + * first argument is less than 1 and the second argument is positive + * infinity, + *
    + * then the result is positive zero. + * + *
  • If the absolute value of the first argument equals 1 and the + * second argument is infinite, then the result is NaN. + * + *
  • If + *
    • the first argument is positive zero and the second argument + * is greater than zero, or + *
    • the first argument is positive infinity and the second + * argument is less than zero, + *
    + * then the result is positive zero. + * + *
  • If + *
    • the first argument is positive zero and the second argument + * is less than zero, or + *
    • the first argument is positive infinity and the second + * argument is greater than zero, + *
    + * then the result is positive infinity. + * + *
  • If + *
    • the first argument is negative zero and the second argument + * is greater than zero but not a finite odd integer, or + *
    • the first argument is negative infinity and the second + * argument is less than zero but not a finite odd integer, + *
    + * then the result is positive zero. + * + *
  • If + *
    • the first argument is negative zero and the second argument + * is a positive finite odd integer, or + *
    • the first argument is negative infinity and the second + * argument is a negative finite odd integer, + *
    + * then the result is negative zero. + * + *
  • If + *
    • the first argument is negative zero and the second argument + * is less than zero but not a finite odd integer, or + *
    • the first argument is negative infinity and the second + * argument is greater than zero but not a finite odd integer, + *
    + * then the result is positive infinity. + * + *
  • If + *
    • the first argument is negative zero and the second argument + * is a negative finite odd integer, or + *
    • the first argument is negative infinity and the second + * argument is a positive finite odd integer, + *
    + * then the result is negative infinity. + * + *
  • If the first argument is finite and less than zero + *
    • if the second argument is a finite even integer, the + * result is equal to the result of raising the absolute value of + * the first argument to the power of the second argument + * + *
    • if the second argument is a finite odd integer, the result + * is equal to the negative of the result of raising the absolute + * value of the first argument to the power of the second + * argument + * + *
    • if the second argument is finite and not an integer, then + * the result is NaN. + *
+ * + *
  • If both arguments are integers, then the result is exactly equal + * to the mathematical result of raising the first argument to the power + * of the second argument if that result can in fact be represented + * exactly as a {@code double} value.
+ * + *
(In the foregoing descriptions, a floating-point value is + * considered to be an integer if and only if it is finite and a + * fixed point of the method {@link #ceil ceil} or, + * equivalently, a fixed point of the method {@link #floor + * floor}. A value is a fixed point of a one-argument + * method if and only if the result of applying the method to the + * value is equal to the value.) + * + * @param a base. + * @param b the exponent. + * @return the value {@code a}{@code b}. + */ + public static native double pow(double a, double b); + + /** + * Returns the closest {@code int} to the argument, with ties + * rounding to positive infinity. + * + *
Special cases: + *
  • If the argument is NaN, the result is 0. + *
  • If the argument is negative infinity or any value less than or + * equal to the value of {@code Integer.MIN_VALUE}, the result is + * equal to the value of {@code Integer.MIN_VALUE}. + *
  • If the argument is positive infinity or any value greater than or + * equal to the value of {@code Integer.MAX_VALUE}, the result is + * equal to the value of {@code Integer.MAX_VALUE}.
+ * + * @param a a floating-point value to be rounded to an integer. + * @return the value of the argument rounded to the nearest + * {@code int} value. + * @see java.lang.Integer#MAX_VALUE + * @see java.lang.Integer#MIN_VALUE + */ + public static int round(float a) { + return Math.round(a); + } + + /** + * Returns the closest {@code long} to the argument, with ties + * rounding to positive infinity. + * + *
Special cases: + *
  • If the argument is NaN, the result is 0. + *
  • If the argument is negative infinity or any value less than or + * equal to the value of {@code Long.MIN_VALUE}, the result is + * equal to the value of {@code Long.MIN_VALUE}. + *
  • If the argument is positive infinity or any value greater than or + * equal to the value of {@code Long.MAX_VALUE}, the result is + * equal to the value of {@code Long.MAX_VALUE}.
+ * + * @param a a floating-point value to be rounded to a + * {@code long}. + * @return the value of the argument rounded to the nearest + * {@code long} value. + * @see java.lang.Long#MAX_VALUE + * @see java.lang.Long#MIN_VALUE + */ + public static long round(double a) { + return Math.round(a); + } + + private static final class RandomNumberGeneratorHolder { + static final Random randomNumberGenerator = new Random(); + } + + /** + * Returns a {@code double} value with a positive sign, greater + * than or equal to {@code 0.0} and less than {@code 1.0}. + * Returned values are chosen pseudorandomly with (approximately) + * uniform distribution from that range. + * + *
When this method is first called, it creates a single new + * pseudorandom-number generator, exactly as if by the expression + * + *
{@code new java.util.Random()}
+ * + * This new pseudorandom-number generator is used thereafter for + * all calls to this method and is used nowhere else. + * + *
This method is properly synchronized to allow correct use by + * more than one thread. However, if many threads need to generate + * pseudorandom numbers at a great rate, it may reduce contention + * for each thread to have its own pseudorandom-number generator. + * + * @return a pseudorandom {@code double} greater than or equal + * to {@code 0.0} and less than {@code 1.0}. + * @see Random#nextDouble() + */ + public static double random() { + return RandomNumberGeneratorHolder.randomNumberGenerator.nextDouble(); + } + + /** + * Returns the sum of its arguments, + * throwing an exception if the result overflows an {@code int}. + * + * @param x the first value + * @param y the second value + * @return the result + * @throws ArithmeticException if the result overflows an int + * @see Math#addExact(int,int) + * @since 1.8 + */ + public static int addExact(int x, int y) { + return Math.addExact(x, y); + } + + /** + * Returns the sum of its arguments, + * throwing an exception if the result overflows a {@code long}. + * + * @param x the first value + * @param y the second value + * @return the result + * @throws ArithmeticException if the result overflows a long + * @see Math#addExact(long,long) + * @since 1.8 + */ + public static long addExact(long x, long y) { + return Math.addExact(x, y); + } + + /** + * Returns the difference of the arguments, + * throwing an exception if the result overflows an {@code int}. + * + * @param x the first value + * @param y the second value to subtract from the first + * @return the result + * @throws ArithmeticException if the result overflows an int + * @see Math#subtractExact(int,int) + * @since 1.8 + */ + public static int subtractExact(int x, int y) { + return Math.subtractExact(x, y); + } + + /** + * Returns the difference of the arguments, + * throwing an exception if the result overflows a {@code long}. + * + * @param x the first value + * @param y the second value to subtract from the first + * @return the result + * @throws ArithmeticException if the result overflows a long + * @see Math#subtractExact(long,long) + * @since 1.8 + */ + public static long subtractExact(long x, long y) { + return Math.subtractExact(x, y); + } + + /** + * Returns the product of the arguments, + * throwing an exception if the result overflows an {@code int}. + * + * @param x the first value + * @param y the second value + * @return the result + * @throws ArithmeticException if the result overflows an int + * @see Math#multiplyExact(int,int) + * @since 1.8 + */ + public static int multiplyExact(int x, int y) { + return Math.multiplyExact(x, y); + } + + /** + * Returns the product of the arguments, + * throwing an exception if the result overflows a {@code long}. + * + * @param x the first value + * @param y the second value + * @return the result + * @throws ArithmeticException if the result overflows a long + * @see Math#multiplyExact(long,long) + * @since 1.8 + */ + public static long multiplyExact(long x, long y) { + return Math.multiplyExact(x, y); + } + + /** + * Returns the value of the {@code long} argument; + * throwing an exception if the value overflows an {@code int}. 
+ * + * @param value the long value + * @return the argument as an int + * @throws ArithmeticException if the {@code argument} overflows an int + * @see Math#toIntExact(long) + * @since 1.8 + */ + public static int toIntExact(long value) { + return Math.toIntExact(value); + } + + /** + * Returns the largest (closest to positive infinity) + * {@code int} value that is less than or equal to the algebraic quotient. + * There is one special case, if the dividend is the + * {@linkplain Integer#MIN_VALUE Integer.MIN_VALUE} and the divisor is {@code -1}, + * then integer overflow occurs and + * the result is equal to the {@code Integer.MIN_VALUE}. + *
+ * See {@link Math#floorDiv(int, int) Math.floorDiv} for examples and + * a comparison to the integer division {@code /} operator. + * + * @param x the dividend + * @param y the divisor + * @return the largest (closest to positive infinity) + * {@code int} value that is less than or equal to the algebraic quotient. + * @throws ArithmeticException if the divisor {@code y} is zero + * @see Math#floorDiv(int, int) + * @see Math#floor(double) + * @since 1.8 + */ + public static int floorDiv(int x, int y) { + return Math.floorDiv(x, y); + } + + /** + * Returns the largest (closest to positive infinity) + * {@code long} value that is less than or equal to the algebraic quotient. + * There is one special case, if the dividend is the + * {@linkplain Long#MIN_VALUE Long.MIN_VALUE} and the divisor is {@code -1}, + * then integer overflow occurs and + * the result is equal to the {@code Long.MIN_VALUE}. + *
+ * See {@link Math#floorDiv(int, int) Math.floorDiv} for examples and + * a comparison to the integer division {@code /} operator. + * + * @param x the dividend + * @param y the divisor + * @return the largest (closest to positive infinity) + * {@code long} value that is less than or equal to the algebraic quotient. + * @throws ArithmeticException if the divisor {@code y} is zero + * @see Math#floorDiv(long, long) + * @see Math#floor(double) + * @since 1.8 + */ + public static long floorDiv(long x, long y) { + return Math.floorDiv(x, y); + } + + /** + * Returns the floor modulus of the {@code int} arguments. + *
+ * The floor modulus is {@code x - (floorDiv(x, y) * y)}, + * has the same sign as the divisor {@code y}, and + * is in the range of {@code -abs(y) < r < +abs(y)}. + *
+ * The relationship between {@code floorDiv} and {@code floorMod} is such that: + *
+ *
  • {@code floorDiv(x, y) * y + floorMod(x, y) == x} + *
+ *
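The identity in the single list item above, together with the sign rule, can be seen with a negative dividend; the demo class below is a hypothetical sketch assuming the delegation to Math.floorDiv and Math.floorMod used in this class.

    public class FloorModDemo {
        public static void main(String[] args) {
            int x = -7, y = 3;
            int q = StrictMath.floorDiv(x, y);  // expected: -3 (floor of -2.33...)
            int r = StrictMath.floorMod(x, y);  // expected: 2 (same sign as the divisor y)
            System.out.println(q * y + r == x); // true: floorDiv(x, y) * y + floorMod(x, y) == x
            System.out.println(x / y);          // -2: the / operator truncates toward zero
            System.out.println(x % y);          // -1: the % operator takes the sign of the dividend
        }
    }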
+ * See {@link Math#floorMod(int, int) Math.floorMod} for examples and + * a comparison to the {@code %} operator. + * + * @param x the dividend + * @param y the divisor + * @return the floor modulus {@code x - (floorDiv(x, y) * y)} + * @throws ArithmeticException if the divisor {@code y} is zero + * @see Math#floorMod(int, int) + * @see StrictMath#floorDiv(int, int) + * @since 1.8 + */ + public static int floorMod(int x, int y) { + return Math.floorMod(x , y); + } + /** + * Returns the floor modulus of the {@code long} arguments. + *
+ * The floor modulus is {@code x - (floorDiv(x, y) * y)}, + * has the same sign as the divisor {@code y}, and + * is in the range of {@code -abs(y) < r < +abs(y)}. + *
+ * The relationship between {@code floorDiv} and {@code floorMod} is such that: + *
+ *
  • {@code floorDiv(x, y) * y + floorMod(x, y) == x} + *
+ *
+ * See {@link Math#floorMod(int, int) Math.floorMod} for examples and + * a comparison to the {@code %} operator. + * + * @param x the dividend + * @param y the divisor + * @return the floor modulus {@code x - (floorDiv(x, y) * y)} + * @throws ArithmeticException if the divisor {@code y} is zero + * @see Math#floorMod(long, long) + * @see StrictMath#floorDiv(long, long) + * @since 1.8 + */ + public static long floorMod(long x, long y) { + return Math.floorMod(x, y); + } + + /** + * Returns the absolute value of an {@code int} value. + * If the argument is not negative, the argument is returned. + * If the argument is negative, the negation of the argument is returned. + * + *
Note that if the argument is equal to the value of + * {@link Integer#MIN_VALUE}, the most negative representable + * {@code int} value, the result is that same value, which is + * negative. + * + * @param a the argument whose absolute value is to be determined. + * @return the absolute value of the argument. + */ + public static int abs(int a) { + return Math.abs(a); + } + + /** + * Returns the absolute value of a {@code long} value. + * If the argument is not negative, the argument is returned. + * If the argument is negative, the negation of the argument is returned. + * + *
Note that if the argument is equal to the value of + * {@link Long#MIN_VALUE}, the most negative representable + * {@code long} value, the result is that same value, which + * is negative. + * + * @param a the argument whose absolute value is to be determined. + * @return the absolute value of the argument. + */ + public static long abs(long a) { + return Math.abs(a); + } + + /** + * Returns the absolute value of a {@code float} value. + * If the argument is not negative, the argument is returned. + * If the argument is negative, the negation of the argument is returned. + * Special cases: + *
  • If the argument is positive zero or negative zero, the + * result is positive zero. + *
  • If the argument is infinite, the result is positive infinity. + *
  • If the argument is NaN, the result is NaN.
+ * In other words, the result is the same as the value of the expression: + *
{@code Float.intBitsToFloat(0x7fffffff & Float.floatToIntBits(a))} + * + * @param a the argument whose absolute value is to be determined + * @return the absolute value of the argument. + */ + public static float abs(float a) { + return Math.abs(a); + } + + /** + * Returns the absolute value of a {@code double} value. + * If the argument is not negative, the argument is returned. + * If the argument is negative, the negation of the argument is returned. + * Special cases: + *
  • If the argument is positive zero or negative zero, the result + * is positive zero. + *
  • If the argument is infinite, the result is positive infinity. + *
  • If the argument is NaN, the result is NaN.
+ * In other words, the result is the same as the value of the expression: + *
{@code Double.longBitsToDouble((Double.doubleToLongBits(a)<<1)>>>1)} + * + * @param a the argument whose absolute value is to be determined + * @return the absolute value of the argument. + */ + public static double abs(double a) { + return Math.abs(a); + } + + /** + * Returns the greater of two {@code int} values. That is, the + * result is the argument closer to the value of + * {@link Integer#MAX_VALUE}. If the arguments have the same value, + * the result is that same value. + * + * @param a an argument. + * @param b another argument. + * @return the larger of {@code a} and {@code b}. + */ + public static int max(int a, int b) { + return Math.max(a, b); + } + + /** + * Returns the greater of two {@code long} values. That is, the + * result is the argument closer to the value of + * {@link Long#MAX_VALUE}. If the arguments have the same value, + * the result is that same value. + * + * @param a an argument. + * @param b another argument. + * @return the larger of {@code a} and {@code b}. + */ + public static long max(long a, long b) { + return Math.max(a, b); + } + + /** + * Returns the greater of two {@code float} values. That is, + * the result is the argument closer to positive infinity. If the + * arguments have the same value, the result is that same + * value. If either value is NaN, then the result is NaN. Unlike + * the numerical comparison operators, this method considers + * negative zero to be strictly smaller than positive zero. If one + * argument is positive zero and the other negative zero, the + * result is positive zero. + * + * @param a an argument. + * @param b another argument. + * @return the larger of {@code a} and {@code b}. + */ + public static float max(float a, float b) { + return Math.max(a, b); + } + + /** + * Returns the greater of two {@code double} values. That + * is, the result is the argument closer to positive infinity. If + * the arguments have the same value, the result is that same + * value. If either value is NaN, then the result is NaN. Unlike + * the numerical comparison operators, this method considers + * negative zero to be strictly smaller than positive zero. If one + * argument is positive zero and the other negative zero, the + * result is positive zero. + * + * @param a an argument. + * @param b another argument. + * @return the larger of {@code a} and {@code b}. + */ + public static double max(double a, double b) { + return Math.max(a, b); + } + + /** + * Returns the smaller of two {@code int} values. That is, + * the result the argument closer to the value of + * {@link Integer#MIN_VALUE}. If the arguments have the same + * value, the result is that same value. + * + * @param a an argument. + * @param b another argument. + * @return the smaller of {@code a} and {@code b}. + */ + public static int min(int a, int b) { + return Math.min(a, b); + } + + /** + * Returns the smaller of two {@code long} values. That is, + * the result is the argument closer to the value of + * {@link Long#MIN_VALUE}. If the arguments have the same + * value, the result is that same value. + * + * @param a an argument. + * @param b another argument. + * @return the smaller of {@code a} and {@code b}. + */ + public static long min(long a, long b) { + return Math.min(a, b); + } + + /** + * Returns the smaller of two {@code float} values. That is, + * the result is the value closer to negative infinity. If the + * arguments have the same value, the result is that same + * value. If either value is NaN, then the result is NaN. 
Unlike + * the numerical comparison operators, this method considers + * negative zero to be strictly smaller than positive zero. If + * one argument is positive zero and the other is negative zero, + * the result is negative zero. + * + * @param a an argument. + * @param b another argument. + * @return the smaller of {@code a} and {@code b.} + */ + public static float min(float a, float b) { + return Math.min(a, b); + } + + /** + * Returns the smaller of two {@code double} values. That + * is, the result is the value closer to negative infinity. If the + * arguments have the same value, the result is that same + * value. If either value is NaN, then the result is NaN. Unlike + * the numerical comparison operators, this method considers + * negative zero to be strictly smaller than positive zero. If one + * argument is positive zero and the other is negative zero, the + * result is negative zero. + * + * @param a an argument. + * @param b another argument. + * @return the smaller of {@code a} and {@code b}. + */ + public static double min(double a, double b) { + return Math.min(a, b); + } + + /** + * Returns the size of an ulp of the argument. An ulp, unit in + * the last place, of a {@code double} value is the positive + * distance between this floating-point value and the {@code + * double} value next larger in magnitude. Note that for non-NaN + * x, ulp(-x) == ulp(x). + * + *
Special Cases: + *
+ *
  • If the argument is NaN, then the result is NaN. + *
  • If the argument is positive or negative infinity, then the + * result is positive infinity. + *
  • If the argument is positive or negative zero, then the result is + * {@code Double.MIN_VALUE}. + *
  • If the argument is ±{@code Double.MAX_VALUE}, then + * the result is equal to 2^971. + *
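The special cases above, together with an ordinary finite value, can be probed directly; the demo class below is a hypothetical sketch assuming ulp delegates to Math.ulp as in this file.

    public class UlpDemo {
        public static void main(String[] args) {
            System.out.println(StrictMath.ulp(1.0));                       // expected: 2.220446049250313E-16 (2^-52)
            System.out.println(StrictMath.ulp(0.0));                       // expected: 4.9E-324 (Double.MIN_VALUE)
            System.out.println(StrictMath.ulp(Double.MAX_VALUE));          // expected: about 1.9958E292 (2^971)
            System.out.println(StrictMath.ulp(Double.POSITIVE_INFINITY));  // expected: Infinity
        }
    }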
+ * + * @param d the floating-point value whose ulp is to be returned + * @return the size of an ulp of the argument + * @author Joseph D. Darcy + * @since 1.5 + */ + public static double ulp(double d) { + return Math.ulp(d); + } + + /** + * Returns the size of an ulp of the argument. An ulp, unit in + * the last place, of a {@code float} value is the positive + * distance between this floating-point value and the {@code + * float} value next larger in magnitude. Note that for non-NaN + * x, ulp(-x) == ulp(x). + * + *
Special Cases: + *
+ *
  • If the argument is NaN, then the result is NaN. + *
  • If the argument is positive or negative infinity, then the + * result is positive infinity. + *
  • If the argument is positive or negative zero, then the result is + * {@code Float.MIN_VALUE}. + *
  • If the argument is ±{@code Float.MAX_VALUE}, then + * the result is equal to 2^104. + *
+ * + * @param f the floating-point value whose ulp is to be returned + * @return the size of an ulp of the argument + * @author Joseph D. Darcy + * @since 1.5 + */ + public static float ulp(float f) { + return Math.ulp(f); + } + + /** + * Returns the signum function of the argument; zero if the argument + * is zero, 1.0 if the argument is greater than zero, -1.0 if the + * argument is less than zero. + * + *
Special Cases: + *
+ *
  • If the argument is NaN, then the result is NaN. + *
  • If the argument is positive zero or negative zero, then the + * result is the same as the argument. + *
+ * + * @param d the floating-point value whose signum is to be returned + * @return the signum function of the argument + * @author Joseph D. Darcy + * @since 1.5 + */ + public static double signum(double d) { + return Math.signum(d); + } + + /** + * Returns the signum function of the argument; zero if the argument + * is zero, 1.0f if the argument is greater than zero, -1.0f if the + * argument is less than zero. + * + *
Special Cases: + *
+ *
  • If the argument is NaN, then the result is NaN. + *
  • If the argument is positive zero or negative zero, then the + * result is the same as the argument. + *
+ * + * @param f the floating-point value whose signum is to be returned + * @return the signum function of the argument + * @author Joseph D. Darcy + * @since 1.5 + */ + public static float signum(float f) { + return Math.signum(f); + } + + /** + * Returns the hyperbolic sine of a {@code double} value. + * The hyperbolic sine of x is defined to be + * (ex - e-x)/2 + * where e is {@linkplain Math#E Euler's number}. + * + *
Special cases: + *
+ *
  • If the argument is NaN, then the result is NaN. + * + *
  • If the argument is infinite, then the result is an infinity + * with the same sign as the argument. + * + *
  • If the argument is zero, then the result is a zero with the + * same sign as the argument. + * + *
+ * + * @param x The number whose hyperbolic sine is to be returned. + * @return The hyperbolic sine of {@code x}. + * @since 1.5 + */ + public static native double sinh(double x); + + /** + * Returns the hyperbolic cosine of a {@code double} value. + * The hyperbolic cosine of x is defined to be + * (ex + e-x)/2 + * where e is {@linkplain Math#E Euler's number}. + * + *
Special cases: + *
+ *
  • If the argument is NaN, then the result is NaN. + * + *
  • If the argument is infinite, then the result is positive + * infinity. + * + *
  • If the argument is zero, then the result is {@code 1.0}. + * + *
+ * + * @param x The number whose hyperbolic cosine is to be returned. + * @return The hyperbolic cosine of {@code x}. + * @since 1.5 + */ + public static native double cosh(double x); + + /** + * Returns the hyperbolic tangent of a {@code double} value. + * The hyperbolic tangent of x is defined to be + * (ex - e-x)/(ex + e-x), + * in other words, {@linkplain Math#sinh + * sinh(x)}/{@linkplain Math#cosh cosh(x)}. Note + * that the absolute value of the exact tanh is always less than + * 1. + * + *
Special cases: + *
+ *
  • If the argument is NaN, then the result is NaN. + * + *
  • If the argument is zero, then the result is a zero with the + * same sign as the argument. + * + *
  • If the argument is positive infinity, then the result is + * {@code +1.0}. + * + *
  • If the argument is negative infinity, then the result is + * {@code -1.0}. + * + *
+ * + * @param x The number whose hyperbolic tangent is to be returned. + * @return The hyperbolic tangent of {@code x}. + * @since 1.5 + */ + public static native double tanh(double x); + + /** + * Returns sqrt(x2 +y2) + * without intermediate overflow or underflow. + * + *
Special cases: + *
+ *
  • If either argument is infinite, then the result + * is positive infinity. + * + *
  • If either argument is NaN and neither argument is infinite, + * then the result is NaN. + * + *
+ * + * @param x a value + * @param y a value + * @return sqrt(x2 +y2) + * without intermediate overflow or underflow + * @since 1.5 + */ + public static native double hypot(double x, double y); + + /** + * Returns ex -1. Note that for values of + * x near 0, the exact sum of + * {@code expm1(x)} + 1 is much closer to the true + * result of ex than {@code exp(x)}. + * + *
Special cases: + *
+ *
  • If the argument is NaN, the result is NaN. + * + *
  • If the argument is positive infinity, then the result is + * positive infinity. + * + *
  • If the argument is negative infinity, then the result is + * -1.0. + * + *
  • If the argument is zero, then the result is a zero with the + * same sign as the argument. + * + *
+ * + * @param x the exponent to raise e to in the computation of + * e{@code x} -1. + * @return the value e{@code x} - 1. + * @since 1.5 + */ + public static native double expm1(double x); + + /** + * Returns the natural logarithm of the sum of the argument and 1. + * Note that for small values {@code x}, the result of + * {@code log1p(x)} is much closer to the true result of ln(1 + * + {@code x}) than the floating-point evaluation of + * {@code log(1.0+x)}. + * + *
Special cases: + *
+ *
  • If the argument is NaN or less than -1, then the result is + * NaN. + * + *
  • If the argument is positive infinity, then the result is + * positive infinity. + * + *
  • If the argument is negative one, then the result is + * negative infinity. + * + *
  • If the argument is zero, then the result is a zero with the + * same sign as the argument. + * + *
+ * + * @param x a value + * @return the value ln({@code x} + 1), the natural + * log of {@code x} + 1 + * @since 1.5 + */ + public static native double log1p(double x); + + /** + * Returns the first floating-point argument with the sign of the + * second floating-point argument. For this method, a NaN + * {@code sign} argument is always treated as if it were + * positive. + * + * @param magnitude the parameter providing the magnitude of the result + * @param sign the parameter providing the sign of the result + * @return a value with the magnitude of {@code magnitude} + * and the sign of {@code sign}. + * @since 1.6 + */ + public static double copySign(double magnitude, double sign) { + return Math.copySign(magnitude, (Double.isNaN(sign)?1.0d:sign)); + } + + /** + * Returns the first floating-point argument with the sign of the + * second floating-point argument. For this method, a NaN + * {@code sign} argument is always treated as if it were + * positive. + * + * @param magnitude the parameter providing the magnitude of the result + * @param sign the parameter providing the sign of the result + * @return a value with the magnitude of {@code magnitude} + * and the sign of {@code sign}. + * @since 1.6 + */ + public static float copySign(float magnitude, float sign) { + return Math.copySign(magnitude, (Float.isNaN(sign)?1.0f:sign)); + } + /** + * Returns the unbiased exponent used in the representation of a + * {@code float}. Special cases: + * + *
  • If the argument is NaN or infinite, then the result is + * {@link Float#MAX_EXPONENT} + 1. + *
  • If the argument is zero or subnormal, then the result is + * {@link Float#MIN_EXPONENT} -1. + *
+ * @param f a {@code float} value + * @return the unbiased exponent of the argument + * @since 1.6 + */ + public static int getExponent(float f) { + return Math.getExponent(f); + } + + /** + * Returns the unbiased exponent used in the representation of a + * {@code double}. Special cases: + * + *
  • If the argument is NaN or infinite, then the result is + * {@link Double#MAX_EXPONENT} + 1. + *
  • If the argument is zero or subnormal, then the result is + * {@link Double#MIN_EXPONENT} -1. + *
+ * @param d a {@code double} value + * @return the unbiased exponent of the argument + * @since 1.6 + */ + public static int getExponent(double d) { + return Math.getExponent(d); + } + + /** + * Returns the floating-point number adjacent to the first + * argument in the direction of the second argument. If both + * arguments compare as equal the second argument is returned. + * + *

Special cases: + *
  • If either argument is a NaN, then NaN is returned. + * + *
  • If both arguments are signed zeros, {@code direction} + * is returned unchanged (as implied by the requirement of + * returning the second argument if the arguments compare as + * equal). + * + *
  • If {@code start} is + * ±{@link Double#MIN_VALUE} and {@code direction} + * has a value such that the result should have a smaller + * magnitude, then a zero with the same sign as {@code start} + * is returned. + * + *
  • If {@code start} is infinite and + * {@code direction} has a value such that the result should + * have a smaller magnitude, {@link Double#MAX_VALUE} with the + * same sign as {@code start} is returned. + * + *
  • If {@code start} is equal to ± + * {@link Double#MAX_VALUE} and {@code direction} has a + * value such that the result should have a larger magnitude, an + * infinity with same sign as {@code start} is returned. + *
+ * + * @param start starting floating-point value + * @param direction value indicating which of + * {@code start}'s neighbors or {@code start} should + * be returned + * @return The floating-point number adjacent to {@code start} in the + * direction of {@code direction}. + * @since 1.6 + */ + public static double nextAfter(double start, double direction) { + return Math.nextAfter(start, direction); + } + + /** + * Returns the floating-point number adjacent to the first + * argument in the direction of the second argument. If both + * arguments compare as equal a value equivalent to the second argument + * is returned. + * + *

Special cases: + *
  • If either argument is a NaN, then NaN is returned. + * + *
  • If both arguments are signed zeros, a value equivalent + * to {@code direction} is returned. + * + *
  • If {@code start} is + * ±{@link Float#MIN_VALUE} and {@code direction} + * has a value such that the result should have a smaller + * magnitude, then a zero with the same sign as {@code start} + * is returned. + * + *
  • If {@code start} is infinite and + * {@code direction} has a value such that the result should + * have a smaller magnitude, {@link Float#MAX_VALUE} with the + * same sign as {@code start} is returned. + * + *
  • If {@code start} is equal to ± + * {@link Float#MAX_VALUE} and {@code direction} has a + * value such that the result should have a larger magnitude, an + * infinity with same sign as {@code start} is returned. + *
+ * + * @param start starting floating-point value + * @param direction value indicating which of + * {@code start}'s neighbors or {@code start} should + * be returned + * @return The floating-point number adjacent to {@code start} in the + * direction of {@code direction}. + * @since 1.6 + */ + public static float nextAfter(float start, double direction) { + return Math.nextAfter(start, direction); + } + + /** + * Returns the floating-point value adjacent to {@code d} in + * the direction of positive infinity. This method is + * semantically equivalent to {@code nextAfter(d, + * Double.POSITIVE_INFINITY)}; however, a {@code nextUp} + * implementation may run faster than its equivalent + * {@code nextAfter} call. + * + *

Special Cases: + *
  • If the argument is NaN, the result is NaN. + * + *
  • If the argument is positive infinity, the result is + * positive infinity. + * + *
  • If the argument is zero, the result is + * {@link Double#MIN_VALUE} + * + *
+ * + * @param d starting floating-point value + * @return The adjacent floating-point value closer to positive + * infinity. + * @since 1.6 + */ + public static double nextUp(double d) { + return Math.nextUp(d); + } + + /** + * Returns the floating-point value adjacent to {@code f} in + * the direction of positive infinity. This method is + * semantically equivalent to {@code nextAfter(f, + * Float.POSITIVE_INFINITY)}; however, a {@code nextUp} + * implementation may run faster than its equivalent + * {@code nextAfter} call. + * + *

Special Cases: + *
  • If the argument is NaN, the result is NaN. + * + *
  • If the argument is positive infinity, the result is + * positive infinity. + * + *
  • If the argument is zero, the result is + * {@link Float#MIN_VALUE} + * + *
+ * + * @param f starting floating-point value + * @return The adjacent floating-point value closer to positive + * infinity. + * @since 1.6 + */ + public static float nextUp(float f) { + return Math.nextUp(f); + } + + /** + * Returns the floating-point value adjacent to {@code d} in + * the direction of negative infinity. This method is + * semantically equivalent to {@code nextAfter(d, + * Double.NEGATIVE_INFINITY)}; however, a + * {@code nextDown} implementation may run faster than its + * equivalent {@code nextAfter} call. + * + *

Special Cases: + *
  • If the argument is NaN, the result is NaN. + * + *
  • If the argument is negative infinity, the result is + * negative infinity. + * + *
  • If the argument is zero, the result is + * {@code -Double.MIN_VALUE} + * + *
+ * + * @param d starting floating-point value + * @return The adjacent floating-point value closer to negative + * infinity. + * @since 1.8 + */ + public static double nextDown(double d) { + return Math.nextDown(d); + } + + /** + * Returns the floating-point value adjacent to {@code f} in + * the direction of negative infinity. This method is + * semantically equivalent to {@code nextAfter(f, + * Float.NEGATIVE_INFINITY)}; however, a + * {@code nextDown} implementation may run faster than its + * equivalent {@code nextAfter} call. + * + *

Special Cases: + *
  • If the argument is NaN, the result is NaN. + * + *
  • If the argument is negative infinity, the result is + * negative infinity. + * + *
  • If the argument is zero, the result is + * {@code -Float.MIN_VALUE} + * + *
+ * + * @param f starting floating-point value + * @return The adjacent floating-point value closer to negative + * infinity. + * @since 1.8 + */ + public static float nextDown(float f) { + return Math.nextDown(f); + } + + /** + * Returns {@code d} × + * 2{@code scaleFactor} rounded as if performed + * by a single correctly rounded floating-point multiply to a + * member of the double value set. See the Java + * Language Specification for a discussion of floating-point + * value sets. If the exponent of the result is between {@link + * Double#MIN_EXPONENT} and {@link Double#MAX_EXPONENT}, the + * answer is calculated exactly. If the exponent of the result + * would be larger than {@code Double.MAX_EXPONENT}, an + * infinity is returned. Note that if the result is subnormal, + * precision may be lost; that is, when {@code scalb(x, n)} + * is subnormal, {@code scalb(scalb(x, n), -n)} may not equal + * x. When the result is non-NaN, the result has the same + * sign as {@code d}. + * + *

Special cases: + *
  • If the first argument is NaN, NaN is returned. + *
  • If the first argument is infinite, then an infinity of the + * same sign is returned. + *
  • If the first argument is zero, then a zero of the same + * sign is returned. + *
+ * + * @param d number to be scaled by a power of two. + * @param scaleFactor power of 2 used to scale {@code d} + * @return {@code d} × 2{@code scaleFactor} + * @since 1.6 + */ + public static double scalb(double d, int scaleFactor) { + return Math.scalb(d, scaleFactor); + } + + /** + * Returns {@code f} × + * 2{@code scaleFactor} rounded as if performed + * by a single correctly rounded floating-point multiply to a + * member of the float value set. See the Java + * Language Specification for a discussion of floating-point + * value sets. If the exponent of the result is between {@link + * Float#MIN_EXPONENT} and {@link Float#MAX_EXPONENT}, the + * answer is calculated exactly. If the exponent of the result + * would be larger than {@code Float.MAX_EXPONENT}, an + * infinity is returned. Note that if the result is subnormal, + * precision may be lost; that is, when {@code scalb(x, n)} + * is subnormal, {@code scalb(scalb(x, n), -n)} may not equal + * x. When the result is non-NaN, the result has the same + * sign as {@code f}. + * + *

Special cases: + *
  • If the first argument is NaN, NaN is returned. + *
  • If the first argument is infinite, then an infinity of the + * same sign is returned. + *
  • If the first argument is zero, then a zero of the same + * sign is returned. + *
+ * + * @param f number to be scaled by a power of two. + * @param scaleFactor power of 2 used to scale {@code f} + * @return {@code f} × 2{@code scaleFactor} + * @since 1.6 + */ + public static float scalb(float f, int scaleFactor) { + return Math.scalb(f, scaleFactor); + } +} diff --git a/src/MapleFE/test/java/openjdk/StrictMath.java.result b/src/MapleFE/test/java/openjdk/StrictMath.java.result new file mode 100644 index 0000000000000000000000000000000000000000..43c673cea33cbea5851d6c80142ce66fa203db18 --- /dev/null +++ b/src/MapleFE/test/java/openjdk/StrictMath.java.result @@ -0,0 +1,176 @@ +Matched 5 tokens. +Matched 12 tokens. +Matched 19 tokens. +Matched 1469 tokens. +============= Module =========== +== Sub Tree == +package java.lang +== Sub Tree == +import java.util.Random +== Sub Tree == +import sun.misc.DoubleConsts +== Sub Tree == +class StrictMath + Fields: + E=2.71828 PI=3.14159 + Instance Initializer: + Constructors: + constructor StrictMath() throws: + Methods: + func sin(a) throws: + func cos(a) throws: + func tan(a) throws: + func asin(a) throws: + func acos(a) throws: + func atan(a) throws: + func toRadians(angdeg) throws: + return angdeg Div 180 Mul PI + func toDegrees(angrad) throws: + return angrad Mul 180 Div PI + func exp(a) throws: + func log(a) throws: + func log10(a) throws: + func sqrt(a) throws: + func cbrt(a) throws: + func IEEEremainder(f1,f2) throws: + func ceil(a) throws: + return floorOrCeil(a,-0,1,1) + func floor(a) throws: + return floorOrCeil(a,-1,0,-1) + func floorOrCeil(a,negativeBoundary,positiveBoundary,sign) throws: + Decl: exponent=Math.getExponent(a) + cond-branch cond:exponent LT 0 + true branch : + return () + false branch : + cond-branch cond:exponent GE 52 + true branch : + return a + false branch : + + assert exponent GE 0 Land exponent LE 51 : + Decl: doppel=Double.doubleToRawLongBits(a) + Decl: mask=DoubleConsts.SIGNIF_BIT_MASK Shr exponent + cond-branch cond:(mask Band doppel) EQ 0 + true branch : + return a false branch : + Decl: result=Double.longBitsToDouble(doppel Band (mask)) + cond-branch cond:sign Mul a GT 0 + true branch : + result Assign result Add sign false branch : + + return result + + func rint(a) throws: + Decl: twoToThe52=(double)(1 Shl 52) + Decl: sign=Math.copySign(1,a) + a Assign Math.abs(a) + cond-branch cond:a LT twoToThe52 + true branch : + a Assign ((twoToThe52 Add a) Sub twoToThe52) + false branch : + + return sign Mul a + func atan2(y,x) throws: + func pow(a,b) throws: + func round(a) throws: + return Math.round(a) + func round(a) throws: + return Math.round(a) + func random() throws: + return RandomNumberGeneratorHolder.randomNumberGenerator.nextDouble() + func addExact(x,y) throws: + return Math.addExact(x,y) + func addExact(x,y) throws: + return Math.addExact(x,y) + func subtractExact(x,y) throws: + return Math.subtractExact(x,y) + func subtractExact(x,y) throws: + return Math.subtractExact(x,y) + func multiplyExact(x,y) throws: + return Math.multiplyExact(x,y) + func multiplyExact(x,y) throws: + return Math.multiplyExact(x,y) + func toIntExact(value) throws: + return Math.toIntExact(value) + func floorDiv(x,y) throws: + return Math.floorDiv(x,y) + func floorDiv(x,y) throws: + return Math.floorDiv(x,y) + func floorMod(x,y) throws: + return Math.floorMod(x,y) + func floorMod(x,y) throws: + return Math.floorMod(x,y) + func abs(a) throws: + return Math.abs(a) + func abs(a) throws: + return Math.abs(a) + func abs(a) throws: + return Math.abs(a) + func abs(a) throws: + return Math.abs(a) + func max(a,b) throws: + 
return Math.max(a,b) + func max(a,b) throws: + return Math.max(a,b) + func max(a,b) throws: + return Math.max(a,b) + func max(a,b) throws: + return Math.max(a,b) + func min(a,b) throws: + return Math.min(a,b) + func min(a,b) throws: + return Math.min(a,b) + func min(a,b) throws: + return Math.min(a,b) + func min(a,b) throws: + return Math.min(a,b) + func ulp(d) throws: + return Math.ulp(d) + func ulp(f) throws: + return Math.ulp(f) + func signum(d) throws: + return Math.signum(d) + func signum(f) throws: + return Math.signum(f) + func sinh(x) throws: + func cosh(x) throws: + func tanh(x) throws: + func hypot(x,y) throws: + func expm1(x) throws: + func log1p(x) throws: + func copySign(magnitude,sign) throws: + return Math.copySign(magnitude,()) + func copySign(magnitude,sign) throws: + return Math.copySign(magnitude,()) + func getExponent(f) throws: + return Math.getExponent(f) + func getExponent(d) throws: + return Math.getExponent(d) + func nextAfter(start,direction) throws: + return Math.nextAfter(start,direction) + func nextAfter(start,direction) throws: + return Math.nextAfter(start,direction) + func nextUp(d) throws: + return Math.nextUp(d) + func nextUp(f) throws: + return Math.nextUp(f) + func nextDown(d) throws: + return Math.nextDown(d) + func nextDown(f) throws: + return Math.nextDown(f) + func scalb(d,scaleFactor) throws: + return Math.scalb(d,scaleFactor) + func scalb(f,scaleFactor) throws: + return Math.scalb(f,scaleFactor) + LocalClasses: + class RandomNumberGeneratorHolder + Fields: + randomNumberGenerator=new Random() + Instance Initializer: + Constructors: + Methods: + LocalClasses: + LocalInterfaces: + LocalInterfaces: + diff --git a/src/MapleFE/test/openjdk/generate-result.sh b/src/MapleFE/test/java/openjdk/generate-result.sh similarity index 92% rename from src/MapleFE/test/openjdk/generate-result.sh rename to src/MapleFE/test/java/openjdk/generate-result.sh index bbd3acd878bd41d3a31fc5123972e954d7c9fcd0..08b3acc3b58dc1585fb15888064b004629e72007 100755 --- a/src/MapleFE/test/openjdk/generate-result.sh +++ b/src/MapleFE/test/java/openjdk/generate-result.sh @@ -17,5 +17,5 @@ FILES=$(pwd)/*.java for f in $FILES do echo "Generating result for $f ..." 
- ../../output/java/java2mpl $f > $f.result + ../../../output/java/java/java2mpl $f > $f.result done diff --git a/src/MapleFE/test/others/ChatTest-1.java b/src/MapleFE/test/java/others/ChatTest-1.java similarity index 100% rename from src/MapleFE/test/others/ChatTest-1.java rename to src/MapleFE/test/java/others/ChatTest-1.java diff --git a/src/MapleFE/test/others/ChatTest-1.java.result b/src/MapleFE/test/java/others/ChatTest-1.java.result similarity index 57% rename from src/MapleFE/test/others/ChatTest-1.java.result rename to src/MapleFE/test/java/others/ChatTest-1.java.result index d55049c9bf1d5050a278afd07f004dded5f5b1e1..199b485acd4c4704be2e0e6aed2e3025a514e691 100644 --- a/src/MapleFE/test/others/ChatTest-1.java.result +++ b/src/MapleFE/test/java/others/ChatTest-1.java.result @@ -28,7 +28,7 @@ class ChatTest Instance Initializer: Constructors: Methods: - func main() throws: Throwable + func main(args) throws: Throwable testStartStop() testPortOpen() testAsksForName() @@ -37,57 +37,63 @@ class ChatTest testUsernameAndMessage() testDontReceiveMessageInNameState() func startServer() throws: IOException - var:server=new ChatServer - var:address=(InetSocketAddress)server.getSocketAddress() + Decl: server=new ChatServer(0) + Decl: address=(InetSocketAddress)server.getSocketAddress() listeningPort Assign address.getPort() server.run() return server func testStartStop() throws: Exception - var:server=startServer() + Decl: server=startServer() server.shutdown() func testPortOpen() throws: Exception - var:server=startServer() - var:socket=new Socket + Decl: server=startServer() + Decl: socket=new Socket("localhost",listeningPort) cond-branch cond:socket.isConnected() true branch : - new RuntimeException + new RuntimeException("Failed to connect to server: port not open") false branch : server.shutdown() func testAsksForName() throws: Exception - var:server=startServer() - var:socket=new Socket + Decl: server=startServer() + Decl: socket=new Socket("localhost",listeningPort) + Decl: reader=new BufferedReader(new InputStreamReader(socket.getInputStream())) + Decl: string=readAvailableString(reader) + cond-branch cond:string.equals("Name: ") + true branch : + new RuntimeException("Server doesn't send Name: ") + false branch : server.shutdown() func testUseName() throws: Throwable - var:server=startServer() + Decl: server=startServer() performTestUseName() server.shutdown() func testConnectDisconnectConnect() throws: Exception - var:server=startServer() + Decl: server=startServer() performTestConnectDisconnectConnect() server.shutdown() func testUsernameAndMessage() throws: Exception - var:server=startServer() + Decl: server=startServer() performTestUsernameAndMessage() server.shutdown() func testDontReceiveMessageInNameState() throws: Exception - var:server=startServer() + Decl: server=startServer() performDontReceiveMessageInNameState() server.shutdown() - func assertEqual() throws: + func assertEqual(exception,value,expected) throws: cond-branch cond:expected EQ value true branch : return @@ -95,23 +101,23 @@ class ChatTest cond-branch cond:expected EQ null true branch : - exception.add(new RuntimeException) + exception.add(new RuntimeException("Expected null, but was: " Add value)) return false branch : cond-branch cond:expected.equals(value) true branch : - exception.add(new RuntimeException) + exception.add(new RuntimeException("Expected: " Add expected Add " but was: " Add value)) return false branch : func performDontReceiveMessageInNameState() throws: Exception - var:barrier1=new 
CyclicBarrier - var:barrier2=new CyclicBarrier - var:barrier3=new CyclicBarrier - var:exceptions=Collections.synchronizedList(new ArrayList) - var:chatConnection=new ChatConnection - var:client2=new Thread + Decl: barrier1=new CyclicBarrier(2) + Decl: barrier2=new CyclicBarrier(2) + Decl: barrier3=new CyclicBarrier(2) + Decl: exceptions=Collections.synchronizedList(new ArrayList()) + Decl: chatConnection=new ChatConnection() + Decl: client2=new Thread(new ChatConnection()) client2.start() chatConnection.run() cond-branch cond:exceptions.isEmpty() @@ -119,17 +125,17 @@ class ChatTest exceptions.get(0) false branch : - func waitForJoin() throws: IOException - var:joined + func waitForJoin(reader,s) throws: IOException + Decl: joined do joined Assign readAvailableString(reader) while (joined NE null Land joined.contains("Welcome " Add s)) func performTestUsernameAndMessage() throws: Exception - var:barrier1=new CyclicBarrier - var:barrier2=new CyclicBarrier - var:barrier3=new CyclicBarrier - var:exceptions=Collections.synchronizedList(new ArrayList) - var:chatConnection=new ChatConnection - var:client2=new Thread + Decl: barrier1=new CyclicBarrier(2) + Decl: barrier2=new CyclicBarrier(2) + Decl: barrier3=new CyclicBarrier(2) + Decl: exceptions=Collections.synchronizedList(new ArrayList()) + Decl: chatConnection=new ChatConnection() + Decl: client2=new Thread(new ChatConnection()) client2.start() chatConnection.run() cond-branch cond:exceptions.isEmpty() @@ -137,18 +143,19 @@ while (joined NE null Land joined.contains("Welcome " Add s)) exceptions.get(0) false branch : - func readAvailableString() throws: IOException + func readAvailableString(reader) throws: IOException return readAvailableString(reader,false) - func readAvailableString() throws: IOException - var:builder=new StringBuilder - var:bytes + func readAvailableString(reader,now) throws: IOException + Decl: builder=new StringBuilder() + Decl: bytes cond-branch cond:now Land reader.ready() true branch : return null false branch : - do var:buf= - + do Decl: buf= + bytes Assign reader.read(buf) + builder.append(buf,0,bytes) while bytes EQ 256 return builder.toString() LocalClasses: @@ -161,13 +168,21 @@ while bytes EQ 256 func run() throws: Socket socket - new Socket - + new Socket("localhost",listeningPort) + BufferedReader + reader + new BufferedReader(new InputStreamReader(socket.getInputStream())) + Writer + writer + new FlushingWriter(new OutputStreamWriter(socket.getOutputStream())) socket.setTcpNoDelay(true) run(socket,reader,writer) + Exception + e + exception Assign e - func run() throws: Exception + func run(socket,reader,writer) throws: Exception LocalClasses: LocalInterfaces: class FlushingWriter @@ -175,18 +190,17 @@ while bytes EQ 256 delegate Instance Initializer: Constructors: - constructor FlushingWriter() throws: + constructor FlushingWriter(delegate) throws: this.delegate Assign delegate Methods: - func write() throws: IOException + func write(cbuf,off,len) throws: IOException delegate.write(cbuf,off,len) func flush() throws: IOException delegate.flush() func close() throws: IOException delegate.close() - func write() throws: IOException - write - str + func write(str) throws: IOException + super.write(str) flush() LocalClasses: LocalInterfaces: diff --git a/src/MapleFE/test/others/ChatTest-2.java b/src/MapleFE/test/java/others/ChatTest-2.java similarity index 100% rename from src/MapleFE/test/others/ChatTest-2.java rename to src/MapleFE/test/java/others/ChatTest-2.java diff --git 
a/src/MapleFE/test/others/ChatTest-2.java.result b/src/MapleFE/test/java/others/ChatTest-2.java.result similarity index 84% rename from src/MapleFE/test/others/ChatTest-2.java.result rename to src/MapleFE/test/java/others/ChatTest-2.java.result index c0aa2ba566e42a8972817cc2e3032afd719d2436..18139019faa869c267dead7f165103cf6f5280af 100644 --- a/src/MapleFE/test/others/ChatTest-2.java.result +++ b/src/MapleFE/test/java/others/ChatTest-2.java.result @@ -8,7 +8,7 @@ class ChatTest Constructors: Methods: func performDontReceiveMessageInNameState() throws: - var:exceptions=new ArrayList + Decl: exceptions=new ArrayList() LocalClasses: LocalInterfaces: diff --git a/src/MapleFE/test/others/ChatTest-full.java b/src/MapleFE/test/java/others/ChatTest-full.java similarity index 100% rename from src/MapleFE/test/others/ChatTest-full.java rename to src/MapleFE/test/java/others/ChatTest-full.java diff --git a/src/MapleFE/test/others/ChatTest-full.java.result b/src/MapleFE/test/java/others/ChatTest-full.java.result similarity index 56% rename from src/MapleFE/test/others/ChatTest-full.java.result rename to src/MapleFE/test/java/others/ChatTest-full.java.result index 7117d4628595fdbaf3125491e34a7678146b49da..c8e02b22089dab33690a75a5a774d208074d1823 100644 --- a/src/MapleFE/test/others/ChatTest-full.java.result +++ b/src/MapleFE/test/java/others/ChatTest-full.java.result @@ -29,11 +29,11 @@ class ChatTest Constructors: Methods: func performTestConnectDisconnectConnect() throws: Exception - var:barrier1=new CyclicBarrier - var:barrier2=new CyclicBarrier - var:barrier3=new CyclicBarrier - var:exceptions=new ArrayList - func main() throws: Throwable + Decl: barrier1=new CyclicBarrier(2) + Decl: barrier2=new CyclicBarrier(2) + Decl: barrier3=new CyclicBarrier(2) + Decl: exceptions=new ArrayList() + func main(args) throws: Throwable testStartStop() testPortOpen() testAsksForName() @@ -42,57 +42,63 @@ class ChatTest testUsernameAndMessage() testDontReceiveMessageInNameState() func startServer() throws: IOException - var:server=new ChatServer - var:address=(InetSocketAddress)server.getSocketAddress() + Decl: server=new ChatServer(0) + Decl: address=(InetSocketAddress)server.getSocketAddress() listeningPort Assign address.getPort() server.run() return server func testStartStop() throws: Exception - var:server=startServer() + Decl: server=startServer() server.shutdown() func testPortOpen() throws: Exception - var:server=startServer() - var:socket=new Socket + Decl: server=startServer() + Decl: socket=new Socket("localhost",listeningPort) cond-branch cond:socket.isConnected() true branch : - new RuntimeException + new RuntimeException("Failed to connect to server: port not open") false branch : server.shutdown() func testAsksForName() throws: Exception - var:server=startServer() - var:socket=new Socket + Decl: server=startServer() + Decl: socket=new Socket("localhost",listeningPort) + Decl: reader=new BufferedReader(new InputStreamReader(socket.getInputStream())) + Decl: string=readAvailableString(reader) + cond-branch cond:string.equals("Name: ") + true branch : + new RuntimeException("Server doesn't send Name: ") + false branch : server.shutdown() func testUseName() throws: Throwable - var:server=startServer() + Decl: server=startServer() performTestUseName() server.shutdown() func testConnectDisconnectConnect() throws: Exception - var:server=startServer() + Decl: server=startServer() performTestConnectDisconnectConnect() server.shutdown() func testUsernameAndMessage() throws: Exception - var:server=startServer() 
+ Decl: server=startServer() performTestUsernameAndMessage() server.shutdown() func testDontReceiveMessageInNameState() throws: Exception - var:server=startServer() + Decl: server=startServer() performDontReceiveMessageInNameState() server.shutdown() - func assertEqual() throws: + func assertEqual(exception,value,expected) throws: cond-branch cond:expected EQ value true branch : return @@ -100,23 +106,23 @@ class ChatTest cond-branch cond:expected EQ null true branch : - exception.add(new RuntimeException) + exception.add(new RuntimeException("Expected null, but was: " Add value)) return false branch : cond-branch cond:expected.equals(value) true branch : - exception.add(new RuntimeException) + exception.add(new RuntimeException("Expected: " Add expected Add " but was: " Add value)) return false branch : func performDontReceiveMessageInNameState() throws: Exception - var:barrier1=new CyclicBarrier - var:barrier2=new CyclicBarrier - var:barrier3=new CyclicBarrier - var:exceptions=Collections.synchronizedList(new ArrayList) - var:chatConnection=new ChatConnection - var:client2=new Thread + Decl: barrier1=new CyclicBarrier(2) + Decl: barrier2=new CyclicBarrier(2) + Decl: barrier3=new CyclicBarrier(2) + Decl: exceptions=Collections.synchronizedList(new ArrayList()) + Decl: chatConnection=new ChatConnection() + Decl: client2=new Thread(new ChatConnection()) client2.start() chatConnection.run() cond-branch cond:exceptions.isEmpty() @@ -124,17 +130,17 @@ class ChatTest exceptions.get(0) false branch : - func waitForJoin() throws: IOException - var:joined + func waitForJoin(reader,s) throws: IOException + Decl: joined do joined Assign readAvailableString(reader) while (joined NE null Land joined.contains("Welcome " Add s)) func performTestUsernameAndMessage() throws: Exception - var:barrier1=new CyclicBarrier - var:barrier2=new CyclicBarrier - var:barrier3=new CyclicBarrier - var:exceptions=Collections.synchronizedList(new ArrayList) - var:chatConnection=new ChatConnection - var:client2=new Thread + Decl: barrier1=new CyclicBarrier(2) + Decl: barrier2=new CyclicBarrier(2) + Decl: barrier3=new CyclicBarrier(2) + Decl: exceptions=Collections.synchronizedList(new ArrayList()) + Decl: chatConnection=new ChatConnection() + Decl: client2=new Thread(new ChatConnection()) client2.start() chatConnection.run() cond-branch cond:exceptions.isEmpty() @@ -143,13 +149,13 @@ while (joined NE null Land joined.contains("Welcome " Add s)) false branch : func performTestConnectDisconnectConnect() throws: Exception - var:barrier1=new CyclicBarrier - var:barrier2=new CyclicBarrier - var:barrier3=new CyclicBarrier - var:exceptions=new ArrayList - var:chatConnection=new ChatConnection - var:chatConnection2=new ChatConnection - var:client2=new Thread + Decl: barrier1=new CyclicBarrier(2) + Decl: barrier2=new CyclicBarrier(2) + Decl: barrier3=new CyclicBarrier(2) + Decl: exceptions=new ArrayList() + Decl: chatConnection=new ChatConnection() + Decl: chatConnection2=new ChatConnection() + Decl: client2=new Thread(new ChatConnection()) client2.start() chatConnection.run() chatConnection2.run() @@ -159,12 +165,12 @@ while (joined NE null Land joined.contains("Welcome " Add s)) false branch : func performTestUseName() throws: Exception - var:barrier1=new CyclicBarrier - var:barrier2=new CyclicBarrier - var:barrier3=new CyclicBarrier - var:exceptions=new ArrayList - var:chatConnection=new ChatConnection - var:client2=new Thread + Decl: barrier1=new CyclicBarrier(2) + Decl: barrier2=new CyclicBarrier(2) + Decl: barrier3=new 
CyclicBarrier(2) + Decl: exceptions=new ArrayList() + Decl: chatConnection=new ChatConnection() + Decl: client2=new Thread(new ChatConnection()) client2.start() chatConnection.run() cond-branch cond:exceptions.isEmpty() @@ -172,18 +178,19 @@ while (joined NE null Land joined.contains("Welcome " Add s)) exceptions.get(0) false branch : - func readAvailableString() throws: IOException + func readAvailableString(reader) throws: IOException return readAvailableString(reader,false) - func readAvailableString() throws: IOException - var:builder=new StringBuilder - var:bytes + func readAvailableString(reader,now) throws: IOException + Decl: builder=new StringBuilder() + Decl: bytes cond-branch cond:now Land reader.ready() true branch : return null false branch : - do var:buf= - + do Decl: buf= + bytes Assign reader.read(buf) + builder.append(buf,0,bytes) while bytes EQ 256 return builder.toString() LocalClasses: @@ -196,13 +203,21 @@ while bytes EQ 256 func run() throws: Socket socket - new Socket - + new Socket("localhost",listeningPort) + BufferedReader + reader + new BufferedReader(new InputStreamReader(socket.getInputStream())) + Writer + writer + new FlushingWriter(new OutputStreamWriter(socket.getOutputStream())) socket.setTcpNoDelay(true) run(socket,reader,writer) + Exception + e + exception Assign e - func run() throws: Exception + func run(socket,reader,writer) throws: Exception LocalClasses: LocalInterfaces: class FlushingWriter @@ -210,18 +225,17 @@ while bytes EQ 256 delegate Instance Initializer: Constructors: - constructor FlushingWriter() throws: + constructor FlushingWriter(delegate) throws: this.delegate Assign delegate Methods: - func write() throws: IOException + func write(cbuf,off,len) throws: IOException delegate.write(cbuf,off,len) func flush() throws: IOException delegate.flush() func close() throws: IOException delegate.close() - func write() throws: IOException - write - str + func write(str) throws: IOException + super.write(str) flush() LocalClasses: LocalInterfaces: diff --git a/src/MapleFE/test/others/ChatTest.java b/src/MapleFE/test/java/others/ChatTest.java similarity index 100% rename from src/MapleFE/test/others/ChatTest.java rename to src/MapleFE/test/java/others/ChatTest.java diff --git a/src/MapleFE/test/others/ChatTest.java.result b/src/MapleFE/test/java/others/ChatTest.java.result similarity index 55% rename from src/MapleFE/test/others/ChatTest.java.result rename to src/MapleFE/test/java/others/ChatTest.java.result index c1c0ddabe2350f16c28c717327eb8783de0da2d9..d1ced6a9bec630e4c5b309f54e27d0f8e28e9d8b 100644 --- a/src/MapleFE/test/others/ChatTest.java.result +++ b/src/MapleFE/test/java/others/ChatTest.java.result @@ -7,7 +7,7 @@ class ChatTest Instance Initializer: Constructors: Methods: - func main() throws: Throwable + func main(args) throws: Throwable testStartStop() testPortOpen() testAsksForName() @@ -16,57 +16,63 @@ class ChatTest testUsernameAndMessage() testDontReceiveMessageInNameState() func startServer() throws: IOException - var:server=new ChatServer - var:address=(InetSocketAddress)server.getSocketAddress() + Decl: server=new ChatServer(0) + Decl: address=(InetSocketAddress)server.getSocketAddress() listeningPort Assign address.getPort() server.run() return server func testStartStop() throws: Exception - var:server=startServer() + Decl: server=startServer() server.shutdown() func testPortOpen() throws: Exception - var:server=startServer() - var:socket=new Socket + Decl: server=startServer() + Decl: socket=new Socket("localhost",listeningPort) 
cond-branch cond:socket.isConnected() true branch : - new RuntimeException + new RuntimeException("Failed to connect to server: port not open") false branch : server.shutdown() func testAsksForName() throws: Exception - var:server=startServer() - var:socket=new Socket + Decl: server=startServer() + Decl: socket=new Socket("localhost",listeningPort) + Decl: reader=new BufferedReader(new InputStreamReader(socket.getInputStream())) + Decl: string=readAvailableString(reader) + cond-branch cond:string.equals("Name: ") + true branch : + new RuntimeException("Server doesn't send Name: ") + false branch : server.shutdown() func testUseName() throws: Throwable - var:server=startServer() + Decl: server=startServer() performTestUseName() server.shutdown() func testConnectDisconnectConnect() throws: Exception - var:server=startServer() + Decl: server=startServer() performTestConnectDisconnectConnect() server.shutdown() func testUsernameAndMessage() throws: Exception - var:server=startServer() + Decl: server=startServer() performTestUsernameAndMessage() server.shutdown() func testDontReceiveMessageInNameState() throws: Exception - var:server=startServer() + Decl: server=startServer() performDontReceiveMessageInNameState() server.shutdown() - func assertEqual() throws: + func assertEqual(exception,value,expected) throws: cond-branch cond:expected EQ value true branch : return @@ -74,23 +80,23 @@ class ChatTest cond-branch cond:expected EQ null true branch : - exception.add(new RuntimeException) + exception.add(new RuntimeException("Expected null, but was: " Add value)) return false branch : cond-branch cond:expected.equals(value) true branch : - exception.add(new RuntimeException) + exception.add(new RuntimeException("Expected: " Add expected Add " but was: " Add value)) return false branch : func performDontReceiveMessageInNameState() throws: Exception - var:barrier1=new CyclicBarrier - var:barrier2=new CyclicBarrier - var:barrier3=new CyclicBarrier - var:exceptions=Collections.synchronizedList(new ArrayList) - var:chatConnection=new ChatConnection - var:client2=new Thread + Decl: barrier1=new CyclicBarrier(2) + Decl: barrier2=new CyclicBarrier(2) + Decl: barrier3=new CyclicBarrier(2) + Decl: exceptions=Collections.synchronizedList(new ArrayList()) + Decl: chatConnection=new ChatConnection() + Decl: client2=new Thread(new ChatConnection()) client2.start() chatConnection.run() cond-branch cond:exceptions.isEmpty() @@ -98,17 +104,17 @@ class ChatTest exceptions.get(0) false branch : - func waitForJoin() throws: IOException - var:joined + func waitForJoin(reader,s) throws: IOException + Decl: joined do joined Assign readAvailableString(reader) while (joined NE null Land joined.contains("Welcome " Add s)) func performTestUsernameAndMessage() throws: Exception - var:barrier1=new CyclicBarrier - var:barrier2=new CyclicBarrier - var:barrier3=new CyclicBarrier - var:exceptions=Collections.synchronizedList(new ArrayList) - var:chatConnection=new ChatConnection - var:client2=new Thread + Decl: barrier1=new CyclicBarrier(2) + Decl: barrier2=new CyclicBarrier(2) + Decl: barrier3=new CyclicBarrier(2) + Decl: exceptions=Collections.synchronizedList(new ArrayList()) + Decl: chatConnection=new ChatConnection() + Decl: client2=new Thread(new ChatConnection()) client2.start() chatConnection.run() cond-branch cond:exceptions.isEmpty() diff --git a/src/MapleFE/test/java2mpl/generate-result.sh b/src/MapleFE/test/java/others/generate-result.sh similarity index 92% rename from src/MapleFE/test/java2mpl/generate-result.sh 
rename to src/MapleFE/test/java/others/generate-result.sh index bbd3acd878bd41d3a31fc5123972e954d7c9fcd0..08b3acc3b58dc1585fb15888064b004629e72007 100755 --- a/src/MapleFE/test/java2mpl/generate-result.sh +++ b/src/MapleFE/test/java/others/generate-result.sh @@ -17,5 +17,5 @@ FILES=$(pwd)/*.java for f in $FILES do echo "Generating result for $f ..." - ../../output/java/java2mpl $f > $f.result + ../../../output/java/java/java2mpl $f > $f.result done diff --git a/src/MapleFE/test/others/plotter.java b/src/MapleFE/test/java/others/plotter.java similarity index 100% rename from src/MapleFE/test/others/plotter.java rename to src/MapleFE/test/java/others/plotter.java diff --git a/src/MapleFE/test/others/plotter.java.result b/src/MapleFE/test/java/others/plotter.java.result similarity index 67% rename from src/MapleFE/test/others/plotter.java.result rename to src/MapleFE/test/java/others/plotter.java.result index 78be976165d420319bc69a4617fcea334bcd11cf..c266a3188f1edb757c066e00dfa72b56d962494a 100644 --- a/src/MapleFE/test/others/plotter.java.result +++ b/src/MapleFE/test/java/others/plotter.java.result @@ -37,15 +37,23 @@ class Plotter Instance Initializer: Constructors: Methods: - func main() throws: IOException - var:path=Paths.get("footprint.csv") + func main(args) throws: IOException + Decl: path=Paths.get("footprint.csv") BufferedWriter stream Files.newBufferedWriter(path,StandardOpenOption.CREATE) - stream.write("bits,bool,bitset -") + stream.write("bits,bool,bitset\n") for ( ) System.out.println("Number of bits => " Add i) + Decl: ba= + Decl: bitSet=new BitSet(i) + Decl: baSize=ClassLayout.parseInstance(ba).instanceSize() + Decl: bitSetSize=GraphLayout.parseInstance(bitSet).totalSize() + stream.write((i Add "," Add baSize Add "," Add bitSetSize Add "\n")) + cond-branch cond:i Mod 10000 EQ 0 + true branch : + stream.flush() + false branch : diff --git a/src/MapleFE/test/others/receiver.java b/src/MapleFE/test/java/others/receiver.java similarity index 84% rename from src/MapleFE/test/others/receiver.java rename to src/MapleFE/test/java/others/receiver.java index 7bd8f8a09b64773ccf5d519d6532b88a320d74ab..280035847267b28da7e4d6ebe8c84d629ff79504 100644 --- a/src/MapleFE/test/others/receiver.java +++ b/src/MapleFE/test/java/others/receiver.java @@ -19,11 +19,13 @@ class Outer { Outer() {} void m(Outer this) {} class Inner { - Inner(Outer Outer.this) {} + //Inner(Outer Outer.this) {} + Inner(Outer Outer) {} void m(Inner this) {} class B { - B(Outer.Inner Inner.this) {} - void m(Outer.Inner.B this) {} + //B(Outer.Inner Inner.this) {} + B(Inner Inner) {} + void m(Outer this) {} } } } diff --git a/src/MapleFE/test/others/receiver.java.result b/src/MapleFE/test/java/others/receiver.java.result similarity index 71% rename from src/MapleFE/test/others/receiver.java.result rename to src/MapleFE/test/java/others/receiver.java.result index e3f63c08a412c4f8164eeb5573606acca0063cce..adb36777b5f0d0f38dddcd088e3376e437b01c49 100644 --- a/src/MapleFE/test/others/receiver.java.result +++ b/src/MapleFE/test/java/others/receiver.java.result @@ -1,4 +1,4 @@ -Matched 65 tokens. +Matched 55 tokens. 
============= Module =========== == Sub Tree == class Outer @@ -8,25 +8,25 @@ class Outer Constructors: constructor Outer() throws: Methods: - func m() throws: + func m(Outer) throws: LocalClasses: class Inner Fields: Instance Initializer: Constructors: - constructor Inner() throws: + constructor Inner(Outer) throws: Methods: - func m() throws: + func m(Inner) throws: LocalClasses: class B Fields: Instance Initializer: Constructors: - constructor B() throws: + constructor B(Inner) throws: Methods: - func m() throws: + func m(Outer) throws: LocalClasses: LocalInterfaces: LocalInterfaces: diff --git a/src/MapleFE/test/others/sizing.java b/src/MapleFE/test/java/others/sizing.java similarity index 100% rename from src/MapleFE/test/others/sizing.java rename to src/MapleFE/test/java/others/sizing.java diff --git a/src/MapleFE/test/others/sizing.java.result b/src/MapleFE/test/java/others/sizing.java.result similarity index 66% rename from src/MapleFE/test/others/sizing.java.result rename to src/MapleFE/test/java/others/sizing.java.result index 5a9bf938c5873e9237d34bb5e7a59e0ba0d8121f..950e89f28d0fc42010b2bf592287eb68dab6f054 100644 --- a/src/MapleFE/test/others/sizing.java.result +++ b/src/MapleFE/test/java/others/sizing.java.result @@ -19,11 +19,11 @@ class Sizing Instance Initializer: Constructors: Methods: - func main() throws: - var:ba= - System.out.println(ClassLayout.parseInstance(ba),toPrintable) - var:bitSet=new BitSet - System.out.println(GraphLayout.parseInstance(bitSet),toPrintable) + func main(args) throws: + Decl: ba= + System.out.println(ClassLayout.parseInstance(ba).toPrintable()) + Decl: bitSet=new BitSet(10000) + System.out.println(GraphLayout.parseInstance(bitSet).toPrintable()) LocalClasses: LocalInterfaces: diff --git a/src/MapleFE/test/java/syntaxonly/generate-result.sh b/src/MapleFE/test/java/syntaxonly/generate-result.sh new file mode 100755 index 0000000000000000000000000000000000000000..08b3acc3b58dc1585fb15888064b004629e72007 --- /dev/null +++ b/src/MapleFE/test/java/syntaxonly/generate-result.sh @@ -0,0 +1,21 @@ +# Copyright (C) [2020] Futurewei Technologies, Inc. All rights reverved. +# +# OpenArkFE is licensed under the Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# + +#!/bin/bash +FILES=$(pwd)/*.java +for f in $FILES +do + echo "Generating result for $f ..." 
+ ../../../output/java/java/java2mpl $f > $f.result +done diff --git a/src/MapleFE/test/syntaxonly/var-scope-1.java b/src/MapleFE/test/java/syntaxonly/var-scope-1.java similarity index 100% rename from src/MapleFE/test/syntaxonly/var-scope-1.java rename to src/MapleFE/test/java/syntaxonly/var-scope-1.java diff --git a/src/MapleFE/test/syntaxonly/var-scope-1.java.result b/src/MapleFE/test/java/syntaxonly/var-scope-1.java.result similarity index 55% rename from src/MapleFE/test/syntaxonly/var-scope-1.java.result rename to src/MapleFE/test/java/syntaxonly/var-scope-1.java.result index e2618617a5bfc8b1f83178266e81bd1a8bd3a168..5ca73a8308698c869f6edb6f3cd94e27b28a511b 100644 --- a/src/MapleFE/test/syntaxonly/var-scope-1.java.result +++ b/src/MapleFE/test/java/syntaxonly/var-scope-1.java.result @@ -7,12 +7,20 @@ class A Instance Initializer: Constructors: Methods: - func foo() throws: + func foo(x) throws: a Assign x cond-branch cond:a EQ 2 true branch : - var:a=2 + Decl: a=2 + x Assign a Add this.a Add 2 + a Assign x Add 2 + cond-branch cond:a EQ 4 + true branch : + Decl: a=4 + x Assign a Add this.a Add 4 + false branch : + x Assign a Add 5 false branch : a Assign x Add this.a Add a diff --git a/src/MapleFE/test/java2mpl_runtests.pl b/src/MapleFE/test/java2mpl_runtests.pl deleted file mode 100755 index 6a7b7af8b263d6ebd8d8df21b8bc0d015b1faa0e..0000000000000000000000000000000000000000 --- a/src/MapleFE/test/java2mpl_runtests.pl +++ /dev/null @@ -1,193 +0,0 @@ -#!/usr/bin/perl -w -use Cwd; -use warnings; - -my $pwd = getcwd; - -#print "here1 current pwd $pwd\n"; - -if ((!(defined $ARGV[0])) || ($ARGV[0] ne "java2mpl")) { - print "------------------------------------------------\n"; - print "usage: java2mpl_runtests.pl java2mpl\n"; - print "------------------------------------------------\n"; - exit; -} - -$dirname = "./$ARGV[0]"; - -my $outdir = "$pwd/java2mpl_output"; -my $diffdir = "$pwd/java2mpl_diff"; -my $notexistsdir = "$pwd/java2mpl_notexists"; -if(!(-e "$outdir")) { - system("mkdir -p $outdir"); -} - -my @failed_java2mpl_file; -my @successed_file; - -my $count = 0; -my $countJAVA2MPL = 0; - - -chdir $dirname; -$dirname = "./"; -opendir (DIR, $dirname ) || die "Error in opening dir $dirname\n"; - -#print "here2 dirname $dirname\n"; - -print("\n====================== run tests: $ARGV[0] =====================\n"); - -while( ($srcdir = readdir(DIR))){ - if(-d $srcdir and $srcdir ne ".." 
and $srcdir ne "output" and $srcdir ne "temp") { - my $predir = getcwd; - chdir $srcdir; - my @javafiles; - @javafiles = <*.java>; - -#print "here3 source directory $srcdir\n"; -#print "here4 predir $predir\n"; - - my @allfiles = (@javafiles); - foreach $fullname (@allfiles) { - $count ++; - (my $file = $fullname) =~ s/\.[^.]+$//; - if(defined $ARGV[1]) { - print "\n$file"; - } else { - print "."; - } - if ($count % 50 == 0) { - print " $count\n"; - } - my $flag = 0; - my $src_file = $fullname; - my $java2mpl_oresult_file = $file.'.java.result'; - my $java2mpl_log_file = $file.'.java2mpl.log'; - my $java2mpl_result_file = $file.'.java.java2mpl.result'; - my $java2mpl_err_file = $file.'.err'; - my $java2mpl_diff_file = $file.'.java.diff'; - - if(!(-e "$outdir")) { - system("mkdir -p $outdir"); - } - -#print "here5 outdir $outdir\n"; -#print "here6 src_file $src_file\n"; - - system("cp $src_file $outdir/$src_file"); - $res = system("cd $pwd/..; $BUILDDIR/java/java2mpl $outdir/$src_file > $outdir/$java2mpl_result_file"); - if ($res > 0) { - print "\ngdb --args $BUILDDIR/java/java2mpl $outdir/$src_file\n"; - print " ==java2mpl===> $file\n\n"; - $countJAVA2MPL ++; - push(@failed_java2mpl_file, $file); - $flag ++; - next; - } - -#print "here7 diff src_file $pwd/java2mpl/$java2mpl_oresult_file\n"; -#print "here8 diff java2mpl_result_file $java2mpl_result_file\n"; - - if (!(-e $java2mpl_oresult_file)) { - if(!(-e "$notexistsdir")) { - system("mkdir -p $notexistsdir"); - } - -#print "here9 $java2mpl_oresult_file NOT exists\n"; - print "Original file $java2mpl_oresult_file does NOT exists!\n"; - system("touch $notexistsdir/$java2mpl_oresult_file"); - $countJAVA2MPL ++; - push(@failed_java2mpl_file, $file); - } - else { -#print "here10 java2mpl_result_file $outdir/$java2mpl_result_file\n"; - if ((!(-e "$outdir/$java2mpl_result_file")) || (-z "$outdir/$java2mpl_result_file")) { - if(!(-e "$notexistsdir")) { - system("mkdir -p $notexistsdir"); - } - -#print "here11 $outdir/$java2mpl_result_file NOT exists\n"; - print "$java2mpl_result_file either empty or not exists!\n"; - system("touch $notexistsdir/$java2mpl_result_file"); - $countJAVA2MPL ++; - push(@failed_java2mpl_file, $file); - } else { - $res2 = system("diff $pwd/java2mpl/$java2mpl_oresult_file $outdir/$java2mpl_result_file"); - if ($res2 > 0) { - if(!(-e "$diffdir")) { - system("mkdir -p $diffdir"); - } - -#print "here12 $java2mpl_diff_file Different!!!\n"; - print "$java2mpl_oresult_file $java2mpl_result_file are different!!!\n"; - system("touch $diffdir/$java2mpl_diff_file"); - $countJAVA2MPL ++; - push(@failed_java2mpl_file, $file); - } else { - push(@successed_file, $file); - } - - } - } - -# if($flag eq 0){ -# push(@successed_file, $file); -# } - next; - - if ($flag eq -1) { - push(@successed_file, $file); - system("rm -f $outdir/$src_file"); - system("rm -f $outdir/$java2mpl_log_file"); - system("rm -f $outdir/$java2mpl_oresult_file"); - system("rm -f $outdir/$java2mpl_result_file"); - system("rm -f $outdir/$java2mpl_err_file"); - system("rm -f $diffdir/$java2mpl_diff_file"); - system("rm -f $notexists/$java2mpl_oresult_file"); - system("rm -f $notexists/$java2mpl_result_file"); - next; - } - } - chdir $predir; - } -} - -print " $count\n"; -closedir(DIR); -chdir $pwd; - -my $countFailed = $countJAVA2MPL ; -my $countPassed = $count - $countFailed; - -my $reportFile = 'java2mpl_report.txt'; -open(my $fh, '>', $reportFile) or die "Could not open file '$reportFile' $!"; -print $fh "$ARGV[0] report: \n"; - -if ($countFailed eq 0) { - 
print("\n all $count tests passed\n"); - print("======================================================\n"); - print $fh "all $count tests passed\n"; - close $fh; -} else { - print "\n\n=====Scan Result=====\n\n"; - print "Total Test Cases: $count\n"; - if(scalar(@successed_file) > 0) { - print "\n=========================\npassed $countPassed tests:\n\n"; - foreach $passed (@successed_file) { - print $passed."\n"; - } - print $fh "$countPassed testcases passed\n"; - } - print "\n=========================\nfailed $countFailed tests:\n\n"; - if(scalar(@failed_java2mpl_file) > 0){ - print("=== failed java2mpl: $countJAVA2MPL tests\n"); - print $fh "$countJAVA2MPL testcases failed\n"; - - foreach $failed (@failed_java2mpl_file) { - print $failed."\n"; - } - print "\n"; - } - print "=========================\n"; - close $fh; -} diff --git a/src/MapleFE/test/msts_test.sh b/src/MapleFE/test/msts_test.sh new file mode 100755 index 0000000000000000000000000000000000000000..b502845fdc33c1ba5df9e3d406fbdec49c2b0686 --- /dev/null +++ b/src/MapleFE/test/msts_test.sh @@ -0,0 +1,46 @@ +#!/bin/bash + +[ -n "$MAPLEFE_ROOT" ] || { echo MAPLE_ROOT not set. Please source envsetup.sh.; exit 1; } +cd ${MAPLEFE_ROOT}/test + +if [ ! -d ${MAPLEFE_ROOT}/test/TypeScript ]; then + git clone -b release-1.8 https://github.com/microsoft/TypeScript.git +fi + +if [ -f ${MAPLEFE_ROOT}/test/msts_testlist ]; then + cat msts_testlist | xargs -n1 -I % sh -c '{ rm -f typescript/ms_tests/%; cp -f TypeScript/tests/cases/compiler/% typescript/ms_tests/; cp -f TypeScript/LICENSE.txt typescript/ms_tests/; }' +fi + +# setup files only. do not run test +if [ "x$1" = "xsetup" ]; then + exit 0 +fi + +# export is for sh -c in xargs +export TS2AST=$MAPLEFE_ROOT/output/typescript/bin/ts2ast +export PASS_LIST=msts_passed.txt +FAIL_LIST=msts_failed.txt +MSTEST_DIR=$MAPLEFE_ROOT/test/TypeScript/tests/cases/compiler +N_JOBS=16 + +[ -n "$MAPLE_ROOT" ] || { echo MAPLE_ROOT not set. Please source envsetup.sh.; exit 1; } +if [ ! -d $MSTEST_DIR ]; then + echo "$MSTEST_DIR" does not exist. 
Please git clone https://github.com/microsoft/TypeScript.git under "$MAPLEFE_ROOT/test" + exit 1 +fi + +cd $MAPLEFE_ROOT/test +rm -f $PASS_LIST +find $MSTEST_DIR -name "*.ts" | xargs -n1 -P$N_JOBS -I % sh -c '{ $TS2AST %; exitcode=$?; if [ $exitcode -eq 0 ]; then basename % >> $PASS_LIST; fi }' +cd $MSTEST_DIR +ls *.ts | grep -v -x -f $MAPLEFE_ROOT/test/$PASS_LIST /dev/stdin > $MAPLEFE_ROOT/test/$FAIL_LIST +cd - +sort -o $PASS_LIST $PASS_LIST +echo +echo "Microsoft Typescript compiler testcases: Passed: " `wc -l < $PASS_LIST` " Failed: " `wc -l < $FAIL_LIST` +echo "List of passed and failed cases are in $PASS_LIST and $FAIL_LIST" +echo +echo "Note: Add testcase name from above lists into mstest_testlist if" +echo " you want it included in make test" +echo +cd - > /dev/null diff --git a/src/MapleFE/test/msts_testlist b/src/MapleFE/test/msts_testlist new file mode 100644 index 0000000000000000000000000000000000000000..5444b5c452e1b8e2c7a778b0cda3c34ff9efeb49 --- /dev/null +++ b/src/MapleFE/test/msts_testlist @@ -0,0 +1,5 @@ +checkInterfaceBases.ts +getsetReturnTypes.ts +reExportGlobalDeclaration3.ts +library_ObjectPrototypeProperties.ts + diff --git a/src/MapleFE/test/new_runtests.pl b/src/MapleFE/test/new_runtests.pl new file mode 100755 index 0000000000000000000000000000000000000000..06479ff92804476597d5731effb295d0039554d3 --- /dev/null +++ b/src/MapleFE/test/new_runtests.pl @@ -0,0 +1,228 @@ +#!/usr/bin/perl + +use Cwd; +use warnings; +use experimental 'smartmatch'; +use File::Find; +use File::Basename; + +use File::Find qw(find); + +if(!(defined $ARGV[0])) { + print "------------------------------------------------\n"; + print "usage: runtests.pl [ java | java/subdirectory | typescript | typescript/subdirectory ]\n"; + print "------------------------------------------------\n"; + exit; +} + +my $lang = $ARGV[0]; +my $pwd = getcwd; +my $currdir = "$pwd"; + +my @failed_file; +my @failed_file1; +my @failed_file2; +my @successed_file; + +my $count = 0; +my $counttotal = 0; +my $countfailedcases = 0; +my $countfailedcases1 = 0; +my $countfailedcases2 = 0; +my $countsub = 0; + +my $pinput = ''; +my $cmnd = ''; +my $outroot = ''; +print "Running $lang\n"; +if ($lang =~ /\Qjava\E/) { + $pinput = "java"; + $cmnd = "../output/java/bin/java2ast"; + $cmnd1 = "../output/java/bin/ast2mpl"; + $flag = ""; + $outroot = "$currdir/../output/$pinput/test"; +} elsif ($lang =~ /\Qtypescript\E/) { + $pinput = "ts"; + $cmnd = "../output/typescript/bin/ts2ast"; + $cmnd1 = "../output/typescript/bin/ast2cpp"; + $flag = "--no-imported"; + $outroot = "$currdir/../output/typescript/test"; +} else { + print "$lang is an invalid option\n"; + exit; +} + +my $testdir = $lang; +# get specified directory to test if other than $lang +if(defined $ARGV[1]) { + $testdir = $ARGV[1]; +} + +system("rm -Rf $outroot/report.txt $outroot/diff $outroot/notexists"); + +if(!(-e "$outroot")) { + system("mkdir -p $outroot"); +} + +opendir (my $DIR, $testdir) || die "Error in opening $testdir directory\n"; + +sub listdirs { + my @dirs = @_; + my @files; + + if ( $pinput ~~ [qw( java ts )] ) { + find({ wanted => sub { push @files, glob "\"$_/{*.$pinput,*.$pinput.result}\"" } , no_chdir => 1 }, @dirs); + } else { + find({ wanted => sub { push @files, $_ } , no_chdir => 1 }, @dirs); + } + + return @files; +} + +my @paths = listdirs($testdir) ; + +foreach my $file (@paths) { + my ($filename) = ( $file =~ /([^\\\/]+)$/s ) ; + my ($pathname) = dirname($file); + + if ( $pinput ~~ [qw( java ts )] ) { + system("rm -rf $outroot/$file; cp -rp 
--parents $file $outroot/"); + + if ( ($filename =~ (/(.+)[.]java$/)) || ($filename =~ (/(.+)[.]ts$/)) ) { + + my $origresult = "$pwd/$file.result"; + my $outresult = $file.'.result.'.$pinput; + my $diff_file = $file.'.result.diff'; + my $notexistsdir = "$outroot/notexists"; + my $diffdir = "$outroot/diff"; + + $count ++; + print "."; + if ($count % 50 == 0) { + print " $count\n"; + } + + #my $res = system("$pwd/$cmnd $outroot/$file > $outroot/$outresult"); + #my $res = system('$pwd/$cmnd $outroot/$file; $pwd/$cmnd1 $outroot/$file.ast > $outroot/$outresult'); + my $res = system("$pwd/$cmnd $outroot/$file > $outroot/$outresult"); + #print "$pwd/$cmnd $outroot/$file > $outroot/$outresult"; + + if ($res > 0) { + print " ==$pinput===> $file\n"; + print "$pwd/$cmnd $outroot/$file\n"; + $countfailedcases ++; + push(@failed_file, $pinput.": ".$file); + #print "---------------------------\n"; + next; + } else { + my $res1 = system("$cmnd1 $outroot/$file.ast $flag > $outroot/$outresult.1"); + #print "$cmnd1 $outroot/$file.ast $flag > $outroot/$outresult.1"; + + if ($res1 > 0) { + print " ==$pinput===> $file\n"; + print "$cmnd1 $outroot/$file.ast\n"; + $countfailedcases1 ++; + push(@failed_file1, $pinput.": ".$file); + #print "---------------------------\n"; + next; + } + } + + if (!(-e $origresult) ) { + if(!(-e "$notexistsdir")) { + system("mkdir -p $notexistsdir"); + } + print "\nOriginal file $origresult does NOT exists!\n"; + system("mkdir -p $notexistsdir/$file && touch $notexistsdir/$file"); + $countfailedcases2 ++; + push(@failed_file2, $pinput.": result file not exists: ".$origresult); + } else { + if ((!(-e "$outroot/$outresult")) || (-z "$outroot/$outresult")) { + if(!(-e "$notexistsdir")) { + system("mkdir -p $notexistsdir"); + } + + print "\n$outroot/$outresult either empty or not exists!\n"; + system("mkdir -p $notexistsdir/$file && touch $notexistsdir/$file"); + $countfailedcases2 ++; + push(@failed_file2, $pinput.": file empty or not exists: ".$file); + } else { + my $res2 = system("diff $origresult $outroot/$outresult"); + if ($res2 > 0) { + if(!(-e "$diffdir")) { + system("mkdir -p $diffdir"); + } + + print "\n$origresult $outroot/$outresult are different!!!\n"; + print "\ncp $outroot/$outresult $origresult\n"; + system("mkdir -p $diffdir/$diff_file && touch $diffdir/$diff_file"); + $countfailedcases2 ++; + push(@failed_file2, $pinput.": result files diff: ".$origresult); + } else { + push(@successed_file, $file." 
".$pinput); + } + } + } + } # if #2 + + } # if #1 + +} + +my $countFailed = $countfailedcases + $countfailedcases1 + $countfailedcases2; +my $countPassed = $count - $countFailed; + +my $reportFile = "$outroot/report.txt"; +open(my $fh, '>', $reportFile) or die "Could not open file '$reportFile' $!"; + +if ($countFailed eq 0) { + print "\n\n=====Scan Result=====\n\n"; + print("\n all $count tests passed\n"); + print("======================================================\n"); + print $fh "all $count tests passed\n"; + close $fh; +} else { + print "\n\n=====Scan Result=====\n\n"; + print "Total Test Cases: $count\n"; + if(scalar(@successed_file) > 0) { + print "\n=========================\npassed $countPassed tests:\n\n"; + #foreach $passed (@successed_file) { + # print $passed."\n"; + #} + #print $fh "$countPassed testcases passed\n"; + } + print "\n=========================\nfailed $countFailed tests:\n\n"; + if(scalar(@failed_file) > 0){ + print("=== failed : $countfailedcases tests - $cmnd\n"); + print $fh "$countfailedcases testcases failed\n"; + + foreach $failed (@failed_file) { + print $failed."\n"; + print $fh $failed."\n"; + } + print "\n"; + } + if(scalar(@failed_file1) > 0){ + print("=== failed : $countfailedcases1 tests - $cmnd1\n"); + print $fh "$countfailedcases1 testcases failed\n"; + + foreach $failed (@failed_file1) { + print $failed."\n"; + print $fh $failed."\n"; + } + print "\n"; + } + if(scalar(@failed_file2) > 0){ + print("=== failed : $countfailedcases2 tests - result\n"); + print $fh "$countfailedcases2 testcases failed\n"; + + foreach $failed (@failed_file2) { + print $failed."\n"; + print $fh $failed."\n"; + } + print "\n"; + } + print "=========================\n"; + close $fh; +} + diff --git a/src/MapleFE/test/openjdk/AbstractStringBuilder-simplified.java.result b/src/MapleFE/test/openjdk/AbstractStringBuilder-simplified.java.result deleted file mode 100644 index e91770236ddee7cd6da5c826ea9abf44bc7271ac..0000000000000000000000000000000000000000 --- a/src/MapleFE/test/openjdk/AbstractStringBuilder-simplified.java.result +++ /dev/null @@ -1,43 +0,0 @@ -Matched 329 tokens. 
-============= Module =========== -== Sub Tree == -class AbstractStringBuilder - Fields: - value count MAX_ARRAY_SIZE=Integer.MAX_VALUE Sub 8 - Instance Initializer: - Constructors: - constructor AbstractStringBuilder() throws: - constructor AbstractStringBuilder() throws: - Methods: - func length() throws: - func capacity() throws: - func ensureCapacity() throws: - func ensureCapacityInternal() throws: - func newCapacity() throws: - func hugeCapacity() throws: - func trimToSize() throws: - func setLength() throws: - func charAt() throws: - func codePointAt() throws: - func codePointBefore() throws: - func codePointCount() throws: - func offsetByCodePoints() throws: - func getChars() throws: - func setCharAt() throws: - func append() throws: - func append() throws: - func append() throws: - func append() throws: - func append() throws: - func appendNull() throws: - func append() throws: - func append() throws: - func append() throws: - func append() throws: - func append() throws: - func append() throws: - func append() throws: - func append() throws: - LocalClasses: - LocalInterfaces: - diff --git a/src/MapleFE/test/openjdk/Character-1.java.result b/src/MapleFE/test/openjdk/Character-1.java.result deleted file mode 100644 index f13200748e4839d4e7b77a073301d734d18c6f81..0000000000000000000000000000000000000000 --- a/src/MapleFE/test/openjdk/Character-1.java.result +++ /dev/null @@ -1,95 +0,0 @@ -Matched 5 tokens. -Matched 14 tokens. -Matched 21 tokens. -Matched 28 tokens. -Matched 35 tokens. -Matched 42 tokens. -Matched 5259 tokens. -============= Module =========== -== Sub Tree == -package java.lang -== Sub Tree == -import dalvik.annotation.optimization.FastNative -== Sub Tree == -import java.util.Arrays -== Sub Tree == -import java.util.HashMap -== Sub Tree == -import java.util.Locale -== Sub Tree == -import java.util.Map -== Sub Tree == -class Character - Fields: - MIN_RADIX=2 MAX_RADIX=36 MIN_VALUE=0 MAX_VALUE=65535 TYPE=() UNASSIGNED=0 UPPERCASE_LETTER=1 LOWERCASE_LETTER=2 TITLECASE_LETTER=3 MODIFIER_LETTER=4 OTHER_LETTER=5 NON_SPACING_MARK=6 ENCLOSING_MARK=7 COMBINING_SPACING_MARK=8 DECIMAL_DIGIT_NUMBER=9 LETTER_NUMBER=10 OTHER_NUMBER=11 SPACE_SEPARATOR=12 LINE_SEPARATOR=13 PARAGRAPH_SEPARATOR=14 CONTROL=15 FORMAT=16 PRIVATE_USE=18 SURROGATE=19 DASH_PUNCTUATION=20 START_PUNCTUATION=21 END_PUNCTUATION=22 CONNECTOR_PUNCTUATION=23 OTHER_PUNCTUATION=24 MATH_SYMBOL=25 CURRENCY_SYMBOL=26 MODIFIER_SYMBOL=27 OTHER_SYMBOL=28 INITIAL_QUOTE_PUNCTUATION=29 FINAL_QUOTE_PUNCTUATION=30 ERROR=-1 DIRECTIONALITY_UNDEFINED=Sub - 1 DIRECTIONALITY_LEFT_TO_RIGHT=0 DIRECTIONALITY_RIGHT_TO_LEFT=1 DIRECTIONALITY_RIGHT_TO_LEFT_ARABIC=2 DIRECTIONALITY_EUROPEAN_NUMBER=3 DIRECTIONALITY_EUROPEAN_NUMBER_SEPARATOR=4 DIRECTIONALITY_EUROPEAN_NUMBER_TERMINATOR=5 DIRECTIONALITY_ARABIC_NUMBER=6 DIRECTIONALITY_COMMON_NUMBER_SEPARATOR=7 DIRECTIONALITY_NONSPACING_MARK=8 DIRECTIONALITY_BOUNDARY_NEUTRAL=9 DIRECTIONALITY_PARAGRAPH_SEPARATOR=10 DIRECTIONALITY_SEGMENT_SEPARATOR=11 DIRECTIONALITY_WHITESPACE=12 DIRECTIONALITY_OTHER_NEUTRALS=13 DIRECTIONALITY_LEFT_TO_RIGHT_EMBEDDING=14 DIRECTIONALITY_LEFT_TO_RIGHT_OVERRIDE=15 DIRECTIONALITY_RIGHT_TO_LEFT_EMBEDDING=16 DIRECTIONALITY_RIGHT_TO_LEFT_OVERRIDE=17 DIRECTIONALITY_POP_DIRECTIONAL_FORMAT=18 MIN_HIGH_SURROGATE=55296 MAX_HIGH_SURROGATE=56319 MIN_LOW_SURROGATE=56320 MAX_LOW_SURROGATE=57343 MIN_SURROGATE=MIN_HIGH_SURROGATE MAX_SURROGATE=MAX_LOW_SURROGATE MIN_SUPPLEMENTARY_CODE_POINT=65536 MIN_CODE_POINT=0 MAX_CODE_POINT=1114111 DIRECTIONALITY= - Instance Initializer: - 
Constructors: - Methods: - LocalClasses: - class Subset - Fields: - name - Instance Initializer: - Constructors: - constructor Subset() throws: - cond-branch cond:name EQ null - true branch : - new NullPointerException - false branch : - - this.name Assign name - Methods: - func equals() throws: - return (this EQ obj) - func hashCode() throws: - return hashCode - func toString() throws: - return name - LocalClasses: - LocalInterfaces: - class UnicodeBlock - Fields: - map=new HashMap BASIC_LATIN=new UnicodeBlock LATIN_1_SUPPLEMENT=new UnicodeBlock LATIN_EXTENDED_A=new UnicodeBlock LATIN_EXTENDED_B=new UnicodeBlock IPA_EXTENSIONS=new UnicodeBlock SPACING_MODIFIER_LETTERS=new UnicodeBlock COMBINING_DIACRITICAL_MARKS=new UnicodeBlock GREEK=new UnicodeBlock CYRILLIC=new UnicodeBlock ARMENIAN=new UnicodeBlock HEBREW=new UnicodeBlock ARABIC=new UnicodeBlock DEVANAGARI=new UnicodeBlock BENGALI=new UnicodeBlock GURMUKHI=new UnicodeBlock GUJARATI=new UnicodeBlock ORIYA=new UnicodeBlock TAMIL=new UnicodeBlock TELUGU=new UnicodeBlock KANNADA=new UnicodeBlock MALAYALAM=new UnicodeBlock THAI=new UnicodeBlock LAO=new UnicodeBlock TIBETAN=new UnicodeBlock GEORGIAN=new UnicodeBlock HANGUL_JAMO=new UnicodeBlock LATIN_EXTENDED_ADDITIONAL=new UnicodeBlock GREEK_EXTENDED=new UnicodeBlock GENERAL_PUNCTUATION=new UnicodeBlock SUPERSCRIPTS_AND_SUBSCRIPTS=new UnicodeBlock CURRENCY_SYMBOLS=new UnicodeBlock COMBINING_MARKS_FOR_SYMBOLS=new UnicodeBlock LETTERLIKE_SYMBOLS=new UnicodeBlock NUMBER_FORMS=new UnicodeBlock ARROWS=new UnicodeBlock MATHEMATICAL_OPERATORS=new UnicodeBlock MISCELLANEOUS_TECHNICAL=new UnicodeBlock CONTROL_PICTURES=new UnicodeBlock OPTICAL_CHARACTER_RECOGNITION=new UnicodeBlock ENCLOSED_ALPHANUMERICS=new UnicodeBlock BOX_DRAWING=new UnicodeBlock BLOCK_ELEMENTS=new UnicodeBlock GEOMETRIC_SHAPES=new UnicodeBlock MISCELLANEOUS_SYMBOLS=new UnicodeBlock DINGBATS=new UnicodeBlock CJK_SYMBOLS_AND_PUNCTUATION=new UnicodeBlock HIRAGANA=new UnicodeBlock KATAKANA=new UnicodeBlock BOPOMOFO=new UnicodeBlock HANGUL_COMPATIBILITY_JAMO=new UnicodeBlock KANBUN=new UnicodeBlock ENCLOSED_CJK_LETTERS_AND_MONTHS=new UnicodeBlock CJK_COMPATIBILITY=new UnicodeBlock CJK_UNIFIED_IDEOGRAPHS=new UnicodeBlock HANGUL_SYLLABLES=new UnicodeBlock PRIVATE_USE_AREA=new UnicodeBlock CJK_COMPATIBILITY_IDEOGRAPHS=new UnicodeBlock ALPHABETIC_PRESENTATION_FORMS=new UnicodeBlock ARABIC_PRESENTATION_FORMS_A=new UnicodeBlock COMBINING_HALF_MARKS=new UnicodeBlock CJK_COMPATIBILITY_FORMS=new UnicodeBlock SMALL_FORM_VARIANTS=new UnicodeBlock ARABIC_PRESENTATION_FORMS_B=new UnicodeBlock HALFWIDTH_AND_FULLWIDTH_FORMS=new UnicodeBlock SPECIALS=new UnicodeBlock SURROGATES_AREA=new UnicodeBlock SYRIAC=new UnicodeBlock THAANA=new UnicodeBlock SINHALA=new UnicodeBlock MYANMAR=new UnicodeBlock ETHIOPIC=new UnicodeBlock CHEROKEE=new UnicodeBlock UNIFIED_CANADIAN_ABORIGINAL_SYLLABICS=new UnicodeBlock OGHAM=new UnicodeBlock RUNIC=new UnicodeBlock KHMER=new UnicodeBlock MONGOLIAN=new UnicodeBlock BRAILLE_PATTERNS=new UnicodeBlock CJK_RADICALS_SUPPLEMENT=new UnicodeBlock KANGXI_RADICALS=new UnicodeBlock IDEOGRAPHIC_DESCRIPTION_CHARACTERS=new UnicodeBlock BOPOMOFO_EXTENDED=new UnicodeBlock CJK_UNIFIED_IDEOGRAPHS_EXTENSION_A=new UnicodeBlock YI_SYLLABLES=new UnicodeBlock YI_RADICALS=new UnicodeBlock CYRILLIC_SUPPLEMENTARY=new UnicodeBlock TAGALOG=new UnicodeBlock HANUNOO=new UnicodeBlock BUHID=new UnicodeBlock TAGBANWA=new UnicodeBlock LIMBU=new UnicodeBlock TAI_LE=new UnicodeBlock KHMER_SYMBOLS=new UnicodeBlock PHONETIC_EXTENSIONS=new UnicodeBlock 
MISCELLANEOUS_MATHEMATICAL_SYMBOLS_A=new UnicodeBlock SUPPLEMENTAL_ARROWS_A=new UnicodeBlock SUPPLEMENTAL_ARROWS_B=new UnicodeBlock MISCELLANEOUS_MATHEMATICAL_SYMBOLS_B=new UnicodeBlock SUPPLEMENTAL_MATHEMATICAL_OPERATORS=new UnicodeBlock MISCELLANEOUS_SYMBOLS_AND_ARROWS=new UnicodeBlock KATAKANA_PHONETIC_EXTENSIONS=new UnicodeBlock YIJING_HEXAGRAM_SYMBOLS=new UnicodeBlock VARIATION_SELECTORS=new UnicodeBlock LINEAR_B_SYLLABARY=new UnicodeBlock LINEAR_B_IDEOGRAMS=new UnicodeBlock AEGEAN_NUMBERS=new UnicodeBlock OLD_ITALIC=new UnicodeBlock GOTHIC=new UnicodeBlock UGARITIC=new UnicodeBlock DESERET=new UnicodeBlock SHAVIAN=new UnicodeBlock OSMANYA=new UnicodeBlock CYPRIOT_SYLLABARY=new UnicodeBlock BYZANTINE_MUSICAL_SYMBOLS=new UnicodeBlock MUSICAL_SYMBOLS=new UnicodeBlock TAI_XUAN_JING_SYMBOLS=new UnicodeBlock MATHEMATICAL_ALPHANUMERIC_SYMBOLS=new UnicodeBlock CJK_UNIFIED_IDEOGRAPHS_EXTENSION_B=new UnicodeBlock CJK_COMPATIBILITY_IDEOGRAPHS_SUPPLEMENT=new UnicodeBlock TAGS=new UnicodeBlock VARIATION_SELECTORS_SUPPLEMENT=new UnicodeBlock SUPPLEMENTARY_PRIVATE_USE_AREA_A=new UnicodeBlock SUPPLEMENTARY_PRIVATE_USE_AREA_B=new UnicodeBlock HIGH_SURROGATES=new UnicodeBlock HIGH_PRIVATE_USE_SURROGATES=new UnicodeBlock LOW_SURROGATES=new UnicodeBlock ARABIC_SUPPLEMENT=new UnicodeBlock NKO=new UnicodeBlock SAMARITAN=new UnicodeBlock MANDAIC=new UnicodeBlock ETHIOPIC_SUPPLEMENT=new UnicodeBlock UNIFIED_CANADIAN_ABORIGINAL_SYLLABICS_EXTENDED=new UnicodeBlock NEW_TAI_LUE=new UnicodeBlock BUGINESE=new UnicodeBlock TAI_THAM=new UnicodeBlock BALINESE=new UnicodeBlock SUNDANESE=new UnicodeBlock BATAK=new UnicodeBlock LEPCHA=new UnicodeBlock OL_CHIKI=new UnicodeBlock VEDIC_EXTENSIONS=new UnicodeBlock PHONETIC_EXTENSIONS_SUPPLEMENT=new UnicodeBlock COMBINING_DIACRITICAL_MARKS_SUPPLEMENT=new UnicodeBlock GLAGOLITIC=new UnicodeBlock LATIN_EXTENDED_C=new UnicodeBlock COPTIC=new UnicodeBlock GEORGIAN_SUPPLEMENT=new UnicodeBlock TIFINAGH=new UnicodeBlock ETHIOPIC_EXTENDED=new UnicodeBlock CYRILLIC_EXTENDED_A=new UnicodeBlock SUPPLEMENTAL_PUNCTUATION=new UnicodeBlock CJK_STROKES=new UnicodeBlock LISU=new UnicodeBlock VAI=new UnicodeBlock CYRILLIC_EXTENDED_B=new UnicodeBlock BAMUM=new UnicodeBlock MODIFIER_TONE_LETTERS=new UnicodeBlock LATIN_EXTENDED_D=new UnicodeBlock SYLOTI_NAGRI=new UnicodeBlock COMMON_INDIC_NUMBER_FORMS=new UnicodeBlock PHAGS_PA=new UnicodeBlock SAURASHTRA=new UnicodeBlock DEVANAGARI_EXTENDED=new UnicodeBlock KAYAH_LI=new UnicodeBlock REJANG=new UnicodeBlock HANGUL_JAMO_EXTENDED_A=new UnicodeBlock JAVANESE=new UnicodeBlock CHAM=new UnicodeBlock MYANMAR_EXTENDED_A=new UnicodeBlock TAI_VIET=new UnicodeBlock ETHIOPIC_EXTENDED_A=new UnicodeBlock MEETEI_MAYEK=new UnicodeBlock HANGUL_JAMO_EXTENDED_B=new UnicodeBlock VERTICAL_FORMS=new UnicodeBlock ANCIENT_GREEK_NUMBERS=new UnicodeBlock ANCIENT_SYMBOLS=new UnicodeBlock PHAISTOS_DISC=new UnicodeBlock LYCIAN=new UnicodeBlock CARIAN=new UnicodeBlock OLD_PERSIAN=new UnicodeBlock IMPERIAL_ARAMAIC=new UnicodeBlock PHOENICIAN=new UnicodeBlock LYDIAN=new UnicodeBlock KHAROSHTHI=new UnicodeBlock OLD_SOUTH_ARABIAN=new UnicodeBlock AVESTAN=new UnicodeBlock INSCRIPTIONAL_PARTHIAN=new UnicodeBlock INSCRIPTIONAL_PAHLAVI=new UnicodeBlock OLD_TURKIC=new UnicodeBlock RUMI_NUMERAL_SYMBOLS=new UnicodeBlock BRAHMI=new UnicodeBlock KAITHI=new UnicodeBlock CUNEIFORM=new UnicodeBlock CUNEIFORM_NUMBERS_AND_PUNCTUATION=new UnicodeBlock EGYPTIAN_HIEROGLYPHS=new UnicodeBlock BAMUM_SUPPLEMENT=new UnicodeBlock KANA_SUPPLEMENT=new UnicodeBlock ANCIENT_GREEK_MUSICAL_NOTATION=new 
UnicodeBlock COUNTING_ROD_NUMERALS=new UnicodeBlock MAHJONG_TILES=new UnicodeBlock DOMINO_TILES=new UnicodeBlock PLAYING_CARDS=new UnicodeBlock ENCLOSED_ALPHANUMERIC_SUPPLEMENT=new UnicodeBlock ENCLOSED_IDEOGRAPHIC_SUPPLEMENT=new UnicodeBlock MISCELLANEOUS_SYMBOLS_AND_PICTOGRAPHS=new UnicodeBlock EMOTICONS=new UnicodeBlock TRANSPORT_AND_MAP_SYMBOLS=new UnicodeBlock ALCHEMICAL_SYMBOLS=new UnicodeBlock CJK_UNIFIED_IDEOGRAPHS_EXTENSION_C=new UnicodeBlock CJK_UNIFIED_IDEOGRAPHS_EXTENSION_D=new UnicodeBlock ARABIC_EXTENDED_A=new UnicodeBlock SUNDANESE_SUPPLEMENT=new UnicodeBlock MEETEI_MAYEK_EXTENSIONS=new UnicodeBlock MEROITIC_HIEROGLYPHS=new UnicodeBlock MEROITIC_CURSIVE=new UnicodeBlock SORA_SOMPENG=new UnicodeBlock CHAKMA=new UnicodeBlock SHARADA=new UnicodeBlock TAKRI=new UnicodeBlock MIAO=new UnicodeBlock ARABIC_MATHEMATICAL_ALPHABETIC_SYMBOLS=new UnicodeBlock blockStarts=[] blocks= - Instance Initializer: - Constructors: - constructor UnicodeBlock() throws: - constructor UnicodeBlock() throws: - cond-branch cond:isMap - true branch : - map.put(idName,this) - false branch : - - constructor UnicodeBlock() throws: - map.put(alias,this) - constructor UnicodeBlock() throws: - String - alias - aliases - map.put(alias,this) - Methods: - func of() throws: - return of((int)c) - func of() throws: - cond-branch cond:isValidCodePoint(codePoint) - true branch : - new IllegalArgumentException - false branch : - - var:top,bottom,current - bottom Assign 0 - top Assign blockStarts.length - current Assign - while top Sub bottom GT 1 cond-branch cond:codePoint GE - true branch : - bottom Assign current - false branch : - top Assign current - - current Assign (top Add bottom)2 - - return - LocalClasses: - LocalInterfaces: - LocalInterfaces: - diff --git a/src/MapleFE/test/runtests.pl b/src/MapleFE/test/runtests.pl index 7824d713863e65f3ece5fcc3b3d0d2df7e038db4..f1bd481822ca842ced060d3e79754e7b0f63ccf7 100755 --- a/src/MapleFE/test/runtests.pl +++ b/src/MapleFE/test/runtests.pl @@ -52,7 +52,7 @@ my $countsub = 0; system("rm -Rf report.txt $pwd/output $pwd/diff $pwd/notexists"); my $currdir = "$pwd"; -my $outroot = "$currdir/../output/test"; +my $outroot = "$currdir/../output/java/test"; if(!(-e "$outroot")) { system("mkdir -p $outroot"); } @@ -106,37 +106,37 @@ foreach my $dir (@dirname) { #print "here15 dir $dir\n"; #print "here16 src_file $src_file\n"; if ($dir eq "java2mpl") { - $res = system("$pwd/../output/java/java2mpl $outdir/$src_file > $outdir/$result_file"); + $res = system("$pwd/../output/java/java/java2mpl $outdir/$src_file > $outdir/$result_file"); } if ($dir eq "errtest") { - $res = system("$pwd/../output/java/java2mpl $outdir/$src_file > $outdir/$result_file"); + $res = system("$pwd/../output/java/java/java2mpl $outdir/$src_file > $outdir/$result_file"); } if ($dir eq "others") { - $res = system("$pwd/../output/java/java2mpl $outdir/$src_file > $outdir/$result_file"); + $res = system("$pwd/../output/java/java/java2mpl $outdir/$src_file > $outdir/$result_file"); } if ($dir eq "openjdk") { - $res = system("$pwd/../output/java/java2mpl $outdir/$src_file > $outdir/$result_file"); + $res = system("$pwd/../output/java/java/java2mpl $outdir/$src_file > $outdir/$result_file"); } if ($dir eq "syntaxonly") { - $res = system("$pwd/../output/java/java2mpl $outdir/$src_file > $outdir/$result_file"); + $res = system("$pwd/../output/java/java/java2mpl $outdir/$src_file > $outdir/$result_file"); } if ($res > 0) { #print "over here1...\n"; if ($dir eq "java2mpl") { - print "$pwd/../output/java/java2mpl 
$outdir/$src_file\n"; + print "$pwd/../output/java/java/java2mpl $outdir/$src_file\n"; } if ($dir eq "errtest") { - print "$pwd/../output/java/java2mpl $outdir/$src_file\n"; + print "$pwd/../output/java/java/java2mpl $outdir/$src_file\n"; } if ($dir eq "others") { - print "$pwd/../output/java/java2mpl $outdir/$src_file\n"; + print "$pwd/../output/java/java/java2mpl $outdir/$src_file\n"; } if ($dir eq "openjdk") { - print "$pwd/../output/java/java2mpl $outdir/$src_file\n"; + print "$pwd/../output/java/java/java2mpl $outdir/$src_file\n"; } if ($dir eq "syntaxonly") { - print "$pwd/../output/java/java2mpl $outdir/$src_file\n"; + print "$pwd/../output/java/java/java2mpl $outdir/$src_file\n"; } print " ==$dir===> $file\n"; $countfailedjava ++; diff --git a/src/MapleFE/test/sharedfe/add2.java.result b/src/MapleFE/test/sharedfe/add2.java.result deleted file mode 100644 index 651c3b881b2baed1eb034ff2218c979c64aeba1e..0000000000000000000000000000000000000000 --- a/src/MapleFE/test/sharedfe/add2.java.result +++ /dev/null @@ -1,35 +0,0 @@ -!!! Stack match result: 1 -!!! Stack match result: 1 -!!! Stack match result: 1 - - -============================== function ============================== -func foo (int a, int b) { - LocalVariableDeclarationStatement - "int" - Identifier - "c" - Assignment - LeftHandSide + AssignmentOperator + Expression - Identifier - "c" - '=' - Expression - BinaryExpression - AdditiveExpression - AdditiveExpression + '+' + MultiplicativeExpression - Identifier - "a" - '+' - Identifier - "b" - StatementWithoutTrailingSubstatement - ReturnStatement - "return" + ZEROORONE(Expression) + ';' - "return" - Identifier - "c" -} -====================================================================== - - diff --git a/src/MapleFE/test/sharedfe/add3.java.result b/src/MapleFE/test/sharedfe/add3.java.result deleted file mode 100644 index 7512d871815b308b5b203154fbec1c8109d90954..0000000000000000000000000000000000000000 --- a/src/MapleFE/test/sharedfe/add3.java.result +++ /dev/null @@ -1,40 +0,0 @@ -!!! Stack match result: 1 -!!! Stack match result: 1 -!!! Stack match result: 1 - - -============================== function ============================== -func foo (int a, int b) { - LocalVariableDeclarationStatement - "int" - Identifier - "c" - Assignment - LeftHandSide + AssignmentOperator + Expression - Identifier - "c" - '=' - Expression - BinaryExpression - AdditiveExpression - AdditiveExpression + '+' + MultiplicativeExpression - AdditiveExpression - AdditiveExpression + '+' + MultiplicativeExpression - Identifier - "a" - '+' - Identifier - "b" - '+' - Identifier - "c" - StatementWithoutTrailingSubstatement - ReturnStatement - "return" + ZEROORONE(Expression) + ';' - "return" - Identifier - "c" -} -====================================================================== - - diff --git a/src/MapleFE/test/sharedfe/run b/src/MapleFE/test/sharedfe/run deleted file mode 100755 index 1a06476d61d839c098c166018c83898a812c2939..0000000000000000000000000000000000000000 --- a/src/MapleFE/test/sharedfe/run +++ /dev/null @@ -1,8 +0,0 @@ -#! /bin/bash -DIR=`pwd | sed "s/MapleFE.*/MapleFE/"` -echo DIR=${DIR} -cd $BUILDDIR/autogen - -#test t.java -echo testing ... 
t.java -./sharedfe ../../test/sharedfe/t.java diff --git a/src/MapleFE/test/sharedfe_runtests.pl b/src/MapleFE/test/sharedfe_runtests.pl deleted file mode 100755 index 9e9a0d8c1c9da801ba72d4e0d5a8a8121736dfa2..0000000000000000000000000000000000000000 --- a/src/MapleFE/test/sharedfe_runtests.pl +++ /dev/null @@ -1,193 +0,0 @@ -#!/usr/bin/perl -w -use Cwd; -use warnings; - -my $pwd = getcwd; - -#print "here1 current pwd $pwd\n"; - -if ((!(defined $ARGV[0])) || ($ARGV[0] ne "sharedfe")) { - print "------------------------------------------------\n"; - print "usage: sharedfe_runtests.pl sharedfe \n"; - print "------------------------------------------------\n"; - exit; -} - -$dirname = "./$ARGV[0]"; - -my $outdir = "$pwd/sharedfe_output"; -my $diffdir = "$pwd/sharedfe_diff"; -my $notexistsdir = "$pwd/sharedfe_notexists"; -if(!(-e "$outdir")) { - system("mkdir -p $outdir"); -} - -my @failed_sharedfe_file; -my @successed_file; - -my $count = 0; -my $countsharedfe = 0; - - -chdir $dirname; -$dirname = "./"; -opendir (DIR, $dirname ) || die "Error in opening dir $dirname\n"; - -#print "here2 dirname $dirname\n"; - -print("\n====================== run tests: $ARGV[0] =====================\n"); - -while( ($srcdir = readdir(DIR))){ - if(-d $srcdir and $srcdir ne ".." and $srcdir ne "output" and $srcdir ne "temp") { - my $predir = getcwd; - chdir $srcdir; - my @javafiles; - @javafiles = <*.java>; - -#print "here3 source directory $srcdir\n"; -#print "here4 predir $predir\n"; - - my @allfiles = (@javafiles); - foreach $fullname (@allfiles) { - $count ++; - (my $file = $fullname) =~ s/\.[^.]+$//; - if(defined $ARGV[1]) { - print "\n$file"; - } else { - print "."; - } - if ($count % 50 == 0) { - print " $count\n"; - } - my $flag = 0; - my $src_file = $fullname; - my $sharedfe_oresult_file = $file.'.java.result'; - my $sharedfe_log_file = $file.'.sharedfe.log'; - my $sharedfe_result_file = $file.'.java.sharedfe.result'; - my $sharedfe_err_file = $file.'.err'; - my $sharedfe_diff_file = $file.'.java.diff'; - - if(!(-e "$outdir")) { - system("mkdir -p $outdir"); - } - -#print "here5 outdir $outdir\n"; -#print "here6 src_file $src_file\n"; - - system("cp $src_file $outdir/$src_file"); - $res = system("cd $BUILDDIR/autogen; ./sharedfe $outdir/$src_file > $outdir/$sharedfe_result_file"); - if ($res > 0) { - print "\n(cd $BUILDDIR/autogen; gdb --args ./sharedfe $outdir/$src_file)\n"; - print " ==sharedfe===> $file\n"; -# $countsharedfe ++; -# push(@failed_sharedfe_file, $file); - $flag ++; - next; - } - -#print "here7 diff src_file $pwd/sharedfe/$sharedfe_oresult_file\n"; -#print "here8 diff sharedfe_result_file $sharedfe_result_file\n"; - - if (!(-e $sharedfe_oresult_file)) { - if(!(-e "$notexistsdir")) { - system("mkdir -p $notexistsdir"); - } - -#print "here9 $sharedfe_oresult_file NOT exists\n"; - print "Original file $sharedfe_oresult_file does NOT exists!\n"; - system("touch $notexistsdir/$sharedfe_oresult_file"); - $countsharedfe ++; - push(@failed_sharedfe_file, $file); - } - else { -#print "here10 sharedfe_result_file $outdir/$sharedfe_result_file\n"; - if ((!(-e "$outdir/$sharedfe_result_file")) || (-z "$outdir/$sharedfe_result_file")) { - if(!(-e "$notexistsdir")) { - system("mkdir -p $notexistsdir"); - } - -#print "here11 $outdir/$sharedfe_result_file NOT exists\n"; - print "$sharedfe_result_file either empty or not exists!\n"; - system("touch $notexistsdir/$sharedfe_result_file"); - $countsharedfe ++; - push(@failed_sharedfe_file, $file); - } else { - $res2 = system("diff 
$pwd/sharedfe/$sharedfe_oresult_file $outdir/$sharedfe_result_file"); - if ($res2 > 0) { - if(!(-e "$diffdir")) { - system("mkdir -p $diffdir"); - } - -#print "here12 $sharedfe_diff_file Different!!!\n"; - print "$sharedfe_oresult_file $sharedfe_result_file are different!!!\n"; - system("touch $diffdir/$sharedfe_diff_file"); - $countsharedfe ++; - push(@failed_sharedfe_file, $file); - } else { - push(@successed_file, $file); - } - - } - } - -# if($flag eq 0){ -# push(@successed_file, $file); -# } - next; - - if ($flag eq -1) { - push(@successed_file, $file); - system("rm -f $outdir/$src_file"); - system("rm -f $outdir/$sharedfe_log_file"); - system("rm -f $outdir/$sharedfe_oresult_file"); - system("rm -f $outdir/$sharedfe_result_file"); - system("rm -f $outdir/$sharedfe_err_file"); - system("rm -f $diffdir/$sharedfe_diff_file"); - system("rm -f $notexists/$sharedfe_oresult_file"); - system("rm -f $notexists/$sharedfe_result_file"); - next; - } - } - chdir $predir; - } -} - -print " $count\n"; -closedir(DIR); -chdir $pwd; - -my $countFailed = $countsharedfe ; -my $countPassed = $count - $countFailed; - -my $reportFile = 'sharedfe_report.txt'; -open(my $fh, '>', $reportFile) or die "Could not open file '$reportFile' $!"; -print $fh "$ARGV[0] report: \n"; - -if ($countFailed eq 0) { - print("\n all $count tests passed\n"); - print("======================================================\n"); - print $fh "all $count tests passed\n"; - close $fh; -} else { - print "\n\n=====Scan Result=====\n\n"; - print "Total Test Cases: $count\n"; - if(scalar(@successed_file) > 0) { - print "\n=========================\npassed $countPassed tests:\n\n"; - foreach $passed (@successed_file) { - print $passed."\n"; - } - print $fh "$countPassed testcases passed\n"; - } - print "\n=========================\nfailed $countFailed tests:\n\n"; - if(scalar(@failed_sharedfe_file) > 0){ - print("=== failed sharedfe: $countsharedfe tests\n"); - print $fh "$countsharedfe testcases failed\n"; - - foreach $failed (@failed_sharedfe_file) { - print $failed."\n"; - } - print "\n"; - } - print "=========================\n"; - close $fh; -} diff --git a/src/MapleFE/test/typescript/generate-result.sh b/src/MapleFE/test/typescript/generate-result.sh new file mode 100755 index 0000000000000000000000000000000000000000..5e3e9c5354afa6bc5edbf0428348ba74db0f96c9 --- /dev/null +++ b/src/MapleFE/test/typescript/generate-result.sh @@ -0,0 +1,21 @@ +#!/bin/bash +# Copyright (C) [2020] Futurewei Technologies, Inc. All rights reverved. +# +# OpenArkFE is licensed under the Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# + +FILES=`find . -name "*.ts"` +for f in $FILES +do + echo "Generating result for $f ..." 
+ $MAPLEFE_ROOT/output/typescript/bin/ts2ast $f > $f.result +done diff --git a/src/MapleFE/test/typescript/ms_tests/README b/src/MapleFE/test/typescript/ms_tests/README new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/src/MapleFE/test/typescript/ms_tests/checkInterfaceBases.ts.result b/src/MapleFE/test/typescript/ms_tests/checkInterfaceBases.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..06e1681196983bfca57f375df768dced69a0d430 --- /dev/null +++ b/src/MapleFE/test/typescript/ms_tests/checkInterfaceBases.ts.result @@ -0,0 +1,13 @@ +Matched 16 tokens. +Matched 24 tokens. +Matched 32 tokens. +Matched 40 tokens. +============= Module =========== +== Sub Tree == +ts_interface: JQueryEventObjectTest {data;which;metaKey } +== Sub Tree == +trip-slash reference path = "jquery.d.ts" +== Sub Tree == +ts_interface: SecondEvent {data } +== Sub Tree == +ts_interface: Third { } diff --git a/src/MapleFE/test/typescript/ms_tests/getsetReturnTypes.ts.result b/src/MapleFE/test/typescript/ms_tests/getsetReturnTypes.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..e0ca09b6180bf1a841fd45c57c16dc3775ae03e1 --- /dev/null +++ b/src/MapleFE/test/typescript/ms_tests/getsetReturnTypes.ts.result @@ -0,0 +1,14 @@ +Matched 22 tokens. +Matched 32 tokens. +Matched 44 tokens. +============= Module =========== +== Sub Tree == +func makePoint(x) throws: + return {x:get x() throws: + return x +} + +== Sub Tree == +js_var Decl: x=makePoint(2).x +== Sub Tree == +js_var Decl: y=makePoint(2).x diff --git a/src/MapleFE/test/typescript/ms_tests/library_ObjectPrototypeProperties.ts.result b/src/MapleFE/test/typescript/ms_tests/library_ObjectPrototypeProperties.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..1cf0e482cb7d522e06cb8c2022e407bc2f85119f --- /dev/null +++ b/src/MapleFE/test/typescript/ms_tests/library_ObjectPrototypeProperties.ts.result @@ -0,0 +1,22 @@ +Matched 6 tokens. +Matched 14 tokens. +Matched 22 tokens. +Matched 30 tokens. +Matched 39 tokens. +Matched 48 tokens. +Matched 57 tokens. +============= Module =========== +== Sub Tree == +Object.prototype.constructor +== Sub Tree == +Object.prototype.toString() +== Sub Tree == +Object.prototype.toLocaleString() +== Sub Tree == +Object.prototype.valueOf() +== Sub Tree == +Object.prototype.hasOwnProperty("string") +== Sub Tree == +Object.prototype.isPrototypeOf(Object) +== Sub Tree == +Object.prototype.propertyIsEnumerable("string") diff --git a/src/MapleFE/test/typescript/ms_tests/reExportGlobalDeclaration3.ts.result b/src/MapleFE/test/typescript/ms_tests/reExportGlobalDeclaration3.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..c0e6053f15e6add6d4b1949ee1355d75bbe1c288 --- /dev/null +++ b/src/MapleFE/test/typescript/ms_tests/reExportGlobalDeclaration3.ts.result @@ -0,0 +1,23 @@ +Matched 11 tokens. +Matched 22 tokens. +Matched 31 tokens. +Matched 40 tokens. +Matched 47 tokens. +Matched 54 tokens. 
+============= Module =========== +== Sub Tree == +declare namespace NS1 + export {js_var Decl: foo} + +== Sub Tree == +declare namespace NS2 + export {js_var Decl: foo} + +== Sub Tree == +export {NS1,NS1 as NNS1} +== Sub Tree == +export {NS2,NS2 as NNS2} +== Sub Tree == +export {NS1 as NNNS1} +== Sub Tree == +export {NS2 as NNNS2} diff --git a/src/MapleFE/test/typescript/ts2cxx-test.sh b/src/MapleFE/test/typescript/ts2cxx-test.sh new file mode 100755 index 0000000000000000000000000000000000000000..33897fa18fd607b8c978d4a89637679d88b8f66d --- /dev/null +++ b/src/MapleFE/test/typescript/ts2cxx-test.sh @@ -0,0 +1,115 @@ +#!/bin/bash +# Usage: cd MapleFE/test/typescript/unit_tests; ../ts2cpp-test.sh *.ts +[ $# -lt 1 ] && exec $0 $(git ls-files "*.ts") +SUCC= +MPLFEPATH=$(cd $(dirname $0)/../../; pwd) +TSOUT=$MPLFEPATH/output/typescript +RTSRC=$MPLFEPATH/ast2cpp/runtime/src +RTINC=$MPLFEPATH/ast2cpp/runtime/include +ASTINC=$MPLFEPATH/astopt/include +TS2AST=$TSOUT/bin/ts2ast +AST2CPP=$TSOUT/bin/ast2cpp +TSCSH=$(dirname $0)/tsc.sh + +# Acquire/release a lock +typeset -i LockVar +LockVar=1 +function AcquireLock { + while [[ $LockVar -ne 0 ]] || sleep 0.1; do + ln -s Lock_$2 $1-lock-$((LockVar=(LockVar+1)%$3)) > /dev/null 2>&1 && break + done +} +function ReleaseLock { + rm -f $1-lock-$LockVar +} +trap "{ pstree -p $$ | tr ')' '\n' | sed 's/.*(//' | xargs kill -9 2> /dev/null; rm -f ts2cpp-lock-*; }" SIGINT SIGQUIT SIGKILL SIGTERM +rm -rf ts2cpp-lock-* *-ts2cpp.out ts2cpp.summary.out ts2cpp.failures*.out +cnt=0 +if [ $# -gt 1 ]; then + list1=$(grep -L -e "^ *import " -e "^ *export .* from " "$@") + list2=$(grep -l -e "^ *import " -e "^ *export .* from " "$@") +else + list1="$@" list2= +fi +single="no" +for list in "$list1" "$list2"; do +for f in $list; do + echo $((++cnt)): $f + t=$(basename $f .ts) + [ -f $t.ts ] && f=$t.ts + AcquireLock ts2cpp for_$t $(nproc) + (set -x + while true; do + $TS2AST $f || { echo "(ts2ast)$f" >> ts2cpp.failures.out; break; } + dep=$(sed 's/[ ,:|]\(import(\)/\n\1/g' "$f" | grep -E "^ *[ei][xm]port.*( from |require|\( *['\"])" | \ + sed -r "s/^ *[ei][xm]port.*( from |require *\(|\() *['\"]([^'\"]*).*/\2.cpp/" | sort -u) + for cpp in $dep; do + ts=$(sed 's/\.cpp/.ts/' <<< "$cpp") + $TS2AST $ts + dep="$dep "$(sed 's/[ ,:|]\(import(\)/\n\1/g' "$ts" | grep -E "^ *[ei][xm]port.*( from |require|\( *['\"])" | \ + sed -r "s/^ *[ei][xm]port.*( from |require *\(|\() *['\"]([^'\"]*).*/\2.cpp/" | sort -u) + done + dep=$(echo $dep | xargs -n1 | sort -u) + $AST2CPP $f.ast || { echo "(ast2cpp)$f" >> ts2cpp.failures.out; break; } + g++ -std=c++17 -g -I$RTINC -I$ASTINC $t.cpp $RTSRC/*.cpp $dep -o $t.out || { echo "(g++)$f" >> ts2cpp.failures2.out; break; } + ./$t.out 2>&1 > $f-run.out || { echo "(run)$f" >> ts2cpp.failures2.out; break; } + $TSCSH $f + diff $f-run.out $f-nodejs.out + if [ $? -ne 0 ]; then + sed -e 's/^[A-Za-z]* {/{/' $f-run.out | diff - $f-nodejs.out + if [ $? -ne 0 ]; then + sed -e 's/^[A-Za-z]* {/{/' -e 's/} [A-Za-z]* {/} {/' $f-run.out | diff - $f-nodejs.out + if [ $? 
-ne 0 ]; then + echo "(result)$f" >> ts2cpp.failures3.out + break + fi + fi + fi + echo $t >> ts2cpp.summary.out + break + done + ReleaseLock ts2cpp + ) >& $f-ts2cpp.out & + if [ $single = "yes" ]; then + wait + fi +done 2>&1 +wait +single="yes" +done +num=$(echo $list1 $list2 | wc -w) +total=$(git ls-files "*.ts" | wc -w) +log=cxx-tmp.log +[ $num -eq $total ] && log=cxx.log +if [ -f ts2cpp.summary.out ]; then + echo -e "\nDate: $(date)\nTest cases passed:" | tee -a $log + sort ts2cpp.summary.out | xargs -n1 | nl | tee -a $log +fi +if [ -f ts2cpp.failures3.out ]; then + echo -e "\nTest cases failed due to unexpected results:" | tee -a $log + sort ts2cpp.failures3.out | xargs -n1 | nl | tee -a $log + if [ $num -eq 1 ]; then + echo -e "\ndiff $t.ts-nodejs.out $t.ts-run.out" + diff $t.ts-nodejs.out $t.ts-run.out + fi +fi +if [ -f ts2cpp.failures2.out ]; then + echo -e "\nTest cases failed due to g++ or run:" | tee -a $log + sort ts2cpp.failures2.out | xargs -n1 | nl | tee -a $log + if [ $num -eq 1 ]; then + echo -e "\nCommand line to compile $t.cpp:" + grep -- "-std=c++17" $t.ts-ts2cpp.out | sed 's/^+/ /' + fi +fi +if [ -f ts2cpp.failures.out ]; then + echo -e "\nTest cases failed due to ts2ast or ast2cpp:" | tee -a $log + sort ts2cpp.failures.out | xargs -n1 | nl | tee -a $log +fi +if [ $num -eq $total ]; then + grep -c ": error:" *.ts-ts2cpp.out | sort -nrt: -k2 | grep -v :0 | sed 's/-ts2cpp.out//' > cxx-error.log + lines=$(grep -n -e "Test cases passed:" -e "Test cases failed due to g++ or run:" $log | \ + grep -A1 ":Test cases passed:" | tail -2 | cut -d: -f1) + sed -n $(echo $lines | sed 's/[^0-9]/,/')p $log | grep "[0-9]" | expand | cut -c9- > cxx-succ.log +else + echo Saved testing results to file $log +fi diff --git a/src/MapleFE/test/typescript/tsc.sh b/src/MapleFE/test/typescript/tsc.sh new file mode 100755 index 0000000000000000000000000000000000000000..1b2579cc6aa8169abb928c804af71e7c6cd88ce3 --- /dev/null +++ b/src/MapleFE/test/typescript/tsc.sh @@ -0,0 +1,63 @@ +#!/bin/bash +# Acquire/release a lock +typeset -i LockVar +LockVar=1 +function AcquireLock { + while [[ $LockVar -ne 0 ]] || sleep 0.1; do + ln -s Lock_$2 $1-lock-$((LockVar=(LockVar+1)%$3)) > /dev/null 2>&1 && break + done +} +function ReleaseLock { + rm -f $1-lock-$LockVar +} +rm -rf -- tsc-lock-* *-tsc.out tsc.summary.out tsc.failures*.out + +OPT="--target es2017 \ + --lib es2015,es2017,dom \ + --module commonjs \ + --downlevelIteration \ + --esModuleInterop \ + --experimentalDecorators" +# --sourceMap \ +# --isolatedModules \ +while [ "x${1:0:1}" = "x-" ]; do + OPT="$OPT $1" + shift +done +i=0 +for f; do + echo $((++i)). $f + js=$(dirname $f)/$(basename $f .ts).js + rm -f $js + AcquireLock tsc for_$t $(nproc) + (bash -x -c "tsc --strict $OPT $f" + if [ $? -ne 0 ]; then + echo "(--strict)"$f >> tsc.failures.out + bash -x -c "tsc $OPT $f" || echo "(non-strict)"$f >> tsc.failures.out + fi + if [ -f "$js" ]; then + bash -x -c "node $js" + [ $? -ne 0 ] && echo "(nodejs)"$f >> tsc.failures.out + fi 2>&1 > $f-nodejs.out + ReleaseLock tsc + ) >& $f-tsc.out & +done +wait +rc=0 +if [ -f tsc.failures.out ]; then + echo -e "\nTest cases failed with tsc strict mode enabled:" + sort tsc.failures.out | grep "(--strict)" | xargs -n1 | nl + grep -q -e "(non-strict)" -e "(nodejs)" tsc.failures.out + if [ $? -eq 0 ]; then + echo -e "\nTest cases failed with non-strict mode or nodejs:" + sort tsc.failures.out | grep -e "(non-strict)" -e "(nodejs)" | xargs -n1 | nl + rc=2 + else + echo -e "\nAll passed with tsc non-strict mode." 
+ rc=1 + fi +elif [ $# -gt 0 ]; then + echo All passed +fi +[ $i -eq 1 -a -f $f-tsc.out ] && cat $f-tsc.out $f-nodejs.out +exit $rc diff --git a/src/MapleFE/test/typescript/unit_tests/App-copy.ts b/src/MapleFE/test/typescript/unit_tests/App-copy.ts new file mode 100644 index 0000000000000000000000000000000000000000..da2bce06f59f6ae0d2be5ec42666819bf150768d --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/App-copy.ts @@ -0,0 +1,4 @@ +import myX, * as M from "./M-copy"; +console.log(myX, M.getx()); +M.setx(3); +console.log(myX, M.getx()); diff --git a/src/MapleFE/test/typescript/unit_tests/App-copy.ts.result b/src/MapleFE/test/typescript/unit_tests/App-copy.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..29130fe6ddec7df47adeb50b2666ef352d4ba485 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/App-copy.ts.result @@ -0,0 +1,13 @@ +Matched 9 tokens. +Matched 22 tokens. +Matched 29 tokens. +Matched 42 tokens. +============= Module =========== +== Sub Tree == +import { default as myX, * as M} "./M-copy" +== Sub Tree == +console.log(myX,M.getx()) +== Sub Tree == +M.setx(3) +== Sub Tree == +console.log(myX,M.getx()) diff --git a/src/MapleFE/test/typescript/unit_tests/App-import.ts b/src/MapleFE/test/typescript/unit_tests/App-import.ts new file mode 100644 index 0000000000000000000000000000000000000000..f00203e409f8a420d35178f31005ed05f2c63119 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/App-import.ts @@ -0,0 +1,5 @@ +import myX, * as M from "./M"; +import getx = M.getx; +console.log(myX, getx()); +M.setx(3); +console.log(myX, getx()); diff --git a/src/MapleFE/test/typescript/unit_tests/App-import.ts.result b/src/MapleFE/test/typescript/unit_tests/App-import.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..da9ea239e3f9c3568bec0a928d882cc1ba3f292a --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/App-import.ts.result @@ -0,0 +1,16 @@ +Matched 9 tokens. +Matched 16 tokens. +Matched 27 tokens. +Matched 34 tokens. +Matched 45 tokens. +============= Module =========== +== Sub Tree == +import { default as myX, * as M} "./M" +== Sub Tree == +import {M.getx as getx} +== Sub Tree == +console.log(myX,getx()) +== Sub Tree == +M.setx(3) +== Sub Tree == +console.log(myX,getx()) diff --git a/src/MapleFE/test/typescript/unit_tests/App-re-export.ts b/src/MapleFE/test/typescript/unit_tests/App-re-export.ts new file mode 100644 index 0000000000000000000000000000000000000000..666b1b1a9253d51e8a1dc0c014afecc1159e701a --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/App-re-export.ts @@ -0,0 +1,4 @@ +import myX, * as M from "./re-export"; +console.log(myX, M.getx()); +M.setx(3); +console.log(myX, M.getx()); diff --git a/src/MapleFE/test/typescript/unit_tests/App-re-export.ts.result b/src/MapleFE/test/typescript/unit_tests/App-re-export.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..5c98f56a103bf8886bc56404fa09eada9d8eba5c --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/App-re-export.ts.result @@ -0,0 +1,13 @@ +Matched 9 tokens. +Matched 22 tokens. +Matched 29 tokens. +Matched 42 tokens. 
+============= Module =========== +== Sub Tree == +import { default as myX, * as M} "./re-export" +== Sub Tree == +console.log(myX,M.getx()) +== Sub Tree == +M.setx(3) +== Sub Tree == +console.log(myX,M.getx()) diff --git a/src/MapleFE/test/typescript/unit_tests/App-re-export2.ts b/src/MapleFE/test/typescript/unit_tests/App-re-export2.ts new file mode 100644 index 0000000000000000000000000000000000000000..c6dbb897f9b7df1aa5acf345eea33c946ebdc796 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/App-re-export2.ts @@ -0,0 +1,7 @@ +import myX, * as M from "./re-export2"; +import {Y as y} from "./re-export2"; +console.log(y); +console.log(myX, M.getx()); +M.setx(3); +console.log(myX, M.getx()); +console.log(y); diff --git a/src/MapleFE/test/typescript/unit_tests/App-re-export2.ts.result b/src/MapleFE/test/typescript/unit_tests/App-re-export2.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..fd84b06b35dafc531d8f2f20cb9f4ff01840c9ff --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/App-re-export2.ts.result @@ -0,0 +1,22 @@ +Matched 9 tokens. +Matched 18 tokens. +Matched 25 tokens. +Matched 38 tokens. +Matched 45 tokens. +Matched 58 tokens. +Matched 65 tokens. +============= Module =========== +== Sub Tree == +import { default as myX, * as M} "./re-export2" +== Sub Tree == +import {Y as y} "./re-export2" +== Sub Tree == +console.log(y) +== Sub Tree == +console.log(myX,M.getx()) +== Sub Tree == +M.setx(3) +== Sub Tree == +console.log(myX,M.getx()) +== Sub Tree == +console.log(y) diff --git a/src/MapleFE/test/typescript/unit_tests/App.ts b/src/MapleFE/test/typescript/unit_tests/App.ts new file mode 100644 index 0000000000000000000000000000000000000000..cc80a709ec968705cc976b4fd1cbd89c006638ab --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/App.ts @@ -0,0 +1,4 @@ +import myX, * as M from "./M"; +console.log(myX, M.getx()); +M.setx(3); +console.log(myX, M.getx()); diff --git a/src/MapleFE/test/typescript/unit_tests/App.ts.result b/src/MapleFE/test/typescript/unit_tests/App.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..dd6dc8f514ff7e2f7a5196cc3325fbfa8cb8ad52 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/App.ts.result @@ -0,0 +1,13 @@ +Matched 9 tokens. +Matched 22 tokens. +Matched 29 tokens. +Matched 42 tokens. +============= Module =========== +== Sub Tree == +import { default as myX, * as M} "./M" +== Sub Tree == +console.log(myX,M.getx()) +== Sub Tree == +M.setx(3) +== Sub Tree == +console.log(myX,M.getx()) diff --git a/src/MapleFE/test/typescript/unit_tests/App2.ts b/src/MapleFE/test/typescript/unit_tests/App2.ts new file mode 100644 index 0000000000000000000000000000000000000000..217529e23913ed6c6a6009119b7d42e8f2412584 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/App2.ts @@ -0,0 +1,6 @@ +import myX from "./M"; +import * as M from "./M"; +import { X } from "./export-default-as"; +console.log(myX, M.getx(), X); +M.setx(3); +console.log(myX, M.getx(), X); diff --git a/src/MapleFE/test/typescript/unit_tests/App2.ts.result b/src/MapleFE/test/typescript/unit_tests/App2.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..249a408c8184c473415bb95033b63cd89e62eb49 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/App2.ts.result @@ -0,0 +1,19 @@ +Matched 5 tokens. +Matched 12 tokens. +Matched 19 tokens. +Matched 34 tokens. +Matched 41 tokens. +Matched 56 tokens. 
+============= Module =========== +== Sub Tree == +import { default as myX} "./M" +== Sub Tree == +import { * as M} "./M" +== Sub Tree == +import {X} "./export-default-as" +== Sub Tree == +console.log(myX,M.getx(),X) +== Sub Tree == +M.setx(3) +== Sub Tree == +console.log(myX,M.getx(),X) diff --git a/src/MapleFE/test/typescript/unit_tests/App3.ts b/src/MapleFE/test/typescript/unit_tests/App3.ts new file mode 100644 index 0000000000000000000000000000000000000000..48559a7bebd0451f08d473e81e46fefed7759d34 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/App3.ts @@ -0,0 +1,6 @@ +import myX from "./export-default-as2"; +import * as M from "./M"; +import { X } from "./export-default-as"; +console.log(myX, M.getx(), X); +M.setx(3); +console.log(myX, M.getx(), X); diff --git a/src/MapleFE/test/typescript/unit_tests/App3.ts.result b/src/MapleFE/test/typescript/unit_tests/App3.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..34009cc2f9199603cd7e9429abc9032f194971e9 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/App3.ts.result @@ -0,0 +1,19 @@ +Matched 5 tokens. +Matched 12 tokens. +Matched 19 tokens. +Matched 34 tokens. +Matched 41 tokens. +Matched 56 tokens. +============= Module =========== +== Sub Tree == +import { default as myX} "./export-default-as2" +== Sub Tree == +import { * as M} "./M" +== Sub Tree == +import {X} "./export-default-as" +== Sub Tree == +console.log(myX,M.getx(),X) +== Sub Tree == +M.setx(3) +== Sub Tree == +console.log(myX,M.getx(),X) diff --git a/src/MapleFE/test/typescript/unit_tests/App4.ts b/src/MapleFE/test/typescript/unit_tests/App4.ts new file mode 100644 index 0000000000000000000000000000000000000000..6de0137d3fede63ed01fd51f5dc0b6e318c66b7c --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/App4.ts @@ -0,0 +1,4 @@ +import myX, { MM as M } from "./re-export4"; +console.log(myX, M.getx()); +M.setx(3); +console.log(myX, M.getx()); diff --git a/src/MapleFE/test/typescript/unit_tests/App4.ts.result b/src/MapleFE/test/typescript/unit_tests/App4.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..0de69f258027c851b846ec14eaea36156dfc37fd --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/App4.ts.result @@ -0,0 +1,13 @@ +Matched 11 tokens. +Matched 24 tokens. +Matched 31 tokens. +Matched 44 tokens. +============= Module =========== +== Sub Tree == +import { default as myX,MM as M} "./re-export4" +== Sub Tree == +console.log(myX,M.getx()) +== Sub Tree == +M.setx(3) +== Sub Tree == +console.log(myX,M.getx()) diff --git a/src/MapleFE/test/typescript/unit_tests/App5.ts b/src/MapleFE/test/typescript/unit_tests/App5.ts new file mode 100644 index 0000000000000000000000000000000000000000..9dacf6d73f2d8479e0ef0b6a01a48ce08e7d2b92 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/App5.ts @@ -0,0 +1,2 @@ +import { getx } from "./export-import2"; +console.log(getx()); diff --git a/src/MapleFE/test/typescript/unit_tests/App5.ts.result b/src/MapleFE/test/typescript/unit_tests/App5.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..6fbc351181248dd3fa552c65e36ad67a2c53d059 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/App5.ts.result @@ -0,0 +1,7 @@ +Matched 7 tokens. +Matched 16 tokens. 
+============= Module =========== +== Sub Tree == +import {getx} "./export-import2" +== Sub Tree == +console.log(getx()) diff --git a/src/MapleFE/test/typescript/unit_tests/M-copy.ts b/src/MapleFE/test/typescript/unit_tests/M-copy.ts new file mode 100644 index 0000000000000000000000000000000000000000..252e1655aa8a80a9e04449a87bbb4b55b67b83f2 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/M-copy.ts @@ -0,0 +1,10 @@ +var x: number = 2; +//export { x as default }; // ref +export default x; // copy +x = 12; +export function getx(): number { + return x; +} +export function setx(v: number): void { + x = v; +} diff --git a/src/MapleFE/test/typescript/unit_tests/M-copy.ts.result b/src/MapleFE/test/typescript/unit_tests/M-copy.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..73f2fb1c3534788e3ecc2c278ec0a14a184ea86b --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/M-copy.ts.result @@ -0,0 +1,20 @@ +Matched 7 tokens. +Matched 11 tokens. +Matched 15 tokens. +Matched 27 tokens. +Matched 43 tokens. +============= Module =========== +== Sub Tree == +js_var Decl: x=2 +== Sub Tree == +export { default as x} +== Sub Tree == +x Assign 12 +== Sub Tree == +export {func getx() throws: + return x +} +== Sub Tree == +export {func setx(v) throws: + x Assign v +} diff --git a/src/MapleFE/test/typescript/unit_tests/M.ts b/src/MapleFE/test/typescript/unit_tests/M.ts new file mode 100644 index 0000000000000000000000000000000000000000..f49b68cf52af8d098313ad690b3cc54097ce1892 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/M.ts @@ -0,0 +1,9 @@ +var x: number = 2; +export { x as default }; // ref +//export default x; // copy +export function getx(): number { + return x; +} +export function setx(v: number): void { + x = v; +} diff --git a/src/MapleFE/test/typescript/unit_tests/M.ts.result b/src/MapleFE/test/typescript/unit_tests/M.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..84952de63aa56f4a000dbe0fa74eb6d3a75dab10 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/M.ts.result @@ -0,0 +1,17 @@ +Matched 7 tokens. +Matched 14 tokens. +Matched 26 tokens. +Matched 42 tokens. +============= Module =========== +== Sub Tree == +js_var Decl: x=2 +== Sub Tree == +export {x as default} +== Sub Tree == +export {func getx() throws: + return x +} +== Sub Tree == +export {func setx(v) throws: + x Assign v +} diff --git a/src/MapleFE/test/typescript/unit_tests/Promise-catch.ts b/src/MapleFE/test/typescript/unit_tests/Promise-catch.ts new file mode 100644 index 0000000000000000000000000000000000000000..b2edeed401c7e645fe6ce65e583c98fa143eed49 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/Promise-catch.ts @@ -0,0 +1,11 @@ +// https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Promise +// https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Promise/catch +// cocos: director.ts, root.ts +// tsc --lib es2015,dom Promise-catch.ts +const promise1 = new Promise((resolve, reject) => { + throw "something happened"; +}); + +promise1.catch((error) => { + console.log(error); +}); diff --git a/src/MapleFE/test/typescript/unit_tests/Promise-catch.ts.result b/src/MapleFE/test/typescript/unit_tests/Promise-catch.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..d82e45128883669847aed9b2698cc4e230e710a0 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/Promise-catch.ts.result @@ -0,0 +1,10 @@ +Matched 19 tokens. +Matched 38 tokens. 
+============= Module =========== +== Sub Tree == +js_const Decl: promise1=new Promise((resolve,reject) -> throw "something happened" + +) +== Sub Tree == +promise1.catch((error) -> console.log(error) +) diff --git a/src/MapleFE/test/typescript/unit_tests/abstract-class.ts b/src/MapleFE/test/typescript/unit_tests/abstract-class.ts new file mode 100644 index 0000000000000000000000000000000000000000..90eb9824872b87bbcfcf3c315081b644267b9bc7 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/abstract-class.ts @@ -0,0 +1,8 @@ +abstract class Foo { + public f1: number = 0; + private f2: number = 0; + constructor(a: number, b: number) { + this.f1 = a; + this.f2 = b; + } +} diff --git a/src/MapleFE/test/typescript/unit_tests/abstract-class.ts.result b/src/MapleFE/test/typescript/unit_tests/abstract-class.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..888597a8042fcead4b747ddfda25adef9c8c8a3f --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/abstract-class.ts.result @@ -0,0 +1,15 @@ +Matched 43 tokens. +============= Module =========== +== Sub Tree == +class Foo + Fields: + f1=0 f2=0 + Instance Initializer: + Constructors: + constructor (a,b) throws: + this.f1 Assign a + this.f2 Assign b + Methods: + LocalClasses: + LocalInterfaces: + diff --git a/src/MapleFE/test/typescript/unit_tests/abstract-class2.ts b/src/MapleFE/test/typescript/unit_tests/abstract-class2.ts new file mode 100644 index 0000000000000000000000000000000000000000..993d6e5eaa91ec5c86b8bdc383caeaee0eb5ec53 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/abstract-class2.ts @@ -0,0 +1,3 @@ +abstract class Klass { + public abstract func(): void; +} diff --git a/src/MapleFE/test/typescript/unit_tests/abstract-class2.ts.result b/src/MapleFE/test/typescript/unit_tests/abstract-class2.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..c2dbc106515111dc84188ca97656c49f6e4b91bd --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/abstract-class2.ts.result @@ -0,0 +1,13 @@ +Matched 13 tokens. +============= Module =========== +== Sub Tree == +class Klass + Fields: + + Instance Initializer: + Constructors: + Methods: + func func() throws: + LocalClasses: + LocalInterfaces: + diff --git a/src/MapleFE/test/typescript/unit_tests/abstract-new.ts b/src/MapleFE/test/typescript/unit_tests/abstract-new.ts new file mode 100644 index 0000000000000000000000000000000000000000..bd1ffeb4a8e0f587f8db27ec313ff848b2651539 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/abstract-new.ts @@ -0,0 +1,2 @@ +type Type any> = T extends abstract new (...args: infer P) => any ? P : never; + diff --git a/src/MapleFE/test/typescript/unit_tests/abstract-new.ts.result b/src/MapleFE/test/typescript/unit_tests/abstract-new.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..e3e36bcc0bd887c67b0e743e6f9b6cdae543ea9c --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/abstract-new.ts.result @@ -0,0 +1,4 @@ +Matched 35 tokens. +============= Module =========== +== Sub Tree == + type Type = T extends new (args) -> ? 
P : never diff --git a/src/MapleFE/test/typescript/unit_tests/add.ts b/src/MapleFE/test/typescript/unit_tests/add.ts new file mode 100644 index 0000000000000000000000000000000000000000..583789b9c703e11801e5e96f32be11aa57f887cc --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/add.ts @@ -0,0 +1,4 @@ +var a: number = 1; +var b: number = 1; +var c: number; +c = a + b; diff --git a/src/MapleFE/test/typescript/unit_tests/add.ts.result b/src/MapleFE/test/typescript/unit_tests/add.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..028434f5242c519543c3021c12c790eaa8ea4cce --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/add.ts.result @@ -0,0 +1,13 @@ +Matched 7 tokens. +Matched 14 tokens. +Matched 19 tokens. +Matched 25 tokens. +============= Module =========== +== Sub Tree == +js_var Decl: a=1 +== Sub Tree == +js_var Decl: b=1 +== Sub Tree == +js_var Decl: c +== Sub Tree == +c Assign a Add b diff --git a/src/MapleFE/test/typescript/unit_tests/array-default.ts b/src/MapleFE/test/typescript/unit_tests/array-default.ts new file mode 100644 index 0000000000000000000000000000000000000000..f0b75ed259ae31964b932df4d5acc402ac21bdf5 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/array-default.ts @@ -0,0 +1,5 @@ +// the type of arr element is never by default +const arr: [] = []; +(arr as any[]).push([]); +arr.push.apply(arr, {} as any); +console.log(arr); diff --git a/src/MapleFE/test/typescript/unit_tests/array-default.ts.result b/src/MapleFE/test/typescript/unit_tests/array-default.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..0b7026b4dd2e7ebaf64824e9cab98a44bcfbc052 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/array-default.ts.result @@ -0,0 +1,13 @@ +Matched 9 tokens. +Matched 23 tokens. +Matched 37 tokens. +Matched 44 tokens. +============= Module =========== +== Sub Tree == +js_const Decl: arr=[] +== Sub Tree == +arr.push([]) +== Sub Tree == +arr.push.apply(arr, {}) +== Sub Tree == +console.log(arr) diff --git a/src/MapleFE/test/typescript/unit_tests/array-elem-as-type.ts b/src/MapleFE/test/typescript/unit_tests/array-elem-as-type.ts new file mode 100644 index 0000000000000000000000000000000000000000..f73d2cbfcec10db79c64055237a4099cf9931e71 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/array-elem-as-type.ts @@ -0,0 +1,10 @@ +var arr: number[] = [7, 4, 5, 9, 2, 8, 1, 6, 3]; +var sum: number = 0; +var i; +var len; +(i = 0), (len = arr.length); +for (; i < len; ++i) { + var x = arr[i] as number; + sum += x; +} +console.log(sum); diff --git a/src/MapleFE/test/typescript/unit_tests/array-elem-as-type.ts.result b/src/MapleFE/test/typescript/unit_tests/array-elem-as-type.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..f173498338d57b001c80dd07f7ad73761857096d --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/array-elem-as-type.ts.result @@ -0,0 +1,27 @@ +Matched 27 tokens. +Matched 34 tokens. +Matched 37 tokens. +Matched 40 tokens. +Matched 54 tokens. +Matched 80 tokens. +Matched 87 tokens. 
+============= Module =========== +== Sub Tree == +js_var Decl: arr=[7,4,5,9,2,8,1,6,3] +== Sub Tree == +js_var Decl: sum=0 +== Sub Tree == +js_var Decl: i +== Sub Tree == +js_var Decl: len +== Sub Tree == +i Assign 0 +== Sub Tree == +len Assign arr.length +== Sub Tree == +for ( ) + js_var Decl: x=arr[i] + sum AddAssign x + +== Sub Tree == +console.log(sum) diff --git a/src/MapleFE/test/typescript/unit_tests/array-elem-casting.ts b/src/MapleFE/test/typescript/unit_tests/array-elem-casting.ts new file mode 100644 index 0000000000000000000000000000000000000000..4b31f0e78b75023a77b181744e1f62ba023e066a --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/array-elem-casting.ts @@ -0,0 +1,21 @@ +class Base { + str: string; + constructor(s: string) { this.str = s; } +} + +class Derived extends Base { + num: number; + constructor(s: string, n: number) { super(s); this.num = n; } +} + +function func(...args: Base[]): Derived { + if (args.length === 1 && args[0] instanceof Derived) { + return args[0]; + } + return { str: "Unkown", num: 0 }; +} + +var b: Base = new Base("Base"); +console.log(func(b)); +var d: Derived = new Derived("Derived", 123); +console.log(func(d)); diff --git a/src/MapleFE/test/typescript/unit_tests/array-elem-casting.ts.result b/src/MapleFE/test/typescript/unit_tests/array-elem-casting.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..59337de3cda4b0b4191755eb8c7aaba2e86ea13e --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/array-elem-casting.ts.result @@ -0,0 +1,50 @@ +Matched 22 tokens. +Matched 55 tokens. +Matched 106 tokens. +Matched 117 tokens. +Matched 127 tokens. +Matched 140 tokens. +Matched 150 tokens. +============= Module =========== +== Sub Tree == +class Base + Fields: + str + Instance Initializer: + Constructors: + constructor (s) throws: + this.str Assign s + Methods: + LocalClasses: + LocalInterfaces: + +== Sub Tree == +class Derived + Fields: + num + Instance Initializer: + Constructors: + constructor (s,n) throws: + super(s) + this.num Assign n + Methods: + LocalClasses: + LocalInterfaces: + +== Sub Tree == +func func(...args) throws: + cond-branch cond:args.length StEq 1 Land args[0] instanceof Derived + true branch : + return (Derived)args[0] + false branch : + + return {str:"Unkown", num:0} + +== Sub Tree == +js_var Decl: b=new Base("Base") +== Sub Tree == +console.log(func(b)) +== Sub Tree == +js_var Decl: d=new Derived("Derived",123) +== Sub Tree == +console.log(func(d)) diff --git a/src/MapleFE/test/typescript/unit_tests/array-in-ctor.ts b/src/MapleFE/test/typescript/unit_tests/array-in-ctor.ts new file mode 100644 index 0000000000000000000000000000000000000000..b07b07525bb2d8d56c392da58000ff3701fcc84b --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/array-in-ctor.ts @@ -0,0 +1,19 @@ +class Klass { + data: any; + constructor() { + { + this.data = [ + new Array(123, 456) + ]; + } + } + public dump (value: number) { + switch (value) { + case 1: + console.log(value, this.data); + } + } +} + +let obj: Klass = new Klass(); +obj.dump(1); diff --git a/src/MapleFE/test/typescript/unit_tests/array-in-ctor.ts.result b/src/MapleFE/test/typescript/unit_tests/array-in-ctor.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..c8d8cb19b4ad241057695b6f79d93fe33c3625ad --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/array-in-ctor.ts.result @@ -0,0 +1,24 @@ +Matched 58 tokens. +Matched 68 tokens. +Matched 75 tokens. 
+============= Module =========== +== Sub Tree == +class Klass + Fields: + data + Instance Initializer: + Constructors: + constructor () throws: + this.data Assign [new Array(123,456)] + + Methods: + func dump(value) throws: + A switch + + LocalClasses: + LocalInterfaces: + +== Sub Tree == +js_let Decl: obj=new Klass() +== Sub Tree == +obj.dump(1) diff --git a/src/MapleFE/test/typescript/unit_tests/array-keyof.ts b/src/MapleFE/test/typescript/unit_tests/array-keyof.ts new file mode 100644 index 0000000000000000000000000000000000000000..73934390e2193181394511892587acaeeb1d0425 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/array-keyof.ts @@ -0,0 +1 @@ +export declare type Type = { t: Array; }; diff --git a/src/MapleFE/test/typescript/unit_tests/array-keyof.ts.result b/src/MapleFE/test/typescript/unit_tests/array-keyof.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..ba0b4104c0a161528b642857ea94a2561245f387 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/array-keyof.ts.result @@ -0,0 +1,4 @@ +Matched 26 tokens. +============= Module =========== +== Sub Tree == +export {declare type Type = {t }} diff --git a/src/MapleFE/test/typescript/unit_tests/array-literal.ts b/src/MapleFE/test/typescript/unit_tests/array-literal.ts new file mode 100644 index 0000000000000000000000000000000000000000..a2b8c3d09d56a2057504ee21541a7e3d414cca2a --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/array-literal.ts @@ -0,0 +1,2 @@ +var arr: number[] = [Math.ceil(5 / 3)]; +console.log(arr); diff --git a/src/MapleFE/test/typescript/unit_tests/array-literal.ts.result b/src/MapleFE/test/typescript/unit_tests/array-literal.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..bc92f1cb5f4257ee738e4d02aa35e34deb011554 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/array-literal.ts.result @@ -0,0 +1,7 @@ +Matched 18 tokens. +Matched 25 tokens. +============= Module =========== +== Sub Tree == +js_var Decl: arr=[Math.ceil(5 Div 3)] +== Sub Tree == +console.log(arr) diff --git a/src/MapleFE/test/typescript/unit_tests/array-literal10.ts b/src/MapleFE/test/typescript/unit_tests/array-literal10.ts new file mode 100644 index 0000000000000000000000000000000000000000..03553ff2a936d7103bab8a77bae243fc6f70900a --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/array-literal10.ts @@ -0,0 +1,23 @@ +class Foo { + [key: string]: number; + public f1: number = 0; + private f2: number = 0; + constructor(a: number, b: number) { + this.f1 = a; + this.f2 = b; + } +} +var funcs = [ + function func(): Foo { + console.log("Returning a new object"); + return new Foo(789, 0); + }, +]; +var obj: Foo | undefined = undefined; +var i: number = 1; +var res: number = (obj || funcs[0]())[`f${i}`]; +console.log(res); +obj = new Foo(123, 456); +++i; +res = (obj || funcs[0]())[`f${i}`]; +console.log(res); diff --git a/src/MapleFE/test/typescript/unit_tests/array-literal10.ts.result b/src/MapleFE/test/typescript/unit_tests/array-literal10.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..759ae8eefbf133e6fd24499b4449484476c3c410 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/array-literal10.ts.result @@ -0,0 +1,47 @@ +Matched 50 tokens. +Matched 81 tokens. +Matched 90 tokens. +Matched 97 tokens. +Matched 116 tokens. +Matched 123 tokens. +Matched 133 tokens. +Matched 136 tokens. +Matched 152 tokens. +Matched 159 tokens. +Matched 160 tokens. +Matched 161 tokens. 
+============= Module =========== +== Sub Tree == +class Foo + Fields: + number f1=0 f2=0 + Instance Initializer: + Constructors: + constructor (a,b) throws: + this.f1 Assign a + this.f2 Assign b + Methods: + LocalClasses: + LocalInterfaces: + +== Sub Tree == +js_var Decl: funcs=[func func() throws: + console.log("Returning a new object") + return new Foo(789,0) +] +== Sub Tree == +js_var Decl: obj=undefined +== Sub Tree == +js_var Decl: i=1 +== Sub Tree == +js_var Decl: res=obj Lor funcs[0]()[ template-literal: "f",i] +== Sub Tree == +console.log(res) +== Sub Tree == +obj Assign new Foo(123,456) +== Sub Tree == +PreInc i +== Sub Tree == +res Assign obj Lor funcs[0]()[ template-literal: "f",i] +== Sub Tree == +console.log(res) diff --git a/src/MapleFE/test/typescript/unit_tests/array-literal11.ts b/src/MapleFE/test/typescript/unit_tests/array-literal11.ts new file mode 100644 index 0000000000000000000000000000000000000000..b6381be5e07c99952d7fb382738ea71b72d29f4c --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/array-literal11.ts @@ -0,0 +1,7 @@ +enum Direction { + LEFT, + RIGHT, +} + +const rec = [true ? Direction.LEFT : Direction.RIGHT]; +console.log(rec); diff --git a/src/MapleFE/test/typescript/unit_tests/array-literal11.ts.result b/src/MapleFE/test/typescript/unit_tests/array-literal11.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..a863b9610058f07b873ae3f4e747ccaccd0cc05c --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/array-literal11.ts.result @@ -0,0 +1,10 @@ +Matched 8 tokens. +Matched 23 tokens. +Matched 30 tokens. +============= Module =========== +== Sub Tree == +ts_enum: Direction {LEFT;RIGHT } +== Sub Tree == +js_const Decl: rec=[true ? Direction.LEFT : Direction.RIGHT] +== Sub Tree == +console.log(rec) diff --git a/src/MapleFE/test/typescript/unit_tests/array-literal2.ts b/src/MapleFE/test/typescript/unit_tests/array-literal2.ts new file mode 100644 index 0000000000000000000000000000000000000000..76f552860cd11ba7e0aa23d919045491b7c1b1f8 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/array-literal2.ts @@ -0,0 +1,3 @@ +var arr: number[] = [1, 2, 3]; +arr = [9, arr[2], arr[1]]; +console.log(arr); diff --git a/src/MapleFE/test/typescript/unit_tests/array-literal2.ts.result b/src/MapleFE/test/typescript/unit_tests/array-literal2.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..bca281e730440eca5ee46297f21f163cb920a0d7 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/array-literal2.ts.result @@ -0,0 +1,10 @@ +Matched 15 tokens. +Matched 31 tokens. +Matched 38 tokens. +============= Module =========== +== Sub Tree == +js_var Decl: arr=[1,2,3] +== Sub Tree == +arr Assign [9,arr[2],arr[1]] +== Sub Tree == +console.log(arr) diff --git a/src/MapleFE/test/typescript/unit_tests/array-literal3.ts b/src/MapleFE/test/typescript/unit_tests/array-literal3.ts new file mode 100644 index 0000000000000000000000000000000000000000..4322acdc44cb19d6289d1fc5ab9ef574f7ce4c46 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/array-literal3.ts @@ -0,0 +1,3 @@ +var _2 = 0; +this["_2"] = 22; +console.log(this["_2"]); diff --git a/src/MapleFE/test/typescript/unit_tests/array-literal3.ts.result b/src/MapleFE/test/typescript/unit_tests/array-literal3.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..38a609320e0d7402555c34ffc5ee0df7b0d6999a --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/array-literal3.ts.result @@ -0,0 +1,10 @@ +Matched 5 tokens. 
+Matched 12 tokens. +Matched 22 tokens. +============= Module =========== +== Sub Tree == +js_var Decl: _2=0 +== Sub Tree == +this["_2"] Assign 22 +== Sub Tree == +console.log(this["_2"]) diff --git a/src/MapleFE/test/typescript/unit_tests/array-literal4.ts b/src/MapleFE/test/typescript/unit_tests/array-literal4.ts new file mode 100644 index 0000000000000000000000000000000000000000..8ea00dcf819bf765a527e7eaed054eda0f623933 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/array-literal4.ts @@ -0,0 +1,7 @@ +enum E { + LEFT = "left", + RIGHT = "right", +} + +var arr: E[] = [E.LEFT, E.RIGHT]; +console.log(arr); diff --git a/src/MapleFE/test/typescript/unit_tests/array-literal4.ts.result b/src/MapleFE/test/typescript/unit_tests/array-literal4.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..ff97cb533540cb4a98ea44cff7951289d5f8506e --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/array-literal4.ts.result @@ -0,0 +1,10 @@ +Matched 12 tokens. +Matched 29 tokens. +Matched 36 tokens. +============= Module =========== +== Sub Tree == +ts_enum: E {LEFT="left";RIGHT="right" } +== Sub Tree == +js_var Decl: arr=[E.LEFT,E.RIGHT] +== Sub Tree == +console.log(arr) diff --git a/src/MapleFE/test/typescript/unit_tests/array-literal5.ts b/src/MapleFE/test/typescript/unit_tests/array-literal5.ts new file mode 100644 index 0000000000000000000000000000000000000000..c92f992fd391cc6347b5376be7f2f14e11ac02e0 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/array-literal5.ts @@ -0,0 +1,7 @@ +enum Direction { + LEFT, + RIGHT, +} + +const rec = [Direction.LEFT, Direction.RIGHT]; +console.log(rec); diff --git a/src/MapleFE/test/typescript/unit_tests/array-literal5.ts.result b/src/MapleFE/test/typescript/unit_tests/array-literal5.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..fcba11dd60be145c61ad30095c8b87323b16e6c0 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/array-literal5.ts.result @@ -0,0 +1,10 @@ +Matched 8 tokens. +Matched 21 tokens. +Matched 28 tokens. +============= Module =========== +== Sub Tree == +ts_enum: Direction {LEFT;RIGHT } +== Sub Tree == +js_const Decl: rec=[Direction.LEFT,Direction.RIGHT] +== Sub Tree == +console.log(rec) diff --git a/src/MapleFE/test/typescript/unit_tests/array-literal6.ts b/src/MapleFE/test/typescript/unit_tests/array-literal6.ts new file mode 100644 index 0000000000000000000000000000000000000000..7c221537c71678f56557feda48bf07e667b1cdc4 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/array-literal6.ts @@ -0,0 +1,3 @@ +var i = 10; +var face = [0, i + 1, i + 2]; +console.log(face); diff --git a/src/MapleFE/test/typescript/unit_tests/array-literal6.ts.result b/src/MapleFE/test/typescript/unit_tests/array-literal6.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..92c4549e714868a092af165169005f6c4fc85f63 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/array-literal6.ts.result @@ -0,0 +1,10 @@ +Matched 5 tokens. +Matched 20 tokens. +Matched 27 tokens. 
+============= Module =========== +== Sub Tree == +js_var Decl: i=10 +== Sub Tree == +js_var Decl: face=[0,i Add 1,i Add 2] +== Sub Tree == +console.log(face) diff --git a/src/MapleFE/test/typescript/unit_tests/array-literal7.ts b/src/MapleFE/test/typescript/unit_tests/array-literal7.ts new file mode 100644 index 0000000000000000000000000000000000000000..42358e48c70aadc11d7bf9f5d786dcb1ea65f0ef --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/array-literal7.ts @@ -0,0 +1,11 @@ +const arr: (() => void)[] = [ + () => { + console.log("Lambda0"); + }, + () => { + console.log("Lambda1"); + }, +]; +console.log(arr); +arr[0](); +arr[1](); diff --git a/src/MapleFE/test/typescript/unit_tests/array-literal7.ts.result b/src/MapleFE/test/typescript/unit_tests/array-literal7.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..4cf433c3c169d9e2ec8f63929edd281582bf5651 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/array-literal7.ts.result @@ -0,0 +1,15 @@ +Matched 41 tokens. +Matched 48 tokens. +Matched 55 tokens. +Matched 62 tokens. +============= Module =========== +== Sub Tree == +js_const Decl: arr=[() -> console.log("Lambda0") +,() -> console.log("Lambda1") +] +== Sub Tree == +console.log(arr) +== Sub Tree == +arr[0]() +== Sub Tree == +arr[1]() diff --git a/src/MapleFE/test/typescript/unit_tests/array-literal8.ts b/src/MapleFE/test/typescript/unit_tests/array-literal8.ts new file mode 100644 index 0000000000000000000000000000000000000000..d64691ed229ff2b4d7cd5bdc4c2526cb251c9fea --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/array-literal8.ts @@ -0,0 +1,3 @@ +var i = 10; +var face = [0, i - 2, -i]; +console.log(face); diff --git a/src/MapleFE/test/typescript/unit_tests/array-literal8.ts.result b/src/MapleFE/test/typescript/unit_tests/array-literal8.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..3f249815bd52fe43ab00d43a493ce98c4ee89601 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/array-literal8.ts.result @@ -0,0 +1,10 @@ +Matched 5 tokens. +Matched 19 tokens. +Matched 26 tokens. +============= Module =========== +== Sub Tree == +js_var Decl: i=10 +== Sub Tree == +js_var Decl: face=[0,i Sub 2,Minus i] +== Sub Tree == +console.log(face) diff --git a/src/MapleFE/test/typescript/unit_tests/array-literal9.ts b/src/MapleFE/test/typescript/unit_tests/array-literal9.ts new file mode 100644 index 0000000000000000000000000000000000000000..2837536a26a0ae85f63a05ca0c96b2582e867dc4 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/array-literal9.ts @@ -0,0 +1,21 @@ +class Foo { + [key: string]: number; + public f1: number = 0; + private f2: number = 0; + constructor(a: number, b: number) { + this.f1 = a; + this.f2 = b; + } +} +function func(): Foo { + console.log("Returning a new object"); + return new Foo(789, 0); +} +var obj: Foo | undefined = undefined; +var i: number = 1; +var res: number = (obj || func())[`f${i}`]; +console.log(res); +obj = new Foo(123, 456); +++i; +res = (obj || func())[`f${i}`]; +console.log(res); diff --git a/src/MapleFE/test/typescript/unit_tests/array-literal9.ts.result b/src/MapleFE/test/typescript/unit_tests/array-literal9.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..a0665e2795510928f1805094a2424ac4bd05ecb0 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/array-literal9.ts.result @@ -0,0 +1,47 @@ +Matched 50 tokens. +Matched 74 tokens. +Matched 83 tokens. +Matched 90 tokens. +Matched 106 tokens. +Matched 113 tokens. 
+Matched 123 tokens. +Matched 126 tokens. +Matched 139 tokens. +Matched 146 tokens. +Matched 147 tokens. +Matched 148 tokens. +============= Module =========== +== Sub Tree == +class Foo + Fields: + number f1=0 f2=0 + Instance Initializer: + Constructors: + constructor (a,b) throws: + this.f1 Assign a + this.f2 Assign b + Methods: + LocalClasses: + LocalInterfaces: + +== Sub Tree == +func func() throws: + console.log("Returning a new object") + return new Foo(789,0) + +== Sub Tree == +js_var Decl: obj=undefined +== Sub Tree == +js_var Decl: i=1 +== Sub Tree == +js_var Decl: res=obj Lor func()[ template-literal: "f",i] +== Sub Tree == +console.log(res) +== Sub Tree == +obj Assign new Foo(123,456) +== Sub Tree == +PreInc i +== Sub Tree == +res Assign obj Lor func()[ template-literal: "f",i] +== Sub Tree == +console.log(res) diff --git a/src/MapleFE/test/typescript/unit_tests/array-multi-dims.ts b/src/MapleFE/test/typescript/unit_tests/array-multi-dims.ts new file mode 100644 index 0000000000000000000000000000000000000000..cc36afe28914bc6577aba29b1498360c045fdff1 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/array-multi-dims.ts @@ -0,0 +1,7 @@ +const arr = [ + [4, 9, 2], + [3, 5, 7], + [8, 1, 6], +]; +console.log(arr[0][0] + arr[1][1] + arr[2][2]); +console.log(arr[0][2] + arr[1][1] + arr[2][0]); diff --git a/src/MapleFE/test/typescript/unit_tests/array-multi-dims.ts.result b/src/MapleFE/test/typescript/unit_tests/array-multi-dims.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..2fba0af866e109ed8ee7e50a7715ae0442c2eb1f --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/array-multi-dims.ts.result @@ -0,0 +1,10 @@ +Matched 30 tokens. +Matched 59 tokens. +Matched 88 tokens. +============= Module =========== +== Sub Tree == +js_const Decl: arr=[[4,9,2],[3,5,7],[8,1,6]] +== Sub Tree == +console.log(arr[0][0] Add arr[1][1] Add arr[2][2]) +== Sub Tree == +console.log(arr[0][2] Add arr[1][1] Add arr[2][0]) diff --git a/src/MapleFE/test/typescript/unit_tests/array-multi-dims2.ts b/src/MapleFE/test/typescript/unit_tests/array-multi-dims2.ts new file mode 100644 index 0000000000000000000000000000000000000000..4b2f295fd5204dd0c1672a8f9c40d052207b4692 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/array-multi-dims2.ts @@ -0,0 +1,6 @@ +const arr = [ + [4, 9, 2], + [3, 5, 7], + [8, 1, 6], +]; +console.log(arr[0][0] + 1); diff --git a/src/MapleFE/test/typescript/unit_tests/array-multi-dims2.ts.result b/src/MapleFE/test/typescript/unit_tests/array-multi-dims2.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..28342ba99577fba4f8fef191c59f343d1da9f9bf --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/array-multi-dims2.ts.result @@ -0,0 +1,7 @@ +Matched 30 tokens. +Matched 45 tokens. 
+============= Module =========== +== Sub Tree == +js_const Decl: arr=[[4,9,2],[3,5,7],[8,1,6]] +== Sub Tree == +console.log(arr[0][0] Add 1) diff --git a/src/MapleFE/test/typescript/unit_tests/array-new-elem.ts b/src/MapleFE/test/typescript/unit_tests/array-new-elem.ts new file mode 100644 index 0000000000000000000000000000000000000000..ab299a77f92d3d6c3e4141044a00470ab297e051 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/array-new-elem.ts @@ -0,0 +1,2 @@ +var arr: Object[] = [new Object(), new Object()]; +console.log(arr); diff --git a/src/MapleFE/test/typescript/unit_tests/array-new-elem.ts.result b/src/MapleFE/test/typescript/unit_tests/array-new-elem.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..39fdc1a6f79b334c411d27613f4cc237bb4e62fb --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/array-new-elem.ts.result @@ -0,0 +1,7 @@ +Matched 19 tokens. +Matched 26 tokens. +============= Module =========== +== Sub Tree == +js_var Decl: arr=[new Object(),new Object()] +== Sub Tree == +console.log(arr) diff --git a/src/MapleFE/test/typescript/unit_tests/array-object.ts b/src/MapleFE/test/typescript/unit_tests/array-object.ts new file mode 100644 index 0000000000000000000000000000000000000000..f77faff98b2a8f58e9ef3ce91447da273ec2b552 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/array-object.ts @@ -0,0 +1,6 @@ +function func(arg: any) { + console.log(arg); +} +func([ [`Number`, { type: Number, count: 1, }], + [`String`, { type: String, count: 2, }], + ] as Array<[ name: string, value: any, ]>); diff --git a/src/MapleFE/test/typescript/unit_tests/array-object.ts.result b/src/MapleFE/test/typescript/unit_tests/array-object.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..93c56af98019afa2b9f3410874718c9c9b1f9dfb --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/array-object.ts.result @@ -0,0 +1,9 @@ +Matched 16 tokens. +Matched 66 tokens. +============= Module =========== +== Sub Tree == +func func(arg) throws: + console.log(arg) + +== Sub Tree == +func([[ template-literal: "Number",NULL, {type:Number, count:1}],[ template-literal: "String",NULL, {type:String, count:2}]]) diff --git a/src/MapleFE/test/typescript/unit_tests/array-typeof.ts b/src/MapleFE/test/typescript/unit_tests/array-typeof.ts new file mode 100644 index 0000000000000000000000000000000000000000..6a0bf0f58983e8d4ddf5ea627f39aad87a1100c3 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/array-typeof.ts @@ -0,0 +1,5 @@ +class VType {} +class V2 extends VType {s: string = "abc"} +class V3 extends VType {n: number = 123} + +const BuiltinVTypes: typeof VType[] = [V2, V3]; diff --git a/src/MapleFE/test/typescript/unit_tests/array-typeof.ts.result b/src/MapleFE/test/typescript/unit_tests/array-typeof.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..2619a263e550753b67d5adfd0a498975b6f8cff9 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/array-typeof.ts.result @@ -0,0 +1,37 @@ +Matched 4 tokens. +Matched 15 tokens. +Matched 26 tokens. +Matched 40 tokens. 
+============= Module =========== +== Sub Tree == +class VType + Fields: + + Instance Initializer: + Constructors: + Methods: + LocalClasses: + LocalInterfaces: + +== Sub Tree == +class V2 + Fields: + s="abc" + Instance Initializer: + Constructors: + Methods: + LocalClasses: + LocalInterfaces: + +== Sub Tree == +class V3 + Fields: + n=123 + Instance Initializer: + Constructors: + Methods: + LocalClasses: + LocalInterfaces: + +== Sub Tree == +js_const Decl: BuiltinVTypes=[V2,V3] diff --git a/src/MapleFE/test/typescript/unit_tests/array-with-any.ts b/src/MapleFE/test/typescript/unit_tests/array-with-any.ts new file mode 100644 index 0000000000000000000000000000000000000000..978def01bdb8a14df648564e58a162029c58ffe1 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/array-with-any.ts @@ -0,0 +1,6 @@ +class Klass { + protected n: Map void> = new Map(); +} + +var obj: Klass = new Klass(); +console.log(obj); diff --git a/src/MapleFE/test/typescript/unit_tests/array-with-any.ts.result b/src/MapleFE/test/typescript/unit_tests/array-with-any.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..68721240385785d491d4a00636796bcb8492764a --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/array-with-any.ts.result @@ -0,0 +1,18 @@ +Matched 29 tokens. +Matched 39 tokens. +Matched 46 tokens. +============= Module =========== +== Sub Tree == +class Klass + Fields: + n=new Map() + Instance Initializer: + Constructors: + Methods: + LocalClasses: + LocalInterfaces: + +== Sub Tree == +js_var Decl: obj=new Klass() +== Sub Tree == +console.log(obj) diff --git a/src/MapleFE/test/typescript/unit_tests/array-with-template-literal.ts b/src/MapleFE/test/typescript/unit_tests/array-with-template-literal.ts new file mode 100644 index 0000000000000000000000000000000000000000..34c66adddf4f8080f2ae08229dc6fb2870fd5fe5 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/array-with-template-literal.ts @@ -0,0 +1,2 @@ +var arr = [null, [`template literal`, null] as unknown as (Array | null)[]]; +console.log(arr); diff --git a/src/MapleFE/test/typescript/unit_tests/array-with-template-literal.ts.result b/src/MapleFE/test/typescript/unit_tests/array-with-template-literal.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..95703f0d9280e671aec08b05ca33a88c33696189 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/array-with-template-literal.ts.result @@ -0,0 +1,7 @@ +Matched 26 tokens. +Matched 33 tokens. +============= Module =========== +== Sub Tree == +js_var Decl: arr=[null,[ template-literal: "template literal",NULL,null]] +== Sub Tree == +console.log(arr) diff --git a/src/MapleFE/test/typescript/unit_tests/arrow-func.ts b/src/MapleFE/test/typescript/unit_tests/arrow-func.ts new file mode 100644 index 0000000000000000000000000000000000000000..187f525abab4d92014240b72687e3d5692db660d --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/arrow-func.ts @@ -0,0 +1,5 @@ +let f1 = (x: number): number => { + return x / 3; +}; +let f2 = (x: number): number => x / 3; +console.log(f1(6), f2(9)); diff --git a/src/MapleFE/test/typescript/unit_tests/arrow-func.ts.result b/src/MapleFE/test/typescript/unit_tests/arrow-func.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..414e71ab055ea3bcb572cfab1e8f4836ced30d42 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/arrow-func.ts.result @@ -0,0 +1,11 @@ +Matched 19 tokens. +Matched 34 tokens. +Matched 49 tokens. 
+============= Module =========== +== Sub Tree == +js_let Decl: f1=(x) -> return x Div 3 + +== Sub Tree == +js_let Decl: f2=(x) -> x Div 3 +== Sub Tree == +console.log(f1(6),f2(9)) diff --git a/src/MapleFE/test/typescript/unit_tests/as-any.ts b/src/MapleFE/test/typescript/unit_tests/as-any.ts new file mode 100644 index 0000000000000000000000000000000000000000..f8a7913ef1fa1febe692064759b7bce4a10f34c8 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/as-any.ts @@ -0,0 +1,5 @@ +function func() { + return { list: [1, 2, 3] }; +} +let s = func().list as any; +console.log(s); diff --git a/src/MapleFE/test/typescript/unit_tests/as-any.ts.result b/src/MapleFE/test/typescript/unit_tests/as-any.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..55b43f4ae642cdb62f7e204b6b6de20327205967 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/as-any.ts.result @@ -0,0 +1,12 @@ +Matched 19 tokens. +Matched 30 tokens. +Matched 37 tokens. +============= Module =========== +== Sub Tree == +func func() throws: + return {list:[1,2,3]} + +== Sub Tree == +js_let Decl: s=func().list +== Sub Tree == +console.log(s) diff --git a/src/MapleFE/test/typescript/unit_tests/as-const.ts b/src/MapleFE/test/typescript/unit_tests/as-const.ts new file mode 100644 index 0000000000000000000000000000000000000000..a2929ff44bf7ee07757023c317692bcb32f387c0 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/as-const.ts @@ -0,0 +1,2 @@ +let s = "abc" as const; +console.log(s); diff --git a/src/MapleFE/test/typescript/unit_tests/as-const.ts.result b/src/MapleFE/test/typescript/unit_tests/as-const.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..99ca9082fab295b78836515677a59c2b1cd77aae --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/as-const.ts.result @@ -0,0 +1,7 @@ +Matched 7 tokens. +Matched 14 tokens. +============= Module =========== +== Sub Tree == +js_let Decl: s="abc" +== Sub Tree == +console.log(s) diff --git a/src/MapleFE/test/typescript/unit_tests/as-const2.ts b/src/MapleFE/test/typescript/unit_tests/as-const2.ts new file mode 100644 index 0000000000000000000000000000000000000000..a434946630930b4a7f4c856a45b7ba576b4354e7 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/as-const2.ts @@ -0,0 +1,5 @@ +enum Color { + Blue = "blue", +} +let s = Color.Blue as const; +console.log(s); diff --git a/src/MapleFE/test/typescript/unit_tests/as-const2.ts.result b/src/MapleFE/test/typescript/unit_tests/as-const2.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..5dd5949f6656a05ee1df9f073e4949398ac96472 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/as-const2.ts.result @@ -0,0 +1,10 @@ +Matched 8 tokens. +Matched 17 tokens. +Matched 24 tokens. 
+============= Module =========== +== Sub Tree == +ts_enum: Color {Blue="blue" } +== Sub Tree == +js_let Decl: s=Color.Blue +== Sub Tree == +console.log(s) diff --git a/src/MapleFE/test/typescript/unit_tests/as-const3.ts b/src/MapleFE/test/typescript/unit_tests/as-const3.ts new file mode 100644 index 0000000000000000000000000000000000000000..49781c19f481bdeb0b76af2dbb190320ef45e8e2 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/as-const3.ts @@ -0,0 +1,5 @@ +enum Color { + Blue = "blue", +} +const s = (Color.Blue) as const; +console.log(s); diff --git a/src/MapleFE/test/typescript/unit_tests/as-const3.ts.result b/src/MapleFE/test/typescript/unit_tests/as-const3.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..c6224047ba7ca9528a6274e8f06df7188ff37917 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/as-const3.ts.result @@ -0,0 +1,10 @@ +Matched 8 tokens. +Matched 19 tokens. +Matched 26 tokens. +============= Module =========== +== Sub Tree == +ts_enum: Color {Blue="blue" } +== Sub Tree == +js_const Decl: s=Color.Blue +== Sub Tree == +console.log(s) diff --git a/src/MapleFE/test/typescript/unit_tests/as-function-type.ts b/src/MapleFE/test/typescript/unit_tests/as-function-type.ts new file mode 100644 index 0000000000000000000000000000000000000000..31ec2a2235020ea85ff759517ff7c5bb31e958bd --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/as-function-type.ts @@ -0,0 +1,12 @@ +function func(cb: (s: string) => number): string { + console.log(cb("abc")); + return "OK"; +} + +var fn = function (s: string): number { + console.log(s); + return 123; +}; + +const f = func(fn as (s: string) => number); +console.log(f); diff --git a/src/MapleFE/test/typescript/unit_tests/as-function-type.ts.result b/src/MapleFE/test/typescript/unit_tests/as-function-type.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..45dce72dc6432c23a4d5673651c5c6bf964b315a --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/as-function-type.ts.result @@ -0,0 +1,19 @@ +Matched 30 tokens. +Matched 54 tokens. +Matched 70 tokens. +Matched 77 tokens. +============= Module =========== +== Sub Tree == +func func(cb) throws: + console.log(cb("abc")) + return "OK" + +== Sub Tree == +js_var Decl: fn=func (s) throws: + console.log(s) + return 123 + +== Sub Tree == +js_const Decl: f=func(fn) +== Sub Tree == +console.log(f) diff --git a/src/MapleFE/test/typescript/unit_tests/as-namespace.d.ts b/src/MapleFE/test/typescript/unit_tests/as-namespace.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..0a893306950046bc266722e31af034a505fda3b4 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/as-namespace.d.ts @@ -0,0 +1,6 @@ +// Proposal: export as namespace for UMD module output +// https://github.com/microsoft/TypeScript/issues/26532 + +// https://www.typescriptlang.org/docs/handbook/modules.html#umd-modules +export var x: number; +export as namespace NS; diff --git a/src/MapleFE/test/typescript/unit_tests/as-namespace.d.ts.result b/src/MapleFE/test/typescript/unit_tests/as-namespace.d.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..819b72b7c97e7689470604c34869967973d67359 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/as-namespace.d.ts.result @@ -0,0 +1,7 @@ +Matched 6 tokens. +Matched 11 tokens. 
+============= Module =========== +== Sub Tree == +export {js_var Decl: x} +== Sub Tree == +export {NS} diff --git a/src/MapleFE/test/typescript/unit_tests/ascii_escape.ts b/src/MapleFE/test/typescript/unit_tests/ascii_escape.ts new file mode 100644 index 0000000000000000000000000000000000000000..4976e247f6f5b188ed972d25e592b9fb486e055e --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/ascii_escape.ts @@ -0,0 +1 @@ +var x = "\\\n\"'\b\f\r"; diff --git a/src/MapleFE/test/typescript/unit_tests/ascii_escape.ts.result b/src/MapleFE/test/typescript/unit_tests/ascii_escape.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..a031f7282aff0e7c32eed9ee8194e37174775a2f --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/ascii_escape.ts.result @@ -0,0 +1,4 @@ +Matched 5 tokens. +============= Module =========== +== Sub Tree == +js_var Decl: x="\\\n\"'\b\f\r" diff --git a/src/MapleFE/test/typescript/unit_tests/asserts-condition.ts b/src/MapleFE/test/typescript/unit_tests/asserts-condition.ts new file mode 100644 index 0000000000000000000000000000000000000000..a382744e5bf8716d4bf4cf64b06015e2015ec6fc --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/asserts-condition.ts @@ -0,0 +1,8 @@ +function nonNullable(e: T): asserts e is NonNullable { + if (e === null || e === undefined) throw new Error("Assertion failure"); + console.log("nonNullable", e); +} + +class Klass {} +var obj: Klass = new Klass(); +nonNullable(obj); diff --git a/src/MapleFE/test/typescript/unit_tests/asserts-condition.ts.result b/src/MapleFE/test/typescript/unit_tests/asserts-condition.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..09373fda959de25aab57ea0b472794c7dad8d8f5 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/asserts-condition.ts.result @@ -0,0 +1,28 @@ +Matched 46 tokens. +Matched 50 tokens. +Matched 60 tokens. +Matched 68 tokens. +============= Module =========== +== Sub Tree == +func nonNullable(e) : assert e is NonNullable : throws: + cond-branch cond:e StEq null Lor e StEq undefined + true branch : + throw new Error("Assertion failure") + false branch : + + console.log("nonNullable",e) + +== Sub Tree == +class Klass + Fields: + + Instance Initializer: + Constructors: + Methods: + LocalClasses: + LocalInterfaces: + +== Sub Tree == +js_var Decl: obj=new Klass() +== Sub Tree == +nonNullable(obj) diff --git a/src/MapleFE/test/typescript/unit_tests/assign-quotes.ts b/src/MapleFE/test/typescript/unit_tests/assign-quotes.ts new file mode 100644 index 0000000000000000000000000000000000000000..2e8bd5285e81cd8f2b2a6ef418b2c89a56dd93b2 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/assign-quotes.ts @@ -0,0 +1,4 @@ +var foo; +if (foo === "") { + foo = '""'; +} diff --git a/src/MapleFE/test/typescript/unit_tests/assign-quotes.ts.result b/src/MapleFE/test/typescript/unit_tests/assign-quotes.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..a9c2370b68c52f084740b2416a1e806a9582c386 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/assign-quotes.ts.result @@ -0,0 +1,11 @@ +Matched 3 tokens. +Matched 15 tokens. 
+============= Module =========== +== Sub Tree == +js_var Decl: foo +== Sub Tree == +cond-branch cond:foo StEq "" +true branch : + foo Assign """" +false branch : + diff --git a/src/MapleFE/test/typescript/unit_tests/async-function.ts b/src/MapleFE/test/typescript/unit_tests/async-function.ts new file mode 100644 index 0000000000000000000000000000000000000000..09efbe49be53e3795f87c17e3cd3314dd711d65f --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/async-function.ts @@ -0,0 +1,8 @@ +async function func() { + return "done"; +} + +(async () => { + const val = await func(); + console.log(val); +})(); diff --git a/src/MapleFE/test/typescript/unit_tests/async-function.ts.result b/src/MapleFE/test/typescript/unit_tests/async-function.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..942cc2667a90d02078203623716deec0f37a0833 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/async-function.ts.result @@ -0,0 +1,11 @@ +Matched 10 tokens. +Matched 36 tokens. +============= Module =========== +== Sub Tree == +func func() throws: + return "done" + +== Sub Tree == +() -> js_const Decl: val= await func() +console.log(val) +() diff --git a/src/MapleFE/test/typescript/unit_tests/async-in-literal.ts b/src/MapleFE/test/typescript/unit_tests/async-in-literal.ts new file mode 100644 index 0000000000000000000000000000000000000000..18c4e2fd48a8fd2629aade8ffda24995ff018043 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/async-in-literal.ts @@ -0,0 +1,9 @@ +function func() { + return { + async message() { + return "done"; + }, + }; +} + +console.log(func().message()); diff --git a/src/MapleFE/test/typescript/unit_tests/async-in-literal.ts.result b/src/MapleFE/test/typescript/unit_tests/async-in-literal.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..e514d99fc30f5e9e56102068be896253d426bfe7 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/async-in-literal.ts.result @@ -0,0 +1,11 @@ +Matched 20 tokens. +Matched 33 tokens. +============= Module =========== +== Sub Tree == +func func() throws: + return {message:func message() throws: + return "done" +} + +== Sub Tree == +console.log(func().message()) diff --git a/src/MapleFE/test/typescript/unit_tests/await-import.ts b/src/MapleFE/test/typescript/unit_tests/await-import.ts new file mode 100644 index 0000000000000000000000000000000000000000..47ea73929177b224e463a6d08a1566e23bc489da --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/await-import.ts @@ -0,0 +1,6 @@ +(async () => { + const { default: myX, getx, setx } = await import("./M"); + console.log(myX, getx()); + setx(3); + console.log(myX, getx()); +})(); diff --git a/src/MapleFE/test/typescript/unit_tests/await-import.ts.result b/src/MapleFE/test/typescript/unit_tests/await-import.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..3fa7f89a3b3ae1d4bc56d6399820055735a04bb0 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/await-import.ts.result @@ -0,0 +1,8 @@ +Matched 55 tokens. 
+============= Module =========== +== Sub Tree == +() -> js_const Decl: {default:myX, :getx, :setx} +console.log(myX,getx()) +setx(3) +console.log(myX,getx()) +() diff --git a/src/MapleFE/test/typescript/unit_tests/bin-op-scoped.ts b/src/MapleFE/test/typescript/unit_tests/bin-op-scoped.ts new file mode 100644 index 0000000000000000000000000000000000000000..7f3f85714803e399e2fd2a8810a5dd1698cda9bb --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/bin-op-scoped.ts @@ -0,0 +1,35 @@ +var a: number = 1; + +var b: number = 1; + +var c: number; + +c = a + b; + +c = a - b; + +c = a * b; + +c = a / b; + +c = a % b; + +{ + // Should generate a BlockNode here + + let a: number = 2; + + let b: number = 2; + + let c: number; + + c = a + b; + + c = a - b; + + c = a * b; + + c = a / b; + + c = a % b; +} diff --git a/src/MapleFE/test/typescript/unit_tests/bin-op-scoped.ts.result b/src/MapleFE/test/typescript/unit_tests/bin-op-scoped.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..5a9d4b024703c0e882b626088bc56d6a8c700ed7 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/bin-op-scoped.ts.result @@ -0,0 +1,36 @@ +Matched 7 tokens. +Matched 14 tokens. +Matched 19 tokens. +Matched 25 tokens. +Matched 31 tokens. +Matched 37 tokens. +Matched 43 tokens. +Matched 49 tokens. +Matched 100 tokens. +============= Module =========== +== Sub Tree == +js_var Decl: a=1 +== Sub Tree == +js_var Decl: b=1 +== Sub Tree == +js_var Decl: c +== Sub Tree == +c Assign a Add b +== Sub Tree == +c Assign a Sub b +== Sub Tree == +c Assign a Mul b +== Sub Tree == +c Assign a Div b +== Sub Tree == +c Assign a Mod b +== Sub Tree == +js_let Decl: a=2 +js_let Decl: b=2 +js_let Decl: c +c Assign a Add b +c Assign a Sub b +c Assign a Mul b +c Assign a Div b +c Assign a Mod b + diff --git a/src/MapleFE/test/typescript/unit_tests/bin_op.ts b/src/MapleFE/test/typescript/unit_tests/bin_op.ts new file mode 100644 index 0000000000000000000000000000000000000000..ab73727295fca09696bf5b480045c8a00b55f182 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/bin_op.ts @@ -0,0 +1,14 @@ +var a: number = 1; +var b: number = 1; +var c: number; +c = a + b; +c = a - b; +c = a * b; +c = a / b; +c = a % b; +c = a >> b; +c = a << b; +c = a >>> b; +var d: boolean; +d = a < b; +d = a <= b; diff --git a/src/MapleFE/test/typescript/unit_tests/bin_op.ts.result b/src/MapleFE/test/typescript/unit_tests/bin_op.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..226184c6a19aab5d26cb953227cb9d0a8f8e8412 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/bin_op.ts.result @@ -0,0 +1,43 @@ +Matched 7 tokens. +Matched 14 tokens. +Matched 19 tokens. +Matched 25 tokens. +Matched 31 tokens. +Matched 37 tokens. +Matched 43 tokens. +Matched 49 tokens. +Matched 55 tokens. +Matched 61 tokens. +Matched 67 tokens. +Matched 72 tokens. +Matched 78 tokens. +Matched 84 tokens. 
+============= Module =========== +== Sub Tree == +js_var Decl: a=1 +== Sub Tree == +js_var Decl: b=1 +== Sub Tree == +js_var Decl: c +== Sub Tree == +c Assign a Add b +== Sub Tree == +c Assign a Sub b +== Sub Tree == +c Assign a Mul b +== Sub Tree == +c Assign a Div b +== Sub Tree == +c Assign a Mod b +== Sub Tree == +c Assign a Shr b +== Sub Tree == +c Assign a Shl b +== Sub Tree == +c Assign a Zext b +== Sub Tree == +js_var Decl: d +== Sub Tree == +d Assign a LT b +== Sub Tree == +d Assign a LE b diff --git a/src/MapleFE/test/typescript/unit_tests/binary-search.ts b/src/MapleFE/test/typescript/unit_tests/binary-search.ts new file mode 100644 index 0000000000000000000000000000000000000000..9aaa1bcdf814d86e74c691eb2509c42c7a641244 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/binary-search.ts @@ -0,0 +1,19 @@ +function binarySearch(array: number[], value: number): number { + var low: number = 0; + var high: number = array.length - 1; + var mid: number = high >>> 1; + for (; low <= high; mid = (low + high) >>> 1) { + const test = array[mid]; + if (test > value) { + high = mid - 1; + } else if (test < value) { + low = mid + 1; + } else { + return mid; + } + } + return ~low; +} + +var sequence: number[] = [13, 21, 34, 55, 89, 144]; +console.log(binarySearch(sequence, 144)); diff --git a/src/MapleFE/test/typescript/unit_tests/binary-search.ts.result b/src/MapleFE/test/typescript/unit_tests/binary-search.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..6303a4639f9eb62830475cb37afca3416c35ef61 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/binary-search.ts.result @@ -0,0 +1,28 @@ +Matched 110 tokens. +Matched 131 tokens. +Matched 143 tokens. +============= Module =========== +== Sub Tree == +func binarySearch(array,value) throws: + js_var Decl: low=0 + js_var Decl: high=array.length Sub 1 + js_var Decl: mid=high Zext 1 + for ( ) + js_const Decl: test=array[mid] + cond-branch cond:test GT value + true branch : + high Assign mid Sub 1 + false branch : + cond-branch cond:test LT value + true branch : + low Assign mid Add 1 + false branch : + return mid + + + return Bcomp low + +== Sub Tree == +js_var Decl: sequence=[13,21,34,55,89,144] +== Sub Tree == +console.log(binarySearch(sequence,144)) diff --git a/src/MapleFE/test/typescript/unit_tests/binding-pattern.ts b/src/MapleFE/test/typescript/unit_tests/binding-pattern.ts new file mode 100644 index 0000000000000000000000000000000000000000..4abf0710d4ed9f6e32463c43667e6749a0c12439 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/binding-pattern.ts @@ -0,0 +1,7 @@ +interface IFace { + Num: number; + Str: string; +} + +declare const func: ({ Num: num, Str: str }?: IFace) => boolean; +export { IFace, func }; diff --git a/src/MapleFE/test/typescript/unit_tests/binding-pattern.ts.result b/src/MapleFE/test/typescript/unit_tests/binding-pattern.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..7e28023e40ee9f1e1371f29d4cf9afe4ba4e96b4 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/binding-pattern.ts.result @@ -0,0 +1,10 @@ +Matched 12 tokens. +Matched 33 tokens. +Matched 40 tokens. 
+============= Module =========== +== Sub Tree == +ts_interface: IFace {Num;Str } +== Sub Tree == +declare js_const Decl: func +== Sub Tree == +export {IFace,func} diff --git a/src/MapleFE/test/typescript/unit_tests/bracket-notation.ts b/src/MapleFE/test/typescript/unit_tests/bracket-notation.ts new file mode 100644 index 0000000000000000000000000000000000000000..3da12ff7cd3217233b81605cacc55312b0c49dfd --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/bracket-notation.ts @@ -0,0 +1,7 @@ +interface Foo { + [key: string]: number; +} + +let bar: Foo = {}; +bar["key1"] = 1; +console.log(bar["key1"]); diff --git a/src/MapleFE/test/typescript/unit_tests/bracket-notation.ts.result b/src/MapleFE/test/typescript/unit_tests/bracket-notation.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..cf441e4309529b1a2ec0cf8efc52ca2ebe835eed --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/bracket-notation.ts.result @@ -0,0 +1,13 @@ +Matched 12 tokens. +Matched 20 tokens. +Matched 27 tokens. +Matched 37 tokens. +============= Module =========== +== Sub Tree == +ts_interface: Foo {string index type: number } +== Sub Tree == +js_let Decl: bar= {} +== Sub Tree == +bar["key1"] Assign 1 +== Sub Tree == +console.log(bar["key1"]) diff --git a/src/MapleFE/test/typescript/unit_tests/call-array-ctor.ts b/src/MapleFE/test/typescript/unit_tests/call-array-ctor.ts new file mode 100644 index 0000000000000000000000000000000000000000..c4524f59b731e58903b4a953e86d37606959f8a2 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/call-array-ctor.ts @@ -0,0 +1,3 @@ +var obj1: string[] = Array("first", "second", "third"); +var obj2: string[] = new Array("1", "2", "3"); +console.log(obj1, obj2); diff --git a/src/MapleFE/test/typescript/unit_tests/call-array-ctor.ts.result b/src/MapleFE/test/typescript/unit_tests/call-array-ctor.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..285f106fa9963fafb9cfe99e55064ea2ba56f600 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/call-array-ctor.ts.result @@ -0,0 +1,10 @@ +Matched 16 tokens. +Matched 33 tokens. +Matched 42 tokens. +============= Module =========== +== Sub Tree == +js_var Decl: obj1=Array("first","second","third") +== Sub Tree == +js_var Decl: obj2=new Array("1","2","3") +== Sub Tree == +console.log(obj1,obj2) diff --git a/src/MapleFE/test/typescript/unit_tests/call-func.ts b/src/MapleFE/test/typescript/unit_tests/call-func.ts new file mode 100644 index 0000000000000000000000000000000000000000..d18e988373d9cb1bd9207a5cb4470d09e7c15178 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/call-func.ts @@ -0,0 +1,12 @@ +function func(o: Object): Object { + return o; +} + +class Klass { + [key: string]: number; + x: number = 123; +} + +var obj: Klass = { x: 1 }; +(func(obj) as Klass)["x"] = 2; +console.log(obj); diff --git a/src/MapleFE/test/typescript/unit_tests/call-func.ts.result b/src/MapleFE/test/typescript/unit_tests/call-func.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..4d0e7c34ab048b4fc85ac6d6364d8de83f674076 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/call-func.ts.result @@ -0,0 +1,26 @@ +Matched 14 tokens. +Matched 32 tokens. +Matched 43 tokens. +Matched 57 tokens. +Matched 64 tokens. 
+============= Module =========== +== Sub Tree == +func func(o) throws: + return o + +== Sub Tree == +class Klass + Fields: + number x=123 + Instance Initializer: + Constructors: + Methods: + LocalClasses: + LocalInterfaces: + +== Sub Tree == +js_var Decl: obj= {x:1} +== Sub Tree == +func(obj)["x"] Assign 2 +== Sub Tree == +console.log(obj) diff --git a/src/MapleFE/test/typescript/unit_tests/car.ts b/src/MapleFE/test/typescript/unit_tests/car.ts new file mode 100644 index 0000000000000000000000000000000000000000..26410d7987d970e5b10bd076e335deac66c1cdc7 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/car.ts @@ -0,0 +1,13 @@ +class Vehicle { + name: string; + constructor(name: string) { + this.name = name; + } +} +class Car extends Vehicle { + constructor(name: string) { + super(name); + } +} +let car: Car = new Car("A car"); +console.log(car.name); diff --git a/src/MapleFE/test/typescript/unit_tests/car.ts.result b/src/MapleFE/test/typescript/unit_tests/car.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..79f120f5d1e290e03d2c08da60bf10d809446024 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/car.ts.result @@ -0,0 +1,33 @@ +Matched 22 tokens. +Matched 41 tokens. +Matched 52 tokens. +Matched 61 tokens. +============= Module =========== +== Sub Tree == +class Vehicle + Fields: + name + Instance Initializer: + Constructors: + constructor (name) throws: + this.name Assign name + Methods: + LocalClasses: + LocalInterfaces: + +== Sub Tree == +class Car + Fields: + + Instance Initializer: + Constructors: + constructor (name) throws: + super(name) + Methods: + LocalClasses: + LocalInterfaces: + +== Sub Tree == +js_let Decl: car=new Car("A car") +== Sub Tree == +console.log(car.name) diff --git a/src/MapleFE/test/typescript/unit_tests/casting-with-generic-type.ts b/src/MapleFE/test/typescript/unit_tests/casting-with-generic-type.ts new file mode 100644 index 0000000000000000000000000000000000000000..c9652eaecfce913e0a55bd1599962ba08cdd069c --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/casting-with-generic-type.ts @@ -0,0 +1,3 @@ +function func(f: Function) { + return <() => Array>f; +} diff --git a/src/MapleFE/test/typescript/unit_tests/casting-with-generic-type.ts.result b/src/MapleFE/test/typescript/unit_tests/casting-with-generic-type.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..26122598b2a01c577e7f6caf04b546b75110b997 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/casting-with-generic-type.ts.result @@ -0,0 +1,6 @@ +Matched 23 tokens. +============= Module =========== +== Sub Tree == +func func(f) throws: + return (() -> )f + diff --git a/src/MapleFE/test/typescript/unit_tests/class-deco.ts b/src/MapleFE/test/typescript/unit_tests/class-deco.ts new file mode 100644 index 0000000000000000000000000000000000000000..93c2869d1ebe1ddc870ccccab38b3f25f4beeafc --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/class-deco.ts @@ -0,0 +1,6 @@ +function class_deco(ctor: Function): void { + console.log("Class constructor is :", ctor); +} +@class_deco +class Klass {} +var o = new Klass(); diff --git a/src/MapleFE/test/typescript/unit_tests/class-deco.ts.result b/src/MapleFE/test/typescript/unit_tests/class-deco.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..ed9f8b106b87edfb0c302c2bd35a5213fdb7d672 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/class-deco.ts.result @@ -0,0 +1,20 @@ +Matched 20 tokens. +Matched 26 tokens. +Matched 34 tokens. 
+============= Module =========== +== Sub Tree == +func class_deco(ctor) throws: + console.log("Class constructor is :",ctor) + +== Sub Tree == +class Klass + Fields: + + Instance Initializer: + Constructors: + Methods: + LocalClasses: + LocalInterfaces: + +== Sub Tree == +js_var Decl: o=new Klass() diff --git a/src/MapleFE/test/typescript/unit_tests/class-deco2.ts b/src/MapleFE/test/typescript/unit_tests/class-deco2.ts new file mode 100644 index 0000000000000000000000000000000000000000..fe783cc93145707009fbc1536d5c6080df34efdb --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/class-deco2.ts @@ -0,0 +1,9 @@ +function class_deco(name: string): Function { + function deco(ctor: Function): void { + console.log("Class constructor is :", ctor, ", Name is: ", name); + } + return deco; +} +@class_deco("Deco") +class Klass {} +var o = new Klass(); diff --git a/src/MapleFE/test/typescript/unit_tests/class-deco2.ts.result b/src/MapleFE/test/typescript/unit_tests/class-deco2.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..626101c9d23cbb6d5100f514d28e81e4966f7709 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/class-deco2.ts.result @@ -0,0 +1,23 @@ +Matched 38 tokens. +Matched 47 tokens. +Matched 55 tokens. +============= Module =========== +== Sub Tree == +func class_deco(name) throws: + func deco(ctor) throws: + console.log("Class constructor is :",ctor,", Name is: ",name) + + return deco + +== Sub Tree == +class Klass + Fields: + + Instance Initializer: + Constructors: + Methods: + LocalClasses: + LocalInterfaces: + +== Sub Tree == +js_var Decl: o=new Klass() diff --git a/src/MapleFE/test/typescript/unit_tests/class-deco3.ts b/src/MapleFE/test/typescript/unit_tests/class-deco3.ts new file mode 100644 index 0000000000000000000000000000000000000000..9e599b2155c6c1712aafee180d57f2582731cba9 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/class-deco3.ts @@ -0,0 +1,29 @@ +function class_deco(name: string): Function { + function deco(ctor: Function): void { + console.log("Class constructor is :", ctor, ", Name is: ", name); + } + return deco; +} + +@class_deco('Klass') +class Klass { + data: any = null; + public setData(value: any) { + this.data= [ + { + n: value, + }, + ]; + } + + public dump (value: number) { + switch (value) { + case 1: + console.log(value, this.data); + } + } +} + +let obj: Klass = new Klass(); +obj.setData(123); +obj.dump(1); diff --git a/src/MapleFE/test/typescript/unit_tests/class-deco3.ts.result b/src/MapleFE/test/typescript/unit_tests/class-deco3.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..d150e86876fa5ca48788a0859c9f42299389c81c --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/class-deco3.ts.result @@ -0,0 +1,34 @@ +Matched 38 tokens. +Matched 105 tokens. +Matched 115 tokens. +Matched 122 tokens. +Matched 129 tokens. 
+============= Module =========== +== Sub Tree == +func class_deco(name) throws: + func deco(ctor) throws: + console.log("Class constructor is :",ctor,", Name is: ",name) + + return deco + +== Sub Tree == +class Klass + Fields: + data=null + Instance Initializer: + Constructors: + Methods: + func setData(value) throws: + this.data Assign [ {n:value}] + func dump(value) throws: + A switch + + LocalClasses: + LocalInterfaces: + +== Sub Tree == +js_let Decl: obj=new Klass() +== Sub Tree == +obj.setData(123) +== Sub Tree == +obj.dump(1) diff --git a/src/MapleFE/test/typescript/unit_tests/class-deco4.ts b/src/MapleFE/test/typescript/unit_tests/class-deco4.ts new file mode 100644 index 0000000000000000000000000000000000000000..9174fb8079897328c8aaf3a4c02bbcb31e156753 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/class-deco4.ts @@ -0,0 +1,22 @@ +function class_deco(name: string): Function { + function deco(ctor: Function): void { + console.log("Class constructor is :", ctor, ", Name is: ", name); + } + return deco; +} + +@class_deco('Klass') +class Klass { + data: + {n: number} = {n : 123}; + + public dump (value: number) { + switch (value) { + case 1: + console.log(value, this.data); + } + } +} + +let obj: Klass = new Klass(); +obj.dump(1); diff --git a/src/MapleFE/test/typescript/unit_tests/class-deco4.ts.result b/src/MapleFE/test/typescript/unit_tests/class-deco4.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..b52fdf7351455c144a4bf82ff4623a2dd1db1b91 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/class-deco4.ts.result @@ -0,0 +1,29 @@ +Matched 38 tokens. +Matched 90 tokens. +Matched 100 tokens. +Matched 107 tokens. +============= Module =========== +== Sub Tree == +func class_deco(name) throws: + func deco(ctor) throws: + console.log("Class constructor is :",ctor,", Name is: ",name) + + return deco + +== Sub Tree == +class Klass + Fields: + data= {n:123} + Instance Initializer: + Constructors: + Methods: + func dump(value) throws: + A switch + + LocalClasses: + LocalInterfaces: + +== Sub Tree == +js_let Decl: obj=new Klass() +== Sub Tree == +obj.dump(1) diff --git a/src/MapleFE/test/typescript/unit_tests/class-direct-fd.ts b/src/MapleFE/test/typescript/unit_tests/class-direct-fd.ts new file mode 100644 index 0000000000000000000000000000000000000000..f68d085fd6f21b6d5d2cd41575ffc5c00a39dedc --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/class-direct-fd.ts @@ -0,0 +1,32 @@ +class Foo { + public f1: number = 0; + public f2: number = 0; + constructor(a: number, b: number) { + this.f1 = a; + this.f2 = b; + } +} + +var bar : Foo = {f1:1, f2:2}; +bar.f1 = 3; // direct dot +bar["f1"] = 4; // direct prop +bar["p1"] = 10; // prop + +bar.f2 = bar.f1; // direct dot = direct dot +bar.f2 = bar["f1"]; // direct dot = direct prop +bar["f1"] = bar["f2"]; // direct prop = direct prop +bar["f1"] = bar["p1"]; // direct prop = prop + +bar["p2"] = bar["f2"]; // prop = direct prop +bar["p2"] = bar["p1"]; // prop = prop +bar["p2"] = bar["p2"] >> bar["p1"]; // (int32_t)(xxx) = yyy >> zzz +bar["p2"] = bar["p2"] >>> bar["p1"]; // (uint32_t)(xxx) = yyy >> zzz + + +console.log(bar.f1); +console.log(bar.f2); +console.log(bar["f1"]); +console.log(bar["f2"]); +console.log(bar["p1"]); +console.log(bar["p2"]); + diff --git a/src/MapleFE/test/typescript/unit_tests/class-direct-fd.ts.result b/src/MapleFE/test/typescript/unit_tests/class-direct-fd.ts.result new file mode 100644 index 
0000000000000000000000000000000000000000..1863507c9b01e410acbaafa80ad19103ce446590 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/class-direct-fd.ts.result @@ -0,0 +1,69 @@ +Matched 42 tokens. +Matched 57 tokens. +Matched 63 tokens. +Matched 70 tokens. +Matched 77 tokens. +Matched 85 tokens. +Matched 94 tokens. +Matched 104 tokens. +Matched 114 tokens. +Matched 124 tokens. +Matched 134 tokens. +Matched 149 tokens. +Matched 164 tokens. +Matched 173 tokens. +Matched 182 tokens. +Matched 192 tokens. +Matched 202 tokens. +Matched 212 tokens. +Matched 222 tokens. +============= Module =========== +== Sub Tree == +class Foo + Fields: + f1=0 f2=0 + Instance Initializer: + Constructors: + constructor (a,b) throws: + this.f1 Assign a + this.f2 Assign b + Methods: + LocalClasses: + LocalInterfaces: + +== Sub Tree == +js_var Decl: bar= {f1:1, f2:2} +== Sub Tree == +bar.f1 Assign 3 +== Sub Tree == +bar["f1"] Assign 4 +== Sub Tree == +bar["p1"] Assign 10 +== Sub Tree == +bar.f2 Assign bar.f1 +== Sub Tree == +bar.f2 Assign bar["f1"] +== Sub Tree == +bar["f1"] Assign bar["f2"] +== Sub Tree == +bar["f1"] Assign bar["p1"] +== Sub Tree == +bar["p2"] Assign bar["f2"] +== Sub Tree == +bar["p2"] Assign bar["p1"] +== Sub Tree == +bar["p2"] Assign bar["p2"] Shr bar["p1"] +== Sub Tree == +bar["p2"] Assign bar["p2"] Zext bar["p1"] +== Sub Tree == +console.log(bar.f1) +== Sub Tree == +console.log(bar.f2) +== Sub Tree == +console.log(bar["f1"]) +== Sub Tree == +console.log(bar["f2"]) +== Sub Tree == +console.log(bar["p1"]) +== Sub Tree == +console.log(bar["p2"]) diff --git a/src/MapleFE/test/typescript/unit_tests/class-extends.ts b/src/MapleFE/test/typescript/unit_tests/class-extends.ts new file mode 100644 index 0000000000000000000000000000000000000000..176d1555e8b04bf5a36576c675ed0b9dc541cfa5 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/class-extends.ts @@ -0,0 +1,23 @@ +class Car { + private _make: string; + constructor(make: string) { + this._make = make; + } + public getMake(): string { + return this._make; + } +} + +class Model extends Car { + private _model: string; + constructor(make: string, model: string) { + super(make); + this._model = model; + } + public getModel(): string { + return this._model; + } +} + +let passat: Model = new Model("VW", "Passat"); +console.log(passat.getMake(), passat.getModel()); diff --git a/src/MapleFE/test/typescript/unit_tests/class-extends.ts.result b/src/MapleFE/test/typescript/unit_tests/class-extends.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..194818d19c823c5616d1de1765403897d2f8f277 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/class-extends.ts.result @@ -0,0 +1,38 @@ +Matched 36 tokens. +Matched 83 tokens. +Matched 96 tokens. +Matched 113 tokens. 
+============= Module =========== +== Sub Tree == +class Car + Fields: + _make + Instance Initializer: + Constructors: + constructor (make) throws: + this._make Assign make + Methods: + func getMake() throws: + return this._make + LocalClasses: + LocalInterfaces: + +== Sub Tree == +class Model + Fields: + _model + Instance Initializer: + Constructors: + constructor (make,model) throws: + super(make) + this._model Assign model + Methods: + func getModel() throws: + return this._model + LocalClasses: + LocalInterfaces: + +== Sub Tree == +js_let Decl: passat=new Model("VW","Passat") +== Sub Tree == +console.log(passat.getMake(),passat.getModel()) diff --git a/src/MapleFE/test/typescript/unit_tests/class-extends2.ts b/src/MapleFE/test/typescript/unit_tests/class-extends2.ts new file mode 100644 index 0000000000000000000000000000000000000000..ae538bf8cfdfab1c4fc94e3500908224f81b982f --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/class-extends2.ts @@ -0,0 +1,23 @@ +class Car { + private _make: string; + constructor(make: string) { + this._make = make; + } + public getMake(): string { + return this._make; + } +} + +class Model extends Car { + private _model: string; + constructor(make: string, model: string) { + super(make); + this._model = super.getMake() + model; + } + public getModel(): string { + return this._model; + } +} + +let passat: Model = new Model("VW", "Passat"); +console.log(passat.getMake(), passat.getModel()); diff --git a/src/MapleFE/test/typescript/unit_tests/class-extends2.ts.result b/src/MapleFE/test/typescript/unit_tests/class-extends2.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..2ad27f7c11b90658965008e57e37cefcd85e8d61 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/class-extends2.ts.result @@ -0,0 +1,38 @@ +Matched 36 tokens. +Matched 89 tokens. +Matched 102 tokens. +Matched 119 tokens. 
+============= Module =========== +== Sub Tree == +class Car + Fields: + _make + Instance Initializer: + Constructors: + constructor (make) throws: + this._make Assign make + Methods: + func getMake() throws: + return this._make + LocalClasses: + LocalInterfaces: + +== Sub Tree == +class Model + Fields: + _model + Instance Initializer: + Constructors: + constructor (make,model) throws: + super(make) + this._model Assign super.getMake() Add model + Methods: + func getModel() throws: + return this._model + LocalClasses: + LocalInterfaces: + +== Sub Tree == +js_let Decl: passat=new Model("VW","Passat") +== Sub Tree == +console.log(passat.getMake(),passat.getModel()) diff --git a/src/MapleFE/test/typescript/unit_tests/class-extends3.ts b/src/MapleFE/test/typescript/unit_tests/class-extends3.ts new file mode 100644 index 0000000000000000000000000000000000000000..356bf07792ee304fab0458f781062929096a0044 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/class-extends3.ts @@ -0,0 +1,22 @@ +class Car { + private _make: string; + constructor(make: string) { + this._make = make; + } + public getMake(): string { + return this._make; + } +} + +class Model extends Car { + private _model: string; + constructor(make: string, model: string) { + super(make) this._model = super.getMake() + model; + } + public getModel(): string { + return this._model; + } +} + +let passat: Model = new Model("VW", "Passat"); +console.log(passat.getMake(), passat.getModel()); diff --git a/src/MapleFE/test/typescript/unit_tests/class-extends3.ts.result b/src/MapleFE/test/typescript/unit_tests/class-extends3.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..5368fc30da365058f5d1934d73ee3dfecd5b2e61 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/class-extends3.ts.result @@ -0,0 +1,38 @@ +Matched 36 tokens. +Matched 88 tokens. +Matched 101 tokens. +Matched 118 tokens. +============= Module =========== +== Sub Tree == +class Car + Fields: + _make + Instance Initializer: + Constructors: + constructor (make) throws: + this._make Assign make + Methods: + func getMake() throws: + return this._make + LocalClasses: + LocalInterfaces: + +== Sub Tree == +class Model + Fields: + _model + Instance Initializer: + Constructors: + constructor (make,model) throws: + super(make) + this._model Assign super.getMake() Add model + Methods: + func getModel() throws: + return this._model + LocalClasses: + LocalInterfaces: + +== Sub Tree == +js_let Decl: passat=new Model("VW","Passat") +== Sub Tree == +console.log(passat.getMake(),passat.getModel()) diff --git a/src/MapleFE/test/typescript/unit_tests/class-generics-arrowfunc.ts b/src/MapleFE/test/typescript/unit_tests/class-generics-arrowfunc.ts new file mode 100644 index 0000000000000000000000000000000000000000..28f611a4e94a9086fa416f9e52208c49078a3b3e --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/class-generics-arrowfunc.ts @@ -0,0 +1,25 @@ +// Class with generics and arrow function. +// - Class constructor takes a function as parameter and saves it. +// - The saved function allocates and return an object with generics type. +// - Class method alloc() calls the saved function to allocate and return an obj. 
+class Foo { + private _ctor: () => T; + + constructor(ctor: () => T) { + this._ctor = ctor; + } + + public alloc(): T { + return this._ctor(); + } +} + +// Create class object with String type +// - Create class object with type String +// - Pass an arrow function (that returns a new String object) to the class constructor +const FooString: Foo = new Foo(() => new String("foo")); + +// Optional check. +// - Call alloc() of new class object to get a new object and +// - call the object's builtin toString() method to display it. +console.log(FooString.alloc().toString()); diff --git a/src/MapleFE/test/typescript/unit_tests/class-generics-arrowfunc.ts.result b/src/MapleFE/test/typescript/unit_tests/class-generics-arrowfunc.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..a83ce45d57efe59786174ed01db3b6b11892246d --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/class-generics-arrowfunc.ts.result @@ -0,0 +1,22 @@ +Matched 47 tokens. +Matched 71 tokens. +Matched 86 tokens. +============= Module =========== +== Sub Tree == +class Foo + Fields: + _ctor + Instance Initializer: + Constructors: + constructor (ctor) throws: + this._ctor Assign ctor + Methods: + func alloc() throws: + return this._ctor() + LocalClasses: + LocalInterfaces: + +== Sub Tree == +js_const Decl: FooString=new Foo(() -> new String("foo")) +== Sub Tree == +console.log(FooString.alloc().toString()) diff --git a/src/MapleFE/test/typescript/unit_tests/class-implements-interface.ts b/src/MapleFE/test/typescript/unit_tests/class-implements-interface.ts new file mode 100644 index 0000000000000000000000000000000000000000..d4bf9e97f3cdf069f9b9f72a96eb4731d4ff5dec --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/class-implements-interface.ts @@ -0,0 +1,5 @@ +interface Intf1 {} +class C1 implements Intf1 {} + +interface Intf2 {} +class C2 implements Intf2 {} diff --git a/src/MapleFE/test/typescript/unit_tests/class-implements-interface.ts.result b/src/MapleFE/test/typescript/unit_tests/class-implements-interface.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..89303989da48c640f2cfedff34ca4d1addec4ac6 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/class-implements-interface.ts.result @@ -0,0 +1,29 @@ +Matched 4 tokens. +Matched 10 tokens. +Matched 17 tokens. +Matched 29 tokens. +============= Module =========== +== Sub Tree == +ts_interface: Intf1 { } +== Sub Tree == +class C1 + Fields: + + Instance Initializer: + Constructors: + Methods: + LocalClasses: + LocalInterfaces: + +== Sub Tree == +ts_interface: Intf2 { } +== Sub Tree == +class C2 + Fields: + + Instance Initializer: + Constructors: + Methods: + LocalClasses: + LocalInterfaces: + diff --git a/src/MapleFE/test/typescript/unit_tests/class-in-func.ts b/src/MapleFE/test/typescript/unit_tests/class-in-func.ts new file mode 100644 index 0000000000000000000000000000000000000000..42a5e9634d166e994c1f1cf10812e8ca70dd2ff5 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/class-in-func.ts @@ -0,0 +1,6 @@ +function func(): Function { + class Car {} + return Car; +} +console.log(func()); + diff --git a/src/MapleFE/test/typescript/unit_tests/class-in-func.ts.result b/src/MapleFE/test/typescript/unit_tests/class-in-func.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..382cdd5a0ba7c0731a823653f9643b9f4658cfce --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/class-in-func.ts.result @@ -0,0 +1,18 @@ +Matched 15 tokens. +Matched 24 tokens. 
+============= Module =========== +== Sub Tree == +func func() throws: + class Car + Fields: + + Instance Initializer: + Constructors: + Methods: + LocalClasses: + LocalInterfaces: + + return Car + +== Sub Tree == +console.log(func()) diff --git a/src/MapleFE/test/typescript/unit_tests/class-type.ts b/src/MapleFE/test/typescript/unit_tests/class-type.ts new file mode 100644 index 0000000000000000000000000000000000000000..af4104977e7dbab267736e405600e69122dd892f --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/class-type.ts @@ -0,0 +1,2 @@ +console.log(typeof new (class {})); +console.log(new (class {f: number = 123;})); diff --git a/src/MapleFE/test/typescript/unit_tests/class-type.ts.result b/src/MapleFE/test/typescript/unit_tests/class-type.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..51e6940208ca513dab8d0f3517c1f9dc23440aee --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/class-type.ts.result @@ -0,0 +1,7 @@ +Matched 13 tokens. +Matched 31 tokens. +============= Module =========== +== Sub Tree == +console.log( typeof new ()) +== Sub Tree == +console.log(new ()) diff --git a/src/MapleFE/test/typescript/unit_tests/class.ts b/src/MapleFE/test/typescript/unit_tests/class.ts new file mode 100644 index 0000000000000000000000000000000000000000..6103ec8a6fe6a680ec46e13c950211da090f45bf --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/class.ts @@ -0,0 +1,8 @@ +class Foo { + public f1: number = 0; + private f2: number = 0; + constructor(a: number, b: number) { + this.f1 = a; + this.f2 = b; + } +} diff --git a/src/MapleFE/test/typescript/unit_tests/class.ts.result b/src/MapleFE/test/typescript/unit_tests/class.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..01b7a8b13f627aae8015a633428937e4d1bb71cd --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/class.ts.result @@ -0,0 +1,15 @@ +Matched 42 tokens. +============= Module =========== +== Sub Tree == +class Foo + Fields: + f1=0 f2=0 + Instance Initializer: + Constructors: + constructor (a,b) throws: + this.f1 Assign a + this.f2 Assign b + Methods: + LocalClasses: + LocalInterfaces: + diff --git a/src/MapleFE/test/typescript/unit_tests/class1.ts b/src/MapleFE/test/typescript/unit_tests/class1.ts new file mode 100644 index 0000000000000000000000000000000000000000..721b9a69277d69ef5bed73071021c144876645ca --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/class1.ts @@ -0,0 +1,30 @@ +class Bar { + public b1: number = 5; + public b2: number = 6; +} + +class Foo { + public f1: number = 0; + public f2: number = 0; + public bar: Bar; + constructor(a: number, b: number) { + this.f1 = a; + this.f2 = b; + this.bar= new Bar(); + } +} + +class App { + public foo: Foo; + constructor(a: number, b: number) { + this.foo = new Foo(a, b); + } +} + +var foo: Foo = new Foo(1, 2); +console.log(foo); + +var app: App = new App(3, 4); +console.log(app); + + diff --git a/src/MapleFE/test/typescript/unit_tests/class1.ts.result b/src/MapleFE/test/typescript/unit_tests/class1.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..42cfac29ee3fbb55d509d9712f3490e3438b7e71 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/class1.ts.result @@ -0,0 +1,52 @@ +Matched 18 tokens. +Matched 74 tokens. +Matched 107 tokens. +Matched 120 tokens. +Matched 127 tokens. +Matched 140 tokens. +Matched 147 tokens. 
+============= Module =========== +== Sub Tree == +class Bar + Fields: + b1=5 b2=6 + Instance Initializer: + Constructors: + Methods: + LocalClasses: + LocalInterfaces: + +== Sub Tree == +class Foo + Fields: + f1=0 f2=0 bar + Instance Initializer: + Constructors: + constructor (a,b) throws: + this.f1 Assign a + this.f2 Assign b + this.bar Assign new Bar() + Methods: + LocalClasses: + LocalInterfaces: + +== Sub Tree == +class App + Fields: + foo + Instance Initializer: + Constructors: + constructor (a,b) throws: + this.foo Assign new Foo(a,b) + Methods: + LocalClasses: + LocalInterfaces: + +== Sub Tree == +js_var Decl: foo=new Foo(1,2) +== Sub Tree == +console.log(foo) +== Sub Tree == +js_var Decl: app=new App(3,4) +== Sub Tree == +console.log(app) diff --git a/src/MapleFE/test/typescript/unit_tests/class2.ts b/src/MapleFE/test/typescript/unit_tests/class2.ts new file mode 100644 index 0000000000000000000000000000000000000000..cef2e430e977d30ec47fa8da745e5329873b0f71 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/class2.ts @@ -0,0 +1,15 @@ +class Klass {} +class Foo { + public f1: number = 0; + private f2: number = 0; + constructor(a: number, b: number) { + this.f1 = a; + this.f2 = b; + } + public static test(obj: unknown): obj is Klass { + return obj instanceof Klass; + } +} + +var obj: Klass = new Klass(); +console.log(Foo.test(obj)); diff --git a/src/MapleFE/test/typescript/unit_tests/class2.ts.result b/src/MapleFE/test/typescript/unit_tests/class2.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..cc2dbc1ecfa7502adae0f99f43a7a19c8d2944da --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/class2.ts.result @@ -0,0 +1,34 @@ +Matched 4 tokens. +Matched 65 tokens. +Matched 75 tokens. +Matched 87 tokens. +============= Module =========== +== Sub Tree == +class Klass + Fields: + + Instance Initializer: + Constructors: + Methods: + LocalClasses: + LocalInterfaces: + +== Sub Tree == +class Foo + Fields: + f1=0 f2=0 + Instance Initializer: + Constructors: + constructor (a,b) throws: + this.f1 Assign a + this.f2 Assign b + Methods: + func test(obj) throws: + return obj instanceof Klass + LocalClasses: + LocalInterfaces: + +== Sub Tree == +js_var Decl: obj=new Klass() +== Sub Tree == +console.log(Foo.test(obj)) diff --git a/src/MapleFE/test/typescript/unit_tests/class3.ts b/src/MapleFE/test/typescript/unit_tests/class3.ts new file mode 100644 index 0000000000000000000000000000000000000000..9b2b79c39f13cdb9b6c9dca8454420dccb7a4052 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/class3.ts @@ -0,0 +1,11 @@ +class Foo { + public out: number = 0; + public in: number = 0; + constructor(a: number, b: number) { + this.out = a; + this.in = b; + } +} + +var obj: Foo = new Foo(12, 34); +console.log(obj.in); diff --git a/src/MapleFE/test/typescript/unit_tests/class3.ts.result b/src/MapleFE/test/typescript/unit_tests/class3.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..a659f6a71e5e584e1a54c9b0e6b15f07b065d60d --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/class3.ts.result @@ -0,0 +1,21 @@ +Matched 42 tokens. +Matched 55 tokens. +Matched 64 tokens. 
+============= Module =========== +== Sub Tree == +class Foo + Fields: + out=0 in=0 + Instance Initializer: + Constructors: + constructor (a,b) throws: + this.out Assign a + this.in Assign b + Methods: + LocalClasses: + LocalInterfaces: + +== Sub Tree == +js_var Decl: obj=new Foo(12,34) +== Sub Tree == +console.log(obj.in) diff --git a/src/MapleFE/test/typescript/unit_tests/class4.ts b/src/MapleFE/test/typescript/unit_tests/class4.ts new file mode 100644 index 0000000000000000000000000000000000000000..f9aea57360141cf48427de02147514af1aaff5ec --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/class4.ts @@ -0,0 +1,12 @@ +class Foo { + public s: string; + constructor(f: (args?: [d: string, i: string], obj?: string) => string) { + this.s = f(); + } +} + +function func(args?: [d: string, i: string], obj?: string): string { + return "abc"; +} +var obj: Foo = new Foo(func); +console.log(obj.s); diff --git a/src/MapleFE/test/typescript/unit_tests/class4.ts.result b/src/MapleFE/test/typescript/unit_tests/class4.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..0c805813345d538628820d057ab095d144a32c03 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/class4.ts.result @@ -0,0 +1,25 @@ +Matched 45 tokens. +Matched 73 tokens. +Matched 84 tokens. +Matched 93 tokens. +============= Module =========== +== Sub Tree == +class Foo + Fields: + s + Instance Initializer: + Constructors: + constructor (f) throws: + this.s Assign f() + Methods: + LocalClasses: + LocalInterfaces: + +== Sub Tree == +func func(args?,obj?) throws: + return "abc" + +== Sub Tree == +js_var Decl: obj=new Foo(func) +== Sub Tree == +console.log(obj.s) diff --git a/src/MapleFE/test/typescript/unit_tests/class5.ts b/src/MapleFE/test/typescript/unit_tests/class5.ts new file mode 100644 index 0000000000000000000000000000000000000000..9e6a4413d95e9586a6973e8328f0b1823eedc63b --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/class5.ts @@ -0,0 +1,6 @@ +class Klass { + public switch: number = 0; +} + +var obj: Klass = new Klass(); +console.log(obj, obj.switch); diff --git a/src/MapleFE/test/typescript/unit_tests/class5.ts.result b/src/MapleFE/test/typescript/unit_tests/class5.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..1c10c42ffd7ecc28a2d1728c35432b3c9ebdd502 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/class5.ts.result @@ -0,0 +1,18 @@ +Matched 11 tokens. +Matched 21 tokens. +Matched 32 tokens. 
+============= Module =========== +== Sub Tree == +class Klass + Fields: + switch=0 + Instance Initializer: + Constructors: + Methods: + LocalClasses: + LocalInterfaces: + +== Sub Tree == +js_var Decl: obj=new Klass() +== Sub Tree == +console.log(obj,obj.switch) diff --git a/src/MapleFE/test/typescript/unit_tests/class6.ts b/src/MapleFE/test/typescript/unit_tests/class6.ts new file mode 100644 index 0000000000000000000000000000000000000000..ad924fc06795ae44aa9fc447ddf7a8480482c9f2 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/class6.ts @@ -0,0 +1,20 @@ +class Klass { + public num: number = 1; + + if(n: number): boolean { + return this.num == n; + } + + try(n: number): void { + if(n == this.num) + console.log("EQ"); + else + console.log("NE"); + } +} + +var obj: Klass = new Klass(); +console.log(obj.if(0)); +console.log(obj.if(1)); +obj.try(0); +obj.try(1); diff --git a/src/MapleFE/test/typescript/unit_tests/class6.ts.result b/src/MapleFE/test/typescript/unit_tests/class6.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..08a20f91d9cb7bde8eba40afc7635cd73adeb7ca --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/class6.ts.result @@ -0,0 +1,34 @@ +Matched 61 tokens. +Matched 71 tokens. +Matched 83 tokens. +Matched 95 tokens. +Matched 102 tokens. +Matched 109 tokens. +============= Module =========== +== Sub Tree == +class Klass + Fields: + num=1 + Instance Initializer: + Constructors: + Methods: + func if(n) throws: + return this.num EQ n + func try(n) throws: + cond-branch cond:n EQ this.num + true branch : + console.log("EQ") false branch : + console.log("NE") + LocalClasses: + LocalInterfaces: + +== Sub Tree == +js_var Decl: obj=new Klass() +== Sub Tree == +console.log(obj.if(0)) +== Sub Tree == +console.log(obj.if(1)) +== Sub Tree == +obj.try(0) +== Sub Tree == +obj.try(1) diff --git a/src/MapleFE/test/typescript/unit_tests/classFactory.ts b/src/MapleFE/test/typescript/unit_tests/classFactory.ts new file mode 100644 index 0000000000000000000000000000000000000000..08904c0b24442466500662faebd735fff8e1b422 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/classFactory.ts @@ -0,0 +1,24 @@ +/* 1. class factory */ + +// Generic constructor interface +type Constructor = new (...args: any[]) => T; + +// A standard interface to be incorporated into all generated classes +interface someStandardInterface {} + +// Class factory that takes a base class and generates a new class with a standard interface +function ClassFactory( + base: Constructor +): Constructor { + class GeneratedClass extends (base as unknown as any) {} + + return GeneratedClass as unknown as any; +} + +/* 2. Usage of the class factory */ + +// A class to be use as base for generating one with some standard interface +class someBaseClass {} + +// A class that uses ClassFactory to generate a new class with standard inteface +class newClassWithStandardInterface extends ClassFactory(someBaseClass) {} diff --git a/src/MapleFE/test/typescript/unit_tests/classFactory.ts.result b/src/MapleFE/test/typescript/unit_tests/classFactory.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..cce1551435250ee21f4aa440a66ef697e6288c8f --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/classFactory.ts.result @@ -0,0 +1,43 @@ +Matched 21 tokens. +Matched 25 tokens. +Matched 66 tokens. +Matched 70 tokens. +Matched 79 tokens. 
+============= Module =========== +== Sub Tree == + type Constructor = new (args) -> +== Sub Tree == +ts_interface: someStandardInterface { } +== Sub Tree == +func ClassFactory(base) throws: + class GeneratedClass + Fields: + + Instance Initializer: + Constructors: + Methods: + LocalClasses: + LocalInterfaces: + + return GeneratedClass + +== Sub Tree == +class someBaseClass + Fields: + + Instance Initializer: + Constructors: + Methods: + LocalClasses: + LocalInterfaces: + +== Sub Tree == +class newClassWithStandardInterface + Fields: + + Instance Initializer: + Constructors: + Methods: + LocalClasses: + LocalInterfaces: + diff --git a/src/MapleFE/test/typescript/unit_tests/classFactory2.ts b/src/MapleFE/test/typescript/unit_tests/classFactory2.ts new file mode 100644 index 0000000000000000000000000000000000000000..dc4f599ee9ce6c0c27a975d9799aead1a34c61d5 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/classFactory2.ts @@ -0,0 +1,10 @@ +type Constructor = new (...args: any[]) => T; +interface someStandardInterface {} +function ClassFactory( + base: Constructor +): Constructor { + class GeneratedClass extends (base as unknown as any) {} + return GeneratedClass as unknown as any; +} +class someBaseClass {} +class newClassWithStandardInterface extends ClassFactory(someBaseClass) {} diff --git a/src/MapleFE/test/typescript/unit_tests/classFactory2.ts.result b/src/MapleFE/test/typescript/unit_tests/classFactory2.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..cce1551435250ee21f4aa440a66ef697e6288c8f --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/classFactory2.ts.result @@ -0,0 +1,43 @@ +Matched 21 tokens. +Matched 25 tokens. +Matched 66 tokens. +Matched 70 tokens. +Matched 79 tokens. +============= Module =========== +== Sub Tree == + type Constructor = new (args) -> +== Sub Tree == +ts_interface: someStandardInterface { } +== Sub Tree == +func ClassFactory(base) throws: + class GeneratedClass + Fields: + + Instance Initializer: + Constructors: + Methods: + LocalClasses: + LocalInterfaces: + + return GeneratedClass + +== Sub Tree == +class someBaseClass + Fields: + + Instance Initializer: + Constructors: + Methods: + LocalClasses: + LocalInterfaces: + +== Sub Tree == +class newClassWithStandardInterface + Fields: + + Instance Initializer: + Constructors: + Methods: + LocalClasses: + LocalInterfaces: + diff --git a/src/MapleFE/test/typescript/unit_tests/closure-let.ts b/src/MapleFE/test/typescript/unit_tests/closure-let.ts new file mode 100644 index 0000000000000000000000000000000000000000..50f508b22cac66fa3033165004779f505361ece5 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/closure-let.ts @@ -0,0 +1,12 @@ +let funcs: (() => void)[] = []; +function initialize() { + var msgs: [string, string] = ["Hello", "World"]; + for (var i = 0; i < msgs.length; i++) { + let msg: string = msgs[i]; + funcs[i] = () => console.log(msg); + } +} +initialize(); +for (var i = 0; i < funcs.length; i++) { + funcs[i](); +} diff --git a/src/MapleFE/test/typescript/unit_tests/closure-let.ts.result b/src/MapleFE/test/typescript/unit_tests/closure-let.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..fa793762dcd6d0452eca806758d0436aa4a7cdb3 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/closure-let.ts.result @@ -0,0 +1,21 @@ +Matched 15 tokens. +Matched 79 tokens. +Matched 83 tokens. +Matched 108 tokens. 
+============= Module =========== +== Sub Tree == +js_let Decl: funcs=[] +== Sub Tree == +func initialize() throws: + js_var Decl: msgs=["Hello","World"] + for ( ) + js_let Decl: msg=msgs[i] + funcs[i] Assign () -> console.log(msg) + + +== Sub Tree == +initialize() +== Sub Tree == +for ( ) + funcs[i]() + diff --git a/src/MapleFE/test/typescript/unit_tests/closure.ts b/src/MapleFE/test/typescript/unit_tests/closure.ts new file mode 100644 index 0000000000000000000000000000000000000000..880b66a5a31e55ab49992d5bb280290469a6b3e4 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/closure.ts @@ -0,0 +1,17 @@ +var base: number = 10; + +function counter(private_count: number): () => number { + function increment(): number { + private_count += 1; + return base + private_count; + } + + return increment; +} + +var count: () => number = counter(0); +console.log(count()); // output: 11 +console.log(count()); // output: 12 +var count2: () => number = counter(100); +console.log(count2()); // output: 111 +console.log(count2()); // output: 112 diff --git a/src/MapleFE/test/typescript/unit_tests/closure.ts.result b/src/MapleFE/test/typescript/unit_tests/closure.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..9672af69ac71c39bf99703d626b268cd38c671f6 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/closure.ts.result @@ -0,0 +1,31 @@ +Matched 7 tokens. +Matched 41 tokens. +Matched 54 tokens. +Matched 63 tokens. +Matched 72 tokens. +Matched 85 tokens. +Matched 94 tokens. +Matched 103 tokens. +============= Module =========== +== Sub Tree == +js_var Decl: base=10 +== Sub Tree == +func counter(private_count) throws: + func increment() throws: + private_count AddAssign 1 + return base Add private_count + + return increment + +== Sub Tree == +js_var Decl: count=counter(0) +== Sub Tree == +console.log(count()) +== Sub Tree == +console.log(count()) +== Sub Tree == +js_var Decl: count2=counter(100) +== Sub Tree == +console.log(count2()) +== Sub Tree == +console.log(count2()) diff --git a/src/MapleFE/test/typescript/unit_tests/comma.ts b/src/MapleFE/test/typescript/unit_tests/comma.ts new file mode 100644 index 0000000000000000000000000000000000000000..e2da02e7bce092263500ab2019fe72e2aa446074 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/comma.ts @@ -0,0 +1,9 @@ +var arr: number[] = [7, 4, 5, 9, 2, 8, 1, 6, 3]; +var sum: number = 0; +var i; +var len; +(i = 0), (len = arr.length); +for (; i < len; ++i) { + sum += arr[i]; +} +console.log(sum); diff --git a/src/MapleFE/test/typescript/unit_tests/comma.ts.result b/src/MapleFE/test/typescript/unit_tests/comma.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..228d2770b375c7a582b8f79b54951b5234a6b64d --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/comma.ts.result @@ -0,0 +1,26 @@ +Matched 27 tokens. +Matched 34 tokens. +Matched 37 tokens. +Matched 40 tokens. +Matched 54 tokens. +Matched 73 tokens. +Matched 80 tokens. 
+============= Module =========== +== Sub Tree == +js_var Decl: arr=[7,4,5,9,2,8,1,6,3] +== Sub Tree == +js_var Decl: sum=0 +== Sub Tree == +js_var Decl: i +== Sub Tree == +js_var Decl: len +== Sub Tree == +i Assign 0 +== Sub Tree == +len Assign arr.length +== Sub Tree == +for ( ) + sum AddAssign arr[i] + +== Sub Tree == +console.log(sum) diff --git a/src/MapleFE/test/typescript/unit_tests/comma2.ts b/src/MapleFE/test/typescript/unit_tests/comma2.ts new file mode 100644 index 0000000000000000000000000000000000000000..4aec7822ab8bd4b4c1739d29fbaa0a362929bdc0 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/comma2.ts @@ -0,0 +1,9 @@ +var arr: number[] = [-1, +4, 5, 9, 2, 8, 1, 6, 3]; +var sum: number = 0; +var i; +var len; +(i = 0), (len = arr.length); +for (; i < len; ++i) { + sum += arr[i]; +} +console.log(sum); diff --git a/src/MapleFE/test/typescript/unit_tests/comma2.ts.result b/src/MapleFE/test/typescript/unit_tests/comma2.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..6448754f63ebf97c9d608070aaf46a8fcdc8fa15 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/comma2.ts.result @@ -0,0 +1,26 @@ +Matched 27 tokens. +Matched 34 tokens. +Matched 37 tokens. +Matched 40 tokens. +Matched 54 tokens. +Matched 73 tokens. +Matched 80 tokens. +============= Module =========== +== Sub Tree == +js_var Decl: arr=[-1,4,5,9,2,8,1,6,3] +== Sub Tree == +js_var Decl: sum=0 +== Sub Tree == +js_var Decl: i +== Sub Tree == +js_var Decl: len +== Sub Tree == +i Assign 0 +== Sub Tree == +len Assign arr.length +== Sub Tree == +for ( ) + sum AddAssign arr[i] + +== Sub Tree == +console.log(sum) diff --git a/src/MapleFE/test/typescript/unit_tests/computed-func-name.ts b/src/MapleFE/test/typescript/unit_tests/computed-func-name.ts new file mode 100644 index 0000000000000000000000000000000000000000..56d7ac01ea9c030c8514c9b7511a2cfe3b78c565 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/computed-func-name.ts @@ -0,0 +1,11 @@ +let prop: string = "foo"; + +class Klass { + [key: string]: () => void; + public [prop]() { + console.log(prop); + } +} + +var obj: Klass = new Klass(); +obj[prop](); diff --git a/src/MapleFE/test/typescript/unit_tests/computed-func-name.ts.result b/src/MapleFE/test/typescript/unit_tests/computed-func-name.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..820936de5bffb8298c0271c93bf259163fc7ed80 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/computed-func-name.ts.result @@ -0,0 +1,23 @@ +Matched 7 tokens. +Matched 37 tokens. +Matched 47 tokens. +Matched 54 tokens. +============= Module =========== +== Sub Tree == +js_let Decl: prop="foo" +== Sub Tree == +class Klass + Fields: + () -> + Instance Initializer: + Constructors: + Methods: + func () throws: + console.log(prop) + LocalClasses: + LocalInterfaces: + +== Sub Tree == +js_var Decl: obj=new Klass() +== Sub Tree == +obj[prop]() diff --git a/src/MapleFE/test/typescript/unit_tests/computed-func-name2.js.result b/src/MapleFE/test/typescript/unit_tests/computed-func-name2.js.result new file mode 100644 index 0000000000000000000000000000000000000000..f20041335dcc2d91f460d5dcddf3a16b4830dc12 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/computed-func-name2.js.result @@ -0,0 +1,38 @@ +Matched 5 tokens. +Matched 68 tokens. +Matched 76 tokens. +Matched 83 tokens. +Matched 90 tokens. +Matched 94 tokens. +Matched 102 tokens. +Matched 109 tokens. +Matched 116 tokens. 
+============= Module =========== +== Sub Tree == +js_var Decl: prop="foo" +== Sub Tree == +js_var Decl: Klass=func () throws: + func Klass() throws: + + Klass.prototype[prop] Assign func () throws: + console.log("prop =",prop) + + Klass.prototype.bar Assign func () throws: + console.log("Function bar()") + + return Klass +() +== Sub Tree == +js_var Decl: obj=new Klass() +== Sub Tree == +obj[prop]() +== Sub Tree == +obj["foo"]() +== Sub Tree == +prop Assign "bar" +== Sub Tree == +js_var Decl: obj2=new Klass() +== Sub Tree == +obj2[prop]() +== Sub Tree == +obj2["foo"]() diff --git a/src/MapleFE/test/typescript/unit_tests/computed-func-name2.ts b/src/MapleFE/test/typescript/unit_tests/computed-func-name2.ts new file mode 100644 index 0000000000000000000000000000000000000000..65c6ee4e70f34935d330cc3f9e4cffba10721511 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/computed-func-name2.ts @@ -0,0 +1,20 @@ +let prop: string = "foo"; + +class Klass { + [key: string]: () => void; + public [prop]() { + console.log("prop =", prop); + } + public bar() { + console.log("Function bar()"); + } +} + +var obj: Klass = new Klass(); +obj[prop](); // prop = foo +obj["foo"](); // prop = foo + +prop = "bar"; +var obj2: Klass = new Klass(); +obj2[prop](); // Function bar() +obj2["foo"](); // prop = bar diff --git a/src/MapleFE/test/typescript/unit_tests/computed-func-name2.ts.result b/src/MapleFE/test/typescript/unit_tests/computed-func-name2.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..59e2da842e36c91ce29d0aca4700b0de56d05c85 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/computed-func-name2.ts.result @@ -0,0 +1,40 @@ +Matched 7 tokens. +Matched 52 tokens. +Matched 62 tokens. +Matched 69 tokens. +Matched 76 tokens. +Matched 80 tokens. +Matched 90 tokens. +Matched 97 tokens. +Matched 104 tokens. +============= Module =========== +== Sub Tree == +js_let Decl: prop="foo" +== Sub Tree == +class Klass + Fields: + () -> + Instance Initializer: + Constructors: + Methods: + func () throws: + console.log("prop =",prop) + func bar() throws: + console.log("Function bar()") + LocalClasses: + LocalInterfaces: + +== Sub Tree == +js_var Decl: obj=new Klass() +== Sub Tree == +obj[prop]() +== Sub Tree == +obj["foo"]() +== Sub Tree == +prop Assign "bar" +== Sub Tree == +js_var Decl: obj2=new Klass() +== Sub Tree == +obj2[prop]() +== Sub Tree == +obj2["foo"]() diff --git a/src/MapleFE/test/typescript/unit_tests/computed-prop-name.ts b/src/MapleFE/test/typescript/unit_tests/computed-prop-name.ts new file mode 100644 index 0000000000000000000000000000000000000000..404decc67c4709da9921e7ad00db4c0df0614a50 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/computed-prop-name.ts @@ -0,0 +1,8 @@ +const prop = "flag"; + +class Klass { + public [prop]?: string = "example"; +} + +var obj: Klass = new Klass(); +console.log(obj[prop]); diff --git a/src/MapleFE/test/typescript/unit_tests/computed-prop-name.ts.result b/src/MapleFE/test/typescript/unit_tests/computed-prop-name.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..80ebc2aad4aa944c9902c7df182d0d9181efce22 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/computed-prop-name.ts.result @@ -0,0 +1,21 @@ +Matched 5 tokens. +Matched 19 tokens. +Matched 29 tokens. +Matched 39 tokens. 
+============= Module =========== +== Sub Tree == +js_const Decl: prop="flag" +== Sub Tree == +class Klass + Fields: + [prop] : string + Instance Initializer: + Constructors: + Methods: + LocalClasses: + LocalInterfaces: + +== Sub Tree == +js_var Decl: obj=new Klass() +== Sub Tree == +console.log(obj[prop]) diff --git a/src/MapleFE/test/typescript/unit_tests/computed-prop-name2.ts b/src/MapleFE/test/typescript/unit_tests/computed-prop-name2.ts new file mode 100644 index 0000000000000000000000000000000000000000..2c9d4f65e21c5494f015a247023c3b6cb6e13709 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/computed-prop-name2.ts @@ -0,0 +1,9 @@ +function func() { + return {[prop] : "abc" }; +} + +var prop: string | number = "my key"; +console.log(func()); +prop = -12.3; +console.log(func()); + diff --git a/src/MapleFE/test/typescript/unit_tests/computed-prop-name2.ts.result b/src/MapleFE/test/typescript/unit_tests/computed-prop-name2.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..90b354456c6c4b3fa99e322159ab0f783c088cdc --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/computed-prop-name2.ts.result @@ -0,0 +1,18 @@ +Matched 15 tokens. +Matched 24 tokens. +Matched 33 tokens. +Matched 37 tokens. +Matched 46 tokens. +============= Module =========== +== Sub Tree == +func func() throws: + return {[prop] : :"abc"} + +== Sub Tree == +js_var Decl: prop="my key" +== Sub Tree == +console.log(func()) +== Sub Tree == +prop Assign -12.3 +== Sub Tree == +console.log(func()) diff --git a/src/MapleFE/test/typescript/unit_tests/conditional-type.ts b/src/MapleFE/test/typescript/unit_tests/conditional-type.ts new file mode 100644 index 0000000000000000000000000000000000000000..34aeef2a1635e481bd8ac2a3e85c35e65bcba5bf --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/conditional-type.ts @@ -0,0 +1,5 @@ +export type X = A extends true ? any[] : string; + +var x: X = [1, 2, 3]; +var y: X = "abc"; +console.log(x, y); diff --git a/src/MapleFE/test/typescript/unit_tests/conditional-type.ts.result b/src/MapleFE/test/typescript/unit_tests/conditional-type.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..cc85ca05590b18ccc7942827ca9700dcaecf897e --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/conditional-type.ts.result @@ -0,0 +1,13 @@ +Matched 21 tokens. +Matched 37 tokens. +Matched 44 tokens. +Matched 53 tokens. +============= Module =========== +== Sub Tree == +export { type X = A extends true ? prim array-TBD : string} +== Sub Tree == +js_var Decl: x=[1,2,3] +== Sub Tree == +js_var Decl: y="abc" +== Sub Tree == +console.log(x,y) diff --git a/src/MapleFE/test/typescript/unit_tests/conditional-type2.ts b/src/MapleFE/test/typescript/unit_tests/conditional-type2.ts new file mode 100644 index 0000000000000000000000000000000000000000..d0e37de5d617f885f8f06d78b54ef1e3db109b30 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/conditional-type2.ts @@ -0,0 +1 @@ +type Type = { [K in keyof B]: B[K] extends T ? never : K }; diff --git a/src/MapleFE/test/typescript/unit_tests/conditional-type2.ts.result b/src/MapleFE/test/typescript/unit_tests/conditional-type2.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..486f6911c9ab3a89fc244803614bfbc34bed6212 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/conditional-type2.ts.result @@ -0,0 +1,4 @@ +Matched 28 tokens. +============= Module =========== +== Sub Tree == + type Type = {[K in keyof B] : B[K] extends T ? 
never : K } diff --git a/src/MapleFE/test/typescript/unit_tests/conditional-type3.ts b/src/MapleFE/test/typescript/unit_tests/conditional-type3.ts new file mode 100644 index 0000000000000000000000000000000000000000..5ee4d803b9334354adba4c0e0b0182b124bbe60d --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/conditional-type3.ts @@ -0,0 +1,2 @@ +type Names = { [K in keyof T]: T[K] extends (...args: Array) => any ? K : never; }[keyof T] & string; +type MyType>> = Required[M] extends (...args: any[]) => any ? string : number; diff --git a/src/MapleFE/test/typescript/unit_tests/conditional-type3.ts.result b/src/MapleFE/test/typescript/unit_tests/conditional-type3.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..916713c050178409e45d488605ab3d378df5f608 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/conditional-type3.ts.result @@ -0,0 +1,7 @@ +Matched 43 tokens. +Matched 83 tokens. +============= Module =========== +== Sub Tree == + type Names = intersect = {[K in keyof T] : T[K] extends (args) -> ? K : never }[ keyof T] & string +== Sub Tree == + type MyType = Required[M] extends (args) -> ? string : number diff --git a/src/MapleFE/test/typescript/unit_tests/const-as-prop-name.ts b/src/MapleFE/test/typescript/unit_tests/const-as-prop-name.ts new file mode 100644 index 0000000000000000000000000000000000000000..aae109523dd5f3dfa52d66712606f8607a4e2e9f --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/const-as-prop-name.ts @@ -0,0 +1,3 @@ +interface IFace { + const: boolean | null; +} diff --git a/src/MapleFE/test/typescript/unit_tests/const-as-prop-name.ts.result b/src/MapleFE/test/typescript/unit_tests/const-as-prop-name.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..fc72ca5a897a23ab14653ae1e49983a10df33626 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/const-as-prop-name.ts.result @@ -0,0 +1,4 @@ +Matched 10 tokens. +============= Module =========== +== Sub Tree == +ts_interface: IFace {const } diff --git a/src/MapleFE/test/typescript/unit_tests/construct-signature.ts b/src/MapleFE/test/typescript/unit_tests/construct-signature.ts new file mode 100644 index 0000000000000000000000000000000000000000..d74e3ed3bba353b2f62117087add916faa578bda --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/construct-signature.ts @@ -0,0 +1,13 @@ +// https://www.typescriptlang.org/docs/handbook/2/classes.html#abstract-construct-signatures +// used in cocos creator deserialize.ts + +// construct signature using generics +// This creates a type alias for constructors that takes no arguments +type T_Ctor = new () => T; + +interface I_Class extends T_Ctor { + __vals__: string[]; +} + +type AnyCtor = T_Ctor; +type AnyClass = I_Class; diff --git a/src/MapleFE/test/typescript/unit_tests/construct-signature.ts.result b/src/MapleFE/test/typescript/unit_tests/construct-signature.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..716378336f60110d57d3a1d8c2568dd93c4985c4 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/construct-signature.ts.result @@ -0,0 +1,13 @@ +Matched 12 tokens. +Matched 30 tokens. +Matched 38 tokens. +Matched 46 tokens. 
+============= Module =========== +== Sub Tree == + type T_Ctor = new () -> +== Sub Tree == +ts_interface: I_Class {__vals__ } +== Sub Tree == + type AnyCtor = T_Ctor +== Sub Tree == + type AnyClass = I_Class diff --git a/src/MapleFE/test/typescript/unit_tests/constructor-1.ts b/src/MapleFE/test/typescript/unit_tests/constructor-1.ts new file mode 100644 index 0000000000000000000000000000000000000000..637dcf847794cb2d0913102a555b0cc23590715e --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/constructor-1.ts @@ -0,0 +1,11 @@ +// Constructor function for Person objects +function Person(first, last, age, eye) { + this.firstName = first; + this.lastName = last; + this.age = age; + this.eyeColor = eye; +} + +//// Create a Person object +var myFather = new Person("John", "Doe", 50, "blue"); +console.log(myFather.age); diff --git a/src/MapleFE/test/typescript/unit_tests/constructor-1.ts.result b/src/MapleFE/test/typescript/unit_tests/constructor-1.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..c89f8c461daa73f532e12811afc55542b9cfe116 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/constructor-1.ts.result @@ -0,0 +1,15 @@ +Matched 37 tokens. +Matched 52 tokens. +Matched 61 tokens. +============= Module =========== +== Sub Tree == +func Person(first,last,age,eye) throws: + this.firstName Assign first + this.lastName Assign last + this.age Assign age + this.eyeColor Assign eye + +== Sub Tree == +js_var Decl: myFather=new Person("John","Doe",50,"blue") +== Sub Tree == +console.log(myFather.age) diff --git a/src/MapleFE/test/typescript/unit_tests/constructor-prop.ts b/src/MapleFE/test/typescript/unit_tests/constructor-prop.ts new file mode 100644 index 0000000000000000000000000000000000000000..7bb638b9992913d33a374532397cd45f59711c9b --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/constructor-prop.ts @@ -0,0 +1,11 @@ +// object constructor property used in cocos-creator class.ts +function func() {} +let f = new func(); +console.log(f.constructor); +f.constructor.prop1 = 1; +Object.defineProperty(f.constructor, "prop2", { + value: 2, + writable: true, + enumerable: true, +}); +console.log(f.constructor); diff --git a/src/MapleFE/test/typescript/unit_tests/constructor-prop.ts.result b/src/MapleFE/test/typescript/unit_tests/constructor-prop.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..56d9cde7d9d948cc2890f1c64d0dc0c4233a8059 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/constructor-prop.ts.result @@ -0,0 +1,20 @@ +Matched 6 tokens. +Matched 14 tokens. +Matched 23 tokens. +Matched 31 tokens. +Matched 57 tokens. +Matched 66 tokens. 
+============= Module =========== +== Sub Tree == +func func() throws: + +== Sub Tree == +js_let Decl: f=new func() +== Sub Tree == +console.log(f.constructor) +== Sub Tree == +f.constructor.prop1 Assign 1 +== Sub Tree == +Object.defineProperty(f.constructor,"prop2", {value:2, writable:true, enumerable:true}) +== Sub Tree == +console.log(f.constructor) diff --git a/src/MapleFE/test/typescript/unit_tests/continue-as-prop-name.ts b/src/MapleFE/test/typescript/unit_tests/continue-as-prop-name.ts new file mode 100644 index 0000000000000000000000000000000000000000..871a70a35a3315bab7e1c0c06eb8035f6403a3b0 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/continue-as-prop-name.ts @@ -0,0 +1,3 @@ +interface IFace { + continue: string; +} diff --git a/src/MapleFE/test/typescript/unit_tests/continue-as-prop-name.ts.result b/src/MapleFE/test/typescript/unit_tests/continue-as-prop-name.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..fd863b8f4c7b5e3441bc5eb7169e8eeb8eecc51f --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/continue-as-prop-name.ts.result @@ -0,0 +1,4 @@ +Matched 8 tokens. +============= Module =========== +== Sub Tree == +ts_interface: IFace {continue } diff --git a/src/MapleFE/test/typescript/unit_tests/continue-stmt.ts b/src/MapleFE/test/typescript/unit_tests/continue-stmt.ts new file mode 100644 index 0000000000000000000000000000000000000000..244d1a8e6ee42a3c7d8038075ab9546a989630f0 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/continue-stmt.ts @@ -0,0 +1,8 @@ +for (var i: number = 1; i < 10; ++i) { + if (i < 5) { + console.log(i, " is less than 5"); + continue; + } + if (i == 8) break; + console.log(i, " is greater than 4"); +} diff --git a/src/MapleFE/test/typescript/unit_tests/continue-stmt.ts.result b/src/MapleFE/test/typescript/unit_tests/continue-stmt.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..1bd275e2d03beac414c6c52a7ef469cb4814dbe1 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/continue-stmt.ts.result @@ -0,0 +1,18 @@ +Matched 54 tokens. +============= Module =========== +== Sub Tree == +for ( ) + cond-branch cond:i LT 5 + true branch : + console.log(i," is less than 5") + continue: + + false branch : + + cond-branch cond:i EQ 8 + true branch : + break: + false branch : + + console.log(i," is greater than 4") + diff --git a/src/MapleFE/test/typescript/unit_tests/ctor-interface.ts b/src/MapleFE/test/typescript/unit_tests/ctor-interface.ts new file mode 100644 index 0000000000000000000000000000000000000000..5a164709fcdbc19a1d481ef07b3de1e13ac54cba --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/ctor-interface.ts @@ -0,0 +1,6 @@ +class Klass { + f: number = 123; +} +interface KlassConstructor { + new (): Klass; +} diff --git a/src/MapleFE/test/typescript/unit_tests/ctor-interface.ts.result b/src/MapleFE/test/typescript/unit_tests/ctor-interface.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..8bd1120216f391c703ae39114e638d66d319d89d --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/ctor-interface.ts.result @@ -0,0 +1,16 @@ +Matched 10 tokens. +Matched 20 tokens. 
+============= Module =========== +== Sub Tree == +class Klass + Fields: + f=123 + Instance Initializer: + Constructors: + Methods: + LocalClasses: + LocalInterfaces: + +== Sub Tree == +ts_interface: KlassConstructor {func () throws: + } diff --git a/src/MapleFE/test/typescript/unit_tests/ctor-signature.ts b/src/MapleFE/test/typescript/unit_tests/ctor-signature.ts new file mode 100644 index 0000000000000000000000000000000000000000..90c39ba9443c1bee72e1699e6143f40853ca36b0 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/ctor-signature.ts @@ -0,0 +1,21 @@ +interface IFace { + name: string; +} + +interface ICtor { + new(name: string): IFace; +} + +class Car implements IFace { + name: string; + constructor(n: string) { + this.name = n; + } +} + +function carFactory(myClass: ICtor, name: string) { + return new myClass(name); +} + +let car = carFactory(Car, "myCar"); +console.log(car); diff --git a/src/MapleFE/test/typescript/unit_tests/ctor-signature.ts.result b/src/MapleFE/test/typescript/unit_tests/ctor-signature.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..931c9a50452fa3eeeff76328cbc63b4a65cbec00 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/ctor-signature.ts.result @@ -0,0 +1,32 @@ +Matched 8 tokens. +Matched 21 tokens. +Matched 45 tokens. +Matched 65 tokens. +Matched 75 tokens. +Matched 82 tokens. +============= Module =========== +== Sub Tree == +ts_interface: IFace {name } +== Sub Tree == +ts_interface: ICtor {func (name) throws: + } +== Sub Tree == +class Car + Fields: + name + Instance Initializer: + Constructors: + constructor (n) throws: + this.name Assign n + Methods: + LocalClasses: + LocalInterfaces: + +== Sub Tree == +func carFactory(myClass,name) throws: + return new myClass(name) + +== Sub Tree == +js_let Decl: car=carFactory(Car,"myCar") +== Sub Tree == +console.log(car) diff --git a/src/MapleFE/test/typescript/unit_tests/ctor-with-function.ts b/src/MapleFE/test/typescript/unit_tests/ctor-with-function.ts new file mode 100644 index 0000000000000000000000000000000000000000..62f6a90078fb43af2a843e2a91f3380fd0bc23c8 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/ctor-with-function.ts @@ -0,0 +1,11 @@ +//const obj = new Pool(() => ({ subModel: null!, passIdx: -1, dynamicOffsets: [], lights: [] }), 16); +class Klass { + func: () => any; + num: number; + constructor(f: () => any, n: number) { + this.func = f; + this.num = n; + } +} +const obj = new Klass(() => ({ n: 1 }), 16); +console.log(obj); diff --git a/src/MapleFE/test/typescript/unit_tests/ctor-with-function.ts.result b/src/MapleFE/test/typescript/unit_tests/ctor-with-function.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..3a5f7caf3a3e905858b6f3eb3cf55d534f295c90 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/ctor-with-function.ts.result @@ -0,0 +1,21 @@ +Matched 42 tokens. +Matched 62 tokens. +Matched 69 tokens. 
+============= Module =========== +== Sub Tree == +class Klass + Fields: + func num + Instance Initializer: + Constructors: + constructor (f,n) throws: + this.func Assign f + this.num Assign n + Methods: + LocalClasses: + LocalInterfaces: + +== Sub Tree == +js_const Decl: obj=new Klass(() -> {n:1},16) +== Sub Tree == +console.log(obj) diff --git a/src/MapleFE/test/typescript/unit_tests/ctor-with-function2.ts b/src/MapleFE/test/typescript/unit_tests/ctor-with-function2.ts new file mode 100644 index 0000000000000000000000000000000000000000..d05c98b56d6a49618409cd6042c58978fea10ea3 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/ctor-with-function2.ts @@ -0,0 +1,10 @@ +class Klass { + func: () => any; + num: number; + constructor(f: () => any, n: number) { + this.func = f; + this.num = n; + } +} +const obj = new Klass(() => { n: 1 }, 16); +console.log(obj); diff --git a/src/MapleFE/test/typescript/unit_tests/ctor-with-function2.ts.result b/src/MapleFE/test/typescript/unit_tests/ctor-with-function2.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..ec43f0446f217e607308eaf652d9b396185b269e --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/ctor-with-function2.ts.result @@ -0,0 +1,21 @@ +Matched 42 tokens. +Matched 60 tokens. +Matched 67 tokens. +============= Module =========== +== Sub Tree == +class Klass + Fields: + func num + Instance Initializer: + Constructors: + constructor (f,n) throws: + this.func Assign f + this.num Assign n + Methods: + LocalClasses: + LocalInterfaces: + +== Sub Tree == +js_const Decl: obj=new Klass(() -> {n:1},16) +== Sub Tree == +console.log(obj) diff --git a/src/MapleFE/test/typescript/unit_tests/debugger.ts b/src/MapleFE/test/typescript/unit_tests/debugger.ts new file mode 100644 index 0000000000000000000000000000000000000000..6131fe0b02277946b4b75a16da0e37a498da8959 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/debugger.ts @@ -0,0 +1,3 @@ +if (true) { + debugger; +} diff --git a/src/MapleFE/test/typescript/unit_tests/debugger.ts.result b/src/MapleFE/test/typescript/unit_tests/debugger.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..8aedf7ee087ab5ddb5ef28cf23a4ff28b5d089a5 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/debugger.ts.result @@ -0,0 +1,8 @@ +Matched 8 tokens. +============= Module =========== +== Sub Tree == +cond-branch cond:true +true branch : + debugger +false branch : + diff --git a/src/MapleFE/test/typescript/unit_tests/declare-class-with-ctor.ts b/src/MapleFE/test/typescript/unit_tests/declare-class-with-ctor.ts new file mode 100644 index 0000000000000000000000000000000000000000..72198abe66005438467d2f1bc5353b2aeeb465a1 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/declare-class-with-ctor.ts @@ -0,0 +1,8 @@ +declare class Klass { + constructor (name: string) + name: string; +} +declare class Klass2 { + constructor (name: string); + name: string; +} diff --git a/src/MapleFE/test/typescript/unit_tests/declare-class-with-ctor.ts.result b/src/MapleFE/test/typescript/unit_tests/declare-class-with-ctor.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..36dd5d810502e717caa8e6933edbd9b29e67faa2 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/declare-class-with-ctor.ts.result @@ -0,0 +1,25 @@ +Matched 15 tokens. +Matched 31 tokens. 
+============= Module =========== +== Sub Tree == +declare class Klass + Fields: + name + Instance Initializer: + Constructors: + constructor (name) throws: + Methods: + LocalClasses: + LocalInterfaces: + +== Sub Tree == +declare class Klass2 + Fields: + name + Instance Initializer: + Constructors: + constructor (name) throws: + Methods: + LocalClasses: + LocalInterfaces: + diff --git a/src/MapleFE/test/typescript/unit_tests/declare-class.ts b/src/MapleFE/test/typescript/unit_tests/declare-class.ts new file mode 100644 index 0000000000000000000000000000000000000000..27a8f6bbd9af48aee64a160e71944ab2155b04a6 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/declare-class.ts @@ -0,0 +1,3 @@ +export declare class Klass { + obj: Object; +} diff --git a/src/MapleFE/test/typescript/unit_tests/declare-class.ts.result b/src/MapleFE/test/typescript/unit_tests/declare-class.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..c76ea7abe40e06fe503f4fd97f89177e328b861d --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/declare-class.ts.result @@ -0,0 +1,12 @@ +Matched 10 tokens. +============= Module =========== +== Sub Tree == +export {declare class Klass + Fields: + obj + Instance Initializer: + Constructors: + Methods: + LocalClasses: + LocalInterfaces: +} diff --git a/src/MapleFE/test/typescript/unit_tests/declare-const.ts b/src/MapleFE/test/typescript/unit_tests/declare-const.ts new file mode 100644 index 0000000000000000000000000000000000000000..7e6a6408d6e237f09d26567cc0a6353e0fcd734f --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/declare-const.ts @@ -0,0 +1 @@ +declare const x: any; diff --git a/src/MapleFE/test/typescript/unit_tests/declare-const.ts.result b/src/MapleFE/test/typescript/unit_tests/declare-const.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..66fb19511fe51abeb85b400a83ec8720eb9efd82 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/declare-const.ts.result @@ -0,0 +1,4 @@ +Matched 6 tokens. +============= Module =========== +== Sub Tree == +declare js_const Decl: x diff --git a/src/MapleFE/test/typescript/unit_tests/declare-global.ts b/src/MapleFE/test/typescript/unit_tests/declare-global.ts new file mode 100644 index 0000000000000000000000000000000000000000..7aaeaa55cd472a2f79fbc1b88a9af9415926a914 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/declare-global.ts @@ -0,0 +1,5 @@ +// declare an interface in the global scope +declare global { + interface IFace { name: string; } +} +export var obj: IFace = { name: "abc" }; diff --git a/src/MapleFE/test/typescript/unit_tests/declare-global.ts.result b/src/MapleFE/test/typescript/unit_tests/declare-global.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..83c9e73a4321b788057672bccc87565eacbd3e68 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/declare-global.ts.result @@ -0,0 +1,7 @@ +Matched 12 tokens. +Matched 24 tokens. 
+============= Module =========== +== Sub Tree == +declare ts_interface: IFace {name } +== Sub Tree == +export {js_var Decl: obj= {name:"abc"}} diff --git a/src/MapleFE/test/typescript/unit_tests/declare-global2.ts b/src/MapleFE/test/typescript/unit_tests/declare-global2.ts new file mode 100644 index 0000000000000000000000000000000000000000..ae33b890770a09d1deeb7d5980b37794a9cb074a --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/declare-global2.ts @@ -0,0 +1,6 @@ +export interface IFace { } + +declare global { + const flag1: boolean; + const flag2: boolean; +} diff --git a/src/MapleFE/test/typescript/unit_tests/declare-global2.ts.result b/src/MapleFE/test/typescript/unit_tests/declare-global2.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..28811e1ed540364440cad01e257bf91dedc0ac8a --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/declare-global2.ts.result @@ -0,0 +1,7 @@ +Matched 5 tokens. +Matched 19 tokens. +============= Module =========== +== Sub Tree == +export {ts_interface: IFace { }} +== Sub Tree == +declare js_const Decl: flag1js_const Decl: flag2 diff --git a/src/MapleFE/test/typescript/unit_tests/declare-interface.ts b/src/MapleFE/test/typescript/unit_tests/declare-interface.ts new file mode 100644 index 0000000000000000000000000000000000000000..060b2debb46a0a15920a4e5f29a9dcd5f1f28d5c --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/declare-interface.ts @@ -0,0 +1,4 @@ +declare interface IFace { + readonly n: number; + [index: number]: T; +} diff --git a/src/MapleFE/test/typescript/unit_tests/declare-interface.ts.result b/src/MapleFE/test/typescript/unit_tests/declare-interface.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..a8cc988e84ff89312aaa153d057346772352ffbe --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/declare-interface.ts.result @@ -0,0 +1,4 @@ +Matched 21 tokens. +============= Module =========== +== Sub Tree == +declare ts_interface: IFace {numeric index type: Tn } diff --git a/src/MapleFE/test/typescript/unit_tests/declare-module.ts b/src/MapleFE/test/typescript/unit_tests/declare-module.ts new file mode 100644 index 0000000000000000000000000000000000000000..85bf919ac9bb5130c82f5e667f7b08351e746162 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/declare-module.ts @@ -0,0 +1,3 @@ +declare module Module { + function func(s: string): string; +} diff --git a/src/MapleFE/test/typescript/unit_tests/declare-module.ts.result b/src/MapleFE/test/typescript/unit_tests/declare-module.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..91105793f7e036902b7a378d583bcc2b551d6b02 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/declare-module.ts.result @@ -0,0 +1,8 @@ +Matched 15 tokens. 
+============= Module =========== +== Sub Tree == +declare ============= Module =========== +== Sub Tree == +func func(s) throws: + + diff --git a/src/MapleFE/test/typescript/unit_tests/declare-module2.ts b/src/MapleFE/test/typescript/unit_tests/declare-module2.ts new file mode 100644 index 0000000000000000000000000000000000000000..be0850e00dc80128aecea7f16248ddf395db0da6 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/declare-module2.ts @@ -0,0 +1,4 @@ +declare module "FuncModule" { + const func: () => boolean; + export = func; +} diff --git a/src/MapleFE/test/typescript/unit_tests/declare-module2.ts.result b/src/MapleFE/test/typescript/unit_tests/declare-module2.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..f033bda5aa548b7d452da14a315493b66c978793 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/declare-module2.ts.result @@ -0,0 +1,9 @@ +Matched 17 tokens. +============= Module =========== +== Sub Tree == +declare ============= Module =========== +== Sub Tree == +js_const Decl: func +== Sub Tree == +export { SINGLE func} + diff --git a/src/MapleFE/test/typescript/unit_tests/declare-module3.ts b/src/MapleFE/test/typescript/unit_tests/declare-module3.ts new file mode 100644 index 0000000000000000000000000000000000000000..c637d62d2f948ec9afe5a190f08889e38ec31158 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/declare-module3.ts @@ -0,0 +1 @@ +declare module "mod/runtime" {} diff --git a/src/MapleFE/test/typescript/unit_tests/declare-module3.ts.result b/src/MapleFE/test/typescript/unit_tests/declare-module3.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..de33fd5c99bb131cbcd4bf76152e63a0b73c3e80 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/declare-module3.ts.result @@ -0,0 +1,5 @@ +Matched 5 tokens. +============= Module =========== +== Sub Tree == +declare ============= Module =========== + diff --git a/src/MapleFE/test/typescript/unit_tests/declare-module4.ts b/src/MapleFE/test/typescript/unit_tests/declare-module4.ts new file mode 100644 index 0000000000000000000000000000000000000000..931c5ee5519199619f90912bc78c8154bca0b814 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/declare-module4.ts @@ -0,0 +1,8 @@ +export var x: number; +declare global { + module Module { + interface IFace { + expect: string; + } + } +} diff --git a/src/MapleFE/test/typescript/unit_tests/declare-module4.ts.result b/src/MapleFE/test/typescript/unit_tests/declare-module4.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..5a444572ab6cbf9a1af96a66f9729b7f5440adc9 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/declare-module4.ts.result @@ -0,0 +1,10 @@ +Matched 6 tokens. +Matched 22 tokens. 
+============= Module =========== +== Sub Tree == +export {js_var Decl: x} +== Sub Tree == +declare ============= Module =========== +== Sub Tree == +ts_interface: IFace {expect } + diff --git a/src/MapleFE/test/typescript/unit_tests/declare-namespace.ts b/src/MapleFE/test/typescript/unit_tests/declare-namespace.ts new file mode 100644 index 0000000000000000000000000000000000000000..d5204a4e29c0928d15c36a7ded51a62d8264609d --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/declare-namespace.ts @@ -0,0 +1,9 @@ +export function func(opt?: func.Opts) { + opt = opt || {}; +} + +export declare namespace func { + export interface Opts { + debug?: boolean; + } +} diff --git a/src/MapleFE/test/typescript/unit_tests/declare-namespace.ts.result b/src/MapleFE/test/typescript/unit_tests/declare-namespace.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..eeca548ef01d5c27ef2dda4be85fb6397e5024c7 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/declare-namespace.ts.result @@ -0,0 +1,11 @@ +Matched 20 tokens. +Matched 36 tokens. +============= Module =========== +== Sub Tree == +export {func func(opt?) throws: + opt Assign opt Lor {} +} +== Sub Tree == +export {declare namespace func + export {ts_interface: Opts {debug? }} +} diff --git a/src/MapleFE/test/typescript/unit_tests/declare-namespace2.ts b/src/MapleFE/test/typescript/unit_tests/declare-namespace2.ts new file mode 100644 index 0000000000000000000000000000000000000000..d8d87c5b390936a736cd477c1794dff509f3d231 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/declare-namespace2.ts @@ -0,0 +1 @@ +declare namespace NS {} diff --git a/src/MapleFE/test/typescript/unit_tests/declare-namespace2.ts.result b/src/MapleFE/test/typescript/unit_tests/declare-namespace2.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..681699f524d5d233a2feff9d5af1adbd54a40427 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/declare-namespace2.ts.result @@ -0,0 +1,5 @@ +Matched 5 tokens. +============= Module =========== +== Sub Tree == +declare namespace NS + diff --git a/src/MapleFE/test/typescript/unit_tests/declare-var.ts b/src/MapleFE/test/typescript/unit_tests/declare-var.ts new file mode 100644 index 0000000000000000000000000000000000000000..bcf2fa2a5ee28208c9c68fb04950c4003b82fcb0 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/declare-var.ts @@ -0,0 +1 @@ +declare var x: number; diff --git a/src/MapleFE/test/typescript/unit_tests/declare-var.ts.result b/src/MapleFE/test/typescript/unit_tests/declare-var.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..707d1adb0c6a26a07d99595f40cb86d5bd00d667 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/declare-var.ts.result @@ -0,0 +1,4 @@ +Matched 6 tokens. 
+============= Module =========== +== Sub Tree == +declare js_var Decl: x diff --git a/src/MapleFE/test/typescript/unit_tests/declare-var2.ts b/src/MapleFE/test/typescript/unit_tests/declare-var2.ts new file mode 100644 index 0000000000000000000000000000000000000000..7814fc2d7009074f76edbbcda8d1485b4dda6b21 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/declare-var2.ts @@ -0,0 +1 @@ +declare var v: JSON; diff --git a/src/MapleFE/test/typescript/unit_tests/declare-var2.ts.result b/src/MapleFE/test/typescript/unit_tests/declare-var2.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..87a5be12d2f9c654e9e232e54d6e857b716f706d --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/declare-var2.ts.result @@ -0,0 +1,4 @@ +Matched 6 tokens. +============= Module =========== +== Sub Tree == +declare js_var Decl: v diff --git a/src/MapleFE/test/typescript/unit_tests/deco-module.ts b/src/MapleFE/test/typescript/unit_tests/deco-module.ts new file mode 100644 index 0000000000000000000000000000000000000000..299de200d618a6e79b5f819d838ac09d006ee326 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/deco-module.ts @@ -0,0 +1,5 @@ +export function prop_deco(msg: string) { + return function (target: any, name: string) { + console.log("Accessed", name, msg, target); + }; +} diff --git a/src/MapleFE/test/typescript/unit_tests/deco-module.ts.result b/src/MapleFE/test/typescript/unit_tests/deco-module.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..726b8d368545a27168f9496a7f13290e26d0a749 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/deco-module.ts.result @@ -0,0 +1,8 @@ +Matched 37 tokens. +============= Module =========== +== Sub Tree == +export {func prop_deco(msg) throws: + return func (target,name) throws: + console.log("Accessed",name,msg,target) + +} diff --git a/src/MapleFE/test/typescript/unit_tests/default-prop.ts b/src/MapleFE/test/typescript/unit_tests/default-prop.ts new file mode 100644 index 0000000000000000000000000000000000000000..2f78feb3c49a972a8426ecac3a2f41a050c1cabb --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/default-prop.ts @@ -0,0 +1,4 @@ +const obj = { + default: {}, +}; +console.log(obj); diff --git a/src/MapleFE/test/typescript/unit_tests/default-prop.ts.result b/src/MapleFE/test/typescript/unit_tests/default-prop.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..3a075fd8c45ec326c8d8352eccd399c55a7d1745 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/default-prop.ts.result @@ -0,0 +1,7 @@ +Matched 11 tokens. +Matched 18 tokens. 
+============= Module =========== +== Sub Tree == +js_const Decl: obj= {default: {}} +== Sub Tree == +console.log(obj) diff --git a/src/MapleFE/test/typescript/unit_tests/define-prop-get-set.ts b/src/MapleFE/test/typescript/unit_tests/define-prop-get-set.ts new file mode 100644 index 0000000000000000000000000000000000000000..a99c1546b9bbfb138839726cc96a27d95a89382f --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/define-prop-get-set.ts @@ -0,0 +1,16 @@ +var obj = { x: "abc", name: "" }; + +Object.defineProperty(obj, "name", { + get(this) { + console.log("Return obj.x"); + return this["x"]; + }, + set(this, val: any) { + console.log(`Set obj.x to '${val}'`); + this["x"] = val; + }, + enumerable: false, +}); + +obj.name = "def"; +console.log(obj, obj.name); diff --git a/src/MapleFE/test/typescript/unit_tests/define-prop-get-set.ts.result b/src/MapleFE/test/typescript/unit_tests/define-prop-get-set.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..180a0342af27f281ec3ae9cad50d23c2a111b34b --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/define-prop-get-set.ts.result @@ -0,0 +1,20 @@ +Matched 13 tokens. +Matched 74 tokens. +Matched 80 tokens. +Matched 91 tokens. +Matched 92 tokens. +============= Module =========== +== Sub Tree == +js_var Decl: obj= {x:"abc", name:""} +== Sub Tree == +Object.defineProperty(obj,"name", {get:func get(this) throws: + console.log("Return obj.x") + return this["x"] +, set:func set(this,val) throws: + console.log( template-literal: "Set obj.x to '",val,"'",NULL) + this["x"] Assign val +, enumerable:false}) +== Sub Tree == +obj.name Assign "def" +== Sub Tree == +console.log(obj,obj.name) diff --git a/src/MapleFE/test/typescript/unit_tests/delete-func.ts b/src/MapleFE/test/typescript/unit_tests/delete-func.ts new file mode 100644 index 0000000000000000000000000000000000000000..a5e6cf1014b2bfb358fb561639342da827b5a9a7 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/delete-func.ts @@ -0,0 +1,8 @@ +class Klass { + public delete(s: string) { + console.log("delete " + s); + } +} + +var obj: Klass = new Klass(); +obj.delete("key"); diff --git a/src/MapleFE/test/typescript/unit_tests/delete-func.ts.result b/src/MapleFE/test/typescript/unit_tests/delete-func.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..51eeece4050078672c37fa0780d657d6b6820b5b --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/delete-func.ts.result @@ -0,0 +1,20 @@ +Matched 22 tokens. +Matched 32 tokens. +Matched 39 tokens. 
+============= Module =========== +== Sub Tree == +class Klass + Fields: + + Instance Initializer: + Constructors: + Methods: + func delete(s) throws: + console.log("delete " Add s) + LocalClasses: + LocalInterfaces: + +== Sub Tree == +js_var Decl: obj=new Klass() +== Sub Tree == +obj.delete("key") diff --git a/src/MapleFE/test/typescript/unit_tests/delete.ts b/src/MapleFE/test/typescript/unit_tests/delete.ts new file mode 100644 index 0000000000000000000000000000000000000000..0af59349c38d6457823f8186a075bb398696b6f4 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/delete.ts @@ -0,0 +1,5 @@ +class A { + a?: number = 0; +} +var x: A = new A(); +delete x.a; diff --git a/src/MapleFE/test/typescript/unit_tests/delete.ts.result b/src/MapleFE/test/typescript/unit_tests/delete.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..50d626c96650493b032c8df2d858fffff665357d --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/delete.ts.result @@ -0,0 +1,18 @@ +Matched 11 tokens. +Matched 21 tokens. +Matched 26 tokens. +============= Module =========== +== Sub Tree == +class A + Fields: + a?=0 + Instance Initializer: + Constructors: + Methods: + LocalClasses: + LocalInterfaces: + +== Sub Tree == +js_var Decl: x=new A() +== Sub Tree == + delete x.a diff --git a/src/MapleFE/test/typescript/unit_tests/direct-fd.ts b/src/MapleFE/test/typescript/unit_tests/direct-fd.ts new file mode 100644 index 0000000000000000000000000000000000000000..40cbd1365d05c7c731f74fd5087e20a48b443ddd --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/direct-fd.ts @@ -0,0 +1,8 @@ +class Foo { + public f1: number = 0; +} + +var bar : Foo = new Foo; +bar.f1 = 456; +console.log(bar); + diff --git a/src/MapleFE/test/typescript/unit_tests/direct-fd.ts.result b/src/MapleFE/test/typescript/unit_tests/direct-fd.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..005189032378f76e6db9609a96ff8b76934a182b --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/direct-fd.ts.result @@ -0,0 +1,21 @@ +Matched 11 tokens. +Matched 19 tokens. +Matched 25 tokens. +Matched 32 tokens. +============= Module =========== +== Sub Tree == +class Foo + Fields: + f1=0 + Instance Initializer: + Constructors: + Methods: + LocalClasses: + LocalInterfaces: + +== Sub Tree == +js_var Decl: bar=new Foo() +== Sub Tree == +bar.f1 Assign 456 +== Sub Tree == +console.log(bar) diff --git a/src/MapleFE/test/typescript/unit_tests/do-while-stmt.ts b/src/MapleFE/test/typescript/unit_tests/do-while-stmt.ts new file mode 100644 index 0000000000000000000000000000000000000000..9c119e130d4a78251f48c1d8ea218f0f412fb8c5 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/do-while-stmt.ts @@ -0,0 +1,5 @@ +var k: number = 1; +do { + console.log(k); + k++; +} while (k < 10); diff --git a/src/MapleFE/test/typescript/unit_tests/do-while-stmt.ts.result b/src/MapleFE/test/typescript/unit_tests/do-while-stmt.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..200759d76d99509b98327e1470263451e213cebd --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/do-while-stmt.ts.result @@ -0,0 +1,10 @@ +Matched 7 tokens. +Matched 27 tokens. 
+============= Module =========== +== Sub Tree == +js_var Decl: k=1 +== Sub Tree == +do console.log(k) + k Inc + +while k LT 10 diff --git a/src/MapleFE/test/typescript/unit_tests/dynamic-import.ts b/src/MapleFE/test/typescript/unit_tests/dynamic-import.ts new file mode 100644 index 0000000000000000000000000000000000000000..cd79bcfcd82568e002d54c387a4e329bbd542aeb --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/dynamic-import.ts @@ -0,0 +1,2 @@ +const x: { f: typeof import("./M") } = {} as any; +console.log(x); diff --git a/src/MapleFE/test/typescript/unit_tests/dynamic-import.ts.result b/src/MapleFE/test/typescript/unit_tests/dynamic-import.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..ed15e7f81b3b3cda11b07f941dcaa9693cd01bc5 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/dynamic-import.ts.result @@ -0,0 +1,7 @@ +Matched 18 tokens. +Matched 25 tokens. +============= Module =========== +== Sub Tree == +js_const Decl: x= {} +== Sub Tree == +console.log(x) diff --git a/src/MapleFE/test/typescript/unit_tests/else-as-prop-name.ts b/src/MapleFE/test/typescript/unit_tests/else-as-prop-name.ts new file mode 100644 index 0000000000000000000000000000000000000000..dd9965a57cfc8382a32c2ae6a50b371fa371ef6d --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/else-as-prop-name.ts @@ -0,0 +1,6 @@ +class Klass { + else: number = 0; +} + +var obj: Klass = new Klass(); +console.log(obj, obj.else); diff --git a/src/MapleFE/test/typescript/unit_tests/else-as-prop-name.ts.result b/src/MapleFE/test/typescript/unit_tests/else-as-prop-name.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..b14f7e7d22946f638f60fdd316acf89110a295ee --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/else-as-prop-name.ts.result @@ -0,0 +1,18 @@ +Matched 10 tokens. +Matched 20 tokens. +Matched 31 tokens. +============= Module =========== +== Sub Tree == +class Klass + Fields: + else=0 + Instance Initializer: + Constructors: + Methods: + LocalClasses: + LocalInterfaces: + +== Sub Tree == +js_var Decl: obj=new Klass() +== Sub Tree == +console.log(obj,obj.else) diff --git a/src/MapleFE/test/typescript/unit_tests/empty-func.ts b/src/MapleFE/test/typescript/unit_tests/empty-func.ts new file mode 100644 index 0000000000000000000000000000000000000000..5ec57fbdc559363f006b0ec83652cbd4f3b3f9d4 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/empty-func.ts @@ -0,0 +1 @@ +function func() {} diff --git a/src/MapleFE/test/typescript/unit_tests/empty-func.ts.result b/src/MapleFE/test/typescript/unit_tests/empty-func.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..159c2abca77a363e9b562cdb2ddab1f732415556 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/empty-func.ts.result @@ -0,0 +1,5 @@ +Matched 6 tokens. 
+============= Module =========== +== Sub Tree == +func func() throws: + diff --git a/src/MapleFE/test/typescript/unit_tests/enum-as-prop-name.ts b/src/MapleFE/test/typescript/unit_tests/enum-as-prop-name.ts new file mode 100644 index 0000000000000000000000000000000000000000..413d5b35d0e89424ac23ff66e6a4efd6189b61dc --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/enum-as-prop-name.ts @@ -0,0 +1,3 @@ +interface IFace { + enum?: string[]; +} diff --git a/src/MapleFE/test/typescript/unit_tests/enum-as-prop-name.ts.result b/src/MapleFE/test/typescript/unit_tests/enum-as-prop-name.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..b672b9baab75017a47b4172777339453b4c412e5 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/enum-as-prop-name.ts.result @@ -0,0 +1,4 @@ +Matched 11 tokens. +============= Module =========== +== Sub Tree == +ts_interface: IFace {enum? } diff --git a/src/MapleFE/test/typescript/unit_tests/enum-function-prop.ts b/src/MapleFE/test/typescript/unit_tests/enum-function-prop.ts new file mode 100644 index 0000000000000000000000000000000000000000..7ae2919fc6f049c73b137475db570fa7db557201 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/enum-function-prop.ts @@ -0,0 +1,5 @@ +enum E { + function = "function" +} + +console.log(E.function); diff --git a/src/MapleFE/test/typescript/unit_tests/enum-function-prop.ts.result b/src/MapleFE/test/typescript/unit_tests/enum-function-prop.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..f5d20c02151036ace2241ae8da485a3176253572 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/enum-function-prop.ts.result @@ -0,0 +1,7 @@ +Matched 7 tokens. +Matched 16 tokens. +============= Module =========== +== Sub Tree == +ts_enum: E {function="function" } +== Sub Tree == +console.log(E.function) diff --git a/src/MapleFE/test/typescript/unit_tests/enum-function-prop2.ts b/src/MapleFE/test/typescript/unit_tests/enum-function-prop2.ts new file mode 100644 index 0000000000000000000000000000000000000000..eb2fbd1b7509805546537c8bb2a9e76bec13edcd --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/enum-function-prop2.ts @@ -0,0 +1,5 @@ +enum E { + function = "function" +} + +type T = Array<E.function>; diff --git a/src/MapleFE/test/typescript/unit_tests/enum-function-prop2.ts.result b/src/MapleFE/test/typescript/unit_tests/enum-function-prop2.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..5f5b36d334dc04882bcaffa84e426c82787fc0ea --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/enum-function-prop2.ts.result @@ -0,0 +1,7 @@ +Matched 7 tokens. +Matched 17 tokens. +============= Module =========== +== Sub Tree == +ts_enum: E {function="function" } +== Sub Tree == + type T = Array diff --git a/src/MapleFE/test/typescript/unit_tests/enum.ts b/src/MapleFE/test/typescript/unit_tests/enum.ts new file mode 100644 index 0000000000000000000000000000000000000000..29e2a13ca0ffa3abb8e0180b146007acae389fcd --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/enum.ts @@ -0,0 +1,7 @@ +enum ET { + TOP = "top", + BOTTOM = "bottom", +} + +let et = ET.TOP; +console.log(et); diff --git a/src/MapleFE/test/typescript/unit_tests/enum.ts.result b/src/MapleFE/test/typescript/unit_tests/enum.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..e4afff30c02415e10dfeab2420a63675b1286d5d --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/enum.ts.result @@ -0,0 +1,10 @@ +Matched 12 tokens. +Matched 19 tokens. 
+Matched 26 tokens. +============= Module =========== +== Sub Tree == +ts_enum: ET {TOP="top";BOTTOM="bottom" } +== Sub Tree == +js_let Decl: et=ET.TOP +== Sub Tree == +console.log(et) diff --git a/src/MapleFE/test/typescript/unit_tests/enum2.ts b/src/MapleFE/test/typescript/unit_tests/enum2.ts new file mode 100644 index 0000000000000000000000000000000000000000..430229eff9a22d1754690fbb81837d8989f7f88c --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/enum2.ts @@ -0,0 +1,7 @@ +export enum ET { + TOP = 1 << 0, + BOTTOM = 1 << 1, +} + +let et = ET.TOP; +console.log(et); diff --git a/src/MapleFE/test/typescript/unit_tests/enum2.ts.result b/src/MapleFE/test/typescript/unit_tests/enum2.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..60b0b04f50f3e6fa98ce66861d33f92b5ee837cd --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/enum2.ts.result @@ -0,0 +1,10 @@ +Matched 17 tokens. +Matched 24 tokens. +Matched 31 tokens. +============= Module =========== +== Sub Tree == +export {ts_enum: ET {TOP=1 Shl 0;BOTTOM=1 Shl 1 }} +== Sub Tree == +js_let Decl: et=ET.TOP +== Sub Tree == +console.log(et) diff --git a/src/MapleFE/test/typescript/unit_tests/enum3.ts b/src/MapleFE/test/typescript/unit_tests/enum3.ts new file mode 100644 index 0000000000000000000000000000000000000000..4e588449ceda7d493db9e7cf6d6afbb083195dcf --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/enum3.ts @@ -0,0 +1,3 @@ +enum E { + "" = 0, +} diff --git a/src/MapleFE/test/typescript/unit_tests/enum3.ts.result b/src/MapleFE/test/typescript/unit_tests/enum3.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..b9c2c344fee749b41b2484ce4597c74d1c0eb511 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/enum3.ts.result @@ -0,0 +1,4 @@ +Matched 8 tokens. +============= Module =========== +== Sub Tree == +ts_enum: E {"" = 0 } diff --git a/src/MapleFE/test/typescript/unit_tests/enum4.ts b/src/MapleFE/test/typescript/unit_tests/enum4.ts new file mode 100644 index 0000000000000000000000000000000000000000..7f096dc9a006ef37a5cf17f7361a2d3df629cc20 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/enum4.ts @@ -0,0 +1,4 @@ +export declare enum ET { + TOP = "top", + BOTTOM = "bottom", +} diff --git a/src/MapleFE/test/typescript/unit_tests/enum4.ts.result b/src/MapleFE/test/typescript/unit_tests/enum4.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..701cda2962b78fae780c7c8bec9d2783dcdf9577 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/enum4.ts.result @@ -0,0 +1,4 @@ +Matched 14 tokens. +============= Module =========== +== Sub Tree == +export {declare ts_enum: ET {TOP="top";BOTTOM="bottom" }} diff --git a/src/MapleFE/test/typescript/unit_tests/exp.ts b/src/MapleFE/test/typescript/unit_tests/exp.ts new file mode 100644 index 0000000000000000000000000000000000000000..f2cade21f7e97e57720bd9f96be46380b049b6ab --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/exp.ts @@ -0,0 +1,2 @@ +var x: number = 10; +console.log(x ** (2 ** 3)); diff --git a/src/MapleFE/test/typescript/unit_tests/exp.ts.result b/src/MapleFE/test/typescript/unit_tests/exp.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..ef802eb258e4623a51b6e0cedae277248f20db02 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/exp.ts.result @@ -0,0 +1,7 @@ +Matched 7 tokens. +Matched 20 tokens. 
+============= Module =========== +== Sub Tree == +js_var Decl: x=10 +== Sub Tree == +console.log(x Exp 2 Exp 3) diff --git a/src/MapleFE/test/typescript/unit_tests/export-as-prop-name.ts b/src/MapleFE/test/typescript/unit_tests/export-as-prop-name.ts new file mode 100644 index 0000000000000000000000000000000000000000..7d2c55a8dd22d80698da49c821b1b1e390e1dc53 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/export-as-prop-name.ts @@ -0,0 +1,6 @@ +interface IFace { + export() : string; +} + +var obj: IFace = { export: () => "Export" }; +console.log(obj); diff --git a/src/MapleFE/test/typescript/unit_tests/export-as-prop-name.ts.result b/src/MapleFE/test/typescript/unit_tests/export-as-prop-name.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..10708b76b405f378ac63a0d03d00b1a2f35e633c --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/export-as-prop-name.ts.result @@ -0,0 +1,11 @@ +Matched 10 tokens. +Matched 24 tokens. +Matched 31 tokens. +============= Module =========== +== Sub Tree == +ts_interface: IFace {func export() throws: + } +== Sub Tree == +js_var Decl: obj= {export:() -> "Export"} +== Sub Tree == +console.log(obj) diff --git a/src/MapleFE/test/typescript/unit_tests/export-as-prop-name2.ts b/src/MapleFE/test/typescript/unit_tests/export-as-prop-name2.ts new file mode 100644 index 0000000000000000000000000000000000000000..c1a13a308c4f6208f4a3552767c3606bd8678b25 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/export-as-prop-name2.ts @@ -0,0 +1,3 @@ +declare class Klass { + export(arg: number): string; +} diff --git a/src/MapleFE/test/typescript/unit_tests/export-as-prop-name2.ts.result b/src/MapleFE/test/typescript/unit_tests/export-as-prop-name2.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..5102c905c5b420e51d3ec11a98a2ec30bba2f100 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/export-as-prop-name2.ts.result @@ -0,0 +1,13 @@ +Matched 14 tokens. +============= Module =========== +== Sub Tree == +declare class Klass + Fields: + + Instance Initializer: + Constructors: + Methods: + func export(arg) throws: + LocalClasses: + LocalInterfaces: + diff --git a/src/MapleFE/test/typescript/unit_tests/export-as-super.ts b/src/MapleFE/test/typescript/unit_tests/export-as-super.ts new file mode 100644 index 0000000000000000000000000000000000000000..3eb9fb29cce720932f685e6b8f16bba1d86a30ba --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/export-as-super.ts @@ -0,0 +1,2 @@ +declare function func(): number; +export { func as super }; diff --git a/src/MapleFE/test/typescript/unit_tests/export-as-super.ts.result b/src/MapleFE/test/typescript/unit_tests/export-as-super.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..04c1408eb3d4b3dfd856cd0f5efe2f91ddaad437 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/export-as-super.ts.result @@ -0,0 +1,8 @@ +Matched 8 tokens. +Matched 15 tokens. 
+============= Module =========== +== Sub Tree == +declare func func() throws: + +== Sub Tree == +export {func as super} diff --git a/src/MapleFE/test/typescript/unit_tests/export-class.ts b/src/MapleFE/test/typescript/unit_tests/export-class.ts new file mode 100644 index 0000000000000000000000000000000000000000..b78a31ce1715a775609bf6539dc0288ee2394ada --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/export-class.ts @@ -0,0 +1,9 @@ +class Foo { + public f1: number = 0; + private f2: number = 0; + constructor(a: number, b: number) { + this.f1 = a; + this.f2 = b; + } +} +export = Foo; diff --git a/src/MapleFE/test/typescript/unit_tests/export-class.ts.result b/src/MapleFE/test/typescript/unit_tests/export-class.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..63c191f909c73cf2f3f4082df34f7a8c69c08237 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/export-class.ts.result @@ -0,0 +1,18 @@ +Matched 42 tokens. +Matched 46 tokens. +============= Module =========== +== Sub Tree == +class Foo + Fields: + f1=0 f2=0 + Instance Initializer: + Constructors: + constructor (a,b) throws: + this.f1 Assign a + this.f2 Assign b + Methods: + LocalClasses: + LocalInterfaces: + +== Sub Tree == +export { SINGLE Foo} diff --git a/src/MapleFE/test/typescript/unit_tests/export-declare-func.ts b/src/MapleFE/test/typescript/unit_tests/export-declare-func.ts new file mode 100644 index 0000000000000000000000000000000000000000..6be8b9b40d17f990a289fea959b18af8bd5513a1 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/export-declare-func.ts @@ -0,0 +1,2 @@ +export {}; +export declare function func<T>(d: T): T; diff --git a/src/MapleFE/test/typescript/unit_tests/export-declare-func.ts.result b/src/MapleFE/test/typescript/unit_tests/export-declare-func.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..2492af15c1738254d462e2f2b09a3d4f010bfae8 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/export-declare-func.ts.result @@ -0,0 +1,8 @@ +Matched 4 tokens. +Matched 19 tokens. +============= Module =========== +== Sub Tree == +export +== Sub Tree == +export {declare func func(d) throws: +} diff --git a/src/MapleFE/test/typescript/unit_tests/export-declare-func2.ts b/src/MapleFE/test/typescript/unit_tests/export-declare-func2.ts new file mode 100644 index 0000000000000000000000000000000000000000..8d8443bba14a4adae8428e7b622ef76747a4a5c9 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/export-declare-func2.ts @@ -0,0 +1 @@ +export declare function isPromise<T>(obj: T | Promise<T>): obj is Promise<T>; diff --git a/src/MapleFE/test/typescript/unit_tests/export-declare-func2.ts.result b/src/MapleFE/test/typescript/unit_tests/export-declare-func2.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..e0e1b6179a7ec2e907a15cc324b81d428f833719 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/export-declare-func2.ts.result @@ -0,0 +1,5 @@ +Matched 25 tokens. 
+============= Module =========== +== Sub Tree == +export {declare func isPromise(obj) throws: +} diff --git a/src/MapleFE/test/typescript/unit_tests/export-deco.ts b/src/MapleFE/test/typescript/unit_tests/export-deco.ts new file mode 100644 index 0000000000000000000000000000000000000000..ff6123a355ac8cbb85bcbc104cb7ac865240f837 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/export-deco.ts @@ -0,0 +1,8 @@ +function class_deco(ctor: Function): void { + console.log("Class constructor is :", ctor); +} + +@class_deco +export class Foo { + readonly foo_var: number = 1; +} diff --git a/src/MapleFE/test/typescript/unit_tests/export-deco.ts.result b/src/MapleFE/test/typescript/unit_tests/export-deco.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..5c959e46ace88c7ac6ac00ead8c6d4533debd4e0 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/export-deco.ts.result @@ -0,0 +1,17 @@ +Matched 20 tokens. +Matched 34 tokens. +============= Module =========== +== Sub Tree == +func class_deco(ctor) throws: + console.log("Class constructor is :",ctor) + +== Sub Tree == +export {class Foo + Fields: + foo_var=1 + Instance Initializer: + Constructors: + Methods: + LocalClasses: + LocalInterfaces: +} diff --git a/src/MapleFE/test/typescript/unit_tests/export-default-as.ts b/src/MapleFE/test/typescript/unit_tests/export-default-as.ts new file mode 100644 index 0000000000000000000000000000000000000000..b4a0da2eac8654d28ca6e4e4c6c49282ec3261d8 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/export-default-as.ts @@ -0,0 +1 @@ +export { default as X } from "./M"; diff --git a/src/MapleFE/test/typescript/unit_tests/export-default-as.ts.result b/src/MapleFE/test/typescript/unit_tests/export-default-as.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..3aa8d5dfc340238c8a13ef36bf8f1fd6c99be5af --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/export-default-as.ts.result @@ -0,0 +1,4 @@ +Matched 9 tokens. +============= Module =========== +== Sub Tree == +export {default as X} "./M" diff --git a/src/MapleFE/test/typescript/unit_tests/export-default-as2.ts b/src/MapleFE/test/typescript/unit_tests/export-default-as2.ts new file mode 100644 index 0000000000000000000000000000000000000000..497f6885918126ec8beea47e47380b8f502f5638 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/export-default-as2.ts @@ -0,0 +1 @@ +export { default as default } from "./M"; diff --git a/src/MapleFE/test/typescript/unit_tests/export-default-as2.ts.result b/src/MapleFE/test/typescript/unit_tests/export-default-as2.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..aff9533e078dcc35dabcffafcf7ab756b6fc2e70 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/export-default-as2.ts.result @@ -0,0 +1,4 @@ +Matched 9 tokens. 
+============= Module =========== +== Sub Tree == +export {default as default} "./M" diff --git a/src/MapleFE/test/typescript/unit_tests/export-default-class.ts b/src/MapleFE/test/typescript/unit_tests/export-default-class.ts new file mode 100644 index 0000000000000000000000000000000000000000..387684c291bd160ef90a476efdc1d6aec287e6f7 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/export-default-class.ts @@ -0,0 +1,5 @@ +export default class { + f1: number = 0; + f2: string = ""; +} + diff --git a/src/MapleFE/test/typescript/unit_tests/export-default-class.ts.result b/src/MapleFE/test/typescript/unit_tests/export-default-class.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..623d94e2a205f45382ad5cb95201b3b8dbf8bd9e --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/export-default-class.ts.result @@ -0,0 +1,12 @@ +Matched 17 tokens. +============= Module =========== +== Sub Tree == +export { default as class + Fields: + f1=0 f2="" + Instance Initializer: + Constructors: + Methods: + LocalClasses: + LocalInterfaces: +} diff --git a/src/MapleFE/test/typescript/unit_tests/export-default-function.ts b/src/MapleFE/test/typescript/unit_tests/export-default-function.ts new file mode 100644 index 0000000000000000000000000000000000000000..0ee9984fb9fd9147ff9fc7f5a253fb2c97c7c555 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/export-default-function.ts @@ -0,0 +1,3 @@ +export default function <T>(arg: T): T[] { + return [arg]; +} diff --git a/src/MapleFE/test/typescript/unit_tests/export-default-function.ts.result b/src/MapleFE/test/typescript/unit_tests/export-default-function.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..421d240a1b0378622e3f2af2013cf207c240b702 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/export-default-function.ts.result @@ -0,0 +1,6 @@ +Matched 22 tokens. +============= Module =========== +== Sub Tree == +export { default as func (arg) throws: + return [arg] +} diff --git a/src/MapleFE/test/typescript/unit_tests/export-func-as-import.ts b/src/MapleFE/test/typescript/unit_tests/export-func-as-import.ts new file mode 100644 index 0000000000000000000000000000000000000000..93c7e958e8c08c795b8787ff8b80ac3b156fd671 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/export-func-as-import.ts @@ -0,0 +1,4 @@ +function func(n: string, o?: string): Promise<string> { + return new Promise<string>((resolve, reject) => { resolve("OK"); }); +} +export { func as import }; diff --git a/src/MapleFE/test/typescript/unit_tests/export-func-as-import.ts.result b/src/MapleFE/test/typescript/unit_tests/export-func-as-import.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..c42ee9868155c334f0f8a9c5302c15a2241bdcba --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/export-func-as-import.ts.result @@ -0,0 +1,10 @@ +Matched 41 tokens. +Matched 48 tokens. +============= Module =========== +== Sub Tree == +func func(n,o?) 
throws: + return new Promise((resolve,reject) -> resolve("OK") +) + +== Sub Tree == +export {func as import} diff --git a/src/MapleFE/test/typescript/unit_tests/export-import.ts b/src/MapleFE/test/typescript/unit_tests/export-import.ts new file mode 100644 index 0000000000000000000000000000000000000000..f8311317972b4b0ffe51e39bbe51c6bc46239826 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/export-import.ts @@ -0,0 +1,4 @@ +import * as M from "./M"; +declare namespace NS { + export import MM = M; +} diff --git a/src/MapleFE/test/typescript/unit_tests/export-import.ts.result b/src/MapleFE/test/typescript/unit_tests/export-import.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..8660fdc0bb88ec7ef9cc0d9b31c225e22d557c2d --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/export-import.ts.result @@ -0,0 +1,9 @@ +Matched 7 tokens. +Matched 18 tokens. +============= Module =========== +== Sub Tree == +import { * as M} "./M" +== Sub Tree == +declare namespace NS + export {import {M as MM} } + diff --git a/src/MapleFE/test/typescript/unit_tests/export-import2.ts b/src/MapleFE/test/typescript/unit_tests/export-import2.ts new file mode 100644 index 0000000000000000000000000000000000000000..483b9ad6397ec420f302ea790aea5b0e36585dc2 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/export-import2.ts @@ -0,0 +1,5 @@ +import myX, * as M from "./M"; +export import getx = M.getx; +console.log(myX, getx()); +M.setx(3); +console.log(myX, getx()); diff --git a/src/MapleFE/test/typescript/unit_tests/export-import2.ts.result b/src/MapleFE/test/typescript/unit_tests/export-import2.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..bbda54c0caf3a899e3c1c39685e62a5bcb1cb1fc --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/export-import2.ts.result @@ -0,0 +1,16 @@ +Matched 9 tokens. +Matched 17 tokens. +Matched 28 tokens. +Matched 35 tokens. +Matched 46 tokens. +============= Module =========== +== Sub Tree == +import { default as myX, * as M} "./M" +== Sub Tree == +export {import {M.getx as getx} } +== Sub Tree == +console.log(myX,getx()) +== Sub Tree == +M.setx(3) +== Sub Tree == +console.log(myX,getx()) diff --git a/src/MapleFE/test/typescript/unit_tests/export-in-namespace.ts b/src/MapleFE/test/typescript/unit_tests/export-in-namespace.ts new file mode 100644 index 0000000000000000000000000000000000000000..a2b718d2d750b1ca891a61be12c1c3e973e2023b --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/export-in-namespace.ts @@ -0,0 +1,4 @@ +export declare namespace ns { + class Klass {} + export { Klass }; +} diff --git a/src/MapleFE/test/typescript/unit_tests/export-in-namespace.ts.result b/src/MapleFE/test/typescript/unit_tests/export-in-namespace.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..5f86a5325c1fe955130e409e4889f17b956c1a6f --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/export-in-namespace.ts.result @@ -0,0 +1,15 @@ +Matched 15 tokens. 
+============= Module =========== +== Sub Tree == +export {declare namespace ns + class Klass + Fields: + + Instance Initializer: + Constructors: + Methods: + LocalClasses: + LocalInterfaces: + + export {Klass} +} diff --git a/src/MapleFE/test/typescript/unit_tests/export-interface.ts b/src/MapleFE/test/typescript/unit_tests/export-interface.ts new file mode 100644 index 0000000000000000000000000000000000000000..8fcaaf808f30dcdc7ff88153b9b128f611667e86 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/export-interface.ts @@ -0,0 +1,3 @@ +export interface MapLike<T> { + [index: string]: T; +} diff --git a/src/MapleFE/test/typescript/unit_tests/export-interface.ts.result b/src/MapleFE/test/typescript/unit_tests/export-interface.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..7085ccc02b9c0e0b5f72fc9449009a10f06646b7 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/export-interface.ts.result @@ -0,0 +1,4 @@ +Matched 16 tokens. +============= Module =========== +== Sub Tree == +export {ts_interface: MapLike {string index type: T }} diff --git a/src/MapleFE/test/typescript/unit_tests/export-module.d.ts b/src/MapleFE/test/typescript/unit_tests/export-module.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..7ed719cbd4202180d58496739f3a377fe060545b --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/export-module.d.ts @@ -0,0 +1,5 @@ +declare module "export-module" { + export module Module { + const func: () => boolean; + } +} diff --git a/src/MapleFE/test/typescript/unit_tests/export-module.d.ts.result b/src/MapleFE/test/typescript/unit_tests/export-module.d.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..be802a82355a77c30467f280ea39d7fb2b2cf749 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/export-module.d.ts.result @@ -0,0 +1,10 @@ +Matched 18 tokens. +============= Module =========== +== Sub Tree == +declare ============= Module =========== +== Sub Tree == +export {============= Module =========== +== Sub Tree == +js_const Decl: func +} + diff --git a/src/MapleFE/test/typescript/unit_tests/export-single.ts b/src/MapleFE/test/typescript/unit_tests/export-single.ts new file mode 100644 index 0000000000000000000000000000000000000000..63c0c96edfea8fd0152dc34a47713119cb6f880a --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/export-single.ts @@ -0,0 +1,4 @@ +class Klass { + name: string = ""; +} +export = Klass; diff --git a/src/MapleFE/test/typescript/unit_tests/export-single.ts.result b/src/MapleFE/test/typescript/unit_tests/export-single.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..530373f531e6a8529f1487355532497834ed7591 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/export-single.ts.result @@ -0,0 +1,15 @@ +Matched 10 tokens. +Matched 14 tokens. 
+============= Module =========== +== Sub Tree == +class Klass + Fields: + name="" + Instance Initializer: + Constructors: + Methods: + LocalClasses: + LocalInterfaces: + +== Sub Tree == +export { SINGLE Klass} diff --git a/src/MapleFE/test/typescript/unit_tests/export-type.ts b/src/MapleFE/test/typescript/unit_tests/export-type.ts new file mode 100644 index 0000000000000000000000000000000000000000..779287f898611afb4e2c780310c2dcc3e751007c --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/export-type.ts @@ -0,0 +1,10 @@ +class Foo { + public out: number = 0; + public in: number = 0; + constructor(a: number, b: number) { + this.out = a; + this.in = b; + } +} + +export type { Foo }; diff --git a/src/MapleFE/test/typescript/unit_tests/export-type.ts.result b/src/MapleFE/test/typescript/unit_tests/export-type.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..fb266b095ac7a49342bbaf2b55163d9f00e9fb03 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/export-type.ts.result @@ -0,0 +1,18 @@ +Matched 42 tokens. +Matched 48 tokens. +============= Module =========== +== Sub Tree == +class Foo + Fields: + out=0 in=0 + Instance Initializer: + Constructors: + constructor (a,b) throws: + this.out Assign a + this.in Assign b + Methods: + LocalClasses: + LocalInterfaces: + +== Sub Tree == +export type {Foo} diff --git a/src/MapleFE/test/typescript/unit_tests/export-type2.ts b/src/MapleFE/test/typescript/unit_tests/export-type2.ts new file mode 100644 index 0000000000000000000000000000000000000000..5b131b5b183d860a60ca338472abd67c8f9bb4a2 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/export-type2.ts @@ -0,0 +1 @@ +export type { Foo } from "./export-type"; diff --git a/src/MapleFE/test/typescript/unit_tests/export-type2.ts.result b/src/MapleFE/test/typescript/unit_tests/export-type2.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..efee0ee0257e1c87546028bec8b3eb5ae44298f0 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/export-type2.ts.result @@ -0,0 +1,4 @@ +Matched 8 tokens. +============= Module =========== +== Sub Tree == +export type {Foo} "./export-type" diff --git a/src/MapleFE/test/typescript/unit_tests/export-type3.ts b/src/MapleFE/test/typescript/unit_tests/export-type3.ts new file mode 100644 index 0000000000000000000000000000000000000000..d3c109ca03bbf74654d82a323bd8da4cd14d6ec2 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/export-type3.ts @@ -0,0 +1,4 @@ +export type Type += Pick> +& Partial> extends infer U ? {[KT in keyof U]: U[KT]} : never; + diff --git a/src/MapleFE/test/typescript/unit_tests/export-type3.ts.result b/src/MapleFE/test/typescript/unit_tests/export-type3.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..a77c87d926640d0525ca1f33f0cb213be7d25c8a --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/export-type3.ts.result @@ -0,0 +1,4 @@ +Matched 55 tokens. +============= Module =========== +== Sub Tree == +export { type Type = intersect = Pick> & Partial> extends infer U ? {[KT in keyof U] : U[KT] } : never} diff --git a/src/MapleFE/test/typescript/unit_tests/extends-infer.ts b/src/MapleFE/test/typescript/unit_tests/extends-infer.ts new file mode 100644 index 0000000000000000000000000000000000000000..c02b328cff278ec8ab93e9ce99aadbabb040c6d1 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/extends-infer.ts @@ -0,0 +1,3 @@ +export declare type NT = { + [K in keyof T]: NonNullable extends boolean ? 
K : never; + } extends { [_ in keyof T]: infer U; } ? U : never; diff --git a/src/MapleFE/test/typescript/unit_tests/extends-infer.ts.result b/src/MapleFE/test/typescript/unit_tests/extends-infer.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..d674cbdb1ff8292e64d108b9e6c1f9f15ec34943 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/extends-infer.ts.result @@ -0,0 +1,4 @@ +Matched 49 tokens. +============= Module =========== +== Sub Tree == +export {declare type NT = {[K in keyof T] : NonNullable extends boolean ? K : never } extends {[_ in keyof T] : infer U } ? U : never} diff --git a/src/MapleFE/test/typescript/unit_tests/fibonacci.ts b/src/MapleFE/test/typescript/unit_tests/fibonacci.ts new file mode 100644 index 0000000000000000000000000000000000000000..2ac2de9082600be3198b3bd64a0e6b418f7b6d47 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/fibonacci.ts @@ -0,0 +1,16 @@ +function fibonacci(m: number) { + var f0: number = 0; + var f1: number = 1; + var f2: number | undefined; + var i: number; + if (m <= 1) { + return m; + } else { + for (i = 2; i <= m; i++) { + f2 = f0 + f1; + f0 = f1; + f1 = f2; + } + return f2; + } +} diff --git a/src/MapleFE/test/typescript/unit_tests/fibonacci.ts.result b/src/MapleFE/test/typescript/unit_tests/fibonacci.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..bf4d4d25ac1bc77785f201d751b2faa2b065761e --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/fibonacci.ts.result @@ -0,0 +1,20 @@ +Matched 81 tokens. +============= Module =========== +== Sub Tree == +func fibonacci(m) throws: + js_var Decl: f0=0 + js_var Decl: f1=1 + js_var Decl: f2 + js_var Decl: i + cond-branch cond:m LE 1 + true branch : + return m + false branch : + for ( ) + f2 Assign f0 Add f1 + f0 Assign f1 + f1 Assign f2 + + return f2 + + diff --git a/src/MapleFE/test/typescript/unit_tests/field-func.ts b/src/MapleFE/test/typescript/unit_tests/field-func.ts new file mode 100644 index 0000000000000000000000000000000000000000..b87a09fe7b0d98a4fe9ef95dc8845e1267ac73da --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/field-func.ts @@ -0,0 +1,2 @@ +interface cs {} +declare const calls: { (): cs[] }; diff --git a/src/MapleFE/test/typescript/unit_tests/field-func.ts.result b/src/MapleFE/test/typescript/unit_tests/field-func.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..c00be998c394437e2dc100d3e1ee49916a116768 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/field-func.ts.result @@ -0,0 +1,7 @@ +Matched 4 tokens. +Matched 17 tokens. 
+============= Module =========== +== Sub Tree == +ts_interface: cs { } +== Sub Tree == +declare js_const Decl: calls diff --git a/src/MapleFE/test/typescript/unit_tests/for-in-stmt-1.ts b/src/MapleFE/test/typescript/unit_tests/for-in-stmt-1.ts new file mode 100644 index 0000000000000000000000000000000000000000..6b553722369e3af586716557a82694d326cc3475 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/for-in-stmt-1.ts @@ -0,0 +1,10 @@ +class Klass { + [key: string]: number | string; + x: number = 0; + s: string = ""; +} + +var obj: Klass = { x: 1, s: "123" }; +for (const k in obj) { + console.log(k, obj[k]); +} diff --git a/src/MapleFE/test/typescript/unit_tests/for-in-stmt-1.ts.result b/src/MapleFE/test/typescript/unit_tests/for-in-stmt-1.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..637f2e422c010a68a832b49954ebfff6bf321a0f --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/for-in-stmt-1.ts.result @@ -0,0 +1,20 @@ +Matched 26 tokens. +Matched 41 tokens. +Matched 62 tokens. +============= Module =========== +== Sub Tree == +class Klass + Fields: + union = number | string x=0 s="" + Instance Initializer: + Constructors: + Methods: + LocalClasses: + LocalInterfaces: + +== Sub Tree == +js_var Decl: obj= {x:1, s:"123"} +== Sub Tree == +for ( ) + console.log(k,obj[k]) + diff --git a/src/MapleFE/test/typescript/unit_tests/for-in-stmt.ts b/src/MapleFE/test/typescript/unit_tests/for-in-stmt.ts new file mode 100644 index 0000000000000000000000000000000000000000..0cad8e5fa5a9e32ed4a643e90d0aca6abcaab63a --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/for-in-stmt.ts @@ -0,0 +1,10 @@ +class Klass { + [key: string]: number | string; + x: number = 0; + s: string = ""; +} + +var obj: Klass = { x: 1, s: "123" }; +for (var k in obj) { + console.log(k, obj[k]); +} diff --git a/src/MapleFE/test/typescript/unit_tests/for-in-stmt.ts.result b/src/MapleFE/test/typescript/unit_tests/for-in-stmt.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..637f2e422c010a68a832b49954ebfff6bf321a0f --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/for-in-stmt.ts.result @@ -0,0 +1,20 @@ +Matched 26 tokens. +Matched 41 tokens. +Matched 62 tokens. +============= Module =========== +== Sub Tree == +class Klass + Fields: + union = number | string x=0 s="" + Instance Initializer: + Constructors: + Methods: + LocalClasses: + LocalInterfaces: + +== Sub Tree == +js_var Decl: obj= {x:1, s:"123"} +== Sub Tree == +for ( ) + console.log(k,obj[k]) + diff --git a/src/MapleFE/test/typescript/unit_tests/for-let.ts b/src/MapleFE/test/typescript/unit_tests/for-let.ts new file mode 100644 index 0000000000000000000000000000000000000000..4f9c93f7a61ccea17d0d732e6f0665014e94607e --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/for-let.ts @@ -0,0 +1,3 @@ +for (let i = 0; i < 5; i++) { + console.log(i); +} diff --git a/src/MapleFE/test/typescript/unit_tests/for-let.ts.result b/src/MapleFE/test/typescript/unit_tests/for-let.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..8283bf2da4b2f1e9736efff2db680204a4dd84f2 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/for-let.ts.result @@ -0,0 +1,6 @@ +Matched 23 tokens. 
+============= Module =========== +== Sub Tree == +for ( ) + console.log(i) + diff --git a/src/MapleFE/test/typescript/unit_tests/for-loop-multi-inits.ts b/src/MapleFE/test/typescript/unit_tests/for-loop-multi-inits.ts new file mode 100644 index 0000000000000000000000000000000000000000..a02de7c5d5e568265dc1e876fa8c6f9c4a8d0c62 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/for-loop-multi-inits.ts @@ -0,0 +1 @@ +for (let i = 0, j = 3; i < j; i++) console.log(i, j); diff --git a/src/MapleFE/test/typescript/unit_tests/for-loop-multi-inits.ts.result b/src/MapleFE/test/typescript/unit_tests/for-loop-multi-inits.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..b27c90e9ea35dcbf7ff637525d063dc58a89791a --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/for-loop-multi-inits.ts.result @@ -0,0 +1,5 @@ +Matched 27 tokens. +============= Module =========== +== Sub Tree == +for ( ) + console.log(i,j) diff --git a/src/MapleFE/test/typescript/unit_tests/for-loop-multi-inits2.ts b/src/MapleFE/test/typescript/unit_tests/for-loop-multi-inits2.ts new file mode 100644 index 0000000000000000000000000000000000000000..998beff737361ead9b61fe174198a3f0d2c56c19 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/for-loop-multi-inits2.ts @@ -0,0 +1,12 @@ +class Klass { + items: number[] = []; + + func(num: number) { + let i, list; + for (i = 0, list = this.items; i < num; i++) console.log(i, list[i]); + } +} + +var obj: Klass = new Klass(); +obj.items.push(1, 2, 3, 4, 5, 6); +obj.func(6); diff --git a/src/MapleFE/test/typescript/unit_tests/for-loop-multi-inits2.ts.result b/src/MapleFE/test/typescript/unit_tests/for-loop-multi-inits2.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..81669289b69486f5920246da19dffd3c8d5469d7 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/for-loop-multi-inits2.ts.result @@ -0,0 +1,26 @@ +Matched 57 tokens. +Matched 67 tokens. +Matched 86 tokens. +Matched 93 tokens. +============= Module =========== +== Sub Tree == +class Klass + Fields: + items=[] + Instance Initializer: + Constructors: + Methods: + func func(num) throws: + js_let Decl: i + js_let Decl: list + for ( ) + console.log(i,list[i]) + LocalClasses: + LocalInterfaces: + +== Sub Tree == +js_var Decl: obj=new Klass() +== Sub Tree == +obj.items.push(1,2,3,4,5,6) +== Sub Tree == +obj.func(6) diff --git a/src/MapleFE/test/typescript/unit_tests/for-loop-multi-inits3.ts b/src/MapleFE/test/typescript/unit_tests/for-loop-multi-inits3.ts new file mode 100644 index 0000000000000000000000000000000000000000..c028beddbfe0dacdff4947a2767dbf3b8081e2d6 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/for-loop-multi-inits3.ts @@ -0,0 +1,13 @@ +class Klass { + items: number[] = []; + + func() { + let i, list, len; + for (i = 0, list = this.items, len = this.items.length; i < len; i++) + console.log(i, list[i]); + } +} + +var obj: Klass = new Klass(); +obj.items.push(1, 2, 3, 4, 5, 6); +obj.func(); diff --git a/src/MapleFE/test/typescript/unit_tests/for-loop-multi-inits3.ts.result b/src/MapleFE/test/typescript/unit_tests/for-loop-multi-inits3.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..98b323a089f1cd34e55057d364690d0dcaf8c08b --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/for-loop-multi-inits3.ts.result @@ -0,0 +1,27 @@ +Matched 64 tokens. +Matched 74 tokens. +Matched 93 tokens. +Matched 99 tokens. 
+============= Module =========== +== Sub Tree == +class Klass + Fields: + items=[] + Instance Initializer: + Constructors: + Methods: + func func() throws: + js_let Decl: i + js_let Decl: list + js_let Decl: len + for ( ) + console.log(i,list[i]) + LocalClasses: + LocalInterfaces: + +== Sub Tree == +js_var Decl: obj=new Klass() +== Sub Tree == +obj.items.push(1,2,3,4,5,6) +== Sub Tree == +obj.func() diff --git a/src/MapleFE/test/typescript/unit_tests/for-loop-multi-updates.ts b/src/MapleFE/test/typescript/unit_tests/for-loop-multi-updates.ts new file mode 100644 index 0000000000000000000000000000000000000000..252807f5ccc5f4350c15a576ce8cdafab35d3ff3 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/for-loop-multi-updates.ts @@ -0,0 +1 @@ +for (let i = 0, j = 3, k = 6; i < j + k; i += 3, j++, k++) console.log(i, j, k); diff --git a/src/MapleFE/test/typescript/unit_tests/for-loop-multi-updates.ts.result b/src/MapleFE/test/typescript/unit_tests/for-loop-multi-updates.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..9c170dfa5ac3ffc4970be942865299bf74b10af1 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/for-loop-multi-updates.ts.result @@ -0,0 +1,5 @@ +Matched 42 tokens. +============= Module =========== +== Sub Tree == +for ( ) + console.log(i,j,k) diff --git a/src/MapleFE/test/typescript/unit_tests/for-of-stmt.ts b/src/MapleFE/test/typescript/unit_tests/for-of-stmt.ts new file mode 100644 index 0000000000000000000000000000000000000000..73764b9435314483f5509160d637d29883deca64 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/for-of-stmt.ts @@ -0,0 +1,4 @@ +var arr = [1, 2, 3]; +for (var v of arr) { + console.log(v); +} diff --git a/src/MapleFE/test/typescript/unit_tests/for-of-stmt.ts.result b/src/MapleFE/test/typescript/unit_tests/for-of-stmt.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..f0611630907487a5e31026d8d58a1450f5e1844b --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/for-of-stmt.ts.result @@ -0,0 +1,9 @@ +Matched 11 tokens. +Matched 27 tokens. +============= Module =========== +== Sub Tree == +js_var Decl: arr=[1,2,3] +== Sub Tree == +for ( ) + console.log(v) + diff --git a/src/MapleFE/test/typescript/unit_tests/for-switch-mixed.ts b/src/MapleFE/test/typescript/unit_tests/for-switch-mixed.ts new file mode 100644 index 0000000000000000000000000000000000000000..71d7d33cb24f5f206c189de160c9db838f66af36 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/for-switch-mixed.ts @@ -0,0 +1,19 @@ +const nums: number[] = [3, 9, 8, 2, 7, 5, 1, 6, 4]; + +for (const n of nums) { + switch (true) { + case n < 5: + console.log(n, " is less than 5"); + case n > 2 && n < 5: + console.log(n, " + 1 is equal to", n + 1); + break; + case n == 6: + console.log(n, " is equal to 6"); + break; + case n < 8: + console.log(n, " is greater than 4 and less than 8"); + break; + default: + console.log(n, " is greater than 7"); + } +} diff --git a/src/MapleFE/test/typescript/unit_tests/for-switch-mixed.ts.result b/src/MapleFE/test/typescript/unit_tests/for-switch-mixed.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..3d64cd1d402e63411a62f81196a0ceef9225cf64 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/for-switch-mixed.ts.result @@ -0,0 +1,10 @@ +Matched 27 tokens. +Matched 123 tokens. 
+============= Module =========== +== Sub Tree == +js_const Decl: nums=[3,9,8,2,7,5,1,6,4] +== Sub Tree == +for ( ) + A switch + + diff --git a/src/MapleFE/test/typescript/unit_tests/func-builtin.ts b/src/MapleFE/test/typescript/unit_tests/func-builtin.ts new file mode 100644 index 0000000000000000000000000000000000000000..88a893009c6ae2ef31607c869f8c17f4859df287 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/func-builtin.ts @@ -0,0 +1,37 @@ +interface Employee { + firstName: string; + lastName: string; +} + +let john: Employee = { + firstName: "John", + lastName: "Smith", +}; + +let jane: Employee = { + firstName: "Jane", + lastName: "Doe", +}; + +var func; +var res; + +function fullName(this: Employee): string { + return this.firstName + " " + this.lastName; +} + +// call() and apply() returns the result of calling the function directly with the obj instance arg +res = fullName.call(john); +console.log("Name: " + res); // output: "Name: John Smith" +res = fullName.call(jane); +console.log("Name: " + res); // output: "Name: Jane Doe" +res = fullName.apply(john); +console.log("Name: " + res); // output: "Name: John Smith" +res = fullName.apply(jane); +console.log("Name: " + res); // output: "Name: Jane Doe" + +// bind() returns a copy of the function bound to the obj instance arg +func = fullName.bind(john); +console.log("Name: " + func()); // output: "Name: John Smith" +func = fullName.bind(jane); +console.log("Name: " + func()); // output: "Name: Jane Doe" diff --git a/src/MapleFE/test/typescript/unit_tests/func-builtin.ts.result b/src/MapleFE/test/typescript/unit_tests/func-builtin.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..84146a9752502f9167057c6864f80c1a950d9ea7 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/func-builtin.ts.result @@ -0,0 +1,57 @@ +Matched 12 tokens. +Matched 28 tokens. +Matched 44 tokens. +Matched 47 tokens. +Matched 50 tokens. +Matched 72 tokens. +Matched 81 tokens. +Matched 90 tokens. +Matched 99 tokens. +Matched 108 tokens. +Matched 117 tokens. +Matched 126 tokens. +Matched 135 tokens. +Matched 144 tokens. +Matched 153 tokens. +Matched 164 tokens. +Matched 173 tokens. +Matched 184 tokens. 
+============= Module =========== +== Sub Tree == +ts_interface: Employee {firstName;lastName } +== Sub Tree == +js_let Decl: john= {firstName:"John", lastName:"Smith"} +== Sub Tree == +js_let Decl: jane= {firstName:"Jane", lastName:"Doe"} +== Sub Tree == +js_var Decl: func +== Sub Tree == +js_var Decl: res +== Sub Tree == +func fullName(this) throws: + return this.firstName Add " " Add this.lastName + +== Sub Tree == +res Assign fullName.call(john) +== Sub Tree == +console.log("Name: " Add res) +== Sub Tree == +res Assign fullName.call(jane) +== Sub Tree == +console.log("Name: " Add res) +== Sub Tree == +res Assign fullName.apply(john) +== Sub Tree == +console.log("Name: " Add res) +== Sub Tree == +res Assign fullName.apply(jane) +== Sub Tree == +console.log("Name: " Add res) +== Sub Tree == +func Assign fullName.bind(john) +== Sub Tree == +console.log("Name: " Add func()) +== Sub Tree == +func Assign fullName.bind(jane) +== Sub Tree == +console.log("Name: " Add func()) diff --git a/src/MapleFE/test/typescript/unit_tests/func-call.ts b/src/MapleFE/test/typescript/unit_tests/func-call.ts new file mode 100644 index 0000000000000000000000000000000000000000..0efe45f8133a835e013384634669a76adc364c1c --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/func-call.ts @@ -0,0 +1,6 @@ +function func(a: number, b: number, c: number): number { + return a + b + c; +} + +var num = 10; +console.log(func(1, 5, -num)); diff --git a/src/MapleFE/test/typescript/unit_tests/func-call.ts.result b/src/MapleFE/test/typescript/unit_tests/func-call.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..3b091ae645deb09f0cd555761266dede80492a0b --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/func-call.ts.result @@ -0,0 +1,12 @@ +Matched 26 tokens. +Matched 31 tokens. +Matched 46 tokens. +============= Module =========== +== Sub Tree == +func func(a,b,c) throws: + return a Add b Add c + +== Sub Tree == +js_var Decl: num=10 +== Sub Tree == +console.log(func(1,5,Minus num)) diff --git a/src/MapleFE/test/typescript/unit_tests/func2.ts b/src/MapleFE/test/typescript/unit_tests/func2.ts new file mode 100644 index 0000000000000000000000000000000000000000..1569fa1aae54cb54667f30504857a64e6a92150c --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/func2.ts @@ -0,0 +1,12 @@ +function func(this: any, arg: number) { + var x:number; + this.val1 = arg; + x = this.val1; + this.val2 = x; +} + +var a = {}; +var b = new (func as any)(456); // allocate an object and call func as constructor +func.call(a, 123); // pass an existing object to func +console.log(a); +console.log(b); diff --git a/src/MapleFE/test/typescript/unit_tests/func2.ts.result b/src/MapleFE/test/typescript/unit_tests/func2.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..625e3d3318cb55f35c53863f451beff326e78473 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/func2.ts.result @@ -0,0 +1,24 @@ +Matched 36 tokens. +Matched 42 tokens. +Matched 55 tokens. +Matched 64 tokens. +Matched 71 tokens. +Matched 78 tokens. 
+============= Module =========== +== Sub Tree == +func func(this,arg) throws: + js_var Decl: x + this.val1 Assign arg + x Assign this.val1 + this.val2 Assign x + +== Sub Tree == +js_var Decl: a= {} +== Sub Tree == +js_var Decl: b=new func(456) +== Sub Tree == +func.call(a,123) +== Sub Tree == +console.log(a) +== Sub Tree == +console.log(b) diff --git a/src/MapleFE/test/typescript/unit_tests/func3.ts b/src/MapleFE/test/typescript/unit_tests/func3.ts new file mode 100644 index 0000000000000000000000000000000000000000..193eef9e3605b7b2b8ce481abe6ee84ebb8a34d4 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/func3.ts @@ -0,0 +1,17 @@ +interface Foo { + val1: number; + val2: number; +}; + +function func(this: Foo , arg: number) { + var x:number; + this.val1 = arg; + x = this.val1; + this.val2 = x; +} + +var a: Foo = {val1:0, val2:0}; +var b = new (func as any)(456); // allocate an object and call func as constructor +func.call(a, 123); // pass an existing object to func +console.log(a); +console.log(b); diff --git a/src/MapleFE/test/typescript/unit_tests/func3.ts.result b/src/MapleFE/test/typescript/unit_tests/func3.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..a316b87d3fdb55370db56143852baa5b263dae61 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/func3.ts.result @@ -0,0 +1,28 @@ +Matched 12 tokens. +Matched 13 tokens. +Matched 49 tokens. +Matched 64 tokens. +Matched 77 tokens. +Matched 86 tokens. +Matched 93 tokens. +Matched 100 tokens. +============= Module =========== +== Sub Tree == +ts_interface: Foo {val1;val2 } +== Sub Tree == +func func(this,arg) throws: + js_var Decl: x + this.val1 Assign arg + x Assign this.val1 + this.val2 Assign x + +== Sub Tree == +js_var Decl: a= {val1:0, val2:0} +== Sub Tree == +js_var Decl: b=new func(456) +== Sub Tree == +func.call(a,123) +== Sub Tree == +console.log(a) +== Sub Tree == +console.log(b) diff --git a/src/MapleFE/test/typescript/unit_tests/func4.ts b/src/MapleFE/test/typescript/unit_tests/func4.ts new file mode 100644 index 0000000000000000000000000000000000000000..0ecb7843b2d7d2dfa13564ae4d76692782a6770f --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/func4.ts @@ -0,0 +1,17 @@ +class Foo { + public val1: number = 0; + public val2: number = 0; +}; + +function func(this: Foo , arg: number) { + var x:number; + this.val1 = arg; + x = this.val1; + this.val2 = x; +} + +var a = new Foo(); +var b = new (func as any)(456); // allocate an object and call func as constructor +func.call(a, 123); // pass an existing object to func +console.log(a); +console.log(b); diff --git a/src/MapleFE/test/typescript/unit_tests/func4.ts.result b/src/MapleFE/test/typescript/unit_tests/func4.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..bd8ceb2fffed5b2005807c4b3e703831fcc92e33 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/func4.ts.result @@ -0,0 +1,36 @@ +Matched 18 tokens. +Matched 19 tokens. +Matched 55 tokens. +Matched 63 tokens. +Matched 76 tokens. +Matched 85 tokens. +Matched 92 tokens. +Matched 99 tokens. 
+============= Module =========== +== Sub Tree == +class Foo + Fields: + val1=0 val2=0 + Instance Initializer: + Constructors: + Methods: + LocalClasses: + LocalInterfaces: + +== Sub Tree == +func func(this,arg) throws: + js_var Decl: x + this.val1 Assign arg + x Assign this.val1 + this.val2 Assign x + +== Sub Tree == +js_var Decl: a=new Foo() +== Sub Tree == +js_var Decl: b=new func(456) +== Sub Tree == +func.call(a,123) +== Sub Tree == +console.log(a) +== Sub Tree == +console.log(b) diff --git a/src/MapleFE/test/typescript/unit_tests/func5.ts b/src/MapleFE/test/typescript/unit_tests/func5.ts new file mode 100644 index 0000000000000000000000000000000000000000..07b11f4dbdf8e1528f29059c9047d068967bb4b2 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/func5.ts @@ -0,0 +1,8 @@ +function func(arg: number): number { + return arg;; +} + +var a = new (func as any)(123); +console.log(a); +console.log(func(123)); + diff --git a/src/MapleFE/test/typescript/unit_tests/func5.ts.result b/src/MapleFE/test/typescript/unit_tests/func5.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..590d2ebbdbd504c9382d5447229f6987690dc0fb --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/func5.ts.result @@ -0,0 +1,15 @@ +Matched 15 tokens. +Matched 28 tokens. +Matched 35 tokens. +Matched 45 tokens. +============= Module =========== +== Sub Tree == +func func(arg) throws: + return arg + +== Sub Tree == +js_var Decl: a=new func(123) +== Sub Tree == +console.log(a) +== Sub Tree == +console.log(func(123)) diff --git a/src/MapleFE/test/typescript/unit_tests/function-1.ts b/src/MapleFE/test/typescript/unit_tests/function-1.ts new file mode 100644 index 0000000000000000000000000000000000000000..cba43a07774dad3191704949e40f7d9a4f114f11 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/function-1.ts @@ -0,0 +1,6 @@ +function add(a: number, b: number) { + return a + b; +} + +var c = add(1, 2); +console.log(c); diff --git a/src/MapleFE/test/typescript/unit_tests/function-1.ts.result b/src/MapleFE/test/typescript/unit_tests/function-1.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..20ef4e8dbefea7b97213d5aa8ee02f751acaaa8a --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/function-1.ts.result @@ -0,0 +1,12 @@ +Matched 18 tokens. +Matched 28 tokens. +Matched 35 tokens. +============= Module =========== +== Sub Tree == +func add(a,b) throws: + return a Add b + +== Sub Tree == +js_var Decl: c=add(1,2) +== Sub Tree == +console.log(c) diff --git a/src/MapleFE/test/typescript/unit_tests/function-2.ts b/src/MapleFE/test/typescript/unit_tests/function-2.ts new file mode 100644 index 0000000000000000000000000000000000000000..aad00728c6cfecb86c45a5fd4bfe59c4a4903fc4 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/function-2.ts @@ -0,0 +1,7 @@ +function add(a: number, b: number) { + return a + b; +} + +// Allowed by ECMAScript 2017 +var c = add(1, 2); +console.log(c); diff --git a/src/MapleFE/test/typescript/unit_tests/function-2.ts.result b/src/MapleFE/test/typescript/unit_tests/function-2.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..20ef4e8dbefea7b97213d5aa8ee02f751acaaa8a --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/function-2.ts.result @@ -0,0 +1,12 @@ +Matched 18 tokens. +Matched 28 tokens. +Matched 35 tokens. 
+============= Module =========== +== Sub Tree == +func add(a,b) throws: + return a Add b + +== Sub Tree == +js_var Decl: c=add(1,2) +== Sub Tree == +console.log(c) diff --git a/src/MapleFE/test/typescript/unit_tests/function-3.ts b/src/MapleFE/test/typescript/unit_tests/function-3.ts new file mode 100644 index 0000000000000000000000000000000000000000..06a994b1570bc4fd78318043b679427c30b45647 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/function-3.ts @@ -0,0 +1,9 @@ +function func(x: number) { + //if (x < 0) {x = 0}; + if (x < 0) { + x = 0; + } + console.log(x); +} +func(10); +func(-10); diff --git a/src/MapleFE/test/typescript/unit_tests/function-3.ts.result b/src/MapleFE/test/typescript/unit_tests/function-3.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..175ff10d9c688788dc56fce8c1a81e31675c9b06 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/function-3.ts.result @@ -0,0 +1,17 @@ +Matched 28 tokens. +Matched 33 tokens. +Matched 38 tokens. +============= Module =========== +== Sub Tree == +func func(x) throws: + cond-branch cond:x LT 0 + true branch : + x Assign 0 + false branch : + + console.log(x) + +== Sub Tree == +func(10) +== Sub Tree == +func(-10) diff --git a/src/MapleFE/test/typescript/unit_tests/function-arg-obj-literal.ts b/src/MapleFE/test/typescript/unit_tests/function-arg-obj-literal.ts new file mode 100644 index 0000000000000000000000000000000000000000..cedf317fd74b0158feb7c7a9285e2eea9db38f2f --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/function-arg-obj-literal.ts @@ -0,0 +1,6 @@ +const f1: number = 1; +const f2: number = 2; + +function foo(arg: any) {} + +foo({ f1, f2 }); diff --git a/src/MapleFE/test/typescript/unit_tests/function-arg-obj-literal.ts.result b/src/MapleFE/test/typescript/unit_tests/function-arg-obj-literal.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..25f072ce4b3fb244444d4bdf15c89d7f480b9286 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/function-arg-obj-literal.ts.result @@ -0,0 +1,14 @@ +Matched 7 tokens. +Matched 14 tokens. +Matched 23 tokens. +Matched 32 tokens. +============= Module =========== +== Sub Tree == +js_const Decl: f1=1 +== Sub Tree == +js_const Decl: f2=2 +== Sub Tree == +func foo(arg) throws: + +== Sub Tree == +foo( {:f1, :f2}) diff --git a/src/MapleFE/test/typescript/unit_tests/function-type.ts b/src/MapleFE/test/typescript/unit_tests/function-type.ts new file mode 100644 index 0000000000000000000000000000000000000000..b82962914789605b311d4ca0c90c2c36af303777 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/function-type.ts @@ -0,0 +1,3 @@ +let f: (x: number) => number; +f = (x: number): number => x / 3; +console.log(f(6)); diff --git a/src/MapleFE/test/typescript/unit_tests/function-type.ts.result b/src/MapleFE/test/typescript/unit_tests/function-type.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..31a87da43ac52c1c41ce6a96becc14929bed746e --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/function-type.ts.result @@ -0,0 +1,10 @@ +Matched 11 tokens. +Matched 25 tokens. +Matched 35 tokens. 
+============= Module =========== +== Sub Tree == +js_let Decl: f +== Sub Tree == +f Assign (x) -> x Div 3 +== Sub Tree == +console.log(f(6)) diff --git a/src/MapleFE/test/typescript/unit_tests/function-with-this.ts b/src/MapleFE/test/typescript/unit_tests/function-with-this.ts new file mode 100644 index 0000000000000000000000000000000000000000..8512d316801d98d8ed90762288c959f04ea8f8d0 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/function-with-this.ts @@ -0,0 +1,3 @@ +interface Foo { + func(callback: (this: void, v: T) => v is T, thisArg?: any): T; +} diff --git a/src/MapleFE/test/typescript/unit_tests/function-with-this.ts.result b/src/MapleFE/test/typescript/unit_tests/function-with-this.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..b4073672ad44981d9989f58b6c38315bbf1f3512 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/function-with-this.ts.result @@ -0,0 +1,5 @@ +Matched 36 tokens. +============= Module =========== +== Sub Tree == +ts_interface: Foo {func func(callback,thisArg?) throws: + } diff --git a/src/MapleFE/test/typescript/unit_tests/generate-result.sh b/src/MapleFE/test/typescript/unit_tests/generate-result.sh new file mode 100755 index 0000000000000000000000000000000000000000..8666e69ca4928442d8c44d64076f7704d59f348c --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/generate-result.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# Copyright (C) [2020] Futurewei Technologies, Inc. All rights reverved. +# +# OpenArkFE is licensed under the Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# + +FILES=( class-implements-interface.ts construct-signature.ts interface-keyof.ts) + + +for f in "${FILES[@]}" +do + echo "Generating result for $f ..." + ../../../output/typescript/bin/ts2ast "$f" > "$f.result" +done diff --git a/src/MapleFE/test/typescript/unit_tests/generator.ts b/src/MapleFE/test/typescript/unit_tests/generator.ts new file mode 100644 index 0000000000000000000000000000000000000000..6841e1c54cbd89c745f6ddd5b794b6996b931f02 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/generator.ts @@ -0,0 +1,15 @@ +function* gen(n: number): Generator { + for (var i = 1; i <= n; i++) { + let res = yield i; + console.log(res); + if (res) + break; + } + return "done"; +} + +const obj: Generator = gen(10); +console.log(obj.next()); +console.log(obj.next(false)); +console.log(obj.next(true)); +console.log(obj.next()); diff --git a/src/MapleFE/test/typescript/unit_tests/generator.ts.result b/src/MapleFE/test/typescript/unit_tests/generator.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..3bde9f39cf03cfd0dc24a61c026635c834548833 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/generator.ts.result @@ -0,0 +1,30 @@ +Matched 57 tokens. +Matched 74 tokens. +Matched 85 tokens. +Matched 97 tokens. +Matched 109 tokens. +Matched 120 tokens.
+============= Module =========== +== Sub Tree == +generator gen(n) throws: + for ( ) + js_let Decl: res=yield i + console.log(res) + cond-branch cond:res + true branch : + break: + false branch : + + + return "done" + +== Sub Tree == +js_const Decl: obj=gen(10) +== Sub Tree == +console.log(obj.next()) +== Sub Tree == +console.log(obj.next(false)) +== Sub Tree == +console.log(obj.next(true)) +== Sub Tree == +console.log(obj.next()) diff --git a/src/MapleFE/test/typescript/unit_tests/generator2.ts b/src/MapleFE/test/typescript/unit_tests/generator2.ts new file mode 100644 index 0000000000000000000000000000000000000000..d6c34586107e1575f9ac5c265d2bc9a71f34ac90 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/generator2.ts @@ -0,0 +1,20 @@ +function* delegate(num: number) : Generator { + let r = yield num + 100; + return r; +} + +function* gen(n: number): Generator { + for (var i = 1; i <= n; i++) { + let res = yield* delegate(i); + console.log(res); + if (res) + break; + } + return "done"; +} + +const obj: Generator = gen(10); +console.log(obj.next()); +console.log(obj.next(false)); +console.log(obj.next(true)); +console.log(obj.next()); diff --git a/src/MapleFE/test/typescript/unit_tests/generator2.ts.result b/src/MapleFE/test/typescript/unit_tests/generator2.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..2ea5af65bc91b406983534dca5e4ecbd3891dff6 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/generator2.ts.result @@ -0,0 +1,36 @@ +Matched 26 tokens. +Matched 87 tokens. +Matched 104 tokens. +Matched 115 tokens. +Matched 127 tokens. +Matched 139 tokens. +Matched 150 tokens. +============= Module =========== +== Sub Tree == +generator delegate(num) throws: + js_let Decl: r=yield num Add 100 + return r + +== Sub Tree == +generator gen(n) throws: + for ( ) + js_let Decl: res=yield* delegate(i) + console.log(res) + cond-branch cond:res + true branch : + break: + false branch : + + + return "done" + +== Sub Tree == +js_const Decl: obj=gen(10) +== Sub Tree == +console.log(obj.next()) +== Sub Tree == +console.log(obj.next(false)) +== Sub Tree == +console.log(obj.next(true)) +== Sub Tree == +console.log(obj.next()) diff --git a/src/MapleFE/test/typescript/unit_tests/generator3.ts b/src/MapleFE/test/typescript/unit_tests/generator3.ts new file mode 100644 index 0000000000000000000000000000000000000000..bebfbb653f42a09ff2d5c5085169a6fe4247fb0d --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/generator3.ts @@ -0,0 +1,14 @@ +function* foo(index: number) { + while (index < 2) { + yield index; + index++; + } +} + +const iterator = foo(0); + +console.log(iterator.next().value); +// expected output: 0 + +console.log(iterator.next().value); +// expected output: 1 diff --git a/src/MapleFE/test/typescript/unit_tests/generator3.ts.result b/src/MapleFE/test/typescript/unit_tests/generator3.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..dee32f9b52d6f90a88b406dcf35a2244e10976f0 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/generator3.ts.result @@ -0,0 +1,18 @@ +Matched 24 tokens. +Matched 32 tokens. +Matched 45 tokens. +Matched 58 tokens. 
+============= Module =========== +== Sub Tree == +generator foo(index) throws: + while index LT 2 yield index + index Inc + + + +== Sub Tree == +js_const Decl: iterator=foo(0) +== Sub Tree == +console.log(iterator.next().value) +== Sub Tree == +console.log(iterator.next().value) diff --git a/src/MapleFE/test/typescript/unit_tests/generator4.ts b/src/MapleFE/test/typescript/unit_tests/generator4.ts new file mode 100644 index 0000000000000000000000000000000000000000..8d61465c3a387781224ed42ecf6c87ceeff6dc51 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/generator4.ts @@ -0,0 +1,25 @@ +class IterableClass { + public elements: number[]; + + constructor() { + this.elements = []; + } + + add(element:number) { + this.elements.push(element); + } + + * [Symbol.iterator]() { + let element:number; + for (element of this.elements) { + yield element; + } + } +} + +let obj = new IterableClass(); +obj.add(123); +obj.add(456); +for (let e of obj) { + console.log(e); +} diff --git a/src/MapleFE/test/typescript/unit_tests/generator4.ts.result b/src/MapleFE/test/typescript/unit_tests/generator4.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..b09da6af24a7c4b6cb26f6b81171329ead8cb5d8 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/generator4.ts.result @@ -0,0 +1,35 @@ +Matched 68 tokens. +Matched 76 tokens. +Matched 83 tokens. +Matched 90 tokens. +Matched 106 tokens. +============= Module =========== +== Sub Tree == +class IterableClass + Fields: + elements + Instance Initializer: + Constructors: + constructor () throws: + this.elements Assign [] + Methods: + func add(element) throws: + this.elements.push(element) + func () throws: + js_let Decl: element + for ( ) + yield element + + LocalClasses: + LocalInterfaces: + +== Sub Tree == +js_let Decl: obj=new IterableClass() +== Sub Tree == +obj.add(123) +== Sub Tree == +obj.add(456) +== Sub Tree == +for ( ) + console.log(e) + diff --git a/src/MapleFE/test/typescript/unit_tests/generic-func-in-interface.ts b/src/MapleFE/test/typescript/unit_tests/generic-func-in-interface.ts new file mode 100644 index 0000000000000000000000000000000000000000..4f22113aacf9aea268ca30e23dc9c8a66e26c665 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/generic-func-in-interface.ts @@ -0,0 +1,3 @@ +interface IFace { + any(arg: (T | Iterable)[] | string): Iterable; +} diff --git a/src/MapleFE/test/typescript/unit_tests/generic-func-in-interface.ts.result b/src/MapleFE/test/typescript/unit_tests/generic-func-in-interface.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..3e9868346023bbbfbfa187744b32abb2f5779acf --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/generic-func-in-interface.ts.result @@ -0,0 +1,5 @@ +Matched 30 tokens. 
+============= Module =========== +== Sub Tree == +ts_interface: IFace {func any(arg) throws: + } diff --git a/src/MapleFE/test/typescript/unit_tests/generic-function.ts b/src/MapleFE/test/typescript/unit_tests/generic-function.ts new file mode 100644 index 0000000000000000000000000000000000000000..770e5fa265e6d3f7f641e61227cd00f0f5e1b079 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/generic-function.ts @@ -0,0 +1,8 @@ +class Klass { + func void>(cb: T): typeof cb { + return cb; + } +} + +var obj: Klass = new Klass(); +console.log(obj); diff --git a/src/MapleFE/test/typescript/unit_tests/generic-function.ts.result b/src/MapleFE/test/typescript/unit_tests/generic-function.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..f70238895d88062d4df73e715ea244b774516cce --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/generic-function.ts.result @@ -0,0 +1,20 @@ +Matched 30 tokens. +Matched 40 tokens. +Matched 47 tokens. +============= Module =========== +== Sub Tree == +class Klass + Fields: + + Instance Initializer: + Constructors: + Methods: + func func(cb) throws: + return cb + LocalClasses: + LocalInterfaces: + +== Sub Tree == +js_var Decl: obj=new Klass() +== Sub Tree == +console.log(obj) diff --git a/src/MapleFE/test/typescript/unit_tests/generic-function2.ts b/src/MapleFE/test/typescript/unit_tests/generic-function2.ts new file mode 100644 index 0000000000000000000000000000000000000000..4b4462f6ba2240c51da6023455630269c2fc6aa0 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/generic-function2.ts @@ -0,0 +1,5 @@ +class Klass { + func void>(cb: T): typeof cb { + return cb; + } +} diff --git a/src/MapleFE/test/typescript/unit_tests/generic-function2.ts.result b/src/MapleFE/test/typescript/unit_tests/generic-function2.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..532d8cd1717395cac197f1156d302af708ebe5dc --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/generic-function2.ts.result @@ -0,0 +1,14 @@ +Matched 30 tokens. +============= Module =========== +== Sub Tree == +class Klass + Fields: + + Instance Initializer: + Constructors: + Methods: + func func(cb) throws: + return cb + LocalClasses: + LocalInterfaces: + diff --git a/src/MapleFE/test/typescript/unit_tests/generic-lambda.ts b/src/MapleFE/test/typescript/unit_tests/generic-lambda.ts new file mode 100644 index 0000000000000000000000000000000000000000..abd3301280adb0d6c90a5a9076e2a5afdce4f4ff --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/generic-lambda.ts @@ -0,0 +1,2 @@ +const func = (() => ((x: any): x is T[] => x && typeof x.length === 'number'))(); +console.log(func([1,2])); diff --git a/src/MapleFE/test/typescript/unit_tests/generic-lambda.ts.result b/src/MapleFE/test/typescript/unit_tests/generic-lambda.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..15adaddb422ef8b456b40b469b76e2cdaee8117e --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/generic-lambda.ts.result @@ -0,0 +1,7 @@ +Matched 36 tokens. +Matched 50 tokens. 
+============= Module =========== +== Sub Tree == +js_const Decl: func=() -> (x) -> x Land typeof x.length StEq "number"() +== Sub Tree == +console.log(func([1,2])) diff --git a/src/MapleFE/test/typescript/unit_tests/generic-prim-array.ts b/src/MapleFE/test/typescript/unit_tests/generic-prim-array.ts new file mode 100644 index 0000000000000000000000000000000000000000..bc7ecd71559b91547597ac57cbbc3b80359a23a1 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/generic-prim-array.ts @@ -0,0 +1,9 @@ +class Klass { + f: T | undefined = undefined; +} + +const t = new Klass(); +t.f = [123, 456]; +console.log(typeof t); + +export type MyType = Klass; diff --git a/src/MapleFE/test/typescript/unit_tests/generic-prim-array.ts.result b/src/MapleFE/test/typescript/unit_tests/generic-prim-array.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..add060b50d7fb34a9516c23e4f87675edd064c91 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/generic-prim-array.ts.result @@ -0,0 +1,24 @@ +Matched 15 tokens. +Matched 28 tokens. +Matched 38 tokens. +Matched 46 tokens. +Matched 56 tokens. +============= Module =========== +== Sub Tree == +class Klass + Fields: + f=undefined + Instance Initializer: + Constructors: + Methods: + LocalClasses: + LocalInterfaces: + +== Sub Tree == +js_const Decl: t=new Klass() +== Sub Tree == +t.f Assign [123,456] +== Sub Tree == +console.log( typeof t) +== Sub Tree == +export { type MyType = Klass< typeof t>} diff --git a/src/MapleFE/test/typescript/unit_tests/generic-type.ts b/src/MapleFE/test/typescript/unit_tests/generic-type.ts new file mode 100644 index 0000000000000000000000000000000000000000..7f615ab1c8cee4e83d473a591cb1049ce3e45bf3 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/generic-type.ts @@ -0,0 +1 @@ +type TYPE = { [P in K]?: T[P] }; diff --git a/src/MapleFE/test/typescript/unit_tests/generic-type.ts.result b/src/MapleFE/test/typescript/unit_tests/generic-type.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..9dba2969ad3ff698e1d3531d3915c036563c9e91 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/generic-type.ts.result @@ -0,0 +1,4 @@ +Matched 25 tokens. +============= Module =========== +== Sub Tree == + type TYPE = {[P in K] : T[P] } diff --git a/src/MapleFE/test/typescript/unit_tests/generic-type2.ts b/src/MapleFE/test/typescript/unit_tests/generic-type2.ts new file mode 100644 index 0000000000000000000000000000000000000000..38e7871591b036632e47aa66e5cae7b90941a97f --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/generic-type2.ts @@ -0,0 +1,9 @@ +class Klass { + [x: string]: number; +} + +class EXT { + [key: string]: B; +} + +type TYPE = EXT[keyof Klass]; diff --git a/src/MapleFE/test/typescript/unit_tests/generic-type2.ts.result b/src/MapleFE/test/typescript/unit_tests/generic-type2.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..bcb9d64635b5da0e17549af29ad74868693c613f --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/generic-type2.ts.result @@ -0,0 +1,26 @@ +Matched 12 tokens. +Matched 29 tokens. +Matched 48 tokens. 
+============= Module =========== +== Sub Tree == +class Klass + Fields: + number + Instance Initializer: + Constructors: + Methods: + LocalClasses: + LocalInterfaces: + +== Sub Tree == +class EXT + Fields: + B + Instance Initializer: + Constructors: + Methods: + LocalClasses: + LocalInterfaces: + +== Sub Tree == + type TYPE = EXT[ keyof Klass] diff --git a/src/MapleFE/test/typescript/unit_tests/generic-type3.ts b/src/MapleFE/test/typescript/unit_tests/generic-type3.ts new file mode 100644 index 0000000000000000000000000000000000000000..5d109408f3f7b20d113a13d4a0a61a416904c5cc --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/generic-type3.ts @@ -0,0 +1,3 @@ +interface IFace { + func(): Promise ? U : T>; +} diff --git a/src/MapleFE/test/typescript/unit_tests/generic-type3.ts.result b/src/MapleFE/test/typescript/unit_tests/generic-type3.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..a6f568ecd31d56fb1fe8e20f36b00be8e3f386af --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/generic-type3.ts.result @@ -0,0 +1,5 @@ +Matched 26 tokens. +============= Module =========== +== Sub Tree == +ts_interface: IFace {func func() throws: + } diff --git a/src/MapleFE/test/typescript/unit_tests/generic-type4.ts b/src/MapleFE/test/typescript/unit_tests/generic-type4.ts new file mode 100644 index 0000000000000000000000000000000000000000..d9b3d4ab6c6f9460f11cdcfcb427250ae8603eef --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/generic-type4.ts @@ -0,0 +1,2 @@ +type NT = ((args: Array) => PromiseLike>) | ((args: Array) => Array) | ((args: Array) => void); + diff --git a/src/MapleFE/test/typescript/unit_tests/generic-type4.ts.result b/src/MapleFE/test/typescript/unit_tests/generic-type4.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..64505c225af2ad79852ee5abebe2f0e1f41826ea --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/generic-type4.ts.result @@ -0,0 +1,4 @@ +Matched 55 tokens. +============= Module =========== +== Sub Tree == + type NT = union = (args) -> | (args) -> | (args) -> diff --git a/src/MapleFE/test/typescript/unit_tests/generic-typeof.ts b/src/MapleFE/test/typescript/unit_tests/generic-typeof.ts new file mode 100644 index 0000000000000000000000000000000000000000..a2df74ba0364c61193d881848145bc34c2132333 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/generic-typeof.ts @@ -0,0 +1,9 @@ +class Klass { + f: T | undefined = undefined; +} + +const t = new Klass(); +t.f = 123; +console.log(typeof t); + +export type MyType = Klass; diff --git a/src/MapleFE/test/typescript/unit_tests/generic-typeof.ts.result b/src/MapleFE/test/typescript/unit_tests/generic-typeof.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..e506463ca15a6fe7960c1a4a6239d6a257a124f1 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/generic-typeof.ts.result @@ -0,0 +1,24 @@ +Matched 15 tokens. +Matched 26 tokens. +Matched 32 tokens. +Matched 40 tokens. +Matched 50 tokens. 
+============= Module =========== +== Sub Tree == +class Klass + Fields: + f=undefined + Instance Initializer: + Constructors: + Methods: + LocalClasses: + LocalInterfaces: + +== Sub Tree == +js_const Decl: t=new Klass() +== Sub Tree == +t.f Assign 123 +== Sub Tree == +console.log( typeof t) +== Sub Tree == +export { type MyType = Klass< typeof t>} diff --git a/src/MapleFE/test/typescript/unit_tests/generic-typeof2.ts b/src/MapleFE/test/typescript/unit_tests/generic-typeof2.ts new file mode 100644 index 0000000000000000000000000000000000000000..9afd4945d80956963161c92a58d17e38f57695af --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/generic-typeof2.ts @@ -0,0 +1,17 @@ +class Klass { + f: T; + constructor(t: T) { + this.f = t; + } +} +class Foo { + foo: number; + constructor(n: number) { + this.foo = n; + } +} +const PROP = "foo"; +type FooType = Klass; + +var obj: FooType = new Klass(123); +console.log(obj); diff --git a/src/MapleFE/test/typescript/unit_tests/generic-typeof2.ts.result b/src/MapleFE/test/typescript/unit_tests/generic-typeof2.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..e38a4c22cdbb31899776b7e81a965b4d541ee02a --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/generic-typeof2.ts.result @@ -0,0 +1,39 @@ +Matched 25 tokens. +Matched 47 tokens. +Matched 52 tokens. +Matched 64 tokens. +Matched 82 tokens. +Matched 89 tokens. +============= Module =========== +== Sub Tree == +class Klass + Fields: + f + Instance Initializer: + Constructors: + constructor (t) throws: + this.f Assign t + Methods: + LocalClasses: + LocalInterfaces: + +== Sub Tree == +class Foo + Fields: + foo + Instance Initializer: + Constructors: + constructor (n) throws: + this.foo Assign n + Methods: + LocalClasses: + LocalInterfaces: + +== Sub Tree == +js_const Decl: PROP="foo" +== Sub Tree == + type FooType = Klass +== Sub Tree == +js_var Decl: obj=new Klass(123) +== Sub Tree == +console.log(obj) diff --git a/src/MapleFE/test/typescript/unit_tests/generic-with-func.ts b/src/MapleFE/test/typescript/unit_tests/generic-with-func.ts new file mode 100644 index 0000000000000000000000000000000000000000..1983eb47463a1799a076cc9cb7863b11a334712d --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/generic-with-func.ts @@ -0,0 +1,2 @@ +var map: Map void> = new Map(); +console.log(map); diff --git a/src/MapleFE/test/typescript/unit_tests/generic-with-func.ts.result b/src/MapleFE/test/typescript/unit_tests/generic-with-func.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..040306f38b2205de3b09b7e54a0fe44038ee439e --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/generic-with-func.ts.result @@ -0,0 +1,7 @@ +Matched 21 tokens. +Matched 28 tokens. +============= Module =========== +== Sub Tree == +js_var Decl: map=new Map() +== Sub Tree == +console.log(map) diff --git a/src/MapleFE/test/typescript/unit_tests/generic-with-init.ts b/src/MapleFE/test/typescript/unit_tests/generic-with-init.ts new file mode 100644 index 0000000000000000000000000000000000000000..b30c847907193b6aeab06a753f3e559db991cec0 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/generic-with-init.ts @@ -0,0 +1,9 @@ +class Klass { + u: number = 0; + v: string = ""; + + constructor ({u, v}: Partial= {}) { + this.u = u ? u : this.u; + this.v = v ? 
v : this.v; + } +} diff --git a/src/MapleFE/test/typescript/unit_tests/generic-with-init.ts.result b/src/MapleFE/test/typescript/unit_tests/generic-with-init.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..2eae95e289350e27ee9220b4904ce31491de6b99 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/generic-with-init.ts.result @@ -0,0 +1,15 @@ +Matched 58 tokens. +============= Module =========== +== Sub Tree == +class Klass + Fields: + u=0 v="" + Instance Initializer: + Constructors: + constructor ({:u, :v}) throws: + this.u Assign u ? u : this.u + this.v Assign v ? v : this.v + Methods: + LocalClasses: + LocalInterfaces: + diff --git a/src/MapleFE/test/typescript/unit_tests/generics-array.ts b/src/MapleFE/test/typescript/unit_tests/generics-array.ts new file mode 100644 index 0000000000000000000000000000000000000000..d5fb5bb87c2efaeb61e9285f3e5ad2f782158641 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/generics-array.ts @@ -0,0 +1,32 @@ +// Define generic element pool using array +class Pool { + private _pool: T[] = []; + + public put(element: T) { + this._pool.push(element); + } + public get(): T | undefined{ + return this._pool.pop(); + } + public size(): number { + return this._pool.length; + } +} + +class Foo { + public _id: number; + + constructor(id: number) { + this._id = id; + } +} + +// Create array of primary type values +const primPool = new Pool(); +primPool.put(10); +console.log(primPool.get()); + +// Create array of objects +const objPool = new Pool(); +objPool.put(new Foo(100)); +console.log(objPool.get()?._id); diff --git a/src/MapleFE/test/typescript/unit_tests/generics-array.ts.result b/src/MapleFE/test/typescript/unit_tests/generics-array.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..c330b689fe8a683f11e9156c3df428f819cacddd --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/generics-array.ts.result @@ -0,0 +1,49 @@ +Matched 69 tokens. +Matched 92 tokens. +Matched 103 tokens. +Matched 110 tokens. +Matched 121 tokens. +Matched 132 tokens. +Matched 143 tokens. +Matched 156 tokens. 
+============= Module =========== +== Sub Tree == +class Pool + Fields: + _pool=[] + Instance Initializer: + Constructors: + Methods: + func put(element) throws: + this._pool.push(element) + func get() throws: + return this._pool.pop() + func size() throws: + return this._pool.length + LocalClasses: + LocalInterfaces: + +== Sub Tree == +class Foo + Fields: + _id + Instance Initializer: + Constructors: + constructor (id) throws: + this._id Assign id + Methods: + LocalClasses: + LocalInterfaces: + +== Sub Tree == +js_const Decl: primPool=new Pool() +== Sub Tree == +primPool.put(10) +== Sub Tree == +console.log(primPool.get()) +== Sub Tree == +js_const Decl: objPool=new Pool() +== Sub Tree == +objPool.put(new Foo(100)) +== Sub Tree == +console.log(objPool.get()._id) diff --git a/src/MapleFE/test/typescript/unit_tests/generics.ts b/src/MapleFE/test/typescript/unit_tests/generics.ts new file mode 100644 index 0000000000000000000000000000000000000000..0202303b548f143be66feaad72d31cf4448e3bed --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/generics.ts @@ -0,0 +1,7 @@ +function foo(arg: T): T { + return arg; +} + +let output = foo("abc"); + +console.log(output); diff --git a/src/MapleFE/test/typescript/unit_tests/generics.ts.result b/src/MapleFE/test/typescript/unit_tests/generics.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..0d04416aa8ff7aa45337bb340f4562741eacbf0c --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/generics.ts.result @@ -0,0 +1,12 @@ +Matched 17 tokens. +Matched 28 tokens. +Matched 35 tokens. +============= Module =========== +== Sub Tree == +func foo(arg) throws: + return arg + +== Sub Tree == +js_let Decl: output=foo("abc") +== Sub Tree == +console.log(output) diff --git a/src/MapleFE/test/typescript/unit_tests/generics2.ts b/src/MapleFE/test/typescript/unit_tests/generics2.ts new file mode 100644 index 0000000000000000000000000000000000000000..67ae8d57f8c3637d1428f21ae860a5e807c0d311 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/generics2.ts @@ -0,0 +1,11 @@ +class E { + t: T | undefined = undefined; +} + +class Klass { + public n: E<{ s: string }> | undefined = undefined; +} + +var obj: Klass = new Klass(); +obj.n = { t: { s: "example" } }; +console.log(obj); diff --git a/src/MapleFE/test/typescript/unit_tests/generics2.ts.result b/src/MapleFE/test/typescript/unit_tests/generics2.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..2c8b746200f832bbb7c110000a9f3fbd2e689ecf --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/generics2.ts.result @@ -0,0 +1,32 @@ +Matched 15 tokens. +Matched 35 tokens. +Matched 45 tokens. +Matched 59 tokens. +Matched 66 tokens. 
+============= Module =========== +== Sub Tree == +class E + Fields: + t=undefined + Instance Initializer: + Constructors: + Methods: + LocalClasses: + LocalInterfaces: + +== Sub Tree == +class Klass + Fields: + n=undefined + Instance Initializer: + Constructors: + Methods: + LocalClasses: + LocalInterfaces: + +== Sub Tree == +js_var Decl: obj=new Klass() +== Sub Tree == +obj.n Assign {t: {s:"example"}} +== Sub Tree == +console.log(obj) diff --git a/src/MapleFE/test/typescript/unit_tests/getter-setter.ts b/src/MapleFE/test/typescript/unit_tests/getter-setter.ts new file mode 100644 index 0000000000000000000000000000000000000000..373eafc9ddfb268597ee4033fba1c8c45af72b9f --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/getter-setter.ts @@ -0,0 +1,13 @@ +const obj = { + prop: "foo", + get getProp(): string { + return this.prop; + }, + set setProp(newVal: string) { + this.prop = newVal; + }, +}; + +console.log(obj.getProp); +obj.setProp = "bar"; +console.log(obj.getProp); diff --git a/src/MapleFE/test/typescript/unit_tests/getter-setter.ts.result b/src/MapleFE/test/typescript/unit_tests/getter-setter.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..da6ab4a6d553a4606e2a71d8606eea5ee9c4505c --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/getter-setter.ts.result @@ -0,0 +1,17 @@ +Matched 40 tokens. +Matched 49 tokens. +Matched 55 tokens. +Matched 64 tokens. +============= Module =========== +== Sub Tree == +js_const Decl: obj= {prop:"foo", getProp:get getProp() throws: + return this.prop +, setProp:set setProp(newVal) throws: + this.prop Assign newVal +} +== Sub Tree == +console.log(obj.getProp) +== Sub Tree == +obj.setProp Assign "bar" +== Sub Tree == +console.log(obj.getProp) diff --git a/src/MapleFE/test/typescript/unit_tests/getter.ts b/src/MapleFE/test/typescript/unit_tests/getter.ts new file mode 100644 index 0000000000000000000000000000000000000000..c4319fd45d12068e4166543eb704810856a8446c --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/getter.ts @@ -0,0 +1,3 @@ +var obj = { name: "dummy" }; +Object.defineProperty(obj, "name", { get() { return "Name"; } }); +console.log(obj.name); diff --git a/src/MapleFE/test/typescript/unit_tests/getter.ts.result b/src/MapleFE/test/typescript/unit_tests/getter.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..67c69550bb773a969a1da079103858e5d6ce29b3 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/getter.ts.result @@ -0,0 +1,12 @@ +Matched 9 tokens. +Matched 29 tokens. +Matched 38 tokens. 
+============= Module =========== +== Sub Tree == +js_var Decl: obj= {name:"dummy"} +== Sub Tree == +Object.defineProperty(obj,"name", {get:func get() throws: + return "Name" +}) +== Sub Tree == +console.log(obj.name) diff --git a/src/MapleFE/test/typescript/unit_tests/global-in-module.ts b/src/MapleFE/test/typescript/unit_tests/global-in-module.ts new file mode 100644 index 0000000000000000000000000000000000000000..d10410d40a7eba58de62d2a88c65d5740f62b5cb --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/global-in-module.ts @@ -0,0 +1,6 @@ +declare module "Module" { + global { + const flag: boolean; + } +} + diff --git a/src/MapleFE/test/typescript/unit_tests/global-in-module.ts.result b/src/MapleFE/test/typescript/unit_tests/global-in-module.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..f113982c133c24b05e6342d5d88203fb4e47c389 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/global-in-module.ts.result @@ -0,0 +1,7 @@ +Matched 13 tokens. +============= Module =========== +== Sub Tree == +declare ============= Module =========== +== Sub Tree == +declare js_const Decl: flag + diff --git a/src/MapleFE/test/typescript/unit_tests/helloworld.ts b/src/MapleFE/test/typescript/unit_tests/helloworld.ts new file mode 100644 index 0000000000000000000000000000000000000000..940a3ff0ec202582f037ef6f743d606fadab9f32 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/helloworld.ts @@ -0,0 +1 @@ +console.log("Hello world!"); diff --git a/src/MapleFE/test/typescript/unit_tests/helloworld.ts.result b/src/MapleFE/test/typescript/unit_tests/helloworld.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..ffe76bce9eb05088e26e69f3499b49ccd80654ba --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/helloworld.ts.result @@ -0,0 +1,4 @@ +Matched 7 tokens. +============= Module =========== +== Sub Tree == +console.log("Hello world!") diff --git a/src/MapleFE/test/typescript/unit_tests/identifiers.ts b/src/MapleFE/test/typescript/unit_tests/identifiers.ts new file mode 100644 index 0000000000000000000000000000000000000000..cf4114c78248c87813d45bf7b23e322a6c007938 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/identifiers.ts @@ -0,0 +1,10 @@ +// Valid identifiers: +// abstract as async await +// constructor declare from get +// is module namespace of +// require set type + +function type() { + return "type"; +} +console.log(type()); diff --git a/src/MapleFE/test/typescript/unit_tests/identifiers.ts.result b/src/MapleFE/test/typescript/unit_tests/identifiers.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..a15445adc884360ece9677f9ceb6a8f5ce727d6e --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/identifiers.ts.result @@ -0,0 +1,9 @@ +Matched 9 tokens. +Matched 18 tokens. 
+============= Module =========== +== Sub Tree == +func type() throws: + return "type" + +== Sub Tree == +console.log(type()) diff --git a/src/MapleFE/test/typescript/unit_tests/identifiers10.ts b/src/MapleFE/test/typescript/unit_tests/identifiers10.ts new file mode 100644 index 0000000000000000000000000000000000000000..e1b196a6a6ee5451dba73b5dd2b1957edf077059 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/identifiers10.ts @@ -0,0 +1,3 @@ +interface IFace { + throw?: boolean; +} diff --git a/src/MapleFE/test/typescript/unit_tests/identifiers10.ts.result b/src/MapleFE/test/typescript/unit_tests/identifiers10.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..7444472b77e087220bf902b1d2a17477039e76d6 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/identifiers10.ts.result @@ -0,0 +1,4 @@ +Matched 9 tokens. +============= Module =========== +== Sub Tree == +ts_interface: IFace {throw? } diff --git a/src/MapleFE/test/typescript/unit_tests/identifiers11.ts b/src/MapleFE/test/typescript/unit_tests/identifiers11.ts new file mode 100644 index 0000000000000000000000000000000000000000..5e364b1047e3f264333bf85adb13aef5da68cd1a --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/identifiers11.ts @@ -0,0 +1,7 @@ +enum ET { + type = "type", + with = "with" +} +abstract class Base { f1: U | null = null; f2: V | null = null; } +declare class Klass extends Base {} + diff --git a/src/MapleFE/test/typescript/unit_tests/identifiers11.ts.result b/src/MapleFE/test/typescript/unit_tests/identifiers11.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..f9e41d0f91caa8d444bf5cd3f7d36044f9562ea2 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/identifiers11.ts.result @@ -0,0 +1,26 @@ +Matched 11 tokens. +Matched 41 tokens. +Matched 57 tokens. +============= Module =========== +== Sub Tree == +ts_enum: ET {type="type";with="with" } +== Sub Tree == +class Base + Fields: + f1=null f2=null + Instance Initializer: + Constructors: + Methods: + LocalClasses: + LocalInterfaces: + +== Sub Tree == +declare class Klass + Fields: + + Instance Initializer: + Constructors: + Methods: + LocalClasses: + LocalInterfaces: + diff --git a/src/MapleFE/test/typescript/unit_tests/identifiers12.ts b/src/MapleFE/test/typescript/unit_tests/identifiers12.ts new file mode 100644 index 0000000000000000000000000000000000000000..6aa95479017d778d0a3703d6ce09e3fe27743e84 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/identifiers12.ts @@ -0,0 +1,10 @@ +class Klass { + f1: number = 123; + f2: string = "abc"; +} +const obj = { + func: (o: Klass) : Klass => { + return { ...o, private: false }; + }, +}; +console.log(obj.func(new Klass())); diff --git a/src/MapleFE/test/typescript/unit_tests/identifiers12.ts.result b/src/MapleFE/test/typescript/unit_tests/identifiers12.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..87ebf81724a87661aaf93e7663b789129ced54ac --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/identifiers12.ts.result @@ -0,0 +1,19 @@ +Matched 16 tokens. +Matched 48 tokens. +Matched 63 tokens. 
+============= Module =========== +== Sub Tree == +class Klass + Fields: + f1=123 f2="abc" + Instance Initializer: + Constructors: + Methods: + LocalClasses: + LocalInterfaces: + +== Sub Tree == +js_const Decl: obj= {func:(o) -> return (any) {:...o, private:false} +} +== Sub Tree == +console.log(obj.func(new Klass())) diff --git a/src/MapleFE/test/typescript/unit_tests/identifiers13.ts b/src/MapleFE/test/typescript/unit_tests/identifiers13.ts new file mode 100644 index 0000000000000000000000000000000000000000..0f84141b35505d04c131242a9923031e3bb9269c --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/identifiers13.ts @@ -0,0 +1,4 @@ +interface IFace { + type: string; + break: boolean; +} diff --git a/src/MapleFE/test/typescript/unit_tests/identifiers13.ts.result b/src/MapleFE/test/typescript/unit_tests/identifiers13.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..83468c8dd5fca97802cce142ce32f59e8e9706c8 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/identifiers13.ts.result @@ -0,0 +1,4 @@ +Matched 12 tokens. +============= Module =========== +== Sub Tree == +ts_interface: IFace {type;break } diff --git a/src/MapleFE/test/typescript/unit_tests/identifiers14.ts b/src/MapleFE/test/typescript/unit_tests/identifiers14.ts new file mode 100644 index 0000000000000000000000000000000000000000..9279f9018ad0e8b6d63540a4bd3033be726e7414 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/identifiers14.ts @@ -0,0 +1,3 @@ +interface IFace { + static(n: number): string; +} diff --git a/src/MapleFE/test/typescript/unit_tests/identifiers14.ts.result b/src/MapleFE/test/typescript/unit_tests/identifiers14.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..323821496c69a5b309b580ec9fae0460784bb42c --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/identifiers14.ts.result @@ -0,0 +1,5 @@ +Matched 13 tokens. +============= Module =========== +== Sub Tree == +ts_interface: IFace {func static(n) throws: + } diff --git a/src/MapleFE/test/typescript/unit_tests/identifiers15.ts b/src/MapleFE/test/typescript/unit_tests/identifiers15.ts new file mode 100644 index 0000000000000000000000000000000000000000..a49d4aceb2713a97d9bab889f56a00567c3db13d --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/identifiers15.ts @@ -0,0 +1,2 @@ +const obj: { class: any } = { class : "Class" }; +console.log(obj); diff --git a/src/MapleFE/test/typescript/unit_tests/identifiers15.ts.result b/src/MapleFE/test/typescript/unit_tests/identifiers15.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..4874d30a5c52a3a9d6be7ae319cfb17ac0022368 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/identifiers15.ts.result @@ -0,0 +1,7 @@ +Matched 15 tokens. +Matched 22 tokens. 
+============= Module =========== +== Sub Tree == +js_const Decl: obj= {class:"Class"} +== Sub Tree == +console.log(obj) diff --git a/src/MapleFE/test/typescript/unit_tests/identifiers16.ts b/src/MapleFE/test/typescript/unit_tests/identifiers16.ts new file mode 100644 index 0000000000000000000000000000000000000000..d907f06d33903035f39e1ace80e5b29e4186d26e --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/identifiers16.ts @@ -0,0 +1,2 @@ +const obj: { unknown: any } = { unknown : "Unknown" }; +console.log(obj); diff --git a/src/MapleFE/test/typescript/unit_tests/identifiers16.ts.result b/src/MapleFE/test/typescript/unit_tests/identifiers16.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..e3c738868ecca035515409697f480231b028c3e1 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/identifiers16.ts.result @@ -0,0 +1,7 @@ +Matched 15 tokens. +Matched 22 tokens. +============= Module =========== +== Sub Tree == +js_const Decl: obj= {unknown:"Unknown"} +== Sub Tree == +console.log(obj) diff --git a/src/MapleFE/test/typescript/unit_tests/identifiers17.ts b/src/MapleFE/test/typescript/unit_tests/identifiers17.ts new file mode 100644 index 0000000000000000000000000000000000000000..dea0e73a4f5c357848ab07bdbc00d44781954bf6 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/identifiers17.ts @@ -0,0 +1,2 @@ +const obj: { do: any } = { do : "do" }; +console.log(obj); diff --git a/src/MapleFE/test/typescript/unit_tests/identifiers17.ts.result b/src/MapleFE/test/typescript/unit_tests/identifiers17.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..eef4e0a980e5fb42aaeb9310582494dcd1375a67 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/identifiers17.ts.result @@ -0,0 +1,7 @@ +Matched 15 tokens. +Matched 22 tokens. +============= Module =========== +== Sub Tree == +js_const Decl: obj= {do:"do"} +== Sub Tree == +console.log(obj) diff --git a/src/MapleFE/test/typescript/unit_tests/identifiers18.ts b/src/MapleFE/test/typescript/unit_tests/identifiers18.ts new file mode 100644 index 0000000000000000000000000000000000000000..b6ae53f9809719f8a8fb08c30ab8ee04db36ea5b --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/identifiers18.ts @@ -0,0 +1,5 @@ +interface IFace { + if?: string; + then?: string; + else?: string; +} diff --git a/src/MapleFE/test/typescript/unit_tests/identifiers18.ts.result b/src/MapleFE/test/typescript/unit_tests/identifiers18.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..dc287fd11da7f2cf24c1e6a186f2208ece445b83 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/identifiers18.ts.result @@ -0,0 +1,4 @@ +Matched 19 tokens. +============= Module =========== +== Sub Tree == +ts_interface: IFace {if?;then?;else? } diff --git a/src/MapleFE/test/typescript/unit_tests/identifiers19.ts b/src/MapleFE/test/typescript/unit_tests/identifiers19.ts new file mode 100644 index 0000000000000000000000000000000000000000..c0a7c875a11c3b4da0b3591fd6030d2f03d37138 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/identifiers19.ts @@ -0,0 +1,3 @@ +interface IFace { + continue(): void; +} diff --git a/src/MapleFE/test/typescript/unit_tests/identifiers19.ts.result b/src/MapleFE/test/typescript/unit_tests/identifiers19.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..57e221624fb012faa2709aba01a73f22454a67ab --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/identifiers19.ts.result @@ -0,0 +1,5 @@ +Matched 10 tokens. 
+============= Module =========== +== Sub Tree == +ts_interface: IFace {func continue() throws: + } diff --git a/src/MapleFE/test/typescript/unit_tests/identifiers2.ts b/src/MapleFE/test/typescript/unit_tests/identifiers2.ts new file mode 100644 index 0000000000000000000000000000000000000000..766bd3d702050e868c326e124a9bb38a6890eff6 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/identifiers2.ts @@ -0,0 +1,75 @@ +// Valid identifiers: +// abstract as async await +// constructor declare from get +// is module namespace of +// require set type + +function abstract() { + return "abstract"; +} +console.log(abstract()); + +function as() { + return "as"; +} +console.log(as()); + +function async() { + return "async"; +} +console.log(async()); + +function await() { + return "await"; +} +console.log(await()); + +function constructor() { + return "constructor"; +} +console.log(constructor()); + +function declare() { + return "declare"; +} +console.log(declare()); + +function from() { + return "from"; +} +console.log(from()); + +function get() { + return "get"; +} +console.log(get()); + +function is() { + return "is"; +} +console.log(is()); + +function module() { + return "module"; +} +console.log(module()); + +function namespace() { + return "namespace"; +} +console.log(namespace()); + +function of() { + return "of"; +} +console.log(of()); + +function require() { + return "require"; +} +console.log(require()); + +function set() { + return "set"; +} +console.log(set()); diff --git a/src/MapleFE/test/typescript/unit_tests/identifiers2.ts.result b/src/MapleFE/test/typescript/unit_tests/identifiers2.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..2171c7e9c0c72fbc1c1bd8656bd4a91605c2b8e4 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/identifiers2.ts.result @@ -0,0 +1,113 @@ +Matched 9 tokens. +Matched 18 tokens. +Matched 27 tokens. +Matched 36 tokens. +Matched 45 tokens. +Matched 54 tokens. +Matched 63 tokens. +Matched 72 tokens. +Matched 81 tokens. +Matched 90 tokens. +Matched 99 tokens. +Matched 108 tokens. +Matched 117 tokens. +Matched 126 tokens. +Matched 135 tokens. +Matched 144 tokens. +Matched 153 tokens. +Matched 162 tokens. +Matched 171 tokens. +Matched 180 tokens. +Matched 189 tokens. +Matched 198 tokens. +Matched 207 tokens. +Matched 216 tokens. +Matched 225 tokens. +Matched 234 tokens. +Matched 243 tokens. +Matched 252 tokens. 
+============= Module =========== +== Sub Tree == +func abstract() throws: + return "abstract" + +== Sub Tree == +console.log(abstract()) +== Sub Tree == +func as() throws: + return "as" + +== Sub Tree == +console.log(as()) +== Sub Tree == +func async() throws: + return "async" + +== Sub Tree == +console.log(async()) +== Sub Tree == +func await() throws: + return "await" + +== Sub Tree == +console.log(await()) +== Sub Tree == +func constructor() throws: + return "constructor" + +== Sub Tree == +console.log(constructor()) +== Sub Tree == +func declare() throws: + return "declare" + +== Sub Tree == +console.log(declare()) +== Sub Tree == +func from() throws: + return "from" + +== Sub Tree == +console.log(from()) +== Sub Tree == +func get() throws: + return "get" + +== Sub Tree == +console.log(get()) +== Sub Tree == +func is() throws: + return "is" + +== Sub Tree == +console.log(is()) +== Sub Tree == +func module() throws: + return "module" + +== Sub Tree == +console.log(module()) +== Sub Tree == +func namespace() throws: + return "namespace" + +== Sub Tree == +console.log(namespace()) +== Sub Tree == +func of() throws: + return "of" + +== Sub Tree == +console.log(of()) +== Sub Tree == +func require() throws: + return "require" + +== Sub Tree == +console.log(require()) +== Sub Tree == +func set() throws: + return "set" + +== Sub Tree == +console.log(set()) diff --git a/src/MapleFE/test/typescript/unit_tests/identifiers20.ts b/src/MapleFE/test/typescript/unit_tests/identifiers20.ts new file mode 100644 index 0000000000000000000000000000000000000000..afe4d26516226adf676dc6949e8967248f417174 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/identifiers20.ts @@ -0,0 +1,11 @@ +class Klass { + items: number[] = []; + break() { + for (const i of this.items) + console.log(i); + } +} + +var obj: Klass = new Klass(); +obj.items.push(6, 2, 1, 4, 5, 3); +obj.break(); diff --git a/src/MapleFE/test/typescript/unit_tests/identifiers20.ts.result b/src/MapleFE/test/typescript/unit_tests/identifiers20.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..e9df6598ad3261266dd98f7b1cf95cb3c98d99a9 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/identifiers20.ts.result @@ -0,0 +1,24 @@ +Matched 34 tokens. +Matched 44 tokens. +Matched 63 tokens. +Matched 69 tokens. +============= Module =========== +== Sub Tree == +class Klass + Fields: + items=[] + Instance Initializer: + Constructors: + Methods: + func break() throws: + for ( ) + console.log(i) + LocalClasses: + LocalInterfaces: + +== Sub Tree == +js_var Decl: obj=new Klass() +== Sub Tree == +obj.items.push(6,2,1,4,5,3) +== Sub Tree == +obj.break() diff --git a/src/MapleFE/test/typescript/unit_tests/identifiers3.ts b/src/MapleFE/test/typescript/unit_tests/identifiers3.ts new file mode 100644 index 0000000000000000000000000000000000000000..af4744e292e7bd55cea6f78b7655a522711dee51 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/identifiers3.ts @@ -0,0 +1,6 @@ +export declare class Foo { + asserts: boolean; + trueType: string; + falseType: string; + constructor(asserts: boolean, trueType: string, falseType: string); +} diff --git a/src/MapleFE/test/typescript/unit_tests/identifiers3.ts.result b/src/MapleFE/test/typescript/unit_tests/identifiers3.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..649256342bff594978c925f4f3330c1601ae6cc1 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/identifiers3.ts.result @@ -0,0 +1,13 @@ +Matched 33 tokens. 
+============= Module =========== +== Sub Tree == +export {declare class Foo + Fields: + asserts trueType falseType + Instance Initializer: + Constructors: + constructor (asserts,trueType,falseType) throws: + Methods: + LocalClasses: + LocalInterfaces: +} diff --git a/src/MapleFE/test/typescript/unit_tests/identifiers4.ts b/src/MapleFE/test/typescript/unit_tests/identifiers4.ts new file mode 100644 index 0000000000000000000000000000000000000000..dc8534dae2baabc0e5d0e74d7b31d494719c1822 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/identifiers4.ts @@ -0,0 +1,4 @@ +interface Inferf { + return(v: string): string; + throw(e: string): string; +} diff --git a/src/MapleFE/test/typescript/unit_tests/identifiers4.ts.result b/src/MapleFE/test/typescript/unit_tests/identifiers4.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..145b8050dea45cd04f47baa3bf9c962cde3fef56 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/identifiers4.ts.result @@ -0,0 +1,6 @@ +Matched 22 tokens. +============= Module =========== +== Sub Tree == +ts_interface: Inferf {func return(v) throws: +;func throw(e) throws: + } diff --git a/src/MapleFE/test/typescript/unit_tests/identifiers5.ts b/src/MapleFE/test/typescript/unit_tests/identifiers5.ts new file mode 100644 index 0000000000000000000000000000000000000000..afe36ce702b0083941acd589e6e52070543222e2 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/identifiers5.ts @@ -0,0 +1,3 @@ +interface IFace { + finally(): IFace; +} diff --git a/src/MapleFE/test/typescript/unit_tests/identifiers5.ts.result b/src/MapleFE/test/typescript/unit_tests/identifiers5.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..5377924b5cd82ebf89a9a70847c55e0ba2aaa86e --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/identifiers5.ts.result @@ -0,0 +1,5 @@ +Matched 16 tokens. +============= Module =========== +== Sub Tree == +ts_interface: IFace {func finally() throws: + } diff --git a/src/MapleFE/test/typescript/unit_tests/identifiers6.ts b/src/MapleFE/test/typescript/unit_tests/identifiers6.ts new file mode 100644 index 0000000000000000000000000000000000000000..1c13d2ae8877e80973c37af912747f538402cae3 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/identifiers6.ts @@ -0,0 +1,3 @@ +interface IFace { + for(s: string): any; +} diff --git a/src/MapleFE/test/typescript/unit_tests/identifiers6.ts.result b/src/MapleFE/test/typescript/unit_tests/identifiers6.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..bb3cb69bf520b441d046957bc591bb1d67d4625b --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/identifiers6.ts.result @@ -0,0 +1,5 @@ +Matched 13 tokens. 
+============= Module =========== +== Sub Tree == +ts_interface: IFace {func for(s) throws: + } diff --git a/src/MapleFE/test/typescript/unit_tests/identifiers7.ts b/src/MapleFE/test/typescript/unit_tests/identifiers7.ts new file mode 100644 index 0000000000000000000000000000000000000000..975eed4d538faead8b25369477fa3d4e0b389798 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/identifiers7.ts @@ -0,0 +1,4 @@ +interface Inferf { + return?(v?: string): string; + throw?(e?: string): string; +} diff --git a/src/MapleFE/test/typescript/unit_tests/identifiers7.ts.result b/src/MapleFE/test/typescript/unit_tests/identifiers7.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..ac09659add4eeebbb331a3cee7eaa2618c2c1b2c --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/identifiers7.ts.result @@ -0,0 +1,6 @@ +Matched 26 tokens. +============= Module =========== +== Sub Tree == +ts_interface: Inferf {func return?(v?) throws: +;func throw?(e?) throws: + } diff --git a/src/MapleFE/test/typescript/unit_tests/identifiers8.ts b/src/MapleFE/test/typescript/unit_tests/identifiers8.ts new file mode 100644 index 0000000000000000000000000000000000000000..1f2107c4064055b927c4815733ef310d85390a33 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/identifiers8.ts @@ -0,0 +1,3 @@ +var func = () => 1; +export { func as function }; + diff --git a/src/MapleFE/test/typescript/unit_tests/identifiers8.ts.result b/src/MapleFE/test/typescript/unit_tests/identifiers8.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..0b3473672a7a2b78f3b66cee0fd9e67af175ce1d --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/identifiers8.ts.result @@ -0,0 +1,7 @@ +Matched 8 tokens. +Matched 15 tokens. +============= Module =========== +== Sub Tree == +js_var Decl: func=() -> 1 +== Sub Tree == +export {func as function} diff --git a/src/MapleFE/test/typescript/unit_tests/identifiers9.ts b/src/MapleFE/test/typescript/unit_tests/identifiers9.ts new file mode 100644 index 0000000000000000000000000000000000000000..8d2c02a8a9dacd626ac4b3bdfb0d36de596a3267 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/identifiers9.ts @@ -0,0 +1,6 @@ +enum E { + package = "package" +} + +var x: E = E.package; +console.log(x); diff --git a/src/MapleFE/test/typescript/unit_tests/identifiers9.ts.result b/src/MapleFE/test/typescript/unit_tests/identifiers9.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..1f59266a702a0a2b14eb28988a4279f7fd21dc3e --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/identifiers9.ts.result @@ -0,0 +1,10 @@ +Matched 7 tokens. +Matched 16 tokens. +Matched 23 tokens. 
+============= Module =========== +== Sub Tree == +ts_enum: E {package="package" } +== Sub Tree == +js_var Decl: x=E.package +== Sub Tree == +console.log(x) diff --git a/src/MapleFE/test/typescript/unit_tests/if-as-prop-name.ts b/src/MapleFE/test/typescript/unit_tests/if-as-prop-name.ts new file mode 100644 index 0000000000000000000000000000000000000000..c61336ffad50ab0113d6456c62f247e95ad7c504 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/if-as-prop-name.ts @@ -0,0 +1,3 @@ +interface IFace { + if: string; +} diff --git a/src/MapleFE/test/typescript/unit_tests/if-as-prop-name.ts.result b/src/MapleFE/test/typescript/unit_tests/if-as-prop-name.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..b20da9ed661b00c1224343241f99eba3d7d75399 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/if-as-prop-name.ts.result @@ -0,0 +1,4 @@ +Matched 8 tokens. +============= Module =========== +== Sub Tree == +ts_interface: IFace {if } diff --git a/src/MapleFE/test/typescript/unit_tests/if-as-prop-name2.ts b/src/MapleFE/test/typescript/unit_tests/if-as-prop-name2.ts new file mode 100644 index 0000000000000000000000000000000000000000..558b7aea596503d97597f5410cf6b0f279996440 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/if-as-prop-name2.ts @@ -0,0 +1,4 @@ +class Klass { + static if: string = "prop"; +} +console.log(Klass.if); diff --git a/src/MapleFE/test/typescript/unit_tests/if-as-prop-name2.ts.result b/src/MapleFE/test/typescript/unit_tests/if-as-prop-name2.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..f788043747f3ecbc48cd44e7948e59567f37e69e --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/if-as-prop-name2.ts.result @@ -0,0 +1,15 @@ +Matched 11 tokens. +Matched 20 tokens. +============= Module =========== +== Sub Tree == +class Klass + Fields: + if="prop" + Instance Initializer: + Constructors: + Methods: + LocalClasses: + LocalInterfaces: + +== Sub Tree == +console.log(Klass.if) diff --git a/src/MapleFE/test/typescript/unit_tests/if-as-prop-name3.ts b/src/MapleFE/test/typescript/unit_tests/if-as-prop-name3.ts new file mode 100644 index 0000000000000000000000000000000000000000..a9cdaf09c6b2f2fc3866dca7ceb072072669d7ea --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/if-as-prop-name3.ts @@ -0,0 +1,2 @@ +const obj = { if() { console.log("if"); } } +obj.if(); diff --git a/src/MapleFE/test/typescript/unit_tests/if-as-prop-name3.ts.result b/src/MapleFE/test/typescript/unit_tests/if-as-prop-name3.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..171ccc1b720dd1ef2071cd28c39e3182fa1189bb --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/if-as-prop-name3.ts.result @@ -0,0 +1,9 @@ +Matched 17 tokens. +Matched 23 tokens. 
+============= Module =========== +== Sub Tree == +js_const Decl: obj= {if:func if() throws: + console.log("if") +} +== Sub Tree == +obj.if() diff --git a/src/MapleFE/test/typescript/unit_tests/if-else-1.ts b/src/MapleFE/test/typescript/unit_tests/if-else-1.ts new file mode 100644 index 0000000000000000000000000000000000000000..0edb1d0abd2f9fed9a11d7980826a6e2e9519f8e --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/if-else-1.ts @@ -0,0 +1,4 @@ +var a: number = 1; +var b: number = 2; +if (a > b) console.log("bigger"); +else console.log("smaller"); diff --git a/src/MapleFE/test/typescript/unit_tests/if-else-1.ts.result b/src/MapleFE/test/typescript/unit_tests/if-else-1.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..22c29c9f261138b846a88d1bcf341ce961a71789 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/if-else-1.ts.result @@ -0,0 +1,13 @@ +Matched 7 tokens. +Matched 14 tokens. +Matched 35 tokens. +============= Module =========== +== Sub Tree == +js_var Decl: a=1 +== Sub Tree == +js_var Decl: b=2 +== Sub Tree == +cond-branch cond:a GT b +true branch : + console.log("bigger")false branch : + console.log("smaller") diff --git a/src/MapleFE/test/typescript/unit_tests/if-else-2.ts b/src/MapleFE/test/typescript/unit_tests/if-else-2.ts new file mode 100644 index 0000000000000000000000000000000000000000..1fb2acaadf2221aa6d7f6eb2371b8bfa0cbf3152 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/if-else-2.ts @@ -0,0 +1,4 @@ +var a: number = 2; +var b: number = 1;; +if (1) a + b; +else a - b; diff --git a/src/MapleFE/test/typescript/unit_tests/if-else-2.ts.result b/src/MapleFE/test/typescript/unit_tests/if-else-2.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..add6831277eb203efe3bbd8feea79e13e0b949f7 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/if-else-2.ts.result @@ -0,0 +1,14 @@ +Matched 7 tokens. +Matched 14 tokens. +Matched 15 tokens. +Matched 28 tokens. +============= Module =========== +== Sub Tree == +js_var Decl: a=2 +== Sub Tree == +js_var Decl: b=1 +== Sub Tree == +cond-branch cond:1 +true branch : + a Add bfalse branch : + a Sub b diff --git a/src/MapleFE/test/typescript/unit_tests/if-else-3.ts b/src/MapleFE/test/typescript/unit_tests/if-else-3.ts new file mode 100644 index 0000000000000000000000000000000000000000..68d525a28faa6e0bb14b599725bbeaf21a49537a --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/if-else-3.ts @@ -0,0 +1,7 @@ +var x: number = 1; +var a: number = 2; +var b: number = 3; +if (x) a + b; +else if (x) a - b; +else if (x) a * b; +else a / b; diff --git a/src/MapleFE/test/typescript/unit_tests/if-else-3.ts.result b/src/MapleFE/test/typescript/unit_tests/if-else-3.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..93d0ecb30469890b7b1edbe10813b908ad7a39eb --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/if-else-3.ts.result @@ -0,0 +1,22 @@ +Matched 7 tokens. +Matched 14 tokens. +Matched 21 tokens. +Matched 52 tokens. 
+============= Module =========== +== Sub Tree == +js_var Decl: x=1 +== Sub Tree == +js_var Decl: a=2 +== Sub Tree == +js_var Decl: b=3 +== Sub Tree == +cond-branch cond:x +true branch : + a Add bfalse branch : + cond-branch cond:x + true branch : + a Sub b false branch : + cond-branch cond:x + true branch : + a Mul b false branch : + a Div b diff --git a/src/MapleFE/test/typescript/unit_tests/if-stmt-return.ts b/src/MapleFE/test/typescript/unit_tests/if-stmt-return.ts new file mode 100644 index 0000000000000000000000000000000000000000..a0c9900940d3a7ddb2f83abe1fa3404c313f2c2c --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/if-stmt-return.ts @@ -0,0 +1,5 @@ +function func(x: number): void { + if (x > 10) return; + console.log(x); +} +func(2); diff --git a/src/MapleFE/test/typescript/unit_tests/if-stmt-return.ts.result b/src/MapleFE/test/typescript/unit_tests/if-stmt-return.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..9c4909c8379577be7814160d02b9770a695f6ef1 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/if-stmt-return.ts.result @@ -0,0 +1,13 @@ +Matched 26 tokens. +Matched 31 tokens. +============= Module =========== +== Sub Tree == +func func(x) throws: + cond-branch cond:x GT 10 + true branch : + return false branch : + + console.log(x) + +== Sub Tree == +func(2) diff --git a/src/MapleFE/test/typescript/unit_tests/iife.ts b/src/MapleFE/test/typescript/unit_tests/iife.ts new file mode 100644 index 0000000000000000000000000000000000000000..cf2f22435ebe42212862e609cec140b3c1386ebf --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/iife.ts @@ -0,0 +1,4 @@ +let x: number = 3; +(function () { + console.log(x * x); +})(); diff --git a/src/MapleFE/test/typescript/unit_tests/iife.ts.result b/src/MapleFE/test/typescript/unit_tests/iife.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..f2a003a9a9ae16dbc648b813b6843daa965bb912 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/iife.ts.result @@ -0,0 +1,9 @@ +Matched 7 tokens. +Matched 26 tokens. +============= Module =========== +== Sub Tree == +js_let Decl: x=3 +== Sub Tree == +func () throws: + console.log(x Mul x) +() diff --git a/src/MapleFE/test/typescript/unit_tests/implements-as-prop-name.ts b/src/MapleFE/test/typescript/unit_tests/implements-as-prop-name.ts new file mode 100644 index 0000000000000000000000000000000000000000..49dfd11867a37486316b085c3c68a0ee359de450 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/implements-as-prop-name.ts @@ -0,0 +1,6 @@ +interface IFace { + implements : string; +} + +var obj: IFace = { implements: "implements" }; +console.log(obj); diff --git a/src/MapleFE/test/typescript/unit_tests/implements-as-prop-name.ts.result b/src/MapleFE/test/typescript/unit_tests/implements-as-prop-name.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..85611f66fef88942c741d238ce59e61daf25548e --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/implements-as-prop-name.ts.result @@ -0,0 +1,10 @@ +Matched 8 tokens. +Matched 19 tokens. +Matched 26 tokens. 
+============= Module =========== +== Sub Tree == +ts_interface: IFace {implements } +== Sub Tree == +js_var Decl: obj= {implements:"implements"} +== Sub Tree == +console.log(obj) diff --git a/src/MapleFE/test/typescript/unit_tests/implements-as-prop-name2.ts b/src/MapleFE/test/typescript/unit_tests/implements-as-prop-name2.ts new file mode 100644 index 0000000000000000000000000000000000000000..b5bae426828e40b8cd9770450e747a965843f426 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/implements-as-prop-name2.ts @@ -0,0 +1,6 @@ +interface IFace { + implements? : string; +} + +var obj: IFace = { implements: "implements" }; +console.log(obj); diff --git a/src/MapleFE/test/typescript/unit_tests/implements-as-prop-name2.ts.result b/src/MapleFE/test/typescript/unit_tests/implements-as-prop-name2.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..d491d27e0bf901e973ed0cf90ee7e782e3ebac13 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/implements-as-prop-name2.ts.result @@ -0,0 +1,10 @@ +Matched 9 tokens. +Matched 20 tokens. +Matched 27 tokens. +============= Module =========== +== Sub Tree == +ts_interface: IFace {implements? } +== Sub Tree == +js_var Decl: obj= {implements:"implements"} +== Sub Tree == +console.log(obj) diff --git a/src/MapleFE/test/typescript/unit_tests/implements-as-prop-name3.ts b/src/MapleFE/test/typescript/unit_tests/implements-as-prop-name3.ts new file mode 100644 index 0000000000000000000000000000000000000000..d457cadedcb3dedc87f55f38531ef4eaeb68b675 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/implements-as-prop-name3.ts @@ -0,0 +1,6 @@ +interface IFace { + implements? : string; +} + +var obj: IFace = { implements: "implements" }; +console.log(obj.implements); diff --git a/src/MapleFE/test/typescript/unit_tests/implements-as-prop-name3.ts.result b/src/MapleFE/test/typescript/unit_tests/implements-as-prop-name3.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..cc7ad1ff5167b09d496af637e30ca13940684cf1 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/implements-as-prop-name3.ts.result @@ -0,0 +1,10 @@ +Matched 9 tokens. +Matched 20 tokens. +Matched 29 tokens. +============= Module =========== +== Sub Tree == +ts_interface: IFace {implements? } +== Sub Tree == +js_var Decl: obj= {implements:"implements"} +== Sub Tree == +console.log(obj.implements) diff --git a/src/MapleFE/test/typescript/unit_tests/import-ambient-module.ts b/src/MapleFE/test/typescript/unit_tests/import-ambient-module.ts new file mode 100644 index 0000000000000000000000000000000000000000..9f98d352e05f8124c1a09b25ceceb52540d2c595 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/import-ambient-module.ts @@ -0,0 +1,3 @@ +/// +import { X } from "M1"; +import { NS } from "M2"; diff --git a/src/MapleFE/test/typescript/unit_tests/import-ambient-module.ts.result b/src/MapleFE/test/typescript/unit_tests/import-ambient-module.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..c71e900f9a1c38da2856901eb5858a378b61611c --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/import-ambient-module.ts.result @@ -0,0 +1,10 @@ +Matched 8 tokens. +Matched 15 tokens. +Matched 22 tokens. 
+============= Module =========== +== Sub Tree == +trip-slash reference path = "import-in-module.ts" +== Sub Tree == +import {X} "M1" +== Sub Tree == +import {NS} "M2" diff --git a/src/MapleFE/test/typescript/unit_tests/import-as-prop-name.ts b/src/MapleFE/test/typescript/unit_tests/import-as-prop-name.ts new file mode 100644 index 0000000000000000000000000000000000000000..98e0884aa562bc94c8fecdaf61e1bab04a2a9749 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/import-as-prop-name.ts @@ -0,0 +1,3 @@ +declare interface Load { + import(n: string, o?: string): Promise; +} diff --git a/src/MapleFE/test/typescript/unit_tests/import-as-prop-name.ts.result b/src/MapleFE/test/typescript/unit_tests/import-as-prop-name.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..3cb7c20598b67fac6a040d32bcdc13d1d0696e0c --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/import-as-prop-name.ts.result @@ -0,0 +1,5 @@ +Matched 22 tokens. +============= Module =========== +== Sub Tree == +declare ts_interface: Load {func import(n,o?) throws: + } diff --git a/src/MapleFE/test/typescript/unit_tests/import-class.ts b/src/MapleFE/test/typescript/unit_tests/import-class.ts new file mode 100644 index 0000000000000000000000000000000000000000..fc319f54b4dc05e65d586f1621822291b3494dee --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/import-class.ts @@ -0,0 +1,3 @@ +import Bar = require("./export-class"); +let obj = new Bar(123, 456); +console.log(obj); diff --git a/src/MapleFE/test/typescript/unit_tests/import-class.ts.result b/src/MapleFE/test/typescript/unit_tests/import-class.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..3660e1fc6f0565bc73149797f2ed1ec1a524e5d3 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/import-class.ts.result @@ -0,0 +1,10 @@ +Matched 8 tokens. +Matched 19 tokens. +Matched 26 tokens. +============= Module =========== +== Sub Tree == +import { SINGLE "./export-class" as Bar} +== Sub Tree == +js_let Decl: obj=new Bar(123,456) +== Sub Tree == +console.log(obj) diff --git a/src/MapleFE/test/typescript/unit_tests/import-class2.ts b/src/MapleFE/test/typescript/unit_tests/import-class2.ts new file mode 100644 index 0000000000000000000000000000000000000000..579dd6551cd5d8d37403afe44d6e2a5611b1b53d --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/import-class2.ts @@ -0,0 +1,3 @@ +import { default as Bar } from "./export-class"; +let obj = new Bar(123, 456); +console.log(obj); diff --git a/src/MapleFE/test/typescript/unit_tests/import-class2.ts.result b/src/MapleFE/test/typescript/unit_tests/import-class2.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..9f2aae989ad862ee6a2d93cdcce2ed2684efdbef --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/import-class2.ts.result @@ -0,0 +1,10 @@ +Matched 9 tokens. +Matched 20 tokens. +Matched 27 tokens. 
+============= Module =========== +== Sub Tree == +import {default as Bar} "./export-class" +== Sub Tree == +js_let Decl: obj=new Bar(123,456) +== Sub Tree == +console.log(obj) diff --git a/src/MapleFE/test/typescript/unit_tests/import-deco.ts b/src/MapleFE/test/typescript/unit_tests/import-deco.ts new file mode 100644 index 0000000000000000000000000000000000000000..ed4af2f38218a42f5e9de614d3b1d638e6eb7094 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/import-deco.ts @@ -0,0 +1,12 @@ +import * as deco from "./deco-module"; + +class Klass { + @deco.prop_deco("of") + x: number; + constructor(i: number) { + this.x = i; + } +} + +var c = new Klass(3); +console.log(c.x); diff --git a/src/MapleFE/test/typescript/unit_tests/import-deco.ts.result b/src/MapleFE/test/typescript/unit_tests/import-deco.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..b416506be02af92be2390cfdb75dd15052db926e --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/import-deco.ts.result @@ -0,0 +1,23 @@ +Matched 7 tokens. +Matched 36 tokens. +Matched 45 tokens. +Matched 54 tokens. +============= Module =========== +== Sub Tree == +import { * as deco} "./deco-module" +== Sub Tree == +class Klass + Fields: + x + Instance Initializer: + Constructors: + constructor (i) throws: + this.x Assign i + Methods: + LocalClasses: + LocalInterfaces: + +== Sub Tree == +js_var Decl: c=new Klass(3) +== Sub Tree == +console.log(c.x) diff --git a/src/MapleFE/test/typescript/unit_tests/import-default-class.ts b/src/MapleFE/test/typescript/unit_tests/import-default-class.ts new file mode 100644 index 0000000000000000000000000000000000000000..fdfa5581e5e59b0c7bac91cb4f0b2457361ae6f4 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/import-default-class.ts @@ -0,0 +1,3 @@ +import type Klass from "./export-default-class"; +var obj: Klass = { f1: 123, f2: "abc" }; +console.log(obj); diff --git a/src/MapleFE/test/typescript/unit_tests/import-default-class.ts.result b/src/MapleFE/test/typescript/unit_tests/import-default-class.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..85e020e5d1ba8d1c4b4bd96398f26e7e12b7c77b --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/import-default-class.ts.result @@ -0,0 +1,10 @@ +Matched 6 tokens. +Matched 21 tokens. +Matched 28 tokens. +============= Module =========== +== Sub Tree == +import { default as Klass} "./export-default-class" +== Sub Tree == +js_var Decl: obj= {f1:123, f2:"abc"} +== Sub Tree == +console.log(obj) diff --git a/src/MapleFE/test/typescript/unit_tests/import-in-module.ts b/src/MapleFE/test/typescript/unit_tests/import-in-module.ts new file mode 100644 index 0000000000000000000000000000000000000000..cd706044623cdefc7e52f80b0f2821e9516f0fe9 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/import-in-module.ts @@ -0,0 +1,9 @@ +declare module 'M1' { + export interface X {} +} + +declare module 'M2' { + import { X } from "M1"; + export namespace NS { + } +} diff --git a/src/MapleFE/test/typescript/unit_tests/import-in-module.ts.result b/src/MapleFE/test/typescript/unit_tests/import-in-module.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..1dc7f0b4475a3f28245bb162d2f6a339efa8f1e7 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/import-in-module.ts.result @@ -0,0 +1,16 @@ +Matched 10 tokens. +Matched 27 tokens. 
+============= Module =========== +== Sub Tree == +declare ============= Module =========== +== Sub Tree == +export {ts_interface: X { }} + +== Sub Tree == +declare ============= Module =========== +== Sub Tree == +import {X} "M1" +== Sub Tree == +export {namespace NS +} + diff --git a/src/MapleFE/test/typescript/unit_tests/import-promise.ts b/src/MapleFE/test/typescript/unit_tests/import-promise.ts new file mode 100644 index 0000000000000000000000000000000000000000..9e3ee013a75a19c62f9517bd469905b43edaf889 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/import-promise.ts @@ -0,0 +1,2 @@ +import("./M").then(() => { console.log("Completed") } ); + diff --git a/src/MapleFE/test/typescript/unit_tests/import-promise.ts.result b/src/MapleFE/test/typescript/unit_tests/import-promise.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..e9813196aa05ea002db4b550d65a4d48c7f8cf3a --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/import-promise.ts.result @@ -0,0 +1,5 @@ +Matched 20 tokens. +============= Module =========== +== Sub Tree == +import "./M".then(() -> console.log("Completed") +) diff --git a/src/MapleFE/test/typescript/unit_tests/import-promise2.ts b/src/MapleFE/test/typescript/unit_tests/import-promise2.ts new file mode 100644 index 0000000000000000000000000000000000000000..5426f510aea1adc4d2e8a4ff99779ef0e2552f35 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/import-promise2.ts @@ -0,0 +1,6 @@ +const m : string = "./M"; +import(`${m}`).then(() => { + console.log("Completed") +}).catch(() => { + console.log("Failed") +}); diff --git a/src/MapleFE/test/typescript/unit_tests/import-promise2.ts.result b/src/MapleFE/test/typescript/unit_tests/import-promise2.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..919ef976f976b0c311810a71d2efa0dddd6706f5 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/import-promise2.ts.result @@ -0,0 +1,10 @@ +Matched 7 tokens. +Matched 42 tokens. +Matched 43 tokens. +============= Module =========== +== Sub Tree == +js_const Decl: m="./M" +== Sub Tree == +import template-literal: NULL,m.then(() -> console.log("Completed") +).catch(() -> console.log("Failed") +) diff --git a/src/MapleFE/test/typescript/unit_tests/import-type.ts b/src/MapleFE/test/typescript/unit_tests/import-type.ts new file mode 100644 index 0000000000000000000000000000000000000000..8a61f6f18a5164570d361f7607fd17635f0d09b9 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/import-type.ts @@ -0,0 +1 @@ +import type { Foo } from "./export-type"; diff --git a/src/MapleFE/test/typescript/unit_tests/import-type.ts.result b/src/MapleFE/test/typescript/unit_tests/import-type.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..202238fb32e24ef6a8669810faa478cb867fa005 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/import-type.ts.result @@ -0,0 +1,4 @@ +Matched 8 tokens. 
+============= Module =========== +== Sub Tree == +import {Foo} "./export-type" diff --git a/src/MapleFE/test/typescript/unit_tests/import-type2.ts b/src/MapleFE/test/typescript/unit_tests/import-type2.ts new file mode 100644 index 0000000000000000000000000000000000000000..214a68b24edceea1adc80ca6d92353339d1c7ea9 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/import-type2.ts @@ -0,0 +1 @@ +import type { Foo } from "./export-type2"; diff --git a/src/MapleFE/test/typescript/unit_tests/import-type2.ts.result b/src/MapleFE/test/typescript/unit_tests/import-type2.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..99a5ed674470358edcc9961ee0ed6be5083bce3f --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/import-type2.ts.result @@ -0,0 +1,4 @@ +Matched 8 tokens. +============= Module =========== +== Sub Tree == +import {Foo} "./export-type2" diff --git a/src/MapleFE/test/typescript/unit_tests/import-type3.ts b/src/MapleFE/test/typescript/unit_tests/import-type3.ts new file mode 100644 index 0000000000000000000000000000000000000000..093737c50ae88d4521035b70944c883b0ac61d06 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/import-type3.ts @@ -0,0 +1 @@ +import type * as TY from "./export-type2"; diff --git a/src/MapleFE/test/typescript/unit_tests/import-type3.ts.result b/src/MapleFE/test/typescript/unit_tests/import-type3.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..fe57c8b11faf12ffe743e2e0e0e32da2fab13a00 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/import-type3.ts.result @@ -0,0 +1,4 @@ +Matched 8 tokens. +============= Module =========== +== Sub Tree == +import { * as TY} "./export-type2" diff --git a/src/MapleFE/test/typescript/unit_tests/import-type4.ts b/src/MapleFE/test/typescript/unit_tests/import-type4.ts new file mode 100644 index 0000000000000000000000000000000000000000..21cacbd55c1103bf01d4704cc6e56b2940912681 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/import-type4.ts @@ -0,0 +1,4 @@ +declare const obj: { + readonly 'X.Foo': Record; +}; +export { obj }; diff --git a/src/MapleFE/test/typescript/unit_tests/import-type4.ts.result b/src/MapleFE/test/typescript/unit_tests/import-type4.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..defa7e018de68be5a31a77516b014e8e348d1127 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/import-type4.ts.result @@ -0,0 +1,7 @@ +Matched 22 tokens. +Matched 27 tokens. +============= Module =========== +== Sub Tree == +declare js_const Decl: obj +== Sub Tree == +export {obj} diff --git a/src/MapleFE/test/typescript/unit_tests/imported-type.ts b/src/MapleFE/test/typescript/unit_tests/imported-type.ts new file mode 100644 index 0000000000000000000000000000000000000000..5ebc576861d532c59d68842b2f40aaf84a53e4b0 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/imported-type.ts @@ -0,0 +1,3 @@ +declare class Klass { + public get flag(): import("./enum2").ET; +} diff --git a/src/MapleFE/test/typescript/unit_tests/imported-type.ts.result b/src/MapleFE/test/typescript/unit_tests/imported-type.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..d39d2f1ca4f41e9ff4e7c6be60e0b6f7f14f114f --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/imported-type.ts.result @@ -0,0 +1,13 @@ +Matched 18 tokens. 
+============= Module =========== +== Sub Tree == +declare class Klass + Fields: + + Instance Initializer: + Constructors: + Methods: + get flag() throws: + LocalClasses: + LocalInterfaces: + diff --git a/src/MapleFE/test/typescript/unit_tests/in-operator.ts b/src/MapleFE/test/typescript/unit_tests/in-operator.ts new file mode 100644 index 0000000000000000000000000000000000000000..574e83ca148e24328aa87bb481b611510bd0ad4e --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/in-operator.ts @@ -0,0 +1,3 @@ +var arr: number[] = [1, 2, 3, 4, 5]; +console.log(3 in arr); +console.log(7 in arr); diff --git a/src/MapleFE/test/typescript/unit_tests/in-operator.ts.result b/src/MapleFE/test/typescript/unit_tests/in-operator.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..4a97d98d41df6c250bc686cb7c6582001c9bbee9 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/in-operator.ts.result @@ -0,0 +1,10 @@ +Matched 19 tokens. +Matched 28 tokens. +Matched 37 tokens. +============= Module =========== +== Sub Tree == +js_var Decl: arr=[1,2,3,4,5] +== Sub Tree == +console.log(3 in arr) +== Sub Tree == +console.log(7 in arr) diff --git a/src/MapleFE/test/typescript/unit_tests/in-operator2.ts b/src/MapleFE/test/typescript/unit_tests/in-operator2.ts new file mode 100644 index 0000000000000000000000000000000000000000..0d0f5ffe7ab5510fe31d8826a9049149da56e804 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/in-operator2.ts @@ -0,0 +1,8 @@ +type T = { + name: string; + age: number; +}; + +var obj: T = { name: "John", age: 30 }; +console.log("name" in obj); +console.log("Age" in obj); diff --git a/src/MapleFE/test/typescript/unit_tests/in-operator2.ts.result b/src/MapleFE/test/typescript/unit_tests/in-operator2.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..a2534da4adfedc1fe7a6e3c2b35d0c4bfc5e2e7b --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/in-operator2.ts.result @@ -0,0 +1,13 @@ +Matched 14 tokens. +Matched 29 tokens. +Matched 38 tokens. +Matched 47 tokens. +============= Module =========== +== Sub Tree == + type T = {name;age } +== Sub Tree == +js_var Decl: obj= {name:"John", age:30} +== Sub Tree == +console.log("name" in obj) +== Sub Tree == +console.log("Age" in obj) diff --git a/src/MapleFE/test/typescript/unit_tests/index-signature-1.ts b/src/MapleFE/test/typescript/unit_tests/index-signature-1.ts new file mode 100644 index 0000000000000000000000000000000000000000..54764a60512f5490c3173692eb894e5670eddfe6 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/index-signature-1.ts @@ -0,0 +1,4 @@ +interface dictionary { + [index: string]: number; + id: number; +} diff --git a/src/MapleFE/test/typescript/unit_tests/index-signature-1.ts.result b/src/MapleFE/test/typescript/unit_tests/index-signature-1.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..44784cbe74a45d9e4f156bff07344e7bdce3b31b --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/index-signature-1.ts.result @@ -0,0 +1,4 @@ +Matched 16 tokens. 
+============= Module =========== +== Sub Tree == +ts_interface: dictionary {string index type: numberid } diff --git a/src/MapleFE/test/typescript/unit_tests/index-signature-2.ts b/src/MapleFE/test/typescript/unit_tests/index-signature-2.ts new file mode 100644 index 0000000000000000000000000000000000000000..f4666349860fe800649cb94f27b28e01448fcdca --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/index-signature-2.ts @@ -0,0 +1,5 @@ +class Klass { + [key: string]: number; +}; + +type MyArray = Array; diff --git a/src/MapleFE/test/typescript/unit_tests/index-signature-2.ts.result b/src/MapleFE/test/typescript/unit_tests/index-signature-2.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..8d4f6fe7da05e49b9238e4cdaf927572bc62ec59 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/index-signature-2.ts.result @@ -0,0 +1,16 @@ +Matched 12 tokens. +Matched 13 tokens. +Matched 36 tokens. +============= Module =========== +== Sub Tree == +class Klass + Fields: + number + Instance Initializer: + Constructors: + Methods: + LocalClasses: + LocalInterfaces: + +== Sub Tree == + type MyArray = Array diff --git a/src/MapleFE/test/typescript/unit_tests/infer-type.ts b/src/MapleFE/test/typescript/unit_tests/infer-type.ts new file mode 100644 index 0000000000000000000000000000000000000000..3b4603d30c2b26ede91fbd5d663917a0269ba08f --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/infer-type.ts @@ -0,0 +1,2 @@ +type U = { n: number }; +type E = T extends Array ? T : U; diff --git a/src/MapleFE/test/typescript/unit_tests/infer-type.ts.result b/src/MapleFE/test/typescript/unit_tests/infer-type.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..0500019406dc00110d0225630086c31d28962974 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/infer-type.ts.result @@ -0,0 +1,7 @@ +Matched 9 tokens. +Matched 27 tokens. +============= Module =========== +== Sub Tree == + type U = {n } +== Sub Tree == + type E = T extends Array< infer U> ? T : U diff --git a/src/MapleFE/test/typescript/unit_tests/infer-type2.ts b/src/MapleFE/test/typescript/unit_tests/infer-type2.ts new file mode 100644 index 0000000000000000000000000000000000000000..0e826c6314f637744d5f0764ddf5c8f5551b04b0 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/infer-type2.ts @@ -0,0 +1,8 @@ +// Extracted from https://github.com/cocos-creator/engine/blob/develop/cocos/core/gfx/pipeline-state.jsb.ts#L33 +declare type RecursivePartial = { + [P in keyof T]?: T[P] extends Array + ? Array> + : T[P] extends ReadonlyArray + ? ReadonlyArray> + : RecursivePartial; +}; diff --git a/src/MapleFE/test/typescript/unit_tests/infer-type2.ts.result b/src/MapleFE/test/typescript/unit_tests/infer-type2.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..489bad3ffdcc6f5007bd545dc3b37712ae98863e --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/infer-type2.ts.result @@ -0,0 +1,4 @@ +Matched 62 tokens. +============= Module =========== +== Sub Tree == +declare type RecursivePartial = {[P in keyof T] : T[P] extends Array< infer U> ? Array> : T[P] extends ReadonlyArray< infer V> ? 
ReadonlyArray> : RecursivePartial } diff --git a/src/MapleFE/test/typescript/unit_tests/infer-type3.ts b/src/MapleFE/test/typescript/unit_tests/infer-type3.ts new file mode 100644 index 0000000000000000000000000000000000000000..3418e72f9e37f5ab0309396b6461d88277b6f55a --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/infer-type3.ts @@ -0,0 +1,4 @@ +// Extracted from https://github.com/cocos-creator/engine/blob/develop/cocos/core/gfx/pipeline-state.jsb.ts#L33 +export declare type NT = { + [P in keyof T]: T[P] extends Record ? Exclude | keyof T[P] : T[P]; +}; diff --git a/src/MapleFE/test/typescript/unit_tests/infer-type3.ts.result b/src/MapleFE/test/typescript/unit_tests/infer-type3.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..7ab557f1d216eb726ec60eb33ce02c3d713907ff --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/infer-type3.ts.result @@ -0,0 +1,4 @@ +Matched 49 tokens. +============= Module =========== +== Sub Tree == +export {declare type NT = {[P in keyof T] : T[P] extends Record ? union = Exclude | keyof T[P] : T[P] }} diff --git a/src/MapleFE/test/typescript/unit_tests/infer-type4.ts b/src/MapleFE/test/typescript/unit_tests/infer-type4.ts new file mode 100644 index 0000000000000000000000000000000000000000..47da6d2db2892b9851f32701905ddc7675158338 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/infer-type4.ts @@ -0,0 +1 @@ +type MyType = K extends [infer U, ...infer V] ? Extract : K; diff --git a/src/MapleFE/test/typescript/unit_tests/infer-type4.ts.result b/src/MapleFE/test/typescript/unit_tests/infer-type4.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..039d0a6d76a5a89bb60ddf692d872949c63dbe72 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/infer-type4.ts.result @@ -0,0 +1,4 @@ +Matched 32 tokens. +============= Module =========== +== Sub Tree == + type MyType = K extends [ : infer U , : infer V , ] ? Extract : K diff --git a/src/MapleFE/test/typescript/unit_tests/instanceof-1.ts b/src/MapleFE/test/typescript/unit_tests/instanceof-1.ts new file mode 100644 index 0000000000000000000000000000000000000000..6be225473aaa871c83a5532f6fe8f0d44d17a691 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/instanceof-1.ts @@ -0,0 +1,13 @@ +class Foo {} + +class Bar { + foo: Function = () => null; +} + +var foo:Foo = new Foo(); +var bar:Bar = new Bar(); +bar.foo = Foo; +console.log(foo instanceof Bar); +console.log(foo instanceof bar.foo); +console.log(bar.foo instanceof Bar); + diff --git a/src/MapleFE/test/typescript/unit_tests/instanceof-1.ts.result b/src/MapleFE/test/typescript/unit_tests/instanceof-1.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..6137523453960a1ceb93ec0b398681e2978958eb --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/instanceof-1.ts.result @@ -0,0 +1,41 @@ +Matched 4 tokens. +Matched 17 tokens. +Matched 27 tokens. +Matched 37 tokens. +Matched 43 tokens. +Matched 52 tokens. +Matched 63 tokens. +Matched 74 tokens. 
+============= Module =========== +== Sub Tree == +class Foo + Fields: + + Instance Initializer: + Constructors: + Methods: + LocalClasses: + LocalInterfaces: + +== Sub Tree == +class Bar + Fields: + foo=() -> null + Instance Initializer: + Constructors: + Methods: + LocalClasses: + LocalInterfaces: + +== Sub Tree == +js_var Decl: foo=new Foo() +== Sub Tree == +js_var Decl: bar=new Bar() +== Sub Tree == +bar.foo Assign Foo +== Sub Tree == +console.log(foo instanceof Bar) +== Sub Tree == +console.log(foo instanceof bar.foo) +== Sub Tree == +console.log(bar.foo instanceof Bar) diff --git a/src/MapleFE/test/typescript/unit_tests/instanceof.ts b/src/MapleFE/test/typescript/unit_tests/instanceof.ts new file mode 100644 index 0000000000000000000000000000000000000000..55b72c52ea082d47550af61b77176f77e07a2355 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/instanceof.ts @@ -0,0 +1,2 @@ +var x: any = 3; +console.log(x instanceof Number); diff --git a/src/MapleFE/test/typescript/unit_tests/instanceof.ts.result b/src/MapleFE/test/typescript/unit_tests/instanceof.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..dc62911090503f0b87e40735cf502c0f4059f9f6 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/instanceof.ts.result @@ -0,0 +1,7 @@ +Matched 7 tokens. +Matched 16 tokens. +============= Module =========== +== Sub Tree == +js_var Decl: x=3 +== Sub Tree == +console.log(x instanceof Number) diff --git a/src/MapleFE/test/typescript/unit_tests/interface-extends.ts b/src/MapleFE/test/typescript/unit_tests/interface-extends.ts new file mode 100644 index 0000000000000000000000000000000000000000..2796b9867fc612d460451bb249b2c5a262564070 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/interface-extends.ts @@ -0,0 +1,12 @@ +interface Base { + name: string; +} +interface Derived extends Base { + age: number; +} +function dump(obj: Derived) { + console.log(obj.name, obj.age); +} + +let o = { name: "John", age: 30 }; +dump(o); diff --git a/src/MapleFE/test/typescript/unit_tests/interface-extends.ts.result b/src/MapleFE/test/typescript/unit_tests/interface-extends.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..ecb6a0965eb37387c924d6af8213367101b2229a --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/interface-extends.ts.result @@ -0,0 +1,18 @@ +Matched 8 tokens. +Matched 18 tokens. +Matched 40 tokens. +Matched 53 tokens. +Matched 58 tokens. 
+============= Module =========== +== Sub Tree == +ts_interface: Base {name } +== Sub Tree == +ts_interface: Derived {age } +== Sub Tree == +func dump(obj) throws: + console.log(obj.name,obj.age) + +== Sub Tree == +js_let Decl: o= {name:"John", age:30} +== Sub Tree == +dump(o) diff --git a/src/MapleFE/test/typescript/unit_tests/interface-extends2.ts b/src/MapleFE/test/typescript/unit_tests/interface-extends2.ts new file mode 100644 index 0000000000000000000000000000000000000000..6cb460097753b5a55cf6ff219fa836b6e8b794f4 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/interface-extends2.ts @@ -0,0 +1,14 @@ +interface Base { + name: string; +} +interface Derived extends Base { + age: number; + readonly impl?: any; + initialize?(c: number): void; +} +function dump(obj: Derived) { + console.log(obj.name, obj.age); +} + +let o = { name: "John", age: 30 }; +dump(o); diff --git a/src/MapleFE/test/typescript/unit_tests/interface-extends2.ts.result b/src/MapleFE/test/typescript/unit_tests/interface-extends2.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..2575af09f78a5135b326e560caf263129fc04d9e --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/interface-extends2.ts.result @@ -0,0 +1,19 @@ +Matched 8 tokens. +Matched 34 tokens. +Matched 56 tokens. +Matched 69 tokens. +Matched 74 tokens. +============= Module =========== +== Sub Tree == +ts_interface: Base {name } +== Sub Tree == +ts_interface: Derived {age;impl?func initialize?(c) throws: + } +== Sub Tree == +func dump(obj) throws: + console.log(obj.name,obj.age) + +== Sub Tree == +js_let Decl: o= {name:"John", age:30} +== Sub Tree == +dump(o) diff --git a/src/MapleFE/test/typescript/unit_tests/interface-func.ts b/src/MapleFE/test/typescript/unit_tests/interface-func.ts new file mode 100644 index 0000000000000000000000000000000000000000..01e5a0799bb87aa404d7a0a85e5dabd4c7fcfd97 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/interface-func.ts @@ -0,0 +1,9 @@ +class Klass {} + +export interface Interf { + [key: string]: any; +} + +export interface Interf2 { + func(component: Klass): Interf; +} diff --git a/src/MapleFE/test/typescript/unit_tests/interface-func.ts.result b/src/MapleFE/test/typescript/unit_tests/interface-func.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..fa4ee3c2d9b8041f904d6ac04384dd8e24220609 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/interface-func.ts.result @@ -0,0 +1,19 @@ +Matched 4 tokens. +Matched 17 tokens. +Matched 31 tokens. 
+============= Module =========== +== Sub Tree == +class Klass + Fields: + + Instance Initializer: + Constructors: + Methods: + LocalClasses: + LocalInterfaces: + +== Sub Tree == +export {ts_interface: Interf {string index type: any }} +== Sub Tree == +export {ts_interface: Interf2 {func func(component) throws: + }} diff --git a/src/MapleFE/test/typescript/unit_tests/interface-indexable.ts b/src/MapleFE/test/typescript/unit_tests/interface-indexable.ts new file mode 100644 index 0000000000000000000000000000000000000000..2299b8034efdc1de5b6a3595f55cba7412573901 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/interface-indexable.ts @@ -0,0 +1,13 @@ +const enum TypeID { + Array_Class = 0, + Array, +} + +class Klass {} + +interface DataTypes { + [TypeID.Array_Class]: DataTypes[TypeID.Array][]; + [TypeID.Array]: Klass; +} + +type TYPE = DataTypes[Exclude]; diff --git a/src/MapleFE/test/typescript/unit_tests/interface-indexable.ts.result b/src/MapleFE/test/typescript/unit_tests/interface-indexable.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..b5b74c76cd51dabbcb08a5d456131390d249f903 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/interface-indexable.ts.result @@ -0,0 +1,21 @@ +Matched 11 tokens. +Matched 15 tokens. +Matched 42 tokens. +Matched 58 tokens. +============= Module =========== +== Sub Tree == +ts_enum: TypeID {Array_Class=0;Array } +== Sub Tree == +class Klass + Fields: + + Instance Initializer: + Constructors: + Methods: + LocalClasses: + LocalInterfaces: + +== Sub Tree == +ts_interface: DataTypes {[TypeID.Array_Class] : DataTypes[TypeID.Array][];[TypeID.Array] : Klass } +== Sub Tree == + type TYPE = DataTypes[Exclude< keyof DataTypes,TypeID.Array_Class>] diff --git a/src/MapleFE/test/typescript/unit_tests/interface-intersect-nonnullable-intf-fd.ts b/src/MapleFE/test/typescript/unit_tests/interface-intersect-nonnullable-intf-fd.ts new file mode 100644 index 0000000000000000000000000000000000000000..673654135f5736d516b9fdbe83f2ea905c528078 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/interface-intersect-nonnullable-intf-fd.ts @@ -0,0 +1,6 @@ +// ref: cocos game.ts line 922 +interface IntfX { + IntfX_fd: boolean; +} + +type Test = IntfX & { intersect_fd: NonNullable }; diff --git a/src/MapleFE/test/typescript/unit_tests/interface-intersect-nonnullable-intf-fd.ts.result b/src/MapleFE/test/typescript/unit_tests/interface-intersect-nonnullable-intf-fd.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..6f7d95622df3540c51ccb5af37021ad1934d0620 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/interface-intersect-nonnullable-intf-fd.ts.result @@ -0,0 +1,7 @@ +Matched 8 tokens. +Matched 25 tokens. 
+============= Module =========== +== Sub Tree == +ts_interface: IntfX {IntfX_fd } +== Sub Tree == + type Test = intersect = IntfX & {intersect_fd } diff --git a/src/MapleFE/test/typescript/unit_tests/interface-keyof.ts b/src/MapleFE/test/typescript/unit_tests/interface-keyof.ts new file mode 100644 index 0000000000000000000000000000000000000000..df837365df7199f215bf50c55b73383ae9df86ed --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/interface-keyof.ts @@ -0,0 +1,4 @@ +interface Interf { + name: keyof T; + val: T[keyof T]; +} diff --git a/src/MapleFE/test/typescript/unit_tests/interface-keyof.ts.result b/src/MapleFE/test/typescript/unit_tests/interface-keyof.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..eaf8faa25e936c057e33e33b4f64de8b6da2a488 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/interface-keyof.ts.result @@ -0,0 +1,4 @@ +Matched 20 tokens. +============= Module =========== +== Sub Tree == +ts_interface: Interf {name;val } diff --git a/src/MapleFE/test/typescript/unit_tests/interface-opt-def.ts b/src/MapleFE/test/typescript/unit_tests/interface-opt-def.ts new file mode 100644 index 0000000000000000000000000000000000000000..33ee72d0d54b759f175e46cace3e86658e219de5 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/interface-opt-def.ts @@ -0,0 +1,11 @@ +interface I { + a: number; + b?: number; +} + +function f({ a, b = 1 }: I): number { + return a + b; +} + +console.log(f({ a: 1 })); +console.log(f({ a: 1, b: 2 })); diff --git a/src/MapleFE/test/typescript/unit_tests/interface-opt-def.ts.result b/src/MapleFE/test/typescript/unit_tests/interface-opt-def.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..46e46a34bcba99730ef9417d1de2207c356d0591 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/interface-opt-def.ts.result @@ -0,0 +1,15 @@ +Matched 13 tokens. +Matched 35 tokens. +Matched 49 tokens. +Matched 67 tokens. +============= Module =========== +== Sub Tree == +ts_interface: I {a;b? } +== Sub Tree == +func f({:a, :b=1}) throws: + return a Add b + +== Sub Tree == +console.log(f( {a:1})) +== Sub Tree == +console.log(f( {a:1, b:2})) diff --git a/src/MapleFE/test/typescript/unit_tests/interface-optional-field.ts b/src/MapleFE/test/typescript/unit_tests/interface-optional-field.ts new file mode 100644 index 0000000000000000000000000000000000000000..3708b9805c41233d33346a17c0ba105fc6ec97bd --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/interface-optional-field.ts @@ -0,0 +1,3 @@ +interface Interf { + readonly?: boolean | { flag?: boolean }; +} diff --git a/src/MapleFE/test/typescript/unit_tests/interface-optional-field.ts.result b/src/MapleFE/test/typescript/unit_tests/interface-optional-field.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..c92b24340b56389cab065ffce386791c466d58ae --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/interface-optional-field.ts.result @@ -0,0 +1,4 @@ +Matched 16 tokens. +============= Module =========== +== Sub Tree == +ts_interface: Interf {readonly? 
} diff --git a/src/MapleFE/test/typescript/unit_tests/interface.ts b/src/MapleFE/test/typescript/unit_tests/interface.ts new file mode 100644 index 0000000000000000000000000000000000000000..1306b78f98647975d8180b677560e4ec9c06347a --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/interface.ts @@ -0,0 +1,14 @@ +interface Employee { + firstName: string; + lastName: string; +} + +let john: Employee = { + firstName: "John", + lastName: "Smith", +}; + +let jane: Employee = { + firstName: "Jane", + lastName: "Doe", +}; diff --git a/src/MapleFE/test/typescript/unit_tests/interface.ts.result b/src/MapleFE/test/typescript/unit_tests/interface.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..5b1138ffa5a23ec70f0fdbc6db712ee534db602f --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/interface.ts.result @@ -0,0 +1,10 @@ +Matched 12 tokens. +Matched 28 tokens. +Matched 44 tokens. +============= Module =========== +== Sub Tree == +ts_interface: Employee {firstName;lastName } +== Sub Tree == +js_let Decl: john= {firstName:"John", lastName:"Smith"} +== Sub Tree == +js_let Decl: jane= {firstName:"Jane", lastName:"Doe"} diff --git a/src/MapleFE/test/typescript/unit_tests/interface2.ts b/src/MapleFE/test/typescript/unit_tests/interface2.ts new file mode 100644 index 0000000000000000000000000000000000000000..490ad712035327a8e20301394fe28d6b3753bca6 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/interface2.ts @@ -0,0 +1,3 @@ +interface Interf { + obj: { import: string[] }; +} diff --git a/src/MapleFE/test/typescript/unit_tests/interface2.ts.result b/src/MapleFE/test/typescript/unit_tests/interface2.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..b619a930d755f8bfef79827f7c005e2a944c3368 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/interface2.ts.result @@ -0,0 +1,4 @@ +Matched 14 tokens. +============= Module =========== +== Sub Tree == +ts_interface: Interf {obj } diff --git a/src/MapleFE/test/typescript/unit_tests/interface3.ts b/src/MapleFE/test/typescript/unit_tests/interface3.ts new file mode 100644 index 0000000000000000000000000000000000000000..5412585d451cdcd705afbd4c89c1a9812564ed53 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/interface3.ts @@ -0,0 +1,7 @@ +// tsc -t es6 +interface IFace { + create( + args: Iterable + ): { [k: string]: T }; + create(args: Iterable): any; +} diff --git a/src/MapleFE/test/typescript/unit_tests/interface3.ts.result b/src/MapleFE/test/typescript/unit_tests/interface3.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..94e3727736aa37357359b50b82b32fb1c42c69e3 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/interface3.ts.result @@ -0,0 +1,6 @@ +Matched 49 tokens. 
+============= Module =========== +== Sub Tree == +ts_interface: IFace {func create(args) throws: +;func create(args) throws: + } diff --git a/src/MapleFE/test/typescript/unit_tests/intersection-type.ts b/src/MapleFE/test/typescript/unit_tests/intersection-type.ts new file mode 100644 index 0000000000000000000000000000000000000000..774714751c5d4aec81009cb9da7373834c8011b9 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/intersection-type.ts @@ -0,0 +1,14 @@ +interface A { + id: number; +} + +interface B { + name: string; +} + +type I = A & B; +let v: I; +v!.id = 10; +v!.name = "B"; + +console.log(v!.id, v!.name); diff --git a/src/MapleFE/test/typescript/unit_tests/intersection-type.ts.result b/src/MapleFE/test/typescript/unit_tests/intersection-type.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..5d33332ee88b24e3d03543bbf9e2ee142962dda5 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/intersection-type.ts.result @@ -0,0 +1,22 @@ +Matched 8 tokens. +Matched 16 tokens. +Matched 23 tokens. +Matched 28 tokens. +Matched 35 tokens. +Matched 42 tokens. +Matched 57 tokens. +============= Module =========== +== Sub Tree == +ts_interface: A {id } +== Sub Tree == +ts_interface: B {name } +== Sub Tree == + type I = intersect = A & B +== Sub Tree == +js_let Decl: v +== Sub Tree == +v!.id Assign 10 +== Sub Tree == +v!.name Assign "B" +== Sub Tree == +console.log(v!.id,v!.name) diff --git a/src/MapleFE/test/typescript/unit_tests/intersection-type2.ts b/src/MapleFE/test/typescript/unit_tests/intersection-type2.ts new file mode 100644 index 0000000000000000000000000000000000000000..6070c19c10b8f81b7f1d157db4a257b6c968069e --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/intersection-type2.ts @@ -0,0 +1,18 @@ +class A { + id: number = 0; +} + +class B { + flag: false = false; +} + +class C { + name: string = ""; +} + +type I = A & (B["flag"] extends true ? B : C); +let v: I = {id: 0, name: ""}; +v!.id = 10; +v!.name = "B"; + +console.log(v!.id, v!.name); diff --git a/src/MapleFE/test/typescript/unit_tests/intersection-type2.ts.result b/src/MapleFE/test/typescript/unit_tests/intersection-type2.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..0995e58a7314d0af79726755e62fdd9ce8c48f60 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/intersection-type2.ts.result @@ -0,0 +1,49 @@ +Matched 10 tokens. +Matched 20 tokens. +Matched 30 tokens. +Matched 48 tokens. +Matched 63 tokens. +Matched 70 tokens. +Matched 77 tokens. +Matched 92 tokens. +============= Module =========== +== Sub Tree == +class A + Fields: + id=0 + Instance Initializer: + Constructors: + Methods: + LocalClasses: + LocalInterfaces: + +== Sub Tree == +class B + Fields: + flag=false + Instance Initializer: + Constructors: + Methods: + LocalClasses: + LocalInterfaces: + +== Sub Tree == +class C + Fields: + name="" + Instance Initializer: + Constructors: + Methods: + LocalClasses: + LocalInterfaces: + +== Sub Tree == + type I = intersect = A & B["flag"] extends true ? 
B : C +== Sub Tree == +js_let Decl: v= {id:0, name:""} +== Sub Tree == +v!.id Assign 10 +== Sub Tree == +v!.name Assign "B" +== Sub Tree == +console.log(v!.id,v!.name) diff --git a/src/MapleFE/test/typescript/unit_tests/intersection-type3.ts b/src/MapleFE/test/typescript/unit_tests/intersection-type3.ts new file mode 100644 index 0000000000000000000000000000000000000000..8af426e92796bca034e477453a69fc0c884ae8be --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/intersection-type3.ts @@ -0,0 +1 @@ +declare type Type<T> = { [K in keyof T]: K; }[keyof T] & string; diff --git a/src/MapleFE/test/typescript/unit_tests/intersection-type3.ts.result b/src/MapleFE/test/typescript/unit_tests/intersection-type3.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..dfbcdc17edc3cab50bde1dbe039b00a17eafb9b2 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/intersection-type3.ts.result @@ -0,0 +1,4 @@ +Matched 25 tokens. +============= Module =========== +== Sub Tree == +declare type Type = intersect = {[K in keyof T] : K }[ keyof T] & string diff --git a/src/MapleFE/test/typescript/unit_tests/iterator.ts b/src/MapleFE/test/typescript/unit_tests/iterator.ts new file mode 100644 index 0000000000000000000000000000000000000000..e4d1e33a670c592af51db17254537877feae63f9 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/iterator.ts @@ -0,0 +1,14 @@ +interface Iface { + name: string; +} + +const myIter: Iterator<Iface> = Object.freeze({ + next () { + return { done: true, value: undefined, }; + }, +}); + + +const iter: Iterable<Iface> = Object.freeze({ + [Symbol.iterator] () { return myIter; }, +}); diff --git a/src/MapleFE/test/typescript/unit_tests/iterator.ts.result b/src/MapleFE/test/typescript/unit_tests/iterator.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..d4048a3631bf33d36e2c4eb6e05802618574424b --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/iterator.ts.result @@ -0,0 +1,14 @@ +Matched 8 tokens. +Matched 42 tokens. +Matched 71 tokens. +============= Module =========== +== Sub Tree == +ts_interface: Iface {name } +== Sub Tree == +js_const Decl: myIter=Object.freeze( {next:func next() throws: + return {done:true, value:undefined} +}) +== Sub Tree == +js_const Decl: iter=Object.freeze( {[Symbol.iterator] : :func () throws: + return myIter +}) diff --git a/src/MapleFE/test/typescript/unit_tests/keyof-property.ts b/src/MapleFE/test/typescript/unit_tests/keyof-property.ts new file mode 100644 index 0000000000000000000000000000000000000000..855d0e42a03ccf24e035a95c9ab4f0122a66abc1 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/keyof-property.ts @@ -0,0 +1,6 @@ +class Klass { + prop: { field: number } = { field: 0}; +} +export type T = keyof Klass["prop"]; +var n: T = "field"; +console.log(n); diff --git a/src/MapleFE/test/typescript/unit_tests/keyof-property.ts.result b/src/MapleFE/test/typescript/unit_tests/keyof-property.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..7f5be0ce67791d42fc56be3315998309e988a78d --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/keyof-property.ts.result @@ -0,0 +1,21 @@ +Matched 18 tokens. +Matched 28 tokens. +Matched 35 tokens. +Matched 42 tokens.
+============= Module =========== +== Sub Tree == +class Klass + Fields: + prop= {field:0} + Instance Initializer: + Constructors: + Methods: + LocalClasses: + LocalInterfaces: + +== Sub Tree == +export { type T = keyof Klass["prop"]} +== Sub Tree == +js_var Decl: n="field" +== Sub Tree == +console.log(n) diff --git a/src/MapleFE/test/typescript/unit_tests/keyof.ts b/src/MapleFE/test/typescript/unit_tests/keyof.ts new file mode 100644 index 0000000000000000000000000000000000000000..a8eaa08ab49c0e1c012d0d5b3891b03f5ddef0f7 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/keyof.ts @@ -0,0 +1,7 @@ +class Klass { + [x: string]: number; +} +var o: Klass = { abc: 10 }; +var x: keyof Klass = "abc"; +console.log(o[x]); +o[x] = 3; diff --git a/src/MapleFE/test/typescript/unit_tests/keyof.ts.result b/src/MapleFE/test/typescript/unit_tests/keyof.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..3812d5e9488fd4a4af9ac8939adedea8f0d022a7 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/keyof.ts.result @@ -0,0 +1,24 @@ +Matched 12 tokens. +Matched 23 tokens. +Matched 31 tokens. +Matched 41 tokens. +Matched 48 tokens. +============= Module =========== +== Sub Tree == +class Klass + Fields: + number + Instance Initializer: + Constructors: + Methods: + LocalClasses: + LocalInterfaces: + +== Sub Tree == +js_var Decl: o= {abc:10} +== Sub Tree == +js_var Decl: x="abc" +== Sub Tree == +console.log(o[x]) +== Sub Tree == +o[x] Assign 3 diff --git a/src/MapleFE/test/typescript/unit_tests/keyof2.ts b/src/MapleFE/test/typescript/unit_tests/keyof2.ts new file mode 100644 index 0000000000000000000000000000000000000000..824c5b16f34a05b1226ad862eab89fd4ee76c6dd --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/keyof2.ts @@ -0,0 +1,7 @@ +class Klass { + [x: string]: number; +} +var o: Klass = { abc: 10 }; +var x: keyof typeof o = "abc"; +console.log(o[x]); +o[x] = 3; diff --git a/src/MapleFE/test/typescript/unit_tests/keyof2.ts.result b/src/MapleFE/test/typescript/unit_tests/keyof2.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..9c56663543344013cf20078ef8dc07ce87fdaff3 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/keyof2.ts.result @@ -0,0 +1,24 @@ +Matched 12 tokens. +Matched 23 tokens. +Matched 32 tokens. +Matched 42 tokens. +Matched 49 tokens. +============= Module =========== +== Sub Tree == +class Klass + Fields: + number + Instance Initializer: + Constructors: + Methods: + LocalClasses: + LocalInterfaces: + +== Sub Tree == +js_var Decl: o= {abc:10} +== Sub Tree == +js_var Decl: x="abc" +== Sub Tree == +console.log(o[x]) +== Sub Tree == +o[x] Assign 3 diff --git a/src/MapleFE/test/typescript/unit_tests/keyof3.ts b/src/MapleFE/test/typescript/unit_tests/keyof3.ts new file mode 100644 index 0000000000000000000000000000000000000000..824c5b16f34a05b1226ad862eab89fd4ee76c6dd --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/keyof3.ts @@ -0,0 +1,7 @@ +class Klass { + [x: string]: number; +} +var o: Klass = { abc: 10 }; +var x: keyof typeof o = "abc"; +console.log(o[x]); +o[x] = 3; diff --git a/src/MapleFE/test/typescript/unit_tests/keyof3.ts.result b/src/MapleFE/test/typescript/unit_tests/keyof3.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..9c56663543344013cf20078ef8dc07ce87fdaff3 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/keyof3.ts.result @@ -0,0 +1,24 @@ +Matched 12 tokens. +Matched 23 tokens. +Matched 32 tokens. +Matched 42 tokens. +Matched 49 tokens. 
+============= Module =========== +== Sub Tree == +class Klass + Fields: + number + Instance Initializer: + Constructors: + Methods: + LocalClasses: + LocalInterfaces: + +== Sub Tree == +js_var Decl: o= {abc:10} +== Sub Tree == +js_var Decl: x="abc" +== Sub Tree == +console.log(o[x]) +== Sub Tree == +o[x] Assign 3 diff --git a/src/MapleFE/test/typescript/unit_tests/labeled-block.ts b/src/MapleFE/test/typescript/unit_tests/labeled-block.ts new file mode 100644 index 0000000000000000000000000000000000000000..080acf9cfebe603c78ae9899556812ca395fc6c3 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/labeled-block.ts @@ -0,0 +1,12 @@ +var x: number = 10; + +outer: { + if (x > 5) { + if (x > 8) { + console.log("x is greater than 8"); + break outer; + } + console.log("x is greater than 5"); + } else console.log("x is less than or equal to 5"); + console.log("out of block"); +} diff --git a/src/MapleFE/test/typescript/unit_tests/labeled-block.ts.result b/src/MapleFE/test/typescript/unit_tests/labeled-block.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..3fa087629698ce26f806c1b7b7e726bcf3950c47 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/labeled-block.ts.result @@ -0,0 +1,21 @@ +Matched 7 tokens. +Matched 59 tokens. +============= Module =========== +== Sub Tree == +js_var Decl: x=10 +== Sub Tree == +outer: +cond-branch cond:x GT 5 +true branch : + cond-branch cond:x GT 8 + true branch : + console.log("x is greater than 8") + break:outer + + false branch : + + console.log("x is greater than 5") +false branch : + console.log("x is less than or equal to 5") +console.log("out of block") + diff --git a/src/MapleFE/test/typescript/unit_tests/labeled-if-stmt-in-loop.ts b/src/MapleFE/test/typescript/unit_tests/labeled-if-stmt-in-loop.ts new file mode 100644 index 0000000000000000000000000000000000000000..8a98d672373c03d15fc58bdd170ddc7948e7f663 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/labeled-if-stmt-in-loop.ts @@ -0,0 +1,15 @@ +var x: number = 10; +for (var i = 0; i < 10; ++i) { + x--; + outer: if (x > 0) { + if (x > 5) { + if (x > 8) { + console.log("x is greater than 8"); + break outer; + } + console.log("x is greater than 5"); + break; + } else console.log("x is less than or equal to 5"); + console.log("out of nested if-stmt"); + } +} diff --git a/src/MapleFE/test/typescript/unit_tests/labeled-if-stmt-in-loop.ts.result b/src/MapleFE/test/typescript/unit_tests/labeled-if-stmt-in-loop.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..a381c1c4ff519e34f01ffbeffeca61285866f19e --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/labeled-if-stmt-in-loop.ts.result @@ -0,0 +1,30 @@ +Matched 7 tokens. +Matched 86 tokens. 
+============= Module =========== +== Sub Tree == +js_var Decl: x=10 +== Sub Tree == +for ( ) + x Dec + + outer: + cond-branch cond:x GT 0 + true branch : + cond-branch cond:x GT 5 + true branch : + cond-branch cond:x GT 8 + true branch : + console.log("x is greater than 8") + break:outer + + false branch : + + console.log("x is greater than 5") + break: + + false branch : + console.log("x is less than or equal to 5") + console.log("out of nested if-stmt") + false branch : + + diff --git a/src/MapleFE/test/typescript/unit_tests/labeled-if-stmt.ts b/src/MapleFE/test/typescript/unit_tests/labeled-if-stmt.ts new file mode 100644 index 0000000000000000000000000000000000000000..183b80995227e21c976176cab0c273793429588f --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/labeled-if-stmt.ts @@ -0,0 +1,10 @@ +var x: number = 10; +if (x > 0) { + outer: if (x > 5) { + if (x > 8) { + console.log("x is greater than 8"); + break outer; + } + console.log("x is greater than 5"); + } else console.log("x is less than or equal to 5"); +} diff --git a/src/MapleFE/test/typescript/unit_tests/labeled-if-stmt.ts.result b/src/MapleFE/test/typescript/unit_tests/labeled-if-stmt.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..29aa37d8b7d71faa0dc795bdbbffb1217c9a2106 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/labeled-if-stmt.ts.result @@ -0,0 +1,23 @@ +Matched 7 tokens. +Matched 58 tokens. +============= Module =========== +== Sub Tree == +js_var Decl: x=10 +== Sub Tree == +cond-branch cond:x GT 0 +true branch : + outer: + cond-branch cond:x GT 5 + true branch : + cond-branch cond:x GT 8 + true branch : + console.log("x is greater than 8") + break:outer + + false branch : + + console.log("x is greater than 5") + false branch : + console.log("x is less than or equal to 5") +false branch : + diff --git a/src/MapleFE/test/typescript/unit_tests/labeled-loop.ts b/src/MapleFE/test/typescript/unit_tests/labeled-loop.ts new file mode 100644 index 0000000000000000000000000000000000000000..e3724dbfcccc2f4bd6fef274568759709050c8b6 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/labeled-loop.ts @@ -0,0 +1,12 @@ +var arr: number[] = [7, 4, 5, 9, 2, 8, 1, 6, 3]; +var len: number = arr.length; +var sum: number = 0; +outer: for (var i = 0; i < len; ++i) { + for (var j = i + 1; j < len; ++j) { + sum += arr[i]; + console.log(sum); + if (arr[j] > 5) continue outer; + if (sum >= 60) break outer; + } +} +console.log(sum); diff --git a/src/MapleFE/test/typescript/unit_tests/labeled-loop.ts.result b/src/MapleFE/test/typescript/unit_tests/labeled-loop.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..ad0f4192f4dc37582d42ad0061377fb54b121b5a --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/labeled-loop.ts.result @@ -0,0 +1,32 @@ +Matched 27 tokens. +Matched 36 tokens. +Matched 43 tokens. +Matched 114 tokens. +Matched 121 tokens. 
+============= Module =========== +== Sub Tree == +js_var Decl: arr=[7,4,5,9,2,8,1,6,3] +== Sub Tree == +js_var Decl: len=arr.length +== Sub Tree == +js_var Decl: sum=0 +== Sub Tree == +outer: +for ( ) + for ( ) + sum AddAssign arr[i] + console.log(sum) + cond-branch cond:arr[j] GT 5 + true branch : + continue:outer + false branch : + + cond-branch cond:sum GE 60 + true branch : + break:outer + false branch : + + + +== Sub Tree == +console.log(sum) diff --git a/src/MapleFE/test/typescript/unit_tests/lambda.ts b/src/MapleFE/test/typescript/unit_tests/lambda.ts new file mode 100644 index 0000000000000000000000000000000000000000..e1303aa1555b657369ca3099566539e7bbdb65b2 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/lambda.ts @@ -0,0 +1,4 @@ +var add = (x: number, y: number): number => { + return x + y; +}; +console.log(add(2, 3)); diff --git a/src/MapleFE/test/typescript/unit_tests/lambda.ts.result b/src/MapleFE/test/typescript/unit_tests/lambda.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..f51402f201435e2a0672de19a4de0e3f7a17c8f1 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/lambda.ts.result @@ -0,0 +1,8 @@ +Matched 23 tokens. +Matched 35 tokens. +============= Module =========== +== Sub Tree == +js_var Decl: add=(x,y) -> return x Add y + +== Sub Tree == +console.log(add(2,3)) diff --git a/src/MapleFE/test/typescript/unit_tests/lambda2.ts b/src/MapleFE/test/typescript/unit_tests/lambda2.ts new file mode 100644 index 0000000000000000000000000000000000000000..180811544bbaf5207cb361def63f2fbe35b418d0 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/lambda2.ts @@ -0,0 +1,2 @@ +var sqare: (x: number) => number = (x) => x * x; +console.log(sqare(2)); diff --git a/src/MapleFE/test/typescript/unit_tests/lambda2.ts.result b/src/MapleFE/test/typescript/unit_tests/lambda2.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..5c31f72bdcdd397ad1ee96be0944c14f251cccfb --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/lambda2.ts.result @@ -0,0 +1,7 @@ +Matched 19 tokens. +Matched 29 tokens. +============= Module =========== +== Sub Tree == +js_var Decl: sqare=(x) -> x Mul x +== Sub Tree == +console.log(sqare(2)) diff --git a/src/MapleFE/test/typescript/unit_tests/lambda3.ts b/src/MapleFE/test/typescript/unit_tests/lambda3.ts new file mode 100644 index 0000000000000000000000000000000000000000..b8b89b94fef7dbb892f35191c52354fb4a17154b --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/lambda3.ts @@ -0,0 +1,8 @@ +// ref: cocos scheduler.ts line 58 +class CLS_test { + public static get = () => { + return 1; + }; +} + +console.log(CLS_test.get()); diff --git a/src/MapleFE/test/typescript/unit_tests/lambda3.ts.result b/src/MapleFE/test/typescript/unit_tests/lambda3.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..354262be7c4ced3a0df15737cfd69e6b3d814837 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/lambda3.ts.result @@ -0,0 +1,16 @@ +Matched 17 tokens. +Matched 28 tokens. 
+============= Module =========== +== Sub Tree == +class CLS_test + Fields: + get=() -> return 1 + + Instance Initializer: + Constructors: + Methods: + LocalClasses: + LocalInterfaces: + +== Sub Tree == +console.log(CLS_test.get()) diff --git a/src/MapleFE/test/typescript/unit_tests/let-as-prop-name.ts b/src/MapleFE/test/typescript/unit_tests/let-as-prop-name.ts new file mode 100644 index 0000000000000000000000000000000000000000..0fee34fb08d4d74cedd49c0a0b9a30e0f8a80ff7 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/let-as-prop-name.ts @@ -0,0 +1,32 @@ +export function func(arg: {let: Function | null}): void { + if (!arg.let) { + console.log("null"); + } else { + arg.let(); + } +} + +func({let: null}); +func({let: () => { console.log("calling let()"); }}); + +export function func1(arg: {var: Function | null}): void { + if (!arg.var) { + console.log("null"); + } else { + arg.var(); + } +} + +func1({var: null}); +func1({var: () => { console.log("calling var()"); }}); + +export function func2(arg: {break: Function | null}): void { + if (!arg.break) { + console.log("null"); + } else { + arg.break(); + } +} + +func2({break: null}); +func2({break: () => { console.log("calling break()"); }}); diff --git a/src/MapleFE/test/typescript/unit_tests/let-as-prop-name.ts.result b/src/MapleFE/test/typescript/unit_tests/let-as-prop-name.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..5ecae0f33fd4d81d52516d162c474d52fbfcf9fc --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/let-as-prop-name.ts.result @@ -0,0 +1,52 @@ +Matched 43 tokens. +Matched 52 tokens. +Matched 72 tokens. +Matched 115 tokens. +Matched 124 tokens. +Matched 144 tokens. +Matched 187 tokens. +Matched 196 tokens. +Matched 216 tokens. +============= Module =========== +== Sub Tree == +export {func func(arg) throws: + cond-branch cond:Not arg.let + true branch : + console.log("null") + false branch : + arg.let() + +} +== Sub Tree == +func( {let:null}) +== Sub Tree == +func( {let:() -> console.log("calling let()") +}) +== Sub Tree == +export {func func1(arg) throws: + cond-branch cond:Not arg.var + true branch : + console.log("null") + false branch : + arg.var() + +} +== Sub Tree == +func1( {var:null}) +== Sub Tree == +func1( {var:() -> console.log("calling var()") +}) +== Sub Tree == +export {func func2(arg) throws: + cond-branch cond:Not arg.break + true branch : + console.log("null") + false branch : + arg.break() + +} +== Sub Tree == +func2( {break:null}) +== Sub Tree == +func2( {break:() -> console.log("calling break()") +}) diff --git a/src/MapleFE/test/typescript/unit_tests/let-multi-decls.ts b/src/MapleFE/test/typescript/unit_tests/let-multi-decls.ts new file mode 100644 index 0000000000000000000000000000000000000000..b3f44de9688766101aa9072b18f38f3b14c6ee6e --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/let-multi-decls.ts @@ -0,0 +1,5 @@ +let i: number, + j = 3, + k = 5; +i = 10; +console.log(i + j + k); diff --git a/src/MapleFE/test/typescript/unit_tests/let-multi-decls.ts.result b/src/MapleFE/test/typescript/unit_tests/let-multi-decls.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..61311782f069bcb318ced21cd6a6605b8ab759b8 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/let-multi-decls.ts.result @@ -0,0 +1,14 @@ +Matched 13 tokens. +Matched 17 tokens. +Matched 28 tokens. 
+============= Module =========== +== Sub Tree == +js_let Decl: i +== Sub Tree == +js_let Decl: j=3 +== Sub Tree == +js_let Decl: k=5 +== Sub Tree == +i Assign 10 +== Sub Tree == +console.log(i Add j Add k) diff --git a/src/MapleFE/test/typescript/unit_tests/let-multi-decls2.ts b/src/MapleFE/test/typescript/unit_tests/let-multi-decls2.ts new file mode 100644 index 0000000000000000000000000000000000000000..4de255fbb6deac156255950f8b84f650ff2faa6f --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/let-multi-decls2.ts @@ -0,0 +1,11 @@ +function func() { + let i = 1, + j = 2, + k; + i = 10; + j = 3; + k = 5; + console.log(i + j + k); +} + +func(); diff --git a/src/MapleFE/test/typescript/unit_tests/let-multi-decls2.ts.result b/src/MapleFE/test/typescript/unit_tests/let-multi-decls2.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..a3bff642f5297036c4d30db6426ba8737485c92e --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/let-multi-decls2.ts.result @@ -0,0 +1,15 @@ +Matched 40 tokens. +Matched 44 tokens. +============= Module =========== +== Sub Tree == +func func() throws: + js_let Decl: i=1 + js_let Decl: j=2 + js_let Decl: k + i Assign 10 + j Assign 3 + k Assign 5 + console.log(i Add j Add k) + +== Sub Tree == +func() diff --git a/src/MapleFE/test/typescript/unit_tests/literal-type.ts b/src/MapleFE/test/typescript/unit_tests/literal-type.ts new file mode 100644 index 0000000000000000000000000000000000000000..af5b84b57d5f3a63def536bf38b53a6516a46e3d --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/literal-type.ts @@ -0,0 +1,4 @@ +var x: 0 | -1 = 0; +console.log(x); +x = -1; +console.log(x); diff --git a/src/MapleFE/test/typescript/unit_tests/literal-type.ts.result b/src/MapleFE/test/typescript/unit_tests/literal-type.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..4597f8a34230202abe303aa580fe746b40c07cec --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/literal-type.ts.result @@ -0,0 +1,13 @@ +Matched 9 tokens. +Matched 16 tokens. +Matched 20 tokens. +Matched 27 tokens. 
+============= Module =========== +== Sub Tree == +js_var Decl: x=0 +== Sub Tree == +console.log(x) +== Sub Tree == +x Assign -1 +== Sub Tree == +console.log(x) diff --git a/src/MapleFE/test/typescript/unit_tests/long-string.ts b/src/MapleFE/test/typescript/unit_tests/long-string.ts new file mode 100644 index 0000000000000000000000000000000000000000..4a3f849389269ee2960233a3b17ca6e382b2b36c --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/long-string.ts @@ -0,0 +1,5 @@ +const obj = { + str: "this is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long 
long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...", +}; + +console.log(obj); diff --git a/src/MapleFE/test/typescript/unit_tests/long-string.ts.result b/src/MapleFE/test/typescript/unit_tests/long-string.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..96d27c6557181486010fb6d34da5f2d99640294c --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/long-string.ts.result @@ -0,0 +1,7 @@ +Matched 10 tokens. +Matched 17 tokens. 
+============= Module =========== +== Sub Tree == +js_const Decl: obj= {str:"this is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long 
long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string...\nthis is a long long long string..."} +== Sub Tree == +console.log(obj) diff --git a/src/MapleFE/test/typescript/unit_tests/mapped-type-with-modifier.ts b/src/MapleFE/test/typescript/unit_tests/mapped-type-with-modifier.ts new file mode 100644 index 0000000000000000000000000000000000000000..b45c6f5d9fcef9a78f17bfffc6abe75708c2a651 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/mapped-type-with-modifier.ts @@ -0,0 +1,6 @@ +// ref: cocos splash-screen.ts line 59. +// ref: https://www.typescriptlang.org/docs/handbook/2/mapped-types.html#mapping-modifiers + +type mutableType = { + -readonly [prop in keyof T]: T[prop]; +}; diff --git a/src/MapleFE/test/typescript/unit_tests/mapped-type-with-modifier.ts.result b/src/MapleFE/test/typescript/unit_tests/mapped-type-with-modifier.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..b5634f4b07b05db1f5f2f83e352b5f42510b2ca4 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/mapped-type-with-modifier.ts.result @@ -0,0 +1,4 @@ +Matched 23 tokens. +============= Module =========== +== Sub Tree == + type mutableType = {[prop in keyof T] : T[prop] } diff --git a/src/MapleFE/test/typescript/unit_tests/mapped-type.ts b/src/MapleFE/test/typescript/unit_tests/mapped-type.ts new file mode 100644 index 0000000000000000000000000000000000000000..7b53529ee2da333b564d1a80f43237a91098913b --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/mapped-type.ts @@ -0,0 +1,9 @@ +class Base { + [key: string]: number | string; +} +class Derived extends Base {} +type T = { [key in E[keyof E]]: string }; + +var obj: T = { str: "abc" }; +obj[0] = "zero"; +console.log(obj); diff --git a/src/MapleFE/test/typescript/unit_tests/mapped-type.ts.result b/src/MapleFE/test/typescript/unit_tests/mapped-type.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..8df9e1714836cfd209ca567021afe6fbde22755d --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/mapped-type.ts.result @@ -0,0 +1,35 @@ +Matched 14 tokens. +Matched 20 tokens. +Matched 42 tokens. +Matched 56 tokens. +Matched 63 tokens. +Matched 70 tokens. 
+============= Module =========== +== Sub Tree == +class Base + Fields: + union = number | string + Instance Initializer: + Constructors: + Methods: + LocalClasses: + LocalInterfaces: + +== Sub Tree == +class Derived + Fields: + + Instance Initializer: + Constructors: + Methods: + LocalClasses: + LocalInterfaces: + +== Sub Tree == + type T = {[key in E[ keyof E]] : string } +== Sub Tree == +js_var Decl: obj= {str:"abc"} +== Sub Tree == +obj[0] Assign "zero" +== Sub Tree == +console.log(obj) diff --git a/src/MapleFE/test/typescript/unit_tests/mapping-modifier.ts b/src/MapleFE/test/typescript/unit_tests/mapping-modifier.ts new file mode 100644 index 0000000000000000000000000000000000000000..884989b2e1e1e291a87423f7ada1b91e7c16e56a --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/mapping-modifier.ts @@ -0,0 +1,16 @@ +type Concrete<T> = { + [P in keyof T]-?: T[P]; +}; + +interface IFace { + name: string; + age?: number; +} + +class Klass implements Concrete<IFace> { + name: string = "No-name"; + age: number = 0; +} + +var obj: Klass = new Klass(); +console.log(obj); diff --git a/src/MapleFE/test/typescript/unit_tests/mapping-modifier.ts.result b/src/MapleFE/test/typescript/unit_tests/mapping-modifier.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..3769a5b1ddd713aa96dcec088ce2baa0119c5ed8 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/mapping-modifier.ts.result @@ -0,0 +1,24 @@ +Matched 23 tokens. +Matched 36 tokens. +Matched 57 tokens. +Matched 67 tokens. +Matched 74 tokens. +============= Module =========== +== Sub Tree == + type Concrete = {[P in keyof T] : T[P] } +== Sub Tree == +ts_interface: IFace {name;age? } +== Sub Tree == +class Klass + Fields: + name="No-name" age=0 + Instance Initializer: + Constructors: + Methods: + LocalClasses: + LocalInterfaces: + +== Sub Tree == +js_var Decl: obj=new Klass() +== Sub Tree == +console.log(obj) diff --git a/src/MapleFE/test/typescript/unit_tests/member-function-with-callback.ts b/src/MapleFE/test/typescript/unit_tests/member-function-with-callback.ts new file mode 100644 index 0000000000000000000000000000000000000000..935df78071b308aa2d1961fcb7bd34f444be9b26 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/member-function-with-callback.ts @@ -0,0 +1,19 @@ +abstract class Base { + func(f: () => void): void { + f(); + } + public abstract func2(f: () => void): void; +} + +class Klass extends Base { + func2(f: () => void): void { + f(); + } +} + +function foo(): void { + console.log("foo"); +} + +var obj: Klass = new Klass(); +obj.func(foo); diff --git a/src/MapleFE/test/typescript/unit_tests/member-function-with-callback.ts.result b/src/MapleFE/test/typescript/unit_tests/member-function-with-callback.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..e397573a814cc33d5c5e09d37f064bcdf952214e --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/member-function-with-callback.ts.result @@ -0,0 +1,39 @@ +Matched 36 tokens. +Matched 59 tokens. +Matched 74 tokens. +Matched 84 tokens. +Matched 91 tokens.
+============= Module =========== +== Sub Tree == +class Base + Fields: + + Instance Initializer: + Constructors: + Methods: + func func(f) throws: + f() + func func2(f) throws: + LocalClasses: + LocalInterfaces: + +== Sub Tree == +class Klass + Fields: + + Instance Initializer: + Constructors: + Methods: + func func2(f) throws: + f() + LocalClasses: + LocalInterfaces: + +== Sub Tree == +func foo() throws: + console.log("foo") + +== Sub Tree == +js_var Decl: obj=new Klass() +== Sub Tree == +obj.func(foo) diff --git a/src/MapleFE/test/typescript/unit_tests/member-functions.ts b/src/MapleFE/test/typescript/unit_tests/member-functions.ts new file mode 100644 index 0000000000000000000000000000000000000000..510006896b64fabdb2bda9055aeca02586a88ff1 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/member-functions.ts @@ -0,0 +1,20 @@ +class MyClass { + operand: number; + + constructor(opr: number) { + this.operand = opr; + } + + calc?: (x: number) => number; + + add = (x: number): number => this.operand + x; +} + +MyClass.prototype.calc = function (this: MyClass, x: number) { + return this.operand + x; +}; + +var myObj = new MyClass(1); + +console.log(myObj.calc!(2)); +console.log(myObj.add(3)); diff --git a/src/MapleFE/test/typescript/unit_tests/member-functions.ts.result b/src/MapleFE/test/typescript/unit_tests/member-functions.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..1b0eefdc31cfa556f681a8c6766d65cfbcf9df9a --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/member-functions.ts.result @@ -0,0 +1,28 @@ +Matched 49 tokens. +Matched 75 tokens. +Matched 84 tokens. +Matched 97 tokens. +Matched 109 tokens. +============= Module =========== +== Sub Tree == +class MyClass + Fields: + operand calc? add=(x) -> this.operand Add x + Instance Initializer: + Constructors: + constructor (opr) throws: + this.operand Assign opr + Methods: + LocalClasses: + LocalInterfaces: + +== Sub Tree == +MyClass.prototype.calc Assign func (this,x) throws: + return this.operand Add x + +== Sub Tree == +js_var Decl: myObj=new MyClass(1) +== Sub Tree == +console.log(myObj.calc!(2)) +== Sub Tree == +console.log(myObj.add(3)) diff --git a/src/MapleFE/test/typescript/unit_tests/method-of-interface.ts b/src/MapleFE/test/typescript/unit_tests/method-of-interface.ts new file mode 100644 index 0000000000000000000000000000000000000000..17c6a390b7d9ea2a6f16f352109a7abec77beb87 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/method-of-interface.ts @@ -0,0 +1,9 @@ +interface Klass1 {} + +interface Klass2 { + name: string; +} + +interface Klass3 { + getObj(o: Klass1): Klass2; +} diff --git a/src/MapleFE/test/typescript/unit_tests/method-of-interface.ts.result b/src/MapleFE/test/typescript/unit_tests/method-of-interface.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..0eacf21e19f3cd9b7640007aabadedd32ae5e140 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/method-of-interface.ts.result @@ -0,0 +1,11 @@ +Matched 4 tokens. +Matched 12 tokens. +Matched 25 tokens. 
+============= Module =========== +== Sub Tree == +ts_interface: Klass1 { } +== Sub Tree == +ts_interface: Klass2 {name } +== Sub Tree == +ts_interface: Klass3 {func getObj(o) throws: + } diff --git a/src/MapleFE/test/typescript/unit_tests/method-with-return-as-name.ts b/src/MapleFE/test/typescript/unit_tests/method-with-return-as-name.ts new file mode 100644 index 0000000000000000000000000000000000000000..abdbb740153406141e2d96b5ece5574a456df3f6 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/method-with-return-as-name.ts @@ -0,0 +1,11 @@ +class Klass { + return(x: number) { + console.log(x); + } + foo(x: number) { + this.return(x); + } +} + +var obj: Klass = new Klass(); +obj.foo(10); diff --git a/src/MapleFE/test/typescript/unit_tests/method-with-return-as-name.ts.result b/src/MapleFE/test/typescript/unit_tests/method-with-return-as-name.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..5cca1761f0b53ba18d5fc67ad77223d03288031f --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/method-with-return-as-name.ts.result @@ -0,0 +1,22 @@ +Matched 34 tokens. +Matched 44 tokens. +Matched 51 tokens. +============= Module =========== +== Sub Tree == +class Klass + Fields: + + Instance Initializer: + Constructors: + Methods: + func return(x) throws: + console.log(x) + func foo(x) throws: + this.return(x) + LocalClasses: + LocalInterfaces: + +== Sub Tree == +js_var Decl: obj=new Klass() +== Sub Tree == +obj.foo(10) diff --git a/src/MapleFE/test/typescript/unit_tests/module-a.ts b/src/MapleFE/test/typescript/unit_tests/module-a.ts new file mode 100644 index 0000000000000000000000000000000000000000..e514f1b27e2be2e0a93d5eab7b58ee2a2fec9906 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/module-a.ts @@ -0,0 +1,3 @@ +import { add } from "./module-b"; +var x: number = 20; +console.log(add(x)); diff --git a/src/MapleFE/test/typescript/unit_tests/module-a.ts.result b/src/MapleFE/test/typescript/unit_tests/module-a.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..3a3b8908dae1bd8e0c91149bae1d9a88ff430b54 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/module-a.ts.result @@ -0,0 +1,10 @@ +Matched 7 tokens. +Matched 14 tokens. +Matched 24 tokens. +============= Module =========== +== Sub Tree == +import {add} "./module-b" +== Sub Tree == +js_var Decl: x=20 +== Sub Tree == +console.log(add(x)) diff --git a/src/MapleFE/test/typescript/unit_tests/module-b.ts b/src/MapleFE/test/typescript/unit_tests/module-b.ts new file mode 100644 index 0000000000000000000000000000000000000000..071ca9289a55d6b3a6af41bb232c2d9ce844fc6a --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/module-b.ts @@ -0,0 +1,4 @@ +var x: number = 10; +export function add(y: number) { + return x + y; +} diff --git a/src/MapleFE/test/typescript/unit_tests/module-b.ts.result b/src/MapleFE/test/typescript/unit_tests/module-b.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..59eb90cbd7e943c75ea5b74a72d52b8e2a40ae13 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/module-b.ts.result @@ -0,0 +1,9 @@ +Matched 7 tokens. +Matched 22 tokens. 
+============= Module =========== +== Sub Tree == +js_var Decl: x=10 +== Sub Tree == +export {func add(y) throws: + return x Add y +} diff --git a/src/MapleFE/test/typescript/unit_tests/module-c.ts b/src/MapleFE/test/typescript/unit_tests/module-c.ts new file mode 100644 index 0000000000000000000000000000000000000000..7c69ac6668f1151318cdbec775f00724102b7565 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/module-c.ts @@ -0,0 +1,2 @@ +import * as x from "./module-b"; +console.log(x.add(100)); diff --git a/src/MapleFE/test/typescript/unit_tests/module-c.ts.result b/src/MapleFE/test/typescript/unit_tests/module-c.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..426f361cf0ca0dd0d4d6b5fd7e41af3dd913851f --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/module-c.ts.result @@ -0,0 +1,7 @@ +Matched 7 tokens. +Matched 19 tokens. +============= Module =========== +== Sub Tree == +import { * as x} "./module-b" +== Sub Tree == +console.log(x.add(100)) diff --git a/src/MapleFE/test/typescript/unit_tests/module-vs-namespace.ts b/src/MapleFE/test/typescript/unit_tests/module-vs-namespace.ts new file mode 100644 index 0000000000000000000000000000000000000000..a5fbe24b3e2fbb615004c17788a984a93892080b --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/module-vs-namespace.ts @@ -0,0 +1,24 @@ +module m_for_in { + let arr : Array<number> = [12, 34, 56]; + for (let x in arr) { + console.log(x, arr[x]); + } +} +module m_for_of { + let arr : Array<number> = [11, 22, 33]; + for (let x of arr) { + console.log(x); + } +} +namespace ns_for_in { + let arr : Array<number> = [12, 34, 56]; + for (let x in arr) { + console.log(x, arr[x]); + } +} +namespace ns_for_of { + let arr : Array<number> = [11, 22, 33]; + for (let x of arr) { + console.log(x); + } +} diff --git a/src/MapleFE/test/typescript/unit_tests/module-vs-namespace.ts.result b/src/MapleFE/test/typescript/unit_tests/module-vs-namespace.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..c569cd8d5134a72cd3038fc769ebf5a17ec55257 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/module-vs-namespace.ts.result @@ -0,0 +1,37 @@ +Matched 41 tokens. +Matched 77 tokens. +Matched 118 tokens. +Matched 154 tokens. +============= Module =========== +== Sub Tree == +============= Module =========== +== Sub Tree == +js_let Decl: arr=[12,34,56] +== Sub Tree == +for ( ) + console.log(x,arr[x]) + + +== Sub Tree == +============= Module =========== +== Sub Tree == +js_let Decl: arr=[11,22,33] +== Sub Tree == +for ( ) + console.log(x) + + +== Sub Tree == +namespace ns_for_in + js_let Decl: arr=[12,34,56] + for ( ) + console.log(x,arr[x]) + + +== Sub Tree == +namespace ns_for_of + js_let Decl: arr=[11,22,33] + for ( ) + console.log(x) + + diff --git a/src/MapleFE/test/typescript/unit_tests/multi-dim-array.ts b/src/MapleFE/test/typescript/unit_tests/multi-dim-array.ts new file mode 100644 index 0000000000000000000000000000000000000000..615b0602ccdfd19e65692507e3e89c6217b46f02 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/multi-dim-array.ts @@ -0,0 +1 @@ +type Type = { func: () => Array>; }; diff --git a/src/MapleFE/test/typescript/unit_tests/multi-dim-array.ts.result b/src/MapleFE/test/typescript/unit_tests/multi-dim-array.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..18f2ae887f4b1242a010cc3bf86159bb4cc81d68 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/multi-dim-array.ts.result @@ -0,0 +1,4 @@ +Matched 18 tokens.
+============= Module =========== +== Sub Tree == + type Type = {func } diff --git a/src/MapleFE/test/typescript/unit_tests/multi-line-string-literal.ts b/src/MapleFE/test/typescript/unit_tests/multi-line-string-literal.ts new file mode 100644 index 0000000000000000000000000000000000000000..26e132fab12b23e33936c24408faf09f8fedf865 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/multi-line-string-literal.ts @@ -0,0 +1,4 @@ +var str: string = "This \ +is \ +a string literal"; +console.log(str); diff --git a/src/MapleFE/test/typescript/unit_tests/multi-line-string-literal.ts.result b/src/MapleFE/test/typescript/unit_tests/multi-line-string-literal.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..7a631af42512bbc917e468232fa4ac02bdac890c --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/multi-line-string-literal.ts.result @@ -0,0 +1,7 @@ +Matched 7 tokens. +Matched 14 tokens. +============= Module =========== +== Sub Tree == +js_var Decl: str="This is a string literal" +== Sub Tree == +console.log(str) diff --git a/src/MapleFE/test/typescript/unit_tests/multi-vars.ts b/src/MapleFE/test/typescript/unit_tests/multi-vars.ts new file mode 100644 index 0000000000000000000000000000000000000000..db336810cdcb5ebf7ef079ca24fc1b0a82e3931c --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/multi-vars.ts @@ -0,0 +1,2 @@ +var x: number, y: number, z: number; +x = y = z = 1; diff --git a/src/MapleFE/test/typescript/unit_tests/multi-vars.ts.result b/src/MapleFE/test/typescript/unit_tests/multi-vars.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..abc43558d167ef5df41fd1541ff3fa3a8183d66c --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/multi-vars.ts.result @@ -0,0 +1,11 @@ +Matched 13 tokens. +Matched 21 tokens. +============= Module =========== +== Sub Tree == +js_var Decl: x +== Sub Tree == +js_var Decl: y +== Sub Tree == +js_var Decl: z +== Sub Tree == +x Assign y Assign z Assign 1 diff --git a/src/MapleFE/test/typescript/unit_tests/namespace-with-closure.ts b/src/MapleFE/test/typescript/unit_tests/namespace-with-closure.ts new file mode 100644 index 0000000000000000000000000000000000000000..4adc7d8c09b8ec05265a671f6fc83e6bbc33aa9e --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/namespace-with-closure.ts @@ -0,0 +1,16 @@ +namespace ns { + export function func() : Function { + let i: number = 10; + return () => ++i; + } +} + +import myFunc = ns.func; + +var f: Function = myFunc(); +console.log(f()); +console.log(f()); + +f = ns.func(); +console.log(f()); +console.log(f()); diff --git a/src/MapleFE/test/typescript/unit_tests/namespace-with-closure.ts.result b/src/MapleFE/test/typescript/unit_tests/namespace-with-closure.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..3c7a2f5a188279e7137694a6266b8470a849d27c --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/namespace-with-closure.ts.result @@ -0,0 +1,30 @@ +Matched 27 tokens. +Matched 34 tokens. +Matched 43 tokens. +Matched 52 tokens. +Matched 61 tokens. +Matched 69 tokens. +Matched 78 tokens. +Matched 87 tokens. 
+============= Module =========== +== Sub Tree == +namespace ns + export {func func() throws: + js_let Decl: i=10 + return () -> PreInc i +} + +== Sub Tree == +import {ns.func as myFunc} +== Sub Tree == +js_var Decl: f=myFunc() +== Sub Tree == +console.log(f()) +== Sub Tree == +console.log(f()) +== Sub Tree == +f Assign ns.func() +== Sub Tree == +console.log(f()) +== Sub Tree == +console.log(f()) diff --git a/src/MapleFE/test/typescript/unit_tests/namespace-with-closure2.ts b/src/MapleFE/test/typescript/unit_tests/namespace-with-closure2.ts new file mode 100644 index 0000000000000000000000000000000000000000..7d574427d5eb747f51d950fa45eb7649bec1c480 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/namespace-with-closure2.ts @@ -0,0 +1,16 @@ +namespace ns { + let i: number = 10; + export function func() : Function { + return () => ++i; + } +} + +import myFunc = ns.func; + +var f: Function = myFunc(); +console.log(f()); +console.log(f()); + +f = ns.func(); +console.log(f()); +console.log(f()); diff --git a/src/MapleFE/test/typescript/unit_tests/namespace-with-closure2.ts.result b/src/MapleFE/test/typescript/unit_tests/namespace-with-closure2.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..575d2bbaa036fb2a261238713849b0c26c47f524 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/namespace-with-closure2.ts.result @@ -0,0 +1,30 @@ +Matched 27 tokens. +Matched 34 tokens. +Matched 43 tokens. +Matched 52 tokens. +Matched 61 tokens. +Matched 69 tokens. +Matched 78 tokens. +Matched 87 tokens. +============= Module =========== +== Sub Tree == +namespace ns + js_let Decl: i=10 + export {func func() throws: + return () -> PreInc i +} + +== Sub Tree == +import {ns.func as myFunc} +== Sub Tree == +js_var Decl: f=myFunc() +== Sub Tree == +console.log(f()) +== Sub Tree == +console.log(f()) +== Sub Tree == +f Assign ns.func() +== Sub Tree == +console.log(f()) +== Sub Tree == +console.log(f()) diff --git a/src/MapleFE/test/typescript/unit_tests/namespace.ts b/src/MapleFE/test/typescript/unit_tests/namespace.ts new file mode 100644 index 0000000000000000000000000000000000000000..0063f3281a004f26a4ef1b65ede34749f5456c02 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/namespace.ts @@ -0,0 +1,8 @@ +namespace ns_a { + var hello: string = "hello"; + export function foo(): string { + return hello; + } +} +console.log(ns_a.foo()); +export { ns_a as ns_1 }; diff --git a/src/MapleFE/test/typescript/unit_tests/namespace.ts.result b/src/MapleFE/test/typescript/unit_tests/namespace.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..5bf02293716c720a8fc021ad99f67d75326cf63e --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/namespace.ts.result @@ -0,0 +1,15 @@ +Matched 23 tokens. +Matched 34 tokens. +Matched 41 tokens. 
+============= Module =========== +== Sub Tree == +namespace ns_a + js_var Decl: hello="hello" + export {func foo() throws: + return hello +} + +== Sub Tree == +console.log(ns_a.foo()) +== Sub Tree == +export {ns_a as ns_1} diff --git a/src/MapleFE/test/typescript/unit_tests/namespace2.ts b/src/MapleFE/test/typescript/unit_tests/namespace2.ts new file mode 100644 index 0000000000000000000000000000000000000000..daf0a9e0e53b67d0d35d05944387690cc344ddc6 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/namespace2.ts @@ -0,0 +1,10 @@ +namespace ns_a { + var x: number = 10; + var hello: string = "hello"; + export function foo(): string { + return hello; + } + for (var i = 0; i < x; ++i) console.log(i); +} +console.log(ns_a.foo()); +export { ns_a as ns_1 }; diff --git a/src/MapleFE/test/typescript/unit_tests/namespace2.ts.result b/src/MapleFE/test/typescript/unit_tests/namespace2.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..4cedbcd03f32b0053c73e2f37da2373d190a9e5e --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/namespace2.ts.result @@ -0,0 +1,18 @@ +Matched 51 tokens. +Matched 62 tokens. +Matched 69 tokens. +============= Module =========== +== Sub Tree == +namespace ns_a + js_var Decl: x=10 + js_var Decl: hello="hello" + export {func foo() throws: + return hello +} + for ( ) + console.log(i) + +== Sub Tree == +console.log(ns_a.foo()) +== Sub Tree == +export {ns_a as ns_1} diff --git a/src/MapleFE/test/typescript/unit_tests/namespace3.ts b/src/MapleFE/test/typescript/unit_tests/namespace3.ts new file mode 100644 index 0000000000000000000000000000000000000000..d4e3550e0e43ed8db691f96cbba102d5bc69f397 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/namespace3.ts @@ -0,0 +1,7 @@ +namespace ns.a { + var hello: string = "hello"; + export function foo(): string { + return hello; + } +} +console.log(ns.a.foo()); diff --git a/src/MapleFE/test/typescript/unit_tests/namespace3.ts.result b/src/MapleFE/test/typescript/unit_tests/namespace3.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..39f5a1f51e0770d99a529b86247cce0e6329f80d --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/namespace3.ts.result @@ -0,0 +1,12 @@ +Matched 25 tokens. +Matched 38 tokens. +============= Module =========== +== Sub Tree == +namespace ns.a + js_var Decl: hello="hello" + export {func foo() throws: + return hello +} + +== Sub Tree == +console.log(ns.a.foo()) diff --git a/src/MapleFE/test/typescript/unit_tests/nested-loop.ts b/src/MapleFE/test/typescript/unit_tests/nested-loop.ts new file mode 100644 index 0000000000000000000000000000000000000000..bf840bd995669375a43dab0b299876cd9eff1035 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/nested-loop.ts @@ -0,0 +1,12 @@ +var arr: number[] = [7, 4, 5, 9, 2, 8, 1, 6, 3]; +var len: number = arr.length; +for (var i = 1; i < len; ++i) { + var j: number = i - 1; + var n = arr[i]; + while (n < arr[j] && j >= 0) { + arr[j + 1] = arr[j]; + --j; + } + arr[j + 1] = n; +} +console.log(arr); diff --git a/src/MapleFE/test/typescript/unit_tests/nested-loop.ts.result b/src/MapleFE/test/typescript/unit_tests/nested-loop.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..2f6bfa14e1794283fb23017275fa6b9f0c322e7f --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/nested-loop.ts.result @@ -0,0 +1,20 @@ +Matched 27 tokens. +Matched 36 tokens. +Matched 108 tokens. +Matched 115 tokens. 
+============= Module =========== +== Sub Tree == +js_var Decl: arr=[7,4,5,9,2,8,1,6,3] +== Sub Tree == +js_var Decl: len=arr.length +== Sub Tree == +for ( ) + js_var Decl: j=i Sub 1 + js_var Decl: n=arr[i] + while n LT arr[j] Land j GE 0 arr[j Add 1] Assign arr[j] + PreDec j + + arr[j Add 1] Assign n + +== Sub Tree == +console.log(arr) diff --git a/src/MapleFE/test/typescript/unit_tests/nested-switch.ts b/src/MapleFE/test/typescript/unit_tests/nested-switch.ts new file mode 100644 index 0000000000000000000000000000000000000000..5c5169341037312f061a69a01d82760808fbd242 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/nested-switch.ts @@ -0,0 +1,23 @@ +var n: number = 3; +outer: switch (true) { + case n < 5: + console.log(n, " is less than 5"); + case n > 2 && n < 5: + switch (n) { + case 3: + console.log(3); + break outer; + case 4: + console.log(4); + } + console.log(n, " + 1 is equal to", n + 1); + break; + case n == 6: + console.log(n, " is equal to 6"); + break; + case n < 8: + console.log(n, " is greater than 4 and less than 8"); + break; + default: + console.log(n, " is greater than 7"); +} diff --git a/src/MapleFE/test/typescript/unit_tests/nested-switch.ts.result b/src/MapleFE/test/typescript/unit_tests/nested-switch.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..d3cc51c43e971c32eeaa11cd84f0b1048cb026b4 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/nested-switch.ts.result @@ -0,0 +1,8 @@ +Matched 7 tokens. +Matched 125 tokens. +============= Module =========== +== Sub Tree == +js_var Decl: n=3 +== Sub Tree == +A switch + diff --git a/src/MapleFE/test/typescript/unit_tests/nested.ts b/src/MapleFE/test/typescript/unit_tests/nested.ts new file mode 100644 index 0000000000000000000000000000000000000000..aa3a1265240d29bd72004b89f33d2c5e5b247c80 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/nested.ts @@ -0,0 +1,11 @@ +let x: number = 10; + +function outer(y: number): number { + let x: number = 20; + function inner(): number { + return x + y; + } + return inner(); +} + +console.log(outer(100)); diff --git a/src/MapleFE/test/typescript/unit_tests/nested.ts.result b/src/MapleFE/test/typescript/unit_tests/nested.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..0eb487d7f9410554c4bf0c4b89dc3e742e4b6cc5 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/nested.ts.result @@ -0,0 +1,16 @@ +Matched 7 tokens. +Matched 43 tokens. +Matched 53 tokens. 
+============= Module =========== +== Sub Tree == +js_let Decl: x=10 +== Sub Tree == +func outer(y) throws: + js_let Decl: x=20 + func inner() throws: + return x Add y + + return inner() + +== Sub Tree == +console.log(outer(100)) diff --git a/src/MapleFE/test/typescript/unit_tests/new-array.ts b/src/MapleFE/test/typescript/unit_tests/new-array.ts new file mode 100644 index 0000000000000000000000000000000000000000..5b43f4bd71e633f38ca7c4fc5801100d33a9db9e --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/new-array.ts @@ -0,0 +1,6 @@ +var v: boolean = (() => { + const buf = new ArrayBuffer(2); + new DataView(buf).setInt16(0, 256, true); + return new Int16Array(buf)[0] === 256; +})(); +console.log(v); diff --git a/src/MapleFE/test/typescript/unit_tests/new-array.ts.result b/src/MapleFE/test/typescript/unit_tests/new-array.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..343c86d07ff381b8ec2bfe9c59df0657bd2fb08b --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/new-array.ts.result @@ -0,0 +1,10 @@ +Matched 51 tokens. +Matched 58 tokens. +============= Module =========== +== Sub Tree == +js_var Decl: v=() -> js_const Decl: buf=new ArrayBuffer(2) +new DataView(buf).setInt16(0,256,true) +return new Int16Array(buf)[0] StEq 256 +() +== Sub Tree == +console.log(v) diff --git a/src/MapleFE/test/typescript/unit_tests/new-typed.ts b/src/MapleFE/test/typescript/unit_tests/new-typed.ts new file mode 100644 index 0000000000000000000000000000000000000000..b4ce46d179ff0d57b7eb08fcdbb81337778a4670 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/new-typed.ts @@ -0,0 +1,5 @@ +class A { + a: number = 0; +} +var x : Function = A; +var y : A = new A(); diff --git a/src/MapleFE/test/typescript/unit_tests/new-typed.ts.result b/src/MapleFE/test/typescript/unit_tests/new-typed.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..061c44ed79a8fba937f33fdb2cc90d852af9ca1b --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/new-typed.ts.result @@ -0,0 +1,18 @@ +Matched 10 tokens. +Matched 17 tokens. +Matched 27 tokens. +============= Module =========== +== Sub Tree == +class A + Fields: + a=0 + Instance Initializer: + Constructors: + Methods: + LocalClasses: + LocalInterfaces: + +== Sub Tree == +js_var Decl: x=A +== Sub Tree == +js_var Decl: y=new A() diff --git a/src/MapleFE/test/typescript/unit_tests/new.ts b/src/MapleFE/test/typescript/unit_tests/new.ts new file mode 100644 index 0000000000000000000000000000000000000000..e15cd8cf434620b82476a358ed2d8fb1d73ad417 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/new.ts @@ -0,0 +1,5 @@ +class A { + a: number = 0; +} +var x = A; +var y = new A(); diff --git a/src/MapleFE/test/typescript/unit_tests/new.ts.result b/src/MapleFE/test/typescript/unit_tests/new.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..b04a81a2f86a804f93954656c46a02a950ebb11e --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/new.ts.result @@ -0,0 +1,18 @@ +Matched 10 tokens. +Matched 15 tokens. +Matched 23 tokens. 
+============= Module =========== +== Sub Tree == +class A + Fields: + a=0 + Instance Initializer: + Constructors: + Methods: + LocalClasses: + LocalInterfaces: + +== Sub Tree == +js_var Decl: x=A +== Sub Tree == +js_var Decl: y=new A() diff --git a/src/MapleFE/test/typescript/unit_tests/non-null-assertion-this.ts b/src/MapleFE/test/typescript/unit_tests/non-null-assertion-this.ts new file mode 100644 index 0000000000000000000000000000000000000000..56e3e9b6072acfea70473ecce6e42c59d2b202c2 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/non-null-assertion-this.ts @@ -0,0 +1,6 @@ +class Foo { + public _val: number = 0; + public func(): number { + return this._val! as number; + } +} diff --git a/src/MapleFE/test/typescript/unit_tests/non-null-assertion-this.ts.result b/src/MapleFE/test/typescript/unit_tests/non-null-assertion-this.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..c1164fc9ae80badf698158420119fd189d987dae --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/non-null-assertion-this.ts.result @@ -0,0 +1,14 @@ +Matched 27 tokens. +============= Module =========== +== Sub Tree == +class Foo + Fields: + _val=0 + Instance Initializer: + Constructors: + Methods: + func func() throws: + return this._val! + LocalClasses: + LocalInterfaces: + diff --git a/src/MapleFE/test/typescript/unit_tests/non-null-assertion.ts b/src/MapleFE/test/typescript/unit_tests/non-null-assertion.ts new file mode 100644 index 0000000000000000000000000000000000000000..f45fb51b3d4b3642942aef6c8a293f3105bf68e2 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/non-null-assertion.ts @@ -0,0 +1,6 @@ +// https://www.typescriptlang.org/docs/handbook/release-notes/typescript-2-0.html#non-null-assertion-operator +// used in cocos director.ts +function processEntity(e?: any) { + let s = e!.name; // Assert that e is non-null and access name + let t = e.name!; +} diff --git a/src/MapleFE/test/typescript/unit_tests/non-null-assertion.ts.result b/src/MapleFE/test/typescript/unit_tests/non-null-assertion.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..6128e1977886dfada07b6d220535976a53290442 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/non-null-assertion.ts.result @@ -0,0 +1,7 @@ +Matched 26 tokens. +============= Module =========== +== Sub Tree == +func processEntity(e?) throws: + js_let Decl: s=e!.name + js_let Decl: t=e.name! + diff --git a/src/MapleFE/test/typescript/unit_tests/non-null-assertion2.ts b/src/MapleFE/test/typescript/unit_tests/non-null-assertion2.ts new file mode 100644 index 0000000000000000000000000000000000000000..f9baf6a8064932ecf8e720b1dbf0f9779a5e8cad --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/non-null-assertion2.ts @@ -0,0 +1,9 @@ +function foo(): Object | null { + return new Object(); +} + +export const obj = (() => { + return foo()!; +})(); + +console.log(obj); diff --git a/src/MapleFE/test/typescript/unit_tests/non-null-assertion2.ts.result b/src/MapleFE/test/typescript/unit_tests/non-null-assertion2.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..eeb0773123f8b1a989b172ced8916c6d6168658c --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/non-null-assertion2.ts.result @@ -0,0 +1,13 @@ +Matched 16 tokens. +Matched 36 tokens. +Matched 43 tokens. 
+============= Module =========== +== Sub Tree == +func foo() throws: + return new Object() + +== Sub Tree == +export {js_const Decl: obj=() -> return foo() +()} +== Sub Tree == +console.log(obj) diff --git a/src/MapleFE/test/typescript/unit_tests/non-null-assertion3.ts b/src/MapleFE/test/typescript/unit_tests/non-null-assertion3.ts new file mode 100644 index 0000000000000000000000000000000000000000..a14849ff45cfbd56822b5391d7458c0ccf6de887 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/non-null-assertion3.ts @@ -0,0 +1,9 @@ +function foo(): Object | null { + return new Array(); +} + +export const obj = (() => { + return foo()! as unknown as Array; +})(); + +console.log(obj); diff --git a/src/MapleFE/test/typescript/unit_tests/non-null-assertion3.ts.result b/src/MapleFE/test/typescript/unit_tests/non-null-assertion3.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..c1e0b4969cc0c35977380fb5b41128d91db051a1 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/non-null-assertion3.ts.result @@ -0,0 +1,13 @@ +Matched 16 tokens. +Matched 43 tokens. +Matched 50 tokens. +============= Module =========== +== Sub Tree == +func foo() throws: + return new Array() + +== Sub Tree == +export {js_const Decl: obj=() -> return foo() +()} +== Sub Tree == +console.log(obj) diff --git a/src/MapleFE/test/typescript/unit_tests/non-null-assertion4.ts b/src/MapleFE/test/typescript/unit_tests/non-null-assertion4.ts new file mode 100644 index 0000000000000000000000000000000000000000..594f1d1799a4b8c7a2f75c4879ce0341a14fd7e9 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/non-null-assertion4.ts @@ -0,0 +1,3 @@ +var arr: Object[] = [new Object()]; +var obj = arr[0]!; +console.log(obj); diff --git a/src/MapleFE/test/typescript/unit_tests/non-null-assertion4.ts.result b/src/MapleFE/test/typescript/unit_tests/non-null-assertion4.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..1cb1d5237e689f6223c691aeb4b9d1cff6001dd3 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/non-null-assertion4.ts.result @@ -0,0 +1,10 @@ +Matched 14 tokens. +Matched 23 tokens. +Matched 30 tokens. +============= Module =========== +== Sub Tree == +js_var Decl: arr=[new Object()] +== Sub Tree == +js_var Decl: obj=arr[0] +== Sub Tree == +console.log(obj) diff --git a/src/MapleFE/test/typescript/unit_tests/nullish-assignment.ts b/src/MapleFE/test/typescript/unit_tests/nullish-assignment.ts new file mode 100644 index 0000000000000000000000000000000000000000..0624026d18104f1dad29eb3c55f0828b44c90cd9 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/nullish-assignment.ts @@ -0,0 +1,6 @@ +function func(num?: number): number { + num ??= 2; + return num * num; +} +console.log(func(10)); +console.log(func()); diff --git a/src/MapleFE/test/typescript/unit_tests/nullish-assignment.ts.result b/src/MapleFE/test/typescript/unit_tests/nullish-assignment.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..f46445509ede779ac84e48bc3951eb61442bf54c --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/nullish-assignment.ts.result @@ -0,0 +1,13 @@ +Matched 21 tokens. +Matched 31 tokens. +Matched 40 tokens. +============= Module =========== +== Sub Tree == +func func(num?) 
throws: + num NullAssign 2 + return num Mul num + +== Sub Tree == +console.log(func(10)) +== Sub Tree == +console.log(func()) diff --git a/src/MapleFE/test/typescript/unit_tests/nullist-coalescing.ts b/src/MapleFE/test/typescript/unit_tests/nullist-coalescing.ts new file mode 100644 index 0000000000000000000000000000000000000000..958714e1fce13126fb96c2bc6cdd325c0c86db5e --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/nullist-coalescing.ts @@ -0,0 +1,4 @@ +// https://www.typescriptlang.org/docs/handbook/release-notes/typescript-3-7.html#nullish-coalescing +let a: any = null; +let b: boolean = a ?? true; +console.log(a, b); diff --git a/src/MapleFE/test/typescript/unit_tests/nullist-coalescing.ts.result b/src/MapleFE/test/typescript/unit_tests/nullist-coalescing.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..ef193ece82ba4def24b6c52122acf3b35243f346 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/nullist-coalescing.ts.result @@ -0,0 +1,10 @@ +Matched 7 tokens. +Matched 16 tokens. +Matched 25 tokens. +============= Module =========== +== Sub Tree == +js_let Decl: a=null +== Sub Tree == +js_let Decl: b=a NullCoalesce true +== Sub Tree == +console.log(a,b) diff --git a/src/MapleFE/test/typescript/unit_tests/nullist-coalescing2.ts b/src/MapleFE/test/typescript/unit_tests/nullist-coalescing2.ts new file mode 100644 index 0000000000000000000000000000000000000000..2df1b8b9292897fd467b8844b3d9c1192adb7892 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/nullist-coalescing2.ts @@ -0,0 +1,7 @@ +function func(x: any): any { + return x; +} +let a: any = { f: false }; +// https://www.typescriptlang.org/docs/handbook/release-notes/typescript-3-7.html#optional-chaining +let b: boolean = func(a)?.f ?? true; +console.log(a, b); diff --git a/src/MapleFE/test/typescript/unit_tests/nullist-coalescing2.ts.result b/src/MapleFE/test/typescript/unit_tests/nullist-coalescing2.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..949c3926980ec2d7f6934f77de6333c1a022fc8e --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/nullist-coalescing2.ts.result @@ -0,0 +1,15 @@ +Matched 14 tokens. +Matched 25 tokens. +Matched 39 tokens. +Matched 48 tokens. +============= Module =========== +== Sub Tree == +func func(x) throws: + return x + +== Sub Tree == +js_let Decl: a= {f:false} +== Sub Tree == +js_let Decl: b=func(a).f NullCoalesce true +== Sub Tree == +console.log(a,b) diff --git a/src/MapleFE/test/typescript/unit_tests/number-array.ts b/src/MapleFE/test/typescript/unit_tests/number-array.ts new file mode 100644 index 0000000000000000000000000000000000000000..d4db7ddbcd1e3fcb4294e87be3f9fb035d09e967 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/number-array.ts @@ -0,0 +1,2 @@ +var arr: number[]; +var arr: number[] = [1]; diff --git a/src/MapleFE/test/typescript/unit_tests/number-array.ts.result b/src/MapleFE/test/typescript/unit_tests/number-array.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..95c41af3f33de13ad52751301695806a37248bff --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/number-array.ts.result @@ -0,0 +1,7 @@ +Matched 7 tokens. +Matched 18 tokens. 
+============= Module =========== +== Sub Tree == +js_var Decl: arr +== Sub Tree == +js_var Decl: arr=[1] diff --git a/src/MapleFE/test/typescript/unit_tests/number-literal.ts b/src/MapleFE/test/typescript/unit_tests/number-literal.ts new file mode 100644 index 0000000000000000000000000000000000000000..901536ae9582089642c65f4959d052483f7ba2d9 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/number-literal.ts @@ -0,0 +1,3 @@ +const x: number = .2; +const y: number = -.2; +console.log(x, y); diff --git a/src/MapleFE/test/typescript/unit_tests/number-literal.ts.result b/src/MapleFE/test/typescript/unit_tests/number-literal.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..577b2fff51d9752b137b0d5ba212dff293b9c373 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/number-literal.ts.result @@ -0,0 +1,10 @@ +Matched 7 tokens. +Matched 14 tokens. +Matched 23 tokens. +============= Module =========== +== Sub Tree == +js_const Decl: x=0.2 +== Sub Tree == +js_const Decl: y=-0.2 +== Sub Tree == +console.log(x,y) diff --git a/src/MapleFE/test/typescript/unit_tests/number-var.ts b/src/MapleFE/test/typescript/unit_tests/number-var.ts new file mode 100644 index 0000000000000000000000000000000000000000..257f24faa4df5439f6cf4f99501500eb9092f555 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/number-var.ts @@ -0,0 +1,7 @@ +var func: (number: number) => string; +function conv(number: number): string { + return number as unknown as string; +} + +func = conv; +console.log(func(123)); diff --git a/src/MapleFE/test/typescript/unit_tests/number-var.ts.result b/src/MapleFE/test/typescript/unit_tests/number-var.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..a1d0390d3fd0ea86fc6899efcd842cb7db124c69 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/number-var.ts.result @@ -0,0 +1,15 @@ +Matched 11 tokens. +Matched 29 tokens. +Matched 33 tokens. +Matched 43 tokens. +============= Module =========== +== Sub Tree == +js_var Decl: func +== Sub Tree == +func conv(number) throws: + return number + +== Sub Tree == +func Assign conv +== Sub Tree == +console.log(func(123)) diff --git a/src/MapleFE/test/typescript/unit_tests/number.ts b/src/MapleFE/test/typescript/unit_tests/number.ts new file mode 100644 index 0000000000000000000000000000000000000000..b0ed7e5f07171b97cbba2cf2ce4a08b2940dde42 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/number.ts @@ -0,0 +1,4 @@ +var x = 4278190335; +console.log(x); +x = -1; +console.log(x); diff --git a/src/MapleFE/test/typescript/unit_tests/number.ts.result b/src/MapleFE/test/typescript/unit_tests/number.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..836c3a5037a175231a1c1e698785a51d0f4eedb8 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/number.ts.result @@ -0,0 +1,13 @@ +Matched 5 tokens. +Matched 12 tokens. +Matched 16 tokens. +Matched 23 tokens. 
+============= Module =========== +== Sub Tree == +js_var Decl: x=4278190335 +== Sub Tree == +console.log(x) +== Sub Tree == +x Assign -1 +== Sub Tree == +console.log(x) diff --git a/src/MapleFE/test/typescript/unit_tests/obj.ts b/src/MapleFE/test/typescript/unit_tests/obj.ts new file mode 100644 index 0000000000000000000000000000000000000000..1582c89d2e038dd77e8dda82d9b9fc76b2e0e6b4 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/obj.ts @@ -0,0 +1,3 @@ +let john = { + lastName: "Smith" +}; diff --git a/src/MapleFE/test/typescript/unit_tests/obj.ts.result b/src/MapleFE/test/typescript/unit_tests/obj.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..9c213f7401e72104ae4458172b7e61bfacb2ed20 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/obj.ts.result @@ -0,0 +1,4 @@ +Matched 9 tokens. +============= Module =========== +== Sub Tree == +js_let Decl: john= {lastName:"Smith"} diff --git a/src/MapleFE/test/typescript/unit_tests/object-destructing.ts b/src/MapleFE/test/typescript/unit_tests/object-destructing.ts new file mode 100644 index 0000000000000000000000000000000000000000..9a189b6f038798483f376138a50ef441b841ac5d --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/object-destructing.ts @@ -0,0 +1,4 @@ +var x = [{ a: 1 }, { b: 2 }, { c: 3 }]; +console.log(x.length); +var [{ a: u }, { b: v }] = x; +console.log(u, v); diff --git a/src/MapleFE/test/typescript/unit_tests/object-destructing.ts.result b/src/MapleFE/test/typescript/unit_tests/object-destructing.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..91161a894104ece0a4634175afda0438c20f6eee --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/object-destructing.ts.result @@ -0,0 +1,13 @@ +Matched 23 tokens. +Matched 32 tokens. +Matched 49 tokens. +Matched 58 tokens. +============= Module =========== +== Sub Tree == +js_var Decl: x=[ {a:1}, {b:2}, {c:3}] +== Sub Tree == +console.log(x.length) +== Sub Tree == +js_var Decl: {{a:u}, {b:v}} +== Sub Tree == +console.log(u,v) diff --git a/src/MapleFE/test/typescript/unit_tests/object-destructing2.ts b/src/MapleFE/test/typescript/unit_tests/object-destructing2.ts new file mode 100644 index 0000000000000000000000000000000000000000..0dfbec3fd8ae78fa700968e7fad94b84f13eafb9 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/object-destructing2.ts @@ -0,0 +1,6 @@ +function func(args: any): any { + const {foo, ...others} = args; + return others; +} + +console.log(func({foo: "Foo", bar: "Bar", tee: "Tee"})); diff --git a/src/MapleFE/test/typescript/unit_tests/object-destructing2.ts.result b/src/MapleFE/test/typescript/unit_tests/object-destructing2.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..6fc4e5d75956688e09ecf1d36a431fe684dae084 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/object-destructing2.ts.result @@ -0,0 +1,10 @@ +Matched 24 tokens. +Matched 46 tokens. 
+============= Module =========== +== Sub Tree == +func func(args) throws: + js_const Decl: {:foo, :...others} + return others + +== Sub Tree == +console.log(func( {foo:"Foo", bar:"Bar", tee:"Tee"})) diff --git a/src/MapleFE/test/typescript/unit_tests/object-func-prop.ts b/src/MapleFE/test/typescript/unit_tests/object-func-prop.ts new file mode 100644 index 0000000000000000000000000000000000000000..178b591ac2ab36db3bed90ac019d3e067689bd7b --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/object-func-prop.ts @@ -0,0 +1,7 @@ +const initializer: any = { + foo(data: string) { + console.log(data); + }, +}; + +initializer.foo("test"); diff --git a/src/MapleFE/test/typescript/unit_tests/object-func-prop.ts.result b/src/MapleFE/test/typescript/unit_tests/object-func-prop.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..3fbfed7752d0f03706a8c021e16058787818956c --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/object-func-prop.ts.result @@ -0,0 +1,9 @@ +Matched 24 tokens. +Matched 31 tokens. +============= Module =========== +== Sub Tree == +js_const Decl: initializer= {foo:func foo(data) throws: + console.log(data) +} +== Sub Tree == +initializer.foo("test") diff --git a/src/MapleFE/test/typescript/unit_tests/object-keys-arr-map.ts b/src/MapleFE/test/typescript/unit_tests/object-keys-arr-map.ts new file mode 100644 index 0000000000000000000000000000000000000000..49a05a210e6a0d7bc1375281636431b25957b98c --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/object-keys-arr-map.ts @@ -0,0 +1,4 @@ +// https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Object/keys +// https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Array/map +var objA = { objB: { objC: {} } }; +const list = Object.keys(objA.objB.objC).map((x) => objA.objB.objC[x]); diff --git a/src/MapleFE/test/typescript/unit_tests/object-keys-arr-map.ts.result b/src/MapleFE/test/typescript/unit_tests/object-keys-arr-map.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..60163cf00d8476cecead062e5c0c3b47276d79e1 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/object-keys-arr-map.ts.result @@ -0,0 +1,7 @@ +Matched 14 tokens. +Matched 44 tokens. +============= Module =========== +== Sub Tree == +js_var Decl: objA= {objB: {objC: {}}} +== Sub Tree == +js_const Decl: list=Object.keys(objA.objB.objC).map((x) -> objA.objB.objC[x]) diff --git a/src/MapleFE/test/typescript/unit_tests/object-literal.ts b/src/MapleFE/test/typescript/unit_tests/object-literal.ts new file mode 100644 index 0000000000000000000000000000000000000000..701e019ea40eeee9a9444f576f93cb35dbd8148b --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/object-literal.ts @@ -0,0 +1,2 @@ +var x: any = { n: [-1.5, 0, 1.0] }; +console.log(x); diff --git a/src/MapleFE/test/typescript/unit_tests/object-literal.ts.result b/src/MapleFE/test/typescript/unit_tests/object-literal.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..e5424fa6f8f3ef9115ccebc1bec2a4faf5d2f44f --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/object-literal.ts.result @@ -0,0 +1,7 @@ +Matched 17 tokens. +Matched 24 tokens. 
+============= Module =========== +== Sub Tree == +js_var Decl: x= {n:[-1.5,0,1]} +== Sub Tree == +console.log(x) diff --git a/src/MapleFE/test/typescript/unit_tests/objects.ts b/src/MapleFE/test/typescript/unit_tests/objects.ts new file mode 100644 index 0000000000000000000000000000000000000000..15f2f507ab535d94d382ee52ea7e4278c5e7fe36 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/objects.ts @@ -0,0 +1,22 @@ +// different ways of creating an object + +function func() {} + +let funcObj = function (name: string) { + this._name = name; +}; + +class ObjClass { + private _name: string; + constructor(name: string) { + this._name = name; + } +} + +let obj_a: Object = {}; +let obj_b: Object = Object.create({}); +let obj_c: Object = Object.create(null); +let obj_d: Object = new Object(); +let obj_e: Object = new func(); +let obj_f: Object = new funcObj("John"); +let obj_g: Object = new ObjClass("John"); diff --git a/src/MapleFE/test/typescript/unit_tests/objects.ts.result b/src/MapleFE/test/typescript/unit_tests/objects.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..1888e60831d18dbe197bf43457cc8b20dc138707 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/objects.ts.result @@ -0,0 +1,44 @@ +Matched 6 tokens. +Matched 24 tokens. +Matched 47 tokens. +Matched 55 tokens. +Matched 68 tokens. +Matched 80 tokens. +Matched 90 tokens. +Matched 100 tokens. +Matched 111 tokens. +Matched 122 tokens. +============= Module =========== +== Sub Tree == +func func() throws: + +== Sub Tree == +js_let Decl: funcObj=func (name) throws: + this._name Assign name + +== Sub Tree == +class ObjClass + Fields: + _name + Instance Initializer: + Constructors: + constructor (name) throws: + this._name Assign name + Methods: + LocalClasses: + LocalInterfaces: + +== Sub Tree == +js_let Decl: obj_a= {} +== Sub Tree == +js_let Decl: obj_b=Object.create( {}) +== Sub Tree == +js_let Decl: obj_c=Object.create(null) +== Sub Tree == +js_let Decl: obj_d=new Object() +== Sub Tree == +js_let Decl: obj_e=new func() +== Sub Tree == +js_let Decl: obj_f=new funcObj("John") +== Sub Tree == +js_let Decl: obj_g=new ObjClass("John") diff --git a/src/MapleFE/test/typescript/unit_tests/optional-chaining.ts b/src/MapleFE/test/typescript/unit_tests/optional-chaining.ts new file mode 100644 index 0000000000000000000000000000000000000000..c17627bdbe8af6e60d5a3aeea6e2c5bee0645bbe --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/optional-chaining.ts @@ -0,0 +1,7 @@ +function func(arr?: number[]) { + return arr?.[0]; +} + +let arr: number[] = [1, 2, 3]; +console.log(func(arr)); +console.log(func()); diff --git a/src/MapleFE/test/typescript/unit_tests/optional-chaining.ts.result b/src/MapleFE/test/typescript/unit_tests/optional-chaining.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..f08105ea81725c5c5454525f8d18adb18bfb86db --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/optional-chaining.ts.result @@ -0,0 +1,15 @@ +Matched 19 tokens. +Matched 34 tokens. +Matched 44 tokens. +Matched 53 tokens. +============= Module =========== +== Sub Tree == +func func(arr?) 
throws: + return arr?[0] + +== Sub Tree == +js_let Decl: arr=[1,2,3] +== Sub Tree == +console.log(func(arr)) +== Sub Tree == +console.log(func()) diff --git a/src/MapleFE/test/typescript/unit_tests/optional-chaining2.ts b/src/MapleFE/test/typescript/unit_tests/optional-chaining2.ts new file mode 100644 index 0000000000000000000000000000000000000000..7b62c4ca9453e63f1fca540343ab5bc5efa2c4df --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/optional-chaining2.ts @@ -0,0 +1,6 @@ +const obj = { + func: function () { + return 123; + }, +}; +console.log(obj.func?.()); diff --git a/src/MapleFE/test/typescript/unit_tests/optional-chaining2.ts.result b/src/MapleFE/test/typescript/unit_tests/optional-chaining2.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..3d0d7d1474b2f473ba1360e1985b5b97f1f9d1bb --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/optional-chaining2.ts.result @@ -0,0 +1,9 @@ +Matched 17 tokens. +Matched 29 tokens. +============= Module =========== +== Sub Tree == +js_const Decl: obj= {func:func () throws: + return 123 +} +== Sub Tree == +console.log(obj.func()) diff --git a/src/MapleFE/test/typescript/unit_tests/optional-chaining3.ts b/src/MapleFE/test/typescript/unit_tests/optional-chaining3.ts new file mode 100644 index 0000000000000000000000000000000000000000..69722086d3e21fe50e10b751cfc3577011d72c55 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/optional-chaining3.ts @@ -0,0 +1,10 @@ +const obj = { + func: function () { + return 123; + }, +}; + +function foo() { + return obj; +} +console.log(foo()?.func?.()); diff --git a/src/MapleFE/test/typescript/unit_tests/optional-chaining3.ts.result b/src/MapleFE/test/typescript/unit_tests/optional-chaining3.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..958a6fde5c0f84277c676150dabbff01cd10b563 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/optional-chaining3.ts.result @@ -0,0 +1,14 @@ +Matched 17 tokens. +Matched 26 tokens. +Matched 40 tokens. +============= Module =========== +== Sub Tree == +js_const Decl: obj= {func:func () throws: + return 123 +} +== Sub Tree == +func foo() throws: + return obj + +== Sub Tree == +console.log(foo().func()) diff --git a/src/MapleFE/test/typescript/unit_tests/optional-method.ts b/src/MapleFE/test/typescript/unit_tests/optional-method.ts new file mode 100644 index 0000000000000000000000000000000000000000..585b9d26f326619164d176d53812903416b4540b --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/optional-method.ts @@ -0,0 +1,3 @@ +class Klass { + protected func?(): void; +} diff --git a/src/MapleFE/test/typescript/unit_tests/optional-method.ts.result b/src/MapleFE/test/typescript/unit_tests/optional-method.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..1e36537786314b0f5e55efb76027d902fc1a223d --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/optional-method.ts.result @@ -0,0 +1,13 @@ +Matched 12 tokens. 
+============= Module =========== +== Sub Tree == +class Klass + Fields: + + Instance Initializer: + Constructors: + Methods: + func func?() throws: + LocalClasses: + LocalInterfaces: + diff --git a/src/MapleFE/test/typescript/unit_tests/optional-param.ts b/src/MapleFE/test/typescript/unit_tests/optional-param.ts new file mode 100644 index 0000000000000000000000000000000000000000..44d22da25c68181a5a71f3ff275137923836e813 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/optional-param.ts @@ -0,0 +1,10 @@ +class Foo { + public a: string = ""; +} + +function func(x?: Foo): Foo { + return x ? x : { a: "default:a" }; +} + +console.log(func()); +console.log(func({ a: "Foo:a" })); diff --git a/src/MapleFE/test/typescript/unit_tests/optional-param.ts.result b/src/MapleFE/test/typescript/unit_tests/optional-param.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..fb05f46048eb9cc0d723b79fd7b1f3d747561fd0 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/optional-param.ts.result @@ -0,0 +1,23 @@ +Matched 11 tokens. +Matched 34 tokens. +Matched 43 tokens. +Matched 57 tokens. +============= Module =========== +== Sub Tree == +class Foo + Fields: + a="" + Instance Initializer: + Constructors: + Methods: + LocalClasses: + LocalInterfaces: + +== Sub Tree == +func func(x?) throws: + return x ? x : {a:"default:a"} + +== Sub Tree == +console.log(func()) +== Sub Tree == +console.log(func( {a:"Foo:a"})) diff --git a/src/MapleFE/test/typescript/unit_tests/optional-prop.ts b/src/MapleFE/test/typescript/unit_tests/optional-prop.ts new file mode 100644 index 0000000000000000000000000000000000000000..d635013d7e144fc5bffbebfd3f427200b35209f3 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/optional-prop.ts @@ -0,0 +1,11 @@ +class Klass { + a: number = 0; + b?: number; +} + +function f({ a, b = 1 }: Klass): number { + return a + b; +} + +console.log(f({ a: 1 })); +console.log(f({ a: 1, b: 2 })); diff --git a/src/MapleFE/test/typescript/unit_tests/optional-prop.ts.result b/src/MapleFE/test/typescript/unit_tests/optional-prop.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..1dd4128a6ba5c4ffa15c49f2ec29766006cf2704 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/optional-prop.ts.result @@ -0,0 +1,23 @@ +Matched 15 tokens. +Matched 37 tokens. +Matched 51 tokens. +Matched 69 tokens. +============= Module =========== +== Sub Tree == +class Klass + Fields: + a=0 b? + Instance Initializer: + Constructors: + Methods: + LocalClasses: + LocalInterfaces: + +== Sub Tree == +func f({:a, :b=1}) throws: + return a Add b + +== Sub Tree == +console.log(f( {a:1})) +== Sub Tree == +console.log(f( {a:1, b:2})) diff --git a/src/MapleFE/test/typescript/unit_tests/pick-key.ts b/src/MapleFE/test/typescript/unit_tests/pick-key.ts new file mode 100644 index 0000000000000000000000000000000000000000..a8ba3a4bc304b0ab536d985022bbea1427937241 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/pick-key.ts @@ -0,0 +1,6 @@ +interface IFace { + s: string; + n: number; +} +declare type Type = { [K in keyof Pick]: true; }; + diff --git a/src/MapleFE/test/typescript/unit_tests/pick-key.ts.result b/src/MapleFE/test/typescript/unit_tests/pick-key.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..03c535cb9bdf5df29aeb068ed4d28f20fca2d1e0 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/pick-key.ts.result @@ -0,0 +1,7 @@ +Matched 12 tokens. +Matched 33 tokens. 
+============= Module =========== +== Sub Tree == +ts_interface: IFace {s;n } +== Sub Tree == +declare type Type = {[K in keyof Pick] : true } diff --git a/src/MapleFE/test/typescript/unit_tests/priv-declare.ts b/src/MapleFE/test/typescript/unit_tests/priv-declare.ts new file mode 100644 index 0000000000000000000000000000000000000000..8babfc0eeb8ece6cfa8e0b3a04f260f829a8c2e3 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/priv-declare.ts @@ -0,0 +1,3 @@ +class CLS_test { + private declare priv_fd: number; +} diff --git a/src/MapleFE/test/typescript/unit_tests/priv-declare.ts.result b/src/MapleFE/test/typescript/unit_tests/priv-declare.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..2c1241d605350d8dd7d5c873694daefc5696e6ec --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/priv-declare.ts.result @@ -0,0 +1,13 @@ +Matched 10 tokens. +============= Module =========== +== Sub Tree == +class CLS_test + Fields: + + Instance Initializer: + Constructors: + Methods: + LocalClasses: + LocalInterfaces: + Declares: + declare js_var Decl: priv_fd diff --git a/src/MapleFE/test/typescript/unit_tests/private-constructor.ts b/src/MapleFE/test/typescript/unit_tests/private-constructor.ts new file mode 100644 index 0000000000000000000000000000000000000000..280daea9af883d7eefca14378dc8cdebf33ad1ca --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/private-constructor.ts @@ -0,0 +1,6 @@ +declare namespace NS { + class Klass { + n: number; + private constructor(); + } +} diff --git a/src/MapleFE/test/typescript/unit_tests/private-constructor.ts.result b/src/MapleFE/test/typescript/unit_tests/private-constructor.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..ff6c6a0630e64923c7d45837a6072f9843fdbbb5 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/private-constructor.ts.result @@ -0,0 +1,15 @@ +Matched 18 tokens. +============= Module =========== +== Sub Tree == +declare namespace NS + class Klass + Fields: + n + Instance Initializer: + Constructors: + constructor () throws: + Methods: + LocalClasses: + LocalInterfaces: + + diff --git a/src/MapleFE/test/typescript/unit_tests/private-field.d.ts b/src/MapleFE/test/typescript/unit_tests/private-field.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..70fb72ce9d09c8f65d5aa8e226f9cd7a06a7df2c --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/private-field.d.ts @@ -0,0 +1,7 @@ +// #private; for ECMAScript’s pound (#) private fields +// https://www.typescriptlang.org/docs/handbook/2/classes.html#caveats +// https://github.com/microsoft/TypeScript/issues/38050 +declare class Klass { + #private; + constructor(); +} diff --git a/src/MapleFE/test/typescript/unit_tests/private-field.d.ts.result b/src/MapleFE/test/typescript/unit_tests/private-field.d.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..7375df4dcbb2cf727e745c73eb8aa1818d5ff888 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/private-field.d.ts.result @@ -0,0 +1,13 @@ +Matched 12 tokens. 
+============= Module =========== +== Sub Tree == +declare class Klass + Fields: + private + Instance Initializer: + Constructors: + constructor () throws: + Methods: + LocalClasses: + LocalInterfaces: + diff --git a/src/MapleFE/test/typescript/unit_tests/private-prop.ts b/src/MapleFE/test/typescript/unit_tests/private-prop.ts new file mode 100644 index 0000000000000000000000000000000000000000..f59dc26801875779246b6c39d23851493ac83077 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/private-prop.ts @@ -0,0 +1,5 @@ +class Klass { + private "Klass.#private" = "abc"; +} +var obj : Klass = new Klass(); +console.log(obj); diff --git a/src/MapleFE/test/typescript/unit_tests/private-prop.ts.result b/src/MapleFE/test/typescript/unit_tests/private-prop.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..8804c42005a08a0b80281bbc1dfe7c3fe449577d --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/private-prop.ts.result @@ -0,0 +1,18 @@ +Matched 9 tokens. +Matched 19 tokens. +Matched 26 tokens. +============= Module =========== +== Sub Tree == +class Klass + Fields: + "Klass.#private" = "abc" + Instance Initializer: + Constructors: + Methods: + LocalClasses: + LocalInterfaces: + +== Sub Tree == +js_var Decl: obj=new Klass() +== Sub Tree == +console.log(obj) diff --git a/src/MapleFE/test/typescript/unit_tests/prop-name-extends.ts b/src/MapleFE/test/typescript/unit_tests/prop-name-extends.ts new file mode 100644 index 0000000000000000000000000000000000000000..8a7afdd521d8706a8e00cbc5e200c22a8ea57938 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/prop-name-extends.ts @@ -0,0 +1 @@ +function func(arg: { extends: null | Function }) {} diff --git a/src/MapleFE/test/typescript/unit_tests/prop-name-extends.ts.result b/src/MapleFE/test/typescript/unit_tests/prop-name-extends.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..b8774ca191a1440ea1eaebd14c13818adafd5721 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/prop-name-extends.ts.result @@ -0,0 +1,5 @@ +Matched 15 tokens. +============= Module =========== +== Sub Tree == +func func(arg) throws: + diff --git a/src/MapleFE/test/typescript/unit_tests/property-deco.ts b/src/MapleFE/test/typescript/unit_tests/property-deco.ts new file mode 100644 index 0000000000000000000000000000000000000000..cefae64156bc3fdd2374ab837e508db7fec6f399 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/property-deco.ts @@ -0,0 +1,16 @@ +function prop_deco(msg: string) { + return function (target: any, name: string) { + console.log("Accessed", name, msg, target); + }; +} + +class Klass { + @prop_deco("of") + x: number; + constructor(i: number) { + this.x = i; + } +} + +var c = new Klass(3); +console.log(c.x); diff --git a/src/MapleFE/test/typescript/unit_tests/property-deco.ts.result b/src/MapleFE/test/typescript/unit_tests/property-deco.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..1ab368a50744badc22012660a8e8a7a6d211698d --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/property-deco.ts.result @@ -0,0 +1,27 @@ +Matched 36 tokens. +Matched 63 tokens. +Matched 72 tokens. +Matched 81 tokens. 
+============= Module =========== +== Sub Tree == +func prop_deco(msg) throws: + return func (target,name) throws: + console.log("Accessed",name,msg,target) + + +== Sub Tree == +class Klass + Fields: + x + Instance Initializer: + Constructors: + constructor (i) throws: + this.x Assign i + Methods: + LocalClasses: + LocalInterfaces: + +== Sub Tree == +js_var Decl: c=new Klass(3) +== Sub Tree == +console.log(c.x) diff --git a/src/MapleFE/test/typescript/unit_tests/property-deco2.ts b/src/MapleFE/test/typescript/unit_tests/property-deco2.ts new file mode 100644 index 0000000000000000000000000000000000000000..5727298d19469dfcdcb8d5ba4cf7657d53a7ce2b --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/property-deco2.ts @@ -0,0 +1,20 @@ +function prop_deco(msg: string) { + return function (target: any, name: string) { + console.log("Accessed", name, msg, target); + }; +} + +class Klass { + _x: number; + constructor(i: number) { + this._x = i; + } + + @prop_deco("of") + get x() { + return this._x; + } +} + +var c = new Klass(3); +console.log(c.x); diff --git a/src/MapleFE/test/typescript/unit_tests/property-deco2.ts.result b/src/MapleFE/test/typescript/unit_tests/property-deco2.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..3870d964ac97e61a58afca083c1530b5b2a8128c --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/property-deco2.ts.result @@ -0,0 +1,29 @@ +Matched 36 tokens. +Matched 74 tokens. +Matched 83 tokens. +Matched 92 tokens. +============= Module =========== +== Sub Tree == +func prop_deco(msg) throws: + return func (target,name) throws: + console.log("Accessed",name,msg,target) + + +== Sub Tree == +class Klass + Fields: + _x + Instance Initializer: + Constructors: + constructor (i) throws: + this._x Assign i + Methods: + get x() throws: + return this._x + LocalClasses: + LocalInterfaces: + +== Sub Tree == +js_var Decl: c=new Klass(3) +== Sub Tree == +console.log(c.x) diff --git a/src/MapleFE/test/typescript/unit_tests/property-deco3.ts b/src/MapleFE/test/typescript/unit_tests/property-deco3.ts new file mode 100644 index 0000000000000000000000000000000000000000..3f9686568426ed71dfd8b6a7e85bd5b01b7a6e01 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/property-deco3.ts @@ -0,0 +1,16 @@ +function type(msg: string) { + return function (target: any, name: string) { + console.log("Accessed", name, msg, target); + }; +} + +class Klass { + @type("of") + x: number; + constructor(i: number) { + this.x = i; + } +} + +var c = new Klass(3); +console.log(c.x); diff --git a/src/MapleFE/test/typescript/unit_tests/property-deco3.ts.result b/src/MapleFE/test/typescript/unit_tests/property-deco3.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..e1ebece167f1202a4fa8d0e70f8eab7f754cc60d --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/property-deco3.ts.result @@ -0,0 +1,27 @@ +Matched 36 tokens. +Matched 63 tokens. +Matched 72 tokens. +Matched 81 tokens. 
+============= Module =========== +== Sub Tree == +func type(msg) throws: + return func (target,name) throws: + console.log("Accessed",name,msg,target) + + +== Sub Tree == +class Klass + Fields: + x + Instance Initializer: + Constructors: + constructor (i) throws: + this.x Assign i + Methods: + LocalClasses: + LocalInterfaces: + +== Sub Tree == +js_var Decl: c=new Klass(3) +== Sub Tree == +console.log(c.x) diff --git a/src/MapleFE/test/typescript/unit_tests/property-deco4.ts b/src/MapleFE/test/typescript/unit_tests/property-deco4.ts new file mode 100644 index 0000000000000000000000000000000000000000..539811a971930c13f0f9fd145bf259a0b1350c4d --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/property-deco4.ts @@ -0,0 +1,21 @@ +function type(msg: string) { + return function (target: any, name: string) { + console.log("Accessed", name, msg, target); + }; +} + +function deco(target: any, name: string) { + console.log("Accessed", name, "OF", target); +} + +class Klass { + @deco + @type("of") + x: number; + constructor(i: number) { + this.x = i; + } +} + +var c = new Klass(3); +console.log(c.x); diff --git a/src/MapleFE/test/typescript/unit_tests/property-deco4.ts.result b/src/MapleFE/test/typescript/unit_tests/property-deco4.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..9aa9229bb9e5ebfd33877ffb7ade40f59dd11358 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/property-deco4.ts.result @@ -0,0 +1,32 @@ +Matched 36 tokens. +Matched 62 tokens. +Matched 91 tokens. +Matched 100 tokens. +Matched 109 tokens. +============= Module =========== +== Sub Tree == +func type(msg) throws: + return func (target,name) throws: + console.log("Accessed",name,msg,target) + + +== Sub Tree == +func deco(target,name) throws: + console.log("Accessed",name,"OF",target) + +== Sub Tree == +class Klass + Fields: + x + Instance Initializer: + Constructors: + constructor (i) throws: + this.x Assign i + Methods: + LocalClasses: + LocalInterfaces: + +== Sub Tree == +js_var Decl: c=new Klass(3) +== Sub Tree == +console.log(c.x) diff --git a/src/MapleFE/test/typescript/unit_tests/public-as-prop-name.ts b/src/MapleFE/test/typescript/unit_tests/public-as-prop-name.ts new file mode 100644 index 0000000000000000000000000000000000000000..19784d994c4a50459b669a7b8fdac749a57847e0 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/public-as-prop-name.ts @@ -0,0 +1,3 @@ +interface IFace { + public: string; +} diff --git a/src/MapleFE/test/typescript/unit_tests/public-as-prop-name.ts.result b/src/MapleFE/test/typescript/unit_tests/public-as-prop-name.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..55d9de52e70c78571eb902a72d547fc194f37043 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/public-as-prop-name.ts.result @@ -0,0 +1,4 @@ +Matched 8 tokens. 
+============= Module =========== +== Sub Tree == +ts_interface: IFace {public } diff --git a/src/MapleFE/test/typescript/unit_tests/re-export.ts b/src/MapleFE/test/typescript/unit_tests/re-export.ts new file mode 100644 index 0000000000000000000000000000000000000000..dea206bb65b44a718d251b8587b08ac6019a319a --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/re-export.ts @@ -0,0 +1,4 @@ +export * from "./M"; +export { default as x } from "./M"; +import { default as x } from "./M"; +export { x as default }; diff --git a/src/MapleFE/test/typescript/unit_tests/re-export.ts.result b/src/MapleFE/test/typescript/unit_tests/re-export.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..cd3ee09e619a0c75c2353490484d6898c513b07e --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/re-export.ts.result @@ -0,0 +1,13 @@ +Matched 5 tokens. +Matched 14 tokens. +Matched 23 tokens. +Matched 30 tokens. +============= Module =========== +== Sub Tree == +export { *} "./M" +== Sub Tree == +export {default as x} "./M" +== Sub Tree == +import {default as x} "./M" +== Sub Tree == +export {x as default} diff --git a/src/MapleFE/test/typescript/unit_tests/re-export2.ts b/src/MapleFE/test/typescript/unit_tests/re-export2.ts new file mode 100644 index 0000000000000000000000000000000000000000..387bd489e6ab49fbeb3d2832e7979469a1ac4d97 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/re-export2.ts @@ -0,0 +1,5 @@ +export * from "./M"; +export { default as Y } from "./M"; +import { default as x } from "./M"; +export { x as default }; +console.log(x); diff --git a/src/MapleFE/test/typescript/unit_tests/re-export2.ts.result b/src/MapleFE/test/typescript/unit_tests/re-export2.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..9794f6a145438cc1d64fab0250b29722b9e5f5e6 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/re-export2.ts.result @@ -0,0 +1,16 @@ +Matched 5 tokens. +Matched 14 tokens. +Matched 23 tokens. +Matched 30 tokens. +Matched 37 tokens. +============= Module =========== +== Sub Tree == +export { *} "./M" +== Sub Tree == +export {default as Y} "./M" +== Sub Tree == +import {default as x} "./M" +== Sub Tree == +export {x as default} +== Sub Tree == +console.log(x) diff --git a/src/MapleFE/test/typescript/unit_tests/re-export4.ts b/src/MapleFE/test/typescript/unit_tests/re-export4.ts new file mode 100644 index 0000000000000000000000000000000000000000..63ca2a02303b91f291aa96355171cc1a13c5f580 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/re-export4.ts @@ -0,0 +1,2 @@ +export * as MM from "./M"; +export { default as default } from "./M"; diff --git a/src/MapleFE/test/typescript/unit_tests/re-export4.ts.result b/src/MapleFE/test/typescript/unit_tests/re-export4.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..877e3451ab06c83951e3d35059a90f57ecf1af12 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/re-export4.ts.result @@ -0,0 +1,7 @@ +Matched 7 tokens. +Matched 16 tokens. 
+============= Module =========== +== Sub Tree == +export { * as MM} "./M" +== Sub Tree == +export {default as default} "./M" diff --git a/src/MapleFE/test/typescript/unit_tests/readonly-return.ts b/src/MapleFE/test/typescript/unit_tests/readonly-return.ts new file mode 100644 index 0000000000000000000000000000000000000000..6fd9bb123332380f7d89f5814cc8610e446fa723 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/readonly-return.ts @@ -0,0 +1,6 @@ +function foo(): readonly number[] { + var v: number = 1; + return [v, v]; +} + +console.log(foo()); diff --git a/src/MapleFE/test/typescript/unit_tests/readonly-return.ts.result b/src/MapleFE/test/typescript/unit_tests/readonly-return.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..4c57aa2b0093bd379d0d19f9037a1f84bb80d844 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/readonly-return.ts.result @@ -0,0 +1,10 @@ +Matched 25 tokens. +Matched 34 tokens. +============= Module =========== +== Sub Tree == +func foo() throws: + js_var Decl: v=1 + return [v,v] + +== Sub Tree == +console.log(foo()) diff --git a/src/MapleFE/test/typescript/unit_tests/readonly-return2.ts b/src/MapleFE/test/typescript/unit_tests/readonly-return2.ts new file mode 100644 index 0000000000000000000000000000000000000000..1754cffadfc19ee8679584edd24160437c0388cb --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/readonly-return2.ts @@ -0,0 +1,6 @@ +class Klass {} + +export interface Interface { + foo(): readonly Klass[]; + bar(): void; +} diff --git a/src/MapleFE/test/typescript/unit_tests/readonly-return2.ts.result b/src/MapleFE/test/typescript/unit_tests/readonly-return2.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..08c169df65f6fd8f75bbdb2858c91e49abaef68a --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/readonly-return2.ts.result @@ -0,0 +1,17 @@ +Matched 4 tokens. +Matched 24 tokens. +============= Module =========== +== Sub Tree == +class Klass + Fields: + + Instance Initializer: + Constructors: + Methods: + LocalClasses: + LocalInterfaces: + +== Sub Tree == +export {ts_interface: Interface {func foo() throws: +;func bar() throws: + }} diff --git a/src/MapleFE/test/typescript/unit_tests/readonly.ts b/src/MapleFE/test/typescript/unit_tests/readonly.ts new file mode 100644 index 0000000000000000000000000000000000000000..c8744efac8f3502a85c0d856571dd7d7f7a2f69f --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/readonly.ts @@ -0,0 +1,3 @@ +export class Foo { + readonly foo_var: number = 1; +} diff --git a/src/MapleFE/test/typescript/unit_tests/readonly.ts.result b/src/MapleFE/test/typescript/unit_tests/readonly.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..0be5e3d965213c608c710de39a5c8a81b6755575 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/readonly.ts.result @@ -0,0 +1,12 @@ +Matched 12 tokens. 
+============= Module =========== +== Sub Tree == +export {class Foo + Fields: + foo_var=1 + Instance Initializer: + Constructors: + Methods: + LocalClasses: + LocalInterfaces: +} diff --git a/src/MapleFE/test/typescript/unit_tests/record-parameter.ts b/src/MapleFE/test/typescript/unit_tests/record-parameter.ts new file mode 100644 index 0000000000000000000000000000000000000000..b6a2d644b8ced589ebe37d1671d212326d94d7dc --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/record-parameter.ts @@ -0,0 +1,3 @@ +class Klass { + func(map: Record void>) {} +} diff --git a/src/MapleFE/test/typescript/unit_tests/record-parameter.ts.result b/src/MapleFE/test/typescript/unit_tests/record-parameter.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..e84b88f33294cce537889c15676d3940720a379d --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/record-parameter.ts.result @@ -0,0 +1,13 @@ +Matched 39 tokens. +============= Module =========== +== Sub Tree == +class Klass + Fields: + + Instance Initializer: + Constructors: + Methods: + func func(map) throws: + LocalClasses: + LocalInterfaces: + diff --git a/src/MapleFE/test/typescript/unit_tests/record.ts b/src/MapleFE/test/typescript/unit_tests/record.ts new file mode 100644 index 0000000000000000000000000000000000000000..9f71dde3de55dfe187008f0c2ffd36dbfeb3d2ab --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/record.ts @@ -0,0 +1,12 @@ +enum Direction { + LEFT, + RIGHT, +} + +const rec: Record = { + [Direction.LEFT]: "left", + [Direction.RIGHT]: "right", + default: "Unknown", +}; + +console.log(rec); diff --git a/src/MapleFE/test/typescript/unit_tests/record.ts.result b/src/MapleFE/test/typescript/unit_tests/record.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..ac4d471a66850d660556cc5a15a014052cf182f9 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/record.ts.result @@ -0,0 +1,10 @@ +Matched 8 tokens. +Matched 41 tokens. +Matched 48 tokens. +============= Module =========== +== Sub Tree == +ts_enum: Direction {LEFT;RIGHT } +== Sub Tree == +js_const Decl: rec= {[Direction.LEFT] : :"left", [Direction.RIGHT] : :"right", default:"Unknown"} +== Sub Tree == +console.log(rec) diff --git a/src/MapleFE/test/typescript/unit_tests/regexp-array.ts b/src/MapleFE/test/typescript/unit_tests/regexp-array.ts new file mode 100644 index 0000000000000000000000000000000000000000..9622797aa2089c79a9c0f5c7bc6923f588b3c7d9 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/regexp-array.ts @@ -0,0 +1,7 @@ +const obj = { + arr: [ + /[/\\]@cocos1/, + /[/\\]@cocos2/, + ], +}; +console.log(obj); diff --git a/src/MapleFE/test/typescript/unit_tests/regexp-array.ts.result b/src/MapleFE/test/typescript/unit_tests/regexp-array.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..ef3e3385429d05a519a0a4c67c3cbb3508b85ad4 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/regexp-array.ts.result @@ -0,0 +1,7 @@ +Matched 15 tokens. +Matched 22 tokens. 
+============= Module =========== +== Sub Tree == +js_const Decl: obj= {arr:[reg expr : [/\\]@cocos1,reg expr : [/\\]@cocos2]} +== Sub Tree == +console.log(obj) diff --git a/src/MapleFE/test/typescript/unit_tests/regexp-array2.ts b/src/MapleFE/test/typescript/unit_tests/regexp-array2.ts new file mode 100644 index 0000000000000000000000000000000000000000..cd8922dc441a16dacdc86f2464f1274fdb4aee8c --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/regexp-array2.ts @@ -0,0 +1,7 @@ +const obj = { + arr: [{ + ex1: /[/\\]@cocos1/, + ex2: /[/\\]@cocos2/, + }], +}; +console.log(obj); diff --git a/src/MapleFE/test/typescript/unit_tests/regexp-array2.ts.result b/src/MapleFE/test/typescript/unit_tests/regexp-array2.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..e1e3c3558685c6a71d9f4b9c5a3cdbc20b7f43a4 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/regexp-array2.ts.result @@ -0,0 +1,7 @@ +Matched 21 tokens. +Matched 28 tokens. +============= Module =========== +== Sub Tree == +js_const Decl: obj= {arr:[ {ex1:reg expr : [/\\]@cocos1, ex2:reg expr : [/\\]@cocos2}]} +== Sub Tree == +console.log(obj) diff --git a/src/MapleFE/test/typescript/unit_tests/regexp-array3.ts b/src/MapleFE/test/typescript/unit_tests/regexp-array3.ts new file mode 100644 index 0000000000000000000000000000000000000000..8188aebbf0c52f880aefa5799c82e931ee5eabe1 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/regexp-array3.ts @@ -0,0 +1,4 @@ +const obj = { + arr : [ /[/\\]@cocos1/, /[/\\]@cocos2/] +}; +console.log(obj); diff --git a/src/MapleFE/test/typescript/unit_tests/regexp-array3.ts.result b/src/MapleFE/test/typescript/unit_tests/regexp-array3.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..a170be2cd1c56ef5ce70110dda04c63793c40d45 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/regexp-array3.ts.result @@ -0,0 +1,7 @@ +Matched 13 tokens. +Matched 20 tokens. +============= Module =========== +== Sub Tree == +js_const Decl: obj= {arr:[reg expr : [/\\]@cocos1,reg expr : [/\\]@cocos2]} +== Sub Tree == +console.log(obj) diff --git a/src/MapleFE/test/typescript/unit_tests/regexp-in-array.ts b/src/MapleFE/test/typescript/unit_tests/regexp-in-array.ts new file mode 100644 index 0000000000000000000000000000000000000000..ab5b96853e42e1dd45b7ba167f70a1e449245858 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/regexp-in-array.ts @@ -0,0 +1,3 @@ +var x: Array<[RegExp, string]> = []; +x.push([/</g, "<"]); +console.log(x); diff --git a/src/MapleFE/test/typescript/unit_tests/regexp-in-array.ts.result b/src/MapleFE/test/typescript/unit_tests/regexp-in-array.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..643a8cebf99c240a6111a4d15a6bdb75b3a3a27e --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/regexp-in-array.ts.result @@ -0,0 +1,10 @@ +Matched 15 tokens. +Matched 26 tokens. +Matched 33 tokens. 
+============= Module =========== +== Sub Tree == +js_var Decl: x=[] +== Sub Tree == +x.push([reg expr : < g,"<"]) +== Sub Tree == +console.log(x) diff --git a/src/MapleFE/test/typescript/unit_tests/regexp.ts b/src/MapleFE/test/typescript/unit_tests/regexp.ts new file mode 100644 index 0000000000000000000000000000000000000000..78e2c0dfbb6ebdc2fa530060df5421717cbb2de1 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/regexp.ts @@ -0,0 +1,4 @@ +var str: string = "abcba"; +console.log(str); +str = str.replace(/b/g, "B"); +console.log(str); diff --git a/src/MapleFE/test/typescript/unit_tests/regexp.ts.result b/src/MapleFE/test/typescript/unit_tests/regexp.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..2562eb3a8b49eee841b3ba10fe403d999d336e51 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/regexp.ts.result @@ -0,0 +1,13 @@ +Matched 7 tokens. +Matched 14 tokens. +Matched 25 tokens. +Matched 32 tokens. +============= Module =========== +== Sub Tree == +js_var Decl: str="abcba" +== Sub Tree == +console.log(str) +== Sub Tree == +str Assign str.replace(reg expr : b g,"B") +== Sub Tree == +console.log(str) diff --git a/src/MapleFE/test/typescript/unit_tests/regexp2.ts b/src/MapleFE/test/typescript/unit_tests/regexp2.ts new file mode 100644 index 0000000000000000000000000000000000000000..8145277a03845be1179d65075b731565db26802e --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/regexp2.ts @@ -0,0 +1,4 @@ +function repl(s: string) { + return s.replace(/\s\S/g, "."); +} +console.log(repl("abc def gh")); diff --git a/src/MapleFE/test/typescript/unit_tests/regexp2.ts.result b/src/MapleFE/test/typescript/unit_tests/regexp2.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..19c8b14acbddb03a6003b9e69cba5b5e77fecb72 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/regexp2.ts.result @@ -0,0 +1,9 @@ +Matched 19 tokens. +Matched 29 tokens. +============= Module =========== +== Sub Tree == +func repl(s) throws: + return s.replace(reg expr : \s\S g,".") + +== Sub Tree == +console.log(repl("abc def gh")) diff --git a/src/MapleFE/test/typescript/unit_tests/regexp3.ts b/src/MapleFE/test/typescript/unit_tests/regexp3.ts new file mode 100644 index 0000000000000000000000000000000000000000..ec93450ba34f8c801a0c0f0d40eb165062148812 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/regexp3.ts @@ -0,0 +1,3 @@ +var s = "abc de fg"; +s = s.replace(/[^A-Za-z0-9\+\/\=]/g, ""); +console.log(s); diff --git a/src/MapleFE/test/typescript/unit_tests/regexp3.ts.result b/src/MapleFE/test/typescript/unit_tests/regexp3.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..b6c6aa92deba4b1ca791a99c5f2115f262f11288 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/regexp3.ts.result @@ -0,0 +1,10 @@ +Matched 5 tokens. +Matched 16 tokens. +Matched 23 tokens. 
+============= Module =========== +== Sub Tree == +js_var Decl: s="abc de fg" +== Sub Tree == +s Assign s.replace(reg expr : [^A-Za-z0-9\+\/\=] g,"") +== Sub Tree == +console.log(s) diff --git a/src/MapleFE/test/typescript/unit_tests/regexp4.ts b/src/MapleFE/test/typescript/unit_tests/regexp4.ts new file mode 100644 index 0000000000000000000000000000000000000000..50e475c0b422254e6077879251892e1854c0e755 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/regexp4.ts @@ -0,0 +1,5 @@ +var str: string = "abc/ab/1234abcdef"; +var re = /.*[/\\][0-9a-fA-F]{2}[/\\]([0-9a-fA-F-@]{8,}).*/; +console.log(str); +str = str.replace(re, "replaced"); +console.log(str); diff --git a/src/MapleFE/test/typescript/unit_tests/regexp4.ts.result b/src/MapleFE/test/typescript/unit_tests/regexp4.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..c3641c93a5c57867aff677abd58f16559fae3028 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/regexp4.ts.result @@ -0,0 +1,16 @@ +Matched 7 tokens. +Matched 12 tokens. +Matched 19 tokens. +Matched 30 tokens. +Matched 37 tokens. +============= Module =========== +== Sub Tree == +js_var Decl: str="abc/ab/1234abcdef" +== Sub Tree == +js_var Decl: re=reg expr : .*[/\\][0-9a-fA-F]{2}[/\\]([0-9a-fA-F-@]{8,}).* +== Sub Tree == +console.log(str) +== Sub Tree == +str Assign str.replace(re,"replaced") +== Sub Tree == +console.log(str) diff --git a/src/MapleFE/test/typescript/unit_tests/regexp5.ts b/src/MapleFE/test/typescript/unit_tests/regexp5.ts new file mode 100644 index 0000000000000000000000000000000000000000..feec44b29704eb4c28475e9a85d9ac025f49f5ac --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/regexp5.ts @@ -0,0 +1,2 @@ +var x = true && /\/t/.test("/test"); +console.log(x); diff --git a/src/MapleFE/test/typescript/unit_tests/regexp5.ts.result b/src/MapleFE/test/typescript/unit_tests/regexp5.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..4b499ad4e3a5f7d3eefd34c22db3148c1a5cc09e --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/regexp5.ts.result @@ -0,0 +1,7 @@ +Matched 12 tokens. +Matched 19 tokens. +============= Module =========== +== Sub Tree == +js_var Decl: x=true Land reg expr : \/t.test("/test") +== Sub Tree == +console.log(x) diff --git a/src/MapleFE/test/typescript/unit_tests/regexp6.ts b/src/MapleFE/test/typescript/unit_tests/regexp6.ts new file mode 100644 index 0000000000000000000000000000000000000000..7c93d2254f766ac6cbe8074fe07642b976597b58 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/regexp6.ts @@ -0,0 +1,3 @@ +const str = "foo bar tee"; +let res: null | any[] = /Foo/.exec(str) || /bar/.exec(str); +console.log(res); diff --git a/src/MapleFE/test/typescript/unit_tests/regexp6.ts.result b/src/MapleFE/test/typescript/unit_tests/regexp6.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..4c7a7ef2c6cf41c6d2c5b1d948d8f2923dee0af8 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/regexp6.ts.result @@ -0,0 +1,10 @@ +Matched 5 tokens. +Matched 28 tokens. +Matched 35 tokens. 
+============= Module =========== +== Sub Tree == +js_const Decl: str="foo bar tee" +== Sub Tree == +js_let Decl: res=reg expr : Foo.exec(str) Lor reg expr : bar.exec(str) +== Sub Tree == +console.log(res) diff --git a/src/MapleFE/test/typescript/unit_tests/regexp7.ts b/src/MapleFE/test/typescript/unit_tests/regexp7.ts new file mode 100644 index 0000000000000000000000000000000000000000..c9af3f44c5cab284ff0bdc1a41c4dec8bb34d1c9 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/regexp7.ts @@ -0,0 +1,2 @@ +export const re = + /["\u0000-\u001f]/g diff --git a/src/MapleFE/test/typescript/unit_tests/regexp7.ts.result b/src/MapleFE/test/typescript/unit_tests/regexp7.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..b272ca0aea8e35cf16f504ab4e4c193d4ba3b6c1 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/regexp7.ts.result @@ -0,0 +1,4 @@ +Matched 5 tokens. +============= Module =========== +== Sub Tree == +export {js_const Decl: re=reg expr : ["\u0000-\u001f] g} diff --git a/src/MapleFE/test/typescript/unit_tests/regular-type.ts b/src/MapleFE/test/typescript/unit_tests/regular-type.ts new file mode 100644 index 0000000000000000000000000000000000000000..d78d711bd0597d5c8a52a922ed1f93954dd473f2 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/regular-type.ts @@ -0,0 +1,3 @@ +class Klass {} +type K = Klass; +var a: K; diff --git a/src/MapleFE/test/typescript/unit_tests/regular-type.ts.result b/src/MapleFE/test/typescript/unit_tests/regular-type.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..bcc4082835a4e0350e46b808105292def0d5fc17 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/regular-type.ts.result @@ -0,0 +1,18 @@ +Matched 4 tokens. +Matched 9 tokens. +Matched 14 tokens. +============= Module =========== +== Sub Tree == +class Klass + Fields: + + Instance Initializer: + Constructors: + Methods: + LocalClasses: + LocalInterfaces: + +== Sub Tree == + type K = Klass +== Sub Tree == +js_var Decl: a diff --git a/src/MapleFE/test/typescript/unit_tests/rest-in-array.ts b/src/MapleFE/test/typescript/unit_tests/rest-in-array.ts new file mode 100644 index 0000000000000000000000000000000000000000..0701d0a959d2cf15b64849d072608c91eeeb17d8 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/rest-in-array.ts @@ -0,0 +1,6 @@ +const enum TypeID { + Array_Class = 0, + Array, +} + +type TYPE = [string, string[], number, ...TypeID[]]; diff --git a/src/MapleFE/test/typescript/unit_tests/rest-in-array.ts.result b/src/MapleFE/test/typescript/unit_tests/rest-in-array.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..8f69208f6e74b4388dbea5b833a559233a2f6f88 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/rest-in-array.ts.result @@ -0,0 +1,7 @@ +Matched 11 tokens. +Matched 29 tokens. 
+============= Module =========== +== Sub Tree == +ts_enum: TypeID {Array_Class=0;Array } +== Sub Tree == + type TYPE = [ : string , : prim array-TBD , : number , : ...TypeID[] , ] diff --git a/src/MapleFE/test/typescript/unit_tests/rest-in-array2.ts b/src/MapleFE/test/typescript/unit_tests/rest-in-array2.ts new file mode 100644 index 0000000000000000000000000000000000000000..403d8e2af74983e42cea05db5c19c81a392c73da --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/rest-in-array2.ts @@ -0,0 +1,4 @@ +module M { + let arr : Array<string> = ["a", "b"]; + console.log([...arr, "c"]); +} diff --git a/src/MapleFE/test/typescript/unit_tests/rest-in-array2.ts.result b/src/MapleFE/test/typescript/unit_tests/rest-in-array2.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..f9a4286d2aee7e12ebae0f702b84d0a7b70302b7 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/rest-in-array2.ts.result @@ -0,0 +1,9 @@ +Matched 30 tokens. +============= Module =========== +== Sub Tree == +============= Module =========== +== Sub Tree == +js_let Decl: arr=["a","b"] +== Sub Tree == +console.log([...arr,"c"]) + diff --git a/src/MapleFE/test/typescript/unit_tests/rest-object-literal.ts b/src/MapleFE/test/typescript/unit_tests/rest-object-literal.ts new file mode 100644 index 0000000000000000000000000000000000000000..cd6a6ec0ccc71868e1aa2d647bf92d379d32dd91 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/rest-object-literal.ts @@ -0,0 +1,8 @@ +function func<T>(x: T): T { + return x; +} +var list = { f1: 0, f2: 1 }; +class Klass { + public static fd = func({ ...list }); +} +console.log(Klass.fd); diff --git a/src/MapleFE/test/typescript/unit_tests/rest-object-literal.ts.result b/src/MapleFE/test/typescript/unit_tests/rest-object-literal.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..b30f75590b39455c4352e517e3555ecd3abfb098 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/rest-object-literal.ts.result @@ -0,0 +1,23 @@ +Matched 17 tokens. +Matched 30 tokens. +Matched 46 tokens. +Matched 55 tokens. +============= Module =========== +== Sub Tree == +func func(x) throws: + return x + +== Sub Tree == +js_var Decl: list= {f1:0, f2:1} +== Sub Tree == +class Klass + Fields: + fd=func( {:...list}) + Instance Initializer: + Constructors: + Methods: + LocalClasses: + LocalInterfaces: + +== Sub Tree == +console.log(Klass.fd) diff --git a/src/MapleFE/test/typescript/unit_tests/rest-object-literal2.ts b/src/MapleFE/test/typescript/unit_tests/rest-object-literal2.ts new file mode 100644 index 0000000000000000000000000000000000000000..54f1135ba1b380cffadcb676d275254861bcaa70 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/rest-object-literal2.ts @@ -0,0 +1,6 @@ +class Klass { + list = { f1: 0, f2: 1 }; +} +var obj: Klass = new Klass(); +var list2 = true ? { ...obj.list } : obj.list; +console.log(list2); diff --git a/src/MapleFE/test/typescript/unit_tests/rest-object-literal2.ts.result b/src/MapleFE/test/typescript/unit_tests/rest-object-literal2.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..eedd040f94d5af412ce4e590fea6e01f2fd7f553 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/rest-object-literal2.ts.result @@ -0,0 +1,21 @@ +Matched 16 tokens. +Matched 26 tokens. +Matched 42 tokens. +Matched 49 tokens.
+============= Module =========== +== Sub Tree == +class Klass + Fields: + list= {f1:0, f2:1} + Instance Initializer: + Constructors: + Methods: + LocalClasses: + LocalInterfaces: + +== Sub Tree == +js_var Decl: obj=new Klass() +== Sub Tree == +js_var Decl: list2=true ? {:obj.list} : obj.list +== Sub Tree == +console.log(list2) diff --git a/src/MapleFE/test/typescript/unit_tests/rest-object-literal3.ts b/src/MapleFE/test/typescript/unit_tests/rest-object-literal3.ts new file mode 100644 index 0000000000000000000000000000000000000000..b746a03c57b68d454631bd74026d885540c25664 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/rest-object-literal3.ts @@ -0,0 +1,6 @@ +class Klass { + arr = [{ list: { f1: 0, f2: 1 } }]; +} +var obj: Klass = new Klass(); +var list2 = true ? { ...obj.arr[0] } : obj.arr[0].list; +console.log(list2); diff --git a/src/MapleFE/test/typescript/unit_tests/rest-object-literal3.ts.result b/src/MapleFE/test/typescript/unit_tests/rest-object-literal3.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..555f965426b909d0ac2d6d29a53d68b160095939 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/rest-object-literal3.ts.result @@ -0,0 +1,21 @@ +Matched 22 tokens. +Matched 32 tokens. +Matched 56 tokens. +Matched 63 tokens. +============= Module =========== +== Sub Tree == +class Klass + Fields: + arr=[ {list: {f1:0, f2:1}}] + Instance Initializer: + Constructors: + Methods: + LocalClasses: + LocalInterfaces: + +== Sub Tree == +js_var Decl: obj=new Klass() +== Sub Tree == +js_var Decl: list2=true ? {:obj.arr[0]} : obj.arr[0].list +== Sub Tree == +console.log(list2) diff --git a/src/MapleFE/test/typescript/unit_tests/rest-params.ts b/src/MapleFE/test/typescript/unit_tests/rest-params.ts new file mode 100644 index 0000000000000000000000000000000000000000..977851d3b76b9ae7008600072cd4a7e90f7467d7 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/rest-params.ts @@ -0,0 +1,5 @@ +function func(msg: string, ...params: any[]) { + console.log(msg, params); +} + +func("Rest: ", 1, 2, 3, "abc", "def"); diff --git a/src/MapleFE/test/typescript/unit_tests/rest-params.ts.result b/src/MapleFE/test/typescript/unit_tests/rest-params.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..41eea9e79e3ce410c5b851e16d0140d2f81f1cc5 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/rest-params.ts.result @@ -0,0 +1,9 @@ +Matched 25 tokens. +Matched 40 tokens. +============= Module =========== +== Sub Tree == +func func(msg,...params) throws: + console.log(msg,params) + +== Sub Tree == +func("Rest: ",1,2,3,"abc","def") diff --git a/src/MapleFE/test/typescript/unit_tests/return-as-any.ts b/src/MapleFE/test/typescript/unit_tests/return-as-any.ts new file mode 100644 index 0000000000000000000000000000000000000000..758d806c20e513dd666b5280fe081bd76f7c22e8 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/return-as-any.ts @@ -0,0 +1,2 @@ +const func = () => 0 as any; +console.log(func); diff --git a/src/MapleFE/test/typescript/unit_tests/return-as-any.ts.result b/src/MapleFE/test/typescript/unit_tests/return-as-any.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..4e68a8a2716740a6e91fd36a430e406a07884273 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/return-as-any.ts.result @@ -0,0 +1,7 @@ +Matched 10 tokens. +Matched 17 tokens. 
+============= Module =========== +== Sub Tree == +js_const Decl: func=() -> 0 +== Sub Tree == +console.log(func) diff --git a/src/MapleFE/test/typescript/unit_tests/semicolon-missing.ts b/src/MapleFE/test/typescript/unit_tests/semicolon-missing.ts new file mode 100644 index 0000000000000000000000000000000000000000..a418f65ca258ae23ba4ca0e9adf74d2621f4e640 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/semicolon-missing.ts @@ -0,0 +1,10 @@ +class Klass {} + +export interface Interf { + [key: string]: any; +} + +export interface Interf2 { + func(component: Klass): Interf; + func2(component: Klass): Interf; +} diff --git a/src/MapleFE/test/typescript/unit_tests/semicolon-missing.ts.result b/src/MapleFE/test/typescript/unit_tests/semicolon-missing.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..8b1f0dd44491b6d303a9c5243986f0117df2c3b3 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/semicolon-missing.ts.result @@ -0,0 +1,20 @@ +Matched 4 tokens. +Matched 17 tokens. +Matched 40 tokens. +============= Module =========== +== Sub Tree == +class Klass + Fields: + + Instance Initializer: + Constructors: + Methods: + LocalClasses: + LocalInterfaces: + +== Sub Tree == +export {ts_interface: Interf {string index type: any }} +== Sub Tree == +export {ts_interface: Interf2 {func func(component) throws: +;func func2(component) throws: + }} diff --git a/src/MapleFE/test/typescript/unit_tests/semicolon-missing10.ts b/src/MapleFE/test/typescript/unit_tests/semicolon-missing10.ts new file mode 100644 index 0000000000000000000000000000000000000000..88c2e5ad80a6eb91cbd1d00d4eeb60a6333a60e9 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/semicolon-missing10.ts @@ -0,0 +1,11 @@ +function func(arr: Array<number>): number { + var len = arr.length; + var sum = 0; + while(len > 0) { + len-- + sum += arr[len]; + } + return sum; +} + +console.log(func([1, 2, 3, 4])); diff --git a/src/MapleFE/test/typescript/unit_tests/semicolon-missing10.ts.result b/src/MapleFE/test/typescript/unit_tests/semicolon-missing10.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..dca83fd7b5dc81f4341cce66d1d3f8332c48a468 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/semicolon-missing10.ts.result @@ -0,0 +1,15 @@ +Matched 46 tokens. +Matched 64 tokens. +============= Module =========== +== Sub Tree == +func func(arr) throws: + js_var Decl: len=arr.length + js_var Decl: sum=0 + while len GT 0 len Dec + + sum AddAssign arr[len] + + return sum + +== Sub Tree == +console.log(func([1,2,3,4])) diff --git a/src/MapleFE/test/typescript/unit_tests/semicolon-missing11.ts b/src/MapleFE/test/typescript/unit_tests/semicolon-missing11.ts new file mode 100644 index 0000000000000000000000000000000000000000..5d563b22096797e7a6d4aaf7714839dbbc2b700a --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/semicolon-missing11.ts @@ -0,0 +1,3 @@ +export type Type = + | string + | number diff --git a/src/MapleFE/test/typescript/unit_tests/semicolon-missing11.ts.result b/src/MapleFE/test/typescript/unit_tests/semicolon-missing11.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..b3595f039db09b7776161fbc660c2ed0fc7ada81 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/semicolon-missing11.ts.result @@ -0,0 +1,4 @@ +Matched 8 tokens.
+============= Module =========== +== Sub Tree == +export { type Type = union = string | number} diff --git a/src/MapleFE/test/typescript/unit_tests/semicolon-missing12.ts b/src/MapleFE/test/typescript/unit_tests/semicolon-missing12.ts new file mode 100644 index 0000000000000000000000000000000000000000..08a3a4bbe0c6f7723a75520983fe2d9dda70d20f --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/semicolon-missing12.ts @@ -0,0 +1,2 @@ +console.log('Hello') +console.log diff --git a/src/MapleFE/test/typescript/unit_tests/semicolon-missing12.ts.result b/src/MapleFE/test/typescript/unit_tests/semicolon-missing12.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..c499b0516d5c4150e7c79c227737c21af60edb8b --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/semicolon-missing12.ts.result @@ -0,0 +1,7 @@ +Matched 6 tokens. +Matched 9 tokens. +============= Module =========== +== Sub Tree == +console.log("Hello") +== Sub Tree == +console.log diff --git a/src/MapleFE/test/typescript/unit_tests/semicolon-missing13.ts b/src/MapleFE/test/typescript/unit_tests/semicolon-missing13.ts new file mode 100644 index 0000000000000000000000000000000000000000..31d304467719e802d76e2cde45fb395b1086a5be --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/semicolon-missing13.ts @@ -0,0 +1,2 @@ +console.log +console.log(null) diff --git a/src/MapleFE/test/typescript/unit_tests/semicolon-missing13.ts.result b/src/MapleFE/test/typescript/unit_tests/semicolon-missing13.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..a2f1cd875bc566c3ab5a97f1cdefe2334ab24e3b --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/semicolon-missing13.ts.result @@ -0,0 +1,7 @@ +Matched 3 tokens. +Matched 9 tokens. +============= Module =========== +== Sub Tree == +console.log +== Sub Tree == +console.log(null) diff --git a/src/MapleFE/test/typescript/unit_tests/semicolon-missing14.ts b/src/MapleFE/test/typescript/unit_tests/semicolon-missing14.ts new file mode 100644 index 0000000000000000000000000000000000000000..2311369e565c20dfa593325081aab7dc46b80c02 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/semicolon-missing14.ts @@ -0,0 +1,21 @@ +class Car { + private _make: string; + constructor(make: string) { + this._make = make; + } + public getMake(): string { + return this._make; + } +} + +class Model extends Car { + private _model: string; + constructor(make: string, model: string) {super(make) this._model = super.getMake() + model; + } + public getModel(): string { + return this._model; + } +} + +let passat: Model = new Model("VW", "Passat"); +console.log(passat.getMake(), passat.getModel()); diff --git a/src/MapleFE/test/typescript/unit_tests/semicolon-missing14.ts.result b/src/MapleFE/test/typescript/unit_tests/semicolon-missing14.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..5368fc30da365058f5d1934d73ee3dfecd5b2e61 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/semicolon-missing14.ts.result @@ -0,0 +1,38 @@ +Matched 36 tokens. +Matched 88 tokens. +Matched 101 tokens. +Matched 118 tokens. 
+============= Module =========== +== Sub Tree == +class Car + Fields: + _make + Instance Initializer: + Constructors: + constructor (make) throws: + this._make Assign make + Methods: + func getMake() throws: + return this._make + LocalClasses: + LocalInterfaces: + +== Sub Tree == +class Model + Fields: + _model + Instance Initializer: + Constructors: + constructor (make,model) throws: + super(make) + this._model Assign super.getMake() Add model + Methods: + func getModel() throws: + return this._model + LocalClasses: + LocalInterfaces: + +== Sub Tree == +js_let Decl: passat=new Model("VW","Passat") +== Sub Tree == +console.log(passat.getMake(),passat.getModel()) diff --git a/src/MapleFE/test/typescript/unit_tests/semicolon-missing15.ts b/src/MapleFE/test/typescript/unit_tests/semicolon-missing15.ts new file mode 100644 index 0000000000000000000000000000000000000000..a21a6ce8a1190aa1bc6a8f85e5d85179ee1dbbb8 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/semicolon-missing15.ts @@ -0,0 +1,16 @@ +var n: number = 1; +switch (true) { + case n < 5: + console.log(n, " is less than 5"); + case n > 2 && n < 5: + console.log(n, " + 1 is equal to", n + 1); + break; + case n == 6: + console.log(n, " is equal to 6"); + break; + case n < 8: + console.log(n, " is greater than 4 and less than 8"); + break + default: + console.log(n, " is greater than 7"); +} diff --git a/src/MapleFE/test/typescript/unit_tests/semicolon-missing15.ts.result b/src/MapleFE/test/typescript/unit_tests/semicolon-missing15.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..78c460f980018fd17fd60b48153b7b3b98ca4a9e --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/semicolon-missing15.ts.result @@ -0,0 +1,8 @@ +Matched 7 tokens. +Matched 93 tokens. +============= Module =========== +== Sub Tree == +js_var Decl: n=1 +== Sub Tree == +A switch + diff --git a/src/MapleFE/test/typescript/unit_tests/semicolon-missing16.ts b/src/MapleFE/test/typescript/unit_tests/semicolon-missing16.ts new file mode 100644 index 0000000000000000000000000000000000000000..3caad616a659fe6951afcd48192e7063c6a0f87c --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/semicolon-missing16.ts @@ -0,0 +1,8 @@ +function func(arg: number): number | undefined { + if(arg < 1) return + for(let i = 0; i < arg; i++) + console.log(i); + return arg * 10; +} +console.log(func(3)); + diff --git a/src/MapleFE/test/typescript/unit_tests/semicolon-missing16.ts.result b/src/MapleFE/test/typescript/unit_tests/semicolon-missing16.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..5a85dd3971104c3fd5b7e648634c0a1e7b0ff316 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/semicolon-missing16.ts.result @@ -0,0 +1,15 @@ +Matched 46 tokens. +Matched 56 tokens. 
+============= Module =========== +== Sub Tree == +func func(arg) throws: + cond-branch cond:arg LT 1 + true branch : + return false branch : + + for ( ) + console.log(i) + return arg Mul 10 + +== Sub Tree == +console.log(func(3)) diff --git a/src/MapleFE/test/typescript/unit_tests/semicolon-missing17.ts b/src/MapleFE/test/typescript/unit_tests/semicolon-missing17.ts new file mode 100644 index 0000000000000000000000000000000000000000..f5c4aff91fda35a3bfecf34ccd70dfe72cdf7624 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/semicolon-missing17.ts @@ -0,0 +1,9 @@ +function func(arg: number): number | undefined { + for(let i = 0; i < arg; i++) { + if(i % 2 > 0) continue + console.log(i); + } + return arg * 10; +} +console.log(func(5)); + diff --git a/src/MapleFE/test/typescript/unit_tests/semicolon-missing17.ts.result b/src/MapleFE/test/typescript/unit_tests/semicolon-missing17.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..6de52c4b72ce0abb85198d4eeeaf62bc051bb110 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/semicolon-missing17.ts.result @@ -0,0 +1,17 @@ +Matched 50 tokens. +Matched 60 tokens. +============= Module =========== +== Sub Tree == +func func(arg) throws: + for ( ) + cond-branch cond:i Mod 2 GT 0 + true branch : + continue: + false branch : + + console.log(i) + + return arg Mul 10 + +== Sub Tree == +console.log(func(5)) diff --git a/src/MapleFE/test/typescript/unit_tests/semicolon-missing2.ts b/src/MapleFE/test/typescript/unit_tests/semicolon-missing2.ts new file mode 100644 index 0000000000000000000000000000000000000000..99880ccb1eac73c69c9625ca9eb44b5f06691f42 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/semicolon-missing2.ts @@ -0,0 +1,5 @@ +export abstract class Klass { + public abstract f1(a: boolean): void; + public abstract f2(a: boolean): void; + public abstract f3(): void; +} diff --git a/src/MapleFE/test/typescript/unit_tests/semicolon-missing2.ts.result b/src/MapleFE/test/typescript/unit_tests/semicolon-missing2.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..13effeab0dfc010356ebd8f18422ff1e5105af5f --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/semicolon-missing2.ts.result @@ -0,0 +1,15 @@ +Matched 36 tokens. +============= Module =========== +== Sub Tree == +export {class Klass + Fields: + + Instance Initializer: + Constructors: + Methods: + func f1(a) throws: + func f2(a) throws: + func f3() throws: + LocalClasses: + LocalInterfaces: +} diff --git a/src/MapleFE/test/typescript/unit_tests/semicolon-missing3.ts b/src/MapleFE/test/typescript/unit_tests/semicolon-missing3.ts new file mode 100644 index 0000000000000000000000000000000000000000..426aba5e7a0bbd8a1f28fbbd5878ee92575821f6 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/semicolon-missing3.ts @@ -0,0 +1,4 @@ +let func = function (n: number): number { + return n; +}; +console.log(func(123)); diff --git a/src/MapleFE/test/typescript/unit_tests/semicolon-missing3.ts.result b/src/MapleFE/test/typescript/unit_tests/semicolon-missing3.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..8aa220a9386b4d42d86b7d616c74e1f6cc36c38a --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/semicolon-missing3.ts.result @@ -0,0 +1,9 @@ +Matched 17 tokens. +Matched 27 tokens. 
+============= Module =========== +== Sub Tree == +js_let Decl: func=func (n) throws: + return n + +== Sub Tree == +console.log(func(123)) diff --git a/src/MapleFE/test/typescript/unit_tests/semicolon-missing4.ts b/src/MapleFE/test/typescript/unit_tests/semicolon-missing4.ts new file mode 100644 index 0000000000000000000000000000000000000000..e97a274786ba79926076cd2c814ef01bf381b09e --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/semicolon-missing4.ts @@ -0,0 +1,11 @@ +class Klass { + func?: (n: number) => number; +} +var obj: Klass = new Klass(); +obj.func = function (n: number): number { + return n; +}; + +export function show() { + console.log(obj.func!(123)); +} diff --git a/src/MapleFE/test/typescript/unit_tests/semicolon-missing4.ts.result b/src/MapleFE/test/typescript/unit_tests/semicolon-missing4.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..75efc0950a5dab815e9838e99d620abbca3e49ff --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/semicolon-missing4.ts.result @@ -0,0 +1,25 @@ +Matched 15 tokens. +Matched 25 tokens. +Matched 43 tokens. +Matched 63 tokens. +============= Module =========== +== Sub Tree == +class Klass + Fields: + func? + Instance Initializer: + Constructors: + Methods: + LocalClasses: + LocalInterfaces: + +== Sub Tree == +js_var Decl: obj=new Klass() +== Sub Tree == +obj.func Assign func (n) throws: + return n + +== Sub Tree == +export {func show() throws: + console.log(obj.func!(123)) +} diff --git a/src/MapleFE/test/typescript/unit_tests/semicolon-missing5.ts b/src/MapleFE/test/typescript/unit_tests/semicolon-missing5.ts new file mode 100644 index 0000000000000000000000000000000000000000..c9172d85056a295a43e80f7849f69f8aa1d14f33 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/semicolon-missing5.ts @@ -0,0 +1,17 @@ +class Klass { + prop: string = ""; + get getProp(): string { + return this.prop; + } + init(val: string) { + this.prop = val; + } + set setProp(newVal: string) { + this.init(newVal); + } +} + +var obj: Klass = new Klass(); +console.log(obj.getProp); +obj.setProp = "bar"; +console.log(obj.getProp); diff --git a/src/MapleFE/test/typescript/unit_tests/semicolon-missing5.ts.result b/src/MapleFE/test/typescript/unit_tests/semicolon-missing5.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..89bf2f9ee60c3a0ce49bf43ff3d95a91d766069c --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/semicolon-missing5.ts.result @@ -0,0 +1,30 @@ +Matched 53 tokens. +Matched 63 tokens. +Matched 72 tokens. +Matched 78 tokens. +Matched 87 tokens. 
+============= Module =========== +== Sub Tree == +class Klass + Fields: + prop="" + Instance Initializer: + Constructors: + Methods: + get getProp() throws: + return this.prop + func init(val) throws: + this.prop Assign val + set setProp(newVal) throws: + this.init(newVal) + LocalClasses: + LocalInterfaces: + +== Sub Tree == +js_var Decl: obj=new Klass() +== Sub Tree == +console.log(obj.getProp) +== Sub Tree == +obj.setProp Assign "bar" +== Sub Tree == +console.log(obj.getProp) diff --git a/src/MapleFE/test/typescript/unit_tests/semicolon-missing6.ts b/src/MapleFE/test/typescript/unit_tests/semicolon-missing6.ts new file mode 100644 index 0000000000000000000000000000000000000000..db3654a5e2d29f83342e24b2627cd03a8f074d5c --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/semicolon-missing6.ts @@ -0,0 +1,4 @@ +export declare function func(s: string): string | null; +export declare function func2(s: string): string | null; +export declare const re1: RegExp; +export declare const re2: RegExp; diff --git a/src/MapleFE/test/typescript/unit_tests/semicolon-missing6.ts.result b/src/MapleFE/test/typescript/unit_tests/semicolon-missing6.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..ae1342002187e83e5e53b08ce875ebd74acb167f --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/semicolon-missing6.ts.result @@ -0,0 +1,15 @@ +Matched 14 tokens. +Matched 28 tokens. +Matched 35 tokens. +Matched 42 tokens. +============= Module =========== +== Sub Tree == +export {declare func func(s) throws: +} +== Sub Tree == +export {declare func func2(s) throws: +} +== Sub Tree == +export {declare js_const Decl: re1} +== Sub Tree == +export {declare js_const Decl: re2} diff --git a/src/MapleFE/test/typescript/unit_tests/semicolon-missing7.ts b/src/MapleFE/test/typescript/unit_tests/semicolon-missing7.ts new file mode 100644 index 0000000000000000000000000000000000000000..2b33409dbeae625edc38d0e58fe5f6b53043c4fd --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/semicolon-missing7.ts @@ -0,0 +1,4 @@ +declare namespace NS { + const num: number + interface IFace { } +} diff --git a/src/MapleFE/test/typescript/unit_tests/semicolon-missing7.ts.result b/src/MapleFE/test/typescript/unit_tests/semicolon-missing7.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..05a1750950448b03ed5e993826e4d5d394d35d05 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/semicolon-missing7.ts.result @@ -0,0 +1,7 @@ +Matched 13 tokens. +============= Module =========== +== Sub Tree == +declare namespace NS + js_const Decl: num + ts_interface: IFace { } + diff --git a/src/MapleFE/test/typescript/unit_tests/semicolon-missing8.ts b/src/MapleFE/test/typescript/unit_tests/semicolon-missing8.ts new file mode 100644 index 0000000000000000000000000000000000000000..1dbb25e8f5d11d002e2f93fa50404e4e35b707bd --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/semicolon-missing8.ts @@ -0,0 +1,8 @@ +function func(f: Function): void { + f(); +} + +func((e:any) => { + if (e) throw e + console.log('OK') +}) diff --git a/src/MapleFE/test/typescript/unit_tests/semicolon-missing8.ts.result b/src/MapleFE/test/typescript/unit_tests/semicolon-missing8.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..ddb68873c2ab84dac90529c3ad1053b9c0c53b00 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/semicolon-missing8.ts.result @@ -0,0 +1,15 @@ +Matched 15 tokens. +Matched 38 tokens. 
+============= Module =========== +== Sub Tree == +func func(f) throws: + f() + +== Sub Tree == +func((e) -> cond-branch cond:e +true branch : + throw e +false branch : + +console.log("OK") +) diff --git a/src/MapleFE/test/typescript/unit_tests/semicolon-missing9.ts b/src/MapleFE/test/typescript/unit_tests/semicolon-missing9.ts new file mode 100644 index 0000000000000000000000000000000000000000..0c3d27d2febe5cc14b4a7467a93fef56731a57ea --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/semicolon-missing9.ts @@ -0,0 +1,3 @@ +console.log("hello") +console.log; +console.log("world") diff --git a/src/MapleFE/test/typescript/unit_tests/semicolon-missing9.ts.result b/src/MapleFE/test/typescript/unit_tests/semicolon-missing9.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..7106ec4ab1e08bd860ff523ba58da85e5a904ca0 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/semicolon-missing9.ts.result @@ -0,0 +1,10 @@ +Matched 6 tokens. +Matched 10 tokens. +Matched 16 tokens. +============= Module =========== +== Sub Tree == +console.log("hello") +== Sub Tree == +console.log +== Sub Tree == +console.log("world") diff --git a/src/MapleFE/test/typescript/unit_tests/shebang.ts b/src/MapleFE/test/typescript/unit_tests/shebang.ts new file mode 100644 index 0000000000000000000000000000000000000000..114bffa6991b2997b4bb27fe419b528762dd4962 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/shebang.ts @@ -0,0 +1,2 @@ +#!/usr/bin/env node +console.log("Hello world!"); diff --git a/src/MapleFE/test/typescript/unit_tests/shebang.ts.result b/src/MapleFE/test/typescript/unit_tests/shebang.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..ffe76bce9eb05088e26e69f3499b49ccd80654ba --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/shebang.ts.result @@ -0,0 +1,4 @@ +Matched 7 tokens. +============= Module =========== +== Sub Tree == +console.log("Hello world!") diff --git a/src/MapleFE/test/typescript/unit_tests/some.ts b/src/MapleFE/test/typescript/unit_tests/some.ts new file mode 100644 index 0000000000000000000000000000000000000000..3990a95e2ca6e87beada2fc49f0e7fb76a1c5ec2 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/some.ts @@ -0,0 +1,3 @@ +// https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Array/some +console.log([2, 5, 8, 1, 4].some((x) => x > 10)); // false +console.log([12, 5, 8, 1, 4].some((x) => x > 10)); // true diff --git a/src/MapleFE/test/typescript/unit_tests/some.ts.result b/src/MapleFE/test/typescript/unit_tests/some.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..ea756cf94adbc08570cf333fc5a0c4e89c516530 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/some.ts.result @@ -0,0 +1,7 @@ +Matched 28 tokens. +Matched 56 tokens. 
+============= Module =========== +== Sub Tree == +console.log([2,5,8,1,4].some((x) -> x GT 10)) +== Sub Tree == +console.log([12,5,8,1,4].some((x) -> x GT 10)) diff --git a/src/MapleFE/test/typescript/unit_tests/spread-param.d.ts b/src/MapleFE/test/typescript/unit_tests/spread-param.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..0a2a6bedb2b34d62770a667684e4e1a3994bcfcf --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/spread-param.d.ts @@ -0,0 +1,2 @@ +declare function func<T, U>( s1: T, s2: T | U, ...ss: Array< T | U | ((e: T | null) => void)>,): T; + diff --git a/src/MapleFE/test/typescript/unit_tests/spread-param.d.ts.result b/src/MapleFE/test/typescript/unit_tests/spread-param.d.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..78d64c1b433ff33b1c4dc7838ced36ace25c6f17 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/spread-param.d.ts.result @@ -0,0 +1,5 @@ +Matched 45 tokens. +============= Module =========== +== Sub Tree == +declare func func(s1,s2,...ss) throws: + diff --git a/src/MapleFE/test/typescript/unit_tests/spread.ts b/src/MapleFE/test/typescript/unit_tests/spread.ts new file mode 100644 index 0000000000000000000000000000000000000000..6f62a3faf38feb28f2aec371f1ca7bb2de8ce8f9 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/spread.ts @@ -0,0 +1,9 @@ +const arr: number[] = [1, 2, 3]; +var arr2 = [ 0, ...arr ]; +console.log(arr2); + +function func() { + return { f1: 123, f2: "abc" }; +} +var obj2 = { f0: "OK", ...func() }; +console.log(obj2); diff --git a/src/MapleFE/test/typescript/unit_tests/spread.ts.result b/src/MapleFE/test/typescript/unit_tests/spread.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..99807845e80260e556cfbd48c0a0050d644ce2c6 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/spread.ts.result @@ -0,0 +1,21 @@ +Matched 15 tokens. +Matched 25 tokens. +Matched 32 tokens. +Matched 49 tokens. +Matched 63 tokens. +Matched 70 tokens. +============= Module =========== +== Sub Tree == +js_const Decl: arr=[1,2,3] +== Sub Tree == +js_var Decl: arr2=[0,...arr] +== Sub Tree == +console.log(arr2) +== Sub Tree == +func func() throws: + return {f1:123, f2:"abc"} + +== Sub Tree == +js_var Decl: obj2= {f0:"OK", :func()} +== Sub Tree == +console.log(obj2) diff --git a/src/MapleFE/test/typescript/unit_tests/static-readonly.ts b/src/MapleFE/test/typescript/unit_tests/static-readonly.ts new file mode 100644 index 0000000000000000000000000000000000000000..eeaf2b3f5673f100122aa9a28e7f444cb68dc197 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/static-readonly.ts @@ -0,0 +1,3 @@ +class Foo { + static readonly x: number = 0; +} diff --git a/src/MapleFE/test/typescript/unit_tests/static-readonly.ts.result b/src/MapleFE/test/typescript/unit_tests/static-readonly.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..42288272104e2894c17a108a743c00a5f83a4575 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/static-readonly.ts.result @@ -0,0 +1,12 @@ +Matched 12 tokens.
+============= Module =========== +== Sub Tree == +class Foo + Fields: + x=0 + Instance Initializer: + Constructors: + Methods: + LocalClasses: + LocalInterfaces: + diff --git a/src/MapleFE/test/typescript/unit_tests/static.ts b/src/MapleFE/test/typescript/unit_tests/static.ts new file mode 100644 index 0000000000000000000000000000000000000000..fbfac9dfb4f115725ec0f167f42fe079936c6d80 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/static.ts @@ -0,0 +1,9 @@ +class Foo { + static x: number = 1; + static inc(y: number) { + return this.x + y; + } +} + +console.log(Foo.x); +console.log(Foo.inc(10)); diff --git a/src/MapleFE/test/typescript/unit_tests/static.ts.result b/src/MapleFE/test/typescript/unit_tests/static.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..3c0381cbd6314dae3720c496e1acdfd9699d1932 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/static.ts.result @@ -0,0 +1,20 @@ +Matched 27 tokens. +Matched 36 tokens. +Matched 48 tokens. +============= Module =========== +== Sub Tree == +class Foo + Fields: + x=1 + Instance Initializer: + Constructors: + Methods: + func inc(y) throws: + return this.x Add y + LocalClasses: + LocalInterfaces: + +== Sub Tree == +console.log(Foo.x) +== Sub Tree == +console.log(Foo.inc(10)) diff --git a/src/MapleFE/test/typescript/unit_tests/string-as-prop-name.ts b/src/MapleFE/test/typescript/unit_tests/string-as-prop-name.ts new file mode 100644 index 0000000000000000000000000000000000000000..a1ddbdd1e005b91a095748138ae2fa5c8315a478 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/string-as-prop-name.ts @@ -0,0 +1,6 @@ +interface IFace { + "func-name"() : string; +} + +var obj: IFace = { "func-name": () => "Function name" }; +console.log(obj); diff --git a/src/MapleFE/test/typescript/unit_tests/string-as-prop-name.ts.result b/src/MapleFE/test/typescript/unit_tests/string-as-prop-name.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..47937f1f5d539a47af772c519227becc30709fa2 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/string-as-prop-name.ts.result @@ -0,0 +1,11 @@ +Matched 10 tokens. +Matched 24 tokens. +Matched 31 tokens. +============= Module =========== +== Sub Tree == +ts_interface: IFace {func () throws: + } +== Sub Tree == +js_var Decl: obj= {"func-name":() -> "Function name"} +== Sub Tree == +console.log(obj) diff --git a/src/MapleFE/test/typescript/unit_tests/stringString.ts b/src/MapleFE/test/typescript/unit_tests/stringString.ts new file mode 100644 index 0000000000000000000000000000000000000000..8ce486a185eb399671b0cc6cd0ba72573886a0ba --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/stringString.ts @@ -0,0 +1,6 @@ +class Student { + name: string = ""; +}; +class Parent { + name: String = ""; +}; diff --git a/src/MapleFE/test/typescript/unit_tests/stringString.ts.result b/src/MapleFE/test/typescript/unit_tests/stringString.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..19f19f0de5f7a28448d744781c09d982c18a117d --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/stringString.ts.result @@ -0,0 +1,25 @@ +Matched 10 tokens. +Matched 11 tokens. +Matched 21 tokens. +Matched 22 tokens. 
+============= Module =========== +== Sub Tree == +class Student + Fields: + name="" + Instance Initializer: + Constructors: + Methods: + LocalClasses: + LocalInterfaces: + +== Sub Tree == +class Parent + Fields: + name="" + Instance Initializer: + Constructors: + Methods: + LocalClasses: + LocalInterfaces: + diff --git a/src/MapleFE/test/typescript/unit_tests/structural-typing.ts b/src/MapleFE/test/typescript/unit_tests/structural-typing.ts new file mode 100644 index 0000000000000000000000000000000000000000..c1c067e52b792e6d744f48a4e0bbda60c4c76f60 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/structural-typing.ts @@ -0,0 +1,16 @@ +class Foo { + f1: number = 0; + f2: string = ""; +} + +class Bar { + f1: number = 0; + f2: string = ""; +} + +var x = { f1: 123, f2: "John" }; +var y: Foo; +var z: Bar; +y = x; +z = y; +console.log(y, z); diff --git a/src/MapleFE/test/typescript/unit_tests/structural-typing.ts.result b/src/MapleFE/test/typescript/unit_tests/structural-typing.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..6ba4006bdf6be05974e90d09b395a3bb1873624b --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/structural-typing.ts.result @@ -0,0 +1,41 @@ +Matched 16 tokens. +Matched 32 tokens. +Matched 45 tokens. +Matched 50 tokens. +Matched 55 tokens. +Matched 59 tokens. +Matched 63 tokens. +Matched 72 tokens. +============= Module =========== +== Sub Tree == +class Foo + Fields: + f1=0 f2="" + Instance Initializer: + Constructors: + Methods: + LocalClasses: + LocalInterfaces: + +== Sub Tree == +class Bar + Fields: + f1=0 f2="" + Instance Initializer: + Constructors: + Methods: + LocalClasses: + LocalInterfaces: + +== Sub Tree == +js_var Decl: x= {f1:123, f2:"John"} +== Sub Tree == +js_var Decl: y +== Sub Tree == +js_var Decl: z +== Sub Tree == +y Assign x +== Sub Tree == +z Assign y +== Sub Tree == +console.log(y,z) diff --git a/src/MapleFE/test/typescript/unit_tests/switch-in-loop.ts b/src/MapleFE/test/typescript/unit_tests/switch-in-loop.ts new file mode 100644 index 0000000000000000000000000000000000000000..1d2fd8889ec1d233929e172c08f29003bd3a02b9 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/switch-in-loop.ts @@ -0,0 +1,19 @@ +for (var i: number = 1; i < 10; ++i) { + if (i < 5) + switch (i) { + case 2: + console.log(i, ", continue"); + continue; + case 3: + console.log(i, ", fall-through"); + case 4: + console.log(i, ", break"); + break; + default: + console.log(i, ", default"); + } + else { + console.log(i, ", else branch"); + break; + } +} diff --git a/src/MapleFE/test/typescript/unit_tests/switch-in-loop.ts.result b/src/MapleFE/test/typescript/unit_tests/switch-in-loop.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..2a2cbafd0e41b584a9c38e06861d9dd36b2ab782 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/switch-in-loop.ts.result @@ -0,0 +1,13 @@ +Matched 95 tokens. 
+============= Module =========== +== Sub Tree == +for ( ) + cond-branch cond:i LT 5 + true branch : + A switch + false branch : + console.log(i,", else branch") + break: + + + diff --git a/src/MapleFE/test/typescript/unit_tests/switch-in-loop2.ts b/src/MapleFE/test/typescript/unit_tests/switch-in-loop2.ts new file mode 100644 index 0000000000000000000000000000000000000000..a569c469f9e35d5954e75aa5769569bd78d7eb8d --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/switch-in-loop2.ts @@ -0,0 +1,18 @@ +class Klass { + [key: number]: string; +} +function func() { + var obj: Klass = {}; + for (var i: number = 1; i < 4; ++i) { + switch (i) { + case 2: + break; + default: + obj[i] = "OK"; + break; + } + } + return obj; +} + +console.log(func()); diff --git a/src/MapleFE/test/typescript/unit_tests/switch-in-loop2.ts.result b/src/MapleFE/test/typescript/unit_tests/switch-in-loop2.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..3d3d8dcc03457c6e80a6696b1ff77224ee97f98e --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/switch-in-loop2.ts.result @@ -0,0 +1,25 @@ +Matched 12 tokens. +Matched 69 tokens. +Matched 78 tokens. +============= Module =========== +== Sub Tree == +class Klass + Fields: + string + Instance Initializer: + Constructors: + Methods: + LocalClasses: + LocalInterfaces: + +== Sub Tree == +func func() throws: + js_var Decl: obj= {} + for ( ) + A switch + + + return obj + +== Sub Tree == +console.log(func()) diff --git a/src/MapleFE/test/typescript/unit_tests/switch-stmt-case.ts b/src/MapleFE/test/typescript/unit_tests/switch-stmt-case.ts new file mode 100644 index 0000000000000000000000000000000000000000..ad2a522153e283538fcb3105740c4c63975625b6 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/switch-stmt-case.ts @@ -0,0 +1,8 @@ +let obj: number = 1; + +switch (typeof obj) { + case "number": + case "string": + console.log(obj); + break; +} diff --git a/src/MapleFE/test/typescript/unit_tests/switch-stmt-case.ts.result b/src/MapleFE/test/typescript/unit_tests/switch-stmt-case.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..1595ff943185d42c79f846e64828056b090f69aa --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/switch-stmt-case.ts.result @@ -0,0 +1,8 @@ +Matched 7 tokens. +Matched 29 tokens. +============= Module =========== +== Sub Tree == +js_let Decl: obj=1 +== Sub Tree == +A switch + diff --git a/src/MapleFE/test/typescript/unit_tests/switch-stmt.ts b/src/MapleFE/test/typescript/unit_tests/switch-stmt.ts new file mode 100644 index 0000000000000000000000000000000000000000..51c42c254b34d4cfef490a40e1797b48d57bc89f --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/switch-stmt.ts @@ -0,0 +1,16 @@ +var n: number = 1; +switch (true) { + case n < 5: + console.log(n, " is less than 5"); + case n > 2 && n < 5: + console.log(n, " + 1 is equal to", n + 1); + break; + case n == 6: + console.log(n, " is equal to 6"); + break; + case n < 8: + console.log(n, " is greater than 4 and less than 8"); + break; + default: + console.log(n, " is greater than 7"); +} diff --git a/src/MapleFE/test/typescript/unit_tests/switch-stmt.ts.result b/src/MapleFE/test/typescript/unit_tests/switch-stmt.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..7f4347709053422b86a49935942a0c76ddf5f791 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/switch-stmt.ts.result @@ -0,0 +1,8 @@ +Matched 7 tokens. +Matched 94 tokens. 
+============= Module =========== +== Sub Tree == +js_var Decl: n=1 +== Sub Tree == +A switch + diff --git a/src/MapleFE/test/typescript/unit_tests/symbol-as-key.ts b/src/MapleFE/test/typescript/unit_tests/symbol-as-key.ts new file mode 100644 index 0000000000000000000000000000000000000000..dd746c034c05f67221086af6961b8e80be42e597 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/symbol-as-key.ts @@ -0,0 +1,18 @@ +// tsc --target es2015 +const tag: unique symbol = Symbol("my symbol"); +interface IFace { + [tag]?: number; +} + +class Klass implements IFace { + [tag]: number; + s: string = "example"; +} + +var obj: Klass = new Klass(); +obj[tag] = 123; +console.log(obj); +console.log(Symbol.keyFor(tag)); + +const shared: symbol = Symbol.for("shared symbol"); +console.log(Symbol.keyFor(shared)); diff --git a/src/MapleFE/test/typescript/unit_tests/symbol-as-key.ts.result b/src/MapleFE/test/typescript/unit_tests/symbol-as-key.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..ad51f89159c1cdc33f5af4558aed683808eccaf0 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/symbol-as-key.ts.result @@ -0,0 +1,36 @@ +Matched 11 tokens. +Matched 22 tokens. +Matched 40 tokens. +Matched 50 tokens. +Matched 57 tokens. +Matched 64 tokens. +Matched 76 tokens. +Matched 88 tokens. +Matched 100 tokens. +============= Module =========== +== Sub Tree == +js_const Decl: tag=Symbol("my symbol") +== Sub Tree == +ts_interface: IFace {[tag] : number } +== Sub Tree == +class Klass + Fields: + [tag] : number s="example" + Instance Initializer: + Constructors: + Methods: + LocalClasses: + LocalInterfaces: + +== Sub Tree == +js_var Decl: obj=new Klass() +== Sub Tree == +obj[tag] Assign 123 +== Sub Tree == +console.log(obj) +== Sub Tree == +console.log(Symbol.keyFor(tag)) +== Sub Tree == +js_const Decl: shared=Symbol.for("shared symbol") +== Sub Tree == +console.log(Symbol.keyFor(shared)) diff --git a/src/MapleFE/test/typescript/unit_tests/tagged-template.ts b/src/MapleFE/test/typescript/unit_tests/tagged-template.ts new file mode 100644 index 0000000000000000000000000000000000000000..aa39ee7a152f3e7b23e67a207d2472ac603e490d --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/tagged-template.ts @@ -0,0 +1,12 @@ +// Tagged Template Literal +function bar(a: TemplateStringsArray, b:number, c:string) { + return "bar: a=[" + a + "] b=[" + b + "] c=[" + c + "]"; +} +function f() { + let x: number = 10; + let y: string = "abc"; + return bar`PX + ${x}px${y}PX + `; +} +console.log(f()); diff --git a/src/MapleFE/test/typescript/unit_tests/tagged-template.ts.result b/src/MapleFE/test/typescript/unit_tests/tagged-template.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..9d34dab4a01b6db066facfbae2e991c7add09c19 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/tagged-template.ts.result @@ -0,0 +1,18 @@ +Matched 32 tokens. +Matched 56 tokens. +Matched 65 tokens. +Matched 66 tokens. +Matched 67 tokens. 
+============= Module =========== +== Sub Tree == +func bar(a,b,c) throws: + return "bar: a=[" Add a Add "] b=[" Add b Add "] c=[" Add c Add "]" + +== Sub Tree == +func f() throws: + js_let Decl: x=10 + js_let Decl: y="abc" + return bar() + +== Sub Tree == +console.log(f()) diff --git a/src/MapleFE/test/typescript/unit_tests/template-literal-as-type.ts b/src/MapleFE/test/typescript/unit_tests/template-literal-as-type.ts new file mode 100644 index 0000000000000000000000000000000000000000..f8f2ece1da5366386c807255c8e160979b11fc42 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/template-literal-as-type.ts @@ -0,0 +1,4 @@ +const X = "X"; +const T: `${typeof X}type` = `${X}type`; +console.log(typeof T); +console.log(T); diff --git a/src/MapleFE/test/typescript/unit_tests/template-literal-as-type.ts.result b/src/MapleFE/test/typescript/unit_tests/template-literal-as-type.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..e209915dffe9b82f8a18836edc7377d1be4210b2 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/template-literal-as-type.ts.result @@ -0,0 +1,15 @@ +Matched 5 tokens. +Matched 12 tokens. +Matched 20 tokens. +Matched 27 tokens. +Matched 29 tokens. +Matched 30 tokens. +============= Module =========== +== Sub Tree == +js_const Decl: X="X" +== Sub Tree == +js_const Decl: T= template-literal: NULL,X,"type",NULL +== Sub Tree == +console.log( typeof T) +== Sub Tree == +console.log(T) diff --git a/src/MapleFE/test/typescript/unit_tests/template-literal-in.ts b/src/MapleFE/test/typescript/unit_tests/template-literal-in.ts new file mode 100644 index 0000000000000000000000000000000000000000..3cab93dc7dabf5318ed0458a6ab2b9d17bf64ca9 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/template-literal-in.ts @@ -0,0 +1,3 @@ +type MyType<B extends Record<string, any>> = { + [K in`${Extract<keyof B, string | number>}`]: B[K] +}; diff --git a/src/MapleFE/test/typescript/unit_tests/template-literal-in.ts.result b/src/MapleFE/test/typescript/unit_tests/template-literal-in.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..da3d29940e4fd9b84cddd9c1b867d564208ebdab --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/template-literal-in.ts.result @@ -0,0 +1,5 @@ +Matched 27 tokens. +Matched 36 tokens. +============= Module =========== +== Sub Tree == + type MyType = {[K in template-literal: NULL,Extract< keyof B,union = string | number>] : B[K] } diff --git a/src/MapleFE/test/typescript/unit_tests/template-literal-infer.ts b/src/MapleFE/test/typescript/unit_tests/template-literal-infer.ts new file mode 100644 index 0000000000000000000000000000000000000000..c94cf4771ce39a9e357cd30219973e68cda9a568 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/template-literal-infer.ts @@ -0,0 +1,11 @@ +// https://github.com/microsoft/TypeScript/pull/40336 +type Split<S extends string, D extends string> = + string extends S ? string[] : + S extends '' ? [] : + S extends `${infer T}${D}${infer U}` ? 
[T, ...Split<U, D>] : + [S]; + +type T40 = Split<'foo', '.'>; // ['foo'] +type T41 = Split<'foo.bar.baz', '.'>; // ['foo', 'bar', 'baz'] +type T42 = Split<'foo.bar', ''>; // ['f', 'o', 'o', '.', 'b', 'a', 'r'] +type T43 = Split<any, '.'>; // string[] diff --git a/src/MapleFE/test/typescript/unit_tests/template-literal-infer.ts.result b/src/MapleFE/test/typescript/unit_tests/template-literal-infer.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..1798d6ece23d77e6e2dafcba54644f57b0e74d80 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/template-literal-infer.ts.result @@ -0,0 +1,19 @@ +Matched 47 tokens. +Matched 57 tokens. +Matched 67 tokens. +Matched 77 tokens. +Matched 87 tokens. +Matched 89 tokens. +Matched 90 tokens. +Matched 92 tokens. +============= Module =========== +== Sub Tree == + type Split = string extends S ? prim array-TBD : S extends "" ? prim array-TBD : S extends template-literal: NULL, infer T,NULL,D,NULL, infer U ? [ : T , : Split , ] : [ : S , ] +== Sub Tree == + type T40 = Split<"foo","."> +== Sub Tree == + type T41 = Split<"foo.bar.baz","."> +== Sub Tree == + type T42 = Split<"foo.bar",""> +== Sub Tree == + type T43 = Split diff --git a/src/MapleFE/test/typescript/unit_tests/template-literal-type.ts b/src/MapleFE/test/typescript/unit_tests/template-literal-type.ts new file mode 100644 index 0000000000000000000000000000000000000000..073bae7598a49870fb29a75da43f6c479ed99402 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/template-literal-type.ts @@ -0,0 +1,12 @@ +class Num { + neg: boolean = false; + val: number = 0; +} + +function func(v: Num): `${string}n` | `-${string}n` { + return `${v.neg ? '-' : ''}${v.val}n`; +} + +var obj : Num = {neg: true, val: 123}; +console.log(func(obj)); +console.log(typeof func(obj)); diff --git a/src/MapleFE/test/typescript/unit_tests/template-literal-type.ts.result b/src/MapleFE/test/typescript/unit_tests/template-literal-type.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..70ca48c63fb79ef537785175df99a5d33a80e5f2 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/template-literal-type.ts.result @@ -0,0 +1,30 @@ +Matched 16 tokens. +Matched 32 tokens. +Matched 47 tokens. +Matched 57 tokens. +Matched 68 tokens. +Matched 69 tokens. +Matched 70 tokens. +Matched 77 tokens. +Matched 80 tokens. +============= Module =========== +== Sub Tree == +class Num + Fields: + neg=false val=0 + Instance Initializer: + Constructors: + Methods: + LocalClasses: + LocalInterfaces: + +== Sub Tree == +func func(v) throws: + return template-literal: NULL,v.neg ? 
"-" : "",NULL,v.val,"n",NULL + +== Sub Tree == +js_var Decl: obj= {neg:true, val:123} +== Sub Tree == +console.log(func(obj)) +== Sub Tree == +console.log( typeof func(obj)) diff --git a/src/MapleFE/test/typescript/unit_tests/template-literals.ts b/src/MapleFE/test/typescript/unit_tests/template-literals.ts new file mode 100644 index 0000000000000000000000000000000000000000..c8a9c9453dcb712fbf151594ac4d09940a7cb93d --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/template-literals.ts @@ -0,0 +1,7 @@ +// Template Literal +let x = 10; +let y: number = 20; +let Left = `${x}px`; +let Right: string = `${x}px`; +let Top = `${y}px`; +let Bottom: string = `${y}px`; diff --git a/src/MapleFE/test/typescript/unit_tests/template-literals.ts.result b/src/MapleFE/test/typescript/unit_tests/template-literals.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..17888604cec117983ac361103ca68b3ebf8166d2 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/template-literals.ts.result @@ -0,0 +1,23 @@ +Matched 5 tokens. +Matched 12 tokens. +Matched 17 tokens. +Matched 24 tokens. +Matched 29 tokens. +Matched 36 tokens. +Matched 37 tokens. +Matched 38 tokens. +Matched 39 tokens. +Matched 40 tokens. +============= Module =========== +== Sub Tree == +js_let Decl: x=10 +== Sub Tree == +js_let Decl: y=20 +== Sub Tree == +js_let Decl: Left= template-literal: NULL,x,"px",NULL +== Sub Tree == +js_let Decl: Right= template-literal: NULL,x,"px",NULL +== Sub Tree == +js_let Decl: Top= template-literal: NULL,y,"px",NULL +== Sub Tree == +js_let Decl: Bottom= template-literal: NULL,y,"px",NULL diff --git a/src/MapleFE/test/typescript/unit_tests/template-literals10.ts b/src/MapleFE/test/typescript/unit_tests/template-literals10.ts new file mode 100644 index 0000000000000000000000000000000000000000..03f001540185e855c1b3f0357465a76ff312e734 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/template-literals10.ts @@ -0,0 +1,6 @@ +var flag = false; +var a = ".x"; +console.log( + `${`if(typeof ${flag ? "(prop)" : "prop"}!=="object"){` + "o"}${a}=prop;` + + `}` +); diff --git a/src/MapleFE/test/typescript/unit_tests/template-literals10.ts.result b/src/MapleFE/test/typescript/unit_tests/template-literals10.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..959e7eeac8353c83c9073b71840ba10f000c8d13 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/template-literals10.ts.result @@ -0,0 +1,13 @@ +Matched 5 tokens. +Matched 10 tokens. +Matched 19 tokens. +Matched 22 tokens. +Matched 23 tokens. +Matched 28 tokens. +============= Module =========== +== Sub Tree == +js_var Decl: flag=false +== Sub Tree == +js_var Decl: a=".x" +== Sub Tree == +console.log( template-literal: NULL, template-literal: "if(typeof ",flag ? 
"(prop)" : "prop","!=="object"){",NULL Add "o",NULL,a,"=prop;",NULL Add template-literal: "}",NULL) diff --git a/src/MapleFE/test/typescript/unit_tests/template-literals11.ts b/src/MapleFE/test/typescript/unit_tests/template-literals11.ts new file mode 100644 index 0000000000000000000000000000000000000000..723fb535f8673d9c1cc854db4c6df38a90b24340 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/template-literals11.ts @@ -0,0 +1,3 @@ +function func(name: string) { + return Function("target", `${"try {\n" + " target."}${name}();\n` + `}`); +} diff --git a/src/MapleFE/test/typescript/unit_tests/template-literals11.ts.result b/src/MapleFE/test/typescript/unit_tests/template-literals11.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..ebe9d1b700acf828762510171e4a15bd80e330dd --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/template-literals11.ts.result @@ -0,0 +1,8 @@ +Matched 19 tokens. +Matched 22 tokens. +Matched 23 tokens. +============= Module =========== +== Sub Tree == +func func(name) throws: + return Function("target", template-literal: NULL,"try {\n" Add " target.",NULL,name,"();\n",NULL Add template-literal: "}",NULL) + diff --git a/src/MapleFE/test/typescript/unit_tests/template-literals2.ts b/src/MapleFE/test/typescript/unit_tests/template-literals2.ts new file mode 100644 index 0000000000000000000000000000000000000000..7945e9ce5bf481797cb1073d68e205309ceb47dd --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/template-literals2.ts @@ -0,0 +1,5 @@ +// Template Literal +function func(msg: string): string { + return "MSG: " + msg; +} +console.log(`Status: ${func("OK")}`); diff --git a/src/MapleFE/test/typescript/unit_tests/template-literals2.ts.result b/src/MapleFE/test/typescript/unit_tests/template-literals2.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..5ba2dd8e30834f775d51197b5461a189c6a0b26d --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/template-literals2.ts.result @@ -0,0 +1,10 @@ +Matched 16 tokens. +Matched 23 tokens. +Matched 27 tokens. +============= Module =========== +== Sub Tree == +func func(msg) throws: + return "MSG: " Add msg + +== Sub Tree == +console.log( template-literal: "Status: ",func("OK")) diff --git a/src/MapleFE/test/typescript/unit_tests/template-literals3.ts b/src/MapleFE/test/typescript/unit_tests/template-literals3.ts new file mode 100644 index 0000000000000000000000000000000000000000..aee708bb7fbf93c373981d704ba01a2d52e3d35d --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/template-literals3.ts @@ -0,0 +1,2 @@ +var x: number = 20; +let Left = `${x}px`; diff --git a/src/MapleFE/test/typescript/unit_tests/template-literals3.ts.result b/src/MapleFE/test/typescript/unit_tests/template-literals3.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..4359ca868c85380f840515525338bbceede24871 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/template-literals3.ts.result @@ -0,0 +1,8 @@ +Matched 7 tokens. +Matched 12 tokens. +Matched 13 tokens. 
+============= Module =========== +== Sub Tree == +js_var Decl: x=20 +== Sub Tree == +js_let Decl: Left= template-literal: NULL,x,"px",NULL diff --git a/src/MapleFE/test/typescript/unit_tests/template-literals4.ts b/src/MapleFE/test/typescript/unit_tests/template-literals4.ts new file mode 100644 index 0000000000000000000000000000000000000000..a04949c7aec8aa1dd36e0700bac24970a469138d --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/template-literals4.ts @@ -0,0 +1,6 @@ +var x: number = 2; +var y: number = 4; +var z: number = 6; + +let px = `${x + y}px and ${z ? x + z : y + z}px`; +console.log(px); diff --git a/src/MapleFE/test/typescript/unit_tests/template-literals4.ts.result b/src/MapleFE/test/typescript/unit_tests/template-literals4.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..b90002372ff4582ca9ac1f4293e5daa176bb8401 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/template-literals4.ts.result @@ -0,0 +1,18 @@ +Matched 7 tokens. +Matched 14 tokens. +Matched 21 tokens. +Matched 26 tokens. +Matched 33 tokens. +Matched 36 tokens. +Matched 45 tokens. +============= Module =========== +== Sub Tree == +js_var Decl: x=2 +== Sub Tree == +js_var Decl: y=4 +== Sub Tree == +js_var Decl: z=6 +== Sub Tree == +js_let Decl: px= template-literal: NULL,x Add y,"px and ",z ? x Add z : y Add z,"px",NULL +== Sub Tree == +console.log(px) diff --git a/src/MapleFE/test/typescript/unit_tests/template-literals5.ts b/src/MapleFE/test/typescript/unit_tests/template-literals5.ts new file mode 100644 index 0000000000000000000000000000000000000000..d04070aa091d9388f8d3625a35a328117a60179e --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/template-literals5.ts @@ -0,0 +1,3 @@ +var x: number = 20; +let Left = `${x}px`; +console.log(Left); diff --git a/src/MapleFE/test/typescript/unit_tests/template-literals5.ts.result b/src/MapleFE/test/typescript/unit_tests/template-literals5.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..6ef3205e3dffededef95c6b69605836fc7c6f62e --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/template-literals5.ts.result @@ -0,0 +1,11 @@ +Matched 7 tokens. +Matched 12 tokens. +Matched 19 tokens. +Matched 20 tokens. +============= Module =========== +== Sub Tree == +js_var Decl: x=20 +== Sub Tree == +js_let Decl: Left= template-literal: NULL,x,"px",NULL +== Sub Tree == +console.log(Left) diff --git a/src/MapleFE/test/typescript/unit_tests/template-literals6.ts b/src/MapleFE/test/typescript/unit_tests/template-literals6.ts new file mode 100644 index 0000000000000000000000000000000000000000..bd273860a3cfdf95fdb5b3510b11f24db1d267dc --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/template-literals6.ts @@ -0,0 +1,2 @@ +console.log(`string text line 1 +string text line 2`); diff --git a/src/MapleFE/test/typescript/unit_tests/template-literals6.ts.result b/src/MapleFE/test/typescript/unit_tests/template-literals6.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..217df2d2d9560b9142ef8187de7b7cb1bb76298c --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/template-literals6.ts.result @@ -0,0 +1,5 @@ +Matched 7 tokens. 
+============= Module =========== +== Sub Tree == +console.log( template-literal: "string text line 1 +string text line 2",NULL) diff --git a/src/MapleFE/test/typescript/unit_tests/template-literals7.ts b/src/MapleFE/test/typescript/unit_tests/template-literals7.ts new file mode 100644 index 0000000000000000000000000000000000000000..68d7abbd0909d3eac4fa71894b14184edb24740a --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/template-literals7.ts @@ -0,0 +1,3 @@ +var arr: number[] = [1, 2, 3]; +var s: string = `${arr[0] + 1}/${arr[1] + 1}/${arr[2] + 1}`; +console.log(s); diff --git a/src/MapleFE/test/typescript/unit_tests/template-literals7.ts.result b/src/MapleFE/test/typescript/unit_tests/template-literals7.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..b4e72dc0a251032ac79f6856d19450f84da53a25 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/template-literals7.ts.result @@ -0,0 +1,13 @@ +Matched 15 tokens. +Matched 22 tokens. +Matched 29 tokens. +Matched 35 tokens. +Matched 41 tokens. +Matched 47 tokens. +============= Module =========== +== Sub Tree == +js_var Decl: arr=[1,2,3] +== Sub Tree == +js_var Decl: s= template-literal: NULL,arr[0] Add 1,"/",arr[1] Add 1,"/",arr[2] Add 1 +== Sub Tree == +console.log(s) diff --git a/src/MapleFE/test/typescript/unit_tests/template-literals8.ts b/src/MapleFE/test/typescript/unit_tests/template-literals8.ts new file mode 100644 index 0000000000000000000000000000000000000000..e5dbf4db9cf53febe8cba505acdecf59f4459d32 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/template-literals8.ts @@ -0,0 +1,5 @@ +function func() { + return `${{}}`; +} + +console.log(func()); diff --git a/src/MapleFE/test/typescript/unit_tests/template-literals8.ts.result b/src/MapleFE/test/typescript/unit_tests/template-literals8.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..6f3e6604c048c3440cee9f6354000a535a841b02 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/template-literals8.ts.result @@ -0,0 +1,10 @@ +Matched 9 tokens. +Matched 18 tokens. +Matched 20 tokens. +============= Module =========== +== Sub Tree == +func func() throws: + return template-literal: NULL, {} + +== Sub Tree == +console.log(func()) diff --git a/src/MapleFE/test/typescript/unit_tests/template-literals9.ts b/src/MapleFE/test/typescript/unit_tests/template-literals9.ts new file mode 100644 index 0000000000000000000000000000000000000000..723fb535f8673d9c1cc854db4c6df38a90b24340 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/template-literals9.ts @@ -0,0 +1,3 @@ +function func(name: string) { + return Function("target", `${"try {\n" + " target."}${name}();\n` + `}`); +} diff --git a/src/MapleFE/test/typescript/unit_tests/template-literals9.ts.result b/src/MapleFE/test/typescript/unit_tests/template-literals9.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..ebe9d1b700acf828762510171e4a15bd80e330dd --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/template-literals9.ts.result @@ -0,0 +1,8 @@ +Matched 19 tokens. +Matched 22 tokens. +Matched 23 tokens. 
+============= Module =========== +== Sub Tree == +func func(name) throws: + return Function("target", template-literal: NULL,"try {\n" Add " target.",NULL,name,"();\n",NULL Add template-literal: "}",NULL) + diff --git a/src/MapleFE/test/typescript/unit_tests/ternary-op-1.ts b/src/MapleFE/test/typescript/unit_tests/ternary-op-1.ts new file mode 100644 index 0000000000000000000000000000000000000000..c8c730a03b379f30c2a94db988402c5af2634c62 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/ternary-op-1.ts @@ -0,0 +1,8 @@ +function func(x: number): number { + return (x > 10 ? x : x * x) ? x * 2 : x * 3; +} + +let r = func(12); +console.log(r); +r = func(3); +console.log(r); diff --git a/src/MapleFE/test/typescript/unit_tests/ternary-op-1.ts.result b/src/MapleFE/test/typescript/unit_tests/ternary-op-1.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..7890ae20aab32b63d32e01246dcacad6c25ce37d --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/ternary-op-1.ts.result @@ -0,0 +1,18 @@ +Matched 32 tokens. +Matched 40 tokens. +Matched 47 tokens. +Matched 54 tokens. +Matched 61 tokens. +============= Module =========== +== Sub Tree == +func func(x) throws: + return x GT 10 ? x : x Mul x ? x Mul 2 : x Mul 3 + +== Sub Tree == +js_let Decl: r=func(12) +== Sub Tree == +console.log(r) +== Sub Tree == +r Assign func(3) +== Sub Tree == +console.log(r) diff --git a/src/MapleFE/test/typescript/unit_tests/ternary-op.ts b/src/MapleFE/test/typescript/unit_tests/ternary-op.ts new file mode 100644 index 0000000000000000000000000000000000000000..96a416388641155447c5ac7cce9cc819540216aa --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/ternary-op.ts @@ -0,0 +1,8 @@ +function func(x: number): number { + return x > 10 ? x : x * x; +} + +let r = func(12); +console.log(r); +r = func(3); +console.log(r); diff --git a/src/MapleFE/test/typescript/unit_tests/ternary-op.ts.result b/src/MapleFE/test/typescript/unit_tests/ternary-op.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..2c97387863fe9be8681cf371febd97e63997442e --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/ternary-op.ts.result @@ -0,0 +1,18 @@ +Matched 22 tokens. +Matched 30 tokens. +Matched 37 tokens. +Matched 44 tokens. +Matched 51 tokens. +============= Module =========== +== Sub Tree == +func func(x) throws: + return x GT 10 ? 
x : x Mul x + +== Sub Tree == +js_let Decl: r=func(12) +== Sub Tree == +console.log(r) +== Sub Tree == +r Assign func(3) +== Sub Tree == +console.log(r) diff --git a/src/MapleFE/test/typescript/unit_tests/this-as-any.ts b/src/MapleFE/test/typescript/unit_tests/this-as-any.ts new file mode 100644 index 0000000000000000000000000000000000000000..5eb26e6910a2dba57f7f0a2847cae53661171d29 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/this-as-any.ts @@ -0,0 +1,23 @@ +class Car { + private _make: string; + constructor(make: string) { + this._make = make; + } + public getMake(): string { + return this._make; + } +} + +class Model extends Car { + private _model?: string; + constructor(make: string, model: string) { + super(make); + (this as any)._model = model; + } + public getModel(): string { + return this._model!; + } +} + +let passat: Model = new Model("VW", "Passat"); +console.log(passat.getMake(), passat.getModel()); diff --git a/src/MapleFE/test/typescript/unit_tests/this-as-any.ts.result b/src/MapleFE/test/typescript/unit_tests/this-as-any.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..948cd8e6b5e2334da5cf0f8bf4b014c2e07cd41d --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/this-as-any.ts.result @@ -0,0 +1,38 @@ +Matched 36 tokens. +Matched 89 tokens. +Matched 102 tokens. +Matched 119 tokens. +============= Module =========== +== Sub Tree == +class Car + Fields: + _make + Instance Initializer: + Constructors: + constructor (make) throws: + this._make Assign make + Methods: + func getMake() throws: + return this._make + LocalClasses: + LocalInterfaces: + +== Sub Tree == +class Model + Fields: + _model? + Instance Initializer: + Constructors: + constructor (make,model) throws: + super(make) + this._model Assign model + Methods: + func getModel() throws: + return this._model! + LocalClasses: + LocalInterfaces: + +== Sub Tree == +js_let Decl: passat=new Model("VW","Passat") +== Sub Tree == +console.log(passat.getMake(),passat.getModel()) diff --git a/src/MapleFE/test/typescript/unit_tests/this-as-prop-name.ts b/src/MapleFE/test/typescript/unit_tests/this-as-prop-name.ts new file mode 100644 index 0000000000000000000000000000000000000000..8e35935ccc41c875c6d85539a7476d87692ea293 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/this-as-prop-name.ts @@ -0,0 +1,7 @@ +interface IFace { + this: Object; +}; + +var obj: IFace = { this: {name: "Name"} }; +console.log(obj); +console.log(obj["this"]); diff --git a/src/MapleFE/test/typescript/unit_tests/this-as-prop-name.ts.result b/src/MapleFE/test/typescript/unit_tests/this-as-prop-name.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..cf25f818b82006fb4e16b6dc769cedd2d8c78590 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/this-as-prop-name.ts.result @@ -0,0 +1,14 @@ +Matched 8 tokens. +Matched 9 tokens. +Matched 24 tokens. +Matched 31 tokens. +Matched 41 tokens. 
+============= Module =========== +== Sub Tree == +ts_interface: IFace {this } +== Sub Tree == +js_var Decl: obj= {this: {name:"Name"}} +== Sub Tree == +console.log(obj) +== Sub Tree == +console.log(obj["this"]) diff --git a/src/MapleFE/test/typescript/unit_tests/this-parameter.ts b/src/MapleFE/test/typescript/unit_tests/this-parameter.ts new file mode 100644 index 0000000000000000000000000000000000000000..909c2a101c0ce74fd7e57827a893d14b6f7f658b --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/this-parameter.ts @@ -0,0 +1,26 @@ +class Klass { + [key: string]: Function | number; +} + +var func = (obj: Klass) => { + const desc = Object.getOwnPropertyDescriptor(obj, "f"); + console.log(desc); + if (typeof desc!.value === "function") { + const v = desc!.value; + obj["f"] = function (this) { + console.log("Calling the new function"); + return v.call(this, ...arguments); + }; + } +}; + +var o = { + n: 123, + f: function () { + console.log("Calling f()"); + return this; + }, +}; + +func(o); +console.log(o.f()); diff --git a/src/MapleFE/test/typescript/unit_tests/this-parameter.ts.result b/src/MapleFE/test/typescript/unit_tests/this-parameter.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..397e70c685d2143e702fd7d88f347e71ef5909c5 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/this-parameter.ts.result @@ -0,0 +1,38 @@ +Matched 14 tokens. +Matched 95 tokens. +Matched 123 tokens. +Matched 128 tokens. +Matched 139 tokens. +============= Module =========== +== Sub Tree == +class Klass + Fields: + union = Function | number + Instance Initializer: + Constructors: + Methods: + LocalClasses: + LocalInterfaces: + +== Sub Tree == +js_var Decl: func=(obj) -> js_const Decl: desc=Object.getOwnPropertyDescriptor(obj,"f") +console.log(desc) +cond-branch cond: typeof desc!.value StEq "function" +true branch : + js_const Decl: v=desc!.value + obj["f"] Assign func (this) throws: + console.log("Calling the new function") + return v.call(this,...arguments) + +false branch : + + +== Sub Tree == +js_var Decl: o= {n:123, f:func () throws: + console.log("Calling f()") + return this +} +== Sub Tree == +func(o) +== Sub Tree == +console.log(o.f()) diff --git a/src/MapleFE/test/typescript/unit_tests/three-dim-array.ts b/src/MapleFE/test/typescript/unit_tests/three-dim-array.ts new file mode 100644 index 0000000000000000000000000000000000000000..ef80ea4042f85508a7f9b4a9d42848bf60caa227 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/three-dim-array.ts @@ -0,0 +1,6 @@ +class Klass { + n: number[][][] = [[[3]]]; +} + +var c = new Klass(); +console.log(c); diff --git a/src/MapleFE/test/typescript/unit_tests/three-dim-array.ts.result b/src/MapleFE/test/typescript/unit_tests/three-dim-array.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..2091e6f5a3cdd08dd9b15e1b7018b250c0cae068 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/three-dim-array.ts.result @@ -0,0 +1,18 @@ +Matched 22 tokens. +Matched 30 tokens. +Matched 37 tokens. 
+============= Module =========== +== Sub Tree == +class Klass + Fields: + n=[[[3]]] + Instance Initializer: + Constructors: + Methods: + LocalClasses: + LocalInterfaces: + +== Sub Tree == +js_var Decl: c=new Klass() +== Sub Tree == +console.log(c) diff --git a/src/MapleFE/test/typescript/unit_tests/ti_call.ts b/src/MapleFE/test/typescript/unit_tests/ti_call.ts new file mode 100644 index 0000000000000000000000000000000000000000..c4f2d88d11bbae6e7bbcea747deae6495996f5ac --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/ti_call.ts @@ -0,0 +1,5 @@ +function bar(value: number): number { + return value; +} + +bar(13); diff --git a/src/MapleFE/test/typescript/unit_tests/ti_call.ts.result b/src/MapleFE/test/typescript/unit_tests/ti_call.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..7c335d8840e91417f3ba2277d8b755c650768431 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/ti_call.ts.result @@ -0,0 +1,9 @@ +Matched 14 tokens. +Matched 19 tokens. +============= Module =========== +== Sub Tree == +func bar(value) throws: + return value + +== Sub Tree == +bar(13) diff --git a/src/MapleFE/test/typescript/unit_tests/ti_call2.ts b/src/MapleFE/test/typescript/unit_tests/ti_call2.ts new file mode 100644 index 0000000000000000000000000000000000000000..61245bfe40820426ebbf6e88e1be5d27b62123f2 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/ti_call2.ts @@ -0,0 +1,6 @@ +function bar(value: number): number { + return value; +} + +bar(13); +bar(13.9); diff --git a/src/MapleFE/test/typescript/unit_tests/ti_call2.ts.result b/src/MapleFE/test/typescript/unit_tests/ti_call2.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..e07fd001e55edc36c077d8cdb6082de6a70e0040 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/ti_call2.ts.result @@ -0,0 +1,12 @@ +Matched 14 tokens. +Matched 19 tokens. +Matched 24 tokens. +============= Module =========== +== Sub Tree == +func bar(value) throws: + return value + +== Sub Tree == +bar(13) +== Sub Tree == +bar(13.9) diff --git a/src/MapleFE/test/typescript/unit_tests/ti_call_array.ts b/src/MapleFE/test/typescript/unit_tests/ti_call_array.ts new file mode 100644 index 0000000000000000000000000000000000000000..099c7511c64003dd5cae9c4db22ff849da00fc7a --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/ti_call_array.ts @@ -0,0 +1,6 @@ +function foo(array: number[], value: number): number { + return 0; +} + +var arr: number[] = [13, 21]; +foo(arr, 13); diff --git a/src/MapleFE/test/typescript/unit_tests/ti_call_array.ts.result b/src/MapleFE/test/typescript/unit_tests/ti_call_array.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..3d9b30e817b14a4dd300e064b91cc5b543403e76 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/ti_call_array.ts.result @@ -0,0 +1,12 @@ +Matched 20 tokens. +Matched 33 tokens. +Matched 40 tokens. 
+============= Module =========== +== Sub Tree == +func foo(array,value) throws: + return 0 + +== Sub Tree == +js_var Decl: arr=[13,21] +== Sub Tree == +foo(arr,13) diff --git a/src/MapleFE/test/typescript/unit_tests/ti_call_array2.ts b/src/MapleFE/test/typescript/unit_tests/ti_call_array2.ts new file mode 100644 index 0000000000000000000000000000000000000000..7be49ffe3f4975c94b9d08253a84b337e6fb8c20 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/ti_call_array2.ts @@ -0,0 +1,8 @@ +function foo(array: number[], value: number): number { + return 0; +} + +var arr: number[] = [13, 21]; +foo(arr, 13); +var arrf: number[] = [13.5, 21.5]; +foo(arrf, 13.5); diff --git a/src/MapleFE/test/typescript/unit_tests/ti_call_array2.ts.result b/src/MapleFE/test/typescript/unit_tests/ti_call_array2.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..21a6c20b1bd8626041366302837a0538123acfcc --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/ti_call_array2.ts.result @@ -0,0 +1,18 @@ +Matched 20 tokens. +Matched 33 tokens. +Matched 40 tokens. +Matched 53 tokens. +Matched 60 tokens. +============= Module =========== +== Sub Tree == +func foo(array,value) throws: + return 0 + +== Sub Tree == +js_var Decl: arr=[13,21] +== Sub Tree == +foo(arr,13) +== Sub Tree == +js_var Decl: arrf=[13.5,21.5] +== Sub Tree == +foo(arrf,13.5) diff --git a/src/MapleFE/test/typescript/unit_tests/ti_export_func.ts b/src/MapleFE/test/typescript/unit_tests/ti_export_func.ts new file mode 100644 index 0000000000000000000000000000000000000000..c5de99ce61e765984fc2646a2c084eb56f3cabb1 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/ti_export_func.ts @@ -0,0 +1,5 @@ +export function bar(value: number): number { + return value; +} + +bar(13); diff --git a/src/MapleFE/test/typescript/unit_tests/ti_export_func.ts.result b/src/MapleFE/test/typescript/unit_tests/ti_export_func.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..8cad8e66135ff3ac57ead29d7e64239c9e3d9603 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/ti_export_func.ts.result @@ -0,0 +1,9 @@ +Matched 15 tokens. +Matched 20 tokens. +============= Module =========== +== Sub Tree == +export {func bar(value) throws: + return value +} +== Sub Tree == +bar(13) diff --git a/src/MapleFE/test/typescript/unit_tests/trailing-commas.ts b/src/MapleFE/test/typescript/unit_tests/trailing-commas.ts new file mode 100644 index 0000000000000000000000000000000000000000..aea4967b6dbb44b774e9d60d13cfe4fc47f6af46 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/trailing-commas.ts @@ -0,0 +1,15 @@ +export { a, o, func, u as U, v as V }; + +var a: number[] = [1, 2, 3]; +console.log(a.length); + +var o: Object = { x: 1, y: "abc" }; +console.log(o); + +function func(n: number): number { + return n * n; +} +console.log(func(10)); + +var [u, v] = a; +console.log(u, v); diff --git a/src/MapleFE/test/typescript/unit_tests/trailing-commas.ts.result b/src/MapleFE/test/typescript/unit_tests/trailing-commas.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..019848c0dd0af7b13854c195a814aa40e67ba0b0 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/trailing-commas.ts.result @@ -0,0 +1,30 @@ +Matched 17 tokens. +Matched 32 tokens. +Matched 41 tokens. +Matched 56 tokens. +Matched 63 tokens. +Matched 79 tokens. +Matched 89 tokens. +Matched 98 tokens. +Matched 107 tokens. 
+============= Module =========== +== Sub Tree == +export {a,o,func,u as U,v as V} +== Sub Tree == +js_var Decl: a=[1,2,3] +== Sub Tree == +console.log(a.length) +== Sub Tree == +js_var Decl: o= {x:1, y:"abc"} +== Sub Tree == +console.log(o) +== Sub Tree == +func func(n) throws: + return n Mul n + +== Sub Tree == +console.log(func(10)) +== Sub Tree == +js_var Decl: {:u, :v} +== Sub Tree == +console.log(u,v) diff --git a/src/MapleFE/test/typescript/unit_tests/trailing-commas2.ts b/src/MapleFE/test/typescript/unit_tests/trailing-commas2.ts new file mode 100644 index 0000000000000000000000000000000000000000..6003c285d2efcf74774e250b16a847c43f8a44e3 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/trailing-commas2.ts @@ -0,0 +1,9 @@ +class Klass { + n: T | undefined = undefined; +} + +function func(x = 0.5, y: Klass = { n: 3 }): number { + return x + y.n!; +} + +console.log(func()); diff --git a/src/MapleFE/test/typescript/unit_tests/trailing-commas2.ts.result b/src/MapleFE/test/typescript/unit_tests/trailing-commas2.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..905afb4210f3344d535611006e77fc03ebd62cd7 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/trailing-commas2.ts.result @@ -0,0 +1,20 @@ +Matched 15 tokens. +Matched 47 tokens. +Matched 56 tokens. +============= Module =========== +== Sub Tree == +class Klass + Fields: + n=undefined + Instance Initializer: + Constructors: + Methods: + LocalClasses: + LocalInterfaces: + +== Sub Tree == +func func(x=0.5,y= {n:3}) throws: + return x Add y.n! + +== Sub Tree == +console.log(func()) diff --git a/src/MapleFE/test/typescript/unit_tests/trailing-commas3.ts b/src/MapleFE/test/typescript/unit_tests/trailing-commas3.ts new file mode 100644 index 0000000000000000000000000000000000000000..b84579b57c617df71bc019a5a80481b049da4a63 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/trailing-commas3.ts @@ -0,0 +1,7 @@ +class Klass {} +function func(m?: Klass | number) { + return m; +} + +var obj: Klass = new Klass(); +console.log(func(obj)); diff --git a/src/MapleFE/test/typescript/unit_tests/trailing-commas3.ts.result b/src/MapleFE/test/typescript/unit_tests/trailing-commas3.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..78af9b55080e024e39e4490864f098e0c3f8efd9 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/trailing-commas3.ts.result @@ -0,0 +1,23 @@ +Matched 4 tokens. +Matched 19 tokens. +Matched 29 tokens. +Matched 39 tokens. +============= Module =========== +== Sub Tree == +class Klass + Fields: + + Instance Initializer: + Constructors: + Methods: + LocalClasses: + LocalInterfaces: + +== Sub Tree == +func func(m?) 
throws: + return m + +== Sub Tree == +js_var Decl: obj=new Klass() +== Sub Tree == +console.log(func(obj)) diff --git a/src/MapleFE/test/typescript/unit_tests/trailing-commas4.ts b/src/MapleFE/test/typescript/unit_tests/trailing-commas4.ts new file mode 100644 index 0000000000000000000000000000000000000000..cf7481284cabbd766f05027ed46f64236855227b --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/trailing-commas4.ts @@ -0,0 +1,7 @@ +var obj = { + x: false, +}; +var obj2 = { + x: false, +}; +console.log(obj, obj2); diff --git a/src/MapleFE/test/typescript/unit_tests/trailing-commas4.ts.result b/src/MapleFE/test/typescript/unit_tests/trailing-commas4.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..0c861a083e6c5d23c4eb4c1961677354017873bb --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/trailing-commas4.ts.result @@ -0,0 +1,10 @@ +Matched 10 tokens. +Matched 20 tokens. +Matched 29 tokens. +============= Module =========== +== Sub Tree == +js_var Decl: obj= {x:false} +== Sub Tree == +js_var Decl: obj2= {x:false} +== Sub Tree == +console.log(obj,obj2) diff --git a/src/MapleFE/test/typescript/unit_tests/triple-slash-comment.ts b/src/MapleFE/test/typescript/unit_tests/triple-slash-comment.ts new file mode 100644 index 0000000000000000000000000000000000000000..29e5a31ca3b3a3ba94f49fbc89206d3bd06ec68c --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/triple-slash-comment.ts @@ -0,0 +1,10 @@ +enum ET { + TOP = "top", /// < top string + BOTTOM = "bottom", ///< bottom string +} + +/// +/// + +let et = ET.TOP; +console.log(et); diff --git a/src/MapleFE/test/typescript/unit_tests/triple-slash-comment.ts.result b/src/MapleFE/test/typescript/unit_tests/triple-slash-comment.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..e4afff30c02415e10dfeab2420a63675b1286d5d --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/triple-slash-comment.ts.result @@ -0,0 +1,10 @@ +Matched 12 tokens. +Matched 19 tokens. +Matched 26 tokens. +============= Module =========== +== Sub Tree == +ts_enum: ET {TOP="top";BOTTOM="bottom" } +== Sub Tree == +js_let Decl: et=ET.TOP +== Sub Tree == +console.log(et) diff --git a/src/MapleFE/test/typescript/unit_tests/triple-slash-dir.d.ts b/src/MapleFE/test/typescript/unit_tests/triple-slash-dir.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..c083b3bd4df8491a83d938882dca65783f30fa18 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/triple-slash-dir.d.ts @@ -0,0 +1,2 @@ +/// +/// diff --git a/src/MapleFE/test/typescript/unit_tests/triple-slash-dir.d.ts.result b/src/MapleFE/test/typescript/unit_tests/triple-slash-dir.d.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..0e74c743cd4c0c3b70270bc58d388ad13603002f --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/triple-slash-dir.d.ts.result @@ -0,0 +1,7 @@ +Matched 8 tokens. +Matched 16 tokens. 
+============= Module =========== +== Sub Tree == +trip-slash reference no-default-lib = "true" +== Sub Tree == +trip-slash reference lib = "es5" diff --git a/src/MapleFE/test/typescript/unit_tests/try-catch.ts b/src/MapleFE/test/typescript/unit_tests/try-catch.ts new file mode 100644 index 0000000000000000000000000000000000000000..25c2b3928a413450936c5b1e2ee40933c7d0a07f --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/try-catch.ts @@ -0,0 +1,10 @@ +for (var i = 3; i >= 0; --i) { + try { + if (i == 0) throw new Error("Zero"); + console.log("try", 3 / i); + } catch (err) { + console.log("catch", err); + } finally { + console.log("finally", i); + } +} diff --git a/src/MapleFE/test/typescript/unit_tests/try-catch.ts.result b/src/MapleFE/test/typescript/unit_tests/try-catch.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..619a4107820fac62047cc6d3f43f2881d4552701 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/try-catch.ts.result @@ -0,0 +1,14 @@ +Matched 70 tokens. +============= Module =========== +== Sub Tree == +for ( ) + try cond-branch cond:i EQ 0 + true branch : + throw new Error("Zero") + false branch : + + console.log("try",3 Div i) + catch() console.log("catch",err) + finally console.log("finally",i) + + diff --git a/src/MapleFE/test/typescript/unit_tests/try-catch2.ts b/src/MapleFE/test/typescript/unit_tests/try-catch2.ts new file mode 100644 index 0000000000000000000000000000000000000000..8e1f52c7ac07882bd5d0c33f2110ce5974cdcfd9 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/try-catch2.ts @@ -0,0 +1,17 @@ +class Klass { + str: string = "no"; +} +function func(obj: Klass) { + if (obj.str === "yes") { + return obj; + } else { + try { + throw "Exception"; + } catch (e: unknown) { + console.log(e); + return obj; + } + } +} + +console.log(func(new Klass())); diff --git a/src/MapleFE/test/typescript/unit_tests/try-catch2.ts.result b/src/MapleFE/test/typescript/unit_tests/try-catch2.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..7ba89ee85c6afd8dba50bf6aca8355ccc46ff421 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/try-catch2.ts.result @@ -0,0 +1,29 @@ +Matched 10 tokens. +Matched 59 tokens. +Matched 72 tokens. +============= Module =========== +== Sub Tree == +class Klass + Fields: + str="no" + Instance Initializer: + Constructors: + Methods: + LocalClasses: + LocalInterfaces: + +== Sub Tree == +func func(obj) throws: + cond-branch cond:obj.str StEq "yes" + true branch : + return obj + false branch : + try throw "Exception" + + catch() console.log(e) + return obj + + + +== Sub Tree == +console.log(func(new Klass())) diff --git a/src/MapleFE/test/typescript/unit_tests/try-catch3.ts b/src/MapleFE/test/typescript/unit_tests/try-catch3.ts new file mode 100644 index 0000000000000000000000000000000000000000..a2e76318bc62f93506435685ac50fa02826aa276 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/try-catch3.ts @@ -0,0 +1,7 @@ +for (var i = 3; i >= 0; --i) { + try { + if (i == 0) throw new Error("Zero"); + console.log("try", 3 / i); + } catch { + } +} diff --git a/src/MapleFE/test/typescript/unit_tests/try-catch3.ts.result b/src/MapleFE/test/typescript/unit_tests/try-catch3.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..49ac9127a4893cb5c77f4947eb002ba7c50f5785 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/try-catch3.ts.result @@ -0,0 +1,12 @@ +Matched 46 tokens. 
+============= Module =========== +== Sub Tree == +for ( ) + try cond-branch cond:i EQ 0 + true branch : + throw new Error("Zero") + false branch : + + console.log("try",3 Div i) + catch() + diff --git a/src/MapleFE/test/typescript/unit_tests/ts-maplike.d.ts b/src/MapleFE/test/typescript/unit_tests/ts-maplike.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..7b447e47157c5e18fc2d28050d07616f9ec8c827 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/ts-maplike.d.ts @@ -0,0 +1 @@ +export declare const map: (m: import("./export-interface").MapLike) => string; diff --git a/src/MapleFE/test/typescript/unit_tests/ts-maplike.d.ts.result b/src/MapleFE/test/typescript/unit_tests/ts-maplike.d.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..d25ca12a57a2ebd394b583994396179135a9cd52 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/ts-maplike.d.ts.result @@ -0,0 +1,4 @@ +Matched 23 tokens. +============= Module =========== +== Sub Tree == +export {declare js_const Decl: map} diff --git a/src/MapleFE/test/typescript/unit_tests/tuple-type.ts b/src/MapleFE/test/typescript/unit_tests/tuple-type.ts new file mode 100644 index 0000000000000000000000000000000000000000..1dbec1d7edc0790d2253797478554d7615fdea60 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/tuple-type.ts @@ -0,0 +1,6 @@ +function func(t: Function | [Function] | any): string { + return typeof t; +} + +var f: Function = func; +console.log(func(f)); diff --git a/src/MapleFE/test/typescript/unit_tests/tuple-type.ts.result b/src/MapleFE/test/typescript/unit_tests/tuple-type.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..f3c4b75bb345ff34d1017b1f23a3b0c98465722f --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/tuple-type.ts.result @@ -0,0 +1,12 @@ +Matched 21 tokens. +Matched 28 tokens. +Matched 38 tokens. +============= Module =========== +== Sub Tree == +func func(t) throws: + return typeof t + +== Sub Tree == +js_var Decl: f=func +== Sub Tree == +console.log(func(f)) diff --git a/src/MapleFE/test/typescript/unit_tests/tuple-type2.ts b/src/MapleFE/test/typescript/unit_tests/tuple-type2.ts new file mode 100644 index 0000000000000000000000000000000000000000..ccbb98b125039c8b6efef21c98a39d20177d0240 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/tuple-type2.ts @@ -0,0 +1,2 @@ +const x: Record = {}; +console.log(x); diff --git a/src/MapleFE/test/typescript/unit_tests/tuple-type2.ts.result b/src/MapleFE/test/typescript/unit_tests/tuple-type2.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..6ca8e406818f231dad3ad132cac6e2e21bd3fa0b --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/tuple-type2.ts.result @@ -0,0 +1,7 @@ +Matched 17 tokens. +Matched 24 tokens. 
+============= Module =========== +== Sub Tree == +js_const Decl: x= {} +== Sub Tree == +console.log(x) diff --git a/src/MapleFE/test/typescript/unit_tests/two-dim-array.ts b/src/MapleFE/test/typescript/unit_tests/two-dim-array.ts new file mode 100644 index 0000000000000000000000000000000000000000..b89e718d091878d176728582eec14bed4a5a9b7c --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/two-dim-array.ts @@ -0,0 +1,7 @@ +var arr: number[][] = [ + [1, 2, 3], + [4, 5, 6], +]; +console.log(arr[0][1]); +console.log(arr[1][0]); +console.log(arr); diff --git a/src/MapleFE/test/typescript/unit_tests/two-dim-array.ts.result b/src/MapleFE/test/typescript/unit_tests/two-dim-array.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..e0f27cff73934a255016676bb7cdc43fc69dab19 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/two-dim-array.ts.result @@ -0,0 +1,13 @@ +Matched 28 tokens. +Matched 41 tokens. +Matched 54 tokens. +Matched 61 tokens. +============= Module =========== +== Sub Tree == +js_var Decl: arr=[[1,2,3],[4,5,6]] +== Sub Tree == +console.log(arr[0][1]) +== Sub Tree == +console.log(arr[1][0]) +== Sub Tree == +console.log(arr) diff --git a/src/MapleFE/test/typescript/unit_tests/type-alias.ts b/src/MapleFE/test/typescript/unit_tests/type-alias.ts new file mode 100644 index 0000000000000000000000000000000000000000..d632a4656fad6d9f448fd3cad8539deb4385888a --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/type-alias.ts @@ -0,0 +1,7 @@ +type Foo = { name: "foo" }; +type Bar = { name: "bar" }; +type Tee = Foo | Bar; + +var f: Tee = { name: "foo" }; +var b: Tee = { name: "bar" }; +console.log(f, b); diff --git a/src/MapleFE/test/typescript/unit_tests/type-alias.ts.result b/src/MapleFE/test/typescript/unit_tests/type-alias.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..3c1b4194ed51d235c91a2ebb14423e508a751671 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/type-alias.ts.result @@ -0,0 +1,19 @@ +Matched 9 tokens. +Matched 18 tokens. +Matched 25 tokens. +Matched 36 tokens. +Matched 47 tokens. +Matched 56 tokens. +============= Module =========== +== Sub Tree == + type Foo = {name } +== Sub Tree == + type Bar = {name } +== Sub Tree == + type Tee = union = Foo | Bar +== Sub Tree == +js_var Decl: f= {name:"foo"} +== Sub Tree == +js_var Decl: b= {name:"bar"} +== Sub Tree == +console.log(f,b) diff --git a/src/MapleFE/test/typescript/unit_tests/type-alias2.ts b/src/MapleFE/test/typescript/unit_tests/type-alias2.ts new file mode 100644 index 0000000000000000000000000000000000000000..1111d0624f8bc92dfa03036c33c4ddef2ffacf75 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/type-alias2.ts @@ -0,0 +1,4 @@ +type UT = { + base: T; + ext: T extends ReadonlyArray ? UT : T; +}[I extends -1 ? "base" : "ext"]; diff --git a/src/MapleFE/test/typescript/unit_tests/type-alias2.ts.result b/src/MapleFE/test/typescript/unit_tests/type-alias2.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..3f929a59f616245895ca6fe1737ccab5d9c0c277 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/type-alias2.ts.result @@ -0,0 +1,4 @@ +Matched 56 tokens. +============= Module =========== +== Sub Tree == + type UT = {base;ext }[I extends -1 ? 
"base" : "ext"] diff --git a/src/MapleFE/test/typescript/unit_tests/type-alias3.ts b/src/MapleFE/test/typescript/unit_tests/type-alias3.ts new file mode 100644 index 0000000000000000000000000000000000000000..0dd747577fcbda545425b5f5102f2a4a926eeaf2 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/type-alias3.ts @@ -0,0 +1,8 @@ +type Default = "default"; +type DefaultUnknown = "unknown"; + +type MyType = { + [K: string]: typeof K extends Default ? string : + typeof K extends DefaultUnknown ? unknown : never +}; + diff --git a/src/MapleFE/test/typescript/unit_tests/type-alias3.ts.result b/src/MapleFE/test/typescript/unit_tests/type-alias3.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..b0d79f9a442e6c4dec640eac1b53bfc9a762a5d4 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/type-alias3.ts.result @@ -0,0 +1,10 @@ +Matched 5 tokens. +Matched 10 tokens. +Matched 37 tokens. +============= Module =========== +== Sub Tree == + type Default = "default" +== Sub Tree == + type DefaultUnknown = "unknown" +== Sub Tree == + type MyType = {string index type: typeof K extends Default ? string : typeof K extends DefaultUnknown ? unknown : never } diff --git a/src/MapleFE/test/typescript/unit_tests/type-alias4.ts b/src/MapleFE/test/typescript/unit_tests/type-alias4.ts new file mode 100644 index 0000000000000000000000000000000000000000..98d356dc0ee7449aaa685389e05929284ee0cefc --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/type-alias4.ts @@ -0,0 +1,2 @@ +type MyType = T extends string ? T : string; +type UT = MyType extends string ? string : number; diff --git a/src/MapleFE/test/typescript/unit_tests/type-alias4.ts.result b/src/MapleFE/test/typescript/unit_tests/type-alias4.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..d4a6dbad714c0183de803f17d3e2eb3135e4b5d8 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/type-alias4.ts.result @@ -0,0 +1,7 @@ +Matched 14 tokens. +Matched 31 tokens. +============= Module =========== +== Sub Tree == + type MyType = T extends string ? T : string +== Sub Tree == + type UT = MyType extends string ? string : number diff --git a/src/MapleFE/test/typescript/unit_tests/type-alias5.ts b/src/MapleFE/test/typescript/unit_tests/type-alias5.ts new file mode 100644 index 0000000000000000000000000000000000000000..3d5a7e8727089856075d67170b4b96036690995b --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/type-alias5.ts @@ -0,0 +1 @@ +type U = T extends unknown ? unknown extends T ? T extends true ? true : false : false : false; diff --git a/src/MapleFE/test/typescript/unit_tests/type-alias5.ts.result b/src/MapleFE/test/typescript/unit_tests/type-alias5.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..5541ed585ab913028dc7444372a12fae127dce32 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/type-alias5.ts.result @@ -0,0 +1,4 @@ +Matched 26 tokens. +============= Module =========== +== Sub Tree == + type U = T extends unknown ? unknown extends T ? T extends true ? true : false : false : false diff --git a/src/MapleFE/test/typescript/unit_tests/type-alias6.ts b/src/MapleFE/test/typescript/unit_tests/type-alias6.ts new file mode 100644 index 0000000000000000000000000000000000000000..0dbe94b9a61ff87efe7c7def6a28ff46a693b517 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/type-alias6.ts @@ -0,0 +1,3 @@ +type RemoveFirstType = + T['length'] extends 0 ? undefined : + (((...b: T) => void) extends (a: any, ...b: infer I) => void ? 
I : []) diff --git a/src/MapleFE/test/typescript/unit_tests/type-alias6.ts.result b/src/MapleFE/test/typescript/unit_tests/type-alias6.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..d0bda51d72b5ecc93698426cf5fd96c8f36235f5 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/type-alias6.ts.result @@ -0,0 +1,4 @@ +Matched 50 tokens. +============= Module =========== +== Sub Tree == + type RemoveFirstType = T["length"] extends 0 ? undefined : (b) -> extends (a,b) -> ? I : prim array-TBD diff --git a/src/MapleFE/test/typescript/unit_tests/type-alias7.ts b/src/MapleFE/test/typescript/unit_tests/type-alias7.ts new file mode 100644 index 0000000000000000000000000000000000000000..c1ea47795e0e05bc6074a897d1cac15f05df0a51 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/type-alias7.ts @@ -0,0 +1,6 @@ +class Klass { + value: number = 0; + key: string = ""; +} + +type MyType= Array>; diff --git a/src/MapleFE/test/typescript/unit_tests/type-alias7.ts.result b/src/MapleFE/test/typescript/unit_tests/type-alias7.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..20e1da6916ecde0509ef457e5cb217444e5dd219 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/type-alias7.ts.result @@ -0,0 +1,15 @@ +Matched 16 tokens. +Matched 31 tokens. +============= Module =========== +== Sub Tree == +class Klass + Fields: + value=0 key="" + Instance Initializer: + Constructors: + Methods: + LocalClasses: + LocalInterfaces: + +== Sub Tree == + type MyType = Array> diff --git a/src/MapleFE/test/typescript/unit_tests/type-assertion-as.ts b/src/MapleFE/test/typescript/unit_tests/type-assertion-as.ts new file mode 100644 index 0000000000000000000000000000000000000000..3d35b9c66907e1e9935c79e0e180fdff24bff1d8 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/type-assertion-as.ts @@ -0,0 +1,2 @@ +let code: any = 123; +let employeeCode = code as number; diff --git a/src/MapleFE/test/typescript/unit_tests/type-assertion-as.ts.result b/src/MapleFE/test/typescript/unit_tests/type-assertion-as.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..83ded8c5176f2f819c11eb07f6731688b3aa1fee --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/type-assertion-as.ts.result @@ -0,0 +1,7 @@ +Matched 7 tokens. +Matched 14 tokens. +============= Module =========== +== Sub Tree == +js_let Decl: code=123 +== Sub Tree == +js_let Decl: employeeCode=code diff --git a/src/MapleFE/test/typescript/unit_tests/type-assertion-as2.ts b/src/MapleFE/test/typescript/unit_tests/type-assertion-as2.ts new file mode 100644 index 0000000000000000000000000000000000000000..9e585d7121f97f6462f9097dfc3096aafcebbb5f --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/type-assertion-as2.ts @@ -0,0 +1,6 @@ +function func(e: T) : Array { + return [e]; +} +function foo(arg: any) { + return func(arg)[0] as T; +} diff --git a/src/MapleFE/test/typescript/unit_tests/type-assertion-as2.ts.result b/src/MapleFE/test/typescript/unit_tests/type-assertion-as2.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..8a4fb1dbd6372aeaf04f7e7c1ec3ef0faf3ede99 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/type-assertion-as2.ts.result @@ -0,0 +1,11 @@ +Matched 22 tokens. +Matched 48 tokens. 
+============= Module =========== +== Sub Tree == +func func(e) throws: + return [e] + +== Sub Tree == +func foo(arg) throws: + return func(arg)[0] + diff --git a/src/MapleFE/test/typescript/unit_tests/type-assertion-retval.ts b/src/MapleFE/test/typescript/unit_tests/type-assertion-retval.ts new file mode 100644 index 0000000000000000000000000000000000000000..7e94c7c6c291de0b644044d7a6f185b778835176 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/type-assertion-retval.ts @@ -0,0 +1,7 @@ +class Foo { + public func() { + return 10; + } +} +let obj = new Foo(); +let employeeCode = obj.func() as number; diff --git a/src/MapleFE/test/typescript/unit_tests/type-assertion-retval.ts.result b/src/MapleFE/test/typescript/unit_tests/type-assertion-retval.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..ba9e0c2d31c761e58b928187bef6ea8ce49b6077 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/type-assertion-retval.ts.result @@ -0,0 +1,20 @@ +Matched 13 tokens. +Matched 21 tokens. +Matched 32 tokens. +============= Module =========== +== Sub Tree == +class Foo + Fields: + + Instance Initializer: + Constructors: + Methods: + func func() throws: + return 10 + LocalClasses: + LocalInterfaces: + +== Sub Tree == +js_let Decl: obj=new Foo() +== Sub Tree == +js_let Decl: employeeCode=obj.func() diff --git a/src/MapleFE/test/typescript/unit_tests/type-assertion-this.ts b/src/MapleFE/test/typescript/unit_tests/type-assertion-this.ts new file mode 100644 index 0000000000000000000000000000000000000000..d7d0c97a5d0852b14ad172c9faf676cde041a287 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/type-assertion-this.ts @@ -0,0 +1,6 @@ +class Foo { + public _val: number = 0; + public func(): number { + return this._val as number; + } +} diff --git a/src/MapleFE/test/typescript/unit_tests/type-assertion-this.ts.result b/src/MapleFE/test/typescript/unit_tests/type-assertion-this.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..69d1ccc316f88a7d554168f5ec87bf9d1b9a6909 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/type-assertion-this.ts.result @@ -0,0 +1,14 @@ +Matched 26 tokens. +============= Module =========== +== Sub Tree == +class Foo + Fields: + _val=0 + Instance Initializer: + Constructors: + Methods: + func func() throws: + return this._val + LocalClasses: + LocalInterfaces: + diff --git a/src/MapleFE/test/typescript/unit_tests/type-cast.ts b/src/MapleFE/test/typescript/unit_tests/type-cast.ts new file mode 100644 index 0000000000000000000000000000000000000000..2bdac558b996636acc0fe27c56a77233a5788ee8 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/type-cast.ts @@ -0,0 +1,18 @@ +interface Base { + name: string; +} +interface Derived extends Base { + age: number; +} + +let o = { name: "John", age: 30 }; + +function dump(obj: Base) { + console.log(obj.name, (obj).age); +} +dump(o); + +function dump2(obj: Base) { + console.log(obj.name, (obj as Derived).age); +} +dump2(o); diff --git a/src/MapleFE/test/typescript/unit_tests/type-cast.ts.result b/src/MapleFE/test/typescript/unit_tests/type-cast.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..b1e67b53bf15ff1e0587607fe54c0ff88972e57f --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/type-cast.ts.result @@ -0,0 +1,26 @@ +Matched 8 tokens. +Matched 18 tokens. +Matched 31 tokens. +Matched 58 tokens. +Matched 63 tokens. +Matched 89 tokens. +Matched 94 tokens. 
+============= Module =========== +== Sub Tree == +ts_interface: Base {name } +== Sub Tree == +ts_interface: Derived {age } +== Sub Tree == +js_let Decl: o= {name:"John", age:30} +== Sub Tree == +func dump(obj) throws: + console.log(obj.name,(Derived)obj.age) + +== Sub Tree == +dump(o) +== Sub Tree == +func dump2(obj) throws: + console.log(obj.name,obj.age) + +== Sub Tree == +dump2(o) diff --git a/src/MapleFE/test/typescript/unit_tests/type-only-export.ts b/src/MapleFE/test/typescript/unit_tests/type-only-export.ts new file mode 100644 index 0000000000000000000000000000000000000000..9152f5d448eedbe119eed47461f71eb880d5403e --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/type-only-export.ts @@ -0,0 +1,4 @@ +class Klass { + n: number = 0; +} +export type K = Klass; diff --git a/src/MapleFE/test/typescript/unit_tests/type-only-export.ts.result b/src/MapleFE/test/typescript/unit_tests/type-only-export.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..a81448254ce65483220fbf223428b10515c71b0d --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/type-only-export.ts.result @@ -0,0 +1,15 @@ +Matched 10 tokens. +Matched 16 tokens. +============= Module =========== +== Sub Tree == +class Klass + Fields: + n=0 + Instance Initializer: + Constructors: + Methods: + LocalClasses: + LocalInterfaces: + +== Sub Tree == +export { type K = Klass} diff --git a/src/MapleFE/test/typescript/unit_tests/type-only-import.ts b/src/MapleFE/test/typescript/unit_tests/type-only-import.ts new file mode 100644 index 0000000000000000000000000000000000000000..9505ab8d55aebdf2a1ded8646778077bf4c6f55d --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/type-only-import.ts @@ -0,0 +1,4 @@ +import type { K } from "./type-only-export"; + +var x: K = { n: 123 }; +console.log(x); diff --git a/src/MapleFE/test/typescript/unit_tests/type-only-import.ts.result b/src/MapleFE/test/typescript/unit_tests/type-only-import.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..389dcc86a93b459f9d207dde338115a4dd50b242 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/type-only-import.ts.result @@ -0,0 +1,10 @@ +Matched 8 tokens. +Matched 19 tokens. +Matched 26 tokens. +============= Module =========== +== Sub Tree == +import {K} "./type-only-export" +== Sub Tree == +js_var Decl: x= {n:123} +== Sub Tree == +console.log(x) diff --git a/src/MapleFE/test/typescript/unit_tests/type-predicate.ts b/src/MapleFE/test/typescript/unit_tests/type-predicate.ts new file mode 100644 index 0000000000000000000000000000000000000000..7c8df56a02bcbd316f51592e442f657ca333a564 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/type-predicate.ts @@ -0,0 +1,24 @@ +interface Foo { + foo: number; +} + +interface Bar { + bar: number; +} + +function isFoo(arg: any): arg is Foo { + return arg.foo !== undefined; +} + +// user defined type guard +function doStuff(arg: Foo | Bar) { + if (isFoo(arg)) { + console.log(arg.foo); + } + else { + console.log(arg.bar); + } +} + +doStuff({ foo: 123 }); +doStuff({ bar: 123 }); diff --git a/src/MapleFE/test/typescript/unit_tests/type-predicate.ts.result b/src/MapleFE/test/typescript/unit_tests/type-predicate.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..e51159f7779d196b7b0be90db2c6dce2ab6fe59d --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/type-predicate.ts.result @@ -0,0 +1,28 @@ +Matched 8 tokens. +Matched 16 tokens. +Matched 36 tokens. +Matched 77 tokens. +Matched 86 tokens. +Matched 95 tokens. 
+============= Module =========== +== Sub Tree == +ts_interface: Foo {foo } +== Sub Tree == +ts_interface: Bar {bar } +== Sub Tree == +func isFoo(arg) throws: + return arg.foo StNe undefined + +== Sub Tree == +func doStuff(arg) throws: + cond-branch cond:isFoo(arg) + true branch : + console.log(arg.foo) + false branch : + console.log(arg.bar) + + +== Sub Tree == +doStuff( {foo:123}) +== Sub Tree == +doStuff( {bar:123}) diff --git a/src/MapleFE/test/typescript/unit_tests/typed-array.ts b/src/MapleFE/test/typescript/unit_tests/typed-array.ts new file mode 100644 index 0000000000000000000000000000000000000000..e30281a9778cdc16822dbf57201c942448771833 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/typed-array.ts @@ -0,0 +1,49 @@ +let buf = new ArrayBuffer(16); +let bytes = new Uint8Array(buf); +for (let i = 0; i < bytes.length; i++) + bytes[i] = i * 15; + +console.log("\nInt8Array", Int8Array); +let Int8View = new Int8Array(buf); +for (let i = 0; i < Int8View.length; i++) + console.log(i, Int8View[i]); +console.log("\nUint8Array", Uint8Array); +let Uint8View = new Uint8Array(buf); +for (let i = 0; i < Uint8View.length; i++) + console.log(i, Uint8View[i]); +console.log("\nUint8ClampedArray", Uint8ClampedArray); +let Uint8ClampedView = new Uint8ClampedArray(buf); +for (let i = 0; i < Uint8ClampedView.length; i++) + console.log(i, Uint8ClampedView[i]); +console.log("\nInt16Array", Int16Array); +let Int16View = new Int16Array(buf); +for (let i = 0; i < Int16View.length; i++) + console.log(i, Int16View[i]); +console.log("\nUint16Array", Uint16Array); +let Uint16View = new Uint16Array(buf); +for (let i = 0; i < Uint16View.length; i++) + console.log(i, Uint16View[i]); +console.log("\nInt32Array", Int32Array); +let Int32View = new Int32Array(buf); +for (let i = 0; i < Int32View.length; i++) + console.log(i, Int32View[i]); +console.log("\nUint32Array", Uint32Array); +let Uint32View = new Uint32Array(buf); +for (let i = 0; i < Uint32View.length; i++) + console.log(i, Uint32View[i]); +console.log("\nFloat32Array", Float32Array); +let Float32View = new Float32Array(buf); +for (let i = 0; i < Float32View.length; i++) + console.log(i, Float32View[i]); +console.log("\nFloat64Array", Float64Array); +let Float64View = new Float64Array(buf); +for (let i = 0; i < Float64View.length; i++) + console.log(i, Float64View[i]); +// console.log("\nBigInt64Array", BigInt64Array); +// let BigInt64View = new BigInt64Array(buf); +// for (let i = 0; i < BigInt64View.length; i++) +// console.log(i, BigInt64View[i]); +// console.log("\nBigUint64Array", BigUint64Array); +// let BigUint64View = new BigUint64Array(buf); +// for (let i = 0; i < BigUint64View.length; i++) +// console.log(i, BigUint64View[i]); diff --git a/src/MapleFE/test/typescript/unit_tests/typed-array.ts.result b/src/MapleFE/test/typescript/unit_tests/typed-array.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..543fad1c4316b00ca8dca19dae69cea805a88a16 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/typed-array.ts.result @@ -0,0 +1,101 @@ +Matched 9 tokens. +Matched 18 tokens. +Matched 43 tokens. +Matched 52 tokens. +Matched 61 tokens. +Matched 89 tokens. +Matched 98 tokens. +Matched 107 tokens. +Matched 135 tokens. +Matched 144 tokens. +Matched 153 tokens. +Matched 181 tokens. +Matched 190 tokens. +Matched 199 tokens. +Matched 227 tokens. +Matched 236 tokens. +Matched 245 tokens. +Matched 273 tokens. +Matched 282 tokens. +Matched 291 tokens. +Matched 319 tokens. +Matched 328 tokens. +Matched 337 tokens. 
+Matched 365 tokens. +Matched 374 tokens. +Matched 383 tokens. +Matched 411 tokens. +Matched 420 tokens. +Matched 429 tokens. +Matched 457 tokens. +============= Module =========== +== Sub Tree == +js_let Decl: buf=new ArrayBuffer(16) +== Sub Tree == +js_let Decl: bytes=new Uint8Array(buf) +== Sub Tree == +for ( ) + bytes[i] Assign i Mul 15 +== Sub Tree == +console.log("\nInt8Array",Int8Array) +== Sub Tree == +js_let Decl: Int8View=new Int8Array(buf) +== Sub Tree == +for ( ) + console.log(i,Int8View[i]) +== Sub Tree == +console.log("\nUint8Array",Uint8Array) +== Sub Tree == +js_let Decl: Uint8View=new Uint8Array(buf) +== Sub Tree == +for ( ) + console.log(i,Uint8View[i]) +== Sub Tree == +console.log("\nUint8ClampedArray",Uint8ClampedArray) +== Sub Tree == +js_let Decl: Uint8ClampedView=new Uint8ClampedArray(buf) +== Sub Tree == +for ( ) + console.log(i,Uint8ClampedView[i]) +== Sub Tree == +console.log("\nInt16Array",Int16Array) +== Sub Tree == +js_let Decl: Int16View=new Int16Array(buf) +== Sub Tree == +for ( ) + console.log(i,Int16View[i]) +== Sub Tree == +console.log("\nUint16Array",Uint16Array) +== Sub Tree == +js_let Decl: Uint16View=new Uint16Array(buf) +== Sub Tree == +for ( ) + console.log(i,Uint16View[i]) +== Sub Tree == +console.log("\nInt32Array",Int32Array) +== Sub Tree == +js_let Decl: Int32View=new Int32Array(buf) +== Sub Tree == +for ( ) + console.log(i,Int32View[i]) +== Sub Tree == +console.log("\nUint32Array",Uint32Array) +== Sub Tree == +js_let Decl: Uint32View=new Uint32Array(buf) +== Sub Tree == +for ( ) + console.log(i,Uint32View[i]) +== Sub Tree == +console.log("\nFloat32Array",Float32Array) +== Sub Tree == +js_let Decl: Float32View=new Float32Array(buf) +== Sub Tree == +for ( ) + console.log(i,Float32View[i]) +== Sub Tree == +console.log("\nFloat64Array",Float64Array) +== Sub Tree == +js_let Decl: Float64View=new Float64Array(buf) +== Sub Tree == +for ( ) + console.log(i,Float64View[i]) diff --git a/src/MapleFE/test/typescript/unit_tests/typename-as-var.ts b/src/MapleFE/test/typescript/unit_tests/typename-as-var.ts new file mode 100644 index 0000000000000000000000000000000000000000..4aa37a9a22f356ec02a890abe03afbc5306c0ba7 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/typename-as-var.ts @@ -0,0 +1,7 @@ +const boolean = Boolean; +var b = new boolean(false); +console.log(boolean, b); + +const string = String; +var s = new string("abc"); +console.log(string, s); diff --git a/src/MapleFE/test/typescript/unit_tests/typename-as-var.ts.result b/src/MapleFE/test/typescript/unit_tests/typename-as-var.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..b13ac1054fdcc3f0e2088ac0c47b328aa453ed9c --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/typename-as-var.ts.result @@ -0,0 +1,19 @@ +Matched 5 tokens. +Matched 14 tokens. +Matched 23 tokens. +Matched 28 tokens. +Matched 37 tokens. +Matched 46 tokens. 
+============= Module =========== +== Sub Tree == +js_const Decl: boolean=Boolean +== Sub Tree == +js_var Decl: b=new boolean(false) +== Sub Tree == +console.log(boolean,b) +== Sub Tree == +js_const Decl: string=String +== Sub Tree == +js_var Decl: s=new string("abc") +== Sub Tree == +console.log(string,s) diff --git a/src/MapleFE/test/typescript/unit_tests/typeof.d.ts b/src/MapleFE/test/typescript/unit_tests/typeof.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..da86045759b7942cfee07feb09f277189b22ef71 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/typeof.d.ts @@ -0,0 +1,3 @@ +export declare const E: readonly ["left", "right", "top", "down"]; +export declare type A = (typeof E)[number]; + diff --git a/src/MapleFE/test/typescript/unit_tests/typeof.d.ts.result b/src/MapleFE/test/typescript/unit_tests/typeof.d.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..dc4d89f967c5db2a1158279a2b0cddaf331d6722 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/typeof.d.ts.result @@ -0,0 +1,7 @@ +Matched 16 tokens. +Matched 29 tokens. +============= Module =========== +== Sub Tree == +export {declare js_const Decl: E} +== Sub Tree == +export {declare type A = typeof E[number]} diff --git a/src/MapleFE/test/typescript/unit_tests/types.ts b/src/MapleFE/test/typescript/unit_tests/types.ts new file mode 100644 index 0000000000000000000000000000000000000000..cf9e393b3e7fd6fedf04bb6e6411ecb2a9d1ba44 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/types.ts @@ -0,0 +1,48 @@ +// ECMAScript language types (aka primitive type, literal type, etc..) +console.log(typeof(undefined)); // undefined +console.log(typeof(null)); // object (javascrupt legacy) +console.log(typeof(true)); // boolean +console.log(typeof(false)); // boolean +console.log(typeof("abc")); // string +console.log(typeof(Symbol())); // symbol +console.log(typeof(123)); // number +//console.log(typeof(123n)); // bigint +//console.log(typeof(BigInt(123))); // bigint + +// complex/compound type +console.log(typeof({})); // object +console.log({} instanceof Object); // true +console.log(typeof([])); +console.log([] instanceof Array); +console.log(typeof(new Array)); +console.log("===="); +console.log(typeof Object()); // object +console.log(typeof new Object()); // object +console.log(Object() instanceof Object); // true +console.log(new Object instanceof Object); // true + +// builtin objects +console.log(typeof(String())); // string +console.log(typeof(Boolean())); // boolean +console.log(typeof(Date())); // string +console.log(typeof(Number())); // number +//console.log(typeof(BigInt(123))); // bigint +console.log(typeof(new String())); // object +console.log(typeof(new Boolean())); // object +console.log(typeof(new Date())); // object +console.log(typeof(new Number())); // object + +//console.log(String() instanceof String); +//console.log(Boolean() instanceof Boolean); +//console.log(Date() instanceof Date); +//console.log(Number() instanceof Number); +console.log(new String() instanceof String); // true +console.log(new Boolean() instanceof Boolean); // true +console.log(new Date() instanceof Date); // true +console.log(new Number() instanceof Number); // true + +console.log(typeof(class {} )); // function +console.log(class{} instanceof Function);// Function +console.log(typeof(new class {} )); // object +console.log(new class {} instanceof Object); // object + diff --git a/src/MapleFE/test/typescript/unit_tests/types.ts.result 
b/src/MapleFE/test/typescript/unit_tests/types.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..daf52ff17fb31e35eb90acea22e0008ff0690444 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/types.ts.result @@ -0,0 +1,116 @@ +Matched 10 tokens. +Matched 20 tokens. +Matched 30 tokens. +Matched 40 tokens. +Matched 50 tokens. +Matched 62 tokens. +Matched 72 tokens. +Matched 83 tokens. +Matched 93 tokens. +Matched 104 tokens. +Matched 114 tokens. +Matched 125 tokens. +Matched 132 tokens. +Matched 142 tokens. +Matched 153 tokens. +Matched 164 tokens. +Matched 174 tokens. +Matched 186 tokens. +Matched 198 tokens. +Matched 210 tokens. +Matched 222 tokens. +Matched 235 tokens. +Matched 248 tokens. +Matched 261 tokens. +Matched 274 tokens. +Matched 286 tokens. +Matched 298 tokens. +Matched 310 tokens. +Matched 322 tokens. +Matched 334 tokens. +Matched 345 tokens. +Matched 358 tokens. +Matched 370 tokens. +============= Module =========== +== Sub Tree == +console.log( typeof undefined) +== Sub Tree == +console.log( typeof null) +== Sub Tree == +console.log( typeof true) +== Sub Tree == +console.log( typeof false) +== Sub Tree == +console.log( typeof "abc") +== Sub Tree == +console.log( typeof Symbol()) +== Sub Tree == +console.log( typeof 123) +== Sub Tree == +console.log( typeof {}) +== Sub Tree == +console.log( {} instanceof Object) +== Sub Tree == +console.log( typeof []) +== Sub Tree == +console.log([] instanceof Array) +== Sub Tree == +console.log( typeof new Array()) +== Sub Tree == +console.log("====") +== Sub Tree == +console.log( typeof Object()) +== Sub Tree == +console.log( typeof new Object()) +== Sub Tree == +console.log(Object() instanceof Object) +== Sub Tree == +console.log(new Object() instanceof Object) +== Sub Tree == +console.log( typeof String()) +== Sub Tree == +console.log( typeof Boolean()) +== Sub Tree == +console.log( typeof Date()) +== Sub Tree == +console.log( typeof Number()) +== Sub Tree == +console.log( typeof new String()) +== Sub Tree == +console.log( typeof new Boolean()) +== Sub Tree == +console.log( typeof new Date()) +== Sub Tree == +console.log( typeof new Number()) +== Sub Tree == +console.log(new String() instanceof String) +== Sub Tree == +console.log(new Boolean() instanceof Boolean) +== Sub Tree == +console.log(new Date() instanceof Date) +== Sub Tree == +console.log(new Number() instanceof Number) +== Sub Tree == +console.log( typeof class + Fields: + + Instance Initializer: + Constructors: + Methods: + LocalClasses: + LocalInterfaces: +) +== Sub Tree == +console.log(class + Fields: + + Instance Initializer: + Constructors: + Methods: + LocalClasses: + LocalInterfaces: + instanceof Function) +== Sub Tree == +console.log( typeof new ()) +== Sub Tree == +console.log(new () instanceof Object) diff --git a/src/MapleFE/test/typescript/unit_tests/unary_op.ts b/src/MapleFE/test/typescript/unit_tests/unary_op.ts new file mode 100644 index 0000000000000000000000000000000000000000..34893c274ab5c0094b0329a96c925fba46dc2300 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/unary_op.ts @@ -0,0 +1,27 @@ +class A { + a?: number = 0; + b: number = 0; +} +var x: A = {a: 1, b: 2}; +delete x.a; + +console.log(void 0); + +console.log(typeof 8); + +var a: number = 8; +console.log(++a); +console.log(--a); + +console.log(+8); +console.log(+"-8"); + +console.log(-"8"); +console.log(-a); + +console.log(~8); +console.log(~"8"); +console.log(~a); + +console.log(!false); +console.log(!true); diff --git 
a/src/MapleFE/test/typescript/unit_tests/unary_op.ts.result b/src/MapleFE/test/typescript/unit_tests/unary_op.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..44e37f79c25eb54a182e3e0b76a14104c195739c --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/unary_op.ts.result @@ -0,0 +1,60 @@ +Matched 17 tokens. +Matched 32 tokens. +Matched 37 tokens. +Matched 45 tokens. +Matched 53 tokens. +Matched 60 tokens. +Matched 68 tokens. +Matched 76 tokens. +Matched 83 tokens. +Matched 91 tokens. +Matched 99 tokens. +Matched 107 tokens. +Matched 115 tokens. +Matched 123 tokens. +Matched 131 tokens. +Matched 139 tokens. +Matched 147 tokens. +============= Module =========== +== Sub Tree == +class A + Fields: + a?=0 b=0 + Instance Initializer: + Constructors: + Methods: + LocalClasses: + LocalInterfaces: + +== Sub Tree == +js_var Decl: x= {a:1, b:2} +== Sub Tree == + delete x.a +== Sub Tree == +console.log(void) +== Sub Tree == +console.log( typeof 8) +== Sub Tree == +js_var Decl: a=8 +== Sub Tree == +console.log(PreInc a) +== Sub Tree == +console.log(PreDec a) +== Sub Tree == +console.log(8) +== Sub Tree == +console.log(Plus "-8") +== Sub Tree == +console.log(Minus "8") +== Sub Tree == +console.log(Minus a) +== Sub Tree == +console.log(Bcomp 8) +== Sub Tree == +console.log(Bcomp "8") +== Sub Tree == +console.log(Bcomp a) +== Sub Tree == +console.log(Not false) +== Sub Tree == +console.log(Not true) diff --git a/src/MapleFE/test/typescript/unit_tests/undefined-assign.ts b/src/MapleFE/test/typescript/unit_tests/undefined-assign.ts new file mode 100644 index 0000000000000000000000000000000000000000..d3677a2574853dea16c54071dc58e2b006899cf1 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/undefined-assign.ts @@ -0,0 +1 @@ +var x = undefined; diff --git a/src/MapleFE/test/typescript/unit_tests/undefined-assign.ts.result b/src/MapleFE/test/typescript/unit_tests/undefined-assign.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..06beeb38ee7c3d68b0b9f5aed1b4fc79404025f9 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/undefined-assign.ts.result @@ -0,0 +1,4 @@ +Matched 5 tokens. +============= Module =========== +== Sub Tree == +js_var Decl: x=undefined diff --git a/src/MapleFE/test/typescript/unit_tests/undefined-type.ts b/src/MapleFE/test/typescript/unit_tests/undefined-type.ts new file mode 100644 index 0000000000000000000000000000000000000000..f0e3c2e89988257e58d32b2283b301ce4c12137a --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/undefined-type.ts @@ -0,0 +1,9 @@ +function func(x: unknown): number | undefined { + if (typeof x === "number") return x * x; + return undefined; +} + +let v = func("a"); +console.log(v); +v = func(3); +console.log(v); diff --git a/src/MapleFE/test/typescript/unit_tests/undefined-type.ts.result b/src/MapleFE/test/typescript/unit_tests/undefined-type.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..1f6d4c0e72c7c45e604a7413f242f2df326f6b2a --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/undefined-type.ts.result @@ -0,0 +1,22 @@ +Matched 28 tokens. +Matched 36 tokens. +Matched 43 tokens. +Matched 50 tokens. +Matched 57 tokens. 
+============= Module =========== +== Sub Tree == +func func(x) throws: + cond-branch cond: typeof x StEq "number" + true branch : + return x Mul x false branch : + + return undefined + +== Sub Tree == +js_let Decl: v=func("a") +== Sub Tree == +console.log(v) +== Sub Tree == +v Assign func(3) +== Sub Tree == +console.log(v) diff --git a/src/MapleFE/test/typescript/unit_tests/unicode-string.ts b/src/MapleFE/test/typescript/unit_tests/unicode-string.ts new file mode 100644 index 0000000000000000000000000000000000000000..9ba9688f86fe738ce3ee1f8b19e8411e4bde6810 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/unicode-string.ts @@ -0,0 +1 @@ +console.log("cd/m²"); diff --git a/src/MapleFE/test/typescript/unit_tests/unicode-string.ts.result b/src/MapleFE/test/typescript/unit_tests/unicode-string.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..a88eebc76c306d06da149cceb7082e77bc5df551 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/unicode-string.ts.result @@ -0,0 +1,4 @@ +Matched 7 tokens. +============= Module =========== +== Sub Tree == +console.log("cd/m²") diff --git a/src/MapleFE/test/typescript/unit_tests/unicode-string2.ts b/src/MapleFE/test/typescript/unit_tests/unicode-string2.ts new file mode 100644 index 0000000000000000000000000000000000000000..ef5a5d83d01f52dc678ee48daa431cf4ceb9110a --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/unicode-string2.ts @@ -0,0 +1 @@ +console.log("你好"); diff --git a/src/MapleFE/test/typescript/unit_tests/unicode-string2.ts.result b/src/MapleFE/test/typescript/unit_tests/unicode-string2.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..da685ec3174d1781efee3e577deaa9681cd33c11 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/unicode-string2.ts.result @@ -0,0 +1,4 @@ +Matched 7 tokens. +============= Module =========== +== Sub Tree == +console.log("你好") diff --git a/src/MapleFE/test/typescript/unit_tests/unicode-string3.ts b/src/MapleFE/test/typescript/unit_tests/unicode-string3.ts new file mode 100644 index 0000000000000000000000000000000000000000..1d285ea0dc11a1e4b4f2539c3d798b55790ee95e --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/unicode-string3.ts @@ -0,0 +1,5 @@ +var 变量: string | number = "abc"; +// UTF-16 codes for 变量: 53d8 91cf +console.log(变量); +变量 = 123; +console.log(变量); diff --git a/src/MapleFE/test/typescript/unit_tests/unicode-string4.ts b/src/MapleFE/test/typescript/unit_tests/unicode-string4.ts new file mode 100644 index 0000000000000000000000000000000000000000..9f2aa9007640701f1f5ab6375c796acd49a86eea --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/unicode-string4.ts @@ -0,0 +1,2 @@ +const str = "\u001F"; +console.log(str); diff --git a/src/MapleFE/test/typescript/unit_tests/unicode-string4.ts.result b/src/MapleFE/test/typescript/unit_tests/unicode-string4.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..3b23caffe9b938ea8c8863e6fd0b0dc2c94cc840 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/unicode-string4.ts.result @@ -0,0 +1,7 @@ +Matched 5 tokens. +Matched 12 tokens. 
+============= Module =========== +== Sub Tree == +js_const Decl: str="\u001F" +== Sub Tree == +console.log(str) diff --git a/src/MapleFE/test/typescript/unit_tests/union-import-types.d.ts b/src/MapleFE/test/typescript/unit_tests/union-import-types.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..35f88d5860cf8136f1cb7a33e0ec2e3c7fea88d3 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/union-import-types.d.ts @@ -0,0 +1 @@ +export declare var x: { f: typeof import("./M") | import("./enum2").ET }; diff --git a/src/MapleFE/test/typescript/unit_tests/union-import-types.d.ts.result b/src/MapleFE/test/typescript/unit_tests/union-import-types.d.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..258f571ece0e0a0ecc73ad3117aa406503b01384 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/union-import-types.d.ts.result @@ -0,0 +1,4 @@ +Matched 22 tokens. +============= Module =========== +== Sub Tree == +export {declare js_var Decl: x} diff --git a/src/MapleFE/test/typescript/unit_tests/union-of-literals.ts b/src/MapleFE/test/typescript/unit_tests/union-of-literals.ts new file mode 100644 index 0000000000000000000000000000000000000000..6bcb1d3d3763db2aa116640d020aa279890af4be --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/union-of-literals.ts @@ -0,0 +1,6 @@ +var x: 0 | 1 | 2; +x = 1; +console.log(x); +var y: "left" | "right"; +y = "left"; +console.log(y); diff --git a/src/MapleFE/test/typescript/unit_tests/union-of-literals.ts.result b/src/MapleFE/test/typescript/unit_tests/union-of-literals.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..70069d55f920008984d3e0594a1879d77bb783e0 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/union-of-literals.ts.result @@ -0,0 +1,19 @@ +Matched 9 tokens. +Matched 13 tokens. +Matched 20 tokens. +Matched 27 tokens. +Matched 31 tokens. +Matched 38 tokens. +============= Module =========== +== Sub Tree == +js_var Decl: x +== Sub Tree == +x Assign 1 +== Sub Tree == +console.log(x) +== Sub Tree == +js_var Decl: y +== Sub Tree == +y Assign "left" +== Sub Tree == +console.log(y) diff --git a/src/MapleFE/test/typescript/unit_tests/union-of-literals2.ts b/src/MapleFE/test/typescript/unit_tests/union-of-literals2.ts new file mode 100644 index 0000000000000000000000000000000000000000..56fb44c11dd899b62925c176bb5c7987988016a1 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/union-of-literals2.ts @@ -0,0 +1,3 @@ +var y: "left" | "right" | string | undefined; +y = "left"; +console.log(y); diff --git a/src/MapleFE/test/typescript/unit_tests/union-of-literals2.ts.result b/src/MapleFE/test/typescript/unit_tests/union-of-literals2.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..e04ce3af94ad93c2c09053dae77cd908911e1ead --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/union-of-literals2.ts.result @@ -0,0 +1,10 @@ +Matched 11 tokens. +Matched 15 tokens. +Matched 22 tokens. 
+============= Module =========== +== Sub Tree == +js_var Decl: y +== Sub Tree == +y Assign "left" +== Sub Tree == +console.log(y) diff --git a/src/MapleFE/test/typescript/unit_tests/union-of-prim-array.ts b/src/MapleFE/test/typescript/unit_tests/union-of-prim-array.ts new file mode 100644 index 0000000000000000000000000000000000000000..89331dd9f6efc0d056b83994ceb74818bbabf37a --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/union-of-prim-array.ts @@ -0,0 +1,4 @@ +var x: number[] | string[] = [1, 2, 3]; +console.log(x); +x = ["abc", "def"]; +console.log(x); diff --git a/src/MapleFE/test/typescript/unit_tests/union-of-prim-array.ts.result b/src/MapleFE/test/typescript/unit_tests/union-of-prim-array.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..cd5ee9bb2c95af4127fa23d80982f35852249412 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/union-of-prim-array.ts.result @@ -0,0 +1,13 @@ +Matched 19 tokens. +Matched 26 tokens. +Matched 34 tokens. +Matched 41 tokens. +============= Module =========== +== Sub Tree == +js_var Decl: x=[1,2,3] +== Sub Tree == +console.log(x) +== Sub Tree == +x Assign ["abc","def"] +== Sub Tree == +console.log(x) diff --git a/src/MapleFE/test/typescript/unit_tests/union-type.ts b/src/MapleFE/test/typescript/unit_tests/union-type.ts new file mode 100644 index 0000000000000000000000000000000000000000..f78209a4fdf5508ebd5cd43ac29d2aba2be8a2ce --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/union-type.ts @@ -0,0 +1,7 @@ +type UnionType = number | string; +function add(x: UnionType, y: UnionType): UnionType { + if (typeof x === "number" && typeof y == "number") return x + y; + else return x.toString() + y.toString(); +} +console.log(add(1, 2)); +console.log(add("a", "b")); diff --git a/src/MapleFE/test/typescript/unit_tests/union-type.ts.result b/src/MapleFE/test/typescript/unit_tests/union-type.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..590db9e8c87f10865dcac5d536883101e0b5b763 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/union-type.ts.result @@ -0,0 +1,18 @@ +Matched 7 tokens. +Matched 53 tokens. +Matched 65 tokens. +Matched 77 tokens. +============= Module =========== +== Sub Tree == + type UnionType = union = number | string +== Sub Tree == +func add(x,y) throws: + cond-branch cond: typeof x StEq "number" Land typeof y EQ "number" + true branch : + return x Add y false branch : + return x.toString() Add y.toString() + +== Sub Tree == +console.log(add(1,2)) +== Sub Tree == +console.log(add("a","b")) diff --git a/src/MapleFE/test/typescript/unit_tests/union-type2.ts b/src/MapleFE/test/typescript/unit_tests/union-type2.ts new file mode 100644 index 0000000000000000000000000000000000000000..22a02a0c38c5ffb464190190b43a56d25edd02a5 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/union-type2.ts @@ -0,0 +1,7 @@ +type UT = string | ((k: number) => number); +var s: UT = "abc"; +console.log(s); +s = function func(k: number): number { + return k + k; +}; +console.log(s, s(3)); diff --git a/src/MapleFE/test/typescript/unit_tests/union-type2.ts.result b/src/MapleFE/test/typescript/unit_tests/union-type2.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..f368b31edb44b8836b9cf8b6b111ca760d09b6a7 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/union-type2.ts.result @@ -0,0 +1,18 @@ +Matched 15 tokens. +Matched 22 tokens. +Matched 29 tokens. +Matched 48 tokens. +Matched 60 tokens. 
+============= Module =========== +== Sub Tree == + type UT = union = string | (k) -> +== Sub Tree == +js_var Decl: s="abc" +== Sub Tree == +console.log(s) +== Sub Tree == +s Assign func func(k) throws: + return k Add k + +== Sub Tree == +console.log(s,s(3)) diff --git a/src/MapleFE/test/typescript/unit_tests/union-type3.ts b/src/MapleFE/test/typescript/unit_tests/union-type3.ts new file mode 100644 index 0000000000000000000000000000000000000000..42629ed59ec65e77aade9dbe3e95734c29663874 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/union-type3.ts @@ -0,0 +1,9 @@ +function func(k: number): number { + return k + k; +} + +type UT = string | typeof func; +var s: UT = "abc"; +console.log(s); +s = func; +console.log(s, s(3)); diff --git a/src/MapleFE/test/typescript/unit_tests/union-type3.ts.result b/src/MapleFE/test/typescript/unit_tests/union-type3.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..3851bff8bc5ce2d7843d5b859a4e7f1091e0474b --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/union-type3.ts.result @@ -0,0 +1,21 @@ +Matched 16 tokens. +Matched 24 tokens. +Matched 31 tokens. +Matched 38 tokens. +Matched 42 tokens. +Matched 54 tokens. +============= Module =========== +== Sub Tree == +func func(k) throws: + return k Add k + +== Sub Tree == + type UT = union = string | typeof func +== Sub Tree == +js_var Decl: s="abc" +== Sub Tree == +console.log(s) +== Sub Tree == +s Assign func +== Sub Tree == +console.log(s,s(3)) diff --git a/src/MapleFE/test/typescript/unit_tests/union-type4.ts b/src/MapleFE/test/typescript/unit_tests/union-type4.ts new file mode 100644 index 0000000000000000000000000000000000000000..994b3993c430f861e8ecd0bff161573ddd9d3929 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/union-type4.ts @@ -0,0 +1,11 @@ +function func(k: number): number { + return k + k; +} + +type UT = string | typeof func | Array; +var s: UT = "abc"; +console.log(s); +s = func; +console.log(s, s(3)); +s = new Array("first", "second"); +console.log(s); diff --git a/src/MapleFE/test/typescript/unit_tests/union-type4.ts.result b/src/MapleFE/test/typescript/unit_tests/union-type4.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..cb4401473c8420858aa766a4dbf9ec2d59c4ca32 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/union-type4.ts.result @@ -0,0 +1,27 @@ +Matched 16 tokens. +Matched 29 tokens. +Matched 36 tokens. +Matched 43 tokens. +Matched 47 tokens. +Matched 59 tokens. +Matched 69 tokens. +Matched 76 tokens. 
+============= Module =========== +== Sub Tree == +func func(k) throws: + return k Add k + +== Sub Tree == + type UT = union = string | typeof func | Array +== Sub Tree == +js_var Decl: s="abc" +== Sub Tree == +console.log(s) +== Sub Tree == +s Assign func +== Sub Tree == +console.log(s,s(3)) +== Sub Tree == +s Assign new Array("first","second") +== Sub Tree == +console.log(s) diff --git a/src/MapleFE/test/typescript/unit_tests/union-type5.ts b/src/MapleFE/test/typescript/unit_tests/union-type5.ts new file mode 100644 index 0000000000000000000000000000000000000000..81947c80bc1ac1bc570eae8b920db913aa9a346b --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/union-type5.ts @@ -0,0 +1,2 @@ +var x: { [key: string]: number } | null = null; +console.log(x); diff --git a/src/MapleFE/test/typescript/unit_tests/union-type5.ts.result b/src/MapleFE/test/typescript/unit_tests/union-type5.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..e586c3fc840d25af28e83b6ae36cd8bd8b63b4ea --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/union-type5.ts.result @@ -0,0 +1,7 @@ +Matched 17 tokens. +Matched 24 tokens. +============= Module =========== +== Sub Tree == +js_var Decl: x=null +== Sub Tree == +console.log(x) diff --git a/src/MapleFE/test/typescript/unit_tests/union-type6.ts b/src/MapleFE/test/typescript/unit_tests/union-type6.ts new file mode 100644 index 0000000000000000000000000000000000000000..28b76547e39b54d70539067d76c5a4c055d80c43 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/union-type6.ts @@ -0,0 +1,6 @@ +var s: string | number | undefined = 123; +console.log(s); +s = "abc"; +console.log(s); +s = undefined; +console.log(s); diff --git a/src/MapleFE/test/typescript/unit_tests/union-type6.ts.result b/src/MapleFE/test/typescript/unit_tests/union-type6.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..ba46d8f7a64b4ecd4d111c9ad498efe876a7d021 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/union-type6.ts.result @@ -0,0 +1,19 @@ +Matched 11 tokens. +Matched 18 tokens. +Matched 22 tokens. +Matched 29 tokens. +Matched 33 tokens. +Matched 40 tokens. +============= Module =========== +== Sub Tree == +js_var Decl: s=123 +== Sub Tree == +console.log(s) +== Sub Tree == +s Assign "abc" +== Sub Tree == +console.log(s) +== Sub Tree == +s Assign undefined +== Sub Tree == +console.log(s) diff --git a/src/MapleFE/test/typescript/unit_tests/union-type7.ts b/src/MapleFE/test/typescript/unit_tests/union-type7.ts new file mode 100644 index 0000000000000000000000000000000000000000..596d4563ff945c39883b00ad48c05ba4c1e4e7b0 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/union-type7.ts @@ -0,0 +1 @@ +type AType = (new() => Error) | Error; diff --git a/src/MapleFE/test/typescript/unit_tests/union-type7.ts.result b/src/MapleFE/test/typescript/unit_tests/union-type7.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..87d45a853db99d735fbbb05f038519ed1238fdd9 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/union-type7.ts.result @@ -0,0 +1,4 @@ +Matched 13 tokens. 
+============= Module =========== +== Sub Tree == + type AType = union = new () -> | Error diff --git a/src/MapleFE/test/typescript/unit_tests/union-type8.ts b/src/MapleFE/test/typescript/unit_tests/union-type8.ts new file mode 100644 index 0000000000000000000000000000000000000000..2e43c4f8d1a1009015f18bbde2c41049fba3a7cc --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/union-type8.ts @@ -0,0 +1,9 @@ +interface IFace { + num: number; +} + +interface IFace2< +T extends null | IFace, +> extends IFace { + prop: T; +} diff --git a/src/MapleFE/test/typescript/unit_tests/union-type8.ts.result b/src/MapleFE/test/typescript/unit_tests/union-type8.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..0275638610cec08579102cb7492bf5ced8c1c685 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/union-type8.ts.result @@ -0,0 +1,7 @@ +Matched 8 tokens. +Matched 26 tokens. +============= Module =========== +== Sub Tree == +ts_interface: IFace {num } +== Sub Tree == +ts_interface: IFace2 {prop } diff --git a/src/MapleFE/test/typescript/unit_tests/utility-type-instancetype.ts b/src/MapleFE/test/typescript/unit_tests/utility-type-instancetype.ts new file mode 100644 index 0000000000000000000000000000000000000000..ca53fcb7d1ff15dc2284e19987196f87b6b34a00 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/utility-type-instancetype.ts @@ -0,0 +1,10 @@ +class Klass { + x: number = 33; + public getNum() { + return this.x >> 2; + } +} + +class Klass2 { + map: Map> | undefined = undefined; +} diff --git a/src/MapleFE/test/typescript/unit_tests/utility-type-instancetype.ts.result b/src/MapleFE/test/typescript/unit_tests/utility-type-instancetype.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..06c16c8b85c9a43a8f376755c90bec74f4748f3f --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/utility-type-instancetype.ts.result @@ -0,0 +1,25 @@ +Matched 23 tokens. +Matched 43 tokens. +============= Module =========== +== Sub Tree == +class Klass + Fields: + x=33 + Instance Initializer: + Constructors: + Methods: + func getNum() throws: + return this.x Shr 2 + LocalClasses: + LocalInterfaces: + +== Sub Tree == +class Klass2 + Fields: + map=undefined + Instance Initializer: + Constructors: + Methods: + LocalClasses: + LocalInterfaces: + diff --git a/src/MapleFE/test/typescript/unit_tests/utility-type-nonnullable.ts b/src/MapleFE/test/typescript/unit_tests/utility-type-nonnullable.ts new file mode 100644 index 0000000000000000000000000000000000000000..55b36eb5650f6a5daca29469a1d2d6c019c868f1 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/utility-type-nonnullable.ts @@ -0,0 +1,7 @@ +interface Interf { + prop: { + pos: Object; + }; +} + +type TYPE = NonNullable; diff --git a/src/MapleFE/test/typescript/unit_tests/utility-type-nonnullable.ts.result b/src/MapleFE/test/typescript/unit_tests/utility-type-nonnullable.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..733641e3852fb19e3bc1d2aa320a4f0062cd5a02 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/utility-type-nonnullable.ts.result @@ -0,0 +1,7 @@ +Matched 13 tokens. +Matched 27 tokens. 
+============= Module =========== +== Sub Tree == +ts_interface: Interf {prop } +== Sub Tree == + type TYPE = NonNullable diff --git a/src/MapleFE/test/typescript/unit_tests/utility-type-parameters.ts b/src/MapleFE/test/typescript/unit_tests/utility-type-parameters.ts new file mode 100644 index 0000000000000000000000000000000000000000..0f9ef082bc38ba80af812d4931a219eda88213cc --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/utility-type-parameters.ts @@ -0,0 +1,6 @@ +var list = { f1: 0, f2: "abc" }; +function f(...list: any[]) {} +function func(t: Parameters[0]): Parameters[1] { + return list.f2 + t; +} +console.log(func(123)); diff --git a/src/MapleFE/test/typescript/unit_tests/utility-type-parameters.ts.result b/src/MapleFE/test/typescript/unit_tests/utility-type-parameters.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..5ba7c640a1851e4c661b4cdc47ce455ce072921d --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/utility-type-parameters.ts.result @@ -0,0 +1,16 @@ +Matched 13 tokens. +Matched 25 tokens. +Matched 57 tokens. +Matched 67 tokens. +============= Module =========== +== Sub Tree == +js_var Decl: list= {f1:0, f2:"abc"} +== Sub Tree == +func f(...list) throws: + +== Sub Tree == +func func(t) throws: + return list.f2 Add t + +== Sub Tree == +console.log(func(123)) diff --git a/src/MapleFE/test/typescript/unit_tests/utility-type-parameters2.ts b/src/MapleFE/test/typescript/unit_tests/utility-type-parameters2.ts new file mode 100644 index 0000000000000000000000000000000000000000..730ac18acb3457c414ac6ada64924b34c909c954 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/utility-type-parameters2.ts @@ -0,0 +1,12 @@ +function func(n: number, s: string): string { + return n + s; +} + +function wrapper( + n: Parameters[0], + s: Parameters[1] | undefined +): string { + return func(n, s!); +} + +console.log(wrapper(123, "abc")); diff --git a/src/MapleFE/test/typescript/unit_tests/utility-type-parameters2.ts.result b/src/MapleFE/test/typescript/unit_tests/utility-type-parameters2.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..a2f0036f2bb1f283383ebcc2a3030ea1d2abc5df --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/utility-type-parameters2.ts.result @@ -0,0 +1,14 @@ +Matched 20 tokens. +Matched 60 tokens. +Matched 72 tokens. +============= Module =========== +== Sub Tree == +func func(n,s) throws: + return n Add s + +== Sub Tree == +func wrapper(n,s) throws: + return func(n,s!) + +== Sub Tree == +console.log(wrapper(123,"abc")) diff --git a/src/MapleFE/test/typescript/unit_tests/values.ts b/src/MapleFE/test/typescript/unit_tests/values.ts new file mode 100644 index 0000000000000000000000000000000000000000..a57009b4ba6947604df116a233182bcfd0f6ab95 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/values.ts @@ -0,0 +1 @@ +export const values = [{ brand: "BMW", model: "325" }, {}]; diff --git a/src/MapleFE/test/typescript/unit_tests/values.ts.result b/src/MapleFE/test/typescript/unit_tests/values.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..19f5259985cf2198eeb7e6f60eb2f36ff9ccc5f2 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/values.ts.result @@ -0,0 +1,4 @@ +Matched 19 tokens. 
+============= Module =========== +== Sub Tree == +export {js_const Decl: values=[ {brand:"BMW", model:"325"}, {}]} diff --git a/src/MapleFE/test/typescript/unit_tests/var-as-prop-name.ts b/src/MapleFE/test/typescript/unit_tests/var-as-prop-name.ts new file mode 100644 index 0000000000000000000000000000000000000000..bd97f5cab3b2af51e3f45bca1c7320496a53b5ff --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/var-as-prop-name.ts @@ -0,0 +1,10 @@ +const obj = { + else() { + return this; + }, + var() { + console.log("var"); + } +} + +obj.else().var(); diff --git a/src/MapleFE/test/typescript/unit_tests/var-as-prop-name.ts.result b/src/MapleFE/test/typescript/unit_tests/var-as-prop-name.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..7fcef6b23a7beaf3d03a4115b40cfebcf5da9d1f --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/var-as-prop-name.ts.result @@ -0,0 +1,11 @@ +Matched 26 tokens. +Matched 36 tokens. +============= Module =========== +== Sub Tree == +js_const Decl: obj= {else:func else() throws: + return this +, var:func var() throws: + console.log("var") +} +== Sub Tree == +obj.else().var() diff --git a/src/MapleFE/test/typescript/unit_tests/var-const.ts b/src/MapleFE/test/typescript/unit_tests/var-const.ts new file mode 100644 index 0000000000000000000000000000000000000000..70ad5911b8e2534198fcd0e5afeb2f8df39919b7 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/var-const.ts @@ -0,0 +1,10 @@ +function foo(j: number): number { + { + const i: number = 4; + j += 10 * i; + } + var i = 8; + return i + j; +} + +console.log(foo(5)); diff --git a/src/MapleFE/test/typescript/unit_tests/var-const.ts.result b/src/MapleFE/test/typescript/unit_tests/var-const.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..ae3c888bc2c46dc035ef4e6b1eea66c280e10644 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/var-const.ts.result @@ -0,0 +1,13 @@ +Matched 36 tokens. +Matched 46 tokens. +============= Module =========== +== Sub Tree == +func foo(j) throws: + js_const Decl: i=4 + j AddAssign 10 Mul i + + js_var Decl: i=8 + return i Add j + +== Sub Tree == +console.log(foo(5)) diff --git a/src/MapleFE/test/typescript/unit_tests/var-dup.ts b/src/MapleFE/test/typescript/unit_tests/var-dup.ts new file mode 100644 index 0000000000000000000000000000000000000000..935641621371303cef7f1dbeb0077525d6903beb --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/var-dup.ts @@ -0,0 +1,14 @@ +function foo(a: number) { + var sum = 0; + for (var i = 0; i < a; i++) { + var i = 4; + for (var i = 0; i < a; i++) { + let i = 8; + sum += i; + } + } + + return sum; +} + +console.log(foo(10)); diff --git a/src/MapleFE/test/typescript/unit_tests/var-dup.ts.result b/src/MapleFE/test/typescript/unit_tests/var-dup.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..8eb5ad8a69fec678a79cafa4625152bb409834b1 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/var-dup.ts.result @@ -0,0 +1,17 @@ +Matched 63 tokens. +Matched 73 tokens. 
+============= Module =========== +== Sub Tree == +func foo(a) throws: + js_var Decl: sum=0 + for ( ) + js_var Decl: i=4 + for ( ) + js_let Decl: i=8 + sum AddAssign i + + + return sum + +== Sub Tree == +console.log(foo(10)) diff --git a/src/MapleFE/test/typescript/unit_tests/var-in-function.ts b/src/MapleFE/test/typescript/unit_tests/var-in-function.ts new file mode 100644 index 0000000000000000000000000000000000000000..71f0660cbef5dac84cfbdaa37e3584ccf16bcd47 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/var-in-function.ts @@ -0,0 +1,7 @@ +function func() { + const res = { + pos: [0.5, -0.5, 0], + }; + return res; +} +console.log(func()); diff --git a/src/MapleFE/test/typescript/unit_tests/var-in-function.ts.result b/src/MapleFE/test/typescript/unit_tests/var-in-function.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..02502c6870f4d9eb02ef83cd7aa344191ed7e352 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/var-in-function.ts.result @@ -0,0 +1,10 @@ +Matched 25 tokens. +Matched 34 tokens. +============= Module =========== +== Sub Tree == +func func() throws: + js_const Decl: res= {pos:[0.5,-0.5,0]} + return res + +== Sub Tree == +console.log(func()) diff --git a/src/MapleFE/test/typescript/unit_tests/var-scope.ts b/src/MapleFE/test/typescript/unit_tests/var-scope.ts new file mode 100644 index 0000000000000000000000000000000000000000..7ccfeb6eabeb9aabd71aa4cc2cd5aeacbf3618cd --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/var-scope.ts @@ -0,0 +1,16 @@ +var i: number = 5; + +function foo(j: number): number { + i = j; + { + let i: number = 2; + j += 100 * i; + } + { + let i: number = 4; + j += 10 * i; + } + return i + j; +} + +console.log(foo(5)); diff --git a/src/MapleFE/test/typescript/unit_tests/var-scope.ts.result b/src/MapleFE/test/typescript/unit_tests/var-scope.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..f8c807fa7c41397190bc3766d0d85e958374086f --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/var-scope.ts.result @@ -0,0 +1,19 @@ +Matched 7 tokens. +Matched 57 tokens. +Matched 67 tokens. +============= Module =========== +== Sub Tree == +js_var Decl: i=5 +== Sub Tree == +func foo(j) throws: + i Assign j + js_let Decl: i=2 + j AddAssign 100 Mul i + + js_let Decl: i=4 + j AddAssign 10 Mul i + + return i Add j + +== Sub Tree == +console.log(foo(5)) diff --git a/src/MapleFE/test/typescript/unit_tests/var-use-first.ts b/src/MapleFE/test/typescript/unit_tests/var-use-first.ts new file mode 100644 index 0000000000000000000000000000000000000000..f0c3d35b44361aef9696cc25634c9903d70819be --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/var-use-first.ts @@ -0,0 +1,7 @@ +function foo(j: number): number { + i = j; + var i: number = 4; + return i + j; +} + +console.log(foo(5)); diff --git a/src/MapleFE/test/typescript/unit_tests/var-use-first.ts.result b/src/MapleFE/test/typescript/unit_tests/var-use-first.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..0a4550222ae6c27478f607b1c206fcdc04582983 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/var-use-first.ts.result @@ -0,0 +1,11 @@ +Matched 27 tokens. +Matched 37 tokens. 
+============= Module =========== +== Sub Tree == +func foo(j) throws: + i Assign j + js_var Decl: i=4 + return i Add j + +== Sub Tree == +console.log(foo(5)) diff --git a/src/MapleFE/test/typescript/unit_tests/var.ts b/src/MapleFE/test/typescript/unit_tests/var.ts new file mode 100644 index 0000000000000000000000000000000000000000..370d5d30f71af1090875871aba51068550bc39bf --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/var.ts @@ -0,0 +1,2 @@ +var a: number = 1; +console.log(a); diff --git a/src/MapleFE/test/typescript/unit_tests/var.ts.result b/src/MapleFE/test/typescript/unit_tests/var.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..c9c995c9ef89f05aed1a00e3bf818a13d1da5eda --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/var.ts.result @@ -0,0 +1,7 @@ +Matched 7 tokens. +Matched 14 tokens. +============= Module =========== +== Sub Tree == +js_var Decl: a=1 +== Sub Tree == +console.log(a) diff --git a/src/MapleFE/test/typescript/unit_tests/void-zero.ts b/src/MapleFE/test/typescript/unit_tests/void-zero.ts new file mode 100644 index 0000000000000000000000000000000000000000..96176e42e8c0e3152540dd449059e5618a50aad1 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/void-zero.ts @@ -0,0 +1,3 @@ +var x: number | undefined = void 0; +console.log(x); +console.log(typeof x); diff --git a/src/MapleFE/test/typescript/unit_tests/void-zero.ts.result b/src/MapleFE/test/typescript/unit_tests/void-zero.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..7debc43dd58ea3dda242c7f773be3a7ad2dbda04 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/void-zero.ts.result @@ -0,0 +1,10 @@ +Matched 10 tokens. +Matched 17 tokens. +Matched 25 tokens. +============= Module =========== +== Sub Tree == +js_var Decl: x=void +== Sub Tree == +console.log(x) +== Sub Tree == +console.log( typeof x) diff --git a/src/MapleFE/test/typescript/unit_tests/void-zero2.ts b/src/MapleFE/test/typescript/unit_tests/void-zero2.ts new file mode 100644 index 0000000000000000000000000000000000000000..caf1ac79394882cf39ce65dae45dc54d3eed14d0 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/void-zero2.ts @@ -0,0 +1,6 @@ +var x: number | undefined = void 0; +console.log(x); +console.log(typeof x); +x = 123; +console.log(x); +console.log(typeof x); diff --git a/src/MapleFE/test/typescript/unit_tests/void-zero2.ts.result b/src/MapleFE/test/typescript/unit_tests/void-zero2.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..0a400cde2108a958f4bf5259e47fb18b0f84c5cd --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/void-zero2.ts.result @@ -0,0 +1,19 @@ +Matched 10 tokens. +Matched 17 tokens. +Matched 25 tokens. +Matched 29 tokens. +Matched 36 tokens. +Matched 44 tokens. 
+============= Module =========== +== Sub Tree == +js_var Decl: x=void +== Sub Tree == +console.log(x) +== Sub Tree == +console.log( typeof x) +== Sub Tree == +x Assign 123 +== Sub Tree == +console.log(x) +== Sub Tree == +console.log( typeof x) diff --git a/src/MapleFE/test/typescript/unit_tests/while-stmt.ts b/src/MapleFE/test/typescript/unit_tests/while-stmt.ts new file mode 100644 index 0000000000000000000000000000000000000000..828da50719d0e1ccbef4dc4ba7a2eac2ce8d5161 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/while-stmt.ts @@ -0,0 +1,6 @@ +var k: number = 1; +while (k < 10) { + console.log(k); + if (k == 8) break; + k++; +} diff --git a/src/MapleFE/test/typescript/unit_tests/while-stmt.ts.result b/src/MapleFE/test/typescript/unit_tests/while-stmt.ts.result new file mode 100644 index 0000000000000000000000000000000000000000..6364c9367e427b8de5e1aa50ff47e940ce2b0d83 --- /dev/null +++ b/src/MapleFE/test/typescript/unit_tests/while-stmt.ts.result @@ -0,0 +1,15 @@ +Matched 7 tokens. +Matched 33 tokens. +============= Module =========== +== Sub Tree == +js_var Decl: k=1 +== Sub Tree == +while k LT 10 console.log(k) + cond-branch cond:k EQ 8 + true branch : + break: + false branch : + + k Inc + + diff --git a/src/MapleFE/autogen/include/expr_gen.h b/src/MapleFE/tools/obfuscate/include/obfuscate.h similarity index 47% rename from src/MapleFE/autogen/include/expr_gen.h rename to src/MapleFE/tools/obfuscate/include/obfuscate.h index cef6a1a7c20e9013cae0d8cb1a79a24528a4e20c..63574ba6d02657eb5293605a36236e4a14374fe3 100644 --- a/src/MapleFE/autogen/include/expr_gen.h +++ b/src/MapleFE/tools/obfuscate/include/obfuscate.h @@ -1,5 +1,5 @@ /* -* Copyright (C) [2020] Futurewei Technologies, Inc. All rights reverved. +* Copyright (C) [2022] Futurewei Technologies, Inc. All rights reverved. * * OpenArkFE is licensed under the Mulan PSL v2. * You can use this software according to the terms and conditions of the Mulan PSL v2. @@ -12,29 +12,41 @@ * FIT FOR A PARTICULAR PURPOSE. * See the Mulan PSL v2 for more details. 
*/ -//////////////////////////////////////////////////////////////////////// -// Expr Generation // -//////////////////////////////////////////////////////////////////////// -#ifndef __EXPR_GEN_H__ -#define __EXPR_GEN_H__ +////////////////////////////////////////////////////////////////////////////////////////////// +// This is the interface to obfuscate the AST and emit the obfuscated TypeScript code +////////////////////////////////////////////////////////////////////////////////////////////// -#include "base_gen.h" -#include "all_supported.h" +#ifndef __OBFUSCATE_HEADER__ +#define __OBFUSCATE_HEADER__ + +#include "astopt.h" +#include "ast_handler.h" +#include "ast_module.h" namespace maplefe { -class ExprGen : public BaseGen { +class Obfuscate : public AstOpt { +private: + AST_Handler *mASTHandler; + unsigned mFlags; + unsigned mIndexImported; + public: - ExprGen(const char *dfile, const char *hfile, const char *cfile) - : BaseGen(dfile, hfile, cfile) {} - ~ExprGen(){} + explicit Obfuscate(AST_Handler *h, unsigned flags) : + AstOpt(h, flags), + mASTHandler(h), + mFlags(flags), + mIndexImported(0) {} + ~Obfuscate() = default; - void Generate(); - void GenCppFile(); - void GenHeaderFile(); + void EmitTS(); + bool LoadImportedModules(); + + // return 0 if successful + // return non-zero if failed + int ProcessAST(); }; } - #endif diff --git a/src/MapleFE/tools/obfuscate/src/main.cpp b/src/MapleFE/tools/obfuscate/src/main.cpp new file mode 100644 index 0000000000000000000000000000000000000000..0865f8545ed5116743448c3bb710b7e33cb9289a --- /dev/null +++ b/src/MapleFE/tools/obfuscate/src/main.cpp @@ -0,0 +1,109 @@ +/* +* Copyright (C) [2022] Futurewei Technologies, Inc. All rights reverved. +* Copyright (C) [2022] Tencent. All rights reverved. +* +* OpenArkFE is licensed under the Mulan PSL v2. +* You can use this software according to the terms and conditions of the Mulan PSL v2. +* You may obtain a copy of Mulan PSL v2 at: +* +* http://license.coscl.org.cn/MulanPSL2 +* +* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +* FIT FOR A PARTICULAR PURPOSE. +* See the Mulan PSL v2 for more details.
+*/ + +#include +#include +#include +#include "gen_astload.h" +#include "ast_handler.h" +#include "obfuscate.h" + +static void help() { + std::cout << "obfuscate a.ast[,b.ast] [options]:" << std::endl; + std::cout << " --out=x.ts : ts output file" << std::endl; + std::cout << " --help : print this help" << std::endl; + std::cout << " --trace=n : Emit trace with 4-bit combo levels 1...15" << std::endl; + std::cout << " 1 : Emit ast tree visits" << std::endl; + std::cout << " 2 : Emit graph" << std::endl; + std::cout << " --emit-ts-only : Emit ts code only" << std::endl; + std::cout << " --emit-ts : Emit ts code" << std::endl; + std::cout << " --format-cpp : Format cpp" << std::endl; + std::cout << " --no-imported : Do not process the imported modules" << std::endl; + std::cout << "default out name uses the first input name: a.cpp" << std::endl; +} + +int main (int argc, char *argv[]) { + if (argc == 1 || (!strncmp(argv[1], "--help", 6) && (strlen(argv[1]) == 6))) { + help(); + exit(-1); + } + + unsigned flags = 0; + // one or more input .ast files separated by ',' + const char *inputname = argv[1]; + // output .ast file + const char *outputname = nullptr; + + // Parse the argument + for (unsigned i = 2; i < argc; i++) { + if (!strncmp(argv[i], "--trace=", 8)) { + int val = atoi(argv[i] + 8); + if (val < 1 || val > 15) { + help(); + exit(-1); + } + flags |= val; + } else if (!strncmp(argv[i], "--emit-ts-only", 14)) { + flags |= maplefe::FLG_emit_ts_only; + } else if (!strncmp(argv[i], "--emit-ts", 9)) { + flags |= maplefe::FLG_emit_ts; + } else if (!strncmp(argv[i], "--format-cpp", 12)) { + flags |= maplefe::FLG_format_cpp; + } else if (!strncmp(argv[i], "--no-imported", 13)) { + flags |= maplefe::FLG_no_imported; + } else if (!strncmp(argv[i], "--in=", 5)) { + inputname = argv[i]+5; + } else if (!strncmp(argv[i], "--out=", 6)) { + outputname = argv[i]+6; + } else { + std::cerr << "unknown option " << argv[i] << std::endl; + exit(-1); + } + } + + // input ast files + std::vector<std::string> inputfiles; + if (inputname) { + std::stringstream ss; + ss.str(inputname); + std::string item; + while (std::getline(ss, item, ',')) { + // std::cout << "item " << item << " xxx"<< std::endl; + inputfiles.push_back(item); + } + } + + unsigned trace = (flags & maplefe::FLG_trace); + maplefe::AST_Handler handler(trace); + for (auto astfile: inputfiles) { + std::ifstream input(astfile, std::ifstream::binary); + input >> std::noskipws; + std::istream_iterator s(input), e; + maplefe::AstBuffer vec(s, e); + maplefe::AstLoad loadAst; + maplefe::ModuleNode *mod = loadAst.LoadFromAstBuf(vec); + // add mod to the vector + while(mod) { + handler.AddModule(mod); + mod = loadAst.Next(); + } + } + + maplefe::Obfuscate *obfuscate = new maplefe::Obfuscate(&handler, flags); + int res = obfuscate->ProcessAST(); + + return res; +} diff --git a/src/MapleFE/tools/obfuscate/src/obfuscate.cpp b/src/MapleFE/tools/obfuscate/src/obfuscate.cpp new file mode 100644 index 0000000000000000000000000000000000000000..cec38cbc9cd61b600310f72967651b849431cc77 --- /dev/null +++ b/src/MapleFE/tools/obfuscate/src/obfuscate.cpp @@ -0,0 +1,168 @@ +/* +* Copyright (C) [2022] Futurewei Technologies, Inc. All rights reverved. +* Copyright (C) [2022] Tencent. All rights reverved. +* +* OpenArkFE is licensed under the Mulan PSL v2. +* You can use this software according to the terms and conditions of the Mulan PSL v2.
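The driver above takes one or more .ast files (comma separated) and hands the loaded modules to Obfuscate::ProcessAST(). A sketch of an invocation, using the command name printed by help() and hypothetical input file names that are not taken from this patch:

  obfuscate a.ast,b.ast --trace=1 --no-imported

Per the emitter loop in obfuscate.cpp below, each module is then written back out as <module filename>.obf; note that --out= is parsed but not otherwise used by this driver.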
+* You may obtain a copy of Mulan PSL v2 at: +* +* http://license.coscl.org.cn/MulanPSL2 +* +* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +* FIT FOR A PARTICULAR PURPOSE. +* See the Mulan PSL v2 for more details. +*/ + +#include +#include +#include +#include +#include + +#include "obfuscate.h" +#include "ast_handler.h" +#include "gen_astdump.h" +#include "gen_astgraph.h" +#include "gen_aststore.h" +#include "gen_astload.h" +#include "cpp_definition.h" +#include "cpp_declaration.h" +#include "a2c_util.h" + +namespace maplefe { + +bool Obfuscate::LoadImportedModules() { + std::queue<std::string> queue; + for (HandlerIndex i = 0; i < GetModuleNum(); i++) { + Module_Handler *handler = mASTHandler->GetModuleHandler(i); + ModuleNode *module = handler->GetASTModule(); + ImportedFiles imported(module); + imported.VisitTreeNode(module); + for(const auto &e: imported.mFilenames) + queue.push(e); + } + + bool err = false; + while(!queue.empty()) { + std::string filename = queue.front(); + queue.pop(); + if(mASTHandler->GetHandlerIndex(filename.c_str()) == HandlerNotFound) { + std::ifstream input(filename, std::ifstream::binary); + if(input.fail()) { + std::cerr << "Error: File " << filename << " not found for imported module" << std::endl; + err = true; + continue; + } + input >> std::noskipws; + std::istream_iterator s(input), e; + maplefe::AstBuffer vec(s, e); + maplefe::AstLoad loadAst; + maplefe::ModuleNode *mod = loadAst.LoadFromAstBuf(vec); + // add mod to the vector + while(mod) { + mASTHandler->AddModule(mod); + ImportedFiles imported(mod); + imported.VisitTreeNode(mod); + for(const auto &e: imported.mFilenames) + queue.push(e); + mod = loadAst.Next(); + } + } + } + return err; +} + +// starting point of AST +int Obfuscate::ProcessAST() { + mIndexImported = GetModuleNum(); + + // load all imported modules + if (!(mFlags & FLG_no_imported)) { + if (LoadImportedModules()) { + return 1; + } + } + + // loop through module handlers + for (HandlerIndex i = 0; i < GetModuleNum(); i++) { + Module_Handler *handler = mASTHandler->GetModuleHandler(i); + ModuleNode *module = handler->GetASTModule(); + + if (mFlags & FLG_trace_1) { + std::cout << "============= in ProcessAST ===========" << std::endl; + std::cout << "srcLang : " << module->GetSrcLangString() << std::endl; + + for(unsigned k = 0; k < module->GetTreesNum(); k++) { + TreeNode *tnode = module->GetTree(k); + if (mFlags & FLG_trace_1) { + tnode->Dump(0); + std::cout << std::endl; + } + } + } + + if (mFlags & FLG_trace_2) { + std::cout << "============= AstGraph ===========" << std::endl; + AstGraph graph(module); + graph.DumpGraph("After LoadFromAstBuf()", &std::cout); + } + } + + // build dependency of modules + PreprocessModules(); + + // loop through module handlers in import/export dependency order + for (auto handler: mHandlersInOrder) { + ModuleNode *module = handler->GetASTModule(); + + // basic analysis + handler->BasicAnalysis(); + + if (mFlags & FLG_trace_2) { + std::cout << "============= After BasicAnalysis ===========" << std::endl; + for(unsigned k = 0; k < module->GetTreesNum(); k++) { + TreeNode *tnode = module->GetTree(k); + if (mFlags & FLG_trace_1) { + tnode->Dump(0); + std::cout << std::endl; + } + } + AstGraph graph(module); + graph.DumpGraph("After BasicAnalysis()", &std::cout); + } + } + + if (mFlags & FLG_trace_3) { + gStringPool.Dump(); + } + + gStringPool.SetAltStrIdxMap(); + + if (mFlags &
FLG_trace_3) { + gStringPool.Dump(); + gStringPool.DumpAlt(); + } + + gStringPool.SetUseAltStr(true); + + for (auto handler: mHandlersInOrder) { + ModuleNode *module = handler->GetASTModule(); + + std::cout << "============= Emitter ===========" << std::endl; + maplefe::Emitter emitter(handler); + std::string code = emitter.Emit("Convert AST to TypeScript code"); + + // Emit to file + std::string of_name(module->GetFilename()); + of_name += ".obf"; + std::ofstream ofs; + ofs.open(of_name.c_str(), std::ofstream::out); + ofs << code; + ofs.close(); + } + + return 0; +} + +} // namespace maplefe diff --git a/src/MapleFE/typescript/Makefile b/src/MapleFE/typescript/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..266eec091a2e64f82189cca8c5f43e40f0207878 --- /dev/null +++ b/src/MapleFE/typescript/Makefile @@ -0,0 +1,26 @@ +# Copyright (C) [2021] Futurewei Technologies, Inc. All rights reverved. +# +# OpenArkFE is licensed under the Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# + +include ../Makefile.in + +TARGS = ts2cpp + +$(TARGS): + $(MAKE) -C src + +clean: + rm -rf $(BUILDDIR)/typescript + +.PHONY: $(TARGS) + diff --git a/src/MapleFE/java/local_var.spec b/src/MapleFE/typescript/attr.spec similarity index 41% rename from src/MapleFE/java/local_var.spec rename to src/MapleFE/typescript/attr.spec index 3c7d8ac3c032e56b8eb60d2abe453cf12aa4869c..a632ede282f8dc13a14de62879ffafb1afacd429 100644 --- a/src/MapleFE/java/local_var.spec +++ b/src/MapleFE/typescript/attr.spec @@ -12,29 +12,29 @@ # See the Mulan PSL v2 for more details. # ################################################################################### -# This file defines the Java Local Variable declaration. +# This file defines the Attribute, Modifier, or any other terms which are used +# to describe some features of certian syntax component. # +# The keyword duplex is defined as <"keyword", AttributeId>. The AttributeId is defined +# in shared/supported_attributes.def and included in shared/supported.h. The 'keyword' +# is the keyword in a language defining that specific attribute. 
################################################################################### -####include "type.spec" -###rule VariableModifier: (one of) Annotation final -rule VariableModifier: "final" +STRUCT Attribute : (("abstract", abstract), + ("const", const), + ("volatile", volatile), + ("final", final), + ("native", native), + ("private", private), + ("protected", protected), + ("public", public), + ("readonly", readonly), + ("static", static), + ("strictfp", strictfp), + ("default", default), + ("get", getter), + ("set", setter), + ("async", async), + ("synchronized", synchronized)) -#rule VariableDeclarator : VariableDeclaratorId + ZEROORONE('=' + VariableInitializer) -rule VariableDeclarator : VariableDeclaratorId -rule VariableDeclaratorList : VariableDeclarator + ZEROORMORE(',' + VariableDeclarator) - -###rule VariableDeclaratorId : Identifier [Dims] -rule VariableDeclaratorId : Identifier - -#rule UnannPrimitiveType : ONEOF(NumericType, Boolean) -rule UnannPrimitiveType : ONEOF("int", "boolean") - -###rule UnannType : ONEOF(UnannPrimitiveType, UnannReferenceType) -rule UnannType : ONEOF(UnannPrimitiveType) - -###rule LocalVariableDeclaration : ZEROORMORE(VariableModifier) + UnannType + VariableDeclaratorList -rule LocalVariableDeclaration : UnannType + VariableDeclaratorList - -rule LocalVariableDeclarationStatement : LocalVariableDeclaration + ';' diff --git a/src/MapleFE/typescript/identifier.spec b/src/MapleFE/typescript/identifier.spec new file mode 100644 index 0000000000000000000000000000000000000000..5723b480b3cd59e04d86d6d935f9bd949971c926 --- /dev/null +++ b/src/MapleFE/typescript/identifier.spec @@ -0,0 +1,20 @@ +# Copyright (C) [2020] Futurewei Technologies, Inc. All rights reverved. +# +# OpenArkFE is licensed under the Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# +# An identifier is an unlimited-length sequence of Java letters and Java digits, the +# first of which must be a Java letter +# +rule JavaChar : ONEOF(CHAR, '_' , '$') +rule CharOrDigit : ONEOF(JavaChar, DIGIT) +rule Identifier : ONEOF("no-default-lib", + JavaChar + ZEROORMORE(CharOrDigit)) diff --git a/src/MapleFE/typescript/include/lang_builtin.def b/src/MapleFE/typescript/include/lang_builtin.def new file mode 100644 index 0000000000000000000000000000000000000000..791452525da5c9d65a88608d27840c712bfd885e --- /dev/null +++ b/src/MapleFE/typescript/include/lang_builtin.def @@ -0,0 +1,66 @@ +// Copyright (C) [2022] Futurewei Technologies, Inc. All rights reverved. +// +// OpenArkFE is licensed under the Mulan PSL v2. +// You can use this software according to the terms and conditions of the Mulan PSL v2. +// You may obtain a copy of Mulan PSL v2 at: +// +// http://license.coscl.org.cn/MulanPSL2 +// +// THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +// FIT FOR A PARTICULAR PURPOSE. +// See the Mulan PSL v2 for more details. 
+// + +// refer to docs/builtin-constructors.md + +// javascript builtin object types +BUILTIN(AggregateError) +BUILTIN(Array) +BUILTIN(ArrayBuffer) +BUILTIN(AsyncFunction) +BUILTIN(BigInt64Array) +BUILTIN(BigUint64Array) +BUILTIN(Boolean) +BUILTIN(DataView) +BUILTIN(Date) +BUILTIN(Error) +BUILTIN(EvalError) +BUILTIN(FinalizationRegistry) +BUILTIN(Float32Array) +BUILTIN(Float64Array) +BUILTIN(Function) +BUILTIN(Generator) +BUILTIN(GeneratorFunction) +BUILTIN(Int16Array) +BUILTIN(Int32Array) +BUILTIN(Int8Array) +BUILTIN(InternalError (Mozilla only)) +BUILTIN(Map) +BUILTIN(Number) +BUILTIN(Object) +BUILTIN(Promise) +BUILTIN(Proxy) +BUILTIN(RangeError) +BUILTIN(ReferenceError) +BUILTIN(RegExp) +BUILTIN(Set) +BUILTIN(SharedArrayBuffer) +BUILTIN(String) +BUILTIN(Symbol) +BUILTIN(SyntaxError) +BUILTIN(TypeError) +BUILTIN(Uint16Array) +BUILTIN(Uint32Array) +BUILTIN(Uint8Array) +BUILTIN(Uint8ClampedArray) +BUILTIN(URIError) +BUILTIN(WeakMap) +BUILTIN(WeakRef) +BUILTIN(WeakSet) + +// typescript builtin object types +BUILTIN(Record) +BUILTIN(Tuple) +BUILTIN(Iterable) +BUILTIN(Iterator) diff --git a/src/MapleFE/typescript/include/lang_keywords.def b/src/MapleFE/typescript/include/lang_keywords.def new file mode 100644 index 0000000000000000000000000000000000000000..096966bd2bc2907809676225773b9e3120929021 --- /dev/null +++ b/src/MapleFE/typescript/include/lang_keywords.def @@ -0,0 +1,122 @@ +// Copyright (C) [2021] Futurewei Technologies, Inc. All rights reverved. +// +// OpenArkFE is licensed under the Mulan PSL v2. +// You can use this software according to the terms and conditions of the Mulan PSL v2. +// You may obtain a copy of Mulan PSL v2 at: +// +// http://license.coscl.org.cn/MulanPSL2 +// +// THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +// FIT FOR A PARTICULAR PURPOSE. +// See the Mulan PSL v2 for more details. 
+// + +LANGKEYWORD(boolean) +LANGKEYWORD(number) +LANGKEYWORD(string) +LANGKEYWORD(symbol) +LANGKEYWORD(unique) +LANGKEYWORD(any) +LANGKEYWORD(unknown) +LANGKEYWORD(never) +LANGKEYWORD(undefined) +LANGKEYWORD(type) + +LANGKEYWORD(get) +LANGKEYWORD(set) +LANGKEYWORD(as) +LANGKEYWORD(from) +LANGKEYWORD(constructor) +LANGKEYWORD(namespace) +LANGKEYWORD(module) +LANGKEYWORD(declare) + +LANGKEYWORD(break) +LANGKEYWORD(do) +LANGKEYWORD(in) +LANGKEYWORD(is) +LANGKEYWORD(of) +LANGKEYWORD(typeof) +LANGKEYWORD(keyof) +LANGKEYWORD(infer) +LANGKEYWORD(case) +LANGKEYWORD(else) +LANGKEYWORD(instanceof) +LANGKEYWORD(var) +LANGKEYWORD(catch) +LANGKEYWORD(export) +LANGKEYWORD(new) +LANGKEYWORD(void) +LANGKEYWORD(class) +LANGKEYWORD(extends) +LANGKEYWORD(return) +LANGKEYWORD(while) +LANGKEYWORD(const) +LANGKEYWORD(finally) +LANGKEYWORD(super) +LANGKEYWORD(with) +LANGKEYWORD(continue) +LANGKEYWORD(for) +LANGKEYWORD(switch) +LANGKEYWORD(yield) +LANGKEYWORD(debugger) +LANGKEYWORD(function) +// LANGKEYWORD(this) +LANGKEYWORD(default) +LANGKEYWORD(if) +LANGKEYWORD(throw) +LANGKEYWORD(delete) +LANGKEYWORD(import) +LANGKEYWORD(require) +LANGKEYWORD(try) +LANGKEYWORD(asserts) + +LANGKEYWORD(let) +LANGKEYWORD(static) +LANGKEYWORD(implements) +LANGKEYWORD(protected) +LANGKEYWORD(interface) +LANGKEYWORD(private) +LANGKEYWORD(public) +LANGKEYWORD(abstract) + +LANGKEYWORD(readonly) + +LANGKEYWORD(enum) +LANGKEYWORD(await) +LANGKEYWORD(async) + +LANGKEYWORD(global) + +// Utility Types +LANGKEYWORD(Partial) +LANGKEYWORD(Required) +LANGKEYWORD(Readonly) +LANGKEYWORD(Record) +LANGKEYWORD(Pick) +LANGKEYWORD(Omit) +LANGKEYWORD(Exclude) +LANGKEYWORD(Extract) +LANGKEYWORD(NonNullable) +LANGKEYWORD(Parameters) +LANGKEYWORD(ConstructorParameters) +LANGKEYWORD(ReturnType) +LANGKEYWORD(InstanceType) +LANGKEYWORD(ThisParameterType) +LANGKEYWORD(OmitThisParameter) +LANGKEYWORD(ThisType) +LANGKEYWORD(Uppercase) +LANGKEYWORD(Lowercase) +LANGKEYWORD(Capitalize) +LANGKEYWORD(Uncapitalize) + +LANGKEYWORD(Error) +LANGKEYWORD(toString) + +// extra + +LANGKEYWORD(console) +LANGKEYWORD(log) + +LANGKEYWORD(null) diff --git a/src/MapleFE/typescript/include/lang_spec.h b/src/MapleFE/typescript/include/lang_spec.h new file mode 100644 index 0000000000000000000000000000000000000000..96aef8bf12195fd32df8cf8630e6beb6632f8c9a --- /dev/null +++ b/src/MapleFE/typescript/include/lang_spec.h @@ -0,0 +1,68 @@ +/* +* Copyright (C) [2020] Futurewei Technologies, Inc. All rights reverved. +* +* OpenArkFE is licensed under the Mulan PSL v2. +* You can use this software according to the terms and conditions of the Mulan PSL v2. +* You may obtain a copy of Mulan PSL v2 at: +* +* http://license.coscl.org.cn/MulanPSL2 +* +* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +* FIT FOR A PARTICULAR PURPOSE. +* See the Mulan PSL v2 for more details. 
+*/ +///////////////////////////////////////////////////////////////////////////////// +// Language Specific Implementations // +///////////////////////////////////////////////////////////////////////////////// + +#ifndef __LANG_SPEC_H__ +#define __LANG_SPEC_H__ + +#include "stringutil.h" +#include "token.h" +#include "lexer.h" +#include "parser.h" + +namespace maplefe { + +class StringToValueImpl : public StringToValue { +public: + float StringToFloat(std::string &s); + double StringToDouble(std::string &s); + bool StringToBool(std::string &s); + Char StringToChar(std::string &s); + bool StringIsNull(std::string &s); + const char* StringToString(std::string &); +}; + +extern LitData ProcessLiteral(LitId type, const char *str); + +//////////////////////////////////////////////////////////////////////////////////// +// Typescript Lexer +//////////////////////////////////////////////////////////////////////////////////// + +class TypescriptLexer : public Lexer { +public: + bool CharIsSeparator(const char); + + TempLitData* GetTempLit(); + bool FindNextTLFormat(unsigned start, std::string& s, unsigned& end); + bool FindNextTLPlaceHolder(unsigned start, std::string& s, unsigned& end); + bool FindTripleSlash(); +}; + +//////////////////////////////////////////////////////////////////////////////////// +// Typescript Parser +//////////////////////////////////////////////////////////////////////////////////// + +class TypescriptParser : public Parser { +public: + TypescriptParser(const char *f) : Parser(f) {} + Token* GetRegExpr(Token *t); + bool TokenSplit(Token *); + bool TraverseASI(RuleTable*, AppealNode*, AppealNode *&); +}; + +} +#endif diff --git a/src/MapleFE/typescript/keyword.spec b/src/MapleFE/typescript/keyword.spec new file mode 100644 index 0000000000000000000000000000000000000000..cbc04c0c4085bb06aed62e0ae3c951f539f7f4ec --- /dev/null +++ b/src/MapleFE/typescript/keyword.spec @@ -0,0 +1,104 @@ +# Copyright (C) [2020] Futurewei Technologies, Inc. All rights reverved. +# +# OpenArkFE is licensed under the Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# +################################################################################## +# This file contains the keyword of a language. It doesn't clarify the semantics +# of a keyword, which will be done by RULEs, i.e., the parser will check the keyword +# while traversing the rule tables. 
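#
# For example, "get" and "set" are listed below so the lexer can tokenize them,
# yet `let get = 1;` (an illustrative input) is still valid TypeScript; whether a
# keyword may act as an identifier in a given position is decided by the rules
# (see KeywordIdentifier in stmt.spec), not by this table.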
+# +# The generated table of keywords are only used for +# (1) Parser to check while traversing rule tables +# (2) check the correctness of names so as not to conflict with keywords +################################################################################## + +STRUCT KeyWord : ((boolean), + (number), + (string), + (symbol), + (unique), + (any), + (unknown), + (never), + (undefined), + (type), + + (get), + (set), + (as), + (from), + (constructor), + (namespace), + (module), + (declare), + + (break), + (do), + (in), + (is), + (of), + (typeof), + (keyof), + (infer), + (case), + (else), + (instanceof), + (var), + (catch), + (export), + (new), + (void), + (class), + (extends), + (return), + (while), + (const), + (finally), + (super), + (with), + (continue), + (for), + (switch), + (yield), + (debugger), + (function), + (this), + (default), + (if), + (throw), + (delete), + (import), + (require), + (try), + (asserts), + +#this part is for strict mode + (let), + (static), + (implements), + (protected), + (interface), + (private), + (public), + (abstract), + + (readonly), + +#this part is for future reserved + (enum), + (await), + (async), + + (global), + +#this is for 'fake rule only' + (this_is_for_fake_rule)) diff --git a/src/MapleFE/typescript/literal.spec b/src/MapleFE/typescript/literal.spec new file mode 100644 index 0000000000000000000000000000000000000000..c2d61bb1213c8138ac01d449458ab29f0cba73b7 --- /dev/null +++ b/src/MapleFE/typescript/literal.spec @@ -0,0 +1,158 @@ +# Copyright (C) [2020] Futurewei Technologies, Inc. All rights reverved. +# +# OpenArkFE is licensed under the Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# +# A literal is the source code representation of a value of a primitive type, the +# String type, or the null type. +# +# NOTE: Make sure there is a 'rule Literal'. This is the official rule recognized +# by autogen. 
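#
# For example, the rules below cover literal spellings such as (illustrative
# inputs): 123, 1_000, 0x1F, 0b1010, 017, 1.5e3, 2.5f, true, 'a', '\u0041',
# "abc", null. Note the grammar keeps Java-style shapes here, e.g. the
# '0'-prefixed octal form and the f/F/d/D floating point suffixes.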
+ +######################################################################### +## Integer ## +######################################################################### + +### Decimal rules + +rule NonZeroDigit : ONEOF('1', '2', '3', '4', '5', '6', '7', '8', '9') +rule Underscores : '_' + ZEROORMORE('_') +rule Digit : ONEOF('0', NonZeroDigit) +rule DigitOrUnderscore : ONEOF(Digit, '_') +rule DigitsAndUnderscores : DigitOrUnderscore + ZEROORMORE(DigitOrUnderscore) +rule Digits : ONEOF(Digit, Digit + ZEROORONE(DigitsAndUnderscores) + Digit) + attr.property.%2 : SecondTry + +rule DecimalNumeral : ONEOF('0', NonZeroDigit + ZEROORONE(Digits), + NonZeroDigit + Underscores + Digits) + +### Hexadecimal rules + +rule HexDigit : ONEOF('0', '1', '2', '3', '4', '5', '6', '7', '8', '9', + 'a', 'b', 'c', 'd', 'e', 'f', + 'A', 'B', 'C', 'D', 'E', 'F') +rule HexDigitOrUnderscore : ONEOF(HexDigit, '_') +rule HexDigitsAndUnderscores:HexDigitOrUnderscore + ZEROORMORE(HexDigitOrUnderscore) +rule HexDigits : ONEOF(HexDigit, + HexDigit + ZEROORONE(HexDigitsAndUnderscores) + HexDigit) + attr.property.%2 : SecondTry +rule HexNumeral : ONEOF("0x" + HexDigits, "0X" + HexDigits) + +### Octal rules + +rule OctalDigit : ONEOF('0', '1', '2', '3', '4', '5', '6', '7') +rule OctalDigitOrUnderscore : ONEOF(OctalDigit, '_') +rule OctalDigitsAndUnderscores:OctalDigitOrUnderscore + ZEROORMORE(OctalDigitOrUnderscore) +rule OctalDigits : ONEOF(OctalDigit, + OctalDigit + ZEROORONE(OctalDigitsAndUnderscores) + OctalDigit) + attr.property.%2 : SecondTry +rule OctalNumeral : ONEOF('0' + OctalDigits, '0' + Underscores + OctalDigits) + +### Binary rules + +rule BinDigit : ONEOF('0', '1') +rule BinDigitOrUnderscore : ONEOF(BinDigit, '_') +rule BinDigitsAndUnderscores:BinDigitOrUnderscore + ZEROORMORE(BinDigitOrUnderscore) +rule BinDigits : ONEOF(BinDigit, + BinDigit + ZEROORONE(BinDigitsAndUnderscores) + BinDigit) + attr.property.%2 : SecondTry +rule BinNumeral : ONEOF("0b" + BinDigits, "0B" + BinDigits) + +########## + +rule IntegerTypeSuffix : ONEOF('L', 'l') + +rule DecimalIntegerLiteral: DecimalNumeral + ZEROORONE(IntegerTypeSuffix) +rule HexIntegerLiteral : HexNumeral + ZEROORONE(IntegerTypeSuffix) +rule OctalIntegerLiteral : OctalNumeral + ZEROORONE(IntegerTypeSuffix) +rule BinaryIntegerLiteral : BinNumeral + ZEROORONE(IntegerTypeSuffix) + +rule IntegerLiteral: ONEOF(DecimalIntegerLiteral, + HexIntegerLiteral, + OctalIntegerLiteral, + BinaryIntegerLiteral) + +######################################################################### +## Floating Point ## +######################################################################### + +##### Decimal floating point literal + +rule Sign : ONEOF('+', '-') +rule FloatTypeSuffix : ONEOF('f', 'F', 'd', 'D') +rule ExponentIndicator : ONEOF('e', 'E') +rule SignedInteger : ZEROORONE(Sign) + Digits +rule ExponentPart : ExponentIndicator + SignedInteger + +rule DecFPLiteral : ONEOF(Digits + '.' + ZEROORONE(Digits) + ZEROORONE(ExponentPart) + ZEROORONE(FloatTypeSuffix), + '.'+Digits + ZEROORONE(ExponentPart) + ZEROORONE(FloatTypeSuffix), + Digits + ExponentPart + ZEROORONE(FloatTypeSuffix), + Digits + ZEROORONE(ExponentPart)) + +####### Hex floating point literal + +rule BinaryExponentIndicator : ONEOF('p', 'P') +rule BinaryExponent : BinaryExponentIndicator + SignedInteger +rule HexSignificand : ONEOF(HexNumeral + ZEROORONE('.'), + "0x" + ZEROORONE(HexDigits) + '.' + HexDigits, + "0X" + ZEROORONE(HexDigits) + '.' 
+ HexDigits) +rule HexFPLiteral: HexSignificand + BinaryExponent + ZEROORONE(FloatTypeSuffix) + +###### Floating POint Literal + +rule FPLiteral : ONEOF(DecFPLiteral, HexFPLiteral) + +######################################################################### +## Boolean ## +######################################################################### + +rule BooleanLiteral : ONEOF ("true", "false") + +######################################################################### +## Character ## +## ESCAPE is a reserved rule in reserved.spec. ## +######################################################################### + +# I decided to simplify the unicode escape a little bit. I don't want to +# handle all odd cases. +rule UnicodeEscape: '\' + 'u' + HEXDIGIT + HEXDIGIT + HEXDIGIT + HEXDIGIT +rule RawInputCharacter : ONEOF(ASCII, ESCAPE, UTF8) +rule SingleCharacter: ONEOF(UnicodeEscape, RawInputCharacter) + +rule OctalEscape : ONEOF('\' + '0', '\' + '1') +rule EscapeSequence : ONEOF(ESCAPE, OctalEscape) +rule CharacterLiteral : ''' + ONEOF(SingleCharacter, EscapeSequence) + ''' + +######################################################################### +## String ## +######################################################################### +# The UnicodeEscape is limited from \u0000 to \u00ff. +rule StringUnicodeEscape: '\' + 'u' + '0' + '0' + HEXDIGIT + HEXDIGIT +rule SingleStringCharater: ONEOF(StringUnicodeEscape, RawInputCharacter, IRREGULAR_CHAR, '"') +rule DoubleStringCharater: ONEOF(StringUnicodeEscape, RawInputCharacter, IRREGULAR_CHAR, ''') +rule StringLiteral : ONEOF('"' + ZEROORMORE(DoubleStringCharater) + '"', + ''' + ZEROORMORE(SingleStringCharater) + ''') + +######################################################################### +## Null ## +######################################################################### + +rule NullLiteral : "null" + +######################################################################### +## Literal ## +######################################################################### + +rule Literal : ONEOF(IntegerLiteral, + FPLiteral, + BooleanLiteral, + StringLiteral, + NullLiteral) diff --git a/src/MapleFE/typescript/operator.spec b/src/MapleFE/typescript/operator.spec new file mode 100644 index 0000000000000000000000000000000000000000..d09db223d3cdc7069e1300fde8861a08f6ff9b8d --- /dev/null +++ b/src/MapleFE/typescript/operator.spec @@ -0,0 +1,79 @@ +# Copyright (C) [2020] Futurewei Technologies, Inc. All rights reverved. +# +# OpenArkFE is licensed under the Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# +########################################################################## +# The syntax of operator.spec contains two parts. +# 1. The Keyword STRUCT defining the of all operations. +# keyword : the text defined in the language +# opr : the standard Autogen defined operator. +# See autogen/include/supported_operators.def +# 2. The rule part, defining the language restrictions of each operator. +########################################################################## + +# NOTE +# Some languages could have one synatx belonging to both separator and operators. 
+# eg., ':' in Java 8, it's both a separator colon and operator select. +# We need avoid such duplication in .spec files. + +STRUCT Operator : ( + # Arithmetic + ("+", Add), + ("-", Sub), + ("*", Mul), + ("/", Div), + ("%", Mod), + ("++", Inc), + ("--", Dec), + ("**", Exp), + ("??", NullCoalesce), + ("??=", NullAssign), + # Relation + ("==", EQ), + ("!=", NE), + (">", GT), + ("<", LT), + (">=", GE), + ("<=", LE), + ("===", StEq), + ("!==", StNe), + # Bitwise + ("&", Band), + ("|", Bor), + ("^", Bxor), + ("~", Bcomp), + # Shift + ("<<", Shl), + (">>", Shr), + (">>>", Zext), + # Logical + ("&&", Land), + ("||", Lor), + ("!", Not), + # Assign + ("=", Assign), + ("+=", AddAssign), + ("-=", SubAssign), + ("*=", MulAssign), + ("/=", DivAssign), + ("%=", ModAssign), + ("<<=", ShlAssign), + (">>=", ShrAssign), + ("&=", BandAssign), + ("|=", BorAssign), + ("^=", BxorAssign), + (">>>=", ZextAssign), + + ("///", TripleSlash), + + # arrow function + ("=>", ArrowFunction)) diff --git a/src/MapleFE/typescript/separator.spec b/src/MapleFE/typescript/separator.spec new file mode 100644 index 0000000000000000000000000000000000000000..b06a949337eb48de6e69c481724f8b14af47f545 --- /dev/null +++ b/src/MapleFE/typescript/separator.spec @@ -0,0 +1,45 @@ +# Copyright (C) [2020] Futurewei Technologies, Inc. All rights reverved. +# +# OpenArkFE is licensed under the Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# +# NOTE - 1 +# +# This file defines the separator. The separators are defined using a STRUCT. +# Each separator in this STRUCT is a set of 2 elements. +# STRUCT Separator : ( ("(", LeftParenthesis), +# (")", RightParenthesis), +# The first element is the literal name of separator, it needs to be a string +# The second is the ID of the separator. Please check shared/include/supported_separators.def +# to see the supported separator ID. + +# NOTE - 2 +# Some languages could have one synatx belonging to both separator and operators. +# eg., ':' in Java 8, it's both a separator colon and operator select. +# We need avoid such duplication in .spec files. + +STRUCT Separator : ((" ", Whitespace), + ("(", Lparen), + (")", Rparen), + ("{", Lbrace), + ("}", Rbrace), + ("[", Lbrack), + ("]", Rbrack), + (";", Semicolon), + (",", Comma), + (".", Dot), + ("...", Dotdotdot), + (":", Colon), + ("?", Select), + ("?.",Optional), + ("@", At), + ("#", Pound), + ("\t", Tab)) diff --git a/src/MapleFE/typescript/src/Makefile b/src/MapleFE/typescript/src/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..392dc308bc7852a4e8b14578efad9d2f52306413 --- /dev/null +++ b/src/MapleFE/typescript/src/Makefile @@ -0,0 +1,80 @@ +# Copyright (C) [2021] Futurewei Technologies, Inc. All rights reverved. +# +# OpenArkFE is licensed under the Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. 
+# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# + +include ../../Makefile.in +BUILDBIN=$(BUILDDIR)/bin +BUILD=$(BUILDDIR)/typescript +BUILDGEN=$(BUILDDIR)/gen +BUILDASTGEN=$(BUILDDIR)/ast_gen/shared +$(shell $(MKDIR_P) $(BUILD) $(BUILDGEN)) + +SRC=$(wildcard *.cpp) +OBJ :=$(patsubst %.cpp,%.o,$(SRC)) +DEP :=$(patsubst %.cpp,%.d,$(SRC)) + +SRCG := $(wildcard $(BUILDGEN)/gen*.cpp) +OBJG := $(patsubst %.cpp, %.o, $(SRCG)) +DEPG := $(patsubst %.cpp, %.d, $(SRCG)) + +OBJS :=$(foreach obj,$(OBJ), $(BUILD)/$(obj)) $(OBJG) +DEPS :=$(foreach dep,$(DEP), $(BUILD)/$(dep)) $(DEPG) + +INCLUDES := -I $(MAPLEFE_ROOT)/shared/include \ + -I $(MAPLEFE_ROOT)/typescript/include \ + -I $(MAPLEFE_ROOT)/autogen/include \ + -I ${BUILDDIR}/ast_gen/shared \ + -I $(BUILDGEN) + +INCLUDEGEN := -I $(BUILDGEN) -I $(MAPLEFE_ROOT)/shared/include + +TARGET=ts2ast + +SHAREDLIB = $(BUILDDIR)/shared/shared.a $(BUILDASTGEN)/genast.a + +.PHONY: all +all: $(BUILDBIN)/$(TARGET) + +-include $(DEPS) +.PHONY: clean + +vpath %.o $(BUILD) +vpath %.d $(BUILD) + +# Pattern Rules +$(BUILD)/%.o : %.cpp + $(CXX) $(CXXFLAGS) -fpermissive $(INCLUDES) -w -c $< -o $@ + +$(BUILD)/%.d : %.cpp + @$(CXX) $(CXXFLAGS) -std=c++11 -MM $(INCLUDES) $< > $@ + @mv -f $(BUILD)/$*.d $(BUILD)/$*.d.tmp + @sed -e 's|.*:|$(BUILD)/$*.o:|' < $(BUILD)/$*.d.tmp > $(BUILD)/$*.d + @rm -f $(BUILD)/$*.d.tmp + +$(BUILDGEN)/%.o : $(BUILDGEN)/%.cpp $(BUILDGEN)/%.d + $(CXX) $(CXXFLAGS) -fpermissive $(INCLUDEGEN) -w -c $< -o $@ + +$(BUILDGEN)/%.d : $(BUILDGEN)/%.cpp + @$(CXX) $(CXXFLAGS) -std=c++11 -MM $(INCLUDEGEN) $< > $@ + @mv -f $(BUILDGEN)/$*.d $(BUILDGEN)/$*.d.tmp + @sed -e 's|.*:|$(BUILDGEN)/$*.o:|' < $(BUILDGEN)/$*.d.tmp > $(BUILDGEN)/$*.d + @rm -f $(BUILDGEN)/$*.d.tmp + +# TARGET depends on OBJS and shared OBJS from shared directory +# as well as mapleall libraries +$(BUILDBIN)/$(TARGET): $(OBJS) $(SHAREDLIB) + @mkdir -p $(BUILDBIN) + $(LD) -o $(BUILDBIN)/$(TARGET) $(OBJS) $(SHAREDLIB) + +clean: + rm -rf $(BUILD) diff --git a/src/MapleFE/typescript/src/lang_spec.cpp b/src/MapleFE/typescript/src/lang_spec.cpp new file mode 100644 index 0000000000000000000000000000000000000000..d51d1f312d0184badb28eac07e422c6a24a65632 --- /dev/null +++ b/src/MapleFE/typescript/src/lang_spec.cpp @@ -0,0 +1,661 @@ +/* +* Copyright (C) [2020] Futurewei Technologies, Inc. All rights reverved. +* +* OpenArkFE is licensed under the Mulan PSL v2. +* You can use this software according to the terms and conditions of the Mulan PSL v2. +* You may obtain a copy of Mulan PSL v2 at: +* +* http://license.coscl.org.cn/MulanPSL2 +* +* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +* FIT FOR A PARTICULAR PURPOSE. +* See the Mulan PSL v2 for more details. +*/ +#include "lang_spec.h" +#include "stringpool.h" + +namespace maplefe { + +// For all the string to value functions below, we assume the syntax of 's' is correct +// for a literal in Java. + +float StringToValueImpl::StringToFloat(std::string &s) { + return stof(s); +} + +// Java use 'd' or 'D' as double suffix. C++ use 'l' or 'L'. 
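// For example, with the rewrite below "1.5d" becomes "1.5L" before stod() is
// called, so both "1.5" and "1.5d" convert to the double value 1.5; the suffix
// character itself never reaches the converted value.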
+double StringToValueImpl::StringToDouble(std::string &s) { + std::string str = s; + char suffix = str[str.length() - 1]; + if (suffix == 'd' || suffix == 'D') + str[str.length() - 1] = 'L'; + return stod(str); +} + +bool StringToValueImpl::StringToBool(std::string &s) { + if ((s.size() == 4) && (s.compare("true") == 0)) + return true; + else if ((s.size() == 5) && (s.compare("false") == 0)) + return false; + else + MERROR("unknown bool literal"); +} + +bool StringToValueImpl::StringIsNull(std::string &s) {return false;} + +const char* StringToValueImpl::StringToString(std::string &in_str) { + std::string target; + + // For most languages, the input 'in_str' still contains the leading " or ' and the + // ending " or '. They need to be removed. + std::string str; + + // If empty string literal, return the empty 'target'. + if (in_str.size() == 2) { + const char *s = gStringPool.FindString(target); + return s; + } else { + str.assign(in_str, 1, in_str.size() - 2); + } + + // For typescript, if a string literal is: + // s : string = "abc \ + // efg"; + // The \ is actually connnecting the next line into the string literal. + // We need handle the connection. + + std::string s_ret; + for (unsigned i = 0; i < str.length(); i++) { + char c = str[i]; + if (c == '\\') { + if ((i < str.length() - 1) && (str[i+1] == '\n')) { + // skip \ and \n + i += 1; + continue; + } + } + s_ret.push_back(c); + } + + const char *s = gStringPool.FindString(s_ret); + return s; +} + +static char DeEscape(char c) { + switch(c) { + case 'b': + return '\b'; + case 't': + return '\t'; + case 'n': + return '\n'; + case 'f': + return '\f'; + case 'r': + return '\r'; + case '"': + return '\"'; + case '\'': + return '\''; + case '\\': + return '\\'; + case '0': + return '\0'; + default: + MERROR("Unsupported in DeEscape()."); + } +} + +static int char2int(char c) { + if (c >= '0' && c <= '9') + return c - '0'; + else if (c >= 'a' && c <= 'f') + return c - 'a' + 10; + else if (c >= 'A' && c <= 'F') + return c - 'A' + 10; + else + MERROR("Unsupported char in char2int()."); +} + +Char StringToValueImpl::StringToChar(std::string &s) { + Char ret_char; + ret_char.mIsUnicode = false; + MASSERT (s[0] == '\''); + if (s[1] == '\\') { + if (s[2] == 'u') { + ret_char.mIsUnicode = true; + int first = char2int(s[3]); + int second = char2int(s[4]); + int third = char2int(s[5]); + int forth = char2int(s[6]); + MASSERT(s[7] == '\''); + ret_char.mData.mUniValue = (first << 12) + (second << 8) + + (third << 4) + forth; + } else { + ret_char.mData.mChar = DeEscape(s[2]); + } + } else { + MASSERT(s[2] == '\''); + ret_char.mData.mChar = s[1]; + } + return ret_char; +} + +// Each language has its own format of literal. So this function handles Typescript literals. +// It translate a string into a literal. +// +// 'str' is in the Lexer's string pool. +// +LitData ProcessLiteral(LitId id, const char *str) { + LitData data; + std::string value_text(str); + StringToValueImpl s2v; + + switch (id) { + case LT_IntegerLiteral: { + long l = s2v.StringToLong(value_text); + data.mType = LT_IntegerLiteral; + data.mData.mInt = l; + break; + } + case LT_FPLiteral: { + // Java spec doesn't define rules for double. Both float and double + // are covered by Float Point. But we need differentiate here. + // Check if it's a float of double. Non-suffix means double. 
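      // For example, "2.5f" or "2.5F" produces LT_FPLiteral with mData.mFloat == 2.5f,
      // while "2.5" and "2.5d" produce LT_DoubleLiteral with mData.mDouble == 2.5.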
+ char suffix = value_text[value_text.length() - 1]; + if (suffix == 'f' || suffix == 'F') { + float f = s2v.StringToFloat(value_text); + data.mType = LT_FPLiteral; + data.mData.mFloat = f; + } else { + double d = s2v.StringToDouble(value_text); + data.mType = LT_DoubleLiteral; + data.mData.mDouble = d; + } + break; + } + case LT_BooleanLiteral: { + bool b = s2v.StringToBool(value_text); + data.mType = LT_BooleanLiteral; + data.mData.mBool = b; + break; } + case LT_CharacterLiteral: { + Char c = s2v.StringToChar(value_text); + data.mType = LT_CharacterLiteral; + data.mData.mChar = c; + break; } + case LT_StringLiteral: { + const char *s = s2v.StringToString(value_text); + data.mType = LT_StringLiteral; + data.mData.mStrIdx = gStringPool.GetStrIdx(s); + break; } + case LT_NullLiteral: { + // Just need set the id + data.mType = LT_NullLiteral; + break; } + case LT_NA: // N/A, + default: + data.mType = LT_NA; + break; + } + + return data; +} + +///////////////////////////////////////////////////////////////////////////////////// +///////////////////////////////////////////////////////////////////////////////////// +// Implementation of typescript Lexer +///////////////////////////////////////////////////////////////////////////////////// +///////////////////////////////////////////////////////////////////////////////////// + +bool TypescriptLexer::CharIsSeparator(const char c) { + if (c == '`') + return true; + return false; +} + +// NOTE: right now we rely on 'tsc' to assure the input is legal, +// so I'll make many things easier and will skip many lexical +// checks. Just make it easy for now. +// Also, I assume we don't handle multiple line template literal +// for the time being. +TempLitData* TypescriptLexer::GetTempLit() { + TempLitData *tld = NULL; + unsigned old_cur_idx = curidx; + + if (line[curidx] == '`') { + // It's certain that this is a template literal because tsc assures it. + tld = new TempLitData; + + unsigned start_idx; + unsigned end_idx; + start_idx = curidx + 1; + while(1) { + // Try string + end_idx = 0; + std::string fmt_str = ""; + bool s_found = FindNextTLFormat(start_idx, fmt_str, end_idx); + const char *addr = NULL; + if (s_found) { + MASSERT(fmt_str.size() > 0 && "found token has 0 data?"); + addr = gStringPool.FindString(fmt_str); + start_idx = end_idx + 1; + } + + // Try pattern + end_idx = 0; + const char *addr_ph = NULL; + std::string pl_str = ""; + bool p_found = FindNextTLPlaceHolder(start_idx, pl_str, end_idx); + if (p_found) { + unsigned len = pl_str.size(); + MASSERT(len > 0 && "found token has 0 data?"); + addr_ph = gStringPool.FindString(pl_str); + // We need skip the ending '}' of a pattern. + start_idx = end_idx + 2; + } + + // If both string and pattern failed to be found + if (!s_found && !p_found) { + break; + } else { + tld->mStrings.PushBack(addr); + tld->mStrings.PushBack(addr_ph); + } + } + + // It's for sure that this is the ending '`'. + MASSERT(line[start_idx] == '`'); + curidx = start_idx + 1; + } + + return tld; +} + +// Find the pure string of a template literal. +// Set end_idx as the last char of string. +bool TypescriptLexer::FindNextTLFormat(unsigned start_idx, std::string &str, unsigned& end_idx) { + unsigned working_idx = start_idx; + while(1) { + if ((line[working_idx] == '$' && line[working_idx+1] == '{') + || line[working_idx] == '`' ){ + end_idx = working_idx - 1; + break; + } + + // Template Literal allows \n in format and place holder. 
+ // the \n was removed by Lexer in the beginning of ReadALine(); + if (working_idx == current_line_size) { + str += '\n'; + ReadALine(); + if (endoffile) + return false; + working_idx = 0; + continue; + } + + str += line[working_idx]; + working_idx++; + } + + if (str.size() > 0) + return true; + else + return false; +} + +// Find the pattern string of a template literal. +// Set end_idx as the last char of string. +// +// [NOTE] For nested template literals, we will matching the outmost +// template literal and treat the inner template literal as +// a plain string. The innter template literal will later +// be handled by the parsing of the outmost template literal. +// This makes things easier. +// +// [NOTE] We only support two level of nesting temp lit !! + +bool TypescriptLexer::FindNextTLPlaceHolder(unsigned start_idx, std::string& str, unsigned& end_idx) { + unsigned working_idx = start_idx; + if (line[working_idx] != '$' || line[working_idx+1] != '{') + return false; + + working_idx = start_idx + 2; + + // There could be {..} inside placeholder. + unsigned num_left_brace = 0; + + // There could be string literal inside placeholder, + bool in_string_literal = false; + + // There could be nested template literal inside a place holder. + bool in_nested_temp_lit = false; + bool waiting_right_brace = false; + unsigned num_left_brace_inner = 0; // there could be {..} for inner temp lit. + + while(1) { + + if (line[working_idx] == '`') { + if (in_nested_temp_lit) { + // finish inner temp lit. Need clear the status. + in_nested_temp_lit = false; + num_left_brace_inner = 0; + } else { + in_nested_temp_lit = true; + } + } else if (line[working_idx] == '$' && line[working_idx+1] == '{') { + MASSERT(in_nested_temp_lit); + str += line[working_idx]; + str += line[working_idx + 1]; + working_idx += 2; + waiting_right_brace = true; + continue; + } else if (line[working_idx] == '\'' || line[working_idx] == '\"') { + in_string_literal = in_string_literal ? false : true; + } else if (line[working_idx] == '{') { + if (!in_string_literal) { + if (in_nested_temp_lit) + num_left_brace_inner++; + else + num_left_brace++; + } + } else if (line[working_idx] == '}') { + if (!in_string_literal) { + if (waiting_right_brace) + waiting_right_brace = false; + else if (!in_nested_temp_lit && num_left_brace > 0) + num_left_brace--; + else if (in_nested_temp_lit && num_left_brace_inner > 0) + num_left_brace_inner--; + else + break; + } + } + + // Template Literal allows \n in format and place holder. + // the \n was removed by Lexer in the beginning of ReadALine(); + // + // I don't need worry about if the template literal is ended or not, + // since tsc guarantees it's correct. + if (working_idx == current_line_size) { + str += '\n'; + ReadALine(); + if (endoffile) + return false; + working_idx = 0; + } else { + str += line[working_idx]; + working_idx++; + } + } + + end_idx = working_idx - 1; + return true; +} + +// This is to catch TS triple-slash directives : /// IsOperator()) + return t; + + if (t->GetOprId() != OPR_Div) + return t; + + unsigned size = mActiveTokens.GetNum(); + if (size < 2) + return t; + + // We take care of only the following scenarios. + // If more need support, we will add later. 
+ // (/abc*/g, ) + // [/abc*/g, ] + // =/abc*/g; + // && /abc*/g; + // ,/abc*/g; + // : /abc*/g; + // || /abc*/g; + + Token *sep = mActiveTokens.ValueAtIndex(size - 1); + bool is_sep = false; + if (sep->IsSeparator() && (sep->GetSepId() == SEP_Lparen)) + is_sep = true; + if (sep->IsSeparator() && (sep->GetSepId() == SEP_Lbrack)) + is_sep = true; + if (sep->IsSeparator() && (sep->GetSepId() == SEP_Comma)) + is_sep = true; + if (sep->IsSeparator() && (sep->GetSepId() == SEP_Colon)) + is_sep = true; + if (sep->IsOperator() && (sep->GetOprId() == OPR_Assign)) + is_sep = true; + if (sep->IsOperator() && (sep->GetOprId() == OPR_Land)) + is_sep = true; + if (sep->IsOperator() && (sep->GetOprId() == OPR_Lor)) + is_sep = true; + if (!is_sep) + return t; + + Token *regexpr = mLexer->FindRegExprToken(); + if (regexpr) + t = regexpr; + + return t; +} + +// return true if t should be split into multiple tokens. +// [NOTE] t is not push into mActiveTokens yet. +// +// We will handle these cases specifically. +// +// We take care of only one scenarios right now.. +// typename= initval +// Look at the '>='. It first recognazied by lexer as GE, +// but it's actually a > and a =. +// +// Another case is +// typename= s; + +bool TypescriptParser::TokenSplit(Token *t) { + if (!t->IsOperator() || t->GetOprId() != OPR_GE) + return false; + unsigned size = mActiveTokens.GetNum(); + if (size < 2) + return false; + + Token *type_arg = mActiveTokens.ValueAtIndex(size - 1); + if (!type_arg->IsIdentifier()) + return false; + + Token *extends_token = FindKeywordToken("extends"); + + Token *lt = mActiveTokens.ValueAtIndex(size - 2); + + if (lt->Equal(extends_token)) { + // This is a good candidate. Do nothing + } else { + if (!lt->IsOperator() || lt->GetOprId() != OPR_LT) + return false; + + Token *type_name = mActiveTokens.ValueAtIndex(size - 3); + if (!type_name->IsIdentifier()) + return false; + } + + // Now we got a matching case. + Token *gt_token = FindOperatorToken(OPR_GT); + Token *assign_token = FindOperatorToken(OPR_Assign); + mActiveTokens.PushBack(gt_token); + mActiveTokens.PushBack(assign_token); + + if (mLexer->mTrace) { + std::cout << "Split >= to > and =" << std::endl; + } + + return true; +} + + +// 'appeal' is the node of 'rule_table'. +// 'child' was NULL when passed in. +bool TypescriptParser::TraverseASI(RuleTable *rule_table, + AppealNode *appeal, + AppealNode *&child) { + // Usually mCurToken is a new token to be matched. So if it's end of file, we simply return false. + // However, (1) if mCurToken is actually an ATMToken, which means it needs to be matched + // multiple times, we are NOT at the end yet. + // (2) If we are traverse a Concatenate rule, and the previous sub-rule has multiple matches, + // and we are trying the current sub-rule, ie. 'data', using one of the matches. + // The lexer actually reaches the EndOfFile in previous matchings, but the mCurToken + // we are working on right now is not the last token. It's one of the previous matches. + // So we need check if we are matching the last token. 
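  //
  // The checks below implement a limited form of automatic semicolon insertion.
  // For example, given the (illustrative) input
  //   let a = foo()
  //   let b = a
  // there is no ';' token after foo(), so when this ASI rule is reached the
  // line-crossing cases below treat the line break as an inserted semicolon.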
+ //if (mEndOfFile && mCurToken >= mActiveTokens.GetNum()) { + // if (!(mInAltTokensMatching && (mCurToken == mATMToken))) + // return false; + //} + + if (mCurToken <= 1) + return false; + + if (mEndOfFile && mCurToken == mActiveTokens.GetNum()) + return true; + + unsigned old_pos = mCurToken; + bool found = false; + Token *curr_token = GetActiveToken(mCurToken); + Token *prev_token = GetActiveToken(mCurToken - 1); + + MASSERT((rule_table->mNum == 1) && "ASI node has more than one elements?"); + + TableData *data = rule_table->mData; + MASSERT(data->mType == DT_Token && "ASI data is not a token?"); + + Token *semicolon = &gSystemTokens[data->mData.mTokenId]; + MASSERT(semicolon->IsSeparator()); + MASSERT(semicolon->GetSepId() == SEP_Semicolon); + + if (curr_token->Equal(semicolon)) { + // To simplify the code, I reused TraverseToken(). + found = TraverseToken(semicolon, appeal, child); + } else { + // case 1. We are crossing lines. + if (curr_token->mLineBegin && prev_token->mLineEnd) { + // If prev token (line end) is a separator + if (prev_token->IsSeparator() && + (prev_token->GetSepId() == SEP_Rbrace || + prev_token->GetSepId() == SEP_Rbrack || + prev_token->GetSepId() == SEP_Rparen)) { + if (mTraceTable) { + std::cout << "TraverseASI, Auto-insert one semicolon." << std::endl; + } + found = true; + } + + // if prev token is identifier or keyword AND the curr token is also id or keyword + // we can try to insert semicolon. A simple case is: + // a <-- we can insert semicolon + // console.log(); + if ( (prev_token->IsIdentifier() || prev_token->IsKeyword()) && + (curr_token->IsIdentifier() || curr_token->IsKeyword()) ){ + if (mTraceTable) { + std::cout << "TraverseASI, Auto-insert one semicolon." << std::endl; + } + found = true; + } + + if (found) + return found; + } + + // case 2. like: + // {foo()} <-- , is missed before } + if (curr_token->IsSeparator() && (curr_token->GetSepId() == SEP_Rbrace)) + return true; + + // case 3. This is a special case we want to catch: + // foo(x) a=b; <-- , is missing before a=b + // { foo(x) a=b; } <-- , is missing before a=b + // There could be many more similar cases, but we just match this special one in our + // unit test. We don't encourage people write weird code. + if ( (curr_token->IsIdentifier() || curr_token->IsKeyword()) && + (prev_token->IsSeparator() && (prev_token->GetSepId() == SEP_Rparen)) && + mActiveTokens.GetNum() > 4 ){ + Token *prev_2_token = GetActiveToken(mCurToken - 2); + Token *prev_3_token = GetActiveToken(mCurToken - 3); + Token *prev_4_token = GetActiveToken(mCurToken - 4); + + bool lbrace_ok = false; + if (mActiveTokens.GetNum() > 5){ + Token *prev_5_token = GetActiveToken(mCurToken - 5); + if (prev_5_token->IsSeparator() && (prev_5_token->GetSepId() == SEP_Lbrace)) + lbrace_ok = true; + } + + if ( (prev_4_token->mLineBegin || lbrace_ok) && + (prev_4_token->IsIdentifier() || prev_4_token->IsKeyword()) && + (prev_2_token->IsIdentifier() || prev_2_token->IsKeyword()) && + (prev_3_token->IsSeparator() && (prev_3_token->GetSepId() == SEP_Lparen)) ){ + // NOTE: Need make sure foo(x) is not if(x) or while(x). 
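        // For example, `if (x) a = b;` and `while (x) a = b;` (illustrative inputs)
        // are already complete statements, so no semicolon is inserted there.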
+ found = true; + if (prev_4_token->IsKeyword() && + (!strncmp(prev_4_token->GetName(), "if", 2) || + !strncmp(prev_4_token->GetName(), "while", 5)) ) + found = false; + + if (found) + return true; + } + } + } + + if (child) { + child->SetChildIndex(0); + appeal->CopyMatch(child); + } + + return found; +} + +} diff --git a/src/MapleFE/typescript/src/main.cpp b/src/MapleFE/typescript/src/main.cpp new file mode 100644 index 0000000000000000000000000000000000000000..a999a29d72b96978293c0b6f6127e023771e605b --- /dev/null +++ b/src/MapleFE/typescript/src/main.cpp @@ -0,0 +1,125 @@ +/* +* Copyright (C) [2020] Futurewei Technologies, Inc. All rights reverved. +* +* OpenArkFE is licensed under the Mulan PSL v2. +* You can use this software according to the terms and conditions of the Mulan PSL v2. +* You may obtain a copy of Mulan PSL v2 at: +* +* http://license.coscl.org.cn/MulanPSL2 +* +* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +* FIT FOR A PARTICULAR PURPOSE. +* See the Mulan PSL v2 for more details. +*/ + +#include // std::ofstream +#include "parser.h" +#include "token.h" +#include "common_header_autogen.h" +#include "ruletable_util.h" +#include "gen_summary.h" +#include "gen_aststore.h" +#include "gen_astdump.h" +#include "gen_astgraph.h" +#include "lang_spec.h" + +static void help() { + std::cout << "ts2ast sourcefile [options]:\n" << std::endl; + std::cout << " --help : print this help" << std::endl; + std::cout << " --trace-lexer : Trace lexing" << std::endl; + std::cout << " --trace-table : Trace rule table when entering and exiting" << std::endl; + std::cout << " --trace-left-rec : Trace left recursion parsing" << std::endl; + std::cout << " --trace-appeal : Trace appeal process" << std::endl; + std::cout << " --trace-failed : Trace failed tokens of table" << std::endl; + std::cout << " --trace-timing : Trace parsing time" << std::endl; + std::cout << " --trace-stack : Trace visited-token stack of table" << std::endl; + std::cout << " --trace-sortout : Trace SortOut" << std::endl; + std::cout << " --trace-ast-build : Trace AST Builder" << std::endl; + std::cout << " --trace-patch-was-succ : Trace Patching of WasSucc nodes" << std::endl; + std::cout << " --trace-warning : Print Warning" << std::endl; + std::cout << " --dump-ast : Dump AST in text format" << std::endl; + std::cout << " --dump-dot : Dump AST in dot format" << std::endl; +} + +int main (int argc, char *argv[]) { + if (argc == 1 || (!strncmp(argv[1], "--help", 6) && (strlen(argv[1]) == 6))) { + help(); + exit(-1); + } + + maplefe::Parser *parser = new maplefe::TypescriptParser(argv[1]); + + bool dump_ast = false; + bool dump_dot = false; + bool succ; + + // Parse the argument + for (unsigned i = 2; i < argc; i++) { + if (!strncmp(argv[i], "--trace-lexer", 13) && (strlen(argv[i]) == 13)) { + parser->SetLexerTrace(); + } else if (!strncmp(argv[i], "--trace-table", 13) && (strlen(argv[i]) == 13)) { + parser->mTraceTable = true; + } else if (!strncmp(argv[i], "--trace-left-rec", 16) && (strlen(argv[i]) == 16)) { + parser->mTraceLeftRec = true; + } else if (!strncmp(argv[i], "--trace-appeal", 14) && (strlen(argv[i]) == 14)) { + parser->mTraceAppeal = true; + } else if (!strncmp(argv[i], "--trace-stack", 13) && (strlen(argv[i]) == 13)) { + parser->mTraceVisited = true; + } else if (!strncmp(argv[i], "--trace-failed", 14) && (strlen(argv[i]) == 14)) { + parser->mTraceFailed = true; + } else if 
(!strncmp(argv[i], "--trace-timing", 14) && (strlen(argv[i]) == 14)) { + parser->mTraceTiming = true; + } else if (!strncmp(argv[i], "--trace-sortout", 15) && (strlen(argv[i]) == 15)) { + parser->mTraceSortOut = true; + } else if (!strncmp(argv[i], "--trace-ast-build", 17) && (strlen(argv[i]) == 17)) { + parser->mTraceAstBuild = true; + } else if (!strncmp(argv[i], "--trace-patch-was-succ", 22) && (strlen(argv[i]) == 22)) { + parser->mTracePatchWasSucc = true; + } else if (!strncmp(argv[i], "--trace-warning", 15) && (strlen(argv[i]) == 15)) { + parser->mTraceWarning = true; + } else if (!strncmp(argv[i], "--dump-ast", 10) && (strlen(argv[i]) == 10)) { + dump_ast = true; + } else if (!strncmp(argv[i], "--dump-dot", 10) && (strlen(argv[i]) == 10)) { + dump_dot = true; + } else { + std::cerr << "unknown option " << argv[i] << std::endl; + exit(-1); + } + } + + parser->InitRecursion(); + succ = parser->Parse(); + if (!succ) { + delete parser; + return 1; + } + + // the module from parser + maplefe::ModuleNode *module = parser->GetModule(); + + if(dump_ast) { + maplefe::AstDump astdump(module); + astdump.Dump("ts2ast: Initial AST", &std::cout); + } + + if(dump_dot) { + maplefe::AstGraph graph(module); + graph.DumpGraph("ts2ast: Initial AST", &std::cout); + } + + maplefe::AstStore saveAst(module); + saveAst.StoreInAstBuf(); + maplefe::AstBuffer &ast_buf = saveAst.GetAstBuf(); + + std::ofstream ofs; + std::string fname(module->GetFilename()); + fname += ".ast"; + ofs.open(fname, std::ofstream::out); + const char *addr = (const char *)(&(ast_buf[0])); + ofs.write(addr, ast_buf.size()); + ofs.close(); + + delete parser; + return 0; +} diff --git a/src/MapleFE/typescript/stmt.spec b/src/MapleFE/typescript/stmt.spec new file mode 100644 index 0000000000000000000000000000000000000000..4bce8140eb6f1eb77e346f706f1f7210234aba67 --- /dev/null +++ b/src/MapleFE/typescript/stmt.spec @@ -0,0 +1,2491 @@ +# Copyright (C) [2021] Futurewei Technologies, Inc. All rights reverved. +# Copyright 2022 Tencent. All rights reverved. +# +# MapleFE is licensed under the Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# + +#------------------------------------------------------------------------------- +# A.1 Lexical Grammar +#------------------------------------------------------------------------------- + +#### Template and TemplateLiteral are too complicated to be described +#### in rules. We handle them specifically in the source code of lexer. 
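#### For example, for an illustrative input such as
####   console.log(`sum is ${a + b}`)
#### the lexer's GetTempLit() in typescript/src/lang_spec.cpp collects the format
#### part "sum is " and the placeholder text "a + b" as a string pair; a nested
#### template literal inside a placeholder is kept as plain text and handled when
#### the outer template literal is processed.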
+ +##Template :: +##NoSubstitutionTemplate +##TemplateHead + +##NoSubstitutionTemplate :: +##` TemplateCharactersopt ` + +##TemplateHead :: +##` TemplateCharactersopt ${ + +##See 11.8.6 +##TemplateSubstitutionTail :: +##TemplateMiddle +##TemplateTail +##See 11.8.6 +##TemplateMiddle :: +##} TemplateCharactersopt ${ +##See 11.8.6 +##TemplateTail :: +##} TemplateCharactersopt ` + +##TemplateCharacters :: +##TemplateCharacter TemplateCharactersopt + +##TemplateCharacter :: +##$ [lookahead ≠ { ] +##\ EscapeSequence +##LineContinuation +##LineTerminatorSequence +##SourceCharacter but not one of ` or \ or $ or LineTerminator + +#------------------------------------------------------------------------------- +# A.2 Expressions +#------------------------------------------------------------------------------- + +rule KeywordIdentifier : ONEOF("type", + "with", + "boolean", + "string", + "catch", + "get", + "set", + "undefined", + "never", + "number", + "symbol", + "unique", + "any", + "constructor", + "delete", + "abstract", + "private", + "static", + "as", + "async", + "await", + "finally", + "from", + "is", + "in", + "of", + "declare", + "readonly", + "debugger", + "default", + "namespace", + "module", + "switch", + "infer", + "asserts", + "require", + "global", + "throw", + "class", + "unknown", + "do", + "for") +## " + attr.action : BuildIdentifier() + +rule JSIdentifier: ONEOF(Identifier, + KeywordIdentifier, + Identifier + '!', + Identifier + '?', + KeywordIdentifier + '?') + attr.action.%3 : SetIsNonNull(%1) + attr.action.%4 : SetIsOptional(%1) + attr.action.%5 : SetIsOptional(%1) + +rule AsType : "as" + Type + attr.action : BuildAsType(%2) + +##----------------------------------- +##rule IdentifierReference[Yield] : +## Identifier +## [~Yield] yield + +rule IdentifierReference : ONEOF( + JSIdentifier) +# "yield") + +##----------------------------------- +##rule BindingIdentifier[Yield] : +## Identifier +## [~Yield] yield + +rule BindingIdentifier : ONEOF(JSIdentifier) + +##----------------------------------- +##rule LabelIdentifier[Yield] : +## Identifier +## [~Yield] yield +rule LabelIdentifier : ONEOF( + JSIdentifier) + +##----------------------------------- +##rule Identifier : +## IdentifierName but not ReservedWord +## +## Identifier and IdentifierName are tricky in Javascript. +## (1) Some 'keywords' like 'get', 'set', should be keyword and reserved, +## however, they are allowed as identifiers. +## (2) Identifier is a reserved rule in 'autogen', we won't define it +## in this spec. +## +## I decided to use JSIdentifier instead of Identifier and then I +## can include 'get', 'set' in the JSIdentifier. + +##----------------------------------- +##rule PrimaryExpression[Yield] : +## this +## IdentifierReference[?Yield] +## Literal +## ArrayLiteral[?Yield] +## ObjectLiteral[?Yield] +## FunctionExpression +## ClassExpression[?Yield] +## GeneratorExpression +## RegularExpressionLiteral +## TemplateLiteral[?Yield] +## CoverParenthesizedExpressionAndArrowParameterList[?Yield] + +rule PrimaryExpression : ONEOF("this", + "super", + IdentifierReference, + Literal, + ArrayLiteral, + ObjectLiteral, + FunctionExpression, +# ClassExpression[?Yield] +# GeneratorExpression + RegularExpression, + TemplateLiteral, + ParenthesizedExpression) + +##----------------------------------- +##rule CoverParenthesizedExpressionAndArrowParameterList[Yield] : +## ( Expression[In, ?Yield] ) +## ( ) +## ( ... BindingIdentifier[?Yield] ) +## ( Expression[In, ?Yield] , ... 
BindingIdentifier[?Yield] ) +## When processing the production +## PrimaryExpression[Yield] : CoverParenthesizedExpressionAndArrowParameterList[?Yield] +## the interpretation of CoverParenthesizedExpressionAndArrowParameterList is refined using the following grammar: +## ParenthesizedExpression[Yield] : +## ( Expression[In, ?Yield] ) +rule ParenthesizedExpression : '(' + Expression + ')' + +rule CoverParenthesizedExpressionAndArrowParameterList : ONEOF( + '(' + Expression + ')', + '(' + ')', + '(' + "..." + BindingIdentifier + ')', + '(' + "..." + BindingPattern + ')', + '(' + Expression + ',' + "..." + BindingIdentifier + ')', + '(' + Expression + ',' + "..." + BindingPattern + ')') + +##----------------------------------- + +##----------------------------------- +##rule Literal : +## NullLiteral +## BooleanLiteral +## NumericLiteral +## StringLiteral + +# Literal is handled in lexer as a token. Also we don't do any Lookahead detect or recursion detect +# inside Literal. So it means parsing stops at Literal. This works for most languages. +# +# NullLiteral, BooleanLiteral, NumericLiteral, StringLiteral can be handled specifically in parser +# to see if it's a string literal. + +##----------------------------------- +##rule ArrayLiteral[Yield] : +## [ Elisionopt ] +## [ ElementList[?Yield] ] +## [ ElementList[?Yield] , Elisionopt ] +rule ArrayLiteral : ONEOF( + '[' + ZEROORONE(Elision) + ']' + '[' + ElementList + ']' + '[' + ElementList + ',' + ZEROORONE(Elision) + ']') + attr.action.%1,%2,%3 : BuildArrayLiteral(%2) + +##----------------------------------- +##rule ElementList[Yield] : +## Elisionopt AssignmentExpression[In, ?Yield] +## Elisionopt SpreadElement[?Yield] +## ElementList[?Yield] , Elisionopt AssignmentExpression[In, ?Yield] +## ElementList[?Yield] , Elisionopt SpreadElement[?Yield] +rule ElementList : ONEOF( + ZEROORONE(Elision) + AssignmentExpression, + ZEROORONE(Elision) + SpreadElement, + ElementList + ',' + ZEROORONE(Elision) + AssignmentExpression, + ElementList + ',' + ZEROORONE(Elision) + SpreadElement) + attr.action.%1,%2 : PassChild(%2) + attr.action.%3,%4 : BuildExprList(%1, %4) + +##----------------------------------- +##rule Elision : +## , +## Elision , + +rule Elision : ONEOF(',', + Elision + ',') + +##----------------------------------- +##rule SpreadElement[Yield] : +## ... AssignmentExpression[In, ?Yield] +rule SpreadElement : "..." 
+ AssignmentExpression + ZEROORONE(TypeArguments) + attr.action : SetIsRest(%2) + attr.action : PassChild(%2) + +##----------------------------------- +##rule ObjectLiteral[Yield] : +## { } +## { PropertyDefinitionList[?Yield] } +## { PropertyDefinitionList[?Yield] , } +rule ObjectLiteral : ONEOF('{' + '}', + '{' + PropertyDefinitionList + '}', + '{' + PropertyDefinitionList + ',' + '}') + attr.action.%1 : BuildStructLiteral() + attr.action.%2,%3 : BuildStructLiteral(%2) + +##----------------------------------- +##rule PropertyDefinitionList[Yield] : +## PropertyDefinition[?Yield] +## PropertyDefinitionList[?Yield] , PropertyDefinition[?Yield] +rule PropertyDefinitionList : ONEOF( + PropertyDefinition, + PropertyDefinitionList + ',' + PropertyDefinition) + +##----------------------------------- +# modified in 2016 +##rule PropertyDefinition[Yield] : +## IdentifierReference[?Yield] +## CoverInitializedName[?Yield] +## PropertyName[?Yield] : AssignmentExpression[In, ?Yield] +## MethodDefinition[?Yield] + +##----------------------------------- +##rule PropertyName[Yield] : +## LiteralPropertyName +## ComputedPropertyName[?Yield] +rule PropertyName : ONEOF(LiteralPropertyName, + ComputedPropertyName) + +##----------------------------------- +##rule LiteralPropertyName : +## IdentifierName +## StringLiteral +## NumericLiteral + +# I used Identifier instead of IdentifierName because keywords +# are processed before Identifier, and so Identifier is the same +# as IdentifierName here. +# +# I extend StringLiteral/NumericLiteral to Literal. 'tsc' will +# make sure it's legal and I don't need worry about it. + +rule LiteralPropertyName : ONEOF(JSIdentifier, Literal) + +##----------------------------------- +##rule ComputedPropertyName[Yield] : +## [ AssignmentExpression[In, ?Yield] ] +rule ComputedPropertyName : ONEOF( + '[' + AssignmentExpression + ']', + ZEROORONE(IndexSigPrefix) + ZEROORONE(IndexSigModifier) + '[' + AssignmentExpression + ']' + ZEROORONE(IndexSigPrefix) + ZEROORONE(IndexSigModifier)) + attr.action.%1 : BuildComputedName(%2) + attr.action.%2 : BuildComputedName(%4) + attr.action.%2 : AddModifier(%1, %2) + attr.action.%2 : AddModifier(%6, %7) + +##----------------------------------- +##rule CoverInitializedName[Yield] : +## IdentifierReference[?Yield] Initializer[In, ?Yield] +rule CoverInitializedName : IdentifierReference + Initializer + +##----------------------------------- +##rule Initializer[In, Yield] : +## = AssignmentExpression[?In, ?Yield] +rule Initializer : '=' + AssignmentExpression + +##----------------------------------- +##rule TemplateLiteral[Yield] : +## NoSubstitutionTemplate +## TemplateHead Expression[In, ?Yield] TemplateSpans[?Yield] +## +## NOTE: TemplateLiteral will be handled specifically in lexer code. +## rule TemplateLiteral : "this_is_for_fake_rule" is defined in reserved.spec + +##----------------------------------- +##rule TemplateSpans[Yield] : +## TemplateTail +## TemplateMiddleList[?Yield] TemplateTail + +##----------------------------------- +##rule TemplateMiddleList[Yield] : +## TemplateMiddle Expression[In, ?Yield] +## TemplateMiddleList[?Yield] TemplateMiddle Expression[In, ?Yield] + +##----------------------------------- +##rule MemberExpression[Yield] : +## PrimaryExpression[?Yield] +## MemberExpression[?Yield] [ Expression[In, ?Yield] ] +## MemberExpression[?Yield] . 
IdentifierName +## MemberExpression[?Yield] TemplateLiteral[?Yield] +## SuperProperty[?Yield] +## MetaProperty +## new MemberExpression[?Yield] Arguments[?Yield] + +rule KeywordPropName : ONEOF("break", + "this", + "public", + "export", + "const", + "if", + "try", + "else", + "continue", + "implements", + "enum", + "function", + "let", + "return", + "extends", + "import", + "get", + "set", + "var") + attr.action : BuildIdentifier() + +rule MemberExpression : ONEOF( + PrimaryExpression + ZEROORMORE(AsType), + MemberExpression + '[' + Expression + ']' + ZEROORMORE(AsType), + MemberExpression + '.' + JSIdentifier + ZEROORMORE(AsType), + MemberExpression + "?." + JSIdentifier + ZEROORMORE(AsType), + MemberExpression + TemplateLiteral, +# SuperProperty[?Yield] +# MetaProperty + "new" + MemberExpression + ZEROORONE(Arguments), +# NOTE: I created this rule. Typescript extended Type system and allow 'new' +# on a TypeReference + "new" + TypeReference + ZEROORONE(Arguments), + MemberExpression + "?." + '[' + Expression + ']' + ZEROORMORE(AsType), + IsExpression, + MemberExpression + '[' + KeyOf + ']', + MemberExpression + '!', + MemberExpression + "as" + "const", + '<' + Type + '>' + MemberExpression, + MemberExpression + '.' + KeywordPropName) + attr.action.%1 : AddAsType(%1, %2) + attr.action.%2 : BuildArrayElement(%1, %3) + attr.action.%2 : AddAsType(%5) + attr.action.%3 : BuildField(%1, %3) + attr.action.%3 : AddAsType(%4) + attr.action.%4 : SetIsOptional(%1) + attr.action.%4 : BuildField(%1, %3) + attr.action.%4 : AddAsType(%4) + attr.action.%6,%7 : BuildNewOperation(%2, %3) + attr.action.%8 : SetIsOptional(%1) + attr.action.%8 : BuildArrayElement(%1, %4) + attr.action.%8 : AddAsType(%6) + attr.action.%10: BuildArrayElement(%1, %3) + attr.action.%11: SetIsNonNull(%1) + attr.action.%12: SetIsConst(%1) + attr.action.%13: BuildCast(%2, %4) + attr.action.%14 : BuildField(%1, %3) + +rule IsExpression: ONEOF(PrimaryExpression + "is" + Type, + ArrowFunction + "is" + Type) + attr.action.%1,%2 : BuildIs(%1, %3) + +rule AssertExpression : "asserts" + MemberExpression + attr.action : BuildAssert(%2) + +##----------------------------------- +##rule SuperProperty[Yield] : +## super [ Expression[In, ?Yield] ] +## super . IdentifierName + +##----------------------------------- +##rule MetaProperty : +## NewTarget + +##----------------------------------- +##rule NewTarget : +## new . target + +##----------------------------------- +##rule NewExpression[Yield] : +## MemberExpression[?Yield] +## new NewExpression[?Yield] +rule NewExpression : ONEOF(MemberExpression, + "new" + NewExpression, + "new" + ClassDeclaration, + "new" + '(' + ClassDeclaration + ')') + attr.action.%2,%3 : BuildNewOperation(%2) + attr.action.%4 : BuildNewOperation(%3) + +##----------------------------------- +##rule CallExpression[Yield] : +## MemberExpression[?Yield] Arguments[?Yield] +## SuperCall[?Yield] +## CallExpression[?Yield] Arguments[?Yield] +## CallExpression[?Yield] [ Expression[In, ?Yield] ] +## CallExpression[?Yield] . IdentifierName +## CallExpression[?Yield] TemplateLiteral[?Yield] + +rule ImportFunction : ONEOF("import" + '(' + Literal + ')', + "import" + '(' + TemplateLiteral + ')') + attr.action.%1,%2 : BuildImport() + attr.action.%1,%2 : SetFromModule(%3) + +rule CallExpression : ONEOF( + MemberExpression + ZEROORONE(TypeArguments) + Arguments + ZEROORMORE(AsType), + SuperCall, + CallExpression + Arguments + ZEROORMORE(AsType), + CallExpression + '[' + Expression + ']' + ZEROORONE(AsType), + CallExpression + '.' 
+ JSIdentifier + ZEROORMORE(AsType), + CallExpression + TemplateLiteral, + CallExpression + '!' + ZEROORMORE(AsType), + CallExpression + "?." + JSIdentifier + ZEROORMORE(AsType), + MemberExpression + "?." + ZEROORONE(TypeArguments) + Arguments + ZEROORMORE(AsType), + "set" + ZEROORONE(TypeArguments) + Arguments + ZEROORMORE(AsType), + "get" + ZEROORONE(TypeArguments) + Arguments + ZEROORMORE(AsType), + CallExpression + "?." + Arguments + ZEROORMORE(AsType), + ImportFunction, + CallExpression + '.' + KeywordPropName + ZEROORMORE(AsType)) + attr.action.%1,%3,%10,%11 : BuildCall(%1) + attr.action.%1,%10,%11 : AddAsType(%4) + attr.action.%1,%10,%11 : AddTypeGenerics(%2) + attr.action.%1,%10,%11 : AddArguments(%3) + attr.action.%3 : AddArguments(%2) + attr.action.%3 : AddAsType(%3) + attr.action.%4 : BuildArrayElement(%1, %3) + attr.action.%4 : AddAsType(%5) + attr.action.%5,%14 : BuildField(%1, %3) + attr.action.%5,%14 : AddAsType(%4) + attr.action.%7 : SetIsNonNull(%1) + attr.action.%7 : AddAsType(%1, %3) + attr.action.%8 : SetIsOptional(%1) + attr.action.%8 : BuildField(%1, %3) + attr.action.%8 : AddAsType(%4) + attr.action.%9 : SetIsOptional(%1) + attr.action.%9 : BuildCall(%1) + attr.action.%9 : AddTypeGenerics(%3) + attr.action.%9 : AddArguments(%4) + attr.action.%9 : AddAsType(%5) + attr.action.%12: SetIsOptional(%1) + attr.action.%12: BuildCall(%1) + attr.action.%12: AddArguments(%3) + attr.action.%12: AddAsType(%4) + +##----------------------------------- +##rule SuperCall[Yield] : +## super Arguments[?Yield] +rule SuperCall : "super" + Arguments + +##----------------------------------- +##rule Arguments[Yield] : +## ( ) +## ( ArgumentList[?Yield] ) + +rule Arguments : ONEOF( + '(' + ')', + '(' + ArgumentList + ')') + attr.action.%2 : PassChild(%2) + +##----------------------------------- +##rule ArgumentList[Yield] : +## AssignmentExpression[In, ?Yield] +## ... AssignmentExpression[In, ?Yield] +## ArgumentList[?Yield] , AssignmentExpression[In, ?Yield] +## ArgumentList[?Yield] , ... AssignmentExpression[In, ?Yield] + +## child #3, I added ZEROORONE() since ECMAScript 2017 allows empty argument after ','. +rule ArgumentList : ONEOF(AssignmentExpression, + "..." + AssignmentExpression, + ArgumentList + ',' + ZEROORONE(AssignmentExpression), + ArgumentList + ',' + "..." + AssignmentExpression) + attr.action.%2 : SetIsRest(%2) + attr.action.%4 : SetIsRest(%4) + +##----------------------------------- +##rule LeftHandSideExpression[Yield] : +## NewExpression[?Yield] +## CallExpression[?Yield] + +rule LeftHandSideExpression : ONEOF(NewExpression, + CallExpression, + "..." + NewExpression, + "..." 
+ CallExpression, + "await" + CallExpression, + "await" + NewExpression) + attr.action.%3,%4 : SetIsRest(%2) + attr.action.%5,%6 : BuildAwait(%2) + +##----------------------------------- +##rule PostfixExpression[Yield] : +## LeftHandSideExpression[?Yield] +## LeftHandSideExpression[?Yield] [no LineTerminator here] ++ +## LeftHandSideExpression[?Yield] [no LineTerminator here] -- + +rule PostfixExpression : ONEOF( + LeftHandSideExpression, + LeftHandSideExpression + "++", + LeftHandSideExpression + "--") + attr.action.%2,%3 : BuildPostfixOperation(%2, %1) + +##----------------------------------- +##rule UnaryExpression[Yield] : +## PostfixExpression[?Yield] +## delete UnaryExpression[?Yield] +## void UnaryExpression[?Yield] +## typeof UnaryExpression[?Yield] +## ++ UnaryExpression[?Yield] +## -- UnaryExpression[?Yield] +## + UnaryExpression[?Yield] +## - UnaryExpression[?Yield] +## ~ UnaryExpression[?Yield] +## ! UnaryExpression[?Yield] + +rule UnaryExpression : ONEOF( + PostfixExpression, + "delete" + UnaryExpression, + "void" + UnaryExpression, + "typeof" + UnaryExpression, + "++" + UnaryExpression, + "--" + UnaryExpression, + '+' + UnaryExpression, + '-' + UnaryExpression, + '~' + UnaryExpression, + '!' + UnaryExpression, + "typeof" + '(' + ClassDeclaration + ')') + attr.action.%2 : BuildDeleteOperation(%2) + attr.action.%3 : BuildLiteral(%1) + attr.action.%4 : BuildTypeOf(%2) + attr.action.%5,%6,%7,%8,%9,%10 : BuildUnaryOperation(%1, %2) + attr.action.%11 : BuildTypeOf(%3) + +## UpdateExpression[Yield]: +## LeftHandSideExpression[?Yield] +## LeftHandSideExpression[?Yield][no LineTerminator here]++ +## LeftHandSideExpression[?Yield][no LineTerminator here]-- +## ++UnaryExpression[?Yield] +## --UnaryExpression[?Yield] +rule UpdateExpression : ONEOF(LeftHandSideExpression, + LeftHandSideExpression + "++", + LeftHandSideExpression + "--", + "++" + UnaryExpression, + "--" + UnaryExpression) + attr.action.%2,%3 : BuildPostfixOperation(%2, %1) + attr.action.%4,%5: BuildUnaryOperation(%1, %2) + + +## Added in 2016 +## ExponentiationExpression[Yield]: +## UnaryExpression[?Yield] +## UpdateExpression[?Yield]**ExponentiationExpression[?Yield] +rule ExponentiationExpression : ONEOF(UnaryExpression, + UpdateExpression + "**" + ExponentiationExpression) + attr.action.%2 : BuildBinaryOperation(%1, %2, %3) + +##----------------------------------- +##rule MultiplicativeExpression[Yield] : +## UnaryExpression[?Yield] +## MultiplicativeExpression[?Yield] MultiplicativeOperator UnaryExpression[?Yield] +## 2016 +## MultiplicativeExpression[Yield]: +## ExponentiationExpression[?Yield] +## MultiplicativeExpression[?Yield]MultiplicativeOperatorExponentiationExpression[?Yield] + +rule MultiplicativeExpression : ONEOF( + ExponentiationExpression, + MultiplicativeExpression + MultiplicativeOperator + ExponentiationExpression) + attr.action.%2 : BuildBinaryOperation(%1, %2, %3) + +##----------------------------------- +##rule MultiplicativeOperator : one of +## * / % + +rule MultiplicativeOperator : ONEOF( '*', '/', '%') + +##----------------------------------- +##rule AdditiveExpression[Yield] : +## MultiplicativeExpression[?Yield] +## AdditiveExpression[?Yield] + MultiplicativeExpression[?Yield] +## AdditiveExpression[?Yield] - MultiplicativeExpression[?Yield] + +rule AdditiveExpression : ONEOF( + MultiplicativeExpression, + AdditiveExpression + '+' + MultiplicativeExpression, + AdditiveExpression + '-' + MultiplicativeExpression) + attr.action.%2,%3 : BuildBinaryOperation(%1, %2, %3) + 
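+## NOTE (illustration). The Update/Exponentiation/Multiplicative/Additive layering above
+## encodes operator precedence by nesting: each rule only refers to the next tighter level,
+## and BuildUnaryOperation/BuildBinaryOperation attach the operator at the matching level.
+## A hypothetical TypeScript expression:
+##     -a + b * c ** 2      // groups as (-a) + (b * (c ** 2)); '**' is right-associative
+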
+##----------------------------------- +##rule ShiftExpression[Yield] : +## AdditiveExpression[?Yield] +## ShiftExpression[?Yield] << AdditiveExpression[?Yield] +## ShiftExpression[?Yield] >> AdditiveExpression[?Yield] +## ShiftExpression[?Yield] >>> AdditiveExpression[?Yield] +rule ShiftExpression : ONEOF(AdditiveExpression, + ShiftExpression + "<<" + AdditiveExpression, + ShiftExpression + ">>" + AdditiveExpression, + ShiftExpression + ">>>" + AdditiveExpression) + attr.action.%2,%3,%4 : BuildBinaryOperation(%1, %2, %3) + +##----------------------------------- +##rule RelationalExpression[In, Yield] : +## ShiftExpression[?Yield] +## RelationalExpression[?In, ?Yield] < ShiftExpression[?Yield] +## RelationalExpression[?In, ?Yield] > ShiftExpression[?Yield] +## RelationalExpression[?In, ?Yield] <= ShiftExpression[? Yield] +## RelationalExpression[?In, ?Yield] >= ShiftExpression[?Yield] +## RelationalExpression[?In, ?Yield] instanceof ShiftExpression[?Yield] +## [+In] RelationalExpression[In, ?Yield] in ShiftExpression[?Yield] + +rule InExpression : RelationalExpression + "in" + ONEOF(ShiftExpression, Type) +## " + attr.action : BuildIn(%1, %3) + +rule RelationalExpression : ONEOF(ShiftExpression, + RelationalExpression + '<' + ShiftExpression, + RelationalExpression + '>' + ShiftExpression, + RelationalExpression + "<=" + ShiftExpression, + RelationalExpression + ">=" + ShiftExpression, + RelationalExpression + "instanceof" + ShiftExpression, + ClassDeclaration + "instanceof" + ShiftExpression, + InExpression) + attr.property.%3 : NoAltToken + attr.action.%2,%3,%4,%5 : BuildBinaryOperation(%1, %2, %3) + attr.action.%6,%7 : BuildInstanceOf(%1, %3) + + +##----------------------------------- +##rule EqualityExpression[In, Yield] : +## RelationalExpression[?In, ?Yield] +## EqualityExpression[?In, ?Yield] == RelationalExpression[?In, ?Yield] +## EqualityExpression[?In, ?Yield] != RelationalExpression[?In, ?Yield] +## EqualityExpression[?In, ?Yield] === RelationalExpression[?In, ?Yield] +## EqualityExpression[?In, ?Yield] !== RelationalExpression[?In, ?Yield] + +rule EqualityExpression : ONEOF( + RelationalExpression, + EqualityExpression + "==" + RelationalExpression, + EqualityExpression + "!=" + RelationalExpression, + EqualityExpression + "===" + RelationalExpression, + EqualityExpression + "!==" + RelationalExpression) + attr.action.%2,%3,%4,%5 : BuildBinaryOperation(%1, %2, %3) + +##----------------------------------- +##rule BitwiseANDExpression[In, Yield] : +## EqualityExpression[?In, ?Yield] +## BitwiseANDExpression[?In, ?Yield] & EqualityExpression[?In, ?Yield] + +rule BitwiseANDExpression : ONEOF( + EqualityExpression, + BitwiseANDExpression + '&' + EqualityExpression) + attr.action.%2 : BuildBinaryOperation(%1, %2, %3) + +##----------------------------------- +##rule BitwiseXORExpression[In, Yield] : +## BitwiseANDExpression[?In, ?Yield] +## BitwiseXORExpression[?In, ?Yield] ^ BitwiseANDExpression[?In, ?Yield] + +rule BitwiseXORExpression : ONEOF( + BitwiseANDExpression, + BitwiseXORExpression + '^' + BitwiseANDExpression) + attr.action.%2 : BuildBinaryOperation(%1, %2, %3) + +##----------------------------------- +##rule BitwiseORExpression[In, Yield] : +## BitwiseXORExpression[?In, ?Yield] +## BitwiseORExpression[?In, ?Yield] | BitwiseXORExpression[?In, ?Yield] + +rule BitwiseORExpression : ONEOF( + BitwiseXORExpression, + BitwiseORExpression + '|' + BitwiseXORExpression) + attr.action.%2 : BuildBinaryOperation(%1, %2, %3) + +##----------------------------------- +##rule 
LogicalANDExpression[In, Yield] : +## BitwiseORExpression[?In, ?Yield] +## LogicalANDExpression[?In, ?Yield] && BitwiseORExpression[?In, ?Yield] + +rule LogicalANDExpression : ONEOF( + BitwiseORExpression, + LogicalANDExpression + "&&" + BitwiseORExpression) + attr.action.%2 : BuildBinaryOperation(%1, %2, %3) + +##----------------------------------- +##rule LogicalORExpression[In, Yield] : +## LogicalANDExpression[?In, ?Yield] +## LogicalORExpression[?In, ?Yield] || LogicalANDExpression[?In, ?Yield] + +rule LogicalORExpression : ONEOF( + LogicalANDExpression, + LogicalORExpression + "||" + LogicalANDExpression) + attr.action.%2 : BuildBinaryOperation(%1, %2, %3) + +##----------------------------------- +##rule ConditionalExpression[In, Yield] : +## LogicalORExpression[?In, ?Yield] +## LogicalORExpression[?In,?Yield] ? AssignmentExpression[In, ?Yield] : AssignmentExpression[?In, ?Yield] +rule ConditionalExpression : ONEOF( + LogicalORExpression, + LogicalORExpression + '?' + AssignmentExpression + ':' + AssignmentExpression, + ConditionalExpression + "??" + ConditionalExpression) + attr.action.%2 : BuildTernaryOperation(%1, %3, %5) + attr.action.%3 : BuildBinaryOperation(%1, %2, %3) + +##----------------------------------- +##rule AssignmentExpression[In, Yield] : +## ConditionalExpression[?In, ?Yield] +## [+Yield] YieldExpression[?In] +## ArrowFunction[?In, ?Yield] +## LeftHandSideExpression[?Yield] = AssignmentExpression[?In, ?Yield] +## LeftHandSideExpression[?Yield] AssignmentOperator AssignmentExpression[?In, ?Yield] + +rule AssignmentExpression : ONEOF( + ConditionalExpression, + YieldExpression, + ArrowFunction, + LeftHandSideExpression + '=' + AssignmentExpression, + LeftHandSideExpression + AssignmentOperator + AssignmentExpression) + attr.action.%4,%5 : BuildAssignment(%1, %2, %3) + +rule AssignmentOperator : ONEOF("*=", "/=", "%=", "+=", "-=", "<<=", ">>=", ">>>=", "&=", "^=", "|=", "??=") + +##----------------------------------- +##rule Expression[In, Yield] : +## AssignmentExpression[?In, ?Yield] +## Expression[?In, ?Yield] , AssignmentExpression[?In, ?Yield] + +## NOTE. I added "undefined" to expression because "undefined" is both a type and +## a value in Typescript. This is a weird rule. 
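+## A minimal TypeScript illustration of that ambiguity (hypothetical names):
+##     let u: undefined;   // 'undefined' used as a type annotation
+##     u = undefined;      // 'undefined' used as a value expression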
+rule Expression : ONEOF( + AssignmentExpression, + Expression + ',' + AssignmentExpression, + "undefined") + +#------------------------------------------------------------------------------- +# Statements +#------------------------------------------------------------------------------- + +rule TripleSlash : ONEOF( "///" + '<' + "reference" + "path" + '=' + Literal + '/' + '>', + "///" + '<' + "reference" + "types" + '=' + Literal + '/' + '>', + "///" + '<' + "reference" + "lib" + '=' + Literal + '/' + '>', + "///" + '<' + "reference" + "no-default-lib" + '=' + Literal + '/' + '>') + attr.action.%1,%2,%3,%4 : BuildTripleSlash(%4, %6) + +##----------------------------------- +##rule Statement[Yield, Return] : +## BlockStatement[?Yield, ?Return] +## VariableStatement[?Yield] +## EmptyStatement +## ExpressionStatement[?Yield] +## IfStatement[?Yield, ?Return] +## BreakableStatement[?Yield, ?Return] +## ContinueStatement[?Yield] +## BreakStatement[?Yield] +## [+Return] ReturnStatement[?Yield] +## WithStatement[?Yield, ?Return] +## LabelledStatement[?Yield, ?Return] +## ThrowStatement[?Yield] +## TryStatement[?Yield, ?Return] +## DebuggerStatement + +rule Statement : ONEOF( + BlockStatement, + VariableStatement, + EmptyStatement, + ExpressionStatement, + IfStatement, + BreakableStatement, + ContinueStatement, + BreakStatement, + ReturnStatement, +# WithStatement[?Yield, ?Return] + LabelledStatement, + ThrowStatement, + TryStatement, + TripleSlash) +# DebuggerStatement + attr.property : Top + attr.property : Single # This is extremely important to give CallExpression the + # last chance IFF all previous rules fail. + +##----------------------------------- +##rule Declaration[Yield] : +## HoistableDeclaration[?Yield] +## ClassDeclaration[?Yield] +## LexicalDeclaration[In, ?Yield] + +## NOTE. 
Typescript added InterfaceDeclaration, TypeAliasDeclaration, EnumDeclaration +rule Declaration : ONEOF(HoistableDeclaration, + ClassDeclaration, + LexicalDeclaration + ZEROORONE(';'), + InterfaceDeclaration, + TypeAliasDeclaration, + EnumDeclaration, + NamespaceDeclaration, + ExternalDeclaration, + GlobalDeclaration) + attr.property : Top + +##----------------------------------- +##rule HoistableDeclaration[Yield, Default] : +## FunctionDeclaration[?Yield,?Default] +## GeneratorDeclaration[?Yield, ?Default] +rule HoistableDeclaration : ONEOF(FunctionDeclaration, + GeneratorDeclaration) + +##----------------------------------- +##rule BreakableStatement[Yield, Return] : +## IterationStatement[?Yield, ?Return] +## SwitchStatement[?Yield, ?Return] +rule BreakableStatement : ONEOF(IterationStatement, + SwitchStatement) + +##----------------------------------- +##rule BlockStatement[Yield, Return] : +## Block[?Yield, ?Return] +rule BlockStatement : Block + +##----------------------------------- +##rule Block[Yield, Return] : +## { StatementList[?Yield, ?Return]opt } +rule Block : '{' + ZEROORONE(StatementList) + '}' + attr.action : BuildBlock(%2) + +##----------------------------------- +##rule StatementList[Yield, Return] : +## StatementListItem[?Yield, ?Return] +## StatementList[?Yield, ?Return] StatementListItem[?Yield, ?Return] +rule StatementList : ONEOF(StatementListItem, + StatementList + StatementListItem) + +##----------------------------------- +##rule StatementListItem[Yield, Return] : +## Statement[?Yield, ?Return] +## Declaration[?Yield] +rule StatementListItem : ONEOF(Statement, Declaration) + +##----------------------------------- +##rule LexicalDeclaration[In, Yield] : +## LetOrConst BindingList[?In, ?Yield] ; +rule LexicalDeclaration : ONEOF("let" + BindingList, + "const" + BindingList) + attr.action.%1,%2 : BuildDecl(%2) + attr.action.%1 : SetJSLet() + attr.action.%2 : SetJSConst() + +##----------------------------------- +##rule LetOrConst : +## let +## const +rule LetOrConst : ONEOF("let", "const") + +##----------------------------------- +##rule BindingList[In, Yield] : +## LexicalBinding[?In, ?Yield] +## BindingList[?In, ?Yield] , LexicalBinding[?In, ?Yield] +rule BindingList : ONEOF(LexicalBinding, + BindingList + ',' + LexicalBinding) + +##----------------------------------- +##rule LexicalBinding[In, Yield] : +## BindingIdentifier[?Yield] Initializer[?In, ?Yield]opt +## BindingPattern[?Yield] Initializer[?In, ?Yield] +rule LexicalBinding : ONEOF(BindingIdentifier + ZEROORONE(Initializer), + BindingIdentifier + ":" + Type + ZEROORONE(Initializer), + BindingPattern + ZEROORONE(Initializer), + BindingPattern + ":" + Type + ZEROORONE(Initializer)) + attr.action.%1,%3 : AddInitTo(%1, %2) + attr.action.%2,%4 : AddInitTo(%1, %4) + attr.action.%2,%4 : AddType(%1, %3) + +##----------------------------------- +##rule VariableStatement[Yield] : +## var VariableDeclarationList[In, ?Yield] ; +rule VariableStatement : "var" + VariableDeclarationList + ZEROORONE(';') + attr.action : PassChild(%2) + +##----------------------------------- +##rule VariableDeclarationList[In, Yield] : +## VariableDeclaration[?In, ?Yield] +## VariableDeclarationList[?In, ?Yield] , VariableDeclaration[?In, ?Yield] +rule VariableDeclarationList : ONEOF( + VariableDeclaration, + VariableDeclarationList + ',' + VariableDeclaration) + +##----------------------------------- +##rule VariableDeclaration[In, Yield] : +## BindingIdentifier[?Yield] Initializer[?In, ?Yield]opt +## BindingPattern[?Yield] 
Initializer[?In, ?Yield] + +# Typescript ask for explicit type. But it also allows implicit type if referrable. +rule VariableDeclaration : ONEOF(BindingIdentifier + ':' + Type + ZEROORONE(Initializer), + BindingIdentifier + ZEROORONE(Initializer), + BindingPattern + ZEROORONE(TypeAnnotation) + Initializer) + attr.action.%1 : AddInitTo(%1, %4) + attr.action.%1 : BuildDecl(%3, %1) + attr.action.%1 : SetJSVar() + attr.action.%2 : AddInitTo(%1, %2) + attr.action.%2 : BuildDecl(%1) + attr.action.%2 : SetJSVar() + attr.action.%3 : AddInitTo(%1, %3) + attr.action.%3 : BuildDecl(%2, %1) + attr.action.%3 : SetJSVar() + +##----------------------------------- +##rule BindingPattern[Yield] : +## ObjectBindingPattern[?Yield] +## ArrayBindingPattern[?Yield] +rule BindingPattern : ONEOF(ObjectBindingPattern, ArrayBindingPattern) + +##----------------------------------- +##rule ObjectBindingPattern[Yield] : +## { } +## { BindingPropertyList[?Yield] } +## { BindingPropertyList[?Yield] , } +rule ObjectBindingPattern : ONEOF('{' + '}', + '{' + BindingPropertyList + '}', + '{' + BindingPropertyList + ',' + '}') + attr.action.%1 : BuildBindingPattern() + attr.action.%2,%3 : BuildBindingPattern(%2) + attr.action.%1,%2,%3 : SetObjectBinding() + +##----------------------------------- +##rule ArrayBindingPattern[Yield] : +## [ Elisionopt BindingRestElement[?Yield]opt ] +## [ BindingElementList[?Yield] ] +## [ BindingElementList[?Yield] , Elisionopt BindingRestElement[?Yield]opt ] +rule ArrayBindingPattern : ONEOF( + '[' + ZEROORONE(Elision) + ZEROORONE(BindingRestElement) + ']', + '[' + BindingElementList + ']', + '[' + BindingElementList + ',' + ZEROORONE(Elision) + ZEROORONE(BindingRestElement) + ']') + attr.action.%1 : BuildBindingPattern(%3) + attr.action.%2,%3 : BuildBindingPattern(%2) + attr.action.%1,%2,%3 : SetArrayBinding() + +##----------------------------------- +##rule BindingPropertyList[Yield] : +## BindingProperty[?Yield] +## BindingPropertyList[?Yield] , BindingProperty[?Yield] +rule BindingPropertyList : ONEOF(BindingProperty, + BindingPropertyList + ',' + BindingProperty, + BindingPropertyList + ',' + BindingRestProperty) + +##----------------------------------- +##rule BindingElementList[Yield] : +## BindingElisionElement[?Yield] +## BindingElementList[?Yield] , BindingElisionElement[?Yield] +rule BindingElementList : ONEOF( + BindingElisionElement, + BindingElementList + ',' + BindingElisionElement) + +##----------------------------------- +##rule BindingElisionElement[Yield] : +## Elisionopt BindingElement[?Yield] +rule BindingElisionElement : ZEROORONE(Elision) + BindingElement + attr.action : PassChild(%2) + +##----------------------------------- +##rule BindingProperty[Yield] : +## SingleNameBinding[?Yield] +## PropertyName[?Yield] : BindingElement[?Yield] +rule BindingProperty : ONEOF(SingleNameBinding, + PropertyName + ':' + BindingElement) + attr.action.%2 : BuildBindingElement(%1, %3) + +rule BindingRestProperty : BindingRestElement + +##----------------------------------- +##rule BindingElement[Yield] : +## SingleNameBinding[?Yield] +## BindingPattern[?Yield] Initializer[In, ?Yield]opt +rule BindingElement : ONEOF(SingleNameBinding, + BindingPattern + ZEROORONE(Initializer)) + attr.action.%2 : AddInitTo(%1, %2) + +##----------------------------------- +##rule SingleNameBinding[Yield] : +## BindingIdentifier[?Yield] Initializer[In, ?Yield]opt +rule SingleNameBinding : BindingIdentifier + ZEROORONE(Initializer) + attr.action : AddInitTo(%1, %2) + attr.action : BuildBindingElement(%1) + 
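+## NOTE (illustration). The binding-pattern rules above cover destructuring declarations.
+## A hypothetical TypeScript snippet exercising them (names are placeholders):
+##     var { a, b: { c }, d = 1 } = obj;   // ObjectBindingPattern: SingleNameBinding, nested BindingPattern, Initializer
+##     var [x, , y = 2] = arr;             // ArrayBindingPattern with an Elision and an Initializer
+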
+##----------------------------------- +##rule BindingRestElement[Yield] : +## ... BindingIdentifier[?Yield] +rule BindingRestElement : "..." + BindingIdentifier + attr.action : SetIsRest(%2) + attr.action : BuildBindingElement(%2) + +##----------------------------------- +##rule EmptyStatement : +## ; +rule EmptyStatement : ';' + +##----------------------------------- +##rule ExpressionStatement[Yield] : +## [lookahead NotIn {{, function, class, let [}] Expression[In, ?Yield] ; + +rule ExpressionStatement : ONEOF( + ConditionalExpression + ASI(';'), + YieldExpression + ';', + ArrowFunction + ';', + LeftHandSideExpression + '=' + AssignmentExpression + ZEROORONE(';'), + LeftHandSideExpression + AssignmentOperator + AssignmentExpression + ZEROORONE(';'), + Expression + ',' + AssignmentExpression + ';', + "undefined" + ';', + LeftHandSideExpression + "++", + LeftHandSideExpression + "--") + + attr.action.%4,%5 : BuildAssignment(%1, %2, %3) + attr.action.%8,%9 : BuildPostfixOperation(%2, %1) + +##----------------------------------- +##rule IfStatement[Yield, Return] : +## if ( Expression[In, ?Yield] ) Statement[?Yield, ?Return] else Statement[?Yield, ?Return] +## if ( Expression[In, ?Yield] ) Statement[?Yield, ?Return] +rule IfStatement : ONEOF( + "if" + '(' + Expression + ')' + Statement + "else" + Statement, + "if" + '(' + Expression + ')' + Statement) + attr.action.%1,%2: BuildCondBranch(%3) + attr.action.%1,%2: AddCondBranchTrueStatement(%5) + attr.action.%1: AddCondBranchFalseStatement(%7) + +## " // This line is to make my vim in right color + +##----------------------------------- +##rule IterationStatement[Yield, Return] : +## do Statement[?Yield, ?Return] while ( Expression[In, ?Yield] ) ; +## while ( Expression[In, ?Yield] ) Statement[?Yield, ?Return] +## for ( [lookahead NotIn {let [}] Expression[?Yield]opt ; Expression[In, ?Yield]opt ; Expression[In, ?Yield]opt ) Statement[?Yield, ?Return] +## for ( var VariableDeclarationList[?Yield] ; Expression[In, ?Yield]opt ; Expression[In, ?Yield]opt ) Statement[?Yield, ?Return] +## for ( LexicalDeclaration[?Yield] Expression[In, ?Yield]opt ; Expression[In, ?Yield]opt ) Statement[?Yield, ?Return] +## for ( [lookahead NotIn {let [}] LeftHandSideExpression[?Yield] in Expression[In, ?Yield] ) Statement[?Yield, ?Return] +## for ( var ForBinding[?Yield] in Expression[In, ?Yield] ) Statement[?Yield, ?Return] +## for ( ForDeclaration[?Yield] in Expression[In, ?Yield] ) Statement[?Yield, ?Return] +## for ( [lookahead NotEq let ] LeftHandSideExpression[?Yield] of AssignmentExpression[In, ?Yield] ) Statement[?Yield, ?Return] +## for ( var ForBinding[?Yield] of AssignmentExpression[In, ?Yield] ) Statement[?Yield, ?Return] +## for ( ForDeclaration[?Yield] of AssignmentExpression[In, ?Yield] ) Statement[?Yield, ?Return] +rule IterationStatement : ONEOF( + "do" + Statement + "while" + '(' + Expression + ')' + ';', + "while" + '(' + Expression + ')' + Statement, + "for" + '(' + ZEROORONE(Expression) + ';' + ZEROORONE(Expression) + ';' + ZEROORONE(Expression) + ')' + Statement, + "for" + '(' + "var" + VariableDeclarationList + ';' + ZEROORONE(Expression) + ';' + ZEROORONE(Expression) + ')' + Statement, + "for" + '(' + LexicalDeclaration + ';' + ZEROORONE(Expression) + ';' + ZEROORONE(Expression) + ')' + Statement, + "for" + '(' + LeftHandSideExpression + "in" + Expression + ')' + Statement, + "for" + '(' + "var" + ForBinding + "in" + Expression + ')' + Statement, + "for" + '(' + ForDeclaration + "in" + Expression + ')' + Statement, + "for" + '(' + 
LeftHandSideExpression + "of" + AssignmentExpression + ')' + Statement, + "for" + '(' + "var" + ForBinding + "of" + AssignmentExpression + ')' + Statement, + "for" + '(' + "let" + ForBinding + "of" + AssignmentExpression + ')' + Statement, + "for" + '(' + "const" + ForBinding + "of" + AssignmentExpression + ')' + Statement, + ) + attr.action.%1 : BuildDoLoop(%5, %2) + attr.action.%2 : BuildWhileLoop(%3, %5) + attr.action.%3 : BuildForLoop(%3, %5, %7, %9) + attr.action.%4 : BuildForLoop(%4, %6, %8, %10) + attr.action.%5 : BuildForLoop(%3, %5, %7, %9) + + attr.action.%7,%10 : BuildDecl(%4) + attr.action.%7,%10 : SetJSVar() + attr.action.%7 : BuildForLoop_In(%6, %8) + attr.action.%6,%8 : BuildForLoop_In(%3, %5, %7) + attr.action.%9 : BuildForLoop_Of(%3, %5, %7) + attr.action.%10 : BuildForLoop_Of(%6, %8) + + attr.action.%11,%12 : BuildDecl(%4) + attr.action.%11 : SetJSLet() + attr.action.%12 : SetJSConst() + attr.action.%11,%12 : BuildForLoop_Of(%6, %8) + +##----------------------------------- +##rule ForDeclaration[Yield] : +## LetOrConst ForBinding[?Yield] +rule ForDeclaration : ONEOF("let" + ForBinding, + "const" + ForBinding) + attr.action.%1,%2 : BuildDecl(%2) + attr.action.%1 : SetJSLet() + attr.action.%2 : SetJSConst() + +##----------------------------------- +##rule ForBinding[Yield] : +## BindingIdentifier[?Yield] +## BindingPattern[?Yield] +rule ForBinding : ONEOF(BindingIdentifier, + BindingPattern) + +##----------------------------------- +##rule ContinueStatement[Yield] : +## continue ; +## continue [no LineTerminator here] LabelIdentifier[?Yield] ; +rule ContinueStatement : ONEOF( + "continue" + ZEROORONE(';'), + "continue" + NoLineTerminator + LabelIdentifier + ZEROORONE(';')) + attr.action.%1 : BuildContinue() + attr.action.%2 : BuildContinue(%3) + +##----------------------------------- +##rule BreakStatement[Yield] : +## break ; +## break [no LineTerminator here] LabelIdentifier[?Yield] ; +rule BreakStatement : ONEOF( + "break" + ZEROORONE(';'), + "break" + NoLineTerminator + LabelIdentifier + ZEROORONE(';')) + attr.action.%1 : BuildBreak() + attr.action.%2 : BuildBreak(%3) + +##----------------------------------- +##rule ReturnStatement[Yield] : +## return ; +## return [no LineTerminator here] Expression[In, ?Yield] ; +rule ReturnStatement :ONEOF("return" + ZEROORONE(';'), + "return" + NoLineTerminator + Expression + ZEROORONE(';')) + attr.action.%1 : BuildReturn() + attr.action.%2 : BuildReturn(%3) + +##----------------------------------- +##rule WithStatement[Yield, Return] : +## with ( Expression[In, ?Yield] ) Statement[?Yield, ?Return] + +##----------------------------------- +##rule SwitchStatement[Yield, Return] : +## switch ( Expression[In, ?Yield] ) CaseBlock[?Yield, ?Return] +rule SwitchStatement : + "switch" + '(' + Expression + ')' + CaseBlock + attr.action : BuildSwitch(%3, %5) + +##----------------------------------- +##rule CaseBlock[Yield, Return] : +## { CaseClauses[?Yield, ?Return]opt } +## { CaseClauses[?Yield, ?Return]opt DefaultClause[?Yield, ?Return] CaseClauses[?Yield, ?Return]opt } +rule CaseBlock : ONEOF( + '{' + ZEROORONE(CaseClauses) + '}', + '{' + ZEROORONE(CaseClauses) + DefaultClause + ZEROORONE(CaseClauses) + '}') + +##----------------------------------- +##rule CaseClauses[Yield, Return] : +## CaseClause[?Yield, ?Return] +## CaseClauses[?Yield, ?Return] CaseClause[?Yield, ?Return] +rule CaseClauses : ONEOF( + CaseClause, + CaseClauses + CaseClause) + +##----------------------------------- +##rule CaseClause[Yield, Return] : +## case 
Expression[In, ?Yield] : StatementList[?Yield, ?Return]opt +rule CaseClause : + "case" + Expression + ':' + ZEROORONE(StatementList) + attr.action : BuildSwitchLabel(%2) + attr.action : BuildOneCase(%4) + +##----------------------------------- +##rule DefaultClause[Yield, Return] : +## default : StatementList[?Yield, ?Return]opt +rule DefaultClause : + "default" + ':' + ZEROORONE(StatementList) + attr.action : BuildDefaultSwitchLabel() + attr.action : BuildOneCase(%3) + +##----------------------------------- +##rule LabelledStatement[Yield, Return] : +## LabelIdentifier[?Yield] : LabelledItem[?Yield, ?Return] +rule LabelledStatement : + LabelIdentifier + ':' + LabelledItem + attr.action : AddLabel(%3, %1) + +##----------------------------------- +##rule LabelledItem[Yield, Return] : +## Statement[?Yield, ?Return] +## FunctionDeclaration[?Yield] +rule LabelledItem : ONEOF(Statement, FunctionDeclaration) + +##----------------------------------- +##rule ThrowStatement[Yield] : +## throw [no LineTerminator here] Expression[In, ?Yield] ; +rule ThrowStatement : "throw" + Expression + ZEROORONE(';') + attr.action : BuildThrows(%2) + +##----------------------------------- +##rule TryStatement[Yield, Return] : +## try Block[?Yield, ?Return] Catch[?Yield, ?Return] +## try Block[?Yield, ?Return] Finally[?Yield, ?Return] +## try Block[?Yield, ?Return] Catch[?Yield, ?Return] Finally[?Yield, ?Return] +rule TryStatement : ONEOF( + "try" + Block + Catch, + "try" + Block + Finally, + "try" + Block + Catch + Finally) + attr.action.%1,%2,%3 : BuildTry(%2) + attr.action.%1,%3 : AddCatch(%3) + attr.action.%2 : AddFinally(%3) + attr.action.%3 : AddFinally(%4) + +##----------------------------------- +##rule Catch[Yield, Return] : +## catch ( CatchParameter[?Yield] ) Block[?Yield, ?Return] +rule Catch : ONEOF("catch" + '(' + CatchParameter + ')' + Block, + "catch" + Block) + attr.action.%1 : BuildCatch(%3, %5) + attr.action.%2 : BuildCatch(%2) + +##----------------------------------- +##rule Finally[Yield, Return] : +## finally Block[?Yield, ?Return] +rule Finally : "finally" + Block + attr.action : BuildFinally(%2) + +##----------------------------------- +##rule CatchParameter[Yield] : +## BindingIdentifier[?Yield] +## BindingPattern[?Yield] +rule CatchParameter : ONEOF(BindingIdentifier + ZEROORONE(TypeAnnotation), + BindingPattern) + attr.action.%1 : AddType(%1, %2) + +##----------------------------------- +rule DebuggerStatement : "debugger" + ';' + + +###################################################################### +## Function and Class +###################################################################### + +## NOTE: Replaced by TS +## FunctionDeclaration[Yield, Default] : +## function BindingIdentifier[?Yield] ( FormalParameters ) { FunctionBody } +## [+Default] function ( FormalParameters ) { FunctionBody } + +## +## FunctionExpression : +## function BindingIdentifieropt ( FormalParameters ) { FunctionBody } + +## +## StrictFormalParameters[Yield] : +## FormalParameters[?Yield] +rule StrictFormalParameters : FormalParameters + +## +## FormalParameters[Yield] : +## [empty] +## FormalParameterList[?Yield] +rule FormalParameters : ZEROORONE(FormalParameterList) + +## +## FormalParameterList[Yield] : +## FunctionRestParameter[?Yield] +## FormalsList[?Yield] +## FormalsList[?Yield] , FunctionRestParameter[?Yield] +rule FormalParameterList : ONEOF(FunctionRestParameter, + FormalsList, + FormalsList + ',' + FunctionRestParameter) + +## +## FormalsList[Yield] : +## FormalParameter[?Yield] +## 
FormalsList[?Yield] , FormalParameter[?Yield] +rule FormalsList : ONEOF(FormalParameter, + FormalsList + ',' + FormalParameter) + +## +## FunctionRestParameter[Yield] : +## BindingRestElement[?Yield] +rule FunctionRestParameter : BindingRestElement + +## +## FormalParameter[Yield] : +## BindingElement[?Yield] + +## Typescript requires type. So this is different than JS spec. +rule FormalParameter : BindingElement + attr.action : BuildDecl(%3, %1) + +## +## FunctionBody[Yield] : +## FunctionStatementList[?Yield] +## NOTE. I used ZEROORONE(StatementList) directly in order to avoid +## an issue where FunctionStatementList fail when looking for +## its lookahead, if function body is empty. +rule FunctionBody : ZEROORONE(StatementList) + attr.action : BuildBlock(%1) + +## +## FunctionStatementList[Yield] : +## StatementList[?Yield, Return]opt +rule FunctionStatementList : ZEROORONE(StatementList) + +## See 14.2 +## ArrowFunction[In, Yield] : +## ArrowParameters[?Yield] [no LineTerminator here] => ConciseBody[?In] + +# (1) I inline ArrowParameters +# (2) In CoverParent.... is replaced by ArrowFormalParameters which in turn is changed +# to CallSignature in Typescript. I inline CallSignature here. +rule ArrowFunction : ONEOF( + BindingIdentifier + "=>" + ConciseBody, + ZEROORONE(AccessibilityModifier) + ZEROORONE(TypeParameters) + '(' + ZEROORONE(ParameterList) + ')' + ZEROORONE(TypeAnnotation) + "=>" + ConciseBody) + attr.action.%1 : BuildLambda(%1, %3) + attr.action.%2 : BuildLambda(%4, %8) + attr.action.%2 : AddType(%6) + attr.action.%2 : AddTypeGenerics(%2) + attr.action.%2 : AddModifier(%1) + attr.action.%1,%2 : SetArrowFunction() + +## See 14.2 +## ArrowParameters[Yield] : +## BindingIdentifier[?Yield] +## CoverParenthesizedExpressionAndArrowParameterList[?Yield] +## When the production ArrowParameters:CoverParenthesizedExpressionAndArrowParameterList is recognized the following +## grammar is used to refine the interpretation of CoverParenthesizedExpressionAndArrowParameterList: +##ArrowFormalParameters[Yield]: +##(StrictFormalParameters[?Yield]) +rule ArrowParameters : ONEOF(BindingIdentifier, + ArrowFormalParameters) + +rule ArrowFormalParameters : '(' + StrictFormalParameters + ')' + +## See 14.2 +## ConciseBody[In] : +## [lookahead ≠ { ] AssignmentExpression[?In] +## { FunctionBody } +rule ConciseBody : ONEOF(AssignmentExpression, + '{' + FunctionBody + '}') + +## +## See 14.3 +## MethodDefinition[Yield] : +## PropertyName[?Yield] ( StrictFormalParameters ) { FunctionBody } +## GeneratorMethod[?Yield] +## get PropertyName[?Yield] ( ) { FunctionBody } +## set PropertyName[?Yield] ( PropertySetParameterList ) { FunctionBody } + +## See 14.3 +## PropertySetParameterList : +## FormalParameter +## See 14.4 +## GeneratorMethod[Yield] : +## * PropertyName[?Yield] ( StrictFormalParameters[Yield] ) { GeneratorBody } + +## See 14.4 +## GeneratorDeclaration[Yield, Default] : +## function * BindingIdentifier[?Yield] ( FormalParameters[Yield] ) { GeneratorBody } +## [+Default] function * ( FormalParameters[Yield] ) { GeneratorBody } +rule GeneratorDeclaration : + "function" + '*' + BindingIdentifier + '(' + ZEROORONE(ParameterList) + ')' + ZEROORONE(TypeAnnotation) + '{' + FunctionBody + '}' + attr.action : BuildFunction(%3) + attr.action : AddParams(%5) + attr.action : AddType(%7) + attr.action : AddFunctionBody(%9) + attr.action : SetIsGenerator() + +## See 14.4 +## GeneratorExpression : +## function * BindingIdentifier[Yield]opt ( FormalParameters[Yield] ) { GeneratorBody } + +## See 14.4 +## 
GeneratorBody : +## FunctionBody[Yield] + +## See 14.4 +## YieldExpression[In] : +## yield +## yield [no LineTerminator here] AssignmentExpression[?In, Yield] +## yield [no LineTerminator here] * AssignmentExpression[?In, Yield] + +rule YieldExpression : ONEOF("yield", + "yield" + AssignmentExpression, + "yield" + '*' + AssignmentExpression) + attr.action.%1: BuildYield() + attr.action.%2: BuildYield(%2) + attr.action.%3: BuildYield(%3) + attr.action.%3: SetIsTransfer() + +## See 14.5 +## ClassDeclaration[Yield, Default] : +## class BindingIdentifier[?Yield] ClassTail[?Yield] +## [+Default] class ClassTail[?Yield] +## See 14.5 +## ClassExpression[Yield] : +## class BindingIdentifier[?Yield]opt ClassTail[?Yield] +## See 14.5 +## ClassTail[Yield] : +## ClassHeritage[?Yield]opt { ClassBody[?Yield]opt } +## See 14.5 +## ClassHeritage[Yield] : +## extends LeftHandSideExpression[?Yield] + +## See 14.5 +## ClassBody[Yield] : +## ClassElementList[?Yield] +rule ClassBody : ClassElementList + attr.action: BuildBlock(%1) + +## See 14.5 +## ClassElementList[Yield] : +## ClassElement[?Yield] +## ClassElementList[?Yield] ClassElement[?Yield] +rule ClassElementList : ONEOF(ClassElement, + ClassElementList + ClassElement) + +## See 14.5 +## ClassElement[Yield] : +## MethodDefinition[?Yield] +## static MethodDefinition[?Yield] +## ; + +############################################################################# +## A.5 Scripts and Modules +############################################################################# + +## See 15.1 +## Script : ## ScriptBodyopt +rule Script : ZEROORONE(ScriptBody) + +## See 15.1 +## ScriptBody : ## StatementList +rule ScriptBody : StatementList + +## Module : ModuleBodyopt +rule Module : ZEROORONE(ModuleBody) + +## ModuleBody : ModuleItemList +rule ModuleBody : ModuleItemList + +## ModuleItemList : +## ModuleItem +## ModuleItemList ModuleItem +rule ModuleItemList : ONEOF(ModuleItem, + ModuleItemList + ModuleItem) + +## ModuleItem : +## ImportDeclaration +## ExportDeclaration +## StatementListItem +rule ModuleItem : ONEOF(ImportDeclaration, + ExportDeclaration, + StatementListItem) + +## ImportDeclaration : +## import ImportClause FromClause ; +## import ModuleSpecifier ; +rule ImportDeclaration : ONEOF("import" + ImportClause + FromClause + ZEROORONE(';'), + "import" + ModuleSpecifier + ZEROORONE(';'), + "import" + BindingIdentifier + '=' + "require" + '(' + AssignmentExpression + ')' + ZEROORONE(';'), + "import" + "type" + NamedImports + FromClause + ZEROORONE(';'), + "import" + "type" + NameSpaceImport + FromClause + ZEROORONE(';'), + "import" + "type" + ImportedDefaultBinding + FromClause + ZEROORONE(';'), + ImportAliasDeclaration) + attr.property : Top + attr.action.%1,%2,%3,%4,%5,%6 : BuildImport() + attr.action.%1 : SetPairs(%2) + attr.action.%1 : SetFromModule(%3) + attr.action.%2 : SetFromModule(%2) + attr.action.%3 : SetSinglePairs(%6, %2) + attr.action.%4,%5,%6 : SetPairs(%3) + attr.action.%4,%5,%6 : SetFromModule(%4) + attr.action.%4,%5,%6 : SetIsXXportType() + +## ImportClause : +## ImportedDefaultBinding +## NameSpaceImport +## NamedImports +## ImportedDefaultBinding , NameSpaceImport +## ImportedDefaultBinding , NamedImports +rule ImportClause : ONEOF(ImportedDefaultBinding, + NameSpaceImport, + NamedImports, + ImportedDefaultBinding + ',' + NameSpaceImport, + ImportedDefaultBinding + ',' + NamedImports) + +## See 15.2.2 +## ImportedDefaultBinding : +## ImportedBinding +rule ImportedDefaultBinding : ImportedBinding + attr.action : BuildXXportAsPairDefault(%1) 
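+
+## NOTE (illustration). The ImportDeclaration/ImportClause alternatives above roughly cover
+## hypothetical TypeScript forms such as (module names are placeholders):
+##     import def from "mod";             // ImportedDefaultBinding + FromClause
+##     import * as ns from "mod";         // NameSpaceImport
+##     import { a, b as c } from "mod";   // NamedImports with renaming ImportSpecifier
+##     import "mod";                      // bare ModuleSpecifier, side-effect import
+##     import fs = require("mod");        // BindingIdentifier '=' require(...)
+##     import type { T } from "mod";      // type-only NamedImports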
+ +## See 15.2.2 +## NameSpaceImport : +## * as ImportedBinding +rule NameSpaceImport : '*' + "as" + ImportedBinding + attr.action : BuildXXportAsPairEverything(%3) + +## See 15.2.2 +## NamedImports : +## { } +## { ImportsList } +## { ImportsList , } +rule NamedImports : ONEOF('{' + '}', + '{' + ImportsList + '}', + '{' + ImportsList + ',' + '}') + +## See 15.2.2 +## FromClause : +## from ModuleSpecifier +rule FromClause : "from" + ModuleSpecifier + +## See 15.2.2 +## ImportsList : +## ImportSpecifier +## ImportsList , ImportSpecifier +rule ImportsList : ONEOF(ImportSpecifier, + ImportsList + ',' + ImportSpecifier) + +## See 15.2.2 +## ImportSpecifier : +## ImportedBinding +## IdentifierName as ImportedBinding +rule ImportSpecifier : ONEOF(ImportedBinding, + JSIdentifier + "as" + ImportedBinding, + "default" + "as" + ImportedBinding) + attr.action.%2 : BuildXXportAsPair(%1, %3) + attr.action.%3 : BuildXXportAsPairDefault(%3) + +## See 15.2.2 +## ModuleSpecifier : +## StringLiteral +## NOTE. I extend StringLiteral to Literal to ease parser. 'tsc' will make sure +## it's a string literal. +rule ModuleSpecifier : Literal + +## See 15.2.2 +## ImportedBinding : +## BindingIdentifier +rule ImportedBinding : BindingIdentifier + +## See 15.2.3 +## ExportDeclaration : +## export * FromClause ; +## export ExportClause FromClause ; +## export ExportClause ; +## export VariableStatement +## export Declaration +## export default HoistableDeclaration[Default] +## export default ClassDeclaration[Default] +## export default [lookahead ∉ {function, class}] AssignmentExpression[In] ; + +# export = expr; +# is for export single syntax. +rule ExportDeclaration : ONEOF(ZEROORMORE(Annotation) + "export" + '*' + FromClause + ZEROORONE(';'), + ZEROORMORE(Annotation) + "export" + ExportClause + FromClause + ZEROORONE(';'), + ZEROORMORE(Annotation) + "export" + ExportClause + ZEROORONE(';'), + ZEROORMORE(Annotation) + "export" + VariableStatement, + ZEROORMORE(Annotation) + "export" + Declaration + ZEROORONE(';'), + ZEROORMORE(Annotation) + "export" + "default" + HoistableDeclaration, + ZEROORMORE(Annotation) + "export" + "default" + ClassDeclaration, + ZEROORMORE(Annotation) + "export" + "default" + AssignmentExpression + ZEROORONE(';'), + ZEROORMORE(Annotation) + "export" + "=" + AssignmentExpression + ZEROORONE(';'), + ZEROORMORE(Annotation) + "export" + "type" + ExportClause + FromClause + ZEROORONE(';'), + ZEROORMORE(Annotation) + "export" + "type" + ExportClause + ZEROORONE(';'), + ZEROORMORE(Annotation) + "export" + AsNamespace + ZEROORONE(';'), + ZEROORMORE(Annotation) + "export" + ExternalModuleDeclaration + ZEROORONE(';'), + ZEROORMORE(Annotation) + "export" + NameSpaceExport + FromClause + ZEROORONE(';'), + ZEROORMORE(Annotation) + "export" + ImportAliasDeclaration, + ZEROORMORE(Annotation) + "export" + TypeAliasDeclaration + ZEROORONE(';')) + attr.property : Top + attr.action.%1,%2,%3,%4,%5,%6,%7,%8,%9,%10,%11,%12,%13,%14,%15,%16 : BuildExport() + attr.action.%1,%2,%3,%4,%5,%6,%7,%8,%9,%10,%11,%12,%13,%14,%15,%16 : AddModifier(%1) + attr.action.%1 : SetIsEverything() + attr.action.%2,%3,%4,%5,%12,%13,%14,%15,%16 : SetPairs(%3) + attr.action.%6,%7,%8 : SetDefaultPairs(%4) + attr.action.%1,%2,%14 : SetFromModule(%4) + attr.action.%9 : SetSinglePairs(%4) + attr.action.%10,%11 : SetPairs(%4) + attr.action.%10,%11 : SetIsXXportType() + attr.action.%10 : SetFromModule(%5) + +rule AsNamespace : "as" + "namespace" + JSIdentifier + attr.action : BuildXXportAsPair(%3) + attr.action : SetAsNamespace() + +rule 
NameSpaceExport : '*' + "as" + JSIdentifier + attr.action : BuildXXportAsPairEverything(%3) + +## See 15.2.3 +## ExportClause : +## { } +## { ExportsList } +## { ExportsList , } +rule ExportClause : ONEOF('{' + '}', + '{' + ExportsList + '}', + '{' + ExportsList + ',' + '}') + +## ExportsList : +## ExportSpecifier +## ExportsList , ExportSpecifier +rule ExportsList : ONEOF(ExportSpecifier, + ExportsList + ',' + ExportSpecifier) + +## See 15.2.3 +## ExportSpecifier : +## IdentifierName +## IdentifierName as IdentifierName +rule KeywordExportName : ONEOF("import") + attr.action : BuildIdentifier() + +rule ExportSpecifier : ONEOF(JSIdentifier, + JSIdentifier + "as" + BindingIdentifier, + JSIdentifier + "as" + "default", + "default" + "as" + JSIdentifier, + JSIdentifier + "as" + "super", + JSIdentifier + "as" + "function", + JSIdentifier + "as" + KeywordExportName) + attr.action.%2,%5,%6,%7 : BuildXXportAsPair(%1, %3) + attr.action.%3 : BuildXXportAsPairDefault(%1) + attr.action.%4 : BuildXXportAsPairDefault(%3) + +############################################################################# +############################################################################# +############################################################################# +## Below is Typescript specific +############################################################################# +############################################################################# +############################################################################# + +############################################################################# +## A.1 Type section +############################################################################# + +## rule TypeParameters: < TypeParameterList > +rule TypeParameters: '<' + TypeParameterList + '>' + +rule TypeParameterList: ONEOF(TypeParameter + ZEROORONE(Elision), + TypeParameterList + ',' + TypeParameter + ZEROORONE(Elision)) + +## rule TypeParameter: BindingIdentifier Constraintopt +## It supports default type value of type parameter now. +rule TypeParameter: BindingIdentifier + ZEROORONE(Constraint) + ZEROORONE(TypeInitializer) + attr.action : BuildTypeParameter(%1) + attr.action : AddInit(%3) + attr.action : AddTypeParameterExtends(%2) + +rule TypeInitializer : '=' + Type + +## rule Constraint: extends Type +rule Constraint: "extends" + Type + attr.action : PassChild(%2) + +## rule TypeArguments: < TypeArgumentList > +rule TypeArguments: '<' + TypeArgumentList + '>' + +## rule TypeArgumentList: TypeArgument TypeArgumentList , TypeArgument +rule TypeArgumentList: ONEOF(TypeArgument, + TypeArgumentList + ',' + TypeArgument) + +## rule TypeArgument: Type +rule TypeArgument: Type + +rule ConditionalType : ONEOF(MemberExpression + "extends" + Type + '?' + Type + ':' + Type, + TypeReference + "extends" + Type + '?' + Type + ':' + Type, + ObjectType + "extends" + Type + '?' + Type + ':' + Type, + "unknown" + "extends" + Type + '?' + Type + ':' + Type, + PrimaryType + "extends" + Type + '?' + Type + ':' + Type, + TypeQuery + "extends" + Type + '?' + Type + ':' + Type, + TypeArray + "extends" + Type + '?' 
+ Type + ':' + Type) + attr.action.%1,%2,%3,%4,%5,%6,%7 : BuildConditionalType(%1, %3, %5, %7) + +rule KeyOf : ONEOF("keyof" + Identifier, + "keyof" + '(' + TypeQuery + ')', + "keyof" + TypeQuery, + "keyof" + MemberExpression, + "keyof" + TypeReference) + attr.action.%1,%3,%4,%5 : BuildKeyOf(%2) + attr.action.%2 : BuildKeyOf(%3) + +rule InferType : "infer" + Identifier + attr.action : BuildInfer(%2) + +rule TypeArray : ONEOF(PrimaryType + '[' + PrimaryExpression + ']', + PrimaryType + '[' + TypeReference + ']', + TypeArray + '[' + PrimaryExpression + ']', + PrimaryType + '[' + ConditionalType + ']', + PrimaryType + '[' + TypeArray + ']') + attr.action.%1,%2,%3,%4,%5 : BuildArrayElement(%1, %3) + +rule PrimaryTypeKeyOf : PrimaryType + '[' + KeyOf + ']' + attr.action : BuildArrayElement(%1, %3) + +#rule Type : ONEOF(UnionOrIntersectionOrPrimaryType, +# FunctionType, +# ConstructorType) +rule Type : ONEOF(UnionOrIntersectionOrPrimaryType, + FunctionType, + ConstructorType, + KeyOf, + ConditionalType, + # Typescript interface[index] can be seen as a type + TypeArray, + MemberExpression + '[' + KeyOf + ']', + PrimaryTypeKeyOf, + InferType, + IsExpression, + PrimaryType + '[' + TypeQuery + ']', + TemplateLiteral) + attr.action.%7,%11 : BuildArrayElement(%1, %3) + +#rule UnionOrIntersectionOrPrimaryType: ONEOF(UnionType, +# IntersectionOrPrimaryType) +rule UnionOrIntersectionOrPrimaryType: ONEOF(UnionType, + IntersectionOrPrimaryType) + +#rule IntersectionOrPrimaryType : ONEOF(IntersectionType, +# PrimaryType) +rule IntersectionOrPrimaryType : ONEOF(IntersectionType, PrimaryType, TypeArray) + +## rule PrimaryType: ParenthesizedType PredefinedType TypeReference ObjectType ArrayType TupleType TypeQuery ThisType +rule PrimaryType: ONEOF(ParenthesizedType, + PredefinedType, + TypeReference, + ObjectType, + ArrayType, + TupleType, + TypeQuery, + ThisType, + NeverArrayType, + Literal, + ArrayLiteral, + ImportFunction, + ImportFunction + '.' + TypeReference) + attr.action.%13 : BuildField(%1, %3) + +rule NeverArrayType : '[' + ']' + attr.action : BuildNeverArrayType() + +## rule ParenthesizedType: ( Type ) +rule ParenthesizedType: '(' + Type + ')' + +## rule PredefinedType: any number boolean string symbol void +rule PredefinedType: ONEOF(TYPE, + "unique" + TYPE) + attr.action.%2 : SetIsUnique(%2) + +## rule TypeReference: TypeName [no LineTerminator here] TypeArgumentsopt +rule TypeReference: TypeName + ZEROORONE(TypeArguments) + ZEROORMORE(AsType) + attr.action : BuildUserType(%1) + attr.action : AddTypeGenerics(%2) + attr.action : AddAsType(%3) + +## rule TypeName: IdentifierReference NamespaceName . IdentifierReference +rule TypeName: ONEOF(IdentifierReference, + NamespaceName + '.' + IdentifierReference, + NamespaceName + '.' + KeywordPropName, + '(' + IdentifierReference + ')', + '(' + NamespaceName + '.' + IdentifierReference + ')') + attr.action.%2,%3 : BuildField(%1, %3) + attr.action.%5 : BuildField(%2, %4) + +## rule NamespaceName: IdentifierReference NamespaceName . IdentifierReference +rule NamespaceName: ONEOF(IdentifierReference, + NamespaceName + '.' 
+ IdentifierReference) + attr.action.%2 : BuildField(%1, %3) + +## rule ObjectType: { TypeBodyopt } +rule ObjectType : '{' + ZEROORONE(TypeBody) + '}' + attr.action : BuildStruct() + attr.action : AddStructField(%2) + +## rule TypeBody: TypeMemberList ;opt TypeMemberList ,opt +rule TypeBody : ONEOF(TypeMemberList + ZEROORONE(';'), + TypeMemberList + ZEROORONE(',')) + +## rule TypeMemberList: TypeMember TypeMemberList ; TypeMember TypeMemberList , TypeMember +rule TypeMemberList : ONEOF(TypeMember, + TypeMemberList + ZEROORONE(';') + TypeMember, + TypeMemberList + ZEROORONE(',') + TypeMember) + +## rule TypeMember: PropertySignature CallSignature ConstructSignature IndexSignature MethodSignature +rule TypeMember : ONEOF(PropertySignature, + CallSignature, + ConstructSignature, + IndexSignature, + MethodSignature) + +## rule ArrayType: PrimaryType [no LineTerminator here] [ ] +rule ArrayType: ONEOF(ZEROORONE("readonly") + PrimaryType + '[' + ']', + SpreadElement + '[' + ']', + MemberExpression + '[' + ']') + attr.action.%1 : BuildArrayType(%2, %2) + attr.action.%1 : AddModifier(%1) + attr.action.%2 : BuildArrayType(%1, %1) + attr.action.%3 : BuildArrayType(%1, %1) + +## rule TupleType: [ TupleElementTypes ] +rule TupleType: ZEROORONE("readonly") + '[' + TupleElementTypes + ZEROORONE(Elision) + ']' + attr.action : BuildTupleType() + attr.action : AddModifier(%1) + attr.action : AddStructField(%3) + +## rule TupleElementTypes: TupleElementType TupleElementTypes , TupleElementType +rule TupleElementTypes: ONEOF(TupleElementType, + TupleElementTypes + ',' + TupleElementType) + +## rule TupleElementType: Type +rule TupleElementType: ONEOF(ZEROORONE(JSIdentifier + ':') + Type, + "..." + Type) + attr.action.%1 : BuildNameTypePair(%1, %2) + attr.action.%2 : SetIsRest(%2) + attr.action.%2 : BuildNameTypePair(%2) + +## rule UnionType: UnionOrIntersectionOrPrimaryType | IntersectionOrPrimaryType +rule UnionType : ONEOF(ZEROORONE('|') + UnionOrIntersectionOrPrimaryType + '|' + IntersectionOrPrimaryType, + UnionOrIntersectionOrPrimaryType + '|' + KeyOf, + KeyOf + '|' + UnionOrIntersectionOrPrimaryType, + TypeQuery + '|' + UnionOrIntersectionOrPrimaryType, + TemplateLiteral + '|' + TemplateLiteral) + attr.action.%1 : BuildUnionUserType(%2, %4) + attr.action.%2,%3,%4,%5 : BuildUnionUserType(%1, %3) + +## rule IntersectionType: IntersectionOrPrimaryType & PrimaryType +rule IntersectionType: ONEOF(IntersectionOrPrimaryType + '&' + PrimaryType, + IntersectionOrPrimaryType + '&' + ConditionalType, + PrimaryTypeKeyOf + '&' + PrimaryType) + attr.action.%1,%2,%3 : BuildInterUserType(%1, %3) + +## rule FunctionType: TypeParametersopt ( ParameterListopt ) => Type +rule FunctionType: ZEROORONE(TypeParameters) + '(' + ZEROORONE(ParameterList) + ')' + "=>" + Type + attr.action : BuildLambda(%3) + attr.action : AddType(%6) + attr.action : AddTypeGenerics(%1) + +## rule ConstructorType: new TypeParametersopt ( ParameterListopt ) => Type +## This actually a literal. +rule ConstructorType: ZEROORONE(AccessibilityModifier) + "new" + FunctionType + attr.action : BuildNewOperation(%3) + attr.action : AddModifier(%1) + +## rule TypeQuery: typeof TypeQueryExpression +rule TypeQuery: ONEOF("typeof" + TypeQueryExpression, + "typeof" + '(' + ClassDeclaration + ')') + attr.action.%1 : BuildTypeOf(%2) + attr.action.%2 : BuildTypeOf(%3) + +## rule TypeQueryExpression: IdentifierReference TypeQueryExpression . IdentifierName +rule TypeQueryExpression: ONEOF(IdentifierReference, + TypeQueryExpression + '.' 
+ JSIdentifier, + UnaryExpression, + ImportFunction) + attr.action.%2 : BuildField(%1, %3) + +## rule ThisType: this +rule ThisType: "this" + +rule PropertySignatureName : ONEOF(PropertyName, KeywordPropName) + +## rule PropertySignature: PropertyName ?opt TypeAnnotationopt +## +## NOTE: for KeywordPropName we require them to have an explicit TypeAnnotation, or ',' or ';' +## following it. Otherwise there will be ambiguity, like: +## interface A { +## export() : string; +## } +## It could be parsed as one property 'export' and one call signature "() : string". + +rule PropertySignature: ONEOF(ZEROORONE(AccessibilityModifier) + PropertyName + ZEROORONE(TypeAnnotation), + ZEROORONE(AccessibilityModifier) + PropertySignatureName + '?' + ZEROORONE(TypeAnnotation), + ZEROORONE(AccessibilityModifier) + KeywordPropName + TypeAnnotation, + ZEROORONE(AccessibilityModifier) + KeywordPropName + ',', + ZEROORONE(AccessibilityModifier) + KeywordPropName + ';') + attr.action.%1,%3 : AddType(%2, %3) + attr.action.%2 : AddType(%2, %4) + attr.action.%2 : SetIsOptional(%2) + attr.action.%1,%2,%3,%4,%5: AddModifierTo(%2, %1) + +## JS ECMA has more definition than this Typescript one. I use ECMA one. +## rule PropertyName: IdentifierName StringLiteral NumericLiteral +##rule PropertyName : ONEOF(JSIdentifier, + ##StringLiteral, + ##NumericLiteral, +## ) + +## rule TypeAnnotation: : Type +rule TypeAnnotation: ':' + Type + +## rule CallSignature: TypeParametersopt ( ParameterListopt ) TypeAnnotationopt +rule CallSignature: ZEROORONE(TypeParameters) + '(' + ZEROORONE(ParameterList) + ')' + ZEROORONE(TypeAnnotation) + attr.action : BuildFunction() + attr.action : AddParams(%3) + attr.action : AddType(%5) + attr.action : SetCallSignature() + +## rule ParameterList: RequiredParameterList OptionalParameterList RestParameter RequiredParameterList , OptionalParameterList RequiredParameterList , RestParameter OptionalParameterList , RestParameter RequiredParameterList , OptionalParameterList , RestParameter +rule ParameterList: ONEOF(RequiredParameterList + ZEROORONE(Elision), + OptionalParameterList + ZEROORONE(Elision), + RestParameter + ZEROORONE(Elision), + RequiredParameterList + ',' + OptionalParameterList + ZEROORONE(Elision), + RequiredParameterList + ',' + RestParameter + ZEROORONE(Elision), + OptionalParameterList + ',' + RestParameter + ZEROORONE(Elision), + RequiredParameterList + ',' + OptionalParameterList + ',' + RestParameter + ZEROORONE(Elision)) + +## rule RequiredParameterList: RequiredParameter RequiredParameterList , RequiredParameter +rule RequiredParameterList: ONEOF(RequiredParameter, + RequiredParameterList + ',' + RequiredParameter) + +## rule RequiredParameter: AccessibilityModifieropt BindingIdentifierOrPattern TypeAnnotationopt BindingIdentifier : StringLiteral +## NOTE: I extend StringLiteral to Literal. +## NOTE: I Added initializer. I guess the spec missed this part. 
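+##
+## A hypothetical TypeScript illustration of these parameter forms (the constructor line
+## assumes an enclosing class):
+##     constructor(private name: string, count: number = 0) {}   // AccessibilityModifier + TypeAnnotation + Initializer
+##     function f(this: Window, kind: "click"): void {}          // 'this' parameter; BindingIdentifier ':' Literal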
+rule RequiredParameter: ONEOF( + ZEROORMORE(AccessibilityModifier) + BindingIdentifierOrPattern + ZEROORONE(TypeAnnotation) + ZEROORONE(Initializer), + "this" + ZEROORONE(TypeAnnotation), + BindingIdentifier + ':' + Literal, + ObjectType) + attr.action.%1 : AddInitTo(%2, %4) + attr.action.%1 : BuildDecl(%3, %2) + attr.action.%2 : BuildDecl(%2, %1) + +## rule AccessibilityModifier: public private protected +rule AccessibilityModifier: ONEOF("public", "private", "protected", "readonly", "static", "abstract", "async") + +## rule BindingIdentifierOrPattern: BindingIdentifier BindingPattern +rule BindingIdentifierOrPattern: ONEOF(BindingIdentifier, BindingPattern) + +## rule OptionalParameterList: OptionalParameter OptionalParameterList , OptionalParameter +rule OptionalParameterList: ONEOF(OptionalParameter, + OptionalParameterList + ',' + OptionalParameter) + +## rule OptionalParameter: AccessibilityModifieropt BindingIdentifierOrPattern ? TypeAnnotationopt AccessibilityModifieropt BindingIdentifierOrPattern TypeAnnotationopt Initializer BindingIdentifier ? : StringLiteral +rule OptionalParameter: ONEOF( + ZEROORMORE(AccessibilityModifier) + BindingIdentifierOrPattern + '?' + ZEROORONE(TypeAnnotation), + ZEROORMORE(AccessibilityModifier) + BindingIdentifierOrPattern + ZEROORONE(TypeAnnotation) + Initializer, + BindingIdentifier + '?' + ':' + Literal) + attr.action.%1 : SetIsOptional(%2) + attr.action.%1 : BuildDecl(%4, %2) + attr.action.%2 : AddInitTo(%2, %4) + attr.action.%2 : BuildDecl(%3, %2) + attr.action.%3 : SetIsOptional(%1) + +## rule RestParameter: ... BindingIdentifier TypeAnnotationopt +rule RestParameter: "..." + BindingIdentifier + ZEROORONE(TypeAnnotation) + attr.action : AddType(%2, %3) + attr.action : SetIsRest(%2) + +## rule ConstructSignature: new TypeParametersopt ( ParameterListopt ) TypeAnnotationopt +rule ConstructSignature : + "new" + ZEROORONE(TypeParameters) + '(' + ZEROORONE(ParameterList) + ')' + ZEROORONE(TypeAnnotation) + attr.action : BuildFunction() + attr.action : AddParams(%4) + attr.action : AddType(%6) + attr.action : SetConstructSignature() + +## rule IndexSignature: [ BindingIdentifier : string ] TypeAnnotation [ BindingIdentifier : number ] TypeAnnotation +rule IndexSigPrefix : ONEOF('+', '-') +rule IndexSigModifier : ONEOF("readonly", '?') + +rule IndexSignature: ONEOF( + ZEROORONE(IndexSigPrefix) + ZEROORONE(IndexSigModifier) + '[' + BindingIdentifier + ':' + "string" + ']' + ZEROORONE(IndexSigPrefix) + ZEROORONE(IndexSigModifier) + TypeAnnotation, + ZEROORONE(IndexSigPrefix) + ZEROORONE(IndexSigModifier) + '[' + BindingIdentifier + ':' + "number" + ']' + ZEROORONE(IndexSigPrefix) + ZEROORONE(IndexSigModifier) + TypeAnnotation) + attr.action.%1 : BuildStrIndexSig(%4, %10) + attr.action.%2 : BuildNumIndexSig(%4, %10) + +rule KeywordMethodName : ONEOF("return", + "throw", + "continue", + "if", + "import", + "export") + attr.action : BuildIdentifier() + +## rule MethodSignature: PropertyName ?opt CallSignature +## I inlined CallSignature +rule MethodSignature: ONEOF( + PropertyName + ZEROORONE(TypeParameters) + '(' + ZEROORONE(ParameterList) + ')' + ZEROORONE(TypeAnnotation), + PropertyName + '?' + ZEROORONE(TypeParameters) + '(' + ZEROORONE(ParameterList) + ')' + ZEROORONE(TypeAnnotation), + KeywordMethodName + ZEROORONE(TypeParameters) + '(' + ZEROORONE(ParameterList) + ')' + ZEROORONE(TypeAnnotation), + KeywordMethodName + '?' 
+ ZEROORONE(TypeParameters) + '(' + ZEROORONE(ParameterList) + ')' + ZEROORONE(TypeAnnotation)) + attr.action.%1,%2,%3,%4 : BuildFunction(%1) + attr.action.%1,%3 : AddParams(%4) + attr.action.%1,%3 : AddType(%6) + attr.action.%1,%3 : AddTypeGenerics(%2) + attr.action.%2,%4 : SetIsOptional(%1) + attr.action.%2,%4 : AddParams(%5) + attr.action.%2,%4 : AddType(%7) + attr.action.%2,%4 : AddTypeGenerics(%3) + +## rule TypeAliasDeclaration: type BindingIdentifier TypeParametersopt = Type ; +rule TypeAliasDeclaration: "type" + BindingIdentifier + ZEROORONE(TypeParameters) + '=' + Type + ZEROORONE(';') + attr.action : BuildTypeAlias(%2, %5) + attr.action : AddTypeGenerics(%3) + + +############################################################################################## +## A.2 Expression +############################################################################################## + +rule AllPropertyName : ONEOF(PropertyName, KeywordPropName) + +## PropertyDefinition: ( Modified ) IdentifierReference CoverInitializedName PropertyName : AssignmentExpression PropertyName CallSignature { FunctionBody } GetAccessor SetAccessor +rule PropertyDefinition: ONEOF(IdentifierReference, + CoverInitializedName, + AllPropertyName + ':' + AssignmentExpression, + ZEROORONE(AccessibilityModifier) + AllPropertyName + ZEROORONE(TypeParameters) + '(' + ZEROORONE(ParameterList) + ')' + + ZEROORONE(TypeAnnotation) + '{' + FunctionBody + '}', + GetAccessor, + SetAccessor, + SpreadElement) + attr.action.%3 : BuildFieldLiteral(%1, %3) + attr.action.%4 : BuildFunction(%2) + attr.action.%4 : AddType(%7) + attr.action.%4 : AddParams(%5) + attr.action.%4 : AddFunctionBody(%9) + +## GetAccessor: get PropertyName ( ) TypeAnnotationopt { FunctionBody } +rule GetAccessor: ONEOF(ZEROORONE(AccessibilityModifier) + "get" + PropertyName + '(' + ')' + ZEROORONE(TypeAnnotation) + '{' + FunctionBody + '}', + ZEROORONE(AccessibilityModifier) + "get" + '(' + "this" + ')' + ZEROORONE(TypeAnnotation) + '{' + FunctionBody + '}', + ZEROORONE(AccessibilityModifier) + "get" + PropertyName + '(' + ')' + ZEROORONE(TypeAnnotation) + ';', + ZEROORONE(AccessibilityModifier) + "get" + '(' + "this" + ')' + ZEROORONE(TypeAnnotation) + ';', + ZEROORONE(AccessibilityModifier) + "get" + '(' + ')' + ZEROORONE(TypeAnnotation) + '{' + FunctionBody + '}') + attr.action.%1,%3 : BuildFunction(%3) + attr.action.%1,%3 : SetGetAccessor() + attr.action.%1,%3 : AddType(%6) + attr.action.%1 : AddFunctionBody(%8) + attr.action.%1,%3 : AddModifier(%2) + attr.action.%1,%3 : AddModifier(%1) + attr.action.%2,%4,%5 : BuildFunction(%2) + attr.action.%2,%4,%5 : SetGetAccessor() + attr.action.%2,%4 : AddType(%6) + attr.action.%2 : AddFunctionBody(%8) + attr.action.%2,%4 : AddModifier(%2) + attr.action.%2,%4 : AddModifier(%1) + attr.action.%5 : AddType(%5) + attr.action.%5 : AddFunctionBody(%7) + +## SetAccessor: set PropertyName ( BindingIdentifierOrPattern TypeAnnotationopt ) { FunctionBody } +rule SetAccessor: ONEOF(ZEROORONE(AccessibilityModifier) + "set" + PropertyName + '(' + BindingIdentifierOrPattern + ZEROORONE(TypeAnnotation) + ')' + '{' + FunctionBody + '}', + ZEROORONE(AccessibilityModifier) + "set" + '(' + "this" + ',' + BindingIdentifierOrPattern + ZEROORONE(TypeAnnotation) + ')' + '{' + FunctionBody + '}', + ZEROORONE(AccessibilityModifier) + "set" + PropertyName + '(' + BindingIdentifierOrPattern + ZEROORONE(TypeAnnotation) + ')' + ';', + ZEROORONE(AccessibilityModifier) + "set" + '(' + "this" + ',' + BindingIdentifierOrPattern + ZEROORONE(TypeAnnotation) 
+ ')' + ';', + ZEROORONE(AccessibilityModifier) + "set" + '(' BindingIdentifierOrPattern + ZEROORONE(TypeAnnotation) + ')' + '{' + FunctionBody + '}') + attr.action.%1,%3 : AddType(%5, %6) + attr.action.%1,%3 : BuildFunction(%3) + attr.action.%1,%3 : SetSetAccessor() + attr.action.%1,%3 : AddParams(%5) + attr.action.%1 : AddFunctionBody(%9) + attr.action.%1,%3 : AddModifier(%2) + attr.action.%1,%3 : AddModifier(%1) + attr.action.%2,%4 : AddType(%6, %7) + attr.action.%2,%4,%5 : BuildFunction() + attr.action.%2,%4,%5 : SetSetAccessor() + attr.action.%2,%4 : AddParams(%6) + attr.action.%2 : AddFunctionBody(%10) + attr.action.%2,%4 : AddModifier(%2) + attr.action.%2,%4 : AddModifier(%1) + attr.action.%5 : AddParams(%4) + attr.action.%5 : AddFunctionBody(%8) + attr.action.%5 : AddModifier(%1) + +## We allow get/set as identifier for function name only. +## we don't want to see keywords as identifier happening in everywhere. +rule FunctionNameKeyword : ONEOF("get", "set") +rule FunctionName : ONEOF(BindingIdentifier, FunctionNameKeyword) + +## FunctionExpression: ( Modified ) function BindingIdentifieropt CallSignature { FunctionBody } +## FunctionExpression has the same syntax as FunctionDeclaration. But it appears as an expression. We will build it +## as a FunctionNode in AST. +rule FunctionExpression : + "function" + ZEROORONE(FunctionName) + ZEROORONE(TypeParameters) + '(' + ZEROORONE(ParameterList) + ')' + ZEROORONE(TypeAnnotation) + '{' + FunctionBody + '}' + attr.action : BuildFunction(%2) + attr.action : AddTypeGenerics(%3) + attr.action : AddParams(%5) + attr.action : AddType(%7) + attr.action : AddFunctionBody(%9) + +# ArrowFormalParameter is used in ArrowFunction, and Typescript modified +# it to be CallSignature. As usual, I'd like to inline CallSignature in +# those rules. +## ArrowFormalParameters: ( Modified ) CallSignature + +## Arguments: ( Modified ) TypeArgumentsopt ( ArgumentListopt ) +## UnaryExpression: ( Modified ) … < Type > UnaryExpression + +############################################################################################## +## A.4 Functions +############################################################################################## + +## FunctionDeclaration: ( Modified ) +## function BindingIdentifieropt CallSignature { FunctionBody } +## function BindingIdentifieropt CallSignature ; + +# NOTE: Inline Call signature to make it easier to write action. 
+rule FunctionDeclaration : ONEOF( + ZEROORMORE(AccessibilityModifier) + "function" + ZEROORONE(FunctionName) + ZEROORONE(TypeParameters) + '(' + ZEROORONE(ParameterList) + ')' + ZEROORONE(TypeAnnotation) + '{' + FunctionBody + '}', + ZEROORMORE(AccessibilityModifier) + "function" + ZEROORONE(FunctionName) + ZEROORONE(TypeParameters) + '(' + ZEROORONE(ParameterList) + ')' + ':' + AssertExpression + '{' + FunctionBody + '}', + ZEROORMORE(AccessibilityModifier) + "function" + ZEROORONE(FunctionName) + ZEROORONE(TypeParameters) + '(' + ZEROORONE(ParameterList) + ')' + ':' + IsExpression + '{' + FunctionBody + '}', + ZEROORMORE(AccessibilityModifier) + "function" + ZEROORONE(FunctionName) + ZEROORONE(TypeParameters) + '(' + ZEROORONE(ParameterList) + ')' + ZEROORONE(TypeAnnotation) + ZEROORONE(';'), + ZEROORMORE(AccessibilityModifier) + "function" + ZEROORONE(FunctionName) + ZEROORONE(TypeParameters) + '(' + ZEROORONE(ParameterList) + ')' + ':' + IsExpression + ZEROORONE(';')) + attr.action.%1,%2,%3,%4,%5 : BuildFunction(%3) + attr.action.%1,%2,%3,%4,%5 : AddParams(%6) + attr.action.%1,%2,%3,%4,%5 : AddModifier(%1) + attr.action.%1,%4 : AddType(%8) + attr.action.%1,%2,%3,%4,%5 : AddTypeGenerics(%4) + attr.action.%2,%3,%5 : AddAssert(%9) + attr.action.%1 : AddFunctionBody(%10) + attr.action.%2,%3 : AddFunctionBody(%11) + +############################################################################################## +## A.5 Interface +############################################################################################## + +##InterfaceDeclaration: interface BindingIdentifier TypeParametersopt InterfaceExtendsClauseopt ObjectType +rule InterfaceDeclaration : + "interface" + BindingIdentifier + ZEROORONE(TypeParameters) + ZEROORONE(InterfaceExtendsClause) + '{' + ZEROORONE(TypeBody) + '}' + attr.action : BuildStruct(%2) + attr.action : SetTSInterface() + attr.action : AddTypeGenerics(%3) + attr.action : AddStructField(%6) + attr.action : AddSuperInterface(%4) + +##InterfaceExtendsClause: extends ClassOrInterfaceTypeList +rule InterfaceExtendsClause: "extends" + ClassOrInterfaceTypeList + +##ClassOrInterfaceTypeList: ClassOrInterfaceType ClassOrInterfaceTypeList , ClassOrInterfaceType +rule ClassOrInterfaceTypeList: ONEOF(ClassOrInterfaceType, + ClassOrInterfaceTypeList + ',' + ClassOrInterfaceType) + +##ClassOrInterfaceType: TypeReference +rule ClassOrInterfaceType: TypeReference + +############################################################################################## +## A.6 Class declaration +############################################################################################## + +## ClassDeclaration: ( Modified ) class BindingIdentifieropt TypeParametersopt ClassHeritage { ClassBody } +## NOTE. 
I inlined ClassHeritage to avoid 'lookahead fail' +rule ClassDeclaration: + ZEROORMORE(Annotation) + ZEROORONE("abstract") + "class" + ZEROORONE(BindingIdentifier) + ZEROORONE(TypeParameters) + ZEROORONE(ClassExtendsClause) + ZEROORONE(ImplementsClause) + '{' + ZEROORONE(ClassBody) + '}' + attr.action : BuildClass(%4) + attr.action : AddModifier(%1) + attr.action : AddModifier(%2) + attr.action : AddTypeGenerics(%5) + attr.action : AddSuperClass(%6) + attr.action : AddSuperInterface(%7) + attr.action : AddClassBody(%9) + +rule Annotation : '@' + MemberExpression + ZEROORONE(Arguments) + attr.action : BuildAnnotation(%2) + attr.action : AddArguments(%3) + +## ClassHeritage: ( Modified ) ClassExtendsClauseopt ImplementsClauseopt +rule ClassHeritage: ZEROORONE(ClassExtendsClause) + ZEROORONE(ImplementsClause) + +## ClassExtendsClause: extends ClassType +rule ClassExtendsClause: ONEOF("extends" + ZEROORONE('(') + ClassType + ZEROORONE(')'), + "extends" + CallExpression) + +## ClassType: TypeReference +rule ClassType: TypeReference + +## ImplementsClause: implements ClassOrInterfaceTypeList +rule ImplementsClause: "implements" + ClassOrInterfaceTypeList + +## ClassElement: ( Modified ) ConstructorDeclaration PropertyMemberDeclaration IndexMemberDeclaration +rule ClassElement: ONEOF(ConstructorDeclaration, + PropertyMemberDeclaration, + IndexMemberDeclaration) + attr.property : Single + +## ConstructorDeclaration: AccessibilityModifieropt constructor ( ParameterListopt ) { FunctionBody } AccessibilityModifieropt constructor ( ParameterListopt ) ; +rule ConstructorDeclaration: ONEOF( + ZEROORONE(AccessibilityModifier) + "constructor" + '(' + ZEROORONE(ParameterList) + ')' + '{' + FunctionBody + '}', + ZEROORONE(AccessibilityModifier) + "constructor" + '(' + ZEROORONE(ParameterList) + ')' + ';') + attr.action.%1,%2 : BuildConstructor() + attr.action.%1,%2 : AddParams(%4) + attr.action.%1,%2 : AddModifier(%1) + attr.action.%1 : AddFunctionBody(%7) + +## PropertyMemberDeclaration: MemberVariableDeclaration MemberFunctionDeclaration MemberAccessorDeclaration +rule PropertyMemberDeclaration: ONEOF(MemberVariableDeclaration, + MemberFunctionDeclaration + ZEROORONE(';'), + MemberAccessorDeclaration + ZEROORONE(';'), + MemberExternalDeclaration) + +## MemberVariableDeclaration: AccessibilityModifieropt staticopt PropertyName TypeAnnotationopt Initializeropt ; +rule MemberVariableDeclaration: ONEOF( + ZEROORMORE(Annotation) + ZEROORMORE(AccessibilityModifier) + PropertySignatureName + ZEROORONE(TypeAnnotation) + ZEROORONE(Initializer) + ZEROORONE(';'), + ZEROORMORE(Annotation) + ZEROORMORE(AccessibilityModifier) + PropertySignatureName + '?' 
+ ZEROORONE(TypeAnnotation) + ZEROORONE(Initializer) + ZEROORONE(';'), + ZEROORMORE(Annotation) + ZEROORMORE(AccessibilityModifier) + "get" + '=' + ArrowFunction + ZEROORONE(';'), + ZEROORMORE(Annotation) + ZEROORMORE(AccessibilityModifier) + "set" + '=' + ArrowFunction + ZEROORONE(';'), + '#' + PropertySignatureName + ZEROORONE(TypeAnnotation) + ZEROORONE(Initializer) + ZEROORONE(';'), + '#' + "private" + ZEROORONE(TypeAnnotation) + ZEROORONE(Initializer) + ZEROORONE(';'), + ZEROORMORE(Annotation) + ZEROORMORE(AccessibilityModifier) + "if" + ZEROORONE(TypeAnnotation) + ZEROORONE(Initializer) + ZEROORONE(';')) + attr.action.%1: AddInitTo(%3, %5) + attr.action.%1: AddType(%3, %4) + attr.action.%1: AddModifierTo(%3, %2) + attr.action.%1: AddModifierTo(%3, %1) + attr.action.%1: BuildDecl(%4, %3) + attr.action.%2: AddInitTo(%3, %6) + attr.action.%2: AddType(%3, %5) + attr.action.%2: AddModifierTo(%3, %2) + attr.action.%2: AddModifierTo(%3, %1) + attr.action.%2: SetIsOptional(%3) + attr.action.%2: BuildDecl(%4, %3) + attr.action.%3,%4: BuildIdentifier(%3) + attr.action.%3,%4: AddInitTo(%5) + attr.action.%3,%4: AddModifier(%2) + attr.action.%3,%4: AddModifier(%1) + attr.action.%3,%4: BuildDecl() + attr.action.%5: AddInitTo(%2, %4) + attr.action.%6: BuildIdentifier(%2) + attr.action.%6: AddInitTo(%4) + attr.action.%5,%6: AddType(%3) + attr.action.%5,%6: AddModifier(%1) + attr.action.%5,%6: BuildDecl() + attr.action.%7 : BuildIdentifier(%3) + attr.action.%7 : AddType(%4) + attr.action.%7 : AddModifier(%1) + attr.action.%7 : AddModifier(%2) + attr.action.%7 : AddInitTo(%5) + attr.action.%7 : BuildDecl() + +rule KeywordMemberFunctionName : ONEOF("return", + "get", + "set", + "continue", + "break", + "const", + "let", + "var", + "if", + "else", + "for", + "try", + "export") + attr.action : BuildIdentifier() + +## MemberFunctionDeclaration: AccessibilityModifieropt staticopt PropertyName CallSignature { FunctionBody } AccessibilityModifieropt staticopt PropertyName CallSignature ; +#NOTE: I inlined CallSignature to make it easier for building function. +#rule CallSignature: ZEROORONE(TypeParameters) + '(' + ZEROORONE(ParameterList) + ')' + ZEROORONE(TypeAnnotation) +rule MemberFunctionDeclaration: ONEOF( + ZEROORONE(Annotation) + ZEROORMORE(AccessibilityModifier) + PropertyName + ZEROORONE(TypeParameters) + '(' + ZEROORONE(ParameterList) + ')' + ZEROORONE(TypeAnnotation) + '{' + FunctionBody + '}', + ZEROORONE(Annotation) + ZEROORMORE(AccessibilityModifier) + PropertyName + ZEROORONE(TypeParameters) + '(' + ZEROORONE(ParameterList) + ')' + ZEROORONE(TypeAnnotation) + ZEROORONE(';'), + ZEROORONE(Annotation) + ZEROORMORE(AccessibilityModifier) + PropertyName + '?' + ZEROORONE(TypeParameters) + '(' + ZEROORONE(ParameterList) + ')' + ZEROORONE(TypeAnnotation) + '{' + FunctionBody + '}', + ZEROORONE(Annotation) + ZEROORMORE(AccessibilityModifier) + PropertyName + '?' 
+ ZEROORONE(TypeParameters) + '(' + ZEROORONE(ParameterList) + ')' + ZEROORONE(TypeAnnotation) + ZEROORONE(';'), + ZEROORONE(Annotation) + ZEROORMORE(AccessibilityModifier) + PropertyName + ZEROORONE(TypeParameters) + '(' + ZEROORONE(ParameterList) + ')' + ':' + IsExpression + '{' + FunctionBody + '}', + ZEROORONE(Annotation) + ZEROORMORE(AccessibilityModifier) + PropertyName + ZEROORONE(TypeParameters) + '(' + ZEROORONE(ParameterList) + ')' + ':' + IsExpression + ZEROORONE(';'), + + ZEROORONE(Annotation) + ZEROORMORE(AccessibilityModifier) + KeywordMemberFunctionName + ZEROORONE(TypeParameters) + '(' + ZEROORONE(ParameterList) + ')' + ZEROORONE(TypeAnnotation) + '{' + FunctionBody + '}', + + ZEROORONE(Annotation) + ZEROORMORE(AccessibilityModifier) + KeywordMemberFunctionName + ZEROORONE(TypeParameters) + '(' + ZEROORONE(ParameterList) + ')' + ZEROORONE(TypeAnnotation) + ZEROORONE(';'), + '*' + '[' + MemberExpression + ']' + '(' + ')' + '{' + FunctionBody + '}') + attr.action.%1,%2,%3,%4,%5,%6,%7,%8,%9 : BuildFunction(%3) + attr.action.%1,%2,%3,%4,%5,%6,%7,%8 : AddModifier(%2) + attr.action.%1,%2,%3,%4,%5,%6,%7,%8 : AddModifier(%1) + attr.action.%1,%2,%5,%6,%7,%8 : AddTypeGenerics(%4) + attr.action.%1,%2,%5,%6,%7,%8 : AddParams(%6) + attr.action.%1,%2,%7,%8 : AddType(%8) + attr.action.%1,%7 : AddFunctionBody(%10) + attr.action.%3,%4 : AddTypeGenerics(%5) + attr.action.%3,%4 : AddParams(%7) + attr.action.%3,%4 : AddType(%9) + attr.action.%3 : AddFunctionBody(%11) + attr.action.%3,%4 : SetIsOptional(%3) + attr.action.%5,%6 : AddType(%9) + attr.action.%5 : AddFunctionBody(%11) + attr.action.%9 : SetIsIterator() + attr.action.%9 : AddFunctionBody(%8) + +## MemberAccessorDeclaration: AccessibilityModifieropt staticopt GetAccessor AccessibilityModifieropt staticopt SetAccessor +rule MemberAccessorDeclaration: ONEOF( + ZEROORONE(Annotation) + ZEROORMORE(AccessibilityModifier) + GetAccessor, + ZEROORONE(Annotation) + ZEROORMORE(AccessibilityModifier) + SetAccessor) + attr.action.%1,%2 : AddModifierTo(%3, %1) + +## IndexMemberDeclaration: IndexSignature ; +rule IndexMemberDeclaration: IndexSignature + ZEROORONE(';') + +rule MemberExternalDeclaration : ZEROORMORE(AccessibilityModifier) + "declare" + VariableDeclaration + ';' + attr.action : BuildExternalDeclaration(%3) + attr.action : AddModifier(%1) +################################################################################################# +# A.7 Enums +################################################################################################# +## EnumDeclaration: constopt enum BindingIdentifier { EnumBodyopt } +rule EnumDeclaration: + ZEROORONE("const") + "enum" + BindingIdentifier + '{' + ZEROORONE(EnumBody) + '}' + attr.action : BuildStruct(%3) + attr.action : SetTSEnum() + attr.action : AddStructField(%5) + +## EnumBody: EnumMemberList ,opt +rule EnumBody: EnumMemberList + ZEROORONE(',') + +## EnumMemberList: EnumMember EnumMemberList , EnumMember +rule EnumMemberList: ONEOF(EnumMember, + EnumMemberList + ',' + EnumMember) + +## EnumMember: PropertyName PropertyName = EnumValue +rule EnumMember: ONEOF(PropertyName, + PropertyName + '=' + EnumValue, + KeywordPropName + '=' + EnumValue) + attr.action.%2,%3 : AddInitTo(%1, %3) + +## EnumValue: AssignmentExpression +rule EnumValue: AssignmentExpression + +################################################################################################# +## A.8 Namespaces +################################################################################################# + 
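+## Illustrative examples (not normative, names A, B, f and AB are placeholders): source forms
+## the namespace rules in this section (NamespaceDeclaration, ExportNamespaceElement,
+## ImportAliasDeclaration, ...) are intended to accept:
+##   namespace A.B {
+##     export function f(): void {}
+##   }
+##   import AB = A.B;
+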
+##NamespaceDeclaration: namespace IdentifierPath { NamespaceBody } +rule NamespaceDeclaration: "namespace" + IdentifierPath + '{' + NamespaceBody + '}' + attr.property : Top + attr.action : BuildNamespace(%2) + attr.action : AddNamespaceBody(%4) + +##IdentifierPath: BindingIdentifier IdentifierPath . BindingIdentifier +rule IdentifierPath: ONEOF(BindingIdentifier, + IdentifierPath + '.' + BindingIdentifier) + attr.action.%2 : BuildField(%1, %3) + +##NamespaceBody: NamespaceElementsopt +rule NamespaceBody: ZEROORONE(NamespaceElements) + +##NamespaceElements: NamespaceElement NamespaceElements NamespaceElement +rule NamespaceElements: ONEOF(NamespaceElement, + NamespaceElements + NamespaceElement) + +##NamespaceElement: Statement LexicalDeclaration FunctionDeclaration GeneratorDeclaration ClassDeclaration InterfaceDeclaration TypeAliasDeclaration EnumDeclaration NamespaceDeclaration AmbientDeclaration ImportAliasDeclaration ExportNamespaceElement +rule NamespaceElement: ONEOF(Statement, + LexicalDeclaration + ZEROORONE(';'), + FunctionDeclaration, + #GeneratorDeclaration, + ClassDeclaration, + InterfaceDeclaration, + TypeAliasDeclaration, + EnumDeclaration, + NamespaceDeclaration, + #AmbientDeclaration, + ImportAliasDeclaration, + ExportNamespaceElement) + +##ExportNamespaceElement: export VariableStatement export LexicalDeclaration export FunctionDeclaration export GeneratorDeclaration export ClassDeclaration export InterfaceDeclaration export TypeAliasDeclaration export EnumDeclaration export NamespaceDeclaration export AmbientDeclaration export ImportAliasDeclaration +rule ExportNamespaceElement: ONEOF("export" + VariableStatement, + "export" + LexicalDeclaration + ZEROORONE(';'), + "export" + FunctionDeclaration, +# "export" + GeneratorDeclaration, + "export" + ClassDeclaration, + "export" + InterfaceDeclaration, + "export" + TypeAliasDeclaration, + "export" + EnumDeclaration, + "export" + NamespaceDeclaration, +# "export" + AmbientDeclaration, + "export" + ImportAliasDeclaration, + ExportDeclaration) + attr.action.%1,%2,%3,%4,%5,%6,%7,%8,%9 : BuildExport() + attr.action.%1,%2,%3,%4,%5,%6,%7,%8,%9 : SetPairs(%2) + +##ImportAliasDeclaration: import BindingIdentifier = EntityName ; +rule NamespaceImportPair: BindingIdentifier + '=' + EntityName + attr.action : BuildXXportAsPair(%3, %1) + +rule ImportAliasDeclaration: "import" + NamespaceImportPair + ';' + attr.action : BuildImport() + attr.action : SetPairs(%2) + +##EntityName: NamespaceName NamespaceName . IdentifierReference +rule EntityName: ONEOF(NamespaceName, + NamespaceName + '.' 
+ IdentifierReference) + attr.action.%2 : BuildField(%1, %3) + +################################################################################################# +# declare syntax +################################################################################################# + +rule ExternalDeclaration : ONEOF("declare" + NamespaceDeclaration, + "declare" + LexicalDeclaration + ZEROORONE(';'), + "declare" + ClassDeclaration, + "declare" + InterfaceDeclaration, + "declare" + FunctionDeclaration, + "declare" + VariableStatement, + "declare" + TypeAliasDeclaration, + "declare" + EnumDeclaration, + "declare" + ExternalModuleDeclaration) + attr.action.%1,%2,%3,%4,%5,%6,%7,%8,%9 : BuildExternalDeclaration(%2) + +## for global declaration +## https://www.typescriptlang.org/docs/handbook/declaration-files/templates/global-modifying-module-d-ts.html +rule GlobalDeclMember : ONEOF(NamespaceDeclaration, + LexicalDeclaration + ZEROORONE(';'), + ClassDeclaration, + InterfaceDeclaration, + FunctionDeclaration, + VariableStatement, + TypeAliasDeclaration, + EnumDeclaration, + ExternalModuleDeclaration) +rule GlobalDeclMembers : "global" + '{' + ZEROORMORE(GlobalDeclMember) + '}' + attr.action : BuildGlobalExternalDeclaration(%3) +rule GlobalDeclaration : "declare" + GlobalDeclMembers + attr.action : PassChild(%2) + +################################################################################################# +# A.9 Scripts and Modules +################################################################################################# + +# The module name could be an identifier or string literal. +rule ExternalModuleDeclaration : ONEOF("module" + IdentifierReference + '{' + DeclarationModule + '}', + "module" + Literal + '{' + DeclarationModule + '}') + attr.property : Top + attr.action.%1,%2 : BuildModule(%2) + attr.action.%1,%2 : AddModuleBody(%4) + attr.action.%2 : SetIsAmbient() + +#DeclarationElement: InterfaceDeclaration TypeAliasDeclaration NamespaceDeclaration AmbientDeclaration ImportAliasDeclaration +rule DeclarationElement: ONEOF(InterfaceDeclaration, + TypeAliasDeclaration, + LexicalDeclaration + ZEROORONE(';'), + VariableDeclaration, + FunctionDeclaration, + ClassDeclaration, + EnumDeclaration, + NamespaceDeclaration, + AmbientDeclaration, + GlobalDeclMembers, + ImportAliasDeclaration) + +# ImplementationModule: ImplementationModuleElementsopt +# ImplementationModuleElements: ImplementationModuleElement ImplementationModuleElements ImplementationModuleElement +# ImplementationModuleElement: ImplementationElement ImportDeclaration ImportAliasDeclaration ImportRequireDeclaration ExportImplementationElement ExportDefaultImplementationElement ExportListDeclaration ExportAssignment + +# DeclarationModule: DeclarationModuleElementsopt +rule DeclarationModule: ZEROORONE(DeclarationModuleElements) + +# DeclarationModuleElements: DeclarationModuleElement DeclarationModuleElements DeclarationModuleElement +rule DeclarationModuleElements: ONEOF(DeclarationModuleElement, + DeclarationModuleElements + DeclarationModuleElement) + +# DeclarationModuleElement: DeclarationElement ImportDeclaration ImportAliasDeclaration ExportDeclarationElement ExportDefaultDeclarationElement ExportListDeclaration ExportAssignment +rule DeclarationModuleElement: ONEOF(DeclarationElement, + ImportDeclaration, + ImportAliasDeclaration, + ExportDeclaration, + Statement) + #ExportDefaultDeclarationElement, + #ExportListDeclaration, + #ExportAssignment) + 
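+## Illustrative example (not normative): an ambient external module of the kind
+## ExternalModuleDeclaration and DeclarationModule describe; the module name "m" and its
+## members are placeholders.
+##   declare module "m" {
+##     export interface Point { x: number; y: number; }
+##     export function dist(a: Point, b: Point): number;
+##   }
+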
+#################################################################################################
+# A.10 Ambient
+# NOTE : I changed the rules a lot, making them quite different from the v1.8 spec.
+#################################################################################################
+
+# AmbientDeclaration: declare AmbientVariableDeclaration declare AmbientFunctionDeclaration declare AmbientClassDeclaration declare AmbientEnumDeclaration declare AmbientNamespaceDeclaration
+rule AmbientDeclaration: ONEOF("declare" + VariableDeclaration,
+                               "declare" + FunctionDeclaration,
+                               "declare" + ClassDeclaration,
+                               "declare" + EnumDeclaration,
+                               "declare" + NamespaceDeclaration)
+  attr.action.%1,%2,%3,%4,%5 : BuildExternalDeclaration(%2)
diff --git a/src/MapleFE/typescript/type.spec b/src/MapleFE/typescript/type.spec
new file mode 100644
index 0000000000000000000000000000000000000000..42f739d09b86610e5d10efc2602feae3666b4848
--- /dev/null
+++ b/src/MapleFE/typescript/type.spec
@@ -0,0 +1,81 @@
+# Copyright (C) [2020] Futurewei Technologies, Inc. All rights reserved.
+#
+# OpenArkFE is licensed under the Mulan PSL v2.
+# You can use this software according to the terms and conditions of the Mulan PSL v2.
+# You may obtain a copy of Mulan PSL v2 at:
+#
+#  http://license.coscl.org.cn/MulanPSL2
+#
+# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+# FIT FOR A PARTICULAR PURPOSE.
+# See the Mulan PSL v2 for more details.
+#
+###################################################################################
+# This file defines the TypeScript types.
+#
+# 1. Child rules have to be defined before parent rules.
+# 2. For type.spec, Autogen allows the programmer to have a special keyword section to
+#    define the individual keywords. Please keep in mind that this
+#    keyword section has to appear before the rule section.
+#
+#    The keyword section is defined through STRUCT().
+#
+# 3. Autogen will first read in the keyword section via the TypeGen functions, then
+#    read the rules section by inheriting BaseGen::Parse, etc.
+# 4. The keyword duplex is defined as a (keyword, TYPE) pair.
+#    [NOTE] TYPE should be one of those recognized by the "parser". Please refer
+#           to ../shared/include/type.h. The data representation of the same type
+#           doesn't have to be the same in each language; we just need to have the
+#           same name of TYPE. The physical representation of each type in each
+#           language will be handled by that language's HandleType().
+#           e.g. Char in Java and in C are different, but Autogen doesn't care.
+#           java2mpl and c2mpl will provide their own HandleType() to map this
+#           to the correct types in Maple IR.
+#
+#    So there are 4 type systems involved in the frontend.
+#    (1) Types in the .spec file, of each language.
+#    (2) Types in Autogen. A superset of (1) over all languages.
+#        Types in Autogen have an exact mapping to those in the Parser.
+#        Autogen will generate type-related files used in the parser, mapping
+#        types to it.
+#    (3) Types in the Parser. From here on, the physical representation is defined.
+#        Each language has its own HandleTypes() to map types in the Parser to
+#        those in Maple IR, considering physical representation.
+#    (4) Types in Maple IR. This is the only place where types have a physical
+#        representation.
+#
+# 5. The supported STRUCT in type.spec is:
+#      Keyword
+#    Right now, only one STRUCT is supported.
+#
+#    It is highly possible that a .spec file needs more than one STRUCT.
+################################################################################### + +# The types recoganized by Autogen are in shared/supported_types.def +# where Boolean, Byte, .. are defined. That said, "Boolean" and the likes are +# used in type.spec as a keyword. +# +# This STRUCT tells the primitive types + +STRUCT Keyword : (("boolean", Boolean), + ("string", String), + ("number", Number), + ("symbol", Symbol), + ("any", Any), + ("void", Void), + ("unknown", Unknown), + ("never", Never), + ("undefined", Undefined)) + +rule BooleanType : "boolean" +rule NumberType : "number" +rule SymbolType : "symbol" +rule AnyType : "any" +rule StringType : "string" +rule VoidType : "void" +rule UnknownType : "unknown" +rule NeverType : "never" +rule UndefinedType : "undefined" + +rule TYPE : ONEOF(BooleanType, NumberType, SymbolType, AnyType, StringType, VoidType, UnknownType, NeverType, UndefinedType) diff --git a/src/hir2mpl/BUILD.gn b/src/hir2mpl/BUILD.gn new file mode 100644 index 0000000000000000000000000000000000000000..070069af66364053092d7f2a42d8e74337924070 --- /dev/null +++ b/src/hir2mpl/BUILD.gn @@ -0,0 +1,431 @@ +# +# Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# +configs = [ "${MAPLEALL_ROOT}:mapleallcompilecfg" ] +if (COV_CHECK == 1) { + cflags = [ "-DENABLE_COV_CHECK=1" ] +} + +cflags += [ + "-DMIR_FEATURE_FULL=1", + "-DHIR2MPL_FULL_INFO_DUMP=1", + "-DJAVA_OBJ_IN_MFILE=1", +] + +if (ONLY_C == 1) { + cflags += [ + "-w", + "-DONLY_C", + ] +} + +if (MAST == 1) { + cflags += [ + "-w", + "-DENABLE_MAST", + ] +} + +include_directories = [ + "${HIR2MPL_ROOT}/common/include", + "${HIR2MPL_ROOT}/optimize/include", + "${HIR2MPL_ROOT}/bytecode_input/class/include", + "${HIR2MPL_ROOT}/bytecode_input/common/include", + "${HIR2MPL_ROOT}/bytecode_input/dex/include", + "${MAPLEALL_ROOT}/maple_ir/include", + "${MAPLEALL_ROOT}/maple_ipa/include", + "${MAPLEALL_ROOT}/maple_ipa/include/old", + "${MAPLEALL_ROOT}/maple_util/include", + "${MAPLEALL_ROOT}/maple_phase/include", + "${MAPLEALL_ROOT}/maple_driver/include", + "${MAPLEALL_ROOT}/mpl2mpl/include", + "${MAPLEALL_ROOT}/maple_me/include", + "${MAPLEALL_ROOT}/mempool/include", + "${THIRD_PARTY_ROOT}/bounds_checking_function/include", + "${HIR2MPL_ROOT}/ast_input/clang/include", + "${HIR2MPL_ROOT}/ast_input/clang/lib", + "${HIR2MPL_ROOT}/ast_input/common/include", + "${HIR2MPL_ROOT}/ast_input/maple/include", + "${HIR2MPL_ROOT}/ast_input/maple/lib", + "${LLVMINC}", + "${THIRD_PARTY_ROOT}/llvm_modified/clang/tools", +] + +if (MAST == 1) { + include_directories += [ + "${MAPLE_PARSER_PATH}/output/c/ast_gen/shared", + "${MAPLE_PARSER_PATH}/shared/include", + "${MAPLE_PARSER_PATH}/astopt/include", + ] +} + +static_library("lib_hir2mpl_common") { + sources = [ + "${HIR2MPL_ROOT}/common/src/base64.cpp", + "${HIR2MPL_ROOT}/common/src/basic_io.cpp", + "${HIR2MPL_ROOT}/common/src/fe_config_parallel.cpp", + "${HIR2MPL_ROOT}/common/src/fe_file_ops.cpp", + "${HIR2MPL_ROOT}/common/src/fe_file_type.cpp", + "${HIR2MPL_ROOT}/common/src/fe_function.cpp", + 
"${HIR2MPL_ROOT}/common/src/fe_function_phase_result.cpp", + "${HIR2MPL_ROOT}/common/src/fe_input_helper.cpp", + "${HIR2MPL_ROOT}/common/src/fe_java_string_manager.cpp", + "${HIR2MPL_ROOT}/common/src/fe_manager.cpp", + "${HIR2MPL_ROOT}/common/src/fe_options.cpp", + "${HIR2MPL_ROOT}/common/src/fe_struct_elem_info.cpp", + "${HIR2MPL_ROOT}/common/src/fe_timer_ns.cpp", + "${HIR2MPL_ROOT}/common/src/fe_type_hierarchy.cpp", + "${HIR2MPL_ROOT}/common/src/fe_type_manager.cpp", + "${HIR2MPL_ROOT}/common/src/fe_utils.cpp", + "${HIR2MPL_ROOT}/common/src/fe_utils_ast.cpp", + "${HIR2MPL_ROOT}/common/src/fe_utils_java.cpp", + "${HIR2MPL_ROOT}/common/src/feir_builder.cpp", + "${HIR2MPL_ROOT}/common/src/feir_stmt.cpp", + "${HIR2MPL_ROOT}/common/src/feir_type.cpp", + "${HIR2MPL_ROOT}/common/src/feir_type_helper.cpp", + "${HIR2MPL_ROOT}/common/src/feir_type_infer.cpp", + "${HIR2MPL_ROOT}/common/src/feir_var.cpp", + "${HIR2MPL_ROOT}/common/src/feir_var_name.cpp", + "${HIR2MPL_ROOT}/common/src/feir_var_reg.cpp", + "${HIR2MPL_ROOT}/common/src/feir_var_type_scatter.cpp", + "${HIR2MPL_ROOT}/common/src/hir2mpl_compiler.cpp", + "${HIR2MPL_ROOT}/common/src/hir2mpl_compiler_component.cpp", + "${HIR2MPL_ROOT}/common/src/hir2mpl_env.cpp", + "${HIR2MPL_ROOT}/common/src/hir2mpl_options.cpp", + "${HIR2MPL_ROOT}/common/src/hir2mpl_option.cpp", + "${HIR2MPL_ROOT}/common/src/simple_xml.cpp", + "${HIR2MPL_ROOT}/common/src/simple_zip.cpp", + "${HIR2MPL_ROOT}/common/src/generic_attrs.cpp", + "${HIR2MPL_ROOT}/common/src/enhance_c_checker.cpp", + "${HIR2MPL_ROOT}/common/src/feir_scope.cpp", + ] + include_dirs = include_directories + output_dir = "${root_out_dir}/ar" + + if (MAJOR_VERSION != "") { + cflags_cc += [ "-DMAJOR_VERSION=${MAJOR_VERSION}", ] + } + + if (MINOR_VERSION != "") { + cflags_cc += [ "-DMINOR_VERSION=${MINOR_VERSION}", ] + } + + if (RELEASE_VERSION != "") { + cflags_cc += [ "-DRELEASE_VERSION=\"${RELEASE_VERSION}\"", ] + } + + if (BUILD_VERSION != "") { + cflags_cc += [ "-DBUILD_VERSION=${BUILD_VERSION}", ] + } + + if (GIT_REVISION != "") { + cflags_cc += [ "-DGIT_REVISION=\"${GIT_REVISION}\"", ] + } +} + +static_library("lib_hir2mpl_optimize") { + sources = [ + "${HIR2MPL_ROOT}/optimize/src/ror.cpp", + "${HIR2MPL_ROOT}/optimize/src/conditional_operator.cpp", + "${HIR2MPL_ROOT}/optimize/src/feir_lower.cpp", + "${HIR2MPL_ROOT}/optimize/src/feir_bb.cpp", + "${HIR2MPL_ROOT}/optimize/src/feir_cfg.cpp", + "${HIR2MPL_ROOT}/optimize/src/feir_dfg.cpp" + ] + include_dirs = include_directories + output_dir = "${root_out_dir}/ar" +} + +static_library("lib_hir2mpl_input_helper") { + sources = [ "${HIR2MPL_ROOT}/common/src/fe_input_helper.cpp" ] + include_dirs = include_directories + output_dir = "${root_out_dir}/ar" +} + +executable("hir2mpl") { + sources = [ "${HIR2MPL_ROOT}/common/src/hir2mpl.cpp" ] + include_dirs = include_directories + deps = [ + ":lib_hir2mpl_ast_input_clang", + ":lib_hir2mpl_ast_input_common", + ":lib_hir2mpl_input_helper", + ":lib_hir2mpl_common", + ":lib_hir2mpl_optimize", + "${MAPLEALL_ROOT}/maple_driver:libdriver_option", + "${MAPLEALL_ROOT}/maple_driver:libmaple_driver", + "${MAPLEALL_ROOT}/maple_ir:libmplir", + "${MAPLEALL_ROOT}/maple_util:libmplutil", + "${MAPLEALL_ROOT}/mempool:libmempool", + "${MAPLEALL_ROOT}/mpl2mpl:libmpl2mpl", + "${THIRD_PARTY_ROOT}/bounds_checking_function:libHWSecureC", + ] + + if (ONLY_C != 1) { + deps += [ + ":lib_hir2mpl_bytecode_input_common", + ":lib_hir2mpl_bytecode_input_dex", + ":lib_hir2mpl_bytecode_input_class", + ] + } + + if (MAST == 1) { + deps += [ + 
":lib_hir2mpl_ast_input_maple", + ] + } + ldflags = [ + "-lz", + "-rdynamic", + "-L${LLVMLIBDIR}/", + ] + + if (MAST == 1) { + ldflags += [ + "-lstdc++fs", + "-L${MAPLE_PARSER_PATH}/output/c/ast_gen/shared", + ] + } + if (COV == 1) { + ldflags += ["--coverage"] + cflags_cc += [ + "-fprofile-arcs", + "-ftest-coverage" + ] + } + if (GPROF == 1) { + ldflags += ["-pg"] + cflags_cc += ["-pg"] + } +} + +include_bytecode_input_jbc_directories = [ + "${HIR2MPL_ROOT}/common/include", + "${HIR2MPL_ROOT}/optimize/include", + "${HIR2MPL_ROOT}/bytecode_input/class/include", + "${MAPLEALL_ROOT}/maple_ir/include", + "${MAPLEALL_ROOT}/maple_util/include", + "${MAPLEALL_ROOT}/maple_driver/include", + "${MAPLEALL_ROOT}/mempool/include", + "${THIRD_PARTY_ROOT}/bounds_checking_function/include", +] + +static_library("lib_hir2mpl_bytecode_input_class") { + sources = [ + "${HIR2MPL_ROOT}/bytecode_input/class/src/jbc_attr.cpp", + "${HIR2MPL_ROOT}/bytecode_input/class/src/jbc_attr_item.cpp", + "${HIR2MPL_ROOT}/bytecode_input/class/src/jbc_bb.cpp", + "${HIR2MPL_ROOT}/bytecode_input/class/src/jbc_class.cpp", + "${HIR2MPL_ROOT}/bytecode_input/class/src/jbc_class2fe_helper.cpp", + "${HIR2MPL_ROOT}/bytecode_input/class/src/jbc_class_const.cpp", + "${HIR2MPL_ROOT}/bytecode_input/class/src/jbc_class_const_pool.cpp", + "${HIR2MPL_ROOT}/bytecode_input/class/src/jbc_compiler_component.cpp", + "${HIR2MPL_ROOT}/bytecode_input/class/src/jbc_function.cpp", + "${HIR2MPL_ROOT}/bytecode_input/class/src/jbc_function_context.cpp", + "${HIR2MPL_ROOT}/bytecode_input/class/src/jbc_input.cpp", + "${HIR2MPL_ROOT}/bytecode_input/class/src/jbc_opcode.cpp", + "${HIR2MPL_ROOT}/bytecode_input/class/src/jbc_opcode_helper.cpp", + "${HIR2MPL_ROOT}/bytecode_input/class/src/jbc_stack2fe_helper.cpp", + "${HIR2MPL_ROOT}/bytecode_input/class/src/jbc_stack_helper.cpp", + "${HIR2MPL_ROOT}/bytecode_input/class/src/jbc_stmt.cpp", + "${HIR2MPL_ROOT}/bytecode_input/class/src/jbc_util.cpp", + ] + include_dirs = include_bytecode_input_jbc_directories + output_dir = "${root_out_dir}/ar" +} + +if (ONLY_C != 1) { + include_bytecode_input_common_directories = [ + "${HIR2MPL_ROOT}/common/include", + "${HIR2MPL_ROOT}/optimize/include", + "${HIR2MPL_ROOT}/bytecode_input/common/include", + "${HIR2MPL_ROOT}/bytecode_input/dex/include", + "${MAPLEALL_ROOT}/maple_ir/include", + "${MAPLEALL_ROOT}/maple_util/include", + "${MAPLEALL_ROOT}/maple_driver/include", + "${MAPLEALL_ROOT}/mempool/include", + "${THIRD_PARTY_ROOT}/bounds_checking_function/include", + ] + + static_library("lib_hir2mpl_bytecode_input_common") { + sources = [ + "${HIR2MPL_ROOT}/bytecode_input/common/src/ark_annotation_map.cpp", + "${HIR2MPL_ROOT}/bytecode_input/common/src/ark_annotation_processor.cpp", + "${HIR2MPL_ROOT}/bytecode_input/common/src/bc_class.cpp", + "${HIR2MPL_ROOT}/bytecode_input/common/src/bc_class2fe_helper.cpp", + "${HIR2MPL_ROOT}/bytecode_input/common/src/bc_function.cpp", + "${HIR2MPL_ROOT}/bytecode_input/common/src/bc_instruction.cpp", + "${HIR2MPL_ROOT}/bytecode_input/common/src/bc_io.cpp", + "${HIR2MPL_ROOT}/bytecode_input/common/src/bc_parser_base.cpp", + "${HIR2MPL_ROOT}/bytecode_input/common/src/bc_pragma.cpp", + "${HIR2MPL_ROOT}/bytecode_input/common/src/bc_util.cpp", + "${HIR2MPL_ROOT}/bytecode_input/common/src/rc_setter.cpp", + ] + include_dirs = include_bytecode_input_common_directories + output_dir = "${root_out_dir}/ar" + } + + include_bytecode_input_dex_directories = [ + "${HIR2MPL_ROOT}/common/include", + "${HIR2MPL_ROOT}/optimize/include", + 
"${HIR2MPL_ROOT}/bytecode_input/common/include", + "${HIR2MPL_ROOT}/bytecode_input/dex/include", + "${MAPLEALL_ROOT}/maple_ir/include", + "${MAPLEALL_ROOT}/maple_util/include", + "${MAPLEALL_ROOT}/maple_driver/include", + "${MAPLEALL_ROOT}/mempool/include", + "${THIRD_PARTY_ROOT}/bounds_checking_function/include", + ] + + static_library("lib_hir2mpl_bytecode_input_dex") { + sources = [ + "${HIR2MPL_ROOT}/bytecode_input/dex/src/class_linker.cpp", + "${HIR2MPL_ROOT}/bytecode_input/dex/src/class_loader_context.cpp", + "${HIR2MPL_ROOT}/bytecode_input/dex/src/dex_class.cpp", + "${HIR2MPL_ROOT}/bytecode_input/dex/src/dex_class2fe_helper.cpp", + "${HIR2MPL_ROOT}/bytecode_input/dex/src/dex_encode_value.cpp", + "${HIR2MPL_ROOT}/bytecode_input/dex/src/dex_file_util.cpp", + "${HIR2MPL_ROOT}/bytecode_input/dex/src/dex_op.cpp", + "${HIR2MPL_ROOT}/bytecode_input/dex/src/dex_pragma.cpp", + "${HIR2MPL_ROOT}/bytecode_input/dex/src/dex_strfac.cpp", + ] + + # for libdexfile + include_dirs_dex = [ "${THIRD_PARTY_ROOT}/aosp_modified/system/core/include" ] + include_dirs_libdexfile = [ + #for libdexfile -start + "${THIRD_PARTY_ROOT}/aosp_modified/system/core/liblog/include", + "${THIRD_PARTY_ROOT}/aosp_modified/system/core/libutils/include", + "${THIRD_PARTY_ROOT}/aosp_modified/system/core/base/include", + "${THIRD_PARTY_ROOT}/aosp_modified/system/core/libziparchive/include", + "${THIRD_PARTY_ROOT}/aosp_modified/art/libartpalette/include", + "${THIRD_PARTY_ROOT}/aosp_modified/art/libartbase", + "${THIRD_PARTY_ROOT}/aosp_modified/art/libdexfile", + "${THIRD_PARTY_ROOT}/aosp_modified/include", + "${THIRD_PARTY_ROOT}/aosp_modified/libnativehelper/include_jni", + + #for libdexfile -end + ] + + sources += [ + "${HIR2MPL_ROOT}/bytecode_input/dex/src/dex_parser.cpp", + "${HIR2MPL_ROOT}/bytecode_input/dex/src/dex_reader.cpp", + "${HIR2MPL_ROOT}/bytecode_input/dex/src/dexfile_factory.cpp", + "${HIR2MPL_ROOT}/bytecode_input/dex/src/dexfile_interface.cpp", + "${HIR2MPL_ROOT}/bytecode_input/dex/src/dexfile_libdexfile.cpp", + ] + deps_libdexfile = [ + "${THIRD_PARTY_ROOT}/aosp_modified/art/libdexfile:libdexfile", + "${THIRD_PARTY_ROOT}/aosp_modified/system/core/libziparchive:libziparchive", + "${THIRD_PARTY_ROOT}/aosp_modified/system/core/base:libbase", + ] + lib_dex = [ "${THIRD_PARTY_ROOT}/aosp_modified/system/core/liblog/liblog.a" ] + deps = deps_libdexfile + libs = lib_dex + include_dirs = + include_bytecode_input_dex_directories + include_dirs_libdexfile + include_dirs_dex + output_dir = "${root_out_dir}/ar" + } +} + +static_library("lib_hir2mpl_ast_input_clang_lib") { + sources = [ "${HIR2MPL_ROOT}/ast_input/clang/lib/ast_interface.cpp" ] + include_dirs = include_directories + defines = [ + "CLANG_ENABLE_ARCMT", + "CLANG_ENABLE_OBJC_REWRITER", + "CLANG_ENABLE_STATIC_ANALYZER", + "GTEST_HAS_RTTI=0", + "_DEBUG", + "_GNU_SOURCE", + "__STDC_CONSTANT_MACROS", + "__STDC_FORMAT_MACROS", + "__STDC_LIMIT_MACROS", + ] + + output_dir = "${root_out_dir}/ar" + libs = [ + "${LLVMLIBDIR}/libclang.so", + "${LLVMLIBDIR}/libclang-cpp.so", + "${LLVMLIBDIR}/libclangFrontend.a", + "${LLVMLIBDIR}/libclangDriver.a", + "${LLVMLIBDIR}/libclangSerialization.a", + "${LLVMLIBDIR}/libclangParse.a", + "${LLVMLIBDIR}/libclangSema.a", + "${LLVMLIBDIR}/libclangEdit.a", + "${LLVMLIBDIR}/libclangLex.a", + "${LLVMLIBDIR}/libclangAnalysis.a", + "${LLVMLIBDIR}/libclangAST.a", + "${LLVMLIBDIR}/libclangBasic.a", + "${LLVMLIBDIR}/libLLVMDemangle.a", + "${LLVMLIBDIR}/libLLVMMCParser.a", + "${LLVMLIBDIR}/libLLVMMC.a", + "${LLVMLIBDIR}/libLLVMBitReader.a", + 
"${LLVMLIBDIR}/libLLVMCore.a", + "${LLVMLIBDIR}/libLLVMBinaryFormat.a", + "${LLVMLIBDIR}/libLLVMProfileData.a", + "${LLVMLIBDIR}/libLLVMOption.a", + "${LLVMLIBDIR}/libLLVMSupport.a", + ] +} + +static_library("lib_hir2mpl_ast_input_common") { + sources = [ + "${HIR2MPL_ROOT}/ast_input/common/src/ast_decl.cpp", + ] + include_dirs = include_directories + output_dir = "${root_out_dir}/ar" +} + +static_library("lib_hir2mpl_ast_input_clang") { + sources = [ + "${HIR2MPL_ROOT}/ast_input/clang/lib/ast_type.cpp", + "${HIR2MPL_ROOT}/ast_input/clang/lib/ast_util.cpp", + "${HIR2MPL_ROOT}/ast_input/clang/src/ast_expr.cpp", + "${HIR2MPL_ROOT}/ast_input/clang/src/ast_function.cpp", + "${HIR2MPL_ROOT}/ast_input/clang/src/ast_parser.cpp", + "${HIR2MPL_ROOT}/ast_input/clang/src/ast_parser_builting_func.cpp", + "${HIR2MPL_ROOT}/ast_input/clang/src/ast_stmt.cpp", + "${HIR2MPL_ROOT}/ast_input/clang/src/ast_struct2fe_helper.cpp", + ] + include_dirs = include_directories + deps = [ ":lib_hir2mpl_ast_input_clang_lib" ] + output_dir = "${root_out_dir}/ar" +} + +if (MAST == 1) { + static_library("lib_hir2mpl_ast_input_maple_lib") { + sources = [ "${HIR2MPL_ROOT}/ast_input/maple/lib/maple_ast_interface.cpp" ] + include_dirs = include_directories + defines = [ + "DEBUG", + ] + output_dir = "${root_out_dir}/ar" + libs = [ + "${MAPLE_PARSER_PATH}/output/c/ast_gen/shared/genast.a", + "${MAPLE_PARSER_PATH}/output/c/shared/shared.a", + "${MAPLE_PARSER_PATH}/output/c/gen/gen.a", + "${MAPLE_PARSER_PATH}/output/c/astopt/astopt.a", + ] + } + + static_library("lib_hir2mpl_ast_input_maple") { + sources = [ + "${HIR2MPL_ROOT}/ast_input/maple/src/maple_ast_parser.cpp", + ] + include_dirs = include_directories + deps = [ ":lib_hir2mpl_ast_input_maple_lib" ] + output_dir = "${root_out_dir}/ar" + } +} + diff --git a/src/hir2mpl/CMakeLists.txt b/src/hir2mpl/CMakeLists.txt new file mode 100755 index 0000000000000000000000000000000000000000..e8d5a1176235593df8dff9873e0cf5ccee627a1a --- /dev/null +++ b/src/hir2mpl/CMakeLists.txt @@ -0,0 +1,490 @@ +# +# Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN AS IS BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# +if (${COV_CHECK} STREQUAL "1") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DENABLE_COV_CHECK=1") + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DENABLE_COV_CHECK=1") +endif() + +set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DMIR_FEATURE_FULL=1 -DHIR2MPL_FULL_INFO_DUMP=1 -DJAVA_OBJ_IN_MFILE=1") +set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DMIR_FEATURE_FULL=1 -DHIR2MPL_FULL_INFO_DUMP=1 -DJAVA_OBJ_IN_MFILE=1") + +if (${ONLY_C} STREQUAL "1") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -w -DONLY_C") + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -w -DONLY_C") +endif() + +if (${MAST} STREQUAL "1") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -w -DENABLE_MAST") + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -w -DENABLE_MAST") +endif() + +set(include_directories + ${HIR2MPL_ROOT}/common/include + ${HIR2MPL_ROOT}/optimize/include + ${HIR2MPL_ROOT}/bytecode_input/class/include + ${HIR2MPL_ROOT}/bytecode_input/common/include + ${HIR2MPL_ROOT}/bytecode_input/dex/include + ${MAPLEALL_ROOT}/maple_ir/include + ${MAPLEALL_ROOT}/maple_ipa/include + ${MAPLEALL_ROOT}/maple_ipa/include/old + ${MAPLEALL_ROOT}/maple_util/include + ${MAPLEALL_ROOT}/maple_phase/include + ${MAPLEALL_ROOT}/maple_driver/include + ${MAPLEALL_ROOT}/mpl2mpl/include + ${MAPLEALL_ROOT}/maple_me/include + ${MAPLEALL_ROOT}/mempool/include + ${THIRD_PARTY_ROOT}/bounds_checking_function/include + ${HIR2MPL_ROOT}/ast_input/clang/include + ${HIR2MPL_ROOT}/ast_input/clang/lib + ${HIR2MPL_ROOT}/ast_input/common/include + ${HIR2MPL_ROOT}/ast_input/maple/include + ${HIR2MPL_ROOT}/ast_input/maple/lib + ${LLVMINC} + ${THIRD_PARTY_ROOT}/llvm_modified/clang/tools +) + +if (${MAST} STREQUAL "1") + set(include_directories + ${include_directories} + ${MAPLE_PARSER_PATH}/output/c/ast_gen/shared + ${MAPLE_PARSER_PATH}/shared/include + ${MAPLE_PARSER_PATH}/astopt/include + ) +endif() + +set(src_lib_hir2mpl_common + ${HIR2MPL_ROOT}/common/src/base64.cpp + ${HIR2MPL_ROOT}/common/src/basic_io.cpp + ${HIR2MPL_ROOT}/common/src/fe_config_parallel.cpp + ${HIR2MPL_ROOT}/common/src/fe_file_ops.cpp + ${HIR2MPL_ROOT}/common/src/fe_file_type.cpp + ${HIR2MPL_ROOT}/common/src/fe_function.cpp + ${HIR2MPL_ROOT}/common/src/fe_function_phase_result.cpp + ${HIR2MPL_ROOT}/common/src/fe_input_helper.cpp + ${HIR2MPL_ROOT}/common/src/fe_java_string_manager.cpp + ${HIR2MPL_ROOT}/common/src/fe_manager.cpp + ${HIR2MPL_ROOT}/common/src/fe_options.cpp + ${HIR2MPL_ROOT}/common/src/fe_struct_elem_info.cpp + ${HIR2MPL_ROOT}/common/src/fe_timer_ns.cpp + ${HIR2MPL_ROOT}/common/src/fe_type_hierarchy.cpp + ${HIR2MPL_ROOT}/common/src/fe_type_manager.cpp + ${HIR2MPL_ROOT}/common/src/fe_utils.cpp + ${HIR2MPL_ROOT}/common/src/fe_utils_ast.cpp + ${HIR2MPL_ROOT}/common/src/fe_utils_java.cpp + ${HIR2MPL_ROOT}/common/src/feir_builder.cpp + ${HIR2MPL_ROOT}/common/src/feir_stmt.cpp + ${HIR2MPL_ROOT}/common/src/feir_type.cpp + ${HIR2MPL_ROOT}/common/src/feir_type_helper.cpp + ${HIR2MPL_ROOT}/common/src/feir_type_infer.cpp + ${HIR2MPL_ROOT}/common/src/feir_var.cpp + ${HIR2MPL_ROOT}/common/src/feir_var_name.cpp + ${HIR2MPL_ROOT}/common/src/feir_var_reg.cpp + ${HIR2MPL_ROOT}/common/src/feir_var_type_scatter.cpp + ${HIR2MPL_ROOT}/common/src/hir2mpl_compiler.cpp + ${HIR2MPL_ROOT}/common/src/hir2mpl_compiler_component.cpp + ${HIR2MPL_ROOT}/common/src/hir2mpl_env.cpp + ${HIR2MPL_ROOT}/common/src/hir2mpl_options.cpp + ${HIR2MPL_ROOT}/common/src/hir2mpl_option.cpp + ${HIR2MPL_ROOT}/common/src/simple_xml.cpp + ${HIR2MPL_ROOT}/common/src/simple_zip.cpp + ${HIR2MPL_ROOT}/common/src/generic_attrs.cpp + ${HIR2MPL_ROOT}/common/src/enhance_c_checker.cpp + 
${HIR2MPL_ROOT}/common/src/feir_scope.cpp +) +if (NOT ${MAJOR_VERSION} STREQUAL "") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DMAJOR_VERSION=${MAJOR_VERSION}") +endif() +if (NOT ${MINOR_VERSION} STREQUAL "") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DMINOR_VERSION=${MINOR_VERSION}") +endif() +if (NOT ${RELEASE_VERSION} STREQUAL "") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DRELEASE_VERSION=\\\"${RELEASE_VERSION}\\\"") +endif() +if (NOT ${BUILD_VERSION} STREQUAL "") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DBUILD_VERSION=${BUILD_VERSION}") +endif() +if (NOT ${GIT_REVISION} STREQUAL "") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DGIT_REVISION=\\\"${GIT_REVISION}\\\"") +endif() +#lib_hir2mpl_common +add_library(lib_hir2mpl_common STATIC ${src_lib_hir2mpl_common}) +set_target_properties(lib_hir2mpl_common PROPERTIES + COMPILE_FLAGS "" + INCLUDE_DIRECTORIES "${include_directories}" + LINK_LIBRARIES "" + ARCHIVE_OUTPUT_DIRECTORY "${MAPLE_BUILD_OUTPUT}/ar" + COMPILE_FLAGS "" +) + +set(src_lib_hir2mpl_optimize + ${HIR2MPL_ROOT}/optimize/src/ror.cpp + ${HIR2MPL_ROOT}/optimize/src/conditional_operator.cpp + ${HIR2MPL_ROOT}/optimize/src/feir_lower.cpp + ${HIR2MPL_ROOT}/optimize/src/feir_bb.cpp + ${HIR2MPL_ROOT}/optimize/src/feir_cfg.cpp + ${HIR2MPL_ROOT}/optimize/src/feir_dfg.cpp +) + +#lib_hir2mpl_optimize +add_library(lib_hir2mpl_optimize STATIC ${src_lib_hir2mpl_optimize}) +set_target_properties(lib_hir2mpl_optimize PROPERTIES + COMPILE_FLAGS "" + INCLUDE_DIRECTORIES "${include_directories}" + LINK_LIBRARIES "" + ARCHIVE_OUTPUT_DIRECTORY "${MAPLE_BUILD_OUTPUT}/ar" +) + +#lib_hir2mpl_input_helper +add_library(lib_hir2mpl_input_helper STATIC "${HIR2MPL_ROOT}/common/src/fe_input_helper.cpp") +set_target_properties(lib_hir2mpl_input_helper PROPERTIES + COMPILE_FLAGS "" + INCLUDE_DIRECTORIES "${include_directories}" + LINK_LIBRARIES "" + ARCHIVE_OUTPUT_DIRECTORY "${MAPLE_BUILD_OUTPUT}/ar" +) + +set(src_hir2mpl + ${HIR2MPL_ROOT}/common/src/hir2mpl.cpp +) + +set(deps_hir2mpl + ${LLVMLIBDIR}/libclang.so + ${LLVMLIBDIR}/libclang-cpp.so + ${LLVMLIBDIR}/libclangFrontend.a + ${LLVMLIBDIR}/libclangDriver.a + ${LLVMLIBDIR}/libclangSerialization.a + ${LLVMLIBDIR}/libclangParse.a + ${LLVMLIBDIR}/libclangSema.a + ${LLVMLIBDIR}/libclangEdit.a + ${LLVMLIBDIR}/libclangLex.a + ${LLVMLIBDIR}/libclangAnalysis.a + ${LLVMLIBDIR}/libclangAST.a + ${LLVMLIBDIR}/libclangBasic.a + ${LLVMLIBDIR}/libLLVMDemangle.a + ${LLVMLIBDIR}/libLLVMMCParser.a + ${LLVMLIBDIR}/libLLVMMC.a + ${LLVMLIBDIR}/libLLVMBitReader.a + ${LLVMLIBDIR}/libLLVMCore.a + ${LLVMLIBDIR}/libLLVMBinaryFormat.a + ${LLVMLIBDIR}/libLLVMProfileData.a + ${LLVMLIBDIR}/libLLVMOption.a + ${LLVMLIBDIR}/libLLVMSupport.a + lib_hir2mpl_ast_input_clang_lib + libmplphase + libcommandline + lib_hir2mpl_ast_input_clang + lib_hir2mpl_ast_input_common + lib_hir2mpl_input_helper + lib_hir2mpl_common + lib_hir2mpl_optimize + libdriver_option + libmaple_driver + libmplir + libmplutil + libmempool + libmpl2mpl + libHWSecureC +) + +if (NOT ${ONLY_C} STREQUAL "1") + list(APPEND deps_hir2mpl + lib_hir2mpl_bytecode_input_common + lib_hir2mpl_bytecode_input_dex + lib_hir2mpl_bytecode_input_class + libdexfile + libziparchive + libbase + ${THIRD_PARTY_ROOT}/aosp_modified/system/core/liblog/liblog.a + ) +endif() +if (${MAST} STREQUAL "1") + list(APPEND deps_hir2mpl + lib_hir2mpl_ast_input_maple_lib + lib_hir2mpl_ast_input_maple + ) +endif() + +set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -lz -rdynamic -L${LLVMLIBDIR}/") + +if (${MAST} STREQUAL "1") + set(CMAKE_EXE_LINKER_FLAGS 
"${CMAKE_EXE_LINKER_FLAGS} -lstdc++fs -L${MAPLE_PARSER_PATH}/output/c/ast_gen/shared") +endif() + +if (${COV} STREQUAL "1") + set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} --coverage") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fprofile-arcs -ftest-coverage") +endif() + +if (${GPROF} STREQUAL "1") + set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -pg") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -pg") +endif() + +#hir2mpl +add_executable(hir2mpl "${src_hir2mpl}") +set_target_properties(hir2mpl PROPERTIES + COMPILE_FLAGS "" + INCLUDE_DIRECTORIES "${include_directories}" + LINK_LIBRARIES "${deps_hir2mpl}" + RUNTIME_OUTPUT_DIRECTORY "${MAPLE_BUILD_OUTPUT}/bin" + +) + +set(inc_lib_hir2mpl_bytecode_input_class + ${HIR2MPL_ROOT}/common/include + ${HIR2MPL_ROOT}/optimize/include + ${HIR2MPL_ROOT}/bytecode_input/class/include + ${MAPLEALL_ROOT}/maple_ir/include + ${MAPLEALL_ROOT}/maple_util/include + ${MAPLEALL_ROOT}/maple_driver/include + ${MAPLEALL_ROOT}/mempool/include + ${THIRD_PARTY_ROOT}/bounds_checking_function/include +) + +set(src_lib_hir2mpl_bytecode_input_class + ${HIR2MPL_ROOT}/bytecode_input/class/src/jbc_attr.cpp + ${HIR2MPL_ROOT}/bytecode_input/class/src/jbc_attr_item.cpp + ${HIR2MPL_ROOT}/bytecode_input/class/src/jbc_bb.cpp + ${HIR2MPL_ROOT}/bytecode_input/class/src/jbc_class.cpp + ${HIR2MPL_ROOT}/bytecode_input/class/src/jbc_class2fe_helper.cpp + ${HIR2MPL_ROOT}/bytecode_input/class/src/jbc_class_const.cpp + ${HIR2MPL_ROOT}/bytecode_input/class/src/jbc_class_const_pool.cpp + ${HIR2MPL_ROOT}/bytecode_input/class/src/jbc_compiler_component.cpp + ${HIR2MPL_ROOT}/bytecode_input/class/src/jbc_function.cpp + ${HIR2MPL_ROOT}/bytecode_input/class/src/jbc_function_context.cpp + ${HIR2MPL_ROOT}/bytecode_input/class/src/jbc_input.cpp + ${HIR2MPL_ROOT}/bytecode_input/class/src/jbc_opcode.cpp + ${HIR2MPL_ROOT}/bytecode_input/class/src/jbc_opcode_helper.cpp + ${HIR2MPL_ROOT}/bytecode_input/class/src/jbc_stack2fe_helper.cpp + ${HIR2MPL_ROOT}/bytecode_input/class/src/jbc_stack_helper.cpp + ${HIR2MPL_ROOT}/bytecode_input/class/src/jbc_stmt.cpp + ${HIR2MPL_ROOT}/bytecode_input/class/src/jbc_util.cpp +) + +# lib_hir2mpl_bytecode_input_class +add_library(lib_hir2mpl_bytecode_input_class STATIC ${src_lib_hir2mpl_bytecode_input_class}) +set_target_properties(lib_hir2mpl_bytecode_input_class PROPERTIES + COMPILE_FLAGS "" + INCLUDE_DIRECTORIES "${inc_lib_hir2mpl_bytecode_input_class}" + LINK_LIBRARIES "" + RUNTIME_OUTPUT_DIRECTORY "${root_out_dir}/ar" +) + +if (NOT ${ONLY_C} STREQUAL "1") + set(inc_lib_hir2mpl_bytecode_input_common + ${HIR2MPL_ROOT}/common/include + ${HIR2MPL_ROOT}/optimize/include + ${HIR2MPL_ROOT}/bytecode_input/common/include + ${HIR2MPL_ROOT}/bytecode_input/dex/include + ${MAPLEALL_ROOT}/maple_ir/include + ${MAPLEALL_ROOT}/maple_util/include + ${MAPLEALL_ROOT}/maple_driver/include + ${MAPLEALL_ROOT}/mempool/include + ${THIRD_PARTY_ROOT}/bounds_checking_function/include + ) + + set(src_lib_hir2mpl_bytecode_input_common + ${HIR2MPL_ROOT}/bytecode_input/common/src/ark_annotation_map.cpp + ${HIR2MPL_ROOT}/bytecode_input/common/src/ark_annotation_processor.cpp + ${HIR2MPL_ROOT}/bytecode_input/common/src/bc_class.cpp + ${HIR2MPL_ROOT}/bytecode_input/common/src/bc_class2fe_helper.cpp + ${HIR2MPL_ROOT}/bytecode_input/common/src/bc_function.cpp + ${HIR2MPL_ROOT}/bytecode_input/common/src/bc_instruction.cpp + ${HIR2MPL_ROOT}/bytecode_input/common/src/bc_io.cpp + ${HIR2MPL_ROOT}/bytecode_input/common/src/bc_parser_base.cpp + ${HIR2MPL_ROOT}/bytecode_input/common/src/bc_pragma.cpp + 
${HIR2MPL_ROOT}/bytecode_input/common/src/bc_util.cpp + ${HIR2MPL_ROOT}/bytecode_input/common/src/rc_setter.cpp + ) + # lib_hir2mpl_bytecode_input_common + add_library(lib_hir2mpl_bytecode_input_common STATIC ${src_lib_hir2mpl_bytecode_input_common}) + set_target_properties(lib_hir2mpl_bytecode_input_common PROPERTIES + COMPILE_FLAGS "" + INCLUDE_DIRECTORIES "${inc_lib_hir2mpl_bytecode_input_common}" + LINK_LIBRARIES "" + RUNTIME_OUTPUT_DIRECTORY "${root_out_dir}/ar" + ) + + set(include_bytecode_input_dex_directories + ${HIR2MPL_ROOT}/common/include + ${HIR2MPL_ROOT}/optimize/include + ${HIR2MPL_ROOT}/bytecode_input/common/include + ${HIR2MPL_ROOT}/bytecode_input/dex/include + ${MAPLEALL_ROOT}/maple_ir/include + ${MAPLEALL_ROOT}/maple_util/include + ${MAPLEALL_ROOT}/maple_driver/include + ${MAPLEALL_ROOT}/mempool/include + ${THIRD_PARTY_ROOT}/bounds_checking_function/include + ) + + set(src_lib_hir2mpl_bytecode_input_dex + ${HIR2MPL_ROOT}/bytecode_input/dex/src/class_linker.cpp + ${HIR2MPL_ROOT}/bytecode_input/dex/src/class_loader_context.cpp + ${HIR2MPL_ROOT}/bytecode_input/dex/src/dex_class.cpp + ${HIR2MPL_ROOT}/bytecode_input/dex/src/dex_class2fe_helper.cpp + ${HIR2MPL_ROOT}/bytecode_input/dex/src/dex_encode_value.cpp + ${HIR2MPL_ROOT}/bytecode_input/dex/src/dex_file_util.cpp + ${HIR2MPL_ROOT}/bytecode_input/dex/src/dex_op.cpp + ${HIR2MPL_ROOT}/bytecode_input/dex/src/dex_pragma.cpp + ${HIR2MPL_ROOT}/bytecode_input/dex/src/dex_strfac.cpp + ${HIR2MPL_ROOT}/bytecode_input/dex/src/dex_parser.cpp + ${HIR2MPL_ROOT}/bytecode_input/dex/src/dex_reader.cpp + ${HIR2MPL_ROOT}/bytecode_input/dex/src/dexfile_factory.cpp + ${HIR2MPL_ROOT}/bytecode_input/dex/src/dexfile_interface.cpp + ${HIR2MPL_ROOT}/bytecode_input/dex/src/dexfile_libdexfile.cpp + ) + + list(APPEND include_bytecode_input_dex_directories + ${THIRD_PARTY_ROOT}/aosp_modified/system/core/include + ) + + list(APPEND include_bytecode_input_dex_directories + ${THIRD_PARTY_ROOT}/aosp_modified/system/core/liblog/include + ${THIRD_PARTY_ROOT}/aosp_modified/system/core/libutils/include + ${THIRD_PARTY_ROOT}/aosp_modified/system/core/base/include + ${THIRD_PARTY_ROOT}/aosp_modified/system/core/libziparchive/include + ${THIRD_PARTY_ROOT}/aosp_modified/art/libartpalette/include + ${THIRD_PARTY_ROOT}/aosp_modified/art/libartbase + ${THIRD_PARTY_ROOT}/aosp_modified/art/libdexfile + ${THIRD_PARTY_ROOT}/aosp_modified/include + ${THIRD_PARTY_ROOT}/aosp_modified/libnativehelper/include_jni + ) + + set(deps_libdexfile + libdexfile + libziparchive + libbase + ${THIRD_PARTY_ROOT}/aosp_modified/system/core/liblog/liblog.a + ) + + # lib_hir2mpl_bytecode_input_dex + add_library(lib_hir2mpl_bytecode_input_dex STATIC ${src_lib_hir2mpl_bytecode_input_dex}) + set_target_properties(lib_hir2mpl_bytecode_input_dex PROPERTIES + COMPILE_FLAGS "" + INCLUDE_DIRECTORIES "${include_bytecode_input_dex_directories}" + LINK_LIBRARIES "${deps_libdexfile}" + RUNTIME_OUTPUT_DIRECTORY "${root_out_dir}/ar" + ) +endif() + +set(src_lib_hir2mpl_ast_input_clang_lib + ${HIR2MPL_ROOT}/ast_input/clang/lib/ast_interface.cpp +) + +set(deps_lib_hir2mpl_ast_input_clang_lib + ${LLVMLIBDIR}/libclang.so + ${LLVMLIBDIR}/libclang-cpp.so + ${LLVMLIBDIR}/libclangFrontend.a + ${LLVMLIBDIR}/libclangDriver.a + ${LLVMLIBDIR}/libclangSerialization.a + ${LLVMLIBDIR}/libclangParse.a + ${LLVMLIBDIR}/libclangSema.a + ${LLVMLIBDIR}/libclangEdit.a + ${LLVMLIBDIR}/libclangLex.a + ${LLVMLIBDIR}/libclangAnalysis.a + ${LLVMLIBDIR}/libclangAST.a + ${LLVMLIBDIR}/libclangBasic.a + ${LLVMLIBDIR}/libLLVMDemangle.a + 
${LLVMLIBDIR}/libLLVMMCParser.a + ${LLVMLIBDIR}/libLLVMMC.a + ${LLVMLIBDIR}/libLLVMBitReader.a + ${LLVMLIBDIR}/libLLVMCore.a + ${LLVMLIBDIR}/libLLVMBinaryFormat.a + ${LLVMLIBDIR}/libLLVMProfileData.a + ${LLVMLIBDIR}/libLLVMOption.a + ${LLVMLIBDIR}/libLLVMSupport.a +) + +set(defines + CLANG_ENABLE_ARCMT + CLANG_ENABLE_OBJC_REWRITER + CLANG_ENABLE_STATIC_ANALYZER + GTEST_HAS_RTTI=0 + _DEBUG + _GNU_SOURCE + __STDC_CONSTANT_MACROS + __STDC_FORMAT_MACROS + __STDC_LIMIT_MACROS +) + +# lib_hir2mpl_ast_input_clang_lib +add_library(lib_hir2mpl_ast_input_clang_lib STATIC ${src_lib_hir2mpl_ast_input_clang_lib}) +set_target_properties(lib_hir2mpl_ast_input_clang_lib PROPERTIES + COMPILE_FLAGS "" + INCLUDE_DIRECTORIES "${include_directories}" + LINK_LIBRARIES "${deps_lib_hir2mpl_ast_input_clang_lib}" + RUNTIME_OUTPUT_DIRECTORY "${root_out_dir}/ar" + DEFINE_SYMBOL "${defines}" +) + +# lib_hir2mpl_ast_input_common +add_library(lib_hir2mpl_ast_input_common STATIC "${HIR2MPL_ROOT}/ast_input/common/src/ast_decl.cpp") +set_target_properties(lib_hir2mpl_ast_input_common PROPERTIES + COMPILE_FLAGS "" + INCLUDE_DIRECTORIES "${include_directories}" + LINK_LIBRARIES "" + RUNTIME_OUTPUT_DIRECTORY "${root_out_dir}/ar" +) + +set(src_lib_hir2mpl_ast_input_clang + ${HIR2MPL_ROOT}/ast_input/clang/lib/ast_type.cpp + ${HIR2MPL_ROOT}/ast_input/clang/lib/ast_util.cpp + ${HIR2MPL_ROOT}/ast_input/clang/src/ast_expr.cpp + ${HIR2MPL_ROOT}/ast_input/clang/src/ast_function.cpp + ${HIR2MPL_ROOT}/ast_input/clang/src/ast_parser.cpp + ${HIR2MPL_ROOT}/ast_input/clang/src/ast_parser_builting_func.cpp + ${HIR2MPL_ROOT}/ast_input/clang/src/ast_stmt.cpp + ${HIR2MPL_ROOT}/ast_input/clang/src/ast_struct2fe_helper.cpp +) + +set(deps_lib_hir2mpl_ast_input_clang + lib_hir2mpl_ast_input_clang_lib +) + +# lib_hir2mpl_ast_input_clang +add_library(lib_hir2mpl_ast_input_clang STATIC ${src_lib_hir2mpl_ast_input_clang}) +set_target_properties(lib_hir2mpl_ast_input_clang PROPERTIES + COMPILE_FLAGS "" + INCLUDE_DIRECTORIES "${include_directories}" + LINK_LIBRARIES "${deps_lib_hir2mpl_ast_input_clang}" + RUNTIME_OUTPUT_DIRECTORY "${root_out_dir}/ar" +) + +if (${MAST} STREQUAL "1") + set(deps_lib_hir2mpl_ast_input_maple_lib + ${MAPLE_PARSER_PATH}/output/c/ast_gen/shared/genast.a + ${MAPLE_PARSER_PATH}/output/c/shared/shared.a + ${MAPLE_PARSER_PATH}/output/c/gen/gen.a + ${MAPLE_PARSER_PATH}/output/c/astopt/astopt.a + ) +# lib_hir2mpl_ast_input_maple_lib + add_library(lib_hir2mpl_ast_input_maple_lib STATIC "${HIR2MPL_ROOT}/ast_input/maple/lib/maple_ast_interface.cpp") + set_target_properties(lib_hir2mpl_ast_input_maple_lib PROPERTIES + COMPILE_FLAGS "" + INCLUDE_DIRECTORIES "${include_directories}" + LINK_LIBRARIES "${deps_lib_hir2mpl_ast_input_maple_lib}" + RUNTIME_OUTPUT_DIRECTORY "${root_out_dir}/ar" + DEFINE_SYMBOL "DEBUG" + ) +# lib_hir2mpl_ast_input_maple + add_library(lib_hir2mpl_ast_input_maple STATIC "${HIR2MPL_ROOT}/ast_input/maple/src/maple_ast_parser.cpp") + set_target_properties(lib_hir2mpl_ast_input_maple PROPERTIES + COMPILE_FLAGS "" + INCLUDE_DIRECTORIES "${include_directories}" + LINK_LIBRARIES "lib_hir2mpl_ast_input_maple_lib" + RUNTIME_OUTPUT_DIRECTORY "${root_out_dir}/ar" + DEFINE_SYMBOL "DEBUG" + ) +endif() diff --git a/src/hir2mpl/README.md b/src/hir2mpl/README.md new file mode 100644 index 0000000000000000000000000000000000000000..ce48eeaad2359730fbff5b92d4f0f74db6395bdf --- /dev/null +++ b/src/hir2mpl/README.md @@ -0,0 +1,108 @@ +``` +# +# Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. 
+#
+# OpenArkCompiler is licensed under Mulan PSL v2.
+# You can use this software according to the terms and conditions of the Mulan PSL v2.
+# You may obtain a copy of Mulan PSL v2 at:
+#
+# http://license.coscl.org.cn/MulanPSL2
+#
+# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+# FIT FOR A PARTICULAR PURPOSE.
+# See the Mulan PSL v2 for more details.
+#
+```
+
+hir2mpl accepts .ast, .dex, .class, and .jar files as input; .class and .jar are currently not enabled.
+hir2mpl selects the corresponding compilation pipeline based on the input type.
+
+## Building hir2mpl
+
+source build/envsetup.sh arm release/debug
+
+make hir2mpl
+
+## Usage
+
+Run hir2mpl -h to view the available options.
+
+## Compile .dex
+
+First, use java2dex to generate the required xxx.dex file from xxx.java.
+
+bash ${OUT_ROOT}/tools/bin/java2dex -o xxx.dex -p -i xxx.java
+
+If xxx.dex depends on other files, use the -mplt option to pass the .mplt files of those dependencies;
+the depended-on .mplt files must be compiled first. Alternatively, you can use -Xbootclasspath to
+load the dependent JAR package.
+
+${OUT_ROOT}/aarch64-clang-release/bin/hir2mpl -mplt xxx.mplt xxx.dex -o xxx.mpl
+
+${OUT_ROOT}/aarch64-clang-release/bin/hir2mpl -Xbootclasspath=xxx.jar xxx.dex -o xxx.mpl
+
+Note:
+1. GC mode is used for memory management by default.
+   To use RC mode instead, switch with the "-rc" option.
+2. Options in the "ast Compile Options" and "Security Check" groups do not apply to .dex input.
+
+## Compile .ast
+
+First, use clang to generate the required xxx.ast file from xxx.c.
+
+${MAPLE_ROOT}/tools/clang+llvm-10.0.0-x86_64-linux-gnu-ubuntu-18.04/bin/clang -emit-ast xxx.c -o xxx.ast
+
+${OUT_ROOT}/aarch64-clang-release/bin/hir2mpl xxx.ast -o xxx.mpl
+
+Note:
+1. Backend support for variable-length arrays is still incomplete, so the frontend rejects variable-length
+   arrays by default. Use the "-enable-variable-array" option to let the frontend accept them.
+
+2. hir2mpl supports static security checks and the insertion of check statements based on source-code
+   annotations. However, the source code must be compiled with the Clang binary provided by the Maple
+   community. Enable the corresponding checks with the "-npe-check-dynamic" and "-boundary-check-dynamic"
+   options.
+
+3. Options in the "BC Bytecode Compile Options" and "On Demand Type Creation" groups do not apply to
+   .ast input.
+
+## UT Test
+
+Build the UT target: make hir2mplUT
+
+Before running the UT tests, compile the libcore that they depend on.
+ +make libcore + +bash $MAPLE_ROOT/src/hir2mpl/test/hir2mplUT_check.sh + +## Directory structure + +hir2mpl +├── ast_input # .ast/.mast parser +│   ├── clang +│   ├── common +│   └── maple +├── BUILD.gn # build configuration files +├── bytecode_input # .class/.dex parser +│   ├── class +│   ├── common +│   └── dex +├── common # feir part +│   ├── include +│   └── src +├── optimize # lower/cfg/dfg(TODO Refactoring) and pattern match opt +│   ├── include +│   └── src +├── README.md +└── test # UT test + ├── ast_input + ├── BUILD.gn + ├── bytecode_input + ├── common + ├── cov_check.sh + ├── hir2mplUT_check.sh + └── ops_ut_check.sh diff --git a/src/hir2mpl/ast_input/clang/include/ast_builtin_func.def b/src/hir2mpl/ast_input/clang/include/ast_builtin_func.def new file mode 100644 index 0000000000000000000000000000000000000000..4577b601397825a0f5b93205b2ed5d00fbc5ed66 --- /dev/null +++ b/src/hir2mpl/ast_input/clang/include/ast_builtin_func.def @@ -0,0 +1,33 @@ +// BUILTIN_FUNC(funcName) +BUILTIN_FUNC(printf) +BUILTIN_FUNC(snprintf) +BUILTIN_FUNC(strncpy) +BUILTIN_FUNC(strcpy) +BUILTIN_FUNC(strcmp) +BUILTIN_FUNC(strlen) +BUILTIN_FUNC(strchr) +BUILTIN_FUNC(strrchr) +BUILTIN_FUNC(memcmp) +BUILTIN_FUNC(memcpy) +BUILTIN_FUNC(memset) +BUILTIN_FUNC(memmove) +BUILTIN_FUNC(__memcpy_chk) +BUILTIN_FUNC(__memset_chk) +BUILTIN_FUNC(__strncpy_chk) +BUILTIN_FUNC(__strcpy_chk) +BUILTIN_FUNC(__stpcpy_chk) +BUILTIN_FUNC(__vsprintf_chk) +BUILTIN_FUNC(__vsnprintf_chk) +BUILTIN_FUNC(__snprintf_chk) +BUILTIN_FUNC(__sprintf_chk) +BUILTIN_FUNC(__strcat_chk) +BUILTIN_FUNC(__strncat_chk) +BUILTIN_FUNC(__mempcpy_chk) +BUILTIN_FUNC(__memmove_chk) + +BUILTIN_FUNC(abs) +BUILTIN_FUNC(abort) +BUILTIN_FUNC(frame_address) +BUILTIN_FUNC(setjmp) +BUILTIN_FUNC(longjmp) +BUILTIN_FUNC(mul_overflow) diff --git a/src/hir2mpl/ast_input/clang/include/ast_expr.h b/src/hir2mpl/ast_input/clang/include/ast_expr.h new file mode 100644 index 0000000000000000000000000000000000000000..ec1ad8ca92f091dbe099df7e3fa80ae441afc41c --- /dev/null +++ b/src/hir2mpl/ast_input/clang/include/ast_expr.h @@ -0,0 +1,1863 @@ +/* + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef HIR2MPL_AST_INPUT_INCLUDE_AST_EXPR_H +#define HIR2MPL_AST_INPUT_INCLUDE_AST_EXPR_H +#include +#include "ast_op.h" +#include "feir_stmt.h" + +namespace maple { +class ASTDecl; +class ASTFunc; +class ASTStmt; +struct ASTValue { + union Value { + uint8 u8; + uint16 u16; + uint32 u32; + uint64 u64; + int8 i8; + int16 i16; + int32 i32; + float f32; + int64 i64; + double f64; + UStrIdx strIdx; + } val = { 0 }; + PrimType pty = PTY_begin; + + PrimType GetPrimType() const { + return pty; + } + + MIRConst *Translate2MIRConst() const; +}; + +enum class ParentFlag { + kNoParent, + kArrayParent, + kStructParent +}; + +enum EvaluatedFlag : uint8 { + kEvaluatedAsZero, + kEvaluatedAsNonZero, + kNotEvaluated +}; + +class ASTExpr { + public: + explicit ASTExpr(ASTOp o) : op(o) {} + virtual ~ASTExpr() = default; + UniqueFEIRExpr Emit2FEExpr(std::list &stmts) const; + UniqueFEIRExpr ImplicitInitFieldValue(MIRType &type, std::list &stmts) const; + + virtual MIRType *GetType() const { + return mirType; + } + + void SetType(MIRType *type) { + mirType = type; + } + + void SetASTDecl(ASTDecl *astDecl) { + refedDecl = astDecl; + } + + ASTDecl *GetASTDecl() const { + return GetASTDeclImpl(); + } + + ASTOp GetASTOp() const { + return op; + } + + void SetConstantValue(ASTValue *val) { + isConstantFolded = (val != nullptr); + value = val; + } + + void SetIsConstantFolded(bool flag) { + isConstantFolded = flag; + } + + bool IsConstantFolded() const { + return isConstantFolded; + } + + ASTValue *GetConstantValue() const { + return GetConstantValueImpl(); + } + + MIRConst *GenerateMIRConst() const { + return GenerateMIRConstImpl(); + } + + void SetSrcLoc(const Loc &l) { + loc = l; + } + + Loc GetSrcLoc() const { + return loc; + } + + uint32 GetSrcFileIdx() const { + return loc.fileIdx; + } + + uint32 GetSrcFileLineNum() const { + return loc.line; + } + + uint32 GetSrcFileColumn() const { + return loc.column; + } + + void SetEvaluatedFlag(EvaluatedFlag flag) { + evaluatedflag = flag; + return; + } + + EvaluatedFlag GetEvaluatedFlag() const { + return evaluatedflag; + } + + bool IsRValue() const { + return isRValue; + } + + void SetRValue(bool flag) { + isRValue = flag; + } + + virtual void SetShortCircuitIdx(uint32 leftIdx, uint32 rightIdx) {} + + ASTExpr *IgnoreParens() { + return IgnoreParensImpl(); + } + + protected: + virtual ASTValue *GetConstantValueImpl() const { + return value; + } + virtual MIRConst *GenerateMIRConstImpl() const; + virtual UniqueFEIRExpr Emit2FEExprImpl(std::list &stmts) const = 0; + virtual ASTExpr *IgnoreParensImpl(); + + virtual ASTDecl *GetASTDeclImpl() const { + return refedDecl; + } + + ASTOp op; + MIRType *mirType = nullptr; + ASTDecl *refedDecl = nullptr; + bool isConstantFolded = false; + ASTValue *value = nullptr; + Loc loc = {0, 0, 0}; + EvaluatedFlag evaluatedflag = kNotEvaluated; + bool isRValue = false; +}; + +class ASTCastExpr : public ASTExpr { + public: + explicit ASTCastExpr(MapleAllocator &allocatorIn) : ASTExpr(kASTOpCast) { + (void)allocatorIn; + } + ~ASTCastExpr() = default; + + void SetASTExpr(ASTExpr *expr) { + child = expr; + } + + const ASTExpr *GetASTExpr() const { + return child; + } + + void SetSrcType(MIRType *type) { + src = type; + } + + const MIRType *GetSrcType() const { + return src; + } + + void SetDstType(MIRType *type) { + dst = type; + } + + const MIRType *GetDstType() const { + return dst; + } + + void SetNeededCvt(bool cvt) { + isNeededCvt = cvt; + } + + bool IsNeededCvt(const UniqueFEIRExpr &expr) const { + if (!isNeededCvt || expr == 
nullptr || dst == nullptr) { + return false; + } + PrimType srcPrimType = expr->GetPrimType(); + return srcPrimType != dst->GetPrimType() && srcPrimType != PTY_agg && srcPrimType != PTY_void; + } + + void SetComplexType(MIRType *type) { + complexType = type; + } + + void SetComplexCastKind(bool flag) { + imageZero = flag; + } + + void SetIsArrayToPointerDecay(bool flag) { + isArrayToPointerDecay = flag; + } + + void SetIsFunctionToPointerDecay(bool flag) { + isFunctionToPointerDecay = flag; + } + + bool IsBuilinFunc() const { + return isBuilinFunc; + } + + void SetBuilinFunc(bool flag) { + isBuilinFunc = flag; + } + + void SetUnionCast(bool flag) { + isUnoinCast = flag; + } + + void SetBitCast(bool flag) { + isBitCast = flag; + } + + void SetVectorSplat(bool flag) { + isVectorSplat = flag; + } + + protected: + MIRConst *GenerateMIRConstImpl() const override; + UniqueFEIRExpr Emit2FEExprImpl(std::list &stmts) const override; + + ASTDecl *GetASTDeclImpl() const override { + return child->GetASTDecl(); + } + + UniqueFEIRExpr Emit2FEExprForComplex(const UniqueFEIRExpr &subExpr, const UniqueFEIRType &srcType, + std::list &stmts) const; + UniqueFEIRExpr Emit2FEExprForFunctionOrArray2Pointer(std::list &stmts) const; + + private: + MIRConst *GenerateMIRDoubleConst() const; + MIRConst *GenerateMIRFloatConst() const; + MIRConst *GenerateMIRIntConst() const; + UniqueFEIRExpr EmitExprVdupVector(PrimType primtype, UniqueFEIRExpr &subExpr) const; + + ASTExpr *child = nullptr; + MIRType *src = nullptr; + MIRType *dst = nullptr; + bool isNeededCvt = false; + bool isBitCast = false; + MIRType *complexType = nullptr; + bool imageZero = false; + bool isArrayToPointerDecay = false; + bool isFunctionToPointerDecay = false; + bool isBuilinFunc = false; + bool isUnoinCast = false; + bool isVectorSplat = false; +}; + +class ASTDeclRefExpr : public ASTExpr { + public: + explicit ASTDeclRefExpr(MapleAllocator &allocatorIn) : ASTExpr(kASTOpRef) { + (void)allocatorIn; + } + ~ASTDeclRefExpr() = default; + + protected: + MIRConst *GenerateMIRConstImpl() const override; + UniqueFEIRExpr Emit2FEExprImpl(std::list &stmts) const override; +}; + +class ASTUnaryOperatorExpr : public ASTExpr { + public: + explicit ASTUnaryOperatorExpr(ASTOp o) : ASTExpr(o) {} + ASTUnaryOperatorExpr(MapleAllocator &allocatorIn, ASTOp o) : ASTExpr(o) { + (void)allocatorIn; + } + virtual ~ASTUnaryOperatorExpr() = default; + void SetUOExpr(ASTExpr *astExpr); + + const ASTExpr *GetUOExpr() const { + return expr; + } + + void SetSubType(MIRType *type); + + const MIRType *GetMIRType() const { + return subType; + } + + void SetUOType(MIRType *type) { + uoType = type; + } + + const MIRType *GetUOType() const { + return uoType; + } + + void SetPointeeLen(int64 len) { + pointeeLen = len; + } + + int64 GetPointeeLen() const { + return pointeeLen; + } + + void SetGlobal(bool isGlobalArg) { + isGlobal = isGlobalArg; + } + + bool IsGlobal() const { + return isGlobal; + } + + UniqueFEIRExpr ASTUOSideEffectExpr(Opcode op, std::list &stmts, + const std::string &varName = "", bool post = false) const; + + protected: + bool isGlobal = false; + ASTExpr *expr = nullptr; + MIRType *subType = nullptr; + MIRType *uoType = nullptr; + int64 pointeeLen = 0; +}; + +class ASTUOMinusExpr : public ASTUnaryOperatorExpr { + public: + explicit ASTUOMinusExpr(MapleAllocator &allocatorIn) : ASTUnaryOperatorExpr(allocatorIn, kASTOpMinus) {} + ~ASTUOMinusExpr() = default; + + private: + UniqueFEIRExpr Emit2FEExprImpl(std::list &stmts) const override; +}; + +class ASTUONotExpr : 
public ASTUnaryOperatorExpr { + public: + explicit ASTUONotExpr(MapleAllocator &allocatorIn) : ASTUnaryOperatorExpr(allocatorIn, kASTOpNot) {} + ~ASTUONotExpr() = default; + + private: + UniqueFEIRExpr Emit2FEExprImpl(std::list &stmts) const override; +}; + +class ASTUOLNotExpr : public ASTUnaryOperatorExpr { + public: + explicit ASTUOLNotExpr(MapleAllocator &allocatorIn) : ASTUnaryOperatorExpr(allocatorIn, kASTOpLNot) {} + ~ASTUOLNotExpr() = default; + + void SetShortCircuitIdx(uint32 leftIdx, uint32 rightIdx) override { + trueIdx = leftIdx; + falseIdx = rightIdx; + } + + private: + UniqueFEIRExpr Emit2FEExprImpl(std::list &stmts) const override; + uint32 trueIdx = 0; + uint32 falseIdx = 0; +}; + +class ASTUOPostIncExpr : public ASTUnaryOperatorExpr { + public: + explicit ASTUOPostIncExpr(MapleAllocator &allocatorIn) : ASTUnaryOperatorExpr(allocatorIn, kASTOpPostInc), + tempVarName(FEUtils::GetSequentialName("postinc_")) {} + ~ASTUOPostIncExpr() = default; + + private: + UniqueFEIRExpr Emit2FEExprImpl(std::list &stmts) const override; + std::string tempVarName; +}; + +class ASTUOPostDecExpr : public ASTUnaryOperatorExpr { + public: + explicit ASTUOPostDecExpr(MapleAllocator &allocatorIn) : ASTUnaryOperatorExpr(allocatorIn, kASTOpPostDec), + tempVarName(FEUtils::GetSequentialName("postdec_")) {} + ~ASTUOPostDecExpr() = default; + + private: + UniqueFEIRExpr Emit2FEExprImpl(std::list &stmts) const override; + std::string tempVarName; +}; + +class ASTUOPreIncExpr : public ASTUnaryOperatorExpr { + public: + explicit ASTUOPreIncExpr(MapleAllocator &allocatorIn) : ASTUnaryOperatorExpr(allocatorIn, kASTOpPreInc) {} + ~ASTUOPreIncExpr() = default; + + private: + UniqueFEIRExpr Emit2FEExprImpl(std::list &stmts) const override; +}; + +class ASTUOPreDecExpr : public ASTUnaryOperatorExpr { + public: + explicit ASTUOPreDecExpr(MapleAllocator &allocatorIn) : ASTUnaryOperatorExpr(allocatorIn, kASTOpPreDec) {} + ~ASTUOPreDecExpr() = default; + + private: + UniqueFEIRExpr Emit2FEExprImpl(std::list &stmts) const override; + std::string tempVarName; +}; + +class ASTUOAddrOfExpr : public ASTUnaryOperatorExpr { + public: + ASTUOAddrOfExpr() : ASTUnaryOperatorExpr(kASTOpAddrOf) {} + explicit ASTUOAddrOfExpr(MapleAllocator &allocatorIn) : ASTUnaryOperatorExpr(allocatorIn, kASTOpAddrOf) {} + ~ASTUOAddrOfExpr() = default; + + protected: + MIRConst *GenerateMIRConstImpl() const override; + + private: + UniqueFEIRExpr Emit2FEExprImpl(std::list &stmts) const override; +}; + +class ASTUOAddrOfLabelExpr : public ASTUnaryOperatorExpr { + public: + explicit ASTUOAddrOfLabelExpr(MapleAllocator &allocatorIn) : ASTUnaryOperatorExpr(allocatorIn, kASTOpAddrOfLabel), + labelName("", allocatorIn.GetMemPool()) {} + ~ASTUOAddrOfLabelExpr() = default; + + void SetLabelName(const std::string &name) { + labelName = name; + } + + const std::string GetLabelName() const { + return labelName.c_str() == nullptr ? 
"" : labelName.c_str(); + } + + protected: + MIRConst *GenerateMIRConstImpl() const override; + + private: + UniqueFEIRExpr Emit2FEExprImpl(std::list &stmts) const override; + MapleString labelName; +}; + +class ASTUODerefExpr : public ASTUnaryOperatorExpr { + public: + explicit ASTUODerefExpr(MapleAllocator &allocatorIn) : ASTUnaryOperatorExpr(allocatorIn, kASTOpDeref) {} + ~ASTUODerefExpr() = default; + + private: + UniqueFEIRExpr Emit2FEExprImpl(std::list &stmts) const override; + void InsertNonnullChecking(std::list &stmts, UniqueFEIRExpr baseExpr) const; + bool InsertBoundaryChecking(std::list &stmts, UniqueFEIRExpr expr) const; +}; + +class ASTUOPlusExpr : public ASTUnaryOperatorExpr { + public: + explicit ASTUOPlusExpr(MapleAllocator &allocatorIn) : ASTUnaryOperatorExpr(allocatorIn, kASTOpPlus) {} + ~ASTUOPlusExpr() = default; + + private: + UniqueFEIRExpr Emit2FEExprImpl(std::list &stmts) const override; +}; + +class ASTUORealExpr : public ASTUnaryOperatorExpr { + public: + explicit ASTUORealExpr(MapleAllocator &allocatorIn) : ASTUnaryOperatorExpr(allocatorIn, kASTOpReal) {} + ~ASTUORealExpr() = default; + + void SetElementType(MIRType *type) { + elementType = type; + } + + private: + UniqueFEIRExpr Emit2FEExprImpl(std::list &stmts) const override; + MIRType *elementType = nullptr; +}; + +class ASTUOImagExpr : public ASTUnaryOperatorExpr { + public: + explicit ASTUOImagExpr(MapleAllocator &allocatorIn) : ASTUnaryOperatorExpr(allocatorIn, kASTOpImag) {} + ~ASTUOImagExpr() = default; + + void SetElementType(MIRType *type) { + elementType = type; + } + + private: + UniqueFEIRExpr Emit2FEExprImpl(std::list &stmts) const override; + MIRType *elementType = nullptr; +}; + +class ASTUOExtensionExpr : public ASTUnaryOperatorExpr { + public: + explicit ASTUOExtensionExpr(MapleAllocator &allocatorIn) : ASTUnaryOperatorExpr(allocatorIn, kASTOpExtension) {} + ~ASTUOExtensionExpr() = default; + + private: + UniqueFEIRExpr Emit2FEExprImpl(std::list &stmts) const override; +}; + +class ASTUOCoawaitExpr : public ASTUnaryOperatorExpr { + public: + explicit ASTUOCoawaitExpr(MapleAllocator &allocatorIn) : ASTUnaryOperatorExpr(allocatorIn, kASTOpCoawait) {} + ~ASTUOCoawaitExpr() = default; + + private: + UniqueFEIRExpr Emit2FEExprImpl(std::list &stmts) const override; +}; + +class ASTPredefinedExpr : public ASTExpr { + public: + explicit ASTPredefinedExpr(MapleAllocator &allocatorIn) : ASTExpr(kASTOpPredefined) { + (void)allocatorIn; + } + ~ASTPredefinedExpr() = default; + void SetASTExpr(ASTExpr *astExpr); + + private: + UniqueFEIRExpr Emit2FEExprImpl(std::list &stmts) const override; + ASTExpr *child = nullptr; +}; + +class ASTOpaqueValueExpr : public ASTExpr { + public: + explicit ASTOpaqueValueExpr(MapleAllocator &allocatorIn) : ASTExpr(kASTOpOpaqueValue) { + (void)allocatorIn; + } + ~ASTOpaqueValueExpr() = default; + void SetASTExpr(ASTExpr *astExpr); + + private: + UniqueFEIRExpr Emit2FEExprImpl(std::list &stmts) const override; + ASTExpr *child = nullptr; +}; + +class ASTNoInitExpr : public ASTExpr { + public: + explicit ASTNoInitExpr(MapleAllocator &allocatorIn) : ASTExpr(kASTOpNoInitExpr) { + (void)allocatorIn; + } + ~ASTNoInitExpr() = default; + void SetNoInitType(MIRType *type); + + private: + UniqueFEIRExpr Emit2FEExprImpl(std::list &stmts) const override; + MIRType *noInitType = nullptr; +}; + +class ASTCompoundLiteralExpr : public ASTExpr { + public: + explicit ASTCompoundLiteralExpr(MapleAllocator &allocatorIn) : ASTExpr(kASTOpCompoundLiteralExpr) { + (void)allocatorIn; + } + 
~ASTCompoundLiteralExpr() = default; + void SetCompoundLiteralType(MIRType *clType); + void SetASTExpr(ASTExpr *astExpr); + + void SetAddrof(bool flag) { + isAddrof = flag; + } + + private: + UniqueFEIRExpr Emit2FEExprImpl(std::list &stmts) const override; + MIRConst *GenerateMIRConstImpl() const override; + MIRConst *GenerateMIRPtrConst() const; + ASTExpr *child = nullptr; + MIRType *compoundLiteralType = nullptr; + bool isAddrof = false; +}; + +class ASTOffsetOfExpr : public ASTExpr { + public: + explicit ASTOffsetOfExpr(MapleAllocator &allocatorIn) : ASTExpr(kASTOpOffsetOfExpr) { + (void)allocatorIn; + } + ~ASTOffsetOfExpr() = default; + void SetStructType(MIRType *stype); + void SetFieldName(const std::string &fName); + + void SetOffset(size_t val) { + offset = val; + } + + private: + UniqueFEIRExpr Emit2FEExprImpl(std::list &stmts) const override; + MIRType *structType = nullptr; + std::string fieldName; + size_t offset = 0; +}; + +class ASTInitListExpr : public ASTExpr { + public: + explicit ASTInitListExpr(MapleAllocator &allocatorIn) + : ASTExpr(kASTOpInitListExpr), initExprs(allocatorIn.Adapter()), varName("", allocatorIn.GetMemPool()) {} + ~ASTInitListExpr() = default; + void SetInitExprs(ASTExpr *astExpr); + void SetInitListType(MIRType *type); + + const MIRType *GetInitListType() const { + return initListType; + } + + MapleVector GetInitExprs() const { + return initExprs; + } + + void SetInitListVarName(const std::string &argVarName) { + varName = argVarName; + } + + const std::string GetInitListVarName() const { + return varName.c_str() == nullptr ? "" : varName.c_str(); + } + + void SetParentFlag(ParentFlag argParentFlag) { + parentFlag = argParentFlag; + } + + void SetUnionInitFieldIdx(uint32 idx) { + unionInitFieldIdx = idx; + } + + uint32 GetUnionInitFieldIdx() const { + return unionInitFieldIdx; + } + + void SetHasArrayFiller(bool flag) { + hasArrayFiller = flag; + } + + bool HasArrayFiller() const { + return hasArrayFiller; + } + + void SetTransparent(bool flag) { + isTransparent = flag; + } + + bool IsTransparent() const { + return isTransparent; + } + + void SetArrayFiller(ASTExpr *expr) { + arrayFillerExpr = expr; + } + + const ASTExpr *GetArrayFillter() const { + return arrayFillerExpr; + } + + void SetHasVectorType(bool flag) { + hasVectorType = flag; + } + + bool HasVectorType() const { + return hasVectorType; + } + + private: + MIRConst *GenerateMIRConstImpl() const override; + UniqueFEIRExpr Emit2FEExprImpl(std::list &stmts) const override; + void ProcessInitList(std::variant, UniqueFEIRExpr> &base, + const ASTInitListExpr &initList, std::list &stmts) const; + void ProcessArrayInitList(const UniqueFEIRExpr &addrOfArray, const ASTInitListExpr &initList, + std::list &stmts) const; + void SolveArrayElementInitWithInitListExpr(const UniqueFEIRExpr &addrOfArray, const UniqueFEIRExpr &addrOfElementExpr, + const MIRType &elementType, const ASTExpr &subExpr, size_t index, + std::list &stmts) const; + void HandleImplicitInitSections(const UniqueFEIRExpr &addrOfArray, const ASTInitListExpr &initList, + const MIRType &elementType, std::list &stmts) const; + void ProcessStructInitList(std::variant, UniqueFEIRExpr> &base, + const ASTInitListExpr &initList, std::list &stmts) const; + void SolveInitListFullOfZero(const MIRStructType &baseStructType, FieldID baseFieldID, const UniqueFEIRVar &var, + const ASTInitListExpr &initList, std::list &stmts) const; + bool SolveInitListPartialOfZero(std::variant, UniqueFEIRExpr> &base, + FieldID fieldID, uint32 &index, const 
ASTInitListExpr &initList, + std::list &stmts) const; + void SolveInitListExprOrDesignatedInitUpdateExpr(FieldID fieldID, ASTExpr &initExpr, + const UniqueFEIRType &baseStructPtrType, std::variant, UniqueFEIRExpr> &base, + std::list &stmts) const; + void SolveStructFieldOfArrayTypeInitWithStringLiteral(std::tuple fieldInfo, + const ASTExpr &initExpr, const UniqueFEIRType &baseStructPtrType, + std::variant, UniqueFEIRExpr> &base, std::list &stmts) const; + void SolveStructFieldOfBasicType(FieldID fieldID, const ASTExpr &initExpr, const UniqueFEIRType &baseStructPtrType, + std::variant, UniqueFEIRExpr> &base, + std::list &stmts) const; + std::tuple GetStructFieldInfo(uint32 fieldIndex, FieldID baseFieldID, + MIRStructType &structMirType) const; + UniqueFEIRExpr CalculateStartAddressForMemset(const UniqueFEIRVar &varIn, uint32 initSizeIn, FieldID fieldIDIn, + const std::variant, UniqueFEIRExpr> &baseIn) const; + UniqueFEIRExpr GetAddrofArrayFEExprByStructArrayField(MIRType &fieldType, + const UniqueFEIRExpr &addrOfArrayField) const; + void ProcessVectorInitList(std::variant, UniqueFEIRExpr> &base, + const ASTInitListExpr &initList, std::list &stmts) const; + MIRIntrinsicID SetVectorSetLane(const MIRType &type) const; + void ProcessDesignatedInitUpdater(std::variant, UniqueFEIRExpr> &base, + ASTExpr *expr, std::list &stmts) const; + void ProcessStringLiteralInitList(const UniqueFEIRExpr &addrOfCharArray, const UniqueFEIRExpr &addrOfStringLiteral, + size_t stringLength, std::list &stmts) const; + void ProcessImplicitInit(const UniqueFEIRExpr &addrExpr, uint32 initSize, uint32 total, uint32 elemSize, + std::list &stmts, const Loc loc = {0, 0, 0}) const; + MIRConst *GenerateMIRConstForArray() const; + MIRConst *GenerateMIRConstForStruct() const; + MapleVector initExprs; + ASTExpr *arrayFillerExpr = nullptr; + MIRType *initListType = nullptr; + MapleString varName; + ParentFlag parentFlag = ParentFlag::kNoParent; + uint32 unionInitFieldIdx = UINT32_MAX; + bool hasArrayFiller = false; + bool isTransparent = false; + bool hasVectorType = false; + mutable bool isGenerating = false; +}; + +class ASTBinaryConditionalOperator : public ASTExpr { + public: + explicit ASTBinaryConditionalOperator(MapleAllocator &allocatorIn) : ASTExpr(kASTOpBinaryConditionalOperator) { + (void)allocatorIn; + } + ~ASTBinaryConditionalOperator() = default; + void SetCondExpr(ASTExpr *expr); + void SetFalseExpr(ASTExpr *expr); + + private: + UniqueFEIRExpr Emit2FEExprImpl(std::list &stmts) const override; + ASTExpr *condExpr = nullptr; + ASTExpr *falseExpr = nullptr; +}; + +class ASTBinaryOperatorExpr : public ASTExpr { + public: + ASTBinaryOperatorExpr(MapleAllocator &allocatorIn, ASTOp o) : ASTExpr(o) { + (void)allocatorIn; + } + explicit ASTBinaryOperatorExpr(MapleAllocator &allocatorIn) + : ASTExpr(kASTOpBO), varName(FEUtils::GetSequentialName(FEUtils::kCondGoToStmtLabelNamePrefix), + allocatorIn.GetMemPool()) {} + + ~ASTBinaryOperatorExpr() override = default; + + void SetRetType(MIRType *type) { + retType = type; + } + + MIRType *GetRetType() const { + return retType; + } + + void SetLeftExpr(ASTExpr *expr) { + leftExpr = expr; + } + + void SetRightExpr(ASTExpr *expr) { + rightExpr = expr; + } + + void SetOpcode(Opcode op) { + opcode = op; + } + + Opcode GetOp() const { + return opcode; + } + + void SetComplexElementType(MIRType *type) { + complexElementType = type; + } + + void SetComplexLeftRealExpr(ASTExpr *expr) { + leftRealExpr = expr; + } + + void SetComplexLeftImagExpr(ASTExpr *expr) { + leftImagExpr = expr; + } 
+ + void SetComplexRightRealExpr(ASTExpr *expr) { + rightRealExpr = expr; + } + + void SetComplexRightImagExpr(ASTExpr *expr) { + rightImagExpr = expr; + } + + void SetCvtNeeded(bool needed) { + cvtNeeded = needed; + } + + void SetShortCircuitIdx(uint32 leftIdx, uint32 rightIdx) override { + trueIdx = leftIdx; + falseIdx = rightIdx; + } + + std::string GetVarName() const { + return varName.c_str() == nullptr ? "" : varName.c_str(); + } + + UniqueFEIRType SelectBinaryOperatorType(UniqueFEIRExpr &left, UniqueFEIRExpr &right) const; + + protected: + MIRConst *GenerateMIRConstImpl() const override; + MIRConst *SolveOpcodeLiorOrCior(const MIRConst &leftConst) const; + MIRConst *SolveOpcodeLandOrCand(const MIRConst &leftConst, const MIRConst &rightConst) const; + MIRConst *SolveOpcodeAdd(const MIRConst &leftConst, const MIRConst &rightConst) const; + MIRConst *SolveOpcodeSub(const MIRConst &leftConst, const MIRConst &rightConst) const; + UniqueFEIRExpr Emit2FEExprImpl(std::list &stmts) const override; + UniqueFEIRExpr Emit2FEExprComplexCalculations(std::list &stmts) const; + UniqueFEIRExpr Emit2FEExprComplexCompare(std::list &stmts) const; + UniqueFEIRExpr Emit2FEExprLogicOperate(std::list &stmts) const; + UniqueFEIRExpr Emit2FEExprLogicOperateSimplify(std::list &stmts) const; + + Opcode opcode = OP_undef; + MIRType *retType = nullptr; + MIRType *complexElementType = nullptr; + ASTExpr *leftExpr = nullptr; + ASTExpr *rightExpr = nullptr; + ASTExpr *leftRealExpr = nullptr; + ASTExpr *leftImagExpr = nullptr; + ASTExpr *rightRealExpr = nullptr; + ASTExpr *rightImagExpr = nullptr; + bool cvtNeeded = false; + MapleString varName; + uint32 trueIdx = 0; + uint32 falseIdx = 0; +}; + +class ASTImplicitValueInitExpr : public ASTExpr { + public: + explicit ASTImplicitValueInitExpr(MapleAllocator &allocatorIn) : ASTExpr(kASTImplicitValueInitExpr) { + (void)allocatorIn; + } + ~ASTImplicitValueInitExpr() = default; + + protected: + MIRConst *GenerateMIRConstImpl() const override; + + private: + UniqueFEIRExpr Emit2FEExprImpl(std::list &stmts) const override; +}; + +class ASTStringLiteral : public ASTExpr { + public: + explicit ASTStringLiteral(MapleAllocator &allocatorIn) : ASTExpr(kASTStringLiteral), + codeUnits(allocatorIn.Adapter()), str(allocatorIn.Adapter()) {} + ~ASTStringLiteral() = default; + + void SetLength(size_t len) { + length = len; + } + + size_t GetLength() const { + return length; + } + + void SetCodeUnits(MapleVector &units) { + codeUnits = std::move(units); + } + + const MapleVector &GetCodeUnits() const { + return codeUnits; + } + + void SetStr(const std::string &strIn) { + if (str.size() > 0) { + str.clear(); + str.shrink_to_fit(); + } + (void)str.insert(str.cend(), strIn.cbegin(), strIn.cend()); + } + + const std::string GetStr() const { + return std::string(str.cbegin(), str.cend()); + } + + void SetIsArrayToPointerDecay(bool argIsArrayToPointerDecay) { + isArrayToPointerDecay = argIsArrayToPointerDecay; + } + + bool IsArrayToPointerDecay() const { + return isArrayToPointerDecay; + } + + protected: + MIRConst *GenerateMIRConstImpl() const override; + + private: + UniqueFEIRExpr Emit2FEExprImpl(std::list &stmts) const override; + size_t length = 0; + MapleVector codeUnits; + MapleVector str; // Ascii string + bool isArrayToPointerDecay = false; +}; + +class ASTArraySubscriptExpr : public ASTExpr { + public: + explicit ASTArraySubscriptExpr(MapleAllocator &allocatorIn) : ASTExpr(kASTSubscriptExpr) { + (void)allocatorIn; + } + ~ASTArraySubscriptExpr() = default; + + void 
SetBaseExpr(ASTExpr *astExpr) { + baseExpr = astExpr; + } + + const ASTExpr *GetBaseExpr() const { + return baseExpr; + } + + void SetIdxExpr(ASTExpr *astExpr) { + idxExpr = astExpr; + } + + const ASTExpr *GetIdxExpr() const { + return idxExpr; + } + + void SetArrayType(MIRType *ty) { + arrayType = ty; + } + + const MIRType *GetArrayType() const { + return arrayType; + } + + size_t CalculateOffset() const; + + void SetIsVLA(bool flag) { + isVLA = flag; + } + + void SetVLASizeExpr(ASTExpr *expr) { + vlaSizeExpr = expr; + } + + private: + ASTExpr *FindFinalBase() const; + MIRConst *GenerateMIRConstImpl() const override; + bool CheckFirstDimIfZero(const MIRType *arrType) const; + UniqueFEIRExpr Emit2FEExprImpl(std::list &stmts) const override; + MIRType *GetArrayTypeForPointerArray() const; + UniqueFEIRExpr SolveMultiDimArray(UniqueFEIRExpr &baseAddrFEExpr, UniqueFEIRType &arrayFEType, + bool isArrayTypeOpt, std::list &stmts) const; + UniqueFEIRExpr SolveOtherArrayType(const UniqueFEIRExpr &baseAddrFEExpr, std::list &stmts) const; + void InsertNonnullChecking(std::list &stmts, const UniqueFEIRExpr &indexExpr, + const UniqueFEIRExpr &baseAddrExpr) const; + bool InsertBoundaryChecking(std::list &stmts, UniqueFEIRExpr indexExpr, + UniqueFEIRExpr baseAddrFEExpr) const; + + ASTExpr *baseExpr = nullptr; + MIRType *arrayType = nullptr; + ASTExpr *idxExpr = nullptr; + bool isVLA = false; + ASTExpr *vlaSizeExpr = nullptr; +}; + +class ASTExprUnaryExprOrTypeTraitExpr : public ASTExpr { + public: + explicit ASTExprUnaryExprOrTypeTraitExpr(MapleAllocator &allocatorIn) : ASTExpr(kASTExprUnaryExprOrTypeTraitExpr) { + (void)allocatorIn; + } + ~ASTExprUnaryExprOrTypeTraitExpr() = default; + + void SetIsType(bool type) { + isType = type; + } + + void SetArgType(MIRType *type) { + argType = type; + } + + void SetArgExpr(ASTExpr *astExpr) { + argExpr = astExpr; + } + + private: + UniqueFEIRExpr Emit2FEExprImpl(std::list &stmts) const override; + bool isType = false; + MIRType *argType = nullptr; + ASTExpr *argExpr = nullptr; +}; + +class ASTMemberExpr : public ASTExpr { + public: + explicit ASTMemberExpr(MapleAllocator &allocatorIn) : ASTExpr(kASTMemberExpr), + memberName("", allocatorIn.GetMemPool()) {} + ~ASTMemberExpr() = default; + + void SetBaseExpr(ASTExpr *astExpr) { + baseExpr = astExpr; + } + + const ASTExpr *GetBaseExpr() const { + return baseExpr; + } + + void SetMemberName(std::string name) { + memberName = std::move(name); + } + + std::string GetMemberName() const { + return memberName.c_str() == nullptr ? 
"" : memberName.c_str(); + } + + void SetMemberType(MIRType *type) { + memberType = type; + } + + void SetBaseType(MIRType *type) { + baseType = type; + } + + const MIRType *GetMemberType() const { + return memberType; + } + + const MIRType *GetBaseType() const { + return baseType; + } + + void SetIsArrow(bool arrow) { + isArrow = arrow; + } + + bool GetIsArrow() const { + return isArrow; + } + + void SetFiledOffsetBits(uint64 offset) { + fieldOffsetBits = offset; + } + + uint64 GetFieldOffsetBits() const { + return fieldOffsetBits; + } + + private: + MIRConst *GenerateMIRConstImpl() const override; + UniqueFEIRExpr Emit2FEExprImpl(std::list &stmts) const override; + const ASTMemberExpr &FindFinalMember(const ASTMemberExpr &startExpr, std::list &memberNames) const; + void InsertNonnullChecking(std::list &stmts, UniqueFEIRExpr baseFEExpr) const; + + ASTExpr *baseExpr = nullptr; + MapleString memberName; + MIRType *memberType = nullptr; + MIRType *baseType = nullptr; + bool isArrow = false; + uint64 fieldOffsetBits = 0; +}; + +class ASTDesignatedInitUpdateExpr : public ASTExpr { + public: + explicit ASTDesignatedInitUpdateExpr(MapleAllocator &allocatorIn) : ASTExpr(kASTASTDesignatedInitUpdateExpr) { + (void)allocatorIn; + } + ~ASTDesignatedInitUpdateExpr() = default; + + void SetBaseExpr(ASTExpr *astExpr) { + baseExpr = astExpr; + } + + const ASTExpr *GetBaseExpr() const{ + return baseExpr; + } + + void SetUpdaterExpr(ASTExpr *astExpr) { + updaterExpr = astExpr; + } + + const ASTExpr *GetUpdaterExpr() const{ + return updaterExpr; + } + + void SetInitListType(MIRType *type) { + initListType = type; + } + + const MIRType *GetInitListType() const { + return initListType; + } + + void SetInitListVarName(const std::string &name) { + initListVarName = name; + } + + const std::string &GetInitListVarName() const { + return initListVarName; + } + + private: + MIRConst *GenerateMIRConstImpl() const override; + UniqueFEIRExpr Emit2FEExprImpl(std::list &stmts) const override; + ASTExpr *baseExpr = nullptr; + ASTExpr *updaterExpr = nullptr; + MIRType *initListType = nullptr; + std::string initListVarName; +}; + +class ASTAssignExpr : public ASTBinaryOperatorExpr { + public: + explicit ASTAssignExpr(MapleAllocator &allocatorIn) : ASTBinaryOperatorExpr(allocatorIn, kASTOpAssign), + isCompoundAssign(false) {} + ~ASTAssignExpr() override = default; + + void SetIsCompoundAssign(bool argIsCompoundAssign) { + isCompoundAssign = argIsCompoundAssign; + } + + private: + UniqueFEIRExpr Emit2FEExprImpl(std::list &stmts) const override; + void GetActualRightExpr(UniqueFEIRExpr &right, const UniqueFEIRExpr &left) const; + bool IsInsertNonnullChecking(const UniqueFEIRExpr &rExpr) const; + bool isCompoundAssign = false; +}; + +class ASTBOComma : public ASTBinaryOperatorExpr { + public: + explicit ASTBOComma(MapleAllocator &allocatorIn) : ASTBinaryOperatorExpr(allocatorIn, kASTOpComma) {} + ~ASTBOComma() override = default; + + private: + UniqueFEIRExpr Emit2FEExprImpl(std::list &stmts) const override; +}; + +class ASTBOPtrMemExpr : public ASTBinaryOperatorExpr { + public: + explicit ASTBOPtrMemExpr(MapleAllocator &allocatorIn) : ASTBinaryOperatorExpr(allocatorIn, kASTOpPtrMemD) {} + ~ASTBOPtrMemExpr() override = default; + + private: + UniqueFEIRExpr Emit2FEExprImpl(std::list &stmts) const override; +}; + +class ASTCallExpr : public ASTExpr { + public: + explicit ASTCallExpr(MapleAllocator &allocatorIn) + : ASTExpr(kASTOpCall), args(allocatorIn.Adapter()), funcName("", allocatorIn.GetMemPool()), + 
varName(FEUtils::GetSequentialName("retVar_"), allocatorIn.GetMemPool()) {} + ~ASTCallExpr() = default; + void SetCalleeExpr(ASTExpr *astExpr) { + calleeExpr = astExpr; + } + + const ASTExpr *GetCalleeExpr() const { + return calleeExpr; + } + + void SetArgs(MapleVector &argsVector) { + args = std::move(argsVector); + } + + const MapleVector &GetArgsExpr() const { + return args; + } + + MIRType *GetRetType() const { + return mirType; + } + + const std::string GetRetVarName() const { + return varName.c_str() == nullptr ? "" : varName.c_str(); + } + + void SetFuncName(const std::string &name) { + funcName = name; + } + + const std::string GetFuncName() const { + return funcName.c_str() == nullptr ? "" : funcName.c_str(); + } + + void SetFuncAttrs(const FuncAttrs &attrs) { + funcAttrs = attrs; + } + + const FuncAttrs &GetFuncAttrs() const { + return funcAttrs; + } + + void SetIcall(bool icall) { + isIcall = icall; + } + + bool IsIcall() const { + return isIcall; + } + + bool IsNeedRetExpr() const { + return mirType->GetPrimType() != PTY_void; + } + + bool IsFirstArgRet() const { + // If the return value exceeds 16 bytes, it is passed as the first parameter. + return mirType->GetPrimType() == PTY_agg && mirType->GetSize() > 16; + } + + void SetFuncDecl(ASTFunc *decl) { + funcDecl = decl; + } + + void SetReturnVarAttrs(const GenericAttrs &attrs) { + returnVarAttrs = attrs; + } + + const GenericAttrs &GetReturnVarAttrs() const { + return returnVarAttrs; + } + + std::string CvtBuiltInFuncName(std::string builtInName) const; + UniqueFEIRExpr ProcessBuiltinFunc(std::list &stmts, bool &isFinish) const; + std::unique_ptr GenCallStmt() const; + void AddArgsExpr(const std::unique_ptr &callStmt, std::list &stmts) const; + UniqueFEIRExpr AddRetExpr(const std::unique_ptr &callStmt) const; + void InsertBoundaryCheckingInArgs(std::list &stmts) const; + void InsertBoundaryCheckingInArgsForICall(std::list &stmts, const UniqueFEIRExpr &calleeFEExpr) const; + void InsertBoundaryVarInRet(std::list &stmts) const; + void InsertNonnullCheckingForIcall(const UniqueFEIRExpr &expr, std::list &stmts) const; + void CheckNonnullFieldInStruct() const; + + private: + using FuncPtrBuiltinFunc = UniqueFEIRExpr (ASTCallExpr::*)(std::list &stmts) const; + static std::unordered_map InitBuiltinFuncPtrMap(); + UniqueFEIRExpr CreateIntrinsicopForC(std::list &stmts, MIRIntrinsicID argIntrinsicID, + bool genTempVar = true) const; + UniqueFEIRExpr CreateIntrinsicCallAssignedForC(std::list &stmts, MIRIntrinsicID argIntrinsicID) const; + UniqueFEIRExpr CreateBinaryExpr(std::list &stmts, Opcode op) const; + UniqueFEIRExpr EmitBuiltinFunc(std::list &stmts) const; + UniqueFEIRExpr EmitBuiltinVectorLoad(std::list &stmts, bool &isFinish) const; + UniqueFEIRExpr EmitBuiltinVectorStore(std::list &stmts, bool &isFinish) const; + UniqueFEIRExpr EmitBuiltinVectorShli(std::list &stmts, bool &isFinish) const; + UniqueFEIRExpr EmitBuiltinVectorShri(std::list &stmts, bool &isFinish) const; + UniqueFEIRExpr EmitBuiltinVectorShru(std::list &stmts, bool &isFinish) const; + UniqueFEIRExpr EmitBuiltinVectorZip(std::list &stmts, bool &isFinish) const; + UniqueFEIRExpr EmitBuiltinRotate(std::list &stmts, PrimType rotType, bool isLeft) const; +#define EMIT_BUILTIIN_FUNC(FUNC) EmitBuiltin##FUNC(std::list &stmts) const + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(Ctz); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(Ctzl); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(Clz); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(Clzl); + + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(Popcount); + UniqueFEIRExpr 
EMIT_BUILTIIN_FUNC(Popcountl); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(Popcountll); + + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(Parity); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(Parityl); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(Parityll); + + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(Clrsb); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(Clrsbl); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(Clrsbll); + + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(Ffs); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(Ffsl); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(Ffsll); + + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(IsAligned); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(AlignUp); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(AlignDown); + + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(Alloca); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(Expect); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(VaStart); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(VaEnd); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(VaCopy); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(Prefetch); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(Abs); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(ACos); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(ACosf); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(ASin); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(ASinf); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(ATan); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(ATanf); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(Cos); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(Cosf); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(Cosh); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(Coshf); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(Sin); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(Sinf); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(Sinh); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(Sinhf); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(Exp); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(Expf); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(Fmax); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(Bswap64); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(Bswap32); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(Bswap16); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(Fmin); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(Log); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(Logf); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(Log10); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(Log10f); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(Isunordered); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(Isless); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(Islessequal); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(Isgreater); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(Isgreaterequal); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(Islessgreater); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(WarnMemsetZeroLen); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(RotateLeft8); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(RotateLeft16); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(RotateLeft32); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(RotateLeft64); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(RotateRight8); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(RotateRight16); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(RotateRight32); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(RotateRight64); + + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncAddAndFetch8); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncAddAndFetch4); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncAddAndFetch2); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncAddAndFetch1); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncSubAndFetch8); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncSubAndFetch4); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncSubAndFetch2); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncSubAndFetch1); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncFetchAndSub8); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncFetchAndSub4); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncFetchAndSub2); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncFetchAndSub1); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncFetchAndAdd8); + UniqueFEIRExpr 
EMIT_BUILTIIN_FUNC(SyncFetchAndAdd4); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncFetchAndAdd2); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncFetchAndAdd1); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncBoolCompareAndSwap1); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncBoolCompareAndSwap2); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncBoolCompareAndSwap4); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncBoolCompareAndSwap8); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncLockTestAndSet8); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncLockTestAndSet4); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncLockTestAndSet2); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncLockTestAndSet1); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncValCompareAndSwap8); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncValCompareAndSwap4); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncValCompareAndSwap2); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncValCompareAndSwap1); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncLockRelease8); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncLockRelease4); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncLockRelease2); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncLockRelease1); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncFetchAndAnd1); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncFetchAndAnd2); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncFetchAndAnd4); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncFetchAndAnd8); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncFetchAndOr1); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncFetchAndOr2); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncFetchAndOr4); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncFetchAndOr8); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncFetchAndXor1); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncFetchAndXor2); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncFetchAndXor4); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncFetchAndXor8); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncFetchAndNand1); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncFetchAndNand2); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncFetchAndNand4); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncFetchAndNand8); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncAndAndFetch1); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncAndAndFetch2); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncAndAndFetch4); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncAndAndFetch8); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncOrAndFetch1); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncOrAndFetch2); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncOrAndFetch4); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncOrAndFetch8); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncXorAndFetch1); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncXorAndFetch2); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncXorAndFetch4); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncXorAndFetch8); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncNandAndFetch1); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncNandAndFetch2); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncNandAndFetch4); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncNandAndFetch8); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(SyncSynchronize); + + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(AtomicExchangeN); + + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(ObjectSize); + + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(ReturnAddress); + UniqueFEIRExpr EMIT_BUILTIIN_FUNC(ExtractReturnAddr); + +// vector builtinfunc +#define DEF_MIR_INTRINSIC(STR, NAME, INTRN_CLASS, RETURN_TYPE, ...) 
\ +UniqueFEIRExpr EmitBuiltin##STR(std::list &stmts) const; +#include "intrinsic_vector.def" +#undef DEF_MIR_INTRINSIC + + UniqueFEIRExpr Emit2FEExprImpl(std::list &stmts) const override; + + static std::unordered_map builtingFuncPtrMap; + MapleVector args; + ASTExpr *calleeExpr = nullptr; + MapleString funcName; + FuncAttrs funcAttrs; + bool isIcall = false; + MapleString varName; + ASTFunc *funcDecl = nullptr; + GenericAttrs returnVarAttrs; +}; + +class ASTParenExpr : public ASTExpr { + public: + explicit ASTParenExpr(MapleAllocator &allocatorIn) : ASTExpr(kASTParen) { + (void)allocatorIn; + } + ~ASTParenExpr() = default; + + void SetASTExpr(ASTExpr *astExpr) { + child = astExpr; + } + + void SetShortCircuitIdx(uint32 leftIdx, uint32 rightIdx) override { + trueIdx = leftIdx; + falseIdx = rightIdx; + } + + protected: + MIRConst *GenerateMIRConstImpl() const override { + return child->GenerateMIRConst(); + } + + ASTExpr *IgnoreParensImpl() override { + return child->IgnoreParens(); + } + + private: + UniqueFEIRExpr Emit2FEExprImpl(std::list &stmts) const override; + + ASTValue *GetConstantValueImpl() const override { + return child->GetConstantValue(); + } + + ASTExpr *child = nullptr; + uint32 trueIdx = 0; + uint32 falseIdx = 0; +}; + +class ASTIntegerLiteral : public ASTExpr { + public: + explicit ASTIntegerLiteral(MapleAllocator &allocatorIn) : ASTExpr(kASTIntegerLiteral) { + (void)allocatorIn; + } + ~ASTIntegerLiteral() = default; + + int64 GetVal() const { + return val; + } + + void SetVal(int64 valIn) { + val = valIn; + } + + protected: + MIRConst *GenerateMIRConstImpl() const override; + + private: + UniqueFEIRExpr Emit2FEExprImpl(std::list &stmts) const override; + + int64 val = 0; +}; + +enum class FloatKind { + F32, + F64 +}; + +class ASTFloatingLiteral : public ASTExpr { + public: + explicit ASTFloatingLiteral(MapleAllocator &allocatorIn) : ASTExpr(kASTFloatingLiteral) { + (void)allocatorIn; + } + ~ASTFloatingLiteral() = default; + + double GetVal() const { + return val; + } + + void SetVal(double valIn) { + val = valIn; + } + + void SetKind(FloatKind argKind) { + kind = argKind; + } + + FloatKind GetKind() const { + return kind; + } + + private: + UniqueFEIRExpr Emit2FEExprImpl(std::list &stmts) const override; + MIRConst *GenerateMIRConstImpl() const override; + double val = 0; + FloatKind kind = FloatKind::F32; +}; + +class ASTCharacterLiteral : public ASTExpr { + public: + explicit ASTCharacterLiteral(MapleAllocator &allocatorIn) : ASTExpr(kASTCharacterLiteral) { + (void)allocatorIn; + } + ~ASTCharacterLiteral() = default; + + int64 GetVal() const { + return val; + } + + void SetVal(int64 valIn) { + val = valIn; + } + + void SetPrimType(PrimType primType) { + type = primType; + } + + private: + UniqueFEIRExpr Emit2FEExprImpl(std::list &stmts) const override; + int64 val = 0; + PrimType type = PTY_begin; +}; + +struct VaArgInfo { + bool isGPReg; // GP or FP/SIMD arg reg + int regOffset; + int stackOffset; + // If the argument type is a Composite Type that is larger than 16 bytes, + // then the argument is copied to memory allocated by the caller and replaced by a pointer to the copy. 
+ bool isCopyedMem; + MIRType *HFAType; // Homogeneous Floating-point Aggregate +}; + +class ASTVAArgExpr : public ASTExpr { + public: + explicit ASTVAArgExpr(MapleAllocator &allocatorIn) : ASTExpr(kASTVAArgExpr) { + (void)allocatorIn; + } + ~ASTVAArgExpr() = default; + + void SetASTExpr(ASTExpr *astExpr) { + child = astExpr; + } + + private: + UniqueFEIRExpr Emit2FEExprImpl(std::list &stmts) const override; + VaArgInfo ProcessValistArgInfo(const MIRType &type) const; + MIRType *IsHFAType(const MIRStructType &type) const; + void CvtHFA2Struct(const MIRStructType &type, MIRType &fieldType, const UniqueFEIRVar &vaArgVar, + std::list &stmts) const; + void ProcessBigEndianForReg(std::list &stmts, MIRType &vaArgType, + const UniqueFEIRVar &offsetVar, const VaArgInfo &info) const; + void ProcessBigEndianForStack(std::list &stmts, MIRType &vaArgType, + const UniqueFEIRVar &vaArgVar) const; + + ASTExpr *child = nullptr; +}; + +class ASTConstantExpr : public ASTExpr { + public: + explicit ASTConstantExpr(MapleAllocator &allocatorIn) : ASTExpr(kConstantExpr) { + (void)allocatorIn; + } + ~ASTConstantExpr() = default; + void SetASTExpr(ASTExpr *astExpr) { + child = astExpr; + } + + const ASTExpr *GetChild() const{ + return child; + } + + protected: + MIRConst *GenerateMIRConstImpl() const override; + + private: + ASTExpr *child = nullptr; + UniqueFEIRExpr Emit2FEExprImpl(std::list &stmts) const override; +}; + +class ASTImaginaryLiteral : public ASTExpr { + public: + explicit ASTImaginaryLiteral(MapleAllocator &allocatorIn) : ASTExpr(kASTImaginaryLiteral) { + (void)allocatorIn; + } + ~ASTImaginaryLiteral() = default; + void SetASTExpr(ASTExpr *astExpr) { + child = astExpr; + } + + void SetComplexType(MIRType *structType) { + complexType = structType; + } + + void SetElemType(MIRType *type) { + elemType = type; + } + + private: + MIRType *complexType = nullptr; + MIRType *elemType = nullptr; + ASTExpr *child = nullptr; + UniqueFEIRExpr Emit2FEExprImpl(std::list &stmts) const override; +}; + +class ASTConditionalOperator : public ASTExpr { + public: + explicit ASTConditionalOperator(MapleAllocator &allocatorIn) : ASTExpr(kASTConditionalOperator) { + (void)allocatorIn; + } + ~ASTConditionalOperator() = default; + + void SetCondExpr(ASTExpr *astExpr) { + condExpr = astExpr; + } + + void SetTrueExpr(ASTExpr *astExpr) { + trueExpr = astExpr; + } + + void SetFalseExpr(ASTExpr *astExpr) { + falseExpr = astExpr; + } + + private: + UniqueFEIRExpr Emit2FEExprImpl(std::list &stmts) const override; + + MIRConst *GenerateMIRConstImpl() const override { + MIRConst *condConst = condExpr->GenerateMIRConst(); + if (condConst->IsZero()) { + return falseExpr->GenerateMIRConst(); + } else { + return trueExpr->GenerateMIRConst(); + } + } + + ASTExpr *condExpr = nullptr; + ASTExpr *trueExpr = nullptr; + ASTExpr *falseExpr = nullptr; + std::string varName = FEUtils::GetSequentialName("levVar_"); +}; + +class ASTArrayInitLoopExpr : public ASTExpr { + public: + explicit ASTArrayInitLoopExpr(MapleAllocator &allocatorIn) : ASTExpr(kASTOpArrayInitLoop) { + (void)allocatorIn; + } + ~ASTArrayInitLoopExpr() = default; + + void SetCommonExpr(ASTExpr *expr) { + commonExpr = expr; + } + + const ASTExpr *GetCommonExpr() const { + return commonExpr; + } + + private: + UniqueFEIRExpr Emit2FEExprImpl(std::list &stmts) const override; + ASTExpr* commonExpr = nullptr; +}; + +class ASTArrayInitIndexExpr : public ASTExpr { + public: + explicit ASTArrayInitIndexExpr(MapleAllocator &allocatorIn) : ASTExpr(kASTOpArrayInitLoop) { + 
(void)allocatorIn; + } + ~ASTArrayInitIndexExpr() = default; + + void SetPrimType(MIRType *pType) { + primType = pType; + } + + void SetValueStr(const std::string &val) { + valueStr = val; + } + + const MIRType *GetPrimeType() const { + return primType; + } + + std::string GetValueStr() const { + return valueStr; + } + + private: + UniqueFEIRExpr Emit2FEExprImpl(std::list &stmts) const override; + MIRType *primType = nullptr; + std::string valueStr; +}; + +class ASTExprWithCleanups : public ASTExpr { + public: + explicit ASTExprWithCleanups(MapleAllocator &allocatorIn) : ASTExpr(kASTOpExprWithCleanups) { + (void)allocatorIn; + } + ~ASTExprWithCleanups() = default; + + void SetSubExpr(ASTExpr *sub) { + subExpr = sub; + } + + const ASTExpr *GetSubExpr() const { + return subExpr; + } + + private: + UniqueFEIRExpr Emit2FEExprImpl(std::list &stmts) const override; + ASTExpr *subExpr = nullptr; +}; + +class ASTMaterializeTemporaryExpr : public ASTExpr { + public: + explicit ASTMaterializeTemporaryExpr(MapleAllocator &allocatorIn) : ASTExpr(kASTOpMaterializeTemporary) { + (void)allocatorIn; + } + ~ASTMaterializeTemporaryExpr() = default; + + private: + UniqueFEIRExpr Emit2FEExprImpl(std::list &stmts) const override; +}; + +class ASTSubstNonTypeTemplateParmExpr : public ASTExpr { + public: + explicit ASTSubstNonTypeTemplateParmExpr(MapleAllocator &allocatorIn) : ASTExpr(kASTOpSubstNonTypeTemplateParm) { + (void)allocatorIn; + } + ~ASTSubstNonTypeTemplateParmExpr() = default; + + private: + UniqueFEIRExpr Emit2FEExprImpl(std::list &stmts) const override; +}; + +class ASTDependentScopeDeclRefExpr : public ASTExpr { + public: + explicit ASTDependentScopeDeclRefExpr(MapleAllocator &allocatorIn) : ASTExpr(kASTOpDependentScopeDeclRef) { + (void)allocatorIn; + } + ~ASTDependentScopeDeclRefExpr() = default; + + private: + UniqueFEIRExpr Emit2FEExprImpl(std::list &stmts) const override; +}; + +class ASTAtomicExpr : public ASTExpr { + public: + explicit ASTAtomicExpr(MapleAllocator &allocatorIn) : ASTExpr(kASTOpAtomic) { + (void)allocatorIn; + } + ~ASTAtomicExpr() = default; + + void SetRefType(MIRType *ref) { + refType = ref; + } + + void SetAtomicOp(ASTAtomicOp op) { + atomicOp = op; + } + + const MIRType *GetRefType() const { + return refType; + } + + ASTAtomicOp GetAtomicOp() const { + return atomicOp; + } + + void SetValExpr1(ASTExpr *val) { + valExpr1 = val; + } + + void SetValExpr2(ASTExpr *val) { + valExpr2 = val; + } + + void SetObjExpr(ASTExpr *obj) { + objExpr = obj; + } + + void SetOrderExpr(ASTExpr *order) { + orderExpr = order; + } + + const ASTExpr *GetValExpr1() const { + return valExpr1; + } + + const ASTExpr *GetValExpr2() const { + return valExpr2; + } + + const ASTExpr *GetObjExpr() const { + return objExpr; + } + + const ASTExpr *GetOrderExpr() const { + return orderExpr; + } + + void SetVal1Type(MIRType *ty) { + val1Type = ty; + } + + void SetVal2Type(MIRType *ty) { + val2Type = ty; + } + + void SetFromStmt(bool fromStmt) { + isFromStmt = fromStmt; + } + + bool IsFromStmt() const { + return isFromStmt; + } + + private: + UniqueFEIRExpr Emit2FEExprImpl(std::list &stmts) const override; + MIRType *refType = nullptr; + MIRType *val1Type = nullptr; + MIRType *val2Type = nullptr; + ASTExpr *objExpr = nullptr; + ASTExpr *valExpr1 = nullptr; + ASTExpr *valExpr2 = nullptr; + ASTExpr *orderExpr = nullptr; + ASTAtomicOp atomicOp = kAtomicOpUndefined; + bool isFromStmt = false; +}; + +class ASTExprStmtExpr : public ASTExpr { + public: + explicit ASTExprStmtExpr(MapleAllocator &allocatorIn) : 
ASTExpr(kASTOpStmtExpr) { + (void)allocatorIn; + } + ~ASTExprStmtExpr() = default; + void SetCompoundStmt(ASTStmt *sub) { + cpdStmt = sub; + } + + const ASTStmt *GetSubExpr() const { + return cpdStmt; + } + + private: + UniqueFEIRExpr Emit2FEExprImpl(std::list &stmts) const override; + + ASTStmt *cpdStmt = nullptr; +}; +} +#endif // HIR2MPL_AST_INPUT_INCLUDE_AST_EXPR_H diff --git a/src/hir2mpl/ast_input/clang/include/ast_function.h b/src/hir2mpl/ast_input/clang/include/ast_function.h new file mode 100644 index 0000000000000000000000000000000000000000..814c3c34a212947db099de9191eacbc47466a029 --- /dev/null +++ b/src/hir2mpl/ast_input/clang/include/ast_function.h @@ -0,0 +1,74 @@ +/* + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MPL_FE_AST_INPUT_AST_FUNTION_H +#define MPL_FE_AST_INPUT_AST_FUNTION_H +#include "fe_function.h" +#include "ast_struct2fe_helper.h" + +namespace maple { +class ASTFunction : public FEFunction { + public: + ASTFunction(const ASTFunc2FEHelper &argMethodHelper, MIRFunction &mirFunc, + const std::unique_ptr &argPhaseResultTotal); + virtual ~ASTFunction() = default; + + protected: + bool GenerateGeneralStmt(const std::string &phaseName) override { + WARN(kLncWarn, "Phase: %s may not need.", phaseName.c_str()); + return true; + } + + bool GenerateArgVarList(const std::string &phaseName) override; + bool GenerateAliasVars(const std::string &phaseName) override; + + bool PreProcessTypeNameIdx() override { + return true; + } + + void GenerateGeneralStmtFailCallBack() override {} + + void GenerateGeneralDebugInfo() override {} + + bool VerifyGeneral() override { + return true; + } + + void VerifyGeneralFailCallBack() override {} + + bool HasThis() override { + return funcHelper.HasThis(); + } + + bool IsNative() override { + return funcHelper.IsNative(); + } + + bool EmitToFEIRStmt(const std::string &phaseName) override; + + void PreProcessImpl() override; + bool ProcessImpl() override; + bool ProcessFEIRFunction() override; + void FinishImpl() override; + bool EmitToMIR(const std::string &phaseName) override; + void SetMIRFunctionInfo(); + void AddVLACleanupStmts(std::list &stmts) override; + + const ASTFunc2FEHelper &funcHelper; + ASTFunc &astFunc; + std::list vlaCleanupStmts; + bool error = false; +}; +} // namespace maple +#endif // MPL_FE_AST_INPUT_AST_FUNTION_H \ No newline at end of file diff --git a/src/hir2mpl/ast_input/clang/include/ast_op.h b/src/hir2mpl/ast_input/clang/include/ast_op.h new file mode 100644 index 0000000000000000000000000000000000000000..a1f8802e8aae2d2aa71398e33509e905f8cd676c --- /dev/null +++ b/src/hir2mpl/ast_input/clang/include/ast_op.h @@ -0,0 +1,177 @@ +/* + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef HIR2MPL_AST_INPUT_INCLUDE_AST_OP_H +#define HIR2MPL_AST_INPUT_INCLUDE_AST_OP_H +namespace maple { +enum ASTOp { + kASTOpNone = 0, + kASTStringLiteral, + kASTSubscriptExpr, + kASTExprUnaryExprOrTypeTraitExpr, + kASTMemberExpr, + kASTASTDesignatedInitUpdateExpr, + kASTImplicitValueInitExpr, + kASTOpRef, + + // unaryopcode + kASTOpMinus, + kASTOpNot, + kASTOpLNot, + kASTOpPostInc, + kASTOpPostDec, + kASTOpPreInc, + kASTOpPreDec, + kASTOpAddrOf, + kASTOpAddrOfLabel, + kASTOpDeref, + kASTOpPlus, + kASTOpReal, + kASTOpImag, + kASTOpExtension, + kASTOpCoawait, + + // BinaryOperators + // [C++ 5.5] Pointer-to-member operators. + kASTOpBO, + kASTOpCompoundAssign, + kASTOpPtrMemD, + kASTOpPtrMemI, + // [C99 6.5.5] Multiplicative operators. + kASTOpMul, + kASTOpDiv, + kASTOpRem, + // [C99 6.5.6] Additive operators. + kASTOpAdd, + kASTOpSub, + // [C99 6.5.7] Bitwise shift operators. + kASTOpShl, + kASTOpShr, + // [C99 6.5.8] Relational operators. + kASTOpLT, + kASTOpGT, + kASTOpLE, + kASTOpGE, + // [C99 6.5.9] Equality operators. + kASTOpEQ, + kASTOpNE, + // [C99 6.5.10] Bitwise AND operator. + kASTOpAnd, + // [C99 6.5.11] Bitwise XOR operator. + kASTOpXor, + // [C99 6.5.12] Bitwise OR operator. + kASTOpOr, + // [C99 6.5.13] Logical AND operator. + kASTOpLAnd, + // [C99 6.5.14] Logical OR operator. + kASTOpLOr, + // [C99 6.5.16] Assignment operators. + kASTOpAssign, + kASTOpMulAssign, + kASTOpDivAssign, + kASTOpRemAssign, + kASTOpAddAssign, + kASTOpSubAssign, + kASTOpShlAssign, + kASTOpShrAssign, + kASTOpAndAssign, + kASTOpXorAssign, + kASTOpOrAssign, + // [C99 6.5.17] Comma operator. 
+ kASTOpComma, + // cast + kASTOpCast, + + // call + kASTOpCall, + + kASTParen, + kASTIntegerLiteral, + kASTFloatingLiteral, + kASTCharacterLiteral, + kASTConditionalOperator, + kConstantExpr, + kASTImaginaryLiteral, + kASTCallExpr, + kCpdAssignOp, + kASTVAArgExpr, + + // others + kASTOpPredefined, + kASTOpOpaqueValue, + kASTOpBinaryConditionalOperator, + kASTOpNoInitExpr, + kASTOpCompoundLiteralExpr, + kASTOpOffsetOfExpr, + kASTOpGenericSelectionExpr, + kASTOpInitListExpr, + kASTOpArrayInitLoop, + kASTOpArrayInitIndex, + kASTOpExprWithCleanups, + kASTOpMaterializeTemporary, + kASTOpSubstNonTypeTemplateParm, + kASTOpDependentScopeDeclRef, + kASTOpAtomic, + kASTOpStmtExpr, +}; + +enum ASTStmtOp { + kASTStmtNone, + kASTStmtDummy, + // branch + kASTStmtIf, + kASTStmtGoto, + kASTStmtIndirectGoto, + + kASTStmtLabel, + kASTStmtAddrOfLabelExpr, + + kASTStmtDo, + kASTStmtFor, + kASTStmtWhile, + kASTStmtBreak, + kASTStmtContinue, + + kASTStmtReturn, + kASTStmtBO, + kASTStmtBOAssign, + kASTStmtBOCompoundAssign, + kASTStmtUO, + kASTStmtCompound, + kASTStmtSwitch, + kASTStmtCase, + kASTStmtDefault, + kASTStmtNull, + kASTStmtDecl, + kASTStmtCAO, + kASTStmtImplicitCastExpr, + kASTStmtParenExpr, + kASTStmtIntegerLiteral, + kASTStmtFloatingLiteral, + kASTStmtVAArgExpr, + kASTStmtConditionalOperator, + kASTStmtCharacterLiteral, + kASTStmtStmtExpr, + kASTStmtCStyleCastExpr, + kASTStmtCallExpr, + kASTStmtAtomicExpr, + kASTStmtGCCAsmStmt, + kASTOffsetOfStmt, + kASTGenericSelectionExprStmt, + kASTStmtAttributed, + kASTStmtDeclRefExpr, + kASTStmtUnaryExprOrTypeTraitExpr, +}; +} // namespace maple +#endif // HIR2MPL_AST_INPUT_INCLUDE_AST_OP_H diff --git a/src/hir2mpl/ast_input/clang/include/ast_parser.h b/src/hir2mpl/ast_input/clang/include/ast_parser.h new file mode 100644 index 0000000000000000000000000000000000000000..ff4ae96a72d1625880d71903667bdf59cc2b0d83 --- /dev/null +++ b/src/hir2mpl/ast_input/clang/include/ast_parser.h @@ -0,0 +1,280 @@ +/* + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef HIR2MPL_AST_INPUT_INCLUDE_AST_PARSER_H +#define HIR2MPL_AST_INPUT_INCLUDE_AST_PARSER_H +#include +#include +#include "mempool_allocator.h" +#include "ast_decl.h" +#include "ast_interface.h" + +namespace maple { +class ASTParser { + public: + ASTParser(MapleAllocator &allocatorIn, uint32 fileIdxIn, const std::string &fileNameIn, + MapleList &astStructsIn, MapleList &astFuncsIn, MapleList &astVarsIn, + MapleList &astFileScopeAsmsIn, MapleList &astEnumsIn) + : fileIdx(fileIdxIn), fileName(fileNameIn, allocatorIn.GetMemPool()), globalVarDecles(allocatorIn.Adapter()), + funcDecles(allocatorIn.Adapter()), recordDecles(allocatorIn.Adapter()), + globalEnumDecles(allocatorIn.Adapter()), globalTypeDefDecles(allocatorIn.Adapter()), + globalFileScopeAsm(allocatorIn.Adapter()), astStructs(astStructsIn), astFuncs(astFuncsIn), + astVars(astVarsIn), astFileScopeAsms(astFileScopeAsmsIn), astEnums(astEnumsIn), + vlaSizeMap(allocatorIn.Adapter()) {} + virtual ~ASTParser() = default; + bool OpenFile(MapleAllocator &allocator); + bool Release() const; + + bool Verify() const; + bool PreProcessAST(); + + bool RetrieveStructs(MapleAllocator &allocator); + bool RetrieveFuncs(MapleAllocator &allocator); + bool RetrieveGlobalVars(MapleAllocator &allocator); + bool RetrieveFileScopeAsms(MapleAllocator &allocator); + bool RetrieveGlobalTypeDef(MapleAllocator &allocator); + bool RetrieveEnums(MapleAllocator &allocator); + + const std::string GetSourceFileName() const; + const uint32 GetFileIdx() const; + + // ProcessStmt + ASTStmt *ProcessStmt(MapleAllocator &allocator, const clang::Stmt &stmt); + ASTStmt *ProcessFunctionBody(MapleAllocator &allocator, const clang::CompoundStmt &compoundStmt); +#define PROCESS_STMT(CLASS) ProcessStmt##CLASS(MapleAllocator&, const clang::CLASS&) + ASTStmt *PROCESS_STMT(AttributedStmt); + ASTStmt *PROCESS_STMT(UnaryOperator); + ASTStmt *PROCESS_STMT(BinaryOperator); + ASTStmt *PROCESS_STMT(CompoundAssignOperator); + ASTStmt *PROCESS_STMT(ImplicitCastExpr); + ASTStmt *PROCESS_STMT(ParenExpr); + ASTStmt *PROCESS_STMT(IntegerLiteral); + ASTStmt *PROCESS_STMT(FloatingLiteral); + ASTStmt *PROCESS_STMT(VAArgExpr); + ASTStmt *PROCESS_STMT(ConditionalOperator); + ASTStmt *PROCESS_STMT(CharacterLiteral); + ASTStmt *PROCESS_STMT(StmtExpr); + ASTStmt *PROCESS_STMT(CallExpr); + ASTStmt *PROCESS_STMT(ReturnStmt); + ASTStmt *PROCESS_STMT(IfStmt); + ASTStmt *PROCESS_STMT(ForStmt); + ASTStmt *PROCESS_STMT(WhileStmt); + ASTStmt *PROCESS_STMT(DoStmt); + ASTStmt *PROCESS_STMT(BreakStmt); + ASTStmt *PROCESS_STMT(LabelStmt); + ASTStmt *PROCESS_STMT(ContinueStmt); + ASTStmt *PROCESS_STMT(CompoundStmt); + ASTStmt *PROCESS_STMT(GotoStmt); + ASTStmt *PROCESS_STMT(IndirectGotoStmt); + ASTStmt *PROCESS_STMT(SwitchStmt); + ASTStmt *PROCESS_STMT(CaseStmt); + ASTStmt *PROCESS_STMT(DefaultStmt); + ASTStmt *PROCESS_STMT(NullStmt); + ASTStmt *PROCESS_STMT(CStyleCastExpr); + ASTStmt *PROCESS_STMT(DeclStmt); + ASTStmt *PROCESS_STMT(AtomicExpr); + ASTStmt *PROCESS_STMT(GCCAsmStmt); + ASTStmt *PROCESS_STMT(OffsetOfExpr); + ASTStmt *PROCESS_STMT(GenericSelectionExpr); + ASTStmt *PROCESS_STMT(DeclRefExpr); + ASTStmt *PROCESS_STMT(UnaryExprOrTypeTraitExpr); + ASTStmt *PROCESS_STMT(AddrLabelExpr); + bool HasDefault(const clang::Stmt &stmt); + + // ProcessExpr + const clang::Expr *PeelParen(const clang::Expr &expr) const; + const clang::Expr *PeelParen2(const clang::Expr &expr) const; + ASTUnaryOperatorExpr *AllocUnaryOperatorExpr(MapleAllocator &allocator, const clang::UnaryOperator &expr) const; + ASTValue 
*AllocASTValue(const MapleAllocator &allocator) const; + ASTValue *TranslateExprEval(MapleAllocator &allocator, const clang::Expr *expr) const; + ASTExpr *EvaluateExprAsConst(MapleAllocator &allocator, const clang::Expr *expr); + bool HasLabelStmt(const clang::Stmt *expr); + ASTExpr *ProcessExpr(MapleAllocator &allocator, const clang::Expr *expr); + void SaveVLASizeExpr(MapleAllocator &allocator, const clang::QualType &qualType, std::list &vlaSizeExprs); + ASTBinaryOperatorExpr *AllocBinaryOperatorExpr(MapleAllocator &allocator, const clang::BinaryOperator &bo) const; + ASTExpr *ProcessExprCastExpr(MapleAllocator &allocator, const clang::CastExpr &expr); + ASTExpr *SolvePointerOffsetOperation(MapleAllocator &allocator, const clang::BinaryOperator &bo, + ASTBinaryOperatorExpr &astBinOpExpr, ASTExpr &astRExpr, ASTExpr &astLExpr); + ASTExpr *SolvePointerSubPointerOperation(MapleAllocator &allocator, const clang::BinaryOperator &bo, + ASTBinaryOperatorExpr &astBinOpExpr) const; +#define PROCESS_EXPR(CLASS) ProcessExpr##CLASS(MapleAllocator&, const clang::CLASS&) + ASTExpr *PROCESS_EXPR(UnaryOperator); + ASTExpr *PROCESS_EXPR(AddrLabelExpr); + ASTExpr *PROCESS_EXPR(NoInitExpr); + ASTExpr *PROCESS_EXPR(PredefinedExpr); + ASTExpr *PROCESS_EXPR(OpaqueValueExpr); + ASTExpr *PROCESS_EXPR(BinaryConditionalOperator); + ASTExpr *PROCESS_EXPR(CompoundLiteralExpr); + ASTExpr *PROCESS_EXPR(OffsetOfExpr); + ASTExpr *PROCESS_EXPR(InitListExpr); + ASTExpr *PROCESS_EXPR(BinaryOperator); + ASTExpr *PROCESS_EXPR(ImplicitValueInitExpr); + ASTExpr *PROCESS_EXPR(StringLiteral); + ASTExpr *PROCESS_EXPR(ArraySubscriptExpr); + ASTExpr *PROCESS_EXPR(UnaryExprOrTypeTraitExpr); + ASTExpr *PROCESS_EXPR(MemberExpr); + ASTExpr *PROCESS_EXPR(DesignatedInitUpdateExpr); + ASTExpr *PROCESS_EXPR(ImplicitCastExpr); + ASTExpr *PROCESS_EXPR(DeclRefExpr); + ASTExpr *PROCESS_EXPR(ParenExpr); + ASTExpr *PROCESS_EXPR(IntegerLiteral); + ASTExpr *PROCESS_EXPR(FloatingLiteral); + ASTExpr *PROCESS_EXPR(CharacterLiteral); + ASTExpr *PROCESS_EXPR(ConditionalOperator); + ASTExpr *PROCESS_EXPR(VAArgExpr); + ASTExpr *PROCESS_EXPR(GNUNullExpr); + ASTExpr *PROCESS_EXPR(SizeOfPackExpr); + ASTExpr *PROCESS_EXPR(UserDefinedLiteral); + ASTExpr *PROCESS_EXPR(ShuffleVectorExpr); + ASTExpr *PROCESS_EXPR(TypeTraitExpr); + ASTExpr *PROCESS_EXPR(ConstantExpr); + ASTExpr *PROCESS_EXPR(ImaginaryLiteral); + ASTExpr *PROCESS_EXPR(CallExpr); + ASTExpr *PROCESS_EXPR(CompoundAssignOperator); + ASTExpr *PROCESS_EXPR(StmtExpr); + ASTExpr *PROCESS_EXPR(CStyleCastExpr); + ASTExpr *PROCESS_EXPR(ArrayInitLoopExpr); + ASTExpr *PROCESS_EXPR(ArrayInitIndexExpr); + ASTExpr *PROCESS_EXPR(ExprWithCleanups); + ASTExpr *PROCESS_EXPR(MaterializeTemporaryExpr); + ASTExpr *PROCESS_EXPR(SubstNonTypeTemplateParmExpr); + ASTExpr *PROCESS_EXPR(DependentScopeDeclRefExpr); + ASTExpr *PROCESS_EXPR(AtomicExpr); + ASTExpr *PROCESS_EXPR(ChooseExpr); + ASTExpr *PROCESS_EXPR(GenericSelectionExpr); + + MapleVector SolveFuncParameterDecls(MapleAllocator &allocator, const clang::FunctionDecl &funcDecl, + MapleVector &typeDescIn, std::list &stmts); + GenericAttrs SolveFunctionAttributes(const clang::FunctionDecl &funcDecl, std::string &funcName) const; + ASTDecl *ProcessDecl(MapleAllocator &allocator, const clang::Decl &decl); + ASTStmt *SolveFunctionBody(MapleAllocator &allocator, const clang::FunctionDecl &funcDecl, ASTFunc &astFunc, + const std::list &stmts); + + void SetInitExprForASTVar(MapleAllocator &allocator, const clang::VarDecl &varDecl, const GenericAttrs &attrs, + ASTVar &astVar); 
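// Illustrative sketch, not part of this patch: ASTParser declares one
// ProcessExpr<Class> / ProcessStmt<Class> handler per Clang node class via the
// PROCESS_EXPR / PROCESS_STMT token-pasting macros, and a central ProcessExpr
// dispatches to them by node kind.  The self-contained miniature below shows that
// declaration-macro-plus-dispatch shape; MiniParser, MiniExpr and friends are
// hypothetical stand-ins, not hir2mpl or Clang types.
#include <iostream>
#include <string>

struct MiniExpr;  // forward declaration, used by ParenExpr

struct IntegerLiteral { int value; };
struct ParenExpr { const MiniExpr *inner; };

struct MiniExpr {
  enum Kind { kIntegerLiteral, kParenExpr } kind;
  IntegerLiteral intLit;  // valid when kind == kIntegerLiteral
  ParenExpr paren;        // valid when kind == kParenExpr
};

class MiniParser {
 public:
  // Same shape as the real header: one declaration per handled node class.
#define MINI_PROCESS_EXPR(CLASS) ProcessExpr##CLASS(const CLASS &expr)
  std::string MINI_PROCESS_EXPR(IntegerLiteral);
  std::string MINI_PROCESS_EXPR(ParenExpr);
#undef MINI_PROCESS_EXPR

  // Central dispatcher, analogous in spirit to ASTParser::ProcessExpr.
  std::string ProcessExpr(const MiniExpr &expr) {
    switch (expr.kind) {
      case MiniExpr::kIntegerLiteral: return ProcessExprIntegerLiteral(expr.intLit);
      case MiniExpr::kParenExpr:      return ProcessExprParenExpr(expr.paren);
    }
    return "<unknown>";
  }
};

std::string MiniParser::ProcessExprIntegerLiteral(const IntegerLiteral &expr) {
  return "int:" + std::to_string(expr.value);
}

std::string MiniParser::ProcessExprParenExpr(const ParenExpr &expr) {
  return "(" + ProcessExpr(*expr.inner) + ")";  // parentheses just forward to the inner expr
}

int main() {
  MiniExpr lit{MiniExpr::kIntegerLiteral, {42}, {nullptr}};
  MiniExpr wrapped{MiniExpr::kParenExpr, {0}, {&lit}};
  MiniParser parser;
  std::cout << parser.ProcessExpr(wrapped) << std::endl;  // prints "(int:42)"
  return 0;
}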
+ void SetAlignmentForASTVar(const clang::VarDecl &varDecl, ASTVar &astVar) const; +#define PROCESS_DECL(CLASS) ProcessDecl##CLASS##Decl(MapleAllocator &allocator, const clang::CLASS##Decl&) + ASTDecl *PROCESS_DECL(Field); + ASTDecl *PROCESS_DECL(Function); + ASTDecl *PROCESS_DECL(Record); + ASTDecl *PROCESS_DECL(Var); + ASTDecl *PROCESS_DECL(ParmVar); + ASTDecl *PROCESS_DECL(Enum); + ASTDecl *PROCESS_DECL(Typedef); + ASTDecl *PROCESS_DECL(EnumConstant); + ASTDecl *PROCESS_DECL(FileScopeAsm); + ASTDecl *PROCESS_DECL(Label); + ASTDecl *PROCESS_DECL(StaticAssert); + + static ASTExpr *GetAddrShiftExpr(MapleAllocator &allocator, ASTExpr &expr, uint32 typeSize); + static ASTExpr *GetSizeMulExpr(MapleAllocator &allocator, ASTExpr &expr, ASTExpr &ptrSizeExpr); + + private: + void ProcessNonnullFuncAttrs(const clang::FunctionDecl &funcDecl, ASTFunc &astFunc) const; + void ProcessNonnullFuncPtrAttrs(MapleAllocator &allocator, const clang::ValueDecl &valueDecl, ASTDecl &astVar); + void ProcessBoundaryFuncAttrs(MapleAllocator &allocator, const clang::FunctionDecl &funcDecl, ASTFunc &astFunc); + void ProcessByteBoundaryFuncAttrs(MapleAllocator &allocator, const clang::FunctionDecl &funcDecl, ASTFunc &astFunc); + void ProcessBoundaryFuncAttrsByIndex(MapleAllocator &allocator, const clang::FunctionDecl &funcDecl, + ASTFunc &astFunc); + void ProcessBoundaryParamAttrs(MapleAllocator &allocator, const clang::FunctionDecl &funcDecl, ASTFunc &astFunc); + void ProcessBoundaryParamAttrsByIndex(MapleAllocator &allocator, const clang::FunctionDecl &funcDecl, + ASTFunc &astFunc); + void ProcessBoundaryVarAttrs(MapleAllocator &allocator, const clang::VarDecl &varDecl, ASTVar &astVar); + void ProcessBoundaryFieldAttrs(MapleAllocator &allocator, const ASTStruct &structDecl, + const clang::RecordDecl &recDecl); + void ProcessBoundaryFuncPtrAttrs(MapleAllocator &allocator, const clang::ValueDecl &valueDecl, ASTDecl &astDecl); + template + bool ProcessBoundaryFuncPtrAttrsForParams(T *attr, MapleAllocator &allocator, const MIRFuncType &funcType, + const clang::FunctionProtoType &proto, std::vector &attrsVec); + template + bool ProcessBoundaryFuncPtrAttrsForRet(T *attr, MapleAllocator &allocator, const MIRFuncType &funcType, + const clang::FunctionType &clangFuncType, TypeAttrs &retAttr); + void ProcessBoundaryFuncPtrAttrsByIndex(MapleAllocator &allocator, const clang::ValueDecl &valueDecl, + ASTDecl &astDecl, const MIRFuncType &funcType); + template + bool ProcessBoundaryFuncPtrAttrsByIndexForParams(T *attr, ASTDecl &astDecl, const MIRFuncType &funcType, + std::vector &attrsVec) const; + void ProcessBoundaryLenExpr(MapleAllocator &allocator, ASTDecl &ptrDecl, const clang::QualType &qualType, + const std::function &getLenExprFromStringLiteral, + ASTExpr *lenExpr, bool isSize); + void ProcessBoundaryLenExprInFunc(MapleAllocator &allocator, const clang::FunctionDecl &funcDecl, + unsigned int idx, ASTFunc &astFunc, ASTExpr *lenExpr, bool isSize); + void ProcessBoundaryLenExprInFunc(MapleAllocator &allocator, const clang::FunctionDecl &funcDecl, + unsigned int idx, ASTFunc &astFunc, unsigned int lenIdx, bool isSize); + void ProcessBoundaryLenExprInVar(MapleAllocator &allocator, ASTDecl &ptrDecl, const clang::VarDecl &varDecl, + ASTExpr *lenExpr, bool isSize); + void ProcessBoundaryLenExprInVar(MapleAllocator &allocator, ASTDecl &ptrDecl, const clang::QualType &qualType, + ASTExpr *lenExpr, bool isSize); + void ProcessBoundaryLenExprInField(MapleAllocator &allocator, ASTDecl &ptrDecl, const ASTStruct &structDecl, + const 
clang::QualType &qualType, ASTExpr *lenExpr, bool isSize); + ASTValue *TranslateConstantValue2ASTValue(MapleAllocator &allocator, const clang::Expr *expr) const; + ASTValue *TranslateLValue2ASTValue(MapleAllocator &allocator, + const clang::Expr::EvalResult &result, const clang::Expr *expr) const; + void TraverseDecl(const clang::Decl *decl, std::function const &functor) const; + ASTDecl *GetAstDeclOfDeclRefExpr(MapleAllocator &allocator, const clang::Expr &expr); + uint32 GetSizeFromQualType(const clang::QualType qualType) const; + ASTExpr *GetTypeSizeFromQualType(MapleAllocator &allocator, const clang::QualType qualType); + uint32_t GetAlignOfType(const clang::QualType currQualType, clang::UnaryExprOrTypeTrait exprKind) const; + uint32_t GetAlignOfExpr(const clang::Expr &expr, clang::UnaryExprOrTypeTrait exprKind) const; + ASTExpr *BuildExprToComputeSizeFromVLA(MapleAllocator &allocator, const clang::QualType &qualType); + ASTExpr *ProcessExprBinaryOperatorComplex(MapleAllocator &allocator, const clang::BinaryOperator &bo); + +using FuncPtrBuiltinFunc = ASTExpr *(ASTParser::*)(MapleAllocator &allocator, const clang::CallExpr &expr, + std::stringstream &ss) const; +static std::map InitBuiltinFuncPtrMap(); +ASTExpr *ProcessBuiltinFuncByName(MapleAllocator &allocator, const clang::CallExpr &expr, std::stringstream &ss, + const std::string &name) const; +ASTExpr *ParseBuiltinFunc(MapleAllocator &allocator, const clang::CallExpr &expr, std::stringstream &ss) const; +#define PARSE_BUILTIIN_FUNC(FUNC) ParseBuiltin##FUNC(MapleAllocator &allocator, const clang::CallExpr &expr, \ + std::stringstream &ss) const + ASTExpr *PARSE_BUILTIIN_FUNC(ClassifyType); + ASTExpr *PARSE_BUILTIIN_FUNC(ConstantP); + ASTExpr *PARSE_BUILTIIN_FUNC(Isinfsign); + ASTExpr *PARSE_BUILTIIN_FUNC(HugeVal); + ASTExpr *PARSE_BUILTIIN_FUNC(HugeValf); + ASTExpr *PARSE_BUILTIIN_FUNC(Inf); + ASTExpr *PARSE_BUILTIIN_FUNC(Inff); + ASTExpr *PARSE_BUILTIIN_FUNC(Nan); + ASTExpr *PARSE_BUILTIIN_FUNC(Nanf); + ASTExpr *PARSE_BUILTIIN_FUNC(Signbit); + ASTExpr *PARSE_BUILTIIN_FUNC(SignBitf); + ASTExpr *PARSE_BUILTIIN_FUNC(SignBitl); + ASTExpr *PARSE_BUILTIIN_FUNC(Trap); + ASTExpr *PARSE_BUILTIIN_FUNC(IsUnordered); + ASTExpr *PARSE_BUILTIIN_FUNC(Copysignf); + ASTExpr *PARSE_BUILTIIN_FUNC(Copysign); + ASTExpr *PARSE_BUILTIIN_FUNC(Copysignl); + ASTExpr *PARSE_BUILTIIN_FUNC(Objectsize); + + static std::map builtingFuncPtrMap; + uint32 fileIdx; + const MapleString fileName; + LibAstFile *astFile = nullptr; + const AstUnitDecl *astUnitDecl = nullptr; + MapleList globalVarDecles; + MapleList funcDecles; + MapleList recordDecles; + MapleList globalEnumDecles; + MapleList globalTypeDefDecles; + MapleList globalFileScopeAsm; + + MapleList &astStructs; + MapleList &astFuncs; + MapleList &astVars; + MapleList &astFileScopeAsms; + MapleList &astEnums; + MapleMap vlaSizeMap; +}; +} // namespace maple +#endif // HIR2MPL_AST_INPUT_INCLUDE_AST_PARSER_H diff --git a/src/hir2mpl/ast_input/clang/include/ast_stmt.h b/src/hir2mpl/ast_input/clang/include/ast_stmt.h new file mode 100644 index 0000000000000000000000000000000000000000..ea910bbba7be8e3dcde4855e07d0330c52900ca0 --- /dev/null +++ b/src/hir2mpl/ast_input/clang/include/ast_stmt.h @@ -0,0 +1,713 @@ +/* + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef HIR2MPL_AST_INPUT_INCLUDE_AST_STMT_H +#define HIR2MPL_AST_INPUT_INCLUDE_AST_STMT_H +#include "ast_op.h" +#include "ast_expr.h" +#include "feir_stmt.h" + +namespace maple { +class ASTDecl; + +enum class SafeSS { + kNoneSS, + kSafeSS, + kUnsafeSS, +}; + +class ASTStmt { + public: + explicit ASTStmt(MapleAllocator &allocatorIn, ASTStmtOp o = kASTStmtNone) : exprs(allocatorIn.Adapter()), op(o) {} + virtual ~ASTStmt() = default; + void SetASTExpr(ASTExpr* astExpr); + + std::list Emit2FEStmt() const { + auto stmts = Emit2FEStmtImpl(); + for (UniqueFEIRStmt &stmt : stmts) { + if (stmt != nullptr && !stmt->HasSetLOCInfo()) { + stmt->SetSrcLoc(loc); + } + } + return stmts; + } + + ASTStmtOp GetASTStmtOp() const { + return op; + } + + const MapleVector &GetExprs() const { + return exprs; + } + + void SetSrcLoc(const Loc &l) { + loc = l; + } + + const Loc &GetSrcLoc() const { + return loc; + } + + uint32 GetSrcFileIdx() const { + return loc.fileIdx; + } + + uint32 GetSrcFileLineNum() const { + return loc.line; + } + + protected: + virtual std::list Emit2FEStmtImpl() const = 0; + MapleVector exprs; + ASTStmtOp op; + Loc loc = {0, 0, 0}; +}; + +class ASTStmtDummy : public ASTStmt { + public: + explicit ASTStmtDummy(MapleAllocator &allocatorIn) : ASTStmt(allocatorIn, kASTStmtDummy) {} + ~ASTStmtDummy() = default; + + private: + std::list Emit2FEStmtImpl() const override; +}; + +class ASTCompoundStmt : public ASTStmt { + public: + explicit ASTCompoundStmt(MapleAllocator &allocatorIn) : ASTStmt(allocatorIn, kASTStmtCompound), + astStmts(allocatorIn.Adapter()) {} + ~ASTCompoundStmt() = default; + void SetASTStmt(ASTStmt *astStmt); + void InsertASTStmtsAtFront(const std::list &stmts); + const MapleList &GetASTStmtList() const; + + void SetSafeSS(SafeSS state) { + safeSS = state; + } + + SafeSS GetSafeSS() const { + return safeSS; + } + + void SetEndLoc(const Loc &loc) { + endLoc = loc; + } + + const Loc &GetEndLoc() const { + return endLoc; + } + + private: + SafeSS safeSS = SafeSS::kNoneSS; + MapleList astStmts; // stmts + Loc endLoc = {0, 0, 0}; + std::list Emit2FEStmtImpl() const override; + mutable bool hasEmitted2MIRScope = false; +}; + +// Any other expressions or stmts should be extended here +class ASTReturnStmt : public ASTStmt { + public: + explicit ASTReturnStmt(MapleAllocator &allocatorIn) : ASTStmt(allocatorIn, kASTStmtReturn) {} + ~ASTReturnStmt() = default; + + private: + std::list Emit2FEStmtImpl() const override; +}; + +class ASTAttributedStmt : public ASTStmt { + public: + explicit ASTAttributedStmt(MapleAllocator &allocatorIn) : ASTStmt(allocatorIn, kASTStmtAttributed) {} + ~ASTAttributedStmt() override = default; + + private: + std::list Emit2FEStmtImpl() const override{ return {}; }; +}; + +class ASTIfStmt : public ASTStmt { + public: + explicit ASTIfStmt(MapleAllocator &allocatorIn) : ASTStmt(allocatorIn, kASTStmtIf) {} + ~ASTIfStmt() override = default; + + void SetCondExpr(ASTExpr *astExpr) { + condExpr = astExpr; + } + + void SetThenStmt(ASTStmt *astStmt) { + thenStmt = astStmt; + } + + void SetElseStmt(ASTStmt *astStmt) { + elseStmt = astStmt; + } + + private: + std::list Emit2FEStmtImpl() const override; + ASTExpr 
*condExpr = nullptr; + ASTStmt *thenStmt = nullptr; + ASTStmt *elseStmt = nullptr; +}; + +class ASTForStmt : public ASTStmt { + public: + explicit ASTForStmt(MapleAllocator &allocatorIn) : ASTStmt(allocatorIn, kASTStmtFor) {} + ~ASTForStmt() override = default; + + void SetInitStmt(ASTStmt *astStmt) { + initStmt = astStmt; + } + + void SetCondExpr(ASTExpr *astExpr) { + condExpr = astExpr; + } + + void SetIncExpr(ASTExpr *astExpr) { + incExpr = astExpr; + } + + void SetBodyStmt(ASTStmt *astStmt) { + bodyStmt = astStmt; + } + + void SetEndLoc(const Loc &loc) { + endLoc = loc; + } + + const Loc &GetEndLoc() const { + return endLoc; + } + private: + std::list Emit2FEStmtImpl() const override; + ASTStmt *initStmt = nullptr; + ASTExpr *condExpr = nullptr; + ASTExpr *incExpr = nullptr; + ASTStmt *bodyStmt = nullptr; + Loc endLoc = {0, 0, 0}; + mutable bool hasEmitted2MIRScope = false; +}; + +class ASTWhileStmt : public ASTStmt { + public: + explicit ASTWhileStmt(MapleAllocator &allocatorIn) : ASTStmt(allocatorIn, kASTStmtWhile) {} + ~ASTWhileStmt() override = default; + + void SetCondExpr(ASTExpr *astExpr) { + condExpr = astExpr; + } + + void SetBodyStmt(ASTStmt *astStmt) { + bodyStmt = astStmt; + } + + private: + std::list Emit2FEStmtImpl() const override; + ASTExpr *condExpr = nullptr; + ASTStmt *bodyStmt = nullptr; + mutable bool hasEmitted2MIRScope = false; +}; + +class ASTDoStmt : public ASTStmt { + public: + explicit ASTDoStmt(MapleAllocator &allocatorIn) : ASTStmt(allocatorIn, kASTStmtDo) {} + ~ASTDoStmt() override = default; + + void SetBodyStmt(ASTStmt *astStmt) { + bodyStmt = astStmt; + } + + void SetCondExpr(ASTExpr *astExpr) { + condExpr = astExpr; + } + + private: + std::list Emit2FEStmtImpl() const override; + ASTStmt *bodyStmt = nullptr; + ASTExpr *condExpr = nullptr; + mutable bool hasEmitted2MIRScope = false; +}; + +class ASTBreakStmt : public ASTStmt { + public: + explicit ASTBreakStmt(MapleAllocator &allocatorIn) : ASTStmt(allocatorIn, kASTStmtBreak) {} + ~ASTBreakStmt() override = default; + + private: + std::list Emit2FEStmtImpl() const override; +}; + +class ASTLabelStmt : public ASTStmt { + public: + explicit ASTLabelStmt(MapleAllocator &allocatorIn) : ASTStmt(allocatorIn, kASTStmtLabel), + labelName("", allocatorIn.GetMemPool()) {} + ~ASTLabelStmt() override = default; + + void SetSubStmt(ASTStmt *stmt) { + subStmt = stmt; + } + + const ASTStmt* GetSubStmt() const { + return subStmt; + } + + void SetLabelName(const std::string &name) { + labelName = name; + } + + const std::string GetLabelName() const { + return labelName.c_str() == nullptr ? 
"" : labelName.c_str(); + } + + private: + std::list Emit2FEStmtImpl() const override; + MapleString labelName; + ASTStmt *subStmt = nullptr; +}; + +class ASTContinueStmt : public ASTStmt { + public: + explicit ASTContinueStmt(MapleAllocator &allocatorIn) : ASTStmt(allocatorIn, kASTStmtContinue) {} + ~ASTContinueStmt() override = default; + + private: + std::list Emit2FEStmtImpl() const override; +}; + +class ASTUnaryOperatorStmt : public ASTStmt { + public: + explicit ASTUnaryOperatorStmt(MapleAllocator &allocatorIn) : ASTStmt(allocatorIn, kASTStmtUO) {} + ~ASTUnaryOperatorStmt() = default; + + private: + std::list Emit2FEStmtImpl() const override; +}; + +class ASTBinaryOperatorStmt : public ASTStmt { + public: + explicit ASTBinaryOperatorStmt(MapleAllocator &allocatorIn) : ASTStmt(allocatorIn, kASTStmtBO) {} + ~ASTBinaryOperatorStmt() override = default; + + private: + std::list Emit2FEStmtImpl() const override; +}; + +class ASTGotoStmt : public ASTStmt { + public: + explicit ASTGotoStmt(MapleAllocator &allocatorIn) : ASTStmt(allocatorIn, kASTStmtGoto), + labelName("", allocatorIn.GetMemPool()) {} + ~ASTGotoStmt() = default; + + std::string GetLabelName() const { + return labelName.c_str() == nullptr ? "" : labelName.c_str(); + } + + void SetLabelName(const std::string &name) { + labelName = name; + } + + private: + std::list Emit2FEStmtImpl() const override; + MapleString labelName; +}; + +class ASTIndirectGotoStmt : public ASTStmt { + public: + explicit ASTIndirectGotoStmt(MapleAllocator &allocatorIn) : ASTStmt(allocatorIn, kASTStmtIndirectGoto) {} + ~ASTIndirectGotoStmt() = default; + + protected: + std::list Emit2FEStmtImpl() const override; +}; + +class ASTSwitchStmt : public ASTStmt { + public: + explicit ASTSwitchStmt(MapleAllocator &allocatorIn) : ASTStmt(allocatorIn, kASTStmtSwitch) {} + ~ASTSwitchStmt() = default; + + void SetCondStmt(ASTStmt *cond) { + condStmt = cond; + } + + void SetBodyStmt(ASTStmt *body) { + bodyStmt = body; + } + + void SetCondExpr(ASTExpr *cond) { + condExpr = cond; + } + + const ASTStmt *GetCondStmt() const { + return condStmt; + } + + const ASTExpr *GetCondExpr() const { + return condExpr; + } + + const ASTStmt *GetBodyStmt() const { + return bodyStmt; + } + + void SetHasDefault(bool argHasDefault) { + hasDefualt = argHasDefault; + } + + bool HasDefault() const { + return hasDefualt; + } + + void SetCondType(MIRType *type) { + condType = type; + } + + private: + std::list Emit2FEStmtImpl() const override; + ASTStmt *condStmt = nullptr; + ASTExpr *condExpr = nullptr; + ASTStmt *bodyStmt = nullptr; + MIRType *condType = nullptr; + bool hasDefualt = false; + mutable bool hasEmitted2MIRScope = false; +}; + +class ASTCaseStmt : public ASTStmt { + public: + explicit ASTCaseStmt(MapleAllocator &allocatorIn) : ASTStmt(allocatorIn, kASTStmtCase) {} + ~ASTCaseStmt() = default; + + void SetLHS(ASTExpr *l) { + lhs = l; + } + + void SetRHS(ASTExpr *r) { + rhs = r; + } + + void SetSubStmt(ASTStmt *sub) { + subStmt = sub; + } + + const ASTExpr *GetLHS() const { + return lhs; + } + + const ASTExpr *GetRHS() const { + return rhs; + } + + const ASTStmt *GetSubStmt() const { + return subStmt; + } + + int64 GetLCaseTag() const { + return lCaseTag; + } + + int64 GetRCaseTag() const { + return rCaseTag; + } + + void SetLCaseTag(int64 l) { + lCaseTag = l; + } + + void SetRCaseTag(int64 r) { + rCaseTag = r; + } + + private: + std::list Emit2FEStmtImpl() const override; + ASTExpr *lhs = nullptr; + ASTExpr *rhs = nullptr; + ASTStmt *subStmt = nullptr; + int64 lCaseTag = 0; + 
int64 rCaseTag = 0; +}; + +class ASTDefaultStmt : public ASTStmt { + public: + explicit ASTDefaultStmt(MapleAllocator &allocatorIn) : ASTStmt(allocatorIn, kASTStmtDefault) {} + ~ASTDefaultStmt() = default; + + void SetChildStmt(ASTStmt* ch) { + child = ch; + } + + const ASTStmt* GetChildStmt() const { + return child; + } + + private: + std::list Emit2FEStmtImpl() const override; + ASTStmt* child = nullptr; +}; + +class ASTNullStmt : public ASTStmt { + public: + explicit ASTNullStmt(MapleAllocator &allocatorIn) : ASTStmt(allocatorIn, kASTStmtNull) {} + ~ASTNullStmt() = default; + + private: + std::list Emit2FEStmtImpl() const override; +}; + +class ASTDeclStmt : public ASTStmt { + public: + explicit ASTDeclStmt(MapleAllocator &allocatorIn) + : ASTStmt(allocatorIn, kASTStmtDecl), + subDecls(allocatorIn.Adapter()), + subDeclInfos(allocatorIn.Adapter()) {} + ~ASTDeclStmt() = default; + + void SetSubDecl(ASTDecl *decl) { + subDecls.emplace_back(decl); + (void)subDeclInfos.emplace_back(decl); + } + + void SetVLASizeExpr(ASTExpr *astExpr) { + (void)subDeclInfos.emplace_back(astExpr); + } + + const MapleList &GetSubDecls() const { + return subDecls; + } + + private: + std::list Emit2FEStmtImpl() const override; + + MapleList subDecls; + // saved vla size exprs before a vla decl + MapleList> subDeclInfos; +}; + +class ASTCompoundAssignOperatorStmt : public ASTStmt { + public: + explicit ASTCompoundAssignOperatorStmt(MapleAllocator &allocatorIn) : ASTStmt(allocatorIn, kASTStmtCAO) {} + ~ASTCompoundAssignOperatorStmt() override = default; + + private: + std::list Emit2FEStmtImpl() const override; +}; + +class ASTImplicitCastExprStmt : public ASTStmt { + public: + explicit ASTImplicitCastExprStmt(MapleAllocator &allocatorIn) : ASTStmt(allocatorIn, kASTStmtImplicitCastExpr) {} + ~ASTImplicitCastExprStmt() override = default; + + private: + std::list Emit2FEStmtImpl() const override; +}; + +class ASTParenExprStmt : public ASTStmt { + public: + explicit ASTParenExprStmt(MapleAllocator &allocatorIn) : ASTStmt(allocatorIn, kASTStmtParenExpr) {} + ~ASTParenExprStmt() override = default; + + private: + std::list Emit2FEStmtImpl() const override; +}; + +class ASTIntegerLiteralStmt : public ASTStmt { + public: + explicit ASTIntegerLiteralStmt(MapleAllocator &allocatorIn) : ASTStmt(allocatorIn, kASTStmtIntegerLiteral) {} + ~ASTIntegerLiteralStmt() override = default; + + private: + std::list Emit2FEStmtImpl() const override; +}; + +class ASTFloatingLiteralStmt : public ASTStmt { + public: + explicit ASTFloatingLiteralStmt(MapleAllocator &allocatorIn) : ASTStmt(allocatorIn, kASTStmtFloatingLiteral) {} + ~ASTFloatingLiteralStmt() override = default; + + private: + std::list Emit2FEStmtImpl() const override; +}; + +class ASTVAArgExprStmt : public ASTStmt { + public: + explicit ASTVAArgExprStmt(MapleAllocator &allocatorIn) : ASTStmt(allocatorIn, kASTStmtVAArgExpr) {} + ~ASTVAArgExprStmt() override = default; + + private: + std::list Emit2FEStmtImpl() const override; +}; + +class ASTConditionalOperatorStmt : public ASTStmt { + public: + explicit ASTConditionalOperatorStmt(MapleAllocator &allocatorIn) + : ASTStmt(allocatorIn, kASTStmtConditionalOperator) {} + ~ASTConditionalOperatorStmt() override = default; + + private: + std::list Emit2FEStmtImpl() const override; +}; + +class ASTCharacterLiteralStmt : public ASTStmt { + public: + explicit ASTCharacterLiteralStmt(MapleAllocator &allocatorIn) : ASTStmt(allocatorIn, kASTStmtCharacterLiteral) {} + ~ASTCharacterLiteralStmt() override = default; + + private: + std::list 
Emit2FEStmtImpl() const override; +}; + +class ASTStmtExprStmt : public ASTStmt { + public: + explicit ASTStmtExprStmt(MapleAllocator &allocatorIn) : ASTStmt(allocatorIn, kASTStmtStmtExpr) {} + ~ASTStmtExprStmt() override = default; + + void SetBodyStmt(ASTStmt *stmt) { + cpdStmt = stmt; + } + + const ASTStmt *GetBodyStmt() const { + return cpdStmt; + } + + private: + std::list Emit2FEStmtImpl() const override; + + ASTStmt *cpdStmt = nullptr; +}; + +class ASTCStyleCastExprStmt : public ASTStmt { + public: + explicit ASTCStyleCastExprStmt(MapleAllocator &allocatorIn) : ASTStmt(allocatorIn, kASTStmtCStyleCastExpr) {} + ~ASTCStyleCastExprStmt() override = default; + + private: + std::list Emit2FEStmtImpl() const override; +}; + +class ASTCallExprStmt : public ASTStmt { + public: + ASTCallExprStmt(MapleAllocator &allocatorIn, const std::string &varNameIn) + : ASTStmt(allocatorIn, kASTStmtCallExpr), varName(varNameIn) {} + ~ASTCallExprStmt() override = default; + + private: + using FuncPtrBuiltinFunc = std::list (ASTCallExprStmt::*)() const; + static std::map InitFuncPtrMap(); + std::list Emit2FEStmtImpl() const override; + + std::string varName; +}; + +class ASTAtomicExprStmt : public ASTStmt { + public: + explicit ASTAtomicExprStmt(MapleAllocator &allocatorIn) : ASTStmt(allocatorIn, kASTStmtAtomicExpr) {} + ~ASTAtomicExprStmt() override = default; + + private: + std::list Emit2FEStmtImpl() const override; +}; + +class ASTGCCAsmStmt : public ASTStmt { + public: + explicit ASTGCCAsmStmt(MapleAllocator &allocatorIn) : ASTStmt(allocatorIn, kASTStmtGCCAsmStmt), + asmStr("", allocatorIn.GetMemPool()), outputs(allocatorIn.Adapter()), inputs(allocatorIn.Adapter()), + clobbers(allocatorIn.Adapter()), labels(allocatorIn.Adapter()) {} + ~ASTGCCAsmStmt() override = default; + + void SetAsmStr(const std::string &str) { + asmStr = str; + } + + const std::string GetAsmStr() const { + return asmStr.c_str() == nullptr ? 
"" : asmStr.c_str(); + } + + void InsertOutput(std::tuple &&output) { + outputs.emplace_back(output); + } + + void InsertInput(std::pair &&input) { + inputs.emplace_back(input); + } + + void InsertClobber(std::string &&clobber) { + clobbers.emplace_back(clobber); + } + + void InsertLabel(const std::string &label) { + labels.emplace_back(label); + } + + void SetIsGoto(bool flag) { + isGoto = flag; + } + + void SetIsVolatile(bool flag) { + isVolatile = flag; + } + + private: + std::list Emit2FEStmtImpl() const override; + MapleString asmStr; + MapleVector> outputs; + MapleVector> inputs; + MapleVector clobbers; + MapleVector labels; + bool isGoto = false; + bool isVolatile = false; +}; + +class ASTOffsetOfStmt : public ASTStmt { + public: + explicit ASTOffsetOfStmt(MapleAllocator &allocatorIn) : ASTStmt(allocatorIn, kASTOffsetOfStmt) {} + ~ASTOffsetOfStmt() override = default; + + private: + std::list Emit2FEStmtImpl() const override; +}; + +class ASTGenericSelectionExprStmt : public ASTStmt { + public: + explicit ASTGenericSelectionExprStmt(MapleAllocator &allocatorIn) + : ASTStmt(allocatorIn, kASTGenericSelectionExprStmt) {} + ~ASTGenericSelectionExprStmt() override = default; + + private: + std::list Emit2FEStmtImpl() const override; +}; + +class ASTDeclRefExprStmt : public ASTStmt { + public: + explicit ASTDeclRefExprStmt(MapleAllocator &allocatorIn) : ASTStmt(allocatorIn, kASTStmtDeclRefExpr) {} + ~ASTDeclRefExprStmt() override = default; + + private: + std::list Emit2FEStmtImpl() const override; +}; + +class ASTUnaryExprOrTypeTraitExprStmt : public ASTStmt { + public: + explicit ASTUnaryExprOrTypeTraitExprStmt(MapleAllocator &allocatorIn) : ASTStmt(allocatorIn, kASTStmtDeclRefExpr) {} + ~ASTUnaryExprOrTypeTraitExprStmt() override = default; + + private: + std::list Emit2FEStmtImpl() const override; +}; + +class ASTUOAddrOfLabelExprStmt : public ASTStmt { + public: + explicit ASTUOAddrOfLabelExprStmt(MapleAllocator &allocatorIn) : ASTStmt(allocatorIn, kASTStmtAddrOfLabelExpr) {} + ~ASTUOAddrOfLabelExprStmt() override = default; + + private: + std::list Emit2FEStmtImpl() const override; +}; +} // namespace maple +#endif // HIR2MPL_AST_INPUT_INCLUDE_AST_STMT_H diff --git a/src/hir2mpl/ast_input/clang/include/ast_struct2fe_helper.h b/src/hir2mpl/ast_input/clang/include/ast_struct2fe_helper.h new file mode 100644 index 0000000000000000000000000000000000000000..3879607e44882f576f68a004eeb99765fb685e61 --- /dev/null +++ b/src/hir2mpl/ast_input/clang/include/ast_struct2fe_helper.h @@ -0,0 +1,133 @@ +/* + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef HIR2MPL_AST_INPUT_INCLUDE_AST_STRUCT2FE_HELPER_H +#define HIR2MPL_AST_INPUT_INCLUDE_AST_STRUCT2FE_HELPER_H +#include "fe_input_helper.h" +#include "ast_decl.h" +#include "mempool_allocator.h" + +namespace maple { +class ASTStruct2FEHelper : public FEInputStructHelper { + public: + ASTStruct2FEHelper(MapleAllocator &allocator, ASTStruct &structIn); + ~ASTStruct2FEHelper() = default; + + const ASTStruct &GetASTStruct() const { + return astStruct; + } + + protected: + bool ProcessDeclImpl() override; + void InitFieldHelpersImpl() override; + void InitMethodHelpersImpl() override; + TypeAttrs GetStructAttributeFromInputImpl() const override; + std::string GetStructNameOrinImpl() const override; + std::string GetStructNameMplImpl() const override; + std::list GetSuperClassNamesImpl() const override; + std::vector GetInterfaceNamesImpl() const override; + std::string GetSourceFileNameImpl() const override; + MIRStructType *CreateMIRStructTypeImpl(bool &error) const override; + uint64 GetRawAccessFlagsImpl() const override; + GStrIdx GetIRSrcFileSigIdxImpl() const override; + bool IsMultiDefImpl() const override; + std::string GetSrcFileNameImpl() const override; + + ASTStruct &astStruct; +}; + +class ASTGlobalVar2FEHelper : public FEInputGlobalVarHelper { + public: + ASTGlobalVar2FEHelper(MapleAllocator &allocatorIn, const ASTVar &varIn) + : FEInputGlobalVarHelper(allocatorIn), + astVar(varIn) {} + ~ASTGlobalVar2FEHelper() = default; + + protected: + bool ProcessDeclImpl(MapleAllocator &allocator) override; + const ASTVar &astVar; +}; + +class ASTFileScopeAsm2FEHelper : public FEInputFileScopeAsmHelper { + public: + ASTFileScopeAsm2FEHelper(MapleAllocator &allocatorIn, const ASTFileScopeAsm &astAsmIn) + : FEInputFileScopeAsmHelper(allocatorIn), + astAsm(astAsmIn) {} + ~ASTFileScopeAsm2FEHelper() = default; + + protected: + bool ProcessDeclImpl(MapleAllocator &allocator) override; + const ASTFileScopeAsm &astAsm; +}; + +class ASTEnum2FEHelper : public FEInputEnumHelper { + public: + ASTEnum2FEHelper(MapleAllocator &allocatorIn, const ASTEnumDecl &astEnumIn) + : FEInputEnumHelper(allocatorIn), + astEnum(astEnumIn) {} + ~ASTEnum2FEHelper() = default; + + protected: + bool ProcessDeclImpl(MapleAllocator &allocator) override; + const ASTEnumDecl &astEnum; +}; + +class ASTStructField2FEHelper : public FEInputFieldHelper { + public: + ASTStructField2FEHelper(MapleAllocator &allocator, ASTField &fieldIn, MIRType &structTypeIn) + : FEInputFieldHelper(allocator), + field(fieldIn), structType(structTypeIn) {} + ~ASTStructField2FEHelper() = default; + + protected: + bool ProcessDeclImpl(MapleAllocator &allocator) override; + bool ProcessDeclWithContainerImpl(MapleAllocator &allocator) override; + ASTField &field; + MIRType &structType; +}; + +class ASTFunc2FEHelper : public FEInputMethodHelper { + public: + ASTFunc2FEHelper(MapleAllocator &allocator, ASTFunc &funcIn) + : FEInputMethodHelper(allocator), + func(funcIn) { + srcLang = kSrcLangC; + } + ~ASTFunc2FEHelper() = default; + ASTFunc &GetMethod() const { + return func; + } + + const std::string GetSrcFileName() const; + + protected: + bool ProcessDeclImpl(MapleAllocator &allocator) override; + void SolveReturnAndArgTypesImpl(MapleAllocator &allocator) override; + std::string GetMethodNameImpl(bool inMpl, bool full) const override; + bool IsVargImpl() const override; + bool HasThisImpl() const override; + MIRType *GetTypeForThisImpl() const override; + FuncAttrs GetAttrsImpl() const override; + bool IsStaticImpl() const override; + bool 
IsVirtualImpl() const override; + bool IsNativeImpl() const override; + bool HasCodeImpl() const override; + + void SolveFunctionArguments() const; + void SolveFunctionAttributes(); + ASTFunc &func; + bool firstArgRet = false; +}; +} // namespace maple +#endif // HIR2MPL_AST_INPUT_INCLUDE_AST_STRUCT2FE_HELPER_H diff --git a/src/hir2mpl/ast_input/clang/include/builtin_func_emit.def b/src/hir2mpl/ast_input/clang/include/builtin_func_emit.def new file mode 100644 index 0000000000000000000000000000000000000000..cbf2a41f2c88906bafe44a2067789c72c9b28009 --- /dev/null +++ b/src/hir2mpl/ast_input/clang/include/builtin_func_emit.def @@ -0,0 +1,186 @@ +BUILTIN_FUNC_EMIT("alloca", &ASTCallExpr::EmitBuiltinAlloca) +BUILTIN_FUNC_EMIT("__builtin_ctz", &ASTCallExpr::EmitBuiltinCtz) +BUILTIN_FUNC_EMIT("__builtin_ctzl", &ASTCallExpr::EmitBuiltinCtzl) +BUILTIN_FUNC_EMIT("__builtin_ctzll", &ASTCallExpr::EmitBuiltinCtzl) +BUILTIN_FUNC_EMIT("__builtin_clz", &ASTCallExpr::EmitBuiltinClz) +BUILTIN_FUNC_EMIT("__builtin_clzl", &ASTCallExpr::EmitBuiltinClzl) +BUILTIN_FUNC_EMIT("__builtin_clzll", &ASTCallExpr::EmitBuiltinClzl) +BUILTIN_FUNC_EMIT("__builtin_popcount", &ASTCallExpr::EmitBuiltinPopcount) +BUILTIN_FUNC_EMIT("__builtin_popcountl", &ASTCallExpr::EmitBuiltinPopcountl) +BUILTIN_FUNC_EMIT("__builtin_popcountll", &ASTCallExpr::EmitBuiltinPopcountll) +BUILTIN_FUNC_EMIT("__builtin_parity", &ASTCallExpr::EmitBuiltinParity) +BUILTIN_FUNC_EMIT("__builtin_parityl", &ASTCallExpr::EmitBuiltinParityl) +BUILTIN_FUNC_EMIT("__builtin_parityll", &ASTCallExpr::EmitBuiltinParityll) +BUILTIN_FUNC_EMIT("__builtin_clrsb", &ASTCallExpr::EmitBuiltinClrsb) +BUILTIN_FUNC_EMIT("__builtin_clrsbl", &ASTCallExpr::EmitBuiltinClrsbl) +BUILTIN_FUNC_EMIT("__builtin_clrsbll", &ASTCallExpr::EmitBuiltinClrsbll) +BUILTIN_FUNC_EMIT("__builtin_ffs", &ASTCallExpr::EmitBuiltinFfs) +BUILTIN_FUNC_EMIT("__builtin_ffsl", &ASTCallExpr::EmitBuiltinFfsl) +BUILTIN_FUNC_EMIT("__builtin_ffsll", &ASTCallExpr::EmitBuiltinFfsll) +BUILTIN_FUNC_EMIT("__builtin_is_aligned", &ASTCallExpr::EmitBuiltinIsAligned) +BUILTIN_FUNC_EMIT("__builtin_align_up", &ASTCallExpr::EmitBuiltinAlignUp) +BUILTIN_FUNC_EMIT("__builtin_align_down", &ASTCallExpr::EmitBuiltinAlignDown) +BUILTIN_FUNC_EMIT("__builtin_alloca", &ASTCallExpr::EmitBuiltinAlloca) +BUILTIN_FUNC_EMIT("__builtin_expect", &ASTCallExpr::EmitBuiltinExpect) +BUILTIN_FUNC_EMIT("__builtin_va_start", &ASTCallExpr::EmitBuiltinVaStart) +BUILTIN_FUNC_EMIT("__builtin_va_end", &ASTCallExpr::EmitBuiltinVaEnd) +BUILTIN_FUNC_EMIT("__builtin_va_copy", &ASTCallExpr::EmitBuiltinVaCopy) +BUILTIN_FUNC_EMIT("__builtin_prefetch", &ASTCallExpr::EmitBuiltinPrefetch) +BUILTIN_FUNC_EMIT("__builtin_abs", &ASTCallExpr::EmitBuiltinAbs) +BUILTIN_FUNC_EMIT("__builtin_fabs", &ASTCallExpr::EmitBuiltinAbs) +BUILTIN_FUNC_EMIT("__builtin_fabsf", &ASTCallExpr::EmitBuiltinAbs) +BUILTIN_FUNC_EMIT("__builtin_fabsl", &ASTCallExpr::EmitBuiltinAbs) +BUILTIN_FUNC_EMIT("__builtin_fabsf16", &ASTCallExpr::EmitBuiltinAbs) +BUILTIN_FUNC_EMIT("__builtin_fabsf128", &ASTCallExpr::EmitBuiltinAbs) +BUILTIN_FUNC_EMIT("__builtin_labs", &ASTCallExpr::EmitBuiltinAbs) +BUILTIN_FUNC_EMIT("__builtin_llabs", &ASTCallExpr::EmitBuiltinAbs) +BUILTIN_FUNC_EMIT("abs", &ASTCallExpr::EmitBuiltinAbs) +BUILTIN_FUNC_EMIT("labs", &ASTCallExpr::EmitBuiltinAbs) +BUILTIN_FUNC_EMIT("llabs", &ASTCallExpr::EmitBuiltinAbs) +BUILTIN_FUNC_EMIT("fabs", &ASTCallExpr::EmitBuiltinAbs) +BUILTIN_FUNC_EMIT("fabsf", &ASTCallExpr::EmitBuiltinAbs) +BUILTIN_FUNC_EMIT("fabsl", &ASTCallExpr::EmitBuiltinAbs) 
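// Illustrative sketch, not part of this patch: builtin_func_emit.def is an
// X-macro table mapping a builtin's name to the ASTCallExpr member function that
// emits it.  A consumer defines BUILTIN_FUNC_EMIT to turn each row into a map
// entry and then #includes the .def file.  The miniature below inlines three rows
// instead of including the real file; MiniCallExpr is a hypothetical stand-in for
// ASTCallExpr.
#include <iostream>
#include <map>
#include <string>

class MiniCallExpr {
 public:
  using EmitFunc = std::string (MiniCallExpr::*)() const;

  std::string EmitBuiltinAbs() const    { return "intrinsic abs"; }
  std::string EmitBuiltinClz() const    { return "intrinsic clz"; }
  std::string EmitBuiltinAlloca() const { return "alloca node"; }

  // Built once; mirrors how the real table is assembled from the .def rows.
  static const std::map<std::string, EmitFunc> &GetEmitTable() {
    static const std::map<std::string, EmitFunc> table = {
#define BUILTIN_FUNC_EMIT(name, func) {name, func},
        // In the real code this block is: #include "builtin_func_emit.def"
        BUILTIN_FUNC_EMIT("__builtin_abs", &MiniCallExpr::EmitBuiltinAbs)
        BUILTIN_FUNC_EMIT("__builtin_clz", &MiniCallExpr::EmitBuiltinClz)
        BUILTIN_FUNC_EMIT("alloca", &MiniCallExpr::EmitBuiltinAlloca)
#undef BUILTIN_FUNC_EMIT
    };
    return table;
  }

  // Dispatch by callee name; unknown names fall back to a plain call.
  std::string EmitCall(const std::string &calleeName) const {
    auto it = GetEmitTable().find(calleeName);
    return it == GetEmitTable().end() ? "plain call to " + calleeName
                                      : (this->*(it->second))();
  }
};

int main() {
  MiniCallExpr call;
  std::cout << call.EmitCall("__builtin_clz") << std::endl;  // intrinsic clz
  std::cout << call.EmitCall("printf") << std::endl;         // plain call to printf
  return 0;
}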
+BUILTIN_FUNC_EMIT("__builtin_acos", &ASTCallExpr::EmitBuiltinACos) +BUILTIN_FUNC_EMIT("acos", &ASTCallExpr::EmitBuiltinACos) +BUILTIN_FUNC_EMIT("__builtin_acosf", &ASTCallExpr::EmitBuiltinACosf) +BUILTIN_FUNC_EMIT("acosf", &ASTCallExpr::EmitBuiltinACosf) +BUILTIN_FUNC_EMIT("__builtin_asin", &ASTCallExpr::EmitBuiltinASin) +BUILTIN_FUNC_EMIT("asin", &ASTCallExpr::EmitBuiltinASin) +BUILTIN_FUNC_EMIT("__builtin_asinf", &ASTCallExpr::EmitBuiltinASinf) +BUILTIN_FUNC_EMIT("asinf", &ASTCallExpr::EmitBuiltinASinf) +BUILTIN_FUNC_EMIT("__builtin_atan", &ASTCallExpr::EmitBuiltinATan) +BUILTIN_FUNC_EMIT("atan", &ASTCallExpr::EmitBuiltinATan) +BUILTIN_FUNC_EMIT("__builtin_atanf", &ASTCallExpr::EmitBuiltinATanf) +BUILTIN_FUNC_EMIT("atanf", &ASTCallExpr::EmitBuiltinATanf) +BUILTIN_FUNC_EMIT("__builtin_cos", &ASTCallExpr::EmitBuiltinCos) +BUILTIN_FUNC_EMIT("cos", &ASTCallExpr::EmitBuiltinCos) +BUILTIN_FUNC_EMIT("__builtin_cosf", &ASTCallExpr::EmitBuiltinCosf) +BUILTIN_FUNC_EMIT("cosf", &ASTCallExpr::EmitBuiltinCosf) +BUILTIN_FUNC_EMIT("__builtin_cosh", &ASTCallExpr::EmitBuiltinCosh) +BUILTIN_FUNC_EMIT("cosh", &ASTCallExpr::EmitBuiltinCosh) +BUILTIN_FUNC_EMIT("__builtin_coshf", &ASTCallExpr::EmitBuiltinCoshf) +BUILTIN_FUNC_EMIT("coshf", &ASTCallExpr::EmitBuiltinCoshf) +BUILTIN_FUNC_EMIT("__builtin_sin", &ASTCallExpr::EmitBuiltinSin) +BUILTIN_FUNC_EMIT("sin", &ASTCallExpr::EmitBuiltinSin) +BUILTIN_FUNC_EMIT("__builtin_sinf", &ASTCallExpr::EmitBuiltinSinf) +BUILTIN_FUNC_EMIT("sinf", &ASTCallExpr::EmitBuiltinSinf) +BUILTIN_FUNC_EMIT("__builtin_sinh", &ASTCallExpr::EmitBuiltinSinh) +BUILTIN_FUNC_EMIT("sinh", &ASTCallExpr::EmitBuiltinSinh) +BUILTIN_FUNC_EMIT("__builtin_sinhf", &ASTCallExpr::EmitBuiltinSinhf) +BUILTIN_FUNC_EMIT("sinhf", &ASTCallExpr::EmitBuiltinSinhf) +BUILTIN_FUNC_EMIT("__builtin_exp", &ASTCallExpr::EmitBuiltinExp) +BUILTIN_FUNC_EMIT("exp", &ASTCallExpr::EmitBuiltinExp) +BUILTIN_FUNC_EMIT("__builtin_expf", &ASTCallExpr::EmitBuiltinExpf) +BUILTIN_FUNC_EMIT("expf", &ASTCallExpr::EmitBuiltinExpf) +BUILTIN_FUNC_EMIT("__builtin_fmax", &ASTCallExpr::EmitBuiltinFmax) +BUILTIN_FUNC_EMIT("__builtin_fmaxf", &ASTCallExpr::EmitBuiltinFmax) +BUILTIN_FUNC_EMIT("__builtin_fmaxf16", &ASTCallExpr::EmitBuiltinFmax) +BUILTIN_FUNC_EMIT("__builtin_fmaxl", &ASTCallExpr::EmitBuiltinFmax) +BUILTIN_FUNC_EMIT("fmax", &ASTCallExpr::EmitBuiltinFmax) +BUILTIN_FUNC_EMIT("fmaxf", &ASTCallExpr::EmitBuiltinFmax) +BUILTIN_FUNC_EMIT("fmaxl", &ASTCallExpr::EmitBuiltinFmax) +BUILTIN_FUNC_EMIT("__builtin_fmin", &ASTCallExpr::EmitBuiltinFmin) +BUILTIN_FUNC_EMIT("__builtin_fminf", &ASTCallExpr::EmitBuiltinFmin) +BUILTIN_FUNC_EMIT("__builtin_fmin16", &ASTCallExpr::EmitBuiltinFmin) +BUILTIN_FUNC_EMIT("__builtin_fminl", &ASTCallExpr::EmitBuiltinFmin) +BUILTIN_FUNC_EMIT("fmin", &ASTCallExpr::EmitBuiltinFmin) +BUILTIN_FUNC_EMIT("fminf", &ASTCallExpr::EmitBuiltinFmin) +BUILTIN_FUNC_EMIT("fminl", &ASTCallExpr::EmitBuiltinFmin) +BUILTIN_FUNC_EMIT("__builtin_log", &ASTCallExpr::EmitBuiltinLog) +BUILTIN_FUNC_EMIT("__builtin_logf", &ASTCallExpr::EmitBuiltinLogf) +BUILTIN_FUNC_EMIT("__builtin_log10", &ASTCallExpr::EmitBuiltinLog10) +BUILTIN_FUNC_EMIT("__builtin_log10f", &ASTCallExpr::EmitBuiltinLog10f) +BUILTIN_FUNC_EMIT("__builtin_isunordered", &ASTCallExpr::EmitBuiltinIsunordered) +BUILTIN_FUNC_EMIT("__builtin_isless", &ASTCallExpr::EmitBuiltinIsless) +BUILTIN_FUNC_EMIT("__builtin_islessequal", &ASTCallExpr::EmitBuiltinIslessequal) +BUILTIN_FUNC_EMIT("__builtin_isgreater", &ASTCallExpr::EmitBuiltinIsgreater) 
+BUILTIN_FUNC_EMIT("__builtin_isgreaterequal", &ASTCallExpr::EmitBuiltinIsgreaterequal) +BUILTIN_FUNC_EMIT("__builtin_islessgreater", &ASTCallExpr::EmitBuiltinIslessgreater) +BUILTIN_FUNC_EMIT("__warn_memset_zero_len", &ASTCallExpr::EmitBuiltinWarnMemsetZeroLen) +BUILTIN_FUNC_EMIT("__builtin_rotateleft8", &ASTCallExpr::EmitBuiltinRotateLeft8) +BUILTIN_FUNC_EMIT("__builtin_rotateleft16", &ASTCallExpr::EmitBuiltinRotateLeft16) +BUILTIN_FUNC_EMIT("__builtin_rotateleft32", &ASTCallExpr::EmitBuiltinRotateLeft32) +BUILTIN_FUNC_EMIT("__builtin_rotateleft64", &ASTCallExpr::EmitBuiltinRotateLeft64) +BUILTIN_FUNC_EMIT("__builtin_rotateright8", &ASTCallExpr::EmitBuiltinRotateRight8) +BUILTIN_FUNC_EMIT("__builtin_rotateright16", &ASTCallExpr::EmitBuiltinRotateRight16) +BUILTIN_FUNC_EMIT("__builtin_rotateright32", &ASTCallExpr::EmitBuiltinRotateRight32) +BUILTIN_FUNC_EMIT("__builtin_rotateright64", &ASTCallExpr::EmitBuiltinRotateRight64) + +BUILTIN_FUNC_EMIT("__sync_add_and_fetch_8", &ASTCallExpr::EmitBuiltinSyncAddAndFetch8) +BUILTIN_FUNC_EMIT("__sync_add_and_fetch_4", &ASTCallExpr::EmitBuiltinSyncAddAndFetch4) +BUILTIN_FUNC_EMIT("__sync_add_and_fetch_2", &ASTCallExpr::EmitBuiltinSyncAddAndFetch2) +BUILTIN_FUNC_EMIT("__sync_add_and_fetch_1", &ASTCallExpr::EmitBuiltinSyncAddAndFetch1) +BUILTIN_FUNC_EMIT("__sync_sub_and_fetch_8", &ASTCallExpr::EmitBuiltinSyncSubAndFetch8) +BUILTIN_FUNC_EMIT("__sync_sub_and_fetch_4", &ASTCallExpr::EmitBuiltinSyncSubAndFetch4) +BUILTIN_FUNC_EMIT("__sync_sub_and_fetch_2", &ASTCallExpr::EmitBuiltinSyncSubAndFetch2) +BUILTIN_FUNC_EMIT("__sync_sub_and_fetch_1", &ASTCallExpr::EmitBuiltinSyncSubAndFetch1) +BUILTIN_FUNC_EMIT("__sync_fetch_and_sub_8", &ASTCallExpr::EmitBuiltinSyncFetchAndSub8) +BUILTIN_FUNC_EMIT("__sync_fetch_and_sub_4", &ASTCallExpr::EmitBuiltinSyncFetchAndSub4) +BUILTIN_FUNC_EMIT("__sync_fetch_and_sub_2", &ASTCallExpr::EmitBuiltinSyncFetchAndSub2) +BUILTIN_FUNC_EMIT("__sync_fetch_and_sub_1", &ASTCallExpr::EmitBuiltinSyncFetchAndSub1) +BUILTIN_FUNC_EMIT("__sync_fetch_and_add_8", &ASTCallExpr::EmitBuiltinSyncFetchAndAdd8) +BUILTIN_FUNC_EMIT("__sync_fetch_and_add_4", &ASTCallExpr::EmitBuiltinSyncFetchAndAdd4) +BUILTIN_FUNC_EMIT("__sync_fetch_and_add_2", &ASTCallExpr::EmitBuiltinSyncFetchAndAdd2) +BUILTIN_FUNC_EMIT("__sync_fetch_and_add_1", &ASTCallExpr::EmitBuiltinSyncFetchAndAdd1) +BUILTIN_FUNC_EMIT("__sync_bool_compare_and_swap_8", &ASTCallExpr::EmitBuiltinSyncBoolCompareAndSwap8) +BUILTIN_FUNC_EMIT("__sync_bool_compare_and_swap_4", &ASTCallExpr::EmitBuiltinSyncBoolCompareAndSwap4) +BUILTIN_FUNC_EMIT("__sync_bool_compare_and_swap_2", &ASTCallExpr::EmitBuiltinSyncBoolCompareAndSwap2) +BUILTIN_FUNC_EMIT("__sync_bool_compare_and_swap_1", &ASTCallExpr::EmitBuiltinSyncBoolCompareAndSwap1) +BUILTIN_FUNC_EMIT("__sync_val_compare_and_swap_8", &ASTCallExpr::EmitBuiltinSyncValCompareAndSwap8) +BUILTIN_FUNC_EMIT("__sync_val_compare_and_swap_4", &ASTCallExpr::EmitBuiltinSyncValCompareAndSwap4) +BUILTIN_FUNC_EMIT("__sync_val_compare_and_swap_2", &ASTCallExpr::EmitBuiltinSyncValCompareAndSwap2) +BUILTIN_FUNC_EMIT("__sync_val_compare_and_swap_1", &ASTCallExpr::EmitBuiltinSyncValCompareAndSwap1) +BUILTIN_FUNC_EMIT("__sync_lock_test_and_set_8", &ASTCallExpr::EmitBuiltinSyncLockTestAndSet8) +BUILTIN_FUNC_EMIT("__sync_lock_test_and_set_4", &ASTCallExpr::EmitBuiltinSyncLockTestAndSet4) +BUILTIN_FUNC_EMIT("__sync_lock_test_and_set_2", &ASTCallExpr::EmitBuiltinSyncLockTestAndSet2) +BUILTIN_FUNC_EMIT("__sync_lock_test_and_set_1", &ASTCallExpr::EmitBuiltinSyncLockTestAndSet1) 
+BUILTIN_FUNC_EMIT("__sync_lock_release_8", &ASTCallExpr::EmitBuiltinSyncLockRelease8) +BUILTIN_FUNC_EMIT("__sync_lock_release_4", &ASTCallExpr::EmitBuiltinSyncLockRelease4) +BUILTIN_FUNC_EMIT("__sync_lock_release_2", &ASTCallExpr::EmitBuiltinSyncLockRelease2) +BUILTIN_FUNC_EMIT("__sync_lock_release_1", &ASTCallExpr::EmitBuiltinSyncLockRelease1) +BUILTIN_FUNC_EMIT("__sync_fetch_and_and_1", &ASTCallExpr::EmitBuiltinSyncFetchAndAnd1) +BUILTIN_FUNC_EMIT("__sync_fetch_and_and_2", &ASTCallExpr::EmitBuiltinSyncFetchAndAnd2) +BUILTIN_FUNC_EMIT("__sync_fetch_and_and_4", &ASTCallExpr::EmitBuiltinSyncFetchAndAnd4) +BUILTIN_FUNC_EMIT("__sync_fetch_and_and_8", &ASTCallExpr::EmitBuiltinSyncFetchAndAnd8) +BUILTIN_FUNC_EMIT("__sync_fetch_and_or_1", &ASTCallExpr::EmitBuiltinSyncFetchAndOr1) +BUILTIN_FUNC_EMIT("__sync_fetch_and_or_2", &ASTCallExpr::EmitBuiltinSyncFetchAndOr2) +BUILTIN_FUNC_EMIT("__sync_fetch_and_or_4", &ASTCallExpr::EmitBuiltinSyncFetchAndOr4) +BUILTIN_FUNC_EMIT("__sync_fetch_and_or_8", &ASTCallExpr::EmitBuiltinSyncFetchAndOr8) +BUILTIN_FUNC_EMIT("__sync_fetch_and_xor_1", &ASTCallExpr::EmitBuiltinSyncFetchAndXor1) +BUILTIN_FUNC_EMIT("__sync_fetch_and_xor_2", &ASTCallExpr::EmitBuiltinSyncFetchAndXor2) +BUILTIN_FUNC_EMIT("__sync_fetch_and_xor_4", &ASTCallExpr::EmitBuiltinSyncFetchAndXor4) +BUILTIN_FUNC_EMIT("__sync_fetch_and_xor_8", &ASTCallExpr::EmitBuiltinSyncFetchAndXor8) +BUILTIN_FUNC_EMIT("__sync_fetch_and_nand_1", &ASTCallExpr::EmitBuiltinSyncFetchAndNand1) +BUILTIN_FUNC_EMIT("__sync_fetch_and_nand_2", &ASTCallExpr::EmitBuiltinSyncFetchAndNand2) +BUILTIN_FUNC_EMIT("__sync_fetch_and_nand_4", &ASTCallExpr::EmitBuiltinSyncFetchAndNand4) +BUILTIN_FUNC_EMIT("__sync_fetch_and_nand_8", &ASTCallExpr::EmitBuiltinSyncFetchAndNand8) +BUILTIN_FUNC_EMIT("__sync_and_and_fetch_1", &ASTCallExpr::EmitBuiltinSyncAndAndFetch1) +BUILTIN_FUNC_EMIT("__sync_and_and_fetch_2", &ASTCallExpr::EmitBuiltinSyncAndAndFetch2) +BUILTIN_FUNC_EMIT("__sync_and_and_fetch_4", &ASTCallExpr::EmitBuiltinSyncAndAndFetch4) +BUILTIN_FUNC_EMIT("__sync_and_and_fetch_8", &ASTCallExpr::EmitBuiltinSyncAndAndFetch8) +BUILTIN_FUNC_EMIT("__sync_or_and_fetch_1", &ASTCallExpr::EmitBuiltinSyncOrAndFetch1) +BUILTIN_FUNC_EMIT("__sync_or_and_fetch_2", &ASTCallExpr::EmitBuiltinSyncOrAndFetch2) +BUILTIN_FUNC_EMIT("__sync_or_and_fetch_4", &ASTCallExpr::EmitBuiltinSyncOrAndFetch4) +BUILTIN_FUNC_EMIT("__sync_or_and_fetch_8", &ASTCallExpr::EmitBuiltinSyncOrAndFetch8) +BUILTIN_FUNC_EMIT("__sync_xor_and_fetch_1", &ASTCallExpr::EmitBuiltinSyncXorAndFetch1) +BUILTIN_FUNC_EMIT("__sync_xor_and_fetch_2", &ASTCallExpr::EmitBuiltinSyncXorAndFetch2) +BUILTIN_FUNC_EMIT("__sync_xor_and_fetch_4", &ASTCallExpr::EmitBuiltinSyncXorAndFetch4) +BUILTIN_FUNC_EMIT("__sync_xor_and_fetch_8", &ASTCallExpr::EmitBuiltinSyncXorAndFetch8) +BUILTIN_FUNC_EMIT("__sync_nand_and_fetch_1", &ASTCallExpr::EmitBuiltinSyncNandAndFetch1) +BUILTIN_FUNC_EMIT("__sync_nand_and_fetch_2", &ASTCallExpr::EmitBuiltinSyncNandAndFetch2) +BUILTIN_FUNC_EMIT("__sync_nand_and_fetch_4", &ASTCallExpr::EmitBuiltinSyncNandAndFetch4) +BUILTIN_FUNC_EMIT("__sync_nand_and_fetch_8", &ASTCallExpr::EmitBuiltinSyncNandAndFetch8) +BUILTIN_FUNC_EMIT("__sync_synchronize", &ASTCallExpr::EmitBuiltinSyncSynchronize) + +BUILTIN_FUNC_EMIT("__atomic_exchange_1", &ASTCallExpr::EmitBuiltinAtomicExchangeN) +BUILTIN_FUNC_EMIT("__atomic_exchange_2", &ASTCallExpr::EmitBuiltinAtomicExchangeN) +BUILTIN_FUNC_EMIT("__atomic_exchange_4", &ASTCallExpr::EmitBuiltinAtomicExchangeN) +BUILTIN_FUNC_EMIT("__atomic_exchange_8", 
&ASTCallExpr::EmitBuiltinAtomicExchangeN) + +BUILTIN_FUNC_EMIT("__builtin_return_address", &ASTCallExpr::EmitBuiltinReturnAddress) +BUILTIN_FUNC_EMIT("__builtin_extract_return_addr", &ASTCallExpr::EmitBuiltinExtractReturnAddr) +BUILTIN_FUNC_EMIT("__builtin_object_size", &ASTCallExpr::EmitBuiltinObjectSize) + +BUILTIN_FUNC_EMIT("__builtin_bswap64", &ASTCallExpr::EmitBuiltinBswap64) +BUILTIN_FUNC_EMIT("__builtin_bswap32", &ASTCallExpr::EmitBuiltinBswap32) +BUILTIN_FUNC_EMIT("__builtin_bswap16", &ASTCallExpr::EmitBuiltinBswap16) \ No newline at end of file diff --git a/src/hir2mpl/ast_input/clang/include/builtin_func_parse.def b/src/hir2mpl/ast_input/clang/include/builtin_func_parse.def new file mode 100644 index 0000000000000000000000000000000000000000..9765fdb7b7b51a24ce6b4e0240e1e525649be7a3 --- /dev/null +++ b/src/hir2mpl/ast_input/clang/include/builtin_func_parse.def @@ -0,0 +1,19 @@ +BUILTIN_FUNC_PARSE("__builtin_classify_type", &ASTParser::ParseBuiltinClassifyType) +BUILTIN_FUNC_PARSE("__builtin_constant_p", &ASTParser::ParseBuiltinConstantP) +BUILTIN_FUNC_PARSE("__builtin_isinf_sign", &ASTParser::ParseBuiltinIsinfsign) +BUILTIN_FUNC_PARSE("__builtin_huge_val", &ASTParser::ParseBuiltinHugeVal) +BUILTIN_FUNC_PARSE("__builtin_huge_valf", &ASTParser::ParseBuiltinHugeValf) +BUILTIN_FUNC_PARSE("__builtin_huge_vall", &ASTParser::ParseBuiltinHugeVal) +BUILTIN_FUNC_PARSE("__builtin_inf", &ASTParser::ParseBuiltinInf) +BUILTIN_FUNC_PARSE("__builtin_inff", &ASTParser::ParseBuiltinInff) +BUILTIN_FUNC_PARSE("__builtin_infl", &ASTParser::ParseBuiltinInf) +BUILTIN_FUNC_PARSE("__builtin_nan", &ASTParser::ParseBuiltinNan) +BUILTIN_FUNC_PARSE("__builtin_nanf", &ASTParser::ParseBuiltinNanf) +BUILTIN_FUNC_PARSE("__builtin_nanl", &ASTParser::ParseBuiltinNan) +BUILTIN_FUNC_PARSE("__builtin_signbit", &ASTParser::ParseBuiltinSignbit) +BUILTIN_FUNC_PARSE("__builtin_signbitf", &ASTParser::ParseBuiltinSignBitf) +BUILTIN_FUNC_PARSE("__builtin_signbitl", &ASTParser::ParseBuiltinSignBitl) +BUILTIN_FUNC_PARSE("__builtin_trap", &ASTParser::ParseBuiltinTrap) +BUILTIN_FUNC_PARSE("__builtin_copysignf", &ASTParser::ParseBuiltinCopysignf) +BUILTIN_FUNC_PARSE("__builtin_copysign", &ASTParser::ParseBuiltinCopysign) +BUILTIN_FUNC_PARSE("__builtin_copysignl", &ASTParser::ParseBuiltinCopysignl) \ No newline at end of file diff --git a/src/hir2mpl/ast_input/clang/lib/ast_alias.h b/src/hir2mpl/ast_input/clang/lib/ast_alias.h new file mode 100644 index 0000000000000000000000000000000000000000..e877a685fa0e66fcce0d2f70a6829cb2b6cffd65 --- /dev/null +++ b/src/hir2mpl/ast_input/clang/lib/ast_alias.h @@ -0,0 +1,37 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef HIR2MPL_AST_FILE_INCLUDE_AST_ALIAS_H +#define HIR2MPL_AST_FILE_INCLUDE_AST_ALIAS_H +#include "libclang/CIndexer.h" +#include "libclang/CXTranslationUnit.h" +#include "clang/Frontend/ASTUnit.h" +#include "clang/AST/Decl.h" +#include "clang/AST/AST.h" +#include "clang/AST/ASTContext.h" +#include "clang/AST/RecursiveASTVisitor.h" +#include "clang/AST/RecordLayout.h" +#include "clang/AST/GlobalDecl.h" +#include "clang/AST/Mangle.h" +#include "clang/AST/VTableBuilder.h" +#include "clang/AST/VTTBuilder.h" +#include "clang/Frontend/CompilerInstance.h" +#include "clang/AST/DeclBase.h" + +namespace maple { + using AstASTContext = clang::ASTContext; + using AstUnitDecl = clang::TranslationUnitDecl; + using AstASTUnit = clang::ASTUnit; +} // namespace maple +#endif // HIR2MPL_AST_FILE_INCLUDE_AST_ALIAS_H diff --git a/src/hir2mpl/ast_input/clang/lib/ast_interface.cpp b/src/hir2mpl/ast_input/clang/lib/ast_interface.cpp new file mode 100644 index 0000000000000000000000000000000000000000..441030d0e693539120b1bc6b7945e9f24e2061b1 --- /dev/null +++ b/src/hir2mpl/ast_input/clang/lib/ast_interface.cpp @@ -0,0 +1,656 @@ +/* + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "ast_interface.h" +#include "mpl_logging.h" +#include "ast_util.h" +#include "fe_utils.h" +#include "fe_manager.h" + +namespace maple { +const std::unordered_map kUnsupportedFuncAttrsMap = { + {clang::attr::NoInstrumentFunction, "no_instrument_function"}, + {clang::attr::StdCall, "stdcall"}, + {clang::attr::CDecl, "cdecl"}, + {clang::attr::MipsLongCall, "mips_long_call"}, + {clang::attr::MipsShortCall, "mips_short_call"}, + {clang::attr::ARMInterrupt, "arm_interrupt"}, + {clang::attr::AnyX86Interrupt, "x86_interrupt"}, + {clang::attr::Naked, "naked"}, + {clang::attr::AllocAlign, "alloc_align"}, + {clang::attr::AssumeAligned, "assume_aligned"}, + {clang::attr::Flatten, "flatten"}, + {clang::attr::Cold, "cold"}, + {clang::attr::IFunc, "ifunc"}, + {clang::attr::NoSanitize, "no_sanitize"}, + {clang::attr::NoSplitStack, "no_split_stack"}, + {clang::attr::PatchableFunctionEntry, "patchable_function_entry"}, + {clang::attr::Target, "target"}}; +const std::unordered_map kUnsupportedVarAttrsMap = { + {clang::attr::Mode, "mode"}, + {clang::attr::NoCommon, "nocommon"}, + {clang::attr::TransparentUnion, "transparent_union"}, + {clang::attr::Alias, "alias"}, + {clang::attr::Cleanup, "cleanup"}, + {clang::attr::Common, "common"}, + {clang::attr::Uninitialized, "uninitialized"}}; +const std::unordered_map kUnsupportedTypeAttrsMap = { + {clang::attr::MSStruct, "ms_struct"}}; + +bool LibAstFile::Open(const MapleString &fileName, + int excludeDeclFromPCH, int displayDiagnostics) { + astFileName = fileName; + index = clang_createIndex(excludeDeclFromPCH, displayDiagnostics); + translationUnit = clang_createTranslationUnit(index, fileName.c_str()); + if (translationUnit == nullptr) { + return false; + } + clang::ASTUnit *astUnit = translationUnit->TheASTUnit; + if (astUnit == nullptr) { + return false; + } + 
astContext = &astUnit->getASTContext(); + if (astContext == nullptr) { + return false; + } + astUnitDecl = astContext->getTranslationUnitDecl(); + if (astUnitDecl == nullptr) { + return false; + } + mangleContext = astContext->createMangleContext(); + if (mangleContext == nullptr) { + return false; + } + return true; +} + +void LibAstFile::DisposeTranslationUnit() { + clang_disposeIndex(index); + clang_disposeTranslationUnit(translationUnit); + delete mangleContext; + mangleContext = nullptr; + translationUnit = nullptr; + index = nullptr; +} + +const AstASTContext *LibAstFile::GetAstContext() const { + return astContext; +} + +AstASTContext *LibAstFile::GetNonConstAstContext() const { + return astContext; +} + +const AstUnitDecl *LibAstFile::GetAstUnitDecl() const { + return astUnitDecl; +} + +std::string LibAstFile::GetMangledName(const clang::NamedDecl &decl) const { + std::string mangledName; + if (!mangleContext->shouldMangleDeclName(&decl)) { + mangledName = decl.getNameAsString(); + } else { + llvm::raw_string_ostream ostream(mangledName); + if (llvm::isa(&decl)) { + const auto *ctor = static_cast(&decl); + mangleContext->mangleCtorBlock(ctor, static_cast(0), nullptr, ostream); + } else if (llvm::isa(&decl)) { + const auto *dtor = static_cast(&decl); + mangleContext->mangleDtorBlock(dtor, static_cast(0), nullptr, ostream); + } else { + mangleContext->mangleName(&decl, ostream); + } + ostream.flush(); + } + return mangledName; +} + +Loc LibAstFile::GetStmtLOC(const clang::Stmt &stmt) const { + return GetLOC(stmt.getBeginLoc()); +} + +Loc LibAstFile::GetExprLOC(const clang::Expr &expr) const { + return GetLOC(expr.getExprLoc()); +} + +Loc LibAstFile::GetLOC(const clang::SourceLocation &srcLoc) const { + clang::PresumedLoc pLoc = astContext->getSourceManager().getPresumedLoc(srcLoc); + if (pLoc.isInvalid()) { + return {0, 0, 0}; + } + if (srcLoc.isFileID()) { + std::string fileName = pLoc.getFilename(); + if (fileName.empty()) { + return {0, 0, 0}; + } + unsigned line = pLoc.getLine(); + unsigned colunm = pLoc.getColumn(); + GStrIdx strIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(fileName); + for (const auto &info : FEManager::GetModule().GetSrcFileInfo()) { + if (info.first == strIdx) { + return {info.second, static_cast(line), static_cast(colunm)}; + } + } + if (FEManager::GetModule().GetSrcFileInfo().empty()) { + // src files start from 2, 1 is mpl file + FEManager::GetModule().PushbackFileInfo(MIRInfoPair(strIdx, 2)); + return {2, static_cast(line), static_cast(colunm)}; + } else { + auto last = FEManager::GetModule().GetSrcFileInfo().rbegin(); + FEManager::GetModule().PushbackFileInfo(MIRInfoPair(strIdx, last->second + 1)); + return {last->second + 1, static_cast(line), static_cast(colunm)}; + } + } else { // For macro line: The expansion location is the line in the source code where the macro was expanded + return GetLOC(astContext->getSourceManager().getExpansionLoc(srcLoc)); + } +} + +uint32 LibAstFile::GetMaxAlign(const clang::Decl &decl) const { + uint32 align = 0; + const clang::Decl *canonicalDecl = decl.getCanonicalDecl(); + if (canonicalDecl->getKind() == clang::Decl::Field) { + const clang::FieldDecl *fieldDecl = llvm::cast(canonicalDecl); + clang::QualType qualTy = fieldDecl->getType().getCanonicalType(); + align = RetrieveAggTypeAlign(qualTy.getTypePtr()); + } + uint32 selfAlign = canonicalDecl->getMaxAlignment(); + return align > selfAlign ? 
align : selfAlign; +} + +uint32 LibAstFile::RetrieveAggTypeAlign(const clang::Type *ty) const { + ASSERT_NOT_NULL(ty); + if (ty->isRecordType()) { + const auto *recordType = llvm::cast<clang::RecordType>(ty); + clang::RecordDecl *recordDecl = recordType->getDecl(); + return (recordDecl->getMaxAlignment()) >> 3; // >> 3 converts bits to bytes (8 bits = 1 byte) + } else if (ty->isArrayType()) { + const clang::Type *elemType = ty->getArrayElementTypeNoTypeQual(); + return RetrieveAggTypeAlign(elemType); + } + return 0; +} + +void LibAstFile::GetCVRAttrs(uint32_t qualifiers, GenericAttrs &genAttrs, bool isConst) const { + if (isConst && (qualifiers & clang::Qualifiers::Const) != 0) { + genAttrs.SetAttr(GENATTR_const); + } + if ((qualifiers & clang::Qualifiers::Restrict) != 0) { + genAttrs.SetAttr(GENATTR_restrict); + } + if ((qualifiers & clang::Qualifiers::Volatile) != 0) { + genAttrs.SetAttr(GENATTR_volatile); + } +} + +void LibAstFile::GetSClassAttrs(const clang::StorageClass storageClass, GenericAttrs &genAttrs) const { + switch (storageClass) { + case clang::SC_Extern: + case clang::SC_PrivateExtern: + genAttrs.SetAttr(GENATTR_extern); + break; + case clang::SC_Static: + genAttrs.SetAttr(GENATTR_static); + break; + default: + break; + } +} + +void LibAstFile::GetStorageAttrs(const clang::NamedDecl &decl, GenericAttrs &genAttrs) const { + switch (decl.getKind()) { + case clang::Decl::Function: + case clang::Decl::CXXMethod: { + const auto *funcDecl = llvm::cast<clang::FunctionDecl>(&decl); + const clang::StorageClass storageClass = funcDecl->getStorageClass(); + GetSClassAttrs(storageClass, genAttrs); + // static or extern may be missing on the current FunctionDecl: since a given function can be declared + // several times in a program, only one of those FunctionDecls is found when traversing the list of + // declarations in the context. 
+ const clang::FunctionDecl *prev = funcDecl->getPreviousDecl(); + while (prev != nullptr && prev->isDefined()) { + GetStorageAttrs(*prev, genAttrs); + prev = prev->getPreviousDecl(); + } + break; + } + case clang::Decl::ParmVar: + case clang::Decl::Var: { + const auto *varDecl = llvm::cast<clang::VarDecl>(&decl); + const clang::StorageClass storageClass = varDecl->getStorageClass(); + GetSClassAttrs(storageClass, genAttrs); + break; + } + case clang::Decl::Field: + default: + break; + } + return; +} + +void LibAstFile::GetAccessAttrs(AccessKind access, GenericAttrs &genAttrs) const { + switch (access) { + case kPublic: + genAttrs.SetAttr(GENATTR_public); + break; + case kProtected: + genAttrs.SetAttr(GENATTR_protected); + break; + case kPrivate: + genAttrs.SetAttr(GENATTR_private); + break; + case kNone: + break; + default: + ASSERT(false, "shouldn't reach here"); + break; + } + return; +} + +void LibAstFile::GetQualAttrs(const clang::NamedDecl &decl, GenericAttrs &genAttrs) const { + switch (decl.getKind()) { + case clang::Decl::Function: + case clang::Decl::CXXMethod: + case clang::Decl::ParmVar: + case clang::Decl::Var: + case clang::Decl::Field: { + const auto *valueDecl = llvm::dyn_cast<clang::ValueDecl>(&decl); + ASSERT(valueDecl != nullptr, "ERROR:null pointer!"); + const clang::QualType qualType = valueDecl->getType(); + uint32_t qualifiers = qualType.getCVRQualifiers(); + GetCVRAttrs(qualifiers, genAttrs); + break; + } + default: + break; + } +} + +void LibAstFile::GetQualAttrs(const clang::QualType &qualType, GenericAttrs &genAttrs, bool isSourceType) const { + uint32_t qualifiers = qualType.getCVRQualifiers(); + GetCVRAttrs(qualifiers, genAttrs, isSourceType); +} + +void LibAstFile::CollectAttrs(const clang::NamedDecl &decl, GenericAttrs &genAttrs, AccessKind access) const { + GetStorageAttrs(decl, genAttrs); + GetAccessAttrs(access, genAttrs); + GetQualAttrs(decl, genAttrs); + if (decl.isImplicit()) { + genAttrs.SetAttr(GENATTR_implicit); + } + if (decl.isUsed()) { + genAttrs.SetAttr(GENATTR_used); + } + if (decl.hasAttr<clang::WeakAttr>()) { + genAttrs.SetAttr(GENATTR_weak); + } + if (decl.hasAttr<clang::NonNullAttr>() && decl.getKind() != clang::Decl::Function) { + for (const auto *nonNull : decl.specific_attrs<clang::NonNullAttr>()) { + if (nonNull->args_size() > 0) { + // a nonnull attribute with args on a function type pointer needs special handling to mark the nonnull arg + continue; + } + genAttrs.SetAttr(GENATTR_nonnull); + } + } +} + +void LibAstFile::CollectFuncReturnVarAttrs(const clang::CallExpr &expr, GenericAttrs &genAttrs) const { + if (LibAstFile::IsOneElementVector(expr.getCallReturnType(*astContext))) { + genAttrs.SetAttr(GenericAttrKind::GENATTR_oneelem_simd); + } +} + +void LibAstFile::CollectFuncAttrs(const clang::FunctionDecl &decl, GenericAttrs &genAttrs, AccessKind access) const { + CollectAttrs(decl, genAttrs, access); + if (decl.isVirtualAsWritten()) { + genAttrs.SetAttr(GENATTR_virtual); + } + if (decl.isDeletedAsWritten()) { + genAttrs.SetAttr(GENATTR_delete); + } + if (decl.isPure()) { + genAttrs.SetAttr(GENATTR_pure); + } + if (decl.isInlineSpecified()) { + genAttrs.SetAttr(GENATTR_inline); + } else if (decl.hasAttr<clang::NoInlineAttr>()) { + genAttrs.SetAttr(GENATTR_noinline); + } + if (decl.hasAttr<clang::AlwaysInlineAttr>()) { + genAttrs.SetAttr(GENATTR_always_inline); + } + if (decl.hasAttr<clang::GNUInlineAttr>()) { + genAttrs.SetAttr(GENATTR_gnu_inline); + } + if (decl.isDefaulted()) { + genAttrs.SetAttr(GENATTR_default); + } + if (decl.getKind() == clang::Decl::CXXConstructor) { + genAttrs.SetAttr(GENATTR_constructor); + } + if (decl.getKind() == clang::Decl::CXXDestructor) { + 
genAttrs.SetAttr(GENATTR_destructor); + } + if (decl.isVariadic()) { + genAttrs.SetAttr(GENATTR_varargs); + } + if (decl.isNoReturn()) { + genAttrs.SetAttr(GENATTR_noreturn); + } + clang::AliasAttr *aliasAttr = decl.getAttr<clang::AliasAttr>(); + if (aliasAttr != nullptr) { + genAttrs.SetAttr(GENATTR_alias); + GStrIdx strIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(aliasAttr->getAliasee().str()); + genAttrs.InsertStrIdxContentMap(GENATTR_alias, strIdx); + } + clang::ConstructorAttr *constructorAttr = decl.getAttr<clang::ConstructorAttr>(); + if (constructorAttr != nullptr) { + genAttrs.SetAttr(GENATTR_constructor_priority); + genAttrs.InsertIntContentMap(GENATTR_constructor_priority, constructorAttr->getPriority()); + } + clang::DestructorAttr *destructorAttr = decl.getAttr<clang::DestructorAttr>(); + if (destructorAttr != nullptr) { + genAttrs.SetAttr(GENATTR_destructor_priority); + genAttrs.InsertIntContentMap(GENATTR_destructor_priority, destructorAttr->getPriority()); + } + // one element vector type in rettype + if (LibAstFile::IsOneElementVector(decl.getReturnType())) { + genAttrs.SetAttr(GENATTR_oneelem_simd); + } + if (FEOptions::GetInstance().IsEnableSafeRegion()) { + if (decl.getSafeSpecifier() == clang::SS_Unsafe) { + genAttrs.SetAttr(GENATTR_unsafed); + } else if (decl.getSafeSpecifier() == clang::SS_Safe || FEOptions::GetInstance().IsDefaultSafe()) { + genAttrs.SetAttr(GENATTR_safed); + } + } + // If a non-static function defined with 'inline' also has a non-static, non-inline declaration, it should be + // an externally visible function. + if (decl.isThisDeclarationADefinition() && genAttrs.GetAttr(GENATTR_inline) && !genAttrs.GetAttr(GENATTR_static)) { + bool isExternallyVisible = false; + for (const clang::FunctionDecl *funcDecl : decl.redecls()) { + if (!funcDecl->isThisDeclarationADefinition() && !funcDecl->isInlineSpecified()) { + isExternallyVisible = true; + break; + } + } + if (isExternallyVisible) { + genAttrs.SetAttr(GENATTR_extern); + } + } + // If a function is defined with the attribute 'gnu_inline' but without 'extern', the 'extern' from its declarations + // should be ignored. 
+ if (decl.isThisDeclarationADefinition() && genAttrs.GetAttr(GENATTR_gnu_inline) && + (decl.getStorageClass() != clang::SC_Extern) && (decl.getStorageClass() != clang::SC_PrivateExtern) && + genAttrs.GetAttr(GENATTR_extern)) { + genAttrs.ResetAttr(GENATTR_extern); + } + CheckUnsupportedFuncAttrs(decl); +} + +void LibAstFile::CheckUnsupportedFuncAttrs(const clang::FunctionDecl &decl) const { + if (!decl.hasAttrs()) { + return; + } + std::string unsupportedFuncAttrs = ""; + const clang::AttrVec &funcAttrs = decl.getAttrs(); + for (const auto *attr : funcAttrs) { + clang::attr::Kind attrKind = attr->getKind(); + auto iterator = kUnsupportedFuncAttrsMap.find(attrKind); + if (iterator != kUnsupportedFuncAttrsMap.end()) { + unsupportedFuncAttrs += iterator->second + " "; + } + } + CHECK_FATAL(unsupportedFuncAttrs.empty(), "%s:%d error: The function %s has unsupported attribute(s): %s", + FEManager::GetModule().GetFileNameFromFileNum(GetLOC(decl.getLocation()).fileIdx).c_str(), + GetLOC(decl.getLocation()).line, + GetMangledName(decl).c_str(), + unsupportedFuncAttrs.c_str()); +} + +void LibAstFile::CollectVarAttrs(const clang::VarDecl &decl, GenericAttrs &genAttrs, AccessKind access) const { + CollectAttrs(decl, genAttrs, access); + // handle __thread + if (decl.getTLSKind() == clang::VarDecl::TLS_Static) { + genAttrs.SetAttr(GENATTR_tls_static); + } else if (decl.getTLSKind() == clang::VarDecl::TLS_Dynamic) { + genAttrs.SetAttr(GENATTR_tls_dynamic); + } + // one elem vector type + if (IsOneElementVector(decl.getType())) { + genAttrs.SetAttr(GENATTR_oneelem_simd); + } + CheckUnsupportedVarAttrs(decl); +} + +void LibAstFile::CheckUnsupportedVarAttrs(const clang::VarDecl &decl) const { + if (!decl.hasAttrs()) { + return; + } + std::string unsupportedVarAttrs = ""; + const clang::AttrVec &varAttrs = decl.getAttrs(); + for (const auto *attr : varAttrs) { + clang::attr::Kind attrKind = attr->getKind(); + auto iterator = kUnsupportedVarAttrsMap.find(attrKind); + if (iterator != kUnsupportedVarAttrsMap.end()) { + unsupportedVarAttrs += iterator->second + " "; + } + } + CHECK_FATAL(unsupportedVarAttrs.empty(), "%s:%d error: The variable %s has unsupported attribute(s): %s", + FEManager::GetModule().GetFileNameFromFileNum(GetLOC(decl.getLocation()).fileIdx).c_str(), + GetLOC(decl.getLocation()).line, + GetMangledName(decl).c_str(), + unsupportedVarAttrs.c_str()); +} + +void LibAstFile::CollectRecordAttrs(const clang::RecordDecl &decl, GenericAttrs &genAttrs) const { + clang::PackedAttr *packedAttr = decl.getAttr(); + if (packedAttr != nullptr) { + genAttrs.SetAttr(GENATTR_pack); + genAttrs.InsertIntContentMap(GENATTR_pack, 1); // 1 byte + } + clang::MaxFieldAlignmentAttr *maxFieldAlignAttr = decl.getAttr(); + if (maxFieldAlignAttr != nullptr) { + genAttrs.SetAttr(GENATTR_pack); + int value = static_cast(maxFieldAlignAttr->getAlignment() / 8); // bits to byte + genAttrs.InsertIntContentMap(GENATTR_pack, value); + } + CheckUnsupportedTypeAttrs(decl); +} + +void LibAstFile::CheckUnsupportedTypeAttrs(const clang::RecordDecl &decl) const { + if (!decl.hasAttrs()) { + return; + } + std::string unsupportedTypeAttrs = ""; + const clang::AttrVec &typeAttrs = decl.getAttrs(); + for (const auto *attr : typeAttrs) { + clang::attr::Kind attrKind = attr->getKind(); + auto iterator = kUnsupportedTypeAttrsMap.find(attrKind); + if (iterator != kUnsupportedTypeAttrsMap.end()) { + unsupportedTypeAttrs += iterator->second + " "; + } + } + CHECK_FATAL(unsupportedTypeAttrs.empty(), "%s:%d error: struct or union %s has 
unsupported type attribute(s): %s", + FEManager::GetModule().GetFileNameFromFileNum(GetLOC(decl.getLocation()).fileIdx).c_str(), + GetLOC(decl.getLocation()).line, + GetMangledName(decl).c_str(), + unsupportedTypeAttrs.c_str()); +} + +void LibAstFile::CollectFieldAttrs(const clang::FieldDecl &decl, GenericAttrs &genAttrs, AccessKind access) const { + CollectAttrs(decl, genAttrs, access); + clang::PackedAttr *packedAttr = decl.getAttr(); + if (packedAttr != nullptr) { + genAttrs.SetAttr(GENATTR_pack); + genAttrs.InsertIntContentMap(GENATTR_pack, 1); // 1 byte + } +} + +void LibAstFile::EmitTypeName(const clang::QualType qualType, std::stringstream &ss) { + switch (qualType->getTypeClass()) { + case clang::Type::LValueReference: { + ss << "R"; + const clang::QualType pointeeType = qualType->castAs()->getPointeeType(); + EmitTypeName(pointeeType, ss); + break; + } + case clang::Type::Pointer: { + ss << "P"; + const clang::QualType pointeeType = qualType->castAs()->getPointeeType(); + EmitTypeName(pointeeType, ss); + break; + } + case clang::Type::Record: { + EmitTypeName(*qualType->getAs(), ss); + break; + } + default: { + EmitQualifierName(qualType, ss); + MIRType *type = CvtType(qualType); + ss << ASTUtil::GetTypeString(*type); + break; + } + } +} + +void LibAstFile::EmitQualifierName(const clang::QualType qualType, std::stringstream &ss) const { + uint32_t cvrQual = qualType.getCVRQualifiers(); + if ((cvrQual & clang::Qualifiers::Const) != 0) { + ss << "K"; + } + if ((cvrQual & clang::Qualifiers::Volatile) != 0) { + ss << "U"; + } +} + +const std::string LibAstFile::GetOrCreateMappedUnnamedName(const clang::Decl &decl) { + uint32 uid; + if (FEOptions::GetInstance().GetFuncInlineSize() != 0 && !decl.getLocation().isMacroID()) { + // use loc as key for wpaa mode + Loc l = GetLOC(decl.getLocation()); + CHECK_FATAL(l.fileIdx != 0, "loc is invaild"); + std::map::const_iterator itLoc = unnamedLocMap.find(l); + if (itLoc == unnamedLocMap.cend()) { + uid = FEUtils::GetSequentialNumber(); + unnamedLocMap[l] = uid; + } else { + uid = itLoc->second; + } + return FEUtils::GetSequentialName0("unnamed.", uid); + } + std::map::const_iterator it = unnamedSymbolMap.find(decl.getID()); + if (it == unnamedSymbolMap.cend()) { + uid = FEUtils::GetSequentialNumber(); + unnamedSymbolMap[decl.getID()] = uid; + } else { + uid = it->second; + } + return FEUtils::GetSequentialName0("unnamed.", uid); +} + +const std::string LibAstFile::GetDeclName(const clang::NamedDecl &decl, bool isRename) { + std::string name = decl.getNameAsString(); + if (name.empty()) { + name = GetOrCreateMappedUnnamedName(decl); + } + if (isRename && !decl.isDefinedOutsideFunctionOrMethod()) { + Loc l = GetLOC(decl.getLocation()); + std::stringstream ss; + ss << name << "_" << l.line << "_" << l.column; + name = ss.str(); + } + return name; +} + +void LibAstFile::EmitTypeName(const clang::RecordType &recordType, std::stringstream &ss) { + clang::RecordDecl *recordDecl = recordType.getDecl(); + std::string str = recordType.desugar().getAsString(); + if (!recordDecl->isAnonymousStructOrUnion() && str.find("anonymous") == std::string::npos) { + clang::DeclContext *ctx = recordDecl->getDeclContext(); + MapleStack nsStack(module->GetMPAllocator().Adapter()); + while (!ctx->isTranslationUnit()) { + auto *primCtxNsDc = llvm::dyn_cast(ctx->getPrimaryContext()); + if (primCtxNsDc != nullptr) { + nsStack.push(primCtxNsDc); + } + auto *primCtxRecoDc = llvm::dyn_cast(ctx->getPrimaryContext()); + if (primCtxRecoDc != nullptr) { + 
nsStack.push(primCtxRecoDc); + } + ctx = ctx->getParent(); + } + while (!nsStack.empty()) { + auto *nsDc = llvm::dyn_cast(nsStack.top()); + if (nsDc != nullptr) { + ss << nsDc->getName().data() << "|"; + } + auto *rcDc = llvm::dyn_cast(nsStack.top()); + if (rcDc != nullptr) { + EmitTypeName(*rcDc->getTypeForDecl()->getAs(), ss); + } + nsStack.pop(); + } + auto nameStr = recordDecl->getName().str(); + if (nameStr.empty()) { + nameStr = GetTypedefNameFromUnnamedStruct(*recordDecl); + } + if (nameStr.empty()) { + nameStr = GetOrCreateMappedUnnamedName(*recordDecl); + } + ss << nameStr; + } else { + ss << GetOrCreateMappedUnnamedName(*recordDecl); + } + if (FEOptions::GetInstance().GetFuncInlineSize() != 0) { + std::string recordStr = recordDecl->getDefinition() == nullptr ? "" : GetRecordLayoutString(*recordDecl); + std::string filename = astContext->getSourceManager().getFilename(recordDecl->getLocation()).str(); + ss << FEUtils::GetFileNameHashStr(filename + recordStr); + } + CHECK_FATAL(ss.rdbuf()->in_avail() != 0, "stringstream is empty"); +} + +std::string LibAstFile::GetRecordLayoutString(const clang::RecordDecl &recordDecl) { + std::stringstream recordLayoutStr; + const clang::ASTRecordLayout &recordLayout = GetContext()->getASTRecordLayout(&recordDecl); + unsigned int fieldCount = recordLayout.getFieldCount(); + uint64_t recordSize = static_cast(recordLayout.getSize().getQuantity()); + recordLayoutStr << std::to_string(fieldCount) << std::to_string(recordSize); + clang::RecordDecl::field_iterator it = recordDecl.field_begin(); + for (unsigned i = 0, e = recordLayout.getFieldCount(); i != e; ++i, ++it) { + const clang::FieldDecl *fieldDecl = *it; + recordLayoutStr << std::to_string(recordLayout.getFieldOffset(i)); + std::string fieldName = GetMangledName(*fieldDecl); + if (fieldName.empty()) { + fieldName = GetOrCreateMappedUnnamedName(*fieldDecl); + } + recordLayoutStr << fieldName; + } + return recordLayoutStr.str(); +} + +// get TypedefDecl name for the unnamed struct, e.g. typedef struct {} foo; +std::string LibAstFile::GetTypedefNameFromUnnamedStruct(const clang::RecordDecl &recoDecl) const { + // typedef is parsed in debug mode + if (FEOptions::GetInstance().IsDbgFriendly()) { + return std::string(); + } + auto *defnameDcel = recoDecl.getTypedefNameForAnonDecl(); + if (defnameDcel != nullptr) { + return defnameDcel->getQualifiedNameAsString(); + } + return std::string(); +} +} // namespace maple diff --git a/src/hir2mpl/ast_input/clang/lib/ast_interface.h b/src/hir2mpl/ast_input/clang/lib/ast_interface.h new file mode 100644 index 0000000000000000000000000000000000000000..a2476c06cac6a311efc4fcc3f5887b623531b1d6 --- /dev/null +++ b/src/hir2mpl/ast_input/clang/lib/ast_interface.h @@ -0,0 +1,134 @@ +/* + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef HIR2MPL_AST_FILE_INCLUDE_AST_INTERFACE_H +#define HIR2MPL_AST_FILE_INCLUDE_AST_INTERFACE_H +#include +#include "ast_alias.h" +#include "mir_type.h" +#include "mir_nodes.h" +#include "mpl_logging.h" +#include "generic_attrs.h" +#include "fe_utils.h" +#include "clang/Basic/AttrKinds.h" + +namespace maple { +enum AccessKind { + kPublic, + kProtected, + kPrivate, + kNone +}; + +class LibAstFile { + public: + explicit LibAstFile(MapleAllocator &allocatorIn, MapleList &recordDeclesIn, + MapleList &enumDeclesIn) + : recordDeclSet(allocatorIn.Adapter()), + unnamedSymbolMap(allocatorIn.Adapter()), compoundLiteralExprInitSymbolMap(allocatorIn.Adapter()), + recordDecles(recordDeclesIn), enumDecles(enumDeclesIn), astFileName("", allocatorIn.GetMemPool()) {} + ~LibAstFile() = default; + + bool Open(const MapleString &fileName, + int excludeDeclFromPCH, int displayDiagnostics); + void DisposeTranslationUnit(); + const AstASTContext *GetAstContext() const; + AstASTContext *GetNonConstAstContext() const; + const AstUnitDecl *GetAstUnitDecl() const; + std::string GetMangledName(const clang::NamedDecl &decl) const; + const std::string GetOrCreateMappedUnnamedName(const clang::Decl &decl); + const std::string GetDeclName(const clang::NamedDecl &decl, bool isRename = false); + void EmitTypeName(const clang::QualType qualType, std::stringstream &ss); + void EmitTypeName(const clang::RecordType &recordType, std::stringstream &ss); + void EmitQualifierName(const clang::QualType qualType, std::stringstream &ss) const; + std::string GetTypedefNameFromUnnamedStruct(const clang::RecordDecl &recoDecl) const; + std::string GetRecordLayoutString(const clang::RecordDecl &recordDecl); + void CollectBaseEltTypeAndSizesFromConstArrayDecl(const clang::QualType &currQualType, MIRType *&elemType, + TypeAttrs &elemAttr, std::vector &operands, + bool isSourceType); + void CollectBaseEltTypeAndDimFromVariaArrayDecl(const clang::QualType &currQualType, MIRType *&elemType, + TypeAttrs &elemAttr, uint8_t &dim, bool isSourceType); + void CollectBaseEltTypeAndDimFromDependentSizedArrayDecl(const clang::QualType currQualType, MIRType *&elemType, + TypeAttrs &elemAttr, std::vector &operands, + bool isSourceType); + void CollectBaseEltTypeFromArrayDecl(const clang::QualType &currQualType, MIRType *&elemType, TypeAttrs &elemAttr, + bool isSourceType = false); + void GetCVRAttrs(uint32_t qualifiers, GenericAttrs &genAttrs, bool isConst = true) const; + void GetSClassAttrs(const clang::StorageClass storageClass, GenericAttrs &genAttrs) const; + void GetStorageAttrs(const clang::NamedDecl &decl, GenericAttrs &genAttrs) const; + void GetAccessAttrs(AccessKind access, GenericAttrs &genAttrs) const; + void GetQualAttrs(const clang::NamedDecl &decl, GenericAttrs &genAttrs) const; + void GetQualAttrs(const clang::QualType &qualType, GenericAttrs &genAttrs, bool isSourceType) const; + void CollectAttrs(const clang::NamedDecl &decl, GenericAttrs &genAttrs, AccessKind access) const; + void CollectFuncAttrs(const clang::FunctionDecl &decl, GenericAttrs &genAttrs, AccessKind access) const; + void CollectFuncReturnVarAttrs(const clang::CallExpr &expr, GenericAttrs &genAttrs) const; + void CheckUnsupportedFuncAttrs(const clang::FunctionDecl &decl) const; + void CollectVarAttrs(const clang::VarDecl &decl, GenericAttrs &genAttrs, AccessKind access) const; + void CheckUnsupportedVarAttrs(const clang::VarDecl &decl) const; + void CollectRecordAttrs(const clang::RecordDecl &decl, GenericAttrs &genAttrs) const; + void 
CheckUnsupportedTypeAttrs(const clang::RecordDecl &decl) const; + void CollectFieldAttrs(const clang::FieldDecl &decl, GenericAttrs &genAttrs, AccessKind access) const; + MIRType *CvtPrimType(const clang::QualType qualType, bool isSourceType = false) const; + PrimType CvtPrimType(const clang::BuiltinType::Kind kind, bool isSourceType) const; + MIRType *CvtPrimType2SourceType(const clang::BuiltinType::Kind kind) const; + MIRType *CvtSourceType(const clang::QualType qualType); + MIRType *CvtType(const clang::QualType qualType, bool isSourceType = false); + MIRType *CvtOtherType(const clang::QualType srcType, bool isSourceType); + MIRType *CvtArrayType(const clang::QualType &srcType, bool isSourceType); + MIRType *CvtFunctionType(const clang::QualType srcType, bool isSourceType); + MIRType *CvtEnumType(const clang::QualType &qualType, bool isSourceType); + MIRType *CvtRecordType(const clang::QualType qualType); + MIRType *CvtFieldType(const clang::NamedDecl &decl); + MIRType *CvtComplexType(const clang::QualType srcType) const; + MIRType *CvtVectorType(const clang::QualType srcType); + MIRType *CvtTypedef(const clang::QualType &qualType); + MIRType *CvtTypedefDecl(const clang::TypedefNameDecl &typedefDecl); + bool TypeHasMayAlias(const clang::QualType srcType) const; + static bool IsOneElementVector(const clang::QualType &qualType); + static bool IsOneElementVector(const clang::Type &type); + + const clang::ASTContext *GetContext() const { + return astContext; + } + + const std::string GetAstFileNameHashStr() const { + std::string fileName = (astFileName.c_str() == nullptr ? "" : astFileName.c_str()); + return FEUtils::GetFileNameHashStr(fileName); + } + + Loc GetStmtLOC(const clang::Stmt &stmt) const; + Loc GetExprLOC(const clang::Expr &expr) const; + Loc GetLOC(const clang::SourceLocation &srcLoc) const; + uint32 GetMaxAlign(const clang::Decl &decl) const; + uint32 RetrieveAggTypeAlign(const clang::Type *ty) const; + + private: + MapleSet recordDeclSet; + MapleMap unnamedSymbolMap; + MapleMap compoundLiteralExprInitSymbolMap; + MIRModule *module = nullptr; + + MapleList &recordDecles; + MapleList &enumDecles; + + clang::ASTContext *astContext = nullptr; + clang::TranslationUnitDecl *astUnitDecl = nullptr; + clang::MangleContext *mangleContext = nullptr; + CXTranslationUnit translationUnit = nullptr; + CXIndex index = nullptr; + MapleString astFileName; + static std::map unnamedLocMap; +}; +} // namespace maple +#endif // HIR2MPL_AST_FILE_INCLUDE_AST_INTERFACE_H diff --git a/src/hir2mpl/ast_input/clang/lib/ast_macros.h b/src/hir2mpl/ast_input/clang/lib/ast_macros.h new file mode 100644 index 0000000000000000000000000000000000000000..87e9f0555863d0b837913de7ddd3ac6b6e82a646 --- /dev/null +++ b/src/hir2mpl/ast_input/clang/lib/ast_macros.h @@ -0,0 +1,260 @@ +/* + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ + +#ifndef AST2MPL_INCLUDE_ASTMACROS_H +#define AST2MPL_INCLUDE_ASTMACROS_H +#include + +namespace maple { +const uint32_t kSrcFileNum = 2; + +// ast2mpl options +// these values can be use such as -o=1 at command lines +const uint32_t kCheckAssertion = 1; +const uint32_t kNoComment = 2; +const uint32_t kNoLoc = 4; + +// these values can be use such as -d=3 at command lines +const int kDebugLevelZero = 0; +const int kDebugLevelOne = 1; +const int kDebugLevelTwo = 2; +const int kDebugLevelThree = 3; + +const int kComplexRealID = 1; +const int kComplexImagID = 2; + +const int kBitToByteShift = 3; + +const uint32_t kFloat128Size = 2; +const uint32_t kInt32Width = 32; +const uint32_t kInt32Mask = 0xFFFFFFFFULL; +const int kDefaultIndent = 1; + +#define ASTDEBUG +#ifdef ASTDEBUG +#define LOCATION __func__ << "() at " << __FILE__ << ":" << __LINE__ + +#define DUMPINFO(stmtClass, s) \ + do { \ + if (maple::ast2mplDebug > kDebugLevelOne) { \ + std::cout << LOCATION << '\n'; \ + s->dump(); \ + std::cout << " " << '\n'; \ + } \ + } while (0) + +#define NOTYETHANDLED(s) \ + do { \ + std::cout << "\n" << LOCATION << " <<<<<<<<<<<<<<<<<< Not Yet Handled: " << s << "<<<<<<<<<<<<<<<<<<" << '\n'; \ + if ((maple::ast2mplOption & kCheckAssertion) != 0) { \ + ASSERT(false, "Not yet handled"); \ + } \ + } while (0) + +// print empty line +#define DEBUGPRINT_N(n) \ + do { \ + if (maple::ast2mplDebug >= n) { \ + std::cout << " " << '\n'; \ + } \ + } while (0) + +// print indent +#define DEBUGPRINTIND(n) \ + do { \ + if (maple::ast2mplDebug > kDebugLevelZero) { \ + PrintIndentation(n); \ + } \ + } while (0) + +// print str +#define DEBUGPRINT_S_LEVEL(str, level) \ + do { \ + if (maple::ast2mplDebug >= level) { \ + PrintIndentation(ast2mplDebugIndent); \ + std::cout << " " << str << '\n'; \ + } \ + } while (0) + +#define DEBUGPRINT_FUNC(name) \ + do { \ + int ind = maple::ast2mplDebugIndent; \ + Util::SetIndent(kDefaultIndent); \ + if (maple::ast2mplDebug > kDebugLevelZero) { \ + PrintIndentation(ast2mplDebugIndent); \ + std::cout << name << " {" << '\n'; \ + } \ + Util::SetIndent(ind); \ + } while (0) + +#define DEBUGPRINT_FUNC_END(name) \ + do { \ + int ind = maple::ast2mplDebugIndent; \ + Util::SetIndent(kDefaultIndent); \ + if (maple::ast2mplDebug > kDebugLevelZero) { \ + PrintIndentation(ast2mplDebugIndent); \ + std::cout << "}\n" << '\n'; \ + } \ + Util::SetIndent(ind); \ + } while (0) + +#define DEBUGPRINT_NODE(node, type) \ + do { \ + if (maple::ast2mplDebug > kDebugLevelOne) { \ + PrintIndentation(maple::ast2mplDebugIndent); \ + std::cout << " >> node: "; \ + static_cast(node)->Print(static_cast(module), 0); \ + std::cout << "\n"; \ + } \ + } while (0) + +// print var = val +#define DEBUGPRINT_V_LEVEL(var, level) \ + do { \ + if (maple::ast2mplDebug >= level) { \ + PrintIndentation(maple::ast2mplDebugIndent); \ + std::cout << LOCATION << " " << #var << " = " << var << '\n'; \ + } \ + } while (0) + +#define DEBUGPRINT_X_LEVEL(var, level) \ + do { \ + if (maple::ast2mplDebug >= level) { \ + PrintIndentation(maple::ast2mplDebugIndent); \ + std::cout << LOCATION << " " << #var << " = " << std::hex << "0x" << var << std::dec << '\n'; \ + } \ + } while (0) + +// print var = val +#define DEBUGPRINT_V_LEVEL_PURE(var, level) \ + do { \ + if (maple::ast2mplDebug >= level) { \ + PrintIndentation(maple::ast2mplDebugIndent); \ + std::cout << #var << " = " << var << '\n'; \ + } \ + } while (0) + +// print val0 val1 +#define DEBUGPRINT_NN_LEVEL(var0, var1, level) \ + do { \ + if (maple::ast2mplDebug >= level) { \ 
+ PrintIndentation(maple::ast2mplDebugIndent); \ + std::cout << var0 << " " << var1; \ + } \ + } while (0) + +// print var0 = val0, var1 = val1 +#define DEBUGPRINT_VV_LEVEL(var0, var1, level) \ + do { \ + if (maple::ast2mplDebug >= level) { \ + PrintIndentation(maple::ast2mplDebugIndent); \ + std::cout << LOCATION << " " << #var0 << " = " << var0 << ", " << #var1 << " = " << var1 << '\n'; \ + } \ + } while (0) + +// print val0, var = val +#define DEBUGPRINT_SV_LEVEL(val0, var, level) \ + do { \ + if (maple::ast2mplDebug >= level) { \ + PrintIndentation(maple::ast2mplDebugIndent); \ + std::cout << LOCATION << " " << val0 << ", " << #var << " = " << var << '\n'; \ + } \ + } while (0) + +#define DEBUGPRINT_SX_LEVEL(val0, var, level) \ + do { \ + if (maple::ast2mplDebug >= level) { \ + PrintIndentation(maple::ast2mplDebugIndent); \ + std::cout << LOCATION << " " << val0 << ", " << #var << " = " << std::hex << "0x" << var << std::dec \ + << '\n'; \ + } \ + } while (0) +#else + +#define DUMPINFO(stmtClass, s) +#define NOTHANDLED +#define DEBUGPRINT_N(n) +#define DEBUGPRINTIND(n) +#define DEBUGPRINT_S_LEVEL(str, level) +#define DEBUGPRINT_FUNC(name) +#define DEBUGPRINT_FUNC_END(name) +#define DEBUGPRINT_NODE(node, type) +#define DEBUGPRINT_V_LEVEL(var, level) +#define DEBUGPRINT_X_LEVEL(var, level) +#define DEBUGPRINT_V_LEVEL_PURE(var, level) +#define DEBUGPRINT_NN_LEVEL(var0, var1, level) +#define DEBUGPRINT_VV_LEVEL(var0, var1, level) +#define DEBUGPRINT_SV_LEVEL(val0, var, level) +#endif + +#define DEBUGPRINT00 DEBUGPRINT_N(0) +#define DEBUGPRINT01 DEBUGPRINT_N(1) +#define DEBUGPRINT02 DEBUGPRINT_N(2) +#define DEBUGPRINT03 DEBUGPRINT_N(3) + +#define DEBUGPRINT_S(var) DEBUGPRINT_S_LEVEL(var, 1) +#define DEBUGPRINT_S2(var) DEBUGPRINT_S_LEVEL(var, 2) +#define DEBUGPRINT_S3(var) DEBUGPRINT_S_LEVEL(var, 3) +#define DEBUGPRINT_S4(var) DEBUGPRINT_S_LEVEL(var, 4) +#define DEBUGPRINT_S5(var) DEBUGPRINT_S_LEVEL(var, 5) +#define DEBUGPRINT_S6(var) DEBUGPRINT_S_LEVEL(var, 6) + +#define DEBUGPRINT0(var) DEBUGPRINT_V_LEVEL(var, 0) +#define DEBUGPRINT(var) DEBUGPRINT_V_LEVEL(var, 1) +#define DEBUGPRINT2(var) DEBUGPRINT_V_LEVEL(var, 2) +#define DEBUGPRINT3(var) DEBUGPRINT_V_LEVEL(var, 3) +#define DEBUGPRINT4(var) DEBUGPRINT_V_LEVEL(var, 4) +#define DEBUGPRINT5(var) DEBUGPRINT_V_LEVEL(var, 5) +#define DEBUGPRINT6(var) DEBUGPRINT_V_LEVEL(var, 6) + +#define DEBUGPRINT_X(var) DEBUGPRINT_X_LEVEL(var, 1) +#define DEBUGPRINT_X2(var) DEBUGPRINT_X_LEVEL(var, 2) +#define DEBUGPRINT_X3(var) DEBUGPRINT_X_LEVEL(var, 3) +#define DEBUGPRINT_X4(var) DEBUGPRINT_X_LEVEL(var, 4) +#define DEBUGPRINT_X5(var) DEBUGPRINT_X_LEVEL(var, 5) +#define DEBUGPRINT_X6(var) DEBUGPRINT_X_LEVEL(var, 6) + +#define DEBUGPRINT_PURE(var) DEBUGPRINT_V_LEVEL_PURE(var, 1) +#define DEBUGPRINT2_PURE(var) DEBUGPRINT_V_LEVEL_PURE(var, 2) +#define DEBUGPRINT3_PURE(var) DEBUGPRINT_V_LEVEL_PURE(var, 3) +#define DEBUGPRINT4_PURE(var) DEBUGPRINT_V_LEVEL_PURE(var, 4) +#define DEBUGPRINT5_PURE(var) DEBUGPRINT_V_LEVEL_PURE(var, 5) +#define DEBUGPRINT6_PURE(var) DEBUGPRINT_V_LEVEL_PURE(var, 6) + +#define DEBUGPRINT_NN(var0, var1) DEBUGPRINT_NN_LEVEL(var0, var1, 1) +#define DEBUGPRINT_NN_2(var0, var1) DEBUGPRINT_NN_LEVEL(var0, var1, 2) +#define DEBUGPRINT_NN_3(var0, var1) DEBUGPRINT_NN_LEVEL(var0, var1, 3) + +#define DEBUGPRINT_VV(var0, var1) DEBUGPRINT_VV_LEVEL(var0, var1, 1) +#define DEBUGPRINT_VV_2(var0, var1) DEBUGPRINT_VV_LEVEL(var0, var1, 2) +#define DEBUGPRINT_VV_3(var0, var1) DEBUGPRINT_VV_LEVEL(var0, var1, 3) + +#define DEBUGPRINT_SV(var0, var) 
DEBUGPRINT_SV_LEVEL(var0, var, 1) +#define DEBUGPRINT_SV_2(var0, var) DEBUGPRINT_SV_LEVEL(var0, var, 2) +#define DEBUGPRINT_SV_3(var0, var) DEBUGPRINT_SV_LEVEL(var0, var, 3) + +#define DEBUGPRINT_SX(var0, var) DEBUGPRINT_SX_LEVEL(var0, var, 1) +#define DEBUGPRINT_SX_2(var0, var) DEBUGPRINT_SX_LEVEL(var0, var, 2) +#define DEBUGPRINT_SX_3(var0, var) DEBUGPRINT_SX_LEVEL(var0, var, 3) + +// A: module->fileinfo, B:"filename", +// C: stridx, D:module->fileinfo_isstring, E:true; +#define SET_INFO_PAIR(a, b, c, d, e) \ + do { \ + (a).emplace_back(builder->GetOrCreateStringIndex(b), c); \ + (d).emplace_back(e) \ + } while (0) +} // namespace maple +#endif // AST2MPL_INCLUDE_ASTMACROS_H diff --git a/src/hir2mpl/ast_input/clang/lib/ast_type.cpp b/src/hir2mpl/ast_input/clang/lib/ast_type.cpp new file mode 100644 index 0000000000000000000000000000000000000000..5613458f5dcdcbce7373d487739e4773546aeca4 --- /dev/null +++ b/src/hir2mpl/ast_input/clang/lib/ast_type.cpp @@ -0,0 +1,615 @@ +/* + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "ast_macros.h" +#include "ast_interface.h" +#include "ast_util.h" +#include "fe_manager.h" +#include "fe_options.h" +#include "driver_options.h" +#include "triple.h" + +namespace maple { +std::map LibAstFile::unnamedLocMap; +MIRType *LibAstFile::CvtPrimType(const clang::QualType qualType, bool isSourceType) const { + clang::QualType srcType = qualType.getCanonicalType(); + if (srcType.isNull()) { + return nullptr; + } + + MIRType *destType = nullptr; + if (llvm::isa(srcType)) { + const auto *builtinType = llvm::cast(srcType); + if (isSourceType) { + MIRType *sourceType = CvtPrimType2SourceType(builtinType->getKind()); + if (sourceType != nullptr) { + return sourceType; + } + } + PrimType primType = CvtPrimType(builtinType->getKind(), isSourceType); + destType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(primType); + } + return destType; +} + +MIRType *LibAstFile::CvtPrimType2SourceType(const clang::BuiltinType::Kind kind) const { + switch (kind) { + case clang::BuiltinType::ULong: + return FEManager::GetTypeManager().GetOrCreateTypeByNameType(kDbgULong); + case clang::BuiltinType::Long: + return FEManager::GetTypeManager().GetOrCreateTypeByNameType(kDbgLong); + case clang::BuiltinType::LongDouble: + return FEManager::GetTypeManager().GetOrCreateTypeByNameType(kDbgLongDouble); + default: + return nullptr; + } +} + +PrimType LibAstFile::CvtPrimType(const clang::BuiltinType::Kind kind, bool isSourceType) const { + switch (kind) { + case clang::BuiltinType::Bool: + return PTY_u1; + case clang::BuiltinType::Char_U: + return (FEOptions::GetInstance().IsUseSignedChar() || isSourceType) ? PTY_i8 : PTY_u8; + case clang::BuiltinType::UChar: + return PTY_u8; + case clang::BuiltinType::WChar_U: + return (FEOptions::GetInstance().IsUseSignedChar() || isSourceType) ? 
PTY_i16 : PTY_u16; + case clang::BuiltinType::UShort: + return PTY_u16; + case clang::BuiltinType::UInt: + return PTY_u32; + case clang::BuiltinType::ULong: + return Triple::GetTriple().GetEnvironment() == Triple::GNUILP32 ? PTY_u32 : PTY_u64; + case clang::BuiltinType::ULongLong: + return PTY_u64; + case clang::BuiltinType::UInt128: + return PTY_u128; + case clang::BuiltinType::Char_S: + case clang::BuiltinType::SChar: + return PTY_i8; + case clang::BuiltinType::WChar_S: + case clang::BuiltinType::Short: + case clang::BuiltinType::Char16: + return PTY_i16; + case clang::BuiltinType::Char32: + case clang::BuiltinType::Int: + return PTY_i32; + case clang::BuiltinType::Long: + return Triple::GetTriple().GetEnvironment() == Triple::GNUILP32 ? PTY_i32 : PTY_i64; + case clang::BuiltinType::LongLong: + return PTY_i64; + case clang::BuiltinType::Int128: + return PTY_i128; + case clang::BuiltinType::Float: + return PTY_f32; + case clang::BuiltinType::Double: + case clang::BuiltinType::LongDouble: + return PTY_f64; + case clang::BuiltinType::Float128: + return PTY_f64; + case clang::BuiltinType::NullPtr: // default 64-bit, need to update + return PTY_a64; + case clang::BuiltinType::Half: // PTY_f16, NOTYETHANDLED + case clang::BuiltinType::Float16: + CHECK_FATAL(false, "Float16 types not implemented yet"); + return PTY_void; + case clang::BuiltinType::Void: + default: + return PTY_void; + } +} + +bool LibAstFile::TypeHasMayAlias(const clang::QualType srcType) const { + auto *td = srcType->getAsTagDecl(); + if (td != nullptr && td->hasAttr()) { + return true; + } + + clang::QualType qualType = srcType; + while (auto *tt = qualType->getAs()) { + if (tt->getDecl()->hasAttr()) { + return true; + } + qualType = tt->desugar(); + } + + return false; +} + +MIRType *LibAstFile::CvtTypedef(const clang::QualType &qualType) { + const clang::TypedefType *typedefType = llvm::dyn_cast(qualType); + if (typedefType == nullptr) { + return nullptr; + } + return CvtTypedefDecl(*typedefType->getDecl()); +} + +MIRType *LibAstFile::CvtTypedefDecl(const clang::TypedefNameDecl &typedefDecl) { + std::string typedefName = GetDeclName(typedefDecl, true); + MIRTypeByName *typdefType = nullptr; + clang::QualType underlyTy = typedefDecl.getCanonicalDecl()->getUnderlyingType(); + MIRType *type = CvtType(underlyTy, true); + if (type != nullptr) { + typdefType = FEManager::GetTypeManager().CreateTypedef(typedefName, *type); + } + return typdefType; +} + +MIRType *LibAstFile::CvtSourceType(const clang::QualType qualType) { + return CvtType(qualType, true); +} + +MIRType *LibAstFile::CvtType(const clang::QualType qualType, bool isSourceType) { + clang::QualType srcType = qualType.getCanonicalType(); + if (isSourceType) { + MIRType *nameType = CvtTypedef(qualType); + if (nameType != nullptr) { + return nameType; + } + srcType = qualType; + } + if (srcType.isNull()) { + return nullptr; + } + + MIRType *destType = CvtPrimType(srcType, isSourceType); + if (destType != nullptr) { + return destType; + } + + // handle pointer types + const clang::QualType srcPteType = srcType->getPointeeType(); + if (!srcPteType.isNull()) { + MIRType *mirPointeeType = CvtType(srcPteType, isSourceType); + if (mirPointeeType == nullptr) { + return nullptr; + } + + GenericAttrs genAttrs; + GetQualAttrs(srcPteType, genAttrs, isSourceType); + TypeAttrs attrs = genAttrs.ConvertToTypeAttrs(); + // Get alignment from the pointee type + uint32 alignmentBits = astContext->getTypeAlignIfKnown(srcPteType); + if (alignmentBits != 0) { + if (alignmentBits > 
astContext->getTypeUnadjustedAlign(srcPteType)) { + attrs.SetAlign(alignmentBits / 8); // bits to bytes + } + } + if (IsOneElementVector(srcPteType)) { + attrs.SetAttr(ATTR_oneelem_simd); + } + + // Currently, only pointer types need the may_alias handling. + // The input parameter must be the raw pointee type. + if (TypeHasMayAlias(qualType->getPointeeType())) { + attrs.SetAttr(ATTR_may_alias); + } + // A variably modified type is the type of a variable length array (C99 6.7.5). + // Convert the VLA to a single-dimensional pointer, e.g. int (*)[N]. + if (qualType->isVariablyModifiedType() && mirPointeeType->IsMIRPtrType()) { + static_cast<MIRPtrType*>(mirPointeeType)->SetTypeAttrs(attrs); + return mirPointeeType; + } + MIRType *prtType; + if (attrs == TypeAttrs()) { + prtType = GlobalTables::GetTypeTable().GetOrCreatePointerType(*mirPointeeType); + } else { + prtType = GlobalTables::GetTypeTable().GetOrCreatePointerType(*mirPointeeType, PTY_ptr, attrs); + } + return prtType; + } + + return CvtOtherType(srcType, isSourceType); +} + +MIRType *LibAstFile::CvtOtherType(const clang::QualType srcType, bool isSourceType) { + MIRType *destType = nullptr; + if (srcType->isArrayType()) { + destType = CvtArrayType(srcType, isSourceType); + } else if (srcType->isRecordType()) { + destType = CvtRecordType(srcType); + // isComplexType() does not include complex integers (a GCC extension) + } else if (srcType->isAnyComplexType()) { + destType = CvtComplexType(srcType); + } else if (srcType->isFunctionType()) { + destType = CvtFunctionType(srcType, isSourceType); + } else if (srcType->isEnumeralType()) { + destType = CvtEnumType(srcType, isSourceType); + } else if (srcType->isAtomicType()) { + const auto *atomicType = llvm::cast<clang::AtomicType>(srcType); + destType = CvtType(atomicType->getValueType()); + } else if (srcType->isVectorType()) { + destType = CvtVectorType(srcType); + } + CHECK_FATAL(destType != nullptr, "unsupported type %s", srcType.getAsString().c_str()); + return destType; +} + +MIRType *LibAstFile::CvtEnumType(const clang::QualType &qualType, bool isSourceType) { + const clang::EnumType *enumTy = llvm::dyn_cast<clang::EnumType>(qualType.getCanonicalType()); + CHECK_NULL_FATAL(enumTy); + clang::EnumDecl *enumDecl = enumTy->getDecl(); + if (enumDecl->getDefinition() != nullptr) { + enumDecl = enumDecl->getDefinition(); + } + MIRType *type; + if (isSourceType) { + auto itor = std::find(enumDecles.cbegin(), enumDecles.cend(), enumDecl); + if (itor == enumDecles.end()) { + (void)enumDecles.emplace_back(enumDecl); + } + std::string enumName = GetDeclName(*enumDecl, true); + MIRTypeByName *typdefType = FEManager::GetTypeManager().GetOrCreateTypeByNameType(enumName); + type = typdefType; + } else { + clang::QualType qt = enumDecl->getIntegerType(); + type = CvtType(qt, isSourceType); + } + return type; +} + +MIRType *LibAstFile::CvtRecordType(const clang::QualType qualType) { + clang::QualType srcType = qualType.getCanonicalType(); + const auto *recordType = llvm::cast<clang::RecordType>(srcType); + clang::RecordDecl *recordDecl = recordType->getDecl(); + if (!recordDecl->isLambda() && recordDeclSet.emplace(recordDecl).second) { + auto itor = std::find(recordDecles.cbegin(), recordDecles.cend(), recordDecl); + if (itor == recordDecles.end()) { + recordDecles.emplace_back(recordDecl); + } + } + MIRStructType *type = nullptr; + std::stringstream ss; + EmitTypeName(srcType, ss); + std::string name(ss.str()); + if (!recordDecl->isDefinedOutsideFunctionOrMethod()) { + Loc l = GetLOC(recordDecl->getLocation()); + std::stringstream ss; + ss << name << "_" << l.line << 
"_" << l.column; + name = ss.str(); + } + type = FEManager::GetTypeManager().GetOrCreateStructType(name); + type->SetMIRTypeKind(srcType->isUnionType() ? kTypeUnion : kTypeStruct); + if (recordType->isIncompleteType()) { + type->SetMIRTypeKind(kTypeStructIncomplete); + } + return recordDecl->isLambda() ? GlobalTables::GetTypeTable().GetOrCreatePointerType(*type) : type; +} + +MIRType *LibAstFile::CvtArrayType(const clang::QualType &srcType, bool isSourceType) { + MIRType *elemType = nullptr; + TypeAttrs elemAttrs; + std::vector operands; + uint8_t dim = 0; + if (srcType->isConstantArrayType()) { + CollectBaseEltTypeAndSizesFromConstArrayDecl(srcType, elemType, elemAttrs, operands, isSourceType); + ASSERT(operands.size() < kMaxArrayDim, "The max array dimension is kMaxArrayDim"); + dim = static_cast(operands.size()); + } else if (srcType->isIncompleteArrayType()) { + const clang::ArrayType *arrType = srcType->getAsArrayTypeUnsafe(); + const auto *inArrType = llvm::cast(arrType); + CollectBaseEltTypeAndSizesFromConstArrayDecl( + inArrType->getElementType(), elemType, elemAttrs, operands, isSourceType); + dim = static_cast(operands.size()); + ASSERT(operands.size() < kMaxArrayDim, "The max array dimension is kMaxArrayDim"); + } else if (srcType->isVariableArrayType()) { + CollectBaseEltTypeAndDimFromVariaArrayDecl(srcType, elemType, elemAttrs, dim, isSourceType); + } else if (srcType->isDependentSizedArrayType()) { + CollectBaseEltTypeAndDimFromDependentSizedArrayDecl(srcType, elemType, elemAttrs, operands, isSourceType); + ASSERT(operands.size() < kMaxArrayDim, "The max array dimension is kMaxArrayDim"); + dim = static_cast(operands.size()); + } else { + NOTYETHANDLED(srcType.getAsString().c_str()); + } + uint32_t *sizeArray = nullptr; + uint32_t tempSizeArray[kMaxArrayDim]; + MIRType *retType = nullptr; + if (dim > 0) { + CHECK_NULL_FATAL(elemType); + if (!srcType->isVariableArrayType()) { + for (uint8_t k = 0; k < dim; ++k) { + tempSizeArray[k] = operands[k]; + } + sizeArray = tempSizeArray; + retType = GlobalTables::GetTypeTable().GetOrCreateArrayType(*elemType, dim, sizeArray, elemAttrs); + } else { + retType = GlobalTables::GetTypeTable().GetOrCreatePointerType(*elemType, PTY_ptr, elemAttrs); + } + } else { + bool asFlag = srcType->isIncompleteArrayType(); + CHECK_FATAL(asFlag, "Incomplete Array Type"); + retType = elemType; + } + + if (srcType->isIncompleteArrayType()) { + // For an incomplete array type, assume a length of 1. 
If MIRFarrayType is enabled, delete ATTR_incomplete_array + elemAttrs.SetAttr(ATTR_incomplete_array); + retType = GlobalTables::GetTypeTable().GetOrCreateArrayType(*retType, 1, elemAttrs); + } + return retType; +} + +MIRType *LibAstFile::CvtComplexType(const clang::QualType srcType) const { + clang::QualType srcElemType = llvm::cast<clang::ComplexType>(srcType)->getElementType(); + MIRType *destElemType = CvtPrimType(srcElemType); + CHECK_NULL_FATAL(destElemType); + return FEManager::GetTypeManager().GetOrCreateComplexStructType(*destElemType); +} + +MIRType *LibAstFile::CvtFunctionType(const clang::QualType srcType, bool isSourceType) { + const auto *funcType = srcType.getTypePtr()->castAs<clang::FunctionType>(); + CHECK_NULL_FATAL(funcType); + MIRType *retType = CvtType(funcType->getReturnType(), isSourceType); + std::vector<TyIdx> argsVec; + std::vector<TypeAttrs> attrsVec; + bool isFirstArgRet = false; + const clang::QualType &retQualType = funcType->getReturnType().getCanonicalType(); + // set up first_arg_return if the returned struct is larger than 16 bytes + if (!isSourceType && retQualType->isRecordType()) { + const auto *recordType = llvm::cast<clang::RecordType>(retQualType); + clang::RecordDecl *recordDecl = recordType->getDecl(); + const clang::ASTRecordLayout &layout = astContext->getASTRecordLayout(recordDecl->getDefinition()); + const unsigned twoByteSize = 16; + if (layout.getSize().getQuantity() > twoByteSize) { + MIRType *ptrType = GlobalTables::GetTypeTable().GetOrCreatePointerType(*retType); + GenericAttrs genAttrs; + if (IsOneElementVector(retQualType)) { + genAttrs.SetAttr(GENATTR_oneelem_simd); + } + attrsVec.push_back(genAttrs.ConvertToTypeAttrs()); + argsVec.push_back(ptrType->GetTypeIndex()); + retType = GlobalTables::GetTypeTable().GetVoid(); + isFirstArgRet = true; + } + } + if (funcType->isFunctionProtoType()) { + const auto *funcProtoType = funcType->castAs<clang::FunctionProtoType>(); + using ItType = clang::FunctionProtoType::param_type_iterator; + for (ItType it = funcProtoType->param_type_begin(); it != funcProtoType->param_type_end(); ++it) { + clang::QualType protoQualType = *it; + argsVec.push_back(CvtType(protoQualType, isSourceType)->GetTypeIndex()); + GenericAttrs genAttrs; + // collect storage class, access, and qual attributes + // ASTCompiler::GetSClassAttrs(SC_Auto, genAttrs); -- no-op + // ASTCompiler::GetAccessAttrs(genAttrs); -- no-op for params + GetCVRAttrs(protoQualType.getCVRQualifiers(), genAttrs); + if (IsOneElementVector(protoQualType)) { + genAttrs.SetAttr(GENATTR_oneelem_simd); + } + attrsVec.push_back(genAttrs.ConvertToTypeAttrs()); + } + // The 'void' is allowed only as a single parameter to a function with no other parameters (C99 6.7.5.3p10), + // e.g. 'int foo(void)', but the parameter list of FunctionProtoType is empty. + // The void parameter source type needs to be recorded in the debuginfo. 
+ if (isSourceType && argsVec.empty()) { + argsVec.push_back(GlobalTables::GetTypeTable().GetVoid()->GetTypeIndex()); + } + } + MIRType *mirFuncType = GlobalTables::GetTypeTable().GetOrCreateFunctionType( + retType->GetTypeIndex(), argsVec, attrsVec); + if (isFirstArgRet) { + static_cast(mirFuncType)->SetFirstArgReturn(); + } + return GlobalTables::GetTypeTable().GetOrCreatePointerType(*mirFuncType); +} + + +void LibAstFile::CollectBaseEltTypeAndSizesFromConstArrayDecl(const clang::QualType &currQualType, MIRType *&elemType, + TypeAttrs &elemAttr, std::vector &operands, + bool isSourceType) { + if (isSourceType) { + MIRType *nameType = CvtTypedef(currQualType); + if (nameType != nullptr) { + elemType = nameType; + return; + } + } + const clang::Type *ptrType = currQualType.getTypePtrOrNull(); + ASSERT(ptrType != nullptr, "Null type", currQualType.getAsString().c_str()); + if (ptrType->isArrayType()) { + const clang::ArrayType *arrType = ptrType->getAsArrayTypeUnsafe(); + ASSERT(arrType->isConstantArrayType(), "Must be a ConstantArrayType", currQualType.getAsString().c_str()); + const auto *constArrayType = llvm::dyn_cast(arrType); + ASSERT(constArrayType != nullptr, "ERROR : null pointer!"); + llvm::APInt size = constArrayType->getSize(); + ASSERT(size.getSExtValue() >= 0, "Array Size must be positive or zero", currQualType.getAsString().c_str()); + operands.push_back(size.getSExtValue()); + CollectBaseEltTypeAndSizesFromConstArrayDecl(constArrayType->getElementType(), elemType, elemAttr, operands, + isSourceType); + } else { + CollectBaseEltTypeFromArrayDecl(currQualType, elemType, elemAttr, isSourceType); + } +} + +void LibAstFile::CollectBaseEltTypeAndDimFromVariaArrayDecl(const clang::QualType &currQualType, MIRType *&elemType, + TypeAttrs &elemAttr, uint8_t &dim, bool isSourceType) { + if (isSourceType) { + MIRType *nameType = CvtTypedef(currQualType); + if (nameType != nullptr) { + elemType = nameType; + return; + } + } + const clang::Type *ptrType = currQualType.getTypePtrOrNull(); + ASSERT(ptrType != nullptr, "Null type", currQualType.getAsString().c_str()); + if (ptrType->isArrayType()) { + const auto *arrayType = ptrType->getAsArrayTypeUnsafe(); + CollectBaseEltTypeAndDimFromVariaArrayDecl(arrayType->getElementType(), elemType, elemAttr, dim, isSourceType); + ++dim; + } else { + CollectBaseEltTypeFromArrayDecl(currQualType, elemType, elemAttr, isSourceType); + } +} + +void LibAstFile::CollectBaseEltTypeAndDimFromDependentSizedArrayDecl( + const clang::QualType currQualType, MIRType *&elemType, TypeAttrs &elemAttr, std::vector &operands, + bool isSourceType) { + if (isSourceType) { + MIRType *nameType = CvtTypedef(currQualType); + if (nameType != nullptr) { + elemType = nameType; + return; + } + } + const clang::Type *ptrType = currQualType.getTypePtrOrNull(); + ASSERT(ptrType != nullptr, "ERROR:null pointer!"); + if (ptrType->isArrayType()) { + const auto *arrayType = ptrType->getAsArrayTypeUnsafe(); + ASSERT(arrayType != nullptr, "ERROR:null pointer!"); + // variable sized + operands.push_back(0); + CollectBaseEltTypeAndDimFromDependentSizedArrayDecl(arrayType->getElementType(), elemType, elemAttr, operands, + isSourceType); + } else { + CollectBaseEltTypeFromArrayDecl(currQualType, elemType, elemAttr, isSourceType); + } +} + +void LibAstFile::CollectBaseEltTypeFromArrayDecl(const clang::QualType &currQualType, + MIRType *&elemType, TypeAttrs &elemAttr, bool isSourceType) { + elemType = CvtType(currQualType, isSourceType); + // Get alignment from the element type + uint32 
alignmentBits = astContext->getTypeAlignIfKnown(currQualType); + if (alignmentBits != 0) { + if (alignmentBits > astContext->getTypeUnadjustedAlign(currQualType)) { + elemAttr.SetAlign(alignmentBits / 8); // bits to byte + } + } + if (IsOneElementVector(currQualType)) { + elemAttr.SetAttr(ATTR_oneelem_simd); + } +} + +MIRType *LibAstFile::CvtVectorType(const clang::QualType srcType) { + const auto *vectorType = llvm::cast(srcType); + MIRType *elemType = CvtType(vectorType->getElementType()); + unsigned numElems = vectorType->getNumElements(); + MIRType *destType = nullptr; + switch (elemType->GetPrimType()) { + case PTY_i64: + if (numElems == 1) { + destType = GlobalTables::GetTypeTable().GetPrimType(PTY_i64); + } else if (numElems == 2) { + destType = GlobalTables::GetTypeTable().GetPrimType(PTY_v2i64); + } else { + CHECK_FATAL(false, "Unsupported vector type"); + } + break; + case PTY_i32: + if (numElems == 1) { + destType = GlobalTables::GetTypeTable().GetPrimType(PTY_i64); + } else if (numElems == 2) { + destType = GlobalTables::GetTypeTable().GetPrimType(PTY_v2i32); + } else if (numElems == 4) { + destType = GlobalTables::GetTypeTable().GetPrimType(PTY_v4i32); + } else if (numElems == 8) { + destType = GlobalTables::GetTypeTable().GetPrimType(PTY_v8i16); + } else if (numElems == 16) { + destType = GlobalTables::GetTypeTable().GetPrimType(PTY_v16i8); + } else { + CHECK_FATAL(false, "Unsupported vector type"); + } + break; + case PTY_i16: + if (numElems == 4) { + destType = GlobalTables::GetTypeTable().GetPrimType(PTY_v4i16); + } else if (numElems == 8) { + destType = GlobalTables::GetTypeTable().GetPrimType(PTY_v8i16); + } else { + CHECK_FATAL(false, "Unsupported vector type"); + } + break; + case PTY_i8: + if (numElems == 8) { + destType = GlobalTables::GetTypeTable().GetPrimType(PTY_v8i8); + } else if (numElems == 16) { + destType = GlobalTables::GetTypeTable().GetPrimType(PTY_v16i8); + } else { + CHECK_FATAL(false, "Unsupported vector type"); + } + break; + case PTY_u64: + if (numElems == 1) { + destType = GlobalTables::GetTypeTable().GetPrimType(PTY_u64); + } else if (numElems == 2) { + destType = GlobalTables::GetTypeTable().GetPrimType(PTY_v2u64); + } else { + CHECK_FATAL(false, "Unsupported vector type"); + } + break; + case PTY_u32: + if (numElems == 2) { + destType = GlobalTables::GetTypeTable().GetPrimType(PTY_v2u32); + } else if (numElems == 4) { + destType = GlobalTables::GetTypeTable().GetPrimType(PTY_v4u32); + } else { + CHECK_FATAL(false, "Unsupported vector type"); + } + break; + case PTY_u16: + if (numElems == 4) { + destType = GlobalTables::GetTypeTable().GetPrimType(PTY_v4u16); + } else if (numElems == 8) { + destType = GlobalTables::GetTypeTable().GetPrimType(PTY_v8u16); + } else { + CHECK_FATAL(false, "Unsupported vector type"); + } + break; + case PTY_u8: + if (numElems == 8) { + destType = GlobalTables::GetTypeTable().GetPrimType(PTY_v8u8); + } else if (numElems == 16) { + destType = GlobalTables::GetTypeTable().GetPrimType(PTY_v16u8); + } else { + CHECK_FATAL(false, "Unsupported vector type"); + } + break; + case PTY_f64: + if (numElems == 1) { + destType = GlobalTables::GetTypeTable().GetPrimType(PTY_f64); + } else if (numElems == 2) { + destType = GlobalTables::GetTypeTable().GetPrimType(PTY_v2f64); + } else { + CHECK_FATAL(false, "Unsupported vector type"); + } + break; + case PTY_f32: + if (numElems == 2) { + destType = GlobalTables::GetTypeTable().GetPrimType(PTY_v2f32); + } else if (numElems == 4) { + destType = 
GlobalTables::GetTypeTable().GetPrimType(PTY_v4f32); + } else { + CHECK_FATAL(false, "Unsupported vector type"); + } + break; + default: + CHECK_FATAL(false, "Unsupported vector type"); + break; + } + return destType; +} + +bool LibAstFile::IsOneElementVector(const clang::QualType &qualType) { + return IsOneElementVector(*qualType.getTypePtr()); +} + +bool LibAstFile::IsOneElementVector(const clang::Type &type) { + const clang::VectorType *vectorType = llvm::dyn_cast(type.getUnqualifiedDesugaredType()); + if (vectorType != nullptr && vectorType->getNumElements() == 1) { + return true; + } + return false; +} +} // namespace maple diff --git a/src/hir2mpl/ast_input/clang/lib/ast_util.cpp b/src/hir2mpl/ast_input/clang/lib/ast_util.cpp new file mode 100644 index 0000000000000000000000000000000000000000..2759e5cf443f9b82d380fef7f0449e2fec8ad527 --- /dev/null +++ b/src/hir2mpl/ast_input/clang/lib/ast_util.cpp @@ -0,0 +1,346 @@ +/* + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ + +#include "ast_util.h" +#include "clang/AST/AST.h" +#include "clang/Serialization/ASTDeserializationListener.h" +#include "mir_nodes.h" +#include "mir_builder.h" +#include "ast_macros.h" +#include "fe_utils_ast.h" + +namespace maple { +int ast2mplDebug = 0; +int ast2mplDebugIndent = 0; +uint32_t ast2mplOption = kCheckAssertion; +const char *checkFuncName = nullptr; + +void ASTUtil::AdjIndent(int n) { + ast2mplDebugIndent += n; +} + +void ASTUtil::SetIndent(int n) { + ast2mplDebugIndent = n; +} + +bool ASTUtil::ValidInName(char c) { + return isalnum(c) || (c == '_' || c == '|' || c == ';' || c == '/' || c == '$'); +} + +bool ASTUtil::IsValidName(const std::string &name) { + for (size_t i = 0; i < name.length(); ++i) { + if (!ValidInName(name[i])) { + return false; + } + } + return true; +} + +bool ASTUtil::IsSignedType(const MIRType &type) { + PrimType primType = type.GetPrimType(); + if (primType == PTY_i8 || primType == PTY_i16 || primType == PTY_i32 || primType == PTY_i64) { + return true; + } + return false; +} + +void ASTUtil::AdjustName(std::string &name) { + for (size_t i = 0; name[i] != '\0'; ++i) { + char c = name[i]; + if (ASTUtil::ValidInName(c)) { + name[i] = c; + continue; + } + switch (c) { + case '(': + case ')': + case '<': + case '>': + case '-': + case ' ': + name[i] = '_'; + break; + case '[': + name[i] = 'A'; + break; + case ']': + name[i] = 'B'; + break; + default: + name[i] = '$'; + break; + } + } +} + +std::string ASTUtil::GetAdjustVarName(const std::string &name, uint32_t &num) { + std::stringstream ss; + ss << name << "." 
<< num; + + DEBUGPRINT2(ss.str()); + ++num; + return ss.str(); +} + +std::string ASTUtil::GetNameWithSuffix(const std::string &origName, const std::string &suffix) { + std::stringstream ss; + ss << origName << suffix; + + return ss.str(); +} + +Opcode ASTUtil::CvtBinaryOpcode(uint32_t opcode, PrimType pty) { + switch (opcode) { + case clang::BO_Mul: + return OP_mul; // "*" + case clang::BO_Div: + return OP_div; // "/" + case clang::BO_Rem: + return OP_rem; // "%" + case clang::BO_Add: + return OP_add; // "+" + case clang::BO_Sub: + return OP_sub; // "-" + case clang::BO_Shl: + return OP_shl; // "<<" + case clang::BO_Shr: + return IsUnsignedInteger(pty) ? OP_lshr : OP_ashr; // ">>" + case clang::BO_LT: + return OP_lt; // "<" + case clang::BO_GT: + return OP_gt; // ">" + case clang::BO_LE: + return OP_le; // "<=" + case clang::BO_GE: + return OP_ge; // ">=" + case clang::BO_EQ: + return OP_eq; // "==" + case clang::BO_NE: + return OP_ne; // "!=" + case clang::BO_And: + return OP_band; // "&" + case clang::BO_Xor: + return OP_bxor; // "^" + case clang::BO_Or: + return OP_bior; // "|" + case clang::BO_LAnd: + return OP_cand; // "&&" + case clang::BO_LOr: + return OP_cior; // "||" + default: + return OP_undef; + } +} + +// these do not have equivalent opcode in mapleir +Opcode ASTUtil::CvtBinaryAssignOpcode(uint32_t opcode) { + switch (opcode) { + case clang::BO_Assign: + return OP_eq; // "=" + case clang::BO_MulAssign: + return OP_mul; // "*=" + case clang::BO_DivAssign: + return OP_div; // "/=" + case clang::BO_RemAssign: + return OP_rem; // "%=" + case clang::BO_AddAssign: + return OP_add; // "+=" + case clang::BO_SubAssign: + return OP_sub; // "-=" + case clang::BO_ShlAssign: + return OP_shl; // "<<=" + case clang::BO_ShrAssign: + return OP_lshr; // ">>=" + case clang::BO_AndAssign: + return OP_band; // "&=" + case clang::BO_XorAssign: + return OP_bxor; // "^=" + case clang::BO_OrAssign: + return OP_bior; // "|=" + case clang::BO_Comma: + return OP_undef; // "," + case clang::BO_PtrMemD: + return OP_undef; // ".*" + case clang::BO_PtrMemI: + return OP_undef; // "->*" + default: + return OP_undef; + } +} + +Opcode ASTUtil::CvtUnaryOpcode(uint32_t opcode) { + switch (opcode) { + case clang::UO_Minus: + return OP_neg; // "-" + case clang::UO_Not: + return OP_bnot; // "~" + case clang::UO_LNot: + return OP_lnot; // "!" 
+ case clang::UO_PostInc: + return OP_add; // "++" + case clang::UO_PostDec: + return OP_sub; // "--" + case clang::UO_PreInc: + return OP_add; // "++" + case clang::UO_PreDec: + return OP_sub; // "--" + case clang::UO_AddrOf: + return OP_addrof; // "&" + case clang::UO_Deref: + return OP_undef; // "*" + case clang::UO_Plus: + return OP_undef; // "+" + case clang::UO_Real: + return OP_undef; // "__real" + case clang::UO_Imag: + return OP_undef; // "__imag" + case clang::UO_Extension: + return OP_undef; // "__extension__" + case clang::UO_Coawait: + return OP_undef; // "co_await" + default: + CHECK_FATAL(false, "NYI ASTUtil::CvtUnaryOpcode", opcode); + } +} + +uint32 ASTUtil::GetDim(MIRType &type) { + MIRType *ptrType = &type; + if (type.GetKind() == kTypePointer) { + auto *ptr = static_cast(&type); + ptrType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(ptr->GetPointedTyIdx()); + } + uint32 dim = 0; + while (ptrType->GetKind() == kTypeArray) { + auto *array = static_cast(ptrType); + ptrType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(array->GetElemTyIdx()); + if (ptrType->GetKind() == kTypePointer) { + auto *ptr = static_cast(ptrType); + ptrType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(ptr->GetPointedTyIdx()); + } + ++dim; + } + + DEBUGPRINT2(dim); + return dim; +} + +std::string ASTUtil::GetTypeString(MIRType &type) { + std::stringstream ss; + if (ast2mplDebug > kDebugLevelThree) { + type.Dump(1); + } + switch (type.GetKind()) { + case kTypeScalar: + ss << FEUtilAST::Type2Label(type.GetPrimType()); + break; + case kTypeStruct: + case kTypeClass: + case kTypeInterface: + case kTypeBitField: + case kTypeByName: + case kTypeParam: { + ss << GlobalTables::GetStrTable().GetStringFromStrIdx(type.GetNameStrIdx()).c_str() << ";"; + break; + } + case kTypeArray: { + auto &array = static_cast(type); + MIRType *elemType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(array.GetElemTyIdx()); + uint32 dim = GetDim(type); + for (size_t i = 0; i < dim; ++i) { + ss << 'A'; + } + ss << GetTypeString(*elemType); + break; + } + case kTypePointer: { + auto &ptr = static_cast(type); + MIRType *pType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(ptr.GetPointedTyIdx()); + ss << "P"; + std::string str = GetTypeString(*pType); + ss << str; + break; + } + default: + break; + } + + return ss.str(); +} + +void ASTUtil::DumpMplTypes() { + uint32 size = static_cast(GlobalTables::GetTypeTable().GetTypeTableSize()); + DEBUGPRINT(size); + DEBUGPRINT_S(" dump type table "); + for (uint32 i = 1; i < size; ++i) { + MIRType *type = GlobalTables::GetTypeTable().GetTypeTable()[i]; + std::string name = GlobalTables::GetStrTable().GetStringFromStrIdx(type->GetNameStrIdx()); + (void)printf("%-4u %4u %s\n", i, type->GetNameStrIdx().GetIdx(), name.c_str()); + } + DEBUGPRINT_S(" end dump type table "); +} + +void ASTUtil::DumpMplStrings() { + size_t size = GlobalTables::GetStrTable().StringTableSize(); + DEBUGPRINT(size); + DEBUGPRINT_S(" dump string table "); + for (size_t i = 1; i < size; ++i) { + MIRType *type = GlobalTables::GetTypeTable().GetTypeTable()[i]; + const std::string &str = GlobalTables::GetStrTable().GetStringFromStrIdx(type->GetNameStrIdx()); + (void)printf("%-4d %4d %s\n", static_cast(i), static_cast(str.length()), str.c_str()); + } + DEBUGPRINT_S(" end dump string table "); +} + +bool ASTUtil::IsVoidPointerType(const TyIdx &tyIdx) { + MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx); + auto *ptr = static_cast(type); + type = 
GlobalTables::GetTypeTable().GetTypeFromTyIdx(ptr->GetPointedTyIdx()); + if (type->GetPrimType() == PTY_void) { + return true; + } + return false; +} + +std::string ASTUtil::AdjustFuncName(std::string funcName) { + std::size_t found = funcName.find('\"'); + const std::size_t offsetByTwoChar = 2; // skip the replaced char + while (found != std::string::npos) { + (void)funcName.replace(found, 1, "\\\""); + found += offsetByTwoChar; + found = funcName.find('\"', found); + } + return funcName; +} + +bool ASTUtil::InsertFuncSet(const GStrIdx &idx) { + static std::set funcIdxSet; + return funcIdxSet.insert(idx).second; +} + +bool ASTUtil::HasTypdefType(clang::QualType qualType) { + if (llvm::isa(qualType)) { + return true; + } + auto pointerType = llvm::dyn_cast(qualType); + if (pointerType != nullptr) { + return HasTypdefType(qualType->getPointeeType()); + } + const auto *arrayType = llvm::dyn_cast(qualType); + if (arrayType != nullptr) { + return HasTypdefType(arrayType->getElementType()); + } + return false; +} +} // namespace maple diff --git a/src/hir2mpl/ast_input/clang/lib/ast_util.h b/src/hir2mpl/ast_input/clang/lib/ast_util.h new file mode 100644 index 0000000000000000000000000000000000000000..af364ffe73c1765f184cb1666bb794b7ca3875e4 --- /dev/null +++ b/src/hir2mpl/ast_input/clang/lib/ast_util.h @@ -0,0 +1,71 @@ +/* + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef AST2MPL_INCLUDE_ASTUTIL_H +#define AST2MPL_INCLUDE_ASTUTIL_H +#include "clang/AST/AST.h" +#include "clang/AST/RecordLayout.h" +#include "mir_type.h" +#include "ast_macros.h" + +namespace maple { +extern int ast2mplDebug; +extern int ast2mplDebugIndent; +extern uint32_t ast2mplOption; +extern const char *checkFuncName; + +class ASTUtil { + public: + static const int opcodeNameLength = 228; + static const char *opcodeName[opcodeNameLength]; + static void AdjIndent(int n); + static void SetIndent(int n); + static bool ValidInName(char c); + static bool IsValidName(const std::string &name); + static bool IsSignedType(const MIRType &type); + static void AdjustName(std::string &name); + static std::string GetAdjustVarName(const std::string &name, uint32_t &num); + static std::string GetNameWithSuffix(const std::string &origName, const std::string &suffix); + + static void DumpMplStrings(); + static void DumpMplTypes(); + + static uint32 GetDim(MIRType &type); + static std::string GetTypeString(MIRType &type); + static Opcode CvtUnaryOpcode(uint32_t opcode); + static Opcode CvtBinaryOpcode(uint32_t opcode, PrimType pty = PTY_begin); + static Opcode CvtBinaryAssignOpcode(uint32_t opcode); + + static bool IsVoidPointerType(const TyIdx &tyIdx); + static std::string AdjustFuncName(std::string funcName); + static bool InsertFuncSet(const GStrIdx &idx); + static bool HasTypdefType(clang::QualType qualType); + + template + static std::string Join(const Range &elements, const char *delimiter) { + std::ostringstream os; + auto b = begin(elements); + auto e = end(elements); + if (b != e) { + std::copy(b, prev(e), std::ostream_iterator(os, delimiter)); + b = prev(e); + } + if (b != e) { + os << *b; + } + return os.str(); + } +}; +} // namespace maple +#endif // AST2MPL_INCLUDE_ASTUTIL_H_ diff --git a/src/hir2mpl/ast_input/clang/lib/sys/arm_neon.h b/src/hir2mpl/ast_input/clang/lib/sys/arm_neon.h new file mode 100644 index 0000000000000000000000000000000000000000..e4942b09fb34819e9cfbb5c4aabcfa7ae1c725f4 --- /dev/null +++ b/src/hir2mpl/ast_input/clang/lib/sys/arm_neon.h @@ -0,0 +1,1435 @@ +/*===---- arm_neon.h - ARM Neon intrinsics ---------------------------------=== + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ * + *===-----------------------------------------------------------------------=== + */ + +#ifndef __ARM_NEON_H +#define __ARM_NEON_H + +#include + +typedef float float32_t; +#ifdef __aarch64__ +typedef double float64_t; +#endif + +typedef __attribute__((neon_vector_type(8))) int8_t int8x8_t; +typedef __attribute__((neon_vector_type(16))) int8_t int8x16_t; +typedef __attribute__((neon_vector_type(4))) int16_t int16x4_t; +typedef __attribute__((neon_vector_type(8))) int16_t int16x8_t; +typedef __attribute__((neon_vector_type(2))) int32_t int32x2_t; +typedef __attribute__((neon_vector_type(4))) int32_t int32x4_t; +typedef __attribute__((neon_vector_type(1))) int64_t int64x1_t; +typedef __attribute__((neon_vector_type(2))) int64_t int64x2_t; +typedef __attribute__((neon_vector_type(8))) uint8_t uint8x8_t; +typedef __attribute__((neon_vector_type(16))) uint8_t uint8x16_t; +typedef __attribute__((neon_vector_type(4))) uint16_t uint16x4_t; +typedef __attribute__((neon_vector_type(8))) uint16_t uint16x8_t; +typedef __attribute__((neon_vector_type(2))) uint32_t uint32x2_t; +typedef __attribute__((neon_vector_type(4))) uint32_t uint32x4_t; +typedef __attribute__((neon_vector_type(1))) uint64_t uint64x1_t; +typedef __attribute__((neon_vector_type(2))) uint64_t uint64x2_t; +typedef __attribute__((neon_vector_type(2))) float32_t float32x2_t; +typedef __attribute__((neon_vector_type(4))) float32_t float32x4_t; +#ifdef __aarch64__ +typedef __attribute__((neon_vector_type(1))) float64_t float64x1_t; +typedef __attribute__((neon_vector_type(2))) float64_t float64x2_t; +#endif + +typedef struct int8x8x2_t { + int8x8_t val[2]; +} int8x8x2_t; + +typedef struct int16x4x2_t { + int16x4_t val[2]; +} int16x4x2_t; + +typedef struct int32x2x2_t { + int32x2_t val[2]; +} int32x2x2_t; + +typedef struct uint8x8x2_t { + uint8x8_t val[2]; +} uint8x8x2_t; + +typedef struct uint16x4x2_t { + uint16x4_t val[2]; +} uint16x4x2_t; + +typedef struct uint32x2x2_t { + uint32x2_t val[2]; +} uint32x2x2_t; + +typedef struct float32x2x2_t { + float32x2_t val[2]; +} float32x2x2_t; + +// vecTy vector_abs(vecTy src) +// Create a vector by getting the absolute value of the elements in src. +int8x8_t __builtin_mpl_vector_abs_v8i8(int8x8_t); +int16x4_t __builtin_mpl_vector_abs_v4i16(int16x4_t); +int32x2_t __builtin_mpl_vector_abs_v2i32(int32x2_t); +int64x1_t __builtin_mpl_vector_abs_v1i64(int64x1_t); +float32x2_t __builtin_mpl_vector_abs_v2f32(float32x2_t); +float64x1_t __builtin_mpl_vector_abs_v1f64(float64x1_t); +int8x16_t __builtin_mpl_vector_abs_v16i8(int8x16_t); +int16x8_t __builtin_mpl_vector_abs_v8i16(int16x8_t); +int32x4_t __builtin_mpl_vector_abs_v4i32(int32x4_t); +int64x2_t __builtin_mpl_vector_abs_v2i64(int64x2_t); +float32x4_t __builtin_mpl_vector_abs_v4f32(float32x4_t); +float64x2_t __builtin_mpl_vector_abs_v2f64(float64x2_t); + +// vecTy vector_mov_narrow(vecTy src) +// copies each element of the operand vector to the corresponding element of the destination vector. +// The result element is half the width of the operand element, and values are saturated to the result width. +// The results are the same type as the operands. 
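// Illustrative per-lane sketch (plain C; the helper name is hypothetical and not part of
// this header) of the saturating narrow described above, for the 16-bit to 8-bit signed
// case that vqmovn_s16 further below requests:
#include <stdint.h>
static inline int8_t saturating_narrow_lane_s16_sketch(int16_t v) {
  if (v > INT8_MAX) { return INT8_MAX; }   // clamp positive overflow to the 8-bit maximum
  if (v < INT8_MIN) { return INT8_MIN; }   // clamp negative overflow to the 8-bit minimum
  return (int8_t)v;                        // value already fits in the narrower lane
}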
+uint8x8_t __builtin_mpl_vector_mov_narrow_v8u16(uint16x8_t); +uint16x4_t __builtin_mpl_vector_mov_narrow_v4u32(uint32x4_t); +uint32x2_t __builtin_mpl_vector_mov_narrow_v2u64(uint64x2_t); +int8x8_t __builtin_mpl_vector_mov_narrow_v8i16(int16x8_t); +int16x4_t __builtin_mpl_vector_mov_narrow_v4i32(int32x4_t); +int32x2_t __builtin_mpl_vector_mov_narrow_v2i64(int64x2_t); + +// vecTy vector_addl_low(vecTy src1, vecTy src2) +// Add each element of the source vector to second source +// widen the result into the destination vector. +int16x8_t __builtin_mpl_vector_addl_low_v8i8(int8x8_t, int8x8_t); +int32x4_t __builtin_mpl_vector_addl_low_v4i16(int16x4_t, int16x4_t); +int64x2_t __builtin_mpl_vector_addl_low_v2i32(int32x2_t, int32x2_t); +uint16x8_t __builtin_mpl_vector_addl_low_v8u8(uint8x8_t, uint8x8_t); +uint32x4_t __builtin_mpl_vector_addl_low_v4u16(uint16x4_t, uint16x4_t); +uint64x2_t __builtin_mpl_vector_addl_low_v2u32(uint32x2_t, uint32x2_t); + +// vecTy vector_addl_high(vecTy src1, vecTy src2) +// Add each element of the source vector to upper half of second source +// widen the result into the destination vector. +int16x8_t __builtin_mpl_vector_addl_high_v8i8(int8x16_t, int8x16_t); +int32x4_t __builtin_mpl_vector_addl_high_v4i16(int16x8_t, int16x8_t); +int64x2_t __builtin_mpl_vector_addl_high_v2i32(int32x4_t, int32x4_t); +uint16x8_t __builtin_mpl_vector_addl_high_v8u8(uint8x16_t, uint8x16_t); +uint32x4_t __builtin_mpl_vector_addl_high_v4u16(uint16x8_t, uint16x8_t); +uint64x2_t __builtin_mpl_vector_addl_high_v2u32(uint32x4_t, uint32x4_t); + +// vecTy vector_addw_low(vecTy src1, vecTy src2) +// Add each element of the source vector to second source +// widen the result into the destination vector. +int16x8_t __builtin_mpl_vector_addw_low_v8i8(int16x8_t, int8x8_t); +int32x4_t __builtin_mpl_vector_addw_low_v4i16(int32x4_t, int16x4_t); +int64x2_t __builtin_mpl_vector_addw_low_v2i32(int64x2_t, int32x2_t); +uint16x8_t __builtin_mpl_vector_addw_low_v8u8(uint16x8_t, uint8x8_t); +uint32x4_t __builtin_mpl_vector_addw_low_v4u16(uint32x4_t, uint16x4_t); +uint64x2_t __builtin_mpl_vector_addw_low_v2u32(uint64x2_t, uint32x2_t); + +// vecTy vector_addw_high(vecTy src1, vecTy src2) +// Add each element of the source vector to upper half of second source +// widen the result into the destination vector. +int16x8_t __builtin_mpl_vector_addw_high_v8i8(int16x8_t, int8x16_t); +int32x4_t __builtin_mpl_vector_addw_high_v4i16(int32x4_t, int16x8_t); +int64x2_t __builtin_mpl_vector_addw_high_v2i32(int64x2_t, int32x4_t); +uint16x8_t __builtin_mpl_vector_addw_high_v8u8(uint16x8_t, uint8x16_t); +uint32x4_t __builtin_mpl_vector_addw_high_v4u16(uint32x4_t, uint16x8_t); +uint64x2_t __builtin_mpl_vector_addw_high_v2u32(uint64x2_t, uint32x4_t); + +// vectTy vector_from_scalar(scalarTy val) +// Create a vector by replicating the scalar value to all elements of the +// vector. 
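// Illustrative per-lane sketch (plain C; hypothetical helper, not part of this header) of
// the scalar replication described above. The vdup_n_* macros defined later in this file
// map onto these builtins and fill every lane of the destination with the same value:
#include <stdint.h>
static inline void from_scalar_sketch_s32(int32_t value, int32_t out[], int lanes) {
  for (int i = 0; i < lanes; ++i) {
    out[i] = value;  // every destination lane receives the same scalar
  }
}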
+int64x2_t __builtin_mpl_vector_from_scalar_v2i64(int64_t); +int32x4_t __builtin_mpl_vector_from_scalar_v4i32(int32_t); +int16x8_t __builtin_mpl_vector_from_scalar_v8i16(int16_t); +int8x16_t __builtin_mpl_vector_from_scalar_v16i8(int8_t); +uint64x2_t __builtin_mpl_vector_from_scalar_v2u64(uint64_t); +uint32x4_t __builtin_mpl_vector_from_scalar_v4u32(uint32_t); +uint16x8_t __builtin_mpl_vector_from_scalar_v8u16(uint16_t); +uint8x16_t __builtin_mpl_vector_from_scalar_v16u8(uint8_t); +float64x2_t __builtin_mpl_vector_from_scalar_v2f64(float64_t); +float32x4_t __builtin_mpl_vector_from_scalar_v4f32(float32_t); +int64x1_t __builtin_mpl_vector_from_scalar_v1i64(int64_t); +int32x2_t __builtin_mpl_vector_from_scalar_v2i32(int32_t); +int16x4_t __builtin_mpl_vector_from_scalar_v4i16(int16_t); +int8x8_t __builtin_mpl_vector_from_scalar_v8i8(int8_t); +uint64x1_t __builtin_mpl_vector_from_scalar_v1u64(uint64_t); +uint32x2_t __builtin_mpl_vector_from_scalar_v2u32(uint32_t); +uint16x4_t __builtin_mpl_vector_from_scalar_v4u16(uint16_t); +uint8x8_t __builtin_mpl_vector_from_scalar_v8u8(uint8_t); +float64x1_t __builtin_mpl_vector_from_scalar_v1f64(float64_t); +float32x2_t __builtin_mpl_vector_from_scalar_v2f32(float32_t); + +// vecTy2 vector_madd(vecTy2 accum, vecTy1 src1, vecTy1 src2) +// Multiply the elements of src1 and src2, then accumulate into accum. +// Elements of vecTy2 are twice as long as elements of vecTy1. +int64x2_t __builtin_mpl_vector_madd_v2i32(int64x2_t, int32x2_t, int32x2_t); +int32x4_t __builtin_mpl_vector_madd_v4i16(int32x4_t, int16x4_t, int16x4_t); +int16x8_t __builtin_mpl_vector_madd_v8i8(int16x8_t, int8x8_t, int8x8_t); +uint64x2_t __builtin_mpl_vector_madd_v2u32(uint64x2_t, uint32x2_t, uint32x2_t); +uint32x4_t __builtin_mpl_vector_madd_v4u16(uint32x4_t, uint16x4_t, uint16x4_t); +uint16x8_t __builtin_mpl_vector_madd_v8u8(uint16x8_t, uint8x8_t, uint8x8_t); + +// vecTy2 vector_mull_low(vecTy1 src1, vecTy1 src2) +// Multiply the elements of src1 and src2. Elements of vecTy2 are twice as +// long as elements of vecTy1. +int64x2_t __builtin_mpl_vector_mull_low_v2i32(int32x2_t, int32x2_t); +int32x4_t __builtin_mpl_vector_mull_low_v4i16(int16x4_t, int16x4_t); +int16x8_t __builtin_mpl_vector_mull_low_v8i8(int8x8_t, int8x8_t); +uint64x2_t __builtin_mpl_vector_mull_low_v2u32(uint32x2_t, uint32x2_t); +uint32x4_t __builtin_mpl_vector_mull_low_v4u16(uint16x4_t, uint16x4_t); +uint16x8_t __builtin_mpl_vector_mull_low_v8u8(uint8x8_t, uint8x8_t); + +// vecTy2 vector_mull_high(vecTy1 src1, vecTy1 src2) +// Multiply the upper elements of src1 and src2. Elements of vecTy2 are twice +// as long as elements of vecTy1. +int64x2_t __builtin_mpl_vector_mull_high_v2i32(int32x4_t, int32x4_t); +int32x4_t __builtin_mpl_vector_mull_high_v4i16(int16x8_t, int16x8_t); +int16x8_t __builtin_mpl_vector_mull_high_v8i8(int8x16_t, int8x16_t); +uint64x2_t __builtin_mpl_vector_mull_high_v2u32(uint32x4_t, uint32x4_t); +uint32x4_t __builtin_mpl_vector_mull_high_v4u16(uint16x8_t, uint16x8_t); +uint16x8_t __builtin_mpl_vector_mull_high_v8u8(uint8x16_t, uint8x16_t); + +// vecTy vector_merge(vecTy src1, vecTy src2, int n) +// Create a vector by concatenating the high elements of src1, starting +// with the nth element, followed by the low elements of src2. 
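// Illustrative index sketch (plain C; hypothetical helper) of the merge described above:
// lane i of the result comes from the tail of src1 while n + i is still in range, and
// from the head of src2 afterwards.
#include <stdint.h>
static inline void merge_sketch_s32(const int32_t src1[], const int32_t src2[],
                                    int32_t dst[], int lanes, int n) {
  for (int i = 0; i < lanes; ++i) {
    dst[i] = (n + i < lanes) ? src1[n + i]            // high elements of src1, from lane n
                             : src2[(n + i) - lanes]; // followed by the low elements of src2
  }
}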
+int64x2_t __builtin_mpl_vector_merge_v2i64(int64x2_t, int64x2_t, int32_t); +int32x4_t __builtin_mpl_vector_merge_v4i32(int32x4_t, int32x4_t, int32_t); +int16x8_t __builtin_mpl_vector_merge_v8i16(int16x8_t, int16x8_t, int32_t); +int8x16_t __builtin_mpl_vector_merge_v16i8(int8x16_t, int8x16_t, int32_t); +uint64x2_t __builtin_mpl_vector_merge_v2u64(uint64x2_t, uint64x2_t, int32_t); +uint32x4_t __builtin_mpl_vector_merge_v4u32(uint32x4_t, uint32x4_t, int32_t); +uint16x8_t __builtin_mpl_vector_merge_v8u16(uint16x8_t, uint16x8_t, int32_t); +uint8x16_t __builtin_mpl_vector_merge_v16u8(uint8x16_t, uint8x16_t, int32_t); +float64x2_t __builtin_mpl_vector_merge_v2f64(float64x2_t, float64x2_t, int32_t); +float32x4_t __builtin_mpl_vector_merge_v4f32(float32x4_t, float32x4_t, int32_t); +int64x1_t __builtin_mpl_vector_merge_v1i64(int64x1_t, int64x1_t, int32_t); +int32x2_t __builtin_mpl_vector_merge_v2i32(int32x2_t, int32x2_t, int32_t); +int16x4_t __builtin_mpl_vector_merge_v4i16(int16x4_t, int16x4_t, int32_t); +int8x8_t __builtin_mpl_vector_merge_v8i8(int8x8_t, int8x8_t, int32_t); +uint64x1_t __builtin_mpl_vector_merge_v1u64(uint64x1_t, uint64x1_t, int32_t); +uint32x2_t __builtin_mpl_vector_merge_v2u32(uint32x2_t, uint32x2_t, int32_t); +uint16x4_t __builtin_mpl_vector_merge_v4u16(uint16x4_t, uint16x4_t, int32_t); +uint8x8_t __builtin_mpl_vector_merge_v8u8(uint8x8_t, uint8x8_t, int32_t); +float64x1_t __builtin_mpl_vector_merge_v1f64(float64x1_t, float64x1_t, int32_t); +float32x2_t __builtin_mpl_vector_merge_v2f32(float32x2_t, float32x2_t, int32_t); + +// vecTy2 vector_get_low(vecTy1 src) +// Create a vector from the low part of the source vector. +int64x1_t __builtin_mpl_vector_get_low_v2i64(int64x2_t); +int32x2_t __builtin_mpl_vector_get_low_v4i32(int32x4_t); +int16x4_t __builtin_mpl_vector_get_low_v8i16(int16x8_t); +int8x8_t __builtin_mpl_vector_get_low_v16i8(int8x16_t); +uint64x1_t __builtin_mpl_vector_get_low_v2u64(uint64x2_t); +uint32x2_t __builtin_mpl_vector_get_low_v4u32(uint32x4_t); +uint16x4_t __builtin_mpl_vector_get_low_v8u16(uint16x8_t); +uint8x8_t __builtin_mpl_vector_get_low_v16u8(uint8x16_t); +float64x1_t __builtin_mpl_vector_get_low_v2f64(float64x2_t); +float32x2_t __builtin_mpl_vector_get_low_v4f32(float32x4_t); + +// vecTy2 vector_get_high(vecTy1 src) +// Create a vector from the high part of the source vector. +int64x1_t __builtin_mpl_vector_get_high_v2i64(int64x2_t); +int32x2_t __builtin_mpl_vector_get_high_v4i32(int32x4_t); +int16x4_t __builtin_mpl_vector_get_high_v8i16(int16x8_t); +int8x8_t __builtin_mpl_vector_get_high_v16i8(int8x16_t); +uint64x1_t __builtin_mpl_vector_get_high_v2u64(uint64x2_t); +uint32x2_t __builtin_mpl_vector_get_high_v4u32(uint32x4_t); +uint16x4_t __builtin_mpl_vector_get_high_v8u16(uint16x8_t); +uint8x8_t __builtin_mpl_vector_get_high_v16u8(uint8x16_t); +float64x1_t __builtin_mpl_vector_get_high_v2f64(float64x2_t); +float32x2_t __builtin_mpl_vector_get_high_v4f32(float32x4_t); + +// scalarTy vector_get_element(vecTy src, int n) +// Get the nth element of the source vector. 
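// Illustrative sketch (plain C; hypothetical helper) of the element read described above:
// the builtin simply yields lane n of the source vector as a scalar.
#include <stdint.h>
static inline int32_t get_element_sketch_s32(const int32_t vec[], int n) {
  return vec[n];  // lane n of the source vector
}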
+int64_t __builtin_mpl_vector_get_element_v2i64(int64x2_t, int32_t); +int32_t __builtin_mpl_vector_get_element_v4i32(int32x4_t, int32_t); +int16_t __builtin_mpl_vector_get_element_v8i16(int16x8_t, int32_t); +int8_t __builtin_mpl_vector_get_element_v16i8(int8x16_t, int32_t); +uint64_t __builtin_mpl_vector_get_element_v2u64(uint64x2_t, int32_t); +uint32_t __builtin_mpl_vector_get_element_v4u32(uint32x4_t, int32_t); +uint16_t __builtin_mpl_vector_get_element_v8u16(uint16x8_t, int32_t); +uint8_t __builtin_mpl_vector_get_element_v16u8(uint8x16_t, int32_t); +float64_t __builtin_mpl_vector_get_element_v2f64(float64x2_t, int32_t); +float32_t __builtin_mpl_vector_get_element_v4f32(float32x4_t, int32_t); +int64_t __builtin_mpl_vector_get_element_v1i64(int64x1_t, int32_t); +int32_t __builtin_mpl_vector_get_element_v2i32(int32x2_t, int32_t); +int16_t __builtin_mpl_vector_get_element_v4i16(int16x4_t, int32_t); +int8_t __builtin_mpl_vector_get_element_v8i8(int8x8_t, int32_t); +uint64_t __builtin_mpl_vector_get_element_v1u64(uint64x1_t, int32_t); +uint32_t __builtin_mpl_vector_get_element_v2u32(uint32x2_t, int32_t); +uint16_t __builtin_mpl_vector_get_element_v4u16(uint16x4_t, int32_t); +uint8_t __builtin_mpl_vector_get_element_v8u8(uint8x8_t, int32_t); +float64_t __builtin_mpl_vector_get_element_v1f64(float64x1_t, int32_t); +float32_t __builtin_mpl_vector_get_element_v2f32(float32x2_t, int32_t); + +// vecTy vector_set_element(ScalarTy value, VecTy vec, int n) +// Set the nth element of the source vector to value. +int64x2_t __builtin_mpl_vector_set_element_v2i64(int64_t, int64x2_t, int32_t); +int32x4_t __builtin_mpl_vector_set_element_v4i32(int32_t, int32x4_t, int32_t); +int16x8_t __builtin_mpl_vector_set_element_v8i16(int16_t, int16x8_t, int32_t); +int8x16_t __builtin_mpl_vector_set_element_v16i8(int8_t, int8x16_t, int32_t); +uint64x2_t __builtin_mpl_vector_set_element_v2u64(uint64_t, uint64x2_t, + int32_t); +uint32x4_t __builtin_mpl_vector_set_element_v4u32(uint32_t, uint32x4_t, + int32_t); +uint16x8_t __builtin_mpl_vector_set_element_v8u16(uint16_t, uint16x8_t, + int32_t); +uint8x16_t __builtin_mpl_vector_set_element_v16u8(uint8_t, uint8x16_t, int32_t); +float64x2_t __builtin_mpl_vector_set_element_v2f64(float64_t, float64x2_t, + int32_t); +float32x4_t __builtin_mpl_vector_set_element_v4f32(float32_t, float32x4_t, + int32_t); +int64x1_t __builtin_mpl_vector_set_element_v1i64(int64_t, int64x1_t, int32_t); +int32x2_t __builtin_mpl_vector_set_element_v2i32(int32_t, int32x2_t, int32_t); +int16x4_t __builtin_mpl_vector_set_element_v4i16(int16_t, int16x4_t, int32_t); +int8x8_t __builtin_mpl_vector_set_element_v8i8(int8_t, int8x8_t, int32_t); +uint64x1_t __builtin_mpl_vector_set_element_v1u64(uint64_t, uint64x1_t, + int32_t); +uint32x2_t __builtin_mpl_vector_set_element_v2u32(uint32_t, uint32x2_t, + int32_t); +uint16x4_t __builtin_mpl_vector_set_element_v4u16(uint16_t, uint16x4_t, + int32_t); +uint8x8_t __builtin_mpl_vector_set_element_v8u8(uint8_t, uint8x8_t, int32_t); +float64x1_t __builtin_mpl_vector_set_element_v1f64(float64_t, float64x1_t, + int32_t); +float32x2_t __builtin_mpl_vector_set_element_v2f32(float32_t, float32x2_t, + int32_t); + +// vecTy2 vector_abdl(vectTy1 src2, vectTy2 src2) +// Create a widened vector by getting the abs value of subtracted arguments. 
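// Illustrative per-lane sketch (plain C; hypothetical helper) of the widening absolute
// difference described above, which the vabdl_* macros later in this header map to.
// Widening before the subtraction means |a - b| always fits the 16-bit result lane.
#include <stdint.h>
static inline int16_t abdl_lane_sketch_s8(int8_t a, int8_t b) {
  int16_t diff = (int16_t)((int16_t)a - (int16_t)b);  // subtract at the wider width
  return (int16_t)(diff < 0 ? -diff : diff);          // absolute value of the difference
}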
+int16x8_t __builtin_mpl_vector_labssub_low_v8i8(int8x8_t, int8x8_t); +int32x4_t __builtin_mpl_vector_labssub_low_v4i16(int16x4_t, int16x4_t); +int64x2_t __builtin_mpl_vector_labssub_low_v2i32(int32x2_t, int32x2_t); +uint16x8_t __builtin_mpl_vector_labssub_low_v8u8(uint8x8_t, uint8x8_t); +uint32x4_t __builtin_mpl_vector_labssub_low_v4u16(uint16x4_t, uint16x4_t); +uint64x2_t __builtin_mpl_vector_labssub_low_v2u32(uint32x2_t, uint32x2_t); + +// vecTy2 vector_abdl_high(vectTy1 src2, vectTy2 src2) +// Create a widened vector by getting the abs value of subtracted high arguments. +int16x8_t __builtin_mpl_vector_labssub_high_v8i8(int8x16_t, int8x16_t); +int32x4_t __builtin_mpl_vector_labssub_high_v4i16(int16x8_t, int16x8_t); +int64x2_t __builtin_mpl_vector_labssub_high_v2i32(int32x4_t, int32x4_t); +uint16x8_t __builtin_mpl_vector_labssub_high_v8u8(uint8x16_t, uint8x16_t); +uint32x4_t __builtin_mpl_vector_labssub_high_v4u16(uint16x8_t, uint16x8_t); +uint64x2_t __builtin_mpl_vector_labssub_high_v2u32(uint32x4_t, uint32x4_t); + +// vecTy2 vector_narrow_low(vecTy1 src) +// Narrow each element of the source vector to half of the original width, +// writing the lower half into the destination vector. +int32x2_t __builtin_mpl_vector_narrow_low_v2i64(int64x2_t); +int16x4_t __builtin_mpl_vector_narrow_low_v4i32(int32x4_t); +int8x8_t __builtin_mpl_vector_narrow_low_v8i16(int16x8_t); +uint32x2_t __builtin_mpl_vector_narrow_low_v2u64(uint64x2_t); +uint16x4_t __builtin_mpl_vector_narrow_low_v4u32(uint32x4_t); +uint8x8_t __builtin_mpl_vector_narrow_low_v8u16(uint16x8_t); + +// vecTy2 vector_narrow_high(vecTy1 src1, vecTy2 src2) +// Narrow each element of the source vector to half of the original width, +// concatenate the upper half into the destination vector. +int32x4_t __builtin_mpl_vector_narrow_high_v2i64(int32x2_t, int64x2_t); +int16x8_t __builtin_mpl_vector_narrow_high_v4i32(int16x4_t, int32x4_t); +int8x16_t __builtin_mpl_vector_narrow_high_v8i16(int8x8_t, int16x8_t); +uint32x4_t __builtin_mpl_vector_narrow_high_v2u64(uint32x2_t, uint64x2_t); +uint16x8_t __builtin_mpl_vector_narrow_high_v4u32(uint16x4_t, uint32x4_t); +uint8x16_t __builtin_mpl_vector_narrow_high_v8u16(uint8x8_t, uint16x8_t); + +// vecTy1 vector_adapl(vecTy1 src1, vecTy2 src2) +// Vector pairwise addition and accumulate +int16x4_t __builtin_mpl_vector_pairwise_adalp_v8i8(int16x4_t, int8x8_t); +int32x2_t __builtin_mpl_vector_pairwise_adalp_v4i16(int32x2_t, int16x4_t); +int64x1_t __builtin_mpl_vector_pairwise_adalp_v2i32(int64x1_t, int32x2_t); +uint16x4_t __builtin_mpl_vector_pairwise_adalp_v8u8(uint16x4_t, uint8x8_t); +uint32x2_t __builtin_mpl_vector_pairwise_adalp_v4u16(uint32x2_t, uint16x4_t); +uint64x1_t __builtin_mpl_vector_pairwise_adalp_v2u32(uint64x1_t, uint32x2_t); +int16x8_t __builtin_mpl_vector_pairwise_adalp_v16i8(int16x8_t, int8x16_t); +int32x4_t __builtin_mpl_vector_pairwise_adalp_v8i16(int32x4_t, int16x8_t); +int64x2_t __builtin_mpl_vector_pairwise_adalp_v4i32(int64x2_t, int32x4_t); +uint16x8_t __builtin_mpl_vector_pairwise_adalp_v16u8(uint16x8_t, uint8x16_t); +uint32x4_t __builtin_mpl_vector_pairwise_adalp_v8u16(uint32x4_t, uint16x8_t); +uint64x2_t __builtin_mpl_vector_pairwise_adalp_v4u32(uint64x2_t, uint32x4_t); + +// vecTy2 vector_pairwise_add(vecTy1 src) +// Add pairs of elements from the source vector and put the result into the +// destination vector, whose element size is twice and the number of +// elements is half of the source vector type. 
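// Illustrative sketch (plain C; hypothetical helper) of the pairwise widening add
// described above: adjacent source lanes are summed at twice the width, so the
// destination has half as many lanes as the source.
#include <stdint.h>
static inline void pairwise_add_sketch_s16(const int16_t src[], int32_t dst[], int dstLanes) {
  for (int i = 0; i < dstLanes; ++i) {
    dst[i] = (int32_t)src[2 * i] + (int32_t)src[2 * i + 1];  // widen, then add the pair
  }
}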
+int64x2_t __builtin_mpl_vector_pairwise_add_v4i32(int32x4_t); +int32x4_t __builtin_mpl_vector_pairwise_add_v8i16(int16x8_t); +int16x8_t __builtin_mpl_vector_pairwise_add_v16i8(int8x16_t); +uint64x2_t __builtin_mpl_vector_pairwise_add_v4u32(uint32x4_t); +uint32x4_t __builtin_mpl_vector_pairwise_add_v8u16(uint16x8_t); +uint16x8_t __builtin_mpl_vector_pairwise_add_v16u8(uint8x16_t); +int64x1_t __builtin_mpl_vector_pairwise_add_v2i32(int32x2_t); +int32x2_t __builtin_mpl_vector_pairwise_add_v4i16(int16x4_t); +int16x4_t __builtin_mpl_vector_pairwise_add_v8i8(int8x8_t); +uint64x1_t __builtin_mpl_vector_pairwise_add_v2u32(uint32x2_t); +uint32x2_t __builtin_mpl_vector_pairwise_add_v4u16(uint16x4_t); +uint16x4_t __builtin_mpl_vector_pairwise_add_v8u8(uint8x8_t); + +// vecTy vector_reverse(vecTy src) +// Create a vector by reversing the order of the elements in src. +int64x2_t __builtin_mpl_vector_reverse_v2i64(int64x2_t); +int32x4_t __builtin_mpl_vector_reverse_v4i32(int32x4_t); +int16x8_t __builtin_mpl_vector_reverse_v8i16(int16x8_t); +int8x16_t __builtin_mpl_vector_reverse_v16i8(int8x16_t); +uint64x2_t __builtin_mpl_vector_reverse_v2u64(uint64x2_t); +uint32x4_t __builtin_mpl_vector_reverse_v4u32(uint32x4_t); +uint16x8_t __builtin_mpl_vector_reverse_v8u16(uint16x8_t); +uint8x16_t __builtin_mpl_vector_reverse_v16u8(uint8x16_t); +float64x2_t __builtin_mpl_vector_reverse_v2f64(float64x2_t); +float32x4_t __builtin_mpl_vector_reverse_v4f32(float32x4_t); +int64x1_t __builtin_mpl_vector_reverse_v1i64(int64x1_t); +int32x2_t __builtin_mpl_vector_reverse_v2i32(int32x2_t); +int16x4_t __builtin_mpl_vector_reverse_v4i16(int16x4_t); +int8x8_t __builtin_mpl_vector_reverse_v8i8(int8x8_t); +uint64x1_t __builtin_mpl_vector_reverse_v1u64(uint64x1_t); +uint32x2_t __builtin_mpl_vector_reverse_v2u32(uint32x2_t); +uint16x4_t __builtin_mpl_vector_reverse_v4u16(uint16x4_t); +uint8x8_t __builtin_mpl_vector_reverse_v8u8(uint8x8_t); +float64x1_t __builtin_mpl_vector_reverse_v1f64(float64x1_t); +float32x2_t __builtin_mpl_vector_reverse_v2f32(float32x2_t); + +// vecTy vector_shli(vecTy src, const int n) +// Shift each element in the vector left by n. +int64x2_t __builtin_mpl_vector_shli_v2i64(int64x2_t, const int); +int32x4_t __builtin_mpl_vector_shli_v4i32(int32x4_t, const int); +int16x8_t __builtin_mpl_vector_shli_v8i16(int16x8_t, const int); +int8x16_t __builtin_mpl_vector_shli_v16i8(int8x16_t, const int); +uint64x2_t __builtin_mpl_vector_shli_v2u64(uint64x2_t, const int); +uint32x4_t __builtin_mpl_vector_shli_v4u32(uint32x4_t, const int); +uint16x8_t __builtin_mpl_vector_shli_v8u16(uint16x8_t, const int); +uint8x16_t __builtin_mpl_vector_shli_v16u8(uint8x16_t, const int); +int64x1_t __builtin_mpl_vector_shli_v1i64(int64x1_t, const int); +int32x2_t __builtin_mpl_vector_shli_v2i32(int32x2_t, const int); +int16x4_t __builtin_mpl_vector_shli_v4i16(int16x4_t, const int); +int8x8_t __builtin_mpl_vector_shli_v8i8(int8x8_t, const int); +uint64x1_t __builtin_mpl_vector_shli_v1u64(uint64x1_t, const int); +uint32x2_t __builtin_mpl_vector_shli_v2u32(uint32x2_t, const int); +uint16x4_t __builtin_mpl_vector_shli_v4u16(uint16x4_t, const int); +uint8x8_t __builtin_mpl_vector_shli_v8u8(uint8x8_t, const int); + +// vecTy vector_shri(vecTy src, const int n) +// Shift each element in the vector right by n. 
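// Illustrative per-lane sketch (plain C; hypothetical helpers) of the shifts declared
// below. The signed shri forms are expected to shift arithmetically (sign-filling) and
// the unsigned shru forms logically (zero-filling); on the AArch64 targets this header
// serves, `>>` on a signed lane behaves arithmetically as shown here.
#include <stdint.h>
static inline int32_t shr_lane_sketch_s32(int32_t v, int n) {
  return v >> n;   // arithmetic right shift for signed lanes
}
static inline uint32_t shr_lane_sketch_u32(uint32_t v, int n) {
  return v >> n;   // logical right shift for unsigned lanes
}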
+int64x2_t __builtin_mpl_vector_shri_v2i64(int64x2_t, const int); +int32x4_t __builtin_mpl_vector_shri_v4i32(int32x4_t, const int); +int16x8_t __builtin_mpl_vector_shri_v8i16(int16x8_t, const int); +int8x16_t __builtin_mpl_vector_shri_v16i8(int8x16_t, const int); +uint64x2_t __builtin_mpl_vector_shru_v2u64(uint64x2_t, const int); +uint32x4_t __builtin_mpl_vector_shru_v4u32(uint32x4_t, const int); +uint16x8_t __builtin_mpl_vector_shru_v8u16(uint16x8_t, const int); +uint8x16_t __builtin_mpl_vector_shru_v16u8(uint8x16_t, const int); +int64x1_t __builtin_mpl_vector_shri_v1i64(int64x1_t, const int); +int32x2_t __builtin_mpl_vector_shri_v2i32(int32x2_t, const int); +int16x4_t __builtin_mpl_vector_shri_v4i16(int16x4_t, const int); +int8x8_t __builtin_mpl_vector_shri_v8i8(int8x8_t, const int); +uint64x1_t __builtin_mpl_vector_shru_v1u64(uint64x1_t, const int); +uint32x2_t __builtin_mpl_vector_shru_v2u32(uint32x2_t, const int); +uint16x4_t __builtin_mpl_vector_shru_v4u16(uint16x4_t, const int); +uint8x8_t __builtin_mpl_vector_shru_v8u8(uint8x8_t, const int); + +// vecTy2 vector_shift_narrow_low(vecTy1 src, const int n) +// Shift each element in the vector right by n, narrow each element to half +// of the original width (truncating), then write the result to the lower +// half of the destination vector. +int32x2_t __builtin_mpl_vector_shr_narrow_low_v2i64(int64x2_t, const int); +int16x4_t __builtin_mpl_vector_shr_narrow_low_v4i32(int32x4_t, const int); +int8x8_t __builtin_mpl_vector_shr_narrow_low_v8i16(int16x8_t, const int); +uint32x2_t __builtin_mpl_vector_shr_narrow_low_v2u64(uint64x2_t, const int); +uint16x4_t __builtin_mpl_vector_shr_narrow_low_v4u32(uint32x4_t, const int); +uint8x8_t __builtin_mpl_vector_shr_narrow_low_v8u16(uint16x8_t, const int); + +// scalarTy vector_sum(vecTy src) +// Sum all of the elements in the vector into a scalar. +int64_t __builtin_mpl_vector_sum_v2i64(int64x2_t); +int32_t __builtin_mpl_vector_sum_v4i32(int32x4_t); +int16_t __builtin_mpl_vector_sum_v8i16(int16x8_t); +int8_t __builtin_mpl_vector_sum_v16i8(int8x16_t); +uint64_t __builtin_mpl_vector_sum_v2u64(uint64x2_t); +uint32_t __builtin_mpl_vector_sum_v4u32(uint32x4_t); +uint16_t __builtin_mpl_vector_sum_v8u16(uint16x8_t); +uint8_t __builtin_mpl_vector_sum_v16u8(uint8x16_t); +float64_t __builtin_mpl_vector_sum_v2f64(float64x2_t); +float32_t __builtin_mpl_vector_sum_v4f32(float32x4_t); +int32_t __builtin_mpl_vector_sum_v2i32(int32x2_t); +int16_t __builtin_mpl_vector_sum_v4i16(int16x4_t); +int8_t __builtin_mpl_vector_sum_v8i8(int8x8_t); +uint32_t __builtin_mpl_vector_sum_v2u32(uint32x2_t); +uint16_t __builtin_mpl_vector_sum_v4u16(uint16x4_t); +uint8_t __builtin_mpl_vector_sum_v8u8(uint8x8_t); +float32_t __builtin_mpl_vector_sum_v2f32(float32x2_t); + +// vecTy table_lookup(vecTy tbl, vecTy idx) +// Performs a table vector lookup. 
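// Illustrative sketch (plain C; hypothetical helper) of the table lookup described above:
// each lane of idx selects a lane of tbl. Treating out-of-range indices as selecting 0 is
// an assumption here, mirroring the usual behaviour of the AArch64 TBL instruction.
#include <stdint.h>
static inline void table_lookup_sketch_u8(const uint8_t tbl[], int tblLanes,
                                          const uint8_t idx[], uint8_t dst[], int lanes) {
  for (int i = 0; i < lanes; ++i) {
    dst[i] = (idx[i] < tblLanes) ? tbl[idx[i]] : 0;  // assumed: out-of-range lanes become 0
  }
}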
+int64x2_t __builtin_mpl_vector_table_lookup_v2i64(int64x2_t, int64x2_t); +int32x4_t __builtin_mpl_vector_table_lookup_v4i32(int32x4_t, int32x4_t); +int16x8_t __builtin_mpl_vector_table_lookup_v8i16(int16x8_t, int16x8_t); +int8x16_t __builtin_mpl_vector_table_lookup_v16i8(int8x16_t, int8x16_t); +uint64x2_t __builtin_mpl_vector_table_lookup_v2u64(uint64x2_t, uint64x2_t); +uint32x4_t __builtin_mpl_vector_table_lookup_v4u32(uint32x4_t, uint32x4_t); +uint16x8_t __builtin_mpl_vector_table_lookup_v8u16(uint16x8_t, uint16x8_t); +uint8x16_t __builtin_mpl_vector_table_lookup_v16u8(uint8x16_t, uint8x16_t); +float64x2_t __builtin_mpl_vector_table_lookup_v2f64(float64x2_t, float64x2_t); +float32x4_t __builtin_mpl_vector_table_lookup_v4f32(float32x4_t, float32x4_t); +int64x1_t __builtin_mpl_vector_table_lookup_v1i64(int64x1_t, int64x1_t); +int32x2_t __builtin_mpl_vector_table_lookup_v2i32(int32x2_t, int32x2_t); +int16x4_t __builtin_mpl_vector_table_lookup_v4i16(int16x4_t, int16x4_t); +int8x8_t __builtin_mpl_vector_table_lookup_v8i8(int8x8_t, int8x8_t); +uint64x1_t __builtin_mpl_vector_table_lookup_v1u64(uint64x1_t, uint64x1_t); +uint32x2_t __builtin_mpl_vector_table_lookup_v2u32(uint32x2_t, uint32x2_t); +uint16x4_t __builtin_mpl_vector_table_lookup_v4u16(uint16x4_t, uint16x4_t); +uint8x8_t __builtin_mpl_vector_table_lookup_v8u8(uint8x8_t, uint8x8_t); +float64x1_t __builtin_mpl_vector_table_lookup_v1f64(float64x1_t, float64x1_t); +float32x2_t __builtin_mpl_vector_table_lookup_v2f32(float32x2_t, float32x2_t); + +// vecTy2 vector_widen_low(vecTy1 src) +// Widen each element of the source vector to half of the original width, +// writing the lower half into the destination vector. +int64x2_t __builtin_mpl_vector_widen_low_v2i32(int32x2_t); +int32x4_t __builtin_mpl_vector_widen_low_v4i16(int16x4_t); +int16x8_t __builtin_mpl_vector_widen_low_v8i8(int8x8_t); +uint64x2_t __builtin_mpl_vector_widen_low_v2u32(uint32x2_t); +uint32x4_t __builtin_mpl_vector_widen_low_v4u16(uint16x4_t); +uint16x8_t __builtin_mpl_vector_widen_low_v8u8(uint8x8_t); + +// vecTy2 vector_widen_high(vecTy1 src) +// Widen each element of the source vector to half of the original width, +// writing the higher half into the destination vector. +int64x2_t __builtin_mpl_vector_widen_high_v2i32(int32x4_t); +int32x4_t __builtin_mpl_vector_widen_high_v4i16(int16x8_t); +int16x8_t __builtin_mpl_vector_widen_high_v8i8(int8x16_t); +uint64x2_t __builtin_mpl_vector_widen_high_v2u32(uint32x4_t); +uint32x4_t __builtin_mpl_vector_widen_high_v4u16(uint16x8_t); +uint16x8_t __builtin_mpl_vector_widen_high_v8u8(uint8x16_t); + +// vecArrTy vector_zip(vecTy a, vecTy b) +// Interleave the upper half of elements from a and b into the destination +// vector. +int32x2x2_t __builtin_mpl_vector_zip_v2i32(int32x2_t, int32x2_t); +int16x4x2_t __builtin_mpl_vector_zip_v4i16(int16x4_t, int16x4_t); +int8x8x2_t __builtin_mpl_vector_zip_v8i8(int8x8_t, int8x8_t); +uint32x2x2_t __builtin_mpl_vector_zip_v2u32(uint32x2_t, uint32x2_t); +uint16x4x2_t __builtin_mpl_vector_zip_v4u16(uint16x4_t, uint16x4_t); +uint8x8x2_t __builtin_mpl_vector_zip_v8u8(uint8x8_t, uint8x8_t); +float32x2x2_t __builtin_mpl_vector_zip_v2f32(float32x2_t, float32x2_t); + +// vecTy vector_load(scalarTy *ptr) +// Load the elements pointed to by ptr into a vector. 
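// Illustrative sketch (plain C; hypothetical helper) of the load/store pair declared
// below: `lanes` contiguous elements are moved between memory and a vector value, so the
// observable memory effect is that of a plain contiguous copy.
#include <stdint.h>
#include <string.h>
static inline void load_store_sketch_s32(const int32_t *src, int32_t *dst, int lanes) {
  memcpy(dst, src, (size_t)lanes * sizeof(int32_t));  // what a load followed by a store does
}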
+int64x2_t __builtin_mpl_vector_load_v2i64(int64_t *); +int32x4_t __builtin_mpl_vector_load_v4i32(int32_t *); +int16x8_t __builtin_mpl_vector_load_v8i16(int16_t *); +int8x16_t __builtin_mpl_vector_load_v16i8(int8_t *); +uint64x2_t __builtin_mpl_vector_load_v2u64(uint64_t *); +uint32x4_t __builtin_mpl_vector_load_v4u32(uint32_t *); +uint16x8_t __builtin_mpl_vector_load_v8u16(uint16_t *); +uint8x16_t __builtin_mpl_vector_load_v16u8(uint8_t *); +float64x2_t __builtin_mpl_vector_load_v2f64(float64_t *); +float32x4_t __builtin_mpl_vector_load_v4f32(float32_t *); +int64x1_t __builtin_mpl_vector_load_v1i64(int64_t *); +int32x2_t __builtin_mpl_vector_load_v2i32(int32_t *); +int16x4_t __builtin_mpl_vector_load_v4i16(int16_t *); +int8x8_t __builtin_mpl_vector_load_v8i8(int8_t *); +uint64x1_t __builtin_mpl_vector_load_v1u64(uint64_t *); +uint32x2_t __builtin_mpl_vector_load_v2u32(uint32_t *); +uint16x4_t __builtin_mpl_vector_load_v4u16(uint16_t *); +uint8x8_t __builtin_mpl_vector_load_v8u8(uint8_t *); +float64x1_t __builtin_mpl_vector_load_v1f64(float64_t *); +float32x2_t __builtin_mpl_vector_load_v2f32(float32_t *); + +// void vector_store(scalarTy *ptr, vecTy src) +// Store the elements from src into the memory pointed to by ptr. +void __builtin_mpl_vector_store_v2i64(int64_t *, int64x2_t); +void __builtin_mpl_vector_store_v4i32(int32_t *, int32x4_t); +void __builtin_mpl_vector_store_v8i16(int16_t *, int16x8_t); +void __builtin_mpl_vector_store_v16i8(int8_t *, int8x16_t); +void __builtin_mpl_vector_store_v2u64(uint64_t *, uint64x2_t); +void __builtin_mpl_vector_store_v4u32(uint32_t *, uint32x4_t); +void __builtin_mpl_vector_store_v8u16(uint16_t *, uint16x8_t); +void __builtin_mpl_vector_store_v16u8(uint8_t *, uint8x16_t); +void __builtin_mpl_vector_store_v2f64(float64_t *, float64x2_t); +void __builtin_mpl_vector_store_v4f32(float32_t *, float32x4_t); +void __builtin_mpl_vector_store_v1i64(int64_t *, int64x1_t); +void __builtin_mpl_vector_store_v2i32(int32_t *, int32x2_t); +void __builtin_mpl_vector_store_v4i16(int16_t *, int16x4_t); +void __builtin_mpl_vector_store_v8i8(int8_t *, int8x8_t); +void __builtin_mpl_vector_store_v1u64(uint64_t *, uint64x1_t); +void __builtin_mpl_vector_store_v2u32(uint32_t *, uint32x2_t); +void __builtin_mpl_vector_store_v4u16(uint16_t *, uint16x4_t); +void __builtin_mpl_vector_store_v8u8(uint8_t *, uint8x8_t); +void __builtin_mpl_vector_store_v1f64(float64_t *, float64x1_t); +void __builtin_mpl_vector_store_v2f32(float32_t *, float32x2_t); + +// vecTy vector_subl_low(vecTy src1, vecTy src2) +// Subtract each element of the source vector to second source +// widen the result into the destination vector. +int16x8_t __builtin_mpl_vector_subl_low_v8i8(int8x8_t, int8x8_t); +int32x4_t __builtin_mpl_vector_subl_low_v4i16(int16x4_t, int16x4_t); +int64x2_t __builtin_mpl_vector_subl_low_v2i32(int32x2_t, int32x2_t); +uint16x8_t __builtin_mpl_vector_subl_low_v8u8(uint8x8_t, uint8x8_t); +uint32x4_t __builtin_mpl_vector_subl_low_v4u16(uint16x4_t, uint16x4_t); +uint64x2_t __builtin_mpl_vector_subl_low_v2u32(uint32x2_t, uint32x2_t); + +// vecTy vector_subl_high(vecTy src1, vecTy src2) +// Subtract each element of the source vector to upper half of second source +// widen the result into the destination vector. 
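// Illustrative per-lane sketch (plain C; hypothetical helper) of the widening subtracts
// declared here: both lanes are widened before subtracting, so the 16-bit result holds the
// exact difference of two 8-bit lanes. The *_high forms draw their inputs from the upper
// half of 128-bit sources, and the subw forms take an already-widened first operand.
#include <stdint.h>
static inline int16_t subl_lane_sketch_s8(int8_t a, int8_t b) {
  return (int16_t)((int16_t)a - (int16_t)b);  // widen first, then subtract: no wrap-around
}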
+int16x8_t __builtin_mpl_vector_subl_high_v8i8(int8x16_t, int8x16_t); +int32x4_t __builtin_mpl_vector_subl_high_v4i16(int16x8_t, int16x8_t); +int64x2_t __builtin_mpl_vector_subl_high_v2i32(int32x4_t, int32x4_t); +uint16x8_t __builtin_mpl_vector_subl_high_v8u8(uint8x16_t, uint8x16_t); +uint32x4_t __builtin_mpl_vector_subl_high_v4u16(uint16x8_t, uint16x8_t); +uint64x2_t __builtin_mpl_vector_subl_high_v2u32(uint32x4_t, uint32x4_t); + +// vecTy vector_subw_low(vecTy src1, vecTy src2) +// Subtract each element of the source vector to second source +// widen the result into the destination vector. +int16x8_t __builtin_mpl_vector_subw_low_v8i8(int16x8_t, int8x8_t); +int32x4_t __builtin_mpl_vector_subw_low_v4i16(int32x4_t, int16x4_t); +int64x2_t __builtin_mpl_vector_subw_low_v2i32(int64x2_t, int32x2_t); +uint16x8_t __builtin_mpl_vector_subw_low_v8u8(uint16x8_t, uint8x8_t); +uint32x4_t __builtin_mpl_vector_subw_low_v4u16(uint32x4_t, uint16x4_t); +uint64x2_t __builtin_mpl_vector_subw_low_v2u32(uint64x2_t, uint32x2_t); + +// vecTy vector_subw_high(vecTy src1, vecTy src2) +// Subtract each element of the source vector to upper half of second source +// widen the result into the destination vector. +int16x8_t __builtin_mpl_vector_subw_high_v8i8(int16x8_t, int8x16_t); +int32x4_t __builtin_mpl_vector_subw_high_v4i16(int32x4_t, int16x8_t); +int64x2_t __builtin_mpl_vector_subw_high_v2i32(int64x2_t, int32x4_t); +uint16x8_t __builtin_mpl_vector_subw_high_v8u8(uint16x8_t, uint8x16_t); +uint32x4_t __builtin_mpl_vector_subw_high_v4u16(uint32x4_t, uint16x8_t); +uint64x2_t __builtin_mpl_vector_subw_high_v2u32(uint64x2_t, uint32x4_t); + +// ************************* +// Supported Neon Intrinsics +// ************************* + +// vabdl +#define vabdl_s8(a, b) __builtin_mpl_vector_labssub_low_v8i8(a, b) +#define vabdl_s16(a, b) __builtin_mpl_vector_labssub_low_v4i16(a, b) +#define vabdl_s32(a, b) __builtin_mpl_vector_labssub_low_v2i32(a, b) +#define vabdl_u8(a, b) __builtin_mpl_vector_labssub_low_v8u8(a, b) +#define vabdl_u16(a, b) __builtin_mpl_vector_labssub_low_v4u16(a, b) +#define vabdl_u32(a, b) __builtin_mpl_vector_labssub_low_v2u32(a, b) + +// vabdl_high +#define vabdl_high_s8(a, b) __builtin_mpl_vector_labssub_high_v8i8(a, b) +#define vabdl_high_s16(a, b) __builtin_mpl_vector_labssub_high_v4i16(a, b) +#define vabdl_high_s32(a, b) __builtin_mpl_vector_labssub_high_v2i32(a, b) +#define vabdl_high_u8(a, b) __builtin_mpl_vector_labssub_high_v8u8(a, b) +#define vabdl_high_u16(a, b) __builtin_mpl_vector_labssub_high_v4u16(a, b) +#define vabdl_high_u32(a, b) __builtin_mpl_vector_labssub_high_v2u32(a, b) + +// vabs +#define vabs_s8(a) __builtin_mpl_vector_abs_v8i8(a) +#define vabs_s16(a) __builtin_mpl_vector_abs_v4i16(a) +#define vabs_s32(a) __builtin_mpl_vector_abs_v2i32(a) +#define vabs_s64(a) __builtin_mpl_vector_abs_v1i64(a) +#define vabs_f32(a) __builtin_mpl_vector_abs_v2f32(a) +#define vabs_f64(a) __builtin_mpl_vector_abs_v1f64(a) +#define vabsq_s8(a) __builtin_mpl_vector_abs_v16i8(a) +#define vabsq_s16(a) __builtin_mpl_vector_abs_v8i16(a) +#define vabsq_s32(a) __builtin_mpl_vector_abs_v4i32(a) +#define vabsq_s64(a) __builtin_mpl_vector_abs_v2i64(a) +#define vabsq_f32(a) __builtin_mpl_vector_abs_v4f32(a) +#define vabsq_f64(a) __builtin_mpl_vector_abs_v2f64(a) + +// vaddv +#define vaddv_s8(a) __builtin_mpl_vector_sum_v8i8(a) +#define vaddv_s16(a) __builtin_mpl_vector_sum_v4i16(a) +#define vaddv_s32(a) __builtin_mpl_vector_sum_v2i32(a) +#define vaddv_u8 (a) __builtin_mpl_vector_sum_v8u8(a) +#define 
vaddv_u16(a) __builtin_mpl_vector_sum_v4u16(a) +#define vaddv_u32(a) __builtin_mpl_vector_sum_v2u32(a) +#define vaddv_f32(a) __builtin_mpl_vector_sum_v2f32(a) +#define vaddvq_s8(a) __builtin_mpl_vector_sum_v16i8(a) +#define vaddvq_s16(a) __builtin_mpl_vector_sum_v8i16(a) +#define vaddvq_s32(a) __builtin_mpl_vector_sum_v4i32(a) +#define vaddvq_s64(a) __builtin_mpl_vector_sum_v2i64(a) +#define vaddvq_u8(a) __builtin_mpl_vector_sum_v16u8(a) +#define vaddvq_u16(a) __builtin_mpl_vector_sum_v8u16(a) +#define vaddvq_u32(a) __builtin_mpl_vector_sum_v4u32(a) +#define vaddvq_u64(a) __builtin_mpl_vector_sum_v2u64(a) +#define vaddvq_f32(a) __builtin_mpl_vector_sum_v4f32(a) +#define vaddvq_f64(a) __builtin_mpl_vector_sum_v2f64(a) + +// vqmovn +#define vqmovn_u16(a) __builtin_mpl_vector_mov_narrow_v8u16(a) +#define vqmovn_u32(a) __builtin_mpl_vector_mov_narrow_v4u32(a) +#define vqmovn_u64(a) __builtin_mpl_vector_mov_narrow_v2u64(a) +#define vqmovn_s16(a) __builtin_mpl_vector_mov_narrow_v8i16(a) +#define vqmovn_s32(a) __builtin_mpl_vector_mov_narrow_v4i32(a) +#define vqmovn_s64(a) __builtin_mpl_vector_mov_narrow_v2i64(a) + +// vaddl +#define vaddl_s8(a, b) __builtin_mpl_vector_addl_low_v8i8(a, b) +#define vaddl_s16(a, b) __builtin_mpl_vector_addl_low_v4i16(a, b) +#define vaddl_s32(a, b) __builtin_mpl_vector_addl_low_v2i32(a, b) +#define vaddl_u8(a, b) __builtin_mpl_vector_addl_low_v8u8(a, b) +#define vaddl_u16(a, b) __builtin_mpl_vector_addl_low_v4u16(a, b) +#define vaddl_u32(a, b) __builtin_mpl_vector_addl_low_v2u32(a, b) + +// vaddl_high +#define vaddl_high_s8(a, b) __builtin_mpl_vector_addl_high_v8i8(a, b) +#define vaddl_high_s16(a, b) __builtin_mpl_vector_addl_high_v4i16(a, b) +#define vaddl_high_s32(a, b) __builtin_mpl_vector_addl_high_v2i32(a, b) +#define vaddl_high_u8(a, b) __builtin_mpl_vector_addl_high_v8u8(a, b) +#define vaddl_high_u16(a, b) __builtin_mpl_vector_addl_high_v4u16(a, b) +#define vaddl_high_u32(a, b) __builtin_mpl_vector_addl_high_v2u32(a, b) + +// vaddw +#define vaddw_s8(a, b) __builtin_mpl_vector_addw_low_v8i8(a, b) +#define vaddw_s16(a, b) __builtin_mpl_vector_addw_low_v4i16(a, b) +#define vaddw_s32(a, b) __builtin_mpl_vector_addw_low_v2i32(a, b) +#define vaddw_u8(a, b) __builtin_mpl_vector_addw_low_v8u8(a, b) +#define vaddw_u16(a, b) __builtin_mpl_vector_addw_low_v4u16(a, b) +#define vaddw_u32(a, b) __builtin_mpl_vector_addw_low_v2u32(a, b) + +// vaddw_high +#define vaddw_high_s8(a, b) __builtin_mpl_vector_addw_high_v8i8(a, b) +#define vaddw_high_s16(a, b) __builtin_mpl_vector_addw_high_v4i16(a, b) +#define vaddw_high_s32(a, b) __builtin_mpl_vector_addw_high_v2i32(a, b) +#define vaddw_high_u8(a, b) __builtin_mpl_vector_addw_high_v8u8(a, b) +#define vaddw_high_u16(a, b) __builtin_mpl_vector_addw_high_v4u16(a, b) +#define vaddw_high_u32(a, b) __builtin_mpl_vector_addw_high_v2u32(a, b) + +// vadd +#define vadd_s8(a, b) (a + b) +#define vadd_s16(a, b) (a + b) +#define vadd_s32(a, b) (a + b) +#define vadd_s64(a, b) (a + b) +#define vadd_u8(a, b) (a + b) +#define vadd_u16(a, b) (a + b) +#define vadd_u32(a, b) (a + b) +#define vadd_u64(a, b) (a + b) +#define vadd_f16(a, b) (a + b) +#define vadd_f32(a, b) (a + b) +#define vadd_f64(a, b) (a + b) +#define vaddq_s8(a, b) (a + b) +#define vaddq_s16(a, b) (a + b) +#define vaddq_s32(a, b) (a + b) +#define vaddq_s64(a, b) (a + b) +#define vaddq_u8(a, b) (a + b) +#define vaddq_u16(a, b) (a + b) +#define vaddq_u32(a, b) (a + b) +#define vaddq_u64(a, b) (a + b) +#define vaddq_f16(a, b) (a + b) +#define vaddq_f32(a, b) (a + b) +#define 
vaddq_f64(a, b) (a + b) + +// vand +#define vand_s8(a, b) (a & b) +#define vand_s16(a, b) (a & b) +#define vand_s32(a, b) (a & b) +#define vand_s64(a, b) (a & b) +#define vand_u8(a, b) (a & b) +#define vand_u16(a, b) (a & b) +#define vand_u32(a, b) (a & b) +#define vand_u64(a, b) (a & b) +#define vandq_s8(a, b) (a & b) +#define vandq_s16(a, b) (a & b) +#define vandq_s32(a, b) (a & b) +#define vandq_s64(a, b) (a & b) +#define vandq_u8(a, b) (a & b) +#define vandq_u16(a, b) (a & b) +#define vandq_u32(a, b) (a & b) +#define vandq_u64(a, b) (a & b) + +// vand +#define vorr_s8(a, b) (a | b) +#define vorr_s16(a, b) (a | b) +#define vorr_s32(a, b) (a | b) +#define vorr_s64(a, b) (a | b) +#define vorr_u8(a, b) (a | b) +#define vorr_u16(a, b) (a | b) +#define vorr_u32(a, b) (a | b) +#define vorr_u64(a, b) (a | b) +#define vorrq_s8(a, b) (a | b) +#define vorrq_s16(a, b) (a | b) +#define vorrq_s32(a, b) (a | b) +#define vorrq_s64(a, b) (a | b) +#define vorrq_u8(a, b) (a | b) +#define vorrq_u16(a, b) (a | b) +#define vorrq_u32(a, b) (a | b) +#define vorrq_u64(a, b) (a | b) + +// vdup +#define vdup_n_s8(a) __builtin_mpl_vector_from_scalar_v8i8(a) +#define vdup_n_s16(a) __builtin_mpl_vector_from_scalar_v4i16(a) +#define vdup_n_s32(a) __builtin_mpl_vector_from_scalar_v2i32(a) +#define vdup_n_s64(a) __builtin_mpl_vector_from_scalar_v1i64(a) +#define vdup_n_u8(a) __builtin_mpl_vector_from_scalar_v8u8(a) +#define vdup_n_u16(a) __builtin_mpl_vector_from_scalar_v4u16(a) +#define vdup_n_u32(a) __builtin_mpl_vector_from_scalar_v2u32(a) +#define vdup_n_u64(a) __builtin_mpl_vector_from_scalar_v1u64(a) +#define vdup_n_f16(a) __builtin_mpl_vector_from_scalar_v4f16(a) +#define vdup_n_f32(a) __builtin_mpl_vector_from_scalar_v2f32(a) +#define vdup_n_f64(a) __builtin_mpl_vector_from_scalar_v1f64(a) +#define vdupq_n_s8(a) __builtin_mpl_vector_from_scalar_v16i8(a) +#define vdupq_n_s16(a) __builtin_mpl_vector_from_scalar_v8i16(a) +#define vdupq_n_s32(a) __builtin_mpl_vector_from_scalar_v4i32(a) +#define vdupq_n_s64(a) __builtin_mpl_vector_from_scalar_v2i64(a) +#define vdupq_n_u8(a) __builtin_mpl_vector_from_scalar_v16u8(a) +#define vdupq_n_u16(a) __builtin_mpl_vector_from_scalar_v8u16(a) +#define vdupq_n_u32(a) __builtin_mpl_vector_from_scalar_v4u32(a) +#define vdupq_n_u64(a) __builtin_mpl_vector_from_scalar_v2u64(a) +#define vdupq_n_f16(a) __builtin_mpl_vector_from_scalar_v8f16(a) +#define vdupq_n_f32(a) __builtin_mpl_vector_from_scalar_v4f32(a) +#define vdupq_n_f64(a) __builtin_mpl_vector_from_scalar_v2f64(a) + +// vceq +#define vceq_s8(a, b) (a == b) +#define vceq_s16(a, b) (a == b) +#define vceq_s32(a, b) (a == b) +#define vceq_s64(a, b) (a == b) +#define vceq_u8(a, b) (a == b) +#define vceq_u16(a, b) (a == b) +#define vceq_u32(a, b) (a == b) +#define vceq_u64(a, b) (a == b) +#define vceq_f16(a, b) (a == b) +#define vceq_f32(a, b) (a == b) +#define vceq_f64(a, b) (a == b) +#define vceqq_s8(a, b) (a == b) +#define vceqq_s16(a, b) (a == b) +#define vceqq_s32(a, b) (a == b) +#define vceqq_s64(a, b) (a == b) +#define vceqq_u8(a, b) (a == b) +#define vceqq_u16(a, b) (a == b) +#define vceqq_u32(a, b) (a == b) +#define vceqq_u64(a, b) (a == b) +#define vceqq_f16(a, b) (a == b) +#define vceqq_f32(a, b) (a == b) +#define vceqq_f64(a, b) (a == b) + +// vcgt +#define vcgt_s8(a, b) (a > b) +#define vcgt_s16(a, b) (a > b) +#define vcgt_s32(a, b) (a > b) +#define vcgt_s64(a, b) (a > b) +#define vcgt_u8(a, b) (a > b) +#define vcgt_u16(a, b) (a > b) +#define vcgt_u32(a, b) (a > b) +#define vcgt_u64(a, b) (a > b) +#define vcgt_f16(a, 
b) (a > b) +#define vcgt_f32(a, b) (a > b) +#define vcgt_f64(a, b) (a > b) +#define vcgtq_s8(a, b) (a > b) +#define vcgtq_s16(a, b) (a > b) +#define vcgtq_s32(a, b) (a > b) +#define vcgtq_s64(a, b) (a > b) +#define vcgtq_u8(a, b) (a > b) +#define vcgtq_u16(a, b) (a > b) +#define vcgtq_u32(a, b) (a > b) +#define vcgtq_u64(a, b) (a > b) +#define vcgtq_f16(a, b) (a > b) +#define vcgtq_f32(a, b) (a > b) +#define vcgtq_f64(a, b) (a > b) + +// vcge +#define vcge_s8(a, b) (a >= b) +#define vcge_s16(a, b) (a >= b) +#define vcge_s32(a, b) (a >= b) +#define vcge_s64(a, b) (a >= b) +#define vcge_u8(a, b) (a >= b) +#define vcge_u16(a, b) (a >= b) +#define vcge_u32(a, b) (a >= b) +#define vcge_u64(a, b) (a >= b) +#define vcge_f16(a, b) (a >= b) +#define vcge_f32(a, b) (a >= b) +#define vcge_f64(a, b) (a >= b) +#define vcgeq_s8(a, b) (a >= b) +#define vcgeq_s16(a, b) (a >= b) +#define vcgeq_s32(a, b) (a >= b) +#define vcgeq_s64(a, b) (a >= b) +#define vcgeq_u8(a, b) (a >= b) +#define vcgeq_u16(a, b) (a >= b) +#define vcgeq_u32(a, b) (a >= b) +#define vcgeq_u64(a, b) (a >= b) +#define vcgeq_f16(a, b) (a >= b) +#define vcgeq_f32(a, b) (a >= b) +#define vcgeq_f64(a, b) (a >= b) + +// vclt +#define vclt_s8(a, b) (a < b) +#define vclt_s16(a, b) (a < b) +#define vclt_s32(a, b) (a < b) +#define vclt_s64(a, b) (a < b) +#define vclt_u8(a, b) (a < b) +#define vclt_u16(a, b) (a < b) +#define vclt_u32(a, b) (a < b) +#define vclt_u64(a, b) (a < b) +#define vclt_f16(a, b) (a < b) +#define vclt_f32(a, b) (a < b) +#define vclt_f64(a, b) (a < b) +#define vcltq_s8(a, b) (a < b) +#define vcltq_s16(a, b) (a < b) +#define vcltq_s32(a, b) (a < b) +#define vcltq_s64(a, b) (a < b) +#define vcltq_u8(a, b) (a < b) +#define vcltq_u16(a, b) (a < b) +#define vcltq_u32(a, b) (a < b) +#define vcltq_u64(a, b) (a < b) +#define vcltq_f16(a, b) (a < b) +#define vcltq_f32(a, b) (a < b) +#define vcltq_f64(a, b) (a < b) + +// vcle +#define vcle_s8(a, b) (a <= b) +#define vcle_s16(a, b) (a <= b) +#define vcle_s32(a, b) (a <= b) +#define vcle_s64(a, b) (a <= b) +#define vcle_u8(a, b) (a <= b) +#define vcle_u16(a, b) (a <= b) +#define vcle_u32(a, b) (a <= b) +#define vcle_u64(a, b) (a <= b) +#define vcle_f16(a, b) (a <= b) +#define vcle_f32(a, b) (a <= b) +#define vcle_f64(a, b) (a <= b) +#define vcleq_s8(a, b) (a <= b) +#define vcleq_s16(a, b) (a <= b) +#define vcleq_s32(a, b) (a <= b) +#define vcleq_s64(a, b) (a <= b) +#define vcleq_u8(a, b) (a <= b) +#define vcleq_u16(a, b) (a <= b) +#define vcleq_u32(a, b) (a <= b) +#define vcleq_u64(a, b) (a <= b) +#define vcleq_f16(a, b) (a <= b) +#define vcleq_f32(a, b) (a <= b) +#define vcleq_f64(a, b) (a <= b) + +// veor +#define veor_s8(a, b) (a ^ b) +#define veor_s16(a, b) (a ^ b) +#define veor_s32(a, b) (a ^ b) +#define veor_s64(a, b) (a ^ b) +#define veor_u8(a, b) (a ^ b) +#define veor_u16(a, b) (a ^ b) +#define veor_u32(a, b) (a ^ b) +#define veor_u64(a, b) (a ^ b) +#define veorq_s8(a, b) (a ^ b) +#define veorq_s16(a, b) (a ^ b) +#define veorq_s32(a, b) (a ^ b) +#define veorq_s64(a, b) (a ^ b) +#define veorq_u8(a, b) (a ^ b) +#define veorq_u16(a, b) (a ^ b) +#define veorq_u32(a, b) (a ^ b) +#define veorq_u64(a, b) (a ^ b) + +// vext +#define vext_s8(a, b, n) __builtin_mpl_vector_merge_v8i8(a, b, n) +#define vext_s16(a, b, n) __builtin_mpl_vector_merge_v4i16(a, b, n) +#define vext_s32(a, b, n) __builtin_mpl_vector_merge_v2i32(a, b, n) +#define vext_s64(a, b, n) __builtin_mpl_vector_merge_v1i64(a, b, n) +#define vext_u8(a, b, n) __builtin_mpl_vector_merge_v8u8(a, b, n) +#define vext_u16(a, b, n) 
__builtin_mpl_vector_merge_v4u16(a, b, n) +#define vext_u32(a, b, n) __builtin_mpl_vector_merge_v2u32(a, b, n) +#define vext_u64(a, b, n) __builtin_mpl_vector_merge_v1u64(a, b, n) +#define vext_f16(a, b, n) __builtin_mpl_vector_merge_v4f16(a, b, n) +#define vext_f32(a, b, n) __builtin_mpl_vector_merge_v2f32(a, b, n) +#define vext_f64(a, b, n) __builtin_mpl_vector_merge_v1f64(a, b, n) +#define vextq_s8(a, b, n) __builtin_mpl_vector_merge_v16i8(a, b, n) +#define vextq_s16(a, b, n) __builtin_mpl_vector_merge_v8i16(a, b, n) +#define vextq_s32(a, b, n) __builtin_mpl_vector_merge_v4i32(a, b, n) +#define vextq_s64(a, b, n) __builtin_mpl_vector_merge_v2i64(a, b, n) +#define vextq_u8(a, b, n) __builtin_mpl_vector_merge_v16u8(a, b, n) +#define vextq_u16(a, b, n) __builtin_mpl_vector_merge_v8u16(a, b, n) +#define vextq_u32(a, b, n) __builtin_mpl_vector_merge_v4u32(a, b, n) +#define vextq_u64(a, b, n) __builtin_mpl_vector_merge_v2u64(a, b, n) +#define vextq_f16(a, b, n) __builtin_mpl_vector_merge_v8f16(a, b, n) +#define vextq_f32(a, b, n) __builtin_mpl_vector_merge_v4f32(a, b, n) +#define vextq_f64(a, b, n) __builtin_mpl_vector_merge_v2f64(a, b, n) + +// vget_high +#define vget_high_s8(a) __builtin_mpl_vector_get_high_v16i8(a) +#define vget_high_s16(a) __builtin_mpl_vector_get_high_v8i16(a) +#define vget_high_s32(a) __builtin_mpl_vector_get_high_v4i32(a) +#define vget_high_s64(a) __builtin_mpl_vector_get_high_v2i64(a) +#define vget_high_u8(a) __builtin_mpl_vector_get_high_v16u8(a) +#define vget_high_u16(a) __builtin_mpl_vector_get_high_v8u16(a) +#define vget_high_u32(a) __builtin_mpl_vector_get_high_v4u32(a) +#define vget_high_u64(a) __builtin_mpl_vector_get_high_v2u64(a) +#define vget_high_f16(a) __builtin_mpl_vector_get_high_v4f16(a) +#define vget_high_f32(a) __builtin_mpl_vector_get_high_v2f32(a) +#define vget_high_f64(a) __builtin_mpl_vector_get_high_v1f64(a) + +// vget_lane +#define vget_lane_s8(a, n) __builtin_mpl_vector_get_element_v8i8(a, n) +#define vget_lane_s16(a, n) __builtin_mpl_vector_get_element_v4i16(a, n) +#define vget_lane_s32(a, n) __builtin_mpl_vector_get_element_v2i32(a, n) +#define vget_lane_s64(a, n) __builtin_mpl_vector_get_element_v1i64(a, n) +#define vget_lane_u8(a, n) __builtin_mpl_vector_get_element_v8u8(a, n) +#define vget_lane_u16(a, n) __builtin_mpl_vector_get_element_v4u16(a, n) +#define vget_lane_u32(a, n) __builtin_mpl_vector_get_element_v2u32(a, n) +#define vget_lane_u64(a, n) __builtin_mpl_vector_get_element_v1u64(a, n) +#define vget_lane_f16(a, n) __builtin_mpl_vector_get_element_v4f16(a, n) +#define vget_lane_f32(a, n) __builtin_mpl_vector_get_element_v2f32(a, n) +#define vget_lane_f64(a, n) __builtin_mpl_vector_get_element_v1f64(a, n) +#define vgetq_lane_s8(a, n) __builtin_mpl_vector_get_element_v16i8(a, n) +#define vgetq_lane_s16(a, n) __builtin_mpl_vector_get_element_v8i16(a, n) +#define vgetq_lane_s32(a, n) __builtin_mpl_vector_get_element_v4i32(a, n) +#define vgetq_lane_s64(a, n) __builtin_mpl_vector_get_element_v2i64(a, n) +#define vgetq_lane_u8(a, n) __builtin_mpl_vector_get_element_v16u8(a, n) +#define vgetq_lane_u16(a, n) __builtin_mpl_vector_get_element_v8u16(a, n) +#define vgetq_lane_u32(a, n) __builtin_mpl_vector_get_element_v4u32(a, n) +#define vgetq_lane_u64(a, n) __builtin_mpl_vector_get_element_v2u64(a, n) +#define vgetq_lane_f16(a, n) __builtin_mpl_vector_get_element_v8f16(a, n) +#define vgetq_lane_f32(a, n) __builtin_mpl_vector_get_element_v4f32(a, n) +#define vgetq_lane_f64(a, n) __builtin_mpl_vector_get_element_v2f64(a, n) + +// vget_low 
+#define vget_low_s8(a) __builtin_mpl_vector_get_low_v16i8(a) +#define vget_low_s16(a) __builtin_mpl_vector_get_low_v8i16(a) +#define vget_low_s32(a) __builtin_mpl_vector_get_low_v4i32(a) +#define vget_low_s64(a) __builtin_mpl_vector_get_low_v2i64(a) +#define vget_low_u8(a) __builtin_mpl_vector_get_low_v16u8(a) +#define vget_low_u16(a) __builtin_mpl_vector_get_low_v8u16(a) +#define vget_low_u32(a) __builtin_mpl_vector_get_low_v4u32(a) +#define vget_low_u64(a) __builtin_mpl_vector_get_low_v2u64(a) +#define vget_low_f16(a) __builtin_mpl_vector_get_low_v4f16(a) +#define vget_low_f32(a) __builtin_mpl_vector_get_low_v2f32(a) +#define vget_low_f64(a) __builtin_mpl_vector_get_low_v1f64(a) + +// vld1 +#define vld1_s8(a) __builtin_mpl_vector_load_v8i8(a) +#define vld1_s16(a) __builtin_mpl_vector_load_v4i16(a) +#define vld1_s32(a) __builtin_mpl_vector_load_v2i32(a) +#define vld1_s64(a) __builtin_mpl_vector_load_v1i64(a) +#define vld1_u8(a) __builtin_mpl_vector_load_v8u8(a) +#define vld1_u16(a) __builtin_mpl_vector_load_v4u16(a) +#define vld1_u32(a) __builtin_mpl_vector_load_v2u32(a) +#define vld1_u64(a) __builtin_mpl_vector_load_v1u64(a) +#define vld1_f16(a) __builtin_mpl_vector_load_v4f16(a) +#define vld1_f32(a) __builtin_mpl_vector_load_v2f32(a) +#define vld1_f64(a) __builtin_mpl_vector_load_v1f64(a) +#define vld1q_s8(a) __builtin_mpl_vector_load_v16i8(a) +#define vld1q_s16(a) __builtin_mpl_vector_load_v8i16(a) +#define vld1q_s32(a) __builtin_mpl_vector_load_v4i32(a) +#define vld1q_s64(a) __builtin_mpl_vector_load_v2i64(a) +#define vld1q_u8(a) __builtin_mpl_vector_load_v16u8(a) +#define vld1q_u16(a) __builtin_mpl_vector_load_v8u16(a) +#define vld1q_u32(a) __builtin_mpl_vector_load_v4u32(a) +#define vld1q_u64(a) __builtin_mpl_vector_load_v2u64(a) +#define vld1q_f16(a) __builtin_mpl_vector_load_v8f16(a) +#define vld1q_f32(a) __builtin_mpl_vector_load_v4f32(a) +#define vld1q_f64(a) __builtin_mpl_vector_load_v2f64(a) + +// vmlal +#define vmlal_s8(acc, a, b) __builtin_mpl_vector_madd_v8i8(acc, a, b) +#define vmlal_s16(acc, a, b) __builtin_mpl_vector_madd_v4i16(acc, a, b) +#define vmlal_s32(acc, a, b) __builtin_mpl_vector_madd_v2i32(acc, a, b) +#define vmlal_u8(acc, a, b) __builtin_mpl_vector_madd_v8u8(acc, a, b) +#define vmlal_u16(acc, a, b) __builtin_mpl_vector_madd_v4u16(acc, a, b) +#define vmlal_u32(acc, a, b) __builtin_mpl_vector_madd_v2u32(acc, a, b) + +// vmovl +#define vmovl_s32(a) __builtin_mpl_vector_widen_low_v2i32(a) +#define vmovl_s16(a) __builtin_mpl_vector_widen_low_v4i16(a) +#define vmovl_s8(a) __builtin_mpl_vector_widen_low_v8i8(a) +#define vmovl_u32(a) __builtin_mpl_vector_widen_low_v2u32(a) +#define vmovl_u16(a) __builtin_mpl_vector_widen_low_v4u16(a) +#define vmovl_u8(a) __builtin_mpl_vector_widen_low_v8u8(a) + +// vmovl_high +#define vmovl_high_s32(a) __builtin_mpl_vector_widen_high_v2i32(a) +#define vmovl_high_s16(a) __builtin_mpl_vector_widen_high_v4i16(a) +#define vmovl_high_s8(a) __builtin_mpl_vector_widen_high_v8i8(a) +#define vmovl_high_u32(a) __builtin_mpl_vector_widen_high_v2u32(a) +#define vmovl_high_u16(a) __builtin_mpl_vector_widen_high_v4u16(a) +#define vmovl_high_u8(a) __builtin_mpl_vector_widen_high_v8u8(a) + +// vmovn +#define vmovn_s64(a) __builtin_mpl_vector_narrow_low_v2i64(a) +#define vmovn_s32(a) __builtin_mpl_vector_narrow_low_v4i32(a) +#define vmovn_s16(a) __builtin_mpl_vector_narrow_low_v8i16(a) +#define vmovn_u64(a) __builtin_mpl_vector_narrow_low_v2u64(a) +#define vmovn_u32(a) __builtin_mpl_vector_narrow_low_v4u32(a) +#define vmovn_u16(a) 
__builtin_mpl_vector_narrow_low_v8u16(a) + +// vmovn_high +#define vmovn_high_s64(a, b) __builtin_mpl_vector_narrow_high_v2i64(a, b) +#define vmovn_high_s32(a, b) __builtin_mpl_vector_narrow_high_v4i32(a, b) +#define vmovn_high_s16(a, b) __builtin_mpl_vector_narrow_high_v8i16(a, b) +#define vmovn_high_u64(a, b) __builtin_mpl_vector_narrow_high_v2u64(a, b) +#define vmovn_high_u32(a, b) __builtin_mpl_vector_narrow_high_v4u32(a, b) +#define vmovn_high_u16(a, b) __builtin_mpl_vector_narrow_high_v8u16(a, b) + +// vmull +#define vmull_s8(a, b) __builtin_mpl_vector_mull_low_v8i8(a, b) +#define vmull_s16(a, b) __builtin_mpl_vector_mull_low_v4i16(a, b) +#define vmull_s32(a, b) __builtin_mpl_vector_mull_low_v2i32(a, b) +#define vmull_u8(a, b) __builtin_mpl_vector_mull_low_v8u8(a, b) +#define vmull_u16(a, b) __builtin_mpl_vector_mull_low_v4u16(a, b) +#define vmull_u32(a, b) __builtin_mpl_vector_mull_low_v2u32(a, b) + +// vmull_high +#define vmull_high_s8(a, b) __builtin_mpl_vector_mull_high_v8i8(a, b) +#define vmull_high_s16(a, b) __builtin_mpl_vector_mull_high_v4i16(a, b) +#define vmull_high_s32(a, b) __builtin_mpl_vector_mull_high_v2i32(a, b) +#define vmull_high_u8(a, b) __builtin_mpl_vector_mull_high_v8u8(a, b) +#define vmull_high_u16(a, b) __builtin_mpl_vector_mull_high_v4u16(a, b) +#define vmull_high_u32(a, b) __builtin_mpl_vector_mull_high_v2u32(a, b) + +// vor +#define vor_s8(a, b) (a | b) +#define vor_s16(a, b) (a | b) +#define vor_s32(a, b) (a | b) +#define vor_s64(a, b) (a | b) +#define vor_u8(a, b) (a | b) +#define vor_u16(a, b) (a | b) +#define vor_u32(a, b) (a | b) +#define vor_u64(a, b) (a | b) +#define vorq_s8(a, b) (a | b) +#define vorq_s16(a, b) (a | b) +#define vorq_s32(a, b) (a | b) +#define vorq_s64(a, b) (a | b) +#define vorq_u8(a, b) (a | b) +#define vorq_u16(a, b) (a | b) +#define vorq_u32(a, b) (a | b) +#define vorq_u64(a, b) (a | b) + +// vpadal (add and accumulate long pairwise) +#define vpadal_s8(a, b) __builtin_mpl_vector_pairwise_adalp_v8i8(a, b) +#define vpadal_s16(a, b) __builtin_mpl_vector_pairwise_adalp_v4i16(a, b) +#define vpadal_s32(a, b) __builtin_mpl_vector_pairwise_adalp_v2i32(a, b) +#define vpadal_u8(a, b) __builtin_mpl_vector_pairwise_adalp_v8u8(a, b) +#define vpadal_u16(a, b) __builtin_mpl_vector_pairwise_adalp_v4u16(a, b) +#define vpadal_u32(a, b) __builtin_mpl_vector_pairwise_adalp_v2u32(a, b) +#define vpadalq_s8(a, b) __builtin_mpl_vector_pairwise_adalp_v16i8(a, b) +#define vpadalq_s16(a, b) __builtin_mpl_vector_pairwise_adalp_v8i16(a, b) +#define vpadalq_s32(a, b) __builtin_mpl_vector_pairwise_adalp_v4i32(a, b) +#define vpadalq_u8(a, b) __builtin_mpl_vector_pairwise_adalp_v16u8(a, b) +#define vpadalq_u16(a, b) __builtin_mpl_vector_pairwise_adalp_v8u16(a, b) +#define vpadalq_u32(a, b) __builtin_mpl_vector_pairwise_adalp_v4u32(a, b) + +// vpaddl +#define vpaddl_s8(a) __builtin_mpl_vector_pairwise_add_v8i8(a) +#define vpaddl_s16(a) __builtin_mpl_vector_pairwise_add_v4i16(a) +#define vpaddl_s32(a) __builtin_mpl_vector_pairwise_add_v2i32(a) +#define vpaddl_u8(a) __builtin_mpl_vector_pairwise_add_v8u8(a) +#define vpaddl_u16(a) __builtin_mpl_vector_pairwise_add_v4u16(a) +#define vpaddl_u32(a) __builtin_mpl_vector_pairwise_add_v2u32(a) +#define vpaddlq_s8(a) __builtin_mpl_vector_pairwise_add_v16i8(a) +#define vpaddlq_s16(a) __builtin_mpl_vector_pairwise_add_v8i16(a) +#define vpaddlq_s32(a) __builtin_mpl_vector_pairwise_add_v4i32(a) +#define vpaddlq_u8(a) __builtin_mpl_vector_pairwise_add_v16u8(a) +#define vpaddlq_u16(a) __builtin_mpl_vector_pairwise_add_v8u16(a) 
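// Illustrative usage, not part of the patch: the vpaddl/vpadal macros above map pairwise
// widening adds onto the __builtin_mpl_vector_pairwise_* builtins. Assuming they keep the
// standard ARM NEON semantics and that the vector types and macros defined in this header
// are in scope, a small byte-summing helper could look like the sketch below (SumBytes and
// its parameters are hypothetical names).
static inline uint32_t SumBytes(const uint8_t *buf, size_t len) {
  // Assumes len is small enough that the 16-bit lanes cannot overflow,
  // and that <stdint.h>/<stddef.h> style types are available.
  uint16x4_t acc16 = vdup_n_u16(0);
  for (size_t i = 0; i + 8 <= len; i += 8) {
    uint8x8_t v = vld1_u8(buf + i);        // load 8 bytes
    acc16 = vpadal_u8(acc16, v);           // pairwise add the bytes into 16-bit accumulators
  }
  uint32x2_t acc32 = vpaddl_u16(acc16);    // pairwise widen the 16-bit lanes to 32 bits
  return vget_lane_u32(acc32, 0) + vget_lane_u32(acc32, 1);
}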
+#define vpaddlq_u32(a) __builtin_mpl_vector_pairwise_add_v4u32(a) + +// vqtbl1 +#define vqtbl1_s8(a, b) __builtin_mpl_vector_table_lookup_v8i8(a, b) +#define vqtbl1_u8(a, b) __builtin_mpl_vector_table_lookup_v8u8(a, b) +#define vqtbl1q_s8(a, b) __builtin_mpl_vector_table_lookup_v16i8(a, b) +#define vqtbl1q_u8(a, b) __builtin_mpl_vector_table_lookup_v16u8(a, b) + +// vreinterpret 8 +#define vreinterpret_s16_s8(a) ((int16x4_t)a) +#define vreinterpret_s32_s8(a) ((int32x2_t)a) +#define vreinterpret_s64_s8(a) ((int64x1_t)a) +#define vreinterpret_u16_u8(a) ((uint16x4_t)a) +#define vreinterpret_u32_u8(a) ((uint32x2_t)a) +#define vreinterpret_u64_u8(a) ((uint64x1_t)a) +#define vreinterpret_f16_s8(a) ((float16x4_t)a) +#define vreinterpret_f32_s8(a) ((float32x2_t)a) +#define vreinterpret_f64_s8(a) ((float64x1_t)a) +#define vreinterpret_f16_u8(a) ((float16x4_t)a) +#define vreinterpret_f32_u8(a) ((float32x2_t)a) +#define vreinterpret_f64_u8(a) ((float64x1_t)a) +#define vreinterpretq_s16_s8(a) ((int16x8_t)a) +#define vreinterpretq_s32_s8(a) ((int32x4_t)a) +#define vreinterpretq_s64_s8(a) ((int64x2_t)a) +#define vreinterpretq_u16_u8(a) ((uint16x8_t)a) +#define vreinterpretq_u32_u8(a) ((uint32x4_t)a) +#define vreinterpretq_u64_u8(a) ((uint64x2_t)a) +#define vreinterpretq_f16_s8(a) ((float16x8_t)a) +#define vreinterpretq_f32_s8(a) ((float32x4_t)a) +#define vreinterpretq_f64_s8(a) ((float64x2_t)a) +#define vreinterpretq_f16_u8(a) ((float16x8_t)a) +#define vreinterpretq_f32_u8(a) ((float32x4_t)a) +#define vreinterpretq_f64_u8(a) ((float64x2_t)a) + +// vreinterpret 16 +#define vreinterpret_s8_s16(a) ((int8x8_t)a) +#define vreinterpret_s32_s16(a) ((int32x2_t)a) +#define vreinterpret_s64_s16(a) ((int64x1_t)a) +#define vreinterpret_u8_u16(a) ((uint8x8_t)a) +#define vreinterpret_u32_u16(a) ((uint32x2_t)a) +#define vreinterpret_u64_u16(a) ((uint64x1_t)a) +#define vreinterpret_f16_s16(a) ((float16x4_t)a) +#define vreinterpret_f32_s16(a) ((float32x2_t)a) +#define vreinterpret_f64_s16(a) ((float64x1_t)a) +#define vreinterpret_f16_u16(a) ((float16x4_t)a) +#define vreinterpret_f32_u16(a) ((float32x2_t)a) +#define vreinterpret_f64_u16(a) ((float64x1_t)a) +#define vreinterpretq_s8_s16(a) ((int16x8_t)a) +#define vreinterpretq_s32_s16(a) ((int32x4_t)a) +#define vreinterpretq_s64_s16(a) ((int64x2_t)a) +#define vreinterpretq_u8_u16(a) ((uint16x8_t)a) +#define vreinterpretq_u32_u16(a) ((uint32x4_t)a) +#define vreinterpretq_u64_u16(a) ((uint64x2_t)a) +#define vreinterpretq_f16_s16(a) ((float16x8_t)a) +#define vreinterpretq_f32_s16(a) ((float32x4_t)a) +#define vreinterpretq_f64_s16(a) ((float64x2_t)a) +#define vreinterpretq_f16_u16(a) ((float16x8_t)a) +#define vreinterpretq_f32_u16(a) ((float32x4_t)a) +#define vreinterpretq_f64_u16(a) ((float64x2_t)a) + +// vreinterpret 32 +#define vreinterpret_s8_s32(a) ((int8x8_t)a) +#define vreinterpret_s16_s32(a) ((int16x4_t)a) +#define vreinterpret_s64_s32(a) ((int64x1_t)a) +#define vreinterpret_u8_u32(a) ((uint8x8_t)a) +#define vreinterpret_u16_u32(a) ((uint16x4_t)a) +#define vreinterpret_u64_u32(a) ((uint64x1_t)a) +#define vreinterpret_f16_s32(a) ((float16x4_t)a) +#define vreinterpret_f32_s32(a) ((float32x2_t)a) +#define vreinterpret_f64_s32(a) ((float64x1_t)a) +#define vreinterpret_f16_u32(a) ((float16x4_t)a) +#define vreinterpret_f32_u32(a) ((float32x2_t)a) +#define vreinterpret_f64_u32(a) ((float64x1_t)a) +#define vreinterpretq_s8_s32(a) ((int16x8_t)a) +#define vreinterpretq_s16_s32(a) ((int16x8_t)a) +#define vreinterpretq_s64_s32(a) ((int64x2_t)a) +#define vreinterpretq_u8_u32(a) 
((uint16x8_t)a) +#define vreinterpretq_u16_u32(a) ((uint16x8_t)a) +#define vreinterpretq_u64_u32(a) ((uint64x2_t)a) +#define vreinterpretq_f16_s32(a) ((float16x8_t)a) +#define vreinterpretq_f32_s32(a) ((float32x4_t)a) +#define vreinterpretq_f64_s32(a) ((float64x2_t)a) +#define vreinterpretq_f16_u32(a) ((float16x8_t)a) +#define vreinterpretq_f32_u32(a) ((float32x4_t)a) +#define vreinterpretq_f64_u32(a) ((float64x2_t)a) + +// vreinterpret 64 +#define vreinterpret_s8_s64(a) ((int8x8_t)a) +#define vreinterpret_s16_s64(a) ((int16x4_t)a) +#define vreinterpret_s32_s64(a) ((int32x2_t)a) +#define vreinterpret_u8_u64(a) ((uint8x8_t)a) +#define vreinterpret_u16_u64(a) ((uint16x4_t)a) +#define vreinterpret_u32_u64(a) ((uint32x2_t)a) +#define vreinterpret_f16_s64(a) ((float16x4_t)a) +#define vreinterpret_f32_s64(a) ((float32x2_t)a) +#define vreinterpret_f64_s64(a) ((float64x1_t)a) +#define vreinterpret_f16_u64(a) ((float16x4_t)a) +#define vreinterpret_f32_u64(a) ((float32x2_t)a) +#define vreinterpret_f64_u64(a) ((float64x1_t)a) +#define vreinterpretq_s8_s64(a) ((int8x16_t)a) +#define vreinterpretq_s16_s64(a) ((int16x8_t)a) +#define vreinterpretq_s32_s64(a) ((int32x4_t)a) +#define vreinterpretq_u8_u64(a) ((uint8x16_t)a) +#define vreinterpretq_u16_u64(a) ((uint16x8_t)a) +#define vreinterpretq_u32_u64(a) ((uint32x4_t)a) +#define vreinterpretq_f16_s64(a) ((float16x8_t)a) +#define vreinterpretq_f32_s64(a) ((float32x4_t)a) +#define vreinterpretq_f64_s64(a) ((float64x2_t)a) +#define vreinterpretq_f16_u64(a) ((float16x8_t)a) +#define vreinterpretq_f32_u64(a) ((float32x4_t)a) +#define vreinterpretq_f64_u64(a) ((float64x2_t)a) + +// vrev32 +#define vrev32_s8(a) __builtin_mpl_vector_reverse_v8i8(a) +#define vrev32_s16(a) __builtin_mpl_vector_reverse_v4i16(a) +#define vrev32_u8(a) __builtin_mpl_vector_reverse_v8u8(a) +#define vrev32_u16(a) __builtin_mpl_vector_reverse_v4u16(a) +#define vrev32q_s8(a) __builtin_mpl_vector_reverse_v16i8(a) +#define vrev32q_s16(a) __builtin_mpl_vector_reverse_v8i16(a) +#define vrev32q_u8(a) __builtin_mpl_vector_reverse_v16u8(a) +#define vrev32q_u16(a) __builtin_mpl_vector_reverse_v8u16(a) + +// vset_lane +#define vset_lane_s8(v, a, n) __builtin_mpl_vector_set_element_v8i8(v, a, n) +#define vset_lane_s16(v, a, n) __builtin_mpl_vector_set_element_v4i16(v, a, n) +#define vset_lane_s32(v, a, n) __builtin_mpl_vector_set_element_v2i32(v, a, n) +#define vset_lane_s64(v, a, n) __builtin_mpl_vector_set_element_v1i64(v, a, n) +#define vset_lane_u8(v, a, n) __builtin_mpl_vector_set_element_v8u8(v, a, n) +#define vset_lane_u16(v, a, n) __builtin_mpl_vector_set_element_v4u16(v, a, n) +#define vset_lane_u32(v, a, n) __builtin_mpl_vector_set_element_v2u32(v, a, n) +#define vset_lane_u64(v, a, n) __builtin_mpl_vector_set_element_v1u64(v, a, n) +#define vset_lane_f16(v, a, n) __builtin_mpl_vector_set_element_v4f16(v, a, n) +#define vset_lane_f32(v, a, n) __builtin_mpl_vector_set_element_v2f32(v, a, n) +#define vset_lane_f64(v, a, n) __builtin_mpl_vector_set_element_v1f64(v, a, n) +#define vsetq_lane_s8(v, a, n) __builtin_mpl_vector_set_element_v16i8(v, a, n) +#define vsetq_lane_s16(v, a, n) __builtin_mpl_vector_set_element_v8i16(v, a, n) +#define vsetq_lane_s32(v, a, n) __builtin_mpl_vector_set_element_v4i32(v, a, n) +#define vsetq_lane_s64(v, a, n) __builtin_mpl_vector_set_element_v2i64(v, a, n) +#define vsetq_lane_u8(v, a, n) __builtin_mpl_vector_set_element_v16u8(v, a, n) +#define vsetq_lane_u16(v, a, n) __builtin_mpl_vector_set_element_v8u16(v, a, n) +#define vsetq_lane_u32(v, a, n) 
__builtin_mpl_vector_set_element_v4u32(v, a, n) +#define vsetq_lane_u64(v, a, n) __builtin_mpl_vector_set_element_v2u64(v, a, n) +#define vsetq_lane_f16(v, a, n) __builtin_mpl_vector_set_element_v8f16(v, a, n) +#define vsetq_lane_f32(v, a, n) __builtin_mpl_vector_set_element_v4f32(v, a, n) +#define vsetq_lane_f64(v, a, n) __builtin_mpl_vector_set_element_v2f64(v, a, n) + +// vshl +#define vshl_s8(a, b) (a << b) +#define vshl_s16(a, b) (a << b) +#define vshl_s32(a, b) (a << b) +#define vshl_s64(a, b) (a << b) +#define vshl_u8(a, b) (a << b) +#define vshl_u16(a, b) (a << b) +#define vshl_u32(a, b) (a << b) +#define vshl_u64(a, b) (a << b) +#define vshlq_s8(a, b) (a << b) +#define vshlq_s16(a, b) (a << b) +#define vshlq_s32(a, b) (a << b) +#define vshlq_s64(a, b) (a << b) +#define vshlq_u8(a, b) (a << b) +#define vshlq_u16(a, b) (a << b) +#define vshlq_u32(a, b) (a << b) +#define vshlq_u64(a, b) (a << b) + +// vshl_n +#define vshlq_n_s64(a, n) __builtin_mpl_vector_shli_v2i64(a, n) +#define vshlq_n_s32(a, n) __builtin_mpl_vector_shli_v4i32(a, n) +#define vshlq_n_s16(a, n) __builtin_mpl_vector_shli_v8i16(a, n) +#define vshlq_n_s8(a, n) __builtin_mpl_vector_shli_v16i8(a, n) +#define vshlq_n_u64(a, n) __builtin_mpl_vector_shli_v2u64(a, n) +#define vshlq_n_u32(a, n) __builtin_mpl_vector_shli_v4u32(a, n) +#define vshlq_n_u16(a, n) __builtin_mpl_vector_shli_v8u16(a, n) +#define vshlq_n_u8(a, n) __builtin_mpl_vector_shli_v16u8(a, n) +#define vshl_n_s64(a, n) __builtin_mpl_vector_shli_v1i64(a, n) +#define vshl_n_s32(a, n) __builtin_mpl_vector_shli_v2i32(a, n) +#define vshl_n_s16(a, n) __builtin_mpl_vector_shli_v4i16(a, n) +#define vshl_n_s8(a, n) __builtin_mpl_vector_shli_v8i8(a, n) +#define vshl_n_u64(a, n) __builtin_mpl_vector_shli_v1u64(a, n) +#define vshl_n_u32(a, n) __builtin_mpl_vector_shli_v2u32(a, n) +#define vshl_n_u16(a, n) __builtin_mpl_vector_shli_v4u16(a, n) +#define vshl_n_u8(a, n) __builtin_mpl_vector_shli_v8u8(a, n) + +// vshr +#define vshr_s8(a, b) (a >> b) +#define vshr_s16(a, b) (a >> b) +#define vshr_s32(a, b) (a >> b) +#define vshr_s64(a, b) (a >> b) +#define vshr_u8(a, b) (a >> b) +#define vshr_u16(a, b) (a >> b) +#define vshr_u32(a, b) (a >> b) +#define vshr_u64(a, b) (a >> b) +#define vshrq_s8(a, b) (a >> b) +#define vshrq_s16(a, b) (a >> b) +#define vshrq_s32(a, b) (a >> b) +#define vshrq_s64(a, b) (a >> b) +#define vshrq_u8(a, b) (a >> b) +#define vshrq_u16(a, b) (a >> b) +#define vshrq_u32(a, b) (a >> b) +#define vshrq_u64(a, b) (a >> b) + +// vshr_n +#define vshrq_n_s64(a, n) __builtin_mpl_vector_shri_v2i64(a, n) +#define vshrq_n_s32(a, n) __builtin_mpl_vector_shri_v4i32(a, n) +#define vshrq_n_s16(a, n) __builtin_mpl_vector_shri_v8i16(a, n) +#define vshrq_n_s8(a, n) __builtin_mpl_vector_shri_v16i8(a, n) +#define vshrq_n_u64(a, n) __builtin_mpl_vector_shru_v2u64(a, n) +#define vshrq_n_u32(a, n) __builtin_mpl_vector_shru_v4u32(a, n) +#define vshrq_n_u16(a, n) __builtin_mpl_vector_shru_v8u16(a, n) +#define vshrq_n_u8(a, n) __builtin_mpl_vector_shru_v16u8(a, n) +#define vshr_n_s64(a, n) __builtin_mpl_vector_shri_v1i64(a, n) +#define vshr_n_s32(a, n) __builtin_mpl_vector_shri_v2i32(a, n) +#define vshr_n_s16(a, n) __builtin_mpl_vector_shri_v4i16(a, n) +#define vshr_n_s8(a, n) __builtin_mpl_vector_shri_v8i8(a, n) +#define vshr_n_u64(a, n) __builtin_mpl_vector_shru_v1u64(a, n) +#define vshr_n_u32(a, n) __builtin_mpl_vector_shru_v2u32(a, n) +#define vshr_n_u16(a, n) __builtin_mpl_vector_shru_v4u16(a, n) +#define vshr_n_u8(a, n) __builtin_mpl_vector_shru_v8u8(a, n) + +// vshrn_n 
+#define vshrn_n_s16(a, n) __builtin_mpl_vector_shr_narrow_low_v8i16(a, n) +#define vshrn_n_s32(a, n) __builtin_mpl_vector_shr_narrow_low_v4i32(a, n) +#define vshrn_n_s64(a, n) __builtin_mpl_vector_shr_narrow_low_v2i64(a, n) +#define vshrn_n_u16(a, n) __builtin_mpl_vector_shr_narrow_low_v8u16(a, n) +#define vshrn_n_u32(a, n) __builtin_mpl_vector_shr_narrow_low_v4u32(a, n) +#define vshrn_n_u64(a, n) __builtin_mpl_vector_shr_narrow_low_v2u64(a, n) + +// vst1 +#define vst1_s8(p, v) __builtin_mpl_vector_store_v8i8(p, v) +#define vst1_s16(p, v) __builtin_mpl_vector_store_v4i16(p, v) +#define vst1_s32(p, v) __builtin_mpl_vector_store_v2i32(p, v) +#define vst1_s64(p, v) __builtin_mpl_vector_store_v1i64(p, v) +#define vst1_u8(p, v) __builtin_mpl_vector_store_v8u8(p, v) +#define vst1_u16(p, v) __builtin_mpl_vector_store_v4u16(p, v) +#define vst1_u32(p, v) __builtin_mpl_vector_store_v2u32(p, v) +#define vst1_u64(p, v) __builtin_mpl_vector_store_v1u64(p, v) +#define vst1_f16(p, v) __builtin_mpl_vector_store_v4f16(p, v) +#define vst1_f32(p, v) __builtin_mpl_vector_store_v2f32(p, v) +#define vst1_f64(p, v) __builtin_mpl_vector_store_v1f64(p, v) +#define vst1q_s8(p, v) __builtin_mpl_vector_store_v16i8(p, v) +#define vst1q_s16(p, v) __builtin_mpl_vector_store_v8i16(p, v) +#define vst1q_s32(p, v) __builtin_mpl_vector_store_v4i32(p, v) +#define vst1q_s64(p, v) __builtin_mpl_vector_store_v2i64(p, v) +#define vst1q_u8(p, v) __builtin_mpl_vector_store_v16u8(p, v) +#define vst1q_u16(p, v) __builtin_mpl_vector_store_v8u16(p, v) +#define vst1q_u32(p, v) __builtin_mpl_vector_store_v4u32(p, v) +#define vst1q_u64(p, v) __builtin_mpl_vector_store_v2u64(p, v) +#define vst1q_f16(p, v) __builtin_mpl_vector_store_v8f16(p, v) +#define vst1q_f32(p, v) __builtin_mpl_vector_store_v4f32(p, v) +#define vst1q_f64(p, v) __builtin_mpl_vector_store_v2f64(p, v) + +// vsub +#define vsub_s8(a, b) (a - b) +#define vsub_s16(a, b) (a - b) +#define vsub_s32(a, b) (a - b) +#define vsub_s64(a, b) (a - b) +#define vsub_u8(a, b) (a - b) +#define vsub_u16(a, b) (a - b) +#define vsub_u32(a, b) (a - b) +#define vsub_u64(a, b) (a - b) +#define vsub_f16(a, b) (a - b) +#define vsub_f32(a, b) (a - b) +#define vsub_f64(a, b) (a - b) +#define vsubq_s8(a, b) (a - b) +#define vsubq_s16(a, b) (a - b) +#define vsubq_s32(a, b) (a - b) +#define vsubq_s64(a, b) (a - b) +#define vsubq_u8(a, b) (a - b) +#define vsubq_u16(a, b) (a - b) +#define vsubq_u32(a, b) (a - b) +#define vsubq_u64(a, b) (a - b) +#define vsubq_f16(a, b) (a - b) +#define vsubq_f32(a, b) (a - b) +#define vsubq_f64(a, b) (a - b) + +// vsub[lw] +#define vsubl_s8(a, b) __builtin_mpl_vector_subl_low_v8i8(a, b) +#define vsubl_s16(a, b) __builtin_mpl_vector_subl_low_v4i16(a, b) +#define vsubl_s32(a, b) __builtin_mpl_vector_subl_low_v2i32(a, b) +#define vsubl_u8(a, b) __builtin_mpl_vector_subl_low_v8u8(a, b) +#define vsubl_u16(a, b) __builtin_mpl_vector_subl_low_v4u16(a, b) +#define vsubl_u32(a, b) __builtin_mpl_vector_subl_low_v2u32(a, b) +#define vsubl_high_s8(a, b) __builtin_mpl_vector_subl_high_v8i8(a, b) +#define vsubl_high_s16(a, b) __builtin_mpl_vector_subl_high_v4i16(a, b) +#define vsubl_high_s32(a, b) __builtin_mpl_vector_subl_high_v2i32(a, b) +#define vsubl_high_u8(a, b) __builtin_mpl_vector_subl_high_v8u8(a, b) +#define vsubl_high_u16(a, b) __builtin_mpl_vector_subl_high_v4u16(a, b) +#define vsubl_high_u32(a, b) __builtin_mpl_vector_subl_high_v2u32(a, b) +#define vsubw_s8(a, b) __builtin_mpl_vector_subw_low_v8i8(a, b) +#define vsubw_s16(a, b) __builtin_mpl_vector_subw_low_v4i16(a, b) 
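// Illustrative usage, not part of the patch: vsubl widens both narrow operands before
// subtracting, while vsubw subtracts a narrow operand from an already-wide one. Assuming
// the standard ARM NEON semantics for the macros above, the difference shows up in a
// sketch like this (DiffAndAdjust and its parameter names are hypothetical):
static inline int16x8_t DiffAndAdjust(int8x8_t a, int8x8_t b, int8x8_t bias) {
  int16x8_t wide = vsubl_s8(a, b);  // (int16)a[i] - (int16)b[i], so no 8-bit wrap-around
  return vsubw_s8(wide, bias);      // wide[i] - (int16)bias[i]
}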
+#define vsubw_s32(a, b) __builtin_mpl_vector_subw_low_v2i32(a, b) +#define vsubw_u8(a, b) __builtin_mpl_vector_subw_low_v8u8(a, b) +#define vsubw_u16(a, b) __builtin_mpl_vector_subw_low_v4u16(a, b) +#define vsubw_u32(a, b) __builtin_mpl_vector_subw_low_v2u32(a, b) +#define vsubw_high_s8(a, b) __builtin_mpl_vector_subw_high_v8i8(a, b) +#define vsubw_high_s16(a, b) __builtin_mpl_vector_subw_high_v4i16(a, b) +#define vsubw_high_s32(a, b) __builtin_mpl_vector_subw_high_v2i32(a, b) +#define vsubw_high_u8(a, b) __builtin_mpl_vector_subw_high_v8u8(a, b) +#define vsubw_high_u16(a, b) __builtin_mpl_vector_subw_high_v4u16(a, b) +#define vsubw_high_u32(a, b) __builtin_mpl_vector_subw_high_v2u32(a, b) + +// vzip +#define vzip_s8(a, b) __builtin_mpl_vector_zip_v8i8(a, b) +#define vzip_s16(a, b) __builtin_mpl_vector_zip_v4i16(a, b) +#define vzip_s32(a, b) __builtin_mpl_vector_zip_v2i32(a, b) +#define vzip_u8(a, b) __builtin_mpl_vector_zip_v8u8(a, b) +#define vzip_u16(a, b) __builtin_mpl_vector_zip_v4u16(a, b) +#define vzip_u32(a, b) __builtin_mpl_vector_zip_v2u32(a, b) +#define vzip_f32(a, b) __builtin_mpl_vector_zip_v2f32(a, b) + +#endif /* __ARM_NEON_H */ diff --git a/src/hir2mpl/ast_input/clang/src/ast_expr.cpp b/src/hir2mpl/ast_input/clang/src/ast_expr.cpp new file mode 100644 index 0000000000000000000000000000000000000000..939940c59a1d384f0a6956952ff37cd442e032bf --- /dev/null +++ b/src/hir2mpl/ast_input/clang/src/ast_expr.cpp @@ -0,0 +1,2963 @@ +/* + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "ast_expr.h" +#include "ast_decl.h" +#include "ast_macros.h" +#include "mpl_logging.h" +#include "feir_stmt.h" +#include "feir_builder.h" +#include "fe_utils_ast.h" +#include "feir_type_helper.h" +#include "fe_manager.h" +#include "ast_stmt.h" +#include "ast_util.h" +#include "enhance_c_checker.h" +#include "ror.h" +#include "conditional_operator.h" + +#include + +namespace maple { + +namespace { + +const uint32 kOneByte = 8; + +template +std::optional GenerateConstCommon(Opcode op, const T p0, const T p1) { + switch (op) { + case OP_add: { + return p0 + p1; + } + case OP_sub: { + return p0 - p1; + } + case OP_mul: { + return p0 * p1; + } + case OP_div: { + return p0 / p1; + } + default: { + return std::nullopt; + } + } +} + +template , bool> = true> +T GenerateConst(Opcode op, T p0, T p1) { + auto res = GenerateConstCommon(op, p0, p1); + ASSERT(res, "invalid operations for floating point values"); + return *res; +} + +IntVal GenerateConst(Opcode op, const IntVal &p0, const IntVal &p1) { + ASSERT(p0.GetBitWidth() == p1.GetBitWidth() && p0.IsSigned() == p1.IsSigned(), "width and sign must be the same"); + + if (auto res = GenerateConstCommon(op, p0, p1)) { + return *res; + } + + switch (op) { + case OP_rem: { + return p0 % p1; + } + case OP_shl: { + return p0 << p1; + } + case OP_lshr: + case OP_ashr: { + return p0 >> p1; + } + case OP_bior: { + return p0 | p1; + } + case OP_band: { + return p0 & p1; + } + case OP_bxor: { + return p0 ^ p1; + } + case OP_land: + case OP_cand: { + return IntVal(p0.GetExtValue() && p1.GetExtValue(), p0.GetBitWidth(), p0.IsSigned()); + } + case OP_lior: + case OP_cior: { + return IntVal(p0.GetExtValue() || p1.GetExtValue(), p0.GetBitWidth(), p0.IsSigned()); + } + default: + CHECK_FATAL(false, "unsupported operation"); + } +} + +MIRConst *MIRConstGenerator(MemPool *mp, MIRConst *konst0, MIRConst *konst1, Opcode op) { +#define RET_VALUE_IF_CONST_TYPE_IS(TYPE) \ + do { \ + auto *c0 = safe_cast(konst0); \ + if (c0) { \ + auto *c1 = safe_cast(konst1); \ + ASSERT(c1, "invalid const type"); \ + ASSERT(c0->GetType().GetPrimType() == c1->GetType().GetPrimType(), "types are not equal"); \ + return mp->New(GenerateConst(op, c0->GetValue(), c1->GetValue()), c0->GetType()); \ + } \ + } while (0) + + RET_VALUE_IF_CONST_TYPE_IS(MIRIntConst); + RET_VALUE_IF_CONST_TYPE_IS(MIRFloatConst); + RET_VALUE_IF_CONST_TYPE_IS(MIRDoubleConst); + +#undef RET_VALUE_IF_CONST_TYPE_IS + + CHECK_FATAL(false, "unreachable code"); +} + +} // anonymous namespace + +// ---------- ASTValue ---------- +MIRConst *ASTValue::Translate2MIRConst() const { + switch (pty) { + case PTY_u1: { + return GlobalTables::GetIntConstTable().GetOrCreateIntConst( + val.u8, *GlobalTables::GetTypeTable().GetPrimType(PTY_u1)); + } + case PTY_u8: { + return GlobalTables::GetIntConstTable().GetOrCreateIntConst( + val.u8, *GlobalTables::GetTypeTable().GetPrimType(PTY_u8)); + } + case PTY_u16: { + return GlobalTables::GetIntConstTable().GetOrCreateIntConst( + val.u16, *GlobalTables::GetTypeTable().GetPrimType(PTY_u16)); + } + case PTY_u32: { + return GlobalTables::GetIntConstTable().GetOrCreateIntConst( + val.u32, *GlobalTables::GetTypeTable().GetPrimType(PTY_u32)); + } + case PTY_u64: { + return GlobalTables::GetIntConstTable().GetOrCreateIntConst( + val.u64, *GlobalTables::GetTypeTable().GetPrimType(PTY_u64)); + } + case PTY_i8: { + return GlobalTables::GetIntConstTable().GetOrCreateIntConst( + static_cast(static_cast(val.i8)), *GlobalTables::GetTypeTable().GetPrimType(PTY_i8)); + } + case PTY_i16: { + 
return GlobalTables::GetIntConstTable().GetOrCreateIntConst( + static_cast(static_cast(val.i16)), *GlobalTables::GetTypeTable().GetPrimType(PTY_i16)); + } + case PTY_i32: { + return GlobalTables::GetIntConstTable().GetOrCreateIntConst( + static_cast(static_cast(val.i32)), *GlobalTables::GetTypeTable().GetPrimType(PTY_i32)); + } + case PTY_i64: { + return GlobalTables::GetIntConstTable().GetOrCreateIntConst( + static_cast(val.i64), *GlobalTables::GetTypeTable().GetPrimType(PTY_i64)); + } + case PTY_f32: { + return FEManager::GetModule().GetMemPool()->New( + val.f32, *GlobalTables::GetTypeTable().GetPrimType(PTY_f32)); + } + case PTY_f64: { + return FEManager::GetModule().GetMemPool()->New( + val.f64, *GlobalTables::GetTypeTable().GetPrimType(PTY_f64)); + } + case PTY_a64: { + return FEManager::GetModule().GetMemPool()->New( + val.strIdx, *GlobalTables::GetTypeTable().GetPrimType(PTY_a64)); + } + default: { + CHECK_FATAL(false, "Unsupported Primitive type: %d", pty); + } + } +} + +// ---------- ASTExpr ---------- +UniqueFEIRExpr ASTExpr::Emit2FEExpr(std::list &stmts) const { + auto feirExpr = Emit2FEExprImpl(stmts); + if (feirExpr != nullptr) { + feirExpr->SetLoc(loc); + } + for (auto &stmt : stmts) { + if (!stmt->HasSetLOCInfo()) { + stmt->SetSrcLoc(loc); + } + } + return feirExpr; +} + +UniqueFEIRExpr ASTExpr::ImplicitInitFieldValue(MIRType &type, std::list &stmts) const { + UniqueFEIRExpr implicitInitFieldExpr; + MIRTypeKind noInitExprKind = type.GetKind(); + if (noInitExprKind == kTypeStruct || noInitExprKind == kTypeUnion) { + auto *structType = static_cast(&type); + std::string tmpName = FEUtils::GetSequentialName("implicitInitStruct_"); + UniqueFEIRVar tmpVar = FEIRBuilder::CreateVarNameForC(tmpName, type); + for (size_t i = 0; i < structType->GetFieldsSize(); ++i) { + FieldID fieldID = 0; + FEUtils::TraverseToNamedField(*structType, structType->GetElemStrIdx(i), fieldID); + MIRType *fieldType = structType->GetFieldType(fieldID); + UniqueFEIRExpr fieldExpr = ImplicitInitFieldValue(*fieldType, stmts); + UniqueFEIRStmt fieldStmt = std::make_unique(tmpVar->Clone(), std::move(fieldExpr), fieldID); + stmts.emplace_back(std::move(fieldStmt)); + } + implicitInitFieldExpr = FEIRBuilder::CreateExprDRead(std::move(tmpVar)); + } else if (noInitExprKind == kTypeArray) { + auto *arrayType = static_cast(&type); + size_t elemSize = arrayType->GetElemType()->GetSize(); + CHECK_FATAL(elemSize != 0, "elemSize is 0"); + size_t numElems = arrayType->GetSize() / elemSize; + UniqueFEIRType typeNative = FEIRTypeHelper::CreateTypeNative(type); + std::string tmpName = FEUtils::GetSequentialName("implicitInitArray_"); + UniqueFEIRVar tmpVar = FEIRBuilder::CreateVarNameForC(tmpName, type); + UniqueFEIRExpr arrayExpr = FEIRBuilder::CreateExprDRead(tmpVar->Clone()); + for (uint32 i = 0; i < numElems; ++i) { + UniqueFEIRExpr exprIndex = FEIRBuilder::CreateExprConstI32(i); + MIRType *fieldType = arrayType->GetElemType(); + UniqueFEIRExpr exprElem = ImplicitInitFieldValue(*fieldType, stmts); + UniqueFEIRType typeNativeTmp = typeNative->Clone(); + UniqueFEIRExpr arrayExprTmp = arrayExpr->Clone(); + auto stmt = FEIRBuilder::CreateStmtArrayStoreOneStmtForC(std::move(exprElem), std::move(arrayExprTmp), + std::move(exprIndex), std::move(typeNativeTmp), + tmpName); + stmts.emplace_back(std::move(stmt)); + } + implicitInitFieldExpr = FEIRBuilder::CreateExprDRead(std::move(tmpVar)); + } else if (noInitExprKind == kTypePointer) { + implicitInitFieldExpr = std::make_unique(static_cast(0), PTY_ptr); + } else { + 
CHECK_FATAL(noInitExprKind == kTypeScalar, "noInitExprKind isn't kTypeScalar"); + implicitInitFieldExpr = FEIRBuilder::CreateExprConstAnyScalar(type.GetPrimType(), 0); + } + return implicitInitFieldExpr; +} + +MIRConst *ASTExpr::GenerateMIRConstImpl() const { + CHECK_FATAL(isConstantFolded && value != nullptr, "Unsupported for ASTExpr: %d", op); + return value->Translate2MIRConst(); +} + +ASTExpr *ASTExpr::IgnoreParensImpl() { + return this; +} + +// ---------- ASTDeclRefExpr --------- +MIRConst *ASTDeclRefExpr::GenerateMIRConstImpl() const { + MIRType *mirType = refedDecl->GetTypeDesc().front(); + if (mirType->GetKind() == kTypePointer && + static_cast(mirType)->GetPointedType()->GetKind() == kTypeFunction) { + GStrIdx idx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(refedDecl->GetName()); + MIRSymbol *funcSymbol = GlobalTables::GetGsymTable().GetSymbolFromStrIdx(idx); + CHECK_FATAL(funcSymbol != nullptr, "Should process func decl before var decl"); + MIRFunction *mirFunc = funcSymbol->GetFunction(); + CHECK_FATAL(mirFunc != nullptr, "Same name symbol with function: %s", refedDecl->GetName().c_str()); + return FEManager::GetModule().GetMemPool()->New(mirFunc->GetPuidx(), *mirType); + } else if (!isConstantFolded) { + ASTDecl *var = refedDecl; + MIRSymbol *mirSymbol; + if (var->IsGlobal()) { + mirSymbol = FEManager::GetMIRBuilder().GetOrCreateGlobalDecl( + var->GenerateUniqueVarName(), *(var->GetTypeDesc().front())); + } else { + mirSymbol = FEManager::GetMIRBuilder().GetOrCreateLocalDecl( + var->GenerateUniqueVarName(), *(var->GetTypeDesc().front())); + } + return FEManager::GetModule().GetMemPool()->New( + mirSymbol->GetStIdx(), 0, *(var->GetTypeDesc().front())); + } else { + return GetConstantValue()->Translate2MIRConst(); + } +} + +UniqueFEIRExpr ASTDeclRefExpr::Emit2FEExprImpl(std::list &stmts) const { + (void)stmts; + MIRType *mirType = refedDecl->GetTypeDesc().front(); + UniqueFEIRExpr feirRefExpr; + auto attrs = refedDecl->GetGenericAttrs(); + if (mirType->GetKind() == kTypePointer && + static_cast(mirType)->GetPointedType()->GetKind() == kTypeFunction) { + feirRefExpr = FEIRBuilder::CreateExprAddrofFunc(refedDecl->GetName()); + } else { + if (refedDecl->GetDeclKind() == kASTEnumConstant) { + return FEIRBuilder::CreateExprConstAnyScalar(refedDecl->GetTypeDesc().front()->GetPrimType(), + static_cast(refedDecl)->GetValue().GetExtValue()); + } + UniqueFEIRVar feirVar = + FEIRBuilder::CreateVarNameForC(refedDecl->GenerateUniqueVarName(), *mirType, refedDecl->IsGlobal(), false); + feirVar->SetAttrs(attrs); + if (mirType->GetKind() == kTypeArray) { + feirRefExpr = FEIRBuilder::CreateExprAddrofVar(std::move(feirVar)); + } else { + feirRefExpr = FEIRBuilder::CreateExprDRead(std::move(feirVar)); + } + } + if (refedDecl->IsParam() && refedDecl->GetDeclKind() == kASTVar) { + PrimType promoted = static_cast(refedDecl)->GetPromotedType(); + if (promoted != PTY_void) { + feirRefExpr = FEIRBuilder::CreateExprCastPrim(std::move(feirRefExpr), promoted); + } + } + return feirRefExpr; +} + +// ---------- ASTCallExpr ---------- +std::string ASTCallExpr::CvtBuiltInFuncName(std::string builtInName) const { +#define BUILTIN_FUNC(funcName) \ + {"__builtin_"#funcName, #funcName}, + static std::map cvtMap = { +#include "ast_builtin_func.def" +#undef BUILTIN_FUNC + }; + std::map::const_iterator it = cvtMap.find(builtInName); + if (it != cvtMap.cend()) { + return cvtMap.find(builtInName)->second; + } else { + return builtInName; + } +} + +std::unordered_map ASTCallExpr::builtingFuncPtrMap = + 
ASTCallExpr::InitBuiltinFuncPtrMap(); + +void ASTCallExpr::AddArgsExpr(const std::unique_ptr &callStmt, std::list &stmts) const { + for (int32 i = (static_cast(args.size()) - 1); i >= 0; --i) { + UniqueFEIRExpr expr = args[i]->Emit2FEExpr(stmts); + callStmt->AddExprArgReverse(std::move(expr)); + } + if (IsFirstArgRet()) { + UniqueFEIRVar var = FEIRBuilder::CreateVarNameForC(GetRetVarName(), *mirType, false, false); + UniqueFEIRExpr expr = FEIRBuilder::CreateExprAddrofVar(var->Clone()); + callStmt->AddExprArgReverse(std::move(expr)); + } + if (isIcall) { + UniqueFEIRExpr expr = calleeExpr->Emit2FEExpr(stmts); + InsertNonnullCheckingForIcall(expr, stmts); + InsertBoundaryCheckingInArgsForICall(stmts, expr); + callStmt->AddExprArgReverse(std::move(expr)); + } + InsertBoundaryCheckingInArgs(stmts); + CheckNonnullFieldInStruct(); +} + +void ASTCallExpr::InsertNonnullCheckingForIcall(const UniqueFEIRExpr &expr, std::list &stmts) const { + if (!FEOptions::GetInstance().IsNpeCheckDynamic() || expr->GetPrimType() != PTY_ptr) { + return; + } + UniqueFEIRStmt stmt = std::make_unique(OP_assertnonnull, expr->Clone()); + stmts.emplace_back(std::move(stmt)); +} + +UniqueFEIRExpr ASTCallExpr::AddRetExpr(const std::unique_ptr &callStmt) const { + UniqueFEIRVar var = FEIRBuilder::CreateVarNameForC(GetRetVarName(), *mirType, false, false); + var->SetAttrs(GetReturnVarAttrs()); + UniqueFEIRVar dreadVar = var->Clone(); + if (!IsFirstArgRet()) { + callStmt->SetVar(var->Clone()); + } + return FEIRBuilder::CreateExprDRead(dreadVar->Clone()); +} + +std::unique_ptr ASTCallExpr::GenCallStmt() const { + MemPool *mp = FEManager::GetManager().GetStructElemMempool(); + std::unique_ptr callStmt; + if (isIcall) { + auto icallStmt = std::make_unique(); + CHECK_FATAL(calleeExpr->GetType()->IsMIRPtrType(), "cannot find func pointer for icall"); + MIRFuncType *funcType = static_cast(calleeExpr->GetType())->GetPointedFuncType(); + CHECK_FATAL(funcType != nullptr, "cannot find func prototype for icall"); + icallStmt->SetPrototype(FEIRTypeHelper::CreateTypeNative(*funcType)); + callStmt = std::move(icallStmt); + } else { + StructElemNameIdx *nameIdx = mp->New(GetFuncName()); + ASSERT_NOT_NULL(nameIdx); + FEStructMethodInfo *info = static_cast( + FEManager::GetTypeManager().RegisterStructMethodInfo(*nameIdx, kSrcLangC, false)); + ASSERT_NOT_NULL(info); + info->SetFuncAttrs(funcAttrs); + FEIRTypeNative *retTypeInfo = nullptr; + if (IsFirstArgRet()) { + retTypeInfo = mp->New(*GlobalTables::GetTypeTable().GetPrimType(PTY_void)); + } else { + retTypeInfo = mp->New(*mirType); + } + info->SetReturnType(retTypeInfo); + Opcode op; + if (retTypeInfo->GetPrimType() != PTY_void) { + op = OP_callassigned; + } else { + op = OP_call; + } + callStmt = std::make_unique(*info, op, nullptr, false); + } + return callStmt; +} + +UniqueFEIRExpr ASTCallExpr::Emit2FEExprImpl(std::list &stmts) const { + if (!isIcall) { + bool isFinish = false; + UniqueFEIRExpr buitinExpr = ProcessBuiltinFunc(stmts, isFinish); + if (isFinish) { + return buitinExpr; + } + } + std::unique_ptr callStmt = GenCallStmt(); + AddArgsExpr(callStmt, stmts); + UniqueFEIRExpr retExpr = nullptr; + if (IsNeedRetExpr()) { + retExpr = AddRetExpr(callStmt); + } + stmts.emplace_back(std::move(callStmt)); + InsertBoundaryVarInRet(stmts); + return retExpr; +} + +// ---------- ASTCastExpr ---------- +MIRConst *ASTCastExpr::GenerateMIRConstImpl() const { + std::list stmts; + auto feExpr = child->Emit2FEExpr(stmts); + if (isArrayToPointerDecay && feExpr->GetKind() == FEIRNodeKind::kExprAddrof) 
{ + return FEManager::GetModule().GetMemPool()->New( + GetConstantValue()->val.strIdx, *GlobalTables::GetTypeTable().GetPrimType(PTY_a64)); + } else if (isArrayToPointerDecay && child->IgnoreParens()->GetASTOp() == kASTOpCompoundLiteralExpr) { + static_cast(child->IgnoreParens())->SetAddrof(true); + return child->GenerateMIRConst(); + } else if (isNeededCvt) { + if (dst->GetPrimType() == PTY_f64) { + return GenerateMIRDoubleConst(); + } else if (dst->GetPrimType() == PTY_f32) { + return GenerateMIRFloatConst(); + } else { + return GenerateMIRIntConst(); + } + } else { + return child->GenerateMIRConst(); + } +} + +MIRConst *ASTCastExpr::GenerateMIRDoubleConst() const { + MIRConst *childConst = child->GenerateMIRConst(); + if (childConst == nullptr) { + return nullptr; + } + switch (childConst->GetKind()) { + case kConstFloatConst: { + return FEManager::GetModule().GetMemPool()->New( + static_cast(static_cast(childConst)->GetValue()), + *GlobalTables::GetTypeTable().GetPrimType(PTY_f64)); + } + case kConstInt: { + return FEManager::GetModule().GetMemPool()->New( + static_cast(static_cast(childConst)->GetExtValue()), + *GlobalTables::GetTypeTable().GetPrimType(PTY_f64)); + } + case kConstDoubleConst: { + return FEManager::GetModule().GetMemPool()->New( + static_cast(static_cast(childConst)->GetValue()), + *GlobalTables::GetTypeTable().GetPrimType(PTY_f64)); + } + default: { + CHECK_FATAL(false, "Unsupported pty type: %d", GetConstantValue()->pty); + return nullptr; + } + } +} + +MIRConst *ASTCastExpr::GenerateMIRFloatConst() const { + MIRConst *childConst = child->GenerateMIRConst(); + if (childConst == nullptr) { + return nullptr; + } + switch (childConst->GetKind()) { + case kConstDoubleConst: { + return FEManager::GetModule().GetMemPool()->New( + static_cast(static_cast(childConst)->GetValue()), + *GlobalTables::GetTypeTable().GetPrimType(PTY_f32)); + } + case kConstInt: { + return FEManager::GetModule().GetMemPool()->New( + static_cast(static_cast(childConst)->GetExtValue()), + *GlobalTables::GetTypeTable().GetPrimType(PTY_f32)); + } + default: { + CHECK_FATAL(false, "Unsupported pty type: %d", GetConstantValue()->pty); + return nullptr; + } + } +} + +MIRConst *ASTCastExpr::GenerateMIRIntConst() const { + MIRConst *childConst = child->GenerateMIRConst(); + if (childConst == nullptr) { + return nullptr; + } + switch (childConst->GetKind()) { + case kConstDoubleConst: + case kConstInt: { + int64 val = childConst->GetKind() == kConstDoubleConst + ? 
static_cast(static_cast(childConst)->GetValue()) + : static_cast(childConst)->GetExtValue(); + + PrimType destPrimType = mirType->GetPrimType(); + switch (destPrimType) { + case PTY_i8: + val = static_cast(val); + break; + case PTY_i16: + val = static_cast(val); + break; + case PTY_i32: + val = static_cast(val); + break; + case PTY_i64: + val = static_cast(val); + break; + case PTY_u8: + val = static_cast(val); + break; + case PTY_u16: + val = static_cast(val); + break; + case PTY_u32: + val = static_cast(val); + break; + case PTY_u64: + val = static_cast(val); + break; + default: + break; + } + return FEManager::GetModule().GetMemPool()->New( + val, *GlobalTables::GetTypeTable().GetPrimType(PTY_i64)); + } + case kConstStrConst: { + return FEManager::GetModule().GetMemPool()->New( + static_cast(static_cast(childConst)->GetValue()), + *GlobalTables::GetTypeTable().GetPrimType(PTY_a64)); + } + case kConstAddrof: { + return FEManager::GetModule().GetMemPool()->New( + static_cast(static_cast(childConst)->GetOffset()), + *GlobalTables::GetTypeTable().GetPrimType(PTY_i64)); + } + case kConstLblConst: { + // init by initListExpr, Only MIRConst kind is set here. + return childConst; + } + default: { + CHECK_FATAL(false, "Unsupported pty type: %d", GetConstantValue()->pty); + return nullptr; + } + } +} + +UniqueFEIRExpr ASTCastExpr::Emit2FEExprForComplex(const UniqueFEIRExpr &subExpr, const UniqueFEIRType &srcType, + std::list &stmts) const { + std::string tmpName = FEUtils::GetSequentialName("Complex_"); + UniqueFEIRVar tmpVar = FEIRBuilder::CreateVarNameForC(tmpName, *complexType); + UniqueFEIRExpr dreadAgg; + if (imageZero) { + UniqueFEIRStmt realStmtNode = std::make_unique(tmpVar->Clone(), + subExpr->Clone(), kComplexRealID); + stmts.emplace_back(std::move(realStmtNode)); + UniqueFEIRExpr imagExpr = FEIRBuilder::CreateExprConstAnyScalar(src->GetPrimType(), 0); + UniqueFEIRStmt imagStmtNode = std::make_unique(tmpVar->Clone(), + imagExpr->Clone(), kComplexImagID); + stmts.emplace_back(std::move(imagStmtNode)); + dreadAgg = FEIRBuilder::CreateExprDRead(std::move(tmpVar)); + static_cast(dreadAgg.get())->SetFieldType(srcType->Clone()); + } else { + UniqueFEIRExpr realExpr; + UniqueFEIRExpr imagExpr; + FEIRNodeKind subNodeKind = subExpr->GetKind(); + UniqueFEIRExpr cloneSubExpr = subExpr->Clone(); + if (subNodeKind == kExprIRead) { + static_cast(subExpr.get())->SetFieldID(kComplexRealID); + static_cast(cloneSubExpr.get())->SetFieldID(kComplexImagID); + } else if (subNodeKind == kExprDRead) { + static_cast(subExpr.get())->SetFieldID(kComplexRealID); + static_cast(subExpr.get())->SetFieldType(srcType->Clone()); + static_cast(cloneSubExpr.get())->SetFieldID(kComplexImagID); + static_cast(cloneSubExpr.get())->SetFieldType(srcType->Clone()); + } + realExpr = FEIRBuilder::CreateExprCastPrim(subExpr->Clone(), dst->GetPrimType()); + imagExpr = FEIRBuilder::CreateExprCastPrim(std::move(cloneSubExpr), dst->GetPrimType()); + UniqueFEIRStmt realStmt = std::make_unique(tmpVar->Clone(), std::move(realExpr), kComplexRealID); + stmts.emplace_back(std::move(realStmt)); + UniqueFEIRStmt imagStmt = std::make_unique(tmpVar->Clone(), std::move(imagExpr), kComplexImagID); + stmts.emplace_back(std::move(imagStmt)); + dreadAgg = FEIRBuilder::CreateExprDRead(std::move(tmpVar)); + } + return dreadAgg; +} + +UniqueFEIRExpr ASTCastExpr::Emit2FEExprForFunctionOrArray2Pointer(std::list &stmts) const { + const ASTExpr *childExpr = child; + CHECK_FATAL(childExpr != nullptr, "childExpr is nullptr"); + auto childFEExpr = 
childExpr->Emit2FEExpr(stmts); + if (childFEExpr->GetKind() == kExprDRead) { + return std::make_unique( + static_cast(childFEExpr.get())->GetVar()->Clone(), childFEExpr->GetFieldID()); + } else if (childFEExpr->GetKind() == kExprIRead) { + auto iread = static_cast(childFEExpr.get()); + if (iread->GetFieldID() == 0) { + auto addrOfExpr = iread->GetClonedOpnd(); + ENCChecker::ReduceBoundaryChecking(stmts, addrOfExpr); + return addrOfExpr; + } else { + return std::make_unique(iread->GetClonedPtrType(), iread->GetFieldID(), iread->GetClonedOpnd()); + } + } else if (childFEExpr->GetKind() == kExprIAddrof || childFEExpr->GetKind() == kExprAddrofVar || + childFEExpr->GetKind() == kExprAddrofFunc || childFEExpr->GetKind() == kExprAddrof) { + return childFEExpr; + } else { + CHECK_FATAL(false, "unsupported expr kind %d", childFEExpr->GetKind()); + } +} + +UniqueFEIRExpr ASTCastExpr::Emit2FEExprImpl(std::list &stmts) const { + const ASTExpr *childExpr = child; + CHECK_FATAL(childExpr != nullptr, "childExpr is nullptr"); + if (isArrayToPointerDecay || isFunctionToPointerDecay) { + return Emit2FEExprForFunctionOrArray2Pointer(stmts); + } + UniqueFEIRExpr subExpr = childExpr->Emit2FEExpr(stmts); + if (isUnoinCast && dst->GetKind() == kTypeUnion) { + std::string varName = FEUtils::GetSequentialName("anon.union."); + UniqueFEIRType dstType = std::make_unique(*dst); + UniqueFEIRVar unionVar = FEIRBuilder::CreateVarNameForC(varName, std::move(dstType)); + UniqueFEIRStmt unionStmt = FEIRBuilder::CreateStmtDAssign(unionVar->Clone(), subExpr->Clone()); + (void)stmts.emplace_back(std::move(unionStmt)); + return FEIRBuilder::CreateExprDRead(std::move(unionVar)); + } + if (isBitCast) { + if (src->GetPrimType() == dst->GetPrimType() && src->IsScalarType()) { + // This case may show up when casting from a 1-element vector to its scalar type. 
+ return subExpr; + } + UniqueFEIRType dstType = std::make_unique(*dst); + if (dst->GetKind() == kTypePointer) { + MIRType *funcType = static_cast(dst)->GetPointedFuncType(); + if (funcType != nullptr) { + return std::make_unique(std::move(dstType), OP_retype, std::move(subExpr)); + } else { + return subExpr; + } + } else { + return std::make_unique(std::move(dstType), OP_retype, std::move(subExpr)); + } + } + if (isVectorSplat) { + return EmitExprVdupVector(dst->GetPrimType(), subExpr); + } + if (complexType != nullptr) { + UniqueFEIRType srcType = std::make_unique(*src); + return Emit2FEExprForComplex(subExpr, srcType, stmts); + } + if (!IsNeededCvt(subExpr)) { + return subExpr; + } + if (IsPrimitiveFloat(subExpr->GetPrimType()) && IsPrimitiveInteger(dst->GetPrimType()) && + dst->GetPrimType() != PTY_u1) { + return FEIRBuilder::CreateExprCvtPrim(OP_trunc, std::move(subExpr), dst->GetPrimType()); + } + return FEIRBuilder::CreateExprCastPrim(std::move(subExpr), dst->GetPrimType()); +} + +UniqueFEIRExpr ASTCastExpr::EmitExprVdupVector(PrimType primtype, UniqueFEIRExpr &subExpr) const { +MIRIntrinsicID intrinsic; + switch (primtype) { +#define SET_VDUP(TY) \ + case PTY_##TY: \ + intrinsic = INTRN_vector_from_scalar_##TY; \ + break; + + SET_VDUP(v2i64) + SET_VDUP(v4i32) + SET_VDUP(v8i16) + SET_VDUP(v16i8) + SET_VDUP(v2u64) + SET_VDUP(v4u32) + SET_VDUP(v8u16) + SET_VDUP(v16u8) + SET_VDUP(v2f64) + SET_VDUP(v4f32) + SET_VDUP(v2i32) + SET_VDUP(v4i16) + SET_VDUP(v8i8) + SET_VDUP(v2u32) + SET_VDUP(v4u16) + SET_VDUP(v8u8) + SET_VDUP(v2f32) + case PTY_i64: + intrinsic = INTRN_vector_from_scalar_v1i64; + break; + case PTY_u64: + intrinsic = INTRN_vector_from_scalar_v1f64; + break; + case PTY_f64: + intrinsic = INTRN_vector_from_scalar_v1f64; + break; + default: + CHECK_FATAL(false, "Unhandled vector type in CreateExprVdupAnyVector"); + } + UniqueFEIRType feType = FEIRTypeHelper::CreateTypeNative(*GlobalTables::GetTypeTable().GetPrimType(primtype)); + std::vector> argOpnds; + argOpnds.push_back(std::move(subExpr)); + return std::make_unique(std::move(feType), intrinsic, argOpnds); +} + +// ---------- ASTUnaryOperatorExpr ---------- +void ASTUnaryOperatorExpr::SetUOExpr(ASTExpr *astExpr) { + expr = astExpr; +} + +void ASTUnaryOperatorExpr::SetSubType(MIRType *type) { + subType = type; +} + +UniqueFEIRExpr ASTUOMinusExpr::Emit2FEExprImpl(std::list &stmts) const { + ASTExpr *childExpr = expr; + CHECK_FATAL(childExpr != nullptr, "childExpr is nullptr"); + UniqueFEIRExpr childFEIRExpr = childExpr->Emit2FEExpr(stmts); + PrimType dstType = uoType->GetPrimType(); + CHECK_NULL_FATAL(subType); + if (childFEIRExpr->GetPrimType() != dstType) { + UniqueFEIRExpr minusExpr = std::make_unique( + FEIRTypeHelper::CreateTypeNative(*subType), OP_neg, std::move(childFEIRExpr)); + return FEIRBuilder::CreateExprCastPrim(std::move(minusExpr), dstType); + } + return std::make_unique(FEIRTypeHelper::CreateTypeNative(*subType), OP_neg, std::move(childFEIRExpr)); +} + +UniqueFEIRExpr ASTUOPlusExpr::Emit2FEExprImpl(std::list &stmts) const { + ASTExpr *childExpr = expr; + CHECK_FATAL(childExpr != nullptr, "childExpr is nullptr"); + UniqueFEIRExpr plusExpr = childExpr->Emit2FEExpr(stmts); + return plusExpr; +} + +UniqueFEIRExpr ASTUONotExpr::Emit2FEExprImpl(std::list &stmts) const { + ASTExpr *childExpr = expr; + CHECK_FATAL(childExpr != nullptr, "childExpr is nullptr"); + UniqueFEIRExpr childFEIRExpr = childExpr->Emit2FEExpr(stmts); + PrimType dstType = uoType->GetPrimType(); + CHECK_NULL_FATAL(subType); + if 
(childFEIRExpr->GetPrimType() != dstType) { + UniqueFEIRExpr notExpr = std::make_unique( + FEIRTypeHelper::CreateTypeNative(*subType), OP_bnot, std::move(childFEIRExpr)); + return FEIRBuilder::CreateExprCastPrim(std::move(notExpr), dstType); + } + return std::make_unique(FEIRTypeHelper::CreateTypeNative(*subType), OP_bnot, std::move(childFEIRExpr)); +} + +UniqueFEIRExpr ASTUOLNotExpr::Emit2FEExprImpl(std::list &stmts) const { + ASTExpr *childExpr = expr; + CHECK_FATAL(childExpr != nullptr, "childExpr is nullptr"); + childExpr->SetShortCircuitIdx(falseIdx, trueIdx); + UniqueFEIRExpr childFEIRExpr = childExpr->Emit2FEExpr(stmts); + if (childFEIRExpr != nullptr) { + return FEIRBuilder::CreateExprZeroCompare(OP_eq, std::move(childFEIRExpr)); + } + return childFEIRExpr; +} + +UniqueFEIRExpr ASTUnaryOperatorExpr::ASTUOSideEffectExpr(Opcode op, std::list &stmts, + const std::string &varName, bool post) const { + ASTExpr *childExpr = expr; + CHECK_FATAL(childExpr != nullptr, "childExpr is nullptr"); + UniqueFEIRExpr childFEIRExpr = childExpr->Emit2FEExpr(stmts); + UniqueFEIRVar tempVar; + if (post) { + tempVar = FEIRBuilder::CreateVarNameForC(varName, *subType); + UniqueFEIRStmt readSelfstmt = FEIRBuilder::CreateStmtDAssign(tempVar->Clone(), childFEIRExpr->Clone()); + if (!IsRValue()) { + readSelfstmt->SetDummy(); + } + stmts.emplace_back(std::move(readSelfstmt)); + } + + PrimType subPrimType = subType->GetPrimType(); + UniqueFEIRExpr subExpr = (subPrimType == PTY_ptr) ? std::make_unique(pointeeLen, PTY_i32) : + FEIRBuilder::CreateExprConstAnyScalar(subPrimType, 1); + UniqueFEIRExpr sideEffectExpr = FEIRBuilder::CreateExprMathBinary(op, childFEIRExpr->Clone(), std::move(subExpr)); + UniqueFEIRStmt sideEffectStmt = FEIRBuilder::AssginStmtField(childFEIRExpr->Clone(), std::move(sideEffectExpr), 0); + stmts.emplace_back(std::move(sideEffectStmt)); + + if (post) { + return FEIRBuilder::CreateExprDRead(std::move(tempVar)); + } + return childFEIRExpr; +} + +UniqueFEIRExpr ASTUOPostIncExpr::Emit2FEExprImpl(std::list &stmts) const { + return ASTUOSideEffectExpr(OP_add, stmts, tempVarName, true); +} + +UniqueFEIRExpr ASTUOPostDecExpr::Emit2FEExprImpl(std::list &stmts) const { + return ASTUOSideEffectExpr(OP_sub, stmts, tempVarName, true); +} + +UniqueFEIRExpr ASTUOPreIncExpr::Emit2FEExprImpl(std::list &stmts) const { + return ASTUOSideEffectExpr(OP_add, stmts); +} + +UniqueFEIRExpr ASTUOPreDecExpr::Emit2FEExprImpl(std::list &stmts) const { + return ASTUOSideEffectExpr(OP_sub, stmts); +} + +MIRConst *ASTUOAddrOfExpr::GenerateMIRConstImpl() const { + switch (expr->GetASTOp()) { + case kASTOpCompoundLiteralExpr: { + static_cast(expr)->SetAddrof(true); + return expr->GenerateMIRConst(); + } + case kASTOpRef: { + expr->SetIsConstantFolded(false); + return expr->GenerateMIRConst(); + } + case kASTSubscriptExpr: + case kASTMemberExpr: { + return expr->GenerateMIRConst(); + } + case kASTStringLiteral: { + return FEManager::GetModule().GetMemPool()->New( + expr->GetConstantValue()->val.strIdx, *GlobalTables::GetTypeTable().GetPrimType(PTY_a64)); + } + default: { + CHECK_FATAL(false, "lValue in expr: %d NIY", expr->GetASTOp()); + } + } + return nullptr; +} + +UniqueFEIRExpr ASTUOAddrOfExpr::Emit2FEExprImpl(std::list &stmts) const { + ASTExpr *childExpr = expr; + UniqueFEIRExpr addrOfExpr; + CHECK_FATAL(childExpr != nullptr, "childExpr is nullptr"); + UniqueFEIRExpr childFEIRExpr = childExpr->Emit2FEExpr(stmts); + if (childFEIRExpr->GetKind() == kExprDRead) { + addrOfExpr = std::make_unique( + 
static_cast(childFEIRExpr.get())->GetVar()->Clone(), childFEIRExpr->GetFieldID()); + } else if (childFEIRExpr->GetKind() == kExprIRead) { + auto ireadExpr = static_cast(childFEIRExpr.get()); + if (ireadExpr->GetFieldID() == 0) { + addrOfExpr = ireadExpr->GetClonedOpnd(); + ENCChecker::ReduceBoundaryChecking(stmts, addrOfExpr); + } else { + addrOfExpr = std::make_unique(ireadExpr->GetClonedPtrType(), ireadExpr->GetFieldID(), + ireadExpr->GetClonedOpnd()); + } + } else if (childFEIRExpr->GetKind() == kExprIAddrof || childFEIRExpr->GetKind() == kExprAddrofVar || + childFEIRExpr->GetKind() == kExprAddrofFunc || childFEIRExpr->GetKind() == kExprAddrof) { + return childFEIRExpr; + } else if (childFEIRExpr->GetKind() == kExprConst) { + std::string tmpName = FEUtils::GetSequentialName("tmpvar_"); + UniqueFEIRVar tmpVar = FEIRBuilder::CreateVarNameForC(tmpName, childFEIRExpr->GetType()->Clone()); + auto tmpStmt = FEIRBuilder::CreateStmtDAssign(tmpVar->Clone(), std::move(childFEIRExpr)); + (void)stmts.emplace_back(std::move(tmpStmt)); + return FEIRBuilder::CreateExprAddrofVar(std::move(tmpVar)); + } else { + CHECK_FATAL(false, "unsupported expr kind %d", childFEIRExpr->GetKind()); + } + return addrOfExpr; +} + +// ---------- ASTUOAddrOfLabelExpr --------- +MIRConst *ASTUOAddrOfLabelExpr::GenerateMIRConstImpl() const { + return FEManager::GetMIRBuilder().GetCurrentFuncCodeMp()->New( + FEManager::GetMIRBuilder().GetOrCreateMIRLabel(GetLabelName()), + FEManager::GetMIRBuilder().GetCurrentFunction()->GetPuidx(), // GetCurrentFunction need to be optimized + *GlobalTables::GetTypeTable().GetVoidPtr()); // when parallel features +} + +UniqueFEIRExpr ASTUOAddrOfLabelExpr::Emit2FEExprImpl(std::list &stmts) const { + (void)stmts; + return FEIRBuilder::CreateExprAddrofLabel(GetLabelName(), std::make_unique(*uoType)); +} + +UniqueFEIRExpr ASTUODerefExpr::Emit2FEExprImpl(std::list &stmts) const { + std::list subStmts; // To delete redundant bounds checks in one ASTUODerefExpr stmts. 
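+  // Lowering sketch for a C dereference such as `v = *p`: the pointer operand is emitted into
+  // subStmts first, an assertnonnull is queued there when dynamic NPE checking is enabled and a
+  // boundary assert may be added, then the load itself becomes an iread of *uoType through a
+  // pointer type built from subType; subStmts is spliced into stmts at the end so that redundant
+  // checks produced for this single dereference can be pruned together.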
+ ASTExpr *childExpr = expr; + CHECK_FATAL(childExpr != nullptr, "childExpr is nullptr"); + UniqueFEIRExpr childFEIRExpr = childExpr->Emit2FEExpr(subStmts); + UniqueFEIRType retType = std::make_unique(*uoType); + UniqueFEIRType ptrType = std::make_unique(*subType); + InsertNonnullChecking(subStmts, childFEIRExpr->Clone()); + if (InsertBoundaryChecking(subStmts, childFEIRExpr->Clone())) { + childFEIRExpr->SetIsBoundaryChecking(true); + } + UniqueFEIRExpr derefExpr = FEIRBuilder::CreateExprIRead(std::move(retType), std::move(ptrType), + std::move(childFEIRExpr)); + stmts.splice(stmts.end(), subStmts); + return derefExpr; +} + +void ASTUODerefExpr::InsertNonnullChecking(std::list &stmts, UniqueFEIRExpr baseExpr) const { + if (!FEOptions::GetInstance().IsNpeCheckDynamic()) { + return; + } + if (baseExpr->GetPrimType() == PTY_ptr) { + UniqueFEIRStmt stmt = std::make_unique(OP_assertnonnull, std::move(baseExpr)); + stmts.emplace_back(std::move(stmt)); + } +} + +UniqueFEIRExpr ASTUORealExpr::Emit2FEExprImpl(std::list &stmts) const { + ASTExpr *childExpr = expr; + ASTOp astOP = childExpr->GetASTOp(); + UniqueFEIRExpr subFEIRExpr; + if (astOP == kASTStringLiteral || astOP == kASTIntegerLiteral || astOP == kASTFloatingLiteral || + astOP == kASTCharacterLiteral || astOP == kASTImaginaryLiteral) { + subFEIRExpr = childExpr->Emit2FEExpr(stmts); + } else { + subFEIRExpr = childExpr->Emit2FEExpr(stmts); + FEIRNodeKind subNodeKind = subFEIRExpr->GetKind(); + if (subNodeKind == kExprIRead) { + static_cast(subFEIRExpr.get())->SetFieldID(kComplexRealID); + } else if (subNodeKind == kExprDRead) { + static_cast(subFEIRExpr.get())->SetFieldID(kComplexRealID); + UniqueFEIRType elementFEType = std::make_unique(*elementType); + static_cast(subFEIRExpr.get())->SetFieldType(std::move(elementFEType)); + } else { + CHECK_FATAL(false, "NIY"); + } + } + return subFEIRExpr; +} + +UniqueFEIRExpr ASTUOImagExpr::Emit2FEExprImpl(std::list &stmts) const { + ASTExpr *childrenExpr = expr; + ASTOp astOP = childrenExpr->GetASTOp(); + UniqueFEIRExpr subFEIRExpr; + if (astOP == kASTStringLiteral || astOP == kASTCharacterLiteral || astOP == kASTImaginaryLiteral || + astOP == kASTIntegerLiteral || astOP == kASTFloatingLiteral) { + subFEIRExpr = childrenExpr->Emit2FEExpr(stmts); + } else { + subFEIRExpr = childrenExpr->Emit2FEExpr(stmts); + FEIRNodeKind subNodeKind = subFEIRExpr->GetKind(); + if (subNodeKind == kExprIRead) { + static_cast(subFEIRExpr.get())->SetFieldID(kComplexImagID); + } else if (subNodeKind == kExprDRead) { + static_cast(subFEIRExpr.get())->SetFieldID(kComplexImagID); + UniqueFEIRType elementFEType = std::make_unique(*elementType); + static_cast(subFEIRExpr.get())->SetFieldType(std::move(elementFEType)); + } else { + CHECK_FATAL(false, "NIY"); + } + } + return subFEIRExpr; +} + +UniqueFEIRExpr ASTUOExtensionExpr::Emit2FEExprImpl(std::list &stmts) const { + return expr->Emit2FEExpr(stmts); +} + +UniqueFEIRExpr ASTUOCoawaitExpr::Emit2FEExprImpl(std::list &stmts) const { + (void)stmts; + CHECK_FATAL(false, "C++ feature"); + return nullptr; +} + +// ---------- ASTPredefinedExpr ---------- +UniqueFEIRExpr ASTPredefinedExpr::Emit2FEExprImpl(std::list &stmts) const { + return child->Emit2FEExpr(stmts); +} + +void ASTPredefinedExpr::SetASTExpr(ASTExpr *astExpr) { + child = astExpr; +} + +// ---------- ASTOpaqueValueExpr ---------- +UniqueFEIRExpr ASTOpaqueValueExpr::Emit2FEExprImpl(std::list &stmts) const { + return child->Emit2FEExpr(stmts); +} + +void ASTOpaqueValueExpr::SetASTExpr(ASTExpr *astExpr) { + child = astExpr; 
+} + +// ---------- ASTBinaryConditionalOperator ---------- +UniqueFEIRExpr ASTBinaryConditionalOperator::Emit2FEExprImpl(std::list &stmts) const { + UniqueFEIRExpr condFEIRExpr = condExpr->Emit2FEExpr(stmts); + UniqueFEIRExpr trueFEIRExpr; + CHECK_NULL_FATAL(mirType); + // if a conditional expr is noncomparative, e.g., b = a ?: c + // the conditional expr will be use for trueExpr before it will be converted to comparative expr + if (!(condFEIRExpr->GetKind() == kExprBinary && static_cast(condFEIRExpr.get())->IsComparative())) { + trueFEIRExpr = condFEIRExpr->Clone(); + condFEIRExpr = FEIRBuilder::CreateExprZeroCompare(OP_ne, std::move(condFEIRExpr)); + } else { + // if a conditional expr already is comparative (only return u1 val 0 or 1), e.g., b = (a < 0) ?: c + // the conditional expr will be assigned var used for comparative expr and true expr meanwhile + MIRType *condType = condFEIRExpr->GetType()->GenerateMIRTypeAuto(); + ASSERT_NOT_NULL(condType); + UniqueFEIRVar condVar = FEIRBuilder::CreateVarNameForC(FEUtils::GetSequentialName("condVal_"), *condType); + UniqueFEIRVar condVarCloned = condVar->Clone(); + UniqueFEIRVar condVarCloned2 = condVar->Clone(); + UniqueFEIRStmt condStmt = FEIRBuilder::CreateStmtDAssign(std::move(condVar), std::move(condFEIRExpr)); + stmts.emplace_back(std::move(condStmt)); + condFEIRExpr = FEIRBuilder::CreateExprDRead(std::move(condVarCloned)); + if (condType->GetPrimType() != mirType->GetPrimType()) { + trueFEIRExpr = FEIRBuilder::CreateExprCvtPrim(std::move(condVarCloned2), mirType->GetPrimType()); + } else { + trueFEIRExpr = FEIRBuilder::CreateExprDRead(std::move(condVarCloned2)); + } + } + std::list falseStmts; + UniqueFEIRExpr falseFEIRExpr = falseExpr->Emit2FEExpr(falseStmts); + // There are no extra nested statements in false expressions, (e.g., a < 1 ?: 2), use ternary FEIRExpr + if (falseStmts.empty()) { + UniqueFEIRType type = std::make_unique(*mirType); + return FEIRBuilder::CreateExprTernary(OP_select, std::move(type), std::move(condFEIRExpr), + std::move(trueFEIRExpr), std::move(falseFEIRExpr)); + } + // Otherwise, (e.g., a < 1 ?: a++) create a temporary var to hold the return trueExpr or falseExpr value + CHECK_FATAL(falseFEIRExpr->GetPrimType() == mirType->GetPrimType(), "The type of falseFEIRExpr are inconsistent"); + UniqueFEIRVar tempVar = FEIRBuilder::CreateVarNameForC(FEUtils::GetSequentialName("levVar_"), *mirType); + UniqueFEIRVar tempVarCloned1 = tempVar->Clone(); + UniqueFEIRVar tempVarCloned2 = tempVar->Clone(); + UniqueFEIRStmt retTrueStmt = FEIRBuilder::CreateStmtDAssign(std::move(tempVar), std::move(trueFEIRExpr)); + retTrueStmt->SetSrcLoc(condExpr->GetSrcLoc()); + std::list trueStmts; + trueStmts.emplace_back(std::move(retTrueStmt)); + UniqueFEIRStmt retFalseStmt = FEIRBuilder::CreateStmtDAssign(std::move(tempVarCloned1), std::move(falseFEIRExpr)); + retFalseStmt->SetSrcLoc(falseExpr->GetSrcLoc()); + falseStmts.emplace_back(std::move(retFalseStmt)); + UniqueFEIRStmt stmtIf = FEIRBuilder::CreateStmtIf(std::move(condFEIRExpr), trueStmts, falseStmts); + stmts.emplace_back(std::move(stmtIf)); + return FEIRBuilder::CreateExprDRead(std::move(tempVarCloned2)); +} + +void ASTBinaryConditionalOperator::SetCondExpr(ASTExpr *expr) { + condExpr = expr; +} + +void ASTBinaryConditionalOperator::SetFalseExpr(ASTExpr *expr) { + falseExpr = expr; +} + +// ---------- ASTNoInitExpr ---------- +UniqueFEIRExpr ASTNoInitExpr::Emit2FEExprImpl(std::list &stmts) const { + return ImplicitInitFieldValue(*noInitType, stmts); +} + +void 
ASTNoInitExpr::SetNoInitType(MIRType *type) { + noInitType = type; +} + +// ---------- ASTCompoundLiteralExpr ---------- +UniqueFEIRExpr ASTCompoundLiteralExpr::Emit2FEExprImpl(std::list &stmts) const { + UniqueFEIRExpr feirExpr; + if (!IsRValue() || child->GetASTOp() == kASTOpInitListExpr) { // other potential expr should concern + std::string tmpName = FEUtils::GetSequentialName("clvar_"); + if (child->GetASTOp() == kASTOpInitListExpr) { + static_cast(child)->SetInitListVarName(tmpName); + } + UniqueFEIRVar tmpVar = FEIRBuilder::CreateVarNameForC(tmpName, *compoundLiteralType); + auto expr = child->Emit2FEExpr(stmts); + if (expr != nullptr) { + auto tmpStmt = FEIRBuilder::CreateStmtDAssign(tmpVar->Clone(), std::move(expr)); + (void)stmts.emplace_back(std::move(tmpStmt)); + } + feirExpr = FEIRBuilder::CreateExprDRead(std::move(tmpVar)); + } else { + feirExpr = child->Emit2FEExpr(stmts); + } + return feirExpr; +} + +MIRConst *ASTCompoundLiteralExpr::GenerateMIRPtrConst() const { + CHECK_NULL_FATAL(compoundLiteralType); + std::string tmpName = FEUtils::GetSequentialName("cle."); + if (FEOptions::GetInstance().GetFuncInlineSize() != 0) { + tmpName = tmpName + FEUtils::GetFileNameHashStr(FEManager::GetModule().GetFileName()); + } + // If a var is pointer type, agg value cannot be directly assigned to it + // Create a temporary symbol for addrof agg value + MIRSymbol *cleSymbol = FEManager::GetMIRBuilder().GetOrCreateGlobalDecl( + tmpName, *compoundLiteralType); + auto mirConst = child->GenerateMIRConst(); // InitListExpr in CompoundLiteral gen struct + cleSymbol->SetKonst(mirConst); + MIRAddrofConst *mirAddrofConst = FEManager::GetModule().GetMemPool()->New( + cleSymbol->GetStIdx(), 0, *compoundLiteralType); + return mirAddrofConst; +} + +MIRConst *ASTCompoundLiteralExpr::GenerateMIRConstImpl() const { + if (isAddrof) { + return GenerateMIRPtrConst(); + } + return child->GenerateMIRConst(); +} + +void ASTCompoundLiteralExpr::SetCompoundLiteralType(MIRType *clType) { + compoundLiteralType = clType; +} + +void ASTCompoundLiteralExpr::SetASTExpr(ASTExpr *astExpr) { + child = astExpr; +} + +// ---------- ASTOffsetOfExpr ---------- +void ASTOffsetOfExpr::SetStructType(MIRType *stype) { + structType = stype; +} + +void ASTOffsetOfExpr::SetFieldName(const std::string &fName) { + fieldName = fName; +} + +UniqueFEIRExpr ASTOffsetOfExpr::Emit2FEExprImpl(std::list &stmts) const { + (void)stmts; + return FEIRBuilder::CreateExprConstU64(static_cast(offset)); +} + +// ---------- ASTInitListExpr ---------- +MIRConst *ASTInitListExpr::GenerateMIRConstImpl() const { + // avoid the infinite loop + if (isGenerating) { + return nullptr; + } + isGenerating = true; + if (initListType->GetKind() == kTypeArray) { + return GenerateMIRConstForArray(); + } else if (initListType->GetKind() == kTypeStruct || initListType->GetKind() == kTypeUnion) { + return GenerateMIRConstForStruct(); + } else if (isTransparent) { + return initExprs[0]->GenerateMIRConst(); + } else { + CHECK_FATAL(false, "not handle now"); + } +} + +MIRConst *ASTInitListExpr::GenerateMIRConstForArray() const { + CHECK_FATAL(initListType->GetKind() == kTypeArray, "Must be array type"); + auto arrayMirType = static_cast(initListType); + if (arrayMirType->GetDim() == 1 && initExprs.size() == 1 && initExprs[0]->GetASTOp() == kASTStringLiteral) { + return initExprs[0]->GenerateMIRConst(); + } + MIRAggConst *aggConst = FEManager::GetModule().GetMemPool()->New(FEManager::GetModule(), *initListType); + CHECK_FATAL(initExprs.size() <= 
arrayMirType->GetSizeArrayItem(0), "InitExpr size must less or equal array size"); + for (size_t i = 0; i < initExprs.size(); ++i) { + auto konst = initExprs[i]->GenerateMIRConst(); + if (konst == nullptr) { + return nullptr; + } + aggConst->AddItem(konst, 0); + } + if (HasArrayFiller()) { + auto fillerConst = arrayFillerExpr->GenerateMIRConst(); + for (uint32 i = initExprs.size(); i < arrayMirType->GetSizeArrayItem(0); ++i) { + aggConst->AddItem(fillerConst, 0); + } + } + return aggConst; +} + +MIRConst *ASTInitListExpr::GenerateMIRConstForStruct() const { + if (initExprs.empty()) { + return nullptr; // No var constant generation + } + bool hasFiller = false; + for (auto e : initExprs) { + if (e != nullptr) { + hasFiller = true; + break; + } + } + if (!hasFiller) { + return nullptr; + } + MIRAggConst *aggConst = FEManager::GetModule().GetMemPool()->New(FEManager::GetModule(), *initListType); + CHECK_FATAL(initExprs.size() <= UINT_MAX, "Too large elem size"); + if (initListType->GetKind() == kTypeUnion) { + CHECK_FATAL(initExprs.size() == 1, "union should only have one elem"); + } + for (uint32 i = 0; i < static_cast(initExprs.size()); ++i) { + if (initExprs[i] == nullptr) { + continue; + } + auto konst = initExprs[i]->GenerateMIRConst(); + if (konst == nullptr) { + continue; + } + if (konst->GetKind() == kConstLblConst) { + // init by initListExpr, Only MIRConst kind is set here. + return konst; + } + uint32 fieldIdx = (initListType->GetKind() == kTypeUnion) ? unionInitFieldIdx : i; + aggConst->AddItem(konst, fieldIdx + 1); + } + ENCChecker::CheckNullFieldInGlobalStruct(*initListType, *aggConst, initExprs); + return aggConst; +} + +UniqueFEIRExpr ASTInitListExpr::Emit2FEExprImpl(std::list &stmts) const { + UniqueFEIRVar feirVar = FEIRBuilder::CreateVarNameForC(GetInitListVarName(), *initListType); + if (initListType->GetKind() == MIRTypeKind::kTypeArray) { + UniqueFEIRExpr arrayExpr = FEIRBuilder::CreateExprAddrofVar(feirVar->Clone()); + auto base = std::variant, UniqueFEIRExpr>(arrayExpr->Clone()); + ProcessInitList(base, *this, stmts); + } else if (initListType->IsStructType()) { + auto base = std::variant, UniqueFEIRExpr>(std::make_pair(feirVar->Clone(), 0)); + ProcessInitList(base, *this, stmts); + } else if (isTransparent) { + CHECK_FATAL(initExprs.size() == 1, "Transparent init list size must be 1"); + return initExprs[0]->Emit2FEExpr(stmts); + } else if (hasVectorType) { + auto base = std::variant, UniqueFEIRExpr>(std::make_pair(feirVar->Clone(), 0)); + ProcessInitList(base, *this, stmts); + } else { + CHECK_FATAL(true, "Unsupported init list type"); + } + return nullptr; +} + +void ASTInitListExpr::ProcessInitList(std::variant, UniqueFEIRExpr> &base, + const ASTInitListExpr &initList, + std::list &stmts) const { + if (initList.initListType->GetKind() == kTypeArray) { + if (std::holds_alternative(base)) { + ProcessArrayInitList(std::get(base)->Clone(), initList, stmts); + } else { + auto addrExpr = std::make_unique( + std::get>(base).first->Clone()); + addrExpr->SetFieldID(std::get>(base).second); + ProcessArrayInitList(addrExpr->Clone(), initList, stmts); + } + } else if (initList.initListType->GetKind() == kTypeStruct || initList.initListType->GetKind() == kTypeUnion) { + ProcessStructInitList(base, initList, stmts); + } else if (initList.isTransparent) { + CHECK_FATAL(initList.initExprs.size() == 1, "Transparent init list size must be 1"); + auto feExpr = initList.initExprs[0]->Emit2FEExpr(stmts); + MIRType *retType = initList.initListType; + MIRType *retPtrType = 
GlobalTables::GetTypeTable().GetOrCreatePointerType(*retType);
+    UniqueFEIRType fePtrType = std::make_unique<FEIRTypeNative>(*retPtrType);
+    if (std::holds_alternative<UniqueFEIRExpr>(base)) {
+      auto stmt = FEIRBuilder::CreateStmtIAssign(fePtrType->Clone(), std::get<UniqueFEIRExpr>(base)->Clone(),
+                                                 feExpr->Clone(), 0);
+      stmts.emplace_back(std::move(stmt));
+    } else {
+      UniqueFEIRVar feirVar = std::get<std::pair<UniqueFEIRVar, FieldID>>(base).first->Clone();
+      FieldID fieldID = std::get<std::pair<UniqueFEIRVar, FieldID>>(base).second;
+      auto stmt = FEIRBuilder::CreateStmtDAssignAggField(feirVar->Clone(), feExpr->Clone(), fieldID);
+      stmts.emplace_back(std::move(stmt));
+    }
+  } else if (initList.HasVectorType()) {
+    ProcessVectorInitList(base, initList, stmts);
+  }
+}
+
+void ASTInitListExpr::ProcessStringLiteralInitList(const UniqueFEIRExpr &addrOfCharArray,
+                                                   const UniqueFEIRExpr &addrOfStringLiteral,
+                                                   size_t stringLength, std::list<UniqueFEIRStmt> &stmts) const {
+  std::unique_ptr<std::list<UniqueFEIRExpr>> argExprList = std::make_unique<std::list<UniqueFEIRExpr>>();
+  argExprList->emplace_back(addrOfCharArray->Clone());
+  argExprList->emplace_back(addrOfStringLiteral->Clone());
+  CHECK_FATAL(stringLength <= INT_MAX, "Too large length range");
+  argExprList->emplace_back(FEIRBuilder::CreateExprConstI32(static_cast<int32>(stringLength)));
+  std::unique_ptr<FEIRStmtIntrinsicCallAssign> memcpyStmt = std::make_unique<FEIRStmtIntrinsicCallAssign>(
+      INTRN_C_memcpy, nullptr, nullptr, std::move(argExprList));
+  memcpyStmt->SetSrcLoc(addrOfStringLiteral->GetLoc());
+  stmts.emplace_back(std::move(memcpyStmt));
+
+  // Handling implicit initialization when the initializer is incomplete
+  if (addrOfCharArray->GetKind() != kExprAddrofArray) {
+    return;
+  }
+  auto type = static_cast<FEIRExprAddrofArray*>(addrOfCharArray.get())->GetTypeArray()->Clone();
+  MIRType *mirType = type->GenerateMIRType();
+  if (mirType->GetKind() == kTypeArray) {
+    MIRArrayType *arrayType = static_cast<MIRArrayType*>(mirType);
+    if (arrayType->GetDim() > 2) {  // only processing one- or two-dimensional arrays
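+      // Only one- and two-dimensional char arrays get the tail of the initialized row zero-filled
+      // here (e.g. char buf[2][8] = {"ab"}: the C_memcpy above copies the literal and the
+      // ProcessImplicitInit call below clears the remaining bytes); higher-rank arrays just return.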
+ return; + } + uint32 dimSize = arrayType->GetSizeArrayItem(static_cast(arrayType->GetDim() - 1)); + uint32 elemSize = static_cast(arrayType->GetElemType()->GetSize()); + ProcessImplicitInit(addrOfCharArray->Clone(), stringLength, dimSize, elemSize, stmts, + addrOfStringLiteral->GetLoc()); + } +} + +void ASTInitListExpr::ProcessImplicitInit(const UniqueFEIRExpr &addrExpr, uint32 initSize, uint32 total, + uint32 elemSize, std::list &stmts, const Loc loc) const { + if (initSize >= total) { + return; + } + std::unique_ptr> argExprList = std::make_unique>(); + UniqueFEIRExpr realAddr = addrExpr->Clone(); + CHECK_FATAL(elemSize <= INT_MAX, "Too large elem size"); + CHECK_FATAL(initSize <= INT_MAX, "Too large init size"); + UniqueFEIRExpr elemSizeExpr = FEIRBuilder::CreateExprConstI32(static_cast(elemSize)); + if (initSize != 0) { + UniqueFEIRExpr initSizeExpr = FEIRBuilder::CreateExprConstI32(static_cast(initSize)); + if (elemSize != 1) { + initSizeExpr = FEIRBuilder::CreateExprBinary(OP_mul, std::move(initSizeExpr), elemSizeExpr->Clone()); + } + realAddr = FEIRBuilder::CreateExprBinary(OP_add, std::move(realAddr), initSizeExpr->Clone()); + } + argExprList->emplace_back(std::move(realAddr)); + argExprList->emplace_back(FEIRBuilder::CreateExprConstI32(0)); + UniqueFEIRExpr cntExpr = FEIRBuilder::CreateExprConstI32(static_cast(total - initSize)); + if (elemSize != 1) { + cntExpr = FEIRBuilder::CreateExprBinary(OP_mul, std::move(cntExpr), elemSizeExpr->Clone()); + } + argExprList->emplace_back(std::move(cntExpr)); + std::unique_ptr memsetStmt = std::make_unique( + INTRN_C_memset, nullptr, nullptr, std::move(argExprList)); + if (loc.fileIdx != 0) { + memsetStmt->SetSrcLoc(loc); + } + stmts.emplace_back(std::move(memsetStmt)); +} + +void ASTInitListExpr::ProcessDesignatedInitUpdater( + std::variant, UniqueFEIRExpr> &base, + ASTExpr *expr, std::list &stmts) const { + auto designatedInitUpdateExpr = static_cast(expr); + const ASTExpr *baseExpr = designatedInitUpdateExpr->GetBaseExpr(); + const ASTExpr *updaterExpr = designatedInitUpdateExpr->GetUpdaterExpr(); + auto feExpr = baseExpr->Emit2FEExpr(stmts); + if (std::holds_alternative(base)) { + const MIRType *mirType = designatedInitUpdateExpr->GetInitListType(); + MIRType *mirPtrType = GlobalTables::GetTypeTable().GetOrCreatePointerType(*mirType); + UniqueFEIRType fePtrType = std::make_unique(*mirPtrType); + auto stmt = FEIRBuilder::CreateStmtIAssign(fePtrType->Clone(), std::get(base)->Clone(), + feExpr->Clone(), 0); + stmts.emplace_back(std::move(stmt)); + } else { + UniqueFEIRVar feirVar = std::get>(base).first->Clone(); + FieldID fieldID = std::get>(base).second; + auto stmt = FEIRBuilder::CreateStmtDAssignAggField(feirVar->Clone(), feExpr->Clone(), fieldID); + stmts.emplace_back(std::move(stmt)); + } + ProcessInitList(base, *(static_cast(updaterExpr)), stmts); +} + +UniqueFEIRExpr ASTInitListExpr::CalculateStartAddressForMemset(const UniqueFEIRVar &varIn, uint32 initSizeIn, + FieldID fieldIDIn, const std::variant, UniqueFEIRExpr> &baseIn) const { + UniqueFEIRExpr addrOfExpr; + if (std::holds_alternative(baseIn)) { + UniqueFEIRExpr offsetExpr = FEIRBuilder::CreateExprConstU32(initSizeIn); + addrOfExpr = FEIRBuilder::CreateExprBinary(OP_add, std::get(baseIn)->Clone(), + std::move(offsetExpr)); + } else { + addrOfExpr = std::make_unique(varIn->Clone(), fieldIDIn); + } + return addrOfExpr; +} + +std::tuple ASTInitListExpr::GetStructFieldInfo(uint32 fieldIndex, + FieldID baseFieldID, + MIRStructType &structMirType) const { + FieldID curFieldID = 0; + 
(void)FEUtils::TraverseToNamedField(structMirType, structMirType.GetElemStrIdx(fieldIndex), curFieldID); + FieldID fieldID = baseFieldID + curFieldID; + MIRType *fieldMirType = structMirType.GetFieldType(curFieldID); + uint32 fieldTypeSize = static_cast(fieldMirType->GetSize()); + return std::make_tuple(fieldID, fieldTypeSize, fieldMirType); +} + +void ASTInitListExpr::SolveInitListFullOfZero(const MIRStructType &baseStructType, FieldID baseFieldID, + const UniqueFEIRVar &var, const ASTInitListExpr &initList, + std::list &stmts) const { + MIRStructType *currStructType = static_cast(initList.initListType); + if (baseStructType.GetKind() == kTypeStruct) { + std::tuple fieldInfo = GetStructFieldInfo(0, baseFieldID, *currStructType); + FieldID fieldID = std::get<0>(fieldInfo); + // Use 'fieldID - 1' (start address of the nested struct or union) instead of 'fieldID' (start address of the + // first field in the nested struct or union), because even though these two addresses have the same value, + // they have different pointer type. + UniqueFEIRExpr addrOfExpr = std::make_unique(var->Clone(), fieldID - 1); + ProcessImplicitInit(addrOfExpr->Clone(), 0, static_cast(currStructType->GetSize()), 1, stmts, + initList.GetSrcLoc()); + } else { // kTypeUnion + UniqueFEIRExpr addrOfExpr = std::make_unique(var->Clone(), 0); + ProcessImplicitInit(addrOfExpr->Clone(), 0, static_cast(currStructType->GetSize()), 1, stmts, + initList.GetSrcLoc()); + } +} + +bool ASTInitListExpr::SolveInitListPartialOfZero(std::variant, UniqueFEIRExpr> &base, + FieldID fieldID, uint32 &index, const ASTInitListExpr &initList, std::list &stmts) const { + UniqueFEIRVar var; + MIRStructType *baseStructMirType = nullptr; + if (std::holds_alternative(base)) { + var = std::get(base)->GetVarUses().front()->Clone(); + baseStructMirType = static_cast(initList.initListType); + } else { + var = std::get>(base).first->Clone(); + baseStructMirType = static_cast(var->GetType()->GenerateMIRTypeAuto()); + } + FieldID baseFieldID = 0; + if (!std::holds_alternative(base)) { + baseFieldID = std::get>(base).second; + } + MIRStructType *curStructMirType = static_cast(initList.initListType); + uint32 fieldsCount = 0; + int64 initBitSize = baseStructMirType->GetBitOffsetFromBaseAddr(fieldID); // in bit + uint32 fieldSizeOfLastZero = 0; // in byte + FieldID fieldIdOfLastZero = fieldID; + uint32 start = index; + while (index < initList.initExprs.size() && initList.initExprs[index] != nullptr && + initList.initExprs[index]->GetEvaluatedFlag() == kEvaluatedAsZero) { + std::tuple fieldInfo = GetStructFieldInfo(index, baseFieldID, *curStructMirType); + uint32 curFieldTypeSize = std::get<1>(fieldInfo); + MIRType *fieldMirType = std::get<2>(fieldInfo); + if (fieldMirType->GetKind() == kTypeBitField) { + break; + } + fieldSizeOfLastZero = curFieldTypeSize; + fieldIdOfLastZero = std::get<0>(fieldInfo); + ++fieldsCount; + ++index; + } + // consider struct alignment + int64 fieldsBitSize = + (baseStructMirType->GetBitOffsetFromBaseAddr(fieldIdOfLastZero) + fieldSizeOfLastZero * kOneByte) - initBitSize; + if (fieldsCount >= 2 && fieldsBitSize % kOneByte == 0 && (fieldsBitSize / kOneByte) % 4 == 0) { + auto addrOfExpr = CalculateStartAddressForMemset(var, static_cast(initBitSize / 8), fieldID, base); + ProcessImplicitInit(addrOfExpr->Clone(), 0, static_cast(fieldsBitSize / kOneByte), 1, stmts, + initList.initExprs[start]->GetSrcLoc()); + --index; + return true; + } else { + index -= fieldsCount; + return false; + } +} + +void 
ASTInitListExpr::SolveInitListExprOrDesignatedInitUpdateExpr(FieldID fieldID, ASTExpr &initExpr, + const UniqueFEIRType &baseStructPtrType, std::variant, UniqueFEIRExpr> &base, + std::list &stmts) const { + std::variant, UniqueFEIRExpr> subBase; + if (std::holds_alternative(base)) { + auto addrOfElemExpr = std::make_unique(baseStructPtrType->Clone(), fieldID, + std::get(base)->Clone()); + subBase = std::variant, UniqueFEIRExpr>(addrOfElemExpr->Clone()); + } else { + auto subVar = std::get>(base).first->Clone(); + subBase = std::variant, UniqueFEIRExpr>( + std::make_pair(subVar->Clone(), static_cast(fieldID))); + } + if (initExpr.GetASTOp() == kASTOpInitListExpr) { + ProcessInitList(subBase, *(static_cast(&initExpr)), stmts); + } else { + ProcessDesignatedInitUpdater(subBase, static_cast(&initExpr), stmts); + } +} + +void ASTInitListExpr::SolveStructFieldOfArrayTypeInitWithStringLiteral(std::tuple fieldInfo, + const ASTExpr &initExpr, const UniqueFEIRType &baseStructPtrType, + std::variant, UniqueFEIRExpr> &base, std::list &stmts) const { + auto elemExpr = initExpr.Emit2FEExpr(stmts); + if (elemExpr == nullptr) { + return; + } + FieldID fieldID = std::get<0>(fieldInfo); + MIRType *fieldMirType = std::get<2>(fieldInfo); + if (std::holds_alternative(base)) { + auto addrOfElement = std::make_unique(baseStructPtrType->Clone(), fieldID, + std::get(base)->Clone()); + auto addrOfArrayExpr = GetAddrofArrayFEExprByStructArrayField(*fieldMirType, addrOfElement->Clone()); + ProcessStringLiteralInitList(addrOfArrayExpr->Clone(), elemExpr->Clone(), + static_cast(&initExpr)->GetLength(), stmts); + } else { + auto subVar = std::get>(base).first->Clone(); + auto addrOfElement = std::make_unique(subVar->Clone()); + addrOfElement->SetFieldID(fieldID); + auto addrOfArrayExpr = GetAddrofArrayFEExprByStructArrayField(*fieldMirType, addrOfElement->Clone()); + ProcessStringLiteralInitList(addrOfArrayExpr->Clone(), elemExpr->Clone(), + static_cast(&initExpr)->GetLength(), stmts); + } +} + +void ASTInitListExpr::SolveStructFieldOfBasicType(FieldID fieldID, const ASTExpr &initExpr, + const UniqueFEIRType &baseStructPtrType, + std::variant, UniqueFEIRExpr> &base, + std::list &stmts) const { + auto elemExpr = initExpr.Emit2FEExpr(stmts); + if (elemExpr == nullptr) { + return; + } + if (std::holds_alternative(base)) { + auto stmt = std::make_unique(baseStructPtrType->Clone(), std::get(base)->Clone(), + elemExpr->Clone(), fieldID); + stmt->SetSrcLoc(initExpr.GetSrcLoc()); + (void)stmts.emplace_back(std::move(stmt)); + } else { + auto subVar = std::get>(base).first->Clone(); + auto stmt = std::make_unique(subVar->Clone(), elemExpr->Clone(), fieldID); + stmt->SetSrcLoc(initExpr.GetSrcLoc()); + (void)stmts.emplace_back(std::move(stmt)); + } +} + +void ASTInitListExpr::ProcessStructInitList(std::variant, UniqueFEIRExpr> &base, + const ASTInitListExpr &initList, + std::list &stmts) const { + MIRType *baseStructMirPtrType = nullptr; + MIRStructType *baseStructMirType = nullptr; + UniqueFEIRType baseStructFEType = nullptr; + UniqueFEIRType baseStructFEPtrType = nullptr; + MIRStructType *curStructMirType = static_cast(initList.initListType); + UniqueFEIRVar var; + if (std::holds_alternative(base)) { + var = std::get(base)->GetVarUses().front()->Clone(); + baseStructMirType = static_cast(initList.initListType); + baseStructMirPtrType = GlobalTables::GetTypeTable().GetOrCreatePointerType(*baseStructMirType); + baseStructFEType = FEIRTypeHelper::CreateTypeNative(*baseStructMirType); + baseStructFEPtrType = 
std::make_unique(*baseStructMirPtrType); + } else { + var = std::get>(base).first->Clone(); + baseStructFEType = var->GetType()->Clone(); + baseStructMirType = static_cast(baseStructFEType->GenerateMIRTypeAuto()); + baseStructMirPtrType = GlobalTables::GetTypeTable().GetOrCreatePointerType(*baseStructMirType); + baseStructFEPtrType = std::make_unique(*baseStructMirPtrType); + } + + FieldID baseFieldID = 0; + if (!std::holds_alternative(base)) { + baseFieldID = std::get>(base).second; + } + + if (initList.initExprs.size() == 0) { + UniqueFEIRExpr addrOfExpr = std::make_unique(var->Clone(), 0); + ProcessImplicitInit(addrOfExpr->Clone(), 0, static_cast(curStructMirType->GetSize()), 1, stmts, + initList.GetSrcLoc()); + return; + } + + if (!FEOptions::GetInstance().IsNpeCheckDynamic() && initList.GetEvaluatedFlag() == kEvaluatedAsZero) { + SolveInitListFullOfZero(*baseStructMirType, baseFieldID, var, initList, stmts); + return; + } + + uint32 curFieldTypeSize = 0; + uint32 offset = 0; + for (uint32 i = 0; i < initList.initExprs.size(); ++i) { + if (initList.initExprs[i] == nullptr) { + continue; // skip anonymous field + } + + uint32 fieldIdx = (curStructMirType->GetKind() == kTypeUnion) ? initList.GetUnionInitFieldIdx() : i; + std::tuple fieldInfo = GetStructFieldInfo(fieldIdx, baseFieldID, *curStructMirType); + FieldID fieldID = std::get<0>(fieldInfo); + curFieldTypeSize = std::get<1>(fieldInfo); + MIRType *fieldMirType = std::get<2>(fieldInfo); + offset += curFieldTypeSize; + // use one instrinsic call memset to initialize zero for partial continuous fields of struct to need reduce code + // size need to follow these three rules: (1) offset from the start address of the continuous fields to the start + // addresss of struct should be an integer multiples of 4 bytes (2) size of the continuous fields should be an + // integer multiples of 4 bytes (3) the continuous fields count should be two at least + if (!FEOptions::GetInstance().IsNpeCheckDynamic() && + curStructMirType->GetKind() == kTypeStruct && + fieldMirType->GetKind() != kTypeBitField && // skip bitfield type field because it not follows byte alignment + initList.initExprs[i]->GetEvaluatedFlag() == kEvaluatedAsZero && + (baseStructMirType->GetBitOffsetFromBaseAddr(fieldID) / kOneByte) % 4 == 0) { + if (SolveInitListPartialOfZero(base, fieldID, i, initList, stmts)) { + continue; + } + } + + if (initList.initExprs[i]->GetASTOp() == kASTImplicitValueInitExpr && fieldMirType->GetPrimType() == PTY_agg) { + auto addrOfExpr = CalculateStartAddressForMemset(var, offset - curFieldTypeSize, fieldID, base); + ProcessImplicitInit(addrOfExpr->Clone(), 0, static_cast(fieldMirType->GetSize()), 1, stmts, + initList.initExprs[i]->GetSrcLoc()); + continue; + } + + if (initList.initExprs[i]->GetASTOp() == kASTOpInitListExpr || + initList.initExprs[i]->GetASTOp() == kASTASTDesignatedInitUpdateExpr) { + SolveInitListExprOrDesignatedInitUpdateExpr(fieldID, *(initList.initExprs[i]), baseStructFEPtrType, base, stmts); + } else if (fieldMirType->GetKind() == kTypeArray && initList.initExprs[i]->GetASTOp() == kASTStringLiteral) { + SolveStructFieldOfArrayTypeInitWithStringLiteral(fieldInfo, *(initList.initExprs[i]), + baseStructFEPtrType, base, stmts); + } else { + SolveStructFieldOfBasicType(fieldID, *(initList.initExprs[i]), baseStructFEPtrType, base, stmts); + } + } + + // Handling Incomplete Union Initialization + if (curStructMirType->GetKind() == kTypeUnion) { + UniqueFEIRExpr addrOfExpr = std::make_unique(var->Clone(), baseFieldID); + 
ProcessImplicitInit(addrOfExpr->Clone(), curFieldTypeSize, + curStructMirType->GetSize(), 1, stmts, initList.GetSrcLoc()); + } +} + +UniqueFEIRExpr ASTInitListExpr::GetAddrofArrayFEExprByStructArrayField(MIRType &fieldType, + const UniqueFEIRExpr &addrOfArrayField) const { + CHECK_FATAL(fieldType.GetKind() == kTypeArray, "invalid field type"); + auto arrayFEType = FEIRTypeHelper::CreateTypeNative(fieldType); + std::list indexExprs; + auto indexExpr = FEIRBuilder::CreateExprConstI32(0); + indexExprs.emplace_back(std::move(indexExpr)); + auto addrOfArrayExpr = FEIRBuilder::CreateExprAddrofArray(arrayFEType->Clone(), addrOfArrayField->Clone(), + "", indexExprs); + return addrOfArrayExpr; +} + +void ASTInitListExpr::SolveArrayElementInitWithInitListExpr(const UniqueFEIRExpr &addrOfArray, + const UniqueFEIRExpr &addrOfElementExpr, + const MIRType &elementType, const ASTExpr &subExpr, + size_t index, std::list &stmts) const { + auto base = std::variant, UniqueFEIRExpr>(addrOfElementExpr->Clone()); + if (!FEOptions::GetInstance().IsNpeCheckDynamic() && subExpr.GetEvaluatedFlag() == kEvaluatedAsZero) { + UniqueFEIRExpr realAddr = addrOfArray->Clone(); + if (index > 0) { + UniqueFEIRExpr idxExpr = FEIRBuilder::CreateExprConstI32(static_cast(index)); + UniqueFEIRExpr elemSizeExpr = FEIRBuilder::CreateExprConstI32(static_cast(elementType.GetSize())); + UniqueFEIRExpr offsetSizeExpr = FEIRBuilder::CreateExprBinary(OP_mul, std::move(idxExpr), elemSizeExpr->Clone()); + realAddr = FEIRBuilder::CreateExprBinary(OP_add, std::move(realAddr), offsetSizeExpr->Clone()); + } + ProcessImplicitInit(realAddr->Clone(), 0, static_cast(elementType.GetSize()), 1, stmts, + subExpr.GetSrcLoc()); + } else { + ProcessInitList(base, *(static_cast(&subExpr)), stmts); + } +} + +void ASTInitListExpr::HandleImplicitInitSections(const UniqueFEIRExpr &addrOfArray, const ASTInitListExpr &initList, + const MIRType &elementType, std::list &stmts) const { + auto arrayMirType = static_cast(initList.initListType); + auto allSize = arrayMirType->GetSize(); + auto elemSize = elementType.GetSize(); + CHECK_FATAL(elemSize != 0, "elemSize should not 0"); + auto allElemCnt = allSize / elemSize; + ProcessImplicitInit(addrOfArray->Clone(), static_cast(initList.initExprs.size()), allElemCnt, elemSize, stmts, + initList.GetSrcLoc()); +} + +void ASTInitListExpr::ProcessArrayInitList(const UniqueFEIRExpr &addrOfArray, const ASTInitListExpr &initList, + std::list &stmts) const { + auto arrayMirType = static_cast(initList.initListType); + UniqueFEIRType arrayFEType = FEIRTypeHelper::CreateTypeNative(*arrayMirType); + MIRType *elementType; + if (arrayMirType->GetDim() > 1) { + uint32 subSizeArray[arrayMirType->GetDim()]; + for (int dim = 1; dim < arrayMirType->GetDim(); ++dim) { + subSizeArray[dim - 1] = arrayMirType->GetSizeArrayItem(static_cast(dim)); + } + elementType = GlobalTables::GetTypeTable().GetOrCreateArrayType(*arrayMirType->GetElemType(), + static_cast(arrayMirType->GetDim() - 1), subSizeArray); + } else { + elementType = arrayMirType->GetElemType(); + } + auto elementPtrType = GlobalTables::GetTypeTable().GetOrCreatePointerType(*elementType); + CHECK_FATAL(initExprs.size() <= INT_MAX, "invalid index"); + if (!FEOptions::GetInstance().IsNpeCheckDynamic() && initList.GetEvaluatedFlag() == kEvaluatedAsZero) { + ProcessImplicitInit(addrOfArray->Clone(), 0, static_cast(arrayMirType->GetSize()), 1, stmts, + initList.GetSrcLoc()); + return; + } + for (size_t i = 0; i < initList.initExprs.size(); ++i) { + std::list indexExprs; + UniqueFEIRExpr 
indexExpr = FEIRBuilder::CreateExprConstI32(static_cast(i)); + indexExprs.emplace_back(std::move(indexExpr)); + auto addrOfElemExpr = FEIRBuilder::CreateExprAddrofArray(arrayFEType->Clone(), addrOfArray->Clone(), "", + indexExprs); + const ASTExpr *subExpr = initList.initExprs[i]; + while (subExpr->GetASTOp() == kConstantExpr) { + subExpr = static_cast(subExpr)->GetChild(); + } + if (subExpr->GetASTOp() == kASTOpInitListExpr) { + SolveArrayElementInitWithInitListExpr(addrOfArray, addrOfElemExpr, *elementType, *subExpr, i, stmts); + continue; + } + UniqueFEIRExpr elemExpr = subExpr->Emit2FEExpr(stmts); + if ((elementType->GetKind() == kTypeArray || arrayMirType->GetDim() == 1) && + subExpr->GetASTOp() == kASTStringLiteral) { + ProcessStringLiteralInitList(addrOfElemExpr->Clone(), elemExpr->Clone(), + static_cast(subExpr)->GetLength(), stmts); + if (arrayMirType->GetDim() == 1) { + return; + } + continue; + } + auto stmt = FEIRBuilder::CreateStmtIAssign(FEIRTypeHelper::CreateTypeNative(*elementPtrType)->Clone(), + addrOfElemExpr->Clone(), elemExpr->Clone(), 0); + stmt->SetSrcLoc(initList.initExprs[i]->GetSrcLoc()); + stmts.emplace_back(std::move(stmt)); + } + + HandleImplicitInitSections(addrOfArray, initList, *elementType, stmts); +} + +void ASTInitListExpr::ProcessVectorInitList(std::variant, UniqueFEIRExpr> &base, + const ASTInitListExpr &initList, std::list &stmts) const { + UniqueFEIRType srcType = FEIRTypeHelper::CreateTypeNative(*(initList.initListType)); + if (std::holds_alternative(base)) { + CHECK_FATAL(false, "unsupported case"); + } else { + UniqueFEIRVar srcVar = std::get>(base).first->Clone(); + FieldID fieldID = std::get>(base).second; + UniqueFEIRExpr dreadVar; + if (fieldID != 0) { + dreadVar = FEIRBuilder::CreateExprDReadAggField(srcVar->Clone(), fieldID, srcType->Clone()); + } else { + dreadVar = FEIRBuilder::CreateExprDRead(srcVar->Clone()); + } + for (size_t index = 0; index < initList.initExprs.size(); ++index) { + UniqueFEIRExpr indexExpr = FEIRBuilder::CreateExprConstI32(index); + UniqueFEIRExpr elemExpr = initList.initExprs[index]->Emit2FEExpr(stmts); + std::vector> argOpnds; + argOpnds.push_back(std::move(elemExpr)); + argOpnds.push_back(dreadVar->Clone()); + argOpnds.push_back(std::move(indexExpr)); + UniqueFEIRExpr intrinsicExpr = std::make_unique( + srcType->Clone(), SetVectorSetLane(*(initList.initListType)), argOpnds); + auto stmt = FEIRBuilder::CreateStmtDAssignAggField(srcVar->Clone(), std::move(intrinsicExpr), fieldID); + stmts.emplace_back(std::move(stmt)); + } + } +} + +MIRIntrinsicID ASTInitListExpr::SetVectorSetLane(const MIRType &type) const { + MIRIntrinsicID intrinsic; + switch (type.GetPrimType()) { +#define SETQ_LANE(TY) \ + case PTY_##TY: \ + intrinsic = INTRN_vector_set_element_##TY; \ + break; + + SETQ_LANE(v2i64) + SETQ_LANE(v4i32) + SETQ_LANE(v8i16) + SETQ_LANE(v16i8) + SETQ_LANE(v2u64) + SETQ_LANE(v4u32) + SETQ_LANE(v8u16) + SETQ_LANE(v16u8) + SETQ_LANE(v2f64) + SETQ_LANE(v4f32) + SETQ_LANE(v2i32) + SETQ_LANE(v4i16) + SETQ_LANE(v8i8) + SETQ_LANE(v2u32) + SETQ_LANE(v4u16) + SETQ_LANE(v8u8) + SETQ_LANE(v2f32) + case PTY_i64: + intrinsic = INTRN_vector_set_element_v1i64; + break; + case PTY_u64: + intrinsic = INTRN_vector_set_element_v1u64; + break; + case PTY_f64: + intrinsic = INTRN_vector_set_element_v1f64; + break; + default: + CHECK_FATAL(false, "Unhandled vector type"); + return INTRN_UNDEFINED; + } + return intrinsic; +} + +void ASTInitListExpr::SetInitExprs(ASTExpr *astExpr) { + initExprs.emplace_back(astExpr); +} + +void 
ASTInitListExpr::SetInitListType(MIRType *type) { + initListType = type; +} + +// ---------- ASTImplicitValueInitExpr ---------- +MIRConst *ASTImplicitValueInitExpr::GenerateMIRConstImpl() const { + return FEUtils::CreateImplicitConst(mirType); +} + +UniqueFEIRExpr ASTImplicitValueInitExpr::Emit2FEExprImpl(std::list &stmts) const { + return ImplicitInitFieldValue(*mirType, stmts); +} + +MIRConst *ASTStringLiteral::GenerateMIRConstImpl() const { + auto *arrayType = static_cast(mirType); + uint32 arraySize = arrayType->GetSizeArrayItem(0); + auto elemType = arrayType->GetElemType(); + auto *val = FEManager::GetModule().GetMemPool()->New(FEManager::GetModule(), *arrayType); + for (uint32 i = 0; i < arraySize; ++i) { + MIRConst *cst; + if (i < codeUnits.size()) { + cst = FEManager::GetModule().GetMemPool()->New(codeUnits[i], *elemType); + } else { + cst = FEManager::GetModule().GetMemPool()->New(0, *elemType); + } + val->PushBack(cst); + } + return val; +} + +UniqueFEIRExpr ASTStringLiteral::Emit2FEExprImpl(std::list &stmts) const { + (void)stmts; + MIRType *elemType = static_cast(mirType)->GetElemType(); + std::vector codeUnitsVec; + (void)codeUnitsVec.insert(codeUnitsVec.cend(), codeUnits.cbegin(), codeUnits.cend()); + UniqueFEIRExpr expr = std::make_unique(codeUnitsVec, elemType, GetStr()); + CHECK_NULL_FATAL(expr); + return expr; +} + +// ---------- ASTArraySubscriptExpr ---------- +size_t ASTArraySubscriptExpr::CalculateOffset() const { + size_t offset = 0; + CHECK_FATAL(idxExpr->GetConstantValue() != nullptr, "Not constant value for constant initializer"); + offset += mirType->GetSize() * static_cast(idxExpr->GetConstantValue()->val.i64); + return offset; +} + +ASTExpr *ASTArraySubscriptExpr::FindFinalBase() const { + if (baseExpr->GetASTOp() == kASTSubscriptExpr) { + return static_cast(baseExpr)->FindFinalBase(); + } + return baseExpr; +} + +MIRConst *ASTArraySubscriptExpr::GenerateMIRConstImpl() const { + size_t offset = CalculateOffset(); + const ASTExpr *base = FindFinalBase(); + MIRConst *baseConst = base->GenerateMIRConst(); + if (baseConst->GetKind() == kConstStrConst) { + MIRStrConst *strConst = static_cast(baseConst); + std::string str = GlobalTables::GetUStrTable().GetStringFromStrIdx(strConst->GetValue()); + CHECK_FATAL(str.length() >= offset, "Invalid operation"); + str = str.substr(offset); + UStrIdx strIdx = GlobalTables::GetUStrTable().GetOrCreateStrIdxFromName(str); + return FEManager::GetModule().GetMemPool()->New( + strIdx, *GlobalTables::GetTypeTable().GetPrimType(PTY_a64)); + } else if (baseConst->GetKind() == kConstAddrof) { + MIRAddrofConst *konst = static_cast(baseConst); + CHECK_FATAL(offset <= INT_MAX, "Invalid operation"); + return FEManager::GetModule().GetMemPool()->New(konst->GetSymbolIndex(), konst->GetFieldID(), + konst->GetType(), konst->GetOffset() + static_cast(offset)); + } else { + CHECK_FATAL(false, "Unsupported MIRConst: %d", baseConst->GetKind()); + } +} + +bool ASTArraySubscriptExpr::CheckFirstDimIfZero(const MIRType *arrType) const { + auto tmpArrayType = static_cast(arrType); + uint32 size = tmpArrayType->GetSizeArrayItem(0); + uint32 oriDim = tmpArrayType->GetDim(); + if (size == 0 && oriDim >= 2) { // 2 is the array dim + return true; + } + return false; +} + +void ASTArraySubscriptExpr::InsertNonnullChecking(std::list &stmts, const UniqueFEIRExpr &indexExpr, + const UniqueFEIRExpr &baseAddrExpr) const { + if (!FEOptions::GetInstance().IsNpeCheckDynamic() || indexExpr->GetKind() != kExprConst) { + return; + } + if 
(FEIRBuilder::IsZeroConstExpr(indexExpr)) { // insert nonnull checking when ptr[0] + UniqueFEIRStmt stmt = std::make_unique(OP_assertnonnull, baseAddrExpr->Clone()); + stmt->SetSrcLoc(loc); + stmts.emplace_back(std::move(stmt)); + } +} + +MIRType *ASTArraySubscriptExpr::GetArrayTypeForPointerArray() const { + MIRType *arrayTypeOpt = nullptr; + MIRPtrType *ptrTy = static_cast(arrayType); + MIRType *pointedTy = ptrTy->GetPointedType(); + if (pointedTy->GetKind() == kTypeArray) { + MIRArrayType *pointedArrTy = static_cast(pointedTy); + std::vector sizeArray{1}; + for (uint32 i = 0; i < pointedArrTy->GetDim(); ++i) { + sizeArray.push_back(pointedArrTy->GetSizeArrayItem(i)); + } + MIRArrayType newArrTy(pointedArrTy->GetElemTyIdx(), sizeArray); + arrayTypeOpt = static_cast(GlobalTables::GetTypeTable().GetOrCreateMIRTypeNode(newArrTy)); + } else { + arrayTypeOpt = GlobalTables::GetTypeTable().GetOrCreateArrayType(*pointedTy, 1); + } + return arrayTypeOpt; +} + +UniqueFEIRExpr ASTArraySubscriptExpr::SolveMultiDimArray(UniqueFEIRExpr &baseAddrFEExpr, UniqueFEIRType &arrayFEType, + bool isArrayTypeOpt, std::list &stmts) const { + std::list feIdxExprs; + if (baseAddrFEExpr->GetKind() == kExprAddrofArray && !isArrayTypeOpt) { + auto baseArrayExpr = static_cast(baseAddrFEExpr.get()); + for (auto &e : baseArrayExpr->GetExprIndexs()) { + (void)feIdxExprs.emplace_back(e->Clone()); + } + arrayFEType = baseArrayExpr->GetTypeArray()->Clone(); + baseAddrFEExpr = baseArrayExpr->GetExprArray()->Clone(); + } + auto feIdxExpr = idxExpr->Emit2FEExpr(stmts); + if (isArrayTypeOpt) { + InsertNonnullChecking(stmts, feIdxExpr, baseAddrFEExpr); + } + (void)feIdxExprs.emplace_back(std::move(feIdxExpr)); + return FEIRBuilder::CreateExprAddrofArray(arrayFEType->Clone(), baseAddrFEExpr->Clone(), "", feIdxExprs); +} + +UniqueFEIRExpr ASTArraySubscriptExpr::SolveOtherArrayType(const UniqueFEIRExpr &baseAddrFEExpr, + std::list &stmts) const { + std::vector offsetExprs; + UniqueFEIRExpr offsetExpr; + auto feIdxExpr = idxExpr->Emit2FEExpr(stmts); + PrimType indexPty = feIdxExpr->GetPrimType(); + UniqueFEIRType sizeType = IsSignedInteger(indexPty) ? std::make_unique(*GlobalTables::GetTypeTable(). + GetPrimType(PTY_i64)) : std::make_unique(*GlobalTables::GetTypeTable().GetPrimType(PTY_ptr)); + feIdxExpr = IsSignedInteger(indexPty) ? 
FEIRBuilder::CreateExprCvtPrim(std::move(feIdxExpr), GetRegPrimType( + indexPty), PTY_i64) : FEIRBuilder::CreateExprCvtPrim(std::move(feIdxExpr), GetRegPrimType(indexPty), PTY_ptr); + if (isVLA) { + auto feSizeExpr = vlaSizeExpr->Emit2FEExpr(stmts); + feIdxExpr = FEIRBuilder::CreateExprBinary(sizeType->Clone(), OP_mul, std::move(feIdxExpr), std::move(feSizeExpr)); + } else if (mirType->GetSize() != 1) { + auto typeSizeExpr = std::make_unique(mirType->GetSize(), sizeType->GetPrimType()); + feIdxExpr = FEIRBuilder::CreateExprBinary(sizeType->Clone(), OP_mul, std::move(feIdxExpr), std::move(typeSizeExpr)); + } + (void)offsetExprs.emplace_back(std::move(feIdxExpr)); + if (offsetExprs.size() != 0) { + offsetExpr = std::move(offsetExprs[0]); + for (size_t i = 1; i < offsetExprs.size(); i++) { + offsetExpr = FEIRBuilder::CreateExprBinary(std::move(sizeType), OP_add, std::move(offsetExpr), + std::move(offsetExprs[i])); + } + } + return FEIRBuilder::CreateExprBinary(std::move(sizeType), OP_add, baseAddrFEExpr->Clone(), std::move(offsetExpr)); +} + +UniqueFEIRExpr ASTArraySubscriptExpr::Emit2FEExprImpl(std::list &stmts) const { + std::list subStmts; // To delete redundant bounds checks in one ASTArraySubscriptExpr stmts. + auto baseAddrFEExpr = baseExpr->Emit2FEExpr(subStmts); + auto retFEType = std::make_unique(*mirType); + MIRType *arrayTypeOpt = arrayType; + bool isArrayTypeOpt = false; + if (arrayTypeOpt->GetKind() == kTypePointer && !isVLA) { + arrayTypeOpt = GetArrayTypeForPointerArray(); + isArrayTypeOpt = true; + } + UniqueFEIRType arrayFEType = std::make_unique(*arrayTypeOpt); + auto mirPtrType = GlobalTables::GetTypeTable().GetOrCreatePointerType(*mirType); + auto fePtrType = std::make_unique(*mirPtrType); + UniqueFEIRExpr addrOfArray; + if (arrayTypeOpt->GetKind() == MIRTypeKind::kTypeArray && !isVLA) { + if (CheckFirstDimIfZero(arrayTypeOpt)) { + // return multi-dim array addr directly if its first dim size was 0. + stmts.splice(stmts.end(), subStmts); + return baseAddrFEExpr; + } + addrOfArray = SolveMultiDimArray(baseAddrFEExpr, arrayFEType, isArrayTypeOpt, subStmts); + } else { + addrOfArray = SolveOtherArrayType(baseAddrFEExpr, subStmts); + } + if (InsertBoundaryChecking(subStmts, addrOfArray->Clone(), std::move(baseAddrFEExpr))) { + addrOfArray->SetIsBoundaryChecking(true); + } + stmts.splice(stmts.end(), subStmts); + return FEIRBuilder::CreateExprIRead(std::move(retFEType), fePtrType->Clone(), addrOfArray->Clone()); +} + +UniqueFEIRExpr ASTExprUnaryExprOrTypeTraitExpr::Emit2FEExprImpl(std::list &stmts) const { + (void)stmts; + CHECK_FATAL(false, "NIY"); + return nullptr; +} + +MIRConst *ASTMemberExpr::GenerateMIRConstImpl() const { + uint64 fieldOffset = fieldOffsetBits / kOneByte; + const ASTExpr *base = baseExpr; + while (base->GetASTOp() == kASTMemberExpr) { // find final BaseExpr and calculate FieldOffsets + const ASTMemberExpr *memberExpr = static_cast(base); + fieldOffset += memberExpr->GetFieldOffsetBits() / kOneByte; + base = memberExpr->GetBaseExpr(); + } + MIRConst *baseConst = base->GenerateMIRConst(); + MIRAddrofConst *konst = nullptr; + if (baseConst->GetKind() == kConstAddrof) { + konst = static_cast(baseConst); + } else if (baseConst->GetKind() == kConstInt) { + return FEManager::GetModule().GetMemPool()->New(static_cast(fieldOffset), + *GlobalTables::GetTypeTable().GetInt64()); + } else { + CHECK_FATAL(false, "base const kind NYI."); + } + MIRType *baseStructType = + base->GetType()->IsMIRPtrType() ? 
static_cast(base->GetType())->GetPointedType() : + base->GetType(); + CHECK_FATAL(baseStructType->IsMIRStructType() || baseStructType->GetKind() == kTypeUnion, "Invalid"); + return FEManager::GetModule().GetMemPool()->New(konst->GetSymbolIndex(), konst->GetFieldID(), + konst->GetType(), konst->GetOffset() + static_cast(fieldOffset)); +} + +const ASTMemberExpr &ASTMemberExpr::FindFinalMember(const ASTMemberExpr &startExpr, + std::list &memberNames) const { + (void)memberNames.emplace_back(startExpr.GetMemberName()); + if (startExpr.isArrow || startExpr.baseExpr->GetASTOp() != kASTMemberExpr) { + return startExpr; + } + return FindFinalMember(*(static_cast(startExpr.baseExpr)), memberNames); +} + +void ASTMemberExpr::InsertNonnullChecking(std::list &stmts, UniqueFEIRExpr baseFEExpr) const { + if (!FEOptions::GetInstance().IsNpeCheckDynamic()) { + return; + } + if (baseFEExpr->GetPrimType() == PTY_ptr) { + UniqueFEIRStmt stmt = std::make_unique(OP_assertnonnull, std::move(baseFEExpr)); + stmt->SetSrcLoc(loc); + stmts.emplace_back(std::move(stmt)); + } +} + +UniqueFEIRExpr ASTMemberExpr::Emit2FEExprImpl(std::list &stmts) const { + UniqueFEIRExpr baseFEExpr; + std::string fieldName = GetMemberName(); + bool tmpIsArrow = this->isArrow; + MIRType *tmpBaseType = this->baseType; + if (baseExpr->GetASTOp() == kASTMemberExpr) { + std::list memberNameList; + (void)memberNameList.emplace_back(GetMemberName()); + const ASTMemberExpr &finalMember = FindFinalMember(*(static_cast(baseExpr)), memberNameList); + baseFEExpr = finalMember.baseExpr->Emit2FEExpr(stmts); + tmpIsArrow = finalMember.isArrow; + tmpBaseType = finalMember.baseType; + fieldName = ASTUtil::Join(memberNameList, "$"); // add structure nesting relationship + } else { + baseFEExpr = baseExpr->Emit2FEExpr(stmts); + } + UniqueFEIRType baseFEType = std::make_unique(*tmpBaseType); + if (tmpIsArrow) { + CHECK_FATAL(tmpBaseType->IsMIRPtrType(), "Must be ptr type!"); + MIRPtrType *mirPtrType = static_cast(tmpBaseType); + MIRType *pointedMirType = mirPtrType->GetPointedType(); + CHECK_FATAL(pointedMirType->IsStructType(), "pointedMirType must be StructType"); + MIRStructType *structType = static_cast(pointedMirType); + FieldID fieldID = FEUtils::GetStructFieldID(structType, fieldName); + MIRType *reType = FEUtils::GetStructFieldType(structType, fieldID); + CHECK_FATAL(reType->GetPrimType() == memberType->GetPrimType(), "traverse fieldID error, type is inconsistent"); + UniqueFEIRType retFEType = std::make_unique(*reType); + if (retFEType->IsArray()) { + return std::make_unique(std::move(baseFEType), fieldID, std::move(baseFEExpr)); + } else { + InsertNonnullChecking(stmts, baseFEExpr->Clone()); + return FEIRBuilder::CreateExprIRead(std::move(retFEType), std::move(baseFEType), std::move(baseFEExpr), fieldID); + } + } else { + CHECK_FATAL(tmpBaseType->IsStructType(), "basetype must be StructType"); + MIRStructType *structType = static_cast(tmpBaseType); + FieldID fieldID = FEUtils::GetStructFieldID(structType, fieldName); + MIRType *reType = FEUtils::GetStructFieldType(structType, fieldID); + CHECK_FATAL(reType->GetPrimType() == memberType->GetPrimType(), "traverse fieldID error, type is inconsistent"); + UniqueFEIRType reFEType = std::make_unique(*reType); + FieldID baseID = baseFEExpr->GetFieldID(); + if (baseFEExpr->GetKind() == FEIRNodeKind::kExprIRead) { + baseFEExpr->SetFieldID(baseID + fieldID); + baseFEExpr->SetType(std::move(reFEType)); + return baseFEExpr; + } + UniqueFEIRVar tmpVar = static_cast(baseFEExpr.get())->GetVar()->Clone(); + if 
(reFEType->IsArray()) { + auto addrofExpr = std::make_unique(std::move(tmpVar)); + addrofExpr->SetFieldID(baseID + fieldID); + return addrofExpr; + } else { + return FEIRBuilder::CreateExprDReadAggField(std::move(tmpVar), baseID + fieldID, std::move(reFEType)); + } + } +} + +// ---------- ASTDesignatedInitUpdateExpr ---------- +MIRConst *ASTDesignatedInitUpdateExpr::GenerateMIRConstImpl() const { + return FEManager::GetModule().GetMemPool()->New(FEManager::GetModule(), *initListType); +} + +UniqueFEIRExpr ASTDesignatedInitUpdateExpr::Emit2FEExprImpl(std::list &stmts) const { + UniqueFEIRVar feirVar = FEIRBuilder::CreateVarNameForC(initListVarName, *initListType); + UniqueFEIRExpr baseFEIRExpr = baseExpr->Emit2FEExpr(stmts); + UniqueFEIRStmt baseFEIRStmt = std::make_unique(std::move(feirVar), std::move(baseFEIRExpr), 0); + stmts.emplace_back(std::move(baseFEIRStmt)); + static_cast(updaterExpr)->SetInitListVarName(initListVarName); + updaterExpr->Emit2FEExpr(stmts); + return nullptr; +} + +MIRConst *ASTBinaryOperatorExpr::SolveOpcodeLiorOrCior(const MIRConst &leftConst) const { + if (!leftConst.IsZero()) { + return GlobalTables::GetIntConstTable().GetOrCreateIntConst(1, *GlobalTables::GetTypeTable().GetPrimType(PTY_i32)); + } else { + MIRConst *rightConst = rightExpr->GenerateMIRConst(); + if (!rightConst->IsZero()) { + return GlobalTables::GetIntConstTable().GetOrCreateIntConst( + 1, *GlobalTables::GetTypeTable().GetPrimType(PTY_i32)); + } else { + return GlobalTables::GetIntConstTable().GetOrCreateIntConst( + 0, *GlobalTables::GetTypeTable().GetPrimType(PTY_i32)); + } + } +} + +MIRConst *ASTBinaryOperatorExpr::SolveOpcodeLandOrCand(const MIRConst &leftConst, const MIRConst &rightConst) const { + if (leftConst.IsZero()) { + return GlobalTables::GetIntConstTable().GetOrCreateIntConst( + 0, *GlobalTables::GetTypeTable().GetPrimType(PTY_i32)); + } else if (rightConst.IsZero()) { + return GlobalTables::GetIntConstTable().GetOrCreateIntConst( + 0, *GlobalTables::GetTypeTable().GetPrimType(PTY_i32)); + } else { + return GlobalTables::GetIntConstTable().GetOrCreateIntConst( + 1, *GlobalTables::GetTypeTable().GetPrimType(PTY_i32)); + } +} + +MIRConst *ASTBinaryOperatorExpr::SolveOpcodeAdd(const MIRConst &leftConst, const MIRConst &rightConst) const { + const MIRIntConst *constInt = nullptr; + const MIRConst *baseConst = nullptr; + if (leftConst.GetKind() == kConstInt) { + constInt = static_cast(&leftConst); + baseConst = &rightConst; + } else if (rightConst.GetKind() == kConstInt) { + constInt = static_cast(&rightConst); + baseConst = &leftConst; + } else { + CHECK_FATAL(false, "Unsupported yet"); + } + int64 value = constInt->GetExtValue(); + ASSERT_NOT_NULL(baseConst); + if (baseConst->GetKind() == kConstStrConst) { + std::string str = GlobalTables::GetUStrTable().GetStringFromStrIdx( + static_cast(baseConst)->GetValue()); + CHECK_FATAL(str.length() >= static_cast(value), "Invalid operation"); + str = str.substr(static_cast(value)); + UStrIdx strIdx = GlobalTables::GetUStrTable().GetOrCreateStrIdxFromName(str); + return FEManager::GetModule().GetMemPool()->New( + strIdx, *GlobalTables::GetTypeTable().GetPrimType(PTY_a64)); + } else if (baseConst->GetKind() == kConstAddrof) { + const MIRAddrofConst *konst = static_cast(baseConst); + auto idx = konst->GetSymbolIndex(); + auto id = konst->GetFieldID(); + auto ty = konst->GetType(); + auto offset = konst->GetOffset(); + return FEManager::GetModule().GetMemPool()->New(idx, id, ty, offset + value); + } else { + CHECK_FATAL(false, "NIY"); + } +} + 
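+// Constant folding for address arithmetic inside static initializers: SolveOpcodeAdd above handles
+// a string or addrof constant plus an integer constant, e.g.
+//   static const char *p = "hello" + 2;  // folded into an a64 string constant referring to "llo"
+// while an addrof constant keeps its symbol and only has the integer folded into its stored offset.
+// SolveOpcodeSub below covers the complementary addrof-constant-minus-integer form.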
+MIRConst *ASTBinaryOperatorExpr::SolveOpcodeSub(const MIRConst &leftConst, const MIRConst &rightConst) const { + CHECK_FATAL(leftConst.GetKind() == kConstAddrof && rightConst.GetKind() == kConstInt, "Unsupported"); + const MIRAddrofConst *konst = static_cast(&leftConst); + auto idx = konst->GetSymbolIndex(); + auto id = konst->GetFieldID(); + auto ty = konst->GetType(); + auto offset = konst->GetOffset(); + int64 value = static_cast(&rightConst)->GetExtValue(); + return FEManager::GetModule().GetMemPool()->New(idx, id, ty, offset - value); +} + +MIRConst *ASTBinaryOperatorExpr::GenerateMIRConstImpl() const { + MIRConst *leftConst = leftExpr->GenerateMIRConst(); + MIRConst *rightConst = nullptr; + if (opcode == OP_lior || opcode == OP_cior) { + return SolveOpcodeLiorOrCior(*leftConst); + } + rightConst = rightExpr->GenerateMIRConst(); + if (leftConst->GetKind() == kConstLblConst || rightConst->GetKind() == kConstLblConst) { + // if left or right is label mirconst, not currently implemented + return nullptr; + } + if (opcode == OP_land || opcode == OP_cand) { + return SolveOpcodeLandOrCand(*leftConst, *rightConst); + } + if (leftConst->GetKind() == rightConst->GetKind()) { + if (isConstantFolded) { + return value->Translate2MIRConst(); + } + switch (leftConst->GetKind()) { + case kConstInt: { + return MIRConstGenerator(FEManager::GetModule().GetMemPool(), static_cast(leftConst), + static_cast(rightConst), opcode); + } + case kConstFloatConst: { + return MIRConstGenerator(FEManager::GetModule().GetMemPool(), static_cast(leftConst), + static_cast(rightConst), opcode); + } + case kConstDoubleConst: { + return MIRConstGenerator(FEManager::GetModule().GetMemPool(), static_cast(leftConst), + static_cast(rightConst), opcode); + } + default: { + CHECK_FATAL(false, "Unsupported yet"); + return nullptr; + } + } + } + if (opcode == OP_add) { + return SolveOpcodeAdd(*leftConst, *rightConst); + } else if (opcode == OP_sub) { + return SolveOpcodeSub(*leftConst, *rightConst); + } else { + CHECK_FATAL(false, "NIY"); + } + return nullptr; +} + +UniqueFEIRType ASTBinaryOperatorExpr::SelectBinaryOperatorType(UniqueFEIRExpr &left, UniqueFEIRExpr &right) const { + // For arithmetical calculation only + std::map binaryTypePriority = { + {PTY_u1, 0}, + {PTY_i8, 1}, + {PTY_u8, 2}, + {PTY_i16, 3}, + {PTY_u16, 4}, + {PTY_i32, 5}, + {PTY_u32, 6}, + {PTY_i64, 7}, + {PTY_u64, 8}, + {PTY_f32, 9}, + {PTY_f64, 10} + }; + UniqueFEIRType feirType = std::make_unique(*retType); + if (!cvtNeeded) { + return feirType; + } + if (binaryTypePriority.find(left->GetPrimType()) == binaryTypePriority.end() || + binaryTypePriority.find(right->GetPrimType()) == binaryTypePriority.end()) { + if (left->GetPrimType() != feirType->GetPrimType()) { + left = FEIRBuilder::CreateExprCastPrim(std::move(left), feirType->GetPrimType()); + } + if (right->GetPrimType() != feirType->GetPrimType()) { + right = FEIRBuilder::CreateExprCastPrim(std::move(right), feirType->GetPrimType()); + } + return feirType; + } + MIRType *dstType; + if (binaryTypePriority[left->GetPrimType()] > binaryTypePriority[right->GetPrimType()]) { + right = FEIRBuilder::CreateExprCastPrim(std::move(right), left->GetPrimType()); + dstType = left->GetType()->GenerateMIRTypeAuto(); + } else { + left = FEIRBuilder::CreateExprCastPrim(std::move(left), right->GetPrimType()); + dstType = right->GetType()->GenerateMIRTypeAuto(); + } + return std::make_unique(*dstType); +} + +UniqueFEIRExpr ASTBinaryOperatorExpr::Emit2FEExprComplexCalculations(std::list &stmts) const { + UniqueFEIRVar 
tempVar = FEIRBuilder::CreateVarNameForC(FEUtils::GetSequentialName("Complex_"), *retType); + auto complexElementFEType = std::make_unique(*complexElementType); + UniqueFEIRExpr realFEExpr = FEIRBuilder::CreateExprBinary(complexElementFEType->Clone(), opcode, + leftRealExpr->Emit2FEExpr(stmts), + rightRealExpr->Emit2FEExpr(stmts)); + UniqueFEIRExpr imagFEExpr = FEIRBuilder::CreateExprBinary(complexElementFEType->Clone(), opcode, + leftImagExpr->Emit2FEExpr(stmts), + rightImagExpr->Emit2FEExpr(stmts)); + auto realStmt = FEIRBuilder::CreateStmtDAssignAggField(tempVar->Clone(), std::move(realFEExpr), kComplexRealID); + auto imagStmt = FEIRBuilder::CreateStmtDAssignAggField(tempVar->Clone(), std::move(imagFEExpr), kComplexImagID); + stmts.emplace_back(std::move(realStmt)); + stmts.emplace_back(std::move(imagStmt)); + auto dread = FEIRBuilder::CreateExprDRead(std::move(tempVar)); + static_cast(dread.get())->SetFieldType(std::move(complexElementFEType)); + return dread; +} + +UniqueFEIRExpr ASTBinaryOperatorExpr::Emit2FEExprComplexCompare(std::list &stmts) const { + auto boolFEType = std::make_unique(*GlobalTables::GetTypeTable().GetPrimType(PTY_i32)); + UniqueFEIRExpr realFEExpr = FEIRBuilder::CreateExprBinary(boolFEType->Clone(), opcode, + leftRealExpr->Emit2FEExpr(stmts), + rightRealExpr->Emit2FEExpr(stmts)); + UniqueFEIRExpr imagFEExpr = FEIRBuilder::CreateExprBinary(boolFEType->Clone(), opcode, + leftImagExpr->Emit2FEExpr(stmts), + rightImagExpr->Emit2FEExpr(stmts)); + UniqueFEIRExpr finalExpr; + if (opcode == OP_eq) { + finalExpr = FEIRBuilder::CreateExprBinary(boolFEType->Clone(), OP_land, std::move(realFEExpr), + std::move(imagFEExpr)); + } else { + finalExpr = FEIRBuilder::CreateExprBinary(boolFEType->Clone(), OP_lior, std::move(realFEExpr), + std::move(imagFEExpr)); + } + return finalExpr; +} + +UniqueFEIRExpr ASTBinaryOperatorExpr::Emit2FEExprLogicOperate(std::list &stmts) const { + bool inShortCircuit = true; + uint32 trueLabelIdx = trueIdx; + uint32 falseLabelIdx = falseIdx; + uint32 fallthrouLabelIdx, jumpToLabelIdx; + uint32 rightCondLabelIdx = FEUtils::GetSequentialNumber(); + MIRType *tempVarType = GlobalTables::GetTypeTable().GetUInt32(); + UniqueFEIRVar shortCircuit = FEIRBuilder::CreateVarNameForC(GetVarName(), *tempVarType); + + // check short circuit boundary + if (trueLabelIdx == 0) { + trueLabelIdx = FEUtils::GetSequentialNumber(); + falseLabelIdx = FEUtils::GetSequentialNumber(); + inShortCircuit = false; + } + + Opcode op = opcode == OP_cior ? 
OP_brtrue : OP_brfalse; + if (op == OP_brtrue) { + fallthrouLabelIdx = falseLabelIdx; + jumpToLabelIdx = trueLabelIdx; + leftExpr->SetShortCircuitIdx(jumpToLabelIdx, rightCondLabelIdx); + } else { + fallthrouLabelIdx = trueLabelIdx; + jumpToLabelIdx = falseLabelIdx; + leftExpr->SetShortCircuitIdx(rightCondLabelIdx, jumpToLabelIdx); + } + rightExpr->SetShortCircuitIdx(trueLabelIdx, falseLabelIdx); + + std::string rightCondLabel = FEUtils::GetSequentialName0(FEUtils::kCondGoToStmtLabelNamePrefix, rightCondLabelIdx); + std::string fallthrouLabel = FEUtils::GetSequentialName0(FEUtils::kCondGoToStmtLabelNamePrefix, fallthrouLabelIdx); + std::string jumpToLabel = FEUtils::GetSequentialName0(FEUtils::kCondGoToStmtLabelNamePrefix, jumpToLabelIdx); + + // brfalse/brtrue label (leftCond) + auto leftFEExpr = leftExpr->Emit2FEExpr(stmts); + if (leftFEExpr != nullptr) { + auto leftCond = FEIRBuilder::CreateExprZeroCompare(OP_ne, std::move(leftFEExpr)); + UniqueFEIRStmt leftCondGoToExpr = std::make_unique(leftCond->Clone(), op, jumpToLabel); + leftCondGoToExpr->SetSrcLoc(leftExpr->GetSrcLoc()); + stmts.emplace_back(std::move(leftCondGoToExpr)); + } + + auto rightCondlabelStmt = std::make_unique(rightCondLabel); + rightCondlabelStmt->SetSrcLoc(loc); + stmts.emplace_back(std::move(rightCondlabelStmt)); + + // brfalse/brtrue label (rightCond) + auto rightFEExpr = rightExpr->Emit2FEExpr(stmts); + if (rightFEExpr != nullptr) { + auto rightCond = FEIRBuilder::CreateExprZeroCompare(OP_ne, std::move(rightFEExpr)); + UniqueFEIRStmt rightCondGoToExpr = std::make_unique(rightCond->Clone(), op, jumpToLabel); + rightCondGoToExpr->SetSrcLoc(rightExpr->GetSrcLoc()); + UniqueFEIRStmt goStmt = FEIRBuilder::CreateStmtGoto(fallthrouLabel); + goStmt->SetSrcLoc(rightExpr->GetSrcLoc()); + stmts.emplace_back(std::move(rightCondGoToExpr)); + stmts.emplace_back(std::move(goStmt)); + } + + UniqueFEIRExpr returnValue(nullptr); + // when reaching the outer layer of a short circuit, return explicit value for each branch + if (!inShortCircuit) { + std::string nextLabel = FEUtils::GetSequentialName(FEUtils::kCondGoToStmtLabelNamePrefix); + UniqueFEIRExpr trueConst = FEIRBuilder::CreateExprConstAnyScalar(PTY_u32, 1); + UniqueFEIRExpr falseConst = FEIRBuilder::CreateExprConstAnyScalar(PTY_u32, 0); + UniqueFEIRStmt goStmt = FEIRBuilder::CreateStmtGoto(nextLabel); + goStmt->SetSrcLoc(rightExpr->GetSrcLoc()); + auto trueCircuit = std::make_unique(shortCircuit->Clone(), std::move(trueConst), 0); + trueCircuit->SetSrcLoc(rightExpr->GetSrcLoc()); + auto falseCircuit = std::make_unique(shortCircuit->Clone(), std::move(falseConst), 0); + falseCircuit->SetSrcLoc(rightExpr->GetSrcLoc()); + auto labelFallthrouStmt = std::make_unique(fallthrouLabel); + labelFallthrouStmt->SetSrcLoc(rightExpr->GetSrcLoc()); + auto labelJumpToStmt = std::make_unique(jumpToLabel); + labelJumpToStmt->SetSrcLoc(rightExpr->GetSrcLoc()); + auto labelNextStmt = std::make_unique(nextLabel); + labelNextStmt->SetSrcLoc(rightExpr->GetSrcLoc()); + stmts.emplace_back(std::move(labelFallthrouStmt)); + stmts.emplace_back(op == OP_brtrue ? std::move(falseCircuit) : std::move(trueCircuit)); + stmts.emplace_back(std::move(goStmt)); + stmts.emplace_back(std::move(labelJumpToStmt)); + stmts.emplace_back(op == OP_brtrue ? 
std::move(trueCircuit) : std::move(falseCircuit)); + stmts.emplace_back(std::move(labelNextStmt)); + returnValue = FEIRBuilder::CreateExprDRead(shortCircuit->Clone()); + } + return returnValue; +} + +UniqueFEIRExpr ASTBinaryOperatorExpr::Emit2FEExprLogicOperateSimplify(std::list &stmts) const { + std::list lStmts; + std::list cStmts; + std::list rStmts; + Opcode op = opcode == OP_cior ? OP_brtrue : OP_brfalse; + MIRType *tempVarType = GlobalTables::GetTypeTable().GetInt32(); + UniqueFEIRType tempFeirType = std::make_unique(*tempVarType); + UniqueFEIRVar shortCircuit = FEIRBuilder::CreateVarNameForC(GetVarName(), *tempVarType); + std::string labelName = FEUtils::GetSequentialName(FEUtils::kCondGoToStmtLabelNamePrefix + "label_"); + auto leftFEExpr = leftExpr->Emit2FEExpr(lStmts); + leftFEExpr = FEIRBuilder::CreateExprZeroCompare(OP_ne, std::move(leftFEExpr)); + auto leftStmt = std::make_unique(shortCircuit->Clone(), leftFEExpr->Clone(), 0); + cStmts.emplace_back(std::move(leftStmt)); + auto dreadExpr = FEIRBuilder::CreateExprDRead(shortCircuit->Clone()); + UniqueFEIRStmt condGoToExpr = std::make_unique(dreadExpr->Clone(), op, labelName); + cStmts.emplace_back(std::move(condGoToExpr)); + auto rightFEExpr = rightExpr->Emit2FEExpr(rStmts); + rightFEExpr = FEIRBuilder::CreateExprZeroCompare(OP_ne, std::move(rightFEExpr)); + if (rStmts.empty()) { + stmts.splice(stmts.end(), lStmts); + UniqueFEIRType feirType = SelectBinaryOperatorType(leftFEExpr, rightFEExpr); + return FEIRBuilder::CreateExprBinary(std::move(feirType), opcode, std::move(leftFEExpr), std::move(rightFEExpr)); + } + auto rightStmt = std::make_unique(shortCircuit->Clone(), rightFEExpr->Clone(), 0); + rStmts.emplace_back(std::move(rightStmt)); + auto labelStmt = std::make_unique(labelName); + rStmts.emplace_back(std::move(labelStmt)); + stmts.splice(stmts.end(), lStmts); + stmts.splice(stmts.end(), cStmts); + stmts.splice(stmts.end(), rStmts); + return FEIRBuilder::CreateExprZeroCompare(OP_ne, std::move(dreadExpr)); +} + +UniqueFEIRExpr ASTBinaryOperatorExpr::Emit2FEExprImpl(std::list &stmts) const { + if (complexElementType != nullptr) { + if (opcode == OP_add || opcode == OP_sub) { + return Emit2FEExprComplexCalculations(stmts); + } else if (opcode == OP_eq || opcode == OP_ne) { + return Emit2FEExprComplexCompare(stmts); + } else { + CHECK_FATAL(false, "NIY"); + } + } else { + if (opcode == OP_cior || opcode == OP_cand) { + if (FEOptions::GetInstance().IsSimplifyShortCircuit()) { + return Emit2FEExprLogicOperateSimplify(stmts); + } else { + return Emit2FEExprLogicOperate(stmts); + } + } + auto leftFEExpr = leftExpr->Emit2FEExpr(stmts); + auto rightFEExpr = rightExpr->Emit2FEExpr(stmts); + if (FEOptions::GetInstance().IsO2()) { + Ror ror(opcode, leftFEExpr, rightFEExpr); + auto rorExpr = ror.Emit2FEExpr(); + if (rorExpr != nullptr) { + return rorExpr; + } + } + UniqueFEIRType feirType = SelectBinaryOperatorType(leftFEExpr, rightFEExpr); + return FEIRBuilder::CreateExprBinary(std::move(feirType), opcode, std::move(leftFEExpr), std::move(rightFEExpr)); + } +} + +void ASTAssignExpr::GetActualRightExpr(UniqueFEIRExpr &right, const UniqueFEIRExpr &left) const { + if (right->GetPrimType() != left->GetPrimType() && + right->GetPrimType() != PTY_void && + right->GetPrimType() != PTY_agg) { + PrimType dstType = left->GetPrimType(); + if (right->GetPrimType() == PTY_f32 || right->GetPrimType() == PTY_f64) { + if (left->GetPrimType() == PTY_u8 || left->GetPrimType() == PTY_u16) { + dstType = PTY_u32; + } + if (left->GetPrimType() == PTY_i8 || 
left->GetPrimType() == PTY_i16) { + dstType = PTY_i32; + } + } + right = FEIRBuilder::CreateExprCastPrim(std::move(right), dstType); + } +} + +UniqueFEIRExpr ASTAssignExpr::Emit2FEExprImpl(std::list &stmts) const { + UniqueFEIRExpr rightFEExpr = rightExpr->Emit2FEExpr(stmts); // parse the right expr to generate stmt first + UniqueFEIRExpr leftFEExpr; + if (isCompoundAssign) { + std::list dummyStmts; + leftFEExpr = leftExpr->Emit2FEExpr(dummyStmts); + } else { + leftFEExpr = leftExpr->Emit2FEExpr(stmts); + } + // C89 does not support lvalue casting, but cxx support, needs to improve here + if (leftFEExpr->GetKind() == FEIRNodeKind::kExprDRead && !leftFEExpr->GetType()->IsArray()) { + auto dreadFEExpr = static_cast(leftFEExpr.get()); + FieldID fieldID = dreadFEExpr->GetFieldID(); + UniqueFEIRVar var = dreadFEExpr->GetVar()->Clone(); + if (ConditionalOptimize::DeleteRedundantTmpVar(rightFEExpr, stmts, var, leftFEExpr->GetPrimType(), fieldID)) { + return leftFEExpr; + } + GetActualRightExpr(rightFEExpr, leftFEExpr); + auto preStmt = std::make_unique(std::move(var), rightFEExpr->Clone(), fieldID); + stmts.emplace_back(std::move(preStmt)); + return leftFEExpr; + } else if (leftFEExpr->GetKind() == FEIRNodeKind::kExprIRead) { + auto ireadFEExpr = static_cast(leftFEExpr.get()); + FieldID fieldID = ireadFEExpr->GetFieldID(); + GetActualRightExpr(rightFEExpr, leftFEExpr); + auto preStmt = std::make_unique(ireadFEExpr->GetClonedPtrType(), ireadFEExpr->GetClonedOpnd(), + rightFEExpr->Clone(), fieldID); + stmts.emplace_back(std::move(preStmt)); + return leftFEExpr; + } + return nullptr; +} + +bool ASTAssignExpr::IsInsertNonnullChecking(const UniqueFEIRExpr &rExpr) const { + if (!FEOptions::GetInstance().IsNpeCheckDynamic()) { + return false; + } + // The pointer assignment + if (retType == nullptr || !(retType->IsMIRPtrType())) { + return false; + } + // The Rvalue is a pointer type + if ((rExpr->GetKind() == kExprDRead && rExpr->GetPrimType() == PTY_ptr) || + (rExpr->GetKind() == kExprIRead && rExpr->GetFieldID() != 0 && rExpr->GetPrimType() == PTY_ptr)) { + return true; + } + // The Rvalue is NULL + if (ENCChecker::HasNullExpr(rExpr)) { + return true; + } + return false; +} + +UniqueFEIRExpr ASTBOComma::Emit2FEExprImpl(std::list &stmts) const { + (void)leftExpr->Emit2FEExpr(stmts); + return rightExpr->Emit2FEExpr(stmts); +} + +UniqueFEIRExpr ASTBOPtrMemExpr::Emit2FEExprImpl(std::list &stmts) const { + (void)stmts; + CHECK_FATAL(false, "NYI"); + return nullptr; +} + +// ---------- ASTParenExpr ---------- +UniqueFEIRExpr ASTParenExpr::Emit2FEExprImpl(std::list &stmts) const { + ASTExpr *childExpr = child; + CHECK_FATAL(childExpr != nullptr, "childExpr is nullptr"); + childExpr->SetShortCircuitIdx(trueIdx, falseIdx); + return childExpr->Emit2FEExpr(stmts); +} + +// ---------- ASTIntegerLiteral ---------- +MIRConst *ASTIntegerLiteral::GenerateMIRConstImpl() const { + return GlobalTables::GetIntConstTable().GetOrCreateIntConst(static_cast(val), + *GlobalTables::GetTypeTable().GetPrimType(PTY_i64)); +} + +UniqueFEIRExpr ASTIntegerLiteral::Emit2FEExprImpl(std::list &stmts) const { + (void)stmts; + UniqueFEIRExpr constExpr = std::make_unique(val, mirType->GetPrimType()); + return constExpr; +} + +// ---------- ASTFloatingLiteral ---------- +MIRConst *ASTFloatingLiteral::GenerateMIRConstImpl() const { + MemPool *mp = FEManager::GetModule().GetMemPool(); + MIRConst *cst; + MIRType *type; + if (kind == FloatKind::F32) { + type = GlobalTables::GetTypeTable().GetPrimType(PTY_f32); + cst = 
mp->New(static_cast(val), *type); + } else { + type = GlobalTables::GetTypeTable().GetPrimType(PTY_f64); + cst = mp->New(val, *type); + } + return cst; +} + +UniqueFEIRExpr ASTFloatingLiteral::Emit2FEExprImpl(std::list &stmts) const { + (void)stmts; + UniqueFEIRExpr expr; + if (kind == FloatKind::F32) { + expr = FEIRBuilder::CreateExprConstF32(static_cast(val)); + } else { + expr = FEIRBuilder::CreateExprConstF64(val); + } + CHECK_NULL_FATAL(expr); + return expr; +} + +// ---------- ASTCharacterLiteral ---------- +UniqueFEIRExpr ASTCharacterLiteral::Emit2FEExprImpl(std::list &stmts) const { + (void)stmts; + UniqueFEIRExpr constExpr = FEIRBuilder::CreateExprConstAnyScalar(type, val); + return constExpr; +} + +// ---------- ASTConditionalOperator ---------- +UniqueFEIRExpr ASTConditionalOperator::Emit2FEExprImpl(std::list &stmts) const { + UniqueFEIRExpr condFEIRExpr = condExpr->Emit2FEExpr(stmts); + // a noncomparative conditional expr need to be converted a comparative conditional expr + if (!(condFEIRExpr->GetKind() == kExprBinary && static_cast(condFEIRExpr.get())->IsComparative())) { + condFEIRExpr = FEIRBuilder::CreateExprZeroCompare(OP_ne, std::move(condFEIRExpr)); + } + std::list trueStmts; + UniqueFEIRExpr trueFEIRExpr = trueExpr->Emit2FEExpr(trueStmts); + std::list falseStmts; + UniqueFEIRExpr falseFEIRExpr = falseExpr->Emit2FEExpr(falseStmts); + // when subExpr is void + if (trueFEIRExpr == nullptr || falseFEIRExpr == nullptr || mirType->GetPrimType() == PTY_void) { + UniqueFEIRStmt stmtIf = FEIRBuilder::CreateStmtIf(std::move(condFEIRExpr), trueStmts, falseStmts); + stmts.emplace_back(std::move(stmtIf)); + return nullptr; + } + // Otherwise, (e.g., a < 1 ? 1 : a++) create a temporary var to hold the return trueExpr or falseExpr value + MIRType *retType = mirType; + if (retType->GetKind() == kTypeBitField) { + retType = GlobalTables::GetTypeTable().GetPrimType(retType->GetPrimType()); + } + trueFEIRExpr->SetIsEnhancedChecking(false); + falseFEIRExpr->SetIsEnhancedChecking(false); + UniqueFEIRVar tempVar = FEIRBuilder::CreateVarNameForC(varName, *retType); + UniqueFEIRVar tempVarCloned1 = tempVar->Clone(); + UniqueFEIRVar tempVarCloned2 = tempVar->Clone(); + UniqueFEIRStmt retTrueStmt = FEIRBuilder::CreateStmtDAssign(std::move(tempVar), std::move(trueFEIRExpr)); + retTrueStmt->SetSrcLoc(trueExpr->GetSrcLoc()); + trueStmts.emplace_back(std::move(retTrueStmt)); + UniqueFEIRStmt retFalseStmt = FEIRBuilder::CreateStmtDAssign(std::move(tempVarCloned1), std::move(falseFEIRExpr)); + retFalseStmt->SetSrcLoc(falseExpr->GetSrcLoc()); + falseStmts.emplace_back(std::move(retFalseStmt)); + UniqueFEIRStmt stmtIf = FEIRBuilder::CreateStmtIf(std::move(condFEIRExpr), trueStmts, falseStmts); + stmts.emplace_back(std::move(stmtIf)); + UniqueFEIRExpr expr = FEIRBuilder::CreateExprDRead(std::move(tempVarCloned2)); + expr->SetIsEnhancedChecking(false); + if (!FEOptions::GetInstance().IsNpeCheckDynamic() || !FEOptions::GetInstance().IsBoundaryCheckDynamic()) { + expr->SetKind(kExprTernary); + } + return expr; +} + +// ---------- ASTConstantExpr ---------- +UniqueFEIRExpr ASTConstantExpr::Emit2FEExprImpl(std::list &stmts) const { + return child->Emit2FEExpr(stmts); +} + +MIRConst *ASTConstantExpr::GenerateMIRConstImpl() const { + if (child->GetConstantValue() == nullptr || child->GetConstantValue()->GetPrimType() == PTY_begin) { + return child->GenerateMIRConst(); + } else { + return child->GetConstantValue()->Translate2MIRConst(); + } +} + +// ---------- ASTImaginaryLiteral ---------- +UniqueFEIRExpr 
ASTImaginaryLiteral::Emit2FEExprImpl(std::list &stmts) const { + CHECK_NULL_FATAL(complexType); + GStrIdx nameIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(FEUtils::GetSequentialName("Complex_")); + UniqueFEIRVar complexVar = FEIRBuilder::CreateVarNameForC(nameIdx, *complexType); + UniqueFEIRVar clonedComplexVar = complexVar->Clone(); + UniqueFEIRVar clonedComplexVar2 = complexVar->Clone(); + // real number + UniqueFEIRExpr zeroConstExpr = FEIRBuilder::CreateExprConstAnyScalar(elemType->GetPrimType(), 0); + UniqueFEIRStmt realStmt = std::make_unique(std::move(complexVar), std::move(zeroConstExpr), 1); + stmts.emplace_back(std::move(realStmt)); + // imaginary number + CHECK_FATAL(child != nullptr, "childExpr is nullptr"); + UniqueFEIRExpr childFEIRExpr = child->Emit2FEExpr(stmts); + UniqueFEIRStmt imagStmt = std::make_unique(std::move(clonedComplexVar), std::move(childFEIRExpr), 2); + stmts.emplace_back(std::move(imagStmt)); + // return expr to parent operation + UniqueFEIRExpr expr = FEIRBuilder::CreateExprDRead(std::move(clonedComplexVar2)); + return expr; +} + +// ---------- ASTVAArgExpr ---------- +UniqueFEIRExpr ASTVAArgExpr::Emit2FEExprImpl(std::list &stmts) const { + CHECK_NULL_FATAL(mirType); + VaArgInfo info = ProcessValistArgInfo(*mirType); + UniqueFEIRExpr readVaList = child->Emit2FEExpr(stmts); + // The va_arg_offset temp var is created and assigned from __gr_offs or __vr_offs of va_list + MIRType *int32Type = GlobalTables::GetTypeTable().GetInt32(); + UniqueFEIRType int32FETRType = std::make_unique(*int32Type); + UniqueFEIRVar offsetVar = FEIRBuilder::CreateVarNameForC(FEUtils::GetSequentialName("va_arg_offs_"), *int32Type); + UniqueFEIRExpr dreadVaListOffset = FEIRBuilder::ReadExprField( + readVaList->Clone(), info.isGPReg ? 4 : 5, int32FETRType->Clone()); + UniqueFEIRStmt dassignOffsetVar = FEIRBuilder::CreateStmtDAssign(offsetVar->Clone(), dreadVaListOffset->Clone()); + stmts.emplace_back(std::move(dassignOffsetVar)); + UniqueFEIRExpr dreadOffsetVar = FEIRBuilder::CreateExprDRead(offsetVar->Clone()); + // The va_arg temp var is created and assigned in tow following way: + // If the va_arg_offs is vaild, i.e., its value should be (0 - (8 - named_arg) * 8) ~ 0, + // the va_arg will be got from GP or FP/SIMD arg reg, otherwise va_arg will be got from stack. + // See https://developer.arm.com/documentation/ihi0055/latest/?lang=en#the-va-arg-macro for more info + const std::string onStackStr = FEUtils::GetSequentialName("va_on_stack_"); + UniqueFEIRExpr cond1 = FEIRBuilder::CreateExprBinary( // checking validity: regOffs >= 0, goto on_stack + OP_ge, dreadOffsetVar->Clone(), FEIRBuilder::CreateExprConstI32(0)); + UniqueFEIRStmt condGoTo1 = std::make_unique(cond1->Clone(), OP_brtrue, onStackStr); + stmts.emplace_back(std::move(condGoTo1)); + // The va_arg set next reg setoff + UniqueFEIRExpr argAUnitOffs = FEIRBuilder::CreateExprBinary( + OP_add, dreadOffsetVar->Clone(), FEIRBuilder::CreateExprConstI32(info.regOffset)); + UniqueFEIRStmt assignArgNextOffs = FEIRBuilder::AssginStmtField( + readVaList->Clone(), std::move(argAUnitOffs), info.isGPReg ? 
4 : 5); + stmts.emplace_back(std::move(assignArgNextOffs)); + UniqueFEIRExpr cond2 = FEIRBuilder::CreateExprBinary( // checking validity: regOffs + next offset > 0, goto on_stack + OP_gt, dreadVaListOffset->Clone(), FEIRBuilder::CreateExprConstI32(0)); + UniqueFEIRStmt condGoTo2 = std::make_unique(cond2->Clone(), OP_brtrue, onStackStr); + stmts.emplace_back(std::move(condGoTo2)); + // The va_arg will be got from GP or FP/SIMD arg reg + MIRType *sizeType = GlobalTables::GetTypeTable().GetPtrType(); + UniqueFEIRType sizeFEIRType = std::make_unique(*sizeType); + MIRType *vaArgType = !info.isCopyedMem ? mirType : GlobalTables::GetTypeTable().GetOrCreatePointerType(*mirType); + MIRType *ptrType = GlobalTables::GetTypeTable().GetOrCreatePointerType(*vaArgType); + UniqueFEIRVar vaArgVar = FEIRBuilder::CreateVarNameForC(FEUtils::GetSequentialName("va_arg_"), *ptrType); + UniqueFEIRExpr dreadVaArgTop = FEIRBuilder::ReadExprField( + readVaList->Clone(), info.isGPReg ? 2 : 3, sizeFEIRType->Clone()); + ProcessBigEndianForReg(stmts, *vaArgType, offsetVar, info); // process big endian for reg + UniqueFEIRExpr cvtOffset = FEIRBuilder::CreateExprCastPrim(dreadOffsetVar->Clone(), PTY_i64); + UniqueFEIRExpr addTopAndOffs = FEIRBuilder::CreateExprBinary(OP_add, std::move(dreadVaArgTop), std::move(cvtOffset)); + UniqueFEIRStmt dassignVaArgFromReg = FEIRBuilder::CreateStmtDAssign(vaArgVar->Clone(), std::move(addTopAndOffs)); + stmts.emplace_back(std::move(dassignVaArgFromReg)); + if (info.HFAType != nullptr && !info.isCopyedMem) { + CvtHFA2Struct(*static_cast(mirType), *info.HFAType, vaArgVar->Clone(), stmts); + } + const std::string endStr = FEUtils::GetSequentialName("va_end_"); + UniqueFEIRStmt goToEnd = FEIRBuilder::CreateStmtGoto(endStr); + stmts.emplace_back(std::move(goToEnd)); + // Otherwise, the va_arg will be got from stack and set next stack setoff + UniqueFEIRStmt onStackLabelStmt = std::make_unique(onStackStr); + stmts.emplace_back(std::move(onStackLabelStmt)); + UniqueFEIRExpr dreadStackTop = FEIRBuilder::ReadExprField(readVaList->Clone(), 1, sizeFEIRType->Clone()); + UniqueFEIRStmt dassignVaArgFromStack = FEIRBuilder::CreateStmtDAssign(vaArgVar->Clone(), dreadStackTop->Clone()); + UniqueFEIRExpr stackAUnitOffs = FEIRBuilder::CreateExprBinary( + OP_add, dreadStackTop->Clone(), FEIRBuilder::CreateExprConstPtr(info.stackOffset)); + stmts.emplace_back(std::move(dassignVaArgFromStack)); + UniqueFEIRStmt dassignStackNextOffs = FEIRBuilder::AssginStmtField( + readVaList->Clone(), std::move(stackAUnitOffs), 1); + stmts.emplace_back(std::move(dassignStackNextOffs)); + ProcessBigEndianForStack(stmts, *vaArgType, vaArgVar); // process big endian for stack + // return va_arg + UniqueFEIRStmt endLabelStmt = std::make_unique(endStr); + stmts.emplace_back(std::move(endLabelStmt)); + UniqueFEIRExpr dreadRetVar = FEIRBuilder::CreateExprDRead(vaArgVar->Clone()); + UniqueFEIRType ptrFEIRType = std::make_unique(*ptrType); + if (info.isCopyedMem) { + MIRType *tmpType = GlobalTables::GetTypeTable().GetOrCreatePointerType(*mirType); + UniqueFEIRType tmpFETRType = std::make_unique(*tmpType); + dreadRetVar = FEIRBuilder::CreateExprIRead(tmpFETRType->Clone(), ptrFEIRType->Clone(), std::move(dreadRetVar)); + ptrFEIRType = std::move(tmpFETRType); + } + UniqueFEIRType baseFEIRType = std::make_unique(*mirType); + UniqueFEIRExpr retExpr = FEIRBuilder::CreateExprIRead( + std::move(baseFEIRType), std::move(ptrFEIRType), std::move(dreadRetVar)); + return retExpr; +} + +void ASTVAArgExpr::ProcessBigEndianForReg(std::list &stmts, 
MIRType &vaArgType, + const UniqueFEIRVar &offsetVar, const VaArgInfo &info) const { + if (!FEOptions::GetInstance().IsBigEndian()) { + return; + } + int offset = 0; + if (info.isGPReg && !vaArgType.IsStructType() && vaArgType.GetSize() < 8) { // general reg + offset = 8 - static_cast(vaArgType.GetSize()); + } else if (info.HFAType != nullptr) { // HFA + offset = 16 - static_cast(info.HFAType->GetSize()); + } else if (!info.isGPReg && !vaArgType.IsStructType() && vaArgType.GetSize() < 16) { // fp/simd reg + offset = 16 - static_cast(vaArgType.GetSize()); + } + if (offset == 0) { + return; + } + UniqueFEIRExpr addExpr = FEIRBuilder::CreateExprBinary( + OP_add, FEIRBuilder::CreateExprDRead(offsetVar->Clone()), FEIRBuilder::CreateExprConstI32(offset)); + UniqueFEIRStmt stmt = FEIRBuilder::CreateStmtDAssign(offsetVar->Clone(), std::move(addExpr)); + stmts.emplace_back(std::move(stmt)); +} + +void ASTVAArgExpr::ProcessBigEndianForStack(std::list &stmts, MIRType &vaArgType, + const UniqueFEIRVar &vaArgVar) const { + if (!FEOptions::GetInstance().IsBigEndian()) { + return; + } + int offset = 0; + if (!vaArgType.IsStructType() && vaArgType.GetSize() < 8) { + offset = 8 - static_cast(vaArgType.GetSize()); + } + if (offset == 0) { + return; + } + UniqueFEIRExpr addExpr = FEIRBuilder::CreateExprBinary( + OP_add, FEIRBuilder::CreateExprDRead(vaArgVar->Clone()), FEIRBuilder::CreateExprConstU64(offset)); + UniqueFEIRExpr dreadRetVar = FEIRBuilder::CreateExprDRead(vaArgVar->Clone()); + UniqueFEIRStmt stmt = FEIRBuilder::CreateStmtDAssign(vaArgVar->Clone(), std::move(addExpr)); + stmts.emplace_back(std::move(stmt)); +} + +VaArgInfo ASTVAArgExpr::ProcessValistArgInfo(const MIRType &type) const { + VaArgInfo info; + if (type.IsScalarType()) { + switch (type.GetPrimType()) { + case PTY_f32: // float is automatically promoted to double when passed to va_arg + WARN(kLncWarn, "error: float is promoted to double when passed to va_arg"); + case PTY_f64: // double + info = { false, 16, 8, false, nullptr }; + break; + case PTY_i32: + case PTY_u32: + case PTY_i64: + case PTY_u64: + info = { true, 8, 8, false, nullptr }; + break; + default: // bool, char, short, and unscoped enumerations are converted to int or wider integer types + WARN(kLncWarn, "error: bool, char, short, and unscoped enumerations are promoted to int or "\ + "wider integer types when passed to va_arg"); + info = { true, 8, 8, false, nullptr }; + break; + } + } else if (type.IsMIRPtrType()) { + info = { true, 8, 8, false, nullptr }; + } else if (type.IsStructType()) { + MIRStructType structType = static_cast(type); + size_t size = structType.GetSize(); + size = (size + 7) & -8; // size round up 8 + if (size > 16) { + info = { true, 8, 8, true, nullptr }; + } else { + MIRType *hfa = IsHFAType(structType); + if (hfa != nullptr) { + int fieldsNum = static_cast(structType.GetSize() / hfa->GetSize()); + info = { false, fieldsNum * 16, static_cast(size), false, hfa }; + } else { + info = { true, static_cast(size), static_cast(size), false, nullptr }; + } + } + } else { + CHECK_FATAL(false, "unsupport mirtype"); + } + return info; +} + +// Homogeneous Floating-point Aggregate: +// A data type with 2 to 4 identical floating-point members, either floats or doubles. 
+// (including 1 members here, struct nested array) +MIRType *ASTVAArgExpr::IsHFAType(const MIRStructType &type) const { + uint32 size = static_cast(type.GetFieldsSize()); + if (size < 1 || size > 4) { + return nullptr; + } + MIRType *firstType = nullptr; + for (uint32 i = 0; i < size; ++i) { + MIRType *fieldType = type.GetElemType(i); + if (fieldType->GetKind() == kTypeArray) { + MIRArrayType *arrayType = static_cast(fieldType); + MIRType *elemType = arrayType->GetElemType(); + if (elemType->GetPrimType() != PTY_f32 && elemType->GetPrimType() != PTY_f64) { + return nullptr; + } + fieldType = elemType; + } else if (fieldType->GetPrimType() != PTY_f32 && fieldType->GetPrimType() != PTY_f64) { + return nullptr; + } + if (firstType == nullptr) { + firstType = fieldType; + } else if (fieldType != firstType) { + return nullptr; + } + } + return firstType; +} + +// When va_arg is HFA struct, +// if it is passed as parameter in register then each uniquely addressable field goes in its own register. +// So its fields in FP/SIMD arg reg are still 128 bit and should be converted float or double type fields. +void ASTVAArgExpr::CvtHFA2Struct(const MIRStructType &type, MIRType &fieldType, const UniqueFEIRVar &vaArgVar, + std::list &stmts) const { + UniqueFEIRVar copyedVar = FEIRBuilder::CreateVarNameForC(FEUtils::GetSequentialName("va_arg_struct_"), *mirType); + MIRType *ptrMirType = GlobalTables::GetTypeTable().GetOrCreatePointerType(fieldType); + UniqueFEIRType baseType = std::make_unique(fieldType); + UniqueFEIRType ptrType = std::make_unique(*ptrMirType); + size_t size = type.GetSize() / fieldType.GetSize(); // fieldType must be nonzero + for (size_t i = 0; i < size; ++i) { + UniqueFEIRExpr dreadVaArg = FEIRBuilder::CreateExprDRead(vaArgVar->Clone()); + if (i != 0) { + dreadVaArg = FEIRBuilder::CreateExprBinary( + OP_add, std::move(dreadVaArg), FEIRBuilder::CreateExprConstPtr(static_cast(16 * i))); + } + UniqueFEIRExpr ireadVaArg = FEIRBuilder::CreateExprIRead(baseType->Clone(), ptrType->Clone(), dreadVaArg->Clone()); + UniqueFEIRExpr addrofVar = FEIRBuilder::CreateExprAddrofVar(copyedVar->Clone()); + if (i != 0) { + addrofVar = FEIRBuilder::CreateExprBinary( + OP_add, std::move(addrofVar), FEIRBuilder::CreateExprConstPtr(static_cast(fieldType.GetSize() * i))); + } + MIRType *fieldPtrType = GlobalTables::GetTypeTable().GetOrCreatePointerType(fieldType); + UniqueFEIRType fieldFEIRType = std::make_unique(*fieldPtrType); + UniqueFEIRStmt iassignCopyedVar = FEIRBuilder::CreateStmtIAssign( + std::move(fieldFEIRType), std::move(addrofVar), std::move(ireadVaArg)); + stmts.emplace_back(std::move(iassignCopyedVar)); + } + UniqueFEIRExpr addrofCopyedVar = FEIRBuilder::CreateExprAddrofVar(copyedVar->Clone()); + UniqueFEIRStmt assignVar = FEIRBuilder::CreateStmtDAssign(vaArgVar->Clone(), std::move(addrofCopyedVar)); + stmts.emplace_back(std::move(assignVar)); +} + +// ---------- ASTArrayInitLoopExpr ---------- +UniqueFEIRExpr ASTArrayInitLoopExpr::Emit2FEExprImpl(std::list &stmts) const { + (void)stmts; + CHECK_FATAL(false, "NIY"); + return nullptr; +} + +// ---------- ASTArrayInitIndexExpr ---------- +UniqueFEIRExpr ASTArrayInitIndexExpr::Emit2FEExprImpl(std::list &stmts) const { + (void)stmts; + CHECK_FATAL(false, "NIY"); + return nullptr; +} + +// ---------- ASTExprWithCleanups ---------- +UniqueFEIRExpr ASTExprWithCleanups::Emit2FEExprImpl(std::list &stmts) const { + (void)stmts; + CHECK_FATAL(false, "NIY"); + return nullptr; +} + +// ---------- ASTMaterializeTemporaryExpr ---------- +UniqueFEIRExpr 
ASTMaterializeTemporaryExpr::Emit2FEExprImpl(std::list &stmts) const { + (void)stmts; + CHECK_FATAL(false, "NIY"); + return nullptr; +} + +// ---------- ASTSubstNonTypeTemplateParmExpr ---------- +UniqueFEIRExpr ASTSubstNonTypeTemplateParmExpr::Emit2FEExprImpl(std::list &stmts) const { + (void)stmts; + CHECK_FATAL(false, "NIY"); + return nullptr; +} + +// ---------- ASTDependentScopeDeclRefExpr ---------- +UniqueFEIRExpr ASTDependentScopeDeclRefExpr::Emit2FEExprImpl(std::list &stmts) const { + (void)stmts; + CHECK_FATAL(false, "NIY"); + return nullptr; +} + +// ---------- ASTAtomicExpr ---------- +UniqueFEIRExpr ASTAtomicExpr::Emit2FEExprImpl(std::list &stmts) const { + auto atomicExpr = std::make_unique(mirType, refType, objExpr->Emit2FEExpr(stmts), atomicOp); + if (atomicOp != kAtomicOpLoadN) { + static_cast(atomicExpr.get())->SetVal1Expr(valExpr1->Emit2FEExpr(stmts)); + static_cast(atomicExpr.get())->SetVal1Type(val1Type); + } + if (atomicOp == kAtomicOpExchange) { + static_cast(atomicExpr.get())->SetVal2Expr(valExpr2->Emit2FEExpr(stmts)); + static_cast(atomicExpr.get())->SetVal2Type(val2Type); + } + static_cast(atomicExpr.get())->SetOrderExpr(orderExpr->Emit2FEExpr(stmts)); + auto var = FEIRBuilder::CreateVarNameForC(FEUtils::GetSequentialName("ret.var."), *refType, false, false); + atomicExpr->SetValVar(var->Clone()); + if (!isFromStmt) { + auto stmt = std::make_unique(std::move(atomicExpr)); + stmts.emplace_back(std::move(stmt)); + return FEIRBuilder::CreateExprDRead(var->Clone()); + } + return atomicExpr; +} + +// ---------- ASTExprStmtExpr ---------- +UniqueFEIRExpr ASTExprStmtExpr::Emit2FEExprImpl(std::list &stmts) const { + CHECK_FATAL(cpdStmt->GetASTStmtOp() == kASTStmtCompound, "Invalid in ASTExprStmtExpr"); + const auto *lastCpdStmt = static_cast(cpdStmt); + while (lastCpdStmt->GetASTStmtList().back()->GetASTStmtOp() == kASTStmtStmtExpr) { + auto bodyStmt = static_cast(lastCpdStmt->GetASTStmtList().back())->GetBodyStmt(); + lastCpdStmt = static_cast(bodyStmt); + } + UniqueFEIRExpr feirExpr = nullptr; + std::list stmts0; + if (lastCpdStmt->GetASTStmtList().size() != 0 && lastCpdStmt->GetASTStmtList().back()->GetExprs().size() != 0) { + feirExpr = lastCpdStmt->GetASTStmtList().back()->GetExprs().back()->Emit2FEExpr(stmts0); + lastCpdStmt->GetASTStmtList().back()->GetExprs().back()->SetRValue(true); + } + stmts0 = cpdStmt->Emit2FEStmt(); + for (auto &stmt : stmts0) { + stmts.emplace_back(std::move(stmt)); + } + return feirExpr; +} +} diff --git a/src/hir2mpl/ast_input/clang/src/ast_function.cpp b/src/hir2mpl/ast_input/clang/src/ast_function.cpp new file mode 100644 index 0000000000000000000000000000000000000000..08baa5a6599e56da0ef657a361ed2a60406cbf76 --- /dev/null +++ b/src/hir2mpl/ast_input/clang/src/ast_function.cpp @@ -0,0 +1,97 @@ +/* + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "ast_function.h" +#include "fe_macros.h" +#include "fe_manager.h" + +namespace maple { +ASTFunction::ASTFunction(const ASTFunc2FEHelper &argMethodHelper, MIRFunction &mirFunc, + const std::unique_ptr &argPhaseResultTotal) + : FEFunction(mirFunc, argPhaseResultTotal), + funcHelper(argMethodHelper), + astFunc(funcHelper.GetMethod()) {} + +bool ASTFunction::GenerateArgVarList(const std::string &phaseName) { + phaseResult.RegisterPhaseNameAndStart(phaseName); + argVarList = astFunc.GenArgVarList(); + // EnhanceC: Initialize and assign args boundary + std::list boundaryStmts = astFunc.InitArgsBoundaryVar(mirFunction); + AppendFEIRStmts(boundaryStmts); + return phaseResult.Finish(); +} + +bool ASTFunction::GenerateAliasVars(const std::string &phaseName) { + phaseResult.RegisterPhaseNameAndStart(phaseName); + return phaseResult.Finish(true); +} + +bool ASTFunction::EmitToFEIRStmt(const std::string &phaseName) { + phaseResult.RegisterPhaseNameAndStart(phaseName); + std::list feirStmts = astFunc.EmitASTStmtToFEIR(); + AppendFEIRStmts(feirStmts); + AppendFEIRStmts(vlaCleanupStmts); + return phaseResult.Finish(true); +} + +void ASTFunction::PreProcessImpl() { + CHECK_FATAL(false, "NIY"); +} + +void ASTFunction::SetMIRFunctionInfo() { + GStrIdx idx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(astFunc.GetName()); + mirFunction.PushbackMIRInfo(MIRInfoPair(GlobalTables::GetStrTable().GetOrCreateStrIdxFromName("INFO_fullname"), idx)); + mirFunction.PushbackIsString(true); +} + +void ASTFunction::AddVLACleanupStmts(std::list &stmts) { + vlaCleanupStmts.splice(vlaCleanupStmts.end(), stmts); +} + +bool ASTFunction::ProcessImpl() { + FE_INFO_LEVEL(FEOptions::kDumpLevelInfoDetail, "ASTFunction::Process() for %s", astFunc.GetName().c_str()); + bool success = true; + mirFunction.NewBody(); + FEManager::GetMIRBuilder().SetCurrentFunction(mirFunction); + FEManager::SetCurrentFEFunction(*this); + SetMIRFunctionInfo(); + success = success && GenerateArgVarList("gen arg var list"); + success = success && EmitToFEIRStmt("emit to feir"); + return success; +} + +bool ASTFunction::ProcessFEIRFunction() { + CHECK_FATAL(false, "NIY"); + return false; +} + +void ASTFunction::FinishImpl() { + if (FEOptions::GetInstance().IsDumpFEIRBB()) { + (void)LowerFunc("low feir func"); + (void)DumpFEIRBBs("dump bb list"); + } + if (FEOptions::GetInstance().IsDumpFEIRCFGGraph(GetGeneralFuncName())) { + (void)LowerFunc("low feir func"); + (void)DumpFEIRCFGGraph("dump cfg graph"); + } + (void)EmitToMIR("finish/emit to mir"); + (void)GenerateAliasVars("finish/generate alias vars"); +} + +bool ASTFunction::EmitToMIR(const std::string &phaseName) { + phaseResult.RegisterPhaseNameAndStart(phaseName); + EmitToMIRStmt(); + return phaseResult.Finish(); +} +} // namespace maple diff --git a/src/hir2mpl/ast_input/clang/src/ast_parser.cpp b/src/hir2mpl/ast_input/clang/src/ast_parser.cpp new file mode 100644 index 0000000000000000000000000000000000000000..a12ad01de7c9d63c9d25d715603d1eaec402965f --- /dev/null +++ b/src/hir2mpl/ast_input/clang/src/ast_parser.cpp @@ -0,0 +1,3062 @@ +/* + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "ast_parser.h" +#include "driver_options.h" +#include "mpl_logging.h" +#include "mir_module.h" +#include "mpl_logging.h" +#include "ast_decl_builder.h" +#include "ast_interface.h" +#include "ast_decl.h" +#include "ast_macros.h" +#include "ast_util.h" +#include "ast_input.h" +#include "fe_manager.h" +#include "enhance_c_checker.h" +#include "fe_macros.h" + +namespace maple { +std::map ASTDeclsBuilder::declesTable; + +bool ASTParser::OpenFile(MapleAllocator &allocator) { + astFile = allocator.GetMemPool()->New(allocator, recordDecles, globalEnumDecles); + bool res = astFile->Open(fileName, 0, 0); + if (!res) { + return false; + } + astUnitDecl = astFile->GetAstUnitDecl(); + return true; +} + +bool ASTParser::Release() const { + astFile->DisposeTranslationUnit(); + ASTDeclsBuilder::Clear(); + return true; +} + +bool ASTParser::Verify() const { + return true; +} + +ASTBinaryOperatorExpr *ASTParser::AllocBinaryOperatorExpr(MapleAllocator &allocator, + const clang::BinaryOperator &bo) const { + if (bo.isAssignmentOp() && !bo.isCompoundAssignmentOp()) { + return ASTDeclsBuilder::ASTExprBuilder(allocator); + } + if (bo.getOpcode() == clang::BO_Comma) { + return ASTDeclsBuilder::ASTExprBuilder(allocator); + } + // [C++ 5.5] Pointer-to-member operators. + if (bo.isPtrMemOp()) { + return ASTDeclsBuilder::ASTExprBuilder(allocator); + } + MIRType *lhTy = astFile->CvtType(bo.getLHS()->getType()); + ASSERT_NOT_NULL(lhTy); + auto opcode = bo.getOpcode(); + if (bo.isCompoundAssignmentOp()) { + opcode = clang::BinaryOperator::getOpForCompoundAssignment(bo.getOpcode()); + } + Opcode mirOpcode = ASTUtil::CvtBinaryOpcode(opcode, lhTy->GetPrimType()); + CHECK_FATAL(mirOpcode != OP_undef, "Opcode not support!"); + auto *expr = ASTDeclsBuilder::ASTExprBuilder(allocator); + expr->SetOpcode(mirOpcode); + return expr; +} + +ASTStmt *ASTParser::ProcessFunctionBody(MapleAllocator &allocator, const clang::CompoundStmt &compoundStmt) { + CHECK_FATAL(false, "NIY"); + return ProcessStmtCompoundStmt(allocator, compoundStmt); +} + +ASTStmt *ASTParser::ProcessStmtCompoundStmt(MapleAllocator &allocator, const clang::CompoundStmt &cpdStmt) { + ASTCompoundStmt *astCompoundStmt = ASTDeclsBuilder::ASTStmtBuilder(allocator); + CHECK_FATAL(astCompoundStmt != nullptr, "astCompoundStmt is nullptr"); + astCompoundStmt->SetEndLoc(astFile->GetLOC(cpdStmt.getEndLoc())); + clang::CompoundStmt::const_body_iterator it; + ASTStmt *childStmt = nullptr; + for (it = cpdStmt.body_begin(); it != cpdStmt.body_end(); ++it) { + childStmt = ProcessStmt(allocator, **it); + if (childStmt != nullptr) { + astCompoundStmt->SetASTStmt(childStmt); + } else { + continue; + } + } + + if (FEOptions::GetInstance().IsEnableSafeRegion()) { + switch (cpdStmt.getSafeSpecifier()) { + case clang::SS_None: + astCompoundStmt->SetSafeSS(SafeSS::kNoneSS); + break; + case clang::SS_Unsafe: + astCompoundStmt->SetSafeSS(SafeSS::kUnsafeSS); + break; + case clang::SS_Safe: + astCompoundStmt->SetSafeSS(SafeSS::kSafeSS); + break; + default: break; + } + } + return astCompoundStmt; +} + +#define STMT_CASE(CLASS) \ + case clang::Stmt::CLASS##Class: { \ + ASTStmt *astStmt = ProcessStmt##CLASS(allocator, 
llvm::cast(stmt)); \ + Loc loc = astFile->GetStmtLOC(stmt); \ + astStmt->SetSrcLoc(loc); \ + return astStmt; \ + } + +ASTStmt *ASTParser::ProcessStmt(MapleAllocator &allocator, const clang::Stmt &stmt) { + switch (stmt.getStmtClass()) { + STMT_CASE(UnaryOperator); + STMT_CASE(BinaryOperator); + STMT_CASE(CompoundAssignOperator); + STMT_CASE(ImplicitCastExpr); + STMT_CASE(ParenExpr); + STMT_CASE(IntegerLiteral); + STMT_CASE(FloatingLiteral); + STMT_CASE(VAArgExpr); + STMT_CASE(ConditionalOperator); + STMT_CASE(CharacterLiteral); + STMT_CASE(StmtExpr); + STMT_CASE(CallExpr); + STMT_CASE(ReturnStmt); + STMT_CASE(CompoundStmt); + STMT_CASE(IfStmt); + STMT_CASE(ForStmt); + STMT_CASE(WhileStmt); + STMT_CASE(DoStmt); + STMT_CASE(BreakStmt); + STMT_CASE(LabelStmt); + STMT_CASE(ContinueStmt); + STMT_CASE(GotoStmt); + STMT_CASE(IndirectGotoStmt); + STMT_CASE(SwitchStmt); + STMT_CASE(CaseStmt); + STMT_CASE(DefaultStmt); + STMT_CASE(CStyleCastExpr); + STMT_CASE(DeclStmt); + STMT_CASE(NullStmt); + STMT_CASE(AtomicExpr); + STMT_CASE(GCCAsmStmt); + STMT_CASE(OffsetOfExpr); + STMT_CASE(GenericSelectionExpr); + STMT_CASE(AttributedStmt); + STMT_CASE(DeclRefExpr); + STMT_CASE(UnaryExprOrTypeTraitExpr); + STMT_CASE(AddrLabelExpr); + default: { + CHECK_FATAL(false, "ASTStmt: %s NIY", stmt.getStmtClassName()); + return nullptr; + } + } +} + +ASTStmt *ASTParser::ProcessStmtAttributedStmt(MapleAllocator &allocator, const clang::AttributedStmt &attrStmt) { + ASSERT(clang::hasSpecificAttr(attrStmt.getAttrs()), "AttrStmt is not fallthrough"); + ASTAttributedStmt *astAttributedStmt = ASTDeclsBuilder::ASTStmtBuilder(allocator); + CHECK_FATAL(astAttributedStmt != nullptr, "astAttributedStmt is nullptr"); + return astAttributedStmt; +} + +ASTStmt *ASTParser::ProcessStmtOffsetOfExpr(MapleAllocator &allocator, const clang::OffsetOfExpr &expr) { + auto *astStmt = ASTDeclsBuilder::ASTStmtBuilder(allocator); + CHECK_FATAL(astStmt != nullptr, "astStmt is nullptr"); + ASTExpr *astExpr = ProcessExpr(allocator, &expr); + if (astExpr == nullptr) { + return nullptr; + } + astStmt->SetASTExpr(astExpr); + return astStmt; +} + +ASTStmt *ASTParser::ProcessStmtGenericSelectionExpr(MapleAllocator &allocator, + const clang::GenericSelectionExpr &expr) { + auto *astStmt = ASTDeclsBuilder::ASTStmtBuilder(allocator); + CHECK_FATAL(astStmt != nullptr, "astStmt is nullptr"); + ASTExpr *astExpr = ProcessExpr(allocator, &expr); + if (astExpr == nullptr) { + return nullptr; + } + astStmt->SetASTExpr(astExpr); + return astStmt; +} + +ASTStmt *ASTParser::ProcessStmtUnaryOperator(MapleAllocator &allocator, const clang::UnaryOperator &unaryOp) { + ASTUnaryOperatorStmt *astUOStmt = ASTDeclsBuilder::ASTStmtBuilder(allocator); + CHECK_FATAL(astUOStmt != nullptr, "astUOStmt is nullptr"); + ASTExpr *astExpr = ProcessExpr(allocator, &unaryOp); + if (astExpr == nullptr) { + return nullptr; + } + astUOStmt->SetASTExpr(astExpr); + return astUOStmt; +} + +ASTStmt *ASTParser::ProcessStmtBinaryOperator(MapleAllocator &allocator, const clang::BinaryOperator &binaryOp) { + ASTBinaryOperatorStmt *astBOStmt = ASTDeclsBuilder::ASTStmtBuilder(allocator); + CHECK_FATAL(astBOStmt != nullptr, "astBOStmt is nullptr"); + ASTExpr *astExpr = ProcessExpr(allocator, &binaryOp); + if (astExpr == nullptr) { + return nullptr; + } + astBOStmt->SetASTExpr(astExpr); + return astBOStmt; +} + +ASTStmt *ASTParser::ProcessStmtCallExpr(MapleAllocator &allocator, const clang::CallExpr &callExpr) { + ASTExpr *astExpr = ProcessExpr(allocator, &callExpr); + if (astExpr == nullptr) { + 
return nullptr; + } + ASTCallExprStmt *astCallExprStmt = + allocator.GetMemPool()->New(allocator, static_cast(astExpr)->GetRetVarName()); + CHECK_FATAL(astCallExprStmt != nullptr, "astCallExprStmt is nullptr"); + astCallExprStmt->SetASTExpr(astExpr); + return astCallExprStmt; +} + +ASTStmt *ASTParser::ProcessStmtImplicitCastExpr(MapleAllocator &allocator, + const clang::ImplicitCastExpr &implicitCastExpr) { + ASTImplicitCastExprStmt *astImplicitCastExprStmt = + ASTDeclsBuilder::ASTStmtBuilder(allocator); + CHECK_FATAL(astImplicitCastExprStmt != nullptr, "astImplicitCastExprStmt is nullptr"); + ASTExpr *astExpr = ProcessExpr(allocator, &implicitCastExpr); + if (astExpr == nullptr) { + return nullptr; + } + astImplicitCastExprStmt->SetASTExpr(astExpr); + return astImplicitCastExprStmt; +} + +ASTStmt *ASTParser::ProcessStmtParenExpr(MapleAllocator &allocator, const clang::ParenExpr &parenExpr) { + ASTParenExprStmt *astParenExprStmt = ASTDeclsBuilder::ASTStmtBuilder(allocator); + CHECK_FATAL(astParenExprStmt != nullptr, "astCallExprStmt is nullptr"); + ASTExpr *astExpr = ProcessExpr(allocator, &parenExpr); + if (astExpr == nullptr) { + return nullptr; + } + astParenExprStmt->SetASTExpr(astExpr); + return astParenExprStmt; +} + +ASTStmt *ASTParser::ProcessStmtIntegerLiteral(MapleAllocator &allocator, const clang::IntegerLiteral &integerLiteral) { + ASTIntegerLiteralStmt *astIntegerLiteralStmt = ASTDeclsBuilder::ASTStmtBuilder(allocator); + CHECK_FATAL(astIntegerLiteralStmt != nullptr, "astIntegerLiteralStmt is nullptr"); + ASTExpr *astExpr = ProcessExpr(allocator, &integerLiteral); + if (astExpr == nullptr) { + return nullptr; + } + astIntegerLiteralStmt->SetASTExpr(astExpr); + return astIntegerLiteralStmt; +} + +ASTStmt *ASTParser::ProcessStmtFloatingLiteral(MapleAllocator &allocator, + const clang::FloatingLiteral &floatingLiteral) { + ASTFloatingLiteralStmt *astFloatingLiteralStmt = ASTDeclsBuilder::ASTStmtBuilder(allocator); + CHECK_FATAL(astFloatingLiteralStmt != nullptr, "astFloatingLiteralStmt is nullptr"); + ASTExpr *astExpr = ProcessExpr(allocator, &floatingLiteral); + if (astExpr == nullptr) { + return nullptr; + } + astFloatingLiteralStmt->SetASTExpr(astExpr); + return astFloatingLiteralStmt; +} + +ASTStmt *ASTParser::ProcessStmtVAArgExpr(MapleAllocator &allocator, const clang::VAArgExpr &vAArgExpr) { + ASTVAArgExprStmt *astVAArgExprStmt = ASTDeclsBuilder::ASTStmtBuilder(allocator); + CHECK_FATAL(astVAArgExprStmt != nullptr, "astVAArgExprStmt is nullptr"); + ASTExpr *astExpr = ProcessExpr(allocator, &vAArgExpr); + if (astExpr == nullptr) { + return nullptr; + } + astVAArgExprStmt->SetASTExpr(astExpr); + return astVAArgExprStmt; +} + +ASTStmt *ASTParser::ProcessStmtConditionalOperator(MapleAllocator &allocator, + const clang::ConditionalOperator &conditionalOperator) { + ASTConditionalOperatorStmt *astConditionalOperatorStmt = + ASTDeclsBuilder::ASTStmtBuilder(allocator); + CHECK_FATAL(astConditionalOperatorStmt != nullptr, "astConditionalOperatorStmt is nullptr"); + ASTExpr *astExpr = ProcessExpr(allocator, &conditionalOperator); + if (astExpr == nullptr) { + return nullptr; + } + astConditionalOperatorStmt->SetASTExpr(astExpr); + return astConditionalOperatorStmt; +} + +ASTStmt *ASTParser::ProcessStmtCharacterLiteral(MapleAllocator &allocator, + const clang::CharacterLiteral &characterLiteral) { + ASTCharacterLiteralStmt *astCharacterLiteralStmt = + ASTDeclsBuilder::ASTStmtBuilder(allocator); + CHECK_FATAL(astCharacterLiteralStmt != nullptr, "astCharacterLiteralStmt is nullptr"); + 
ASTExpr *astExpr = ProcessExpr(allocator, &characterLiteral); + if (astExpr == nullptr) { + return nullptr; + } + astCharacterLiteralStmt->SetASTExpr(astExpr); + return astCharacterLiteralStmt; +} + +ASTStmt *ASTParser::ProcessStmtCStyleCastExpr(MapleAllocator &allocator, const clang::CStyleCastExpr &cStyleCastExpr) { + ASTCStyleCastExprStmt *astCStyleCastExprStmt = ASTDeclsBuilder::ASTStmtBuilder(allocator); + CHECK_FATAL(astCStyleCastExprStmt != nullptr, "astCStyleCastExprStmt is nullptr"); + ASTExpr *astExpr = ProcessExpr(allocator, &cStyleCastExpr); + if (astExpr == nullptr) { + return nullptr; + } + astCStyleCastExprStmt->SetASTExpr(astExpr); + return astCStyleCastExprStmt; +} + +ASTStmt *ASTParser::ProcessStmtStmtExpr(MapleAllocator &allocator, const clang::StmtExpr &stmtExpr) { + ASTStmtExprStmt *astStmt = ASTDeclsBuilder::ASTStmtBuilder(allocator); + const clang::CompoundStmt *cpdStmt = stmtExpr.getSubStmt(); + ASTStmt *astCompoundStmt = ProcessStmt(allocator, *cpdStmt); + astStmt->SetBodyStmt(astCompoundStmt); + return astStmt; +} + +ASTStmt *ASTParser::ProcessStmtCompoundAssignOperator(MapleAllocator &allocator, + const clang::CompoundAssignOperator &cpdAssignOp) { + ASTCompoundAssignOperatorStmt *astCAOStmt = ASTDeclsBuilder::ASTStmtBuilder(allocator); + CHECK_FATAL(astCAOStmt != nullptr, "astCAOStmt is nullptr"); + ASTExpr *astExpr = ProcessExpr(allocator, &cpdAssignOp); + if (astExpr == nullptr) { + return nullptr; + } + astCAOStmt->SetASTExpr(astExpr); + return astCAOStmt; +} + +ASTStmt *ASTParser::ProcessStmtAtomicExpr(MapleAllocator &allocator, const clang::AtomicExpr &atomicExpr) { + ASTAtomicExprStmt *astAtomicExprStmt = ASTDeclsBuilder::ASTStmtBuilder(allocator); + CHECK_FATAL(astAtomicExprStmt != nullptr, "astAtomicExprStmt is nullptr"); + ASTExpr *astExpr = ProcessExpr(allocator, &atomicExpr); + if (astExpr == nullptr) { + return nullptr; + } + static_cast(astExpr)->SetFromStmt(true); + astAtomicExprStmt->SetASTExpr(astExpr); + return astAtomicExprStmt; +} + +ASTStmt *ASTParser::ProcessStmtReturnStmt(MapleAllocator &allocator, const clang::ReturnStmt &retStmt) { + ASTReturnStmt *astStmt = ASTDeclsBuilder::ASTStmtBuilder(allocator); + CHECK_FATAL(astStmt != nullptr, "astStmt is nullptr"); + ASTExpr *astExpr = ProcessExpr(allocator, retStmt.getRetValue()); + astStmt->SetASTExpr(astExpr); + return astStmt; +} + +ASTStmt *ASTParser::ProcessStmtIfStmt(MapleAllocator &allocator, const clang::IfStmt &ifStmt) { + auto *astStmt = ASTDeclsBuilder::ASTStmtBuilder(allocator); + CHECK_FATAL(astStmt != nullptr, "astStmt is nullptr"); + ASTExpr *astExpr = ProcessExpr(allocator, ifStmt.getCond()); + if (astExpr == nullptr) { + return nullptr; + } + astStmt->SetCondExpr(astExpr); + ASTStmt *astThenStmt = nullptr; + const clang::Stmt *thenStmt = ifStmt.getThen(); + if (thenStmt->getStmtClass() == clang::Stmt::CompoundStmtClass) { + astThenStmt = ProcessStmt(allocator, *llvm::cast(thenStmt)); + } else { + astThenStmt = ProcessStmt(allocator, *thenStmt); + } + astStmt->SetThenStmt(astThenStmt); + if (ifStmt.hasElseStorage()) { + ASTStmt *astElseStmt = nullptr; + const clang::Stmt *elseStmt = ifStmt.getElse(); + if (elseStmt->getStmtClass() == clang::Stmt::CompoundStmtClass) { + astElseStmt = ProcessStmt(allocator, *llvm::cast(elseStmt)); + } else { + astElseStmt = ProcessStmt(allocator, *elseStmt); + } + astStmt->SetElseStmt(astElseStmt); + } + return astStmt; +} + +ASTStmt *ASTParser::ProcessStmtForStmt(MapleAllocator &allocator, const clang::ForStmt &forStmt) { + auto *astStmt = 
ASTDeclsBuilder::ASTStmtBuilder(allocator); + CHECK_FATAL(astStmt != nullptr, "astStmt is nullptr"); + astStmt->SetEndLoc(astFile->GetLOC(forStmt.getEndLoc())); + if (forStmt.getInit() != nullptr) { + ASTStmt *initStmt = ProcessStmt(allocator, *forStmt.getInit()); + if (initStmt == nullptr) { + return nullptr; + } + astStmt->SetInitStmt(initStmt); + } + if (forStmt.getCond() != nullptr) { + ASTExpr *condExpr = ProcessExpr(allocator, forStmt.getCond()); + if (condExpr == nullptr) { + return nullptr; + } + astStmt->SetCondExpr(condExpr); + } + if (forStmt.getInc() != nullptr) { + ASTExpr *incExpr = ProcessExpr(allocator, forStmt.getInc()); + if (incExpr == nullptr) { + return nullptr; + } + astStmt->SetIncExpr(incExpr); + } + ASTStmt *bodyStmt = nullptr; + if (forStmt.getBody()->getStmtClass() == clang::Stmt::CompoundStmtClass) { + const auto *tmpCpdStmt = llvm::cast(forStmt.getBody()); + bodyStmt = ProcessStmt(allocator, *tmpCpdStmt); + } else { + bodyStmt = ProcessStmt(allocator, *forStmt.getBody()); + } + astStmt->SetBodyStmt(bodyStmt); + return astStmt; +} + +ASTStmt *ASTParser::ProcessStmtWhileStmt(MapleAllocator &allocator, const clang::WhileStmt &whileStmt) { + ASTWhileStmt *astStmt = ASTDeclsBuilder::ASTStmtBuilder(allocator); + CHECK_FATAL(astStmt != nullptr, "astStmt is nullptr"); + ASTExpr *condExpr = ProcessExpr(allocator, whileStmt.getCond()); + if (condExpr == nullptr) { + return nullptr; + } + astStmt->SetCondExpr(condExpr); + ASTStmt *bodyStmt = ProcessStmt(allocator, *whileStmt.getBody()); + astStmt->SetBodyStmt(bodyStmt); + return astStmt; +} + +ASTStmt *ASTParser::ProcessStmtGotoStmt(MapleAllocator &allocator, const clang::GotoStmt &gotoStmt) { + ASTGotoStmt *astStmt = ASTDeclsBuilder::ASTStmtBuilder(allocator); + CHECK_FATAL(astStmt != nullptr, "astStmt is nullptr"); + ASSERT_NOT_NULL(gotoStmt.getLabel()); + ASTDecl *astDecl = ProcessDecl(allocator, *gotoStmt.getLabel()); + astStmt->SetLabelName(astDecl->GetName()); + return astStmt; +} + +ASTStmt *ASTParser::ProcessStmtIndirectGotoStmt(MapleAllocator &allocator, const clang::IndirectGotoStmt &iGotoStmt) { + ASTIndirectGotoStmt *astStmt = ASTDeclsBuilder::ASTStmtBuilder(allocator); + CHECK_FATAL(astStmt != nullptr, "astStmt is nullptr"); + astStmt->SetASTExpr(ProcessExpr(allocator, iGotoStmt.getTarget())); + return astStmt; +} + +ASTStmt *ASTParser::ProcessStmtGCCAsmStmt(MapleAllocator &allocator, const clang::GCCAsmStmt &asmStmt) { + ASTGCCAsmStmt *astStmt = ASTDeclsBuilder::ASTStmtBuilder(allocator); + CHECK_FATAL(astStmt != nullptr, "astStmt is nullptr"); + astStmt->SetAsmStr(asmStmt.generateAsmString(*(astFile->GetAstContext()))); + // set output + for (unsigned i = 0; i < asmStmt.getNumOutputs(); ++i) { + bool isPlusConstraint = asmStmt.isOutputPlusConstraint(i); + astStmt->InsertOutput(std::make_tuple(asmStmt.getOutputName(i).str(), + asmStmt.getOutputConstraint(i).str(), isPlusConstraint)); + astStmt->SetASTExpr(ProcessExpr(allocator, asmStmt.getOutputExpr(i))); + } + // set input + for (unsigned i = 0; i < asmStmt.getNumInputs(); ++i) { + astStmt->InsertInput(std::make_pair(asmStmt.getInputName(i).str(), asmStmt.getInputConstraint(i).str())); + astStmt->SetASTExpr(ProcessExpr(allocator, asmStmt.getInputExpr(i))); + } + // set clobbers + for (unsigned i = 0; i < asmStmt.getNumClobbers(); ++i) { + astStmt->InsertClobber(asmStmt.getClobber(i).str()); + } + // set label + for (unsigned i = 0; i < asmStmt.getNumLabels(); ++i) { + astStmt->InsertLabel(asmStmt.getLabelName(i).str()); + } + // set goto/volatile flag + if 
(asmStmt.isVolatile()) { + astStmt->SetIsVolatile(true); + } + if (asmStmt.isAsmGoto()) { + astStmt->SetIsGoto(true); + } + return astStmt; +} + +bool ASTParser::HasDefault(const clang::Stmt &stmt) { + if (llvm::isa(stmt)) { + return true; + } else if (llvm::isa(stmt)) { + const auto *cpdStmt = llvm::cast(&stmt); + clang::CompoundStmt::const_body_iterator it; + for (it = cpdStmt->body_begin(); it != cpdStmt->body_end(); ++it) { + const auto *bodyStmt = llvm::dyn_cast(*it); + if (bodyStmt == nullptr) { + continue; + } + if (HasDefault(*bodyStmt)) { + return true; + } + } + } else if (llvm::isa(stmt)) { + const auto *caseStmt = llvm::cast(&stmt); + if (HasDefault(*caseStmt->getSubStmt())) { + return true; + } + } else if (llvm::isa(stmt)) { + const auto *labelStmt = llvm::cast(&stmt); + if (HasDefault(*labelStmt->getSubStmt())) { + return true; + } + } + return false; +} + +ASTStmt *ASTParser::ProcessStmtSwitchStmt(MapleAllocator &allocator, const clang::SwitchStmt &switchStmt) { + // if switch cond expr has var decl, we need to handle it. + ASTSwitchStmt *astStmt = ASTDeclsBuilder::ASTStmtBuilder(allocator); + CHECK_FATAL(astStmt != nullptr, "astStmt is nullptr"); + ASTStmt *condStmt = switchStmt.getConditionVariableDeclStmt() == nullptr ? nullptr : + ProcessStmt(allocator, *switchStmt.getConditionVariableDeclStmt()); + astStmt->SetCondStmt(condStmt); + // switch cond expr + ASTExpr *condExpr = switchStmt.getCond() == nullptr ? nullptr : ProcessExpr(allocator, switchStmt.getCond()); + if (condExpr != nullptr) { + astStmt->SetCondType(astFile->CvtType(switchStmt.getCond()->getType())); + } + astStmt->SetCondExpr(condExpr); + // switch body stmt + ASTStmt *bodyStmt = switchStmt.getBody() == nullptr ? nullptr : + ProcessStmt(allocator, *switchStmt.getBody()); + astStmt->SetBodyStmt(bodyStmt); + astStmt->SetHasDefault(HasDefault(*switchStmt.getBody())); + return astStmt; +} + +ASTStmt *ASTParser::ProcessStmtDoStmt(MapleAllocator &allocator, const clang::DoStmt &doStmt) { + ASTDoStmt *astStmt = ASTDeclsBuilder::ASTStmtBuilder(allocator); + CHECK_FATAL(astStmt != nullptr, "astStmt is nullptr"); + ASTExpr *condExpr = ProcessExpr(allocator, doStmt.getCond()); + if (condExpr == nullptr) { + return nullptr; + } + astStmt->SetCondExpr(condExpr); + ASTStmt *bodyStmt = ProcessStmt(allocator, *doStmt.getBody()); + astStmt->SetBodyStmt(bodyStmt); + return astStmt; +} + +ASTStmt *ASTParser::ProcessStmtBreakStmt(MapleAllocator &allocator, const clang::BreakStmt &breakStmt) { + (void)breakStmt; + auto *astStmt = ASTDeclsBuilder::ASTStmtBuilder(allocator); + CHECK_FATAL(astStmt != nullptr, "astStmt is nullptr"); + return astStmt; +} + +ASTStmt *ASTParser::ProcessStmtLabelStmt(MapleAllocator &allocator, const clang::LabelStmt &stmt) { + auto *astStmt = ASTDeclsBuilder::ASTStmtBuilder(allocator); + CHECK_FATAL(astStmt != nullptr, "astStmt is nullptr"); + std::string name; + ASTStmt *astSubStmt = ProcessStmt(allocator, *stmt.getSubStmt()); + if (stmt.getDecl() != nullptr) { + ASTDecl *astDecl = ProcessDecl(allocator, *stmt.getDecl()); + name = astDecl->GetName(); + } else { + name = stmt.getName(); + } + astStmt->SetLabelName(name); + astStmt->SetSubStmt(astSubStmt); + if (astSubStmt->GetExprs().size() != 0 && astSubStmt->GetExprs().back() != nullptr) { + astStmt->SetASTExpr(astSubStmt->GetExprs().back()); + } + return astStmt; +} + +ASTStmt *ASTParser::ProcessStmtAddrLabelExpr(MapleAllocator &allocator, const clang::AddrLabelExpr &expr) { + ASTUOAddrOfLabelExprStmt *astStmt = 
ASTDeclsBuilder::ASTStmtBuilder(allocator); + CHECK_FATAL(astStmt != nullptr, "astStmt is nullptr"); + ASTExpr *astExpr = ProcessExpr(allocator, &expr); + CHECK_FATAL(astExpr != nullptr, "astExpr is nullptr"); + astStmt->SetASTExpr(astExpr); + return astStmt; +} + +ASTStmt *ASTParser::ProcessStmtCaseStmt(MapleAllocator &allocator, const clang::CaseStmt &caseStmt) { + ASTCaseStmt *astStmt = ASTDeclsBuilder::ASTStmtBuilder(allocator); + CHECK_FATAL(astStmt != nullptr, "astStmt is nullptr"); + astStmt->SetLHS(ProcessExpr(allocator, caseStmt.getLHS())); + astStmt->SetRHS(ProcessExpr(allocator, caseStmt.getRHS())); + clang::Expr::EvalResult resL; + (void)caseStmt.getLHS()->EvaluateAsInt(resL, *astFile->GetAstContext()); + astStmt->SetLCaseTag(resL.Val.getInt().getExtValue()); + if (caseStmt.getRHS() != nullptr) { + clang::Expr::EvalResult resR; + (void)caseStmt.getLHS()->EvaluateAsInt(resR, *astFile->GetAstContext()); + astStmt->SetRCaseTag(resR.Val.getInt().getExtValue()); + } else { + astStmt->SetRCaseTag(resL.Val.getInt().getExtValue()); + } + ASTStmt* subStmt = caseStmt.getSubStmt() == nullptr ? nullptr : ProcessStmt(allocator, *caseStmt.getSubStmt()); + astStmt->SetSubStmt(subStmt); + return astStmt; +} + +ASTStmt *ASTParser::ProcessStmtDefaultStmt(MapleAllocator &allocator, const clang::DefaultStmt &defaultStmt) { + ASTDefaultStmt *astStmt = ASTDeclsBuilder::ASTStmtBuilder(allocator); + CHECK_FATAL(astStmt != nullptr, "astStmt is nullptr"); + auto *subStmt = defaultStmt.getSubStmt() == nullptr ? nullptr : ProcessStmt(allocator, *defaultStmt.getSubStmt()); + astStmt->SetChildStmt(subStmt); + return astStmt; +} + +ASTStmt *ASTParser::ProcessStmtNullStmt(MapleAllocator &allocator, const clang::NullStmt &nullStmt) { + (void)nullStmt; + ASTNullStmt *astStmt = ASTDeclsBuilder::ASTStmtBuilder(allocator); + CHECK_FATAL(astStmt != nullptr, "astStmt is nullptr"); + return astStmt; +} + +ASTStmt *ASTParser::ProcessStmtContinueStmt(MapleAllocator &allocator, const clang::ContinueStmt &continueStmt) { + (void)continueStmt; + auto *astStmt = ASTDeclsBuilder::ASTStmtBuilder(allocator); + CHECK_FATAL(astStmt != nullptr, "astStmt is nullptr"); + return astStmt; +} + +ASTStmt *ASTParser::ProcessStmtDeclStmt(MapleAllocator &allocator, const clang::DeclStmt &declStmt) { + ASTDeclStmt *astStmt = ASTDeclsBuilder::ASTStmtBuilder(allocator); + CHECK_FATAL(astStmt != nullptr, "astStmt is nullptr"); + std::list decls; + if (declStmt.isSingleDecl()) { + const clang::Decl *decl = declStmt.getSingleDecl(); + if (decl != nullptr) { + (void)decls.emplace_back(decl); + } + } else { + // multiple decls + clang::DeclGroupRef declGroupRef = declStmt.getDeclGroup(); + clang::DeclGroupRef::const_iterator it; + for (it = declGroupRef.begin(); it != declGroupRef.end(); ++it) { + (void)decls.emplace_back(*it); + } + } + for (auto decl : std::as_const(decls)) { + // save vla size expr + std::list astExprs; + if (decl->getKind() == clang::Decl::Var) { + const clang::VarDecl *varDecl = llvm::cast(decl); + SaveVLASizeExpr(allocator, varDecl->getType(), astExprs); + } else if (decl->getKind() == clang::Decl::Typedef) { + clang::QualType underType = llvm::cast(decl)->getUnderlyingType(); + SaveVLASizeExpr(allocator, underType, astExprs); + } + for (auto expr : std::as_const(astExprs)) { + astStmt->SetVLASizeExpr(expr); + } + ASTDecl *ad = ProcessDecl(allocator, *decl); + // extern func decl in function + if (decl->getKind() == clang::Decl::Function) { + const clang::FunctionDecl *funcDecl = llvm::cast(decl); + if 
(!funcDecl->isDefined()) { + astFuncs.emplace_back(static_cast(ad)); + } + } + if (ad != nullptr) { + astStmt->SetSubDecl(ad); + } + } + return astStmt; +} + +ASTStmt *ASTParser::ProcessStmtDeclRefExpr(MapleAllocator &allocator, const clang::DeclRefExpr &expr) { + ASTDeclRefExprStmt *astStmt = ASTDeclsBuilder::ASTStmtBuilder(allocator); + ASTExpr *astExpr = ProcessExpr(allocator, &expr); + if (astExpr == nullptr) { + return nullptr; + } + astStmt->SetASTExpr(astExpr); + return astStmt; +} + +ASTStmt *ASTParser::ProcessStmtUnaryExprOrTypeTraitExpr(MapleAllocator &allocator, + const clang::UnaryExprOrTypeTraitExpr &expr) { + auto *astStmt = ASTDeclsBuilder::ASTStmtBuilder(allocator); + ASTExpr *astExpr = ProcessExpr(allocator, &expr); + if (astExpr == nullptr) { + return nullptr; + } + astStmt->SetASTExpr(astExpr); + return astStmt; +} + +ASTValue *ASTParser::TranslateConstantValue2ASTValue(MapleAllocator &allocator, const clang::Expr *expr) const { + ASSERT_NOT_NULL(expr); + ASTValue *astValue = nullptr; + clang::Expr::EvalResult result; + if (expr->getStmtClass() == clang::Stmt::StringLiteralClass && + expr->EvaluateAsLValue(result, *(astFile->GetContext()))) { + return TranslateLValue2ASTValue(allocator, result, expr); + } + if (expr->EvaluateAsRValue(result, *(astFile->GetContext()))) { + if (result.Val.isLValue()) { + return TranslateLValue2ASTValue(allocator, result, expr); + } + auto *constMirType = astFile->CvtType(expr->getType().getCanonicalType()); + ASSERT_NOT_NULL(constMirType); + if (result.Val.isInt()) { + astValue = AllocASTValue(allocator); + switch (constMirType->GetPrimType()) { + case PTY_i8: + astValue->val.i8 = static_cast(result.Val.getInt().getExtValue()); + astValue->pty = PTY_i8; + break; + case PTY_i16: + astValue->val.i16 = static_cast(result.Val.getInt().getExtValue()); + astValue->pty = PTY_i16; + break; + case PTY_i32: + if (expr->getStmtClass() == clang::Stmt::CharacterLiteralClass) { + if (FEOptions::GetInstance().IsUseSignedChar()) { + astValue->val.i8 = static_cast(llvm::cast(expr)->getValue()); + astValue->pty = PTY_i8; + } else { + astValue->val.u8 = static_cast(llvm::cast(expr)->getValue()); + astValue->pty = PTY_u8; + } + } else { + astValue->val.i32 = static_cast(result.Val.getInt().getExtValue()); + astValue->pty = PTY_i32; + } + break; + case PTY_i64: + if (result.Val.getInt().getBitWidth() > 64) { + astValue->val.i64 = static_cast(result.Val.getInt().getSExtValue()); + } else { + astValue->val.i64 = static_cast(result.Val.getInt().getExtValue()); + } + astValue->pty = PTY_i64; + break; + case PTY_i128: + astValue->val.i64 = static_cast(result.Val.getInt().getSExtValue()); + astValue->pty = PTY_i128; + static bool i128Warning = true; + if (i128Warning) { + WARN(kLncWarn, "%s:%d PTY_i128 is not fully supported", + FEManager::GetModule().GetFileNameFromFileNum(astFile->GetExprLOC(*expr).fileIdx).c_str(), + astFile->GetExprLOC(*expr).line); + i128Warning = false; + } + break; + case PTY_u8: + astValue->val.u8 = static_cast(result.Val.getInt().getExtValue()); + astValue->pty = PTY_u8; + break; + case PTY_u16: + astValue->val.u16 = static_cast(result.Val.getInt().getExtValue()); + astValue->pty = PTY_u16; + break; + case PTY_u32: + astValue->val.u32 = static_cast(result.Val.getInt().getExtValue()); + astValue->pty = PTY_u32; + break; + case PTY_u64: + if (result.Val.getInt().getBitWidth() > 64) { + astValue->val.u64 = static_cast(result.Val.getInt().getZExtValue()); + } else { + astValue->val.u64 = static_cast(result.Val.getInt().getExtValue()); + } + 
astValue->pty = PTY_u64; + break; + case PTY_u128: + astValue->val.u64 = static_cast(result.Val.getInt().getZExtValue()); + astValue->pty = PTY_u128; + static bool u128Warning = true; + if (u128Warning) { + WARN(kLncWarn, "%s:%d PTY_u128 is not fully supported", + FEManager::GetModule().GetFileNameFromFileNum(astFile->GetExprLOC(*expr).fileIdx).c_str(), + astFile->GetExprLOC(*expr).line); + u128Warning = false; + } + break; + case PTY_u1: + astValue->val.u8 = (result.Val.getInt().getExtValue() == 0 ? 0 : 1); + astValue->pty = PTY_u1; + break; + default: { + CHECK_FATAL(false, "Invalid"); + break; + } + } + } else if (result.Val.isFloat()) { + astValue = AllocASTValue(allocator); + llvm::APFloat fValue = result.Val.getFloat(); + llvm::APFloat::Semantics semantics = llvm::APFloatBase::SemanticsToEnum(fValue.getSemantics()); + switch (semantics) { + case llvm::APFloat::S_IEEEsingle: + astValue->val.f32 = fValue.convertToFloat(); + break; + case llvm::APFloat::S_IEEEdouble: + astValue->val.f64 = fValue.convertToDouble(); + break; + case llvm::APFloat::S_IEEEquad: + case llvm::APFloat::S_PPCDoubleDouble: + case llvm::APFloat::S_x87DoubleExtended: { + auto ty = expr->getType().getCanonicalType(); + static bool f128Warning = true; + if (f128Warning && (ty->isFloat128Type() || + (ty->isRealFloatingType() && astFile->GetAstContext()->getTypeSize(ty) == 128))) { + WARN(kLncWarn, "%s:%d PTY_f128 is not fully supported", + FEManager::GetModule().GetFileNameFromFileNum(astFile->GetExprLOC(*expr).fileIdx).c_str(), + astFile->GetExprLOC(*expr).line); + f128Warning = false; + } + bool losesInfo; + if (constMirType->GetPrimType() == PTY_f64) { + (void)fValue.convert(llvm::APFloat::IEEEdouble(), + llvm::APFloatBase::rmNearestTiesToAway, + &losesInfo); + astValue->val.f64 = fValue.convertToDouble(); + } else { + (void)fValue.convert(llvm::APFloat::IEEEsingle(), + llvm::APFloatBase::rmNearestTiesToAway, + &losesInfo); + astValue->val.f32 = fValue.convertToFloat(); + } + break; + } + default: + CHECK_FATAL(false, "unsupported semantics"); + } + astValue->pty = constMirType->GetPrimType(); + } else if (result.Val.isComplexInt() || result.Val.isComplexFloat()) { + WARN(kLncWarn, "Unsupported complex value in MIR"); + } else if (result.Val.isVector()) { + // vector type var must be init by initListExpr + return nullptr; + } else if (result.Val.isMemberPointer()) { + CHECK_FATAL(false, "NIY"); + } + // Others: Agg const processed in `InitListExpr` + } + return astValue; +} + +ASTValue *ASTParser::TranslateLValue2ASTValue( + MapleAllocator &allocator, const clang::Expr::EvalResult &result, const clang::Expr *expr) const { + ASSERT_NOT_NULL(expr); + ASTValue *astValue = nullptr; + const clang::APValue::LValueBase &lvBase = result.Val.getLValueBase(); + if (lvBase.is()) { + const clang::Expr *lvExpr = lvBase.get(); + if (lvExpr == nullptr) { + return astValue; + } + if (expr->getStmtClass() == clang::Stmt::MemberExprClass) { + // meaningless, just for Initialization + astValue = AllocASTValue(allocator); + astValue->pty = PTY_i32; + astValue->val.i64 = 0; + return astValue; + } + switch (lvExpr->getStmtClass()) { + case clang::Stmt::StringLiteralClass: { + const clang::StringLiteral &strExpr = llvm::cast(*lvExpr); + std::string str = ""; + if (strExpr.isWide() || strExpr.isUTF16() || strExpr.isUTF32()) { + static bool wcharWarning = true; + if (wcharWarning && strExpr.isWide()) { + WARN(kLncWarn, "%s:%d wchar is not fully supported", + 
FEManager::GetModule().GetFileNameFromFileNum(astFile->GetExprLOC(*lvExpr).fileIdx).c_str(), + astFile->GetExprLOC(*lvExpr).line); + wcharWarning = false; + } + str = strExpr.getBytes().str(); + } else { + str = strExpr.getString().str(); + } + astValue = AllocASTValue(allocator); + UStrIdx strIdx = GlobalTables::GetUStrTable().GetOrCreateStrIdxFromName(str); + astValue->val.strIdx = strIdx; + astValue->pty = PTY_a64; + break; + } + case clang::Stmt::PredefinedExprClass: { + astValue = AllocASTValue(allocator); + std::string str = llvm::cast(*lvExpr).getFunctionName()->getString().str(); + UStrIdx strIdx = GlobalTables::GetUStrTable().GetOrCreateStrIdxFromName(str); + astValue->val.strIdx = strIdx; + astValue->pty = PTY_a64; + break; + } + case clang::Stmt::AddrLabelExprClass: + case clang::Stmt::CompoundLiteralExprClass: { + // Processing in corresponding expr, skipping + break; + } + default: { + CHECK_FATAL(false, "Unsupported expr :%s in LValue", lvExpr->getStmtClassName()); + } + } + } else { + // `valueDecl` processed in corresponding expr + bool isValueDeclInLValueBase = lvBase.is(); + CHECK_FATAL(isValueDeclInLValueBase, "Unsupported lValue base"); + } + + return astValue; +} + +ASTValue *ASTParser::TranslateExprEval(MapleAllocator &allocator, const clang::Expr *expr) const { + return TranslateConstantValue2ASTValue(allocator, expr); +} + +#define EXPR_CASE(CLASS) \ + case clang::Stmt::CLASS##Class: { \ + ASTExpr *astExpr = EvaluateExprAsConst(allocator, expr); \ + if (astExpr == nullptr) { \ + astExpr = ProcessExpr##CLASS(allocator, llvm::cast(*expr)); \ + if (astExpr == nullptr) { \ + return nullptr; \ + } \ + } \ + MIRType *exprType = astFile->CvtType(expr->getType()); \ + astExpr->SetType(exprType); \ + if (expr->isConstantInitializer(*astFile->GetNonConstAstContext(), false, nullptr)) { \ + astExpr->SetConstantValue(TranslateExprEval(allocator, expr)); \ + } \ + Loc loc = astFile->GetExprLOC(*expr); \ + astExpr->SetSrcLoc(loc); \ + return astExpr; \ + } + +ASTExpr *ASTParser::EvaluateExprAsConst(MapleAllocator &allocator, const clang::Expr *expr) { + ASSERT_NOT_NULL(expr); + clang::Expr::EvalResult constResult; + if (!expr->EvaluateAsConstantExpr(constResult, *astFile->GetNonConstAstContext())) { + return nullptr; + } + + // Supplement SideEffects for EvaluateAsConstantExpr, + // If the expression contains a LabelStmt, the expression is unfoldable + // e.g. 
int x = 0 && ({ a : 1; }); goto a; + if (HasLabelStmt(expr)) { + return nullptr; + } + + clang::APValue constVal = constResult.Val; + if (constVal.isInt()) { + ASTIntegerLiteral *intExpr = allocator.New(allocator); + llvm::APSInt intVal = constVal.getInt(); + intExpr->SetVal(intVal.getExtValue()); + if (intVal.getExtValue() == 0) { + intExpr->SetEvaluatedFlag(kEvaluatedAsZero); + } else { + intExpr->SetEvaluatedFlag(kEvaluatedAsNonZero); + } + return intExpr; + } else if (constVal.isFloat()) { + ASTFloatingLiteral *floatExpr = allocator.New(allocator); + llvm::APFloat floatVal = constVal.getFloat(); + const llvm::fltSemantics &fltSem = floatVal.getSemantics(); + double val = 0; + if (&fltSem == &llvm::APFloat::IEEEsingle()) { + val = static_cast(floatVal.convertToFloat()); + floatExpr->SetKind(FloatKind::F32); + floatExpr->SetVal(val); + } else if (&fltSem == &llvm::APFloat::IEEEdouble()) { + val = static_cast(floatVal.convertToDouble()); + floatExpr->SetKind(FloatKind::F64); + floatExpr->SetVal(val); + } else if (&fltSem == &llvm::APFloat::IEEEquad() || &fltSem == &llvm::APFloat::x87DoubleExtended()) { + bool losesInfo; + (void)floatVal.convert(llvm::APFloat::IEEEdouble(), llvm::APFloatBase::rmNearestTiesToAway, &losesInfo); + val = static_cast(floatVal.convertToDouble()); + floatExpr->SetKind(FloatKind::F64); + floatExpr->SetVal(val); + } else { + return nullptr; + } + if (floatVal.isPosZero()) { + floatExpr->SetEvaluatedFlag(kEvaluatedAsZero); + } else { + floatExpr->SetEvaluatedFlag(kEvaluatedAsNonZero); + } + return floatExpr; + } + return nullptr; +} + +bool ASTParser::HasLabelStmt(const clang::Stmt *expr) { + ASSERT_NOT_NULL(expr); + if (expr->getStmtClass() == clang::Stmt::LabelStmtClass) { + return true; + } + for (const clang::Stmt *subStmt : expr->children()) { + if (subStmt == nullptr) { + continue; + } + if (HasLabelStmt(subStmt)) { + return true; + } + } + return false; +} + +ASTExpr *ASTParser::ProcessExpr(MapleAllocator &allocator, const clang::Expr *expr) { + if (expr == nullptr) { + return nullptr; + } + switch (expr->getStmtClass()) { + EXPR_CASE(UnaryOperator); + EXPR_CASE(AddrLabelExpr); + EXPR_CASE(NoInitExpr); + EXPR_CASE(PredefinedExpr); + EXPR_CASE(OpaqueValueExpr); + EXPR_CASE(BinaryConditionalOperator); + EXPR_CASE(CompoundLiteralExpr); + EXPR_CASE(OffsetOfExpr); + EXPR_CASE(InitListExpr); + EXPR_CASE(BinaryOperator); + EXPR_CASE(ImplicitValueInitExpr); + EXPR_CASE(ArraySubscriptExpr); + EXPR_CASE(UnaryExprOrTypeTraitExpr); + EXPR_CASE(MemberExpr); + EXPR_CASE(DesignatedInitUpdateExpr); + EXPR_CASE(ImplicitCastExpr); + EXPR_CASE(DeclRefExpr); + EXPR_CASE(ParenExpr); + EXPR_CASE(IntegerLiteral); + EXPR_CASE(CharacterLiteral); + EXPR_CASE(StringLiteral); + EXPR_CASE(FloatingLiteral); + EXPR_CASE(ConditionalOperator); + EXPR_CASE(VAArgExpr); + EXPR_CASE(GNUNullExpr); + EXPR_CASE(SizeOfPackExpr); + EXPR_CASE(UserDefinedLiteral); + EXPR_CASE(ShuffleVectorExpr); + EXPR_CASE(TypeTraitExpr); + EXPR_CASE(ConstantExpr); + EXPR_CASE(ImaginaryLiteral); + EXPR_CASE(CallExpr); + EXPR_CASE(CompoundAssignOperator); + EXPR_CASE(StmtExpr); + EXPR_CASE(CStyleCastExpr); + EXPR_CASE(ArrayInitLoopExpr); + EXPR_CASE(ArrayInitIndexExpr); + EXPR_CASE(ExprWithCleanups); + EXPR_CASE(MaterializeTemporaryExpr); + EXPR_CASE(SubstNonTypeTemplateParmExpr); + EXPR_CASE(DependentScopeDeclRefExpr); + EXPR_CASE(AtomicExpr); + EXPR_CASE(ChooseExpr); + EXPR_CASE(GenericSelectionExpr); + default: + CHECK_FATAL(false, "ASTExpr %s NIY", expr->getStmtClassName()); + return nullptr; + } +} + +void 
ASTParser::SaveVLASizeExpr(MapleAllocator &allocator, const clang::QualType &qualType, + std::list &vlaSizeExprs) { + const clang::Type *type = qualType.getCanonicalType().getTypePtr(); + if (!type->isVariableArrayType()) { + return; + } + const clang::VariableArrayType *vlaType = llvm::cast(type); + if (vlaSizeMap.find(vlaType->getSizeExpr()) != vlaSizeMap.cend()) { + return; // vla size expr already exists + } + ASTExpr *vlaSizeExpr = BuildExprToComputeSizeFromVLA(allocator, qualType.getCanonicalType()); + if (vlaSizeExpr == nullptr) { + return; + } + ASTDeclRefExpr *vlaSizeVarExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); + MIRType *vlaSizeType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(PTY_u64); + ASTDecl *vlaSizeVar = ASTDeclsBuilder::ASTDeclBuilder( + allocator, MapleString("", allocator.GetMemPool()), FEUtils::GetSequentialName("vla_size."), + MapleVector({vlaSizeType}, allocator.Adapter())); + vlaSizeVar->SetIsParam(true); + vlaSizeVarExpr->SetASTDecl(vlaSizeVar); + ASTAssignExpr *expr = ASTDeclsBuilder::ASTStmtBuilder(allocator); + expr->SetLeftExpr(vlaSizeVarExpr); + expr->SetRightExpr(vlaSizeExpr); + vlaSizeMap[vlaType->getSizeExpr()] = vlaSizeVarExpr; + (void)vlaSizeExprs.emplace_back(expr); + SaveVLASizeExpr(allocator, vlaType->getElementType(), vlaSizeExprs); +} + +ASTUnaryOperatorExpr *ASTParser::AllocUnaryOperatorExpr(MapleAllocator &allocator, + const clang::UnaryOperator &expr) const { + clang::UnaryOperator::Opcode clangOpCode = expr.getOpcode(); + switch (clangOpCode) { + case clang::UO_Minus: // "-" + return ASTDeclsBuilder::ASTExprBuilder(allocator); + case clang::UO_Not: // "~" + return ASTDeclsBuilder::ASTExprBuilder(allocator); + case clang::UO_LNot: // "!" + return ASTDeclsBuilder::ASTExprBuilder(allocator); + case clang::UO_PostInc: // "++" + return ASTDeclsBuilder::ASTExprBuilder(allocator); + case clang::UO_PostDec: // "--" + return ASTDeclsBuilder::ASTExprBuilder(allocator); + case clang::UO_PreInc: // "++" + return ASTDeclsBuilder::ASTExprBuilder(allocator); + case clang::UO_PreDec: // "--" + return ASTDeclsBuilder::ASTExprBuilder(allocator); + case clang::UO_AddrOf: // "&" + return ASTDeclsBuilder::ASTExprBuilder(allocator); + case clang::UO_Deref: // "*" + return ASTDeclsBuilder::ASTExprBuilder(allocator); + case clang::UO_Plus: // "+" + return ASTDeclsBuilder::ASTExprBuilder(allocator); + case clang::UO_Real: // "__real" + return ASTDeclsBuilder::ASTExprBuilder(allocator); + case clang::UO_Imag: // "__imag" + return ASTDeclsBuilder::ASTExprBuilder(allocator); + case clang::UO_Extension: // "__extension__" + return ASTDeclsBuilder::ASTExprBuilder(allocator); + case clang::UO_Coawait: // "co_await" + return ASTDeclsBuilder::ASTExprBuilder(allocator); + default: + CHECK_FATAL(false, "NYI"); + } +} + +ASTValue *ASTParser::AllocASTValue(const MapleAllocator &allocator) const { + return allocator.GetMemPool()->New(); +} + +const clang::Expr *ASTParser::PeelParen(const clang::Expr &expr) const { + const clang::Expr *exprPtr = &expr; + while (llvm::isa(exprPtr) || + (llvm::isa(exprPtr) && + llvm::cast(exprPtr)->getOpcode() == clang::UO_Extension) || + (llvm::isa(exprPtr) && + llvm::cast(exprPtr)->getCastKind() == clang::CK_LValueToRValue)) { + if (llvm::isa(exprPtr)) { + exprPtr = llvm::cast(exprPtr)->getSubExpr(); + } else if (llvm::isa(exprPtr)) { + exprPtr = llvm::cast(exprPtr)->getSubExpr(); + } else { + exprPtr = llvm::cast(exprPtr)->getSubExpr(); + } + } + return exprPtr; +} + +const clang::Expr *ASTParser::PeelParen2(const clang::Expr &expr) const 
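// The PeelParen helpers implement one unwrapping idiom: repeatedly strip
// ParenExpr, "__extension__" UnaryOperator and (for PeelParen only)
// LValueToRValue implicit casts until a "real" expression is reached. A minimal
// standalone sketch against the public Clang AST API (illustrative only, not the
// project's helper; Clang's Expr::IgnoreParens() covers the paren and
// __extension__ cases but not the LValueToRValue cast that PeelParen also strips):
//
//   #include "clang/AST/Expr.h"
//
//   const clang::Expr *StripParensAndExtension(const clang::Expr *e) {
//     while (true) {
//       if (const auto *paren = llvm::dyn_cast<clang::ParenExpr>(e)) {
//         e = paren->getSubExpr();  // (x) -> x
//         continue;
//       }
//       const auto *uo = llvm::dyn_cast<clang::UnaryOperator>(e);
//       if (uo != nullptr && uo->getOpcode() == clang::UO_Extension) {
//         e = uo->getSubExpr();     // __extension__ x -> x
//         continue;
//       }
//       return e;                   // nothing left to peel
//     }
//   }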
{ + const clang::Expr *exprPtr = &expr; + while (llvm::isa(exprPtr) || + (llvm::isa(exprPtr) && + llvm::cast(exprPtr)->getOpcode() == clang::UO_Extension)) { + if (llvm::isa(exprPtr)) { + exprPtr = llvm::cast(exprPtr)->getSubExpr(); + } else { + exprPtr = llvm::cast(exprPtr)->getSubExpr(); + } + } + return exprPtr; +} + +ASTExpr *ASTParser::ProcessExprUnaryOperator(MapleAllocator &allocator, const clang::UnaryOperator &uo) { + ASTUnaryOperatorExpr *astUOExpr = AllocUnaryOperatorExpr(allocator, uo); + CHECK_FATAL(astUOExpr != nullptr, "astUOExpr is nullptr"); + const clang::Expr *subExpr = PeelParen(*uo.getSubExpr()); + clang::UnaryOperator::Opcode clangOpCode = uo.getOpcode(); + MIRType *subType = astFile->CvtType(subExpr->getType()); + astUOExpr->SetSubType(subType); + MIRType *uoType = astFile->CvtType(uo.getType()); + astUOExpr->SetUOType(uoType); + if (clangOpCode == clang::UO_PostInc || clangOpCode == clang::UO_PostDec || + clangOpCode == clang::UO_PreInc || clangOpCode == clang::UO_PreDec) { + const auto *declRefExpr = llvm::dyn_cast(subExpr); + if (declRefExpr != nullptr && declRefExpr->getDecl()->getKind() == clang::Decl::Var) { + const auto *varDecl = llvm::cast(declRefExpr->getDecl()->getCanonicalDecl()); + astUOExpr->SetGlobal(!varDecl->isLocalVarDeclOrParm()); + } + if (subType->GetPrimType() == PTY_ptr) { + int64 len; + const clang::QualType qualType = subExpr->getType()->getPointeeType(); + if (astFile->CvtType(qualType) != nullptr && astFile->CvtType(qualType)->GetPrimType() == PTY_ptr) { + MIRType *pointeeType = GlobalTables::GetTypeTable().GetPtr(); + len = static_cast(pointeeType->GetSize()); + } else { + const clang::QualType desugaredType = qualType.getDesugaredType(*(astFile->GetContext())); + len = astFile->GetContext()->getTypeSizeInChars(desugaredType).getQuantity(); + } + astUOExpr->SetPointeeLen(len); + } + } + if (clangOpCode == clang::UO_Imag || clangOpCode == clang::UO_Real) { + clang::QualType elementType = llvm::cast( + uo.getSubExpr()->getType().getCanonicalType())->getElementType(); + MIRType *elementMirType = astFile->CvtType(elementType); + if (clangOpCode == clang::UO_Real) { + static_cast(astUOExpr)->SetElementType(elementMirType); + } else { + static_cast(astUOExpr)->SetElementType(elementMirType); + } + } + ASTExpr *astExpr = ProcessExpr(allocator, subExpr); + if (astExpr == nullptr) { + return nullptr; + } + // vla as a pointer is not need to be addrof + if (clangOpCode == clang::UO_AddrOf && subExpr->getType()->isVariableArrayType()) { + return astExpr; + } + astUOExpr->SetASTDecl(astExpr->GetASTDecl()); + astUOExpr->SetUOExpr(astExpr); + return astUOExpr; +} + +ASTExpr *ASTParser::ProcessExprAddrLabelExpr(MapleAllocator &allocator, const clang::AddrLabelExpr &expr) { + ASTUOAddrOfLabelExpr *astAddrOfLabelExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); + const clang::LabelDecl *lbDecl = expr.getLabel(); + CHECK_NULL_FATAL(lbDecl); + ASTDecl *astDecl = ProcessDecl(allocator, *lbDecl); + astAddrOfLabelExpr->SetLabelName(astDecl->GetName()); + astAddrOfLabelExpr->SetUOType(GlobalTables::GetTypeTable().GetPrimType(PTY_ptr)); + return astAddrOfLabelExpr; +} + +ASTExpr *ASTParser::ProcessExprNoInitExpr(MapleAllocator &allocator, const clang::NoInitExpr &expr) { + ASTNoInitExpr *astNoInitExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); + CHECK_FATAL(astNoInitExpr != nullptr, "astNoInitExpr is nullptr"); + clang::QualType qualType = expr.getType(); + MIRType *noInitType = astFile->CvtType(qualType); + astNoInitExpr->SetNoInitType(noInitType); + return 
astNoInitExpr; +} + +ASTExpr *ASTParser::ProcessExprPredefinedExpr(MapleAllocator &allocator, const clang::PredefinedExpr &expr) { + ASTPredefinedExpr *astPredefinedExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); + CHECK_FATAL(astPredefinedExpr != nullptr, "astPredefinedExpr is nullptr"); + ASTExpr *astExpr = ProcessExpr(allocator, expr.getFunctionName()); + if (astExpr == nullptr) { + return nullptr; + } + astPredefinedExpr->SetASTExpr(astExpr); + return astPredefinedExpr; +} + +ASTExpr *ASTParser::ProcessExprOpaqueValueExpr(MapleAllocator &allocator, const clang::OpaqueValueExpr &expr) { + ASTOpaqueValueExpr *astOpaqueValueExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); + CHECK_FATAL(astOpaqueValueExpr != nullptr, "astOpaqueValueExpr is nullptr"); + ASTExpr *astExpr = ProcessExpr(allocator, expr.getSourceExpr()); + if (astExpr == nullptr) { + return nullptr; + } + astOpaqueValueExpr->SetASTExpr(astExpr); + return astOpaqueValueExpr; +} + +ASTExpr *ASTParser::ProcessExprBinaryConditionalOperator(MapleAllocator &allocator, + const clang::BinaryConditionalOperator &expr) { + ASTBinaryConditionalOperator *astBinaryConditionalOperator = + ASTDeclsBuilder::ASTExprBuilder(allocator); + CHECK_FATAL(astBinaryConditionalOperator != nullptr, "astBinaryConditionalOperator is nullptr"); + ASTExpr *condExpr = ProcessExpr(allocator, expr.getCond()); + if (condExpr == nullptr) { + return nullptr; + } + astBinaryConditionalOperator->SetCondExpr(condExpr); + ASTExpr *falseExpr = ProcessExpr(allocator, expr.getFalseExpr()); + if (falseExpr == nullptr) { + return nullptr; + } + astBinaryConditionalOperator->SetFalseExpr(falseExpr); + astBinaryConditionalOperator->SetType(astFile->CvtType(expr.getType())); + return astBinaryConditionalOperator; +} + +ASTExpr *ASTParser::ProcessExprCompoundLiteralExpr(MapleAllocator &allocator, + const clang::CompoundLiteralExpr &expr) { + ASTCompoundLiteralExpr *astCompoundLiteralExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); + CHECK_FATAL(astCompoundLiteralExpr != nullptr, "astCompoundLiteralExpr is nullptr"); + const clang::Expr *initExpr = expr.getInitializer(); + CHECK_FATAL(initExpr != nullptr, "initExpr is nullptr"); + clang::QualType qualType = initExpr->getType(); + astCompoundLiteralExpr->SetCompoundLiteralType(astFile->CvtType(qualType)); + ASTExpr *astExpr = ProcessExpr(allocator, initExpr); + if (astExpr == nullptr) { + return nullptr; + } + astCompoundLiteralExpr->SetASTExpr(astExpr); + return astCompoundLiteralExpr; +} + +ASTExpr *ASTParser::ProcessExprInitListExpr(MapleAllocator &allocator, const clang::InitListExpr &expr) { + ASTInitListExpr *astInitListExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); + CHECK_FATAL(astInitListExpr != nullptr, "ASTInitListExpr is nullptr"); + MIRType *initListType = astFile->CvtType(expr.getType()); + clang::QualType aggType = expr.getType().getCanonicalType(); + astInitListExpr->SetInitListType(initListType); + const clang::FieldDecl *fieldDecl = expr.getInitializedFieldInUnion(); + if (fieldDecl != nullptr) { + astInitListExpr->SetUnionInitFieldIdx(fieldDecl->getFieldIndex()); + } + uint32 n = expr.getNumInits(); + clang::Expr * const *le = expr.getInits(); + std::unordered_set evaluatedFlags; + if (aggType->isRecordType()) { + const auto *recordType = llvm::cast(aggType); + clang::RecordDecl *recordDecl = recordType->getDecl(); + ASTDecl *astDecl = ProcessDecl(allocator, *recordDecl); + CHECK_FATAL(astDecl != nullptr && astDecl->GetDeclKind() == kASTStruct, "Undefined record type"); + uint i = 0; + for (const 
auto field : static_cast(astDecl)->GetFields()) { + if (field->IsAnonymousField() && fieldDecl == nullptr && + n != static_cast(astDecl)->GetFields().size()) { + astInitListExpr->SetInitExprs(nullptr); + } else { + if (i < n) { + const clang::Expr *eExpr = le[i]; + ASTExpr *astExpr = ProcessExpr(allocator, eExpr); + CHECK_FATAL(astExpr != nullptr, "Invalid InitListExpr"); + (void)evaluatedFlags.insert(astExpr->GetEvaluatedFlag()); + astInitListExpr->SetInitExprs(astExpr); + i++; + } + } + } + } else { + if (expr.hasArrayFiller()) { + auto *astFilterExpr = ProcessExpr(allocator, expr.getArrayFiller()); + astInitListExpr->SetArrayFiller(astFilterExpr); + astInitListExpr->SetHasArrayFiller(true); + } + if (expr.isTransparent()) { + astInitListExpr->SetTransparent(true); + } + if (aggType->isVectorType()) { + astInitListExpr->SetHasVectorType(true); + // for one elem vector type + if (LibAstFile::IsOneElementVector(aggType)) { + astInitListExpr->SetTransparent(true); + } + } + for (uint32 i = 0; i < n; ++i) { + const clang::Expr *eExpr = le[i]; + ASTExpr *astExpr = ProcessExpr(allocator, eExpr); + if (astExpr == nullptr) { + return nullptr; + } + (void)evaluatedFlags.insert(astExpr->GetEvaluatedFlag()); + astInitListExpr->SetInitExprs(astExpr); + } + } + if (evaluatedFlags.count(kNotEvaluated) > 0 || evaluatedFlags.count(kEvaluatedAsNonZero) > 0) { + astInitListExpr->SetEvaluatedFlag(kEvaluatedAsNonZero); + } else { + astInitListExpr->SetEvaluatedFlag(kEvaluatedAsZero); + } + return astInitListExpr; +} + +ASTExpr *ASTParser::ProcessExprOffsetOfExpr(MapleAllocator &allocator, const clang::OffsetOfExpr &expr) { + if (expr.isEvaluatable(*astFile->GetContext())) { + clang::Expr::EvalResult result; + bool success = expr.EvaluateAsInt(result, *astFile->GetContext()); + if (success) { + auto astExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); + astExpr->SetVal(result.Val.getInt().getExtValue()); + astExpr->SetType(GlobalTables::GetTypeTable().GetUInt64()); + return astExpr; + } + } + int64_t offset = 0; + std::vector vlaOffsetExprs; + for (unsigned i = 0; i < expr.getNumComponents(); i++) { + auto comp = expr.getComponent(i); + if (comp.getKind() == clang::OffsetOfNode::Kind::Field) { + uint filedIdx = comp.getField()->getFieldIndex(); + offset += static_cast(astFile->GetContext()->getASTRecordLayout( + comp.getField()->getParent()).getFieldOffset(filedIdx) >> kBitToByteShift); + } else if (comp.getKind() == clang::OffsetOfNode::Kind::Array) { + uint32 idx = comp.getArrayExprIndex(); + auto idxExpr = expr.getIndexExpr(idx); + auto leftExpr = ProcessExpr(allocator, idxExpr); + ASSERT(i >= 1, "arg should be nonnegative number"); + auto arrayType = expr.getComponent(i - 1).getField()->getType(); + auto elementType = llvm::cast(arrayType)->getElementType(); + uint32 elementSize = GetSizeFromQualType(elementType); + if (elementSize == 1) { + vlaOffsetExprs.emplace_back(leftExpr); + } else { + auto astSizeExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); + astSizeExpr->SetVal(elementSize); + astSizeExpr->SetType(GlobalTables::GetTypeTable().GetUInt64()); + auto astExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); + astExpr->SetOpcode(OP_mul); + astExpr->SetLeftExpr(leftExpr); + astExpr->SetRightExpr(astSizeExpr); + astExpr->SetRetType(GlobalTables::GetTypeTable().GetPrimType(PTY_u64)); + vlaOffsetExprs.emplace_back(astExpr); + } + } else { + CHECK_FATAL(false, "NIY"); + } + } + ASTExpr *vlaOffsetExpr = nullptr; + if (vlaOffsetExprs.size() == 1) { + vlaOffsetExpr = vlaOffsetExprs[0]; + } else if 
(vlaOffsetExprs.size() >= 2) { + auto astExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); + astExpr->SetRetType(GlobalTables::GetTypeTable().GetPrimType(PTY_u64)); + astExpr->SetLeftExpr(vlaOffsetExprs[0]); + astExpr->SetRightExpr(vlaOffsetExprs[1]); + if (vlaOffsetExprs.size() >= 3) { + for (size_t i = 2; i < vlaOffsetExprs.size(); i++) { + auto astSubExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); + astSubExpr->SetRetType(GlobalTables::GetTypeTable().GetPrimType(PTY_u64)); + astSubExpr->SetLeftExpr(astExpr); + astSubExpr->SetRightExpr(vlaOffsetExprs[i]); + astExpr = astSubExpr; + } + } + vlaOffsetExpr = astExpr; + } + auto astSizeExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); + astSizeExpr->SetVal(offset); + astSizeExpr->SetType(GlobalTables::GetTypeTable().GetUInt64()); + auto astExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); + astExpr->SetOpcode(OP_add); + astExpr->SetLeftExpr(astSizeExpr); + astExpr->SetRightExpr(vlaOffsetExpr); + astExpr->SetRetType(GlobalTables::GetTypeTable().GetPrimType(PTY_u64)); + return astExpr; +} + +ASTExpr *ASTParser::ProcessExprVAArgExpr(MapleAllocator &allocator, const clang::VAArgExpr &expr) { + ASTVAArgExpr *astVAArgExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); + ASSERT(astVAArgExpr != nullptr, "astVAArgExpr is nullptr"); + ASTExpr *astExpr = ProcessExpr(allocator, expr.getSubExpr()); + if (astExpr == nullptr) { + return nullptr; + } + astVAArgExpr->SetASTExpr(astExpr); + astVAArgExpr->SetType(astFile->CvtType(expr.getType())); + return astVAArgExpr; +} + +ASTExpr *ASTParser::ProcessExprImplicitValueInitExpr(MapleAllocator &allocator, + const clang::ImplicitValueInitExpr &expr) { + auto *astImplicitValueInitExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); + CHECK_FATAL(astImplicitValueInitExpr != nullptr, "astImplicitValueInitExpr is nullptr"); + astImplicitValueInitExpr->SetType(astFile->CvtType(expr.getType())); + astImplicitValueInitExpr->SetEvaluatedFlag(kEvaluatedAsZero); + return astImplicitValueInitExpr; +} + +ASTExpr *ASTParser::ProcessExprStringLiteral(MapleAllocator &allocator, const clang::StringLiteral &expr) { + auto *astStringLiteral = ASTDeclsBuilder::ASTExprBuilder(allocator); + CHECK_FATAL(astStringLiteral != nullptr, "astStringLiteral is nullptr"); + astStringLiteral->SetType(astFile->CvtType(expr.getType())); + astStringLiteral->SetLength(expr.getLength()); + MapleVector codeUnits(allocator.Adapter()); + for (size_t i = 0; i < expr.getLength(); ++i) { + codeUnits.emplace_back(expr.getCodeUnit(i)); + } + astStringLiteral->SetCodeUnits(codeUnits); + if (expr.isAscii()) { + astStringLiteral->SetStr(expr.getString().str()); + } + return astStringLiteral; +} + +ASTExpr *ASTParser::ProcessExprArraySubscriptExpr(MapleAllocator &allocator, const clang::ArraySubscriptExpr &expr) { + auto *astArraySubscriptExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); + CHECK_FATAL(astArraySubscriptExpr != nullptr, "astArraySubscriptExpr is nullptr"); + auto base = expr.getBase(); + + base = PeelParen2(*base); + ASTExpr *idxExpr = ProcessExpr(allocator, expr.getIdx()); + astArraySubscriptExpr->SetIdxExpr(idxExpr); + + clang::QualType arrayQualType = base->getType().getCanonicalType(); + if (base->getStmtClass() == clang::Stmt::ImplicitCastExprClass && + !static_cast(base)->isPartOfExplicitCast()) { + arrayQualType = static_cast(base)->getSubExpr()->getType().getCanonicalType(); + } + auto arrayMirType = astFile->CvtType(arrayQualType); + astArraySubscriptExpr->SetArrayType(arrayMirType); + + clang::QualType exprType = 
expr.getType().getCanonicalType(); + if (arrayQualType->isVariablyModifiedType()) { + astArraySubscriptExpr->SetIsVLA(true); + ASTExpr *vlaTypeSizeExpr = BuildExprToComputeSizeFromVLA(allocator, exprType); + astArraySubscriptExpr->SetVLASizeExpr(vlaTypeSizeExpr); + } + ASTExpr *astBaseExpr = ProcessExpr(allocator, base); + astArraySubscriptExpr->SetBaseExpr(astBaseExpr); + auto *mirType = astFile->CvtType(exprType); + astArraySubscriptExpr->SetType(mirType); + return astArraySubscriptExpr; +} + +uint32 ASTParser::GetSizeFromQualType(const clang::QualType qualType) const { + const clang::QualType desugaredType = qualType.getDesugaredType(*astFile->GetContext()); + return astFile->GetContext()->getTypeSizeInChars(desugaredType).getQuantity(); +} + +ASTExpr *ASTParser::GetTypeSizeFromQualType(MapleAllocator &allocator, const clang::QualType qualType) { + const clang::QualType desugaredType = qualType.getDesugaredType(*astFile->GetContext()); + if (llvm::isa(desugaredType)) { + ASTExpr *vlaSizeExpr = ProcessExpr(allocator, llvm::cast(desugaredType)->getSizeExpr()); + ASTExpr *vlaElemTypeSizeExpr = + GetTypeSizeFromQualType(allocator, llvm::cast(desugaredType)->getElementType()); + ASTBinaryOperatorExpr *sizeExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); + sizeExpr->SetRetType(GlobalTables::GetTypeTable().GetPrimType(PTY_i64)); + sizeExpr->SetOpcode(OP_mul); + sizeExpr->SetLeftExpr(vlaSizeExpr); + sizeExpr->SetRightExpr(vlaElemTypeSizeExpr); + return sizeExpr; + } else { + ASTIntegerLiteral *sizeExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); + sizeExpr->SetVal(astFile->GetContext()->getTypeSizeInChars(desugaredType).getQuantity()); + sizeExpr->SetType(GlobalTables::GetTypeTable().GetInt32()); + return sizeExpr; + } +} + +uint32_t ASTParser::GetAlignOfType(const clang::QualType currQualType, clang::UnaryExprOrTypeTrait exprKind) const { + clang::QualType qualType = currQualType; + clang::CharUnits alignInCharUnits = clang::CharUnits::Zero(); + if (const auto *ref = currQualType->getAs()) { + qualType = ref->getPointeeType(); + } + if (qualType.getQualifiers().hasUnaligned()) { + alignInCharUnits = clang::CharUnits::One(); + } + if (exprKind == clang::UETT_AlignOf) { + alignInCharUnits = astFile->GetContext()->getTypeAlignInChars(qualType.getTypePtr()); + } else if (exprKind == clang::UETT_PreferredAlignOf) { + alignInCharUnits = astFile->GetContext()->toCharUnitsFromBits( + astFile->GetContext()->getPreferredTypeAlign(qualType.getTypePtr())); + } else { + CHECK_FATAL(false, "NIY"); + } + return static_cast(alignInCharUnits.getQuantity()); +} + +uint32_t ASTParser::GetAlignOfExpr(const clang::Expr &expr, clang::UnaryExprOrTypeTrait exprKind) const { + clang::CharUnits alignInCharUnits = clang::CharUnits::Zero(); + const clang::Expr *exprNoParens = expr.IgnoreParens(); + if (const auto *declRefExpr = clang::dyn_cast(exprNoParens)) { + alignInCharUnits = astFile->GetContext()->getDeclAlign(declRefExpr->getDecl(), true); + } else if (const auto *memberExpr = clang::dyn_cast(exprNoParens)) { + alignInCharUnits = astFile->GetContext()->getDeclAlign(memberExpr->getMemberDecl(), true); + } else { + return GetAlignOfType(exprNoParens->getType(), exprKind); + } + return static_cast(alignInCharUnits.getQuantity()); +} + +ASTExpr *ASTParser::GetAddrShiftExpr(MapleAllocator &allocator, ASTExpr &expr, uint32 typeSize) { + MIRType *retType = nullptr; + if (IsSignedInteger(expr.GetType()->GetPrimType())) { + retType = GlobalTables::GetTypeTable().GetInt64(); + } else { + retType = 
GlobalTables::GetTypeTable().GetPtr(); + } + if (expr.GetASTOp() == kASTIntegerLiteral) { + auto intExpr = static_cast(&expr); + auto retExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); + retExpr->SetVal(intExpr->GetVal() * typeSize); + retExpr->SetType(retType); + return retExpr; + } + auto ptrSizeExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); + ptrSizeExpr->SetVal(typeSize); + ptrSizeExpr->SetType(retType); + auto shiftExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); + shiftExpr->SetLeftExpr(&expr); + shiftExpr->SetRightExpr(ptrSizeExpr); + shiftExpr->SetOpcode(OP_mul); + shiftExpr->SetRetType(retType); + shiftExpr->SetCvtNeeded(true); + shiftExpr->SetSrcLoc(expr.GetSrcLoc()); + return shiftExpr; +} + +ASTExpr *ASTParser::GetSizeMulExpr(MapleAllocator &allocator, ASTExpr &expr, ASTExpr &ptrSizeExpr) { + MIRType *retType = nullptr; + if (IsSignedInteger(expr.GetType()->GetPrimType())) { + retType = GlobalTables::GetTypeTable().GetInt64(); + } else { + retType = GlobalTables::GetTypeTable().GetPtr(); + } + auto shiftExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); + shiftExpr->SetLeftExpr(&expr); + shiftExpr->SetRightExpr(&ptrSizeExpr); + shiftExpr->SetOpcode(OP_mul); + shiftExpr->SetRetType(retType); + shiftExpr->SetCvtNeeded(true); + shiftExpr->SetSrcLoc(expr.GetSrcLoc()); + return shiftExpr; +} + +ASTExpr *ASTParser::BuildExprToComputeSizeFromVLA(MapleAllocator &allocator, const clang::QualType &qualType) { + if (llvm::isa(qualType)) { + ASTExpr *lhs = BuildExprToComputeSizeFromVLA(allocator, llvm::cast(qualType)->getElementType()); + ASTExpr *rhs = nullptr; + CHECK_FATAL(llvm::isa(qualType), "the type must be array type"); + clang::Expr *sizeExpr = nullptr; + if (llvm::isa(qualType)) { + sizeExpr = llvm::cast(qualType)->getSizeExpr(); + if (sizeExpr == nullptr) { + return nullptr; + } + MapleMap::const_iterator iter = vlaSizeMap.find(sizeExpr); + if (iter != vlaSizeMap.cend()) { + return iter->second; + } + rhs = ProcessExpr(allocator, sizeExpr); + CHECK_FATAL(sizeExpr->getType()->isIntegerType(), "the type should be integer"); + } else if (llvm::isa(qualType)) { + uint32 size = static_cast(llvm::cast(qualType)->getSize().getSExtValue()); + if (size == 1) { + return lhs; + } + auto astExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); + astExpr->SetVal(size); + astExpr->SetType(GlobalTables::GetTypeTable().GetInt32()); + rhs = astExpr; + } else { + CHECK_FATAL(false, "NIY"); + } + auto *astBOExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); + astBOExpr->SetRetType(GlobalTables::GetTypeTable().GetPrimType(PTY_u64)); + astBOExpr->SetOpcode(OP_mul); + astBOExpr->SetLeftExpr(lhs); + astBOExpr->SetRightExpr(rhs); + return astBOExpr; + } + uint32 size = GetSizeFromQualType(qualType); + auto integerExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); + integerExpr->SetType(GlobalTables::GetTypeTable().GetUInt64()); + integerExpr->SetVal(size); + return integerExpr; +} + +ASTExpr *ASTParser::ProcessExprUnaryExprOrTypeTraitExpr(MapleAllocator &allocator, + const clang::UnaryExprOrTypeTraitExpr &expr) { + auto *astExprUnaryExprOrTypeTraitExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); + CHECK_FATAL(astExprUnaryExprOrTypeTraitExpr != nullptr, "astExprUnaryExprOrTypeTraitExpr is nullptr"); + switch (expr.getKind()) { + case clang::UETT_SizeOf: { + clang::QualType qualType = expr.isArgumentType() ? 
expr.getArgumentType().getCanonicalType() + : expr.getArgumentExpr()->getType().getCanonicalType(); + if (llvm::isa(qualType)) { + return BuildExprToComputeSizeFromVLA(allocator, qualType); + } + uint32 size = GetSizeFromQualType(qualType); + auto integerExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); + integerExpr->SetType(GlobalTables::GetTypeTable().GetUInt64()); + integerExpr->SetVal(size); + return integerExpr; + } + case clang::UETT_PreferredAlignOf: + case clang::UETT_AlignOf: { + // C11 specification: ISO/IEC 9899:201x + uint32_t align; + if (expr.isArgumentType()) { + align = GetAlignOfType(expr.getArgumentType(), expr.getKind()); + } else { + align = GetAlignOfExpr(*expr.getArgumentExpr(), expr.getKind()); + } + auto integerExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); + integerExpr->SetType(GlobalTables::GetTypeTable().GetUInt64()); + integerExpr->SetVal(align); + return integerExpr; + } + case clang::UETT_VecStep: + CHECK_FATAL(false, "NIY"); + break; + case clang::UETT_OpenMPRequiredSimdAlign: + CHECK_FATAL(false, "NIY"); + break; + } + return astExprUnaryExprOrTypeTraitExpr; +} + +ASTExpr *ASTParser::ProcessExprMemberExpr(MapleAllocator &allocator, const clang::MemberExpr &expr) { + auto *astMemberExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); + CHECK_FATAL(astMemberExpr != nullptr, "astMemberExpr is nullptr"); + ASTExpr *baseExpr = ProcessExpr(allocator, expr.getBase()); + if (baseExpr == nullptr) { + return nullptr; + } + astMemberExpr->SetBaseExpr(baseExpr); + astMemberExpr->SetBaseType(astFile->CvtType(expr.getBase()->getType())); + auto memberName = expr.getMemberDecl()->getNameAsString(); + if (memberName.empty()) { + memberName = astFile->GetOrCreateMappedUnnamedName(*expr.getMemberDecl()); + } + astMemberExpr->SetMemberName(memberName); + astMemberExpr->SetMemberType(astFile->CvtType(expr.getMemberDecl()->getType())); + astMemberExpr->SetIsArrow(expr.isArrow()); + uint64_t offsetBits = astFile->GetContext()->getFieldOffset(expr.getMemberDecl()); + astMemberExpr->SetFiledOffsetBits(offsetBits); + return astMemberExpr; +} + +ASTExpr *ASTParser::ProcessExprDesignatedInitUpdateExpr(MapleAllocator &allocator, + const clang::DesignatedInitUpdateExpr &expr) { + auto *astDesignatedInitUpdateExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); + CHECK_FATAL(astDesignatedInitUpdateExpr != nullptr, "astDesignatedInitUpdateExpr is nullptr"); + ASTExpr *baseExpr = ProcessExpr(allocator, expr.getBase()); + if (baseExpr == nullptr) { + return nullptr; + } + astDesignatedInitUpdateExpr->SetBaseExpr(baseExpr); + clang::InitListExpr *initListExpr = expr.getUpdater(); + MIRType *initListType = astFile->CvtType(expr.getType()); + astDesignatedInitUpdateExpr->SetInitListType(initListType); + ASTExpr *updaterExpr = ProcessExpr(allocator, initListExpr); + if (updaterExpr == nullptr) { + return nullptr; + } + astDesignatedInitUpdateExpr->SetUpdaterExpr(updaterExpr); + return astDesignatedInitUpdateExpr; +} + +ASTExpr *ASTParser::ProcessExprStmtExpr(MapleAllocator &allocator, const clang::StmtExpr &expr) { + ASTExprStmtExpr *astExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); + ASTStmt *compoundStmt = ProcessStmt(allocator, *expr.getSubStmt()); + astExpr->SetCompoundStmt(compoundStmt); + return astExpr; +} + +ASTExpr *ASTParser::ProcessExprConditionalOperator(MapleAllocator &allocator, const clang::ConditionalOperator &expr) { + ASTConditionalOperator *astConditionalOperator = ASTDeclsBuilder::ASTExprBuilder(allocator); + ASSERT(astConditionalOperator != nullptr, 
"astConditionalOperator is nullptr"); + ASTExpr *astExpr = ProcessExpr(allocator, expr.getCond()); + if (astExpr == nullptr) { + return nullptr; + } + astConditionalOperator->SetCondExpr(astExpr); + ASTExpr *astTrueExpr = ProcessExpr(allocator, expr.getTrueExpr()); + if (astTrueExpr == nullptr) { + return nullptr; + } + astConditionalOperator->SetTrueExpr(astTrueExpr); + ASTExpr *astFalseExpr = ProcessExpr(allocator, expr.getFalseExpr()); + if (astFalseExpr == nullptr) { + return nullptr; + } + astConditionalOperator->SetFalseExpr(astFalseExpr); + astConditionalOperator->SetType(astFile->CvtType(expr.getType())); + return astConditionalOperator; +} + +ASTExpr *ASTParser::ProcessExprCompoundAssignOperator(MapleAllocator &allocator, + const clang::CompoundAssignOperator &expr) { + return ProcessExprBinaryOperator(allocator, expr); +} + +ASTExpr *ASTParser::ProcessExprSizeOfPackExpr(MapleAllocator &allocator, const clang::SizeOfPackExpr &expr) { + // CXX feature + (void)allocator; + (void)expr; + return nullptr; +} + +ASTExpr *ASTParser::ProcessExprUserDefinedLiteral(MapleAllocator &allocator, const clang::UserDefinedLiteral &expr) { + // CXX feature + (void)allocator; + (void)expr; + return nullptr; +} + +ASTExpr *ASTParser::ProcessExprTypeTraitExpr(MapleAllocator &allocator, const clang::TypeTraitExpr &expr) { + ASTIntegerLiteral *astIntegerLiteral = ASTDeclsBuilder::ASTExprBuilder(allocator); + if (expr.getValue()) { + astIntegerLiteral->SetVal(1); + } else { + astIntegerLiteral->SetVal(0); + } + astIntegerLiteral->SetType(astFile->CvtType(expr.getType())); + return astIntegerLiteral; +} + +ASTExpr *ASTParser::ProcessExprShuffleVectorExpr(MapleAllocator &allocator, const clang::ShuffleVectorExpr &expr) { + (void)allocator; + (void)expr; + CHECK_FATAL(false, "NIY"); + return nullptr; +} + +ASTExpr *ASTParser::ProcessExprGNUNullExpr(MapleAllocator &allocator, const clang::GNUNullExpr &expr) { + (void)allocator; + (void)expr; + CHECK_FATAL(false, "NIY"); + return nullptr; +} + +ASTExpr *ASTParser::ProcessExprConstantExpr(MapleAllocator &allocator, const clang::ConstantExpr &expr) { + ASTConstantExpr *astConstantExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); + ASSERT(astConstantExpr != nullptr, "astConstantExpr is nullptr"); + ASTExpr *astExpr = ProcessExpr(allocator, expr.getSubExpr()); + if (astExpr == nullptr) { + return nullptr; + } + astConstantExpr->SetASTExpr(astExpr); + return astConstantExpr; +} + +ASTExpr *ASTParser::ProcessExprImaginaryLiteral(MapleAllocator &allocator, const clang::ImaginaryLiteral &expr) { + clang::QualType complexQualType = expr.getType().getCanonicalType(); + MIRType *complexType = astFile->CvtType(complexQualType); + CHECK_NULL_FATAL(complexType); + clang::QualType elemQualType = llvm::cast(complexQualType)->getElementType(); + MIRType *elemType = astFile->CvtType(elemQualType); + CHECK_NULL_FATAL(elemType); + ASTImaginaryLiteral *astImaginaryLiteral = ASTDeclsBuilder::ASTExprBuilder(allocator); + astImaginaryLiteral->SetComplexType(complexType); + astImaginaryLiteral->SetElemType(elemType); + ASTExpr *astExpr = ProcessExpr(allocator, expr.getSubExpr()); + if (astExpr == nullptr) { + return nullptr; + } + astImaginaryLiteral->SetASTExpr(astExpr); + return astImaginaryLiteral; +} + +std::map ASTParser::builtingFuncPtrMap = + ASTParser::InitBuiltinFuncPtrMap(); + +ASTExpr *ASTParser::ProcessExprCallExpr(MapleAllocator &allocator, const clang::CallExpr &expr) { + ASTCallExpr *astCallExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); + ASSERT(astCallExpr != 
nullptr, "astCallExpr is nullptr"); + // callee + ASTExpr *astCallee = ProcessExpr(allocator, expr.getCallee()); + if (astCallee == nullptr) { + return nullptr; + } + astCallExpr->SetCalleeExpr(astCallee); + // return + astCallExpr->SetType(astFile->CvtType(expr.getType())); + // return var attrs + GenericAttrs returnVarAttrs; + astFile->CollectFuncReturnVarAttrs(expr, returnVarAttrs); + astCallExpr->SetReturnVarAttrs(returnVarAttrs); + // args + MapleVector args(allocator.Adapter()); + for (uint32_t i = 0; i < expr.getNumArgs(); ++i) { + const clang::Expr *subExpr = expr.getArg(i); + ASTExpr *arg = ProcessExpr(allocator, subExpr); + arg->SetType(astFile->CvtType(subExpr->getType())); + args.push_back(arg); + } + astCallExpr->SetArgs(args); + // Obtain the function name directly + const clang::FunctionDecl *funcDecl = expr.getDirectCallee(); + if (funcDecl != nullptr) { + std::string funcName = astFile->GetMangledName(*funcDecl); + funcName = astCallExpr->CvtBuiltInFuncName(funcName); + if (!ASTUtil::IsValidName(funcName)) { + ASTUtil::AdjustName(funcName); + } + + if (builtingFuncPtrMap.find(funcName) != builtingFuncPtrMap.end()) { + static std::stringstream ss; + ss.clear(); + ss.str(std::string()); + ss << funcName; + ASTExpr *builtinFuncExpr = ParseBuiltinFunc(allocator, expr, ss); + if (builtinFuncExpr != nullptr) { + return builtinFuncExpr; + } + funcName = ss.str(); + } + + GenericAttrs attrs = SolveFunctionAttributes(*funcDecl, funcName); + astCallExpr->SetFuncName(funcName); + astCallExpr->SetFuncAttrs(attrs.ConvertToFuncAttrs()); + ASTFunc *astFunc = static_cast(ProcessDecl(allocator, *funcDecl)); + if (astFunc != nullptr) { + astCallExpr->SetFuncDecl(astFunc); + } + } else { + astCallExpr->SetIcall(true); + } + return astCallExpr; +} + +ASTExpr *ASTParser::ProcessExprParenExpr(MapleAllocator &allocator, const clang::ParenExpr &expr) { + ASTParenExpr *astParenExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); + ASSERT(astParenExpr != nullptr, "astParenExpr is nullptr"); + ASTExpr *astExpr = ProcessExpr(allocator, expr.getSubExpr()); + if (astExpr == nullptr) { + return nullptr; + } + astParenExpr->SetEvaluatedFlag(astExpr->GetEvaluatedFlag()); + astParenExpr->SetASTExpr(astExpr); + return astParenExpr; +} + +ASTExpr *ASTParser::ProcessExprCharacterLiteral(MapleAllocator &allocator, const clang::CharacterLiteral &expr) { + ASTCharacterLiteral *astCharacterLiteral = ASTDeclsBuilder::ASTExprBuilder(allocator); + ASSERT(astCharacterLiteral != nullptr, "astCharacterLiteral is nullptr"); + const clang::QualType qualType = expr.getType(); + const auto *type = llvm::cast(qualType.getTypePtr()); + clang::BuiltinType::Kind kind = type->getKind(); + if (qualType->isPromotableIntegerType()) { + kind = clang::BuiltinType::Int; + } + PrimType primType = PTY_i32; + switch (kind) { + case clang::BuiltinType::UInt: + primType = PTY_u32; + break; + case clang::BuiltinType::Int: + primType = PTY_i32; + break; + case clang::BuiltinType::ULong: + case clang::BuiltinType::ULongLong: + primType = PTY_u64; + break; + case clang::BuiltinType::Long: + case clang::BuiltinType::LongLong: + primType = PTY_i64; + break; + default: + break; + } + astCharacterLiteral->SetVal(expr.getValue()); + astCharacterLiteral->SetPrimType(primType); + return astCharacterLiteral; +} + +ASTExpr *ASTParser::ProcessExprIntegerLiteral(MapleAllocator &allocator, const clang::IntegerLiteral &expr) { + ASTIntegerLiteral *astIntegerLiteral = ASTDeclsBuilder::ASTExprBuilder(allocator); + ASSERT(astIntegerLiteral != nullptr, 
"astIntegerLiteral is nullptr"); + MIRType *type = nullptr; + llvm::APInt api = expr.getValue(); + int64 val = api.getSExtValue(); + if (api.getBitWidth() > kInt32Width) { + type = expr.getType()->isSignedIntegerOrEnumerationType() ? + GlobalTables::GetTypeTable().GetInt64() : GlobalTables::GetTypeTable().GetUInt64(); + } else { + type = expr.getType()->isSignedIntegerOrEnumerationType() ? + GlobalTables::GetTypeTable().GetInt32() : GlobalTables::GetTypeTable().GetUInt32(); + } + astIntegerLiteral->SetVal(val); + astIntegerLiteral->SetType(type); + return astIntegerLiteral; +} + +ASTExpr *ASTParser::ProcessExprFloatingLiteral(MapleAllocator &allocator, const clang::FloatingLiteral &expr) { + ASTFloatingLiteral *astFloatingLiteral = ASTDeclsBuilder::ASTExprBuilder(allocator); + ASSERT(astFloatingLiteral != nullptr, "astFloatingLiteral is nullptr"); + llvm::APFloat apf = expr.getValue(); + const llvm::fltSemantics &fltSem = expr.getSemantics(); + double val = 0; + if (&fltSem == &llvm::APFloat::IEEEdouble()) { + val = static_cast(apf.convertToDouble()); + astFloatingLiteral->SetKind(FloatKind::F64); + astFloatingLiteral->SetVal(val); + } else if (&fltSem == &llvm::APFloat::IEEEsingle()) { + val = static_cast(apf.convertToFloat()); + astFloatingLiteral->SetKind(FloatKind::F32); + astFloatingLiteral->SetVal(val); + } else if (&fltSem == &llvm::APFloat::IEEEquad() || &fltSem == &llvm::APFloat::x87DoubleExtended()) { + bool losesInfo; + (void)apf.convert(llvm::APFloat::IEEEdouble(), + llvm::APFloatBase::rmNearestTiesToAway, + &losesInfo); + val = static_cast(apf.convertToDouble()); + astFloatingLiteral->SetKind(FloatKind::F64); + astFloatingLiteral->SetVal(val); + } else { + CHECK_FATAL(false, "unsupported floating literal"); + } + return astFloatingLiteral; +} + +ASTExpr *ASTParser::ProcessExprCastExpr(MapleAllocator &allocator, const clang::CastExpr &expr) { + ASTCastExpr *astCastExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); + CHECK_FATAL(astCastExpr != nullptr, "astCastExpr is nullptr"); + MIRType *srcType = astFile->CvtType(expr.getSubExpr()->getType()); + MIRType *toType = astFile->CvtType(expr.getType()); + astCastExpr->SetSrcType(srcType); + astCastExpr->SetDstType(toType); + + switch (expr.getCastKind()) { + case clang::CK_NoOp: + case clang::CK_ToVoid: + break; + case clang::CK_FunctionToPointerDecay: + astCastExpr->SetIsFunctionToPointerDecay(true); + break; + case clang::CK_LValueToRValue: + astCastExpr->SetRValue(true); + break; + case clang::CK_BitCast: + astCastExpr->SetBitCast(true); + break; + case clang::CK_ArrayToPointerDecay: + if (!(expr.getSubExpr()->getType()->isVariableArrayType() && + expr.getSubExpr()->getStmtClass() == clang::Stmt::DeclRefExprClass)) { + astCastExpr->SetIsArrayToPointerDecay(true); // vla as a pointer is not need to be addrof + } + break; + case clang::CK_BuiltinFnToFnPtr: + astCastExpr->SetBuilinFunc(true); + break; + case clang::CK_VectorSplat: + astCastExpr->SetVectorSplat(true); + CHECK_FATAL(expr.getType()->isVectorType(), "dst type must be vector type in VectorSplat"); + break; + case clang::CK_NullToPointer: + case clang::CK_IntegralToPointer: + case clang::CK_FloatingToIntegral: + case clang::CK_IntegralToFloating: + case clang::CK_FloatingCast: + case clang::CK_IntegralCast: + case clang::CK_IntegralToBoolean: + case clang::CK_PointerToBoolean: + case clang::CK_FloatingToBoolean: + case clang::CK_PointerToIntegral: + astCastExpr->SetNeededCvt(true); + break; + case clang::CK_ToUnion: + astCastExpr->SetUnionCast(true); + break; + case 
clang::CK_IntegralRealToComplex: + case clang::CK_FloatingRealToComplex: + case clang::CK_IntegralComplexCast: + case clang::CK_FloatingComplexCast: + case clang::CK_IntegralComplexToFloatingComplex: + case clang::CK_FloatingComplexToIntegralComplex: + case clang::CK_FloatingComplexToReal: + case clang::CK_IntegralComplexToReal: + case clang::CK_FloatingComplexToBoolean: + case clang::CK_IntegralComplexToBoolean: { + clang::QualType qualType = expr.getType().getCanonicalType(); + astCastExpr->SetComplexType(astFile->CvtType(qualType)); + clang::QualType dstQualType = llvm::cast(qualType)->getElementType(); + astCastExpr->SetDstType(astFile->CvtType(dstQualType)); + astCastExpr->SetNeededCvt(true); + if (expr.getCastKind() == clang::CK_IntegralRealToComplex || + expr.getCastKind() == clang::CK_FloatingRealToComplex) { + astCastExpr->SetComplexCastKind(true); + astCastExpr->SetSrcType(astFile->CvtType(expr.getSubExpr()->getType().getCanonicalType())); + } else { + clang::QualType subQualType = expr.getSubExpr()->getType().getCanonicalType(); + clang::QualType srcQualType = llvm::cast(subQualType)->getElementType(); + astCastExpr->SetSrcType(astFile->CvtType(srcQualType)); + } + break; + } + default: + CHECK_FATAL(false, "NIY"); + return nullptr; + } + ASTExpr *astExpr = ProcessExpr(allocator, expr.getSubExpr()); + if (astExpr == nullptr) { + return nullptr; + } + astExpr->SetRValue(astCastExpr->IsRValue()); + astCastExpr->SetEvaluatedFlag(astExpr->GetEvaluatedFlag()); + astCastExpr->SetASTExpr(astExpr); + return astCastExpr; +} + +ASTExpr *ASTParser::ProcessExprImplicitCastExpr(MapleAllocator &allocator, const clang::ImplicitCastExpr &expr) { + return ProcessExprCastExpr(allocator, expr); +} + +ASTExpr *ASTParser::ProcessExprDeclRefExpr(MapleAllocator &allocator, const clang::DeclRefExpr &expr) { + ASTDeclRefExpr *astRefExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); + CHECK_FATAL(astRefExpr != nullptr, "astRefExpr is nullptr"); + if (auto enumConst = llvm::dyn_cast(expr.getDecl())) { + const llvm::APSInt value = enumConst->getInitVal(); + ASTIntegerLiteral *astIntegerLiteral = ASTDeclsBuilder::ASTExprBuilder(allocator); + astIntegerLiteral->SetVal(value.getExtValue()); + astIntegerLiteral->SetType(astFile->CvtType(expr.getType())); + return astIntegerLiteral; + } + switch (expr.getStmtClass()) { + case clang::Stmt::DeclRefExprClass: { + ASTDecl *astDecl = ASTDeclsBuilder::GetASTDecl(expr.getDecl()->getCanonicalDecl()->getID()); + if (astDecl == nullptr) { + astDecl = ProcessDecl(allocator, *(expr.getDecl()->getCanonicalDecl())); + } + astRefExpr->SetASTDecl(astDecl); + astRefExpr->SetType(astDecl->GetTypeDesc().front()); + return astRefExpr; + } + default: + CHECK_FATAL(false, "NIY"); + return nullptr; + } +} + +ASTExpr *ASTParser::ProcessExprBinaryOperatorComplex(MapleAllocator &allocator, const clang::BinaryOperator &bo) { + ASTBinaryOperatorExpr *astBinOpExpr = AllocBinaryOperatorExpr(allocator, bo); + CHECK_FATAL(astBinOpExpr != nullptr, "astBinOpExpr is nullptr"); + clang::QualType qualType = bo.getType(); + astBinOpExpr->SetRetType(astFile->CvtType(qualType)); + ASTExpr *astRExpr = ProcessExpr(allocator, bo.getRHS()); + ASTExpr *astLExpr = ProcessExpr(allocator, bo.getLHS()); + clang::QualType elementType = llvm::cast( + bo.getLHS()->getType().getCanonicalType())->getElementType(); + MIRType *elementMirType = astFile->CvtType(elementType); + astBinOpExpr->SetComplexElementType(elementMirType); + auto *leftImage = ASTDeclsBuilder::ASTExprBuilder(allocator); + 
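// Decompose both complex operands into real and imaginary unary sub-expressions over the element type, so later lowering can work on the scalar parts directly. +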
leftImage->SetUOExpr(astLExpr); + leftImage->SetElementType(elementMirType); + astBinOpExpr->SetComplexLeftImagExpr(leftImage); + auto *leftReal = ASTDeclsBuilder::ASTExprBuilder(allocator); + leftReal->SetUOExpr(astLExpr); + leftReal->SetElementType(elementMirType); + astBinOpExpr->SetComplexLeftRealExpr(leftReal); + auto *rightImage = ASTDeclsBuilder::ASTExprBuilder(allocator); + rightImage->SetUOExpr(astRExpr); + rightImage->SetElementType(elementMirType); + astBinOpExpr->SetComplexRightImagExpr(rightImage); + auto *rightReal = ASTDeclsBuilder::ASTExprBuilder(allocator); + rightReal->SetUOExpr(astRExpr); + rightReal->SetElementType(elementMirType); + astBinOpExpr->SetComplexRightRealExpr(rightReal); + return astBinOpExpr; +} + +ASTExpr *ASTParser::SolvePointerOffsetOperation(MapleAllocator &allocator, const clang::BinaryOperator &bo, + ASTBinaryOperatorExpr &astBinOpExpr, ASTExpr &astRExpr, + ASTExpr &astLExpr) { + auto boType = bo.getType().getCanonicalType(); + auto lhsType = bo.getLHS()->getType().getCanonicalType(); + auto rhsType = bo.getRHS()->getType().getCanonicalType(); + auto boMirType = astFile->CvtType(boType); + auto ptrType = lhsType->isPointerType() ? lhsType : rhsType; + auto astSizeExpr = lhsType->isPointerType() ? &astRExpr : &astLExpr; + if (ptrType->getPointeeType()->isVariableArrayType()) { + ASTExpr *vlaTypeSizeExpr = BuildExprToComputeSizeFromVLA(allocator, ptrType->getPointeeType()); + astSizeExpr = GetSizeMulExpr(allocator, *astSizeExpr, *vlaTypeSizeExpr); + } else { + auto typeSize = GetSizeFromQualType(boType->getPointeeType()); + MIRType *pointedType = GlobalTables::GetTypeTable().GetTypeFromTyIdx( + static_cast(boMirType)->GetPointedTyIdx()); + if (pointedType->GetPrimType() == PTY_f64) { + typeSize = 8; // 8 is f64 byte num, because now f128 also cvt to f64 + } + astSizeExpr = GetAddrShiftExpr(allocator, *astSizeExpr, typeSize); + } + astBinOpExpr.SetCvtNeeded(false); // the type cannot be cvt. 
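+ // astSizeExpr now holds the integer operand scaled by the pointee size (or by the runtime VLA size expression) and replaces the original offset operand.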
+ return astSizeExpr; +} + +ASTExpr *ASTParser::SolvePointerSubPointerOperation(MapleAllocator &allocator, const clang::BinaryOperator &bo, + ASTBinaryOperatorExpr &astBinOpExpr) const { + auto rhsType = bo.getRHS()->getType().getCanonicalType(); + auto ptrSizeExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); + ptrSizeExpr->SetType(astBinOpExpr.GetRetType()); + ptrSizeExpr->SetVal(GetSizeFromQualType(rhsType->getPointeeType())); + auto retASTExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); + retASTExpr->SetLeftExpr(&astBinOpExpr); + retASTExpr->SetRightExpr(ptrSizeExpr); + retASTExpr->SetOpcode(OP_div); + retASTExpr->SetRetType(astBinOpExpr.GetRetType()); + return retASTExpr; +} + +ASTExpr *ASTParser::ProcessExprBinaryOperator(MapleAllocator &allocator, const clang::BinaryOperator &bo) { + ASTBinaryOperatorExpr *astBinOpExpr = AllocBinaryOperatorExpr(allocator, bo); + CHECK_FATAL(astBinOpExpr != nullptr, "astBinOpExpr is nullptr"); + auto boType = bo.getType().getCanonicalType(); + auto lhsType = bo.getLHS()->getType().getCanonicalType(); + auto rhsType = bo.getRHS()->getType().getCanonicalType(); + auto leftMirType = astFile->CvtType(lhsType); + auto rightMirType = astFile->CvtType(rhsType); + auto clangOpCode = bo.getOpcode(); + astBinOpExpr->SetRetType(astFile->CvtType(boType)); + if (bo.isCompoundAssignmentOp()) { + clangOpCode = clang::BinaryOperator::getOpForCompoundAssignment(clangOpCode); + clang::QualType res = llvm::cast(bo).getComputationLHSType().getCanonicalType(); + astBinOpExpr->SetRetType(astFile->CvtType(res)); + } + if ((boType->isAnyComplexType() && + (clang::BinaryOperator::isAdditiveOp(clangOpCode) || clang::BinaryOperator::isMultiplicativeOp(clangOpCode))) || + (clang::BinaryOperator::isEqualityOp(clangOpCode) && lhsType->isAnyComplexType() && + rhsType->isAnyComplexType())) { + return ProcessExprBinaryOperatorComplex(allocator, bo); + } + ASTExpr *astRExpr = ProcessExpr(allocator, bo.getRHS()); + ASTExpr *astLExpr = ProcessExpr(allocator, bo.getLHS()); + ASSERT_NOT_NULL(astRExpr); + ASSERT_NOT_NULL(astLExpr); + if (clangOpCode == clang::BO_Div || clangOpCode == clang::BO_Mul || + clangOpCode == clang::BO_DivAssign || clangOpCode == clang::BO_MulAssign) { + if (astBinOpExpr->GetRetType()->GetPrimType() == PTY_u16 || astBinOpExpr->GetRetType()->GetPrimType() == PTY_u8) { + astBinOpExpr->SetRetType(GlobalTables::GetTypeTable().GetPrimType(PTY_u32)); + } + if (astBinOpExpr->GetRetType()->GetPrimType() == PTY_i16 || astBinOpExpr->GetRetType()->GetPrimType() == PTY_i8) { + astBinOpExpr->SetRetType(GlobalTables::GetTypeTable().GetPrimType(PTY_i32)); + } + } + if ((leftMirType->GetPrimType() != astBinOpExpr->GetRetType()->GetPrimType() || + rightMirType->GetPrimType() != astBinOpExpr->GetRetType()->GetPrimType()) && + (clang::BinaryOperator::isAdditiveOp(clangOpCode) || clang::BinaryOperator::isMultiplicativeOp(clangOpCode))) { + astBinOpExpr->SetCvtNeeded(true); + } + // ptr +/- + if (boType->isPointerType() && clang::BinaryOperator::isAdditiveOp(clangOpCode) && + ((lhsType->isPointerType() && rhsType->isIntegerType()) || + (lhsType->isIntegerType() && rhsType->isPointerType())) && + !boType->isVoidPointerType() && GetSizeFromQualType(boType->getPointeeType()) != 1) { + ASTExpr *astSizeExpr = SolvePointerOffsetOperation(allocator, bo, *astBinOpExpr, *astRExpr, *astLExpr); + if (lhsType->isPointerType()) { + astRExpr = astSizeExpr; + } else { + astLExpr = astSizeExpr; + } + } + astBinOpExpr->SetLeftExpr(astLExpr); + astBinOpExpr->SetRightExpr(astRExpr); + // ptr - ptr + 
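// A pointer difference is lowered as the raw byte difference divided by the pointee size; void pointers and one-byte pointees are excluded by the check below. +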
if (clangOpCode == clang::BO_Sub && rhsType->isPointerType() && + lhsType->isPointerType() && !rhsType->isVoidPointerType() && + GetSizeFromQualType(rhsType->getPointeeType()) != 1) { + astBinOpExpr = static_cast(SolvePointerSubPointerOperation(allocator, bo, *astBinOpExpr)); + } + if (bo.isCompoundAssignmentOp()) { + auto assignExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); + assignExpr->SetLeftExpr(astLExpr); + assignExpr->SetRightExpr(astBinOpExpr); + assignExpr->SetRetType(astBinOpExpr->GetRetType()); + assignExpr->SetIsCompoundAssign(true); + return assignExpr; + } + return astBinOpExpr; +} + +ASTDecl *ASTParser::GetAstDeclOfDeclRefExpr(MapleAllocator &allocator, const clang::Expr &expr) { + switch (expr.getStmtClass()) { + case clang::Stmt::DeclRefExprClass: + return static_cast(ProcessExpr(allocator, &expr))->GetASTDecl(); + case clang::Stmt::ImplicitCastExprClass: + case clang::Stmt::CXXStaticCastExprClass: + case clang::Stmt::CXXReinterpretCastExprClass: + case clang::Stmt::CStyleCastExprClass: + return GetAstDeclOfDeclRefExpr(allocator, *llvm::cast(expr).getSubExpr()); + case clang::Stmt::ParenExprClass: + return GetAstDeclOfDeclRefExpr(allocator, *llvm::cast(expr).getSubExpr()); + case clang::Stmt::UnaryOperatorClass: + return GetAstDeclOfDeclRefExpr(allocator, *llvm::cast(expr).getSubExpr()); + case clang::Stmt::ConstantExprClass: + return GetAstDeclOfDeclRefExpr(allocator, *llvm::cast(expr).getSubExpr()); + default: + break; + } + return nullptr; +} + +ASTExpr *ASTParser::ProcessExprCStyleCastExpr(MapleAllocator &allocator, const clang::CStyleCastExpr &castExpr) { + return ProcessExprCastExpr(allocator, castExpr); +} + +ASTExpr *ASTParser::ProcessExprArrayInitLoopExpr(MapleAllocator &allocator, + const clang::ArrayInitLoopExpr &arrInitLoopExpr) { + ASTArrayInitLoopExpr *astExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); + CHECK_FATAL(astExpr != nullptr, "astCastExpr is nullptr"); + ASTExpr *common = arrInitLoopExpr.getCommonExpr() == nullptr ? 
nullptr : + ProcessExpr(allocator, arrInitLoopExpr.getCommonExpr()); + astExpr->SetCommonExpr(common); + return astExpr; +} + +ASTExpr *ASTParser::ProcessExprArrayInitIndexExpr(MapleAllocator &allocator, + const clang::ArrayInitIndexExpr &arrInitIndexExpr) { + ASTArrayInitIndexExpr *astExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); + CHECK_FATAL(astExpr != nullptr, "astCastExpr is nullptr"); + astExpr->SetPrimType(astFile->CvtType(arrInitIndexExpr.getType())); + astExpr->SetValueStr("0"); + return astExpr; +} + +ASTExpr *ASTParser::ProcessExprAtomicExpr(MapleAllocator &allocator, + const clang::AtomicExpr &atomicExpr) { + ASTAtomicExpr *astExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); + CHECK_FATAL(astExpr != nullptr, "astCastExpr is nullptr"); + astExpr->SetObjExpr(ProcessExpr(allocator, atomicExpr.getPtr())); + astExpr->SetType(astFile->CvtType(atomicExpr.getPtr()->getType())); + astExpr->SetRefType(astFile->CvtType(atomicExpr.getPtr()->getType()->getPointeeType())); + if (atomicExpr.getOp() != clang::AtomicExpr::AO__atomic_load_n) { + astExpr->SetValExpr1(ProcessExpr(allocator, atomicExpr.getVal1())); + astExpr->SetVal1Type(astFile->CvtType(atomicExpr.getVal1()->getType())); + } + if (atomicExpr.getOp() == clang::AtomicExpr::AO__atomic_exchange) { + astExpr->SetValExpr2(ProcessExpr(allocator, atomicExpr.getVal2())); + astExpr->SetVal2Type(astFile->CvtType(atomicExpr.getVal2()->getType())); + } + astExpr->SetOrderExpr(ProcessExpr(allocator, atomicExpr.getOrder())); + + static std::unordered_map astOpMap = { + {clang::AtomicExpr::AO__atomic_load_n, kAtomicOpLoadN}, + {clang::AtomicExpr::AO__atomic_load, kAtomicOpLoad}, + {clang::AtomicExpr::AO__atomic_store_n, kAtomicOpStoreN}, + {clang::AtomicExpr::AO__atomic_store, kAtomicOpStore}, + {clang::AtomicExpr::AO__atomic_exchange, kAtomicOpExchange}, + {clang::AtomicExpr::AO__atomic_exchange_n, kAtomicOpExchangeN}, + {clang::AtomicExpr::AO__atomic_add_fetch, kAtomicOpAddFetch}, + {clang::AtomicExpr::AO__atomic_sub_fetch, kAtomicOpSubFetch}, + {clang::AtomicExpr::AO__atomic_and_fetch, kAtomicOpAndFetch}, + {clang::AtomicExpr::AO__atomic_xor_fetch, kAtomicOpXorFetch}, + {clang::AtomicExpr::AO__atomic_or_fetch, kAtomicOpOrFetch}, + {clang::AtomicExpr::AO__atomic_fetch_add, kAtomicOpFetchAdd}, + {clang::AtomicExpr::AO__atomic_fetch_sub, kAtomicOpFetchSub}, + {clang::AtomicExpr::AO__atomic_fetch_and, kAtomicOpFetchAnd}, + {clang::AtomicExpr::AO__atomic_fetch_xor, kAtomicOpFetchXor}, + {clang::AtomicExpr::AO__atomic_fetch_or, kAtomicOpFetchOr}, + }; + ASSERT(astOpMap.find(atomicExpr.getOp()) != astOpMap.end(), "%s:%d error: atomic expr op not supported!", + FEManager::GetModule().GetFileNameFromFileNum(astFile->GetLOC(atomicExpr.getBuiltinLoc()).fileIdx).c_str(), + astFile->GetLOC(atomicExpr.getBuiltinLoc()).line); + astExpr->SetAtomicOp(astOpMap[atomicExpr.getOp()]); + return astExpr; +} + +ASTExpr *ASTParser::ProcessExprExprWithCleanups(MapleAllocator &allocator, + const clang::ExprWithCleanups &cleanupsExpr) { + ASTExprWithCleanups *astExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); + CHECK_FATAL(astExpr != nullptr, "astCastExpr is nullptr"); + ASTExpr *sub = cleanupsExpr.getSubExpr() == nullptr ? 
nullptr : ProcessExpr(allocator, cleanupsExpr.getSubExpr()); + astExpr->SetSubExpr(sub); + return astExpr; +} + +ASTExpr *ASTParser::ProcessExprMaterializeTemporaryExpr(MapleAllocator &allocator, + const clang::MaterializeTemporaryExpr &matTempExpr) { + // cxx feature + (void)allocator; + (void)matTempExpr; + return nullptr; +} + +ASTExpr *ASTParser::ProcessExprSubstNonTypeTemplateParmExpr(MapleAllocator &allocator, + const clang::SubstNonTypeTemplateParmExpr &subTempExpr) { + // cxx feature + (void)allocator; + (void)subTempExpr; + return nullptr; +} + +ASTExpr *ASTParser::ProcessExprDependentScopeDeclRefExpr(MapleAllocator &allocator, + const clang::DependentScopeDeclRefExpr &depScopeExpr) { + // cxx feature + (void)allocator; + (void)depScopeExpr; + return nullptr; +} + +ASTExpr *ASTParser::ProcessExprChooseExpr(MapleAllocator &allocator, const clang::ChooseExpr &chs) { + return ProcessExpr(allocator, chs.getChosenSubExpr()); +} + +ASTExpr *ASTParser::ProcessExprGenericSelectionExpr(MapleAllocator &allocator, const clang::GenericSelectionExpr &gse) { + return ProcessExpr(allocator, gse.getResultExpr()); +} + +bool ASTParser::PreProcessAST() { + TraverseDecl(astUnitDecl, [this](clang::Decl *child) { + ASSERT_NOT_NULL(child); + switch (child->getKind()) { + case clang::Decl::Var: { + globalVarDecles.emplace_back(child); + break; + } + case clang::Decl::Function: { + funcDecles.emplace_back(child); + break; + } + case clang::Decl::Record: { + recordDecles.emplace_back(child); + break; + } + case clang::Decl::Typedef: + globalTypeDefDecles.emplace_back(child); + break; + case clang::Decl::Enum: + globalEnumDecles.emplace_back(child); + break; + case clang::Decl::FileScopeAsm: + globalFileScopeAsm.emplace_back(child); + break; + case clang::Decl::Empty: + case clang::Decl::StaticAssert: + break; + default: { + WARN(kLncWarn, "Unsupported decl kind: %u", child->getKind()); + } + } + }); + return true; +} + +#define DECL_CASE(CLASS) \ + case clang::Decl::CLASS: { \ + ASTDecl *astDeclaration = ProcessDecl##CLASS##Decl(allocator, llvm::cast(decl)); \ + if (astDeclaration != nullptr) { \ + astDeclaration->SetGlobal(decl.isDefinedOutsideFunctionOrMethod()); \ + if (astDeclaration->GetSrcFileIdx() == 0) { \ + Loc loc = astFile->GetLOC(decl.getLocation()); \ + astDeclaration->SetSrcLoc(loc); \ + } \ + } \ + return astDeclaration; \ + } +ASTDecl *ASTParser::ProcessDecl(MapleAllocator &allocator, const clang::Decl &decl) { + ASTDecl *astDecl = ASTDeclsBuilder::GetASTDecl(decl.getID()); + if (astDecl != nullptr) { + return astDecl; + } + switch (decl.getKind()) { + DECL_CASE(Function); + DECL_CASE(Field); + DECL_CASE(Record); + DECL_CASE(Var); + DECL_CASE(ParmVar); + DECL_CASE(Enum); + DECL_CASE(Typedef); + DECL_CASE(EnumConstant); + DECL_CASE(Label); + DECL_CASE(StaticAssert); + DECL_CASE(FileScopeAsm); + default: + CHECK_FATAL(false, "ASTDecl: %s NIY", decl.getDeclKindName()); + return nullptr; + } +} + +ASTDecl *ASTParser::ProcessDeclStaticAssertDecl(MapleAllocator &allocator, const clang::StaticAssertDecl &assertDecl) { + (void)allocator; + (void)assertDecl; + return nullptr; +} + +ASTDecl *ASTParser::ProcessDeclRecordDecl(MapleAllocator &allocator, const clang::RecordDecl &recDecl) { + ASTStruct *curStructOrUnion = static_cast(ASTDeclsBuilder::GetASTDecl(recDecl.getID())); + if (curStructOrUnion != nullptr) { + return curStructOrUnion; + } + std::stringstream recName; + clang::QualType qType = recDecl.getTypeForDecl()->getCanonicalTypeInternal(); + astFile->EmitTypeName(*qType->getAs(), recName); 
+ MIRType *recType = astFile->CvtType(qType); + if (recType == nullptr) { + return nullptr; + } + GenericAttrs attrs; + astFile->CollectRecordAttrs(recDecl, attrs); + std::string structName = recName.str(); + curStructOrUnion = ASTDeclsBuilder::ASTStructBuilder(allocator, + fileName, + structName, + MapleVector({recType}, allocator.Adapter()), + attrs, + recDecl.getID()); + if (recDecl.isUnion()) { + curStructOrUnion->SetIsUnion(); + } + const auto *declContext = llvm::dyn_cast(&recDecl); + if (declContext == nullptr) { + return nullptr; + } + for (auto *loadDecl : declContext->decls()) { + if (loadDecl == nullptr) { + return nullptr; + } + auto *fieldDecl = llvm::dyn_cast(loadDecl); + if (llvm::isa(loadDecl)) { + clang::RecordDecl *subRecordDecl = llvm::cast(loadDecl->getCanonicalDecl()); + ASTStruct *sub = static_cast(ProcessDecl(allocator, *subRecordDecl)); + if (sub == nullptr) { + return nullptr; + } + } + + if (llvm::isa(loadDecl)) { + ASTField *af = static_cast(ProcessDecl(allocator, *fieldDecl)); + if (af == nullptr) { + return nullptr; + } + curStructOrUnion->SetField(af); + } + } + if (!recDecl.isDefinedOutsideFunctionOrMethod()) { + // Record function scope type decl in global with unique suffix identified + auto itor = std::find(astStructs.cbegin(), astStructs.cend(), curStructOrUnion); + if (itor == astStructs.end()) { + astStructs.emplace_back(curStructOrUnion); + } + } + ProcessBoundaryFieldAttrs(allocator, *curStructOrUnion, recDecl); + return curStructOrUnion; +} + +MapleVector ASTParser::SolveFuncParameterDecls(MapleAllocator &allocator, + const clang::FunctionDecl &funcDecl, + MapleVector &typeDescIn, + std::list &stmts) { + MapleVector paramDecls(allocator.Adapter()); + unsigned int numParam = funcDecl.getNumParams(); + for (uint32_t i = 0; i < numParam; ++i) { + const clang::ParmVarDecl *parmDecl = funcDecl.getParamDecl(i); + std::list astExprs; + SaveVLASizeExpr(allocator, parmDecl->getOriginalType(), astExprs); + ASTStmtDummy *stmt = ASTDeclsBuilder::ASTStmtBuilder(allocator); + for (auto expr : std::as_const(astExprs)) { + stmt->SetASTExpr(expr); + } + (void)stmts.emplace_back(stmt); + ASTDecl *parmVarDecl = ProcessDecl(allocator, *parmDecl); + ASSERT_NOT_NULL(parmVarDecl); + paramDecls.push_back(parmVarDecl); + typeDescIn.push_back(parmVarDecl->GetTypeDesc().front()); + } + return paramDecls; +} + +GenericAttrs ASTParser::SolveFunctionAttributes(const clang::FunctionDecl &funcDecl, std::string &funcName) const { + GenericAttrs attrs; + astFile->CollectFuncAttrs(funcDecl, attrs, kPublic); + // for inline optimize + if (attrs.GetAttr(GENATTR_static) && FEOptions::GetInstance().GetFuncInlineSize() != 0) { + funcName = funcName + astFile->GetAstFileNameHashStr(); + } + + // set inline functions as weak symbols as it's in C++ + if (opts::inlineAsWeak == true && attrs.GetAttr(GENATTR_inline) && !attrs.GetAttr(GENATTR_static)) { + attrs.SetAttr(GENATTR_weak); + } + + return attrs; +} + +ASTStmt *ASTParser::SolveFunctionBody(MapleAllocator &allocator, + const clang::FunctionDecl &funcDecl, + ASTFunc &astFunc, const std::list &stmts) { + ASTStmt *astCompoundStmt = ProcessStmt(allocator, *llvm::cast(funcDecl.getBody())); + if (astCompoundStmt != nullptr) { + astFunc.SetCompoundStmt(astCompoundStmt); + astFunc.InsertStmtsIntoCompoundStmtAtFront(stmts); + } else { + return nullptr; + } + return astCompoundStmt; +} + +ASTDecl *ASTParser::ProcessDeclFunctionDecl(MapleAllocator &allocator, const clang::FunctionDecl &funcDecl) { + ASTFunc *astFunc = 
static_cast(ASTDeclsBuilder::GetASTDecl(funcDecl.getID())); + if (astFunc != nullptr) { + return astFunc; + } + std::string funcName = astFile->GetMangledName(funcDecl); + if (funcName.empty()) { + return nullptr; + } + if (!ASTUtil::IsValidName(funcName)) { + ASTUtil::AdjustName(funcName); + } + MapleVector typeDescIn(allocator.Adapter()); + clang::QualType funcQualType = funcDecl.getType(); + MIRType *mirFuncType = astFile->CvtType(funcQualType); + typeDescIn.push_back(mirFuncType); + clang::QualType qualType = funcDecl.getReturnType(); + MIRType *retType = astFile->CvtType(qualType); + if (retType == nullptr) { + return nullptr; + } + typeDescIn.push_back(retType); + + std::list implicitStmts; + MapleVector paramDecls = SolveFuncParameterDecls(allocator, funcDecl, typeDescIn, implicitStmts); + GenericAttrs attrs = SolveFunctionAttributes(funcDecl, funcName); + astFunc = ASTDeclsBuilder::ASTFuncBuilder( + allocator, fileName, funcName, typeDescIn, attrs, paramDecls, funcDecl.getID()); + CHECK_FATAL(astFunc != nullptr, "astFunc is nullptr"); + clang::SectionAttr *sa = funcDecl.getAttr(); + if (sa != nullptr && !sa->isImplicit()) { + astFunc->SetSectionAttr(sa->getName().str()); + } + // collect EnhanceC func attr + ProcessNonnullFuncAttrs(funcDecl, *astFunc); + ProcessBoundaryFuncAttrs(allocator, funcDecl, *astFunc); + ProcessBoundaryParamAttrs(allocator, funcDecl, *astFunc); + clang::WeakRefAttr *weakrefAttr = funcDecl.getAttr(); + if (weakrefAttr != nullptr) { + astFunc->SetWeakrefAttr(std::pair{true, weakrefAttr->getAliasee().str()}); + } + if (funcDecl.hasBody()) { + if (SolveFunctionBody(allocator, funcDecl, *astFunc, implicitStmts) == nullptr) { + return nullptr; + } + } + return astFunc; +} + +void ASTParser::ProcessNonnullFuncAttrs(const clang::FunctionDecl &funcDecl, ASTFunc &astFunc) const { + if (funcDecl.hasAttr()) { + astFunc.SetAttr(GENATTR_nonnull); + } + for (const auto *nonNull : funcDecl.specific_attrs()) { + if (nonNull->args_size() == 0) { + // Lack of attribute parameters means that all of the pointer parameters are + // implicitly marked as nonnull. 
+ for (auto paramDecl : astFunc.GetParamDecls()) { + if (paramDecl->GetTypeDesc().front()->IsMIRPtrType()) { + paramDecl->SetAttr(GENATTR_nonnull); + } + } + break; + } + for (const clang::ParamIdx ¶mIdx : nonNull->args()) { + // The clang ensures that nonnull attribute only applies to pointer parameter + unsigned int idx = paramIdx.getASTIndex(); + if (idx >= astFunc.GetParamDecls().size()) { + continue; + } + astFunc.GetParamDecls()[idx]->SetAttr(GENATTR_nonnull); + } + } +} + +ASTDecl *ASTParser::ProcessDeclFieldDecl(MapleAllocator &allocator, const clang::FieldDecl &decl) { + ASTField *astField = static_cast(ASTDeclsBuilder::GetASTDecl(decl.getID())); + if (astField != nullptr) { + return astField; + } + clang::QualType qualType = decl.getType(); + std::string fieldName = astFile->GetMangledName(decl); + bool isAnonymousField = false; + if (fieldName.empty()) { + isAnonymousField = true; + fieldName = astFile->GetOrCreateMappedUnnamedName(decl); + } + CHECK_FATAL(!fieldName.empty(), "fieldName is empty"); + MIRType *fieldType = astFile->CvtType(qualType); + if (fieldType == nullptr) { + return nullptr; + } + if (decl.isBitField()) { + unsigned bitSize = decl.getBitWidthValue(*(astFile->GetContext())); + MIRBitFieldType mirBFType(static_cast(bitSize), fieldType->GetPrimType()); + auto bfTypeIdx = GlobalTables::GetTypeTable().GetOrCreateMIRType(&mirBFType); + fieldType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(bfTypeIdx); + } + GenericAttrs attrs; + astFile->CollectFieldAttrs(decl, attrs, kNone); + // one elem vector type + if (LibAstFile::IsOneElementVector(qualType)) { + attrs.SetAttr(GENATTR_oneelem_simd); + } + auto fieldDecl = ASTDeclsBuilder::ASTFieldBuilder( + allocator, fileName, fieldName, MapleVector({fieldType}, allocator.Adapter()), + attrs, decl.getID(), isAnonymousField); + clang::CharUnits alignment = astFile->GetContext()->getDeclAlign(&decl); + clang::CharUnits unadjust = astFile->GetContext()->toCharUnitsFromBits( + astFile->GetContext()->getTypeUnadjustedAlign(qualType)); + uint32 maxAlign = std::max(alignment.getQuantity(), unadjust.getQuantity()); + fieldDecl->SetAlign(maxAlign); + const auto *valueDecl = llvm::dyn_cast(&decl); + if (valueDecl != nullptr) { + ProcessNonnullFuncPtrAttrs(allocator, *valueDecl, *fieldDecl); + ProcessBoundaryFuncPtrAttrs(allocator, *valueDecl, *fieldDecl); + } + if (FEOptions::GetInstance().IsDbgFriendly()) { + MIRType *sourceType = astFile->CvtSourceType(qualType); + fieldDecl->SetSourceType(sourceType); + } + return fieldDecl; +} + +void ASTParser::SetInitExprForASTVar(MapleAllocator &allocator, const clang::VarDecl &varDecl, + const GenericAttrs &attrs, ASTVar &astVar) { + bool isStaticStorageVar = (varDecl.getStorageDuration() == clang::SD_Static || attrs.GetAttr(GENATTR_tls_static)); + astVar.SetSrcLoc(astFile->GetLOC(varDecl.getLocation())); + auto initExpr = varDecl.getInit(); + auto astInitExpr = ProcessExpr(allocator, initExpr); + ASSERT_NOT_NULL(astInitExpr); + if (initExpr->getStmtClass() == clang::Stmt::InitListExprClass && astInitExpr->GetASTOp() == kASTOpInitListExpr) { + static_cast(astInitExpr)->SetInitListVarName(astVar.GenerateUniqueVarName()); + } + EvaluatedFlag flag = astInitExpr->GetEvaluatedFlag(); + // For thoese global and static local variables initialized with zero or the init list only + // has zero, they won't be set initExpr and will be stored into .bss section instead of .data section + // to reduce code size. 
+ if (!isStaticStorageVar || flag != kEvaluatedAsZero) { + astVar.SetInitExpr(astInitExpr); + } else { + astVar.SetAttr(GENATTR_static_init_zero); // used to distinguish with uninitialized vars + } +} + +void ASTParser::SetAlignmentForASTVar(const clang::VarDecl &varDecl, ASTVar &astVar) const { + int64 naturalAlignment = astFile->GetContext()->toCharUnitsFromBits( + astFile->GetContext()->getTypeUnadjustedAlign(varDecl.getType())).getQuantity(); + // Get alignment from the decl + uint32 alignmentBits = varDecl.getMaxAlignment(); + if (alignmentBits != 0) { + uint32 alignment = alignmentBits / 8; + if (alignment > naturalAlignment) { + astVar.SetAlign(alignment); + } + } + // Get alignment from the type + alignmentBits = astFile->GetContext()->getTypeAlignIfKnown(varDecl.getType()); + if (alignmentBits != 0) { + uint32 alignment = alignmentBits / 8; + if (alignment > astVar.GetAlign() && alignment > naturalAlignment) { + astVar.SetAlign(alignment); + } + } +} + +ASTDecl *ASTParser::ProcessDeclVarDecl(MapleAllocator &allocator, const clang::VarDecl &varDecl) { + ASTVar *astVar = static_cast(ASTDeclsBuilder::GetASTDecl(varDecl.getID())); + if (astVar != nullptr) { + return astVar; + } + std::string varName = astFile->GetMangledName(varDecl); + if (varName.empty()) { + return nullptr; + } + clang::QualType qualType = varDecl.getType(); + MIRType *varType = astFile->CvtType(qualType); + if (varType == nullptr) { + return nullptr; + } + GenericAttrs attrs; + astFile->CollectVarAttrs(varDecl, attrs, kNone); + // for inline optimize + if (attrs.GetAttr(GENATTR_static) && FEOptions::GetInstance().GetFuncInlineSize() != 0) { + varName = varName + astFile->GetAstFileNameHashStr(); + } + if (varType->IsMIRIncompleteStructType() && !attrs.GetAttr(GENATTR_extern)) { + FE_ERR(kLncErr, astFile->GetLOC(varDecl.getLocation()), "tentative definition of variable '%s' has incomplete" + " struct type 'struct '%s''", varName.c_str(), varType->GetName().c_str()); + } + astVar = ASTDeclsBuilder::ASTVarBuilder( + allocator, fileName, varName, MapleVector({varType}, allocator.Adapter()), attrs, varDecl.getID()); + if (FEOptions::GetInstance().IsDbgFriendly()) { + MIRType *sourceType = astFile->CvtSourceType(qualType); + astVar->SetSourceType(sourceType); + } + astVar->SetIsMacro(varDecl.getLocation().isMacroID()); + clang::SectionAttr *sa = varDecl.getAttr(); + if (sa != nullptr && !sa->isImplicit()) { + astVar->SetSectionAttr(sa->getName().str()); + } + clang::AsmLabelAttr *ala = varDecl.getAttr(); + if (ala != nullptr) { + astVar->SetAsmAttr(ala->getLabel().str()); + } + if (varDecl.hasInit()) { + SetInitExprForASTVar(allocator, varDecl, attrs, *astVar); + } + if (llvm::isa(qualType.getCanonicalType())) { + ASTExpr *lenExpr = BuildExprToComputeSizeFromVLA(allocator, qualType.getCanonicalType()); + astVar->SetVariableArrayExpr(lenExpr); + astVar->SetBoundaryLenExpr(lenExpr); + } + if (!varDecl.getType()->isIncompleteType()) { + SetAlignmentForASTVar(varDecl, *astVar); + } + const auto *valueDecl = llvm::dyn_cast(&varDecl); + if (valueDecl != nullptr) { + ProcessNonnullFuncPtrAttrs(allocator, *valueDecl, *astVar); + ProcessBoundaryFuncPtrAttrs(allocator, *valueDecl, *astVar); + } + ProcessBoundaryVarAttrs(allocator, varDecl, *astVar); + return astVar; +} + +ASTDecl *ASTParser::ProcessDeclParmVarDecl(MapleAllocator &allocator, const clang::ParmVarDecl &parmVarDecl) { + ASTVar *parmVar = static_cast(ASTDeclsBuilder::GetASTDecl(parmVarDecl.getID())); + if (parmVar != nullptr) { + return parmVar; + } + const 
clang::QualType parmQualType = parmVarDecl.getType(); + std::string parmName = parmVarDecl.getNameAsString(); + if (parmName.length() == 0) { + parmName = FEUtils::GetSequentialName("arg|"); + } + MIRType *paramType = astFile->CvtType(parmQualType); + if (paramType == nullptr) { + return nullptr; + } + // C99 6.5.2.2. + // If the expression that denotes the called function has a type + // that does not include a prototype, the integer promotions are + // performed on each argument, and arguments that have type float + // are promoted to double. + PrimType promotedType = PTY_void; + if (parmVarDecl.isKNRPromoted()) { + promotedType = paramType->GetPrimType(); + paramType = FEUtils::IsInteger(paramType->GetPrimType()) ? + GlobalTables::GetTypeTable().GetInt32() : GlobalTables::GetTypeTable().GetDouble(); + } + GenericAttrs attrs; + astFile->CollectAttrs(parmVarDecl, attrs, kNone); + if (LibAstFile::IsOneElementVector(parmQualType)) { + attrs.SetAttr(GENATTR_oneelem_simd); + } + parmVar = ASTDeclsBuilder::ASTVarBuilder(allocator, + fileName, + parmName, + MapleVector({paramType}, allocator.Adapter()), + attrs, + parmVarDecl.getID()); + parmVar->SetIsParam(true); + parmVar->SetPromotedType(promotedType); + if (FEOptions::GetInstance().IsDbgFriendly()) { + MIRType *sourceType = astFile->CvtSourceType(parmQualType); + parmVar->SetSourceType(sourceType); + } + const auto *valueDecl = llvm::dyn_cast(&parmVarDecl); + if (valueDecl != nullptr) { + ProcessNonnullFuncPtrAttrs(allocator, *valueDecl, *parmVar); + ProcessBoundaryFuncPtrAttrs(allocator, *valueDecl, *parmVar); + } + return parmVar; +} + +ASTDecl *ASTParser::ProcessDeclFileScopeAsmDecl(MapleAllocator &allocator, const clang::FileScopeAsmDecl &asmDecl) { + ASTFileScopeAsm *astAsmDecl = allocator.GetMemPool()->New(allocator, fileName); + astAsmDecl->SetAsmStr(asmDecl.getAsmString()->getString().str()); + return astAsmDecl; +} + +ASTDecl *ASTParser::ProcessDeclEnumDecl(MapleAllocator &allocator, const clang::EnumDecl &rawEnumDecl) { + const clang::EnumDecl *enumDecl = rawEnumDecl.getDefinition(); + if (enumDecl == nullptr) { + enumDecl = &rawEnumDecl; + } + ASTEnumDecl *astEnum = static_cast(ASTDeclsBuilder::GetASTDecl(enumDecl->getID())); + if (astEnum != nullptr) { + return astEnum; + } + GenericAttrs attrs; + astFile->CollectAttrs(*enumDecl, attrs, kNone); + std::string enumName = astFile->GetDeclName(*enumDecl); + MIRType *mirType; + if (enumDecl->getPromotionType().isNull()) { + mirType = GlobalTables::GetTypeTable().GetInt32(); + } else { + mirType = astFile->CvtType(enumDecl->getPromotionType()); + } + astEnum = ASTDeclsBuilder::ASTLocalEnumDeclBuilder(allocator, fileName, enumName, + MapleVector({mirType}, allocator.Adapter()), attrs, enumDecl->getID()); + TraverseDecl(enumDecl, [&astEnum, &allocator, this](clang::Decl *child) { + ASSERT_NOT_NULL(child); + CHECK_FATAL(child->getKind() == clang::Decl::EnumConstant, "Unsupported decl kind: %u", child->getKind()); + astEnum->PushConstant(static_cast(ProcessDecl(allocator, *child))); + }); + Loc l = astFile->GetLOC(enumDecl->getLocation()); + astEnum->SetSrcLoc(l); + auto itor = std::find(astEnums.cbegin(), astEnums.cend(), astEnum); + if (itor == astEnums.end()) { + (void)astEnums.emplace_back(astEnum); + } + return astEnum; +} + +ASTDecl *ASTParser::ProcessDeclEnumConstantDecl(MapleAllocator &allocator, const clang::EnumConstantDecl &decl) { + ASTEnumConstant *astConst = static_cast(ASTDeclsBuilder::GetASTDecl(decl.getID())); + if (astConst != nullptr) { + return astConst; + } + 
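// Collect the constant's attributes, convert its type, and record the evaluated initializer value. +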
GenericAttrs attrs; + astFile->CollectAttrs(decl, attrs, kNone); + const std::string &varName = decl.getNameAsString(); + MIRType *mirType = astFile->CvtType(decl.getType()); + CHECK_NULL_FATAL(mirType); + astConst = ASTDeclsBuilder::ASTEnumConstBuilder( + allocator, fileName, varName, MapleVector({mirType}, allocator.Adapter()), attrs, decl.getID()); + IntVal val(decl.getInitVal().getExtValue(), mirType->GetPrimType()); + astConst->SetValue(val); + return astConst; +} + +ASTDecl *ASTParser::ProcessDeclTypedefDecl(MapleAllocator &allocator, const clang::TypedefDecl &decl) { + if (FEOptions::GetInstance().IsDbgFriendly()) { + ASTTypedefDecl *astTypedef = static_cast(ASTDeclsBuilder::GetASTDecl(decl.getID())); + if (astTypedef != nullptr) { + return astTypedef; + } + std::string typedefName = astFile->GetDeclName(decl); + GenericAttrs attrs; + astFile->CollectAttrs(decl, attrs, kNone); + clang::QualType underlyTy = decl.getUnderlyingType(); + MIRType *type = astFile->CvtType(underlyTy, true); + CHECK_NULL_FATAL(type); + astTypedef = ASTDeclsBuilder::ASTTypedefBuilder( + allocator, fileName, typedefName, MapleVector({type}, allocator.Adapter()), attrs, decl.getID()); + const clang::TypedefType *underlyingTypedefType = llvm::dyn_cast(underlyTy); + if (underlyingTypedefType != nullptr) { + auto *subTypedeDecl = static_cast(ProcessDecl(allocator, *underlyingTypedefType->getDecl())); + astTypedef->SetSubTypedefDecl(subTypedeDecl); + } + if (decl.isDefinedOutsideFunctionOrMethod()) { + astTypedef->SetGlobal(true); + } + return astTypedef; + } + clang::QualType underlyCanonicalTy = decl.getCanonicalDecl()->getUnderlyingType().getCanonicalType(); + if (underlyCanonicalTy->isRecordType()) { + const auto *recordType = llvm::cast(underlyCanonicalTy); + clang::RecordDecl *recordDecl = recordType->getDecl(); + if (recordDecl->isImplicit()) { + return ProcessDecl(allocator, *recordDecl); + } + } + return nullptr; +} + +ASTDecl *ASTParser::ProcessDeclLabelDecl(MapleAllocator &allocator, const clang::LabelDecl &decl) { + ASTDecl *astDecl = static_cast(ASTDeclsBuilder::GetASTDecl(decl.getID())); + if (astDecl != nullptr) { + return astDecl; + } + std::string varName = astFile->GetMangledName(decl); + CHECK_FATAL(!varName.empty(), "label string is null"); + varName = FEUtils::GetSequentialName0(varName + "@", FEUtils::GetSequentialNumber()); + MapleVector typeDescVec(allocator.Adapter()); + astDecl = ASTDeclsBuilder::ASTDeclBuilder(allocator, fileName, varName, typeDescVec, decl.getID()); + return astDecl; +} + +bool ASTParser::RetrieveStructs(MapleAllocator &allocator) { + for (auto &decl : std::as_const(recordDecles)) { + clang::RecordDecl *recDecl = llvm::cast(decl->getCanonicalDecl()); + if (!recDecl->isCompleteDefinition()) { + clang::RecordDecl *recDeclDef = recDecl->getDefinition(); + if (recDeclDef == nullptr) { + continue; + } else { + recDecl = recDeclDef; + } + } + ASTStruct *curStructOrUnion = static_cast(ProcessDecl(allocator, *recDecl)); + if (curStructOrUnion == nullptr) { + return false; + } + auto itor = std::find(astStructs.cbegin(), astStructs.cend(), curStructOrUnion); + if (itor != astStructs.end()) { + } else { + astStructs.emplace_back(curStructOrUnion); + } + } + return true; +} + +bool ASTParser::RetrieveFuncs(MapleAllocator &allocator) { + for (auto &func : std::as_const(funcDecles)) { + clang::FunctionDecl *funcDecl = llvm::cast(func); + CHECK_NULL_FATAL(funcDecl); + if (funcDecl->isDefined()) { + clang::SafeScopeSpecifier spec = funcDecl->getSafeSpecifier(); + funcDecl = 
funcDecl->getDefinition(); + if (funcDecl->getSafeSpecifier() != spec) { + if (funcDecl->getSafeSpecifier() != clang::SS_None && spec != clang::SS_None) { + std::string funcName = astFile->GetMangledName(*funcDecl); + Loc loc = astFile->GetLOC(funcDecl->getLocation()); + FE_ERR(kLncWarn, loc, "The function %s declaration and definition security attributes " + "are inconsistent.", funcName.c_str()); + } else if (funcDecl->getSafeSpecifier() == clang::SS_None) { + funcDecl->setSafeSpecifier(spec); + } + } + } + ASTFunc *af = static_cast(ProcessDecl(allocator, *funcDecl)); + if (af == nullptr) { + return false; + } + af->SetGlobal(true); + astFuncs.emplace_back(af); + } + return true; +} + +// seperate MP with astparser +bool ASTParser::RetrieveGlobalVars(MapleAllocator &allocator) { + for (auto &decl : std::as_const(globalVarDecles)) { + clang::VarDecl *varDecl = llvm::cast(decl); + ASTVar *val = static_cast(ProcessDecl(allocator, *varDecl)); + if (val == nullptr) { + return false; + } + astVars.emplace_back(val); + } + return true; +} + +bool ASTParser::RetrieveFileScopeAsms(MapleAllocator &allocator) { + for (auto &decl : std::as_const(globalFileScopeAsm)) { + clang::FileScopeAsmDecl *fileScopeAsmDecl = llvm::cast(decl); + ASTFileScopeAsm *asmDecl = static_cast(ProcessDecl(allocator, *fileScopeAsmDecl)); + if (asmDecl == nullptr) { + return false; + } + astFileScopeAsms.emplace_back(asmDecl); + } + return true; +} + +bool ASTParser::RetrieveGlobalTypeDef(MapleAllocator &allocator) { + for (auto &gTypeDefDecl : std::as_const(globalTypeDefDecles)) { + if (gTypeDefDecl->isImplicit()) { + continue; + } + (void)ProcessDecl(allocator, *gTypeDefDecl); + } + return true; +} + +bool ASTParser::RetrieveEnums(MapleAllocator &allocator) { + for (auto &decl : std::as_const(globalEnumDecles)) { + clang::EnumDecl *enumDecl = llvm::cast(decl->getCanonicalDecl()); + ASTEnumDecl *astEnum = static_cast(ProcessDecl(allocator, *enumDecl)); + if (astEnum == nullptr) { + return false; + } + (void)astEnums.emplace_back(astEnum); + } + return true; +} + +const std::string ASTParser::GetSourceFileName() const { + return fileName.c_str() == nullptr ? "" : fileName.c_str(); +} + +const uint32 ASTParser::GetFileIdx() const { + return fileIdx; +} + +void ASTParser::TraverseDecl(const clang::Decl *decl, std::function const &functor) const { + if (decl == nullptr) { + return; + } + ASSERT_NOT_NULL(clang::dyn_cast(decl)); + for (auto *child : clang::dyn_cast(decl)->decls()) { + if (child != nullptr) { + functor(child); + } + } +} +} // namespace maple diff --git a/src/hir2mpl/ast_input/clang/src/ast_parser_builting_func.cpp b/src/hir2mpl/ast_input/clang/src/ast_parser_builting_func.cpp new file mode 100644 index 0000000000000000000000000000000000000000..e02610b225814e74a5bf09b5828cb941af8024b7 --- /dev/null +++ b/src/hir2mpl/ast_input/clang/src/ast_parser_builting_func.cpp @@ -0,0 +1,1060 @@ +/* + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "ast_expr.h" +#include "ast_decl.h" +#include "ast_decl_builder.h" +#include "ast_interface.h" +#include "ast_util.h" +#include "ast_input.h" +#include "ast_stmt.h" +#include "ast_parser.h" +#include "feir_stmt.h" +#include "feir_builder.h" +#include "fe_utils_ast.h" +#include "feir_type_helper.h" +#include "fe_manager.h" +#include "mir_module.h" +#include "mpl_logging.h" +#include "fe_macros.h" + +namespace maple { +std::unordered_map ASTCallExpr::InitBuiltinFuncPtrMap() { + std::unordered_map ans; +#define BUILTIN_FUNC_EMIT(funcName, FuncPtrBuiltinFunc) \ + ans[funcName] = FuncPtrBuiltinFunc; +#include "builtin_func_emit.def" +#undef BUILTIN_FUNC_EMIT + // vector builtinfunc +#define DEF_MIR_INTRINSIC(STR, NAME, INTRN_CLASS, RETURN_TYPE, ...) \ + ans["__builtin_mpl_"#STR] = &ASTCallExpr::EmitBuiltin##STR; +#include "intrinsic_vector.def" +#undef DEF_MIR_INTRINSIC + + return ans; +} + +UniqueFEIRExpr ASTCallExpr::CreateIntrinsicopForC(std::list &stmts, + MIRIntrinsicID argIntrinsicID, bool genTempVar) const { + auto feTy = std::make_unique(*mirType); + std::vector> argOpnds; + for (auto arg : args) { + argOpnds.push_back(arg->Emit2FEExpr(stmts)); + } + auto feExpr = std::make_unique(std::move(feTy), argIntrinsicID, argOpnds); + if (mirType->GetPrimType() == PTY_void) { + std::list feExprs; + feExprs.emplace_back(std::move(feExpr)); + UniqueFEIRStmt evalStmt = std::make_unique(OP_eval, std::move(feExprs)); + stmts.emplace_back(std::move(evalStmt)); + return nullptr; + } else { + if (!genTempVar) { + return feExpr; + } + std::string tmpName = FEUtils::GetSequentialName("intrinsicop_var_"); + UniqueFEIRVar tmpVar = FEIRBuilder::CreateVarNameForC(tmpName, *mirType); + UniqueFEIRStmt dAssign = std::make_unique(tmpVar->Clone(), std::move(feExpr), 0); + stmts.emplace_back(std::move(dAssign)); + auto dread = FEIRBuilder::CreateExprDRead(tmpVar->Clone()); + return dread; + } +} + +UniqueFEIRExpr ASTCallExpr::CreateIntrinsicCallAssignedForC(std::list &stmts, + MIRIntrinsicID argIntrinsicID) const { + std::unique_ptr> argExprList = std::make_unique>(); + for (auto arg : args) { + (void)argExprList->emplace_back(arg->Emit2FEExpr(stmts)); + } + if (!IsNeedRetExpr()) { + auto stmt = std::make_unique(argIntrinsicID, nullptr, nullptr, + std::move(argExprList)); + stmts.emplace_back(std::move(stmt)); + return nullptr; + } + UniqueFEIRVar retVar = FEIRBuilder::CreateVarNameForC(GetRetVarName(), *mirType, false); + auto stmt = std::make_unique(argIntrinsicID, nullptr, retVar->Clone(), + std::move(argExprList)); + stmts.emplace_back(std::move(stmt)); + UniqueFEIRExpr dread = FEIRBuilder::CreateExprDRead(std::move(retVar)); + return dread; +} + +UniqueFEIRExpr ASTCallExpr::CreateBinaryExpr(std::list &stmts, Opcode op) const { + auto feTy = std::make_unique(*mirType); + auto arg1 = args[0]->Emit2FEExpr(stmts); + auto arg2 = args[1]->Emit2FEExpr(stmts); + return std::make_unique(std::move(feTy), op, std::move(arg1), std::move(arg2)); +} + +UniqueFEIRExpr ASTCallExpr::ProcessBuiltinFunc(std::list &stmts, bool &isFinish) const { + // process a kind of builtinFunc + std::string prefix = "__builtin_mpl_vector_load"; + if (GetFuncName().compare(0, prefix.size(), prefix) == 0) { + return EmitBuiltinVectorLoad(stmts, isFinish); + } + prefix = "__builtin_mpl_vector_store"; + if (GetFuncName().compare(0, prefix.size(), prefix) == 0) { + return EmitBuiltinVectorStore(stmts, isFinish); + } + prefix = "__builtin_mpl_vector_zip"; + if (GetFuncName().compare(0, prefix.size(), prefix) == 0) { + return 
EmitBuiltinVectorZip(stmts, isFinish); + } + prefix = "__builtin_mpl_vector_shli"; + if (GetFuncName().compare(0, prefix.size(), prefix) == 0) { + return EmitBuiltinVectorShli(stmts, isFinish); + } + prefix = "__builtin_mpl_vector_shri"; + if (GetFuncName().compare(0, prefix.size(), prefix) == 0) { + return EmitBuiltinVectorShri(stmts, isFinish); + } + prefix = "__builtin_mpl_vector_shru"; + if (GetFuncName().compare(0, prefix.size(), prefix) == 0) { + return EmitBuiltinVectorShru(stmts, isFinish); + } + // process a single builtinFunc + auto ptrFunc = builtingFuncPtrMap.find(GetFuncName()); + if (ptrFunc != builtingFuncPtrMap.end()) { + isFinish = true; + return EmitBuiltinFunc(stmts); + } + isFinish = false; + if (FEOptions::GetInstance().GetDumpLevel() >= FEOptions::kDumpLevelInfo) { + prefix = "__builtin"; + if (GetFuncName().compare(0, prefix.size(), prefix) == 0) { + FE_INFO_LEVEL(FEOptions::kDumpLevelInfo, "%s:%d BuiltinFunc (%s) has not been implemented", + FEManager::GetModule().GetFileNameFromFileNum(GetSrcFileIdx()).c_str(), GetSrcFileLineNum(), + GetFuncName().c_str()); + } + } + return nullptr; +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinFunc(std::list &stmts) const { + return (this->*(builtingFuncPtrMap[GetFuncName()]))(stmts); +} + +#define DEF_MIR_INTRINSIC(STR, NAME, INTRN_CLASS, RETURN_TYPE, ...) \ +UniqueFEIRExpr ASTCallExpr::EmitBuiltin##STR(std::list &stmts) const { \ + auto feType = FEIRTypeHelper::CreateTypeNative(*mirType); \ + std::vector> argOpnds; \ + for (auto arg : args) { \ + argOpnds.push_back(arg->Emit2FEExpr(stmts)); \ + } \ + return std::make_unique(std::move(feType), INTRN_##STR, argOpnds); \ +} +#include "intrinsic_vector.def" +#undef DEF_MIR_INTRINSIC + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinVectorLoad(std::list &stmts, bool &isFinish) const { + auto argExpr = args[0]->Emit2FEExpr(stmts); + UniqueFEIRType type = FEIRTypeHelper::CreateTypeNative(*mirType); + UniqueFEIRType ptrType = FEIRTypeHelper::CreateTypeNative( + *GlobalTables::GetTypeTable().GetOrCreatePointerType(*mirType)); + isFinish = true; + return FEIRBuilder::CreateExprIRead(std::move(type), std::move(ptrType), std::move(argExpr)); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinVectorStore(std::list &stmts, bool &isFinish) const { + auto arg1Expr = args[0]->Emit2FEExpr(stmts); + auto arg2Expr = args[1]->Emit2FEExpr(stmts); + UniqueFEIRType type = FEIRTypeHelper::CreateTypeNative( + *GlobalTables::GetTypeTable().GetOrCreatePointerType(*args[1]->GetType())); + auto stmt = FEIRBuilder::CreateStmtIAssign(std::move(type), std::move(arg1Expr), std::move(arg2Expr)); + (void)stmts.emplace_back(std::move(stmt)); + isFinish = true; + return nullptr; +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinVectorShli(std::list &stmts, bool &isFinish) const { + isFinish = true; + UniqueFEIRType type = FEIRTypeHelper::CreateTypeNative(*args[0]->GetType()); + auto arg1Expr = args[0]->Emit2FEExpr(stmts); + auto arg2Expr = args[1]->Emit2FEExpr(stmts); + return FEIRBuilder::CreateExprBinary(std::move(type), OP_shl, std::move(arg1Expr), std::move(arg2Expr)); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinVectorShri(std::list &stmts, bool &isFinish) const { + isFinish = true; + UniqueFEIRType type = FEIRTypeHelper::CreateTypeNative(*args[0]->GetType()); + auto arg1Expr = args[0]->Emit2FEExpr(stmts); + auto arg2Expr = args[1]->Emit2FEExpr(stmts); + return FEIRBuilder::CreateExprBinary(std::move(type), OP_ashr, std::move(arg1Expr), std::move(arg2Expr)); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinVectorShru(std::list &stmts, 
bool &isFinish) const { + isFinish = true; + UniqueFEIRType type = FEIRTypeHelper::CreateTypeNative(*args[0]->GetType()); + auto arg1Expr = args[0]->Emit2FEExpr(stmts); + auto arg2Expr = args[1]->Emit2FEExpr(stmts); + return FEIRBuilder::CreateExprBinary(std::move(type), OP_lshr, std::move(arg1Expr), std::move(arg2Expr)); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinVectorZip(std::list &stmts, bool &isFinish) const { + std::unique_ptr> argExprList = std::make_unique>(); + for (auto arg : args) { + UniqueFEIRExpr expr = arg->Emit2FEExpr(stmts); + argExprList->emplace_back(std::move(expr)); + } + CHECK_NULL_FATAL(mirType); + std::string retName = FEUtils::GetSequentialName("vector_zip_retvar_"); + UniqueFEIRVar retVar = FEIRBuilder::CreateVarNameForC(retName, *mirType); + +#define VECTOR_INTRINSICCALL_TYPE(OP_NAME, VECTY) \ + if (FEUtils::EndsWith(GetFuncName(), #VECTY)) { \ + stmt = std::make_unique( \ + INTRN_vector_##OP_NAME##_##VECTY, nullptr, retVar->Clone(), std::move(argExprList)); \ + } + UniqueFEIRStmt stmt; + + VECTOR_INTRINSICCALL_TYPE(zip, v2i32) + else VECTOR_INTRINSICCALL_TYPE(zip, v4i16) + else VECTOR_INTRINSICCALL_TYPE(zip, v8i8) + else VECTOR_INTRINSICCALL_TYPE(zip, v2u32) + else VECTOR_INTRINSICCALL_TYPE(zip, v4u16) + else VECTOR_INTRINSICCALL_TYPE(zip, v8u8) + else VECTOR_INTRINSICCALL_TYPE(zip, v2f32) + + stmts.emplace_back(std::move(stmt)); + isFinish = true; + return FEIRBuilder::CreateExprDRead(std::move(retVar)); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinVaStart(std::list &stmts) const { + // args + auto exprArgList = std::make_unique>(); + for (auto arg : args) { + UniqueFEIRExpr expr = arg->Emit2FEExpr(stmts); + exprArgList->emplace_back(std::move(expr)); + } + // addrof va_list instead of dread va_list + exprArgList->front()->SetAddrof(true); + std::unique_ptr stmt = std::make_unique( + INTRN_C_va_start, nullptr /* type */, nullptr /* retVar */, std::move(exprArgList)); + stmts.emplace_back(std::move(stmt)); + return nullptr; +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinVaEnd(std::list &stmts) const { + // args + ASSERT(args.size() == 1, "va_end expects 1 arguments"); + std::list exprArgList; + for (auto arg : args) { + UniqueFEIRExpr expr = arg->Emit2FEExpr(stmts); + // addrof va_list instead of dread va_list + expr->SetAddrof(true); + exprArgList.emplace_back(std::move(expr)); + } + auto stmt = std::make_unique(OP_eval, std::move(exprArgList)); + stmts.emplace_back(std::move(stmt)); + return nullptr; +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinVaCopy(std::list &stmts) const { + // args + auto exprArgList = std::make_unique>(); + UniqueFEIRType vaListType; + for (auto arg : args) { + UniqueFEIRExpr expr = arg->Emit2FEExpr(stmts); + // addrof va_list instead of dread va_list + expr->SetAddrof(true); + vaListType = expr->GetType()->Clone(); + exprArgList->emplace_back(std::move(expr)); + } + // Add the size of the va_list structure as the size to memcpy. 
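+ // __builtin_va_copy is lowered to an INTRN_C_memcpy intrinsic call; the copy length is the size of the generated va_list type.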
+ size_t elemSizes = vaListType->GenerateMIRTypeAuto()->GetSize(); + CHECK_FATAL(elemSizes <= INT_MAX, "Too large elem size"); + UniqueFEIRExpr sizeExpr = FEIRBuilder::CreateExprConstI32(static_cast(elemSizes)); + exprArgList->emplace_back(std::move(sizeExpr)); + std::unique_ptr stmt = std::make_unique( + INTRN_C_memcpy, nullptr /* type */, nullptr /* retVar */, std::move(exprArgList)); + stmts.emplace_back(std::move(stmt)); + return nullptr; +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinPrefetch(std::list &stmts) const { + // __builtin_prefetch is not supported, only parsing args including stmts + for (size_t i = 0; i <= args.size() - 1; ++i) { + (void)args[i]->Emit2FEExpr(stmts); + } + return nullptr; +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinCtz(std::list &stmts) const { + return CreateIntrinsicopForC(stmts, INTRN_C_ctz32); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinCtzl(std::list &stmts) const { + return CreateIntrinsicopForC(stmts, INTRN_C_ctz64); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinClz(std::list &stmts) const { + return CreateIntrinsicopForC(stmts, INTRN_C_clz32); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinClzl(std::list &stmts) const { + return CreateIntrinsicopForC(stmts, INTRN_C_clz64); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinPopcount(std::list &stmts) const { + return CreateIntrinsicopForC(stmts, INTRN_C_popcount32); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinPopcountl(std::list &stmts) const { + return CreateIntrinsicopForC(stmts, INTRN_C_popcount64); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinPopcountll(std::list &stmts) const { + return CreateIntrinsicopForC(stmts, INTRN_C_popcount64); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinParity(std::list &stmts) const { + return CreateIntrinsicopForC(stmts, INTRN_C_parity32); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinParityl(std::list &stmts) const { + return CreateIntrinsicopForC(stmts, INTRN_C_parity64); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinParityll(std::list &stmts) const { + return CreateIntrinsicopForC(stmts, INTRN_C_parity64); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinClrsb(std::list &stmts) const { + return CreateIntrinsicopForC(stmts, INTRN_C_clrsb32); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinClrsbl(std::list &stmts) const { + return CreateIntrinsicopForC(stmts, INTRN_C_clrsb64); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinClrsbll(std::list &stmts) const { + return CreateIntrinsicopForC(stmts, INTRN_C_clrsb64); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinFfs(std::list &stmts) const { + return CreateIntrinsicopForC(stmts, INTRN_C_ffs); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinFfsl(std::list &stmts) const { + return CreateIntrinsicopForC(stmts, INTRN_C_ffs); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinFfsll(std::list &stmts) const { + return CreateIntrinsicopForC(stmts, INTRN_C_ffs); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinIsAligned(std::list &stmts) const { + return CreateIntrinsicopForC(stmts, INTRN_C_isaligned); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinAlignUp(std::list &stmts) const { + return CreateIntrinsicopForC(stmts, INTRN_C_alignup); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinAlignDown(std::list &stmts) const { + return CreateIntrinsicopForC(stmts, INTRN_C_aligndown); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncAddAndFetch8(std::list &stmts) const { + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_add_and_fetch_8); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncAddAndFetch4(std::list &stmts) const { + return 
CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_add_and_fetch_4); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncAddAndFetch2(std::list &stmts) const { + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_add_and_fetch_2); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncAddAndFetch1(std::list &stmts) const { + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_add_and_fetch_1); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncSubAndFetch8(std::list &stmts) const { + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_sub_and_fetch_8); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncSubAndFetch4(std::list &stmts) const { + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_sub_and_fetch_4); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncSubAndFetch2(std::list &stmts) const { + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_sub_and_fetch_2); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncSubAndFetch1(std::list &stmts) const { + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_sub_and_fetch_1); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncFetchAndSub8(std::list &stmts) const { + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_fetch_and_sub_8); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncFetchAndSub4(std::list &stmts) const { + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_fetch_and_sub_4); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncFetchAndSub2(std::list &stmts) const { + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_fetch_and_sub_2); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncFetchAndSub1(std::list &stmts) const { + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_fetch_and_sub_1); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncFetchAndAdd8(std::list &stmts) const { + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_fetch_and_add_8); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncFetchAndAdd4(std::list &stmts) const { + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_fetch_and_add_4); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncFetchAndAdd2(std::list &stmts) const { + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_fetch_and_add_2); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncFetchAndAdd1(std::list &stmts) const { + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_fetch_and_add_1); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncValCompareAndSwap8(std::list &stmts) const { + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_val_compare_and_swap_8); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncValCompareAndSwap4(std::list &stmts) const { + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_val_compare_and_swap_4); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncValCompareAndSwap2(std::list &stmts) const { + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_val_compare_and_swap_2); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncValCompareAndSwap1(std::list &stmts) const { + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_val_compare_and_swap_1); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncLockRelease8(std::list &stmts) const { + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_lock_release_8); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncLockRelease4(std::list &stmts) const { + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_lock_release_4); +} + +UniqueFEIRExpr 
ASTCallExpr::EmitBuiltinSyncLockRelease2(std::list &stmts) const { + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_lock_release_2); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncLockRelease1(std::list &stmts) const { + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_lock_release_1); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncBoolCompareAndSwap8(std::list &stmts) const { + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_bool_compare_and_swap_8); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncBoolCompareAndSwap4(std::list &stmts) const { + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_bool_compare_and_swap_4); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncBoolCompareAndSwap2(std::list &stmts) const { + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_bool_compare_and_swap_2); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncBoolCompareAndSwap1(std::list &stmts) const { + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_bool_compare_and_swap_1); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncLockTestAndSet8(std::list &stmts) const { + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_lock_test_and_set_8); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncLockTestAndSet4(std::list &stmts) const { + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_lock_test_and_set_4); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncLockTestAndSet2(std::list &stmts) const { + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_lock_test_and_set_2); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncLockTestAndSet1(std::list &stmts) const { + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_lock_test_and_set_1); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncFetchAndAnd1(std::list &stmts) const { + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_fetch_and_and_1); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncFetchAndAnd2(std::list &stmts) const { + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_fetch_and_and_2); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncFetchAndAnd4(std::list &stmts) const { + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_fetch_and_and_4); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncFetchAndAnd8(std::list &stmts) const { + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_fetch_and_and_8); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncFetchAndOr1(std::list &stmts) const { + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_fetch_and_or_1); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncFetchAndOr2(std::list &stmts) const { + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_fetch_and_or_2); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncFetchAndOr4(std::list &stmts) const { + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_fetch_and_or_4); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncFetchAndOr8(std::list &stmts) const { + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_fetch_and_or_8); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncFetchAndXor1(std::list &stmts) const { + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_fetch_and_xor_1); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncFetchAndXor2(std::list &stmts) const { + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_fetch_and_xor_2); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncFetchAndXor4(std::list &stmts) const { + return 
CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_fetch_and_xor_4); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncFetchAndXor8(std::list &stmts) const { + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_fetch_and_xor_8); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncFetchAndNand1(std::list &stmts) const { + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_fetch_and_nand_1); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncFetchAndNand2(std::list &stmts) const { + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_fetch_and_nand_2); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncFetchAndNand4(std::list &stmts) const { + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_fetch_and_nand_4); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncFetchAndNand8(std::list &stmts) const { + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_fetch_and_nand_8); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncAndAndFetch1(std::list &stmts) const { + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_and_and_fetch_1); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncAndAndFetch2(std::list &stmts) const { + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_and_and_fetch_2); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncAndAndFetch4(std::list &stmts) const { + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_and_and_fetch_4); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncAndAndFetch8(std::list &stmts) const { + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_and_and_fetch_8); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncOrAndFetch1(std::list &stmts) const { + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_or_and_fetch_1); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncOrAndFetch2(std::list &stmts) const { + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_or_and_fetch_2); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncOrAndFetch4(std::list &stmts) const { + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_or_and_fetch_4); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncOrAndFetch8(std::list &stmts) const { + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_or_and_fetch_8); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncXorAndFetch1(std::list &stmts) const { + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_xor_and_fetch_1); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncXorAndFetch2(std::list &stmts) const { + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_xor_and_fetch_2); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncXorAndFetch4(std::list &stmts) const { + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_xor_and_fetch_4); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncXorAndFetch8(std::list &stmts) const { + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_xor_and_fetch_8); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncNandAndFetch1(std::list &stmts) const { + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_nand_and_fetch_1); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncNandAndFetch2(std::list &stmts) const { + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_nand_and_fetch_2); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncNandAndFetch4(std::list &stmts) const { + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_nand_and_fetch_4); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncNandAndFetch8(std::list &stmts) const { + 
return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_nand_and_fetch_8); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinSyncSynchronize(std::list &stmts) const { + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___sync_synchronize); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinAtomicExchangeN(std::list &stmts) const { + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___atomic_exchange_n); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinObjectSize(std::list &stmts) const { + return CreateIntrinsicCallAssignedForC(stmts, INTRN_C___builtin_object_size); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinReturnAddress(std::list &stmts) const { + return CreateIntrinsicopForC(stmts, INTRN_C__builtin_return_address); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinExtractReturnAddr(std::list &stmts) const { + return CreateIntrinsicopForC(stmts, INTRN_C__builtin_extract_return_addr); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinAlloca(std::list &stmts) const { + auto arg = args[0]->Emit2FEExpr(stmts); + CHECK_NULL_FATAL(mirType); + auto alloca = std::make_unique(FEIRTypeHelper::CreateTypeNative(*mirType), OP_alloca, std::move(arg)); + return alloca; +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinExpect(std::list &stmts) const { + ASSERT(args.size() == 2, "__builtin_expect requires two arguments"); + std::list subStmts; + UniqueFEIRExpr feExpr = CreateIntrinsicopForC(subStmts, INTRN_C___builtin_expect, false); + bool isOptimized = false; + for (auto &stmt : std::as_const(subStmts)) { + // If there are mutiple conditions combined with logical AND '&&' or logical OR '||' in __builtin_expect, generate + // a __builtin_expect intrinsicop for each one condition in mpl + if (stmt->GetKind() == FEIRNodeKind::kStmtCondGoto) { + isOptimized = true; + auto *condGotoStmt = static_cast(stmt.get()); + // skip if condition label name not starts with + if (condGotoStmt->GetLabelName().rfind(FEUtils::kCondGoToStmtLabelNamePrefix, 0) != 0) { + continue; + } + const auto &conditionExpr = condGotoStmt->GetConditionExpr(); + // skip if __builtin_expect intrinsicop has been generated for condition expr + if (conditionExpr->GetKind() == kExprBinary) { + auto &opnd0 = static_cast(conditionExpr.get())->GetOpnd0(); + if (opnd0->GetKind() == FEIRNodeKind::kExprIntrinsicop && + static_cast(opnd0.get())->GetIntrinsicID() == INTRN_C___builtin_expect) { + continue; + } + } + std::vector> argOpnds; + auto &builtInExpectArgs = static_cast(feExpr.get())->GetOpnds(); + auto cvtFeExpr = FEIRBuilder::CreateExprCvtPrim(conditionExpr->Clone(), builtInExpectArgs.front()->GetPrimType()); + argOpnds.push_back(std::move(cvtFeExpr)); + argOpnds.push_back(builtInExpectArgs.back()->Clone()); + auto returnType = std::make_unique(*mirType); + auto builtinExpectExpr = std::make_unique(std::move(returnType), + INTRN_C___builtin_expect, argOpnds); + auto newConditionExpr = FEIRBuilder::CreateExprZeroCompare(OP_ne, std::move(builtinExpectExpr)); + condGotoStmt->SetCondtionExpr(newConditionExpr); + } + } + stmts.splice(stmts.end(), subStmts); + return isOptimized ? 
static_cast(feExpr.get())->GetOpnds().front()->Clone() + : feExpr->Clone(); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinAbs(std::list &stmts) const { + auto arg = args[0]->Emit2FEExpr(stmts); + CHECK_NULL_FATAL(mirType); + auto abs = std::make_unique(FEIRTypeHelper::CreateTypeNative(*mirType), OP_abs, std::move(arg)); + auto feType = std::make_unique(*mirType); + abs->SetType(std::move(feType)); + return abs; +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinACos(std::list &stmts) const { + return CreateIntrinsicopForC(stmts, INTRN_C_acos); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinACosf(std::list &stmts) const { + return CreateIntrinsicopForC(stmts, INTRN_C_acosf); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinASin(std::list &stmts) const { + return CreateIntrinsicopForC(stmts, INTRN_C_asin); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinASinf(std::list &stmts) const { + return CreateIntrinsicopForC(stmts, INTRN_C_asinf); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinATan(std::list &stmts) const { + return CreateIntrinsicopForC(stmts, INTRN_C_atan); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinATanf(std::list &stmts) const { + return CreateIntrinsicopForC(stmts, INTRN_C_atanf); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinCos(std::list &stmts) const { + return CreateIntrinsicopForC(stmts, INTRN_C_cos); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinCosf(std::list &stmts) const { + return CreateIntrinsicopForC(stmts, INTRN_C_cosf); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinCosh(std::list &stmts) const { + return CreateIntrinsicopForC(stmts, INTRN_C_cosh); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinCoshf(std::list &stmts) const { + return CreateIntrinsicopForC(stmts, INTRN_C_coshf); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinSin(std::list &stmts) const { + return CreateIntrinsicopForC(stmts, INTRN_C_sin); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinSinf(std::list &stmts) const { + return CreateIntrinsicopForC(stmts, INTRN_C_sinf); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinSinh(std::list &stmts) const { + return CreateIntrinsicopForC(stmts, INTRN_C_sinh); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinSinhf(std::list &stmts) const { + return CreateIntrinsicopForC(stmts, INTRN_C_sinhf); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinExp(std::list &stmts) const { + return CreateIntrinsicopForC(stmts, INTRN_C_exp); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinExpf(std::list &stmts) const { + return CreateIntrinsicopForC(stmts, INTRN_C_expf); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinBswap64(std::list &stmts) const { + return CreateIntrinsicopForC(stmts, INTRN_C_bswap64); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinBswap32(std::list &stmts) const { + return CreateIntrinsicopForC(stmts, INTRN_C_bswap32); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinBswap16(std::list &stmts) const { + return CreateIntrinsicopForC(stmts, INTRN_C_bswap16); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinFmax(std::list &stmts) const { + return CreateBinaryExpr(stmts, OP_max); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinFmin(std::list &stmts) const { + return CreateBinaryExpr(stmts, OP_min); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinLog(std::list &stmts) const { + return CreateIntrinsicopForC(stmts, INTRN_C_log); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinLogf(std::list &stmts) const { + return CreateIntrinsicopForC(stmts, INTRN_C_logf); +} + +UniqueFEIRExpr ASTCallExpr::EmitBuiltinLog10(std::list &stmts) const { + return CreateIntrinsicopForC(stmts, INTRN_C_log10); +} + +UniqueFEIRExpr 
ASTCallExpr::EmitBuiltinLog10f(std::list<UniqueFEIRStmt> &stmts) const {
+  return CreateIntrinsicopForC(stmts, INTRN_C_log10f);
+}
+
+UniqueFEIRExpr ASTCallExpr::EmitBuiltinIsunordered(std::list<UniqueFEIRStmt> &stmts) const {
+  auto feTy = std::make_unique<FEIRTypeNative>(*mirType);
+  auto arg1 = args[0]->Emit2FEExpr(stmts);
+  auto arg2 = args[1]->Emit2FEExpr(stmts);
+  auto nan1 = std::make_unique<FEIRExprBinary>(feTy->Clone(), OP_ne, arg1->Clone(), arg1->Clone());
+  auto nan2 = std::make_unique<FEIRExprBinary>(feTy->Clone(), OP_ne, arg2->Clone(), arg2->Clone());
+  auto res = std::make_unique<FEIRExprBinary>(feTy->Clone(), OP_lior, std::move(nan1), std::move(nan2));
+  return res;
+}
+
+UniqueFEIRExpr ASTCallExpr::EmitBuiltinIsless(std::list<UniqueFEIRStmt> &stmts) const {
+  return CreateBinaryExpr(stmts, OP_lt);
+}
+
+UniqueFEIRExpr ASTCallExpr::EmitBuiltinIslessequal(std::list<UniqueFEIRStmt> &stmts) const {
+  return CreateBinaryExpr(stmts, OP_le);
+}
+
+UniqueFEIRExpr ASTCallExpr::EmitBuiltinIsgreater(std::list<UniqueFEIRStmt> &stmts) const {
+  return CreateBinaryExpr(stmts, OP_gt);
+}
+
+UniqueFEIRExpr ASTCallExpr::EmitBuiltinIsgreaterequal(std::list<UniqueFEIRStmt> &stmts) const {
+  return CreateBinaryExpr(stmts, OP_ge);
+}
+
+UniqueFEIRExpr ASTCallExpr::EmitBuiltinIslessgreater(std::list<UniqueFEIRStmt> &stmts) const {
+  auto feTy = std::make_unique<FEIRTypeNative>(*mirType);
+  auto arg1 = args[0]->Emit2FEExpr(stmts);
+  auto arg2 = args[1]->Emit2FEExpr(stmts);
+  auto cond1 = std::make_unique<FEIRExprBinary>(feTy->Clone(), OP_lt, arg1->Clone(), arg2->Clone());
+  auto cond2 = std::make_unique<FEIRExprBinary>(feTy->Clone(), OP_gt, arg1->Clone(), arg2->Clone());
+  auto res = std::make_unique<FEIRExprBinary>(feTy->Clone(), OP_lior, std::move(cond1), std::move(cond2));
+  return res;
+}
+
+UniqueFEIRExpr ASTCallExpr::EmitBuiltinWarnMemsetZeroLen(std::list<UniqueFEIRStmt> &stmts) const {
+  (void)stmts;
+  return nullptr;
+}
+
+UniqueFEIRExpr ASTCallExpr::EmitBuiltinRotateLeft8(std::list<UniqueFEIRStmt> &stmts) const {
+  return EmitBuiltinRotate(stmts, PTY_u8, true);
+}
+
+UniqueFEIRExpr ASTCallExpr::EmitBuiltinRotateLeft16(std::list<UniqueFEIRStmt> &stmts) const {
+  return EmitBuiltinRotate(stmts, PTY_u16, true);
+}
+
+UniqueFEIRExpr ASTCallExpr::EmitBuiltinRotateLeft32(std::list<UniqueFEIRStmt> &stmts) const {
+  return EmitBuiltinRotate(stmts, PTY_u32, true);
+}
+
+UniqueFEIRExpr ASTCallExpr::EmitBuiltinRotateLeft64(std::list<UniqueFEIRStmt> &stmts) const {
+  return EmitBuiltinRotate(stmts, PTY_u64, true);
+}
+
+UniqueFEIRExpr ASTCallExpr::EmitBuiltinRotateRight8(std::list<UniqueFEIRStmt> &stmts) const {
+  return EmitBuiltinRotate(stmts, PTY_u8, false);
+}
+
+UniqueFEIRExpr ASTCallExpr::EmitBuiltinRotateRight16(std::list<UniqueFEIRStmt> &stmts) const {
+  return EmitBuiltinRotate(stmts, PTY_u16, false);
+}
+
+UniqueFEIRExpr ASTCallExpr::EmitBuiltinRotateRight32(std::list<UniqueFEIRStmt> &stmts) const {
+  return EmitBuiltinRotate(stmts, PTY_u32, false);
+}
+
+UniqueFEIRExpr ASTCallExpr::EmitBuiltinRotateRight64(std::list<UniqueFEIRStmt> &stmts) const {
+  return EmitBuiltinRotate(stmts, PTY_u64, false);
+}
+
+UniqueFEIRExpr ASTCallExpr::EmitBuiltinRotate(std::list<UniqueFEIRStmt> &stmts, PrimType rotType, bool isLeft) const {
+  const int mask = FEUtils::GetWidth(rotType) - 1;
+  UniqueFEIRExpr maskExpr = FEIRBuilder::CreateExprConstAnyScalar(rotType, mask);
+  UniqueFEIRExpr valExpr = args[0]->Emit2FEExpr(stmts);
+  UniqueFEIRExpr bitExpr = args[1]->Emit2FEExpr(stmts);
+  bitExpr = FEIRBuilder::CreateExprBinary(OP_band, bitExpr->Clone(), maskExpr->Clone());
+  // RotateLeft: (val << bit) | (val >> ((-bit) & mask))
+  // RotateRight: (val >> bit) | (val << ((-bit) & mask))
+  return FEIRBuilder::CreateExprBinary(OP_bior,
+      FEIRBuilder::CreateExprBinary((isLeft ? OP_shl : OP_lshr), valExpr->Clone(), bitExpr->Clone()),
+      FEIRBuilder::CreateExprBinary((isLeft ?
OP_lshr : OP_shl), + valExpr->Clone(), + FEIRBuilder::CreateExprBinary(OP_band, + FEIRBuilder::CreateExprMathUnary(OP_neg, bitExpr->Clone()), + maskExpr->Clone()))); +} + +std::map ASTParser::InitBuiltinFuncPtrMap() { + std::map ans; +#define BUILTIN_FUNC_PARSE(funcName, FuncPtrBuiltinFunc) \ + ans[funcName] = FuncPtrBuiltinFunc; +#include "builtin_func_parse.def" +#undef BUILTIN_FUNC_PARSE + return ans; +} + +ASTExpr *ASTParser::ParseBuiltinFunc(MapleAllocator &allocator, const clang::CallExpr &expr, + std::stringstream &ss) const { + return (this->*(builtingFuncPtrMap[ss.str()]))(allocator, expr, ss); +} + +ASTExpr *ASTParser::ProcessBuiltinFuncByName(MapleAllocator &allocator, const clang::CallExpr &expr, + std::stringstream &ss, const std::string &name) const { + (void)allocator; + (void)expr; + ss.clear(); + ss.str(std::string()); + ss << name; + return nullptr; +} + +ASTExpr *ASTParser::ParseBuiltinClassifyType(MapleAllocator &allocator, const clang::CallExpr &expr, + std::stringstream &ss) const { + (void)ss; + clang::Expr::EvalResult res; + bool success = expr.EvaluateAsInt(res, *(astFile->GetContext())); + CHECK_FATAL(success, "Failed to evaluate __builtin_classify_type"); + llvm::APSInt apVal = res.Val.getInt(); + ASTIntegerLiteral *astIntegerLiteral = ASTDeclsBuilder::ASTExprBuilder(allocator); + astIntegerLiteral->SetVal(apVal.getExtValue()); + astIntegerLiteral->SetType(GlobalTables::GetTypeTable().GetInt32()); + return astIntegerLiteral; +} + +ASTExpr *ASTParser::ParseBuiltinConstantP(MapleAllocator &allocator, const clang::CallExpr &expr, + std::stringstream &ss) const { + (void)ss; + int64 constP = expr.getArg(0)->isConstantInitializer(*astFile->GetNonConstAstContext(), false) ? 1 : 0; + // Pointers are not considered constant + if (expr.getArg(0)->getType()->isPointerType() && + !llvm::isa(expr.getArg(0)->IgnoreParenCasts())) { + constP = 0; + } + ASTIntegerLiteral *astIntegerLiteral = ASTDeclsBuilder::ASTExprBuilder(allocator); + astIntegerLiteral->SetVal(constP); + astIntegerLiteral->SetType(astFile->CvtType(expr.getType())); + return astIntegerLiteral; +} + +ASTExpr *ASTParser::ParseBuiltinSignbit(MapleAllocator &allocator, const clang::CallExpr &expr, + std::stringstream &ss) const { + return ProcessBuiltinFuncByName(allocator, expr, ss, "__signbit"); +} + +ASTExpr *ASTParser::ParseBuiltinIsinfsign(MapleAllocator &allocator, const clang::CallExpr &expr, + std::stringstream &ss) const { + (void)allocator; + ss.clear(); + ss.str(std::string()); + MIRType *mirType = astFile->CvtType(expr.getArg(0)->getType()); + if (mirType != nullptr) { + PrimType type = mirType->GetPrimType(); + if (type == PTY_f64) { + ss << "__isinf"; + } else if (type == PTY_f32) { + ss << "__isinff"; + } else { + ASSERT(false, "Unsupported type passed to isinf"); + } + } + return nullptr; +} + +ASTExpr *ASTParser::ParseBuiltinHugeVal(MapleAllocator &allocator, const clang::CallExpr &expr, + std::stringstream &ss) const { + (void)expr; + (void)ss; + ASTFloatingLiteral *astFloatingLiteral = ASTDeclsBuilder::ASTExprBuilder(allocator); + astFloatingLiteral->SetKind(FloatKind::F64); + astFloatingLiteral->SetVal(std::numeric_limits::infinity()); + return astFloatingLiteral; +} + +ASTExpr *ASTParser::ParseBuiltinHugeValf(MapleAllocator &allocator, const clang::CallExpr &expr, + std::stringstream &ss) const { + (void)expr; + (void)ss; + ASTFloatingLiteral *astFloatingLiteral = ASTDeclsBuilder::ASTExprBuilder(allocator); + astFloatingLiteral->SetKind(FloatKind::F32); + 
astFloatingLiteral->SetVal(std::numeric_limits::infinity()); + return astFloatingLiteral; +} + +ASTExpr *ASTParser::ParseBuiltinInf(MapleAllocator &allocator, const clang::CallExpr &expr, + std::stringstream &ss) const { + (void)expr; + (void)ss; + ASTFloatingLiteral *astFloatingLiteral = ASTDeclsBuilder::ASTExprBuilder(allocator); + astFloatingLiteral->SetKind(FloatKind::F64); + astFloatingLiteral->SetVal(std::numeric_limits::infinity()); + return astFloatingLiteral; +} + +ASTExpr *ASTParser::ParseBuiltinInff(MapleAllocator &allocator, const clang::CallExpr &expr, + std::stringstream &ss) const { + (void)expr; + (void)ss; + ASTFloatingLiteral *astFloatingLiteral = ASTDeclsBuilder::ASTExprBuilder(allocator); + astFloatingLiteral->SetKind(FloatKind::F32); + astFloatingLiteral->SetVal(std::numeric_limits::infinity()); + return astFloatingLiteral; +} + +ASTExpr *ASTParser::ParseBuiltinNan(MapleAllocator &allocator, const clang::CallExpr &expr, + std::stringstream &ss) const { + (void)expr; + (void)ss; + ASTFloatingLiteral *astFloatingLiteral = ASTDeclsBuilder::ASTExprBuilder(allocator); + astFloatingLiteral->SetKind(FloatKind::F64); + astFloatingLiteral->SetVal(nan("")); + return astFloatingLiteral; +} + +ASTExpr *ASTParser::ParseBuiltinNanf(MapleAllocator &allocator, const clang::CallExpr &expr, + std::stringstream &ss) const { + (void)expr; + (void)ss; + ASTFloatingLiteral *astFloatingLiteral = ASTDeclsBuilder::ASTExprBuilder(allocator); + astFloatingLiteral->SetKind(FloatKind::F32); + astFloatingLiteral->SetVal(nanf("")); + return astFloatingLiteral; +} + +ASTExpr *ASTParser::ParseBuiltinSignBitf(MapleAllocator &allocator, const clang::CallExpr &expr, + std::stringstream &ss) const { + return ProcessBuiltinFuncByName(allocator, expr, ss, "__signbitf"); +} + +ASTExpr *ASTParser::ParseBuiltinSignBitl(MapleAllocator &allocator, const clang::CallExpr &expr, + std::stringstream &ss) const { + return ProcessBuiltinFuncByName(allocator, expr, ss, "__signbitl"); +} + +ASTExpr *ASTParser::ParseBuiltinTrap(MapleAllocator &allocator, const clang::CallExpr &expr, + std::stringstream &ss) const { + return ProcessBuiltinFuncByName(allocator, expr, ss, "abort"); +} + +ASTExpr *ASTParser::ParseBuiltinCopysignf(MapleAllocator &allocator, const clang::CallExpr &expr, + std::stringstream &ss) const { + return ProcessBuiltinFuncByName(allocator, expr, ss, "copysignf"); +} + +ASTExpr *ASTParser::ParseBuiltinCopysign(MapleAllocator &allocator, const clang::CallExpr &expr, + std::stringstream &ss) const { + return ProcessBuiltinFuncByName(allocator, expr, ss, "copysign"); +} + +ASTExpr *ASTParser::ParseBuiltinCopysignl(MapleAllocator &allocator, const clang::CallExpr &expr, + std::stringstream &ss) const { + return ProcessBuiltinFuncByName(allocator, expr, ss, "copysignl"); +} +} // namespace maple diff --git a/src/hir2mpl/ast_input/clang/src/ast_stmt.cpp b/src/hir2mpl/ast_input/clang/src/ast_stmt.cpp new file mode 100644 index 0000000000000000000000000000000000000000..c91a562df0f3cc9d1b67bfea6e1ff9f5d002b071 --- /dev/null +++ b/src/hir2mpl/ast_input/clang/src/ast_stmt.cpp @@ -0,0 +1,681 @@ +/* + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "ast_stmt.h" +#include "ast_decl.h" +#include "ast_util.h" +#include "mpl_logging.h" +#include "feir_stmt.h" +#include "feir_builder.h" +#include "fe_utils_ast.h" +#include "fe_manager.h" +#include "ast_util.h" +#include "enhance_c_checker.h" +#include "conditional_operator.h" + +namespace maple { +// ---------- ASTStmt ---------- +void ASTStmt::SetASTExpr(ASTExpr *astExpr) { + exprs.emplace_back(astExpr); +} + +// ---------- ASTStmtDummy ---------- +std::list ASTStmtDummy::Emit2FEStmtImpl() const { + std::list stmts; + for (auto expr : exprs) { + (void)expr->Emit2FEExpr(stmts); + } + return stmts; +} + +// ---------- ASTCompoundStmt ---------- +void ASTCompoundStmt::SetASTStmt(ASTStmt *astStmt) { + astStmts.emplace_back(astStmt); +} + +void ASTCompoundStmt::InsertASTStmtsAtFront(const std::list &stmts) { + (void)astStmts.insert(astStmts.cbegin(), stmts.cbegin(), stmts.cend()); +} + +const MapleList &ASTCompoundStmt::GetASTStmtList() const { + return astStmts; +} + +std::list ASTCompoundStmt::Emit2FEStmtImpl() const { + std::list stmts; + auto insertStmt = [&stmts, this](bool flag) { + if (!FEOptions::GetInstance().IsEnableSafeRegion()) { + return; + } + UniqueFEIRStmt stmt; + if (safeSS == SafeSS::kSafeSS) { + stmt = std::make_unique(flag); + } else if (safeSS == SafeSS::kUnsafeSS) { + stmt = std::make_unique(flag); + } + if (stmt != nullptr) { + if (flag) { + stmt->SetSrcLoc(endLoc); + } + stmts.emplace_back(std::move(stmt)); + } + }; + insertStmt(false); + FEFunction &feFunction = FEManager::GetCurrentFEFunction(); + if (!hasEmitted2MIRScope) { + feFunction.PushStmtScope(FEUtils::CvtLoc2SrcPosition(GetSrcLoc()), FEUtils::CvtLoc2SrcPosition(GetEndLoc())); + } + for (auto it : astStmts) { + stmts.splice(stmts.end(), it->Emit2FEStmt()); + } + if (!hasEmitted2MIRScope) { + UniqueFEIRScope scope = feFunction.PopTopScope(); + if (scope->GetVLASavedStackVar() != nullptr) { + auto stackRestoreStmt = scope->GenVLAStackRestoreStmt(); + stackRestoreStmt->SetSrcLoc(endLoc); + (void)stmts.emplace_back(std::move(stackRestoreStmt)); + } + hasEmitted2MIRScope = true; + } + insertStmt(true); + return stmts; +} + +// ---------- ASTReturnStmt ---------- +std::list ASTReturnStmt::Emit2FEStmtImpl() const { + std::list stmts; + auto astExpr = exprs.front(); + UniqueFEIRExpr feExpr = (astExpr != nullptr) ? 
astExpr->Emit2FEExpr(stmts) : nullptr; + if (astExpr != nullptr && ConditionalOptimize::DeleteRedundantTmpVar(feExpr, stmts)) { + return stmts; + } + FEIRBuilder::EmitVLACleanupStmts(FEManager::GetCurrentFEFunction(), stmts); + UniqueFEIRStmt stmt = std::make_unique(std::move(feExpr)); + stmts.emplace_back(std::move(stmt)); + return stmts; +} + +// ---------- ASTIfStmt ---------- +std::list ASTIfStmt::Emit2FEStmtImpl() const { + std::list stmts; + std::list thenStmts; + std::list elseStmts; + if (thenStmt != nullptr) { + thenStmts = thenStmt->Emit2FEStmt(); + } + if (elseStmt != nullptr) { + elseStmts = elseStmt->Emit2FEStmt(); + } + UniqueFEIRExpr condFEExpr = condExpr->Emit2FEExpr(stmts); + condFEExpr = FEIRBuilder::CreateExprZeroCompare(OP_ne, std::move(condFEExpr)); + UniqueFEIRStmt ifStmt = FEIRBuilder::CreateStmtIf(std::move(condFEExpr), thenStmts, elseStmts); + ifStmt->SetSrcLoc(loc); + stmts.emplace_back(std::move(ifStmt)); + return stmts; +} + +std::list ASTForStmt::Emit2FEStmtImpl() const { + std::list stmts; + std::string loopBodyEndLabelName = FEUtils::GetSequentialName("dowhile_body_end_"); + std::string loopEndLabelName = FEUtils::GetSequentialName("dowhile_end_"); + AstLoopUtil::Instance().PushContinue(loopBodyEndLabelName); + AstLoopUtil::Instance().PushBreak(loopEndLabelName); + auto labelBodyEndStmt = std::make_unique(loopBodyEndLabelName); + auto labelLoopEndStmt = std::make_unique(loopEndLabelName); + FEFunction &feFunction = FEManager::GetCurrentFEFunction(); + if (!hasEmitted2MIRScope) { + feFunction.PushStmtScope(FEUtils::CvtLoc2SrcPosition(GetSrcLoc()), + FEUtils::CvtLoc2SrcPosition(GetEndLoc()), true); + } + if (initStmt != nullptr) { + std::list feStmts = initStmt->Emit2FEStmt(); + stmts.splice(stmts.cend(), feStmts); + } + std::list bodyFEStmts = bodyStmt->Emit2FEStmt(); + if (AstLoopUtil::Instance().IsCurrentContinueLabelUsed()) { + bodyFEStmts.emplace_back(std::move(labelBodyEndStmt)); + } + UniqueFEIRExpr condFEExpr; + if (condExpr != nullptr) { + (void)condExpr->Emit2FEExpr(stmts); + } else { + condFEExpr = std::make_unique(static_cast(1), PTY_i32); + } + if (incExpr != nullptr) { + std::list incStmts; + UniqueFEIRExpr incFEExpr = incExpr->Emit2FEExpr(incStmts); + if (incFEExpr != nullptr && incStmts.size() == 2 && incStmts.front()->IsDummy()) { + incStmts.pop_front(); + } + bodyFEStmts.splice(bodyFEStmts.cend(), incStmts); + } + if (condExpr != nullptr) { + std::list condStmts; + condFEExpr = condExpr->Emit2FEExpr(condStmts); + bodyFEStmts.splice(bodyFEStmts.cend(), condStmts); + } + condFEExpr = FEIRBuilder::CreateExprZeroCompare(OP_ne, std::move(condFEExpr)); + UniqueFEIRStmt whileStmt = std::make_unique(OP_while, std::move(condFEExpr), std::move(bodyFEStmts)); + whileStmt->SetSrcLoc(loc); + stmts.emplace_back(std::move(whileStmt)); + if (AstLoopUtil::Instance().IsCurrentBreakLabelUsed()) { + stmts.emplace_back(std::move(labelLoopEndStmt)); + } + if (!hasEmitted2MIRScope) { + UniqueFEIRScope feirScope = feFunction.PopTopScope(); + if (feirScope->GetVLASavedStackVar() != nullptr) { + auto stkRestoreStmt = feirScope->GenVLAStackRestoreStmt(); + stkRestoreStmt->SetSrcLoc(endLoc); + (void)stmts.emplace_back(std::move(stkRestoreStmt)); + } + hasEmitted2MIRScope = true; + } + AstLoopUtil::Instance().PopCurrentBreak(); + AstLoopUtil::Instance().PopCurrentContinue(); + return stmts; +} + +std::list ASTWhileStmt::Emit2FEStmtImpl() const { + std::list stmts; + std::string loopBodyEndLabelName = FEUtils::GetSequentialName("dowhile_body_end_"); + std::string 
loopEndLabelName = FEUtils::GetSequentialName("dowhile_end_"); + AstLoopUtil::Instance().PushBreak(loopEndLabelName); + AstLoopUtil::Instance().PushContinue(loopBodyEndLabelName); + auto labelBodyEndStmt = std::make_unique(loopBodyEndLabelName); + auto labelLoopEndStmt = std::make_unique(loopEndLabelName); + FEFunction &feFunction = FEManager::GetCurrentFEFunction(); + if (!hasEmitted2MIRScope) { + feFunction.PushStmtScope(true); + } + std::list bodyFEStmts = bodyStmt->Emit2FEStmt(); + std::list condStmts; + std::list condPreStmts; + UniqueFEIRExpr condFEExpr = condExpr->Emit2FEExpr(condStmts); + (void)condExpr->Emit2FEExpr(condPreStmts); + if (AstLoopUtil::Instance().IsCurrentContinueLabelUsed()) { + bodyFEStmts.emplace_back(std::move(labelBodyEndStmt)); + } + bodyFEStmts.splice(bodyFEStmts.end(), condPreStmts); + condFEExpr = FEIRBuilder::CreateExprZeroCompare(OP_ne, std::move(condFEExpr)); + auto whileStmt = std::make_unique(OP_while, std::move(condFEExpr), std::move(bodyFEStmts)); + whileStmt->SetSrcLoc(loc); + stmts.splice(stmts.end(), condStmts); + stmts.emplace_back(std::move(whileStmt)); + if (AstLoopUtil::Instance().IsCurrentBreakLabelUsed()) { + stmts.emplace_back(std::move(labelLoopEndStmt)); + } + AstLoopUtil::Instance().PopCurrentBreak(); + AstLoopUtil::Instance().PopCurrentContinue(); + if (!hasEmitted2MIRScope) { + (void)feFunction.PopTopScope(); + hasEmitted2MIRScope = true; + } + return stmts; +} + +std::list ASTDoStmt::Emit2FEStmtImpl() const { + std::list feStmts; + FEFunction &feFunc = FEManager::GetCurrentFEFunction(); + if (!hasEmitted2MIRScope) { + feFunc.PushStmtScope(true); + } + std::string loopBodyEndLabelName = FEUtils::GetSequentialName("dowhile_body_end_"); + std::string loopEndLabelName = FEUtils::GetSequentialName("dowhile_end_"); + AstLoopUtil::Instance().PushBreak(loopEndLabelName); + AstLoopUtil::Instance().PushContinue(loopBodyEndLabelName); + auto labelBodyEndStmt = std::make_unique(loopBodyEndLabelName); + auto labelLoopEndStmt = std::make_unique(loopEndLabelName); + std::list bodyFEStmts; + if (bodyStmt != nullptr) { + bodyFEStmts = bodyStmt->Emit2FEStmt(); + } + if (AstLoopUtil::Instance().IsCurrentContinueLabelUsed()) { + bodyFEStmts.emplace_back(std::move(labelBodyEndStmt)); + } + std::list condStmts; + UniqueFEIRExpr condFEExpr = condExpr->Emit2FEExpr(condStmts); + bodyFEStmts.splice(bodyFEStmts.end(), condStmts); + condFEExpr = FEIRBuilder::CreateExprZeroCompare(OP_ne, std::move(condFEExpr)); + UniqueFEIRStmt whileStmt = std::make_unique(OP_dowhile, std::move(condFEExpr), + std::move(bodyFEStmts)); + whileStmt->SetSrcLoc(loc); + (void)feStmts.emplace_back(std::move(whileStmt)); + if (AstLoopUtil::Instance().IsCurrentBreakLabelUsed()) { + (void)feStmts.emplace_back(std::move(labelLoopEndStmt)); + } + AstLoopUtil::Instance().PopCurrentBreak(); + AstLoopUtil::Instance().PopCurrentContinue(); + if (!hasEmitted2MIRScope) { + (void)feFunc.PopTopScope(); + hasEmitted2MIRScope = true; + } + return feStmts; +} + +std::list ASTBreakStmt::Emit2FEStmtImpl() const { + std::string breakName; + if (!AstLoopUtil::Instance().IsBreakLabelsEmpty()) { + breakName = AstLoopUtil::Instance().GetCurrentBreak(); + } + std::string vlaLabelName = FEIRBuilder::EmitVLACleanupStmts(FEManager::GetCurrentFEFunction(), breakName, loc); + if (!vlaLabelName.empty()) { + breakName = vlaLabelName; + } + std::list stmts; + auto stmt = std::make_unique(); + stmt->SetBreakLabelName(breakName); + stmt->SetSrcLoc(loc); + stmts.emplace_back(std::move(stmt)); + return stmts; +} + 
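Illustrative aside (not part of the patch): a minimal C input showing the control flow that the loop, break, and continue lowering above has to produce. The function and label names below are hypothetical; the actual lowering emits break/goto FEIR statements against labels generated with the dowhile_body_end_/dowhile_end_ prefixes seen in ASTForStmt, ASTWhileStmt, and ASTDoStmt, and those labels are only emitted when a break or continue actually targets them.

/* Hypothetical example input, for illustration only. */
int sum_odd(const int *a, int n) {
  int s = 0;
  for (int i = 0; i < n; ++i) {
    if (a[i] == 0) {
      break;      /* lowered via a break statement targeting a label like dowhile_end_N */
    }
    if ((a[i] & 1) == 0) {
      continue;   /* lowered via a goto targeting a label like dowhile_body_end_N */
    }
    s += a[i];
  }               /* dowhile_body_end_N sits before the increment and condition re-check */
  return s;       /* dowhile_end_N follows the OP_while statement, only if break was used */
}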
+std::list ASTLabelStmt::Emit2FEStmtImpl() const { + // collect scopes of label stmt + FEFunction &feFunction = FEManager::GetCurrentFEFunction(); + std::map scopes; + for (const auto &scope : feFunction.GetScopeStack()) { + scopes.insert(std::make_pair(scope->GetID(), scope->Clone())); + } + feFunction.SetLabelWithScopes(GetLabelName(), std::move(scopes)); + std::list stmts; + auto feStmt = std::make_unique(GetLabelName()); + feStmt->SetSrcLoc(loc); + stmts.emplace_back(std::move(feStmt)); + stmts.splice(stmts.end(), subStmt->Emit2FEStmt()); + return stmts; +} + +std::list ASTContinueStmt::Emit2FEStmtImpl() const { + std::string continueName = AstLoopUtil::Instance().GetCurrentContinue(); + std::string vlaLabelName = FEIRBuilder::EmitVLACleanupStmts(FEManager::GetCurrentFEFunction(), continueName, loc); + if (!vlaLabelName.empty()) { + continueName = vlaLabelName; + } + std::list stmts; + auto stmt = std::make_unique(); + stmt->SetLabelName(continueName); + stmts.emplace_back(std::move(stmt)); + return stmts; +} + +// ---------- ASTUnaryOperatorStmt ---------- +std::list ASTUnaryOperatorStmt::Emit2FEStmtImpl() const { + std::list stmts; + std::list feExprs; + auto feExpr = exprs.front()->Emit2FEExpr(stmts); + if (feExpr != nullptr) { + if (stmts.size() == 2 && stmts.front()->IsDummy()) { + stmts.pop_front(); + return stmts; + } + feExprs.emplace_back(std::move(feExpr)); + auto stmt = std::make_unique(OP_eval, std::move(feExprs)); + stmts.emplace_back(std::move(stmt)); + } + return stmts; +} + +// ---------- ASTGotoStmt ---------- +std::list ASTGotoStmt::Emit2FEStmtImpl() const { + std::list stmts; + auto stmt = std::make_unique(GetLabelName()); + // collect scopes with vla of goto stmt + FEFunction &feFunction = FEManager::GetCurrentFEFunction(); + std::vector> vlaSvaedStackVars; + for (const auto &scope : feFunction.GetScopeStack()) { + if (scope->GetVLASavedStackVar() != nullptr) { + stmt->AddVLASvaedStackVars(scope->GetID(), scope->GetVLASavedStackVar()->Clone()); + } + } + stmts.emplace_back(std::move(stmt)); + return stmts; +} + +// ---------- ASTIndirectGotoStmt ---------- +std::list ASTIndirectGotoStmt::Emit2FEStmtImpl() const { + std::list stmts; + UniqueFEIRExpr targetExpr = exprs.front()->Emit2FEExpr(stmts); + stmts.emplace_back(FEIRBuilder::CreateStmtIGoto(std::move(targetExpr))); + return stmts; +} + +// ---------- ASTSwitchStmt ---------- +std::list ASTSwitchStmt::Emit2FEStmtImpl() const { + std::list stmts; + FEFunction &feFunction = FEManager::GetCurrentFEFunction(); + if (!hasEmitted2MIRScope) { + feFunction.PushStmtScope(true); + } + UniqueFEIRExpr expr = condExpr->Emit2FEExpr(stmts); + std::string exitName = AstSwitchUtil::Instance().CreateEndOrExitLabelName(); + AstLoopUtil::Instance().PushBreak(exitName); + std::string tmpName = FEUtils::GetSequentialName("switch_cond"); + UniqueFEIRVar tmpVar = FEIRBuilder::CreateVarNameForC(tmpName, *condType); + UniqueFEIRStmt condFEStmt = FEIRBuilder::CreateStmtDAssign(tmpVar->Clone(), std::move(expr)); + stmts.emplace_back(std::move(condFEStmt)); + auto dread = FEIRBuilder::CreateExprDRead(tmpVar->Clone()); + auto switchStmt = std::make_unique(std::move(dread), hasDefualt); + switchStmt->SetBreakLabelName(exitName); + for (auto &s : bodyStmt->Emit2FEStmt()) { + switchStmt.get()->AddFeirStmt(std::move(s)); + } + stmts.emplace_back(std::move(switchStmt)); + AstLoopUtil::Instance().PopCurrentBreak(); + if (!hasEmitted2MIRScope) { + (void)feFunction.PopTopScope(); + hasEmitted2MIRScope = true; + } + return stmts; +} + +// ---------- 
ASTCaseStmt ---------- +std::list ASTCaseStmt::Emit2FEStmtImpl() const { + std::list stmts; + auto caseStmt = std::make_unique(lCaseTag); + caseStmt.get()->AddCaseTag2CaseVec(lCaseTag, rCaseTag); + for (auto &s : subStmt->Emit2FEStmt()) { + caseStmt.get()->AddFeirStmt(std::move(s)); + } + stmts.emplace_back(std::move(caseStmt)); + return stmts; +} + +// ---------- ASTDefaultStmt ---------- +std::list ASTDefaultStmt::Emit2FEStmtImpl() const { + std::list stmts; + auto defaultStmt = std::make_unique(); + for (auto &s : child->Emit2FEStmt()) { + defaultStmt.get()->AddFeirStmt(std::move(s)); + } + stmts.emplace_back(std::move(defaultStmt)); + return stmts; +} + +// ---------- ASTNullStmt ---------- +std::list ASTNullStmt::Emit2FEStmtImpl() const { + // there is no need to process this stmt + std::list stmts; + return stmts; +} + +// ---------- ASTDeclStmt ---------- +std::list ASTDeclStmt::Emit2FEStmtImpl() const { + std::list stmts; + for (auto declInfo : subDeclInfos) { + if (std::holds_alternative(declInfo)) { + ASTExpr *vlaSizeExpr = std::get(declInfo); + if (vlaSizeExpr == nullptr) { + continue; + } + (void)vlaSizeExpr->Emit2FEExpr(stmts); + } else { + ASTDecl *decl = std::get(declInfo); + if (decl == nullptr) { + continue; + } + decl->GenerateInitStmt(stmts); + } + } + return stmts; +} + +// ---------- ASTCallExprStmt ---------- +std::list ASTCallExprStmt::Emit2FEStmtImpl() const { + std::list stmts; + ASTCallExpr *callExpr = static_cast(exprs.front()); + if (!callExpr->IsIcall()) { + bool isFinish = false; + (void)callExpr->ProcessBuiltinFunc(stmts, isFinish); + if (isFinish) { + return stmts; + } + } + std::unique_ptr callStmt = callExpr->GenCallStmt(); + callExpr->AddArgsExpr(callStmt, stmts); + if (callExpr->IsNeedRetExpr()) { + UniqueFEIRVar var = FEIRBuilder::CreateVarNameForC(varName, *callExpr->GetRetType(), false, false); + callStmt->SetVar(std::move(var)); + } + stmts.emplace_back(std::move(callStmt)); + return stmts; +} + +// ---------- ASTImplicitCastExprStmt ---------- +std::list ASTImplicitCastExprStmt::Emit2FEStmtImpl() const { + CHECK_FATAL(exprs.size() == 1, "Only one sub expr supported!"); + std::list stmts; + UniqueFEIRExpr feirExpr = exprs.front()->Emit2FEExpr(stmts); + if (feirExpr != nullptr) { + std::list feirExprs; + feirExprs.emplace_back(std::move(feirExpr)); + auto stmt = std::make_unique(OP_eval, std::move(feirExprs)); + stmts.emplace_back(std::move(stmt)); + } + return stmts; +} + +// ---------- ASTParenExprStmt ---------- +std::list ASTParenExprStmt::Emit2FEStmtImpl() const { + std::list stmts; + exprs.front()->Emit2FEExpr(stmts); + return stmts; +} + +// ---------- ASTIntegerLiteralStmt ---------- +std::list ASTIntegerLiteralStmt::Emit2FEStmtImpl() const { + std::list stmts; + std::list feExprs; + auto feExpr = exprs.front()->Emit2FEExpr(stmts); + if (feExpr != nullptr) { + feExprs.emplace_back(std::move(feExpr)); + auto stmt = std::make_unique(OP_eval, std::move(feExprs)); + stmts.emplace_back(std::move(stmt)); + } + return stmts; +} + +// ---------- ASTFloatingLiteralStmt ---------- +std::list ASTFloatingLiteralStmt::Emit2FEStmtImpl() const { + std::list stmts; + std::list feExprs; + auto feExpr = exprs.front()->Emit2FEExpr(stmts); + if (feExpr != nullptr) { + feExprs.emplace_back(std::move(feExpr)); + auto stmt = std::make_unique(OP_eval, std::move(feExprs)); + stmts.emplace_back(std::move(stmt)); + } + return stmts; +} + +// ---------- ASTVAArgExprStmt ---------- +std::list ASTVAArgExprStmt::Emit2FEStmtImpl() const { + std::list stmts; + 
exprs.front()->Emit2FEExpr(stmts); + return stmts; +} + +// ---------- ASTConditionalOperatorStmt ---------- +std::list ASTConditionalOperatorStmt::Emit2FEStmtImpl() const { + std::list stmts; + std::list feExprs; + auto feExpr = exprs.front()->Emit2FEExpr(stmts); + if (feExpr != nullptr) { + feExprs.emplace_back(std::move(feExpr)); + auto stmt = std::make_unique(OP_eval, std::move(feExprs)); + stmts.emplace_back(std::move(stmt)); + } + return stmts; +} + +// ---------- ASTCharacterLiteralStmt ---------- +std::list ASTCharacterLiteralStmt::Emit2FEStmtImpl() const { + std::list stmts; + std::list feExprs; + auto feExpr = exprs.front()->Emit2FEExpr(stmts); + if (feExpr != nullptr) { + feExprs.emplace_back(std::move(feExpr)); + auto stmt = std::make_unique(OP_eval, std::move(feExprs)); + stmts.emplace_back(std::move(stmt)); + } + return stmts; +} + +// ---------- ASTStmtExprStmt ---------- +std::list ASTStmtExprStmt::Emit2FEStmtImpl() const { + return cpdStmt->Emit2FEStmt(); +} + +// ---------- ASTCStyleCastExprStmt ---------- +std::list ASTCStyleCastExprStmt::Emit2FEStmtImpl() const { + CHECK_FATAL(exprs.front() != nullptr, "child expr must not be nullptr!"); + std::list stmts; + std::list feExprs; + auto feExpr = exprs.front()->Emit2FEExpr(stmts); + if (feExpr != nullptr) { + feExprs.emplace_back(std::move(feExpr)); + auto stmt = std::make_unique(OP_eval, std::move(feExprs)); + stmts.emplace_back(std::move(stmt)); + } + return stmts; +} + +// ---------- ASTCompoundAssignOperatorStmt ---------- +std::list ASTCompoundAssignOperatorStmt::Emit2FEStmtImpl() const { + CHECK_FATAL(exprs.size() == 1, "ASTCompoundAssignOperatorStmt must contain only one bo expr!"); + std::list stmts; + CHECK_FATAL(static_cast(exprs.front()) != nullptr, "Child expr must be ASTCompoundAssignOperator!"); + exprs.front()->Emit2FEExpr(stmts); + return stmts; +} + +std::list ASTBinaryOperatorStmt::Emit2FEStmtImpl() const { + CHECK_FATAL(exprs.size() == 1, "ASTBinaryOperatorStmt must contain only one bo expr!"); + std::list stmts; + auto boExpr = static_cast(exprs.front()); + if (boExpr->GetASTOp() == kASTOpBO) { + UniqueFEIRExpr boFEExpr = boExpr->Emit2FEExpr(stmts); + if (boFEExpr != nullptr) { + std::list exprs; + exprs.emplace_back(std::move(boFEExpr)); + auto stmt = std::make_unique(OP_eval, std::move(exprs)); + stmts.emplace_back(std::move(stmt)); + } + } else { + // has been processed by child expr emit, skip here + UniqueFEIRExpr boFEExpr = boExpr->Emit2FEExpr(stmts); + return stmts; + } + return stmts; +} + +// ---------- ASTAtomicExprStmt ---------- +std::list ASTAtomicExprStmt::Emit2FEStmtImpl() const { + std::list stmts; + auto astExpr = exprs.front(); + UniqueFEIRExpr feExpr = astExpr->Emit2FEExpr(stmts); + auto stmt = std::make_unique(std::move(feExpr)); + stmts.emplace_back(std::move(stmt)); + return stmts; +} + +// ---------- ASTGCCAsmStmt ---------- +std::list ASTGCCAsmStmt::Emit2FEStmtImpl() const { + std::list stmts; + std::vector outputsExprs; + std::vector inputsExprs; + std::unique_ptr stmt = std::make_unique(GetAsmStr(), isGoto, isVolatile); + std::vector> outputsVec(outputs.begin(), outputs.end()); + stmt->SetOutputs(outputsVec); + for (uint32 i = 0; i < outputs.size(); ++i) { + outputsExprs.emplace_back(exprs[i]->Emit2FEExpr(stmts)); + } + stmt->SetOutputsExpr(outputsExprs); + std::vector> inputsVec(inputs.begin(), inputs.end()); + stmt->SetInputs(inputsVec); + for (uint32 i = 0; i < inputs.size(); ++i) { + UniqueFEIRExpr expr; + if (inputs[i].second == "m") { + std::unique_ptr addrOf = 
std::make_unique(); + addrOf->SetUOExpr(exprs[i + outputs.size()]); + expr = addrOf->Emit2FEExpr(stmts); + } else { + expr = exprs[i + outputs.size()]->Emit2FEExpr(stmts); + } + inputsExprs.emplace_back(std::move(expr)); + } + stmt->SetInputsExpr(inputsExprs); + std::vector clobbersVec(clobbers.begin(), clobbers.end()); + stmt->SetClobbers(clobbersVec); + std::vector labelsVec(labels.begin(), labels.end()); + stmt->SetLabels(labelsVec); + stmts.emplace_back(std::move(stmt)); + return stmts; +} + +std::list ASTOffsetOfStmt::Emit2FEStmtImpl() const { + CHECK_FATAL(exprs.front() != nullptr, "child expr must not be nullptr!"); + std::list stmts; + std::list feExprs; + auto feExpr = exprs.front()->Emit2FEExpr(stmts); + if (feExpr != nullptr) { + feExprs.emplace_back(std::move(feExpr)); + auto stmt = std::make_unique(OP_eval, std::move(feExprs)); + stmts.emplace_back(std::move(stmt)); + } + return stmts; +} + +std::list ASTGenericSelectionExprStmt::Emit2FEStmtImpl() const { + CHECK_FATAL(exprs.front() != nullptr, "child expr must not be nullptr!"); + std::list stmts; + std::list feExprs; + auto feExpr = exprs.front()->Emit2FEExpr(stmts); + if (feExpr != nullptr) { + feExprs.emplace_back(std::move(feExpr)); + auto stmt = std::make_unique(OP_eval, std::move(feExprs)); + stmts.emplace_back(std::move(stmt)); + } + return stmts; +} + +std::list ASTDeclRefExprStmt::Emit2FEStmtImpl() const { + std::list stmts; + for (auto expr : exprs) { + (void)expr->Emit2FEExpr(stmts); + } + return stmts; +} + +std::list ASTUnaryExprOrTypeTraitExprStmt::Emit2FEStmtImpl() const { + std::list stmts; + for (auto expr : exprs) { + (void)expr->Emit2FEExpr(stmts); + } + return stmts; +} + +std::list ASTUOAddrOfLabelExprStmt::Emit2FEStmtImpl() const { + CHECK_FATAL(exprs.size() == 1, "ASTUOAddrOfLabelExprStmt must contain only one expr!"); + CHECK_FATAL(exprs.front() != nullptr, "child expr must not be nullptr!"); + std::list stmts; + std::list feExprs; + UniqueFEIRExpr feExpr = exprs.front()->Emit2FEExpr(stmts); + if (feExpr != nullptr) { + feExprs.emplace_back(std::move(feExpr)); + UniqueFEIRStmt stmt = std::make_unique(OP_eval, std::move(feExprs)); + stmts.emplace_back(std::move(stmt)); + } + return stmts; +} +} // namespace maple diff --git a/src/hir2mpl/ast_input/clang/src/ast_struct2fe_helper.cpp b/src/hir2mpl/ast_input/clang/src/ast_struct2fe_helper.cpp new file mode 100644 index 0000000000000000000000000000000000000000..2e73ac1c498d855f029073881077bf0769364f9a --- /dev/null +++ b/src/hir2mpl/ast_input/clang/src/ast_struct2fe_helper.cpp @@ -0,0 +1,392 @@ +/* + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "ast_struct2fe_helper.h" +#include "fe_manager.h" +#include "feir_builder.h" +#include "fe_utils_ast.h" +#include "ast_util.h" +#include "ast_decl_builder.h" +#include "enhance_c_checker.h" + +namespace maple { +// ---------- ASTStruct2FEHelper ---------- +bool ASTStruct2FEHelper::ProcessDeclImpl() { + if (isSkipped) { + astStruct.ClearGenericAttrsContentMap(); + return true; + } + if (mirStructType == nullptr) { + astStruct.ClearGenericAttrsContentMap(); + return false; + } + mirStructType->SetTypeAttrs(GetStructAttributeFromInput()); + // Process Fields + InitFieldHelpers(); + ProcessFieldDef(); + // Process Methods + InitMethodHelpers(); + ProcessMethodDef(); + astStruct.ClearGenericAttrsContentMap(); + return true; +} + +void ASTStruct2FEHelper::InitFieldHelpersImpl() { + MemPool *mp = allocator.GetMemPool(); + ASSERT(mp != nullptr, "mem pool is nullptr"); + for (ASTField *field : astStruct.GetFields()) { + ASSERT(field != nullptr, "field is nullptr"); + ASTStructField2FEHelper *fieldHelper = mp->New( + allocator, *field, *astStruct.GetTypeDesc().front()); + fieldHelpers.push_back(fieldHelper); + } +} + +void ASTStruct2FEHelper::InitMethodHelpersImpl() { +} + +TypeAttrs ASTStruct2FEHelper::GetStructAttributeFromInputImpl() const { + GenericAttrs attrs = astStruct.GetGenericAttrs(); + return attrs.ConvertToTypeAttrs(); +} + +ASTStruct2FEHelper::ASTStruct2FEHelper(MapleAllocator &allocator, ASTStruct &structIn) + : FEInputStructHelper(allocator), astStruct(structIn) { + srcLang = kSrcLangC; +} + +std::string ASTStruct2FEHelper::GetStructNameOrinImpl() const { + return astStruct.GetStructName(false); +} + +std::string ASTStruct2FEHelper::GetStructNameMplImpl() const { + return astStruct.GetStructName(true); +} + +std::list ASTStruct2FEHelper::GetSuperClassNamesImpl() const { + CHECK_FATAL(false, "NIY"); + return std::list {}; +} + +std::vector ASTStruct2FEHelper::GetInterfaceNamesImpl() const { + CHECK_FATAL(false, "NIY"); + return std::vector {}; +} + +std::string ASTStruct2FEHelper::GetSourceFileNameImpl() const { + return astStruct.GetSrcFileName(); +} + +std::string ASTStruct2FEHelper::GetSrcFileNameImpl() const { + return astStruct.GetSrcFileName(); +} + +MIRStructType *ASTStruct2FEHelper::CreateMIRStructTypeImpl(bool &error) const { + std::string name = GetStructNameOrinImpl(); + if (name.empty()) { + error = true; + ERR(kLncErr, "class name is empty"); + return nullptr; + } + MIRStructType *type = FEManager::GetTypeManager().GetOrCreateStructType(astStruct.GenerateUniqueVarName()); + error = false; + if (astStruct.IsUnion()) { + type->SetMIRTypeKind(kTypeUnion); + } else { + type->SetMIRTypeKind(kTypeStruct); + } + if (FEOptions::GetInstance().IsDbgFriendly() && type->GetAlias() == nullptr) { + MIRAlias *alias = allocator.GetMemPool()->New(&FEManager::GetModule()); + type->SetAlias(alias); + } + return type; +} + +uint64 ASTStruct2FEHelper::GetRawAccessFlagsImpl() const { + CHECK_FATAL(false, "NIY"); + return 0; +} + +GStrIdx ASTStruct2FEHelper::GetIRSrcFileSigIdxImpl() const { + // Not implemented, just return a invalid value + return GStrIdx(0); +} + +bool ASTStruct2FEHelper::IsMultiDefImpl() const { + // Not implemented, alway return false + return false; +} + +// ---------- ASTGlobalVar2FEHelper --------- +bool ASTStructField2FEHelper::ProcessDeclImpl(MapleAllocator &allocator) { + (void)allocator; + CHECK_FATAL(false, "should not run here"); + return false; +} + +bool ASTStructField2FEHelper::ProcessDeclWithContainerImpl(MapleAllocator &allocator) { + 
(void)allocator; + std::string fieldName = field.GetName(); + GStrIdx idx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(fieldName); + FieldAttrs attrs = field.GetGenericAttrs().ConvertToFieldAttrs(); + attrs.SetAlign(field.GetAlign()); + MIRType *fieldType = field.GetTypeDesc().front(); + ASSERT(fieldType != nullptr, "nullptr check for fieldType"); + ENCChecker::InsertBoundaryInAtts(attrs, field.GetBoundaryInfo()); + if (attrs.GetAttr(FLDATTR_nonnull) || + FEManager::GetTypeManager().IsOwnedNonnullFieldStructSet(fieldType->GetTypeIndex())) { // nested struct + FEManager::GetTypeManager().InsertOwnedNonnullFieldStructSet(structType.GetTypeIndex()); + } + mirFieldPair.first = idx; + mirFieldPair.second.first = fieldType->GetTypeIndex(); + mirFieldPair.second.second = attrs; + if (FEOptions::GetInstance().IsDbgFriendly()) { + MIRAlias *mirAlias = static_cast(structType).GetAlias(); + CHECK_NULL_FATAL(mirAlias); + TypeAttrs typeAttrs = field.GetGenericAttrs().ConvertToTypeAttrs(); + MIRAliasVars aliasVar = FEUtils::AddAlias(idx, field.GetSourceType(), typeAttrs); + mirAlias->SetAliasVarMap(idx, aliasVar); + } + field.ClearGenericAttrsContentMap(); + return true; +} + +// ---------- ASTGlobalVar2FEHelper --------- +bool ASTGlobalVar2FEHelper::ProcessDeclImpl(MapleAllocator &allocator) { + (void)allocator; + const std::string varName = astVar.GetName(); + MIRType *type = astVar.GetTypeDesc().front(); + TypeAttrs typeAttrs = astVar.GetGenericAttrs().ConvertToTypeAttrs(); + ENCChecker::InsertBoundaryInAtts(typeAttrs, astVar.GetBoundaryInfo()); + MIRSymbol *mirSymbol = FEManager::GetMIRBuilder().GetGlobalDecl(varName); + if (mirSymbol != nullptr) { + // do not allow extern var override global var + if (mirSymbol->GetStorageClass() != MIRStorageClass::kScExtern && typeAttrs.GetAttr(ATTR_extern)) { + typeAttrs.ResetAttr(ATTR_extern); + } else if (mirSymbol->GetStorageClass() == MIRStorageClass::kScExtern && !typeAttrs.GetAttr(ATTR_extern)) { + mirSymbol->SetStorageClass(MIRStorageClass::kScGlobal); + } + } else { + mirSymbol = FEManager::GetMIRBuilder().GetOrCreateGlobalDecl(varName, *type); + } + if (mirSymbol == nullptr) { + return false; + } + // Set the type here in case a previous declaration had an incomplete + // array type and the definition has the complete type. 
+ if (mirSymbol->GetType()->GetTypeIndex() != type->GetTypeIndex()) { + mirSymbol->SetTyIdx(type->GetTypeIndex()); + } + if (mirSymbol->GetSrcPosition().LineNum() == 0) { + mirSymbol->SetSrcPosition(FEUtils::CvtLoc2SrcPosition(astVar.GetSrcLoc())); + } + if (typeAttrs.GetAttr(ATTR_extern)) { + mirSymbol->SetStorageClass(MIRStorageClass::kScExtern); + typeAttrs.ResetAttr(AttrKind::ATTR_extern); + } else if (typeAttrs.GetAttr(ATTR_static)) { + mirSymbol->SetStorageClass(MIRStorageClass::kScFstatic); + } else { + mirSymbol->SetStorageClass(MIRStorageClass::kScGlobal); + } + typeAttrs.SetAlign(astVar.GetAlign()); + mirSymbol->AddAttrs(typeAttrs); + if (!astVar.GetSectionAttr().empty()) { + mirSymbol->sectionAttr = GlobalTables::GetUStrTable().GetOrCreateStrIdxFromName(astVar.GetSectionAttr()); + } + if (!astVar.GetAsmAttr().empty()) { + mirSymbol->SetAsmAttr(GlobalTables::GetUStrTable().GetOrCreateStrIdxFromName(astVar.GetAsmAttr())); + } + if (FEOptions::GetInstance().IsDbgFriendly()) { + MIRScope *scope = FEManager::GetModule().GetScope(); + FEUtils::AddAliasInMIRScope(*scope, varName, *mirSymbol, astVar.GetSourceType()); + } + const ASTExpr *initExpr = astVar.GetInitExpr(); + MIRConst *cst = nullptr; + if (initExpr != nullptr) { + cst = initExpr->GenerateMIRConst(); + mirSymbol->SetKonst(cst); + } + ENCChecker::CheckNonnullGlobalVarInit(*mirSymbol, cst); + return true; +} + +// ---------- ASTFileScopeAsm2FEHelper --------- +bool ASTFileScopeAsm2FEHelper::ProcessDeclImpl(MapleAllocator &allocator) { + MapleString asmDecl(astAsm.GetAsmStr().c_str(), allocator.GetMemPool()); + FEManager::GetModule().GetAsmDecls().emplace_back(asmDecl); + return true; +} + +// ---------- ASTEnum2FEHelper --------- +bool ASTEnum2FEHelper::ProcessDeclImpl(MapleAllocator &allocator) { + (void)allocator; + MIREnum *enumType = FEManager::GetTypeManager().GetOrCreateEnum( + astEnum.GenerateUniqueVarName(), astEnum.GetTypeDesc().front()->GetPrimType()); + if (!astEnum.GetEnumConstants().empty() && enumType->GetElements().empty()) { + for (auto elem : astEnum.GetEnumConstants()) { + GStrIdx elemNameIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(elem->GetName()); + enumType->NewElement(elemNameIdx, elem->GetValue()); + } + enumType->SetPrimType(astEnum.GetEnumConstants().front()->GetTypeDesc().front()->GetPrimType()); + } + return true; +} + +// ---------- ASTFunc2FEHelper ---------- +bool ASTFunc2FEHelper::ProcessDeclImpl(MapleAllocator &allocator) { + HIR2MPL_PARALLEL_FORBIDDEN(); + ASSERT(srcLang != kSrcLangUnknown, "src lang not set"); + std::string methodShortName = GetMethodName(false, false); + CHECK_FATAL(!methodShortName.empty(), "error: method name is empty"); + if (methodShortName.compare("main") == 0) { + FEManager::GetMIRBuilder().GetMirModule().SetEntryFuncName(methodShortName); + } + methodNameIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(methodShortName); + if (!ASTUtil::InsertFuncSet(methodNameIdx)) { + func.ClearGenericAttrsContentMap(); + return true; + } + SolveReturnAndArgTypes(allocator); + bool isStatic = IsStatic(); + bool isVarg = IsVarg(); + CHECK_FATAL(retMIRType != nullptr, "function must have return type"); + std::vector argsTypeIdx; + for (auto *type : argMIRTypes) { + argsTypeIdx.push_back(type->GetTypeIndex()); + } + mirFunc = FEManager::GetTypeManager().CreateFunction(methodNameIdx, retMIRType->GetTypeIndex(), + argsTypeIdx, isVarg, isStatic); + mirFunc->SetSrcPosition(FEUtils::CvtLoc2SrcPosition(func.GetSrcLoc())); + MIRSymbol *funSym = 
mirFunc->GetFuncSymbol(); + ASSERT_NOT_NULL(funSym); + if (!func.GetSectionAttr().empty()) { + funSym->sectionAttr = GlobalTables::GetUStrTable().GetOrCreateStrIdxFromName(func.GetSectionAttr()); + } + if (func.GetWeakrefAttr().first) { + std::string attrStr = func.GetWeakrefAttr().second; + UStrIdx idx { 0 }; + if (!attrStr.empty()) { + idx = GlobalTables::GetUStrTable().GetOrCreateStrIdxFromName(attrStr); + } + funSym->SetWeakrefAttr(std::pair { true, idx }); + } + SolveFunctionArguments(); + SolveFunctionAttributes(); + func.ClearGenericAttrsContentMap(); + return true; +} + +void ASTFunc2FEHelper::SolveFunctionArguments() const { + MapleVector paramDecls = func.GetParamDecls(); + if (firstArgRet) { + ASTDecl *returnParamVar = ASTDeclsBuilder::ASTVarBuilder(allocator, MapleString("", allocator.GetMemPool()), + "first_arg_return", MapleVector({}, allocator.Adapter()), GenericAttrs()); + returnParamVar->SetIsParam(true); + (void)paramDecls.insert(paramDecls.cbegin(), returnParamVar); + } + for (uint32 i = 0; i < paramDecls.size(); ++i) { + MIRSymbol *sym = FEManager::GetMIRBuilder().GetOrCreateDeclInFunc( + paramDecls[i]->GetName(), *argMIRTypes[i], *mirFunc); + ASSERT_NOT_NULL(sym); + sym->SetStorageClass(kScFormal); + sym->SetSKind(kStVar); + TypeAttrs typeAttrs = paramDecls[i]->GetGenericAttrs().ConvertToTypeAttrs(); + ENCChecker::InsertBoundaryInAtts(typeAttrs, paramDecls[i]->GetBoundaryInfo()); + sym->AddAttrs(typeAttrs); + mirFunc->AddArgument(sym); + if (FEOptions::GetInstance().IsDbgFriendly() && paramDecls[i]->GetDeclKind() == kASTVar && + (!firstArgRet || i != 0)) { + FEUtils::AddAliasInMIRScope(*mirFunc->GetScope(), paramDecls[i]->GetName(), *sym, + static_cast(paramDecls[i])->GetSourceType()); + } + } +} + +void ASTFunc2FEHelper::SolveFunctionAttributes() { + FuncAttrs attrs = GetAttrs(); + if (firstArgRet) { + attrs.SetAttr(FUNCATTR_firstarg_return); + } + mirMethodPair.first = mirFunc->GetStIdx(); + mirMethodPair.second.first = mirFunc->GetMIRFuncType()->GetTypeIndex(); + ENCChecker::InsertBoundaryInAtts(attrs, func.GetBoundaryInfo()); + mirMethodPair.second.second = attrs; + mirFunc->SetFuncAttrs(attrs); + if (firstArgRet) { + mirFunc->GetMIRFuncType()->funcAttrs.SetAttr(FUNCATTR_firstarg_return); + } +} + +const std::string ASTFunc2FEHelper::GetSrcFileName() const { + return func.GetSrcFileName(); +} + +void ASTFunc2FEHelper::SolveReturnAndArgTypesImpl(MapleAllocator &allocator) { + (void)allocator; + const MapleVector &returnAndArgTypeNames = func.GetTypeDesc(); + retMIRType = returnAndArgTypeNames[1]; + // skip funcType and returnType + (void)argMIRTypes.insert(argMIRTypes.cbegin(), returnAndArgTypeNames.cbegin() + 2, returnAndArgTypeNames.cend()); + if (retMIRType->GetPrimType() == PTY_agg && retMIRType->GetSize() > 16) { + firstArgRet = true; + MIRType *retPointerType = GlobalTables::GetTypeTable().GetOrCreatePointerType(*retMIRType); + (void)argMIRTypes.insert(argMIRTypes.cbegin(), retPointerType); + retMIRType = GlobalTables::GetTypeTable().GetPrimType(PTY_void); + } +} + +std::string ASTFunc2FEHelper::GetMethodNameImpl(bool inMpl, bool full) const { + std::string funcName = func.GetName(); + if (!full) { + return inMpl ? 
namemangler::EncodeName(funcName) : funcName; + } + // fullName not implemented yet + return funcName; +} + +bool ASTFunc2FEHelper::IsVargImpl() const { + return false; +} + +bool ASTFunc2FEHelper::HasThisImpl() const { + CHECK_FATAL(false, "NIY"); + return false; +} + +MIRType *ASTFunc2FEHelper::GetTypeForThisImpl() const { + CHECK_FATAL(false, "NIY"); + return nullptr; +} + +FuncAttrs ASTFunc2FEHelper::GetAttrsImpl() const { + return func.GetGenericAttrs().ConvertToFuncAttrs(); +} + +bool ASTFunc2FEHelper::IsStaticImpl() const { + return false; +} + +bool ASTFunc2FEHelper::IsVirtualImpl() const { + CHECK_FATAL(false, "NIY"); + return false; +} +bool ASTFunc2FEHelper::IsNativeImpl() const { + CHECK_FATAL(false, "NIY"); + return false; +} + +bool ASTFunc2FEHelper::HasCodeImpl() const { + return func.HasCode(); +} +} // namespace maple diff --git a/src/hir2mpl/ast_input/common/include/ast_compiler_component-inl.h b/src/hir2mpl/ast_input/common/include/ast_compiler_component-inl.h new file mode 100644 index 0000000000000000000000000000000000000000..019bc25d1730b078593fc5a7cb86869ad937f199 --- /dev/null +++ b/src/hir2mpl/ast_input/common/include/ast_compiler_component-inl.h @@ -0,0 +1,163 @@ +/* + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "ast_compiler_component.h" +#include "ast_struct2fe_helper.h" +#include "ast_function.h" +#include "fe_timer.h" +#include "fe_manager.h" + +namespace maple { +template +ASTCompilerComponent::ASTCompilerComponent(MIRModule &module) + : HIR2MPLCompilerComponent(module, kSrcLangC), + mp(FEUtils::NewMempool("MemPool for ASTCompilerComponent", false /* isLcalPool */)), + allocator(mp), + astInput(module, allocator) {} + +template +ASTCompilerComponent::~ASTCompilerComponent() { + mp = nullptr; +} + +template +bool ASTCompilerComponent::ParseInputImpl() { + FETimer timer; + bool success = true; + timer.StartAndDump("ASTCompilerComponent::ParseInput()"); + FE_INFO_LEVEL(FEOptions::kDumpLevelInfo, "===== Process ASTCompilerComponent::ParseInput() ====="); + std::vector inputNames; + if (typeid(T) == typeid(ASTParser)) { + inputNames = FEOptions::GetInstance().GetInputASTFiles(); +#ifdef ENABLE_MAST + } else if (typeid(T) == typeid(MapleASTParser)) { + inputNames = FEOptions::GetInstance().GetInputMASTFiles(); +#endif + } + success = success && astInput.ReadASTFiles(allocator, inputNames); + CHECK_FATAL(success, "ASTCompilerComponent::ParseInput failed. 
Exit."); + ParseInputStructs(); + ParseInputFuncs(); + + for (auto &astVar : astInput.GetASTVars()) { + FEInputGlobalVarHelper *varHelper = allocator.GetMemPool()->New(allocator, *astVar); + globalVarHelpers.emplace_back(varHelper); + } + + for (auto &astFileScopeAsm : astInput.GetASTFileScopeAsms()) { + FEInputFileScopeAsmHelper *asmHelper = allocator.GetMemPool()->New( + allocator, *astFileScopeAsm); + globalFileScopeAsmHelpers.emplace_back(asmHelper); + } + + for (auto &astEnum : astInput.GetASTEnums()) { + ASTEnum2FEHelper *enumHelper = allocator.GetMemPool()->New(allocator, *astEnum); + (void)enumHelpers.emplace_back(enumHelper); + } + timer.StopAndDumpTimeMS("ASTCompilerComponent::ParseInput()"); + return success; +} + +template +void ASTCompilerComponent::ParseInputStructs() { + for (auto &astStruct : astInput.GetASTStructs()) { + std::string structName = astStruct->GenerateUniqueVarName(); + // skip same name structs + if (structNameSet.find(structName) != structNameSet.cend()) { + continue; + } + FEInputStructHelper *structHelper = allocator.GetMemPool()->New(allocator, *astStruct); + structHelpers.emplace_back(structHelper); + structNameSet.insert(structName); + } +} + +template +void ASTCompilerComponent::ParseInputFuncs() { + for (auto &astFunc : astInput.GetASTFuncs()) { + auto it = funcNameMap.find(astFunc->GetName()); + if (it != funcNameMap.cend()) { + // save the function with funcbody + if (it->second->HasCode() && !astFunc->HasCode()) { + continue; + } else { + (void)funcNameMap.erase(it); + auto itHelper = std::find_if(std::begin(globalFuncHelpers), std::end(globalFuncHelpers), + [&astFunc](FEInputMethodHelper *s) -> bool { + return (s->GetMethodName(false) == astFunc->GetName()); + }); + CHECK_FATAL(itHelper != globalFuncHelpers.end(), "astFunc not found"); + (void)globalFuncHelpers.erase(itHelper); + } + } + FEInputMethodHelper *funcHelper = allocator.GetMemPool()->New(allocator, *astFunc); + globalFuncHelpers.emplace_back(funcHelper); + funcNameMap.insert(std::make_pair(astFunc->GetName(), funcHelper)); + } +} + +template +bool ASTCompilerComponent::PreProcessDeclImpl() { + FETimer timer; + timer.StartAndDump("ASTCompilerComponent::PreProcessDecl()"); + FE_INFO_LEVEL(FEOptions::kDumpLevelInfo, "===== Process ASTCompilerComponent::PreProcessDecl() ====="); + bool success = true; + for (FEInputStructHelper *helper : structHelpers) { + ASSERT_NOT_NULL(helper); + success = helper->PreProcessDecl() ? 
success : false; + } + timer.StopAndDumpTimeMS("ASTCompilerComponent::PreProcessDecl()"); + return success; +} + +template +std::unique_ptr ASTCompilerComponent::CreatFEFunctionImpl(FEInputMethodHelper *methodHelper) { + ASTFunc2FEHelper *astFuncHelper = static_cast(methodHelper); + GStrIdx methodNameIdx = methodHelper->GetMethodNameIdx(); + bool isStatic = methodHelper->IsStatic(); + MIRFunction *mirFunc = FEManager::GetTypeManager().GetMIRFunction(methodNameIdx, isStatic); + CHECK_NULL_FATAL(mirFunc); + module.AddFunction(mirFunc); + std::unique_ptr feFunction = std::make_unique(*astFuncHelper, *mirFunc, phaseResultTotal); + feFunction->Init(); + return feFunction; +} + +template +bool ASTCompilerComponent::ProcessFunctionSerialImpl() { + std::stringstream ss; + ss << GetComponentName() << "::ProcessFunctionSerial()"; + FETimer timer; + timer.StartAndDump(ss.str()); + bool success = true; + FE_INFO_LEVEL(FEOptions::kDumpLevelInfo, "===== Process %s =====", ss.str().c_str()); + for (FEInputMethodHelper *methodHelper : globalFuncHelpers) { + ASSERT_NOT_NULL(methodHelper); + if (methodHelper->HasCode()) { + ASTFunc2FEHelper *astFuncHelper = static_cast(methodHelper); + std::unique_ptr feFunction = CreatFEFunction(methodHelper); + feFunction->SetSrcFileName(astFuncHelper->GetSrcFileName()); + bool processResult = feFunction->Process(); + if (!processResult) { + (void)compileFailedFEFunctions.insert(feFunction.get()); + } + success = success && processResult; + feFunction->Finish(); + } + funcSize++; + } + timer.StopAndDumpTimeMS(ss.str()); + return success; +} +} // namespace maple diff --git a/src/hir2mpl/ast_input/common/include/ast_compiler_component.h b/src/hir2mpl/ast_input/common/include/ast_compiler_component.h new file mode 100644 index 0000000000000000000000000000000000000000..be00e27ab356d0c60bd1e70dfa29084da80d24b6 --- /dev/null +++ b/src/hir2mpl/ast_input/common/include/ast_compiler_component.h @@ -0,0 +1,59 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */
+#ifndef HIR2MPL_AST_INPUT_INCLUDE_AST_COMPILER_COMPONENT_H
+#define HIR2MPL_AST_INPUT_INCLUDE_AST_COMPILER_COMPONENT_H
+#include "fe_macros.h"
+#include "hir2mpl_compiler_component.h"
+#include "ast_input.h"
+#include "ast_input-inl.h"
+
+namespace maple {
+template <class T>
+class ASTCompilerComponent : public HIR2MPLCompilerComponent {
+ public:
+  explicit ASTCompilerComponent(MIRModule &module);
+  ~ASTCompilerComponent() override;
+
+ protected:
+  bool ParseInputImpl() override;
+  bool PreProcessDeclImpl() override;
+  std::unique_ptr<FEFunction> CreatFEFunctionImpl(FEInputMethodHelper *methodHelper) override;
+  bool ProcessFunctionSerialImpl() override;
+  std::string GetComponentNameImpl() const override {
+    return "ASTCompilerComponent";
+  }
+  bool ParallelableImpl() const override {
+    return true;
+  }
+  void DumpPhaseTimeTotalImpl() const override {
+    INFO(kLncInfo, "[PhaseTime] ASTCompilerComponent");
+    HIR2MPLCompilerComponent::DumpPhaseTimeTotalImpl();
+  }
+  void ReleaseMemPoolImpl() override {
+    FEUtils::DeleteMempoolPtr(mp);
+  }
+
+ private:
+  MemPool *mp;
+  MapleAllocator allocator;
+  ASTInput<T> astInput;
+  std::unordered_set<std::string> structNameSet;
+  std::unordered_map<std::string, FEInputMethodHelper*> funcNameMap;
+
+  void ParseInputStructs();
+  void ParseInputFuncs();
+}; // class ASTCompilerComponent
+} // namespace maple
+#endif // HIR2MPL_AST_INPUT_INCLUDE_AST_COMPILER_COMPONENT_H
diff --git a/src/hir2mpl/ast_input/common/include/ast_decl.h b/src/hir2mpl/ast_input/common/include/ast_decl.h
new file mode 100644
index 0000000000000000000000000000000000000000..1e7069d22a05343ec03eee23bce5846ab60b2116
--- /dev/null
+++ b/src/hir2mpl/ast_input/common/include/ast_decl.h
@@ -0,0 +1,441 @@
+/*
+ * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */ +#ifndef HIR2MPL_AST_INPUT_INCLUDE_AST_DECL_H +#define HIR2MPL_AST_INPUT_INCLUDE_AST_DECL_H +#include +#include +#include +#include "types_def.h" +#include "ast_stmt.h" +#include "feir_var.h" +#include "fe_function.h" +#include "generic_attrs.h" + +namespace maple { +enum DeclKind { + kUnknownDecl = 0, + kASTDecl, + kASTField, + kASTFunc, + kASTStruct, + kASTVar, + kASTEnumConstant, + kASTEnumDecl, + kASTFileScopeAsm, + kASTTypedefDecl, +}; + +struct BoundaryInfo { + ASTExpr *lenExpr = nullptr; + int8 lenParamIdx = -1; // -1 means not on the parameter + bool isBytedLen = false; +}; + +class ASTDecl { + public: + ASTDecl(const MapleString &srcFile, const MapleString &nameIn, const MapleVector &typeDescIn) + : isGlobalDecl(false), srcFileName(srcFile), name(nameIn), typeDesc(typeDescIn) {} + virtual ~ASTDecl() = default; + const std::string GetSrcFileName() const; + const std::string GetName() const; + const MapleVector &GetTypeDesc() const; + void SetTypeDesc(const MapleVector &typeVecIn); + GenericAttrs GetGenericAttrs() const { + return genAttrs; + } + + void SetGlobal(bool isGlobal) { + isGlobalDecl = isGlobal; + } + + bool IsGlobal() const { + return isGlobalDecl; + } + + void SetIsParam(bool flag) { + isParam = flag; + } + + bool IsParam() const { + return isParam; + } + + void SetIsMacro(bool flag) { + if (flag) { + isMacroID = FEUtils::GetSequentialNumber(); + } else { + isMacroID = static_cast(flag); + } + } + + uint32 IsMacroID() const { + return isMacroID; + } + + void SetAlign(uint32 n) { + if (n > align) { + align = n; + } + } + + uint32 GetAlign() const { + return align; + } + + void SetAttr(GenericAttrKind attrKind) { + genAttrs.SetAttr(attrKind); + } + + void SetSectionAttr(const std::string &str) { + sectionAttr = str; + } + + const std::string &GetSectionAttr() const { + return sectionAttr; + } + + void GenerateInitStmt(std::list &stmts) { + return GenerateInitStmtImpl(stmts); + } + + void SetSrcLoc(const Loc &l) { + loc = l; + } + + const Loc &GetSrcLoc() const { + return loc; + } + + uint32 GetSrcFileIdx() const { + return loc.fileIdx; + } + + uint32 GetSrcFileLineNum() const { + return loc.line; + } + + uint32 GetSrcFileColumn() const { + return loc.column; + } + + DeclKind GetDeclKind() const { + return declKind; + } + + MIRConst *Translate2MIRConst() const; + + std::string GenerateUniqueVarName() const; + + void SetBoundaryLenExpr(ASTExpr *expr) { + boundary.lenExpr = expr; + } + + const BoundaryInfo &GetBoundaryInfo() const { + return boundary; + } + + ASTExpr *GetBoundaryLenExpr() const { + return boundary.lenExpr; + } + + void SetBoundaryLenParamIdx(int8 idx) { + boundary.lenParamIdx = idx; + } + + int8 GetBoundaryLenParamIdx() const { + return boundary.lenParamIdx; + } + + void SetIsBytedLen(bool flag) { + boundary.isBytedLen = flag; + } + + bool IsBytedLen() const { + return boundary.isBytedLen; + } + + void ClearGenericAttrsContentMap() { + genAttrs.ClearContentMap(); + } + + const MIRType *GetSourceType() const { + return sourceType; + } + + void SetSourceType(MIRType *type) { + sourceType = type; + } + + protected: + virtual MIRConst *Translate2MIRConstImpl() const { + CHECK_FATAL(false, "Maybe implemented for other ASTDecls"); + return nullptr; + } + virtual void GenerateInitStmtImpl(std::list &stmts) {} + bool isGlobalDecl; + bool isParam = false; + uint32 align = 1; // in byte + const MapleString srcFileName; + + MapleString name; + MapleVector typeDesc; + GenericAttrs genAttrs; + Loc loc = { 0, 0, 0 }; + uint32 isMacroID = false; + DeclKind declKind = 
kASTDecl; + BoundaryInfo boundary; + std::string sectionAttr; + MIRType *sourceType = nullptr; +}; + +class ASTField : public ASTDecl { + public: + ASTField(const MapleString &srcFile, const MapleString &nameIn, const MapleVector &typeDescIn, + const GenericAttrs &genAttrsIn, bool isAnonymous = false) + : ASTDecl(srcFile, nameIn, typeDescIn), isAnonymousField(isAnonymous) { + genAttrs = genAttrsIn; + declKind = kASTField; + } + ~ASTField() = default; + bool IsAnonymousField() const { + return isAnonymousField; + } + + private: + bool isAnonymousField = false; +}; + +class ASTFunc : public ASTDecl { + public: + ASTFunc(const MapleString &srcFile, const MapleString &nameIn, const MapleVector &typeDescIn, + const GenericAttrs &genAttrsIn, const MapleVector ¶mDeclsIn) + : ASTDecl(srcFile, nameIn, typeDescIn), compound(nullptr), paramDecls(paramDeclsIn) { + genAttrs = genAttrsIn; + declKind = kASTFunc; + } + ~ASTFunc() override { + compound = nullptr; + } + void SetCompoundStmt(ASTStmt *astCompoundStmt); + void InsertStmtsIntoCompoundStmtAtFront(const std::list &stmts) const; + const ASTStmt *GetCompoundStmt() const; + const MapleVector &GetParamDecls() const { + return paramDecls; + } + std::vector> GenArgVarList() const; + std::list EmitASTStmtToFEIR() const; + std::list InitArgsBoundaryVar(MIRFunction &mirFunc) const; + void InsertBoundaryCheckingInRet(std::list &stmts) const; + + void SetWeakrefAttr(const std::pair &attr) { + weakrefAttr = attr; + } + + const std::pair &GetWeakrefAttr() const { + return weakrefAttr; + } + + bool HasCode() const { + if (compound == nullptr) { + return false; + } + return true; + } + + private: + // typeDesc format: [funcType, retType, arg0, arg1 ... argN] + ASTStmt *compound = nullptr; // func body + MapleVector paramDecls; + std::pair weakrefAttr; +}; + +class ASTStruct : public ASTDecl { + public: + ASTStruct(MapleAllocator &allocatorIn, const MapleString &srcFile, const MapleString &nameIn, + const MapleVector &typeDescIn, const GenericAttrs &genAttrsIn) + : ASTDecl(srcFile, nameIn, typeDescIn), + isUnion(false), fields(allocatorIn.Adapter()), methods(allocatorIn.Adapter()) { + genAttrs = genAttrsIn; + declKind = kASTStruct; + } + ~ASTStruct() = default; + + std::string GetStructName(bool mapled) const; + + void SetField(ASTField *f) { + fields.emplace_back(f); + } + + const MapleList &GetFields() const { + return fields; + } + + void SetIsUnion() { + isUnion = true; + } + + bool IsUnion() const { + return isUnion; + } + + private: + void GenerateInitStmtImpl(std::list &stmts) override; + + bool isUnion = false; + MapleList fields; + MapleList methods; +}; + +class ASTVar : public ASTDecl { + public: + ASTVar(const MapleString &srcFile, const MapleString &nameIn, const MapleVector &typeDescIn, + const GenericAttrs &genAttrsIn) + : ASTDecl(srcFile, nameIn, typeDescIn) { + genAttrs = genAttrsIn; + declKind = kASTVar; + } + virtual ~ASTVar() = default; + + void SetInitExpr(ASTExpr *init) { + initExpr = init; + } + + const ASTExpr *GetInitExpr() const { + return initExpr; + } + + void SetAsmAttr(const std::string &str) { + asmAttr = str; + } + + const std::string &GetAsmAttr() const { + return asmAttr; + } + + void SetVariableArrayExpr(ASTExpr *expr) { + variableArrayExpr = expr; + } + + void SetPromotedType(PrimType primType) { + promotedType = primType; + } + + PrimType GetPromotedType() const { + return promotedType; + } + + std::unique_ptr Translate2FEIRVar() const; + MIRSymbol *Translate2MIRSymbol() const; + + private: + MIRConst 
*Translate2MIRConstImpl() const override; + void GenerateInitStmtImpl(std::list &stmts) override; + void GenerateInitStmt4StringLiteral(const ASTExpr *initASTExpr, const UniqueFEIRVar &feirVar, + const UniqueFEIRExpr &initFeirExpr, std::list &stmts) const; + ASTExpr *initExpr = nullptr; + std::string asmAttr; + ASTExpr *variableArrayExpr = nullptr; + PrimType promotedType = PTY_void; + bool hasAddedInMIRScope = false; +}; + +class ASTFileScopeAsm : public ASTDecl { + public: + ASTFileScopeAsm(MapleAllocator &allocatorIn, const MapleString &srcFile) + : ASTDecl(srcFile, MapleString("", allocatorIn.GetMemPool()), MapleVector(allocatorIn.Adapter())) { + declKind = kASTFileScopeAsm; + } + ~ASTFileScopeAsm() = default; + + void SetAsmStr(const std::string &str) { + asmStr = str; + } + + const std::string &GetAsmStr() const { + return asmStr; + } + + private: + std::string asmStr; +}; + +class ASTEnumConstant : public ASTDecl { + public: + ASTEnumConstant(const MapleString &srcFile, const MapleString &nameIn, const MapleVector &typeDescIn, + const GenericAttrs &genAttrsIn) + : ASTDecl(srcFile, nameIn, typeDescIn) { + genAttrs = genAttrsIn; + declKind = kASTEnumConstant; + } + ~ASTEnumConstant() = default; + + void SetValue(const IntVal &val); + const IntVal &GetValue() const; + + private: + MIRConst *Translate2MIRConstImpl() const override; + IntVal value; +}; + +class ASTEnumDecl : public ASTDecl { + public: + ASTEnumDecl(MapleAllocator &allocatorIn, const MapleString &srcFile, const MapleString &nameIn, + const MapleVector &typeDescIn, const GenericAttrs &genAttrsIn) + : ASTDecl(srcFile, nameIn, typeDescIn), consts(allocatorIn.Adapter()) { + genAttrs = genAttrsIn; + declKind = kASTEnumDecl; + } + ~ASTEnumDecl() = default; + + void PushConstant(ASTEnumConstant *c) { + consts.emplace_back(c); + } + + const MapleList &GetEnumConstants() const { + return consts; + } + + private: + void GenerateInitStmtImpl(std::list &stmts) override; + + MapleList consts; +}; + +class ASTTypedefDecl : public ASTDecl { + public: + ASTTypedefDecl(const MapleString &srcFile, const MapleString &nameIn, + const MapleVector &typeDescIn, const GenericAttrs &genAttrsIn) + : ASTDecl(srcFile, nameIn, typeDescIn) { + genAttrs = genAttrsIn; + declKind = kASTTypedefDecl; + } + ~ASTTypedefDecl() = default; + + void SetSubTypedefDecl(ASTTypedefDecl *decl) { + subTypedefDecl = decl; + } + + const ASTTypedefDecl *GetSubTypedefDecl() const { + return subTypedefDecl; + } + + private: + void GenerateInitStmtImpl(std::list &stmts) override; + + ASTTypedefDecl* subTypedefDecl = nullptr; + +}; +} // namespace maple +#endif // HIR2MPL_AST_INPUT_INCLUDE_AST_DECL_H diff --git a/src/hir2mpl/ast_input/common/include/ast_decl_builder.h b/src/hir2mpl/ast_input/common/include/ast_decl_builder.h new file mode 100644 index 0000000000000000000000000000000000000000..6b4272f8812a2c3809e767ea93f1c8acc424b8d6 --- /dev/null +++ b/src/hir2mpl/ast_input/common/include/ast_decl_builder.h @@ -0,0 +1,144 @@ +/* + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. 
+ * See the Mulan PSL v2 for more details.
+ */
+#ifndef HIR2MPL_AST_INPUT_INCLUDE_AST_DECL_BUILDER_H
+#define HIR2MPL_AST_INPUT_INCLUDE_AST_DECL_BUILDER_H
+#include "ast_decl.h"
+#include "mempool_allocator.h"
+
+namespace maple {
+class ASTDeclsBuilder {
+ public:
+  static ASTDecl *GetASTDecl(int64 id) {
+    ASTDecl *decl = declesTable[id];
+    return decl;
+  }
+
+  static void Clear() {
+    declesTable.clear();
+  }
+
+  static ASTDecl *ASTDeclBuilder(const MapleAllocator &allocator, const MapleString &srcFile,
+      const std::string &nameIn, const MapleVector<MIRType*> &typeDescIn, int64 id = INT64_MAX) {
+    MapleString nameStr(nameIn, allocator.GetMemPool());
+    if (id == INT64_MAX) {
+      return allocator.GetMemPool()->New<ASTDecl>(srcFile, nameStr, typeDescIn); // for temp decl
+    } else if (declesTable[id] == nullptr) {
+      declesTable[id] = allocator.GetMemPool()->New<ASTDecl>(srcFile, nameStr, typeDescIn);
+    }
+    return declesTable[id];
+  }
+
+  static ASTVar *ASTVarBuilder(const MapleAllocator &allocator, const MapleString &srcFile, const std::string &varName,
+      const MapleVector<MIRType*> &desc, const GenericAttrs &genAttrsIn, int64 id = INT64_MAX) {
+    MapleString varNameStr(varName, allocator.GetMemPool());
+    if (id == INT64_MAX) {
+      return allocator.GetMemPool()->New<ASTVar>(srcFile, varNameStr, desc, genAttrsIn);
+    } else if (declesTable[id] == nullptr) {
+      declesTable[id] = allocator.GetMemPool()->New<ASTVar>(srcFile, varNameStr, desc, genAttrsIn);
+    }
+    return static_cast<ASTVar*>(declesTable[id]);
+  }
+
+  static ASTEnumConstant *ASTEnumConstBuilder(const MapleAllocator &allocator, const MapleString &srcFile,
+      const std::string &varName, const MapleVector<MIRType*> &desc,
+      const GenericAttrs &genAttrsIn, int64 id = INT64_MAX) {
+    MapleString varNameStr(varName, allocator.GetMemPool());
+    if (id == INT64_MAX) {
+      return allocator.GetMemPool()->New<ASTEnumConstant>(srcFile, varNameStr, desc, genAttrsIn);
+    } else if (declesTable[id] == nullptr) {
+      declesTable[id] = allocator.GetMemPool()->New<ASTEnumConstant>(srcFile, varNameStr, desc, genAttrsIn);
+    }
+    return static_cast<ASTEnumConstant*>(declesTable[id]);
+  }
+
+  static ASTEnumDecl *ASTLocalEnumDeclBuilder(MapleAllocator &allocator, const MapleString &srcFile,
+      const std::string &varName, const MapleVector<MIRType*> &desc, const GenericAttrs &genAttrsIn,
+      int64 id = INT64_MAX) {
+    MapleString varNameStr(varName, allocator.GetMemPool());
+    if (id == INT64_MAX) {
+      return allocator.GetMemPool()->New<ASTEnumDecl>(allocator, srcFile, varNameStr, desc, genAttrsIn);
+    } else if (declesTable[id] == nullptr) {
+      declesTable[id] = allocator.GetMemPool()->New<ASTEnumDecl>(allocator, srcFile, varNameStr, desc, genAttrsIn);
+    }
+    return static_cast<ASTEnumDecl*>(declesTable[id]);
+  }
+
+  static ASTFunc *ASTFuncBuilder(const MapleAllocator &allocator, const MapleString &srcFile, const std::string &nameIn,
+      const MapleVector<MIRType*> &typeDescIn, const GenericAttrs &genAttrsIn,
+      MapleVector<ASTDecl*> &paramDeclsIn, int64 id = INT64_MAX) {
+    MapleString funcNameStr(nameIn, allocator.GetMemPool());
+    if (id == INT64_MAX) {
+      return allocator.GetMemPool()->New<ASTFunc>(srcFile, funcNameStr, typeDescIn, genAttrsIn, paramDeclsIn);
+    } else if (declesTable[id] == nullptr) {
+      declesTable[id] = allocator.GetMemPool()->New<ASTFunc>(srcFile, funcNameStr, typeDescIn, genAttrsIn,
+                                                             paramDeclsIn);
+    }
+    return static_cast<ASTFunc*>(declesTable[id]);
+  }
+
+  static ASTTypedefDecl *ASTTypedefBuilder(const MapleAllocator &allocator, const MapleString &srcFile,
+      const std::string &varName, const MapleVector<MIRType*> &desc,
+      const GenericAttrs &genAttrsIn, int64 id = INT64_MAX) {
+    MapleString varNameStr(varName, allocator.GetMemPool());
+    if (id == INT64_MAX) {
+      return allocator.GetMemPool()->New<ASTTypedefDecl>(srcFile, varNameStr, desc, genAttrsIn);
+    } else if (declesTable[id] == nullptr) {
+      declesTable[id] = allocator.GetMemPool()->New<ASTTypedefDecl>(srcFile, varNameStr, desc, genAttrsIn);
+    }
+    return static_cast<ASTTypedefDecl*>(declesTable[id]);
+  }
+
+  template <typename T>
+  static T *ASTStmtBuilder(MapleAllocator &allocator) {
+    return allocator.GetMemPool()->New<T>(allocator);
+  }
+
+  template <typename T>
+  static T *ASTExprBuilder(MapleAllocator &allocator) {
+    return allocator.GetMemPool()->New<T>(allocator);
+  }
+
+  static ASTStruct *ASTStructBuilder(MapleAllocator &allocator, const MapleString &srcFile,
+      const std::string &nameIn, const MapleVector<MIRType*> &typeDescIn,
+      const GenericAttrs &genAttrsIn, int64 id = INT64_MAX) {
+    MapleString structNameStr(nameIn, allocator.GetMemPool());
+    if (id == INT64_MAX) {
+      return allocator.GetMemPool()->New<ASTStruct>(allocator, srcFile, structNameStr, typeDescIn, genAttrsIn);
+    } else if (declesTable[id] == nullptr) {
+      declesTable[id] = allocator.GetMemPool()->New<ASTStruct>(allocator, srcFile, structNameStr, typeDescIn,
+                                                               genAttrsIn);
+    }
+    return static_cast<ASTStruct*>(declesTable[id]);
+  }
+
+  static ASTField *ASTFieldBuilder(const MapleAllocator &allocator, const MapleString &srcFile,
+      const std::string &varName, const MapleVector<MIRType*> &desc,
+      const GenericAttrs &genAttrsIn, int64 id = INT64_MAX,
+      bool isAnonymous = false) {
+    MapleString varNameStr(varName, allocator.GetMemPool());
+    if (id == INT64_MAX) {
+      return allocator.GetMemPool()->New<ASTField>(srcFile, varNameStr, desc, genAttrsIn, isAnonymous);
+    } else if (declesTable[id] == nullptr) {
+      declesTable[id] = allocator.GetMemPool()->New<ASTField>(srcFile, varNameStr, desc, genAttrsIn,
+                                                              isAnonymous);
+    }
+    return static_cast<ASTField*>(declesTable[id]);
+  }
+
+ private:
+  static std::map<int64, ASTDecl*> declesTable;
+};
+} // namespace maple
+#endif // HIR2MPL_AST_INPUT_INCLUDE_AST_DECL_BUILDER_H
diff --git a/src/hir2mpl/ast_input/common/include/ast_input-inl.h b/src/hir2mpl/ast_input/common/include/ast_input-inl.h
new file mode 100644
index 0000000000000000000000000000000000000000..536ea52f6cecde1b36fc866bb0b8f2244455a5eb
--- /dev/null
+++ b/src/hir2mpl/ast_input/common/include/ast_input-inl.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#include "ast_input.h"
+#include "global_tables.h"
+#include "fe_macros.h"
+
+namespace maple {
+template <class T>
+ASTInput<T>::ASTInput(MIRModule &moduleIn, MapleAllocator &allocatorIn)
+    : module(moduleIn), allocator(allocatorIn), parserMap(allocatorIn.Adapter()),
+      astStructs(allocatorIn.Adapter()), astFuncs(allocatorIn.Adapter()), astVars(allocatorIn.Adapter()),
+      astFileScopeAsms(allocatorIn.Adapter()), astEnums(allocatorIn.Adapter()) {}
+
+template <class T>
+bool ASTInput<T>::ReadASTFile(MapleAllocator &allocatorIn, uint32 index, const std::string &fileName) {
+  T *parser = allocator.GetMemPool()->New<T>(allocator, index, fileName,
+                                             astStructs, astFuncs, astVars,
+                                             astFileScopeAsms, astEnums);
+  TRY_DO(parser->OpenFile(allocatorIn));
+  TRY_DO(parser->Verify());
+  TRY_DO(parser->PreProcessAST());
+  // Some implicit record or enum decl would be retrieved in func body at use,
+  // so we put `RetrieveFuncs` before `RetrieveStructs`
+  TRY_DO(parser->RetrieveFuncs(allocatorIn));
+  TRY_DO(parser->RetrieveStructs(allocatorIn));
+  if (FEOptions::GetInstance().IsDbgFriendly()) {
+    TRY_DO(parser->RetrieveEnums(allocatorIn));
+    TRY_DO(parser->RetrieveGlobalTypeDef(allocatorIn));
+  }
+  TRY_DO(parser->RetrieveGlobalVars(allocatorIn));
+  TRY_DO(parser->RetrieveFileScopeAsms(allocatorIn));
+  TRY_DO(parser->Release());
+  parserMap.emplace(fileName, parser);
+  return true;
+}
+
+template <class T>
+bool ASTInput<T>::ReadASTFiles(MapleAllocator &allocatorIn, const std::vector<std::string> &fileNames) {
+  bool res = true;
+  for (uint32 i = 0; res && i < fileNames.size(); ++i) {
+    FETimer timer;
+    std::stringstream ss;
+    ss << "ReadASTFile[" << (i + 1) << "/" << fileNames.size() << "]: " << fileNames[i];
+    timer.StartAndDump(ss.str());
+    res = res && ReadASTFile(allocatorIn, i, fileNames[i]);
+    RegisterFileInfo(fileNames[i]);
+    timer.StopAndDumpTimeMS(ss.str());
+  }
+  return res;
+}
+
+template <class T>
+void ASTInput<T>::RegisterFileInfo(const std::string &fileName) {
+  GStrIdx fileNameIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(fileName);
+  GStrIdx fileInfoIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName("INFO_filename");
+  module.PushFileInfoPair(MIRInfoPair(fileInfoIdx, fileNameIdx));
+  module.PushFileInfoIsString(true);
+}
+}
\ No newline at end of file
diff --git a/src/hir2mpl/ast_input/common/include/ast_input.h b/src/hir2mpl/ast_input/common/include/ast_input.h
new file mode 100644
index 0000000000000000000000000000000000000000..f06532b33404f2fcc719d5dff97f3841448a1658
--- /dev/null
+++ b/src/hir2mpl/ast_input/common/include/ast_input.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */ +#ifndef HIR2MPL_AST_INPUT_INCLUDE_AST_INPUT_H +#define HIR2MPL_AST_INPUT_INCLUDE_AST_INPUT_H +#include +#include "mir_module.h" +#include "ast_decl.h" +#include "ast_parser.h" +#ifdef ENABLE_MAST +#include "maple_ast_parser.h" +#endif + +namespace maple { +template +class ASTInput { + public: + ASTInput(MIRModule &moduleIn, MapleAllocator &allocatorIn); + ~ASTInput() = default; + bool ReadASTFile(MapleAllocator &allocatorIn, uint32 index, const std::string &fileName); + bool ReadASTFiles(MapleAllocator &allocatorIn, const std::vector &fileNames); + const MIRModule &GetModule() const { + return module; + } + + void RegisterFileInfo(const std::string &fileName); + const MapleList &GetASTStructs() const { + return astStructs; + } + + void AddASTStruct(ASTStruct *astStruct) { + auto itor = std::find(astStructs.begin(), astStructs.end(), astStruct); + if (itor == astStructs.end()) { + astStructs.emplace_back(astStruct); + } + } + + const MapleList &GetASTFuncs() const { + return astFuncs; + } + + void AddASTFunc(ASTFunc *astFunc) { + astFuncs.emplace_back(astFunc); + } + + const MapleList &GetASTVars() const { + return astVars; + } + + void AddASTVar(ASTVar *astVar) { + astVars.emplace_back(astVar); + } + + const MapleList &GetASTFileScopeAsms() const { + return astFileScopeAsms; + } + + void AddASTFileScopeAsm(ASTFileScopeAsm *fileScopeAsm) { + astFileScopeAsms.emplace_back(fileScopeAsm); + } + + const MapleList &GetASTEnums() const { + return astEnums; + } + + private: + MIRModule &module; + MapleAllocator &allocator; + MapleMap parserMap; + + MapleList astStructs; + MapleList astFuncs; + MapleList astVars; + MapleList astFileScopeAsms; + MapleList astEnums; +}; +} +#endif // HIR2MPL_AST_INPUT_INCLUDE_AST_INPUT_H \ No newline at end of file diff --git a/src/hir2mpl/ast_input/common/src/ast_decl.cpp b/src/hir2mpl/ast_input/common/src/ast_decl.cpp new file mode 100644 index 0000000000000000000000000000000000000000..0548df17d15bfa9fa4cf462373ef34b9f504200d --- /dev/null +++ b/src/hir2mpl/ast_input/common/src/ast_decl.cpp @@ -0,0 +1,341 @@ +/* + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "ast_decl.h" +#include "ast_parser.h" +#include "global_tables.h" +#include "ast_stmt.h" +#include "feir_var_name.h" +#include "feir_builder.h" +#include "fe_manager.h" +#include "enhance_c_checker.h" +#include "conditional_operator.h" + +namespace maple { +// ---------- ASTDecl --------- +const std::string ASTDecl::GetSrcFileName() const { + return srcFileName.c_str() == nullptr ? "" : srcFileName.c_str(); +} + +const std::string ASTDecl::GetName() const { + return name.c_str() == nullptr ? 
"" : name.c_str(); +} + +const MapleVector &ASTDecl::GetTypeDesc() const { + return typeDesc; +} + +void ASTDecl::SetTypeDesc(const MapleVector &typeVecIn) { + typeDesc = typeVecIn; +} + +MIRConst *ASTDecl::Translate2MIRConst() const { + return Translate2MIRConstImpl(); +} + +std::string ASTDecl::GenerateUniqueVarName() const { + // add `_line_column` suffix for avoiding local var name conflict + if (isGlobalDecl || isParam) { + return GetName(); + } else { + std::stringstream os; + os << GetName(); + if (isMacroID != 0) { + // for macro expansion, variable names of same location need to be unique + os << "_" << std::to_string(isMacroID); + } else { + os << "_" << std::to_string(loc.line) << "_" << std::to_string(loc.column); + } + return os.str(); + } +} + +// ---------- ASTVar ---------- +std::unique_ptr ASTVar::Translate2FEIRVar() const { + CHECK_FATAL(typeDesc.size() == 1, "Invalid ASTVar"); + auto feirVar = + std::make_unique(GenerateUniqueVarName(), std::make_unique(*(typeDesc[0]))); + feirVar->SetGlobal(isGlobalDecl); + feirVar->SetAttrs(genAttrs); + feirVar->SetSrcLoc(loc); + feirVar->SetSectionAttr(sectionAttr); + if (boundary.lenExpr != nullptr) { + std::list nullStmts; + UniqueFEIRExpr lenExpr = boundary.lenExpr->Emit2FEExpr(nullStmts); + feirVar->SetBoundaryLenExpr(std::move(lenExpr)); + } + return feirVar; +} + +MIRConst *ASTVar::Translate2MIRConstImpl() const { + return initExpr->GenerateMIRConst(); +} + +void ASTVar::GenerateInitStmt4StringLiteral(const ASTExpr *initASTExpr, const UniqueFEIRVar &feirVar, + const UniqueFEIRExpr &initFeirExpr, std::list &stmts) const { + if (!static_cast(initASTExpr)->IsArrayToPointerDecay()) { + std::unique_ptr> argExprList = std::make_unique>(); + UniqueFEIRExpr dstExpr = FEIRBuilder::CreateExprAddrofVar(feirVar->Clone()); + uint32 stringLiteralSize = static_cast(initFeirExpr.get())->GetStringLiteralSize(); + auto uDstExpr = dstExpr->Clone(); + auto uSrcExpr = initFeirExpr->Clone(); + argExprList->emplace_back(std::move(uDstExpr)); + argExprList->emplace_back(std::move(uSrcExpr)); + MIRType *mirArrayType = feirVar->GetType()->GenerateMIRTypeAuto(); + UniqueFEIRExpr sizeExpr; + if (mirArrayType->GetKind() == kTypeArray && + static_cast(mirArrayType)->GetElemType()->GetSize() != 1) { + UniqueFEIRExpr leftExpr = FEIRBuilder::CreateExprConstI32(stringLiteralSize); + size_t elemSizes = static_cast(mirArrayType)->GetElemType()->GetSize(); + CHECK_FATAL(elemSizes <= INT_MAX, "Too large elem size"); + UniqueFEIRExpr rightExpr = FEIRBuilder::CreateExprConstI32(static_cast(elemSizes)); + sizeExpr = FEIRBuilder::CreateExprBinary(OP_mul, std::move(leftExpr), std::move(rightExpr)); + } else { + sizeExpr = FEIRBuilder::CreateExprConstI32(stringLiteralSize); + } + argExprList->emplace_back(sizeExpr->Clone()); + std::unique_ptr memcpyStmt = std::make_unique( + INTRN_C_memcpy, nullptr, nullptr, std::move(argExprList)); + memcpyStmt->SetSrcLoc(initFeirExpr->GetLoc()); + stmts.emplace_back(std::move(memcpyStmt)); + if (mirArrayType->GetKind() != kTypeArray) { + return; + } + auto allSize = static_cast(mirArrayType)->GetSize(); + auto elemSize = static_cast(mirArrayType)->GetElemType()->GetSize(); + CHECK_FATAL(elemSize != 0, "elemSize should not 0"); + auto allElemCnt = allSize / elemSize; + uint32 needInitFurtherCnt = static_cast(allElemCnt - stringLiteralSize); + if (needInitFurtherCnt > 0) { + argExprList = std::make_unique>(); + auto addExpr = FEIRBuilder::CreateExprBinary(OP_add, std::move(dstExpr), sizeExpr->Clone()); + 
argExprList->emplace_back(std::move(addExpr)); + argExprList->emplace_back(FEIRBuilder::CreateExprConstI32(0)); + argExprList->emplace_back(FEIRBuilder::CreateExprConstI32(needInitFurtherCnt * elemSize)); + std::unique_ptr memsetStmt = std::make_unique( + INTRN_C_memset, nullptr, nullptr, std::move(argExprList)); + memsetStmt->SetSrcLoc(initFeirExpr->GetLoc()); + stmts.emplace_back(std::move(memsetStmt)); + } + return; + } +} + +void ASTVar::GenerateInitStmtImpl(std::list &stmts) { + MIRSymbol *sym = Translate2MIRSymbol(); + ENCChecker::CheckNonnullLocalVarInit(*sym, initExpr); + UniqueFEIRVar feirVar = Translate2FEIRVar(); + if (FEOptions::GetInstance().IsDbgFriendly() && !hasAddedInMIRScope) { + FEFunction &feFunction = FEManager::GetCurrentFEFunction(); + MIRScope *mirScope = feFunction.GetTopMIRScope(); + FEUtils::AddAliasInMIRScope(*mirScope, GetName(), *sym, sourceType); + hasAddedInMIRScope = true; + } + if (variableArrayExpr != nullptr) { // vla declaration point + FEFunction &feFunction = FEManager::GetCurrentFEFunction(); + if (feFunction.GetTopFEIRScopePtr() != nullptr && + feFunction.GetTopFEIRScopePtr()->GetVLASavedStackVar() == nullptr) { + // stack save + MIRType *retType = GlobalTables::GetTypeTable().GetOrCreatePointerType( + *GlobalTables::GetTypeTable().GetPrimType(PTY_void)); + auto stackVar = FEIRBuilder::CreateVarNameForC(FEUtils::GetSequentialName("saved_stack."), *retType, false); + std::unique_ptr> argExprList = std::make_unique>(); + auto stackSaveStmt = std::make_unique(INTRN_C_stack_save, nullptr, stackVar->Clone(), + std::move(argExprList)); + stackSaveStmt->SetSrcLoc(feirVar->GetSrcLoc()); + (void)stmts.emplace_back(std::move(stackSaveStmt)); + // push saved stack var into scope + feFunction.GetTopFEIRScopePtr()->SetVLASavedStackVar(std::move(stackVar)); + } + // alloca + UniqueFEIRExpr variableArrayFEIRExpr = variableArrayExpr->Emit2FEExpr(stmts); + MIRType *mirType = GlobalTables::GetTypeTable().GetPrimType(PTY_a64); + UniqueFEIRType feType = std::make_unique(*mirType); + UniqueFEIRExpr allocaExpr = std::make_unique(std::move(feType), OP_alloca, + std::move(variableArrayFEIRExpr)); + UniqueFEIRStmt allocaStmt = FEIRBuilder::CreateStmtDAssign(feirVar->Clone(), std::move(allocaExpr)); + allocaStmt->SetSrcLoc(feirVar->GetSrcLoc()); + (void)stmts.emplace_back(std::move(allocaStmt)); + } + ENCChecker::InsertBoundaryVar(*this, stmts); + if (initExpr == nullptr) { + return; + } + if (genAttrs.GetAttr(GENATTR_static) && sym->GetKonst() != nullptr) { + return; + } + UniqueFEIRExpr initFeirExpr = initExpr->Emit2FEExpr(stmts); + if (initFeirExpr == nullptr) { + return; + } + ENCChecker::CheckNonnullLocalVarInit(*sym, initFeirExpr, stmts); + if (initExpr->GetASTOp() == kASTStringLiteral) { // init for StringLiteral + return GenerateInitStmt4StringLiteral(initExpr, feirVar, initFeirExpr, stmts); + } + + if (ConditionalOptimize::DeleteRedundantTmpVar(initFeirExpr, stmts, feirVar, feirVar->GetType()->GetPrimType())) { + return; + } + + PrimType srcPrimType = initFeirExpr->GetPrimType(); + UniqueFEIRStmt stmt; + if (srcPrimType != feirVar->GetType()->GetPrimType() && srcPrimType != PTY_agg && srcPrimType != PTY_void) { + auto castExpr = FEIRBuilder::CreateExprCastPrim(std::move(initFeirExpr), feirVar->GetType()->GetPrimType()); + stmt = FEIRBuilder::CreateStmtDAssign(feirVar->Clone(), std::move(castExpr)); + } else { + stmt = FEIRBuilder::CreateStmtDAssign(feirVar->Clone(), std::move(initFeirExpr)); + } + stmt->SetSrcLoc(feirVar->GetSrcLoc()); + 
stmts.emplace_back(std::move(stmt)); +} + +MIRSymbol *ASTVar::Translate2MIRSymbol() const { + UniqueFEIRVar feirVar = Translate2FEIRVar(); + MIRSymbol *mirSymbol = feirVar->GenerateMIRSymbol(FEManager::GetMIRBuilder()); + if (initExpr != nullptr && genAttrs.GetAttr(GENATTR_static)) { + MIRConst *cst = initExpr->GenerateMIRConst(); + if (cst != nullptr && cst->GetKind() != kConstInvalid) { + mirSymbol->SetKonst(cst); + } + } + if (!sectionAttr.empty()) { + mirSymbol->sectionAttr = GlobalTables::GetUStrTable().GetOrCreateStrIdxFromName(sectionAttr); + } + if (!asmAttr.empty()) { + mirSymbol->SetAsmAttr(GlobalTables::GetUStrTable().GetOrCreateStrIdxFromName(asmAttr)); + } + return mirSymbol; +} + +// ---------- ASTEnumConstant ---------- +void ASTEnumConstant::SetValue(const IntVal &val) { + value = val; +} + +const IntVal &ASTEnumConstant::GetValue() const { + return value; +} + +MIRConst *ASTEnumConstant::Translate2MIRConstImpl() const { + return GlobalTables::GetIntConstTable().GetOrCreateIntConst(value, *typeDesc.front()); +} + +// ---------- ASTFunc --------- +void ASTFunc::SetCompoundStmt(ASTStmt *astCompoundStmt) { + compound = astCompoundStmt; +} + +void ASTFunc::InsertStmtsIntoCompoundStmtAtFront(const std::list &stmts) const { + static_cast(compound)->InsertASTStmtsAtFront(stmts); +} + +const ASTStmt *ASTFunc::GetCompoundStmt() const { + return compound; +} + +std::vector> ASTFunc::GenArgVarList() const { + std::vector> args; + return args; +} + +std::list ASTFunc::EmitASTStmtToFEIR() const { + std::list stmts; + const ASTStmt *astStmt = GetCompoundStmt(); + if (astStmt == nullptr) { + return stmts; + } + const ASTCompoundStmt *astCpdStmt = static_cast(astStmt); + FEFunction &feFunction = FEManager::GetCurrentFEFunction(); + feFunction.PushFuncScope(FEUtils::CvtLoc2SrcPosition(astCpdStmt->GetSrcLoc()), + FEUtils::CvtLoc2SrcPosition(astCpdStmt->GetEndLoc())); + const MapleList &astStmtList = astCpdStmt->GetASTStmtList(); + for (auto stmtNode : astStmtList) { + std::list childStmts = stmtNode->Emit2FEStmt(); + for (auto &stmt : childStmts) { + // Link jump stmt not implemented yet + stmts.emplace_back(std::move(stmt)); + } + } + UniqueFEIRScope scope = feFunction.PopTopScope(); + // fix int main() no return 0 and void func() no return. there are multiple branches, insert return at the end. + if (stmts.size() == 0 || stmts.back()->GetKind() != kStmtReturn) { + if (scope->GetVLASavedStackVar() != nullptr) { + auto stackRestoreStmt = scope->GenVLAStackRestoreStmt(); + stackRestoreStmt->SetSrcLoc(astCpdStmt->GetEndLoc()); + (void)stmts.emplace_back(std::move(stackRestoreStmt)); + } + UniqueFEIRExpr retExpr = nullptr; + PrimType retType = typeDesc[1]->GetPrimType(); + if (retType != PTY_void) { + if (!typeDesc[1]->IsScalarType()) { + retType = PTY_i32; + } + retExpr = FEIRBuilder::CreateExprConstAnyScalar(retType, static_cast(0)); + } + UniqueFEIRStmt retStmt = std::make_unique(std::move(retExpr)); + Loc endLoc = astCpdStmt->GetEndLoc(); + endLoc.column = 0; + retStmt->SetSrcLoc(endLoc); + stmts.emplace_back(std::move(retStmt)); + } + InsertBoundaryCheckingInRet(stmts); + return stmts; +} + +// ---------- ASTStruct ---------- +std::string ASTStruct::GetStructName(bool mapled) const { + return mapled ? 
namemangler::EncodeName(GetName()) : GetName(); +} + +void ASTStruct::GenerateInitStmtImpl(std::list &stmts) { + (void)stmts; + if (!FEOptions::GetInstance().IsDbgFriendly()) { + return; + } + FEFunction &feFunction = FEManager::GetCurrentFEFunction(); + MIRScope *mirScope = feFunction.GetTopMIRScope(); + mirScope->SetTypeAliasMap(GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(GetName()), + typeDesc.front()->GetTypeIndex()); +} + +// ---------- ASTEnumDecl ---------- +void ASTEnumDecl::GenerateInitStmtImpl(std::list &stmts) { + (void)stmts; + if (!FEOptions::GetInstance().IsDbgFriendly()) { + return; + } + FEFunction &feFunction = FEManager::GetCurrentFEFunction(); + MIRScope *mirScope = feFunction.GetTopMIRScope(); + MIRTypeByName *type = FEManager::GetTypeManager().GetOrCreateTypeByNameType(GenerateUniqueVarName()); + mirScope->SetTypeAliasMap(GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(GetName()), type->GetTypeIndex()); +} + +// ---------- ASTTypedefDecl ---------- +void ASTTypedefDecl::GenerateInitStmtImpl(std::list &stmts) { + (void)stmts; + if (!FEOptions::GetInstance().IsDbgFriendly()) { + return; + } + FEFunction &feFunction = FEManager::GetCurrentFEFunction(); + MIRScope *mirScope = feFunction.GetTopMIRScope(); + const ASTTypedefDecl *astTypedef = this; + while (astTypedef != nullptr && !astTypedef->isGlobalDecl) { + MIRTypeByName *type = FEManager::GetTypeManager().CreateTypedef( + astTypedef->GenerateUniqueVarName(), *astTypedef->GetTypeDesc().front()); + mirScope->SetTypeAliasMap(GlobalTables::GetStrTable().GetOrCreateStrIdxFromName( + astTypedef->GetName()), type->GetTypeIndex()); + astTypedef = astTypedef->GetSubTypedefDecl(); + } +} +} // namespace maple diff --git a/src/hir2mpl/ast_input/maple/include/maple_ast_parser.h b/src/hir2mpl/ast_input/maple/include/maple_ast_parser.h new file mode 100644 index 0000000000000000000000000000000000000000..f38e3c3ba222b5e2f8ba40bce3e26ab8fd131f88 --- /dev/null +++ b/src/hir2mpl/ast_input/maple/include/maple_ast_parser.h @@ -0,0 +1,77 @@ +/* + * Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MAPLE_AST_PARSER_H +#define MAPLE_AST_PARSER_H +#include +#include +#include "mempool_allocator.h" +#include "ast_decl.h" +#include "maple_ast_interface.h" +#include "ast.h" + +namespace maple { +class MapleASTParser { + public: + MapleASTParser(MapleAllocator &allocatorIn, uint32 fileIdxIn, const std::string &fileNameIn, + MapleList &astStructsIn, MapleList &astFuncsIn, MapleList &astVarsIn, + MapleList &astFileScopeAsmsIn) + : fileIdx(fileIdxIn), fileName(fileNameIn), + globalVarDecles(allocatorIn.Adapter()), funcDecles(allocatorIn.Adapter()), + astStructs(astStructsIn), astFuncs(astFuncsIn), + astVars(astVarsIn), astFileScopeAsms(astFileScopeAsmsIn), + astFuncMap(allocatorIn.Adapter()) {} + virtual ~MapleASTParser() = default; + bool OpenFile(); + const uint32 GetFileIdx() const; + bool Verify() const; + bool PreProcessAST(); + + std::list ProcessDeclNode(MapleAllocator &allocator, maplefe::DeclNode *decl); + ASTDecl *ProcessDecl(MapleAllocator &allocator, maplefe::TreeNode *decl); +#define MAPLE_PROCESS_DECL(CLASS) ProcessDecl##CLASS##Node(MapleAllocator &allocator, maplefe::CLASS##Node*) + ASTDecl *MAPLE_PROCESS_DECL(Function); + ASTDecl *MAPLE_PROCESS_DECL(Identifier); + + ASTStmt *ProcessStmt(MapleAllocator &allocator, maplefe::TreeNode *stmt); +#define MAPLE_PROCESS_STMT(CLASS) ProcessStmt##CLASS##Node(MapleAllocator&, maplefe::CLASS##Node*) + ASTStmt *MAPLE_PROCESS_STMT(Block); + ASTStmt *MAPLE_PROCESS_STMT(Decl); + ASTStmt *MAPLE_PROCESS_STMT(Return); + + ASTExpr *ProcessExpr(MapleAllocator &allocator, maplefe::TreeNode *expr); +#define MAPLE_PROCESS_EXPR(CLASS) ProcessExpr##CLASS##Node(MapleAllocator&, maplefe::CLASS##Node*) + ASTExpr *MAPLE_PROCESS_EXPR(Literal); + ASTExpr *MAPLE_PROCESS_EXPR(Call); + ASTExpr *MAPLE_PROCESS_EXPR(Identifier); + + bool RetrieveStructs(MapleAllocator &allocator); + bool RetrieveFuncs(MapleAllocator &allocator); + bool RetrieveGlobalVars(MapleAllocator &allocator); + bool RetrieveFileScopeAsms(MapleAllocator &allocator); + + private: + uint32 fileIdx; + const std::string fileName; + std::unique_ptr astFile; + MapleList globalVarDecles; + MapleList funcDecles; + MapleList &astStructs; + MapleList &astFuncs; + MapleList &astVars; + MapleList &astFileScopeAsms; + MapleMap astFuncMap; +}; +} // namespace maple +#endif // MAPLE_AST_PARSER_H diff --git a/src/hir2mpl/ast_input/maple/lib/maple_ast_interface.cpp b/src/hir2mpl/ast_input/maple/lib/maple_ast_interface.cpp new file mode 100644 index 0000000000000000000000000000000000000000..678f3c15dc9093543eee615cbcb79cecc9f58dc8 --- /dev/null +++ b/src/hir2mpl/ast_input/maple/lib/maple_ast_interface.cpp @@ -0,0 +1,110 @@ +/* + * Copyright (c) [2021-2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "maple_ast_interface.h" +#include +#include +#include "gen_aststore.h" +#include "gen_astload.h" +#include "global_tables.h" + +namespace maple { +bool LibMapleAstFile::Open(const std::string &fileName) { + std::ifstream input(fileName, std::ifstream::binary); + input >> std::noskipws; + std::istream_iterator s(input), e; + maplefe::AstBuffer vec(s, e); + maplefe::AstLoad loadAst; + maplefe::ModuleNode *mod = loadAst.LoadFromAstBuf(vec); + // add mod to the vector + while (mod) { + handler.AddModule(mod); + mod = loadAst.Next(); + } + return true; +} + +PrimType LibMapleAstFile::MapPrim(maplefe::TypeId id) { + PrimType prim; + switch (id) { + case maplefe::TY_Boolean: + prim = PTY_u1; + break; + case maplefe::TY_Byte: + prim = PTY_u8; + break; + case maplefe::TY_Short: + prim = PTY_i16; + break; + case maplefe::TY_Int: + prim = PTY_i32; + break; + case maplefe::TY_Long: + prim = PTY_i64; + break; + case maplefe::TY_Char: + prim = PTY_u16; + break; + case maplefe::TY_Float: + prim = PTY_f32; + break; + case maplefe::TY_Double: + prim = PTY_f64; + break; + case maplefe::TY_Void: + prim = PTY_void; + break; + case maplefe::TY_Null: + prim = PTY_void; + break; + default: + CHECK_FATAL(false, "Unsupported PrimType"); + break; + } + return prim; +} + +MIRType *LibMapleAstFile::MapPrimType(maplefe::TypeId id) { + PrimType prim = MapPrim(id); + TyIdx tid(prim); + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(tid); +} + +MIRType *LibMapleAstFile::MapPrimType(maplefe::PrimTypeNode *ptnode) { + return MapPrimType(ptnode->GetPrimType()); +} + +MIRType *LibMapleAstFile::MapType(maplefe::TreeNode *type) { + if (type == nullptr) { + return GlobalTables::GetTypeTable().GetVoid(); + } + + MIRType *mirType = nullptr; + if (type->IsPrimType()) { + maplefe::PrimTypeNode *ptnode = static_cast(type); + mirType = MapPrimType(ptnode); + } else if (type->IsUserType()) { + if (type->IsIdentifier()) { + maplefe::IdentifierNode *inode = static_cast(type); + mirType = MapType(inode->GetType()); + } else { + CHECK_FATAL(false, "MapType IsUserType"); + } + } else { + CHECK_FATAL(false, "MapType unknown type"); + } + + return mirType; +} +} \ No newline at end of file diff --git a/src/hir2mpl/ast_input/maple/lib/maple_ast_interface.h b/src/hir2mpl/ast_input/maple/lib/maple_ast_interface.h new file mode 100644 index 0000000000000000000000000000000000000000..77d8b4f9e7559d578790d3c81760062938b5f4ca --- /dev/null +++ b/src/hir2mpl/ast_input/maple/lib/maple_ast_interface.h @@ -0,0 +1,42 @@ +/* + * Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MAPLE_AST_INTERFACE_H +#define MAPLE_AST_INTERFACE_H +#include +#include "ast_handler.h" +#include "mir_type.h" + +namespace maple { +class LibMapleAstFile { + public: + LibMapleAstFile() : handler(maplefe::FLG_trace) {} + ~LibMapleAstFile() = default; + + bool Open(const std::string &fileName); + + maplefe::AST_Handler &GetASTHandler() { + return handler; + } + + PrimType MapPrim(maplefe::TypeId id); + MIRType *MapPrimType(maplefe::TypeId id); + MIRType *MapPrimType(maplefe::PrimTypeNode *ptnode); + MIRType *MapType(maplefe::TreeNode *type); + + private: + maplefe::AST_Handler handler; +}; +} // namespace maple +#endif // MAPLE_AST_INTERFACE_H \ No newline at end of file diff --git a/src/hir2mpl/ast_input/maple/src/maple_ast_parser.cpp b/src/hir2mpl/ast_input/maple/src/maple_ast_parser.cpp new file mode 100644 index 0000000000000000000000000000000000000000..33b6137e56451963c6d41f254821fdd251ef9c5a --- /dev/null +++ b/src/hir2mpl/ast_input/maple/src/maple_ast_parser.cpp @@ -0,0 +1,339 @@ +/* + * Copyright (c) [2021-2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "maple_ast_parser.h" +#include "ast_decl_builder.h" + +namespace maple { +bool MapleASTParser::OpenFile() { + (void)astStructs; + (void)astFileScopeAsms; + astFile = std::make_unique(); + bool res = astFile->Open(fileName); + if (!res) { + return false; + } + return true; +} + +const uint32 MapleASTParser::GetFileIdx() const { + return fileIdx; +} + +bool MapleASTParser::Verify() const { + return true; +} + +bool MapleASTParser::PreProcessAST() { + for (uint32 i = 0; i < astFile->GetASTHandler().GetSize(); ++i) { + maplefe::Module_Handler *handler = astFile->GetASTHandler().GetModuleHandler(i); + maplefe::ModuleNode *module = handler->GetASTModule(); + for (uint32 i = 0; i < module->GetTreesNum(); ++i) { + maplefe::TreeNode *node = module->GetTree(i); + switch (node->GetKind()) { + case maplefe::NK_Decl: + globalVarDecles.emplace_back(node); + break; + case maplefe::NK_Function: + funcDecles.emplace_back(node); + break; + default: break; + } + } + } + return true; +} + + +#define MAPLE_DECL_CASE(CLASS) \ + case maplefe::NK_##CLASS: { \ + ASTDecl *astDecl = ProcessDecl##CLASS##Node(allocator, static_cast(decl)); \ + return astDecl; \ + } + +ASTDecl *MapleASTParser::ProcessDecl(MapleAllocator &allocator, maplefe::TreeNode *decl) { + ASTDecl *astDecl = ASTDeclsBuilder::GetASTDecl(decl->GetNodeId()); + if (astDecl != nullptr) { + return astDecl; + } + + switch (decl->GetKind()) { + MAPLE_DECL_CASE(Function) + MAPLE_DECL_CASE(Identifier) + default: + decl->Dump(0); + CHECK_FATAL(false, "ASTDecl NIY"); + return nullptr; + } +} + +ASTDecl *MapleASTParser::ProcessDeclIdentifierNode(MapleAllocator &allocator, maplefe::IdentifierNode *identifierDecl) { + ASTVar *astVar = static_cast(ASTDeclsBuilder::GetASTDecl(identifierDecl->GetNodeId())); + if (astVar != nullptr) { + return astVar; + } + + std::string varName = identifierDecl->GetName(); + if (varName.empty()) { + return nullptr; + } + MIRType *varType = 
astFile->MapType(identifierDecl->GetType()); + if (varType == nullptr) { + return nullptr; + } + GenericAttrs attrs; + astVar = ASTDeclsBuilder::ASTVarBuilder( + allocator, fileName, varName, MapleVector{varType}, attrs, identifierDecl->GetNodeId()); + + if (identifierDecl->GetInit() != nullptr) { + auto astInitExpr = ProcessExpr(allocator, identifierDecl->GetInit()); + astVar->SetInitExpr(astInitExpr); + } + + return astVar; +} + +std::list MapleASTParser::ProcessDeclNode(MapleAllocator &allocator, maplefe::DeclNode *varDecl) { + maplefe::VarListNode *varList = static_cast(varDecl->GetVar()); + std::list astVars; + for (uint32 i = 0; i < varList->GetVarsNum(); ++i) { + maplefe::IdentifierNode *iNode = varList->GetVarAtIndex(i); + ASTVar *astVar = static_cast(ProcessDecl(allocator, iNode)); + astVars.emplace_back(astVar); + } + return astVars; +} + +ASTDecl *MapleASTParser::ProcessDeclFunctionNode(MapleAllocator &allocator, maplefe::FunctionNode *funcDecl) { + ASTFunc *astFunc = static_cast(ASTDeclsBuilder::GetASTDecl(funcDecl->GetNodeId())); + if (astFunc != nullptr) { + return astFunc; + } + + std::string funcName = funcDecl->GetName(); + if (funcName.empty()) { + return nullptr; + } + + MapleVector typeDescIn; + typeDescIn.push_back(nullptr); // mirFuncType + MIRType *retType = astFile->MapType(funcDecl->GetType()); + if (retType == nullptr) { + return nullptr; + } + typeDescIn.push_back(retType); + + MapleVector paramDecls; + uint32 numParam = funcDecl->GetParamsNum(); + for (uint32 i = 0; i < numParam; ++i) { + maplefe::TreeNode *param = funcDecl->GetParam(i); + ASTDecl *parmVarDecl = nullptr; + if (param->IsIdentifier()) { + parmVarDecl = ProcessDecl(allocator, param); + } else { + continue; + } + paramDecls.push_back(parmVarDecl); + typeDescIn.push_back(parmVarDecl->GetTypeDesc().front()); + } + + GenericAttrs attrs; + astFunc = ASTDeclsBuilder::ASTFuncBuilder( + allocator, fileName, funcName, typeDescIn, attrs, paramDecls, funcDecl->GetNodeId()); + CHECK_FATAL(astFunc != nullptr, "astFunc is nullptr"); + + maplefe::BlockNode *astBody = funcDecl->GetBody(); + if (astBody != nullptr) { + ASTStmt *astCompoundStmt = ProcessStmt(allocator, astBody); + if (astCompoundStmt != nullptr) { + astFunc->SetCompoundStmt(astCompoundStmt); + } + } + astFuncMap[funcDecl->GetStrIdx()] = astFunc; + return astFunc; +} + + +#define MAPLE_STMT_CASE(CLASS) \ + case maplefe::NK_##CLASS: { \ + ASTStmt *astStmt = ProcessStmt##CLASS##Node(allocator, static_cast(stmt)); \ + return astStmt; \ + } + +ASTStmt *MapleASTParser::ProcessStmt(MapleAllocator &allocator, maplefe::TreeNode *stmt) { + switch (stmt->GetKind()) { + MAPLE_STMT_CASE(Block); + MAPLE_STMT_CASE(Decl); + MAPLE_STMT_CASE(Return); + default: { + stmt->Dump(0); + CHECK_FATAL(false, "ASTStmt NIY"); + return nullptr; + } + } +} + +ASTStmt *MapleASTParser::ProcessStmtBlockNode(MapleAllocator &allocator, maplefe::BlockNode *stmt) { + ASTCompoundStmt *astCompoundStmt = ASTDeclsBuilder::ASTStmtBuilder(allocator); + CHECK_FATAL(astCompoundStmt != nullptr, "astCompoundStmt is nullptr"); + ASTStmt *childStmt = nullptr; + for (uint32 i = 0; i < stmt->GetChildrenNum(); ++i) { + maplefe::TreeNode *child = stmt->GetChildAtIndex(i); + childStmt = ProcessStmt(allocator, child); + if (childStmt != nullptr) { + astCompoundStmt->SetASTStmt(childStmt); + } else { + continue; + } + } + return astCompoundStmt; +} + +ASTStmt *MapleASTParser::ProcessStmtDeclNode(MapleAllocator &allocator, maplefe::DeclNode *stmt) { + ASTDeclStmt *declStmt = 
ASTDeclsBuilder::ASTStmtBuilder(allocator); + std::list astVars = ProcessDeclNode(allocator, stmt); + for (auto &var : astVars) { + declStmt->SetSubDecl(var); + } + return declStmt; +} + +ASTStmt *MapleASTParser::ProcessStmtReturnNode(MapleAllocator &allocator, maplefe::ReturnNode *stmt) { + ASTReturnStmt *astStmt = ASTDeclsBuilder::ASTStmtBuilder(allocator); + CHECK_FATAL(astStmt != nullptr, "astStmt is nullptr"); + ASTExpr *astExpr = ProcessExpr(allocator, stmt->GetResult()); + astStmt->SetASTExpr(astExpr); + return astStmt; +} + +#define MAPLE_EXPR_CASE(CLASS) \ + case maplefe::NK_##CLASS: { \ + ASTExpr *astExpr = ProcessExpr##CLASS##Node(allocator, static_cast(expr)); \ + return astExpr; \ + } + +ASTExpr *MapleASTParser::ProcessExpr(MapleAllocator &allocator, maplefe::TreeNode *expr) { + if (expr == nullptr) { + return nullptr; + } + + switch (expr->GetKind()) { + MAPLE_EXPR_CASE(Literal); + MAPLE_EXPR_CASE(Call); + MAPLE_EXPR_CASE(Identifier); + default: + expr->Dump(0); + CHECK_FATAL(false, "ASTExpr NIY"); + return nullptr; + } +} + +ASTExpr *MapleASTParser::ProcessExprLiteralNode(MapleAllocator &allocator, maplefe::LiteralNode *expr) { + ASTExpr *literalExpr = nullptr; + switch (expr->GetData().mType) { + case maplefe::LT_IntegerLiteral: + literalExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); + literalExpr->SetType(GlobalTables::GetTypeTable().GetInt64()); + static_cast(literalExpr)->SetVal(expr->GetData().mData.mInt64); + break; + case maplefe::LT_FPLiteral: + literalExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); + literalExpr->SetType(GlobalTables::GetTypeTable().GetFloat()); + static_cast(literalExpr)->SetVal(expr->GetData().mData.mFloat); + break; + case maplefe::LT_DoubleLiteral: + literalExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); + literalExpr->SetType(GlobalTables::GetTypeTable().GetDouble()); + static_cast(literalExpr)->SetVal(expr->GetData().mData.mDouble); + break; + default: + CHECK_FATAL(false, "NYI"); + break; + } + return literalExpr; +} + +ASTExpr *MapleASTParser::ProcessExprIdentifierNode(MapleAllocator &allocator, maplefe::IdentifierNode *expr) { + ASTDeclRefExpr *astRefExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); + CHECK_FATAL(astRefExpr != nullptr, "astRefExpr is nullptr"); + ASTDecl *astDecl = ASTDeclsBuilder::GetASTDecl(expr->GetNodeId()); + if (astDecl == nullptr) { + astDecl = ProcessDecl(allocator, expr); + } + astRefExpr->SetASTDecl(astDecl); + astRefExpr->SetType(astDecl->GetTypeDesc().front()); + return astRefExpr; +} + +ASTExpr *MapleASTParser::ProcessExprCallNode(MapleAllocator &allocator, maplefe::CallNode *expr) { + ASTCallExpr *astCallExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); + ASSERT(astCallExpr != nullptr, "astCallExpr is nullptr"); + // callee + ASTExpr *astCallee = ProcessExpr(allocator, expr->GetMethod()); + if (astCallee == nullptr) { + return nullptr; + } + astCallExpr->SetCalleeExpr(astCallee); + // return + // C language function names can be used as unique identifiers + MIRType *retType = astFuncMap[expr->GetMethod()->GetStrIdx()]->GetTypeDesc()[1]; + astCallExpr->SetRetType(retType); + // args + std::vector args; + for (uint32_t i = 0; i < expr->GetArgsNum(); ++i) { + maplefe::TreeNode *subExpr = expr->GetArg(i); + ASTExpr *arg = ProcessExpr(allocator, subExpr); + args.push_back(arg); + } + astCallExpr->SetArgs(args); + if (expr->GetMethod() != nullptr) { + GenericAttrs attrs; + astCallExpr->SetFuncName(astCallee->GetASTDecl()->GetName()); + astCallExpr->SetFuncAttrs(attrs.ConvertToFuncAttrs()); + } else { + 
astCallExpr->SetIcall(true); + } + return astCallExpr; +} + +bool MapleASTParser::RetrieveStructs(MapleAllocator &allocator) { + return true; +} + +bool MapleASTParser::RetrieveFuncs(MapleAllocator &allocator) { + for (auto &decl : funcDecles) { + ASTFunc *funcDecl = static_cast(ProcessDecl(allocator, decl)); + if (funcDecl == nullptr) { + return false; + } + funcDecl->SetGlobal(true); + astFuncs.emplace_back(funcDecl); + } + return true; +} + +bool MapleASTParser::RetrieveGlobalVars(MapleAllocator &allocator) { + for (auto &decl : globalVarDecles) { + std::list astVarList = ProcessDeclNode(allocator, static_cast(decl)); + for (auto &astVar : astVarList) { + astVars.emplace_back(static_cast(astVar)); + } + } + return true; +} + +bool MapleASTParser::RetrieveFileScopeAsms(MapleAllocator &allocator) { + return true; +} +} // namespace maple diff --git a/src/hir2mpl/bytecode_input/class/include/jbc_attr.def b/src/hir2mpl/bytecode_input/class/include/jbc_attr.def new file mode 100644 index 0000000000000000000000000000000000000000..4fe4da2e62a49bb97ac696d9897181e4a2eb36bf --- /dev/null +++ b/src/hir2mpl/bytecode_input/class/include/jbc_attr.def @@ -0,0 +1,37 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +JBC_ATTR("ConstantValue", ConstantValue) +JBC_ATTR("Code", Code) +JBC_ATTR("StackMapTable", StackMapTable) +JBC_ATTR("Exceptions", Exception) +JBC_ATTR("InnerClasses", InnerClass) +JBC_ATTR("EnclosingMethod", EnclosingMethod) +JBC_ATTR("Synthetic", Synthetic) +JBC_ATTR("Signature", Signature) +JBC_ATTR("SourceFile", SourceFile) +JBC_ATTR("SourceDebugExtension", SourceDebugEx) +JBC_ATTR("LineNumberTable", LineNumberTable) +JBC_ATTR("LocalVariableTable", LocalVariableTable) +JBC_ATTR("LocalVariableTypeTable", LocalVariableTypeTable) +JBC_ATTR("Deprecated", Deprecated) +JBC_ATTR("RuntimeVisibleAnnotations", RTVisAnnotations) +JBC_ATTR("RuntimeInvisibleAnnotations", RTInvisAnnotations) +JBC_ATTR("RuntimeVisibleParameterAnnotations", RTVisParamAnnotations) +JBC_ATTR("RuntimeInvisibleParameterAnnotations", RTInvisParamAnnotations) +JBC_ATTR("RuntimeVisibleTypeAnnotations", RTVisTypeAnnotations) +JBC_ATTR("RuntimeInvisibleTypeAnnotations", RTInvisTypeAnnotations) +JBC_ATTR("AnnotationDefault", AnnotationDefault) +JBC_ATTR("BootstrapMethods", BootstrapMethods) +JBC_ATTR("MethodParameters", MethodParameters) \ No newline at end of file diff --git a/src/hir2mpl/bytecode_input/class/include/jbc_attr.h b/src/hir2mpl/bytecode_input/class/include/jbc_attr.h new file mode 100644 index 0000000000000000000000000000000000000000..ea6ccc9f535724d6bab36cea2fb6cd49bb74efcc --- /dev/null +++ b/src/hir2mpl/bytecode_input/class/include/jbc_attr.h @@ -0,0 +1,583 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
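jbc_attr.def above is an X-macro table: each JBC_ATTR(name, type) row is re-expanded by whichever header includes it, so the attribute list is written once and reused for the kind enum and for name-to-kind lookup. A condensed sketch of the expansion pattern (illustrative; the real expansion sits inside the JBCAttrKind enum of jbc_attr.h just below):

// Sketch: redefine JBC_ATTR, include the .def, and every row becomes one
// enumerator (kAttrConstantValue, kAttrCode, ..., kAttrMethodParameters).
#define JBC_ATTR(name, type) kAttr##type,
enum JBCAttrKindSketch {
  kAttrUnknown,
  kAttrRaw,
#include "jbc_attr.def"
};
#undef JBC_ATTR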
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef HIR2MPL_INCLUDE_JBC_ATTR_H +#define HIR2MPL_INCLUDE_JBC_ATTR_H +#include "mempool_allocator.h" +#include "types_def.h" +#include "fe_configs.h" +#include "basic_io.h" +#include "jbc_class_const_pool.h" +#include "jbc_attr_item.h" +#include "jbc_opcode.h" + +namespace maple { +namespace jbc { +const static uint32 kInvalidPC = UINT32_MAX; +const static uint16 kInvalidPC16 = UINT16_MAX; +const static uint32 kMaxPC32 = 0x0000FFFF; + +enum JBCAttrKind : uint8 { + kAttrUnknown, + kAttrRaw, +#undef JBC_ATTR +#define JBC_ATTR(name, type) kAttr##type, +#include "jbc_attr.def" +#undef JBC_ATTR +}; + +class JBCAttr { + public: + JBCAttr(JBCAttrKind kindIn, uint16 nameIdxIn, uint32 lengthIn); + virtual ~JBCAttr() = default; + static JBCAttr *InAttr(MapleAllocator &allocator, BasicIORead &io, const JBCConstPool &constPool); + static JBCAttrKind AttrKind(const std::string &str); + JBCAttrKind GetKind() const { + return kind; + } + + bool ParseFile(MapleAllocator &allocator, BasicIORead &io, const JBCConstPool &constPool) { + return ParseFileImpl(allocator, io, constPool); + } + + bool PreProcess(const JBCConstPool &constPool) { + return PreProcessImpl(constPool); + } + + SimpleXMLElem *GenXmlElem(MapleAllocator &allocator, const JBCConstPool &constPool, uint32 idx) const { + return GenXmlElemImpl(allocator, constPool, idx); + } + + protected: + virtual bool ParseFileImpl(MapleAllocator &allocator, BasicIORead &io, const JBCConstPool &constPool) = 0; + virtual bool PreProcessImpl(const JBCConstPool &constPool) = 0; + virtual SimpleXMLElem *GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, uint32 idx) const = 0; + + JBCAttrKind kind; + uint16 nameIdx; + uint32 length; +}; + +class JBCAttrMap { + public: + explicit JBCAttrMap(MapleAllocator &allocatorIn); + ~JBCAttrMap() = default; + void RegisterAttr(JBCAttr &attr); + std::list GetAttrs(JBCAttrKind kind) const; + const JBCAttr *GetAttr(JBCAttrKind kind) const; + bool PreProcess(const JBCConstPool &constPool); + + private: + MapleAllocator &allocator; + MapleMap*> mapAttrs; +}; + +class JBCAttrRaw : public JBCAttr { + public: + JBCAttrRaw(const MapleAllocator &allocator, uint16 nameIdx, uint32 length); + ~JBCAttrRaw() override; + + protected: + bool ParseFileImpl(MapleAllocator &allocator, BasicIORead &io, const JBCConstPool &constPool) override; + bool PreProcessImpl(const JBCConstPool &constPool) override; + SimpleXMLElem *GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, uint32 idx) const override; + + private: + uint8 *rawData; +}; + +struct JavaAttrLocalVariableInfoItem { + JavaAttrLocalVariableInfoItem() + : slotIdx(0), + start(0), + length(0), + nameIdx(0), + feirType(nullptr), + signatureNameIdx(0) {} + + uint16 slotIdx; + uint16 start; + uint16 length; + GStrIdx nameIdx; // in java format + const FEIRType *feirType; + GStrIdx signatureNameIdx; // in java format +}; + +class JBCAttrLocalVariableInfo { + public: + explicit JBCAttrLocalVariableInfo(MapleAllocator &argAllocator); + ~JBCAttrLocalVariableInfo() = default; + void RegisterItem(const attr::LocalVariableTableItem &itemAttr); + void RegisterTypeItem(const 
attr::LocalVariableTypeTableItem &itemAttr); + const JavaAttrLocalVariableInfoItem &GetItemByStart(uint16 slotIdx, uint16 start) const; + uint16 GetStart(uint16 slotIdx, uint16 pc) const; + std::list EmitToStrings() const; + static bool IsInvalidLocalVariableInfoItem(const JavaAttrLocalVariableInfoItem &item); + + private: + void AddSlotStartMap(uint16 slotIdx, uint16 startPC); + void CheckItemAvaiable(uint16 slotIdx, uint16 start) const; + JavaAttrLocalVariableInfoItem *GetItemByStartInternal(uint16 slotIdx, uint16 start); + + MapleAllocator &allocator; + MapleMap> slotStartMap; // map> + MapleMap, JavaAttrLocalVariableInfoItem> itemMap; + static JavaAttrLocalVariableInfoItem kInvalidInfoItem; +}; + +// ConstantValue Attribute +// ref: https://docs.oracle.com/javase/specs/jvms/se8/html/jvms-4.html#jvms-4.7.2 +class JBCAttrConstantValue : public JBCAttr { + public: + JBCAttrConstantValue(const MapleAllocator &allocator, uint16 nameIdx, uint32 length); + ~JBCAttrConstantValue() override; + const JBCConst *GetConstValue() const { + return constValue; + } + + protected: + bool ParseFileImpl(MapleAllocator &allocator, BasicIORead &io, const JBCConstPool &constPool) override; + bool PreProcessImpl(const JBCConstPool &constPool) override; + SimpleXMLElem *GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, uint32 idx) const override; + + private: + uint16 constIdx; + JBCConst *constValue; +}; + +// Code Attribute +// ref: https://docs.oracle.com/javase/specs/jvms/se8/html/jvms-4.html#jvms-4.7.3 +class JBCAttrLocalVariableInfo; +class JBCOp; +class JBCAttrCode : public JBCAttr { + public: + JBCAttrCode(MapleAllocator &allocator, uint16 nameIdx, uint32 length); + ~JBCAttrCode() override; + void InitLocalVarInfo(); + void SetLoadStoreType() const; + const MapleMap &GetInstMap() const { + return instructions; + } + + uint16 GetMaxStack() const { + return maxStack; + } + + uint16 GetMaxLocals() const { + return maxLocals; + } + + uint32 GetCodeLength() const { + return codeLength; + } + + LLT_MOCK_TARGET const MapleVector &GetExceptionInfos() const { + return exceptions; + } + + const JBCAttr *GetAttr(JBCAttrKind kind) const { + return attrMap.GetAttr(kind); + } + + const std::list GetAttrs(JBCAttrKind kind) const { + return attrMap.GetAttrs(kind); + } + + std::list GetLocalVarInfoByString() const { + return localVarInfo.EmitToStrings(); + } + + const JBCAttrLocalVariableInfo &GetLocalVarInfo() const { + return localVarInfo; + } + + protected: + bool ParseFileImpl(MapleAllocator &allocator, BasicIORead &io, const JBCConstPool &constPool) override; + bool PreProcessImpl(const JBCConstPool &constPool) override; + SimpleXMLElem *GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, uint32 idx) const override; + + private: + bool ParseOpcodes(MapleAllocator &allocator); + + uint16 maxStack; + uint16 maxLocals; + uint32 codeLength; + uint8 *code; + uint16 nException; + MapleVector exceptions; + uint16 nAttr; + MapleVector attrs; + MapleMap instructions; + JBCAttrMap attrMap; + JBCAttrLocalVariableInfo localVarInfo; +}; + +// StackMapTable Attribute +// ref: https://docs.oracle.com/javase/specs/jvms/se8/html/jvms-4.html#jvms-4.7.4 +class JBCAttrStackMapTable : public JBCAttr { + public: + JBCAttrStackMapTable(MapleAllocator &allocator, uint16 nameIdx, uint32 length); + ~JBCAttrStackMapTable() = default; + + protected: + bool ParseFileImpl(MapleAllocator &allocator, BasicIORead &io, const JBCConstPool &constPool) override; + bool PreProcessImpl(const JBCConstPool 
&constPool) override; + SimpleXMLElem *GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, uint32 idx) const override; + + private: + uint16 count; + MapleVector entries; +}; + +// Exceptions Attribute +// ref: https://docs.oracle.com/javase/specs/jvms/se8/html/jvms-4.html#jvms-4.7.5 +class JBCAttrException : public JBCAttr { + public: + JBCAttrException(MapleAllocator &allocator, uint16 nameIdx, uint32 length); + ~JBCAttrException() = default; + + protected: + bool ParseFileImpl(MapleAllocator &allocator, BasicIORead &io, const JBCConstPool &constPool) override; + bool PreProcessImpl(const JBCConstPool &constPool) override; + SimpleXMLElem *GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, uint32 idx) const override; + + private: + uint16 count; + MapleVector tbExceptionIdx; +}; + +// InnerClass Attribute +// ref: https://docs.oracle.com/javase/specs/jvms/se8/html/jvms-4.html#jvms-4.7.6 +class JBCAttrInnerClass : public JBCAttr { + public: + JBCAttrInnerClass(MapleAllocator &allocator, uint16 nameIdx, uint32 length); + ~JBCAttrInnerClass() = default; + + protected: + bool ParseFileImpl(MapleAllocator &allocator, BasicIORead &io, const JBCConstPool &constPool) override; + bool PreProcessImpl(const JBCConstPool &constPool) override; + SimpleXMLElem *GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, uint32 idx) const override; + + private: + uint16 count; + MapleVector tbClasses; +}; + +// EnclosingMethod Attribute +// ref: https://docs.oracle.com/javase/specs/jvms/se8/html/jvms-4.html#jvms-4.7.7 +class JBCAttrEnclosingMethod : public JBCAttr { + public: + JBCAttrEnclosingMethod(const MapleAllocator &allocator, uint16 nameIdx, uint32 length); + ~JBCAttrEnclosingMethod() override; + const JBCConstClass *GetConstClass() const { + return constClass; + } + + const JBCConstNameAndType *GetConstNameAndType() const { + return constNameAndType; + } + + protected: + bool ParseFileImpl(MapleAllocator &allocator, BasicIORead &io, const JBCConstPool &constPool) override; + bool PreProcessImpl(const JBCConstPool &constPool) override; + SimpleXMLElem *GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, uint32 idx) const override; + + private: + uint16 classIdx; + uint16 methodIdx; + JBCConstClass *constClass; + JBCConstNameAndType *constNameAndType; +}; + +// Synthetic Attribute +// ref: https://docs.oracle.com/javase/specs/jvms/se8/html/jvms-4.html#jvms-4.7.8 +class JBCAttrSynthetic : public JBCAttr { + public: + JBCAttrSynthetic(const MapleAllocator &allocator, uint16 nameIdx, uint32 length); + ~JBCAttrSynthetic() = default; + + protected: + bool ParseFileImpl(MapleAllocator &allocator, BasicIORead &io, const JBCConstPool &constPool) override; + bool PreProcessImpl(const JBCConstPool &constPool) override; + SimpleXMLElem *GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, uint32 idx) const override; +}; + +// Signature Attribute +// ref: https://docs.oracle.com/javase/specs/jvms/se8/html/jvms-4.html#jvms-4.7.9 +class JBCAttrSignature : public JBCAttr { + public: + JBCAttrSignature(const MapleAllocator &allocator, uint16 nameIdx, uint32 length); + ~JBCAttrSignature() override; + const JBCConstUTF8 *GetConstSignatureName() const { + return constSignatureName; + } + + protected: + bool ParseFileImpl(MapleAllocator &allocator, BasicIORead &io, const JBCConstPool &constPool) override; + bool PreProcessImpl(const JBCConstPool &constPool) override; + SimpleXMLElem *GenXmlElemImpl(MapleAllocator 
&allocator, const JBCConstPool &constPool, uint32 idx) const override; + + private: + uint16 signatureIdx; + JBCConstUTF8 *constSignatureName; +}; + +// SourceFile Attribute +// ref: https://docs.oracle.com/javase/specs/jvms/se8/html/jvms-4.html#jvms-4.7.10 +class JBCAttrSourceFile : public JBCAttr { + public: + JBCAttrSourceFile(const MapleAllocator &allocator, uint16 nameIdx, uint32 length); + ~JBCAttrSourceFile() override; + const JBCConstUTF8 *GetConstFileName() const { + return constFileName; + } + + protected: + bool ParseFileImpl(MapleAllocator &allocator, BasicIORead &io, const JBCConstPool &constPool) override; + bool PreProcessImpl(const JBCConstPool &constPool) override; + SimpleXMLElem *GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, uint32 idx) const override; + + private: + uint16 sourceFileIdx; + const JBCConstUTF8 *constFileName; +}; + +// SourceDebugExtension Attribute +// ref: https://docs.oracle.com/javase/specs/jvms/se8/html/jvms-4.html#jvms-4.7.11 +class JBCAttrSourceDebugEx : public JBCAttr { + public: + JBCAttrSourceDebugEx(const MapleAllocator &allocator, uint16 nameIdx, uint32 length); + ~JBCAttrSourceDebugEx() override; + + protected: + bool ParseFileImpl(MapleAllocator &allocator, BasicIORead &io, const JBCConstPool &constPool) override; + bool PreProcessImpl(const JBCConstPool &constPool) override; + SimpleXMLElem *GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, uint32 idx) const override; + + private: + char *data; +}; + +// LineNumberTable Attribute +// ref: https://docs.oracle.com/javase/specs/jvms/se8/html/jvms-4.html#jvms-4.7.12 +class JBCAttrLineNumberTable : public JBCAttr { + public: + JBCAttrLineNumberTable(MapleAllocator &allocator, uint16 nameIdx, uint32 length); + ~JBCAttrLineNumberTable() = default; + const MapleVector &GetLineNums() const { + return lineNums; + } + + protected: + bool ParseFileImpl(MapleAllocator &allocator, BasicIORead &io, const JBCConstPool &constPool) override; + bool PreProcessImpl(const JBCConstPool &constPool) override; + SimpleXMLElem *GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, uint32 idx) const override; + + private: + uint16 size; + MapleVector lineNums; +}; + +// LocalVariableTable Attribute +// ref: https://docs.oracle.com/javase/specs/jvms/se8/html/jvms-4.html#jvms-4.7.13 +class JBCAttrLocalVariableTable : public JBCAttr { + public: + JBCAttrLocalVariableTable(MapleAllocator &allocator, uint16 nameIdx, uint32 length); + ~JBCAttrLocalVariableTable() = default; + const MapleVector &GetLocalVarInfos() const { + return localVarInfos; + } + + protected: + bool ParseFileImpl(MapleAllocator &allocator, BasicIORead &io, const JBCConstPool &constPool) override; + bool PreProcessImpl(const JBCConstPool &constPool) override; + SimpleXMLElem *GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, uint32 idx) const override; + + private: + uint16 size; + MapleVector localVarInfos; +}; + +// LocalVariableTypeTable Attribute +// ref: https://docs.oracle.com/javase/specs/jvms/se8/html/jvms-4.html#jvms-4.7.14 +class JBCAttrLocalVariableTypeTable : public JBCAttr { + public: + JBCAttrLocalVariableTypeTable(MapleAllocator &allocator, uint16 nameIdx, uint32 length); + ~JBCAttrLocalVariableTypeTable() = default; + const MapleVector &GetLocalVarTypeInfos() const { + return localVarTypeInfos; + } + + protected: + bool ParseFileImpl(MapleAllocator &allocator, BasicIORead &io, const JBCConstPool &constPool) override; + bool PreProcessImpl(const 
JBCConstPool &constPool) override; + SimpleXMLElem *GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, uint32 idx) const override; + + private: + uint16 size; + MapleVector localVarTypeInfos; +}; + +// Deprecated Attribute +// ref: https://docs.oracle.com/javase/specs/jvms/se8/html/jvms-4.html#jvms-4.7.15 +class JBCAttrDeprecated : public JBCAttr { + public: + JBCAttrDeprecated(const MapleAllocator &allocator, uint16 nameIdx, uint32 length); + ~JBCAttrDeprecated() = default; + + protected: + bool ParseFileImpl(MapleAllocator &allocator, BasicIORead &io, const JBCConstPool &constPool) override; + bool PreProcessImpl(const JBCConstPool &constPool) override; + SimpleXMLElem *GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, uint32 idx) const override; +}; + +// RuntimeAnnoations Attribute +// ref: https://docs.oracle.com/javase/specs/jvms/se8/html/jvms-4.html#jvms-4.7.16 +// ref: https://docs.oracle.com/javase/specs/jvms/se8/html/jvms-4.html#jvms-4.7.17 +class JBCAttrRTAnnotations : public JBCAttr { + public: + JBCAttrRTAnnotations(MapleAllocator &allocator, JBCAttrKind kindIn, uint16 nameIdx, uint32 length); + virtual ~JBCAttrRTAnnotations() = default; + + protected: + bool ParseFileImpl(MapleAllocator &allocator, BasicIORead &io, const JBCConstPool &constPool) override; + bool PreProcessImpl(const JBCConstPool &constPool) override; + SimpleXMLElem *GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, uint32 idx) const override; + + uint16 size; + MapleVector annotations; +}; + +class JBCAttrRTVisAnnotations : public JBCAttrRTAnnotations { + public: + JBCAttrRTVisAnnotations(MapleAllocator &allocator, uint16 nameIdx, uint32 length); + ~JBCAttrRTVisAnnotations() = default; +}; + +class JBCAttrRTInvisAnnotations : public JBCAttrRTAnnotations { + public: + JBCAttrRTInvisAnnotations(MapleAllocator &allocator, uint16 nameIdx, uint32 length); + ~JBCAttrRTInvisAnnotations() = default; +}; + +// RuntimeParamAnnoations Attribute +// ref: https://docs.oracle.com/javase/specs/jvms/se8/html/jvms-4.html#jvms-4.7.18 +// ref: https://docs.oracle.com/javase/specs/jvms/se8/html/jvms-4.html#jvms-4.7.19 +class JBCAttrRTParamAnnotations : public JBCAttr { + public: + JBCAttrRTParamAnnotations(MapleAllocator &allocator, JBCAttrKind kindIn, uint16 nameIdx, uint32 length); + virtual ~JBCAttrRTParamAnnotations() = default; + + protected: + bool ParseFileImpl(MapleAllocator &allocator, BasicIORead &io, const JBCConstPool &constPool) override; + bool PreProcessImpl(const JBCConstPool &constPool) override; + SimpleXMLElem *GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, uint32 idx) const override; + + uint8 size; + MapleVector annotations; +}; + +class JBCAttrRTVisParamAnnotations : public JBCAttrRTParamAnnotations { + public: + JBCAttrRTVisParamAnnotations(MapleAllocator &allocator, uint16 nameIdx, uint32 length); + ~JBCAttrRTVisParamAnnotations() override {} +}; + +class JBCAttrRTInvisParamAnnotations : public JBCAttrRTParamAnnotations { + public: + JBCAttrRTInvisParamAnnotations(MapleAllocator &allocator, uint16 nameIdx, uint32 length); + ~JBCAttrRTInvisParamAnnotations() override {} +}; + +// RuntimeVisibleTypeAnnotations Attribute +// ref: https://docs.oracle.com/javase/specs/jvms/se8/html/jvms-4.html#jvms-4.7.20 +// ref: https://docs.oracle.com/javase/specs/jvms/se8/html/jvms-4.html#jvms-4.7.21 +class JBCAttrRTTypeAnnotations : public JBCAttr { + public: + JBCAttrRTTypeAnnotations(MapleAllocator &allocator, JBCAttrKind 
kindIn, uint16 nameIdx, uint32 length); + virtual ~JBCAttrRTTypeAnnotations() = default; + + protected: + bool ParseFileImpl(MapleAllocator &allocator, BasicIORead &io, const JBCConstPool &constPool) override; + bool PreProcessImpl(const JBCConstPool &constPool) override; + SimpleXMLElem *GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, uint32 idx) const override; + + uint16 size; + MapleVector annotations; +}; + +class JBCAttrRTVisTypeAnnotations : public JBCAttrRTTypeAnnotations { + public: + JBCAttrRTVisTypeAnnotations(MapleAllocator &allocator, uint16 nameIdx, uint32 length); + ~JBCAttrRTVisTypeAnnotations() = default; +}; + +class JBCAttrRTInvisTypeAnnotations : public JBCAttrRTTypeAnnotations { + public: + JBCAttrRTInvisTypeAnnotations(MapleAllocator &allocator, uint16 nameIdx, uint32 length); + ~JBCAttrRTInvisTypeAnnotations() = default; +}; + +// AnnotationDefault Attribute +// ref: https://docs.oracle.com/javase/specs/jvms/se8/html/jvms-4.html#jvms-4.7.22 +class JBCAttrAnnotationDefault : public JBCAttr { + public: + JBCAttrAnnotationDefault(const MapleAllocator &allocator, uint16 nameIdx, uint32 length); + ~JBCAttrAnnotationDefault() override; + + protected: + bool ParseFileImpl(MapleAllocator &allocator, BasicIORead &io, const JBCConstPool &constPool) override; + bool PreProcessImpl(const JBCConstPool &constPool) override; + SimpleXMLElem *GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, uint32 idx) const override; + + private: + attr::ElementValue *value; +}; + +// BootstrapMethods Attribute +// ref: https://docs.oracle.com/javase/specs/jvms/se8/html/jvms-4.html#jvms-4.7.23 +class JBCAttrBootstrapMethods : public JBCAttr { + public: + JBCAttrBootstrapMethods(MapleAllocator &allocator, uint16 nameIdx, uint32 length); + ~JBCAttrBootstrapMethods() = default; + + protected: + bool ParseFileImpl(MapleAllocator &allocator, BasicIORead &io, const JBCConstPool &constPool) override; + bool PreProcessImpl(const JBCConstPool &constPool) override; + SimpleXMLElem *GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, uint32 idx) const override; + + private: + uint16 size; + MapleVector methods; +}; + +// MethodParameters Attribute +// ref: https://docs.oracle.com/javase/specs/jvms/se8/html/jvms-4.html#jvms-4.7.24 +class JBCAttrMethodParameters : public JBCAttr { + public: + JBCAttrMethodParameters(MapleAllocator &allocator, uint16 nameIdx, uint32 length); + ~JBCAttrMethodParameters() = default; + + protected: + bool ParseFileImpl(MapleAllocator &allocator, BasicIORead &io, const JBCConstPool &constPool) override; + bool PreProcessImpl(const JBCConstPool &constPool) override; + SimpleXMLElem *GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, uint32 idx) const override; + + private: + uint8 size; + MapleVector params; +}; +} // namespace jbc +} // namespace maple +#endif // HIR2MPL_INCLUDE_JBC_ATTR_H diff --git a/src/hir2mpl/bytecode_input/class/include/jbc_attr_item.h b/src/hir2mpl/bytecode_input/class/include/jbc_attr_item.h new file mode 100644 index 0000000000000000000000000000000000000000..e94c7d12c58877e5f036c4dc968083bb0cf2e75a --- /dev/null +++ b/src/hir2mpl/bytecode_input/class/include/jbc_attr_item.h @@ -0,0 +1,912 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
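Every attribute class above shares one protocol: the static factory InAttr picks the concrete subclass from the attribute_name_index, ParseFile consumes the attribute body from the class-file stream, and PreProcess later resolves constant-pool references. A reading-loop sketch under the assumption that the caller drives all three steps and that attrCount, io and constPool come from the surrounding class-file reader:

#include "jbc_attr.h"

// Sketch: fill a JBCAttrMap from a class-file stream using the interface above.
// Assumption: InAttr only constructs the attribute object; the caller invokes
// ParseFile per attribute and defers PreProcess until the whole table is read.
bool ReadAttrTable(maple::MapleAllocator &allocator, maple::BasicIORead &io,
                   const maple::jbc::JBCConstPool &constPool, maple::uint16 attrCount,
                   maple::jbc::JBCAttrMap &attrMap) {
  for (maple::uint16 i = 0; i < attrCount; ++i) {
    maple::jbc::JBCAttr *attr = maple::jbc::JBCAttr::InAttr(allocator, io, constPool);
    if (attr == nullptr || !attr->ParseFile(allocator, io, constPool)) {
      return false;  // unknown or malformed attribute
    }
    attrMap.RegisterAttr(*attr);
  }
  return attrMap.PreProcess(constPool);  // resolve constant-pool indices in one pass
}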
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef HIR2MPL_INCLUDE_JBC_ATTR_ITEM_H +#define HIR2MPL_INCLUDE_JBC_ATTR_ITEM_H +#include +#include +#include "mempool_allocator.h" +#include "types_def.h" +#include "jbc_class_const_pool.h" + +namespace maple { +namespace jbc { +namespace attr { +// ElementValueItem +// ref: https://docs.oracle.com/javase/specs/jvms/se8/html/jvms-4.html#jvms-4.7.16 +enum ElementValueKind : uint8 { + kElementValueDefault = 0, + kElementValueConst, + kElementValueEnum, + kElementValueClassInfo, + kElementValueAnnotation, + kElementValueArray +}; + +class JBCAttrItem { + public: + JBCAttrItem() = default; + virtual ~JBCAttrItem() = default; + bool ParseFile(MapleAllocator &allocator, BasicIORead &io) { + return ParseFileImpl(allocator, io); + } + + bool PreProcess(const JBCConstPool &constPool) { + return PreProcessImpl(constPool); + } + + SimpleXMLElem *GenXmlElem(MapleAllocator &allocator, const JBCConstPool &constPool, uint32 idx) const { + return GenXmlElemImpl(allocator, constPool, idx); + } + + protected: + virtual bool ParseFileImpl(MapleAllocator &allocator, BasicIORead &io) = 0; + virtual bool PreProcessImpl(const JBCConstPool &constPool) = 0; + virtual SimpleXMLElem *GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, uint32 idx) const = 0; +}; + +// ExceptiionTableItem in Code Attr +// ref: https://docs.oracle.com/javase/specs/jvms/se8/html/jvms-4.html#jvms-4.7.3 +class ExceptionTableItem : public JBCAttrItem { + public: + ExceptionTableItem(); + ~ExceptionTableItem() override; + uint16 GetStartPC() const { + return startPC; + } + + uint16 GetEndPC() const { + return endPC; + } + + uint16 GetHandlerPC() const { + return handlerPC; + } + + const JBCConstClass *GetCatchType() const { + return catchType; + } + + protected: + bool ParseFileImpl(MapleAllocator &allocator, BasicIORead &io) override; + bool PreProcessImpl(const JBCConstPool &constPool) override; + SimpleXMLElem *GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, uint32 idx) const override; + + private: + uint16 startPC; + uint16 endPC; + uint16 handlerPC; + uint16 catchTypeIdx; + const JBCConstClass *catchType; +}; + +// ref: https://docs.oracle.com/javase/specs/jvms/se8/html/jvms-4.html#jvms-4.7.4 +enum VerificationTypeInfoTag : uint8 { + kVerTypeInfoItemTop = 0, + kVerTypeInfoItemInteger = 1, + kVerTypeInfoItemFloat = 2, + kVerTypeInfoItemDouble = 3, + kVerTypeInfoItemLong = 4, + kVerTypeInfoItemNull = 5, + kVerTypeInfoItemUninitializedThis = 6, + kVerTypeInfoItemObject = 7, + kVerTypeInfoItemUninitialized = 8, + kVerTypeInfoItemUnknown = 0xFF, +}; + +class VerificationTypeInfo : public JBCAttrItem { + public: + VerificationTypeInfo(); + ~VerificationTypeInfo() override; + static std::map InitTagNameMap(); + static std::string TagName(VerificationTypeInfoTag t); + const JBCConstClass &GetClassInfoRef() const { + CHECK_NULL_FATAL(classInfo); + return *classInfo; + } + + protected: + bool ParseFileImpl(MapleAllocator &allocator, BasicIORead &io) override; + bool PreProcessImpl(const JBCConstPool &constPool) override; + SimpleXMLElem *GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, uint32 idx) const override; + + 
private: + VerificationTypeInfoTag tag; + union { + uint16 cpoolIdx; + uint16 offset; + uint16 raw; + } data; + JBCConstClass *classInfo; + static std::map tagNameMap; +}; + +enum StackMapFrameItemTag : uint8 { + kStackSame, + kStackSameLocals1StackItem, + kStackSameLocals1StackItemEx, + kStackChop, + kStackSameFrameEx, + kStackAppend, + kStackFullFrame, + kStackReserved, +}; + +class StackMapFrameItem : public JBCAttrItem { + public: + StackMapFrameItem(uint8 frameTypeIn, StackMapFrameItemTag tagIn); + virtual ~StackMapFrameItem() = default; + static std::map InitTagName(); + static std::string TagName(StackMapFrameItemTag tag); + static StackMapFrameItemTag FrameType2Tag(uint8 frameType); + static StackMapFrameItem *NewItem(MapleAllocator &allocator, BasicIORead &io, uint8 frameType); + + protected: + StackMapFrameItemTag tag; + uint8 frameType; + static std::map tagNameMap; +}; + +class StackMapFrameItemSame : public StackMapFrameItem { + public: + explicit StackMapFrameItemSame(uint8 frameType); + ~StackMapFrameItemSame() = default; + + protected: + bool ParseFileImpl(MapleAllocator &allocator, BasicIORead &io) override; + bool PreProcessImpl(const JBCConstPool &constPool) override; + SimpleXMLElem *GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, uint32 idx) const override; +}; + +class StackMapFrameItemSameLocals1 : public StackMapFrameItem { + public: + explicit StackMapFrameItemSameLocals1(uint8 frameType); + ~StackMapFrameItemSameLocals1() override; + + protected: + bool ParseFileImpl(MapleAllocator &allocator, BasicIORead &io) override; + bool PreProcessImpl(const JBCConstPool &constPool) override; + SimpleXMLElem *GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, uint32 idx) const override; + + private: + VerificationTypeInfo *stack; +}; + +class StackMapFrameItemSameLocals1Ex : public StackMapFrameItem { + public: + explicit StackMapFrameItemSameLocals1Ex(uint8 frameType); + ~StackMapFrameItemSameLocals1Ex() override; + + protected: + bool ParseFileImpl(MapleAllocator &allocator, BasicIORead &io) override; + bool PreProcessImpl(const JBCConstPool &constPool) override; + SimpleXMLElem *GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, uint32 idx) const override; + + private: + uint16 offsetDelta; + VerificationTypeInfo *stack; +}; + +class StackMapFrameItemChop : public StackMapFrameItem { + public: + explicit StackMapFrameItemChop(uint8 frameType); + ~StackMapFrameItemChop() = default; + + protected: + bool ParseFileImpl(MapleAllocator &allocator, BasicIORead &io) override; + bool PreProcessImpl(const JBCConstPool &constPool) override; + SimpleXMLElem *GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, uint32 idx) const override; + + private: + uint16 offsetDelta; +}; + +class StackMapFrameItemSameEx : public StackMapFrameItem { + public: + explicit StackMapFrameItemSameEx(uint8 frameType); + ~StackMapFrameItemSameEx() = default; + + protected: + bool ParseFileImpl(MapleAllocator &allocator, BasicIORead &io) override; + bool PreProcessImpl(const JBCConstPool &constPool) override; + SimpleXMLElem *GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, uint32 idx) const override; + + private: + uint16 offsetDelta; +}; + +class StackMapFrameItemAppend : public StackMapFrameItem { + public: + StackMapFrameItemAppend(MapleAllocator &allocator, uint8 frameType); + ~StackMapFrameItemAppend() = default; + + protected: + bool ParseFileImpl(MapleAllocator &allocator, BasicIORead 
&io) override; + bool PreProcessImpl(const JBCConstPool &constPool) override; + SimpleXMLElem *GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, uint32 idx) const override; + + private: + uint16 offsetDelta; + MapleVector locals; +}; + +class StackMapFrameItemFull : public StackMapFrameItem { + public: + StackMapFrameItemFull(MapleAllocator &allocator, uint8 frameType); + ~StackMapFrameItemFull() = default; + + protected: + bool ParseFileImpl(MapleAllocator &allocator, BasicIORead &io) override; + bool PreProcessImpl(const JBCConstPool &constPool) override; + SimpleXMLElem *GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, uint32 idx) const override; + + private: + uint16 offsetDelta; + uint16 nLocals; + MapleVector locals; + uint16 nStacks; + MapleVector stacks; +}; + +// InnerClassItem in Attr InnerClass +// ref: https://docs.oracle.com/javase/specs/jvms/se8/html/jvms-4.html#jvms-4.7.6 +class InnerClassItem : public JBCAttrItem { + public: + InnerClassItem(); + ~InnerClassItem() override; + const JBCConstClass *GetConstClassInnerRef() const { + return constClassInner; + } + + const JBCConstClass *GetConstClassOuter() const { + return constClassOuter; + } + + const JBCConstUTF8 *GetConstNameInner() const { + return constNameInner; + } + + protected: + bool ParseFileImpl(MapleAllocator &allocator, BasicIORead &io) override; + bool PreProcessImpl(const JBCConstPool &constPool) override; + SimpleXMLElem *GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, uint32 idx) const override; + + private: + uint16 innerClassInfoIdx; + uint16 outerClassInfoIdx; + uint16 innerNameIdx; + uint16 innerClassAccessFlag; + JBCConstClass *constClassInner; + JBCConstClass *constClassOuter; + JBCConstUTF8 *constNameInner; +}; + +// LineNumberTableItem in Attr LineNumberTable +// ref: https://docs.oracle.com/javase/specs/jvms/se8/html/jvms-4.html#jvms-4.7.12 +class LineNumberTableItem : public JBCAttrItem { + public: + LineNumberTableItem(); + ~LineNumberTableItem() = default; + uint16 GetStartPC() const { + return startPC; + } + + uint16 GetLineNumber() const { + return lineNumber; + } + + protected: + bool ParseFileImpl(MapleAllocator &allocator, BasicIORead &io) override; + bool PreProcessImpl(const JBCConstPool &constPool) override; + SimpleXMLElem *GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, uint32 idx) const override; + + private: + uint16 startPC; + uint16 lineNumber; +}; + +// LocalVariableTableItem in Attr LocalVariableTable +// ref: https://docs.oracle.com/javase/specs/jvms/se8/html/jvms-4.html#jvms-4.7.13 +class LocalVariableTableItem : public JBCAttrItem { + public: + LocalVariableTableItem(); + ~LocalVariableTableItem() override; + const JBCConstUTF8 *GetConstName() const { + return constName; + } + + const JBCConstUTF8 *GetConstDesc() const { + return constDesc; + } + + uint16 GetStartPC() const { + return startPC; + } + + uint16 GetLength() const { + return length; + } + + uint16 GetIndex() const { + return index; + } + + GStrIdx GetNameStrIdx() const { + CHECK_NULL_FATAL(constName); + return constName->GetStrIdx(); + } + + const FEIRType *GetFEIRType() const { + return feirType; + } + + protected: + bool ParseFileImpl(MapleAllocator &allocator, BasicIORead &io) override; + bool PreProcessImpl(const JBCConstPool &constPool) override; + SimpleXMLElem *GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, uint32 idx) const override; + + private: + uint16 startPC; + uint16 length; + 
uint16 nameIdx; + uint16 descIdx; + uint16 index; + const JBCConstUTF8 *constName; + const JBCConstUTF8 *constDesc; + GStrIdx nameIdxMpl; + const FEIRType *feirType; +}; + +// LocalVariableTypeTableItem in Attr LocalVariableTypeTable +// ref: https://docs.oracle.com/javase/specs/jvms/se8/html/jvms-4.html#jvms-4.7.14 +class LocalVariableTypeTableItem : public JBCAttrItem { + public: + LocalVariableTypeTableItem(); + ~LocalVariableTypeTableItem() override; + const JBCConstUTF8 *GetConstName() const { + return constName; + } + + const JBCConstUTF8 *GetConstSignature() const { + return constSignature; + } + + uint16 GetStartPC() const { + return startPC; + } + + uint16 GetLength() const { + return length; + } + + uint16 GetIndex() const { + return index; + } + + GStrIdx GetNameStrIdx() const { + CHECK_NULL_FATAL(constName); + return constName->GetStrIdx(); + } + + GStrIdx GetSignatureStrIdx() const { + CHECK_NULL_FATAL(constSignature); + return constSignature->GetStrIdx(); + } + + protected: + bool ParseFileImpl(MapleAllocator &allocator, BasicIORead &io) override; + bool PreProcessImpl(const JBCConstPool &constPool) override; + SimpleXMLElem *GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, uint32 idx) const override; + + private: + uint16 startPC; + uint16 length; + uint16 nameIdx; + uint16 signatureIdx; + uint16 index; + const JBCConstUTF8 *constName; + const JBCConstUTF8 *constSignature; + GStrIdx nameIdxMpl; +}; + +class ElementValueItem : public JBCAttrItem { + public: + ElementValueItem(ElementValueKind kindIn, char tagIn); + virtual ~ElementValueItem() = default; + static std::map InitTagKindMap(); + static std::map InitKindNameMap(); + static std::string KindName(ElementValueKind kind); + static ElementValueKind TagToKind(char tag); + static ElementValueItem *NewItem(MapleAllocator &allocator, BasicIORead &io, char tag); + + protected: + ElementValueKind kind; + char tag; + static std::map tagKindMap; + static std::map kindNameMap; +}; + +class ElementValue : public JBCAttrItem { + public: + ElementValue(); + ~ElementValue() override; + ElementValueKind GetKind() const { + return kind; + } + + protected: + bool ParseFileImpl(MapleAllocator &allocator, BasicIORead &io) override; + bool PreProcessImpl(const JBCConstPool &constPool) override; + SimpleXMLElem *GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, uint32 idx) const override; + + private: + char tag; + ElementValueKind kind; + ElementValueItem *value; +}; + +class ElementValuePair : public JBCAttrItem { + public: + ElementValuePair(); + ~ElementValuePair() override; + + protected: + bool ParseFileImpl(MapleAllocator &allocator, BasicIORead &io) override; + bool PreProcessImpl(const JBCConstPool &constPool) override; + SimpleXMLElem *GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, uint32 idx) const override; + + private: + uint16 nameIdx; + ElementValue *value; +}; + +// Annotation +// ref: https://docs.oracle.com/javase/specs/jvms/se8/html/jvms-4.html#jvms-4.7.16 +class Annotation : public JBCAttrItem { + public: + explicit Annotation(MapleAllocator &allocator); + ~Annotation() override; + const JBCConstUTF8 *GetConstTypeName() const { + return constTypeName; + } + + protected: + bool ParseFileImpl(MapleAllocator &allocator, BasicIORead &io) override; + bool PreProcessImpl(const JBCConstPool &constPool) override; + SimpleXMLElem *GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, uint32 idx) const override; + + private: + uint16 
typeIdx; + uint16 nElemPairs; + MapleVector tbElemPairs; + JBCConstUTF8 *constTypeName; +}; + +// ParamAnnotationItem in Attr ParamAnnotation +// ref: https://docs.oracle.com/javase/specs/jvms/se8/html/jvms-4.html#jvms-4.7.18 +// ref: https://docs.oracle.com/javase/specs/jvms/se8/html/jvms-4.html#jvms-4.7.19 +class ParamAnnotationItem : public JBCAttrItem { + public: + explicit ParamAnnotationItem(MapleAllocator &allocator); + ~ParamAnnotationItem() = default; + + protected: + bool ParseFileImpl(MapleAllocator &allocator, BasicIORead &io) override; + bool PreProcessImpl(const JBCConstPool &constPool) override; + SimpleXMLElem *GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, uint32 idx) const override; + + private: + uint16 count; + MapleVector annotations; +}; + +// BootstrapMethodItem in Attr BootstrapMethod +// ref: https://docs.oracle.com/javase/specs/jvms/se8/html/jvms-4.html#jvms-4.7.23 +class BootstrapMethodItem : public JBCAttrItem { + public: + explicit BootstrapMethodItem(MapleAllocator &allocator); + ~BootstrapMethodItem() override; + const JBCConstMethodHandleInfo *GetConstMethodHandleInfo() const { + return methodHandle; + } + + protected: + bool ParseFileImpl(MapleAllocator &allocator, BasicIORead &io) override; + bool PreProcessImpl(const JBCConstPool &constPool) override; + SimpleXMLElem *GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, uint32 idx) const override; + + private: + uint16 methodRefIdx; + uint16 nArgs; + MapleVector argsIdx; + JBCConstMethodHandleInfo *methodHandle; + MapleVector args; +}; + +// MethodParamItem in Attr MethodParam +// ref: https://docs.oracle.com/javase/specs/jvms/se8/html/jvms-4.html#jvms-4.7.24 +class MethodParamItem : public JBCAttrItem { + public: + MethodParamItem(); + ~MethodParamItem() override; + const JBCConstUTF8 *GetConstName() const { + return constName; + } + + protected: + bool ParseFileImpl(MapleAllocator &allocator, BasicIORead &io) override; + bool PreProcessImpl(const JBCConstPool &constPool) override; + SimpleXMLElem *GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, uint32 idx) const override; + + private: + uint16 nameIdx; + uint16 accessFlag; + JBCConstUTF8 *constName; +}; + +class ElementValuePair; + +class ElementValueConst : public ElementValueItem { + public: + explicit ElementValueConst(uint8 t); + ~ElementValueConst() override; + const JBCConst *GetConstValue() const { + return constValue; + } + + protected: + bool ParseFileImpl(MapleAllocator &allocator, BasicIORead &io) override; + bool PreProcessImpl(const JBCConstPool &constPool) override; + SimpleXMLElem *GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, uint32 idx) const override; + + private: + uint16 constValueIdx; + JBCConst *constValue; +}; + +class ElementValueEnum : public ElementValueItem { + public: + ElementValueEnum(); + ~ElementValueEnum() override; + const JBCConstUTF8 *GetConstTypeName() const { + return constTypeName; + } + + const JBCConstUTF8 *GetConstName() const { + return constName; + } + + protected: + bool ParseFileImpl(MapleAllocator &allocator, BasicIORead &io) override; + bool PreProcessImpl(const JBCConstPool &constPool) override; + SimpleXMLElem *GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, uint32 idx) const override; + + private: + uint16 typeNameIdx; + uint16 constNameIdx; + JBCConstUTF8 *constTypeName; + JBCConstUTF8 *constName; +}; + +class ElementValueClassInfo : public ElementValueItem { + public: + 
ElementValueClassInfo(); + ~ElementValueClassInfo() override; + const JBCConstUTF8 *GetConstClassInfo() const { + return constClassInfo; + } + + protected: + bool ParseFileImpl(MapleAllocator &allocator, BasicIORead &io) override; + bool PreProcessImpl(const JBCConstPool &constPool) override; + SimpleXMLElem *GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, uint32 idx) const override; + + private: + uint16 classInfoIdx; + JBCConstUTF8 *constClassInfo; +}; + +class ElementValueAnnotation : public ElementValueItem { + public: + ElementValueAnnotation(); + ~ElementValueAnnotation() override; + + protected: + bool ParseFileImpl(MapleAllocator &allocator, BasicIORead &io) override; + bool PreProcessImpl(const JBCConstPool &constPool) override; + SimpleXMLElem *GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, uint32 idx) const override; + + private: + Annotation *annotation; +}; + +class ElementValueArray : public ElementValueItem { + public: + explicit ElementValueArray(MapleAllocator &allocator); + ~ElementValueArray() = default; + + protected: + bool ParseFileImpl(MapleAllocator &allocator, BasicIORead &io) override; + bool PreProcessImpl(const JBCConstPool &constPool) override; + SimpleXMLElem *GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, uint32 idx) const override; + + private: + uint16 size; + MapleVector values; +}; + +// TargetInfo +// ref: https://docs.oracle.com/javase/specs/jvms/se8/html/jvms-4.html#jvms-4.7.20.1 +enum TargetInfoItemTag : uint8 { + kTargetTagUndefine, + kTargetTagTypeParam, + kTargetTagSuperType, + kTargetTagTypeParamBound, + kTargetTagEmpty, + kTargetTagFormalParam, + kTargetTagThrows, + kTargetTagLocalVar, + kTargetTagCatch, + kTargetTagOffset, + kTargetTagTypeArg, +}; + +enum TargetInfoItemType : uint8 { + kTargetTypeParamClass = 0x00, + kTargetTypeParamMethod = 0x01, + kTargetTypeHierarchy = 0x10, + kTargetTypeBoundClass = 0x11, + kTargetTypeBoundMethod = 0x12, + kTargetTypeFieldDecl = 0x13, + kTargetTypeReturn = 0x14, + kTargetTypeReceiver = 0x15, + kTargetTypeFormal = 0x16, + kTargetTypeThrows = 0x17, + kTargetTypeLocalVar = 0x40, + kTargetTypeResourceVar = 0x41, + kTargetTypeExpectionParam = 0x42, + kTargetTypeInstanceof = 0x43, + kTargetTypeNew = 0x44, + kTargetTypeMethodRefNew = 0x45, + kTargetTypeMethodRefIdentifier = 0x46, + kTargetTypeCast = 0x47, + kTargetTypeConstructorInvoke = 0x48, + kTargetTypeMethodInvoke = 0x49, + kTargetTypeConstructorNew = 0x4A, + kTargetTypeConstructorIdentifier = 0x4B, +}; + +class TargetInfoItem : public JBCAttrItem { + public: + explicit TargetInfoItem(TargetInfoItemTag tagIn); + virtual ~TargetInfoItem() = default; + static std::map InitTypeTagMap(); + static TargetInfoItemTag TargetType2Tag(TargetInfoItemType type); + static TargetInfoItem *NewItem(MapleAllocator &allocator, BasicIORead &io, TargetInfoItemType targetType); + + protected: + TargetInfoItemTag tag; + static std::map typeTagMap; +}; + +class TargetTypeParam : public TargetInfoItem { + public: + TargetTypeParam(); + ~TargetTypeParam() = default; + + protected: + bool ParseFileImpl(MapleAllocator &allocator, BasicIORead &io) override; + bool PreProcessImpl(const JBCConstPool &constPool) override; + SimpleXMLElem *GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, uint32 idx) const override; + + private: + uint8 paramIdx; +}; + +class TargetSuperType : public TargetInfoItem { + public: + TargetSuperType(); + ~TargetSuperType() = default; + + protected: + bool 
ParseFileImpl(MapleAllocator &allocator, BasicIORead &io) override; + bool PreProcessImpl(const JBCConstPool &constPool) override; + SimpleXMLElem *GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, uint32 idx) const override; + + private: + uint16 index; +}; + +class TargetTypeParamBound : public TargetInfoItem { + public: + TargetTypeParamBound(); + ~TargetTypeParamBound() = default; + + protected: + bool ParseFileImpl(MapleAllocator &allocator, BasicIORead &io) override; + bool PreProcessImpl(const JBCConstPool &constPool) override; + SimpleXMLElem *GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, uint32 idx) const override; + + private: + uint8 paramIdx; + uint8 boundIdx; +}; + +class TargetEmpty : public TargetInfoItem { + public: + TargetEmpty(); + ~TargetEmpty() = default; + + protected: + bool ParseFileImpl(MapleAllocator &allocator, BasicIORead &io) override; + bool PreProcessImpl(const JBCConstPool &constPool) override; + SimpleXMLElem *GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, uint32 idx) const override; +}; + +class TargetFormalParam : public TargetInfoItem { + public: + TargetFormalParam(); + ~TargetFormalParam() = default; + + protected: + bool ParseFileImpl(MapleAllocator &allocator, BasicIORead &io) override; + bool PreProcessImpl(const JBCConstPool &constPool) override; + SimpleXMLElem *GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, uint32 idx) const override; + + private: + uint8 paramIdx; +}; + +class TargetThrows : public TargetInfoItem { + public: + TargetThrows(); + ~TargetThrows() = default; + + protected: + bool ParseFileImpl(MapleAllocator &allocator, BasicIORead &io) override; + bool PreProcessImpl(const JBCConstPool &constPool) override; + SimpleXMLElem *GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, uint32 idx) const override; + + private: + uint16 typeIdx; +}; + +class TargetLocalVarItem : public TargetInfoItem { + public: + TargetLocalVarItem(); + ~TargetLocalVarItem() = default; + + protected: + bool ParseFileImpl(MapleAllocator &allocator, BasicIORead &io) override; + bool PreProcessImpl(const JBCConstPool &constPool) override; + SimpleXMLElem *GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, uint32 idx) const override; + + private: + uint16 startPC; + uint16 length; + uint16 index; +}; + +class TargetLocalVar : public TargetInfoItem { + public: + explicit TargetLocalVar(MapleAllocator &allocator); + ~TargetLocalVar() = default; + + protected: + bool ParseFileImpl(MapleAllocator &allocator, BasicIORead &io) override; + bool PreProcessImpl(const JBCConstPool &constPool) override; + SimpleXMLElem *GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, uint32 idx) const override; + + private: + uint16 size; + MapleVector table; +}; + +class TargetCatch : public TargetInfoItem { + public: + TargetCatch(); + ~TargetCatch() = default; + + protected: + bool ParseFileImpl(MapleAllocator &allocator, BasicIORead &io) override; + bool PreProcessImpl(const JBCConstPool &constPool) override; + SimpleXMLElem *GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, uint32 idx) const override; + + private: + uint16 exTableIdx; +}; + +class TargetOffset : public TargetInfoItem { + public: + TargetOffset(); + ~TargetOffset() = default; + + protected: + bool ParseFileImpl(MapleAllocator &allocator, BasicIORead &io) override; + bool PreProcessImpl(const JBCConstPool &constPool) override; + 
SimpleXMLElem *GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, uint32 idx) const override; + + private: + uint16 offset; +}; + +class TargetTypeArg : public TargetInfoItem { + public: + TargetTypeArg(); + ~TargetTypeArg() = default; + + protected: + bool ParseFileImpl(MapleAllocator &allocator, BasicIORead &io) override; + bool PreProcessImpl(const JBCConstPool &constPool) override; + SimpleXMLElem *GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, uint32 idx) const override; + + private: + uint16 offset; + uint8 typeArgIdx; +}; + +class TypePathItem : public JBCAttrItem { + public: + TypePathItem(); + ~TypePathItem() = default; + + protected: + bool ParseFileImpl(MapleAllocator &allocator, BasicIORead &io) override; + bool PreProcessImpl(const JBCConstPool &constPool) override; + SimpleXMLElem *GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, uint32 idx) const override; + + private: + uint8 typePathKind; + uint8 typeArgIdx; +}; + +// TypePath +// ref: https://docs.oracle.com/javase/specs/jvms/se8/html/jvms-4.html#jvms-4.7.20.2 +class TypePath : public JBCAttrItem { + public: + explicit TypePath(MapleAllocator &allocator); + ~TypePath() = default; + + protected: + bool ParseFileImpl(MapleAllocator &allocator, BasicIORead &io) override; + bool PreProcessImpl(const JBCConstPool &constPool) override; + SimpleXMLElem *GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, uint32 idx) const override; + + private: + uint8 pathLength; + MapleVector tbPath; +}; + +class TypeAnnotationItem : public JBCAttrItem { + public: + explicit TypeAnnotationItem(MapleAllocator &allocator); + ~TypeAnnotationItem() override; + + protected: + bool ParseFileImpl(MapleAllocator &allocator, BasicIORead &io) override; + bool PreProcessImpl(const JBCConstPool &constPool) override; + SimpleXMLElem *GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, uint32 idx) const override; + + private: + TargetInfoItemType targetType; + TargetInfoItem *targetInfo; + TypePath *targetPath; + uint16 typeIdx; + uint16 nElemPairs; + MapleVector elemPairs; +}; +} // namespace attr +} // namespace jbc +} // namespace maple +#endif // HIR2MPL_INCLUDE_JBC_ATTR_ITEM_H diff --git a/src/hir2mpl/bytecode_input/class/include/jbc_bb.h b/src/hir2mpl/bytecode_input/class/include/jbc_bb.h new file mode 100644 index 0000000000000000000000000000000000000000..889e994988a1b7faca88d448e50caa71f3e36059 --- /dev/null +++ b/src/hir2mpl/bytecode_input/class/include/jbc_bb.h @@ -0,0 +1,59 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
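The target_info machinery above mirrors the other item families (StackMapFrameItem, ElementValueItem): a raw target_type byte selects the concrete subclass through a static NewItem factory, and the ParseFile/PreProcess protocol inherited from JBCAttrItem does the rest. A dispatch sketch, assuming NewItem only constructs the object and that the target_type byte has already been read by the caller (the exact BasicIORead call is not shown in this header, so it is left out):

#include "jbc_attr_item.h"

// Sketch: construct and parse one type-annotation target (JVMS 4.7.20.1).
// rawTargetType is assumed to have been read from the attribute stream already.
maple::jbc::attr::TargetInfoItem *ReadTargetInfo(maple::MapleAllocator &allocator,
                                                 maple::BasicIORead &io,
                                                 const maple::jbc::JBCConstPool &constPool,
                                                 maple::uint8 rawTargetType) {
  using namespace maple::jbc::attr;
  auto targetType = static_cast<TargetInfoItemType>(rawTargetType);  // e.g. 0x16 -> kTargetTypeFormal
  TargetInfoItem *item = TargetInfoItem::NewItem(allocator, io, targetType);
  if (item != nullptr && item->ParseFile(allocator, io) && item->PreProcess(constPool)) {
    return item;
  }
  return nullptr;  // unknown target_type or malformed item
}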
+ */ +#ifndef HIR2MPL_INCLUDE_JBC_BB_H +#define HIR2MPL_INCLUDE_JBC_BB_H +#include +#include "feir_bb.h" +#include "jbc_stack_helper.h" + +namespace maple { +class JBCBB : public FEIRBB { + public: + JBCBB(const jbc::JBCConstPool &argConstPool); + JBCBB(uint8 argBBKind, const jbc::JBCConstPool &argConstPool); + ~JBCBB() override = default; + bool InitForFuncHeader(); + bool InitForCatch(); + bool UpdateStack(); + bool UpdateStackByPredBB(const JBCBB &bb); + bool UpdateStackByPredBBEnd() const; + bool CheckStack(); + uint32 GetSwapSize() const; + bool GetStackError() const { + return stackError; + } + + const JBCStackHelper &GetMinStackIn() const { + return minStackIn; + } + + const JBCStackHelper &GetMinStackOut() const { + return minStackOut; + } + + protected: + void Dump() const override; + + private: + const jbc::JBCConstPool &constPool; + bool stackError : 1; + bool stackInUpdated : 1; + bool stackOutUpdated : 1; + bool updatePredEnd : 1; + JBCStackHelper minStackIn; + JBCStackHelper minStackOut; +}; +} // namespace maple +#endif // HIR2MPL_INCLUDE_JBC_BB_H diff --git a/src/hir2mpl/bytecode_input/class/include/jbc_class.h b/src/hir2mpl/bytecode_input/class/include/jbc_class.h new file mode 100644 index 0000000000000000000000000000000000000000..39e20721cab501ead4c94a95fe9072a88112108f --- /dev/null +++ b/src/hir2mpl/bytecode_input/class/include/jbc_class.h @@ -0,0 +1,232 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef HIR2MPL_INCLUDE_JBC_CLASS_H +#define HIR2MPL_INCLUDE_JBC_CLASS_H +#include "mempool_allocator.h" +#include "fe_configs.h" +#include "jbc_class_access.h" +#include "jbc_class_header.h" +#include "jbc_class_const_pool.h" +#include "jbc_attr.h" + +namespace maple { +namespace jbc { +class JBCClass; +class JBCClassElem { + public: + JBCClassElem(MapleAllocator &allocator, const JBCClass &argKlass); + virtual ~JBCClassElem() = default; + bool ParseFile(MapleAllocator &allocator, BasicIORead &io, const JBCConstPool &constPool); + const JBCConstPool &GetConstPool() const; + std::string GetFullName() const; + LLT_MOCK_TARGET uint16 GetAccessFlag() const { + return accessFlag; + } + + LLT_MOCK_TARGET bool IsStatic() const { + return (accessFlag & kAccFieldStatic) != 0; + } + + LLT_MOCK_TARGET std::string GetClassName() const; + + LLT_MOCK_TARGET std::string GetName(const JBCConstPool &constPool) const { + return constPool.GetStringByConstUTF8(nameIdx); + } + + LLT_MOCK_TARGET std::string GetName() const { + return GetConstPool().GetStringByConstUTF8(nameIdx); + } + + LLT_MOCK_TARGET std::string GetDescription(const JBCConstPool &constPool) const { + return constPool.GetStringByConstUTF8(descIdx); + } + + LLT_MOCK_TARGET std::string GetDescription() const { + return GetConstPool().GetStringByConstUTF8(descIdx); + } + + uint16 GetNameIdx() const { + return nameIdx; + } + + uint16 GetDescriptionIdx() const { + return descIdx; + } + + const JBCClass &GetClass() const { + return klass; + } + + const JBCAttr *GetAttr(JBCAttrKind kind) const { + return attrMap.GetAttr(kind); + } + + const std::list GetAttrs(JBCAttrKind kind) const { + return attrMap.GetAttrs(kind); + } + + SimpleXMLElem *GenXmlElem(MapleAllocator &allocator, const JBCConstPool &constPool, uint32 idx) { + return GenXmlElemImpl(allocator, constPool, idx); + } + + protected: + virtual SimpleXMLElem *GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, uint32 idx) = 0; + + const JBCClass &klass; + uint16 accessFlag; + uint16 nameIdx; + uint16 descIdx; + uint16 nAttr; + MapleVector attrs; + JBCAttrMap attrMap; +}; + +class JBCClassField : public JBCClassElem { + public: + JBCClassField(MapleAllocator &allocator, const JBCClass &argKlass); + ~JBCClassField() = default; + + protected: + SimpleXMLElem *GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, uint32 idx) override; +}; + +class JBCClassMethod : public JBCClassElem { + public: + JBCClassMethod(MapleAllocator &allocator, const JBCClass &argKlass); + ~JBCClassMethod() = default; + bool PreProcess(); + const JBCAttrCode *GetCode() const; + bool IsVirtual() const; + bool IsNative() const; + bool HasCode() const; + + protected: + SimpleXMLElem *GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, uint32 idx) override; +}; + +class JBCClass { + public: + JBCClass(MapleAllocator &allocatorIn); + LLT_MOCK_TARGET ~JBCClass() = default; + + bool ParseFile(BasicIORead &io); + bool PreProcess(); + GStrIdx GetClassNameIdxOrin() const; + GStrIdx GetClassNameIdxMpl() const; + LLT_MOCK_TARGET std::string GetClassNameOrin() const; + LLT_MOCK_TARGET std::string GetClassNameMpl() const; + LLT_MOCK_TARGET std::string GetSourceFileName() const; + LLT_MOCK_TARGET std::string GetSuperClassName() const; + LLT_MOCK_TARGET std::vector GetInterfaceNames() const; + static JBCClass *InClass(MapleAllocator &allocator, BasicIORead &io); + + uint16 GetAccessFlag() const { + return header.accessFlag; + } + + const JBCConstPool &GetConstPool() 
const { + return constPool; + } + + uint16 GetConstPoolCount() const { + return header.constPoolCount; + } + + uint16 GetFieldCount() const { + return header.fieldsCount; + } + + uint16 GetMethodCount() const { + return header.methodsCount; + } + + uint16 GetAttrCount() const { + return header.attrsCount; + } + + bool IsInterface() const { + return (header.accessFlag & JBCClassAccessFlag::kAccClassInterface) != 0; + } + + const MapleVector &GetFields() const { + return tbFields; + } + + const MapleVector &GetMethods() const { + return tbMethods; + } + + void SetFilePathName(const std::string &name) { + filePathName = name; + } + + const std::string GetFilePathName() const { + return filePathName.length() == 0 ? "" : std::string(filePathName.c_str()); + } + + const std::string GetFileName() const { + return fileName.length() == 0 ? "" : std::string(fileName.c_str()); + } + + void SetSrcFileInfoIdx(uint32 idx) { + srcFileInfoIdx = idx; + } + + uint32 GetSrcFileInfoIdx() const { + return srcFileInfoIdx; + } + + const JBCAttr *GetAttr(JBCAttrKind kind) const { + return attrMap.GetAttr(kind); + } + + std::list GetAttrs(JBCAttrKind kind) const { + return attrMap.GetAttrs(kind); + } + + LLT_PRIVATE: + void InitHeader(); + bool ParseFileForConstPool(BasicIORead &io); + bool ParseFileForFields(BasicIORead &io); + bool ParseFileForMethods(BasicIORead &io); + bool ParseFileForAttrs(BasicIORead &io); + + struct { + uint32 magic; + uint16 minorVersion; + uint16 majorVersion; + uint16 constPoolCount; + uint16 accessFlag; + uint16 thisClass; + uint16 superClass; + uint16 interfacesCount; + uint16 fieldsCount; + uint16 methodsCount; + uint16 attrsCount; + } header; + MapleAllocator &allocator; + JBCConstPool constPool; + MapleVector tbInterfaces; + MapleVector tbFields; + MapleVector tbMethods; + MapleVector tbAttrs; + JBCAttrMap attrMap; + MapleString filePathName; + MapleString fileName; + uint32 srcFileInfoIdx = 0; +}; +} // namespace jbc +} // namespace maple +#endif // HIR2MPL_INCLUDE_JBC_CLASS_H diff --git a/src/hir2mpl/bytecode_input/class/include/jbc_class2fe_helper.h b/src/hir2mpl/bytecode_input/class/include/jbc_class2fe_helper.h new file mode 100644 index 0000000000000000000000000000000000000000..eb73f924090a3193cf8c741d9612a1f690d99f9c --- /dev/null +++ b/src/hir2mpl/bytecode_input/class/include/jbc_class2fe_helper.h @@ -0,0 +1,99 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef HIR2MPL_INCLUDE_JBC2FE_INPUT_HELPER_STRUCT_H +#define HIR2MPL_INCLUDE_JBC2FE_INPUT_HELPER_STRUCT_H +#include "fe_configs.h" +#include "jbc_class.h" +#include "jbc_input.h" +#include "fe_input_helper.h" + +namespace maple { +class JBCClass2FEHelper : public FEInputStructHelper { + public: + JBCClass2FEHelper(MapleAllocator &allocator, const jbc::JBCClass &klassIn); + ~JBCClass2FEHelper() = default; + const jbc::JBCConstPool &GetConstPool() const { + return klass.GetConstPool(); + } + + LLT_MOCK_TARGET bool IsStaticFieldProguard() const { + return isStaticFieldProguard; + } + + protected: + std::string GetStructNameOrinImpl() const override; + std::string GetStructNameMplImpl() const override; + std::list GetSuperClassNamesImpl() const override; + std::vector GetInterfaceNamesImpl() const override; + std::string GetSourceFileNameImpl() const override; + MIRStructType *CreateMIRStructTypeImpl(bool &error) const override; + TypeAttrs GetStructAttributeFromInputImpl() const override; + uint64 GetRawAccessFlagsImpl() const override; + GStrIdx GetIRSrcFileSigIdxImpl() const override; + bool IsMultiDefImpl() const override; + void InitFieldHelpersImpl() override; + void InitMethodHelpersImpl() override; + std::string GetSrcFileNameImpl() const override; + + private: + const jbc::JBCClass &klass; + bool isStaticFieldProguard; +}; + +class JBCClassField2FEHelper : public FEInputFieldHelper { + public: + JBCClassField2FEHelper(MapleAllocator &allocator, const jbc::JBCClassField &fieldIn) + : FEInputFieldHelper(allocator), + field(fieldIn) {} + ~JBCClassField2FEHelper() = default; + static FieldAttrs AccessFlag2Attribute(uint16 accessFlag); + + protected: + bool ProcessDeclImpl(MapleAllocator &allocator) override; + bool ProcessDeclWithContainerImpl(MapleAllocator &allocator) override; + + private: + const jbc::JBCClassField &field; +}; + +class JBCClassMethod2FEHelper : public FEInputMethodHelper { + public: + JBCClassMethod2FEHelper(MapleAllocator &allocator, const jbc::JBCClassMethod &methodIn); + ~JBCClassMethod2FEHelper() = default; + const jbc::JBCClassMethod &GetMethod() const { + return method; + } + + protected: + bool ProcessDeclImpl(MapleAllocator &allocator) override; + void SolveReturnAndArgTypesImpl(MapleAllocator &allocator) override; + std::string GetMethodNameImpl(bool inMpl, bool full) const override; + FuncAttrs GetAttrsImpl() const override; + bool IsStaticImpl() const override; + bool IsVargImpl() const override; + bool HasThisImpl() const override; + MIRType *GetTypeForThisImpl() const override; + bool IsVirtualImpl() const override; + bool IsNativeImpl() const override; + bool HasCodeImpl() const override; + + private: + bool IsClinit() const; + bool IsInit() const; + + const jbc::JBCClassMethod &method; +}; +} // namespace maple +#endif // HIR2MPL_INCLUDE_JBC2FE_INPUT_HELPER_STRUCT_H \ No newline at end of file diff --git a/src/hir2mpl/bytecode_input/class/include/jbc_class_access.h b/src/hir2mpl/bytecode_input/class/include/jbc_class_access.h new file mode 100644 index 0000000000000000000000000000000000000000..d38ba68c4e20e8760d230b020689f7f367c9510f --- /dev/null +++ b/src/hir2mpl/bytecode_input/class/include/jbc_class_access.h @@ -0,0 +1,77 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef HIR2MPL_INCLUDE_JBC_CLASS_ACCESS_H +#define HIR2MPL_INCLUDE_JBC_CLASS_ACCESS_H + +namespace maple { +namespace jbc { +enum JBCClassAccessFlag: uint16 { + kAccClassPublic = 0x0001, + kAccClassFinal = 0x0010, + kAccClassSuper = 0x0020, + kAccClassInterface = 0x0200, + kAccClassAbstract = 0x0400, + kAccClassSynthetic = 0x1000, + kAccClassAnnotation = 0x2000, + kAccClassEnum = 0x4000, +}; + +enum JBCClassFieldAccessFlag { + kAccFieldPublic = 0x0001, + kAccFieldPrivate = 0x0002, + kAccFieldProtected = 0x0004, + kAccFieldStatic = 0x0008, + kAccFieldFinal = 0x0010, + kAccFieldVolatile = 0x0040, + kAccFieldTransient = 0x0080, + kAccFieldSynthetic = 0x1000, + kAccFieldEnum = 0x4000, +}; + +enum JBCClassMethodAccessFlag: uint16 { + kAccMethodPublic = 0x0001, + kAccMethodPrivate = 0x0002, + kAccMethodProtected = 0x0004, + kAccMethodStatic = 0x0008, + kAccMethodFinal = 0x0010, + kAccMethodSynchronized = 0x0020, + kAccMethodBridge = 0x0040, + kAccMethodVarargs = 0x0080, + kAccMethodNative = 0x0100, + kAccMethodAbstract = 0x0400, + kAccMethodStrict = 0x0800, + kAccMethodSynthetic = 0x1000, +}; + +enum JBCAccessFlag { + kAccPublic = 0x0001, + kAccPrivate = 0x0002, + kAccProtected = 0x0004, + kAccStatic = 0x0008, + kAccFinal = 0x0010, + kAccSuperOrSynchronized = 0x0020, + kAccBridgeOrVolatile = 0x0040, + kAccVarargsOrTransient = 0x0080, + kAccNative = 0x0100, + kAccInterface = 0x0200, + kAccAbstract = 0x0400, + kAccStrict = 0x0800, + kAccSynthetic = 0x1000, + kAccAnnotation = 0x2000, + kAccEnum = 0x4000, +}; +} // namespace jbc +} // namespace maple +#endif // HIR2MPL_INCLUDE_JBC_CLASS_ACCESS_H diff --git a/src/hir2mpl/bytecode_input/class/include/jbc_class_builder.h b/src/hir2mpl/bytecode_input/class/include/jbc_class_builder.h new file mode 100644 index 0000000000000000000000000000000000000000..99760fc2965b16a31a7142f21e63ca409e2f8287 --- /dev/null +++ b/src/hir2mpl/bytecode_input/class/include/jbc_class_builder.h @@ -0,0 +1,30 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef HIR2MPL_INCLUDE_JBC_CLASS_BUILDER_H +#define HIR2MPL_INCLUDE_JBC_CLASS_BUILDER_H +#include "jbc_class.h" + +namespace maple { +namespace jbc { +class JBCClassBuilder { + public: + JBCClassBuilder() = default; + ~JBCClassBuilder() = default; + + public: +}; +} // namespace jbc +} // namespace maple +#endif // HIR2MPL_INCLUDE_JBC_CLASS_BUILDER_H \ No newline at end of file diff --git a/src/hir2mpl/bytecode_input/class/include/jbc_class_const.def b/src/hir2mpl/bytecode_input/class/include/jbc_class_const.def new file mode 100644 index 0000000000000000000000000000000000000000..49ef783b5ef68f320d45bf65488ca09cf4864f13 --- /dev/null +++ b/src/hir2mpl/bytecode_input/class/include/jbc_class_const.def @@ -0,0 +1,29 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +// JBC_CONST(tag, tagName, className) +JBC_CONST(kConstUTF8, "ConstUTF8", JBCConstUTF8) +JBC_CONST(kConstInteger, "ConstInteger", JBCConst4Byte) +JBC_CONST(kConstFloat, "ConstFloat", JBCConst4Byte) +JBC_CONST(kConstLong, "ConstLong", JBCConst8Byte) +JBC_CONST(kConstDouble, "ConstDouble", JBCConst8Byte) +JBC_CONST(kConstClass, "ConstClass", JBCConstClass) +JBC_CONST(kConstString, "ConstString", JBCConstString) +JBC_CONST(kConstFieldRef, "ConstFieldRef", JBCConstRef) +JBC_CONST(kConstMethodRef, "ConstMethodRef", JBCConstRef) +JBC_CONST(kConstInterfaceMethodRef, "ConstInterfaceMethodRef", JBCConstRef) +JBC_CONST(kConstNameAndType, "ConstNameAndType", JBCConstNameAndType) +JBC_CONST(kConstMethodHandleInfo, "ConstMethodHandleInfo", JBCConstMethodHandleInfo) +JBC_CONST(kConstMethodType, "ConstMethodType", JBCConstMethodType) +JBC_CONST(kConstInvokeDynamic, "ConstInvokeDynamic", JBCConstInvokeDynamic) \ No newline at end of file diff --git a/src/hir2mpl/bytecode_input/class/include/jbc_class_const.h b/src/hir2mpl/bytecode_input/class/include/jbc_class_const.h new file mode 100644 index 0000000000000000000000000000000000000000..e15f6648e1ee93f0f5d8619f4b2d609a0e5e3a37 --- /dev/null +++ b/src/hir2mpl/bytecode_input/class/include/jbc_class_const.h @@ -0,0 +1,425 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef HIR2MPL_INCLUDE_JBC_CLASS_CONST_H +#define HIR2MPL_INCLUDE_JBC_CLASS_CONST_H +#include +#include +#include "mempool_allocator.h" +#include "maple_string.h" +#include "global_tables.h" +#include "types_def.h" +#include "simple_xml.h" +#include "basic_io.h" +#include "fe_struct_elem_info.h" +#include "feir_type.h" + +namespace maple { +namespace jbc { +// Const Kind +enum JBCConstTag : uint8 { + kConstUnknown = 0, + kConstUTF8 = 1, + kConstInteger = 3, + kConstFloat = 4, + kConstLong = 5, + kConstDouble = 6, + kConstClass = 7, + kConstString = 8, + kConstFieldRef = 9, + kConstMethodRef = 10, + kConstInterfaceMethodRef = 11, + kConstNameAndType = 12, + kConstMethodHandleInfo = 15, + kConstMethodType = 16, + kConstInvokeDynamic = 18, +}; + +enum JavaRefKind : uint8 { + kRefGetField = 1, + kRefGetStatic = 2, + kRefPutField = 3, + kRefPutStatic = 4, + kRefInvokeVirtual = 5, + kRefInvokeStatic = 6, + kRefInvokeSpecial = 7, + kRefNewInvokeSpecial = 8, + kRefInvokeInterface = 9, +}; + +class JBCConstTagName { + public: + JBCConstTagName() = default; + ~JBCConstTagName() = default; + static std::map InitTagNameMap(); + static std::string GetTagName(JBCConstTag tag); + + private: + static std::map tagNameMap; +}; + +class JBCConstPool; +class JBCConst { + public: + JBCConst(MapleAllocator &allocIn, JBCConstTag t) : alloc(allocIn), tag(t) {} + virtual ~JBCConst() = default; + static JBCConst *InConst(MapleAllocator &alloc, BasicIORead &io); + static std::string InternalNameToFullName(const std::string &name); + static std::string FullNameToInternalName(const std::string &name); + JBCConstTag GetTag() const { + return tag; + } + + bool IsWide() const { + return (tag == kConstLong || tag == kConstDouble); + } + + bool IsValue() const { + return (tag == kConstInteger || tag == kConstLong || tag == kConstFloat || tag == kConstDouble); + } + + bool IsValue4Byte() const { + return (tag == kConstInteger || tag == kConstFloat); + } + + bool IsValue8Byte() const { + return (tag == kConstLong || tag == kConstDouble); + } + + bool ParseFile(BasicIORead &io) { + return ParseFileImpl(io); + } + + bool PreProcess(const JBCConstPool &constPool) { + return PreProcessImpl(constPool); + } + + SimpleXMLElem *GenXMLElem(MapleAllocator &allocIn, uint32 id) const { + return GenXMLElemImpl(allocIn, id); + } + + protected: + virtual bool ParseFileImpl(BasicIORead &io) = 0; + virtual bool PreProcessImpl(const JBCConstPool &constPool) = 0; + virtual SimpleXMLElem *GenXMLElemImpl(MapleAllocator &alloc, uint32 id) const; + const std::string MapleStringToStd(const MapleString &mapleStr) const { + return mapleStr.length() == 0 ? 
"" : std::string(mapleStr.c_str()); + } + + MapleAllocator &alloc; + JBCConstTag tag; +}; + +using JBCConstPoolIdx = uint16; + +class JBCConstUTF8 : public JBCConst { + public: + JBCConstUTF8(MapleAllocator &alloc, JBCConstTag t); + JBCConstUTF8(MapleAllocator &alloc, JBCConstTag t, const std::string &argStr); + ~JBCConstUTF8() = default; + + const std::string GetString() const { + return MapleStringToStd(str); + } + + GStrIdx GetStrIdx() const { + return strIdx; + } + + protected: + bool ParseFileImpl(BasicIORead &io) override; + bool PreProcessImpl(const JBCConstPool &constPool) override; + SimpleXMLElem *GenXMLElemImpl(MapleAllocator &alloc, uint32 id) const override; + + private: + uint16 length; + GStrIdx strIdx; + MapleString str; +}; + +class JBCConst4Byte : public JBCConst { + public: + JBCConst4Byte(MapleAllocator &alloc, JBCConstTag t); + JBCConst4Byte(MapleAllocator &alloc, JBCConstTag t, int32 arg); + JBCConst4Byte(MapleAllocator &alloc, JBCConstTag t, float arg); + ~JBCConst4Byte() = default; + int32 GetInt32() const { + return value.ivalue; + } + + float GetFloat() const { + return value.fvalue; + } + + uint32 GetRaw() const { + return value.raw; + } + + protected: + bool ParseFileImpl(BasicIORead &io) override; + bool PreProcessImpl(const JBCConstPool &constPool) override; + SimpleXMLElem *GenXMLElemImpl(MapleAllocator &alloc, uint32 id) const override; + + private: + union { + int32 ivalue; + float fvalue; + uint32 raw; + } value; +}; + +class JBCConst8Byte : public JBCConst { + public: + JBCConst8Byte(MapleAllocator &alloc, JBCConstTag t); + JBCConst8Byte(MapleAllocator &alloc, JBCConstTag t, int64 arg); + JBCConst8Byte(MapleAllocator &alloc, JBCConstTag t, double arg); + ~JBCConst8Byte() = default; + + int64 GetInt64() const { + return value.lvalue; + } + + double GetDouble() const { + return value.dvalue; + } + + uint64 GetRaw() const { + return value.raw; + } + + protected: + bool ParseFileImpl(BasicIORead &io) override; + bool PreProcessImpl(const JBCConstPool &constPool) override; + SimpleXMLElem *GenXMLElemImpl(MapleAllocator &alloc, uint32 id) const override; + + private: + union { + int64 lvalue; + double dvalue; + uint64 raw; + } value; +}; + +class JBCConstClass : public JBCConst { + public: + JBCConstClass(MapleAllocator &alloc, JBCConstTag t); + JBCConstClass(MapleAllocator &alloc, JBCConstTag t, JBCConstPoolIdx argNameIdx); + ~JBCConstClass() override; + + GStrIdx GetClassNameIdxOrin() const { + return strIdxOrin; + } + + GStrIdx GetClassNameIdxMpl() const { + return strIdxMpl; + } + + const std::string GetClassNameOrin() const { + return MapleStringToStd(nameOrin); + } + + const std::string GetClassNameMpl() const { + return MapleStringToStd(nameMpl); + } + + const FEIRType *GetFEIRType() const { + return feType; + } + + protected: + bool ParseFileImpl(BasicIORead &io) override; + bool PreProcessImpl(const JBCConstPool &constPool) override; + SimpleXMLElem *GenXMLElemImpl(MapleAllocator &alloc, uint32 id) const override; + + private: + struct { + JBCConstPoolIdx nameIdx; + } rawData; + const JBCConstUTF8 *constUTF8; + GStrIdx strIdxOrin; + GStrIdx strIdxMpl; + MapleString nameOrin; + MapleString nameMpl; + FEIRType *feType = nullptr; +}; + +class JBCConstString : public JBCConst { + public: + JBCConstString(MapleAllocator &alloc, JBCConstTag t); + JBCConstString(MapleAllocator &alloc, JBCConstTag t, JBCConstPoolIdx argStringIdx); + ~JBCConstString() = default; + void SetValue(const GStrIdx &argStrIdx) { + strIdx = argStrIdx; + str = 
GlobalTables::GetStrTable().GetStringFromStrIdx(strIdx); + } + + GStrIdx GetStrIdx() const { + return strIdx; + } + + const std::string GetString() const { + return MapleStringToStd(str); + } + + protected: + bool ParseFileImpl(BasicIORead &io) override; + bool PreProcessImpl(const JBCConstPool &constPool) override; + SimpleXMLElem *GenXMLElemImpl(MapleAllocator &alloc, uint32 id) const override; + + private: + struct { + JBCConstPoolIdx stringIdx; + } rawData; + GStrIdx strIdx; + MapleString str; +}; + +class JBCConstNameAndType : public JBCConst { + public: + JBCConstNameAndType(MapleAllocator &alloc, JBCConstTag t); + JBCConstNameAndType(MapleAllocator &alloc, JBCConstTag t, JBCConstPoolIdx argNameIdx, JBCConstPoolIdx argDescIdx); + ~JBCConstNameAndType() override; + + const std::string GetName() const { + CHECK_FATAL(constName != nullptr, "invalid const index"); + return constName->GetString(); + } + + const std::string GetDesc() const { + CHECK_FATAL(constDesc != nullptr, "invalid const index"); + return constDesc->GetString(); + } + + protected: + bool ParseFileImpl(BasicIORead &io) override; + bool PreProcessImpl(const JBCConstPool &constPool) override; + SimpleXMLElem *GenXMLElemImpl(MapleAllocator &alloc, uint32 id) const override; + + private: + struct { + JBCConstPoolIdx nameIdx; + JBCConstPoolIdx descIdx; + } rawData; + const JBCConstUTF8 *constName; + const JBCConstUTF8 *constDesc; +}; + +class JBCConstRef : public JBCConst { + public: + JBCConstRef(MapleAllocator &alloc, JBCConstTag t); + JBCConstRef(MapleAllocator &alloc, JBCConstTag t, JBCConstPoolIdx argClassIdx, + JBCConstPoolIdx argClassNameAndTypeIdx); + ~JBCConstRef() override; + bool PrepareFEStructElemInfo(); + const std::string GetName() const; + const std::string GetDesc() const; + + const JBCConstClass *GetConstClass() const { + return constClass; + } + + FEStructElemInfo *GetFEStructElemInfo() const { + CHECK_NULL_FATAL(feStructElemInfo); + return feStructElemInfo; + } + + const FEIRType *GetOwnerFEIRType() const { + CHECK_NULL_FATAL(constClass); + return constClass->GetFEIRType(); + } + + protected: + bool ParseFileImpl(BasicIORead &io) override; + bool PreProcessImpl(const JBCConstPool &constPool) override; + SimpleXMLElem *GenXMLElemImpl(MapleAllocator &alloc, uint32 id) const override; + + private: + struct { + JBCConstPoolIdx classIdx; + JBCConstPoolIdx nameAndTypeIdx; + } rawData; + const JBCConstClass *constClass; + const JBCConstNameAndType *constNameAndType; + FEStructElemInfo *feStructElemInfo; +}; + +class JBCConstMethodHandleInfo : public JBCConst { + public: + JBCConstMethodHandleInfo(MapleAllocator &alloc, JBCConstTag t); + ~JBCConstMethodHandleInfo() override; + + protected: + bool ParseFileImpl(BasicIORead &io) override; + bool PreProcessImpl(const JBCConstPool &constPool) override; + SimpleXMLElem *GenXMLElemImpl(MapleAllocator &alloc, uint32 id) const override; + + private: + struct { + JBCConstPoolIdx refKind; + JBCConstPoolIdx refIdx; + } rawData; + const JBCConstRef *constRef; +}; + +class JBCConstMethodType : public JBCConst { + public: + JBCConstMethodType(MapleAllocator &alloc, JBCConstTag t); + ~JBCConstMethodType() override; + + protected: + bool ParseFileImpl(BasicIORead &io) override; + bool PreProcessImpl(const JBCConstPool &constPool) override; + SimpleXMLElem *GenXMLElemImpl(MapleAllocator &alloc, uint32 id) const override; + + private: + struct { + JBCConstPoolIdx descIdx; + } rawData; + const JBCConstUTF8 *constDesc; +}; + +class JBCConstInvokeDynamic : public JBCConst { + public: 
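+  // Models a CONSTANT_InvokeDynamic_info constant-pool entry (JVMS SE 8, section 4.4.10):
+  // rawData.bsmAttrIdx indexes an entry of the class's BootstrapMethods attribute, and
+  // rawData.nameAndTypeIdx refers to the CONSTANT_NameAndType_info entry that gives the
+  // call-site name and method descriptor.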
+  JBCConstInvokeDynamic(MapleAllocator &alloc, JBCConstTag t);
+  ~JBCConstInvokeDynamic() override;
+  bool PrepareFEStructElemInfo(const std::string &ownerClassName);
+  JBCConstPoolIdx GetBSMAttrIdx() const {
+    return rawData.bsmAttrIdx;
+  }
+
+  const JBCConstNameAndType *GetConstNameAndType() const {
+    return constNameAndType;
+  }
+
+  FEStructElemInfo *GetFEStructElemInfo() const {
+    CHECK_NULL_FATAL(feStructElemInfo);
+    return feStructElemInfo;
+  }
+
+ protected:
+  bool ParseFileImpl(BasicIORead &io) override;
+  bool PreProcessImpl(const JBCConstPool &constPool) override;
+  SimpleXMLElem *GenXMLElemImpl(MapleAllocator &alloc, uint32 id) const override;
+
+ private:
+  struct {
+    JBCConstPoolIdx bsmAttrIdx;
+    JBCConstPoolIdx nameAndTypeIdx;
+  } rawData;
+  const JBCConstNameAndType *constNameAndType;
+  FEStructElemInfo *feStructElemInfo;
+};
+}  // namespace jbc
+}  // namespace maple
+#endif  // HIR2MPL_INCLUDE_JBC_CLASS_CONST_H
diff --git a/src/hir2mpl/bytecode_input/class/include/jbc_class_const_pool.h b/src/hir2mpl/bytecode_input/class/include/jbc_class_const_pool.h
new file mode 100644
index 0000000000000000000000000000000000000000..0acdb08a6e2c2b8292631939b4f0204206626b66
--- /dev/null
+++ b/src/hir2mpl/bytecode_input/class/include/jbc_class_const_pool.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#ifndef HIR2MPL_INCLUDE_JBC_CLASS_CONST_POOL_H
+#define HIR2MPL_INCLUDE_JBC_CLASS_CONST_POOL_H
+#include "jbc_class_const.h"
+
+namespace maple {
+namespace jbc {
+class JBCConstPool {
+ public:
+  JBCConstPool(MapleAllocator &alloc);
+  ~JBCConstPool() = default;
+  uint16 InsertConst(JBCConst &objConst);
+  void InsertConstDummyForWide();
+
+  /*
+   * GetConstByIdx
+   * GetConstXXXByIdx
+   * If safed is true, a failed lookup is fatal: the returned pointer is guaranteed to be non-null,
+   * otherwise a fatal error is reported and the compiler exits.
+   * If safed is false, the returned pointer may be nullptr and an error message is printed.
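+   * Illustrative use (hypothetical caller code, for documentation only; "pool" and "idx" are not
+   * names defined in this header):
+   *   const JBCConst *c1 = pool.GetConstByIdxWithTag(idx, kConstClass);        // safed=false: may be nullptr
+   *   const JBCConst *c2 = pool.GetConstByIdxWithTag(idx, kConstClass, true);  // safed=true: non-null or fatal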
+   */
+  const JBCConst *GetConstByIdx(uint16 idx, bool safed = false) const;
+  const JBCConst *GetConstByIdxWithTag(uint16 idx, JBCConstTag tag, bool safed = false) const;
+  const JBCConst *GetConstValueByIdx(uint16 idx, bool safed = false) const;
+  const JBCConst *GetConstValue4ByteByIdx(uint16 idx, bool safed = false) const;
+  const JBCConst *GetConstValue8ByteByIdx(uint16 idx, bool safed = false) const;
+  std::string GetNameByClassInfoIdx(uint16 idx, bool safed = false) const;
+  bool PreProcess(uint16 argMajorVersion);
+  bool PrepareFEStructElemInfo(const std::string &ownerClassName);
+  JBCConstUTF8 *NewConstUTF8(uint16 &idx, const std::string &str);
+  JBCConst4Byte *NewConst4Byte(uint16 &idx, int32 value);
+  JBCConst4Byte *NewConst4Byte(uint16 &idx, float value);
+  JBCConst8Byte *NewConst8Byte(uint16 &idx, int64 value);
+  JBCConst8Byte *NewConst8Byte(uint16 &idx, double value);
+  JBCConstClass *NewConstClass(uint16 &idx, const std::string &className);
+  JBCConstString *NewConstString(uint16 &idx, const std::string &str);
+  JBCConstRef *NewConstRef(uint16 &idx, JBCConstTag tag, const std::string &className, const std::string &name,
+                           const std::string &desc);
+  JBCConstNameAndType *NewConstNameAndType(uint16 &idx, const std::string &name, const std::string &desc);
+
+  uint16 GetMajorVersion() const {
+    return majorVersion;
+  }
+
+  std::string GetStringByConstUTF8(uint16 idx, bool safed = false) const {
+    const JBCConst *constUTF8 = GetConstByIdxWithTag(idx, JBCConstTag::kConstUTF8, safed);
+    return constUTF8 == nullptr ? "" : static_cast<const JBCConstUTF8*>(constUTF8)->GetString();
+  }
+
+ private:
+  MapleAllocator &allocator;
+  MapleVector<JBCConst*> pool;  // constant-pool entries; wide constants (long/double) occupy an extra dummy slot
+  uint16 majorVersion;
+};
+}  // namespace jbc
+}  // namespace maple
+#endif  // HIR2MPL_INCLUDE_JBC_CLASS_CONST_POOL_H
diff --git a/src/hir2mpl/bytecode_input/class/include/jbc_class_header.h b/src/hir2mpl/bytecode_input/class/include/jbc_class_header.h
new file mode 100644
index 0000000000000000000000000000000000000000..c2605537ca5c43af54fc665c4714d87cfee631fd
--- /dev/null
+++ b/src/hir2mpl/bytecode_input/class/include/jbc_class_header.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */ +#ifndef HIR2MPL_INCLUDE_JBC_CLASS_HEADER_H +#define HIR2MPL_INCLUDE_JBC_CLASS_HEADER_H +#include "types_def.h" +#include "basic_io.h" + +namespace maple { +namespace jbc { +class JBCClassHeader { + public: + JBCClassHeader(); + ~JBCClassHeader() = default; + bool ParseClassFile(BasicIORead &io) const; + + private: + uint32 magic; + uint16 minorVersion; + uint16 majorVersion; + uint16 constPoolCount; + uint16 accessFlag; + uint16 thisClass; + uint16 superClass; + uint16 interfacesCount; + uint16 fieldsCount; + uint16 methodsCount; + uint16 attrsCount; +}; +} // namespace jbc +} // namespace maple +#endif \ No newline at end of file diff --git a/src/hir2mpl/bytecode_input/class/include/jbc_compiler_component.h b/src/hir2mpl/bytecode_input/class/include/jbc_compiler_component.h new file mode 100644 index 0000000000000000000000000000000000000000..bf8ea0f203e48205b9374c1b913a9c5dac1801af --- /dev/null +++ b/src/hir2mpl/bytecode_input/class/include/jbc_compiler_component.h @@ -0,0 +1,44 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef HIR2MPL_INCLUDE_JBC_COMPILER_COMPONENT_H +#define HIR2MPL_INCLUDE_JBC_COMPILER_COMPONENT_H +#include "fe_macros.h" +#include "hir2mpl_compiler_component.h" +#include "jbc_input.h" +#include "fe_function_phase_result.h" + +namespace maple { +class JBCCompilerComponent : public HIR2MPLCompilerComponent { + public: + JBCCompilerComponent(MIRModule &module); + ~JBCCompilerComponent() override; + + protected: + bool ParseInputImpl() override; + bool LoadOnDemandTypeImpl() override; + void ProcessPragmaImpl() override; + std::unique_ptr CreatFEFunctionImpl(FEInputMethodHelper *methodHelper) override; + std::string GetComponentNameImpl() const override; + bool ParallelableImpl() const override; + void DumpPhaseTimeTotalImpl() const override; + void ReleaseMemPoolImpl() override; + + private: + MemPool *mp; + MapleAllocator allocator; + jbc::JBCInput jbcInput; +}; // class JBCCompilerComponent +} // namespace maple +#endif // HIR2MPL_INCLUDE_JBC_COMPILER_COMPONENT_H diff --git a/src/hir2mpl/bytecode_input/class/include/jbc_function.h b/src/hir2mpl/bytecode_input/class/include/jbc_function.h new file mode 100644 index 0000000000000000000000000000000000000000..7f86e8c492c8efcc8ac25e79d62befe35b07f73c --- /dev/null +++ b/src/hir2mpl/bytecode_input/class/include/jbc_function.h @@ -0,0 +1,114 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef HIR2MPL_INCLUDE_COMMON_JBC_FUNCTION_H +#define HIR2MPL_INCLUDE_COMMON_JBC_FUNCTION_H +#include +#include +#include +#include +#include +#include "fe_configs.h" +#include "fe_function.h" +#include "jbc_class.h" +#include "jbc_attr.h" +#include "jbc_stmt.h" +#include "jbc_bb.h" +#include "jbc_class2fe_helper.h" +#include "jbc_stack2fe_helper.h" +#include "jbc_function_context.h" + +namespace maple { +class JBCBBPesudoCatchPred : public FEIRBB { + public: + static const uint8 kBBKindPesudoCatchPred = FEIRBBKind::kBBKindExt + 1; + JBCBBPesudoCatchPred() + : FEIRBB(kBBKindPesudoCatchPred) {} + ~JBCBBPesudoCatchPred() override = default; +}; // class JBCBBPesudoCatchPred + +class JBCFunction : public FEFunction { + public: + JBCFunction(const JBCClassMethod2FEHelper &argMethodHelper, MIRFunction &mirFunc, + const std::unique_ptr &argPhaseResultTotal); + ~JBCFunction(); + + LLT_PROTECTED: + // run phase routines + bool GenerateGeneralStmt(const std::string &phaseName) override; + bool LabelLabelIdx(const std::string &phaseName); + bool CheckJVMStack(const std::string &phaseName); + bool GenerateArgVarList(const std::string &phaseName) override; + bool GenerateAliasVars(const std::string &phaseName) override; + bool ProcessFunctionArgs(const std::string &phaseName); + bool EmitLocalVarInfo(const std::string &phaseName); + bool EmitToFEIRStmt(const std::string &phaseName) override; + + // interface implement + void InitImpl() override; + void PreProcessImpl() override; + bool ProcessImpl() override; + void FinishImpl() override; + bool PreProcessTypeNameIdx() override; + void GenerateGeneralStmtFailCallBack() override; + void GenerateGeneralDebugInfo() override; + bool VerifyGeneral() override; + void VerifyGeneralFailCallBack() override; + std::string GetGeneralFuncName() const override; + + bool HasThis() override { + return methodHelper.HasThis(); + } + + bool IsNative() override { + return methodHelper.IsNative(); + } + + void EmitToFEIRStmt(const JBCBB &bb); + + LLT_PRIVATE: + const JBCClassMethod2FEHelper &methodHelper; + const jbc::JBCClassMethod &method; + JBCStack2FEHelper stack2feHelper; + JBCFunctionContext context; + bool error = false; + FEIRBB *pesudoBBCatchPred = nullptr; + + bool PreBuildJsrInfo(const jbc::JBCAttrCode &code); + bool BuildStmtFromInstruction(const jbc::JBCAttrCode &code); + FEIRStmt *BuildStmtFromInstructionForBranch(const jbc::JBCOp &op); + FEIRStmt *BuildStmtFromInstructionForGoto(const jbc::JBCOp &op); + FEIRStmt *BuildStmtFromInstructionForSwitch(const jbc::JBCOp &op); + FEIRStmt *BuildStmtFromInstructionForJsr(const jbc::JBCOp &op); + FEIRStmt *BuildStmtFromInstructionForRet(const jbc::JBCOp &op); + void BuildStmtForCatch(const jbc::JBCAttrCode &code); + void BuildStmtForTry(const jbc::JBCAttrCode &code); + void BuildTryInfo(const std::map, std::vector> &rawInfo, + std::map &outMapStartEnd, + std::map> &outMapStartCatch); + void BuildTryInfoCatch(const std::map, std::vector> &rawInfo, + const std::deque> &blockQueue, + uint32 startPos, + std::map> &outMapStartCatch); + void BuildStmtForLOC(const jbc::JBCAttrCode &code); + void BuildStmtForInstComment(const jbc::JBCAttrCode &code); + FEIRStmt *BuildAndUpdateLabel(uint32 dstPC, const std::unique_ptr &srcStmt); + void ArrangeStmts(); + bool CheckJVMStackResult(); + void InitStack2FEHelper(); + uint32 CalculateMaxSwapSize() const; + bool NeedConvertToInt32(const std::unique_ptr &var); +}; // class JBCFunction +} // namespace maple +#endif // HIR2MPL_INCLUDE_COMMON_JBC_FUNCTION_H diff --git 
a/src/hir2mpl/bytecode_input/class/include/jbc_function_context.h b/src/hir2mpl/bytecode_input/class/include/jbc_function_context.h new file mode 100644 index 0000000000000000000000000000000000000000..01ec6454e850c09cc861fb58cdc9cc35041a9d09 --- /dev/null +++ b/src/hir2mpl/bytecode_input/class/include/jbc_function_context.h @@ -0,0 +1,142 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef HIR2MPL_INCLUDE_JBC_INPUT_JBC_FUNCTION_CONTEXT_H +#define HIR2MPL_INCLUDE_JBC_INPUT_JBC_FUNCTION_CONTEXT_H +#include "mir_type.h" +#include "jbc_class_const_pool.h" +#include "jbc_attr.h" +#include "jbc_stack2fe_helper.h" +#include "jbc_stmt.h" + +namespace maple { +class JBCFunctionContext { + public: + JBCFunctionContext(const jbc::JBCConstPool &argConstPool, + JBCStack2FEHelper &argStack2feHelper, + const jbc::JBCAttrCode *argCode) + : constPool(argConstPool), + stack2feHelper(argStack2feHelper), + code(argCode) {} + + ~JBCFunctionContext() { + code = nullptr; + } + + int32 RegisterJsrSlotRetAddr(uint16 slotIdx, uint32 nextPC); + void ArrangeStmts(); + const FEIRType *GetSlotType(uint16 slotIdx, uint32 pc) const; + const FEIRType *GetSlotType(uint16 slotIdx) const; + + JBCStack2FEHelper &GetStack2FEHelper() { + return stack2feHelper; + } + + const jbc::JBCConstPool &GetConstPool() const { + return constPool; + } + + const jbc::JBCAttrCode *GetCode() const { + return code; + } + + const std::map &GetMapPCStmtInst() const { + return mapPCStmtInst; + } + + const std::map &GetMapPCTryStmt() const { + return mapPCTryStmt; + } + + const std::map &GetMapPCEndTryStmt() const { + return mapPCEndTryStmt; + } + + const std::map &GetMapPCCatchStmt() const { + return mapPCCatchStmt; + } + + const std::map &GetMapPCLabelStmt() const { + return mapPCLabelStmt; + } + + const std::map &GetMapPCStmtLOC() const { + return mapPCStmtLOC; + } + + const std::map &GetMapPCCommentStmt() const { + return mapPCCommentStmt; + } + + const std::map> &GetMapJsrSlotRetAddr() const { + return mapJsrSlotRetAddr; + } + + void UpdateMapPCStmtInst(uint32 pc, FEIRStmt *stmt) { + mapPCStmtInst[pc] = stmt; + } + + void UpdateMapPCTryStmt(uint32 pc, JBCStmtPesudoTry *stmt) { + mapPCTryStmt[pc] = stmt; + } + + void UpdateMapPCEndTryStmt(uint32 pc, JBCStmtPesudoEndTry *stmt) { + mapPCEndTryStmt[pc] = stmt; + } + + void UpdateMapPCCatchStmt(uint32 pc, JBCStmtPesudoCatch *stmt) { + mapPCCatchStmt[pc] = stmt; + } + + void UpdateMapPCLabelStmt(uint32 pc, JBCStmtPesudoLabel *stmt) { + mapPCLabelStmt[pc] = stmt; + } + + void UpdateMapPCStmtLOC(uint32 pc, JBCStmtPesudoLOC *stmt) { + mapPCStmtLOC[pc] = stmt; + } + + void UpdateMapPCCommentStmt(uint32 pc, JBCStmtPesudoComment *stmt) { + mapPCCommentStmt[pc] = stmt; + } + + const jbc::JBCAttrLocalVariableInfo &GetLocalVarInfo() const { + CHECK_NULL_FATAL(code); + return code->GetLocalVarInfo(); + } + + void SetCurrPC(uint32 pc) { + currPC = pc; + } + + uint32 GetCurrPC() const { + return currPC; + } + + private: + const jbc::JBCConstPool &constPool; + 
JBCStack2FEHelper &stack2feHelper; + const jbc::JBCAttrCode *code; + std::map mapPCStmtInst; + std::map mapPCTryStmt; // key: tryStartPC, value: stmt + std::map mapPCEndTryStmt; // key: tryEndPC, value: stmt + std::map mapPCCatchStmt; // key: handlePC, value: stmt + std::map mapPCLabelStmt; // key: labelPC, value: stmt + std::map mapPCStmtLOC; // key: locPC, value: stmt + std::map mapPCCommentStmt; // key: commentPC, value: stmt + std::map> mapJsrSlotRetAddr; // key: slotIdx, value: map + uint32 currPC = 0; +}; // class JBCFunctionContext +} // namespace maple +#endif // HIR2MPL_INCLUDE_JBC_INPUT_JBC_FUNCTION_CONTEXT_H \ No newline at end of file diff --git a/src/hir2mpl/bytecode_input/class/include/jbc_input.h b/src/hir2mpl/bytecode_input/class/include/jbc_input.h new file mode 100644 index 0000000000000000000000000000000000000000..ce211d00c68451bdf9d2c5338be491c964418f37 --- /dev/null +++ b/src/hir2mpl/bytecode_input/class/include/jbc_input.h @@ -0,0 +1,52 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef HIR2MPL_INCLUDE_JBC_INPUT_H +#define HIR2MPL_INCLUDE_JBC_INPUT_H +#include +#include +#include "mempool_allocator.h" +#include "jbc_class.h" + +namespace maple { +namespace jbc { +class JBCInput { + public: + explicit JBCInput(MIRModule &moduleIn); + ~JBCInput(); + void ReleaseMemPool(); + bool ReadClassFile(const std::string &fileName); + bool ReadClassFiles(const std::list &fileNames); + bool ReadJarFile(const std::string &fileName); + bool ReadJarFiles(const std::list &fileNames); + const JBCClass *GetFirstClass(); + const JBCClass *GetNextClass(); + void RegisterSrcFileInfo(JBCClass &klass); + + const MIRModule &GetModule() const { + return module; + } + + protected: + MIRModule &module; + MemPool *mp; + MapleAllocator allocator; + MapleList klassList; + + private: + MapleList::const_iterator itKlass; +}; +} // namespace jbc +} // namespace maple +#endif // HIR2MPL_INCLUDE_JBC_INPUT_H \ No newline at end of file diff --git a/src/hir2mpl/bytecode_input/class/include/jbc_opcode.def b/src/hir2mpl/bytecode_input/class/include/jbc_opcode.def new file mode 100644 index 0000000000000000000000000000000000000000..ede7d7eb28cd409b1e2d44dfb52a4fa5e63e2e11 --- /dev/null +++ b/src/hir2mpl/bytecode_input/class/include/jbc_opcode.def @@ -0,0 +1,271 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +// JBC_OP(opcode, value, type, name, flags) +JBC_OP(Nop, 0x00, Default, "nop", (kOpFlagFallThru)) +JBC_OP(AConstNull, 0x01, Const, "aconst_null", (kOpFlagFallThru)) +JBC_OP(IConstM1, 0x02, Const, "iconst_m1", (kOpFlagFallThru)) +JBC_OP(IConst0, 0x03, Const, "iconst_0", (kOpFlagFallThru)) +JBC_OP(IConst1, 0x04, Const, "iconst_1", (kOpFlagFallThru)) +JBC_OP(IConst2, 0x05, Const, "iconst_2", (kOpFlagFallThru)) +JBC_OP(IConst3, 0x06, Const, "iconst_3", (kOpFlagFallThru)) +JBC_OP(IConst4, 0x07, Const, "iconst_4", (kOpFlagFallThru)) +JBC_OP(IConst5, 0x08, Const, "iconst_5", (kOpFlagFallThru)) +JBC_OP(LConst0, 0x09, Const, "lconst_0", (kOpFlagFallThru)) +JBC_OP(LConst1, 0x0A, Const, "lconst_1", (kOpFlagFallThru)) +JBC_OP(FConst0, 0x0B, Const, "fconst_0", (kOpFlagFallThru)) +JBC_OP(FConst1, 0x0C, Const, "fconst_1", (kOpFlagFallThru)) +JBC_OP(FConst2, 0x0D, Const, "fconst_2", (kOpFlagFallThru)) +JBC_OP(DConst0, 0x0E, Const, "dconst_0", (kOpFlagFallThru)) +JBC_OP(DConst1, 0x0F, Const, "dconst_1", (kOpFlagFallThru)) +JBC_OP(BiPush, 0x10, Const, "bipush", (kOpFlagFallThru)) +JBC_OP(SiPush, 0x11, Const, "sipush", (kOpFlagFallThru)) +JBC_OP(Ldc, 0x12, Const, "ldc", (kOpFlagFallThru)) +JBC_OP(LdcW, 0x13, Const, "ldc_w", (kOpFlagFallThru)) +JBC_OP(Ldc2W, 0x14, Const, "ldc2_w", (kOpFlagFallThru)) +JBC_OP(ILoad, 0x15, Load, "iload", (kOpFlagFallThru)) +JBC_OP(LLoad, 0x16, Load, "lload", (kOpFlagFallThru)) +JBC_OP(FLoad, 0x17, Load, "fload", (kOpFlagFallThru)) +JBC_OP(DLoad, 0x18, Load, "dload", (kOpFlagFallThru)) +JBC_OP(ALoad, 0x19, Load, "aload", (kOpFlagFallThru)) +JBC_OP(ILoad0, 0x1A, Load, "iload_0", (kOpFlagFallThru)) +JBC_OP(ILoad1, 0x1B, Load, "iload_1", (kOpFlagFallThru)) +JBC_OP(ILoad2, 0x1C, Load, "iload_2", (kOpFlagFallThru)) +JBC_OP(ILoad3, 0x1D, Load, "iload_3", (kOpFlagFallThru)) +JBC_OP(LLoad0, 0x1E, Load, "lload_0", (kOpFlagFallThru)) +JBC_OP(LLoad1, 0x1F, Load, "lload_1", (kOpFlagFallThru)) +JBC_OP(LLoad2, 0x20, Load, "lload_2", (kOpFlagFallThru)) +JBC_OP(LLoad3, 0x21, Load, "lload_3", (kOpFlagFallThru)) +JBC_OP(FLoad0, 0x22, Load, "fload_0", (kOpFlagFallThru)) +JBC_OP(FLoad1, 0x23, Load, "fload_1", (kOpFlagFallThru)) +JBC_OP(FLoad2, 0x24, Load, "fload_2", (kOpFlagFallThru)) +JBC_OP(FLoad3, 0x25, Load, "fload_3", (kOpFlagFallThru)) +JBC_OP(DLoad0, 0x26, Load, "dload_0", (kOpFlagFallThru)) +JBC_OP(DLoad1, 0x27, Load, "dload_1", (kOpFlagFallThru)) +JBC_OP(DLoad2, 0x28, Load, "dload_2", (kOpFlagFallThru)) +JBC_OP(DLoad3, 0x29, Load, "dload_3", (kOpFlagFallThru)) +JBC_OP(ALoad0, 0x2A, Load, "aload_0", (kOpFlagFallThru)) +JBC_OP(ALoad1, 0x2B, Load, "aload_1", (kOpFlagFallThru)) +JBC_OP(ALoad2, 0x2C, Load, "aload_2", (kOpFlagFallThru)) +JBC_OP(ALoad3, 0x2D, Load, "aload_3", (kOpFlagFallThru)) +JBC_OP(IALoad, 0x2E, ArrayLoad, "iaload", (kOpFlagFallThru | kOpFlagThrowable)) +JBC_OP(LALoad, 0x2F, ArrayLoad, "laload", (kOpFlagFallThru | kOpFlagThrowable)) +JBC_OP(FALoad, 0x30, ArrayLoad, "faload", (kOpFlagFallThru | kOpFlagThrowable)) +JBC_OP(DALoad, 0x31, ArrayLoad, "daload", (kOpFlagFallThru | kOpFlagThrowable)) +JBC_OP(AALoad, 0x32, ArrayLoad, "aaload", (kOpFlagFallThru | kOpFlagThrowable)) +JBC_OP(BALoad, 0x33, ArrayLoad, "baload", (kOpFlagFallThru | kOpFlagThrowable)) +JBC_OP(CALoad, 0x34, ArrayLoad, "caload", (kOpFlagFallThru | kOpFlagThrowable)) +JBC_OP(SALoad, 0x35, ArrayLoad, "saload", (kOpFlagFallThru | kOpFlagThrowable)) +JBC_OP(IStore, 0x36, Store, "istore", (kOpFlagFallThru)) +JBC_OP(LStore, 0x37, Store, "lstore", (kOpFlagFallThru)) +JBC_OP(FStore, 0x38, Store, "fstore", 
(kOpFlagFallThru)) +JBC_OP(DStore, 0x39, Store, "dstore", (kOpFlagFallThru)) +JBC_OP(AStore, 0x3A, Store, "astore", (kOpFlagFallThru)) +JBC_OP(IStore0, 0x3B, Store, "istore_0", (kOpFlagFallThru)) +JBC_OP(IStore1, 0x3C, Store, "istore_1", (kOpFlagFallThru)) +JBC_OP(IStore2, 0x3D, Store, "istore_2", (kOpFlagFallThru)) +JBC_OP(IStore3, 0x3E, Store, "istore_3", (kOpFlagFallThru)) +JBC_OP(LStore0, 0x3F, Store, "lstore_0", (kOpFlagFallThru)) +JBC_OP(LStore1, 0x40, Store, "lstore_1", (kOpFlagFallThru)) +JBC_OP(LStore2, 0x41, Store, "lstore_2", (kOpFlagFallThru)) +JBC_OP(LStore3, 0x42, Store, "lstore_3", (kOpFlagFallThru)) +JBC_OP(FStore0, 0x43, Store, "fstore_0", (kOpFlagFallThru)) +JBC_OP(FStore1, 0x44, Store, "fstore_1", (kOpFlagFallThru)) +JBC_OP(FStore2, 0x45, Store, "fstore_2", (kOpFlagFallThru)) +JBC_OP(FStore3, 0x46, Store, "fstore_3", (kOpFlagFallThru)) +JBC_OP(DStore0, 0x47, Store, "dstore_0", (kOpFlagFallThru)) +JBC_OP(DStore1, 0x48, Store, "dstore_1", (kOpFlagFallThru)) +JBC_OP(DStore2, 0x49, Store, "dstore_2", (kOpFlagFallThru)) +JBC_OP(DStore3, 0x4A, Store, "dstore_3", (kOpFlagFallThru)) +JBC_OP(AStore0, 0x4B, Store, "astore_0", (kOpFlagFallThru)) +JBC_OP(AStore1, 0x4C, Store, "astore_1", (kOpFlagFallThru)) +JBC_OP(AStore2, 0x4D, Store, "astore_2", (kOpFlagFallThru)) +JBC_OP(AStore3, 0x4E, Store, "astore_3", (kOpFlagFallThru)) +JBC_OP(IAStore, 0x4F, ArrayStore, "iastore", (kOpFlagFallThru | kOpFlagThrowable)) +JBC_OP(LAStore, 0x50, ArrayStore, "lastore", (kOpFlagFallThru | kOpFlagThrowable)) +JBC_OP(FAStore, 0x51, ArrayStore, "fastore", (kOpFlagFallThru | kOpFlagThrowable)) +JBC_OP(DAStore, 0x52, ArrayStore, "dastore", (kOpFlagFallThru | kOpFlagThrowable)) +JBC_OP(AAStore, 0x53, ArrayStore, "aastore", (kOpFlagFallThru | kOpFlagThrowable)) +JBC_OP(BAStore, 0x54, ArrayStore, "bastore", (kOpFlagFallThru | kOpFlagThrowable)) +JBC_OP(CAStore, 0x55, ArrayStore, "castore", (kOpFlagFallThru | kOpFlagThrowable)) +JBC_OP(SAStore, 0x56, ArrayStore, "sastore", (kOpFlagFallThru | kOpFlagThrowable)) +JBC_OP(Pop, 0x57, Pop, "pop", (kOpFlagFallThru)) +JBC_OP(Pop2, 0x58, Pop, "pop2", (kOpFlagFallThru)) +JBC_OP(Dup, 0x59, Dup, "dup", (kOpFlagFallThru)) +JBC_OP(DupX1, 0x5A, Dup, "dup_x1", (kOpFlagFallThru)) +JBC_OP(DupX2, 0x5B, Dup, "dup_x2", (kOpFlagFallThru)) +JBC_OP(Dup2, 0x5C, Dup, "dup2", (kOpFlagFallThru)) +JBC_OP(Dup2X1, 0x5D, Dup, "dup2_x1", (kOpFlagFallThru)) +JBC_OP(Dup2X2, 0x5E, Dup, "dup2_x2", (kOpFlagFallThru)) +JBC_OP(Swap, 0x5F, Swap, "swap", (kOpFlagFallThru)) +JBC_OP(IAdd, 0x60, MathBinop, "iadd", (kOpFlagFallThru)) +JBC_OP(LAdd, 0x61, MathBinop, "ladd", (kOpFlagFallThru)) +JBC_OP(FAdd, 0x62, MathBinop, "fadd", (kOpFlagFallThru)) +JBC_OP(DAdd, 0x63, MathBinop, "dadd", (kOpFlagFallThru)) +JBC_OP(ISub, 0x64, MathBinop, "isub", (kOpFlagFallThru)) +JBC_OP(LSub, 0x65, MathBinop, "lsub", (kOpFlagFallThru)) +JBC_OP(FSub, 0x66, MathBinop, "fsub", (kOpFlagFallThru)) +JBC_OP(DSub, 0x67, MathBinop, "dsub", (kOpFlagFallThru)) +JBC_OP(IMul, 0x68, MathBinop, "imul", (kOpFlagFallThru)) +JBC_OP(LMul, 0x69, MathBinop, "lmul", (kOpFlagFallThru)) +JBC_OP(FMul, 0x6A, MathBinop, "fmul", (kOpFlagFallThru)) +JBC_OP(DMul, 0x6B, MathBinop, "dmul", (kOpFlagFallThru)) +JBC_OP(IDiv, 0x6C, MathBinop, "idiv", (kOpFlagFallThru | kOpFlagThrowable)) +JBC_OP(LDiv, 0x6D, MathBinop, "ldiv", (kOpFlagFallThru | kOpFlagThrowable)) +JBC_OP(FDiv, 0x6E, MathBinop, "fdiv", (kOpFlagFallThru)) +JBC_OP(DDiv, 0x6F, MathBinop, "ddiv", (kOpFlagFallThru)) +JBC_OP(IRem, 0x70, MathBinop, "irem", (kOpFlagFallThru | kOpFlagThrowable)) 
+JBC_OP(LRem, 0x71, MathBinop, "lrem", (kOpFlagFallThru | kOpFlagThrowable)) +JBC_OP(FRem, 0x72, MathBinop, "frem", (kOpFlagFallThru)) +JBC_OP(DRem, 0x73, MathBinop, "drem", (kOpFlagFallThru)) +JBC_OP(INeg, 0x74, MathUnop, "ineg", (kOpFlagFallThru)) +JBC_OP(LNeg, 0x75, MathUnop, "lneg", (kOpFlagFallThru)) +JBC_OP(FNeg, 0x76, MathUnop, "fneg", (kOpFlagFallThru)) +JBC_OP(DNeg, 0x77, MathUnop, "dneg", (kOpFlagFallThru)) +JBC_OP(IShl, 0x78, MathBinop, "ishl", (kOpFlagFallThru)) +JBC_OP(LShl, 0x79, MathBinop, "lshl", (kOpFlagFallThru)) +JBC_OP(IShr, 0x7A, MathBinop, "ishr", (kOpFlagFallThru)) +JBC_OP(LShr, 0x7B, MathBinop, "lshr", (kOpFlagFallThru)) +JBC_OP(IUShr, 0x7C, MathBinop, "iushr", (kOpFlagFallThru)) +JBC_OP(LUShr, 0x7D, MathBinop, "lushr", (kOpFlagFallThru)) +JBC_OP(IAnd, 0x7E, MathBinop, "iand", (kOpFlagFallThru)) +JBC_OP(LAnd, 0x7F, MathBinop, "land", (kOpFlagFallThru)) +JBC_OP(IOr, 0x80, MathBinop, "ior", (kOpFlagFallThru)) +JBC_OP(LOr, 0x81, MathBinop, "lor", (kOpFlagFallThru)) +JBC_OP(IXor, 0x82, MathBinop, "ixor", (kOpFlagFallThru)) +JBC_OP(LXor, 0x83, MathBinop, "lxor", (kOpFlagFallThru)) +JBC_OP(IInc, 0x84, MathInc, "iinc", (kOpFlagFallThru)) +JBC_OP(I2L, 0x85, Convert, "i2l", (kOpFlagFallThru)) +JBC_OP(I2F, 0x86, Convert, "i2f", (kOpFlagFallThru)) +JBC_OP(I2D, 0x87, Convert, "i2d", (kOpFlagFallThru)) +JBC_OP(L2I, 0x88, Convert, "l2i", (kOpFlagFallThru)) +JBC_OP(L2F, 0x89, Convert, "l2f", (kOpFlagFallThru)) +JBC_OP(L2D, 0x8A, Convert, "l2d", (kOpFlagFallThru)) +JBC_OP(F2I, 0x8B, Convert, "f2i", (kOpFlagFallThru)) +JBC_OP(F2L, 0x8C, Convert, "f2l", (kOpFlagFallThru)) +JBC_OP(F2D, 0x8D, Convert, "f2d", (kOpFlagFallThru)) +JBC_OP(D2I, 0x8E, Convert, "d2i", (kOpFlagFallThru)) +JBC_OP(D2L, 0x8F, Convert, "d2l", (kOpFlagFallThru)) +JBC_OP(D2F, 0x90, Convert, "d2f", (kOpFlagFallThru)) +JBC_OP(I2B, 0x91, Convert, "i2b", (kOpFlagFallThru)) +JBC_OP(I2C, 0x92, Convert, "i2c", (kOpFlagFallThru)) +JBC_OP(I2S, 0x93, Convert, "i2s", (kOpFlagFallThru)) +JBC_OP(LCmp, 0x94, Compare, "lcmp", (kOpFlagFallThru)) +JBC_OP(FCmpl, 0x95, Compare, "fcmpl", (kOpFlagFallThru)) +JBC_OP(FCmpg, 0x96, Compare, "fcmpg", (kOpFlagFallThru)) +JBC_OP(DCmpl, 0x97, Compare, "dcmpl", (kOpFlagFallThru)) +JBC_OP(DCmpg, 0x98, Compare, "dcmpg", (kOpFlagFallThru)) +JBC_OP(Ifeq, 0x99, Branch, "ifeq", (kOpFlagFallThru | kOpFlagBranch)) +JBC_OP(Ifne, 0x9A, Branch, "ifne", (kOpFlagFallThru | kOpFlagBranch)) +JBC_OP(Iflt, 0x9B, Branch, "iflt", (kOpFlagFallThru | kOpFlagBranch)) +JBC_OP(Ifge, 0x9C, Branch, "ifge", (kOpFlagFallThru | kOpFlagBranch)) +JBC_OP(Ifgt, 0x9D, Branch, "ifgt", (kOpFlagFallThru | kOpFlagBranch)) +JBC_OP(Ifle, 0x9E, Branch, "ifle", (kOpFlagFallThru | kOpFlagBranch)) +JBC_OP(IfICmpeq, 0x9F, Branch, "if_icmpeq", (kOpFlagFallThru | kOpFlagBranch)) +JBC_OP(IfICmpne, 0xA0, Branch, "if_icmpne", (kOpFlagFallThru | kOpFlagBranch)) +JBC_OP(IfICmplt, 0xA1, Branch, "if_icmplt", (kOpFlagFallThru | kOpFlagBranch)) +JBC_OP(IfICmpge, 0xA2, Branch, "if_icmpge", (kOpFlagFallThru | kOpFlagBranch)) +JBC_OP(IfICmpgt, 0xA3, Branch, "if_icmpgt", (kOpFlagFallThru | kOpFlagBranch)) +JBC_OP(IfICmple, 0xA4, Branch, "if_icmple", (kOpFlagFallThru | kOpFlagBranch)) +JBC_OP(IfACmpeq, 0xA5, Branch, "if_acmpeq", (kOpFlagFallThru | kOpFlagBranch)) +JBC_OP(IfACmpne, 0xA6, Branch, "if_acmpne", (kOpFlagFallThru | kOpFlagBranch)) +JBC_OP(Goto, 0xA7, Goto, "goto", (kOpFlagBranch)) +JBC_OP(Jsr, 0xA8, Jsr, "jsr", (kOpFlagBranch)) +JBC_OP(Ret, 0xA9, Ret, "ret", (kOpFlagBranch)) +JBC_OP(TableSwitch, 0xAA, Switch, "tableswitch", (kOpFlagBranch)) 
+JBC_OP(LookupSwitch, 0xAB, Switch, "lookupswitch", (kOpFlagBranch)) +JBC_OP(IReturn, 0xAC, Return, "ireturn", (kOpFlagThrowable)) +JBC_OP(LReturn, 0xAD, Return, "lreturn", (kOpFlagThrowable)) +JBC_OP(FReturn, 0xAE, Return, "freturn", (kOpFlagThrowable)) +JBC_OP(DReturn, 0xAF, Return, "dreturn", (kOpFlagThrowable)) +JBC_OP(AReturn, 0xB0, Return, "areturn", (kOpFlagThrowable)) +JBC_OP(Return, 0xB1, Return, "return", (kOpFlagThrowable)) +JBC_OP(GetStatic, 0xB2, StaticFieldOpr, "getstatic", (kOpFlagFallThru | kOpFlagThrowable)) +JBC_OP(PutStatic, 0xB3, StaticFieldOpr, "putstatic", (kOpFlagFallThru | kOpFlagThrowable)) +JBC_OP(GetField, 0xB4, FieldOpr, "getfield", (kOpFlagFallThru | kOpFlagThrowable)) +JBC_OP(PutField, 0xB5, FieldOpr, "putfield", (kOpFlagFallThru | kOpFlagThrowable)) +JBC_OP(InvokeVirtual, 0xB6, Invoke, "invokevirtual", (kOpFlagFallThru | kOpFlagThrowable)) +JBC_OP(InvokeSpecial, 0xB7, Invoke, "invokespecial", (kOpFlagFallThru | kOpFlagThrowable)) +JBC_OP(InvokeStatic, 0xB8, Invoke, "invokestatic", (kOpFlagFallThru | kOpFlagThrowable)) +JBC_OP(InvokeInterface, 0xB9, Invoke, "invokeinterface", (kOpFlagFallThru | kOpFlagThrowable)) +JBC_OP(InvokeDynamic, 0xBA, Invoke, "invokedynamic", (kOpFlagFallThru | kOpFlagThrowable)) +JBC_OP(New, 0xBB, New, "new", (kOpFlagFallThru | kOpFlagThrowable)) +JBC_OP(NewArray, 0xBC, New, "newarray", (kOpFlagFallThru | kOpFlagThrowable)) +JBC_OP(ANewArray, 0xBD, New, "anewarray", (kOpFlagFallThru | kOpFlagThrowable)) +JBC_OP(ArrayLength, 0xBE, ArrayLength, "arraylength", (kOpFlagFallThru | kOpFlagThrowable)) +JBC_OP(AThrow, 0xBF, Throw, "athrow", (kOpFlagThrowable)) +JBC_OP(CheckCast, 0xC0, TypeCheck, "checkcast", (kOpFlagFallThru | kOpFlagThrowable)) +JBC_OP(InstanceOf, 0xC1, TypeCheck, "instanceof", (kOpFlagFallThru)) +JBC_OP(MonitorEnter, 0xC2, Monitor, "monitorenter", (kOpFlagFallThru | kOpFlagThrowable)) +JBC_OP(MonitorExit, 0xC3, Monitor, "monitorexit", (kOpFlagFallThru | kOpFlagThrowable)) +JBC_OP(Wide, 0xC4, Default, "wide", (kOpFlagFallThru)) +JBC_OP(MultiANewArray, 0xC5, MultiANewArray, "multianewarray", (kOpFlagFallThru | kOpFlagThrowable)) +JBC_OP(IfNull, 0xC6, Branch, "ifnull", (kOpFlagFallThru | kOpFlagBranch)) +JBC_OP(IfNonNull, 0xC7, Branch, "ifnonnull", (kOpFlagFallThru | kOpFlagBranch)) +JBC_OP(GotoW, 0xC8, Goto, "goto_w", (kOpFlagBranch)) +JBC_OP(JsrW, 0xC9, Jsr, "jsr_w", (kOpFlagBranch)) +JBC_OP(BreakPoint, 0xCA, Reversed, "breakpoint", (kOpFlagFallThru)) +JBC_OP(UnusedCB, 0xCB, Unused, "unused_cb", (kOpFlagFallThru)) +JBC_OP(UnusedCC, 0xCC, Unused, "unused_cc", (kOpFlagFallThru)) +JBC_OP(UnusedCD, 0xCD, Unused, "unused_cd", (kOpFlagFallThru)) +JBC_OP(UnusedCE, 0xCE, Unused, "unused_ce", (kOpFlagFallThru)) +JBC_OP(UnusedCF, 0xCF, Unused, "unused_cf", (kOpFlagFallThru)) +JBC_OP(UnusedD0, 0xD0, Unused, "unused_d0", (kOpFlagFallThru)) +JBC_OP(UnusedD1, 0xD1, Unused, "unused_d1", (kOpFlagFallThru)) +JBC_OP(UnusedD2, 0xD2, Unused, "unused_d2", (kOpFlagFallThru)) +JBC_OP(UnusedD3, 0xD3, Unused, "unused_d3", (kOpFlagFallThru)) +JBC_OP(UnusedD4, 0xD4, Unused, "unused_d4", (kOpFlagFallThru)) +JBC_OP(UnusedD5, 0xD5, Unused, "unused_d5", (kOpFlagFallThru)) +JBC_OP(UnusedD6, 0xD6, Unused, "unused_d6", (kOpFlagFallThru)) +JBC_OP(UnusedD7, 0xD7, Unused, "unused_d7", (kOpFlagFallThru)) +JBC_OP(UnusedD8, 0xD8, Unused, "unused_d8", (kOpFlagFallThru)) +JBC_OP(UnusedD9, 0xD9, Unused, "unused_d9", (kOpFlagFallThru)) +JBC_OP(UnusedDA, 0xDA, Unused, "unused_da", (kOpFlagFallThru)) +JBC_OP(UnusedDB, 0xDB, Unused, "unused_db", (kOpFlagFallThru)) 
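+// For reference, jbc_opcode.h consumes this table through an X-macro include,
+// e.g. for the opcode enum:
+//   #define JBC_OP(op, value, type, name, flag) kOp##op = value,
+//   #include "jbc_opcode.def"   // kOpIRem = 0x70, kOpLookupSwitch = 0xAB, ...
+//   #undef JBC_OP
+// The type/name/flag columns likewise correspond to the JBCOpcodeDesc fields
+// (kind, name, flags) held by JBCOpcodeInfo, presumably filled the same way.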
+JBC_OP(UnusedDC, 0xDC, Unused, "unused_dc", (kOpFlagFallThru)) +JBC_OP(UnusedDD, 0xDD, Unused, "unused_dd", (kOpFlagFallThru)) +JBC_OP(UnusedDE, 0xDE, Unused, "unused_de", (kOpFlagFallThru)) +JBC_OP(UnusedDF, 0xDF, Unused, "unused_df", (kOpFlagFallThru)) +JBC_OP(UnusedE0, 0xE0, Unused, "unused_e0", (kOpFlagFallThru)) +JBC_OP(UnusedE1, 0xE1, Unused, "unused_e1", (kOpFlagFallThru)) +JBC_OP(UnusedE2, 0xE2, Unused, "unused_e2", (kOpFlagFallThru)) +JBC_OP(UnusedE3, 0xE3, Unused, "unused_e3", (kOpFlagFallThru)) +JBC_OP(UnusedE4, 0xE4, Unused, "unused_e4", (kOpFlagFallThru)) +JBC_OP(UnusedE5, 0xE5, Unused, "unused_e5", (kOpFlagFallThru)) +JBC_OP(UnusedE6, 0xE6, Unused, "unused_e6", (kOpFlagFallThru)) +JBC_OP(UnusedE7, 0xE7, Unused, "unused_e7", (kOpFlagFallThru)) +JBC_OP(UnusedE8, 0xE8, Unused, "unused_e8", (kOpFlagFallThru)) +JBC_OP(UnusedE9, 0xE9, Unused, "unused_e9", (kOpFlagFallThru)) +JBC_OP(UnusedEA, 0xEA, Unused, "unused_ea", (kOpFlagFallThru)) +JBC_OP(UnusedEB, 0xEB, Unused, "unused_eb", (kOpFlagFallThru)) +JBC_OP(UnusedEC, 0xEC, Unused, "unused_ec", (kOpFlagFallThru)) +JBC_OP(UnusedED, 0xED, Unused, "unused_ed", (kOpFlagFallThru)) +JBC_OP(UnusedEE, 0xEE, Unused, "unused_ee", (kOpFlagFallThru)) +JBC_OP(UnusedEF, 0xEF, Unused, "unused_ef", (kOpFlagFallThru)) +JBC_OP(UnusedF0, 0xF0, Unused, "unused_f0", (kOpFlagFallThru)) +JBC_OP(UnusedF1, 0xF1, Unused, "unused_f1", (kOpFlagFallThru)) +JBC_OP(UnusedF2, 0xF2, Unused, "unused_f2", (kOpFlagFallThru)) +JBC_OP(UnusedF3, 0xF3, Unused, "unused_f3", (kOpFlagFallThru)) +JBC_OP(UnusedF4, 0xF4, Unused, "unused_f4", (kOpFlagFallThru)) +JBC_OP(UnusedF5, 0xF5, Unused, "unused_f5", (kOpFlagFallThru)) +JBC_OP(UnusedF6, 0xF6, Unused, "unused_f6", (kOpFlagFallThru)) +JBC_OP(UnusedF7, 0xF7, Unused, "unused_f7", (kOpFlagFallThru)) +JBC_OP(UnusedF8, 0xF8, Unused, "unused_f8", (kOpFlagFallThru)) +JBC_OP(UnusedF9, 0xF9, Unused, "unused_f9", (kOpFlagFallThru)) +JBC_OP(UnusedFA, 0xFA, Unused, "unused_fa", (kOpFlagFallThru)) +JBC_OP(UnusedFB, 0xFB, Unused, "unused_fb", (kOpFlagFallThru)) +JBC_OP(UnusedFC, 0xFC, Unused, "unused_fc", (kOpFlagFallThru)) +JBC_OP(UnusedFD, 0xFD, Unused, "unused_fd", (kOpFlagFallThru)) +JBC_OP(ImpDep1, 0xFE, Reversed, "impdep1", (kOpFlagFallThru)) +JBC_OP(ImpDep2, 0xFF, Reversed, "impdep2", (kOpFlagFallThru)) \ No newline at end of file diff --git a/src/hir2mpl/bytecode_input/class/include/jbc_opcode.h b/src/hir2mpl/bytecode_input/class/include/jbc_opcode.h new file mode 100644 index 0000000000000000000000000000000000000000..d10e5e43654ea7ce48b5836809027bc27c1f8888 --- /dev/null +++ b/src/hir2mpl/bytecode_input/class/include/jbc_opcode.h @@ -0,0 +1,691 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ + +#ifndef HIR2MPL_INCLUDE_JBC_OPCODE_H +#define HIR2MPL_INCLUDE_JBC_OPCODE_H +#include +#include +#include +#include +#include "types_def.h" +#include "mempool_allocator.h" +#include "factory.h" +#include "basic_io.h" +#include "feir_type.h" + +namespace maple { +namespace jbc { +enum JBCOpcode : uint8 { +#define JBC_OP(op, value, type, name, flag) \ + kOp##op = value, +#include "jbc_opcode.def" +#undef JBC_OP +}; + +enum JBCOpcodeKind : uint8 { + kOpKindDefault = 0, +#define JBC_OP_KIND(kind, name) \ + kOpKind##kind, +#include "jbc_opcode_kind.def" +#undef JBC_OP_KIND + kOpKindSize +}; + +enum JBCPrimType : uint8 { + kTypeDefault = 0, + kTypeInt, + kTypeLong, + kTypeFloat, + kTypeDouble, + kTypeByteOrBoolean, + kTypeChar, + kTypeShort, + kTypeRef, + kTypeAddress, + kTypeLongDummy, + kTypeDoubleDummy, + kTypeSize +}; + +enum JBCOpcodeFlag : uint8 { + kOpFlagNone = 0, + kOpFlagFallThru = 1, + kOpFlagBranch = 1 << 1, + kOpFlagThrowable = 1 << 2 +}; + +struct JBCOpcodeDesc { + JBCOpcodeKind kind; + std::string name; + uint8 flags; +}; + +class JBCOpcodeInfo { + public: + static const uint32 kOpSize = 0x100; + JBCOpcodeInfo(); + ~JBCOpcodeInfo() = default; + + JBCOpcodeKind GetOpcodeKind(JBCOpcode op) const { + return table[static_cast(op)].kind; + } + + const std::string &GetOpcodeName(JBCOpcode op) const { + return table[static_cast(op)].name; + } + + bool IsFallThru(JBCOpcode op) const { + return (table[static_cast(op)].flags & kOpFlagFallThru) != 0; + } + + bool IsBranch(JBCOpcode op) const { + return (table[static_cast(op)].flags & kOpFlagBranch) != 0; + } + + bool IsThrowable(JBCOpcode op) const { + return (table[static_cast(op)].flags & kOpFlagThrowable) != 0; + } + + private: + JBCOpcodeDesc table[kOpSize]; +}; + +class JBCConstPool; +class JBCOp { + public: + JBCOp(MapleAllocator &allocator, JBCOpcode opIn, JBCOpcodeKind kindIn, bool wideIn); + virtual ~JBCOp() = default; + bool CheckNotWide(const BasicIORead &io) const; + bool ParseFile(BasicIORead &io) { + return ParseFileImpl(io); + } + + std::string Dump(const JBCConstPool &constPool) const { + return DumpImpl(constPool); + } + + const std::vector &GetInputTypesFromStack() const { + return GetInputTypesFromStackImpl(); + } + + std::vector GetInputTypesFromStack(const JBCConstPool &constPool) const { + return GetInputTypesFromStackImpl(constPool); + } + + JBCPrimType GetOutputTypesToStack() const { + return GetOutputTypesToStackImpl(); + } + + JBCPrimType GetOutputTypesToStack(const JBCConstPool &constPool) const { + return GetOutputTypesToStackImpl(constPool); + } + + JBCOpcode GetOpcode() const { + return op; + } + + JBCOpcodeKind GetOpcodeKind() const { + return kind; + } + + bool IsWide() const { + return wide; + } + + const std::string &GetOpcodeName() const { + return opcodeInfo.GetOpcodeName(op); + } + + bool IsFallThru() const { + return opcodeInfo.IsFallThru(op); + } + + bool IsBranch() const { + return opcodeInfo.IsBranch(op); + } + + bool IsThrowable() const { + return opcodeInfo.IsThrowable(op); + } + + static JBCOpcodeInfo &GetOpcodeInfo() { + return opcodeInfo; + } + + static uint32 GetTargetbyInt64(int64 pc) { + if (pc < 0 || pc > UINT16_MAX) { + CHECK_FATAL(false, "invalid PC: %ld", pc); + return 0; + } else { + return static_cast(pc); + } + } + + protected: + virtual bool ParseFileImpl(BasicIORead &io) = 0; + virtual const std::vector &GetInputTypesFromStackImpl() const; + virtual std::vector GetInputTypesFromStackImpl(const JBCConstPool &constPool) const; + virtual JBCPrimType GetOutputTypesToStackImpl() 
const; + virtual JBCPrimType GetOutputTypesToStackImpl(const JBCConstPool &constPool) const; + virtual std::string DumpImpl(const JBCConstPool &constPool) const; + + MapleAllocator &alloc; + JBCOpcode op : 8; + JBCOpcodeKind kind : 7; + bool wide : 1; + static JBCOpcodeInfo opcodeInfo; + static std::vector emptyPrimTypes; +}; + +class JBCOpUnused : public JBCOp { + public: + JBCOpUnused(MapleAllocator &allocator, JBCOpcode opIn, JBCOpcodeKind kindIn, bool wideIn); + ~JBCOpUnused() = default; + + protected: + bool ParseFileImpl(BasicIORead &io) override; +}; + +class JBCOpReversed : public JBCOp { + public: + JBCOpReversed(MapleAllocator &allocator, JBCOpcode opIn, JBCOpcodeKind kindIn, bool wideIn); + ~JBCOpReversed() = default; + + protected: + bool ParseFileImpl(BasicIORead &io) override; +}; + +class JBCOpDefault : public JBCOp { + public: + JBCOpDefault(MapleAllocator &allocator, JBCOpcode opIn, JBCOpcodeKind kindIn, bool wideIn); + ~JBCOpDefault() = default; + + protected: + bool ParseFileImpl(BasicIORead &io) override; + const std::vector &GetInputTypesFromStackImpl() const override; + JBCPrimType GetOutputTypesToStackImpl() const override; + + private: + static std::map> mapOpInputTypes; + static std::map> InitMapOpInputTypes(); + static std::map mapOpOutputType; + static std::map InitMapOpOutputType(); + static void InitMapOpInputTypesForArrayLoad(std::map> &ans); + static void InitMapOpOutputTypesForArrayLoad(std::map &ans); + static void InitMapOpInputTypesForArrayStore(std::map> &ans); + static void InitMapOpOutputTypesForArrayStore(std::map &ans); + static void InitMapOpInputTypesForMathBinop(std::map> &ans); + static void InitMapOpOutputTypesForMathBinop(std::map &ans); + static void InitMapOpInputTypesForMathUnop(std::map> &ans); + static void InitMapOpOutputTypesForMathUnop(std::map &ans); + static void InitMapOpInputTypesForConvert(std::map> &ans); + static void InitMapOpOutputTypesForConvert(std::map &ans); + static void InitMapOpInputTypesForCompare(std::map> &ans); + static void InitMapOpOutputTypesForCompare(std::map &ans); + static void InitMapOpInputTypesForReturn(std::map> &ans); + static void InitMapOpOutputTypesForReturn(std::map &ans); + static void InitMapOpInputTypesForThrow(std::map> &ans); + static void InitMapOpOutputTypesForThrow(std::map &ans); + static void InitMapOpInputTypesForMonitor(std::map> &ans); + static void InitMapOpOutputTypesForMonitor(std::map &ans); + static void InitMapOpInputTypesForArrayLength(std::map> &ans); + static void InitMapOpOutputTypesForArrayLength(std::map &ans); +}; + +class JBCOpConst : public JBCOp { + public: + JBCOpConst(MapleAllocator &allocator, JBCOpcode opIn, JBCOpcodeKind kindIn, bool wideIn); + ~JBCOpConst() = default; + std::string DumpBiPush() const; + std::string DumpSiPush() const; + std::string DumpLdc(const JBCConstPool &constPool) const; + int32 GetValueInt() const; + int64 GetValueLong() const; + float GetValueFloat() const; + double GetValueDouble() const; + int8 GetValueByte() const; + int16 GetValueShort() const; + uint16 GetIndex() const { + return u.index; + } + + void SetValue(int8 value) { + u.bvalue = value; + } + + void SetValue(int16 value) { + u.svalue = value; + } + + void SetIndex(uint16 argIdx) { + u.index = argIdx; + } + + protected: + bool ParseFileImpl(BasicIORead &io) override; + JBCPrimType GetOutputTypesToStackImpl(const JBCConstPool &constPool) const override; + std::string DumpImpl(const JBCConstPool &constPool) const override; + + private: + union { + int8 bvalue; + int16 svalue; + 
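+    // index/raw: constant-pool index used by the ldc family (bvalue/svalue above
+    // hold the bipush/sipush immediates).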
uint16 index; + uint16 raw; + } u; + + static std::map InitValueMapI(); + static std::map InitValueMapJ(); + static std::map InitValueMapF(); + static std::map InitValueMapD(); + + static std::map valueMapI; + static std::map valueMapJ; + static std::map valueMapF; + static std::map valueMapD; +}; + +class JBCOpSlotOpr : public JBCOp { + public: + JBCOpSlotOpr(MapleAllocator &allocator, JBCOpcode opIn, JBCOpcodeKind kindIn, bool wideIn); + ~JBCOpSlotOpr() = default; + bool IsAStore() const; + uint16 GetSlotIdx() const { + return slotIdx; + } + + void SetSlotIdx(uint16 argSlotIdx) { + slotIdx = argSlotIdx; + } + + bool IsAddressOpr() const { + return isAddressOpr; + } + + void SetAddressOpr() { + isAddressOpr = true; + } + + void UnsetAddressOpr() { + isAddressOpr = false; + } + + protected: + bool ParseFileImpl(BasicIORead &io) override; + const std::vector &GetInputTypesFromStackImpl() const override; + JBCPrimType GetOutputTypesToStackImpl() const override; + std::string DumpImpl(const JBCConstPool &constPool) const override; + + private: + static std::map> InitMapSlotIdxAndType(); + static std::map> InitMapOpInputTypes(); + static std::map InitMapOpOutputType(); + + uint16 slotIdx; + bool isAddressOpr; + static std::map> mapSlotIdxAndType; + static std::map> mapOpInputTypes; + static std::map mapOpOutputType; + static std::vector inputTypesAddressOpr; +}; + +class JBCOpMathInc : public JBCOp { + public: + JBCOpMathInc(MapleAllocator &allocator, JBCOpcode opIn, JBCOpcodeKind kindIn, bool wideIn); + ~JBCOpMathInc() = default; + uint16 GetIndex() const { + return index; + } + + void SetIndex(uint16 argIndex) { + index = argIndex; + } + + int16 GetIncr() const { + return incr; + } + + void SetIncr(int16 argIncr) { + incr = argIncr; + } + + protected: + bool ParseFileImpl(BasicIORead &io) override; + + private: + static std::map> InitMapOpInputTypes(); + static std::map InitMapOpOutputType(); + + uint16 index; + int16 incr; + static std::map> mapOpInputTypes; + static std::map mapOpOutputType; +}; + +class JBCOpBranch : public JBCOp { + public: + JBCOpBranch(MapleAllocator &allocator, JBCOpcode opIn, JBCOpcodeKind kindIn, bool wideIn); + ~JBCOpBranch() = default; + uint32 GetTarget() const { + return target; + } + + void SetTarget(uint32 argTarget) { + target = argTarget; + } + + protected: + bool ParseFileImpl(BasicIORead &io) override; + std::string DumpImpl(const JBCConstPool &constPool) const override; + const std::vector &GetInputTypesFromStackImpl() const override; + + private: + static std::map> InitMapOpInputTypes(); + + uint32 target; + static std::map> mapOpInputTypes; +}; + +class JBCOpGoto : public JBCOp { + public: + JBCOpGoto(MapleAllocator &allocator, JBCOpcode opIn, JBCOpcodeKind kindIn, bool wideIn); + ~JBCOpGoto() = default; + uint32 GetTarget() const { + return target; + } + + void SetTarget(uint32 argTarget) { + target = argTarget; + } + + protected: + bool ParseFileImpl(BasicIORead &io) override; + std::string DumpImpl(const JBCConstPool &constPool) const override; + + private: + uint32 target; +}; + +class JBCOpSwitch : public JBCOp { + public: + JBCOpSwitch(MapleAllocator &allocator, JBCOpcode opIn, JBCOpcodeKind kindIn, bool wideIn); + ~JBCOpSwitch() = default; + const MapleMap &GetTargets() const { + return targets; + } + + void ClearTargets() { + targets.clear(); + } + + void AddOrSetTarget(int32 value, uint32 targetPC) { + targets[value] = targetPC; + } + + uint32 GetDefaultTarget() const { + return targetDefault; + } + + void SetDefaultTarget(uint32 targetPC) { + 
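+    // Default branch target of the tableswitch/lookupswitch, taken when no case value matches.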
targetDefault = targetPC; + } + + protected: + bool ParseFileImpl(BasicIORead &io) override; + const std::vector &GetInputTypesFromStackImpl() const override; + std::string DumpImpl(const JBCConstPool &constPool) const override; + + private: + static std::map> InitMapOpInputTypes(); + + MapleMap targets; + uint32 targetDefault; + static std::map> mapOpInputTypes; +}; + +class JBCOpFieldOpr : public JBCOp { + public: + JBCOpFieldOpr(MapleAllocator &allocator, JBCOpcode opIn, JBCOpcodeKind kindIn, bool wideIn); + ~JBCOpFieldOpr() = default; + std::string GetFieldType(const JBCConstPool &constPool) const; + uint16 GetFieldIdx() const { + return fieldIdx; + } + + void SetFieldIdx(uint16 idx) { + fieldIdx = idx; + } + + protected: + bool ParseFileImpl(BasicIORead &io) override; + const std::vector &GetInputTypesFromStackImpl() const override; + std::vector GetInputTypesFromStackImpl(const JBCConstPool &constPool) const override; + JBCPrimType GetOutputTypesToStackImpl() const override; + JBCPrimType GetOutputTypesToStackImpl(const JBCConstPool &constPool) const override; + std::string DumpImpl(const JBCConstPool &constPool) const override; + + private: + std::vector GetInputTypesFromStackForGet(const JBCConstPool &constPool) const; + std::vector GetInputTypesFromStackForPut(const JBCConstPool &constPool) const; + + uint16 fieldIdx; +}; + +class JBCOpInvoke : public JBCOp { + public: + JBCOpInvoke(MapleAllocator &allocator, JBCOpcode opIn, JBCOpcodeKind kindIn, bool wideIn); + ~JBCOpInvoke() = default; + std::string GetMethodDescription(const JBCConstPool &constPool) const; + uint16 GetMethodIdx() const { + return methodIdx; + } + + void SetMethodIdx(uint16 idx) { + methodIdx = idx; + } + + uint8 GetCount() const { + return count; + } + + void SetCount(uint8 argCount) { + count = argCount; + } + + protected: + bool ParseFileImpl(BasicIORead &io) override; + const std::vector &GetInputTypesFromStackImpl() const override; + std::vector GetInputTypesFromStackImpl(const JBCConstPool &constPool) const override; + JBCPrimType GetOutputTypesToStackImpl() const override; + JBCPrimType GetOutputTypesToStackImpl(const JBCConstPool &constPool) const override; + std::string DumpImpl(const JBCConstPool &constPool) const override; + + private: + uint16 methodIdx; + uint8 count; +}; + +class JBCOpJsr : public JBCOp { + public: + JBCOpJsr(MapleAllocator &allocator, JBCOpcode opIn, JBCOpcodeKind kindIn, bool wideIn); + ~JBCOpJsr() = default; + uint32 GetTarget() const { + return target; + } + + void SetTarget(uint32 argTarget) { + target = argTarget; + } + + uint16 GetSlotIdx() const { + return slotIdx; + } + + void SetSlotIdx(uint32 argSlotIdx) { + slotIdx = static_cast(argSlotIdx); + } + + int32 GetJsrID() const { + return jsrID; + } + + void SetJsrID(int32 argJsrID) { + jsrID = argJsrID; + } + + protected: + bool ParseFileImpl(BasicIORead &io) override; + JBCPrimType GetOutputTypesToStackImpl() const override; + std::string DumpImpl(const JBCConstPool &constPool) const override; + + private: + uint32 target; + uint16 slotIdx; + int32 jsrID; +}; + +class JBCOpRet : public JBCOp { + public: + JBCOpRet(MapleAllocator &allocator, JBCOpcode opIn, JBCOpcodeKind kindIn, bool wideIn); + ~JBCOpRet() = default; + uint16 GetIndex() const { + return index; + } + + void SetIndex(uint16 argIndex) { + index = argIndex; + } + + protected: + bool ParseFileImpl(BasicIORead &io) override; + std::string DumpImpl(const JBCConstPool &constPool) const override; + + private: + uint16 index; +}; + +class JBCOpNew : public JBCOp { + 
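+  // Covers the allocation opcodes mapped to kind `New` in jbc_opcode.def
+  // (new, newarray, anewarray). The PrimType values below mirror the atype
+  // operand codes of the newarray instruction (4 = boolean ... 11 = long).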
public: + enum PrimType : uint8 { + kPrimNone = 0, + kPrimBoolean = 4, + kPrimChar = 5, + kPrimFloat = 6, + kPrimDouble = 7, + kPrimByte = 8, + kPrimShort = 9, + kPrimInt = 10, + kPrimLong = 11 + }; + + JBCOpNew(MapleAllocator &allocator, JBCOpcode opIn, JBCOpcodeKind kindIn, bool wideIn); + ~JBCOpNew() = default; + + GStrIdx GetTypeNameIdx(const JBCConstPool &constPool) const; + std::string GetTypeName(const JBCConstPool &constPool) const; + const FEIRType *GetFEIRType(const JBCConstPool &constPool) const; + uint16 GetRefTypeIdx() const { + return refTypeIdx; + } + + void SetRefTypeIdx(uint16 idx) { + refTypeIdx = idx; + } + + uint8 GetPrimType() const { + return primType; + } + + void SetPrimType(uint8 type) { + primType = type; + } + + protected: + bool ParseFileImpl(BasicIORead &io) override; + const std::vector &GetInputTypesFromStackImpl() const override; + JBCPrimType GetOutputTypesToStackImpl() const override; + std::string DumpImpl(const JBCConstPool &constPool) const override; + + private: + static std::map> InitMapOpInputTypes(); + std::string GetPrimTypeName() const; + const UniqueFEIRType &GetPrimFEIRType() const; + + uint16 refTypeIdx; + uint8 primType; + static std::map> mapOpInputTypes; +}; + +class JBCOpMultiANewArray : public JBCOp { + public: + JBCOpMultiANewArray(MapleAllocator &allocator, JBCOpcode opIn, JBCOpcodeKind kindIn, bool wideIn); + ~JBCOpMultiANewArray() = default; + uint16 GetRefTypeIdx() const { + return refTypeIdx; + } + + void SetRefTypeIdx(uint16 idx) { + refTypeIdx = idx; + } + + uint8 GetDim() const { + return dim; + } + + void SetDim(uint8 argDim) { + dim = argDim; + } + + protected: + bool ParseFileImpl(BasicIORead &io) override; + const std::vector &GetInputTypesFromStackImpl() const override; + std::vector GetInputTypesFromStackImpl(const JBCConstPool &constPool) const override; + JBCPrimType GetOutputTypesToStackImpl() const override; + std::string DumpImpl(const JBCConstPool &constPool) const override; + + private: + uint16 refTypeIdx; + uint8 dim; +}; + +class JBCOpTypeCheck : public JBCOp { + public: + JBCOpTypeCheck(MapleAllocator &allocator, JBCOpcode opIn, JBCOpcodeKind kindIn, bool wideIn); + ~JBCOpTypeCheck() = default; + uint16 GetTypeIdx() const { + return typeIdx; + } + + void SetTypeIdx(uint16 idx) { + typeIdx = idx; + } + + protected: + bool ParseFileImpl(BasicIORead &io) override; + const std::vector &GetInputTypesFromStackImpl() const override; + JBCPrimType GetOutputTypesToStackImpl() const override; + std::string DumpImpl(const JBCConstPool &constPool) const override; + + private: + static std::map> InitMapOpInputTypes(); + static std::map InitMapOpOutputType(); + + uint16 typeIdx; + static std::map> mapOpInputTypes; + static std::map mapOpOutputType; +}; +} // namespace jbc +} // namespace maple +#endif // HIR2MPL_INCLUDE_JBC_OPCODE_H diff --git a/src/hir2mpl/bytecode_input/class/include/jbc_opcode_helper.h b/src/hir2mpl/bytecode_input/class/include/jbc_opcode_helper.h new file mode 100644 index 0000000000000000000000000000000000000000..b20b3f7d7f79b45e67214e0e468033bca05c18fe --- /dev/null +++ b/src/hir2mpl/bytecode_input/class/include/jbc_opcode_helper.h @@ -0,0 +1,57 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef HIR2MPL_INCLUDE_COMMON_JBC_OPCODE_HELPER_H +#define HIR2MPL_INCLUDE_COMMON_JBC_OPCODE_HELPER_H +#include +#include +#include +#include +#include "mpl_logging.h" +#include "jbc_opcode.h" +#include "jbc_class.h" + +namespace maple { +class JBCOpcodeHelper { + public: + JBCOpcodeHelper(const jbc::JBCClassMethod &argMethod); + ~JBCOpcodeHelper() = default; + std::string GetLastErorr() const { + return ssLastError.str(); + } + std::vector GetBaseTypeNamesForOP(const jbc::JBCOp &op, bool &success); + static std::vector GetInputPrimTypeForOP(const jbc::JBCOp &op, const jbc::JBCConstPool &constPool); + static jbc::JBCPrimType GetOutputPrimTypeForOP(const jbc::JBCOp &op, const jbc::JBCConstPool &constPool); + + LLT_PRIVATE: + using FuncPtrGetBaseTypeName = std::vector (JBCOpcodeHelper::*)(const jbc::JBCOp &op, bool &success); + std::vector GetBaseTypeNamesForOPDefault(bool &success) const; + std::vector GetBaseTypeNamesForOPConst(const jbc::JBCOp &op, bool &success); + std::vector GetBaseTypeNamesForOPFieldOpr(const jbc::JBCOp &op, bool &success); + std::vector GetBaseTypeNamesForOPInvoke(const jbc::JBCOp &op, bool &success); + std::vector GetBaseTypeNamesForOPNew(const jbc::JBCOp &op, bool &success); + std::vector GetBaseTypeNamesForOPMultiANewArray(const jbc::JBCOp &op, bool &success); + std::vector GetBaseTypeNamesForOPTypeCheck(const jbc::JBCOp &op, bool &success); + static std::map> InitMapOpInputPrimTypes(); + static std::map InitFuncPtrMapGetBaseTypeName(); + static void InitMapOpInputPrimTypesForConst(std::map> &ans); + static void InitMapOpInputPrimTypesForLoad(); + + const jbc::JBCClassMethod &method; + std::stringstream ssLastError; + static std::map> mapOpInputPrimTypes; + static std::map funcPtrMapGetBaseTypeName; +}; // class JBCOpcodeHelper +} // namespace maple +#endif diff --git a/src/hir2mpl/bytecode_input/class/include/jbc_opcode_kind.def b/src/hir2mpl/bytecode_input/class/include/jbc_opcode_kind.def new file mode 100644 index 0000000000000000000000000000000000000000..5e0bda7e42f79799cf0b12bf04eb74c4cb4bc7d7 --- /dev/null +++ b/src/hir2mpl/bytecode_input/class/include/jbc_opcode_kind.def @@ -0,0 +1,47 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +// JBC_OP_KIND(kind, name) +JBC_OP_KIND(Unused, "Unused") +JBC_OP_KIND(Reversed, "Reversed") +JBC_OP_KIND(Const, "Const") +JBC_OP_KIND(SlotOpr, "SlotOpr") +JBC_OP_KIND(Load, "Load") +JBC_OP_KIND(Store, "Store") +JBC_OP_KIND(ArrayLoad, "ArrayLoad") +JBC_OP_KIND(ArrayStore, "ArrayStore") +JBC_OP_KIND(Pop, "Pop") +JBC_OP_KIND(Dup, "Dup") +JBC_OP_KIND(Swap, "Swap") +JBC_OP_KIND(Stack, "Stack") +JBC_OP_KIND(MathBinop, "MathBinop") +JBC_OP_KIND(MathUnop, "MathUnop") +JBC_OP_KIND(MathInc, "MathInc") +JBC_OP_KIND(Convert, "Convert") +JBC_OP_KIND(Compare, "Compare") +JBC_OP_KIND(Branch, "Branch") +JBC_OP_KIND(Goto, "Goto") +JBC_OP_KIND(Switch, "Switch") +JBC_OP_KIND(Return, "Return") +JBC_OP_KIND(StaticFieldOpr, "StaticFieldOpr") +JBC_OP_KIND(FieldOpr, "FieldOpr") +JBC_OP_KIND(Invoke, "Invoke") +JBC_OP_KIND(Jsr, "Jsr") +JBC_OP_KIND(Ret, "Ret") +JBC_OP_KIND(New, "New") +JBC_OP_KIND(MultiANewArray, "MultiANewArray") +JBC_OP_KIND(Throw, "Throw") +JBC_OP_KIND(TypeCheck, "TypeCheck") +JBC_OP_KIND(Monitor, "Monitor") +JBC_OP_KIND(ArrayLength, "ArrayLength") \ No newline at end of file diff --git a/src/hir2mpl/bytecode_input/class/include/jbc_stack2fe_helper.h b/src/hir2mpl/bytecode_input/class/include/jbc_stack2fe_helper.h new file mode 100644 index 0000000000000000000000000000000000000000..9df0dac9a35313e2e77ae0d17dab6a6b49dd6d3b --- /dev/null +++ b/src/hir2mpl/bytecode_input/class/include/jbc_stack2fe_helper.h @@ -0,0 +1,138 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef HIR2MPL_INCLUDE_JBC_STACK2FE_HELPER_H +#define HIR2MPL_INCLUDE_JBC_STACK2FE_HELPER_H +#include +#include +#include +#include +#include "mempool_allocator.h" +#include "jbc_stack_helper.h" +#include "feir_builder.h" +#include "feir_var_reg.h" + +namespace maple { +class JBCStack2FEHelper { + public: + static const uint8 kRegNumOff = 1; + static const uint8 kRegNumOffWide = 2; + explicit JBCStack2FEHelper(bool argUseNestExpr = false); + ~JBCStack2FEHelper() = default; + uint32 GetRegNumForSlot(uint32 slotNum) const; + uint32 GetRegNumForStack() const; + bool PushItem(UniqueFEIRVar var, PrimType pty); + UniqueFEIRVar PushItem(PrimType pty); + UniqueFEIRStmt PushItem(UniqueFEIRExpr expr, PrimType pty, bool hasException = false); + UniqueFEIRStmt PushItem(UniqueFEIRExpr expr, UniqueFEIRType type, bool hasException = false); + UniqueFEIRVar PopItem(PrimType pty); + UniqueFEIRVar PopItem(UniqueFEIRType type); + UniqueFEIRVar PopItem(bool isWide, PrimType &pty); + UniqueFEIRVar PopItemAuto(PrimType &pty); + bool Swap(); + bool Pop(jbc::JBCOpcode opcode); + bool Dup(jbc::JBCOpcode opcode); + std::list GenerateSwapStmts(); + std::list LoadSwapStack(const JBCStackHelper &stackHelper, bool &success); + std::string DumpStackInJavaFormat() const; + std::string DumpStackInInternalFormat() const; + void ClearStack() { + stack.clear(); + } + + void SetNStacks(uint32 argNStacks) { + nStacks = argNStacks; + regNumForStacks.clear(); + for (uint32 i = 0; i < argNStacks; i++) { + CHECK_FATAL(regNumForStacks.insert(i).second, "regNumForStacks insert failed"); + } + } + + uint32 GetNStacks() const { + return nStacks; + } + + void SetNSwaps(uint32 argNSwaps) { + nSwaps = argNSwaps; + } + + uint32 GetNSwaps() const { + return nSwaps; + } + + void SetNLocals(uint32 argNLocals) { + nLocals = argNLocals; + } + + uint32 GetNLocals() const { + return nLocals; + } + + void SetNArgs(uint32 argNArgs) { + nArgs = argNArgs; + } + + uint32 GetNArgs() const { + return nArgs; + } + + static PrimType JBCStackItemTypeToPrimType(jbc::JBCPrimType itemType); + static PrimType SimplifyPrimType(PrimType pty); + + protected: + bool useNestExpr; + uint32 nStacks = 0; + uint32 nSwaps = 0; + uint32 nLocals = 0; + uint32 nArgs = 0; + using StackItem = std::pair; + std::vector stack; // list> + std::set regNumForStacks; + bool Pop(); + bool Pop2(); + bool Dup(); + bool DupX1(); + bool DupX2(); + bool Dup2(); + bool Dup2X1(); + bool Dup2X2(); + std::vector JBCStackItemTypesToPrimTypes(const std::vector itemTypes); + bool CheckSwapValid(const std::vector items) const; + std::vector> GeneralSwapRegNum(const std::vector items); + StackItem MakeStackItem(UniqueFEIRVar var, PrimType pty) const { + return std::make_pair(std::move(var), pty); + } + + bool IsItemDummy(const StackItem &item) const { + return item.first == nullptr && IsPrimTypeWide(item.second); + } + + bool IsItemNormal(const StackItem &item) const { + return item.first != nullptr && IsPrimTypeNormal(item.second); + } + + bool IsItemWide(const StackItem &item) const { + return item.first != nullptr && IsPrimTypeWide(item.second); + } + + bool IsPrimTypeNormal(PrimType pty) const { + return pty == PTY_i32 || pty == PTY_f32 || pty == PTY_ref || pty == PTY_a32; + } + + bool IsPrimTypeWide(PrimType pty) const { + return pty == PTY_i64 || pty == PTY_f64; + } +}; // class JBCStack2FEHelper +} // namespace maple +#endif // HIR2MPL_INCLUDE_JBC_STACK2FE_HELPER_H diff --git a/src/hir2mpl/bytecode_input/class/include/jbc_stack_helper.h 
b/src/hir2mpl/bytecode_input/class/include/jbc_stack_helper.h new file mode 100644 index 0000000000000000000000000000000000000000..f34936f9d094d19b05ec923c23b40665a6f022aa --- /dev/null +++ b/src/hir2mpl/bytecode_input/class/include/jbc_stack_helper.h @@ -0,0 +1,61 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef HIR2MPL_INCLUDE_JBC_STACK_HELPER_H +#define HIR2MPL_INCLUDE_JBC_STACK_HELPER_H +#include "fe_configs.h" +#include "jbc_opcode.h" +#include "jbc_class_const_pool.h" + +namespace maple { +class JBCStackHelper { + public: + JBCStackHelper() = default; + ~JBCStackHelper() = default; + void Reset(); + bool StackChange(const jbc::JBCOp &op, const jbc::JBCConstPool &constPool); + void PushItem(jbc::JBCPrimType type); + void PushItems(const std::vector &types); + bool PopItem(jbc::JBCPrimType type); + bool PopItems(const std::vector &types); + void CopyFrom(const JBCStackHelper &src); + bool EqualTo(const JBCStackHelper &src); + bool Contains(const JBCStackHelper &src); + void Dump() const; + static std::string GetTypeName(jbc::JBCPrimType type); + std::vector GetStackItems() const; + uint32 GetStackSize() const { + size_t stackSize = stack.size(); + CHECK_FATAL(stackSize < UINT32_MAX, "stack size is too large"); + return static_cast(stackSize); + } + + LLT_PRIVATE: + std::vector stack; + bool Pop(jbc::JBCOpcode opcode); + bool Dup(jbc::JBCOpcode opcode); + bool Dup(); + bool DupX1(); + bool DupX2(); + bool Dup2(); + bool Dup2X1(); + bool Dup2X2(); + bool Swap(); + bool IsType1(jbc::JBCPrimType type) const; + bool IsType2(jbc::JBCPrimType type) const; + bool IsType2Dummy(jbc::JBCPrimType type) const; + jbc::JBCPrimType GetGeneralType(jbc::JBCPrimType type) const; +}; // class JBCStack +} // namespace maple +#endif // HIR2MPL_INCLUDE_JBC_STACK_HELPER_H diff --git a/src/hir2mpl/bytecode_input/class/include/jbc_stmt.h b/src/hir2mpl/bytecode_input/class/include/jbc_stmt.h new file mode 100644 index 0000000000000000000000000000000000000000..bf04c3a2b804545832a8983e32d011255dfbebd6 --- /dev/null +++ b/src/hir2mpl/bytecode_input/class/include/jbc_stmt.h @@ -0,0 +1,373 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef HIR2MPL_INCLUDE_JBC_STMT_H +#define HIR2MPL_INCLUDE_JBC_STMT_H +#include +#include +#include "jbc_opcode.h" +#include "jbc_stack2fe_helper.h" + +namespace maple { +enum JBCStmtKind : uint8 { + kJBCStmtDefault = 0, + kJBCStmtFuncBeing, + kJBCStmtFuncEnd, + kJBCStmtInst, + kJBCStmtInstBranch, + kJBCStmtInstBranchRet, + kJBCStmtPesudoComment, + kJBCStmtPesudoLOC, + kJBCStmtPesudoLabel, + kJBCStmtPesudoTry, + kJBCStmtPesudoEndTry, + kJBCStmtPesudoCatch +}; + +class JBCFunctionContext; +class JBCStmtKindHelper { + public: + static std::string JBCStmtKindName(JBCStmtKind kind); + + private: + JBCStmtKindHelper() = default; + ~JBCStmtKindHelper() = default; +}; // class JBCStmtKindHelper + +class JBCStmt : public FEIRStmt { + public: + explicit JBCStmt(JBCStmtKind argKind) + : FEIRStmt(kStmt), JBCkind(argKind), + pc(UINT32_MAX) {} + + JBCStmt(FEIRNodeKind argGenKind, JBCStmtKind argKind) + : FEIRStmt(argGenKind), + JBCkind(argKind), + pc(UINT32_MAX) {} + + virtual ~JBCStmt() = default; + std::list EmitToFEIR(JBCFunctionContext &context, bool &success) const { + std::list feirStmts = EmitToFEIRImpl(context, success); + return feirStmts; + } + + JBCStmtKind GetJBCKind() const { + return JBCkind; + } + + void SetJBCKind(JBCStmtKind argKind) { + JBCkind = argKind; + } + + void SetPC(uint32 argPC) { + pc = argPC; + } + + uint32 GetPC() const { + return pc; + } + + bool IsJBCBranch() const { + return JBCkind == JBCStmtKind::kJBCStmtInstBranch || JBCkind == JBCStmtKind::kJBCStmtInstBranchRet; + } + + protected: + virtual std::list EmitToFEIRImpl(JBCFunctionContext &context, bool &success) const = 0; + + JBCStmtKind JBCkind; + uint32 pc; +}; + +class JBCStmtInst : public JBCStmt { + public: + explicit JBCStmtInst(const jbc::JBCOp &argOp); + ~JBCStmtInst() = default; + const jbc::JBCOp &GetOp() const { + return op; + } + + protected: + bool IsStmtInstImpl() const override; + void DumpImpl(const std::string &prefix) const override; + std::string DumpDotStringImpl() const override; + std::list EmitToFEIRImpl(JBCFunctionContext &context, bool &success) const override; + + private: + const jbc::JBCOp &op; + using FuncPtrEmitToFEIR = + std::list (JBCStmtInst::*)(JBCFunctionContext &context, bool &success) const; + static std::map funcPtrMapForEmitToFEIR; + static std::map InitFuncPtrMapForEmitToFEIR(); + static std::map opcodeMapForMathBinop; + static std::map InitOpcodeMapForMathBinop(); + static std::map opcodeMapForMathUnop; + static std::map InitOpcodeMapForMathUnop(); + static std::map opcodeMapForMonitor; + static std::map InitOpcodeMapForMonitor(); + std::list EmitToFEIRForOpConst(JBCFunctionContext &context, bool &success) const; + std::list EmitToFEIRForOpConstCommon(JBCFunctionContext &context, bool &success) const; + std::list EmitToFEIRForOpAConstNull(JBCFunctionContext &context, bool &success) const; + std::list EmitToFEIRForOpIConst(JBCFunctionContext &context, bool &success) const; + std::list EmitToFEIRForOpLConst(JBCFunctionContext &context, bool &success) const; + std::list EmitToFEIRForOpFConst(JBCFunctionContext &context, bool &success) const; + std::list EmitToFEIRForOpDConst(JBCFunctionContext &context, bool &success) const; + std::list EmitToFEIRForOpBiPush(JBCFunctionContext &context, bool &success) const; + std::list EmitToFEIRForOpSiPush(JBCFunctionContext &context, bool &success) const; + std::list EmitToFEIRForOpLdc(JBCFunctionContext &context, bool &success) const; + std::list EmitToFEIRForOpLoad(JBCFunctionContext &context, bool &success) const; + std::list 
EmitToFEIRForOpStore(JBCFunctionContext &context, bool &success) const; + std::list EmitToFEIRForOpArrayLoad(JBCFunctionContext &context, bool &success) const; + std::list EmitToFEIRForOpArrayStore(JBCFunctionContext &context, bool &success) const; + std::list EmitToFEIRForOpPop(JBCFunctionContext &context, bool &success) const; + std::list EmitToFEIRForOpDup(JBCFunctionContext &context, bool &success) const; + std::list EmitToFEIRForOpSwap(JBCFunctionContext &context, bool &success) const; + std::list EmitToFEIRForOpMathBinop(JBCFunctionContext &context, bool &success) const; + std::list EmitToFEIRForOpMathUnop(JBCFunctionContext &context, bool &success) const; + std::list EmitToFEIRForOpMathInc(JBCFunctionContext &context, bool &success) const; + std::list EmitToFEIRForOpConvert(JBCFunctionContext &context, bool &success) const; + std::list EmitToFEIRForOpCompare(const JBCFunctionContext &context, bool &success) const; + std::list EmitToFEIRForOpReturn(JBCFunctionContext &context, bool &success) const; + std::list EmitToFEIRForOpStaticFieldOpr(JBCFunctionContext &context, bool &success) const; + std::list EmitToFEIRForOpFieldOpr(JBCFunctionContext &context, bool &success) const; + std::list EmitToFEIRForOpInvoke(JBCFunctionContext &context, bool &success) const; + std::list EmitToFEIRForOpInvokeVirtual(JBCFunctionContext &context, bool &success) const; + std::list EmitToFEIRForOpInvokeStatic(JBCFunctionContext &context, bool &success) const; + std::list EmitToFEIRForOpInvokeInterface(JBCFunctionContext &context, bool &success) const; + std::list EmitToFEIRForOpInvokeSpecial(JBCFunctionContext &context, bool &success) const; + std::list EmitToFEIRForOpInvokeDynamic(JBCFunctionContext &context, bool &success) const; + std::list EmitToFEIRForOpNew(JBCFunctionContext &context, bool &success) const; + std::list EmitToFEIRForOpMultiANewArray(JBCFunctionContext &context, bool &success) const; + std::list EmitToFEIRForOpThrow(JBCFunctionContext &context, bool &success) const; + std::list EmitToFEIRForOpTypeCheck(JBCFunctionContext &context, bool &success) const; + std::list EmitToFEIRForOpMonitor(JBCFunctionContext &context, bool &success) const; + std::list EmitToFEIRForOpArrayLength(JBCFunctionContext &context, bool &success) const; + std::list EmitToFEIRCommon(JBCFunctionContext &context, bool &success) const; + std::list EmitToFEIRCommon2(JBCFunctionContext &context, bool &success) const; + UniqueFEIRStmt GenerateStmtForConstI32(JBCStack2FEHelper &stack2feHelper, int32 val, bool &success) const; + UniqueFEIRStmt GenerateStmtForConstI64(JBCStack2FEHelper &stack2feHelper, int64 val, bool &success) const; + UniqueFEIRStmt GenerateStmtForConstF32(JBCStack2FEHelper &stack2feHelper, float val, bool &success) const; + UniqueFEIRStmt GenerateStmtForConstF64(JBCStack2FEHelper &stack2feHelper, double val, bool &success) const; + void PrepareInvokeParametersAndReturn(JBCStack2FEHelper &stack2feHelper, const FEStructMethodInfo &info, + FEIRStmtCallAssign &callStmt, bool isStatic) const; +}; + +class JBCStmtPesudoLabel; + +class JBCStmtInstBranch : public JBCStmt { + public: + explicit JBCStmtInstBranch(const jbc::JBCOp &argOp); + ~JBCStmtInstBranch() = default; + const jbc::JBCOp &GetOp() const { + return op; + } + + protected: + bool IsStmtInstImpl() const override; + void DumpImpl(const std::string &prefix) const override; + std::string DumpDotStringImpl() const override; + std::list EmitToFEIRImpl(JBCFunctionContext &context, bool &success) const override; + JBCStmtPesudoLabel *GetTarget(const 
std::map &mapPCStmtLabel, uint32 pc) const; + virtual std::list EmitToFEIRForOpRetImpl(JBCFunctionContext &context, bool &success) const { + (void) context; + (void) success; + return std::list(); + } + + const jbc::JBCOp &op; + + private: + // bitwise mode + enum { + kModeDefault = 0, // for int32 using normal opnd + kModeUseRef = 0x1, // bit0: 1 for ref, 0 for int32 + kModeUseZeroAsSecondOpnd = 0x2 // bit1: 1 for using 0 for 2nd opnd, 0 for using normal opnd + }; + + using FuncPtrEmitToFEIR = + std::list (JBCStmtInstBranch::*)(JBCFunctionContext &context, bool &success) const; + static std::map funcPtrMapForEmitToFEIR; + static std::map InitFuncPtrMapForEmitToFEIR(); + static std::map> opcodeMapForCondGoto; + static std::map> InitOpcodeMapForCondGoto(); + std::list EmitToFEIRForOpGoto(JBCFunctionContext &context, bool &success) const; + std::list EmitToFEIRForOpBranch(JBCFunctionContext &context, bool &success) const; + std::list EmitToFEIRForOpSwitch(JBCFunctionContext &context, bool &success) const; + std::list EmitToFEIRForOpJsr(JBCFunctionContext &context, bool &success) const; + std::list EmitToFEIRForOpRet(JBCFunctionContext &context, bool &success) const; + std::list EmitToFEIRCommon(JBCFunctionContext &context, bool &success) const; +}; + +class JBCStmtInstBranchRet : public JBCStmtInstBranch { + public: + JBCStmtInstBranchRet(const jbc::JBCOp &argOp); + ~JBCStmtInstBranchRet() = default; + + protected: + std::list EmitToFEIRForOpRetImpl(JBCFunctionContext &context, bool &success) const override; +}; + +class JBCStmtPesudoLabel : public JBCStmt { + public: + JBCStmtPesudoLabel() + : JBCStmt(kStmtPesudo, kJBCStmtPesudoLabel), + labelIdx(0) {} + + ~JBCStmtPesudoLabel() = default; + void SetLabelIdx(uint32 arg) { + labelIdx = arg; + } + + uint32 GetLabelIdx() const { + return labelIdx; + } + + protected: + void DumpImpl(const std::string &prefix) const override; + std::string DumpDotStringImpl() const override; + std::list EmitToFEIRImpl(JBCFunctionContext &context, bool &success) const override; + + uint32 labelIdx; +}; + +class JBCStmtPesudoCatch : public JBCStmtPesudoLabel { + public: + JBCStmtPesudoCatch() + : JBCStmtPesudoLabel() { + JBCkind = kJBCStmtPesudoCatch; + } + ~JBCStmtPesudoCatch() = default; + void AddCatchTypeName(const GStrIdx &nameIdx) { + if (catchTypeNames.find(nameIdx) == catchTypeNames.end()) { + CHECK_FATAL(catchTypeNames.insert(nameIdx).second, "catchTypeNames insert failed"); + } + } + + protected: + void DumpImpl(const std::string &prefix) const override; + std::string DumpDotStringImpl() const override; + std::list EmitToFEIRImpl(JBCFunctionContext &context, bool &success) const override; + + private: + std::set catchTypeNames; +}; + +class JBCStmtPesudoTry : public JBCStmt { + public: + JBCStmtPesudoTry() + : JBCStmt(kJBCStmtPesudoTry) {} + + ~JBCStmtPesudoTry() = default; + void AddCatchStmt(JBCStmtPesudoCatch &stmt) { + catchStmts.push_back(&stmt); + } + + size_t GetCatchCount() const { + return catchStmts.size(); + } + + JBCStmtPesudoCatch *GetCatchStmt(uint32 idx) { + ASSERT(idx < catchStmts.size(), "index out of range"); + return static_cast(catchStmts[idx]); + } + + protected: + void DumpImpl(const std::string &prefix) const override; + std::string DumpDotStringImpl() const override; + std::list EmitToFEIRImpl(JBCFunctionContext &context, bool &success) const override; + + private: + std::vector catchStmts; +}; + +class JBCStmtPesudoEndTry : public JBCStmt { + public: + JBCStmtPesudoEndTry() + : JBCStmt(kJBCStmtPesudoEndTry) { + isAuxPost = true; + 
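+    // isAuxPost: auxiliary statement ordered after its anchor (the pseudo
+    // comment/LOC statements below use isAuxPre, i.e. ordered before it).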
} + + ~JBCStmtPesudoEndTry() = default; + + protected: + void DumpImpl(const std::string &prefix) const override; + std::string DumpDotStringImpl() const override; + std::list EmitToFEIRImpl(JBCFunctionContext &context, bool &success) const override; +}; + +class JBCStmtPesudoComment : public JBCStmt { + public: + explicit JBCStmtPesudoComment(const std::string &argContent) + : JBCStmt(kJBCStmtPesudoComment), + content(argContent) { + isAuxPre = true; + } + + ~JBCStmtPesudoComment() = default; + void SetContent(const std::string &argContent) { + content = argContent; + } + + protected: + void DumpImpl(const std::string &prefix) const override; + std::string DumpDotStringImpl() const override; + std::list EmitToFEIRImpl(JBCFunctionContext &context, bool &success) const override; + + private: + std::string content = ""; +}; // class JBCStmtPesudoComment + +class JBCStmtPesudoLOC : public JBCStmt { + public: + JBCStmtPesudoLOC() + : JBCStmt(kJBCStmtPesudoLOC), + srcFileIdx(0), + lineNumber(0) { + isAuxPre = true; + } + + JBCStmtPesudoLOC(uint32 argSrcFileIdx, uint32 argLineNumber) + : JBCStmt(kJBCStmtPesudoLOC), + srcFileIdx(argSrcFileIdx), + lineNumber(argLineNumber) { + isAuxPre = true; + } + + ~JBCStmtPesudoLOC() = default; + void SetSrcFileIdx(uint32 idx) { + srcFileIdx = idx; + } + + uint32 GetPesudoLOCSrcFileIdx() const { + return srcFileIdx; + } + + void SetLineNumber(uint32 line) { + lineNumber = line; + } + + uint32 GetLineNumber() const { + return lineNumber; + } + + protected: + void DumpImpl(const std::string &prefix) const override; + std::string DumpDotStringImpl() const override; + std::list EmitToFEIRImpl(JBCFunctionContext &context, bool &success) const override; + + private: + uint32 srcFileIdx; + uint32 lineNumber; +}; +} +#endif // HIR2MPL_INCLUDE_JBC_STMT_H diff --git a/src/hir2mpl/bytecode_input/class/include/jbc_util.h b/src/hir2mpl/bytecode_input/class/include/jbc_util.h new file mode 100644 index 0000000000000000000000000000000000000000..a6e1eff00b80e06d0bc4071c85252af91601b487 --- /dev/null +++ b/src/hir2mpl/bytecode_input/class/include/jbc_util.h @@ -0,0 +1,34 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef HIR2MPL_INCLUDE_JBC_UTIL_H +#define HIR2MPL_INCLUDE_JBC_UTIL_H +#include +#include +#include "jbc_opcode.h" + +namespace maple { +namespace jbc { +class JBCUtil { + public: + static std::string ClassInternalNameToFullName(const std::string &name); + static JBCPrimType GetPrimTypeForName(const std::string &name); + + private: + JBCUtil() = default; + ~JBCUtil() = default; +}; +} // namespace jbc +} // namespace maple +#endif // HIR2MPL_INCLUDE_JBC_UTIL_H \ No newline at end of file diff --git a/src/hir2mpl/bytecode_input/class/src/jbc_attr.cpp b/src/hir2mpl/bytecode_input/class/src/jbc_attr.cpp new file mode 100644 index 0000000000000000000000000000000000000000..40f7a215ee4e405c5a985086cb251e610ef6cb7d --- /dev/null +++ b/src/hir2mpl/bytecode_input/class/src/jbc_attr.cpp @@ -0,0 +1,1144 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "jbc_attr.h" +#include "jbc_attr_item.h" + +namespace maple { +namespace jbc { +// ---------- JBCAttr ---------- +JBCAttr::JBCAttr(JBCAttrKind kindIn, uint16 nameIdxIn, uint32 lengthIn) + : kind(kindIn), nameIdx(nameIdxIn), length(lengthIn) {} + +JBCAttr* JBCAttr::InAttr(MapleAllocator &allocator, BasicIORead &io, const JBCConstPool &constPool) { + MemPool *mp = allocator.GetMemPool(); + ASSERT(mp, "mempool is nullptr"); + JBCAttr *attrInfo = nullptr; + uint16 nameIdx = io.ReadUInt16(); + uint32 length = io.ReadUInt32(); + uint32 posStart = io.GetPos(); + const JBCConst *constNameRaw = constPool.GetConstByIdxWithTag(nameIdx, kConstUTF8); + if (constNameRaw == nullptr) { + ERR(kLncErr, "invalid nameIdx %d for attr name.", nameIdx); + return nullptr; + } + const JBCConstUTF8 *constName = static_cast(constNameRaw); + std::string strName = constName->GetString(); + if (strName.empty()) { + ERR(kLncErr, "AttrInfo@0x%x parse error: empty attr name.", posStart); +#define JBC_ATTR(name, Type) \ + } else if (strName.compare(name) == 0) { \ + attrInfo = mp->New(allocator, nameIdx, length); \ + if (!attrInfo->ParseFile(allocator, io, constPool)) { \ + CHECK_FATAL(false, "failed to parse attr info"); \ + } +#include "jbc_attr.def" +#undef JBC_ATTR + } else { + attrInfo = mp->New(allocator, nameIdx, length); + if (!attrInfo->ParseFile(allocator, io, constPool)) { + CHECK_FATAL(false, "failed to parse attr info"); + } + } + if (io.GetPos() - posStart != length) { + ERR(kLncErr, "AttrInfo@0x%x parse error: incorrect data length.", posStart); + io.SetPos(posStart + length); + } + return attrInfo; +} + +JBCAttrKind JBCAttr::AttrKind(const std::string &str) { +#define JBC_ATTR(name, type) \ + if (str.compare(name) == 0) { \ + return kAttr##type; \ + } +#include "jbc_attr.def" +#undef JBC_ATTR + return kAttrUnknown; +} + +// ---------- JBCAttrMap ---------- +JBCAttrMap::JBCAttrMap(MapleAllocator &allocatorIn) + : allocator(allocatorIn), mapAttrs(std::less(), allocator.Adapter()) {} + +void JBCAttrMap::RegisterAttr(JBCAttr &attr) { + JBCAttrKind kind = attr.GetKind(); + MapleMap*>::const_iterator it = mapAttrs.find(kind); + if 
(it == mapAttrs.end()) { + MemPool *mp = allocator.GetMemPool(); + MapleList *attrList = mp->New>(allocator.Adapter()); + attrList->push_back(&attr); + CHECK_FATAL(mapAttrs.insert(std::make_pair(kind, attrList)).second, "mapAttrs insert error"); + } else { + it->second->push_back(&attr); + } +} + +std::list JBCAttrMap::GetAttrs(JBCAttrKind kind) const { + auto it = mapAttrs.find(kind); + if (it == mapAttrs.end()) { + return std::list(); + } else { + std::list ans; + for (JBCAttr *attr : *(it->second)) { + ans.push_back(attr); + } + return ans; + } +} + +const JBCAttr *JBCAttrMap::GetAttr(JBCAttrKind kind) const { + auto it = mapAttrs.find(kind); + if (it == mapAttrs.end()) { + return nullptr; + } else { + CHECK_FATAL(it->second->size() == 1, "more than one attrs"); + return *(it->second->begin()); + } +} + +bool JBCAttrMap::PreProcess(const JBCConstPool &constPool) { + bool success = true; + for (auto itList : mapAttrs) { + for (JBCAttr *attr : *(itList.second)) { + switch (attr->GetKind()) { + case kAttrLocalVariableTable: + case kAttrLocalVariableTypeTable: + success = success && attr->PreProcess(constPool); + break; + default: + break; + } + } + } + return success; +} + +// ---------- JBCAttrRaw ---------- +JBCAttrRaw::JBCAttrRaw(const MapleAllocator &allocator, uint16 nameIdx, uint32 length) + : JBCAttr(kAttrRaw, nameIdx, length), + rawData(nullptr) { + (void) allocator; +} + +JBCAttrRaw::~JBCAttrRaw() { + rawData = nullptr; +} + +bool JBCAttrRaw::ParseFileImpl(MapleAllocator &allocator, BasicIORead &io, const JBCConstPool &constPool) { + (void) constPool; + bool success = false; + MemPool *mp = allocator.GetMemPool(); + ASSERT(mp, "mempool is nullptr"); + rawData = static_cast(mp->Malloc(length)); + CHECK_NULL_FATAL(rawData); + io.ReadBufferUInt8(rawData, length, success); + return success; +} + +bool JBCAttrRaw::PreProcessImpl(const JBCConstPool &constPool) { + (void) constPool; + return false; +} + +SimpleXMLElem *JBCAttrRaw::GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, uint32 idx) const { + (void) allocator; + (void) constPool; + (void) idx; + return nullptr; +} + +// ---------- JBCAttrLocalVariableInfo ---------- +JavaAttrLocalVariableInfoItem JBCAttrLocalVariableInfo::kInvalidInfoItem; + +JBCAttrLocalVariableInfo::JBCAttrLocalVariableInfo(MapleAllocator &argAllocator) + : allocator(argAllocator), + slotStartMap(allocator.Adapter()), + itemMap(allocator.Adapter()) {} + +void JBCAttrLocalVariableInfo::RegisterItem(const attr::LocalVariableTableItem &itemAttr) { + uint16 slotIdx = itemAttr.GetIndex(); + uint16 startPC = itemAttr.GetStartPC(); + uint16 length = itemAttr.GetLength(); + JavaAttrLocalVariableInfoItem *item = GetItemByStartInternal(slotIdx, startPC); + if (item == nullptr) { + CheckItemAvaiable(slotIdx, startPC); + AddSlotStartMap(slotIdx, startPC); + JavaAttrLocalVariableInfoItem &itemRef = itemMap[std::make_pair(slotIdx, startPC)]; + itemRef.slotIdx = slotIdx; + itemRef.start = startPC; + itemRef.length = length; + itemRef.nameIdx = itemAttr.GetNameStrIdx(); + itemRef.feirType = itemAttr.GetFEIRType(); + } else { + if (item->start == startPC && item->length == length && item->nameIdx == itemAttr.GetNameStrIdx()) { + CHECK_FATAL(item->feirType == nullptr, "Item already defined"); + item->feirType = itemAttr.GetFEIRType(); + } else { + CHECK_FATAL(false, "Item mismatch in RegisterItem()"); + } + } +} + +void JBCAttrLocalVariableInfo::RegisterTypeItem(const attr::LocalVariableTypeTableItem &itemAttr) { + uint16 slotIdx = itemAttr.GetIndex(); + 
uint16 startPC = itemAttr.GetStartPC(); + uint16 length = itemAttr.GetLength(); + JavaAttrLocalVariableInfoItem *item = GetItemByStartInternal(slotIdx, startPC); + if (item == nullptr) { + CheckItemAvaiable(slotIdx, startPC); + AddSlotStartMap(slotIdx, startPC); + JavaAttrLocalVariableInfoItem &itemRef = itemMap[std::make_pair(slotIdx, startPC)]; + itemRef.slotIdx = slotIdx; + itemRef.start = startPC; + itemRef.length = length; + itemRef.nameIdx = itemAttr.GetNameStrIdx(); + itemRef.signatureNameIdx = itemAttr.GetSignatureStrIdx(); + } else { + if (item->start == startPC && item->length == length && item->nameIdx == itemAttr.GetNameStrIdx()) { + CHECK_FATAL(item->signatureNameIdx == 0, "Item already defined"); + item->signatureNameIdx = itemAttr.GetSignatureStrIdx(); + } else { + CHECK_FATAL(false, "Item mismatch in RegisterItem()"); + } + } +} + +const JavaAttrLocalVariableInfoItem &JBCAttrLocalVariableInfo::GetItemByStart(uint16 slotIdx, uint16 start) const { + uint32 itemPCStart = GetStart(slotIdx, start); + if (itemPCStart != start) { + return kInvalidInfoItem; + } + std::map, JavaAttrLocalVariableInfoItem>::const_iterator it = + itemMap.find(std::make_pair(slotIdx, itemPCStart)); + if (it != itemMap.end()) { + return it->second; + } else { + return kInvalidInfoItem; + } +} + +JavaAttrLocalVariableInfoItem *JBCAttrLocalVariableInfo::GetItemByStartInternal(uint16 slotIdx, uint16 start) { + uint32 itemPCStart = GetStart(slotIdx, start); + if (itemPCStart != start) { + return nullptr; + } + std::map, JavaAttrLocalVariableInfoItem>::iterator it = + itemMap.find(std::make_pair(slotIdx, itemPCStart)); + CHECK_FATAL(it != itemMap.end(), "Item@%d not found", start); + return &(it->second); +} + +uint16 JBCAttrLocalVariableInfo::GetStart(uint16 slotIdx, uint16 pc) const { + MapleMap>::const_iterator it = slotStartMap.find(slotIdx); + if (it == slotStartMap.end()) { + return jbc::kInvalidPC16; + } + uint16 startLast = jbc::kInvalidPC16; + for (uint16 start : it->second) { + if (pc == start) { + return start; + } else if (pc < start) { + return startLast; + } + startLast = start; + } + return startLast; +} + +std::list JBCAttrLocalVariableInfo::EmitToStrings() const { + std::list ans; + std::stringstream ss; + ans.emplace_back("===== Local Variable Info ====="); + for (const std::pair, JavaAttrLocalVariableInfoItem> &itemPair : itemMap) { + const JavaAttrLocalVariableInfoItem &item = itemPair.second; + ss.str(""); + ss << "slot[" << item.slotIdx << "]: "; + ss << "start=" << item.start << ", "; + ss << "lenght=" << item.length << ", "; + ss << "name=\'" << GlobalTables::GetStrTable().GetStringFromStrIdx(item.nameIdx) << "\', "; + ss << "type=\'" << item.feirType->GetTypeName() << "\', "; + ss << "signature=\'" << GlobalTables::GetStrTable().GetStringFromStrIdx(item.signatureNameIdx) << "\'"; + ans.push_back(ss.str()); + } + ans.emplace_back("==============================="); + return ans; +} + +bool JBCAttrLocalVariableInfo::IsInvalidLocalVariableInfoItem(const JavaAttrLocalVariableInfoItem &item) { + return (item.nameIdx == 0 && item.feirType == nullptr && item.signatureNameIdx == 0); +} + +void JBCAttrLocalVariableInfo::AddSlotStartMap(uint16 slotIdx, uint16 startPC) { + auto it = slotStartMap.find(slotIdx); + if (it == slotStartMap.end()) { + MapleSet startSet(allocator.Adapter()); + CHECK_FATAL(startSet.insert(startPC).second, "insert failed"); + CHECK_FATAL(slotStartMap.insert(std::make_pair(slotIdx, startSet)).second, "insert failed"); + } else { + 
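+    // Slot already tracked: just record the additional start pc in its start set.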
CHECK_FATAL(it->second.insert(startPC).second, "insert failed"); + } +} + +void JBCAttrLocalVariableInfo::CheckItemAvaiable(uint16 slotIdx, uint16 start) const { + uint32 itemPCStart = GetStart(slotIdx, start); + if (itemPCStart == jbc::kInvalidPC16) { + return; + } + CHECK_FATAL(itemPCStart <= jbc::kMaxPC32, "Invalid PC"); + uint16 itemPCStart16 = static_cast(itemPCStart); + const JavaAttrLocalVariableInfoItem &item = GetItemByStart(slotIdx, itemPCStart16); + CHECK_FATAL(!JBCAttrLocalVariableInfo::IsInvalidLocalVariableInfoItem(item), "Item@%d not found", itemPCStart16); + CHECK_FATAL(start >= item.start + item.length, "PC range overlapped"); +} + +// ---------- JBCAttrConstantValue ---------- +JBCAttrConstantValue::JBCAttrConstantValue(const MapleAllocator &allocator, uint16 nameIdx, uint32 length) + : JBCAttr(kAttrConstantValue, nameIdx, length), + constIdx(0), + constValue(nullptr) { + (void) allocator; +} + +JBCAttrConstantValue::~JBCAttrConstantValue() { + constValue = nullptr; +} + +bool JBCAttrConstantValue::ParseFileImpl(MapleAllocator &allocator, BasicIORead &io, const JBCConstPool &constPool) { + (void) allocator; + (void) constPool; + bool success = false; + constIdx = io.ReadUInt16(success); + return success; +} + +bool JBCAttrConstantValue::PreProcessImpl(const JBCConstPool &constPool) { + (void) constPool; + return false; +} + +SimpleXMLElem *JBCAttrConstantValue::GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, + uint32 idx) const { + (void) allocator; + (void) constPool; + (void) idx; + return nullptr; +} + +// ---------- JBCAttrCode ---------- +JBCAttrCode::JBCAttrCode(MapleAllocator &allocator, uint16 nameIdx, uint32 length) + : JBCAttr(kAttrCode, nameIdx, length), + maxStack(0), + maxLocals(0), + codeLength(0), + code(nullptr), + nException(0), + exceptions(allocator.Adapter()), + nAttr(0), + attrs(allocator.Adapter()), + instructions(allocator.Adapter()), + attrMap(allocator), + localVarInfo(allocator) {} + +JBCAttrCode::~JBCAttrCode() { + code = nullptr; +} + +bool JBCAttrCode::ParseFileImpl(MapleAllocator &allocator, BasicIORead &io, const JBCConstPool &constPool) { + bool success = false; + MemPool *mp = allocator.GetMemPool(); + CHECK_FATAL(mp, "mempool is nullptr"); + maxStack = io.ReadUInt16(success); + maxLocals = io.ReadUInt16(success); + codeLength = io.ReadUInt32(success); + code = static_cast(mp->Malloc(codeLength)); + CHECK_NULL_FATAL(code); + io.ReadBufferUInt8(code, codeLength, success); + nException = io.ReadUInt16(success); + for (uint16 i = 0; i < nException; i++) { + attr::ExceptionTableItem *item = mp->New(); + success = item->ParseFile(allocator, io); + exceptions.push_back(item); + } + nAttr = io.ReadUInt16(success); + for (uint16 i = 0; i < nAttr; i++) { + JBCAttr *attr = nullptr; + attr = JBCAttr::InAttr(allocator, io, constPool); + if (attr == nullptr) { + return false; + } + attrs.push_back(attr); + attrMap.RegisterAttr(*attr); + } + // ParseOpcode + success = ParseOpcodes(allocator); + return success; +} + +bool JBCAttrCode::PreProcessImpl(const JBCConstPool &constPool) { + bool success = true; + for (attr::ExceptionTableItem *item : exceptions) { + success = success && item->PreProcess(constPool); + } + success = success && attrMap.PreProcess(constPool); + InitLocalVarInfo(); + return success; +} + +SimpleXMLElem *JBCAttrCode::GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, uint32 idx) const { + (void) allocator; + (void) constPool; + (void) idx; + return nullptr; +} + +// Remove const when 
implemented +void JBCAttrCode::InitLocalVarInfo() { + // LocalVariableTable + std::list localVars = attrMap.GetAttrs(jbc::kAttrLocalVariableTable); + for (JBCAttr *attrRaw : localVars) { + CHECK_NULL_FATAL(attrRaw); + JBCAttrLocalVariableTable *attr = static_cast(attrRaw); + for (attr::LocalVariableTableItem *itemAttr : attr->GetLocalVarInfos()) { + localVarInfo.RegisterItem(*itemAttr); + } + } + // LocalVariableTypeTable + std::list localVarTypes = attrMap.GetAttrs(jbc::kAttrLocalVariableTypeTable); + for (JBCAttr *attrRaw : localVarTypes) { + CHECK_NULL_FATAL(attrRaw); + JBCAttrLocalVariableTypeTable *attr = static_cast(attrRaw); + for (attr::LocalVariableTypeTableItem *itemAttr : attr->GetLocalVarTypeInfos()) { + localVarInfo.RegisterTypeItem(*itemAttr); + } + } +} + +// Remove const when implemented +void JBCAttrCode::SetLoadStoreType() const { +} + +bool JBCAttrCode::ParseOpcodes(MapleAllocator &allocator) { + BasicIOMapFile file("code", code, codeLength); + BasicIORead io(file, true); + bool success = true; + bool wide = false; + JBCOp *objOP = nullptr; + uint32 pc = 0; + MemPool *mp = allocator.GetMemPool(); + ASSERT(mp, "mempool is nullptr"); + while (io.GetPos() < codeLength && success) { + if (wide == false) { + pc = io.GetPos(); + } + JBCOpcode opcode = static_cast(io.ReadUInt8(success)); + if (opcode == kOpWide) { + wide = true; + continue; + } + JBCOpcodeKind kind = JBCOp::GetOpcodeInfo().GetOpcodeKind(opcode); + switch (kind) { + case kOpKindArrayLoad: + case kOpKindArrayStore: + case kOpKindPop: + case kOpKindDup: + case kOpKindSwap: + case kOpKindStack: + case kOpKindMathBinop: + case kOpKindMathUnop: + case kOpKindConvert: + case kOpKindCompare: + case kOpKindReturn: + case kOpKindThrow: + case kOpKindMonitor: + case kOpKindArrayLength: + objOP = mp->New(allocator, opcode, kind, wide); + break; + case kOpKindUnused: + objOP = mp->New(allocator, opcode, kind, wide); + break; + case kOpKindReversed: + objOP = mp->New(allocator, opcode, kind, wide); + break; + case kOpKindConst: + objOP = mp->New(allocator, opcode, kind, wide); + break; + case kOpKindLoad: + case kOpKindStore: + objOP = mp->New(allocator, opcode, kind, wide); + break; + case kOpKindMathInc: + objOP = mp->New(allocator, opcode, kind, wide); + break; + case kOpKindBranch: + objOP = mp->New(allocator, opcode, kind, wide); + break; + case kOpKindGoto: + objOP = mp->New(allocator, opcode, kind, wide); + break; + case kOpKindSwitch: + objOP = mp->New(allocator, opcode, kind, wide); + break; + case kOpKindStaticFieldOpr: + case kOpKindFieldOpr: + objOP = mp->New(allocator, opcode, kind, wide); + break; + case kOpKindInvoke: + objOP = mp->New(allocator, opcode, kind, wide); + break; + case kOpKindJsr: + objOP = mp->New(allocator, opcode, kind, wide); + break; + case kOpKindRet: + objOP = mp->New(allocator, opcode, kind, wide); + break; + case kOpKindNew: + objOP = mp->New(allocator, opcode, kind, wide); + break; + case kOpKindMultiANewArray: + objOP = mp->New(allocator, opcode, kind, wide); + break; + case kOpKindTypeCheck: + objOP = mp->New(allocator, opcode, kind, wide); + break; + default: + CHECK_NULL_FATAL(objOP); + break; + } + wide = false; + success = objOP->ParseFile(io); + if (success) { + instructions[pc] = objOP; + } + } + return success; +} + +// ---------- JBCAttrStackMapTable ---------- +JBCAttrStackMapTable::JBCAttrStackMapTable(MapleAllocator &allocator, uint16 nameIdx, uint32 length) + : JBCAttr(kAttrStackMapTable, nameIdx, length), + count(0), + entries(allocator.Adapter()) {} + +bool 
JBCAttrStackMapTable::ParseFileImpl(MapleAllocator &allocator, BasicIORead &io, const JBCConstPool &constPool) { + (void) constPool; + bool success = false; + count = io.ReadUInt16(success); + for (uint16 i = 0; i < count; i++) { + uint8 frameType = io.ReadUInt8(); + attr::StackMapFrameItem *item = attr::StackMapFrameItem::NewItem(allocator, io, frameType); + if (item == nullptr) { + return false; + } + entries.push_back(item); + } + return success; +} + +bool JBCAttrStackMapTable::PreProcessImpl(const JBCConstPool &constPool) { + (void) constPool; + return false; +} + +SimpleXMLElem *JBCAttrStackMapTable::GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, + uint32 idx) const { + (void) allocator; + (void) constPool; + (void) idx; + return nullptr; +} + +// ---------- JBCAttrException ---------- +JBCAttrException::JBCAttrException(MapleAllocator &allocator, uint16 nameIdx, uint32 length) + : JBCAttr(kAttrException, nameIdx, length), + count(0), + tbExceptionIdx(allocator.Adapter()) {} + +bool JBCAttrException::ParseFileImpl(MapleAllocator &allocator, BasicIORead &io, const JBCConstPool &constPool) { + (void) allocator; + (void) constPool; + bool success = false; + count = io.ReadUInt16(success); + for (uint16 i = 0; i < count; i++) { + uint16 idx = io.ReadUInt16(success); + tbExceptionIdx.push_back(idx); + } + return success; +} + +bool JBCAttrException::PreProcessImpl(const JBCConstPool &constPool) { + (void) constPool; + return false; +} + +SimpleXMLElem *JBCAttrException::GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, + uint32 idx) const { + (void) allocator; + (void) constPool; + (void) idx; + return nullptr; +} + +// ---------- JBCAttrInnerClass ---------- +JBCAttrInnerClass::JBCAttrInnerClass(MapleAllocator &allocator, uint16 nameIdx, uint32 length) + : JBCAttr(kAttrInnerClass, nameIdx, length), + count(0), + tbClasses(allocator.Adapter()) {} + +bool JBCAttrInnerClass::ParseFileImpl(MapleAllocator &allocator, BasicIORead &io, const JBCConstPool &constPool) { + (void) constPool; + bool success = false; + MemPool *mp = allocator.GetMemPool(); + ASSERT(mp, "mempool is nullptr"); + count = io.ReadUInt16(success); + for (uint16 i = 0; i < count; i++) { + attr::InnerClassItem *item = mp->New(); + success = item->ParseFile(allocator, io); + tbClasses.push_back(item); + } + return success; +} + +bool JBCAttrInnerClass::PreProcessImpl(const JBCConstPool &constPool) { + (void) constPool; + return false; +} + +SimpleXMLElem *JBCAttrInnerClass::GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, + uint32 idx) const { + (void) allocator; + (void) constPool; + (void) idx; + return nullptr; +} + +// ---------- JBCAttrEnclosingMethod ---------- +JBCAttrEnclosingMethod::JBCAttrEnclosingMethod(const MapleAllocator &allocator, uint16 nameIdx, uint32 length) + : JBCAttr(kAttrEnclosingMethod, nameIdx, length), + classIdx(0), + methodIdx(0), + constClass(nullptr), + constNameAndType(nullptr) { + (void) allocator; +} + +JBCAttrEnclosingMethod::~JBCAttrEnclosingMethod() { + constClass = nullptr; + constNameAndType = nullptr; +} + +bool JBCAttrEnclosingMethod::ParseFileImpl(MapleAllocator &allocator, BasicIORead &io, const JBCConstPool &constPool) { + (void) allocator; + (void) constPool; + bool success = false; + classIdx = io.ReadUInt16(success); + methodIdx = io.ReadUInt16(success); + return success; +} + +bool JBCAttrEnclosingMethod::PreProcessImpl(const JBCConstPool &constPool) { + (void) constPool; + return false; +} + +SimpleXMLElem 
*JBCAttrEnclosingMethod::GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, + uint32 idx) const { + (void) allocator; + (void) constPool; + (void) idx; + return nullptr; +} + +// ---------- JBCAttrSynthetic ---------- +JBCAttrSynthetic::JBCAttrSynthetic(const MapleAllocator &allocator, uint16 nameIdx, uint32 length) + : JBCAttr(kAttrSynthetic, nameIdx, length) { + (void) allocator; +} + +bool JBCAttrSynthetic::ParseFileImpl(MapleAllocator &allocator, BasicIORead &io, const JBCConstPool &constPool) { + (void) allocator; + (void) io; + (void) constPool; + return true; +} + +bool JBCAttrSynthetic::PreProcessImpl(const JBCConstPool &constPool) { + (void) constPool; + return false; +} + +SimpleXMLElem *JBCAttrSynthetic::GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, + uint32 idx) const { + (void) allocator; + (void) constPool; + (void) idx; + return nullptr; +} + +// ---------- JBCAttrSignature ---------- +JBCAttrSignature::JBCAttrSignature(const MapleAllocator &allocator, uint16 nameIdx, uint32 length) + : JBCAttr(kAttrSignature, nameIdx, length), + signatureIdx(0), + constSignatureName(nullptr) { + (void) allocator; +} + +JBCAttrSignature::~JBCAttrSignature() { + constSignatureName = nullptr; +} + +bool JBCAttrSignature::ParseFileImpl(MapleAllocator &allocator, BasicIORead &io, const JBCConstPool &constPool) { + (void) allocator; + (void) constPool; + bool success = false; + signatureIdx = io.ReadUInt16(success); + return success; +} + +bool JBCAttrSignature::PreProcessImpl(const JBCConstPool &constPool) { + (void) constPool; + return false; +} + +SimpleXMLElem *JBCAttrSignature::GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, + uint32 idx) const { + (void) allocator; + (void) constPool; + (void) idx; + return nullptr; +} + +// ---------- JBCAttrSourceFile ---------- +JBCAttrSourceFile::JBCAttrSourceFile(const MapleAllocator &allocator, uint16 nameIdx, uint32 length) + : JBCAttr(kAttrSourceFile, nameIdx, length), + sourceFileIdx(0), + constFileName(nullptr) { + (void) allocator; +} + +JBCAttrSourceFile::~JBCAttrSourceFile() { + constFileName = nullptr; +} + +bool JBCAttrSourceFile::ParseFileImpl(MapleAllocator &allocator, BasicIORead &io, const JBCConstPool &constPool) { + (void) allocator; + (void) constPool; + bool success = false; + sourceFileIdx = io.ReadUInt16(success); + return success; +} + +bool JBCAttrSourceFile::PreProcessImpl(const JBCConstPool &constPool) { + const JBCConst *constRaw = constPool.GetConstByIdxWithTag(sourceFileIdx, JBCConstTag::kConstUTF8); + if (constRaw == nullptr) { + return false; + } + constFileName = static_cast(constRaw); + return true; +} + +SimpleXMLElem *JBCAttrSourceFile::GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, + uint32 idx) const { + (void) allocator; + (void) constPool; + (void) idx; + return nullptr; +} + +// ---------- JBCAttrSourceDebugEx ---------- +JBCAttrSourceDebugEx::JBCAttrSourceDebugEx(const MapleAllocator &allocator, uint16 nameIdx, uint32 length) + : JBCAttr(kAttrSourceDebugEx, nameIdx, length), + data(nullptr) { + (void) allocator; +} + +JBCAttrSourceDebugEx::~JBCAttrSourceDebugEx() { + data = nullptr; +} + +bool JBCAttrSourceDebugEx::ParseFileImpl(MapleAllocator &allocator, BasicIORead &io, const JBCConstPool &constPool) { + bool success = false; + (void) constPool; + MemPool *mp = allocator.GetMemPool(); + ASSERT(mp, "mempool is nullptr"); + if (length > 0) { + data = static_cast(mp->Malloc(length + 1)); + CHECK_NULL_FATAL(data); + 
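+    // Read the raw SourceDebugExtension bytes and NUL-terminate the buffer.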
io.ReadBufferChar(data, length, success); + data[length] = 0; + } + return success; +} + +bool JBCAttrSourceDebugEx::PreProcessImpl(const JBCConstPool &constPool) { + (void) constPool; + return false; +} + +SimpleXMLElem *JBCAttrSourceDebugEx::GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, + uint32 idx) const { + (void) allocator; + (void) constPool; + (void) idx; + return nullptr; +} + +// ---------- JBCAttrLineNumberTable ---------- +JBCAttrLineNumberTable::JBCAttrLineNumberTable(MapleAllocator &allocator, uint16 nameIdx, uint32 length) + : JBCAttr(kAttrLineNumberTable, nameIdx, length), + size(0), + lineNums(allocator.Adapter()) {} + +bool JBCAttrLineNumberTable::ParseFileImpl(MapleAllocator &allocator, BasicIORead &io, const JBCConstPool &constPool) { + (void) constPool; + bool success = false; + MemPool *mp = allocator.GetMemPool(); + ASSERT(mp, "mempool is nullptr"); + size = io.ReadUInt16(success); + for (uint16 i = 0; i < size && success; i++) { + attr::LineNumberTableItem *item = mp->New(); + success = item->ParseFile(allocator, io); + lineNums.push_back(item); + } + return success; +} + +bool JBCAttrLineNumberTable::PreProcessImpl(const JBCConstPool &constPool) { + (void) constPool; + return false; +} + +SimpleXMLElem *JBCAttrLineNumberTable::GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, + uint32 idx) const { + (void) allocator; + (void) constPool; + (void) idx; + return nullptr; +} + +// ---------- JBCAttrLocalVariableTable ---------- +JBCAttrLocalVariableTable::JBCAttrLocalVariableTable(MapleAllocator &allocator, uint16 nameIdx, uint32 length) + : JBCAttr(kAttrLocalVariableTable, nameIdx, length), size(0), localVarInfos(allocator.Adapter()) {} + +bool JBCAttrLocalVariableTable::ParseFileImpl(MapleAllocator &allocator, BasicIORead &io, + const JBCConstPool &constPool) { + (void) constPool; + bool success = false; + MemPool *mp = allocator.GetMemPool(); + ASSERT(mp, "mempool is nullptr"); + size = io.ReadUInt16(success); + for (uint16 i = 0; i < size && success; i++) { + attr::LocalVariableTableItem *item = mp->New(); + success = item->ParseFile(allocator, io); + localVarInfos.push_back(item); + } + return success; +} + +bool JBCAttrLocalVariableTable::PreProcessImpl(const JBCConstPool &constPool) { + bool success = true; + for (attr::LocalVariableTableItem *info : localVarInfos) { + success = success && info->PreProcess(constPool); + } + return success; +} + +SimpleXMLElem *JBCAttrLocalVariableTable::GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, + uint32 idx) const { + (void) allocator; + (void) constPool; + (void) idx; + return nullptr; +} + +// ---------- JBCAttrLocalVariableTypeTable ---------- +JBCAttrLocalVariableTypeTable::JBCAttrLocalVariableTypeTable(MapleAllocator &allocator, uint16 nameIdx, uint32 length) + : JBCAttr(kAttrLocalVariableTypeTable, nameIdx, length), size(0), localVarTypeInfos(allocator.Adapter()) {} + +bool JBCAttrLocalVariableTypeTable::ParseFileImpl(MapleAllocator &allocator, BasicIORead &io, + const JBCConstPool &constPool) { + (void) constPool; + bool success = false; + MemPool *mp = allocator.GetMemPool(); + ASSERT(mp, "mempool is nullptr"); + size = io.ReadUInt16(success); + for (uint16 i = 0; i < size && success; i++) { + attr::LocalVariableTypeTableItem *item = mp->New(); + success = item->ParseFile(allocator, io); + localVarTypeInfos.push_back(item); + } + return success; +} + +bool JBCAttrLocalVariableTypeTable::PreProcessImpl(const JBCConstPool &constPool) { + bool 
success = true; + for (attr::LocalVariableTypeTableItem *info : localVarTypeInfos) { + success = success && info->PreProcess(constPool); + } + return success; +} + +SimpleXMLElem *JBCAttrLocalVariableTypeTable::GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, + uint32 idx) const { + (void) allocator; + (void) constPool; + (void) idx; + return nullptr; +} + +// ---------- JBCAttrDeprecated ---------- +JBCAttrDeprecated::JBCAttrDeprecated(const MapleAllocator &allocator, uint16 nameIdx, uint32 length) + : JBCAttr(kAttrDeprecated, nameIdx, length) { + (void) allocator; +} + +bool JBCAttrDeprecated::ParseFileImpl(MapleAllocator &allocator, BasicIORead &io, const JBCConstPool &constPool) { + (void) allocator; + (void) io; + (void) constPool; + return true; +} + +bool JBCAttrDeprecated::PreProcessImpl(const JBCConstPool &constPool) { + (void) constPool; + return true; +} + +SimpleXMLElem *JBCAttrDeprecated::GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, + uint32 idx) const { + (void) allocator; + (void) constPool; + (void) idx; + return nullptr; +} + +// ---------- JBCAttrRTAnnotations ---------- +JBCAttrRTAnnotations::JBCAttrRTAnnotations(MapleAllocator &allocator, JBCAttrKind kindIn, uint16 nameIdx, uint32 length) + : JBCAttr(kindIn, nameIdx, length), size(0), annotations(allocator.Adapter()) {} + +bool JBCAttrRTAnnotations::ParseFileImpl(MapleAllocator &allocator, BasicIORead &io, const JBCConstPool &constPool) { + (void) constPool; + bool success = false; + MemPool *mp = allocator.GetMemPool(); + ASSERT(mp, "mempool is nullptr"); + size = io.ReadUInt16(success); + for (uint16 i = 0; i < size && success; i++) { + attr::Annotation *item = mp->New(allocator); + success = item->ParseFile(allocator, io); + annotations.push_back(item); + } + return success; +} + +bool JBCAttrRTAnnotations::PreProcessImpl(const JBCConstPool &constPool) { + (void) constPool; + return true; +} + +SimpleXMLElem *JBCAttrRTAnnotations::GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, + uint32 idx) const { + (void) allocator; + (void) constPool; + (void) idx; + return nullptr; +} + +// ---------- JBCAttrRTVisAnnotations ---------- +JBCAttrRTVisAnnotations::JBCAttrRTVisAnnotations(MapleAllocator &allocator, uint16 nameIdx, uint32 length) + : JBCAttrRTAnnotations(allocator, kAttrRTVisAnnotations, nameIdx, length) {} + +// ---------- JBCAttrRTInvisAnnotations ---------- +JBCAttrRTInvisAnnotations::JBCAttrRTInvisAnnotations(MapleAllocator &allocator, uint16 nameIdx, uint32 length) + : JBCAttrRTAnnotations(allocator, kAttrRTInvisAnnotations, nameIdx, length) {} + +// ---------- JBCAttrRTParamAnnotations ---------- +JBCAttrRTParamAnnotations::JBCAttrRTParamAnnotations(MapleAllocator &allocator, JBCAttrKind kindIn, uint16 nameIdx, + uint32 length) + : JBCAttr(kindIn, nameIdx, length), size(0), annotations(allocator.Adapter()) {} + +bool JBCAttrRTParamAnnotations::ParseFileImpl(MapleAllocator &allocator, BasicIORead &io, + const JBCConstPool &constPool) { + (void) constPool; + bool success = false; + MemPool *mp = allocator.GetMemPool(); + ASSERT(mp, "mempool is nullptr"); + size = io.ReadUInt8(success); + for (uint16 i = 0; i < size && success; i++) { + attr::ParamAnnotationItem *item = mp->New(allocator); + success = item->ParseFile(allocator, io); + annotations.push_back(item); + } + return success; +} + +bool JBCAttrRTParamAnnotations::PreProcessImpl(const JBCConstPool &constPool) { + (void) constPool; + return true; +} + +SimpleXMLElem 
*JBCAttrRTParamAnnotations::GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, + uint32 idx) const { + (void) allocator; + (void) constPool; + (void) idx; + return nullptr; +} + +// ---------- JBCAttrRTVisParamAnnotations ---------- +JBCAttrRTVisParamAnnotations::JBCAttrRTVisParamAnnotations(MapleAllocator &allocator, uint16 nameIdx, uint32 length) + : JBCAttrRTParamAnnotations(allocator, kAttrRTVisParamAnnotations, nameIdx, length) {} + +// ---------- JBCAttrRTInvisParamAnnotations ---------- +JBCAttrRTInvisParamAnnotations::JBCAttrRTInvisParamAnnotations(MapleAllocator &allocator, uint16 nameIdx, + uint32 length) + : JBCAttrRTParamAnnotations(allocator, kAttrRTInvisParamAnnotations, nameIdx, length) {} + +// ---------- JBCAttrRTTypeAnnotations ---------- +JBCAttrRTTypeAnnotations::JBCAttrRTTypeAnnotations(MapleAllocator &allocator, JBCAttrKind kindIn, uint16 nameIdx, + uint32 length) + : JBCAttr(kindIn, nameIdx, length), size(0), annotations(allocator.Adapter()) {} + +bool JBCAttrRTTypeAnnotations::ParseFileImpl(MapleAllocator &allocator, BasicIORead &io, + const JBCConstPool &constPool) { + (void) constPool; + bool success = false; + MemPool *mp = allocator.GetMemPool(); + ASSERT(mp, "mempool is nullptr"); + size = io.ReadUInt16(success); + for (uint16 i = 0; i < size && success; i++) { + attr::TypeAnnotationItem *item = mp->New(allocator); + success = item->ParseFile(allocator, io); + annotations.push_back(item); + } + return success; +} + +bool JBCAttrRTTypeAnnotations::PreProcessImpl(const JBCConstPool &constPool) { + (void) constPool; + return true; +} + +SimpleXMLElem *JBCAttrRTTypeAnnotations::GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, + uint32 idx) const { + (void) allocator; + (void) constPool; + (void) idx; + return nullptr; +} + +// ---------- JBCAttrRTVisTypeAnnotations ---------- +JBCAttrRTVisTypeAnnotations::JBCAttrRTVisTypeAnnotations(MapleAllocator &allocator, uint16 nameIdx, uint32 length) + : JBCAttrRTTypeAnnotations(allocator, kAttrRTVisTypeAnnotations, nameIdx, length) {} + +// ---------- JBCAttrRTInvisTypeAnnotations ---------- +JBCAttrRTInvisTypeAnnotations::JBCAttrRTInvisTypeAnnotations(MapleAllocator &allocator, uint16 nameIdx, uint32 length) + : JBCAttrRTTypeAnnotations(allocator, kAttrRTInvisTypeAnnotations, nameIdx, length) {} + +// ---------- JBCAttrAnnotationDefault ---------- +JBCAttrAnnotationDefault::JBCAttrAnnotationDefault(const MapleAllocator &allocator, uint16 nameIdx, uint32 length) + : JBCAttr(kAttrAnnotationDefault, nameIdx, length), value(nullptr) { + (void) allocator; +} + +JBCAttrAnnotationDefault::~JBCAttrAnnotationDefault() { + value = nullptr; +} + +bool JBCAttrAnnotationDefault::ParseFileImpl(MapleAllocator &allocator, BasicIORead &io, + const JBCConstPool &constPool) { + (void) constPool; + bool success = false; + MemPool *mp = allocator.GetMemPool(); + ASSERT(mp != nullptr, "mempool is nullptr"); + value = mp->New(); + success = value->ParseFile(allocator, io); + return success; +} + +bool JBCAttrAnnotationDefault::PreProcessImpl(const JBCConstPool &constPool) { + (void) constPool; + return true; +} + +SimpleXMLElem *JBCAttrAnnotationDefault::GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, + uint32 idx) const { + (void) allocator; + (void) constPool; + (void) idx; + return nullptr; +} + +// ---------- JBCAttrBootstrapMethods ---------- +JBCAttrBootstrapMethods::JBCAttrBootstrapMethods(MapleAllocator &allocator, uint16 nameIdx, uint32 length) + : 
JBCAttr(kAttrBootstrapMethods, nameIdx, length), size(0), methods(allocator.Adapter()) {} + +bool JBCAttrBootstrapMethods::ParseFileImpl(MapleAllocator &allocator, BasicIORead &io, const JBCConstPool &constPool) { + (void) constPool; + bool success = false; + MemPool *mp = allocator.GetMemPool(); + ASSERT(mp, "mempool is nullptr"); + size = io.ReadUInt16(success); + for (uint16 i = 0; i < size && success; i++) { + attr::BootstrapMethodItem *item = mp->New(allocator); + success = item->ParseFile(allocator, io); + methods.push_back(item); + } + return success; +} + +bool JBCAttrBootstrapMethods::PreProcessImpl(const JBCConstPool &constPool) { + (void) constPool; + return true; +} + +SimpleXMLElem *JBCAttrBootstrapMethods::GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, + uint32 idx) const { + (void) allocator; + (void) constPool; + (void) idx; + return nullptr; +} + +// ---------- JBCAttrMethodParameters ---------- +JBCAttrMethodParameters::JBCAttrMethodParameters(MapleAllocator &allocator, uint16 nameIdx, uint32 length) + : JBCAttr(kAttrMethodParameters, nameIdx, length), size(0), params(allocator.Adapter()) {} + +bool JBCAttrMethodParameters::ParseFileImpl(MapleAllocator &allocator, BasicIORead &io, const JBCConstPool &constPool) { + (void) constPool; + bool success = false; + MemPool *mp = allocator.GetMemPool(); + ASSERT(mp, "mempool is nullptr"); + size = io.ReadUInt8(success); + for (uint8 i = 0; i < size && success; i++) { + attr::MethodParamItem *item = mp->New(); + success = item->ParseFile(allocator, io); + params.push_back(item); + } + return success; +} + +bool JBCAttrMethodParameters::PreProcessImpl(const JBCConstPool &constPool) { + (void) constPool; + return true; +} + +SimpleXMLElem *JBCAttrMethodParameters::GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, + uint32 idx) const { + (void) allocator; + (void) constPool; + (void) idx; + return nullptr; +} +} // namespace jbc +} // namespace maple diff --git a/src/hir2mpl/bytecode_input/class/src/jbc_attr_item.cpp b/src/hir2mpl/bytecode_input/class/src/jbc_attr_item.cpp new file mode 100644 index 0000000000000000000000000000000000000000..045b3bce202e3e24e8e0e51fc872cf73f23f35f3 --- /dev/null +++ b/src/hir2mpl/bytecode_input/class/src/jbc_attr_item.cpp @@ -0,0 +1,1457 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "jbc_attr_item.h" +#include "fe_manager.h" + +namespace maple { +namespace jbc { +namespace attr { +inline GStrIdx GetOrCreateGStrIdx(const std::string &str) { + return GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(str); +} + +inline GStrIdx GetOrCreateGStrIdxWithMangler(const std::string &str) { + return GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(namemangler::EncodeName(str)); +} + +// ---------- ExceptionTableItem ---------- +ExceptionTableItem::ExceptionTableItem() : startPC(0), endPC(0), handlerPC(0), catchTypeIdx(0), catchType(nullptr) {} + +ExceptionTableItem::~ExceptionTableItem() { + catchType = nullptr; +} + +bool ExceptionTableItem::ParseFileImpl(MapleAllocator &allocator, BasicIORead &io) { + (void) allocator; + bool success = false; + startPC = io.ReadUInt16(success); + endPC = io.ReadUInt16(success); + handlerPC = io.ReadUInt16(success); + catchTypeIdx = io.ReadUInt16(success); + return success; +} + +bool ExceptionTableItem::PreProcessImpl(const JBCConstPool &constPool) { + if (catchTypeIdx == 0) { + return true; + } + const JBCConst *constRaw = constPool.GetConstByIdxWithTag(catchTypeIdx, kConstClass); + if (constRaw == nullptr) { + catchType = nullptr; + return false; + } else { + catchType = static_cast(constRaw); + return true; + } +} + +SimpleXMLElem *ExceptionTableItem::GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, + uint32 idx) const { + (void) allocator; + (void) constPool; + (void) idx; + return nullptr; +} + +// ---------- VerificationTypeInfo ---------- +std::map VerificationTypeInfo::tagNameMap = + VerificationTypeInfo::InitTagNameMap(); + +VerificationTypeInfo::VerificationTypeInfo() + : tag(kVerTypeInfoItemUnknown), + classInfo(nullptr) { + data.raw = 0; +} + +VerificationTypeInfo::~VerificationTypeInfo() { + classInfo = nullptr; +} + +bool VerificationTypeInfo::ParseFileImpl(MapleAllocator &allocator, BasicIORead &io) { + (void) allocator; + bool success = false; + uint8 t = io.ReadUInt8(success); + tag = static_cast(t); + switch (t) { + case kVerTypeInfoItemTop: + case kVerTypeInfoItemInteger: + case kVerTypeInfoItemFloat: + case kVerTypeInfoItemDouble: + case kVerTypeInfoItemLong: + case kVerTypeInfoItemNull: + case kVerTypeInfoItemUninitializedThis: + break; + case kVerTypeInfoItemObject: + data.cpoolIdx = io.ReadUInt16(success); + break; + case kVerTypeInfoItemUninitialized: + data.offset = io.ReadUInt16(success); + break; + default: + ERR(kLncErr, "undefined tag %d for verification_type_info", tag); + return false; + } + return success; +} + +bool VerificationTypeInfo::PreProcessImpl(const JBCConstPool &constPool) { + (void) constPool; + return false; +} + +SimpleXMLElem *VerificationTypeInfo::GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, + uint32 idx) const { + (void) allocator; + (void) constPool; + (void) idx; + return nullptr; +} + +std::map VerificationTypeInfo::InitTagNameMap() { + std::map result; + result[kVerTypeInfoItemTop] = "ITEM_Top"; + result[kVerTypeInfoItemInteger] = "ITEM_Integer"; + result[kVerTypeInfoItemFloat] = "ITEM_Float"; + result[kVerTypeInfoItemDouble] = "ITEM_Double"; + result[kVerTypeInfoItemLong] = "ITEM_Long"; + result[kVerTypeInfoItemNull] = "ITEM_Null"; + result[kVerTypeInfoItemUninitializedThis] = "ITEM_UninitializedThis"; + result[kVerTypeInfoItemObject] = "ITEM_Object"; + result[kVerTypeInfoItemUninitialized] = "ITEM_Uninitialized"; + return result; +} + +std::string VerificationTypeInfo::TagName(VerificationTypeInfoTag t) { + 
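+  // Map the verification type tag to its readable name; unknown tags fall back to "ITEM_Unknown".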
std::map::const_iterator it = tagNameMap.find(t); + if (it != tagNameMap.end()) { + return it->second; + } + return "ITEM_Unknown"; +} + +// ---------- StackMapFrameItem ---------- +std::map StackMapFrameItem::tagNameMap = StackMapFrameItem::InitTagName(); + +StackMapFrameItem::StackMapFrameItem(uint8 frameTypeIn, StackMapFrameItemTag tagIn) + : tag(tagIn), frameType(frameTypeIn) {} + +std::map StackMapFrameItem::InitTagName() { + std::map result; + result[kStackSame] = "SAME"; + result[kStackSameLocals1StackItem] = "SAME_LOCALS_1_STACK_ITEM"; + result[kStackSameLocals1StackItemEx] = "SAME_LOCALS_1_STACK_ITEM_EXTENDED"; + result[kStackChop] = "CHOP"; + result[kStackSameFrameEx] = "SAME_FRAME_EXTENDED"; + result[kStackAppend] = "APPEND"; + result[kStackFullFrame] = "FULL_FRAME"; + result[kStackReserved] = "RESERVED"; + return result; +} + +std::string StackMapFrameItem::TagName(StackMapFrameItemTag tag) { + std::map::const_iterator it = tagNameMap.find(tag); + if (it != tagNameMap.end()) { + return it->second; + } + return "UNKNOWN"; +} + +StackMapFrameItemTag StackMapFrameItem::FrameType2Tag(uint8 frameType) { + // frame type tag : 64, 128, 247, 251, 255 + if (frameType < 64) { + return kStackSame; + } else if (frameType < 128) { + return kStackSameLocals1StackItem; + } else if (frameType < 247) { + ERR(kLncErr, "Reserved frametype %d for stack_map_frame", frameType); + return kStackReserved; + } else if (frameType == 247) { + return kStackSameLocals1StackItemEx; + } else if (frameType < 251) { + return kStackChop; + } else if (frameType < 255) { + return kStackAppend; + } else { + return kStackFullFrame; + } +} + +StackMapFrameItem *StackMapFrameItem::NewItem(MapleAllocator &allocator, BasicIORead &io, uint8 frameType) { + MemPool *mp = allocator.GetMemPool(); + ASSERT(mp, "mempool is nullptr"); + StackMapFrameItem *item = nullptr; + StackMapFrameItemTag tag = StackMapFrameItem::FrameType2Tag(frameType); + switch (tag) { + case kStackSame: + item = mp->New(frameType); + break; + case kStackSameLocals1StackItem: + item = mp->New(frameType); + break; + case kStackSameLocals1StackItemEx: + item = mp->New(frameType); + break; + case kStackChop: + item = mp->New(frameType); + break; + case kStackSameFrameEx: + item = mp->New(frameType); + break; + case kStackAppend: + item = mp->New(allocator, frameType); + break; + case kStackFullFrame: + item = mp->New(allocator, frameType); + break; + default: + CHECK_FATAL(false, "Should not run here"); + } + if (!item->ParseFile(allocator, io)) { + CHECK_FATAL(false, "Failed to new StackMapFrame item"); + return nullptr; + } + return item; +} + +// ---------- StackMapFrameItemSame ---------- +StackMapFrameItemSame::StackMapFrameItemSame(uint8 frameType) : StackMapFrameItem(frameType, kStackSame) {} + +bool StackMapFrameItemSame::ParseFileImpl(MapleAllocator &allocator, BasicIORead &io) { + (void) allocator; + (void) io; + return true; +} + +bool StackMapFrameItemSame::PreProcessImpl(const JBCConstPool &constPool) { + (void) constPool; + return true; +} + +SimpleXMLElem *StackMapFrameItemSame::GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, + uint32 idx) const { + (void) allocator; + (void) constPool; + (void) idx; + return nullptr; +} + +// ---------- StackMapFrameItemSameLocals1 ---------- +StackMapFrameItemSameLocals1::StackMapFrameItemSameLocals1(uint8 frameType) + : StackMapFrameItem(frameType, kStackSameLocals1StackItem), + stack(nullptr) {} + +StackMapFrameItemSameLocals1::~StackMapFrameItemSameLocals1() { + stack = nullptr; 
+} + +bool StackMapFrameItemSameLocals1::ParseFileImpl(MapleAllocator &allocator, BasicIORead &io) { + MemPool *mp = allocator.GetMemPool(); + ASSERT(mp, "mempool is nullptr"); + stack = mp->New(); + return stack->ParseFile(allocator, io); +} + +bool StackMapFrameItemSameLocals1::PreProcessImpl(const JBCConstPool &constPool) { + (void) constPool; + return true; +} + +SimpleXMLElem *StackMapFrameItemSameLocals1::GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, + uint32 idx) const { + (void) allocator; + (void) constPool; + (void) idx; + return nullptr; +} + +// ---------- StackMapFrameItemSameLocals1Ex ---------- +StackMapFrameItemSameLocals1Ex::StackMapFrameItemSameLocals1Ex(uint8 frameType) + : StackMapFrameItem(frameType, kStackSameLocals1StackItemEx), + offsetDelta(0), + stack(nullptr) {} + +StackMapFrameItemSameLocals1Ex::~StackMapFrameItemSameLocals1Ex() { + stack = nullptr; +} + +bool StackMapFrameItemSameLocals1Ex::ParseFileImpl(MapleAllocator &allocator, BasicIORead &io) { + bool success = false; + MemPool *mp = allocator.GetMemPool(); + ASSERT(mp, "mempool is nullptr"); + offsetDelta = io.ReadUInt16(success); + stack = mp->New(); + success = stack->ParseFile(allocator, io); + return success; +} + +bool StackMapFrameItemSameLocals1Ex::PreProcessImpl(const JBCConstPool &constPool) { + (void) constPool; + return true; +} + +SimpleXMLElem *StackMapFrameItemSameLocals1Ex::GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, + uint32 idx) const { + (void) allocator; + (void) constPool; + (void) idx; + return nullptr; +} + +// ---------- StackMapFrameItemChop ---------- +StackMapFrameItemChop::StackMapFrameItemChop(uint8 frameType) + : StackMapFrameItem(frameType, kStackChop), + offsetDelta(0) {} + +bool StackMapFrameItemChop::ParseFileImpl(MapleAllocator &allocator, BasicIORead &io) { + (void) allocator; + bool success = false; + offsetDelta = io.ReadUInt16(success); + return success; +} + +bool StackMapFrameItemChop::PreProcessImpl(const JBCConstPool &constPool) { + (void) constPool; + return true; +} + +SimpleXMLElem *StackMapFrameItemChop::GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, + uint32 idx) const { + (void) allocator; + (void) constPool; + (void) idx; + return nullptr; +} + +// ---------- StackMapFrameItemSameEx ---------- +StackMapFrameItemSameEx::StackMapFrameItemSameEx(uint8 frameType) + : StackMapFrameItem(frameType, kStackSameFrameEx), + offsetDelta(0) {} + +bool StackMapFrameItemSameEx::ParseFileImpl(MapleAllocator &allocator, BasicIORead &io) { + (void) allocator; + bool success = false; + offsetDelta = io.ReadUInt16(success); + return success; +} + +bool StackMapFrameItemSameEx::PreProcessImpl(const JBCConstPool &constPool) { + (void) constPool; + return true; +} + +SimpleXMLElem *StackMapFrameItemSameEx::GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, + uint32 idx) const { + (void) allocator; + (void) constPool; + (void) idx; + return nullptr; +} + +// ---------- StackMapFrameItemAppend ---------- +StackMapFrameItemAppend::StackMapFrameItemAppend(MapleAllocator &allocator, uint8 frameType) + : StackMapFrameItem(frameType, kStackAppend), + offsetDelta(0), + locals(allocator.Adapter()) {} + +bool StackMapFrameItemAppend::ParseFileImpl(MapleAllocator &allocator, BasicIORead &io) { + bool success = false; + MemPool *mp = allocator.GetMemPool(); + ASSERT(mp, "mempool is nullptr"); + offsetDelta = io.ReadUInt16(success); + for (uint8 i = 0; i < frameType - 251; i++) { // 251 : 
frame type tag + VerificationTypeInfo *local = mp->New(); + success = local->ParseFile(allocator, io); + locals.push_back(local); + } + return success; +} + +bool StackMapFrameItemAppend::PreProcessImpl(const JBCConstPool &constPool) { + (void) constPool; + return true; +} + +SimpleXMLElem *StackMapFrameItemAppend::GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, + uint32 idx) const { + (void) allocator; + (void) constPool; + (void) idx; + return nullptr; +} + +// ---------- StackMapFrameItemFull ---------- +StackMapFrameItemFull::StackMapFrameItemFull(MapleAllocator &allocator, uint8 frameType) + : StackMapFrameItem(frameType, kStackFullFrame), + offsetDelta(0), + nLocals(0), + locals(allocator.Adapter()), + nStacks(0), + stacks(allocator.Adapter()) {} + +bool StackMapFrameItemFull::ParseFileImpl(MapleAllocator &allocator, BasicIORead &io) { + bool success = false; + MemPool *mp = allocator.GetMemPool(); + ASSERT(mp, "mempool is nullptr"); + offsetDelta = io.ReadUInt16(success); + nLocals = io.ReadUInt16(success); + for (uint32 i = 0; i < nLocals; i++) { + VerificationTypeInfo *local = mp->New(); + success = local->ParseFile(allocator, io); + locals.push_back(local); + } + nStacks = io.ReadUInt16(success); + for (uint32 i = 0; i < nStacks; i++) { + VerificationTypeInfo *stack = mp->New(); + success = stack->ParseFile(allocator, io); + stacks.push_back(stack); + } + return success; +} + +bool StackMapFrameItemFull::PreProcessImpl(const JBCConstPool &constPool) { + (void) constPool; + return true; +} + +SimpleXMLElem *StackMapFrameItemFull::GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, + uint32 idx) const { + (void) allocator; + (void) constPool; + (void) idx; + return nullptr; +} + +// ---------- InnerClassItem ---------- +InnerClassItem::InnerClassItem() + : innerClassInfoIdx(0), + outerClassInfoIdx(0), + innerNameIdx(0), + innerClassAccessFlag(0), + constClassInner(nullptr), + constClassOuter(nullptr), + constNameInner(nullptr) {} + +InnerClassItem::~InnerClassItem() { + constClassInner = nullptr; + constClassOuter = nullptr; + constNameInner = nullptr; +} + +bool InnerClassItem::ParseFileImpl(MapleAllocator &allocator, BasicIORead &io) { + (void) allocator; + bool success = false; + innerClassInfoIdx = io.ReadUInt16(success); + outerClassInfoIdx = io.ReadUInt16(success); + innerNameIdx = io.ReadUInt16(success); + innerClassAccessFlag = io.ReadUInt16(success); + return success; +} + +bool InnerClassItem::PreProcessImpl(const JBCConstPool &constPool) { + (void) constPool; + return false; +} + +SimpleXMLElem *InnerClassItem::GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, + uint32 idx) const { + (void) allocator; + (void) constPool; + (void) idx; + return nullptr; +} + +// ---------- LineNumberTableItem ---------- +LineNumberTableItem::LineNumberTableItem() : startPC(0), lineNumber(0) {} + +bool LineNumberTableItem::ParseFileImpl(MapleAllocator &allocator, BasicIORead &io) { + (void) allocator; + bool success = false; + startPC = io.ReadUInt16(success); + lineNumber = io.ReadUInt16(success); + return success; +} + +bool LineNumberTableItem::PreProcessImpl(const JBCConstPool &constPool) { + (void) constPool; + return true; +} + +SimpleXMLElem *LineNumberTableItem::GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, + uint32 idx) const { + (void) allocator; + (void) constPool; + (void) idx; + return nullptr; +} + +// ---------- LocalVariableTableItem ---------- 
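+// One local_variable_table entry: the variable stored in slot 'index' is live in [startPC, startPC + length);
+// nameIdx and descIdx reference CONSTANT_Utf8 entries and are resolved to constName/constDesc in PreProcess.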
+LocalVariableTableItem::LocalVariableTableItem() + : startPC(0), + length(0), + nameIdx(0), + descIdx(0), + index(0), + constName(nullptr), + constDesc(nullptr), + nameIdxMpl(0), + feirType(nullptr) {} + +LocalVariableTableItem::~LocalVariableTableItem() { + constName = nullptr; + constDesc = nullptr; + feirType = nullptr; +} + +bool LocalVariableTableItem::ParseFileImpl(MapleAllocator &allocator, BasicIORead &io) { + (void) allocator; + bool success = false; + startPC = io.ReadUInt16(success); + length = io.ReadUInt16(success); + nameIdx = io.ReadUInt16(success); + descIdx = io.ReadUInt16(success); + index = io.ReadUInt16(success); + return success; +} + +bool LocalVariableTableItem::PreProcessImpl(const JBCConstPool &constPool) { + constName = static_cast(constPool.GetConstByIdxWithTag(nameIdx, JBCConstTag::kConstUTF8)); + constDesc = static_cast(constPool.GetConstByIdxWithTag(descIdx, JBCConstTag::kConstUTF8)); + if (constName == nullptr || constDesc == nullptr) { + return false; + } + nameIdxMpl = GetOrCreateGStrIdx(constName->GetString()); + feirType = FEManager::GetTypeManager().GetOrCreateFEIRTypeByName(namemangler::EncodeName(constDesc->GetString()), + GStrIdx(0), kSrcLangJava); + return true; +} + +SimpleXMLElem *LocalVariableTableItem::GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, + uint32 idx) const { + (void) allocator; + (void) constPool; + (void) idx; + return nullptr; +} + +// ---------- LocalVariableTypeTableItem ---------- +LocalVariableTypeTableItem::LocalVariableTypeTableItem() + : startPC(0), + length(0), + nameIdx(0), + signatureIdx(0), + index(0), + constName(nullptr), + constSignature(nullptr), + nameIdxMpl(0) {} + +LocalVariableTypeTableItem::~LocalVariableTypeTableItem() { + constName = nullptr; + constSignature = nullptr; +} + +bool LocalVariableTypeTableItem::ParseFileImpl(MapleAllocator &allocator, BasicIORead &io) { + (void) allocator; + bool success = false; + startPC = io.ReadUInt16(success); + length = io.ReadUInt16(success); + nameIdx = io.ReadUInt16(success); + signatureIdx = io.ReadUInt16(success); + index = io.ReadUInt16(success); + return success; +} + +bool LocalVariableTypeTableItem::PreProcessImpl(const JBCConstPool &constPool) { + constName = static_cast(constPool.GetConstByIdxWithTag(nameIdx, JBCConstTag::kConstUTF8)); + constSignature = + static_cast(constPool.GetConstByIdxWithTag(signatureIdx, JBCConstTag::kConstUTF8)); + if (constName == nullptr || constSignature == nullptr) { + return false; + } + nameIdxMpl = GetOrCreateGStrIdx(constName->GetString()); + return true; +} + +SimpleXMLElem *LocalVariableTypeTableItem::GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, + uint32 idx) const { + (void) allocator; + (void) constPool; + (void) idx; + return nullptr; +} + +// ---------- ParamAnnotationItem ---------- +ParamAnnotationItem::ParamAnnotationItem(MapleAllocator &allocator) + : count(0), annotations(allocator.Adapter()) {} + +bool ParamAnnotationItem::ParseFileImpl(MapleAllocator &allocator, BasicIORead &io) { + bool success = false; + MemPool *mp = allocator.GetMemPool(); + ASSERT(mp, "mempool is nullptr"); + count = io.ReadUInt16(success); + for (uint16 i = 0; i < count; i++) { + Annotation *anno = mp->New(allocator); + success = anno->ParseFile(allocator, io); + annotations.push_back(anno); + } + return success; +} + +bool ParamAnnotationItem::PreProcessImpl(const JBCConstPool &constPool) { + (void) constPool; + return true; +} + +SimpleXMLElem 
*ParamAnnotationItem::GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, + uint32 idx) const { + (void) allocator; + (void) constPool; + (void) idx; + return nullptr; +} + +// ---------- BootstrapMethodItem ---------- +BootstrapMethodItem::BootstrapMethodItem(MapleAllocator &allocator) + : methodRefIdx(0), + nArgs(0), + argsIdx(allocator.Adapter()), + methodHandle(nullptr), + args(allocator.Adapter()) {} + +BootstrapMethodItem::~BootstrapMethodItem() { + methodHandle = nullptr; +} + +bool BootstrapMethodItem::ParseFileImpl(MapleAllocator &allocator, BasicIORead &io) { + (void) allocator; + bool success = false; + methodRefIdx = io.ReadUInt16(success); + nArgs = io.ReadUInt16(success); + for (uint16 i = 0; i < nArgs; i++) { + uint16 idx = io.ReadUInt16(success); + argsIdx.push_back(idx); + } + return success; +} + +bool BootstrapMethodItem::PreProcessImpl(const JBCConstPool &constPool) { + (void) constPool; + return false; +} + +SimpleXMLElem *BootstrapMethodItem::GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, + uint32 idx) const { + (void) allocator; + (void) constPool; + (void) idx; + return nullptr; +} + +// ---------- MethodParamItem ---------- +MethodParamItem::MethodParamItem() + : nameIdx(0), accessFlag(0), constName(nullptr) {} + +MethodParamItem::~MethodParamItem() { + constName = nullptr; +} + +bool MethodParamItem::MethodParamItem::ParseFileImpl(MapleAllocator &allocator, BasicIORead &io) { + (void) allocator; + bool success = false; + nameIdx = io.ReadUInt16(success); + accessFlag = io.ReadUInt16(success); + return success; +} + +bool MethodParamItem::PreProcessImpl(const JBCConstPool &constPool) { + (void) constPool; + return false; +} + +SimpleXMLElem *MethodParamItem::GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, + uint32 idx) const { + (void) allocator; + (void) constPool; + (void) idx; + return nullptr; +} + +// ---------- Annotation ---------- +Annotation::Annotation(MapleAllocator &allocator) + : typeIdx(0), nElemPairs(0), tbElemPairs(allocator.Adapter()), constTypeName(nullptr) {} + +Annotation::~Annotation() { + constTypeName = nullptr; +} + +bool Annotation::ParseFileImpl(MapleAllocator &allocator, BasicIORead &io) { + bool success = false; + MemPool *mp = allocator.GetMemPool(); + ASSERT(mp, "mempool is nullptr"); + typeIdx = io.ReadUInt16(success); + nElemPairs = io.ReadUInt16(success); + for (uint16 i = 0; i < nElemPairs; i++) { + ElementValuePair *pair = mp->New(); + success = pair->ParseFile(allocator, io); + tbElemPairs.push_back(pair); + } + return success; +} + +bool Annotation::PreProcessImpl(const JBCConstPool &constPool) { + (void) constPool; + return false; +} + +SimpleXMLElem *Annotation::GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, uint32 idx) const { + (void) allocator; + (void) constPool; + (void) idx; + return nullptr; +} + +// ---------- ElementValueItem ---------- +std::map ElementValueItem::tagKindMap = ElementValueItem::InitTagKindMap(); +std::map ElementValueItem::kindNameMap = ElementValueItem::InitKindNameMap(); + +ElementValueItem::ElementValueItem(ElementValueKind kindIn, char tagIn) : kind(kindIn), tag(tagIn) {} + +std::map ElementValueItem::InitTagKindMap() { + std::map result; + result['B'] = kElementValueConst; + result['C'] = kElementValueConst; + result['D'] = kElementValueConst; + result['F'] = kElementValueConst; + result['I'] = kElementValueConst; + result['J'] = kElementValueConst; + result['S'] = kElementValueConst; + result['Z'] = 
kElementValueConst; + result['s'] = kElementValueConst; + result['e'] = kElementValueEnum; + result['c'] = kElementValueClassInfo; + result['@'] = kElementValueAnnotation; + result['['] = kElementValueArray; + return result; +} + +std::map ElementValueItem::InitKindNameMap() { + std::map result; + result[kElementValueConst] = "ElementValueConst"; + result[kElementValueEnum] = "ElementValueEnum"; + result[kElementValueClassInfo] = "ElementValueClassInfo"; + result[kElementValueAnnotation] = "ElementValueAnnotation"; + result[kElementValueArray] = "ElementValueArray"; + return result; +} + +std::string ElementValueItem::KindName(ElementValueKind kind) { + std::map::const_iterator it = kindNameMap.find(kind); + if (it != kindNameMap.end()) { + return it->second; + } + return "Unknown"; +} + +ElementValueKind ElementValueItem::TagToKind(char tag) { + std::map::const_iterator it = tagKindMap.find(tag); + if (it != tagKindMap.end()) { + return it->second; + } + return kElementValueDefault; +} + +ElementValueItem *ElementValueItem::NewItem(MapleAllocator &allocator, BasicIORead &io, char tag) { + MemPool *mp = allocator.GetMemPool(); + ASSERT(mp, "mempool is nullptr"); + ElementValueKind kind = ElementValueItem::TagToKind(tag); + ElementValueItem *item = nullptr; + switch (kind) { + case kElementValueConst: + item = mp->New(tag); + break; + case kElementValueEnum: + item = mp->New(); + break; + case kElementValueClassInfo: + item = mp->New(); + break; + case kElementValueAnnotation: + item = mp->New(); + break; + case kElementValueArray: + item = mp->New(allocator); + break; + default: + ERR(kLncErr, "unsupported kind"); + return nullptr; + } + if (item->ParseFile(allocator, io) == false) { + return nullptr; + } + return item; +} + +// ---------- ElementValueConst ---------- +ElementValueConst::ElementValueConst(uint8 t) + : ElementValueItem(kElementValueConst, t), constValueIdx(0), constValue(nullptr) {} + +ElementValueConst::~ElementValueConst() { + constValue = nullptr; +} + +bool ElementValueConst::ParseFileImpl(MapleAllocator &allocator, BasicIORead &io) { + (void) allocator; + bool success = false; + constValueIdx = io.ReadUInt16(success); + return success; +} + +bool ElementValueConst::PreProcessImpl(const JBCConstPool &constPool) { + (void) constPool; + return false; +} + +SimpleXMLElem *ElementValueConst::GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, + uint32 idx) const { + (void) allocator; + (void) constPool; + (void) idx; + return nullptr; +} + +// ---------- ElementValueEnum ---------- +ElementValueEnum::ElementValueEnum() + : ElementValueItem(kElementValueEnum, 'e'), + typeNameIdx(0), + constNameIdx(0), + constTypeName(nullptr), + constName(nullptr) {} + +ElementValueEnum::~ElementValueEnum() { + constTypeName = nullptr; + constName = nullptr; +} + +bool ElementValueEnum::ParseFileImpl(MapleAllocator &allocator, BasicIORead &io) { + (void) allocator; + bool success = false; + typeNameIdx = io.ReadUInt16(success); + constNameIdx = io.ReadUInt16(success); + return success; +} + +bool ElementValueEnum::PreProcessImpl(const JBCConstPool &constPool) { + (void) constPool; + return false; +} + +SimpleXMLElem *ElementValueEnum::GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, + uint32 idx) const { + (void) allocator; + (void) constPool; + (void) idx; + return nullptr; +} + +// ---------- ElementValueClassInfo ---------- +ElementValueClassInfo::ElementValueClassInfo() + : ElementValueItem(kElementValueClassInfo, 'c'), + classInfoIdx(0), + 
constClassInfo(nullptr) {} + +ElementValueClassInfo::~ElementValueClassInfo() { + constClassInfo = nullptr; +} + +bool ElementValueClassInfo::ParseFileImpl(MapleAllocator &allocator, BasicIORead &io) { + (void) allocator; + bool success = false; + classInfoIdx = io.ReadUInt16(success); + return success; +} + +bool ElementValueClassInfo::PreProcessImpl(const JBCConstPool &constPool) { + (void) constPool; + return false; +} + +SimpleXMLElem *ElementValueClassInfo::GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, + uint32 idx) const { + (void) allocator; + (void) constPool; + (void) idx; + return nullptr; +} + +// ---------- ElementValueAnnotation ---------- +ElementValueAnnotation::ElementValueAnnotation() + : ElementValueItem(kElementValueAnnotation, '@'), annotation(nullptr) {} + +ElementValueAnnotation::~ElementValueAnnotation() { + annotation = nullptr; +} + +bool ElementValueAnnotation::ParseFileImpl(MapleAllocator &allocator, BasicIORead &io) { + MemPool *mp = allocator.GetMemPool(); + ASSERT(mp != nullptr, "mempool is nullptr"); + annotation = mp->New(allocator); + return annotation->ParseFile(allocator, io); +} + +bool ElementValueAnnotation::PreProcessImpl(const JBCConstPool &constPool) { + (void) constPool; + return true; +} + +SimpleXMLElem *ElementValueAnnotation::GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, + uint32 idx) const { + (void) allocator; + (void) constPool; + (void) idx; + return nullptr; +} + +// ---------- ElementValueArray ---------- +ElementValueArray::ElementValueArray(MapleAllocator &allocator) + : ElementValueItem(kElementValueArray, '['), size(0), values(allocator.Adapter()) {} + +bool ElementValueArray::ParseFileImpl(MapleAllocator &allocator, BasicIORead &io) { + bool success = false; + MemPool *mp = allocator.GetMemPool(); + ASSERT(mp, "mempool is nullptr"); + size = io.ReadUInt16(success); + for (uint16 i = 0; i < size; i++) { + ElementValue *elem = mp->New(); + success = elem->ParseFile(allocator, io); + values.push_back(elem); + } + return success; +} + +bool ElementValueArray::PreProcessImpl(const JBCConstPool &constPool) { + (void) constPool; + return true; +} + +SimpleXMLElem *ElementValueArray::GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, + uint32 idx) const { + (void) allocator; + (void) constPool; + (void) idx; + return nullptr; +} + +// ---------- ElementValue ---------- +ElementValue::ElementValue() + : tag('\0'), kind(kElementValueDefault), value(nullptr) {} + +ElementValue::~ElementValue() { + value = nullptr; +} + +bool ElementValue::ParseFileImpl(MapleAllocator &allocator, BasicIORead &io) { + bool success = false; + tag = io.ReadChar(success); + value = ElementValueItem::NewItem(allocator, io, tag); + return success && value != nullptr; +} + +bool ElementValue::PreProcessImpl(const JBCConstPool &constPool) { + (void) constPool; + return false; +} + +SimpleXMLElem *ElementValue::GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, + uint32 idx) const { + (void) allocator; + (void) constPool; + (void) idx; + return nullptr; +} + +// ---------- ElementValuePair ---------- +ElementValuePair::ElementValuePair() + : nameIdx(0), value(nullptr) {} + +ElementValuePair::~ElementValuePair() { + value = nullptr; +} + +bool ElementValuePair::ParseFileImpl(MapleAllocator &allocator, BasicIORead &io) { + bool success = false; + MemPool *mp = allocator.GetMemPool(); + ASSERT(mp, "mempool is nullptr"); + nameIdx = io.ReadUInt16(success); + value = mp->New(); + 
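+  // An element_value_pair is a name index followed by an element_value; parse the value and fold its
+  // parse status into the result.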
return (success && value->ParseFile(allocator, io)); +} + +bool ElementValuePair::PreProcessImpl(const JBCConstPool &constPool) { + (void) constPool; + return true; +} + +SimpleXMLElem *ElementValuePair::GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, + uint32 idx) const { + (void) allocator; + (void) constPool; + (void) idx; + return nullptr; +} + +// ---------- TargetInfoItem ---------- +std::map TargetInfoItem::typeTagMap = TargetInfoItem::InitTypeTagMap(); + +TargetInfoItem::TargetInfoItem(TargetInfoItemTag tagIn) : tag(tagIn) {} + +std::map TargetInfoItem::InitTypeTagMap() { + std::map result; + result[kTargetTypeParamClass] = kTargetTagTypeParam; + result[kTargetTypeParamMethod] = kTargetTagTypeParam; + result[kTargetTypeHierarchy] = kTargetTagSuperType; + result[kTargetTypeBoundClass] = kTargetTagTypeParamBound; + result[kTargetTypeBoundMethod] = kTargetTagTypeParamBound; + result[kTargetTypeFieldDecl] = kTargetTagEmpty; + result[kTargetTypeReturn] = kTargetTagEmpty; + result[kTargetTypeReceiver] = kTargetTagEmpty; + result[kTargetTypeFormal] = kTargetTagFormalParam; + result[kTargetTypeThrows] = kTargetTagThrows; + result[kTargetTypeLocalVar] = kTargetTagLocalVar; + result[kTargetTypeResourceVar] = kTargetTagLocalVar; + result[kTargetTypeExpectionParam] = kTargetTagCatch; + result[kTargetTypeInstanceof] = kTargetTagOffset; + result[kTargetTypeNew] = kTargetTagOffset; + result[kTargetTypeMethodRefNew] = kTargetTagOffset; + result[kTargetTypeMethodRefIdentifier] = kTargetTagOffset; + result[kTargetTypeCast] = kTargetTagTypeArg; + result[kTargetTypeConstructorInvoke] = kTargetTagTypeArg; + result[kTargetTypeMethodInvoke] = kTargetTagTypeArg; + result[kTargetTypeConstructorNew] = kTargetTagTypeArg; + result[kTargetTypeConstructorIdentifier] = kTargetTagTypeArg; + return result; +} + +TargetInfoItemTag TargetInfoItem::TargetType2Tag(TargetInfoItemType type) { + std::map::const_iterator it = typeTagMap.find(type); + if (it != typeTagMap.end()) { + return it->second; + } + return kTargetTagUndefine; +} + +TargetInfoItem *TargetInfoItem::NewItem(MapleAllocator &allocator, BasicIORead &io, TargetInfoItemType targetType) { + MemPool *mp = allocator.GetMemPool(); + ASSERT(mp, "mempool is nullptr"); + TargetInfoItemTag tag = TargetInfoItem::TargetType2Tag(targetType); + TargetInfoItem *item = nullptr; + switch (tag) { + case kTargetTagTypeParam: + item = mp->New(); + break; + case kTargetTagSuperType: + item = mp->New(); + break; + case kTargetTagTypeParamBound: + item = mp->New(); + break; + case kTargetTagEmpty: + item = mp->New(); + break; + case kTargetTagFormalParam: + item = mp->New(); + break; + case kTargetTagThrows: + item = mp->New(); + break; + case kTargetTagLocalVar: + item = mp->New(allocator); + break; + case kTargetTagCatch: + item = mp->New(); + break; + case kTargetTagOffset: + item = mp->New(); + break; + case kTargetTagTypeArg: + item = mp->New(); + break; + default: + ERR(kLncErr, "TargetInfoItem::NewItem(): undefined tag"); + return nullptr; + } + if (item->ParseFile(allocator, io) == false) { + return nullptr; + } + return item; +} + +// ---------- TargetTypeParam ---------- +TargetTypeParam::TargetTypeParam() : TargetInfoItem(kTargetTagTypeParam), paramIdx(0) {} + +bool TargetTypeParam::ParseFileImpl(MapleAllocator &allocator, BasicIORead &io) { + (void) allocator; + bool success = false; + paramIdx = io.ReadUInt8(success); + return success; +} + +bool TargetTypeParam::PreProcessImpl(const JBCConstPool &constPool) { + (void) constPool; + return 
true; +} + +SimpleXMLElem *TargetTypeParam::GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, + uint32 idx) const { + (void) allocator; + (void) constPool; + (void) idx; + return nullptr; +} + +// ---------- TargetSuperType ---------- +TargetSuperType::TargetSuperType() : TargetInfoItem(kTargetTagSuperType), index(0) {} + +bool TargetSuperType::ParseFileImpl(MapleAllocator &allocator, BasicIORead &io) { + (void) allocator; + bool success = false; + index = io.ReadUInt16(success); + return success; +} + +bool TargetSuperType::PreProcessImpl(const JBCConstPool &constPool) { + (void) constPool; + return true; +} + +SimpleXMLElem *TargetSuperType::GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, + uint32 idx) const { + (void) allocator; + (void) constPool; + (void) idx; + return nullptr; +} + +// ---------- TargetTypeParamBound ---------- +TargetTypeParamBound::TargetTypeParamBound() : TargetInfoItem(kTargetTagTypeParamBound), paramIdx(0), boundIdx(0) {} + +bool TargetTypeParamBound::ParseFileImpl(MapleAllocator &allocator, BasicIORead &io) { + (void) allocator; + bool success = false; + paramIdx = io.ReadUInt8(success); + boundIdx = io.ReadUInt8(success); + return success; +} + +bool TargetTypeParamBound::PreProcessImpl(const JBCConstPool &constPool) { + (void) constPool; + return true; +} + +SimpleXMLElem *TargetTypeParamBound::GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, + uint32 idx) const { + (void) allocator; + (void) constPool; + (void) idx; + return nullptr; +} + +// ---------- TargetEmpty ---------- +TargetEmpty::TargetEmpty() : TargetInfoItem(kTargetTagEmpty) {} + +bool TargetEmpty::ParseFileImpl(MapleAllocator &allocator, BasicIORead &io) { + (void) allocator; + (void) io; + return true; +} + +bool TargetEmpty::PreProcessImpl(const JBCConstPool &constPool) { + (void) constPool; + return true; +} + +SimpleXMLElem *TargetEmpty::GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, uint32 idx) const { + (void) allocator; + (void) constPool; + (void) idx; + return nullptr; +} + +// ---------- TargetFormalParam ---------- +TargetFormalParam::TargetFormalParam() : TargetInfoItem(kTargetTagFormalParam), paramIdx(0) {} + +bool TargetFormalParam::ParseFileImpl(MapleAllocator &allocator, BasicIORead &io) { + (void) allocator; + bool success = false; + paramIdx = io.ReadUInt8(success); + return success; +} + +bool TargetFormalParam::PreProcessImpl(const JBCConstPool &constPool) { + (void) constPool; + return true; +} + +SimpleXMLElem *TargetFormalParam::GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, + uint32 idx) const { + (void) allocator; + (void) constPool; + (void) idx; + return nullptr; +} + +// ---------- TargetThrows ---------- +TargetThrows::TargetThrows() : TargetInfoItem(kTargetTagThrows), typeIdx(0) {} + +bool TargetThrows::ParseFileImpl(MapleAllocator &allocator, BasicIORead &io) { + (void) allocator; + bool success = false; + typeIdx = io.ReadUInt16(success); + return success; +} + +bool TargetThrows::PreProcessImpl(const JBCConstPool &constPool) { + (void) constPool; + return true; +} + +SimpleXMLElem *TargetThrows::GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, + uint32 idx) const { + (void) allocator; + (void) constPool; + (void) idx; + return nullptr; +} + +// ---------- TargetLocalVarItem ---------- +TargetLocalVarItem::TargetLocalVarItem() : TargetInfoItem(kTargetTagLocalVar), startPC(0), length(0), index(0) {} + +bool 
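+// Reference note (added for clarity, not in the original sources): each entry of the
+// localvar_target table defined in JVMS §4.7.20.1 is { u2 start_pc; u2 length; u2 index; },
+// which is why ParseFileImpl below reads exactly three 16-bit values in that order.
+// TargetLocalVar (further below) first reads the u2 table_length and then parses that many
+// TargetLocalVarItem entries.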
TargetLocalVarItem::ParseFileImpl(MapleAllocator &allocator, BasicIORead &io) {
+  (void) allocator;
+  bool success = false;
+  startPC = io.ReadUInt16(success);
+  length = io.ReadUInt16(success);
+  index = io.ReadUInt16(success);
+  return success;
+}
+bool TargetLocalVarItem::PreProcessImpl(const JBCConstPool &constPool) {
+  (void) constPool;
+  return true;
+}
+
+SimpleXMLElem *TargetLocalVarItem::GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool,
+                                                  uint32 idx) const {
+  (void) allocator;
+  (void) constPool;
+  (void) idx;
+  return nullptr;
+}
+
+// ---------- TargetLocalVar ----------
+TargetLocalVar::TargetLocalVar(MapleAllocator &allocator)
+    : TargetInfoItem(kTargetTagLocalVar), size(0), table(allocator.Adapter()) {}
+
+bool TargetLocalVar::ParseFileImpl(MapleAllocator &allocator, BasicIORead &io) {
+  bool success = false;
+  MemPool *mp = allocator.GetMemPool();
+  ASSERT(mp, "mempool is nullptr");
+  size = io.ReadUInt16(success);
+  for (uint16 i = 0; i < size; i++) {
+    TargetLocalVarItem *item = mp->New<TargetLocalVarItem>();
+    success = item->ParseFile(allocator, io);
+    table.push_back(item);
+  }
+  return success;
+}
+
+bool TargetLocalVar::PreProcessImpl(const JBCConstPool &constPool) {
+  (void) constPool;
+  return true;
+}
+
+SimpleXMLElem *TargetLocalVar::GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool,
+                                              uint32 idx) const {
+  (void) allocator;
+  (void) constPool;
+  (void) idx;
+  return nullptr;
+}
+
+// ---------- TargetCatch ----------
+TargetCatch::TargetCatch() : TargetInfoItem(kTargetTagCatch), exTableIdx(0) {}
+
+bool TargetCatch::ParseFileImpl(MapleAllocator &allocator, BasicIORead &io) {
+  (void) allocator;
+  bool success = false;
+  exTableIdx = io.ReadUInt16(success);
+  return success;
+}
+
+bool TargetCatch::PreProcessImpl(const JBCConstPool &constPool) {
+  (void) constPool;
+  return true;
+}
+
+SimpleXMLElem *TargetCatch::GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, uint32 idx) const {
+  (void) allocator;
+  (void) constPool;
+  (void) idx;
+  return nullptr;
+}
+
+// ---------- TargetOffset ----------
+TargetOffset::TargetOffset() : TargetInfoItem(kTargetTagOffset), offset(0) {}
+
+bool TargetOffset::ParseFileImpl(MapleAllocator &allocator, BasicIORead &io) {
+  (void) allocator;
+  bool success = false;
+  offset = io.ReadUInt16(success);
+  return success;
+}
+
+bool TargetOffset::PreProcessImpl(const JBCConstPool &constPool) {
+  (void) constPool;
+  return true;
+}
+
+SimpleXMLElem *TargetOffset::GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool,
+                                            uint32 idx) const {
+  (void) allocator;
+  (void) constPool;
+  (void) idx;
+  return nullptr;
+}
+
+// ---------- TargetTypeArg ----------
+TargetTypeArg::TargetTypeArg() : TargetInfoItem(kTargetTagTypeArg), offset(0), typeArgIdx(0) {}
+
+bool TargetTypeArg::ParseFileImpl(MapleAllocator &allocator, BasicIORead &io) {
+  (void) allocator;
+  bool success = false;
+  offset = io.ReadUInt16(success);
+  typeArgIdx = io.ReadUInt8(success);
+  return success;
+}
+
+bool TargetTypeArg::PreProcessImpl(const JBCConstPool &constPool) {
+  (void) constPool;
+  return true;
+}
+
+SimpleXMLElem *TargetTypeArg::GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool,
+                                             uint32 idx) const {
+  (void) allocator;
+  (void) constPool;
+  (void) idx;
+  return nullptr;
+}
+
+// ---------- TypeAnnotationItem ----------
+TypeAnnotationItem::TypeAnnotationItem(MapleAllocator &allocator)
+    : targetType(kTargetTypeParamClass),
+      targetInfo(nullptr),
+      targetPath(nullptr),
+
typeIdx(0), + nElemPairs(0), + elemPairs(allocator.Adapter()) {} + +TypeAnnotationItem::~TypeAnnotationItem() { + targetInfo = nullptr; + targetPath = nullptr; +} + +bool TypeAnnotationItem::ParseFileImpl(MapleAllocator &allocator, BasicIORead &io) { + bool success = false; + MemPool *mp = allocator.GetMemPool(); + ASSERT(mp, "mempool is nullptr"); + targetType = static_cast(io.ReadUInt8(success)); + targetInfo = TargetInfoItem::NewItem(allocator, io, targetType); + targetPath = mp->New(allocator); + success = targetPath->ParseFile(allocator, io); + typeIdx = io.ReadUInt16(success); + nElemPairs = io.ReadUInt16(success); + for (uint16 i = 0; i < nElemPairs; i++) { + ElementValuePair *e = mp->New(); + success = e->ParseFile(allocator, io); + elemPairs.push_back(e); + } + return success; +} + +bool TypeAnnotationItem::PreProcessImpl(const JBCConstPool &constPool) { + (void) constPool; + return true; +} + +SimpleXMLElem *TypeAnnotationItem::GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, + uint32 idx) const { + (void) allocator; + (void) constPool; + (void) idx; + return nullptr; +} + +// ---------- TypePathItem ---------- +TypePathItem::TypePathItem() : typePathKind(0), typeArgIdx(0) {} + +bool TypePathItem::ParseFileImpl(MapleAllocator &allocator, BasicIORead &io) { + (void) allocator; + bool success = false; + typePathKind = io.ReadUInt8(success); + typeArgIdx = io.ReadUInt8(success); + return success; +} + +bool TypePathItem::PreProcessImpl(const JBCConstPool &constPool) { + (void) constPool; + return true; +} + +SimpleXMLElem *TypePathItem::GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, + uint32 idx) const { + (void) allocator; + (void) constPool; + (void) idx; + return nullptr; +} + +// ---------- TypePath ---------- +TypePath::TypePath(MapleAllocator &allocator) : pathLength(0), tbPath(allocator.Adapter()) {} + +bool TypePath::ParseFileImpl(MapleAllocator &allocator, BasicIORead &io) { + bool success = false; + MemPool *mp = allocator.GetMemPool(); + ASSERT(mp, "mempool is nullptr"); + pathLength = io.ReadUInt8(success); + for (uint8 i = 0; i < pathLength; i++) { + TypePathItem *path = mp->New(); + success = path->ParseFile(allocator, io); + tbPath.push_back(path); + } + return success; +} + +bool TypePath::PreProcessImpl(const JBCConstPool &constPool) { + (void) constPool; + return true; +} + +SimpleXMLElem *TypePath::GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, uint32 idx) const { + (void) allocator; + (void) constPool; + (void) idx; + return nullptr; +} +} // namespace attr +} // namespace jbc +} // namespace maple diff --git a/src/hir2mpl/bytecode_input/class/src/jbc_bb.cpp b/src/hir2mpl/bytecode_input/class/src/jbc_bb.cpp new file mode 100644 index 0000000000000000000000000000000000000000..784431e2fd4faf77679857b5b972a936572b53db --- /dev/null +++ b/src/hir2mpl/bytecode_input/class/src/jbc_bb.cpp @@ -0,0 +1,155 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "jbc_bb.h" +#include "jbc_stmt.h" +#include "jbc_function.h" + +namespace maple { +JBCBB::JBCBB(const jbc::JBCConstPool &argConstPool) + : JBCBB(FEIRBBKind::kBBKindDefault, argConstPool) {} + +JBCBB::JBCBB(uint8 argBBKind, const jbc::JBCConstPool &argConstPool) + : FEIRBB(argBBKind), + constPool(argConstPool), + stackError(false), + stackInUpdated(false), + stackOutUpdated(false), + updatePredEnd(false) {} + +bool JBCBB::InitForFuncHeader() { + updatePredEnd = true; + stackInUpdated = true; + return UpdateStack(); +} + +bool JBCBB::InitForCatch() { + updatePredEnd = true; + stackInUpdated = true; + minStackIn.PushItem(jbc::JBCPrimType::kTypeRef); + return UpdateStack(); +} + +bool JBCBB::UpdateStack() { + minStackOut.CopyFrom(minStackIn); + const JBCStmt *stmt = static_cast(stmtHead); + while (stmt != nullptr) { + JBCStmtKind kind = stmt->GetJBCKind(); + if (kind == JBCStmtKind::kJBCStmtInst) { + const JBCStmtInst *stmtInst = static_cast(stmt); + const jbc::JBCOp &op = stmtInst->GetOp(); + stackError = stackError || (!minStackOut.StackChange(op, constPool)); + } else if (kind == JBCStmtKind::kJBCStmtInstBranch) { + const JBCStmtInstBranch *stmtInstBranch = static_cast(stmt); + const jbc::JBCOp &op = stmtInstBranch->GetOp(); + stackError = stackError || (!minStackOut.StackChange(op, constPool)); + } + if (stackError) { + return false; + } + if (stmt == stmtTail) { + break; + } + const FELinkListNode *node = stmt->GetNext(); + stmt = static_cast(node); + } + stackOutUpdated = true; + return true; +} + +bool JBCBB::UpdateStackByPredBB(const JBCBB &bb) { + if (bb.stackError) { + return false; + } + if (!bb.stackOutUpdated) { + return true; + } + if (!stackInUpdated) { + minStackIn.CopyFrom(bb.minStackOut); + stackInUpdated = true; + return UpdateStack(); + } + if (!minStackIn.EqualTo(bb.minStackOut)) { + stackError = true; + return false; + } + return true; +} + +bool JBCBB::UpdateStackByPredBBEnd() const { + return updatePredEnd; +} + +bool JBCBB::CheckStack() { + if (!stackInUpdated || !stackOutUpdated) { + return false; + } + if (stackError) { + return false; + } + for (FEIRBB *bb : GetPredBBs()) { + if (bb->GetBBKind() == FEIRBBKind::kBBKindPesudoHead) { + continue; + } + if (bb->GetBBKind() == JBCBBPesudoCatchPred::kBBKindPesudoCatchPred) { + continue; + } + JBCBB *jbcBB = static_cast(bb); + if (!minStackIn.EqualTo(jbcBB->minStackOut)) { + return false; + } + } + return true; +} + +void JBCBB::Dump() const { + std::cout << "FEIRBB (id=" << GetID() << ", kind=" << GetBBKindName() << + ", preds={"; + for (FEIRBB *bb : GetPredBBs()) { + if (bb->GetBBKind() == FEIRBBKind::kBBKindPesudoHead) { + std::cout << "FuncHead "; + } else if (bb->GetBBKind() == JBCBBPesudoCatchPred::kBBKindPesudoCatchPred) { + std::cout << "CatchPred "; + } else { + std::cout << bb->GetID() << " "; + } + } + std::cout << "}, succs={"; + for (FEIRBB *bb : GetSuccBBs()) { + std::cout << bb->GetID() << " "; + } + std::cout << "})" << std::endl; + std::cout << " StackIn (" << (stackInUpdated ? "updated" : "") << "): "; + minStackIn.Dump(); + std::cout << std::endl; + const FELinkListNode *nodeStmt = GetStmtHead(); + while (nodeStmt != nullptr) { + const FEIRStmt *stmt = static_cast(nodeStmt); + stmt->Dump(" "); + if (nodeStmt == stmtTail) { + break; + } + nodeStmt = nodeStmt->GetNext(); + } + std::cout << " StackOut (" << (stackOutUpdated ? 
"updated" : "") << "): "; + minStackOut.Dump(); + std::cout << std::endl; +} + +uint32 JBCBB::GetSwapSize() const { + uint32 sizeIn = minStackIn.GetStackSize(); + uint32 sizeOut = minStackOut.GetStackSize(); + return sizeIn > sizeOut ? sizeIn : sizeOut; +} +} // namespace maple diff --git a/src/hir2mpl/bytecode_input/class/src/jbc_class.cpp b/src/hir2mpl/bytecode_input/class/src/jbc_class.cpp new file mode 100644 index 0000000000000000000000000000000000000000..7bf92f7d9e63e5890ea95d85cd9a132be4dce755 --- /dev/null +++ b/src/hir2mpl/bytecode_input/class/src/jbc_class.cpp @@ -0,0 +1,351 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "jbc_class.h" +#include + +namespace maple { +namespace { +const uint32 kMagicJVMClass = 0xCAFEBABE; +const uint32 kJavaClassNoIndex = 0; +} + +namespace jbc { +// ---------- JBCClassElem ---------- +JBCClassElem::JBCClassElem(MapleAllocator &allocator, const JBCClass &argKlass) + : klass(argKlass), + accessFlag(0), + nameIdx(0), + descIdx(0), + nAttr(0), + attrs(allocator.Adapter()), + attrMap(allocator) {} + +bool JBCClassElem::ParseFile(MapleAllocator &allocator, BasicIORead &io, const JBCConstPool &constPool) { + bool success = false; + accessFlag = io.ReadUInt16(success); + nameIdx = io.ReadUInt16(success); + descIdx = io.ReadUInt16(success); + nAttr = io.ReadUInt16(success); + for (uint16 i = 0; i < nAttr; i++) { + JBCAttr *attr = JBCAttr::InAttr(allocator, io, constPool); + if (attr == nullptr) { + return false; + } + attrs.push_back(attr); + attrMap.RegisterAttr(*attr); + } + return true; +} + +std::string JBCClassElem::GetClassName() const { + return klass.GetClassNameOrin(); +} + +const JBCConstPool &JBCClassElem::GetConstPool() const { + return klass.GetConstPool(); +} + +std::string JBCClassElem::GetFullName() const { + std::stringstream ss; + ss << GetClassName() << "|" << GetName() << "|" << GetDescription(); + return ss.str(); +} + +// ---------- JBCClassField ---------- +JBCClassField::JBCClassField(MapleAllocator &allocator, const JBCClass &argKlass) + : JBCClassElem(allocator, argKlass) {} + +SimpleXMLElem *JBCClassField::GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, uint32 idx) { + (void) allocator; + (void) constPool; + (void) idx; + return nullptr; +} + +// ---------- JBCClassMethod ---------- +JBCClassMethod::JBCClassMethod(MapleAllocator &allocator, const JBCClass &argKlass) + : JBCClassElem(allocator, argKlass) {} + +SimpleXMLElem *JBCClassMethod::GenXmlElemImpl(MapleAllocator &allocator, const JBCConstPool &constPool, uint32 idx) { + (void) allocator; + (void) constPool; + (void) idx; + return nullptr; +} + +bool JBCClassMethod::PreProcess() { + bool success = true; + for (JBCAttr *attr : attrs) { + switch (attr->GetKind()) { + case kAttrCode: + case kAttrLocalVariableTable: + case kAttrLocalVariableTypeTable: + success = success && attr->PreProcess(klass.GetConstPool()); + break; + default: + break; + } + } + return success; +} + +const JBCAttrCode 
*JBCClassMethod::GetCode() const {
+  return static_cast<const JBCAttrCode*>(attrMap.GetAttr(jbc::JBCAttrKind::kAttrCode));
+}
+
+bool JBCClassMethod::IsVirtual() const {
+  std::string classNameOrg = GetClass().GetClassNameOrin();
+  CHECK_FATAL(classNameOrg.length() > 2, "Invalid class name: %s", classNameOrg.c_str());  // LclassName;
+  std::string className = classNameOrg.substr(1, classNameOrg.length() - 2);
+  return (accessFlag & kAccMethodFinal) == 0 && (accessFlag & kAccMethodPrivate) == 0 &&
+         (accessFlag & kAccMethodProtected) == 0 && (accessFlag & kAccMethodStatic) == 0 &&
+         GetName().compare(className) != 0 && GetName().compare("<init>") != 0 && GetName().compare("<clinit>") != 0;
+}
+
+bool JBCClassMethod::HasCode() const {
+  uint16 flag = GetAccessFlag();
+  return ((flag & kAccMethodAbstract) == 0) && ((flag & kAccMethodNative) == 0);
+}
+
+bool JBCClassMethod::IsNative() const {
+  uint16 flag = GetAccessFlag();
+  return flag & kAccMethodNative;
+}
+
+// ---------- JBCClass ----------
+JBCClass::JBCClass(MapleAllocator &allocatorIn)
+    : allocator(allocatorIn),
+      constPool(allocator),
+      tbInterfaces(allocator.Adapter()),
+      tbFields(allocator.Adapter()),
+      tbMethods(allocator.Adapter()),
+      tbAttrs(allocator.Adapter()),
+      attrMap(allocator),
+      filePathName("", allocator.GetMemPool()),
+      fileName("", allocator.GetMemPool()) {
+  InitHeader();
+}
+
+bool JBCClass::ParseFile(BasicIORead &io) {
+  // begin parsing
+  bool success = false;
+  header.magic = io.ReadUInt32(success);
+  if (header.magic != kMagicJVMClass) {
+    ERR(kLncErr, "JBCClass::ParseFile() failed: invalid java class file (wrong magic number).");
+    return false;
+  }
+  header.minorVersion = io.ReadUInt16(success);
+  header.majorVersion = io.ReadUInt16(success);
+  if (!ParseFileForConstPool(io)) {
+    return false;
+  }
+  header.accessFlag = io.ReadUInt16(success);
+  header.thisClass = io.ReadUInt16(success);
+  header.superClass = io.ReadUInt16(success);
+  header.interfacesCount = io.ReadUInt16(success);
+  for (uint16 i = 0; i < header.interfacesCount && success; i++) {
+    uint16 idx = io.ReadUInt16(success);
+    tbInterfaces.push_back(idx);
+  }
+  if (!success) {
+    return success;
+  }
+  if (!ParseFileForFields(io)) {
+    return false;
+  }
+  if (!ParseFileForMethods(io)) {
+    return false;
+  }
+  if (!ParseFileForAttrs(io)) {
+    return false;
+  }
+  return true;
+}
+
+bool JBCClass::ParseFileForConstPool(BasicIORead &io) {
+  bool success = false;
+  bool wide = false;
+  const uint8 kIdxOff = 1;
+  const uint8 kIdxOffWide = 2;
+  header.constPoolCount = io.ReadUInt16(success);
+  for (uint16_t i = 1; i < header.constPoolCount && success; i += (wide ?
kIdxOffWide : kIdxOff)) { + JBCConst *objConst = JBCConst::InConst(allocator, io); + if (objConst != nullptr) { + (void)constPool.InsertConst(*objConst); + wide = objConst->IsWide(); + if (wide) { + constPool.InsertConstDummyForWide(); + } + } else { + ERR(kLncErr, "JBCClass::ParseFile() failed: invalid const @idx=%d", i); + success = false; + } + } + return success; +} + +bool JBCClass::ParseFileForFields(BasicIORead &io) { + bool success = false; + MemPool *mp = allocator.GetMemPool(); + ASSERT(mp, "mempool is nullptr"); + header.fieldsCount = io.ReadUInt16(success); + for (uint16 i = 0; i < header.fieldsCount && success; i++) { + JBCClassField *field = mp->New(allocator, *this); + success = field->ParseFile(allocator, io, constPool); + tbFields.push_back(field); + } + return success; +} + +bool JBCClass::ParseFileForMethods(BasicIORead &io) { + bool success = false; + MemPool *mp = allocator.GetMemPool(); + ASSERT(mp, "mempool is nullptr"); + header.methodsCount = io.ReadUInt16(success); + for (uint16 i = 0; i < header.methodsCount && success; i++) { + JBCClassMethod *method = mp->New(allocator, *this); + success = method->ParseFile(allocator, io, constPool); + tbMethods.push_back(method); + } + return success; +} + +bool JBCClass::ParseFileForAttrs(BasicIORead &io) { + bool success = false; + header.attrsCount = io.ReadUInt16(success); + for (uint16 i = 0; i < header.attrsCount && success; i++) { + JBCAttr *attr = JBCAttr::InAttr(allocator, io, constPool); + if (attr == nullptr) { + return false; + } + tbAttrs.push_back(attr); + attrMap.RegisterAttr(*attr); + } + return success; +} + +bool JBCClass::PreProcess() { + bool success = true; + success = success && constPool.PreProcess(header.majorVersion); + success = success && constPool.PrepareFEStructElemInfo(GetClassNameOrin()); + for (JBCClassMethod *method : tbMethods) { + success = success && method->PreProcess(); + } + for (JBCAttr *attr : tbAttrs) { + if (attr->GetKind() == JBCAttrKind::kAttrSourceFile) { + success = success && attr->PreProcess(constPool); + } + } + return success; +} + +GStrIdx JBCClass::GetClassNameIdxOrin() const { + const JBCConstClass *constClass = + static_cast(constPool.GetConstByIdxWithTag(header.thisClass, kConstClass)); + if (constClass != nullptr) { + return constClass->GetClassNameIdxOrin(); + } else { + return GStrIdx(0); + } +} + +GStrIdx JBCClass::GetClassNameIdxMpl() const { + const JBCConstClass *constClass = + static_cast(constPool.GetConstByIdxWithTag(header.thisClass, kConstClass)); + if (constClass != nullptr) { + return constClass->GetClassNameIdxMpl(); + } else { + return GStrIdx(0); + } +} + +std::string JBCClass::GetClassNameOrin() const { + GStrIdx idx = GetClassNameIdxOrin(); + if (idx.GetIdx() == 0) { + return ""; + } else { + return GlobalTables::GetStrTable().GetStringFromStrIdx(idx); + } +} + +std::string JBCClass::GetClassNameMpl() const { + GStrIdx idx = GetClassNameIdxMpl(); + if (idx.GetIdx() == 0) { + return ""; + } else { + return GlobalTables::GetStrTable().GetStringFromStrIdx(idx); + } +} + +std::string JBCClass::GetSourceFileName() const { + return ""; +} + +std::string JBCClass::GetSuperClassName() const { + if (header.superClass == kJavaClassNoIndex) { + return ""; + } else { + return constPool.GetNameByClassInfoIdx(header.superClass); + } +} + +std::vector JBCClass::GetInterfaceNames() const { + std::vector results; + for (uint16 idx : tbInterfaces) { + if (idx != kJavaClassNoIndex) { + std::string name = constPool.GetNameByClassInfoIdx(idx); + results.push_back(name); + } 
+ } + return results; +} + +JBCClass *JBCClass::InClass(MapleAllocator &allocator, BasicIORead &io) { + MemPool *mp = allocator.GetMemPool(); + ASSERT(mp, "mempool is nullptr"); + JBCClass *klass = mp->New(allocator); + if (klass->ParseFile(io) == false) { + return nullptr; + } + if (klass->PreProcess() == false) { + return nullptr; + } + // update fileName + const JBCAttr *attrRaw = klass->attrMap.GetAttr(JBCAttrKind::kAttrSourceFile); + if (attrRaw == nullptr) { + klass->fileName = "unknown"; + } else { + const JBCAttrSourceFile *attrSourceFile = static_cast(attrRaw); + const JBCConstUTF8 *constName = attrSourceFile->GetConstFileName(); + klass->fileName = constName->GetString(); + } + return klass; +} + +void JBCClass::InitHeader() { + header.magic = 0; + header.minorVersion = 0; + header.majorVersion = 0; + header.constPoolCount = 0; + header.accessFlag = 0; + header.thisClass = 0; + header.superClass = 0; + header.interfacesCount = 0; + header.fieldsCount = 0; + header.methodsCount = 0; + header.attrsCount = 0; +} +} // namespace jbc +} // namespace maple diff --git a/src/hir2mpl/bytecode_input/class/src/jbc_class2fe_helper.cpp b/src/hir2mpl/bytecode_input/class/src/jbc_class2fe_helper.cpp new file mode 100644 index 0000000000000000000000000000000000000000..2f1e3ca1571026ca1b144ff0e9b424f2faf04b44 --- /dev/null +++ b/src/hir2mpl/bytecode_input/class/src/jbc_class2fe_helper.cpp @@ -0,0 +1,382 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "jbc_class2fe_helper.h" +#include "fe_configs.h" +#include "fe_options.h" +#include "fe_macros.h" +#include "fe_manager.h" +#include "fe_utils_java.h" + +namespace maple { +// ---------- JBCClass2FEHelper ---------- +JBCClass2FEHelper::JBCClass2FEHelper(MapleAllocator &allocator, const jbc::JBCClass &klassIn) + : FEInputStructHelper(allocator), + klass(klassIn), + isStaticFieldProguard(false) { + srcLang = kSrcLangJava; +} + +// interface implements +std::string JBCClass2FEHelper::GetStructNameOrinImpl() const { + return klass.GetClassNameOrin(); +} + +std::string JBCClass2FEHelper::GetStructNameMplImpl() const { + return klass.GetClassNameMpl(); +} + +std::list JBCClass2FEHelper::GetSuperClassNamesImpl() const { + std::string superName = klass.GetSuperClassName(); + return std::list({ superName }); +} + +std::vector JBCClass2FEHelper::GetInterfaceNamesImpl() const { + return klass.GetInterfaceNames(); +} + +std::string JBCClass2FEHelper::GetSourceFileNameImpl() const { + return klass.GetSourceFileName(); +} + +MIRStructType *JBCClass2FEHelper::CreateMIRStructTypeImpl(bool &error) const { + std::string classNameOrin = klass.GetClassNameOrin(); + std::string classNameMpl = klass.GetClassNameMpl(); + if (classNameMpl.empty()) { + error = true; + ERR(kLncErr, "class name is empty"); + return nullptr; + } + FE_INFO_LEVEL(FEOptions::kDumpLevelInfoDetail, "CreateMIRStrucType for %s", classNameOrin.c_str()); + bool isCreate = false; + MIRStructType *type = FEManager::GetTypeManager().GetOrCreateClassOrInterfaceType(classNameMpl, klass.IsInterface(), + FETypeFlag::kSrcInput, isCreate); + error = false; + // fill global type name table + GStrIdx typeNameIdx = type->GetNameStrIdx(); + TyIdx prevTyIdx = GlobalTables::GetTypeNameTable().GetTyIdxFromGStrIdx(typeNameIdx); + if (prevTyIdx == TyIdx(0)) { + GlobalTables::GetTypeNameTable().SetGStrIdxToTyIdx(typeNameIdx, type->GetTypeIndex()); + } + return isCreate ? 
type : nullptr;
+}
+
+TypeAttrs JBCClass2FEHelper::GetStructAttributeFromInputImpl() const {
+  const uint8 bitOfUInt16 = 16;
+  TypeAttrs attrs;
+  uint16 klassAccessFlag = klass.GetAccessFlag();
+  for (uint8 bit = 0; bit < bitOfUInt16; bit++) {
+    uint16 flag = static_cast<uint16>(klassAccessFlag & (1u << bit));
+    switch (flag) {
+      case jbc::kAccClassPublic:
+        attrs.SetAttr(ATTR_public);
+        break;
+      case jbc::kAccClassFinal:
+        attrs.SetAttr(ATTR_final);
+        break;
+      case jbc::kAccClassSuper:
+      case jbc::kAccClassInterface:
+        break;
+      case jbc::kAccClassAbstract:
+        attrs.SetAttr(ATTR_abstract);
+        break;
+      case jbc::kAccClassSynthetic:
+        attrs.SetAttr(ATTR_synthetic);
+        break;
+      case jbc::kAccClassAnnotation:
+        attrs.SetAttr(ATTR_annotation);
+        break;
+      case jbc::kAccClassEnum:
+        attrs.SetAttr(ATTR_enum);
+        break;
+      default:
+        break;
+    }
+  }
+  return attrs;
+}
+
+uint64 JBCClass2FEHelper::GetRawAccessFlagsImpl() const {
+  return uint64{ klass.GetAccessFlag() };
+}
+
+GStrIdx JBCClass2FEHelper::GetIRSrcFileSigIdxImpl() const {
+  // Not implemented, just return an invalid value
+  return GStrIdx(0);
+}
+
+bool JBCClass2FEHelper::IsMultiDefImpl() const {
+  // Not implemented, always return false
+  return false;
+}
+
+void JBCClass2FEHelper::InitFieldHelpersImpl() {
+  MemPool *mp = allocator.GetMemPool();
+  ASSERT(mp != nullptr, "mem pool is nullptr");
+  for (jbc::JBCClassField *field : klass.GetFields()) {
+    ASSERT(field != nullptr, "field is nullptr");
+    JBCClassField2FEHelper *fieldHelper = mp->New<JBCClassField2FEHelper>(allocator, *field);
+    fieldHelpers.push_back(fieldHelper);
+  }
+}
+
+void JBCClass2FEHelper::InitMethodHelpersImpl() {
+  MemPool *mp = allocator.GetMemPool();
+  ASSERT(mp != nullptr, "mem pool is nullptr");
+  for (jbc::JBCClassMethod *method : klass.GetMethods()) {
+    ASSERT(method != nullptr, "method is nullptr");
+    JBCClassMethod2FEHelper *methodHelper = mp->New<JBCClassMethod2FEHelper>(allocator, *method);
+    methodHelpers.push_back(methodHelper);
+  }
+}
+
+std::string JBCClass2FEHelper::GetSrcFileNameImpl() const {
+  return klass.GetFileName();
+}
+
+// ---------- JBCClassField2FEHelper ----------
+bool JBCClassField2FEHelper::ProcessDeclImpl(MapleAllocator &allocator) {
+  (void) allocator;
+  CHECK_FATAL(false, "should not run here");
+  return false;
+}
+
+bool JBCClassField2FEHelper::ProcessDeclWithContainerImpl(MapleAllocator &allocator) {
+  std::string klassName = field.GetClassName();
+  std::string fieldName = field.GetName();
+  std::string typeName = field.GetDescription();
+  if (fieldName.empty()) {
+    ERR(kLncErr, "invalid name_index(%d) for field in class %s", field.GetNameIdx(), klassName.c_str());
+    return false;
+  }
+  if (typeName.empty()) {
+    ERR(kLncErr, "invalid descriptor_index(%d) for field in class %s", field.GetDescriptionIdx(), klassName.c_str());
+    return false;
+  }
+  FEOptions::ModeJavaStaticFieldName modeStaticField = FEOptions::GetInstance().GetModeJavaStaticFieldName();
+  bool withType = (modeStaticField == FEOptions::kAllType) ||
+                  (modeStaticField == FEOptions::kSmart && field.IsStatic());
+  std::string name = field.IsStatic() ? (klassName + "|") : "";
+  name += fieldName;
+  name += withType ?
("|" + typeName) : ""; + GStrIdx idx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(namemangler::EncodeName(name)); + StructElemNameIdx *structElemNameIdx = allocator.GetMemPool()->New(klassName, fieldName, typeName); + FEStructElemInfo *elemInfo = FEManager::GetTypeManager().RegisterStructFieldInfo( + *structElemNameIdx, kSrcLangJava, field.IsStatic()); + elemInfo->SetDefined(); + elemInfo->SetFromDex(); + FieldAttrs attrs = AccessFlag2Attribute(field.GetAccessFlag()); + std::string typeNameMpl = namemangler::EncodeName(typeName); + MIRType *fieldType = FEManager::GetTypeManager().GetOrCreateTypeFromName(typeNameMpl, FETypeFlag::kSrcUnknown, true); + ASSERT(fieldType != nullptr, "nullptr check for fieldType"); + mirFieldPair.first = idx; + mirFieldPair.second.first = fieldType->GetTypeIndex(); + mirFieldPair.second.second = attrs; + return true; +} + +FieldAttrs JBCClassField2FEHelper::AccessFlag2Attribute(uint16 accessFlag) { + const uint32 bitOfUInt16 = 16; + FieldAttrs attrs; + for (uint32 bit = 0; bit < bitOfUInt16; bit++) { + uint16 flag = static_cast(accessFlag & (1u << bit)); + switch (flag) { + case jbc::JBCClassFieldAccessFlag::kAccFieldPublic: + attrs.SetAttr(FLDATTR_public); + break; + case jbc::JBCClassFieldAccessFlag::kAccFieldPrivate: + attrs.SetAttr(FLDATTR_private); + break; + case jbc::JBCClassFieldAccessFlag::kAccFieldProtected: + attrs.SetAttr(FLDATTR_protected); + break; + case jbc::JBCClassFieldAccessFlag::kAccFieldStatic: + attrs.SetAttr(FLDATTR_static); + break; + case jbc::JBCClassFieldAccessFlag::kAccFieldFinal: + attrs.SetAttr(FLDATTR_final); + break; + case jbc::JBCClassFieldAccessFlag::kAccFieldVolatile: + attrs.SetAttr(FLDATTR_volatile); + break; + case jbc::JBCClassFieldAccessFlag::kAccFieldTransient: + attrs.SetAttr(FLDATTR_transient); + break; + case jbc::JBCClassFieldAccessFlag::kAccFieldSynthetic: + attrs.SetAttr(FLDATTR_synthetic); + break; + case jbc::JBCClassFieldAccessFlag::kAccFieldEnum: + attrs.SetAttr(FLDATTR_enum); + break; + default: + break; + } + } + return attrs; +} + +// ---------- JBCClassMethod2FEHelper ---------- +JBCClassMethod2FEHelper::JBCClassMethod2FEHelper(MapleAllocator &allocator, const jbc::JBCClassMethod &methodIn) + : FEInputMethodHelper(allocator), + method(methodIn) { + srcLang = kSrcLangJava; +} + +bool JBCClassMethod2FEHelper::ProcessDeclImpl(MapleAllocator &allocator) { + StructElemNameIdx *structElemNameIdx = allocator.GetMemPool()->New( + method.GetClassName(), method.GetName(), method.GetDescription()); + FEStructElemInfo *elemInfo = FEManager::GetTypeManager().RegisterStructMethodInfo( + *structElemNameIdx, kSrcLangJava, IsStatic()); + elemInfo->SetDefined(); + elemInfo->SetFromDex(); + return FEInputMethodHelper::ProcessDeclImpl(allocator); +} + +void JBCClassMethod2FEHelper::SolveReturnAndArgTypesImpl(MapleAllocator &allocator) { + MemPool *mp = allocator.GetMemPool(); + ASSERT(mp != nullptr, "mempool is nullptr"); + std::string klassName = method.GetClassName(); + std::string methodName = GetMethodName(false); + if (HasThis()) { + FEIRTypeDefault *type = mp->New(); + type->LoadFromJavaTypeName(klassName, false); + argTypes.push_back(type); + } + std::vector returnAndArgTypeNames = FEUtilJava::SolveMethodSignature(methodName); + bool first = true; + for (const std::string &typeName : returnAndArgTypeNames) { + FEIRTypeDefault *type = mp->New(); + type->LoadFromJavaTypeName(typeName, false); + if (first) { + retType = type; + first = false; + } else { + argTypes.push_back(type); + } + } +} + +std::string 
JBCClassMethod2FEHelper::GetMethodNameImpl(bool inMpl, bool full) const { + const jbc::JBCConstPool &constPool = method.GetConstPool(); + std::string klassName = method.GetClassName(); + std::string methodName = method.GetName(constPool); + if (!full) { + return inMpl ? namemangler::EncodeName(methodName) : methodName; + } + std::string descName = method.GetDescription(constPool); + std::string fullName = klassName + "|" + methodName + "|" + descName; + return inMpl ? namemangler::EncodeName(fullName) : fullName; +} + +FuncAttrs JBCClassMethod2FEHelper::GetAttrsImpl() const { + FuncAttrs attrs; + const uint32 bitOfUInt16 = 16; + uint16 accessFlag = method.GetAccessFlag(); + for (uint32 bit = 0; bit < bitOfUInt16; bit++) { + uint16 flag = static_cast(accessFlag & (1u << bit)); + switch (flag) { + case jbc::JBCClassMethodAccessFlag::kAccMethodPublic: + attrs.SetAttr(FUNCATTR_public); + break; + case jbc::JBCClassMethodAccessFlag::kAccMethodPrivate: + attrs.SetAttr(FUNCATTR_private); + break; + case jbc::JBCClassMethodAccessFlag::kAccMethodProtected: + attrs.SetAttr(FUNCATTR_protected); + break; + case jbc::JBCClassMethodAccessFlag::kAccMethodStatic: + attrs.SetAttr(FUNCATTR_static); + break; + case jbc::JBCClassMethodAccessFlag::kAccMethodFinal: + attrs.SetAttr(FUNCATTR_final); + break; + case jbc::JBCClassMethodAccessFlag::kAccMethodSynchronized: + attrs.SetAttr(FUNCATTR_synchronized); + break; + case jbc::JBCClassMethodAccessFlag::kAccMethodBridge: + attrs.SetAttr(FUNCATTR_bridge); + break; + case jbc::JBCClassMethodAccessFlag::kAccMethodVarargs: + attrs.SetAttr(FUNCATTR_varargs); + break; + case jbc::JBCClassMethodAccessFlag::kAccMethodNative: + attrs.SetAttr(FUNCATTR_native); + break; + case jbc::JBCClassMethodAccessFlag::kAccMethodAbstract: + attrs.SetAttr(FUNCATTR_abstract); + break; + case jbc::JBCClassMethodAccessFlag::kAccMethodStrict: + attrs.SetAttr(FUNCATTR_strict); + break; + case jbc::JBCClassMethodAccessFlag::kAccMethodSynthetic: + attrs.SetAttr(FUNCATTR_synthetic); + break; + default: + break; + } + attrs.SetAttr(IsVirtual() ? 
FUNCATTR_virtual : static_cast(0)); + } + return attrs; +} + +bool JBCClassMethod2FEHelper::IsStaticImpl() const { + uint16 accessFlag = method.GetAccessFlag(); + if ((accessFlag & jbc::JBCClassMethodAccessFlag::kAccMethodStatic) != 0) { + return true; + } + if (IsClinit()) { + return true; + } + return false; +} + +bool JBCClassMethod2FEHelper::IsVargImpl() const { + return false; +} + +bool JBCClassMethod2FEHelper::HasThisImpl() const { + return !IsStatic(); +} + +bool JBCClassMethod2FEHelper::IsClinit() const { + const jbc::JBCConstPool &constPool = method.GetConstPool(); + std::string methodName = method.GetName(constPool); + return methodName.compare("") == 0; +} + +bool JBCClassMethod2FEHelper::IsInit() const { + const jbc::JBCConstPool &constPool = method.GetConstPool(); + std::string methodName = method.GetName(constPool); + return methodName.compare("") == 0; +} + +bool JBCClassMethod2FEHelper::IsVirtualImpl() const { + return method.IsVirtual(); +} + +bool JBCClassMethod2FEHelper::IsNativeImpl() const { + return method.IsNative(); +} + +MIRType *JBCClassMethod2FEHelper::GetTypeForThisImpl() const { + FEIRTypeDefault type; + std::string klassName = method.GetClassName(); + type.LoadFromJavaTypeName(klassName, false); + return type.GenerateMIRType(true); +} + +bool JBCClassMethod2FEHelper::HasCodeImpl() const { + return method.HasCode(); +} +} // namespace maple diff --git a/src/hir2mpl/bytecode_input/class/src/jbc_class_const.cpp b/src/hir2mpl/bytecode_input/class/src/jbc_class_const.cpp new file mode 100644 index 0000000000000000000000000000000000000000..029e965631c6b145af5b46a4c0cd1bd44f72152e --- /dev/null +++ b/src/hir2mpl/bytecode_input/class/src/jbc_class_const.cpp @@ -0,0 +1,519 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "jbc_class_const.h" +#include +#include "mpl_logging.h" +#include "jbc_class.h" +#include "jbc_util.h" +#include "fe_manager.h" + +namespace maple { +namespace jbc { +// ---------- JBCConstTagName ---------- +std::map JBCConstTagName::tagNameMap = JBCConstTagName::InitTagNameMap(); +std::map JBCConstTagName::InitTagNameMap() { + std::map ret; + ret[kConstUnknown] = "ConstUnknown"; +#undef JBC_CONST +#define JBC_CONST(tag, tagName, className) \ + ret[tag] = tagName; +#include "jbc_class_const.def" +#undef JBC_CONST + return ret; +} + +std::string JBCConstTagName::GetTagName(JBCConstTag tag) { + std::map::const_iterator it = tagNameMap.find(tag); + if (it != tagNameMap.end()) { + return it->second; + } + std::stringstream ss; + ss << "unknown tag (" << static_cast(tag) << ")"; + return ss.str(); +} + +// ---------- JBCConst ---------- +SimpleXMLElem *JBCConst::GenXMLElemImpl(MapleAllocator &allocIn, uint32 id) const { + (void) allocIn; + (void) id; + return nullptr; +} + +JBCConst *JBCConst::InConst(MapleAllocator &alloc, BasicIORead &io) { + MemPool *mp = alloc.GetMemPool(); + ASSERT(mp, "mp is nullptr"); + JBCConst *constObj = nullptr; + uint8 t = io.ReadUInt8(); + switch (t) { +#undef JBC_CONST +#define JBC_CONST(tag, tagName, ConstClassType) \ + case tag: \ + constObj = mp->New(alloc, static_cast(t)); \ + if (constObj->ParseFile(io) == false) { \ + return nullptr; \ + } \ + break; +#include "jbc_class_const.def" +#undef JBC_CONST + default: + break; + } + return constObj; +} + +std::string JBCConst::InternalNameToFullName(const std::string &name) { + // ref: https://docs.oracle.com/javase/specs/jvms/se8/html/jvms-4.html#jvms-4.4.1 + if (name[0] == '[') { + return name; + } else { + return "L" + name + ";"; + } +} + +std::string JBCConst::FullNameToInternalName(const std::string &name) { + if (name[0] == '[') { + return name; + } else { + return name.substr(1, name.length() - 2); // 1 : start pos, name.length() - 2 : substr length + } +} + +// ---------- JBCConstUTF8 ---------- +JBCConstUTF8::JBCConstUTF8(MapleAllocator &alloc, JBCConstTag t) + : JBCConst(alloc, t), length(0), strIdx(0), str("", alloc.GetMemPool()) {} + +JBCConstUTF8::JBCConstUTF8(MapleAllocator &alloc, JBCConstTag t, const std::string &argStr) + : JBCConst(alloc, t), str(argStr, alloc.GetMemPool()) { + CHECK_FATAL(t == kConstUTF8, "invalid tag"); + size_t rawLength = str.length(); + CHECK_FATAL(rawLength < UINT16_MAX, "input string is too long"); + length = static_cast(rawLength); + strIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(MapleStringToStd(str)); +} + +bool JBCConstUTF8::ParseFileImpl(BasicIORead &io) { + bool success = false; + length = io.ReadUInt16(success); + str = io.ReadString(length, success); + strIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(MapleStringToStd(str)); + return success; +} + +bool JBCConstUTF8::PreProcessImpl(const JBCConstPool &constPool) { + (void) constPool; + return true; +} + +SimpleXMLElem *JBCConstUTF8::GenXMLElemImpl(MapleAllocator &alloc, uint32 id) const { + (void) alloc; + (void) id; + return nullptr; +} + +// ---------- JBCConst4Byte ---------- +JBCConst4Byte::JBCConst4Byte(MapleAllocator &alloc, JBCConstTag t) : JBCConst(alloc, t) { + value.raw = 0; +} + +JBCConst4Byte::JBCConst4Byte(MapleAllocator &alloc, JBCConstTag t, int32 arg) : JBCConst(alloc, t) { + CHECK_FATAL(t == kConstInteger, "invalid tag"); + value.ivalue = arg; +} + +JBCConst4Byte::JBCConst4Byte(MapleAllocator &alloc, JBCConstTag t, float arg) : JBCConst(alloc, t) { + 
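+  // Note (reference added here, not from the original sources): per JVMS §4.4.4 both
+  // CONSTANT_Integer_info and CONSTANT_Float_info carry a single u4 "bytes" field, so one
+  // class with a raw/ivalue/fvalue union covers both tags and ParseFileImpl reads one
+  // uint32 regardless of which tag was seen.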
CHECK_FATAL(t == kConstFloat, "invalid tag"); + value.fvalue = arg; +} + +bool JBCConst4Byte::ParseFileImpl(BasicIORead &io) { + bool success = false; + value.raw = io.ReadUInt32(success); + return success; +} + +bool JBCConst4Byte::PreProcessImpl(const JBCConstPool &constPool) { + (void) constPool; + return true; +} + +SimpleXMLElem *JBCConst4Byte::GenXMLElemImpl(MapleAllocator &alloc, uint32 id) const { + (void) alloc; + (void) id; + return nullptr; +} + +// ---------- JBCConst8Byte ---------- +JBCConst8Byte::JBCConst8Byte(MapleAllocator &alloc, JBCConstTag t) : JBCConst(alloc, t) { + value.raw = 0; +} + +JBCConst8Byte::JBCConst8Byte(MapleAllocator &alloc, JBCConstTag t, int64 arg) : JBCConst(alloc, t) { + CHECK_FATAL(t == kConstLong, "invalid tag"); + value.lvalue = arg; +} + +JBCConst8Byte::JBCConst8Byte(MapleAllocator &alloc, JBCConstTag t, double arg) : JBCConst(alloc, t) { + CHECK_FATAL(t == kConstDouble, "invalid tag"); + value.dvalue = arg; +} + +bool JBCConst8Byte::ParseFileImpl(BasicIORead &io) { + bool success = false; + value.raw = io.ReadUInt64(success); + return success; +} + +bool JBCConst8Byte::PreProcessImpl(const JBCConstPool &constPool) { + (void) constPool; + return true; +} + +SimpleXMLElem *JBCConst8Byte::GenXMLElemImpl(MapleAllocator &alloc, uint32 id) const { + (void) alloc; + (void) id; + return nullptr; +} + +// ---------- JBCConstClass ---------- +JBCConstClass::JBCConstClass(MapleAllocator &alloc, JBCConstTag t) + : JBCConst(alloc, t), + constUTF8(nullptr), + strIdxOrin(0), + strIdxMpl(0), + nameOrin("", alloc.GetMemPool()), + nameMpl("", alloc.GetMemPool()) { + rawData.nameIdx = 0; + feType = alloc.GetMemPool()->New(PTY_ref); +} + +JBCConstClass::JBCConstClass(MapleAllocator &alloc, JBCConstTag t, JBCConstPoolIdx argNameIdx) + : JBCConst(alloc, t), + constUTF8(nullptr), + strIdxOrin(0), + strIdxMpl(0), + nameOrin("", alloc.GetMemPool()), + nameMpl("", alloc.GetMemPool()) { + CHECK_FATAL(t == kConstClass, "invalid tag"); + rawData.nameIdx = argNameIdx; + feType = alloc.GetMemPool()->New(PTY_ref); +} + +JBCConstClass::~JBCConstClass() { + constUTF8 = nullptr; + feType = nullptr; +} + +bool JBCConstClass::ParseFileImpl(BasicIORead &io) { + bool success = false; + rawData.nameIdx = io.ReadUInt16(success); + return success; +} + +bool JBCConstClass::PreProcessImpl(const JBCConstPool &constPool) { + constUTF8 = static_cast(constPool.GetConstByIdxWithTag(rawData.nameIdx, kConstUTF8)); + if (constUTF8 == nullptr) { + return false; + } + const std::string &classNameInternal = constUTF8->GetString(); + nameOrin = JBCUtil::ClassInternalNameToFullName(classNameInternal); + strIdxOrin = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(MapleStringToStd(nameOrin)); + nameMpl = namemangler::EncodeName(nameOrin.c_str()); + strIdxMpl = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(MapleStringToStd(nameMpl)); + static_cast(feType)->LoadFromJavaTypeName(MapleStringToStd(nameMpl), true); + return true; +} + +SimpleXMLElem *JBCConstClass::GenXMLElemImpl(MapleAllocator &alloc, uint32 id) const { + (void) alloc; + (void) id; + return nullptr; +} + +// ---------- JBCConstString ---------- +JBCConstString::JBCConstString(MapleAllocator &alloc, JBCConstTag t) + : JBCConst(alloc, t), strIdx(0), str("", alloc.GetMemPool()) { + rawData.stringIdx = 0; +} + +JBCConstString::JBCConstString(MapleAllocator &alloc, JBCConstTag t, JBCConstPoolIdx argStringIdx) + : JBCConst(alloc, t), strIdx(0), str("", alloc.GetMemPool()) { + CHECK_FATAL(t == kConstString, "invalid tag"); + 
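+  // Note (reference added here, not from the original sources): CONSTANT_String_info is
+  // { u1 tag; u2 string_index; } (JVMS §4.4.3), and string_index must point at a
+  // CONSTANT_Utf8_info entry; PreProcessImpl resolves it via GetConstByIdxWithTag(..., kConstUTF8).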
rawData.stringIdx = argStringIdx; +} + +bool JBCConstString::ParseFileImpl(BasicIORead &io) { + bool success = false; + rawData.stringIdx = io.ReadUInt16(success); + return success; +} + +bool JBCConstString::PreProcessImpl(const JBCConstPool &constPool) { + const JBCConstUTF8 *constUTF8 = + static_cast(constPool.GetConstByIdxWithTag(rawData.stringIdx, kConstUTF8)); + if (constUTF8 == nullptr) { + return false; + } + strIdx = constUTF8->GetStrIdx(); + str = constUTF8->GetString(); + return true; +} + +SimpleXMLElem *JBCConstString::GenXMLElemImpl(MapleAllocator &alloc, uint32 id) const { + (void) alloc; + (void) id; + return nullptr; +} + +// ---------- JBCConstRef ---------- +JBCConstRef::JBCConstRef(MapleAllocator &alloc, JBCConstTag t) + : JBCConst(alloc, t), constClass(nullptr), constNameAndType(nullptr), feStructElemInfo(nullptr) { + rawData.classIdx = 0; + rawData.nameAndTypeIdx = 0; +} + +JBCConstRef::JBCConstRef(MapleAllocator &alloc, JBCConstTag t, JBCConstPoolIdx argClassIdx, + JBCConstPoolIdx argClassNameAndTypeIdx) + : JBCConst(alloc, t), constClass(nullptr), constNameAndType(nullptr), feStructElemInfo(nullptr) { + rawData.classIdx = argClassIdx; + rawData.nameAndTypeIdx = argClassNameAndTypeIdx; +} + +JBCConstRef::~JBCConstRef() { + constClass = nullptr; + constNameAndType = nullptr; + feStructElemInfo = nullptr; +} + +bool JBCConstRef::ParseFileImpl(BasicIORead &io) { + bool success = false; + rawData.classIdx = io.ReadUInt16(success); + rawData.nameAndTypeIdx = io.ReadUInt16(success); + return success; +} + +bool JBCConstRef::PreProcessImpl(const JBCConstPool &constPool) { + constClass = static_cast(constPool.GetConstByIdxWithTag(rawData.classIdx, kConstClass)); + constNameAndType = static_cast(constPool.GetConstByIdxWithTag(rawData.nameAndTypeIdx, + kConstNameAndType)); + return (constClass != nullptr) && (constNameAndType != nullptr); +} + +bool JBCConstRef::PrepareFEStructElemInfo() { + CHECK_NULL_FATAL(constClass); + CHECK_NULL_FATAL(constNameAndType); + const std::string &className = constClass->GetClassNameOrin(); + const std::string &elemName = constNameAndType->GetName(); + const std::string &descName = constNameAndType->GetDesc(); + StructElemNameIdx *structElemNameIdx = alloc.GetMemPool()->New(className, elemName, descName); + if (tag == kConstFieldRef) { + feStructElemInfo = FEManager::GetTypeManager().RegisterStructFieldInfo( + *structElemNameIdx, kSrcLangJava, false); + } else { + feStructElemInfo = FEManager::GetTypeManager().RegisterStructMethodInfo( + *structElemNameIdx, kSrcLangJava, false); + } + return feStructElemInfo != nullptr; +} + +SimpleXMLElem *JBCConstRef::GenXMLElemImpl(MapleAllocator &alloc, uint32 id) const { + (void) alloc; + (void) id; + return nullptr; +} + +const std::string JBCConstRef::GetName() const { + CHECK_FATAL(constNameAndType != nullptr, "invalid const index"); + return constNameAndType->GetName(); +} + +const std::string JBCConstRef::GetDesc() const { + CHECK_FATAL(constNameAndType != nullptr, "invalid const index"); + return constNameAndType->GetDesc(); +} + +// ---------- JBCConstNameAndType ---------- +JBCConstNameAndType::JBCConstNameAndType(MapleAllocator &alloc, JBCConstTag t) + : JBCConst(alloc, t), constName(nullptr), constDesc(nullptr) { + rawData.nameIdx = 0; + rawData.descIdx = 0; +} + +JBCConstNameAndType::JBCConstNameAndType(MapleAllocator &alloc, JBCConstTag t, JBCConstPoolIdx argNameIdx, + JBCConstPoolIdx argDescIdx) + : JBCConst(alloc, t), constName(nullptr), constDesc(nullptr) { + rawData.nameIdx = argNameIdx; + 
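+  // Note (reference added here, not from the original sources): CONSTANT_NameAndType_info is
+  // { u1 tag; u2 name_index; u2 descriptor_index; } (JVMS §4.4.6); both indices refer to
+  // CONSTANT_Utf8_info entries, e.g. a name such as "equals" and a descriptor such as
+  // "(Ljava/lang/Object;)Z".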
rawData.descIdx = argDescIdx; +} + +JBCConstNameAndType::~JBCConstNameAndType() { + constName = nullptr; + constDesc = nullptr; +} + +bool JBCConstNameAndType::ParseFileImpl(BasicIORead &io) { + bool success = false; + rawData.nameIdx = io.ReadUInt16(success); + rawData.descIdx = io.ReadUInt16(success); + return success; +} + +bool JBCConstNameAndType::PreProcessImpl(const JBCConstPool &constPool) { + constName = static_cast(constPool.GetConstByIdxWithTag(rawData.nameIdx, kConstUTF8)); + constDesc = static_cast(constPool.GetConstByIdxWithTag(rawData.descIdx, kConstUTF8)); + return (constName != nullptr) && (constDesc != nullptr); +} + +SimpleXMLElem *JBCConstNameAndType::GenXMLElemImpl(MapleAllocator &alloc, uint32 id) const { + (void) alloc; + (void) id; + return nullptr; +} + +// ---------- JBCConstMethodHandleInfo ---------- +JBCConstMethodHandleInfo::JBCConstMethodHandleInfo(MapleAllocator &alloc, JBCConstTag t) + : JBCConst(alloc, t), constRef(nullptr) { + rawData.refKind = 0; + rawData.refIdx = 0; +} + +JBCConstMethodHandleInfo::~JBCConstMethodHandleInfo() { + constRef = nullptr; +} + +bool JBCConstMethodHandleInfo::ParseFileImpl(BasicIORead &io) { + bool success = false; + rawData.refKind = io.ReadUInt8(success); + rawData.refIdx = io.ReadUInt16(success); + return success; +} + +bool JBCConstMethodHandleInfo::PreProcessImpl(const JBCConstPool &constPool) { + constRef = nullptr; + switch (rawData.refKind) { + case kRefGetField: + case kRefGetStatic: + case kRefPutField: + case kRefPutStatic: + constRef = static_cast(constPool.GetConstByIdxWithTag(rawData.refIdx, kConstFieldRef)); + break; + case jbc::kRefInvokeVirtual: + case jbc::kRefNewInvokeSpecial: + constRef = static_cast(constPool.GetConstByIdxWithTag(rawData.refIdx, kConstMethodRef)); + break; + case jbc::kRefInvokeStatic: + case jbc::kRefInvokeSpecial: + if (constPool.GetMajorVersion() < 52) { // 52 : class file version number + constRef = static_cast(constPool.GetConstByIdxWithTag(rawData.refIdx, kConstMethodRef)); + } else { + constRef = static_cast(constPool.GetConstByIdx(rawData.refIdx)); + CHECK_NULL_FATAL(constRef); + if (constRef->GetTag() != kConstMethodRef && constRef->GetTag() != kConstInterfaceMethodRef) { + ERR(kLncErr, "Unexpected tag (%s) for const MethodHandle info. 
Expected %s or %s", + JBCConstTagName::GetTagName(constRef->GetTag()).c_str(), + JBCConstTagName::GetTagName(kConstMethodRef).c_str(), + JBCConstTagName::GetTagName(kConstInterfaceMethodRef).c_str()); + return false; + } + } + break; + case jbc::kRefInvokeInterface: + constRef = static_cast(constPool.GetConstByIdxWithTag(rawData.refIdx, + kConstInterfaceMethodRef)); + break; + default: + CHECK_FATAL(false, "Unsupported ref kind (%d)", rawData.refKind); + break; + } + return constRef != nullptr; +} + +SimpleXMLElem *JBCConstMethodHandleInfo::GenXMLElemImpl(MapleAllocator &alloc, uint32 id) const { + (void) alloc; + (void) id; + return nullptr; +} + +// ---------- JBCConstMethodType ---------- +JBCConstMethodType::JBCConstMethodType(MapleAllocator &alloc, JBCConstTag t) + : JBCConst(alloc, t), constDesc(nullptr) { + rawData.descIdx = 0; +} + +JBCConstMethodType::~JBCConstMethodType() { + constDesc = nullptr; +} + +bool JBCConstMethodType::ParseFileImpl(BasicIORead &io) { + bool success = false; + rawData.descIdx = io.ReadUInt16(success); + return success; +} + +bool JBCConstMethodType::PreProcessImpl(const JBCConstPool &constPool) { + constDesc = static_cast(constPool.GetConstByIdxWithTag(rawData.descIdx, kConstUTF8)); + return constDesc != nullptr; +} + +SimpleXMLElem *JBCConstMethodType::GenXMLElemImpl(MapleAllocator &alloc, uint32 id) const { + (void) alloc; + (void) id; + return nullptr; +} + +// ---------- JBCConstInvokeDynamic ---------- +JBCConstInvokeDynamic::JBCConstInvokeDynamic(MapleAllocator &alloc, JBCConstTag t) + : JBCConst(alloc, t), constNameAndType(nullptr), feStructElemInfo(nullptr) { + rawData.bsmAttrIdx = 0; + rawData.nameAndTypeIdx = 0; +} + +JBCConstInvokeDynamic::~JBCConstInvokeDynamic() { + constNameAndType = nullptr; + feStructElemInfo = nullptr; +} + +bool JBCConstInvokeDynamic::PrepareFEStructElemInfo(const std::string &ownerClassName) { + CHECK_NULL_FATAL(constNameAndType); + const std::string &className = ownerClassName + "$DynamicCall$"; + const std::string &elemName = constNameAndType->GetName(); + const std::string &descName = constNameAndType->GetDesc(); + StructElemNameIdx *structElemNameIdx = alloc.GetMemPool()->New(className, elemName, descName); + feStructElemInfo = FEManager::GetTypeManager().RegisterStructMethodInfo( + *structElemNameIdx, kSrcLangJava, false); + static_cast(feStructElemInfo)->SetJavaDyamicCall(); + return feStructElemInfo != nullptr; +} + +bool JBCConstInvokeDynamic::ParseFileImpl(BasicIORead &io) { + bool success = false; + rawData.bsmAttrIdx = io.ReadUInt16(success); + rawData.nameAndTypeIdx = io.ReadUInt16(success); + return success; +} + +bool JBCConstInvokeDynamic::PreProcessImpl(const JBCConstPool &constPool) { + constNameAndType = + static_cast(constPool.GetConstByIdxWithTag(rawData.nameAndTypeIdx, + kConstNameAndType)); + return constNameAndType != nullptr; +} + +SimpleXMLElem *JBCConstInvokeDynamic::GenXMLElemImpl(MapleAllocator &alloc, uint32 id) const { + (void) alloc; + (void) id; + return nullptr; +} +} // namespace jbc +} // namespace maple diff --git a/src/hir2mpl/bytecode_input/class/src/jbc_class_const_pool.cpp b/src/hir2mpl/bytecode_input/class/src/jbc_class_const_pool.cpp new file mode 100644 index 0000000000000000000000000000000000000000..49703d94fbe329f927371864e79e78665792f72e --- /dev/null +++ b/src/hir2mpl/bytecode_input/class/src/jbc_class_const_pool.cpp @@ -0,0 +1,241 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. 
+ * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "jbc_class_const_pool.h" +#include + +namespace maple { +namespace jbc { +JBCConstPool::JBCConstPool(MapleAllocator &alloc) + : allocator(alloc), + pool(allocator.Adapter()), + majorVersion(0) { + pool.push_back(nullptr); +} + +uint16 JBCConstPool::InsertConst(JBCConst &objConst) { + size_t ans = pool.size(); + pool.push_back(&objConst); + CHECK_FATAL(ans < 0xFFFF, "const pool is full"); + return static_cast(ans); +} + +void JBCConstPool::InsertConstDummyForWide() { + size_t ans = pool.size(); + pool.push_back(nullptr); + CHECK_FATAL(ans < 0xFFFF, "const pool is full"); +} + +const JBCConst *JBCConstPool::GetConstByIdx(uint16 idx, bool safed) const { + if (idx == 0 || idx >= pool.size()) { + std::stringstream ss; + ss << "invalid const pool idx " << static_cast(idx) << "."; + CHECK_FATAL(!safed, "%s", ss.str().c_str()); + ERR(kLncErr, "%s", ss.str().c_str()); + return nullptr; + } + return pool[idx]; +} + +const JBCConst *JBCConstPool::GetConstByIdxWithTag(uint16 idx, JBCConstTag tag, bool safed) const { + const JBCConst *obj = GetConstByIdx(idx, safed); + if (obj != nullptr && obj->GetTag() != tag) { + std::stringstream ss; + ss << "invalid const pool idx " << static_cast(idx) << " (tag=" << + JBCConstTagName::GetTagName(obj->GetTag()).c_str() << ", expect " << + JBCConstTagName::GetTagName(tag).c_str() << ")."; + CHECK_FATAL(!safed, "%s Exit.", ss.str().c_str()); + ERR(kLncErr, "%s", ss.str().c_str()); + return nullptr; + } + return obj; +} + +const JBCConst *JBCConstPool::GetConstValueByIdx(uint16 idx, bool safed) const { + const JBCConst *obj = GetConstByIdx(idx, safed); + if (obj != nullptr && obj->IsValue() == false) { + std::stringstream ss; + ss << "invalid const pool idx " << static_cast(idx) << " (not value const)."; + CHECK_FATAL(!safed, "%s Exit.", ss.str().c_str()); + ERR(kLncErr, "%s", ss.str().c_str()); + return nullptr; + } + return obj; +} + +const JBCConst *JBCConstPool::GetConstValue4ByteByIdx(uint16 idx, bool safed) const { + const JBCConst *obj = GetConstByIdx(idx, safed); + if (obj != nullptr && obj->IsValue4Byte() == false) { + std::stringstream ss; + ss << "invalid const pool idx " << static_cast(idx) << " (not 4-byte value const)."; + CHECK_FATAL(!safed, "%s Exit.", ss.str().c_str()); + ERR(kLncErr, "%s", ss.str().c_str()); + return nullptr; + } + return obj; +} + +const JBCConst *JBCConstPool::GetConstValue8ByteByIdx(uint16 idx, bool safed) const { + const JBCConst *obj = GetConstByIdx(idx, safed); + if (obj != nullptr && obj->IsValue8Byte() == false) { + std::stringstream ss; + ss << "invalid const pool idx " << static_cast(idx) << " (not 8-byte value const)."; + CHECK_FATAL(!safed, "%s Exit.", ss.str().c_str()); + ERR(kLncErr, "%s", ss.str().c_str()); + return nullptr; + } + return obj; +} + +std::string JBCConstPool::GetNameByClassInfoIdx(uint16 idx, bool safed) const { + const JBCConstClass *constClass = static_cast(GetConstByIdxWithTag(idx, kConstClass, safed)); + if (constClass == nullptr) { + return ""; + } else { + return constClass->GetClassNameOrin(); + } +} + +bool JBCConstPool::PreProcess(uint16 
argMajorVersion) { + majorVersion = argMajorVersion; + for (JBCConst *c : pool) { + if (c != nullptr && !c->PreProcess(*this)) { + return false; + } + } + return true; +} + +bool JBCConstPool::PrepareFEStructElemInfo(const std::string &ownerClassName) { + bool success = true; + for (JBCConst *c : pool) { + if (c == nullptr) { + continue; + } + JBCConstTag tag = c->GetTag(); + if (tag == JBCConstTag::kConstFieldRef || tag == JBCConstTag::kConstMethodRef || + tag == JBCConstTag::kConstInterfaceMethodRef) { + JBCConstRef *constRef = static_cast(c); + success = success && constRef->PrepareFEStructElemInfo(); + } + if (tag == JBCConstTag::kConstInvokeDynamic) { + JBCConstInvokeDynamic *constRef = static_cast(c); + success = success && constRef->PrepareFEStructElemInfo(ownerClassName); + } + } + return success; +} + +JBCConstUTF8 *JBCConstPool::NewConstUTF8(uint16 &idx, const std::string &str) { + MemPool *mp = allocator.GetMemPool(); + ASSERT(mp != nullptr, "mempool is nullptr"); + JBCConstUTF8 *constItem = mp->New(allocator, kConstUTF8, str); + idx = InsertConst(*constItem); + return constItem; +} + +JBCConst4Byte *JBCConstPool::NewConst4Byte(uint16 &idx, int32 value) { + MemPool *mp = allocator.GetMemPool(); + ASSERT(mp != nullptr, "mempool is nullptr"); + JBCConst4Byte *constItem = mp->New(allocator, kConstInteger, value); + idx = InsertConst(*constItem); + return constItem; +} + +JBCConst4Byte *JBCConstPool::NewConst4Byte(uint16 &idx, float value) { + MemPool *mp = allocator.GetMemPool(); + ASSERT(mp != nullptr, "mempool is nullptr"); + JBCConst4Byte *constItem = mp->New(allocator, kConstFloat, value); + idx = InsertConst(*constItem); + return constItem; +} + +JBCConst8Byte *JBCConstPool::NewConst8Byte(uint16 &idx, int64 value) { + MemPool *mp = allocator.GetMemPool(); + ASSERT(mp != nullptr, "mempool is nullptr"); + JBCConst8Byte *constItem = mp->New(allocator, kConstLong, value); + idx = InsertConst(*constItem); + return constItem; +} + +JBCConst8Byte *JBCConstPool::NewConst8Byte(uint16 &idx, double value) { + MemPool *mp = allocator.GetMemPool(); + ASSERT(mp != nullptr, "mempool is nullptr"); + JBCConst8Byte *constItem = mp->New(allocator, kConstDouble, value); + idx = InsertConst(*constItem); + return constItem; +} + +JBCConstClass *JBCConstPool::NewConstClass(uint16 &idx, const std::string &className) { + MemPool *mp = allocator.GetMemPool(); + ASSERT(mp != nullptr, "mempool is nullptr"); + std::string classNameInternal = JBCConst::FullNameToInternalName(className); + JBCConstUTF8 *constUTF8 = mp->New(allocator, kConstUTF8, classNameInternal); + uint16 idxTmp = InsertConst(*constUTF8); + CHECK_FATAL(idxTmp != UINT16_MAX, "constpool insert failed"); + JBCConstClass *constClass = mp->New(allocator, kConstClass, idxTmp); + CHECK_FATAL(constClass->PreProcess(*this), "New ConstClass failed"); + idx = InsertConst(*constClass); + return constClass; +} + +JBCConstString *JBCConstPool::NewConstString(uint16 &idx, const std::string &str) { + MemPool *mp = allocator.GetMemPool(); + ASSERT(mp != nullptr, "mempool is nullptr"); + JBCConstUTF8 *constUTF8 = mp->New(allocator, kConstUTF8, str); + uint16 idxTmp = InsertConst(*constUTF8); + CHECK_FATAL(idxTmp != UINT16_MAX, "constpool insert failed"); + JBCConstString *constString = mp->New(allocator, kConstString, idxTmp); + CHECK_FATAL(constString->PreProcess(*this), "New ConstClass failed"); + idx = InsertConst(*constString); + return constString; +} + +JBCConstRef *JBCConstPool::NewConstRef(uint16 &idx, JBCConstTag tag, const std::string &className, 
+ const std::string &name, const std::string &desc) { + MemPool *mp = allocator.GetMemPool(); + ASSERT(mp != nullptr, "mempool is nullptr"); + uint16 idxConstClass; + uint16 idxConstNameAndType; + JBCConstClass *constClass = NewConstClass(idxConstClass, className); + CHECK_FATAL(idxConstClass != UINT16_MAX, "constpool insert failed"); + CHECK_NULL_FATAL(constClass); + JBCConstNameAndType *constNameAndType = NewConstNameAndType(idxConstNameAndType, name, desc); + CHECK_FATAL(idxConstNameAndType != UINT16_MAX, "constpool insert failed"); + CHECK_NULL_FATAL(constNameAndType); + JBCConstRef *constRef = mp->New(allocator, tag, idxConstClass, idxConstNameAndType); + CHECK_FATAL(constRef->PreProcess(*this), "New ConstClass failed"); + idx = InsertConst(*constRef); + return constRef; +} + +JBCConstNameAndType *JBCConstPool::NewConstNameAndType(uint16 &idx, const std::string &name, const std::string &desc) { + MemPool *mp = allocator.GetMemPool(); + ASSERT(mp != nullptr, "mempool is nullptr"); + uint16 idxConstName; + uint16 idxConstDesc; + JBCConstUTF8 *constName = NewConstUTF8(idxConstName, name); + CHECK_FATAL(idxConstName != UINT16_MAX, "constpool insert failed"); + CHECK_NULL_FATAL(constName); + JBCConstUTF8 *constDesc = NewConstUTF8(idxConstDesc, desc); + CHECK_FATAL(idxConstDesc != UINT16_MAX, "constpool insert failed"); + CHECK_NULL_FATAL(constDesc); + JBCConstNameAndType *constNameAndType = mp->New(allocator, kConstNameAndType, idxConstName, + idxConstDesc); + CHECK_FATAL(constNameAndType->PreProcess(*this), "New ConstClass failed"); + idx = InsertConst(*constNameAndType); + return constNameAndType; +} +} // namespace jbc +} // namespace maple diff --git a/src/hir2mpl/bytecode_input/class/src/jbc_compiler_component.cpp b/src/hir2mpl/bytecode_input/class/src/jbc_compiler_component.cpp new file mode 100644 index 0000000000000000000000000000000000000000..bc5be8e8fb76aaa26242151328dcd5606a61a4d8 --- /dev/null +++ b/src/hir2mpl/bytecode_input/class/src/jbc_compiler_component.cpp @@ -0,0 +1,86 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "jbc_compiler_component.h" +#include "jbc_class2fe_helper.h" +#include "fe_timer.h" +#include "fe_manager.h" +#include "jbc_function.h" + +namespace maple { +JBCCompilerComponent::JBCCompilerComponent(MIRModule &module) + : HIR2MPLCompilerComponent(module, kSrcLangJava), + mp(FEUtils::NewMempool("MemPool for JBCCompilerComponent", false /* isLocalPool */)), + allocator(mp), + jbcInput(module) {} + +JBCCompilerComponent::~JBCCompilerComponent() { + mp = nullptr; +} + +void JBCCompilerComponent::ReleaseMemPoolImpl() { + FEUtils::DeleteMempoolPtr(mp); +} + +bool JBCCompilerComponent::ParseInputImpl() { + FETimer timer; + bool success = true; + timer.StartAndDump("JBCCompilerComponent::ParseInput()"); + FE_INFO_LEVEL(FEOptions::kDumpLevelInfo, "===== Process JBCCompilerComponent::ParseInput() ====="); + const std::list &inputClassNames = FEOptions::GetInstance().GetInputClassFiles(); + const std::list &inputJarNames = FEOptions::GetInstance().GetInputJarFiles(); + success = success && jbcInput.ReadClassFiles(inputClassNames); + success = success && jbcInput.ReadJarFiles(inputJarNames); + CHECK_FATAL(success, "JBCCompilerComponent::ParseInput failed. Exit."); + const jbc::JBCClass *klass = jbcInput.GetFirstClass(); + while (klass != nullptr) { + FEInputStructHelper *structHelper = allocator.GetMemPool()->New(allocator, *klass); + structHelpers.push_back(structHelper); + klass = jbcInput.GetNextClass(); + } + timer.StopAndDumpTimeMS("JBCCompilerComponent::ParseInput()"); + return success; +} + +bool JBCCompilerComponent::LoadOnDemandTypeImpl() { + return false; +} + +void JBCCompilerComponent::ProcessPragmaImpl() {} + +std::unique_ptr JBCCompilerComponent::CreatFEFunctionImpl(FEInputMethodHelper *methodHelper) { + JBCClassMethod2FEHelper *jbcMethodHelper = static_cast(methodHelper); + GStrIdx methodNameIdx = methodHelper->GetMethodNameIdx(); + bool isStatic = methodHelper->IsStatic(); + MIRFunction *mirFunc = FEManager::GetTypeManager().GetMIRFunction(methodNameIdx, isStatic); + CHECK_NULL_FATAL(mirFunc); + std::unique_ptr feFunction = std::make_unique(*jbcMethodHelper, *mirFunc, phaseResultTotal); + module.AddFunction(mirFunc); + feFunction->Init(); + return feFunction; +} + +std::string JBCCompilerComponent::GetComponentNameImpl() const { + return "JBCCompilerComponent"; +} + +bool JBCCompilerComponent::ParallelableImpl() const { + return true; +} + +void JBCCompilerComponent::DumpPhaseTimeTotalImpl() const { + INFO(kLncInfo, "[PhaseTime] JBCCompilerComponent"); + HIR2MPLCompilerComponent::DumpPhaseTimeTotalImpl(); +} +} // namespace maple diff --git a/src/hir2mpl/bytecode_input/class/src/jbc_function.cpp b/src/hir2mpl/bytecode_input/class/src/jbc_function.cpp new file mode 100644 index 0000000000000000000000000000000000000000..806b6a8654280671122aabf054e1a8d802cbc5de --- /dev/null +++ b/src/hir2mpl/bytecode_input/class/src/jbc_function.cpp @@ -0,0 +1,743 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "jbc_function.h" +#include "unistd.h" +#include "fe_macros.h" +#include "fe_algorithm.h" +#include "feir_var_reg.h" +#include "jbc_attr.h" +#include "fe_timer.h" + +namespace maple { +JBCFunction::JBCFunction(const JBCClassMethod2FEHelper &argMethodHelper, MIRFunction &mirFunc, + const std::unique_ptr &argPhaseResultTotal) + : FEFunction(mirFunc, argPhaseResultTotal), + methodHelper(argMethodHelper), + method(methodHelper.GetMethod()), + context(method.GetConstPool(), stack2feHelper, method.GetCode()) { + srcLang = MIRSrcLang::kSrcLangJava; +} + +JBCFunction::~JBCFunction() { + pesudoBBCatchPred = nullptr; +} + +void JBCFunction::InitImpl() { + FEFunction::InitImpl(); + feirCFG = std::make_unique(feirStmtHead, feirStmtHead); + feirCFG->Init(); + pesudoBBCatchPred = RegisterFEIRBB(std::make_unique()); +} + +void JBCFunction::PreProcessImpl() { +} + +bool JBCFunction::ProcessImpl() { + FE_INFO_LEVEL(FEOptions::kDumpLevelInfoDetail, "JBCFunction::Process() for %s", method.GetFullName().c_str()); + bool success = true; + if (!methodHelper.HasCode()) { + return success; // Skip abstract and native method, not emit it to mpl but mplts. + } + success = success && GenerateGeneralStmt("create general stmt"); + success = success && LabelGeneralStmts("label general stmt"); + success = success && LabelFEIRBBs("label general bb"); + success = success && LabelLabelIdx("label label idx"); + success = success && CheckJVMStack("check jvm stack"); + success = success && GenerateArgVarList("gen arg var list"); + success = success && ProcessFunctionArgs("process func args"); + if (FEOptions::GetInstance().IsEmitJBCLocalVarInfo()) { + success = success && EmitLocalVarInfo("emit localvar info"); + } + success = success && EmitToFEIRStmt("emit to feir"); + success = success && ProcessFEIRFunction(); + if (!success) { + error = true; + ERR(kLncErr, "JBCFunction::Process() failed for %s", method.GetFullName().c_str()); + } + return success; +} + +void JBCFunction::FinishImpl() { + bool dumpFunc = true; + dumpFunc = (FEOptions::GetInstance().IsDumpJBCErrorOnly() && error) || + FEOptions::GetInstance().IsDumpJBCAll() || + FEOptions::GetInstance().IsDumpJBCFuncName(method.GetFullName()); + if (dumpFunc) { + (void)BuildGeneralStmtBBMap("gen stmt_bb_map"); + } + if (FEOptions::GetInstance().IsDumpJBCStmt() && dumpFunc) { + DumpGeneralStmts(); + } + if (FEOptions::GetInstance().IsDumpFEIRBB() && dumpFunc) { + INFO(kLncInfo, "not implemented"); + } + if (FEOptions::GetInstance().IsDumpFEIRCFGGraph(method.GetFullName())) { + (void)DumpFEIRCFGGraph("dump cfg graph"); + } + (void)UpdateFormal("finish/update formal"); + // Not gen func body for abstract method + if (methodHelper.HasCode() || methodHelper.IsNative()) { + (void)EmitToMIR("finish/emit to mir"); + } + bool recordTime = FEOptions::GetInstance().IsDumpPhaseTime() || FEOptions::GetInstance().IsDumpPhaseTimeDetail(); + if (phaseResultTotal != nullptr && recordTime) { + phaseResultTotal->Combine(phaseResult); + } + if (FEOptions::GetInstance().IsDumpPhaseTimeDetail()) { + INFO(kLncInfo, "[PhaseTime] function: %s", method.GetFullName().c_str()); + phaseResult.Dump(); + } +} + +bool JBCFunction::PreProcessTypeNameIdx() { + return false; +} + +bool JBCFunction::GenerateGeneralStmt(const std::string &phaseName) { + phaseResult.RegisterPhaseNameAndStart(phaseName); + const jbc::JBCAttrCode *code = method.GetCode(); + if (phaseResult.IsSuccess() && code != nullptr) { + if (!PreBuildJsrInfo(*code)) { + return phaseResult.Finish(false); + } + if 
(!BuildStmtFromInstruction(*code)) { + return phaseResult.Finish(false); + } + BuildStmtForCatch(*code); + BuildStmtForTry(*code); + BuildStmtForLOC(*code); + if (FEOptions::GetInstance().IsDumpInstComment()) { + BuildStmtForInstComment(*code); + } + ArrangeStmts(); + } + return phaseResult.Finish(true); +} + +void JBCFunction::GenerateGeneralStmtFailCallBack() { +} + +void JBCFunction::GenerateGeneralDebugInfo() { +} + +bool JBCFunction::VerifyGeneral() { + return false; +} + +void JBCFunction::VerifyGeneralFailCallBack() { +} + +bool JBCFunction::GenerateAliasVars(const std::string &phaseName) { + (void) phaseName; + return true; +} + +bool JBCFunction::GenerateArgVarList(const std::string &phaseName) { + phaseResult.RegisterPhaseNameAndStart(phaseName); + InitStack2FEHelper(); + uint32 slotIdx = 0; + for (FEIRType *type : methodHelper.GetArgTypes()) { + PrimType pt = type->GetPrimType(); + std::unique_ptr var = std::make_unique(stack2feHelper.GetRegNumForSlot(slotIdx), + type->Clone()); + argVarList.push_back(std::move(var)); + if (type->IsScalar() && (pt == PTY_i64 || pt == PTY_f64)) { + slotIdx += JBCStack2FEHelper::kRegNumOffWide; + } else { + slotIdx += JBCStack2FEHelper::kRegNumOff; + } + } + return phaseResult.Finish(); +} + +bool JBCFunction::ProcessFunctionArgs(const std::string &phaseName) { + phaseResult.RegisterPhaseNameAndStart(phaseName); + for (const UniqueFEIRVar &argVar : argVarList) { + ASSERT(argVar != nullptr, "nullptr check for argVar"); + const UniqueFEIRType &type = argVar->GetType(); + ASSERT(type != nullptr, "nullptr check for type"); + if (!type->IsScalar()) { + continue; + } + PrimType pty = type->GetPrimType(); + bool useZExt = (pty == PTY_u1 || pty == PTY_u16); + bool useSExt = (pty == PTY_i8 || pty == PTY_i16); + if (useZExt || useSExt) { + UniqueFEIRVar dstVar = argVar->Clone(); + dstVar->GetType()->SetPrimType(PTY_i32); + UniqueFEIRExpr exprExt = useZExt ? 
FEIRBuilder::CreateExprZExt(argVar->Clone()) : + FEIRBuilder::CreateExprSExt(argVar->Clone()); + UniqueFEIRStmt stmtExt = FEIRBuilder::CreateStmtDAssign(std::move(dstVar), std::move(exprExt)); + FEIRStmt *ptrFEIRStmt = RegisterFEIRStmt(std::move(stmtExt)); + feirStmtTail->InsertBefore(ptrFEIRStmt); + } + } + return phaseResult.Finish(); +} + +bool JBCFunction::EmitLocalVarInfo(const std::string &phaseName) { + phaseResult.RegisterPhaseNameAndStart(phaseName); + const jbc::JBCAttrCode *code = method.GetCode(); + if (code == nullptr) { + return phaseResult.Finish(); + } + FEIRStmt *insPos = feirStmtHead; + std::list listInfo = code->GetLocalVarInfoByString(); + for (const std::string &infoStr : listInfo) { + UniqueFEIRStmt stmt = std::make_unique(infoStr); + FEIRStmt *ptrFEIRStmt = RegisterFEIRStmt(std::move(stmt)); + insPos->InsertAfter(ptrFEIRStmt); + insPos = ptrFEIRStmt; + } + return phaseResult.Finish(); +} + +bool JBCFunction::EmitToFEIRStmt(const std::string &phaseName) { + phaseResult.RegisterPhaseNameAndStart(phaseName); + const FELinkListNode *bbNode = genBBHead->GetNext(); + while (bbNode != nullptr && bbNode != genBBTail) { + const JBCBB *bb = static_cast(bbNode); + EmitToFEIRStmt(*bb); + bbNode = bbNode->GetNext(); + } + return phaseResult.Finish(); +} + +void JBCFunction::EmitToFEIRStmt(const JBCBB &bb) { + bool success = false; + std::list feirStmts = stack2feHelper.LoadSwapStack(bb.GetMinStackIn(), success); + if (success == false) { + ERR(kLncErr, "LoadSwapStack Error for %s", method.GetFullName().c_str()); + return; + } + AppendFEIRStmts(feirStmts); + const FELinkListNode *stmtNode = bb.GetStmtHead(); + while (stmtNode != nullptr && success) { + const JBCStmt *stmt = static_cast(stmtNode); + feirStmts = stmt->EmitToFEIR(context, success); + AppendFEIRStmts(feirStmts); + if (stmtNode == bb.GetStmtTail()) { + break; + } + stmtNode = stmtNode->GetNext(); + } +} + +std::string JBCFunction::GetGeneralFuncName() const { + return method.GetFullName(); +} + +bool JBCFunction::CheckJVMStack(const std::string &phaseName) { + phaseResult.RegisterPhaseNameAndStart(phaseName); + bool success = phaseResult.IsSuccess(); + FELinkListNode *nodeBB = genBBHead->GetNext(); + if (nodeBB == genBBTail) { + // empty function + return phaseResult.Finish(success); + } + // initialize function head + JBCBB *jbcBB = static_cast(nodeBB); + success = success && jbcBB->InitForFuncHeader(); + // initialize catch + for (FEIRBB *bbCatch : pesudoBBCatchPred->GetSuccBBs()) { + JBCBB *jbcBBCatch = static_cast(bbCatch); + success = success && jbcBBCatch->InitForCatch(); + } + // process + if (!success) { + return phaseResult.Finish(false); + } + std::map> correlation; + while (nodeBB != genBBTail) { + jbcBB = static_cast(nodeBB); + for (FEIRBB *bb : jbcBB->GetPredBBs()) { + if (bb != genBBHead && bb != pesudoBBCatchPred) { + CHECK_FATAL(correlation[jbcBB].insert(static_cast(bb)).second, "correlation map insert failed"); + } + } + nodeBB = nodeBB->GetNext(); + } + CorrelativeMerge stackUpdater(correlation, &JBCBB::UpdateStackByPredBB, &JBCBB::UpdateStackByPredBBEnd); + stackUpdater.ProcessAll(); + success = !stackUpdater.GetError(); + success = success && CheckJVMStackResult(); + return phaseResult.Finish(success); +} + +bool JBCFunction::CheckJVMStackResult() { + FELinkListNode *nodeBB = genBBHead->GetNext(); + while (nodeBB != nullptr && nodeBB != genBBTail) { + JBCBB *jbcBB = static_cast(nodeBB); + if (!jbcBB->CheckStack()) { + return false; + } + nodeBB = nodeBB->GetNext(); + } + return true; +} + +bool 
JBCFunction::PreBuildJsrInfo(const jbc::JBCAttrCode &code) { + const MapleMap &instMap = code.GetInstMap(); + for (const std::pair &it : instMap) { + uint32 pc = it.first; + jbc::JBCOp *op = it.second; + ASSERT_NOT_NULL(op); + jbc::JBCOpcode opcode = op->GetOpcode(); + jbc::JBCOpcodeKind kind = op->GetOpcodeKind(); + if (kind != jbc::kOpKindJsr) { + continue; + } + uint32 width = (opcode == jbc::kOpJsr ? 3 : 5); // instruction width for jsr is 3, and for jsr_w is 5 + uint32 nextPC = pc + width; + jbc::JBCOpJsr *opJsr = static_cast(op); + auto itTarget = instMap.find(opJsr->GetTarget()); + if (itTarget == instMap.end()) { + ERR(kLncErr, "invalid target %u for jsr @pc=%u", opJsr->GetTarget(), pc); + return false; + } + jbc::JBCOp *opTarget = itTarget->second; + if (opTarget->GetOpcodeKind() != jbc::kOpKindStore) { + ERR(kLncErr, "invalid target (without astore) for jsr @pc=%u", pc); + return false; + } + jbc::JBCOpSlotOpr *opSlotOpr = static_cast(opTarget); + uint16 slotIdx = opSlotOpr->GetSlotIdx(); + opSlotOpr->SetAddressOpr(); + opJsr->SetSlotIdx(slotIdx); + int32 jsrID = context.RegisterJsrSlotRetAddr(slotIdx, nextPC); + opJsr->SetJsrID(jsrID); + } + return true; +} + +bool JBCFunction::BuildStmtFromInstruction(const jbc::JBCAttrCode &code) { + FEIRStmt *stmt = nullptr; + const MapleMap &instMap = code.GetInstMap(); + for (const std::pair &it : instMap) { + uint32 pc = it.first; + const jbc::JBCOp *op = it.second; + ASSERT(op != nullptr, "null ptr check"); + switch (op->GetOpcodeKind()) { + case jbc::kOpKindBranch: + stmt = BuildStmtFromInstructionForBranch(*op); + break; + case jbc::kOpKindGoto: + stmt = BuildStmtFromInstructionForGoto(*op); + break; + case jbc::kOpKindSwitch: + stmt = BuildStmtFromInstructionForSwitch(*op); + break; + case jbc::kOpKindJsr: + stmt = BuildStmtFromInstructionForJsr(*op); + break; + case jbc::kOpKindRet: + stmt = BuildStmtFromInstructionForRet(*op); + if (stmt == nullptr) { + return false; + } + break; + default: + stmt = RegisterFEIRStmt(std::make_unique(*op)); + break; + } + stmt->SetThrowable(op->IsThrowable()); + static_cast(stmt)->SetPC(pc); + genStmtTail->InsertBefore(stmt); + context.UpdateMapPCStmtInst(pc, stmt); + } + context.UpdateMapPCStmtInst(code.GetCodeLength(), genStmtTail); + return true; +} + +FEIRStmt *JBCFunction::BuildStmtFromInstructionForBranch(const jbc::JBCOp &op) { + const std::unique_ptr &uniStmt = + RegisterGeneralStmtUniqueReturn(std::make_unique(op)); + FEIRStmt *stmt = uniStmt.get(); + const jbc::JBCOpBranch &opBranch = static_cast(op); + FEIRStmt *target = BuildAndUpdateLabel(opBranch.GetTarget(), uniStmt); + static_cast(stmt)->AddExtraSucc(*target); + return stmt; +} + +FEIRStmt *JBCFunction::BuildStmtFromInstructionForGoto(const jbc::JBCOp &op) { + const std::unique_ptr &uniStmt = + RegisterGeneralStmtUniqueReturn(std::make_unique(op)); + FEIRStmt *stmt = uniStmt.get(); + stmt->SetFallThru(false); + const jbc::JBCOpGoto &opGoto = static_cast(op); + FEIRStmt *target = BuildAndUpdateLabel(opGoto.GetTarget(), uniStmt); + static_cast(stmt)->AddExtraSucc(*target); + return stmt; +} + +FEIRStmt *JBCFunction::BuildStmtFromInstructionForSwitch(const jbc::JBCOp &op) { + const std::unique_ptr &uniStmt = + RegisterGeneralStmtUniqueReturn(std::make_unique(op)); + FEIRStmt *stmt = uniStmt.get(); + stmt->SetFallThru(false); + const jbc::JBCOpSwitch &opSwitch = static_cast(op); + for (const std::pair &targetInfo : opSwitch.GetTargets()) { + FEIRStmt *target = BuildAndUpdateLabel(targetInfo.second, uniStmt); + 
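+    // BuildAndUpdateLabel registers (or reuses) the pseudo label at the case-target PC and records this
+    // switch stmt as an extra predecessor; the matching successor edge is added right below.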
static_cast(stmt)->AddExtraSucc(*target); + } + FEIRStmt *target = BuildAndUpdateLabel(opSwitch.GetDefaultTarget(), uniStmt); + static_cast(stmt)->AddExtraSucc(*target); + return stmt; +} + +FEIRStmt *JBCFunction::BuildStmtFromInstructionForJsr(const jbc::JBCOp &op) { + const std::unique_ptr &uniStmt = + RegisterGeneralStmtUniqueReturn(std::make_unique(op)); + FEIRStmt *stmt = uniStmt.get(); + stmt->SetFallThru(false); + const jbc::JBCOpJsr &opJsr = static_cast(op); + FEIRStmt *target = BuildAndUpdateLabel(opJsr.GetTarget(), uniStmt); + static_cast(stmt)->AddExtraSucc(*target); + return stmt; +} + +FEIRStmt *JBCFunction::BuildStmtFromInstructionForRet(const jbc::JBCOp &op) { + const std::map> &mapJsrSlotRetAddr = context.GetMapJsrSlotRetAddr(); + const std::unique_ptr &uniStmt = + RegisterGeneralStmtUniqueReturn(std::make_unique(op)); + FEIRStmt *stmt = uniStmt.get(); + stmt->SetFallThru(false); + const jbc::JBCOpRet &opRet = static_cast(op); + auto itJsrInfo = mapJsrSlotRetAddr.find(opRet.GetIndex()); + if (itJsrInfo == mapJsrSlotRetAddr.end()) { + ERR(kLncWarn, "invalid slotIdx for ret instruction"); + return nullptr; + } + for (auto itTarget : itJsrInfo->second) { + uint32 pc = itTarget.second; + FEIRStmt *target = BuildAndUpdateLabel(pc, uniStmt); + static_cast(stmt)->AddExtraSucc(*target); + } + return stmt; +} + +void JBCFunction::BuildStmtForCatch(const jbc::JBCAttrCode &code) { + const std::map &mapPCCatchStmt = context.GetMapPCCatchStmt(); + const MapleVector &exceptionInfo = code.GetExceptionInfos(); + for (const jbc::attr::ExceptionTableItem *item : exceptionInfo) { + uint16 handlerPC = item->GetHandlerPC(); + JBCStmtPesudoCatch *stmtCatch = nullptr; + auto it = mapPCCatchStmt.find(handlerPC); + if (it == mapPCCatchStmt.end()) { + stmtCatch = static_cast(RegisterFEIRStmt(std::make_unique())); + context.UpdateMapPCCatchStmt(handlerPC, stmtCatch); + } else { + stmtCatch = static_cast(it->second); + } + const jbc::JBCConstClass *catchType = item->GetCatchType(); + if (catchType != nullptr) { + stmtCatch->AddCatchTypeName(catchType->GetClassNameIdxMpl()); + } else { + stmtCatch->AddCatchTypeName(GStrIdx(0)); + } + } +} + +void JBCFunction::BuildStmtForTry(const jbc::JBCAttrCode &code) { + std::map, std::vector> rawInfo; + std::map outMapStartEnd; + std::map> outMapStartCatch; + const std::map &mapPCCatchStmt = context.GetMapPCCatchStmt(); + const MapleVector &exceptionInfo = code.GetExceptionInfos(); + for (const jbc::attr::ExceptionTableItem *item : exceptionInfo) { + uint32 start = item->GetStartPC(); + uint32 end = item->GetEndPC(); + uint32 handlerPC = item->GetHandlerPC(); + rawInfo[std::make_pair(start, end)].push_back(handlerPC); + } + BuildTryInfo(rawInfo, outMapStartEnd, outMapStartCatch); + for (const std::pair &startEnd : outMapStartEnd) { + // Try + JBCStmtPesudoTry *stmtTry = + static_cast(RegisterFEIRStmt(std::make_unique())); + auto it = outMapStartCatch.find(startEnd.first); + CHECK_FATAL(it != outMapStartCatch.end(), "catch info not exist"); + for (uint32 handlerPC : it->second) { + auto itHandler = mapPCCatchStmt.find(handlerPC); + CHECK_FATAL(itHandler != mapPCCatchStmt.end(), "catch stmt not exist"); + stmtTry->AddCatchStmt(*(itHandler->second)); + } + context.UpdateMapPCTryStmt(startEnd.first, stmtTry); + // EndTry + JBCStmtPesudoEndTry *stmtEndTry = + static_cast(RegisterFEIRStmt(std::make_unique())); + context.UpdateMapPCEndTryStmt(startEnd.second, stmtEndTry); + } +} + +void JBCFunction::BuildTryInfo(const std::map, std::vector> &rawInfo, + std::map 
&outMapStartEnd, + std::map> &outMapStartCatch) { + const uint8 flagStart = 0x1; + const uint8 flagEnd = 0x2; + const uint8 flagCatch = 0x4; + std::map keyPoints; + std::map> mapStartEnds; + std::map> mapEndStarts; + for (auto info : rawInfo) { + keyPoints[info.first.first] |= flagStart; + keyPoints[info.first.second] |= flagEnd; + for (uint32 handlerPC : info.second) { + keyPoints[handlerPC] |= flagCatch; + } + CHECK_FATAL(mapStartEnds[info.first.first].insert(info.first.second).second, "mapStartEnds insert failed"); + CHECK_FATAL(mapEndStarts[info.first.second].insert(info.first.first).second, "mapEndStarts insert failed"); + } + std::deque> blockQueue; + const uint32 posInvalid = 0xFFFFFFFF; + uint32 startPos = posInvalid; + for (auto keyPoint : keyPoints) { + uint32 curPos = keyPoint.first; + uint8 flag = keyPoint.second; + // set try block + if (startPos != posInvalid) { + outMapStartEnd[startPos] = curPos; + BuildTryInfoCatch(rawInfo, blockQueue, startPos, outMapStartCatch); + startPos = curPos; + } + // process end + if ((flag & flagEnd) != 0) { + blockQueue.push_back(std::make_pair(posInvalid, posInvalid)); + std::pair top; + do { + top = blockQueue.front(); + blockQueue.pop_front(); + bool deleted = false; + if (top.second == curPos) { + ASSERT(mapEndStarts.find(curPos) != mapEndStarts.end(), "try end not found"); + if (mapEndStarts[curPos].find(top.first) != mapEndStarts[curPos].end()) { + deleted = true; + } + } + if (!deleted && top.first != posInvalid) { + blockQueue.push_back(top); + } + } while (top.first != posInvalid); + if (blockQueue.size() == 0) { + startPos = posInvalid; + } + } + // process start + if ((flag & flagStart) != 0) { + startPos = curPos; + for (uint32 endPos : mapStartEnds[curPos]) { + blockQueue.push_back(std::make_pair(curPos, endPos)); + } + } + } +} + +void JBCFunction::BuildTryInfoCatch(const std::map, std::vector> &rawInfo, + const std::deque> &blockQueue, + uint32 startPos, + std::map> &outMapStartCatch) { + std::set catchSet; + for (const std::pair &block : blockQueue) { + auto it = rawInfo.find(block); + CHECK_FATAL(it != rawInfo.end(), "block not found"); + for (uint32 handlerPC : it->second) { + if (catchSet.find(handlerPC) == catchSet.end()) { + outMapStartCatch[startPos].push_back(handlerPC); + CHECK_FATAL(catchSet.insert(handlerPC).second, "catchSet insert failed"); + } + } + } +} + +void JBCFunction::BuildStmtForLOC(const jbc::JBCAttrCode &code) { + const jbc::JBCAttr *attrRaw = code.GetAttr(jbc::JBCAttrKind::kAttrLineNumberTable); + if (attrRaw == nullptr) { + return; + } + const jbc::JBCAttrLineNumberTable *attrLineNumTab = static_cast(attrRaw); + uint32 srcFileIdx = method.GetClass().GetSrcFileInfoIdx(); + const MapleVector &lineNums = attrLineNumTab->GetLineNums(); + for (const jbc::attr::LineNumberTableItem *item : lineNums) { + JBCStmtPesudoLOC *stmtLOC = + static_cast(RegisterFEIRStmt(std::make_unique())); + stmtLOC->SetSrcFileIdx(srcFileIdx); + stmtLOC->SetLineNumber(item->GetLineNumber()); + context.UpdateMapPCStmtLOC(item->GetStartPC(), stmtLOC); + } +} + +void JBCFunction::BuildStmtForInstComment(const jbc::JBCAttrCode &code) { + const jbc::JBCAttr *attrRaw = code.GetAttr(jbc::JBCAttrKind::kAttrLineNumberTable); + std::map mapPCLineNum; + if (attrRaw != nullptr) { + const jbc::JBCAttrLineNumberTable *attrLineNumTab = static_cast(attrRaw); + const MapleVector &lineNums = attrLineNumTab->GetLineNums(); + for (const jbc::attr::LineNumberTableItem *item : lineNums) { + ASSERT_NOT_NULL(item); + mapPCLineNum[item->GetStartPC()] = 
item->GetLineNumber(); + } + } + uint16 currLineNum = 0xFFFF; // use 0xFFFF as invalid line number + std::stringstream ss; + const jbc::JBCConstPool &constPool = method.GetConstPool(); + const MapleMap &instMap = code.GetInstMap(); + for (const std::pair &it : instMap) { + uint32 pc = it.first; + jbc::JBCOp *op = it.second; + if (mapPCLineNum.find(pc) != mapPCLineNum.end()) { + currLineNum = mapPCLineNum[pc]; + } + ASSERT(pc < 0x10000, "invalid pc"); // pc use 16bit in java bytecode (less than 0x10000) + ss << "LINE " << srcFileName << " : "; + if (currLineNum != 0xFFFF) { // use 0xFFFF as invalid line number + ss << std::dec << currLineNum; + } else { + ss << "unknown"; + } + ss << ", PC : " << std::setfill(' ') << std::setw(5) << pc << "||" << + std::setfill('0') << std::setw(4) << std::hex << pc << " : " << + op->Dump(constPool); + std::unique_ptr stmt = std::make_unique(ss.str()); + JBCStmtPesudoComment *ptrStmt = static_cast(RegisterFEIRStmt(std::move(stmt))); + context.UpdateMapPCCommentStmt(pc, ptrStmt); + ss.str(""); + } +} + +FEIRStmt *JBCFunction::BuildAndUpdateLabel(uint32 dstPC, const std::unique_ptr &srcStmt) { + const std::map &mapPCLabelStmt = context.GetMapPCLabelStmt(); + auto it = mapPCLabelStmt.find(dstPC); + JBCStmtPesudoLabel *stmtLabel = nullptr; + if (it == mapPCLabelStmt.end()) { + stmtLabel = static_cast(RegisterFEIRStmt(std::make_unique())); + context.UpdateMapPCLabelStmt(dstPC, stmtLabel); + } else { + stmtLabel = it->second; + } + ASSERT(stmtLabel != nullptr, "null ptr check"); + stmtLabel->AddExtraPred(*srcStmt); + return stmtLabel; +} + +void JBCFunction::ArrangeStmts() { + context.ArrangeStmts(); +} + +void JBCFunction::InitStack2FEHelper() { + const jbc::JBCAttrCode *code = method.GetCode(); + // args + uint32 nargs = 0; + for (FEIRType *type : methodHelper.GetArgTypes()) { + PrimType pt = type->GetPrimType(); + if (type->IsScalar() && (pt == PTY_i64 || pt == PTY_f64)) { + nargs += JBCStack2FEHelper::kRegNumOffWide; + } else { + nargs += JBCStack2FEHelper::kRegNumOff; + } + } + if (code == nullptr) { + stack2feHelper.SetNArgs(nargs); + stack2feHelper.SetNLocals(0); + stack2feHelper.SetNStacks(0); + stack2feHelper.SetNSwaps(0); + return; + } + stack2feHelper.SetNArgs(nargs); + CHECK_FATAL(nargs <= code->GetMaxLocals(), "invalid jbc method"); + stack2feHelper.SetNLocals(code->GetMaxLocals() - nargs); + stack2feHelper.SetNStacks(code->GetMaxStack()); + stack2feHelper.SetNSwaps(CalculateMaxSwapSize()); + if (!FEOptions::GetInstance().IsDumpInstComment()) { + return; + } + std::stringstream ss; + ss << "StackToReg Info: nStacks=" << stack2feHelper.GetNStacks() << + ", nSwaps=" << stack2feHelper.GetNSwaps() << + ", maxLocals=" << code->GetMaxLocals() << + ", nArgs=" << stack2feHelper.GetNArgs(); + feirStmtTail->InsertBefore(RegisterFEIRStmt(FEIRBuilder::CreateStmtComment(ss.str()))); + feirStmtTail->InsertBefore(RegisterFEIRStmt(FEIRBuilder::CreateStmtComment("==== Reg Map ===="))); + uint32 regStart = 0; + ss.str(""); + ss << " " << regStart << " - " << (regStart + stack2feHelper.GetNStacks() - 1) << ": stacks"; + feirStmtTail->InsertBefore(RegisterFEIRStmt(FEIRBuilder::CreateStmtComment(ss.str()))); + regStart += stack2feHelper.GetNStacks(); + if (stack2feHelper.GetNSwaps() > 0) { + ss.str(""); + ss << " " << regStart << " - " << (regStart + stack2feHelper.GetNSwaps() - 1) << ": swaps"; + feirStmtTail->InsertBefore(RegisterFEIRStmt(FEIRBuilder::CreateStmtComment(ss.str()))); + regStart += stack2feHelper.GetNSwaps(); + } + if (stack2feHelper.GetNLocals() > 0) { + 
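+    // locals occupy the register segment right after the stacks and swaps segments dumped above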
ss.str(""); + ss << " " << regStart << " - " << (regStart + stack2feHelper.GetNLocals() - 1) << ": locals"; + feirStmtTail->InsertBefore(RegisterFEIRStmt(FEIRBuilder::CreateStmtComment(ss.str()))); + regStart += stack2feHelper.GetNLocals(); + } + if (stack2feHelper.GetNArgs() > 0) { + ss.str(""); + ss << " " << regStart << " - " << (regStart + stack2feHelper.GetNArgs() - 1) << ": args"; + feirStmtTail->InsertBefore(RegisterFEIRStmt(FEIRBuilder::CreateStmtComment(ss.str()))); + } + feirStmtTail->InsertBefore(RegisterFEIRStmt(FEIRBuilder::CreateStmtComment("================="))); +} + +uint32 JBCFunction::CalculateMaxSwapSize() const { + const FELinkListNode *nodeBB = genBBHead->GetNext(); + uint32 size = 0; + while (nodeBB != nullptr && nodeBB != genBBTail) { + const JBCBB *bb = static_cast(nodeBB); + uint32 tmp = bb->GetSwapSize(); + size = size > tmp ? size : tmp; + nodeBB = nodeBB->GetNext(); + } + return size; +} + +bool JBCFunction::LabelLabelIdx(const std::string &phaseName) { + phaseResult.RegisterPhaseNameAndStart(phaseName); + uint32 idx = 0; + FELinkListNode *nodeStmt = genStmtHead->GetNext(); + while (nodeStmt != nullptr && nodeStmt != genStmtTail) { + JBCStmt *stmt = static_cast(nodeStmt); + if (stmt->GetJBCKind() == JBCStmtKind::kJBCStmtPesudoLabel || + stmt->GetJBCKind() == JBCStmtKind::kJBCStmtPesudoCatch) { + JBCStmtPesudoLabel *stmtLabel = static_cast(stmt); + stmtLabel->SetLabelIdx(idx); + idx++; + } + nodeStmt = nodeStmt->GetNext(); + } + return phaseResult.Finish(); +} + +bool JBCFunction::NeedConvertToInt32(const std::unique_ptr &var) { + ASSERT(var != nullptr, "nullptr check for var"); + const std::unique_ptr &type = var->GetType(); + if (!type->IsScalar()) { + return false; + } + PrimType pty = type->GetPrimType(); + // Z: PTY_u1 + // B: PTY_i8 + // C: PTY_u16 + // S: PTY_i16 + if (pty == PTY_u1 || pty == PTY_u8 || pty == PTY_i8 || pty == PTY_u16 || pty == PTY_i16) { + return true; + } else { + return false; + } +} +} // namespace maple diff --git a/src/hir2mpl/bytecode_input/class/src/jbc_function_context.cpp b/src/hir2mpl/bytecode_input/class/src/jbc_function_context.cpp new file mode 100644 index 0000000000000000000000000000000000000000..87ceb84e22b6108d8a08c48a41160354aa980f15 --- /dev/null +++ b/src/hir2mpl/bytecode_input/class/src/jbc_function_context.cpp @@ -0,0 +1,83 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "jbc_function_context.h" + +namespace maple { +int32 JBCFunctionContext::RegisterJsrSlotRetAddr(uint16 slotIdx, uint32 nextPC) { + auto itInfo = mapJsrSlotRetAddr.find(slotIdx); + int32 jsrID; + if (itInfo == mapJsrSlotRetAddr.end()) { + jsrID = 0; + } else { + size_t size = itInfo->second.size(); + CHECK_FATAL(size < INT32_MAX, "jsr ID out of range"); + jsrID = static_cast(itInfo->second.size()); + } + mapJsrSlotRetAddr[slotIdx][jsrID] = nextPC; + return jsrID; +} + +void JBCFunctionContext::ArrangeStmts() { + /* Type of stmt: inst, label, try, endtry, catch, comment, loc + * endtry + * loc + * catch + * label + * try + * comment + * inst + */ + for (const std::pair &pcInst : mapPCStmtInst) { + uint32 pc = pcInst.first; + FEIRStmt *stmtInst = pcInst.second; + if (mapPCEndTryStmt.find(pc) != mapPCEndTryStmt.end()) { + stmtInst->InsertBefore(mapPCEndTryStmt[pc]); + } + if (mapPCStmtLOC.find(pc) != mapPCStmtLOC.end()) { + stmtInst->InsertBefore(mapPCStmtLOC[pc]); + } + if (mapPCCatchStmt.find(pc) != mapPCCatchStmt.end()) { + stmtInst->InsertBefore(mapPCCatchStmt[pc]); + } + if (mapPCLabelStmt.find(pc) != mapPCLabelStmt.end()) { + stmtInst->InsertBefore(mapPCLabelStmt[pc]); + } + if (mapPCTryStmt.find(pc) != mapPCTryStmt.end()) { + stmtInst->InsertBefore(mapPCTryStmt[pc]); + } + if (mapPCCommentStmt.find(pc) != mapPCCommentStmt.end()) { + stmtInst->InsertBefore(mapPCCommentStmt[pc]); + } + } +} + +const FEIRType *JBCFunctionContext::GetSlotType(uint16 slotIdx, uint32 pc) const { + CHECK_NULL_FATAL(code); + const jbc::JBCAttrLocalVariableInfo &localVarInfo = code->GetLocalVarInfo(); + CHECK_FATAL(pc < UINT16_MAX, "pc out of range"); + uint16 startPC = localVarInfo.GetStart(slotIdx, static_cast(pc)); + const jbc::JavaAttrLocalVariableInfoItem &info = localVarInfo.GetItemByStart(slotIdx, startPC); + return info.feirType; +} + +const FEIRType *JBCFunctionContext::GetSlotType(uint16 slotIdx) const { + CHECK_NULL_FATAL(code); + const jbc::JBCAttrLocalVariableInfo &localVarInfo = code->GetLocalVarInfo(); + CHECK_FATAL(currPC < UINT16_MAX, "pc out of range"); + uint16 startPC = localVarInfo.GetStart(slotIdx, static_cast(currPC)); + const jbc::JavaAttrLocalVariableInfoItem &info = localVarInfo.GetItemByStart(slotIdx, startPC); + return info.feirType; +} +} // namespace maple \ No newline at end of file diff --git a/src/hir2mpl/bytecode_input/class/src/jbc_input.cpp b/src/hir2mpl/bytecode_input/class/src/jbc_input.cpp new file mode 100644 index 0000000000000000000000000000000000000000..458dc34eed539fa484eb518575b139620279cc08 --- /dev/null +++ b/src/hir2mpl/bytecode_input/class/src/jbc_input.cpp @@ -0,0 +1,158 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "jbc_input.h" +#include "basic_io.h" +#include "simple_zip.h" +#include "fe_macros.h" +#include "fe_options.h" +#include "hir2mpl_env.h" +#include "fe_manager.h" + +namespace maple { +namespace jbc { +namespace { +const std::string kClassFileSuffix = ".class"; +const uint32 kClassFileSuffixLength = 6; +const std::string kJarMetaInf = "META-INF"; +const uint32 kJarMetaInfLength = 8; +} +JBCInput::JBCInput(MIRModule &moduleIn) + : module(moduleIn), + mp(FEUtils::NewMempool("mempool for JBC Input Helper", false /* isLocalPool */)), + allocator(mp), + klassList(allocator.Adapter()) { + itKlass = klassList.end(); +} + +JBCInput::~JBCInput() { + mp = nullptr; +} + +void JBCInput::ReleaseMemPool() { + FEUtils::DeleteMempoolPtr(mp); +} + +bool JBCInput::ReadClassFile(const std::string &fileName) { + (void)GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(fileName); + BasicIOMapFile file(fileName); + BasicIORead io(file, true); + if (file.OpenAndMap() == false) { + ERR(kLncErr, "Unable to open class file %s", fileName.c_str()); + return false; + } + JBCClass *klass = JBCClass::InClass(allocator, io); + if (klass == nullptr) { + ERR(kLncErr, "Unable to parse class file %s", fileName.c_str()); + file.Close(); + return false; + } + klass->SetFilePathName(fileName); + RegisterSrcFileInfo(*klass); + klassList.push_back(klass); + file.Close(); + return true; +} + +bool JBCInput::ReadClassFiles(const std::list &fileNames) { + bool success = true; + for (const std::string &fileName : fileNames) { + FE_INFO_LEVEL(FEOptions::kDumpLevelInfoDetail, "===== Process JBCInput::ReadClassFiles(%s) =====", + fileName.c_str()); + success = ReadClassFile(fileName) ? success : false; + } + return success; +} + +bool JBCInput::ReadJarFile(const std::string &fileName) { + (void)GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(fileName); + BasicIOMapFile file(fileName); + bool success = true; + if (file.OpenAndMap() == false) { + ERR(kLncErr, "Unable to open jar file %s", fileName.c_str()); + return false; + } + SimpleZip zipFile(file); + zipFile.ParseFile(); + for (const std::unique_ptr &zipLocalFile : zipFile.GetFiles()) { + std::string zipLocalFileName = zipLocalFile->GetFileName(); + size_t len = zipLocalFileName.length(); + if (len > kClassFileSuffixLength && + zipLocalFileName.substr(len - kClassFileSuffixLength).compare(kClassFileSuffix) == 0) { + if (zipLocalFileName.length() >= kJarMetaInfLength && + zipLocalFileName.substr(0, kJarMetaInfLength).compare(kJarMetaInf) == 0) { + continue; + } + // class file + BasicIOMapFile classFile(zipLocalFileName, zipLocalFile->GetUnCompData(), zipLocalFile->GetUnCompDataSize()); + BasicIORead ioClassFile(classFile, true); + JBCClass *klass = JBCClass::InClass(allocator, ioClassFile); + if (klass == nullptr) { + ERR(kLncErr, "Unable to parse class file %s", zipLocalFileName.c_str()); + success = false; + } else { + klass->SetFilePathName(zipLocalFileName); + RegisterSrcFileInfo(*klass); + klassList.push_back(klass); + } + } + } + file.Close(); + if (!success) { + return false; + } + return true; +} + +bool JBCInput::ReadJarFiles(const std::list &fileNames) { + bool success = true; + for (const std::string &fileName : fileNames) { + FE_INFO_LEVEL(FEOptions::kDumpLevelInfoDetail, "===== Process JBCInput::ReadJarFiles(%s) =====", fileName.c_str()); + success = ReadJarFile(fileName) ? 
success : false; + } + return success; +} + +const JBCClass *JBCInput::GetFirstClass() { + if (klassList.size() == 0) { + return nullptr; + } + itKlass = klassList.begin(); + return *itKlass; +} + +const JBCClass *JBCInput::GetNextClass() { + if (itKlass == klassList.end()) { + return nullptr; + } + ++itKlass; + if (itKlass == klassList.end()) { + return nullptr; + } + return *itKlass; +} + +void JBCInput::RegisterSrcFileInfo(JBCClass &klass) { + GStrIdx fileNameIdx; + if (FEOptions::GetInstance().IsJBCInfoUsePathName()) { + fileNameIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(klass.GetFilePathName()); + } else { + fileNameIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(klass.GetFileName()); + } + uint32 srcFileIdx = HIR2MPLEnv::GetInstance().NewSrcFileIdx(fileNameIdx); + module.PushbackFileInfo(MIRInfoPair(fileNameIdx, srcFileIdx)); + klass.SetSrcFileInfoIdx(srcFileIdx); +} +} // namespace jbc +} // namespace maple diff --git a/src/hir2mpl/bytecode_input/class/src/jbc_opcode.cpp b/src/hir2mpl/bytecode_input/class/src/jbc_opcode.cpp new file mode 100644 index 0000000000000000000000000000000000000000..2a04f49c15e04b77182a112a42f732dafd516277 --- /dev/null +++ b/src/hir2mpl/bytecode_input/class/src/jbc_opcode.cpp @@ -0,0 +1,1574 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "jbc_opcode.h" +#include "jbc_class_const.h" +#include "jbc_class_const_pool.h" +#include "jbc_util.h" +#include "fe_utils_java.h" +#include "fe_type_manager.h" + +namespace maple { +namespace jbc { +// ---------- JBCOpcodeInfo ---------- +JBCOpcodeInfo::JBCOpcodeInfo() { +#define JBC_OP(mOp, mValue, mKind, mName, mFlag) \ + table[kOp##mOp].kind = kOpKind##mKind; \ + table[kOp##mOp].name = mName; \ + table[kOp##mOp].flags = mFlag; +#include "jbc_opcode.def" +#undef JBC_OP +} + +// ---------- JBCOp ---------- +JBCOpcodeInfo JBCOp::opcodeInfo; +std::vector JBCOp::emptyPrimTypes; + +JBCOp::JBCOp(MapleAllocator &allocator, JBCOpcode opIn, JBCOpcodeKind kindIn, bool wideIn) + : alloc(allocator), op(opIn), kind(kindIn), wide(wideIn) {} + +bool JBCOp::CheckNotWide(const BasicIORead &io) const { + // wide only can be used with i/f/l/d/aload, i/f/l/d/astore, ret, and iinc + if (wide) { + ERR(kLncErr, "opcode %s @ pc=%u can not own wide prefix", GetOpcodeName().c_str(), io.GetPos() - 1); + return false; + } + return true; +} + +std::string JBCOp::DumpImpl(const JBCConstPool &constPool) const { + (void) constPool; + return GetOpcodeName(); +} + +const std::vector &JBCOp::GetInputTypesFromStackImpl() const { + return emptyPrimTypes; +} + +std::vector JBCOp::GetInputTypesFromStackImpl(const JBCConstPool &constPool) const { + (void) constPool; + return GetInputTypesFromStackImpl(); +} + +JBCPrimType JBCOp::GetOutputTypesToStackImpl() const { + return kTypeDefault; +} + +JBCPrimType JBCOp::GetOutputTypesToStackImpl(const JBCConstPool &constPool) const { + (void) constPool; + return GetOutputTypesToStackImpl(); +} + +// ---------- JBCOpUnused ---------- +JBCOpUnused::JBCOpUnused(MapleAllocator &allocator, JBCOpcode opIn, JBCOpcodeKind kindIn, bool wideIn) + : JBCOp(allocator, opIn, kindIn, wideIn) {} + +bool JBCOpUnused::ParseFileImpl(BasicIORead &io) { + (void) io; + WARN(kLncWarn, "Unused opcode %s", GetOpcodeName().c_str()); + return true; +} + +// ---------- JBCOpReversed ---------- +JBCOpReversed::JBCOpReversed(MapleAllocator &allocator, JBCOpcode opIn, JBCOpcodeKind kindIn, bool wideIn) + : JBCOp(allocator, opIn, kindIn, wideIn) {} + +bool JBCOpReversed::ParseFileImpl(BasicIORead &io) { + (void) io; + ERR(kLncErr, "Reversed opcode %s", GetOpcodeName().c_str()); + return false; +} + +// ---------- JBCOpDefault ---------- +std::map> JBCOpDefault::mapOpInputTypes = JBCOpDefault::InitMapOpInputTypes(); +std::map JBCOpDefault::mapOpOutputType = JBCOpDefault::InitMapOpOutputType(); + +JBCOpDefault::JBCOpDefault(MapleAllocator &allocator, JBCOpcode opIn, JBCOpcodeKind kindIn, bool wideIn) + : JBCOp(allocator, opIn, kindIn, wideIn) {} + +bool JBCOpDefault::ParseFileImpl(BasicIORead &io) { + if (JBCOp::CheckNotWide(io) == false) { + return false; + } + return true; +} + +const std::vector &JBCOpDefault::GetInputTypesFromStackImpl() const { + auto it = mapOpInputTypes.find(op); + CHECK_FATAL(it != mapOpInputTypes.end(), "Unsupported opcode %s", opcodeInfo.GetOpcodeName(op).c_str()); + return it->second; +} + +JBCPrimType JBCOpDefault::GetOutputTypesToStackImpl() const { + auto it = mapOpOutputType.find(op); + CHECK_FATAL(it != mapOpOutputType.end(), "Unsupported opcode %s", opcodeInfo.GetOpcodeName(op).c_str()); + return it->second; +} + +std::map> JBCOpDefault::InitMapOpInputTypes() { + std::map> ans; + ans[kOpNop] = emptyPrimTypes; + InitMapOpInputTypesForArrayLoad(ans); + InitMapOpInputTypesForArrayStore(ans); + InitMapOpInputTypesForMathBinop(ans); + InitMapOpInputTypesForMathUnop(ans); + 
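+  // conversion, compare, return, throw, monitor and arraylength opcode groups are filled below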
InitMapOpInputTypesForConvert(ans); + InitMapOpInputTypesForCompare(ans); + InitMapOpInputTypesForReturn(ans); + InitMapOpInputTypesForThrow(ans); + InitMapOpInputTypesForMonitor(ans); + InitMapOpInputTypesForArrayLength(ans); + return ans; +} + +std::map JBCOpDefault::InitMapOpOutputType() { + std::map ans; + ans[kOpNop] = kTypeDefault; + InitMapOpOutputTypesForArrayLoad(ans); + InitMapOpOutputTypesForArrayStore(ans); + InitMapOpOutputTypesForMathBinop(ans); + InitMapOpOutputTypesForMathUnop(ans); + InitMapOpOutputTypesForConvert(ans); + InitMapOpOutputTypesForCompare(ans); + InitMapOpOutputTypesForReturn(ans); + InitMapOpOutputTypesForThrow(ans); + InitMapOpOutputTypesForMonitor(ans); + InitMapOpOutputTypesForArrayLength(ans); + return ans; +} + +void JBCOpDefault::InitMapOpInputTypesForArrayLoad(std::map> &ans) { + ans[kOpIALoad] = std::vector({ kTypeRef, kTypeInt }); + ans[kOpLALoad] = std::vector({ kTypeRef, kTypeInt }); + ans[kOpFALoad] = std::vector({ kTypeRef, kTypeInt }); + ans[kOpDALoad] = std::vector({ kTypeRef, kTypeInt }); + ans[kOpAALoad] = std::vector({ kTypeRef, kTypeInt }); + ans[kOpBALoad] = std::vector({ kTypeRef, kTypeInt }); + ans[kOpCALoad] = std::vector({ kTypeRef, kTypeInt }); + ans[kOpSALoad] = std::vector({ kTypeRef, kTypeInt }); +} + +void JBCOpDefault::InitMapOpOutputTypesForArrayLoad(std::map &ans) { + ans[kOpIALoad] = kTypeInt; + ans[kOpLALoad] = kTypeLong; + ans[kOpFALoad] = kTypeFloat; + ans[kOpDALoad] = kTypeDouble; + ans[kOpAALoad] = kTypeRef; + ans[kOpBALoad] = kTypeByteOrBoolean; + ans[kOpCALoad] = kTypeChar; + ans[kOpSALoad] = kTypeShort; +} + +void JBCOpDefault::InitMapOpInputTypesForArrayStore(std::map> &ans) { + ans[kOpIAStore] = std::vector({ kTypeRef, kTypeInt, kTypeInt }); + ans[kOpLAStore] = std::vector({ kTypeRef, kTypeInt, kTypeLong }); + ans[kOpFAStore] = std::vector({ kTypeRef, kTypeInt, kTypeFloat }); + ans[kOpDAStore] = std::vector({ kTypeRef, kTypeInt, kTypeDouble }); + ans[kOpAAStore] = std::vector({ kTypeRef, kTypeInt, kTypeRef }); + ans[kOpBAStore] = std::vector({ kTypeRef, kTypeInt, kTypeByteOrBoolean }); + ans[kOpCAStore] = std::vector({ kTypeRef, kTypeInt, kTypeChar }); + ans[kOpSAStore] = std::vector({ kTypeRef, kTypeInt, kTypeShort }); +} + +void JBCOpDefault::InitMapOpOutputTypesForArrayStore(std::map &ans) { + ans[kOpIAStore] = kTypeDefault; + ans[kOpLAStore] = kTypeDefault; + ans[kOpFAStore] = kTypeDefault; + ans[kOpDAStore] = kTypeDefault; + ans[kOpAAStore] = kTypeDefault; + ans[kOpBAStore] = kTypeDefault; + ans[kOpCAStore] = kTypeDefault; + ans[kOpSAStore] = kTypeDefault; +} + +void JBCOpDefault::InitMapOpInputTypesForMathBinop(std::map> &ans) { + ans[kOpIAdd] = std::vector({ kTypeInt, kTypeInt }); + ans[kOpLAdd] = std::vector({ kTypeLong, kTypeLong }); + ans[kOpFAdd] = std::vector({ kTypeFloat, kTypeFloat }); + ans[kOpDAdd] = std::vector({ kTypeDouble, kTypeDouble }); + ans[kOpISub] = std::vector({ kTypeInt, kTypeInt }); + ans[kOpLSub] = std::vector({ kTypeLong, kTypeLong }); + ans[kOpFSub] = std::vector({ kTypeFloat, kTypeFloat }); + ans[kOpDSub] = std::vector({ kTypeDouble, kTypeDouble }); + ans[kOpIMul] = std::vector({ kTypeInt, kTypeInt }); + ans[kOpLMul] = std::vector({ kTypeLong, kTypeLong }); + ans[kOpFMul] = std::vector({ kTypeFloat, kTypeFloat }); + ans[kOpDMul] = std::vector({ kTypeDouble, kTypeDouble }); + ans[kOpIDiv] = std::vector({ kTypeInt, kTypeInt }); + ans[kOpLDiv] = std::vector({ kTypeLong, kTypeLong }); + ans[kOpFDiv] = std::vector({ kTypeFloat, kTypeFloat }); + ans[kOpDDiv] = std::vector({ kTypeDouble, 
kTypeDouble }); + ans[kOpIRem] = std::vector({ kTypeInt, kTypeInt }); + ans[kOpLRem] = std::vector({ kTypeLong, kTypeLong }); + ans[kOpFRem] = std::vector({ kTypeFloat, kTypeFloat }); + ans[kOpDRem] = std::vector({ kTypeDouble, kTypeDouble }); + ans[kOpIShl] = std::vector({ kTypeInt, kTypeInt }); + ans[kOpLShl] = std::vector({ kTypeLong, kTypeInt }); + ans[kOpIShr] = std::vector({ kTypeInt, kTypeInt }); + ans[kOpLShr] = std::vector({ kTypeLong, kTypeInt }); + ans[kOpIUShr] = std::vector({ kTypeInt, kTypeInt }); + ans[kOpLUShr] = std::vector({ kTypeLong, kTypeInt }); + ans[kOpIAnd] = std::vector({ kTypeInt, kTypeInt }); + ans[kOpLAnd] = std::vector({ kTypeLong, kTypeLong }); + ans[kOpIOr] = std::vector({ kTypeInt, kTypeInt }); + ans[kOpLOr] = std::vector({ kTypeLong, kTypeLong }); + ans[kOpIXor] = std::vector({ kTypeInt, kTypeInt }); + ans[kOpLXor] = std::vector({ kTypeLong, kTypeLong }); +} + +void JBCOpDefault::InitMapOpOutputTypesForMathBinop(std::map &ans) { + ans[kOpIAdd] = kTypeInt; + ans[kOpLAdd] = kTypeLong; + ans[kOpFAdd] = kTypeFloat; + ans[kOpDAdd] = kTypeDouble; + ans[kOpISub] = kTypeInt; + ans[kOpLSub] = kTypeLong; + ans[kOpFSub] = kTypeFloat; + ans[kOpDSub] = kTypeDouble; + ans[kOpIMul] = kTypeInt; + ans[kOpLMul] = kTypeLong; + ans[kOpFMul] = kTypeFloat; + ans[kOpDMul] = kTypeDouble; + ans[kOpIDiv] = kTypeInt; + ans[kOpLDiv] = kTypeLong; + ans[kOpFDiv] = kTypeFloat; + ans[kOpDDiv] = kTypeDouble; + ans[kOpIRem] = kTypeInt; + ans[kOpLRem] = kTypeLong; + ans[kOpFRem] = kTypeFloat; + ans[kOpDRem] = kTypeDouble; + ans[kOpIShl] = kTypeInt; + ans[kOpLShl] = kTypeLong; + ans[kOpIShr] = kTypeInt; + ans[kOpLShr] = kTypeLong; + ans[kOpIUShr] = kTypeInt; + ans[kOpLUShr] = kTypeLong; + ans[kOpIAnd] = kTypeInt; + ans[kOpLAnd] = kTypeLong; + ans[kOpIOr] = kTypeInt; + ans[kOpLOr] = kTypeLong; + ans[kOpIXor] = kTypeInt; + ans[kOpLXor] = kTypeLong; +} + +void JBCOpDefault::InitMapOpInputTypesForMathUnop(std::map> &ans) { + ans[kOpINeg] = std::vector({ kTypeInt }); + ans[kOpLNeg] = std::vector({ kTypeLong }); + ans[kOpFNeg] = std::vector({ kTypeFloat }); + ans[kOpDNeg] = std::vector({ kTypeDouble }); +} + +void JBCOpDefault::InitMapOpOutputTypesForMathUnop(std::map &ans) { + ans[kOpINeg] = kTypeInt; + ans[kOpLNeg] = kTypeLong; + ans[kOpFNeg] = kTypeFloat; + ans[kOpDNeg] = kTypeDouble; +} + +void JBCOpDefault::InitMapOpInputTypesForConvert(std::map> &ans) { + ans[kOpI2L] = std::vector({ kTypeInt }); + ans[kOpI2F] = std::vector({ kTypeInt }); + ans[kOpI2D] = std::vector({ kTypeInt }); + ans[kOpL2I] = std::vector({ kTypeLong }); + ans[kOpL2F] = std::vector({ kTypeLong }); + ans[kOpL2D] = std::vector({ kTypeLong }); + ans[kOpF2I] = std::vector({ kTypeFloat }); + ans[kOpF2L] = std::vector({ kTypeFloat }); + ans[kOpF2D] = std::vector({ kTypeFloat }); + ans[kOpD2I] = std::vector({ kTypeDouble }); + ans[kOpD2L] = std::vector({ kTypeDouble }); + ans[kOpD2F] = std::vector({ kTypeDouble }); + ans[kOpI2B] = std::vector({ kTypeInt }); + ans[kOpI2C] = std::vector({ kTypeInt }); + ans[kOpI2S] = std::vector({ kTypeInt }); +} + +void JBCOpDefault::InitMapOpOutputTypesForConvert(std::map &ans) { + ans[kOpI2L] = kTypeLong; + ans[kOpI2F] = kTypeFloat; + ans[kOpI2D] = kTypeDouble; + ans[kOpL2I] = kTypeInt; + ans[kOpL2F] = kTypeFloat; + ans[kOpL2D] = kTypeDouble; + ans[kOpF2I] = kTypeInt; + ans[kOpF2L] = kTypeLong; + ans[kOpF2D] = kTypeDouble; + ans[kOpD2I] = kTypeInt; + ans[kOpD2L] = kTypeLong; + ans[kOpD2F] = kTypeFloat; + ans[kOpI2B] = kTypeByteOrBoolean; + ans[kOpI2C] = kTypeChar; + ans[kOpI2S] = kTypeShort; +} + 
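+// A minimal sketch of how the per-opcode tables above can be consumed: computing the net operand-stack
+// delta (in JVM slots) for one default opcode. Illustrative only; it assumes the public
+// GetInputTypesFromStack()/GetOutputTypesToStack() wrappers that forward to the *Impl methods in this
+// file, and SketchStackSlotDelta is a hypothetical helper name rather than part of the JBCOp interface.
+// For example, iadd pops two ints and pushes one int, so its delta is -1.
+static int32 SketchStackSlotDelta(const JBCOp &op) {
+  auto slotWidth = [](JBCPrimType type) -> int32 {
+    return (type == kTypeLong || type == kTypeDouble) ? 2 : 1;  // wide values occupy two slots
+  };
+  int32 delta = 0;
+  for (JBCPrimType type : op.GetInputTypesFromStack()) {
+    delta -= slotWidth(type);  // operands popped from the stack
+  }
+  JBCPrimType outType = op.GetOutputTypesToStack();
+  if (outType != kTypeDefault) {
+    delta += slotWidth(outType);  // result pushed back, if any
+  }
+  return delta;
+}
+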
+void JBCOpDefault::InitMapOpInputTypesForCompare(std::map> &ans) { + ans[kOpLCmp] = std::vector({ kTypeLong, kTypeLong }); + ans[kOpFCmpl] = std::vector({ kTypeFloat, kTypeFloat }); + ans[kOpFCmpg] = std::vector({ kTypeFloat, kTypeFloat }); + ans[kOpDCmpl] = std::vector({ kTypeDouble, kTypeDouble }); + ans[kOpDCmpg] = std::vector({ kTypeDouble, kTypeDouble }); +} + +void JBCOpDefault::InitMapOpOutputTypesForCompare(std::map &ans) { + ans[kOpLCmp] = kTypeInt; + ans[kOpFCmpl] = kTypeInt; + ans[kOpFCmpg] = kTypeInt; + ans[kOpDCmpl] = kTypeInt; + ans[kOpDCmpg] = kTypeInt; +} + +void JBCOpDefault::InitMapOpInputTypesForReturn(std::map> &ans) { + ans[kOpIReturn] = std::vector({ kTypeInt }); + ans[kOpLReturn] = std::vector({ kTypeLong }); + ans[kOpFReturn] = std::vector({ kTypeFloat }); + ans[kOpDReturn] = std::vector({ kTypeDouble }); + ans[kOpAReturn] = std::vector({ kTypeRef }); + ans[kOpReturn] = emptyPrimTypes; +} + +void JBCOpDefault::InitMapOpOutputTypesForReturn(std::map &ans) { + ans[kOpIReturn] = kTypeDefault; + ans[kOpLReturn] = kTypeDefault; + ans[kOpFReturn] = kTypeDefault; + ans[kOpDReturn] = kTypeDefault; + ans[kOpAReturn] = kTypeDefault; + ans[kOpReturn] = kTypeDefault; +} + +void JBCOpDefault::InitMapOpInputTypesForThrow(std::map> &ans) { + ans[kOpAThrow] = std::vector({ kTypeRef }); +} + +void JBCOpDefault::InitMapOpOutputTypesForThrow(std::map &ans) { + ans[kOpAThrow] = kTypeDefault; +} + +void JBCOpDefault::InitMapOpInputTypesForMonitor(std::map> &ans) { + ans[kOpMonitorEnter] = std::vector({ kTypeRef }); + ans[kOpMonitorExit] = std::vector({ kTypeRef }); +} + +void JBCOpDefault::InitMapOpOutputTypesForMonitor(std::map &ans) { + ans[kOpMonitorEnter] = kTypeDefault; + ans[kOpMonitorExit] = kTypeDefault; +} + +void JBCOpDefault::InitMapOpInputTypesForArrayLength(std::map> &ans) { + ans[kOpArrayLength] = std::vector({ kTypeRef }); +} + +void JBCOpDefault::InitMapOpOutputTypesForArrayLength(std::map &ans) { + ans[kOpArrayLength] = kTypeInt; +} + +// ---------- JBCOpConst ---------- +std::map JBCOpConst::valueMapI = JBCOpConst::InitValueMapI(); +std::map JBCOpConst::valueMapJ = JBCOpConst::InitValueMapJ(); +std::map JBCOpConst::valueMapF = JBCOpConst::InitValueMapF(); +std::map JBCOpConst::valueMapD = JBCOpConst::InitValueMapD(); + +JBCOpConst::JBCOpConst(MapleAllocator &allocator, JBCOpcode opIn, JBCOpcodeKind kindIn, bool wideIn) + : JBCOp(allocator, opIn, kindIn, wideIn) { + u.raw = 0; +} + +bool JBCOpConst::ParseFileImpl(BasicIORead &io) { + bool success = false; + if (CheckNotWide(io) == false) { + return false; + } + switch (op) { + case jbc::kOpBiPush: + u.bvalue = io.ReadInt8(success); + break; + case jbc::kOpSiPush: + u.svalue = io.ReadInt16(success); + break; + case jbc::kOpLdc: + u.index = io.ReadUInt8(success); + break; + case jbc::kOpLdcW: + case jbc::kOpLdc2W: + u.index = io.ReadUInt16(success); + break; + default: + success = true; + break; + } + return success; +} + +JBCPrimType JBCOpConst::GetOutputTypesToStackImpl(const JBCConstPool &constPool) const { + switch (op) { + case jbc::kOpAConstNull: + return kTypeRef; + case jbc::kOpIConstM1: + case jbc::kOpIConst0: + case jbc::kOpIConst1: + case jbc::kOpIConst2: + case jbc::kOpIConst3: + case jbc::kOpIConst4: + case jbc::kOpIConst5: + return kTypeInt; + case jbc::kOpLConst0: + case jbc::kOpLConst1: + return kTypeLong; + case jbc::kOpFConst0: + case jbc::kOpFConst1: + case jbc::kOpFConst2: + return kTypeFloat; + case jbc::kOpDConst0: + case jbc::kOpDConst1: + return kTypeDouble; + case jbc::kOpBiPush: + return 
kTypeByteOrBoolean; + case jbc::kOpSiPush: + return kTypeShort; + case jbc::kOpLdc: + case jbc::kOpLdcW: + case jbc::kOpLdc2W: { + const JBCConst *constRaw = constPool.GetConstByIdx(GetIndex()); + if (constRaw != nullptr) { + switch (constRaw->GetTag()) { + case kConstInteger: + return jbc::kTypeInt; + case kConstFloat: + return jbc::kTypeFloat; + case kConstLong: + return jbc::kTypeLong; + case kConstDouble: + return jbc::kTypeDouble; + case kConstClass: + case kConstString: + return jbc::kTypeRef; + default: + CHECK_FATAL(false, "Unsupported const tag %d", constRaw->GetTag()); + } + } + break; + } + default: + CHECK_FATAL(false, "Unsupported opcode %s", opcodeInfo.GetOpcodeName(op).c_str()); + } + return kTypeDefault; +} + +std::string JBCOpConst::DumpImpl(const JBCConstPool &constPool) const { + switch (op) { + case jbc::kOpBiPush: + return DumpBiPush(); + case jbc::kOpSiPush: + return DumpSiPush(); + case jbc::kOpLdc: + case jbc::kOpLdcW: + case jbc::kOpLdc2W: + return DumpLdc(constPool); + default: + return JBCOp::DumpImpl(constPool); + } +} + +std::string JBCOpConst::DumpBiPush() const { + std::stringstream ss; + ss << GetOpcodeName() << " byte " << static_cast(u.bvalue); + return ss.str(); +} + +std::string JBCOpConst::DumpSiPush() const { + std::stringstream ss; + ss << GetOpcodeName() << " short " << static_cast(u.svalue); + return ss.str(); +} + +std::string JBCOpConst::DumpLdc(const JBCConstPool &constPool) const { + std::stringstream ss; + ss << GetOpcodeName() << " "; + const JBCConst *constRaw = constPool.GetConstByIdx(GetIndex()); + if (constRaw != nullptr) { + switch (constRaw->GetTag()) { + case kConstInteger: + case kConstFloat: { + const JBCConst4Byte *const4B = static_cast(constRaw); + if (constRaw->GetTag() == kConstInteger) { + ss << "int " << const4B->GetInt32(); + } else { + ss << "float " << const4B->GetFloat(); + } + break; + } + case kConstLong: + case kConstDouble: { + const JBCConst8Byte *const8B = static_cast(constRaw); + if (constRaw->GetTag() == kConstLong) { + ss << "long " << const8B->GetInt64(); + } else { + ss << "double " << const8B->GetDouble(); + } + break; + } + case kConstClass: { + const JBCConstClass *constClass = static_cast(constRaw); + ss << "Class " << constClass->GetClassNameOrin(); + break; + } + case kConstString: { + const JBCConstString *constString = static_cast(constRaw); + ss << "String " << "\"" << constString->GetString() << "\""; + break; + } + case kConstMethodType: { + ss << "MethodType "; + break; + } + case kConstMethodHandleInfo: { + ss << "MethodHandle "; + break; + } + default: + ss << "Unsupported const tag " << constRaw->GetTag(); + break; + } + } else { + ss << "invalid const index"; + } + return ss.str(); +} + +int32 JBCOpConst::GetValueInt() const { + auto it = valueMapI.find(op); + CHECK_FATAL(it != valueMapI.end(), "unsupport opcode %s", opcodeInfo.GetOpcodeName(op).c_str()); + return it->second; +} + +int64 JBCOpConst::GetValueLong() const { + auto it = valueMapJ.find(op); + CHECK_FATAL(it != valueMapJ.end(), "unsupport opcode %s", opcodeInfo.GetOpcodeName(op).c_str()); + return it->second; +} + +float JBCOpConst::GetValueFloat() const { + auto it = valueMapF.find(op); + CHECK_FATAL(it != valueMapF.end(), "unsupport opcode %s", opcodeInfo.GetOpcodeName(op).c_str()); + return it->second; +} + +double JBCOpConst::GetValueDouble() const { + auto it = valueMapD.find(op); + CHECK_FATAL(it != valueMapD.end(), "unsupport opcode %s", opcodeInfo.GetOpcodeName(op).c_str()); + return it->second; +} + +int8 
JBCOpConst::GetValueByte() const { + CHECK_FATAL(op == kOpBiPush, "unsupport opcode %s", opcodeInfo.GetOpcodeName(op).c_str()); + return u.bvalue; +} + +int16 JBCOpConst::GetValueShort() const { + CHECK_FATAL(op == kOpSiPush, "unsupport opcode %s", opcodeInfo.GetOpcodeName(op).c_str()); + return u.svalue; +} + +std::map JBCOpConst::InitValueMapI() { + std::map valueMap; + valueMap[jbc::kOpIConstM1] = -1; + valueMap[jbc::kOpIConst0] = 0; + valueMap[jbc::kOpIConst1] = 1; + valueMap[jbc::kOpIConst2] = 2; + valueMap[jbc::kOpIConst3] = 3; + valueMap[jbc::kOpIConst4] = 4; + valueMap[jbc::kOpIConst5] = 5; + return valueMap; +} + +std::map JBCOpConst::InitValueMapJ() { + std::map valueMap; + valueMap[jbc::kOpLConst0] = 0L; + valueMap[jbc::kOpLConst1] = 1L; + return valueMap; +} + +std::map JBCOpConst::InitValueMapF() { + std::map valueMap; + valueMap[jbc::kOpFConst0] = 0.0F; + valueMap[jbc::kOpFConst1] = 1.0F; + valueMap[jbc::kOpFConst2] = 2.0F; + return valueMap; +} + +std::map JBCOpConst::InitValueMapD() { + std::map valueMap; + valueMap[jbc::kOpDConst0] = 0.0; + valueMap[jbc::kOpDConst1] = 1.0; + return valueMap; +} + +// ---------- JBCOpSlotOpr ---------- +std::map> JBCOpSlotOpr::mapSlotIdxAndType = + JBCOpSlotOpr::InitMapSlotIdxAndType(); +std::map> JBCOpSlotOpr::mapOpInputTypes = JBCOpSlotOpr::InitMapOpInputTypes(); +std::map JBCOpSlotOpr::mapOpOutputType = JBCOpSlotOpr::InitMapOpOutputType(); +std::vector JBCOpSlotOpr::inputTypesAddressOpr = { JBCPrimType::kTypeAddress }; + +JBCOpSlotOpr::JBCOpSlotOpr(MapleAllocator &allocator, JBCOpcode opIn, JBCOpcodeKind kindIn, bool wideIn) + : JBCOp(allocator, opIn, kindIn, wideIn), slotIdx(0), isAddressOpr(false) {} + +bool JBCOpSlotOpr::IsAStore() const { + switch (op) { + case jbc::kOpAStore: + case jbc::kOpAStore0: + case jbc::kOpAStore1: + case jbc::kOpAStore2: + case jbc::kOpAStore3: + return true; + default: + return false; + } +} + +bool JBCOpSlotOpr::ParseFileImpl(BasicIORead &io) { + bool success = false; + switch (op) { + case jbc::kOpILoad: + case jbc::kOpLLoad: + case jbc::kOpFLoad: + case jbc::kOpDLoad: + case jbc::kOpALoad: + case jbc::kOpIStore: + case jbc::kOpLStore: + case jbc::kOpFStore: + case jbc::kOpDStore: + case jbc::kOpAStore: + if (wide) { + slotIdx = io.ReadUInt16(success); + } else { + slotIdx = io.ReadUInt8(success); + } + break; + default: + std::map>::const_iterator it = mapSlotIdxAndType.find(op); + if (it == mapSlotIdxAndType.end()) { + ERR(kLncErr, "Unexpected opcode %s for SlotOpr", GetOpcodeName().c_str()); + success = false; + } else { + slotIdx = it->second.first; + success = true; + } + break; + } + return success; +} + +const std::vector &JBCOpSlotOpr::GetInputTypesFromStackImpl() const { + if (isAddressOpr) { + return inputTypesAddressOpr; + } + auto it = mapOpInputTypes.find(op); + CHECK_FATAL(it != mapOpInputTypes.end(), "Unsupported opcode %s", opcodeInfo.GetOpcodeName(op).c_str()); + return it->second; +} + +JBCPrimType JBCOpSlotOpr::GetOutputTypesToStackImpl() const { + auto it = mapOpOutputType.find(op); + CHECK_FATAL(it != mapOpOutputType.end(), "Unsupported opcode %s", opcodeInfo.GetOpcodeName(op).c_str()); + return it->second; +} + +std::string JBCOpSlotOpr::DumpImpl(const JBCConstPool &constPool) const { + (void) constPool; + std::stringstream ss; + ss << GetOpcodeName() << " " << slotIdx; + return ss.str(); +} + +std::map> JBCOpSlotOpr::InitMapSlotIdxAndType() { + std::map> result; + result[kOpILoad] = std::make_pair(0, kTypeInt); + result[kOpLLoad] = std::make_pair(0, kTypeLong); + result[kOpFLoad] = 
std::make_pair(0, kTypeFloat); + result[kOpDLoad] = std::make_pair(0, kTypeDouble); + result[kOpALoad] = std::make_pair(0, kTypeRef); + result[kOpILoad0] = std::make_pair(0, kTypeInt); + result[kOpILoad1] = std::make_pair(1, kTypeInt); + result[kOpILoad2] = std::make_pair(2, kTypeInt); + result[kOpILoad3] = std::make_pair(3, kTypeInt); + result[kOpLLoad0] = std::make_pair(0, kTypeLong); + result[kOpLLoad1] = std::make_pair(1, kTypeLong); + result[kOpLLoad2] = std::make_pair(2, kTypeLong); + result[kOpLLoad3] = std::make_pair(3, kTypeLong); + result[kOpFLoad0] = std::make_pair(0, kTypeFloat); + result[kOpFLoad1] = std::make_pair(1, kTypeFloat); + result[kOpFLoad2] = std::make_pair(2, kTypeFloat); + result[kOpFLoad3] = std::make_pair(3, kTypeFloat); + result[kOpDLoad0] = std::make_pair(0, kTypeDouble); + result[kOpDLoad1] = std::make_pair(1, kTypeDouble); + result[kOpDLoad2] = std::make_pair(2, kTypeDouble); + result[kOpDLoad3] = std::make_pair(3, kTypeDouble); + result[kOpALoad0] = std::make_pair(0, kTypeRef); + result[kOpALoad1] = std::make_pair(1, kTypeRef); + result[kOpALoad2] = std::make_pair(2, kTypeRef); + result[kOpALoad3] = std::make_pair(3, kTypeRef); + result[kOpIStore] = std::make_pair(0, kTypeInt); + result[kOpLStore] = std::make_pair(0, kTypeLong); + result[kOpFStore] = std::make_pair(0, kTypeFloat); + result[kOpDStore] = std::make_pair(0, kTypeDouble); + result[kOpAStore] = std::make_pair(0, kTypeRef); + result[kOpIStore0] = std::make_pair(0, kTypeInt); + result[kOpIStore1] = std::make_pair(1, kTypeInt); + result[kOpIStore2] = std::make_pair(2, kTypeInt); + result[kOpIStore3] = std::make_pair(3, kTypeInt); + result[kOpLStore0] = std::make_pair(0, kTypeLong); + result[kOpLStore1] = std::make_pair(1, kTypeLong); + result[kOpLStore2] = std::make_pair(2, kTypeLong); + result[kOpLStore3] = std::make_pair(3, kTypeLong); + result[kOpFStore0] = std::make_pair(0, kTypeFloat); + result[kOpFStore1] = std::make_pair(1, kTypeFloat); + result[kOpFStore2] = std::make_pair(2, kTypeFloat); + result[kOpFStore3] = std::make_pair(3, kTypeFloat); + result[kOpDStore0] = std::make_pair(0, kTypeDouble); + result[kOpDStore1] = std::make_pair(1, kTypeDouble); + result[kOpDStore2] = std::make_pair(2, kTypeDouble); + result[kOpDStore3] = std::make_pair(3, kTypeDouble); + result[kOpAStore0] = std::make_pair(0, kTypeRef); + result[kOpAStore1] = std::make_pair(1, kTypeRef); + result[kOpAStore2] = std::make_pair(2, kTypeRef); + result[kOpAStore3] = std::make_pair(3, kTypeRef); + return result; +} + +std::map> JBCOpSlotOpr::InitMapOpInputTypes() { + std::map> ans; + ans[kOpILoad] = emptyPrimTypes; + ans[kOpFLoad] = emptyPrimTypes; + ans[kOpLLoad] = emptyPrimTypes; + ans[kOpDLoad] = emptyPrimTypes; + ans[kOpALoad] = emptyPrimTypes; + ans[kOpILoad0] = emptyPrimTypes; + ans[kOpILoad1] = emptyPrimTypes; + ans[kOpILoad2] = emptyPrimTypes; + ans[kOpILoad3] = emptyPrimTypes; + ans[kOpLLoad0] = emptyPrimTypes; + ans[kOpLLoad1] = emptyPrimTypes; + ans[kOpLLoad2] = emptyPrimTypes; + ans[kOpLLoad3] = emptyPrimTypes; + ans[kOpFLoad0] = emptyPrimTypes; + ans[kOpFLoad1] = emptyPrimTypes; + ans[kOpFLoad2] = emptyPrimTypes; + ans[kOpFLoad3] = emptyPrimTypes; + ans[kOpDLoad0] = emptyPrimTypes; + ans[kOpDLoad1] = emptyPrimTypes; + ans[kOpDLoad2] = emptyPrimTypes; + ans[kOpDLoad3] = emptyPrimTypes; + ans[kOpALoad0] = emptyPrimTypes; + ans[kOpALoad1] = emptyPrimTypes; + ans[kOpALoad2] = emptyPrimTypes; + ans[kOpALoad3] = emptyPrimTypes; + ans[kOpIStore] = std::vector({ kTypeInt }); + ans[kOpLStore] = std::vector({ kTypeLong }); 
+ ans[kOpFStore] = std::vector({ kTypeFloat }); + ans[kOpDStore] = std::vector({ kTypeDouble }); + ans[kOpAStore] = std::vector({ kTypeRef }); + ans[kOpIStore0] = std::vector({ kTypeInt }); + ans[kOpIStore1] = std::vector({ kTypeInt }); + ans[kOpIStore2] = std::vector({ kTypeInt }); + ans[kOpIStore3] = std::vector({ kTypeInt }); + ans[kOpLStore0] = std::vector({ kTypeLong }); + ans[kOpLStore1] = std::vector({ kTypeLong }); + ans[kOpLStore2] = std::vector({ kTypeLong }); + ans[kOpLStore3] = std::vector({ kTypeLong }); + ans[kOpFStore0] = std::vector({ kTypeFloat }); + ans[kOpFStore1] = std::vector({ kTypeFloat }); + ans[kOpFStore2] = std::vector({ kTypeFloat }); + ans[kOpFStore3] = std::vector({ kTypeFloat }); + ans[kOpDStore0] = std::vector({ kTypeDouble }); + ans[kOpDStore1] = std::vector({ kTypeDouble }); + ans[kOpDStore2] = std::vector({ kTypeDouble }); + ans[kOpDStore3] = std::vector({ kTypeDouble }); + ans[kOpAStore0] = std::vector({ kTypeRef }); + ans[kOpAStore1] = std::vector({ kTypeRef }); + ans[kOpAStore2] = std::vector({ kTypeRef }); + ans[kOpAStore3] = std::vector({ kTypeRef }); + return ans; +} + +std::map JBCOpSlotOpr::InitMapOpOutputType() { + std::map ans; + ans[kOpILoad] = kTypeInt; + ans[kOpFLoad] = kTypeFloat; + ans[kOpLLoad] = kTypeLong; + ans[kOpDLoad] = kTypeDouble; + ans[kOpALoad] = kTypeRef; + ans[kOpILoad0] = kTypeInt; + ans[kOpILoad1] = kTypeInt; + ans[kOpILoad2] = kTypeInt; + ans[kOpILoad3] = kTypeInt; + ans[kOpLLoad0] = kTypeLong; + ans[kOpLLoad1] = kTypeLong; + ans[kOpLLoad2] = kTypeLong; + ans[kOpLLoad3] = kTypeLong; + ans[kOpFLoad0] = kTypeFloat; + ans[kOpFLoad1] = kTypeFloat; + ans[kOpFLoad2] = kTypeFloat; + ans[kOpFLoad3] = kTypeFloat; + ans[kOpDLoad0] = kTypeDouble; + ans[kOpDLoad1] = kTypeDouble; + ans[kOpDLoad2] = kTypeDouble; + ans[kOpDLoad3] = kTypeDouble; + ans[kOpALoad0] = kTypeRef; + ans[kOpALoad1] = kTypeRef; + ans[kOpALoad2] = kTypeRef; + ans[kOpALoad3] = kTypeRef; + ans[kOpIStore] = kTypeDefault; + ans[kOpLStore] = kTypeDefault; + ans[kOpFStore] = kTypeDefault; + ans[kOpDStore] = kTypeDefault; + ans[kOpAStore] = kTypeDefault; + ans[kOpIStore0] = kTypeDefault; + ans[kOpIStore1] = kTypeDefault; + ans[kOpIStore2] = kTypeDefault; + ans[kOpIStore3] = kTypeDefault; + ans[kOpLStore0] = kTypeDefault; + ans[kOpLStore1] = kTypeDefault; + ans[kOpLStore2] = kTypeDefault; + ans[kOpLStore3] = kTypeDefault; + ans[kOpFStore0] = kTypeDefault; + ans[kOpFStore1] = kTypeDefault; + ans[kOpFStore2] = kTypeDefault; + ans[kOpFStore3] = kTypeDefault; + ans[kOpDStore0] = kTypeDefault; + ans[kOpDStore1] = kTypeDefault; + ans[kOpDStore2] = kTypeDefault; + ans[kOpDStore3] = kTypeDefault; + ans[kOpAStore0] = kTypeDefault; + ans[kOpAStore1] = kTypeDefault; + ans[kOpAStore2] = kTypeDefault; + ans[kOpAStore3] = kTypeDefault; + return ans; +} + +// ---------- JBCOpMathInc ---------- +JBCOpMathInc::JBCOpMathInc(MapleAllocator &allocator, JBCOpcode opIn, JBCOpcodeKind kindIn, bool wideIn) + : JBCOp(allocator, opIn, kindIn, wideIn), index(0), incr(0) {} + +bool JBCOpMathInc::ParseFileImpl(BasicIORead &io) { + bool success = false; + if (wide) { + index = io.ReadUInt16(success); + incr = io.ReadInt16(success); + } else { + index = io.ReadUInt8(success); + incr = io.ReadInt8(success); + } + return success; +} + +// ---------- JBCOpBranch ---------- +std::map> JBCOpBranch::mapOpInputTypes = JBCOpBranch::InitMapOpInputTypes(); + +JBCOpBranch::JBCOpBranch(MapleAllocator &allocator, JBCOpcode opIn, JBCOpcodeKind kindIn, bool wideIn) + : JBCOp(allocator, opIn, kindIn, wideIn), target(0) {} 
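JBCOpBranch::ParseFileImpl below records io.GetPos() - 1 as the pc because the opcode byte has already been consumed by the time the operand is read; the signed 16-bit offset that follows is relative to the opcode itself, as in the class-file format. A minimal sketch of the same arithmetic over a raw big-endian code buffer (ResolveBranchTarget is a hypothetical name, not the project's reader API):

#include <cstdint>
#include <cstdio>

// Illustrative only: resolve an ifeq-style branch target from raw bytecode.
// 'pos' indexes the opcode byte itself; the signed 16-bit operand that follows
// is big-endian and relative to the opcode.
static uint32_t ResolveBranchTarget(const uint8_t *code, uint32_t pos) {
  int16_t offset = static_cast<int16_t>((code[pos + 1] << 8) | code[pos + 2]);
  return pos + static_cast<int32_t>(offset);
}

int main() {
  // 0x99 is ifeq; the operand 0x0005 branches 5 bytes forward from the opcode.
  const uint8_t code[] = { 0x00, 0x99, 0x00, 0x05 };
  std::printf("target = %u\n", ResolveBranchTarget(code, 1));  // prints "target = 6"
  return 0;
}

goto_w and jsr_w follow the same rule with a 32-bit operand, which is why their wide variants below call ReadInt32 against the same pc.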
+ +bool JBCOpBranch::ParseFileImpl(BasicIORead &io) { + if (JBCOp::CheckNotWide(io) == false) { + return false; + } + bool success = true; + int64 pc = io.GetPos() - 1; + target = GetTargetbyInt64(pc + io.ReadInt16(success)); + return success; +} + +std::string JBCOpBranch::DumpImpl(const JBCConstPool &constPool) const { + (void) constPool; + std::stringstream ss; + ss << GetOpcodeName() << " " << target; + return ss.str(); +} + +const std::vector &JBCOpBranch::GetInputTypesFromStackImpl() const { + auto it = mapOpInputTypes.find(op); + CHECK_FATAL(it != mapOpInputTypes.end(), "Unsupported opcode %s", opcodeInfo.GetOpcodeName(op).c_str()); + return it->second; +} + +std::map> JBCOpBranch::InitMapOpInputTypes() { + std::map> ans; + ans[kOpIfeq] = std::vector({ kTypeInt }); + ans[kOpIfne] = std::vector({ kTypeInt }); + ans[kOpIflt] = std::vector({ kTypeInt }); + ans[kOpIfge] = std::vector({ kTypeInt }); + ans[kOpIfgt] = std::vector({ kTypeInt }); + ans[kOpIfle] = std::vector({ kTypeInt }); + ans[kOpIfeq] = std::vector({ kTypeInt }); + ans[kOpIfICmpeq] = std::vector({ kTypeInt, kTypeInt }); + ans[kOpIfICmpne] = std::vector({ kTypeInt, kTypeInt }); + ans[kOpIfICmplt] = std::vector({ kTypeInt, kTypeInt }); + ans[kOpIfICmpge] = std::vector({ kTypeInt, kTypeInt }); + ans[kOpIfICmpgt] = std::vector({ kTypeInt, kTypeInt }); + ans[kOpIfICmple] = std::vector({ kTypeInt, kTypeInt }); + ans[kOpIfACmpeq] = std::vector({ kTypeRef, kTypeRef }); + ans[kOpIfACmpne] = std::vector({ kTypeRef, kTypeRef }); + ans[kOpIfNull] = std::vector({ kTypeRef }); + ans[kOpIfNonNull] = std::vector({ kTypeRef }); + return ans; +} + +// ---------- JBCOpGoto ---------- +JBCOpGoto::JBCOpGoto(MapleAllocator &allocator, JBCOpcode opIn, JBCOpcodeKind kindIn, bool wideIn) + : JBCOp(allocator, opIn, kindIn, wideIn), target(0) {} + +bool JBCOpGoto::ParseFileImpl(BasicIORead &io) { + if (JBCOp::CheckNotWide(io) == false) { + return false; + } + bool success = true; + int64 pc = io.GetPos() - 1; + switch (op) { + case jbc::kOpGoto: + target = GetTargetbyInt64(pc + io.ReadInt16(success)); + break; + case jbc::kOpGotoW: + target = GetTargetbyInt64(pc + io.ReadInt32(success)); + break; + default: + CHECK_FATAL(false, "Unexpected opcode %s for Goto", GetOpcodeName().c_str()); + return false; + } + return success; +} + +std::string JBCOpGoto::DumpImpl(const JBCConstPool &constPool) const { + (void) constPool; + std::stringstream ss; + ss << GetOpcodeName() << " " << target; + return ss.str(); +} + +// ---------- JBCOpSwitch ---------- +std::map> JBCOpSwitch::mapOpInputTypes = JBCOpSwitch::InitMapOpInputTypes(); + +JBCOpSwitch::JBCOpSwitch(MapleAllocator &allocator, JBCOpcode opIn, JBCOpcodeKind kindIn, bool wideIn) + : JBCOp(allocator, opIn, kindIn, wideIn), + targets(std::less(), allocator.Adapter()), + targetDefault(0) {} + +bool JBCOpSwitch::ParseFileImpl(BasicIORead &io) { + bool success = false; + int64 pc = io.GetPos() - 1; + const uint32 kAlign32 = 4; + if (op == jbc::kOpTableSwitch) { + while (io.GetPos() % kAlign32 != 0) { + (void)io.ReadUInt8(success); + } + targetDefault = GetTargetbyInt64(pc + io.ReadUInt32(success)); + int32 low = io.ReadInt32(success); + int32 high = io.ReadInt32(success); + for (int32 i = low; i <= high && success; ++i) { + int32 offset = io.ReadInt32(success); + CHECK_FATAL(targets.insert(std::make_pair(i, GetTargetbyInt64(offset + pc))).second, "targets insert failed"); + } + } else if (op == jbc::kOpLookupSwitch) { + while (io.GetPos() % kAlign32 != 0) { + (void)io.ReadUInt8(success); + } + targetDefault 
= GetTargetbyInt64(pc + io.ReadUInt32(success)); + uint32 npairs = io.ReadUInt32(success); + for (uint32 i = 0; i < npairs && success; ++i) { + int32 value = io.ReadInt32(success); + int32 offset = io.ReadInt32(success); + CHECK_FATAL(targets.insert(std::make_pair(value, GetTargetbyInt64(offset + pc))).second, "targets insert failed"); + } + } + return success; +} + +const std::vector &JBCOpSwitch::GetInputTypesFromStackImpl() const { + auto it = mapOpInputTypes.find(op); + CHECK_FATAL(it != mapOpInputTypes.end(), "Unsupported opcode %s", opcodeInfo.GetOpcodeName(op).c_str()); + return it->second; +} + +std::string JBCOpSwitch::DumpImpl(const JBCConstPool &constPool) const { + (void) constPool; + std::stringstream ss; + ss << GetOpcodeName() << " { default=" << targetDefault; + for (auto it : targets) { + ss << "," << it.first << ":" << it.second; + } + ss << " }"; + return ss.str(); +} + +std::map> JBCOpSwitch::InitMapOpInputTypes() { + std::map> ans; + ans[kOpTableSwitch] = std::vector({ kTypeInt }); + ans[kOpLookupSwitch] = std::vector({ kTypeInt }); + return ans; +} + +// ---------- JBCOpFieldOpr ---------- +JBCOpFieldOpr::JBCOpFieldOpr(MapleAllocator &allocator, JBCOpcode opIn, JBCOpcodeKind kindIn, bool wideIn) + : JBCOp(allocator, opIn, kindIn, wideIn), fieldIdx(0) {} + +bool JBCOpFieldOpr::ParseFileImpl(BasicIORead &io) { + if (JBCOp::CheckNotWide(io) == false) { + return false; + } + bool success = false; + fieldIdx = io.ReadUInt16(success); + return success; +} + +const std::vector &JBCOpFieldOpr::GetInputTypesFromStackImpl() const { + CHECK_FATAL(false, "Restricted Calls"); + return emptyPrimTypes; +} + +std::vector JBCOpFieldOpr::GetInputTypesFromStackImpl(const JBCConstPool &constPool) const { + switch (op) { + case kOpGetField: + case kOpGetStatic: + return GetInputTypesFromStackForGet(constPool); + case kOpPutField: + case kOpPutStatic: + return GetInputTypesFromStackForPut(constPool); + default: + CHECK_FATAL(false, "Unsupported opcode %s", opcodeInfo.GetOpcodeName(op).c_str()); + return emptyPrimTypes; + } +} + +JBCPrimType JBCOpFieldOpr::GetOutputTypesToStackImpl() const { + CHECK_FATAL(false, "Restricted Calls"); + return kTypeDefault; +} + +JBCPrimType JBCOpFieldOpr::GetOutputTypesToStackImpl(const JBCConstPool &constPool) const { + switch (op) { + case kOpGetField: + case kOpGetStatic: { + std::string desc = GetFieldType(constPool); + return JBCUtil::GetPrimTypeForName(desc); + } + case kOpPutField: + case kOpPutStatic: + return kTypeDefault; + default: + CHECK_FATAL(false, "Unsupported opcode %s", opcodeInfo.GetOpcodeName(op).c_str()); + return kTypeDefault; + } +} + +std::string JBCOpFieldOpr::DumpImpl(const JBCConstPool &constPool) const { + std::stringstream ss; + ss << GetOpcodeName() << " "; + const JBCConst *constRaw = constPool.GetConstByIdxWithTag(fieldIdx, JBCConstTag::kConstFieldRef); + if (constRaw == nullptr) { + ss << "unknown"; + } else { + const JBCConstRef *constRef = static_cast(constRaw); + CHECK_NULL_FATAL(constRef->GetConstClass()); + ss << constRef->GetConstClass()->GetClassNameOrin() << "." 
<< constRef->GetName() << ":" << + constRef->GetDesc(); + } + return ss.str(); +} + +std::vector JBCOpFieldOpr::GetInputTypesFromStackForGet(const JBCConstPool &constPool) const { + (void) constPool; + std::vector ans; + if (op == kOpGetField) { + ans.push_back(kTypeRef); + } + return ans; +} + +std::vector JBCOpFieldOpr::GetInputTypesFromStackForPut(const JBCConstPool &constPool) const { + std::vector ans; + std::string desc = GetFieldType(constPool); + if (op == kOpPutField) { + ans.push_back(kTypeRef); + } + ans.push_back(JBCUtil::GetPrimTypeForName(desc)); + return ans; +} + +std::string JBCOpFieldOpr::GetFieldType(const JBCConstPool &constPool) const { + std::string desc = ""; + const JBCConst *constRaw = constPool.GetConstByIdxWithTag(fieldIdx, kConstFieldRef); + if (constRaw != nullptr) { + const JBCConstRef *constRef = static_cast(constRaw); + desc = constRef->GetDesc(); + } + return desc; +} + +// ---------- JBCOpInvoke ---------- +JBCOpInvoke::JBCOpInvoke(MapleAllocator &allocator, JBCOpcode opIn, JBCOpcodeKind kindIn, bool wideIn) + : JBCOp(allocator, opIn, kindIn, wideIn), methodIdx(0), count(0) {} + +bool JBCOpInvoke::ParseFileImpl(BasicIORead &io) { + if (JBCOp::CheckNotWide(io) == false) { + return false; + } + bool success = false; + switch (op) { + case jbc::kOpInvokeVirtual: + case jbc::kOpInvokeSpecial: + case jbc::kOpInvokeStatic: + methodIdx = io.ReadUInt16(success); + break; + case jbc::kOpInvokeInterface: + methodIdx = io.ReadUInt16(success); + count = io.ReadUInt8(success); + (void)io.ReadUInt8(success); + break; + case jbc::kOpInvokeDynamic: + methodIdx = io.ReadUInt16(success); + (void)io.ReadUInt8(success); + (void)io.ReadUInt8(success); + break; + default: + ASSERT(false, "Unexpected opcode %s for Invoke", GetOpcodeName().c_str()); + return false; + } + return success; +} + +const std::vector &JBCOpInvoke::GetInputTypesFromStackImpl() const { + CHECK_FATAL(false, "Restricted Calls"); + return emptyPrimTypes; +} + +std::vector JBCOpInvoke::GetInputTypesFromStackImpl(const JBCConstPool &constPool) const { + std::vector ans; + std::string desc = GetMethodDescription(constPool); + std::vector typeNames = FEUtilJava::SolveMethodSignature(desc); + CHECK_FATAL(typeNames.size() > 0, "Invalid method description: %s", desc.c_str()); + if (op != kOpInvokeStatic && op != kOpInvokeDynamic) { + ans.push_back(kTypeRef); + } + for (size_t i = 1; i < typeNames.size(); i++) { + ans.push_back(JBCUtil::GetPrimTypeForName(typeNames[i])); + } + return ans; +} + +JBCPrimType JBCOpInvoke::GetOutputTypesToStackImpl() const { + CHECK_FATAL(false, "Restricted Calls"); + return kTypeDefault; +} + +JBCPrimType JBCOpInvoke::GetOutputTypesToStackImpl(const JBCConstPool &constPool) const { + std::string desc = GetMethodDescription(constPool); + std::vector typeNames = FEUtilJava::SolveMethodSignature(desc); + CHECK_FATAL(typeNames.size() > 0, "Invalid method description: %s", desc.c_str()); + return JBCUtil::GetPrimTypeForName(typeNames[0]); +} + +std::string JBCOpInvoke::DumpImpl(const JBCConstPool &constPool) const { + std::stringstream ss; + ss << GetOpcodeName() << " "; + const JBCConst *constRaw = constPool.GetConstByIdx(methodIdx); + CHECK_NULL_FATAL(constRaw); + JBCConstTag tag = constRaw->GetTag(); + if (op == jbc::kOpInvokeDynamic) { + if (tag == kConstInvokeDynamic) { + const JBCConstInvokeDynamic *constInvokdDynamic = static_cast(constRaw); + CHECK_NULL_FATAL(constInvokdDynamic->GetConstNameAndType()); + ss << constInvokdDynamic->GetBSMAttrIdx() << ":" << + 
constInvokdDynamic->GetConstNameAndType()->GetName() << ":" << + constInvokdDynamic->GetConstNameAndType()->GetDesc(); + } else { + ss << "invalid const tag"; + } + } else { + if (tag == kConstMethodRef || tag == kConstInterfaceMethodRef) { + const JBCConstRef *constRef = static_cast(constRaw); + CHECK_NULL_FATAL(constRef->GetConstClass()); + ss << constRef->GetConstClass()->GetClassNameOrin() << "." << constRef->GetName() << ":" << + constRef->GetDesc(); + } else { + ss << "invalid const tag"; + } + } + return ss.str(); +} + +std::string JBCOpInvoke::GetMethodDescription(const JBCConstPool &constPool) const { + std::string desc = ""; + switch (op) { + case jbc::kOpInvokeVirtual: { + const JBCConst *constRaw = constPool.GetConstByIdxWithTag(methodIdx, kConstMethodRef); + if (constRaw != nullptr) { + const JBCConstRef *constRef = static_cast(constRaw); + desc = constRef->GetDesc(); + } + break; + } + case jbc::kOpInvokeInterface: { + const JBCConst *constRaw = constPool.GetConstByIdxWithTag(methodIdx, kConstInterfaceMethodRef); + if (constRaw != nullptr) { + const JBCConstRef *constRef = static_cast(constRaw); + desc = constRef->GetDesc(); + } + break; + } + case jbc::kOpInvokeStatic: + case jbc::kOpInvokeSpecial: { + const JBCConst *constRaw = constPool.GetConstByIdx(methodIdx); + if (constRaw != nullptr && (constRaw->GetTag() == kConstMethodRef || + constRaw->GetTag() == kConstInterfaceMethodRef)) { + const JBCConstRef *constRef = static_cast(constRaw); + desc = constRef->GetDesc(); + } + break; + } + case jbc::kOpInvokeDynamic: { + const JBCConst *constRaw = constPool.GetConstByIdxWithTag(methodIdx, kConstInvokeDynamic); + if (constRaw != nullptr) { + const JBCConstInvokeDynamic *constDynamic = static_cast(constRaw); + desc = constDynamic->GetConstNameAndType()->GetDesc(); + } + break; + } + default: + CHECK_FATAL(false, "Unsupported opcode %s", opcodeInfo.GetOpcodeName(op).c_str()); + break; + } + return desc; +} + +// ---------- JBCOpJsrRet ---------- +JBCOpJsr::JBCOpJsr(MapleAllocator &allocator, JBCOpcode opIn, JBCOpcodeKind kindIn, bool wideIn) + : JBCOp(allocator, opIn, kindIn, wideIn), target(0), slotIdx(0), jsrID(0) {} + +bool JBCOpJsr::ParseFileImpl(BasicIORead &io) { + if (JBCOp::CheckNotWide(io) == false) { + return false; + } + bool success = false; + int64 pc = io.GetPos() - 1; + switch (op) { + case jbc::kOpJsr: + target = GetTargetbyInt64(pc + io.ReadInt16(success)); + break; + case jbc::kOpJsrW: + target = GetTargetbyInt64(pc + io.ReadInt32(success)); + break; + default: + ASSERT(false, "Unexpected opcode %s for JSR", GetOpcodeName().c_str()); + return false; + } + return success; +} + +JBCPrimType JBCOpJsr::GetOutputTypesToStackImpl() const { + return JBCPrimType::kTypeAddress; +} + +std::string JBCOpJsr::DumpImpl(const JBCConstPool &constPool) const { + (void) constPool; + std::stringstream ss; + ss << GetOpcodeName() << " " << target; + return ss.str(); +} + +// ---------- JBCOpRet ---------- +JBCOpRet::JBCOpRet(MapleAllocator &allocator, JBCOpcode opIn, JBCOpcodeKind kindIn, bool wideIn) + : JBCOp(allocator, opIn, kindIn, wideIn), index(0) {} + +bool JBCOpRet::ParseFileImpl(BasicIORead &io) { + bool success = false; + if (wide) { + index = io.ReadUInt16(success); + } else { + index = io.ReadUInt8(success); + } + return success; +} + +std::string JBCOpRet::DumpImpl(const JBCConstPool &constPool) const { + (void) constPool; + std::stringstream ss; + ss << GetOpcodeName() << " " << index; + return ss.str(); +} + +// ---------- JBCOpNew ---------- +std::map> 
JBCOpNew::mapOpInputTypes = JBCOpNew::InitMapOpInputTypes(); + +JBCOpNew::JBCOpNew(MapleAllocator &allocator, JBCOpcode opIn, JBCOpcodeKind kindIn, bool wideIn) + : JBCOp(allocator, opIn, kindIn, wideIn), refTypeIdx(0), primType(kPrimNone) {} + +bool JBCOpNew::ParseFileImpl(BasicIORead &io) { + bool success = false; + switch (op) { + case jbc::kOpNew: + case jbc::kOpANewArray: + refTypeIdx = io.ReadUInt16(success); + break; + case jbc::kOpNewArray: + primType = io.ReadUInt8(success); + break; + default: + ASSERT(false, "Unexpected opcode %s for New", GetOpcodeName().c_str()); + return false; + } + return success; +} + +const std::vector &JBCOpNew::GetInputTypesFromStackImpl() const { + auto it = mapOpInputTypes.find(op); + CHECK_FATAL(it != mapOpInputTypes.end(), "Unsupported opcode %s", opcodeInfo.GetOpcodeName(op).c_str()); + return it->second; +} + +JBCPrimType JBCOpNew::GetOutputTypesToStackImpl() const { + return kTypeRef; +} + +std::string JBCOpNew::DumpImpl(const JBCConstPool &constPool) const { + switch (op) { + case jbc::kOpNew: + case jbc::kOpANewArray: { + std::stringstream ss; + const JBCConst *constRaw = constPool.GetConstByIdxWithTag(refTypeIdx, JBCConstTag::kConstClass); + CHECK_NULL_FATAL(constRaw); + const JBCConstClass *constClass = static_cast(constRaw); + ss << GetOpcodeName() << " " << constClass->GetClassNameOrin(); + return ss.str(); + } + case jbc::kOpNewArray: { + std::stringstream ss; + ss << GetOpcodeName() << " " << GetPrimTypeName(); + return ss.str(); + } + default: + ASSERT(false, "Unexpected opcode %s for New", GetOpcodeName().c_str()); + return ""; + } +} + +GStrIdx JBCOpNew::GetTypeNameIdx(const JBCConstPool &constPool) const { + switch (op) { + case jbc::kOpNew: + case jbc::kOpANewArray: { + const JBCConst *constRaw = constPool.GetConstByIdxWithTag(refTypeIdx, JBCConstTag::kConstClass); + CHECK_NULL_FATAL(constRaw); + const JBCConstClass *constClass = static_cast(constRaw); + return constClass->GetClassNameIdxMpl(); + } + case jbc::kOpNewArray: + return GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(GetPrimTypeName()); + default: + ASSERT(false, "Unexpected opcode %s for New", GetOpcodeName().c_str()); + return GStrIdx(0); + } +} + +std::string JBCOpNew::GetTypeName(const JBCConstPool &constPool) const { + switch (op) { + case jbc::kOpNew: + case jbc::kOpANewArray: { + const JBCConst *constRaw = constPool.GetConstByIdxWithTag(refTypeIdx, JBCConstTag::kConstClass); + CHECK_NULL_FATAL(constRaw); + const JBCConstClass *constClass = static_cast(constRaw); + return constClass->GetClassNameMpl(); + } + case jbc::kOpNewArray: + return GetPrimTypeName(); + default: + ASSERT(false, "Unexpected opcode %s for New", GetOpcodeName().c_str()); + return ""; + } +} + +const FEIRType *JBCOpNew::GetFEIRType(const JBCConstPool &constPool) const { + switch (op) { + case jbc::kOpNew: + case jbc::kOpANewArray: { + const JBCConst *constRaw = constPool.GetConstByIdxWithTag(refTypeIdx, JBCConstTag::kConstClass); + CHECK_NULL_FATAL(constRaw); + const JBCConstClass *constClass = static_cast(constRaw); + return constClass->GetFEIRType(); + } + case jbc::kOpNewArray: + return GetPrimFEIRType().get(); + default: + CHECK_FATAL(false, "Unexpected opcode %s for New", GetOpcodeName().c_str()); + return FETypeManager::kPrimFEIRTypeUnknown.get(); + } +} + +std::string JBCOpNew::GetPrimTypeName() const { + switch (primType) { + case kPrimInt: + return "I"; + case kPrimBoolean: + return "Z"; + case kPrimByte: + return "B"; + case kPrimShort: + return "S"; + case kPrimChar: + return 
"C"; + case kPrimLong: + return "J"; + case kPrimFloat: + return "F"; + case kPrimDouble: + return "D"; + default: + return "undefined"; + } +} + +const UniqueFEIRType &JBCOpNew::GetPrimFEIRType() const { + switch (primType) { + case kPrimInt: + return FETypeManager::kPrimFEIRTypeI32; + case kPrimBoolean: + return FETypeManager::kPrimFEIRTypeU1; + case kPrimByte: + return FETypeManager::kPrimFEIRTypeI8; + case kPrimShort: + return FETypeManager::kPrimFEIRTypeI16; + case kPrimChar: + return FETypeManager::kPrimFEIRTypeU16; + case kPrimLong: + return FETypeManager::kPrimFEIRTypeI64; + case kPrimFloat: + return FETypeManager::kPrimFEIRTypeF32; + case kPrimDouble: + return FETypeManager::kPrimFEIRTypeF64; + default: + return FETypeManager::kPrimFEIRTypeUnknown; + } +} + +std::map> JBCOpNew::InitMapOpInputTypes() { + std::map> ans; + ans[kOpNew] = emptyPrimTypes; + ans[kOpNewArray] = std::vector({ kTypeInt }); + ans[kOpANewArray] = std::vector({ kTypeInt }); + return ans; +} + +// ---------- JBCOpMultiANewArray ---------- +JBCOpMultiANewArray::JBCOpMultiANewArray(MapleAllocator &allocator, JBCOpcode opIn, JBCOpcodeKind kindIn, bool wideIn) + : JBCOp(allocator, opIn, kindIn, wideIn), refTypeIdx(0), dim(0) {} + +bool JBCOpMultiANewArray::ParseFileImpl(BasicIORead &io) { + bool success = false; + refTypeIdx = io.ReadUInt16(success); + dim = io.ReadUInt8(success); + return true; +} + +const std::vector &JBCOpMultiANewArray::GetInputTypesFromStackImpl() const { + CHECK_FATAL(false, "Restricted Calls"); + return emptyPrimTypes; +} + +std::vector JBCOpMultiANewArray::GetInputTypesFromStackImpl(const JBCConstPool &constPool) const { + (void) constPool; + std::vector ans; + for (uint8 i = 0; i < dim; i++) { + ans.push_back(kTypeInt); + } + return ans; +} + +JBCPrimType JBCOpMultiANewArray::GetOutputTypesToStackImpl() const { + return kTypeRef; +} + +std::string JBCOpMultiANewArray::DumpImpl(const JBCConstPool &constPool) const { + std::stringstream ss; + ss << GetOpcodeName() << " "; + const JBCConst *constRaw = constPool.GetConstByIdx(refTypeIdx); + CHECK_NULL_FATAL(constRaw); + if (constRaw->GetTag() == kConstClass) { + const JBCConstClass *constClass = static_cast(constRaw); + ss << constClass->GetClassNameOrin() << " dim=" << uint32{ dim }; + } else { + ss << "invalid const tag"; + } + return ss.str(); +} + +// ---------- JBCOpTypeCheck ---------- +std::map> JBCOpTypeCheck::mapOpInputTypes = JBCOpTypeCheck::InitMapOpInputTypes(); +std::map JBCOpTypeCheck::mapOpOutputType = JBCOpTypeCheck::InitMapOpOutputType(); + +JBCOpTypeCheck::JBCOpTypeCheck(MapleAllocator &allocator, JBCOpcode opIn, JBCOpcodeKind kindIn, bool wideIn) + : JBCOp(allocator, opIn, kindIn, wideIn), typeIdx(0) {} + +bool JBCOpTypeCheck::ParseFileImpl(BasicIORead &io) { + if (JBCOp::CheckNotWide(io) == false) { + return false; + } + bool success = false; + switch (op) { + case jbc::kOpCheckCast: + case jbc::kOpInstanceOf: + typeIdx = io.ReadUInt16(success); + break; + default: + ASSERT(false, "Unexpected opcode %s for TypeCheck", GetOpcodeName().c_str()); + return false; + } + return success; +} + +const std::vector &JBCOpTypeCheck::GetInputTypesFromStackImpl() const { + auto it = mapOpInputTypes.find(op); + CHECK_FATAL(it != mapOpInputTypes.end(), "Unsupported opcode %s", opcodeInfo.GetOpcodeName(op).c_str()); + return it->second; +} + +JBCPrimType JBCOpTypeCheck::GetOutputTypesToStackImpl() const { + auto it = mapOpOutputType.find(op); + CHECK_FATAL(it != mapOpOutputType.end(), "Unsupported opcode %s", 
opcodeInfo.GetOpcodeName(op).c_str());
+  return it->second;
+}
+
+std::string JBCOpTypeCheck::DumpImpl(const JBCConstPool &constPool) const {
+  std::stringstream ss;
+  ss << GetOpcodeName() << " ";
+  const JBCConst *constRaw = constPool.GetConstByIdxWithTag(typeIdx, kConstClass);
+  if (constRaw == nullptr) {
+    ss << "invalid type idx";
+    return ss.str();
+  }
+  const JBCConstClass *constClass = static_cast<const JBCConstClass*>(constRaw);
+  ss << constClass->GetClassNameOrin();
+  return ss.str();
+}
+
+std::map<JBCOpcode, std::vector<JBCPrimType>> JBCOpTypeCheck::InitMapOpInputTypes() {
+  std::map<JBCOpcode, std::vector<JBCPrimType>> ans;
+  ans[kOpCheckCast] = std::vector<JBCPrimType>({ kTypeRef });
+  ans[kOpInstanceOf] = std::vector<JBCPrimType>({ kTypeRef });
+  return ans;
+}
+
+std::map<JBCOpcode, JBCPrimType> JBCOpTypeCheck::InitMapOpOutputType() {
+  std::map<JBCOpcode, JBCPrimType> ans;
+  ans[kOpCheckCast] = kTypeRef;
+  ans[kOpInstanceOf] = kTypeInt;
+  return ans;
+}
+}  // namespace jbc
+}  // namespace maple
diff --git a/src/hir2mpl/bytecode_input/class/src/jbc_opcode_helper.cpp b/src/hir2mpl/bytecode_input/class/src/jbc_opcode_helper.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..736ba958ef132e9887decdb6dee6a96c47915e8b
--- /dev/null
+++ b/src/hir2mpl/bytecode_input/class/src/jbc_opcode_helper.cpp
@@ -0,0 +1,191 @@
+/*
+ * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */ +#include "jbc_opcode_helper.h" +#include "jbc_class_const.h" +#include "fe_utils_java.h" +#include "fe_type_manager.h" + +namespace maple { +std::map> JBCOpcodeHelper::mapOpInputPrimTypes = + JBCOpcodeHelper::InitMapOpInputPrimTypes(); + +std::map JBCOpcodeHelper::funcPtrMapGetBaseTypeName = + JBCOpcodeHelper::InitFuncPtrMapGetBaseTypeName(); + +JBCOpcodeHelper::JBCOpcodeHelper(const jbc::JBCClassMethod &argMethod) : method(argMethod) {} + +std::vector JBCOpcodeHelper::GetBaseTypeNamesForOP(const jbc::JBCOp &op, bool &success) { + auto it = funcPtrMapGetBaseTypeName.find(op.GetOpcodeKind()); + if (it == funcPtrMapGetBaseTypeName.end()) { + return GetBaseTypeNamesForOPDefault(success); + } else { + return (this->*(it->second))(op, success); + } +} + +std::map JBCOpcodeHelper::InitFuncPtrMapGetBaseTypeName() { + std::map ans; + ans[jbc::kOpKindConst] = &JBCOpcodeHelper::GetBaseTypeNamesForOPConst; + ans[jbc::kOpKindStaticFieldOpr] = &JBCOpcodeHelper::GetBaseTypeNamesForOPFieldOpr; + ans[jbc::kOpKindFieldOpr] = &JBCOpcodeHelper::GetBaseTypeNamesForOPFieldOpr; + ans[jbc::kOpKindInvoke] = &JBCOpcodeHelper::GetBaseTypeNamesForOPInvoke; + ans[jbc::kOpKindNew] = &JBCOpcodeHelper::GetBaseTypeNamesForOPNew; + ans[jbc::kOpKindMultiANewArray] = &JBCOpcodeHelper::GetBaseTypeNamesForOPMultiANewArray; + ans[jbc::kOpKindTypeCheck] = &JBCOpcodeHelper::GetBaseTypeNamesForOPTypeCheck; + return ans; +} + +std::vector JBCOpcodeHelper::GetBaseTypeNamesForOPDefault(bool &success) const { + success = true; + return std::vector(); +} + +std::vector JBCOpcodeHelper::GetBaseTypeNamesForOPConst(const jbc::JBCOp &op, bool &success) { + std::vector ans; + const jbc::JBCOpConst &opConst = static_cast(op); + if (op.GetOpcode() == jbc::kOpLdc || op.GetOpcode() == jbc::kOpLdcW) { + const jbc::JBCConstPool &constPool = method.GetConstPool(); + const jbc::JBCConst *constItem = constPool.GetConstByIdx(opConst.GetIndex()); + if (constItem == nullptr) { + ssLastError.str(""); + ssLastError << "invalid const pool index (" << opConst.GetIndex() << ") in method " + << method.GetClassName() << "." 
<< method.GetName() << ":" << method.GetDescription(); + success = false; + return ans; + } + if (constItem->GetTag() == jbc::kConstClass) { + success = true; + const jbc::JBCConstClass *constClass = static_cast(constItem); + std::string typeName = constClass->GetClassNameOrin(); + std::string baseTypeName = FETypeManager::GetBaseTypeName(typeName, false); + ans.push_back(baseTypeName); + } + } + success = true; + return ans; +} + +std::vector JBCOpcodeHelper::GetBaseTypeNamesForOPFieldOpr(const jbc::JBCOp &op, bool &success) { + std::vector ans; + const jbc::JBCOpFieldOpr &opFieldOpr = static_cast(op); + const jbc::JBCConstPool &constPool = method.GetConstPool(); + const jbc::JBCConst *constItem = constPool.GetConstByIdxWithTag(opFieldOpr.GetFieldIdx(), jbc::kConstFieldRef); + if (constItem == nullptr) { + success = false; + return ans; + } + const jbc::JBCConstRef *constRef = static_cast(constItem); + ans.push_back(FETypeManager::GetBaseTypeName(constRef->GetDesc(), false)); + success = true; + return ans; +} + +std::vector JBCOpcodeHelper::GetBaseTypeNamesForOPInvoke(const jbc::JBCOp &op, bool &success) { + std::vector ans; + const jbc::JBCOpInvoke &opInvoke = static_cast(op); + const jbc::JBCConstPool &constPool = method.GetConstPool(); + const jbc::JBCConst *constItem = constPool.GetConstByIdx(opInvoke.GetMethodIdx()); + if (constItem == nullptr || + (constItem->GetTag() != jbc::kConstMethodRef && constItem->GetTag() != jbc::kConstInterfaceMethodRef)) { + success = false; + return ans; + } + const jbc::JBCConstRef *constRef = static_cast(constItem); + const std::string desc = constRef->GetDesc(); + std::vector typeNames = FEUtilJava::SolveMethodSignature(desc); + for (const std::string &typeName : typeNames) { + ans.push_back(FETypeManager::GetBaseTypeName(typeName, false)); + } + success = true; + return ans; +} + +std::vector JBCOpcodeHelper::GetBaseTypeNamesForOPNew(const jbc::JBCOp &op, bool &success) { + std::vector ans; + const jbc::JBCOpNew &opNew = static_cast(op); + const jbc::JBCConstPool &constPool = method.GetConstPool(); + if (op.GetOpcode() == jbc::kOpNew || op.GetOpcode() == jbc::kOpANewArray) { + const jbc::JBCConst *constItem = constPool.GetConstByIdxWithTag(opNew.GetRefTypeIdx(), jbc::kConstClass); + if (constItem == nullptr) { + success = false; + return ans; + } + const jbc::JBCConstClass *constClass = static_cast(constItem); + ans.push_back(FETypeManager::GetBaseTypeName(constClass->GetClassNameOrin(), false)); + } + success = true; + return ans; +} + +std::vector JBCOpcodeHelper::GetBaseTypeNamesForOPMultiANewArray(const jbc::JBCOp &op, bool &success) { + std::vector ans; + const jbc::JBCOpMultiANewArray &opArray = static_cast(op); + const jbc::JBCConstPool &constPool = method.GetConstPool(); + const jbc::JBCConst *constItem = constPool.GetConstByIdxWithTag(opArray.GetRefTypeIdx(), jbc::kConstClass); + if (constItem == nullptr) { + success = false; + return ans; + } + const jbc::JBCConstClass *constClass = static_cast(constItem); + ans.push_back(FETypeManager::GetBaseTypeName(constClass->GetClassNameOrin(), false)); + success = true; + return ans; +} + +std::vector JBCOpcodeHelper::GetBaseTypeNamesForOPTypeCheck(const jbc::JBCOp &op, bool &success) { + std::vector ans; + const jbc::JBCOpTypeCheck &opTypeCheck = static_cast(op); + const jbc::JBCConstPool &constPool = method.GetConstPool(); + const jbc::JBCConst *constItem = constPool.GetConstByIdxWithTag(opTypeCheck.GetTypeIdx(), jbc::kConstClass); + if (constItem == nullptr) { + success = false; + return ans; + } 
+ const jbc::JBCConstClass *constClass = static_cast(constItem); + ans.push_back(FETypeManager::GetBaseTypeName(constClass->GetClassNameOrin(), false)); + success = true; + return ans; +} + +std::map> JBCOpcodeHelper::InitMapOpInputPrimTypes() { + std::map> ans; + InitMapOpInputPrimTypesForConst(ans); + + return ans; +} + +void JBCOpcodeHelper::InitMapOpInputPrimTypesForConst(std::map> &ans) { + ans[jbc::kOpAConstNull] = std::vector(); + ans[jbc::kOpIConstM1] = std::vector(); + ans[jbc::kOpIConst0] = std::vector(); + ans[jbc::kOpIConst1] = std::vector(); + ans[jbc::kOpIConst2] = std::vector(); + ans[jbc::kOpIConst3] = std::vector(); + ans[jbc::kOpIConst4] = std::vector(); + ans[jbc::kOpIConst5] = std::vector(); + ans[jbc::kOpLConst0] = std::vector(); + ans[jbc::kOpLConst1] = std::vector(); + ans[jbc::kOpFConst0] = std::vector(); + ans[jbc::kOpFConst1] = std::vector(); + ans[jbc::kOpFConst2] = std::vector(); + ans[jbc::kOpDConst0] = std::vector(); + ans[jbc::kOpDConst1] = std::vector(); + ans[jbc::kOpBiPush] = std::vector(); + ans[jbc::kOpSiPush] = std::vector(); + ans[jbc::kOpLdc] = std::vector(); + ans[jbc::kOpLdcW] = std::vector(); + ans[jbc::kOpLdc2W] = std::vector(); +} +} // namespace maple diff --git a/src/hir2mpl/bytecode_input/class/src/jbc_stack2fe_helper.cpp b/src/hir2mpl/bytecode_input/class/src/jbc_stack2fe_helper.cpp new file mode 100644 index 0000000000000000000000000000000000000000..916d2cbe0bfccd369c115ef4397fa2fc351e3afb --- /dev/null +++ b/src/hir2mpl/bytecode_input/class/src/jbc_stack2fe_helper.cpp @@ -0,0 +1,707 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "jbc_stack2fe_helper.h" +#include +#include "mpl_logging.h" + +namespace maple { +JBCStack2FEHelper::JBCStack2FEHelper(bool argUseNestExpr) + : useNestExpr(argUseNestExpr) {} + +// Function Name: GetRegNumForSlot +// Description: adjust slotNum using following layout +// set baseLocal as nStacks + nSwaps +// set allLocals as nLocals + nArgs +// 0 ~ baseLocal - 1: stack reg (local) +// baseLocal ~ baseLocal + nLocals - 1: local var +// (mapping to slotNum: nArgs ~ allLocals - 1) +// baseLocal + nLocals ~ baseLocal + allLocals - 1: args var +// (mapping to slotNum: 0 ~ nArgs - 1) + +uint32 JBCStack2FEHelper::GetRegNumForSlot(uint32 slotNum) const { + uint32 baseLocal = nStacks + nSwaps; + uint32 allLocals = nLocals + nArgs; + CHECK_FATAL(slotNum < allLocals, "GetRegNumForSlot: out of range"); + if (slotNum >= nArgs) { + // local var + // nArgs ~ nArgs + nLocals - 1 --> baseLocal ~ baseLocal + nLocals - 1 + return baseLocal - nArgs + slotNum; + } else { + // arg var + // 0 ~ nArgs - 1 --> baseLocal + nLocals ~ baseLocal + nLocals + nArgs - 1 + return baseLocal + nLocals + slotNum; + } +} + +uint32 JBCStack2FEHelper::GetRegNumForStack() const { + std::set setAvaliable = regNumForStacks; + for (const StackItem &item : stack) { + const UniqueFEIRVar &var = item.first; + if (IsItemDummy(item)) { + continue; + } + ASSERT(var->GetKind() == FEIRVarKind::kFEIRVarReg, "unsupported var kind"); + FEIRVarReg *ptrVarReg = static_cast(var.get()); + (void)setAvaliable.erase(ptrVarReg->GetRegNum()); + if (IsItemWide(item)) { + (void)setAvaliable.erase(ptrVarReg->GetRegNum() + 1); + } + } + CHECK_FATAL(setAvaliable.size() > 0, "no avaliable reg number to use"); + return *(setAvaliable.begin()); +} + +bool JBCStack2FEHelper::PushItem(UniqueFEIRVar var, PrimType pty) { + if (stack.size() >= nStacks) { + ERR(kLncErr, "stack out of range"); + return false; + } + stack.push_back(MakeStackItem(std::move(var), pty)); + if (IsPrimTypeWide(pty)) { + stack.push_back(MakeStackItem(UniqueFEIRVar(nullptr), pty)); + } + return true; +} + +UniqueFEIRVar JBCStack2FEHelper::PushItem(PrimType pty) { + uint32 regNum = GetRegNumForStack(); + UniqueFEIRVar var = FEIRBuilder::CreateVarReg(regNum, pty); + if (!PushItem(var->Clone(), pty)) { + return UniqueFEIRVar(nullptr); + } else { + return var; + } +} + +UniqueFEIRStmt JBCStack2FEHelper::PushItem(UniqueFEIRExpr expr, PrimType pty, bool hasException) { + uint32 regNum = GetRegNumForStack(); + UniqueFEIRVar varDst = FEIRBuilder::CreateVarReg(regNum, pty); + UniqueFEIRStmt stmt = FEIRBuilder::CreateStmtDAssign(varDst->Clone(), std::move(expr), hasException); + if (PushItem(std::move(varDst), pty)) { + return UniqueFEIRStmt(nullptr); + } else { + return stmt; + } +} + +UniqueFEIRStmt JBCStack2FEHelper::PushItem(UniqueFEIRExpr expr, UniqueFEIRType type, bool hasException) { + uint32 regNum = GetRegNumForStack(); + UniqueFEIRVar varDst = FEIRBuilder::CreateVarReg(regNum, std::move(type)); + UniqueFEIRStmt stmt = FEIRBuilder::CreateStmtDAssign(varDst->Clone(), std::move(expr), hasException); + PrimType pty = PTY_unknown; + if (!type->IsScalar()) { + pty = PTY_ref; + } else { + pty = type->GetPrimType(); + } + if (!PushItem(std::move(varDst), pty)) { + return UniqueFEIRStmt(nullptr); + } else { + return stmt; + } +} + +UniqueFEIRVar JBCStack2FEHelper::PopItem(PrimType pty) { + size_t size = stack.size(); + if (IsPrimTypeWide(pty)) { + if (size < 2) { // pop wide item operation need at least 2 items in stack + ERR(kLncErr, "stack items are not enough for pop operation"); + 
return UniqueFEIRVar(); + } + StackItem &item1 = stack[size - 1]; // the 1st item from top + StackItem &item2 = stack[size - 2]; // the 2nd item from top + if (IsItemDummy(item1) && IsItemWide(item2) && item2.second == pty) { + UniqueFEIRVar ans = std::move(item2.first); + stack.pop_back(); + stack.pop_back(); + return ans; + } else { + ERR(kLncErr, "invalid stack items for pop wide item operation"); + return UniqueFEIRVar(nullptr); + } + } else { + if (size < 1) { // pop normal item operation need at least 1 item in stack + ERR(kLncErr, "stack items are not enough for pop operation"); + return UniqueFEIRVar(nullptr); + } + StackItem &item = stack[size - 1]; // the 1st item from top + if (IsItemNormal(item) && item.second == pty) { + UniqueFEIRVar ans = std::move(item.first); + stack.pop_back(); + return ans; + } else { + ERR(kLncErr, "invalid stack items for pop item operation"); + return UniqueFEIRVar(nullptr); + } + } +} + +UniqueFEIRVar JBCStack2FEHelper::PopItem(UniqueFEIRType type) { + UniqueFEIRVar var = PopItem(type->GetPrimType()); + var->SetType(std::move(type)); + return var; +} + +UniqueFEIRVar JBCStack2FEHelper::PopItem(bool isWide, PrimType &pty) { + size_t size = stack.size(); + pty = PTY_unknown; + if (isWide) { + if (size < 2) { // pop wide item operation need at least 2 items in stack + ERR(kLncErr, "stack items are not enough for pop operation"); + return UniqueFEIRVar(nullptr); + } + StackItem &item1 = stack[size - 1]; // the 1st item from top + StackItem &item2 = stack[size - 2]; // the 2nd item from top + if (IsItemDummy(item1) && IsItemWide(item2)) { + UniqueFEIRVar ans = std::move(item2.first); + pty = item2.second; + stack.pop_back(); + stack.pop_back(); + return ans; + } else { + ERR(kLncErr, "invalid stack items for pop wide item operation"); + return UniqueFEIRVar(nullptr); + } + } else { + if (size < 1) { // pop normal item operation need at least 1 item in stack + ERR(kLncErr, "stack items are not enough for pop operation"); + return UniqueFEIRVar(nullptr); + } + StackItem &item = stack[size - 1]; // the 1st item from top + if (IsItemNormal(item)) { + UniqueFEIRVar ans = std::move(item.first); + pty = item.second; + stack.pop_back(); + return ans; + } else { + ERR(kLncErr, "invalid stack items for pop wide item operation"); + return UniqueFEIRVar(nullptr); + } + } +} + +UniqueFEIRVar JBCStack2FEHelper::PopItemAuto(PrimType &pty) { + size_t size = stack.size(); + if (size < 1) { // pop item operation need at least 1 item in stack + ERR(kLncErr, "stack items are not enough for pop operation"); + return UniqueFEIRVar(nullptr); + } + pty = PTY_unknown; + StackItem &item1 = stack[size - 1]; // the 1st item from top + if (IsItemDummy(item1)) { + if (size < 2) { // pop wide item operation need at least 2 items in stack + ERR(kLncErr, "stack items are not enough for pop operation"); + return UniqueFEIRVar(nullptr); + } + StackItem &item2 = stack[size - 2]; // the 2nd item from top + if (IsItemWide(item2)) { + UniqueFEIRVar ans = std::move(item2.first); + pty = item2.second; + stack.pop_back(); + stack.pop_back(); + return ans; + } else { + ERR(kLncErr, "invalid stack items for pop wide item operation"); + return UniqueFEIRVar(nullptr); + } + } else if (IsItemNormal(item1)) { + UniqueFEIRVar ans = std::move(item1.first); + pty = item1.second; + stack.pop_back(); + return ans; + } else { + ERR(kLncErr, "invalid stack items for pop wide item operation"); + return UniqueFEIRVar(nullptr); + } +} + +bool JBCStack2FEHelper::Swap() { + size_t size = stack.size(); + if (size < 
2) { // swap operation need at least 2 items in stack + ERR(kLncErr, "stack is not enough for swap operation"); + return false; + } + StackItem &item1 = stack[size - 1]; // the 1st item from top + StackItem &item2 = stack[size - 2]; // the 2nd item from top + if (IsItemNormal(item1) && IsItemNormal(item2)) { + UniqueFEIRVar var1 = std::move(item1.first); + PrimType pty1 = item1.second; + UniqueFEIRVar var2 = std::move(item2.first); + PrimType pty2 = item2.second; + stack.pop_back(); + stack.pop_back(); + stack.push_back(MakeStackItem(std::move(var1), pty1)); + stack.push_back(MakeStackItem(std::move(var2), pty2)); + return true; + } else { + ERR(kLncErr, "invalid stack items for swap operation"); + return false; + } +} + +bool JBCStack2FEHelper::Pop(jbc::JBCOpcode opcode) { + switch (opcode) { + case jbc::kOpPop: + return Pop(); + case jbc::kOpPop2: + return Pop2(); + default: + ERR(kLncErr, "unsupported op: %s", jbc::JBCOp::GetOpcodeInfo().GetOpcodeName(opcode).c_str()); + return false; + } +} + +bool JBCStack2FEHelper::Dup(jbc::JBCOpcode opcode) { + switch (opcode) { + case jbc::kOpDup: + return Dup(); + case jbc::kOpDupX1: + return DupX1(); + case jbc::kOpDupX2: + return DupX2(); + case jbc::kOpDup2: + return Dup2(); + case jbc::kOpDup2X1: + return Dup2X1(); + case jbc::kOpDup2X2: + return Dup2X2(); + default: + ERR(kLncErr, "unsupported op: %s", jbc::JBCOp::GetOpcodeInfo().GetOpcodeName(opcode).c_str()); + return false; + } +} + +std::list JBCStack2FEHelper::GenerateSwapStmts() { + std::list ans; + PrimType pty = PTY_unknown; + UniqueFEIRVar varStack = PopItemAuto(pty); + uint32 swapRegNum = nStacks; + while (varStack != nullptr) { + UniqueFEIRVar varSwap = std::make_unique(swapRegNum, pty); + UniqueFEIRExpr exprDRead = FEIRBuilder::CreateExprDRead(std::move(varStack)); + UniqueFEIRStmt stmtDAssign = FEIRBuilder::CreateStmtDAssign(std::move(varSwap), std::move(exprDRead)); + ans.push_back(std::move(stmtDAssign)); + if (pty == PTY_i64 || pty == PTY_f64) { + swapRegNum += kRegNumOffWide; + } else { + swapRegNum += kRegNumOff; + } + varStack = PopItemAuto(pty); + } + return ans; +} + +std::list JBCStack2FEHelper::LoadSwapStack(const JBCStackHelper &stackHelper, bool &success) { + std::list ans; + std::vector jbcStackItems = stackHelper.GetStackItems(); + std::vector primStackItems = JBCStackItemTypesToPrimTypes(jbcStackItems); + stack.clear(); + if (!CheckSwapValid(primStackItems)) { + success = false; + return ans; + } + std::vector> swapRegs = GeneralSwapRegNum(primStackItems); + uint32 regNumStack = 0; + for (const std::pair &item : swapRegs) { + PrimType pty = item.second; + UniqueFEIRVar varSwap = std::make_unique(item.first, pty); + UniqueFEIRVar varStack = std::make_unique(regNumStack, pty); + UniqueFEIRExpr exprDRead = FEIRBuilder::CreateExprDRead(std::move(varSwap)); + UniqueFEIRStmt stmtDAssign = FEIRBuilder::CreateStmtDAssign(varStack->Clone(), std::move(exprDRead)); + ans.push_back(std::move(stmtDAssign)); + stack.push_back(MakeStackItem(std::move(varStack), pty)); + if (pty == PTY_i64 || pty == PTY_f64) { + regNumStack += kRegNumOffWide; + stack.push_back(MakeStackItem(UniqueFEIRVar(nullptr), pty)); + } else { + regNumStack += kRegNumOff; + } + } + success = true; + return ans; +} + +PrimType JBCStack2FEHelper::JBCStackItemTypeToPrimType(jbc::JBCPrimType itemType) { + switch (itemType) { + case jbc::JBCPrimType::kTypeInt: + case jbc::JBCPrimType::kTypeByteOrBoolean: + case jbc::JBCPrimType::kTypeShort: + case jbc::JBCPrimType::kTypeChar: + return PTY_i32; + case 
jbc::JBCPrimType::kTypeLong: + return PTY_i64; + case jbc::JBCPrimType::kTypeFloat: + return PTY_f32; + case jbc::JBCPrimType::kTypeDouble: + return PTY_f64; + case jbc::JBCPrimType::kTypeRef: + return PTY_ref; + case jbc::JBCPrimType::kTypeAddress: + return PTY_a32; + case jbc::JBCPrimType::kTypeDefault: + return PTY_unknown; + default: + ERR(kLncErr, "Should not run here: unsupported type"); + return PTY_unknown; + } +} + +PrimType JBCStack2FEHelper::SimplifyPrimType(PrimType pty) { + switch (pty) { + case PTY_u1: // boolean + case PTY_i8: // byte + case PTY_i16: // short + case PTY_u16: // char + return PTY_i32; + default: + return pty; + } +} + +std::vector JBCStack2FEHelper::JBCStackItemTypesToPrimTypes(const std::vector itemTypes) { + std::vector primTypes; + for (jbc::JBCPrimType itemType : itemTypes) { + primTypes.push_back(JBCStackItemTypeToPrimType(itemType)); + } + return primTypes; +} + +bool JBCStack2FEHelper::CheckSwapValid(const std::vector items) const { + uint32 size = 0; + for (PrimType pty : items) { + if (pty == PTY_i64 || pty == PTY_f64) { + size += kRegNumOffWide; + } else { + size += kRegNumOff; + } + } + if (size > nSwaps) { + ERR(kLncErr, "swap stack out of range"); + return false; + } else { + return true; + } +} + +std::vector> JBCStack2FEHelper::GeneralSwapRegNum(const std::vector items) { + std::vector> ans; + size_t size = items.size(); + uint32 regNum = nStacks; + for (size_t i = 1; i <= size; i++) { + PrimType pty = items[size - i]; + ans.push_back(std::make_pair(regNum, pty)); + if (pty == PTY_i64 || pty == PTY_f64) { + regNum += 2; // wide type uses 2 stack items + } else { + regNum += 1; // normal type uses 1 stack items + } + } + std::reverse(ans.begin(), ans.end()); + return ans; +} + +std::string JBCStack2FEHelper::DumpStackInJavaFormat() const { + std::string ans; + for (const StackItem &item : stack) { + switch (item.second) { + case PTY_i32: + ans.push_back('I'); + break; + case PTY_i64: + ans.push_back('J'); + break; + case PTY_f32: + ans.push_back('F'); + break; + case PTY_f64: + ans.push_back('D'); + break; + case PTY_ref: + ans.push_back('R'); + break; + default: + return std::string("unsupport type") + GetPrimTypeName(item.second); + } + } + return ans; +} + +std::string JBCStack2FEHelper::DumpStackInInternalFormat() const { + std::string ans; + for (const StackItem &item : stack) { + if (IsItemNormal(item)) { + ans.push_back('N'); + } else if (IsItemWide(item)) { + ans.push_back('W'); + } else if (IsItemDummy(item)) { + ans.push_back('D'); + } else { + return "unsupport item"; + } + } + return ans; +} + +bool JBCStack2FEHelper::Pop() { + size_t size = stack.size(); + if (size < 1) { + ERR(kLncErr, "stack is not enough for pop operation"); + return false; + } + StackItem &item = stack[size - 1]; // the 1st item from top + if (IsItemNormal(item)) { + stack.pop_back(); + return true; + } else { + ERR(kLncErr, "invalid stack top item for pop operation"); + return false; + } +} + +bool JBCStack2FEHelper::Pop2() { + size_t size = stack.size(); + if (size < 2) { // 2 : The minimum size of the stack + ERR(kLncErr, "stack is not enough for pop2 operation"); + return false; + } + StackItem &item1 = stack[size - 1]; // the 1st item from top + StackItem &item2 = stack[size - 2]; // the 2nd item from top + if ((IsItemDummy(item1) && IsItemWide(item2)) || (IsItemNormal(item1) && IsItemNormal(item2))) { + stack.pop_back(); + stack.pop_back(); + return true; + } else { + ERR(kLncErr, "invalid stack top item for pop2 operation"); + return false; + } +} + +// 
notation for value used in Dup +// value(N): normal value (i32, f32) +// value(W): wide value (i64, f64) +// value(D): dummy value of wide value +bool JBCStack2FEHelper::Dup() { + size_t size = stack.size(); + if (size < 1) { // dup operation need at least 1 item in stack + ERR(kLncErr, "stack is not enough for dup operation"); + return false; + } + StackItem &item = stack[size - 1]; // the 1st item from top + if (IsItemNormal(item)) { + // before: ..., value(N) -> + // after: ..., value(N), value(N) -> + UniqueFEIRVar var = item.first->Clone(); + PrimType pty = item.second; + stack.push_back(MakeStackItem(std::move(var), pty)); + return true; + } else { + ERR(kLncErr, "invalid stack items for dup operation"); + return false; + } +} + +bool JBCStack2FEHelper::DupX1() { + size_t size = stack.size(); + if (size < 2) { // dup_x1 operation need at least 2 item in stack + ERR(kLncErr, "stack is not enough for dup_x1 operation"); + return false; + } + StackItem &item1 = stack[size - 1]; // the 1st item from top + StackItem &item2 = stack[size - 2]; // the 2nd item from top + if (IsItemNormal(item1) && IsItemNormal(item2)) { + // before: ..., value2(N), value1(N) -> + // after: ..., value1(N), value2(N), value1(N) -> + UniqueFEIRVar var1 = item1.first->Clone(); + PrimType pty1 = item1.second; + std::vector::iterator it = stack.end() - 2; // the 2nd item position from top + CHECK_FATAL(stack.insert(it, MakeStackItem(std::move(var1), pty1)) != stack.end(), "stack insert failed"); + return true; + } else { + ERR(kLncErr, "invalid stack items for dup_x1 operation"); + return false; + } +} + +bool JBCStack2FEHelper::DupX2() { + size_t size = stack.size(); + if (size < 3) { // dup_x1 operation need at least 3 item in stack + ERR(kLncErr, "stack is not enough for dup_x2 operation"); + return false; + } + StackItem &item1 = stack[size - 1]; // the 1st item from top + StackItem &item2 = stack[size - 2]; // the 2nd item from top + StackItem &item3 = stack[size - 3]; // the 3rd item from top + // situation 1 + // before: ..., value3(N), value2(N), value1(N) -> + // after: ..., value1(N), value3(N), value2(N), value1(N) -> + bool isSituation1 = (IsItemNormal(item1) && IsItemNormal(item2) && IsItemNormal(item3)); + // situation 2 + // before: ..., value3(W), value2(D), value1(N) -> + // after: ..., value1(N), value3(W), value2(D), value1(N) -> + bool isSituation2 = (IsItemNormal(item1) && IsItemDummy(item2) && IsItemWide(item3)); + if (!(isSituation1 || isSituation2)) { + ERR(kLncErr, "invalid stack items for dup_x2 operation"); + return false; + } + // situation1 situation2 + // value1 N N + UniqueFEIRVar var1 = item1.first->Clone(); + PrimType pty1 = item1.second; + // insert copy of value1 before the 3rd item from top + std::vector::iterator it = stack.end() - 3; + CHECK_FATAL(stack.insert(it, MakeStackItem(std::move(var1), pty1)) != stack.end(), "stack insert failed"); + return true; +} + +bool JBCStack2FEHelper::Dup2() { + size_t size = stack.size(); + if (size < 2) { // dup2 operation need at least 2 item in stack + ERR(kLncErr, "stack is not enough for dup2 operation"); + return false; + } + StackItem &item1 = stack[size - 1]; // the 1st item from top + StackItem &item2 = stack[size - 2]; // the 2nd item from top + // situation 1 + // before: ..., value2(N), value1(N) -> + // after: ..., value2(N), value1(N), value2(N), value1(N) -> + bool isSituation1 = (IsItemNormal(item1) && IsItemNormal(item2)); + // situation 2 + // before: ..., value2(W), value1(D) -> + // after: ..., value2(W), value1(D), 
value2(W), value1(D) -> + bool isSituation2 = (IsItemDummy(item1) && IsItemWide(item2)); + if (!(isSituation1 || isSituation2)) { + ERR(kLncErr, "invalid stack items for dup2 operation"); + return false; + } + // situation1 situation2 + // value1 N D + // value2 N W + UniqueFEIRVar var1; + UniqueFEIRVar var2; + PrimType pty1 = item1.second; + PrimType pty2 = item2.second; + if (isSituation1) { + var1 = item1.first->Clone(); + var2 = item2.first->Clone(); + } else { + var1 = UniqueFEIRVar(nullptr); + var2 = item2.first->Clone(); + } + // insert copy of value2 before the 2nd item from top + std::vector::iterator it = stack.end() - 2; + CHECK_FATAL(stack.insert(it, MakeStackItem(std::move(var2), pty2)) != stack.end(), "stack insert failed"); + // insert copy of value1 before the 2nd item from top + it = stack.end() - 2; + CHECK_FATAL(stack.insert(it, MakeStackItem(std::move(var1), pty1)) != stack.end(), "stack insert failed"); + return true; +} + +bool JBCStack2FEHelper::Dup2X1() { + size_t size = stack.size(); + if (size < 3) { // dup2_x1 operation need at least 3 item in stack + ERR(kLncErr, "stack is not enough for dup2_x1 operation"); + return false; + } + StackItem &item1 = stack[size - 1]; // the 1st item from top + StackItem &item2 = stack[size - 2]; // the 2nd item from top + StackItem &item3 = stack[size - 3]; // the 3rd item from top + // situation 1 + // before: ..., value3(N), value2(N), value1(N) -> + // after: ..., value2(N), value1(N), value3(N), value2(N), value1(N) -> + bool isSituation1 = (IsItemNormal(item1) && IsItemNormal(item2) && IsItemNormal(item3)); + // situation 2 + // before: ..., value3(N), value2(W), value1(D) -> + // after: ..., value2(W), value1(D), value3(N), value2(W), value1(D) -> + bool isSituation2 = (IsItemDummy(item1) && IsItemWide(item2) && IsItemNormal(item3)); + if (!(isSituation1 || isSituation2)) { + ERR(kLncErr, "invalid stack items for dup2_x1 operation"); + return false; + } + // situation1 situation2 + // value1 N D + // value2 N W + UniqueFEIRVar var1; + UniqueFEIRVar var2; + PrimType pty1 = item1.second; + PrimType pty2 = item2.second; + if (isSituation1) { + var1 = item1.first->Clone(); + var2 = item2.first->Clone(); + } else { + var1 = UniqueFEIRVar(nullptr); + var2 = item2.first->Clone(); + } + // insert copy of value2 before the 3rd item from top + std::vector::iterator it = stack.end() - 3; + CHECK_FATAL(stack.insert(it, MakeStackItem(std::move(var2), pty2)) != stack.end(), "stack insert failed"); + // insert copy of value1 before the 3rd item from top + it = stack.end() - 3; + CHECK_FATAL(stack.insert(it, MakeStackItem(std::move(var1), pty1)) != stack.end(), "stack insert failed"); + return true; +} + +bool JBCStack2FEHelper::Dup2X2() { + size_t size = stack.size(); + if (size < 4) { // dup2_x2 operation need at least 4 item in stack + ERR(kLncErr, "stack is not enough for dup2_x2 operation"); + return false; + } + StackItem &item1 = stack[size - 1]; // the 1st item from top + StackItem &item2 = stack[size - 2]; // the 2nd item from top + StackItem &item3 = stack[size - 3]; // the 3rd item from top + StackItem &item4 = stack[size - 4]; // the 4th item from top + // situation 1 + // before: ..., value4(N), value3(N), value2(N), value1(N) -> + // after: ..., value2(N), value1(N), value4(N), value3(N), value2(N), value1(N) -> + bool isSituation1 = (IsItemNormal(item1) && IsItemNormal(item2) && IsItemNormal(item3) && IsItemNormal(item4)); + // situation 2 + // before: ..., value4(N), value3(N), value2(W), value1(D) -> + // after: ..., 
value2(W), value1(D), value4(N), value3(N), value2(W), value1(D) -> + bool isSituation2 = (IsItemDummy(item1) && IsItemWide(item2) && IsItemNormal(item3) && IsItemNormal(item4)); + // situation 3 + // before: ..., value4(W), value3(D), value2(N), value1(N) -> + // after: ..., value2(N), value1(N), value4(W), value3(D), value2(W), value1(D) -> + bool isSituation3 = (IsItemNormal(item1) && IsItemNormal(item2) && IsItemDummy(item3) && IsItemWide(item4)); + // situation 4 + // before: ..., value4(W), value3(D), value2(W), value1(D) -> + // after: ..., value2(W), value1(D), value4(W), value3(D), value2(W), value1(D) -> + bool isSituation4 = (IsItemDummy(item1) && IsItemWide(item2) && IsItemDummy(item3) && IsItemWide(item4)); + if (!(isSituation1 || isSituation2 || isSituation3 || isSituation4)) { + ERR(kLncErr, "invalid stack items for dup2_x1 operation"); + return false; + } + // situation1 situation2 situation3 situation4 + // value1 N D N D + // value2 N W N W + UniqueFEIRVar var1; + UniqueFEIRVar var2; + PrimType pty1 = item1.second; + PrimType pty2 = item2.second; + if (isSituation1 || isSituation3) { + var1 = item1.first->Clone(); + var2 = item2.first->Clone(); + } else { + var1 = UniqueFEIRVar(nullptr); + var2 = item2.first->Clone(); + } + // insert copy of value2 before the 4th item from top + std::vector::iterator it = stack.end() - 4; + CHECK_FATAL(stack.insert(it, MakeStackItem(std::move(var2), pty2)) != stack.end(), "stack insert failed"); + // insert copy of value1 before the 4th item from top + it = stack.end() - 4; + CHECK_FATAL(stack.insert(it, MakeStackItem(std::move(var1), pty1)) != stack.end(), "stack insert failed"); + return true; +} +} // namespace maple diff --git a/src/hir2mpl/bytecode_input/class/src/jbc_stack_helper.cpp b/src/hir2mpl/bytecode_input/class/src/jbc_stack_helper.cpp new file mode 100644 index 0000000000000000000000000000000000000000..326b089027d22a68cc2a6a22af24aa15e9527853 --- /dev/null +++ b/src/hir2mpl/bytecode_input/class/src/jbc_stack_helper.cpp @@ -0,0 +1,426 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "jbc_stack_helper.h" + +namespace maple { +namespace { +const uint32 kSize1 = 1; +const uint32 kSize2 = 2; +const uint32 kSize3 = 3; +const uint32 kSize4 = 4; +} // namespace + +void JBCStackHelper::Reset() { + stack.clear(); +} + +bool JBCStackHelper::StackChange(const jbc::JBCOp &op, const jbc::JBCConstPool &constPool) { + switch (op.GetOpcodeKind()) { + case jbc::kOpKindConst: { + jbc::JBCPrimType stackOutType = op.GetOutputTypesToStack(constPool); + if (stackOutType != jbc::JBCPrimType::kTypeDefault) { + PushItem(stackOutType); + } else { + return false; + } + return true; + } + case jbc::kOpKindPop: + return Pop(op.GetOpcode()); + case jbc::kOpKindDup: + return Dup(op.GetOpcode()); + case jbc::kOpKindSwap: + return Swap(); + case jbc::kOpKindFieldOpr: + case jbc::kOpKindStaticFieldOpr: + case jbc::kOpKindInvoke: + case jbc::kOpKindMultiANewArray: { + std::vector stackInTypes = op.GetInputTypesFromStack(constPool); + jbc::JBCPrimType stackOutType = op.GetOutputTypesToStack(constPool); + bool success = PopItems(stackInTypes); + if (success && stackOutType != jbc::JBCPrimType::kTypeDefault) { + PushItem(stackOutType); + } + return success; + } + default: { + const std::vector &stackInTypes = op.GetInputTypesFromStack(); + jbc::JBCPrimType stackOutType = op.GetOutputTypesToStack(); + bool success = PopItems(stackInTypes); + if (success && stackOutType != jbc::JBCPrimType::kTypeDefault) { + PushItem(stackOutType); + } + return success; + } + } +} + +void JBCStackHelper::PushItem(jbc::JBCPrimType type) { + stack.push_back(GetGeneralType(type)); + if (type == jbc::JBCPrimType::kTypeLong) { + stack.push_back(jbc::JBCPrimType::kTypeLongDummy); + } else if (type == jbc::JBCPrimType::kTypeDouble) { + stack.push_back(jbc::JBCPrimType::kTypeDoubleDummy); + } else { + // nothing to be done + } +} + +void JBCStackHelper::PushItems(const std::vector &types) { + for (jbc::JBCPrimType type : types) { + PushItem(type); + } +} + +bool JBCStackHelper::PopItem(jbc::JBCPrimType type) { + if (stack.size() == 0) { + return false; + } + type = GetGeneralType(type); + switch (type) { + case jbc::JBCPrimType::kTypeInt: + case jbc::JBCPrimType::kTypeFloat: + case jbc::JBCPrimType::kTypeRef: + case jbc::JBCPrimType::kTypeAddress: + if (stack.size() >= kSize1 && stack.back() == type) { + stack.pop_back(); + return true; + } + break; + case jbc::JBCPrimType::kTypeLong: + if (stack.size() >= kSize2 && stack.back() == jbc::JBCPrimType::kTypeLongDummy) { + stack.pop_back(); + stack.pop_back(); + return true; + } + break; + case jbc::JBCPrimType::kTypeDouble: + if (stack.size() >= kSize2 && stack.back() == jbc::JBCPrimType::kTypeDoubleDummy) { + stack.pop_back(); + stack.pop_back(); + return true; + } + break; + case jbc::JBCPrimType::kTypeDefault: + return true; + default: + CHECK_FATAL(false, "Should not run here: invalid type"); + break; + } + return false; +} + +bool JBCStackHelper::PopItems(const std::vector &types) { + size_t idx = types.size() - 1; + for (size_t i = 0; i < types.size(); i++, idx--) { + if (PopItem(types[idx]) == false) { + return false; + } + } + return true; +} + +bool JBCStackHelper::Pop(jbc::JBCOpcode opcode) { + switch (opcode) { + case jbc::kOpPop: + if (stack.size() >= kSize1 && IsType1(stack.back())) { + stack.pop_back(); + return true; + } + break; + case jbc::kOpPop2: + if (stack.size() >= kSize2 && IsType2Dummy(stack.back())) { + stack.pop_back(); + stack.pop_back(); + return true; + } + break; + default: + CHECK_FATAL(false, "Unsupported opcode: %s", 
jbc::JBCOp::GetOpcodeInfo().GetOpcodeName(opcode).c_str()); + break; + } + return false; +} + +bool JBCStackHelper::Dup(jbc::JBCOpcode opcode) { + switch (opcode) { + case jbc::kOpDup: + return Dup(); + case jbc::kOpDupX1: + return DupX1(); + case jbc::kOpDupX2: + return DupX2(); + case jbc::kOpDup2: + return Dup2(); + case jbc::kOpDup2X1: + return Dup2X1(); + case jbc::kOpDup2X2: + return Dup2X2(); + default: + CHECK_FATAL(false, "Unsupported opcode: %s", jbc::JBCOp::GetOpcodeInfo().GetOpcodeName(opcode).c_str()); + break; + } + return false; +} + +bool JBCStackHelper::Dup() { + if (stack.size() < kSize1) { + return false; + } + jbc::JBCPrimType t = stack.back(); + if (IsType1(t)) { + stack.push_back(t); + return true; + } + return false; +} + +bool JBCStackHelper::DupX1() { + if (stack.size() < kSize2) { + return false; + } + jbc::JBCPrimType t1 = stack.back(); + stack.pop_back(); + jbc::JBCPrimType t2 = stack.back(); + stack.pop_back(); + bool success = (IsType1(t1) && IsType1(t2)); + if (success) { + stack.push_back(t1); + } + stack.push_back(t2); + stack.push_back(t1); + return success; +} + +bool JBCStackHelper::DupX2() { + if (stack.size() < kSize3) { + return false; + } + jbc::JBCPrimType t1 = stack.back(); + stack.pop_back(); + jbc::JBCPrimType t2 = stack.back(); + stack.pop_back(); + jbc::JBCPrimType t3 = stack.back(); + stack.pop_back(); + bool success = IsType1(t1); + if (success) { + stack.push_back(t1); + } + stack.push_back(t3); + stack.push_back(t2); + stack.push_back(t1); + return success; +} + +bool JBCStackHelper::Dup2() { + if (stack.size() < kSize2) { + return false; + } + jbc::JBCPrimType t1 = stack.back(); + stack.pop_back(); + jbc::JBCPrimType t2 = stack.back(); + stack.pop_back(); + bool success = ((IsType1(t1) && IsType1(t2)) || (IsType2Dummy(t1) && IsType2(t2))); + if (success) { + stack.push_back(t2); + stack.push_back(t1); + } + stack.push_back(t2); + stack.push_back(t1); + return success; +} + +bool JBCStackHelper::Dup2X1() { + if (stack.size() < kSize3) { + return false; + } + jbc::JBCPrimType t1 = stack.back(); + stack.pop_back(); + jbc::JBCPrimType t2 = stack.back(); + stack.pop_back(); + jbc::JBCPrimType t3 = stack.back(); + stack.pop_back(); + bool success = IsType1(t3); + if (success) { + stack.push_back(t2); + stack.push_back(t1); + } + stack.push_back(t3); + stack.push_back(t2); + stack.push_back(t1); + return success; +} + +bool JBCStackHelper::Dup2X2() { + if (stack.size() < kSize4) { + return false; + } + jbc::JBCPrimType t1 = stack.back(); + stack.pop_back(); + jbc::JBCPrimType t2 = stack.back(); + stack.pop_back(); + jbc::JBCPrimType t3 = stack.back(); + stack.pop_back(); + jbc::JBCPrimType t4 = stack.back(); + stack.pop_back(); + stack.push_back(t2); + stack.push_back(t1); + stack.push_back(t4); + stack.push_back(t3); + stack.push_back(t2); + stack.push_back(t1); + return true; +} + +bool JBCStackHelper::Swap() { + if (stack.size() < kSize2) { + return false; + } + jbc::JBCPrimType t1 = stack.back(); + stack.pop_back(); + jbc::JBCPrimType t2 = stack.back(); + stack.pop_back(); + if (IsType1(t1) && IsType1(t2)) { + stack.push_back(t1); + stack.push_back(t2); + return true; + } else { + stack.push_back(t2); + stack.push_back(t1); + return false; + } +} + +bool JBCStackHelper::IsType1(jbc::JBCPrimType type) const { + if (type == jbc::JBCPrimType::kTypeInt || type == jbc::JBCPrimType::kTypeFloat || + type == jbc::JBCPrimType::kTypeByteOrBoolean || type == jbc::JBCPrimType::kTypeChar || + type == jbc::JBCPrimType::kTypeShort || type == 
jbc::JBCPrimType::kTypeRef || + type == jbc::JBCPrimType::kTypeAddress) { + return true; + } else { + return false; + } +} + +bool JBCStackHelper::IsType2(jbc::JBCPrimType type) const { + if (type == jbc::JBCPrimType::kTypeLong || type == jbc::JBCPrimType::kTypeDouble) { + return true; + } else { + return false; + } +} + +bool JBCStackHelper::IsType2Dummy(jbc::JBCPrimType type) const { + if (type == jbc::JBCPrimType::kTypeLongDummy || type == jbc::JBCPrimType::kTypeDoubleDummy) { + return true; + } else { + return false; + } +} + +jbc::JBCPrimType JBCStackHelper::GetGeneralType(jbc::JBCPrimType type) const { + if (type == jbc::JBCPrimType::kTypeByteOrBoolean || type == jbc::JBCPrimType::kTypeChar || + type == jbc::JBCPrimType::kTypeShort) { + return jbc::JBCPrimType::kTypeInt; + } else { + return type; + } +} + +void JBCStackHelper::CopyFrom(const JBCStackHelper &src) { + for (jbc::JBCPrimType item : src.stack) { + stack.push_back(item); + } +} + +bool JBCStackHelper::EqualTo(const JBCStackHelper &src) { + if (stack.size() != src.stack.size()) { + return false; + } + for (size_t i = 0; i < stack.size(); i++) { + if (stack[i] != src.stack[i]) { + return false; + } + } + return true; +} + +bool JBCStackHelper::Contains(const JBCStackHelper &src) { + // this contains src (this >= src) + if (EqualTo(src)) { + return true; + } + size_t size = stack.size(); + size_t sizeSrc = src.stack.size(); + if (size < sizeSrc) { + return false; + } + for (size_t i = 1; i <= sizeSrc; i++) { + if (stack[size - i] != src.stack[sizeSrc - 1]) { + return false; + } + } + return true; +} + +void JBCStackHelper::Dump() const { + std::cout << "{"; + for (jbc::JBCPrimType item : stack) { + std::cout << GetTypeName(item) << " "; + } + std::cout << "}"; +} + +std::string JBCStackHelper::GetTypeName(jbc::JBCPrimType type) { + switch (type) { + case jbc::JBCPrimType::kTypeDefault: + return "default"; + case jbc::JBCPrimType::kTypeInt: + return "int"; + case jbc::JBCPrimType::kTypeLong: + return "long"; + case jbc::JBCPrimType::kTypeFloat: + return "float"; + case jbc::JBCPrimType::kTypeDouble: + return "double"; + case jbc::JBCPrimType::kTypeByteOrBoolean: + return "byte/boolean"; + case jbc::JBCPrimType::kTypeChar: + return "char"; + case jbc::JBCPrimType::kTypeShort: + return "short"; + case jbc::JBCPrimType::kTypeRef: + return "ref"; + case jbc::JBCPrimType::kTypeAddress: + return "address"; + case jbc::JBCPrimType::kTypeLongDummy: + return "long-dummy"; + case jbc::JBCPrimType::kTypeDoubleDummy: + return "double-dummy"; + default: + return "unknown"; + } +} + +std::vector JBCStackHelper::GetStackItems() const { + std::vector ans; + for (jbc::JBCPrimType item : stack) { + if (item != jbc::JBCPrimType::kTypeLongDummy && item != jbc::JBCPrimType::kTypeDoubleDummy) { + ans.push_back(item); + } + } + return ans; +} +} // namespace maple diff --git a/src/hir2mpl/bytecode_input/class/src/jbc_stmt.cpp b/src/hir2mpl/bytecode_input/class/src/jbc_stmt.cpp new file mode 100644 index 0000000000000000000000000000000000000000..708395d4a5620ec2dc3be0a642ad5119b1276252 --- /dev/null +++ b/src/hir2mpl/bytecode_input/class/src/jbc_stmt.cpp @@ -0,0 +1,1559 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "jbc_stmt.h" +#include +#include +#include +#include "opcodes.h" +#include "feir_stmt.h" +#include "feir_builder.h" +#include "fe_struct_elem_info.h" +#include "jbc_function_context.h" + +namespace maple { +// ---------- JBCStmtKindHelper ---------- +std::string JBCStmtKindHelper::JBCStmtKindName(JBCStmtKind kind) { + switch (kind) { + case kJBCStmtDefault: + return "JBCStmtDefault"; + case kJBCStmtFuncBeing: + return "JBCStmtFuncBegin"; + case kJBCStmtFuncEnd: + return "JBCStmtFuncEnd"; + case kJBCStmtInst: + return "JBCStmtInst"; + case kJBCStmtInstBranch: + return "JBCStmtInstBranch"; + case kJBCStmtPesudoComment: + return "JBCStmtPesudoComment"; + case kJBCStmtPesudoLabel: + return "JBCStmtPesudoLabel"; + case kJBCStmtPesudoLOC: + return "JBCStmtPesudoLOC"; + case kJBCStmtPesudoTry: + return "JBCStmtPesudoTry"; + case kJBCStmtPesudoEndTry: + return "JBCStmtPesudoEndTry"; + case kJBCStmtPesudoCatch: + return "JBCStmtPesudoCatch"; + default: + return "unknown"; + } +} + +// ---------- JBCStmtInst ---------- +std::map JBCStmtInst::funcPtrMapForEmitToFEIR = + JBCStmtInst::InitFuncPtrMapForEmitToFEIR(); +std::map JBCStmtInst::opcodeMapForMathBinop = JBCStmtInst::InitOpcodeMapForMathBinop(); +std::map JBCStmtInst::opcodeMapForMathUnop = JBCStmtInst::InitOpcodeMapForMathUnop(); +std::map JBCStmtInst::opcodeMapForMonitor = JBCStmtInst::InitOpcodeMapForMonitor(); + +JBCStmtInst::JBCStmtInst(const jbc::JBCOp &argOp) + : JBCStmt(kJBCStmtInst), + op(argOp) { + SetFallThru(op.IsFallThru()); +} + +bool JBCStmtInst::IsStmtInstImpl() const { + return true; +} + +void JBCStmtInst::DumpImpl(const std::string &prefix) const { + std::cout << prefix << "JBCStmtInst (id=" << id << ", " << + "kind=" << JBCStmtKindHelper::JBCStmtKindName(JBCkind) << ", " << + "op=" << op.GetOpcodeName() << + ")" << std::endl; +} + +std::string JBCStmtInst::DumpDotStringImpl() const { + std::stringstream ss; + ss << " " << id << ": " << op.GetOpcodeName(); + return ss.str(); +} + +std::list JBCStmtInst::EmitToFEIRImpl(JBCFunctionContext &context, bool &success) const { + auto it = funcPtrMapForEmitToFEIR.find(op.GetOpcodeKind()); + if (it != funcPtrMapForEmitToFEIR.end()) { + return (this->*(it->second))(context, success); + } else { + return EmitToFEIRCommon(context, success); + } +} + +std::map JBCStmtInst::InitFuncPtrMapForEmitToFEIR() { + std::map ans; + ans[jbc::JBCOpcodeKind::kOpKindConst] = &JBCStmtInst::EmitToFEIRForOpConst; + ans[jbc::JBCOpcodeKind::kOpKindLoad] = &JBCStmtInst::EmitToFEIRForOpLoad; + ans[jbc::JBCOpcodeKind::kOpKindStore] = &JBCStmtInst::EmitToFEIRForOpStore; + ans[jbc::JBCOpcodeKind::kOpKindArrayLoad] = &JBCStmtInst::EmitToFEIRForOpArrayLoad; + ans[jbc::JBCOpcodeKind::kOpKindArrayStore] = &JBCStmtInst::EmitToFEIRForOpArrayStore; + ans[jbc::JBCOpcodeKind::kOpKindPop] = &JBCStmtInst::EmitToFEIRForOpPop; + ans[jbc::JBCOpcodeKind::kOpKindDup] = &JBCStmtInst::EmitToFEIRForOpDup; + ans[jbc::JBCOpcodeKind::kOpKindSwap] = &JBCStmtInst::EmitToFEIRForOpSwap; + ans[jbc::JBCOpcodeKind::kOpKindMathBinop] = &JBCStmtInst::EmitToFEIRForOpMathBinop; + ans[jbc::JBCOpcodeKind::kOpKindMathUnop] = &JBCStmtInst::EmitToFEIRForOpMathUnop; + 
ans[jbc::JBCOpcodeKind::kOpKindMathInc] = &JBCStmtInst::EmitToFEIRForOpMathInc; + ans[jbc::JBCOpcodeKind::kOpKindConvert] = &JBCStmtInst::EmitToFEIRForOpConvert; + ans[jbc::JBCOpcodeKind::kOpKindCompare] = &JBCStmtInst::EmitToFEIRForOpMathBinop; + ans[jbc::JBCOpcodeKind::kOpKindReturn] = &JBCStmtInst::EmitToFEIRForOpReturn; + ans[jbc::JBCOpcodeKind::kOpKindStaticFieldOpr] = &JBCStmtInst::EmitToFEIRForOpStaticFieldOpr; + ans[jbc::JBCOpcodeKind::kOpKindFieldOpr] = &JBCStmtInst::EmitToFEIRForOpFieldOpr; + ans[jbc::JBCOpcodeKind::kOpKindInvoke] = &JBCStmtInst::EmitToFEIRForOpInvoke; + ans[jbc::JBCOpcodeKind::kOpKindNew] = &JBCStmtInst::EmitToFEIRForOpNew; + ans[jbc::JBCOpcodeKind::kOpKindMultiANewArray] = &JBCStmtInst::EmitToFEIRForOpMultiANewArray; + ans[jbc::JBCOpcodeKind::kOpKindThrow] = &JBCStmtInst::EmitToFEIRForOpThrow; + ans[jbc::JBCOpcodeKind::kOpKindTypeCheck] = &JBCStmtInst::EmitToFEIRForOpTypeCheck; + ans[jbc::JBCOpcodeKind::kOpKindMonitor] = &JBCStmtInst::EmitToFEIRForOpMonitor; + ans[jbc::JBCOpcodeKind::kOpKindArrayLength] = &JBCStmtInst::EmitToFEIRForOpArrayLength; + return ans; +} + +std::map JBCStmtInst::InitOpcodeMapForMathBinop() { + std::map ans; + ans[jbc::kOpIAdd] = OP_add; + ans[jbc::kOpLAdd] = OP_add; + ans[jbc::kOpFAdd] = OP_add; + ans[jbc::kOpDAdd] = OP_add; + ans[jbc::kOpISub] = OP_sub; + ans[jbc::kOpLSub] = OP_sub; + ans[jbc::kOpFSub] = OP_sub; + ans[jbc::kOpDSub] = OP_sub; + ans[jbc::kOpIMul] = OP_mul; + ans[jbc::kOpLMul] = OP_mul; + ans[jbc::kOpFMul] = OP_mul; + ans[jbc::kOpDMul] = OP_mul; + ans[jbc::kOpIDiv] = OP_div; + ans[jbc::kOpLDiv] = OP_div; + ans[jbc::kOpFDiv] = OP_div; + ans[jbc::kOpDDiv] = OP_div; + ans[jbc::kOpIRem] = OP_rem; + ans[jbc::kOpLRem] = OP_rem; + ans[jbc::kOpFRem] = OP_rem; + ans[jbc::kOpDRem] = OP_rem; + ans[jbc::kOpIShl] = OP_shl; + ans[jbc::kOpLShl] = OP_shl; + ans[jbc::kOpIShr] = OP_ashr; + ans[jbc::kOpLShr] = OP_ashr; + ans[jbc::kOpIUShr] = OP_lshr; + ans[jbc::kOpLUShr] = OP_lshr; + ans[jbc::kOpIAnd] = OP_band; + ans[jbc::kOpLAnd] = OP_band; + ans[jbc::kOpIOr] = OP_bior; + ans[jbc::kOpLOr] = OP_bior; + ans[jbc::kOpIXor] = OP_bxor; + ans[jbc::kOpLXor] = OP_bxor; + ans[jbc::kOpLCmp] = OP_cmp; + ans[jbc::kOpFCmpl] = OP_cmpl; + ans[jbc::kOpFCmpg] = OP_cmpg; + ans[jbc::kOpDCmpl] = OP_cmpl; + ans[jbc::kOpDCmpg] = OP_cmpg; + return ans; +} + +std::map JBCStmtInst::InitOpcodeMapForMathUnop() { + std::map ans; + ans[jbc::kOpINeg] = OP_neg; + ans[jbc::kOpLNeg] = OP_neg; + ans[jbc::kOpFNeg] = OP_neg; + ans[jbc::kOpDNeg] = OP_neg; + return ans; +} + +std::map JBCStmtInst::InitOpcodeMapForMonitor() { + std::map ans; + ans[jbc::kOpMonitorEnter] = OP_syncenter; + ans[jbc::kOpMonitorExit] = OP_syncexit; + return ans; +} + +std::list JBCStmtInst::EmitToFEIRForOpConst(JBCFunctionContext &context, bool &success) const { + switch (op.GetOpcode()) { + case jbc::kOpAConstNull: + return EmitToFEIRForOpAConstNull(context, success); + case jbc::kOpIConstM1: + case jbc::kOpIConst0: + case jbc::kOpIConst1: + case jbc::kOpIConst2: + case jbc::kOpIConst3: + case jbc::kOpIConst4: + case jbc::kOpIConst5: + return EmitToFEIRForOpIConst(context, success); + case jbc::kOpLConst0: + case jbc::kOpLConst1: + return EmitToFEIRForOpLConst(context, success); + case jbc::kOpFConst0: + case jbc::kOpFConst1: + case jbc::kOpFConst2: + return EmitToFEIRForOpFConst(context, success); + case jbc::kOpDConst0: + case jbc::kOpDConst1: + return EmitToFEIRForOpDConst(context, success); + case jbc::kOpBiPush: + return EmitToFEIRForOpBiPush(context, success); + case jbc::kOpSiPush: + return 
EmitToFEIRForOpSiPush(context, success); + case jbc::kOpLdc: + case jbc::kOpLdcW: + case jbc::kOpLdc2W: + return EmitToFEIRForOpLdc(context, success); + default: + ERR(kLncErr, "EmitToFEIRForOpConst: unsupport jbc opcode %s", + jbc::JBCOp::GetOpcodeInfo().GetOpcodeName(op.GetOpcode()).c_str()); + success = false; + return std::list(); + } +} + +std::list JBCStmtInst::EmitToFEIRForOpConstCommon(JBCFunctionContext &context, bool &success) const { + std::list ans; + const jbc::JBCConstPool &constPool = context.GetConstPool(); + JBCStack2FEHelper &stack2feHelper = context.GetStack2FEHelper(); + jbc::JBCPrimType stackOutType = op.GetOutputTypesToStack(constPool); + if (stackOutType != jbc::JBCPrimType::kTypeDefault) { + PrimType pty = JBCStack2FEHelper::JBCStackItemTypeToPrimType(stackOutType); + UniqueFEIRVar var = stack2feHelper.PushItem(pty); + if (var == nullptr) { + success = false; + } + } else { + success = false; + } + if (!success) { + ERR(kLncErr, "Error when EmitToFEIRForOpConst"); + } + return ans; +} + +std::list JBCStmtInst::EmitToFEIRForOpAConstNull(JBCFunctionContext &context, bool &success) const { + std::list ans; + JBCStack2FEHelper &stack2feHelper = context.GetStack2FEHelper(); + UniqueFEIRExpr exprConst = FEIRBuilder::CreateExprConstRefNull(); + UniqueFEIRVar varDst = stack2feHelper.PushItem(PTY_ref); + if (varDst == nullptr) { + success = false; + return ans; + } + UniqueFEIRStmt stmtDAssign = FEIRBuilder::CreateStmtDAssign(std::move(varDst), std::move(exprConst)); + ans.push_back(std::move(stmtDAssign)); + return ans; +} + +std::list JBCStmtInst::EmitToFEIRForOpIConst(JBCFunctionContext &context, bool &success) const { + std::list ans; + JBCStack2FEHelper &stack2feHelper = context.GetStack2FEHelper(); + const jbc::JBCOpConst &opConst = static_cast(op); + int32 val = opConst.GetValueInt(); + UniqueFEIRStmt stmtDAssign = GenerateStmtForConstI32(stack2feHelper, val, success); + if (success) { + ans.push_back(std::move(stmtDAssign)); + } + return ans; +} + +std::list JBCStmtInst::EmitToFEIRForOpLConst(JBCFunctionContext &context, bool &success) const { + std::list ans; + JBCStack2FEHelper &stack2feHelper = context.GetStack2FEHelper(); + const jbc::JBCOpConst &opConst = static_cast(op); + int64 val = opConst.GetValueLong(); + UniqueFEIRStmt stmtDAssign = GenerateStmtForConstI64(stack2feHelper, val, success); + if (success) { + ans.push_back(std::move(stmtDAssign)); + } + return ans; +} + +std::list JBCStmtInst::EmitToFEIRForOpFConst(JBCFunctionContext &context, bool &success) const { + std::list ans; + JBCStack2FEHelper &stack2feHelper = context.GetStack2FEHelper(); + const jbc::JBCOpConst &opConst = static_cast(op); + float val = opConst.GetValueFloat(); + UniqueFEIRStmt stmtDAssign = GenerateStmtForConstF32(stack2feHelper, val, success); + if (success) { + ans.push_back(std::move(stmtDAssign)); + } + return ans; +} + +std::list JBCStmtInst::EmitToFEIRForOpDConst(JBCFunctionContext &context, bool &success) const { + std::list ans; + JBCStack2FEHelper &stack2feHelper = context.GetStack2FEHelper(); + const jbc::JBCOpConst &opConst = static_cast(op); + double val = opConst.GetValueDouble(); + UniqueFEIRStmt stmtDAssign = GenerateStmtForConstF64(stack2feHelper, val, success); + if (success) { + ans.push_back(std::move(stmtDAssign)); + } + return ans; +} + +std::list JBCStmtInst::EmitToFEIRForOpBiPush(JBCFunctionContext &context, bool &success) const { + std::list ans; + JBCStack2FEHelper &stack2feHelper = context.GetStack2FEHelper(); + const jbc::JBCOpConst &opConst = 
static_cast(op); + int8 val = opConst.GetValueByte(); + UniqueFEIRStmt stmtDAssign = GenerateStmtForConstI32(stack2feHelper, int32{ val }, success); + if (success) { + ans.push_back(std::move(stmtDAssign)); + } + return ans; +} + +std::list JBCStmtInst::EmitToFEIRForOpSiPush(JBCFunctionContext &context, bool &success) const { + std::list ans; + JBCStack2FEHelper &stack2feHelper = context.GetStack2FEHelper(); + const jbc::JBCOpConst &opConst = static_cast(op); + int16 val = opConst.GetValueShort(); + UniqueFEIRStmt stmtDAssign = GenerateStmtForConstI32(stack2feHelper, int32{ val }, success); + if (success) { + ans.push_back(std::move(stmtDAssign)); + } + return ans; +} + +std::list JBCStmtInst::EmitToFEIRForOpLdc(JBCFunctionContext &context, bool &success) const { + std::list ans; + const jbc::JBCConstPool &constPool = context.GetConstPool(); + JBCStack2FEHelper &stack2feHelper = context.GetStack2FEHelper(); + const jbc::JBCOpConst &opConst = static_cast(op); + const jbc::JBCConst *constRaw = constPool.GetConstByIdx(opConst.GetIndex()); + if (constRaw == nullptr) { + success = false; + return ans; + } + UniqueFEIRStmt stmtDAssign; + switch (constRaw->GetTag()) { + case jbc::kConstInteger: { + const jbc::JBCConst4Byte *const4B = static_cast(constRaw); + stmtDAssign = GenerateStmtForConstI32(stack2feHelper, const4B->GetInt32(), success); + break; + } + case jbc::kConstFloat: { + const jbc::JBCConst4Byte *const4B = static_cast(constRaw); + stmtDAssign = GenerateStmtForConstF32(stack2feHelper, const4B->GetFloat(), success); + break; + } + case jbc::kConstLong: { + const jbc::JBCConst8Byte *const8B = static_cast(constRaw); + stmtDAssign = GenerateStmtForConstI64(stack2feHelper, const8B->GetInt64(), success); + break; + } + case jbc::kConstDouble: { + const jbc::JBCConst8Byte *const8B = static_cast(constRaw); + stmtDAssign = GenerateStmtForConstF64(stack2feHelper, const8B->GetDouble(), success); + break; + } + case jbc::kConstString: { + const jbc::JBCConstString *constString = static_cast(constRaw); + UniqueFEIRVar varDst = stack2feHelper.PushItem(PTY_ref); + stmtDAssign = FEIRBuilder::CreateStmtJavaConstString(std::move(varDst), constString->GetString()); + break; + } + case jbc::kConstClass: { + const jbc::JBCConstClass *constClass = static_cast(constRaw); + UniqueFEIRVar varDst = stack2feHelper.PushItem(PTY_ref); + stmtDAssign = FEIRBuilder::CreateStmtJavaConstClass(std::move(varDst), constClass->GetFEIRType()->Clone()); + break; + } + default: + ERR(kLncErr, "EmitToFEIRForOpLdc: unsupported const kind"); + success = false; + } + if (success && stmtDAssign != nullptr) { + ans.push_back(std::move(stmtDAssign)); + } + return ans; +} + +std::list JBCStmtInst::EmitToFEIRForOpLoad(JBCFunctionContext &context, bool &success) const { + std::list ans; + JBCStack2FEHelper &stack2feHelper = context.GetStack2FEHelper(); + const jbc::JBCOpSlotOpr &opLoad = static_cast(op); + std::vector stackInTypes = op.GetInputTypesFromStack(); + CHECK_FATAL(stackInTypes.empty(), "no items should be popped"); + jbc::JBCPrimType stackOutType = op.GetOutputTypesToStack(); + PrimType pty = JBCStack2FEHelper::JBCStackItemTypeToPrimType(stackOutType); + uint32 regNum = stack2feHelper.GetRegNumForSlot(opLoad.GetSlotIdx()); + const FEIRType *slotType = context.GetSlotType(opLoad.GetSlotIdx(), pc); + UniqueFEIRVar var; + UniqueFEIRVar varStack = stack2feHelper.PushItem(pty); + if (slotType != nullptr) { + var = FEIRBuilder::CreateVarReg(regNum, slotType->Clone()); + varStack->SetType(slotType->Clone()); + } else { + var = 
FEIRBuilder::CreateVarReg(regNum, pty); + } + success = success && (varStack != nullptr); + UniqueFEIRExpr expr = FEIRBuilder::CreateExprDRead(std::move(var)); + UniqueFEIRStmt stmt = FEIRBuilder::CreateStmtDAssign(std::move(varStack), std::move(expr)); + ans.push_back(std::move(stmt)); + return ans; +} + +std::list JBCStmtInst::EmitToFEIRForOpStore(JBCFunctionContext &context, bool &success) const { + std::list ans; + JBCStack2FEHelper &stack2feHelper = context.GetStack2FEHelper(); + const jbc::JBCOpSlotOpr &opStore = static_cast(op); + if (opStore.IsAddressOpr()) { + UniqueFEIRVar varTmp = stack2feHelper.PopItem(PTY_a32); + if (varTmp.get() == nullptr) { + success = false; + } + return ans; + } + std::vector stackInTypes = op.GetInputTypesFromStack(); + CHECK_FATAL(stackInTypes.size() == 1, "store op need one stack opnd"); + PrimType pty = JBCStack2FEHelper::JBCStackItemTypeToPrimType(stackInTypes[0]); + uint32 regSlot = stack2feHelper.GetRegNumForSlot(opStore.GetSlotIdx()); + const FEIRType *slotType = context.GetSlotType(opStore.GetSlotIdx(), pc); + UniqueFEIRVar varDst; + UniqueFEIRVar varSrc; + if (slotType != nullptr) { + varDst = FEIRBuilder::CreateVarReg(regSlot, slotType->Clone()); + varSrc = stack2feHelper.PopItem(pty); + varSrc->SetType(slotType->Clone()); + } else { + varDst = FEIRBuilder::CreateVarReg(regSlot, pty); + varSrc = stack2feHelper.PopItem(pty); + } + if (varSrc == nullptr) { + success = false; + return ans; + } + UniqueFEIRExpr expr = FEIRBuilder::CreateExprDRead(std::move(varSrc)); + UniqueFEIRStmt stmt = FEIRBuilder::CreateStmtDAssign(std::move(varDst), std::move(expr)); + ans.push_back(std::move(stmt)); + return ans; +} + +std::list JBCStmtInst::EmitToFEIRForOpArrayLoad(JBCFunctionContext &context, bool &success) const { + JBCStack2FEHelper &stack2feHelper = context.GetStack2FEHelper(); + // Process In + std::vector stackInTypes = op.GetInputTypesFromStack(); + // ArrayLoad need 2 input opnds + CHECK_FATAL(stackInTypes.size() == 2, "invalid in types for ArrayLoad"); + PrimType ptyArray = JBCStack2FEHelper::JBCStackItemTypeToPrimType(stackInTypes[0]); // opnd0: array + PrimType ptyIndex = JBCStack2FEHelper::JBCStackItemTypeToPrimType(stackInTypes[1]); // opnd1: index + UniqueFEIRVar varIndex = stack2feHelper.PopItem(ptyIndex); + UniqueFEIRVar varArray = stack2feHelper.PopItem(ptyArray); + // Process Out + jbc::JBCPrimType stackOutType = op.GetOutputTypesToStack(); + CHECK_FATAL(stackOutType != jbc::JBCPrimType::kTypeDefault, "invalid out type for ArrayLoad"); + PrimType ptyElem = JBCStack2FEHelper::JBCStackItemTypeToPrimType(stackOutType); + UniqueFEIRVar varElem = stack2feHelper.PushItem(ptyElem); + success = true; + return FEIRBuilder::CreateStmtArrayLoad(std::move(varElem), std::move(varArray), std::move(varIndex)); +} + +std::list JBCStmtInst::EmitToFEIRForOpArrayStore(JBCFunctionContext &context, bool &success) const { + JBCStack2FEHelper &stack2feHelper = context.GetStack2FEHelper(); + // Process In + std::vector stackInTypes = op.GetInputTypesFromStack(); + // ArrayStore need 3 input opnds + CHECK_FATAL(stackInTypes.size() == 3, "invalid in types for ArrayStore"); + PrimType ptyArray = JBCStack2FEHelper::JBCStackItemTypeToPrimType(stackInTypes[0]); // opnd0: array + PrimType ptyIndex = JBCStack2FEHelper::JBCStackItemTypeToPrimType(stackInTypes[1]); // opnd1: index + PrimType ptyElem = JBCStack2FEHelper::JBCStackItemTypeToPrimType(stackInTypes[2]); // opnd2: elem + UniqueFEIRVar varElem = stack2feHelper.PopItem(ptyElem); + UniqueFEIRVar varIndex = 
stack2feHelper.PopItem(ptyIndex); + UniqueFEIRVar varArray = stack2feHelper.PopItem(ptyArray); + success = true; + return FEIRBuilder::CreateStmtArrayStore(std::move(varElem), std::move(varArray), std::move(varIndex)); +} + +std::list JBCStmtInst::EmitToFEIRForOpPop(JBCFunctionContext &context, bool &success) const { + std::list ans; + JBCStack2FEHelper &stack2feHelper = context.GetStack2FEHelper(); + success = stack2feHelper.Pop(op.GetOpcode()); + return ans; +} + +std::list JBCStmtInst::EmitToFEIRForOpDup(JBCFunctionContext &context, bool &success) const { + std::list ans; + JBCStack2FEHelper &stack2feHelper = context.GetStack2FEHelper(); + success = stack2feHelper.Dup(op.GetOpcode()); + return ans; +} + +std::list JBCStmtInst::EmitToFEIRForOpSwap(JBCFunctionContext &context, bool &success) const { + std::list ans; + JBCStack2FEHelper &stack2feHelper = context.GetStack2FEHelper(); + success = stack2feHelper.Swap(); + return ans; +} + +std::list JBCStmtInst::EmitToFEIRForOpMathBinop(JBCFunctionContext &context, bool &success) const { + std::list ans; + JBCStack2FEHelper &stack2feHelper = context.GetStack2FEHelper(); + // obtain in/out types + std::vector stackInTypes = op.GetInputTypesFromStack(); + CHECK_FATAL(stackInTypes.size() == 2, "Not enough input opnds for math binary op"); // 2 : opnds num limit + PrimType pty0 = JBCStack2FEHelper::JBCStackItemTypeToPrimType(stackInTypes[0]); + PrimType pty1 = JBCStack2FEHelper::JBCStackItemTypeToPrimType(stackInTypes[1]); + jbc::JBCPrimType stackOutType = op.GetOutputTypesToStack(); + PrimType ptyOut = JBCStack2FEHelper::JBCStackItemTypeToPrimType(stackOutType); + // stack operation + UniqueFEIRVar var1 = stack2feHelper.PopItem(pty1); + UniqueFEIRVar var0 = stack2feHelper.PopItem(pty0); + UniqueFEIRVar varOut = stack2feHelper.PushItem(ptyOut); + if (var1 == nullptr || var0 == nullptr || varOut == nullptr) { + success = false; + return ans; + } + auto it = opcodeMapForMathBinop.find(op.GetOpcode()); + if (it == opcodeMapForMathBinop.end()) { + success = false; + ERR(kLncErr, "EmitToFEIRForOpMathBinop: unsupport opcode %s", op.GetOpcodeName().c_str()); + return ans; + } + UniqueFEIRExpr expr = FEIRBuilder::CreateExprMathBinary(it->second, std::move(var0), std::move(var1)); + UniqueFEIRStmt stmt = FEIRBuilder::CreateStmtDAssign(std::move(varOut), std::move(expr)); + ans.push_back(std::move(stmt)); + return ans; +} + +std::list JBCStmtInst::EmitToFEIRForOpMathUnop(JBCFunctionContext &context, bool &success) const { + std::list ans; + JBCStack2FEHelper &stack2feHelper = context.GetStack2FEHelper(); + // obtain in/out types + std::vector stackInTypes = op.GetInputTypesFromStack(); + ASSERT(stackInTypes.size() == 1, "Not enough input opnds for math unary op"); + PrimType pty0 = JBCStack2FEHelper::JBCStackItemTypeToPrimType(stackInTypes[0]); + jbc::JBCPrimType stackOutType = op.GetOutputTypesToStack(); + PrimType ptyOut = JBCStack2FEHelper::JBCStackItemTypeToPrimType(stackOutType); + // stack operation + UniqueFEIRVar var0 = stack2feHelper.PopItem(pty0); + UniqueFEIRVar varOut = stack2feHelper.PushItem(ptyOut); + if (var0 == nullptr || varOut == nullptr) { + success = false; + return ans; + } + auto it = opcodeMapForMathUnop.find(op.GetOpcode()); + if (it == opcodeMapForMathUnop.end()) { + success = false; + ERR(kLncErr, "EmitToFEIRForOpMathUnop: unsupport opcode %s", op.GetOpcodeName().c_str()); + return ans; + } + UniqueFEIRExpr expr = FEIRBuilder::CreateExprMathUnary(it->second, std::move(var0)); + UniqueFEIRStmt stmt = 
FEIRBuilder::CreateStmtDAssign(std::move(varOut), std::move(expr)); + ans.push_back(std::move(stmt)); + return ans; +} + +std::list JBCStmtInst::EmitToFEIRForOpMathInc(JBCFunctionContext &context, bool &success) const { + std::list ans; + JBCStack2FEHelper &stack2feHelper = context.GetStack2FEHelper(); + const jbc::JBCOpMathInc &opInc = static_cast(op); + // no stack operation + uint32 regNum = stack2feHelper.GetRegNumForSlot(opInc.GetIndex()); + UniqueFEIRVar var0 = FEIRBuilder::CreateVarReg(regNum, PTY_i32); + UniqueFEIRVar varOut = var0->Clone(); + UniqueFEIRExpr opnd0 = FEIRBuilder::CreateExprDRead(std::move(var0)); + UniqueFEIRExpr opnd1 = FEIRBuilder::CreateExprConstI32(int32{ opInc.GetIncr() }); + UniqueFEIRExpr expr = FEIRBuilder::CreateExprMathBinary(OP_add, std::move(opnd0), std::move(opnd1)); + UniqueFEIRStmt stmt = FEIRBuilder::CreateStmtDAssign(std::move(varOut), std::move(expr)); + ans.push_back(std::move(stmt)); + success = true; + return ans; +} + +std::list JBCStmtInst::EmitToFEIRForOpConvert(JBCFunctionContext &context, bool &success) const { + std::list ans; + JBCStack2FEHelper &stack2feHelper = context.GetStack2FEHelper(); + std::vector stackInTypes = op.GetInputTypesFromStack(); + ASSERT(stackInTypes.size() == 1, "invalid in type for convert"); + PrimType ptyIn = JBCStack2FEHelper::JBCStackItemTypeToPrimType(stackInTypes[0]); + jbc::JBCPrimType stackOutType = op.GetOutputTypesToStack(); + ASSERT(stackOutType != jbc::JBCPrimType::kTypeDefault, "invalid out type for convert"); + PrimType ptyOut = JBCStack2FEHelper::JBCStackItemTypeToPrimType(stackOutType); + UniqueFEIRVar varIn = stack2feHelper.PopItem(ptyIn); + if (varIn == nullptr) { + success = false; + return ans; + } + UniqueFEIRVar varOut = stack2feHelper.PushItem(ptyOut); + if (varOut == nullptr) { + success = false; + return ans; + } + UniqueFEIRExpr expr(nullptr); + uint8 bitSize = 0; + Opcode opExt = OP_sext; + switch (op.GetOpcode()) { + case jbc::kOpI2B: + bitSize = 8; + opExt = OP_sext; + break; + case jbc::kOpI2C: + bitSize = 16; + opExt = OP_zext; + break; + case jbc::kOpI2S: + bitSize = 16; + opExt = OP_sext; + break; + default: + expr = FEIRBuilder::CreateExprCvtPrim(std::move(varIn), ptyOut); + break; + } + if (expr == nullptr) { + expr = (opExt == OP_sext) ? 
FEIRBuilder::CreateExprSExt(std::move(varIn)) : + FEIRBuilder::CreateExprZExt(std::move(varIn)); + FEIRExprExtractBits *ptrExprExt = static_cast(expr.get()); + ptrExprExt->SetBitSize(bitSize); + } + UniqueFEIRStmt stmt = FEIRBuilder::CreateStmtDAssign(std::move(varOut), std::move(expr)); + ans.push_back(std::move(stmt)); + return ans; +} + +std::list JBCStmtInst::EmitToFEIRForOpCompare(const JBCFunctionContext &context, bool &success) const { + (void) context; + std::list ans; + success = true; + return ans; +} + +std::list JBCStmtInst::EmitToFEIRForOpReturn(JBCFunctionContext &context, bool &success) const { + std::list ans; + JBCStack2FEHelper &stack2feHelper = context.GetStack2FEHelper(); + // obtain in/out types + std::vector stackInTypes = op.GetInputTypesFromStack(); + if (op.GetOpcode() == jbc::kOpReturn) { + ASSERT(stackInTypes.empty(), "Not enough input opnds for return op"); + UniqueFEIRStmt stmt = std::make_unique(UniqueFEIRExpr(nullptr)); + ans.push_back(std::move(stmt)); + } else { + ASSERT(stackInTypes.size() == 1, "Not enough input opnds for return op"); + PrimType pty = JBCStack2FEHelper::JBCStackItemTypeToPrimType(stackInTypes[0]); + UniqueFEIRVar var = stack2feHelper.PopItem(pty); + if (var == nullptr) { + success = false; + return ans; + } + UniqueFEIRExpr expr = FEIRBuilder::CreateExprDRead(std::move(var)); + UniqueFEIRStmt stmt = std::make_unique(std::move(expr)); + ans.push_back(std::move(stmt)); + } + return ans; +} + +std::list JBCStmtInst::EmitToFEIRForOpStaticFieldOpr(JBCFunctionContext &context, bool &success) const { + std::list ans; + const jbc::JBCConstPool &constPool = context.GetConstPool(); + JBCStack2FEHelper &stack2feHelper = context.GetStack2FEHelper(); + const jbc::JBCOpFieldOpr &opField = static_cast(op); + const jbc::JBCConst *constRaw = + constPool.GetConstByIdxWithTag(opField.GetFieldIdx(), jbc::JBCConstTag::kConstFieldRef); + CHECK_NULL_FATAL(constRaw); + const jbc::JBCConstRef *constRef = static_cast(constRaw); + FEStructFieldInfo *fieldInfo = static_cast(constRef->GetFEStructElemInfo()); + CHECK_NULL_FATAL(fieldInfo); + const FEIRType *fieldType = fieldInfo->GetType(); + PrimType pty = fieldType->IsScalar() ? fieldType->GetPrimType() : PTY_ref; + pty = JBCStack2FEHelper::SimplifyPrimType(pty); + if (op.GetOpcode() == jbc::kOpGetStatic) { + UniqueFEIRVar var = stack2feHelper.PushItem(pty); + UniqueFEIRStmt stmt = std::make_unique(nullptr, std::move(var), *fieldInfo, true); + ans.push_back(std::move(stmt)); + } else { + UniqueFEIRVar var = stack2feHelper.PopItem(pty); + UniqueFEIRStmt stmt = std::make_unique(nullptr, std::move(var), *fieldInfo, true); + ans.push_back(std::move(stmt)); + } + success = true; + return ans; +} + +std::list JBCStmtInst::EmitToFEIRForOpFieldOpr(JBCFunctionContext &context, bool &success) const { + std::list ans; + const jbc::JBCConstPool &constPool = context.GetConstPool(); + JBCStack2FEHelper &stack2feHelper = context.GetStack2FEHelper(); + const jbc::JBCOpFieldOpr &opField = static_cast(op); + const jbc::JBCConst *constRaw = + constPool.GetConstByIdxWithTag(opField.GetFieldIdx(), jbc::JBCConstTag::kConstFieldRef); + CHECK_NULL_FATAL(constRaw); + const jbc::JBCConstRef *constRef = static_cast(constRaw); + FEStructFieldInfo *fieldInfo = static_cast(constRef->GetFEStructElemInfo()); + CHECK_NULL_FATAL(fieldInfo); + const FEIRType *fieldType = fieldInfo->GetType(); + PrimType pty = fieldType->IsScalar() ? 
fieldType->GetPrimType() : PTY_ref; + pty = JBCStack2FEHelper::SimplifyPrimType(pty); + if (op.GetOpcode() == jbc::kOpGetField) { + UniqueFEIRVar varObj = stack2feHelper.PopItem(PTY_ref); + UniqueFEIRVar var = stack2feHelper.PushItem(pty); + UniqueFEIRStmt stmt = std::make_unique(std::move(varObj), std::move(var), *fieldInfo, false); + ans.push_back(std::move(stmt)); + } else { + UniqueFEIRVar var = stack2feHelper.PopItem(pty); + UniqueFEIRVar varObj = stack2feHelper.PopItem(PTY_ref); + UniqueFEIRStmt stmt = std::make_unique(std::move(varObj), std::move(var), *fieldInfo, false); + ans.push_back(std::move(stmt)); + } + success = true; + return ans; +} + +std::list JBCStmtInst::EmitToFEIRForOpInvoke(JBCFunctionContext &context, bool &success) const { + switch (op.GetOpcode()) { + case jbc::kOpInvokeVirtual: + return EmitToFEIRForOpInvokeVirtual(context, success); + case jbc::kOpInvokeSpecial: + return EmitToFEIRForOpInvokeSpecial(context, success); + case jbc::kOpInvokeStatic: + return EmitToFEIRForOpInvokeStatic(context, success); + case jbc::kOpInvokeInterface: + return EmitToFEIRForOpInvokeInterface(context, success); + case jbc::kOpInvokeDynamic: + return EmitToFEIRForOpInvokeDynamic(context, success); + default: + CHECK_FATAL(false, "unsupported op: %s", op.GetOpcodeName().c_str()); + break; + } + return EmitToFEIRCommon2(context, success); +} + +void JBCStmtInst::PrepareInvokeParametersAndReturn(JBCStack2FEHelper &stack2feHelper, + const FEStructMethodInfo &info, + FEIRStmtCallAssign &callStmt, + bool isStatic) const { + const MapleVector &argTypes = info.GetArgTypes(); + for (size_t i = argTypes.size(); i > 0; --i) { + const FEIRType *argType = argTypes[static_cast(i - 1)]; + PrimType pty = argType->GetPrimType(); + UniqueFEIRVar var = stack2feHelper.PopItem(JBCStack2FEHelper::SimplifyPrimType(pty)); + UniqueFEIRExpr expr = FEIRBuilder::CreateExprDRead(std::move(var)); + callStmt.AddExprArgReverse(std::move(expr)); + } + if (!isStatic) { + // push this + const FEIRType *thisType = info.GetOwnerType(); + PrimType pty = thisType->GetPrimType(); + UniqueFEIRVar var = stack2feHelper.PopItem(JBCStack2FEHelper::SimplifyPrimType(pty)); + UniqueFEIRExpr expr = FEIRBuilder::CreateExprDRead(std::move(var)); + callStmt.AddExprArgReverse(std::move(expr)); + } + if (!info.IsReturnVoid()) { + const FEIRType *retType = info.GetReturnType(); + PrimType pty = retType->GetPrimType(); + UniqueFEIRVar var = stack2feHelper.PushItem(JBCStack2FEHelper::SimplifyPrimType(pty)); + callStmt.SetVar(std::move(var)); + } +} + +std::list JBCStmtInst::EmitToFEIRForOpInvokeVirtual(JBCFunctionContext &context, bool &success) const { + std::list ans; + const jbc::JBCConstPool &constPool = context.GetConstPool(); + JBCStack2FEHelper &stack2feHelper = context.GetStack2FEHelper(); + const jbc::JBCOpInvoke &opInvoke = static_cast(op); + const jbc::JBCConst *constRaw = constPool.GetConstByIdx(opInvoke.GetMethodIdx()); + if (constRaw == nullptr || + (constRaw->GetTag() != jbc::JBCConstTag::kConstMethodRef && + constRaw->GetTag() != jbc::JBCConstTag::kConstInterfaceMethodRef)) { + success = false; + return ans; + } + const jbc::JBCConstRef *constRef = static_cast(constRaw); + FEStructMethodInfo *methodInfo = static_cast(constRef->GetFEStructElemInfo()); + UniqueFEIRStmt stmt = std::make_unique(*methodInfo, OP_virtualcall, nullptr, false); + FEIRStmtCallAssign *ptrStmt = static_cast(stmt.get()); + PrepareInvokeParametersAndReturn(stack2feHelper, *methodInfo, *ptrStmt, false); + ans.push_back(std::move(stmt)); + return ans; +} 
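
Editorial note: the invoke emitters above all funnel their operands through PrepareInvokeParametersAndReturn, which pops the actual arguments off the simulated operand stack right-to-left and records them with AddExprArgReverse, pops the receiver last for non-static calls, and pushes a destination slot only when the callee is not void. The following is a minimal sketch of that ordering using hypothetical stand-in types (not the real FEIR classes) and ignoring the wide-type slot handling done by PopItem/PushItem:

// Toy model of the pop/push order used by PrepareInvokeParametersAndReturn.
// ToyCall and PrepareToyInvoke are illustrative names, not part of the patch.
#include <cassert>
#include <deque>
#include <string>
#include <vector>

struct ToyCall {                      // stand-in for FEIRStmtCallAssign
  std::deque<std::string> args;       // ends up in source (left-to-right) order
  std::string retVar;
  void AddArgReverse(std::string a) { args.push_front(std::move(a)); }
};

void PrepareToyInvoke(std::vector<std::string> &stack, size_t argCount, bool isStatic,
                      bool returnsValue, ToyCall &call) {
  for (size_t i = 0; i < argCount; ++i) {  // pop argN ... arg1 (right-to-left)
    call.AddArgReverse(stack.back());
    stack.pop_back();
  }
  if (!isStatic) {                         // receiver sits below the arguments
    call.AddArgReverse(stack.back());
    stack.pop_back();
  }
  if (returnsValue) {                      // result becomes the new stack top
    stack.push_back("ret");
    call.retVar = "ret";
  }
}

int main() {
  std::vector<std::string> stack = {"this", "arg1", "arg2"};  // top is arg2
  ToyCall call;
  PrepareToyInvoke(stack, 2, /*isStatic=*/false, /*returnsValue=*/true, call);
  assert((call.args == std::deque<std::string>{"this", "arg1", "arg2"}));
  assert(stack.size() == 1 && stack.back() == "ret");
  return 0;
}

Running the sketch leaves the arguments in source order and a single return slot on the toy stack, which is the same invariant the emitted FEIR call statement relies on.
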
+ +std::list JBCStmtInst::EmitToFEIRForOpInvokeStatic(JBCFunctionContext &context, bool &success) const { + std::list ans; + const jbc::JBCConstPool &constPool = context.GetConstPool(); + JBCStack2FEHelper &stack2feHelper = context.GetStack2FEHelper(); + const jbc::JBCOpInvoke &opInvoke = static_cast(op); + const jbc::JBCConst *constRaw = constPool.GetConstByIdx(opInvoke.GetMethodIdx()); + if (constRaw == nullptr || + (constRaw->GetTag() != jbc::JBCConstTag::kConstMethodRef && + constRaw->GetTag() != jbc::JBCConstTag::kConstInterfaceMethodRef)) { + success = false; + return ans; + } + const jbc::JBCConstRef *constRef = static_cast(constRaw); + FEStructMethodInfo *methodInfo = static_cast(constRef->GetFEStructElemInfo()); + UniqueFEIRStmt stmt = std::make_unique(*methodInfo, OP_call, nullptr, true); + FEIRStmtCallAssign *ptrStmt = static_cast(stmt.get()); + PrepareInvokeParametersAndReturn(stack2feHelper, *methodInfo, *ptrStmt, true); + ans.push_back(std::move(stmt)); + return ans; +} + +std::list JBCStmtInst::EmitToFEIRForOpInvokeInterface(JBCFunctionContext &context, + bool &success) const { + std::list ans; + const jbc::JBCConstPool &constPool = context.GetConstPool(); + JBCStack2FEHelper &stack2feHelper = context.GetStack2FEHelper(); + const jbc::JBCOpInvoke &opInvoke = static_cast(op); + const jbc::JBCConst *constRaw = constPool.GetConstByIdx(opInvoke.GetMethodIdx()); + if (constRaw == nullptr || + (constRaw->GetTag() != jbc::JBCConstTag::kConstMethodRef && + constRaw->GetTag() != jbc::JBCConstTag::kConstInterfaceMethodRef)) { + success = false; + return ans; + } + const jbc::JBCConstRef *constRef = static_cast(constRaw); + FEStructMethodInfo *methodInfo = static_cast(constRef->GetFEStructElemInfo()); + UniqueFEIRStmt stmt = std::make_unique(*methodInfo, OP_interfacecall, nullptr, false); + FEIRStmtCallAssign *ptrStmt = static_cast(stmt.get()); + PrepareInvokeParametersAndReturn(stack2feHelper, *methodInfo, *ptrStmt, false); + ans.push_back(std::move(stmt)); + return ans; +} + +std::list JBCStmtInst::EmitToFEIRForOpInvokeSpecial(JBCFunctionContext &context, bool &success) const { + std::list ans; + const jbc::JBCConstPool &constPool = context.GetConstPool(); + JBCStack2FEHelper &stack2feHelper = context.GetStack2FEHelper(); + const jbc::JBCOpInvoke &opInvoke = static_cast(op); + const jbc::JBCConst *constRaw = constPool.GetConstByIdx(opInvoke.GetMethodIdx()); + if (constRaw == nullptr || + (constRaw->GetTag() != jbc::JBCConstTag::kConstMethodRef && + constRaw->GetTag() != jbc::JBCConstTag::kConstInterfaceMethodRef)) { + success = false; + return ans; + } + const jbc::JBCConstRef *constRef = static_cast(constRaw); + FEStructMethodInfo *methodInfo = static_cast(constRef->GetFEStructElemInfo()); + Opcode mirOp = methodInfo->IsConstructor() ? 
OP_call : OP_superclasscall; + UniqueFEIRStmt stmt = std::make_unique(*methodInfo, mirOp, nullptr, false); + FEIRStmtCallAssign *ptrStmt = static_cast(stmt.get()); + PrepareInvokeParametersAndReturn(stack2feHelper, *methodInfo, *ptrStmt, false); + ans.push_back(std::move(stmt)); + return ans; +} + +std::list JBCStmtInst::EmitToFEIRForOpInvokeDynamic(JBCFunctionContext &context, bool &success) const { + std::list ans; + const jbc::JBCConstPool &constPool = context.GetConstPool(); + JBCStack2FEHelper &stack2feHelper = context.GetStack2FEHelper(); + const jbc::JBCOpInvoke &opInvoke = static_cast(op); + const jbc::JBCConst *constRaw = constPool.GetConstByIdx(opInvoke.GetMethodIdx()); + if (constRaw == nullptr || constRaw->GetTag() != jbc::JBCConstTag::kConstInvokeDynamic) { + success = false; + return ans; + } + const jbc::JBCConstInvokeDynamic *constDynamic = static_cast(constRaw); + FEStructMethodInfo *methodInfo = static_cast(constDynamic->GetFEStructElemInfo()); + UniqueFEIRStmt stmt = std::make_unique(*methodInfo, OP_call, nullptr, true); + FEIRStmtCallAssign *ptrStmt = static_cast(stmt.get()); + PrepareInvokeParametersAndReturn(stack2feHelper, *methodInfo, *ptrStmt, true); + ans.push_back(std::move(stmt)); + return ans; +} + +std::list JBCStmtInst::EmitToFEIRForOpNew(JBCFunctionContext &context, bool &success) const { + std::list ans; + const jbc::JBCConstPool &constPool = context.GetConstPool(); + JBCStack2FEHelper &stack2feHelper = context.GetStack2FEHelper(); + const jbc::JBCOpNew &opNew = static_cast(op); + UniqueFEIRType type = opNew.GetFEIRType(constPool)->Clone(); + UniqueFEIRExpr expr(nullptr); + switch (op.GetOpcode()) { + case jbc::kOpNew: { + expr = FEIRBuilder::CreateExprJavaNewInstance(type->Clone()); + break; + } + case jbc::kOpNewArray: + case jbc::kOpANewArray: { + UniqueFEIRVar varSize = stack2feHelper.PopItem(PTY_i32); + if (varSize == nullptr) { + success = false; + return ans; + } + (void)type->ArrayIncrDim(); + UniqueFEIRExpr exprSize = FEIRBuilder::CreateExprDRead(std::move(varSize)); + expr = FEIRBuilder::CreateExprJavaNewArray(type->Clone(), std::move(exprSize)); + break; + } + default: + FATAL(kLncFatal, "should not run here"); + return ans; + } + UniqueFEIRVar varDst = stack2feHelper.PushItem(PTY_ref); + if (varDst == nullptr || expr == nullptr) { + success = false; + return ans; + } + varDst->SetType(std::move(type)); + UniqueFEIRStmt stmt = FEIRBuilder::CreateStmtDAssign(std::move(varDst), std::move(expr), true); + ans.push_back(std::move(stmt)); + return ans; +} + +std::list JBCStmtInst::EmitToFEIRForOpMultiANewArray(JBCFunctionContext &context, bool &success) const { + std::list ans; + const jbc::JBCConstPool &constPool = context.GetConstPool(); + JBCStack2FEHelper &stack2feHelper = context.GetStack2FEHelper(); + const jbc::JBCOpMultiANewArray &opArray = static_cast(op); + const jbc::JBCConst *constRaw = constPool.GetConstByIdxWithTag(opArray.GetRefTypeIdx(), + jbc::JBCConstTag::kConstClass); + if (constRaw == nullptr) { + success = false; + return ans; + } + const jbc::JBCConstClass *constClass = static_cast(constRaw); + UniqueFEIRStmt stmt = std::make_unique(nullptr, constClass->GetFEIRType()->Clone(), + nullptr); + std::vector stackInTypes = op.GetInputTypesFromStack(constPool); + std::reverse(stackInTypes.begin(), stackInTypes.end()); + FEIRStmtJavaMultiANewArray *javaMultiANewArray = static_cast(stmt.get()); + for (jbc::JBCPrimType popType : stackInTypes) { + PrimType pty = JBCStack2FEHelper::JBCStackItemTypeToPrimType(popType); + UniqueFEIRVar var = 
stack2feHelper.PopItem(pty); + if (var == nullptr) { + success = false; + return ans; + } + javaMultiANewArray->AddVarSizeRev(std::move(var)); + } + jbc::JBCPrimType stackOutType = op.GetOutputTypesToStack(constPool); + if (stackOutType == jbc::JBCPrimType::kTypeDefault) { + success = false; + return ans; + } + PrimType pty = JBCStack2FEHelper::JBCStackItemTypeToPrimType(stackOutType); + UniqueFEIRVar varRet = stack2feHelper.PushItem(pty); + javaMultiANewArray->SetArrayType(varRet->GetType()->Clone()); + javaMultiANewArray->SetVar(std::move(varRet)); + ans.push_back(std::move(stmt)); + return ans; +} + +std::list JBCStmtInst::EmitToFEIRForOpThrow(JBCFunctionContext &context, bool &success) const { + std::list ans; + JBCStack2FEHelper &stack2feHelper = context.GetStack2FEHelper(); + // obtain in/out types + std::vector stackInTypes = op.GetInputTypesFromStack(); + ASSERT(stackInTypes.size() == 1, "Not enough input opnds for return op"); + PrimType pty = JBCStack2FEHelper::JBCStackItemTypeToPrimType(stackInTypes[0]); + UniqueFEIRVar var = stack2feHelper.PopItem(pty); + if (var == nullptr) { + success = false; + return ans; + } + UniqueFEIRExpr expr = FEIRBuilder::CreateExprDRead(std::move(var)); + UniqueFEIRStmt stmt = std::make_unique(OP_throw, std::move(expr)); + ans.push_back(std::move(stmt)); + return ans; +} + +std::list JBCStmtInst::EmitToFEIRForOpTypeCheck(JBCFunctionContext &context, bool &success) const { + std::list ans; + const jbc::JBCConstPool &constPool = context.GetConstPool(); + JBCStack2FEHelper &stack2feHelper = context.GetStack2FEHelper(); + jbc::JBCOpcode opcode = op.GetOpcode(); + const jbc::JBCOpTypeCheck &opTypeCheck = static_cast(op); + PrimType ptyIn = PTY_ref; + PrimType ptyOut = (opcode == jbc::kOpCheckCast) ? PTY_ref : PTY_i32; + UniqueFEIRVar varIn = stack2feHelper.PopItem(ptyIn); + UniqueFEIRVar varOut = stack2feHelper.PushItem(ptyOut); + uint16 constIdx = opTypeCheck.GetTypeIdx(); + const jbc::JBCConst *constRaw = constPool.GetConstByIdxWithTag(constIdx, jbc::JBCConstTag::kConstClass); + if (constRaw == nullptr) { + success = false; + return ans; + } + const jbc::JBCConstClass *constClass = static_cast(constRaw); + UniqueFEIRType type = constClass->GetFEIRType()->Clone(); + if (opcode == jbc::kOpCheckCast) { + UniqueFEIRStmt stmt = FEIRBuilder::CreateStmtJavaCheckCast(std::move(varOut), std::move(varIn), std::move(type)); + ans.push_back(std::move(stmt)); + } else { + UniqueFEIRStmt stmt = FEIRBuilder::CreateStmtJavaInstanceOf(std::move(varOut), std::move(varIn), std::move(type)); + ans.push_back(std::move(stmt)); + } + return ans; +} + +std::list JBCStmtInst::EmitToFEIRForOpMonitor(JBCFunctionContext &context, bool &success) const { + std::list ans; + JBCStack2FEHelper &stack2feHelper = context.GetStack2FEHelper(); + // stack operation + UniqueFEIRVar var = stack2feHelper.PopItem(PTY_ref); + if (var == nullptr) { + success = false; + return ans; + } + auto it = opcodeMapForMonitor.find(op.GetOpcode()); + if (it == opcodeMapForMonitor.end()) { + success = false; + ERR(kLncErr, "EmitToFEIRForOpMathUnop: unsupport opcode %s", op.GetOpcodeName().c_str()); + return ans; + } + UniqueFEIRExpr expr = FEIRBuilder::CreateExprDRead(std::move(var)); + UniqueFEIRStmt stmt = std::make_unique(it->second, std::move(expr)); + ans.push_back(std::move(stmt)); + return ans; +} + +std::list JBCStmtInst::EmitToFEIRForOpArrayLength(JBCFunctionContext &context, bool &success) const { + std::list ans; + JBCStack2FEHelper &stack2feHelper = context.GetStack2FEHelper(); + // stack 
operation + // in + UniqueFEIRVar varArray = stack2feHelper.PopItem(PTY_ref); + if (varArray == nullptr) { + success = false; + return ans; + } + // out + UniqueFEIRVar varDst = stack2feHelper.PushItem(PTY_i32); + if (varDst == nullptr) { + success = false; + return ans; + } + UniqueFEIRExpr exprArray = FEIRBuilder::CreateExprDRead(std::move(varArray)); + UniqueFEIRExpr exprArrayLength = FEIRBuilder::CreateExprJavaArrayLength(std::move(exprArray)); + UniqueFEIRStmt stmt = FEIRBuilder::CreateStmtDAssign(std::move(varDst), std::move(exprArrayLength), true); + ans.push_back(std::move(stmt)); + return ans; +} + +std::list JBCStmtInst::EmitToFEIRCommon(JBCFunctionContext &context, bool &success) const { + std::list ans; + JBCStack2FEHelper &stack2feHelper = context.GetStack2FEHelper(); + std::vector stackInTypes = op.GetInputTypesFromStack(); + std::reverse(stackInTypes.begin(), stackInTypes.end()); + jbc::JBCPrimType stackOutType = op.GetOutputTypesToStack(); + for (jbc::JBCPrimType popType : stackInTypes) { + PrimType pty = JBCStack2FEHelper::JBCStackItemTypeToPrimType(popType); + UniqueFEIRVar var = stack2feHelper.PopItem(pty); + if (var == nullptr) { + success = false; + break; + } + } + if (success && stackOutType != jbc::JBCPrimType::kTypeDefault) { + PrimType pty = JBCStack2FEHelper::JBCStackItemTypeToPrimType(stackOutType); + UniqueFEIRVar var = stack2feHelper.PushItem(pty); + if (var == nullptr) { + success = false; + } + } + return ans; +} + +std::list JBCStmtInst::EmitToFEIRCommon2(JBCFunctionContext &context, bool &success) const { + std::list ans; + JBCStack2FEHelper &stack2feHelper = context.GetStack2FEHelper(); + const jbc::JBCConstPool &constPool = context.GetConstPool(); + std::vector stackInTypes = op.GetInputTypesFromStack(constPool); + std::reverse(stackInTypes.begin(), stackInTypes.end()); + jbc::JBCPrimType stackOutType = op.GetOutputTypesToStack(constPool); + for (jbc::JBCPrimType popType : stackInTypes) { + PrimType pty = JBCStack2FEHelper::JBCStackItemTypeToPrimType(popType); + UniqueFEIRVar var = stack2feHelper.PopItem(pty); + if (var == nullptr) { + success = false; + break; + } + } + if (success && stackOutType != jbc::JBCPrimType::kTypeDefault) { + PrimType pty = JBCStack2FEHelper::JBCStackItemTypeToPrimType(stackOutType); + UniqueFEIRVar var = stack2feHelper.PushItem(pty); + if (var == nullptr) { + success = false; + } + } + return ans; +} + +UniqueFEIRStmt JBCStmtInst::GenerateStmtForConstI32(JBCStack2FEHelper &stack2feHelper, int32 val, bool &success) const { + UniqueFEIRExpr exprConst = FEIRBuilder::CreateExprConstI32(val); + UniqueFEIRVar varDst = stack2feHelper.PushItem(PTY_i32); + if (varDst == nullptr) { + success = false; + return UniqueFEIRStmt(nullptr); + } + UniqueFEIRStmt stmtDAssign = FEIRBuilder::CreateStmtDAssign(std::move(varDst), std::move(exprConst)); + return stmtDAssign; +} + +UniqueFEIRStmt JBCStmtInst::GenerateStmtForConstI64(JBCStack2FEHelper &stack2feHelper, int64 val, bool &success) const { + UniqueFEIRExpr exprConst = FEIRBuilder::CreateExprConstI64(val); + UniqueFEIRVar varDst = stack2feHelper.PushItem(PTY_i64); + if (varDst == nullptr) { + success = false; + return UniqueFEIRStmt(nullptr); + } + UniqueFEIRStmt stmtDAssign = FEIRBuilder::CreateStmtDAssign(std::move(varDst), std::move(exprConst)); + return stmtDAssign; +} + +UniqueFEIRStmt JBCStmtInst::GenerateStmtForConstF32(JBCStack2FEHelper &stack2feHelper, float val, bool &success) const { + UniqueFEIRExpr exprConst = FEIRBuilder::CreateExprConstF32(val); + UniqueFEIRVar varDst = 
stack2feHelper.PushItem(PTY_f32); + if (varDst == nullptr) { + success = false; + return UniqueFEIRStmt(nullptr); + } + UniqueFEIRStmt stmtDAssign = FEIRBuilder::CreateStmtDAssign(std::move(varDst), std::move(exprConst)); + return stmtDAssign; +} + +UniqueFEIRStmt JBCStmtInst::GenerateStmtForConstF64(JBCStack2FEHelper &stack2feHelper, double val, + bool &success) const { + UniqueFEIRExpr exprConst = FEIRBuilder::CreateExprConstF64(val); + UniqueFEIRVar varDst = stack2feHelper.PushItem(PTY_f64); + if (varDst == nullptr) { + success = false; + return UniqueFEIRStmt(nullptr); + } + UniqueFEIRStmt stmtDAssign = FEIRBuilder::CreateStmtDAssign(std::move(varDst), std::move(exprConst)); + return stmtDAssign; +} + +// ---------- JBCStmtInstBranch ---------- +std::map> JBCStmtInstBranch::opcodeMapForCondGoto = + JBCStmtInstBranch::InitOpcodeMapForCondGoto(); + +std::map JBCStmtInstBranch::funcPtrMapForEmitToFEIR = + JBCStmtInstBranch::InitFuncPtrMapForEmitToFEIR(); + +JBCStmtInstBranch::JBCStmtInstBranch(const jbc::JBCOp &argOp) + : JBCStmt(kStmtPesudo, kJBCStmtInstBranch), + op(argOp) { + SetFallThru(op.IsFallThru()); +} + +bool JBCStmtInstBranch::IsStmtInstImpl() const { + return true; +} + +void JBCStmtInstBranch::DumpImpl(const std::string &prefix) const { + std::cout << prefix << "JBCStmtInstBranch (id=" << id << ", " << + "kind=" << JBCStmtKindHelper::JBCStmtKindName(JBCkind) << ", " << + "op=" << op.GetOpcodeName() << ", " << + "targets={"; + for (FEIRStmt *stmt : extraSuccs) { + std::cout << stmt->GetID() << " "; + } + std::cout << "})" << std::endl; +} + +std::string JBCStmtInstBranch::DumpDotStringImpl() const { + std::stringstream ss; + ss << " " << id << ": " << op.GetOpcodeName(); + return ss.str(); +} + +std::list JBCStmtInstBranch::EmitToFEIRImpl(JBCFunctionContext &context, bool &success) const { + auto it = funcPtrMapForEmitToFEIR.find(op.GetOpcodeKind()); + if (it != funcPtrMapForEmitToFEIR.end()) { + return (this->*(it->second))(context, success); + } else { + return EmitToFEIRCommon(context, success); + } +} + +JBCStmtPesudoLabel *JBCStmtInstBranch::GetTarget(const std::map &mapPCStmtLabel, + uint32 pc) const { + auto itTarget = mapPCStmtLabel.find(pc); + if (itTarget == mapPCStmtLabel.end()) { + ERR(kLncErr, "target@pc=%u not found", pc); + return nullptr; + } + return itTarget->second; +} + +std::map JBCStmtInstBranch::InitFuncPtrMapForEmitToFEIR() { + std::map ans; + ans[jbc::JBCOpcodeKind::kOpKindGoto] = &JBCStmtInstBranch::EmitToFEIRForOpGoto; + ans[jbc::JBCOpcodeKind::kOpKindBranch] = &JBCStmtInstBranch::EmitToFEIRForOpBranch; + ans[jbc::JBCOpcodeKind::kOpKindSwitch] = &JBCStmtInstBranch::EmitToFEIRForOpSwitch; + ans[jbc::JBCOpcodeKind::kOpKindJsr] = &JBCStmtInstBranch::EmitToFEIRForOpJsr; + ans[jbc::JBCOpcodeKind::kOpKindRet] = &JBCStmtInstBranch::EmitToFEIRForOpRet; + return ans; +} + +std::list JBCStmtInstBranch::EmitToFEIRForOpGoto(JBCFunctionContext &context, bool &success) const { + std::list ans; + const std::map &mapPCStmtLabel = context.GetMapPCLabelStmt(); + const jbc::JBCOpGoto &opGoto = static_cast(op); + auto it = mapPCStmtLabel.find(opGoto.GetTarget()); + if (it == mapPCStmtLabel.end()) { + ERR(kLncErr, "target not found for inst branch"); + success = false; + } else { + JBCStmtPesudoLabel *stmtLabel = it->second; + CHECK_NULL_FATAL(stmtLabel); + UniqueFEIRStmt stmt = FEIRBuilder::CreateStmtGoto(stmtLabel->GetLabelIdx()); + ans.push_back(std::move(stmt)); + } + return ans; +} + +std::list JBCStmtInstBranch::EmitToFEIRForOpBranch(JBCFunctionContext &context, 
bool &success) const { + std::list ans; + JBCStack2FEHelper &stack2feHelper = context.GetStack2FEHelper(); + const std::map &mapPCStmtLabel = context.GetMapPCLabelStmt(); + const jbc::JBCOpBranch &opBranch = static_cast(op); + auto itTarget = mapPCStmtLabel.find(opBranch.GetTarget()); + if (itTarget == mapPCStmtLabel.end()) { + ERR(kLncErr, "target not found for inst branch"); + success = false; + return ans; + } + JBCStmtPesudoLabel *stmtLabel = itTarget->second; + CHECK_NULL_FATAL(stmtLabel); + auto it = opcodeMapForCondGoto.find(op.GetOpcode()); + if (it == opcodeMapForCondGoto.end()) { + ERR(kLncErr, "unsupport opcode %s", op.GetOpcodeName().c_str()); + success = false; + return ans; + } + Opcode opStmt = std::get<0>(it->second); + Opcode opCompExpr = std::get<1>(it->second); + uint8 mode = std::get<2>(it->second); + // opnds + UniqueFEIRExpr expr0; + UniqueFEIRExpr expr1; + PrimType pty = ((mode & kModeUseRef) == 0) ? PTY_i32 : PTY_ref; + if ((mode & kModeUseZeroAsSecondOpnd) == 0) { + UniqueFEIRVar var1 = stack2feHelper.PopItem(pty); + if (var1 == nullptr) { + success = false; + return ans; + } + expr1 = FEIRBuilder::CreateExprDRead(std::move(var1)); + } else { + expr1 = (pty == PTY_ref) ? FEIRBuilder::CreateExprConstRefNull() : FEIRBuilder::CreateExprConstI32(0); + } + UniqueFEIRVar var0 = stack2feHelper.PopItem(pty); + if (var0 == nullptr) { + success = false; + return ans; + } + expr0 = FEIRBuilder::CreateExprDRead(std::move(var0)); + UniqueFEIRExpr exprComp = FEIRBuilder::CreateExprMathBinary(opCompExpr, std::move(expr0), std::move(expr1)); + UniqueFEIRStmt stmt = FEIRBuilder::CreateStmtCondGoto(stmtLabel->GetLabelIdx(), opStmt, std::move(exprComp)); + ans.push_back(std::move(stmt)); + return ans; +} + +std::list JBCStmtInstBranch::EmitToFEIRForOpSwitch(JBCFunctionContext &context, bool &success) const { + std::list ans; + JBCStack2FEHelper &stack2feHelper = context.GetStack2FEHelper(); + const std::map &mapPCStmtLabel = context.GetMapPCLabelStmt(); + const jbc::JBCOpSwitch &opSwitch = static_cast(op); + UniqueFEIRVar var = stack2feHelper.PopItem(PTY_i32); + if (var == nullptr) { + success = false; + return ans; + } + UniqueFEIRExpr exprValue = FEIRBuilder::CreateExprDRead(std::move(var)); + UniqueFEIRStmt stmt = FEIRBuilder::CreateStmtSwitch(std::move(exprValue)); + FEIRStmtSwitch *ptrStmtSwitch = static_cast(stmt.get()); + CHECK_NULL_FATAL(ptrStmtSwitch); + // default target + auto itTargetDefault = mapPCStmtLabel.find(opSwitch.GetDefaultTarget()); + if (itTargetDefault == mapPCStmtLabel.end()) { + ERR(kLncErr, "target not found for inst switch"); + success = false; + return ans; + } + CHECK_NULL_FATAL(itTargetDefault->second); + ptrStmtSwitch->SetDefaultLabelIdx(itTargetDefault->second->GetLabelIdx()); + // value targets + for (const auto &itItem : opSwitch.GetTargets()) { + auto itTarget = mapPCStmtLabel.find(itItem.second); + if (itTarget == mapPCStmtLabel.end()) { + ERR(kLncErr, "target not found for inst switch"); + success = false; + return ans; + } + CHECK_NULL_FATAL(itTarget->second); + ptrStmtSwitch->AddTarget(itItem.first, itTarget->second->GetLabelIdx()); + } + ans.push_back(std::move(stmt)); + return ans; +} + +std::list JBCStmtInstBranch::EmitToFEIRForOpJsr(JBCFunctionContext &context, bool &success) const { + std::list ans; + JBCStack2FEHelper &stack2feHelper = context.GetStack2FEHelper(); + const std::map &mapPCStmtLabel = context.GetMapPCLabelStmt(); + const jbc::JBCOpJsr &opJsr = static_cast(op); + auto itTarget = mapPCStmtLabel.find(opJsr.GetTarget()); + if 
(itTarget == mapPCStmtLabel.end()) { + ERR(kLncErr, "target not found for inst jsr"); + success = false; + return ans; + } + JBCStmtPesudoLabel *stmtLabel = itTarget->second; + CHECK_NULL_FATAL(stmtLabel); + uint32 slotRegNum = stack2feHelper.GetRegNumForSlot(opJsr.GetSlotIdx()); + UniqueFEIRVar var = FEIRBuilder::CreateVarReg(slotRegNum, PTY_i32, false); + (void)stack2feHelper.PushItem(var->Clone(), PTY_a32); + UniqueFEIRExpr exprConst = FEIRBuilder::CreateExprConstI32(opJsr.GetJsrID()); + UniqueFEIRStmt stmtJsr = FEIRBuilder::CreateStmtDAssign(std::move(var), std::move(exprConst)); + UniqueFEIRStmt stmtGoto = FEIRBuilder::CreateStmtGoto(stmtLabel->GetLabelIdx()); + ans.push_back(std::move(stmtJsr)); + ans.push_back(std::move(stmtGoto)); + return ans; +} + +std::list JBCStmtInstBranch::EmitToFEIRForOpRet(JBCFunctionContext &context, bool &success) const { + return EmitToFEIRForOpRetImpl(context, success); +} + +std::list JBCStmtInstBranch::EmitToFEIRCommon(JBCFunctionContext &context, bool &success) const { + std::list ans; + JBCStack2FEHelper &stack2feHelper = context.GetStack2FEHelper(); + std::vector stackInTypes = op.GetInputTypesFromStack(); + std::reverse(stackInTypes.begin(), stackInTypes.end()); + jbc::JBCPrimType stackOutType = op.GetOutputTypesToStack(); + for (jbc::JBCPrimType popType : stackInTypes) { + PrimType pty = JBCStack2FEHelper::JBCStackItemTypeToPrimType(popType); + UniqueFEIRVar var = stack2feHelper.PopItem(pty); + if (var == nullptr) { + success = false; + break; + } + } + if (success && stackOutType != jbc::JBCPrimType::kTypeDefault) { + PrimType pty = JBCStack2FEHelper::JBCStackItemTypeToPrimType(stackOutType); + UniqueFEIRVar var = stack2feHelper.PushItem(pty); + if (var == nullptr) { + success = false; + } + } + return ans; +} + +std::map> JBCStmtInstBranch::InitOpcodeMapForCondGoto() { + std::map> ans; + ans[jbc::kOpIfeq] = std::make_tuple(OP_brtrue, OP_eq, kModeUseZeroAsSecondOpnd); + ans[jbc::kOpIfne] = std::make_tuple(OP_brfalse, OP_eq, kModeUseZeroAsSecondOpnd); + ans[jbc::kOpIflt] = std::make_tuple(OP_brtrue, OP_lt, kModeUseZeroAsSecondOpnd); + ans[jbc::kOpIfge] = std::make_tuple(OP_brtrue, OP_ge, kModeUseZeroAsSecondOpnd); + ans[jbc::kOpIfgt] = std::make_tuple(OP_brtrue, OP_gt, kModeUseZeroAsSecondOpnd); + ans[jbc::kOpIfle] = std::make_tuple(OP_brtrue, OP_le, kModeUseZeroAsSecondOpnd); + ans[jbc::kOpIfICmpeq] = std::make_tuple(OP_brtrue, OP_eq, kModeDefault); + ans[jbc::kOpIfICmpne] = std::make_tuple(OP_brfalse, OP_eq, kModeDefault); + ans[jbc::kOpIfICmplt] = std::make_tuple(OP_brtrue, OP_lt, kModeDefault); + ans[jbc::kOpIfICmpge] = std::make_tuple(OP_brtrue, OP_ge, kModeDefault); + ans[jbc::kOpIfICmpgt] = std::make_tuple(OP_brtrue, OP_gt, kModeDefault); + ans[jbc::kOpIfICmple] = std::make_tuple(OP_brtrue, OP_le, kModeDefault); + ans[jbc::kOpIfACmpeq] = std::make_tuple(OP_brtrue, OP_eq, kModeUseRef); + ans[jbc::kOpIfACmpne] = std::make_tuple(OP_brfalse, OP_eq, kModeUseRef); + ans[jbc::kOpIfNull] = std::make_tuple(OP_brtrue, OP_eq, kModeUseRef | kModeUseZeroAsSecondOpnd); + ans[jbc::kOpIfNonNull] = std::make_tuple(OP_brfalse, OP_eq, kModeUseRef | kModeUseZeroAsSecondOpnd); + return ans; +} + +// ---------- JBCStmtInstBranchRet ---------- +JBCStmtInstBranchRet::JBCStmtInstBranchRet(const jbc::JBCOp &argOp) + : JBCStmtInstBranch(argOp) { + JBCkind = kJBCStmtInstBranchRet; +} + +std::list JBCStmtInstBranchRet::EmitToFEIRForOpRetImpl(JBCFunctionContext &context, + bool &success) const { + std::list ans; + const std::map &mapPCStmtLabel = 
context.GetMapPCLabelStmt(); + const std::map> &mapJsrSlotRetAddr = context.GetMapJsrSlotRetAddr(); + const jbc::JBCOpRet &opRet = static_cast(op); + uint16 slotIdx = opRet.GetIndex(); + auto itJsrInfo = mapJsrSlotRetAddr.find(slotIdx); + if (itJsrInfo == mapJsrSlotRetAddr.end()) { + WARN(kLncWarn, "jsr info not found"); + success = false; + return ans; + } + const std::map &jsrInfo = itJsrInfo->second; + if (jsrInfo.size() == 1) { + auto itInfo = jsrInfo.begin(); + JBCStmtPesudoLabel *stmtLabel = GetTarget(mapPCStmtLabel, itInfo->second); + if (stmtLabel == nullptr) { + success = false; + return ans; + } + UniqueFEIRStmt stmtGoto = FEIRBuilder::CreateStmtGoto(stmtLabel->GetLabelIdx()); + ans.push_back(std::move(stmtGoto)); + } else { + uint32 idx = 0; + UniqueFEIRVar var = FEIRBuilder::CreateVarReg(slotIdx, PTY_i32); + UniqueFEIRExpr exprValue = FEIRBuilder::CreateExprDRead(std::move(var)); + UniqueFEIRStmt stmtSwitch = FEIRBuilder::CreateStmtSwitch(std::move(exprValue)); + FEIRStmtSwitch *ptrStmtSwitch = static_cast(stmtSwitch.get()); + for (auto itInfo : jsrInfo) { + JBCStmtPesudoLabel *stmtLabel = GetTarget(mapPCStmtLabel, itInfo.second); + if (stmtLabel == nullptr) { + success = false; + return ans; + } + if (idx == jsrInfo.size() - 1) { + ptrStmtSwitch->SetDefaultLabelIdx(stmtLabel->GetLabelIdx()); + } else { + ptrStmtSwitch->AddTarget(itInfo.first, stmtLabel->GetLabelIdx()); + } + ++idx; + } + ans.push_back(std::move(stmtSwitch)); + } + return ans; +} + +// ---------- JBCStmtPesudoLabel ---------- +void JBCStmtPesudoLabel::DumpImpl(const std::string &prefix) const { + std::cout << prefix << "JBCStmtPesudoLabel (id=" << id << "," << + "kind=" << JBCStmtKindHelper::JBCStmtKindName(JBCkind) << ", " << + "preds={"; + for (FEIRStmt *stmt : extraPreds) { + std::cout << stmt->GetID() << " "; + } + std::cout << "})" << std::endl; +} + +std::string JBCStmtPesudoLabel::DumpDotStringImpl() const { + std::stringstream ss; + ss << " " << id << ": label" << labelIdx; + return ss.str(); +} + +std::list JBCStmtPesudoLabel::EmitToFEIRImpl(JBCFunctionContext &context, bool &success) const { + (void) context; + std::list ans; + UniqueFEIRStmt stmt = std::make_unique(labelIdx); + ans.push_back(std::move(stmt)); + success = true; + return ans; +} + +// ---------- JBCStmtPesudoCatch ---------- +void JBCStmtPesudoCatch::DumpImpl(const std::string &prefix) const { + std::cout << prefix << "JBCStmtPesudoCatch (id=" << id << "," << + "kind=" << JBCStmtKindHelper::JBCStmtKindName(JBCkind) << ", " << + "preds={"; + for (FEIRStmt *stmt : extraPreds) { + std::cout << stmt->GetID() << " "; + } + std::cout << "})" << std::endl; +} + +std::string JBCStmtPesudoCatch::DumpDotStringImpl() const { + std::stringstream ss; + ss << " " << id << ": catch" << labelIdx; + return ss.str(); +} + +std::list JBCStmtPesudoCatch::EmitToFEIRImpl(JBCFunctionContext &context, bool &success) const { + (void) context; + std::list ans; + UniqueFEIRStmt stmt = std::make_unique(labelIdx); + FEIRStmtPesudoCatch *feirStmt = static_cast(stmt.get()); + for (GStrIdx typeNameIdx : catchTypeNames) { + feirStmt->AddCatchTypeNameIdx(typeNameIdx); + } + ans.push_back(std::move(stmt)); + success = true; + return ans; +} + +// ---------- JBCStmtPesudoTry ---------- +void JBCStmtPesudoTry::DumpImpl(const std::string &prefix) const { + std::cout << prefix << "JBCStmtPesudoTry (id=" << id << "," << + "kind=" << JBCStmtKindHelper::JBCStmtKindName(JBCkind) << ", " << + "succs={"; + for (JBCStmtPesudoCatch *stmt : catchStmts) { + std::cout << stmt->GetID() 
<< " "; + } + std::cout << "})" << std::endl; +} + +std::string JBCStmtPesudoTry::DumpDotStringImpl() const { + std::stringstream ss; + ss << " " << id << ": try"; + return ss.str(); +} + +std::list JBCStmtPesudoTry::EmitToFEIRImpl(JBCFunctionContext &context, bool &success) const { + (void) context; + std::list ans; + UniqueFEIRStmt stmt = std::make_unique(); + FEIRStmtPesudoJavaTry *feirStmt = static_cast(stmt.get()); + for (JBCStmtPesudoCatch *stmtCatch : catchStmts) { + feirStmt->AddCatchLabelIdx(stmtCatch->GetLabelIdx()); + } + ans.push_back(std::move(stmt)); + success = true; + return ans; +} + +// ---------- JBCStmtPesudoEndTry ---------- +void JBCStmtPesudoEndTry::DumpImpl(const std::string &prefix) const { + std::cout << prefix << "JBCStmtPesudoEndTry (id=" << id << ", " << + "kind=" << JBCStmtKindHelper::JBCStmtKindName(JBCkind) << + ")" << std::endl; +} + +std::string JBCStmtPesudoEndTry::DumpDotStringImpl() const { + std::stringstream ss; + ss << " " << id << ": endtry"; + return ss.str(); +} + +std::list JBCStmtPesudoEndTry::EmitToFEIRImpl(JBCFunctionContext &context, bool &success) const { + (void) context; + std::list ans; + UniqueFEIRStmt stmt = std::make_unique(); + ans.push_back(std::move(stmt)); + success = true; + return ans; +} + +// ---------- JBCStmtPesudoComment ---------- +void JBCStmtPesudoComment::DumpImpl(const std::string &prefix) const { + (void) prefix; +} + +std::string JBCStmtPesudoComment::DumpDotStringImpl() const { + return ""; +} + +std::list JBCStmtPesudoComment::EmitToFEIRImpl(JBCFunctionContext &context, bool &success) const { + (void) context; + std::list ans; + UniqueFEIRStmt stmt = std::make_unique(content); + ans.push_back(std::move(stmt)); + success = true; + return ans; +} + +// ---------- JBCStmtPesudoLOC ---------- +void JBCStmtPesudoLOC::DumpImpl(const std::string &prefix) const { + std::cout << prefix << "LOC " << srcFileIdx << " " << lineNumber << std::endl; +} + +std::string JBCStmtPesudoLOC::DumpDotStringImpl() const { + std::stringstream ss; + ss << " " << id << ": LOC " << srcFileIdx << " " << lineNumber; + return ss.str(); +} + +std::list JBCStmtPesudoLOC::EmitToFEIRImpl(JBCFunctionContext &context, bool &success) const { + (void) context; + std::list ans; + UniqueFEIRStmt stmt = std::make_unique(srcFileIdx, lineNumber); + ans.push_back(std::move(stmt)); + success = true; + return ans; +} +} // namespace maple diff --git a/src/hir2mpl/bytecode_input/class/src/jbc_util.cpp b/src/hir2mpl/bytecode_input/class/src/jbc_util.cpp new file mode 100644 index 0000000000000000000000000000000000000000..951a5eb75e6c25d9b6d23e1d7b3b8351d41fc860 --- /dev/null +++ b/src/hir2mpl/bytecode_input/class/src/jbc_util.cpp @@ -0,0 +1,57 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "jbc_util.h" +#include "mpl_logging.h" + +namespace maple { +namespace jbc { +std::string JBCUtil::ClassInternalNameToFullName(const std::string &name) { + if (name[0] == '[') { + return name; + } else { + return "L" + name + ";"; + } +} + +JBCPrimType JBCUtil::GetPrimTypeForName(const std::string &name) { + switch (name[0]) { + case '[': + case 'L': + return kTypeRef; + case 'I': + return kTypeInt; + case 'J': + return kTypeLong; + case 'F': + return kTypeFloat; + case 'D': + return kTypeDouble; + case 'B': + return kTypeByteOrBoolean; + case 'Z': + return kTypeByteOrBoolean; + case 'C': + return kTypeChar; + case 'S': + return kTypeShort; + case 'V': + return kTypeDefault; + default: + CHECK_FATAL(false, "Unsupported type name %s", name.c_str()); + } + return kTypeDefault; +} +} // namespace jbc +} // namespace maple \ No newline at end of file diff --git a/src/hir2mpl/bytecode_input/common/include/ark_annotation_map.h b/src/hir2mpl/bytecode_input/common/include/ark_annotation_map.h new file mode 100644 index 0000000000000000000000000000000000000000..c52e272deb996353866aab1577ddc32d5dec6a85 --- /dev/null +++ b/src/hir2mpl/bytecode_input/common/include/ark_annotation_map.h @@ -0,0 +1,44 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef HIR2MPL_BC_INPUT_INCLUDE_ARK_ANNOTATION_MAP_H +#define HIR2MPL_BC_INPUT_INCLUDE_ARK_ANNOTATION_MAP_H +#include <map> +#include "global_tables.h" + +namespace maple { +namespace bc { +class ArkAnnotationMap { + public: + inline static ArkAnnotationMap &GetArkAnnotationMap() { + return annotationMap; + } + + void Init(); + const std::string &GetAnnotationTypeName(const std::string &orinName); + const std::set<std::string> &GetArkAnnotationTypeNames() const { + return arkAnnotationTypeNames; + } + + private: + ArkAnnotationMap() = default; + ~ArkAnnotationMap() = default; + + static ArkAnnotationMap annotationMap; + std::map<std::string, std::string> pragmaTypeNameMap; + std::set<std::string> arkAnnotationTypeNames; +}; +} // namespace bc +} // namespace maple +#endif // HIR2MPL_BC_INPUT_INCLUDE_ARK_ANNOTATION_MAP_H \ No newline at end of file diff --git a/src/hir2mpl/bytecode_input/common/include/ark_annotation_processor.h b/src/hir2mpl/bytecode_input/common/include/ark_annotation_processor.h new file mode 100644 index 0000000000000000000000000000000000000000..c4ba6b5d43529ad45304ecba98822ee5355fd5d7 --- /dev/null +++ b/src/hir2mpl/bytecode_input/common/include/ark_annotation_processor.h @@ -0,0 +1,71 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details. + */ +#ifndef HIR2MPL_BC_INPUT_INCLUDE_ARK_ANNOTATION_PROCESSOR_H +#define HIR2MPL_BC_INPUT_INCLUDE_ARK_ANNOTATION_PROCESSOR_H +#include "global_tables.h" +#include "namemangler.h" +namespace maple { +namespace bc { +class ArkAnnotationProcessor { + public: + static void Process(); +}; + +class ArkAnnotation { + public: + void Init(); + bool IsFastNative(const TyIdx &tyIdx) const; + bool IsCriticalNative(const TyIdx &tyIdx) const; + bool IsCallerSensitive(const TyIdx &tyIdx) const; + bool IsPermanent(const TyIdx &tyIdx) const; + bool IsPermanent(const std::string &str) const; + bool IsRCUnowned(const TyIdx &tyIdx) const; + bool IsRCUnownedCap(const TyIdx &tyIdx) const; + bool IsRCUnownedCapList(const TyIdx &tyIdx) const; + bool IsRCUnownedLocal(const TyIdx &tyIdx) const; + bool IsRCUnownedLocalOld(const TyIdx &tyIdx) const; + bool IsRCUnownedThis(const TyIdx &tyIdx) const; + bool IsRCUnownedOuter(const TyIdx &tyIdx) const; + bool IsRCWeak(const TyIdx &tyIdx) const; + static ArkAnnotation &GetInstance() { + return instance; + } + + static GStrIdx GetStrIdxFromDexName(const std::string &name) { + return GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(namemangler::EncodeName(name)); + } + + private: + ArkAnnotation() = default; + ~ArkAnnotation() = default; + static MIRStructType *GetStructType(const TyIdx &tyIdx); + + static ArkAnnotation instance; + std::set typeNameSetForFastNative; + std::set typeNameSetForCriticalNative; + std::set typeNameSetForCallerSensitive; + std::set typeNameSetForPermanent; + std::set typeNameSetForRCUnowned; + std::set typeNameSetForRCUnownedCap; + std::set typeNameSetForRCUnownedCapList; + std::set typeNameSetForRCUnownedLocal; + std::set typeNameSetForRCUnownedThis; + std::set typeNameSetForRCUnownedOuter; + std::set typeNameSetForRCWeak; + GStrIdx typeNameIdxForRCUnownedLocalOld; +}; +} // namespace bc +} // namespace maple +#endif // HIR2MPL_BC_INPUT_INCLUDE_ARK_ANNOTATION_PROCESSOR_H \ No newline at end of file diff --git a/src/hir2mpl/bytecode_input/common/include/bc_class.h b/src/hir2mpl/bytecode_input/common/include/bc_class.h new file mode 100644 index 0000000000000000000000000000000000000000..c7d2422f054c0e1325906306dd3a0e6962cb36a7 --- /dev/null +++ b/src/hir2mpl/bytecode_input/common/include/bc_class.h @@ -0,0 +1,369 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ + +#ifndef HIR2MPL_BC_INPUT_INCLUDE_BC_CLASS_H +#define HIR2MPL_BC_INPUT_INCLUDE_BC_CLASS_H +#include +#include +#include +#include +#include "types_def.h" +#include "bc_instruction.h" +#include "feir_stmt.h" +#include "bc_parser_base.h" +#include "feir_var.h" +#include "mempool.h" +#include "bc_pragma.h" + +namespace maple { +namespace bc { +using BCAttrKind = uint32; +class BCClass; + +class BCAttr { + public: + BCAttr(BCAttrKind k, const GStrIdx &idx) : kind(k), nameIdx(idx) {} + ~BCAttr() = default; + + protected: + BCAttrKind kind; + GStrIdx nameIdx; +}; + +class BCAttrMap { + public: + BCAttrMap() = default; + ~BCAttrMap() = default; + + protected: + std::map>>> mapAttrs; +}; + +class BCClassElem { + public: + BCClassElem(const BCClass &klassIn, uint32 acc, const std::string &nameIn, const std::string &descIn); + virtual ~BCClassElem() = default; + const std::string &GetName() const; + const std::string &GetDescription() const; + uint32 GetItemIdx() const; + uint32 GetIdx() const; + uint32 GetAccessFlag() const; + bool IsStatic() const; + const std::string &GetClassName() const; + const BCClass &GetBCClass() const; + GStrIdx GetClassNameMplIdx() const; + + protected: + virtual uint32 GetItemIdxImpl() const = 0; + virtual uint32 GetIdxImpl() const = 0; + virtual bool IsStaticImpl() const = 0; + const BCClass &klass; + uint32 accessFlag; + std::string name; + std::string descriptor; + BCAttrMap attrMap; +}; + +class BCClassField : public BCClassElem { + public: + BCClassField(const BCClass &klassIn, uint32 acc, const std::string &nameIn, const std::string &descIn) + : BCClassElem(klassIn, acc, nameIn, descIn) {} + ~BCClassField() = default; + + protected: + uint32 GetItemIdxImpl() const override = 0; + uint32 GetIdxImpl() const override = 0; +}; + +class BCCatchInfo { + public: + BCCatchInfo(uint32 handlerAddrIn, const GStrIdx &argExceptionNameIdx, bool isCatchAllIn); + ~BCCatchInfo() = default; + + uint32 GetHandlerAddr() const { + return handlerAddr; + } + + GStrIdx GetExceptionNameIdx() const { + return exceptionNameIdx; + } + + bool GetIsCatchAll() const { + return isCatchAll; + } + + protected: + uint32 handlerAddr; + GStrIdx exceptionNameIdx; + bool isCatchAll = false; +}; + +class BCTryInfo { + public: + BCTryInfo(uint32 startAddrIn, uint32 endAddrIn, std::unique_ptr>> catchesIn) + : startAddr(startAddrIn), endAddr(endAddrIn), catches(std::move(catchesIn)) {} + ~BCTryInfo() = default; + + uint32 GetStartAddr() const { + return startAddr; + } + + uint32 GetEndAddr() const { + return endAddr; + } + + const std::list> *GetCatches() const { + return catches.get(); + } + static void DumpTryCatchInfo(const std::unique_ptr>> &tryInfos); + + protected: + uint32 startAddr; + uint32 endAddr; + std::unique_ptr>> catches; +}; + +class BCClassMethod : public BCClassElem { + public: + BCClassMethod(const BCClass &klassIn, uint32 acc, bool isVirtualIn, const std::string &nameIn, + const std::string &descIn) + : BCClassElem(klassIn, acc, nameIn, descIn), isVirtual(isVirtualIn), + methodMp(FEUtils::NewMempool("MemPool for BCClassMethod", true /* isLcalPool */)), + allocator(methodMp) {} + ~BCClassMethod() { + methodMp = nullptr; + } + void SetPCBCInstructionMap(MapleMap *mapIn) { + pcBCInstructionMap = mapIn; + } + void SetMethodInstOffset(const uint16 *pos); + void SetRegisterTotalSize(uint16 size); + uint16 GetRegisterTotalSize() const; + void SetRegisterInsSize(uint16 size); + void SetCodeOff(uint32 off); + uint32 GetCodeOff() const; + void AddMethodDepSet(const std::string &depType); + 
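// Note: IsVirtual/IsNative/IsInit/IsClinit are resolved by the concrete bytecode reader through the + // pure virtual *Impl hooks declared below; IsInit/IsClinit typically correspond to the JVM/DEX + // special method names "<init>" and "<clinit>". +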
bool IsVirtual() const; + bool IsNative() const; + bool IsInit() const; + bool IsClinit() const; + std::string GetFullName() const; + void ProcessInstructions(); + std::list EmitInstructionsToFEIR() const; + const uint16 *GetInstPos() const; + + void SetTryInfos(std::unique_ptr>> infos) { + tryInfos = std::move(infos); + } + + std::size_t GetTriesSize() const { + return tryInfos->size(); + } + + uint32 GetThisRegNum() const { + return IsStatic() ? UINT32_MAX : static_cast(registerTotalSize - registerInsSize); + } + + void SetSrcPositionInfo(const std::map &srcPosInfo) { +#ifdef DEBUG + pSrcPosInfo = &srcPosInfo; +#else + (void) srcPosInfo; +#endif + } + + const std::map>> *GetSrcLocalInfoPtr() const { + return srcLocals.get(); + } + + void SetSrcLocalInfo( + std::unique_ptr>>> srcLocalsIn) { + srcLocals = std::move(srcLocalsIn); + } + + std::vector> GenArgVarList() const; + void GenArgRegs(); + std::vector GetSigTypeNames() const { + return sigTypeNames; + } + bool HasCode() const { + return pcBCInstructionMap != nullptr && !pcBCInstructionMap->empty(); + } + + bool IsRcPermanent() const { + return isPermanent; + } + + void SetIsRcPermanent(bool flag) { + isPermanent = flag; + } + + const MemPool *GetMemPool() const { + return methodMp; + } + + void ReleaseMempool() const { + FEUtils::DeleteMempoolPtr(methodMp); + } + + MapleAllocator &GetAllocator() { + return allocator; + } + + protected: + uint32 GetItemIdxImpl() const override = 0; + uint32 GetIdxImpl() const override = 0; + virtual bool IsVirtualImpl() const = 0; + virtual bool IsNativeImpl() const = 0; + virtual bool IsInitImpl() const = 0; + virtual bool IsClinitImpl() const = 0; + void ProcessTryCatch(); + virtual std::vector> GenArgVarListImpl() const = 0; + void TypeInfer(); + void InsertPhi(const std::vector &dom, std::vector &src); + static TypeInferItem *ConstructTypeInferItem(MapleAllocator &alloc, uint32 pos, BCReg* bcReg, TypeInferItem *prev); + static std::vector ConstructNewRegTypeMap(MapleAllocator &alloc, uint32 pos, + const std::vector ®TypeMap); + std::list GenReTypeStmtsThroughArgs() const; + void Traverse(std::list>> &pcDefedRegsList, + std::vector> &dominances, + std::set &visitedSet); + void PrecisifyRegType(); + virtual void GenArgRegsImpl() = 0; + static void LinkJumpTarget(const std::map &targetFEIRStmtMap, + const std::list &gotoFEIRStmts, + const std::list &switchFEIRStmts); + void DumpBCInstructionMap() const; + void SetSrcPosInfo(); + MapleMap *pcBCInstructionMap = nullptr; + std::unique_ptr>> tryInfos; +#ifdef DEBUG + const std::map *pSrcPosInfo = nullptr; +#endif + // map>> + std::unique_ptr>>> srcLocals; + std::vector> argRegs; + std::vector sigTypeNames; + bool isVirtual = false; + uint16 registerTotalSize = UINT16_MAX; + uint16 registerInsSize = UINT16_MAX; + uint32 codeOff = UINT32_MAX; + const uint16 *instPos = nullptr; // method instructions start pos in bc file + std::set visitedPcSet; + std::set multiInDegreeSet; + std::list regTypes; + // isPermanent is true means the rc annotation @Permanent is used + bool isPermanent = false; + + MemPool *methodMp; + MapleAllocator allocator; +}; + +class BCClass { + public: + BCClass(uint32 idx, const BCParserBase &parserIn) : classIdx(idx), parser(parserIn) {} + ~BCClass() = default; + + uint32 GetClassIdx() const { + return classIdx; + } + + const BCParserBase &GetBCParser() const { + return parser; + } + + bool IsInterface() const { + return isInterface; + } + + void SetFilePathName(const std::string &path) { + filePathName = path; + } + + void 
SetSrcFileInfo(const std::string &name); + void SetIRSrcFileSigIdx(const GStrIdx &strIdx) { + irSrcFileSigIdx = strIdx; + } + + bool IsMultiDef() const { + return isMultiDef; + } + + void SetIsMultiDef(bool flag) { + isMultiDef = flag; + } + + uint32 GetSrcFileIdx() const { + return srcFileIdx; + } + + void SetIsInterface(bool flag) { + isInterface = flag; + } + void SetSuperClasses(const std::list &names); + void SetInterface(const std::string &name); + void SetAccFlag(uint32 flag); + void SetField(std::unique_ptr field); + void SetMethod(std::unique_ptr method); + void SetClassName(const std::string &classNameOrinIn); + void SetAnnotationsDirectory(std::unique_ptr annotationsDirectory); + void InsertStaticFieldConstVal(MIRConst *cst); + void InsertFinalStaticStringID(uint32 stringID); + std::vector GetFinalStaticStringIDVec() const { + return finalStaticStringID; + } + std::vector GetStaticFieldsConstVal() const; + const std::string &GetClassName(bool mapled) const; + + GStrIdx GetClassNameMplIdx() const { + return classNameMplIdx; + } + + const std::list &GetSuperClassNames() const; + const std::vector &GetSuperInterfaceNames() const; + std::string GetSourceFileName() const; + GStrIdx GetIRSrcFileSigIdx() const; + uint32 GetAccessFlag() const; + int32 GetFileNameHashId() const; + + const std::vector> &GetFields() const; + std::vector> &GetMethods(); + const std::unique_ptr &GetAnnotationsDirectory() const; + + protected: + bool isInterface = false; + bool isMultiDef = false; + uint32 classIdx; + uint32 srcFileIdx = 0; + uint32 accFlag = 0; + GStrIdx classNameOrinIdx; + GStrIdx classNameMplIdx; + GStrIdx srcFileNameIdx; + GStrIdx irSrcFileSigIdx; + const BCParserBase &parser; + std::list superClassNameList; + std::vector interfaces; + std::vector> fields; + std::vector> methods; + std::unique_ptr annotationsDirectory; + std::vector staticFieldsConstVal; + std::vector finalStaticStringID; + BCAttrMap attrMap; + std::string filePathName; + mutable std::mutex bcClassMtx; +}; +} +} +#endif // HIR2MPL_BC_INPUT_INCLUDE_BC_CLASS_H diff --git a/src/hir2mpl/bytecode_input/common/include/bc_class2fe_helper.h b/src/hir2mpl/bytecode_input/common/include/bc_class2fe_helper.h new file mode 100644 index 0000000000000000000000000000000000000000..a66b6e6b5ed3a007f60e90b80dbe53c297fbe349 --- /dev/null +++ b/src/hir2mpl/bytecode_input/common/include/bc_class2fe_helper.h @@ -0,0 +1,102 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ + +#ifndef HIR2MPL_BC_INPUT_INCLUDE_BC_CLASS2FE_HELPER_H +#define HIR2MPL_BC_INPUT_INCLUDE_BC_CLASS2FE_HELPER_H +#include "fe_input_helper.h" +#include "bc_class.h" +#include "bc_pragma.h" +namespace maple { +namespace bc { +class BCClass2FEHelper : public FEInputStructHelper { + public: + BCClass2FEHelper(MapleAllocator &allocator, BCClass &klassIn); + ~BCClass2FEHelper() = default; + + protected: + std::string GetStructNameOrinImpl() const override; + std::string GetStructNameMplImpl() const override; + std::list GetSuperClassNamesImpl() const override; + std::vector GetInterfaceNamesImpl() const override; + std::string GetSourceFileNameImpl() const override; + MIRStructType *CreateMIRStructTypeImpl(bool &error) const override; + uint64 GetRawAccessFlagsImpl() const override; + GStrIdx GetIRSrcFileSigIdxImpl() const override; + bool IsMultiDefImpl() const override; + std::string GetSrcFileNameImpl() const override; + + BCClass &klass; + + private: + void TryMarkMultiDefClass(MIRStructType &typeImported) const; +}; + +class BCClassField2FEHelper : public FEInputFieldHelper { + public: + BCClassField2FEHelper(MapleAllocator &allocator, const BCClassField &fieldIn) + : FEInputFieldHelper(allocator), + field(fieldIn) {} + ~BCClassField2FEHelper() = default; + FieldAttrs AccessFlag2Attribute(uint32 accessFlag) const; + + protected: + virtual FieldAttrs AccessFlag2AttributeImpl(uint32 accessFlag) const = 0; + bool ProcessDeclImpl(MapleAllocator &allocator) override; + bool ProcessDeclWithContainerImpl(MapleAllocator &allocator) override; + const BCClassField &field; +}; + +class BCClassMethod2FEHelper : public FEInputMethodHelper { + public: + BCClassMethod2FEHelper(MapleAllocator &allocator, std::unique_ptr &methodIn); + ~BCClassMethod2FEHelper() = default; + std::unique_ptr &GetMethod() const { + return method; + } + + protected: + bool ProcessDeclImpl(MapleAllocator &allocator) override; + void SolveReturnAndArgTypesImpl(MapleAllocator &allocator) override; + std::string GetMethodNameImpl(bool inMpl, bool full) const override; + bool IsVargImpl() const override; + bool HasThisImpl() const override; + MIRType *GetTypeForThisImpl() const override; + + virtual bool IsClinit() const = 0; + virtual bool IsInit() const = 0; + bool IsVirtualImpl() const override; + bool IsNativeImpl() const override; + bool HasCodeImpl() const override; + + std::unique_ptr &method; +}; + +class BCInputPragmaHelper : public FEInputPragmaHelper { + public: + BCInputPragmaHelper(BCAnnotationsDirectory &bcAnnotationsDirectoryIn) + : bcAnnotationsDirectory(bcAnnotationsDirectoryIn) {} + virtual ~BCInputPragmaHelper() = default; + + protected: + std::vector &GenerateMIRPragmasImpl() override { + return bcAnnotationsDirectory.EmitPragmas(); + } + + private: + BCAnnotationsDirectory &bcAnnotationsDirectory; +}; +} // namespace bc +} // namespace maple +#endif // HIR2MPL_BC_INPUT_INCLUDE_BC_CLASS2FE_HELPER_H \ No newline at end of file diff --git a/src/hir2mpl/bytecode_input/common/include/bc_compiler_component-inl.h b/src/hir2mpl/bytecode_input/common/include/bc_compiler_component-inl.h new file mode 100644 index 0000000000000000000000000000000000000000..4cc8a3fa7c6cd26bd7bd96bede8cc21af1549d75 --- /dev/null +++ b/src/hir2mpl/bytecode_input/common/include/bc_compiler_component-inl.h @@ -0,0 +1,232 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. 
+ * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef HIR2MPL_BC_INPUT_INCLUDE_BC_COMPILER_COMPONENT_INL_H_ +#define HIR2MPL_BC_INPUT_INCLUDE_BC_COMPILER_COMPONENT_INL_H_ +#include "bc_compiler_component.h" +#include "fe_timer.h" +#include "bc_function.h" +#include "dex_class2fe_helper.h" +#include "fe_manager.h" +#include "class_loader_context.h" +#include "class_linker.h" + +namespace maple { +namespace bc { +template +BCCompilerComponent::BCCompilerComponent(MIRModule &module) + : HIR2MPLCompilerComponent(module, kSrcLangJava), + mp(FEUtils::NewMempool("MemPool for BCCompilerComponent", false /* isLcalPool */)), + allocator(mp), + bcInput(std::make_unique>(module)) {} + +template +BCCompilerComponent::~BCCompilerComponent() { + mp = nullptr; +} + +template +bool BCCompilerComponent::ParseInputImpl() { + FETimer timer; + bool success = true; + timer.StartAndDump("BCCompilerComponent::ParseInput()"); + FE_INFO_LEVEL(FEOptions::kDumpLevelInfo, "===== Process BCCompilerComponent::ParseInput() ====="); + const std::list &inputClassNames = {}; + std::vector inputNames; + if (typeid(T) == typeid(DexReader)) { + inputNames = FEOptions::GetInstance().GetInputDexFiles(); + } else { + CHECK_FATAL(false, "Reader is not supported. Exit."); + } + success = success && bcInput->ReadBCFiles(inputNames, inputClassNames); + CHECK_FATAL(success, "BCCompilerComponent::ParseInput failed. Exit."); + bc::BCClass *klass = bcInput->GetFirstClass(); + while (klass != nullptr) { + if (typeid(T) == typeid(DexReader)) { + FEInputStructHelper *structHelper = allocator.GetMemPool()->template New(allocator, *klass); + FEInputPragmaHelper *pragmaHelper = + allocator.GetMemPool()->template New(*(klass->GetAnnotationsDirectory())); + structHelper->SetPragmaHelper(pragmaHelper); + structHelper->SetStaticFieldsConstVal(klass->GetStaticFieldsConstVal()); + structHelpers.push_back(structHelper); + } else { + CHECK_FATAL(false, "Reader is not supported. Exit."); + } + klass = bcInput->GetNextClass(); + } + timer.StopAndDumpTimeMS("BCCompilerComponent::ParseInput()"); + return success; +} + +template +bool BCCompilerComponent::LoadOnDemandTypeImpl() { + FETimer timer; + bool success = true; + timer.StartAndDump("BCCompilerComponent::LoadOnDemandType()"); + FE_INFO_LEVEL(FEOptions::kDumpLevelInfo, "===== Process BCCompilerComponent::LoadOnDemandType() ====="); + // Collect dependent type names in dexfile + std::unordered_set allDepSet; + success = success && bcInput->CollectDependentTypeNamesOnAllBCFiles(allDepSet); + CHECK_FATAL(success, "BCCompilerComponent::CollectDepTypeNames failed. 
Exit."); + // Collect class def type names in dexfile + std::unordered_set allClassSet; + success = success && bcInput->CollectClassNamesOnAllBCFiles(allClassSet); + // Find dependent types and resolve types to bcClasses + std::list> klassList; + success = success && LoadOnDemandType2BCClass(allDepSet, allClassSet, klassList); + // All dependent bcClasses are loaded into the mir globaltable and used like mplt + std::list> structHelpers; + success = success && LoadOnDemandBCClass2FEClass(klassList, structHelpers, !FEOptions::GetInstance().IsNoMplFile()); + timer.StopAndDumpTimeMS("BCCompilerComponent::LoadOnDemandType()"); + return success; +} + +template +bool BCCompilerComponent::LoadOnDemandType2BCClass(const std::unordered_set &allDepsSet, + const std::unordered_set &allDefsSet, + std::list> &klassList) { + FETimer timer; + timer.StartAndDump("LoadOnDemandType2BCClass::Open dep dexfiles"); + bool success = true; + ClassLoaderContext *classLoaderContext = nullptr; + const ClassLoaderInfo *classLoader = nullptr; + std::vector> bootClassPaths; + std::string spec = FEOptions::GetInstance().GetXBootClassPath(); + if (!spec.empty()) { // Xbootclasspath + INFO(kLncInfo, "XBootClassPath=%s", spec.c_str()); + success = success && ClassLoaderContext::OpenDexFiles(spec, bootClassPaths); + } + spec = FEOptions::GetInstance().GetClassLoaderContext(); + if (!spec.empty()) { // PCL + INFO(kLncInfo, "PCL=%s", spec.c_str()); + classLoaderContext = ClassLoaderContext::Create(spec, *mp); + if (classLoaderContext != nullptr) { + classLoader = classLoaderContext->GetClassLoader(); + } + } + timer.StopAndDumpTimeMS("LoadOnDemandType2BCClass::Open dep dexfiles"); + ClassLinker *classLinker = mp->New(bootClassPaths); + for (const std::string &className : allDepsSet) { + classLinker->FindClass(className, nullptr, klassList); + } + // if same class name is existed in src dexFile and dependent set, import class from dependent set preferentially + for (const std::string &className : allDefsSet) { + classLinker->FindClass(className, nullptr, klassList, true); + } + return success; +} + +template +bool BCCompilerComponent::LoadOnDemandBCClass2FEClass( + const std::list> &klassList, + std::list> &structHelpers, + bool isEmitDepsMplt) { + for (const std::unique_ptr &klass : klassList) { + std::unique_ptr structHelper = std::make_unique(allocator, *klass); + FEInputPragmaHelper *pragmaHelper = + allocator.GetMemPool()->template New(*(klass->GetAnnotationsDirectory())); + structHelper->SetPragmaHelper(pragmaHelper); + structHelper->SetIsOnDemandLoad(true); + structHelpers.push_back(std::move(structHelper)); + } + for (std::unique_ptr const &helper : structHelpers) { + ASSERT_NOT_NULL(helper); + (void)helper->PreProcessDecl(); + } + for (std::unique_ptr const &helper : structHelpers) { + ASSERT_NOT_NULL(helper); + (void)helper->ProcessDecl(); + helper->ProcessPragma(); + } + if (isEmitDepsMplt) { + std::string outName = module.GetFileName(); + size_t lastDot = outName.find_last_of("."); + std::string outNameWithoutType; + if (lastDot == std::string::npos) { + outNameWithoutType = outName; + } else { + outNameWithoutType = outName.substr(0, lastDot); + } + std::string depFileName = outNameWithoutType + ".Deps.mplt"; + module.GetImportFiles().push_back(module.GetMIRBuilder()->GetOrCreateStringIndex(depFileName)); + module.DumpToHeaderFile(!FEOptions::GetInstance().IsGenAsciiMplt(), depFileName); + } + FEOptions::ModeDepSameNamePolicy mode = FEOptions::GetInstance().GetModeDepSameNamePolicy(); + switch (mode) { + case 
FEOptions::ModeDepSameNamePolicy::kSys: + // Set kSrcMpltSys flag, loading on-demand dependent types from system + FEManager::GetTypeManager().SetMirImportedTypes(FETypeFlag::kSrcMpltSys); + break; + case FEOptions::ModeDepSameNamePolicy::kSrc: + // Set kSrcMplt flag, same name types are then overridden from src file after loaded on-demand dependent type + FEManager::GetTypeManager().SetMirImportedTypes(FETypeFlag::kSrcMplt); + break; + } + for (uint32 i = 1; i < GlobalTables::GetGsymTable().GetSymbolTableSize(); ++i) { + MIRSymbol *symbol = GlobalTables::GetGsymTable().GetSymbol(i); + if ((symbol != nullptr) && (symbol->GetSKind() == kStFunc)) { + symbol->SetIsImportedDecl(true); + } + } + return true; +} + +template +void BCCompilerComponent::ProcessPragmaImpl() { + FETimer timer; + timer.StartAndDump("BCCompilerComponent::ProcessPragmaImpl()"); + FE_INFO_LEVEL(FEOptions::kDumpLevelInfo, "===== Process BCCompilerComponent::ProcessPragmaImpl() ====="); + for (FEInputStructHelper *helper : structHelpers) { + ASSERT_NOT_NULL(helper); + helper->ProcessPragma(); + } + timer.StopAndDumpTimeMS("BCCompilerComponent::ProcessPragmaImpl()"); +} + +template +std::unique_ptr BCCompilerComponent::CreatFEFunctionImpl(FEInputMethodHelper *methodHelper) { + BCClassMethod2FEHelper *bcMethodHelper = static_cast(methodHelper); + GStrIdx methodNameIdx = methodHelper->GetMethodNameIdx(); + bool isStatic = methodHelper->IsStatic(); + MIRFunction *mirFunc = FEManager::GetTypeManager().GetMIRFunction(methodNameIdx, isStatic); + CHECK_NULL_FATAL(mirFunc); + std::unique_ptr feFunction = std::make_unique(*bcMethodHelper, *mirFunc, phaseResultTotal); + module.AddFunction(mirFunc); + feFunction->Init(); + return feFunction; +} + +template +std::string BCCompilerComponent::GetComponentNameImpl() const { + return "BCCompilerComponent"; +} + +template +bool BCCompilerComponent::ParallelableImpl() const { + return true; +} + +template +void BCCompilerComponent::DumpPhaseTimeTotalImpl() const { + INFO(kLncInfo, "[PhaseTime] BCCompilerComponent"); + HIR2MPLCompilerComponent::DumpPhaseTimeTotalImpl(); +} + +template +void BCCompilerComponent::ReleaseMemPoolImpl() { + FEUtils::DeleteMempoolPtr(mp); +} +} // namespace bc +} // namespace maple +#endif // HIR2MPL_BC_INPUT_INCLUDE_BC_COMPILER_COMPONENT_INL_H_ diff --git a/src/hir2mpl/bytecode_input/common/include/bc_compiler_component.h b/src/hir2mpl/bytecode_input/common/include/bc_compiler_component.h new file mode 100644 index 0000000000000000000000000000000000000000..b243f570e03f64974d57be3663b74416d8b9bfec --- /dev/null +++ b/src/hir2mpl/bytecode_input/common/include/bc_compiler_component.h @@ -0,0 +1,54 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef HIR2MPL_BC_INPUT_INCLUDE_BC_COMPILER_COMPONENT_H +#define HIR2MPL_BC_INPUT_INCLUDE_BC_COMPILER_COMPONENT_H +#include "fe_macros.h" +#include "hir2mpl_compiler_component.h" +#include "bc_input.h" + +namespace maple { +namespace bc { +template +class BCCompilerComponent : public HIR2MPLCompilerComponent { + public: + explicit BCCompilerComponent(MIRModule &module); + ~BCCompilerComponent() override; + + protected: + bool ParseInputImpl() override; + bool LoadOnDemandTypeImpl() override; + void ProcessPragmaImpl() override; + std::unique_ptr CreatFEFunctionImpl(FEInputMethodHelper *methodHelper) override; + std::string GetComponentNameImpl() const override; + bool ParallelableImpl() const override; + void DumpPhaseTimeTotalImpl() const override; + void ReleaseMemPoolImpl() override; + + private: + bool LoadOnDemandType2BCClass(const std::unordered_set &allDepsSet, + const std::unordered_set &allDefsSet, + std::list> &klassList); + bool LoadOnDemandBCClass2FEClass(const std::list> &klassList, + std::list> &structHelpers, + bool isEmitDepsMplt); + + MemPool *mp; + MapleAllocator allocator; + std::unique_ptr> bcInput; +}; // class BCCompilerComponent +} // namespace bc +} // namespace maple +#include "bc_compiler_component-inl.h" +#endif // HIR2MPL_BC_INPUT_INCLUDE_BC_COMPILER_COMPONENT_H diff --git a/src/hir2mpl/bytecode_input/common/include/bc_function.h b/src/hir2mpl/bytecode_input/common/include/bc_function.h new file mode 100644 index 0000000000000000000000000000000000000000..dedebf5387aeb9a1a17328c4820314f5a31944c0 --- /dev/null +++ b/src/hir2mpl/bytecode_input/common/include/bc_function.h @@ -0,0 +1,75 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MPL_FE_BC_INPUT_BC_FUNTION_H +#define MPL_FE_BC_INPUT_BC_FUNTION_H +#include "fe_function.h" +#include "bc_class2fe_helper.h" + +namespace maple { +namespace bc { +class BCFunction : public FEFunction { + public: + BCFunction(const BCClassMethod2FEHelper &argMethodHelper, MIRFunction &mirFunc, + const std::unique_ptr &argPhaseResultTotal); + virtual ~BCFunction() = default; + + protected: + bool GenerateGeneralStmt(const std::string &phaseName) override { + WARN(kLncWarn, "Phase: %s may not need.", phaseName.c_str()); + return true; + } + + bool GenerateArgVarList(const std::string &phaseName) override; + bool GenerateAliasVars(const std::string &phaseName) override; + + bool PreProcessTypeNameIdx() override { + return true; + } + + void GenerateGeneralStmtFailCallBack() override {} + + void GenerateGeneralDebugInfo() override {} + + bool VerifyGeneral() override { + return true; + } + + void VerifyGeneralFailCallBack() override {} + + bool HasThis() override { + return methodHelper.HasThis(); + } + + bool IsNative() override { + return methodHelper.IsNative(); + } + + bool EmitToFEIRStmt(const std::string &phaseName) override; + + void InitImpl() override; + void SetMIRFunctionInfo(); + void PreProcessImpl() override; + bool ProcessImpl() override; + bool ProcessFEIRFunction() override; + void FinishImpl() override; + bool EmitToMIR(const std::string &phaseName) override; + + const BCClassMethod2FEHelper &methodHelper; + std::unique_ptr &method; + bool error = false; +}; +} // namespace bc +} // namespace maple +#endif \ No newline at end of file diff --git a/src/hir2mpl/bytecode_input/common/include/bc_input-inl.h b/src/hir2mpl/bytecode_input/common/include/bc_input-inl.h new file mode 100644 index 0000000000000000000000000000000000000000..8a211e390462fc8ecb5d6ecdb0d55f713e693eb2 --- /dev/null +++ b/src/hir2mpl/bytecode_input/common/include/bc_input-inl.h @@ -0,0 +1,166 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef HIR2MPL_BC_INPUT_INCLUDE_BC_INPUT_INL_H_ +#define HIR2MPL_BC_INPUT_INCLUDE_BC_INPUT_INL_H_ +#include "bc_input.h" +#include +#include "dex_parser.h" +#include "hir2mpl_env.h" + +namespace maple { +namespace bc { +template +bool BCInput::ReadBCFile(uint32 index, const std::string &fileName, const std::list &classNamesIn) { + std::unique_ptr bcParser; + if (typeid(T) == typeid(DexReader)) { + bcParser = std::make_unique(index, fileName, classNamesIn); + } else { + CHECK_FATAL(false, "Unsupported BC reader: %s in BCInput", typeid(T).name()); + } + if (!bcParser->OpenFile()) { + return false; + } + if (!bcParser->ParseHeader()) { + ERR(kLncErr, "Parse Header failed in : %s.", fileName.c_str()); + return false; + } + if (!bcParser->Verify()) { + ERR(kLncErr, "Verify file failed in : %s.", fileName.c_str()); + return false; + } + if (bcParser->RetrieveClasses(bcClasses) == false) { + ERR(kLncErr, "Retrieve classes failed in : %s.", fileName.c_str()); + return false; + } + (void)bcParserMap.insert(std::make_pair(fileName, std::move(bcParser))); + return true; +} + +template +bool BCInput::ReadBCFiles(const std::vector &fileNames, const std::list &classNamesIn) { + for (uint32 i = 0; i < fileNames.size(); ++i) { + if (BCInput::ReadBCFile(i, fileNames[i], classNamesIn) == false) { + return false; + } + RegisterFileInfo(fileNames[i]); + } + return true; +} + +template +BCClass *BCInput::GetFirstClass() { + if (bcClasses.size() == 0) { + return nullptr; + } + itKlass = bcClasses.begin(); + return itKlass->get(); +} + +template +BCClass *BCInput::GetNextClass() { + if (itKlass == bcClasses.end()) { + return nullptr; + } + ++itKlass; + if (itKlass == bcClasses.end()) { + return nullptr; + } + return itKlass->get(); +} + +template +void BCInput::RegisterFileInfo(const std::string &fileName) { + GStrIdx fileNameIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(fileName); + GStrIdx fileInfoIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName("INFO_filename"); + module.PushFileInfoPair(MIRInfoPair(fileInfoIdx, fileNameIdx)); + module.PushFileInfoIsString(true); +} + +template +bool BCInput::CollectDependentTypeNamesOnAllBCFiles(std::unordered_set &allDepSet) { + FEOptions::ModeCollectDepTypes mode = FEOptions::GetInstance().GetModeCollectDepTypes(); + bool isSuccess = true; + switch (mode) { + case FEOptions::ModeCollectDepTypes::kAll: + isSuccess = CollectAllDepTypeNamesOnAllBCFiles(allDepSet); + break; + case FEOptions::ModeCollectDepTypes::kFunc: + isSuccess = CollectMethodDepTypeNamesOnAllBCFiles(allDepSet); + break; + } + return isSuccess; +} + +template +bool BCInput::CollectAllDepTypeNamesOnAllBCFiles(std::unordered_set &allDepSet) { + for (auto &item : bcParserMap) { + std::unordered_set depSet; + if (item.second->CollectAllDepTypeNames(depSet) == false) { + ERR(kLncErr, "Collect all dependent typenames failed in : %s.", item.first.c_str()); + return false; + } + allDepSet.insert(depSet.cbegin(), depSet.cend()); + } + BCUtil::AddDefaultDepSet(allDepSet); // allDepSet is equal to "DefaultTypeSet + TypeSet - ClassSet" + std::unordered_set classSet; + (void)CollectClassNamesOnAllBCFiles(classSet); + for (const auto &elem : classSet) { + (void)allDepSet.erase(elem); + } + return true; +} + +template +bool BCInput::CollectMethodDepTypeNamesOnAllBCFiles(std::unordered_set &depSet) { + bool isSuccess = true; + for (const std::unique_ptr &klass : bcClasses) { + if (klass == nullptr) { + continue; + } + std::list superClassNames = klass->GetSuperClassNames(); + 
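+    // add the super class names to depSet; super interface names and each method's dependent type names follow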
depSet.insert(superClassNames.cbegin(), superClassNames.cend()); + std::vector superInterfaceNames = klass->GetSuperInterfaceNames(); + depSet.insert(superInterfaceNames.cbegin(), superInterfaceNames.cend()); + for (const std::unique_ptr &method : klass->GetMethods()) { + if (method == nullptr) { + continue; + } + (void)klass->GetBCParser().CollectMethodDepTypeNames(depSet, *method); + } + } + std::unordered_set classSet; + isSuccess = isSuccess && CollectClassNamesOnAllBCFiles(classSet); + BCUtil::AddDefaultDepSet(depSet); // DepSet is equal to "DefaultTypeSet + TypeSet - ClassSet" + for (const auto &elem : classSet) { + (void)depSet.erase(elem); + } + return isSuccess; +} + +template +bool BCInput::CollectClassNamesOnAllBCFiles(std::unordered_set &allClassSet) { + for (auto &item : bcParserMap) { + std::unordered_set classSet; + if (item.second->CollectAllClassNames(classSet) == false) { + ERR(kLncErr, "Collect all class names failed in : %s.", item.first.c_str()); + return false; + } + allClassSet.insert(classSet.cbegin(), classSet.cend()); + } + return true; +} +} // namespace bc +} // namespace maple +#endif // HIR2MPL_BC_INPUT_INCLUDE_BC_INPUT_INL_H_ diff --git a/src/hir2mpl/bytecode_input/common/include/bc_input.h b/src/hir2mpl/bytecode_input/common/include/bc_input.h new file mode 100644 index 0000000000000000000000000000000000000000..21d657fc3a5de219f8212f22e7e8252d394159f3 --- /dev/null +++ b/src/hir2mpl/bytecode_input/common/include/bc_input.h @@ -0,0 +1,55 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ + +#ifndef HIR2MPL_BC_INPUT_INCLUDE_BC_INPUT_H +#define HIR2MPL_BC_INPUT_INCLUDE_BC_INPUT_H +#include +#include +#include "mir_module.h" +#include "bc_class.h" +#include "bc_parser.h" + +namespace maple { +namespace bc { +template +class BCInput { + public: + explicit BCInput(MIRModule &moduleIn) : module(moduleIn) {} + ~BCInput() = default; + bool ReadBCFile(uint32 index, const std::string &fileName, const std::list &classNamesIn); + bool ReadBCFiles(const std::vector &fileNames, const std::list &classNamesIn); + const MIRModule &GetModule() const { + return module; + } + BCClass *GetFirstClass(); + BCClass *GetNextClass(); + void RegisterFileInfo(const std::string &fileName); + bool CollectDependentTypeNamesOnAllBCFiles(std::unordered_set &allDepSet); + bool CollectClassNamesOnAllBCFiles(std::unordered_set &allClassSet); + + private: + bool CollectAllDepTypeNamesOnAllBCFiles(std::unordered_set &allDepSet); + bool CollectMethodDepTypeNamesOnAllBCFiles(std::unordered_set &allDepSet); + + std::map> bcParserMap; // map + MIRModule &module; + + std::list> bcClasses; + std::list>::const_iterator itKlass; +}; +} +} +#include "bc_input-inl.h" +#endif // HIR2MPL_BC_INPUT_INCLUDE_BC_INPUT_H \ No newline at end of file diff --git a/src/hir2mpl/bytecode_input/common/include/bc_instruction.h b/src/hir2mpl/bytecode_input/common/include/bc_instruction.h new file mode 100644 index 0000000000000000000000000000000000000000..3673af772a73cad84d8084a2cf8de16bf4896d97 --- /dev/null +++ b/src/hir2mpl/bytecode_input/common/include/bc_instruction.h @@ -0,0 +1,448 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef HIR2MPL_BC_INPUT_INCLUDE_BC_INSTRUCTION_H +#define HIR2MPL_BC_INPUT_INCLUDE_BC_INSTRUCTION_H +#include +#include +#include "types_def.h" +#include "bc_util.h" +#include "feir_stmt.h" +#include "feir_var_reg.h" + +namespace maple { +namespace bc { +class BCClassMethod; +struct BCReg; +class BCInstruction { + public: + BCInstruction(MapleAllocator &allocatorIn, uint32 pcIn, uint8 opcodeIn) + : opcode(opcodeIn), + pc(pcIn), + allocator(allocatorIn), + catchedExTypeNamesIdx(allocator.Adapter()), + handlerTargets(allocator.Adapter()), + defedRegs(allocator.Adapter()), + usedRegs(allocator.Adapter()), + handlers(allocator.Adapter()) {} + virtual ~BCInstruction() = default; + void InitBCInStruction(uint16 kind, bool wide, bool throwable); + BCInstructionKind GetInstKind() const; + bool IsWide() const; + void SetInstructionKind(BCInstructionKind kind); + bool IsConditionBranch() const; + bool IsGoto() const; + bool IsSwitch() const; + bool IsTarget() const; + bool IsTryStart() const; + bool IsTryEnd() const; + bool IsCatch() const; + void SetReturnInst(BCInstruction *inst); + bool HasReturn() const; + bool IsReturn() const; + uint32 GetPC() const; + uint8 GetOpcode() const; + std::vector GetTargets() const; + void SetDefaultTarget(BCInstruction *inst); + void AddHandler(BCInstruction *handler); + void SetWidth(uint8 size); + uint8 GetWidth() const; + void SetCatchable(); + bool IsCatchable() const; + bool IsFallThru() const; + void SetBCRegType(const BCInstruction &inst); + void AddHandlerTarget(uint32 target); + MapleVector GetHandlerTargets() const; + MapleList *GetDefedRegs(); + MapleList *GetUsedRegs(); + void SetRegTypeInTypeInfer(); + void SetFuncNameIdx(const GStrIdx &methodIdx); + void SetSrcPositionInfo(uint32 fileIdxIn, uint32 lineNumIn); + void SetOpName(const char *name); + const char *GetOpName() const; + void Parse(BCClassMethod &method); + void SetExceptionType(const GStrIdx &typeNameIdx); + std::list EmitToFEIRStmts(); + + protected: + virtual std::vector GetTargetsImpl() const; + virtual void ParseImpl(BCClassMethod &method) = 0; + virtual std::list EmitToFEIRStmtsImpl() = 0; + void GenCommentStmt(std::list &stmts) const; + UniqueFEIRStmt GenLabelStmt() const; + UniqueFEIRStmt GenCatchStmt() const; + UniqueFEIRStmt GenTryLabelStmt() const; + UniqueFEIRStmt GenTryEndLabelStmt() const; + virtual void SetRegTypeInTypeInferImpl() {} + virtual void SetBCRegTypeImpl(const BCInstruction &inst) { + (void) inst; + } // for move-result and move-exception + std::list GenRetypeStmtsAfterDef() const; + std::list GenRetypeStmtsBeforeUse() const; + void SetSrcFileInfo(std::list &stmts) const; + bool isThrowable = false; + bool isWide = false; + bool isReturn = false; + bool isCatchable = false; + uint8 width = UINT8_MAX; // Default value, unuseable + uint8 opcode; + BCInstructionKind instKind = kUnKnownKind; + uint32 funcNameIdx = UINT32_MAX; + uint32 srcFileIdx = 0; + uint32 srcFileLineNum = 0; + uint32 pc; +#ifdef DEBUG + const char *opName = nullptr; +#endif + BCInstruction *returnInst = nullptr; + BCInstruction *defaultTarget = nullptr; + MapleAllocator &allocator; + MapleSet catchedExTypeNamesIdx; + MapleVector handlerTargets; // This instruction may throw exception and reach these handlers + MapleList defedRegs; + MapleList usedRegs; + MapleList handlers; +}; + +// Forward declaration +struct BCReg; +struct TypeInferItem; + +struct BCRegTypeItem { + BCRegTypeItem(const GStrIdx &idx, bool isIndeterminateIn = false, bool isFromDefIn = false) + : typeNameIdx(idx), + 
isIndeterminate(isIndeterminateIn), + isFromDef(isFromDefIn) {} + BCRegTypeItem(const BCRegTypeItem &item) + : typeNameIdx(item.typeNameIdx), + isIndeterminate(item.isIndeterminate) {} + ~BCRegTypeItem() = default; + + PrimType GetPrimType() const; + PrimType GetBasePrimType() const; + BCRegTypeItem *Clone(const MapleAllocator &allocator) { + return allocator.GetMemPool()->New(typeNameIdx, isIndeterminate); + } + + void Copy(const BCRegTypeItem &src) { + typeNameIdx = src.typeNameIdx; + isIndeterminate = src.isIndeterminate; + isFromDef = src.isFromDef; + } + bool operator==(const BCRegTypeItem &item) const { + return typeNameIdx == item.typeNameIdx; + } + + bool IsRef() const { + return GetPrimType() == PTY_ref; + } + + bool IsMorePreciseType(const BCRegTypeItem &typeItemIn) const; + + // for debug + void DumpTypeName() const { + LogInfo::MapleLogger(kLlDbg) << GlobalTables::GetStrTable().GetStringFromStrIdx(typeNameIdx) << "\n"; + } + + void SetPos(uint32 pc) { + pos = pc; + } + + void SetDom(bool flag) { + isDom = flag; + } + + bool IsDom() const { + return isDom; + } + + GStrIdx typeNameIdx; + bool isIndeterminate = false; + // means `is def ` or `come from def` + bool isFromDef = false; + uint32 pos = UINT32_MAX; + + bool isDom = false; +}; + +class BCRegType { + public: + BCRegType(MapleAllocator &allocatorIn, BCReg ®, const GStrIdx &typeNameIdxIn, bool isIndeterminateIn = false); + ~BCRegType() = default; + MapleAllocator &GetAllocator() { + return allocator; + } + + void SetTypeNameIdx(const GStrIdx &idx) const { + regTypeItem->typeNameIdx = idx; + } + + bool IsIndeterminate() const { + return regTypeItem->isIndeterminate; + } + + void SetIsIndeterminate(bool flag) const { + regTypeItem->isIndeterminate = flag; + } + + static void InsertUniqueTypeItem(MapleList &itemSet, BCRegTypeItem *item) { + for (auto const &elem : itemSet) { + if ((!elem->isIndeterminate && !item->isIndeterminate && (*elem) == (*item)) || elem == item) { + return; + } + } + itemSet.emplace_back(item); + } + + void UpdateUsedSet(BCRegTypeItem *typeItem) { + if (typeItem->isIndeterminate) { + InsertUniqueTypeItem(fuzzyTypesUsedAs, typeItem); + } + InsertUniqueTypeItem(*typesUsedAs, typeItem); + } + + void UpdateFuzzyUsedSet(BCRegTypeItem *typeItem) { + if (typeItem->isIndeterminate) { + InsertUniqueTypeItem(fuzzyTypesUsedAs, typeItem); + } + } + + void SetRegTypeItem(BCRegTypeItem *item) { + regTypeItem = item; + } + + const BCRegTypeItem *GetRegTypeItem() const { + return regTypeItem; + } + + MapleList *GetUsedTypes() { + return typesUsedAs; + } + + void PrecisifyTypes(bool isTry = false); + + void PrecisifyRelatedTypes(const BCRegTypeItem *realType); + + void PrecisifyElemTypes(const BCRegTypeItem *realType); + + bool IsPrecisified() const { + return precisified; + } + + static BCRegTypeItem *GetMostPreciseType(const MapleList &types); + + void AddElemType(BCRegTypeItem *item) { + elemTypes.emplace(item); + } + + void SetUsedTypes(MapleList *types) { + typesUsedAs = types; + } + + void SetPos(uint32 pc) { + pos = pc; + } + + uint32 GetPos() const { + return pos; + } + + void RegisterRelatedBCRegType(BCRegType *ty) { + relatedBCRegTypes.emplace_back(ty); + } + + void RegisterLivesInfo(uint32 posStart) { + livesBegins.emplace_back(posStart); + } + + bool IsBefore(uint32 posCurr, uint32 end) const { + for (auto p : livesBegins) { + if (p == posCurr) { + return true; + } + if (p == end) { + return false; + } + } + CHECK_FATAL(false, "Should not get here."); + return true; + } + + private: + MapleAllocator 
&allocator; + BCReg &curReg; + BCRegTypeItem *regTypeItem; + MapleList *typesUsedAs = nullptr; + MapleList fuzzyTypesUsedAs; + bool precisified = false; + MapleSet elemTypes; + uint32 pos = UINT32_MAX; + // `0` for args, `pc + 1` for local defs + MapleVector livesBegins; + + std::list relatedBCRegTypes; +}; + +struct BCRegValue { + union { + uint64 raw64 = 0; + uint32 raw32; + } primValue; +}; + +struct BCReg { + BCReg() = default; + virtual ~BCReg() = default; + bool isDef = false; + uint32 regNum = UINT32_MAX; + BCRegType *regType = nullptr; + BCRegValue *regValue = nullptr; + BCRegTypeItem *regTypeItem = nullptr; + bool IsConstZero() const; + UniqueFEIRType GenFEIRType() const; + UniqueFEIRVar GenFEIRVarReg() const; + std::list GenRetypeStmtsAfterDef() const; + std::list GenRetypeStmtsBeforeUse() const; + // a reg maybe store a primitive pointer type var. + // If it is a primitive ptr, GetPrimType() return a PTY_ref + // GetBasePrimType() return a corresponding prim type + // If it is not a primitive pointer type, GetPrimType() and GetBasePrimType() return a same PrimType. + PrimType GetPrimType() const; + PrimType GetBasePrimType() const; + std::unique_ptr Clone() const { + return CloneImpl(); + } + + protected: + virtual UniqueFEIRType GenFEIRTypeImpl() const; + virtual UniqueFEIRVar GenFEIRVarRegImpl() const; + virtual std::unique_ptr CloneImpl() const; +}; + +// for TypeInfer +struct TypeInferItem { + TypeInferItem(MapleAllocator &alloc, uint32 pos, BCReg *regIn, TypeInferItem *prev) + : beginPos(pos), + prevs(alloc.Adapter()), + aliveUsedTypes(alloc.GetMemPool()->New>(alloc.Adapter())), + reg(regIn) { + if (prev == nullptr) { + reg->regType->SetUsedTypes(aliveUsedTypes); + } else { + (void)RegisterInPrevs(prev); + } + reg->regType->RegisterLivesInfo(pos); + } + + bool RegisterInPrevs(TypeInferItem *prev) { + for (auto const e : prevs) { + if (e == prev) { + return false; + } + } + prevs.emplace_back(prev); + return true; + } + + void InsertUniqueAliveTypes(const TypeInferItem *end, const MapleList *types) { + if (end != nullptr && end->reg == this->reg && end->reg->regType->IsBefore(this->beginPos, end->beginPos)) { + return; + } + std::vector workList; + uint32 index = 0; + workList.emplace_back(this); + while (index < workList.size()) { + auto currItem = workList[index++]; + currItem->isAlive = true; + for (auto type : *types) { + bool duplicate = false; + for (auto const ty : *(currItem->aliveUsedTypes)) { + if (ty == type) { + duplicate = true; + break; + } + } + if (!duplicate) { + currItem->aliveUsedTypes->emplace_back(type); + currItem->reg->regType->UpdateFuzzyUsedSet(type); + } + } + // insert into its prevs cyclely + for (auto const prev : currItem->prevs) { + if (end != nullptr && end->reg == prev->reg && end->reg->regType->IsBefore(prev->beginPos, end->beginPos)) { + continue; + } + bool exist = false; + for (uint32 i = 0; i < workList.size(); ++i) { + if (workList[i] == prev) { + exist = true; + break; + } + } + if (!exist) { + workList.emplace_back(prev); + } + } + } + } + + void InsertUniqueAliveType(const TypeInferItem *end, BCRegTypeItem *type) { + if (end != nullptr && end->reg == this->reg && end->reg->regType->IsBefore(this->beginPos, end->beginPos)) { + return; + } + std::vector workList; + uint32 index = 0; + workList.emplace_back(this); + while (index < workList.size()) { + auto currItem = workList[index++]; + currItem->isAlive = true; + bool duplicate = false; + for (auto const ty : *(currItem->aliveUsedTypes)) { + if (ty == type) { + duplicate = true; + 
break; + } + } + if (!duplicate) { + currItem->aliveUsedTypes->emplace_back(type); + currItem->reg->regType->UpdateFuzzyUsedSet(type); + } + // insert into its prevs cyclely + for (auto prev : std::as_const(currItem->prevs)) { + if (end != nullptr && end->reg == prev->reg && end->reg->regType->IsBefore(prev->beginPos, end->beginPos)) { + continue; + } + bool exist = false; + for (uint32 i = 0; i < workList.size(); ++i) { + if (workList[i] == prev) { + exist = true; + break; + } + } + if (!exist) { + workList.emplace_back(prev); + } + } + } + } + + uint32 beginPos; + MapleList prevs; + MapleList *aliveUsedTypes = nullptr; + BCReg *reg = nullptr; + bool isAlive = false; +}; +} // namespace bc +} // namespace maple +#endif // HIR2MPL_BC_INPUT_INCLUDE_BC_INSTRUCTION_H diff --git a/src/hir2mpl/bytecode_input/common/include/bc_io.h b/src/hir2mpl/bytecode_input/common/include/bc_io.h new file mode 100644 index 0000000000000000000000000000000000000000..e47cc2575371f4306a97e616350b13b9b9e3e5d9 --- /dev/null +++ b/src/hir2mpl/bytecode_input/common/include/bc_io.h @@ -0,0 +1,95 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MPL_FE_BC_INPUT_BC_IO_H +#define MPL_FE_BC_INPUT_BC_IO_H +#include +#include "basic_io.h" + +namespace maple { +namespace bc { +class BCIO : public BasicIOMapFile { + public: + explicit BCIO(const std::string &fileName); + ~BCIO() = default; + + static bool IsBigEndian() { + union { + uint16 v16; + uint8 a[sizeof(uint16) / sizeof(uint8)]; + } u; + u.v16 = 0x1234; + return u.a[0] == 0x12; + } + + protected: + bool isBigEndianHost = true; +}; + +struct RawData { + const uint8 *context; + uint32 size; +}; + +class BCReader : public BCIO, public BasicIORead { + public: + explicit BCReader(const std::string &fileName); + ~BCReader() override; + bool RetrieveHeader(RawData &data); + void SetEndianTag(bool isBigEndian); + template + T GetRealInteger(T value) const { + if (isBigEndian == isBigEndianHost || sizeof(T) == sizeof(uint8)) { + return value; // Same Endian, not need to convert + } else { + union U { + uint8 a[sizeof(T)]; + T val; + } u0; + u0.val = value; + U u1; + for (size_t i = 0; i < sizeof(T); ++i) { + u1.a[i] = u0.a[sizeof(T) - 1 - i]; + } + return u1.val; + } + } + + struct ClassElem { + std::string className; + std::string elemName; + std::string typeName; + }; + + std::string GetStringFromIdx(uint32 idx) const; + std::string GetTypeNameFromIdx(uint32 idx) const; + ClassElem GetClassMethodFromIdx(uint32 idx) const; + ClassElem GetClassFieldFromIdx(uint32 idx) const; + std::string GetSignature(uint32 idx) const; + uint32 GetFileIndex() const; + std::string GetIRSrcFileSignature() const; + + protected: + virtual std::string GetStringFromIdxImpl(uint32 idx) const = 0; + virtual std::string GetTypeNameFromIdxImpl(uint32 idx) const = 0; + virtual ClassElem GetClassMethodFromIdxImpl(uint32 idx) const = 0; + virtual ClassElem GetClassFieldFromIdxImpl(uint32 idx) const = 0; + virtual std::string GetSignatureImpl(uint32 idx) const = 0; + 
virtual uint32 GetFileIndexImpl() const = 0; + virtual bool RetrieveHeaderImpl(RawData &data); + std::string irSrcFileSignature; +}; +} // namespace bc +} // namespace maple +#endif // MPL_FE_BC_INPUT_BC_IO_H diff --git a/src/hir2mpl/bytecode_input/common/include/bc_op_factory.h b/src/hir2mpl/bytecode_input/common/include/bc_op_factory.h new file mode 100644 index 0000000000000000000000000000000000000000..ca3c274ff2497a1ca4cc4e6bbe589f43c169a865 --- /dev/null +++ b/src/hir2mpl/bytecode_input/common/include/bc_op_factory.h @@ -0,0 +1,38 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MPL_FE_BC_INPUT_BC_OP_FACTORY_H +#define MPL_FE_BC_INPUT_BC_OP_FACTORY_H +#include +#include +#include "mempool_allocator.h" +#include "types_def.h" +#include "bc_instruction.h" + +namespace maple { +namespace bc { +class BCOpFactory { + public: + template + static BCInstruction *BCOpGenerator(MapleAllocator &allocator, uint32 pc) { + MemPool *mp = allocator.GetMemPool(); + BCInstruction *op = mp->New(allocator, pc, opcode); + op->InitBCInStruction(kind, wide, throwable); + return op; + } + using funcPtr = BCInstruction*(*)(MapleAllocator&, uint32); +}; +} // namespace bc +} // namespace maple +#endif // MPL_FE_BC_INPUT_BC_OP_FACTORY_H \ No newline at end of file diff --git a/src/hir2mpl/bytecode_input/common/include/bc_parser-inl.h b/src/hir2mpl/bytecode_input/common/include/bc_parser-inl.h new file mode 100644 index 0000000000000000000000000000000000000000..48938f822d346e8a92dcee22d0996c5cf2ff8a0a --- /dev/null +++ b/src/hir2mpl/bytecode_input/common/include/bc_parser-inl.h @@ -0,0 +1,62 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef HIR2MPL_BC_INPUT_INCLUDE_BC_PARSER_INL_H_ +#define HIR2MPL_BC_INPUT_INCLUDE_BC_PARSER_INL_H_ +#include "bc_parser.h" + +namespace maple { +namespace bc { +template +BCParser::BCParser(uint32 fileIdxIn, const std::string &fileNameIn, const std::list &classNamesIn) + : BCParserBase(fileIdxIn, fileNameIn, classNamesIn), reader(std::make_unique(fileIdxIn, fileNameIn)) {} + +template +bool BCParser::OpenFileImpl() { + return reader->OpenAndMap(); +} + +template +int MethodProcessTask::RunImpl(MplTaskParam *param) { + (void)param; + parser.ProcessDexClassMethod(klass, isVirtual, index, idxPair); + return 0; +} + +template +int MethodProcessTask::FinishImpl(MplTaskParam *param) { + (void)param; + return 0; +} + +template +void MethodProcessSchedular::AddMethodProcessTask(T &parser, + std::unique_ptr &klass, bool isVirtual, uint32 index, std::pair &idxPair) { + std::unique_ptr> task = + std::make_unique>(parser, klass, isVirtual, index, idxPair); + AddTask(task.get()); + tasks.push_back(std::move(task)); +} + +template +void MethodProcessSchedular::CallbackThreadMainStart() { + std::thread::id tid = std::this_thread::get_id(); + if (FEOptions::GetInstance().GetDumpLevel() >= FEOptions::kDumpLevelInfoDebug) { + INFO(kLncInfo, "Start Run Thread (tid=%lx)", std::hash{}(tid)); + } + FEConfigParallel::GetInstance().RegisterRunThreadID(tid); +} +} // namespace bc +} // namespace maple +#endif // HIR2MPL_BC_INPUT_INCLUDE_BC_PARSER_INL_H_ \ No newline at end of file diff --git a/src/hir2mpl/bytecode_input/common/include/bc_parser.h b/src/hir2mpl/bytecode_input/common/include/bc_parser.h new file mode 100644 index 0000000000000000000000000000000000000000..25a5eb000594f13583850f8a34797020d3438270 --- /dev/null +++ b/src/hir2mpl/bytecode_input/common/include/bc_parser.h @@ -0,0 +1,97 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MPL_FE_BC_INPUT_BC_PARSER_H +#define MPL_FE_BC_INPUT_BC_PARSER_H +#include +#include +#include +#include "bc_io.h" +#include "bc_class.h" +#include "bc_parser_base.h" +#include "mpl_scheduler.h" +#include "fe_options.h" + +namespace maple { +namespace bc { +template +class BCParser : public BCParserBase { + public: + BCParser(uint32 fileIdxIn, const std::string &fileNameIn, const std::list &classNamesIn); + ~BCParser() = default; + + protected: + bool OpenFileImpl(); + uint32 CalculateCheckSumImpl(const uint8 *data, uint32 size) = 0; + bool ParseHeaderImpl() = 0; + bool VerifyImpl() = 0; + virtual bool RetrieveIndexTables() = 0; + bool RetrieveUserSpecifiedClasses(std::list> &klasses) = 0; + bool RetrieveAllClasses(std::list> &klasses) = 0; + bool CollectAllDepTypeNamesImpl(std::unordered_set &depSet) = 0; + bool CollectMethodDepTypeNamesImpl(std::unordered_set &depSet, BCClassMethod &bcMethod) const = 0; + bool CollectAllClassNamesImpl(std::unordered_set &classSet) = 0; + + std::unique_ptr reader; +}; + +template +class MethodProcessTask : public MplTask { + public: + MethodProcessTask(T &argParser, std::unique_ptr &argKlass, + bool argIsVirtual, uint32 index, std::pair argIdxPair) + : isVirtual(argIsVirtual), + index(index), + klass(argKlass), + idxPair(argIdxPair), + parser(argParser) {} + virtual ~MethodProcessTask() = default; + + protected: + int RunImpl(MplTaskParam *param) override; + int FinishImpl(MplTaskParam *param) override; + + private: + bool isVirtual; + uint32 index; + std::unique_ptr &klass; + std::pair idxPair; + T &parser; +}; + +template +class MethodProcessSchedular : public MplScheduler { + public: + MethodProcessSchedular(const std::string &name) + : MplScheduler(name) {} + ~MethodProcessSchedular() override { + FEConfigParallel::GetInstance().RunThreadIDCleanUp(); + } + + void AddMethodProcessTask(T &parser, std::unique_ptr &klass, + bool isVirtual, uint32 index, std::pair &idxPair); + void SetDumpTime(bool arg) { + dumpTime = arg; + } + + protected: + void CallbackThreadMainStart() override; + + private: + std::list>> tasks; +}; +} // namespace bc +} // namespace maple +#include "bc_parser-inl.h" +#endif // MPL_FE_BC_INPUT_BC_PARSER_H diff --git a/src/hir2mpl/bytecode_input/common/include/bc_parser_base.h b/src/hir2mpl/bytecode_input/common/include/bc_parser_base.h new file mode 100644 index 0000000000000000000000000000000000000000..e23f02d9a2720dac3b38d74531d4c8398493ce85 --- /dev/null +++ b/src/hir2mpl/bytecode_input/common/include/bc_parser_base.h @@ -0,0 +1,70 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef HIR2MPL_BC_INPUT_INCLUDE_BC_PARSER_BASE_H +#define HIR2MPL_BC_INPUT_INCLUDE_BC_PARSER_BASE_H +#include +#include +#include +#include "types_def.h" +#include "bc_io.h" + +namespace maple { +namespace bc { +class BCClass; +class BCClassMethod; +class BCParserBase { + public: + BCParserBase(uint32 fileIdxIn, const std::string &fileNameIn, const std::list &classNamesIn); + virtual ~BCParserBase() = default; + bool OpenFile(); + bool ParseHeader(); + bool Verify(); + int32 GetFileNameHashId() const { + return fileNameHashId; + } + uint32 CalculateCheckSum(const uint8 *data, uint32 size); + bool RetrieveClasses(std::list> &klasses); + const BCReader *GetReader() const; + bool CollectAllDepTypeNames(std::unordered_set &depSet); + bool CollectMethodDepTypeNames(std::unordered_set &depSet, BCClassMethod &bcMethod) const; + bool CollectAllClassNames(std::unordered_set &classSet); + void ProcessMethodBody(BCClassMethod &method, uint32 classIdx, uint32 methodItemIdx, bool isVirtual) const { + ProcessMethodBodyImpl(method, classIdx, methodItemIdx, isVirtual); + } + + protected: + virtual const BCReader *GetReaderImpl() const = 0; + virtual bool OpenFileImpl() = 0; + virtual uint32 CalculateCheckSumImpl(const uint8 *data, uint32 size) = 0; + virtual bool ParseHeaderImpl() = 0; + virtual bool VerifyImpl() = 0; + virtual bool RetrieveIndexTables() = 0; + virtual bool RetrieveUserSpecifiedClasses(std::list> &klasses) = 0; + virtual bool RetrieveAllClasses(std::list> &klasses) = 0; + virtual bool CollectAllDepTypeNamesImpl(std::unordered_set &depSet) = 0; + virtual bool CollectMethodDepTypeNamesImpl(std::unordered_set &depSet, + BCClassMethod &bcMethod) const = 0; + virtual bool CollectAllClassNamesImpl(std::unordered_set &classSet) = 0; + virtual void ProcessMethodBodyImpl(BCClassMethod &method, + uint32 classIdx, uint32 methodItemIdx, bool isVirtual) const = 0; + + uint32 fileIdx; + const std::string fileName; + const std::list &classNames; + const int32 fileNameHashId; +}; +} // namespace bc +} // namespace maple +#endif // HIR2MPL_BC_INPUT_INCLUDE_BC_PARSER_BASE_H \ No newline at end of file diff --git a/src/hir2mpl/bytecode_input/common/include/bc_pragma.h b/src/hir2mpl/bytecode_input/common/include/bc_pragma.h new file mode 100644 index 0000000000000000000000000000000000000000..cb33222911e76a5e5cf1cde05ddf86bda2f0a07e --- /dev/null +++ b/src/hir2mpl/bytecode_input/common/include/bc_pragma.h @@ -0,0 +1,44 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */
+#ifndef HIR2MPL_BC_INPUT_INCLUDE_BC_PRAGMA_H
+#define HIR2MPL_BC_INPUT_INCLUDE_BC_PRAGMA_H
+#include
+#include
+#include
+#include "mir_module.h"
+#include "mir_pragma.h"
+#include "mempool.h"
+
+namespace maple {
+namespace bc {
+class BCAnnotationsDirectory {
+ public:
+ BCAnnotationsDirectory(MIRModule &moduleArg, MemPool &mpArg)
+ : module(moduleArg),
+ mp(mpArg) {}
+ virtual ~BCAnnotationsDirectory() = default;
+ std::vector &EmitPragmas() {
+ return EmitPragmasImpl();
+ }
+
+ protected:
+ virtual std::vector &EmitPragmasImpl() = 0;
+ MIRModule &module;
+ MemPool &mp;
+ std::vector pragmas;
+};
+} // namespace bc
+} // namespace maple
+#endif // HIR2MPL_BC_INPUT_INCLUDE_BC_PRAGMA_H
diff --git a/src/hir2mpl/bytecode_input/common/include/bc_util.h b/src/hir2mpl/bytecode_input/common/include/bc_util.h
new file mode 100644
index 0000000000000000000000000000000000000000..43ba35277f48bb07c5ba9b355515bf349ecd96f6
--- /dev/null
+++ b/src/hir2mpl/bytecode_input/common/include/bc_util.h
@@ -0,0 +1,264 @@
+/*
+ * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#ifndef HIR2MPL_BC_INPUT_INCLUDE_BC_UTIL_H
+#define HIR2MPL_BC_INPUT_INCLUDE_BC_UTIL_H
+#include
+#include
+#include
+#include
+#include "types_def.h"
+#include "cfg_primitive_types.h"
+#include "global_tables.h"
+namespace maple {
+namespace bc {
+enum BCInstructionKind : uint16 {
+ kUnKnownKind = 0,
+ kFallThru = 0x0001,
+ kConditionBranch = 0x0002,
+ kGoto = 0x0004,
+ kSwitch = 0x0008,
+ kTarget = 0x0010,
+ kTryStart = 0x0020,
+ kTryEnd = 0x0040,
+ kCatch = 0x0080,
+ kReturn = 0x0100,
+};
+
+enum BCRegVarType {
+ kUnknownType = 0,
+ kPrimitive = 1,
+ kBoolean = 3,
+ kLong = 17,
+ kFloat = 33,
+ kDouble = 65,
+ kRef = 2,
+};
+
+class BCUtil {
+ public:
+ static const std::string kUnknown;
+ static const std::string kPrimitive;
+ static const std::string kBoolean;
+ static const std::string kByte;
+ static const std::string kChar;
+ static const std::string kShort;
+ static const std::string kInt;
+ static const std::string kLong;
+ static const std::string kFloat;
+ static const std::string kDouble;
+ static const std::string kVoid;
+ static const std::string kWide;
+ static const std::string kAggregate;
+ static const std::string kJavaObjectName;
+ static const std::string kJavaStringName;
+ static const std::string kJavaByteClassName;
+ static const std::string kJavaShortClassName;
+ static const std::string kJavaIntClassName;
+ static const std::string kJavaLongClassName;
+ static const std::string kJavaFloatClassName;
+ static const std::string kJavaDoubleClassName;
+ static const std::string kJavaCharClassName;
+ static const std::string kJavaBoolClassName;
+ static const std::string kJavaClassName;
+ static const std::string kJavaMethodHandleName;
+ static const std::string kJavaExceptionName;
+ static const std::string kJavaThrowableName;
+
+ static const std::string kJavaMethodHandleInvoke;
+ static const std::string kJavaMethodHandleInvokeExact;
+
+ static const std::string kABoolean;
+ static const
std::string kAByte; + static const std::string kAShort; + static const std::string kAChar; + static const std::string kAInt; + static const std::string kALong; + static const std::string kAFloat; + static const std::string kADouble; + static const std::string kAJavaObjectName; + + static inline GStrIdx &GetBooleanIdx() { + static GStrIdx booleanIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(kBoolean); + return booleanIdx; + } + + static inline GStrIdx &GetIntIdx() { + static GStrIdx intIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(kInt); + return intIdx; + } + + static inline GStrIdx &GetLongIdx() { + static GStrIdx longIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(kLong); + return longIdx; + } + + static inline GStrIdx &GetFloatIdx() { + static GStrIdx floatIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(kFloat); + return floatIdx; + } + + static inline GStrIdx &GetDoubleIdx() { + static GStrIdx doubleIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(kDouble); + return doubleIdx; + } + + static inline GStrIdx &GetByteIdx() { + static GStrIdx byteIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(kByte); + return byteIdx; + } + + static inline GStrIdx &GetCharIdx() { + static GStrIdx charIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(kChar); + return charIdx; + } + + static inline GStrIdx &GetShortIdx() { + static GStrIdx shortIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(kShort); + return shortIdx; + } + + static inline GStrIdx &GetVoidIdx() { + static GStrIdx voidIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(kVoid); + return voidIdx; + } + + static inline GStrIdx &GetJavaObjectNameMplIdx() { + static GStrIdx javaObjectNameMplIdx = + GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(namemangler::EncodeName(kJavaObjectName)); + return javaObjectNameMplIdx; + } + + static inline GStrIdx &GetJavaStringNameMplIdx() { + static GStrIdx javaStringNameMplIdx = + GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(namemangler::EncodeName(kJavaStringName)); + return javaStringNameMplIdx; + } + + static inline GStrIdx &GetJavaClassNameMplIdx() { + static GStrIdx javaClassNameMplIdx = + GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(namemangler::EncodeName(kJavaClassName)); + return javaClassNameMplIdx; + } + + static inline GStrIdx &GetJavaExceptionNameMplIdx() { + static GStrIdx javaExceptionNameMplIdx = + GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(namemangler::EncodeName(kJavaExceptionName)); + return javaExceptionNameMplIdx; + } + + static inline GStrIdx &GetJavaThrowableNameMplIdx() { + static GStrIdx javaThrowableNameMplIdx = + GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(namemangler::EncodeName(kJavaThrowableName)); + return javaThrowableNameMplIdx; + } + + static inline GStrIdx &GetJavaMethodHandleNameMplIdx() { + static GStrIdx javaMethodHandleNameMplIdx = + GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(namemangler::EncodeName(kJavaMethodHandleName)); + return javaMethodHandleNameMplIdx; + } + + static inline GStrIdx &GetABooleanIdx() { + static GStrIdx aBooleanIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(kABoolean); + return aBooleanIdx; + } + + static inline GStrIdx &GetAIntIdx() { + static GStrIdx aIntIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(kAInt); + return aIntIdx; + } + + static inline GStrIdx &GetALongIdx() { + static GStrIdx aLongIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(kALong); + 
return aLongIdx; + } + + static inline GStrIdx &GetAByteIdx() { + static GStrIdx aByteIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(kAByte); + return aByteIdx; + } + + static inline GStrIdx &GetACharIdx() { + static GStrIdx aCharIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(kAChar); + return aCharIdx; + } + + static inline GStrIdx &GetAShortIdx() { + static GStrIdx aShortIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(kAShort); + return aShortIdx; + } + + static inline GStrIdx &GetAFloatIdx() { + static GStrIdx aFloatIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(kAFloat); + return aFloatIdx; + } + + static inline GStrIdx &GetADoubleIdx() { + static GStrIdx aDoubleIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(kADouble); + return aDoubleIdx; + } + + static inline GStrIdx &GetAJavaObjectNameMplIdx() { + static GStrIdx aJavaObjectNameMplIdx = + GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(namemangler::EncodeName(kAJavaObjectName)); + return aJavaObjectNameMplIdx; + } + + // JavaMultiANewArray + static inline GStrIdx &GetMultiANewArrayFullIdx() { + static GStrIdx multiANewArrayFullIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName( + namemangler::EncodeName("Ljava/lang/reflect/Array;|newInstance|(Ljava/lang/Class;[I)Ljava/lang/Object;")); + return multiANewArrayFullIdx; + } + + static inline GStrIdx &GetMultiANewArrayClassIdx() { + static GStrIdx multiANewArrayClassIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName( + namemangler::EncodeName("Ljava/lang/reflect/Array;")); + return multiANewArrayClassIdx; + } + + static inline GStrIdx &GetMultiANewArrayElemIdx() { + static GStrIdx multiANewArrayElemIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName( + namemangler::EncodeName("newInstance")); + return multiANewArrayElemIdx; + } + + static inline GStrIdx &GetMultiANewArrayTypeIdx() { + static GStrIdx multiANewArrayTypeIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName( + namemangler::EncodeName("(Ljava/lang/Class;[I)Ljava/lang/Object;")); + return multiANewArrayTypeIdx; + } + + // value element name + static inline GStrIdx &GetPragmaElementNameValueIdx() { + static GStrIdx pragmaElementNameValueIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName("value"); + return pragmaElementNameValueIdx; + } + + static bool IsWideType(const GStrIdx &name); + static bool IsMorePrecisePrimitiveType(const GStrIdx &name0, const GStrIdx &name1); + static PrimType GetPrimType(const GStrIdx &name); + static bool IsJavaReferenceType(const GStrIdx &typeNameIdx); + static bool IsJavaPrimitveType(const GStrIdx &typeNameIdx); + static bool IsJavaPrimitiveTypeName(const std::string typeName); + static bool IsArrayType(const GStrIdx &typeNameIdx); + static std::string TrimArrayModifier(const std::string &typeName); + static void AddDefaultDepSet(std::unordered_set &typeSet); + static uint32 Name2RegNum(const std::string &name); + static bool HasContainSuffix(const std::string &value, const std::string &suffix); +}; // BCUtil +} // namespace bc +} // namespace maple +#endif // HIR2MPL_BC_INPUT_INCLUDE_BC_UTIL_H \ No newline at end of file diff --git a/src/hir2mpl/bytecode_input/common/include/rc_setter.h b/src/hir2mpl/bytecode_input/common/include/rc_setter.h new file mode 100644 index 0000000000000000000000000000000000000000..85116a18f052fc87a47472c0d1bab45b530f190e --- /dev/null +++ b/src/hir2mpl/bytecode_input/common/include/rc_setter.h @@ -0,0 +1,85 @@ +/* + * Copyright (c) [2020-2021] Huawei 
Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#ifndef BC_INPUT_INCLUDE_RC_SETTER_H
+#define BC_INPUT_INCLUDE_RC_SETTER_H
+#include
+#include
+#include
+#include "types_def.h"
+#include "bc_class.h"
+#include "mir_type.h"
+#include "mir_function.h"
+
+namespace maple {
+namespace bc {
+class RCSetter {
+ public:
+ static RCSetter &GetRCSetter() {
+ ASSERT(rcSetter != nullptr, "rcSetter is not initialized");
+ return *rcSetter;
+ }
+
+ static void InitRCSetter(const std::string &whiteListName) {
+ rcSetter = new RCSetter(whiteListName);
+ }
+
+ static void ReleaseRCSetter() {
+ if (rcSetter != nullptr) {
+ delete rcSetter;
+ rcSetter = nullptr;
+ }
+ }
+
+ void ProcessClassRCAnnotation(const GStrIdx &classIdx, const std::vector &pragmas);
+ void ProcessMethodRCAnnotation(MIRFunction &mirFunc, const std::string &className,
+ MIRStructType &structType, const MIRPragma &pragma);
+ void ProcessFieldRCAnnotation(const StructElemNameIdx &fieldElemNameIdx,
+ const MIRType &fieldType, const std::vector &pragmas);
+ void GetUnownedVarInLocalVars(const BCClassMethod &method, MIRFunction &mirFunction);
+ void CollectUnownedLocalFuncs(MIRFunction *func);
+ void CollectUnownedLocalVars(MIRFunction *func, const GStrIdx &strIdx);
+ void CollectInputStmtField(StmtNode *stmt, const GStrIdx &fieldName);
+ void SetRCFieldAttrByWhiteList(FieldAttrs &attr, const std::string &className, const std::string &fieldName);
+ void SetAttrRCunowned(MIRFunction &mirFunction, const std::set &unownedRegNums) const;
+ void MarkRCAttributes() const;
+
+ private:
+ explicit RCSetter(const std::string &whiteListName)
+ : rcFieldAttrWhiteListName(whiteListName) {
+ if (!rcFieldAttrWhiteListName.empty()) {
+ LoadRCFieldAttrWhiteList(rcFieldAttrWhiteListName);
+ }
+ }
+ ~RCSetter() = default;
+ void MarkRCUnownedForUnownedLocalFunctions() const;
+ void MarkRCUnownedForAnonymousClasses() const;
+ void MarkRCUnownedForLambdaClasses() const;
+ void SetRCUnownedAttributeInternalSetFieldAttrRCUnowned(size_t opnd, const MIRFunction &calledFunc) const;
+ void SetRCUnownedAttribute(const CallNode &callNode, MIRFunction &func,
+ const MIRFunction &calledFunc, const std::set &gStrIdx) const;
+ bool IsAnonymousInner(const MIRStructType &structType) const;
+ bool IsMethodEnclosing(const MIRFunction &func, const MIRStructType &structType) const;
+ void LoadRCFieldAttrWhiteList(const std::string &file);
+
+ static RCSetter *rcSetter;
+ const std::string rcFieldAttrWhiteListName;
+ std::map>> rcFieldAttrWhiteListMap;
+ std::set unownedLocalFuncs;
+ std::map> unownedLocalVars;
+ std::map iputStmtFieldMap;
+};
+} // namespace bc
+} // namespace maple
+#endif // BC_INPUT_INCLUDE_RC_SETTER_H
diff --git a/src/hir2mpl/bytecode_input/common/src/ark_annotation_map.cpp b/src/hir2mpl/bytecode_input/common/src/ark_annotation_map.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..5a64cb4f3d5e791ab055a69d681d27e3cb9b6273
--- /dev/null
+++ b/src/hir2mpl/bytecode_input/common/src/ark_annotation_map.cpp
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c)
[2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "ark_annotation_map.h" + +namespace maple { +namespace bc { +ArkAnnotationMap ArkAnnotationMap::annotationMap; + +void ArkAnnotationMap::Init() { + // AnnotationDefault + pragmaTypeNameMap.insert(std::make_pair("Ldalvik_2Fannotation_2FAnnotationDefault_3B", + "Lark_2Fannotation_2FAnnotationDefault_3B")); + // EnclosingClass + pragmaTypeNameMap.insert(std::make_pair("Ldalvik_2Fannotation_2FEnclosingClass_3B", + "Lark_2Fannotation_2FEnclosingClass_3B")); + // EnclosingMethod + pragmaTypeNameMap.insert(std::make_pair("Ldalvik_2Fannotation_2FEnclosingMethod_3B", + "Lark_2Fannotation_2FEnclosingMethod_3B")); + // InnerClass + pragmaTypeNameMap.insert(std::make_pair("Ldalvik_2Fannotation_2FInnerClass_3B", + "Lark_2Fannotation_2FInnerClass_3B")); + // KnownFailure + pragmaTypeNameMap.insert(std::make_pair("Ldalvik_2Fannotation_2FKnownFailure_3B", + "Lark_2Fannotation_2FKnownFailure_3B")); + // MemberClasses + pragmaTypeNameMap.insert(std::make_pair("Ldalvik_2Fannotation_2FMemberClasses_3B", + "Lark_2Fannotation_2FMemberClasses_3B")); + // MethodParameters + pragmaTypeNameMap.insert(std::make_pair("Ldalvik_2Fannotation_2FMethodParameters_3B", + "Lark_2Fannotation_2FMethodParameters_3B")); + // Signature + pragmaTypeNameMap.insert(std::make_pair("Ldalvik_2Fannotation_2FSignature_3B", + "Lark_2Fannotation_2FSignature_3B")); + // SourceDebugExtension + pragmaTypeNameMap.insert(std::make_pair("Ldalvik_2Fannotation_2FSourceDebugExtension_3B", + "Lark_2Fannotation_2FSourceDebugExtension_3B")); + // TestTargetClass + pragmaTypeNameMap.insert(std::make_pair("Ldalvik_2Fannotation_2FTestTargetClass_3B", + "Lark_2Fannotation_2FTestTargetClass_3B")); + // TestTarget + pragmaTypeNameMap.insert(std::make_pair("Ldalvik_2Fannotation_2FTestTarget_3B", + "Lark_2Fannotation_2FTestTarget_3B")); + // Throws + pragmaTypeNameMap.insert(std::make_pair("Ldalvik_2Fannotation_2FThrows_3B", + "Lark_2Fannotation_2FThrows_3B")); + // compact + pragmaTypeNameMap.insert(std::make_pair("Ldalvik_2Fannotation_2Fcompat_2FUnsupportedAppUsage_3B", + "Lark_2Fannotation_2Fcompat_2FUnsupportedAppUsage_3B")); + // codegen + pragmaTypeNameMap.insert(std::make_pair("Ldalvik_2Fannotation_2Fcodegen_2FCovariantReturnType_3B", + "Lark_2Fannotation_2Fcodegen_2FCovariantReturnType_3B")); + // optimization + pragmaTypeNameMap.insert(std::make_pair("Ldalvik_2Fannotation_2Foptimization_2FCriticalNative_3B", + "Lark_2Fannotation_2Foptimization_2FCriticalNative_3B")); + pragmaTypeNameMap.insert(std::make_pair("Ldalvik_2Fannotation_2Foptimization_2FDeadReferenceSafe_3B", + "Lark_2Fannotation_2Foptimization_2FDeadReferenceSafe_3B")); + pragmaTypeNameMap.insert(std::make_pair("Ldalvik_2Fannotation_2Foptimization_2FFastNative_3B", + "Lark_2Fannotation_2Foptimization_2FFastNative_3B")); + pragmaTypeNameMap.insert(std::make_pair("Ldalvik_2Fannotation_2Foptimization_2FReachabilitySensitive_3B", + "Lark_2Fannotation_2Foptimization_2FReachabilitySensitive_3B")); + for (auto &it : 
pragmaTypeNameMap) { + (void)arkAnnotationTypeNames.insert(it.second); + } +} + +const std::string &ArkAnnotationMap::GetAnnotationTypeName(const std::string &orinName) { + std::map::iterator it = pragmaTypeNameMap.find(orinName); + if (it == pragmaTypeNameMap.end()) { + return orinName; + } else { + return it->second; + } +} +} // namespace bc +} // namespace maple \ No newline at end of file diff --git a/src/hir2mpl/bytecode_input/common/src/ark_annotation_processor.cpp b/src/hir2mpl/bytecode_input/common/src/ark_annotation_processor.cpp new file mode 100644 index 0000000000000000000000000000000000000000..86d37127bc922c06fd2286071018347d6729368f --- /dev/null +++ b/src/hir2mpl/bytecode_input/common/src/ark_annotation_processor.cpp @@ -0,0 +1,166 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "ark_annotation_processor.h" +#include "ark_annotation_map.h" +#include "fe_manager.h" +#include "mpl_logging.h" +namespace maple { +namespace bc { +void ArkAnnotationProcessor::Process() { + ArkAnnotationMap::GetArkAnnotationMap().Init(); + ArkAnnotation::GetInstance().Init(); + const std::set &arkAnnotationTypeNames = + ArkAnnotationMap::GetArkAnnotationMap().GetArkAnnotationTypeNames(); + for (const std::string &arkAnnotationTypeName : arkAnnotationTypeNames) { + (void)FEManager::GetTypeManager().CreateClassOrInterfaceType(arkAnnotationTypeName, true, FETypeFlag::kSrcUnknown); + } +} + +// ---------- ArkAnnotation ---------- +ArkAnnotation ArkAnnotation::instance; + +void ArkAnnotation::Init() { + // FastNative + typeNameSetForFastNative.insert(GetStrIdxFromDexName("Ldalvik/annotation/optimization/FastNative;")); + typeNameSetForFastNative.insert(GetStrIdxFromDexName("Lark/annotation/optimization/FastNative;")); + // CriticalNative + typeNameSetForCriticalNative.insert(GetStrIdxFromDexName("Ldalvik/annotation/optimization/CriticalNative;")); + typeNameSetForCriticalNative.insert(GetStrIdxFromDexName("Lark/annotation/optimization/CriticalNative;")); + // CallerSensitive + typeNameSetForCallerSensitive.insert(GetStrIdxFromDexName("Lsun/reflect/CallerSensitive;")); + // Permanent + typeNameSetForPermanent.insert(GetStrIdxFromDexName("Lcom/huawei/ark/annotation/Permanent;")); + typeNameSetForPermanent.insert(GetStrIdxFromDexName("Lharmonyos/annotation/Permanent;")); + typeNameSetForPermanent.insert(GetStrIdxFromDexName("Lark/annotation/Permanent;")); + // RCU + typeNameSetForRCUnowned.insert(GetStrIdxFromDexName("Ljava/lang/annotation/RCUnownedRef;")); + typeNameSetForRCUnowned.insert(GetStrIdxFromDexName("Lcom/huawei/ark/annotation/Unowned;")); + typeNameSetForRCUnowned.insert(GetStrIdxFromDexName("Lharmonyos/annotation/Unowned;")); + typeNameSetForRCUnowned.insert(GetStrIdxFromDexName("Lark/annotation/Unowned;")); + // RCUnownedCap + typeNameSetForRCUnownedCap.insert(GetStrIdxFromDexName("Ljava/lang/annotation/RCUnownedCapRef;")); + // RCUnownedCapList + 
typeNameSetForRCUnownedCapList.insert(GetStrIdxFromDexName("Ljava/lang/annotation/RCUnownedCapRef$List;")); + // RCUnownedLocal + typeNameSetForRCUnownedLocal.insert(GetStrIdxFromDexName("Lcom/huawei/ark/annotation/UnownedLocal;")); + typeNameSetForRCUnownedLocal.insert(GetStrIdxFromDexName("Lharmonyos/annotation/UnownedLocal;")); + typeNameSetForRCUnownedLocal.insert(GetStrIdxFromDexName("Lark/annotation/UnownedLocal;")); + // RCUnownedLocalOld + typeNameIdxForRCUnownedLocalOld = GetStrIdxFromDexName("Ljava/lang/annotation/RCLocalUnownedRef;"); + // RCUnownedThis + typeNameSetForRCUnownedThis.insert(GetStrIdxFromDexName("Ljava/lang/annotation/RCUnownedThisRef;")); + // RCUnownedOuter + typeNameSetForRCUnownedOuter.insert(GetStrIdxFromDexName("Lcom/huawei/ark/annotation/UnownedOuter;")); + typeNameSetForRCUnownedOuter.insert(GetStrIdxFromDexName("Lharmonyos/annotation/UnownedOuter;")); + typeNameSetForRCUnownedOuter.insert(GetStrIdxFromDexName("Lark/annotation/UnownedOuter;")); + // RCWeak + typeNameSetForRCWeak.insert(GetStrIdxFromDexName("Ljava/lang/annotation/RCWeakRef;")); + typeNameSetForRCWeak.insert(GetStrIdxFromDexName("Lcom/huawei/ark/annotation/Weak;")); + typeNameSetForRCWeak.insert(GetStrIdxFromDexName("Lharmonyos/annotation/Weak;")); + typeNameSetForRCWeak.insert(GetStrIdxFromDexName("Lark/annotation/Weak;")); +} + +bool ArkAnnotation::IsFastNative(const TyIdx &tyIdx) const { + MIRStructType *sType = GetStructType(tyIdx); + return sType == nullptr ? + false : typeNameSetForFastNative.find(sType->GetNameStrIdx()) != typeNameSetForFastNative.end(); +} + +bool ArkAnnotation::IsCriticalNative(const TyIdx &tyIdx) const { + MIRStructType *sType = GetStructType(tyIdx); + return sType == nullptr ? + false : typeNameSetForCriticalNative.find(sType->GetNameStrIdx()) != typeNameSetForCriticalNative.end(); +} + +bool ArkAnnotation::IsCallerSensitive(const TyIdx &tyIdx) const { + MIRStructType *sType = GetStructType(tyIdx); + return sType == nullptr ? + false : typeNameSetForCallerSensitive.find(sType->GetNameStrIdx()) != typeNameSetForCallerSensitive.end(); +} + +bool ArkAnnotation::IsPermanent(const TyIdx &tyIdx) const { + MIRStructType *sType = GetStructType(tyIdx); + return sType == nullptr ? + false : typeNameSetForPermanent.find(sType->GetNameStrIdx()) != typeNameSetForPermanent.end(); +} + +bool ArkAnnotation::IsPermanent(const std::string &str) const { + for (auto idx : typeNameSetForPermanent) { + std::string annoName = GlobalTables::GetStrTable().GetStringFromStrIdx(idx); + annoName = namemangler::DecodeName(annoName); + if (annoName.compare(str) == 0) { + return true; + } + } + return false; +} + +bool ArkAnnotation::IsRCUnowned(const TyIdx &tyIdx) const { + MIRStructType *sType = GetStructType(tyIdx); + return sType == nullptr ? + false : typeNameSetForRCUnowned.find(sType->GetNameStrIdx()) != typeNameSetForRCUnowned.end(); +} + +bool ArkAnnotation::IsRCUnownedCap(const TyIdx &tyIdx) const { + MIRStructType *sType = GetStructType(tyIdx); + return sType == nullptr ? + false : typeNameSetForRCUnownedCap.find(sType->GetNameStrIdx()) != typeNameSetForRCUnownedCap.end(); +} + +bool ArkAnnotation::IsRCUnownedCapList(const TyIdx &tyIdx) const { + MIRStructType *sType = GetStructType(tyIdx); + return sType == nullptr ? + false : typeNameSetForRCUnownedCapList.find(sType->GetNameStrIdx()) != typeNameSetForRCUnownedCapList.end(); +} + +bool ArkAnnotation::IsRCUnownedLocal(const TyIdx &tyIdx) const { + MIRStructType *sType = GetStructType(tyIdx); + return sType == nullptr ? 
+      false : typeNameSetForRCUnownedLocal.find(sType->GetNameStrIdx()) != typeNameSetForRCUnownedLocal.end();
+}
+
+bool ArkAnnotation::IsRCUnownedLocalOld(const TyIdx &tyIdx) const {
+  MIRStructType *sType = GetStructType(tyIdx);
+  return sType == nullptr ?
+      false : typeNameIdxForRCUnownedLocalOld == sType->GetNameStrIdx();
+}
+
+bool ArkAnnotation::IsRCUnownedThis(const TyIdx &tyIdx) const {
+  MIRStructType *sType = GetStructType(tyIdx);
+  return sType == nullptr ?
+      false : typeNameSetForRCUnownedThis.find(sType->GetNameStrIdx()) != typeNameSetForRCUnownedThis.end();
+}
+
+bool ArkAnnotation::IsRCUnownedOuter(const TyIdx &tyIdx) const {
+  MIRStructType *sType = GetStructType(tyIdx);
+  return sType == nullptr ?
+      false : typeNameSetForRCUnownedOuter.find(sType->GetNameStrIdx()) != typeNameSetForRCUnownedOuter.end();
+}
+
+bool ArkAnnotation::IsRCWeak(const TyIdx &tyIdx) const {
+  MIRStructType *sType = GetStructType(tyIdx);
+  return sType == nullptr ?
+      false : typeNameSetForRCWeak.find(sType->GetNameStrIdx()) != typeNameSetForRCWeak.end();
+}
+
+MIRStructType *ArkAnnotation::GetStructType(const TyIdx &tyIdx) {
+  MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx);
+  if (!type->IsMIRClassType() && !type->IsMIRInterfaceType()) {
+    return nullptr;
+  }
+  return static_cast<MIRStructType*>(type);
+}
+} // namespace bc
+} // namespace maple
\ No newline at end of file
diff --git a/src/hir2mpl/bytecode_input/common/src/bc_class.cpp b/src/hir2mpl/bytecode_input/common/src/bc_class.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..1cfb6e5ac6be776790882c66d9bfdc5e29489ab2
--- /dev/null
+++ b/src/hir2mpl/bytecode_input/common/src/bc_class.cpp
@@ -0,0 +1,601 @@
+/*
+ * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */ +#include "bc_class.h" +#include "global_tables.h" +#include "fe_utils_java.h" +#include "fe_utils.h" +#include "fe_manager.h" +#include "fe_options.h" + +namespace maple { +namespace bc { +// ========== BCClassElem ========== +BCClassElem::BCClassElem(const BCClass &klassIn, uint32 acc, const std::string &nameIn, const std::string &descIn) + : klass(klassIn), + accessFlag(acc), + name(nameIn), + descriptor(descIn) {} + +const std::string &BCClassElem::GetName() const { + return name; +} + +const std::string &BCClassElem::GetDescription() const { + return descriptor; +} + +uint32 BCClassElem::GetItemIdx() const { + return GetItemIdxImpl(); +} + +uint32 BCClassElem::GetIdx() const { + return GetIdxImpl(); +} + +uint32 BCClassElem::GetAccessFlag() const { + return accessFlag; +} + +bool BCClassElem::IsStatic() const { + return IsStaticImpl(); +} + +const std::string &BCClassElem::GetClassName() const { + return klass.GetClassName(false); +} + +GStrIdx BCClassElem::GetClassNameMplIdx() const { + return klass.GetClassNameMplIdx(); +} + +const BCClass &BCClassElem::GetBCClass() const { + return klass; +} + +// ========== BCTryInfo ========== +void BCTryInfo::DumpTryCatchInfo(const std::unique_ptr>> &tryInfos) { + DEBUG_STMT( + for (const auto &tryInfo : *tryInfos) { + LogInfo::MapleLogger(kLlDbg) << "tryBlock: [" << std::hex << "0x" << tryInfo->GetStartAddr() << ", 0x" << + tryInfo->GetEndAddr() << ")"; + LogInfo::MapleLogger(kLlDbg) << " handler: "; + for (const auto &e : (*tryInfo->GetCatches())) { + LogInfo::MapleLogger(kLlDbg) << "0x" << e->GetHandlerAddr() << " "; + } + LogInfo::MapleLogger(kLlDbg) << std::endl; + }) +} + +// ========== BCClassMethod ========== +void BCClassMethod::SetMethodInstOffset(const uint16 *pos) { + instPos = pos; +} + +void BCClassMethod::SetRegisterTotalSize(uint16 size) { + registerTotalSize = size; +} + +uint16 BCClassMethod::GetRegisterTotalSize() const{ + return registerTotalSize; +} + +void BCClassMethod::SetCodeOff(uint32 off) { + codeOff = off; +} + +uint32 BCClassMethod::GetCodeOff() const { + return codeOff; +} + +void BCClassMethod::SetRegisterInsSize(uint16 size) { + registerInsSize = size; +} + +bool BCClassMethod::IsVirtual() const { + return IsVirtualImpl(); +} + +bool BCClassMethod::IsNative() const { + return IsNativeImpl(); +} + +bool BCClassMethod::IsInit() const { + return IsInitImpl(); +} + +bool BCClassMethod::IsClinit() const { + return IsClinitImpl(); +} + +std::string BCClassMethod::GetFullName() const { + return GetClassName() + "|" + GetName() + "|" + GetDescription(); +} + +void BCClassMethod::SetSrcPosInfo() { +#ifdef DEBUG + if ((FEOptions::GetInstance().IsDumpComment() || FEOptions::GetInstance().IsDumpLOC()) && + pSrcPosInfo != nullptr) { + uint32 srcPos = 0; + for (auto &[pos, inst] : *pcBCInstructionMap) { + auto it = pSrcPosInfo->find(pos); + if (it != pSrcPosInfo->end()) { + srcPos = it->second; // when pSrcPosInfo is valid, update srcPos + } + inst->SetSrcPositionInfo(klass.GetSrcFileIdx(), srcPos); + } + } +#endif +} + +void BCClassMethod::ProcessInstructions() { + if (pcBCInstructionMap->empty()) { + return; + } + // some instructions depend on exception handler, so we process try-catch info first + ProcessTryCatch(); + visitedPcSet.emplace(pcBCInstructionMap->begin()->first); + GStrIdx methodIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(namemangler::EncodeName(GetFullName())); + for (auto itor = pcBCInstructionMap->begin(); itor != pcBCInstructionMap->end();) { + BCInstruction *inst = itor->second; + 
inst->SetFuncNameIdx(methodIdx);
+    inst->Parse(*this);
+    ++itor;
+    if (inst->IsConditionBranch() || inst->IsGoto() || inst->IsSwitch()) {
+      std::vector<uint32> targets = inst->GetTargets();
+      for (uint32 target : targets) {
+        auto it = pcBCInstructionMap->find(target);
+        CHECK_FATAL(it != pcBCInstructionMap->end(), "Invalid branch target 0x%x in method: %s",
+                    target, GetFullName().c_str());
+        it->second->SetInstructionKind(kTarget);
+        if (visitedPcSet.emplace(target).second == false) {
+          multiInDegreeSet.emplace(target);
+        }
+      }
+      if (inst->IsSwitch() && itor != pcBCInstructionMap->end()) {
+        itor->second->SetInstructionKind(kTarget);
+        inst->SetDefaultTarget(itor->second);
+      }
+    }
+    if (itor == pcBCInstructionMap->end()) {
+      continue;
+    }
+    if (inst->IsFallThru()) {
+      if (visitedPcSet.emplace(itor->first).second == false) {
+        multiInDegreeSet.emplace(itor->first);
+      }
+    }
+    if (itor->second->IsReturn()) {
+      inst->SetReturnInst(itor->second);
+    }
+  }
+  SetSrcPosInfo();
+  if (FEOptions::GetInstance().GetTypeInferKind() == FEOptions::kLinearScan) {
+    TypeInfer();
+  }
+}
+
+void BCClassMethod::ProcessTryCatch() {
+  if (tryInfos == nullptr) {
+    return;
+  }
+  for (const auto &e : *tryInfos) {
+    uint32 tryStart = e->GetStartAddr();
+    uint32 tryEnd = e->GetEndAddr();
+    auto it = pcBCInstructionMap->find(tryStart);
+    CHECK_FATAL(it != pcBCInstructionMap->end(), "Invalid try start pos 0x%x in method: %s", tryStart,
+                GetFullName().c_str());
+    it->second->SetInstructionKind(kTryStart);
+    BCInstruction *inst = it->second;
+    auto it1 = pcBCInstructionMap->find(tryEnd);
+    if (tryStart == tryEnd) {
+      it->second->SetInstructionKind(kTryEnd);
+    } else if (it1 != pcBCInstructionMap->end()) {
+      --it1;
+      it1->second->SetInstructionKind(kTryEnd);
+    } else {
+      // behind the last instruction
+      pcBCInstructionMap->rbegin()->second->SetInstructionKind(kTryEnd);
+    }
+    // Calculate catchable instructions
+    std::list<BCInstruction*> catchableInsts;
+    while (it->second->GetPC() < tryEnd) {
+      it->second->SetCatchable();
+      if (it->second->IsCatchable()) {
+        catchableInsts.emplace_back(it->second);
+      }
+      ++it;
+      if (it == pcBCInstructionMap->end()) {
+        break;
+      }
+    }
+    for (const auto &handler : *(e->GetCatches())) {
+      uint32 handlerPc = handler->GetHandlerAddr();
+      auto elem = pcBCInstructionMap->find(handlerPc);
+      CHECK_FATAL(elem != pcBCInstructionMap->end(), "Invalid catch pos 0x%x in method: %s", handlerPc,
+                  GetFullName().c_str());
+      elem->second->SetInstructionKind(kCatch);
+      elem->second->SetExceptionType(handler->GetExceptionNameIdx());
+      inst->AddHandler(elem->second);
+      for (auto catchableInst : catchableInsts) {
+        catchableInst->AddHandlerTarget(handlerPc);
+        if (visitedPcSet.emplace(handlerPc).second == false) {
+          multiInDegreeSet.emplace(handlerPc);
+        }
+      }
+    }
+  }
+}
+
+// Use linear scan with SSA.
+// We insert a phi behind the def in an edge before the dominance if the reg is alive under that dominance.
+// A reg is dead by default at its definition; we mark a reg alive only when it is used,
+// and insert the reg def-type into `used alive types` if it is defined with a determinate type.
+// Reg live info is transferred through shared memory; live intervals are recorded in dominancesMap and
+// BCRegType::livesBegins. After a multi-in pos, we create a new TypeInferItem for a new live range.
+// `in-edge`s are inserted into dominance `prevs` to record complete reg uses in cycles in one pass.
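A minimal sketch of the idea in the comment block above, not part of the patch and not using the hir2mpl types: each register accumulates the types observed at its uses, a join point with more than one in-edge merges the candidate sets of its predecessors (the "phi"), and the register is then resolved to its most precise candidate. All names here (TypeId, RegState, MergeAtPhi, Precisify) are hypothetical; the real implementation follows in TypeInfer, Traverse, InsertPhi and BCRegType::GetMostPreciseType.

#include <algorithm>
#include <cstdint>
#include <iostream>
#include <set>

// Hypothetical, simplified model only. A smaller TypeId stands for a "more precise"
// type here, playing the role of BCRegTypeItem::IsMorePreciseType.
using TypeId = uint32_t;

struct RegState {
  std::set<TypeId> candidates;  // types observed at the uses of one register
};

// Merge the candidate sets of two predecessors at a pc with in-degree > 1 (the "phi").
RegState MergeAtPhi(const RegState &a, const RegState &b) {
  RegState merged = a;
  merged.candidates.insert(b.candidates.begin(), b.candidates.end());
  return merged;
}

// Resolve the register to its most precise candidate, analogous to GetMostPreciseType.
TypeId Precisify(const RegState &state) {
  return *std::min_element(state.candidates.begin(), state.candidates.end());
}

int main() {
  RegState thenPath{{3, 5}};  // register used as type 3 and type 5 on one path
  RegState elsePath{{2}};     // used as type 2 on the other path
  RegState merged = MergeAtPhi(thenPath, elsePath);
  std::cout << "resolved type id: " << Precisify(merged) << '\n';  // prints 2
  return 0;
}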
+void BCClassMethod::TypeInfer() { + std::set visitedSet; + // [pc, [regNum, TypeInferItem*]] + std::list>> pcDefedRegsList; + // [pc, [regNum, TypeInferItem*]] + std::vector> dominances((*pcBCInstructionMap).rbegin()->first + 1); + std::vector regTypeMap(registerTotalSize, nullptr); + for (auto ® : argRegs) { + regTypeMap[reg->regNum] = ConstructTypeInferItem(allocator, 0, reg.get(), nullptr); + regTypes.emplace_back(reg->regType); + } + visitedSet.emplace(0); + if (multiInDegreeSet.find(0) != multiInDegreeSet.end()) { + std::vector typeMap = ConstructNewRegTypeMap(allocator, 1, regTypeMap); + pcDefedRegsList.emplace_front(0, typeMap); + dominances[0] = typeMap; + } else { + pcDefedRegsList.emplace_front(0, regTypeMap); + } + Traverse(pcDefedRegsList, dominances, visitedSet); + PrecisifyRegType(); +} + +void BCClassMethod::Traverse(std::list>> &pcDefedRegsList, + std::vector> &dominances, std::set &visitedSet) { + while (!pcDefedRegsList.empty()) { + auto head = pcDefedRegsList.front(); + pcDefedRegsList.pop_front(); + BCInstruction *currInst = (*pcBCInstructionMap)[head.first]; + std::vector nextRegTypeMap = head.second; + auto usedRegs = currInst->GetUsedRegs(); + for (auto usedReg : *usedRegs) { + auto defedItem = nextRegTypeMap[usedReg->regNum]; + CHECK_FATAL(defedItem != nullptr && defedItem->reg != nullptr, + "Cannot find Reg%u defination at 0x%x:0x%x in method %s", + usedReg->regNum, head.first, currInst->GetOpcode(), GetFullName().c_str()); + usedReg->regValue = defedItem->reg->regValue; + usedReg->regType = defedItem->reg->regType; + currInst->SetRegTypeInTypeInfer(); + // Make the reg alive when it used, live range [defPos, usePos] + usedReg->regTypeItem->SetPos(currInst->GetPC()); + defedItem->InsertUniqueAliveType(nullptr, usedReg->regTypeItem); + } + auto defedRegs = currInst->GetDefedRegs(); + auto exHandlerTargets = currInst->GetHandlerTargets(); + uint32 next = currInst->GetPC() + currInst->GetWidth(); + for (auto exHandlerTarget : exHandlerTargets) { + if (visitedSet.emplace(exHandlerTarget).second == false) { + auto &domIt = dominances[exHandlerTarget]; + InsertPhi(domIt, nextRegTypeMap); + continue; + } + if ((multiInDegreeSet.find(exHandlerTarget) != multiInDegreeSet.end())) { + std::vector typeMap = ConstructNewRegTypeMap(allocator, exHandlerTarget + 1, nextRegTypeMap); + pcDefedRegsList.emplace_front(exHandlerTarget, typeMap); + dominances[exHandlerTarget] = typeMap; + } else { + pcDefedRegsList.emplace_front(exHandlerTarget, nextRegTypeMap); + } + Traverse(pcDefedRegsList, dominances, visitedSet); + } + for (auto defedReg : *defedRegs) { + TypeInferItem *item = ConstructTypeInferItem(allocator, currInst->GetPC() + 1, defedReg, nullptr); + nextRegTypeMap[defedReg->regNum] = item; + defedReg->regType->SetPos(currInst->GetPC()); + regTypes.emplace_back(defedReg->regType); + } + if (currInst->IsFallThru() && next <= pcBCInstructionMap->rbegin()->first) { + if (visitedSet.emplace(next).second == false) { + auto &domIt = dominances[next]; + InsertPhi(domIt, nextRegTypeMap); + } else { + if (multiInDegreeSet.find(next) != multiInDegreeSet.end()) { + std::vector typeMap = ConstructNewRegTypeMap(allocator, next + 1, nextRegTypeMap); + pcDefedRegsList.emplace_front(next, typeMap); + dominances[next] = typeMap; + } else { + pcDefedRegsList.emplace_front(next, nextRegTypeMap); + } + // Use stack to replace recursive call, avoid `call stack` overflow in long `fallthru` code. + // And `fallthu` instructions can not disrupt visit order. 
+ if (currInst->IsConditionBranch() || currInst->IsSwitch()) { + Traverse(pcDefedRegsList, dominances, visitedSet); + } + } + } + auto normalTargets = currInst->GetTargets(); + for (auto normalTarget : normalTargets) { + if (visitedSet.emplace(normalTarget).second == false) { + auto &domIt = dominances[normalTarget]; + InsertPhi(domIt, nextRegTypeMap); + continue; + } + if (multiInDegreeSet.find(normalTarget) != multiInDegreeSet.end()) { + std::vector typeMap = ConstructNewRegTypeMap(allocator, normalTarget + 1, nextRegTypeMap); + pcDefedRegsList.emplace_front(normalTarget, typeMap); + dominances[normalTarget] = typeMap; + } else { + pcDefedRegsList.emplace_front(normalTarget, nextRegTypeMap); + } + Traverse(pcDefedRegsList, dominances, visitedSet); + } + } +} + +void BCClassMethod::InsertPhi(const std::vector &dom, std::vector &src) { + for (auto &d : dom) { + if (d == nullptr) { + continue; + } + auto srcItem = src[d->reg->regNum]; + if (srcItem == d) { + continue; + } + if (d->RegisterInPrevs(srcItem) == false) { + continue; + } + + if (d->isAlive == false) { + continue; + } + CHECK_FATAL(srcItem != nullptr, "ByteCode RA error."); + for (auto ty : *(d->aliveUsedTypes)) { + ty->SetDom(true); + } + srcItem->InsertUniqueAliveTypes(d, d->aliveUsedTypes); + } +} + +TypeInferItem *BCClassMethod::ConstructTypeInferItem( + MapleAllocator &alloc, uint32 pos, BCReg *bcReg, TypeInferItem *prev) { + TypeInferItem *item = alloc.GetMemPool()->New(alloc, pos, bcReg, prev); + return item; +} + +std::vector BCClassMethod::ConstructNewRegTypeMap(MapleAllocator &alloc, uint32 pos, + const std::vector ®TypeMap) { + std::vector res(regTypeMap.size(), nullptr); + size_t i = 0; + for (const auto &elem : regTypeMap) { + if (elem != nullptr) { + res[i] = ConstructTypeInferItem(alloc, pos, elem->reg, elem); + } + ++i; + } + return res; +} + +std::list BCClassMethod::GenReTypeStmtsThroughArgs() const { + std::list stmts; + for (const auto &argReg : argRegs) { + std::list stmts0 = argReg->GenRetypeStmtsAfterDef(); + for (auto &stmt : stmts0) { + stmts.emplace_back(std::move(stmt)); + } + } + return stmts; +} + +void BCClassMethod::PrecisifyRegType() { + for (auto elem : regTypes) { + elem->PrecisifyTypes(); + } +} + +std::list BCClassMethod::EmitInstructionsToFEIR() const { + std::list stmts; + if (!HasCode()) { + return stmts; // Skip abstract and native method, not emit it to mpl but mplt. 
+ } + if (IsStatic() && GetName().compare("") != 0) { // Not insert JAVA_CLINIT_CHECK in + GStrIdx containerNameIdx = GetClassNameMplIdx(); + uint32 typeID = UINT32_MAX; + if (FEOptions::GetInstance().IsAOT()) { + const std::string &mplClassName = GetBCClass().GetClassName(true); + int32 dexFileHashCode = GetBCClass().GetBCParser().GetFileNameHashId(); + typeID = FEManager::GetTypeManager().GetTypeIDFromMplClassName(mplClassName, dexFileHashCode); + } + UniqueFEIRStmt stmt = + std::make_unique(INTRN_JAVA_CLINIT_CHECK, + std::make_unique(PTY_ref, containerNameIdx), + nullptr, typeID); + stmts.emplace_back(std::move(stmt)); + } + std::map targetFEIRStmtMap; + std::list gotoFEIRStmts; + std::list switchFEIRStmts; + std::list retypeStmts = GenReTypeStmtsThroughArgs(); + for (auto &stmt : retypeStmts) { + stmts.emplace_back(std::move(stmt)); + } + for (const auto &elem : *pcBCInstructionMap) { + std::list instStmts = elem.second->EmitToFEIRStmts(); + for (auto &e : instStmts) { + if (e->GetKind() == kStmtPesudoLabel) { + targetFEIRStmtMap.emplace(static_cast(e.get())->GetPos(), + static_cast(e.get())); + } + if (e->GetKind() == kStmtGoto || e->GetKind() == kStmtCondGoto) { + gotoFEIRStmts.emplace_back(static_cast(e.get())); + } + if (e->GetKind() == kStmtSwitch) { + switchFEIRStmts.emplace_back(static_cast(e.get())); + } + stmts.emplace_back(std::move(e)); + } + } + LinkJumpTarget(targetFEIRStmtMap, gotoFEIRStmts, switchFEIRStmts); // Link jump target + return stmts; +} + +void BCClassMethod::LinkJumpTarget(const std::map &targetFEIRStmtMap, + const std::list &gotoFEIRStmts, + const std::list &switchFEIRStmts) { + for (auto &e : gotoFEIRStmts) { + auto target = targetFEIRStmtMap.find(e->GetTarget()); + CHECK_FATAL(target != targetFEIRStmtMap.end(), "Cannot find the target for goto/condGoto"); + e->SetStmtTarget(*(target->second)); + } + for (auto &e : switchFEIRStmts) { + uint32 label = e->GetDefaultLabelIdx(); + if (label != UINT32_MAX) { + auto defaultTarget = targetFEIRStmtMap.find(label); + CHECK_FATAL(defaultTarget != targetFEIRStmtMap.end(), "Cannot find the default target for Switch"); + e->SetDefaultTarget(defaultTarget->second); + } + for (const auto &valueLabel : e->GetMapValueLabelIdx()) { + auto target = targetFEIRStmtMap.find(valueLabel.second); + CHECK_FATAL(target != targetFEIRStmtMap.end(), "Cannot find the target for Switch"); + e->AddTarget(valueLabel.first, target->second); + } + } +} + +void BCClassMethod::DumpBCInstructionMap() const { + // Only used in DEBUG manually + DEBUG_STMT( + uint32 idx = 0; + for (const auto &[pos, instPtr] : *pcBCInstructionMap) { + LogInfo::MapleLogger(kLlDbg) << "index: " << std::dec << idx++ << " pc: 0x" << std::hex << pos << + " opcode: 0x" << instPtr->GetOpcode() << std::endl; + }); +} + +const uint16 *BCClassMethod::GetInstPos() const { + return instPos; +} + +std::vector> BCClassMethod::GenArgVarList() const { + return GenArgVarListImpl(); +} + +void BCClassMethod::GenArgRegs() { + return GenArgRegsImpl(); +} + +// ========== BCCatchInfo ========== +BCCatchInfo::BCCatchInfo(uint32 handlerAddrIn, const GStrIdx &argExceptionNameIdx, bool iscatchAllIn) + : handlerAddr(handlerAddrIn), + exceptionNameIdx(argExceptionNameIdx), + isCatchAll(iscatchAllIn) {} +// ========== BCClass ========== +void BCClass::SetSrcFileInfo(const std::string &name) { + srcFileNameIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(name); + srcFileIdx = FEManager::GetManager().RegisterSourceFileIdx(srcFileNameIdx); +} + +void BCClass::SetSuperClasses(const 
std::list &names) { + if (names.empty()) { + return; // No parent class + } + superClassNameList = names; +} + +void BCClass::SetInterface(const std::string &name) { + interfaces.push_back(name); +} + +void BCClass::SetAccFlag(uint32 flag) { + accFlag = flag; +} + +void BCClass::SetField(std::unique_ptr field) { + fields.push_back(std::move(field)); +} + +void BCClass::SetMethod(std::unique_ptr method) { + std::lock_guard lock(bcClassMtx); + methods.push_back(std::move(method)); +} + +void BCClass::InsertFinalStaticStringID(uint32 stringID) { + finalStaticStringID.push_back(stringID); +} + +void BCClass::SetClassName(const std::string &classNameOrin) { + classNameOrinIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(classNameOrin); + classNameMplIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(namemangler::EncodeName(classNameOrin)); +} + +const std::unique_ptr &BCClass::GetAnnotationsDirectory() const { + return annotationsDirectory; +} + +void BCClass::SetAnnotationsDirectory(std::unique_ptr annotationsDirectoryIn) { + annotationsDirectory = std::move(annotationsDirectoryIn); +} + +std::vector BCClass::GetStaticFieldsConstVal() const { + return staticFieldsConstVal; +} + +void BCClass::InsertStaticFieldConstVal(MIRConst *cst) { + staticFieldsConstVal.push_back(cst); +} + +const std::string &BCClass::GetClassName(bool mapled) const { + return mapled ? GlobalTables::GetStrTable().GetStringFromStrIdx(classNameMplIdx) : + GlobalTables::GetStrTable().GetStringFromStrIdx(classNameOrinIdx); +} + +const std::list &BCClass::GetSuperClassNames() const { + return superClassNameList; +} + +const std::vector &BCClass::GetSuperInterfaceNames() const { + return interfaces; +} + +std::string BCClass::GetSourceFileName() const { + return GlobalTables::GetStrTable().GetStringFromStrIdx(srcFileNameIdx); +} + +GStrIdx BCClass::GetIRSrcFileSigIdx() const { + return irSrcFileSigIdx; +} + +int32 BCClass::GetFileNameHashId() const { + return parser.GetFileNameHashId(); +} + +uint32 BCClass::GetAccessFlag() const { + return accFlag; +} + +const std::vector> &BCClass::GetFields() const { + return fields; +} + +std::vector> &BCClass::GetMethods() { + return methods; +} +} // namespace bc +} // namespace maple diff --git a/src/hir2mpl/bytecode_input/common/src/bc_class2fe_helper.cpp b/src/hir2mpl/bytecode_input/common/src/bc_class2fe_helper.cpp new file mode 100644 index 0000000000000000000000000000000000000000..16454593af3942f3d6867afeaa99cbd63fac2cbf --- /dev/null +++ b/src/hir2mpl/bytecode_input/common/src/bc_class2fe_helper.cpp @@ -0,0 +1,308 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "bc_class2fe_helper.h" +#include "fe_macros.h" +#include "fe_manager.h" +#include "fe_utils_java.h" +namespace maple { +namespace bc { +BCClass2FEHelper::BCClass2FEHelper(MapleAllocator &allocator, bc::BCClass &klassIn) + : FEInputStructHelper(allocator), klass(klassIn) { + srcLang = kSrcLangJava; +} + +std::string BCClass2FEHelper::GetStructNameOrinImpl() const { + return klass.GetClassName(false); +} + +std::string BCClass2FEHelper::GetStructNameMplImpl() const { + return klass.GetClassName(true); +} + +std::list BCClass2FEHelper::GetSuperClassNamesImpl() const { + return klass.GetSuperClassNames(); +} + +std::vector BCClass2FEHelper::GetInterfaceNamesImpl() const { + return klass.GetSuperInterfaceNames(); +} + +std::string BCClass2FEHelper::GetSourceFileNameImpl() const { + return klass.GetSourceFileName(); +} + +void BCClass2FEHelper::TryMarkMultiDefClass(MIRStructType &typeImported) const { + MIRTypeKind kind = typeImported.GetKind(); + uint32 typeSrcSigIdx = 0; + static const GStrIdx keySignatureStrIdx = + GlobalTables::GetStrTable().GetStrIdxFromName("INFO_ir_srcfile_signature"); + if (kind == kTypeClass || kind == kTypeClassIncomplete) { + auto &classType = static_cast(typeImported); + typeSrcSigIdx = classType.GetInfo(keySignatureStrIdx); + } else if (kind == kTypeInterface || kind == kTypeInterfaceIncomplete) { + auto &interfaceType = static_cast(typeImported); + typeSrcSigIdx = interfaceType.GetInfo(keySignatureStrIdx); + } + // Find type definition in both dependent libraries and current compilation unit, mark the type multidef, + // and we will mark all it's methods multidef later. + // MiddleEnd will NOT inline the caller and callee with multidef attr. + if (typeSrcSigIdx != 0 && klass.GetIRSrcFileSigIdx() != typeSrcSigIdx) { + klass.SetIsMultiDef(true); + } +} + +MIRStructType *BCClass2FEHelper::CreateMIRStructTypeImpl(bool &error) const { + std::string classNameMpl = GetStructNameMplImpl(); + if (classNameMpl.empty()) { + error = true; + ERR(kLncErr, "class name is empty"); + return nullptr; + } + FE_INFO_LEVEL(FEOptions::kDumpLevelInfoDetail, "CreateMIRStrucType for %s", classNameMpl.c_str()); + GStrIdx nameIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(classNameMpl); + MIRStructType *typeImported = FEManager::GetTypeManager().GetImportedType(nameIdx); + if (typeImported != nullptr) { + TryMarkMultiDefClass(*typeImported); + } + + bool isCreate = false; + MIRStructType *type = FEManager::GetTypeManager().GetOrCreateClassOrInterfaceType(nameIdx, klass.IsInterface(), + FETypeFlag::kSrcInput, isCreate); + error = false; + // fill global type name table + GStrIdx typeNameIdx = type->GetNameStrIdx(); + TyIdx prevTyIdx = GlobalTables::GetTypeNameTable().GetTyIdxFromGStrIdx(typeNameIdx); + if (prevTyIdx == TyIdx(0)) { + GlobalTables::GetTypeNameTable().SetGStrIdxToTyIdx(typeNameIdx, type->GetTypeIndex()); + } + // setup eh root type + if (FEManager::GetModule().GetThrowableTyIdx() == 0 && + (type->GetKind() == kTypeClass || type->GetKind() == kTypeClassIncomplete)) { + GStrIdx ehTypeNameIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName( + namemangler::GetInternalNameLiteral(namemangler::kJavaLangObjectStr)); + if (ehTypeNameIdx == type->GetNameStrIdx()) { + FEManager::GetModule().SetThrowableTyIdx(type->GetTypeIndex()); + } + } + return isCreate ? 
type : nullptr; +} + +uint64 BCClass2FEHelper::GetRawAccessFlagsImpl() const { + return static_cast(klass.GetAccessFlag()); +} + +GStrIdx BCClass2FEHelper::GetIRSrcFileSigIdxImpl() const { + return klass.GetIRSrcFileSigIdx(); +} + +bool BCClass2FEHelper::IsMultiDefImpl() const { + return klass.IsMultiDef(); +} + +std::string BCClass2FEHelper::GetSrcFileNameImpl() const { + return klass.GetSourceFileName(); +} + +// ========== BCClassField2FEHelper ========== +FieldAttrs BCClassField2FEHelper::AccessFlag2Attribute(uint32 accessFlag) const { + return AccessFlag2AttributeImpl(accessFlag); +} + +bool BCClassField2FEHelper::ProcessDeclImpl(MapleAllocator &allocator) { + (void) allocator; + CHECK_FATAL(false, "should not run here"); + return false; +} + +bool BCClassField2FEHelper::ProcessDeclWithContainerImpl(MapleAllocator &allocator) { + (void) allocator; + std::string klassNameMpl; + std::string fieldNameMpl; + std::string typeNameMpl; + bool isStatic = field.IsStatic(); + uint64 mapIdx = (static_cast(field.GetBCClass().GetBCParser().GetReader()->GetFileIndex()) << 32) | + field.GetIdx(); + StructElemNameIdx *structElemNameIdx = FEManager::GetManager().GetFieldStructElemNameIdx(mapIdx); + if (structElemNameIdx == nullptr) { + klassNameMpl = namemangler::EncodeName(field.GetClassName()); + fieldNameMpl = namemangler::EncodeName(field.GetName()); + typeNameMpl = namemangler::EncodeName(field.GetDescription()); + if (fieldNameMpl.empty()) { + ERR(kLncErr, "invalid name for %s field: %u in class %s", field.IsStatic() ? "static" : "instance", + field.GetItemIdx(), klassNameMpl.c_str()); + return false; + } + if (typeNameMpl.empty()) { + ERR(kLncErr, "invalid descriptor for %s field: %u in class %s", field.IsStatic() ? "static" : "instance", + field.GetItemIdx(), klassNameMpl.c_str()); + return false; + } + structElemNameIdx = FEManager::GetManager().GetStructElemMempool()->New( + GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(klassNameMpl), + GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(fieldNameMpl), + GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(typeNameMpl), + GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(klassNameMpl + namemangler::kNameSplitterStr + + fieldNameMpl + namemangler::kNameSplitterStr + typeNameMpl)); + FEManager::GetManager().SetFieldStructElemNameIdx(mapIdx, *structElemNameIdx); + } else { + klassNameMpl = GlobalTables::GetStrTable().GetStringFromStrIdx(structElemNameIdx->klass); + fieldNameMpl = GlobalTables::GetStrTable().GetStringFromStrIdx(structElemNameIdx->elem); + typeNameMpl = GlobalTables::GetStrTable().GetStringFromStrIdx(structElemNameIdx->type); + } + FEStructElemInfo *elemInfo = FEManager::GetTypeManager().RegisterStructFieldInfo( + *structElemNameIdx, kSrcLangJava, isStatic); + elemInfo->SetDefined(); + elemInfo->SetFromDex(); + // control anti-proguard through FEOptions only. + FEOptions::ModeJavaStaticFieldName modeStaticField = FEOptions::GetInstance().GetModeJavaStaticFieldName(); + bool withType = (modeStaticField == FEOptions::kAllType) || + (isStatic && modeStaticField == FEOptions::kSmart); + GStrIdx idx; + if (!isStatic && !withType) { + idx = structElemNameIdx->elem; + } else if (isStatic && withType) { + idx = structElemNameIdx->full; + } else { + std::string name = isStatic ? (klassNameMpl + namemangler::kNameSplitterStr) : ""; + name += fieldNameMpl; + name += withType ? 
(namemangler::kNameSplitterStr + typeNameMpl) : ""; + idx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(name); + } + FieldAttrs attrs = AccessFlag2Attribute(field.GetAccessFlag()); + MIRType *fieldType = FEManager::GetTypeManager().GetOrCreateTypeFromName(typeNameMpl, FETypeFlag::kSrcUnknown, true); + ASSERT(fieldType != nullptr, "nullptr check for fieldType"); + mirFieldPair.first = idx; + mirFieldPair.second.first = fieldType->GetTypeIndex(); + mirFieldPair.second.second = attrs; + return true; +} + +// ========== BCClassMethod2FEHelper ========== +BCClassMethod2FEHelper::BCClassMethod2FEHelper(MapleAllocator &allocator, std::unique_ptr &methodIn) + : FEInputMethodHelper(allocator), + method(methodIn) { + srcLang = kSrcLangJava; +} + +bool BCClassMethod2FEHelper::ProcessDeclImpl(MapleAllocator &allocator) { + uint64 mapIdx = (static_cast(method->GetBCClass().GetBCParser().GetReader()->GetFileIndex()) << 32) | + method->GetIdx(); + StructElemNameIdx *structElemNameIdx = FEManager::GetManager().GetMethodStructElemNameIdx(mapIdx); + if (structElemNameIdx == nullptr) { + structElemNameIdx = FEManager::GetManager().GetStructElemMempool()->New( + method->GetClassName(), method->GetName(), method->GetDescription()); + FEManager::GetManager().SetMethodStructElemNameIdx(mapIdx, *structElemNameIdx); + } + const std::string &methodShortName = method->GetName(); + CHECK_FATAL(!methodShortName.empty(), "error: method name is empty"); + if (methodShortName.compare("main") == 0) { + FEManager::GetMIRBuilder().GetMirModule().SetEntryFuncName( + GlobalTables::GetStrTable().GetStringFromStrIdx(structElemNameIdx->full)); + } + methodNameIdx = structElemNameIdx->full; + SolveReturnAndArgTypes(allocator); + FuncAttrs attrs = GetAttrs(); + bool isStatic = IsStatic(); + bool isVarg = IsVarg(); + CHECK_FATAL(retType != nullptr, "function must have return type"); + MIRType *mirReturnType = nullptr; + bool usePtr = (srcLang == kSrcLangJava); + if (retType->GetPrimType() == PTY_void) { + mirReturnType = retType->GenerateMIRType(srcLang, false); + } else { + mirReturnType = retType->GenerateMIRType(srcLang, usePtr); + } + ASSERT(mirReturnType != nullptr, "return type is nullptr"); + std::vector argsTypeIdx; + for (FEIRType *type : argTypes) { + MIRType *argType = type->GenerateMIRType(srcLang, usePtr); + argsTypeIdx.emplace_back(argType->GetTypeIndex()); + } + FEStructElemInfo *elemInfo = FEManager::GetTypeManager().RegisterStructMethodInfo( + *structElemNameIdx, kSrcLangJava, isStatic); + elemInfo->SetDefined(); + elemInfo->SetFromDex(); + mirFunc = FEManager::GetTypeManager().CreateFunction(methodNameIdx, mirReturnType->GetTypeIndex(), + argsTypeIdx, isVarg, isStatic); + mirMethodPair.first = mirFunc->GetStIdx(); + mirMethodPair.second.first = mirFunc->GetMIRFuncType()->GetTypeIndex(); + mirMethodPair.second.second = attrs; + mirFunc->SetFuncAttrs(attrs); + return true; +} + +std::string BCClassMethod2FEHelper::GetMethodNameImpl(bool inMpl, bool full) const { + std::string klassName = method->GetClassName(); + std::string methodName = method->GetName(); + if (!full) { + return inMpl ? namemangler::EncodeName(methodName) : methodName; + } + std::string descName = method->GetDescription(); + std::string fullName = klassName + "|" + methodName + "|" + descName; + return inMpl ? 
namemangler::EncodeName(fullName) : fullName; +} + +void BCClassMethod2FEHelper::SolveReturnAndArgTypesImpl(MapleAllocator &allocator) { + MemPool *mp = allocator.GetMemPool(); + ASSERT(mp != nullptr, "mempool is nullptr"); + std::string klassName = method->GetClassName(); + std::string methodName = GetMethodName(false); + if (HasThis()) { + FEIRTypeDefault *type = mp->New(); + type->LoadFromJavaTypeName(klassName, false); + argTypes.push_back(type); + } + const std::vector &returnAndArgTypeNames = FEUtilJava::SolveMethodSignature(methodName); + bool first = true; + for (const std::string &typeName : returnAndArgTypeNames) { + FEIRTypeDefault *type = mp->New(); + type->LoadFromJavaTypeName(typeName, false); + if (first) { + retType = type; + first = false; + } else { + argTypes.push_back(type); + } + } +} + +bool BCClassMethod2FEHelper::IsVargImpl() const { + return false; // No variable arguments +} + +bool BCClassMethod2FEHelper::HasThisImpl() const { + return !IsStatic(); +} + +MIRType *BCClassMethod2FEHelper::GetTypeForThisImpl() const { + FEIRTypeDefault type; + const std::string &klassName = method->GetClassName(); + type.LoadFromJavaTypeName(klassName, false); + return type.GenerateMIRType(true); +} + +bool BCClassMethod2FEHelper::IsVirtualImpl() const { + return method->IsVirtual(); +} + +bool BCClassMethod2FEHelper::IsNativeImpl() const { + return method->IsNative(); +} + +bool BCClassMethod2FEHelper::HasCodeImpl() const { + return method->HasCode(); +} +} // namespace bc +} // namespace maple diff --git a/src/hir2mpl/bytecode_input/common/src/bc_function.cpp b/src/hir2mpl/bytecode_input/common/src/bc_function.cpp new file mode 100644 index 0000000000000000000000000000000000000000..9c5633b0992ab125b8f2309312418ac7a24c19bd --- /dev/null +++ b/src/hir2mpl/bytecode_input/common/src/bc_function.cpp @@ -0,0 +1,152 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "bc_function.h" +#include "fe_macros.h" +#include "fe_manager.h" +#include "feir_type_helper.h" +#include "feir_builder.h" +#include "rc_setter.h" + +namespace maple { +namespace bc { +BCFunction::BCFunction(const BCClassMethod2FEHelper &argMethodHelper, MIRFunction &mirFunc, + const std::unique_ptr &argPhaseResultTotal) + : FEFunction(mirFunc, argPhaseResultTotal), + methodHelper(argMethodHelper), + method(methodHelper.GetMethod()) {} + +void BCFunction::PreProcessImpl() { + ; // Empty +} + +void BCFunction::InitImpl() { + FEFunction::InitImpl(); +} + +void BCFunction::SetMIRFunctionInfo() { + GStrIdx idx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(method->GetFullName()); + SET_FUNC_INFO_PAIR(mirFunction, "INFO_fullname", idx, true); + idx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(method->GetClassName()); + SET_FUNC_INFO_PAIR(mirFunction, "INFO_classname", idx, true); + idx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(method->GetName()); + SET_FUNC_INFO_PAIR(mirFunction, "INFO_funcname", idx, true); + SET_FUNC_INFO_PAIR(mirFunction, "INFO_methodidx", method->GetIdx(), false); + SET_FUNC_INFO_PAIR(mirFunction, "INFO_registers", method->GetRegisterTotalSize(), false); + SET_FUNC_INFO_PAIR(mirFunction, "INFO_tries_size", method->GetTriesSize(), false); + SET_FUNC_INFO_PAIR(mirFunction, "INFO_dexthisreg", method->GetThisRegNum(), false); + SET_FUNC_INFO_PAIR(mirFunction, "INFO_codeoff", method->GetCodeOff(), false); +} + +bool BCFunction::ProcessImpl() { + FE_INFO_LEVEL(FEOptions::kDumpLevelInfoDetail, "BCFunction::Process() for %s", method->GetFullName().c_str()); + bool success = true; + method->GetBCClass().GetBCParser().ProcessMethodBody(*method, + method->GetBCClass().GetClassIdx(), + method->GetItemIdx(), + method->IsVirtual()); + SetMIRFunctionInfo(); + success = success && GenerateArgVarList("gen arg var list"); + success = success && EmitToFEIRStmt("emit to feir"); + success = success && ProcessFEIRFunction(); + if (!success) { + error = true; + ERR(kLncErr, "BCFunction::Process() failed for %s", method->GetFullName().c_str()); + } + return success; +} + +bool BCFunction::GenerateAliasVars(const std::string &phaseName) { + phaseResult.RegisterPhaseNameAndStart(phaseName); + if (method == nullptr || method->GetSrcLocalInfoPtr() == nullptr) { + return phaseResult.Finish(true); + } + // map>> + for (auto &local : *method->GetSrcLocalInfoPtr()) { + for (auto &item : local.second) { + if (std::get<0>(item) == "this") { + continue; + } + UniqueFEIRType type = FEIRTypeHelper::CreateTypeByJavaName(std::get<1>(item), false, false); + MIRType *mirType = FEManager::GetTypeManager().GetOrCreateTypeFromName( + namemangler::EncodeName(std::get<1>(item)), FETypeFlag::kSrcUnknown, true); + UniqueFEIRVar localVar = FEIRBuilder::CreateVarReg(local.first, std::move(type)); + GStrIdx nameIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName( + namemangler::EncodeName(std::get<0>(item))); + MIRAliasVars aliasVar; + aliasVar.mplStrIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(localVar->GetName(*mirType)); + aliasVar.atk = kATKType; + aliasVar.index = mirType->GetTypeIndex().GetIdx(); + aliasVar.isLocal = !localVar->IsGlobal(); + if (!std::get<2>(item).empty()) { + aliasVar.sigStrIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(std::get<2>(item)); + } + mirFunction.SetAliasVarMap(nameIdx, aliasVar); + } + } + return phaseResult.Finish(true); +} + +bool BCFunction::ProcessFEIRFunction() { + bool success = true; + success = 
success && UpdateRegNum2This("fe/update reg num to this pointer"); + return success; +} + +bool BCFunction::GenerateArgVarList(const std::string &phaseName) { + phaseResult.RegisterPhaseNameAndStart(phaseName); + argVarList = method->GenArgVarList(); + return phaseResult.Finish(); +} + +bool BCFunction::EmitToFEIRStmt(const std::string &phaseName) { + phaseResult.RegisterPhaseNameAndStart(phaseName); + std::list feirStmts = method->EmitInstructionsToFEIR(); + AppendFEIRStmts(feirStmts); + return phaseResult.Finish(true); +} + +void BCFunction::FinishImpl() { + (void)UpdateFormal("finish/update formal"); + (void)EmitToMIR("finish/emit to mir"); + (void)GenerateAliasVars("finish/generate alias vars"); + if (FEOptions::GetInstance().IsRC()) { + RCSetter::GetRCSetter().GetUnownedVarInLocalVars(*method, mirFunction); + } + bool recordTime = FEOptions::GetInstance().IsDumpPhaseTime() || FEOptions::GetInstance().IsDumpPhaseTimeDetail(); + if (phaseResultTotal != nullptr && recordTime) { + phaseResultTotal->Combine(phaseResult); + } + if (FEOptions::GetInstance().IsDumpPhaseTimeDetail()) { + INFO(kLncInfo, "[PhaseTime] function: %s", method->GetFullName().c_str()); + phaseResult.Dump(); + } + method->ReleaseMempool(); + BCClassMethod *methodPtr = method.release(); + delete methodPtr; +} + +bool BCFunction::EmitToMIR(const std::string &phaseName) { + phaseResult.RegisterPhaseNameAndStart(phaseName); + // Not gen funcbody for abstract method + if (methodHelper.HasCode() || methodHelper.IsNative()) { + mirFunction.NewBody(); + FEManager::GetMIRBuilder().SetCurrentFunction(mirFunction); + FEManager::SetCurrentFEFunction(*this); + EmitToMIRStmt(); + } + return phaseResult.Finish(); +} +} // namespace bc +} // namespace maple diff --git a/src/hir2mpl/bytecode_input/common/src/bc_instruction.cpp b/src/hir2mpl/bytecode_input/common/src/bc_instruction.cpp new file mode 100644 index 0000000000000000000000000000000000000000..3edcf1646e55ea8dced98ceac506c33f0f8d2487 --- /dev/null +++ b/src/hir2mpl/bytecode_input/common/src/bc_instruction.cpp @@ -0,0 +1,562 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "bc_instruction.h" +#include +#include "bc_class.h" +#include "feir_builder.h" +#include "fe_manager.h" +#include "fe_options.h" + +namespace maple { +namespace bc { +void BCInstruction::InitBCInStruction(uint16 kind, bool wide, bool throwable) { + instKind = static_cast(instKind | kind); + isWide = wide; + isThrowable = throwable; +} + +void BCInstruction::SetWidth(uint8 size) { + width = size; +} + +uint8 BCInstruction::GetWidth() const { + return width; +} + +void BCInstruction::SetCatchable() { + isCatchable = isThrowable; +} + +bool BCInstruction::IsCatchable() const { + return isCatchable; +} + +BCInstructionKind BCInstruction::GetInstKind() const { + return instKind; +} + +bool BCInstruction::IsWide() const { + return isWide; +} + +void BCInstruction::SetInstructionKind(BCInstructionKind kind) { + instKind = static_cast(static_cast(instKind) | static_cast(kind)); +} + +void BCInstruction::Parse(BCClassMethod &method) { + ParseImpl(method); +} + +void BCInstruction::SetExceptionType(const GStrIdx &typeNameIdx) { + catchedExTypeNamesIdx.emplace(typeNameIdx); + SetBCRegType(*this); +} + +std::list BCInstruction::EmitToFEIRStmts() { + std::list stmts; + // Do not modify following stmt order + GenCommentStmt(stmts); + if ((instKind & kTarget) || (instKind & kCatch)) { + if (instKind & kCatch) { + UniqueFEIRStmt stmt = GenCatchStmt(); + stmts.emplace_back(std::move(stmt)); + } else { + UniqueFEIRStmt stmt = GenLabelStmt(); + stmts.emplace_back(std::move(stmt)); + } + } + if (instKind & kTryStart) { + UniqueFEIRStmt stmt = GenTryLabelStmt(); + stmts.emplace_back(std::move(stmt)); + } + std::list instStmts = EmitToFEIRStmtsImpl(); + for (auto it = instStmts.begin(); it != instStmts.end(); ++it) { + it->get()->SetThrowable(isThrowable); + if (FEOptions::GetInstance().IsAOT()) { + it->get()->SetHexPC(pc); + } + stmts.emplace_back(std::move(*it)); + } + std::list retypeStmts = GenRetypeStmtsAfterDef(); + for (auto it = retypeStmts.begin(); it != retypeStmts.end(); ++it) { + stmts.emplace_back(std::move(*it)); + } + if (instKind & kTryEnd) { + UniqueFEIRStmt stmt = GenTryEndLabelStmt(); + stmts.emplace_back(std::move(stmt)); + } + SetSrcFileInfo(stmts); + return stmts; +} + +void BCInstruction::SetSrcFileInfo(std::list &stmts) const { +#ifdef DEBUG + if (FEOptions::GetInstance().IsDumpLOC() && !stmts.empty()) { + Loc loc = {srcFileIdx, srcFileLineNum, 0}; + (*stmts.begin())->SetSrcLoc(loc); + } +#else + (void) stmts; +#endif +} + +bool BCInstruction::IsFallThru() const { + return instKind & kFallThru; +} + +bool BCInstruction::IsConditionBranch() const { + return instKind & kConditionBranch; +} + +bool BCInstruction::IsGoto() const { + return instKind & kGoto; +} + +bool BCInstruction::IsSwitch() const { + return instKind & kSwitch; +} + +bool BCInstruction::IsTarget() const { + return instKind & kTarget; +} + +bool BCInstruction::IsTryStart() const { + return instKind & kTryStart; +} + +bool BCInstruction::IsTryEnd() const { + return instKind & kTryEnd; +} + +bool BCInstruction::IsCatch() const { + return instKind & kCatch; +} + +void BCInstruction::SetReturnInst(BCInstruction *inst) { + inst->SetBCRegType(*this); + returnInst = inst; +} + +void BCInstruction::SetBCRegType(const BCInstruction &inst) { + SetBCRegTypeImpl(inst); +} + +bool BCInstruction::HasReturn() const { + return returnInst != nullptr; +} + +bool BCInstruction::IsReturn() const { + return isReturn; +} + +uint32 BCInstruction::GetPC() const { + return pc; +} + +uint8 BCInstruction::GetOpcode() const { + return 
opcode; +} + +std::vector BCInstruction::GetTargets() const { + return GetTargetsImpl(); +} + +void BCInstruction::SetDefaultTarget(BCInstruction *inst) { + defaultTarget = inst; +} + +void BCInstruction::AddHandler(BCInstruction *handler) { + auto it = std::find(handlers.begin(), handlers.end(), handler); + if (it == handlers.end()) { + handlers.emplace_back(handler); + } +} + +void BCInstruction::AddHandlerTarget(uint32 target) { + handlerTargets.emplace_back(target); +} + +MapleVector BCInstruction::GetHandlerTargets() const { + return handlerTargets; +} + +MapleList *BCInstruction::GetDefedRegs() { + return &defedRegs; +} + +MapleList *BCInstruction::GetUsedRegs() { + return &usedRegs; +} + +void BCInstruction::SetRegTypeInTypeInfer() { + SetRegTypeInTypeInferImpl(); +} + +std::vector BCInstruction::GetTargetsImpl() const { + return std::vector{}; // Default empty, means invalid +} + +void BCInstruction::GenCommentStmt(std::list &stmts) const { +#ifdef DEBUG + if (FEOptions::GetInstance().IsDumpComment()) { + std::ostringstream oss; + // 4 means 4-character width + oss << "LINE " << FEManager::GetManager().GetSourceFileNameFromIdx(srcFileIdx) << " : " << srcFileLineNum << + ", INST_IDX : " << pc << " ||" << std::setfill('0') << std::setw(4) << std::hex << pc << ": " << + (opName == nullptr ? "invalid op" : opName); + stmts.emplace_back(FEIRBuilder::CreateStmtComment(oss.str())); + } +#else + (void) stmts; +#endif +} + +UniqueFEIRStmt BCInstruction::GenLabelStmt() const { + return std::make_unique(funcNameIdx, pc); +} + +UniqueFEIRStmt BCInstruction::GenCatchStmt() const { + std::unique_ptr stmt = std::make_unique(funcNameIdx, pc); + for (const auto &exTypeNameIdx : catchedExTypeNamesIdx) { + stmt->AddCatchTypeNameIdx(exTypeNameIdx); + } + return stmt; +} + +UniqueFEIRStmt BCInstruction::GenTryLabelStmt() const { + std::unique_ptr javaTry = std::make_unique(funcNameIdx); + for (const auto &handler : handlers) { + javaTry->AddCatchLabelIdx(handler->GetPC()); + } + return javaTry; +} + +UniqueFEIRStmt BCInstruction::GenTryEndLabelStmt() const { + return std::make_unique(); +} + +std::list BCInstruction::GenRetypeStmtsAfterDef() const { + std::list stmts; + for (BCReg *reg : defedRegs) { + std::list stmts0 = reg->GenRetypeStmtsAfterDef(); + for (auto &stmt : stmts0) { + stmts.emplace_back(std::move(stmt)); + } + } + return stmts; +} + +std::list BCInstruction::GenRetypeStmtsBeforeUse() const { + std::list stmts; + for (BCReg *reg : usedRegs) { + std::list stmts0 = reg->GenRetypeStmtsBeforeUse(); + for (auto &stmt : stmts0) { + stmts.emplace_back(std::move(stmt)); + } + } + return stmts; +} + +void BCInstruction::SetFuncNameIdx(const GStrIdx &methodIdx) { + funcNameIdx = methodIdx; +} + +void BCInstruction::SetSrcPositionInfo(uint32 fileIdxIn, uint32 lineNumIn) { + srcFileIdx = fileIdxIn; + srcFileLineNum = lineNumIn; +} + + +void BCInstruction::SetOpName(const char *name) { +#ifdef DEBUG + opName = name; +#else + (void) name; +#endif +} + +const char *BCInstruction::GetOpName() const { +#ifdef DEBUG + return opName; +#else + return nullptr; +#endif +} + +// ========== BCRegTypeItem ========== +PrimType BCRegTypeItem::GetPrimType() const { + return GetBasePrimType(); +} + +PrimType BCRegTypeItem::GetBasePrimType() const { + return BCUtil::GetPrimType(typeNameIdx); +} + +bool BCRegTypeItem::IsMorePreciseType(const BCRegTypeItem &typeItemIn) const { + if (IsRef() && !typeItemIn.IsRef()) { + return true; + } else if (!IsRef() && typeItemIn.IsRef()) { + return false; + } else if (IsRef() && 
typeItemIn.IsRef()) { + const std::string &name0 = GlobalTables::GetStrTable().GetStringFromStrIdx(typeNameIdx); + const std::string &name1 = GlobalTables::GetStrTable().GetStringFromStrIdx(typeItemIn.typeNameIdx); + uint8 dim0 = FEUtils::GetDim(name0); + uint8 dim1 = FEUtils::GetDim(name1); + if (dim0 == dim1) { + GStrIdx name0Idx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(name0.substr(dim0)); + GStrIdx name1Idx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(name1.substr(dim0)); + if (name0Idx == name1Idx || dim0 == 0) { + if (!isIndeterminate && typeItemIn.isIndeterminate) { + return true; + } else if (isIndeterminate && !typeItemIn.isIndeterminate) { + return false; + } else if (isIndeterminate && typeItemIn.isIndeterminate) { + return name1Idx == BCUtil::GetJavaObjectNameMplIdx(); + } else { + return isFromDef ? + (name0Idx == BCUtil::GetJavaObjectNameMplIdx() || name1Idx != BCUtil::GetJavaObjectNameMplIdx()) : + (name0Idx != BCUtil::GetJavaObjectNameMplIdx() || name1Idx == BCUtil::GetJavaObjectNameMplIdx()); + } + } else { + BCRegTypeItem item0(name0Idx, isIndeterminate); + BCRegTypeItem item1(name1Idx, typeItemIn.isIndeterminate); + return item0.IsMorePreciseType(item1); + } + } else { + return dim0 > dim1; + } + } else { + if (!isIndeterminate && typeItemIn.isIndeterminate) { + return true; + } else if (isIndeterminate && !typeItemIn.isIndeterminate) { + return false; + } else { + return BCUtil::IsMorePrecisePrimitiveType(typeNameIdx, typeItemIn.typeNameIdx); + } + } +} + +// ========== BCRegType ========== +BCRegType::BCRegType(MapleAllocator &allocatorIn, BCReg ®, const GStrIdx &typeNameIdxIn, bool isIndeterminateIn) + : allocator(allocatorIn), curReg(reg), + regTypeItem(allocator.GetMemPool()->New(typeNameIdxIn, isIndeterminateIn, true)), + fuzzyTypesUsedAs(allocator.Adapter()), + elemTypes(allocator.Adapter()), + livesBegins(allocator.Adapter()) { + curReg.regTypeItem = regTypeItem; + if (isIndeterminateIn) { + fuzzyTypesUsedAs.emplace_back(regTypeItem); + } +} + +void BCRegType::PrecisifyTypes(bool isTry) { + if (precisified) { + return; + } + if (!isTry) { + precisified = true; + } + if (curReg.IsConstZero() || typesUsedAs == nullptr || typesUsedAs->empty()) { + return; + } + // insert defed type into `used` + typesUsedAs->emplace_back(regTypeItem); + BCRegTypeItem *realType = nullptr; + PrecisifyRelatedTypes(realType); + if (fuzzyTypesUsedAs.size() != 0 || realType == nullptr) { + if (realType == nullptr || realType->isIndeterminate) { + realType = GetMostPreciseType(*typesUsedAs); + if (realType != nullptr) { + for (auto &elem : fuzzyTypesUsedAs) { + elem->Copy(*realType); + } + } + } + } + if (!realType->isIndeterminate) { + PrecisifyElemTypes(realType); + } +} + +void BCRegType::PrecisifyRelatedTypes(const BCRegTypeItem *realType) { + for (auto rType : relatedBCRegTypes) { + // try to get type from use first. + // if failed, get type from def. 
+ rType->PrecisifyTypes(true); + if (rType->IsIndeterminate()) { + if (realType == nullptr) { + realType = GetMostPreciseType(*typesUsedAs); + if (realType != nullptr) { + for (auto &fuzzyTy : fuzzyTypesUsedAs) { + fuzzyTy->Copy(*realType); + } + } + } + if (!realType->isIndeterminate) { + rType->PrecisifyTypes(true); + } + } else { + rType->precisified = true; + } + } +} + +void BCRegType::PrecisifyElemTypes(const BCRegTypeItem *arrayType) { + for (auto elem : elemTypes) { + if (elem->isIndeterminate) { + const std::string &arrTypeName = GlobalTables::GetStrTable().GetStringFromStrIdx(arrayType->typeNameIdx); + if (arrTypeName.size() > 1 && arrTypeName.at(0) == 'A') { + GStrIdx elemTypeIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(arrTypeName.substr(1)); + elem->typeNameIdx = elemTypeIdx; + elem->isIndeterminate = false; + } else { + CHECK_FATAL(curReg.regValue != nullptr, "Not an array type or const 0"); + } + } + } +} + +BCRegTypeItem *BCRegType::GetMostPreciseType(const MapleList &types) { + BCRegTypeItem *retType = nullptr; + if (types.empty()) { + return retType; + } + auto it = types.begin(); + retType = *it; + ++it; + while (it != types.end()) { + if ((*it)->IsMorePreciseType(*retType)) { + retType = *it; + } + ++it; + } + return retType; +} + +// ========== BCReg ========== +PrimType BCReg::GetPrimType() const { + return regTypeItem->GetPrimType(); +} + +PrimType BCReg::GetBasePrimType() const { + return regTypeItem->GetBasePrimType(); +} + +bool BCReg::IsConstZero() const { + if (regValue != nullptr) { + return regValue->primValue.raw32 == 0; + } + return false; +} + +UniqueFEIRType BCReg::GenFEIRType() const { + return GenFEIRTypeImpl(); +} + +UniqueFEIRVar BCReg::GenFEIRVarReg() const { + return GenFEIRVarRegImpl(); +} + +UniqueFEIRVar BCReg::GenFEIRVarRegImpl() const { + return std::make_unique(regNum, GenFEIRTypeImpl()); +} + +UniqueFEIRType BCReg::GenFEIRTypeImpl() const { + PrimType pty = GetBasePrimType(); + return std::make_unique(pty, regTypeItem->typeNameIdx); +} + +std::list BCReg::GenRetypeStmtsAfterDef() const { + std::list retypeStmts; + // Not gen retype stmt for use reg, same def-use type reg. 
+ if (!isDef) { + return retypeStmts; + } + std::unique_ptr dstReg = this->Clone(); + std::unique_ptr tmpItem = std::make_unique(GStrIdx(0), false); + dstReg->regTypeItem = tmpItem.get(); + PrimType ptyDef = regTypeItem->GetPrimType(); + + std::list unqTypeItems; + if (regType->GetUsedTypes() != nullptr) { + for (const auto &usedType : *(regType->GetUsedTypes())) { + bool exist = false; + for (auto &elem : unqTypeItems) { + if ((*usedType) == (*elem)) { + if (usedType->IsDom()) { + elem->SetDom(true); + } + exist = true; + break; + } + } + if (exist == false) { + unqTypeItems.emplace_back(usedType); + } + } + } + + for (const auto &usedType : unqTypeItems) { + PrimType ptyUsed = usedType->GetPrimType(); + if ((*usedType) == (*regTypeItem) || + (usedType->isIndeterminate && + ((ptyUsed == PTY_i64 && ptyDef == PTY_f64) || (ptyUsed == PTY_i32 && ptyDef == PTY_f32)))) { + continue; + } + // Create retype stmt + dstReg->regTypeItem->Copy(*usedType); + UniqueFEIRStmt retypeStmt = + FEIRBuilder::CreateStmtRetype(dstReg->GenFEIRVarReg(), this->GenFEIRVarReg()); + if (retypeStmt != nullptr) { + if (FEOptions::GetInstance().IsAOT() && usedType->IsDom()) { + retypeStmt->SetHexPC(regType->GetPos()); + } + retypeStmts.emplace_back(std::move(retypeStmt)); + } + } + return retypeStmts; +} + +std::list BCReg::GenRetypeStmtsBeforeUse() const { + std::list retypeStmts; + // Not gen retype stmt for def reg, same def-use type reg. + // And Not gen retype stmt, if the defiend type is indeterminate. + const BCRegTypeItem *defed = regType->GetRegTypeItem(); + if (isDef) { + return retypeStmts; + } + if (!((*regTypeItem) == (*defed))) { + BCReg srcReg; + std::unique_ptr tmpItem = std::make_unique(GStrIdx(0), false); + srcReg.regTypeItem = tmpItem.get(); + // Create retype stmt + srcReg.regNum = regNum; + srcReg.regTypeItem->Copy(*defed); + UniqueFEIRStmt retypeStmt = + FEIRBuilder::CreateStmtRetype(this->GenFEIRVarReg(), srcReg.GenFEIRVarReg()); + if (retypeStmt != nullptr) { + retypeStmts.emplace_back(std::move(retypeStmt)); + } + } + return retypeStmts; +} + +std::unique_ptr BCReg::CloneImpl() const { + auto reg = std::make_unique(); + *reg = *this; + return reg; +} +} // namespace bc +} // namespace maple diff --git a/src/hir2mpl/bytecode_input/common/src/bc_io.cpp b/src/hir2mpl/bytecode_input/common/src/bc_io.cpp new file mode 100644 index 0000000000000000000000000000000000000000..fab0438a6a3f3e437d0d38ca056382726f08d714 --- /dev/null +++ b/src/hir2mpl/bytecode_input/common/src/bc_io.cpp @@ -0,0 +1,68 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "bc_io.h" +namespace maple { +namespace bc { +BCIO::BCIO(const std::string &fileName) : BasicIOMapFile(fileName), isBigEndianHost(IsBigEndian()) {} + +BCReader::BCReader(const std::string &fileName) : BCIO(fileName), BasicIORead(*this, false) {} + +BCReader::~BCReader() { + Close(); +} + +bool BCReader::RetrieveHeader(RawData &data) { + return RetrieveHeaderImpl(data); +} + +void BCReader::SetEndianTag(bool isBigEndianIn) { + isBigEndian = isBigEndianIn; +} + +bool BCReader::RetrieveHeaderImpl(RawData &data) { + (void)data; + CHECK_FATAL(false, "NIY"); + return false; +} + +std::string BCReader::GetStringFromIdx(uint32 idx) const { + return GetStringFromIdxImpl(idx); +} + +std::string BCReader::GetTypeNameFromIdx(uint32 idx) const { + return GetTypeNameFromIdxImpl(idx); +} + +BCReader::ClassElem BCReader::GetClassMethodFromIdx(uint32 idx) const { + return GetClassMethodFromIdxImpl(idx); +} + +BCReader::ClassElem BCReader::GetClassFieldFromIdx(uint32 idx) const { + return GetClassFieldFromIdxImpl(idx); +} + +std::string BCReader::GetSignature(uint32 idx) const { + return GetSignatureImpl(idx); +} + +uint32 BCReader::GetFileIndex() const { + return GetFileIndexImpl(); +} + +std::string BCReader::GetIRSrcFileSignature() const { + return irSrcFileSignature; +} +} // namespace bc +} // namespace maple \ No newline at end of file diff --git a/src/hir2mpl/bytecode_input/common/src/bc_parser_base.cpp b/src/hir2mpl/bytecode_input/common/src/bc_parser_base.cpp new file mode 100644 index 0000000000000000000000000000000000000000..89ba8f0bf287b5dac8d16c38843dca212df70876 --- /dev/null +++ b/src/hir2mpl/bytecode_input/common/src/bc_parser_base.cpp @@ -0,0 +1,68 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "bc_parser_base.h" +#include "mpl_logging.h" +#include "mir_module.h" +namespace maple { +namespace bc { +BCParserBase::BCParserBase(uint32 fileIdxIn, const std::string &fileNameIn, const std::list &classNamesIn) + : fileIdx(fileIdxIn), fileName(fileNameIn), classNames(classNamesIn), + fileNameHashId(-1) {} + +const BCReader *BCParserBase::GetReader() const { + return GetReaderImpl(); +} + +bool BCParserBase::OpenFile() { + return OpenFileImpl(); +} + +bool BCParserBase::ParseHeader() { + return ParseHeaderImpl(); +} + +bool BCParserBase::Verify() { + return VerifyImpl(); +} + +uint32 BCParserBase::CalculateCheckSum(const uint8 *data, uint32 size) { + return CalculateCheckSumImpl(data, size); +} + +bool BCParserBase::RetrieveClasses(std::list> &klasses) { + if (RetrieveIndexTables() == false) { + ERR(kLncErr, "RetrieveIndexTables failed"); + return false; + } + if (!classNames.empty()) { + return RetrieveUserSpecifiedClasses(klasses); + } else { + return RetrieveAllClasses(klasses); + } +} + +bool BCParserBase::CollectAllDepTypeNames(std::unordered_set &depSet) { + return CollectAllDepTypeNamesImpl(depSet); +} + +bool BCParserBase::CollectMethodDepTypeNames(std::unordered_set &depSet, BCClassMethod &bcMethod) const { + return CollectMethodDepTypeNamesImpl(depSet, bcMethod); +} + +bool BCParserBase::CollectAllClassNames(std::unordered_set &classSet) { + return CollectAllClassNamesImpl(classSet); +} +} // namespace bc +} // namespace maple \ No newline at end of file diff --git a/src/hir2mpl/bytecode_input/common/src/bc_pragma.cpp b/src/hir2mpl/bytecode_input/common/src/bc_pragma.cpp new file mode 100644 index 0000000000000000000000000000000000000000..8d432ec58cae12dba602a297b3afa69bd96c5cbd --- /dev/null +++ b/src/hir2mpl/bytecode_input/common/src/bc_pragma.cpp @@ -0,0 +1,23 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "bc_pragma.h" +namespace maple { +namespace bc { +std::vector &BCAnnotationsDirectory::EmitPragmasImpl() { + CHECK_FATAL(false, "this method must be overrided!!!"); + return pragmas; +} +} // namespace bc +} // namespace maple \ No newline at end of file diff --git a/src/hir2mpl/bytecode_input/common/src/bc_util.cpp b/src/hir2mpl/bytecode_input/common/src/bc_util.cpp new file mode 100644 index 0000000000000000000000000000000000000000..c6f04d880e6223dd0560aa137f265b2bed35cc8b --- /dev/null +++ b/src/hir2mpl/bytecode_input/common/src/bc_util.cpp @@ -0,0 +1,217 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. 
+ * See the Mulan PSL v2 for more details. + */ +#include "bc_util.h" +#include "fe_utils.h" +#include "fe_options.h" +#include "mpl_logging.h" +namespace maple { +namespace bc { +const std::string BCUtil::kUnknown = "Unknown"; +const std::string BCUtil::kPrimitive = "Primitive"; +const std::string BCUtil::kBoolean = "Z"; +const std::string BCUtil::kByte = "B"; +const std::string BCUtil::kShort = "S"; +const std::string BCUtil::kChar = "C"; +const std::string BCUtil::kInt = "I"; +const std::string BCUtil::kLong = "J"; +const std::string BCUtil::kFloat = "F"; +const std::string BCUtil::kDouble = "D"; +const std::string BCUtil::kVoid = "V"; +const std::string BCUtil::kWide = "wide"; +const std::string BCUtil::kAggregate = "Aggregate"; +const std::string BCUtil::kJavaObjectName = "Ljava/lang/Object;"; +const std::string BCUtil::kJavaStringName = "Ljava/lang/String;"; +const std::string BCUtil::kJavaByteClassName = "Ljava/lang/Byte;"; +const std::string BCUtil::kJavaShortClassName = "Ljava/lang/Short;"; +const std::string BCUtil::kJavaIntClassName = "Ljava/lang/Integer;"; +const std::string BCUtil::kJavaLongClassName = "Ljava/lang/Long;"; +const std::string BCUtil::kJavaFloatClassName = "Ljava/lang/Float;"; +const std::string BCUtil::kJavaDoubleClassName = "Ljava/lang/Double;"; +const std::string BCUtil::kJavaCharClassName = "Ljava/lang/Character;"; +const std::string BCUtil::kJavaBoolClassName = "Ljava/lang/Boolean;"; +const std::string BCUtil::kJavaClassName = "Ljava/lang/Class;"; +const std::string BCUtil::kJavaMethodHandleName = "Ljava/lang/invoke/MethodHandle;"; +const std::string BCUtil::kJavaExceptionName = "Ljava/lang/Exception;"; +const std::string BCUtil::kJavaThrowableName = "Ljava/lang/Throwable;"; + +const std::string BCUtil::kJavaMethodHandleInvoke = "Ljava/lang/invoke/MethodHandle;|invoke|"; +const std::string BCUtil::kJavaMethodHandleInvokeExact = "Ljava/lang/invoke/MethodHandle;|invokeExact|"; + +const std::string BCUtil::kABoolean = "AZ"; +const std::string BCUtil::kAByte = "AB"; +const std::string BCUtil::kAShort = "AS"; +const std::string BCUtil::kAChar = "AC"; +const std::string BCUtil::kAInt = "AI"; +const std::string BCUtil::kALong = "AJ"; +const std::string BCUtil::kAFloat = "AF"; +const std::string BCUtil::kADouble = "AD"; +const std::string BCUtil::kAJavaObjectName = "ALjava/lang/Object;"; + +bool BCUtil::IsWideType(const GStrIdx &name) { + return name == GetDoubleIdx() || name == GetLongIdx(); +} + +bool BCUtil::IsMorePrecisePrimitiveType(const GStrIdx &name0, const GStrIdx &name1) { + static std::vector typeWidthMap = { + BCUtil::GetVoidIdx(), + BCUtil::GetBooleanIdx(), + BCUtil::GetByteIdx(), + BCUtil::GetCharIdx(), + BCUtil::GetShortIdx(), + BCUtil::GetIntIdx(), + BCUtil::GetFloatIdx(), + BCUtil::GetLongIdx(), + BCUtil::GetDoubleIdx() + }; + if (name0 == name1) { + return false; + } + uint32 name0Idx = UINT32_MAX; + uint32 name1Idx = UINT32_MAX; + for (uint32 i = 0; i < typeWidthMap.size(); ++i) { + if (typeWidthMap[i] == name0) { + name0Idx = i; + continue; + } + if (typeWidthMap[i] == name1) { + name1Idx = i; + continue; + } + } + CHECK_FATAL(name0Idx != UINT32_MAX && name1Idx != UINT32_MAX, "name0's or name1's primitive type is not supported."); + return name0Idx > name1Idx; +} + +PrimType BCUtil::GetPrimType(const GStrIdx &typeNameIdx) { + if (typeNameIdx == BCUtil::GetBooleanIdx()) { + return PTY_u1; + } + if (typeNameIdx == BCUtil::GetByteIdx()) { + return PTY_i8; + } + if (typeNameIdx == BCUtil::GetShortIdx()) { + return PTY_i16; + } + if (typeNameIdx == 
BCUtil::GetCharIdx()) { + return PTY_u16; + } + if (typeNameIdx == BCUtil::GetIntIdx()) { + return PTY_i32; + } + if (typeNameIdx == BCUtil::GetLongIdx()) { + return PTY_i64; + } + if (typeNameIdx == BCUtil::GetFloatIdx()) { + return PTY_f32; + } + if (typeNameIdx == BCUtil::GetDoubleIdx()) { + return PTY_f64; + } + if (typeNameIdx == BCUtil::GetVoidIdx()) { + return PTY_void; + } + // Wide, Primitive, Agg + return PTY_ref; +} + +bool BCUtil::IsJavaReferenceType(const GStrIdx &typeNameIdx) { + PrimType primType = GetPrimType(typeNameIdx); + return (primType == PTY_ref); +} + +bool BCUtil::IsJavaPrimitveType(const GStrIdx &typeNameIdx) { + return !IsJavaReferenceType(typeNameIdx); +} + +bool BCUtil::IsJavaPrimitiveTypeName(const std::string typeName) { + return ((typeName == kBoolean) || (typeName == kByte) || (typeName == kBoolean) || (typeName == kShort) || + (typeName == kChar) || (typeName == kInt) || (typeName == kLong) || (typeName == kFloat) || + (typeName == kDouble)); +} + +bool BCUtil::IsArrayType(const GStrIdx &typeNameIdx) { + std::string typeName = GlobalTables::GetStrTable().GetStringFromStrIdx(typeNameIdx); + uint8 dim = FEUtils::GetDim(typeName); + return dim != 0; +} + +std::string BCUtil::TrimArrayModifier(const std::string &typeName) { + std::size_t index = 0; + for (; index < typeName.size(); ++index) { + if (typeName[index] != '[') { + break; + } + } + if (index != 0) { + return typeName.substr(index, typeName.size()); + } else { + return typeName; + } +} + +void BCUtil::AddDefaultDepSet(std::unordered_set &typeTable) { + typeTable.emplace("Ljava/lang/Class;"); + typeTable.emplace("Ljava/lang/Runnable;"); + typeTable.emplace("Ljava/lang/ClassLoader;"); + typeTable.emplace("Ljava/lang/StringFactory;"); + // pre-load dependent types for maple_ipa preinline phase + typeTable.emplace("Ljava/lang/System;"); + typeTable.emplace("Ljava/lang/String;"); + typeTable.emplace("Ljava/lang/Math;"); + typeTable.emplace("Ljava/lang/Long;"); + typeTable.emplace("Ljava/lang/Throwable;"); + typeTable.emplace("Ljava/io/PrintStream;"); + typeTable.emplace("Ljava/io/InputStream;"); + typeTable.emplace("Lsun/misc/FloatingDecimal;"); + typeTable.emplace("Ljava/lang/reflect/Field;"); + typeTable.emplace("Ljava/lang/annotation/Annotation;"); + typeTable.emplace("Ljava/lang/AbstractStringBuilder;"); + typeTable.emplace("Ljava/io/UnixFileSystem;"); + typeTable.emplace("Ljava/util/concurrent/atomic/AtomicInteger;"); + typeTable.emplace("Ljava/lang/reflect/Method;"); +} + +// get the serial number in register name, for example 2 in Reg2_I +uint32 BCUtil::Name2RegNum(const std::string &name) { + const std::size_t regPrefixLen = strlen("Reg"); + std::size_t numLen = name.length() - regPrefixLen; + // Nonreg names also reach here, e.g. "_this". Make sure they are not handle. 
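+ // Register-backed locals are named "Reg<serial>_<type>", e.g. "Reg2_I" yields 2. The shortest
+ // such name ("Reg0_I") is 6 characters long, hence regVarMinLen below; anything shorter, anything
+ // without the "Reg" prefix, or anything without digits right after the prefix (e.g. "Register")
+ // returns UINT32_MAX so the caller can ignore it.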
+ const std::size_t regVarMinLen = 6; + if (numLen < regVarMinLen - regPrefixLen || name.compare(0, regPrefixLen, "Reg") != 0) { + return UINT32_MAX; + } + std::string regName = name.substr(regPrefixLen); + std::size_t i = 0; + for (; i < numLen; i++) { + if (regName[i] < '0' || regName[i] > '9') { + break; + } + } + + if (i == 0) { + return UINT32_MAX; + } + int regNum = std::stoi(regName.substr(0, i)); + return static_cast(regNum); +} + +bool BCUtil::HasContainSuffix(const std::string &value, const std::string &suffix) { + if (suffix.size() > value.size()) { + return false; + } + return std::equal(suffix.rbegin(), suffix.rend(), value.rbegin()); +} +} // namespace bc +} // namespace maple diff --git a/src/hir2mpl/bytecode_input/common/src/rc_setter.cpp b/src/hir2mpl/bytecode_input/common/src/rc_setter.cpp new file mode 100644 index 0000000000000000000000000000000000000000..526a82ab154937be12cc99fa4706de01416efa7c --- /dev/null +++ b/src/hir2mpl/bytecode_input/common/src/rc_setter.cpp @@ -0,0 +1,444 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "rc_setter.h" +#include +#include "ark_annotation_map.h" +#include "ark_annotation_processor.h" +#include "fe_manager.h" +#include "bc_util.h" + +namespace maple { +namespace bc { +RCSetter *RCSetter::rcSetter = nullptr; + +void RCSetter::ProcessClassRCAnnotation(const GStrIdx &classIdx, const std::vector &pragmas) { + if (pragmas.empty()) { + return; + } + MIRStructType *type = FEManager::GetTypeManager().GetStructTypeFromName(classIdx); + for (auto pragma : pragmas) { + if (pragma->GetKind() != kPragmaClass || pragma->GetStrIdx() != type->GetNameStrIdx()) { + continue; + } + if (!ArkAnnotation::GetInstance().IsRCUnownedOuter(pragma->GetTyIdx()) && + !ArkAnnotation::GetInstance().IsRCUnownedThis(pragma->GetTyIdx())) { + continue; + } + const std::string &className = type->GetName(); + MIRSymbol *jcSymbol = FEManager::GetMIRBuilder().GetOrCreateGlobalDecl(className, *type); + if (jcSymbol != nullptr) { + jcSymbol->SetAttr(ATTR_rcunownedthis); + } + for (auto &field : type->GetFields()) { + std::string fieldName = GlobalTables::GetStrTable().GetStringFromStrIdx(field.first); + const std::string prefix = "this_24"; + if (fieldName.compare(0, prefix.size(), prefix) == 0) { + field.second.second.SetAttr(FLDATTR_rcunowned); + } + } + } +} + +void RCSetter::ProcessMethodRCAnnotation(MIRFunction &mirFunc, const std::string &className, + MIRStructType &structType, const MIRPragma &pragma) { + if (ArkAnnotation::GetInstance().IsRCUnownedOuter(pragma.GetTyIdx()) || + ArkAnnotation::GetInstance().IsRCUnownedThis(pragma.GetTyIdx())) { + // set ATTR_rcunowned to the field this$n of local class + // the current function belongs to + MIRSymbol *jcSymbol = FEManager::GetMIRBuilder().GetOrCreateGlobalDecl(className, structType); + if (jcSymbol != nullptr) { + jcSymbol->SetAttr(ATTR_rcunownedthis); + } + for (auto &field : structType.GetFields()) { + const std::string &fieldName = 
GlobalTables::GetStrTable().GetStringFromStrIdx(field.first); + const std::string prefix = "this_24"; + if (fieldName.compare(0, prefix.size(), prefix) == 0) { + field.second.second.SetAttr(FLDATTR_rcunowned); + } + } + } else if (ArkAnnotation::GetInstance().IsRCUnownedCap(pragma.GetTyIdx()) || + ArkAnnotation::GetInstance().IsRCUnownedCapList(pragma.GetTyIdx())) { + // handle old RCUnownedCapRef annotation. + MIRPragmaElement *elem = pragma.GetElementVector()[0]; + if (ArkAnnotation::GetInstance().IsRCUnownedCap(pragma.GetTyIdx())) { + GStrIdx strIdx(elem->GetI32Val()); + CollectUnownedLocalVars(&mirFunc, strIdx); + } else if (ArkAnnotation::GetInstance().IsRCUnownedCapList(pragma.GetTyIdx())) { + for (auto eit : elem->GetSubElemVec()) { + if (eit->GetSubElemVec().empty()) { + continue; + } + MIRPragmaElement *e = eit->GetSubElemVec()[0]; + GStrIdx strIdx(e->GetI32Val()); + CollectUnownedLocalVars(&mirFunc, strIdx); + } + } + } +} + +void RCSetter::ProcessFieldRCAnnotation(const StructElemNameIdx &fieldElemNameIdx, + const MIRType &fieldType, const std::vector &pragmas) { + if (pragmas.empty()) { + return; + } + for (auto pragma : pragmas) { + if (pragma->GetKind() != kPragmaVar || + pragma->GetStrIdx() != fieldElemNameIdx.elem || + pragma->GetTyIdxEx() != fieldType.GetTypeIndex()) { + continue; + } + if (!ArkAnnotation::GetInstance().IsRCWeak(pragma->GetTyIdx()) && + !ArkAnnotation::GetInstance().IsRCUnowned(pragma->GetTyIdx())) { + continue; + } + FieldAttrKind attr; + if (ArkAnnotation::GetInstance().IsRCWeak(pragma->GetTyIdx())) { + attr = FLDATTR_rcweak; + } else if (ArkAnnotation::GetInstance().IsRCUnowned(pragma->GetTyIdx())) { + attr = FLDATTR_rcunowned; + } + MIRStructType *structType = FEManager::GetTypeManager().GetStructTypeFromName(fieldElemNameIdx.klass); + for (auto &field : structType->GetFields()) { + if (field.first == fieldElemNameIdx.elem) { + field.second.second.SetAttr(attr); + break; + } + } + } +} + +void RCSetter::GetUnownedVarInLocalVars(const BCClassMethod &method, MIRFunction &mirFunction) { + if (method.GetSrcLocalInfoPtr() == nullptr) { + return; + } + std::set unownedRegNums; + // map>> + for (auto &local : *method.GetSrcLocalInfoPtr()) { + for (auto &item : local.second) { + bool isUnowned = BCUtil::HasContainSuffix(std::get<0>(item), "$unowned"); + if (isUnowned) { + GStrIdx strIdx = FEManager::GetMIRBuilder().GetOrCreateStringIndex(namemangler::EncodeName(std::get<0>(item))); + CollectUnownedLocalVars(&mirFunction, strIdx); + (void)unownedRegNums.insert(local.first); + } + } + } + // set ATTR_rcunowned for symbols according their reg num. + SetAttrRCunowned(mirFunction, unownedRegNums); +} + +void RCSetter::SetAttrRCunowned(MIRFunction &mirFunction, const std::set &unownedRegNums) const { + if (unownedRegNums.empty()) { + return; + } + MIRSymbolTable *symTab = mirFunction.GetSymTab(); + if (symTab == nullptr) { + return; + } + for (uint32 i = 0; i < mirFunction.GetSymTab()->GetSymbolTableSize(); ++i) { + MIRSymbol *symbol = mirFunction.GetSymTab()->GetSymbolFromStIdx(i); + if (symbol == nullptr) { + continue; + } + MIRType *ty = symbol->GetType(); + if (ty->GetPrimType() == PTY_ref || ty->GetPrimType() == PTY_ptr) { + uint32 regNum = BCUtil::Name2RegNum(symbol->GetName()); + if (unownedRegNums.find(regNum) != unownedRegNums.end()) { + symbol->SetAttr(ATTR_rcunowned); + } + } + } +} + +void RCSetter::MarkRCUnownedForUnownedLocalFunctions() const { + // mark all local variables unowned for @UnownedLocal functions. 
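+ // unownedLocalFuncs is filled by CollectUnownedLocalFuncs() for functions carrying the
+ // @UnownedLocal annotation; for each such function, walk its local symbol table and set
+ // ATTR_rcunowned on every symbol of reference/pointer type (PTY_ref or PTY_ptr), so RC
+ // operations are not generated for those locals.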
+ for (auto func : unownedLocalFuncs) { + for (uint32 idx = 0; idx < func->GetSymTab()->GetSymbolTableSize(); idx++) { + MIRSymbol *sym = func->GetSymTab()->GetSymbolFromStIdx(idx); + if (sym == nullptr) { + continue; + } + MIRType *ty = sym->GetType(); + if (ty->GetPrimType() == PTY_ref || ty->GetPrimType() == PTY_ptr) { + sym->SetAttr(ATTR_rcunowned); + } + } + } +} + +bool RCSetter::IsAnonymousInner(const MIRStructType &structType) const { + if (!(structType.GetKind() == kTypeClass || structType.GetKind() == kTypeInterface)) { + return false; + } + // inner class has annotation Ldalvik/annotation/InnerClass; + bool isCreat = false; + const std::string &className = + ArkAnnotationMap::GetArkAnnotationMap().GetAnnotationTypeName("Ldalvik_2Fannotation_2FInnerClass_3B"); + MIRStructType *inner = FEManager::GetTypeManager().GetOrCreateClassOrInterfaceType( + className, true, FETypeFlag::kSrcExtern, isCreat); + GStrIdx strIdx = FEManager::GetMIRBuilder().GetOrCreateStringIndex("name"); + unsigned tyIdx = inner->GetTypeIndex(); + if (structType.GetKind() == kTypeClass) { + const MIRClassType &classType = static_cast(structType); + for (auto it : classType.GetPragmaVec()) { + if (it->GetTyIdx() == tyIdx) { + for (auto eit : it->GetElementVector()) { + // inner class and has no name + if (eit->GetNameStrIdx() == strIdx && eit->GetI32Val() == 0) { + return true; + } + } + } + } + } else if (structType.GetKind() == kTypeInterface) { + const MIRInterfaceType &interfaceType = static_cast(structType); + for (auto it : interfaceType.GetPragmaVec()) { + if (it->GetTyIdx() == tyIdx) { + for (auto eit : it->GetElementVector()) { + // inner class and has no name + if (eit->GetNameStrIdx() == strIdx && eit->GetI32Val() == 0) { + return true; + } + } + } + } + } + return false; +} + +bool RCSetter::IsMethodEnclosing(const MIRFunction &func, const MIRStructType &structType) const { + if (!(structType.GetKind() == kTypeClass || structType.GetKind() == kTypeInterface)) { + return false; + } + bool isCreat = false; + const std::string &className = namemangler::GetInternalNameLiteral( + ArkAnnotationMap::GetArkAnnotationMap().GetAnnotationTypeName("Ldalvik_2Fannotation_2FEnclosingMethod_3B")); + MIRStructType *inner = FEManager::GetTypeManager().GetOrCreateClassOrInterfaceType( + className, true, FETypeFlag::kSrcExtern, isCreat); + unsigned tyIdx = inner->GetTypeIndex(); + if (structType.GetKind() == kTypeClass) { + const MIRClassType &classType = static_cast(structType); + for (auto it : classType.GetPragmaVec()) { + if (it->GetTyIdx() == tyIdx && !(it->GetElementVector().empty()) && + func.GetNameStrIdx() == it->GetElementVector()[0]->GetI32Val()) { + return true; + } + } + } else if (structType.GetKind() == kTypeInterface) { + const MIRInterfaceType &interfaceType = static_cast(structType); + for (auto it : interfaceType.GetPragmaVec()) { + if (it->GetTyIdx() == tyIdx && (!it->GetElementVector().empty()) && + func.GetNameStrIdx() == it->GetElementVector()[0]->GetI32Val()) { + return true; + } + } + } + return false; +} + +void RCSetter::MarkRCUnownedForAnonymousClasses() const { + // mark captured unowned fields for anonymous class. 
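+ // unownedLocalVars maps each function to the mangled names of locals it marked unowned (from the
+ // old RCUnownedCapRef annotations or "$unowned"-suffixed debug names). For each such function,
+ // find the anonymous inner classes whose EnclosingMethod annotation points back at it; javac
+ // stores a captured variable "aaa" in such a class as a field "val$aaa", mangled here to
+ // "val_24aaa". The loop below compares every collected name (which may still carry its
+ // "_24unowned" suffix) against the field name after the "val_24" prefix and, on a match,
+ // sets FLDATTR_rcunowned on that field.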
+ for (auto mit : unownedLocalVars) { + MIRFunction *func = mit.first; + // check for anonymous class + // mark captured var name aaa -> val$aaa (or val_24aaa) field in anonymous class + for (auto sit : FEManager::GetTypeManager().GetStructNameTypeMap()) { + ASSERT_NOT_NULL(sit.second.first); + if (IsAnonymousInner(*(sit.second.first)) && IsMethodEnclosing(*func, *(sit.second.first))) { + for (auto &fit : sit.second.first->GetFields()) { + const std::string &fieldName = GlobalTables::GetStrTable().GetStringFromStrIdx(fit.first); + constexpr size_t prefixLength = sizeof("val_24") - 1; + if (fieldName.compare(0, prefixLength, "val_24") == 0) { + std::string nameWithSuffix = fieldName + "_24unowned"; + for (auto nit : mit.second) { + GStrIdx strIdx(nit); + const std::string &varName = GlobalTables::GetStrTable().GetStringFromStrIdx(strIdx); + if (nameWithSuffix.compare(prefixLength, varName.length(), varName) == 0 || + fieldName.compare(prefixLength, varName.length(), varName) == 0) { + fit.second.second.SetAttr(FLDATTR_rcunowned); + } + } + } + } + } + } + } +} + +void RCSetter::SetRCUnownedAttributeInternalSetFieldAttrRCUnowned(size_t opnd, const MIRFunction &calledFunc) const { + const MIRSymbol *arg = calledFunc.GetFormal(opnd); + for (const StmtNode *s = calledFunc.GetBody()->GetFirst(); s != nullptr; s = s->GetNext()) { + // 1. checking iassign statements that assign parameter arg + if (s->GetOpCode() != OP_iassign) { + continue; + } + const IassignNode *ian = static_cast(s); + const BaseNode *val = ian->GetRHS(); + if (val->GetOpCode() != OP_dread) { + continue; + } + const DreadNode *valDrn = static_cast(val); + const MIRSymbol *valSymbol = calledFunc.GetLocalOrGlobalSymbol(valDrn->GetStIdx()); + const GStrIdx &valStrIdx = valSymbol->GetNameStrIdx(); + if (valStrIdx != arg->GetNameStrIdx()) { + continue; + } + // 2. get the class fields of the iassign statement + auto it = iputStmtFieldMap.find(s); + if (it != iputStmtFieldMap.end()) { + const GStrIdx &fieldStrIdx = it->second; + // 3. set rcunwoned attribute for that field + const MIRType *type = calledFunc.GetClassType(); + CHECK_NULL_FATAL(type); + MIRType *typePtr = GlobalTables::GetTypeTable().GetTypeFromTyIdx(type->GetTypeIndex()); + MIRStructType *structType = static_cast(typePtr); + for (auto &fit : structType->GetFields()) { + if (fit.first == fieldStrIdx) { + fit.second.second.SetAttr(FLDATTR_rcunowned); + break; + } + } + } + } +} + +void RCSetter::SetRCUnownedAttribute(const CallNode &callNode, MIRFunction &func, + const MIRFunction &calledFunc, const std::set &gStrIdx) const { + for (size_t i = 0; i < callNode.NumOpnds(); ++i) { + BaseNode *bn = callNode.GetNopndAt(i); + if (bn->GetOpCode() != OP_dread) { + continue; + } + DreadNode *drn = static_cast(bn); + const MIRSymbol *symbol = func.GetLocalOrGlobalSymbol(drn->GetStIdx()); + const GStrIdx &strIdx = symbol->GetNameStrIdx(); + // checking maple name in ALIAS + for (auto als : func.GetAliasVarMap()) { + if (als.second.mplStrIdx != strIdx) { + continue; + } + for (auto sit : gStrIdx) { + if (sit != als.first) { + continue; + } + // now we have a link and a field need to be set rcunowned + // + // the focus is the i_th parameter arg. + // we will go through body of of the lambda class + // 1. checking iassign statements that assign parameter arg + // 2. get the class fields of the iassign statement + // 3. 
set rcunwoned attribute for that field + SetRCUnownedAttributeInternalSetFieldAttrRCUnowned(i, calledFunc); + } + } + } +} + +void RCSetter::MarkRCUnownedForLambdaClasses() const { + // mark captured unowned fields for lambda classes. + for (auto mit : unownedLocalVars) { + MIRFunction *func = mit.first; + // scan function body to find lambda functions used + // and set rcunowned attribute on corresponding captured fields + // of local classes for lambda functions + for (const StmtNode *stmt = func->GetBody()->GetFirst(); stmt != nullptr; stmt = stmt->GetNext()) { + if (stmt->GetOpCode() != OP_callassigned) { + continue; + } + const CallNode *call = static_cast(stmt); + MIRFunction *calledFunc = GlobalTables::GetFunctionTable().GetFuncTable().at(call->GetPUIdx()); + // only care calling of local classes for lambda functions + if (!(calledFunc->GetAttr(FUNCATTR_constructor) && calledFunc->GetAttr(FUNCATTR_public) && + calledFunc->GetAttr(FUNCATTR_synthetic))) { + continue; + } + // go through call arguments to find maple names with ALIAS info + // and their src names matching captured var names + SetRCUnownedAttribute(*call, *func, *calledFunc, mit.second); + } + } +} + +void RCSetter::MarkRCAttributes() const { + MarkRCUnownedForUnownedLocalFunctions(); + MarkRCUnownedForAnonymousClasses(); + MarkRCUnownedForLambdaClasses(); +} + +void RCSetter::CollectUnownedLocalFuncs(MIRFunction *func) { + (void)unownedLocalFuncs.insert(func); +} + +void RCSetter::CollectUnownedLocalVars(MIRFunction *func, const GStrIdx &strIdx) { + (void)unownedLocalVars[func].insert(strIdx); +} + +void RCSetter::CollectInputStmtField(StmtNode *stmt, const GStrIdx &fieldName) { + (void)iputStmtFieldMap.emplace(stmt, fieldName); +} + +void RCSetter::LoadRCFieldAttrWhiteList(const std::string &file) { + std::string line; + std::ifstream infile(file); + uint32 lineNum = 0; + std::vector vecItem; + std::vector vecAttr; + std::string item; + std::string className; + std::string fieldName; + CHECK_FATAL(infile.is_open(), "(ToIDEUser)RCFieldAttrWhiteList file %s open failed", file.c_str()); + while (std::getline(infile, line)) { + lineNum++; + if (line.at(0) == '#') { + continue; + } + std::stringstream ss; + ss.str(line); + vecItem.clear(); + while (std::getline(ss, item, ':')) { + vecItem.push_back(item); + } + CHECK_FATAL(vecItem.size() > 2, "(ToIDEUser)invalid line %d in RCFieldAttrWhiteList file %s", lineNum, + file.c_str()); + className = vecItem[0]; + fieldName = vecItem[1]; + vecAttr.clear(); + for (size_t i = 2; i < vecItem.size(); i++) { + std::string &strAttr = vecItem[i]; + if (strAttr.compare("RCWeak") == 0) { + vecAttr.push_back(FLDATTR_rcweak); + } else if (strAttr.compare("RCUnowned") == 0) { + vecAttr.push_back(FLDATTR_rcunowned); + } + } + rcFieldAttrWhiteListMap[className][fieldName] = vecAttr; + } + infile.close(); +} + +void RCSetter::SetRCFieldAttrByWhiteList(FieldAttrs &attr, const std::string &className, + const std::string &fieldName) { + auto itClass = rcFieldAttrWhiteListMap.find(className); + if (itClass != rcFieldAttrWhiteListMap.end()) { + auto itField = itClass->second.find(fieldName); + if (itField != itClass->second.end()) { + for (auto &attrIt : itField->second) { + attr.SetAttr(attrIt); + } + } + } +} +} // namespace bc +} // namespace maple \ No newline at end of file diff --git a/src/hir2mpl/bytecode_input/dex/include/class_linker.h b/src/hir2mpl/bytecode_input/dex/include/class_linker.h new file mode 100644 index 
0000000000000000000000000000000000000000..e76a7d9eb3c49ab5691c24d61d3492b4a2f1164a --- /dev/null +++ b/src/hir2mpl/bytecode_input/dex/include/class_linker.h @@ -0,0 +1,39 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ + +#ifndef HIR2MPL_BC_INPUT_CLASS_LINKER_H +#define HIR2MPL_BC_INPUT_CLASS_LINKER_H +#include "class_loader_context.h" + +namespace maple { +class ClassLinker { + public: + explicit ClassLinker(std::vector> &classPath); + virtual ~ClassLinker() = default; + + void FindClass(const std::string &className, ClassLoaderInfo *classLoader, + std::list> &list, bool isDefClass = false); + void LoadSuperAndInterfaces(const std::unique_ptr &klass, ClassLoaderInfo *classLoader, + std::list> &klassList); + private: + std::unique_ptr FindInSharedLib(const std::string &className, ClassLoaderInfo &classLoader); + std::unique_ptr FindInBaseClassLoader(const std::string &className, ClassLoaderInfo *classLoader); + std::unique_ptr FindInClassLoaderClassPath(const std::string &className, + ClassLoaderInfo &classLoader) const; + std::vector> bootClassPath; + std::unordered_set processedClassName; +}; +} // end namespace maple +#endif // HIR2MPL_BC_INPUT_CLASS_LINKER_H diff --git a/src/hir2mpl/bytecode_input/dex/include/class_loader_context.h b/src/hir2mpl/bytecode_input/dex/include/class_loader_context.h new file mode 100644 index 0000000000000000000000000000000000000000..7b2e09f915038a1ca1efc7585036b7270019e21a --- /dev/null +++ b/src/hir2mpl/bytecode_input/dex/include/class_loader_context.h @@ -0,0 +1,80 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef HIR2MPL_BC_INPUT_CLASS_LOADER_CONTEXT_H +#define HIR2MPL_BC_INPUT_CLASS_LOADER_CONTEXT_H +#include "dex_parser.h" + +namespace maple { +enum ClassLoaderType { + kInvalidClassLoader = 0, + kPathClassLoader = 1, + kDelegateLastClassLoader = 2, + kInMemoryDexClassLoader = 3 +}; + +class ClassLoaderInfo { + public: + ClassLoaderType type; + ClassLoaderInfo *parent; + std::vector classPaths; + std::vector> hexElements; + std::vector checksums; + std::vector sharedLibraries; +}; + +class ClassLoaderContext { + public: + ClassLoaderContext(MemPool &mpIn) : mp(mpIn) {} + virtual ~ClassLoaderContext() { + loaderChain = nullptr; + } + + static ClassLoaderContext *Create(const std::string &spec, MemPool &mp); + ClassLoaderInfo *CreateClassLoader(const std::string &spec); + static bool OpenDexFiles(const std::string &spec, std::vector> &dexFileParsers); + + bool IsSpecialSharedLib() const { + return isSpecialSharedLib; + } + + const ClassLoaderInfo *GetClassLoader() const { + return loaderChain; + } + + private: + ClassLoaderType GetCLType(const std::string &clString); + const char *GetCLTypeName(ClassLoaderType type) const; + size_t FindMatchingSharedLibraryCloseMarker(const std::string& spec, size_t sharedLibraryOpenIndex); + ClassLoaderInfo *ParseInternal(const std::string &spec); + ClassLoaderInfo *ParseClassLoaderSpec(const std::string &spec); + bool ParseSharedLibraries(std::string &sharedLibrariesSpec, ClassLoaderInfo &info); + bool Parse(const std::string &spec); + ClassLoaderInfo *loaderChain = nullptr; + bool isSpecialSharedLib = false; + MemPool ∓ + static const char kPathClassLoaderString[]; + static const char kDelegateLastClassLoaderString[]; + static const char kInMemoryDexClassLoaderString[]; + static const char kClassLoaderOpeningMark; + static const char kClassLoaderClosingMark; + static const char kClassLoaderSharedLibraryOpeningMark; + static const char kClassLoaderSharedLibraryClosingMark; + static const char kClassLoaderSharedLibrarySeparator; + static const char kClassLoaderSeparator; + static const char kClasspathSeparator; + static const char kSpecialSharedLib[]; +}; +} // end namespace maple +#endif // HIR2MPL_BC_INPUT_CLASS_LOADER_CONTEXT_H diff --git a/src/hir2mpl/bytecode_input/dex/include/dex_class.h b/src/hir2mpl/bytecode_input/dex/include/dex_class.h new file mode 100644 index 0000000000000000000000000000000000000000..98829312fa78fcc81e02109ee55c1ac1ffdc8736 --- /dev/null +++ b/src/hir2mpl/bytecode_input/dex/include/dex_class.h @@ -0,0 +1,81 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MPL_FE_BC_INPUT_DEX_CLASS_H +#define MPL_FE_BC_INPUT_DEX_CLASS_H +#include "bc_class.h" +namespace maple { +namespace bc { +class DexClass : public BCClass { + public: + DexClass(uint32 idx, const BCParserBase &parser) : BCClass(idx, parser) {} + ~DexClass() = default; +}; + +class DexClassField : public BCClassField { + public: + DexClassField(const BCClass &klassIn, uint32 itemIdxIn, uint32 idxIn, uint32 acc, const std::string &nameIn, + const std::string &descIn) + : BCClassField(klassIn, acc, nameIn, descIn), + itemIdx(itemIdxIn), + idx(idxIn) {} + ~DexClassField() = default; + + protected: + uint32 GetItemIdxImpl() const override; + uint32 GetIdxImpl() const override; + bool IsStaticImpl() const override; + uint32 itemIdx; + uint32 idx; +}; + +class DEXTryInfo : public BCTryInfo { + public: + DEXTryInfo(uint32 startAddrIn, uint32 endAddrIn, std::unique_ptr>> catchesIn) + : BCTryInfo(startAddrIn, endAddrIn, std::move(catchesIn)) {} + ~DEXTryInfo() = default; +}; + +class DEXCatchInfo : public BCCatchInfo { + public: + DEXCatchInfo(uint32 pcIn, const GStrIdx &exceptionNameIdx, bool catchAll) + : BCCatchInfo(pcIn, exceptionNameIdx, catchAll) {} + ~DEXCatchInfo() = default; +}; + +class DexClassMethod : public BCClassMethod { + public: + DexClassMethod(const BCClass &klassIn, uint32 itemIdxIn, uint32 idxIn, bool isVirtualIn, uint32 acc, + const std::string &nameIn, const std::string &descIn) + : BCClassMethod(klassIn, acc, isVirtualIn, nameIn, descIn), + itemIdx(itemIdxIn), + idx(idxIn) {} + ~DexClassMethod() = default; + + protected: + uint32 GetItemIdxImpl() const override; + uint32 GetIdxImpl() const override; + bool IsStaticImpl() const override; + bool IsVirtualImpl() const override; + bool IsNativeImpl() const override; + bool IsInitImpl() const override; + bool IsClinitImpl() const override; + std::vector> GenArgVarListImpl() const override; + void GenArgRegsImpl() override; + uint32 itemIdx; + uint32 idx; +}; +} // namespace bc +} // namespace maple +#endif // MPL_FE_BC_INPUT_DEX_CLASS_H diff --git a/src/hir2mpl/bytecode_input/dex/include/dex_class2fe_helper.h b/src/hir2mpl/bytecode_input/dex/include/dex_class2fe_helper.h new file mode 100644 index 0000000000000000000000000000000000000000..1980671e617a0b466bd625dc741fddfd1e5bfa48 --- /dev/null +++ b/src/hir2mpl/bytecode_input/dex/include/dex_class2fe_helper.h @@ -0,0 +1,57 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ + +#ifndef HIR2MPL_BC_INPUT_INCLUDE_DEX_CLASS2FE_HELPER_H +#define HIR2MPL_BC_INPUT_INCLUDE_DEX_CLASS2FE_HELPER_H +#include "bc_class2fe_helper.h" +namespace maple { +namespace bc { +class DexClass2FEHelper : public BCClass2FEHelper { + public: + DexClass2FEHelper(MapleAllocator &allocator, bc::BCClass &klassIn); + ~DexClass2FEHelper() = default; + + protected: + void InitFieldHelpersImpl() override; + void InitMethodHelpersImpl() override; + TypeAttrs GetStructAttributeFromInputImpl() const override; +}; + +class DexClassField2FEHelper : public BCClassField2FEHelper { + public: + DexClassField2FEHelper(MapleAllocator &allocator, const BCClassField &fieldIn) + : BCClassField2FEHelper(allocator, fieldIn) {} + ~DexClassField2FEHelper() = default; + + protected: + FieldAttrs AccessFlag2AttributeImpl(uint32 accessFlag) const override; +}; + +class DexClassMethod2FEHelper : public BCClassMethod2FEHelper { + public: + DexClassMethod2FEHelper(MapleAllocator &allocator, std::unique_ptr &methodIn) + : BCClassMethod2FEHelper(allocator, methodIn) {} + ~DexClassMethod2FEHelper() = default; + + protected: + FuncAttrs GetAttrsImpl() const override; + bool IsStaticImpl() const override; + + bool IsClinit() const override; + bool IsInit() const override; +}; +} +} +#endif // HIR2MPL_BC_INPUT_INCLUDE_DEX_CLASS2FE_HELPER_H \ No newline at end of file diff --git a/src/hir2mpl/bytecode_input/dex/include/dex_encode_value.h b/src/hir2mpl/bytecode_input/dex/include/dex_encode_value.h new file mode 100644 index 0000000000000000000000000000000000000000..e5a1b2b3bb1b0331c72f0655f3e2b14f1f5c62e3 --- /dev/null +++ b/src/hir2mpl/bytecode_input/dex/include/dex_encode_value.h @@ -0,0 +1,70 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef DEX_ENCODE_VALUE_H +#define DEX_ENCODE_VALUE_H +#include "dex_class.h" +#include "dexfile_interface.h" + +namespace maple { +namespace bc { +enum DexEncodeValueType { + kValueByte = 0x00, // (none; must be 0) ubyte[1] + kValueShort = 0x02, // size - 1 (0…1) ubyte[size] + kValueChar = 0x03, // size - 1 (0…1) ubyte[size] + kValueInt = 0x04, // size - 1 (0…3) ubyte[size] + kValueLong = 0x06, // size - 1 (0…7) ubyte[size] + kValueFloat = 0x10, // size - 1 (0…3) ubyte[size] + kValueDouble = 0x11, // size - 1 (0…7) ubyte[size] + kValueMethodType = 0x15, // size - 1 (0…3) ubyte[size] + kValueMethodHandle = 0x16, // size - 1 (0…3) ubyte[size] + kValueString = 0x17, // size - 1 (0…3) ubyte[size] + kValueType = 0x18, // size - 1 (0…3) ubyte[size] + kValueField = 0x19, // size - 1 (0…3) ubyte[size] + kValueMethod = 0x1a, // size - 1 (0…3) ubyte[size] + kValueEnum = 0x1b, // size - 1 (0…3) ubyte[size] + kValueArray = 0x1c, // (none; must be 0) encoded_array + kValueAnnotation = 0x1d, // (none; must be 0) encoded_annotation + kValueNull = 0x1e, // (none; must be 0) (none) + kValueBoolean = 0x1f // boolean (0…1) (none) +}; + +class DexEncodeValue { + public: + DexEncodeValue(MemPool &mpIn, maple::IDexFile &dexFileIn) : mp(mpIn), dexFile(dexFileIn) {} + ~DexEncodeValue() = default; + void ProcessEncodedValue(const uint8 **data, uint8 valueType, uint8 valueArg, MIRConst *&cst, uint32 &stringID); + + private: + uint64 GetUVal(const uint8 **data, uint8 len) const; + MIRType *GetTypeFromValueType(uint8 valueType) const; + void ProcessEncodedValue(const uint8 **data, MIRConst *&cst); + MIRStr16Const *ProcessStringValue(const uint8 **data, uint8 valueArg, uint32 &stringID); + MIRIntConst *ProcessIntValue(const uint8 **data, uint8 valueArg, MIRType &type); + MIRAggConst *ProcessArrayValue(const uint8 **data); + void ProcessAnnotationValue(const uint8 **data); + + MemPool ∓ + maple::IDexFile &dexFile; + union { + int32 i; + int64 j; + uint64 u; + float f; + double d; + } valBuf; +}; +} // namespace bc +} // namespace maple +#endif // DEX_ENCODE_VALUE_H \ No newline at end of file diff --git a/src/hir2mpl/bytecode_input/dex/include/dex_file_item_type.def b/src/hir2mpl/bytecode_input/dex/include/dex_file_item_type.def new file mode 100644 index 0000000000000000000000000000000000000000..fb8dc93c63e0f2885049c2c81ec70a3ddfc5673e --- /dev/null +++ b/src/hir2mpl/bytecode_input/dex/include/dex_file_item_type.def @@ -0,0 +1,22 @@ +// DEX_FILE_ITEM_TYPE(name) +// DEX_FILE_ITEM_TYPE(HeaderItem) +DEX_FILE_ITEM_TYPE(StringIdItem) +DEX_FILE_ITEM_TYPE(TypeIdItem) +DEX_FILE_ITEM_TYPE(ProtoIdItem) +DEX_FILE_ITEM_TYPE(FieldIdItem) +DEX_FILE_ITEM_TYPE(MethodIdItem) +DEX_FILE_ITEM_TYPE(ClassDefItem) +DEX_FILE_ITEM_TYPE(CallSiteIdItem) +DEX_FILE_ITEM_TYPE(MethodHandleItem) +// DEX_FILE_ITEM_TYPE(MapList) +DEX_FILE_ITEM_TYPE(TypeList) +DEX_FILE_ITEM_TYPE(AnnotationSetRefList) +DEX_FILE_ITEM_TYPE(AnnotationSetItem) +/*DEX_FILE_ITEM_TYPE(ClassDataItem) +DEX_FILE_ITEM_TYPE(CodeItem) +DEX_FILE_ITEM_TYPE(StringDataItem) +DEX_FILE_ITEM_TYPE(DebugInfoItem) +DEX_FILE_ITEM_TYPE(AnnotationItem) +DEX_FILE_ITEM_TYPE(EncodedArrayItem) +DEX_FILE_ITEM_TYPE(AnnotationsDirectoryItem) +DEX_FILE_ITEM_TYPE(HiddenapiClassDataItem)*/ \ No newline at end of file diff --git a/src/hir2mpl/bytecode_input/dex/include/dex_file_util.h b/src/hir2mpl/bytecode_input/dex/include/dex_file_util.h new file mode 100644 index 0000000000000000000000000000000000000000..d2741f6ed83ff20bf18e79bd5909ec0867618b5c --- /dev/null +++ 
b/src/hir2mpl/bytecode_input/dex/include/dex_file_util.h @@ -0,0 +1,87 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MPL_FE_BC_INPUT_DEX_FILE_UTIL_H +#define MPL_FE_BC_INPUT_DEX_FILE_UTIL_H +#include "types_def.h" +namespace maple { +namespace bc { +enum DexOpCode { +#define OP(opcode, category, kind, wide, throwable) kDexOp##opcode, +#include "dex_opcode.def" +#undef OP +}; + +class DexFileUtil { + public: + // Access flags + static constexpr uint32 kDexAccPublic = 0x0001; // class, method, field + static constexpr uint32 kDexAccPrivate = 0x0002; // method, field + static constexpr uint32 kDexAccProtected = 0x0004; // method, field + static constexpr uint32 kDexAccStatic = 0x0008; // method, field + static constexpr uint32 kDexAccFinal = 0x0010; // class, method, field + static constexpr uint32 kAccDexSynchronized = 0x0020; // method (only allowed on natives) + static constexpr uint32 kDexAccSuper = 0x0020; // class + static constexpr uint32 kDexAccBridge = 0x0040; // method + static constexpr uint32 kDexAccVolatile = 0x0040; // field + static constexpr uint32 kDexAccVarargs = 0x0080; // method, method + static constexpr uint32 kDexAccTransient = 0x0080; // field + static constexpr uint32 kDexAccNative = 0x0100; // method + static constexpr uint32 kDexAccInterface = 0x0200; // class + static constexpr uint32 kDexAccAbstract = 0x0400; // class, method + static constexpr uint32 kDexAccStrict = 0x0800; // method + static constexpr uint32 kDexAccSynthetic = 0x1000; // class, method + static constexpr uint32 kDexAccAnnotation = 0x2000; // class + static constexpr uint32 kDexAccEnum = 0x4000; + static constexpr uint32 kDexAccConstructor = 0x00010000; // method + static constexpr uint32 kDexDeclaredSynchronized = 0x00020000; + + enum ItemType { + kTypeHeaderItem = 0x0000, + kTypeStringIdItem, + kTypeTypeIdItem, + kTypeProtoIdItem, + kTypeFieldIdItem, + kTypeMethodIdItem, + kTypeClassDefItem, + kTypeCallSiteIdItem, + kTypeMethodHandleItem, + kTypeMapList = 0x1000, + kTypeTypeList, + kTypeAnnotationSetRefList, + kTypeAnnotationSetItem, + kTypeClassDataItem = 0x2000, + kTypeCodeItem, + kTypeStringDataItem, + kTypeDebugInfoItem, + kTypeAnnotationItem, + kTypeEncodedArrayItem, + kTypeAnnotationsDirectoryItem, + kTypeHiddenapiClassDataItem = 0xF000 + }; + + static uint32 Adler32(const uint8 *data, uint32 size); + static constexpr uint8 kMagicSize = 8; + static const uint8 kDexFileMagic[kMagicSize]; + static constexpr uint32 kCheckSumDataOffSet = 12; + static constexpr uint32 kFileSizeOffSet = 32; + static constexpr uint32 kHeaderSizeOffSet = 36; + static constexpr uint32 kEndianTagOffSet = 40; + static const uint8 kEndianConstant[4]; // 0x12345678; + static const uint8 kReverseEndianConstant[4]; // 0x78563412; + static constexpr uint32 kNoIndex = 0xffffffff; +}; +} // namespace bc +} // namespace maple +#endif // MPL_FE_BC_INPUT_DEX_FILE_UTIL_H \ No newline at end of file diff --git a/src/hir2mpl/bytecode_input/dex/include/dex_op.h 
b/src/hir2mpl/bytecode_input/dex/include/dex_op.h new file mode 100644 index 0000000000000000000000000000000000000000..4a98e2425a1d43ee8822b4cc182f8b4a2f11ee50 --- /dev/null +++ b/src/hir2mpl/bytecode_input/dex/include/dex_op.h @@ -0,0 +1,718 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef HIR2MPL_BC_INPUT_INCLUDE_DEX_OP_H +#define HIR2MPL_BC_INPUT_INCLUDE_DEX_OP_H +#include +#include +#include "bc_op_factory.h" +#include "bc_instruction.h" +#include "dex_file_util.h" +#include "bc_parser_base.h" +#include "fe_manager.h" +namespace maple { +namespace bc { +class DexOp : public BCInstruction { + public: + DexOp(MapleAllocator &allocatorIn, uint32 pcIn, DexOpCode opcodeIn); + ~DexOp() = default; + void SetVA(uint32 num); + void SetVB(uint32 num); + void SetWideVB(uint64 num); + void SetArgs(const MapleList &args); + void SetVC(uint32 num); + void SetVH(uint32 num); + static std::string GetArrayElementTypeFromArrayType(const std::string &typeName); + + protected: + void ParseImpl(BCClassMethod &method) override { + (void) method; + } + // Should be removed after all instruction impled + std::list EmitToFEIRStmtsImpl() override { + return std::list(); + } + + virtual void SetVAImpl(uint32 num) {} + virtual void SetVBImpl(uint32 num) {} + virtual void SetWideVBImpl(uint64 num) {} + virtual void SetArgsImpl(const MapleList &args) {}; + virtual void SetVCImpl(uint32 num) {} + virtual void SetVHImpl(uint32 num) {} + + StructElemNameIdx *structElemNameIdx = nullptr; +}; + +struct DexReg : public BCReg { + uint32 dexLitStrIdx = UINT32_MAX; // string idx of dex + uint32 dexTypeIdx = UINT32_MAX; // type idx of dex +}; + +class DexOpNop : public DexOp { + public: + DexOpNop(MapleAllocator &allocatorIn, uint32 pcIn, DexOpCode opcodeIn); + ~DexOpNop() = default; +}; + +// 0x01: move vA, vB ~ 0x09: move-object/16 vAAAA, vBBBB +class DexOpMove : public DexOp { + public: + DexOpMove(MapleAllocator &allocatorIn, uint32 pcIn, DexOpCode opcodeIn); + ~DexOpMove() = default; + + protected: + void SetVAImpl(uint32 num) override; + void SetVBImpl(uint32 num) override; + void SetRegTypeInTypeInferImpl() override; + std::list EmitToFEIRStmtsImpl() override; + DexReg vA; + DexReg vB; +}; + +// 0x0a: move-result vAA ~ 0x0c: move-result-object vAA +class DexOpMoveResult : public DexOp { + public: + DexOpMoveResult(MapleAllocator &allocatorIn, uint32 pcIn, DexOpCode opcodeIn); + ~DexOpMoveResult() = default; + void SetVATypeNameIdx(const GStrIdx &idx); + DexReg GetVA() const; + + protected: + void SetVAImpl(uint32 num) override; + void SetBCRegTypeImpl(const BCInstruction &inst) override; + DexReg vA; +}; + +// 0x0c +class DexOpMoveException : public DexOp { + public: + DexOpMoveException(MapleAllocator &allocatorIn, uint32 pcIn, DexOpCode opcodeIn); + ~DexOpMoveException() = default; + + protected: + void SetVAImpl(uint32 num) override; + void SetBCRegTypeImpl(const BCInstruction &inst) override; + std::list EmitToFEIRStmtsImpl() override; + DexReg vA; +}; + +// 0x0e ~ 0x11 
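+// Opcodes 0x0e..0x11 are return-void, return vAA, return-wide vAA and return-object vAA; vA holds
+// the register being returned and the void form is tracked through the isReturnVoid flag.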
+class DexOpReturn : public DexOp { + public: + DexOpReturn(MapleAllocator &allocatorIn, uint32 pcIn, DexOpCode opcodeIn); + ~DexOpReturn() = default; + + protected: + void SetVAImpl(uint32 num) override; + void ParseImpl(BCClassMethod &method) override; + std::list EmitToFEIRStmtsImpl() override; + DexReg vA; + bool isReturnVoid = false; +}; + +// 0x12 ~ 0x19 +class DexOpConst : public DexOp { + public: + DexOpConst(MapleAllocator &allocatorIn, uint32 pcIn, DexOpCode opcodeIn); + ~DexOpConst() = default; + + protected: + void SetVAImpl(uint32 num) override; + void SetVBImpl(uint32 num) override; + void SetWideVBImpl(uint64 num) override; + std::list EmitToFEIRStmtsImpl() override; + + private: + DexReg vA; +}; + +// 0x1a ~ 0x1b +class DexOpConstString : public DexOp { + public: + DexOpConstString(MapleAllocator &allocatorIn, uint32 pcIn, DexOpCode opcodeIn); + ~DexOpConstString() = default; + + protected: + void SetVAImpl(uint32 num) override; + void SetVBImpl(uint32 num) override; + void ParseImpl(BCClassMethod &method) override; + std::list EmitToFEIRStmtsImpl() override; + uint32 fileIdx = 0; + DexReg vA; + MapleString strValue; +}; + +// 0x1c +class DexOpConstClass : public DexOp { + public: + DexOpConstClass(MapleAllocator &allocatorIn, uint32 pcIn, DexOpCode opcodeIn); + ~DexOpConstClass() = default; + + protected: + void SetVAImpl(uint32 num) override; + void SetVBImpl(uint32 num) override; + void ParseImpl(BCClassMethod &method) override; + std::list EmitToFEIRStmtsImpl() override; + DexReg vA; + uint32 dexTypeIdx = 0; + GStrIdx mplTypeNameIdx; +}; + +// 0x1d ~ 0x1e +class DexOpMonitor : public DexOp { + public: + DexOpMonitor(MapleAllocator &allocatorIn, uint32 pcIn, DexOpCode opcodeIn); + ~DexOpMonitor() = default; + + protected: + void SetVAImpl(uint32 num) override; + std::list EmitToFEIRStmtsImpl() override; + DexReg vA; +}; + +// 0x1f +class DexOpCheckCast : public DexOp { + public: + DexOpCheckCast(MapleAllocator &allocatorIn, uint32 pcIn, DexOpCode opcodeIn); + ~DexOpCheckCast() = default; + + protected: + void SetVAImpl(uint32 num) override; + void SetVBImpl(uint32 num) override; + void ParseImpl(BCClassMethod &method) override; + std::list EmitToFEIRStmtsImpl() override; + DexReg vA; + DexReg vDef; + uint32 targetDexTypeIdx = 0; + GStrIdx targetTypeNameIdx; +}; + +// 0x20 +class DexOpInstanceOf : public DexOp { + public: + DexOpInstanceOf(MapleAllocator &allocatorIn, uint32 pcIn, DexOpCode opcodeIn); + ~DexOpInstanceOf() = default; + + protected: + void SetVAImpl(uint32 num) override; + void SetVBImpl(uint32 num) override; + void SetVCImpl(uint32 num) override; + void ParseImpl(BCClassMethod &method) override; + std::list EmitToFEIRStmtsImpl() override; + DexReg vA; + DexReg vB; + uint32 targetDexTypeIdx = 0; + std::string typeName; +}; + +// 0x21 +class DexOpArrayLength : public DexOp { + public: + DexOpArrayLength(MapleAllocator &allocatorIn, uint32 pcIn, DexOpCode opcodeIn); + ~DexOpArrayLength() = default; + + protected: + void SetVAImpl(uint32 num) override; + void SetVBImpl(uint32 num) override; + std::list EmitToFEIRStmtsImpl() override; + DexReg vA; + DexReg vB; +}; + +// 0x22 +class DexOpNewInstance : public DexOp { + public: + DexOpNewInstance(MapleAllocator &allocatorIn, uint32 pcIn, DexOpCode opcodeIn); + ~DexOpNewInstance() = default; + + private: + void SetVAImpl(uint32 num) override; + void SetVBImpl(uint32 num) override; + void ParseImpl(BCClassMethod &method) override; + std::list EmitToFEIRStmtsImpl() override; + DexReg vA; + bool isSkipNewString = 
false; + // isRcPermanent is true means the rc annotation @Permanent is used + bool isRcPermanent = false; +}; + +// 0x23 +class DexOpNewArray : public DexOp { + public: + DexOpNewArray(MapleAllocator &allocatorIn, uint32 pcIn, DexOpCode opcodeIn); + ~DexOpNewArray() = default; + + private: + void SetVAImpl(uint32 num) override; + void SetVBImpl(uint32 num) override; + void SetVCImpl(uint32 num) override; + void ParseImpl(BCClassMethod &method) override; + std::list EmitToFEIRStmtsImpl() override; + DexReg vA; + DexReg vB; + // isRcPermanent is true means the rc annotation @Permanent is used + bool isRcPermanent = false; +}; + +// 0x24 ~ 0x25 +class DexOpFilledNewArray : public DexOp { + public: + DexOpFilledNewArray(MapleAllocator &allocatorIn, uint32 pcIn, DexOpCode opcodeIn); + ~DexOpFilledNewArray() = default; + GStrIdx GetReturnType() const; + + protected: + std::list EmitToFEIRStmtsImpl() override; + + private: + void SetVAImpl(uint32 num) override; + void SetVBImpl(uint32 num) override; + void SetArgsImpl(const MapleList &args) override; + void ParseImpl(BCClassMethod &method) override; + bool isRange = false; + uint32 argsSize = 0; + uint32 dexArrayTypeIdx = UINT32_MAX; + GStrIdx arrayTypeNameIdx; + GStrIdx elemTypeNameIdx; + MapleList argRegs; + MapleVector vRegs; +}; + +// 0x26 +class DexOpFillArrayData : public DexOp { + public: + DexOpFillArrayData(MapleAllocator &allocatorIn, uint32 pcIn, DexOpCode opcodeIn); + ~DexOpFillArrayData() = default; + + private: + void SetVAImpl(uint32 num) override; + void SetVBImpl(uint32 num) override; + void ParseImpl(BCClassMethod &method) override; + std::list EmitToFEIRStmtsImpl() override; + DexReg vA; + const int8 *arrayData = nullptr; + int32 offset = INT32_MAX; + uint32 size = 0; +}; + +// 0x27 +class DexOpThrow : public DexOp { + public: + DexOpThrow(MapleAllocator &allocatorIn, uint32 pcIn, DexOpCode opcodeIn); + ~DexOpThrow() = default; + + private: + void SetVAImpl(uint32 num) override; + std::list EmitToFEIRStmtsImpl() override; + DexReg vA; +}; + +// 0x28 ~ 0x2a +class DexOpGoto : public DexOp { + public: + DexOpGoto(MapleAllocator &allocatorIn, uint32 pcIn, DexOpCode opcodeIn); + ~DexOpGoto() = default; + + private: + std::vector GetTargetsImpl() const override; + void SetVAImpl(uint32 num) override; + void ParseImpl(BCClassMethod &method) override; + std::list EmitToFEIRStmtsImpl() override; + int32 offset = 0; + uint32 target = 0; +}; + +// 0x2b ~ 0x2c +class DexOpSwitch : public DexOp { + public: + DexOpSwitch(MapleAllocator &allocatorIn, uint32 pcIn, DexOpCode opcodeIn); + ~DexOpSwitch() = default; + + private: + void SetVAImpl(uint32 num) override; + void SetVBImpl(uint32 num) override; + void ParseImpl(BCClassMethod &method) override; + std::vector GetTargetsImpl() const override; + std::list EmitToFEIRStmtsImpl() override; + bool isPacked = false; + int32 offset = 0; + DexReg vA; + MapleMap> keyTargetOPpcMap; +}; + +// 0x2d ~ 0x31 +class DexOpCompare : public DexOp { + public: + DexOpCompare(MapleAllocator &allocatorIn, uint32 pcIn, DexOpCode opcodeIn); + ~DexOpCompare() = default; + + private: + void SetVAImpl(uint32 num) override; + void SetVBImpl(uint32 num) override; + void SetVCImpl(uint32 num) override; + std::list EmitToFEIRStmtsImpl() override; + Opcode GetOpcodeFromDexIns() const; + DexReg vA; + DexReg vB; + DexReg vC; +}; + +// 0x32 ~ 0x37 +class DexOpIfTest : public DexOp { + public: + DexOpIfTest(MapleAllocator &allocatorIn, uint32 pcIn, DexOpCode opcodeIn); + ~DexOpIfTest() = default; + + protected: + 
std::vector GetTargetsImpl() const override; + void SetVAImpl(uint32 num) override; + void SetVBImpl(uint32 num) override; + void SetVCImpl(uint32 num) override; + void ParseImpl(BCClassMethod &method) override; + std::list EmitToFEIRStmtsImpl() override; + DexReg vA; + DexReg vB; + int32 offset = 0; + uint32 target = 0; +}; + +// 0x38 ~ 0x3d +class DexOpIfTestZ : public DexOp { + public: + DexOpIfTestZ(MapleAllocator &allocatorIn, uint32 pcIn, DexOpCode opcodeIn); + ~DexOpIfTestZ() = default; + + private: + std::vector GetTargetsImpl() const override; + void SetVAImpl(uint32 num) override; + void SetVBImpl(uint32 num) override; + void ParseImpl(BCClassMethod &method) override; + std::list EmitToFEIRStmtsImpl() override; + DexReg vA; + int32 offset = 0; + uint32 target = 0; +}; + +// 0x3e ~ 0x43, 0x73, 0x79 ~ 0x7a, 0xe3 ~ 0x f9 +class DexOpUnused : public DexOp { + public: + DexOpUnused(MapleAllocator &allocatorIn, uint32 pcIn, DexOpCode opcodeIn); + ~DexOpUnused() = default; +}; + +// 0x44 ~ 0x4a +class DexOpAget : public DexOp { + public: + DexOpAget(MapleAllocator &allocatorIn, uint32 pcIn, DexOpCode opcodeIn); + ~DexOpAget() = default; + + private: + void SetVAImpl(uint32 num) override; + void SetVBImpl(uint32 num) override; + void SetVCImpl(uint32 num) override; + void ParseImpl(BCClassMethod &method) override; + void SetRegTypeInTypeInferImpl() override; + std::list EmitToFEIRStmtsImpl() override; + DexReg vA; + DexReg vB; + DexReg vC; +}; + +// 0x4a ~ 0x51 +class DexOpAput : public DexOp { + public: + DexOpAput(MapleAllocator &allocatorIn, uint32 pcIn, DexOpCode opcodeIn); + ~DexOpAput() = default; + + private: + void SetVAImpl(uint32 num) override; + void SetVBImpl(uint32 num) override; + void SetVCImpl(uint32 num) override; + void ParseImpl(BCClassMethod &method) override; + void SetRegTypeInTypeInferImpl() override; + std::list EmitToFEIRStmtsImpl() override; + DexReg vA; + DexReg vB; + DexReg vC; +}; + +// 0x52 ~ 0x58 +class DexOpIget : public DexOp { + public: + DexOpIget(MapleAllocator &allocatorIn, uint32 pcIn, DexOpCode opcodeIn); + ~DexOpIget() = default; + + private: + void SetVAImpl(uint32 num) override; + void SetVBImpl(uint32 num) override; + void SetVCImpl(uint32 num) override; + void ParseImpl(BCClassMethod &method) override; + std::list EmitToFEIRStmtsImpl() override; + DexReg vA; + DexReg vB; + uint32 index = UINT32_MAX; +}; + +// 0x59 ~ 0x5f +class DexOpIput : public DexOp { + public: + DexOpIput(MapleAllocator &allocatorIn, uint32 pcIn, DexOpCode opcodeIn); + ~DexOpIput() = default; + + private: + void SetVAImpl(uint32 num) override; + void SetVBImpl(uint32 num) override; + void SetVCImpl(uint32 num) override; + void ParseImpl(BCClassMethod &method) override; + std::list EmitToFEIRStmtsImpl() override; + DexReg vA; + DexReg vB; + uint32 index = UINT32_MAX; +}; + +// 0x60 ~ 0x66 +class DexOpSget : public DexOp { + public: + DexOpSget(MapleAllocator &allocatorIn, uint32 pcIn, DexOpCode opcodeIn); + ~DexOpSget() = default; + + private: + void SetVAImpl(uint32 num) override; + void SetVBImpl(uint32 num) override; + void ParseImpl(BCClassMethod &method) override; + std::list EmitToFEIRStmtsImpl() override; + uint32 index = UINT32_MAX; + GStrIdx containerNameIdx; + DexReg vA; + int32 dexFileHashCode = -1; +}; + +// 0x67 ~ 0x6d +class DexOpSput : public DexOp { + public: + DexOpSput(MapleAllocator &allocatorIn, uint32 pcIn, DexOpCode opcodeIn); + ~DexOpSput() = default; + + private: + void SetVAImpl(uint32 num) override; + void SetVBImpl(uint32 num) override; + void 
ParseImpl(BCClassMethod &method) override; + std::list EmitToFEIRStmtsImpl() override; + DexReg vA; + uint32 index = UINT32_MAX; + int32 dexFileHashCode = -1; +}; + +class DexOpInvoke : public DexOp { + public: + DexOpInvoke(MapleAllocator &allocatorIn, uint32 pcIn, DexOpCode opcodeIn); + ~DexOpInvoke() = default; + std::string GetReturnType() const; + + protected: + void SetVAImpl(uint32 num) override; + void SetVBImpl(uint32 num) override; + void SetVCImpl(uint32 num) override; + void SetArgsImpl(const MapleList &args) override; + void ParseImpl(BCClassMethod &method) override; + void PrepareInvokeParametersAndReturn(const FEStructMethodInfo &feMethodInfo, FEIRStmtCallAssign &stmt) const; + std::list EmitToFEIRStmtsImpl() override; + bool IsStatic() const; + bool ReplaceStringFactory(BCReader::ClassElem &methodInfo, MapleList &argRegNums); + bool isStringFactory = false; + uint32 argSize = 0; + uint32 arg0VRegNum = UINT32_MAX; + uint32 methodIdx = 0; + MapleList argRegs; + MapleVector argVRegs; + std::vector retArgsTypeNames; + DexReg retReg; +}; + +// 0x6e, 0x74 +class DexOpInvokeVirtual : public DexOpInvoke { + public: + DexOpInvokeVirtual(MapleAllocator &allocatorIn, uint32 pcIn, DexOpCode opcodeIn); + ~DexOpInvokeVirtual() = default; +}; + +// 0x6f, 0x75 +class DexOpInvokeSuper : public DexOpInvoke { + public: + DexOpInvokeSuper(MapleAllocator &allocatorIn, uint32 pcIn, DexOpCode opcodeIn); + ~DexOpInvokeSuper() = default; +}; + +// 0x70, 0x76 +class DexOpInvokeDirect : public DexOpInvoke { + public: + DexOpInvokeDirect(MapleAllocator &allocatorIn, uint32 pcIn, DexOpCode opcodeIn); + ~DexOpInvokeDirect() = default; +}; + +// 0x71, 0x77 +class DexOpInvokeStatic : public DexOpInvoke { + public: + DexOpInvokeStatic(MapleAllocator &allocatorIn, uint32 pcIn, DexOpCode opcodeIn); + ~DexOpInvokeStatic() = default; +}; + +// 0x72, 0x78 +class DexOpInvokeInterface : public DexOpInvoke { + public: + DexOpInvokeInterface(MapleAllocator &allocatorIn, uint32 pcIn, DexOpCode opcodeIn); + ~DexOpInvokeInterface() = default; +}; + +// 0x7b, 0x8f +class DexOpUnaryOp : public DexOp { + public: + DexOpUnaryOp(MapleAllocator &allocatorIn, uint32 pcIn, DexOpCode opcodeIn); + ~DexOpUnaryOp() = default; + + private: + void SetVAImpl(uint32 num) override; + void SetVBImpl(uint32 num) override; + std::list EmitToFEIRStmtsImpl() override; + static std::map> InitOpcodeMapForUnary(); + // map> + static inline std::map> &GetOpcodeMapForUnary() { + static std::map> opcodeMapForUnary = InitOpcodeMapForUnary(); + return opcodeMapForUnary; + } + DexReg vA; + DexReg vB; + Opcode mirOp = OP_undef; +}; + +// 0x90, 0xaf +class DexOpBinaryOp : public DexOp { + public: + DexOpBinaryOp(MapleAllocator &allocatorIn, uint32 pcIn, DexOpCode opcodeIn); + ~DexOpBinaryOp() = default; + + private: + void SetVAImpl(uint32 num) override; + void SetVBImpl(uint32 num) override; + void SetVCImpl(uint32 num) override; + Opcode GetOpcodeFromDexIns() const; + std::list EmitToFEIRStmtsImpl() override; + DexReg vA; + DexReg vB; + DexReg vC; +}; + +// 0xb0, 0xcf +class DexOpBinaryOp2Addr : public DexOp { + public: + DexOpBinaryOp2Addr(MapleAllocator &allocatorIn, uint32 pcIn, DexOpCode opcodeIn); + ~DexOpBinaryOp2Addr() = default; + + private: + void SetVAImpl(uint32 num) override; + void SetVBImpl(uint32 num) override; + Opcode GetOpcodeFromDexIns() const; + std::list EmitToFEIRStmtsImpl() override; + DexReg vDef; + DexReg vA; + DexReg vB; +}; + +// 0xd0 ~ 0xe2 +class DexOpBinaryOpLit : public DexOp { + public: + 
DexOpBinaryOpLit(MapleAllocator &allocatorIn, uint32 pcIn, DexOpCode opcodeIn); + ~DexOpBinaryOpLit() = default; + + private: + void SetVAImpl(uint32 num) override; + void SetVBImpl(uint32 num) override; + void SetVCImpl(uint32 num) override; + Opcode GetOpcodeFromDexIns() const; + std::list EmitToFEIRStmtsImpl() override; + bool isLit8; // 8 bits / 16 bits signed int constant + union { + int8 i8; + int16 i16; + } constValue; + DexReg vA; + DexReg vB; +}; + +// 0xfa ~ 0xfb +class DexOpInvokePolymorphic: public DexOpInvoke { + public: + DexOpInvokePolymorphic(MapleAllocator &allocatorIn, uint32 pcIn, DexOpCode opcodeIn); + ~DexOpInvokePolymorphic() = default; + + protected: + void SetVHImpl(uint32 num) override; + void ParseImpl(BCClassMethod &method) override; + std::list EmitToFEIRStmtsImpl() override; + bool isStatic = false; + uint32 protoIdx = 0; + std::string fullNameMpl; + std::string protoName; + uint32 callerClassID = UINT32_MAX; +}; + +// 0xfc ~ 0xfd +class DexOpInvokeCustom : public DexOp { + public: + DexOpInvokeCustom(MapleAllocator &allocatorIn, uint32 pcIn, DexOpCode opcodeIn); + ~DexOpInvokeCustom() = default; + + private: + void SetVBImpl(uint32 num) override; + void SetArgsImpl(const MapleList &args) override; + MapleList argRegs; + MapleVector argVRegs; + uint32 callSiteIdx = 0; +}; + +// 0xfe +class DexOpConstMethodHandle : public DexOp { + public: + DexOpConstMethodHandle(MapleAllocator &allocatorIn, uint32 pcIn, DexOpCode opcodeIn); + ~DexOpConstMethodHandle() = default; + + private: + void SetVAImpl(uint32 num) override; + void SetVBImpl(uint32 num) override; + DexReg vA; + uint32 mhIdx = 0; +}; + +// 0xff +class DexOpConstMethodType : public DexOp { + public: + DexOpConstMethodType(MapleAllocator &allocatorIn, uint32 pcIn, DexOpCode opcodeIn); + ~DexOpConstMethodType() = default; + + private: + void SetVAImpl(uint32 num) override; + void SetVBImpl(uint32 num) override; + DexReg vA; + uint32 protoIdx = 0; +}; + +constexpr BCOpFactory::funcPtr dexOpGeneratorMap[] = { +#define OP(opcode, category, kind, wide, throwable) \ + BCOpFactory::BCOpGenerator, +#include "dex_opcode.def" +#undef OP +}; +} // namespace bc +} // namespace maple +#endif // HIR2MPL_BC_INPUT_INCLUDE_DEX_OP_H diff --git a/src/hir2mpl/bytecode_input/dex/include/dex_opcode.def b/src/hir2mpl/bytecode_input/dex/include/dex_opcode.def new file mode 100644 index 0000000000000000000000000000000000000000..9ab9e739fb953b0139767393855a055663fb05d6 --- /dev/null +++ b/src/hir2mpl/bytecode_input/dex/include/dex_opcode.def @@ -0,0 +1,257 @@ +// OP(name, category, kind, isWide, throwable) +OP(Nop, Nop, FallThru, false, false) +OP(Move, Move, FallThru, false, false) +OP(MoveFrom16, Move, FallThru, false, false) +OP(Move16, Move, FallThru, false, false) +OP(MoveWide, Move, FallThru, true, false) +OP(MoveWideFrom16, Move, FallThru, true, false) +OP(MoveWide16, Move, FallThru, true, false) +OP(MoveObject, Move, FallThru, false, false) +OP(MoveObjectFrom16, Move, FallThru, false, false) +OP(MoveObject16, Move, FallThru, false, false) +OP(MoveResult, MoveResult, FallThru, false, false) +OP(MoveResultWide, MoveResult, FallThru, true, false) +OP(MoveResultObject, MoveResult, FallThru, false, false) +OP(MoveException, MoveException, Catch | kFallThru, false, false) +OP(ReturnVoid, Return, UnKnownKind, false, false) +OP(Return, Return, UnKnownKind, false, false) +OP(ReturnWide, Return, UnKnownKind, true, false) +OP(ReturnObject, Return, UnKnownKind, false, false) +OP(Const4, Const, FallThru, false, false) +OP(Const16, Const, 
FallThru, false, false) +OP(Const, Const, FallThru, false, false) +OP(ConstHigh16, Const, FallThru, false, false) +OP(ConstWide16, Const, FallThru, true, false) +OP(ConstWide32, Const, FallThru, true, false) +OP(ConstWide, Const, FallThru, true, false) +OP(ConstWideHigh16, Const, FallThru, true, false) +OP(ConstString, ConstString, FallThru, false, true) +OP(ConstStringJumbo, ConstString, FallThru, false, true) +OP(ConstClass, ConstClass, FallThru, false, true) +OP(MonitorEnter, Monitor, FallThru, false, true) +OP(MonitorExit, Monitor, FallThru, false, true) +OP(CheckCast, CheckCast, FallThru, false, true) +OP(InstanceOf, InstanceOf, FallThru, false, true) +OP(ArrayLength, ArrayLength, FallThru, false, true) +OP(NewInstance, NewInstance, FallThru, false, true) +OP(NewArray, NewArray, FallThru, false, true) +OP(FilledNewArray, FilledNewArray, FallThru, false, true) +OP(FilledNewArrayRange, FilledNewArray, FallThru, false, true) +OP(FillArrayData, FillArrayData, FallThru, false, true) +OP(Throw, Throw, UnKnownKind, false, true) +OP(Goto, Goto, Goto, false, false) +OP(Goto16, Goto, Goto, false, false) +OP(Goto32, Goto, Goto, false, false) +OP(PackedSwitch, Switch, Switch | kFallThru, false, false) +OP(SparseSwitch, Switch, Switch | kFallThru, false, false) +OP(CmplFloat, Compare, FallThru, false, false) +OP(CmpgFloat, Compare, FallThru, false, false) +OP(CmplDouble, Compare, FallThru, true, false) +OP(CmpgDouble, Compare, FallThru, true, false) +OP(CmpLong, Compare, FallThru, true, false) +OP(IfEq, IfTest, ConditionBranch | kFallThru, false, false) +OP(IfNe, IfTest, ConditionBranch | kFallThru, false, false) +OP(IfLt, IfTest, ConditionBranch | kFallThru, false, false) +OP(IfGe, IfTest, ConditionBranch | kFallThru, false, false) +OP(IfGt, IfTest, ConditionBranch | kFallThru, false, false) +OP(IfLe, IfTest, ConditionBranch | kFallThru, false, false) +OP(IfEqZ, IfTestZ, ConditionBranch | kFallThru, false, false) +OP(IfNeZ, IfTestZ, ConditionBranch | kFallThru, false, false) +OP(IfLtZ, IfTestZ, ConditionBranch | kFallThru, false, false) +OP(IfGeZ, IfTestZ, ConditionBranch | kFallThru, false, false) +OP(IfGtZ, IfTestZ, ConditionBranch | kFallThru, false, false) +OP(IfLeZ, IfTestZ, ConditionBranch | kFallThru, false, false) +OP(Unused3E, Unused, UnKnownKind, false, false) +OP(Unused3F, Unused, UnKnownKind, false, false) +OP(Unused40, Unused, UnKnownKind, false, false) +OP(Unused41, Unused, UnKnownKind, false, false) +OP(Unused42, Unused, UnKnownKind, false, false) +OP(Unused43, Unused, UnKnownKind, false, false) +OP(Aget, Aget, FallThru, false, true) +OP(AgetWide, Aget, FallThru, true, true) +OP(AgetObject, Aget, FallThru, false, true) +OP(AgetBoolean, Aget, FallThru, false, true) +OP(AgetByte, Aget, FallThru, false, true) +OP(AgetChar, Aget, FallThru, false, true) +OP(AgetShort, Aget, FallThru, false, true) +OP(Aput, Aput, FallThru, false, true) +OP(AputWide, Aput, FallThru, true, true) +OP(AputObject, Aput, FallThru, false, true) +OP(AputBoolean, Aput, FallThru, false, true) +OP(AputByte, Aput, FallThru, false, true) +OP(AputChar, Aput, FallThru, false, true) +OP(AputShort, Aput, FallThru, false, true) +OP(Iget, Iget, FallThru, false, true) +OP(IgetWide, Iget, FallThru, true, true) +OP(IgetObject, Iget, FallThru, false, true) +OP(IgetBoolean, Iget, FallThru, false, true) +OP(IgetByte, Iget, FallThru, false, true) +OP(IgetChar, Iget, FallThru, false, true) +OP(IgetShort, Iget, FallThru, false, true) +OP(Iput, Iput, FallThru, false, true) +OP(IputWide, Iput, FallThru, true, true) +OP(IputObject, 
Iput, FallThru, false, true) +OP(IputBoolean, Iput, FallThru, false, true) +OP(IputByte, Iput, FallThru, false, true) +OP(IputChar, Iput, FallThru, false, true) +OP(IputShort, Iput, FallThru, false, true) +OP(Sget, Sget, FallThru, false, true) +OP(SgetWide, Sget, FallThru, true, true) +OP(SgetObject, Sget, FallThru, false, true) +OP(SgetBoolean, Sget, FallThru, false, true) +OP(SgetByte, Sget, FallThru, false, true) +OP(SgetChar, Sget, FallThru, false, true) +OP(SgetShort, Sget, FallThru, false, true) +OP(Sput, Sput, FallThru, false, true) +OP(SputWide, Sput, FallThru, true, true) +OP(SputObject, Sput, FallThru, false, true) +OP(SputBoolean, Sput, FallThru, false, true) +OP(SputByte, Sput, FallThru, false, true) +OP(SputChar, Sput, FallThru, false, true) +OP(SputShort, Sput, FallThru, false, true) +OP(InvokeVirtual, InvokeVirtual, FallThru, false, true) +OP(InvokeSuper, InvokeSuper, FallThru, false, true) +OP(InvokeDirect, InvokeDirect, FallThru, false, true) +OP(InvokeStatic, InvokeStatic, FallThru, false, true) +OP(InvokeInterface, InvokeInterface, FallThru, false, true) +OP(Unused73, Unused, UnKnownKind, false, false) +OP(InvokeVirtualRange, InvokeVirtual, FallThru, false, true) +OP(InvokeSuperRange, InvokeSuper, FallThru, false, true) +OP(InvokeDirectRange, InvokeDirect, FallThru, false, true) +OP(InvokeStaticRange, InvokeStatic, FallThru, false, true) +OP(InvokeInterfaceRange, InvokeInterface, FallThru, false, true) +OP(Unused79, Unused, UnKnownKind, false, false) +OP(Unused7a, Unused, UnKnownKind, false, false) +OP(NegInt, UnaryOp, FallThru, false, false) +OP(NotInt, UnaryOp, FallThru, false, false) +OP(NegLong, UnaryOp, FallThru, true, false) +OP(NotLong, UnaryOp, FallThru, true, false) +OP(NegFloat, UnaryOp, FallThru, false, false) +OP(NegDouble, UnaryOp, FallThru, true, false) +OP(IntToLong, UnaryOp, FallThru, true, false) +OP(IntToFloat, UnaryOp, FallThru, false, false) +OP(IntToDouble, UnaryOp, FallThru, true, false) +OP(LongToInt, UnaryOp, FallThru, true, false) +OP(LongToFloat, UnaryOp, FallThru, true, false) +OP(LongToDouble, UnaryOp, FallThru, true, false) +OP(FloatToInt, UnaryOp, FallThru, false, false) +OP(FloatToLong, UnaryOp, FallThru, true, false) +OP(FloatToDouble, UnaryOp, FallThru, true, false) +OP(DoubleToInt, UnaryOp, FallThru, true, false) +OP(DoubleToLong, UnaryOp, FallThru, true, false) +OP(DoubleToFloat, UnaryOp, FallThru, true, false) +OP(IntToByte, UnaryOp, FallThru, false, false) +OP(IntToChar, UnaryOp, FallThru, false, false) +OP(IntToShort, UnaryOp, FallThru, false, false) +OP(AddInt, BinaryOp, FallThru, false, false) +OP(SubInt, BinaryOp, FallThru, false, false) +OP(MulInt, BinaryOp, FallThru, false, false) +OP(DivInt, BinaryOp, FallThru, false, true) +OP(RemInt, BinaryOp, FallThru, false, true) +OP(AndInt, BinaryOp, FallThru, false, false) +OP(OrInt, BinaryOp, FallThru, false, false) +OP(XorInt, BinaryOp, FallThru, false, false) +OP(ShlInt, BinaryOp, FallThru, false, false) +OP(ShrInt, BinaryOp, FallThru, false, false) +OP(UshrInt, BinaryOp, FallThru, false, false) +OP(AddLong, BinaryOp, FallThru, true, false) +OP(SubLong, BinaryOp, FallThru, true, false) +OP(MulLong, BinaryOp, FallThru, true, false) +OP(DivLong, BinaryOp, FallThru, true, true) +OP(RemLong, BinaryOp, FallThru, true, true) +OP(AndLong, BinaryOp, FallThru, true, false) +OP(OrLong, BinaryOp, FallThru, true, false) +OP(XorLong, BinaryOp, FallThru, true, false) +OP(ShlLong, BinaryOp, FallThru, true, false) +OP(ShrLong, BinaryOp, FallThru, true, false) +OP(UshrLong, BinaryOp, FallThru, true, 
false) +OP(AddFloat, BinaryOp, FallThru, false, false) +OP(SubFloat, BinaryOp, FallThru, false, false) +OP(MulFloat, BinaryOp, FallThru, false, false) +OP(DivFloat, BinaryOp, FallThru, false, false) +OP(RemFloat, BinaryOp, FallThru, false, false) +OP(AddDouble, BinaryOp, FallThru, true, false) +OP(SubDouble, BinaryOp, FallThru, true, false) +OP(MulDouble, BinaryOp, FallThru, true, false) +OP(DivDouble, BinaryOp, FallThru, true, false) +OP(RemDouble, BinaryOp, FallThru, true, false) +OP(AddInt2Addr, BinaryOp2Addr, FallThru, false, false) +OP(SubInt2Addr, BinaryOp2Addr, FallThru, false, false) +OP(MulInt2Addr, BinaryOp2Addr, FallThru, false, false) +OP(DivInt2Addr, BinaryOp2Addr, FallThru, false, true) +OP(RemInt2Addr, BinaryOp2Addr, FallThru, false, true) +OP(AndInt2Addr, BinaryOp2Addr, FallThru, false, false) +OP(OrInt2Addr, BinaryOp2Addr, FallThru, false, false) +OP(XorInt2Addr, BinaryOp2Addr, FallThru, false, false) +OP(ShlInt2Addr, BinaryOp2Addr, FallThru, false, false) +OP(ShrInt2Addr, BinaryOp2Addr, FallThru, false, false) +OP(UshrInt2Addr, BinaryOp2Addr, FallThru, false, false) +OP(AddLong2Addr, BinaryOp2Addr, FallThru, true, false) +OP(SubLong2Addr, BinaryOp2Addr, FallThru, true, false) +OP(MulLong2Addr, BinaryOp2Addr, FallThru, true, false) +OP(DivLong2Addr, BinaryOp2Addr, FallThru, true, true) +OP(RemLong2Addr, BinaryOp2Addr, FallThru, true, true) +OP(AndLong2Addr, BinaryOp2Addr, FallThru, true, false) +OP(OrLong2Addr, BinaryOp2Addr, FallThru, true, false) +OP(XorLong2Addr, BinaryOp2Addr, FallThru, true, false) +OP(ShlLong2Addr, BinaryOp2Addr, FallThru, true, false) +OP(ShrLong2Addr, BinaryOp2Addr, FallThru, true, false) +OP(UshrLong2Addr, BinaryOp2Addr, FallThru, true, false) +OP(AddFloat2Addr, BinaryOp2Addr, FallThru, false, false) +OP(SubFloat2Addr, BinaryOp2Addr, FallThru, false, false) +OP(MulFloat2Addr, BinaryOp2Addr, FallThru, false, false) +OP(DivFloat2Addr, BinaryOp2Addr, FallThru, false, false) +OP(RemFloat2Addr, BinaryOp2Addr, FallThru, false, false) +OP(AddDouble2Addr, BinaryOp2Addr, FallThru, true, false) +OP(SubDouble2Addr, BinaryOp2Addr, FallThru, true, false) +OP(MulDouble2Addr, BinaryOp2Addr, FallThru, true, false) +OP(DivDouble2Addr, BinaryOp2Addr, FallThru, true, false) +OP(RemDouble2Addr, BinaryOp2Addr, FallThru, true, false) +OP(AddIntLit16, BinaryOpLit, FallThru, false, false) +OP(RsubInt, BinaryOpLit, FallThru, false, false) +OP(MulIntLit16, BinaryOpLit, FallThru, false, false) +OP(DivIntLit16, BinaryOpLit, FallThru, false, true) +OP(RemIntLit16, BinaryOpLit, FallThru, false, true) +OP(AndIntLit16, BinaryOpLit, FallThru, false, false) +OP(OrIntLit16, BinaryOpLit, FallThru, false, false) +OP(XorIntLit16, BinaryOpLit, FallThru, false, false) +OP(AddIntLit8, BinaryOpLit, FallThru, false, false) +OP(RsubIntLit8, BinaryOpLit, FallThru, false, false) +OP(MulIntLit8, BinaryOpLit, FallThru, false, false) +OP(DivIntLit8, BinaryOpLit, FallThru, false, true) +OP(RemIntLit8, BinaryOpLit, FallThru, false, true) +OP(AndIntLit8, BinaryOpLit, FallThru, false, false) +OP(OrIntLit8, BinaryOpLit, FallThru, false, false) +OP(XorIntLit8, BinaryOpLit, FallThru, false, false) +OP(ShlIntLit8, BinaryOpLit, FallThru, false, false) +OP(ShrIntLit8, BinaryOpLit, FallThru, false, false) +OP(UshrIntLit8, BinaryOpLit, FallThru, false, false) +OP(UnusedE3, Unused, UnKnownKind, false, false) +OP(UnusedE4, Unused, UnKnownKind, false, false) +OP(UnusedE5, Unused, UnKnownKind, false, false) +OP(UnusedE6, Unused, UnKnownKind, false, false) +OP(UnusedE7, Unused, UnKnownKind, false, false) 
+OP(UnusedE8, Unused, UnKnownKind, false, false) +OP(UnusedE9, Unused, UnKnownKind, false, false) +OP(UnusedEA, Unused, UnKnownKind, false, false) +OP(UnusedEB, Unused, UnKnownKind, false, false) +OP(UnusedEC, Unused, UnKnownKind, false, false) +OP(UnusedED, Unused, UnKnownKind, false, false) +OP(UnusedEE, Unused, UnKnownKind, false, false) +OP(UnusedEF, Unused, UnKnownKind, false, false) +OP(UnusedF0, Unused, UnKnownKind, false, false) +OP(UnusedF1, Unused, UnKnownKind, false, false) +OP(UnusedF2, Unused, UnKnownKind, false, false) +OP(UnusedF3, Unused, UnKnownKind, false, false) +OP(UnusedF4, Unused, UnKnownKind, false, false) +OP(UnusedF5, Unused, UnKnownKind, false, false) +OP(UnusedF6, Unused, UnKnownKind, false, false) +OP(UnusedF7, Unused, UnKnownKind, false, false) +OP(UnusedF8, Unused, UnKnownKind, false, false) +OP(UnusedF9, Unused, UnKnownKind, false, false) +OP(InvokePolymorphic, InvokePolymorphic, FallThru, false, true) +OP(InvokePolymorphicRange, InvokePolymorphic, FallThru, false, true) +OP(InvokeCustom, InvokeCustom, FallThru, false, true) +OP(InvokeCustomRange, InvokeCustom, FallThru, false, true) +OP(ConstMethodHandle, ConstMethodHandle, FallThru, false, true) +OP(ConstMethodType, ConstMethodType, FallThru, false, true) \ No newline at end of file diff --git a/src/hir2mpl/bytecode_input/dex/include/dex_parser.h b/src/hir2mpl/bytecode_input/dex/include/dex_parser.h new file mode 100644 index 0000000000000000000000000000000000000000..180c69f23e988a9be63a8c2d4fb2da860228d782 --- /dev/null +++ b/src/hir2mpl/bytecode_input/dex/include/dex_parser.h @@ -0,0 +1,59 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MPL_FE_DEX_INPUT_DEX_PARSER_H +#define MPL_FE_DEX_INPUT_DEX_PARSER_H +#include "bc_parser.h" +#include "dex_reader.h" +#include "dex_class.h" +#include "types_def.h" + +namespace maple { +namespace bc { +class DexParser : public BCParser { + public: + DexParser(uint32 fileIdxIn, const std::string &fileNameIn, const std::list &classNamesIn); + ~DexParser() = default; + void ProcessDexClassMethod(const std::unique_ptr &dexClass, bool isVirtual, + uint32 index, std::pair &idxPair); + void SetDexFile(std::unique_ptr iDexFileIn); + std::unique_ptr FindClassDef(const std::string &className); + + protected: + const BCReader *GetReaderImpl() const override; + uint32 CalculateCheckSumImpl(const uint8 *data, uint32 size) override; + bool ParseHeaderImpl() override; + bool VerifyImpl() override; + bool RetrieveIndexTables() override; + bool RetrieveUserSpecifiedClasses(std::list> &klasses) override; + bool RetrieveAllClasses(std::list> &klasses) override; + bool CollectAllDepTypeNamesImpl(std::unordered_set &depSet) override; + bool CollectMethodDepTypeNamesImpl(std::unordered_set &depSet, BCClassMethod &bcMethod) const override; + bool CollectAllClassNamesImpl(std::unordered_set &classSet) override; + void ProcessMethodBodyImpl(BCClassMethod &method, + uint32 classIdx, uint32 methodItemIdx, bool isVirtual) const override; + + private: + std::unique_ptr ProcessDexClass(uint32 classIdx); + void ProcessDexClassDef(const std::unique_ptr &dexClass); + void ProcessDexClassInterfaceParent(const std::unique_ptr &dexClass); + void ProcessDexClassFields(const std::unique_ptr &dexClass); + void ProcessDexClassMethods(const std::unique_ptr &dexClass, bool isVirtual); + void ProcessDexClassMethodDecls(const std::unique_ptr &dexClass, bool isVirtual); + void ProcessDexClassAnnotationDirectory(const std::unique_ptr &dexClass); + void ProcessDexClassStaticFieldInitValue(const std::unique_ptr &dexClass); +}; +} // namespace bc +} // namespace maple +#endif // MPL_FE_DEX_INPUT_DEX_PARSER_H \ No newline at end of file diff --git a/src/hir2mpl/bytecode_input/dex/include/dex_pragma.h b/src/hir2mpl/bytecode_input/dex/include/dex_pragma.h new file mode 100644 index 0000000000000000000000000000000000000000..d6d0b00accb6aa96a10c7e41d4e5f597aff557f3 --- /dev/null +++ b/src/hir2mpl/bytecode_input/dex/include/dex_pragma.h @@ -0,0 +1,282 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef HIR2MPL_BC_INPUT_INCLUDE_DEX_PRAGMA_H +#define HIR2MPL_BC_INPUT_INCLUDE_DEX_PRAGMA_H +#include +#include +#include +#include "bc_pragma.h" +#include "mir_module.h" +#include "mir_const.h" +#include "mempool.h" +#include "dexfile_interface.h" + +namespace maple { +namespace bc { +class DexBCAnnotationElement { + public: + DexBCAnnotationElement(MIRModule &moduleArg, MemPool &mpArg, IDexFile &iDexFileArg, const uint8_t **annotationDataArg) + : module(moduleArg), + mp(mpArg), + iDexFile(iDexFileArg), + annotationData(annotationDataArg) {} + ~DexBCAnnotationElement() { + annotationData = nullptr; + } + + static uint64 GetUVal(const uint8 **data, uint8 len) { + // get value, max 8 bytes, little-endian + uint64 val = 0; + for (uint8 j = 0; j <= len; j++) { + val |= (static_cast<uint64>(*(*data)++) << (j << 3)); + } + return val; + } + + MIRPragmaElement *EmitPragmaElement() { + return ProcessAnnotationElement(annotationData); + } + + private: + MIRPragmaElement *ProcessAnnotationElement(const uint8 **data); + void ProcessAnnotationEncodedValue(const uint8 **data, MIRPragmaElement &element, MIRConst *&cst); + void ProcessAnnotationEncodedValue(const uint8 **data, MIRPragmaElement &element, PragmaValueType valueType, + uint8 valueArg, MIRConst *&cst); + MIRIntConst *ProcessAnnotationEncodedValueInternalProcessIntValue(const uint8 **data, MIRPragmaElement &element, + uint8 valueArg, MIRType &type); + MIRStr16Const *ProcessAnnotationEncodedValueInternalProcessStringValue(const uint8 **data, MIRPragmaElement &element, + uint8 valueArg); + void ProcessAnnotationEncodedValueInternalProcessTypeValue(const uint8 **data, MIRPragmaElement &element, + uint8 valueArg); + void ProcessAnnotationEncodedValueInternalProcessFieldValue(const uint8 **data, MIRPragmaElement &element, + uint8 valueArg); + void ProcessAnnotationEncodedValueInternalProcessMethodValue(const uint8 **data, MIRPragmaElement &element, + uint8 valueArg); + MIRAggConst *ProcessAnnotationEncodedValueInternalProcessArrayValue(const uint8 **data, MIRPragmaElement &element); + void ProcessAnnotationEncodedValueInternalProcessAnnotationValue(const uint8 **data, MIRPragmaElement &element); + static MIRType *GetTypeFromValueType(PragmaValueType valueType); + + MIRModule &module; + MemPool &mp; + IDexFile &iDexFile; + const uint8_t **annotationData; +}; + +class DexBCAnnotation { + public: + DexBCAnnotation(MIRModule &moduleArg, MemPool &mpArg, IDexFile &iDexFileArg, const IDexAnnotation *iDexAnnotationArg) + : module(moduleArg), + mp(mpArg), + iDexFile(iDexFileArg), + iDexAnnotation(iDexAnnotationArg) {} + ~DexBCAnnotation() { + iDexAnnotation = nullptr; + } + + MIRPragma *EmitPragma(PragmaKind kind, const GStrIdx &pragIdx, int32 paramNum = -1, const TyIdx &tyIdxEx = TyIdx(0)); + + private: + MIRModule &module; + MemPool &mp; + IDexFile &iDexFile; + const IDexAnnotation *iDexAnnotation; +}; + +class DexBCAnnotationSet { + public: + DexBCAnnotationSet(MIRModule &moduleArg, MemPool &mpArg, IDexFile &iDexFileArg, + const IDexAnnotationSet *iDexAnnotationSetArg) + : module(moduleArg), + mp(mpArg), + iDexFile(iDexFileArg), + iDexAnnotationSet(iDexAnnotationSetArg) { + Init(); + } + + ~DexBCAnnotationSet() { + iDexAnnotationSet = nullptr; + annotations.clear(); + } + + size_t GetSize() const { + return annotations.size(); + } + + const DexBCAnnotation &GetAnnotation(size_t idx) const { + return *(annotations[idx]); + } + + void AddAnnotation(std::unique_ptr<DexBCAnnotation> annotation) { + annotations.push_back(std::move(annotation)); + } + + bool IsValid() const { +
return iDexAnnotationSet->IsValid(); + } + + std::vector<MIRPragma*> &EmitPragmas(PragmaKind kind, const GStrIdx &pragIdx, int32 paramNum = -1, + const TyIdx &tyIdxEx = TyIdx(0)); + + private: + void Init(); + MIRModule &module; + MemPool &mp; + IDexFile &iDexFile; + const IDexAnnotationSet *iDexAnnotationSet; + std::vector<std::unique_ptr<DexBCAnnotation>> annotations; + std::vector<MIRPragma*> pragmas; +}; + +class DexBCAnnotationSetList { + public: + DexBCAnnotationSetList(MIRModule &moduleArg, MemPool &mpArg, IDexFile &iDexFileArg, + const IDexAnnotationSetList *iDexAnnotationSetListArg) + : module(moduleArg), + mp(mpArg), + iDexFile(iDexFileArg), + iDexAnnotationSetList(iDexAnnotationSetListArg) { + Init(); + } + + ~DexBCAnnotationSetList() { + iDexAnnotationSetList = nullptr; + annotationSets.clear(); + } + + std::vector<MIRPragma*> &EmitPragmas(PragmaKind kind, const GStrIdx &pragIdx); + + private: + void Init(); + MIRModule &module; + MemPool &mp; + IDexFile &iDexFile; + const IDexAnnotationSetList *iDexAnnotationSetList; + std::vector<std::unique_ptr<DexBCAnnotationSet>> annotationSets; + std::vector<MIRPragma*> pragmas; +}; + +class DexBCFieldAnnotations { + public: + DexBCFieldAnnotations(MIRModule &moduleArg, MemPool &mpArg, IDexFile &iDexFileArg, + const IDexFieldAnnotations *iDexFieldAnnotationsArg) + : module(moduleArg), + mp(mpArg), + iDexFile(iDexFileArg), + iDexFieldAnnotations(iDexFieldAnnotationsArg) { + Init(); + } + + ~DexBCFieldAnnotations() { + iDexFieldAnnotations = nullptr; + } + std::vector<MIRPragma*> &EmitPragmas(); + + private: + void Init(); + MIRModule &module; + MemPool &mp; + IDexFile &iDexFile; + const IDexFieldAnnotations *iDexFieldAnnotations; + std::unique_ptr<DexBCAnnotationSet> annotationSet; +}; + +class DexBCMethodAnnotations { + public: + DexBCMethodAnnotations(MIRModule &moduleArg, MemPool &mpArg, IDexFile &iDexFileArg, + const IDexMethodAnnotations *iDexMethodAnnotationsArg) + : module(moduleArg), + mp(mpArg), + iDexFile(iDexFileArg), + iDexMethodAnnotations(iDexMethodAnnotationsArg) { + Init(); + } + + ~DexBCMethodAnnotations() { + iDexMethodAnnotations = nullptr; + } + + std::vector<MIRPragma*> &EmitPragmas(); + + private: + void Init(); + void SetupFuncAttrs(); + void SetupFuncAttrWithPragma(MIRFunction &mirFunc, const MIRPragma &pragma); + MIRFunction *GetMIRFunction(const GStrIdx &nameIdx) const; + MIRModule &module; + MemPool &mp; + IDexFile &iDexFile; + const IDexMethodAnnotations *iDexMethodAnnotations = nullptr; + std::unique_ptr<DexBCAnnotationSet> annotationSet; + std::vector<MIRPragma*> *pragmasPtr = nullptr; + GStrIdx methodFullNameStrIdx; + const IDexMethodIdItem *methodID = nullptr; +}; + +class DexBCParameterAnnotations { + public: + DexBCParameterAnnotations(MIRModule &moduleArg, MemPool &mpArg, IDexFile &iDexFileArg, + const IDexParameterAnnotations *iDexParameterAnnotationsArg) + : module(moduleArg), + mp(mpArg), + iDexFile(iDexFileArg), + iDexParameterAnnotations(iDexParameterAnnotationsArg) { + Init(); + } + + virtual ~DexBCParameterAnnotations() = default; + std::vector<MIRPragma*> &EmitPragmas(); + + private: + void Init(); + MIRModule &module; + MemPool &mp; + IDexFile &iDexFile; + const IDexParameterAnnotations *iDexParameterAnnotations; + std::unique_ptr<DexBCAnnotationSetList> annotationSetList; + std::vector<MIRPragma*> pragmas; +}; + +class DexBCAnnotationsDirectory : public BCAnnotationsDirectory { + public: + DexBCAnnotationsDirectory(MIRModule &moduleArg, MemPool &mpArg, IDexFile &iDexFileArg, + const std::string &classNameArg, + const IDexAnnotationsDirectory *iDexAnnotationsDirectoryArg) + : BCAnnotationsDirectory(moduleArg, mpArg), + iDexFile(iDexFileArg), + className(classNameArg), + iDexAnnotationsDirectory(iDexAnnotationsDirectoryArg) { + Init(); + } + +
~DexBCAnnotationsDirectory() { + iDexAnnotationsDirectory = nullptr; + } + + protected: + std::vector &EmitPragmasImpl() override; + + private: + void Init(); + IDexFile &iDexFile; + std::string className; + const IDexAnnotationsDirectory *iDexAnnotationsDirectory; + std::unique_ptr classAnnotationSet; + std::vector> fieldAnnotationsItems; + std::vector> methodAnnotationsItems; + std::vector> parameterAnnotationsItems; +}; +} // namespace bc +} // namespace maple +#endif // HIR2MPL_BC_INPUT_INCLUDE_DEX_PRAGMA_H \ No newline at end of file diff --git a/src/hir2mpl/bytecode_input/dex/include/dex_reader.h b/src/hir2mpl/bytecode_input/dex/include/dex_reader.h new file mode 100644 index 0000000000000000000000000000000000000000..eb99cc1bd1baa4c6f467f9630e3ef0b10fadb256 --- /dev/null +++ b/src/hir2mpl/bytecode_input/dex/include/dex_reader.h @@ -0,0 +1,101 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MPL_FE_DEX_INPUT_DEX_READER_H +#define MPL_FE_DEX_INPUT_DEX_READER_H +#include +#include +#include "dexfile_interface.h" +#include "types_def.h" +#include "bc_instruction.h" +#include "bc_class.h" +#include "bc_io.h" + +namespace maple { +namespace bc { +typedef std::map>> SrcLocalInfo; + +class DexReader : public BCReader { + public: + DexReader(uint32 fileIdxIn, const std::string &fileNameIn) + : BCReader(fileNameIn), fileIdx(fileIdxIn) {} + ~DexReader() = default; + void SetDexFile(std::unique_ptr dexFile); + uint32 GetClassItemsSize() const; + const char *GetClassJavaSourceFileName(uint32 classIdx) const; + bool IsInterface(uint32 classIdx) const; + uint32 GetClassAccFlag(uint32 classIdx) const; + std::string GetClassName(uint32 classIdx) const; + std::list GetSuperClasses(uint32 classIdx, bool mapled = false) const; + std::vector GetClassInterfaceNames(uint32 classIdx, bool mapled = false) const; + std::string GetClassFieldName(uint32 fieldIdx, bool mapled = false) const; + std::string GetClassFieldTypeName(uint32 fieldIdx, bool mapled = false) const; + std::string GetClassMethodName(uint32 methodIdx, bool mapled = false) const; + std::string GetClassMethodDescName(uint32 methodIdx, bool mapled = false) const; + MapleMap *ResolveInstructions(MapleAllocator &allocator, + const IDexMethodItem* dexMethodItem, + bool mapled = false) const; + std::unique_ptr>> ResolveTryInfos(const IDexMethodItem* dexMethodItem) const; + void ResovleSrcPositionInfo(const IDexMethodItem* dexMethodItem, + std::map &srcPosInfo) const; + std::unique_ptr ResovleSrcLocalInfo(const IDexMethodItem &dexMethodItem) const; + bool ReadAllDepTypeNames(std::unordered_set &depSet); + bool ReadMethodDepTypeNames(std::unordered_set &depSet, + uint32 classIdx, uint32 methodItemx, bool isVirtual) const; + bool ReadAllClassNames(std::unordered_set &classSet) const; + + maple::IDexFile &GetIDexFile() const { + return *iDexFile; + } + std::unordered_map GetDefiningClassNameTypeIdMap() const; // for init + const uint16 *GetMethodInstOffset(const IDexMethodItem* dexMethodItem) const; + uint16 
GetClassMethodRegisterTotalSize(const IDexMethodItem* dexMethodItem) const; + uint16 GetClassMethodRegisterInSize(const IDexMethodItem* dexMethodItem) const; + uint32 GetCodeOff(const IDexMethodItem* dexMethodItem) const; + + protected: + bool OpenAndMapImpl() override; + + private: + std::string GetStringFromIdxImpl(uint32 idx) const override; + std::string GetTypeNameFromIdxImpl(uint32 idx) const override; + ClassElem GetClassMethodFromIdxImpl(uint32 idx) const override; + ClassElem GetClassFieldFromIdxImpl(uint32 idx) const override; + std::string GetSignatureImpl(uint32 idx) const override; + uint32 GetFileIndexImpl() const override; + MapleMap *ConstructBCPCInstructionMap( + MapleAllocator &allocator, const std::map> &pcInstMap) const; + BCInstruction *ConstructBCInstruction(MapleAllocator &allocator, + const std::pair> &p) const; + std::unique_ptr>> ConstructBCTryInfoList( + uint32 codeOff, const std::vector &tryItems) const; + std::unique_ptr>> ConstructBCCatchList( + std::vector &catchHandlerItems) const; + uint32 GetVA(const IDexInstruction *inst) const; + uint32 GetVB(const IDexInstruction *inst) const; + uint64 GetWideVB(const IDexInstruction *inst) const; + uint32 GetVC(const IDexInstruction *inst) const; + void GetArgVRegs(const IDexInstruction *inst, MapleList &vRegs) const; + uint32 GetVH(const IDexInstruction *inst) const; + uint8 GetWidth(const IDexInstruction *inst) const; + const char *GetOpName(const IDexInstruction *inst) const; + void ReadMethodTryCatchDepTypeNames(std::unordered_set &depSet, const IDexMethodItem &method) const; + void AddDepTypeName(std::unordered_set &depSet, const std::string &typeName, bool isTrim) const; + + uint32 fileIdx; + std::unique_ptr iDexFile; +}; +} // namespace bc +} // namespace maple +#endif // MPL_FE_DEX_INPUT_DEX_READER_H \ No newline at end of file diff --git a/src/hir2mpl/bytecode_input/dex/include/dex_strfac.h b/src/hir2mpl/bytecode_input/dex/include/dex_strfac.h new file mode 100644 index 0000000000000000000000000000000000000000..3b069b2c3cce4c2e24dfd63e1b83ce89422b8d11 --- /dev/null +++ b/src/hir2mpl/bytecode_input/dex/include/dex_strfac.h @@ -0,0 +1,26 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MPL_FE_BC_INPUT_DEX_STRFAC_H +#define MPL_FE_BC_INPUT_DEX_STRFAC_H +#include + +namespace maple { +class DexStrFactory { + public: + static std::string GetStringFactoryFuncname(const std::string &funcName); + static bool IsStringInit(const std::string &funcName); +}; +} // namespace maple +#endif // MPL_FE_BC_INPUT_DEX_STRFAC_H \ No newline at end of file diff --git a/src/hir2mpl/bytecode_input/dex/include/dex_strfac_map.def b/src/hir2mpl/bytecode_input/dex/include/dex_strfac_map.def new file mode 100644 index 0000000000000000000000000000000000000000..0613bd0b291de7d26a8770798ffe962ca8719f60 --- /dev/null +++ b/src/hir2mpl/bytecode_input/dex/include/dex_strfac_map.def @@ -0,0 +1,80 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. 
+ * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +const char * const strV2 = "Ljava/lang/String;||()V"; +const char * const strAB2 = "Ljava/lang/String;||([B)V"; +const char * const strABI2 = "Ljava/lang/String;||([BI)V"; +const char * const strABII2 = "Ljava/lang/String;||([BII)V"; +const char * const strABIII2 = "Ljava/lang/String;||([BIII)V"; +const char * const strABIIS2 = "Ljava/lang/String;||([BIILjava/lang/String;)V"; +const char * const strABIIT2 = "Ljava/lang/String;||([BIILjava/nio/charset/Charset;)V"; +const char * const strABS2 = "Ljava/lang/String;||([BLjava/lang/String;)V"; +const char * const strABT2 = "Ljava/lang/String;||([BLjava/nio/charset/Charset;)V"; +const char * const strAC2 = "Ljava/lang/String;||([C)V"; +const char * const strACZ2 = "Ljava/lang/String;||([CZ)V"; +const char * const strACII2 = "Ljava/lang/String;||([CII)V"; +const char * const strAIII2 = "Ljava/lang/String;||([III)V"; +const char * const strIIAC2 = "Ljava/lang/String;||(II[C)V"; +const char * const strS2 = "Ljava/lang/String;||(Ljava/lang/String;)V"; +const char * const strF2 = "Ljava/lang/String;||(Ljava/lang/StringBuffer;)V"; +const char * const strD2 = "Ljava/lang/String;||(Ljava/lang/StringBuilder;)V"; +const char * const strFacV2 = "Ljava/lang/StringFactory;|newEmptyString|()Ljava/lang/String;"; +const char * const strFacAB2 = + "Ljava/lang/StringFactory;|newStringFromBytes|([B)Ljava/lang/String;"; +const char * const strFacABI2 = + "Ljava/lang/StringFactory;|newStringFromBytes|([BI)Ljava/lang/String;"; +const char * const strFacABII2 = + "Ljava/lang/StringFactory;|newStringFromBytes|([BII)Ljava/lang/String;"; +const char * const strFacABIII2 = + "Ljava/lang/StringFactory;|newStringFromBytes|([BIII)Ljava/lang/String;"; +const char * const strFacABIIS2 = + "Ljava/lang/StringFactory;|newStringFromBytes|([BIILjava/lang/String;)Ljava/lang/String;"; +const char * const strFacABIIT2 = + "Ljava/lang/StringFactory;|newStringFromBytes|([BIILjava/nio/charset/Charset;)Ljava/lang/String;"; +const char * const strFacABS2 = + "Ljava/lang/StringFactory;|newStringFromBytes|([BLjava/lang/String;)Ljava/lang/String;"; +const char * const strFacABT2 = + "Ljava/lang/StringFactory;|newStringFromBytes|([BLjava/nio/charset/Charset;)Ljava/lang/String;"; +const char * const strFacAC2 = + "Ljava/lang/StringFactory;|newStringFromChars|([C)Ljava/lang/String;"; +const char * const strFacACII2 = + "Ljava/lang/StringFactory;|newStringFromChars|([CII)Ljava/lang/String;"; +const char * const strFacAIII2 = + "Ljava/lang/StringFactory;|newStringFromCodePoints|([III)Ljava/lang/String;"; +const char * const strFacIIAC2 = + "Ljava/lang/StringFactory;|newStringFromChars|(II[C)Ljava/lang/String;"; +const char * const strFacS2 = + "Ljava/lang/StringFactory;|newStringFromString|(Ljava/lang/String;)Ljava/lang/String;"; +const char * const strFacF2 = + "Ljava/lang/StringFactory;|newStringFromStringBuffer|(Ljava/lang/StringBuffer;)Ljava/lang/String;"; +const char * const strFacD2 = + "Ljava/lang/StringFactory;|newStringFromStringBuilder|(Ljava/lang/StringBuilder;)Ljava/lang/String;"; + 
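// Editorial sketch: the STR_STRFAC_MAP2 entries below pair each java.lang.String
// constructor signature defined above with its StringFactory replacement, and a
// consumer materializes the table by defining the macro and re-including this
// .def file (the X-macro pattern, as dex_op.h does with dex_opcode.def). The
// code below is a minimal illustration of that expansion only, not the actual
// dex_strfac.cpp implementation; GetStrFacMap is an assumed, illustrative name.
#include <map>
#include <string>

static const std::map<std::string, std::string> &GetStrFacMap() {
  // Included inside the lambda body, the const char* definitions above become
  // local constants, and each STR_STRFAC_MAP2(ctorSig, facSig) expands to an
  // emplace into the map.
  static const std::map<std::string, std::string> strFacMap = []() {
    std::map<std::string, std::string> m;
#define STR_STRFAC_MAP2(ctorSig, facSig) m.emplace(ctorSig, facSig);
#include "dex_strfac_map.def"
#undef STR_STRFAC_MAP2
    return m;
  }();
  return strFacMap;
}

// For example, GetStrFacMap().at("Ljava/lang/String;||()V") yields
// "Ljava/lang/StringFactory;|newEmptyString|()Ljava/lang/String;", the kind of
// lookup DexStrFactory::GetStringFactoryFuncname needs when an invoke of a
// String <init> is rewritten into a StringFactory call.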
+STR_STRFAC_MAP2(strV2, strFacV2) +STR_STRFAC_MAP2(strAB2, strFacAB2) +STR_STRFAC_MAP2(strABI2, strFacABI2) +STR_STRFAC_MAP2(strABII2, strFacABII2) +STR_STRFAC_MAP2(strABIII2, strFacABIII2) +STR_STRFAC_MAP2(strABIIS2, strFacABIIS2) +STR_STRFAC_MAP2(strABIIT2, strFacABIIT2) +STR_STRFAC_MAP2(strABS2, strFacABS2) +STR_STRFAC_MAP2(strABT2, strFacABT2) +STR_STRFAC_MAP2(strAC2, strFacAC2) +STR_STRFAC_MAP2(strACZ2, strFacAC2) +STR_STRFAC_MAP2(strACII2, strFacACII2) +STR_STRFAC_MAP2(strAIII2, strFacAIII2) +STR_STRFAC_MAP2(strIIAC2, strFacIIAC2) +STR_STRFAC_MAP2(strS2, strFacS2) +STR_STRFAC_MAP2(strF2, strFacF2) +STR_STRFAC_MAP2(strD2, strFacD2) diff --git a/src/hir2mpl/bytecode_input/dex/include/dex_util.h b/src/hir2mpl/bytecode_input/dex/include/dex_util.h new file mode 100644 index 0000000000000000000000000000000000000000..8c51e01b7e363b06be4c0a8d9a21618fba07d475 --- /dev/null +++ b/src/hir2mpl/bytecode_input/dex/include/dex_util.h @@ -0,0 +1,164 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MPL_FE_BC_INPUT_DEX_UTIL_H +#define MPL_FE_BC_INPUT_DEX_UTIL_H +#include +#include "dex_op.h" +#include "bc_instruction.h" +#include "types_def.h" +namespace maple { +namespace bc { +static std::map dexOp2MIROp = { + { kDexOpAddInt, OP_add }, + { kDexOpSubInt, OP_sub }, + { kDexOpMulInt, OP_mul }, + { kDexOpDivInt, OP_div }, + { kDexOpRemInt, OP_rem }, + { kDexOpAndInt, OP_band }, + { kDexOpOrInt, OP_bior }, + { kDexOpXorInt, OP_bxor }, + { kDexOpShlInt, OP_shl }, + { kDexOpShrInt, OP_ashr }, + { kDexOpUshrInt, OP_lshr }, + + { kDexOpAddLong, OP_add }, + { kDexOpSubLong, OP_sub }, + { kDexOpMulLong, OP_mul }, + { kDexOpDivLong, OP_div }, + { kDexOpRemLong, OP_rem }, + { kDexOpAndLong, OP_band }, + { kDexOpOrLong, OP_bior }, + { kDexOpXorLong, OP_bxor }, + { kDexOpShlLong, OP_shl }, + { kDexOpShrLong, OP_ashr }, + { kDexOpUshrLong, OP_lshr }, + + { kDexOpAddFloat, OP_add }, + { kDexOpSubFloat, OP_sub }, + { kDexOpMulFloat, OP_mul }, + { kDexOpDivFloat, OP_div }, + { kDexOpRemFloat, OP_rem }, + { kDexOpAddDouble, OP_add }, + { kDexOpSubDouble, OP_sub }, + { kDexOpMulDouble, OP_mul }, + { kDexOpDivDouble, OP_div }, + { kDexOpRemDouble, OP_rem }, +}; + +static std::map dexOp2Addr2MIROp = { + { kDexOpAddInt2Addr, OP_add }, + { kDexOpSubInt2Addr, OP_sub }, + { kDexOpMulInt2Addr, OP_mul }, + { kDexOpDivInt2Addr, OP_div }, + { kDexOpRemInt2Addr, OP_rem }, + { kDexOpAndInt2Addr, OP_band }, + { kDexOpOrInt2Addr, OP_bior }, + { kDexOpXorInt2Addr, OP_bxor }, + { kDexOpShlInt2Addr, OP_shl }, + { kDexOpShrInt2Addr, OP_ashr }, + { kDexOpUshrInt2Addr, OP_lshr }, + + { kDexOpAddLong2Addr, OP_add }, + { kDexOpSubLong2Addr, OP_sub }, + { kDexOpMulLong2Addr, OP_mul }, + { kDexOpDivLong2Addr, OP_div }, + { kDexOpRemLong2Addr, OP_rem }, + { kDexOpAndLong2Addr, OP_band }, + { kDexOpOrLong2Addr, OP_bior }, + { kDexOpXorLong2Addr, OP_bxor }, + { kDexOpShlLong2Addr, OP_shl }, + { kDexOpShrLong2Addr, OP_ashr }, + { kDexOpUshrLong2Addr, OP_lshr }, + + { kDexOpAddFloat2Addr, OP_add }, + { 
kDexOpSubFloat2Addr, OP_sub }, + { kDexOpMulFloat2Addr, OP_mul }, + { kDexOpDivFloat2Addr, OP_div }, + { kDexOpRemFloat2Addr, OP_rem }, + { kDexOpAddDouble2Addr, OP_add }, + { kDexOpSubDouble2Addr, OP_sub }, + { kDexOpMulDouble2Addr, OP_mul }, + { kDexOpDivDouble2Addr, OP_div }, + { kDexOpRemDouble2Addr, OP_rem }, +}; + +static std::map dexOpCmp2MIROp = { + { kDexOpCmpLong, OP_cmp }, + { kDexOpCmplFloat, OP_cmpl }, + { kDexOpCmpgFloat, OP_cmpg }, + { kDexOpCmplDouble, OP_cmpl }, + { kDexOpCmpgDouble, OP_cmpg }, +}; + +static std::map dexOpLit2MIROp = { + { kDexOpAddIntLit16, OP_add }, + { kDexOpRsubInt, OP_sub }, + { kDexOpMulIntLit16, OP_mul }, + { kDexOpDivIntLit16, OP_div }, + { kDexOpRemIntLit16, OP_rem }, + { kDexOpAndIntLit16, OP_band }, + { kDexOpOrIntLit16, OP_bior }, + { kDexOpXorIntLit16, OP_bxor }, + + { kDexOpAddIntLit8, OP_add }, + { kDexOpRsubIntLit8, OP_sub }, + { kDexOpMulIntLit8, OP_mul }, + { kDexOpDivIntLit8, OP_div }, + { kDexOpRemIntLit8, OP_rem }, + { kDexOpAndIntLit8, OP_band }, + { kDexOpOrIntLit8, OP_bior }, + { kDexOpXorIntLit8, OP_bxor }, + { kDexOpShlIntLit8, OP_shl }, + { kDexOpShrIntLit8, OP_ashr }, + { kDexOpUshrIntLit8, OP_lshr }, +}; + +static std::map dexOpConditionOp2MIROp = { + { kDexOpIfEq, OP_eq }, + { kDexOpIfNe, OP_ne }, + { kDexOpIfLt, OP_lt }, + { kDexOpIfGe, OP_ge }, + { kDexOpIfGt, OP_gt }, + { kDexOpIfLe, OP_le }, + + { kDexOpIfEqZ, OP_eq }, + { kDexOpIfNeZ, OP_ne }, + { kDexOpIfLtZ, OP_lt }, + { kDexOpIfGeZ, OP_ge }, + { kDexOpIfGtZ, OP_gt }, + { kDexOpIfLeZ, OP_le }, +}; + +static std::map dexOpInvokeOp2MIROp = { + { kDexOpInvokeVirtual, OP_virtualcallassigned }, + { kDexOpInvokeVirtualRange, OP_virtualcallassigned }, + { kDexOpInvokeSuper, OP_superclasscallassigned }, + { kDexOpInvokeSuperRange, OP_superclasscallassigned }, + { kDexOpInvokeDirect, OP_callassigned }, + { kDexOpInvokeDirectRange, OP_callassigned }, + { kDexOpInvokeStatic, OP_callassigned }, + { kDexOpInvokeStaticRange, OP_callassigned }, + { kDexOpInvokeInterface, OP_interfacecallassigned }, + { kDexOpInvokeInterfaceRange, OP_interfacecallassigned }, +}; + +class DEXUtil { + private: + DEXUtil() = default; + ~DEXUtil() = default; +}; +} // namespace bc +} // namespace maple +#endif // MPL_FE_BC_INPUT_DEX_UTIL_H \ No newline at end of file diff --git a/src/hir2mpl/bytecode_input/dex/include/dexfile_factory.h b/src/hir2mpl/bytecode_input/dex/include/dexfile_factory.h new file mode 100644 index 0000000000000000000000000000000000000000..9110ab59b64eb6d0cb2f105c9a497bdbc129fa1b --- /dev/null +++ b/src/hir2mpl/bytecode_input/dex/include/dexfile_factory.h @@ -0,0 +1,28 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef HIR2MPL_DEX_INPUT_INCLUDE_DEXFILE_FACTORY_H +#define HIR2MPL_DEX_INPUT_INCLUDE_DEXFILE_FACTORY_H + +#include +#include "dexfile_interface.h" + +namespace maple { +class DexFileFactory { + public: + std::unique_ptr NewInstance() const; +}; +} // namespace maple + +#endif /* HIR2MPL_DEX_INPUT_INCLUDE_DEXFILE_FACTORY_H_ */ diff --git a/src/hir2mpl/bytecode_input/dex/include/dexfile_interface.h b/src/hir2mpl/bytecode_input/dex/include/dexfile_interface.h new file mode 100644 index 0000000000000000000000000000000000000000..57e9ee0ac2b11b9fc63cea77aa2496cf39f55123 --- /dev/null +++ b/src/hir2mpl/bytecode_input/dex/include/dexfile_interface.h @@ -0,0 +1,750 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef HIR2MPL_DEX_INPUT_INCLUDE_DEXFILE_INTERFACE_H +#define HIR2MPL_DEX_INPUT_INCLUDE_DEXFILE_INTERFACE_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "namemangler.h" + +namespace maple { +enum ValueType { + kByte = 0x00, + kShort = 0x02, + kChar = 0x03, + kInt = 0x04, + kLong = 0x06, + kFloat = 0x10, + kDouble = 0x11, + kMethodType = 0x15, + kMethodHandle = 0x16, + kString = 0x17, + kType = 0x18, + kField = 0x19, + kMethod = 0x1a, + kEnum = 0x1b, + kArray = 0x1c, + kAnnotation = 0x1d, + kNull = 0x1e, + kBoolean = 0x1f +}; +const uint16_t kIDexNumPackedOpcodes = 0x100; +const uint16_t kIDexPackedSwitchSignature = 0x100; +const uint16_t kIDexSparseSwitchSignature = 0x200; + +// opcode in dex file +enum IDexOpcode : uint8_t { + kOpNop = 0x00, + kOpMove = 0x01, + kOpMoveFrom16 = 0x02, + kOpMove16 = 0x03, + kOpMoveWide = 0x04, + kOpMoveWideFrom16 = 0x05, + kOpMoveWide16 = 0x06, + kOpMoveObject = 0x07, + kOpMoveObjectFrom16 = 0x08, + kOpMoveObject16 = 0x09, + kOpMoveResult = 0x0a, + kOpMoveResultWide = 0x0b, + kOpMoveResultObject = 0x0c, + kOpMoveException = 0x0d, + kOpReturnVoid = 0x0e, + kOpReturn = 0x0f, + kOpReturnWide = 0x10, + kOpReturnObject = 0x11, + kOpConst4 = 0x12, + kOpConst16 = 0x13, + kOpConst = 0x14, + kOpConstHigh16 = 0x15, + kOpConstWide16 = 0x16, + kOpConstWide32 = 0x17, + kOpConstWide = 0x18, + kOpConstWideHigh16 = 0x19, + kOpConstString = 0x1a, + kOpConstStringJumbo = 0x1b, + kOpConstClass = 0x1c, + kOpMonitorEnter = 0x1d, + kOpMonitorExit = 0x1e, + kOpCheckCast = 0x1f, + kOpInstanceOf = 0x20, + kOpArrayLength = 0x21, + kOpNewInstance = 0x22, + kOpNewArray = 0x23, + kOpFilledNewArray = 0x24, + kOpFilledNewArrayRange = 0x25, + kOpFillArrayData = 0x26, + kOpThrow = 0x27, + kOpGoto = 0x28, + kOpGoto16 = 0x29, + kOpGoto32 = 0x2a, + kOpPackedSwitch = 0x2b, + kOpSparseSwitch = 0x2c, + kOpCmplFloat = 0x2d, + kOpCmpgFloat = 0x2e, + kOpCmplDouble = 0x2f, + kOpCmpgDouble = 0x30, + kOpCmpLong = 0x31, + kOpIfEq = 0x32, + kOpIfNe = 0x33, + kOpIfLt = 0x34, + kOpIfGe = 0x35, + kOpIfGt = 0x36, + kOpIfLe = 0x37, + kOpIfEqz = 0x38, + kOpIfNez = 0x39, + kOpIfLtz = 0x3a, + kOpIfGez = 0x3b, + kOpIfGtz = 0x3c, + kOpIfLez = 0x3d, + kOpUnused3E = 0x3e, + kOpUnused3F = 0x3f, + kOpUnused40 = 0x40, + 
kOpUnused41 = 0x41, + kOpUnused42 = 0x42, + kOpUnused43 = 0x43, + kOpAget = 0x44, + kOpAgetWide = 0x45, + kOpAgetObject = 0x46, + kOpAgetBoolean = 0x47, + kOpAgetByte = 0x48, + kOpAgetChar = 0x49, + kOpAgetShort = 0x4a, + kOpAput = 0x4b, + kOpAputWide = 0x4c, + kOpAputObject = 0x4d, + kOpAputBoolean = 0x4e, + kOpAputByte = 0x4f, + kOpAputChar = 0x50, + kOpAputShort = 0x51, + kOpIget = 0x52, + kOpIgetWide = 0x53, + kOpIgetObject = 0x54, + kOpIgetBoolean = 0x55, + kOpIgetByte = 0x56, + kOpIgetChar = 0x57, + kOpIgetShort = 0x58, + kOpIput = 0x59, + kOpIputWide = 0x5a, + kOpIputObject = 0x5b, + kOpIputBoolean = 0x5c, + kOpIputByte = 0x5d, + kOpIputChar = 0x5e, + kOpIputShort = 0x5f, + kOpSget = 0x60, + kOpSgetWide = 0x61, + kOpSgetObject = 0x62, + kOpSgetBoolean = 0x63, + kOpSgetByte = 0x64, + kOpSgetChar = 0x65, + kOpSgetShort = 0x66, + kOpSput = 0x67, + kOpSputWide = 0x68, + kOpSputObject = 0x69, + kOpSputBoolean = 0x6a, + kOpSputByte = 0x6b, + kOpSputChar = 0x6c, + kOpSputShort = 0x6d, + kOpInvokeVirtual = 0x6e, + kOpInvokeSuper = 0x6f, + kOpInvokeDirect = 0x70, + kOpInvokeStatic = 0x71, + kOpInvokeInterface = 0x72, + kOpUnused73 = 0x73, + kOpInvokeVirtualRange = 0x74, + kOpInvokeSuperRange = 0x75, + kOpInvokeDirectRange = 0x76, + kOpInvokeStaticRange = 0x77, + kOpInvokeInterfaceRange = 0x78, + kOpUnused79 = 0x79, + kOpUnused7A = 0x7a, + kOpNegInt = 0x7b, + kOpNotInt = 0x7c, + kOpNegLong = 0x7d, + kOpNotLong = 0x7e, + kOpNegFloat = 0x7f, + kOpNegDouble = 0x80, + kOpIntToLong = 0x81, + kOpIntToFloat = 0x82, + kOpIntToDouble = 0x83, + kOpLongToInt = 0x84, + kOpLongToFloat = 0x85, + kOpLongToDouble = 0x86, + kOpFloatToInt = 0x87, + kOpFloatToLong = 0x88, + kOpFloatToDouble = 0x89, + kOpDoubleToInt = 0x8a, + kOpDoubleToLong = 0x8b, + kOpDoubleToFloat = 0x8c, + kOpIntToByte = 0x8d, + kOpIntToChar = 0x8e, + kOpIntToShort = 0x8f, + kOpAddInt = 0x90, + kOpSubInt = 0x91, + kOpMulInt = 0x92, + kOpDivInt = 0x93, + kOpRemInt = 0x94, + kOpAndInt = 0x95, + kOpOrInt = 0x96, + kOpXorInt = 0x97, + kOpShlInt = 0x98, + kOpShrInt = 0x99, + kOpUshrInt = 0x9a, + kOpAddLong = 0x9b, + kOpSubLong = 0x9c, + kOpMulLong = 0x9d, + kOpDivLong = 0x9e, + kOpRemLong = 0x9f, + kOpAndLong = 0xa0, + kOpOrLong = 0xa1, + kOpXorLong = 0xa2, + kOpShlLong = 0xa3, + kOpShrLong = 0xa4, + kOpUshrLong = 0xa5, + kOpAddFloat = 0xa6, + kOpSubFloat = 0xa7, + kOpMulFloat = 0xa8, + kOpDivFloat = 0xa9, + kOpRemFloat = 0xaa, + kOpAddDouble = 0xab, + kOpSubDouble = 0xac, + kOpMulDouble = 0xad, + kOpDivDouble = 0xae, + kOpRemDouble = 0xaf, + kOpAddInt2Addr = 0xb0, + kOpSubInt2Addr = 0xb1, + kOpMulInt2Addr = 0xb2, + kOpDivInt2Addr = 0xb3, + kOpRemInt2Addr = 0xb4, + kOpAndInt2Addr = 0xb5, + kOpOrInt2Addr = 0xb6, + kOpXorInt2Addr = 0xb7, + kOpShlInt2Addr = 0xb8, + kOpShrInt2Addr = 0xb9, + kOpUshrInt2Addr = 0xba, + kOpAddLong2Addr = 0xbb, + kOpSubLong2Addr = 0xbc, + kOpMulLong2Addr = 0xbd, + kOpDivLong2Addr = 0xbe, + kOpRemLong2Addr = 0xbf, + kOpAndLong2Addr = 0xc0, + kOpOrLong2Addr = 0xc1, + kOpXorLong2Addr = 0xc2, + kOpShlLong2Addr = 0xc3, + kOpShrLong2Addr = 0xc4, + kOpUshrLong2Addr = 0xc5, + kOpAddFloat2Addr = 0xc6, + kOpSubFloat2Addr = 0xc7, + kOpMulFloat2Addr = 0xc8, + kOpDivFloat2Addr = 0xc9, + kOpRemFloat2Addr = 0xca, + kOpAddDouble2Addr = 0xcb, + kOpSubDouble2Addr = 0xcc, + kOpMulDouble2Addr = 0xcd, + kOpDivDouble2Addr = 0xce, + kOpRemDouble2Addr = 0xcf, + kOpAddIntLit16 = 0xd0, + kOpRsubInt = 0xd1, + kOpMulIntLit16 = 0xd2, + kOpDivIntLit16 = 0xd3, + kOpRemIntLit16 = 0xd4, + kOpAndIntLit16 = 0xd5, + kOpOrIntLit16 = 0xd6, + kOpXorIntLit16 = 
0xd7, + kOpAddIntLit8 = 0xd8, + kOpRsubIntLit8 = 0xd9, + kOpMulIntLit8 = 0xda, + kOpDivIntLit8 = 0xdb, + kOpRemIntLit8 = 0xdc, + kOpAndIntLit8 = 0xdd, + kOpOrIntLit8 = 0xde, + kOpXorIntLit8 = 0xdf, + kOpShlIntLit8 = 0xe0, + kOpShrIntLit8 = 0xe1, + kOpUshrIntLit8 = 0xe2, + kOpIgetVolatile = 0xe3, + kOpIputVolatile = 0xe4, + kOpSgetVolatile = 0xe5, + kOpSputVolatile = 0xe6, + kOpIgetObjectVolatile = 0xe7, + kOpIgetWideVolatile = 0xe8, + kOpIputWideVolatile = 0xe9, + kOpSgetWideVolatile = 0xea, + kOpSputWideVolatile = 0xeb, + kOpBreakpoint = 0xec, + kOpThrowVerificationError = 0xed, + kOpExecuteInline = 0xee, + kOpExecuteInlineRange = 0xef, + kOpInvokeObjectInitRange = 0xf0, + kOpReturnVoidBarrier = 0xf1, + kOpIgetQuick = 0xf2, + kOpIgetWideQuick = 0xf3, + kOpIgetObjectQuick = 0xf4, + kOpIputQuick = 0xf5, + kOpIputWideQuick = 0xf6, + kOpIputObjectQuick = 0xf7, + kOpInvokeVirtualQuick = 0xf8, + kOpInvokeVirtualQuickRange = 0xf9, + kOpInvokePolymorphic = 0xfa, + kOpInvokePolymorphicRange = 0xfb, + kOpInvokeCustom = 0xfc, + kOpInvokeCustomRange = 0xfd, + kOpSputObjectVolatile = 0xfe, + kOpUnusedFF = 0xff +}; + +enum IDexInstructionIndexType : uint8_t { + kIDexIndexUnknown = 0, + kIDexIndexNone, // has no index + kIDexIndexTypeRef, // type reference index + kIDexIndexStringRef, // string reference index + kIDexIndexMethodRef, // method reference index + kIDexIndexFieldRef, // field reference index + kIDexIndexFieldOffset, // field offset (for static linked fields) + kIDexIndexVtableOffset, // vtable offset (for static linked methods) + kIDexIndexMethodAndProtoRef, // method index and proto indexv + kIDexIndexCallSiteRef, // call site index + kIDexIndexInlineMethod, // inline method index (for inline linked methods) + kIDexIndexVaries // "It depends." Used for throw-verification-error +}; + +enum IDexInstructionFormat : uint8_t { + kIDexFmt10x, // op + kIDexFmt12x, // op vA, vB + kIDexFmt11n, // op vA, #+B + kIDexFmt11x, // op vAA + kIDexFmt10t, // op +AA + kIDexFmt20t, // op +AAAA + kIDexFmt22x, // op vAA, vBBBB + kIDexFmt21t, // op vAA, +BBBB + kIDexFmt21s, // op vAA, #+BBBB + kIDexFmt21h, // op vAA, #+BBBB00000[00000000] + kIDexFmt21c, // op vAA, thing@BBBB + kIDexFmt23x, // op vAA, vBB, vCC + kIDexFmt22b, // op vAA, vBB, #+CC + kIDexFmt22t, // op vA, vB, +CCCC + kIDexFmt22s, // op vA, vB, #+CCCC + kIDexFmt22c, // op vA, vB, thing@CCCC + kIDexFmt32x, // op vAAAA, vBBBB + kIDexFmt30t, // op +AAAAAAAA + kIDexFmt31t, // op vAA, +BBBBBBBB + kIDexFmt31i, // op vAA, #+BBBBBBBB + kIDexFmt31c, // op vAA, string@BBBBBBBB + kIDexFmt35c, // op {vC,vD,vE,vF,vG}, thing@BBBB + kIDexFmt3rc, // op {vCCCC .. v(CCCC+AA-1)}, thing@BBBB + kIDexFmt45cc, // op {vC, vD, vE, vF, vG}, meth@BBBB, proto@HHHH + kIDexFmt4rcc, // op {VCCCC .. 
v(CCCC+AA-1)}, meth@BBBB, proto@HHHH + kIDexFmt51l // op vAA, #+BBBBBBBBBBBBBBBB +}; + +class IDexFile; + +class IDexInstruction { + public: + IDexInstruction() = default; + virtual ~IDexInstruction() = default; + virtual IDexOpcode GetOpcode() const = 0; + virtual const char *GetOpcodeName() const = 0; + virtual uint32_t GetVRegA() const = 0; + virtual uint32_t GetVRegB() const = 0; + virtual uint64_t GetVRegBWide() const = 0; + virtual uint32_t GetVRegC() const = 0; + virtual uint32_t GetVRegH() const = 0; + virtual uint32_t GetArg(uint32_t index) const = 0; + virtual bool HasVRegA() const = 0; + virtual bool HasVRegB() const = 0; + virtual bool HasWideVRegB() const = 0; + virtual bool HasVRegC() const = 0; + virtual bool HasVRegH() const = 0; + virtual bool HasArgs() const = 0; + virtual IDexInstructionIndexType GetIndexType() const = 0; + virtual IDexInstructionFormat GetFormat() const = 0; + virtual size_t GetWidth() const = 0; + virtual void SetOpcode(IDexOpcode opcode) = 0; + virtual void SetVRegB(uint32_t vRegB) = 0; + virtual void SetVRegBWide(uint64_t vRegBWide) = 0; + virtual void SetIndexType(IDexInstructionIndexType indexType) = 0; +}; + +class IDexCatchHandlerItem { + public: + IDexCatchHandlerItem(uint32_t typeIdx, uint32_t address) : typeIdx(typeIdx), address(address) {} + ~IDexCatchHandlerItem() = default; + IDexCatchHandlerItem(const IDexCatchHandlerItem &item) = default; + IDexCatchHandlerItem &operator=(const IDexCatchHandlerItem&) = default; + uint32_t GetHandlerTypeIdx() const; + uint32_t GetHandlerAddress() const; + bool IsCatchAllHandlerType() const; + + private: + uint32_t typeIdx; // input parameter + uint32_t address; // input parameter +}; + +class IDexTryItem { + public: + static const IDexTryItem *GetInstance(const void *item); + uint32_t GetStartAddr() const; + uint32_t GetEndAddr() const; + void GetCatchHandlerItems(const IDexFile &dexFile, uint32_t codeOff, + std::vector &items) const; + + private: + IDexTryItem() = default; + virtual ~IDexTryItem() = default; +}; + +class IDexProtoIdItem { + public: + static const IDexProtoIdItem *GetInstance(const IDexFile &dexFile, uint32_t index); + const char *GetReturnTypeName(const IDexFile &dexFile) const; + uint32_t GetParameterTypeSize(const IDexFile &dexFile) const; + void GetParameterTypeIndexes(const IDexFile &dexFile, std::vector &indexes) const; + std::string GetDescriptor(const IDexFile &dexFile) const; + const char *GetShorty(const IDexFile &dexFile) const; + + private: + IDexProtoIdItem() = default; + virtual ~IDexProtoIdItem() = default; +}; + +class IDexMethodIdItem { + public: + static const IDexMethodIdItem *GetInstance(const IDexFile &dexFile, uint32_t index); + uint32_t GetClassIdx() const; + const char *GetDefiningClassName(const IDexFile &dexFile) const; + uint16_t GetProtoIdx() const; + const IDexProtoIdItem *GetProtoIdItem(const IDexFile &dexFile) const; + uint32_t GetNameIdx() const; + const char *GetShortMethodName(const IDexFile &dexFile) const; + std::string GetDescriptor(const IDexFile &dexFile) const; + + private: + IDexMethodIdItem() = default; + virtual ~IDexMethodIdItem() = default; +}; + +class IDexMethodItem { + public: + IDexMethodItem(uint32_t methodIdx, uint32_t accessFlags, uint32_t codeOff); + uint32_t GetMethodCodeOff(const IDexMethodItem *item) const; + virtual ~IDexMethodItem() = default; + uint32_t GetMethodIdx() const; + const IDexMethodIdItem *GetMethodIdItem(const IDexFile &dexFile) const; + uint32_t GetAccessFlags() const; + bool HasCode(const IDexFile &dexFile) const; + 
uint16_t GetRegistersSize(const IDexFile &dexFile) const; + uint16_t GetInsSize(const IDexFile &dexFile) const; + uint16_t GetOutsSize(const IDexFile &dexFile) const; + uint16_t GetTriesSize(const IDexFile &dexFile) const; + uint32_t GetInsnsSize(const IDexFile &dexFile) const; + const uint16_t *GetInsnsByOffset(const IDexFile &dexFile, uint32_t offset) const; + uint32_t GetCodeOff() const; + bool IsStatic() const; + void GetPCInstructionMap(const IDexFile &dexFile, + std::map> &pcInstructionMap) const; + void GetTryItems(const IDexFile &dexFile, std::vector &tryItems) const; + void GetSrcPositionInfo(const IDexFile &dexFile, std::map &srcPosInfo) const; + void GetSrcLocalInfo(const IDexFile &dexFile, + std::map>> &srcLocal) const; + + private: + uint32_t methodIdx; // input parameter + uint32_t accessFlags; // input parameter + uint32_t codeOff; // input parameter +}; + +class IDexFieldIdItem { + public: + static const IDexFieldIdItem *GetInstance(const IDexFile &dexFile, uint32_t index); + uint32_t GetClassIdx() const; + const char *GetDefiningClassName(const IDexFile &dexFile) const; + uint32_t GetTypeIdx() const; + const char *GetFieldTypeName(const IDexFile &dexFile) const; + uint32_t GetNameIdx() const; + const char *GetShortFieldName(const IDexFile &dexFile) const; + + private: + IDexFieldIdItem() = default; + virtual ~IDexFieldIdItem() = default; +}; + +class IDexFieldItem { + public: + IDexFieldItem(uint32_t fieldIndex, uint32_t accessFlags); + virtual ~IDexFieldItem() = default; + uint32_t GetFieldIdx() const; + const IDexFieldIdItem *GetFieldIdItem(const IDexFile &dexFile) const; + uint32_t GetAccessFlags() const; + + private: + uint32_t fieldIndex; // input parameter + uint32_t accessFlags; // input parameter +}; + +class IDexAnnotation { + public: + static const IDexAnnotation *GetInstance(const void *data); + uint8_t GetVisibility() const; + const uint8_t *GetAnnotationData() const; + + private: + IDexAnnotation() = default; + virtual ~IDexAnnotation() = default; +}; + +class IDexAnnotationSet { + public: + static const IDexAnnotationSet *GetInstance(const void *data); + void GetAnnotations(const IDexFile &dexFile, std::vector &item) const; + bool IsValid() const; + + private: + IDexAnnotationSet() = default; + virtual ~IDexAnnotationSet() = default; +}; + +class IDexAnnotationSetList { + public: + static const IDexAnnotationSetList *GetInstance(const void *data); + void GetAnnotationSets(const IDexFile &dexFile, std::vector &items) const; + + private: + IDexAnnotationSetList() = default; + virtual ~IDexAnnotationSetList() = default; +}; + +class IDexFieldAnnotations { + public: + static const IDexFieldAnnotations *GetInstance(const void *data); + const IDexFieldIdItem *GetFieldIdItem(const IDexFile &dexFile) const; + const IDexAnnotationSet *GetAnnotationSet(const IDexFile &dexFile) const; + uint32_t GetFieldIdx() const; + + private: + IDexFieldAnnotations() = default; + virtual ~IDexFieldAnnotations() = default; +}; + +class IDexMethodAnnotations { + public: + static const IDexMethodAnnotations *GetInstance(const void *data); + const IDexMethodIdItem *GetMethodIdItem(const IDexFile &dexFile) const; + const IDexAnnotationSet *GetAnnotationSet(const IDexFile &dexFile) const; + uint32_t GetMethodIdx() const; + + private: + IDexMethodAnnotations() = default; + virtual ~IDexMethodAnnotations() = default; +}; + +class IDexParameterAnnotations { + public: + static const IDexParameterAnnotations *GetInstance(const void *data); + const IDexMethodIdItem *GetMethodIdItem(const 
IDexFile &dexFile) const; + const IDexAnnotationSetList *GetAnnotationSetList(const IDexFile &dexFile) const; + uint32_t GetMethodIdx() const; + + private: + IDexParameterAnnotations() = default; + virtual ~IDexParameterAnnotations() = default; +}; + +class IDexAnnotationsDirectory { + public: + static const IDexAnnotationsDirectory *GetInstance(const void *data); + bool HasClassAnnotationSet(const IDexFile &dexFile) const; + bool HasFieldAnnotationsItems(const IDexFile &dexFile) const; + bool HasMethodAnnotationsItems(const IDexFile &dexFile) const; + bool HasParameterAnnotationsItems(const IDexFile &dexFile) const; + const IDexAnnotationSet *GetClassAnnotationSet(const IDexFile &dexFile) const; + void GetFieldAnnotationsItems(const IDexFile &dexFile, + std::vector &items) const; + void GetMethodAnnotationsItems(const IDexFile &dexFile, + std::vector &items) const; + void GetParameterAnnotationsItems(const IDexFile &dexFile, + std::vector &items) const; + + private: + IDexAnnotationsDirectory() = default; + virtual ~IDexAnnotationsDirectory() = default; +}; + +class IDexClassItem { + public: + uint32_t GetClassIdx() const; + const char *GetClassName(const IDexFile &dexFile) const; + uint32_t GetAccessFlags() const; + uint32_t GetSuperclassIdx() const; + const char *GetSuperClassName(const IDexFile &dexFile) const; + uint32_t GetInterfacesOff() const; + void GetInterfaceTypeIndexes(const IDexFile &dexFile, std::vector &indexes) const; + void GetInterfaceNames(const IDexFile &dexFile, std::vector &names) const; + uint32_t GetSourceFileIdx() const; + const char *GetJavaSourceFileName(const IDexFile &dexFile) const; + bool HasAnnotationsDirectory(const IDexFile &dexFile) const; + uint32_t GetAnnotationsOff() const; + const IDexAnnotationsDirectory *GetAnnotationsDirectory(const IDexFile &dexFile) const; + uint32_t GetClassDataOff() const; + std::vector> GetFields(const IDexFile &dexFile) const; + bool HasStaticValuesList() const; + const uint8_t *GetStaticValuesList(const IDexFile &dexFile) const; + bool IsInterface() const; + bool IsSuperclassValid() const; + std::vector> GetMethodsIdxAndFlag(const IDexFile &dexFile, bool isVirtual) const; + std::unique_ptr GetDirectMethod(const IDexFile &dexFile, uint32_t index) const; + std::unique_ptr GetVirtualMethod(const IDexFile &dexFile, uint32_t index) const; + + private: + IDexClassItem() = default; + virtual ~IDexClassItem() = default; +}; + +class IDexHeader { + public: + static const IDexHeader *GetInstance(const IDexFile &dexFile); + uint8_t GetMagic(uint32_t index) const; + uint32_t GetChecksum() const; + std::string GetSignature() const; + uint32_t GetFileSize() const; + uint32_t GetHeaderSize() const; + uint32_t GetEndianTag() const; + uint32_t GetLinkSize() const; + uint32_t GetLinkOff() const; + uint32_t GetMapOff() const; + uint32_t GetStringIdsSize() const; + uint32_t GetStringIdsOff() const; + uint32_t GetTypeIdsSize() const; + uint32_t GetTypeIdsOff() const; + uint32_t GetProtoIdsSize() const; + uint32_t GetProtoIdsOff() const; + uint32_t GetFieldIdsSize() const; + uint32_t GetFieldIdsOff() const; + uint32_t GetMethodIdsSize() const; + uint32_t GetMethodIdsOff() const; + uint32_t GetClassDefsSize() const; + uint32_t GetClassDefsOff() const; + uint32_t GetDataSize() const; + uint32_t GetDataOff() const; + std::string GetDexVesion() const; + + private: + IDexHeader() = default; + virtual ~IDexHeader() = default; +}; + +class IDexMapList { + public: + static const IDexMapList *GetInstance(const void *data); + uint32_t GetSize() const; + 
uint16_t GetType(uint32_t index) const; + uint32_t GetTypeSize(uint32_t index) const; + + private: + IDexMapList() = default; + virtual ~IDexMapList() = default; +}; + +class ResolvedMethodHandleItem; +class ResolvedMethodType { + public: + static const ResolvedMethodType *GetInstance(const IDexFile &dexFile, const ResolvedMethodHandleItem &item); + static const ResolvedMethodType *GetInstance(const IDexFile &dexFile, uint32_t callSiteId); + static void SignatureTypes(const std::string &mt, std::list &types); + static std::string SignatureReturnType(const std::string &mt); + const std::string GetReturnType(const IDexFile &dexFile) const; + const std::string GetRawType(const IDexFile &dexFile) const; + void GetArgTypes(const IDexFile &dexFile, std::list &types) const; + private: + ResolvedMethodType() = default; + ~ResolvedMethodType() = default; +}; + +class ResolvedMethodHandleItem { + public: + static const ResolvedMethodHandleItem *GetInstance(const IDexFile &dexFile, uint32_t index); + bool IsInstance() const; + bool IsInvoke() const; + const std::string GetDeclaringClass(const IDexFile &dexFile) const; + const std::string GetMember(const IDexFile &dexFile) const; + const std::string GetMemeberProto(const IDexFile &dexFile) const; + const ResolvedMethodType *GetMethodType(const IDexFile &dexFile) const; + const std::string GetInvokeKind() const; + + private: + ResolvedMethodHandleItem() = default; + ~ResolvedMethodHandleItem() = default; +}; + +class ResolvedCallSiteIdItem { + public: + static const ResolvedCallSiteIdItem *GetInstance(const IDexFile &dexFile, uint32_t index); + uint32_t GetDataOff() const; + uint32_t GetMethodHandleIndex(const IDexFile &dexFile) const; + const ResolvedMethodType *GetMethodType(const IDexFile &dexFile) const; + const std::string GetProto(const IDexFile &dexFile) const; + const std::string GetMethodName(const IDexFile &dexFile) const; + void GetLinkArgument(const IDexFile &dexFile, std::list> &args) const; + + private: + ResolvedCallSiteIdItem() = default; + ~ResolvedCallSiteIdItem() = default; +}; + +class IDexFile { + public: + IDexFile() = default; + virtual ~IDexFile() = default; + virtual bool Open(const std::string &fileName) = 0; + virtual const void *GetData() const = 0; + virtual uint32_t GetFileIdx() const = 0; + virtual void SetFileIdx(uint32_t fileIdxArg) = 0; + virtual const uint8_t *GetBaseAddress() const = 0; + virtual const IDexHeader *GetHeader() const = 0; + virtual const IDexMapList *GetMapList() const = 0; + virtual uint32_t GetStringDataOffset(uint32_t index) const = 0; + virtual uint32_t GetTypeDescriptorIndex(uint32_t index) const = 0; + virtual const char *GetStringByIndex(uint32_t index) const = 0; + virtual const char *GetStringByTypeIndex(uint32_t index) const = 0; + virtual const IDexProtoIdItem *GetProtoIdItem(uint32_t index) const = 0; + virtual const IDexFieldIdItem *GetFieldIdItem(uint32_t index) const = 0; + virtual const IDexMethodIdItem *GetMethodIdItem(uint32_t index) const = 0; + virtual uint32_t GetClassItemsSize() const = 0; + virtual const IDexClassItem *GetClassItem(uint32_t index) const = 0; + virtual bool IsNoIndex(uint32_t index) const = 0; + virtual uint32_t GetTypeIdFromName(const std::string &className) const = 0; + virtual uint32_t ReadUnsignedLeb128(const uint8_t **pStream) const = 0; + virtual uint32_t FindClassDefIdx(const std::string &descriptor) const = 0; + virtual std::unordered_map GetDefiningClassNameTypeIdMap() const = 0; + + // ===== DecodeDebug ===== + using DebugNewPositionCallback = int 
(*)(void *cnxt, uint32_t address, uint32_t lineNum); + using DebugNewLocalCallback = void (*)(void *cnxt, uint16_t reg, uint32_t startAddress, uint32_t endAddress, + const char *name, const char *descriptor, const char *signature); + virtual void DecodeDebugLocalInfo(const IDexMethodItem &method, DebugNewLocalCallback newLocalCb) = 0; + virtual void DecodeDebugPositionInfo(const IDexMethodItem &method, DebugNewPositionCallback newPositionCb) = 0; + + // ===== Call Site ======= + virtual const ResolvedCallSiteIdItem *GetCallSiteIdItem(uint32_t idx) const = 0; + virtual const ResolvedMethodHandleItem *GetMethodHandleItem(uint32_t idx) const = 0; +}; +} // namespace maple +#endif // HIR2MPL_DEX_INPUT_INCLUDE_DEXFILE_INTERFACE_H diff --git a/src/hir2mpl/bytecode_input/dex/include/dexfile_libdexfile.h b/src/hir2mpl/bytecode_input/dex/include/dexfile_libdexfile.h new file mode 100644 index 0000000000000000000000000000000000000000..bbb2a59dd3e8656af312124183bfde7c94dfb7a3 --- /dev/null +++ b/src/hir2mpl/bytecode_input/dex/include/dexfile_libdexfile.h @@ -0,0 +1,133 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef HIR2MPL_DEX_INPUT_INCLUDE_DEXFILE_LIBDEXFILE_H +#define HIR2MPL_DEX_INPUT_INCLUDE_DEXFILE_LIBDEXFILE_H + +#include "dexfile_interface.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace maple { +class DexInstruction : public IDexInstruction { + public: + explicit DexInstruction(const art::Instruction &artInstruction); + ~DexInstruction() = default; + void Init(); + IDexOpcode GetOpcode() const override; + const char *GetOpcodeName() const override; + uint32_t GetVRegA() const override; + uint32_t GetVRegB() const override; + uint64_t GetVRegBWide() const override; + uint32_t GetVRegC() const override; + uint32_t GetVRegH() const override; + bool HasVRegA() const override; + bool HasVRegB() const override; + bool HasWideVRegB() const override; + bool HasVRegC() const override; + bool HasVRegH() const override; + bool HasArgs() const override; + uint32_t GetArg(uint32_t index) const override; + IDexInstructionIndexType GetIndexType() const override; + IDexInstructionFormat GetFormat() const override; + size_t GetWidth() const override; + void SetOpcode(IDexOpcode opcode) override; + void SetVRegB(uint32_t vRegB) override; + void SetVRegBWide(uint64_t vRegBWide) override; + void SetIndexType(IDexInstructionIndexType indexType) override; + + private: + IDexOpcode opcode; + IDexInstructionIndexType indexType; + uint32_t vA; + uint32_t vB; + uint64_t vBWide; + uint32_t vC; + uint32_t vH; + const art::Instruction *artInstruction = nullptr; // input parameter + uint32_t arg[art::Instruction::kMaxVarArgRegs]; +}; + +class LibDexFile : public IDexFile { + public: + LibDexFile() = default; + LibDexFile(std::unique_ptr artDexFile, + std::unique_ptr contentPtrIn); + ~LibDexFile(); + bool Open(const std::string &fileName) override; + const void *GetData() const override { + return 
dexFile; + } + + uint32_t GetFileIdx() const override { + return fileIdx; + } + + void SetFileIdx(uint32_t fileIdxArg) override { + fileIdx = fileIdxArg; + } + + const uint8_t *GetBaseAddress() const override; + const IDexHeader *GetHeader() const override; + const IDexMapList *GetMapList() const override; + uint32_t GetStringDataOffset(uint32_t index) const override; + uint32_t GetTypeDescriptorIndex(uint32_t index) const override; + const char *GetStringByIndex(uint32_t index) const override; + const char *GetStringByTypeIndex(uint32_t index) const override; + const IDexProtoIdItem *GetProtoIdItem(uint32_t index) const override; + const IDexFieldIdItem *GetFieldIdItem(uint32_t index) const override; + const IDexMethodIdItem *GetMethodIdItem(uint32_t index) const override; + uint32_t GetClassItemsSize() const override; + const IDexClassItem *GetClassItem(uint32_t index) const override; + bool IsNoIndex(uint32_t index) const override; + uint32_t GetTypeIdFromName(const std::string &className) const override; + uint32_t ReadUnsignedLeb128(const uint8_t **pStream) const override; + uint32_t FindClassDefIdx(const std::string &descriptor) const override; + std::unordered_map GetDefiningClassNameTypeIdMap() const override; + + // ===== DecodeDebug ===== + void DecodeDebugLocalInfo(const IDexMethodItem &method, DebugNewLocalCallback newLocalCb) override; + void DecodeDebugPositionInfo(const IDexMethodItem &method, DebugNewPositionCallback newPositionCb) override; + + // ===== Call Site ======= + const ResolvedCallSiteIdItem *GetCallSiteIdItem(uint32_t idx) const override; + const ResolvedMethodHandleItem *GetMethodHandleItem(uint32_t idx) const override; + + private: + void DebugNewLocalCb(void *context, const art::DexFile::LocalInfo &entry) const; + bool DebugNewPositionCb(void *context, const art::DexFile::PositionInfo &entry) const; + bool CheckFileSize(size_t fileSize); + uint32_t fileIdx = 0; + // use for save opened content when load on-demand type from opened dexfile + std::unique_ptr contentPtr; + const art::DexFile *dexFile = nullptr; + const IDexHeader *header = nullptr; + const IDexMapList *mapList = nullptr; + DebugNewLocalCallback debugNewLocalCb = nullptr; + DebugNewPositionCallback debugNewPositionCb = nullptr; + std::string content; + std::vector> dexFiles; +}; +} // namespace maple +#endif // HIR2MPL_DEX_INPUT_INCLUDE_DEXFILE_LIBDEXFILE_H diff --git a/src/hir2mpl/bytecode_input/dex/include/string_view_format.h b/src/hir2mpl/bytecode_input/dex/include/string_view_format.h new file mode 100644 index 0000000000000000000000000000000000000000..7aafd7d83cb169d3c0fc7c7ccb853519aeb729a8 --- /dev/null +++ b/src/hir2mpl/bytecode_input/dex/include/string_view_format.h @@ -0,0 +1,24 @@ +/* + * Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef STRING_VIEW_FORMAT_H +#define STRING_VIEW_FORMAT_H +#if defined(USE_LLVM) || defined(__ANDROID__) || defined(DARWIN) +#include +using StringView = std::string_view; +#else +#include +using StringView = std::experimental::string_view; +#endif +#endif // STRING_VIEW_FORMAT_H diff --git a/src/hir2mpl/bytecode_input/dex/src/class_linker.cpp b/src/hir2mpl/bytecode_input/dex/src/class_linker.cpp new file mode 100644 index 0000000000000000000000000000000000000000..f613b045766865408ddc2fbe3205fc2752ec12c4 --- /dev/null +++ b/src/hir2mpl/bytecode_input/dex/src/class_linker.cpp @@ -0,0 +1,127 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "class_linker.h" +#include "fe_macros.h" + +namespace maple { +ClassLinker::ClassLinker(std::vector> &classPath) { + for (std::unique_ptr& dexFileParser : classPath) { + bootClassPath.push_back(std::move(dexFileParser)); + } +} + +static std::unique_ptr FindInClassPath(const std::string &descriptor, + std::vector> &dexFileParsers) { + for (std::unique_ptr &dexFileParser : dexFileParsers) { + CHECK_NULL_FATAL(dexFileParser); + std::unique_ptr klass = dexFileParser->FindClassDef(descriptor); + if (klass != nullptr) { + return klass; + } + } + return nullptr; +} + +std::unique_ptr ClassLinker::FindInSharedLib(const std::string &className, ClassLoaderInfo &classLoader) { + for (uint32 i = 0; i < classLoader.sharedLibraries.size(); ++i) { + std::unique_ptr result = FindInBaseClassLoader(className, classLoader.sharedLibraries[i]); + if (result) { + return result; + } + } + return nullptr; +} + +std::unique_ptr ClassLinker::FindInClassLoaderClassPath(const std::string &className, + ClassLoaderInfo &classLoader) const { + return FindInClassPath(className, classLoader.hexElements); +} + +std::unique_ptr ClassLinker::FindInBaseClassLoader(const std::string &className, + ClassLoaderInfo *classLoader) { + if (classLoader == nullptr) { + return FindInClassPath(className, bootClassPath); + } + if (classLoader->type == kPathClassLoader || classLoader->type == kInMemoryDexClassLoader) { + // Parent; Shared Libraries; Class loader dex files + std::unique_ptr result = FindInBaseClassLoader(className, classLoader->parent); + if (result != nullptr) { + return result; + } + result = FindInSharedLib(className, *classLoader); + if (result != nullptr) { + return result; + } + return FindInClassLoaderClassPath(className, *classLoader); + } + if (classLoader->type == kDelegateLastClassLoader) { + // Boot class path; Shared class path; Class loader dex files; Parent + std::unique_ptr result = FindInClassPath(className, bootClassPath); + if (result != nullptr) { + return result; + } + result = FindInSharedLib(className, *classLoader); + if (result != nullptr) { + return result; + } + result = FindInClassLoaderClassPath(className, *classLoader); + if (result != nullptr) { + return result; + } + return FindInBaseClassLoader(className, classLoader->parent); + } + return nullptr; +} + +void ClassLinker::LoadSuperAndInterfaces(const 
std::unique_ptr &klass, ClassLoaderInfo *classLoader, + std::list> &klassList) { + if (klass->GetSuperClassNames().size() > 0) { + FindClass(klass->GetSuperClassNames().front(), classLoader, klassList); + } + for (std::string interfaceName : klass->GetSuperInterfaceNames()) { + FindClass(interfaceName, classLoader, klassList); + } +} + +void ClassLinker::FindClass(const std::string &className, ClassLoaderInfo *classLoader, + std::list> &klassList, bool isDefClass) { + GStrIdx nameIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(className); + auto it = processedClassName.find(nameIdx); + if (it != processedClassName.end()) { + if (isDefClass) { + FE_INFO_LEVEL(FEOptions::kDumpLevelInfo, "Same class is existed: %s", className.c_str()); + } + return; + } + // Find in bootclasspath + std::unique_ptr klass; + if (classLoader == nullptr) { + klass = FindInClassPath(className, bootClassPath); + } else { // Find in classLoader files + klass = FindInBaseClassLoader(className, classLoader); + } + if (klass != nullptr) { + if (isDefClass) { + FE_INFO_LEVEL(FEOptions::kDumpLevelInfo, "Same class is existed: %s", className.c_str()); + } + FE_INFO_LEVEL(FEOptions::kDumpLevelInfoDetail, "klassName=%s", klass->GetClassName(false).c_str()); + LoadSuperAndInterfaces(klass, classLoader, klassList); + (void)processedClassName.insert(nameIdx); + klassList.push_back(std::move(klass)); + } else if (!isDefClass) { + WARN(kLncWarn, "Find class failed: %s", className.c_str()); + } +} +} diff --git a/src/hir2mpl/bytecode_input/dex/src/class_loader_context.cpp b/src/hir2mpl/bytecode_input/dex/src/class_loader_context.cpp new file mode 100644 index 0000000000000000000000000000000000000000..c47b6e6d60d3b8d1b15d6d00b83fac6f1b76f623 --- /dev/null +++ b/src/hir2mpl/bytecode_input/dex/src/class_loader_context.cpp @@ -0,0 +1,281 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include +#include "dexfile_libdexfile.h" +#include "class_loader_context.h" + +namespace maple { +const char ClassLoaderContext::kPathClassLoaderString[] = "PCL"; +const char ClassLoaderContext::kDelegateLastClassLoaderString[] = "DLC"; +const char ClassLoaderContext::kInMemoryDexClassLoaderString[] = "IMC"; +const char ClassLoaderContext::kClassLoaderOpeningMark = '['; +const char ClassLoaderContext::kClassLoaderClosingMark = ']'; +const char ClassLoaderContext::kClassLoaderSharedLibraryOpeningMark = '{'; +const char ClassLoaderContext::kClassLoaderSharedLibraryClosingMark = '}'; +const char ClassLoaderContext::kClassLoaderSharedLibrarySeparator = '#'; +const char ClassLoaderContext::kClassLoaderSeparator = ';'; +const char ClassLoaderContext::kClasspathSeparator = ','; +const char ClassLoaderContext::kSpecialSharedLib[] = "&"; + +size_t ClassLoaderContext::FindMatchingSharedLibraryCloseMarker(const std::string& spec, + size_t sharedLibraryOpenIndex) { + uint32_t counter = 1; + size_t stringIndex = sharedLibraryOpenIndex + 1; + size_t sharedLibraryClose = std::string::npos; + while (counter != 0) { + sharedLibraryClose = spec.find_first_of(kClassLoaderSharedLibraryClosingMark, stringIndex); + size_t sharedLibraryOpen = spec.find_first_of(kClassLoaderSharedLibraryOpeningMark, stringIndex); + if (sharedLibraryClose == std::string::npos) { + break; + } + if ((sharedLibraryOpen == std::string::npos) || + (sharedLibraryClose < sharedLibraryOpen)) { + --counter; + stringIndex = sharedLibraryClose + 1; + } else { + ++counter; + stringIndex = sharedLibraryOpen + 1; + } + } + return sharedLibraryClose; +} + +ClassLoaderType ClassLoaderContext::GetCLType(const std::string& clString) { + if (clString.compare(0, strlen(kPathClassLoaderString), kPathClassLoaderString) == 0) { + return kPathClassLoader; + } else if (clString.compare(0, strlen(kDelegateLastClassLoaderString), kDelegateLastClassLoaderString) == 0) { + return kDelegateLastClassLoader; + } else if (clString.compare(0, strlen(kInMemoryDexClassLoaderString), kInMemoryDexClassLoaderString) == 0) { + return kInMemoryDexClassLoader; + } + return kInvalidClassLoader; +} + +const char* ClassLoaderContext::GetCLTypeName(ClassLoaderType type) const { + switch (type) { + case kPathClassLoader: { + return kPathClassLoaderString; + } + case kDelegateLastClassLoader: { + return kDelegateLastClassLoaderString; + } + case kInMemoryDexClassLoader: { + return kInMemoryDexClassLoaderString; + } + default: { + return nullptr; + } + } +} + +ClassLoaderInfo *ClassLoaderContext::ParseClassLoaderSpec(const std::string &spec) { + ClassLoaderType classLoaderType = GetCLType(spec); + if (classLoaderType == kInvalidClassLoader) { + return nullptr; + } + if (classLoaderType == kInMemoryDexClassLoader) { + return nullptr; + } + const char* classLoaderTypeStr = GetCLTypeName(classLoaderType); + CHECK_FATAL(classLoaderTypeStr != nullptr, "classLoaderTypeStr return a nullptr!"); + size_t typeStrZize = strlen(classLoaderTypeStr); + // Check the opening and closing markers. 
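+  // Illustrative spec shapes handled by this function (the file names below are placeholders):
+  //   "PCL[base.dex,extra.dex]"      -> one PathClassLoader whose hexElements come from both files
+  //   "PCL[base.dex]{PCL[lib.dex]}"  -> as above, plus one shared-library loader parsed from lib.dex
+  // Chains such as "PCL[a.dex];DLC[b.dex]" are split into single specs by ParseInternal before reaching here.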
+ if (spec[typeStrZize] != kClassLoaderOpeningMark) { + return nullptr; + } + if ((spec[spec.length() - 1] != kClassLoaderClosingMark) && + (spec[spec.length() - 1] != kClassLoaderSharedLibraryClosingMark)) { + return nullptr; + } + size_t closingIndex = spec.find_first_of(kClassLoaderClosingMark); + std::string classpath = spec.substr(typeStrZize + 1, closingIndex - typeStrZize - 1); + ClassLoaderInfo *info = mp.New(); + info->type = classLoaderType; + if (!OpenDexFiles(classpath, info->hexElements)) { + return nullptr; + }; + if ((spec[spec.length() - 1] == kClassLoaderSharedLibraryClosingMark) && + (spec[spec.length() - 2] != kClassLoaderSharedLibraryOpeningMark)) { + size_t startIndex = spec.find_first_of(kClassLoaderSharedLibraryOpeningMark); + if (startIndex == std::string::npos) { + return nullptr; + } + std::string sharedLibrariesSpec = spec.substr(startIndex + 1, spec.length() - startIndex - 2); + if (!ParseSharedLibraries(sharedLibrariesSpec, *info)) { + return nullptr; + } + } + return info; +} + +bool ClassLoaderContext::ParseSharedLibraries(std::string &sharedLibrariesSpec, ClassLoaderInfo &info) { + size_t cursor = 0; + while (cursor != sharedLibrariesSpec.length()) { + size_t sharedLibrarySeparator = sharedLibrariesSpec.find_first_of(kClassLoaderSharedLibrarySeparator, cursor); + size_t sharedLibraryOpen = sharedLibrariesSpec.find_first_of(kClassLoaderSharedLibraryOpeningMark, cursor); + std::string sharedLibrarySpec; + if (sharedLibrarySeparator == std::string::npos) { + sharedLibrarySpec = sharedLibrariesSpec.substr(cursor, sharedLibrariesSpec.length() - cursor); + cursor = sharedLibrariesSpec.length(); + } else if ((sharedLibraryOpen == std::string::npos) || (sharedLibraryOpen > sharedLibrarySeparator)) { + sharedLibrarySpec = sharedLibrariesSpec.substr(cursor, sharedLibrarySeparator - cursor); + cursor = sharedLibrarySeparator + 1; + } else { + size_t closing_marker = FindMatchingSharedLibraryCloseMarker(sharedLibrariesSpec, sharedLibraryOpen); + if (closing_marker == std::string::npos) { + return false; + } + sharedLibrarySpec = sharedLibrariesSpec.substr(cursor, closing_marker + 1 - cursor); + cursor = closing_marker + 1; + if (cursor != sharedLibrariesSpec.length() && + sharedLibrariesSpec[cursor] == kClassLoaderSharedLibrarySeparator) { + ++cursor; + } + } + ClassLoaderInfo *sharedLibrary = ParseInternal(sharedLibrarySpec); + if (sharedLibrary == nullptr) { + return false; + } + info.sharedLibraries.push_back(sharedLibrary); + } + return true; +} + +ClassLoaderInfo *ClassLoaderContext::ParseInternal(const std::string &spec) { + std::string remaining = spec; + ClassLoaderInfo *first = nullptr; + ClassLoaderInfo *previousIteration = nullptr; + while (!remaining.empty()) { + std::string currentSpec; + size_t classLoaderSeparator = remaining.find_first_of(kClassLoaderSeparator); + size_t sharedLibraryOpen = remaining.find_first_of(kClassLoaderSharedLibraryOpeningMark); + if (classLoaderSeparator == std::string::npos) { + currentSpec = remaining; + remaining = ""; + } else if ((sharedLibraryOpen == std::string::npos) || (sharedLibraryOpen > classLoaderSeparator)) { + currentSpec = remaining.substr(0, classLoaderSeparator); + remaining = remaining.substr(sharedLibraryOpen + 1, remaining.size() - classLoaderSeparator - 1); + } else { + size_t sharedLibraryClose = FindMatchingSharedLibraryCloseMarker(remaining, sharedLibraryOpen); + CHECK_FATAL(sharedLibraryClose != std::string::npos, + "Invalid class loader spec: %s", currentSpec.c_str()); + currentSpec = 
remaining.substr(0, sharedLibraryClose + 1); + if (remaining.size() == sharedLibraryClose + 1) { + remaining = ""; + } else if ((remaining.size() == sharedLibraryClose + 2) || + (remaining.at(sharedLibraryClose + 1) != kClassLoaderSeparator)) { + CHECK_FATAL(false, "Invalid class loader spec: %s", currentSpec.c_str()); + return nullptr; + } else { + remaining = remaining.substr(sharedLibraryClose + 2, remaining.size() - sharedLibraryClose - 2); + } + } + ClassLoaderInfo *info = ParseClassLoaderSpec(currentSpec); + CHECK_FATAL(info != nullptr, "Invalid class loader spec: %s", currentSpec.c_str()); + if (first == nullptr) { + first = info; + previousIteration = first; + } else { + CHECK_NULL_FATAL(previousIteration); + previousIteration->parent = info; + previousIteration = previousIteration->parent; + } + } + return first; +} + +// Process PCL/DLC string, and open the corresponding byte code file (dex/jar/apk) +bool ClassLoaderContext::Parse(const std::string &spec) { + if (spec.empty()) { + return false; + } + // Output to IFile, used by collision check when trying to load IFile. + if (spec.compare(kSpecialSharedLib) == 0) { + isSpecialSharedLib = true; + return false; + } + loaderChain = ParseInternal(spec); + return loaderChain != nullptr; +} + +// PCL +ClassLoaderContext *ClassLoaderContext::Create(const std::string &spec, MemPool &mp) { + ClassLoaderContext *retv = mp.New(mp); + if (retv->Parse(spec)) { + return retv; + } else { + return nullptr; + } +} + +// compiled File +ClassLoaderInfo *ClassLoaderContext::CreateClassLoader(const std::string &spec) { + ClassLoaderInfo *classLoader = mp.New(); + classLoader->type = kPathClassLoader; + classLoader->parent = loaderChain; + loaderChain = classLoader; + if (!OpenDexFiles(spec, classLoader->hexElements)) { + CHECK_FATAL(false, "Fail to Open Dex Files!"); + }; + return loaderChain; +} + +bool ClassLoaderContext::OpenDexFiles(const std::string &spec, + std::vector> &openedFileParsers) { + if (spec.empty()) { + return false; + } + bool isSuccess = true; + const std::vector &fileNames = FEUtils::Split(spec, kClasspathSeparator); + uint depFileIdx = UINT32_MAX; + for (const std::string &fileName : fileNames) { + const bool verifyChecksum = true; + const bool verify = true; + std::unique_ptr content = std::make_unique(); + // If the file is not a .dex file, the function tries .zip/.jar/.apk files, + // all of which are Zip archives with "classes.dex" inside. + if (!android::base::ReadFileToString(fileName, content.get())) { + WARN(kLncErr, "%s ReadFileToString failed", fileName.c_str()); + isSuccess = false; + continue; + } + // content size must > 0, otherwise previous step return false + const art::DexFileLoader dexFileLoader; + art::DexFileLoaderErrorCode errorCode; + std::string errorMsg; + std::vector> dexFiles; + if (!dexFileLoader.OpenAll(reinterpret_cast(content->data()), content->size(), fileName, verify, + verifyChecksum, &errorCode, &errorMsg, &dexFiles)) { + // Display returned error message to user. Note that this error behavior + // differs from the error messages shown by the original Dalvik dexdump. 
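+      // A failed entry is skipped so that the remaining classpath entries can still be opened;
+      // the failure is reported to the caller through the isSuccess return value.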
+ WARN(kLncErr, "%s open fialed, errorMsg: %s", fileName.c_str(), errorMsg.c_str()); + isSuccess = false; + continue; + } + for (size_t i = 0; i < dexFiles.size(); ++i) { + std::unique_ptr iDexFile = std::make_unique( + std::move(dexFiles[i]), std::move(content)); + iDexFile->SetFileIdx(depFileIdx); + const std::list &inputClassNames = {}; + const std::string str = fileName + " (classes" + std::to_string(i + 1) + ")"; // mark dependent dexfile name + std::unique_ptr bcParser = std::make_unique(depFileIdx, str, inputClassNames); + bcParser->SetDexFile(std::move(iDexFile)); + openedFileParsers.push_back(std::move(bcParser)); + --depFileIdx; + } + } + return isSuccess; +} +} diff --git a/src/hir2mpl/bytecode_input/dex/src/dex_class.cpp b/src/hir2mpl/bytecode_input/dex/src/dex_class.cpp new file mode 100644 index 0000000000000000000000000000000000000000..90da7c6329d9c43fdb7010e672d783551c4c6d91 --- /dev/null +++ b/src/hir2mpl/bytecode_input/dex/src/dex_class.cpp @@ -0,0 +1,97 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "dex_class.h" +#include "dex_file_util.h" +#include "dex_op.h" +#include "fe_utils_java.h" +namespace maple { +namespace bc { +// ========== DexClassField ========== +bool DexClassField::IsStaticImpl() const { + return accessFlag & DexFileUtil::kDexAccStatic; +} + +uint32 DexClassField::GetItemIdxImpl() const { + return itemIdx; +} + +uint32 DexClassField::GetIdxImpl() const { + return idx; +} + +// ========== DexClassMethod ========== +bool DexClassMethod::IsStaticImpl() const { + return accessFlag & DexFileUtil::kDexAccStatic; +} + +bool DexClassMethod::IsVirtualImpl() const { + return isVirtual; +} + +bool DexClassMethod::IsNativeImpl() const { + return accessFlag & DexFileUtil::kDexAccNative; +} + +bool DexClassMethod::IsInitImpl() const { + return GetName().compare("") == 0; +} + +bool DexClassMethod::IsClinitImpl() const { + return GetName().compare("") == 0; +} + +uint32 DexClassMethod::GetItemIdxImpl() const { + return itemIdx; +} + +uint32 DexClassMethod::GetIdxImpl() const { + return idx; +} + +std::vector> DexClassMethod::GenArgVarListImpl() const { + std::vector> args; + for (const auto ® : argRegs) { + args.emplace_back(reg->GenFEIRVarReg()); + } + return args; +} + +void DexClassMethod::GenArgRegsImpl() { + uint32 regNum = registerTotalSize - registerInsSize; + std::string proto = GetDescription(); + sigTypeNames = FEUtilJava::SolveMethodSignature(proto); + if (!IsClinit() && ((GetAccessFlag() & DexFileUtil::kDexAccStatic) == 0)) { + std::unique_ptr reg = std::make_unique(); + reg->regNum = regNum; + reg->isDef = true; + regNum += 1; + reg->regType = allocator.GetMemPool()->New(allocator, *reg, GetClassNameMplIdx()); + argRegs.emplace_back(std::move(reg)); + } + if (sigTypeNames.size() > 1) { + for (size_t i = 1; i < sigTypeNames.size(); ++i) { + std::unique_ptr reg = std::make_unique(); + reg->regNum = regNum; + reg->isDef = true; + GStrIdx sigTypeNamesIdx = + 
GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(namemangler::EncodeName(sigTypeNames.at(i))); + reg->regType = allocator.GetMemPool()->New(allocator, *reg, sigTypeNamesIdx); + argRegs.emplace_back(std::move(reg)); + regNum += (BCUtil::IsWideType(sigTypeNamesIdx) ? 2 : 1); + } + } +} +} // namespace bc +} // namespace maple \ No newline at end of file diff --git a/src/hir2mpl/bytecode_input/dex/src/dex_class2fe_helper.cpp b/src/hir2mpl/bytecode_input/dex/src/dex_class2fe_helper.cpp new file mode 100644 index 0000000000000000000000000000000000000000..4e0d39b7b75ed1323aeda397023a530ef3ee196b --- /dev/null +++ b/src/hir2mpl/bytecode_input/dex/src/dex_class2fe_helper.cpp @@ -0,0 +1,174 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "dex_class2fe_helper.h" +#include "dex_file_util.h" +#include "fe_options.h" +#include "mpl_logging.h" +#include "fe_manager.h" +namespace maple { +namespace bc { +// ========== DexClass2FEHelper ========== +DexClass2FEHelper::DexClass2FEHelper(MapleAllocator &allocator, bc::BCClass &klassIn) + : BCClass2FEHelper(allocator, klassIn) {} + +TypeAttrs DexClass2FEHelper::GetStructAttributeFromInputImpl() const { + TypeAttrs attrs; + uint32 klassAccessFlag = klass.GetAccessFlag(); + if (klassAccessFlag & DexFileUtil::kDexAccPublic) { + attrs.SetAttr(ATTR_public); + } + if (klassAccessFlag & DexFileUtil::kDexAccFinal) { + attrs.SetAttr(ATTR_final); + } + if (klassAccessFlag & DexFileUtil::kDexAccAbstract) { + attrs.SetAttr(ATTR_abstract); + } + if (klassAccessFlag & DexFileUtil::kDexAccSynthetic) { + attrs.SetAttr(ATTR_synthetic); + } + if (klassAccessFlag & DexFileUtil::kDexAccAnnotation) { + attrs.SetAttr(ATTR_annotation); + } + if (klassAccessFlag & DexFileUtil::kDexAccEnum) { + attrs.SetAttr(ATTR_enum); + } + return attrs; +} + +void DexClass2FEHelper::InitFieldHelpersImpl() { + MemPool *mp = allocator.GetMemPool(); + ASSERT(mp != nullptr, "mem pool is nullptr"); + for (const std::unique_ptr &field : klass.GetFields()) { + ASSERT(field != nullptr, "field is nullptr"); + BCClassField2FEHelper *fieldHelper = mp->New(allocator, *field); + fieldHelpers.push_back(fieldHelper); + } +} + +void DexClass2FEHelper::InitMethodHelpersImpl() { + MemPool *mp = allocator.GetMemPool(); + ASSERT(mp != nullptr, "mem pool is nullptr"); + for (std::unique_ptr &method : klass.GetMethods()) { + ASSERT(method != nullptr, "method is nullptr"); + BCClassMethod2FEHelper *methodHelper = mp->New(allocator, method); + methodHelpers.push_back(methodHelper); + } +} + +// ========== DexClassField2FEHelper ========== +FieldAttrs DexClassField2FEHelper::AccessFlag2AttributeImpl(uint32 accessFlag) const { + FieldAttrs attrs; + if (accessFlag & DexFileUtil::kDexAccPublic) { + attrs.SetAttr(FLDATTR_public); + } + if (accessFlag & DexFileUtil::kDexAccPrivate) { + attrs.SetAttr(FLDATTR_private); + } + if (accessFlag & DexFileUtil::kDexAccProtected) { + attrs.SetAttr(FLDATTR_protected); + } + if (accessFlag & DexFileUtil::kDexAccStatic) { + 
attrs.SetAttr(FLDATTR_static); + } + if (accessFlag & DexFileUtil::kDexAccFinal) { + attrs.SetAttr(FLDATTR_final); + } + if (accessFlag & DexFileUtil::kDexAccVolatile) { + attrs.SetAttr(FLDATTR_volatile); + } + if (accessFlag & DexFileUtil::kDexAccTransient) { + attrs.SetAttr(FLDATTR_transient); + } + if (accessFlag & DexFileUtil::kDexAccSynthetic) { + attrs.SetAttr(FLDATTR_synthetic); + } + if (accessFlag & DexFileUtil::kDexAccEnum) { + attrs.SetAttr(FLDATTR_enum); + } + return attrs; +} + +// ========== DexClassMethod2FEHelper ========== +FuncAttrs DexClassMethod2FEHelper::GetAttrsImpl() const { + FuncAttrs attrs; + uint32 accessFlag = method->GetAccessFlag(); + if (accessFlag & DexFileUtil::kDexAccPublic) { + attrs.SetAttr(FUNCATTR_public); + } + if (accessFlag & DexFileUtil::kDexAccPrivate) { + attrs.SetAttr(FUNCATTR_private); + } + if (accessFlag & DexFileUtil::kDexAccProtected) { + attrs.SetAttr(FUNCATTR_protected); + } + if (accessFlag & DexFileUtil::kDexAccStatic) { + attrs.SetAttr(FUNCATTR_static); + } + if (accessFlag & DexFileUtil::kDexAccFinal) { + attrs.SetAttr(FUNCATTR_final); + } + if (accessFlag & DexFileUtil::kAccDexSynchronized) { + attrs.SetAttr(FUNCATTR_synchronized); + } + if (accessFlag & DexFileUtil::kDexAccBridge) { + attrs.SetAttr(FUNCATTR_bridge); + } + if (accessFlag & DexFileUtil::kDexAccVarargs) { + attrs.SetAttr(FUNCATTR_varargs); + } + if (accessFlag & DexFileUtil::kDexAccNative) { + attrs.SetAttr(FUNCATTR_native); + } + if (accessFlag & DexFileUtil::kDexAccAbstract) { + attrs.SetAttr(FUNCATTR_abstract); + } + if (accessFlag & DexFileUtil::kDexAccStrict) { + attrs.SetAttr(FUNCATTR_strict); + } + if (accessFlag & DexFileUtil::kDexAccSynthetic) { + attrs.SetAttr(FUNCATTR_synthetic); + } + if (accessFlag & DexFileUtil::kDexAccConstructor) { + attrs.SetAttr(FUNCATTR_constructor); + } + if (accessFlag & DexFileUtil::kDexDeclaredSynchronized) { + attrs.SetAttr(FUNCATTR_declared_synchronized); + } + if (IsVirtual()) { + attrs.SetAttr(FUNCATTR_virtual); + } + return attrs; +} + +bool DexClassMethod2FEHelper::IsStaticImpl() const { + if (IsClinit()) { + return true; + } + uint32 accessFlag = method->GetAccessFlag(); + if ((accessFlag & DexFileUtil::kDexAccStatic) != 0) { + return true; + } + return false; +} + +bool DexClassMethod2FEHelper::IsClinit() const { + return method->IsClinit(); +} + +bool DexClassMethod2FEHelper::IsInit() const { + return method->IsInit(); +} +} // namespace bc +} // namespace maple diff --git a/src/hir2mpl/bytecode_input/dex/src/dex_encode_value.cpp b/src/hir2mpl/bytecode_input/dex/src/dex_encode_value.cpp new file mode 100644 index 0000000000000000000000000000000000000000..cd0ef7a118b25118d72b6c76b999a7c3af59ff27 --- /dev/null +++ b/src/hir2mpl/bytecode_input/dex/src/dex_encode_value.cpp @@ -0,0 +1,192 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "dex_encode_value.h" +#include "fe_manager.h" + +namespace maple { +namespace bc { +uint64 DexEncodeValue::GetUVal(const uint8 **data, uint8 len) const { + // get value, max 8 bytes, little-endian + uint64 val = 0; + for (uint8 j = 0; j <= len; j++) { + val |= (static_cast(*(*data)++) << (j << 3)); + } + return val; +} + +MIRIntConst *DexEncodeValue::ProcessIntValue(const uint8 **data, uint8 valueArg, MIRType &type) { + // sign extended val + uint64 val = GetUVal(data, valueArg); + uint32 shiftBit = static_cast((7 - valueArg) * 8); // 7 : max shift bit args + CHECK_FATAL(valueArg <= 7, "shiftBit positive check"); + uint64 sVal = (static_cast(val) << shiftBit) >> shiftBit; + MIRIntConst *intCst = mp.New(sVal, type); + return intCst; +} + +MIRStr16Const *DexEncodeValue::ProcessStringValue(const uint8 **data, uint8 valueArg, uint32 &stringID) { + uint64 val = GetUVal(data, valueArg); + stringID = static_cast(val); + std::string str = namemangler::GetOriginalNameLiteral(dexFile.GetStringByIndex(static_cast(val))); + std::u16string str16; + (void)namemangler::UTF8ToUTF16(str16, str); + MIRStr16Const *strCst = mp.New(str16, *GlobalTables::GetTypeTable().GetPtr()); + return strCst; +} + +MIRType *DexEncodeValue::GetTypeFromValueType(uint8 valueType) const { + switch (valueType) { + case kValueBoolean: + return GlobalTables::GetTypeTable().GetInt8(); + case kValueByte: + return GlobalTables::GetTypeTable().GetInt8(); + case kValueShort: + return GlobalTables::GetTypeTable().GetInt16(); + case kValueChar: + return GlobalTables::GetTypeTable().GetUInt16(); + case kValueInt: + return GlobalTables::GetTypeTable().GetInt32(); + case kValueLong: + return GlobalTables::GetTypeTable().GetInt64(); + case kValueFloat: + return GlobalTables::GetTypeTable().GetFloat(); + case kValueDouble: + return GlobalTables::GetTypeTable().GetDouble(); + default: + return GlobalTables::GetTypeTable().GetPtr(); + } +} + +MIRAggConst *DexEncodeValue::ProcessArrayValue(const uint8 **data) { + unsigned arraySize = dexFile.ReadUnsignedLeb128(data); + uint64 dataVal = *(*data); + uint8 newValueType = dataVal & 0x1f; + MIRType *elemType = GetTypeFromValueType(static_cast(newValueType)); + MIRType *arrayTypeWithSize = GlobalTables::GetTypeTable().GetOrCreateArrayType(*elemType, 1, &arraySize); + MIRAggConst *aggCst = mp.New(FEManager::GetModule(), *arrayTypeWithSize); + for (uint32 i = 0; i < arraySize; ++i) { + MIRConst *mirConst = nullptr; + ProcessEncodedValue(data, mirConst); + aggCst->PushBack(mirConst); + } + return aggCst; +} + +void DexEncodeValue::ProcessAnnotationValue(const uint8 **data) { + uint32 annoSize = dexFile.ReadUnsignedLeb128(data); + MIRConst *mirConst = nullptr; + for (uint32 i = 0; i < annoSize; ++i) { + ProcessEncodedValue(data, mirConst); + } +} + +void DexEncodeValue::ProcessEncodedValue(const uint8 **data, MIRConst *&cst) { + uint64 dataVal = *(*data)++; + uint32 noMeaning = 0; + // valueArgs : dataVal >> 5, The higher three bits are valid. 
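+  // encoded_value leading byte layout (dex format): bits [4:0] hold value_type and bits [7:5] hold
+  // value_arg, which for the numeric types is the encoded size in bytes minus one. For example, a
+  // leading byte of 0x64 decodes to value_type 0x04 (VALUE_INT) with value_arg 3, i.e. a 4-byte int.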
+ ProcessEncodedValue(data, static_cast(dataVal & 0x1f), static_cast(dataVal >> 5), cst, noMeaning); +} + +void DexEncodeValue::ProcessEncodedValue(const uint8 **data, uint8 valueType, uint8 valueArg, MIRConst *&cst, + uint32 &stringID) { + uint64 val = 0; + cst = mp.New(0, *GlobalTables::GetTypeTable().GetInt32()); + switch (valueType) { + case kValueByte: { + val = GetUVal(data, valueArg); + cst = mp.New(val, *GlobalTables::GetTypeTable().GetInt8()); + break; + } + case kValueShort: { + cst = ProcessIntValue(data, valueArg, *GlobalTables::GetTypeTable().GetInt16()); + break; + } + case kValueChar: { + val = GetUVal(data, valueArg); + cst = mp.New(val, *GlobalTables::GetTypeTable().GetUInt16()); + break; + } + case kValueInt: { + cst = ProcessIntValue(data, valueArg, *GlobalTables::GetTypeTable().GetInt32()); + break; + } + case kValueLong: { + cst = ProcessIntValue(data, valueArg, *GlobalTables::GetTypeTable().GetInt64()); + break; + } + case kValueFloat: { + val = GetUVal(data, valueArg); + // fill 0s for least significant bits + valBuf.u = val << ((3 - valueArg) << 3); + cst = mp.New(valBuf.f, *GlobalTables::GetTypeTable().GetFloat()); + break; + } + case kValueDouble: { + val = GetUVal(data, valueArg); + // fill 0s for least significant bits + valBuf.u = val << ((7 - valueArg) << 3); + cst = mp.New(valBuf.d, *GlobalTables::GetTypeTable().GetDouble()); + break; + } + case kValueString: { + cst = ProcessStringValue(data, valueArg, stringID); + break; + } + case kValueMethodType: { + break; + } + case kValueType: { + (void)GetUVal(data, valueArg); + break; + } + case kValueMethodHandle: { + break; + } + case kValueEnum: + // should fallthrough + [[fallthrough]]; + case kValueField: { + (void)GetUVal(data, valueArg); + break; + } + case kValueMethod: { + (void)GetUVal(data, valueArg); + break; + } + case kValueArray: { + CHECK_FATAL(!valueArg, "value_arg != 0"); + cst = ProcessArrayValue(data); + break; + } + case kValueAnnotation: { + (void)GetUVal(data, valueArg); + break; + } + case kValueNull: { + CHECK_FATAL(!valueArg, "value_arg != 0"); + cst = mp.New(0, *GlobalTables::GetTypeTable().GetInt8()); + break; + } + case kValueBoolean: { + cst = mp.New(valueArg, *GlobalTables::GetTypeTable().GetInt8()); + break; + } + default: { + break; + } + } +} +} // namespace bc +} // namespace maple diff --git a/src/hir2mpl/bytecode_input/dex/src/dex_file_util.cpp b/src/hir2mpl/bytecode_input/dex/src/dex_file_util.cpp new file mode 100644 index 0000000000000000000000000000000000000000..6d09cc7c2c51f6516b975e6f9274de7b535e60f5 --- /dev/null +++ b/src/hir2mpl/bytecode_input/dex/src/dex_file_util.cpp @@ -0,0 +1,44 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "dex_file_util.h" +namespace maple { +namespace bc { +const uint8 DexFileUtil::kDexFileMagic[] = {0x64, 0x65, 0x78, 0x0a, 0x30, 0x33, 0x39, 0x00}; // dex\n039\0 +const uint8 DexFileUtil::kEndianConstant[] = {0x12, 0x34, 0x56, 0x78}; // 0x12345678 +const uint8 DexFileUtil::kReverseEndianConstant[] = {0x78, 0x56, 0x34, 0x12}; // 0x78563412 + +uint32 DexFileUtil::Adler32(const uint8 *data, uint32 size) { + // May be different with adler32 in libz + uint32 kMod = 65521; + // Calculate A + uint16 a = 1; + for (uint32 i = 0; i < size; ++i) { + a += *(data + i); // truncate the data over 16bit + } + a %= kMod; + + // Caculate B + uint16 item = 1; + uint16 b = 0; + for (uint32 i = 0; i < size; ++i) { + item = item + *(data + i); + b += item; + } + b %= kMod; + + return (static_cast(b) << 16) | static_cast(a); +} +} // namespace bc +} // namespace maple diff --git a/src/hir2mpl/bytecode_input/dex/src/dex_op.cpp b/src/hir2mpl/bytecode_input/dex/src/dex_op.cpp new file mode 100644 index 0000000000000000000000000000000000000000..24e85c957ad5ab3500414ed5e93d2e96bdd1c595 --- /dev/null +++ b/src/hir2mpl/bytecode_input/dex/src/dex_op.cpp @@ -0,0 +1,1851 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "dex_op.h" +#include "dex_file_util.h" +#include "mpl_logging.h" +#include "namemangler.h" +#include "global_tables.h" +#include "bc_class.h" +#include "feir_builder.h" +#include "fe_utils_java.h" +#include "dex_util.h" +#include "feir_type_helper.h" +#include "dex_strfac.h" +#include "fe_options.h" +#include "feir_var_name.h" +#include "ark_annotation_processor.h" + +namespace maple { +namespace bc { +// ========== DexOp ========== +DexOp::DexOp(MapleAllocator &allocatorIn, uint32 pcIn, DexOpCode opcodeIn) + : BCInstruction(allocatorIn, pcIn, opcodeIn) {} + +void DexOp::SetVA(uint32 num) { + SetVAImpl(num); +} + +void DexOp::SetVB(uint32 num) { + SetVBImpl(num); +} + +void DexOp::SetWideVB(uint64 num) { + SetWideVBImpl(num); +} + +void DexOp::SetArgs(const MapleList &args) { + SetArgsImpl(args); +} + +void DexOp::SetVC(uint32 num) { + SetVCImpl(num); +} + +void DexOp::SetVH(uint32 num) { + SetVHImpl(num); +} + +std::string DexOp::GetArrayElementTypeFromArrayType(const std::string &typeName) { + CHECK_FATAL(!typeName.empty() && typeName[0] == 'A', "Invalid array type name %s", typeName.c_str()); + return typeName.substr(1); +} + +// ========== DexOpNop ========== +DexOpNop::DexOpNop(MapleAllocator &allocatorIn, uint32 pcIn, DexOpCode opcodeIn) : DexOp(allocatorIn, pcIn, opcodeIn) {} + +// ========== DexOpMove ========== +DexOpMove::DexOpMove(MapleAllocator &allocatorIn, uint32 pcIn, DexOpCode opcodeIn) + : DexOp(allocatorIn, pcIn, opcodeIn) {} + +void DexOpMove::SetVAImpl(uint32 num) { + vA.regNum = num; + vA.isDef = true; + GStrIdx typeNameIdx; + if (kDexOpMove <= opcode && opcode <= kDexOpMove16) { + typeNameIdx = BCUtil::GetIntIdx(); + } else if (kDexOpMoveWide <= opcode && opcode <= kDexOpMoveWide16) { + typeNameIdx = BCUtil::GetLongIdx(); + } else 
if (kDexOpMoveObject <= opcode && opcode <= kDexOpMoveObject16) { + typeNameIdx = BCUtil::GetJavaObjectNameMplIdx(); + } else { + CHECK_FATAL(false, "invalid opcode: %x in `DexOpMove`", opcode); + } + vA.regType = allocator.GetMemPool()->New(allocator, vA, typeNameIdx, true); + defedRegs.emplace_back(&vA); +} + +void DexOpMove::SetVBImpl(uint32 num) { + vB.regNum = num; + vB.regTypeItem = vA.regTypeItem; + usedRegs.emplace_back(&vB); +} + +void DexOpMove::SetRegTypeInTypeInferImpl() { + vB.regType->RegisterRelatedBCRegType(vA.regType); +} + +std::list DexOpMove::EmitToFEIRStmtsImpl() { + std::list stmts; + std::unique_ptr dreadExpr = std::make_unique(vB.GenFEIRVarReg()); + UniqueFEIRStmt stmt = std::make_unique(vA.GenFEIRVarReg(), std::move(dreadExpr)); + stmts.emplace_back(std::move(stmt)); + return stmts; +} + +// ========== DexOpMoveResult ========= +DexOpMoveResult::DexOpMoveResult(MapleAllocator &allocatorIn, uint32 pcIn, DexOpCode opcodeIn) + : DexOp(allocatorIn, pcIn, opcodeIn) { + isReturn = true; +} + +void DexOpMoveResult::SetVATypeNameIdx(const GStrIdx &idx) { + vA.regType->SetTypeNameIdx(idx); +} + +DexReg DexOpMoveResult::GetVA() const { + return vA; +} + +void DexOpMoveResult::SetVAImpl(uint32 num) { + vA.regNum = num; + vA.isDef = true; + defedRegs.emplace_back(&vA); +} + +void DexOpMoveResult::SetBCRegTypeImpl(const BCInstruction &inst) { + if ((kDexOpInvokeVirtual <= inst.GetOpcode() && inst.GetOpcode() <= kDexOpInvokeInterface) || + (kDexOpInvokeVirtualRange <= inst.GetOpcode() && inst.GetOpcode() <= kDexOpInvokeInterfaceRange) || + (kDexOpInvokePolymorphic <= inst.GetOpcode() && inst.GetOpcode() <= kDexOpInvokeCustomRange)) { + GStrIdx retTyIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName( + namemangler::EncodeName(static_cast(inst).GetReturnType())); + vA.regType = allocator.GetMemPool()->New(allocator, vA, retTyIdx); + } else if (inst.GetOpcode() == kDexOpFilledNewArray || inst.GetOpcode() == kDexOpFilledNewArrayRange) { + vA.regType = allocator.GetMemPool()->New( + allocator, vA, static_cast(inst).GetReturnType()); + } else { + CHECK_FATAL(false, "the reg of opcode %u should be inserted in move-result", inst.GetOpcode()); + } +} + +// ========== DexOpMoveException ========== +DexOpMoveException::DexOpMoveException(MapleAllocator &allocatorIn, uint32 pcIn, DexOpCode opcodeIn) + : DexOp(allocatorIn, pcIn, opcodeIn) {} + +void DexOpMoveException::SetVAImpl(uint32 num) { + vA.regNum = num; + vA.isDef = true; + defedRegs.emplace_back(&vA); +} + +void DexOpMoveException::SetBCRegTypeImpl(const BCInstruction &inst) { + (void) inst; + GStrIdx exceptionTypeNameIdx = *(catchedExTypeNamesIdx.begin()); + vA.regType = allocator.GetMemPool()->New(allocator, vA, exceptionTypeNameIdx); + // If exception type is primitive type, it should be <* void> + if (vA.GetBasePrimType() != PTY_ref) { + vA.regTypeItem->typeNameIdx = BCUtil::GetJavaThrowableNameMplIdx(); + } +} + +std::list DexOpMoveException::EmitToFEIRStmtsImpl() { + std::list stmts; + std::unique_ptr readExpr = std::make_unique(PTY_ref, -kSregThrownval); + UniqueFEIRStmt stmt = std::make_unique(vA.GenFEIRVarReg(), std::move(readExpr)); + stmts.emplace_back(std::move(stmt)); + return stmts; +} + +// ========== DexOpReturn ========== +DexOpReturn::DexOpReturn(MapleAllocator &allocatorIn, uint32 pcIn, DexOpCode opcodeIn) + : DexOp(allocatorIn, pcIn, opcodeIn) {} + +void DexOpReturn::SetVAImpl(uint32 num) { + if (opcode == kDexOpReturnVoid) { + isReturnVoid = true; + return; + } + vA.regNum = num; + 
usedRegs.emplace_back(&vA); +} + +void DexOpReturn::ParseImpl(BCClassMethod &method) { + GStrIdx usedTypeNameIdx = + GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(namemangler::EncodeName(method.GetSigTypeNames().at(0))); + vA.regTypeItem = allocator.GetMemPool()->New(usedTypeNameIdx); +} + +std::list DexOpReturn::EmitToFEIRStmtsImpl() { + std::list ans; + if (isReturnVoid) { + UniqueFEIRStmt stmt = std::make_unique(UniqueFEIRExpr(nullptr)); + ans.emplace_back(std::move(stmt)); + } else { + UniqueFEIRVar var = vA.GenFEIRVarReg(); + UniqueFEIRExpr expr = FEIRBuilder::CreateExprDRead(std::move(var)); + UniqueFEIRStmt stmt = std::make_unique(std::move(expr)); + ans.emplace_back(std::move(stmt)); + } + return ans; +} + +// ========== DexOpConst ========== +DexOpConst::DexOpConst(MapleAllocator &allocatorIn, uint32 pcIn, DexOpCode opcodeIn) + : DexOp(allocatorIn, pcIn, opcodeIn) {} + +void DexOpConst::SetVAImpl(uint32 num) { + vA.regNum = num; + vA.isDef = true; + vA.regValue = allocator.GetMemPool()->New(); + defedRegs.emplace_back(&vA); +} + +void DexOpConst::SetVBImpl(uint32 num) { + if (opcode != kDexOpConstWide) { + vA.regValue->primValue.raw32 = num; + } + if (!isWide) { + vA.regType = allocator.GetMemPool()->New(allocator, vA, BCUtil::GetIntIdx()); + } +} + +void DexOpConst::SetWideVBImpl(uint64 num) { + if (opcode == kDexOpConstWide) { + vA.regValue->primValue.raw64 = num; + } + if (isWide) { + vA.regType = allocator.GetMemPool()->New(allocator, vA, BCUtil::GetLongIdx()); + } +} + +std::list DexOpConst::EmitToFEIRStmtsImpl() { + std::list ans; + UniqueFEIRExpr expr; + DexReg vATmp; + vATmp.regNum = vA.regNum; + GStrIdx typeNameIdx; + if (isWide) { + typeNameIdx = BCUtil::GetLongIdx(); + } else { + typeNameIdx = BCUtil::GetIntIdx(); + } + vATmp.regType = allocator.GetMemPool()->New(allocator, vATmp, typeNameIdx, true); + switch (opcode) { + case kDexOpConst4: { + expr = FEIRBuilder::CreateExprConstI8(static_cast(vA.regValue->primValue.raw32)); + expr = FEIRBuilder::CreateExprCvtPrim(std::move(expr), PTY_i32); + break; + } + case kDexOpConst16: { + expr = FEIRBuilder::CreateExprConstI16(static_cast(vA.regValue->primValue.raw32)); + expr = FEIRBuilder::CreateExprCvtPrim(std::move(expr), PTY_i32); + break; + } + case kDexOpConst: { + expr = FEIRBuilder::CreateExprConstI32(static_cast(vA.regValue->primValue.raw32)); + break; + } + case kDexOpConstHigh16: { + expr = FEIRBuilder::CreateExprConstI16(static_cast(vA.regValue->primValue.raw32)); + expr = FEIRBuilder::CreateExprCvtPrim(std::move(expr), PTY_i32); + UniqueFEIRExpr exprBit = FEIRBuilder::CreateExprConstI32(16); // 16 means right-zero-extended to 32 bits + expr = FEIRBuilder::CreateExprMathBinary(OP_shl, std::move(expr), std::move(exprBit)); + break; + } + case kDexOpConstWide16: { + expr = FEIRBuilder::CreateExprConstI16(static_cast(vA.regValue->primValue.raw64)); + expr = FEIRBuilder::CreateExprCvtPrim(std::move(expr), PTY_i64); + break; + } + case kDexOpConstWide32: { + expr = FEIRBuilder::CreateExprConstI32(static_cast(vA.regValue->primValue.raw64)); + expr = FEIRBuilder::CreateExprCvtPrim(std::move(expr), PTY_i64); + break; + } + case kDexOpConstWide: { + expr = FEIRBuilder::CreateExprConstI64(static_cast(vA.regValue->primValue.raw64)); + break; + } + case kDexOpConstWideHigh16: { + expr = FEIRBuilder::CreateExprConstI16(static_cast(vA.regValue->primValue.raw64)); + expr = FEIRBuilder::CreateExprCvtPrim(std::move(expr), PTY_i64); + UniqueFEIRExpr exprBit = FEIRBuilder::CreateExprConstI32(48); // 48 means right-zero-extended 
to 64 bits + expr = FEIRBuilder::CreateExprMathBinary(OP_shl, std::move(expr), std::move(exprBit)); + break; + } + default: { + CHECK_FATAL(false, "Unsupported const-op: %u", opcode); + break; + } + } + UniqueFEIRStmt stmt = FEIRBuilder::CreateStmtDAssign(vATmp.GenFEIRVarReg(), std::move(expr)); + ans.emplace_back(std::move(stmt)); + if (!(*vA.regTypeItem == *vATmp.regTypeItem)) { + stmt = FEIRBuilder::CreateStmtRetype(vA.GenFEIRVarReg(), vATmp.GenFEIRVarReg()); + ans.emplace_back(std::move(stmt)); + } + return ans; +} + +// ========== DexOpConstString ========== +DexOpConstString::DexOpConstString(MapleAllocator &allocatorIn, uint32 pcIn, DexOpCode opcodeIn) + : DexOp(allocatorIn, pcIn, opcodeIn), strValue(allocator.GetMemPool()) {} + +void DexOpConstString::SetVAImpl(uint32 num) { + vA.regNum = num; + vA.regType = allocator.GetMemPool()->New(allocator, vA, BCUtil::GetJavaStringNameMplIdx()); + vA.isDef = true; + defedRegs.emplace_back(&vA); +} + +void DexOpConstString::SetVBImpl(uint32 num) { + vA.dexLitStrIdx = num; +} + +void DexOpConstString::ParseImpl(BCClassMethod &method) { + fileIdx = method.GetBCClass().GetBCParser().GetReader()->GetFileIndex(); + strValue = method.GetBCClass().GetBCParser().GetReader()->GetStringFromIdx(vA.dexLitStrIdx); + if (FEOptions::GetInstance().IsRC() && !method.IsRcPermanent() && + ArkAnnotation::GetInstance().IsPermanent(strValue.c_str())) { + // "Lcom/huawei/ark/annotation/Permanent;" and "Lark/annotation/Permanent;" + // indicates next new-instance / new-array Permanent object + method.SetIsRcPermanent(true); + } +} + +std::list DexOpConstString::EmitToFEIRStmtsImpl() { + std::list stmts; + UniqueFEIRVar dstVar = vA.GenFEIRVarReg(); + UniqueFEIRStmt stmt = std::make_unique(std::move(dstVar), strValue.c_str(), + fileIdx, vA.dexLitStrIdx); + stmts.emplace_back(std::move(stmt)); + return stmts; +} + +// ========== DexOpConstClass ========== +DexOpConstClass::DexOpConstClass(MapleAllocator &allocatorIn, uint32 pcIn, DexOpCode opcodeIn) + : DexOp(allocatorIn, pcIn, opcodeIn) {} + +void DexOpConstClass::SetVAImpl(uint32 num) { + vA.regNum = num; + vA.isDef = true; + vA.regType = allocator.GetMemPool()->New(allocator, vA, BCUtil::GetJavaClassNameMplIdx()); + defedRegs.emplace_back(&vA); +} + +void DexOpConstClass::SetVBImpl(uint32 num) { + dexTypeIdx = num; +} + +void DexOpConstClass::ParseImpl(BCClassMethod &method) { + const std::string &typeName = method.GetBCClass().GetBCParser().GetReader()->GetTypeNameFromIdx(dexTypeIdx); + const std::string &mplTypeName = namemangler::EncodeName(typeName); + mplTypeNameIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(mplTypeName); +} + +std::list DexOpConstClass::EmitToFEIRStmtsImpl() { + std::list stmts; + UniqueFEIRVar dstVar = vA.GenFEIRVarReg(); + UniqueFEIRType refType = std::make_unique(BCUtil::GetPrimType(mplTypeNameIdx), mplTypeNameIdx); + UniqueFEIRExpr expr = std::make_unique(std::move(refType), INTRN_JAVA_CONST_CLASS, + std::make_unique(PTY_ref), dexTypeIdx); + UniqueFEIRStmt stmt = std::make_unique(std::move(dstVar), std::move(expr)); + stmts.emplace_back(std::move(stmt)); + return stmts; +} + +// ========== DexOpMonitor ========== +DexOpMonitor::DexOpMonitor(MapleAllocator &allocatorIn, uint32 pcIn, DexOpCode opcodeIn) + : DexOp(allocatorIn, pcIn, opcodeIn) {} + +void DexOpMonitor::SetVAImpl(uint32 num) { + vA.regNum = num; + vA.regTypeItem = allocator.GetMemPool()->New(BCUtil::GetJavaObjectNameMplIdx(), true); + usedRegs.emplace_back(&vA); +} + +std::list DexOpMonitor::EmitToFEIRStmtsImpl() { 
+ std::list stmts; + std::list exprs; + UniqueFEIRExpr expr = std::make_unique(vA.GenFEIRVarReg()); + exprs.emplace_back(std::move(expr)); + if (opcode == kDexOpMonitorEnter) { + expr = std::make_unique(static_cast(2), PTY_i32); + exprs.emplace_back(std::move(expr)); + } + UniqueFEIRStmt stmt = + std::make_unique(opcode == kDexOpMonitorEnter ? OP_syncenter : OP_syncexit, std::move(exprs)); + stmts.emplace_back(std::move(stmt)); + return stmts; +} + +// ========== DexOpCheckCast ========= +DexOpCheckCast::DexOpCheckCast(MapleAllocator &allocatorIn, uint32 pcIn, DexOpCode opcodeIn) + : DexOp(allocatorIn, pcIn, opcodeIn) {} + +void DexOpCheckCast::SetVAImpl(uint32 num) { + vA.regNum = num; + vA.regTypeItem = allocator.GetMemPool()->New(BCUtil::GetJavaObjectNameMplIdx(), true); + usedRegs.emplace_back(&vA); // use vA to gen vDef + vDef.regNum = num; + vDef.isDef = true; + defedRegs.emplace_back(&vDef); +} + +void DexOpCheckCast::SetVBImpl(uint32 num) { + targetDexTypeIdx = num; +} + +void DexOpCheckCast::ParseImpl(BCClassMethod &method) { + const std::string &typeName = method.GetBCClass().GetBCParser().GetReader()->GetTypeNameFromIdx(targetDexTypeIdx); + targetTypeNameIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(namemangler::EncodeName(typeName)); + vDef.regType = allocator.GetMemPool()->New(allocator, vDef, targetTypeNameIdx); +} + +std::list DexOpCheckCast::EmitToFEIRStmtsImpl() { + std::list stmts; + if (vA.GetPrimType() == PTY_i32) { + // only null ptr approach here + UniqueFEIRStmt stmt = FEIRBuilder::CreateStmtRetype(vDef.GenFEIRVarReg(), vA.GenFEIRVarReg()); + stmts.emplace_back(std::move(stmt)); + stmt = FEIRBuilder::CreateStmtJavaCheckCast(vDef.GenFEIRVarReg(), vDef.GenFEIRVarReg(), + FEIRBuilder::CreateTypeByJavaName(GlobalTables::GetStrTable().GetStringFromStrIdx(targetTypeNameIdx), true), + targetDexTypeIdx); + stmts.emplace_back(std::move(stmt)); + } else { + UniqueFEIRStmt stmt = FEIRBuilder::CreateStmtJavaCheckCast(vDef.GenFEIRVarReg(), vA.GenFEIRVarReg(), + FEIRBuilder::CreateTypeByJavaName(GlobalTables::GetStrTable().GetStringFromStrIdx(targetTypeNameIdx), true), + targetDexTypeIdx); + stmts.emplace_back(std::move(stmt)); + } + return stmts; +} + +// ========== DexOpInstanceOf ========= +DexOpInstanceOf::DexOpInstanceOf(MapleAllocator &allocatorIn, uint32 pcIn, DexOpCode opcodeIn) + : DexOp(allocatorIn, pcIn, opcodeIn) {} + +void DexOpInstanceOf::SetVAImpl(uint32 num) { + vA.regNum = num; + vA.isDef = true; + defedRegs.emplace_back(&vA); + vA.regType = allocator.GetMemPool()->New(allocator, vA, BCUtil::GetBooleanIdx()); +} + +void DexOpInstanceOf::SetVBImpl(uint32 num) { + vB.regNum = num; + vB.regTypeItem = allocator.GetMemPool()->New(BCUtil::GetJavaObjectNameMplIdx(), true); + usedRegs.emplace_back(&vB); +} + +void DexOpInstanceOf::SetVCImpl(uint32 num) { + targetDexTypeIdx = num; +} + +void DexOpInstanceOf::ParseImpl(BCClassMethod &method) { + typeName = method.GetBCClass().GetBCParser().GetReader()->GetTypeNameFromIdx(targetDexTypeIdx); +} + +std::list DexOpInstanceOf::EmitToFEIRStmtsImpl() { + std::list stmts; + const std::string &targetTypeName = namemangler::EncodeName(typeName); + UniqueFEIRStmt stmt = FEIRBuilder::CreateStmtJavaInstanceOf(vA.GenFEIRVarReg(), vB.GenFEIRVarReg(), + FEIRBuilder::CreateTypeByJavaName(targetTypeName, true), + targetDexTypeIdx); + stmts.emplace_back(std::move(stmt)); + typeName.clear(); + typeName.shrink_to_fit(); + return stmts; +} + +// ========== DexOpArrayLength ========= 
+DexOpArrayLength::DexOpArrayLength(MapleAllocator &allocatorIn, uint32 pcIn, DexOpCode opcodeIn) + : DexOp(allocatorIn, pcIn, opcodeIn) {} + +void DexOpArrayLength::SetVAImpl(uint32 num) { + vA.regNum = num; + vA.isDef = true; + defedRegs.emplace_back(&vA); + vA.regType = allocator.GetMemPool()->New(allocator, vA, BCUtil::GetIntIdx()); +} + +void DexOpArrayLength::SetVBImpl(uint32 num) { + vB.regNum = num; + vB.regTypeItem = allocator.GetMemPool()->New(BCUtil::GetABooleanIdx(), true); + usedRegs.emplace_back(&vB); +} + +std::list DexOpArrayLength::EmitToFEIRStmtsImpl() { + CHECK_FATAL(BCUtil::IsArrayType(vB.regTypeItem->typeNameIdx), + "Invalid array type: %s in DexOpArrayLength", + GlobalTables::GetStrTable().GetStringFromStrIdx(vB.regTypeItem->typeNameIdx).c_str()); + std::list stmts; + UniqueFEIRStmt stmt = FEIRBuilder::CreateStmtArrayLength(vA.GenFEIRVarReg(), vB.GenFEIRVarReg()); + stmts.emplace_back(std::move(stmt)); + return stmts; +} + +// ========== DexOpNewInstance ========= +DexOpNewInstance::DexOpNewInstance(MapleAllocator &allocatorIn, uint32 pcIn, DexOpCode opcodeIn) + : DexOp(allocatorIn, pcIn, opcodeIn) {} + +void DexOpNewInstance::SetVAImpl(uint32 num) { + vA.regNum = num; + vA.isDef = true; +} + +void DexOpNewInstance::SetVBImpl(uint32 num) { + vA.dexTypeIdx = num; +} + +void DexOpNewInstance::ParseImpl(BCClassMethod &method) { + const std::string &typeName = method.GetBCClass().GetBCParser().GetReader()->GetTypeNameFromIdx(vA.dexTypeIdx); + // we should register vA in defs, even if it was new-instance java.lang.String. + // Though new-instance java.lang.String would be replaced by StringFactory at following java.lang.String., + // and vA is defed by StringFactory. + // In some cases, vA may be used between new-instance and its invokation. 
+ vA.regType = allocator.GetMemPool()->New(allocator, vA, + GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(namemangler::EncodeName(typeName))); + defedRegs.emplace_back(&vA); + if (method.IsRcPermanent()) { + isRcPermanent = true; + method.SetIsRcPermanent(false); + } +} + +std::list DexOpNewInstance::EmitToFEIRStmtsImpl() { + std::list stmts; + if (isSkipNewString) { + return stmts; + } + std::unique_ptr stmtCall = std::make_unique( + INTRN_JAVA_CLINIT_CHECK, std::make_unique(PTY_ref, vA.regTypeItem->typeNameIdx), nullptr, + vA.dexTypeIdx); + stmts.emplace_back(std::move(stmtCall)); + UniqueFEIRExpr exprJavaNewInstance = FEIRBuilder::CreateExprJavaNewInstance( + vA.GenFEIRType(), vA.dexTypeIdx, isRcPermanent); + UniqueFEIRStmt stmt = FEIRBuilder::CreateStmtDAssign(vA.GenFEIRVarReg(), std::move(exprJavaNewInstance)); + stmts.emplace_back(std::move(stmt)); + return stmts; +} + +// ========== DexOpNewArray ========= +DexOpNewArray::DexOpNewArray(MapleAllocator &allocatorIn, uint32 pcIn, DexOpCode opcodeIn) + : DexOp(allocatorIn, pcIn, opcodeIn) {} + +void DexOpNewArray::SetVAImpl(uint32 num) { + vA.regNum = num; + vA.isDef = true; + defedRegs.emplace_back(&vA); +} + +void DexOpNewArray::SetVBImpl(uint32 num) { + vB.regNum = num; + vB.regTypeItem = allocator.GetMemPool()->New(BCUtil::GetIntIdx()); + usedRegs.emplace_back(&vB); +} + +void DexOpNewArray::SetVCImpl(uint32 num) { + vA.dexTypeIdx = num; +} + +void DexOpNewArray::ParseImpl(BCClassMethod &method) { + const std::string &typeName = method.GetBCClass().GetBCParser().GetReader()->GetTypeNameFromIdx(vA.dexTypeIdx); + vA.regType = allocator.GetMemPool()->New(allocator, vA, + GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(namemangler::EncodeName(typeName))); + if (method.IsRcPermanent()) { + isRcPermanent = true; + method.SetIsRcPermanent(false); + } +} + +std::list DexOpNewArray::EmitToFEIRStmtsImpl() { + std::list stmts; + UniqueFEIRExpr exprSize = FEIRBuilder::CreateExprDRead(vB.GenFEIRVarReg()); + UniqueFEIRExpr exprJavaNewArray = FEIRBuilder::CreateExprJavaNewArray(vA.GenFEIRType(), std::move(exprSize), + vA.dexTypeIdx, isRcPermanent); + UniqueFEIRStmt stmt = FEIRBuilder::CreateStmtDAssign(vA.GenFEIRVarReg(), std::move(exprJavaNewArray)); + stmts.emplace_back(std::move(stmt)); + return stmts; +} + +// ========== DexOpFilledNewArray ========= +DexOpFilledNewArray::DexOpFilledNewArray(MapleAllocator &allocatorIn, uint32 pcIn, DexOpCode opcodeIn) + : DexOp(allocatorIn, pcIn, opcodeIn), argRegs(allocator.Adapter()), vRegs(allocator.Adapter()) { + isRange = (opcode == kDexOpFilledNewArrayRange); +} + +void DexOpFilledNewArray::SetVAImpl(uint32 num) { + argsSize = num; +} + +void DexOpFilledNewArray::SetVBImpl(uint32 num) { + dexArrayTypeIdx = num; +} + +void DexOpFilledNewArray::SetArgsImpl(const MapleList &args) { + argRegs = args; +} + +GStrIdx DexOpFilledNewArray::GetReturnType() const { + return arrayTypeNameIdx; +} + +void DexOpFilledNewArray::ParseImpl(BCClassMethod &method) { + const std::string &typeName = method.GetBCClass().GetBCParser().GetReader()->GetTypeNameFromIdx(dexArrayTypeIdx); + const std::string &typeNameMpl = namemangler::EncodeName(typeName); + arrayTypeNameIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(typeNameMpl); + const std::string &elemTypeName = GetArrayElementTypeFromArrayType(typeNameMpl); + elemTypeNameIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(elemTypeName); + for (uint32 regNum : argRegs) { + DexReg reg; + reg.regNum = regNum; + reg.regTypeItem = 
allocator.GetMemPool()->New(elemTypeNameIdx); + vRegs.emplace_back(reg); + } + for (auto ® : vRegs) { + usedRegs.emplace_back(®); + } +} + +std::list DexOpFilledNewArray::EmitToFEIRStmtsImpl() { + std::list stmts; + auto exprRegList = std::make_unique>(); + for (auto vReg : vRegs) { + UniqueFEIRExpr exprReg = FEIRBuilder::CreateExprDRead(vReg.GenFEIRVarReg()); + exprRegList->emplace_back(std::move(exprReg)); + } + UniqueFEIRVar var = nullptr; + if (HasReturn()) { + static_cast(returnInst)->SetVATypeNameIdx(arrayTypeNameIdx); + var = static_cast(returnInst)->GetVA().GenFEIRVarReg(); + } else { + return stmts; + } + const std::string &elemName = GlobalTables::GetStrTable().GetStringFromStrIdx(elemTypeNameIdx); + UniqueFEIRType elemType = FEIRTypeHelper::CreateTypeByJavaName(elemName, true, false); + std::unique_ptr stmt = std::make_unique( + INTRN_JAVA_FILL_NEW_ARRAY, std::move(elemType), std::move(var), std::move(exprRegList)); + stmts.emplace_back(std::move(stmt)); + return stmts; +} + +// ========== DexOpFillArrayData ========= +DexOpFillArrayData::DexOpFillArrayData(MapleAllocator &allocatorIn, uint32 pcIn, DexOpCode opcodeIn) + : DexOp(allocatorIn, pcIn, opcodeIn) {} + +void DexOpFillArrayData::SetVAImpl(uint32 num) { + vA.regNum = num; + vA.regTypeItem = allocator.GetMemPool()->New(BCUtil::GetVoidIdx(), true); + usedRegs.emplace_back(&vA); +} + +void DexOpFillArrayData::SetVBImpl(uint32 num) { + offset = static_cast(num); // signed value in unsigned data buffer +} + +void DexOpFillArrayData::ParseImpl(BCClassMethod &method) { + const uint16 *data = method.GetInstPos() + pc + offset; + size = *(reinterpret_cast(&data[2])); + arrayData = reinterpret_cast(&data[4]); +} + +std::list DexOpFillArrayData::EmitToFEIRStmtsImpl() { + CHECK_FATAL(BCUtil::IsArrayType(vA.regTypeItem->typeNameIdx), + "Invalid array type: %s in DexOpFillArrayData", + GlobalTables::GetStrTable().GetStringFromStrIdx(vA.regTypeItem->typeNameIdx).c_str()); + std::list stmts; + thread_local static std::stringstream ss(""); + ss.str(""); + ss << "const_array_" << funcNameIdx << "_" << pc; + UniqueFEIRVar arrayReg = vA.GenFEIRVarReg(); + const std::string &arrayTypeName = GlobalTables::GetStrTable().GetStringFromStrIdx(vA.regTypeItem->typeNameIdx); + CHECK_FATAL(arrayTypeName.size() > 1 && arrayTypeName.at(0) == 'A' && + BCUtil::IsJavaPrimitveType(GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(arrayTypeName.substr(1))), + "Invalid type %s", arrayTypeName.c_str()); + UniqueFEIRStmt stmt = FEIRBuilder::CreateStmtJavaFillArrayData(std::move(arrayReg), arrayData, size, ss.str()); + stmts.emplace_back(std::move(stmt)); + return stmts; +} + +// ========== DexOpThrow ========= +DexOpThrow::DexOpThrow(MapleAllocator &allocatorIn, uint32 pcIn, DexOpCode opcodeIn) + : DexOp(allocatorIn, pcIn, opcodeIn) {} + +void DexOpThrow::SetVAImpl(uint32 num) { + vA.regNum = num; + vA.regTypeItem = allocator.GetMemPool()->New(BCUtil::GetJavaThrowableNameMplIdx(), true); + usedRegs.emplace_back(&vA); +} + +std::list DexOpThrow::EmitToFEIRStmtsImpl() { + std::list stmts; + if (BCUtil::IsJavaReferenceType(vA.regTypeItem->typeNameIdx)) { + UniqueFEIRExpr expr = FEIRBuilder::CreateExprDRead(vA.GenFEIRVarReg()); + UniqueFEIRStmt stmt = std::make_unique(OP_throw, std::move(expr)); + stmts.emplace_back(std::move(stmt)); + } else { + CHECK_FATAL(vA.regValue != nullptr && vA.regValue->primValue.raw32 == 0, "only null or ref can be thrown."); + DexReg vAtmp; + vAtmp.regNum = vA.regNum; + vAtmp.regType = allocator.GetMemPool()->New(allocator, vAtmp, 
BCUtil::GetJavaThrowableNameMplIdx()); + UniqueFEIRStmt retypeStmt = FEIRBuilder::CreateStmtRetype(vAtmp.GenFEIRVarReg(), vA.GenFEIRVarReg()); + stmts.emplace_back(std::move(retypeStmt)); + UniqueFEIRExpr expr = FEIRBuilder::CreateExprDRead(vAtmp.GenFEIRVarReg()); + UniqueFEIRStmt stmt = std::make_unique(OP_throw, std::move(expr)); + stmts.emplace_back(std::move(stmt)); + } + return stmts; +} + +// ========== DexOpGoto ========= +DexOpGoto::DexOpGoto(MapleAllocator &allocatorIn, uint32 pcIn, DexOpCode opcodeIn) + : DexOp(allocatorIn, pcIn, opcodeIn) {} + +void DexOpGoto::SetVAImpl(uint32 num) { + offset = static_cast(num); +} + +std::vector DexOpGoto::GetTargetsImpl() const { + std::vector res; + res.emplace_back(static_cast(offset + pc)); + return res; +} + +void DexOpGoto::ParseImpl(BCClassMethod &method) { + (void) method; + target = static_cast(offset + pc); +} + +std::list DexOpGoto::EmitToFEIRStmtsImpl() { + std::list stmts; + UniqueFEIRStmt stmt = std::make_unique(funcNameIdx, target); + stmts.emplace_back(std::move(stmt)); + return stmts; +} + +// ========== DexOpSwitch ========= +DexOpSwitch::DexOpSwitch(MapleAllocator &allocatorIn, uint32 pcIn, DexOpCode opcodeIn) + : DexOp(allocatorIn, pcIn, opcodeIn), keyTargetOPpcMap(allocator.Adapter()) { + isPacked = (opcode == kDexOpPackedSwitch); +} + +void DexOpSwitch::SetVAImpl(uint32 num) { + vA.regNum = num; + vA.regTypeItem = allocator.GetMemPool()->New(BCUtil::GetIntIdx()); + usedRegs.emplace_back(&vA); +} + +void DexOpSwitch::SetVBImpl(uint32 num) { + offset = static_cast(num); +} + +void DexOpSwitch::ParseImpl(BCClassMethod &method) { + const uint16 *data = method.GetInstPos() + pc + offset; + if (isPacked) { + uint16 size = data[1]; + int32 key = *(reinterpret_cast(&data[2])); + const int32 *targets = reinterpret_cast(&data[4]); + for (uint16 i = 0; i < size; i++) { + uint32 target = static_cast(pc + targets[i]); + keyTargetOPpcMap.emplace(key, std::make_pair(target, pc)); + key = key + 1; + } + } else { + uint16 size = data[1]; + const int32 *keys = reinterpret_cast(&data[2]); + const int32 *targets = reinterpret_cast(&data[2 + size * 2]); + for (uint16 i = 0; i < size; i++) { + int32 key = keys[i]; + uint32 target = static_cast(pc + targets[i]); + keyTargetOPpcMap.emplace(key, std::make_pair(target, pc)); + } + } +} + +std::vector DexOpSwitch::GetTargetsImpl() const { + std::vector res; + for (const auto &elem : keyTargetOPpcMap) { + res.emplace_back(elem.second.first); + } + return res; +} + +std::list DexOpSwitch::EmitToFEIRStmtsImpl() { + std::list stmts; + UniqueFEIRExpr expr = std::make_unique(vA.GenFEIRVarReg()); + std::unique_ptr stmt = std::make_unique(funcNameIdx, std::move(expr)); + if (defaultTarget == nullptr) { + stmt->SetDefaultLabelIdx(UINT32_MAX); // no default target, switch is the last instruction of the file + } else { + stmt->SetDefaultLabelIdx(defaultTarget->GetPC()); + } + for (auto &e : keyTargetOPpcMap) { + stmt->AddTarget(e.first, e.second.first); + } + stmts.emplace_back(std::move(stmt)); + return stmts; +} + +// ========== DexOpCompare ========= +DexOpCompare::DexOpCompare(MapleAllocator &allocatorIn, uint32 pcIn, DexOpCode opcodeIn) + : DexOp(allocatorIn, pcIn, opcodeIn) {} +void DexOpCompare::SetVAImpl(uint32 num) { + vA.regNum = num; + vA.isDef = true; + defedRegs.emplace_back(&vA); + vA.regType = allocator.GetMemPool()->New(allocator, vA, BCUtil::GetIntIdx()); +} + +void DexOpCompare::SetVBImpl(uint32 num) { + vB.regNum = num; + GStrIdx typeNameIdx; + if (opcode == kDexOpCmplFloat || opcode == 
kDexOpCmpgFloat) { + typeNameIdx = BCUtil::GetFloatIdx(); + } else if (opcode == kDexOpCmplDouble || opcode == kDexOpCmpgDouble) { + typeNameIdx = BCUtil::GetDoubleIdx(); + } else { + // kDexOpCmpLong + typeNameIdx = BCUtil::GetLongIdx(); + } + vB.regTypeItem = allocator.GetMemPool()->New(typeNameIdx); + usedRegs.emplace_back(&vB); +} + +void DexOpCompare::SetVCImpl(uint32 num) { + vC = vB; + vC.regNum = num; + vC.regTypeItem = vB.regTypeItem; + usedRegs.emplace_back(&vC); +} + +Opcode DexOpCompare::GetOpcodeFromDexIns() const { + auto mirOp = dexOpCmp2MIROp.find(opcode); + CHECK_FATAL(mirOp != dexOpCmp2MIROp.end(), "Invalid opcode: 0x%x in dexOpCmp2MIROp", opcode); + return mirOp->second; +} + +std::list DexOpCompare::EmitToFEIRStmtsImpl() { + std::list stmts; + UniqueFEIRExpr cmpExpr = + FEIRBuilder::CreateExprMathBinary(GetOpcodeFromDexIns(), vB.GenFEIRVarReg(), vC.GenFEIRVarReg()); + UniqueFEIRStmt stmt = FEIRBuilder::CreateStmtDAssign(vA.GenFEIRVarReg(), std::move(cmpExpr)); + stmts.emplace_back(std::move(stmt)); + return stmts; +} + +// ========== DexOpIfTest ========= +DexOpIfTest::DexOpIfTest(MapleAllocator &allocatorIn, uint32 pcIn, DexOpCode opcodeIn) + : DexOp(allocatorIn, pcIn, opcodeIn) {} + +void DexOpIfTest::SetVAImpl(uint32 num) { + vA.regNum = num; + vA.regTypeItem = allocator.GetMemPool()->New(BCUtil::GetIntIdx(), true); + usedRegs.emplace_back(&vA); +} + +void DexOpIfTest::SetVBImpl(uint32 num) { + vB.regNum = num; + vB.regTypeItem = allocator.GetMemPool()->New(BCUtil::GetIntIdx(), true); + usedRegs.emplace_back(&vB); +} + +void DexOpIfTest::SetVCImpl(uint32 num) { + offset = static_cast(num); +} + +std::vector DexOpIfTest::GetTargetsImpl() const { + std::vector res; + res.emplace_back(static_cast(offset + pc)); + return res; +} + +void DexOpIfTest::ParseImpl(BCClassMethod &method) { + (void) method; + target = static_cast(offset + pc); +} + +std::list DexOpIfTest::EmitToFEIRStmtsImpl() { + std::list stmts; + DexReg vATmp; + DexReg vBTmp; + vATmp = vA; + vBTmp = vB; + // opnd of if-test should be same PTY type. + // Or one opnd must be nullptr, such as const 0. + if (vA.GetPrimType() == PTY_ref && vB.GetPrimType() != PTY_ref) { + CHECK_FATAL(vB.regValue != nullptr && vB.regValue->primValue.raw32 == 0, + "Cannot compare a ref var with an integer."); + vBTmp.regTypeItem = vATmp.regTypeItem; + UniqueFEIRStmt retypeStmt = FEIRBuilder::CreateStmtRetype(vBTmp.GenFEIRVarReg(), vB.GenFEIRVarReg()); + stmts.emplace_back(std::move(retypeStmt)); + } else if (vA.GetPrimType() != PTY_ref && vB.GetPrimType() == PTY_ref) { + CHECK_FATAL(vA.regValue != nullptr && vA.regValue->primValue.raw32 == 0, + "Cannot compare a ref var with an integer."); + vATmp.regTypeItem = vBTmp.regTypeItem; + UniqueFEIRStmt retypeStmt = FEIRBuilder::CreateStmtRetype(vATmp.GenFEIRVarReg(), vA.GenFEIRVarReg()); + stmts.emplace_back(std::move(retypeStmt)); + } + UniqueFEIRExpr exprA = std::make_unique(vATmp.GenFEIRVarReg()); + UniqueFEIRExpr exprB = std::make_unique(vBTmp.GenFEIRVarReg()); + auto itor = dexOpConditionOp2MIROp.find(static_cast(opcode)); + CHECK_FATAL(itor != dexOpConditionOp2MIROp.end(), "Invalid opcode: %u in DexOpIfTest", opcode); + // All primitive var should be compared in i32. 
+ if (vATmp.GetPrimType() != PTY_ref) { + if (vATmp.GetPrimType() != PTY_i32) { + exprA = std::make_unique(std::make_unique(PTY_i32, BCUtil::GetIntIdx()), + OP_cvt, std::move(exprA)); + } + if (vBTmp.GetPrimType() != PTY_i32) { + exprB = std::make_unique(std::make_unique(PTY_i32, BCUtil::GetIntIdx()), + OP_cvt, std::move(exprB)); + } + } + std::unique_ptr expr = + std::make_unique(itor->second, std::move(exprA), std::move(exprB)); + UniqueFEIRStmt stmt = + std::make_unique(OP_brtrue, funcNameIdx, target, std::move(expr)); + stmts.emplace_back(std::move(stmt)); + return stmts; +} + +// ========== DexOpIfTestZ ========= +DexOpIfTestZ::DexOpIfTestZ(MapleAllocator &allocatorIn, uint32 pcIn, DexOpCode opcodeIn) + : DexOp(allocatorIn, pcIn, opcodeIn) {} + +void DexOpIfTestZ::SetVAImpl(uint32 num) { + vA.regNum = num; + vA.regTypeItem = allocator.GetMemPool()->New(BCUtil::GetIntIdx(), true); + usedRegs.emplace_back(&vA); +} + +void DexOpIfTestZ::SetVBImpl(uint32 num) { + offset = static_cast(num); +} + +std::vector DexOpIfTestZ::GetTargetsImpl() const { + std::vector res; + res.emplace_back(static_cast(offset + pc)); + return res; +} + +void DexOpIfTestZ::ParseImpl(BCClassMethod &method) { + (void) method; + target = static_cast(offset + pc); +} + +std::list DexOpIfTestZ::EmitToFEIRStmtsImpl() { + std::list stmts; + UniqueFEIRStmt stmt = nullptr; + UniqueFEIRExpr expr = FEIRBuilder::CreateExprDRead(vA.GenFEIRVarReg()); + if (vA.GetPrimType() == PTY_u1 && opcode == kDexOpIfEqZ) { + stmt = std::make_unique(OP_brfalse, funcNameIdx, target, std::move(expr)); + } else if (vA.GetPrimType() == PTY_u1 && opcode == kDexOpIfNeZ) { + stmt = std::make_unique(OP_brtrue, funcNameIdx, target, std::move(expr)); + } else { + UniqueFEIRExpr constExpr = std::make_unique(static_cast(0), vA.GetPrimType()); + auto itor = dexOpConditionOp2MIROp.find(static_cast(opcode)); + CHECK_FATAL(itor != dexOpConditionOp2MIROp.end(), "Invalid opcode: %u in DexOpIfTestZ", opcode); + expr = FEIRBuilder::CreateExprMathBinary(itor->second, std::move(expr), std::move(constExpr)); + stmt = std::make_unique(OP_brtrue, funcNameIdx, target, std::move(expr)); + } + stmts.emplace_back(std::move(stmt)); + return stmts; +} + +// ========== DexOpUnused ========= +DexOpUnused::DexOpUnused(MapleAllocator &allocatorIn, uint32 pcIn, DexOpCode opcodeIn) + : DexOp(allocatorIn, pcIn, opcodeIn) {} + +// ========== DexOpAget ========= +DexOpAget::DexOpAget(MapleAllocator &allocatorIn, uint32 pcIn, DexOpCode opcodeIn) + : DexOp(allocatorIn, pcIn, opcodeIn) {} + +void DexOpAget::SetVAImpl(uint32 num) { + vA.regNum = num; + vA.isDef = true; + defedRegs.emplace_back(&vA); +} + +void DexOpAget::SetVBImpl(uint32 num) { + vB.regNum = num; + usedRegs.emplace_back(&vB); +} + +void DexOpAget::SetVCImpl(uint32 num) { + vC.regNum = num; + vC.regTypeItem = allocator.GetMemPool()->New(BCUtil::GetIntIdx()); + usedRegs.emplace_front(&vC); +} + +void DexOpAget::ParseImpl(BCClassMethod &method) { + (void) method; + GStrIdx elemTypeNameIdx; + GStrIdx usedTypeNameIdx; + bool isIndeterminateType = false; + switch (opcode) { + case kDexOpAget: { + // int or float + elemTypeNameIdx = BCUtil::GetIntIdx(); + usedTypeNameIdx = BCUtil::GetAIntIdx(); + isIndeterminateType = true; + break; + } + case kDexOpAgetWide: { + elemTypeNameIdx = BCUtil::GetLongIdx(); + usedTypeNameIdx = BCUtil::GetALongIdx(); + isIndeterminateType = true; + break; + } + case kDexOpAgetObject: { + elemTypeNameIdx = BCUtil::GetJavaObjectNameMplIdx(); + usedTypeNameIdx = BCUtil::GetAJavaObjectNameMplIdx(); + 
isIndeterminateType = true; + break; + } + case kDexOpAgetBoolean: { + elemTypeNameIdx = BCUtil::GetBooleanIdx(); + usedTypeNameIdx = BCUtil::GetABooleanIdx(); + break; + } + case kDexOpAgetByte: { + elemTypeNameIdx = BCUtil::GetByteIdx(); + usedTypeNameIdx = BCUtil::GetAByteIdx(); + break; + } + case kDexOpAgetChar: { + elemTypeNameIdx = BCUtil::GetCharIdx(); + usedTypeNameIdx = BCUtil::GetACharIdx(); + break; + } + case kDexOpAgetShort: { + elemTypeNameIdx = BCUtil::GetShortIdx(); + usedTypeNameIdx = BCUtil::GetAShortIdx(); + break; + } + default: { + CHECK_FATAL(false, "Invalid opcode : 0x%x in DexOpAget", opcode); + break; + } + } + vA.regType = allocator.GetMemPool()->New(allocator, vA, elemTypeNameIdx, isIndeterminateType); + vB.regTypeItem = allocator.GetMemPool()->New(usedTypeNameIdx, isIndeterminateType); +} + +void DexOpAget::SetRegTypeInTypeInferImpl() { + if (vB.regType != nullptr && vC.regType != nullptr) { + vB.regType->AddElemType(vA.regTypeItem); + } +} + +std::list DexOpAget::EmitToFEIRStmtsImpl() { + DexReg vAtmp; + vAtmp.regNum = vA.regNum; + const std::string arrayTypeName = GlobalTables::GetStrTable().GetStringFromStrIdx(vB.regTypeItem->typeNameIdx); + if (arrayTypeName.size() > 1 && arrayTypeName.at(0) == 'A') { + vAtmp.regTypeItem = allocator.GetMemPool()->New( + GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(arrayTypeName.substr(1))); + } else { + CHECK_FATAL(false, "Invalid array type %s", arrayTypeName.c_str()); + } + std::list stmts = FEIRBuilder::CreateStmtArrayLoad(vAtmp.GenFEIRVarReg(), vB.GenFEIRVarReg(), + vC.GenFEIRVarReg()); + + if (!((*vAtmp.regTypeItem) == (*vA.regTypeItem))) { + UniqueFEIRStmt retypeStmt = + FEIRBuilder::CreateStmtRetype(vA.GenFEIRVarReg(), vAtmp.GenFEIRVarReg()); + stmts.emplace_back(std::move(retypeStmt)); + } + return stmts; +} + +// ========== DexOpAput ========= +DexOpAput::DexOpAput(MapleAllocator &allocatorIn, uint32 pcIn, DexOpCode opcodeIn) + : DexOp(allocatorIn, pcIn, opcodeIn) {} + +void DexOpAput::SetVAImpl(uint32 num) { + vA.regNum = num; + usedRegs.emplace_back(&vA); +} + +void DexOpAput::SetVBImpl(uint32 num) { + vB.regNum = num; + usedRegs.emplace_back(&vB); +} + +void DexOpAput::SetVCImpl(uint32 num) { + vC.regNum = num; + vC.regTypeItem = allocator.GetMemPool()->New(BCUtil::GetIntIdx()); + usedRegs.emplace_back(&vC); +} + +void DexOpAput::ParseImpl(BCClassMethod &method) { + (void) method; + GStrIdx elemTypeNameIdx; + GStrIdx usedTypeNameIdx; + bool isIndeterminate = false; + switch (opcode) { + case kDexOpAput: { + // Int or float + elemTypeNameIdx = BCUtil::GetIntIdx(); + usedTypeNameIdx = BCUtil::GetAIntIdx(); + isIndeterminate = true; + break; + } + case kDexOpAputWide: { + elemTypeNameIdx = BCUtil::GetLongIdx(); + usedTypeNameIdx = BCUtil::GetALongIdx(); + isIndeterminate = true; + break; + } + case kDexOpAputObject: { + elemTypeNameIdx = BCUtil::GetJavaObjectNameMplIdx(); + usedTypeNameIdx = BCUtil::GetAJavaObjectNameMplIdx(); + isIndeterminate = true; + break; + } + case kDexOpAputBoolean: { + elemTypeNameIdx = BCUtil::GetBooleanIdx(); + usedTypeNameIdx = BCUtil::GetABooleanIdx(); + break; + } + case kDexOpAputByte: { + elemTypeNameIdx = BCUtil::GetByteIdx(); + usedTypeNameIdx = BCUtil::GetAByteIdx(); + break; + } + case kDexOpAputChar: { + elemTypeNameIdx = BCUtil::GetCharIdx(); + usedTypeNameIdx = BCUtil::GetACharIdx(); + break; + } + case kDexOpAputShort: { + elemTypeNameIdx = BCUtil::GetShortIdx(); + usedTypeNameIdx = BCUtil::GetAShortIdx(); + break; + } + default: { + CHECK_FATAL(false, "Invalid 
opcode : 0x%x in DexOpAput", opcode); + break; + } + } + vA.regTypeItem = allocator.GetMemPool()->New(elemTypeNameIdx, isIndeterminate); + vB.regTypeItem = allocator.GetMemPool()->New(usedTypeNameIdx, isIndeterminate); +} + +void DexOpAput::SetRegTypeInTypeInferImpl() { + if (vA.regType != nullptr && vB.regType != nullptr && vC.regType != nullptr) { + vB.regType->AddElemType(vA.regTypeItem); + } +} + +std::list DexOpAput::EmitToFEIRStmtsImpl() { + DexReg vAtmp; + vAtmp.regNum = vA.regNum; + const std::string &arrayTypeName = GlobalTables::GetStrTable().GetStringFromStrIdx(vB.regTypeItem->typeNameIdx); + if (arrayTypeName.size() > 1 && arrayTypeName.at(0) == 'A') { + vAtmp.regTypeItem = allocator.GetMemPool()->New( + GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(arrayTypeName.substr(1))); + } else { + CHECK_FATAL(false, "Invalid array type %s", arrayTypeName.c_str()); + } + std::list stmts = FEIRBuilder::CreateStmtArrayStore(vAtmp.GenFEIRVarReg(), vB.GenFEIRVarReg(), + vC.GenFEIRVarReg()); + if (!((*vAtmp.regTypeItem) == (*vA.regTypeItem))) { + UniqueFEIRStmt retypeStmt = + FEIRBuilder::CreateStmtRetype(vAtmp.GenFEIRVarReg(), vA.GenFEIRVarReg()); + stmts.emplace_front(std::move(retypeStmt)); + } + return stmts; +} + +// ========== DexOpIget ========= +DexOpIget::DexOpIget(MapleAllocator &allocatorIn, uint32 pcIn, DexOpCode opcodeIn) + : DexOp(allocatorIn, pcIn, opcodeIn) {} + +void DexOpIget::SetVAImpl(uint32 num) { + vA.regNum = num; + vA.isDef = true; + defedRegs.emplace_back(&vA); +} + +void DexOpIget::SetVBImpl(uint32 num) { + vB.regNum = num; + usedRegs.emplace_back(&vB); +} + +void DexOpIget::SetVCImpl(uint32 num) { + index = num; +} + +void DexOpIget::ParseImpl(BCClassMethod &method) { + BCReader::ClassElem field = method.GetBCClass().GetBCParser().GetReader()->GetClassFieldFromIdx(index); + uint64 mapIdx = (static_cast(method.GetBCClass().GetBCParser().GetReader()->GetFileIndex()) << 32) | index; + structElemNameIdx = FEManager::GetManager().GetFieldStructElemNameIdx(mapIdx); + if (structElemNameIdx == nullptr) { + structElemNameIdx = FEManager::GetManager().GetStructElemMempool()->New( + field.className, field.elemName, field.typeName); + FEManager::GetManager().SetFieldStructElemNameIdx(mapIdx, *structElemNameIdx); + } + vA.regType = allocator.GetMemPool()->New(allocator, vA, structElemNameIdx->type); + vB.regTypeItem = allocator.GetMemPool()->New(structElemNameIdx->klass); +} + +std::list DexOpIget::EmitToFEIRStmtsImpl() { + std::list stmts; + FEStructFieldInfo *fieldInfo = static_cast( + FEManager::GetTypeManager().RegisterStructFieldInfo(*structElemNameIdx, kSrcLangJava, false)); + UniqueFEIRStmt stmt = std::make_unique(vB.GenFEIRVarReg(), vA.GenFEIRVarReg(), *fieldInfo, false); + stmts.push_back(std::move(stmt)); + return stmts; +} + +// ========== DexOpIput ========= +DexOpIput::DexOpIput(MapleAllocator &allocatorIn, uint32 pcIn, DexOpCode opcodeIn) + : DexOp(allocatorIn, pcIn, opcodeIn) {} + +void DexOpIput::SetVAImpl(uint32 num) { + vA.regNum = num; + usedRegs.emplace_back(&vA); +} + +void DexOpIput::SetVBImpl(uint32 num) { + vB.regNum = num; + usedRegs.emplace_back(&vB); +} + +void DexOpIput::SetVCImpl(uint32 num) { + index = num; +} + +void DexOpIput::ParseImpl(BCClassMethod &method) { + BCReader::ClassElem field = method.GetBCClass().GetBCParser().GetReader()->GetClassFieldFromIdx(index); + uint64 mapIdx = (static_cast(method.GetBCClass().GetBCParser().GetReader()->GetFileIndex()) << 32) | index; + structElemNameIdx = 
FEManager::GetManager().GetFieldStructElemNameIdx(mapIdx); + if (structElemNameIdx == nullptr) { + structElemNameIdx = FEManager::GetManager().GetStructElemMempool()->New( + field.className, field.elemName, field.typeName); + FEManager::GetManager().SetFieldStructElemNameIdx(mapIdx, *structElemNameIdx); + } + vA.regTypeItem = allocator.GetMemPool()->New(structElemNameIdx->type); + vB.regTypeItem = allocator.GetMemPool()->New(structElemNameIdx->klass); +} + +std::list DexOpIput::EmitToFEIRStmtsImpl() { + std::list stmts; + FEStructFieldInfo *fieldInfo = static_cast( + FEManager::GetTypeManager().RegisterStructFieldInfo(*structElemNameIdx, kSrcLangJava, false)); + UniqueFEIRStmt stmt = std::make_unique(vB.GenFEIRVarReg(), vA.GenFEIRVarReg(), *fieldInfo, false); + stmts.push_back(std::move(stmt)); + return stmts; +} + +// ========== DexOpSget ========= +DexOpSget::DexOpSget(MapleAllocator &allocatorIn, uint32 pcIn, DexOpCode opcodeIn) + : DexOp(allocatorIn, pcIn, opcodeIn) {} + +void DexOpSget::SetVAImpl(uint32 num) { + vA.regNum = num; + vA.isDef = true; + defedRegs.emplace_back(&vA); +} + +void DexOpSget::SetVBImpl(uint32 num) { + index = num; +} + +void DexOpSget::ParseImpl(BCClassMethod &method) { + BCReader::ClassElem field = method.GetBCClass().GetBCParser().GetReader()->GetClassFieldFromIdx(index); + uint64 mapIdx = (static_cast(method.GetBCClass().GetBCParser().GetReader()->GetFileIndex()) << 32) | index; + structElemNameIdx = FEManager::GetManager().GetFieldStructElemNameIdx(mapIdx); + if (structElemNameIdx == nullptr) { + structElemNameIdx = FEManager::GetManager().GetStructElemMempool()->New( + field.className, field.elemName, field.typeName); + FEManager::GetManager().SetFieldStructElemNameIdx(mapIdx, *structElemNameIdx); + } + vA.regType = allocator.GetMemPool()->New(allocator, vA, structElemNameIdx->type); + dexFileHashCode = method.GetBCClass().GetBCParser().GetFileNameHashId(); +} + +std::list DexOpSget::EmitToFEIRStmtsImpl() { + std::list ans; + FEStructFieldInfo *fieldInfo = static_cast( + FEManager::GetTypeManager().RegisterStructFieldInfo(*structElemNameIdx, kSrcLangJava, true)); + CHECK_NULL_FATAL(fieldInfo); + fieldInfo->SetFieldID(index); + UniqueFEIRVar var = vA.GenFEIRVarReg(); + UniqueFEIRStmt stmt = std::make_unique(nullptr, std::move(var), *fieldInfo, true, dexFileHashCode); + ans.emplace_back(std::move(stmt)); + return ans; +} + +// ========== DexOpSput ========= +DexOpSput::DexOpSput(MapleAllocator &allocatorIn, uint32 pcIn, DexOpCode opcodeIn) + : DexOp(allocatorIn, pcIn, opcodeIn) {} + +void DexOpSput::SetVAImpl(uint32 num) { + vA.regNum = num; + usedRegs.emplace_back(&vA); +} + +void DexOpSput::SetVBImpl(uint32 num) { + index = num; +} + +void DexOpSput::ParseImpl(BCClassMethod &method) { + BCReader::ClassElem field = method.GetBCClass().GetBCParser().GetReader()->GetClassFieldFromIdx(index); + uint64 mapIdx = (static_cast(method.GetBCClass().GetBCParser().GetReader()->GetFileIndex()) << 32) | index; + structElemNameIdx = FEManager::GetManager().GetFieldStructElemNameIdx(mapIdx); + if (structElemNameIdx == nullptr) { + structElemNameIdx = FEManager::GetManager().GetStructElemMempool()->New( + field.className, field.elemName, field.typeName); + FEManager::GetManager().SetFieldStructElemNameIdx(mapIdx, *structElemNameIdx); + } + vA.regTypeItem = allocator.GetMemPool()->New(structElemNameIdx->type); + dexFileHashCode = method.GetBCClass().GetBCParser().GetFileNameHashId(); +} + +std::list DexOpSput::EmitToFEIRStmtsImpl() { + std::list stmts; + FEStructFieldInfo 
*fieldInfo = static_cast( + FEManager::GetTypeManager().RegisterStructFieldInfo(*structElemNameIdx, kSrcLangJava, true)); + CHECK_NULL_FATAL(fieldInfo); + UniqueFEIRVar var = vA.GenFEIRVarReg(); + fieldInfo->SetFieldID(index); + UniqueFEIRStmt stmt = std::make_unique(nullptr, std::move(var), *fieldInfo, true, + dexFileHashCode); + stmts.emplace_back(std::move(stmt)); + return stmts; +} + +// ========== DexOpInvoke ========= +DexOpInvoke::DexOpInvoke(MapleAllocator &allocatorIn, uint32 pcIn, DexOpCode opcodeIn) + : DexOp(allocatorIn, pcIn, opcodeIn), argRegs(allocator.Adapter()), argVRegs(allocator.Adapter()) {} + +std::string DexOpInvoke::GetReturnType() const { + CHECK_FATAL(!retArgsTypeNames.empty(), "Should parse DexOpInvoke first for retrieving signature."); + return retArgsTypeNames[0]; +} + +void DexOpInvoke::SetVAImpl(uint32 num) { + argSize = num; +} + +void DexOpInvoke::SetVBImpl(uint32 num) { + methodIdx = num; +} + +void DexOpInvoke::SetVCImpl(uint32 num) { + arg0VRegNum = num; +} + +void DexOpInvoke::SetArgsImpl(const MapleList &args) { + argRegs = args; +} + +bool DexOpInvoke::ReplaceStringFactory(BCReader::ClassElem &methodInfo, MapleList &argRegNums) { + const std::string &fullName = methodInfo.className + "|" + methodInfo.elemName + "|" + methodInfo.typeName; + if (!DexStrFactory::IsStringInit(fullName)) { + return false; + } + isStringFactory = true; + std::string strFacCalleeName = DexStrFactory::GetStringFactoryFuncname(fullName); + CHECK_FATAL(!strFacCalleeName.empty(), "Can not find string factory call for %s", fullName.c_str()); + std::vector names = FEUtils::Split(strFacCalleeName, '|'); + // 3 parts: ClassName|ElemName|Signature + CHECK_FATAL(names.size() == 3, "Invalid funcname %s", strFacCalleeName.c_str()); + methodInfo.className = names.at(0); + methodInfo.elemName = names.at(1); + methodInfo.typeName = names.at(2); + // push this as return + retReg.regNum = argRegNums.front(); + retReg.isDef = true; + argRegNums.pop_front(); + retReg.regType = allocator.GetMemPool()->New(allocator, retReg, BCUtil::GetJavaStringNameMplIdx()); + defedRegs.emplace_back(&retReg); + return true; +} + +void DexOpInvoke::ParseImpl(BCClassMethod &method) { + MapleList &argRegNums = argRegs; + uint64 mapIdx = (static_cast(method.GetBCClass().GetBCParser().GetReader()->GetFileIndex()) << 32) | + methodIdx; + BCReader::ClassElem methodInfo = method.GetBCClass().GetBCParser().GetReader()->GetClassMethodFromIdx(methodIdx); + // for string factory, the highest bit for string factory flag + if (ReplaceStringFactory(methodInfo, argRegNums)) { + mapIdx = mapIdx | 0x8000000000000000; + } + structElemNameIdx = FEManager::GetManager().GetMethodStructElemNameIdx(mapIdx); + if (structElemNameIdx == nullptr) { + structElemNameIdx = FEManager::GetManager().GetStructElemMempool()->New( + methodInfo.className, methodInfo.elemName, methodInfo.typeName); + FEManager::GetManager().SetMethodStructElemNameIdx(mapIdx, *structElemNameIdx); + } + retArgsTypeNames = FEUtilJava::SolveMethodSignature(methodInfo.typeName); + DexReg reg; + if (!IsStatic()) { + reg.regNum = argRegNums.front(); + argRegNums.pop_front(); + reg.regTypeItem = allocator.GetMemPool()->New(structElemNameIdx->klass); + argVRegs.emplace_back(reg); + } + for (size_t i = 1; i < retArgsTypeNames.size(); ++i) { + reg.regNum = argRegNums.front(); + argRegNums.pop_front(); + GStrIdx usedTypeNameIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName( + namemangler::EncodeName(retArgsTypeNames[i])); + if (BCUtil::IsWideType(usedTypeNameIdx)) { + 
argRegNums.pop_front(); // next regNum is consumed by wide type(long, double) + } + reg.regTypeItem = allocator.GetMemPool()->New(usedTypeNameIdx); + argVRegs.emplace_back(reg); + } + for (auto &vReg : argVRegs) { + usedRegs.emplace_back(&vReg); + } +} + +void DexOpInvoke::PrepareInvokeParametersAndReturn(const FEStructMethodInfo &feMethodInfo, + FEIRStmtCallAssign &stmt) const { + const MapleVector &argTypes = feMethodInfo.GetArgTypes(); + for (size_t i = argTypes.size(); i > 0; --i) { + UniqueFEIRVar var = argVRegs[i - 1 + (IsStatic() ? 0 : 1)].GenFEIRVarReg(); + UniqueFEIRExpr expr = FEIRBuilder::CreateExprDRead(std::move(var)); + stmt.AddExprArgReverse(std::move(expr)); + } + if (!IsStatic()) { + // push this + UniqueFEIRVar var = argVRegs[0].GenFEIRVarReg(); + UniqueFEIRExpr expr = FEIRBuilder::CreateExprDRead(std::move(var)); + stmt.AddExprArgReverse(std::move(expr)); + } + if (HasReturn()) { + static_cast(returnInst)->SetVATypeNameIdx( + GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(namemangler::EncodeName(retArgsTypeNames.at(0)))); + UniqueFEIRVar var = static_cast(returnInst)->GetVA().GenFEIRVarReg(); + stmt.SetVar(std::move(var)); + } + if (isStringFactory) { + stmt.SetVar(retReg.GenFEIRVarReg()); + } +} + +std::list DexOpInvoke::EmitToFEIRStmtsImpl() { + std::list ans; + UniqueFEIRStmt stmt; + FEStructMethodInfo *info = static_cast( + FEManager::GetTypeManager().RegisterStructMethodInfo(*structElemNameIdx, kSrcLangJava, IsStatic())); + auto itor = dexOpInvokeOp2MIROp.find(static_cast(opcode)); + CHECK_FATAL(itor != dexOpInvokeOp2MIROp.end(), "Unsupport opcode: 0x%x in DexOpInvoke", opcode); + stmt = std::make_unique(*info, itor->second, nullptr, IsStatic()); + FEIRStmtCallAssign *ptrStmt = static_cast(stmt.get()); + PrepareInvokeParametersAndReturn(*info, *ptrStmt); + ans.emplace_back(std::move(stmt)); + retArgsTypeNames.clear(); + retArgsTypeNames.shrink_to_fit(); + return ans; +} + +bool DexOpInvoke::IsStatic() const { + return opcode == kDexOpInvokeStatic || opcode == kDexOpInvokeStaticRange || isStringFactory; +} + +// ========== DexOpInvokeVirtual ========= +DexOpInvokeVirtual::DexOpInvokeVirtual(MapleAllocator &allocatorIn, uint32 pcIn, DexOpCode opcodeIn) + : DexOpInvoke(allocatorIn, pcIn, opcodeIn) {} + +// ========== DexOpInvokeSuper ========= +DexOpInvokeSuper::DexOpInvokeSuper(MapleAllocator &allocatorIn, uint32 pcIn, DexOpCode opcodeIn) + : DexOpInvoke(allocatorIn, pcIn, opcodeIn) {} + +// ========== DexOpInvokeDirect ========= +DexOpInvokeDirect::DexOpInvokeDirect(MapleAllocator &allocatorIn, uint32 pcIn, DexOpCode opcodeIn) + : DexOpInvoke(allocatorIn, pcIn, opcodeIn) {} + +// ========== DexOpInvokeStatic ========= +DexOpInvokeStatic::DexOpInvokeStatic(MapleAllocator &allocatorIn, uint32 pcIn, DexOpCode opcodeIn) + : DexOpInvoke(allocatorIn, pcIn, opcodeIn) {} + +// ========== DexOpInvokeInterface ========= +DexOpInvokeInterface::DexOpInvokeInterface(MapleAllocator &allocatorIn, uint32 pcIn, DexOpCode opcodeIn) + : DexOpInvoke(allocatorIn, pcIn, opcodeIn) {} + +// ========== DexOpUnaryOp ========= +DexOpUnaryOp::DexOpUnaryOp(MapleAllocator &allocatorIn, uint32 pcIn, DexOpCode opcodeIn) + : DexOp(allocatorIn, pcIn, opcodeIn) {} + +void DexOpUnaryOp::SetVAImpl(uint32 num) { + vA.regNum = num; + vA.isDef = true; + defedRegs.emplace_back(&vA); + auto it = GetOpcodeMapForUnary().find(opcode); + CHECK_FATAL(it != GetOpcodeMapForUnary().end(), "Invalid opcode: %u in DexOpUnaryOp", opcode); + mirOp = std::get<0>(it->second); + vA.regType = 
allocator.GetMemPool()->New(allocator, vA, std::get<1>(it->second)); + vB.regTypeItem = allocator.GetMemPool()->New(std::get<2>(it->second)); +} + +void DexOpUnaryOp::SetVBImpl(uint32 num) { + vB.regNum = num; + // vB's type is set in SetVAImpl + usedRegs.emplace_back(&vB); +} + +std::list DexOpUnaryOp::EmitToFEIRStmtsImpl() { + std::list stmts; + UniqueFEIRExpr exprDread = FEIRBuilder::CreateExprDRead(vB.GenFEIRVarReg()); + UniqueFEIRExpr experUnary = nullptr; + switch (mirOp) { + case OP_cvt: + experUnary = FEIRBuilder::CreateExprCvtPrim(std::move(exprDread), vA.GetPrimType()); + break; + case OP_sext: + experUnary = FEIRBuilder::CreateExprSExt(std::move(exprDread), vA.GetPrimType()); + break; + case OP_zext: + experUnary = FEIRBuilder::CreateExprZExt(std::move(exprDread), vA.GetPrimType()); + break; + case OP_neg: + case OP_bnot: + experUnary = FEIRBuilder::CreateExprMathUnary(mirOp, std::move(exprDread)); + break; + default: + CHECK_FATAL(false, "Invalid mir opcode: %u in DexOpUnaryOp", mirOp); + break; + } + UniqueFEIRStmt stmt = FEIRBuilder::CreateStmtDAssign(vA.GenFEIRVarReg(), std::move(experUnary)); + stmts.emplace_back(std::move(stmt)); + return stmts; +} + +std::map> DexOpUnaryOp::InitOpcodeMapForUnary() { + std::map> ans; + ans[kDexOpIntToLong] = std::make_tuple(OP_cvt, BCUtil::GetLongIdx(), BCUtil::GetIntIdx()); + ans[kDexOpIntToFloat] = std::make_tuple(OP_cvt, BCUtil::GetFloatIdx(), BCUtil::GetIntIdx()); + ans[kDexOpIntToDouble] = std::make_tuple(OP_cvt, BCUtil::GetDoubleIdx(), BCUtil::GetIntIdx()); + ans[kDexOpLongToInt] = std::make_tuple(OP_cvt, BCUtil::GetIntIdx(), BCUtil::GetLongIdx()); + ans[kDexOpLongToFloat] = std::make_tuple(OP_cvt, BCUtil::GetFloatIdx(), BCUtil::GetLongIdx()); + ans[kDexOpLongToDouble] = std::make_tuple(OP_cvt, BCUtil::GetDoubleIdx(), BCUtil::GetLongIdx()); + ans[kDexOpFloatToInt] = std::make_tuple(OP_cvt, BCUtil::GetIntIdx(), BCUtil::GetFloatIdx()); + ans[kDexOpFloatToLong] = std::make_tuple(OP_cvt, BCUtil::GetLongIdx(), BCUtil::GetFloatIdx()); + ans[kDexOpFloatToDouble] = std::make_tuple(OP_cvt, BCUtil::GetDoubleIdx(), BCUtil::GetFloatIdx()); + ans[kDexOpDoubleToInt] = std::make_tuple(OP_cvt, BCUtil::GetIntIdx(), BCUtil::GetDoubleIdx()); + ans[kDexOpDoubleToLong] = std::make_tuple(OP_cvt, BCUtil::GetLongIdx(), BCUtil::GetDoubleIdx()); + ans[kDexOpDoubleToFloat] = std::make_tuple(OP_cvt, BCUtil::GetFloatIdx(), BCUtil::GetDoubleIdx()); + ans[kDexOpIntToByte] = std::make_tuple(OP_sext, BCUtil::GetByteIdx(), BCUtil::GetIntIdx()); + ans[kDexOpIntToChar] = std::make_tuple(OP_zext, BCUtil::GetCharIdx(), BCUtil::GetIntIdx()); + ans[kDexOpIntToShort] = std::make_tuple(OP_sext, BCUtil::GetShortIdx(), BCUtil::GetIntIdx()); + ans[kDexOpNegInt] = std::make_tuple(OP_neg, BCUtil::GetIntIdx(), BCUtil::GetIntIdx()); + ans[kDexOpNotInt] = std::make_tuple(OP_bnot, BCUtil::GetIntIdx(), BCUtil::GetIntIdx()); + ans[kDexOpNegLong] = std::make_tuple(OP_neg, BCUtil::GetLongIdx(), BCUtil::GetLongIdx()); + ans[kDexOpNotLong] = std::make_tuple(OP_bnot, BCUtil::GetLongIdx(), BCUtil::GetLongIdx()); + ans[kDexOpNegFloat] = std::make_tuple(OP_neg, BCUtil::GetFloatIdx(), BCUtil::GetFloatIdx()); + ans[kDexOpNegDouble] = std::make_tuple(OP_neg, BCUtil::GetDoubleIdx(), BCUtil::GetDoubleIdx()); + return ans; +} + +// ========== DexOpBinaryOp ========= +DexOpBinaryOp::DexOpBinaryOp(MapleAllocator &allocatorIn, uint32 pcIn, DexOpCode opcodeIn) + : DexOp(allocatorIn, pcIn, opcodeIn) {} + +void DexOpBinaryOp::SetVAImpl(uint32 num) { + vA.regNum = num; + vA.isDef = true; + 
defedRegs.emplace_back(&vA); + GStrIdx typeNameIdx; // typeName of A, B, C are same + if (kDexOpAddInt <= opcode && opcode <= kDexOpUshrInt) { + typeNameIdx = BCUtil::GetIntIdx(); + } else if (kDexOpAddLong <= opcode && opcode <= kDexOpUshrLong) { + typeNameIdx = BCUtil::GetLongIdx(); + } else if (kDexOpAddFloat <= opcode && opcode <= kDexOpRemFloat) { + typeNameIdx = BCUtil::GetFloatIdx(); + } else if (kDexOpAddDouble <= opcode && opcode <= kDexOpRemDouble) { + typeNameIdx = BCUtil::GetDoubleIdx(); + } else { + CHECK_FATAL(false, "Invalid opcode: 0x%x in DexOpBinaryOp", opcode); + } + vA.regType = allocator.GetMemPool()->New(allocator, vA, typeNameIdx); +} + +void DexOpBinaryOp::SetVBImpl(uint32 num) { + vB.regNum = num; + vB.regTypeItem = allocator.GetMemPool()->New(*(vA.regTypeItem)); + usedRegs.emplace_back(&vB); +} + +void DexOpBinaryOp::SetVCImpl(uint32 num) { + vC.regNum = num; + if (kDexOpShlLong <= opcode && opcode <= kDexOpUshrLong) { + vC.regTypeItem = allocator.GetMemPool()->New(BCUtil::GetIntIdx()); + } else { + vC.regTypeItem = allocator.GetMemPool()->New(*(vA.regTypeItem)); + } + usedRegs.emplace_back(&vC); +} + +Opcode DexOpBinaryOp::GetOpcodeFromDexIns(void) const { + auto mirOp = dexOp2MIROp.find(opcode); + CHECK_FATAL(mirOp != dexOp2MIROp.end(), "Invalid opcode: 0x%x in DexOpBinaryOp", opcode); + return mirOp->second; +} + +std::list DexOpBinaryOp::EmitToFEIRStmtsImpl() { + std::list stmts; + if (kDexOpAddInt <= opcode && opcode <= kDexOpRemDouble) { + UniqueFEIRExpr exprB = std::make_unique(vB.GenFEIRVarReg()); + UniqueFEIRExpr exprC = std::make_unique(vC.GenFEIRVarReg()); + std::unique_ptr binaryOp2AddrExpr = + std::make_unique(GetOpcodeFromDexIns(), std::move(exprB), std::move(exprC)); + UniqueFEIRStmt stmt = FEIRBuilder::CreateStmtDAssign(vA.GenFEIRVarReg(), std::move(binaryOp2AddrExpr)); + stmts.emplace_back(std::move(stmt)); + } else { + CHECK_FATAL(false, "Invalid opcode: 0x%x in DexOpBinaryOp", opcode); + } + return stmts; +} + +// ========== DexOpBinaryOp2Addr ========= +DexOpBinaryOp2Addr::DexOpBinaryOp2Addr(MapleAllocator &allocatorIn, uint32 pcIn, DexOpCode opcodeIn) + : DexOp(allocatorIn, pcIn, opcodeIn) {} + +void DexOpBinaryOp2Addr::SetVAImpl(uint32 num) { + vDef.regNum = num; + vDef.isDef = true; + vA.regNum = num; + defedRegs.emplace_back(&vDef); + usedRegs.emplace_back(&vA); + // typeName of A, B are same, except `shl-long/2addr` ~ `ushr-long/2addr` + GStrIdx typeNameAIdx; + GStrIdx typeNameBIdx; + if (kDexOpAddInt2Addr <= opcode && opcode <= kDexOpUshrInt2Addr) { + typeNameAIdx = BCUtil::GetIntIdx(); + typeNameBIdx = typeNameAIdx; + } else if (kDexOpAddLong2Addr <= opcode && opcode <= kDexOpXorLong2Addr) { + typeNameAIdx = BCUtil::GetLongIdx(); + typeNameBIdx = typeNameAIdx; + } else if (kDexOpShlLong2Addr <= opcode && opcode <= kDexOpUshrLong2Addr) { + typeNameAIdx = BCUtil::GetLongIdx(); + typeNameBIdx = BCUtil::GetIntIdx(); + } else if (kDexOpAddFloat2Addr <= opcode && opcode <= kDexOpRemFloat2Addr) { + typeNameAIdx = BCUtil::GetFloatIdx(); + typeNameBIdx = typeNameAIdx; + } else if (kDexOpAddDouble2Addr <= opcode && opcode <= kDexOpRemDouble2Addr) { + typeNameAIdx = BCUtil::GetDoubleIdx(); + typeNameBIdx = typeNameAIdx; + } else { + CHECK_FATAL(false, "Invalid opcode: 0x%x in DexOpBinaryOp2Addr", opcode); + } + vDef.regType = allocator.GetMemPool()->New(allocator, vDef, typeNameAIdx); + vA.regTypeItem = allocator.GetMemPool()->New(typeNameAIdx); + vB.regTypeItem = allocator.GetMemPool()->New(typeNameBIdx); +} + +void 
DexOpBinaryOp2Addr::SetVBImpl(uint32 num) { + vB.regNum = num; + // type is set in SetVAImpl + usedRegs.emplace_back(&vB); +} + +Opcode DexOpBinaryOp2Addr::GetOpcodeFromDexIns(void) const { + auto mirOp = dexOp2Addr2MIROp.find(opcode); + CHECK_FATAL(mirOp != dexOp2Addr2MIROp.end(), "Invalid opcode: 0x%x in DexOpBinaryOp2Addr", opcode); + return mirOp->second; +} + +std::list DexOpBinaryOp2Addr::EmitToFEIRStmtsImpl() { + std::list stmts; + if (kDexOpAddInt2Addr <= opcode && opcode <= kDexOpRemDouble2Addr) { + UniqueFEIRExpr exprDst = std::make_unique(vA.GenFEIRVarReg()); + UniqueFEIRExpr exprSrc = std::make_unique(vB.GenFEIRVarReg()); + std::unique_ptr binaryOp2AddrExpr = + std::make_unique(GetOpcodeFromDexIns(), std::move(exprDst), std::move(exprSrc)); + UniqueFEIRStmt stmt = FEIRBuilder::CreateStmtDAssign(vA.GenFEIRVarReg(), std::move(binaryOp2AddrExpr)); + stmts.emplace_back(std::move(stmt)); + } else { + CHECK_FATAL(false, "Invalid opcode: 0x%x in DexOpBinaryOp2Addr", opcode); + } + return stmts; +} + +// ========== DexOpBinaryOpLit ========= +DexOpBinaryOpLit::DexOpBinaryOpLit(MapleAllocator &allocatorIn, uint32 pcIn, DexOpCode opcodeIn) + : DexOp(allocatorIn, pcIn, opcodeIn) { + isLit8 = (kDexOpAddIntLit8 <= opcode && opcode <= kDexOpUshrIntLit8); +} + +void DexOpBinaryOpLit::SetVAImpl(uint32 num) { + vA.regNum = num; + vA.isDef = true; + defedRegs.emplace_back(&vA); + vA.regType = allocator.GetMemPool()->New(allocator, vA, BCUtil::GetIntIdx()); +} + +void DexOpBinaryOpLit::SetVBImpl(uint32 num) { + vB.regNum = num; + vB.regTypeItem = allocator.GetMemPool()->New(BCUtil::GetIntIdx()); + usedRegs.emplace_back(&vB); +} + +void DexOpBinaryOpLit::SetVCImpl(uint32 num) { + if (isLit8) { + constValue.i8 = static_cast(num); + } else { + constValue.i16 = static_cast(num); + } +} + +Opcode DexOpBinaryOpLit::GetOpcodeFromDexIns() const { + auto mirOp = dexOpLit2MIROp.find(opcode); + CHECK_FATAL(mirOp != dexOpLit2MIROp.end(), "Invalid opcode: 0x%x in DexOpBinaryOpLit", opcode); + return mirOp->second; +} + +std::list DexOpBinaryOpLit::EmitToFEIRStmtsImpl() { + std::list stmts; + UniqueFEIRExpr exprConst = isLit8 ? 
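+      // lit8 immediates are signed 8-bit and lit16 immediates signed 16-bit; either way the
+      // constant is widened to i32 below before the arithmetic, and the rsub forms swap the
+      // operand order so that the literal becomes the left-hand side.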
FEIRBuilder::CreateExprConstI8(constValue.i8) : + FEIRBuilder::CreateExprConstI16(constValue.i16); + UniqueFEIRExpr exprCvt = FEIRBuilder::CreateExprCvtPrim(std::move(exprConst), PTY_i32); + UniqueFEIRExpr exprDreadB = FEIRBuilder::CreateExprDRead(vB.GenFEIRVarReg()); + UniqueFEIRExpr exprBinary = nullptr; + if (opcode == kDexOpRsubInt || opcode == kDexOpRsubIntLit8) { + // reverse opnd + exprBinary = FEIRBuilder::CreateExprMathBinary(GetOpcodeFromDexIns(), std::move(exprCvt), std::move(exprDreadB)); + } else { + exprBinary = FEIRBuilder::CreateExprMathBinary(GetOpcodeFromDexIns(), std::move(exprDreadB), std::move(exprCvt)); + } + UniqueFEIRStmt stmt = FEIRBuilder::CreateStmtDAssign(vA.GenFEIRVarReg(), std::move(exprBinary)); + stmts.emplace_back(std::move(stmt)); + return stmts; +} + +// ========== DexOpInvokePolymorphic ========= +DexOpInvokePolymorphic::DexOpInvokePolymorphic(MapleAllocator &allocatorIn, uint32 pcIn, DexOpCode opcodeIn) + : DexOpInvoke(allocatorIn, pcIn, opcodeIn) {} + +void DexOpInvokePolymorphic::SetVHImpl(uint32 num) { + protoIdx = num; +} + +void DexOpInvokePolymorphic::ParseImpl(BCClassMethod &method) { + isStatic = method.IsStatic(); + const BCReader::ClassElem &methodInfo = + method.GetBCClass().GetBCParser().GetReader()->GetClassMethodFromIdx(methodIdx); + const std::string &funcName = methodInfo.className + "|" + methodInfo.elemName + "|" + methodInfo.typeName; + if (FEOptions::GetInstance().IsAOT()) { + std::string callerClassName = method.GetBCClass().GetClassName(true); + int32 dexFileHashCode = method.GetBCClass().GetBCParser().GetFileNameHashId(); + callerClassID = FEManager::GetTypeManager().GetTypeIDFromMplClassName(callerClassName, dexFileHashCode); + } + fullNameMpl = namemangler::EncodeName(funcName); + protoName = method.GetBCClass().GetBCParser().GetReader()->GetSignature(protoIdx); + retArgsTypeNames = FEUtilJava::SolveMethodSignature(protoName); + DexReg reg; + MapleList argRegNums = argRegs; + std::string typeName; + // Receiver + { + reg.regNum = arg0VRegNum; + reg.regTypeItem = allocator.GetMemPool()->New(BCUtil::GetJavaMethodHandleNameMplIdx()); + argVRegs.emplace_back(reg); + } + if (opcode == kDexOpInvokePolymorphicRange) { + argRegNums.pop_front(); // Skip first reg num for 'receiver' + } + for (size_t i = 1; i < retArgsTypeNames.size(); ++i) { + reg.regNum = argRegNums.front(); + argRegNums.pop_front(); + typeName = retArgsTypeNames[i]; + typeName = namemangler::EncodeName(typeName); + GStrIdx usedTypeNameIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(typeName); + if (BCUtil::IsWideType(usedTypeNameIdx)) { + argRegNums.pop_front(); // next regNum is consumed by wide type(long, double) + } + reg.regTypeItem = allocator.GetMemPool()->New(usedTypeNameIdx); + argVRegs.emplace_back(reg); + } + for (auto &vReg : argVRegs) { + usedRegs.emplace_back(&vReg); + } +} + +std::list DexOpInvokePolymorphic::EmitToFEIRStmtsImpl() { + std::list stmts; + std::unique_ptr> args = std::make_unique>(); + for (const auto ® : argVRegs) { + args->emplace_back(reg.GenFEIRVarReg()); + } + std::unique_ptr stmt = std::make_unique( + INTRN_JAVA_POLYMORPHIC_CALL, fullNameMpl, protoName, std::move(args), callerClassID, isStatic); + if (HasReturn()) { + static_cast(returnInst)->SetVATypeNameIdx( + GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(namemangler::EncodeName(retArgsTypeNames.at(0)))); + UniqueFEIRVar var = static_cast(returnInst)->GetVA().GenFEIRVarReg(); + stmt->SetVar(std::move(var)); + } else if (retArgsTypeNames[0] != "V") { + GStrIdx 
nameIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName("retvarpoly"); + std::unique_ptr retTmpVarType = FEIRBuilder::CreateTypeByJavaName(retArgsTypeNames[0], false); + UniqueFEIRVar retTmpVar = std::make_unique(nameIdx, retTmpVarType->Clone(), false); + stmt->SetVar(std::move(retTmpVar)); + } + + stmts.emplace_back(std::move(stmt)); + retArgsTypeNames.clear(); + retArgsTypeNames.shrink_to_fit(); + fullNameMpl.clear(); + fullNameMpl.shrink_to_fit(); + protoName.clear(); + protoName.shrink_to_fit(); + return stmts; +} + +// ========== DexOpInvokeCustom ========= +DexOpInvokeCustom::DexOpInvokeCustom(MapleAllocator &allocatorIn, uint32 pcIn, DexOpCode opcodeIn) + : DexOp(allocatorIn, pcIn, opcodeIn), argRegs(allocator.Adapter()), argVRegs(allocator.Adapter()) {} + +void DexOpInvokeCustom::SetVBImpl(uint32 num) { + callSiteIdx = num; +} + +void DexOpInvokeCustom::SetArgsImpl(const MapleList &args) { + argRegs = args; +} + +// ========== DexOpConstMethodHandle ========= +DexOpConstMethodHandle::DexOpConstMethodHandle(MapleAllocator &allocatorIn, uint32 pcIn, DexOpCode opcodeIn) + : DexOp(allocatorIn, pcIn, opcodeIn) {} + +void DexOpConstMethodHandle::SetVAImpl(uint32 num) { + vA.regNum = num; + vA.isDef = true; + defedRegs.emplace_back(&vA); +} + +void DexOpConstMethodHandle::SetVBImpl(uint32 num) { + mhIdx = num; +} +// ========== DexOpConstMethodType ========= +DexOpConstMethodType::DexOpConstMethodType(MapleAllocator &allocatorIn, uint32 pcIn, DexOpCode opcodeIn) + : DexOp(allocatorIn, pcIn, opcodeIn) {} + +void DexOpConstMethodType::SetVAImpl(uint32 num) { + vA.regNum = num; + vA.isDef = true; + defedRegs.emplace_back(&vA); +} + +void DexOpConstMethodType::SetVBImpl(uint32 num) { + protoIdx = num; +} +} // namespace bc +} // namespace maple diff --git a/src/hir2mpl/bytecode_input/dex/src/dex_parser.cpp b/src/hir2mpl/bytecode_input/dex/src/dex_parser.cpp new file mode 100644 index 0000000000000000000000000000000000000000..48092dd79bd2ead3f504036443a1899cde1ad465 --- /dev/null +++ b/src/hir2mpl/bytecode_input/dex/src/dex_parser.cpp @@ -0,0 +1,258 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "dex_parser.h" +#include "fe_manager.h" +#include "dex_pragma.h" +#include "dex_encode_value.h" +#include "fe_manager.h" +#include "fe_options.h" +#include "dex_file_util.h" + +namespace maple { +namespace bc { +DexParser::DexParser(uint32 fileIdxIn, const std::string &fileNameIn, const std::list &classNamesIn) + : BCParser(fileIdxIn, fileNameIn, classNamesIn) {} + +const BCReader *DexParser::GetReaderImpl() const { + return reader.get(); +} + +void DexParser::SetDexFile(std::unique_ptr iDexFileIn) { + reader->SetDexFile(std::move(iDexFileIn)); +} + +uint32 DexParser::CalculateCheckSumImpl(const uint8 *data, uint32 size) { + (void) data; + (void) size; + return 0; // Not work in DexParser +} + +bool DexParser::ParseHeaderImpl() { + return true; // Not work in DexParser +} + +bool DexParser::VerifyImpl() { + return true; // Not work in DexParser +} + +bool DexParser::RetrieveIndexTables() { + return true; +} + +bool DexParser::RetrieveUserSpecifiedClasses(std::list> &klasses) { + (void) klasses; + return false; +} + +bool DexParser::RetrieveAllClasses(std::list> &klasses) { + uint32 classItemsSize = reader->GetClassItemsSize(); + for (uint32 classIdx = 0; classIdx < classItemsSize; ++classIdx) { + klasses.push_back(ProcessDexClass(classIdx)); + } + return true; +} + +bool DexParser::CollectAllDepTypeNamesImpl(std::unordered_set &depSet) { + return reader->ReadAllDepTypeNames(depSet); +} + +bool DexParser::CollectMethodDepTypeNamesImpl(std::unordered_set &depSet, BCClassMethod &bcMethod) const { + return reader->ReadMethodDepTypeNames(depSet, bcMethod.GetBCClass().GetClassIdx(), + bcMethod.GetItemIdx(), bcMethod.IsVirtual()); +} + +bool DexParser::CollectAllClassNamesImpl(std::unordered_set &classSet) { + return reader->ReadAllClassNames(classSet); +} + +std::unique_ptr DexParser::FindClassDef(const std::string &className) { + const BCReader *reader = GetReader(); + const DexReader *dexReader = static_cast(reader); + maple::IDexFile &iDexFile = dexReader->GetIDexFile(); + uint32 classIdx = iDexFile.FindClassDefIdx(className); + if (classIdx == DexFileUtil::kNoIndex) { + return nullptr; + } + std::unique_ptr dexClass = std::make_unique(classIdx, *this); + ProcessDexClassDef(dexClass); + ProcessDexClassFields(dexClass); + ProcessDexClassMethodDecls(dexClass, false); + ProcessDexClassMethodDecls(dexClass, true); + ProcessDexClassAnnotationDirectory(dexClass); + return dexClass; +} + +std::unique_ptr DexParser::ProcessDexClass(uint32 classIdx) { + std::unique_ptr dexClass = std::make_unique(classIdx, *this); + ProcessDexClassDef(dexClass); + ProcessDexClassFields(dexClass); + // direct methods are ahead of virtual methods in class + ProcessDexClassMethods(dexClass, false); + ProcessDexClassMethods(dexClass, true); + ProcessDexClassAnnotationDirectory(dexClass); + if (!FEOptions::GetInstance().IsGenMpltOnly()) { + ProcessDexClassStaticFieldInitValue(dexClass); + } + return dexClass; +} + +void DexParser::ProcessDexClassDef(const std::unique_ptr &dexClass) { + uint32 classIdx = dexClass->GetClassIdx(); + dexClass->SetFilePathName(fileName); + const char *srcFileName = reader->GetClassJavaSourceFileName(classIdx); + dexClass->SetSrcFileInfo(srcFileName == nullptr ? 
"unknown" : srcFileName); + dexClass->SetAccFlag(reader->GetClassAccFlag(classIdx)); + const std::string &className = reader->GetClassName(classIdx); + dexClass->SetClassName(className); + dexClass->SetIsInterface(reader->IsInterface(classIdx)); + dexClass->SetSuperClasses(reader->GetSuperClasses(classIdx)); + GStrIdx irSrcFileSigIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(reader->GetIRSrcFileSignature()); + dexClass->SetIRSrcFileSigIdx(irSrcFileSigIdx); + ProcessDexClassInterfaceParent(dexClass); +} + +void DexParser::ProcessDexClassFields(const std::unique_ptr &dexClass) { + uint32 classIdx = dexClass->GetClassIdx(); + maple::IDexFile &iDexFile = reader->GetIDexFile(); + std::vector> fieldItems = iDexFile.GetClassItem(classIdx)->GetFields(iDexFile); + for (uint32 idx = 0; idx < fieldItems.size(); ++idx) { + std::unique_ptr dexClassField = std::make_unique( + *dexClass, idx, fieldItems[idx]->GetFieldIdx(), + fieldItems[idx]->GetAccessFlags(), + reader->GetClassFieldName(fieldItems[idx]->GetFieldIdx()), + reader->GetClassFieldTypeName(fieldItems[idx]->GetFieldIdx())); + dexClass->SetField(std::move(dexClassField)); + } +} + +void DexParser::ProcessDexClassInterfaceParent(const std::unique_ptr &dexClass) { + uint32 classIdx = dexClass->GetClassIdx(); + const std::vector &names = reader->GetClassInterfaceNames(classIdx); + for (const auto &name : names) { + dexClass->SetInterface(name); + } +} + +void DexParser::ProcessMethodBodyImpl(BCClassMethod &method, + uint32 classIdx, uint32 methodItemIdx, bool isVirtual) const { + maple::IDexFile &iDexFile = reader->GetIDexFile(); + std::unique_ptr dexMethodItem; + if (isVirtual) { + dexMethodItem = iDexFile.GetClassItem(classIdx)->GetVirtualMethod(iDexFile, methodItemIdx); + } else { + dexMethodItem = iDexFile.GetClassItem(classIdx)->GetDirectMethod(iDexFile, methodItemIdx); + } + method.GenArgRegs(); + method.SetMethodInstOffset(reader->GetMethodInstOffset(dexMethodItem.get())); + method.SetPCBCInstructionMap( + reader->ResolveInstructions(method.GetAllocator(), dexMethodItem.get())); + method.SetSrcLocalInfo(reader->ResovleSrcLocalInfo(*dexMethodItem)); + method.SetTryInfos(reader->ResolveTryInfos(dexMethodItem.get())); +#ifdef DEBUG + std::map srcPosInfo; + reader->ResovleSrcPositionInfo(dexMethodItem.get(), srcPosInfo); + method.SetSrcPositionInfo(srcPosInfo); +#endif + method.ProcessInstructions(); +} + +void DexParser::ProcessDexClassMethod(const std::unique_ptr &dexClass, + bool isVirtual, uint32 index, std::pair &idxPair) { + std::unique_ptr method = std::make_unique( + *dexClass, index, idxPair.first, isVirtual, idxPair.second, + reader->GetClassMethodName(idxPair.first), reader->GetClassMethodDescName(idxPair.first)); + uint32 classIdx = dexClass->GetClassIdx(); + maple::IDexFile &iDexFile = reader->GetIDexFile(); + std::unique_ptr dexMethodItem; + if (isVirtual) { + dexMethodItem = iDexFile.GetClassItem(classIdx)->GetVirtualMethod(iDexFile, method->GetItemIdx()); + } else { + dexMethodItem = iDexFile.GetClassItem(classIdx)->GetDirectMethod(iDexFile, method->GetItemIdx()); + } + method->SetRegisterTotalSize(reader->GetClassMethodRegisterTotalSize(dexMethodItem.get())); + method->SetRegisterInsSize(reader->GetClassMethodRegisterInSize(dexMethodItem.get())); + method->SetCodeOff(reader->GetCodeOff(dexMethodItem.get())); + dexClass->SetMethod(std::move(method)); +} + +void DexParser::ProcessDexClassMethods(const std::unique_ptr &dexClass, bool isVirtual) { + uint32 classIdx = dexClass->GetClassIdx(); + maple::IDexFile 
&iDexFile = reader->GetIDexFile(); + std::vector> methodItemsIdxAndFlag = + iDexFile.GetClassItem(classIdx)->GetMethodsIdxAndFlag(iDexFile, isVirtual); + uint32 index = 0; + for (auto idxPair : methodItemsIdxAndFlag) { + ProcessDexClassMethod(dexClass, isVirtual, index++, idxPair); + } +} + +void DexParser::ProcessDexClassMethodDecls(const std::unique_ptr &dexClass, bool isVirtual) { + uint32 classIdx = dexClass->GetClassIdx(); + maple::IDexFile &iDexFile = reader->GetIDexFile(); + std::vector> methodItemsIdxAndFlag = + iDexFile.GetClassItem(classIdx)->GetMethodsIdxAndFlag(iDexFile, isVirtual); + uint32 index = 0; + for (auto idxPair : methodItemsIdxAndFlag) { + std::unique_ptr method = std::make_unique( + *dexClass, index++, idxPair.first, isVirtual, idxPair.second, + reader->GetClassMethodName(idxPair.first), reader->GetClassMethodDescName(idxPair.first)); + dexClass->SetMethod(std::move(method)); + } +} + +void DexParser::ProcessDexClassAnnotationDirectory(const std::unique_ptr &dexClass) { + const BCReader *reader = GetReader(); + const DexReader *dexReader = static_cast(reader); + maple::IDexFile &iDexFile = dexReader->GetIDexFile(); + uint32 classDefIdx = dexClass->GetClassIdx(); + const IDexClassItem *iDexClassItem = iDexFile.GetClassItem(classDefIdx); + const IDexAnnotationsDirectory *iDexAnnotationsDirectory = iDexClassItem->GetAnnotationsDirectory(iDexFile); + MIRModule &module = FEManager::GetModule(); + std::unique_ptr annotationsDirectory = + std::make_unique(module, *(module.GetMemPool()), iDexFile, + iDexClassItem->GetClassName(iDexFile), iDexAnnotationsDirectory); + dexClass->SetAnnotationsDirectory(std::move(annotationsDirectory)); +} + +void DexParser::ProcessDexClassStaticFieldInitValue(const std::unique_ptr &dexClass) { + const BCReader *reader = GetReader(); + const DexReader *dexReader = static_cast(reader); + maple::IDexFile &iDexFile = dexReader->GetIDexFile(); + std::unique_ptr dexEncodeValue = + std::make_unique(*FEManager::GetModule().GetMemPool(), iDexFile); + uint32 classDefIdx = dexClass->GetClassIdx(); + const IDexClassItem *classItem = iDexFile.GetClassItem(classDefIdx); + const uint8 *staticValuesList = classItem->GetStaticValuesList(iDexFile); + if (staticValuesList == nullptr) { + return; + } + const uint8 **data = &staticValuesList; + uint32 size = iDexFile.ReadUnsignedLeb128(data); + for (uint32 i = 0; i < size; ++i) { + MIRConst *cst = nullptr; + uint64 dataVal = *(*data)++; + uint8 valueArg = static_cast(dataVal >> 5); // valueArgs : dataVal >> 5, The higher three bits are valid. + uint8 valueType = dataVal & 0x1f; + // get initialization value, max 8 bytes, little-endian + uint32 stringID = 0; + dexEncodeValue->ProcessEncodedValue(data, valueType, valueArg, cst, stringID); + dexClass->InsertStaticFieldConstVal(cst); + if (FEOptions::GetInstance().IsAOT() && (valueType == kValueString)) { + dexClass->InsertFinalStaticStringID(stringID); + } + } +} +} // namespace bc +} // namespace maple diff --git a/src/hir2mpl/bytecode_input/dex/src/dex_pragma.cpp b/src/hir2mpl/bytecode_input/dex/src/dex_pragma.cpp new file mode 100644 index 0000000000000000000000000000000000000000..da55cd4d955212289839918307aeed8bf6a8c1f3 --- /dev/null +++ b/src/hir2mpl/bytecode_input/dex/src/dex_pragma.cpp @@ -0,0 +1,538 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "dex_pragma.h" +#include "global_tables.h" +#include "ark_annotation_map.h" +#include "fe_manager.h" +#include "bc_util.h" +#include "fe_manager.h" +#include "ark_annotation_processor.h" +#include "rc_setter.h" + +namespace maple { +namespace bc { +// ---------- DexBCAnnotationElement ---------- +MIRPragmaElement *DexBCAnnotationElement::ProcessAnnotationElement(const uint8 **data) { + uint32 nameIdx = iDexFile.ReadUnsignedLeb128(data); + const char *elemNameFieldOrig = iDexFile.GetStringByIndex(nameIdx); + const std::string elemNameField = namemangler::EncodeName(elemNameFieldOrig); + GStrIdx strIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(elemNameField); + MIRPragmaElement *element = mp.New(module); + element->SetNameStrIdx(strIdx); + // Process encoded value. + uint64 dataVal = *(*data)++; + uint8 valueArg = static_cast(dataVal >> 5); + uint8 valueType = dataVal & 0x1f; + PragmaValueType pragmaValueType = static_cast(valueType); + element->SetType(pragmaValueType); + MIRConst *cst = nullptr; + ProcessAnnotationEncodedValue(data, *element, pragmaValueType, valueArg, cst); + return element; +} + +void DexBCAnnotationElement::ProcessAnnotationEncodedValue(const uint8 **data, MIRPragmaElement &element, + MIRConst *&cst) { + uint64 dataVal = *(*data)++; + ProcessAnnotationEncodedValue(data, element, static_cast(dataVal & 0x1f), + static_cast(dataVal >> 5), cst); +} + +void DexBCAnnotationElement::ProcessAnnotationEncodedValue(const uint8 **data, MIRPragmaElement &element, + PragmaValueType valueType, uint8 valueArg, MIRConst *&cst) { + element.SetType(valueType); + uint64 val = 0; + cst = mp.New(0, *GlobalTables::GetTypeTable().GetInt32()); + switch (valueType) { + case kValueByte: { + val = GetUVal(data, valueArg); + element.SetU64Val(val); + cst = mp.New(val, *GlobalTables::GetTypeTable().GetInt8()); + break; + } + case kValueShort: { + cst = ProcessAnnotationEncodedValueInternalProcessIntValue(data, element, valueArg, + *GlobalTables::GetTypeTable().GetInt16()); + break; + } + case kValueChar: { + val = GetUVal(data, valueArg); + cst = mp.New(val, *GlobalTables::GetTypeTable().GetUInt16()); + element.SetU64Val(val); + break; + } + case kValueInt: { + cst = ProcessAnnotationEncodedValueInternalProcessIntValue(data, element, valueArg, + *GlobalTables::GetTypeTable().GetInt32()); + break; + } + case kValueLong: { + cst = ProcessAnnotationEncodedValueInternalProcessIntValue(data, element, valueArg, + *GlobalTables::GetTypeTable().GetInt64()); + break; + } + case kValueFloat: { + val = GetUVal(data, valueArg); + // fill 0s for least significant bits + element.SetU64Val(val << ((3 - valueArg) << 3)); + cst = mp.New(element.GetFloatVal(), *GlobalTables::GetTypeTable().GetFloat()); + break; + } + case kValueDouble: { + val = GetUVal(data, valueArg); + // fill 0s for least significant bits + element.SetU64Val(val << ((7 - valueArg) << 3)); + cst = mp.New(element.GetDoubleVal(), *GlobalTables::GetTypeTable().GetDouble()); + break; + } + case kValueString: { + cst = ProcessAnnotationEncodedValueInternalProcessStringValue(data, element, valueArg); + break; + } + case kValueMethodType: { + element.SetU64Val(0xdeadbeef); + 
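+      // 0xdeadbeef is evidently a placeholder for constant kinds the front end does not
+      // materialize (method types and method handles). For the numeric kinds above, the leading
+      // byte of an encoded_value packs (value_arg << 5) | value_type, with value_arg = size - 1
+      // in bytes, which is why the float/double cases shift the payload into the most
+      // significant bytes.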
break; + } + case kValueType: { + ProcessAnnotationEncodedValueInternalProcessTypeValue(data, element, valueArg); + break; + } + case kValueMethodHandle: { + element.SetU64Val(0xdeadbeef); + break; + } + case kValueEnum: + // should fallthrough + [[fallthrough]]; + case kValueField: { + ProcessAnnotationEncodedValueInternalProcessFieldValue(data, element, valueArg); + break; + } + case kValueMethod: { + ProcessAnnotationEncodedValueInternalProcessMethodValue(data, element, valueArg); + break; + } + case kValueArray: { + CHECK_FATAL(!valueArg, "value_arg != 0"); + cst = ProcessAnnotationEncodedValueInternalProcessArrayValue(data, element); + break; + } + case kValueAnnotation: { + CHECK_FATAL(!valueArg, "value_arg != 0"); + ProcessAnnotationEncodedValueInternalProcessAnnotationValue(data, element); + break; + } + case kValueNull: { + CHECK_FATAL(!valueArg, "value_arg != 0"); + element.SetU64Val(0); + cst = mp.New(0, *GlobalTables::GetTypeTable().GetInt8()); + break; + } + case kValueBoolean: { + element.SetU64Val(valueArg); + cst = mp.New(valueArg, *GlobalTables::GetTypeTable().GetInt8()); + break; + } + default: { + break; + } + } +} + +MIRIntConst *DexBCAnnotationElement::ProcessAnnotationEncodedValueInternalProcessIntValue(const uint8 **data, + MIRPragmaElement &element, + uint8 valueArg, + MIRType &type) { + // sign extended val + uint64 val = GetUVal(data, valueArg); + uint32 shiftBit = static_cast((7 - valueArg) * 8); + CHECK_FATAL(valueArg <= 7, "shiftBit positive check"); + uint64 sVal = (static_cast(val) << shiftBit) >> shiftBit; + element.SetI64Val(static_cast(sVal)); + MIRIntConst *intCst = mp.New(sVal, type); + return intCst; +} + +MIRStr16Const *DexBCAnnotationElement::ProcessAnnotationEncodedValueInternalProcessStringValue(const uint8 **data, + MIRPragmaElement &ele, + uint8 valueArg) { + uint64 val = GetUVal(data, valueArg); + std::string str = namemangler::GetOriginalNameLiteral(iDexFile.GetStringByIndex(static_cast(val))); + GStrIdx strIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(str); + ele.SetU64Val(static_cast(strIdx)); + std::u16string str16; + (void)namemangler::UTF8ToUTF16(str16, str); + MIRStr16Const *strCst = mp.New(str16, *GlobalTables::GetTypeTable().GetPtr()); + return strCst; +} + +void DexBCAnnotationElement::ProcessAnnotationEncodedValueInternalProcessTypeValue(const uint8 **data, + MIRPragmaElement &element, + uint8 valueArg) { + uint64 val = GetUVal(data, valueArg); + std::string str = iDexFile.GetStringByTypeIndex(static_cast(val)); + const std::string name = namemangler::EncodeName(str); + GStrIdx strIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(name); + element.SetU64Val(static_cast(strIdx)); +} + +void DexBCAnnotationElement::ProcessAnnotationEncodedValueInternalProcessFieldValue(const uint8 **data, + MIRPragmaElement &element, + uint8 valueArg) { + uint64 val = GetUVal(data, valueArg); + const IDexFieldIdItem *fieldID = iDexFile.GetFieldIdItem(static_cast(val)); + ASSERT_NOT_NULL(fieldID); + std::string str = fieldID->GetShortFieldName(iDexFile); + GStrIdx strIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(str); + element.SetU64Val(static_cast(strIdx)); +} + +void DexBCAnnotationElement::ProcessAnnotationEncodedValueInternalProcessMethodValue(const uint8 **data, + MIRPragmaElement &element, + uint8 valueArg) { + uint64 val = GetUVal(data, valueArg); + const IDexMethodIdItem *methodID = iDexFile.GetMethodIdItem(static_cast(val)); + ASSERT_NOT_NULL(methodID); + std::string fullName = 
std::string(methodID->GetDefiningClassName(iDexFile)) + "|" + + methodID->GetShortMethodName(iDexFile) + "|" + + methodID->GetDescriptor(iDexFile); + std::string funcName = namemangler::EncodeName(fullName); + GStrIdx strIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(funcName); + element.SetU64Val(static_cast(strIdx)); +} + +MIRAggConst *DexBCAnnotationElement::ProcessAnnotationEncodedValueInternalProcessArrayValue(const uint8 **data, + MIRPragmaElement &element) { + unsigned arraySize = iDexFile.ReadUnsignedLeb128(data); + uint64 dataVal = *(*data); + uint8 newValueType = dataVal & 0x1f; + MIRType *elemType = GetTypeFromValueType(static_cast(newValueType)); + MIRType *arrayTypeWithSize = GlobalTables::GetTypeTable().GetOrCreateArrayType(*elemType, 1, &arraySize); + MIRAggConst *aggCst = module.GetMemPool()->New(module, *arrayTypeWithSize); + for (unsigned int i = 0; i < arraySize; ++i) { + MIRPragmaElement *subElement = mp.New(module); + MIRConst *mirConst = nullptr; + ProcessAnnotationEncodedValue(data, *subElement, mirConst); + element.SubElemVecPushBack(subElement); + aggCst->PushBack(mirConst); + } + return aggCst; +} + +void DexBCAnnotationElement::ProcessAnnotationEncodedValueInternalProcessAnnotationValue(const uint8 **data, + MIRPragmaElement &element) { + unsigned typeIdx = iDexFile.ReadUnsignedLeb128(data); + std::string str = iDexFile.GetStringByTypeIndex(typeIdx); + const std::string name = namemangler::EncodeName(str); + GStrIdx strIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(name); + element.SetTypeStrIdx(strIdx); + unsigned annoSize = iDexFile.ReadUnsignedLeb128(data); + MIRConst *mirConst = nullptr; + for (unsigned int i = 0; i < annoSize; ++i) { + MIRPragmaElement *subElement = mp.New(module); + unsigned nameIdx = iDexFile.ReadUnsignedLeb128(data); + str = iDexFile.GetStringByIndex(nameIdx); + strIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(str); + subElement->SetNameStrIdx(strIdx); + ProcessAnnotationEncodedValue(data, *subElement, mirConst); + element.SubElemVecPushBack(subElement); + } +} + +MIRType *DexBCAnnotationElement::GetTypeFromValueType(PragmaValueType valueType) { + switch (valueType) { + case kValueBoolean: + return GlobalTables::GetTypeTable().GetInt8(); + case kValueByte: + return GlobalTables::GetTypeTable().GetInt8(); + case kValueShort: + return GlobalTables::GetTypeTable().GetInt16(); + case kValueChar: + return GlobalTables::GetTypeTable().GetUInt16(); + case kValueInt: + return GlobalTables::GetTypeTable().GetInt32(); + case kValueLong: + return GlobalTables::GetTypeTable().GetInt64(); + case kValueFloat: + return GlobalTables::GetTypeTable().GetFloat(); + case kValueDouble: + return GlobalTables::GetTypeTable().GetDouble(); + default: + return GlobalTables::GetTypeTable().GetPtr(); + } +} + +// ---------- DexBCAnnotation ---------- +MIRPragma *DexBCAnnotation::EmitPragma(PragmaKind kind, const GStrIdx &pragIdx, int32 paramNum, const TyIdx &tyIdxEx) { + uint8 visibility = iDexAnnotation->GetVisibility(); + const uint8 *annotationData = iDexAnnotation->GetAnnotationData(); + uint32 typeIdx = iDexFile.ReadUnsignedLeb128(&annotationData); + std::string className = namemangler::EncodeName(iDexFile.GetStringByTypeIndex(typeIdx)); + className = ArkAnnotationMap::GetArkAnnotationMap().GetAnnotationTypeName(className); + bool isCreate = false; + MIRStructType *structType = + FEManager::GetTypeManager().GetOrCreateClassOrInterfaceType(className, false, FETypeFlag::kSrcUnknown, isCreate); + MIRPragma *pragma = 
mp.New(module); + pragma->SetKind(kind); + pragma->SetStrIdx(pragIdx); + pragma->SetTyIdx(structType->GetTypeIndex()); + pragma->SetTyIdxEx(tyIdxEx); + pragma->SetVisibility(visibility); + pragma->SetParamNum(paramNum); + uint32 size = iDexFile.ReadUnsignedLeb128(&annotationData); + while (size-- != 0) { + DexBCAnnotationElement dexBCAnnotationElement(module, mp, iDexFile, &annotationData); + MIRPragmaElement *element = dexBCAnnotationElement.EmitPragmaElement(); + pragma->PushElementVector(element); + } + return pragma; +} + +// ---------- DexBCAnnotationSet ---------- +void DexBCAnnotationSet::Init() { + std::vector iDexAnnotations; + iDexAnnotationSet->GetAnnotations(iDexFile, iDexAnnotations); + for (uint32 i = 0; i < iDexAnnotations.size(); i++) { + const IDexAnnotation *iDexAnnotation = iDexAnnotations[i]; + std::unique_ptr annotation = std::make_unique(module, mp, iDexFile, + iDexAnnotation); + annotations.push_back(std::move(annotation)); + } +} + +std::vector &DexBCAnnotationSet::EmitPragmas(PragmaKind kind, const GStrIdx &pragIdx, int32 paramNum, + const TyIdx &tyIdxEx) { + pragmas.clear(); + for (const std::unique_ptr &annotation : annotations) { + MIRPragma *pragma = annotation->EmitPragma(kind, pragIdx, paramNum, tyIdxEx); + pragmas.push_back(pragma); + } + return pragmas; +} + +// ---------- DexBCAnnotationSetList ---------- +void DexBCAnnotationSetList::Init() { + std::vector iDexAnnotationSets; + iDexAnnotationSetList->GetAnnotationSets(iDexFile, iDexAnnotationSets); + for (uint32 i = 0; i < iDexAnnotationSets.size(); i++) { + const IDexAnnotationSet *iDexAnnotationSet = iDexAnnotationSets[i]; + std::unique_ptr annotationSet = std::make_unique(module, mp, iDexFile, + iDexAnnotationSet); + annotationSets.push_back(std::move(annotationSet)); + } +} + +std::vector &DexBCAnnotationSetList::EmitPragmas(PragmaKind kind, const GStrIdx &pragIdx) { + pragmas.clear(); + for (uint32 i = 0; i < annotationSets.size(); i++) { + const std::unique_ptr &annotationSet = annotationSets[i]; + if (annotationSet->IsValid()) { + std::vector &innerPragmas = annotationSet->EmitPragmas(kind, pragIdx, static_cast(i)); + for (MIRPragma *pragma : innerPragmas) { + pragmas.push_back(pragma); + } + } + } + return pragmas; +} + +// ---------- DexBCFieldAnnotations ---------- +void DexBCFieldAnnotations::Init() { + const IDexAnnotationSet *iDexAnnotationSet = iDexFieldAnnotations->GetAnnotationSet(iDexFile); + annotationSet = std::make_unique(module, mp, iDexFile, iDexAnnotationSet); +} + +std::vector &DexBCFieldAnnotations::EmitPragmas() { + const IDexFieldIdItem *fieldID = iDexFieldAnnotations->GetFieldIdItem(iDexFile); + const std::string &fieldTypeName = namemangler::EncodeName(fieldID->GetFieldTypeName(iDexFile)); + MIRType *fieldType = FEManager::GetTypeManager().GetOrCreateTypeFromName(fieldTypeName, FETypeFlag::kSrcUnknown, + true); + CHECK_NULL_FATAL(fieldType); + uint64 mapIdx = (static_cast(iDexFile.GetFileIdx()) << 32) | iDexFieldAnnotations->GetFieldIdx(); + StructElemNameIdx *structElemNameIdx = FEManager::GetManager().GetFieldStructElemNameIdx(mapIdx); + ASSERT(structElemNameIdx != nullptr, "structElemNameIdx is nullptr."); + std::vector &pragmas = + annotationSet->EmitPragmas(kPragmaVar, structElemNameIdx->elem, -1, fieldType->GetTypeIndex()); + if (FEOptions::GetInstance().IsRC()) { + RCSetter::GetRCSetter().ProcessFieldRCAnnotation(*structElemNameIdx, *fieldType, pragmas); + } + return pragmas; +} + +// ---------- DexBCMethodAnnotations ---------- +void DexBCMethodAnnotations::Init() { + 
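+  // The field and method annotation emitters key FEManager's StructElemNameIdx lookup on a
+  // 64-bit id built as (dex file index << 32) | field-or-method index, so entries from
+  // different dex files in the same compilation cannot collide.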
const IDexAnnotationSet *iDexAnnotationSet = iDexMethodAnnotations->GetAnnotationSet(iDexFile); + annotationSet = std::make_unique(module, mp, iDexFile, iDexAnnotationSet); +} + +std::vector &DexBCMethodAnnotations::EmitPragmas() { + methodID = iDexMethodAnnotations->GetMethodIdItem(iDexFile); + uint64 mapIdx = (static_cast(iDexFile.GetFileIdx()) << 32) | iDexMethodAnnotations->GetMethodIdx(); + StructElemNameIdx *structElemNameIdx = FEManager::GetManager().GetMethodStructElemNameIdx(mapIdx); + ASSERT(structElemNameIdx != nullptr, "structElemNameIdx is nullptr."); + methodFullNameStrIdx = structElemNameIdx->full; + pragmasPtr = &(annotationSet->EmitPragmas(kPragmaFunc, methodFullNameStrIdx)); + if (pragmasPtr->size() > 0) { + SetupFuncAttrs(); + } + return *pragmasPtr; +} + +void DexBCMethodAnnotations::SetupFuncAttrs() { + MIRFunction *func = GetMIRFunction(methodFullNameStrIdx); + CHECK_NULL_FATAL(func); + for (MIRPragma *pragma : *pragmasPtr) { + SetupFuncAttrWithPragma(*func, *pragma); + } +} + +void DexBCMethodAnnotations::SetupFuncAttrWithPragma(MIRFunction &mirFunc, const MIRPragma &pragma) { + FuncAttrKind attr; + bool isAttrSet = true; + if (ArkAnnotation::GetInstance().IsFastNative(pragma.GetTyIdx())) { + attr = FUNCATTR_fast_native; + } else if (ArkAnnotation::GetInstance().IsCriticalNative(pragma.GetTyIdx())) { + attr = FUNCATTR_critical_native; + } else if (ArkAnnotation::GetInstance().IsCallerSensitive(pragma.GetTyIdx())) { + attr = FUNCATTR_callersensitive; + } else if (FEOptions::GetInstance().IsRC() && + (ArkAnnotation::GetInstance().IsRCUnownedLocal(pragma.GetTyIdx()) || + (ArkAnnotation::GetInstance().IsRCUnownedLocalOld(pragma.GetTyIdx()) && + pragma.GetElementVector().empty()))) { + attr = FUNCATTR_rclocalunowned; + RCSetter::GetRCSetter().CollectUnownedLocalFuncs(&mirFunc); + } else { + isAttrSet = false; // empty, for codedex cleanup + } + const char *definingClassName = methodID->GetDefiningClassName(iDexFile); + std::string mplClassName = namemangler::EncodeName(definingClassName); + MIRStructType *currStructType = FEManager::GetTypeManager().GetStructTypeFromName(mplClassName); + if (isAttrSet) { + mirFunc.SetAttr(attr); + // update method attribute in structure type as well + for (auto &mit : currStructType->GetMethods()) { + if (mit.first == mirFunc.GetStIdx()) { + mit.second.second.SetAttr(attr); + break; + } + } + } + if (FEOptions::GetInstance().IsRC()) { + RCSetter::GetRCSetter().ProcessMethodRCAnnotation(mirFunc, mplClassName, *currStructType, pragma); + } +} + +MIRFunction *DexBCMethodAnnotations::GetMIRFunction(const GStrIdx &nameIdx) const { + MIRFunction *func = nullptr; + bool isStatic = true; + func = FEManager::GetTypeManager().GetMIRFunction(nameIdx, isStatic); + if (func != nullptr) { + return func; + } + isStatic = false; + func = FEManager::GetTypeManager().GetMIRFunction(nameIdx, isStatic); + return func; +} + +// ---------- DexBCParameterAnnotations ---------- +void DexBCParameterAnnotations::Init() { + if (iDexParameterAnnotations == nullptr) { + return; + } + const IDexAnnotationSetList *iDexAnnotationSetList = iDexParameterAnnotations->GetAnnotationSetList(iDexFile); + if (iDexAnnotationSetList == nullptr) { + return; + } + annotationSetList = std::make_unique(module, mp, iDexFile, iDexAnnotationSetList); +} + +std::vector &DexBCParameterAnnotations::EmitPragmas() { + pragmas.clear(); + if (annotationSetList == nullptr) { + return pragmas; + } + uint64 mapIdx = (static_cast(iDexFile.GetFileIdx()) << 32) | 
iDexParameterAnnotations->GetMethodIdx(); + StructElemNameIdx *structElemNameIdx = FEManager::GetManager().GetMethodStructElemNameIdx(mapIdx); + ASSERT(structElemNameIdx != nullptr, "structElemNameIdx is nullptr."); + return annotationSetList->EmitPragmas(kPragmaParam, structElemNameIdx->full); +} + +// ---------- DexBCAnnotationsDirectory ---------- +void DexBCAnnotationsDirectory::Init() { + if (iDexAnnotationsDirectory == nullptr) { + return; + } + if (iDexAnnotationsDirectory->HasClassAnnotationSet(iDexFile)) { + classAnnotationSet = std::make_unique(module, mp, iDexFile, + iDexAnnotationsDirectory->GetClassAnnotationSet(iDexFile)); + } + if (iDexAnnotationsDirectory->HasFieldAnnotationsItems(iDexFile)) { + std::vector iDexFieldAnnotationsItems; + iDexAnnotationsDirectory->GetFieldAnnotationsItems(iDexFile, iDexFieldAnnotationsItems); + for (const IDexFieldAnnotations* iDexFieldAnnotations : iDexFieldAnnotationsItems) { + std::unique_ptr fieldAnnotations = + std::make_unique(module, mp, iDexFile, iDexFieldAnnotations); + fieldAnnotationsItems.push_back(std::move(fieldAnnotations)); + } + } + if (iDexAnnotationsDirectory->HasMethodAnnotationsItems(iDexFile)) { + std::vector iDexMethodAnnotationsItems; + iDexAnnotationsDirectory->GetMethodAnnotationsItems(iDexFile, iDexMethodAnnotationsItems); + for (const IDexMethodAnnotations *iDexMethodAnnotations : iDexMethodAnnotationsItems) { + std::unique_ptr methodAnnotations = + std::make_unique(module, mp, iDexFile, iDexMethodAnnotations); + methodAnnotationsItems.push_back(std::move(methodAnnotations)); + } + } + if (iDexAnnotationsDirectory->HasParameterAnnotationsItems(iDexFile)) { + std::vector iDexParameterAnnotationsItems; + iDexAnnotationsDirectory->GetParameterAnnotationsItems(iDexFile, iDexParameterAnnotationsItems); + for (const IDexParameterAnnotations *iDexParameterAnnotations : iDexParameterAnnotationsItems) { + std::unique_ptr parameterAnnotations = + std::make_unique(module, mp, iDexFile, iDexParameterAnnotations); + parameterAnnotationsItems.push_back(std::move(parameterAnnotations)); + } + } +} + +std::vector &DexBCAnnotationsDirectory::EmitPragmasImpl() { + pragmas.clear(); + if (iDexAnnotationsDirectory == nullptr) { + return pragmas; + } + std::vector pragmasInner; + if (classAnnotationSet != nullptr) { + const GStrIdx &strIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(namemangler::EncodeName(className)); + pragmasInner = classAnnotationSet->EmitPragmas(kPragmaClass, strIdx); + if (FEOptions::GetInstance().IsRC()) { + RCSetter::GetRCSetter().ProcessClassRCAnnotation(strIdx, pragmasInner); + } + pragmas.insert(pragmas.end(), pragmasInner.begin(), pragmasInner.end()); + } + for (const std::unique_ptr &fieldAnnotations : fieldAnnotationsItems) { + pragmasInner = fieldAnnotations->EmitPragmas(); + pragmas.insert(pragmas.end(), pragmasInner.begin(), pragmasInner.end()); + } + for (const std::unique_ptr &methodAnnotations : methodAnnotationsItems) { + pragmasInner = methodAnnotations->EmitPragmas(); + pragmas.insert(pragmas.end(), pragmasInner.begin(), pragmasInner.end()); + } + for (const std::unique_ptr ¶meterAnnotations : parameterAnnotationsItems) { + pragmasInner = parameterAnnotations->EmitPragmas(); + pragmas.insert(pragmas.end(), pragmasInner.begin(), pragmasInner.end()); + } + return pragmas; +} +} // namespace bc +} // namespace maple \ No newline at end of file diff --git a/src/hir2mpl/bytecode_input/dex/src/dex_reader.cpp b/src/hir2mpl/bytecode_input/dex/src/dex_reader.cpp new file mode 100644 index 
0000000000000000000000000000000000000000..88d62d251fc24cd62f2c2311eadd01bfdf8909b3 --- /dev/null +++ b/src/hir2mpl/bytecode_input/dex/src/dex_reader.cpp @@ -0,0 +1,406 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "dex_reader.h" +#include "dexfile_factory.h" +#include "mpl_logging.h" +#include "dex_file_util.h" +#include "dex_op.h" +#include "dex_class.h" +#include "fe_utils_java.h" + +namespace maple { +namespace bc { +bool DexReader::OpenAndMapImpl() { + DexFileFactory dexFileFactory; + iDexFile = dexFileFactory.NewInstance(); + bool openResult = iDexFile->Open(fileName); + if (!openResult) { + ERR(kLncErr, "Failed to open dex file %s", fileName.c_str()); + return false; + } + iDexFile->SetFileIdx(fileIdx); + irSrcFileSignature = iDexFile->GetHeader()->GetSignature(); + return true; +} + +void DexReader::SetDexFile(std::unique_ptr iDexFileIn) { + iDexFile = std::move(iDexFileIn); +} + +uint32 DexReader::GetClassItemsSize() const { + return iDexFile->GetClassItemsSize(); +} + +const char *DexReader::GetClassJavaSourceFileName(uint32 classIdx) const { + return iDexFile->GetClassItem(classIdx)->GetJavaSourceFileName(GetIDexFile()); +} + +bool DexReader::IsInterface(uint32 classIdx) const { + return iDexFile->GetClassItem(classIdx)->IsInterface(); +} + +uint32 DexReader::GetClassAccFlag(uint32 classIdx) const { + return iDexFile->GetClassItem(classIdx)->GetAccessFlags(); +} + +std::string DexReader::GetClassName(uint32 classIdx) const { + const char *className = iDexFile->GetClassItem(classIdx)->GetClassName(GetIDexFile()); + CHECK_FATAL(className != nullptr, "class name of classItem: %u is empty.", classIdx); + return className; +} + +std::list DexReader::GetSuperClasses(uint32 classIdx, bool mapled) const { + const char *super = iDexFile->GetClassItem(classIdx)->GetSuperClassName(GetIDexFile()); + std::list superClasses; + if (super != nullptr) { + superClasses.emplace_back(mapled ? namemangler::EncodeName(super) : super); + } + return superClasses; +} + +std::vector DexReader::GetClassInterfaceNames(uint32 classIdx, bool mapled) const { + std::vector names; + iDexFile->GetClassItem(classIdx)->GetInterfaceNames(GetIDexFile(), names); + std::vector ret; + for (auto name : names) { + if (name != nullptr) { + ret.push_back(mapled ? namemangler::EncodeName(name) : name); + } + } + return ret; +} + +std::string DexReader::GetClassFieldName(uint32 fieldIdx, bool mapled) const { + const IDexFieldIdItem *fieldIdItem = iDexFile->GetFieldIdItem(fieldIdx); + const char *name = iDexFile->GetStringByIndex(fieldIdItem->GetNameIdx()); + return (mapled && name != nullptr) ? namemangler::EncodeName(name) : (name != nullptr ? name : ""); +} + +std::string DexReader::GetClassFieldTypeName(uint32 fieldIdx, bool mapled) const { + const IDexFieldIdItem *fieldIdItem = iDexFile->GetFieldIdItem(fieldIdx); + const char *name = fieldIdItem->GetFieldTypeName(GetIDexFile()); + return (mapled && name != nullptr) ? namemangler::EncodeName(name) : (name != nullptr ? 
name : ""); +} + +std::string DexReader::GetClassMethodName(uint32 methodIdx, bool mapled) const { + const IDexMethodIdItem *methodIdItem = iDexFile->GetMethodIdItem(methodIdx); + const char *name = iDexFile->GetStringByIndex(methodIdItem->GetNameIdx()); + return (mapled && name != nullptr) ? namemangler::EncodeName(name) : (name != nullptr ? name : ""); +} + +std::string DexReader::GetClassMethodDescName(uint32 methodIdx, bool mapled) const { + const IDexMethodIdItem *methodIdItem = iDexFile->GetMethodIdItem(methodIdx); + const std::string name = iDexFile->GetProtoIdItem(methodIdItem->GetProtoIdx())->GetDescriptor(GetIDexFile()); + return mapled ? namemangler::EncodeName(name) : name; +} + +std::string DexReader::GetStringFromIdxImpl(uint32 idx) const { + const char *str = iDexFile->GetStringByIndex(idx); + return str == nullptr ? "" : str; +} + +std::string DexReader::GetTypeNameFromIdxImpl(uint32 idx) const { + const char *str = iDexFile->GetStringByTypeIndex(idx); + return str == nullptr ? "" : str; +} + +BCReader::ClassElem DexReader::GetClassFieldFromIdxImpl(uint32 idx) const { + ClassElem elem; + const char *name = iDexFile->GetFieldIdItem(idx)->GetDefiningClassName(GetIDexFile()); + CHECK_FATAL(name != nullptr, "Failed: field class name is empty."); + elem.className = name; + name = iDexFile->GetFieldIdItem(idx)->GetShortFieldName(GetIDexFile()); + CHECK_FATAL(name != nullptr, "Failed: field name is empty."); + elem.elemName = name; + name = iDexFile->GetFieldIdItem(idx)->GetFieldTypeName(GetIDexFile()); + CHECK_FATAL(name != nullptr, "Failed: field type name is empty."); + elem.typeName = name; + return elem; +} + +std::unordered_map DexReader::GetDefiningClassNameTypeIdMap() const { + return iDexFile->GetDefiningClassNameTypeIdMap(); +} + +const uint16 *DexReader::GetMethodInstOffset(const IDexMethodItem* dexMethodItem) const { + return dexMethodItem->GetInsnsByOffset(*iDexFile, 0); +} + +uint16 DexReader::GetClassMethodRegisterTotalSize(const IDexMethodItem* dexMethodItem) const { + return dexMethodItem->GetRegistersSize(*iDexFile); +} + +uint16 DexReader::GetClassMethodRegisterInSize(const IDexMethodItem* dexMethodItem) const { + return dexMethodItem->GetInsSize(*iDexFile); +} + +uint32 DexReader::GetCodeOff(const IDexMethodItem* dexMethodItem) const { + return dexMethodItem->GetCodeOff(); +} + +BCReader::ClassElem DexReader::GetClassMethodFromIdxImpl(uint32 idx) const { + ClassElem elem; + const char *name = iDexFile->GetMethodIdItem(idx)->GetDefiningClassName(GetIDexFile()); + CHECK_FATAL(name != nullptr, "Failed: field class name is empty."); + elem.className = name; + name = iDexFile->GetMethodIdItem(idx)->GetShortMethodName(GetIDexFile()); + CHECK_FATAL(name != nullptr, "Failed: field name is empty."); + elem.elemName = name; + elem.typeName = iDexFile->GetMethodIdItem(idx)->GetDescriptor(GetIDexFile()); + return elem; +} + +std::string DexReader::GetSignatureImpl(uint32 idx) const { + return iDexFile->GetProtoIdItem(idx)->GetDescriptor(GetIDexFile()); +} + +uint32 DexReader::GetFileIndexImpl() const { + return iDexFile->GetFileIdx(); +} + +void DexReader::ResovleSrcPositionInfo( + const IDexMethodItem* dexMethodItem, + std::map &srcPosInfo) const { + dexMethodItem->GetSrcPositionInfo(GetIDexFile(), srcPosInfo); +} + + +std::unique_ptr DexReader::ResovleSrcLocalInfo(const IDexMethodItem &dexMethodItem) const { + auto srcLocals = std::make_unique(); + dexMethodItem.GetSrcLocalInfo(GetIDexFile(), *srcLocals); + return srcLocals; +} + +MapleMap 
*DexReader::ResolveInstructions( + MapleAllocator &allocator, const IDexMethodItem* dexMethodItem, bool mapled) const { + (void) mapled; + std::map> pcInstMap; + dexMethodItem->GetPCInstructionMap(GetIDexFile(), pcInstMap); + return ConstructBCPCInstructionMap(allocator, pcInstMap); +} + +MapleMap *DexReader::ConstructBCPCInstructionMap( + MapleAllocator &allocator, const std::map> &pcInstMap) const { + MapleMap *pcBCInstMap = + allocator.GetMemPool()->New>(allocator.Adapter()); + for (const auto &pcInst : pcInstMap) { + pcBCInstMap->emplace(pcInst.first, ConstructBCInstruction(allocator, pcInst)); + } + return pcBCInstMap; +} + +BCInstruction *DexReader::ConstructBCInstruction( + MapleAllocator &allocator, const std::pair> &p) const { + auto dexOp = + static_cast(dexOpGeneratorMap[static_cast(p.second.get()->GetOpcode())](allocator, p.first)); + dexOp->SetVA(GetVA(p.second.get())); + dexOp->SetVB(GetVB(p.second.get())); + dexOp->SetWideVB(GetWideVB(p.second.get())); + MapleList vRegs(allocator.Adapter()); + GetArgVRegs(p.second.get(), vRegs); + dexOp->SetArgs(vRegs); + dexOp->SetVC(GetVC(p.second.get())); + dexOp->SetVH(GetVH(p.second.get())); + dexOp->SetWidth(GetWidth(p.second.get())); + dexOp->SetOpName(GetOpName(p.second.get())); + return dexOp; +} + +uint32 DexReader::GetVA(const IDexInstruction *inst) const { + return inst->GetVRegA(); +} + +uint32 DexReader::GetVB(const IDexInstruction *inst) const { + return inst->GetVRegB(); +} + +uint64 DexReader::GetWideVB(const IDexInstruction *inst) const { + return inst->GetVRegBWide(); +} + +uint32 DexReader::GetVC(const IDexInstruction *inst) const { + return inst->GetVRegC(); +} + +void DexReader::GetArgVRegs(const IDexInstruction *inst, MapleList &vRegs) const { + if (inst->GetOpcode() == kOpFilledNewArray || + (kOpInvokeVirtual <= inst->GetOpcode() && inst->GetOpcode() <= kOpInvokeInterface) || + inst->GetOpcode() == kOpInvokePolymorphic || + inst->GetOpcode() == kOpInvokeCustom) { + for (uint32 i = 0; i < inst->GetVRegA(); ++i) { + vRegs.push_back(inst->GetArg(i)); + } + } else if (inst->GetOpcode() == kOpFilledNewArrayRange || + (kOpInvokeVirtualRange <= inst->GetOpcode() && inst->GetOpcode() <= kOpInvokeInterfaceRange) || + inst->GetOpcode() == kOpInvokePolymorphicRange || + inst->GetOpcode() == kOpInvokeCustomRange) { + for (uint32 i = 0; i < inst->GetVRegA(); ++i) { + vRegs.push_back(inst->GetVRegC() + i); + } + } +} + +uint32 DexReader::GetVH(const IDexInstruction *inst) const { + return inst->GetVRegH(); +} + +uint8 DexReader::GetWidth(const IDexInstruction *inst) const { + return inst->GetWidth(); +} + +const char *DexReader::GetOpName(const IDexInstruction *inst) const { + return inst->GetOpcodeName(); +} + +std::unique_ptr>> DexReader::ResolveTryInfos( + const IDexMethodItem* dexMethodItem) const { + std::vector tryItems; + dexMethodItem->GetTryItems(GetIDexFile(), tryItems); + uint32 codeOff = dexMethodItem->GetCodeOff(); + return ConstructBCTryInfoList(codeOff, tryItems); +} + +std::unique_ptr>> DexReader::ConstructBCTryInfoList( + uint32 codeOff, const std::vector &tryItems) const { + auto tryInfos = + std::make_unique>>(); + std::list> info; + for (const auto *tryItem : tryItems) { + std::vector dexCatchItems; + tryItem->GetCatchHandlerItems(GetIDexFile(), codeOff, dexCatchItems); + std::unique_ptr tryInfo = std::make_unique( + tryItem->GetStartAddr(), + tryItem->GetEndAddr(), + ConstructBCCatchList(dexCatchItems)); + tryInfos->push_back(std::move(tryInfo)); + info.push_back(std::move(tryInfo)); + } + return tryInfos; +} + 
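+// Review note: in ConstructBCTryInfoList above, each tryInfo is std::move'd into tryInfos and
+// then moved a second time into the local list `info`, so `info` only ever receives null
+// unique_ptrs and is never read afterwards. Assuming that local list really is dead, a minimal
+// sketch of the loop without it (BCTryInfo's three-argument constructor as used above):
+//
+//   for (const auto *tryItem : tryItems) {
+//     std::vector<IDexCatchHandlerItem> dexCatchItems;  // element type assumed from ConstructBCCatchList
+//     tryItem->GetCatchHandlerItems(GetIDexFile(), codeOff, dexCatchItems);
+//     tryInfos->push_back(std::make_unique<BCTryInfo>(
+//         tryItem->GetStartAddr(), tryItem->GetEndAddr(), ConstructBCCatchList(dexCatchItems)));
+//   }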
+std::unique_ptr>> DexReader::ConstructBCCatchList( + std::vector &catchHandlerItems) const { + std::unique_ptr>> catches = + std::make_unique>>(); + for (const auto catchHandlerItem : catchHandlerItems) { + // Use V (void) catch exceptions + GStrIdx exceptionNameIdx = catchHandlerItem.IsCatchAllHandlerType() ? BCUtil::GetVoidIdx() : + GlobalTables::GetStrTable().GetOrCreateStrIdxFromName( + namemangler::EncodeName(iDexFile->GetStringByTypeIndex(catchHandlerItem.GetHandlerTypeIdx()))); + std::unique_ptr catchInfo = std::make_unique( + catchHandlerItem.GetHandlerAddress(), + exceptionNameIdx, + catchHandlerItem.IsCatchAllHandlerType()); + catches->push_back(std::move(catchInfo)); + } + return catches; +} + +bool DexReader::ReadAllDepTypeNames(std::unordered_set &depSet) { + for (uint32 i = 0; i < iDexFile->GetHeader()->GetTypeIdsSize(); ++i) { + std::string typeName = iDexFile->GetStringByTypeIndex(i); + std::string trimmedTypeName = BCUtil::TrimArrayModifier(typeName); + if (trimmedTypeName.size() != 0 && trimmedTypeName[0] == 'L') { + depSet.insert(trimmedTypeName); + } + } + return true; +} + +bool DexReader::ReadAllClassNames(std::unordered_set &classSet) const { + for (uint32 classIdx = 0; classIdx < iDexFile->GetClassItemsSize(); ++classIdx) { + const IDexClassItem *classItem = iDexFile->GetClassItem(classIdx); + classSet.emplace(classItem->GetClassName(GetIDexFile())); + } + return true; +} + +bool DexReader::ReadMethodDepTypeNames(std::unordered_set &depSet, + uint32 classIdx, uint32 methodItemx, bool isVirtual) const { + std::unique_ptr method; + if (isVirtual) { + method = iDexFile->GetClassItem(classIdx)->GetVirtualMethod(*iDexFile, methodItemx); + } else { + method = iDexFile->GetClassItem(classIdx)->GetDirectMethod(*iDexFile, methodItemx); + } + CHECK_NULL_FATAL(method.get()); + std::map> pcInstMap; + method->GetPCInstructionMap(GetIDexFile(), pcInstMap); + for (const auto &pcInst : pcInstMap) { + const std::unique_ptr &inst = pcInst.second; + IDexOpcode op = inst->GetOpcode(); + if (op >= kOpIget && op <= kOpIputShort) { + // field operation + const char *className = iDexFile->GetFieldIdItem(inst->GetVRegC())->GetDefiningClassName(GetIDexFile()); + CHECK_FATAL(className != nullptr, "field class name is empty."); + AddDepTypeName(depSet, className, false); + } else if (op >= kOpSget && op <= kOpSputShort) { + // field operation + const char *className = iDexFile->GetFieldIdItem(inst->GetVRegB())->GetDefiningClassName(GetIDexFile()); + CHECK_FATAL(className != nullptr, "field class name is empty."); + AddDepTypeName(depSet, className, false); + } else if (op >= kOpInvokeVirtual && op <= kOpInvokeInterfaceRange) { + // invoke inst + const IDexMethodIdItem *methodIdTmp = iDexFile->GetMethodIdItem(inst->GetVRegB()); + const char *className = methodIdTmp->GetDefiningClassName(GetIDexFile()); + CHECK_FATAL(className != nullptr, "method class name is empty."); + AddDepTypeName(depSet, className, true); + const std::string &typeName = methodIdTmp->GetDescriptor(GetIDexFile()); + std::vector retArgsTypeNames = FEUtilJava::SolveMethodSignature(typeName); + for (const std::string &item : retArgsTypeNames) { + AddDepTypeName(depSet, item, true); + } + } else if (op == kOpConstClass || op == kOpCheckCast || op == kOpFilledNewArray || op == kOpFilledNewArrayRange) { + AddDepTypeName(depSet, iDexFile->GetStringByTypeIndex(inst->GetVRegB()), true); + } else if (op == kOpInstanceOf || op == kOpNewArray) { + AddDepTypeName(depSet, iDexFile->GetStringByTypeIndex(inst->GetVRegC()), true); + } else 
if (op == kOpNewInstance) { + AddDepTypeName(depSet, iDexFile->GetStringByTypeIndex(inst->GetVRegB()), false); + } + ReadMethodTryCatchDepTypeNames(depSet, *method); + } + return true; +} + +void DexReader::ReadMethodTryCatchDepTypeNames(std::unordered_set &depSet, + const IDexMethodItem &method) const { + uint32 tryNum = method.GetTriesSize(GetIDexFile()); + if (tryNum == 0) { + return; + } + std::vector tryItems; + method.GetTryItems(GetIDexFile(), tryItems); + uint32 codeOff = method.GetCodeOff(); + for (const auto tryItem : tryItems) { + std::vector dexCatchItems; + tryItem->GetCatchHandlerItems(GetIDexFile(), codeOff, dexCatchItems); + for (const auto &handler: dexCatchItems) { + if (!handler.IsCatchAllHandlerType()) { + uint32 typeIdx = handler.GetHandlerTypeIdx(); + AddDepTypeName(depSet, iDexFile->GetStringByTypeIndex(typeIdx), false); + } + } + } +} + +void DexReader::AddDepTypeName(std::unordered_set &depSet, + const std::string &typeName, bool isTrim) const { + const std::string &trimmedTypeName = isTrim ? BCUtil::TrimArrayModifier(typeName) : typeName; + if (trimmedTypeName.size() != 0 && trimmedTypeName[0] == 'L') { + depSet.insert(trimmedTypeName); + } +} +} // namespace bc +} // namespace maple diff --git a/src/hir2mpl/bytecode_input/dex/src/dex_strfac.cpp b/src/hir2mpl/bytecode_input/dex/src/dex_strfac.cpp new file mode 100644 index 0000000000000000000000000000000000000000..87aa86bc7c32e3ddd3a54fc93db10a273af9285b --- /dev/null +++ b/src/hir2mpl/bytecode_input/dex/src/dex_strfac.cpp @@ -0,0 +1,36 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "dex_strfac.h" +#include "namemangler.h" + +namespace maple { +std::string DexStrFactory::GetStringFactoryFuncname(const std::string &funcName) { +#define STR_STRFAC_MAP2(N1, N2) \ + if (funcName.compare(N1) == 0) { \ + return N2; \ + } +#include "dex_strfac_map.def" +#undef STR_STRFAC_MAP2 + return ""; +} + +bool DexStrFactory::IsStringInit(const std::string &funcName) { + const std::string &mplName = "Ljava/lang/String;|"; + if (funcName.compare(0, mplName.length(), mplName) == 0) { + return true; + } + return false; +} +} // namespace maple \ No newline at end of file diff --git a/src/hir2mpl/bytecode_input/dex/src/dexfile_factory.cpp b/src/hir2mpl/bytecode_input/dex/src/dexfile_factory.cpp new file mode 100644 index 0000000000000000000000000000000000000000..6d292b5d3493e58a12558d7d983e957b9c4eba8d --- /dev/null +++ b/src/hir2mpl/bytecode_input/dex/src/dexfile_factory.cpp @@ -0,0 +1,22 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#include "dexfile_factory.h"
+#include "dexfile_libdexfile.h"
+
+namespace maple {
+std::unique_ptr<IDexFile> DexFileFactory::NewInstance() const {
+  return std::make_unique<LibDexFile>();
+}
+} // namespace maple
diff --git a/src/hir2mpl/bytecode_input/dex/src/dexfile_interface.cpp b/src/hir2mpl/bytecode_input/dex/src/dexfile_interface.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..3027b0ce7182ba0c657d0a355e66b1dc1ab9fce6
--- /dev/null
+++ b/src/hir2mpl/bytecode_input/dex/src/dexfile_interface.cpp
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#include "dexfile_interface.h"
+#include "mpl_logging.h"
+
+namespace maple {
+void ResolvedMethodType::SignatureTypes(const std::string &mt, std::list<std::string> &types) {
+  // three pointers linear scan algo
+  size_t startPos = 1;  // pos 0 should be '('
+  size_t currentPos = startPos;
+  size_t endPos = mt.find(")");
+  CHECK_FATAL(endPos != std::string::npos, "(ToIDEUser)Invalid string format: %s", mt.c_str());
+  CHECK_FATAL(startPos <= endPos, "(ToIDEUser)Invalid string format: %s", mt.c_str());
+  while (startPos < endPos) {
+    switch (mt[currentPos]) {
+      case 'I':
+      case 'Z':
+      case 'B':
+      case 'C':
+      case 'V':
+      case 'S':
+      case 'J':
+      case 'F':
+      case 'D': {
+        types.push_back(namemangler::EncodeName(mt.substr(startPos, currentPos - startPos + 1)));
+        ++currentPos;
+        break;
+      }
+      case '[':
+        ++currentPos;
+        continue;
+      case 'L':
+        while (mt[currentPos++] != ';') {}  // empty
+        types.push_back(namemangler::EncodeName(mt.substr(startPos, currentPos - startPos)));
+        break;
+      default:
+        std::cerr << "ResolvedMethodType: catastrophic error" << std::endl;
+        break;
+    }
+    startPos = currentPos;
+  }
+}
+
+std::string ResolvedMethodType::SignatureReturnType(const std::string &mt) {
+  size_t endPos = mt.find(")");
+  return namemangler::EncodeName(mt.substr(endPos + 1));
+}
+} // namespace maple
diff --git a/src/hir2mpl/bytecode_input/dex/src/dexfile_libdexfile.cpp b/src/hir2mpl/bytecode_input/dex/src/dexfile_libdexfile.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..4f5d067a1092577503c3f69fe54b814f81f0173e
--- /dev/null
+++ b/src/hir2mpl/bytecode_input/dex/src/dexfile_libdexfile.cpp
@@ -0,0 +1,1704 @@
+/*
+ * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "dexfile_libdexfile.h" +#include +#include +#include +#include +#include +#include +#ifdef DARWIN +#include +#else +#include +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "securec.h" + +std::ostream &art::operator<<(std::ostream &os, const art::Instruction::Format &format) { + switch (format) { + case art::Instruction::k10x: { + os << "k10x"; + break; + } + case art::Instruction::k12x: { + os << "k12x"; + break; + } + case art::Instruction::k11n: { + os << "k11n"; + break; + } + case art::Instruction::k11x: { + os << "k11x"; + break; + } + case art::Instruction::k10t: { + os << "k10t"; + break; + } + case art::Instruction::k20t: { + os << "k20t"; + break; + } + case art::Instruction::k22x: { + os << "k22x"; + break; + } + case art::Instruction::k21t: { + os << "k21t"; + break; + } + case art::Instruction::k21s: { + os << "k21s"; + break; + } + case art::Instruction::k21h: { + os << "k21h"; + break; + } + case art::Instruction::k21c: { + os << "k21c"; + break; + } + case art::Instruction::k23x: { + os << "k23x"; + break; + } + case art::Instruction::k22b: { + os << "k22b"; + break; + } + case art::Instruction::k22t: { + os << "k22t"; + break; + } + case art::Instruction::k22s: { + os << "k22s"; + break; + } + case art::Instruction::k22c: { + os << "k22c"; + break; + } + case art::Instruction::k32x: { + os << "k32x"; + break; + } + case art::Instruction::k30t: { + os << "k30t"; + break; + } + case art::Instruction::k31t: { + os << "k31t"; + break; + } + case art::Instruction::k31i: { + os << "k31i"; + break; + } + case art::Instruction::k31c: { + os << "k31c"; + break; + } + case art::Instruction::k35c: { + os << "k35c"; + break; + } + case art::Instruction::k3rc: { + os << "k3rc"; + break; + } + case art::Instruction::k45cc: { + os << "k45cc"; + break; + } + case art::Instruction::k4rcc: { + os << "k4rcc"; + break; + } + case art::Instruction::k51l: { + os << "k51l"; + break; + } + default: { + os << "unknown"; + } + } + return os; +} + +std::ostream &art::operator<<(std::ostream &os, const art::EncodedArrayValueIterator::ValueType &code) { + switch (code) { + case art::EncodedArrayValueIterator::ValueType::kByte: { + os << "Byte"; + break; + } + case art::EncodedArrayValueIterator::ValueType::kShort: { + os << "Short"; + break; + } + case art::EncodedArrayValueIterator::ValueType::kChar: { + os << "Char"; + break; + } + case art::EncodedArrayValueIterator::ValueType::kInt: { + os << "Int"; + break; + } + case art::EncodedArrayValueIterator::ValueType::kLong: { + os << "Long"; + break; + } + case art::EncodedArrayValueIterator::ValueType::kFloat: { + os << "Float"; + break; + } + case art::EncodedArrayValueIterator::ValueType::kDouble: { + os << "Double"; + break; + } + case art::EncodedArrayValueIterator::ValueType::kMethodType: { + os << "MethodType"; + break; + } + case art::EncodedArrayValueIterator::ValueType::kMethodHandle: { + os << "MethodHandle"; + break; + } + case art::EncodedArrayValueIterator::ValueType::kString: { + os << "String"; + break; + } + case art::EncodedArrayValueIterator::ValueType::kType: { + os << "Type"; + break; + } + 
case art::EncodedArrayValueIterator::ValueType::kField: { + os << "Field"; + break; + } + case art::EncodedArrayValueIterator::ValueType::kMethod: { + os << "Method"; + break; + } + case art::EncodedArrayValueIterator::ValueType::kEnum: { + os << "Enum"; + break; + } + case art::EncodedArrayValueIterator::ValueType::kArray: { + os << "Array"; + break; + } + case art::EncodedArrayValueIterator::ValueType::kAnnotation: { + os << "Annotation"; + break; + } + case art::EncodedArrayValueIterator::ValueType::kNull: { + os << "Null"; + break; + } + case art::EncodedArrayValueIterator::ValueType::kBoolean: { + os << "Boolean"; + break; + } + default: { + os << "Unknown type (" << static_cast(code) << ")"; + } + } + return os; +} + +namespace maple { +const size_t kDexFileVersionStringLength = 3; +// =====DexInstruction start====== +DexInstruction::DexInstruction(const art::Instruction &artInstruction) + : opcode(kOpNop), + indexType(kIDexIndexUnknown), + vA(UINT32_MAX), + vB(UINT32_MAX), + vBWide(UINT32_MAX), + vC(UINT32_MAX), + vH(UINT32_MAX), + artInstruction(&artInstruction) {} + +IDexOpcode DexInstruction::GetOpcode() const { + return opcode; +} + +const char *DexInstruction::GetOpcodeName() const { + IDexOpcode iOpcode = GetOpcode(); + const art::Instruction::Code &artCode = art::Instruction::Code(static_cast(iOpcode)); + return art::Instruction::Name(artCode); +} + +uint32_t DexInstruction::GetVRegA() const { + return vA; +} + +uint32_t DexInstruction::GetVRegB() const { + return vB; +} + +uint64_t DexInstruction::GetVRegBWide() const { + return vBWide; +} + +uint32_t DexInstruction::GetVRegC() const { + return vC; +} + +uint32_t DexInstruction::GetVRegH() const { + return vH; +} + +uint32_t DexInstruction::GetArg(uint32_t index) const { + return arg[index]; +} + +IDexInstructionIndexType DexInstruction::GetIndexType() const { + return indexType; +} + +IDexInstructionFormat DexInstruction::GetFormat() const { + IDexOpcode iOpcode = GetOpcode(); + art::Instruction::Code artCode = art::Instruction::Code(static_cast(iOpcode)); + art::Instruction::Format artFormat = art::Instruction::FormatOf(artCode); + IDexInstructionFormat iFormat = static_cast(artFormat); + return iFormat; +} + +size_t DexInstruction::GetWidth() const { + return artInstruction->SizeInCodeUnits(); +} + +void DexInstruction::SetOpcode(IDexOpcode iOpCode) { + opcode = iOpCode; +} + +void DexInstruction::SetVRegB(uint32_t vRegB) { + vB = vRegB; +} + +void DexInstruction::SetVRegBWide(uint64_t vRegBWide) { + vBWide = vRegBWide; +} + +void DexInstruction::SetIndexType(IDexInstructionIndexType iIndexType) { + indexType = iIndexType; +} + +void DexInstruction::Init() { + art::Instruction::Code artCode = artInstruction->Opcode(); + opcode = static_cast(artCode); + if (artInstruction->HasVRegA()) { + vA = artInstruction->VRegA(); + } + if (artInstruction->HasVRegB() && !artInstruction->HasWideVRegB()) { + vB = artInstruction->VRegB(); + } + if (artInstruction->HasWideVRegB()) { + vBWide = artInstruction->WideVRegB(); + } + if (artInstruction->HasVRegC()) { + vC = artInstruction->VRegC(); + } + if (artInstruction->HasVRegH()) { + vH = artInstruction->VRegH(); + } + if (artInstruction->HasVarArgs()) { + (void)artInstruction->GetVarArgs(arg); + } + if ((artInstruction->Opcode() == art::Instruction::INVOKE_POLYMORPHIC) || + (artInstruction->Opcode() == art::Instruction::INVOKE_POLYMORPHIC_RANGE)) { + for (uint32_t i = 1; i < art::Instruction::kMaxVarArgRegs; i++) { + arg[i - 1] = arg[i]; + } + } + art::Instruction::IndexType artIndexType 
= art::Instruction::IndexTypeOf(artCode); + indexType = static_cast(artIndexType); +} + +bool DexInstruction::HasVRegA() const { + return artInstruction->HasVRegA(); +} + +bool DexInstruction::HasVRegB() const { + return artInstruction->HasVRegB(); +} + +bool DexInstruction::HasWideVRegB() const { + return artInstruction->HasWideVRegB(); +} + +bool DexInstruction::HasVRegC() const { + return artInstruction->HasVRegC(); +} + +bool DexInstruction::HasVRegH() const { + return artInstruction->HasVRegH(); +} + +bool DexInstruction::HasArgs() const { + return artInstruction->HasVarArgs(); +} +// =====DexInstruction end================ +// =====IDexCatchHandlerItem start========= +const art::DexFile *GetDexFile(const IDexFile &dexFile) { + return reinterpret_cast(dexFile.GetData()); +} + +const art::dex::ClassDef *GetClassDef(const IDexClassItem *item) { + return reinterpret_cast(item); +} + +uint32_t IDexCatchHandlerItem::GetHandlerTypeIdx() const { + return typeIdx; +} + +uint32_t IDexCatchHandlerItem::GetHandlerAddress() const { + return address; +} + +bool IDexCatchHandlerItem::IsCatchAllHandlerType() const { + return (typeIdx == art::DexFile::kDexNoIndex16); +} +// =====IDexCatchHandlerItem end=========== +// =====IDexTryItem start================== +const art::dex::TryItem *GetTryItem(const IDexTryItem *item) { + return reinterpret_cast(item); +} + +const IDexTryItem *IDexTryItem::GetInstance(const void *item) { + return reinterpret_cast(item); +} + +uint32_t IDexTryItem::GetStartAddr() const { + return GetTryItem(this)->start_addr_; +} + +uint32_t IDexTryItem::GetEndAddr() const { + return (GetTryItem(this)->start_addr_ + GetTryItem(this)->insn_count_); +} + +void IDexTryItem::GetCatchHandlerItems(const IDexFile &dexFile, uint32_t codeOff, + std::vector &items) const { + const art::dex::CodeItem *artCodeItem = GetDexFile(dexFile)->GetCodeItem(codeOff); + if (artCodeItem == nullptr) { + return; + } + art::CodeItemDataAccessor accessor(*GetDexFile(dexFile), artCodeItem); + art::CatchHandlerIterator handlerIterator(accessor, *GetTryItem(this)); + + while (handlerIterator.HasNext()) { + art::dex::TypeIndex artTypeIndex = handlerIterator.GetHandlerTypeIndex(); + uint32_t address = handlerIterator.GetHandlerAddress(); + uint16_t typeIdx = artTypeIndex.index_; + items.emplace_back(IDexCatchHandlerItem(typeIdx, address)); + handlerIterator.Next(); + } +} +// =====IDexTryItem end===================== +// =====IDexProtoIdItem start=============== +const art::dex::ProtoId *GetProtoId(const IDexProtoIdItem *item) { + return reinterpret_cast(item); +} + +const IDexProtoIdItem *GetDexProtoIdInstance(const art::DexFile &dexFile, uint32_t index) { + return reinterpret_cast(&(dexFile.GetProtoId(art::dex::ProtoIndex(index)))); +} + +const IDexProtoIdItem *IDexProtoIdItem::GetInstance(const IDexFile &dexFile, uint32_t index) { + return reinterpret_cast(&(GetDexFile(dexFile)->GetProtoId(art::dex::ProtoIndex(index)))); +} + +const char *IDexProtoIdItem::GetReturnTypeName(const IDexFile &dexFile) const { + return GetDexFile(dexFile)->GetReturnTypeDescriptor(*GetProtoId(this)); +} + +uint32_t IDexProtoIdItem::GetParameterTypeSize(const IDexFile &dexFile) const { + const art::dex::TypeList *typeList = GetDexFile(dexFile)->GetProtoParameters(*GetProtoId(this)); + if (typeList == nullptr) { + return 0; + } + return typeList->Size(); +} + +void IDexProtoIdItem::GetParameterTypeIndexes(const IDexFile &dexFile, std::vector &indexes) const { + const art::dex::TypeList *typeList = 
GetDexFile(dexFile)->GetProtoParameters(*GetProtoId(this)); + if (typeList == nullptr) { + return; + } + for (uint32_t i = 0; i < typeList->Size(); i++) { + uint16_t typeIndex = typeList->GetTypeItem(i).type_idx_.index_; + indexes.push_back(typeIndex); + } +} + +std::string IDexProtoIdItem::GetDescriptor(const IDexFile &dexFile) const { + std::string desc = GetDexFile(dexFile)->GetProtoSignature(*GetProtoId(this)).ToString(); + return desc; +} + +const char *IDexProtoIdItem::GetShorty(const IDexFile &dexFile) const { + return GetDexFile(dexFile)->StringDataByIdx(GetProtoId(this)->shorty_idx_); +} +// =====IDexProtoIdItem end========= +// =====IDexMethodIdItem start====== +const art::dex::MethodId *GetMethodId(const IDexMethodIdItem *item) { + return reinterpret_cast(item); +} + +const IDexMethodIdItem *IDexMethodIdItem::GetInstance(const IDexFile &dexFile, uint32_t index) { + return reinterpret_cast(&(GetDexFile(dexFile)->GetMethodId(index))); +} + +const IDexMethodIdItem *GetDexMethodIdInstance(const art::DexFile &dexFile, uint32_t index) { + return reinterpret_cast(&(dexFile.GetMethodId(index))); +} + +uint32_t IDexMethodIdItem::GetClassIdx() const { + return GetMethodId(this)->class_idx_.index_; +} + +const char *IDexMethodIdItem::GetDefiningClassName(const IDexFile &dexFile) const { + return GetDexFile(dexFile)->StringByTypeIdx(GetMethodId(this)->class_idx_); +} + +uint16_t IDexMethodIdItem::GetProtoIdx() const { + return GetMethodId(this)->proto_idx_.index_; +} + +const IDexProtoIdItem *IDexMethodIdItem::GetProtoIdItem(const IDexFile &dexFile) const { + return IDexProtoIdItem::GetInstance(dexFile, GetMethodId(this)->proto_idx_.index_); +} + +uint32_t IDexMethodIdItem::GetNameIdx() const { + return GetMethodId(this)->name_idx_.index_; +} + +const char *IDexMethodIdItem::GetShortMethodName(const IDexFile &dexFile) const { + return GetDexFile(dexFile)->StringDataByIdx(GetMethodId(this)->name_idx_); +} + +std::string IDexMethodIdItem::GetDescriptor(const IDexFile &dexFile) const { + return GetDexFile(dexFile)->GetMethodSignature(*GetMethodId(this)).ToString(); +} +// =====IDexMethodIdItem end======== +// =====IDexMethodItem start======== +const art::ClassAccessor::Method *GetMethod(const IDexMethodItem *item) { + return reinterpret_cast(item); +} + +IDexMethodItem::IDexMethodItem(uint32_t methodIdx, uint32_t accessFlags, uint32_t codeOff) + : methodIdx(methodIdx), accessFlags(accessFlags), codeOff(codeOff) {} + +uint32_t IDexMethodItem::GetMethodCodeOff(const IDexMethodItem *item) const { + (void) item; + return codeOff; +} + +uint32_t IDexMethodItem::GetMethodIdx() const { + return methodIdx; +} + +const IDexMethodIdItem *IDexMethodItem::GetMethodIdItem(const IDexFile &dexFile) const { + return IDexMethodIdItem::GetInstance(dexFile, GetMethodIdx()); +} + +uint32_t IDexMethodItem::GetAccessFlags() const { + return accessFlags; +} + +bool IDexMethodItem::HasCode(const IDexFile &dexFile) const { + art::CodeItemDataAccessor accessor(*GetDexFile(dexFile), GetDexFile(dexFile)->GetCodeItem(GetMethodCodeOff(this))); + return accessor.HasCodeItem(); +} + +uint16_t IDexMethodItem::GetRegistersSize(const IDexFile &dexFile) const { + const art::dex::CodeItem *artCodeItem = GetDexFile(dexFile)->GetCodeItem(GetMethodCodeOff(this)); + if (artCodeItem == nullptr) { + return 0; + } + art::CodeItemDataAccessor accessor(*GetDexFile(dexFile), artCodeItem); + return accessor.RegistersSize(); +} + +uint16_t IDexMethodItem::GetInsSize(const IDexFile &dexFile) const { + const art::dex::CodeItem *artCodeItem = 
GetDexFile(dexFile)->GetCodeItem(GetMethodCodeOff(this)); + if (artCodeItem == nullptr) { + return 0; + } + art::CodeItemDataAccessor accessor(*GetDexFile(dexFile), artCodeItem); + return accessor.InsSize(); +} + +uint16_t IDexMethodItem::GetOutsSize(const IDexFile &dexFile) const { + const art::dex::CodeItem *artCodeItem = GetDexFile(dexFile)->GetCodeItem(GetMethodCodeOff(this)); + if (artCodeItem == nullptr) { + return 0; + } + art::CodeItemDataAccessor accessor(*GetDexFile(dexFile), artCodeItem); + return accessor.OutsSize(); +} + +uint16_t IDexMethodItem::GetTriesSize(const IDexFile &dexFile) const { + const art::dex::CodeItem *artCodeItem = GetDexFile(dexFile)->GetCodeItem(GetMethodCodeOff(this)); + if (artCodeItem == nullptr) { + return 0; + } + art::CodeItemDataAccessor accessor(*GetDexFile(dexFile), artCodeItem); + return accessor.TriesSize(); +} + +uint32_t IDexMethodItem::GetInsnsSize(const IDexFile &dexFile) const { + const art::dex::CodeItem *artCodeItem = GetDexFile(dexFile)->GetCodeItem(GetMethodCodeOff(this)); + if (artCodeItem == nullptr) { + return 0; + } + art::CodeItemDataAccessor accessor(*GetDexFile(dexFile), artCodeItem); + return accessor.InsnsSizeInCodeUnits(); +} + +const uint16_t *IDexMethodItem::GetInsnsByOffset(const IDexFile &dexFile, uint32_t offset) const { + const art::dex::CodeItem *artCodeItem = GetDexFile(dexFile)->GetCodeItem(GetMethodCodeOff(this)); + if (artCodeItem == nullptr) { + return nullptr; + } + art::CodeItemDataAccessor accessor(*GetDexFile(dexFile), artCodeItem); + return (accessor.Insns() + offset); +} + +uint32_t IDexMethodItem::GetCodeOff() const { + return GetMethodCodeOff(this); +} + +bool IDexMethodItem::IsStatic() const { + bool isStatic = ((GetAccessFlags() & art::kAccStatic) > 0) ? true : false; + return isStatic; +} + +void IDexMethodItem::GetPCInstructionMap(const IDexFile &dexFile, + std::map> &pcInstructionMap) const { + const art::dex::CodeItem *artCodeItem = GetDexFile(dexFile)->GetCodeItem(GetMethodCodeOff(this)); + if (artCodeItem == nullptr) { + return; + } + std::unique_ptr artCodeItemDataAccessor = + std::make_unique(*GetDexFile(dexFile), artCodeItem); + art::CodeItemDataAccessor &accessor = *(artCodeItemDataAccessor.get()); + + for (const art::DexInstructionPcPair &pair : accessor) { + uint32_t dexPc = pair.DexPc(); + const art::Instruction &instruction = pair.Inst(); + std::unique_ptr dexInstruction = std::make_unique(instruction); + dexInstruction->Init(); + pcInstructionMap[dexPc] = std::move(dexInstruction); + } +} + +void IDexMethodItem::GetTryItems(const IDexFile &dexFile, std::vector &tryItems) const { + const art::dex::CodeItem *artCodeItem = GetDexFile(dexFile)->GetCodeItem(GetMethodCodeOff(this)); + if (artCodeItem == nullptr) { + return; + } + art::CodeItemDataAccessor accessor(*GetDexFile(dexFile), artCodeItem); + uint32_t triesSize = accessor.TriesSize(); + if (triesSize == 0) { + return; + } + const art::dex::TryItem *artTryItems = accessor.TryItems().begin(); + for (uint32_t tryIndex = 0; tryIndex < triesSize; tryIndex++) { + const art::dex::TryItem *artTry = artTryItems + tryIndex; + tryItems.push_back(IDexTryItem::GetInstance(artTry)); + } +} + +void IDexMethodItem::GetSrcPositionInfo(const IDexFile &dexFile, std::map &srcPosInfo) const { + const art::dex::CodeItem *artCodeItem = GetDexFile(dexFile)->GetCodeItem(GetMethodCodeOff(this)); + if (artCodeItem == nullptr) { + return; + } + art::CodeItemDebugInfoAccessor accessor(*GetDexFile(dexFile), artCodeItem, GetMethodIdx()); + bool succ = 
accessor.DecodeDebugPositionInfo([&](const art::DexFile::PositionInfo& entry) { + srcPosInfo.emplace(entry.address_, entry.line_); + return false; + }); + if (!succ) { + LOG(ERROR) << "ReadFileToString failed"; + return; + } +} + +void IDexMethodItem::GetSrcLocalInfo(const IDexFile &dexFile, + std::map>> &srcLocal) const { + const art::dex::CodeItem *artCodeItem = GetDexFile(dexFile)->GetCodeItem(GetMethodCodeOff(this)); + if (artCodeItem == nullptr) { + return; + } + art::CodeItemDebugInfoAccessor accessor(*GetDexFile(dexFile), artCodeItem, this->GetMethodIdx()); + (void)accessor.DecodeDebugLocalInfo(this->IsStatic(), this->GetMethodIdx(), + [&](const art::DexFile::LocalInfo &entry) { + if (entry.name_ != nullptr && entry.descriptor_ != nullptr) { + std::string signature = entry.signature_ != nullptr ? entry.signature_ : ""; + auto item = std::make_tuple(entry.name_, entry.descriptor_, signature); + srcLocal[entry.reg_].insert(item); + } + }); +} +// =====IDexMethodItem end========== +// =====IDexFieldIdItem start======= +const art::dex::FieldId *GetFieldId(const IDexFieldIdItem *item) { + return reinterpret_cast(item); +} + +const IDexFieldIdItem *IDexFieldIdItem::GetInstance(const IDexFile &dexFile, uint32_t index) { + return reinterpret_cast(&(GetDexFile(dexFile)->GetFieldId(index))); +} + +const IDexFieldIdItem *GetDexFieldIdInstance(const art::DexFile &dexFile, uint32_t index) { + return reinterpret_cast(&(dexFile.GetFieldId(index))); +} + +uint32_t IDexFieldIdItem::GetClassIdx() const { + return GetFieldId(this)->class_idx_.index_; +} + +const char *IDexFieldIdItem::GetDefiningClassName(const IDexFile &dexFile) const { + return GetDexFile(dexFile)->StringByTypeIdx(GetFieldId(this)->class_idx_); +} + +uint32_t IDexFieldIdItem::GetTypeIdx() const { + return GetFieldId(this)->type_idx_.index_; +} + +const char *IDexFieldIdItem::GetFieldTypeName(const IDexFile &dexFile) const { + return GetDexFile(dexFile)->StringByTypeIdx(GetFieldId(this)->type_idx_); +} + +uint32_t IDexFieldIdItem::GetNameIdx() const { + return GetFieldId(this)->name_idx_.index_; +} + +const char *IDexFieldIdItem::GetShortFieldName(const IDexFile &dexFile) const { + return GetDexFile(dexFile)->StringDataByIdx(GetFieldId(this)->name_idx_); +} +// =====IDexFieldIdItem end========= +// =====IDexFieldItem start========= +IDexFieldItem::IDexFieldItem(uint32_t fieldIndex, uint32_t accessFlags) + : fieldIndex(fieldIndex), accessFlags(accessFlags) {} + +uint32_t IDexFieldItem::GetFieldIdx() const { + return fieldIndex; +} + +const IDexFieldIdItem *IDexFieldItem::GetFieldIdItem(const IDexFile &dexFile) const { + return IDexFieldIdItem::GetInstance(dexFile, GetFieldIdx()); +} + +uint32_t IDexFieldItem::GetAccessFlags() const { + return accessFlags; +} +// =====IDexFieldItem end=========== +// =====IDexAnnotation start======== +const art::dex::AnnotationItem *GetAnnotationItem(const IDexAnnotation *item) { + return reinterpret_cast(item); +} + +const IDexAnnotation *IDexAnnotation::GetInstance(const void *data) { + return reinterpret_cast(data); +} + +uint8_t IDexAnnotation::GetVisibility() const { + return GetAnnotationItem(this)->visibility_; +} + +const uint8_t *IDexAnnotation::GetAnnotationData() const { + return GetAnnotationItem(this)->annotation_; +} +// =====IDexAnnotation end========== +// =====IDexAnnotationSet start===== +const art::dex::AnnotationSetItem *GetAnnotationSet(const IDexAnnotationSet *item) { + return reinterpret_cast(item); +} + +const IDexAnnotationSet *IDexAnnotationSet::GetInstance(const void *data) { 
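+  // Thin wrapper view: the pointer is simply reinterpreted as the underlying art::dex::AnnotationSetItem; no data is copied or owned.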
+ return reinterpret_cast(data); +} + +void IDexAnnotationSet::GetAnnotations(const IDexFile &dexFile, std::vector &items) const { + if (!IsValid()) { + return; + } + + for (uint32_t i = 0; i < GetAnnotationSet(this)->size_; i++) { + const art::dex::AnnotationItem *artAnnotationItem = + GetDexFile(dexFile)->GetAnnotationItem(GetAnnotationSet(this), i); + items.push_back(IDexAnnotation::GetInstance(artAnnotationItem)); + } +} + +bool IDexAnnotationSet::IsValid() const { + return (GetAnnotationSet(this) != nullptr) && (GetAnnotationSet(this)->size_ > 0); +} +// =====IDexAnnotationSet end========== +// =====IDexAnnotationSetList start==== +const art::dex::AnnotationSetRefList *GetAnnotationSetList(const IDexAnnotationSetList *list) { + return reinterpret_cast(list); +} + +const IDexAnnotationSetList *IDexAnnotationSetList::GetInstance(const void *data) { + return reinterpret_cast(data); +} + +void IDexAnnotationSetList::GetAnnotationSets(const IDexFile &dexFile, + std::vector &items) const { + for (uint32_t i = 0; i < GetAnnotationSetList(this)->size_; i++) { + const art::dex::AnnotationSetItem *setItem = + GetDexFile(dexFile)->GetSetRefItemItem(GetAnnotationSetList(this)->list_ + i); + // setItem == nullptr is a valid parameter, will check valid in later using. + items.push_back(IDexAnnotationSet::GetInstance(setItem)); + } +} +// =====IDexAnnotationSetList end====== +// =====IDexFieldAnnotations start===== +const art::dex::FieldAnnotationsItem *GetFieldAnnotations(const IDexFieldAnnotations *item) { + return reinterpret_cast(item); +} + +const IDexFieldAnnotations *IDexFieldAnnotations::GetInstance(const void *data) { + return reinterpret_cast(data); +} + +const IDexFieldIdItem *IDexFieldAnnotations::GetFieldIdItem(const IDexFile &dexFile) const { + return IDexFieldIdItem::GetInstance(dexFile, GetFieldAnnotations(this)->field_idx_); +} + +const IDexAnnotationSet *IDexFieldAnnotations::GetAnnotationSet(const IDexFile &dexFile) const { + const art::dex::AnnotationSetItem *artAnnotationSet = + GetDexFile(dexFile)->GetFieldAnnotationSetItem(*GetFieldAnnotations(this)); + // artAnnotationSet == nullptr is a valid parameter, will check valid in later using. + return IDexAnnotationSet::GetInstance(artAnnotationSet); +} + +uint32_t IDexFieldAnnotations::GetFieldIdx() const { + return GetFieldAnnotations(this)->field_idx_; +} +// =====IDexFieldAnnotations end======= +// =====IDexMethodAnnotations start==== +const art::dex::MethodAnnotationsItem *GetMethodAnnotations(const IDexMethodAnnotations *item) { + return reinterpret_cast(item); +} + +const IDexMethodAnnotations *IDexMethodAnnotations::GetInstance(const void *data) { + return reinterpret_cast(data); +} + +const IDexMethodIdItem *IDexMethodAnnotations::GetMethodIdItem(const IDexFile &dexFile) const { + return IDexMethodIdItem::GetInstance(dexFile, GetMethodAnnotations(this)->method_idx_); +} + +const IDexAnnotationSet *IDexMethodAnnotations::GetAnnotationSet(const IDexFile &dexFile) const { + const art::dex::AnnotationSetItem *artAnnotationSetItem = + GetDexFile(dexFile)->GetMethodAnnotationSetItem(*GetMethodAnnotations(this)); + // artAnnotationSetItem == nullptr is a valid parameter, will check valid in later using. 
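+  // Callers should check IDexAnnotationSet::IsValid() before iterating; it covers the nullptr case as well as an empty set.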
+ return IDexAnnotationSet::GetInstance(artAnnotationSetItem); +} + +uint32_t IDexMethodAnnotations::GetMethodIdx() const { + return GetMethodAnnotations(this)->method_idx_; +} +// =====IDexMethodAnnotations end======== +// =====IDexParameterAnnotations start=== +const art::dex::ParameterAnnotationsItem *GetParameterAnnotations(const IDexParameterAnnotations *item) { + return reinterpret_cast(item); +} + +const IDexParameterAnnotations *IDexParameterAnnotations::GetInstance(const void *data) { + return reinterpret_cast(data); +} + +const IDexMethodIdItem *IDexParameterAnnotations::GetMethodIdItem(const IDexFile &dexFile) const { + return IDexMethodIdItem::GetInstance(dexFile, GetMethodIdx()); +} + +const IDexAnnotationSetList *IDexParameterAnnotations::GetAnnotationSetList(const IDexFile &dexFile) const { + const art::dex::AnnotationSetRefList *setRefList = + GetDexFile(dexFile)->GetParameterAnnotationSetRefList(GetParameterAnnotations(this)); + if (setRefList == nullptr) { + return nullptr; + } else { + return IDexAnnotationSetList::GetInstance(setRefList); + } +} + +uint32_t IDexParameterAnnotations::GetMethodIdx() const { + return GetParameterAnnotations(this)->method_idx_; +} +// =====IDexParameterAnnotations end===== +// =====IDexAnnotationsDirectory start=== +const art::dex::AnnotationsDirectoryItem *GetAnnotationsDirectory(const IDexAnnotationsDirectory *item) { + return reinterpret_cast(item); +} + +const IDexAnnotationsDirectory *IDexAnnotationsDirectory::GetInstance(const void *data) { + return reinterpret_cast(data); +} + +bool IDexAnnotationsDirectory::HasClassAnnotationSet(const IDexFile &dexFile) const { + const art::dex::AnnotationSetItem *classAnnotationSetItem = + GetDexFile(dexFile)->GetClassAnnotationSet(GetAnnotationsDirectory(this)); + return classAnnotationSetItem != nullptr; +} + +bool IDexAnnotationsDirectory::HasFieldAnnotationsItems(const IDexFile &dexFile) const { + const art::dex::FieldAnnotationsItem *artFieldAnnotationsItem = + GetDexFile(dexFile)->GetFieldAnnotations(GetAnnotationsDirectory(this)); + return artFieldAnnotationsItem != nullptr; +} + +bool IDexAnnotationsDirectory::HasMethodAnnotationsItems(const IDexFile &dexFile) const { + const art::dex::MethodAnnotationsItem *artMethodAnnotationsItem = + GetDexFile(dexFile)->GetMethodAnnotations(GetAnnotationsDirectory(this)); + return artMethodAnnotationsItem != nullptr; +} + +bool IDexAnnotationsDirectory::HasParameterAnnotationsItems(const IDexFile &dexFile) const { + const art::dex::ParameterAnnotationsItem *parameterItem = + GetDexFile(dexFile)->GetParameterAnnotations(GetAnnotationsDirectory(this)); + return parameterItem != nullptr; +} + +const IDexAnnotationSet *IDexAnnotationsDirectory::GetClassAnnotationSet(const IDexFile &dexFile) const { + const art::dex::AnnotationSetItem *classAnnotationSetItem = + GetDexFile(dexFile)->GetClassAnnotationSet(GetAnnotationsDirectory(this)); + // classAnnotationSetItem == nullptr is a valid parameter, will check valid in later using. 
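+  // Same contract as above: the returned wrapper may be backed by nullptr and must be validated via IsValid().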
+ return IDexAnnotationSet::GetInstance(classAnnotationSetItem); +} + +void IDexAnnotationsDirectory::GetFieldAnnotationsItems(const IDexFile &dexFile, + std::vector &items) const { + const art::dex::FieldAnnotationsItem *artFieldAnnotationsItem = + GetDexFile(dexFile)->GetFieldAnnotations(GetAnnotationsDirectory(this)); + if (artFieldAnnotationsItem == nullptr) { + return; + } + for (uint32_t i = 0; i < GetAnnotationsDirectory(this)->fields_size_; i++) { + items.push_back(IDexFieldAnnotations::GetInstance(artFieldAnnotationsItem + i)); + } +} + +void IDexAnnotationsDirectory::GetMethodAnnotationsItems(const IDexFile &dexFile, + std::vector &items) const { + const art::dex::MethodAnnotationsItem *artMethodAnnotationsItem = + GetDexFile(dexFile)->GetMethodAnnotations(GetAnnotationsDirectory(this)); + if (artMethodAnnotationsItem == nullptr) { + return; + } + for (uint32_t i = 0; i < GetAnnotationsDirectory(this)->methods_size_; i++) { + items.push_back(IDexMethodAnnotations::GetInstance(artMethodAnnotationsItem + i)); + } +} + +void IDexAnnotationsDirectory::GetParameterAnnotationsItems(const IDexFile &dexFile, + std::vector &items) + const { + const art::dex::ParameterAnnotationsItem *parameterItem = + GetDexFile(dexFile)->GetParameterAnnotations(GetAnnotationsDirectory(this)); + if (parameterItem == nullptr) { + return; + } + for (uint32_t i = 0; i < GetAnnotationsDirectory(this)->parameters_size_; i++) { + items.push_back(IDexParameterAnnotations::GetInstance(parameterItem + i)); + } +} +// =====IDexAnnotationsDirectory end===== +// =====IDexClassItem start============== +const IDexClassItem *GetDexClassInstance(const art::DexFile &dexFile, uint32_t index) { + return reinterpret_cast(&(dexFile.GetClassDef(index))); +} + +uint32_t IDexClassItem::GetClassIdx() const { + return GetClassDef(this)->class_idx_.index_; +} + +const char *IDexClassItem::GetClassName(const IDexFile &dexFile) const { + return GetDexFile(dexFile)->StringByTypeIdx(GetClassDef(this)->class_idx_); +} + +uint32_t IDexClassItem::GetAccessFlags() const { + return GetClassDef(this)->access_flags_; +} + +uint32_t IDexClassItem::GetSuperclassIdx() const { + return GetClassDef(this)->superclass_idx_.index_; +} + +const char *IDexClassItem::GetSuperClassName(const IDexFile &dexFile) const { + if (!GetClassDef(this)->superclass_idx_.IsValid()) { + return nullptr; + } + return GetDexFile(dexFile)->StringByTypeIdx(GetClassDef(this)->superclass_idx_); +} + +uint32_t IDexClassItem::GetInterfacesOff() const { + return GetClassDef(this)->interfaces_off_; +} + +void IDexClassItem::GetInterfaceTypeIndexes(const IDexFile &dexFile, std::vector &indexes) const { + const art::dex::TypeList *interfaces = GetDexFile(dexFile)->GetInterfacesList(*GetClassDef(this)); + if (interfaces == nullptr) { + return; + } + for (uint32_t i = 0; i < interfaces->Size(); i++) { + const art::dex::TypeItem &typeItem = interfaces->GetTypeItem(i); + art::dex::TypeIndex artTypeIndex = typeItem.type_idx_; + indexes.push_back(artTypeIndex.index_); + } +} + +void IDexClassItem::GetInterfaceNames(const IDexFile &dexFile, std::vector &names) const { + const art::dex::TypeList *interfaces = GetDexFile(dexFile)->GetInterfacesList(*GetClassDef(this)); + if (interfaces == nullptr) { + return; + } + for (uint32_t i = 0; i < interfaces->Size(); i++) { + const art::dex::TypeItem &typeItem = interfaces->GetTypeItem(i); + art::dex::TypeIndex artTypeIndex = typeItem.type_idx_; + const char *interfaceName = GetDexFile(dexFile)->StringByTypeIdx(artTypeIndex); + 
names.push_back(interfaceName); + } +} + +uint32_t IDexClassItem::GetSourceFileIdx() const { + return GetClassDef(this)->source_file_idx_.index_; +} + +const char *IDexClassItem::GetJavaSourceFileName(const IDexFile &dexFile) const { + if (GetClassDef(this)->source_file_idx_.index_ == art::dex::kDexNoIndex) { + return nullptr; + } + return GetDexFile(dexFile)->StringDataByIdx(GetClassDef(this)->source_file_idx_); +} + +bool IDexClassItem::HasAnnotationsDirectory(const IDexFile &dexFile) const { + return (GetDexFile(dexFile)->GetAnnotationsDirectory(*GetClassDef(this)) != nullptr); +} + +uint32_t IDexClassItem::GetAnnotationsOff() const { + return GetClassDef(this)->annotations_off_; +} + +const IDexAnnotationsDirectory *IDexClassItem::GetAnnotationsDirectory(const IDexFile &dexFile) const { + const art::dex::AnnotationsDirectoryItem *artDirectoryItem = + GetDexFile(dexFile)->GetAnnotationsDirectory(*GetClassDef(this)); + if (artDirectoryItem == nullptr) { + return nullptr; + } else { + return IDexAnnotationsDirectory::GetInstance(artDirectoryItem); + } +} + +uint32_t IDexClassItem::GetClassDataOff() const { + return GetClassDef(this)->class_data_off_; +} + +std::vector> IDexClassItem::GetFields(const IDexFile &dexFile) const { + std::vector> fields; + art::ClassAccessor accessor(*GetDexFile(dexFile), *GetClassDef(this)); + for (const art::ClassAccessor::Field &field : accessor.GetFields()) { + uint32_t fieldIdx = field.GetIndex(); + uint32_t accessFlags = field.GetAccessFlags(); + std::unique_ptr dexFieldItem = std::make_unique(fieldIdx, accessFlags); + fields.push_back(std::move(dexFieldItem)); + } + return fields; +} + +bool IDexClassItem::HasStaticValuesList() const { + return (GetClassDef(this)->static_values_off_ != 0); +} + +const uint8_t *IDexClassItem::GetStaticValuesList(const IDexFile &dexFile) const { + return GetDexFile(dexFile)->GetEncodedStaticFieldValuesArray(*GetClassDef(this)); +} + +bool IDexClassItem::IsInterface() const { + bool isInterface = ((GetAccessFlags() & art::kAccInterface) > 0) ? 
true : false; + return isInterface; +} + +bool IDexClassItem::IsSuperclassValid() const { + const art::dex::ClassDef *artClassDef = GetClassDef(this); + return artClassDef->superclass_idx_.IsValid(); +} + +std::vector> IDexClassItem::GetMethodsIdxAndFlag(const IDexFile &dexFile, + bool isVirtual) const { + std::vector> methodsIdx; + art::ClassAccessor accessor(*GetDexFile(dexFile), *GetClassDef(this)); + if (isVirtual) { + for (const art::ClassAccessor::Method &method : accessor.GetVirtualMethods()) { + methodsIdx.push_back(std::make_pair(method.GetIndex(), method.GetAccessFlags())); + } + } else { + for (const art::ClassAccessor::Method &method : accessor.GetDirectMethods()) { + methodsIdx.push_back(std::make_pair(method.GetIndex(), method.GetAccessFlags())); + } + } + return methodsIdx; +} + +std::unique_ptr IDexClassItem::GetDirectMethod(const IDexFile &dexFile, uint32_t index) const { + art::ClassAccessor accessor(*GetDexFile(dexFile), *GetClassDef(this)); + auto directMethodIt = accessor.GetDirectMethods().begin(); + for (uint32_t i = 0; i < index; ++i) { + ++directMethodIt; + } + std::unique_ptr dexMethodItem = std::make_unique(directMethodIt->GetIndex(), + directMethodIt->GetAccessFlags(), directMethodIt->GetCodeItemOffset()); + return dexMethodItem; +} + +std::unique_ptr IDexClassItem::GetVirtualMethod(const IDexFile &dexFile, uint32_t index) const { + art::ClassAccessor accessor(*GetDexFile(dexFile), *GetClassDef(this)); + auto virtualMethodIt = accessor.GetVirtualMethods().begin(); + for (uint32_t i = 0; i < index; ++i) { + ++virtualMethodIt; + } + std::unique_ptr dexMethodItem = std::make_unique(virtualMethodIt->GetIndex(), + virtualMethodIt->GetAccessFlags(), virtualMethodIt->GetCodeItemOffset()); + return dexMethodItem; +} +// =====IDexClassItem end================ +// =====Header start==================== +const art::DexFile *GetDexFile(const IDexHeader *header) { + return reinterpret_cast(header); +} + +const IDexHeader *IDexHeader::GetInstance(const IDexFile &dexFile) { + return reinterpret_cast(dexFile.GetData()); +} + +uint8_t IDexHeader::GetMagic(uint32_t index) const { + return GetDexFile(this)->GetHeader().magic_[index]; +} + +uint32_t IDexHeader::GetChecksum() const { + return GetDexFile(this)->GetHeader().checksum_; +} + +std::string IDexHeader::GetSignature() const { + static const char *const kHex = "0123456789abcdef"; + static constexpr size_t kHexNum = 16; + static constexpr size_t kSignatureSize = 20; + const uint8_t *signature = GetDexFile(this)->GetHeader().signature_; + std::string result; + for (size_t i = 0; i < kSignatureSize; ++i) { + uint8_t value = signature[i]; + result.push_back(kHex[value / kHexNum]); + result.push_back(kHex[value % kHexNum]); + } + return result; +} + +uint32_t IDexHeader::GetFileSize() const { + return GetDexFile(this)->GetHeader().file_size_; +} + +uint32_t IDexHeader::GetHeaderSize() const { + return GetDexFile(this)->GetHeader().header_size_; +} + +uint32_t IDexHeader::GetEndianTag() const { + return GetDexFile(this)->GetHeader().endian_tag_; +} + +uint32_t IDexHeader::GetLinkSize() const { + return GetDexFile(this)->GetHeader().link_size_; +} + +uint32_t IDexHeader::GetLinkOff() const { + return GetDexFile(this)->GetHeader().link_off_; +} + +uint32_t IDexHeader::GetMapOff() const { + return GetDexFile(this)->GetHeader().map_off_; +} + +uint32_t IDexHeader::GetStringIdsSize() const { + return GetDexFile(this)->GetHeader().string_ids_size_; +} + +uint32_t IDexHeader::GetStringIdsOff() const { + return 
GetDexFile(this)->GetHeader().string_ids_off_; +} + +uint32_t IDexHeader::GetTypeIdsSize() const { + return GetDexFile(this)->GetHeader().type_ids_size_; +} + +uint32_t IDexHeader::GetTypeIdsOff() const { + return GetDexFile(this)->GetHeader().type_ids_off_; +} + +uint32_t IDexHeader::GetProtoIdsSize() const { + return GetDexFile(this)->GetHeader().proto_ids_size_; +} + +uint32_t IDexHeader::GetProtoIdsOff() const { + return GetDexFile(this)->GetHeader().proto_ids_off_; +} + +uint32_t IDexHeader::GetFieldIdsSize() const { + return GetDexFile(this)->GetHeader().field_ids_size_; +} + +uint32_t IDexHeader::GetFieldIdsOff() const { + return GetDexFile(this)->GetHeader().field_ids_off_; +} + +uint32_t IDexHeader::GetMethodIdsSize() const { + return GetDexFile(this)->GetHeader().method_ids_size_; +} + +uint32_t IDexHeader::GetMethodIdsOff() const { + return GetDexFile(this)->GetHeader().method_ids_off_; +} + +uint32_t IDexHeader::GetClassDefsSize() const { + return GetDexFile(this)->GetHeader().class_defs_size_; +} + +uint32_t IDexHeader::GetClassDefsOff() const { + return GetDexFile(this)->GetHeader().class_defs_off_; +} + +uint32_t IDexHeader::GetDataSize() const { + return GetDexFile(this)->GetHeader().data_size_; +} + +uint32_t IDexHeader::GetDataOff() const { + return GetDexFile(this)->GetHeader().data_off_; +} + +std::string IDexHeader::GetDexVesion() const { + auto magic = reinterpret_cast(GetDexFile(this)->GetHeader().magic_); + std::string magicStr(magic); + size_t totalLength = magicStr.length(); + size_t pos = totalLength - kDexFileVersionStringLength; + std::string res = magicStr.substr(pos, kDexFileVersionStringLength); + return res; +} +// =====Header end=================== +// =====MapList start================ +const art::dex::MapList *GetMapList(const IDexMapList *list) { + return reinterpret_cast(list); +} + +const IDexMapList *IDexMapList::GetInstance(const void *data) { + return reinterpret_cast(data); +} + +uint32_t IDexMapList::GetSize() const { + return static_cast(GetMapList(this)->Size()); +} + +uint16_t IDexMapList::GetType(uint32_t index) const { + return GetMapList(this)->list_[index].type_; +} + +uint32_t IDexMapList::GetTypeSize(uint32_t index) const { + return GetMapList(this)->list_[index].size_; +} +// =====MapList end================== +// =====ResolvedMethodHandleItem start===== +const art::dex::MethodHandleItem *GetMethodHandle(const ResolvedMethodHandleItem *item) { + return reinterpret_cast(item); +} + +const ResolvedMethodHandleItem *ResolvedMethodHandleItem::GetInstance(const IDexFile &dexFile, uint32_t index) { + return reinterpret_cast(&(GetDexFile(dexFile)->GetMethodHandle(index))); +} + +bool ResolvedMethodHandleItem::IsInstance() const { + art::DexFile::MethodHandleType type = + static_cast(GetMethodHandle(this)->method_handle_type_); + return (type == art::DexFile::MethodHandleType::kInvokeInstance) || + (type == art::DexFile::MethodHandleType::kInvokeConstructor); +} + +bool ResolvedMethodHandleItem::IsInvoke() const { + art::DexFile::MethodHandleType type = + static_cast(GetMethodHandle(this)->method_handle_type_); + return (type == art::DexFile::MethodHandleType::kInvokeInstance) || + (type == art::DexFile::MethodHandleType::kInvokeConstructor) || + (type == art::DexFile::MethodHandleType::kInvokeStatic); +} + +const std::string ResolvedMethodHandleItem::GetDeclaringClass(const IDexFile &dexFile) const { + if (!IsInvoke()) { + return std::string(); + } + const char *declaringClass = nullptr; + const art::dex::MethodId &methodId = 
GetDexFile(dexFile)->GetMethodId(GetMethodHandle(this)->field_or_method_idx_); + declaringClass = GetDexFile(dexFile)->GetMethodDeclaringClassDescriptor(methodId); + return namemangler::EncodeName(declaringClass); +} + +const std::string ResolvedMethodHandleItem::GetMember(const IDexFile &dexFile) const { + if (!IsInvoke()) { + return std::string(); + } + const char *member = nullptr; + const art::dex::MethodId &methodId = GetDexFile(dexFile)->GetMethodId(GetMethodHandle(this)->field_or_method_idx_); + member = GetDexFile(dexFile)->GetMethodName(methodId); + return namemangler::EncodeName(member); +} + +const std::string ResolvedMethodHandleItem::GetMemeberProto(const IDexFile &dexFile) const { + if (!IsInvoke()) { + return std::string(); + } + const art::dex::MethodId &methodId = GetDexFile(dexFile)->GetMethodId(GetMethodHandle(this)->field_or_method_idx_); + std::string memberType = GetDexFile(dexFile)->GetMethodSignature(methodId).ToString(); + return GetDeclaringClass(dexFile) + "_7C" + GetMember(dexFile) + "_7C" + namemangler::EncodeName(memberType); +} + +const ResolvedMethodType *ResolvedMethodHandleItem::GetMethodType(const IDexFile &dexFile) const { + return ResolvedMethodType::GetInstance(dexFile, *this); +} + +const std::string ResolvedMethodHandleItem::GetInvokeKind() const { + art::DexFile::MethodHandleType type = + static_cast(GetMethodHandle(this)->method_handle_type_); + switch (type) { + case art::DexFile::MethodHandleType::kInvokeStatic: + return std::string("invoke-static"); + case art::DexFile::MethodHandleType::kInvokeInstance: + return std::string("invoke-instance"); + case art::DexFile::MethodHandleType::kInvokeConstructor: + return std::string("invoke-constructor"); + default: + return std::string(); + } +} +// =====ResolvedMethodHandleItem end===== +// =====ResolvedMethodType start===== +const art::dex::MethodId *GetMethodHandleId(const ResolvedMethodType *type) { + return reinterpret_cast(type); +} + +const art::dex::ProtoId *GetMethodCallSiteId(const ResolvedMethodType *type) { + auto value = reinterpret_cast(type); + return reinterpret_cast(value & (~1)); +} + +const ResolvedMethodType *ResolvedMethodType::GetInstance(const IDexFile &dexFile, + const ResolvedMethodHandleItem &item) { + const art::dex::MethodId *methodId = + &(GetDexFile(dexFile)->GetMethodId(GetMethodHandle(&item)->field_or_method_idx_)); + if ((reinterpret_cast(methodId) & 0x1) == 1) { + std::cerr << "Invalid MethodId address" << std::endl; + return nullptr; + } + return reinterpret_cast(methodId); +} + +const ResolvedMethodType *ResolvedMethodType::GetInstance(const IDexFile &dexFile, uint32_t callSiteId) { + const art::dex::ProtoId *methodId = &(GetDexFile(dexFile)->GetProtoId((art::dex::ProtoIndex)callSiteId)); + if ((reinterpret_cast(methodId) & 0x1) == 1) { + std::cerr << "Invalid MethodId address" << std::endl; + return nullptr; + } + return reinterpret_cast(reinterpret_cast(methodId) | 0x1); +} + +const std::string ResolvedMethodType::GetReturnType(const IDexFile &dexFile) const { + std::string rawType = GetRawType(dexFile); + return SignatureReturnType(rawType); +} + +const std::string ResolvedMethodType::GetRawType(const IDexFile &dexFile) const { + auto value = reinterpret_cast(this); + if ((value & 0x1) == 0) { + return GetDexFile(dexFile)->GetMethodSignature(*GetMethodHandleId(this)).ToString(); + } + return GetDexFile(dexFile)->GetProtoSignature(*GetMethodCallSiteId(this)).ToString(); +} + +void ResolvedMethodType::GetArgTypes(const IDexFile &dexFile, std::list &types) const { + 
std::string rawType = GetRawType(dexFile); + SignatureTypes(rawType, types); +} +// =====ResolvedMethodType end======= +// =====ResolvedCallSiteIdItem start===== +const art::dex::CallSiteIdItem *GetCallSiteId(const ResolvedCallSiteIdItem *item) { + return reinterpret_cast(item); +} + +const ResolvedCallSiteIdItem *ResolvedCallSiteIdItem::GetInstance(const IDexFile &dexFile, uint32_t index) { + const art::dex::CallSiteIdItem &callSiteId = GetDexFile(dexFile)->GetCallSiteId(index); + art::CallSiteArrayValueIterator it(*GetDexFile(dexFile), callSiteId); + if (it.Size() < 3) { // 3 stands for the min size of call site values. + std::cerr << "ERROR: Call site" << index << " has too few values." << std::endl; + return nullptr; + } + return reinterpret_cast(&callSiteId); +} + +uint32_t ResolvedCallSiteIdItem::GetDataOff() const { + return GetCallSiteId(this)->data_off_; +} + +uint32_t ResolvedCallSiteIdItem::GetMethodHandleIndex(const IDexFile &dexFile) const { + art::CallSiteArrayValueIterator it(*GetDexFile(dexFile), *GetCallSiteId(this)); + return static_cast(it.GetJavaValue().i); +} + +const std::string ResolvedCallSiteIdItem::GetMethodName(const IDexFile &dexFile) const { + art::CallSiteArrayValueIterator it(*GetDexFile(dexFile), *GetCallSiteId(this)); + it.Next(); + art::dex::StringIndex methodNameIdx = static_cast(it.GetJavaValue().i); + return std::string(GetDexFile(dexFile)->StringDataByIdx(methodNameIdx)); +} + +const std::string ResolvedCallSiteIdItem::GetProto(const IDexFile &dexFile) const { + art::CallSiteArrayValueIterator it(*GetDexFile(dexFile), *GetCallSiteId(this)); + it.Next(); + it.Next(); + uint32_t methodTypeIdx = static_cast(it.GetJavaValue().i); + const art::dex::ProtoId &methodTypeId = GetDexFile(dexFile)->GetProtoId((art::dex::ProtoIndex)methodTypeIdx); + return GetDexFile(dexFile)->GetProtoSignature(methodTypeId).ToString(); +} + +const ResolvedMethodType *ResolvedCallSiteIdItem::GetMethodType(const IDexFile &dexFile) const { + art::CallSiteArrayValueIterator it(*GetDexFile(dexFile), *GetCallSiteId(this)); + it.Next(); + it.Next(); + uint32_t methodTypeIdx = static_cast(it.GetJavaValue().i); + return ResolvedMethodType::GetInstance(dexFile, methodTypeIdx); +} + +void ResolvedCallSiteIdItem::GetLinkArgument(const IDexFile &dexFile, + std::list> &args) const { + art::CallSiteArrayValueIterator it(*GetDexFile(dexFile), *GetCallSiteId(this)); + it.Next(); + it.Next(); + it.Next(); + while (it.HasNext()) { + ValueType type = ValueType::kByte; + std::string value; + switch (it.GetValueType()) { + case art::EncodedArrayValueIterator::ValueType::kByte: { + type = ValueType::kByte; + value = android::base::StringPrintf("%u", it.GetJavaValue().b); + break; + } + case art::EncodedArrayValueIterator::ValueType::kString: { + // cannot define var in switch need brackets + type = ValueType::kString; + art::dex::StringIndex stringIdx = static_cast(it.GetJavaValue().i); + const char *str = GetDexFile(dexFile)->StringDataByIdx(stringIdx); + if (str == nullptr) { + LOG(FATAL) << "Invalid string from DexFile"; + } + value = str; + break; + } + case art::EncodedArrayValueIterator::ValueType::kMethodType: { + uint32_t protoIdx = static_cast(it.GetJavaValue().i); + const art::dex::ProtoId &protoId = GetDexFile(dexFile)->GetProtoId((art::dex::ProtoIndex)protoIdx); + type = ValueType::kMethodType; + value = GetDexFile(dexFile)->GetProtoSignature(protoId).ToString(); + break; + } + case art::EncodedArrayValueIterator::ValueType::kMethodHandle: { + type = ValueType::kMethodHandle; + value = 
android::base::StringPrintf("%d", it.GetJavaValue().i); + break; + } + case art::EncodedArrayValueIterator::ValueType::kType: { + art::dex::TypeIndex typeIdx = static_cast(it.GetJavaValue().i); + const char *str = GetDexFile(dexFile)->StringByTypeIdx(typeIdx); + if (str == nullptr) { + LOG(FATAL) << "Invalid string from DexFile"; + } + value = str; + type = ValueType::kType; + break; + } + case art::EncodedArrayValueIterator::ValueType::kNull: { + type = ValueType::kNull; + value = "null"; + break; + } + case art::EncodedArrayValueIterator::ValueType::kBoolean: { + value = it.GetJavaValue().z ? "true" : "false"; + type = ValueType::kBoolean; + break; + } + case art::EncodedArrayValueIterator::ValueType::kShort: { + type = ValueType::kShort; + value = android::base::StringPrintf("%d", it.GetJavaValue().s); + break; + } + case art::EncodedArrayValueIterator::ValueType::kChar: { + type = ValueType::kChar; + value = android::base::StringPrintf("%u", it.GetJavaValue().c); + break; + } + case art::EncodedArrayValueIterator::ValueType::kInt: { + type = ValueType::kInt; + value = android::base::StringPrintf("%d", it.GetJavaValue().i); + break; + } + case art::EncodedArrayValueIterator::ValueType::kLong: { + type = ValueType::kLong; + value = android::base::StringPrintf("%" PRId64 "64", it.GetJavaValue().j); + break; + } + case art::EncodedArrayValueIterator::ValueType::kFloat: { + type = ValueType::kFloat; + value = android::base::StringPrintf("%g", it.GetJavaValue().f); + break; + } + case art::EncodedArrayValueIterator::ValueType::kDouble: { + type = ValueType::kDouble; + value = android::base::StringPrintf("%g", it.GetJavaValue().d); + break; + } + default: { + LOG(FATAL) << "Unimplemented type " << it.GetValueType(); + } + } + args.push_back(std::make_pair(type, value)); + it.Next(); + } +} +// =====ResolvedCallSiteIdItem end=== +// =====LibDexFile start============= +LibDexFile::LibDexFile(std::unique_ptr artDexFile, + std::unique_ptr contentPtrIn) + : contentPtr(std::move(contentPtrIn)) { + dexFiles.push_back(std::move(artDexFile)); + dexFile = dexFiles[0].get(); + header = IDexHeader::GetInstance(*this); + mapList = IDexMapList::GetInstance(dexFile->GetMapList()); +} + +LibDexFile::~LibDexFile() { + dexFile = nullptr; +} + +bool LibDexFile::Open(const std::string &fileName) { + const bool kVerifyChecksum = true; + const bool kVerify = true; + // If the file is not a .dex file, the function tries .zip/.jar/.apk files, + // all of which are Zip archives with "classes.dex" inside. + if (!android::base::ReadFileToString(fileName, &content)) { + LOG(ERROR) << "ReadFileToString failed"; + return false; + } + // content size must > 0, otherwise previous step return false + if (!CheckFileSize(content.size())) { + return false; + } + const art::DexFileLoader dexFileLoader; + art::DexFileLoaderErrorCode errorCode; + std::string errorMsg; + if (!dexFileLoader.OpenAll(reinterpret_cast(content.data()), + content.size(), + fileName, + kVerify, + kVerifyChecksum, + &errorCode, + &errorMsg, + &dexFiles)) { + // Display returned error message to user. Note that this error behavior + // differs from the error messages shown by the original Dalvik dexdump. 
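+    // Only errorMsg is logged; the numeric errorCode filled in by the loader is not surfaced further.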
+ LOG(ERROR) << errorMsg; + return false; + } + if (dexFiles.size() != 1) { + LOG(FATAL) << "Only support one dexfile now"; + } + dexFile = dexFiles[0].get(); + header = IDexHeader::GetInstance(*this); + mapList = IDexMapList::GetInstance(dexFile->GetMapList()); + return true; +} + +const uint8_t *LibDexFile::GetBaseAddress() const { + return dexFile->Begin(); +} + +const IDexHeader *LibDexFile::GetHeader() const { + return header; +} + +const IDexMapList *LibDexFile::GetMapList() const { + return mapList; +} + +uint32_t LibDexFile::GetStringDataOffset(uint32_t index) const { + const art::dex::StringId &artStringId = dexFile->GetStringId(art::dex::StringIndex(index)); + return artStringId.string_data_off_; +} + +uint32_t LibDexFile::GetTypeDescriptorIndex(uint32_t index) const { + const art::dex::TypeId &artTypeId = dexFile->GetTypeId(art::dex::TypeIndex(static_cast(index))); + return artTypeId.descriptor_idx_.index_; +} + +const char *LibDexFile::GetStringByIndex(uint32_t index) const { + const char *str = dexFile->StringDataByIdx(art::dex::StringIndex(index)); + if (str == nullptr) { + LOG(FATAL) << "Invalid or truncated dex file"; + } + return str; +} + +const char *LibDexFile::GetStringByTypeIndex(uint32_t index) const { + return dexFile->StringByTypeIdx(art::dex::TypeIndex(index)); +} + +const IDexProtoIdItem *LibDexFile::GetProtoIdItem(uint32_t index) const { + return IDexProtoIdItem::GetInstance(*this, index); +} + +const IDexFieldIdItem *LibDexFile::GetFieldIdItem(uint32_t index) const { + return IDexFieldIdItem::GetInstance(*this, index); +} + +const IDexMethodIdItem *LibDexFile::GetMethodIdItem(uint32_t index) const { + return IDexMethodIdItem::GetInstance(*this, index); +} + +uint32_t LibDexFile::GetClassItemsSize() const { + return dexFile->NumClassDefs(); +} + +const IDexClassItem *LibDexFile::GetClassItem(uint32_t index) const { + return GetDexClassInstance(*dexFile, index); +} + +bool LibDexFile::IsNoIndex(uint32_t index) const { + uint16_t index16 = static_cast(index); + return (index16 == art::DexFile::kDexNoIndex16); +} + +uint32_t LibDexFile::GetTypeIdFromName(const std::string &className) const { + return dexFile->GetIndexForTypeId(*dexFile->FindTypeId(className.c_str())).index_; +} + +uint32_t LibDexFile::ReadUnsignedLeb128(const uint8_t **pStream) const { + uint32_t result = art::DecodeUnsignedLeb128(pStream); + return result; +} + +uint32_t LibDexFile::FindClassDefIdx(const std::string &descriptor) const { + const art::dex::TypeId* typeId = dexFile->FindTypeId(descriptor.c_str()); + if (typeId != nullptr) { + art::dex::TypeIndex typeIdx = dexFile->GetIndexForTypeId(*typeId); + size_t classDefsSize = dexFile->NumClassDefs(); + if (classDefsSize == 0) { + return art::dex::kDexNoIndex; + } + for (uint32_t i = 0; i < classDefsSize; ++i) { + const art::dex::ClassDef &class_def = dexFile->GetClassDef(i); + if (class_def.class_idx_ == typeIdx) { + return i; + } + } + } + return art::dex::kDexNoIndex; +} + +std::unordered_map LibDexFile::GetDefiningClassNameTypeIdMap() const { + std::unordered_map definingClassNameTypeIdMap; + uint32_t classIdsSize = dexFile->NumTypeIds(); + for (uint32_t classIdIndex = 0; classIdIndex < classIdsSize; classIdIndex++) { + std::string className = dexFile->StringByTypeIdx(art::dex::TypeIndex(classIdIndex)); + definingClassNameTypeIdMap.insert(std::make_pair(namemangler::EncodeName(className), classIdIndex)); + } + return definingClassNameTypeIdMap; +} + +void LibDexFile::DecodeDebugLocalInfo(const IDexMethodItem &iDexMethodItem, + 
DebugNewLocalCallback newLocalCb) { + debugNewLocalCb = newLocalCb; + const art::dex::CodeItem *codeItem = dexFile->GetCodeItem(iDexMethodItem.GetCodeOff()); + art::CodeItemDebugInfoAccessor accessor(*dexFile, codeItem, iDexMethodItem.GetMethodIdx()); + (void)accessor.DecodeDebugLocalInfo(iDexMethodItem.IsStatic(), iDexMethodItem.GetMethodIdx(), + [&](const art::DexFile::LocalInfo &entry) { + DebugNewLocalCb(nullptr, entry); + }); +} + +void LibDexFile::DecodeDebugPositionInfo(const IDexMethodItem &iDexMethodItem, + DebugNewPositionCallback newPositionCb) { + debugNewPositionCb = newPositionCb; + const art::dex::CodeItem *codeItem = dexFile->GetCodeItem(iDexMethodItem.GetCodeOff()); + art::CodeItemDebugInfoAccessor accessor(*dexFile, codeItem, iDexMethodItem.GetMethodIdx()); + (void)accessor.DecodeDebugPositionInfo([&](const art::DexFile::PositionInfo &entry) { + return DebugNewPositionCb(nullptr, entry); + }); +} + +const ResolvedCallSiteIdItem *LibDexFile::GetCallSiteIdItem(uint32_t idx) const { + return ResolvedCallSiteIdItem::GetInstance(*this, idx); +} + +const ResolvedMethodHandleItem *LibDexFile::GetMethodHandleItem(uint32_t idx) const { + return ResolvedMethodHandleItem::GetInstance(*this, idx); +} + +void LibDexFile::DebugNewLocalCb(void *context, const art::DexFile::LocalInfo &entry) const { + debugNewLocalCb(context, entry.reg_, entry.start_address_, entry.end_address_, + entry.name_ == nullptr ? "this" : entry.name_, entry.descriptor_ == nullptr ? "" : entry.descriptor_, + entry.signature_ == nullptr ? "" : entry.signature_); +} + +bool LibDexFile::DebugNewPositionCb(void *context, const art::DexFile::PositionInfo &entry) const { + debugNewPositionCb(context, entry.address_, entry.line_); + return false; +} + +bool LibDexFile::CheckFileSize(size_t fileSize) { + if (fileSize < sizeof(art::DexFile::Header)) { + LOG(ERROR) << "Invalid or truncated dex file"; + return false; + } + const art::DexFile::Header *fileHeader = reinterpret_cast(content.data()); + if (fileSize != fileHeader->file_size_) { + LOG(ERROR) << "Bad file size:" << fileSize << ", " << "expected:" << fileHeader->file_size_; + return false; + } + return true; +} +// =====LibDexFile end=============== +} // namespace maple diff --git a/src/hir2mpl/common/include/base64.h b/src/hir2mpl/common/include/base64.h new file mode 100644 index 0000000000000000000000000000000000000000..9ef98385f470522bfacff13cc21417ba0034e4bc --- /dev/null +++ b/src/hir2mpl/common/include/base64.h @@ -0,0 +1,38 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
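// Reviewer sketch (illustrative annotation, not part of this patch):
// LibDexFile::CheckFileSize above captures the dex header contract -- the bytes
// read must hold at least a full art::DexFile::Header and the header's
// file_size_ field must equal the on-disk size, roughly:
//   const auto *hdr = reinterpret_cast<const art::DexFile::Header *>(content.data());
//   bool ok = content.size() >= sizeof(art::DexFile::Header) &&
//             content.size() == hdr->file_size_;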
+ */ +#ifndef HIR2MPL_INCLUDE_COMMON_BASE64_H +#define HIR2MPL_INCLUDE_COMMON_BASE64_H +#include +#include +#include "types_def.h" + +namespace maple { +class Base64 { + public: + static std::string Encode(const uint8 *input, size_t length); + static uint8 *Decode(const std::string &input, size_t &lengthRet); + static std::map InitEncodeMap(); + static std::map InitDecodeMap(); + + private: + Base64() = default; + ~Base64() = default; + + static std::map encodeMap; + static std::map decodeMap; + static size_t DecodeLength(const std::string &input); +}; +} // namespace maple +#endif \ No newline at end of file diff --git a/src/hir2mpl/common/include/basic_io.h b/src/hir2mpl/common/include/basic_io.h new file mode 100644 index 0000000000000000000000000000000000000000..fd755bd4b6724b5583fd8ebb3ae212989d76201b --- /dev/null +++ b/src/hir2mpl/common/include/basic_io.h @@ -0,0 +1,211 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef HIR2MPL_INCLUDE_COMMON_BASIC_IO_H +#define HIR2MPL_INCLUDE_COMMON_BASIC_IO_H +#include +#include +#include +#include "mempool_allocator.h" +#include "types_def.h" +#include "mpl_logging.h" +#include "securec.h" + +namespace maple { +class BasicIOEndian { + public: + static const uint32 kLengthByte = 1; + static const uint32 kLengthWord = 2; + static const uint32 kLengthDWord = 4; + static const uint32 kLengthQWord = 8; + + static uint16 GetUInt16BigEndian(const uint8 *p) { + // for performance, validation of p must be done by caller + return static_cast((p[kBufIndex0] << kOffset8) | p[kBufIndex1]); + } + + static uint16 GetUInt16LittleEndian(const uint8 *p) { + // for performance, validation of p must be done by caller + return static_cast((p[kBufIndex1] << kOffset8) | p[kBufIndex0]); + } + + static uint32 GetUInt32BigEndian(const uint8 *p) { + // for performance, validation of p must be done by caller + uint32 value = 0; + for (uint32 i = 0; i < kLengthDWord; i++) { + value = (value << kOffset8) | p[i]; + } + return value; + } + + static uint32 GetUInt32LittleEndian(const uint8 *p) { + // for performance, validation of p must be done by caller + uint32 value = 0; + for (int i = static_cast(kLengthDWord) - 1; i >= 0; i--) { + value = (value << kOffset8) | p[i]; + } + return value; + } + + static uint64 GetUInt64BigEndian(const uint8 *p) { + // for performance, validation of p must be done by caller + uint64 value = 0; + for (uint32 i = 0; i < kLengthQWord; i++) { + value = (value << kOffset8) | p[i]; + } + return value; + } + + static uint64 GetUInt64LittleEndian(const uint8 *p) { + // for performance, validation of p must be done by caller + uint64 value = 0; + for (int i = static_cast(kLengthQWord) - 1; i >= 0; i--) { + value = (value << kOffset8) | p[i]; + } + return value; + } + + private: + static const uint32 kBufIndex0 = 0; + static const uint32 kBufIndex1 = 1; + static const uint32 kBufIndex2 = 2; + static const uint32 kBufIndex3 = 3; + static const uint32 kOffset8 = 8; + static const uint32 kOffset16 = 16; + static const 
uint32 kOffset24 = 24; + + BasicIOEndian() = default; + ~BasicIOEndian() = default; +}; + +class BasicIOMapFile { + public: + explicit BasicIOMapFile(const std::string &name); + BasicIOMapFile(const std::string &name, const uint8 *ptrIn, long lengthIn); + virtual ~BasicIOMapFile(); + bool OpenAndMap() { + return OpenAndMapImpl(); + } + void Close(); + static std::unique_ptr GenFileInMemory(const std::string &name, const uint8 *buf, size_t len); + + size_t GetLength() const { + return length; + } + + const uint8 *GetPtr() const { + return ptr; + } + + const uint8 *GetPtrOffset(size_t offset) const { + if (offset >= length) { + CHECK_FATAL(false, "offset is out of range"); + return nullptr; + } + return ptr + offset; + } + + const std::string &GetFileName() const { + return fileName; + } + + protected: + virtual bool OpenAndMapImpl(); + int fd; + const uint8 *ptr; + uint8 *ptrMemMap; + size_t length; + std::string fileName; +}; + +class BasicIORead { + public: + explicit BasicIORead(BasicIOMapFile &f, bool bigEndian = false); + virtual ~BasicIORead() = default; + uint8 ReadUInt8(); + uint8 ReadUInt8(bool &success); + int8 ReadInt8(); + int8 ReadInt8(bool &success); + char ReadChar(); + char ReadChar(bool &success); + uint16 ReadUInt16(); + uint16 ReadUInt16(bool &success); + int16 ReadInt16(); + int16 ReadInt16(bool &success); + uint32 ReadUInt32(); + uint32 ReadUInt32(bool &success); + int32 ReadInt32(); + int32 ReadInt32(bool &success); + uint64 ReadUInt64(); + uint64 ReadUInt64(bool &success); + int64 ReadInt64(); + int64 ReadInt64(bool &success); + float ReadFloat(); + float ReadFloat(bool &success); + double ReadDouble(); + double ReadDouble(bool &success); + void ReadBufferUInt8(uint8 *dst, uint32 length); + void ReadBufferUInt8(uint8 *dst, uint32 length, bool &success); + void ReadBufferInt8(int8 *dst, uint32 length); + void ReadBufferInt8(int8 *dst, uint32 length, bool &success); + void ReadBufferChar(char *dst, uint32 length); + void ReadBufferChar(char *dst, uint32 length, bool &success); + std::string ReadString(uint32 length); + std::string ReadString(uint32 length, bool &success); + + const uint8 *GetBuffer(uint32 size) const { + if (pos + size > file.GetLength()) { + ERR(kLncErr, "BasicIORead: not enough data"); + return nullptr; + } else { + return file.GetPtrOffset(pos); + } + } + + const uint8 *GetSafeBuffer(uint32 size) const { + CHECK_FATAL(pos + size <= file.GetLength(), "not enough data"); + return file.GetPtrOffset(pos); + } + + const uint8 *GetSafeBufferAt(uint32 pos0, uint32 size) const { + CHECK_FATAL(pos0 + size <= file.GetLength(), "not enough data"); + return file.GetPtrOffset(pos0); + } + + const uint8 *GetSafeDataAt(uint32 offset) const { + CHECK_FATAL(0 < offset && offset < file.GetLength(), "Invalid offset: 0x%x, file total size: 0x%x", + offset, file.GetLength()); + return file.GetPtrOffset(offset); + } + + uint32 GetPos() const { + return pos; + } + + void SetPos(uint32 p) { + CHECK_FATAL(p < file.GetLength(), "invalid pos %d exceeds the length of file %ld", p, file.GetLength()); + pos = p; + } + + size_t GetFileLength() const { + return file.GetLength(); + } + + protected: + BasicIOMapFile &file; + bool isBigEndian; + uint32 pos; +}; +} // namespace maple +#endif diff --git a/src/hir2mpl/common/include/enhance_c_checker.h b/src/hir2mpl/common/include/enhance_c_checker.h new file mode 100644 index 0000000000000000000000000000000000000000..a47d1f144c73b1212ec3df888f9f6a0b8cf00c9b --- /dev/null +++ b/src/hir2mpl/common/include/enhance_c_checker.h @@ -0,0 +1,95 
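// Reviewer sketch (illustrative annotation, not part of this patch): for a byte
// buffer {0x12, 0x34, 0x56, 0x78}, BasicIOEndian::GetUInt32BigEndian above yields
// 0x12345678 while GetUInt32LittleEndian yields 0x78563412. Note also the two
// read paths in BasicIORead: GetBuffer reports an out-of-range request with ERR
// and returns nullptr, whereas GetSafeBuffer aborts via CHECK_FATAL.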
@@ +/* + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef HIR2MPL_INCLUDE_COMMON_ENCCHECKER_H +#define HIR2MPL_INCLUDE_COMMON_ENCCHECKER_H +#include "feir_var.h" +#include "feir_stmt.h" +#include "ast_expr.h" +#include "ast_decl.h" + +namespace maple { +constexpr int64 kUndefValue = 0xdeadbeef; +class ENCChecker { + public: + ENCChecker() = default; + ~ENCChecker() = default; + static bool HasNonnullAttrInExpr(MIRBuilder &mirBuilder, const UniqueFEIRExpr &expr, bool isNested = false); + static bool HasNullExpr(const UniqueFEIRExpr &expr); + static void CheckNonnullGlobalVarInit(const MIRSymbol &sym, const MIRConst *cst); + static void CheckNullFieldInGlobalStruct(MIRType &type, MIRAggConst &cst, const MapleVector &initExprs); + static void CheckNonnullLocalVarInit(const MIRSymbol &sym, const ASTExpr *initExpr); + static void CheckNonnullLocalVarInit(const MIRSymbol &sym, const UniqueFEIRExpr &initFEExpr, + std::list &stmts); + static std::string GetNthStr(size_t index); + static std::string PrintParamIdx(const std::list &idxs); + static void CheckNonnullArgsAndRetForFuncPtr(const MIRType &dstType, const UniqueFEIRExpr &srcExpr, const Loc &loc); + static bool HasNonnullFieldInStruct(const MIRType &mirType); + static bool HasNonnullFieldInPtrStruct(const MIRType &mirType); + static void CheckNonnullFieldInStruct(const MIRType &src, const MIRType &dst, const Loc &loc); + static bool IsSameBoundary(const AttrBoundary &arg1, const AttrBoundary &arg2); + static void CheckBoundaryArgsAndRetForFuncPtr(const MIRType &dstType, const UniqueFEIRExpr &srcExpr, const Loc &loc); + static UniqueFEIRExpr FindBaseExprInPointerOperation(const UniqueFEIRExpr &expr, bool isIncludingAddrof = false); + static MIRType *GetTypeFromAddrExpr(const UniqueFEIRExpr &expr); + static MIRType *GetArrayTypeFromExpr(const UniqueFEIRExpr &expr); + static MIRConst *GetMIRConstFromExpr(const UniqueFEIRExpr &expr); + static void AssignBoundaryVar(MIRBuilder &mirBuilder, const UniqueFEIRExpr &dstExpr, const UniqueFEIRExpr &srcExpr, + const UniqueFEIRExpr &lRealLenExpr, std::list &ans); + static void AssignUndefVal(MIRBuilder &mirBuilder, MIRSymbol &sym); + static std::string GetBoundaryName(const UniqueFEIRExpr &expr); + static bool IsGlobalVarInExpr(const UniqueFEIRExpr &expr); + static std::pair InsertBoundaryVar(MIRBuilder &mirBuilder, const UniqueFEIRExpr &expr); + static void InsertBoundaryVar(const ASTDecl &ptrDecl, std::list &stmts); + static bool IsConstantIndex(const UniqueFEIRExpr &expr); + static void PeelNestedBoundaryChecking(std::list &stmts, const UniqueFEIRExpr &baseExpr); + static void ReduceBoundaryChecking(std::list &stmts, const UniqueFEIRExpr &expr); + static UniqueFEIRExpr GetRealBoundaryLenExprInFunc(const UniqueFEIRExpr &lenExpr, const ASTFunc &astFunc, + const ASTCallExpr &astCallExpr); + static UniqueFEIRExpr GetRealBoundaryLenExprInFuncByIndex(const TypeAttrs &typeAttrs, const MIRType &type, + const ASTCallExpr &astCallExpr); + static UniqueFEIRExpr 
GetRealBoundaryLenExprInField(const UniqueFEIRExpr &lenExpr, MIRStructType &baseType, + const UniqueFEIRExpr &dstExpr); + static void InitBoundaryVarFromASTDecl(MapleAllocator &allocator, ASTDecl *ptrDecl, + ASTExpr *lenExpr, std::list &stmts); + static void InitBoundaryVar(MIRFunction &curFunction, const ASTDecl &ptrDecl, + UniqueFEIRExpr lenExpr, std::list &stmts); + static void InitBoundaryVar(MIRFunction &curFunction, const std::string &ptrName, MIRType &ptrType, + UniqueFEIRExpr lenExpr, std::list &stmts); + static std::pair InitBoundaryVar(MIRFunction &curFunction, const UniqueFEIRExpr &ptrExpr, + UniqueFEIRExpr lenExpr, std::list &stmts); + static UniqueFEIRExpr GetGlobalOrFieldLenExprInExpr(MIRBuilder &mirBuilder, const UniqueFEIRExpr &expr); + static void InsertBoundaryAssignChecking(MIRBuilder &mirBuilder, std::list &ans, + const UniqueFEIRExpr &srcExpr, const Loc &loc); + static UniqueFEIRStmt InsertBoundaryLEChecking(UniqueFEIRExpr lenExpr, const UniqueFEIRExpr &srcExpr, + const UniqueFEIRExpr &dstExpr); + static void CheckBoundaryLenFinalAssign(MIRBuilder &mirBuilder, const UniqueFEIRVar &var, FieldID fieldID, + const Loc &loc); + static void CheckBoundaryLenFinalAssign(MIRBuilder &mirBuilder, const UniqueFEIRType &addrType, FieldID fieldID, + const Loc &loc); + static void CheckBoundaryLenFinalAddr(MIRBuilder &mirBuilder, const UniqueFEIRExpr &expr, const Loc &loc); + static MapleVector ReplaceBoundaryChecking(MIRBuilder &mirBuilder, const FEIRStmtNary *stmt); + static void ReplaceBoundaryErr(const MIRBuilder &mirBuilder, const FEIRStmtNary *stmt); + static UniqueFEIRExpr GetBoundaryLenExprCache(uint32 hash); + static UniqueFEIRExpr GetBoundaryLenExprCache(const TypeAttrs &attr); + static UniqueFEIRExpr GetBoundaryLenExprCache(const FieldAttrs &attr); + static void InsertBoundaryInAtts(TypeAttrs &attr, const BoundaryInfo &boundary); + static void InsertBoundaryLenExprInAtts(TypeAttrs &attr, const UniqueFEIRExpr &expr); + static void InsertBoundaryInAtts(FieldAttrs &attr, const BoundaryInfo &boundary); + static void InsertBoundaryInAtts(FuncAttrs &attr, const BoundaryInfo &boundary); + static bool IsSafeRegion(const MIRBuilder &mirBuilder); + static bool IsUnsafeRegion(const MIRBuilder &mirBuilder); + static void CheckLenExpr(const ASTExpr &lenExpr, const std::list &nullstmts); +}; // class ENCChecker +} // namespace maple +#endif // HIR2MPL_INCLUDE_COMMON_ENCCHECKER_H diff --git a/src/hir2mpl/common/include/fe_algorithm.h b/src/hir2mpl/common/include/fe_algorithm.h new file mode 100644 index 0000000000000000000000000000000000000000..7e7df74ad76b8974cec95f83cd35c931346aa046 --- /dev/null +++ b/src/hir2mpl/common/include/fe_algorithm.h @@ -0,0 +1,158 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef HIR2MPL_INCLUDE_COMMON_FE_ALGORITHM_H +#define HIR2MPL_INCLUDE_COMMON_FE_ALGORITHM_H +#include +#include +#include +#include "types_def.h" +#include "fe_configs.h" + +namespace maple { +template +class CorrelativeMerge { + public: + using PtrMergeFunc = bool (T::*)(const T&); + using PtrMergableFunc = bool (T::*)() const; + CorrelativeMerge(std::map> &argCorrelationMap, PtrMergeFunc argPtrMergeFunc, + PtrMergableFunc argPtrMergableFunc) + : correlationMap(argCorrelationMap), + ptrMergeFunc(argPtrMergeFunc), + ptrMergableFunc(argPtrMergableFunc), + inLoop(false), + error(false), + success(false), + visitCount(0) {} + + ~CorrelativeMerge() = default; + void ProcessAll() { + visitCount = 0; + // loop check + LoopCheckAll(); + // process + CHECK_NULL_FATAL(ptrMergableFunc); + for (const std::pair> &item : correlationMap) { + T *dst = item.first; + CHECK_NULL_FATAL(dst); + if ((dst->*ptrMergableFunc)()) { + continue; + } + ProcessOne(*dst, true); + if (!success) { + error = true; + break; + } + } + } + + void LoopCheckAll() { + for (const std::pair> &item : correlationMap) { + visitedSet.clear(); + T *dst = item.first; + CHECK_NULL_FATAL(dst); + inLoop = false; + LoopCheck(*dst); + loopStatus[dst] = inLoop; + } + } + + void LoopCheck(T &dst) { + // loop check + if (visitedSet.find(&dst) != visitedSet.end()) { + inLoop = true; + return; + } + // correlation check + CHECK_FATAL(visitedSet.insert(&dst).second, "visitedSet insert failed"); + auto itCorr = correlationMap.find(&dst); + if (itCorr != correlationMap.end()) { + CHECK_NULL_FATAL(ptrMergableFunc); + for (T *src : itCorr->second) { + CHECK_NULL_FATAL(src); + if (!(src->*ptrMergableFunc)()) { + LoopCheck(*src); + if (inLoop) { + return; + } + } + } + } + } + + void ProcessOne(T &dst, bool first) { + if (first) { + success = true; + visitedSet.clear(); + } + // loop check + visitCount++; + // correlation end check + auto itCorr = correlationMap.find(&dst); + if (itCorr == correlationMap.end()) { + return; + } + CHECK_FATAL(visitedSet.insert(&dst).second, "visitedSet insert failed"); + // correlation check + auto itLoopStatus = loopStatus.find(&dst); + ASSERT(itLoopStatus != loopStatus.end(), "loop status not existed"); + bool updated = (first || !itLoopStatus->second); + if (ptrMergableFunc == nullptr || ptrMergeFunc == nullptr) { + CHECK_FATAL(false, "nullptr error."); + } + for (T *src : itCorr->second) { + CHECK_NULL_FATAL(src); + if (correlationMap.find(src) == correlationMap.end()) { + success = success && (dst.*ptrMergeFunc)(*src); + } else if ((src->*ptrMergableFunc)()) { + success = success && (dst.*ptrMergeFunc)(*src); + } else if (updatedSet.find(src) != updatedSet.end()) { + success = success && (dst.*ptrMergeFunc)(*src); + } else { + if (visitedSet.find(src) == visitedSet.end()) { + ProcessOne(*src, false); + success = success && (dst.*ptrMergeFunc)(*src); + } + } + } + if (!updated) { + return; + } + if (updatedSet.find(&dst) == updatedSet.end()) { + CHECK_FATAL(updatedSet.insert(&dst).second, "updatedSet insert failed"); + } + } + + uint32 GetVisitCount() const { + return visitCount; + } + + bool GetError() const { + return error; + } + + LLT_PRIVATE: + std::map> &correlationMap; + PtrMergeFunc ptrMergeFunc; + PtrMergableFunc ptrMergableFunc; + std::set visitedSet; + std::set updatedSet; + std::map loopStatus; + bool inLoop : 1; + bool error : 1; + bool success : 1; + uint32 visitCount; +}; // class CorrelativeMerge +} // namespace maple +#endif diff --git a/src/hir2mpl/common/include/fe_config_parallel.h 
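// Reviewer sketch (illustrative annotation, not part of this patch):
// CorrelativeMerge above propagates a pairwise merge across a dependency map
// while tolerating cycles (LoopCheckAll flags nodes on a cycle, ProcessAll then
// merges whatever can be resolved). A hypothetical instantiation, assuming the
// correlation map is keyed by T* with a set of T* sources as the loops suggest;
// Node, MergeFrom (bool MergeFrom(const Node&)) and IsResolved (bool
// IsResolved() const) are made-up names:
//   std::map<Node*, std::set<Node*>> deps;  // dst -> sources it merges from
//   CorrelativeMerge<Node> merger(deps, &Node::MergeFrom, &Node::IsResolved);
//   merger.ProcessAll();
//   if (merger.GetError()) { /* at least one node could not be merged */ }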
b/src/hir2mpl/common/include/fe_config_parallel.h new file mode 100644 index 0000000000000000000000000000000000000000..8d65a4d3aea91479480e0d3638ad7dcc06cbb1fa --- /dev/null +++ b/src/hir2mpl/common/include/fe_config_parallel.h @@ -0,0 +1,82 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef HIR2MPL_INCLUDE_COMMON_FE_CONFIG_PARALLEL_H +#define HIR2MPL_INCLUDE_COMMON_FE_CONFIG_PARALLEL_H +#include +#include +#include +#include "types_def.h" +#include "mpl_logging.h" + +namespace maple { +class FEConfigParallel { + public: + FEConfigParallel(); + ~FEConfigParallel() = default; + static FEConfigParallel &GetInstance() { + return instance; + } + + uint32 GetNThread() const { + return nThread; + } + + void EnableParallel() { + enableParallel = true; + } + + void DisableParallel() { + enableParallel = false; + } + + bool IsInParallelMode() const { + return enableParallel && (nThread > 1); + } + + void RegisterRunThreadID(const std::thread::id &tid) { + mtx.lock(); + CHECK_FATAL(runThreadIDs.insert(tid).second, "failed to register thread id"); + mtx.unlock(); + } + + bool RunThreadParallelForbidden() { + if (!enableParallel) { + return false; + } + std::thread::id tid = std::this_thread::get_id(); + return runThreadIDs.find(tid) != runThreadIDs.end(); + } + + void RunThreadIDCleanUp() { + runThreadIDs.clear(); + } + + private: + static FEConfigParallel instance; + uint32 nThread; + bool enableParallel; + std::set runThreadIDs; + std::mutex mtx; +}; + +#define HIR2MPL_PARALLEL_FORBIDDEN() \ + do { \ + if (FEConfigParallel::GetInstance().RunThreadParallelForbidden()) { \ + maple::logInfo.EmitErrorMessage("HIR2MPL_PARALLEL_FORBIDDEN", __FILE__, __LINE__, "\n"); \ + FATAL(kLncFatal, "Forbidden invocation in parallel run thread"); \ + } \ + } while (0) +} // namespace maple +#endif // HIR2MPL_INCLUDE_COMMON_FE_PARALLEL_CONFIG_H diff --git a/src/hir2mpl/common/include/fe_configs.h b/src/hir2mpl/common/include/fe_configs.h new file mode 100644 index 0000000000000000000000000000000000000000..b6088219f3ffa7b4aefbbd9a109e802aa41a751f --- /dev/null +++ b/src/hir2mpl/common/include/fe_configs.h @@ -0,0 +1,43 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
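// Reviewer sketch (illustrative annotation, not part of this patch): once
// parallel mode is enabled, worker threads register themselves via
// FEConfigParallel::GetInstance().RegisterRunThreadID(std::this_thread::get_id());
// a code path that must never execute on such a thread then simply starts with
//   HIR2MPL_PARALLEL_FORBIDDEN();  // fatal if reached from a registered run thread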
+ */ +#ifndef HIR2MPL_INCLUDE_COMMON_FE_CONFIGS_H +#define HIR2MPL_INCLUDE_COMMON_FE_CONFIGS_H +#include "types_def.h" + +#if ENABLE_COV_CHECK == 1 +#define LLT_MOCK_TARGET virtual +#define LLT_MOCK_TARGET_VIRTUAL virtual +#define LLT_PUBLIC public +#define LLT_PROTECTED public +#define LLT_PRIVATE public +#else +#define LLT_MOCK_TARGET +#define LLT_MOCK_TARGET_VIRTUAL virtual +#define LLT_PUBLIC public +#define LLT_PROTECTED protected +#define LLT_PRIVATE private +#endif + +namespace maple { +using TypeDim = uint8; + +class FEConstants { + public: + const static uint8 kDimMax = UINT8_MAX; +}; // class FEContants +} // namespace maple + +#include "fe_config_parallel.h" +#endif // HIR2MPL_INCLUDE_COMMON_FE_CONFIGS_H \ No newline at end of file diff --git a/src/hir2mpl/common/include/fe_file_ops.h b/src/hir2mpl/common/include/fe_file_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..0102587f171292bd9f032f5461c39b2fdd2b1b4c --- /dev/null +++ b/src/hir2mpl/common/include/fe_file_ops.h @@ -0,0 +1,32 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef HIR2MPL_INCLUDE_COMMON_FE_FILE_OPS_H +#define HIR2MPL_INCLUDE_COMMON_FE_FILE_OPS_H +#include + +namespace maple { +class FEFileOps { + public: + static std::string GetFilePath(const std::string &pathName); + static std::string GetFileNameWithExt(const std::string &pathName); + static std::string GetFileName(const std::string &pathName); + static std::string GetFileExtName(const std::string &pathName); + + private: + FEFileOps() = default; + ~FEFileOps() = default; +}; +} // namespace maple +#endif \ No newline at end of file diff --git a/src/hir2mpl/common/include/fe_file_type.h b/src/hir2mpl/common/include/fe_file_type.h new file mode 100644 index 0000000000000000000000000000000000000000..f7ce0726405cbef407b21347befef998de75395b --- /dev/null +++ b/src/hir2mpl/common/include/fe_file_type.h @@ -0,0 +1,66 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef HIR2MPL_INCLUDE_COMMON_FE_FILE_TYPE_H +#define HIR2MPL_INCLUDE_COMMON_FE_FILE_TYPE_H +#include +#include +#include "types_def.h" +#include "basic_io.h" + +namespace maple { +class FEFileType { + public: + enum FileType { + kUnknownType, + kClass, + kJar, + kDex, + kAST, + kMAST, + }; + + inline static FEFileType &GetInstance() { + return fileType; + } + + FileType GetFileTypeByExtName(const std::string &extName) const; + FileType GetFileTypeByPathName(const std::string &pathName) const; + FileType GetFileTypeByMagicNumber(const std::string &pathName) const; + FileType GetFileTypeByMagicNumber(BasicIOMapFile &file) const; + FileType GetFileTypeByMagicNumber(uint32 magic) const; + void Reset(); + void LoadDefault(); + void RegisterExtName(FileType argFileType, const std::string &extName); + void RegisterMagicNumber(FileType argFileType, uint32 magicNumber); + static std::string GetPath(const std::string &pathName); + static std::string GetName(const std::string &pathName, bool withExt = true); + static std::string GetExtName(const std::string &pathName); + + private: + static FEFileType fileType; + static const uint32 kMagicClass = 0xBEBAFECA; + static const uint32 kMagicZip = 0x04034B50; + static const uint32 kMagicDex = 0x0A786564; + static const uint32 kMagicAST = 0x48435043; + static const uint32 kMagicMAST = 0xAB4C504D; + std::map mapExtNameType; + std::map mapTypeMagic; + std::map mapMagicType; + + FEFileType(); + ~FEFileType() = default; +}; +} +#endif diff --git a/src/hir2mpl/common/include/fe_function.h b/src/hir2mpl/common/include/fe_function.h new file mode 100644 index 0000000000000000000000000000000000000000..d7c2d2c29edc38379f280802d7d66530f1b5333b --- /dev/null +++ b/src/hir2mpl/common/include/fe_function.h @@ -0,0 +1,262 @@ +/* + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
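// Reviewer note (illustrative annotation, not part of this patch): the magic
// constants in FEFileType above are apparently the first four file bytes read
// as a little-endian uint32 -- a .class file starts with CA FE BA BE, giving
// kMagicClass = 0xBEBAFECA; a zip/jar starts with "PK\x03\x04", giving
// kMagicZip = 0x04034B50; and a .dex file starts with "dex\n", giving
// kMagicDex = 0x0A786564.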
+ */ +#ifndef HIR2MPL_INCLUDE_COMMON_FE_FUNCTION_H +#define HIR2MPL_INCLUDE_COMMON_FE_FUNCTION_H +#include +#include +#include +#include "types_def.h" +#include "mempool_allocator.h" +#include "safe_ptr.h" +#include "mir_function.h" +#include "fe_utils.h" +#include "feir_bb.h" +#include "feir_stmt.h" +#include "fe_timer_ns.h" +#include "feir_lower.h" +#include "feir_cfg.h" +#include "fe_function_phase_result.h" +#include "feir_type_infer.h" +#include "feir_scope.h" + +namespace maple { +#define SET_FUNC_INFO_PAIR(A, B, C, D) \ + do { \ + (A).PushbackMIRInfo(MIRInfoPair(GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(B), C)); \ + (A).PushbackIsString(D); \ + } while (0) \ + +class FEFunction { + public: + FEFunction(MIRFunction &argMIRFunction, const std::unique_ptr &argPhaseResultTotal); + virtual ~FEFunction(); + + // element memory manage method + FEIRStmt *RegisterGeneralStmt(std::unique_ptr stmt); + const std::unique_ptr &RegisterGeneralStmtUniqueReturn(std::unique_ptr stmt); + FEIRBB *RegisterFEIRBB(std::unique_ptr bb); + FEIRStmt *RegisterFEIRStmt(UniqueFEIRStmt stmt); + std::string GetDescription(); + void OutputUseDefChain(); + void OutputDefUseChain(); + void PushFuncScope(const SrcPosition &startOfScope, const SrcPosition &endOfScope); + void PushStmtScope(const SrcPosition &startOfScope, const SrcPosition &endOfScope, bool isControllScope = false); + void PushStmtScope(bool isControllScope); + UniqueFEIRScope PopTopScope(); + FEIRScope *GetTopFEIRScopePtr() const; + MIRScope *GetTopMIRScope() const; + virtual void AddVLACleanupStmts(std::list &stmts); + + void SetSrcFileName(const std::string &fileName) { + srcFileName = fileName; + } + + const FEIRStmt *GetFEIRStmtHead() const { + ASSERT_NOT_NULL(feirStmtHead); + return feirStmtHead; + } + + const FEIRStmt *GetFEIRStmtTail() const { + ASSERT_NOT_NULL(feirStmtTail); + return feirStmtTail; + } + + void Init() { + InitImpl(); + } + + void AppendFEIRStmts(std::list &stmts); + void InsertFEIRStmtsBefore(FEIRStmt &pos, std::list &stmts); + + void PreProcess() { + PreProcessImpl(); + } + + bool Process() { + return ProcessImpl(); + } + + void Finish() { + FinishImpl(); + } + + uint32 GetStmtCount() const { + return stmtCount; + } + + std::unordered_map> &GetBoundaryMap() { + return boundaryMap; + } + + const std::unordered_map> &GetBoundaryMap() const { + return boundaryMap; + } + + void SetBoundaryMap(uint32 tag, const std::pair &boundaryVar) { + boundaryMap[tag] = boundaryVar; + } + + std::stack &GetSafeRegionFlag() { + return safeRegionFlag; + } + + const std::stack &GetSafeRegionFlag() const { + return safeRegionFlag; + } + + const std::deque &GetScopeStack() const { + return scopeStack; + } + + void SetLabelWithScopes(std::string labelName, std::map scopes) { + labelWithScopes.insert(std::make_pair(labelName, std::move(scopes))); + } + + const std::unordered_map> &GetLabelWithScopes() const { + return labelWithScopes; + } + + LLT_PROTECTED: + // run phase routines + virtual bool GenerateGeneralStmt(const std::string &phaseName) = 0; + virtual bool LabelGeneralStmts(const std::string &phaseName); + virtual bool LabelFEIRBBs(const std::string &phaseName); + virtual bool ProcessFEIRFunction(); + + virtual bool GenerateArgVarList(const std::string &phaseName) = 0; + virtual bool GenerateAliasVars(const std::string &phaseName) = 0; + virtual bool EmitToFEIRStmt(const std::string &phaseName) = 0; + virtual bool BuildMapLabelStmt(const std::string &phaseName); + virtual bool SetupFEIRStmtJavaTry(const std::string &phaseName); + 
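// Reviewer sketch (illustrative annotation, not part of this patch): the
// SET_FUNC_INFO_PAIR macro above appends one (name, value) entry to a
// function's MIR info table and records whether the value is a string index;
// the key used below is a hypothetical example:
//   SET_FUNC_INFO_PAIR(mirFunction, "INFO_fullname", fullNameStrIdx, true);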
virtual bool SetupFEIRStmtBranch(const std::string &phaseName); + virtual bool UpdateRegNum2This(const std::string &phaseName); + bool LowerFunc(const std::string &phaseName); + bool DumpFEIRBBs(const std::string &phaseName); + bool DumpFEIRCFGGraph(const std::string &phaseName); + bool BuildFEIRDFG(const std::string &phaseName); // process fe ir check point, build fe ir DFG + bool BuildFEIRUDDU(const std::string &phaseName); // build fe ir UD DU chain + bool TypeInfer(const std::string &phaseName); // feir based Type Infer + + // finish phase routines + bool BuildGeneralStmtBBMap(const std::string &phaseName); + bool UpdateFormal(const std::string &phaseName); + virtual bool EmitToMIR(const std::string &phaseName); + + // interface methods + virtual void InitImpl(); + virtual void PreProcessImpl() {} + virtual bool ProcessImpl() { + return true; + } + + virtual void FinishImpl() {} + virtual bool PreProcessTypeNameIdx() = 0; + virtual void GenerateGeneralStmtFailCallBack() = 0; + virtual void GenerateGeneralDebugInfo() = 0; + virtual bool VerifyGeneral() = 0; + virtual void VerifyGeneralFailCallBack() = 0; + virtual void DumpGeneralStmts(); + virtual std::string GetGeneralFuncName() const; + void EmitToMIRStmt(); + + void PhaseTimerStart(FETimerNS &timer) const; + void PhaseTimerStopAndDump(FETimerNS &timer, const std::string &label) const; + virtual void DumpFEIRCFGGraphForDFGEdge(std::ofstream &file); + virtual bool HasThis() = 0; + virtual bool IsNative() = 0; + void BuildMapLabelIdx(); + bool CheckPhaseResult(const std::string &phaseName); + + FEIRStmt *genStmtHead; + FEIRStmt *genStmtTail; + std::list genStmtListRaw; + FEIRBB *genBBHead; + FEIRBB *genBBTail; + FEIRStmt *feirStmtHead; + FEIRStmt *feirStmtTail; + FEIRBB *feirBBHead; + FEIRBB *feirBBTail; + std::unique_ptr feirLower; + std::unique_ptr feirCFG; + std::map genStmtBBMap; + std::vector> argVarList; + std::map mapLabelIdx; + std::map mapLabelStmt; + FEFunctionPhaseResult phaseResult; + const std::unique_ptr &phaseResultTotal; + std::string srcFileName = ""; + MIRSrcLang srcLang = kSrcLangJava; + MIRFunction &mirFunction; + + LLT_PRIVATE: + void OutputStmts() const; + bool SetupFEIRStmtGoto(FEIRStmtGoto &stmt); + bool SetupFEIRStmtSwitch(FEIRStmtSwitch &stmt); + const FEIRStmtPesudoLOC *GetLOCForStmt(const FEIRStmt &feIRStmt) const; + void AddLocForStmt(const FEIRStmt &stmt, std::list &mirStmts) const; + void LabelFEIRStmts(); // label fe ir stmts + FEIRBB *NewFEIRBB(uint32 &id); + bool IsBBEnd(const FEIRStmt &stmt) const; + bool MayBeBBEnd(const FEIRStmt &stmt) const; + bool ShouldNewBB(const FEIRBB *currBB, const FEIRStmt &currStmt) const; + void LinkFallThroughBBAndItsNext(FEIRBB &bb); + void LinkBranchBBAndItsTargets(FEIRBB &bb); + void LinkGotoBBAndItsTarget(FEIRBB &bb, const FEIRStmt &stmtTail); + void LinkSwitchBBAndItsTargets(FEIRBB &bb, const FEIRStmt &stmtTail); + void LinkBB(FEIRBB &predBB, FEIRBB &succBB); + FEIRBB &GetFEIRBBByStmt(const FEIRStmt &stmt); + bool CheckBBsStmtNoAuxTail(const FEIRBB &bb) const; + void ProcessCheckPoints(); + void InsertCheckPointForBBs(); + void InsertCheckPointForTrys(); + void InitFirstVisibleStmtForCheckPoints(); + void InitFEIRStmtCheckPointMap(); + void RegisterDFGNodes2CheckPoints(); + void RegisterDFGNodesForFuncParameters(); + void RegisterDFGNodesForStmts(); + bool CalculateDefs4AllUses(); + void InitTrans4AllVars(); + void InsertRetypeStmtsAfterDef(const UniqueFEIRVar& def); + FEIRStmtPesudoJavaTry2 &GetJavaTryByCheckPoint(FEIRStmtCheckPoint &checkPoint); + 
FEIRStmtCheckPoint &GetCheckPointByFEIRStmt(const FEIRStmt &stmt); + void SetUpDefVarTypeScatterStmtMap(); + FEIRStmt &GetStmtByDefVarTypeScatter(const FEIRVarTypeScatter &varTypeScatter); + void InsertRetypeStmt(const FEIRVarTypeScatter &fromVar, const FEIRType &toType); + void InsertCvtStmt(const FEIRVarTypeScatter &fromVar, const FEIRType &toType); + void InsertJavaMergeStmt(const FEIRVarTypeScatter &fromVar, const FEIRType &toType); + void InsertDAssignStmt4TypeCvt(const FEIRVarTypeScatter &fromVar, const FEIRType &toType, UniqueFEIRExpr expr); + bool WithFinalFieldsNeedBarrier(MIRClassType *classType, bool isStatic) const; + bool IsNeedInsertBarrier(); + + std::list> genStmtList; + std::list> genBBList; + // FeirStmts generated by feirLower are inserted after the kStmtPesudoFuncEnd and linked, + // Access sequential feirStmt instructions through a doubly linked FELinkListNode from feirStmtHead + std::list feirStmtList; + std::list> feirBBList; + std::map feirStmtBBMap; + std::map feirStmtCheckPointMap; + std::map checkPointJavaTryMap; + std::unique_ptr typeInfer; + uint32 stmtCount = 0; + std::map defVarTypeScatterStmtMap; + std::unordered_map> boundaryMap; // EnhanceC boundary var + std::stack safeRegionFlag; // EnhanceC saferegion(true: safe, false: unsafe) + std::deque scopeStack; // an element of the stack represents the scope + uint32 scopeID = 0; + std::unordered_map> labelWithScopes; +}; +} // namespace maple +#endif // HIR2MPL_INCLUDE_COMMON_FE_FUNCTION_H \ No newline at end of file diff --git a/src/hir2mpl/common/include/fe_function_phase_result.h b/src/hir2mpl/common/include/fe_function_phase_result.h new file mode 100644 index 0000000000000000000000000000000000000000..192981ed3e1af05ac45b3bbbe2d36ea515c7b375 --- /dev/null +++ b/src/hir2mpl/common/include/fe_function_phase_result.h @@ -0,0 +1,83 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef HIR2MPL_INCLUDE_COMMON_FE_FUNCTION_PHASE_RESULT_H +#define HIR2MPL_INCLUDE_COMMON_FE_FUNCTION_PHASE_RESULT_H +#include +#include +#include +#include "types_def.h" +#include "fe_timer_ns.h" + +namespace maple { +class FEFunctionPhaseResult { + public: + explicit FEFunctionPhaseResult(bool argRecordTime = false) + : recordTime(argRecordTime), + success(true), + enable(true) {} + + ~FEFunctionPhaseResult() = default; + void Combine(const FEFunctionPhaseResult &result); + void Dump(); + void DumpMS(); + bool Finish(bool isSuccess); + bool Finish() { + return Finish(success); + } + + void Start() { + if (enable && recordTime) { + timer.Start(); + } + } + + bool IsSuccess() const { + return success; + } + + void Disable() { + enable = false; + } + + void Enable() { + enable = true; + } + + void RegisterPhaseName(const std::string &name) { + if (enable && recordTime) { + currPhaseName = name; + phaseNames.push_back(name); + } + } + + void RegisterPhaseNameAndStart(const std::string &name) { + if (enable && recordTime) { + currPhaseName = name; + phaseNames.push_back(name); + timer.Start(); + } + } + + private: + FETimerNS timer; + bool recordTime : 1; + bool success : 1; + bool enable : 1; + std::string currPhaseName = ""; + std::list phaseNames; + std::map phaseTimes; +}; +} // namespace maple +#endif // HIR2MPL_INCLUDE_COMMON_FE_FUNCTION_PHASE_RESULT_H \ No newline at end of file diff --git a/src/hir2mpl/common/include/fe_input.h b/src/hir2mpl/common/include/fe_input.h new file mode 100644 index 0000000000000000000000000000000000000000..b9cdfd2d2ace67c6fce6270297a56baf618a7f53 --- /dev/null +++ b/src/hir2mpl/common/include/fe_input.h @@ -0,0 +1,116 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
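// Reviewer sketch (illustrative annotation, not part of this patch): typical use
// of FEFunctionPhaseResult above when per-phase timing is requested; RunPhase
// stands in for the real phase body:
//   FEFunctionPhaseResult res(true);               // record time
//   res.RegisterPhaseNameAndStart("emit to mir");  // phase name is arbitrary
//   bool ok = RunPhase();
//   (void)res.Finish(ok);
//   res.DumpMS();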
+ */ +#ifndef HIR2MPL_INCLUDE_FE_INPUT_H +#define HIR2MPL_INCLUDE_FE_INPUT_H +#include "global_tables.h" +#include "mir_module.h" +#include "mpl_logging.h" + +namespace maple { +class FEInputUnit { + public: + FEInputUnit() {} + virtual ~FEInputUnit() = default; + GStrIdx GetNameIdxOrin() { + return GetNameIdxOrinImpl(); + } + + GStrIdx GetNameIdxMpl() { + return GetNameIdxMplImpl(); + } + + std::string GetCatagoryName() { + return GetCatagoryNameImpl(); + } + + protected: + virtual GStrIdx GetNameIdxOrinImpl() = 0; + virtual GStrIdx GetNameIdxMplImpl() = 0; + virtual std::string GetCatagoryNameImpl() = 0; +}; + +enum FEInputSameNamePolicy { + kUseFirst, + kUseNewest, + kFatalOnce, + kFatalAll +}; + +template +class FEInputContent { + public: + explicit FEInputContent(MapleAllocator &alloc); + ~FEInputContent() = default; + void RegisterItem(T &item); + void CheckSameName(); + + private: + MapleAllocator &allocator; + MapleList items; + MapleUnorderedMap nameItemMap; + MapleList sameNames; + FEInputSameNamePolicy policySameName; + + void EraseItem(GStrIdx nameIdxMpl); +}; + +class FEInputUnitExt { +}; + +class FEInputProgramUnit { +}; + +class FEInputUnitMethod : public FEInputUnit { + public: + explicit FEInputUnitMethod(MapleAllocator &alloc); + ~FEInputUnitMethod() = default; + + protected: + std::string GetCatagoryNameImpl() override; + + MapleAllocator &allocator; +}; + +class FEInputUnitVariable : public FEInputUnit { + public: + explicit FEInputUnitVariable(MapleAllocator &alloc); + ~FEInputUnitVariable() = default; + + protected: + std::string GetCatagoryNameImpl() override; + + MapleAllocator &allocator; +}; + +class FEInputUnitStruct : public FEInputUnit { + public: + explicit FEInputUnitStruct(MapleAllocator &alloc); + virtual ~FEInputUnitStruct() = default; + MIRTypeKind GetMIRTypeKind() const { + return typeKind; + } + + protected: + std::string GetCatagoryNameImpl() override; + + MapleAllocator &allocator; + FEInputContent methods; + FEInputContent methodsStatic; + FEInputContent fields; + FEInputContent fieldsStatic; + MIRTypeKind typeKind; +}; +} // namespace maple +#endif diff --git a/src/hir2mpl/common/include/fe_input_helper.h b/src/hir2mpl/common/include/fe_input_helper.h new file mode 100644 index 0000000000000000000000000000000000000000..edb5a9bb71e1eecc22ef583156a2f96dfa0e20b1 --- /dev/null +++ b/src/hir2mpl/common/include/fe_input_helper.h @@ -0,0 +1,461 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef HIR2MPL_INCLUDE_FE_INPUT_HELPER_H +#define HIR2MPL_INCLUDE_FE_INPUT_HELPER_H +#include "mempool_allocator.h" +#include "mir_type.h" +#include "mir_pragma.h" +#include "mir_symbol.h" +#include "feir_type.h" +#include "fe_function.h" + +namespace maple { +struct ExtraField { + std::string klass; + std::string field; + std::string type; + std::string attr; +}; + +class FEInputContainer { + public: + FEInputContainer() = default; + virtual ~FEInputContainer() = default; + MIRStructType *GetContainer() { + return GetContainerImpl(); + } + + protected: + virtual MIRStructType *GetContainerImpl() = 0; +}; + +class FEInputPragmaHelper { + public: + FEInputPragmaHelper() = default; + virtual ~FEInputPragmaHelper() = default; + std::vector &GenerateMIRPragmas() { + return GenerateMIRPragmasImpl(); + } + + protected: + virtual std::vector &GenerateMIRPragmasImpl() = 0; +}; + +class FEInputStructHelper; + +class FEInputGlobalVarHelper { + public: + explicit FEInputGlobalVarHelper(MapleAllocator &allocatorIn) : allocator(allocatorIn) {} + virtual ~FEInputGlobalVarHelper() = default; + + bool ProcessDecl() { + return ProcessDecl(allocator); + } + + bool ProcessDecl(MapleAllocator &allocatorIn) { + return ProcessDeclImpl(allocatorIn); + } + + protected: + MapleAllocator &allocator; + virtual bool ProcessDeclImpl(MapleAllocator &allocator) = 0; +}; + +class FEInputFileScopeAsmHelper { + public: + explicit FEInputFileScopeAsmHelper(MapleAllocator &allocatorIn) : allocator(allocatorIn) {} + virtual ~FEInputFileScopeAsmHelper() = default; + + bool ProcessDecl() { + return ProcessDecl(allocator); + } + + bool ProcessDecl(MapleAllocator &allocatorIn) { + return ProcessDeclImpl(allocatorIn); + } + + protected: + MapleAllocator &allocator; + virtual bool ProcessDeclImpl(MapleAllocator &allocator) = 0; +}; + +class FEInputEnumHelper { + public: + explicit FEInputEnumHelper(MapleAllocator &allocatorIn) : allocator(allocatorIn) {} + virtual ~FEInputEnumHelper() = default; + + bool ProcessDecl() { + return ProcessDecl(allocator); + } + + bool ProcessDecl(MapleAllocator &allocatorIn) { + return ProcessDeclImpl(allocatorIn); + } + + protected: + MapleAllocator &allocator; + virtual bool ProcessDeclImpl(MapleAllocator &allocator) = 0; +}; + +class FEInputFieldHelper { + public: + explicit FEInputFieldHelper(MapleAllocator &allocatorIn) : allocator(allocatorIn) {} + virtual ~FEInputFieldHelper() = default; + const FieldPair &GetMIRFieldPair() const { + return mirFieldPair; + } + + bool IsStatic() const { + return mirFieldPair.second.second.GetAttr(FLDATTR_static); + } + + bool ProcessDecl() { + return ProcessDecl(allocator); + } + + bool ProcessDecl(MapleAllocator &alloc) { + return ProcessDeclImpl(alloc); + } + + bool ProcessDeclWithContainer(MapleAllocator &alloc) { + return ProcessDeclWithContainerImpl(alloc); + } + + static void SetFieldAttribute(const std::string &name, FieldAttrs &attr) { +#define FIELD_ATTR +#define ATTR(A) \ + if (!strcmp(name.c_str(), #A)) { \ + attr.SetAttr(FLDATTR_##A); \ + return; \ + } +#include "all_attributes.def" +#undef ATTR +#undef FIELD_ATTR + } + + protected: + virtual bool ProcessDeclImpl(MapleAllocator &allocator) = 0; + virtual bool ProcessDeclWithContainerImpl(MapleAllocator &allocator) = 0; + + MapleAllocator &allocator; + FieldPair mirFieldPair; +}; + +class FEInputMethodHelper { + public: + explicit FEInputMethodHelper(MapleAllocator &allocatorIn) + : allocator(allocatorIn), + srcLang(kSrcLangUnknown), + feFunc(nullptr), + mirFunc(nullptr), + retType(nullptr), + 
argTypes(allocator.Adapter()), + retMIRType(nullptr), + argMIRTypes(allocator.Adapter()), + methodNameIdx(GStrIdx(0)) {} + + virtual ~FEInputMethodHelper() { + feFunc = nullptr; + mirFunc = nullptr; + retType = nullptr; + retMIRType = nullptr; + } + + const MethodPair &GetMIRMethodPair() const { + return mirMethodPair; + } + + const FEIRType *GetReturnType() const { + return retType; + } + + const MapleVector &GetArgTypes() const { + return argTypes; + } + + bool ProcessDecl() { + return ProcessDecl(allocator); + } + + bool ProcessDecl(MapleAllocator &allocatorIn) { + return ProcessDeclImpl(allocatorIn); + } + + void SolveReturnAndArgTypes(MapleAllocator &allocatorIn) { + SolveReturnAndArgTypesImpl(allocatorIn); + } + + std::string GetMethodName(bool inMpl, bool full = true) const { + return GetMethodNameImpl(inMpl, full); + } + + FuncAttrs GetAttrs() const { + return GetAttrsImpl(); + } + + bool IsStatic() const { + return IsStaticImpl(); + } + + bool IsVirtual() const { + return IsVirtualImpl(); + } + + bool IsNative() const { + return IsNativeImpl(); + } + + bool IsVarg() const { + return IsVargImpl(); + } + + bool HasThis() const { + return HasThisImpl(); + } + + MIRType *GetTypeForThis() const { + return GetTypeForThisImpl(); + } + + GStrIdx GetMethodNameIdx() const { + return methodNameIdx; + } + + bool HasCode() const { + return HasCodeImpl(); + } + + void SetClassTypeInfo(const MIRStructType &structType) { + mirFunc->SetClassTyIdx(structType.GetTypeIndex()); + } + + protected: + virtual bool ProcessDeclImpl(MapleAllocator &allocatorIn); + virtual void SolveReturnAndArgTypesImpl(MapleAllocator &allocator) = 0; + virtual std::string GetMethodNameImpl(bool inMpl, bool full) const = 0; + virtual FuncAttrs GetAttrsImpl() const = 0; + virtual bool IsStaticImpl() const = 0; + virtual bool IsVirtualImpl() const = 0; + virtual bool IsNativeImpl() const = 0; + virtual bool IsVargImpl() const = 0; + virtual bool HasThisImpl() const = 0; + virtual MIRType *GetTypeForThisImpl() const = 0; + virtual bool HasCodeImpl() const = 0; + + MapleAllocator &allocator; + MIRSrcLang srcLang; + FEFunction *feFunc; + MIRFunction *mirFunc; + MethodPair mirMethodPair; + FEIRType *retType; + MapleVector argTypes; + // Added MIRType to support C and extended to Java later. 
+ MIRType *retMIRType; + MapleVector argMIRTypes; + GStrIdx methodNameIdx; +}; + +class FEInputStructHelper : public FEInputContainer { + public: + explicit FEInputStructHelper(MapleAllocator &allocatorIn) + : allocator(allocatorIn), + mirStructType(nullptr), + mirSymbol(nullptr), + fieldHelpers(allocator.Adapter()), + methodHelpers(allocator.Adapter()), + pragmaHelper(nullptr), + staticFieldsConstVal(allocator.Adapter()), + isSkipped(false), + srcLang(kSrcLangJava) {} + + ~FEInputStructHelper() override { + mirStructType = nullptr; + mirSymbol = nullptr; + pragmaHelper = nullptr; + } + + bool IsSkipped() const { + return isSkipped; + } + + const TypeAttrs GetStructAttributeFromSymbol() const { + if (mirSymbol != nullptr) { + return mirSymbol->GetAttrs(); + } + return TypeAttrs(); + } + + MIRSrcLang GetSrcLang() const { + return srcLang; + } + + const MapleList &GetMethodHelpers() const { + return methodHelpers; + } + + bool PreProcessDecl() { + return PreProcessDeclImpl(); + } + + bool ProcessDecl() { + return ProcessDeclImpl(); + } + + std::string GetStructNameOrin() const { + return GetStructNameOrinImpl(); + } + + std::string GetStructNameMpl() const { + return GetStructNameMplImpl(); + } + + std::list GetSuperClassNames() const { + return GetSuperClassNamesImpl(); + } + + std::vector GetInterfaceNames() const { + return GetInterfaceNamesImpl(); + } + + std::string GetSourceFileName() const { + return GetSourceFileNameImpl(); + } + + std::string GetSrcFileName() const { + return GetSrcFileNameImpl(); + } + + MIRStructType *CreateMIRStructType(bool &error) const { + return CreateMIRStructTypeImpl(error); + } + + TypeAttrs GetStructAttributeFromInput() const { + return GetStructAttributeFromInputImpl(); + } + + uint64 GetRawAccessFlags() const { + return GetRawAccessFlagsImpl(); + } + + GStrIdx GetIRSrcFileSigIdx() const { + return GetIRSrcFileSigIdxImpl(); + } + + bool IsMultiDef() const { + return IsMultiDefImpl(); + } + + void InitFieldHelpers() { + InitFieldHelpersImpl(); + } + + void InitMethodHelpers() { + InitMethodHelpersImpl(); + } + + void SetPragmaHelper(FEInputPragmaHelper *pragmaHelperIn) { + pragmaHelper = pragmaHelperIn; + } + + void SetStaticFieldsConstVal(const std::vector &val) { + staticFieldsConstVal.resize(val.size()); + for (uint32 i = 0; i < val.size(); ++i) { + staticFieldsConstVal[i] = val[i]; + } + } + + void SetFinalStaticStringIDVec(const std::vector &stringIDVec) { + finalStaticStringID = stringIDVec; + } + + void SetIsOnDemandLoad(bool flag) { + isOnDemandLoad = flag; + } + + void ProcessPragma(); + + protected: + MIRStructType *GetContainerImpl() override; + virtual bool PreProcessDeclImpl(); + virtual bool ProcessDeclImpl(); + virtual std::string GetStructNameOrinImpl() const = 0; + virtual std::string GetStructNameMplImpl() const = 0; + virtual std::list GetSuperClassNamesImpl() const = 0; + virtual std::vector GetInterfaceNamesImpl() const = 0; + virtual std::string GetSourceFileNameImpl() const = 0; + virtual std::string GetSrcFileNameImpl() const; + virtual MIRStructType *CreateMIRStructTypeImpl(bool &error) const = 0; + virtual TypeAttrs GetStructAttributeFromInputImpl() const = 0; + virtual uint64 GetRawAccessFlagsImpl() const = 0; + virtual GStrIdx GetIRSrcFileSigIdxImpl() const = 0; + virtual bool IsMultiDefImpl() const = 0; + virtual void InitFieldHelpersImpl() = 0; + virtual void InitMethodHelpersImpl() = 0; + void CreateSymbol(); + void ProcessDeclSuperClass(); + void ProcessDeclSuperClassForJava(); + void ProcessDeclImplements(); + void 
ProcessDeclDefInfo(); + void ProcessDeclDefInfoSuperNameForJava(); + void ProcessDeclDefInfoImplementNameForJava(); + void ProcessFieldDef(); + void ProcessExtraFields(); + void ProcessMethodDef(); + void ProcessStaticFields(); + + MapleAllocator &allocator; + MIRStructType *mirStructType; + MIRSymbol *mirSymbol; + MapleList fieldHelpers; + MapleList methodHelpers; + FEInputPragmaHelper *pragmaHelper; + MapleVector staticFieldsConstVal; + std::vector finalStaticStringID; + bool isSkipped; + MIRSrcLang srcLang; + bool isOnDemandLoad = false; +}; + +class FEInputHelper { + public: + explicit FEInputHelper(MapleAllocator &allocatorIn) + : allocator(allocatorIn), + fieldHelpers(allocator.Adapter()), + methodHelpers(allocator.Adapter()), + structHelpers(allocator.Adapter()) {} + ~FEInputHelper() = default; + bool PreProcessDecl(); + bool ProcessDecl(); + bool ProcessImpl() const; + void RegisterFieldHelper(FEInputFieldHelper &helper) { + fieldHelpers.push_back(&helper); + } + + void RegisterMethodHelper(FEInputMethodHelper &helper) { + methodHelpers.push_back(&helper); + } + + void RegisterStructHelper(FEInputStructHelper &helper) { + structHelpers.push_back(&helper); + } + + private: + MapleAllocator &allocator; + MapleList fieldHelpers; + MapleList methodHelpers; + MapleList structHelpers; +}; +} // namespace maple +#endif // HIR2MPL_INCLUDE_FE_INPUT_HELPER_H diff --git a/src/hir2mpl/common/include/fe_java_string_manager.h b/src/hir2mpl/common/include/fe_java_string_manager.h new file mode 100644 index 0000000000000000000000000000000000000000..870c65bdb46baa7d94b4bc699647f211c1e04054 --- /dev/null +++ b/src/hir2mpl/common/include/fe_java_string_manager.h @@ -0,0 +1,68 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef HIR2MPL_INCLUDE_COMMON_FE_JAVA_STRING_MANAGER_H +#define HIR2MPL_INCLUDE_COMMON_FE_JAVA_STRING_MANAGER_H +#include +#include +#include +#include "mir_module.h" +#include "mir_builder.h" + +namespace maple { +class FEJavaStringManager { + public: + FEJavaStringManager(MIRModule &argModule, MIRBuilder &mirBuilderIn); + ~FEJavaStringManager(); + void ClearStringMetaClassSymbolExternFlag(); + // profiling + void LoadProfilingData(const std::string &profileFileName); + MIRSymbol *GetLiteralPtrVar(const MIRSymbol *var) const; + MIRSymbol *GetLiteralPtrVar(const std::string &str) const; + MIRSymbol *GetLiteralPtrVar(const std::u16string &strU16) const; + // methods for string + MIRSymbol *CreateLiteralVar(MIRBuilder &mirBuilderIn, const std::string &str, bool isFieldValue); + MIRSymbol *CreateLiteralVar(MIRBuilder &mirBuilderIn, const std::u16string &strU16, bool isFieldValue); + MIRSymbol *GetLiteralVar(const std::string &str) const; + MIRSymbol *GetLiteralVar(const std::u16string &strU16) const; + static std::string GetLiteralGlobalName(const std::u16string &strU16); + static bool IsAllASCII(const std::u16string &strU16); + void GenStringMetaClassVar(); + + private: + using DWBuffer = struct { + uint64_t data; + uint32_t pos; + }; + + MIRArrayType *ConstructArrayType4Str(const std::u16string &strU16, bool compressible) const; + MIRAggConst *CreateByteArrayConst(const std::u16string &strU16, MIRArrayType &byteArrayType, bool compressible) const; + static std::vector SwapBytes(const std::u16string &strU16); + static uint16 ExchangeBytesPosition(uint16 input); + template + static void AddDataIntoByteArray(MIRAggConst &newConst, MemPool &mp, DWBuffer &buf, T data, MIRType &uInt64); + static void FinishByteArray(MIRAggConst &newConst, MemPool &mp, DWBuffer &buf, MIRType &uInt64); + + MIRModule &module; + MIRBuilder &mirBuilder; + bool useCompressedJavaString = true; + std::unordered_set preloadSet; + std::unordered_set literalSet; + std::unordered_set fieldValueSet; + std::map literalMap; + MIRType *typeString = nullptr; + MIRSymbol *stringMetaClassSymbol = nullptr; +}; +} // namespace maple +#endif // HIR2MPL_INCLUDE_COMMON_FE_JAVA_STRING_MANAGER_H \ No newline at end of file diff --git a/src/hir2mpl/common/include/fe_macros.h b/src/hir2mpl/common/include/fe_macros.h new file mode 100644 index 0000000000000000000000000000000000000000..c607f79fd4ec0fabebcc2948845c4508ff833eec --- /dev/null +++ b/src/hir2mpl/common/include/fe_macros.h @@ -0,0 +1,82 @@ +/* + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef HIR2MPL_INCLUDE_FE_MACROS_H +#define HIR2MPL_INCLUDE_FE_MACROS_H +#include +#include +#include +#include +#include "fe_options.h" +#include "mpl_logging.h" + +#define TIMEIT(message, stmt) do { \ + if (FEOptions::GetInstance().IsDumpTime()) { \ + struct timeval start, end; \ + (void)gettimeofday(&start, NULL); \ + stmt; \ + (void)gettimeofday(&end, NULL); \ + float t = ((end.tv_sec * 1000000 + end.tv_usec) - (start.tv_sec * 1000000 + start.tv_usec)) * 1.0 / 1000000.0; \ + INFO(kLncInfo, "[TIME] %s: %.3f sec", message, t); \ + } else { \ + stmt; \ + } \ +} while (0) \ + +#define PHASE_TIMER(message, stmt) do { \ + if (FEOptions::GetInstance().IsDumpPhaseTime()) { \ + struct timespec start = {0, 0}; \ + struct timespec end = {0, 0}; \ + (void)clock_gettime(CLOCK_REALTIME, &start); \ + stmt; \ + (void)clock_gettime(CLOCK_REALTIME, &end); \ + constexpr int64_t nsInS = 1000000000; \ + int64_t t = (end.tv_sec - start.tv_sec) * nsInS + (end.tv_nsec - start.tv_nsec); \ + INFO(kLncInfo, "[PhaseTime] %s: %lld ns", message, t); \ + } else { \ + stmt; \ + } \ +} while (0) \ + +#define FE_INFO_LEVEL(level, fmt, ...) do { \ + if (FEOptions::GetInstance().GetDumpLevel() >= level) { \ + INFO(kLncInfo, fmt, ##__VA_ARGS__); \ + } \ +} while (0) \ + +#define BIT_XOR(v1, v2) (((v1) && (!(v2))) || ((!(v1)) && (v2))) + +#define TRY_DO(run) do { \ + CHECK_FATAL(run, "%s", #run); \ +} while (0) + +// for user +#define FE_ERR(num, loc, fmt, ...) do { \ + if (PRINT_LEVEL_USER <= kLlErr) { \ + std::ostringstream ss; \ + ss << FEManager::GetModule().GetFileNameFromFileNum(loc.fileIdx) << ":" << loc.line << " error: " << fmt; \ + logInfo.InsertUserErrorMessage( \ + loc, logInfo.EmitLogToStringForUser(num, kLlErr, ss.str().c_str(), ##__VA_ARGS__)); \ + } \ +} while (0) + +#define FE_WARN(num, loc, fmt, ...) do { \ + if (PRINT_LEVEL_USER <= kLlWarn) { \ + std::ostringstream ss; \ + ss << FEManager::GetModule().GetFileNameFromFileNum(loc.fileIdx) << ":" << loc.line << " warning: " << fmt; \ + logInfo.InsertUserWarnMessage( \ + loc, logInfo.EmitLogToStringForUser(num, kLlWarn, ss.str().c_str(), ##__VA_ARGS__)); \ + } \ +} while (0) +#endif // ~HIR2MPL_INCLUDE_FE_MACROS_H \ No newline at end of file diff --git a/src/hir2mpl/common/include/fe_manager.h b/src/hir2mpl/common/include/fe_manager.h new file mode 100644 index 0000000000000000000000000000000000000000..3ca99cecc49af3490beade5533e018cf14cdc338 --- /dev/null +++ b/src/hir2mpl/common/include/fe_manager.h @@ -0,0 +1,159 @@ +/* + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
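// Editor's note: usage sketch for the timing and logging macros defined above.
// ParseAllInputFiles and EmitOutput are hypothetical callees used only to show where
// the wrapped statement goes; the macros themselves come straight from fe_macros.h.
void CompileOnePhase(unsigned int numFiles) {
  TIMEIT("parse input", ParseAllInputFiles());  // wall-clock seconds via gettimeofday,
                                                // emitted only when IsDumpTime() is set
  PHASE_TIMER("emit mpl", EmitOutput());        // nanoseconds via clock_gettime,
                                                // gated on IsDumpPhaseTime()
  FE_INFO_LEVEL(FEOptions::kDumpLevelInfoDetail, "processed %u files", numFiles);
}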
+ */ +#ifndef HIR2MPL_INCLUDE_COMMON_FE_MANAGER_H +#define HIR2MPL_INCLUDE_COMMON_FE_MANAGER_H +#include +#include "mir_module.h" +#include "mir_builder.h" +#include "fe_type_manager.h" +#include "fe_java_string_manager.h" +#include "fe_function.h" + +namespace maple { +class FEManager { + public: + static FEManager &GetManager() { + ASSERT(manager != nullptr, "manager is not initialize"); + return *manager; + } + + static FETypeManager &GetTypeManager() { + ASSERT(manager != nullptr, "manager is not initialize"); + return manager->typeManager; + } + + static FEJavaStringManager &GetJavaStringManager() { + ASSERT(manager != nullptr, "manager is not initialize"); + return manager->javaStringManager; + } + + static MIRBuilder &GetMIRBuilder() { + ASSERT(manager != nullptr, "manager is not initialize"); + return manager->builder; + } + + static MIRModule &GetModule() { + ASSERT(manager != nullptr, "manager is not initialize"); + return manager->module; + } + + static void SetCurrentFEFunction(FEFunction &func) { + ASSERT(manager != nullptr, "manager is not initialize"); + manager->curFEFunction = &func; + } + + static FEFunction &GetCurrentFEFunction() { + ASSERT(manager != nullptr, "manager is not initialize"); + CHECK_NULL_FATAL(manager->curFEFunction); + return *manager->curFEFunction; + } + + static void Init(MIRModule &moduleIn) { + manager = new FEManager(moduleIn); + } + + static void Release() { + if (manager != nullptr) { + manager->typeManager.ReleaseMemPool(); + delete manager; + manager = nullptr; + } + } + + StructElemNameIdx *GetFieldStructElemNameIdx(uint64 index) { + auto it = mapFieldStructElemNameIdx.find(index); + if (it != mapFieldStructElemNameIdx.end()) { + return it->second; + } + return nullptr; + } + + void SetFieldStructElemNameIdx(uint64 index, StructElemNameIdx &structElemNameIdx) { + std::lock_guard lk(feManagerMapStructElemNameIdxMtx); + mapFieldStructElemNameIdx[index] = &structElemNameIdx; + } + + StructElemNameIdx *GetMethodStructElemNameIdx(uint64 index) { + auto it = mapMethodStructElemNameIdx.find(index); + if (it != mapMethodStructElemNameIdx.end()) { + return it->second; + } + return nullptr; + } + + void SetMethodStructElemNameIdx(uint64 index, StructElemNameIdx &structElemNameIdx) { + std::lock_guard lk(feManagerMapStructElemNameIdxMtx); + mapMethodStructElemNameIdx[index] = &structElemNameIdx; + } + + MemPool *GetStructElemMempool() { + return structElemMempool; + } + + void ReleaseStructElemMempool() { + FEUtils::DeleteMempoolPtr(structElemMempool); + } + + uint32 RegisterSourceFileIdx(const GStrIdx &strIdx) { + const auto it = sourceFileIdxMap.find(strIdx); + if (it != sourceFileIdxMap.end()) { + return it->second; + } else { + // make src files start from #2, #1 is mpl file + size_t num = sourceFileIdxMap.size() + 2; + (void)sourceFileIdxMap.emplace(strIdx, num); +#ifdef DEBUG + idxSourceFileMap.emplace(num, strIdx); +#endif + module.PushbackFileInfo(MIRInfoPair(strIdx, num)); + return static_cast(num); + } + } + + std::string GetSourceFileNameFromIdx(uint32 idx) const { + auto it = idxSourceFileMap.find(idx); + if (it != idxSourceFileMap.end()) { + return GlobalTables::GetStrTable().GetStringFromStrIdx(it->second); + } + return "unknown"; + } + + private: + static FEManager *manager; + MIRModule &module; + FETypeManager typeManager; + MIRBuilder builder; + FEFunction *curFEFunction = nullptr; + FEJavaStringManager javaStringManager; + MemPool *structElemMempool; + MapleAllocator structElemAllocator; + std::unordered_map mapFieldStructElemNameIdx; + 
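// Editor's note: lifecycle sketch for the FEManager singleton declared in this header.
// Init/Release bracket a compilation, and source files are registered starting at index 2
// because index 1 is reserved for the .mpl file (see the comment in RegisterSourceFileIdx).
void RunWithFEManager(MIRModule &module) {
  FEManager::Init(module);
  GStrIdx nameIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName("Foo.java");
  uint32 fileIdx = FEManager::GetManager().RegisterSourceFileIdx(nameIdx);  // returns >= 2
  (void)fileIdx;
  MIRBuilder &builder = FEManager::GetMIRBuilder();
  (void)builder;
  FEManager::Release();  // releases the type manager's mempool and the singleton itself
}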
std::unordered_map mapMethodStructElemNameIdx; + std::map sourceFileIdxMap; + std::map idxSourceFileMap; + explicit FEManager(MIRModule &moduleIn) + : module(moduleIn), + typeManager(module), + builder(&module), + javaStringManager(moduleIn, builder), + structElemMempool(FEUtils::NewMempool("MemPool for StructElemNameIdx", false /* isLcalPool */)), + structElemAllocator(structElemMempool) {} + ~FEManager() { + structElemMempool = nullptr; + } + mutable std::mutex feManagerMapStructElemNameIdxMtx; +}; +} // namespace maple +#endif // HIR2MPL_INCLUDE_COMMON_FE_MANAGER_H \ No newline at end of file diff --git a/src/hir2mpl/common/include/fe_options.h b/src/hir2mpl/common/include/fe_options.h new file mode 100644 index 0000000000000000000000000000000000000000..d115838d32ead18bc749980137f21384201710eb --- /dev/null +++ b/src/hir2mpl/common/include/fe_options.h @@ -0,0 +1,581 @@ +/* + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef HIR2MPL_INCLUDE_COMMON_FE_OPTIONS_H +#define HIR2MPL_INCLUDE_COMMON_FE_OPTIONS_H +#include +#include +#include +#include +#include "mpl_logging.h" +#include "types_def.h" + +namespace maple { +class FEOptions { + public: + static const int kDumpLevelDisable = 0; + static const int kDumpLevelInfo = 1; + static const int kDumpLevelInfoDetail = 2; + static const int kDumpLevelInfoDebug = 3; + + enum ModeJavaStaticFieldName { + kNoType = 0, // without type + kAllType, // with type + kSmart // auto anti-proguard + }; + + enum ModeCollectDepTypes { + kAll = 0, // collect all dependent types + kFunc // collect func dependent types + }; + + enum ModeDepSameNamePolicy { + kSys = 0, // load type form sys when on-demand load same name type + kSrc // load type form src when on-demand load same name type + }; + + enum TypeInferKind { + kNo = 0, + kRoahAlgorithm, + kLinearScan + }; + + static FEOptions &GetInstance() { + return options; + } + + void Init() { + isJBCUseImpreciseType = true; + } + + // input control options + void AddInputClassFile(const std::string &fileName); + const std::list &GetInputClassFiles() const { + return inputClassFiles; + } + + void AddInputJarFile(const std::string &fileName); + const std::list &GetInputJarFiles() const { + return inputJarFiles; + } + + void AddInputDexFile(const std::string &fileName); + const std::vector &GetInputDexFiles() const { + return inputDexFiles; + } + + void AddInputASTFile(const std::string &fileName); + const std::vector &GetInputASTFiles() const { + return inputASTFiles; + } + + void AddInputMASTFile(const std::string &fileName); + const std::vector &GetInputMASTFiles() const { + return inputMASTFiles; + } + + void AddInputMpltFileFromSys(const std::string &fileName) { + inputMpltFilesFromSys.push_back(fileName); + } + + const std::list &GetInputMpltFilesFromSys() const { + return inputMpltFilesFromSys; + } + + void AddInputMpltFileFromApk(const std::string &fileName) { + inputMpltFilesFromApk.push_back(fileName); + } + + const std::list &GetInputMpltFilesFromApk() const { + return 
inputMpltFilesFromApk; + } + + void AddInputMpltFile(const std::string &fileName) { + inputMpltFiles.push_back(fileName); + } + + const std::list &GetInputMpltFiles() const { + return inputMpltFiles; + } + + // On Demand Type Creation + void SetXBootClassPath(const std::string &fileName) { + strXBootClassPath = fileName; + } + + const std::string &GetXBootClassPath() const { + return strXBootClassPath; + } + + void SetClassLoaderContext(const std::string &fileName) { + strClassLoaderContext = fileName; + } + + const std::string &GetClassLoaderContext() const { + return strClassLoaderContext; + } + + void SetCompileFileName(const std::string &fileName) { + strCompileFileName = fileName; + } + + const std::string &GetCompileFileName() const { + return strCompileFileName; + } + + void SetModeCollectDepTypes(ModeCollectDepTypes mode) { + modeCollectDepTypes = mode; + } + + ModeCollectDepTypes GetModeCollectDepTypes() const { + return modeCollectDepTypes; + } + + void SetModeDepSameNamePolicy(ModeDepSameNamePolicy mode) { + modeDepSameNamePolicy = mode; + } + + ModeDepSameNamePolicy GetModeDepSameNamePolicy() const { + return modeDepSameNamePolicy; + } + + // output control options + void SetIsGenMpltOnly(bool flag) { + isGenMpltOnly = flag; + } + + bool IsGenMpltOnly() const { + return isGenMpltOnly; + } + + void SetIsGenAsciiMplt(bool flag) { + isGenAsciiMplt = flag; + } + + bool IsGenAsciiMplt() const { + return isGenAsciiMplt; + } + + void SetOutputPath(const std::string &path) { + outputPath = path; + } + + const std::string &GetOutputPath() const { + return outputPath; + } + + void SetOutputName(const std::string &name) { + outputName = name; + } + + const std::string &GetOutputName() const { + return outputName; + } + + void EnableDumpInstComment() { + isDumpInstComment = true; + } + + void DisableDumpInstComment() { + isDumpInstComment = false; + } + + bool IsDumpInstComment() const { + return isDumpInstComment; + } + + void SetNoMplFile() { + isNoMplFile = true; + } + + bool IsNoMplFile() const { + return isNoMplFile; + } + + // debug info control options + void SetDumpLevel(int level) { + dumpLevel = level; + } + + int GetDumpLevel() const { + return dumpLevel; + } + + void SetIsDumpTime(bool flag) { + isDumpTime = flag; + } + + bool IsDumpTime() const { + return isDumpTime; + } + + void SetIsDumpComment(bool flag) { + isDumpComment = flag; + } + + bool IsDumpComment() const { + return isDumpComment; + } + + void SetIsDumpLOC(bool flag) { + isDumpLOC = flag; + } + + bool IsDumpLOC() const { + return isDumpLOC; + } + + void SetDbgFriendly(bool flag) { + isDbgFriendly = flag; + // set isDumpLOC if flag is true + isDumpLOC = flag ? 
flag : isDumpLOC; + } + + bool IsDbgFriendly() const { + return isDbgFriendly; + } + + void SetIsDumpPhaseTime(bool flag) { + isDumpPhaseTime = flag; + } + + bool IsDumpPhaseTime() const { + return isDumpPhaseTime; + } + + void SetIsDumpPhaseTimeDetail(bool flag) { + isDumpPhaseTimeDetail = flag; + } + + bool IsDumpPhaseTimeDetail() const { + return isDumpPhaseTimeDetail; + } + + // java compiler options + void SetModeJavaStaticFieldName(ModeJavaStaticFieldName mode) { + modeJavaStaticField = mode; + } + + ModeJavaStaticFieldName GetModeJavaStaticFieldName() const { + return modeJavaStaticField; + } + + void SetIsJBCUseImpreciseType(bool flag) { + isJBCUseImpreciseType = flag; + } + + bool IsJBCUseImpreciseType() const { + return isJBCUseImpreciseType; + } + + void SetIsJBCInfoUsePathName(bool flag) { + isJBCInfoUsePathName = flag; + } + + bool IsJBCInfoUsePathName() const { + return isJBCInfoUsePathName; + } + + void SetIsDumpJBCStmt(bool flag) { + isDumpJBCStmt = flag; + } + + bool IsDumpJBCStmt() const { + return isDumpJBCStmt; + } + + void SetIsDumpFEIRBB(bool flag) { + isDumpBB = flag; + } + + bool IsDumpFEIRBB() const { + return isDumpBB; + } + + void AddFuncNameForDumpCFGGraph(const std::string &funcName) { + funcNamesForDumpCFGGraph.insert(funcName); + } + + bool IsDumpFEIRCFGGraph(const std::string &funcName) const { + return funcNamesForDumpCFGGraph.find(funcName) != + funcNamesForDumpCFGGraph.end(); + } + + void SetIsDumpJBCAll(bool flag) { + isDumpJBCAll = flag; + } + + bool IsDumpJBCAll() const { + return isDumpJBCAll; + } + + void SetIsDumpJBCErrorOnly(bool flag) { + isDumpJBCErrorOnly = flag; + } + + bool IsDumpJBCErrorOnly() const { + return isDumpJBCErrorOnly; + } + + void SetIsEmitJBCLocalVarInfo(bool flag) { + isEmitJBCLocalVarInfo = flag; + } + + bool IsEmitJBCLocalVarInfo() const { + return isEmitJBCLocalVarInfo; + } + + // parallel + void SetNThreads(uint32 n) { + nthreads = n; + } + + uint32 GetNThreads() const { + return nthreads; + } + + void SetDumpThreadTime(bool arg) { + dumpThreadTime = arg; + } + + bool IsDumpThreadTime() const { + return dumpThreadTime; + } + + void AddDumpJBCFuncName(const std::string &funcName) { + if (!funcName.empty()) { + CHECK_FATAL(dumpJBCFuncNames.insert(funcName).second, "dumpJBCFuncNames insert failed"); + } + } + + const std::set &GetDumpJBCFuncNames() const { + return dumpJBCFuncNames; + } + + bool IsDumpJBCFuncName(const std::string &funcName) const { + return dumpJBCFuncNames.find(funcName) != dumpJBCFuncNames.end(); + } + + void SetTypeInferKind(TypeInferKind arg) { + typeInferKind = arg; + } + + TypeInferKind GetTypeInferKind() const { + return typeInferKind; + } + + bool IsRC() const { + return isRC; + } + + void SetRC(bool arg) { + isRC = arg; + } + + bool IsNoBarrier() const { + return isNoBarrier; + } + + void SetNoBarrier(bool arg) { + isNoBarrier = arg; + } + + bool HasJBC() const { + return ((inputClassFiles.size() != 0) || (inputJarFiles.size() != 0)); + } + + void SetIsAOT(bool flag) { + isAOT = flag; + } + + bool IsAOT() const { + return isAOT; + } + + void SetUseSignedChar(bool flag) { + useSignedChar = flag; + } + + bool IsUseSignedChar() const { + return useSignedChar; + } + + void SetBigEndian(bool flag) { + isBigEndian = flag; + } + + bool IsBigEndian() const { + return isBigEndian; + } + + void SetNpeCheckDynamic(bool flag) { + isNpeCheckDynamic = flag; + } + + bool IsNpeCheckDynamic() const { + return isNpeCheckDynamic; + } + + void SetBoundaryCheckDynamic(bool flag) { + isBoundaryCheckDynamic = flag; + } + + 
bool IsBoundaryCheckDynamic() const { + return isBoundaryCheckDynamic; + } + + void SetSafeRegion(bool flag) { + isEnableSafeRegion = flag; + } + + bool IsEnableSafeRegion() const { + return isEnableSafeRegion; + } + + void SetDefaultSafe(bool flag) { + isDefaultSafe = flag; + } + + bool IsDefaultSafe() const { + if (IsEnableSafeRegion()) { + return isDefaultSafe; + } + return false; + } + + void SetO2(bool flag) { + isO2 = flag; + } + + bool IsO2() const { + return isO2; + } + + void SetSimplifyShortCircuit(bool flag) { + isSimplifyShortCircuit = flag; + } + + bool IsSimplifyShortCircuit() const { + return isSimplifyShortCircuit; + } + + void SetEnableVariableArray(bool flag) { + isEnableVariableArray = flag; + } + + bool IsEnableVariableArray() const { + return isEnableVariableArray; + } + + void SetFuncInlineSize(uint32 size) { + funcInlineSize = size; + } + + uint32 GetFuncInlineSize() const { + return funcInlineSize; + } + + void SetWPAA(bool flag) { + wpaa = flag; + } + + bool GetWPAA() const { + return wpaa; + } + + private: + static FEOptions options; + // input control options + std::list inputClassFiles; + std::list inputJarFiles; + std::vector inputDexFiles; + std::vector inputASTFiles; + std::vector inputMASTFiles; + std::list inputMpltFilesFromSys; + std::list inputMpltFilesFromApk; + std::list inputMpltFiles; + + // On Demand Type Creation + std::string strXBootClassPath; + std::string strClassLoaderContext; + std::string strCompileFileName; + ModeCollectDepTypes modeCollectDepTypes = ModeCollectDepTypes::kFunc; + ModeDepSameNamePolicy modeDepSameNamePolicy = ModeDepSameNamePolicy::kSys; + + // output control options + bool isGenMpltOnly; + bool isGenAsciiMplt; + std::string outputPath; + std::string outputName; + bool isDumpInstComment = false; + bool isNoMplFile = false; + + // debug info control options + int dumpLevel; + bool isDumpTime; + bool isDumpComment = false; + bool isDumpLOC = true; + bool isDbgFriendly = false; + bool isDumpPhaseTime = false; + bool isDumpPhaseTimeDetail = false; + + // java compiler options + ModeJavaStaticFieldName modeJavaStaticField = ModeJavaStaticFieldName::kNoType; + bool isJBCUseImpreciseType = false; + bool isJBCInfoUsePathName = false; + bool isDumpJBCStmt = false; + bool isDumpJBCAll = false; + bool isDumpJBCErrorOnly = false; + std::set dumpJBCFuncNames; + bool isEmitJBCLocalVarInfo = false; + + // bc compiler options + bool isRC = false; + bool isNoBarrier = false; + bool isO2 = false; + bool isSimplifyShortCircuit = false; + bool isEnableVariableArray = false; + + // ast compiler options + bool useSignedChar = false; + bool isBigEndian = false; + + // general stmt/bb/cfg debug options + bool isDumpBB = false; + std::set funcNamesForDumpCFGGraph; + + // parallel + uint32 nthreads; + bool dumpThreadTime; + + // type-infer + TypeInferKind typeInferKind = kLinearScan; + + // symbol resolve + bool isAOT = false; + + // EnhanceC + bool isNpeCheckDynamic = false; + bool isBoundaryCheckDynamic = false; + bool isEnableSafeRegion = false; + bool isDefaultSafe = false; + + uint32 funcInlineSize = 0; + bool wpaa = false; + + FEOptions(); + ~FEOptions() = default; +}; +} // namespace maple +#endif // HIR2MPL_INCLUDE_COMMON_FE_OPTIONS_H diff --git a/src/hir2mpl/common/include/fe_struct_elem_info.h b/src/hir2mpl/common/include/fe_struct_elem_info.h new file mode 100644 index 0000000000000000000000000000000000000000..98130125bf7be4f71acfb1b953f26f8b5c01a008 --- /dev/null +++ b/src/hir2mpl/common/include/fe_struct_elem_info.h @@ -0,0 +1,281 @@ +/* 
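// Editor's note: a small sketch of how the FEOptions singleton above might be configured
// by a command-line front end and queried elsewhere; the concrete option values are
// illustrative only.
void ConfigureFrontend() {
  FEOptions &opts = FEOptions::GetInstance();
  opts.Init();                               // currently just enables imprecise JBC types
  opts.AddInputASTFile("foo.ast");
  opts.SetOutputName("foo.mpl");
  opts.SetDumpLevel(FEOptions::kDumpLevelInfo);
  opts.SetNThreads(4);
  if (opts.IsDumpTime()) {
    // timing dumps are gated on this flag by TIMEIT/PHASE_TIMER in fe_macros.h
  }
}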
+ * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef HIR2MPL_INCLUDE_COMMON_FE_STRUCT_ELEM_INFO_H +#define HIR2MPL_INCLUDE_COMMON_FE_STRUCT_ELEM_INFO_H +#include +#include +#include "global_tables.h" +#include "fe_configs.h" +#include "feir_type.h" + +namespace maple { +struct StructElemNameIdx { + GStrIdx klass; + GStrIdx elem; + GStrIdx type; + GStrIdx full; + + StructElemNameIdx(const GStrIdx &argKlass, const GStrIdx &argElem, const GStrIdx &argType, const GStrIdx &argFull) + : klass(argKlass), elem(argElem), type(argType), full(argFull) {} + + StructElemNameIdx(const std::string &klassStr, const std::string &elemStr, const std::string &typeStr) { + const std::string &fullName = klassStr + "|" + elemStr + "|" + typeStr; + klass = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(namemangler::EncodeName(klassStr)); + elem = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(namemangler::EncodeName(elemStr)); + type = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(namemangler::EncodeName(typeStr)); + full = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(namemangler::EncodeName(fullName)); + } + + explicit StructElemNameIdx(const std::string &funcStr) { + klass = GStrIdx(0); + elem = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(funcStr); + type = GStrIdx(0); + full = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(funcStr); + } + ~StructElemNameIdx() = default; +}; + +class FEStructElemInfo { + public: + FEStructElemInfo(MapleAllocator &allocatorIn, const StructElemNameIdx &argStructElemNameIdx, + MIRSrcLang argSrcLang, bool argIsStatic); + virtual ~FEStructElemInfo() = default; + + void Prepare(MIRBuilder &mirBuilder, bool argIsStatic) { + PrepareImpl(mirBuilder, argIsStatic); + } + + const std::string &GetStructName() const { + return GlobalTables::GetStrTable().GetStringFromStrIdx(structElemNameIdx.klass); + } + + const std::string &GetElemName() const { + return GlobalTables::GetStrTable().GetStringFromStrIdx(structElemNameIdx.elem); + } + + const GStrIdx &GetElemNameIdx() const { + return structElemNameIdx.elem; + } + + const std::string &GetSignatureName() const { + return GlobalTables::GetStrTable().GetStringFromStrIdx(structElemNameIdx.type); + } + + bool IsStatic() const { + return isStatic; + } + + bool IsMethod() const { + return isMethod; + } + + bool IsDefined() const { + return isDefined; + } + + void SetDefined() { + isDefined = true; + } + + void SetUndefined() { + isDefined = false; + } + + bool IsFromDex() const { + return isFromDex; + } + + void SetFromDex() { + isFromDex = true; + } + + MIRSrcLang GetSrcLang() const { + return srcLang; + } + + void SetSrcLang(MIRSrcLang lang) { + srcLang = lang; + } + + UniqueFEIRType GetActualContainerType() const; + const std::string GetActualContainerName() const { + return actualContainer.c_str(); + } + + LLT_PROTECTED: + virtual void PrepareImpl(MIRBuilder &mirBuilder, bool argIsStatic) = 0; + + bool isStatic : 1; + bool isMethod : 1; + bool isDefined : 1; + bool 
isFromDex : 1; + bool isPrepared : 1; + MIRSrcLang srcLang : 8; + MapleAllocator &allocator; + StructElemNameIdx structElemNameIdx; + MapleString actualContainer; // in maple format +}; + +using UniqueFEStructElemInfo = std::unique_ptr; + +class FEStructFieldInfo : public FEStructElemInfo { + public: + FEStructFieldInfo(MapleAllocator &allocatorIn, const StructElemNameIdx &argStructElemNameIdx, + MIRSrcLang argSrcLang, bool argIsStatic); + ~FEStructFieldInfo() override { + fieldType = nullptr; + } + GStrIdx GetFieldNameIdx() const { + return fieldNameIdx; + } + + FieldID GetFieldID() const { + return fieldID; + } + + void SetFieldID(const FieldID argFieldID) { + fieldID = argFieldID; + } + + const FEIRType *GetType() const { + return fieldType; + } + + bool IsVolatile() const { + return isVolatile; + } + + LLT_PROTECTED: + void PrepareImpl(MIRBuilder &mirBuilder, bool argIsStatic) override; + + LLT_PRIVATE: + void LoadFieldType(); + void LoadFieldTypeJava(); + void PrepareStaticField(const MIRStructType &structType); + void PrepareNonStaticField(MIRBuilder &mirBuilder); + bool SearchStructFieldJava(MIRStructType &structType, MIRBuilder &mirBuilder, bool argIsStatic, + bool allowPrivate = true); + bool SearchStructFieldJava(const TyIdx &tyIdx, MIRBuilder &mirBuilder, bool argIsStatic, bool allowPrivate = true); + bool CompareFieldType(const FieldPair &fieldPair) const; + + FEIRType *fieldType; + GStrIdx fieldNameIdx; + FieldID fieldID; + bool isVolatile; +}; + +class FEStructMethodInfo : public FEStructElemInfo { + public: + FEStructMethodInfo(MapleAllocator &allocatorIn, const StructElemNameIdx &argStructElemNameIdx, + MIRSrcLang argSrcLang, bool argIsStatic); + ~FEStructMethodInfo() override; + PUIdx GetPuIdx() const; + bool IsConstructor() const { + return isConstructor; + } + + bool IsReturnVoid() const { + return isReturnVoid; + } + + void SetReturnVoid() { + isReturnVoid = true; + } + + bool IsJavaPolymorphicCall() const { + return isJavaPolymorphicCall; + } + + bool IsJavaDynamicCall() const { + return isJavaDynamicCall; + } + + void SetJavaDyamicCall() { + isJavaDynamicCall = true; + } + + void UnsetJavaDynamicCall() { + isJavaDynamicCall = false; + } + + const FEIRType *GetReturnType() const { + return retType; + } + + void SetReturnType(FEIRType *type) { + retType = type; + } + + const FEIRType *GetOwnerType() const { + return ownerType; + } + + const MapleVector &GetArgTypes() const { + return argTypes; + } + + void SetArgTypes(const MapleVector &argTypesIn) { + argTypes = argTypesIn; + } + + void SetArgsName(const std::vector &names) { + argNames = names; + } + + const std::vector &GetArgsName() const { + return argNames; + } + + void SetFuncAttrs(const FuncAttrs &attrs) { + funcAttrs = attrs; + } + + const FuncAttrs &GetFuncAttrs() const { + return funcAttrs; + } + + MIRFunction *GetMirFunc() const { + CHECK_NULL_FATAL(mirFunc); + return mirFunc; + } + + LLT_PROTECTED: + void PrepareImpl(MIRBuilder &mirBuilder, bool argIsStatic) override; + + LLT_PRIVATE: + void LoadMethodType(); + void LoadMethodTypeJava(); + void PrepareMethod(); + void PrepareMethodC(); + void PrepareImplJava(MIRBuilder &mirBuilder, bool argIsStatic); + bool SearchStructMethodJava(MIRStructType &structType, MIRBuilder &mirBuilder, bool argIsStatic, + bool allowPrivate = true); + bool SearchStructMethodJavaInParent(MIRStructType &structType, MIRBuilder &mirBuilder, bool argIsStatic); + bool SearchStructMethodJava(const TyIdx &tyIdx, MIRBuilder &mirBuilder, bool argIsStatic, bool allowPrivate = true); + + bool 
isReturnVoid; + bool isConstructor; + bool isJavaPolymorphicCall; + bool isJavaDynamicCall; + GStrIdx methodNameIdx; + FEIRType *retType; + FEIRType *ownerType; + MIRFunction *mirFunc; + MapleVector argTypes; + std::vector argNames; + FuncAttrs funcAttrs; +}; +} // namespace maple +#endif // HIR2MPL_INCLUDE_COMMON_FE_STRUCT_ELEM_INFO_H diff --git a/src/hir2mpl/common/include/fe_timer.h b/src/hir2mpl/common/include/fe_timer.h new file mode 100644 index 0000000000000000000000000000000000000000..7ff67103e1047e442d7fd697f83f382494242f70 --- /dev/null +++ b/src/hir2mpl/common/include/fe_timer.h @@ -0,0 +1,66 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef HIR2MPL_INCLUDE_COMMON_FE_TIMER_H +#define HIR2MPL_INCLUDE_COMMON_FE_TIMER_H +#include +#include +#include "mpl_timer.h" +#include "mpl_logging.h" +#include "fe_options.h" + +namespace maple { +class FETimer { + public: + FETimer() = default; + ~FETimer() = default; + void Start() { + if (!FEOptions::GetInstance().IsDumpTime()) { + return; + } + timer.Start(); + } + + void StartAndDump(const std::string &message) { + if (!FEOptions::GetInstance().IsDumpTime()) { + return; + } + CHECK_FATAL(!message.empty(), "message is empty"); + timer.Start(); + INFO(kLncInfo, "[Time] %s: starting...", message.c_str()); + } + + void StopAndDumpTimeMS(const std::string &message) { + if (!FEOptions::GetInstance().IsDumpTime()) { + return; + } + CHECK_FATAL(!message.empty(), "message is empty"); + timer.Stop(); + INFO(kLncInfo, "[Time] %s: finished (%ld ms)", message.c_str(), timer.ElapsedMilliseconds()); + } + + void StopAndDumpTimeS(const std::string &message) { + if (!FEOptions::GetInstance().IsDumpTime()) { + return; + } + CHECK_FATAL(!message.empty(), "message is empty"); + timer.Stop(); + INFO(kLncInfo, "[Time] %s: finished (%lf s)", message.c_str(), timer.ElapsedMilliseconds() / 1000.0); + } + + private: + MPLTimer timer; +}; // class FETimer +} // namespace maple +#endif // HIR2MPL_INCLUDE_COMMON_FE_TIMER_H \ No newline at end of file diff --git a/src/hir2mpl/common/include/fe_timer_ns.h b/src/hir2mpl/common/include/fe_timer_ns.h new file mode 100644 index 0000000000000000000000000000000000000000..f8a73d19888eb7ad8de097a5b79b87ee7cf6d7c8 --- /dev/null +++ b/src/hir2mpl/common/include/fe_timer_ns.h @@ -0,0 +1,34 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
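// Editor's note: usage sketch for FETimer; both calls are no-ops unless
// FEOptions::GetInstance().IsDumpTime() is set, as the guards in the class above show.
// The message string is illustrative.
void TimedWork() {
  FETimer timer;
  timer.StartAndDump("load mplt files");
  // ... do the work being measured ...
  timer.StopAndDumpTimeMS("load mplt files");
}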
+ */ +#ifndef HIR2MPL_INCLUDE_COMMON_FE_TIMER_NS_H +#define HIR2MPL_INCLUDE_COMMON_FE_TIMER_NS_H +#include +#include + +namespace maple { +class FETimerNS { + public: + FETimerNS() = default; + ~FETimerNS() = default; + void Start(); + void Stop(); + int64_t GetTimeNS() const; + + private: + struct timespec timeStart = {0, 0}; + struct timespec timeEnd = {0, 0}; +}; // class FETimerNS +} // namespace maple +#endif // HIR2MPL_INCLUDE_COMMON_FE_TIMER_NS_H \ No newline at end of file diff --git a/src/hir2mpl/common/include/fe_type_hierarchy.h b/src/hir2mpl/common/include/fe_type_hierarchy.h new file mode 100644 index 0000000000000000000000000000000000000000..2c1899fd3ede2ccf6f59279c9e48839e2c4c2144 --- /dev/null +++ b/src/hir2mpl/common/include/fe_type_hierarchy.h @@ -0,0 +1,41 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef HIR2MPL_INCLUDE_FE_TYPE_HIERARCHY_H +#define HIR2MPL_INCLUDE_FE_TYPE_HIERARCHY_H +#include +#include +#include "mir_type.h" + +namespace maple { +class FETypeHierarchy { + public: + static FETypeHierarchy &GetInstance() { + return instance; + } + void InitByGlobalTable(); + void AddMIRType(const MIRClassType &type); + void AddMIRType(const MIRInterfaceType &type); + bool IsParentOf(const GStrIdx &parentIdx, const GStrIdx &childIdx); + + private: + static FETypeHierarchy instance; + std::map> mapIdxChildParent; + std::set> cache; + FETypeHierarchy() = default; + ~FETypeHierarchy() = default; + void AddParentChildRelation(const GStrIdx &parentIdx, const GStrIdx &childIdx); +}; // class FETypeHierarchy +} // namespace maple +#endif // HIR2MPL_INCLUDE_FE_TYPE_HIERARCHY_H \ No newline at end of file diff --git a/src/hir2mpl/common/include/fe_type_manager.h b/src/hir2mpl/common/include/fe_type_manager.h new file mode 100644 index 0000000000000000000000000000000000000000..3d68a6c089b7a761cce9c688fec9ca2ce47d9c73 --- /dev/null +++ b/src/hir2mpl/common/include/fe_type_manager.h @@ -0,0 +1,341 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
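// Editor's note: hedged sketch of the FETypeHierarchy query interface declared above.
// Whether InitByGlobalTable must be called once per compilation or before every query
// is not documented here; the Throwable example is illustrative.
bool IsThrowableSubClass(const GStrIdx &childIdx) {
  FETypeHierarchy &hierarchy = FETypeHierarchy::GetInstance();
  hierarchy.InitByGlobalTable();  // build the child -> parent map from the global type table
  GStrIdx throwableIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(
      namemangler::EncodeName("Ljava/lang/Throwable;"));
  return hierarchy.IsParentOf(throwableIdx, childIdx);
}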
+ */ +#ifndef HIR2MPL_INCLUDE_COMMON_FE_TYPE_MANAGER_H +#define HIR2MPL_INCLUDE_COMMON_FE_TYPE_MANAGER_H +#include +#include +#include +#include "mempool_allocator.h" +#include "mir_module.h" +#include "mir_type.h" +#include "mir_builder.h" +#include "feir_type.h" +#include "fe_struct_elem_info.h" +#include "fe_utils.h" +#include "feir_stmt.h" + +namespace maple { +enum FETypeFlag : uint16 { + kDefault = 0, + kSrcMpltSys = 1, + kSrcInput = 1 << 1, + kSrcMpltApk = 1 << 2, + kSrcMplt = 1 << 3, + kSrcExtern = 1 << 4, + kSrcUnknown = 1 << 5, + kSrcMask = 0x3F, +}; + +class FETypeSameNamePolicy { + public: + FETypeSameNamePolicy() : flag(0) {} + ~FETypeSameNamePolicy() = default; + bool IsUseLastest() const { + return (flag & kFlagUseLastest) != 0; + } + + bool IsFatal() const { + return (flag & kFlagFatal) != 0; + } + + void SetFlag(uint8 flagIn) { + flag = flagIn; + } + + static constexpr uint8 kFlagUseLastest = 0x1; + static constexpr uint8 kFlagFatal = 0x2; + + private: + // bitwise flag + // bit0: 0-use first, 1-use lastest + // bit1: 0-warning, 1-fatal + uint8 flag; +}; + +using FEStructTypePair = std::pair; + +class FETypeManager { + public: + // ---------- prim FEIRType ---------- + const static UniqueFEIRType kPrimFEIRTypeUnknown; + const static UniqueFEIRType kPrimFEIRTypeU1; + const static UniqueFEIRType kPrimFEIRTypeI8; + const static UniqueFEIRType kPrimFEIRTypeU8; + const static UniqueFEIRType kPrimFEIRTypeI16; + const static UniqueFEIRType kPrimFEIRTypeU16; + const static UniqueFEIRType kPrimFEIRTypeI32; + const static UniqueFEIRType kPrimFEIRTypeU32; + const static UniqueFEIRType kPrimFEIRTypeI64; + const static UniqueFEIRType kPrimFEIRTypeU64; + const static UniqueFEIRType kPrimFEIRTypeF32; + const static UniqueFEIRType kPrimFEIRTypeF64; + const static UniqueFEIRType kFEIRTypeJavaObject; + const static UniqueFEIRType kFEIRTypeJavaClass; + const static UniqueFEIRType kFEIRTypeJavaString; + + explicit FETypeManager(MIRModule &moduleIn); + ~FETypeManager(); + void ReleaseMemPool(); + bool LoadMplts(const std::list &mpltNames, FETypeFlag flag, const std::string &phaseName); + bool LoadMplt(const std::string &mpltName, FETypeFlag flag); + void CheckSameNamePolicy() const; + // method for Class or Interface generation + MIRStructType *GetClassOrInterfaceType(const GStrIdx &nameIdx) const; + MIRStructType *GetClassOrInterfaceType(const std::string &name) const { + GStrIdx nameIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(name); + return GetClassOrInterfaceType(nameIdx); + } + + FETypeFlag GetClassOrInterfaceTypeFlag(const GStrIdx &nameIdx) const; + FETypeFlag GetClassOrInterfaceTypeFlag(const std::string &name) const { + GStrIdx nameIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(name); + return GetClassOrInterfaceTypeFlag(nameIdx); + } + + MIRStructType *CreateClassOrInterfaceType(const GStrIdx &nameIdx, bool isInterface, FETypeFlag typeFlag); + MIRStructType *CreateClassOrInterfaceType(const std::string &name, bool isInterface, FETypeFlag typeFlag) { + GStrIdx nameIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(name); + return CreateClassOrInterfaceType(nameIdx, isInterface, typeFlag); + } + + MIRStructType *GetOrCreateStructType(const std::string &name); + MIRStructType *CreateStructType(const std::string &name); + MIRTypeByName *CreateTypeByNameType(const std::string &name); + MIRTypeByName *GetOrCreateTypeByNameType(const std::string &name); + MIRTypeByName *GetTypeByNameType(const std::string &name); + MIRTypeByName *GetTypeByNameType(const 
GStrIdx &nameIdx); + MIRTypeByName *CreateTypedef(const std::string &name, const MIRType &type); + MIREnum *CreateEnum(const std::string &name, PrimType primType); + MIREnum *GetOrCreateEnum(const std::string &name, PrimType primType = PTY_i32); + size_t GetEnumIdx(const std::string &name); + MIRType *GetOrCreateComplexStructType(const MIRType &elemType); + MIRStructType *GetOrCreateClassOrInterfaceType(const GStrIdx &nameIdx, bool isInterface, FETypeFlag typeFlag, + bool &isCreate); + MIRStructType *GetOrCreateClassOrInterfaceType(const std::string &name, bool isInterface, FETypeFlag typeFlag, + bool &isCreate) { + GStrIdx nameIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(name); + return GetOrCreateClassOrInterfaceType(nameIdx, isInterface, typeFlag, isCreate); + } + + MIRType *GetOrCreateClassOrInterfacePtrType(const GStrIdx &nameIdx, bool isInterface, FETypeFlag typeFlag, + bool &isCreate); + MIRType *GetOrCreateClassOrInterfacePtrType(const std::string &name, bool isInterface, FETypeFlag typeFlag, + bool &isCreate) { + GStrIdx nameIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(name); + return GetOrCreateClassOrInterfacePtrType(nameIdx, isInterface, typeFlag, isCreate); + } + + uint32 GetTypeIDFromMplClassName(const std::string &mplClassName, int32 dexFileHashCode) const; + MIRStructType *GetStructTypeFromName(const std::string &name); + MIRStructType *GetStructTypeFromName(const GStrIdx &nameIdx); + MIRType *GetOrCreateTypeFromName(const std::string &name, FETypeFlag typeFlag, bool usePtr); + MIRType *GetOrCreatePointerType(const MIRType &type, PrimType ptyPtr = PTY_ref); + MIRType *GetOrCreateArrayType(MIRType &elemType, uint8 dim, PrimType ptyPtr = PTY_ref); + MIRType *GetOrCreateJArrayType(MIRType &elemType, uint8 dim, PrimType ptyPtr = PTY_ref); + void AddClassToModule(const MIRStructType &structType); + + // ---------- methods for StructElemInfo ---------- + // structIdx = 0: global field/function without owner structure + FEStructElemInfo *RegisterStructFieldInfo( + const StructElemNameIdx &argStructElemNameIdx, MIRSrcLang argSrcLang, bool isStatic); + FEStructElemInfo *RegisterStructMethodInfo( + const StructElemNameIdx &argStructElemNameIdx, MIRSrcLang argSrcLang, bool isStatic); + FEStructElemInfo *RegisterStructMethodInfoC( + const std::string &name, MIRSrcLang argSrcLang, bool isStatic); + FEStructElemInfo *GetStructElemInfo(const GStrIdx &fullNameIdx) const; + + // ---------- methods for MIRFunction ---------- + /** + * Function Name: GetMIRFunction + * Description: get MIRFunction by class method name + * 1. return if exist in mpltFuncNameSymbolMap + * 2. return if exist in nameFuncMap + * 3. return null otherwise + * Parallel note: Parallelable + */ + MIRFunction *GetMIRFunction(const std::string &classMethodName, bool isStatic); + MIRFunction *GetMIRFunction(const GStrIdx &nameIdx, bool isStatic); + + /* + * Function Name: CreateMIRFunction + * Description: create MIRFunction by class method name + * 1. call GetMIRFunction first, if exist, return + * 2. 
create MIRFunction + * Parallel note: Non-parallelable + */ + MIRFunction *CreateFunction(const GStrIdx &nameIdx, const TyIdx &retTypeIdx, const std::vector &argsTypeIdx, + bool isVarg, bool isStatic); + MIRFunction *CreateFunction(const std::string &methodName, const std::string &returnTypeName, + const std::vector &argTypeNames, bool isVarg, bool isStatic); + + // FEIRType GetOrCreate + const FEIRType *GetOrCreateFEIRTypeByName(const std::string &typeName, const GStrIdx &typeNameIdx, + MIRSrcLang argSrcLang = kSrcLangJava); + const FEIRType *GetOrCreateFEIRTypeByName(const GStrIdx &typeNameIdx, MIRSrcLang argSrcLang = kSrcLangJava); + const FEIRType *GetFEIRTypeByName(const std::string &typeName) const; + const FEIRType *GetFEIRTypeByName(const GStrIdx &typeNameIdx) const; + + // MCC function + void InitMCCFunctions(); + MIRFunction *GetMCCFunction(const std::string &funcName) const; + MIRFunction *GetMCCFunction(const GStrIdx &funcNameIdx) const; + PUIdx GetPuIdxForMCCGetOrInsertLiteral() const { + return funcMCCGetOrInsertLiteral->GetPuidx(); + } + + // anti-proguard + bool IsAntiProguardFieldStruct(const GStrIdx &structNameIdx); + + static bool IsStructType(const MIRType &type); + static PrimType GetPrimType(const std::string &name); + static MIRType *GetMIRTypeForPrim(char c); + static MIRType *GetMIRTypeForPrim(const std::string &name) { + if (name.length() != 1) { + return nullptr; + } + return GetMIRTypeForPrim(name[0]); + } + + static std::string GetBaseTypeName(const std::string &name, uint32 &dim, bool inMpl = true); + static std::string GetBaseTypeName(const std::string &name, bool inMpl = true) { + uint32 dim = 0; + return GetBaseTypeName(name, dim, inMpl); + } + static void SetComplete(MIRStructType &structType); + static std::string TypeAttrsToString(const TypeAttrs &attrs); + + bool IsImportedType(const GStrIdx &typeNameIdx) const { + return structNameTypeMap.find(typeNameIdx) != structNameTypeMap.end(); + } + + MIRStructType *GetImportedType(const GStrIdx &typeNameIdx) const { + auto it = structNameTypeMap.find(typeNameIdx); + if (it != structNameTypeMap.end()) { + return it->second.first; + } + return nullptr; + } + + const std::unordered_map &GetStructNameTypeMap() const { + return structNameTypeMap; + } + void MarkExternStructType(); + void SetMirImportedTypes(FETypeFlag flag); + + void SetSrcLang(const MIRSrcLang &argSrcLang) { + srcLang = argSrcLang; + } + + void InsertBoundaryLenExprHashMap(uint32 hash, UniqueFEIRExpr expr) { + (void)boundaryLenExprHashMap.insert(std::make_pair(hash, std::move(expr))); + } + + UniqueFEIRExpr GetBoundaryLenExprFromMap(uint32 hash) { + auto iter = boundaryLenExprHashMap.find(hash); + if (iter != boundaryLenExprHashMap.end()) { + return iter->second->Clone(); + } + return nullptr; + } + + void InsertOwnedNonnullFieldStructSet(const TyIdx &tyidx) { + (void)ownedNonnullFieldStructSet.insert(tyidx); + } + + bool IsOwnedNonnullFieldStructSet(const TyIdx &tyidx) { + auto iter = ownedNonnullFieldStructSet.find(tyidx); + if (iter != ownedNonnullFieldStructSet.end()) { + return true; + } + return false; + } + + private: + void UpdateStructNameTypeMapFromTypeTable(const std::string &mpltName, FETypeFlag flag); + void UpdateNameFuncMapFromTypeTable(); + void UpdateDupTypes(const GStrIdx &nameIdx, bool isInterface, + const std::unordered_map::iterator &importedTypeIt); + + // MCC function + void InitFuncMCCGetOrInsertLiteral(); + using mirFuncPair = std::pair; + + MIRModule &module; + MemPool *mp; + MapleAllocator allocator; + MIRBuilder builder; 
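// Editor's note: sketch of the lookup-then-create flow implied by the comments above
// (GetMIRFunction is parallelable, CreateFunction is not). The mangled method name and
// type strings are placeholders, and the element type of the argument-type vector is
// assumed to be std::string, since the original declaration lost its template argument.
MIRFunction *GetOrCreateStaticFunc(FETypeManager &typeManager) {
  const std::string methodName = "LFoo;|bar|(I)V";  // illustrative mangled name
  MIRFunction *func = typeManager.GetMIRFunction(methodName, true /* isStatic */);
  if (func != nullptr) {
    return func;
  }
  std::vector<std::string> argTypeNames = {"I"};
  return typeManager.CreateFunction(methodName, "V", argTypeNames,
                                    false /* isVarg */, true /* isStatic */);
}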
+ // map + std::unordered_map structNameTypeMap; + // map + std::unordered_map structNameSrcMap; + // list> + std::list> structSameNameSrcList; + FETypeSameNamePolicy sameNamePolicy; + MIRSrcLang srcLang; + + // ---------- class name ---> type id map info ---------- + using classNameTypeIDMapT = std::unordered_map; + std::map classNameTypeIDMapAllDex; // dexFileHashCode className classTpyeID + // ---------- struct elem info ---------- + std::map mapStructElemInfo; + + // ---------- function list ---------- + std::unordered_map nameFuncMap; + std::unordered_map nameStaticFuncMap; + std::unordered_map mpltNameFuncMap; + std::unordered_map mpltNameStaticFuncMap; + + // ---------- FEIRType list ---------- + std::unordered_map nameFEIRTypeMap; + std::list nameFEIRTypeList; + + // ---------- MCC function list ---------- + std::unordered_map nameMCCFuncMap; + MIRFunction *funcMCCGetOrInsertLiteral; + + // ---------- ast C ---------- + std::unordered_map nameTypeMap; + std::unordered_map enumNameMap; + + // ---------- Enhance C ---------- + std::unordered_map boundaryLenExprHashMap; + std::unordered_set ownedNonnullFieldStructSet; + + MIRFunction *funcMCCStaticFieldGetBool = nullptr; + MIRFunction *funcMCCStaticFieldGetByte = nullptr; + MIRFunction *funcMCCStaticFieldGetShort = nullptr; + MIRFunction *funcMCCStaticFieldGetChar = nullptr; + MIRFunction *funcMCCStaticFieldGetInt = nullptr; + MIRFunction *funcMCCStaticFieldGetLong = nullptr; + MIRFunction *funcMCCStaticFieldGetFloat = nullptr; + MIRFunction *funcMCCStaticFieldGetDouble = nullptr; + MIRFunction *funcMCCStaticFieldGetObject = nullptr; + + MIRFunction *funcMCCStaticFieldSetBool = nullptr; + MIRFunction *funcMCCStaticFieldSetByte = nullptr; + MIRFunction *funcMCCStaticFieldSetShort = nullptr; + MIRFunction *funcMCCStaticFieldSetChar = nullptr; + MIRFunction *funcMCCStaticFieldSetInt = nullptr; + MIRFunction *funcMCCStaticFieldSetLong = nullptr; + MIRFunction *funcMCCStaticFieldSetFloat = nullptr; + MIRFunction *funcMCCStaticFieldSetDouble = nullptr; + MIRFunction *funcMCCStaticFieldSetObject = nullptr; + + // ---------- antiproguard ---------- + std::set setAntiProguardFieldStructIdx; + mutable std::mutex feTypeManagerMtx; +}; +} // namespace maple +#endif // HIR2MPL_INCLUDE_COMMON_FE_TYPE_MANAGER_H diff --git a/src/hir2mpl/common/include/fe_utils.h b/src/hir2mpl/common/include/fe_utils.h new file mode 100644 index 0000000000000000000000000000000000000000..7d1e532183ee3cb75b1dab6a9e47e16e8a22f461 --- /dev/null +++ b/src/hir2mpl/common/include/fe_utils.h @@ -0,0 +1,384 @@ +/* + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef HIR2MPL_INCLUDE_FE_UTILS_H +#define HIR2MPL_INCLUDE_FE_UTILS_H +#include +#include +#include +#include "mpl_logging.h" +#include "prim_types.h" +#include "mir_type.h" +#include "global_tables.h" +#include "mempool.h" +#include "mir_nodes.h" +#include "mir_scope.h" + +namespace maple { + +enum FEErrno : int { + kNoError = 0, + kCmdParseError = 1, + kNoValidInput = 2, + kFEError = 3, +}; + +class FEUtils { + public: + FEUtils() = default; + ~FEUtils() = default; + static std::vector Split(const std::string &str, char delim); + static uint8 GetWidth(PrimType primType); + static bool IsInteger(PrimType primType); + static bool IsSignedInteger(PrimType primType); + static bool IsUnsignedInteger(PrimType primType); + static PrimType MergePrimType(PrimType primType1, PrimType primType2); + static uint8 GetDim(const std::string &typeName); + static std::string GetBaseTypeName(const std::string &typeName); + static PrimType GetPrimType(const GStrIdx &typeNameIdx); + static uint32 GetSequentialNumber(); + static std::string GetFileNameHashStr(const std::string &fileName, uint32 seed = 211); + static std::string GetSequentialName0(const std::string &prefix, uint32_t num); + static std::string GetSequentialName(const std::string &prefix); + static std::string CreateLabelName(); + static FieldID GetStructFieldID(MIRStructType *base, const std::string &fieldName); + static bool TraverseToNamedField(MIRStructType &structType, const GStrIdx &nameIdx, FieldID &fieldID, + bool isTopLevel = true); + static MIRType *GetStructFieldType(const MIRStructType *type, FieldID fieldID); + static const MIRFuncType *GetFuncPtrType(const MIRType &type); + static MIRConst *CreateImplicitConst(MIRType *type); + static PrimType GetVectorElementPrimType(PrimType vectorPrimType); + static bool EndsWith(const std::string &value, const std::string &ending); + static MIRConst *TraverseToMIRConst(MIRAggConst *aggConst, const MIRStructType &structType, FieldID &fieldID); + static Loc GetSrcLocationForMIRSymbol(const MIRSymbol &symbol); + static MIRAliasVars AddAlias(const GStrIdx &mplNameIdx, const MIRType *sourceType, const TypeAttrs &attrs, + bool isLocal = true); + static void AddAliasInMIRScope(MIRScope &scope, const std::string &srcVarName, const MIRSymbol &symbol, + const MIRType *sourceType); + static SrcPosition CvtLoc2SrcPosition(const Loc &loc); + + static const std::string kBoolean; + static const std::string kByte; + static const std::string kShort; + static const std::string kChar; + static const std::string kInt; + static const std::string kLong; + static const std::string kFloat; + static const std::string kDouble; + static const std::string kVoid; + static const std::string kThis; + static const std::string kMCCStaticFieldGetBool; + static const std::string kMCCStaticFieldGetByte; + static const std::string kMCCStaticFieldGetChar; + static const std::string kMCCStaticFieldGetShort; + static const std::string kMCCStaticFieldGetInt; + static const std::string kMCCStaticFieldGetLong; + static const std::string kMCCStaticFieldGetFloat; + static const std::string kMCCStaticFieldGetDouble; + static const std::string kMCCStaticFieldGetObject; + + static const std::string kMCCStaticFieldSetBool; + static const std::string kMCCStaticFieldSetByte; + static const std::string kMCCStaticFieldSetChar; + static const std::string kMCCStaticFieldSetShort; + static const std::string kMCCStaticFieldSetInt; + static const std::string kMCCStaticFieldSetLong; + static const std::string kMCCStaticFieldSetFloat; + static 
const std::string kMCCStaticFieldSetDouble; + static const std::string kMCCStaticFieldSetObject; + + static const std::string kCondGoToStmtLabelNamePrefix; + + static inline MemPool *NewMempool(const std::string &name, bool isLocalPool) { + return memPoolCtrler.NewMemPool(name, isLocalPool); + } + + static inline void DeleteMempoolPtr(MemPool *memPoolPtr) { + memPoolCtrler.DeleteMemPool(memPoolPtr); + } + + static inline GStrIdx &GetBooleanIdx() { + static GStrIdx booleanIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(kBoolean); + return booleanIdx; + } + + static inline GStrIdx &GetByteIdx() { + static GStrIdx byteIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(kByte); + return byteIdx; + } + + static inline GStrIdx &GetShortIdx() { + static GStrIdx shortIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(kShort); + return shortIdx; + } + + static inline GStrIdx &GetCharIdx() { + static GStrIdx charIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(kChar); + return charIdx; + } + + static inline GStrIdx &GetIntIdx() { + static GStrIdx intIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(kInt); + return intIdx; + } + + static inline GStrIdx &GetLongIdx() { + static GStrIdx longIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(kLong); + return longIdx; + } + + static inline GStrIdx &GetFloatIdx() { + static GStrIdx floatIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(kFloat); + return floatIdx; + } + + static inline GStrIdx &GetDoubleIdx() { + static GStrIdx doubleIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(kDouble); + return doubleIdx; + } + + static inline GStrIdx &GetVoidIdx() { + static GStrIdx voidIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(kVoid); + return voidIdx; + } + + static inline GStrIdx &GetThisIdx() { + static GStrIdx thisIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(kThis); + return thisIdx; + } + + static inline GStrIdx &GetMCCStaticFieldGetBoolIdx() { + static GStrIdx mccStaticFieldGetBoolIdx = + GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(kMCCStaticFieldGetBool); + return mccStaticFieldGetBoolIdx; + } + + static inline GStrIdx &GetMCCStaticFieldGetByteIdx() { + static GStrIdx mccStaticFieldGetByteIdx = + GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(kMCCStaticFieldGetByte); + return mccStaticFieldGetByteIdx; + } + + static inline GStrIdx &GetMCCStaticFieldGetShortIdx() { + static GStrIdx mccStaticFieldGetShortIdx = + GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(kMCCStaticFieldGetShort); + return mccStaticFieldGetShortIdx; + } + + static inline GStrIdx &GetMCCStaticFieldGetCharIdx() { + static GStrIdx mccStaticFieldGetCharIdx = + GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(kMCCStaticFieldGetChar); + return mccStaticFieldGetCharIdx; + } + + static inline GStrIdx &GetMCCStaticFieldGetIntIdx() { + static GStrIdx mccStaticFieldGetIntIdx = + GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(kMCCStaticFieldGetInt); + return mccStaticFieldGetIntIdx; + } + + static inline GStrIdx &GetMCCStaticFieldGetLongIdx() { + static GStrIdx mccStaticFieldGetLongIdx = + GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(kMCCStaticFieldGetLong); + return mccStaticFieldGetLongIdx; + } + + static inline GStrIdx &GetMCCStaticFieldGetFloatIdx() { + static GStrIdx mccStaticFieldGetFloatIdx = + GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(kMCCStaticFieldGetFloat); + return mccStaticFieldGetFloatIdx; + } + + static inline GStrIdx 
&GetMCCStaticFieldGetDoubleIdx() { + static GStrIdx mccStaticFieldGetDoubleIdx = + GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(kMCCStaticFieldGetDouble); + return mccStaticFieldGetDoubleIdx; + } + + static inline GStrIdx &GetMCCStaticFieldGetObjectIdx() { + static GStrIdx mccStaticFieldGetObjectIdx = + GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(kMCCStaticFieldGetObject); + return mccStaticFieldGetObjectIdx; + } + + static inline GStrIdx &GetMCCStaticFieldSetBoolIdx() { + static GStrIdx mccStaticFieldSetBoolIdx = + GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(kMCCStaticFieldSetBool); + return mccStaticFieldSetBoolIdx; + } + + static inline GStrIdx &GetMCCStaticFieldSetByteIdx() { + static GStrIdx mccStaticFieldSetByteIdx = + GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(kMCCStaticFieldSetByte); + return mccStaticFieldSetByteIdx; + } + + static inline GStrIdx &GetMCCStaticFieldSetShortIdx() { + static GStrIdx mccStaticFieldSetShortIdx = + GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(kMCCStaticFieldSetShort); + return mccStaticFieldSetShortIdx; + } + + static inline GStrIdx &GetMCCStaticFieldSetCharIdx() { + static GStrIdx mccStaticFieldSetCharIdx = + GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(kMCCStaticFieldSetChar); + return mccStaticFieldSetCharIdx; + } + + static inline GStrIdx &GetMCCStaticFieldSetIntIdx() { + static GStrIdx mccStaticFieldSetIntIdx = + GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(kMCCStaticFieldSetInt); + return mccStaticFieldSetIntIdx; + } + + static inline GStrIdx &GetMCCStaticFieldSetLongIdx() { + static GStrIdx mccStaticFieldSetLongIdx = + GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(kMCCStaticFieldSetLong); + return mccStaticFieldSetLongIdx; + } + + static inline GStrIdx &GetMCCStaticFieldSetFloatIdx() { + static GStrIdx mccStaticFieldSetFloatIdx = + GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(kMCCStaticFieldSetFloat); + return mccStaticFieldSetFloatIdx; + } + + static inline GStrIdx &GetMCCStaticFieldSetDoubleIdx() { + static GStrIdx mccStaticFieldSetDoubleIdx = + GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(kMCCStaticFieldSetDouble); + return mccStaticFieldSetDoubleIdx; + } + + static inline GStrIdx &GetMCCStaticFieldSetObjectIdx() { + static GStrIdx mccStaticFieldSetObjectIdx = + GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(kMCCStaticFieldSetObject); + return mccStaticFieldSetObjectIdx; + } + + static void InitPrimTypeFuncNameIdxMap(std::map &primTypeFuncNameIdxMap); + + private: + static bool LogicXOR(bool v1, bool v2) { + return (v1 && !v2) || (!v1 && v2); + } +}; + +class FELinkListNode { + public: + FELinkListNode(); + virtual ~FELinkListNode(); + void InsertBefore(FELinkListNode *ins); + void InsertAfter(FELinkListNode *ins); + static void InsertBefore(FELinkListNode *ins, FELinkListNode *pos); + static void InsertAfter(FELinkListNode *ins, FELinkListNode *pos); + static void SpliceNodes(FELinkListNode *head, FELinkListNode *tail, FELinkListNode *newTail); + FELinkListNode *GetPrev() const { + return prev; + } + + void SetPrev(FELinkListNode *node) { + CHECK_NULL_FATAL(node); + prev = node; + } + + void SetPrevNull() { + prev = nullptr; + } + + FELinkListNode *GetNext() const { + return next; + } + + void SetNextNull() { + next = nullptr; + } + + void SetNext(FELinkListNode *node) { + CHECK_NULL_FATAL(node); + next = node; + } + + protected: + FELinkListNode *prev; + FELinkListNode *next; +}; + +class AstSwitchUtil { + public: + static AstSwitchUtil 
&Instance() { + static AstSwitchUtil local; + return local; + } + std::string CreateEndOrExitLabelName() const; + void MarkLabelUsed(const std::string &label); + void MarkLabelUnUsed(const std::string &label); + void PushNestedBreakLabels(const std::string &label); + void PopNestedBreakLabels(); + void PushNestedCaseVectors(const std::pair &caseVec); + void PopNestedCaseVectors(); + bool CheckLabelUsed(const std::string &label); + const std::pair &GetTopOfNestedCaseVectors() const; + const std::string &GetTopOfBreakLabels() const; + std::map &GetLabelUseMap() { + return labelUsed; + } + static uint32_t tempVarNo; + static const char *cleanLabel; + static const char *exitLabel; + static const char *blockLabel; + static const char *caseLabel; + static const char *catchLabel; + static const char *endehLabel; + + private: + AstSwitchUtil() = default; + ~AstSwitchUtil() = default; + std::map labelUsed = std::map(); + std::stack nestedBreakLabels = std::stack(); // loop and switch blocks + std::stack nestedContinueLabels = std::stack(); // loop blocks only + std::stack> nestedCaseVectors = std::stack>(); +}; // end of AstSwitchUtil + +class AstLoopUtil { + public: + static AstLoopUtil &Instance() { + static AstLoopUtil local; + return local; + } + + ~AstLoopUtil() = default; + void PushBreak(std::string label); + std::string GetCurrentBreak(); + void PopCurrentBreak(); + bool IsBreakLabelsEmpty() const; + void PushContinue(std::string label); + std::string GetCurrentContinue(); + bool IsContinueLabelsEmpty() const; + void PopCurrentContinue(); + + bool IsCurrentBreakLabelUsed() { + return breakLabels.top().second; + } + + bool IsCurrentContinueLabelUsed() { + return continueLabels.top().second; + } + + private: + AstLoopUtil() = default; + std::stack> breakLabels = std::stack>(); + std::stack> continueLabels = std::stack>(); +}; +} // namespace maple +#endif // HIR2MPL_INCLUDE_FE_UTILS_H diff --git a/src/hir2mpl/common/include/fe_utils_ast.h b/src/hir2mpl/common/include/fe_utils_ast.h new file mode 100644 index 0000000000000000000000000000000000000000..debf40a5916dc400b678f8752e26e3276166644d --- /dev/null +++ b/src/hir2mpl/common/include/fe_utils_ast.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */
+#ifndef HIR2MPL_INCLUDE_FE_UTILS_AST_H
+#define HIR2MPL_INCLUDE_FE_UTILS_AST_H
+#include
+#include "types_def.h"
+#include "cfg_primitive_types.h"
+#include "mempool.h"
+#include "opcodes.h"
+#include "mir_const.h"
+
+namespace maple {
+class FEUtilAST {
+ public:
+ static PrimType GetTypeFromASTTypeName(const std::string &typeName);
+ static const std::string Type2Label(PrimType primType);
+
+ private:
+ FEUtilAST() = default;
+ ~FEUtilAST() = default;
+};
+} // namespace maple
+#endif // HIR2MPL_INCLUDE_FE_UTILS_AST_H
diff --git a/src/hir2mpl/common/include/fe_utils_java.h b/src/hir2mpl/common/include/fe_utils_java.h
new file mode 100644
index 0000000000000000000000000000000000000000..9a22390c4d43467bf2ec1ac6282d91261977203c
--- /dev/null
+++ b/src/hir2mpl/common/include/fe_utils_java.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#ifndef HIR2MPL_INCLUDE_FE_UTILS_JAVA_H
+#define HIR2MPL_INCLUDE_FE_UTILS_JAVA_H
+#include
+#include
+#include "feir_type.h"
+#include "global_tables.h"
+#include "types_def.h"
+
+namespace maple {
+class FEUtilJava {
+ public:
+ static std::vector<std::string> SolveMethodSignature(const std::string &signature);
+ static std::string SolveParamNameInJavaFormat(const std::string &signature);
+
+ static GStrIdx &GetMultiANewArrayClassIdx() {
+ static GStrIdx multiANewArrayClassIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(
+ namemangler::EncodeName("Ljava/lang/reflect/Array;"));
+ return multiANewArrayClassIdx;
+ }
+
+ static GStrIdx &GetMultiANewArrayElemIdx() {
+ static GStrIdx multiANewArrayElemIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(
+ namemangler::EncodeName("newInstance"));
+ return multiANewArrayElemIdx;
+ }
+
+ static GStrIdx &GetMultiANewArrayTypeIdx() {
+ static GStrIdx multiANewArrayTypeIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(
+ namemangler::EncodeName("(Ljava/lang/Class;[I)Ljava/lang/Object;"));
+ return multiANewArrayTypeIdx;
+ }
+
+ static GStrIdx &GetMultiANewArrayFullIdx() {
+ static GStrIdx multiANewArrayFullIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(
+ namemangler::EncodeName("Ljava/lang/reflect/Array;|newInstance|(Ljava/lang/Class;[I)Ljava/lang/Object;"));
+ return multiANewArrayFullIdx;
+ }
+
+ static GStrIdx &GetJavaThrowableNameMplIdx() {
+ static GStrIdx javaThrowableNameMplIdx =
+ GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(namemangler::EncodeName("Ljava/lang/Throwable;"));
+ return javaThrowableNameMplIdx;
+ }
+
+ private:
+ FEUtilJava() = default;
+ ~FEUtilJava() = default;
+};
+} // namespace maple
+#endif // HIR2MPL_INCLUDE_FE_UTILS_JAVA_H
diff --git a/src/hir2mpl/common/include/feir_builder.h b/src/hir2mpl/common/include/feir_builder.h
new file mode 100644
index 0000000000000000000000000000000000000000..48feebcb33785fcc42110069458ae0c468e71649
--- /dev/null
+++ b/src/hir2mpl/common/include/feir_builder.h
@@ -0,0 +1,151 @@
+/*
+ * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved.
+ * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef HIR2MPL_INCLUDE_COMMON_FEIR_BUILDER_H +#define HIR2MPL_INCLUDE_COMMON_FEIR_BUILDER_H +#include +#include "mir_function.h" +#include "mpl_logging.h" +#include "feir_var.h" +#include "feir_stmt.h" +#include "fe_function.h" + +namespace maple { +class FEIRBuilder { + public: + FEIRBuilder() = default; + ~FEIRBuilder() = default; + // Type + static UniqueFEIRType CreateType(PrimType basePty, const GStrIdx &baseNameIdx, uint32 dim); + static UniqueFEIRType CreateArrayElemType(const UniqueFEIRType &arrayType); + static UniqueFEIRType CreateRefType(const GStrIdx &baseNameIdx, uint32 dim); + static UniqueFEIRType CreateTypeByJavaName(const std::string &typeName, bool inMpl); + // Var + static UniqueFEIRVar CreateVarReg(uint32 regNum, PrimType primType, bool isGlobal = false); + static UniqueFEIRVar CreateVarReg(uint32 regNum, UniqueFEIRType type, bool isGlobal = false); + static UniqueFEIRVar CreateVarName(GStrIdx nameIdx, PrimType primType, bool isGlobal = false, + bool withType = false); + static UniqueFEIRVar CreateVarName(const std::string &name, PrimType primType, bool isGlobal = false, + bool withType = false); + static UniqueFEIRVar CreateVarNameForC(GStrIdx nameIdx, MIRType &mirType, bool isGlobal = false, + bool withType = false); + static UniqueFEIRVar CreateVarNameForC(const std::string &name, MIRType &mirType, bool isGlobal = false, + bool withType = false); + static UniqueFEIRVar CreateVarNameForC(const std::string &name, UniqueFEIRType type, + bool isGlobal = false, bool withType = false); + // Expr + static UniqueFEIRExpr CreateExprSizeOfType(UniqueFEIRType ty); + static UniqueFEIRExpr CreateExprDRead(UniqueFEIRVar srcVar); + static UniqueFEIRExpr CreateExprDReadAggField(UniqueFEIRVar srcVar, FieldID fieldID, UniqueFEIRType fieldType); + static UniqueFEIRExpr CreateExprAddrofLabel(const std::string &lbName, UniqueFEIRType exprTy); + static UniqueFEIRExpr CreateExprAddrofVar(UniqueFEIRVar srcVar); + static UniqueFEIRExpr CreateExprAddrofFunc(const std::string &addr); + static UniqueFEIRExpr CreateExprAddrofArray(UniqueFEIRType argTypeNativeArray, + UniqueFEIRExpr argExprArray, std::string argArrayName, + std::list &argExprIndexs); + static UniqueFEIRExpr CreateExprIRead(UniqueFEIRType returnType, UniqueFEIRType ptrType, + UniqueFEIRExpr expr, FieldID id = 0); + static UniqueFEIRExpr CreateExprTernary(Opcode op, UniqueFEIRType type, UniqueFEIRExpr cExpr, + UniqueFEIRExpr tExpr, UniqueFEIRExpr fExpr); + static UniqueFEIRExpr CreateExprConstRefNull(); + static UniqueFEIRExpr CreateExprConstPtrNull(); + static UniqueFEIRExpr CreateExprConstI8(int8 val); + static UniqueFEIRExpr CreateExprConstI16(int16 val); + static UniqueFEIRExpr CreateExprConstI32(int32 val); + static UniqueFEIRExpr CreateExprConstU32(uint32 val); + static UniqueFEIRExpr CreateExprConstI64(int64 val); + static UniqueFEIRExpr CreateExprConstU64(uint64 val); + static UniqueFEIRExpr CreateExprConstF32(float val); + static UniqueFEIRExpr CreateExprConstF64(double val); + static UniqueFEIRExpr CreateExprConstPtr(int64 
val); + static UniqueFEIRExpr CreateExprConstAnyScalar(PrimType primType, int64 val); + static UniqueFEIRExpr CreateExprVdupAnyVector(PrimType primtype, int64 val); + static UniqueFEIRExpr CreateExprMathUnary(Opcode op, UniqueFEIRVar var0); + static UniqueFEIRExpr CreateExprMathUnary(Opcode op, UniqueFEIRExpr expr); + static UniqueFEIRExpr CreateExprZeroCompare(Opcode op, UniqueFEIRExpr expr); + static UniqueFEIRExpr CreateExprMathBinary(Opcode op, UniqueFEIRVar var0, UniqueFEIRVar var1); + static UniqueFEIRExpr CreateExprMathBinary(Opcode op, UniqueFEIRExpr expr0, UniqueFEIRExpr expr1); + static UniqueFEIRExpr CreateExprBinary(Opcode op, UniqueFEIRExpr expr0, UniqueFEIRExpr expr1); + static UniqueFEIRExpr CreateExprBinary(UniqueFEIRType exprType, Opcode op, + UniqueFEIRExpr expr0, UniqueFEIRExpr expr1); + static UniqueFEIRExpr CreateExprSExt(UniqueFEIRVar srcVar); + static UniqueFEIRExpr CreateExprSExt(UniqueFEIRExpr srcExpr, PrimType dstType); + static UniqueFEIRExpr CreateExprZExt(UniqueFEIRVar srcVar); + static UniqueFEIRExpr CreateExprZExt(UniqueFEIRExpr srcExpr, PrimType dstType); + static UniqueFEIRExpr CreateExprCvtPrim(UniqueFEIRVar srcVar, PrimType dstType); + static UniqueFEIRExpr CreateExprCvtPrim(UniqueFEIRExpr srcExpr, PrimType dstType); + static UniqueFEIRExpr CreateExprCvtPrim(UniqueFEIRExpr srcExpr, PrimType srcType, PrimType dstType); + static UniqueFEIRExpr CreateExprCvtPrim(Opcode argOp, UniqueFEIRExpr srcExpr, PrimType dstType); + static UniqueFEIRExpr CreateExprCastPrim(UniqueFEIRExpr srcExpr, PrimType dstType); + static UniqueFEIRExpr CreateExprJavaNewInstance(UniqueFEIRType type); + static UniqueFEIRExpr CreateExprJavaNewInstance(UniqueFEIRType type, uint32 argTypeID); + static UniqueFEIRExpr CreateExprJavaNewInstance(UniqueFEIRType type, uint32 argTypeID, bool isRcPermanent); + static UniqueFEIRExpr CreateExprJavaNewArray(UniqueFEIRType type, UniqueFEIRExpr exprSize); + static UniqueFEIRExpr CreateExprJavaNewArray(UniqueFEIRType type, UniqueFEIRExpr exprSize, uint32 typeID); + static UniqueFEIRExpr CreateExprJavaNewArray(UniqueFEIRType type, UniqueFEIRExpr exprSize, uint32 typeID, + bool isRcPermanent); + static UniqueFEIRExpr CreateExprJavaArrayLength(UniqueFEIRExpr exprArray); + + // Stmt + static UniqueFEIRStmt CreateStmtDAssign(UniqueFEIRVar dstVar, UniqueFEIRExpr srcExpr, bool hasException = false); + static UniqueFEIRStmt CreateStmtDAssignAggField(UniqueFEIRVar dstVar, UniqueFEIRExpr srcExpr, FieldID fieldID); + static UniqueFEIRStmt CreateStmtIAssign(UniqueFEIRType dstType, UniqueFEIRExpr dstExpr, + UniqueFEIRExpr srcExpr, FieldID fieldID = 0); + static UniqueFEIRStmt CreateStmtGoto(uint32 targetLabelIdx); + static UniqueFEIRStmt CreateStmtGoto(const std::string &labelName); + static UniqueFEIRStmt CreateStmtIGoto(UniqueFEIRExpr targetExpr); + static UniqueFEIRStmt CreateStmtCondGoto(uint32 targetLabelIdx, Opcode op, UniqueFEIRExpr expr); + static UniqueFEIRStmt CreateStmtSwitch(UniqueFEIRExpr expr); + static UniqueFEIRStmt CreateStmtIfWithoutElse(UniqueFEIRExpr cond, std::list &thenStmts); + static UniqueFEIRStmt CreateStmtIf(UniqueFEIRExpr cond, std::list &thenStmts, + std::list &elseStmts); + static UniqueFEIRStmt CreateStmtJavaConstClass(UniqueFEIRVar dstVar, UniqueFEIRType type); + static UniqueFEIRStmt CreateStmtJavaConstString(UniqueFEIRVar dstVar, const std::string &strVal); + static UniqueFEIRStmt CreateStmtJavaCheckCast(UniqueFEIRVar dstVar, UniqueFEIRVar srcVar, UniqueFEIRType type); + static UniqueFEIRStmt CreateStmtJavaCheckCast(UniqueFEIRVar 
dstVar, UniqueFEIRVar srcVar, UniqueFEIRType type, + uint32 argTypeID); + static UniqueFEIRStmt CreateStmtJavaInstanceOf(UniqueFEIRVar dstVar, UniqueFEIRVar srcVar, UniqueFEIRType type); + static UniqueFEIRStmt CreateStmtJavaInstanceOf(UniqueFEIRVar dstVar, UniqueFEIRVar srcVar, UniqueFEIRType type, + uint32 argTypeID); + static UniqueFEIRStmt CreateStmtJavaFillArrayData(UniqueFEIRVar srcVar, const int8 *arrayData, + uint32 size, const std::string &arrayName); + static std::list CreateStmtArrayStore(UniqueFEIRVar varElem, UniqueFEIRVar varArray, + UniqueFEIRVar varIndex); + static UniqueFEIRStmt CreateStmtArrayStoreOneStmt(UniqueFEIRVar varElem, UniqueFEIRVar varArray, + UniqueFEIRExpr exprIndex); + static UniqueFEIRStmt CreateStmtArrayStoreOneStmtForC(UniqueFEIRExpr exprElem, UniqueFEIRExpr exprArray, + UniqueFEIRExpr exprIndex, UniqueFEIRType arrayType); + static UniqueFEIRStmt CreateStmtArrayStoreOneStmtForC(UniqueFEIRExpr exprElem, UniqueFEIRExpr exprArray, + UniqueFEIRExpr exprIndex, UniqueFEIRType arrayType, + const std::string &argArrayName); + static UniqueFEIRStmt CreateStmtArrayStoreOneStmtForC(UniqueFEIRExpr exprElem, UniqueFEIRExpr exprArray, + std::list exprIndexs, + UniqueFEIRType arrayType, const std::string &argArrayName); + static UniqueFEIRStmt CreateStmtArrayStoreOneStmtForC(UniqueFEIRExpr exprElem, UniqueFEIRExpr exprArray, + UniqueFEIRExpr exprIndex, UniqueFEIRType arrayType, + UniqueFEIRType elemType, const std::string &argArrayName); + static std::list CreateStmtArrayLoad(UniqueFEIRVar varElem, UniqueFEIRVar varArray, + UniqueFEIRVar varIndex); + static UniqueFEIRStmt CreateStmtArrayLength(UniqueFEIRVar varLength, UniqueFEIRVar varArray); + static UniqueFEIRStmt CreateStmtRetype(UniqueFEIRVar varDst, const UniqueFEIRVar &varSrc); + static UniqueFEIRStmt CreateStmtComment(const std::string &comment); + static UniqueFEIRExpr ReadExprField(UniqueFEIRExpr expr, FieldID fieldID, UniqueFEIRType fieldType); + static UniqueFEIRStmt AssginStmtField(const UniqueFEIRExpr &addrExpr, UniqueFEIRExpr srcExpr, FieldID fieldID); + static bool IsZeroConstExpr(const UniqueFEIRExpr &expr); + static UniqueFEIRStmt CreateVLAStackRestore(const UniqueFEIRVar &vlaSavedStackVar); + static std::string EmitVLACleanupStmts(FEFunction &feFunction, const std::string &labelName, const Loc &loc); + static void EmitVLACleanupStmts(const FEFunction &feFunction, std::list &stmts); +}; // class FEIRBuilder +} // namespace maple +#endif // HIR2MPL_INCLUDE_COMMON_FEIR_BUILDER_H diff --git a/src/hir2mpl/common/include/feir_node_kind.def b/src/hir2mpl/common/include/feir_node_kind.def new file mode 100644 index 0000000000000000000000000000000000000000..42d9797447be5639f57e569b28b2a089a4b39c9b --- /dev/null +++ b/src/hir2mpl/common/include/feir_node_kind.def @@ -0,0 +1,87 @@ +// FEIR_NODE_KIND (kind, description) +FEIR_NODE_KIND(Stmt, "Stmt") +FEIR_NODE_KIND(StmtNary, "StmtNary") +FEIR_NODE_KIND(StmtAssign, "StmtAssign") +FEIR_NODE_KIND(StmtNonAssign, "StmtNonAssign") +FEIR_NODE_KIND(StmtPesudo, "StmtPesudo") +FEIR_NODE_KIND(StmtDAssign, "StmtDAssign") +FEIR_NODE_KIND(StmtJavaTypeCheck, "StmtJavaTypeCheck") +FEIR_NODE_KIND(StmtJavaConstClass, "StmtJavaConstClass") +FEIR_NODE_KIND(StmtJavaConstString, "StmtJavaConstString") +FEIR_NODE_KIND(StmtJavaMultiANewArray, "StmtJavaMultiANewArray") +FEIR_NODE_KIND(StmtCallAssign, "StmtCallAssign") +FEIR_NODE_KIND(StmtICallAssign, "StmtICallAssign") +FEIR_NODE_KIND(StmtJavaDynamicCallAssign, "StmtJavaDynamicCallAssign") +FEIR_NODE_KIND(StmtIntrinsicCallAssign, 
"StmtIntrinsicCallAssign") +FEIR_NODE_KIND(StmtIAssign, "StmtIAssign") +FEIR_NODE_KIND(StmtUseOnly, "StmtUseOnly") +FEIR_NODE_KIND(StmtReturn, "StmtReturn") +FEIR_NODE_KIND(StmtBranch, "StmtBranch") +FEIR_NODE_KIND(StmtGoto, "StmtGoto") +FEIR_NODE_KIND(StmtIGoto, "StmtIGoto") +FEIR_NODE_KIND(StmtCondGoto, "StmtCondGoto") +FEIR_NODE_KIND(StmtSwitch, "StmtSwitch") +FEIR_NODE_KIND(StmtArrayStore, "StmtArrayStore") +FEIR_NODE_KIND(StmtFieldStore, "StmtFieldStore") +FEIR_NODE_KIND(StmtFieldLoad, "StmtFieldLoad") +FEIR_NODE_KIND(StmtFieldStoreForC, "StmtFieldStoreForC") +FEIR_NODE_KIND(ExprFieldLoadForC, "ExprFieldLoadForC") +FEIR_NODE_KIND(Expr, "Expr") +FEIR_NODE_KIND(ExprNestable, "ExprNestable") +FEIR_NODE_KIND(ExprNonNestable, "ExprNonNestable") +FEIR_NODE_KIND(ExprConst, "ExprConst") +FEIR_NODE_KIND(ExprSizeOfType, "ExprSizeOfType") +FEIR_NODE_KIND(ExprDRead, "ExprDRead") +FEIR_NODE_KIND(ExprIRead, "ExprIRead") +FEIR_NODE_KIND(ExprRegRead, "ExprRegRead") +FEIR_NODE_KIND(ExprConvert, "ExprConvert") +FEIR_NODE_KIND(ExprIntExt, "ExprIntExt") +FEIR_NODE_KIND(ExprRetype, "ExprRetype") +FEIR_NODE_KIND(ExprCompare, "ExprCompare") +FEIR_NODE_KIND(ExprUnary, "ExprUnary") +FEIR_NODE_KIND(ExprBinary, "ExprBinary") +FEIR_NODE_KIND(ExprTernary, "ExprTernary") +FEIR_NODE_KIND(ExprNary, "ExprNary") +FEIR_NODE_KIND(ExprArray, "ExprArray") +FEIR_NODE_KIND(ExprAddrof, "ExprAddrof") +FEIR_NODE_KIND(ExprAddrofLabel, "ExprAddrofLabel") +FEIR_NODE_KIND(ExprIAddrof, "ExprIAddrof") +FEIR_NODE_KIND(ExprAddrofVar, "ExprAddrofVar") +FEIR_NODE_KIND(ExprAddrofFunc, "ExprAddrofFunc") +FEIR_NODE_KIND(ExprAddrofArray, "ExprAddrofArray") +FEIR_NODE_KIND(ExprAddrofStruct, "ExprAddrofStruct") +FEIR_NODE_KIND(ExprIntrinsicop, "ExprIntrinsicop") +FEIR_NODE_KIND(FEIRExprJavaMerge, "FEIRExprJavaMerge") +FEIR_NODE_KIND(ExprTypeCvt, "ExprTypeCvt") +FEIR_NODE_KIND(ExprJavaNewInstance, "ExprJavaNewInstance") +FEIR_NODE_KIND(ExprJavaNewArray, "ExprJavaNewArray") +FEIR_NODE_KIND(ExprJavaArrayLength, "ExprJavaArrayLength") +FEIR_NODE_KIND(ExprJavaInstanceOf, "ExprJavaInstanceOf") +FEIR_NODE_KIND(ExprArrayStoreForC, "ExprArrayStoreForC") +FEIR_NODE_KIND(ExprArrayLoad, "ExprArrayLoad") +FEIR_NODE_KIND(ExprCStyleCast, "ExprCStyleCast") +FEIR_NODE_KIND(ExprAtomic, "ExprAtomic") +FEIR_NODE_KIND(StmtJavaFillArrayData, "StmtJavaFillArrayData") +FEIR_NODE_KIND(StmtPesudoFuncStart, "StmtPesudoFuncStart") +FEIR_NODE_KIND(StmtPesudoFuncEnd, "StmtPesudoFuncEnd") +FEIR_NODE_KIND(StmtCheckPoint, "StmtCheckPoint") +FEIR_NODE_KIND(StmtPesudoLOC, "StmtPesudoLOC") +FEIR_NODE_KIND(StmtPesudoLabel, "StmtPesudoLabel") +FEIR_NODE_KIND(StmtPesudoJavaTry, "StmtPesudoJavaTry") +FEIR_NODE_KIND(StmtPesudoEndTry, "StmtPesudoEndTry") +FEIR_NODE_KIND(StmtPesudoJavaCatch, "StmtPesudoJavaCatch") +FEIR_NODE_KIND(StmtPesudoSafe, "StmtPesudoSafe") +FEIR_NODE_KIND(StmtPesudoUnsafe, "StmtPesudoUnsafe") +FEIR_NODE_KIND(StmtPesudoComment, "StmtPesudoComment") +FEIR_NODE_KIND(StmtPesudoCommentForInst, "StmtPesudoCommentForInst") +FEIR_NODE_KIND(StmtCaseForC, "StmtCaseForC") +FEIR_NODE_KIND(StmtDefaultForC, "StmtDefaultForC") +FEIR_NODE_KIND(StmtIf, "StmtIf") +FEIR_NODE_KIND(StmtDoWhile, "StmtDoWhile") +FEIR_NODE_KIND(StmtBreak, "StmtBreak") +FEIR_NODE_KIND(StmtContinue, "StmtContinue") +FEIR_NODE_KIND(StmtLabel, "StmtLabel") +FEIR_NODE_KIND(StmtAtomic, "StmtAtomic") +FEIR_NODE_KIND(StmtGCCAsm, "StmtGCCAsm") +FEIR_NODE_KIND(StmtPesudoHead, "StmtPesudoHead") +FEIR_NODE_KIND(StmtPesudoTail, "StmtPesudoTail") diff --git a/src/hir2mpl/common/include/feir_scope.h 
b/src/hir2mpl/common/include/feir_scope.h
new file mode 100644
index 0000000000000000000000000000000000000000..cfb58540f56281132c1e6226c10b8e8f288a0ccd
--- /dev/null
+++ b/src/hir2mpl/common/include/feir_scope.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#ifndef HIR2MPL_COMMON_INCLUDE_FEIR_SCOPE_H
+#define HIR2MPL_COMMON_INCLUDE_FEIR_SCOPE_H
+#include "mir_scope.h"
+#include "mpl_logging.h"
+#include "src_position.h"
+#include "feir_var.h"
+#include "feir_stmt.h"
+
+namespace maple {
+class FEIRScope;
+using UniqueFEIRScope = std::unique_ptr<FEIRScope>;
+
+class FEIRScope {
+ public:
+ explicit FEIRScope(uint32 currID) : id(currID) {}
+ FEIRScope(uint32 currID, MIRScope *scope) : id(currID), mirScope(scope) {}
+ FEIRScope(uint32 currID, bool isControll) : id(currID), isControllScope(isControll) {}
+ virtual ~FEIRScope() = default;
+
+ uint32 GetID() const {
+ return id;
+ }
+
+ void SetMIRScope(MIRScope *scope) {
+ mirScope = scope;
+ }
+
+ MIRScope *GetMIRScope() {
+ return mirScope;
+ }
+
+ const MIRScope *GetMIRScope() const {
+ return mirScope;
+ }
+
+ void SetVLASavedStackVar(UniqueFEIRVar var) {
+ vlaSavedStackVar = std::move(var);
+ }
+
+ const UniqueFEIRVar &GetVLASavedStackVar() const {
+ return vlaSavedStackVar;
+ }
+
+ void SetIsControllScope(bool flag) {
+ isControllScope = flag;
+ }
+
+ bool IsControllScope() const {
+ return isControllScope;
+ }
+
+ UniqueFEIRStmt GenVLAStackRestoreStmt() const;
+ UniqueFEIRScope Clone() const;
+
+ private:
+ uint32 id;
+ MIRScope *mirScope = nullptr; // one func, compoundstmt or forstmt scope includes decls
+ UniqueFEIRVar vlaSavedStackVar = nullptr;
+ bool isControllScope = false; // The controlling scope in a if/switch/while/for statement
+};
+} // namespace maple
+#endif // HIR2MPL_AST_INPUT_INCLUDE_AST_VAR_SCOPE_H
\ No newline at end of file
diff --git a/src/hir2mpl/common/include/feir_stmt.h b/src/hir2mpl/common/include/feir_stmt.h
new file mode 100644
index 0000000000000000000000000000000000000000..dde5c575b45e44180172de48085c8c872939add4
--- /dev/null
+++ b/src/hir2mpl/common/include/feir_stmt.h
@@ -0,0 +1,2950 @@
+/*
+ * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */ +#ifndef HIR2MPL_INCLUDE_COMMON_FEIR_STMT_H +#define HIR2MPL_INCLUDE_COMMON_FEIR_STMT_H +#include +#include +#include +#include +#include +#include +#include +#include +#include "types_def.h" +#include "mempool_allocator.h" +#include "mir_builder.h" +#include "factory.h" +#include "safe_ptr.h" +#include "fe_utils.h" +#include "feir_var.h" +#include "fe_struct_elem_info.h" +#include "feir_var_type_scatter.h" +#include "fe_options.h" +#include "feir_type_helper.h" +#include "feir_dfg.h" + +namespace maple { +class FEIRBuilder; + +enum FEIRNodeKind : uint8 { +#define FEIR_NODE_KIND(kind, description) \ + k##kind, +#include "feir_node_kind.def" +#undef FEIR_NODE_KIND +}; + +std::string GetFEIRNodeKindDescription(FEIRNodeKind kindArg); +constexpr uint32 kOpHashShift = 24; +constexpr uint32 kTypeHashShift = 8; +constexpr uint32 kOtherShift = 16; +constexpr uint32 kRandomNum = 0x9e3779b9; +constexpr uint32 kSeedLeftShift = 6; +constexpr uint32 kSeedRightShift = 2; + +// ---------- FEIRNode ---------- +class FEIRNode { + public: + explicit FEIRNode(FEIRNodeKind argKind) + : kind(argKind) {} + virtual ~FEIRNode() = default; + + protected: + FEIRNodeKind kind; +}; // class FEIRNode + +// ---------- FEIRDFGNode ---------- +class FEIRDFGNode { + public: + explicit FEIRDFGNode(const UniqueFEIRVar &argVar) + : var(argVar) { + CHECK_NULL_FATAL(argVar); + } + + virtual ~FEIRDFGNode() = default; + bool operator==(const FEIRDFGNode &node) const { + return var->EqualsTo(node.var); + } + + size_t Hash() const { + return var->Hash(); + } + + std::string GetNameRaw() const { + return var->GetNameRaw(); + } + + private: + const UniqueFEIRVar &var; +}; + +class FEIRDFGNodeHash { + public: + std::size_t operator()(const FEIRDFGNode &node) const { + return node.Hash(); + } +}; + +using UniqueFEIRDFGNode = std::unique_ptr; + +class FEIRStmtCheckPoint; +// ---------- FEIRStmt ---------- +class FEIRStmt : public FELinkListNode { + public: + explicit FEIRStmt(FEIRNodeKind argKind) + : kind(argKind) {} + + FEIRStmt() + : kind(kStmt) {} // kStmt as default + + virtual ~FEIRStmt() = default; + void RegisterDFGNodes2CheckPoint(FEIRStmtCheckPoint &checkPoint) { + RegisterDFGNodes2CheckPointImpl(checkPoint); + } + + bool CalculateDefs4AllUses(FEIRStmtCheckPoint &checkPoint, FEIRUseDefChain &udChain) { + return CalculateDefs4AllUsesImpl(checkPoint, udChain); + } + + void InitTrans4AllVars() { + InitTrans4AllVarsImpl(); + } + + FEIRVarTypeScatter* GetTypeScatterDefVar() const { + return GetTypeScatterDefVarImpl(); + } + + std::list GenMIRStmts(MIRBuilder &mirBuilder) const { + std::list stmts = GenMIRStmtsImpl(mirBuilder); + SetMIRStmtSrcPos(stmts); + return stmts; + } + + FEIRNodeKind GetKind() const { + return kind; + } + + void SetKind(FEIRNodeKind argKind) { + kind = argKind; + } + + bool IsFallThru() const { + return IsFallThroughImpl(); + } + + void SetFallThru(bool arg) { + isFallThru = arg; + } + + bool IsBranch() const { + return IsBranchImpl(); + } + + bool IsStmtInst() const { + return IsStmtInstImpl(); + } + + bool IsTarget() const { + return IsTargetImpl(); + } + + bool IsThrowable() const { + return isThrowable; + } + + void SetThrowable(bool argIsThrowable) { + isThrowable = argIsThrowable; + } + + uint32 GetID() const { + return id; + } + + void SetID(uint32 arg) { + id = arg; + } + + bool IsAuxPre() const { + return isAuxPre; + } + + bool IsAuxPost() const { + return isAuxPost; + } + + bool IsAux() const { + return isAuxPre || isAuxPost; + } + + const std::vector &GetExtraPreds() const { + return 
extraPreds; + } + + const std::vector &GetExtraSuccs() const { + return extraSuccs; + } + + void AddExtraPred(FEIRStmt &stmt) { + extraPreds.push_back(&stmt); + } + + void AddExtraSucc(FEIRStmt &stmt) { + extraSuccs.push_back(&stmt); + } + + bool HasDef() const { + return HasDefImpl(); + } + + void SetHexPC(uint32 argHexPC) { + return SetHexPCImpl(argHexPC); + } + + uint32 GetHexPC(void) const { + return GetHexPCImpl(); + } + + bool IsStmtInstComment() const; + bool ShouldHaveLOC() const; + BaseNode *ReplaceAddrOfNode(BaseNode *node) const; + void SetSrcLoc(const Loc &l) { + loc = l; + } + + Loc GetSrcLoc() const { + return loc; + } + + uint32 GetSrcFileIdx() const { + return loc.fileIdx; + } + + uint32 GetSrcFileLineNum() const { + return loc.line; + } + + uint32 GetSrcFileColumn() const { + return loc.column; + } + + bool HasSetLOCInfo() const { + return (loc.fileIdx != 0 || loc.line != 0); + } + + bool IsDummy() const { + return isDummy; + } + + void SetDummy() { + isDummy = true; + } + + std::string DumpDotString() const { + return DumpDotStringImpl(); + } + + void Dump(const std::string &prefix = "") const { + return DumpImpl(prefix); + } + + protected: + virtual std::string DumpDotStringImpl() const; + virtual void DumpImpl(const std::string &prefix) const; + virtual void RegisterDFGNodes2CheckPointImpl(FEIRStmtCheckPoint &checkPoint) {} + virtual bool CalculateDefs4AllUsesImpl(FEIRStmtCheckPoint &checkPoint, FEIRUseDefChain &udChain) { + return true; + } + + virtual FEIRVarTypeScatter* GetTypeScatterDefVarImpl() const { + return nullptr; + } + + virtual void InitTrans4AllVarsImpl() {} + virtual std::list GenMIRStmtsImpl(MIRBuilder &mirBuilder) const; + virtual bool IsStmtInstImpl() const; + virtual bool IsFallThroughImpl() const { + return isFallThru; + } + + virtual bool IsBranchImpl() const { + return false; + } + + virtual bool IsTargetImpl() const { + return false; + } + + virtual bool HasDefImpl() const { + return false; + } + + void SetHexPCImpl(uint32 argHexPC) { + hexPC = argHexPC; + } + + uint32 GetHexPCImpl(void) const { + return hexPC; + } + + void SetMIRStmtSrcPos(std::list &stmts) const { + if (FEOptions::GetInstance().IsDumpLOC() && !stmts.empty()) { + (*stmts.begin())->GetSrcPos().SetFileNum(static_cast(loc.fileIdx)); + (*stmts.begin())->GetSrcPos().SetLineNum(loc.line); + (*stmts.begin())->GetSrcPos().SetColumn(static_cast(loc.column)); + } + } + + FEIRNodeKind kind; + uint32 id = 0; + Loc loc = {0, 0, 0}; + uint32 hexPC = UINT32_MAX; + bool isDummy = false; + bool isFallThru = true; + bool isAuxPre = false; + bool isAuxPost = false; + bool isThrowable = false; + bool isEnhancedChecking = true; + std::vector extraPreds; + std::vector extraSuccs; +}; + +using UniqueFEIRStmt = std::unique_ptr; + +// ---------- FEIRStmtCheckPoint ---------- +class FEIRStmtCheckPoint : public FEIRStmt { + public: + FEIRStmtCheckPoint() + : FEIRStmt(FEIRNodeKind::kStmtCheckPoint), + firstVisibleStmt(nullptr) {} + ~FEIRStmtCheckPoint() override { + firstVisibleStmt = nullptr; + } + + void Reset(); + void RegisterDFGNode(UniqueFEIRVar &var); + void RegisterDFGNodes(const std::list &vars); + void RegisterDFGNodeFromAllVisibleStmts(); + void AddPredCheckPoint(FEIRStmtCheckPoint &stmtCheckPoint); + std::set &CalcuDef(UniqueFEIRVar &use); + void SetFirstVisibleStmt(FEIRStmt &stmt) { + CHECK_FATAL((stmt.GetKind() != FEIRNodeKind::kStmtCheckPoint), "check point should not be DFG Node."); + firstVisibleStmt = &stmt; + } + + protected: + std::string DumpDotStringImpl() const override; + + private: 
+ void CalcuDefDFS(std::set &result, const UniqueFEIRVar &use, const FEIRStmtCheckPoint &cp, + std::set &visitSet) const; + std::set predCPs; + std::list defs; + std::list uses; + FEIRUseDefChain localUD; + std::unordered_map lastDef; + std::unordered_map, FEIRDFGNodeHash> cacheUD; + FEIRStmt *firstVisibleStmt; +}; + +// ---------- FEIRExpr ---------- +class FEIRExpr { + public: + explicit FEIRExpr(FEIRNodeKind argKind); + FEIRExpr(FEIRNodeKind argKind, std::unique_ptr argType); + virtual ~FEIRExpr() = default; + FEIRExpr(const FEIRExpr&) = delete; + FEIRExpr& operator=(const FEIRExpr&) = delete; + std::string DumpDotString() const; + std::unique_ptr Clone(); + + virtual bool operator==(const FEIRExpr &expr) const { + return false; + } + + virtual bool operator!=(const FEIRExpr &expr) const { + return true; + } + + BaseNode *GenMIRNode(MIRBuilder &mirBuilder) const { + return GenMIRNodeImpl(mirBuilder); + } + + std::vector GetVarUses() const { + return GetVarUsesImpl(); + } + + bool IsNestable() const { + return IsNestableImpl(); + } + + bool IsAddrof() const { + return IsAddrofImpl(); + } + + void SetAddrof(bool flag) { + isAddrof = flag; + } + + bool HasException() const { + return HasExceptionImpl(); + } + + void SetIsBoundaryChecking(bool flag) { + isBoundaryChecking = flag; + } + + bool IsBoundaryChecking() const { + return isBoundaryChecking; + } + + void SetIsEnhancedChecking(bool flag) { + isEnhancedChecking = flag; + } + + bool IsEnhancedChecking() const { + return isEnhancedChecking; + } + + void SetType(std::unique_ptr argType) { + SetTypeImpl(std::move(argType)); + } + + FEIRNodeKind GetKind() const { + return kind; + } + + void SetKind(FEIRNodeKind specKind) { + kind = specKind; + } + + FEIRType *GetType() const { + return GetTypeImpl(); + } + + const FEIRType &GetTypeRef() const { + return GetTypeRefImpl(); + } + + PrimType GetPrimType() const { + return GetPrimTypeImpl(); + } + + FieldID GetFieldID() const { + return GetFieldIDImpl(); + } + + void SetFieldID(FieldID fieldID) { + return SetFieldIDImpl(fieldID); + } + + void SetFieldType(std::unique_ptr fieldType) { + return SetFieldTypeImpl(std::move(fieldType)); + } + + void RegisterDFGNodes2CheckPoint(FEIRStmtCheckPoint &checkPoint) { + RegisterDFGNodes2CheckPointImpl(checkPoint); + } + + bool CalculateDefs4AllUses(FEIRStmtCheckPoint &checkPoint, FEIRUseDefChain &udChain) { + return CalculateDefs4AllUsesImpl(checkPoint, udChain); + } + + uint32 Hash() const { + return HashImpl(); + } + + void SetLoc(const Loc &locIn) { + loc = locIn; + } + + const Loc GetLoc() const { + return loc; + } + + std::string Dump() const { + return GetFEIRNodeKindDescription(kind); + } + + protected: + virtual std::unique_ptr CloneImpl() const = 0; + virtual BaseNode *GenMIRNodeImpl(MIRBuilder &mirBuilder) const = 0; + virtual std::vector GetVarUsesImpl() const; + virtual void RegisterDFGNodes2CheckPointImpl(FEIRStmtCheckPoint &checkPoint) {} + virtual bool CalculateDefs4AllUsesImpl(FEIRStmtCheckPoint &checkPoint, FEIRUseDefChain &udChain) { + return true; + } + + virtual void SetTypeImpl(std::unique_ptr argType) { + ASSERT_NOT_NULL(argType); + type = std::move(argType); + } + + virtual PrimType GetPrimTypeImpl() const { + return type->GetPrimType(); + } + + virtual FEIRType *GetTypeImpl() const { + ASSERT(type != nullptr, "type is nullptr"); + return type.get(); + } + + virtual const FEIRType &GetTypeRefImpl() const { + ASSERT(GetTypeImpl() != nullptr, "type is nullptr"); + return *GetTypeImpl(); + } + + virtual FieldID GetFieldIDImpl() const { + 
CHECK_FATAL(false, "unsupported in base class"); + } + + virtual void SetFieldIDImpl(FieldID fieldID) { + CHECK_FATAL(false, "unsupported in base class"); + } + + virtual void SetFieldTypeImpl(std::unique_ptr fieldType) { + CHECK_FATAL(false, "unsupported in base class"); + } + + virtual uint32 HashImpl() const { + CHECK_FATAL(false, "unsupported in base class"); + return 0; + } + + virtual bool IsNestableImpl() const; + virtual bool IsAddrofImpl() const; + virtual bool HasExceptionImpl() const; + + FEIRNodeKind kind; + bool isNestable : 1; + bool isAddrof : 1; + bool hasException : 1; + bool isBoundaryChecking : 1; + bool isEnhancedChecking : 1; + std::unique_ptr type; + Loc loc = {0, 0, 0}; +}; // class FEIRExpr + +using UniqueFEIRExpr = std::unique_ptr; + +// ---------- FEIRExprConst ---------- +union ConstExprValue { + bool b; + uint8 u8; + int8 i8; + uint16 u16; + int16 i16; + uint32 u32; + int32 i32; + float f32; + uint64 u64 = 0; + int64 i64; + double f64; +}; + +class FEIRExprConst : public FEIRExpr { + public: + FEIRExprConst(); + FEIRExprConst(int64 val, PrimType argType); + FEIRExprConst(uint64 val, PrimType argType); + explicit FEIRExprConst(uint32 val); + explicit FEIRExprConst(float val); + explicit FEIRExprConst(double val); + ~FEIRExprConst() = default; + FEIRExprConst(const FEIRExprConst&) = delete; + FEIRExprConst& operator=(const FEIRExprConst&) = delete; + + ConstExprValue GetValue() const { + return value; + } + + protected: + std::unique_ptr CloneImpl() const override; + BaseNode *GenMIRNodeImpl(MIRBuilder &mirBuilder) const override; + uint32 HashImpl() const override; + + private: + void CheckRawValue2SetZero(); + ConstExprValue value; +}; + +// ---------- FEIRExprSizeOfType ---------- +class FEIRExprSizeOfType : public FEIRExpr { + public: + explicit FEIRExprSizeOfType(UniqueFEIRType ty); + ~FEIRExprSizeOfType() = default; + + protected: + std::unique_ptr CloneImpl() const override; + BaseNode *GenMIRNodeImpl(MIRBuilder &mirBuilder) const override; + + uint32 HashImpl() const override { + return (static_cast(kind) << kOpHashShift) + (type->Hash() << kTypeHashShift) + feirType->Hash(); + } + + private: + UniqueFEIRType feirType; +}; + +// ---------- FEIRExprDRead ---------- +class FEIRExprDRead : public FEIRExpr { + public: + explicit FEIRExprDRead(std::unique_ptr argVarSrc); + FEIRExprDRead(std::unique_ptr argType, std::unique_ptr argVarSrc); + ~FEIRExprDRead() = default; + void SetVarSrc(std::unique_ptr argVarSrc); + void SetTrans(UniqueFEIRVarTrans argTrans) { + varSrc->SetTrans(std::move(argTrans)); + } + + bool operator==(const FEIRExpr &expr) const override { + return expr.GetKind() == kExprDRead && *varSrc == *(expr.GetVarUses().front()) && fieldID == expr.GetFieldID(); + } + + bool operator!=(const FEIRExpr &expr) const override { + return expr.GetKind() != kExprDRead || *varSrc != *(expr.GetVarUses().front()) || fieldID != expr.GetFieldID(); + } + + UniqueFEIRVarTrans CreateTransDirect() { + UniqueFEIRVarTrans trans = std::make_unique(FEIRVarTransKind::kFEIRVarTransDirect, varSrc); + return trans; + } + + UniqueFEIRVarTrans CreateTransArrayDimDecr() { + UniqueFEIRVarTrans trans = std::make_unique(FEIRVarTransKind::kFEIRVarTransArrayDimDecr, varSrc); + return trans; + } + + UniqueFEIRVarTrans CreateTransArrayDimIncr() { + UniqueFEIRVarTrans trans = std::make_unique(FEIRVarTransKind::kFEIRVarTransArrayDimIncr, varSrc); + return trans; + } + + const UniqueFEIRVar &GetVar() const { + return varSrc; + } + + std::unique_ptr GetFieldType() const { + return 
fieldType->Clone(); + } + + protected: + std::unique_ptr CloneImpl() const override; + void RegisterDFGNodes2CheckPointImpl(FEIRStmtCheckPoint &checkPoint) override; + bool CalculateDefs4AllUsesImpl(FEIRStmtCheckPoint &checkPoint, FEIRUseDefChain &udChain) override; + BaseNode *GenMIRNodeImpl(MIRBuilder &mirBuilder) const override; + std::vector GetVarUsesImpl() const override; + PrimType GetPrimTypeImpl() const override; + void SetTypeImpl(std::unique_ptr argType) override; + FEIRType *GetTypeImpl() const override; + const FEIRType &GetTypeRefImpl() const override; + + FieldID GetFieldIDImpl() const override { + return fieldID; + } + + void SetFieldTypeImpl(std::unique_ptr type) override { + fieldType = std::move(type); + } + + void SetFieldIDImpl(FieldID argFieldID) override { + fieldID = argFieldID; + } + + uint32 HashImpl() const override { + return (static_cast(kind) << kOpHashShift) + (type->Hash() << kTypeHashShift) + + (static_cast(fieldID) << kOtherShift) + varSrc->Hash(); + } + + private: + std::unique_ptr varSrc; + FieldID fieldID = 0; + std::unique_ptr fieldType; +}; + +// ---------- FEIRExprRegRead ---------- +class FEIRExprRegRead : public FEIRExpr { + public: + FEIRExprRegRead(PrimType pty, int32 regNumIn); + ~FEIRExprRegRead() = default; + + protected: + std::unique_ptr CloneImpl() const override; + BaseNode *GenMIRNodeImpl(MIRBuilder &mirBuilder) const override; + + PrimType prmType; + int32 regNum; +}; + +// ---------- FEIRExprAddrofConstArray ---------- +class FEIRExprAddrofConstArray : public FEIRExpr { + public: + FEIRExprAddrofConstArray(const std::vector &arrayIn, MIRType *typeIn, const std::string &strIn); + FEIRExprAddrofConstArray(const std::string &arrayNameIn, const std::vector &arrayIn, MIRType *typeIn, + const std::string &strIn) + : FEIRExpr(FEIRNodeKind::kExprAddrof, + FEIRTypeHelper::CreateTypeNative(*GlobalTables::GetTypeTable().GetPtrType())), + arrayName(arrayNameIn), + elemType(typeIn), + str(strIn) { + std::copy(arrayIn.begin(), arrayIn.end(), std::back_inserter(array)); + } + + ~FEIRExprAddrofConstArray() = default; + + uint32 GetStringLiteralSize() const { + return static_cast(array.size()); + } + + const MIRType *GetElemType() const { + return elemType; + } + + protected: + std::unique_ptr CloneImpl() const override; + BaseNode *GenMIRNodeImpl(MIRBuilder &mirBuilder) const override; + + uint32 HashImpl() const override { + uint32 hash = (static_cast(kind) << kOpHashShift); + for (uint32 elem : array) { + hash += static_cast(std::hash{}(elem)); + } + std::size_t seed = array.size(); + for (uint32 elem : array) { + seed ^= elem + kRandomNum + (seed << kSeedLeftShift) + (seed >> kSeedRightShift); + } + hash += static_cast(seed); + if (elemType != nullptr) { + hash += static_cast(elemType->GetHashIndex()); + } + return hash; + } + + private: + std::string arrayName; + std::vector array; + MIRType *elemType = nullptr; + std::string str; +}; + +// ---------- FEIRExprAddrOfLabel ---------- +class FEIRExprAddrOfLabel : public FEIRExpr { + public: + FEIRExprAddrOfLabel(const std::string &lbName, UniqueFEIRType exprType) + : FEIRExpr(FEIRNodeKind::kExprAddrofLabel, std::move(exprType)), labelName(lbName) {} + ~FEIRExprAddrOfLabel() = default; + + protected: + std::unique_ptr CloneImpl() const override; + BaseNode *GenMIRNodeImpl(MIRBuilder &mirBuilder) const override; + + uint32 HashImpl() const override { + return (static_cast(kind) << kOpHashShift) + (type->Hash() << kTypeHashShift) + + static_cast(std::hash{}(labelName)); + } + + private: + std::string 
labelName; +}; + +// ---------- FEIRExprAddrofVar ---------- +class FEIRExprAddrofVar : public FEIRExpr { + public: + explicit FEIRExprAddrofVar(std::unique_ptr argVarSrc) + : FEIRExpr(FEIRNodeKind::kExprAddrofVar, + FEIRTypeHelper::CreateTypeNative(*GlobalTables::GetTypeTable().GetPtrType())), + varSrc(std::move(argVarSrc)) {} + + FEIRExprAddrofVar(std::unique_ptr argVarSrc, FieldID id) + : FEIRExpr(FEIRNodeKind::kExprAddrofVar, + FEIRTypeHelper::CreateTypeNative(*GlobalTables::GetTypeTable().GetPtrType())), + varSrc(std::move(argVarSrc)), fieldID(id) {} + ~FEIRExprAddrofVar() = default; + + void SetVarValue(MIRConst *val) { + cst = val; + } + + MIRConst *GetVarValue() const { + return cst; + } + + protected: + std::unique_ptr CloneImpl() const override; + BaseNode *GenMIRNodeImpl(MIRBuilder &mirBuilder) const override; + std::vector GetVarUsesImpl() const override; + + FieldID GetFieldIDImpl() const override { + return fieldID; + } + + void SetFieldIDImpl(FieldID id) override { + fieldID = id; + } + + uint32 HashImpl() const override { + return (static_cast(kind) << kOpHashShift) + (type->Hash() << kTypeHashShift) + + (static_cast(fieldID) << kOtherShift) + varSrc->Hash(); + } + + private: + std::unique_ptr varSrc; + FieldID fieldID = 0; + MIRConst *cst = nullptr; +}; + +// ---------- FEIRExprIAddrof ---------- +class FEIRExprIAddrof : public FEIRExpr { + public: + FEIRExprIAddrof(UniqueFEIRType pointeeType, FieldID id, UniqueFEIRExpr expr) + : FEIRExpr(FEIRNodeKind::kExprIAddrof, + FEIRTypeHelper::CreateTypeNative(*GlobalTables::GetTypeTable().GetPtrType())), + ptrType(std::move(pointeeType)), + fieldID(id), + subExpr(std::move(expr)) {} + ~FEIRExprIAddrof() = default; + + UniqueFEIRType GetClonedRetType() const { + return type->Clone(); + } + + UniqueFEIRExpr GetClonedOpnd() const { + return subExpr->Clone(); + } + + UniqueFEIRType GetClonedPtrType() const { + return ptrType->Clone(); + } + + protected: + std::unique_ptr CloneImpl() const override; + std::vector GetVarUsesImpl() const override; + BaseNode *GenMIRNodeImpl(MIRBuilder &mirBuilder) const override; + + FieldID GetFieldIDImpl() const override { + return fieldID; + } + + void SetFieldIDImpl(FieldID argFieldID) override { + fieldID = argFieldID; + } + + uint32 HashImpl() const override { + return (static_cast(kind) << kOpHashShift) + (type->Hash() << kTypeHashShift) + + (static_cast(fieldID) << kOtherShift) + ptrType->Hash() + subExpr->Hash(); + } + + private: + UniqueFEIRType ptrType; + FieldID fieldID = 0; + UniqueFEIRExpr subExpr; +}; + +// ---------- FEIRExprAddrofFunc ---------- +class FEIRExprAddrofFunc : public FEIRExpr { + public: + explicit FEIRExprAddrofFunc(const std::string &addr) + : FEIRExpr(FEIRNodeKind::kExprAddrofFunc, + FEIRTypeHelper::CreateTypeNative(*GlobalTables::GetTypeTable().GetPtrType())), + funcAddr(addr) {} + ~FEIRExprAddrofFunc() = default; + + const std::string &GetFuncAddr() const { + return funcAddr; + } + + protected: + std::unique_ptr CloneImpl() const override; + BaseNode *GenMIRNodeImpl(MIRBuilder &mirBuilder) const override; + + uint32 HashImpl() const override { + return (static_cast(kind) << kOpHashShift) + (type->Hash() << kTypeHashShift) + + static_cast(std::hash{}(funcAddr)); + } + + private: + std::string funcAddr; +}; + +// ---------- FEIRExprAddrofArray ---------- +class FEIRExprAddrofArray : public FEIRExpr { + public: + FEIRExprAddrofArray(UniqueFEIRType argTypeNativeArray, UniqueFEIRExpr argExprArray, const std::string &argArrayName, + std::list &argExprIndexs); + 
~FEIRExprAddrofArray() = default; + + void SetIndexsExprs(std::list &exprs) { + exprIndexs.clear(); + for (const auto &e : exprs) { + auto ue = e->Clone(); + exprIndexs.push_back(std::move(ue)); + } + } + + const UniqueFEIRType &GetTypeArray() const { + return typeNativeArray; + } + + const std::list &GetExprIndexs() const { + return exprIndexs; + } + + const UniqueFEIRExpr &GetExprArray() const { + return exprArray; + } + + protected: + std::unique_ptr CloneImpl() const override; + std::vector GetVarUsesImpl() const override; + BaseNode *GenMIRNodeImpl(MIRBuilder &mirBuilder) const override; + + uint32 HashImpl() const override { + uint32 hash = (static_cast(kind) << kOpHashShift) + (type->Hash() << kTypeHashShift) + + (static_cast(std::hash{}(arrayName)) << kOtherShift); + if (typeNativeArray != nullptr) { + hash += typeNativeArray->Hash(); + } + if (exprArray != nullptr) { + hash += exprArray->Hash(); + } + std::size_t seed = exprIndexs.size(); + for (const auto &idx : exprIndexs) { + if (idx != nullptr) { + seed ^= idx->Hash() + kRandomNum + (seed << kSeedLeftShift) + (seed >> kSeedRightShift); + } + } + return hash + static_cast(seed); + } + + private: + mutable std::list exprIndexs; + UniqueFEIRType typeNativeArray = nullptr; + UniqueFEIRExpr exprArray = nullptr; + std::string arrayName; +}; + +// ---------- FEIRExprUnary ---------- +class FEIRExprUnary : public FEIRExpr { + public: + FEIRExprUnary(Opcode argOp, std::unique_ptr argOpnd); + FEIRExprUnary(std::unique_ptr argType, Opcode argOp, std::unique_ptr argOpnd); + ~FEIRExprUnary() = default; + void SetOpnd(std::unique_ptr argOpnd); + const UniqueFEIRExpr &GetOpnd() const; + static std::map InitMapOpNestableForExprUnary(); + + Opcode GetOp() const { + return op; + } + + protected: + std::unique_ptr CloneImpl() const override; + void RegisterDFGNodes2CheckPointImpl(FEIRStmtCheckPoint &checkPoint) override; + bool CalculateDefs4AllUsesImpl(FEIRStmtCheckPoint &checkPoint, FEIRUseDefChain &udChain) override; + BaseNode *GenMIRNodeImpl(MIRBuilder &mirBuilder) const override; + std::vector GetVarUsesImpl() const override; + + uint32 HashImpl() const override { + return (static_cast(kind) << kOpHashShift) + (type->Hash() << kTypeHashShift) + + (static_cast(op) << kOtherShift) + opnd->Hash(); + } + + Opcode op; + std::unique_ptr opnd; + + private: + void SetExprTypeByOp(); + + static std::map mapOpNestable; +}; // class FEIRExprUnary + +// ---------- FEIRExprTypeCvt ---------- +class FEIRExprTypeCvt : public FEIRExprUnary { + public: + FEIRExprTypeCvt(Opcode argOp, std::unique_ptr argOpnd); + FEIRExprTypeCvt(std::unique_ptr exprType, Opcode argOp, std::unique_ptr argOpnd); + ~FEIRExprTypeCvt() = default; + static std::map InitMapOpNestableForTypeCvt(); + static Opcode ChooseOpcodeByFromVarAndToVar(const FEIRVar &fromVar, const FEIRVar &toVar); + + void SetSrcPrimType(PrimType pty) { + srcPrimType = pty; + } + + protected: + std::unique_ptr CloneImpl() const override; + void RegisterDFGNodes2CheckPointImpl(FEIRStmtCheckPoint &checkPoint) override; + bool CalculateDefs4AllUsesImpl(FEIRStmtCheckPoint &checkPoint, FEIRUseDefChain &udChain) override; + BaseNode *GenMIRNodeImpl(MIRBuilder &mirBuilder) const override; + + uint32 HashImpl() const override { + return FEIRExprUnary::HashImpl() + static_cast(srcPrimType); + } + + private: + using FuncPtrGenMIRNode = BaseNode* (FEIRExprTypeCvt::*)(MIRBuilder &mirBuilder) const; + static std::map InitFuncPtrMapForParseExpr(); + + // MIR: op () + BaseNode *GenMIRNodeMode1(MIRBuilder &mirBuilder) 
const; + + // MIR: op () + BaseNode *GenMIRNodeMode2(MIRBuilder &mirBuilder) const; + + // MIR: retype () + BaseNode *GenMIRNodeMode3(MIRBuilder &mirBuilder) const; + + static std::map mapOpNestable; + static std::map funcPtrMapForParseExpr; + PrimType srcPrimType = PTY_unknown; +}; // FEIRExprTypeCvt + +// ---------- FEIRExprExtractBits ---------- +class FEIRExprExtractBits : public FEIRExprUnary { + public: + FEIRExprExtractBits(Opcode argOp, PrimType argPrimType, uint8 argBitOffset, uint8 argBitSize, + std::unique_ptr argOpnd); + FEIRExprExtractBits(Opcode argOp, PrimType argPrimType, std::unique_ptr argOpnd); + ~FEIRExprExtractBits() = default; + static std::map InitMapOpNestableForExtractBits(); + void SetBitOffset(uint8 offset) { + bitOffset = offset; + } + + void SetBitSize(uint8 size) { + bitSize = size; + } + + protected: + std::unique_ptr CloneImpl() const override; + void RegisterDFGNodes2CheckPointImpl(FEIRStmtCheckPoint &checkPoint) override; + bool CalculateDefs4AllUsesImpl(FEIRStmtCheckPoint &checkPoint, FEIRUseDefChain &udChain) override; + BaseNode *GenMIRNodeImpl(MIRBuilder &mirBuilder) const override; + + uint32 HashImpl() const override { + return FEIRExprUnary::HashImpl() + (static_cast(bitOffset) << kOtherShift) + static_cast(bitSize); + } + + private: + using FuncPtrGenMIRNode = BaseNode* (FEIRExprExtractBits::*)(MIRBuilder &mirBuilder) const; + static std::map InitFuncPtrMapForParseExpr(); + BaseNode *GenMIRNodeForExtrabits(MIRBuilder &mirBuilder) const; + BaseNode *GenMIRNodeForExt(MIRBuilder &mirBuilder) const; + + uint8 bitOffset; + uint8 bitSize; + static std::map mapOpNestable; + static std::map funcPtrMapForParseExpr; +}; // FEIRExprExtractBit + +// ---------- FEIRExprIRead ---------- +class FEIRExprIRead : public FEIRExpr { + public: + FEIRExprIRead(UniqueFEIRType returnType, UniqueFEIRType pointeeType, FieldID id, UniqueFEIRExpr expr) + : FEIRExpr(FEIRNodeKind::kExprIRead, std::move(returnType)), + ptrType(std::move(pointeeType)), + fieldID(id), + subExpr(std::move(expr)) {} + ~FEIRExprIRead() override = default; + + UniqueFEIRType GetClonedRetType() const { + return type->Clone(); + } + + void SetClonedOpnd(UniqueFEIRExpr argOpnd) { + CHECK_FATAL(argOpnd != nullptr, "opnd is nullptr"); + subExpr = std::move(argOpnd); + } + + UniqueFEIRExpr GetClonedOpnd() const { + return subExpr->Clone(); + } + + UniqueFEIRType GetClonedPtrType() const { + return ptrType->Clone(); + } + + protected: + std::unique_ptr CloneImpl() const override; + std::vector GetVarUsesImpl() const override; + BaseNode *GenMIRNodeImpl(MIRBuilder &mirBuilder) const override; + + FieldID GetFieldIDImpl() const override { + return fieldID; + } + + void SetFieldIDImpl(FieldID argFieldID) override { + fieldID = argFieldID; + } + + void SetFieldTypeImpl(UniqueFEIRType argFieldType) override { + type = std::move(argFieldType); + } + + uint32 HashImpl() const override { + return (static_cast(kind) << kOpHashShift) + (type->Hash() << kTypeHashShift) + + (static_cast(fieldID) << kOtherShift) + ptrType->Hash() + subExpr->Hash(); + } + + private: + UniqueFEIRType ptrType = nullptr; + FieldID fieldID = 0; + UniqueFEIRExpr subExpr = nullptr; +}; + +// ---------- FEIRExprBinary ---------- +class FEIRExprBinary : public FEIRExpr { + public: + FEIRExprBinary(Opcode argOp, std::unique_ptr argOpnd0, std::unique_ptr argOpnd1); + FEIRExprBinary(std::unique_ptr exprType, Opcode argOp, std::unique_ptr argOpnd0, + std::unique_ptr argOpnd1); + ~FEIRExprBinary() = default; + void SetOpnd0(std::unique_ptr argOpnd); + 
void SetOpnd1(std::unique_ptr argOpnd); + const std::unique_ptr &GetOpnd0() const; + const std::unique_ptr &GetOpnd1() const; + bool IsComparative() const; + + Opcode GetOp() const { + return op; + } + + protected: + std::unique_ptr CloneImpl() const override; + void RegisterDFGNodes2CheckPointImpl(FEIRStmtCheckPoint &checkPoint) override; + bool CalculateDefs4AllUsesImpl(FEIRStmtCheckPoint &checkPoint, FEIRUseDefChain &udChain) override; + BaseNode *GenMIRNodeImpl(MIRBuilder &mirBuilder) const override; + std::vector GetVarUsesImpl() const override; + bool IsNestableImpl() const override; + bool IsAddrofImpl() const override; + + uint32 HashImpl() const override { + return (static_cast(kind) << kOpHashShift) + (type->Hash() << kTypeHashShift) + + (static_cast(op) << kOtherShift) + (opnd0->Hash() << kTypeHashShift) + opnd1->Hash(); + } + + private: + using FuncPtrGenMIRNode = BaseNode* (FEIRExprBinary::*)(MIRBuilder &mirBuilder) const; + static std::map InitFuncPtrMapForGenMIRNode(); + BaseNode *GenMIRNodeNormal(MIRBuilder &mirBuilder) const; + BaseNode *GenMIRNodeCompare(MIRBuilder &mirBuilder) const; + BaseNode *GenMIRNodeCompareU1(MIRBuilder &mirBuilder) const; + void SetExprTypeByOp(); + void SetExprTypeByOpNormal(); + void SetExprTypeByOpShift(); + void SetExprTypeByOpLogic(); + void SetExprTypeByOpCompare(); + + Opcode op; + std::unique_ptr opnd0; + std::unique_ptr opnd1; + static std::map funcPtrMapForGenMIRNode; +}; // class FEIRExprUnary + +// ---------- FEIRExprTernary ---------- +class FEIRExprTernary : public FEIRExpr { + public: + FEIRExprTernary(Opcode argOp, std::unique_ptr argOpnd0, std::unique_ptr argOpnd1, + std::unique_ptr argOpnd2); + FEIRExprTernary(Opcode argOp, std::unique_ptr argType, std::unique_ptr argOpnd0, + std::unique_ptr argOpnd1, std::unique_ptr argOpnd2); + ~FEIRExprTernary() = default; + void SetOpnd(std::unique_ptr argOpnd, uint32 idx); + + protected: + std::unique_ptr CloneImpl() const override; + void RegisterDFGNodes2CheckPointImpl(FEIRStmtCheckPoint &checkPoint) override; + bool CalculateDefs4AllUsesImpl(FEIRStmtCheckPoint &checkPoint, FEIRUseDefChain &udChain) override; + BaseNode *GenMIRNodeImpl(MIRBuilder &mirBuilder) const override; + std::vector GetVarUsesImpl() const override; + bool IsNestableImpl() const override; + bool IsAddrofImpl() const override; + + uint32 HashImpl() const override { + return (static_cast(kind) << kOpHashShift) + (type->Hash() << kTypeHashShift) + + (static_cast(op) << kOtherShift) + + (opnd0->Hash() << kTypeHashShift) + (opnd1->Hash() << kOtherShift) + opnd2->Hash(); + } + + private: + void SetExprTypeByOp(); + + Opcode op; + std::unique_ptr opnd0; + std::unique_ptr opnd1; + std::unique_ptr opnd2; +}; + +// ---------- FEIRExprNary ---------- +class FEIRExprNary : public FEIRExpr { + public: + explicit FEIRExprNary(Opcode argOp); + virtual ~FEIRExprNary() = default; + void AddOpnd(std::unique_ptr argOpnd); + void AddOpnds(const std::vector> &argOpnds); + void ResetOpnd(); + const std::vector> &GetOpnds() const { + return opnds; + } + + protected: + void RegisterDFGNodes2CheckPointImpl(FEIRStmtCheckPoint &checkPoint) override; + bool CalculateDefs4AllUsesImpl(FEIRStmtCheckPoint &checkPoint, FEIRUseDefChain &udChain) override; + std::vector GetVarUsesImpl() const override; + + uint32 HashImpl() const override { + uint32 hash = (static_cast(kind) << kOpHashShift) + (type->Hash() << kTypeHashShift) + + (static_cast(op) << kOtherShift); + std::size_t seed = opnds.size(); + for (auto &opnd : opnds) { + seed ^= opnd->Hash() + 
kRandomNum + (seed << kSeedLeftShift) + (seed >> kSeedRightShift); + } + hash += static_cast(seed); + return hash; + } + + Opcode op; + std::vector> opnds; +}; // class FEIRExprNary + +// ---------- FEIRExprArray ---------- +class FEIRExprArray : public FEIRExprNary { + public: + FEIRExprArray(Opcode argOp, std::unique_ptr argArray, std::unique_ptr argIndex); + ~FEIRExprArray() = default; + void SetOpndArray(std::unique_ptr opndArray); + void SetOpndIndex(std::unique_ptr opndIndex); + + protected: + std::unique_ptr CloneImpl() const override; + void RegisterDFGNodes2CheckPointImpl(FEIRStmtCheckPoint &checkPoint) override; + bool CalculateDefs4AllUsesImpl(FEIRStmtCheckPoint &checkPoint, FEIRUseDefChain &udChain) override; + BaseNode *GenMIRNodeImpl(MIRBuilder &mirBuilder) const override; + bool IsNestableImpl() const override; + bool IsAddrofImpl() const override; +}; // class FEIRExprArray + +// ---------- FEIRExprIntrinsicop ---------- +class FEIRExprIntrinsicop : public FEIRExprNary { + public: + FEIRExprIntrinsicop(std::unique_ptr exprType, MIRIntrinsicID argIntrinsicID); + FEIRExprIntrinsicop(std::unique_ptr exprType, MIRIntrinsicID argIntrinsicID, + std::unique_ptr argParamType); + FEIRExprIntrinsicop(std::unique_ptr exprType, MIRIntrinsicID argIntrinsicID, + const std::vector> &argOpnds); + FEIRExprIntrinsicop(std::unique_ptr exprType, MIRIntrinsicID argIntrinsicID, + std::unique_ptr argParamType, uint32 argTypeID); + FEIRExprIntrinsicop(std::unique_ptr exprType, MIRIntrinsicID argIntrinsicID, + std::unique_ptr argParamType, + const std::vector> &argOpnds); + ~FEIRExprIntrinsicop() = default; + + protected: + std::unique_ptr CloneImpl() const override; + void RegisterDFGNodes2CheckPointImpl(FEIRStmtCheckPoint &checkPoint) override; + bool CalculateDefs4AllUsesImpl(FEIRStmtCheckPoint &checkPoint, FEIRUseDefChain &udChain) override; + BaseNode *GenMIRNodeImpl(MIRBuilder &mirBuilder) const override; + bool IsNestableImpl() const override; + bool IsAddrofImpl() const override; + + uint32 HashImpl() const override { + uint32 hash = FEIRExprNary::HashImpl() + (static_cast(intrinsicID) << 16) + typeID; + if (paramType != nullptr) { + hash += paramType->Hash(); + } + return hash; + } + + private: + MIRIntrinsicID intrinsicID; + std::unique_ptr paramType; + uint32 typeID = UINT32_MAX; +}; // class FEIRExprIntrinsicop + +class FEIRExprIntrinsicopForC : public FEIRExprNary { + public: + FEIRExprIntrinsicopForC(std::unique_ptr exprType, MIRIntrinsicID argIntrinsicID, + const std::vector> &argOpnds); + ~FEIRExprIntrinsicopForC() = default; + + MIRIntrinsicID GetIntrinsicID() const { + return intrinsicID; + } + + protected: + std::unique_ptr CloneImpl() const override; + BaseNode *GenMIRNodeImpl(MIRBuilder &mirBuilder) const override; + + uint32 HashImpl() const override { + return FEIRExprNary::HashImpl() + static_cast(intrinsicID); + } + + private: + MIRIntrinsicID intrinsicID; +}; // class FEIRExprIntrinsicopForC + +class FEIRExprJavaMerge : public FEIRExprNary { + public: + FEIRExprJavaMerge(std::unique_ptr mergedTypeArg, const std::vector> &argOpnds); + ~FEIRExprJavaMerge() = default; + + protected: + std::unique_ptr CloneImpl() const override; + void RegisterDFGNodes2CheckPointImpl(FEIRStmtCheckPoint &checkPoint) override; + bool CalculateDefs4AllUsesImpl(FEIRStmtCheckPoint &checkPoint, FEIRUseDefChain &udChain) override; + BaseNode *GenMIRNodeImpl(MIRBuilder &mirBuilder) const override; +}; + +// ---------- FEIRExprJavaNewInstance ---------- +class FEIRExprJavaNewInstance : public FEIRExpr 
{ + public: + explicit FEIRExprJavaNewInstance(UniqueFEIRType argType); + FEIRExprJavaNewInstance(UniqueFEIRType argType, uint32 argTypeID); + FEIRExprJavaNewInstance(UniqueFEIRType argType, uint32 argTypeID, bool argIsRcPermanent); + ~FEIRExprJavaNewInstance() = default; + + protected: + std::unique_ptr CloneImpl() const override; + BaseNode *GenMIRNodeImpl(MIRBuilder &mirBuilder) const override; + + uint32 typeID = UINT32_MAX; + // isRcPermanent is true means the rc annotation @Permanent is used + bool isRcPermanent = false; +}; + +// ---------- FEIRExprJavaNewArray ---------- +class FEIRExprJavaNewArray : public FEIRExpr { + public: + FEIRExprJavaNewArray(UniqueFEIRType argArrayType, UniqueFEIRExpr argExprSize); + FEIRExprJavaNewArray(UniqueFEIRType argArrayType, UniqueFEIRExpr argExprSize, uint32 argTypeID); + FEIRExprJavaNewArray(UniqueFEIRType argArrayType, UniqueFEIRExpr argExprSize, uint32 argTypeID, + bool argIsRcPermanent); + ~FEIRExprJavaNewArray() = default; + void SetArrayType(UniqueFEIRType argArrayType) { + CHECK_NULL_FATAL(argArrayType); + arrayType = std::move(argArrayType); + } + + void SetExprSize(UniqueFEIRExpr argExprSize) { + CHECK_NULL_FATAL(argExprSize); + exprSize = std::move(argExprSize); + } + + protected: + std::unique_ptr CloneImpl() const override; + std::vector GetVarUsesImpl() const override; + void RegisterDFGNodes2CheckPointImpl(FEIRStmtCheckPoint &checkPoint) override; + bool CalculateDefs4AllUsesImpl(FEIRStmtCheckPoint &checkPoint, FEIRUseDefChain &udChain) override; + BaseNode *GenMIRNodeImpl(MIRBuilder &mirBuilder) const override; + + private: + UniqueFEIRType arrayType; + UniqueFEIRExpr exprSize; + uint32 typeID = UINT32_MAX; + // isRcPermanent is true means the rc annotation @Permanent is used + bool isRcPermanent = false; +}; + +// ---------- FEIRExprJavaArrayLength ---------- +class FEIRExprJavaArrayLength : public FEIRExpr { + public: + explicit FEIRExprJavaArrayLength(UniqueFEIRExpr argExprArray); + ~FEIRExprJavaArrayLength() = default; + void SetExprArray(UniqueFEIRExpr argExprArray) { + CHECK_NULL_FATAL(argExprArray); + exprArray = std::move(argExprArray); + } + + protected: + std::unique_ptr CloneImpl() const override; + std::vector GetVarUsesImpl() const override; + void RegisterDFGNodes2CheckPointImpl(FEIRStmtCheckPoint &checkPoint) override; + bool CalculateDefs4AllUsesImpl(FEIRStmtCheckPoint &checkPoint, FEIRUseDefChain &udChain) override; + BaseNode *GenMIRNodeImpl(MIRBuilder &mirBuilder) const override; + + private: + UniqueFEIRExpr exprArray; +}; + +// ---------- FEIRExprArrayLoad ---------- +class FEIRExprArrayLoad : public FEIRExpr { + public: + FEIRExprArrayLoad(UniqueFEIRExpr argExprArray, UniqueFEIRExpr argExprIndex, UniqueFEIRType argTypeArray); + ~FEIRExprArrayLoad() = default; + const UniqueFEIRType GetElemType() const { + UniqueFEIRType typeElem = typeArray->Clone(); + (void)typeElem->ArrayDecrDim(); + return typeElem; + } + + UniqueFEIRVarTrans CreateTransArrayDimDecr() { + FEIRExprDRead *dRead = static_cast(exprArray.get()); + return dRead->CreateTransArrayDimDecr(); + } + + void SetTrans(UniqueFEIRVarTrans argTrans) { + CHECK_FATAL(argTrans->GetTransKind() == kFEIRVarTransArrayDimIncr, "ArrayLoad must hold DimIncr Transfer Function"); + FEIRExprDRead *dRead = static_cast(exprArray.get()); + dRead->SetTrans(std::move(argTrans)); + } + + protected: + std::unique_ptr CloneImpl() const override; + std::vector GetVarUsesImpl() const override; + void RegisterDFGNodes2CheckPointImpl(FEIRStmtCheckPoint &checkPoint) override; + bool 
CalculateDefs4AllUsesImpl(FEIRStmtCheckPoint &checkPoint, FEIRUseDefChain &udChain) override; + BaseNode *GenMIRNodeImpl(MIRBuilder &mirBuilder) const override; + + private: + UniqueFEIRExpr exprArray; + UniqueFEIRExpr exprIndex; + UniqueFEIRType typeArray; +}; + +class FEIRExprCStyleCast : public FEIRExpr { + public: + FEIRExprCStyleCast(MIRType *src, MIRType *dest, UniqueFEIRExpr sub, bool isArr2Pty); + ~FEIRExprCStyleCast() = default; + void SetArray2Pointer(bool isArr2Ptr) { + isArray2Pointer = isArr2Ptr; + } + + void SetRefName(const std::string &name) { + refName = name; + } + + const UniqueFEIRExpr &GetSubExpr() const { + return subExpr; + } + + MIRType *GetMIRType() const { + CHECK_NULL_FATAL(destType); + return destType; + } + + protected: + std::unique_ptr CloneImpl() const override; + std::vector GetVarUsesImpl() const override; + BaseNode *GenMIRNodeImpl(MIRBuilder &mirBuilder) const override; + + private: + MIRType *srcType = nullptr; + MIRType *destType = nullptr; + UniqueFEIRExpr subExpr; + bool isArray2Pointer; + std::string refName; +}; + +enum ASTAtomicOp { + kAtomicOpUndefined, + kAtomicOpLoad, + kAtomicOpLoadN, + kAtomicOpStore, + kAtomicOpStoreN, + kAtomicOpExchange, + kAtomicOpExchangeN, + kAtomicOpAddFetch, + kAtomicOpSubFetch, + kAtomicOpAndFetch, + kAtomicOpXorFetch, + kAtomicOpOrFetch, + kAtomicOpNandFetch, + kAtomicOpFetchAdd, + kAtomicOpFetchSub, + kAtomicOpFetchAnd, + kAtomicOpFetchXor, + kAtomicOpFetchOr, + kAtomicOpFetchNand, +}; + +class FEIRExprAtomic : public FEIRExpr { + public: + FEIRExprAtomic(MIRType *ty, MIRType *ref, UniqueFEIRExpr obj, ASTAtomicOp atomOp); + ~FEIRExprAtomic() = default; + + void SetVal1Type(MIRType *ty) { + val1Type = ty; + } + + void SetVal1Expr(UniqueFEIRExpr expr) { + valExpr1 = std::move(expr); + } + + void SetVal2Type(MIRType *ty) { + val2Type = ty; + } + + void SetVal2Expr(UniqueFEIRExpr expr) { + valExpr2 = std::move(expr); + } + + void SetOrderExpr(UniqueFEIRExpr order) { + orderExpr = std::move(order); + } + + void SetValVar(UniqueFEIRVar value) { + val = std::move(value); + } + + protected: + std::unique_ptr CloneImpl() const override; + BaseNode *GenMIRNodeImpl(MIRBuilder &mirBuilder) const override; + + private: + MIRType *mirType = nullptr; + MIRType *refType = nullptr; + MIRType *ptrType = nullptr; + MIRType *val1Type = nullptr; + MIRType *val2Type = nullptr; + UniqueFEIRExpr objExpr; + UniqueFEIRExpr valExpr1; + UniqueFEIRExpr valExpr2; + UniqueFEIRExpr orderExpr; + ASTAtomicOp atomicOp; + UniqueFEIRVar val; +}; + +// ---------- FEIRStmtNary ---------- +class FEIRStmtNary : public FEIRStmt { + public: + FEIRStmtNary(Opcode opIn, std::list> argExprsIn); + virtual ~FEIRStmtNary() = default; + + void SetOP(Opcode opIn) { + op = opIn; + } + + Opcode GetOP() const { + return op; + } + + const std::list> &GetArgExprs() const { + return argExprs; + } + + protected: + std::list GenMIRStmtsImpl(MIRBuilder &mirBuilder) const override; + + Opcode op; + std::list> argExprs; +}; + +// ---------- FEIRStmtAssign ---------- +class FEIRStmtAssign : public FEIRStmt { + public: + FEIRStmtAssign(FEIRNodeKind argKind, std::unique_ptr argVar); + ~FEIRStmtAssign() = default; + FEIRVar *GetVar() const { + return var.get(); + } + + void SetVar(std::unique_ptr argVar) { + var = std::move(argVar); + var->SetDef(HasDef()); + } + + void AddExprArg(UniqueFEIRExpr exprArg) { + exprArgs.push_back(std::move(exprArg)); + } + + void AddExprArgReverse(UniqueFEIRExpr exprArg) { + exprArgs.push_front(std::move(exprArg)); + } + + bool HasException() const 
{ + return hasException; + } + + void SetHasException(bool arg) { + hasException = arg; + } + + protected: + bool HasDefImpl() const override { + return ((var != nullptr) && (var.get() != nullptr)); + } + + FEIRVarTypeScatter* GetTypeScatterDefVarImpl() const override { + if (!HasDefImpl()) { + return nullptr; + } + if (var->GetKind() == kFEIRVarTypeScatter) { + FEIRVarTypeScatter *varTypeScatter = static_cast(var.get()); + return varTypeScatter; + } + return nullptr; + } + + std::string DumpDotStringImpl() const override; + void RegisterDFGNodes2CheckPointImpl(FEIRStmtCheckPoint &checkPoint) override; + bool hasException; + std::unique_ptr var = nullptr; + std::list exprArgs; +}; + +// ---------- FEIRStmtDAssign ---------- +class FEIRStmtDAssign : public FEIRStmtAssign { + public: + FEIRStmtDAssign(std::unique_ptr argVar, std::unique_ptr argExpr, int32 argFieldID = 0); + ~FEIRStmtDAssign() = default; + FEIRExpr *GetExpr() const { + return expr.get(); + } + + void SetExpr(std::unique_ptr argExpr) { + expr = std::move(argExpr); + } + + protected: + std::string DumpDotStringImpl() const override; + void RegisterDFGNodes2CheckPointImpl(FEIRStmtCheckPoint &checkPoint) override; + bool CalculateDefs4AllUsesImpl(FEIRStmtCheckPoint &checkPoint, FEIRUseDefChain &udChain) override; + void InitTrans4AllVarsImpl() override; + std::list GenMIRStmtsImpl(MIRBuilder &mirBuilder) const override; + + private: + void InsertNonnullChecking(MIRBuilder &mirBuilder, const MIRSymbol &dstSym, std::list &ans) const; + void AssignBoundaryVarAndChecking(MIRBuilder &mirBuilder, std::list &ans) const; + void CheckNonnullArgsAndRetForFuncPtr(const MIRBuilder &mirBuilder) const; + void CheckBoundaryArgsAndRetForFuncPtr(const MIRBuilder &mirBuilder) const; + + std::unique_ptr expr; + FieldID fieldID; +}; + +// ---------- FEIRStmtIAssign ---------- +class FEIRStmtIAssign : public FEIRStmt { + public: + FEIRStmtIAssign(UniqueFEIRType argAddrType, UniqueFEIRExpr argAddrExpr, UniqueFEIRExpr argBaseExpr, FieldID id) + : FEIRStmt(FEIRNodeKind::kStmtIAssign), + addrType(std::move(argAddrType)), + addrExpr(std::move(argAddrExpr)), + baseExpr(std::move(argBaseExpr)), + fieldID(id) {} + ~FEIRStmtIAssign() = default; + + protected: + std::list GenMIRStmtsImpl(MIRBuilder &mirBuilder) const override; + + private: + void InsertNonnullChecking(MIRBuilder &mirBuilder, const MIRType &baseType, std::list &ans) const; + void CheckNonnullArgsAndRetForFuncPtr(const MIRBuilder &mirBuilder, const MIRType &baseType) const; + void CheckBoundaryArgsAndRetForFuncPtr(const MIRBuilder &mirBuilder, const MIRType &baseType) const; + void AssignBoundaryVarAndChecking(MIRBuilder &mirBuilder, std::list &ans) const; + + UniqueFEIRType addrType; + UniqueFEIRExpr addrExpr; + UniqueFEIRExpr baseExpr; + FieldID fieldID; +}; + +// ---------- FEIRStmtJavaTypeCheck ---------- +class FEIRStmtJavaTypeCheck : public FEIRStmtAssign { + public: + enum CheckKind { + kCheckCast, + kInstanceOf + }; + + FEIRStmtJavaTypeCheck(std::unique_ptr argVar, std::unique_ptr argExpr, + std::unique_ptr argType, CheckKind argCheckKind); + FEIRStmtJavaTypeCheck(std::unique_ptr argVar, std::unique_ptr argExpr, + std::unique_ptr argType, CheckKind argCheckKind, uint32 argTypeID); + ~FEIRStmtJavaTypeCheck() = default; + + protected: + std::string DumpDotStringImpl() const override; + void RegisterDFGNodes2CheckPointImpl(FEIRStmtCheckPoint &checkPoint) override; + bool CalculateDefs4AllUsesImpl(FEIRStmtCheckPoint &checkPoint, FEIRUseDefChain &udChain) override; + std::list 
GenMIRStmtsImpl(MIRBuilder &mirBuilder) const override; + CheckKind checkKind; + std::unique_ptr expr; + std::unique_ptr type; + uint32 typeID = UINT32_MAX; +}; + +// ---------- FEIRStmtJavaConstClass ---------- +class FEIRStmtJavaConstClass : public FEIRStmtAssign { + public: + FEIRStmtJavaConstClass(std::unique_ptr argVar, std::unique_ptr argType); + ~FEIRStmtJavaConstClass() = default; + + protected: + void RegisterDFGNodes2CheckPointImpl(FEIRStmtCheckPoint &checkPoint) override; + std::list GenMIRStmtsImpl(MIRBuilder &mirBuilder) const override; + std::unique_ptr type; +}; + +// ---------- FEIRStmtJavaConstString ---------- +class FEIRStmtJavaConstString : public FEIRStmtAssign { + public: + FEIRStmtJavaConstString(std::unique_ptr argVar, const std::string &argStrVal, + uint32 argFileIdx, uint32 argStringID); + ~FEIRStmtJavaConstString() = default; + + protected: + void RegisterDFGNodes2CheckPointImpl(FEIRStmtCheckPoint &checkPoint) override; + std::list GenMIRStmtsImpl(MIRBuilder &mirBuilder) const override; + + private: + std::string strVal; + uint32 fileIdx; + uint32 stringID = UINT32_MAX; +}; + +// ---------- FEIRStmtJavaFillArrayData ---------- +class FEIRStmtJavaFillArrayData : public FEIRStmtAssign { + public: + FEIRStmtJavaFillArrayData(std::unique_ptr arrayExprIn, const int8 *arrayDataIn, + uint32 sizeIn, const std::string &arrayNameIn); + ~FEIRStmtJavaFillArrayData() = default; + + protected: + void RegisterDFGNodes2CheckPointImpl(FEIRStmtCheckPoint &checkPoint) override; + bool CalculateDefs4AllUsesImpl(FEIRStmtCheckPoint &checkPoint, FEIRUseDefChain &udChain) override; + std::list GenMIRStmtsImpl(MIRBuilder &mirBuilder) const override; + + LLT_PRIVATE: + PrimType ProcessArrayElemPrimType() const; + MIRSymbol *ProcessArrayElemData(const MIRBuilder &mirBuilder, PrimType elemPrimType) const; + MIRAggConst *FillArrayElem(const MIRBuilder &mirBuilder, PrimType elemPrimType, MIRType &arrayTypeWithSize, + uint32 elemSize) const; + + std::unique_ptr arrayExpr; + const int8 *arrayData = nullptr; + uint32 size = 0; + const std::string arrayName; +}; + +// ---------- FEIRStmtJavaMultiANewArray ---------- +class FEIRStmtJavaMultiANewArray : public FEIRStmtAssign { + public: + FEIRStmtJavaMultiANewArray(std::unique_ptr argVar, std::unique_ptr argElemType, + std::unique_ptr argArrayType); + ~FEIRStmtJavaMultiANewArray() = default; + void AddVarSize(std::unique_ptr argVarSize); + void AddVarSizeRev(std::unique_ptr argVarSize); + void SetArrayType(std::unique_ptr argArrayType) { + arrayType = std::move(argArrayType); + } + + protected: + std::string DumpDotStringImpl() const override; + void RegisterDFGNodes2CheckPointImpl(FEIRStmtCheckPoint &checkPoint) override; + bool CalculateDefs4AllUsesImpl(FEIRStmtCheckPoint &checkPoint, FEIRUseDefChain &udChain) override; + std::list GenMIRStmtsImpl(MIRBuilder &mirBuilder) const override; + + private: + static const UniqueFEIRVar &GetVarSize(); + static const UniqueFEIRVar &GetVarClass(); + static const UniqueFEIRType &GetTypeAnnotation(); + static FEStructMethodInfo &GetMethodInfoNewInstance(); + + std::unique_ptr elemType; + std::unique_ptr arrayType; + std::list> exprSizes; + static UniqueFEIRVar varSize; + static UniqueFEIRVar varClass; + static UniqueFEIRType typeAnnotation; + static FEStructMethodInfo *methodInfoNewInstance; +}; + +// ---------- FEIRStmtUseOnly ---------- +class FEIRStmtUseOnly : public FEIRStmt { + public: + FEIRStmtUseOnly(FEIRNodeKind argKind, Opcode argOp, std::unique_ptr argExpr); + FEIRStmtUseOnly(Opcode argOp, 
std::unique_ptr argExpr); + virtual ~FEIRStmtUseOnly() = default; + + const std::unique_ptr &GetExpr() const { + return expr; + } + + protected: + bool IsFallThroughImpl() const override { + if ((op == OP_return) || (op == OP_throw)) { + return false; + } + return true; + } + + std::string DumpDotStringImpl() const override; + void RegisterDFGNodes2CheckPointImpl(FEIRStmtCheckPoint &checkPoint) override; + bool CalculateDefs4AllUsesImpl(FEIRStmtCheckPoint &checkPoint, FEIRUseDefChain &udChain) override; + std::list GenMIRStmtsImpl(MIRBuilder &mirBuilder) const override; + bool SkipNonnullChecking(MIRBuilder &mirBuilder) const; + + Opcode op; + std::unique_ptr expr; +}; + +// ---------- FEIRStmtSafetyCallAssert ---------- +class FEIRStmtSafetyCallAssert { + public: + FEIRStmtSafetyCallAssert(const std::string &funcName, size_t paramIndex) + : funcNameIdx(GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(funcName)), paramIndex(paramIndex) {} + + virtual ~FEIRStmtSafetyCallAssert() = default; + + const std::string &GetFuncName() const { + return GlobalTables::GetStrTable().GetStringFromStrIdx(funcNameIdx); + } + + GStrIdx GetFuncNameIdx() const { + return funcNameIdx; + } + + size_t GetParamIndex() const { + return paramIndex; + } + + private: + GStrIdx funcNameIdx; + size_t paramIndex; +}; + +// ---------- FEIRStmtAssertNonnull ---------- +class FEIRStmtAssertNonnull : public FEIRStmtUseOnly { + public: + FEIRStmtAssertNonnull(Opcode argOp, std::unique_ptr argExpr) + : FEIRStmtUseOnly(argOp, std::move(argExpr)) {} + + ~FEIRStmtAssertNonnull() = default; + + protected: + std::list GenMIRStmtsImpl(MIRBuilder &mirBuilder) const override; +}; + +// ---------- FEIRStmtCallAssertNonnull ---------- +class FEIRStmtCallAssertNonnull : public FEIRStmtUseOnly, public FEIRStmtSafetyCallAssert { + public: + FEIRStmtCallAssertNonnull(Opcode argOp, std::unique_ptr argExpr, const std::string &funcName, + size_t paramIndex) + : FEIRStmtUseOnly(argOp, std::move(argExpr)), FEIRStmtSafetyCallAssert(funcName, paramIndex) {} + + ~FEIRStmtCallAssertNonnull() = default; + + protected: + std::list GenMIRStmtsImpl(MIRBuilder &mirBuilder) const override; +}; + +// ---------- FEIRStmtCallAssertBoundary ---------- +class FEIRStmtCallAssertBoundary : public FEIRStmtNary, public FEIRStmtSafetyCallAssert { + public: + FEIRStmtCallAssertBoundary(Opcode opIn, std::list> argExprsIn, const std::string &funcName, + size_t paramIndex) + : FEIRStmtNary(opIn, std::move(argExprsIn)), FEIRStmtSafetyCallAssert(funcName, paramIndex) {} + + ~FEIRStmtCallAssertBoundary() = default; + + protected: + std::list GenMIRStmtsImpl(MIRBuilder &mirBuilder) const override; +}; + +// ---------- FEIRStmtAssertBoundary ---------- +class FEIRStmtAssertBoundary : public FEIRStmtNary { + public: + FEIRStmtAssertBoundary(Opcode opIn, + std::list> argExprsIn) : FEIRStmtNary(opIn, std::move(argExprsIn)) {} + ~FEIRStmtAssertBoundary() = default; + + void SetIsComputable(bool flag) { + isComputable = flag; + } + + bool IsComputable() const { + return isComputable; + } + + protected: + std::list GenMIRStmtsImpl(MIRBuilder &mirBuilder) const override; + + private: + bool isComputable = false; +}; + +// ---------- FEIRStmtReturn ---------- +class FEIRStmtReturn : public FEIRStmtUseOnly { + public: + explicit FEIRStmtReturn(std::unique_ptr argExpr); + ~FEIRStmtReturn() = default; + + protected: + std::list GenMIRStmtsImpl(MIRBuilder &mirBuilder) const override; + + private: + void InsertNonnullChecking(MIRBuilder &mirBuilder, std::list &ans) const; +}; + 
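The statement and expression classes declared in this header all share one shape: public non-virtual entry points (Clone, IsFallThrough, GenMIRStmts, ...) that delegate to protected virtual *Impl hooks, with ownership passed around as std::unique_ptr. Below is a minimal, self-contained sketch of that idiom using hypothetical, simplified names (Stmt, ReturnStmt, GenMIRStmts); it is editorial illustration only and not part of this patch.

#include <iostream>
#include <list>
#include <memory>
#include <string>
#include <utility>

class Stmt {
 public:
  virtual ~Stmt() = default;
  // Public, non-virtual interface...
  std::unique_ptr<Stmt> Clone() const { return CloneImpl(); }
  bool IsFallThrough() const { return IsFallThroughImpl(); }
  std::list<std::string> GenMIRStmts() const { return GenMIRStmtsImpl(); }

 protected:
  // ...delegating to protected virtual hooks that subclasses override.
  virtual std::unique_ptr<Stmt> CloneImpl() const = 0;
  virtual bool IsFallThroughImpl() const { return true; }
  virtual std::list<std::string> GenMIRStmtsImpl() const = 0;
};

class ReturnStmt : public Stmt {
 public:
  explicit ReturnStmt(std::string argVal) : val(std::move(argVal)) {}

 protected:
  std::unique_ptr<Stmt> CloneImpl() const override { return std::make_unique<ReturnStmt>(val); }
  bool IsFallThroughImpl() const override { return false; }  // a return never falls through
  std::list<std::string> GenMIRStmtsImpl() const override { return {"return " + val}; }

 private:
  std::string val;
};

int main() {
  std::unique_ptr<Stmt> stmt = std::make_unique<ReturnStmt>("%retVal");
  std::unique_ptr<Stmt> copy = stmt->Clone();
  std::cout << copy->GenMIRStmts().front() << " (fallthrough: " << std::boolalpha << copy->IsFallThrough() << ")\n";
  return 0;
}

Keeping the virtual hooks protected lets the base class enforce its invariants in one place while subclasses supply only the behaviour, which is why the FEIR classes expose GenMIRStmtsImpl and friends rather than the public entry points themselves.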
+// ---------- FEIRStmtPesudoLabel ---------- +class FEIRStmtPesudoLabel : public FEIRStmt { + public: + explicit FEIRStmtPesudoLabel(uint32 argLabelIdx); + ~FEIRStmtPesudoLabel() = default; + void GenerateLabelIdx(const MIRBuilder &mirBuilder); + + uint32 GetLabelIdx() const { + return labelIdx; + } + + LabelIdx GetMIRLabelIdx() const { + return mirLabelIdx; + } + + protected: + bool IsTargetImpl() const override { + return true; + } + + std::string DumpDotStringImpl() const override; + std::list GenMIRStmtsImpl(MIRBuilder &mirBuilder) const override; + uint32 labelIdx; + LabelIdx mirLabelIdx; +}; + +class FEIRStmtPesudoLabel2 : public FEIRStmt { + public: + FEIRStmtPesudoLabel2(uint32 qIdx0, uint32 qIdx1) + : FEIRStmt(FEIRNodeKind::kStmtPesudoLabel), labelIdxOuter(qIdx0), labelIdxInner(qIdx1) {} + + ~FEIRStmtPesudoLabel2() = default; + static LabelIdx GenMirLabelIdx(const MIRBuilder &mirBuilder, uint32 qIdx0, uint32 qIdx1); + std::pair GetLabelIdx() const; + uint32 GetPos() const { + return labelIdxInner; + } + + protected: + bool IsTargetImpl() const override { + return true; + } + std::list GenMIRStmtsImpl(MIRBuilder &mirBuilder) const override; + + uint32 labelIdxOuter; + uint32 labelIdxInner; +}; + +// ---------- FEIRStmtGoto ---------- +class FEIRStmtGoto : public FEIRStmt { + public: + explicit FEIRStmtGoto(uint32 argLabelIdx); + ~FEIRStmtGoto() override; + void SetLabelIdx(uint32 argLabelIdx) { + labelIdx = argLabelIdx; + } + + uint32 GetLabelIdx() const { + return labelIdx; + } + + void SetStmtTarget(FEIRStmtPesudoLabel &argStmtTarget) { + stmtTarget = &argStmtTarget; + } + + const FEIRStmtPesudoLabel &GetStmtTargetRef() const { + CHECK_NULL_FATAL(stmtTarget); + return *stmtTarget; + } + + protected: + bool IsFallThroughImpl() const override { + return false; + } + + bool IsBranchImpl() const override { + return true; + } + + std::string DumpDotStringImpl() const override; + std::list GenMIRStmtsImpl(MIRBuilder &mirBuilder) const override; + uint32 labelIdx; + FEIRStmtPesudoLabel *stmtTarget; +}; + +// ---------- FEIRStmtGoto2 ---------- +class FEIRStmtGoto2 : public FEIRStmt { + public: + FEIRStmtGoto2(uint32 qIdx0, uint32 qIdx1); + virtual ~FEIRStmtGoto2() = default; + std::pair GetLabelIdx() const; + uint32 GetTarget() const { + return labelIdxInner; + } + + void SetStmtTarget(FEIRStmtPesudoLabel2 &argStmtTarget) { + stmtTarget = &argStmtTarget; + } + + const FEIRStmtPesudoLabel2 &GetStmtTargetRef() const { + CHECK_NULL_FATAL(stmtTarget); + return *stmtTarget; + } + + protected: + bool IsFallThroughImpl() const override { + return false; + } + + bool IsBranchImpl() const override { + return true; + } + + std::list GenMIRStmtsImpl(MIRBuilder &mirBuilder) const override; + + uint32 labelIdxOuter; + uint32 labelIdxInner; + FEIRStmtPesudoLabel2 *stmtTarget = nullptr; +}; + +// ---------- FEIRStmtGoto ---------- +class FEIRStmtGotoForC : public FEIRStmt { + public: + explicit FEIRStmtGotoForC(const std::string &name); + virtual ~FEIRStmtGotoForC() = default; + void SetLabelName(const std::string &name) { + labelName = name; + } + + std::string GetLabelName() const { + return labelName; + } + + void AddVLASvaedStackVars(uint32 scopeID, UniqueFEIRVar vlaSavedStackVar) { + vlaSvaedStackVars.emplace_back(std::make_pair(scopeID, std::move(vlaSavedStackVar))); + } + + protected: + bool IsFallThroughImpl() const override { + return false; + } + + bool IsBranchImpl() const override { + return true; + } + + std::string DumpDotStringImpl() const override; + std::list 
GenMIRStmtsImpl(MIRBuilder &mirBuilder) const override; + + private: + void GenVLACleanup(MIRBuilder &mirBuilder, std::list &ans) const; + + std::string labelName; + std::vector> vlaSvaedStackVars; +}; + +// ---------- FEIRStmtIGoto ---------- +class FEIRStmtIGoto : public FEIRStmt { + public: + explicit FEIRStmtIGoto(UniqueFEIRExpr expr); + virtual ~FEIRStmtIGoto() = default; + + protected: + bool IsFallThroughImpl() const override { + return false; + } + + bool IsBranchImpl() const override { + return true; + } + + std::list GenMIRStmtsImpl(MIRBuilder &mirBuilder) const override; + + UniqueFEIRExpr targetExpr; +}; + +// ---------- FEIRStmtCondGotoForC ---------- +class FEIRStmtCondGotoForC : public FEIRStmt { + public: + explicit FEIRStmtCondGotoForC(UniqueFEIRExpr argExpr, Opcode op, const std::string &name) + : FEIRStmt(FEIRNodeKind::kStmtCondGoto), expr(std::move(argExpr)), opCode(op), labelName(name) {} + virtual ~FEIRStmtCondGotoForC() = default; + void SetLabelName(const std::string &name) { + labelName = name; + } + + std::string GetLabelName() const { + return labelName; + } + + const UniqueFEIRExpr &GetConditionExpr() const { + return expr; + } + + void SetCondtionExpr(UniqueFEIRExpr &exprIn) { + expr = std::move(exprIn); + } + + protected: + bool IsBranchImpl() const override { + return true; + } + + std::string DumpDotStringImpl() const override; + std::list GenMIRStmtsImpl(MIRBuilder &mirBuilder) const override; + UniqueFEIRExpr expr; + Opcode opCode; + std::string labelName; +}; + +// ---------- FEIRStmtCondGoto ---------- +class FEIRStmtCondGoto : public FEIRStmtGoto { + public: + FEIRStmtCondGoto(Opcode argOp, uint32 argLabelIdx, UniqueFEIRExpr argExpr); + ~FEIRStmtCondGoto() = default; + void SetOpcode(Opcode argOp) { + op = argOp; + } + + Opcode GetOpcode() const { + return op; + } + + void SetExpr(UniqueFEIRExpr argExpr) { + CHECK_NULL_FATAL(argExpr); + expr = std::move(argExpr); + } + + protected: + bool IsFallThroughImpl() const override { + return true; + } + + bool IsBranchImpl() const override { + return true; + } + + std::string DumpDotStringImpl() const override; + void RegisterDFGNodes2CheckPointImpl(FEIRStmtCheckPoint &checkPoint) override; + bool CalculateDefs4AllUsesImpl(FEIRStmtCheckPoint &checkPoint, FEIRUseDefChain &udChain) override; + std::list GenMIRStmtsImpl(MIRBuilder &mirBuilder) const override; + + private: + Opcode op; + UniqueFEIRExpr expr; +}; + +// ---------- FEIRStmtCondGoto2 ---------- +class FEIRStmtCondGoto2 : public FEIRStmtGoto2 { + public: + FEIRStmtCondGoto2(Opcode argOp, uint32 qIdx0, uint32 qIdx1, UniqueFEIRExpr argExpr); + ~FEIRStmtCondGoto2() = default; + + protected: + bool IsFallThroughImpl() const override { + return true; + } + + bool IsBranchImpl() const override { + return true; + } + + std::list GenMIRStmtsImpl(MIRBuilder &mirBuilder) const override; + + private: + Opcode op; + UniqueFEIRExpr expr; +}; + +// ---------- FEIRStmtSwitch ---------- +class FEIRStmtSwitch : public FEIRStmt { + public: + explicit FEIRStmtSwitch(UniqueFEIRExpr argExpr); + ~FEIRStmtSwitch() override; + void SetDefaultLabelIdx(uint32 labelIdx) { + defaultLabelIdx = labelIdx; + } + + uint32 GetDefaultLabelIdx() const { + return defaultLabelIdx; + } + + void SetDefaultTarget(FEIRStmtPesudoLabel &stmtTarget) { + defaultTarget = &stmtTarget; + } + + const FEIRStmtPesudoLabel &GetDefaultTarget() const { + return *defaultTarget; + } + + const std::map &GetMapValueLabelIdx() const { + return mapValueLabelIdx; + } + + const std::map &GetMapValueTargets() const 
{ + return mapValueTargets; + } + + void AddTarget(int32 value, uint32 labelIdx) { + mapValueLabelIdx[value] = labelIdx; + } + + void AddTarget(int32 value, FEIRStmtPesudoLabel &target) { + mapValueTargets[value] = ⌖ + } + + void SetExpr(UniqueFEIRExpr argExpr) { + CHECK_NULL_FATAL(argExpr); + expr = std::move(argExpr); + } + + protected: + bool IsBranchImpl() const override { + return true; + } + + bool IsFallThroughImpl() const override; + std::string DumpDotStringImpl() const override; + void RegisterDFGNodes2CheckPointImpl(FEIRStmtCheckPoint &checkPoint) override; + bool CalculateDefs4AllUsesImpl(FEIRStmtCheckPoint &checkPoint, FEIRUseDefChain &udChain) override; + std::list GenMIRStmtsImpl(MIRBuilder &mirBuilder) const override; + + private: + uint32 defaultLabelIdx; + FEIRStmtPesudoLabel *defaultTarget; + std::map mapValueLabelIdx; + std::map mapValueTargets; + UniqueFEIRExpr expr; +}; + +// ---------- FEIRStmtSwitch2 ---------- +class FEIRStmtSwitch2 : public FEIRStmt { + public: + explicit FEIRStmtSwitch2(uint32 outerIdxIn, UniqueFEIRExpr argExpr); + ~FEIRStmtSwitch2() override; + void SetDefaultLabelIdx(uint32 labelIdx) { + defaultLabelIdx = labelIdx; + } + + uint32 GetDefaultLabelIdx() const { + return defaultLabelIdx; + } + + void SetDefaultTarget(FEIRStmtPesudoLabel2 *stmtTarget) { + defaultTarget = stmtTarget; + } + + const FEIRStmtPesudoLabel2 &GetDefaultTarget() const { + return *defaultTarget; + } + + const std::map &GetMapValueLabelIdx() const { + return mapValueLabelIdx; + } + + const std::map &GetMapValueTargets() const { + return mapValueTargets; + } + + void AddTarget(int32 value, uint32 labelIdx) { + mapValueLabelIdx[value] = labelIdx; + } + + void AddTarget(int32 value, FEIRStmtPesudoLabel2 *target) { + mapValueTargets[value] = target; + } + + void SetExpr(UniqueFEIRExpr argExpr) { + CHECK_NULL_FATAL(argExpr); + expr = std::move(argExpr); + } + + protected: + bool IsBranchImpl() const override { + return true; + } + + bool IsFallThroughImpl() const override; + std::string DumpDotStringImpl() const override; + std::list GenMIRStmtsImpl(MIRBuilder &mirBuilder) const override; + + private: + uint32 outerIdx; + uint32 defaultLabelIdx = UINT32_MAX; + FEIRStmtPesudoLabel2 *defaultTarget; + std::map mapValueLabelIdx; + std::map mapValueTargets; + UniqueFEIRExpr expr; +}; + +// ---------- FEIRStmtSwitchForC ---------- +class FEIRStmtSwitchForC : public FEIRStmt { + public: + FEIRStmtSwitchForC(UniqueFEIRExpr argCondExpr, bool argHasDefault); + ~FEIRStmtSwitchForC() = default; + void AddFeirStmt(UniqueFEIRStmt stmt) { + subStmts.emplace_back(std::move(stmt)); + } + + void SetExpr(UniqueFEIRExpr argExpr) { + CHECK_NULL_FATAL(argExpr); + expr = std::move(argExpr); + } + + void SetHasDefault(bool argHasDefault) { + hasDefault = argHasDefault; + } + + void SetBreakLabelName(std::string name) { + breakLabelName = std::move(name); + } + + protected: + std::string DumpDotStringImpl() const override; + void RegisterDFGNodes2CheckPointImpl(FEIRStmtCheckPoint &checkPoint) override; + bool CalculateDefs4AllUsesImpl(FEIRStmtCheckPoint &checkPoint, FEIRUseDefChain &udChain) override; + std::list GenMIRStmtsImpl(MIRBuilder &mirBuilder) const override; + + private: + UniqueFEIRExpr expr; + bool hasDefault = true; + std::list subStmts; + std::string breakLabelName; +}; + +// ---------- FEIRStmtCaseForC ---------- +class FEIRStmtCaseForC : public FEIRStmt { + public: + explicit FEIRStmtCaseForC(int64 label); + void AddCaseTag2CaseVec(int64 lCaseTag, int64 rCaseTag); + ~FEIRStmtCaseForC() = 
default; + void AddFeirStmt(UniqueFEIRStmt stmt) { + subStmts.emplace_back(std::move(stmt)); + } + const std::map> &GetPesudoLabelMap() const { + return pesudoLabelMap; + } + + protected: + std::string DumpDotStringImpl() const override; + std::list GenMIRStmtsImpl(MIRBuilder &mirBuilder) const override; + + private: + int64 lCaseLabel; + std::map> pesudoLabelMap = + std::map>(); + std::list subStmts; +}; + +// ---------- FEIRStmtDefaultForC ---------- +class FEIRStmtDefaultForC : public FEIRStmt { + public: + explicit FEIRStmtDefaultForC(); + ~FEIRStmtDefaultForC() = default; + void AddFeirStmt(UniqueFEIRStmt stmt) { + subStmts.emplace_back(std::move(stmt)); + } + + protected: + std::string DumpDotStringImpl() const override; + std::list GenMIRStmtsImpl(MIRBuilder &mirBuilder) const override; + std::list subStmts; +}; + +// ---------- FEIRStmtArrayStore ---------- +class FEIRStmtArrayStore : public FEIRStmt { + public: + FEIRStmtArrayStore(UniqueFEIRExpr argExprElem, UniqueFEIRExpr argExprArray, UniqueFEIRExpr argExprIndex, + UniqueFEIRType argTypeArray); + + // for C + FEIRStmtArrayStore(UniqueFEIRExpr argExprElem, UniqueFEIRExpr argExprArray, UniqueFEIRExpr argExprIndex, + UniqueFEIRType argTypeArray, const std::string &argArrayName); + + // for C mul array + FEIRStmtArrayStore(UniqueFEIRExpr argExprElem, UniqueFEIRExpr argExprArray, UniqueFEIRExpr argExprIndex, + UniqueFEIRType argTypeArray, UniqueFEIRType argTypeElem, const std::string &argArrayName); + // for C mul array + FEIRStmtArrayStore(UniqueFEIRExpr argExprElem, UniqueFEIRExpr argExprArray, std::list &argExprIndexs, + UniqueFEIRType argTypeArray, const std::string &argArrayName); + + // for C array in struct + FEIRStmtArrayStore(UniqueFEIRExpr argExprElem, UniqueFEIRExpr argExprArray, std::list &argExprIndexs, + UniqueFEIRType argTypeArray, UniqueFEIRExpr argExprStruct, UniqueFEIRType argTypeStruct, + const std::string &argArrayName); + + ~FEIRStmtArrayStore() = default; + + void SetIndexsExprs(std::list &exprs) { + exprIndexs.clear(); + for (const auto &e : exprs) { + auto ue = e->Clone(); + exprIndexs.push_back(std::move(ue)); + } + } + + protected: + std::string DumpDotStringImpl() const override; + void RegisterDFGNodes2CheckPointImpl(FEIRStmtCheckPoint &checkPoint) override; + bool CalculateDefs4AllUsesImpl(FEIRStmtCheckPoint &checkPoint, FEIRUseDefChain &udChain) override; + void InitTrans4AllVarsImpl() override; + std::list GenMIRStmtsImpl(MIRBuilder &mirBuilder) const override; + void GenMIRStmtsImplForCPart(MIRBuilder &mirBuilder, MIRType *ptrMIRArrayType, MIRType **mIRElemType, + BaseNode **arrayExpr) const; + + private: + UniqueFEIRExpr exprElem; + UniqueFEIRExpr exprArray; + UniqueFEIRExpr exprIndex; + // for C mul array + mutable std::list exprIndexs; + UniqueFEIRType typeArray; + mutable UniqueFEIRType typeElem = nullptr; + + // for C array in struct + UniqueFEIRExpr exprStruct; + UniqueFEIRType typeStruct; + std::string arrayName; +}; + +// ---------- FEIRStmtFieldStore ---------- +class FEIRStmtFieldStore : public FEIRStmt { + public: + FEIRStmtFieldStore(UniqueFEIRVar argVarObj, UniqueFEIRVar argVarField, FEStructFieldInfo &argFieldInfo, + bool argIsStatic); + FEIRStmtFieldStore(UniqueFEIRVar argVarObj, UniqueFEIRVar argVarField, FEStructFieldInfo &argFieldInfo, + bool argIsStatic, int32 argDexFileHashCode); + ~FEIRStmtFieldStore() = default; + + protected: + std::string DumpDotStringImpl() const override; + void RegisterDFGNodes2CheckPointImpl(FEIRStmtCheckPoint &checkPoint) override; + bool 
CalculateDefs4AllUsesImpl(FEIRStmtCheckPoint &checkPoint, FEIRUseDefChain &udChain) override; + std::list GenMIRStmtsImpl(MIRBuilder &mirBuilder) const override; + + private: + void RegisterDFGNodes2CheckPointForStatic(FEIRStmtCheckPoint &checkPoint); + void RegisterDFGNodes2CheckPointForNonStatic(FEIRStmtCheckPoint &checkPoint); + bool CalculateDefs4AllUsesForStatic(FEIRStmtCheckPoint &checkPoint, FEIRUseDefChain &udChain); + bool CalculateDefs4AllUsesForNonStatic(FEIRStmtCheckPoint &checkPoint, FEIRUseDefChain &udChain); + bool NeedMCCForStatic(uint32 &typeID) const; + std::list GenMIRStmtsImplForStatic(MIRBuilder &mirBuilder) const; + std::list GenMIRStmtsImplForNonStatic(MIRBuilder &mirBuilder) const; + + UniqueFEIRVar varObj; + UniqueFEIRVar varField; + FEStructFieldInfo &fieldInfo; + bool isStatic; + int32 dexFileHashCode = -1; +}; + +// ---------- FEIRStmtFieldLoad ---------- +class FEIRStmtFieldLoad : public FEIRStmtAssign { + public: + FEIRStmtFieldLoad(UniqueFEIRVar argVarObj, UniqueFEIRVar argVarField, FEStructFieldInfo &argFieldInfo, + bool argIsStatic); + FEIRStmtFieldLoad(UniqueFEIRVar argVarObj, UniqueFEIRVar argVarField, FEStructFieldInfo &argFieldInfo, + bool argIsStatic, int32 argDexFileHashCode); + ~FEIRStmtFieldLoad() = default; + + protected: + std::string DumpDotStringImpl() const override; + void RegisterDFGNodes2CheckPointImpl(FEIRStmtCheckPoint &checkPoint) override; + bool CalculateDefs4AllUsesImpl(FEIRStmtCheckPoint &checkPoint, FEIRUseDefChain &udChain) override; + std::list GenMIRStmtsImpl(MIRBuilder &mirBuilder) const override; + + private: + void RegisterDFGNodes2CheckPointForStatic(FEIRStmtCheckPoint &checkPoint); + void RegisterDFGNodes2CheckPointForNonStatic(FEIRStmtCheckPoint &checkPoint); + bool CalculateDefs4AllUsesForStatic(FEIRStmtCheckPoint &checkPoint, FEIRUseDefChain &udChain); + bool CalculateDefs4AllUsesForNonStatic(FEIRStmtCheckPoint &checkPoint, FEIRUseDefChain &udChain); + bool NeedMCCForStatic(uint32 &typeID) const; + std::list GenMIRStmtsImplForStatic(MIRBuilder &mirBuilder) const; + std::list GenMIRStmtsImplForNonStatic(MIRBuilder &mirBuilder) const; + + UniqueFEIRVar varObj; + FEStructFieldInfo &fieldInfo; + bool isStatic; + int32 dexFileHashCode = -1; +}; + +// ---------- FEIRStmtCallAssign ---------- +class FEIRStmtCallAssign : public FEIRStmtAssign { + public: + FEIRStmtCallAssign(FEStructMethodInfo &argMethodInfo, Opcode argMIROp, UniqueFEIRVar argVarRet, bool argIsStatic); + ~FEIRStmtCallAssign() = default; + + static std::map InitMapOpAssignToOp(); + static std::map InitMapOpToOpAssign(); + + protected: + std::string DumpDotStringImpl() const override; + void RegisterDFGNodes2CheckPointImpl(FEIRStmtCheckPoint &checkPoint) override; + bool CalculateDefs4AllUsesImpl(FEIRStmtCheckPoint &checkPoint, FEIRUseDefChain &udChain) override; + std::list GenMIRStmtsImpl(MIRBuilder &mirBuilder) const override; + std::list GenMIRStmtsUseZeroReturn(MIRBuilder &mirBuilder) const; + + private: + Opcode AdjustMIROp() const; + void InsertNonnullInRetVar(MIRSymbol &retVarSym) const; + void InsertNonnullCheckingInArgs(const UniqueFEIRExpr &expr, size_t index, MIRBuilder &mirBuilder, + std::list &ans, const std::string &funcName) const; + FEStructMethodInfo &methodInfo; + Opcode mirOp; + bool isStatic; + static std::map mapOpAssignToOp; + static std::map mapOpToOpAssign; +}; + +// ---------- FEIRStmtICallAssign ---------- +class FEIRStmtICallAssign : public FEIRStmtAssign { + public: + FEIRStmtICallAssign(); + ~FEIRStmtICallAssign() = default; + + void 
SetPrototype(UniqueFEIRType type) { + prototype = std::move(type); + } + + protected: + std::list GenMIRStmtsImpl(MIRBuilder &mirBuilder) const override; + std::string DumpDotStringImpl() const override; + void RegisterDFGNodes2CheckPointImpl(FEIRStmtCheckPoint &checkPoint) override; + bool CalculateDefs4AllUsesImpl(FEIRStmtCheckPoint &checkPoint, FEIRUseDefChain &udChain) override; + + private: + void InsertNonnullCheckingInArgs(MIRBuilder &mirBuilder, std::list &ans) const; + void InsertNonnullInRetVar(MIRSymbol &retVarSym) const; + + UniqueFEIRType prototype = nullptr; +}; + +// ---------- FEIRStmtIntrinsicCallAssign ---------- +class FEIRStmtIntrinsicCallAssign : public FEIRStmtAssign { + public: + FEIRStmtIntrinsicCallAssign(MIRIntrinsicID id, UniqueFEIRType typeIn, UniqueFEIRVar argVarRet); + FEIRStmtIntrinsicCallAssign(MIRIntrinsicID id, UniqueFEIRType typeIn, UniqueFEIRVar argVarRet, + std::unique_ptr> exprListIn); + FEIRStmtIntrinsicCallAssign(MIRIntrinsicID id, const std::string &funcNameIn, const std::string &protoIN, + std::unique_ptr> argsIn); + FEIRStmtIntrinsicCallAssign(MIRIntrinsicID id, const std::string &funcNameIn, const std::string &protoIN, + std::unique_ptr> argsIn, uint32 callerClassTypeIDIn, + bool isInStaticFuncIn); + FEIRStmtIntrinsicCallAssign(MIRIntrinsicID id, UniqueFEIRType typeIn, UniqueFEIRVar argVarRet, + uint32 typeIDIn); + ~FEIRStmtIntrinsicCallAssign() = default; + + protected: + std::string DumpDotStringImpl() const override; + void RegisterDFGNodes2CheckPointImpl(FEIRStmtCheckPoint &checkPoint) override; + std::list GenMIRStmtsImpl(MIRBuilder &mirBuilder) const override; + + private: + void ConstructArgsForInvokePolyMorphic(MIRBuilder &mirBuilder, MapleVector &intrnCallargs) const; + std::list GenMIRStmtsForFillNewArray(MIRBuilder &mirBuilder) const; + std::list GenMIRStmtsForInvokePolyMorphic(MIRBuilder &mirBuilder) const; + + MIRIntrinsicID intrinsicId; + UniqueFEIRType type; + std::unique_ptr> exprList; + // for polymorphic + const std::string funcName; + const std::string proto; + std::unique_ptr> polyArgs; + uint32 typeID = UINT32_MAX; + uint32 callerClassTypeID = UINT32_MAX; + bool isInStaticFunc = false; +}; + +// ---------- FEIRStmtPesudoLOC ---------- +class FEIRStmtPesudoLOC : public FEIRStmt { + public: + FEIRStmtPesudoLOC(uint32 argSrcFileIdx, uint32 argLineNumber); + ~FEIRStmtPesudoLOC() = default; + + protected: + std::string DumpDotStringImpl() const override; + std::list GenMIRStmtsImpl(MIRBuilder &mirBuilder) const override; +}; + +// ---------- FEIRStmtPesudoJavaTry ---------- +class FEIRStmtPesudoJavaTry : public FEIRStmt { + public: + FEIRStmtPesudoJavaTry(); + ~FEIRStmtPesudoJavaTry() = default; + void AddCatchLabelIdx(uint32 labelIdx) { + catchLabelIdxVec.push_back(labelIdx); + } + + const std::vector GetCatchLabelIdxVec() const { + return catchLabelIdxVec; + } + + void AddCatchTarget(FEIRStmtPesudoLabel &stmtLabel) { + catchTargets.push_back(&stmtLabel); + } + + const std::vector &GetCatchTargets() const { + return catchTargets; + } + + protected: + std::string DumpDotStringImpl() const override; + std::list GenMIRStmtsImpl(MIRBuilder &mirBuilder) const override; + + private: + std::vector catchLabelIdxVec; + std::vector catchTargets; +}; + +// ---------- FEIRStmtPesudoJavaTry2 ---------- +class FEIRStmtPesudoJavaTry2 : public FEIRStmt { + public: + explicit FEIRStmtPesudoJavaTry2(uint32 outerIdxIn); + ~FEIRStmtPesudoJavaTry2() = default; + void AddCatchLabelIdx(uint32 labelIdx) { + catchLabelIdxVec.push_back(labelIdx); + } + + 
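+  // Catch handlers are kept in two forms: the raw label indices collected through
+  // AddCatchLabelIdx above, and the resolved FEIRStmtPesudoLabel2 targets added through
+  // AddCatchTarget; the accessors below expose both views.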
const std::vector GetCatchLabelIdxVec() const { + return catchLabelIdxVec; + } + + void AddCatchTarget(FEIRStmtPesudoLabel2 *stmtLabel) { + catchTargets.push_back(stmtLabel); + } + + const std::vector &GetCatchTargets() const { + return catchTargets; + } + + protected: + std::string DumpDotStringImpl() const override; + std::list GenMIRStmtsImpl(MIRBuilder &mirBuilder) const override; + + private: + uint32 outerIdx; + std::vector catchLabelIdxVec; + std::vector catchTargets; +}; + +// ---------- FEIRStmtPesudoEndTry ---------- +class FEIRStmtPesudoEndTry : public FEIRStmt { + public: + FEIRStmtPesudoEndTry(); + ~FEIRStmtPesudoEndTry() = default; + + protected: + std::string DumpDotStringImpl() const override; + std::list GenMIRStmtsImpl(MIRBuilder &mirBuilder) const override; +}; + +// ---------- FEIRStmtPesudoCatch ---------- +class FEIRStmtPesudoCatch : public FEIRStmtPesudoLabel { + public: + explicit FEIRStmtPesudoCatch(uint32 argLabelIdx); + ~FEIRStmtPesudoCatch() = default; + void AddCatchTypeNameIdx(GStrIdx typeNameIdx); + + protected: + std::string DumpDotStringImpl() const override; + std::list GenMIRStmtsImpl(MIRBuilder &mirBuilder) const override; + + private: + std::list catchTypes; +}; + +// ---------- FEIRStmtPesudoCatch2 ---------- +class FEIRStmtPesudoCatch2 : public FEIRStmtPesudoLabel2 { + public: + explicit FEIRStmtPesudoCatch2(uint32 qIdx0, uint32 qIdx1); + ~FEIRStmtPesudoCatch2() = default; + void AddCatchTypeNameIdx(GStrIdx typeNameIdx); + + protected: + std::string DumpDotStringImpl() const override; + std::list GenMIRStmtsImpl(MIRBuilder &mirBuilder) const override; + + private: + std::list catchTypes; +}; + +class FEIRStmtPesudoSafe : public FEIRStmt { + public: + explicit FEIRStmtPesudoSafe(bool isEnd); + ~FEIRStmtPesudoSafe() = default; + + protected: + std::string DumpDotStringImpl() const override; + std::list GenMIRStmtsImpl(MIRBuilder &mirBuilder) const override; + bool end = false; +}; + +class FEIRStmtPesudoUnsafe : public FEIRStmt { + public: + explicit FEIRStmtPesudoUnsafe(bool isEnd); + ~FEIRStmtPesudoUnsafe() = default; + + protected: + std::string DumpDotStringImpl() const override; + std::list GenMIRStmtsImpl(MIRBuilder &mirBuilder) const override; + bool end = false; +}; + +// ---------- FEIRStmtPesudoComment ---------- +class FEIRStmtPesudoComment : public FEIRStmt { + public: + explicit FEIRStmtPesudoComment(FEIRNodeKind argKind = kStmtPesudoComment); + explicit FEIRStmtPesudoComment(const std::string &argContent); + ~FEIRStmtPesudoComment() = default; + void SetContent(const std::string &argContent) { + content = argContent; + } + + protected: + std::string DumpDotStringImpl() const override; + std::list GenMIRStmtsImpl(MIRBuilder &mirBuilder) const override; + + std::string content = ""; +}; + +// ---------- FEIRStmtPesudoCommentForInst ---------- +class FEIRStmtPesudoCommentForInst : public FEIRStmtPesudoComment { + public: + FEIRStmtPesudoCommentForInst(); + ~FEIRStmtPesudoCommentForInst() = default; + void SetFileIdx(uint32 argFileIdx) { + fileIdx = argFileIdx; + } + + void SetLineNum(uint32 argLineNum) { + lineNum = argLineNum; + } + + void SetPC(uint32 argPC) { + pc = argPC; + } + + protected: + std::string DumpDotStringImpl() const override; + std::list GenMIRStmtsImpl(MIRBuilder &mirBuilder) const override; + + private: + constexpr static uint32 invalid = 0xFFFFFFFF; + uint32 fileIdx = invalid; + uint32 lineNum = invalid; + uint32 pc = invalid; +}; + +// ---------- FEIRStmtIf ---------- +class FEIRStmtIf : public FEIRStmt { + public: + 
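+  // Holds the condition expression together with separate then/else statement lists;
+  // callers move their statement lists in via SetThenStmts/SetElseStmts below.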
FEIRStmtIf(UniqueFEIRExpr argCondExpr, std::list &argThenStmts); + FEIRStmtIf(UniqueFEIRExpr argCondExpr, + std::list &argThenStmts, + std::list &argElseStmts); + ~FEIRStmtIf() = default; + + void SetCondExpr(UniqueFEIRExpr argCondExpr) { + CHECK_NULL_FATAL(argCondExpr); + condExpr = std::move(argCondExpr); + } + + const UniqueFEIRExpr &GetCondExpr() const { + return condExpr; + } + + void SetHasElse(bool argHasElse) { + hasElse = argHasElse; + } + + void SetThenStmts(std::list &stmts) { + std::move(begin(stmts), end(stmts), std::inserter(thenStmts, end(thenStmts))); + } + + void SetElseStmts(std::list &stmts) { + std::move(begin(stmts), end(stmts), std::inserter(elseStmts, end(elseStmts))); + } + + std::list &GetThenStmt() { + return thenStmts; + } + + std::list &GetElseStmt() { + return elseStmts; + } + + protected: + std::string DumpDotStringImpl() const override; + std::list GenMIRStmtsImpl(MIRBuilder &mirBuilder) const override; + bool IsFallThroughImpl() const override; + + bool IsBranchImpl() const override { + return true; + } + + private: + UniqueFEIRExpr condExpr; + bool hasElse = false; + std::list thenStmts; + std::list elseStmts; +}; + +class FEIRStmtDoWhile : public FEIRStmt { + public: + FEIRStmtDoWhile(Opcode argOpcode, UniqueFEIRExpr argCondExpr, std::list argBodyStmts) + : FEIRStmt(FEIRNodeKind::kStmtDoWhile), + opcode(argOpcode), + condExpr(std::move(argCondExpr)), + bodyStmts(std::move(argBodyStmts)) {} + ~FEIRStmtDoWhile() = default; + + const std::list &GetBodyStmts() const { + return bodyStmts; + } + + const UniqueFEIRExpr &GetCondExpr() const { + return condExpr; + } + + const Opcode GetOpcode() const { + return opcode; + } + + protected: + bool IsBranchImpl() const override { + return true; + } + + bool IsFallThroughImpl() const override; + std::list GenMIRStmtsImpl(MIRBuilder &mirBuilder) const override; + + private: + Opcode opcode; + UniqueFEIRExpr condExpr; + std::list bodyStmts; +}; + +class FEIRStmtBreak : public FEIRStmt { + public: + FEIRStmtBreak(): FEIRStmt(FEIRNodeKind::kStmtBreak) {} + ~FEIRStmtBreak() = default; + + void SetBreakLabelName(std::string name) { + breakLabelName = std::move(name); + } + + protected: + bool IsBranchImpl() const override { + return true; + } + + std::list GenMIRStmtsImpl(MIRBuilder &mirBuilder) const override; + + private: + std::string breakLabelName; +}; + +class FEIRStmtContinue : public FEIRStmt { + public: + FEIRStmtContinue(): FEIRStmt(FEIRNodeKind::kStmtContinue) {} + ~FEIRStmtContinue() = default; + + void SetLabelName(std::string name) { + labelName = std::move(name); + } + + protected: + bool IsBranchImpl() const override { + return true; + } + + std::list GenMIRStmtsImpl(MIRBuilder &mirBuilder) const override; + private: + std::string labelName; +}; + +class FEIRStmtLabel : public FEIRStmt { + public: + explicit FEIRStmtLabel(const std::string &name) : FEIRStmt(FEIRNodeKind::kStmtLabel), labelName(name) {} + ~FEIRStmtLabel() = default; + + const std::string &GetLabelName() const { + return labelName; + } + + protected: + bool IsBranchImpl() const override { + return true; + } + + std::list GenMIRStmtsImpl(MIRBuilder &mirBuilder) const override; + + private: + std::string labelName; +}; + +class FEIRStmtAtomic : public FEIRStmt { + public: + explicit FEIRStmtAtomic(UniqueFEIRExpr expr); + ~FEIRStmtAtomic() = default; + + protected: + std::list GenMIRStmtsImpl(MIRBuilder &mirBuilder) const override; + UniqueFEIRExpr atomicExpr; +}; + +class FEIRStmtGCCAsm : public FEIRStmt { + public: + FEIRStmtGCCAsm(const 
std::string &str, bool isGotoArg, bool isVolatileArg) + : FEIRStmt(FEIRNodeKind::kStmtGCCAsm), asmStr(str), isGoto(isGotoArg), isVolatile(isVolatileArg) {} + ~FEIRStmtGCCAsm() = default; + + void SetLabels(const std::vector &labelsArg) { + labels = labelsArg; + } + + void SetClobbers(const std::vector &clobbersArg) { + clobbers = clobbersArg; + } + + void SetInputs(const std::vector> &inputsArg) { + inputs = inputsArg; + } + + void SetInputsExpr(std::vector &expr) { + std::move(begin(expr), end(expr), std::inserter(inputsExprs, end(inputsExprs))); + } + + void SetOutputs(const std::vector> &outputsArg) { + outputs = outputsArg; + } + + void SetOutputsExpr(std::vector &expr) { + std::move(begin(expr), end(expr), std::inserter(outputsExprs, end(outputsExprs))); + } + + protected: + std::list GenMIRStmtsImpl(MIRBuilder &mirBuilder) const override; + bool HandleConstraintPlusQm(MIRBuilder &mirBuilder, AsmNode *asmNode, uint32 index, + std::list &stmts, std::list &initStmts) const; + std::pair HandleGlobalAsmOutOperand(const UniqueFEIRVar &asmOut, + const FieldID fieldID, + std::list &stmts, + MIRBuilder &mirBuilder) const; + std::pair HandleAsmOutOperandWithPtrType(const FEIRExprIRead *ireadExpr, + std::list &stmts, + MIRBuilder &mirBuilder) const; + + private: + std::vector> outputs; + std::vector outputsExprs; + std::vector> inputs; + std::vector inputsExprs; + std::vector clobbers; + std::vector labels; + std::string asmStr; + bool isGoto = false; + bool isVolatile = false; +}; +} // namespace maple +#endif // HIR2MPL_INCLUDE_COMMON_FEIR_STMT_H diff --git a/src/hir2mpl/common/include/feir_type.h b/src/hir2mpl/common/include/feir_type.h new file mode 100644 index 0000000000000000000000000000000000000000..7daf3670c114570076153c7e7068d6e128414015 --- /dev/null +++ b/src/hir2mpl/common/include/feir_type.h @@ -0,0 +1,424 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef HIR2MPL_INCLUDE_FEIR_TYPE_H +#define HIR2MPL_INCLUDE_FEIR_TYPE_H +#include +#include +#include +#include "prim_types.h" +#include "types_def.h" +#include "mir_type.h" +#include "global_tables.h" +#include "fe_configs.h" +#include "fe_utils_ast.h" + +namespace maple { +enum FEIRTypeKind { + kFEIRTypeDefault, + kFEIRTypeByName, + kFEIRTypePointer, + kFEIRTypeNative +}; + +class FEIRType { + public: + explicit FEIRType(FEIRTypeKind argKind); + virtual ~FEIRType() = default; + static std::unique_ptr NewType(FEIRTypeKind argKind = kFEIRTypeDefault); + static std::map> InitLangConfig(); + MIRType *GenerateMIRTypeAuto(MIRSrcLang argSrcLang) const; + MIRType *GenerateMIRTypeAuto() const { + return GenerateMIRTypeAutoImpl(); + } + + bool IsSameKind(const FEIRType &type) const { + return kind == type.kind; + } + + FEIRTypeKind GetKind() const { + return kind; + } + + PrimType GetPrimType() const { + return GetPrimTypeImpl(); + } + + void SetPrimType(PrimType pt) { + SetPrimTypeImpl(pt); + } + + MIRSrcLang GetSrcLang() const { + return srcLang; + } + + bool IsZero() const { + return isZero; + } + + void SetZero(bool arg) { + isZero = arg; + } + + void CopyFrom(const FEIRType &type) { + return CopyFromImpl(type); + } + + std::unique_ptr Clone() const { + return CloneImpl(); + } + + MIRType *GenerateMIRType(MIRSrcLang argSrcLang, bool usePtr) const { + (void)argSrcLang; + return GenerateMIRType(usePtr); + } + + MIRType *GenerateMIRType() const { + return GenerateMIRType(false); + } + + bool IsPreciseRefType() const { + return IsPrecise() && IsRef(); + } + + bool IsScalar() const { + return IsScalarImpl(); + } + + bool IsRef() const { + return IsRefImpl(); + } + + bool IsArray() const { + return IsArrayImpl(); + } + + bool IsPrecise() const { + return IsPreciseImpl(); + } + + bool IsValid() const { + return IsValidImpl(); + } + + MIRType *GenerateMIRType(bool usePtr, PrimType ptyPtr = PTY_ref) const { + return GenerateMIRTypeImpl(usePtr, ptyPtr); + } + + TypeDim ArrayIncrDim(TypeDim delta = 1) { + return ArrayIncrDimImpl(delta); + } + + TypeDim ArrayDecrDim(TypeDim delta = 1) { + return ArrayDecrDimImpl(delta); + } + + bool IsEqualTo(const std::unique_ptr &argType) const { + return IsEqualToImpl(argType); + } + + bool IsEqualTo(const FEIRType &argType) const { + return IsEqualToImpl(argType); + } + + uint32 Hash() const { + return HashImpl(); + } + + std::string GetTypeName() const { + return GetTypeNameImpl(); + } + + static std::map> langConfig; + + protected: + virtual MIRType *GenerateMIRTypeAutoImpl() const { + return GenerateMIRTypeAuto(srcLang); + } + virtual MIRType *GenerateMIRTypeAutoImpl(MIRSrcLang argSrcLang) const; + virtual void CopyFromImpl(const FEIRType &type); + virtual std::unique_ptr CloneImpl() const = 0; + virtual MIRType *GenerateMIRTypeImpl(bool usePtr, PrimType ptyPtr) const = 0; + virtual bool IsScalarImpl() const = 0; + virtual TypeDim ArrayIncrDimImpl(TypeDim delta) = 0; + virtual TypeDim ArrayDecrDimImpl(TypeDim delta) = 0; + virtual bool IsEqualToImpl(const std::unique_ptr &argType) const; + virtual bool IsEqualToImpl(const FEIRType &argType) const; + virtual uint32 HashImpl() const = 0; + virtual PrimType GetPrimTypeImpl() const { + return PTY_begin; // Means no valid primtype + } + + virtual void SetPrimTypeImpl(PrimType pt) = 0; + virtual bool IsRefImpl() const = 0; + virtual bool IsArrayImpl() const = 0; + virtual bool IsPreciseImpl() const = 0; + virtual bool IsValidImpl() const = 0; + virtual std::string GetTypeNameImpl() const { + return ""; + } + + 
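+  // The following members are packed as bit-fields (7 + 1 + 8 = 16 bits):
+  // the type kind, the zero flag, and the source language.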
FEIRTypeKind kind : 7; + bool isZero : 1; + MIRSrcLang srcLang : 8; +}; // class FEIRType + +using UniqueFEIRType = std::unique_ptr; + +class FEIRTypeDefault : public FEIRType { + public: + FEIRTypeDefault(); + explicit FEIRTypeDefault(PrimType argPrimType); + FEIRTypeDefault(PrimType argPrimType, const GStrIdx &argTypeNameIdx); + FEIRTypeDefault(PrimType argPrimType, const GStrIdx &argTypeNameIdx, TypeDim argDim); + ~FEIRTypeDefault() = default; + FEIRTypeDefault(const FEIRTypeDefault&) = delete; + FEIRTypeDefault &operator=(const FEIRTypeDefault&) = delete; + void LoadFromJavaTypeName(const std::string &typeName, bool inMpl = true); + void LoadFromASTTypeName(const std::string &typeName); + MIRType *GenerateMIRTypeForPrim() const; + FEIRTypeDefault &SetTypeNameIdx(const GStrIdx &argTypeNameIdx) { + typeNameIdx = argTypeNameIdx; + return *this; + } + + FEIRTypeDefault &SetTypeName(const std::string &typeName) { + GStrIdx idx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(typeName); + return SetTypeNameIdx(idx); + } + + FEIRTypeDefault &SetTypeNameWithoutCreate(const std::string &typeName) { + GStrIdx idx = GlobalTables::GetStrTable().GetStrIdxFromName(typeName); + CHECK_FATAL(idx.GetIdx() != 0, "typeNameIdx should be pre-created"); + return SetTypeNameIdx(idx); + } + + GStrIdx GetTypeNameIdx() const { + return typeNameIdx; + } + + TypeDim GetDim() const { + return dim; + } + + FEIRTypeDefault &SetDim(TypeDim argDim) { + dim = argDim; + return *this; + } + + LLT_PROTECTED: + void CopyFromImpl(const FEIRType &type) override; + std::unique_ptr CloneImpl() const override; + MIRType *GenerateMIRTypeImpl(bool usePtr, PrimType ptyPtr) const override; + TypeDim ArrayIncrDimImpl(TypeDim delta) override; + TypeDim ArrayDecrDimImpl(TypeDim delta) override; + bool IsEqualToImpl(const FEIRType &argType) const override; + bool IsEqualToImpl(const std::unique_ptr &argType) const override; + uint32 HashImpl() const override; + bool IsScalarImpl() const override; + PrimType GetPrimTypeImpl() const override; + void SetPrimTypeImpl(PrimType pt) override; + MIRType *GenerateMIRTypeInternal(const GStrIdx &argTypeNameIdx, bool usePtr) const; + MIRType *GenerateMIRTypeInternal(const GStrIdx &argTypeNameIdx, bool usePtr, PrimType ptyPtr) const; + std::string GetTypeNameImpl() const override; + + bool IsRefImpl() const override { + return dim > 0 || !IsScalarPrimType(primType); + } + + bool IsArrayImpl() const override { + return dim > 0; + } + + bool IsPreciseImpl() const override { + return IsScalarPrimType(primType) || typeNameIdx != 0; + } + + bool IsValidImpl() const override { + return !IsScalarPrimType(primType) || typeNameIdx == 0; + } + + static bool IsScalarPrimType(PrimType pty) { + return pty != PTY_ref && (IsPrimitiveInteger(pty) || IsPrimitiveFloat(pty) || IsAddress(pty) || pty == PTY_void); + } + + PrimType primType; + GStrIdx typeNameIdx; + TypeDim dim = 0; +}; + +// ---------- FEIRTypeByName ---------- +class FEIRTypeByName : public FEIRTypeDefault { + public: + FEIRTypeByName(PrimType argPrimType, const std::string &argTypeName, TypeDim argDim = 0); + ~FEIRTypeByName() = default; + FEIRTypeByName(const FEIRTypeByName&) = delete; + FEIRTypeByName &operator=(const FEIRTypeByName&) = delete; + + protected: + std::unique_ptr CloneImpl() const override; + MIRType *GenerateMIRTypeImpl(bool usePtr, PrimType ptyPtr) const override; + bool IsEqualToImpl(const FEIRType &argType) const override; + uint32 HashImpl() const override; + bool IsScalarImpl() const override; + bool IsPreciseImpl() const 
override { + return IsScalarPrimType(primType) || !typeName.empty(); + } + + bool IsValidImpl() const override { + return !IsScalarPrimType(primType) || typeName.empty(); + } + + private: + std::string typeName; +}; + + +// ---------- FEIRTypeNative ---------- +// MIRType is enclosed directly in FEIRTypeNative. +// Right now, FEIRTypeNative is only used for c-language. +// Because var type is translated as MIRType directly in stage of ast parse. +class FEIRTypeNative : public FEIRType { + public: + explicit FEIRTypeNative(MIRType &argMIRType); + ~FEIRTypeNative() = default; + FEIRTypeNative(const FEIRTypeNative&) = delete; + FEIRTypeNative &operator=(const FEIRTypeNative&) = delete; + + protected: + MIRType *GenerateMIRTypeAutoImpl() const override; + PrimType GetPrimTypeImpl() const override; + void SetPrimTypeImpl(PrimType pt) override; + void CopyFromImpl(const FEIRType &type) override; + std::unique_ptr CloneImpl() const override; + MIRType *GenerateMIRTypeImpl(bool usePtr, PrimType ptyPtr) const override; + bool IsEqualToImpl(const FEIRType &argType) const override; + uint32 HashImpl() const override; + std::string GetTypeNameImpl() const override; + bool IsScalarImpl() const override; + bool IsRefImpl() const override; + bool IsArrayImpl() const override; + + bool IsPreciseImpl() const override { + return true; + } + + bool IsValidImpl() const override { + return true; + } + + TypeDim ArrayIncrDimImpl(TypeDim delta) override; + TypeDim ArrayDecrDimImpl(TypeDim delta) override; + + private: + MIRType &mirType; +}; + +// ---------- FEIRTypePointer ---------- +class FEIRTypePointer : public FEIRType { + public: + explicit FEIRTypePointer(std::unique_ptr argBaseType, PrimType argPrimType = PTY_ref); + ~FEIRTypePointer() override = default; + FEIRTypePointer(const FEIRTypePointer&) = delete; + FEIRTypePointer &operator=(const FEIRTypePointer&) = delete; + const UniqueFEIRType &GetBaseType() const { + return baseType; + } + + void SetBaseType(UniqueFEIRType argBaseType) { + CHECK_NULL_FATAL(argBaseType); + baseType = std::move(argBaseType); + } + + protected: + std::unique_ptr CloneImpl() const override; + MIRType *GenerateMIRTypeImpl(bool usePtr, PrimType ptyPtr) const override; + bool IsEqualToImpl(const FEIRType &argType) const override; + uint32 HashImpl() const override; + bool IsScalarImpl() const override; + TypeDim ArrayIncrDimImpl(TypeDim delta) override; + TypeDim ArrayDecrDimImpl(TypeDim delta) override; + PrimType GetPrimTypeImpl() const override; + void SetPrimTypeImpl(PrimType pt) override; + bool IsRefImpl() const override { + return true; // pointer type is ref + } + + bool IsArrayImpl() const override { + return baseType->IsArray(); + } + + bool IsPreciseImpl() const override { + return baseType->IsPrecise(); + } + + bool IsValidImpl() const override { + return baseType->IsValid(); + } + + private: + PrimType primType; + std::unique_ptr baseType; +}; + +// ---------- FEIRTypeKey ---------- +class FEIRTypeKey { + public: + explicit FEIRTypeKey(const UniqueFEIRType &argType) { + ASSERT(argType != nullptr, "nullptr check"); + type = argType->Clone(); + } + + explicit FEIRTypeKey(const FEIRTypeKey &key) { + ASSERT(key.type != nullptr, "nullptr check"); + type = key.type->Clone(); + } + + ~FEIRTypeKey() = default; + FEIRTypeKey &operator=(const FEIRTypeKey &key) { + if (&key != this) { + CHECK_NULL_FATAL(key.type); + SetType(key.type->Clone()); + } + return *this; + } + + bool operator==(const FEIRTypeKey &key) const { + return type->IsEqualTo(key.type); + } + + size_t Hash() 
const { + return type->Hash(); + } + + const UniqueFEIRType &GetType() const { + return type; + } + + void SetType(UniqueFEIRType argType) { + CHECK_NULL_FATAL(argType); + type = std::move(argType); + } + + private: + UniqueFEIRType type; +}; + +struct FEIRTypeKeyHash { + size_t operator()(const FEIRTypeKey &key) const { + return key.Hash(); + } +}; +} // namespace maple +#endif // HIR2MPL_INCLUDE_FEIR_TYPE_H \ No newline at end of file diff --git a/src/hir2mpl/common/include/feir_type_helper.h b/src/hir2mpl/common/include/feir_type_helper.h new file mode 100644 index 0000000000000000000000000000000000000000..ded22389501e8199c883cf7a4a6336fe9250d668 --- /dev/null +++ b/src/hir2mpl/common/include/feir_type_helper.h @@ -0,0 +1,42 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef HIR2MPL_INCLUDE_FEIR_TYPE_HELPER_H +#define HIR2MPL_INCLUDE_FEIR_TYPE_HELPER_H +#include +#include +#include "fe_configs.h" +#include "feir_type.h" + +namespace maple { +class FEIRTypeHelper { + public: + static UniqueFEIRType CreateTypeByPrimType(PrimType primType, TypeDim dim = 0, bool usePtr = false); + static UniqueFEIRType CreateTypeByJavaName(const std::string &typeName, bool inMpl, bool usePtr); + static UniqueFEIRType CreatePointerType(UniqueFEIRType baseType, PrimType primType = PTY_ptr); + static UniqueFEIRType CreateTypeByDimIncr(const UniqueFEIRType &srcType, uint8 delta, bool usePtr = false, + PrimType primType = PTY_ptr); + static UniqueFEIRType CreateTypeByDimDecr(const UniqueFEIRType &srcType, uint8 delta); + static UniqueFEIRType CreateTypeByGetAddress(const UniqueFEIRType &srcType, PrimType primType = PTY_ptr); + static UniqueFEIRType CreateTypeByDereferrence(const UniqueFEIRType &srcType); + static UniqueFEIRType CreateTypeDefault(PrimType primType, const GStrIdx &typeNameIdx, TypeDim dim); + static UniqueFEIRType CreateTypeNative(MIRType &mirType); + + private: + FEIRTypeHelper() = default; + ~FEIRTypeHelper() = default; + static UniqueFEIRType CreateTypeByJavaNamePrim(char primTypeFlag, uint8 dim8); +}; +} // namespace maple +#endif // HIR2MPL_INCLUDE_FEIR_TYPE_HELPER_H \ No newline at end of file diff --git a/src/hir2mpl/common/include/feir_type_infer.h b/src/hir2mpl/common/include/feir_type_infer.h new file mode 100644 index 0000000000000000000000000000000000000000..22333623c117bb4135d68339660f3e6329b4c47e --- /dev/null +++ b/src/hir2mpl/common/include/feir_type_infer.h @@ -0,0 +1,84 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef HIR2MPL_INCLUDE_FEIR_TYPE_INFER_H +#define HIR2MPL_INCLUDE_FEIR_TYPE_INFER_H +#include "fe_configs.h" +#include "feir_type.h" +#include "feir_var.h" +#include "feir_dfg.h" + +namespace maple { +class FEIRTypeMergeHelper { + public: + FEIRTypeMergeHelper(); + explicit FEIRTypeMergeHelper(const UniqueFEIRType &argTypeDefault); + ~FEIRTypeMergeHelper() = default; + void Reset(); + void ResetTypeDefault(const UniqueFEIRType &argTypeDefault); + bool MergeType(const UniqueFEIRType &argType, bool parent = true); + UniqueFEIRType GetResult() const; + const std::string &GetError() const { + return error; + } + + const UniqueFEIRType &GetType() const { + return type; + } + + UniqueFEIRType GetTypeClone() const { + return type->Clone(); + } + + LLT_PRIVATE: + UniqueFEIRType typeDefault; + UniqueFEIRType type; + std::string error; + bool firstType; + void SetDefaultType(UniqueFEIRType &typeDst); + void SetType(UniqueFEIRType &typeDst, const UniqueFEIRType &typeSrc); + bool MergeType(UniqueFEIRType &typeDst, const UniqueFEIRType &typeSrc, bool parent = true); +}; + +class FEIRTypeInfer { + public: + FEIRTypeInfer(MIRSrcLang argSrcLang, const FEIRDefUseChain &argMapDefUse); + ~FEIRTypeInfer() = default; + void LoadTypeDefault(); + void Reset(); + UniqueFEIRType GetTypeForVarUse(const UniqueFEIRVar &varUse); + UniqueFEIRType GetTypeForVarDef(UniqueFEIRVar &varDef); + UniqueFEIRType GetTypeByTransForVarUse(const UniqueFEIRVar &varUse); + void ProcessVarDef(UniqueFEIRVar &varDef); + + private: + MIRSrcLang srcLang; + UniqueFEIRType typeDefault; + FEIRTypeMergeHelper mergeHelper; + const FEIRDefUseChain &mapDefUse; + std::set visitVars; + bool withCircle = false; + bool first = false; +}; + +class FEIRTypeCvtHelper { + public: + static Opcode ChooseCvtOpcodeByFromTypeAndToType(const FEIRType &fromType, const FEIRType &toType); + + private: + static bool IsRetypeable(const FEIRType &fromType, const FEIRType &toType); + static bool IsIntCvt2Ref(const FEIRType &fromType, const FEIRType &toType); +}; +} // namespace maple +#endif // HIR2MPL_INCLUDE_FEIR_TYPE_INFER_H diff --git a/src/hir2mpl/common/include/feir_var.h b/src/hir2mpl/common/include/feir_var.h new file mode 100644 index 0000000000000000000000000000000000000000..8bcb328e14de2a7163f61006d29aa59edafc35d8 --- /dev/null +++ b/src/hir2mpl/common/include/feir_var.h @@ -0,0 +1,211 @@ +/* + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef HIR2MPL_INCLUDE_FEIR_VAR_H +#define HIR2MPL_INCLUDE_FEIR_VAR_H +#include +#include "mir_type.h" +#include "mir_symbol.h" +#include "mir_module.h" +#include "mir_builder.h" +#include "feir_type.h" +#include "fe_utils.h" +#include "generic_attrs.h" + +namespace maple { +enum FEIRVarTransKind : uint8 { + kFEIRVarTransDefault = 0, + kFEIRVarTransDirect, + kFEIRVarTransArrayDimIncr, + kFEIRVarTransArrayDimDecr, +}; + +class FEIRVar; +class FEIRVarTrans { + public: + FEIRVarTrans(FEIRVarTransKind argKind, std::unique_ptr &argVar); + FEIRVarTrans(FEIRVarTransKind argKind, std::unique_ptr &argVar, uint8 dimDelta); + ~FEIRVarTrans() = default; + UniqueFEIRType GetType(const UniqueFEIRType &type, PrimType primType = PTY_ref, bool usePtr = true); + std::unique_ptr &GetVar() const { + return var; + } + + void SetTransKind(FEIRVarTransKind argKind) { + kind = argKind; + } + + FEIRVarTransKind GetTransKind() const { + return kind; + } + + private: + FEIRVarTransKind kind; + std::unique_ptr &var; + union { + uint8 dimDelta; + } param; +}; + +using UniqueFEIRVarTrans = std::unique_ptr; + +enum FEIRVarKind : uint8 { + kFEIRVarDefault = 0, + kFEIRVarReg, + kFEIRVarAccumulator, + kFEIRVarName, + kFEIRVarTypeScatter, +}; + +// forward declaration for smart pointers +class FEIRExpr; +class FEIRVar { + public: + explicit FEIRVar(FEIRVarKind argKind); + FEIRVar(FEIRVarKind argKind, std::unique_ptr argType); + virtual ~FEIRVar(); + void SetType(std::unique_ptr argType); + FEIRVarKind GetKind() const { + return kind; + } + + bool operator==(const FEIRVar &var) const { + return GetNameRaw() == var.GetNameRaw(); + } + + bool operator!=(const FEIRVar &var) const { + return GetNameRaw() != var.GetNameRaw(); + } + + const UniqueFEIRType &GetType() const { + return type; + } + + const FEIRType &GetTypeRef() const { + ASSERT(type != nullptr, "type is nullptr"); + return *type.get(); + } + + bool IsGlobal() const { + return isGlobal; + } + + void SetGlobal(bool arg) { + isGlobal = arg; + } + + bool IsDef() const { + return isDef; + } + + void SetDef(bool arg) { + isDef = std::move(arg); + } + + void SetTrans(UniqueFEIRVarTrans argTrans) { + trans = std::move(argTrans); + } + + const UniqueFEIRVarTrans &GetTrans() const { + return trans; + } + + MIRSymbol *GenerateGlobalMIRSymbol(MIRBuilder &builder) const { + MIRSymbol *mirSym = GenerateGlobalMIRSymbolImpl(builder); + if (mirSym->GetSrcPosition().LineNum() == 0) { + mirSym->SetSrcPosition(FEUtils::CvtLoc2SrcPosition(loc)); + } + return mirSym; + } + + MIRSymbol *GenerateLocalMIRSymbol(MIRBuilder &builder) const { + MIRSymbol *mirSym = GenerateLocalMIRSymbolImpl(builder); + if (mirSym->GetSrcPosition().LineNum() == 0) { + mirSym->SetSrcPosition(FEUtils::CvtLoc2SrcPosition(loc)); + } + return mirSym; + } + + MIRSymbol *GenerateMIRSymbol(MIRBuilder &builder) const { + return GenerateMIRSymbolImpl(builder); + } + + std::string GetName(const MIRType &mirType) const { + return GetNameImpl(mirType); + } + + std::string GetNameRaw() const { + return GetNameRawImpl(); + } + + bool EqualsTo(const std::unique_ptr &var) const { + return EqualsToImpl(var); + } + + uint32 Hash() const { + return HashImpl(); + } + + void SetAttrs(const GenericAttrs &argGenericAttrs) { + genAttrs = argGenericAttrs; + } + + void SetSectionAttr(const std::string &str) { + sectionAttr = str; + } + + void SetSrcLoc(const Loc &l) { + loc = l; + } + + Loc GetSrcLoc() const { + return loc; + } + + uint32 GetSrcFileIdx() const { + return loc.fileIdx; + } + + uint32 GetSrcFileLineNum() const { + 
return loc.line; + } + + std::unique_ptr Clone() const; + void SetBoundaryLenExpr(std::unique_ptr expr); + const std::unique_ptr &GetBoundaryLenExpr() const; + + protected: + virtual MIRSymbol *GenerateGlobalMIRSymbolImpl(MIRBuilder &builder) const; + virtual MIRSymbol *GenerateLocalMIRSymbolImpl(MIRBuilder &builder) const; + virtual MIRSymbol *GenerateMIRSymbolImpl(MIRBuilder &builder) const; + virtual std::string GetNameImpl(const MIRType &mirType) const = 0; + virtual std::string GetNameRawImpl() const = 0; + virtual std::unique_ptr CloneImpl() const = 0; + virtual bool EqualsToImpl(const std::unique_ptr &var) const = 0; + virtual uint32 HashImpl() const = 0; + + FEIRVarKind kind : 6; + bool isGlobal : 1; + bool isDef : 1; + UniqueFEIRType type; + UniqueFEIRVarTrans trans; + GenericAttrs genAttrs; + Loc loc = {0, 0, 0}; + std::string sectionAttr; + std::unique_ptr boundaryLenExpr; +}; + +using UniqueFEIRVar = std::unique_ptr; +} // namespace maple +#endif // HIR2MPL_INCLUDE_FEIR_VAR_H diff --git a/src/hir2mpl/common/include/feir_var_name.h b/src/hir2mpl/common/include/feir_var_name.h new file mode 100644 index 0000000000000000000000000000000000000000..713ec5603470998548b51bc4611e33a2afd8f127 --- /dev/null +++ b/src/hir2mpl/common/include/feir_var_name.h @@ -0,0 +1,54 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef HIR2MPL_INCLUDE_FEIR_VAR_NAME_H +#define HIR2MPL_INCLUDE_FEIR_VAR_NAME_H +#include +#include "mir_symbol.h" +#include "types_def.h" +#include "global_tables.h" +#include "feir_var.h" + +namespace maple { +// ---------- FEIRVarName ---------- +class FEIRVarName : public FEIRVar { + public: + explicit FEIRVarName(const GStrIdx &argNameIdx, bool argWithType = false) + : FEIRVar(FEIRVarKind::kFEIRVarName), + nameIdx(argNameIdx), + withType(argWithType) {} + + FEIRVarName(const GStrIdx &argNameIdx, std::unique_ptr argType, bool argWithType = false) + : FEIRVar(FEIRVarKind::kFEIRVarName, std::move(argType)), + nameIdx(argNameIdx), + withType(argWithType) {} + + FEIRVarName(const std::string &argName, std::unique_ptr argType, bool argWithType = false) + : FEIRVarName(GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(argName), std::move(argType), argWithType) {} + + virtual ~FEIRVarName() = default; + + protected: + std::string GetNameImpl(const MIRType &mirType) const override; + std::string GetNameRawImpl() const override; + std::unique_ptr CloneImpl() const override; + bool EqualsToImpl(const std::unique_ptr &var) const override; + uint32 HashImpl() const override; + + GStrIdx nameIdx; + // means emit this symbol with type + bool withType : 1; +}; +} // namespace maple +#endif // HIR2MPL_INCLUDE_FEIR_VAR_REG_H \ No newline at end of file diff --git a/src/hir2mpl/common/include/feir_var_reg.h b/src/hir2mpl/common/include/feir_var_reg.h new file mode 100644 index 0000000000000000000000000000000000000000..a37fdfbdc78a2dfe64c840ef1fe5eb229f9f41c5 --- /dev/null +++ b/src/hir2mpl/common/include/feir_var_reg.h @@ -0,0 +1,72 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef HIR2MPL_INCLUDE_FEIR_VAR_REG_H +#define HIR2MPL_INCLUDE_FEIR_VAR_REG_H +#include "mir_symbol.h" +#include "feir_var.h" + +namespace maple { +class FEIRVarReg : public FEIRVar { + public: + explicit FEIRVarReg(uint32 argRegNum, FEIRVarKind kind = FEIRVarKind::kFEIRVarReg) + : FEIRVar(kind), + regNum(argRegNum) {} + + FEIRVarReg(uint32 argRegNum, PrimType argPrimType) + : FEIRVarReg(argRegNum) { + type->SetPrimType(argPrimType); + } + + FEIRVarReg(uint32 argRegNum, std::unique_ptr argType, FEIRVarKind kind = FEIRVarKind::kFEIRVarReg) + : FEIRVar(kind, std::move(argType)), + regNum(argRegNum) {} + + ~FEIRVarReg() = default; + uint32 GetRegNum() const { + return regNum; + } + + protected: + std::string GetNameImpl(const MIRType &mirType) const override; + std::string GetNameRawImpl() const override; + MIRSymbol *GenerateLocalMIRSymbolImpl(MIRBuilder &builder) const override; + std::unique_ptr CloneImpl() const override; + bool EqualsToImpl(const std::unique_ptr &var) const override; + uint32 HashImpl() const override; + uint32 regNum; +}; + +class FEIRVarAccumulator : public FEIRVarReg { + public: + explicit FEIRVarAccumulator(uint32 argRegNum) + : FEIRVarReg(argRegNum, FEIRVarKind::kFEIRVarAccumulator) {} + + FEIRVarAccumulator(uint32 argRegNum, PrimType argPrimType) + : FEIRVarAccumulator(argRegNum) { + type->SetPrimType(argPrimType); + } + + FEIRVarAccumulator(uint32 argRegNum, std::unique_ptr argType) + : FEIRVarReg(argRegNum, std::move(argType), FEIRVarKind::kFEIRVarAccumulator) {} + + ~FEIRVarAccumulator() = default; + + protected: + std::string GetNameImpl(const MIRType &mirType) const override; + std::string GetNameRawImpl() const override; + std::unique_ptr CloneImpl() const override; +}; +} // namespace maple +#endif // HIR2MPL_INCLUDE_FEIR_VAR_REG_H \ No newline at end of file diff --git a/src/hir2mpl/common/include/feir_var_type_scatter.h b/src/hir2mpl/common/include/feir_var_type_scatter.h new file mode 100644 index 0000000000000000000000000000000000000000..d23a717015f7da2dd553f666fd5bc2490b938a6e --- /dev/null +++ b/src/hir2mpl/common/include/feir_var_type_scatter.h @@ -0,0 +1,50 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef FEIR_VAR_TYPE_SCATTER_H +#define FEIR_VAR_TYPE_SCATTER_H +#include +#include "feir_var.h" +#include "feir_type.h" + +namespace maple { +class FEIRVarTypeScatter : public FEIRVar { + public: + explicit FEIRVarTypeScatter(UniqueFEIRVar argVar); + ~FEIRVarTypeScatter() = default; + void AddScatterType(const UniqueFEIRType &type); + const std::unordered_set &GetScatterTypes() const { + return scatterTypes; + } + + const UniqueFEIRVar &GetVar() const { + return var; + } + + protected: + MIRSymbol *GenerateGlobalMIRSymbolImpl(MIRBuilder &builder) const override; + MIRSymbol *GenerateLocalMIRSymbolImpl(MIRBuilder &builder) const override; + MIRSymbol *GenerateMIRSymbolImpl(MIRBuilder &builder) const override; + std::string GetNameImpl(const MIRType &mirType) const override; + std::string GetNameRawImpl() const override; + std::unique_ptr CloneImpl() const override; + bool EqualsToImpl(const std::unique_ptr &argVar) const override; + uint32 HashImpl() const override; + + private: + UniqueFEIRVar var; + std::unordered_set scatterTypes; +}; // class FEIRVarTypeScatter +} // namespace maple +#endif // FEIR_VAR_TYPE_SCATTER_H diff --git a/src/hir2mpl/common/include/generic_attrs.h b/src/hir2mpl/common/include/generic_attrs.h new file mode 100644 index 0000000000000000000000000000000000000000..6b7406d30dd1516e305de461292c3369d429bb86 --- /dev/null +++ b/src/hir2mpl/common/include/generic_attrs.h @@ -0,0 +1,109 @@ +/* + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef GENERIC_ATTRS_H +#define GENERIC_ATTRS_H +#include +#include +#include "mir_type.h" + +namespace maple { +using AttrContent = std::variant; +// only for internal use, not emitted +enum GenericAttrKind { +#define FUNC_ATTR +#define TYPE_ATTR +#define FIELD_ATTR +#define ATTR(STR) GENATTR_##STR, +#include "all_attributes.def" +#undef ATTR +#undef FUNC_ATTR +#undef TYPE_ATTR +#undef FIELD_ATTR +}; +constexpr uint32 kMaxATTRNum = 128; + +class GenericAttrs { + public: + GenericAttrs() = default; + GenericAttrs(const GenericAttrs &ta) = default; + GenericAttrs &operator=(const GenericAttrs &p) = default; + ~GenericAttrs() = default; + + void SetAttr(GenericAttrKind x) { + attrFlag.set(x); + } + + void ResetAttr(GenericAttrKind x) { + attrFlag.reset(x); + } + + bool GetAttr(GenericAttrKind x) const { + return attrFlag[x]; + } + + bool operator==(const GenericAttrs &tA) const { + return attrFlag == tA.attrFlag; + } + + bool operator!=(const GenericAttrs &tA) const { + return !(*this == tA); + } + + void InitContentMap() { + contentMap.resize(kMaxATTRNum); + isInit = true; + } + + bool GetContentFlag(GenericAttrKind key) const { + return contentFlag[key]; + } + + void InsertIntContentMap(GenericAttrKind key, int val) { + if (!isInit) { + InitContentMap(); + } + if (!contentFlag[key]) { + contentMap[key] = val; + contentFlag.set(key); + } + } + + void InsertStrIdxContentMap(GenericAttrKind key, GStrIdx nameIdx) { + if (!isInit) { + InitContentMap(); + } + if (!contentFlag[key]) { + contentMap[key] = nameIdx; + contentFlag.set(key); + } + } + + void ClearContentMap() { + contentMap.clear(); + contentMap.shrink_to_fit(); + } + + FieldAttrs ConvertToFieldAttrs(); + TypeAttrs ConvertToTypeAttrs() const; + FuncAttrs ConvertToFuncAttrs(); + + private: + std::bitset attrFlag = 0; + std::bitset contentFlag = 0; + std::vector contentMap; + bool isInit = false; +}; +} +#endif // GENERIC_ATTRS_H \ No newline at end of file diff --git a/src/hir2mpl/common/include/hir2mpl_compiler.h b/src/hir2mpl/common/include/hir2mpl_compiler.h new file mode 100644 index 0000000000000000000000000000000000000000..423e67f82d95349ed2e18fb1ac23ad05f0010c1a --- /dev/null +++ b/src/hir2mpl/common/include/hir2mpl_compiler.h @@ -0,0 +1,78 @@ +/* + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef HIR2MPL_INCLUDE_COMMON_HIR2MPL_COMPILER_NEW_H +#define HIR2MPL_INCLUDE_COMMON_HIR2MPL_COMPILER_NEW_H +#include +#include +#include "fe_macros.h" +#include "hir2mpl_compiler_component.h" +#include "mpl_logging.h" +#include "hir2mpl_options.h" +#include "jbc_compiler_component.h" +#include "fe_options.h" +#include "bc_compiler_component.h" +#include "ark_annotation_processor.h" +#include "dex_reader.h" +#include "ast_compiler_component.h" +#include "ast_compiler_component-inl.h" +#include "ast_parser.h" +#ifdef ENABLE_MAST +#include "maple_ast_parser.h" +#endif +#include "hir2mpl_env.h" +#include "fe_manager.h" +#include "fe_type_hierarchy.h" + +namespace maple { +class HIR2MPLCompiler { + public: + explicit HIR2MPLCompiler(MIRModule &argModule); + ~HIR2MPLCompiler(); + // common process + int Run(); + void Init(); + void Release(); + void CheckInput(); + void SetupOutputPathAndName(); + bool LoadMplt(); + void ExportMpltFile(); + void ExportMplFile(); + + // component process + void RegisterCompilerComponent(std::unique_ptr comp); + void ParseInputs(); + void LoadOnDemandTypes(); + void PreProcessDecls(); + void ProcessDecls(); + void ProcessPragmas(); + void ProcessFunctions(); + + private: + void RegisterCompilerComponent(); + inline void InsertImportInMpl(const std::list &mplt) const; + void FindMinCompileFailedFEFunctions(); + MIRModule &module; + MIRSrcLang srcLang; + MemPool *mp; + MapleAllocator allocator; + std::string firstInputName; + std::string outputPath; + std::string outputName; + std::string outNameWithoutType; + std::list> components; + std::set compileFailedFEFunctions; +}; +} // namespace maple +#endif // HIR2MPL_INCLUDE_COMMON_HIR2MPL_COMPILER_NEW_H diff --git a/src/hir2mpl/common/include/hir2mpl_compiler_component.h b/src/hir2mpl/common/include/hir2mpl_compiler_component.h new file mode 100644 index 0000000000000000000000000000000000000000..3e9040f0995b45669455bf44dad327450090daca --- /dev/null +++ b/src/hir2mpl/common/include/hir2mpl_compiler_component.h @@ -0,0 +1,144 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef HIR2MPL_INCLUDE_COMMON_HIR2MPL_COMPILER_COMPONENT_H +#define HIR2MPL_INCLUDE_COMMON_HIR2MPL_COMPILER_COMPONENT_H +#include +#include +#include "mir_module.h" +#include "fe_function.h" +#include "fe_input.h" +#include "fe_input_helper.h" +#include "mpl_scheduler.h" + +namespace maple { +class FEFunctionProcessTask : public MplTask { + public: + explicit FEFunctionProcessTask(std::unique_ptr argFunction); + virtual ~FEFunctionProcessTask() = default; + + protected: + int RunImpl(MplTaskParam *param) override; + int FinishImpl(MplTaskParam *param) override; + + private: + std::unique_ptr function; +}; + +class FEFunctionProcessSchedular : public MplScheduler { + public: + explicit FEFunctionProcessSchedular(const std::string &name) + : MplScheduler(name) {} + virtual ~FEFunctionProcessSchedular() = default; + void AddFunctionProcessTask(std::unique_ptr function); + void SetDumpTime(bool arg) { + dumpTime = arg; + } + + protected: + void CallbackThreadMainStart() override; + + private: + std::list> tasks; +}; + +class HIR2MPLCompilerComponent { + public: + HIR2MPLCompilerComponent(MIRModule &argModule, MIRSrcLang argSrcLang); + virtual ~HIR2MPLCompilerComponent() = default; + bool ParseInput() { + return ParseInputImpl(); + } + + bool LoadOnDemandType() { + return LoadOnDemandTypeImpl(); + } + + bool PreProcessDecl() { + return PreProcessDeclImpl(); + } + + bool ProcessDecl() { + return ProcessDeclImpl(); + } + + void ProcessPragma() { + ProcessPragmaImpl(); + } + + std::unique_ptr CreatFEFunction(FEInputMethodHelper *methodHelper) { + return CreatFEFunctionImpl(methodHelper); + } + + bool ProcessFunctionSerial() { + return ProcessFunctionSerialImpl(); + } + + bool ProcessFunctionParallel(uint32 nthreads) { + return ProcessFunctionParallelImpl(nthreads); + } + + std::string GetComponentName() const { + return GetComponentNameImpl(); + } + + bool Parallelable() const { + return ParallelableImpl(); + } + + void DumpPhaseTimeTotal() const { + DumpPhaseTimeTotalImpl(); + } + + uint32 GetFunctionsSize() const { + return funcSize; + } + + const std::set &GetCompileFailedFEFunctions() const { + return compileFailedFEFunctions; + } + + void ReleaseMemPool() { + ReleaseMemPoolImpl(); + } + + protected: + virtual bool ParseInputImpl() = 0; + virtual bool LoadOnDemandTypeImpl(); + virtual bool PreProcessDeclImpl(); + virtual bool ProcessDeclImpl(); + virtual void ProcessPragmaImpl() {}; + virtual std::unique_ptr CreatFEFunctionImpl(FEInputMethodHelper *methodHelper) = 0; + virtual bool ProcessFunctionSerialImpl(); + virtual bool ProcessFunctionParallelImpl(uint32 nthreads); + virtual std::string GetComponentNameImpl() const; + virtual bool ParallelableImpl() const; + virtual void DumpPhaseTimeTotalImpl() const; + virtual void ReleaseMemPoolImpl() = 0; + + uint32 funcSize; + MIRModule &module; + MIRSrcLang srcLang; + std::list> fieldHelpers; + std::list> methodHelpers; + std::list structHelpers; + std::list globalFuncHelpers; + std::list globalVarHelpers; + std::list globalFileScopeAsmHelpers; + std::list enumHelpers; + std::unique_ptr phaseResultTotal; + std::set compileFailedFEFunctions; +}; +} // namespace maple +#endif diff --git a/src/hir2mpl/common/include/hir2mpl_env.h b/src/hir2mpl/common/include/hir2mpl_env.h new file mode 100644 index 0000000000000000000000000000000000000000..4d5069893c32407952ad03baaa712c68c82f282e --- /dev/null +++ b/src/hir2mpl/common/include/hir2mpl_env.h @@ -0,0 +1,50 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. 
+ * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef HIR2MPL_INCLUDE_COMMON_HIR2MPL_ENV_H +#define HIR2MPL_INCLUDE_COMMON_HIR2MPL_ENV_H +#include +#include +#include +#include "mir_module.h" + +namespace maple { +class HIR2MPLEnv { + public: + void Init(); + void Finish(); + uint32 NewSrcFileIdx(const GStrIdx &nameIdx); + GStrIdx GetFileNameIdx(uint32 fileIdx) const; + std::string GetFileName(uint32 fileIdx) const; + static HIR2MPLEnv &GetInstance() { + return instance; + } + + uint32 GetGlobalLabelIdx() const { + return globalLabelIdx; + } + + void IncrGlobalLabelIdx() { + globalLabelIdx++; + } + + private: + static HIR2MPLEnv instance; + std::map srcFileIdxNameMap; + uint32 globalLabelIdx = GStrIdx(0); + HIR2MPLEnv() = default; + ~HIR2MPLEnv() = default; +}; // class HIR2MPLEnv +} // namespace maple +#endif // HIR2MPL_INCLUDE_COMMON_HIR2MPL_ENV_H diff --git a/src/hir2mpl/common/include/hir2mpl_option.h b/src/hir2mpl/common/include/hir2mpl_option.h new file mode 100644 index 0000000000000000000000000000000000000000..80d8c3995b50b6a37576ba948ed97c40b3ef96d4 --- /dev/null +++ b/src/hir2mpl/common/include/hir2mpl_option.h @@ -0,0 +1,69 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ + +#ifndef HIR2MPL_COMMON_INCLUDE_HIR2MPL_OPTION_H +#define HIR2MPL_COMMON_INCLUDE_HIR2MPL_OPTION_H + +#include "driver_options.h" + +namespace opts::hir2mpl { +extern maplecl::Option help; +extern maplecl::Option version; +extern maplecl::Option mpltSys; +extern maplecl::Option mpltApk; +extern maplecl::Option mplt; +extern maplecl::Option inClass; +extern maplecl::Option inJar; +extern maplecl::Option inDex; +extern maplecl::Option inAst; +extern maplecl::Option inMast; +extern maplecl::Option output; +extern maplecl::Option outputName; +extern maplecl::Option mpltOnly; +extern maplecl::Option asciimplt; +extern maplecl::Option dumpInstComment; +extern maplecl::Option noMplFile; +extern maplecl::Option dumpLevel; +extern maplecl::Option dumpTime; +extern maplecl::Option dumpComment; +extern maplecl::Option dumpLOC; +extern maplecl::Option dbgFriendly; +extern maplecl::Option dumpPhaseTime; +extern maplecl::Option dumpPhaseTimeDetail; +extern maplecl::Option rc; +extern maplecl::Option nobarrier; +extern maplecl::Option usesignedchar; +extern maplecl::Option o2; +extern maplecl::Option simplifyShortCircuit; +extern maplecl::Option enableVariableArray; +extern maplecl::Option funcInliceSize; +extern maplecl::Option np; +extern maplecl::Option dumpThreadTime; +extern maplecl::Option xbootclasspath; +extern maplecl::Option classloadercontext; +extern maplecl::Option dep; +extern maplecl::Option depsamename; +extern maplecl::Option npeCheckDynamic; +extern maplecl::Option boundaryCheckDynamic; +extern maplecl::Option safeRegion; +extern maplecl::Option defaultSafe; +extern maplecl::Option dumpFEIRBB; +extern maplecl::Option dumpFEIRCFGGraph; +extern maplecl::Option wpaa; +extern maplecl::Option debug; + +} + +#endif /* HIR2MPL_COMMON_INCLUDE_HIR2MPL_OPTION_H */ diff --git a/src/hir2mpl/common/include/hir2mpl_options.h b/src/hir2mpl/common/include/hir2mpl_options.h new file mode 100644 index 0000000000000000000000000000000000000000..fcc4b4b78df8d68df9850464397f8da387a1647e --- /dev/null +++ b/src/hir2mpl/common/include/hir2mpl_options.h @@ -0,0 +1,129 @@ +/* + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef HIR2MPL_INCLUDE_COMMON_HIR2MPL_OPTIONS_H +#define HIR2MPL_INCLUDE_COMMON_HIR2MPL_OPTIONS_H +#include +#include +#include "factory.h" +#include "hir2mpl_option.h" +#include "parser_opt.h" +#include "types_def.h" + +namespace maple { +class HIR2MPLOptions { + public: + static inline HIR2MPLOptions &GetInstance() { + static HIR2MPLOptions options; + return options; + } + void Init() const; + static bool InitFactory(); + bool SolveOptions(bool isDebug); + bool SolveArgs(int argc, char **argv); + void DumpUsage() const; + void DumpVersion() const; + template + static void Split(const std::string &s, char delim, Out result); + static std::list SplitByComma(const std::string &s); + + // non-option process + void ProcessInputFiles(const std::vector &inputs) const; + + private: + template + using OptionProcessFactory = FunctionFactory; + using OptionFactory = OptionProcessFactory; + + HIR2MPLOptions(); + ~HIR2MPLOptions() = default; + + // option process + bool ProcessHelp(const maplecl::OptionInterface &) const; + bool ProcessVersion(const maplecl::OptionInterface &) const; + + // input control options + bool ProcessInClass(const maplecl::OptionInterface &mpltSys) const; + bool ProcessInJar(const maplecl::OptionInterface &mpltApk) const; + bool ProcessInDex(const maplecl::OptionInterface &inDex) const; + bool ProcessInAST(const maplecl::OptionInterface &inAst) const; + bool ProcessInMAST(const maplecl::OptionInterface &inMast) const; + bool ProcessInputMplt(const maplecl::OptionInterface &mplt) const; + bool ProcessInputMpltFromSys(const maplecl::OptionInterface &mpltSys) const; + bool ProcessInputMpltFromApk(const maplecl::OptionInterface &mpltApk) const; + + // output control options + bool ProcessOutputPath(const maplecl::OptionInterface &output) const; + bool ProcessOutputName(const maplecl::OptionInterface &outputName) const; + bool ProcessGenMpltOnly(const maplecl::OptionInterface &) const; + bool ProcessGenAsciiMplt(const maplecl::OptionInterface &) const; + bool ProcessDumpInstComment(const maplecl::OptionInterface &) const; + bool ProcessNoMplFile(const maplecl::OptionInterface &) const; + + // debug info control options + bool ProcessDumpLevel(const maplecl::OptionInterface &outputName) const; + bool ProcessDumpTime(const maplecl::OptionInterface &) const; + bool ProcessDumpComment(const maplecl::OptionInterface &) const; + bool ProcessDumpLOC(const maplecl::OptionInterface &) const; + bool ProcessDbgFriendly(const maplecl::OptionInterface &) const; + bool ProcessDumpPhaseTime(const maplecl::OptionInterface &) const; + bool ProcessDumpPhaseTimeDetail(const maplecl::OptionInterface &) const; + + // java compiler options + bool ProcessModeForJavaStaticFieldName(const maplecl::OptionInterface &opt) const; + bool ProcessJBCInfoUsePathName(const maplecl::OptionInterface &) const; + bool ProcessDumpJBCStmt(const maplecl::OptionInterface &) const; + bool ProcessDumpJBCAll(const maplecl::OptionInterface &) const; + bool ProcessDumpJBCErrorOnly(const maplecl::OptionInterface &) const; + bool ProcessDumpJBCFuncName(const maplecl::OptionInterface &opt) const; + bool ProcessEmitJBCLocalVarInfo(const maplecl::OptionInterface &) const; + + // bc compiler options + bool ProcessRC(const maplecl::OptionInterface &) const; + bool ProcessNoBarrier(const maplecl::OptionInterface &) const; + bool ProcessO2(const maplecl::OptionInterface &) const; + bool ProcessSimplifyShortCircuit(const maplecl::OptionInterface &) const; + bool ProcessEnableVariableArray(const maplecl::OptionInterface &) const; 
+ bool ProcessFuncInlineSize(const maplecl::OptionInterface &funcInliceSize) const; + bool ProcessWPAA(const maplecl::OptionInterface &) const; + + // ast compiler options + bool ProcessUseSignedChar(const maplecl::OptionInterface &) const; + bool ProcessBigEndian() const; + + // general stmt/bb/cfg options + bool ProcessDumpFEIRBB(const maplecl::OptionInterface &) const; + bool ProcessDumpFEIRCFGGraph(const maplecl::OptionInterface &opt) const; + + // multi-thread control options + bool ProcessNThreads(const maplecl::OptionInterface &numThreads) const; + bool ProcessDumpThreadTime(const maplecl::OptionInterface &) const; + + // On Demand Type Creation + bool ProcessXbootclasspath(const maplecl::OptionInterface &xbootclasspath) const; + bool ProcessClassLoaderContext(const maplecl::OptionInterface &classloadercontext) const; + bool ProcessCollectDepTypes(const maplecl::OptionInterface &dep) const; + bool ProcessDepSameNamePolicy(const maplecl::OptionInterface &depsamename) const; + + // EnhanceC + bool ProcessNpeCheckDynamic(const maplecl::OptionInterface &) const; + bool ProcessBoundaryCheckDynamic(const maplecl::OptionInterface &) const; + bool ProcessSafeRegion(const maplecl::OptionInterface &) const; + bool ProcessDefaultSafe(const maplecl::OptionInterface &) const; + + // symbol resolve + bool ProcessAOT(const maplecl::OptionInterface &) const; +}; // class HIR2MPLOptions +} // namespace maple +#endif // HIR2MPL_INCLUDE_COMMON_HIR2MPL_OPTIONS_H diff --git a/src/hir2mpl/common/include/simple_xml.h b/src/hir2mpl/common/include/simple_xml.h new file mode 100644 index 0000000000000000000000000000000000000000..3e07b9baf450d0c5c0879d9d706ce4048b3272d1 --- /dev/null +++ b/src/hir2mpl/common/include/simple_xml.h @@ -0,0 +1,126 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef HIR2MPL_INCLUDE_SIMPLE_XML_H +#define HIR2MPL_INCLUDE_SIMPLE_XML_H +#include +#include +#include +#include "types_def.h" +#include "mempool_allocator.h" +#include "maple_string.h" + +namespace maple { +enum SimpleXMLElemIdTag { + kXMLIDNone, + kXMLID, +}; + +enum SimpleXMLElemValueTag { + kXMLValueNone, + kXMLValueI8, + kXMLValueU8, + kXMLValueI16, + kXMLValueU16, + kXMLValueI32, + kXMLValueU32, + kXMLValueI64, + kXMLValueU64, + kXMLValueFloat, + kXMLValueDouble, + kXMLValueString, +}; + +class SimpleXMLElem { + public: + SimpleXMLElem(MapleAllocator &alloc, const std::string &cat); + SimpleXMLElem(MapleAllocator &alloc, const std::string &cat, uint32 argID); + virtual ~SimpleXMLElem() = default; + void AddElement(SimpleXMLElem &elem); + SimpleXMLElem *AddNewElement(const std::string &cat); + SimpleXMLElem *AddNewElement(const std::string &cat, uint32 argID); + void SetValue(int8 v); + void SetValue(uint8 v); + void SetValue(int16 v); + void SetValue(uint16 v); + void SetValue(int32 v); + void SetValue(uint32 v); + void SetValue(int64 v); + void SetValue(uint64 v); + void SetValue(float v); + void SetValue(double v); + void SetValue(const std::string &str); + void SetContent(const std::string &str); + static std::string XMLString(const std::string &strIn); + void Dump(std::ostream &os, const std::string &prefix = "") const { + return DumpImpl(os, prefix); + } + + protected: + virtual void DumpImpl(std::ostream &os, const std::string &prefix) const; + void DumpHead(std::ostream &os, const std::string &prefix = "") const; + void DumpTail(std::ostream &os, const std::string &prefix = "") const; + + SimpleXMLElemIdTag tagID; + SimpleXMLElemValueTag tagValue; + MapleAllocator &allocator; + std::string catalog; + uint32 id; + union { + int8 i8; + uint8 u8; + int16 i16; + uint16 u16; + int32 i32; + uint32 u32; + int64 i64; + uint64 u64; + float f; + double d; + uint64 raw; + } value; + std::string valueString; + std::string content; + MapleList elems; +}; + +class SimpleXMLElemMultiLine : public SimpleXMLElem { + public: + SimpleXMLElemMultiLine(MapleAllocator &alloc, const std::string &cat); + SimpleXMLElemMultiLine(MapleAllocator &alloc, const std::string &cat, uint32 argID); + ~SimpleXMLElemMultiLine() = default; + void AddLine(const std::string &line); + + protected: + void DumpImpl(std::ostream &os, const std::string &prefix) const override; + + private: + MapleList lines; +}; + +class SimpleXML { + public: + explicit SimpleXML(MapleAllocator &alloc); + ~SimpleXML() = default; + void AddRoot(SimpleXMLElem &elem); + + protected: + void DumpImpl(std::ostream &os) const; + + private: + MapleAllocator &allocator; + MapleList roots; +}; +} // namespace maple +#endif diff --git a/src/hir2mpl/common/include/simple_zip.h b/src/hir2mpl/common/include/simple_zip.h new file mode 100644 index 0000000000000000000000000000000000000000..c6ad931e9189e200c68d60cc48ef18d339a4530c --- /dev/null +++ b/src/hir2mpl/common/include/simple_zip.h @@ -0,0 +1,126 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. 
+ * See the Mulan PSL v2 for more details. + */ +// ref: https://pkware.cachefly.net/webdocs/casestudies/APPNOTE.TXT +#ifndef HIR2MPL_INCLUDE_SIMPLE_ZIP_H +#define HIR2MPL_INCLUDE_SIMPLE_ZIP_H +#include +#include +#include +#include "types_def.h" +#include "basic_io.h" + +namespace maple { +class ZipLocalFileHeader { + public: + ZipLocalFileHeader() = default; + ~ZipLocalFileHeader(); + static std::unique_ptr Parse(BasicIORead &io); + uint16 GetGPFlag() const { + return gpFlag; + } + + std::string GetFileName() const { + return fileName; + } + + private: + uint32 signature = 0; + uint16 minVersion = 0; + uint16 gpFlag = 0; + uint16 compMethod = 0; + uint16 rawTime = 0; + uint16 rawDate = 0; + uint32 crc32 = 0; + uint32 compSize = 0; + uint32 unCompSize = 0; + uint16 lengthFileName = 0; + uint16 lengthExtraField = 0; + std::string fileName = ""; + uint8 *extraField = nullptr; +}; + +class ZipDataDescriptor { + public: + ZipDataDescriptor() = default; + ~ZipDataDescriptor() = default; + static std::unique_ptr Parse(BasicIORead &io); + uint32 GetSignature() const { + return signature; + } + + uint32 GetCRC32() const { + return crc32; + } + + uint32 GetCompSize() const { + return compSize; + } + + uint32 GetUnCompSize() const { + return unCompSize; + } + + private: + uint32 signature = 0; + uint32 crc32 = 0; + uint32 compSize = 0; + uint32 unCompSize = 0; +}; + +class ZipLocalFile { + public: + ZipLocalFile() = default; + ~ZipLocalFile(); + static std::unique_ptr Parse(BasicIORead &io); + std::string GetFileName() const { + return header->GetFileName(); + } + + const uint8 *GetUnCompData() const { + return unCompData; + } + + uint32 GetUnCompDataSize() const { + return unCompDataSize; + } + + private: + uint32 GetDataEndPos(const BasicIORead &io); + void ProcessUncompressedFile(BasicIORead &io, uint32 start, uint32 end); + void ProcessCompressedFile(BasicIORead &io, uint32 start, uint32 end); + + std::unique_ptr header; + uint8 *compData = nullptr; + uint8 *unCompData = nullptr; + uint32 unCompDataSize = 0; + std::unique_ptr dataDesc; + bool isCompressed = false; +}; + +class SimpleZip : public BasicIORead { + public: + explicit SimpleZip(BasicIOMapFile &file); + ~SimpleZip() override; + void ParseFile(); + + const std::list> &GetFiles() const { + return files; + } + + private: + std::list> files; +}; +} // namespace maple +#endif diff --git a/src/hir2mpl/common/src/base64.cpp b/src/hir2mpl/common/src/base64.cpp new file mode 100644 index 0000000000000000000000000000000000000000..6afadbe9a2ade5e6f5fadb930eecc2415832af19 --- /dev/null +++ b/src/hir2mpl/common/src/base64.cpp @@ -0,0 +1,164 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "base64.h" +#include +#include +#include "types_def.h" +#include "mpl_logging.h" + +namespace maple { +namespace { +const std::string kBase64IdxStr = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"; +const uint32 kBase64IdxStrLength = 64; +const uint8 kBase64MaskEncode = 0x3F; +const uint8 kBase64MaskDecode = 0xFF; +const uint32 kBase64Shift0 = 0; +const uint32 kBase64Shift8 = 8; +const uint32 kBase64Shift16 = 16; +const uint32 kBase64Shift6 = 6; +const uint32 kBase64Shift12 = 12; +const uint32 kBase64Shift18 = 18; +const size_t kBase64EncodeBaseLen = 3; +const size_t kBase64DecodeBaseLen = 4; +} + +std::map Base64::encodeMap = Base64::InitEncodeMap(); +std::map Base64::decodeMap = Base64::InitDecodeMap(); + +std::map Base64::InitEncodeMap() { + // init encode map + std::map ans; + CHECK_FATAL(kBase64IdxStr.length() == kBase64IdxStrLength, "length of base64_idx_str_ must be 64"); + for (size_t i = 0; i < kBase64IdxStr.length(); i++) { + CHECK_FATAL(ans.insert(std::make_pair(i, kBase64IdxStr[i])).second, "encodeMap insert failed"); + } + return ans; +} + +std::map Base64::InitDecodeMap() { + // init decode map + std::map ans; + CHECK_FATAL(kBase64IdxStr.length() == kBase64IdxStrLength, "length of base64_idx_str_ must be 64"); + for (uint32 i = 0; i < kBase64IdxStr.length(); i++) { + CHECK_FATAL(ans.insert(std::make_pair(kBase64IdxStr[i], i)).second, "decodeMap insert failed"); + } + CHECK_FATAL(ans.insert(std::make_pair('=', 0)).second, "decodeMap insert failed"); + return ans; +} + +std::string Base64::Encode(const uint8 *input, size_t length) { + // process input + std::string strEncoded; + uint32 temp; + size_t offset; + while (length > 0) { + temp = 0; + if (length >= 1) { + temp |= (input[0] << kBase64Shift16); + } + if (length >= 2) { + temp |= (input[1] << kBase64Shift8); + } + if (length >= 3) { + temp |= (input[2] << kBase64Shift0); + } + + // encoded code 0 + strEncoded.push_back(encodeMap[static_cast((temp >> kBase64Shift18) & kBase64MaskEncode)]); + // encoded code 1 + strEncoded.push_back(encodeMap[static_cast((temp >> kBase64Shift12) & kBase64MaskEncode)]); + + if (length < 2) { + strEncoded.push_back('='); + } else { + // encoded code 2 + strEncoded.push_back(encodeMap[static_cast((temp >> kBase64Shift6) & kBase64MaskEncode)]); + } + + if (length < kBase64EncodeBaseLen) { + strEncoded.push_back('='); + } else { + // encoded code 3 + strEncoded.push_back(encodeMap[static_cast((temp >> kBase64Shift0) & kBase64MaskEncode)]); + } + + offset = (length >= kBase64EncodeBaseLen) ? 
kBase64EncodeBaseLen : length; + length -= offset; + input += offset; + } + return strEncoded; +} + +size_t Base64::DecodeLength(const std::string &input) { + // length calculation + size_t length; + size_t inputLength = input.length(); + if (inputLength == 0) { + return 0; + } + CHECK_FATAL(inputLength % kBase64DecodeBaseLen == 0, "input.length must be factor of 4"); + length = inputLength * kBase64EncodeBaseLen / kBase64DecodeBaseLen; + if (input[inputLength - 1] == '=') { + length--; + } + if (input[inputLength - 2] == '=') { + length--; + } + return length; +} + +uint8 *Base64::Decode(const std::string &input, size_t &lengthRet) { + size_t inputLength = input.length(); + lengthRet = DecodeLength(input); + if (lengthRet == 0) { + return nullptr; + } + size_t idx = 0; + const char *inputBuf = input.c_str(); + uint8 *buf = static_cast(malloc(sizeof(uint8) * lengthRet)); + CHECK_FATAL(buf != nullptr, "malloc failed"); + uint32 temp; + while (inputLength > 0) { + auto it0 = decodeMap.find(inputBuf[0]); + auto it1 = decodeMap.find(inputBuf[1]); + auto it2 = decodeMap.find(inputBuf[2]); + auto it3 = decodeMap.find(inputBuf[3]); + if (it0 == decodeMap.end() || it1 == decodeMap.end() || it2 == decodeMap.end() || it3 == decodeMap.end()) { + CHECK_FATAL(false, "invalid input"); + } + temp = (it0->second << kBase64Shift18) | (it1->second << kBase64Shift12) | (it2->second << kBase64Shift6) | + (it3->second << kBase64Shift0); + uint8 c0 = (temp >> kBase64Shift16) & kBase64MaskDecode; + uint8 c1 = (temp >> kBase64Shift8) & kBase64MaskDecode; + uint8 c2 = (temp >> kBase64Shift0) & kBase64MaskDecode; + if (inputBuf[2] == '=') { + CHECK_FATAL(inputLength == kBase64DecodeBaseLen, "'=' must be in last package"); + CHECK_FATAL(inputBuf[3] == '=', "'=' must be in last package"); + buf[idx++] = c0; + } else if (inputBuf[3] == '=') { + CHECK_FATAL(inputLength == kBase64DecodeBaseLen, "'=' must be in last package"); + buf[idx++] = c0; + buf[idx++] = c1; + } else { + buf[idx++] = c0; + buf[idx++] = c1; + buf[idx++] = c2; + } + inputLength -= kBase64DecodeBaseLen; + inputBuf += kBase64DecodeBaseLen; + } + return buf; +} +} // namespace maple diff --git a/src/hir2mpl/common/src/basic_io.cpp b/src/hir2mpl/common/src/basic_io.cpp new file mode 100644 index 0000000000000000000000000000000000000000..178142ec79e7ac710d8b1d43c0d8d8f62e5bce90 --- /dev/null +++ b/src/hir2mpl/common/src/basic_io.cpp @@ -0,0 +1,331 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "basic_io.h" +#include +#include +#include +#include +#include +#include +#include "mpl_logging.h" +#include "file_utils.h" + +namespace maple { +BasicIOMapFile::BasicIOMapFile(const std::string &name) + : fd(-1), ptr(nullptr), ptrMemMap(nullptr), length(0), fileName(name) {} + +BasicIOMapFile::BasicIOMapFile(const std::string &name, const uint8 *ptrIn, long lengthIn) + : fd(-1), ptr(ptrIn), ptrMemMap(nullptr), length(lengthIn), fileName(name) {} + +BasicIOMapFile::~BasicIOMapFile() { + ptr = nullptr; + ptrMemMap = nullptr; +} + +bool BasicIOMapFile::OpenAndMapImpl() { + fd = -1; + std::string realPath = FileUtils::GetRealPath(fileName); + fd = open(realPath.c_str(), O_RDONLY); + if (fd < 0) { + ERR(kLncErr, "Unable to open %s.\nError %d in open()", fileName.c_str(), errno); + return false; + } + long start = lseek(fd, 0L, SEEK_SET); + long end = lseek(fd, 0L, SEEK_END); + if (end > start) { + length = static_cast(end - start); + ptrMemMap = static_cast(mmap(NULL, length, PROT_READ, MAP_FILE | MAP_PRIVATE, fd, start)); + ptr = ptrMemMap; + return true; + } else { + length = 0; + ptrMemMap = nullptr; + ptr = nullptr; + if (close(fd) != 0) { + FATAL(kLncFatal, "close error"); + } + return false; + } +} + +void BasicIOMapFile::Close() { + if (fd > 0) { + if (munmap(ptrMemMap, length) != 0) { + FATAL(kLncFatal, "munmap error"); + } + ptrMemMap = nullptr; + ptr = nullptr; + if (close(fd) != 0) { + FATAL(kLncFatal, "close error"); + } + } + fd = -1; +} + +std::unique_ptr BasicIOMapFile::GenFileInMemory(const std::string &name, const uint8 *buf, size_t len) { + std::unique_ptr file = std::make_unique(name); + file->ptr = buf; + file->length = len; + return file; +} + +BasicIORead::BasicIORead(BasicIOMapFile &f, bool bigEndian) : file(f), isBigEndian(bigEndian), pos(0) {} + +uint8 BasicIORead::ReadUInt8() { + const uint8 *p = GetSafeBuffer(BasicIOEndian::kLengthByte); + pos += BasicIOEndian::kLengthByte; + return p[0]; +} + +uint8 BasicIORead::ReadUInt8(bool &success) { + const uint8 *p = GetBuffer(BasicIOEndian::kLengthByte); + if (p == nullptr) { + success = false; + return 0; + } + pos += BasicIOEndian::kLengthByte; + success = true; + return p[0]; +} + +int8 BasicIORead::ReadInt8() { + return static_cast(ReadUInt8()); +} + +int8 BasicIORead::ReadInt8(bool &success) { + return static_cast(ReadUInt8(success)); +} + +char BasicIORead::ReadChar() { + return static_cast(ReadUInt8()); +} + +char BasicIORead::ReadChar(bool &success) { + return static_cast(ReadUInt8(success)); +} + +uint16 BasicIORead::ReadUInt16() { + const uint8 *p = GetSafeBuffer(BasicIOEndian::kLengthWord); + pos += BasicIOEndian::kLengthWord; + if (isBigEndian) { + return BasicIOEndian::GetUInt16BigEndian(p); + } else { + return BasicIOEndian::GetUInt16LittleEndian(p); + } +} + +uint16 BasicIORead::ReadUInt16(bool &success) { + const uint8 *p = GetBuffer(BasicIOEndian::kLengthWord); + if (p == nullptr) { + success = false; + return 0; + } + pos += BasicIOEndian::kLengthWord; + success = true; + if (isBigEndian) { + return BasicIOEndian::GetUInt16BigEndian(p); + } else { + return BasicIOEndian::GetUInt16LittleEndian(p); + } +} + +int16 BasicIORead::ReadInt16() { + return static_cast(ReadUInt16()); +} + +int16 BasicIORead::ReadInt16(bool &success) { + return static_cast(ReadUInt16(success)); +} + +uint32 BasicIORead::ReadUInt32() { + const uint8 *p = GetSafeBuffer(BasicIOEndian::kLengthDWord); + pos += BasicIOEndian::kLengthDWord; + if (isBigEndian) { + return BasicIOEndian::GetUInt32BigEndian(p); + } else { + return 
BasicIOEndian::GetUInt32LittleEndian(p); + } +} + +uint32 BasicIORead::ReadUInt32(bool &success) { + const uint8 *p = GetBuffer(BasicIOEndian::kLengthDWord); + if (p == nullptr) { + success = false; + return 0; + } + pos += BasicIOEndian::kLengthDWord; + success = true; + if (isBigEndian) { + return BasicIOEndian::GetUInt32BigEndian(p); + } else { + return BasicIOEndian::GetUInt32LittleEndian(p); + } +} + +int32 BasicIORead::ReadInt32() { + return static_cast(ReadUInt32()); +} + +int32 BasicIORead::ReadInt32(bool &success) { + return static_cast(ReadUInt32(success)); +} + +uint64 BasicIORead::ReadUInt64() { + const uint8 *p = GetSafeBuffer(BasicIOEndian::kLengthQWord); + pos += BasicIOEndian::kLengthQWord; + if (isBigEndian) { + return BasicIOEndian::GetUInt64BigEndian(p); + } else { + return BasicIOEndian::GetUInt64LittleEndian(p); + } +} + +uint64 BasicIORead::ReadUInt64(bool &success) { + const uint8 *p = GetBuffer(BasicIOEndian::kLengthQWord); + if (p == nullptr) { + success = false; + return 0; + } + pos += BasicIOEndian::kLengthQWord; + success = true; + if (isBigEndian) { + return BasicIOEndian::GetUInt64BigEndian(p); + } else { + return BasicIOEndian::GetUInt64LittleEndian(p); + } +} + +int64 BasicIORead::ReadInt64() { + return static_cast(ReadUInt64()); +} + +int64 BasicIORead::ReadInt64(bool &success) { + return static_cast(ReadUInt64(success)); +} + +float BasicIORead::ReadFloat() { + union { + uint32 iv; + float fv; + } v; + v.iv = ReadUInt32(); + return v.fv; +} + +float BasicIORead::ReadFloat(bool &success) { + union { + uint32 iv; + float fv; + } v; + v.iv = ReadUInt32(success); + return v.fv; +} + +double BasicIORead::ReadDouble() { + union { + uint64 lv; + double dv; + } v; + v.lv = ReadUInt64(); + return v.dv; +} + +double BasicIORead::ReadDouble(bool &success) { + union { + uint64 lv; + double dv; + } v; + v.lv = ReadUInt64(success); + return v.dv; +} + +void BasicIORead::ReadBufferUInt8(uint8 *dst, uint32 length) { + const uint8 *p = GetSafeBuffer(length); + pos += length; + errno_t err = memcpy_s(dst, length, p, length); + CHECK_FATAL(err == EOK, "memcpy_s failed"); +} + +void BasicIORead::ReadBufferUInt8(uint8 *dst, uint32 length, bool &success) { + const uint8 *p = GetBuffer(length); + if (p == nullptr) { + success = false; + return; + } + pos += length; + success = true; + errno_t err = memcpy_s(dst, length, p, length); + CHECK_FATAL(err == EOK, "memcpy_s failed"); +} + +void BasicIORead::ReadBufferInt8(int8 *dst, uint32 length) { + CHECK_NULL_FATAL(dst); + const uint8 *p = GetSafeBuffer(length); + pos += length; + errno_t err = memcpy_s(dst, length, p, length); + CHECK_FATAL(err == EOK, "memcpy_s failed"); +} + +void BasicIORead::ReadBufferInt8(int8 *dst, uint32 length, bool &success) { + CHECK_NULL_FATAL(dst); + const uint8 *p = GetBuffer(length); + if (p == nullptr) { + success = false; + return; + } + pos += length; + success = true; + errno_t err = memcpy_s(dst, length, p, length); + CHECK_FATAL(err == EOK, "memcpy_s failed"); +} + +void BasicIORead::ReadBufferChar(char *dst, uint32 length) { + const uint8 *p = GetSafeBuffer(length); + pos += length; + errno_t err = memcpy_s(dst, length, p, length); + CHECK_FATAL(err == EOK, "memcpy_s failed"); +} + +void BasicIORead::ReadBufferChar(char *dst, uint32 length, bool &success) { + const uint8 *p = GetBuffer(length); + if (p == nullptr) { + success = false; + return; + } + pos += length; + success = true; + errno_t err = memcpy_s(dst, length, p, length); + CHECK_FATAL(err == EOK, "memcpy_s failed"); +} + 
+std::string BasicIORead::ReadString(uint32 length) { + const void *p = GetSafeBuffer(length); + const char *pchar = static_cast(p); + pos += length; + return std::string(pchar, length); +} + +std::string BasicIORead::ReadString(uint32 length, bool &success) { + const void *p = GetBuffer(length); + const char *pchar = static_cast(p); + if (p == nullptr) { + success = false; + return ""; + } + pos += length; + success = true; + return std::string(pchar, length); +} +} // namespace maple diff --git a/src/hir2mpl/common/src/enhance_c_checker.cpp b/src/hir2mpl/common/src/enhance_c_checker.cpp new file mode 100644 index 0000000000000000000000000000000000000000..eaadc191191588dad972b7143d831dc92bf471c3 --- /dev/null +++ b/src/hir2mpl/common/src/enhance_c_checker.cpp @@ -0,0 +1,2232 @@ +/* + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "enhance_c_checker.h" +#include "ast_parser.h" +#include "ast_expr.h" +#include "ast_stmt.h" +#include "ast_decl_builder.h" +#include "feir_builder.h" +#include "fe_manager.h" +#include "fe_macros.h" + +namespace maple { +const std::string kBoundsBuiltFunc = "__builtin_dynamic_bounds_cast"; +void ASTParser::ProcessNonnullFuncPtrAttrs(MapleAllocator &allocator, const clang::ValueDecl &valueDecl, + ASTDecl &astVar) { + const MIRFuncType *funcType = FEUtils::GetFuncPtrType(*astVar.GetTypeDesc().front()); + if (funcType == nullptr) { + return; + } + std::vector attrsVec = funcType->GetParamAttrsList(); + TypeAttrs retAttr = funcType->GetRetAttrs(); + // nonnull with args in function type pointers need marking nonnull arg + for (const auto *nonNull : valueDecl.specific_attrs()) { + if (nonNull->args_size() == 0) { + continue; + } + for (const clang::ParamIdx ¶mIdx : nonNull->args()) { + // The clang ensures that nonnull attribute only applies to pointer parameter + unsigned int idx = paramIdx.getASTIndex(); + if (idx >= attrsVec.size()) { + continue; + } + attrsVec[idx].SetAttr(ATTR_nonnull); + } + } + if (valueDecl.hasAttr()) { + retAttr.SetAttr(ATTR_nonnull); + } + MIRType *newFuncType = GlobalTables::GetTypeTable().GetOrCreateFunctionType( + funcType->GetRetTyIdx(), funcType->GetParamTypeList(), attrsVec, funcType->IsVarargs(), retAttr); + astVar.SetTypeDesc(MapleVector({GlobalTables::GetTypeTable().GetOrCreatePointerType( + *GlobalTables::GetTypeTable().GetOrCreatePointerType(*newFuncType))}, allocator.Adapter())); +} + +bool ENCChecker::HasNonnullAttrInExpr(MIRBuilder &mirBuilder, const UniqueFEIRExpr &expr, bool isNested) { + if (expr->GetKind() == kExprDRead) { + if (isNested) { // skip multi-dimensional pointer + return true; + } + FEIRExprDRead *dread = static_cast(expr.get()); + MIRSymbol *symbol = dread->GetVar()->GenerateMIRSymbol(mirBuilder); + if (expr->GetFieldID() == 0) { + return symbol->GetAttr(ATTR_nonnull); + } else { + FieldID fieldID = expr->GetFieldID(); + CHECK_FATAL(symbol->GetType()->IsStructType(), "basetype must be StructType"); + FieldPair fieldPair = 
static_cast(symbol->GetType())->TraverseToFieldRef(fieldID); + return fieldPair.second.second.GetAttr(FLDATTR_nonnull); + } + } else if (expr->GetKind() == kExprIRead) { + FEIRExprIRead *iread = static_cast(expr.get()); + FieldID fieldID = expr->GetFieldID(); + if (fieldID == 0) { + return HasNonnullAttrInExpr(mirBuilder, iread->GetClonedOpnd(), true); + } + MIRType *pointerType = iread->GetClonedPtrType()->GenerateMIRTypeAuto(); + CHECK_FATAL(pointerType->IsMIRPtrType(), "Must be ptr type!"); + MIRType *baseType = static_cast(pointerType)->GetPointedType(); + CHECK_FATAL(baseType->IsStructType(), "basetype must be StructType"); + FieldPair fieldPair = static_cast(baseType)->TraverseToFieldRef(fieldID); + return fieldPair.second.second.GetAttr(FLDATTR_nonnull); + } else if (isNested && expr->GetKind() == kExprAddrofArray) { + return false; + } else { + return true; + } +} + +bool ENCChecker::HasNullExpr(const UniqueFEIRExpr &expr) { + if (expr == nullptr || expr->GetKind() != kExprUnary || expr->GetPrimType() != PTY_ptr) { + return false; + } + const UniqueFEIRExpr &cstExpr = static_cast(expr.get())->GetOpnd(); + return FEIRBuilder::IsZeroConstExpr(cstExpr); +} + +void ENCChecker::CheckNonnullGlobalVarInit(const MIRSymbol &sym, const MIRConst *cst) { + if (!FEOptions::GetInstance().IsNpeCheckDynamic()) { + return; + } + if (sym.GetAttr(ATTR_nonnull)) { + if ((cst != nullptr && cst->IsZero()) || (cst == nullptr && sym.GetAttr(ATTR_static_init_zero))) { + FE_ERR(kLncErr, FEUtils::GetSrcLocationForMIRSymbol(sym), "null assignment of nonnull pointer"); + } else if (cst == nullptr) { + FE_ERR(kLncErr, FEUtils::GetSrcLocationForMIRSymbol(sym), "nonnull parameter is uninitialized when defined"); + } + } else if (sym.GetAttr(ATTR_static_init_zero) && HasNonnullFieldInStruct(*sym.GetType())) { + auto *structType = static_cast(sym.GetType()); + for (size_t i = 0; i < structType->GetFieldsSize(); ++i) { + if (!structType->GetFieldsElemt(i).second.second.GetAttr(FLDATTR_nonnull)) { + continue; + } + FE_ERR(kLncErr, FEUtils::GetSrcLocationForMIRSymbol(sym), + "null assignment of nonnull field pointer. [field name: %s]", + GlobalTables::GetStrTable().GetStringFromStrIdx(structType->GetFieldsElemt(i).first).c_str()); + } + } +} + +void ENCChecker::CheckNullFieldInGlobalStruct(MIRType &type, MIRAggConst &cst, const MapleVector &initExprs) { + if (!FEOptions::GetInstance().IsNpeCheckDynamic() || !ENCChecker::HasNonnullFieldInStruct(type)) { + return; + } + auto &structType = static_cast(type); + for (size_t i = 0; i < structType.GetFieldsSize(); ++i) { + if (!structType.GetFieldsElemt(i).second.second.GetAttr(FLDATTR_nonnull)) { + continue; + } + size_t idx = structType.GetKind() == kTypeUnion ? 0 : i; // union only is one element + if (idx < cst.GetConstVec().size() && cst.GetConstVecItem(idx) != nullptr && cst.GetConstVecItem(idx)->IsZero()) { + FE_ERR(kLncErr, initExprs[idx]->GetSrcLoc(), "null assignment of nonnull field pointer. 
[field name: %s]", + GlobalTables::GetStrTable().GetStringFromStrIdx(structType.GetFieldsElemt(i).first).c_str()); + } + } +} + +void ENCChecker::CheckNonnullLocalVarInit(const MIRSymbol &sym, const ASTExpr *initExpr) { + if (!FEOptions::GetInstance().IsNpeCheckDynamic() || initExpr != nullptr) { + return; + } + if (sym.GetAttr(ATTR_nonnull)) { + if (sym.GetAttr(ATTR_static_init_zero)) { + FE_ERR(kLncErr, FEUtils::GetSrcLocationForMIRSymbol(sym), "null assignment of nonnull pointer"); + } else { + FE_ERR(kLncErr, FEUtils::GetSrcLocationForMIRSymbol(sym), + "error: nonnull parameter is uninitialized when defined"); + } + } else if (sym.GetAttr(ATTR_static_init_zero) && HasNonnullFieldInStruct(*sym.GetType())) { + auto *structType = static_cast(sym.GetType()); + for (size_t i = 0; i < structType->GetFieldsSize(); ++i) { + if (structType->GetFieldsElemt(i).second.second.GetAttr(FLDATTR_nonnull)) { + FE_ERR(kLncErr, FEUtils::GetSrcLocationForMIRSymbol(sym), + "null assignment of nonnull field pointer. [field name: %s]", + GlobalTables::GetStrTable().GetStringFromStrIdx(structType->GetFieldsElemt(i).first).c_str());} + } + } +} + +void ENCChecker::CheckNonnullLocalVarInit(const MIRSymbol &sym, const UniqueFEIRExpr &initFEExpr, + std::list &stmts) { + if (!FEOptions::GetInstance().IsNpeCheckDynamic() || !sym.GetAttr(ATTR_nonnull)) { + return; + } + if (HasNullExpr(initFEExpr)) { + FE_ERR(kLncErr, FEUtils::GetSrcLocationForMIRSymbol(sym), "null assignment of nonnull pointer"); + return; + } + if (initFEExpr->GetPrimType() == PTY_ptr) { + UniqueFEIRStmt stmt = std::make_unique(OP_assignassertnonnull, initFEExpr->Clone()); + stmt->SetSrcLoc(FEUtils::GetSrcLocationForMIRSymbol(sym)); + stmts.emplace_back(std::move(stmt)); + } +} + +std::string ENCChecker::GetNthStr(size_t index) { + switch (index) { + case 0: + return "1st"; + case 1: + return "2nd"; + case 2: + return "3rd"; + default: { + std::ostringstream oss; + oss << index + 1 << "th"; + return oss.str(); + } + } +} + +std::string ENCChecker::PrintParamIdx(const std::list &idxs) { + std::ostringstream os; + for (auto iter = idxs.begin(); iter != idxs.end(); ++iter) { + if (iter != idxs.begin()) { + os << ", "; + } + os << GetNthStr(*iter); + } + return os.str(); +} + +void ENCChecker::CheckNonnullArgsAndRetForFuncPtr(const MIRType &dstType, const UniqueFEIRExpr &srcExpr, + const Loc &loc) { + if (!FEOptions::GetInstance().IsNpeCheckDynamic() || !srcExpr->IsEnhancedChecking()) { + return; + } + const MIRFuncType *funcType = FEUtils::GetFuncPtrType(dstType); + if (funcType == nullptr) { + return; + } + if (srcExpr->GetKind() == kExprAddrofFunc) { // check func ptr l-value and &func decl r-value + GStrIdx strIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName( + static_cast(srcExpr.get())->GetFuncAddr()); + MIRFunction *srcFunc = FEManager::GetTypeManager().GetMIRFunction(strIdx, false); + CHECK_FATAL(srcFunc != nullptr, "can not get MIRFunction"); + std::list errIdxs; + for (size_t i = 0; i < srcFunc->GetParamSize() && i < funcType->GetParamAttrsList().size(); ++i) { + if (srcFunc->GetNthParamAttr(i).GetAttr(ATTR_nonnull) != funcType->GetNthParamAttrs(i).GetAttr(ATTR_nonnull)) { + errIdxs.emplace_back(i); + } + } + if (!errIdxs.empty()) { + FE_ERR(kLncErr, loc, "function pointer and assigned function %s are mismatched " + "for the %s argument of nonnull attributes", srcFunc->GetName().c_str(), PrintParamIdx(errIdxs).c_str()); + } + if (srcFunc->GetFuncAttrs().GetAttr(FUNCATTR_nonnull) != funcType->GetRetAttrs().GetAttr(ATTR_nonnull)) { + 
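+    // The assigned function's FUNCATTR_nonnull (a nonnull-returning function) must agree with the
+    // ATTR_nonnull recorded in the function pointer type's return attributes; otherwise the
+    // diagnostic below is reported.  Illustrative C source that would reach this branch (a sketch;
+    // whether this exact spelling is diagnosed depends on the front-end configuration):
+    //   int *get(void) __attribute__((returns_nonnull));
+    //   int *(*fp)(void) = get;  // fp's type carries no returns_nonnull attribute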
FE_ERR(kLncErr, loc, "function pointer and target function's nonnull attributes are mismatched for " + "the return value"); + } + } + const MIRFuncType *srcFuncType = FEUtils::GetFuncPtrType(*srcExpr->GetType()->GenerateMIRTypeAuto()); + if (srcFuncType != nullptr) { // check func ptr l-value and func ptr r-value + std::list errIdxs; + for (size_t i = 0; i < srcFuncType->GetParamAttrsList().size() && i < funcType->GetParamAttrsList().size(); ++i) { + if (srcFuncType->GetNthParamAttrs(i).GetAttr(ATTR_nonnull) != + funcType->GetNthParamAttrs(i).GetAttr(ATTR_nonnull)) { + errIdxs.emplace_back(i); + } + } + if (!errIdxs.empty()) { + FE_ERR(kLncErr, loc, "function pointer's nonnull attributes are mismatched for the %s argument", + PrintParamIdx(errIdxs).c_str()); + } + if (srcFuncType->GetRetAttrs().GetAttr(ATTR_nonnull) != funcType->GetRetAttrs().GetAttr(ATTR_nonnull)) { + FE_ERR(kLncErr, loc, "function pointer's nonnull attributes are mismatched for the return value"); + } + } +} + +void FEIRStmtDAssign::CheckNonnullArgsAndRetForFuncPtr(const MIRBuilder &mirBuilder) const { + if (!FEOptions::GetInstance().IsNpeCheckDynamic() || ENCChecker::IsUnsafeRegion(mirBuilder)) { + return; + } + MIRType *baseType = var->GetType()->GenerateMIRTypeAuto(); + if (fieldID != 0) { + baseType = FEUtils::GetStructFieldType(static_cast(baseType), fieldID); + } + ENCChecker::CheckNonnullArgsAndRetForFuncPtr(*baseType, expr, loc); +} + +void FEIRStmtIAssign::CheckNonnullArgsAndRetForFuncPtr(const MIRBuilder &mirBuilder, const MIRType &baseType) const { + if (!FEOptions::GetInstance().IsNpeCheckDynamic() || ENCChecker::IsUnsafeRegion(mirBuilder)) { + return; + } + MIRType *fieldType = FEUtils::GetStructFieldType(static_cast(&baseType), fieldID); + ENCChecker::CheckNonnullArgsAndRetForFuncPtr(*fieldType, baseExpr, loc); +} + +bool ENCChecker::HasNonnullFieldInStruct(const MIRType &mirType) { + if (mirType.IsStructType() && FEManager::GetTypeManager().IsOwnedNonnullFieldStructSet(mirType.GetTypeIndex())) { + return true; + } + return false; +} + +bool ENCChecker::HasNonnullFieldInPtrStruct(const MIRType &mirType) { + if (mirType.IsMIRPtrType()) { + MIRType *structType = static_cast(mirType).GetPointedType(); + if (structType != nullptr && HasNonnullFieldInStruct(*structType)) { + return true; + } + } + return false; +} + +void ASTCallExpr::CheckNonnullFieldInStruct() const { + if (!FEOptions::GetInstance().IsNpeCheckDynamic()) { + return; + } + std::list nullStmts; + UniqueFEIRExpr baseExpr = nullptr; + if (GetFuncName() == "bzero" && args.size() == 2) { + baseExpr = args[0]->Emit2FEExpr(nullStmts); + } else if (GetFuncName() == "memset" && args.size() == 3 && + FEIRBuilder::IsZeroConstExpr(args[1]->Emit2FEExpr(nullStmts))) { + baseExpr = args[0]->Emit2FEExpr(nullStmts); + } + if (baseExpr == nullptr) { + return; + } + MIRType *mirType = ENCChecker::GetTypeFromAddrExpr(baseExpr); // check addrof or iaddrof + if (mirType != nullptr) { + mirType = GlobalTables::GetTypeTable().GetOrCreatePointerType(*mirType); + } else if ((baseExpr->GetKind() == kExprDRead || baseExpr->GetKind() == kExprIRead) && + baseExpr->GetType() != nullptr) { + mirType = baseExpr->GetType()->GenerateMIRTypeAuto(); + } + if (mirType != nullptr && ENCChecker::HasNonnullFieldInPtrStruct(*mirType)) { + FE_ERR(kLncErr, loc, "null assignment of nonnull structure field pointer in %s", GetFuncName().c_str()); + } +} + +void ENCChecker::CheckNonnullFieldInStruct(const MIRType &src, const MIRType &dst, const Loc &loc) { + if 
(!FEOptions::GetInstance().IsNpeCheckDynamic() || + !dst.IsMIRPtrType() || !src.IsMIRPtrType() || + dst.GetTypeIndex() == src.GetTypeIndex()) { + return; + } + if (ENCChecker::HasNonnullFieldInPtrStruct(dst)) { + FE_ERR(kLncErr, loc, "null assignment risk of nonnull field pointer"); + } +} + +// --------------------------- +// process boundary attr +// --------------------------- +void ASTParser::ProcessBoundaryFuncAttrs(MapleAllocator &allocator, const clang::FunctionDecl &funcDecl, + ASTFunc &astFunc) { + if (!FEOptions::GetInstance().IsBoundaryCheckDynamic()) { + return; + } + for (const auto *returnsCountAttr : funcDecl.specific_attrs()) { + clang::Expr *expr = returnsCountAttr->getLenExpr(); + ASTExpr *lenExpr = ProcessExpr(allocator, expr); + if (lenExpr == nullptr) { + continue; + } + ProcessBoundaryLenExprInFunc(allocator, funcDecl, static_cast(-1), astFunc, lenExpr, true); + } + for (const auto *countAttr : funcDecl.specific_attrs()) { + clang::Expr *expr = countAttr->getLenExpr(); + ASTExpr *lenExpr = ProcessExpr(allocator, expr); + if (lenExpr == nullptr) { + continue; + } + if (countAttr->index_size() == 0) { + // Lack of attribute index parameters means that only one pointer parameter is + // implicitly marked as boundary var in func. + for (unsigned int i = 0; i < astFunc.GetParamDecls().size(); ++i) { + if (astFunc.GetParamDecls()[i]->GetTypeDesc().front()->IsMIRPtrType()) { + ProcessBoundaryLenExprInFunc(allocator, funcDecl, i, astFunc, lenExpr, true); + } + } + break; + } + for (const clang::ParamIdx ¶mIdx : countAttr->index()) { + // The clang ensures that the attribute only applies to pointer parameter + unsigned int idx = paramIdx.getASTIndex(); + if (idx >= astFunc.GetParamDecls().size()) { + continue; + } + ProcessBoundaryLenExprInFunc(allocator, funcDecl, idx, astFunc, lenExpr, true); + } + } + ProcessByteBoundaryFuncAttrs(allocator, funcDecl, astFunc); + ProcessBoundaryFuncAttrsByIndex(allocator, funcDecl, astFunc); +} + +void ASTParser::ProcessByteBoundaryFuncAttrs(MapleAllocator &allocator, const clang::FunctionDecl &funcDecl, + ASTFunc &astFunc) { + for (const auto *returnsCountAttr : funcDecl.specific_attrs()) { + clang::Expr *expr = returnsCountAttr->getLenExpr(); + ASTExpr *lenExpr = ProcessExpr(allocator, expr); + if (lenExpr == nullptr) { + continue; + } + ProcessBoundaryLenExprInFunc(allocator, funcDecl, static_cast(-1), astFunc, lenExpr, false); + } + for (const auto *countAttr : funcDecl.specific_attrs()) { + clang::Expr *expr = countAttr->getLenExpr(); + ASTExpr *lenExpr = ProcessExpr(allocator, expr); + if (lenExpr == nullptr) { + continue; + } + if (countAttr->index_size() == 0) { + // Lack of attribute index parameters means that only one pointer parameter is + // implicitly marked as boundary var in func. 
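+      // Same traversal as the count path above; the only difference is the final argument of
+      // ProcessBoundaryLenExprInFunc: false here means lenExpr is taken as a raw byte length,
+      // while the count path passes true and the element count is later scaled by the pointee
+      // size in ProcessBoundaryLenExpr (which also records the choice via SetIsBytedLen(!isSize)).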
+ for (unsigned int i = 0; i < astFunc.GetParamDecls().size(); ++i) { + if (astFunc.GetParamDecls()[i]->GetTypeDesc().front()->IsMIRPtrType()) { + ProcessBoundaryLenExprInFunc(allocator, funcDecl, i, astFunc, lenExpr, false); + } + } + break; + } + for (const clang::ParamIdx ¶mIdx : countAttr->index()) { + // The clang ensures that the attribute only applies to pointer parameter + unsigned int idx = paramIdx.getASTIndex(); + if (idx >= astFunc.GetParamDecls().size()) { + continue; + } + ProcessBoundaryLenExprInFunc(allocator, funcDecl, idx, astFunc, lenExpr, false); + } + } +} + +void ASTParser::ProcessBoundaryFuncAttrsByIndex(MapleAllocator &allocator, const clang::FunctionDecl &funcDecl, + ASTFunc &astFunc) { + for (const auto *countIndexAttr : funcDecl.specific_attrs()) { + unsigned int lenIdx = countIndexAttr->getLenVarIndex().getASTIndex(); + for (const clang::ParamIdx ¶mIdx : countIndexAttr->index()) { + unsigned int idx = paramIdx.getASTIndex(); + if (idx >= astFunc.GetParamDecls().size()) { + continue; + } + ProcessBoundaryLenExprInFunc(allocator, funcDecl, idx, astFunc, lenIdx, true); + } + } + for (const auto *returnsCountIndexAttr : funcDecl.specific_attrs()) { + unsigned int retLenIdx = returnsCountIndexAttr->getLenVarIndex().getASTIndex(); + ProcessBoundaryLenExprInFunc(allocator, funcDecl, static_cast(-1), astFunc, retLenIdx, true); + } + for (const auto *byteCountIndexAttr : funcDecl.specific_attrs()) { + unsigned int lenIdx = byteCountIndexAttr->getLenVarIndex().getASTIndex(); + for (const clang::ParamIdx ¶mIdx : byteCountIndexAttr->index()) { + unsigned int idx = paramIdx.getASTIndex(); + if (idx >= astFunc.GetParamDecls().size()) { + continue; + } + ProcessBoundaryLenExprInFunc(allocator, funcDecl, idx, astFunc, lenIdx, false); + } + } + for (const auto *returnsByteCountIndexAttr : funcDecl.specific_attrs()) { + unsigned int retLenIdx = returnsByteCountIndexAttr->getLenVarIndex().getASTIndex(); + ProcessBoundaryLenExprInFunc(allocator, funcDecl, static_cast(-1), astFunc, retLenIdx, false); + } +} + +void ASTParser::ProcessBoundaryParamAttrs(MapleAllocator &allocator, const clang::FunctionDecl &funcDecl, + ASTFunc &astFunc) { + if (!FEOptions::GetInstance().IsBoundaryCheckDynamic()) { + return; + } + for (unsigned int i = 0; i < funcDecl.getNumParams(); ++i) { + const clang::ParmVarDecl *parmDecl = funcDecl.getParamDecl(i); + if (parmDecl->getKind() == clang::Decl::Function) { + continue; + } + for (const auto *countAttr : parmDecl->specific_attrs()) { + clang::Expr *expr = countAttr->getLenExpr(); + if (countAttr->index_size() > 0) { + continue; // boundary attrs with index args are only marked function pointers + } + ASTExpr *lenExpr = ProcessExpr(allocator, expr); + if (lenExpr == nullptr) { + continue; + } + ProcessBoundaryLenExprInFunc(allocator, funcDecl, i, astFunc, lenExpr, true); + } + for (const auto *byteCountAttr : parmDecl->specific_attrs()) { + clang::Expr *expr = byteCountAttr->getLenExpr(); + if (byteCountAttr->index_size() > 0) { + continue; // boundary attrs with index args are only marked function pointers + } + ASTExpr *lenExpr = ProcessExpr(allocator, expr); + if (lenExpr == nullptr) { + continue; + } + ProcessBoundaryLenExprInFunc(allocator, funcDecl, i, astFunc, lenExpr, false); + } + } + ProcessBoundaryParamAttrsByIndex(allocator, funcDecl, astFunc); +} + +void ASTParser::ProcessBoundaryParamAttrsByIndex(MapleAllocator &allocator, const clang::FunctionDecl &funcDecl, + ASTFunc &astFunc) { + if (!FEOptions::GetInstance().IsBoundaryCheckDynamic()) { + 
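+    // All *_index boundary attributes on parameters are ignored unless dynamic boundary checking
+    // is enabled, so no boundary metadata is attached in that case.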
return; + } + for (unsigned int i = 0; i < funcDecl.getNumParams(); ++i) { + const clang::ParmVarDecl *parmDecl = funcDecl.getParamDecl(i); + for (const auto *countIndexAttr : parmDecl->specific_attrs()) { + if (countIndexAttr->index_size() > 0) { + continue; // boundary attrs with index args are only marked function pointers + } + unsigned int lenIdx = countIndexAttr->getLenVarIndex().getASTIndex(); + ProcessBoundaryLenExprInFunc(allocator, funcDecl, i, astFunc, lenIdx, true); + } + for (const auto *byteCountIndexAttr : parmDecl->specific_attrs()) { + if (byteCountIndexAttr->index_size() > 0) { + continue; // boundary attrs with index args are only marked function pointers + } + unsigned int lenIdx = byteCountIndexAttr->getLenVarIndex().getASTIndex(); + ProcessBoundaryLenExprInFunc(allocator, funcDecl, i, astFunc, lenIdx, false); + } + } +} + +void ASTParser::ProcessBoundaryVarAttrs(MapleAllocator &allocator, const clang::VarDecl &varDecl, ASTVar &astVar) { + if (!FEOptions::GetInstance().IsBoundaryCheckDynamic()) { + return; + } + for (const auto *countAttr : varDecl.specific_attrs()) { + clang::Expr *expr = countAttr->getLenExpr(); + ASTExpr *lenExpr = ProcessExpr(allocator, expr); + if (lenExpr == nullptr) { + continue; + } + if (countAttr->index_size() > 0) { + continue; // boundary attrs with index args are only marked function pointers + } + ProcessBoundaryLenExprInVar(allocator, astVar, varDecl, lenExpr, true); + } + for (const auto *byteCountAttr : varDecl.specific_attrs()) { + clang::Expr *expr = byteCountAttr->getLenExpr(); + ASTExpr *lenExpr = ProcessExpr(allocator, expr); + if (lenExpr == nullptr) { + continue; + } + if (byteCountAttr->index_size() > 0) { + continue; // boundary attrs with index args are only marked function pointers + } + ProcessBoundaryLenExprInVar(allocator, astVar, varDecl, lenExpr, false); + } +} + +void ASTParser::ProcessBoundaryFuncPtrAttrs(MapleAllocator &allocator, const clang::ValueDecl &valueDecl, + ASTDecl &astDecl) { + if (!FEOptions::GetInstance().IsBoundaryCheckDynamic()) { + return; + } + const MIRFuncType *funcType = FEUtils::GetFuncPtrType(*astDecl.GetTypeDesc().front()); + if (funcType == nullptr || !valueDecl.getType()->isFunctionPointerType()) { + return; + } + clang::QualType qualType = valueDecl.getType()->getPointeeType(); + const clang::FunctionType *clangFuncType = qualType->getAs(); + if (clangFuncType == nullptr) { + return; + } + const clang::FunctionProtoType *proto = llvm::dyn_cast(clangFuncType); + if (proto == nullptr) { + return; + } + std::vector attrsVec = funcType->GetParamAttrsList(); + TypeAttrs retAttr = funcType->GetRetAttrs(); + bool isUpdated = false; + for (const auto *countAttr : valueDecl.specific_attrs()) { + if (ProcessBoundaryFuncPtrAttrsForParams(countAttr, allocator, *funcType, *proto, attrsVec)) { + isUpdated = true; + } + } + for (const auto *byteCountAttr : valueDecl.specific_attrs()) { + if (ProcessBoundaryFuncPtrAttrsForParams(byteCountAttr, allocator, *funcType, *proto, attrsVec)) { + isUpdated = true; + } + } + for (const auto *returnsCountAttr : valueDecl.specific_attrs()) { + if (ProcessBoundaryFuncPtrAttrsForRet(returnsCountAttr, allocator, *funcType, *clangFuncType, retAttr)) { + isUpdated = true; + } + } + for (const auto *returnsByteCountAttr : valueDecl.specific_attrs()) { + if (ProcessBoundaryFuncPtrAttrsForRet(returnsByteCountAttr, allocator, *funcType, *clangFuncType, retAttr)) { + isUpdated = true; + } + } + if (isUpdated) { + MIRType *newFuncType = 
GlobalTables::GetTypeTable().GetOrCreateFunctionType( + funcType->GetRetTyIdx(), funcType->GetParamTypeList(), attrsVec, funcType->IsVarargs(), retAttr); + astDecl.SetTypeDesc(MapleVector({GlobalTables::GetTypeTable().GetOrCreatePointerType( + *GlobalTables::GetTypeTable().GetOrCreatePointerType(*newFuncType))}, allocator.Adapter())); + } + ProcessBoundaryFuncPtrAttrsByIndex(allocator, valueDecl, astDecl, *funcType); +} + +template +bool ASTParser::ProcessBoundaryFuncPtrAttrsForParams(T *attr, MapleAllocator &allocator, const MIRFuncType &funcType, + const clang::FunctionProtoType &proto, + std::vector &attrsVec) { + bool isUpdated = false; + clang::Expr *expr = attr->getLenExpr(); + ASTExpr *lenExpr = ProcessExpr(allocator, expr); + if (attr->index_size() == 0 || lenExpr == nullptr) { + return isUpdated; + } + std::vector typesVec = funcType.GetParamTypeList(); + for (const clang::ParamIdx ¶mIdx : attr->index()) { + unsigned int idx = paramIdx.getASTIndex(); + if (idx >= attrsVec.size() || idx >= typesVec.size()) { + continue; + } + MIRType *ptrType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(typesVec[idx]); + ASTVar *tmpDecl = ASTDeclsBuilder::ASTVarBuilder(allocator, MapleString("", allocator.GetMemPool()), "tmpVar", + MapleVector({ptrType}, allocator.Adapter()), GenericAttrs()); + bool isByte = std::is_same::type, clang::ByteCountAttr>::value; + ProcessBoundaryLenExprInVar(allocator, *tmpDecl, proto.getParamType(idx), lenExpr, !isByte); + ENCChecker::InsertBoundaryInAtts(attrsVec[idx], tmpDecl->GetBoundaryInfo()); + isUpdated = true; + } + return isUpdated; +} + +template +bool ASTParser::ProcessBoundaryFuncPtrAttrsForRet(T *attr, MapleAllocator &allocator, const MIRFuncType &funcType, + const clang::FunctionType &clangFuncType, TypeAttrs &retAttr) { + clang::Expr *expr = attr->getLenExpr(); + ASTExpr *lenExpr = ProcessExpr(allocator, expr); + if (lenExpr == nullptr) { + return false; + } + MIRType *ptrType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(funcType.GetRetTyIdx()); + ASTVar *tmpRetDecl = ASTDeclsBuilder::ASTVarBuilder(allocator, MapleString("", allocator.GetMemPool()), + "tmpRetVar", MapleVector({ptrType}, allocator.Adapter()), GenericAttrs()); + bool isByte = std::is_same::type, clang::ReturnsByteCountAttr>::value; + ProcessBoundaryLenExprInVar(allocator, *tmpRetDecl, clangFuncType.getReturnType(), lenExpr, !isByte); + ENCChecker::InsertBoundaryInAtts(retAttr, tmpRetDecl->GetBoundaryInfo()); + return true; +} + +void ASTParser::ProcessBoundaryFuncPtrAttrsByIndex(MapleAllocator &allocator, const clang::ValueDecl &valueDecl, + ASTDecl &astDecl, const MIRFuncType &funcType) { + std::vector attrsVec = funcType.GetParamAttrsList(); + TypeAttrs retAttr = funcType.GetRetAttrs(); + bool isUpdated = false; + for (const auto *countAttr : valueDecl.specific_attrs()) { + if (ProcessBoundaryFuncPtrAttrsByIndexForParams(countAttr, astDecl, funcType, attrsVec)) { + isUpdated = true; + } + } + for (const auto *byteCountAttr : valueDecl.specific_attrs()) { + if (ProcessBoundaryFuncPtrAttrsByIndexForParams(byteCountAttr, astDecl, funcType, attrsVec)) { + isUpdated = true; + } + } + for (const auto *returnsCountIndexAttr : valueDecl.specific_attrs()) { + unsigned int lenIdx = returnsCountIndexAttr->getLenVarIndex().getASTIndex(); + retAttr.GetAttrBoundary().SetLenParamIdx(static_cast(lenIdx)); + isUpdated = true; + } + for (const auto *returnsByteCountIndexAttr : valueDecl.specific_attrs()) { + unsigned int lenIdx = returnsByteCountIndexAttr->getLenVarIndex().getASTIndex(); + 
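+    // For a returns-byte-count-by-index attribute, the boundary length of the returned pointer is
+    // the parameter at position lenIdx; it is recorded on the return TypeAttrs and flagged as a
+    // byte length below (SetIsBytedLen(true)), so no element-size scaling is applied afterwards.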
retAttr.GetAttrBoundary().SetLenParamIdx(static_cast(lenIdx)); + retAttr.GetAttrBoundary().SetIsBytedLen(true); + isUpdated = true; + } + if (isUpdated) { + MIRType *newFuncType = GlobalTables::GetTypeTable().GetOrCreateFunctionType( + funcType.GetRetTyIdx(), funcType.GetParamTypeList(), attrsVec, funcType.IsVarargs(), retAttr); + astDecl.SetTypeDesc(MapleVector({GlobalTables::GetTypeTable().GetOrCreatePointerType( + *GlobalTables::GetTypeTable().GetOrCreatePointerType(*newFuncType))}, allocator.Adapter())); + } +} + +template +bool ASTParser::ProcessBoundaryFuncPtrAttrsByIndexForParams(T *attr, ASTDecl &astDecl, const MIRFuncType &funcType, + std::vector &attrsVec) const { + bool isUpdated = false; + std::vector typesVec = funcType.GetParamTypeList(); + unsigned int lenIdx = attr->getLenVarIndex().getASTIndex(); + if (attr->index_size() == 0 || lenIdx >= typesVec.size()) { + return isUpdated; + } + MIRType *lenType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(typesVec[lenIdx]); + if (lenType == nullptr || !FEUtils::IsInteger(lenType->GetPrimType())) { + FE_ERR(kLncErr, astDecl.GetSrcLoc(), "The boundary length var by the %s argument is not an integer type", + ENCChecker::GetNthStr(lenIdx).c_str()); + return isUpdated; + } + for (const clang::ParamIdx ¶mIdx : attr->index()) { + unsigned int idx = paramIdx.getASTIndex(); + if (idx >= attrsVec.size() || idx >= typesVec.size()) { + continue; + } + attrsVec[idx].GetAttrBoundary().SetLenParamIdx(static_cast(lenIdx)); + if (std::is_same::type, clang::ByteCountIndexAttr>::value) { + attrsVec[idx].GetAttrBoundary().SetIsBytedLen(true); + } + isUpdated = true; + } + return isUpdated; +} + +void ASTParser::ProcessBoundaryFieldAttrs(MapleAllocator &allocator, const ASTStruct &structDecl, + const clang::RecordDecl &recDecl) { + if (!FEOptions::GetInstance().IsBoundaryCheckDynamic()) { + return; + } + const auto *declContext = llvm::dyn_cast(&recDecl); + if (declContext == nullptr) { + return; + } + for (auto *loadDecl : declContext->decls()) { + if (loadDecl == nullptr) { + continue; + } + auto *fieldDecl = llvm::dyn_cast(loadDecl); + if (fieldDecl == nullptr) { + continue; + } + ASTDecl *astField = ASTDeclsBuilder::GetASTDecl(fieldDecl->getID()); + if (astField == nullptr) { + continue; + } + for (const auto *countAttr : fieldDecl->specific_attrs()) { + clang::Expr *expr = countAttr->getLenExpr(); + ASTExpr *lenExpr = ProcessExpr(allocator, expr); + if (lenExpr == nullptr) { + continue; + } + if (countAttr->index_size() == 0) { + ProcessBoundaryLenExprInField(allocator, *astField, structDecl, fieldDecl->getType(), lenExpr, true); + } + } + for (const auto *byteCountAttr : fieldDecl->specific_attrs()) { + clang::Expr *expr = byteCountAttr->getLenExpr(); + ASTExpr *lenExpr = ProcessExpr(allocator, expr); + if (lenExpr == nullptr) { + continue; + } + if (byteCountAttr->index_size() == 0) { + ProcessBoundaryLenExprInField(allocator, *astField, structDecl, fieldDecl->getType(), lenExpr, false); + } + } + } +} + +// --------------------------- +// process boundary length expr in attr +// --------------------------- +void ASTParser::ProcessBoundaryLenExpr(MapleAllocator &allocator, ASTDecl &ptrDecl, const clang::QualType &qualType, + const std::function &getLenExprFromStringLiteral, + ASTExpr *lenExpr, bool isSize) { + if (!qualType->isPointerType()) { + FE_ERR(kLncErr, lenExpr->GetSrcLoc(), "The variable modified by the boundary attribute should be a pointer type"); + return; + } + // Check lenExpr kind from: length stringLiteral or constant value/var 
expression + if (lenExpr->GetASTOp() == kASTStringLiteral) { + // boundary length stringLiteral -> real length decl expr + lenExpr = getLenExprFromStringLiteral(); + if (lenExpr == nullptr) { + return; + } + } else if (lenExpr->GetType() == nullptr || !FEUtils::IsInteger(lenExpr->GetType()->GetPrimType())) { + FE_ERR(kLncErr, lenExpr->GetSrcLoc(), "The boundary length expr is not an integer type"); + return; + } + if (isSize) { + // The type size can only be obtained from ClangDecl instead of ASTDecl, + // because the field of mir struct type has not yet been initialized at this time + uint32 lenSize = GetSizeFromQualType(qualType->getPointeeType()); + MIRType *pointedType = static_cast(ptrDecl.GetTypeDesc().front())->GetPointedType(); + if (pointedType->GetPrimType() == PTY_f64) { + lenSize = 8; // 8 is f64 byte num, because now f128 also cvt to f64 + } + lenExpr = GetAddrShiftExpr(allocator, *lenExpr, lenSize); + } + ptrDecl.SetBoundaryLenExpr(lenExpr); + ptrDecl.SetIsBytedLen(!isSize); +} + +void ENCChecker::CheckLenExpr(const ASTExpr &lenExpr, const std::list &nullstmts) { + for (const auto &stmt : nullstmts) { + bool isAssertStmt = false; + if (stmt->GetKind() == kStmtNary) { + FEIRStmtNary *nary = static_cast(stmt.get()); + if (kOpcodeInfo.IsAssertBoundary(nary->GetOP()) || kOpcodeInfo.IsAssertNonnull(nary->GetOP())) { + isAssertStmt = true; + } + } + if (!isAssertStmt) { + FE_ERR(kLncErr, lenExpr.GetSrcLoc(), "The boundary length expr containing statement is invalid"); + break; + } + } +} + +void ASTParser::ProcessBoundaryLenExprInFunc(MapleAllocator &allocator, const clang::FunctionDecl &funcDecl, + unsigned int idx, ASTFunc &astFunc, ASTExpr *lenExpr, bool isSize) { + ASTDecl *ptrDecl = nullptr; + clang::QualType qualType; + if (idx == static_cast(-1)) { // return boundary attr + ptrDecl = &astFunc; + qualType = funcDecl.getReturnType(); + } else if (idx < astFunc.GetParamDecls().size()) { + ptrDecl = astFunc.GetParamDecls()[idx]; + qualType = funcDecl.getParamDecl(idx)->getType(); + } else { + FE_ERR(kLncErr, lenExpr->GetSrcLoc(), "The parameter annotated boundary attr [the %s argument] is not found" + "in the function [%s]", ENCChecker::GetNthStr(idx).c_str(), astFunc.GetName().c_str()); + return; + } + // parameter stringLiteral -> real parameter decl + auto getLenExprFromStringLiteral = [&allocator, &astFunc, lenExpr, ptrDecl]() -> ASTExpr* { + ASTStringLiteral *strExpr = static_cast(lenExpr); + std::string lenName(strExpr->GetCodeUnits().begin(), strExpr->GetCodeUnits().end()); + for (size_t i = 0; i < astFunc.GetParamDecls().size(); ++i) { + if (astFunc.GetParamDecls()[i]->GetName() != lenName) { + continue; + } + MIRType *lenType = astFunc.GetParamDecls()[i]->GetTypeDesc().front(); + if (lenType == nullptr || !FEUtils::IsInteger(lenType->GetPrimType())) { + FE_ERR(kLncErr, lenExpr->GetSrcLoc(), "The parameter [%s] specified as boundary length var is not an integer " + "type in the function [%s]", lenName.c_str(), astFunc.GetName().c_str()); + return nullptr; + } + ASTDeclRefExpr *lenRefExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); + lenRefExpr->SetASTDecl(astFunc.GetParamDecls()[i]); + lenRefExpr->SetType(astFunc.GetParamDecls()[i]->GetTypeDesc().front()); + ptrDecl->SetBoundaryLenParamIdx(static_cast(i)); + return lenRefExpr; + } + FE_ERR(kLncErr, lenExpr->GetSrcLoc(), "The parameter [%s] specified as boundary length var is not found " + "in the function [%s]", lenName.c_str(), astFunc.GetName().c_str()); + return nullptr; + }; + ProcessBoundaryLenExpr(allocator, 
*ptrDecl, qualType, getLenExprFromStringLiteral, lenExpr, isSize); +} + +void ASTParser::ProcessBoundaryLenExprInFunc(MapleAllocator &allocator, const clang::FunctionDecl &funcDecl, + unsigned int idx, ASTFunc &astFunc, unsigned int lenIdx, bool isSize) { + if (lenIdx > astFunc.GetParamDecls().size()) { + FE_ERR(kLncErr, astFunc.GetSrcLoc(), "The %s parameter specified as boundary length var is not found " + "in the function [%s]", ENCChecker::GetNthStr(lenIdx).c_str(), astFunc.GetName().c_str()); + return; + } + ASTDecl *lenDecl = astFunc.GetParamDecls()[lenIdx]; + MIRType *lenType = lenDecl->GetTypeDesc().front(); + if (lenType == nullptr || !FEUtils::IsInteger(lenType->GetPrimType())) { + FE_ERR(kLncErr, astFunc.GetSrcLoc(), "The %s parameter specified as boundary length var is not an integer type " + "in the function [%s]", ENCChecker::GetNthStr(lenIdx).c_str(), astFunc.GetName().c_str()); + return; + } + ASTDeclRefExpr *lenRefExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); + lenRefExpr->SetASTDecl(lenDecl); + lenRefExpr->SetType(lenType); + + ASTDecl *ptrDecl = nullptr; + if (idx == static_cast(-1)) { // return boundary attr + ptrDecl = &astFunc; + } else if (idx < astFunc.GetParamDecls().size()) { + ptrDecl = astFunc.GetParamDecls()[idx]; + } else { + FE_ERR(kLncErr, astFunc.GetSrcLoc(), "The %s parameter annotated boundary attr is not found in the function [%s]", + ENCChecker::GetNthStr(idx).c_str(), astFunc.GetName().c_str()); + return; + } + ptrDecl->SetBoundaryLenParamIdx(static_cast(lenIdx)); + ProcessBoundaryLenExprInFunc(allocator, funcDecl, idx, astFunc, lenRefExpr, isSize); +} + +void ASTParser::ProcessBoundaryLenExprInVar(MapleAllocator &allocator, ASTDecl &ptrDecl, + const clang::VarDecl &varDecl, ASTExpr *lenExpr, bool isSize) { + if (!varDecl.isLocalVarDeclOrParm()) { + ASTDecl *lenDecl = lenExpr->GetASTDecl(); + if (lenDecl != nullptr && FEUtils::IsInteger(lenDecl->GetTypeDesc().front()->GetPrimType())) { + lenDecl->SetAttr(GENATTR_final_boundary_size); + } + } + ProcessBoundaryLenExprInVar(allocator, ptrDecl, varDecl.getType(), lenExpr, isSize); +} + +void ASTParser::ProcessBoundaryLenExprInVar(MapleAllocator &allocator, ASTDecl &ptrDecl, + const clang::QualType &qualType, ASTExpr *lenExpr, bool isSize) { + // The StringLiteral is not allowed to use as boundary length of var + auto getLenExprFromStringLiteral = [lenExpr]() -> ASTExpr* { + FE_ERR(kLncErr, lenExpr->GetSrcLoc(), "The StringLiteral is not allowed to use as boundary length of var"); + return nullptr; + }; + ProcessBoundaryLenExpr(allocator, ptrDecl, qualType, getLenExprFromStringLiteral, lenExpr, isSize); +} + +void ASTParser::ProcessBoundaryLenExprInField(MapleAllocator &allocator, ASTDecl &ptrDecl, const ASTStruct &structDecl, + const clang::QualType &qualType, ASTExpr *lenExpr, bool isSize) { + // boundary length stringLiteral in field -> real field decl + auto getLenExprFromStringLiteral = [&allocator, &structDecl, lenExpr]() -> ASTExpr* { + ASTStringLiteral *strExpr = static_cast(lenExpr); + std::string lenName(strExpr->GetCodeUnits().begin(), strExpr->GetCodeUnits().end()); + for (ASTField *fieldDecl: structDecl.GetFields()) { + if (lenName != fieldDecl->GetName()) { + continue; + } + MIRType *lenType = fieldDecl->GetTypeDesc().front(); + if (lenType == nullptr || !FEUtils::IsInteger(lenType->GetPrimType())) { + FE_ERR(kLncErr, lenExpr->GetSrcLoc(), "The field [%s] specified as boundary length var is not an integer type " + "in the struct [%s]", lenName.c_str(), structDecl.GetName().c_str()); + 
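+        // Returning nullptr makes ProcessBoundaryLenExpr bail out for this field, so no boundary
+        // length is attached when the named length field is not an integer.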
return nullptr; + } + fieldDecl->SetAttr(GENATTR_final_boundary_size); + ASTDeclRefExpr *lenRefExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); + lenRefExpr->SetASTDecl(fieldDecl); + lenRefExpr->SetType(fieldDecl->GetTypeDesc().front()); + return lenRefExpr; + } + FE_ERR(kLncErr, lenExpr->GetSrcLoc(), "The StringLiteral [%s] as boundary length var is not found " + "in the struct [%s]", lenName.c_str(), structDecl.GetName().c_str()); + return nullptr; + }; + ProcessBoundaryLenExpr(allocator, ptrDecl, qualType, getLenExprFromStringLiteral, lenExpr, isSize); +} + +// --------------------------- +// process boundary var +// --------------------------- +UniqueFEIRExpr ENCChecker::FindBaseExprInPointerOperation(const UniqueFEIRExpr &expr, bool isIncludingAddrof) { + if (expr == nullptr) { + return nullptr; + } + UniqueFEIRExpr baseExpr = nullptr; + if (expr->GetKind() == kExprBinary) { + FEIRExprBinary *binExpr = static_cast(expr.get()); + if (binExpr->IsComparative()) { + return nullptr; + } + baseExpr = FindBaseExprInPointerOperation(binExpr->GetOpnd0()); + if (baseExpr != nullptr) { + return baseExpr; + } + baseExpr = FindBaseExprInPointerOperation(binExpr->GetOpnd1()); + if (baseExpr != nullptr) { + return baseExpr; + } + } + if (expr->GetKind() == kExprUnary) { + FEIRExprUnary *cvtExpr = static_cast(expr.get()); + baseExpr = FindBaseExprInPointerOperation(cvtExpr->GetOpnd()); + } else if (expr->GetKind() == kExprAddrofArray) { + FEIRExprAddrofArray *arrExpr = static_cast(expr.get()); + baseExpr = FindBaseExprInPointerOperation(arrExpr->GetExprArray()); + } else if ((expr->GetKind() == kExprDRead && expr->GetPrimType() == PTY_ptr) || + (expr->GetKind() == kExprIRead && expr->GetPrimType() == PTY_ptr && expr->GetFieldID() != 0) || + GetArrayTypeFromExpr(expr) != nullptr) { + baseExpr = expr->Clone(); + } else if (isIncludingAddrof && GetTypeFromAddrExpr(expr) != nullptr) { // addrof as 1-sized array + baseExpr = expr->Clone(); + } + return baseExpr; +} + +MIRType *ENCChecker::GetTypeFromAddrExpr(const UniqueFEIRExpr &expr) { + if (expr->GetKind() == kExprAddrofVar) { + MIRType *type = expr->GetVarUses().front()->GetType()->GenerateMIRTypeAuto(); + if (expr->GetFieldID() == 0) { + return type; + } else { + CHECK_FATAL(type->IsStructType(), "basetype must be StructType"); + FieldID fieldID = expr->GetFieldID(); + FieldPair fieldPair = static_cast(type)->TraverseToFieldRef(fieldID); + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(fieldPair.second.first); + } + } else if (expr->GetKind() == kExprIAddrof) { + FEIRExprIAddrof *iaddrof = static_cast(expr.get()); + MIRType *pointerType = iaddrof->GetClonedPtrType()->GenerateMIRTypeAuto(); + CHECK_FATAL(pointerType->IsMIRPtrType(), "Must be ptr type!"); + MIRType *baseType = static_cast(pointerType)->GetPointedType(); + CHECK_FATAL(baseType->IsStructType(), "basetype must be StructType"); + FieldID fieldID = iaddrof->GetFieldID(); + FieldPair fieldPair = static_cast(baseType)->TraverseToFieldRef(fieldID); + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(fieldPair.second.first); + } else { + return nullptr; + } +} + +MIRType *ENCChecker::GetArrayTypeFromExpr(const UniqueFEIRExpr &expr) { + MIRType *arrType = GetTypeFromAddrExpr(expr); + if (arrType != nullptr && arrType->GetKind() == kTypeArray && + !static_cast(arrType)->GetTypeAttrs().GetAttr(ATTR_incomplete_array)) { + return arrType; + } + if (expr->GetKind() == kExprAddrof) { // local char* value size + auto *constArr = static_cast(expr.get()); + return 
GlobalTables::GetTypeTable().GetOrCreateArrayType( + *constArr->GetElemType(), constArr->GetStringLiteralSize() + 1); // including the end character with string + } else if (expr->GetKind() == kExprDRead || expr->GetKind() == kExprIRead) { // global const char* value size + MIRType *type = expr->GetType()->GenerateMIRTypeAuto(); + if (type->IsMIRPtrType() && static_cast(type)->GetPointedType()->GetPrimType() == PTY_u8) { + MIRConst *cst = GetMIRConstFromExpr(expr); + if (cst != nullptr && cst->GetKind() == kConstStrConst) { + size_t size = GlobalTables::GetUStrTable().GetStringFromStrIdx( + static_cast(cst)->GetValue()).size() + 1; // including the end character with string + return GlobalTables::GetTypeTable().GetOrCreateArrayType( + *GlobalTables::GetTypeTable().GetUInt8(), static_cast(size)); + } + } + return nullptr; + } + return nullptr; +} + +MIRConst *ENCChecker::GetMIRConstFromExpr(const UniqueFEIRExpr &expr) { + if (expr == nullptr || expr->GetVarUses().size() != 1) { + return nullptr; + } + MIRSymbol *sym = expr->GetVarUses().front()->GenerateMIRSymbol(FEManager::GetMIRBuilder()); + if (!sym->IsGlobal() || !sym->GetAttr(ATTR_const) || sym->GetKonst() == nullptr) { // const global + return nullptr; + } + if (expr->GetKind() == kExprDRead || expr->GetKind() == kExprAddrofVar) { + if (expr->GetFieldID() == 0) { + return sym->GetKonst(); + } else if (expr->GetFieldID() != 0 && sym->GetKonst()->GetKind() == kConstAggConst) { + MIRAggConst *aggConst = static_cast(sym->GetKonst()); + FieldID tmpID = expr->GetFieldID(); + MIRConst *cst = FEUtils::TraverseToMIRConst(aggConst, *static_cast(sym->GetType()), tmpID); + return cst; + } + return nullptr; + } else if (expr->GetKind() == kExprIRead) { + auto *iread = static_cast(expr.get()); + MIRConst *cst = GetMIRConstFromExpr(iread->GetClonedOpnd()); + if (expr->GetFieldID() != 0 && cst != nullptr && sym->GetKonst()->GetKind() == kConstAggConst) { + MIRType *pointType = static_cast(iread->GetClonedPtrType()->GenerateMIRTypeAuto())->GetPointedType(); + FieldID tmpID = expr->GetFieldID(); + cst = FEUtils::TraverseToMIRConst(static_cast(cst), *static_cast(pointType), tmpID); + } + return cst; + } else if (expr->GetKind() == kExprAddrofArray) { + FEIRExprAddrofArray *arrExpr = static_cast(expr.get()); + MIRConst *cst = GetMIRConstFromExpr(arrExpr->GetExprArray()); + if (cst == nullptr || cst->GetKind() != kConstAggConst) { + return nullptr; + } + for (const auto &idxExpr : arrExpr->GetExprIndexs()) { + MIRAggConst *aggConst = static_cast(cst); + if (idxExpr->GetKind() != kExprConst) { + return nullptr; + } + uint64 idx = static_cast(idxExpr.get())->GetValue().u64; + if (idx >= aggConst->GetConstVec().size()) { + return nullptr; + } + cst = aggConst->GetConstVecItem(idx); + } + return cst; + } + return nullptr; +} + +void ENCChecker::AssignBoundaryVar(MIRBuilder &mirBuilder, const UniqueFEIRExpr &dstExpr, const UniqueFEIRExpr &srcExpr, + const UniqueFEIRExpr &lRealLenExpr, std::list &ans) { + if (!FEOptions::GetInstance().IsBoundaryCheckDynamic() || + srcExpr->GetPrimType() != PTY_ptr || dstExpr->GetPrimType() != PTY_ptr) { + return; + } + if (lRealLenExpr == nullptr && IsGlobalVarInExpr(dstExpr)) { + return; // skip boundary assignment for global var whithout boundary attr + } + // Avoid inserting redundant boundary vars + const std::string prefix = "_boundary."; + if (dstExpr->GetKind() == kExprDRead && + static_cast(dstExpr.get())->GetVar()->GetNameRaw().compare(0, prefix.size(), prefix) == 0) { + return; + } + MIRFunction *curFunction = 
mirBuilder.GetCurrentFunctionNotNull(); + FEFunction &curFEFunction = FEManager::GetCurrentFEFunction(); + UniqueFEIRExpr baseExpr = FindBaseExprInPointerOperation(srcExpr, true); + if (baseExpr == nullptr) { + return; + } + if (dstExpr->Hash() == baseExpr->Hash()) { // skip self-assignment, e.g. p++; + return; + } + // Check if the l-value has a boundary var + std::pair lBoundaryVarStIdx = std::make_pair(StIdx(0), StIdx(0)); + auto lIt = curFEFunction.GetBoundaryMap().find(dstExpr->Hash()); + if (lIt != curFEFunction.GetBoundaryMap().end()) { // The boundary var exists on the l-value + lBoundaryVarStIdx = lIt->second; + } else if (lRealLenExpr != nullptr) { // init boundary in func body if a field/global var l-value with boundary attr + std::list stmts; + lBoundaryVarStIdx = InitBoundaryVar(*curFunction, dstExpr, lRealLenExpr->Clone(), stmts); + for (const auto &stmt : stmts) { + std::list stmtNodes = stmt->GenMIRStmts(mirBuilder); + for (auto stmtNode : stmtNodes) { + curFunction->GetBody()->InsertFirst(stmtNode); + } + } + } + // Check if the r-value has a boundary var or an array or a global var/field with boundary attr + std::pair rBoundaryVarStIdx = std::make_pair(StIdx(0), StIdx(0)); + MIRType *arrType = nullptr; + UniqueFEIRExpr rRealLenExpr = nullptr; + auto rIt = curFEFunction.GetBoundaryMap().find(baseExpr->Hash()); + if (rIt != curFEFunction.GetBoundaryMap().end()) { // Assgin when the boundary var exists on the r-value + rBoundaryVarStIdx = rIt->second; + } else { + arrType = GetArrayTypeFromExpr(baseExpr); + if (arrType == nullptr) { + rRealLenExpr = GetGlobalOrFieldLenExprInExpr(mirBuilder, baseExpr); + } + } + // insert L-value bounary and assign boundary var + // when r-value with a boundary var, or with a sized array, a global var/field r-value with boundary attr + bool isSizedArray = (arrType != nullptr && arrType->GetSize() > 0); + if (lBoundaryVarStIdx.first == StIdx(0) && + (rBoundaryVarStIdx.first != StIdx(0) || isSizedArray || rRealLenExpr != nullptr)) { + lBoundaryVarStIdx = ENCChecker::InsertBoundaryVar(mirBuilder, dstExpr); + } + if (lBoundaryVarStIdx.first != StIdx(0)) { + MIRSymbol *lLowerSym = curFunction->GetLocalOrGlobalSymbol(lBoundaryVarStIdx.first); + CHECK_NULL_FATAL(lLowerSym); + MIRSymbol *lUpperSym = curFunction->GetLocalOrGlobalSymbol(lBoundaryVarStIdx.second); + CHECK_NULL_FATAL(lUpperSym); + StmtNode *lowerStmt = nullptr; + StmtNode *upperStmt = nullptr; + if (rBoundaryVarStIdx.first != StIdx(0)) { + MIRSymbol *rLowerSym = curFunction->GetLocalOrGlobalSymbol(rBoundaryVarStIdx.first); + CHECK_NULL_FATAL(rLowerSym); + lowerStmt = mirBuilder.CreateStmtDassign(*lLowerSym, 0, mirBuilder.CreateExprDread(*rLowerSym)); + + MIRSymbol *rUpperSym = curFunction->GetLocalOrGlobalSymbol(rBoundaryVarStIdx.second); + CHECK_NULL_FATAL(rUpperSym); + upperStmt = mirBuilder.CreateStmtDassign(*lUpperSym, 0, mirBuilder.CreateExprDread(*rUpperSym)); + } else if (isSizedArray) { + lowerStmt = mirBuilder.CreateStmtDassign(*lLowerSym, 0, baseExpr->GenMIRNode(mirBuilder)); + UniqueFEIRExpr binExpr = FEIRBuilder::CreateExprBinary( + OP_add, baseExpr->Clone(), std::make_unique(arrType->GetSize(), PTY_ptr)); + upperStmt = mirBuilder.CreateStmtDassign(*lUpperSym, 0, binExpr->GenMIRNode(mirBuilder)); + } else if (rRealLenExpr != nullptr) { + lowerStmt = mirBuilder.CreateStmtDassign(*lLowerSym, 0, baseExpr->GenMIRNode(mirBuilder)); + UniqueFEIRExpr binExpr = FEIRBuilder::CreateExprBinary(OP_add, baseExpr->Clone(), rRealLenExpr->Clone()); + upperStmt = 
mirBuilder.CreateStmtDassign(*lUpperSym, 0, binExpr->GenMIRNode(mirBuilder)); + } else { + MIRType *addrofType = GetTypeFromAddrExpr(baseExpr); + if (addrofType != nullptr) { // addrof as 1-sized array, if l-value with boundary and r-value is addrof non-array + lowerStmt = mirBuilder.CreateStmtDassign(*lLowerSym, 0, baseExpr->GenMIRNode(mirBuilder)); + UniqueFEIRExpr binExpr = FEIRBuilder::CreateExprBinary( + OP_add, baseExpr->Clone(), std::make_unique(addrofType->GetSize(), PTY_ptr)); + upperStmt = mirBuilder.CreateStmtDassign(*lUpperSym, 0, binExpr->GenMIRNode(mirBuilder)); + } else { + // Insert a undef boundary r-value + // when there is a l-value boundary var and r-value without a boundary var + lowerStmt = mirBuilder.CreateStmtDassign(*lLowerSym, 0, baseExpr->GenMIRNode(mirBuilder)); + UniqueFEIRExpr undef = std::make_unique(kUndefValue, PTY_ptr); + upperStmt = mirBuilder.CreateStmtDassign(*lUpperSym, 0, undef->GenMIRNode(mirBuilder)); + } + } + if (lowerStmt == nullptr || upperStmt == nullptr) { + return; + } + if (lRealLenExpr != nullptr) { // use l-vaule own boundary(use r-base + offset) when l-value has a boundary attr + BaseNode *binExpr = mirBuilder.CreateExprBinary( + OP_add, *lLowerSym->GetType(), srcExpr->GenMIRNode(mirBuilder), lRealLenExpr->GenMIRNode(mirBuilder)); + upperStmt = mirBuilder.CreateStmtDassign(*lUpperSym, 0, binExpr); + } + ans.emplace_back(lowerStmt); + ans.emplace_back(upperStmt); + } +} + +bool ENCChecker::IsGlobalVarInExpr(const UniqueFEIRExpr &expr) { + bool isGlobal = false; + auto vars = expr->GetVarUses(); + if (!vars.empty() && vars.front() != nullptr) { + isGlobal = vars.front()->IsGlobal(); + } + return isGlobal; +} + +std::pair ENCChecker::InsertBoundaryVar(MIRBuilder &mirBuilder, const UniqueFEIRExpr &expr) { + std::pair boundaryVarStIdx = std::make_pair(StIdx(0), StIdx(0)); + std::string boundaryName = GetBoundaryName(expr); + if (boundaryName.empty()) { + return boundaryVarStIdx; + } + MIRType *boundaryType = expr->GetType()->GenerateMIRTypeAuto(); + MIRSymbol *lowerSrcSym = mirBuilder.GetOrCreateLocalDecl(boundaryName + ".lower", *boundaryType); + MIRSymbol *upperSrcSym = mirBuilder.GetOrCreateLocalDecl(boundaryName + ".upper", *boundaryType); + // assign undef val to boundary var in func body head + AssignUndefVal(mirBuilder, *upperSrcSym); + AssignUndefVal(mirBuilder, *lowerSrcSym); + // update BoundaryMap + boundaryVarStIdx = std::make_pair(lowerSrcSym->GetStIdx(), upperSrcSym->GetStIdx()); + FEManager::GetCurrentFEFunction().SetBoundaryMap(expr->Hash(), boundaryVarStIdx); + return boundaryVarStIdx; +} + +void ENCChecker::InsertBoundaryVar(const ASTDecl &ptrDecl, std::list &stmts) { + if (!FEOptions::GetInstance().IsBoundaryCheckDynamic() || + ptrDecl.GetBoundaryLenExpr() == nullptr) { + return; + } + // GetCurrentFunction need to be optimized when parallel features + MIRFunction *curFunction = FEManager::GetMIRBuilder().GetCurrentFunctionNotNull(); + UniqueFEIRExpr lenFEExpr = ptrDecl.GetBoundaryLenExpr()->Emit2FEExpr(stmts); + ENCChecker::InitBoundaryVar(*curFunction, ptrDecl, std::move(lenFEExpr), stmts); +} + +std::string ENCChecker::GetBoundaryName(const UniqueFEIRExpr &expr) { + std::string boundaryName; + if (expr == nullptr) { + return boundaryName; + } + if (expr->GetKind() == kExprDRead) { + const FEIRExprDRead *dread = static_cast(expr.get()); + // Naming format for var boundary: _boundary.[varname]_[fieldID].[exprHash].lower/upper + boundaryName = "_boundary." + dread->GetVar()->GetNameRaw() + "." 
+ std::to_string(expr->GetFieldID()) + "." + + std::to_string(expr->Hash()); + } else if (expr->GetKind() == kExprIRead) { + const FEIRExprIRead *iread = static_cast(expr.get()); + MIRType *pointerType = iread->GetClonedPtrType()->GenerateMIRTypeAuto(); + CHECK_FATAL(pointerType->IsMIRPtrType(), "Must be ptr type!"); + MIRType *structType = static_cast(pointerType)->GetPointedType(); + std::string structName = GlobalTables::GetStrTable().GetStringFromStrIdx(structType->GetNameStrIdx()); + FieldID fieldID = iread->GetFieldID(); + FieldPair rFieldPair = static_cast(structType)->TraverseToFieldRef(fieldID); + std::string fieldName = GlobalTables::GetStrTable().GetStringFromStrIdx(rFieldPair.first); + // Naming format for field var boundary: _boundary.[sturctname]_[fieldname].[exprHash].lower/upper + boundaryName = "_boundary." + structName + "_" + fieldName + "." + std::to_string(iread->Hash()); + } + return boundaryName; +} + +void ENCChecker::AssignUndefVal(MIRBuilder &mirBuilder, MIRSymbol &sym) { + if (sym.IsGlobal()) { + MIRIntConst *cst = FEManager::GetModule().GetMemPool()->New( + kUndefValue, *GlobalTables::GetTypeTable().GetPrimType(PTY_ptr)); + sym.SetKonst(cst); + } else { + BaseNode *undef = mirBuilder.CreateIntConst(kUndefValue, PTY_ptr); + StmtNode *assign = mirBuilder.CreateStmtDassign(sym, 0, undef); + MIRFunction *curFunction = mirBuilder.GetCurrentFunctionNotNull(); + curFunction->GetBody()->InsertFirst(assign); + } +} + +void ENCChecker::InitBoundaryVarFromASTDecl(MapleAllocator &allocator, ASTDecl *ptrDecl, ASTExpr *lenExpr, + std::list &stmts) { + MIRType *ptrType = ptrDecl->GetTypeDesc().front(); + // insert lower boundary stmt + ASTDeclRefExpr *lowerRefExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); + lowerRefExpr->SetASTDecl(ptrDecl); + std::string lowerVarName = "_boundary." + ptrDecl->GetName() + ".lower"; + ASTVar *lowerDecl = ASTDeclsBuilder::ASTVarBuilder(allocator, MapleString("", allocator.GetMemPool()), + lowerVarName, MapleVector({ptrType}, allocator.Adapter()), GenericAttrs()); + lowerDecl->SetIsParam(true); + lowerDecl->SetInitExpr(lowerRefExpr); + ASTDeclStmt *lowerStmt = ASTDeclsBuilder::ASTStmtBuilder(allocator); + lowerStmt->SetSubDecl(lowerDecl); + // insert upper boundary stmt + ASTDeclRefExpr *upperRefExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); + upperRefExpr->SetASTDecl(ptrDecl); + ASTBinaryOperatorExpr *upperBinExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); + upperBinExpr->SetLeftExpr(upperRefExpr); + upperBinExpr->SetRightExpr(lenExpr); + upperBinExpr->SetOpcode(OP_add); + upperBinExpr->SetRetType(ptrType); + upperBinExpr->SetCvtNeeded(true); + std::string upperVarName = "_boundary." 
+ ptrDecl->GetName() + ".upper"; + ASTVar *upperDecl = ASTDeclsBuilder::ASTVarBuilder(allocator, MapleString("", allocator.GetMemPool()), + upperVarName, MapleVector({ptrType}, allocator.Adapter()), GenericAttrs()); + upperDecl->SetIsParam(true); + upperDecl->SetInitExpr(upperBinExpr); + ASTDeclStmt *upperStmt = ASTDeclsBuilder::ASTStmtBuilder(allocator); + upperStmt->SetSubDecl(upperDecl); + stmts.emplace_back(lowerStmt); + stmts.emplace_back(upperStmt); +} + +void ENCChecker::InitBoundaryVar(MIRFunction &curFunction, const ASTDecl &ptrDecl, + UniqueFEIRExpr lenExpr, std::list &stmts) { + std::string ptrName = ptrDecl.GenerateUniqueVarName(); + MIRType *ptrType = ptrDecl.GetTypeDesc().front(); + UniqueFEIRExpr ptrExpr = FEIRBuilder::CreateExprDRead(FEIRBuilder::CreateVarNameForC(ptrName, *ptrType)); + // insert lower boundary stmt + std::string lowerVarName = "_boundary." + ptrName + ".lower"; + UniqueFEIRVar lowerVar = FEIRBuilder::CreateVarNameForC(lowerVarName, *ptrType); + UniqueFEIRStmt lowerStmt = FEIRBuilder::CreateStmtDAssign(std::move(lowerVar), ptrExpr->Clone()); + // insert upper boundary stmt + UniqueFEIRExpr binExpr = FEIRBuilder::CreateExprBinary(OP_add, ptrExpr->Clone(), std::move(lenExpr)); + std::string upperVarName = "_boundary." + ptrName + ".upper"; + UniqueFEIRVar upperVar = FEIRBuilder::CreateVarNameForC(upperVarName, *ptrType); + UniqueFEIRStmt upperStmt = FEIRBuilder::CreateStmtDAssign(std::move(upperVar), std::move(binExpr)); + if (ptrDecl.GetSrcFileLineNum() != 0) { + Loc loc = {ptrDecl.GetSrcFileIdx(), ptrDecl.GetSrcFileLineNum(), ptrDecl.GetSrcFileColumn()}; + lowerStmt->SetSrcLoc(loc); + upperStmt->SetSrcLoc(loc); + } + stmts.emplace_back(std::move(lowerStmt)); + stmts.emplace_back(std::move(upperStmt)); + // update BoundaryMap + MIRSymbol *lowerSym = nullptr; + MIRSymbol *upperSym = nullptr; + if (ptrDecl.IsGlobal()) { + lowerSym = FEManager::GetMIRBuilder().GetOrCreateGlobalDecl(lowerVarName, *ptrType); + upperSym = FEManager::GetMIRBuilder().GetOrCreateGlobalDecl(upperVarName, *ptrType); + } else { + lowerSym = FEManager::GetMIRBuilder().GetOrCreateDeclInFunc(lowerVarName, *ptrType, curFunction); + upperSym = FEManager::GetMIRBuilder().GetOrCreateDeclInFunc(upperVarName, *ptrType, curFunction); + } + FEManager::GetCurrentFEFunction().SetBoundaryMap( + ptrExpr->Hash(), std::make_pair(lowerSym->GetStIdx(), upperSym->GetStIdx())); +} + +void ENCChecker::InitBoundaryVar(MIRFunction &curFunction, const std::string &ptrName, MIRType &ptrType, + UniqueFEIRExpr lenExpr, std::list &stmts) { + UniqueFEIRExpr ptrExpr = FEIRBuilder::CreateExprDRead(FEIRBuilder::CreateVarNameForC(ptrName, ptrType)); + // insert lower boundary stmt + std::string lowerVarName = "_boundary." + ptrName + ".lower"; + UniqueFEIRVar lowerVar = FEIRBuilder::CreateVarNameForC(lowerVarName, ptrType); + UniqueFEIRStmt lowerStmt = FEIRBuilder::CreateStmtDAssign(std::move(lowerVar), ptrExpr->Clone()); + stmts.emplace_back(std::move(lowerStmt)); + // insert upper boundary stmt + UniqueFEIRExpr binExpr = FEIRBuilder::CreateExprBinary(OP_add, ptrExpr->Clone(), std::move(lenExpr)); + std::string upperVarName = "_boundary." 
+ ptrName + ".upper"; + UniqueFEIRVar upperVar = FEIRBuilder::CreateVarNameForC(upperVarName, ptrType); + UniqueFEIRStmt upperStmt = FEIRBuilder::CreateStmtDAssign(std::move(upperVar), std::move(binExpr)); + stmts.emplace_back(std::move(upperStmt)); + // update BoundaryMap + MIRSymbol *lowerSym = FEManager::GetMIRBuilder().GetOrCreateDeclInFunc(lowerVarName, ptrType, curFunction); + MIRSymbol *upperSym = FEManager::GetMIRBuilder().GetOrCreateDeclInFunc(upperVarName, ptrType, curFunction); + FEManager::GetCurrentFEFunction().SetBoundaryMap( + ptrExpr->Hash(), std::make_pair(lowerSym->GetStIdx(), upperSym->GetStIdx())); +} + +std::pair ENCChecker::InitBoundaryVar(MIRFunction &curFunction, const UniqueFEIRExpr &ptrExpr, + UniqueFEIRExpr lenExpr, std::list &stmts) { + std::string ptrName = GetBoundaryName(ptrExpr); + if (ptrName.empty()) { + return std::make_pair(StIdx(0), StIdx(0)); + } + MIRType *ptrType = ptrExpr->GetType()->GenerateMIRTypeAuto(); + // insert lower boundary stmt + std::string lowerVarName = ptrName + ".lower"; + UniqueFEIRVar lowerVar = FEIRBuilder::CreateVarNameForC(lowerVarName, *ptrType); + UniqueFEIRStmt lowerStmt = FEIRBuilder::CreateStmtDAssign(std::move(lowerVar), ptrExpr->Clone()); + stmts.emplace_back(std::move(lowerStmt)); + // insert upper boundary stmt + UniqueFEIRExpr binExpr = FEIRBuilder::CreateExprBinary(OP_add, ptrExpr->Clone(), std::move(lenExpr)); + std::string upperVarName = ptrName + ".upper"; + UniqueFEIRVar upperVar = FEIRBuilder::CreateVarNameForC(upperVarName, *ptrType); + UniqueFEIRStmt upperStmt = FEIRBuilder::CreateStmtDAssign(std::move(upperVar), std::move(binExpr)); + stmts.emplace_back(std::move(upperStmt)); + // update BoundaryMap + MIRSymbol *lowerSym = FEManager::GetMIRBuilder().GetOrCreateDeclInFunc(lowerVarName, *ptrType, curFunction); + MIRSymbol *upperSym = FEManager::GetMIRBuilder().GetOrCreateDeclInFunc(upperVarName, *ptrType, curFunction); + auto stIdxs = std::make_pair(lowerSym->GetStIdx(), upperSym->GetStIdx()); + FEManager::GetCurrentFEFunction().SetBoundaryMap(ptrExpr->Hash(), stIdxs); + return stIdxs; +} + + +UniqueFEIRExpr ENCChecker::GetGlobalOrFieldLenExprInExpr(MIRBuilder &mirBuilder, const UniqueFEIRExpr &expr) { + UniqueFEIRExpr lenExpr = nullptr; + if (expr->GetKind() == kExprDRead && expr->GetFieldID() == 0) { + FEIRVar *var = expr->GetVarUses().front(); + MIRSymbol *symbol = var->GenerateMIRSymbol(mirBuilder); + if (!symbol->IsGlobal()) { + return nullptr; + } + // Get the boundary attr(i.e. boundary length expr cache) + lenExpr = GetBoundaryLenExprCache(symbol->GetAttrs()); + } else if ((expr->GetKind() == kExprDRead || expr->GetKind() == kExprIRead) && expr->GetFieldID() != 0) { + MIRStructType *structType = nullptr; + if (expr->GetKind() == kExprDRead) { + structType = static_cast(expr->GetVarUses().front()->GetType()->GenerateMIRTypeAuto()); + } else { + FEIRExprIRead *iread = static_cast(expr.get()); + MIRType *baseType = static_cast(iread->GetClonedPtrType()->GenerateMIRTypeAuto())->GetPointedType(); + structType = static_cast(baseType); + } + FieldID tmpID = expr->GetFieldID(); + FieldPair fieldPair = structType->TraverseToFieldRef(tmpID); + // Get the boundary attr(i.e. 
boundary length expr cache) of field + lenExpr = GetBoundaryLenExprCache(fieldPair.second.second); + if (lenExpr == nullptr) { + return nullptr; + } + UniqueFEIRExpr realExpr = ENCChecker::GetRealBoundaryLenExprInField( + lenExpr->Clone(), *structType, expr); // lenExpr needs to be cloned + if (realExpr != nullptr) { + lenExpr = std::move(realExpr); + } + } else { + return nullptr; + } + return lenExpr; +} + +bool ENCChecker::IsConstantIndex(const UniqueFEIRExpr &expr) { + if (expr->GetKind() != kExprAddrofArray) { + return false; + } + FEIRExprAddrofArray *arrExpr = static_cast(expr.get()); + // nested array + if (arrExpr->GetExprArray()->GetKind() == kExprAddrofArray) { + if (!IsConstantIndex(arrExpr->GetExprArray()->Clone())) { + return false; + } + } + for (const auto &idxExpr : arrExpr->GetExprIndexs()) { + if (idxExpr->GetKind() != kExprConst) { + return false; + } + } + return true; +} + +void ENCChecker::PeelNestedBoundaryChecking(std::list &stmts, const UniqueFEIRExpr &baseExpr) { + std::list::iterator i = stmts.begin(); + while (i != stmts.end()) { + bool flag = ((*i)->GetKind() == kStmtNary); + if (flag) { + FEIRStmtNary *nary = static_cast((*i).get()); + flag = kOpcodeInfo.IsAssertBoundary(nary->GetOP()) && + nary->GetArgExprs().back()->Hash() == baseExpr->Hash(); + } + if (flag) { + i = stmts.erase(i); + } else { + ++i; + } + } +} + +UniqueFEIRExpr ENCChecker::GetRealBoundaryLenExprInFuncByIndex(const TypeAttrs &typeAttrs, const MIRType &type, + const ASTCallExpr &astCallExpr) { + int8 idx = typeAttrs.GetAttrBoundary().GetLenParamIdx(); + if (idx >= 0) { + ASTExpr *astLenExpr = astCallExpr.GetArgsExpr()[idx]; + if (!typeAttrs.GetAttrBoundary().IsBytedLen()) { + CHECK_FATAL(type.IsMIRPtrType(), "Must be ptr type!"); + size_t lenSize = static_cast(type).GetPointedType()->GetSize(); + MapleAllocator &allocator = FEManager::GetModule().GetMPAllocator(); + astLenExpr = ASTParser::GetAddrShiftExpr(allocator, *(astCallExpr.GetArgsExpr()[idx]), + static_cast(lenSize)); + } + std::list nullStmts; + return astLenExpr->Emit2FEExpr(nullStmts); + } else if (typeAttrs.GetAttrBoundary().GetLenExprHash() != 0) { + return ENCChecker::GetBoundaryLenExprCache(typeAttrs); + } + return nullptr; +} + +UniqueFEIRExpr ENCChecker::GetRealBoundaryLenExprInFunc(const UniqueFEIRExpr &lenExpr, const ASTFunc &astFunc, + const ASTCallExpr &astCallExpr) { + if (lenExpr == nullptr) { + return nullptr; + } + if (lenExpr->GetKind() == kExprBinary) { + FEIRExprBinary *mulExpr = static_cast(lenExpr.get()); + UniqueFEIRExpr leftExpr = GetRealBoundaryLenExprInFunc(mulExpr->GetOpnd0(), astFunc, astCallExpr); + if (leftExpr != nullptr) { + mulExpr->SetOpnd0(std::move(leftExpr)); + } else { + return nullptr; + } + UniqueFEIRExpr rightExpr = GetRealBoundaryLenExprInFunc(mulExpr->GetOpnd1(), astFunc, astCallExpr); + if (rightExpr != nullptr) { + mulExpr->SetOpnd1(std::move(rightExpr)); + } else { + return nullptr; + } + } + if (lenExpr->GetKind() == kExprUnary) { + FEIRExprUnary *cvtExpr = static_cast(lenExpr.get()); + UniqueFEIRExpr subExpr = GetRealBoundaryLenExprInFunc(cvtExpr->GetOpnd(), astFunc, astCallExpr); + if (subExpr != nullptr) { + cvtExpr->SetOpnd(std::move(subExpr)); + } else { + return nullptr; + } + } + if (lenExpr->GetKind() == kExprIRead) { + FEIRExprIRead *ireadExpr = static_cast(lenExpr.get()); + UniqueFEIRExpr subExpr = GetRealBoundaryLenExprInFunc(ireadExpr->GetClonedOpnd(), astFunc, astCallExpr); + if (subExpr != nullptr) { + ireadExpr->SetClonedOpnd(std::move(subExpr)); + } else { + return 
nullptr; + } + } + // formal parameter length expr -> actual parameter expr + std::list nullStmts; + if (lenExpr->GetKind() == kExprDRead) { + std::string lenName = lenExpr->GetVarUses().front()->GetNameRaw(); + for (size_t i = 0; i < astFunc.GetParamDecls().size(); ++i) { + if (lenName == astFunc.GetParamDecls()[i]->GetName()) { + return astCallExpr.GetArgsExpr()[i]->Emit2FEExpr(nullStmts); + } + } + // the var may be a global var + FE_INFO_LEVEL(FEOptions::kDumpLevelInfo, "%s:%d EnhanceC warning: The var [%s] as boundary length var " + "is not found in the caller func [%s], check the caller whether the var is the actual parameter", + FEManager::GetModule().GetFileNameFromFileNum(astCallExpr.GetSrcFileIdx()).c_str(), + astCallExpr.GetSrcFileLineNum(), lenName.c_str(), astFunc.GetName().c_str()); + } + return lenExpr->Clone(); +} + +UniqueFEIRExpr ENCChecker::GetRealBoundaryLenExprInField(const UniqueFEIRExpr &lenExpr, MIRStructType &baseType, + const UniqueFEIRExpr &dstExpr) { + if (lenExpr == nullptr) { + return nullptr; + } + if (lenExpr->GetKind() == kExprBinary) { + FEIRExprBinary *mulExpr = static_cast(lenExpr.get()); + UniqueFEIRExpr leftExpr = GetRealBoundaryLenExprInField(mulExpr->GetOpnd0(), baseType, dstExpr); + if (leftExpr != nullptr) { + mulExpr->SetOpnd0(std::move(leftExpr)); + } else { + return nullptr; + } + UniqueFEIRExpr rightExpr = GetRealBoundaryLenExprInField(mulExpr->GetOpnd1(), baseType, dstExpr); + if (rightExpr != nullptr) { + mulExpr->SetOpnd1(std::move(rightExpr)); + } else { + return nullptr; + } + } + if (lenExpr->GetKind() == kExprUnary) { + FEIRExprUnary *cvtExpr = static_cast(lenExpr.get()); + UniqueFEIRExpr subExpr = GetRealBoundaryLenExprInField(cvtExpr->GetOpnd(), baseType, dstExpr); + if (subExpr != nullptr) { + cvtExpr->SetOpnd(std::move(subExpr)); + } else { + return nullptr; + } + } + // boundary length expr -> actual dread/iread length field expr + if (lenExpr->GetKind() == kExprDRead) { + std::string lenName = lenExpr->GetVarUses().front()->GetNameRaw(); + uint32 fieldID = 0; + bool flag = FEManager::GetMIRBuilder().TraverseToNamedField( + baseType, GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(lenName), fieldID); + if (!flag) { + return nullptr; + } + MIRType *reType = FEUtils::GetStructFieldType(&baseType, static_cast(fieldID)); + UniqueFEIRType reFEType = FEIRTypeHelper::CreateTypeNative(*reType); + if (dstExpr->GetKind() == kExprDRead) { + return FEIRBuilder::CreateExprDReadAggField( + dstExpr->GetVarUses().front()->Clone(), static_cast(fieldID), std::move(reFEType)); + } else if (dstExpr->GetKind() == kExprIRead) { + FEIRExprIRead *iread = static_cast(dstExpr.get()); + return FEIRBuilder::CreateExprIRead( + std::move(reFEType), iread->GetClonedPtrType(), iread->GetClonedOpnd(), static_cast(fieldID)); + } else { + return nullptr; + } + } + return lenExpr->Clone(); +} + +// --------------------------- +// process boundary var and checking for ast func arg and return +// --------------------------- +std::list ASTFunc::InitArgsBoundaryVar(MIRFunction &mirFunc) const { + std::list stmts; + if (!FEOptions::GetInstance().IsBoundaryCheckDynamic() || compound == nullptr) { + return stmts; + } + for (auto paramDecl : paramDecls) { + ASTExpr *lenExpr = paramDecl->GetBoundaryLenExpr(); + if (lenExpr == nullptr) { + continue; + } + UniqueFEIRExpr lenFEExpr = lenExpr->Emit2FEExpr(stmts); + ENCChecker::InitBoundaryVar(mirFunc, *paramDecl, std::move(lenFEExpr), stmts); + } + return stmts; +} + +void ASTFunc::InsertBoundaryCheckingInRet(std::list 
&stmts) const { + if (!FEOptions::GetInstance().IsBoundaryCheckDynamic() || boundary.lenExpr == nullptr || + stmts.size() == 0 || stmts.back()->GetKind() != kStmtReturn) { + return; + } + std::list nullStmts; + const UniqueFEIRExpr &retExpr = static_cast(stmts.back().get())->GetExpr(); + UniqueFEIRExpr baseExpr = ENCChecker::FindBaseExprInPointerOperation(retExpr); + if (baseExpr == nullptr) { + return; + } + std::list exprs; + UniqueFEIRExpr lenExpr = boundary.lenExpr->Emit2FEExpr(nullStmts); + if (boundary.lenParamIdx != -1) { // backup return boundary size at the head of func body + UniqueFEIRVar retSizeVar = FEIRBuilder::CreateVarNameForC("_boundary.return.size", lenExpr->GetType()->Clone()); + UniqueFEIRStmt lenStmt = FEIRBuilder::CreateStmtDAssign(retSizeVar->Clone(), lenExpr->Clone()); + stmts.emplace_front(std::move(lenStmt)); + lenExpr = FEIRBuilder::CreateExprDRead(std::move(retSizeVar)); + } + lenExpr = FEIRBuilder::CreateExprBinary(OP_add, retExpr->Clone(), std::move(lenExpr)); + exprs.emplace_back(std::move(lenExpr)); + exprs.emplace_back(std::move(baseExpr)); + UniqueFEIRStmt stmt = std::make_unique(OP_returnassertle, std::move(exprs)); + stmt->SetSrcLoc(stmts.back()->GetSrcLoc()); + stmts.insert(--stmts.cend(), std::move(stmt)); +} + +void ENCChecker::InsertBoundaryAssignChecking(MIRBuilder &mirBuilder, std::list &ans, + const UniqueFEIRExpr &srcExpr, const Loc &loc) { + if (!FEOptions::GetInstance().IsBoundaryCheckDynamic() || srcExpr == nullptr || srcExpr->GetPrimType() != PTY_ptr || + srcExpr->GetKind() != kExprBinary) { // pointer computed assignment + return; + } + if (srcExpr->IsBoundaryChecking()) { // skip if boundary checking has been generated + return; + } + UniqueFEIRExpr baseExpr = FindBaseExprInPointerOperation(srcExpr); + if (baseExpr == nullptr) { + return; + } + srcExpr->SetIsBoundaryChecking(true); + // insert l-value lower boundary checking + std::list lowerExprs; + lowerExprs.emplace_back(srcExpr->Clone()); + lowerExprs.emplace_back(baseExpr->Clone()); + UniqueFEIRStmt lowerStmt = std::make_unique(OP_calcassertge, std::move(lowerExprs)); + lowerStmt->SetSrcLoc(loc); + std::list lowerStmts = lowerStmt->GenMIRStmts(mirBuilder); + ans.splice(ans.end(), lowerStmts); + // insert l-value upper boundary checking + std::list upperExprs; + upperExprs.emplace_back(srcExpr->Clone()); + upperExprs.emplace_back(baseExpr->Clone()); + UniqueFEIRStmt upperStmt = std::make_unique(OP_calcassertlt, std::move(upperExprs)); + upperStmt->SetSrcLoc(loc); + std::list upperStmts = upperStmt->GenMIRStmts(mirBuilder); + ans.splice(ans.end(), upperStmts); +} + +UniqueFEIRStmt ENCChecker::InsertBoundaryLEChecking(UniqueFEIRExpr lenExpr, const UniqueFEIRExpr &srcExpr, + const UniqueFEIRExpr &dstExpr) { + UniqueFEIRExpr baseExpr = ENCChecker::FindBaseExprInPointerOperation(srcExpr); + if (baseExpr == nullptr) { + return nullptr; + } + if (dstExpr->Hash() == baseExpr->Hash()) { // skip self-assignment, e.g.
p++; + return nullptr; + } + UniqueFEIRExpr boundaryExpr = FEIRBuilder::CreateExprBinary( // use r-value base + offset + OP_add, srcExpr->Clone(), std::move(lenExpr)); + std::list exprs; + exprs.emplace_back(std::move(boundaryExpr)); + exprs.emplace_back(std::move(baseExpr)); + return std::make_unique(OP_assignassertle, std::move(exprs)); +} + +UniqueFEIRExpr ENCChecker::GetBoundaryLenExprCache(uint32 hash) { + if (hash == 0) { + return nullptr; + } else { + return FEManager::GetTypeManager().GetBoundaryLenExprFromMap(hash); + } +} + +UniqueFEIRExpr ENCChecker::GetBoundaryLenExprCache(const TypeAttrs &attr) { + return GetBoundaryLenExprCache(attr.GetAttrBoundary().GetLenExprHash()); +} + +UniqueFEIRExpr ENCChecker::GetBoundaryLenExprCache(const FieldAttrs &attr) { + return GetBoundaryLenExprCache(attr.GetAttrBoundary().GetLenExprHash()); +} + +void ENCChecker::InsertBoundaryInAtts(TypeAttrs &attr, const BoundaryInfo &boundary) { + attr.GetAttrBoundary().SetIsBytedLen(boundary.isBytedLen); + if (boundary.lenParamIdx != -1) { + attr.GetAttrBoundary().SetLenParamIdx(boundary.lenParamIdx); + } + if (boundary.lenExpr == nullptr) { + return; + } + std::list nullStmts; + UniqueFEIRExpr lenExpr = boundary.lenExpr->Emit2FEExpr(nullStmts); + CheckLenExpr(*boundary.lenExpr, nullStmts); + InsertBoundaryLenExprInAtts(attr, lenExpr); +} + +void ENCChecker::InsertBoundaryLenExprInAtts(TypeAttrs &attr, const UniqueFEIRExpr &expr) { + if (expr == nullptr) { + return; + } + uint32 hash = expr->Hash(); + FEManager::GetTypeManager().InsertBoundaryLenExprHashMap(hash, expr->Clone()); // save expr cache + attr.GetAttrBoundary().SetLenExprHash(hash); +} + +void ENCChecker::InsertBoundaryInAtts(FieldAttrs &attr, const BoundaryInfo &boundary) { + attr.GetAttrBoundary().SetIsBytedLen(boundary.isBytedLen); + if (boundary.lenParamIdx != -1) { + attr.GetAttrBoundary().SetLenParamIdx(boundary.lenParamIdx); + } + if (boundary.lenExpr == nullptr) { + return; + } + std::list nullStmts; + UniqueFEIRExpr lenExpr = boundary.lenExpr->Emit2FEExpr(nullStmts); + uint32 hash = lenExpr->Hash(); + FEManager::GetTypeManager().InsertBoundaryLenExprHashMap(hash, std::move(lenExpr)); // save expr cache + attr.GetAttrBoundary().SetLenExprHash(hash); +} + +void ENCChecker::InsertBoundaryInAtts(FuncAttrs &attr, const BoundaryInfo &boundary) { + attr.GetAttrBoundary().SetIsBytedLen(boundary.isBytedLen); + if (boundary.lenParamIdx != -1) { + attr.GetAttrBoundary().SetLenParamIdx(boundary.lenParamIdx); + } + if (boundary.lenExpr == nullptr) { + return; + } + std::list nullStmts; + UniqueFEIRExpr lenExpr = boundary.lenExpr->Emit2FEExpr(nullStmts); + uint32 hash = lenExpr->Hash(); + FEManager::GetTypeManager().InsertBoundaryLenExprHashMap(hash, std::move(lenExpr)); // save expr cache + attr.GetAttrBoundary().SetLenExprHash(hash); +} + +// --------------------------- +// process boundary var and checking in stmt of ast func +// --------------------------- +void FEIRStmtDAssign::AssignBoundaryVarAndChecking(MIRBuilder &mirBuilder, std::list &ans) const { + if (!FEOptions::GetInstance().IsBoundaryCheckDynamic() || expr->GetPrimType() != PTY_ptr) { + return; + } + const std::string prefix = "_boundary."; + if (var->GetNameRaw().compare(0, prefix.size(), prefix) == 0) { + return; + } + // insert assign boundary checking for computed r-value + ENCChecker::InsertBoundaryAssignChecking(mirBuilder, ans, expr, loc); + + UniqueFEIRExpr dstExpr = nullptr; + UniqueFEIRExpr lenExpr = nullptr; + if (fieldID == 0) { + dstExpr = 
FEIRBuilder::CreateExprDRead(var->Clone()); + MIRSymbol *dstSym = var->GenerateMIRSymbol(mirBuilder); + // Get the boundary attr(i.e. boundary length expr cache) of var + lenExpr = ENCChecker::GetBoundaryLenExprCache(dstSym->GetAttrs()); + } else { + FieldID tmpID = fieldID; + MIRStructType *structType = static_cast(var->GetType()->GenerateMIRTypeAuto()); + FieldPair fieldPair = structType->TraverseToFieldRef(tmpID); + UniqueFEIRType fieldType = FEIRTypeHelper::CreateTypeNative( + *GlobalTables::GetTypeTable().GetTypeFromTyIdx(fieldPair.second.first)); + dstExpr = FEIRBuilder::CreateExprDReadAggField(var->Clone(), fieldID, std::move(fieldType)); + // Get the boundary attr(i.e. boundary length expr cache) of field + lenExpr = ENCChecker::GetBoundaryLenExprCache(fieldPair.second.second); + if (lenExpr != nullptr) { + UniqueFEIRExpr realLenExpr = ENCChecker::GetRealBoundaryLenExprInField( + lenExpr->Clone(), *structType, dstExpr); // lenExpr needs to be cloned + if (realLenExpr != nullptr) { + lenExpr = std::move(realLenExpr); + } + } + } + if (lenExpr != nullptr) { + UniqueFEIRStmt stmt = ENCChecker::InsertBoundaryLEChecking(lenExpr->Clone(), expr, dstExpr); + if (stmt != nullptr) { + stmt->SetSrcLoc(loc); + std::list stmtnodes = stmt->GenMIRStmts(mirBuilder); + ans.insert(ans.cend(), stmtnodes.cbegin(), stmtnodes.cend()); + } + } + ENCChecker::AssignBoundaryVar(mirBuilder, dstExpr, expr, lenExpr, ans); +} + +void FEIRStmtIAssign::AssignBoundaryVarAndChecking(MIRBuilder &mirBuilder, std::list &ans) const { + if (!FEOptions::GetInstance().IsBoundaryCheckDynamic() || fieldID == 0 || baseExpr->GetPrimType() != PTY_ptr) { + return; + } + // insert assign boundary checking for computed r-value + ENCChecker::InsertBoundaryAssignChecking(mirBuilder, ans, baseExpr, loc); + + MIRType *baseType = static_cast(addrType->GenerateMIRTypeAuto())->GetPointedType(); + FieldID tmpID = fieldID; + FieldPair fieldPair = static_cast(baseType)->TraverseToFieldRef(tmpID); + MIRType *dstType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(fieldPair.second.first); + UniqueFEIRExpr dstExpr = FEIRBuilder::CreateExprIRead( + FEIRTypeHelper::CreateTypeNative(*dstType), addrType->Clone(), addrExpr->Clone(), fieldID); + // Get the boundary attr (i.e. 
boundary length expr cache) of field + UniqueFEIRExpr lenExpr = ENCChecker::GetBoundaryLenExprCache(fieldPair.second.second); + if (lenExpr != nullptr) { + UniqueFEIRExpr realLenExpr = ENCChecker::GetRealBoundaryLenExprInField( + lenExpr->Clone(), *static_cast(baseType), dstExpr); // lenExpr needs to be cloned + if (realLenExpr != nullptr) { + lenExpr = std::move(realLenExpr); + } + UniqueFEIRStmt stmt = ENCChecker::InsertBoundaryLEChecking(lenExpr->Clone(), baseExpr, dstExpr); + if (stmt != nullptr) { + stmt->SetSrcLoc(loc); + std::list stmtnodes = stmt->GenMIRStmts(mirBuilder); + ans.insert(ans.cend(), stmtnodes.cbegin(), stmtnodes.cend()); + } + } + ENCChecker::AssignBoundaryVar(mirBuilder, dstExpr, baseExpr, lenExpr, ans); +} + +void ENCChecker::CheckBoundaryLenFinalAssign(MIRBuilder &mirBuilder, const UniqueFEIRVar &var, FieldID fieldID, + const Loc &loc) { + if (!FEOptions::GetInstance().IsBoundaryCheckDynamic() || !FEOptions::GetInstance().IsEnableSafeRegion()) { + return; + } + bool isUnsafe = mirBuilder.GetCurrentFunctionNotNull()->GetAttr(FUNCATTR_unsafed); + if (!FEManager::GetCurrentFEFunction().GetSafeRegionFlag().empty()) { + isUnsafe = !FEManager::GetCurrentFEFunction().GetSafeRegionFlag().top(); + } + if (isUnsafe) { // no warning in unsafe region + return; + } + if (fieldID == 0) { + MIRSymbol *dstSym = var->GenerateMIRSymbol(mirBuilder); + if (dstSym->GetAttr(ATTR_final_boundary_size)) { + FE_WARN(kLncWarn, loc, "this var specified as the global or field boundary length is " + "assigned or has its address taken. [Use __Unsafe__ to eliminate the warning]"); + } + } else { + FieldID tmpID = fieldID; + MIRStructType *structType = static_cast(var->GetType()->GenerateMIRTypeAuto()); + FieldPair fieldPair = structType->TraverseToFieldRef(tmpID); + if (fieldPair.second.second.GetAttr(FLDATTR_final_boundary_size)) { + FE_WARN(kLncWarn, loc, "this field specified as the global or field boundary length is " + "assigned or has its address taken. [Use __Unsafe__ to eliminate the warning]"); + } + } +} + +void ENCChecker::CheckBoundaryLenFinalAssign(MIRBuilder &mirBuilder, const UniqueFEIRType &addrType, FieldID fieldID, + const Loc &loc) { + if (!FEOptions::GetInstance().IsBoundaryCheckDynamic() || !FEOptions::GetInstance().IsEnableSafeRegion() || + fieldID == 0) { + return; + } + bool isUnsafe = mirBuilder.GetCurrentFunctionNotNull()->GetAttr(FUNCATTR_unsafed); + if (!FEManager::GetCurrentFEFunction().GetSafeRegionFlag().empty()) { + isUnsafe = !FEManager::GetCurrentFEFunction().GetSafeRegionFlag().top(); + } + if (isUnsafe) { // no warning in unsafe region + return; + } + MIRType *baseType = static_cast(addrType->GenerateMIRTypeAuto())->GetPointedType(); + FieldID tmpID = fieldID; + FieldPair fieldPair = static_cast(baseType)->TraverseToFieldRef(tmpID); + if (fieldPair.second.second.GetAttr(FLDATTR_final_boundary_size)) { + FE_WARN(kLncWarn, loc, "this field specified as the global or field boundary length is " + "assigned or has its address taken.
[Use __Unsafe__ to eliminate the warning]"); + } +} + +void ENCChecker::CheckBoundaryLenFinalAddr(MIRBuilder &mirBuilder, const UniqueFEIRExpr &expr, const Loc &loc) { + if (!FEOptions::GetInstance().IsBoundaryCheckDynamic() || !FEOptions::GetInstance().IsEnableSafeRegion()) { + return; + } + if (expr->GetKind() == kExprAddrofVar) { + UniqueFEIRVar var = expr->GetVarUses().front()->Clone(); + CheckBoundaryLenFinalAssign(mirBuilder, var, expr->GetFieldID(), loc); + } else if (expr->GetKind() == kExprIAddrof) { + auto *iaddrof = static_cast(expr.get()); + CheckBoundaryLenFinalAssign(mirBuilder, iaddrof->GetClonedPtrType(), expr->GetFieldID(), loc); + } +} + +MapleVector ENCChecker::ReplaceBoundaryChecking(MIRBuilder &mirBuilder, const FEIRStmtNary *stmt) { + MIRFunction *curFunction = mirBuilder.GetCurrentFunctionNotNull(); + UniqueFEIRExpr leftExpr = stmt->GetArgExprs().front()->Clone(); + UniqueFEIRExpr rightExpr = stmt->GetArgExprs().back()->Clone(); + BaseNode *leftNode = nullptr; + BaseNode *rightNode = nullptr; + MIRType *arrType = ENCChecker::GetArrayTypeFromExpr(stmt->GetArgExprs().back()); + MapleVector args(mirBuilder.GetCurrentFuncCodeMpAllocator()->Adapter()); + Opcode op = stmt->GetOP(); + if (arrType != nullptr) { // var must be array type by the previous checking + if (arrType->GetSize() == 0) { // unsized array + return args; + } + // Convert to the following node for array: + // assertge/assertlt lnode: addrofarray + index; assertle lnode: (attributed upper boundary) addrofarray + len expr + // assertge rnode: addrof array; assertlt/assertle rnode: addrof array + sizeof expr + if (kOpcodeInfo.IsAssertUpperBoundary(op)) { + rightExpr = FEIRBuilder::CreateExprBinary( + OP_add, std::move(rightExpr), std::make_unique(arrType->GetSize(), PTY_ptr)); + } + leftNode = leftExpr->GenMIRNode(mirBuilder); + rightNode = rightExpr->GenMIRNode(mirBuilder); + } else { + // Convert to the following node for expr with boundary var: + // assertge/assertlt lnode: addrof base + index; assertle lnode: (attributed upper boundary) addrof base + len expr + // assertge rnode: lower boundary; assertlt/assertle rnode: upper boundary + auto it = FEManager::GetCurrentFEFunction().GetBoundaryMap().find(rightExpr->Hash()); + UniqueFEIRExpr lenExpr = ENCChecker::GetGlobalOrFieldLenExprInExpr(mirBuilder, rightExpr); + if (it != FEManager::GetCurrentFEFunction().GetBoundaryMap().end()) { + if (lenExpr == nullptr && IsGlobalVarInExpr(rightExpr)) { + return args; // skip boundary checking for global var whithout boundary attr, + } // because the lack of global variable boundary analysis capabilities in mapleall + StIdx boundaryStIdx = kOpcodeInfo.IsAssertLowerBoundary(op) ? 
it->second.first : it->second.second; + MIRSymbol *boundarySym = curFunction->GetLocalOrGlobalSymbol(boundaryStIdx); + CHECK_NULL_FATAL(boundarySym); + rightNode = mirBuilder.CreateExprDread(*boundarySym); + } else { + if (lenExpr != nullptr) { // a global var or field with boundary attr + if (kOpcodeInfo.IsAssertUpperBoundary(op)) { + rightExpr = FEIRBuilder::CreateExprBinary(OP_add, std::move(rightExpr), std::move(lenExpr)); + } + rightNode = rightExpr->GenMIRNode(mirBuilder); + } else { + ReplaceBoundaryErr(mirBuilder, stmt); + return args; + } + } + leftNode = leftExpr->GenMIRNode(mirBuilder); + } + args.emplace_back(leftNode); + args.emplace_back(rightNode); + return args; +} + +void ENCChecker::ReplaceBoundaryErr(const MIRBuilder &mirBuilder, const FEIRStmtNary *stmt) { + if (ENCChecker::IsUnsafeRegion(mirBuilder)) { + return; + } + Opcode op = stmt->GetOP(); + if (op == OP_callassertle) { + auto callAssert = static_cast(stmt); + FE_ERR(kLncErr, stmt->GetSrcLoc(), "boundaryless pointer passed to %s that requires a boundary pointer for " + "the %s argument", callAssert->GetFuncName().c_str(), + ENCChecker::GetNthStr(callAssert->GetParamIndex()).c_str()); + } else if (op == OP_returnassertle) { + MIRFunction *curFunction = mirBuilder.GetCurrentFunctionNotNull(); + if (curFunction->GetName() != kBoundsBuiltFunc) { + FE_ERR(kLncErr, stmt->GetSrcLoc(), "boundaryless pointer returned from %s that requires a boundary pointer", + curFunction->GetName().c_str()); + } + } else if (op == OP_assignassertle) { + FE_ERR(kLncErr, stmt->GetSrcLoc(), "r-value requires a boundary pointer"); + } else if (ENCChecker::IsSafeRegion(mirBuilder) && + (op == OP_calcassertge || + (op == OP_assertge && static_cast(stmt)->IsComputable()))) { + FE_ERR(kLncErr, stmt->GetSrcLoc(), "calculation with pointer requires bounds in safe region"); + } +} + +bool ASTArraySubscriptExpr::InsertBoundaryChecking(std::list &stmts, + UniqueFEIRExpr indexExpr, UniqueFEIRExpr baseAddrFEExpr) const { + if (!FEOptions::GetInstance().IsBoundaryCheckDynamic()) { + return false; + } + if (arrayType->GetKind() == MIRTypeKind::kTypeArray) { + // checking all types of indexes, including constant indexes + while (baseAddrFEExpr != nullptr && baseAddrFEExpr->GetKind() == kExprAddrofArray) { + baseAddrFEExpr = static_cast(baseAddrFEExpr.get())->GetExprArray()->Clone(); + } + } else { + baseAddrFEExpr = ENCChecker::FindBaseExprInPointerOperation(baseAddrFEExpr); + if (baseAddrFEExpr == nullptr) { + return false; + } + } + // peel nested boundary checking in a multi-dimensional array + ENCChecker::PeelNestedBoundaryChecking(stmts, baseAddrFEExpr); + // insert lower boundary checking, baseExpr will be replaced by lower boundary var when FEIRStmtNary GenMIRStmts + std::list lowerExprs; + lowerExprs.emplace_back(indexExpr->Clone()); + lowerExprs.emplace_back(baseAddrFEExpr->Clone()); + auto lowerStmt = std::make_unique(OP_assertge, std::move(lowerExprs)); + lowerStmt->SetIsComputable(true); + lowerStmt->SetSrcLoc(loc); + stmts.emplace_back(std::move(lowerStmt)); + // insert upper boundary checking, baseExpr will be replaced by upper boundary var when FEIRStmtNary GenMIRStmts + std::list upperExprs; + upperExprs.emplace_back(std::move(indexExpr)); + upperExprs.emplace_back(std::move(baseAddrFEExpr)); + auto upperStmt = std::make_unique(OP_assertlt, std::move(upperExprs)); + upperStmt->SetIsComputable(true); + upperStmt->SetSrcLoc(loc); + stmts.emplace_back(std::move(upperStmt)); + return true; +} + +bool
ASTUODerefExpr::InsertBoundaryChecking(std::list &stmts, UniqueFEIRExpr expr) const { + if (!FEOptions::GetInstance().IsBoundaryCheckDynamic()) { + return false; + } + UniqueFEIRExpr baseExpr = ENCChecker::FindBaseExprInPointerOperation(expr); + if (baseExpr == nullptr) { + return false; + } + // peel nested boundary checking in a multi-dereference + ENCChecker::PeelNestedBoundaryChecking(stmts, baseExpr); + // insert lower boundary checking, baseExpr will be replaced by lower boundary var when FEIRStmtNary GenMIRStmts + std::list lowerExprs; + lowerExprs.emplace_back(expr->Clone()); + lowerExprs.emplace_back(baseExpr->Clone()); + auto lowerStmt = std::make_unique(OP_assertge, std::move(lowerExprs)); + lowerStmt->SetIsComputable(expr->GetKind() == kExprBinary); + lowerStmt->SetSrcLoc(loc); + stmts.emplace_back(std::move(lowerStmt)); + // insert upper boundary checking, baseExpr will be replaced by upper boundary var when FEIRStmtNary GenMIRStmts + std::list upperExprs; + upperExprs.emplace_back(expr->Clone()); + upperExprs.emplace_back(std::move(baseExpr)); + auto upperStmt = std::make_unique(OP_assertlt, std::move(upperExprs)); + upperStmt->SetIsComputable(expr->GetKind() == kExprBinary); + upperStmt->SetSrcLoc(loc); + stmts.emplace_back(std::move(upperStmt)); + return true; +} + +void ENCChecker::ReduceBoundaryChecking(std::list &stmts, const UniqueFEIRExpr &expr) { + // assert* --> calcassert*, when addrof the dereference, e.g. &arr[i] + if (!FEOptions::GetInstance().IsBoundaryCheckDynamic()) { + return; + } + std::list::iterator iter = stmts.begin(); + for (; iter != stmts.end(); ++iter) { + if ((*iter)->GetKind() != kStmtNary) { + continue; + } + FEIRStmtNary *nary = static_cast((*iter).get()); + if (nary->GetOP() != OP_assertge || + nary->GetArgExprs().front()->Hash() != expr->Hash()) { // addrof expr and index expr of checking are consistent + continue; + } + nary->SetOP(OP_calcassertge); + std::list::iterator nextedIter = std::next(iter, 1); + if (nextedIter != stmts.end() && (*nextedIter)->GetKind() == kStmtNary) { + FEIRStmtNary *nextedNary = static_cast((*nextedIter).get()); + if (nextedNary->GetOP() == OP_assertlt) { + nextedNary->SetOP(OP_calcassertlt); + } + } + break; + } +} + +// --------------------------- +// caller boundary inserting and checking +// --------------------------- +void ASTCallExpr::InsertBoundaryCheckingInArgs(std::list &stmts) const { + if (!FEOptions::GetInstance().IsBoundaryCheckDynamic() || funcDecl == nullptr) { + return; + } + std::list nullStmts; + for (size_t i = 0; i < funcDecl->GetParamDecls().size(); ++i) { + ASTDecl *paramDecl = funcDecl->GetParamDecls()[i]; + ASSERT_NOT_NULL(paramDecl); + if (paramDecl->GetBoundaryLenExpr() == nullptr) { + continue; + } + UniqueFEIRExpr realLenExpr = ENCChecker::GetRealBoundaryLenExprInFunc( + paramDecl->GetBoundaryLenExpr()->Emit2FEExpr(stmts), *funcDecl, *this); + if (realLenExpr == nullptr) { + continue; + } + UniqueFEIRExpr argExpr = args[i]->Emit2FEExpr(nullStmts); + UniqueFEIRExpr baseExpr = ENCChecker::FindBaseExprInPointerOperation(argExpr); + if (baseExpr == nullptr) { + continue; + } + std::list exprs; + realLenExpr = FEIRBuilder::CreateExprBinary(OP_add, std::move(argExpr), std::move(realLenExpr)); + exprs.emplace_back(std::move(realLenExpr)); + exprs.emplace_back(std::move(baseExpr)); + UniqueFEIRStmt stmt = std::make_unique(OP_callassertle, std::move(exprs), + GetFuncName(), i); + stmt->SetSrcLoc(loc); + stmts.emplace_back(std::move(stmt)); + } +} + +void
ASTCallExpr::InsertBoundaryCheckingInArgsForICall(std::list &stmts, + const UniqueFEIRExpr &calleeFEExpr) const { + if (!FEOptions::GetInstance().IsBoundaryCheckDynamic() || calleeFEExpr == nullptr) { + return; + } + const MIRFuncType *funcType = FEUtils::GetFuncPtrType(*calleeFEExpr->GetType()->GenerateMIRType()); + if (funcType == nullptr) { + return; + } + const std::vector &attrsVec = funcType->GetParamAttrsList(); + const std::vector typesVec = funcType->GetParamTypeList(); + std::list nullStmts; + for (size_t i = 0; i < attrsVec.size() && i < typesVec.size() && i < args.size(); ++i) { + MIRType *ptrType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(typesVec[i]); + UniqueFEIRExpr lenExpr = ENCChecker::GetRealBoundaryLenExprInFuncByIndex(attrsVec[i], *ptrType, *this); + if (lenExpr == nullptr) { + continue; + } + UniqueFEIRExpr argExpr = args[i]->Emit2FEExpr(nullStmts); + UniqueFEIRExpr baseExpr = ENCChecker::FindBaseExprInPointerOperation(argExpr); + if (baseExpr == nullptr) { + continue; + } + std::list exprs; + lenExpr = FEIRBuilder::CreateExprBinary(OP_add, std::move(argExpr), std::move(lenExpr)); + exprs.emplace_back(std::move(lenExpr)); + exprs.emplace_back(std::move(baseExpr)); + UniqueFEIRStmt stmt = std::make_unique( + OP_callassertle, std::move(exprs), "function_pointer", i); + stmt->SetSrcLoc(loc); + stmts.emplace_back(std::move(stmt)); + } +} + +void ASTCallExpr::InsertBoundaryVarInRet(std::list &stmts) const { + if (!FEOptions::GetInstance().IsBoundaryCheckDynamic() || !IsNeedRetExpr()) { + return; + } + std::list nullStmts; + UniqueFEIRExpr realLenExpr = nullptr; + if (funcDecl != nullptr && funcDecl->GetBoundaryLenExpr() != nullptr) { // call + realLenExpr = ENCChecker::GetRealBoundaryLenExprInFunc( + funcDecl->GetBoundaryLenExpr()->Emit2FEExpr(stmts), *funcDecl, *this); + } else if (isIcall && calleeExpr != nullptr) { // icall + const MIRFuncType *funcType = FEUtils::GetFuncPtrType( + *calleeExpr->Emit2FEExpr(nullStmts)->GetType()->GenerateMIRType()); + if (funcType != nullptr) { + MIRType *ptrType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(funcType->GetRetTyIdx()); + if (ptrType != nullptr) { + realLenExpr = ENCChecker::GetRealBoundaryLenExprInFuncByIndex(funcType->GetRetAttrs(), *ptrType, *this); + } + } + } + if (realLenExpr == nullptr) { + return; + } + // GetCurrentFunction need to be optimized when parallel features + MIRFunction *curFunction = FEManager::GetMIRBuilder().GetCurrentFunctionNotNull(); + ENCChecker::InitBoundaryVar(*curFunction, GetRetVarName(), *mirType, std::move(realLenExpr), stmts); +} + +bool ENCChecker::IsSameBoundary(const AttrBoundary &arg1, const AttrBoundary &arg2) { + if (arg1.IsBytedLen() != arg2.IsBytedLen()) { + return false; + } + if (arg1.GetLenExprHash() != 0 && arg1.GetLenExprHash() == arg2.GetLenExprHash()) { + return true; + } + if (arg1.GetLenParamIdx() != -1 && arg1.GetLenParamIdx() == arg2.GetLenParamIdx()) { + return true; + } + if (arg1.GetLenExprHash() == arg2.GetLenExprHash() && arg1.GetLenParamIdx() == arg2.GetLenParamIdx()) { + return true; + } + return false; +} + +void ENCChecker::CheckBoundaryArgsAndRetForFuncPtr(const MIRType &dstType, const UniqueFEIRExpr &srcExpr, + const Loc &loc) { + if (!FEOptions::GetInstance().IsBoundaryCheckDynamic() || !srcExpr->IsEnhancedChecking()) { + return; + } + const MIRFuncType *funcType = FEUtils::GetFuncPtrType(dstType); + if (funcType == nullptr) { + return; + } + if (srcExpr->GetKind() == kExprAddrofFunc) { // check func ptr l-value and &func decl r-value + GStrIdx strIdx = 
GlobalTables::GetStrTable().GetOrCreateStrIdxFromName( + static_cast(srcExpr.get())->GetFuncAddr()); + MIRFunction *srcFunc = FEManager::GetTypeManager().GetMIRFunction(strIdx, false); + CHECK_FATAL(srcFunc != nullptr, "can not get MIRFunction"); + std::list errIdxs; + for (size_t i = 0; i < srcFunc->GetParamSize() && i < funcType->GetParamAttrsList().size(); ++i) { + if (!IsSameBoundary( + srcFunc->GetNthParamAttr(i).GetAttrBoundary(), funcType->GetNthParamAttrs(i).GetAttrBoundary())) { + errIdxs.emplace_back(i); + } + } + if (!errIdxs.empty()) { + FE_ERR(kLncErr, loc, "function pointer and target function's boundary attributes are mismatched " + "for the %s argument", PrintParamIdx(errIdxs).c_str()); + } + if (!IsSameBoundary(srcFunc->GetFuncAttrs().GetAttrBoundary(), funcType->GetRetAttrs().GetAttrBoundary())) { + FE_ERR(kLncErr, loc, "function pointer and target function's boundary attributes are mismatched " + "for the return value"); + } + } + const MIRFuncType *srcFuncType = FEUtils::GetFuncPtrType(*srcExpr->GetType()->GenerateMIRTypeAuto()); + if (srcFuncType != nullptr) { // check func ptr l-value and func ptr r-value + std::list errIdxs; + for (size_t i = 0; i < srcFuncType->GetParamAttrsList().size() && i < funcType->GetParamAttrsList().size(); ++i) { + if (!IsSameBoundary( + srcFuncType->GetNthParamAttrs(i).GetAttrBoundary(), funcType->GetNthParamAttrs(i).GetAttrBoundary())) { + errIdxs.emplace_back(i); + } + } + if (!errIdxs.empty()) { + FE_ERR(kLncErr, loc, "function pointer's boundary attributes are mismatched for the %s argument", + PrintParamIdx(errIdxs).c_str()); + } + if (!IsSameBoundary(srcFuncType->GetRetAttrs().GetAttrBoundary(), funcType->GetRetAttrs().GetAttrBoundary())) { + FE_ERR(kLncErr, loc, "function pointer's boundary attributes are mismatched for the return value"); + } + } +} + +void FEIRStmtDAssign::CheckBoundaryArgsAndRetForFuncPtr(const MIRBuilder &mirBuilder) const { + if (!FEOptions::GetInstance().IsBoundaryCheckDynamic() || ENCChecker::IsUnsafeRegion(mirBuilder)) { + return; + } + MIRType *baseType = var->GetType()->GenerateMIRTypeAuto(); + if (fieldID != 0) { + baseType = FEUtils::GetStructFieldType(static_cast(baseType), fieldID); + } + ENCChecker::CheckBoundaryArgsAndRetForFuncPtr(*baseType, expr, loc); +} + +void FEIRStmtIAssign::CheckBoundaryArgsAndRetForFuncPtr(const MIRBuilder &mirBuilder, const MIRType &baseType) const { + if (!FEOptions::GetInstance().IsBoundaryCheckDynamic() || ENCChecker::IsUnsafeRegion(mirBuilder)) { + return; + } + MIRType *fieldType = FEUtils::GetStructFieldType(static_cast(&baseType), fieldID); + ENCChecker::CheckBoundaryArgsAndRetForFuncPtr(*fieldType, baseExpr, loc); +} + +// --------------------------- +// process safe region +// --------------------------- +bool ENCChecker::IsSafeRegion(const MIRBuilder &mirBuilder) { + bool isSafe = false; + if (FEOptions::GetInstance().IsEnableSafeRegion()) { + isSafe = mirBuilder.GetCurrentFunctionNotNull()->GetAttr(FUNCATTR_safed); + if (!FEManager::GetCurrentFEFunction().GetSafeRegionFlag().empty()) { + isSafe = FEManager::GetCurrentFEFunction().GetSafeRegionFlag().top(); + } + } + return isSafe; +} + +bool ENCChecker::IsUnsafeRegion(const MIRBuilder &mirBuilder) { + bool isUnsafe = false; + if (FEOptions::GetInstance().IsEnableSafeRegion()) { + isUnsafe = mirBuilder.GetCurrentFunctionNotNull()->GetAttr(FUNCATTR_unsafed); + if (!FEManager::GetCurrentFEFunction().GetSafeRegionFlag().empty()) { + isUnsafe = !FEManager::GetCurrentFEFunction().GetSafeRegionFlag().top(); + } + } + 
return isUnsafe; +} +} diff --git a/src/hir2mpl/common/src/fe_config_parallel.cpp b/src/hir2mpl/common/src/fe_config_parallel.cpp new file mode 100644 index 0000000000000000000000000000000000000000..980373dda4920b96f39474ce315c371d8bc3dbe1 --- /dev/null +++ b/src/hir2mpl/common/src/fe_config_parallel.cpp @@ -0,0 +1,22 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "fe_config_parallel.h" + +namespace maple { +FEConfigParallel FEConfigParallel::instance; + +FEConfigParallel::FEConfigParallel() + : nThread(0), enableParallel(false) {} +} // namespace maple \ No newline at end of file diff --git a/src/hir2mpl/common/src/fe_file_ops.cpp b/src/hir2mpl/common/src/fe_file_ops.cpp new file mode 100644 index 0000000000000000000000000000000000000000..067071980a47957c9928fa98b35f550dc32b4a11 --- /dev/null +++ b/src/hir2mpl/common/src/fe_file_ops.cpp @@ -0,0 +1,55 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "fe_file_ops.h" + +namespace maple { +std::string FEFileOps::GetFilePath(const std::string &pathName) { + size_t pos = pathName.find_last_of('/'); + if (pos == std::string::npos) { + return ""; + } else { + return pathName.substr(0, pos + 1); + } +} + +std::string FEFileOps::GetFileNameWithExt(const std::string &pathName) { + size_t pos = pathName.find_last_of('/'); + if (pos == std::string::npos) { + return pathName; + } else { + return pathName.substr(pos + 1); + } +} + +std::string FEFileOps::GetFileName(const std::string &pathName) { + std::string nameWithExt = GetFileNameWithExt(pathName); + size_t pos = nameWithExt.find_last_of("."); + if (pos == std::string::npos) { + return nameWithExt; + } else { + return nameWithExt.substr(0, pos); + } +} + +std::string FEFileOps::GetFileExtName(const std::string &pathName) { + std::string nameWithExt = GetFileNameWithExt(pathName); + size_t pos = nameWithExt.find_last_of("."); + if (pos == std::string::npos) { + return ""; + } else { + return nameWithExt.substr(pos + 1); + } +} +} // namespace maple \ No newline at end of file diff --git a/src/hir2mpl/common/src/fe_file_type.cpp b/src/hir2mpl/common/src/fe_file_type.cpp new file mode 100644 index 0000000000000000000000000000000000000000..65e4a594e8b2e06e5ca70bf547af2703a713b9c4 --- /dev/null +++ b/src/hir2mpl/common/src/fe_file_type.cpp @@ -0,0 +1,152 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. 
+ * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "fe_file_type.h" +#include +#include "mpl_logging.h" + +namespace maple { +FEFileType FEFileType::fileType; + +FEFileType::FEFileType() { + LoadDefault(); +} + +FEFileType::FileType FEFileType::GetFileTypeByExtName(const std::string &extName) const { + if (extName.empty()) { + WARN(kLncWarn, "Empty input for GetFileTypeByExtName()...skipped"); + return kUnknownType; + } + auto itExtNameType = mapExtNameType.find(extName); + if (itExtNameType == mapExtNameType.end()) { + WARN(kLncWarn, "Unknown file extension name %s...skipped", extName.c_str()); + return kUnknownType; + } + return itExtNameType->second; +} + +FEFileType::FileType FEFileType::GetFileTypeByPathName(const std::string &pathName) const { + std::string extName = GetExtName(pathName); + return GetFileTypeByExtName(extName); +} + +FEFileType::FileType FEFileType::GetFileTypeByMagicNumber(const std::string &pathName) const { + std::ifstream file(pathName); + if (!file.is_open()) { + ERR(kLncErr, "unable to open file %s", pathName.c_str()); + return kUnknownType; + } + uint32 magic = 0; + int lenght = static_cast(sizeof(uint32)); + (void)file.read(reinterpret_cast(&magic), lenght); + file.close(); + return GetFileTypeByMagicNumber(magic); +} + +FEFileType::FileType FEFileType::GetFileTypeByMagicNumber(BasicIOMapFile &file) const { + BasicIORead fileReader(file); + bool success = false; + uint32 magic = fileReader.ReadUInt32(success); + if (!success) { + ERR(kLncErr, "unable to open file %s", file.GetFileName().c_str()); + return kUnknownType; + } + return GetFileTypeByMagicNumber(magic); +} + +FEFileType::FileType FEFileType::GetFileTypeByMagicNumber(uint32 magic) const { + std::map::const_iterator it = mapMagicType.find(magic); + if (it != mapMagicType.end()) { + return it->second; + } else { + return kUnknownType; + } +} + +void FEFileType::Reset() { + mapExtNameType.clear(); + mapTypeMagic.clear(); + mapMagicType.clear(); +} + +void FEFileType::LoadDefault() { + Reset(); + RegisterExtName(kClass, "class"); + RegisterMagicNumber(kClass, kMagicClass); + RegisterExtName(kJar, "jar"); + RegisterMagicNumber(kJar, kMagicZip); + RegisterExtName(kDex, "dex"); + RegisterMagicNumber(kDex, kMagicDex); + RegisterExtName(kAST, "ast"); + RegisterMagicNumber(kAST, kMagicAST); + RegisterExtName(kMAST, "mast"); + RegisterMagicNumber(kMAST, kMagicMAST); +} + +void FEFileType::RegisterExtName(FileType argFileType, const std::string &extName) { + if (extName.empty() || argFileType == kUnknownType) { + WARN(kLncWarn, "Invalid input for RegisterMagicNumber()...skipped"); + return; + } + mapExtNameType[extName] = argFileType; + mapTypeMagic[argFileType] = 0; +} + +void FEFileType::RegisterMagicNumber(FileType argFileType, uint32 magicNumber) { + if (magicNumber == 0 || argFileType == kUnknownType) { + WARN(kLncWarn, "Invalid input for RegisterMagicNumber()...skipped"); + return; + } + mapTypeMagic[argFileType] = magicNumber; + mapMagicType[magicNumber] = argFileType; +} + +std::string FEFileType::GetPath(const std::string &pathName) { + size_t pos = pathName.find_last_of('/'); + if (pos != 
std::string::npos) { + return pathName.substr(0, pos + 1); + } else { + return ""; + } +} + +std::string FEFileType::GetName(const std::string &pathName, bool withExt) { + size_t pos = pathName.find_last_of('/'); + std::string name = ""; + if (pos != std::string::npos) { + name = pathName.substr(pos + 1); + } else { + name = pathName; + } + if (withExt) { + return name; + } + size_t posDot = name.find_last_of('.'); + if (posDot != std::string::npos) { + return name.substr(0, posDot); + } else { + return name; + } +} + +std::string FEFileType::GetExtName(const std::string &pathName) { + std::string name = GetName(pathName, true); + size_t pos = name.find_last_of('.'); + if (pos != std::string::npos) { + return name.substr(pos + 1); + } else { + return ""; + } +} +} // namespace maple diff --git a/src/hir2mpl/common/src/fe_function.cpp b/src/hir2mpl/common/src/fe_function.cpp new file mode 100644 index 0000000000000000000000000000000000000000..2e55bb4b07110de0ee7fc69a394272a4d94da0a0 --- /dev/null +++ b/src/hir2mpl/common/src/fe_function.cpp @@ -0,0 +1,856 @@ +/* + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "fe_function.h" +#include "feir_bb.h" +#include "mpl_logging.h" +#include "fe_options.h" +#include "fe_manager.h" +#include "feir_var_name.h" +#include "feir_var_reg.h" +#include "hir2mpl_env.h" +#include "feir_builder.h" +#include "feir_dfg.h" +#include "feir_type_helper.h" +#include "feir_var_type_scatter.h" +#include "fe_options.h" + +namespace maple { +FEFunction::FEFunction(MIRFunction &argMIRFunction, const std::unique_ptr &argPhaseResultTotal) + : genStmtHead(nullptr), + genStmtTail(nullptr), + genBBHead(nullptr), + genBBTail(nullptr), + feirStmtHead(nullptr), + feirStmtTail(nullptr), + feirBBHead(nullptr), + feirBBTail(nullptr), + phaseResult(FEOptions::GetInstance().IsDumpPhaseTimeDetail() || FEOptions::GetInstance().IsDumpPhaseTime()), + phaseResultTotal(argPhaseResultTotal), + mirFunction(argMIRFunction) {} + +FEFunction::~FEFunction() { + genStmtHead = nullptr; + genStmtTail = nullptr; + genBBHead = nullptr; + genBBTail = nullptr; + feirStmtHead = nullptr; + feirStmtTail = nullptr; + feirBBHead = nullptr; + feirBBTail = nullptr; +} + +void FEFunction::InitImpl() { + // feir stmt/bb + feirStmtHead = RegisterFEIRStmt(std::make_unique(FEIRNodeKind::kStmtPesudoFuncStart)); + feirStmtTail = RegisterFEIRStmt(std::make_unique(FEIRNodeKind::kStmtPesudoFuncEnd)); + feirStmtHead->SetNext(feirStmtTail); + feirStmtTail->SetPrev(feirStmtHead); + feirBBHead = RegisterFEIRBB(std::make_unique(FEIRBBKind::kBBKindPesudoHead)); + feirBBTail = RegisterFEIRBB(std::make_unique(FEIRBBKind::kBBKindPesudoTail)); + feirBBHead->SetNext(feirBBTail); + feirBBTail->SetPrev(feirBBHead); +} + +void FEFunction::AppendFEIRStmts(std::list &stmts) { + ASSERT_NOT_NULL(feirStmtTail); + InsertFEIRStmtsBefore(*feirStmtTail, stmts); +} + +void FEFunction::InsertFEIRStmtsBefore(FEIRStmt &pos, std::list &stmts) { + while (stmts.size() > 0) { + FEIRStmt *ptrFEIRStmt = 
RegisterFEIRStmt(std::move(stmts.front())); + stmts.pop_front(); + pos.InsertBefore(ptrFEIRStmt); + } +} + +FEIRStmt *FEFunction::RegisterGeneralStmt(std::unique_ptr stmt) { + genStmtList.push_back(std::move(stmt)); + return genStmtList.back().get(); +} + +const std::unique_ptr &FEFunction::RegisterGeneralStmtUniqueReturn(std::unique_ptr stmt) { + genStmtList.push_back(std::move(stmt)); + return genStmtList.back(); +} + +FEIRStmt *FEFunction::RegisterFEIRStmt(UniqueFEIRStmt stmt) { + feirStmtList.push_back(std::move(stmt)); + return feirStmtList.back().get(); +} + +FEIRBB *FEFunction::RegisterFEIRBB(std::unique_ptr bb) { + feirBBList.push_back(std::move(bb)); + return feirBBList.back().get(); +} + +void FEFunction::DumpGeneralStmts() { + FELinkListNode *nodeStmt = genStmtHead; + while (nodeStmt != nullptr) { + FEIRStmt *stmt = static_cast(nodeStmt); + stmt->Dump(); + nodeStmt = nodeStmt->GetNext(); + } +} + +bool FEFunction::LowerFunc(const std::string &phaseName) { + phaseResult.RegisterPhaseNameAndStart(phaseName); + if (feirLower == nullptr) { + feirLower = std::make_unique(*this); + feirLower->LowerFunc(); + feirStmtHead = feirLower->GetlowerStmtHead(); + feirStmtTail = feirLower->GetlowerStmtTail(); + } + return phaseResult.Finish(); +} + +bool FEFunction::DumpFEIRBBs(const std::string &phaseName) { + HIR2MPL_PARALLEL_FORBIDDEN(); + phaseResult.RegisterPhaseNameAndStart(phaseName); + if (feirCFG == nullptr) { + feirCFG = std::make_unique(feirStmtHead, feirStmtTail); + feirCFG->GenerateCFG(); + } + std::cout << "****** CFG built by FEIR for " << GetGeneralFuncName() << " *******\n"; + feirCFG->DumpBBs(); + std::cout << "****** END CFG built for " << GetGeneralFuncName() << " *******\n\n"; + return phaseResult.Finish(); +} + +bool FEFunction::DumpFEIRCFGGraph(const std::string &phaseName) { + HIR2MPL_PARALLEL_FORBIDDEN(); + phaseResult.RegisterPhaseNameAndStart(phaseName); + std::string outName = FEManager::GetModule().GetFileName(); + size_t lastDot = outName.find_last_of("."); + if (lastDot != std::string::npos) { + outName = outName.substr(0, lastDot); + } + CHECK_FATAL(!outName.empty(), "General CFG Graph FileName is empty"); + std::string fileName = outName + "." 
+ GetGeneralFuncName() + ".dot"; + std::ofstream file(fileName); + CHECK_FATAL(file.is_open(), "Failed to open General CFG Graph FileName: %s", fileName.c_str()); + if (feirCFG == nullptr) { + feirCFG = std::make_unique(feirStmtHead, feirStmtTail); + feirCFG->GenerateCFG(); + } + file << "digraph {" << std::endl; + file << " label=\"" << GetGeneralFuncName() << "\"\n"; + file << " labelloc=t\n"; + feirCFG->DumpCFGGraph(file); + file.close(); + return phaseResult.Finish(); +} + +void FEFunction::DumpFEIRCFGGraphForDFGEdge(std::ofstream &file) { + file << " subgraph cfg_edges {" << std::endl; + file << " edge [color=\"#00FF00\",weight=0.3,len=3];" << std::endl; + file << " }" << std::endl; +} + +bool FEFunction::BuildGeneralStmtBBMap(const std::string &phaseName) { + phaseResult.RegisterPhaseNameAndStart(phaseName); + FELinkListNode *nodeBB = genBBHead->GetNext(); + while (nodeBB != nullptr && nodeBB != genBBTail) { + FEIRBB *bb = static_cast(nodeBB); + const FELinkListNode *nodeStmt = bb->GetStmtHead(); + while (nodeStmt != nullptr) { + const FEIRStmt *stmt = static_cast(nodeStmt); + genStmtBBMap[stmt] = bb; + if (nodeStmt == bb->GetStmtTail()) { + break; + } + nodeStmt = nodeStmt->GetNext(); + } + nodeBB = nodeBB->GetNext(); + } + return phaseResult.Finish(); +} + +bool FEFunction::LabelGeneralStmts(const std::string &phaseName) { + phaseResult.RegisterPhaseNameAndStart(phaseName); + uint32 idx = 0; + FELinkListNode *nodeStmt = genStmtHead; + while (nodeStmt != nullptr) { + FEIRStmt *stmt = static_cast(nodeStmt); + stmt->SetID(idx); + idx++; + nodeStmt = nodeStmt->GetNext(); + } + return phaseResult.Finish(); +} + +bool FEFunction::LabelFEIRBBs(const std::string &phaseName) { + phaseResult.RegisterPhaseNameAndStart(phaseName); + uint32 idx = 0; + FELinkListNode *nodeBB = genBBHead->GetNext(); + while (nodeBB != nullptr && nodeBB != genBBTail) { + FEIRBB *bb = static_cast(nodeBB); + bb->SetID(idx); + idx++; + nodeBB = nodeBB->GetNext(); + } + return phaseResult.Finish(); +} + +std::string FEFunction::GetGeneralFuncName() const { + return mirFunction.GetName(); +} + +void FEFunction::PhaseTimerStart(FETimerNS &timer) const { + if (!FEOptions::GetInstance().IsDumpPhaseTime()) { + return; + } + timer.Start(); +} + +void FEFunction::PhaseTimerStopAndDump(FETimerNS &timer, const std::string &label) const { + if (!FEOptions::GetInstance().IsDumpPhaseTime()) { + return; + } + timer.Stop(); + INFO(kLncInfo, "[PhaseTime] %s: %lld ns", label.c_str(), timer.GetTimeNS()); +} + +bool FEFunction::UpdateFormal(const std::string &phaseName) { + phaseResult.RegisterPhaseNameAndStart(phaseName); + HIR2MPL_PARALLEL_FORBIDDEN(); + mirFunction.ClearFormals(); + FEManager::GetMIRBuilder().SetCurrentFunction(mirFunction); + for (const std::unique_ptr &argVar : argVarList) { + MIRSymbol *sym = argVar->GenerateMIRSymbol(FEManager::GetMIRBuilder()); + sym->SetStorageClass(kScFormal); + mirFunction.AddArgument(sym); + } + return phaseResult.Finish(); +} + +std::string FEFunction::GetDescription() { + std::stringstream ss; + std::string oriFuncName = GetGeneralFuncName(); + std::string mplFuncName = namemangler::EncodeName(oriFuncName); + ss << "ori function name: " << oriFuncName << std::endl; + ss << "mpl function name: " << mplFuncName << std::endl; + ss << "parameter list:" << "("; + for (const std::unique_ptr &argVar : argVarList) { + ss << argVar->GetNameRaw() << ", "; + } + ss << ") {" << std::endl; + FELinkListNode *node = feirStmtHead->GetNext(); + while (node != feirStmtTail) { + FEIRStmt *currStmt = 
static_cast(node); + ss << currStmt->DumpDotString() << std::endl; + node = node->GetNext(); + } + ss << "}" << std::endl; + return ss.str(); +} + +bool FEFunction::EmitToMIR(const std::string &phaseName) { + phaseResult.RegisterPhaseNameAndStart(phaseName); + mirFunction.NewBody(); + FEManager::GetMIRBuilder().SetCurrentFunction(mirFunction); + FEManager::SetCurrentFEFunction(*this); + BuildMapLabelIdx(); + EmitToMIRStmt(); + return phaseResult.Finish(); +} + +const FEIRStmtPesudoLOC *FEFunction::GetLOCForStmt(const FEIRStmt &feIRStmt) const { + if (!feIRStmt.ShouldHaveLOC()) { + return nullptr; + } + FELinkListNode *prevNode = static_cast(feIRStmt.GetPrev()); + while (prevNode != nullptr) { + if ((*static_cast(prevNode)).ShouldHaveLOC()) { + return nullptr; + } + FEIRStmt *stmt = static_cast(prevNode); + if (stmt->GetKind() == kStmtPesudoLOC) { + FEIRStmtPesudoLOC *loc = static_cast(stmt); + return loc; + } + prevNode = prevNode->GetPrev(); + } + return nullptr; +} + +void FEFunction::BuildMapLabelIdx() { + FELinkListNode *nodeStmt = feirStmtHead->GetNext(); + while (nodeStmt != nullptr && nodeStmt != feirStmtTail) { + FEIRStmt *stmt = static_cast(nodeStmt); + if (stmt->GetKind() == FEIRNodeKind::kStmtPesudoLabel) { + FEIRStmtPesudoLabel *stmtLabel = static_cast(stmt); + stmtLabel->GenerateLabelIdx(FEManager::GetMIRBuilder()); + mapLabelIdx[stmtLabel->GetLabelIdx()] = stmtLabel->GetMIRLabelIdx(); + } + nodeStmt = nodeStmt->GetNext(); + } +} + +bool FEFunction::CheckPhaseResult(const std::string &phaseName) { + phaseResult.RegisterPhaseNameAndStart(phaseName); + bool success = phaseResult.IsSuccess(); + return phaseResult.Finish(success); +} + +bool FEFunction::ProcessFEIRFunction() { + bool success = true; + success = success && BuildMapLabelStmt("fe/build map label stmt"); + success = success && SetupFEIRStmtJavaTry("fe/setup stmt javatry"); + success = success && SetupFEIRStmtBranch("fe/setup stmt branch"); + success = success && UpdateRegNum2This("fe/update reg num to this pointer"); + return success; +} + +bool FEFunction::BuildMapLabelStmt(const std::string &phaseName) { + phaseResult.RegisterPhaseNameAndStart(phaseName); + FELinkListNode *nodeStmt = feirStmtHead->GetNext(); + while (nodeStmt != nullptr && nodeStmt != feirStmtTail) { + FEIRStmt *stmt = static_cast(nodeStmt); + FEIRNodeKind kind = stmt->GetKind(); + switch (kind) { + case FEIRNodeKind::kStmtPesudoLabel: + case FEIRNodeKind::kStmtPesudoJavaCatch: { + FEIRStmtPesudoLabel *stmtLabel = static_cast(stmt); + mapLabelStmt[stmtLabel->GetLabelIdx()] = stmtLabel; + break; + } + default: + break; + } + nodeStmt = nodeStmt->GetNext(); + } + return phaseResult.Finish(); +} + +bool FEFunction::SetupFEIRStmtJavaTry(const std::string &phaseName) { + phaseResult.RegisterPhaseNameAndStart(phaseName); + FELinkListNode *nodeStmt = feirStmtHead->GetNext(); + while (nodeStmt != nullptr && nodeStmt != feirStmtTail) { + FEIRStmt *stmt = static_cast(nodeStmt); + if (stmt->GetKind() == FEIRNodeKind::kStmtPesudoJavaTry) { + FEIRStmtPesudoJavaTry *stmtJavaTry = static_cast(stmt); + for (uint32 labelIdx : stmtJavaTry->GetCatchLabelIdxVec()) { + auto it = mapLabelStmt.find(labelIdx); + CHECK_FATAL(it != mapLabelStmt.cend(), "label is not found"); + stmtJavaTry->AddCatchTarget(*(it->second)); + } + } + nodeStmt = nodeStmt->GetNext(); + } + return phaseResult.Finish(); +} + +bool FEFunction::SetupFEIRStmtBranch(const std::string &phaseName) { + phaseResult.RegisterPhaseNameAndStart(phaseName); + bool success = true; + FELinkListNode *nodeStmt = 
feirStmtHead->GetNext(); + while (nodeStmt != nullptr && nodeStmt != feirStmtTail) { + FEIRStmt *stmt = static_cast(nodeStmt); + FEIRNodeKind kind = stmt->GetKind(); + switch (kind) { + case FEIRNodeKind::kStmtGoto: + case FEIRNodeKind::kStmtCondGoto: + success = success && SetupFEIRStmtGoto(*(static_cast(stmt))); + break; + case FEIRNodeKind::kStmtSwitch: + success = success && SetupFEIRStmtSwitch(*(static_cast(stmt))); + break; + default: + break; + } + nodeStmt = nodeStmt->GetNext(); + } + return phaseResult.Finish(success); +} + +bool FEFunction::SetupFEIRStmtGoto(FEIRStmtGoto &stmt) { + auto it = mapLabelStmt.find(stmt.GetLabelIdx()); + if (it == mapLabelStmt.cend()) { + ERR(kLncErr, "target not found for stmt goto"); + return false; + } + stmt.SetStmtTarget(*(it->second)); + return true; +} + +bool FEFunction::SetupFEIRStmtSwitch(FEIRStmtSwitch &stmt) { + // default target + auto itDefault = mapLabelStmt.find(stmt.GetDefaultLabelIdx()); + if (itDefault == mapLabelStmt.cend()) { + ERR(kLncErr, "target not found for stmt goto"); + return false; + } + stmt.SetDefaultTarget(*(itDefault->second)); + + // value targets + for (const auto &itItem : stmt.GetMapValueLabelIdx()) { + auto itTarget = mapLabelStmt.find(itItem.second); + if (itTarget == mapLabelStmt.cend()) { + ERR(kLncErr, "target not found for stmt goto"); + return false; + } + stmt.AddTarget(itItem.first, *(itTarget->second)); + } + return true; +} + +bool FEFunction::UpdateRegNum2This(const std::string &phaseName) { + bool success = CheckPhaseResult(phaseName); + if (!success) { + return success; + } + if (!HasThis()) { + return success; + } + const std::unique_ptr &firstArg = argVarList.front(); + std::unique_ptr varReg = firstArg->Clone(); + GStrIdx thisNameIdx = FEUtils::GetThisIdx(); + std::unique_ptr varThisAsParam = std::make_unique(thisNameIdx, varReg->GetType()->Clone()); + if (!IsNative()) { + std::unique_ptr varThisAsLocalVar = std::make_unique(thisNameIdx, varReg->GetType()->Clone()); + std::unique_ptr dReadThis = std::make_unique(std::move(varThisAsLocalVar)); + std::unique_ptr daStmt = std::make_unique(std::move(varReg), std::move(dReadThis)); + FEIRStmt *stmt = RegisterFEIRStmt(std::move(daStmt)); + FELinkListNode::InsertAfter(stmt, feirStmtHead); + } + argVarList[0].reset(varThisAsParam.release()); + return success; +} + +void FEFunction::OutputStmts() const { + FELinkListNode *node = feirStmtHead->GetNext(); + while (node != feirStmtTail) { + FEIRStmt *currStmt = static_cast(node); + LogInfo::MapleLogger() << currStmt->DumpDotString() << "\n"; + node = node->GetNext(); + } +} + +void FEFunction::LabelFEIRStmts() { + // stmt idx start from 1 + FELinkListNode *node = feirStmtHead->GetNext(); + uint32 id = 1; + while (node != feirStmtTail) { + FEIRStmt *currStmt = static_cast(node); + currStmt->SetID(id++); + node = node->GetNext(); + } + stmtCount = --id; +} + +bool FEFunction::ShouldNewBB(const FEIRBB *currBB, const FEIRStmt &currStmt) const { + if (currBB == nullptr) { + return true; + } + if (currStmt.IsTarget()) { + if (currBB->GetStmtNoAuxTail() != nullptr) { + return true; + } + } + return false; +} + +bool FEFunction::IsBBEnd(const FEIRStmt &stmt) const { + bool currStmtMayBeBBEnd = MayBeBBEnd(stmt); + if (currStmtMayBeBBEnd) { + FELinkListNode *node = stmt.GetNext(); + FEIRStmt *nextStmt = static_cast(node); + if (nextStmt->IsAuxPost()) { // if curr stmt my be BB end, but next stmt is AuxPost + return false; // curr stmt should not be BB end + } + return true; + } + if (stmt.IsAuxPost()) { + FELinkListNode 
*node = stmt.GetPrev();
+    FEIRStmt *prevStmt = static_cast<FEIRStmt*>(node);
+    bool prevStmtMayBeBBEnd = MayBeBBEnd(*prevStmt);  // if curr stmt is AuxPost, and prev stmt may be BB end
+    return prevStmtMayBeBBEnd;  // return whether prev stmt may be BB end as result
+  }
+  return currStmtMayBeBBEnd;
+}
+
+bool FEFunction::MayBeBBEnd(const FEIRStmt &stmt) const {
+  return (stmt.IsBranch() || !stmt.IsFallThru());
+}
+
+void FEFunction::LinkFallThroughBBAndItsNext(FEIRBB &bb) {
+  if (!CheckBBsStmtNoAuxTail(bb)) {
+    return;
+  }
+  if (!bb.IsFallThru()) {
+    return;
+  }
+  FELinkListNode *node = bb.GetNext();
+  FEIRBB *nextBB = static_cast<FEIRBB*>(node);
+  if (nextBB != feirBBTail) {
+    LinkBB(bb, *nextBB);
+  }
+}
+
+void FEFunction::LinkBranchBBAndItsTargets(FEIRBB &bb) {
+  if (!CheckBBsStmtNoAuxTail(bb)) {
+    return;
+  }
+  if (!bb.IsBranch()) {
+    return;
+  }
+  const FEIRStmt *stmtTail = bb.GetStmtNoAuxTail();
+  FEIRNodeKind nodeKind = stmtTail->GetKind();
+  switch (nodeKind) {
+    case FEIRNodeKind::kStmtCondGoto:
+    case FEIRNodeKind::kStmtGoto: {
+      LinkGotoBBAndItsTarget(bb, *stmtTail);
+      break;
+    }
+    case FEIRNodeKind::kStmtSwitch: {
+      LinkSwitchBBAndItsTargets(bb, *stmtTail);
+      break;
+    }
+    default: {
+      CHECK_FATAL(false, "nodeKind %u is not branch", nodeKind);
+      break;
+    }
+  }
+}
+
+void FEFunction::LinkGotoBBAndItsTarget(FEIRBB &bb, const FEIRStmt &stmtTail) {
+  const FEIRStmtGoto2 &gotoStmt = static_cast<const FEIRStmtGoto2&>(stmtTail);
+  const FEIRStmtPesudoLabel2 &targetStmt = gotoStmt.GetStmtTargetRef();
+  FEIRBB &targetBB = GetFEIRBBByStmt(targetStmt);
+  LinkBB(bb, targetBB);
+}
+
+void FEFunction::LinkSwitchBBAndItsTargets(FEIRBB &bb, const FEIRStmt &stmtTail) {
+  const FEIRStmtSwitch2 &switchStmt = static_cast<const FEIRStmtSwitch2&>(stmtTail);
+  const std::map<int32, FEIRStmtPesudoLabel2*> &mapValueTargets = switchStmt.GetMapValueTargets();
+  for (auto it : mapValueTargets) {
+    FEIRStmtPesudoLabel2 *pesudoLabel = it.second;
+    FEIRBB &targetBB = GetFEIRBBByStmt(*pesudoLabel);
+    LinkBB(bb, targetBB);
+  }
+  FEIRBB &targetBB = GetFEIRBBByStmt(switchStmt.GetDefaultTarget());
+  LinkBB(bb, targetBB);
+}
+
+void FEFunction::LinkBB(FEIRBB &predBB, FEIRBB &succBB) {
+  predBB.AddSuccBB(&succBB);
+  succBB.AddPredBB(&predBB);
+}
+
+FEIRBB &FEFunction::GetFEIRBBByStmt(const FEIRStmt &stmt) {
+  auto it = feirStmtBBMap.find(&stmt);
+  return *(it->second);
+}
+
+bool FEFunction::CheckBBsStmtNoAuxTail(const FEIRBB &bb) const {
+  bool bbHasStmtNoAuxTail = (bb.GetStmtNoAuxTail() != nullptr);
+  CHECK_FATAL(bbHasStmtNoAuxTail, "Error occurred in BuildFEIRBB phase, bb.GetStmtNoAuxTail() should not be nullptr");
+  return true;
+}
+
+void FEFunction::InsertCheckPointForBBs() {
+  FELinkListNode *node = feirBBHead->GetNext();
+  while (node != feirBBTail) {
+    FEIRBB *currBB = static_cast<FEIRBB*>(node);  // get currBB
+    // create checkPointIn
+    std::unique_ptr<FEIRStmtCheckPoint> checkPointIn = std::make_unique<FEIRStmtCheckPoint>();
+    currBB->SetCheckPointIn(std::move(checkPointIn));  // set to currBB's checkPointIn
+    FEIRStmtCheckPoint &cpIn = currBB->GetCheckPointIn();
+    currBB->InsertAndUpdateNewHead(&cpIn);  // insert and update new head to checkPointIn
+    (void)feirStmtBBMap.insert(std::make_pair(&cpIn, currBB));  // add pair to feirStmtBBMap
+    // create checkPointOut
+    std::unique_ptr<FEIRStmtCheckPoint> checkPointOut = std::make_unique<FEIRStmtCheckPoint>();
+    currBB->SetCheckPointOut(std::move(checkPointOut));  // set to currBB's checkPointOut
+    FEIRStmtCheckPoint &cpOut = currBB->GetCheckPointOut();
+    currBB->InsertAndUpdateNewTail(&cpOut);  // insert and update new tail to checkPointOut
+    (void)feirStmtBBMap.insert(std::make_pair(&cpOut, currBB));  // add pair to feirStmtBBMap
+    // get next BB
+    node =
node->GetNext(); + } +} + +void FEFunction::InsertCheckPointForTrys() { + FEIRStmtPesudoJavaTry2 *currTry = nullptr; + FEIRStmtCheckPoint *checkPointInTry = nullptr; + FELinkListNode *node = feirStmtHead->GetNext(); + while (node != feirStmtTail) { + FEIRStmt *currStmt = static_cast(node); + if (currStmt->GetKind() == FEIRNodeKind::kStmtPesudoJavaTry) { + currTry = static_cast(currStmt); + checkPointInTry = nullptr; + } + if ((currTry != nullptr) && + (currStmt->IsThrowable()) && + ((checkPointInTry == nullptr) || currStmt->HasDef())) { + FEIRBB &currBB = GetFEIRBBByStmt(*currStmt); + if (currStmt == currBB.GetStmtNoAuxHead()) { + checkPointInTry = &(currBB.GetCheckPointIn()); + (void)checkPointJavaTryMap.insert(std::make_pair(checkPointInTry, currTry)); + if (currStmt == currBB.GetStmtHead()) { + currBB.SetStmtHead(currStmt); + } + node = node->GetNext(); + continue; + } + std::unique_ptr newCheckPoint = std::make_unique(); + currBB.AddCheckPoint(std::move(newCheckPoint)); + checkPointInTry = currBB.GetLatestCheckPoint(); + CHECK_NULL_FATAL(checkPointInTry); + FELinkListNode::InsertBefore(checkPointInTry, currStmt); + (void)feirStmtBBMap.insert(std::make_pair(checkPointInTry, &currBB)); + (void)checkPointJavaTryMap.insert(std::make_pair(checkPointInTry, currTry)); + if (currStmt == currBB.GetStmtHead()) { + currBB.SetStmtHead(currStmt); + } + } + if (currStmt->GetKind() == FEIRNodeKind::kStmtPesudoEndTry) { + currTry = nullptr; + } + node = node->GetNext(); + } +} + +void FEFunction::InitTrans4AllVars() { + FELinkListNode *node = feirStmtHead->GetNext(); + while (node != feirStmtTail) { + FEIRStmt *currStmt = static_cast(node); + currStmt->InitTrans4AllVars(); + node = node->GetNext(); + } +} + +FEIRStmtPesudoJavaTry2 &FEFunction::GetJavaTryByCheckPoint(FEIRStmtCheckPoint &checkPoint) { + auto it = checkPointJavaTryMap.find(&checkPoint); + return *(it->second); +} + +FEIRStmtCheckPoint &FEFunction::GetCheckPointByFEIRStmt(const FEIRStmt &stmt) { + auto it = feirStmtCheckPointMap.find(&stmt); + return *(it->second); +} + +void FEFunction::SetUpDefVarTypeScatterStmtMap() { + FELinkListNode *node = feirStmtHead->GetNext(); + while (node != feirStmtTail) { + FEIRStmt *currStmt = static_cast(node); + FEIRVarTypeScatter *defVarTypeScatter = currStmt->GetTypeScatterDefVar(); + if (defVarTypeScatter != nullptr) { + (void)defVarTypeScatterStmtMap.insert(std::make_pair(defVarTypeScatter, currStmt)); + } + node = node->GetNext(); + } +} + +void FEFunction::InsertRetypeStmtsAfterDef(const UniqueFEIRVar& def) { + bool defIsTypeScatter = (def->GetKind() == kFEIRVarTypeScatter); + if (!defIsTypeScatter) { + return; + } + FEIRVarTypeScatter &fromVar = *(static_cast(def.get())); + const std::unordered_set &scatterTypes = fromVar.GetScatterTypes(); + for (auto &it : scatterTypes) { + const maple::FEIRTypeKey &typeKey = it; + FEIRType &toType = *(typeKey.GetType()); + FEIRType &fromType = *(fromVar.GetType()); + Opcode opcode = FEIRTypeCvtHelper::ChooseCvtOpcodeByFromTypeAndToType(fromType, toType); + if (opcode == OP_retype) { + InsertRetypeStmt(fromVar, toType); + } else if (opcode == OP_cvt) { + InsertCvtStmt(fromVar, toType); + } else { + InsertJavaMergeStmt(fromVar, toType); + } + } +} + +void FEFunction::InsertRetypeStmt(const FEIRVarTypeScatter &fromVar, const FEIRType &toType) { + // create DRead Expr + std::unique_ptr exprDRead = std::make_unique(fromVar.GetVar()->Clone()); + std::unique_ptr typeDst = FEIRTypeHelper::CreatePointerType(toType.Clone(), toType.GetPrimType()); + // create expr for 
retype + std::unique_ptr expr = std::make_unique(std::move(typeDst), OP_retype, + std::move(exprDRead)); + // after expr created, insert dassign stmt + InsertDAssignStmt4TypeCvt(fromVar, toType, std::move(expr)); +} + +void FEFunction::InsertCvtStmt(const FEIRVarTypeScatter &fromVar, const FEIRType &toType) { + // create DRead Expr + std::unique_ptr exprDRead = std::make_unique(fromVar.GetVar()->Clone()); + // create expr for type cvt + std::unique_ptr expr = std::make_unique(OP_cvt, std::move(exprDRead)); + expr->GetType()->SetPrimType(toType.GetPrimType()); + // after expr created, insert dassign stmt + InsertDAssignStmt4TypeCvt(fromVar, toType, std::move(expr)); +} + +void FEFunction::InsertJavaMergeStmt(const FEIRVarTypeScatter &fromVar, const FEIRType &toType) { + // create DRead Expr + std::unique_ptr exprDRead = std::make_unique(fromVar.GetVar()->Clone()); + // create expr for java merge + std::vector> argOpnds; + argOpnds.push_back(std::move(exprDRead)); + std::unique_ptr javaMergeExpr = std::make_unique(toType.Clone(), argOpnds); + // after expr created, insert dassign stmt + InsertDAssignStmt4TypeCvt(fromVar, toType, std::move(javaMergeExpr)); +} + +void FEFunction::InsertDAssignStmt4TypeCvt(const FEIRVarTypeScatter &fromVar, const FEIRType &toType, + UniqueFEIRExpr expr) { + FEIRVar *var = fromVar.GetVar().get(); + CHECK_FATAL((var->GetKind() == FEIRVarKind::kFEIRVarReg), "fromVar's inner var must be var reg kind"); + FEIRVarReg *varReg = static_cast(var); + uint32 regNum = varReg->GetRegNum(); + UniqueFEIRVar toVar = FEIRBuilder::CreateVarReg(regNum, toType.Clone()); + std::unique_ptr daStmt = std::make_unique(std::move(toVar), std::move(expr)); + FEIRStmt *insertedStmt = RegisterFEIRStmt(std::move(daStmt)); + FEIRStmt &stmt = GetStmtByDefVarTypeScatter(fromVar); + FELinkListNode::InsertAfter(insertedStmt, &stmt); +} + +FEIRStmt &FEFunction::GetStmtByDefVarTypeScatter(const FEIRVarTypeScatter &varTypeScatter) { + auto it = defVarTypeScatterStmtMap.find(&varTypeScatter); + return *(it->second); +} + +bool FEFunction::WithFinalFieldsNeedBarrier(MIRClassType *classType, bool isStatic) const { + // final field + if (isStatic) { + // final static fields with non-primitive types + // the one with primitive types are all inlined + for (auto it : classType->GetStaticFields()) { + MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(it.second.first); + if (it.second.second.GetAttr(FLDATTR_final) && type->GetPrimType() == PTY_ref) { + return true; + } + } + } else { + for (auto it : classType->GetFields()) { + if (it.second.second.GetAttr(FLDATTR_final)) { + return true; + } + } + } + return false; +} + +bool FEFunction::IsNeedInsertBarrier() { + if (mirFunction.GetAttr(FUNCATTR_constructor) || + mirFunction.GetName().find("_7Cclone_7C") != std::string::npos || + mirFunction.GetName().find("_7CcopyOf_7C") != std::string::npos) { + const std::string &className = mirFunction.GetBaseClassName(); + MIRType *type = FEManager::GetTypeManager().GetClassOrInterfaceType(className); + if (type->GetKind() == kTypeClass) { + MIRClassType *currClass = static_cast(type); + if (!mirFunction.GetAttr(FUNCATTR_constructor) || + WithFinalFieldsNeedBarrier(currClass, mirFunction.GetAttr(FUNCATTR_static))) { + return true; + } + } + } + return false; +} + +void FEFunction::EmitToMIRStmt() { + MIRBuilder &builder = FEManager::GetMIRBuilder(); + FELinkListNode *nodeStmt = feirStmtHead->GetNext(); + while (nodeStmt != nullptr && nodeStmt != feirStmtTail) { + FEIRStmt *stmt = static_cast(nodeStmt); + 
std::list mirStmts = stmt->GenMIRStmts(builder); +#ifdef DEBUG + // LOC info has been recorded in FEIRStmt already, this could be removed later. + AddLocForStmt(*stmt, mirStmts); +#endif + for (StmtNode *mirStmt : mirStmts) { + builder.AddStmtInCurrentFunctionBody(*mirStmt); + } + nodeStmt = nodeStmt->GetNext(); + } +} + +void FEFunction::AddLocForStmt(const FEIRStmt &stmt, std::list &mirStmts) const { + const FEIRStmtPesudoLOC *pesudoLoc = GetLOCForStmt(stmt); + if (pesudoLoc != nullptr) { + mirStmts.front()->GetSrcPos().SetFileNum(static_cast(pesudoLoc->GetSrcFileIdx())); + mirStmts.front()->GetSrcPos().SetLineNum(pesudoLoc->GetSrcFileLineNum()); + } +} + +void FEFunction::PushFuncScope(const SrcPosition &startOfScope, const SrcPosition &endOfScope) { + UniqueFEIRScope feirScope = std::make_unique(scopeID++); + if (FEOptions::GetInstance().IsDbgFriendly()) { + MIRScope *mirScope = mirFunction.GetScope(); + mirScope->SetRange(startOfScope, endOfScope); + feirScope->SetMIRScope(mirScope); + } + scopeStack.push_front(std::move(feirScope)); +} + +void FEFunction::PushStmtScope(const SrcPosition &startOfScope, const SrcPosition &endOfScope, bool isControllScope) { + UniqueFEIRScope feirScope = std::make_unique(scopeID++); + if (FEOptions::GetInstance().IsDbgFriendly()) { + MIRScope *parentMIRScope = GetTopMIRScope(); + CHECK_NULL_FATAL(parentMIRScope); + MIRScope *mirScope = mirFunction.GetModule()->GetMemPool()->New(mirFunction.GetModule()); + mirScope->SetRange(startOfScope, endOfScope); + (void)parentMIRScope->AddScope(mirScope); + feirScope->SetMIRScope(mirScope); + } + feirScope->SetIsControllScope(isControllScope); + scopeStack.push_front(std::move(feirScope)); +} + +void FEFunction::PushStmtScope(bool isControllScope) { + UniqueFEIRScope feirScope = std::make_unique(scopeID++, isControllScope); + scopeStack.push_front(std::move(feirScope)); +} + +FEIRScope *FEFunction::GetTopFEIRScopePtr() const { + if (!scopeStack.empty()) { + return scopeStack.front().get(); + } + CHECK_FATAL(false, "scope stack is empty"); + return nullptr; +} + +MIRScope *FEFunction::GetTopMIRScope() const { + if (scopeStack.empty()) { + CHECK_FATAL(false, "scope stack is empty"); + return nullptr; + } + for (const auto &feirScope : scopeStack) { + if (feirScope->GetMIRScope() != nullptr) { + return feirScope->GetMIRScope(); + } + } + return nullptr; +} + +UniqueFEIRScope FEFunction::PopTopScope() { + if (!scopeStack.empty()) { + UniqueFEIRScope scope = std::move(scopeStack.front()); + scopeStack.pop_front(); + return scope; + } + CHECK_FATAL(false, "scope stack is empty"); + return nullptr; +} + +void FEFunction::AddVLACleanupStmts(std::list &stmts) { + (void)stmts; + CHECK_FATAL(false, "AddVLACleanupStmts only support astfunction"); +}; +} // namespace maple diff --git a/src/hir2mpl/common/src/fe_function_phase_result.cpp b/src/hir2mpl/common/src/fe_function_phase_result.cpp new file mode 100644 index 0000000000000000000000000000000000000000..16ce9e51bbf2048c9a7733a635e0cbb49b69feee --- /dev/null +++ b/src/hir2mpl/common/src/fe_function_phase_result.cpp @@ -0,0 +1,60 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "fe_function_phase_result.h" +#include "mpl_logging.h" + +namespace maple { +void FEFunctionPhaseResult::Combine(const FEFunctionPhaseResult &result) { + for (const std::string &name : result.phaseNames) { + auto itResult = result.phaseTimes.find(name); + CHECK_FATAL(itResult != result.phaseTimes.end(), "invalid result: without time recorded"); + int64 t = itResult->second; + auto itLocal = phaseTimes.find(name); + if (itLocal == phaseTimes.end()) { + phaseNames.push_back(name); + phaseTimes[name] = t; + } else { + phaseTimes[name] = itLocal->second + t; + } + } +} + +void FEFunctionPhaseResult::Dump() { + for (const std::string &name :phaseNames) { + auto it = phaseTimes.find(name); + CHECK_FATAL(it != phaseTimes.cend(), "phase time is undefined for %s", name.c_str()); + INFO(kLncInfo, "[PhaseTime] %s: %lld ns", name.c_str(), it->second); + } +} + +void FEFunctionPhaseResult::DumpMS() { + for (const std::string &name :phaseNames) { + auto it = phaseTimes.find(name); + CHECK_FATAL(it != phaseTimes.cend(), "phase time is undefined for %s", name.c_str()); + INFO(kLncInfo, "[PhaseTime] %s: %.2lf ms", name.c_str(), it->second / 1000000.0); // 1ms = 1000000 ns + } +} + +bool FEFunctionPhaseResult::Finish(bool isSuccess) { + success = isSuccess; + if (enable && recordTime) { + timer.Stop(); + CHECK_FATAL(!currPhaseName.empty(), "Phase Name is empty"); + int64 t = timer.GetTimeNS(); + phaseTimes[currPhaseName] = t; + } + return success; +} +} // namespace maple \ No newline at end of file diff --git a/src/hir2mpl/common/src/fe_input_helper.cpp b/src/hir2mpl/common/src/fe_input_helper.cpp new file mode 100644 index 0000000000000000000000000000000000000000..2dcefcd3a12d6aaac05b3e4ef506e7b2adc22b4a --- /dev/null +++ b/src/hir2mpl/common/src/fe_input_helper.cpp @@ -0,0 +1,360 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "fe_input_helper.h" +#include "fe_options.h" +#include "fe_manager.h" + +namespace maple { +#define SET_CLASS_INFO_PAIR(A, B, C, D) \ + do { \ + (A)->PushbackMIRInfo(MIRInfoPair(GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(B), C)); \ + (A)->PushbackIsString(D); \ + } while (0); + +std::string FEInputStructHelper::GetSrcFileNameImpl() const { + return "unknown"; +} + +MIRStructType *FEInputStructHelper::GetContainerImpl() { + return mirStructType; +} + +bool FEInputStructHelper::PreProcessDeclImpl() { + bool error = false; + MIRStructType *structType = CreateMIRStructType(error); + if (error) { + return false; + } + isSkipped = (structType == nullptr); + mirStructType = structType; + if (structType != nullptr) { + FETypeManager::SetComplete(*structType); + FEManager::GetTypeManager().AddClassToModule(*structType); + } + return true; +} + +bool FEInputStructHelper::ProcessDeclImpl() { + if (isSkipped) { + return true; + } + if (mirStructType == nullptr) { + return false; + } + if (!FEOptions::GetInstance().IsGenMpltOnly() && !isOnDemandLoad) { + CreateSymbol(); + } + ProcessDeclSuperClass(); + ProcessDeclImplements(); + ProcessDeclDefInfo(); + // Process Fields + InitFieldHelpers(); + ProcessFieldDef(); + ProcessExtraFields(); + if (!FEOptions::GetInstance().IsGenMpltOnly() && !isOnDemandLoad) { + ProcessStaticFields(); + } + // Process Methods + InitMethodHelpers(); + ProcessMethodDef(); + return true; +} + +void FEInputStructHelper::CreateSymbol() { + std::string structNameMpl = GetStructNameMpl(); + mirSymbol = FEManager::GetMIRBuilder().GetOrCreateGlobalDecl(structNameMpl.c_str(), *mirStructType); + switch (mirStructType->GetKind()) { + case kTypeClass: + case kTypeClassIncomplete: + mirSymbol->SetSKind(kStJavaClass); + break; + case kTypeInterface: + case kTypeInterfaceIncomplete: + mirSymbol->SetSKind(kStJavaInterface); + break; + default: + break; + } + mirSymbol->SetAttrs(GetStructAttributeFromInput()); +} + +void FEInputStructHelper::ProcessDeclSuperClass() { + ProcessDeclSuperClassForJava(); +} + +void FEInputStructHelper::ProcessDeclSuperClassForJava() { + const std::list &superNames = GetSuperClassNames(); + ASSERT(superNames.size() <= 1, "there must be zero or one super class for java class: %s", + GetStructNameOrin().c_str()); + if (superNames.size() == 1) { + const std::string &superNameMpl = namemangler::EncodeName(superNames.front()); + bool isCreate = false; + MIRStructType *superType = FEManager::GetTypeManager().GetOrCreateClassOrInterfaceType(superNameMpl, false, + FETypeFlag::kSrcExtern, + isCreate); + if (isCreate) { + // Mark incomplete + } + switch (mirStructType->GetKind()) { + case kTypeClass: + case kTypeClassIncomplete: { + MIRClassType *thisType = static_cast(mirStructType); + thisType->SetParentTyIdx(superType->GetTypeIndex()); + break; + } + case kTypeInterface: + case kTypeInterfaceIncomplete: { + MIRInterfaceType *thisType = static_cast(mirStructType); + if (superType->GetKind() == kTypeInterface) { + thisType->GetParentsTyIdx().push_back(superType->GetTypeIndex()); + } + break; + } + default: + break; + } + } +} + +void FEInputStructHelper::ProcessDeclImplements() { + const std::vector &interfaceNames = GetInterfaceNames(); + std::vector interfaceTypes; + for (const std::string &name : interfaceNames) { + const std::string &interfaceNameMpl = namemangler::EncodeName(name); + bool isCreate = false; + MIRStructType *interfaceType = FEManager::GetTypeManager().GetOrCreateClassOrInterfaceType(interfaceNameMpl, true, + 
FETypeFlag::kSrcExtern, + isCreate); + if (isCreate) { + // Mark incomplete + } + interfaceTypes.push_back(interfaceType); + } + if (interfaceTypes.size() > 0) { + switch (mirStructType->GetKind()) { + case kTypeClass: + case kTypeClassIncomplete: { + MIRClassType *thisType = static_cast(mirStructType); + for (MIRStructType *type : interfaceTypes) { + thisType->GetInterfaceImplemented().push_back(type->GetTypeIndex()); + } + break; + } + case kTypeInterface: + case kTypeInterfaceIncomplete: { + MIRInterfaceType *thisType = static_cast(mirStructType); + for (MIRStructType *type : interfaceTypes) { + thisType->GetParentsTyIdx().push_back(type->GetTypeIndex()); + } + break; + } + default: + break; + } + } +} + +void FEInputStructHelper::ProcessDeclDefInfo() { + // INFO_srcfile + std::string srcFileName = GetSourceFileName(); + if (!srcFileName.empty()) { + GStrIdx srcFileNameIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(srcFileName); + SET_CLASS_INFO_PAIR(mirStructType, "INFO_srcfile", srcFileNameIdx.GetIdx(), true); + } + // INFO_classname + std::string classNameMpl = GetStructNameMpl(); + GStrIdx classNameMplIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(classNameMpl); + SET_CLASS_INFO_PAIR(mirStructType, "INFO_classname", classNameMplIdx.GetIdx(), true); + // INFO_classnameorig + std::string classNameOrig = GetStructNameOrin(); + GStrIdx classNameOrigIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(classNameOrig); + SET_CLASS_INFO_PAIR(mirStructType, "INFO_classnameorig", classNameOrigIdx.GetIdx(), true); + if (srcLang == kSrcLangJava) { + // INFO_superclassname + ProcessDeclDefInfoSuperNameForJava(); + // INFO_implements + ProcessDeclDefInfoImplementNameForJava(); + } + // INFO_attribute_string + TypeAttrs attrs = GetStructAttributeFromInput(); + std::string attrsName = FETypeManager::TypeAttrsToString(attrs); + GStrIdx attrsNameIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(attrsName); + SET_CLASS_INFO_PAIR(mirStructType, "INFO_attribute_string", attrsNameIdx.GetIdx(), true); + // INFO_access_flags + SET_CLASS_INFO_PAIR(mirStructType, "INFO_access_flags", GetRawAccessFlags(), false); + // INFO_ir_srcfile_signature + SET_CLASS_INFO_PAIR(mirStructType, "INFO_ir_srcfile_signature", GetIRSrcFileSigIdx(), true); +} + +void FEInputStructHelper::ProcessDeclDefInfoSuperNameForJava() { + std::list superNames = GetSuperClassNames(); + if (superNames.size() > 1) { + ASSERT(false, "There is one super class at most in java"); + return; + } + std::string superName = superNames.size() == 0 ? 
"unknown" : superNames.front(); + std::string superNameMpl = namemangler::EncodeName(superName); + GStrIdx superNameMplIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(superNameMpl.c_str()); + SET_CLASS_INFO_PAIR(mirStructType, "INFO_superclassname", superNameMplIdx.GetIdx(), true); +} + +void FEInputStructHelper::ProcessDeclDefInfoImplementNameForJava() { + MIRTypeKind kind = mirStructType->GetKind(); + if (kind == kTypeInterface || kind == kTypeInterfaceIncomplete) { + return; + } + std::vector implementNames = GetInterfaceNames(); + for (const std::string &name : implementNames) { + if (!name.empty()) { + std::string nameMpl = namemangler::EncodeName(name); + GStrIdx nameMplIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(nameMpl.c_str()); + SET_CLASS_INFO_PAIR(mirStructType, "INFO_implements", nameMplIdx.GetIdx(), true); + } + } +} + +void FEInputStructHelper::ProcessStaticFields() { + uint32 i = 0; + FieldVector::iterator it; + for (it = mirStructType->GetStaticFields().begin(); it != mirStructType->GetStaticFields().end(); ++i, ++it) { + StIdx stIdx = GlobalTables::GetGsymTable().GetStIdxFromStrIdx(it->first); + const std::string &fieldName = GlobalTables::GetStrTable().GetStringFromStrIdx(it->first); + MIRConst *cst = nullptr; + MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(it->second.first); + MapleAllocator &alloc = FEManager::GetModule().GetMPAllocator(); + if (i < staticFieldsConstVal.size()) { + cst = staticFieldsConstVal[i]; + if (cst != nullptr && cst->GetKind() == kConstStr16Const) { + std::u16string str16 = + GlobalTables::GetU16StrTable().GetStringFromStrIdx(static_cast(cst)->GetValue()); + MIRSymbol *literalVar = FEManager::GetJavaStringManager().GetLiteralVar(str16); + if (literalVar == nullptr) { + literalVar = FEManager::GetJavaStringManager().CreateLiteralVar(FEManager::GetMIRBuilder(), str16, true); + } + AddrofNode *expr = FEManager::GetMIRBuilder().CreateExprAddrof(0, *literalVar, + FEManager::GetModule().GetMemPool()); + MIRType *ptrType = GlobalTables::GetTypeTable().GetTypeTable()[PTY_ptr]; + cst = alloc.GetMemPool()->New(expr->GetStIdx(), expr->GetFieldID(), *ptrType); + } + } + MIRSymbol *fieldVar = GlobalTables::GetGsymTable().GetSymbolFromStidx(stIdx.Idx()); + if (fieldVar == nullptr) { + fieldVar = FEManager::GetMIRBuilder().GetOrCreateGlobalDecl(fieldName, *type); + fieldVar->SetAttrs(it->second.second.ConvertToTypeAttrs()); + } + if (cst != nullptr) { + fieldVar->SetKonst(cst); + } + } +} + +void FEInputStructHelper::ProcessFieldDef() { + for (FEInputFieldHelper *fieldHelper : fieldHelpers) { + bool success = fieldHelper->ProcessDeclWithContainer(allocator); + if (success) { + if (fieldHelper->IsStatic()) { + mirStructType->GetStaticFields().push_back(fieldHelper->GetMIRFieldPair()); + } else { + mirStructType->GetFields().push_back(fieldHelper->GetMIRFieldPair()); + } + } else { + ERR(kLncErr, "Error occurs in ProcessFieldDef for %s", GetStructNameOrin().c_str()); + } + } +} + +void FEInputStructHelper::ProcessExtraFields() { + // add to this set to add extrafield into a class, in this format: classname, fieldname, type, attributes + std::vector extraFields = { + { "Lcom_2Fandroid_2Finternal_2Fos_2FBinderCallsStats_3B", "mCallSessionsPoolSize", "i32", "private" }, + { "Ljava_2Flang_2FObject_3B", "shadow_24__klass__", "Ljava_2Flang_2FClass_3B", "private transient final" }, + }; + for (auto it = extraFields.begin(); it != extraFields.end(); ++it) { + bool isCreat = false; + MIRStructType *structType = 
FEManager::GetTypeManager().GetOrCreateClassOrInterfaceType(it->klass, + false, FETypeFlag::kSrcUnknown, isCreat); + if (structType->IsImported()) { + continue; + } + GStrIdx fieldStrIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(it->field); + MIRType *fieldType = FEManager::GetTypeManager().GetOrCreateTypeFromName(it->type, FETypeFlag::kSrcUnknown, true); + std::vector attrs = FEUtils::Split(it->attr, ' '); + FieldAttrs typeAttrs; + for (auto ait = attrs.begin(); ait != attrs.end(); ++ait) { + FEInputFieldHelper::SetFieldAttribute(*ait, typeAttrs); + } + for (auto fit = structType->GetFields().begin(); fit != structType->GetFields().end(); ++fit) { + if (fit->first == fieldStrIdx) { + (void)structType->GetFields().erase(fit); + break; + } + } + // insert at the beginning + structType->GetFields().insert(structType->GetFields().cbegin(), + FieldPair(fieldStrIdx, TyIdxFieldAttrPair(fieldType->GetTypeIndex(), typeAttrs))); + } +} + +void FEInputStructHelper::ProcessMethodDef() { + for (FEInputMethodHelper *methodHelper : methodHelpers) { + bool success = methodHelper->ProcessDecl(allocator); + if (success) { + mirStructType->GetMethods().push_back(methodHelper->GetMIRMethodPair()); + methodHelper->SetClassTypeInfo(*mirStructType); + } else { + ERR(kLncErr, "Error occurs in ProcessMethodDef for %s", GetStructNameOrin().c_str()); + } + } +} + +void FEInputStructHelper::ProcessPragma() { + if (isSkipped) { + return; + } + std::vector pragmas = pragmaHelper->GenerateMIRPragmas(); + std::vector &pragmaVec = mirStructType->GetPragmaVec(); + for (MIRPragma *pragma : pragmas) { + pragmaVec.push_back(pragma); + } +} + +// ---------- FEInputMethodHelper ---------- +bool FEInputMethodHelper::ProcessDeclImpl(MapleAllocator &allocatorIn) { + (void)allocatorIn; + CHECK_FATAL(false, "NYI"); + return true; +} + +// ---------- FEInputHelper ---------- +bool FEInputHelper::PreProcessDecl() { + bool success = true; + for (FEInputStructHelper *helper : structHelpers) { + success = helper->PreProcessDecl() ? success : false; + } + return success; +} + +bool FEInputHelper::ProcessDecl() { + bool success = true; + for (FEInputStructHelper *helper : structHelpers) { + success = helper->ProcessDecl() ? success : false; + } + return success; +} + +bool FEInputHelper::ProcessImpl() const { + return true; +} +} diff --git a/src/hir2mpl/common/src/fe_java_string_manager.cpp b/src/hir2mpl/common/src/fe_java_string_manager.cpp new file mode 100644 index 0000000000000000000000000000000000000000..666404ce1e790ed9be4269bf50c25cba749a7146 --- /dev/null +++ b/src/hir2mpl/common/src/fe_java_string_manager.cpp @@ -0,0 +1,291 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "fe_java_string_manager.h" +#include +#include "global_tables.h" +#include "namemangler.h" +#include "muid.h" +#include "literalstrname.h" +#include "fe_config_parallel.h" +#include "feir_type.h" +#include "fe_manager.h" +#include "fe_options.h" + +namespace maple { +FEJavaStringManager::FEJavaStringManager(MIRModule &argModule, MIRBuilder &mirBuilderIn) + : module(argModule), mirBuilder(mirBuilderIn) { +} + +FEJavaStringManager::~FEJavaStringManager() { + typeString = nullptr; +} + +void FEJavaStringManager::LoadProfilingData(const std::string &profileFileName) { + std::ifstream inFile(profileFileName); + if (!inFile.is_open()) { + WARN(kLncWarn, "Cannot open literal profile data file %s", profileFileName.c_str()); + return; + } + std::string literalName; + while (std::getline(inFile, literalName)) { + if (!literalName.empty()) { + (void)preloadSet.insert(literalName); + } + } + inFile.close(); +} + +MIRSymbol *FEJavaStringManager::GetLiteralPtrVar(const MIRSymbol *var) const { + auto it = literalMap.find(var); + if (it != literalMap.end()) { + return it->second; + } else { + return nullptr; + } +} + +MIRSymbol *FEJavaStringManager::GetLiteralPtrVar(const std::string &str) const { + MIRSymbol *literalVar = GetLiteralVar(str); + return GetLiteralPtrVar(literalVar); +} + +MIRSymbol *FEJavaStringManager::GetLiteralPtrVar(const std::u16string &strU16) const { + MIRSymbol *literalVar = GetLiteralVar(strU16); + return GetLiteralPtrVar(literalVar); +} + +MIRSymbol *FEJavaStringManager::CreateLiteralVar(MIRBuilder &mirBuilderIn, const std::string &str, bool isFieldValue) { + std::u16string strU16; + (void)namemangler::UTF8ToUTF16(strU16, str); + return CreateLiteralVar(mirBuilderIn, strU16, isFieldValue); +} + +MIRSymbol *FEJavaStringManager::CreateLiteralVar(MIRBuilder &mirBuilderIn, const std::u16string &strU16, + bool isFieldValue) { + HIR2MPL_PARALLEL_FORBIDDEN(); + if (typeString == nullptr) { + FEIRTypeDefault type(PTY_ref); + type.LoadFromJavaTypeName("Ljava/lang/String;", false); + typeString = type.GenerateMIRTypeAuto(kSrcLangJava); + } + MIRSymbol *literalVar = GetLiteralVar(strU16); + if (literalVar != nullptr) { + return literalVar; + } + std::string literalGlobalName = GetLiteralGlobalName(strU16); + bool compress = useCompressedJavaString && IsAllASCII(strU16); + MIRArrayType *byteArrayType = ConstructArrayType4Str(strU16, compress); + literalVar = mirBuilderIn.GetOrCreateGlobalDecl(literalGlobalName.c_str(), *byteArrayType); + MIRAggConst *strConst = CreateByteArrayConst(strU16, *byteArrayType, compress); + literalVar->SetKonst(strConst); + literalVar->SetAttr(ATTR_readonly); + literalVar->SetStorageClass(kScFstatic); + bool isHotLiteral = false; + if (preloadSet.find(literalGlobalName) != preloadSet.end()) { + isHotLiteral = true; + (void)literalSet.insert(literalVar); + } + if (isFieldValue) { + (void)fieldValueSet.insert(literalVar); + } + if ((isFieldValue || isHotLiteral) && (!FEOptions::GetInstance().IsAOT())) { + std::string literalGlobalPtrName = namemangler::kPtrPrefixStr + literalGlobalName; + MIRSymbol *literalVarPtr = mirBuilderIn.GetOrCreateGlobalDecl(literalGlobalPtrName.c_str(), *typeString); + literalVarPtr->SetStorageClass(literalVar->GetStorageClass()); + AddrofNode *expr = mirBuilderIn.CreateExprAddrof(0, *literalVar, module.GetMemPool()); + MIRConst *cst = module.GetMemPool()->New( + expr->GetStIdx(), expr->GetFieldID(), *GlobalTables::GetTypeTable().GetPtr()); + literalVarPtr->SetKonst(cst); + literalVarPtr->SetAttr(ATTR_readonly); + 
literalMap[literalVar] = literalVarPtr; + } + (void)GlobalTables::GetConstPool().GetConstU16StringPool().insert(std::make_pair(strU16, literalVar)); + return literalVar; +} + +MIRSymbol *FEJavaStringManager::GetLiteralVar(const std::string &str) const { + std::u16string strU16; + (void)namemangler::UTF8ToUTF16(strU16, str); + return GetLiteralVar(strU16); +} + +MIRSymbol *FEJavaStringManager::GetLiteralVar(const std::u16string &strU16) const { + auto it = GlobalTables::GetConstPool().GetConstU16StringPool().find(strU16); + if (it != GlobalTables::GetConstPool().GetConstU16StringPool().end()) { + return it->second; + } + return nullptr; +} + +std::string FEJavaStringManager::GetLiteralGlobalName(const std::u16string &strU16) { + std::string literalGlobalName; + std::vector swapped = SwapBytes(strU16); + if (strU16.length() == 0) { + literalGlobalName = LiteralStrName::GetLiteralStrName(swapped.data(), 0); + } else { + literalGlobalName = LiteralStrName::GetLiteralStrName(swapped.data(), static_cast(strU16.length() << 1)); + } + return literalGlobalName; +} + +// Valid ASCII characters are in range 1..0x7f. Zero is not considered ASCII +// because it would complicate the detection of ASCII strings in Modified-UTF8. +bool FEJavaStringManager::IsAllASCII(const std::u16string &strU16) { + if (strU16.length() == 0) { + return false; + } + for (size_t i = 0; i < strU16.length(); ++i) { + uint16 val = ExchangeBytesPosition(strU16[i]); + if ((val - 1u) >= 0x7fu) { + return false; + } + } + return true; +} + +MIRArrayType *FEJavaStringManager::ConstructArrayType4Str(const std::u16string &strU16, bool compressible) const { + HIR2MPL_PARALLEL_FORBIDDEN(); + uint32 arraySize[1]; + // use 2 bytes per char in uncompress mode + uint32 length = compressible ? static_cast(strU16.length()) : static_cast(strU16.length() * 2); +#ifdef JAVA_OBJ_IN_MFILE +#ifdef USE_32BIT_REF + uint32 sizeInBytes = 16 + length; // shadow(4B)+monitor(4B)+count(4B)+hash(4B)+content +#else // !USE_32BIT_REF + uint32 sizeInBytes = 20 + length; // shadow(8B)+monitor(4B)+count(4B)+hash(4B)+content +#endif // USE_32BIT_REF +#else // !JAVA_OBJ_IN_MFILE + uint32 sizeInBytes = 8 + length; // count(4B)+hash(4B)+content +#endif // JAVA_OBJ_IN_MFILE + uint32 sizeInLongs = (sizeInBytes - 1) / 8 + 1; // round up to 8B units + arraySize[0] = sizeInLongs; + MIRArrayType *byteArrayType = static_cast( + GlobalTables::GetTypeTable().GetOrCreateArrayType(*GlobalTables::GetTypeTable().GetUInt64(), 1, arraySize)); + return byteArrayType; +} + +MIRAggConst *FEJavaStringManager::CreateByteArrayConst(const std::u16string &strU16, MIRArrayType &byteArrayType, + bool compressible) const { + MIRAggConst *newconst = module.GetMemPool()->New(module, byteArrayType); + MIRType *uInt64 = GlobalTables::GetTypeTable().GetUInt64(); + MemPool *mp = module.GetMemPool(); + DWBuffer currData = { 0, 0 }; + +#ifdef JAVA_OBJ_IN_MFILE + // @shadow + // To avoid linker touch cold pages, the classinfo is not set ready in file. + // It will be set at runtime +#ifdef USE_32BIT_REF + AddDataIntoByteArray(*newconst, *mp, currData, static_cast(0), *uInt64); +#else + AddDataIntoByteArray(*newconst, *mp, currData, static_cast(0), *uInt64); +#endif // USE_32BIT_REF + // @monitor + AddDataIntoByteArray(*newconst, *mp, currData, static_cast(0), *uInt64); +#endif // JAVA_OBJ_IN_MFILE + + // @count + uint32 strCount = (strU16.length() > 0) ? 
static_cast<uint32>((strU16.length() * 2) | compressible) : 0;
+  AddDataIntoByteArray(*newconst, *mp, currData, strCount, *uInt64);
+
+  // @hash
+  uint32 hash = static_cast<uint32>(
+      LiteralStrName::CalculateHashSwapByte(strU16.data(), static_cast<uint32>(strU16.length())));
+  AddDataIntoByteArray(*newconst, *mp, currData, hash, *uInt64);
+
+  // @content
+  if (compressible) {
+    for (size_t i = 0; i < strU16.size(); i++) {
+      AddDataIntoByteArray(*newconst, *mp, currData, static_cast<uint8>(ExchangeBytesPosition(strU16[i])), *uInt64);
+    }
+  } else {
+    for (size_t i = 0; i < strU16.size(); i++) {
+      AddDataIntoByteArray(*newconst, *mp, currData, static_cast<uint16>(ExchangeBytesPosition(strU16[i])), *uInt64);
+    }
+  }
+  // in case there is remaining data in the buffer
+  FinishByteArray(*newconst, *mp, currData, *uInt64);
+  return newconst;
+}
+
+std::vector FEJavaStringManager::SwapBytes(const std::u16string &strU16) {
+  std::vector out;
+  for (size_t i = 0; i < strU16.length(); ++i) {
+    uint16 c16 = strU16[i];
+    out.push_back((c16 & 0xFF00) >> 8);
+    out.push_back(c16 & 0xFF);
+  }
+  out.push_back(0);
+  out.push_back(0);
+  return out;
+}
+
+uint16 FEJavaStringManager::ExchangeBytesPosition(uint16 input) {
+  uint16 lowerByte = input << 8;
+  uint16 higherByte = input >> 8;
+  return lowerByte | higherByte;
+}
+
+template <typename T>
+void FEJavaStringManager::AddDataIntoByteArray(MIRAggConst &newConst, MemPool &mp, DWBuffer &buf, T data,
+                                               MIRType &uInt64) {
+  if (buf.pos == 8) {  // buffer is already full
+    newConst.PushBack(mp.New<MIRIntConst>(buf.data, uInt64));
+    buf.data = 0;
+    buf.pos = 0;
+  }
+  CHECK_FATAL(((buf.pos + sizeof(T)) <= 8), "inserted data exceeds current buffer capacity");
+  buf.data |= ((static_cast<uint64>(data)) << (buf.pos * 8));
+  buf.pos += sizeof(T);
+}
+
+void FEJavaStringManager::FinishByteArray(MIRAggConst &newConst, MemPool &mp, DWBuffer &buf, MIRType &uInt64) {
+  if (buf.pos > 0) {  // there is still data in the buffer
+    newConst.PushBack(mp.New<MIRIntConst>(buf.data, uInt64));
+    buf.data = 0;
+    buf.pos = 0;
+  }
+}
+
+void FEJavaStringManager::GenStringMetaClassVar() {
+  std::unique_ptr<MIRStructType> metaClassType = std::make_unique<MIRStructType>(kTypeStruct);
+
+  GStrIdx strIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName("dummy");
+  metaClassType->GetFields().emplace_back(
+      strIdx, TyIdxFieldAttrPair(GlobalTables::GetTypeTable().GetVoidPtr()->GetTypeIndex(), FieldAttrs()));
+
+  GStrIdx metaStrIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(namemangler::kClassMetadataTypeName);
+  TyIdx tyIdx = GlobalTables::GetTypeTable().GetOrCreateMIRType(metaClassType.get());
+  // Global?
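+  // The mapping below is registered in the module-level type name table; the name on the newly
+  // created global type table entry is back-filled only when it is still unset. The class-info
+  // symbol built right after is the classinfo for Ljava_2Flang_2FString_3B (CLASSINFO_PREFIX_STR
+  // plus the mangled name) and is switched to extern storage when the String type itself is
+  // recorded as imported.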
+ module.GetTypeNameTab()->SetGStrIdxToTyIdx(metaStrIdx, tyIdx); + CHECK_FATAL(GlobalTables::GetTypeTable().GetTypeTable().size() > tyIdx, "empty check"); + if (GlobalTables::GetTypeTable().GetTypeTable()[tyIdx]->GetNameStrIdx() == 0) { + GlobalTables::GetTypeTable().GetTypeTable()[tyIdx]->SetNameStrIdx(metaStrIdx); + } + metaClassType->SetTypeIndex(tyIdx); + module.AddClass(tyIdx); + std::string buf(CLASSINFO_PREFIX_STR); + (void)buf.append("Ljava_2Flang_2FString_3B"); + stringMetaClassSymbol = mirBuilder.GetOrCreateGlobalDecl(buf.c_str(), *metaClassType); + GStrIdx typeNameIdxForString = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName("Ljava_2Flang_2FString_3B"); + if (FEManager::GetTypeManager().IsImportedType(typeNameIdxForString)) { + stringMetaClassSymbol->SetStorageClass(kScExtern); + } +} + +void FEJavaStringManager::ClearStringMetaClassSymbolExternFlag() { + stringMetaClassSymbol->SetStorageClass(kScGlobal); +} +} // namespace maple \ No newline at end of file diff --git a/src/hir2mpl/common/src/fe_manager.cpp b/src/hir2mpl/common/src/fe_manager.cpp new file mode 100644 index 0000000000000000000000000000000000000000..dec5dc361dbcbdc89049cedbbd6ed27228549345 --- /dev/null +++ b/src/hir2mpl/common/src/fe_manager.cpp @@ -0,0 +1,19 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "fe_manager.h" + +namespace maple { +FEManager *FEManager::manager = nullptr; +} // namespace maple \ No newline at end of file diff --git a/src/hir2mpl/common/src/fe_options.cpp b/src/hir2mpl/common/src/fe_options.cpp new file mode 100644 index 0000000000000000000000000000000000000000..848f0a03719f6121ef415d2a6a2241aa135645e2 --- /dev/null +++ b/src/hir2mpl/common/src/fe_options.cpp @@ -0,0 +1,73 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "fe_options.h" +#include "fe_file_type.h" +namespace maple { +FEOptions FEOptions::options; +FEOptions::FEOptions() + : isGenMpltOnly(false), + isGenAsciiMplt(false), + outputPath(""), + outputName(""), + dumpLevel(kDumpLevelDisable), + isDumpTime(false), + nthreads(0), + dumpThreadTime(false) {} + +void FEOptions::AddInputClassFile(const std::string &fileName) { + FEFileType::FileType type = FEFileType::GetInstance().GetFileTypeByMagicNumber(fileName); + if (type == FEFileType::FileType::kClass) { + inputClassFiles.push_back(fileName); + } else { + WARN(kLncWarn, "invalid input class file %s...skipped", fileName.c_str()); + } +} + +void FEOptions::AddInputJarFile(const std::string &fileName) { + FEFileType::FileType type = FEFileType::GetInstance().GetFileTypeByMagicNumber(fileName); + if (type == FEFileType::FileType::kJar) { + inputJarFiles.push_back(fileName); + } else { + WARN(kLncWarn, "invalid input jar file %s...skipped", fileName.c_str()); + } +} + +void FEOptions::AddInputDexFile(const std::string &fileName) { + FEFileType::FileType type = FEFileType::GetInstance().GetFileTypeByMagicNumber(fileName); + if (type == FEFileType::FileType::kDex) { + inputDexFiles.push_back(fileName); + } else { + WARN(kLncWarn, "invalid input dex file %s...skipped", fileName.c_str()); + } +} + +void FEOptions::AddInputASTFile(const std::string &fileName) { + FEFileType::FileType type = FEFileType::GetInstance().GetFileTypeByMagicNumber(fileName); + if (type == FEFileType::FileType::kAST) { + inputASTFiles.push_back(fileName); + } else { + WARN(kLncWarn, "invalid input AST file %s...skipped", fileName.c_str()); + } +} + +void FEOptions::AddInputMASTFile(const std::string &fileName) { + FEFileType::FileType type = FEFileType::GetInstance().GetFileTypeByMagicNumber(fileName); + if (type == FEFileType::FileType::kMAST) { + inputMASTFiles.push_back(fileName); + } else { + WARN(kLncWarn, "invalid input MAST file %s...skipped", fileName.c_str()); + } +} +} // namespace maple diff --git a/src/hir2mpl/common/src/fe_struct_elem_info.cpp b/src/hir2mpl/common/src/fe_struct_elem_info.cpp new file mode 100644 index 0000000000000000000000000000000000000000..a20a5d6b143b65398ab6d72a45c89a14b666bcad --- /dev/null +++ b/src/hir2mpl/common/src/fe_struct_elem_info.cpp @@ -0,0 +1,449 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "fe_struct_elem_info.h" +#include "global_tables.h" +#include "mpl_logging.h" +#include "namemangler.h" +#include "feir_builder.h" +#include "feir_var_name.h" +#include "fe_utils_java.h" +#include "fe_utils.h" +#include "fe_manager.h" +#include "fe_options.h" + + +namespace maple { +// ---------- FEStructElemInfo ---------- +FEStructElemInfo::FEStructElemInfo(MapleAllocator &allocatorIn, const StructElemNameIdx &argStructElemNameIdx, + MIRSrcLang argSrcLang, bool argIsStatic) + : isStatic(argIsStatic), + isMethod(false), + isDefined(false), + isFromDex(false), + isPrepared(false), + srcLang(argSrcLang), + allocator(allocatorIn), + structElemNameIdx(argStructElemNameIdx), + actualContainer(allocator.GetMemPool()) { +} + +UniqueFEIRType FEStructElemInfo::GetActualContainerType() const { + // Invokable after prepared + return FEIRBuilder::CreateTypeByJavaName(actualContainer.c_str(), true); +} + +// ---------- FEStructFieldInfo ---------- +FEStructFieldInfo::FEStructFieldInfo(MapleAllocator &allocatorIn, const StructElemNameIdx &argStructElemNameIdx, + MIRSrcLang argSrcLang, bool argIsStatic) + : FEStructElemInfo(allocatorIn, argStructElemNameIdx, argSrcLang, argIsStatic), + fieldType(nullptr), + fieldNameIdx(0), + fieldID(0), + isVolatile(false) { + isMethod = false; + LoadFieldType(); +} + +void FEStructFieldInfo::PrepareImpl(MIRBuilder &mirBuilder, bool argIsStatic) { + if (isPrepared && argIsStatic == isStatic) { + return; + } + // Prepare + actualContainer = GetStructName(); + const std::string stdActualContainer = actualContainer.c_str(); + std::string rawName = stdActualContainer + namemangler::kNameSplitterStr + GetElemName(); + if (isStatic && + FEOptions::GetInstance().GetModeJavaStaticFieldName() != FEOptions::ModeJavaStaticFieldName::kNoType) { + rawName = rawName + namemangler::kNameSplitterStr + GetSignatureName(); + } + fieldNameIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(rawName); + MIRStructType *structType = FEManager::GetTypeManager().GetStructTypeFromName(stdActualContainer); + if (structType == nullptr) { + isDefined = false; + isPrepared = true; + return; + } + isDefined = SearchStructFieldJava(*structType, mirBuilder, argIsStatic); + if (isDefined) { + return; + } + WARN(kLncErr, "use undefined %s field %s", argIsStatic ? 
"static" : "", rawName.c_str()); + isPrepared = true; + isStatic = argIsStatic; + return; +} + +void FEStructFieldInfo::LoadFieldType() { + switch (srcLang) { + case kSrcLangJava: + LoadFieldTypeJava(); + break; + case kSrcLangC: + WARN(kLncWarn, "kSrcLangC LoadFieldType NYI"); + break; + default: + WARN(kLncWarn, "unsupported language"); + break; + } +} + +void FEStructFieldInfo::LoadFieldTypeJava() { + fieldType = allocator.GetMemPool()->New(PTY_unknown); + static_cast(fieldType)->LoadFromJavaTypeName(GetSignatureName(), true); +} + +void FEStructFieldInfo::PrepareStaticField(const MIRStructType &structType) { + std::string ownerStructName = structType.GetName(); + const std::string &fieldName = GetElemName(); + std::string fullName = ownerStructName + namemangler::kNameSplitterStr + fieldName; + if (FEOptions::GetInstance().GetModeJavaStaticFieldName() != FEOptions::ModeJavaStaticFieldName::kNoType) { + fullName += namemangler::kNameSplitterStr + GetSignatureName(); + } + fieldNameIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(fullName); + isPrepared = true; + isStatic = true; +} + +void FEStructFieldInfo::PrepareNonStaticField(MIRBuilder &mirBuilder) { + FEIRTypeDefault feType(PTY_unknown); + feType.LoadFromJavaTypeName(GetSignatureName(), true); + MIRType *fieldMIRType = feType.GenerateMIRTypeAuto(srcLang); + uint32 idx = 0; + uint32 idx1 = 0; + MIRStructType *structType = FEManager::GetTypeManager().GetStructTypeFromName(GetStructName()); + mirBuilder.TraverseToNamedFieldWithType(*structType, structElemNameIdx.elem, fieldMIRType->GetTypeIndex(), idx1, idx); + fieldID = static_cast(idx); + isPrepared = true; + isStatic = false; +} + +bool FEStructFieldInfo::SearchStructFieldJava(MIRStructType &structType, MIRBuilder &mirBuilder, bool argIsStatic, + bool allowPrivate) { + if (structType.IsIncomplete()) { + return false; + } + GStrIdx nameIdx = structElemNameIdx.elem; + if (argIsStatic) { + // suppose anti-proguard is off in jbc. + // Turn on anti-proguard in jbc: -java-staticfield-name=smart && JBCClass2FEHelper::isStaticFieldProguard(false) + // Turn on anti-proguard in BC: -java-staticfield-name=smart + std::string fullName = structType.GetCompactMplTypeName() + namemangler::kNameSplitterStr + GetElemName(); + if (FEOptions::GetInstance().GetModeJavaStaticFieldName() != FEOptions::ModeJavaStaticFieldName::kNoType) { + fullName += namemangler::kNameSplitterStr + GetSignatureName(); + } + nameIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(fullName); + } + actualContainer = structType.GetCompactMplTypeName(); + const FieldVector &fields = argIsStatic ? 
structType.GetStaticFields() : structType.GetFields(); + for (const FieldPair &fieldPair : fields) { + if (fieldPair.first != nameIdx) { + continue; + } + if (fieldPair.second.second.GetAttr(FLDATTR_private) && !allowPrivate) { + continue; + } + if (CompareFieldType(fieldPair)) { + if (argIsStatic) { + PrepareStaticField(structType); + } else { + PrepareNonStaticField(mirBuilder); + } + isVolatile = fieldPair.second.second.GetAttr(FLDATTR_volatile); + return true; + } + } + // search parent + bool found = false; + if (structType.GetKind() == kTypeClass) { + MIRClassType &classType = static_cast(structType); + // implemented + for (const TyIdx &tyIdx : classType.GetInterfaceImplemented()) { + found = found || SearchStructFieldJava(tyIdx, mirBuilder, argIsStatic, false); + } + // parent + found = found || SearchStructFieldJava(classType.GetParentTyIdx(), mirBuilder, argIsStatic, false); + } else if (structType.GetKind() == kTypeInterface) { + // parent + MIRInterfaceType &interfaceType = static_cast(structType); + for (const TyIdx &tyIdx : interfaceType.GetParentsTyIdx()) { + found = found || SearchStructFieldJava(tyIdx, mirBuilder, argIsStatic, false); + } + } else { + CHECK_FATAL(false, "not supported yet"); + } + return found; +} + +bool FEStructFieldInfo::SearchStructFieldJava(const TyIdx &tyIdx, MIRBuilder &mirBuilder, bool argIsStatic, + bool allowPrivate) { + MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx); + if (type == nullptr) { + return false; + } + if (type->IsIncomplete()) { + return false; + } + if (type->GetKind() == kTypeClass || type->GetKind() == kTypeInterface) { + MIRStructType *structType = static_cast(type); + return SearchStructFieldJava(*structType, mirBuilder, argIsStatic, allowPrivate); + } else { + ERR(kLncErr, "parent type should be StructType"); + return false; + } +} + +bool FEStructFieldInfo::CompareFieldType(const FieldPair &fieldPair) const { + MIRType *fieldMIRType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(fieldPair.second.first); + std::string typeName = fieldMIRType->GetCompactMplTypeName(); + if (GetSignatureName().compare(typeName) == 0) { + return true; + } else { + return false; + } +} + +// ---------- FEStructMethodInfo ---------- +FEStructMethodInfo::FEStructMethodInfo(MapleAllocator &allocatorIn, const StructElemNameIdx &argStructElemNameIdx, + MIRSrcLang argSrcLang, bool argIsStatic) + : FEStructElemInfo(allocatorIn, argStructElemNameIdx, argSrcLang, argIsStatic), + isReturnVoid(false), + isConstructor(false), + isJavaPolymorphicCall(false), + isJavaDynamicCall(false), + methodNameIdx(argStructElemNameIdx.full), + retType(nullptr), + ownerType(nullptr), + mirFunc(nullptr), + argTypes(allocator.Adapter()) { + isMethod = true; + LoadMethodType(); +} + +FEStructMethodInfo::~FEStructMethodInfo() { + mirFunc = nullptr; + retType = nullptr; + ownerType = nullptr; +} + +PUIdx FEStructMethodInfo::GetPuIdx() const { + CHECK_NULL_FATAL(mirFunc); + return mirFunc->GetPuidx(); +} + +void FEStructMethodInfo::PrepareImpl(MIRBuilder &mirBuilder, bool argIsStatic) { + if (isPrepared && argIsStatic == isStatic) { + return; + } + switch (srcLang) { + case kSrcLangJava: + PrepareImplJava(mirBuilder, argIsStatic); + break; + case kSrcLangC: + PrepareMethodC(); + return; + default: + CHECK_FATAL(false, "unsupported src lang"); + } + PrepareMethod(); +} + +void FEStructMethodInfo::PrepareImplJava(MIRBuilder &mirBuilder, bool argIsStatic) { + // Prepare + actualContainer = GetStructName(); + MIRStructType *structType = nullptr; + if 
(!actualContainer.empty() && actualContainer[0] == 'A') { + structType = FEManager::GetTypeManager().GetStructTypeFromName("Ljava_2Flang_2FObject_3B"); + } else { + structType = FEManager::GetTypeManager().GetStructTypeFromName(actualContainer.c_str()); + } + isStatic = argIsStatic; + isDefined = false; + if (structType != nullptr) { + isDefined = SearchStructMethodJava(*structType, mirBuilder, argIsStatic); + if (isDefined) { + return; + } + } else if (isJavaDynamicCall) { + methodNameIdx = structElemNameIdx.full; + isDefined = true; + PrepareMethod(); + return; + } + std::string methodName = GlobalTables::GetStrTable().GetStringFromStrIdx(structElemNameIdx.full); + WARN(kLncWarn, "undefined %s method: %s", isStatic ? "static" : "", methodName.c_str()); +} + +void FEStructMethodInfo::LoadMethodType() { + switch (srcLang) { + case kSrcLangJava: + LoadMethodTypeJava(); + break; + case kSrcLangC: + break; + default: + WARN(kLncWarn, "unsupported language"); + break; + } +} + +void FEStructMethodInfo::LoadMethodTypeJava() { + std::string signatureJava = + namemangler::DecodeName(GlobalTables::GetStrTable().GetStringFromStrIdx(structElemNameIdx.full)); + std::vector typeNames = FEUtilJava::SolveMethodSignature(signatureJava); + CHECK_FATAL(typeNames.size() > 0, "invalid method signature: %s", signatureJava.c_str()); + // constructor check + const std::string &funcName = GetElemName(); + isConstructor = (funcName.find("init_28") == 0); + // return type + retType = allocator.GetMemPool()->New(PTY_unknown); + if (typeNames[0].compare("V") == 0) { + isReturnVoid = true; + } + static_cast(retType)->LoadFromJavaTypeName(typeNames[0], false); + // argument types + argTypes.clear(); + for (size_t i = 1; i < typeNames.size(); i++) { + FEIRType *argType = allocator.GetMemPool()->New(PTY_unknown); + static_cast(argType)->LoadFromJavaTypeName(typeNames[i], false); + argTypes.push_back(argType); + } + // owner type + ownerType = allocator.GetMemPool()->New(PTY_unknown); + static_cast(ownerType)->LoadFromJavaTypeName(GetStructName(), true); +} + +void FEStructMethodInfo::PrepareMethodC() { + mirFunc = FEManager::GetTypeManager().GetMIRFunction(methodNameIdx, isStatic); + if (mirFunc == nullptr) { + MIRType *mirRetType = retType->GenerateMIRTypeAuto(srcLang); + std::vector argsTypeIdx; + for (const FEIRType *argType : argTypes) { + MIRType *mirArgType = argType->GenerateMIRTypeAuto(srcLang); + argsTypeIdx.push_back(mirArgType->GetTypeIndex()); + } + mirFunc = FEManager::GetTypeManager().CreateFunction(methodNameIdx, mirRetType->GetTypeIndex(), + argsTypeIdx, false, isStatic); + mirFunc->SetFuncAttrs(funcAttrs); + } + isPrepared = true; +} + +void FEStructMethodInfo::PrepareMethod() { + mirFunc = FEManager::GetTypeManager().GetMIRFunction(methodNameIdx, isStatic); + if (mirFunc == nullptr) { + MIRType *mirRetType = retType->GenerateMIRTypeAuto(srcLang); + // args type + std::vector> argVarList; + std::vector argsTypeIdx; + if (!isStatic) { + UniqueFEIRVar regVar = std::make_unique(FEUtils::GetThisIdx(), ownerType->Clone(), false); + argVarList.emplace_back(std::move(regVar)); + argsTypeIdx.emplace_back(ownerType->GenerateMIRType(srcLang, true)->GetTypeIndex()); + } + uint8 regNum = 1; + for (const FEIRType *argType : argTypes) { + UniqueFEIRVar regVar = FEIRBuilder::CreateVarReg(regNum, argType->Clone(), false); + ++regNum; + argVarList.emplace_back(std::move(regVar)); + MIRType *mirArgType = argType->GenerateMIRTypeAuto(srcLang); + argsTypeIdx.push_back(mirArgType->GetTypeIndex()); + } + mirFunc = 
FEManager::GetTypeManager().CreateFunction(methodNameIdx, mirRetType->GetTypeIndex(), argsTypeIdx, false, + isStatic); + // Update formals for external function, + // defined function will be update formals later in FEFunction::UpdateFormal + for (const std::unique_ptr &argVar : argVarList) { + MIRType *mirTy = argVar->GetType()->GenerateMIRTypeAuto(); + std::string name = argVar->GetName(*mirTy); + MIRSymbol *sym = FEManager::GetMIRBuilder().GetOrCreateDeclInFunc(name, *mirTy, *mirFunc); + sym->SetStorageClass(kScFormal); + mirFunc->AddArgument(sym); + } + } + isPrepared = true; +} + +bool FEStructMethodInfo::SearchStructMethodJava(MIRStructType &structType, MIRBuilder &mirBuilder, bool argIsStatic, + bool allowPrivate) { + if (structType.IsIncomplete()) { + return false; + } + actualContainer = structType.GetCompactMplTypeName(); + std::string fullName = structType.GetCompactMplTypeName() + namemangler::kNameSplitterStr + GetElemName() + + namemangler::kNameSplitterStr + GetSignatureName(); + GStrIdx nameIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(fullName); + for (const MethodPair &methodPair : structType.GetMethods()) { + if (methodPair.second.second.GetAttr(FUNCATTR_private) && !allowPrivate) { + continue; + } + if (methodPair.second.second.GetAttr(FUNCATTR_static) != argIsStatic) { + continue; + } + const MIRSymbol *sym = GlobalTables::GetGsymTable().GetSymbolFromStidx(methodPair.first.Idx(), true); + CHECK_NULL_FATAL(sym); + if (sym->GetNameStrIdx() == nameIdx) { + isStatic = argIsStatic; + if (isStatic) { + methodNameIdx = nameIdx; + } + PrepareMethod(); + return true; + } + } + // search parent + return SearchStructMethodJavaInParent(structType, mirBuilder, argIsStatic); +} + +bool FEStructMethodInfo::SearchStructMethodJavaInParent(MIRStructType &structType, MIRBuilder &mirBuilder, + bool argIsStatic) { + bool found = false; + if (structType.GetKind() == kTypeClass) { + // parent + MIRClassType &classType = static_cast(structType); + found = SearchStructMethodJava(classType.GetParentTyIdx(), mirBuilder, argIsStatic, false); + // implemented + for (const TyIdx &tyIdx : classType.GetInterfaceImplemented()) { + found = found || SearchStructMethodJava(tyIdx, mirBuilder, argIsStatic, false); + } + } else if (structType.GetKind() == kTypeInterface) { + // parent + MIRInterfaceType &interfaceType = static_cast(structType); + for (const TyIdx &tyIdx : interfaceType.GetParentsTyIdx()) { + found = found || SearchStructMethodJava(tyIdx, mirBuilder, argIsStatic, false); + } + } else { + CHECK_FATAL(false, "not supported yet"); + } + return found; +} + +bool FEStructMethodInfo::SearchStructMethodJava(const TyIdx &tyIdx, MIRBuilder &mirBuilder, bool argIsStatic, + bool allowPrivate) { + MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx); + if (type == nullptr) { + return false; + } + if (type->IsIncomplete()) { + return false; + } + if (type->GetKind() == kTypeClass || type->GetKind() == kTypeInterface) { + MIRStructType *structType = static_cast(type); + return SearchStructMethodJava(*structType, mirBuilder, argIsStatic, allowPrivate); + } else { + ERR(kLncErr, "parent type should be StructType"); + return false; + } +} +} // namespace maple diff --git a/src/hir2mpl/common/src/fe_timer_ns.cpp b/src/hir2mpl/common/src/fe_timer_ns.cpp new file mode 100644 index 0000000000000000000000000000000000000000..6efff93a79b1ea5ef48cf0b21b4f758660d119eb --- /dev/null +++ b/src/hir2mpl/common/src/fe_timer_ns.cpp @@ -0,0 +1,31 @@ +/* + * Copyright (c) [2020-2021] Huawei 
Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "fe_timer_ns.h" +#include "types_def.h" + +namespace maple { +void FETimerNS::Start() { + (void)clock_gettime(CLOCK_REALTIME, &timeStart); +} + +void FETimerNS::Stop() { + (void)clock_gettime(CLOCK_REALTIME, &timeEnd); +} + +int64_t FETimerNS::GetTimeNS() const { + const int64 nsInS = 1000000000; + return nsInS * (timeEnd.tv_sec - timeStart.tv_sec) + (timeEnd.tv_nsec - timeStart.tv_nsec); +} +} // namespace maple diff --git a/src/hir2mpl/common/src/fe_type_hierarchy.cpp b/src/hir2mpl/common/src/fe_type_hierarchy.cpp new file mode 100644 index 0000000000000000000000000000000000000000..c51ae341d0f19d7e8ecedeb2d838f412e0fd21d3 --- /dev/null +++ b/src/hir2mpl/common/src/fe_type_hierarchy.cpp @@ -0,0 +1,95 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "fe_type_hierarchy.h" +#include "global_tables.h" +#include "fe_config_parallel.h" + +namespace maple { +FETypeHierarchy FETypeHierarchy::instance; + +void FETypeHierarchy::InitByGlobalTable() { + HIR2MPL_PARALLEL_FORBIDDEN(); + mapIdxChildParent.clear(); + for (const MIRType *type : GlobalTables::GetTypeTable().GetTypeTable()) { + if (type == nullptr) { + continue; + } + switch (type->GetKind()) { + case kTypeClass: { + const MIRClassType *typeClass = static_cast(type); + AddMIRType(*typeClass); + break; + } + case kTypeInterface: { + const MIRInterfaceType *typeInterface = static_cast(type); + AddMIRType(*typeInterface); + break; + } + default: + break; + } + } +} + +void FETypeHierarchy::AddMIRType(const MIRClassType &type) { + MIRType *parentType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(type.GetParentTyIdx()); + if (parentType != nullptr) { + CHECK_FATAL(parentType->IsStructType(), "parent must be struct type"); + AddParentChildRelation(parentType->GetNameStrIdx(), type.GetNameStrIdx()); + } + for (TyIdx tyIdx : type.GetInterfaceImplemented()) { + MIRType *implType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx); + CHECK_FATAL(implType->IsStructType(), "parent must be struct type"); + AddParentChildRelation(implType->GetNameStrIdx(), type.GetNameStrIdx()); + } +} + +void FETypeHierarchy::AddMIRType(const MIRInterfaceType &type) { + for (TyIdx tyIdx : type.GetParentsTyIdx()) { + MIRType *parentType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx); + CHECK_FATAL(parentType->IsStructType(), "parent must be struct type"); + AddParentChildRelation(parentType->GetNameStrIdx(), type.GetNameStrIdx()); + } +} + +bool FETypeHierarchy::IsParentOf(const GStrIdx &parentIdx, const GStrIdx &childIdx) { + if (parentIdx == childIdx) { + return true; + } + if (cache.find(std::make_pair(childIdx, parentIdx)) != cache.end()) { + return true; + } + auto it = mapIdxChildParent.find(childIdx); + if (it == mapIdxChildParent.end()) { + return false; + } + for (GStrIdx idx : it->second) { + if (IsParentOf(parentIdx, idx)) { + std::pair item = std::make_pair(childIdx, parentIdx); + if (cache.find(item) == cache.end()) { + CHECK_FATAL(cache.insert(item).second, "cache insert failed"); + } + return true; + } + } + return false; +} + +void FETypeHierarchy::AddParentChildRelation(const GStrIdx &parentIdx, const GStrIdx &childIdx) { + if (mapIdxChildParent[childIdx].find(parentIdx) == mapIdxChildParent[childIdx].end()) { + CHECK_FATAL(mapIdxChildParent[childIdx].insert(parentIdx).second, "mapIdxChildParent insert failed"); + } +} +} // namespace maple diff --git a/src/hir2mpl/common/src/fe_type_manager.cpp b/src/hir2mpl/common/src/fe_type_manager.cpp new file mode 100644 index 0000000000000000000000000000000000000000..5685715a32a04b8199183b4d9cbda9b2b70823a1 --- /dev/null +++ b/src/hir2mpl/common/src/fe_type_manager.cpp @@ -0,0 +1,804 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "fe_type_manager.h" +#include +#include +#include "mir_parser.h" +#include "bin_mplt.h" +#include "global_tables.h" +#include "fe_timer.h" +#include "fe_config_parallel.h" +#include "feir_type_helper.h" +#include "fe_macros.h" +#include "types_def.h" +#include "fe_utils_ast.h" + +namespace maple { +const UniqueFEIRType FETypeManager::kPrimFEIRTypeUnknown = std::make_unique(PTY_unknown); +const UniqueFEIRType FETypeManager::kPrimFEIRTypeU1 = std::make_unique(PTY_u1); +const UniqueFEIRType FETypeManager::kPrimFEIRTypeI8 = std::make_unique(PTY_i8); +const UniqueFEIRType FETypeManager::kPrimFEIRTypeU8 = std::make_unique(PTY_u8); +const UniqueFEIRType FETypeManager::kPrimFEIRTypeI16 = std::make_unique(PTY_i16); +const UniqueFEIRType FETypeManager::kPrimFEIRTypeU16 = std::make_unique(PTY_u16); +const UniqueFEIRType FETypeManager::kPrimFEIRTypeI32 = std::make_unique(PTY_i32); +const UniqueFEIRType FETypeManager::kPrimFEIRTypeU32 = std::make_unique(PTY_u32); +const UniqueFEIRType FETypeManager::kPrimFEIRTypeI64 = std::make_unique(PTY_i64); +const UniqueFEIRType FETypeManager::kPrimFEIRTypeU64 = std::make_unique(PTY_u64); +const UniqueFEIRType FETypeManager::kPrimFEIRTypeF32 = std::make_unique(PTY_f32); +const UniqueFEIRType FETypeManager::kPrimFEIRTypeF64 = std::make_unique(PTY_f64); +const UniqueFEIRType FETypeManager::kFEIRTypeJavaObject = std::make_unique(PTY_ref); +const UniqueFEIRType FETypeManager::kFEIRTypeJavaClass = std::make_unique(PTY_ref); +const UniqueFEIRType FETypeManager::kFEIRTypeJavaString = std::make_unique(PTY_ref); + +FETypeManager::FETypeManager(MIRModule &moduleIn) + : module(moduleIn), + mp(FEUtils::NewMempool("mempool for FETypeManager", false /* isLcalPool */)), + allocator(mp), + builder(&module), + srcLang(kSrcLangJava), + funcMCCGetOrInsertLiteral(nullptr) { + static_cast(kFEIRTypeJavaObject.get())->LoadFromJavaTypeName("Ljava/lang/Object;", false); + static_cast(kFEIRTypeJavaClass.get())->LoadFromJavaTypeName("Ljava/lang/Class;", false); + static_cast(kFEIRTypeJavaString.get())->LoadFromJavaTypeName("Ljava/lang/String;", false); + sameNamePolicy.SetFlag(FETypeSameNamePolicy::kFlagUseLastest); +} + +FETypeManager::~FETypeManager() { + mp = nullptr; + funcMCCGetOrInsertLiteral = nullptr; + + funcMCCStaticFieldGetBool = nullptr; + funcMCCStaticFieldGetByte = nullptr; + funcMCCStaticFieldGetShort = nullptr; + funcMCCStaticFieldGetChar = nullptr; + funcMCCStaticFieldGetInt = nullptr; + funcMCCStaticFieldGetLong = nullptr; + funcMCCStaticFieldGetFloat = nullptr; + funcMCCStaticFieldGetDouble = nullptr; + funcMCCStaticFieldGetObject = nullptr; + + funcMCCStaticFieldSetBool = nullptr; + funcMCCStaticFieldSetByte = nullptr; + funcMCCStaticFieldSetShort = nullptr; + funcMCCStaticFieldSetChar = nullptr; + funcMCCStaticFieldSetInt = nullptr; + funcMCCStaticFieldSetLong = nullptr; + funcMCCStaticFieldSetFloat = nullptr; + funcMCCStaticFieldSetDouble = nullptr; + funcMCCStaticFieldSetObject = nullptr; +} + +void FETypeManager::ReleaseMemPool() { + FEUtils::DeleteMempoolPtr(mp); +} + +bool FETypeManager::LoadMplts(const std::list &mpltNames, FETypeFlag flag, const std::string &phaseName) { + FETimer timer; + timer.StartAndDump(phaseName); + bool success = true; + for (const std::string &fileName : mpltNames) { + success = success && LoadMplt(fileName, flag); + } + timer.StopAndDumpTimeMS(phaseName); + return success; +} + +bool FETypeManager::LoadMplt(const std::string &mpltName, FETypeFlag flag) { + BinaryMplt binMplt(module); + if (!binMplt.Import(mpltName)) { + // 
not a binary mplt + std::ifstream file(mpltName); + if (!file.is_open()) { + ERR(kLncErr, "unable to open mplt file %s", mpltName.c_str()); + return false; + } + MIRParser parser(module); + if (!parser.ParseMPLTStandalone(file, mpltName)) { + ERR(kLncErr, "Failed to parse mplt file %s\n%s", mpltName.c_str(), parser.GetError().c_str()); + file.close(); + return false; + } + file.close(); + } + UpdateStructNameTypeMapFromTypeTable(mpltName, flag); + UpdateNameFuncMapFromTypeTable(); + return true; +} + +void FETypeManager::UpdateStructNameTypeMapFromTypeTable(const std::string &mpltName, FETypeFlag flag) { + bool sameNameUseLastest = sameNamePolicy.IsUseLastest(); + GStrIdx mpltNameIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(mpltName); + for (MIRType *type : GlobalTables::GetTypeTable().GetTypeTable()) { + if ((type == nullptr) || !IsStructType(*type)) { + continue; + } + MIRStructType *structType = static_cast<MIRStructType*>(type); + auto it = structNameTypeMap.insert(std::make_pair(structType->GetNameStrIdx(), std::make_pair(structType, flag))); + if (!it.second) { + // type already exists + structSameNameSrcList.push_back(std::make_pair(structType->GetNameStrIdx(), + structNameSrcMap[structType->GetNameStrIdx()])); + structSameNameSrcList.push_back(std::make_pair(structType->GetNameStrIdx(), mpltNameIdx)); + if (sameNameUseLastest) { + structNameTypeMap[structType->GetNameStrIdx()] = std::make_pair(structType, flag); + structNameSrcMap[structType->GetNameStrIdx()] = mpltNameIdx; + } + } else { + // type does not exist yet + structNameSrcMap[structType->GetNameStrIdx()] = mpltNameIdx; + } + } +} + +void FETypeManager::SetMirImportedTypes(FETypeFlag flag) { + for (auto &item : structNameTypeMap) { + MIRStructType *type = item.second.first; + if ((type != nullptr) && FETypeManager::IsStructType(*type)) { + type->SetIsImported(true); + item.second.second = flag; + } + } +} + +void FETypeManager::UpdateNameFuncMapFromTypeTable() { + for (uint32 i = 1; i < GlobalTables::GetGsymTable().GetSymbolTableSize(); i++) { + MIRSymbol *symbol = GlobalTables::GetGsymTable().GetSymbolFromStidx(i); + CHECK_FATAL(symbol, "Symbol is null"); + if (symbol->GetSKind() == kStFunc) { + CHECK_FATAL(symbol->GetFunction(), "Function in symbol is null"); + MIRFunction *func = symbol->GetFunction(); + if (func->GetAttr(FUNCATTR_static)) { + mpltNameStaticFuncMap[symbol->GetNameStrIdx()] = symbol->GetFunction(); + } else { + mpltNameFuncMap[symbol->GetNameStrIdx()] = symbol->GetFunction(); + } + } + } +} + +void FETypeManager::CheckSameNamePolicy() const { + std::unordered_map, GStrIdxHash> result; + for (const std::pair &item : structSameNameSrcList) { + result[item.first].push_back(item.second); + } + if (result.size() > 0) { + WARN(kLncWarn, "========== Structs list with the same name =========="); + } + for (const std::pair> &item : result) { + std::string typeName = GlobalTables::GetStrTable().GetStringFromStrIdx(item.first); + WARN(kLncWarn, "Type: %s", typeName.c_str()); + for (const GStrIdx &mpltNameIdx : item.second) { + std::string mpltName = GlobalTables::GetStrTable().GetStringFromStrIdx(mpltNameIdx); + WARN(kLncWarn, " Defined in %s", mpltName.c_str()); + } + } + if (result.size() > 0 && sameNamePolicy.IsFatal()) { + FATAL(kLncFatal, "Structs with the same name exist. 
Exit."); + } +} + +MIRStructType *FETypeManager::CreateStructType(const std::string &name) { + MIRStructType type(kTypeStruct); + GStrIdx strIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(name); + TyIdx tyIdx = GlobalTables::GetTypeTable().GetOrCreateMIRType(&type); + module.GetTypeNameTab()->SetGStrIdxToTyIdx(strIdx, tyIdx); + if (GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx.GetIdx())->GetNameStrIdx() == 0) { + GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx.GetIdx())->SetNameStrIdx(strIdx); + } + auto *structType = static_cast(GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx.GetIdx())); + structNameTypeMap[strIdx] = std::make_pair(structType, FETypeFlag::kSrcInput); + return structType; +} + +MIRStructType *FETypeManager::GetOrCreateStructType(const std::string &name) { + GStrIdx nameIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(name); + const std::unordered_map::iterator &it = structNameTypeMap.find(nameIdx); + if (it != structNameTypeMap.end()) { + return it->second.first; + } + MIRStructType *structType = CreateStructType(name); + module.PushbackTypeDefOrder(nameIdx); + CHECK_NULL_FATAL(structType); + return structType; +} + +MIRTypeByName *FETypeManager::CreateTypeByNameType(const std::string &name) { + GStrIdx strIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(name); + MIRTypeByName nameType(strIdx); + TyIdx tyIdx = GlobalTables::GetTypeTable().GetOrCreateMIRType(&nameType); + MIRTypeByName *type = static_cast(GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx.GetIdx())); + nameTypeMap[strIdx] = type; + return type; +} + +MIRTypeByName *FETypeManager::GetOrCreateTypeByNameType(const std::string &name) { + GStrIdx nameIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(name); + const auto &it = nameTypeMap.find(nameIdx); + if (it != nameTypeMap.cend()) { + return it->second; + } + MIRTypeByName *nameType = CreateTypeByNameType(name); + return nameType; +} + +MIRTypeByName *FETypeManager::GetTypeByNameType(const std::string &name) { + GStrIdx nameIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(name); + return GetTypeByNameType(nameIdx); +} + +MIRTypeByName *FETypeManager::GetTypeByNameType(const GStrIdx &nameIdx) { + const auto &it = nameTypeMap.find(nameIdx); + if (it != nameTypeMap.cend()) { + return it->second; + } + return nullptr; +} + +MIRTypeByName *FETypeManager::CreateTypedef(const std::string &name, const MIRType &type) { + MIRTypeByName *typdefType = GetOrCreateTypeByNameType(name); + if (!module.GetScope()->GetTypeAlias()->Exist(typdefType->GetNameStrIdx())) { + module.GetScope()->SetTypeAliasMap(typdefType->GetNameStrIdx(), type.GetTypeIndex()); + } + return typdefType; +} + +MIREnum *FETypeManager::CreateEnum(const std::string &name, PrimType primType) { + GStrIdx strIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(name); + MIREnum *enumType = new MIREnum(primType, strIdx); + enumNameMap[strIdx] = GlobalTables::GetEnumTable().enumTable.size(); + GlobalTables::GetEnumTable().enumTable.push_back(enumType); + return enumType; +} + +MIREnum *FETypeManager::GetOrCreateEnum(const std::string &name, PrimType primType) { + GStrIdx nameIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(name); + const auto &it = enumNameMap.find(nameIdx); + if (it != enumNameMap.cend()) { + CHECK_FATAL(it->second < GlobalTables::GetEnumTable().enumTable.size(), "enumTable out of bounds"); + MIREnum *enumType = GlobalTables::GetEnumTable().enumTable[it->second]; + return enumType; + } + MIREnum *enumType = 
CreateEnum(name, primType); + return enumType; +} + +size_t FETypeManager::GetEnumIdx(const std::string &name) { + GStrIdx nameIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(name); + const auto &it = enumNameMap.find(nameIdx); + if (it != enumNameMap.cend()) { + return it->second; + } + CHECK_FATAL(false, "The enum was not found, %s", name.c_str()); + return 0; +} + +/* + * create MIRStructType for complex number, e.g. + * type $Complex|F + */ +MIRType *FETypeManager::GetOrCreateComplexStructType(const MIRType &elemType) { + const std::string typeName = "Complex|" + FEUtilAST::Type2Label(elemType.GetPrimType()); + MIRStructType *structType = GetOrCreateStructType(typeName); + if (structType->GetFields().size() == 0) { + GStrIdx realStrIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName("real"); + GStrIdx imagStrIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName("imag"); + FieldAttrs fieldAttrs; + TyIdxFieldAttrPair pair(elemType.GetTypeIndex(), fieldAttrs); + FieldPair realPair(realStrIdx, pair); + FieldPair imagePair(imagStrIdx, pair); + structType->GetFields().push_back(realPair); + structType->GetFields().push_back(imagePair); + } + return structType; +} + +MIRStructType *FETypeManager::GetClassOrInterfaceType(const GStrIdx &nameIdx) const { + auto it = structNameTypeMap.find(nameIdx); + if (it == structNameTypeMap.end()) { + return nullptr; + } else { + return it->second.first; + } +} + +FETypeFlag FETypeManager::GetClassOrInterfaceTypeFlag(const GStrIdx &nameIdx) const { + auto it = structNameTypeMap.find(nameIdx); + if (it == structNameTypeMap.end()) { + return FETypeFlag::kDefault; + } else { + return it->second.second; + } +} + +MIRStructType *FETypeManager::CreateClassOrInterfaceType(const GStrIdx &nameIdx, bool isInterface, + FETypeFlag typeFlag) { + MIRType *type = nullptr; + std::string name = GlobalTables::GetStrTable().GetStringFromStrIdx(nameIdx); + if (isInterface) { + type = GlobalTables::GetTypeTable().GetOrCreateInterfaceType(name.c_str(), module); + } else { + type = GlobalTables::GetTypeTable().GetOrCreateClassType(name.c_str(), module); + } + CHECK_NULL_FATAL(type); + ASSERT(IsStructType(*type), "type is not struct type"); + MIRStructType *structType = static_cast(type); + structNameTypeMap[nameIdx] = std::make_pair(structType, typeFlag); + return structType; +} + +MIRStructType *FETypeManager::GetOrCreateClassOrInterfaceType(const GStrIdx &nameIdx, bool isInterface, + FETypeFlag typeFlag, bool &isCreate) { + // same name policy: mpltSys > dex > mpltApk > mplt + const std::unordered_map::iterator &it = structNameTypeMap.find(nameIdx); + if (it != structNameTypeMap.end()) { + uint16 flagExist = it->second.second & FETypeFlag::kSrcMask; + uint16 flagNew = typeFlag & FETypeFlag::kSrcMask; + // type is existed, use existed type + if (flagNew > flagExist) { + isCreate = false; + return it->second.first; + } + // type is existed when src input, replace with new type + if (typeFlag == FETypeFlag::kSrcInput && it->second.second != FETypeFlag::kSrcMpltApk) { + UpdateDupTypes(nameIdx, isInterface, it); + } + } + MIRStructType *structType = CreateClassOrInterfaceType(nameIdx, isInterface, typeFlag); + isCreate = true; + CHECK_NULL_FATAL(structType); + return structType; +} + +void FETypeManager::UpdateDupTypes(const GStrIdx &nameIdx, bool isInterface, + const std::unordered_map::iterator &importedTypeIt) { + FE_INFO_LEVEL(FEOptions::kDumpLevelInfo, "duplicated type %s from src", + GlobalTables::GetStrTable().GetStringFromStrIdx(nameIdx).c_str()); + 
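+ // The source-input definition takes precedence: the imported duplicate is erased from structNameTypeMap and structNameSrcMap below, and when its class/interface kind differs from the new definition its slot in the global type table is replaced with a fresh incomplete type; otherwise its contents are cleared.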
MIRStructType *importedType = importedTypeIt->second.first; + MIRStructType *newType = nullptr; + // If locally defined type and imported type have the same name, but one is of interface and another one + // is of class type, we need to update the type + if ((importedType->IsMIRClassType() && isInterface) || + (importedType->IsMIRInterfaceType() && !isInterface)) { + if (isInterface) { + newType = new MIRInterfaceType(kTypeInterfaceIncomplete); + } else { + newType = new MIRClassType(kTypeClassIncomplete); + } + newType->SetTypeIndex(importedType->GetTypeIndex()); + importedType->SetTypeIndex(TyIdx(-1)); + newType->SetNameStrIdx(importedType->GetNameStrIdx()); + importedType->SetNameStrIdxItem(0); + CHECK_FATAL(newType->GetTypeIndex() < GlobalTables::GetTypeTable().GetTypeTable().size(), + "newType->_ty_idx >= GlobalTables::GetTypeTable().type_table_.size()"); + GlobalTables::GetTypeTable().GetTypeTable()[newType->GetTypeIndex()] = newType; + } else { + importedType->ClearContents(); + } + (void)structNameTypeMap.erase(importedTypeIt); + auto it = structNameSrcMap.find(nameIdx); + if (it != structNameSrcMap.end()) { + (void)structNameSrcMap.erase(it); + } +} + +MIRType *FETypeManager::GetOrCreateClassOrInterfacePtrType(const GStrIdx &nameIdx, bool isInterface, + FETypeFlag typeFlag, bool &isCreate) { + MIRStructType *structType = GetOrCreateClassOrInterfaceType(nameIdx, isInterface, typeFlag, isCreate); + MIRType *ptrType = GetOrCreatePointerType(*structType); + CHECK_NULL_FATAL(ptrType); + return ptrType; +} + +MIRStructType *FETypeManager::GetStructTypeFromName(const std::string &name) { + GStrIdx nameIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(name); + return GetStructTypeFromName(nameIdx); +} + +MIRStructType *FETypeManager::GetStructTypeFromName(const GStrIdx &nameIdx) { + auto it = structNameTypeMap.find(nameIdx); + if (it == structNameTypeMap.end()) { + return nullptr; + } else { + return it->second.first; + } +} + +uint32 FETypeManager::GetTypeIDFromMplClassName(const std::string &mplClassName, int32 dexFileHashCode) const { + const auto &itMap = classNameTypeIDMapAllDex.find(dexFileHashCode); + if (itMap != classNameTypeIDMapAllDex.end()) { + const auto &thisDexClassNameTypeIDMap = itMap->second; + const auto &it = thisDexClassNameTypeIDMap.find(mplClassName); + if (it != thisDexClassNameTypeIDMap.end()) { + return it->second; + } + return UINT32_MAX; + } + return UINT32_MAX; // some type id not in the dex file, give UINT32_MAX +} + +MIRType *FETypeManager::GetOrCreateTypeFromName(const std::string &name, FETypeFlag typeFlag, bool usePtr) { + CHECK_FATAL(!name.empty(), "type name is empty"); + PrimType pty = GetPrimType(name); + if (pty != kPtyInvalid) { + return GlobalTables::GetTypeTable().GetTypeTable().at(pty); + } + switch (name[0]) { + case 'L': { + bool isCreate = false; + MIRStructType *structType = GetOrCreateClassOrInterfaceType(name, false, typeFlag, isCreate); + if (usePtr) { + return GetOrCreatePointerType(*structType); + } else { + return structType; + } + } + case 'A': { + uint32 dim = 0; + bool isCreate = false; + const std::string &elemTypeName = GetBaseTypeName(name, dim, true); + MIRType *elemType = GetMIRTypeForPrim(elemTypeName); + if (elemType == nullptr) { + elemType = GetOrCreateClassOrInterfaceType(elemTypeName, false, typeFlag, isCreate); + CHECK_NULL_FATAL(elemType); + elemType = GetOrCreatePointerType(*elemType); + } + CHECK_FATAL(dim <= UINT8_MAX, "array dimension (%u) is out of range", dim); + MIRType *type = 
GetOrCreateArrayType(*elemType, static_cast(dim)); + ASSERT(type != nullptr, "Array type is null"); + if (usePtr) { + return GetOrCreatePointerType(*type); + } else { + return type; + } + } + default: + MIRType *type = GetMIRTypeForPrim(name[0]); + CHECK_FATAL(type, "Unresolved type %s", name.c_str()); + return type; + } +} + +MIRType *FETypeManager::GetOrCreatePointerType(const MIRType &type, PrimType ptyPtr) { + MIRType *retType = GlobalTables::GetTypeTable().GetOrCreatePointerType(type, ptyPtr); + CHECK_NULL_FATAL(retType); + return retType; +} + +MIRType *FETypeManager::GetOrCreateArrayType(MIRType &elemType, uint8 dim, PrimType ptyPtr) { + switch (srcLang) { + case kSrcLangJava: + case kSrcLangC: // Need to be optimized + return GetOrCreateJArrayType(elemType, dim, ptyPtr); + default: + CHECK_FATAL(false, "unsupported src lang: %d", srcLang); + return nullptr; + } +} + +MIRType *FETypeManager::GetOrCreateJArrayType(MIRType &elemType, uint8 dim, PrimType ptyPtr) { + MIRType *type = &elemType; + for (uint8 i = 0; i < dim; i++) { + type = GlobalTables::GetTypeTable().GetOrCreateJarrayType(*type); + CHECK_NULL_FATAL(type); + if (i != dim - 1) { + type = GetOrCreatePointerType(*type, ptyPtr); + CHECK_NULL_FATAL(type); + } + } + CHECK_NULL_FATAL(type); + return type; +} + +void FETypeManager::AddClassToModule(const MIRStructType &structType) { + module.AddClass(structType.GetTypeIndex()); +} + +FEStructElemInfo *FETypeManager::RegisterStructFieldInfo( + const StructElemNameIdx &argStructElemNameIdx, MIRSrcLang argSrcLang, bool isStatic) { + std::lock_guard lk(feTypeManagerMtx); + FEStructElemInfo *ptrInfo = GetStructElemInfo(argStructElemNameIdx.full); + if (ptrInfo != nullptr) { + return ptrInfo; + } + ptrInfo = allocator.GetMemPool()->New(allocator, argStructElemNameIdx, argSrcLang, isStatic); + CHECK_FATAL(mapStructElemInfo.insert(std::make_pair(argStructElemNameIdx.full, ptrInfo)).second, + "register struct elem info failed"); + return ptrInfo; +} + +FEStructElemInfo *FETypeManager::RegisterStructMethodInfo( + const StructElemNameIdx &argStructElemNameIdx, MIRSrcLang argSrcLang, bool isStatic) { + std::lock_guard lk(feTypeManagerMtx); + FEStructElemInfo *ptrInfo = GetStructElemInfo(argStructElemNameIdx.full); + if (ptrInfo != nullptr) { + return ptrInfo; + } + ptrInfo = allocator.GetMemPool()->New(allocator, argStructElemNameIdx, argSrcLang, isStatic); + CHECK_FATAL(mapStructElemInfo.insert(std::make_pair(argStructElemNameIdx.full, ptrInfo)).second, + "register struct elem info failed"); + return ptrInfo; +} + +FEStructElemInfo *FETypeManager::GetStructElemInfo(const GStrIdx &fullNameIdx) const { + auto it = mapStructElemInfo.find(fullNameIdx); + if (it == mapStructElemInfo.end()) { + return nullptr; + } + return it->second; +} + +MIRFunction *FETypeManager::GetMIRFunction(const std::string &classMethodName, bool isStatic) { + GStrIdx nameIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(classMethodName); + return GetMIRFunction(nameIdx, isStatic); +} + +MIRFunction *FETypeManager::GetMIRFunction(const GStrIdx &nameIdx, bool isStatic) { + const std::unordered_map &funcMap = isStatic ? nameStaticFuncMap : nameFuncMap; + auto it = funcMap.find(nameIdx); + if (it != funcMap.end()) { + return it->second; + } + const std::unordered_map &mpltFuncMap = + isStatic ? 
mpltNameStaticFuncMap : mpltNameFuncMap; + auto it2 = mpltFuncMap.find(nameIdx); + if (it2 != mpltFuncMap.end()) { + return it2->second; + } + return nullptr; +} + +MIRFunction *FETypeManager::CreateFunction(const GStrIdx &nameIdx, const TyIdx &retTypeIdx, + const std::vector &argsTypeIdx, bool isVarg, bool isStatic) { + HIR2MPL_PARALLEL_FORBIDDEN(); + MIRFunction *mirFunc = GetMIRFunction(nameIdx, isStatic); + if (mirFunc != nullptr) { + return mirFunc; + } + MIRSymbol *funcSymbol = GlobalTables::GetGsymTable().CreateSymbol(kScopeGlobal); + funcSymbol->SetNameStrIdx(nameIdx); + bool added = GlobalTables::GetGsymTable().AddToStringSymbolMap(*funcSymbol); + if (!added) { + funcSymbol = GlobalTables::GetGsymTable().GetSymbolFromStrIdx(nameIdx); + mirFunc = funcSymbol->GetFunction(); + if (mirFunc != nullptr) { + return mirFunc; + } + } + funcSymbol->SetStorageClass(kScText); + funcSymbol->SetSKind(kStFunc); + MemPool *mpModule = module.GetMemPool(); + ASSERT(mpModule, "mem pool is nullptr"); + mirFunc = mpModule->New(&module, funcSymbol->GetStIdx()); + mirFunc->AllocSymTab(); + size_t idx = GlobalTables::GetFunctionTable().GetFuncTable().size(); + CHECK_FATAL(idx < UINT32_MAX, "PUIdx is out of range"); + mirFunc->SetPuidx(static_cast(idx)); + mirFunc->SetPuidxOrigin(mirFunc->GetPuidx()); + GlobalTables::GetFunctionTable().GetFuncTable().push_back(mirFunc); + std::vector argsAttr; + for (uint32_t i = 0; i < argsTypeIdx.size(); i++) { + argsAttr.emplace_back(TypeAttrs()); + } + mirFunc->SetBaseClassFuncNames(nameIdx); + funcSymbol->SetTyIdx(GlobalTables::GetTypeTable().GetOrCreateFunctionType(retTypeIdx, argsTypeIdx, argsAttr, + isVarg)->GetTypeIndex()); + funcSymbol->SetFunction(mirFunc); + MIRFuncType *functype = static_cast(funcSymbol->GetType()); + mirFunc->SetMIRFuncType(functype); + mirFunc->SetReturnTyIdx(retTypeIdx); + if (isStatic) { + mirFunc->SetAttr(FUNCATTR_static); + CHECK_FATAL(nameStaticFuncMap.insert(std::make_pair(nameIdx, mirFunc)).second, "nameStaticFuncMap insert failed"); + } else { + CHECK_FATAL(nameFuncMap.insert(std::make_pair(nameIdx, mirFunc)).second, "nameFuncMap insert failed"); + } + return mirFunc; +} + +MIRFunction *FETypeManager::CreateFunction(const std::string &methodName, const std::string &returnTypeName, + const std::vector &argTypeNames, bool isVarg, bool isStatic) { + HIR2MPL_PARALLEL_FORBIDDEN(); + GStrIdx nameIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(methodName); + MIRFunction *mirFunc = GetMIRFunction(nameIdx, isStatic); + if (mirFunc != nullptr) { + return mirFunc; + } + MIRType *returnType = GetOrCreateTypeFromName(returnTypeName, FETypeFlag::kSrcUnknown, true); + std::vector argsTypeIdx; + for (const std::string &typeName : argTypeNames) { + MIRType *argType = GetOrCreateTypeFromName(typeName, FETypeFlag::kSrcUnknown, true); + argsTypeIdx.push_back(argType->GetTypeIndex()); + } + return CreateFunction(nameIdx, returnType->GetTypeIndex(), argsTypeIdx, isVarg, isStatic); +} + +const FEIRType *FETypeManager::GetOrCreateFEIRTypeByName(const std::string &typeName, const GStrIdx &typeNameIdx, + MIRSrcLang argSrcLang) { + const FEIRType *feirType = GetFEIRTypeByName(typeName); + if (feirType != nullptr) { + return feirType; + } + HIR2MPL_PARALLEL_FORBIDDEN(); + UniqueFEIRType uniType; + switch (argSrcLang) { + case kSrcLangJava: + uniType = FEIRTypeHelper::CreateTypeByJavaName(typeName, true, false); + break; + case kSrcLangC: + WARN(kLncWarn, "kSrcLangC GetOrCreateFEIRTypeByName NYI"); + break; + default: + CHECK_FATAL(false, 
"unsupported language"); + return nullptr; + } + feirType = uniType.get(); + nameFEIRTypeList.push_back(std::move(uniType)); + if (typeNameIdx == 0) { + nameFEIRTypeMap[GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(typeName)] = feirType; + } else { + nameFEIRTypeMap[typeNameIdx] = feirType; + } + return feirType; +} + +const FEIRType *FETypeManager::GetOrCreateFEIRTypeByName(const GStrIdx &typeNameIdx, MIRSrcLang argSrcLang) { + const std::string &typeName = GlobalTables::GetStrTable().GetStringFromStrIdx(typeNameIdx); + return GetOrCreateFEIRTypeByName(typeName, typeNameIdx, argSrcLang); +} + +const FEIRType *FETypeManager::GetFEIRTypeByName(const std::string &typeName) const { + GStrIdx typeNameIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(typeName); + return GetFEIRTypeByName(typeNameIdx); +} + +const FEIRType *FETypeManager::GetFEIRTypeByName(const GStrIdx &typeNameIdx) const { + auto it = nameFEIRTypeMap.find(typeNameIdx); + if (it == nameFEIRTypeMap.end()) { + return nullptr; + } else { + return it->second; + } +} + +bool FETypeManager::IsAntiProguardFieldStruct(const GStrIdx &structNameIdx) { + return setAntiProguardFieldStructIdx.find(structNameIdx) != setAntiProguardFieldStructIdx.end(); +} + +bool FETypeManager::IsStructType(const MIRType &type) { + MIRTypeKind kind = type.GetKind(); + return kind == kTypeStruct || kind == kTypeStructIncomplete || + kind == kTypeClass || kind == kTypeClassIncomplete || + kind == kTypeInterface || kind == kTypeInterfaceIncomplete || + kind == kTypeUnion; +} + +PrimType FETypeManager::GetPrimType(const std::string &name) { +#define LOAD_ALGO_PRIMARY_TYPE +#define PRIMTYPE(P) \ + if (name.compare(#P) == 0) { \ + return PTY_##P; \ + } +#include "prim_types.def" +#undef PRIMTYPE + return kPtyInvalid; +} + +MIRType *FETypeManager::GetMIRTypeForPrim(char c) { + switch (c) { + case 'B': + return GlobalTables::GetTypeTable().GetInt8(); + case 'C': + return GlobalTables::GetTypeTable().GetUInt16(); + case 'S': + return GlobalTables::GetTypeTable().GetInt16(); + case 'Z': + return GlobalTables::GetTypeTable().GetUInt1(); + case 'I': + return GlobalTables::GetTypeTable().GetInt32(); + case 'J': + return GlobalTables::GetTypeTable().GetInt64(); + case 'F': + return GlobalTables::GetTypeTable().GetFloat(); + case 'D': + return GlobalTables::GetTypeTable().GetDouble(); + case 'V': + return GlobalTables::GetTypeTable().GetVoid(); + case 'R': + return GlobalTables::GetTypeTable().GetRef(); + default: + return nullptr; + } +} + +std::string FETypeManager::GetBaseTypeName(const std::string &name, uint32 &dim, bool inMpl) { + dim = 0; + char prefix = inMpl ? 
'A' : '['; + while (name[dim] == prefix) { + dim++; + } + return name.substr(dim); +} + +void FETypeManager::SetComplete(MIRStructType &structType) { + switch (structType.GetKind()) { + case kTypeClassIncomplete: + structType.SetMIRTypeKind(kTypeClass); + break; + case kTypeInterfaceIncomplete: + structType.SetMIRTypeKind(kTypeInterface); + break; + case kTypeStructIncomplete: + structType.SetMIRTypeKind(kTypeStruct); + break; + default: + break; + } +} + +std::string FETypeManager::TypeAttrsToString(const TypeAttrs &attrs) { + std::stringstream ss; +#define TYPE_ATTR +#define ATTR(A) \ + if (attrs.GetAttr(ATTR_##A)) \ + ss << " " << #A; +#include "all_attributes.def" +#undef ATTR +#undef TYPE_ATTR + ss << " "; + return ss.str(); +} + +void FETypeManager::MarkExternStructType() { + for (auto elem : structNameTypeMap) { + if (elem.second.second != FETypeFlag::kSrcInput && + elem.second.second != FETypeFlag::kSrcMplt && + elem.second.second != FETypeFlag::kSrcMpltSys && + elem.second.second != FETypeFlag::kSrcMpltApk) { + module.AddExternStructType(elem.second.first); + } + } +} + +void FETypeManager::InitMCCFunctions() { + if (srcLang == kSrcLangJava) { + InitFuncMCCGetOrInsertLiteral(); + } +} + +void FETypeManager::InitFuncMCCGetOrInsertLiteral() { + std::string funcName = "MCC_GetOrInsertLiteral"; + GStrIdx nameIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(funcName); + MIRType *typeString = kFEIRTypeJavaString->GenerateMIRTypeAuto(kSrcLangJava); + std::vector argsType; + funcMCCGetOrInsertLiteral = CreateFunction(nameIdx, typeString->GetTypeIndex(), argsType, false, false); + funcMCCGetOrInsertLiteral->SetAttr(FUNCATTR_pure); + funcMCCGetOrInsertLiteral->SetAttr(FUNCATTR_nosideeffect); + funcMCCGetOrInsertLiteral->SetAttr(FUNCATTR_noprivate_defeffect); + nameMCCFuncMap[nameIdx] = funcMCCGetOrInsertLiteral; +} + +MIRFunction *FETypeManager::GetMCCFunction(const std::string &funcName) const { + GStrIdx funcNameIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(funcName); + return GetMCCFunction(funcNameIdx); +} + +MIRFunction *FETypeManager::GetMCCFunction(const GStrIdx &funcNameIdx) const { + auto it = nameMCCFuncMap.find(funcNameIdx); + CHECK_FATAL(it != nameMCCFuncMap.cend(), "The specified MCCFunc was not found"); + return it->second; +} +} // namespace maple diff --git a/src/hir2mpl/common/src/fe_utils.cpp b/src/hir2mpl/common/src/fe_utils.cpp new file mode 100644 index 0000000000000000000000000000000000000000..abd13345eef2c16258924a45f205635407813b62 --- /dev/null +++ b/src/hir2mpl/common/src/fe_utils.cpp @@ -0,0 +1,619 @@ +/* + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "fe_utils.h" +#include +#include "mpl_logging.h" +#include "mir_type.h" +#include "mir_builder.h" +#include "fe_manager.h" +namespace maple { +// ---------- FEUtils ---------- +const std::string FEUtils::kBoolean = "Z"; +const std::string FEUtils::kByte = "B"; +const std::string FEUtils::kShort = "S"; +const std::string FEUtils::kChar = "C"; +const std::string FEUtils::kInt = "I"; +const std::string FEUtils::kLong = "J"; +const std::string FEUtils::kFloat = "F"; +const std::string FEUtils::kDouble = "D"; +const std::string FEUtils::kVoid = "V"; +const std::string FEUtils::kThis = "_this"; +const std::string FEUtils::kMCCStaticFieldGetBool = "MCC_StaticFieldGetBool"; +const std::string FEUtils::kMCCStaticFieldGetByte = "MCC_StaticFieldGetByte"; +const std::string FEUtils::kMCCStaticFieldGetShort = "MCC_StaticFieldGetShort"; +const std::string FEUtils::kMCCStaticFieldGetChar = "MCC_StaticFieldGetChar"; +const std::string FEUtils::kMCCStaticFieldGetInt = "MCC_StaticFieldGetInt"; +const std::string FEUtils::kMCCStaticFieldGetLong = "MCC_StaticFieldGetLong"; +const std::string FEUtils::kMCCStaticFieldGetFloat = "MCC_StaticFieldGetFloat"; +const std::string FEUtils::kMCCStaticFieldGetDouble = "MCC_StaticFieldGetDouble"; +const std::string FEUtils::kMCCStaticFieldGetObject = "MCC_StaticFieldGetObject"; + +const std::string FEUtils::kMCCStaticFieldSetBool = "MCC_StaticFieldSetBool"; +const std::string FEUtils::kMCCStaticFieldSetByte = "MCC_StaticFieldSetByte"; +const std::string FEUtils::kMCCStaticFieldSetShort = "MCC_StaticFieldSetShort"; +const std::string FEUtils::kMCCStaticFieldSetChar = "MCC_StaticFieldSetChar"; +const std::string FEUtils::kMCCStaticFieldSetInt = "MCC_StaticFieldSetInt"; +const std::string FEUtils::kMCCStaticFieldSetLong = "MCC_StaticFieldSetLong"; +const std::string FEUtils::kMCCStaticFieldSetFloat = "MCC_StaticFieldSetFloat"; +const std::string FEUtils::kMCCStaticFieldSetDouble = "MCC_StaticFieldSetDouble"; +const std::string FEUtils::kMCCStaticFieldSetObject = "MCC_StaticFieldSetObject"; + +const std::string FEUtils::kCondGoToStmtLabelNamePrefix = "shortCircuit_"; + +std::vector FEUtils::Split(const std::string &str, char delim) { + std::vector ans; + std::stringstream ss; + ss.str(str); + std::string item; + while (std::getline(ss, item, delim)) { + ans.push_back(item); + } + return ans; +} + +uint8 FEUtils::GetWidth(PrimType primType) { + switch (primType) { + case PTY_u1: + return 1; + case PTY_i8: + case PTY_u8: + return 8; + case PTY_i16: + case PTY_u16: + return 16; + case PTY_i32: + case PTY_u32: + case PTY_f32: + return 32; + case PTY_i64: + case PTY_u64: + case PTY_f64: + return 64; + default: + CHECK_FATAL(false, "unsupported type %d", primType); + return 0; + } +} + +bool FEUtils::IsInteger(PrimType primType) { + return (primType == PTY_i8) || (primType == PTY_u8) || + (primType == PTY_i16) || (primType == PTY_u16) || + (primType == PTY_i32) || (primType == PTY_u32) || + (primType == PTY_i64) || (primType == PTY_u64); +} + +bool FEUtils::IsSignedInteger(PrimType primType) { + return (primType == PTY_i8) || (primType == PTY_i16) || (primType == PTY_i32) || (primType == PTY_i64); +} + +bool FEUtils::IsUnsignedInteger(PrimType primType) { + return (primType == PTY_u8) || (primType == PTY_u16) || (primType == PTY_u32) || (primType == PTY_u64); +} + +PrimType FEUtils::MergePrimType(PrimType primType1, PrimType primType2) { + if (primType1 == primType2) { + return primType1; + } + // merge signed integer + 
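+ // e.g. merging PTY_i16 with PTY_i32 yields PTY_i32 (the wider signed type); mixing a signed integer with an unsigned or floating-point operand trips the CHECK_FATAL below.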
CHECK_FATAL(!LogicXOR(IsSignedInteger(primType1), IsSignedInteger(primType2)), + "can not merge type %s and %s", GetPrimTypeName(primType1), GetPrimTypeName(primType2)); + if (IsSignedInteger(primType1)) { + return GetPrimTypeSize(primType1) >= GetPrimTypeSize(primType2) ? primType1 : primType2; + } + + // merge unsigned integer + CHECK_FATAL(!LogicXOR(IsUnsignedInteger(primType1), IsUnsignedInteger(primType2)), + "can not merge type %s and %s", GetPrimTypeName(primType1), GetPrimTypeName(primType2)); + if (IsUnsignedInteger(primType1)) { + if (GetPrimTypeSize(primType1) == GetPrimTypeSize(primType2) && GetPrimTypeSize(primType1) == 1) { + return PTY_u8; + } else { + return GetPrimTypeSize(primType1) >= GetPrimTypeSize(primType2) ? primType1 : primType2; + } + } + + // merge float + CHECK_FATAL(!LogicXOR(IsPrimitiveFloat(primType1), IsPrimitiveFloat(primType2)), + "can not merge type %s and %s", GetPrimTypeName(primType1), GetPrimTypeName(primType2)); + if (IsPrimitiveFloat(primType1)) { + return GetPrimTypeSize(primType1) >= GetPrimTypeSize(primType2) ? primType1 : primType2; + } + + CHECK_FATAL(false, "can not merge type %s and %s", GetPrimTypeName(primType1), GetPrimTypeName(primType2)); + return PTY_unknown; +} + +uint8 FEUtils::GetDim(const std::string &typeName) { + uint8 dim = 0; + for (size_t i = 0; i < typeName.length(); ++i) { + if (typeName.at(i) == 'A') { + dim++; + } else { + break; + } + } + return dim; +} + +std::string FEUtils::GetBaseTypeName(const std::string &typeName) { + return typeName.substr(GetDim(typeName)); +} + +PrimType FEUtils::GetPrimType(const GStrIdx &typeNameIdx) { + if (typeNameIdx == GetBooleanIdx()) { + return PTY_u1; + } + if (typeNameIdx == GetByteIdx()) { + return PTY_i8; + } + if (typeNameIdx == GetShortIdx()) { + return PTY_i16; + } + if (typeNameIdx == GetCharIdx()) { + return PTY_u16; + } + if (typeNameIdx == GetIntIdx()) { + return PTY_i32; + } + if (typeNameIdx == GetLongIdx()) { + return PTY_i64; + } + if (typeNameIdx == GetFloatIdx()) { + return PTY_f32; + } + if (typeNameIdx == GetDoubleIdx()) { + return PTY_f64; + } + if (typeNameIdx == GetVoidIdx()) { + return PTY_void; + } + return PTY_ref; +} + +std::string FEUtils::GetSequentialName0(const std::string &prefix, uint32_t num) { + std::stringstream ss; + ss << prefix << num; + return ss.str(); +} + +uint32 FEUtils::GetSequentialNumber() { + static uint32 unnamedSymbolIdx = 1; + return unnamedSymbolIdx++; +} + +std::string FEUtils::GetFileNameHashStr(const std::string &fileName, uint32 seed) { + const char *name = fileName.c_str(); + uint32 hash = 0; + while (*name) { + uint8_t uName = *name++; + hash = hash * seed + uName; + } + return kRenameKeyWord + std::to_string(hash); +} + +std::string FEUtils::GetSequentialName(const std::string &prefix) { + std::string name = GetSequentialName0(prefix, GetSequentialNumber()); + return name; +} + +std::string FEUtils::CreateLabelName() { + static uint32 unnamedSymbolIdx = 1; + return "L." 
+ std::to_string(unnamedSymbolIdx++); +} + +bool FEUtils::TraverseToNamedField(MIRStructType &structType, const GStrIdx &nameIdx, FieldID &fieldID, + bool isTopLevel) { + for (uint32 fieldIdx = 0; fieldIdx < structType.GetFieldsSize(); ++fieldIdx) { + ++fieldID; + TyIdx fieldTyIdx = structType.GetFieldsElemt(fieldIdx).second.first; + MIRType *fieldType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(fieldTyIdx); + ASSERT(fieldType != nullptr, "fieldType is null"); + if (isTopLevel && structType.GetFieldsElemt(fieldIdx).first == nameIdx) { + return true; + } + // The fields of an embedded structure array are assigned fieldIDs + if (fieldType->GetKind() == kTypeArray) { + fieldType = fieldType->EmbeddedStructType(); + } + if (fieldType != nullptr && fieldType->IsStructType()) { + auto *subStructType = static_cast(fieldType); + if (TraverseToNamedField(*subStructType, nameIdx, fieldID, false)) { + return true; + } + } + } + return false; +} + +FieldID FEUtils::GetStructFieldID(MIRStructType *base, const std::string &fieldName) { + MIRStructType *type = base; + std::vector fieldNames = FEUtils::Split(fieldName, '$'); + std::reverse(fieldNames.begin(), fieldNames.end()); + FieldID fieldID = 0; + for (const auto &f: fieldNames) { + GStrIdx strIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(f); + CHECK_FATAL(type->IsStructType(), "Must be struct type!"); + if (TraverseToNamedField(*type, strIdx, fieldID)) { + type = static_cast(base->GetFieldType(fieldID)); + } + } + return fieldID; +} + +MIRType *FEUtils::GetStructFieldType(const MIRStructType *type, FieldID fieldID) { + FieldID tmpID = fieldID; + FieldPair fieldPair = type->TraverseToFieldRef(tmpID); + MIRType *fieldType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(fieldPair.second.first); + return fieldType; +} + +const MIRFuncType *FEUtils::GetFuncPtrType(const MIRType &type) { + const MIRType *mirType = &type; + if (mirType->GetKind() != kTypePointer) { + return nullptr; + } + mirType = static_cast(mirType)->GetPointedType(); + if (mirType->GetKind() != kTypePointer) { + return nullptr; + } + mirType = static_cast(mirType)->GetPointedType(); + if (mirType->GetKind() != kTypeFunction) { + return nullptr; + } + return static_cast(mirType); +} + +MIRConst *FEUtils::CreateImplicitConst(MIRType *type) { + switch (type->GetPrimType()) { + case PTY_u1: { + return GlobalTables::GetIntConstTable().GetOrCreateIntConst( + 0, *GlobalTables::GetTypeTable().GetPrimType(PTY_u1)); + } + case PTY_u8: { + return GlobalTables::GetIntConstTable().GetOrCreateIntConst( + 0, *GlobalTables::GetTypeTable().GetPrimType(PTY_u8)); + } + case PTY_u16: { + return GlobalTables::GetIntConstTable().GetOrCreateIntConst( + 0, *GlobalTables::GetTypeTable().GetPrimType(PTY_u16)); + } + case PTY_u32: { + return GlobalTables::GetIntConstTable().GetOrCreateIntConst( + 0, *GlobalTables::GetTypeTable().GetPrimType(PTY_u32)); + } + case PTY_u64: { + return GlobalTables::GetIntConstTable().GetOrCreateIntConst( + 0, *GlobalTables::GetTypeTable().GetPrimType(PTY_u64)); + } + case PTY_i8: { + return GlobalTables::GetIntConstTable().GetOrCreateIntConst( + 0, *GlobalTables::GetTypeTable().GetPrimType(PTY_i8)); + } + case PTY_i16: { + return GlobalTables::GetIntConstTable().GetOrCreateIntConst( + 0, *GlobalTables::GetTypeTable().GetPrimType(PTY_i16)); + } + case PTY_i32: { + return GlobalTables::GetIntConstTable().GetOrCreateIntConst( + 0, *GlobalTables::GetTypeTable().GetPrimType(PTY_i32)); + } + case PTY_i64: { + return GlobalTables::GetIntConstTable().GetOrCreateIntConst( 
+ 0, *GlobalTables::GetTypeTable().GetPrimType(PTY_i64)); + } + case PTY_f32: { + return FEManager::GetModule().GetMemPool()->New( + 0, *GlobalTables::GetTypeTable().GetPrimType(PTY_f32)); + } + case PTY_f64: { + return FEManager::GetModule().GetMemPool()->New( + 0, *GlobalTables::GetTypeTable().GetPrimType(PTY_f64)); + } + case PTY_ptr: { + return GlobalTables::GetIntConstTable().GetOrCreateIntConst( + 0, *GlobalTables::GetTypeTable().GetPrimType(PTY_i64)); + } + case PTY_agg: { + auto *aggConst = FEManager::GetModule().GetMemPool()->New(FEManager::GetModule(), *type); + if (type->IsStructType()) { + auto structType = static_cast(type); + FieldID fieldID = 0; + for (auto &f:structType->GetFields()) { + fieldID++; + auto fieldType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(f.second.first); + aggConst->AddItem(CreateImplicitConst(fieldType), fieldID); + } + } else if (type->GetKind() == kTypeArray) { + auto arrayType = static_cast(type); + MIRConst *elementConst; + if (arrayType->GetDim() > 1) { + uint32 subSizeArray[arrayType->GetDim()]; + for (int dim = 1; dim < arrayType->GetDim(); ++dim) { + subSizeArray[dim - 1] = arrayType->GetSizeArrayItem(dim); + } + auto subArrayType = GlobalTables::GetTypeTable().GetOrCreateArrayType( + *arrayType->GetElemType(), static_cast(arrayType->GetDim() - 1), subSizeArray); + elementConst = CreateImplicitConst(subArrayType); + } else { + elementConst = CreateImplicitConst(arrayType->GetElemType()); + } + for (uint32 i = 0; i < arrayType->GetSizeArrayItem(0); ++i) { + aggConst->AddItem(elementConst, 0); + } + } + return aggConst; + } + default: { + CHECK_FATAL(false, "Unsupported Primitive type: %d", type->GetPrimType()); + } + } +} + +PrimType FEUtils::GetVectorElementPrimType(PrimType vectorPrimType) { + switch (vectorPrimType) { + case PTY_v2i64: + return PTY_i64; + case PTY_v4i32: + case PTY_v2i32: + return PTY_i32; + case PTY_v8i16: + case PTY_v4i16: + return PTY_i16; + case PTY_v16i8: + case PTY_v8i8: + return PTY_i8; + case PTY_v2u64: + return PTY_u64; + case PTY_v4u32: + case PTY_v2u32: + return PTY_u32; + case PTY_v8u16: + case PTY_v4u16: + return PTY_u16; + case PTY_v16u8: + case PTY_v8u8: + return PTY_u8; + case PTY_v2f64: + return PTY_f64; + case PTY_v4f32: + case PTY_v2f32: + return PTY_f32; + default: + return PTY_unknown; + } +} + +bool FEUtils::EndsWith(const std::string &value, const std::string &ending) { + if (ending.size() > value.size()) { + return false; + } + return std::equal(ending.rbegin(), ending.rend(), value.rbegin()); +} + +// in the search, curfieldid is being decremented until it reaches 1 +MIRConst *FEUtils::TraverseToMIRConst(MIRAggConst *aggConst, const MIRStructType &structType, FieldID &fieldID) { + if (aggConst == nullptr || aggConst->GetConstVec().size() == 0) { + return nullptr; + } + uint32 fieldIdx = 0; + FieldPair curPair = structType.GetFields()[0]; + MIRConst *curConst = aggConst->GetConstVec()[0]; + while (fieldID > 1) { + --fieldID; + MIRType *curFieldType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(curPair.second.first); + MIRStructType *subStructTy = curFieldType->EmbeddedStructType(); + if (subStructTy != nullptr && curConst->GetKind() == kConstAggConst) { + curConst = TraverseToMIRConst(static_cast(curConst), *subStructTy, fieldID); + if (fieldID == 1 && curConst != nullptr) { + return curConst; + } + } + ++fieldIdx; + if (fieldIdx == structType.GetFields().size()) { + return nullptr; + } + if (structType.GetKind() == kTypeUnion) { + curConst = aggConst->GetConstVec()[0]; // union only is one 
element + } else { + curConst = aggConst->GetConstVec()[fieldIdx]; + } + curPair = structType.GetFields()[fieldIdx]; + } + return curConst; +} + +Loc FEUtils::GetSrcLocationForMIRSymbol(const MIRSymbol &symbol) { + return Loc(symbol.GetSrcPosition().FileNum(), symbol.GetSrcPosition().LineNum(), symbol.GetSrcPosition().Column()); +} + +void FEUtils::InitPrimTypeFuncNameIdxMap(std::map &primTypeFuncNameIdxMap) { + primTypeFuncNameIdxMap = { + { PTY_u1, GetMCCStaticFieldSetBoolIdx() }, + { PTY_i8, GetMCCStaticFieldSetByteIdx() }, + { PTY_i16, GetMCCStaticFieldSetShortIdx() }, + { PTY_u16, GetMCCStaticFieldSetCharIdx() }, + { PTY_i32, GetMCCStaticFieldSetIntIdx() }, + { PTY_i64, GetMCCStaticFieldSetLongIdx() }, + { PTY_f32, GetMCCStaticFieldSetFloatIdx() }, + { PTY_f64, GetMCCStaticFieldSetDoubleIdx() }, + { PTY_ref, GetMCCStaticFieldSetObjectIdx() }, + }; +} + +MIRAliasVars FEUtils::AddAlias(const GStrIdx &mplNameIdx, const MIRType *sourceType, const TypeAttrs &attrs, + bool isLocal) { + MIRAliasVars aliasVar; + aliasVar.mplStrIdx = mplNameIdx; + aliasVar.isLocal = isLocal; + aliasVar.attrs = attrs; + if (sourceType != nullptr) { + aliasVar.atk = kATKType; + aliasVar.index = sourceType->GetTypeIndex(); + } else { + CHECK_FATAL(false, "unknown source type of %s", + GlobalTables::GetStrTable().GetStringFromStrIdx(mplNameIdx).c_str()); + } + return aliasVar; +}; + +void FEUtils::AddAliasInMIRScope(MIRScope &scope, const std::string &srcVarName, const MIRSymbol &symbol, + const MIRType *sourceType) { + GStrIdx nameIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(srcVarName); + MIRAliasVars aliasVar = FEUtils::AddAlias(symbol.GetNameStrIdx(), sourceType, symbol.GetAttrs(), symbol.IsLocal()); + scope.SetAliasVarMap(nameIdx, aliasVar); +}; + +SrcPosition FEUtils::CvtLoc2SrcPosition(const Loc &loc) { + SrcPosition srcPos; + srcPos.SetFileNum(static_cast(loc.fileIdx)); + srcPos.SetLineNum(loc.line); + srcPos.SetColumn(static_cast(loc.column)); + return srcPos; +} +// ---------- FELinkListNode ---------- +FELinkListNode::FELinkListNode() + : prev(nullptr), next(nullptr) {} + +FELinkListNode::~FELinkListNode() { + prev = nullptr; + next = nullptr; +} + +void FELinkListNode::InsertBefore(FELinkListNode *ins) { + InsertBefore(ins, this); +} + +void FELinkListNode::InsertAfter(FELinkListNode *ins) { + InsertAfter(ins, this); +} + +void FELinkListNode::InsertBefore(FELinkListNode *ins, FELinkListNode *pos) { + // pos_p -- ins -- pos + if (pos == nullptr || pos->prev == nullptr || ins == nullptr) { + CHECK_FATAL(false, "invalid input"); + } + FELinkListNode *posPrev = pos->prev; + posPrev->next = ins; + pos->prev = ins; + ins->prev = posPrev; + ins->next = pos; +} + +void FELinkListNode::InsertAfter(FELinkListNode *ins, FELinkListNode *pos) { + // pos -- ins -- pos_n + if (pos == nullptr || pos->next == nullptr || ins == nullptr) { + CHECK_FATAL(false, "invalid input"); + } + FELinkListNode *posNext = pos->next; + pos->next = ins; + posNext->prev = ins; + ins->prev = pos; + ins->next = posNext; +} + +void FELinkListNode::SpliceNodes(FELinkListNode *head, FELinkListNode *tail, FELinkListNode *newTail) { + FELinkListNode *stmt = head->GetNext(); + FELinkListNode *nextStmt = stmt; + do { + stmt = nextStmt; + nextStmt = stmt->GetNext(); + newTail->InsertBefore(stmt); + } while (nextStmt != nullptr && nextStmt != tail); +} + +uint32_t AstSwitchUtil::tempVarNo = 0; +const char *AstSwitchUtil::cleanLabel = "clean"; +const char *AstSwitchUtil::exitLabel = "exit"; +const char *AstSwitchUtil::blockLabel = 
"blklbl"; +const char *AstSwitchUtil::caseLabel = "case"; +const char *AstSwitchUtil::catchLabel = "catch"; +const char *AstSwitchUtil::endehLabel = "endeh"; + +std::string AstSwitchUtil::CreateEndOrExitLabelName() const { + std::string labelName = FEUtils::GetSequentialName0(blockLabel, tempVarNo); + ++tempVarNo; + return labelName; +} + +void AstSwitchUtil::MarkLabelUsed(const std::string &label) { + labelUsed[label] = true; +} + +void AstSwitchUtil::MarkLabelUnUsed(const std::string &label) { + labelUsed[label] = false; +} + +void AstSwitchUtil::PushNestedBreakLabels(const std::string &label) { + nestedBreakLabels.push(label); +} + +void AstSwitchUtil::PopNestedBreakLabels() { + nestedBreakLabels.pop(); +} + +void AstSwitchUtil::PushNestedCaseVectors(const std::pair &caseVec) { + nestedCaseVectors.push(caseVec); +} + +void AstSwitchUtil::PopNestedCaseVectors() { + nestedCaseVectors.pop(); +} + +bool AstSwitchUtil::CheckLabelUsed(const std::string &label) { + return labelUsed[label]; +} + +const std::pair &AstSwitchUtil::GetTopOfNestedCaseVectors() const { + return nestedCaseVectors.top(); +} + +const std::string &AstSwitchUtil::GetTopOfBreakLabels() const { + return nestedBreakLabels.top(); +} + +void AstLoopUtil::PushBreak(std::string label) { + breakLabels.push(std::make_pair(label, false)); +} + +std::string AstLoopUtil::GetCurrentBreak() { + breakLabels.top().second = true; + return breakLabels.top().first; +} + +bool AstLoopUtil::IsBreakLabelsEmpty() const { + return breakLabels.empty(); +} + +void AstLoopUtil::PopCurrentBreak() { + breakLabels.pop(); +} + +void AstLoopUtil::PushContinue(std::string label) { + continueLabels.push(std::make_pair(label, false)); +} + +std::string AstLoopUtil::GetCurrentContinue() { + continueLabels.top().second = true; + return continueLabels.top().first; +} + +bool AstLoopUtil::IsContinueLabelsEmpty() const { + return continueLabels.empty(); +} + +void AstLoopUtil::PopCurrentContinue() { + continueLabels.pop(); +} +} // namespace maple diff --git a/src/hir2mpl/common/src/fe_utils_ast.cpp b/src/hir2mpl/common/src/fe_utils_ast.cpp new file mode 100644 index 0000000000000000000000000000000000000000..c10ca129c5da7473f6b1b288e1ec002aa43f6a9b --- /dev/null +++ b/src/hir2mpl/common/src/fe_utils_ast.cpp @@ -0,0 +1,70 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "fe_utils_ast.h" +#include +#include "mpl_logging.h" + +namespace maple { +PrimType FEUtilAST::GetTypeFromASTTypeName(const std::string &typeName) { + const static std::map mapASTTypeNameToType = { + {"bool", PTY_u1}, + {"uint8", PTY_u8}, + {"uint16", PTY_u16}, + {"uint32", PTY_u32}, + {"uint64", PTY_u64}, + {"int8", PTY_i8}, + {"int16", PTY_i16}, + {"int32", PTY_i32}, + {"int64", PTY_i64}, + {"float", PTY_f32}, + {"double", PTY_f64}, + {"void", PTY_void} + }; + auto it = mapASTTypeNameToType.find(typeName); + CHECK_FATAL(it != mapASTTypeNameToType.end(), "Can not find typeName %s", typeName.c_str()); + return it->second; +} + +const std::string FEUtilAST::Type2Label(PrimType primType) { + switch (primType) { + case PTY_u1: + return "B"; + case PTY_i8: + return "A"; + case PTY_u8: + return "C"; + case PTY_i16: + return "S"; + case PTY_u16: + return "T"; + case PTY_i32: + return "I"; + case PTY_u32: + return "M"; + case PTY_i64: + return "O"; + case PTY_u64: + return "Q"; + case PTY_f32: + return "F"; + case PTY_f64: + return "D"; + case PTY_void: + return "V"; + default: + return "R"; + } +} +} // namespace maple diff --git a/src/hir2mpl/common/src/fe_utils_java.cpp b/src/hir2mpl/common/src/fe_utils_java.cpp new file mode 100644 index 0000000000000000000000000000000000000000..5ac307d9701c84459fba7fb92187bffcb9838601 --- /dev/null +++ b/src/hir2mpl/common/src/fe_utils_java.cpp @@ -0,0 +1,57 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "fe_utils_java.h" +#include "namemangler.h" + +namespace maple { +std::vector FEUtilJava::SolveMethodSignature(const std::string &signature) { + std::vector ans; + size_t pos1 = signature.find('('); + size_t pos2 = signature.find(')'); + if (pos1 == std::string::npos || pos2 == std::string::npos || pos1 > pos2) { + CHECK_FATAL(false, "invalid method signature %s", signature.c_str()); + } + std::string paramTypeNames = signature.substr(pos1 + 1, (pos2 - pos1) - 1); + std::string retTypeName = signature.substr(pos2 + 1); + ans.push_back(retTypeName); + while (paramTypeNames.length() > 0) { + std::string typeName = SolveParamNameInJavaFormat(paramTypeNames); + ans.push_back(typeName); + paramTypeNames = paramTypeNames.substr(typeName.length()); + } + return ans; +} + +std::string FEUtilJava::SolveParamNameInJavaFormat(const std::string &signature) { + if (signature.empty()) { + return ""; + } + char c = signature[0]; + switch (c) { + case '[': + return "[" + SolveParamNameInJavaFormat(signature.substr(1)); + case 'L': { + size_t pos = signature.find(';'); + CHECK_FATAL(pos != std::string::npos, "invalid type %s", signature.c_str()); + return signature.substr(0, pos + 1); + } + default: { + std::string ans = ""; + ans.push_back(c); + return ans; + } + } +} +} // namespace maple diff --git a/src/hir2mpl/common/src/feir_builder.cpp b/src/hir2mpl/common/src/feir_builder.cpp new file mode 100644 index 0000000000000000000000000000000000000000..23a50a5a0c66ac22b425a5554f4b588913db5689 --- /dev/null +++ b/src/hir2mpl/common/src/feir_builder.cpp @@ -0,0 +1,770 @@ +/* + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "feir_builder.h" +#include "mpl_logging.h" +#include "global_tables.h" +#include "feir_var_reg.h" +#include "feir_var_name.h" +#include "fe_type_manager.h" +#include "feir_type_helper.h" + +namespace maple { +UniqueFEIRType FEIRBuilder::CreateType(PrimType basePty, const GStrIdx &baseNameIdx, uint32 dim) { + UniqueFEIRType type = std::make_unique(basePty, baseNameIdx, dim); + CHECK_NULL_FATAL(type); + return type; +} + +UniqueFEIRType FEIRBuilder::CreateArrayElemType(const UniqueFEIRType &arrayType) { + std::string typeName = arrayType->GetTypeName(); + ASSERT(typeName.length() > 1 && typeName.at(0) == 'A', "Invalid array type: %s", typeName.c_str()); + std::unique_ptr type = std::make_unique(); + type->LoadFromJavaTypeName(typeName.substr(1), true); + return type; +} + +UniqueFEIRType FEIRBuilder::CreateRefType(const GStrIdx &baseNameIdx, uint32 dim) { + return CreateType(PTY_ref, baseNameIdx, dim); +} + +UniqueFEIRType FEIRBuilder::CreateTypeByJavaName(const std::string &typeName, bool inMpl) { + UniqueFEIRType type = std::make_unique(PTY_ref); + CHECK_NULL_FATAL(type); + FEIRTypeDefault *ptrType = static_cast(type.get()); + ptrType->LoadFromJavaTypeName(typeName, inMpl); + return type; +} + +UniqueFEIRVar FEIRBuilder::CreateVarReg(uint32 regNum, PrimType primType, bool isGlobal) { + UniqueFEIRVar var = std::make_unique(regNum, primType); + CHECK_NULL_FATAL(var); + var->SetGlobal(isGlobal); + return var; +} + +UniqueFEIRVar FEIRBuilder::CreateVarReg(uint32 regNum, UniqueFEIRType type, bool isGlobal) { + UniqueFEIRVar var = std::make_unique(regNum, std::move(type)); + CHECK_NULL_FATAL(var); + var->SetGlobal(isGlobal); + return var; +} + +UniqueFEIRVar FEIRBuilder::CreateVarName(GStrIdx nameIdx, PrimType primType, bool isGlobal, bool withType) { + UniqueFEIRVar var = std::make_unique(nameIdx, withType); + CHECK_NULL_FATAL(var); + var->GetType()->SetPrimType(primType); + var->SetGlobal(isGlobal); + return var; +} + +UniqueFEIRVar FEIRBuilder::CreateVarName(const std::string &name, PrimType primType, bool isGlobal, + bool withType) { + GStrIdx nameIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(name); + return CreateVarName(nameIdx, primType, isGlobal, withType); +} + +UniqueFEIRVar FEIRBuilder::CreateVarNameForC(GStrIdx nameIdx, MIRType &mirType, bool isGlobal, bool withType) { + UniqueFEIRType type = std::make_unique(mirType); + UniqueFEIRVar var = std::make_unique(nameIdx, std::move(type), withType); + var->SetGlobal(isGlobal); + return var; +} + +UniqueFEIRVar FEIRBuilder::CreateVarNameForC(const std::string &name, MIRType &mirType, bool isGlobal, bool withType) { + GStrIdx nameIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(name); + return CreateVarNameForC(nameIdx, mirType, isGlobal, withType); +} + +UniqueFEIRVar FEIRBuilder::CreateVarNameForC(const std::string &name, UniqueFEIRType type, + bool isGlobal, bool withType) { + GStrIdx nameIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(name); + UniqueFEIRVar var = std::make_unique(nameIdx, std::move(type), withType); + var->SetGlobal(isGlobal); + return var; +} + +UniqueFEIRExpr FEIRBuilder::CreateExprSizeOfType(UniqueFEIRType ty) { + UniqueFEIRExpr expr = std::make_unique(std::move(ty)); + return expr; +} + +UniqueFEIRExpr FEIRBuilder::CreateExprDRead(UniqueFEIRVar srcVar) { + UniqueFEIRExpr expr = std::make_unique(std::move(srcVar)); + CHECK_NULL_FATAL(expr); + return expr; +} + +UniqueFEIRExpr FEIRBuilder::CreateExprDReadAggField(UniqueFEIRVar srcVar, FieldID fieldID, UniqueFEIRType 
fieldType) { + CHECK_FATAL(srcVar != nullptr && srcVar->GetType()->GetPrimType() == PTY_agg, + "var type must be struct type, %u", srcVar->GetType()->GetPrimType()); + UniqueFEIRExpr expr = std::make_unique(std::move(srcVar)); + expr->SetFieldID(fieldID); + expr->SetFieldType(std::move(fieldType)); + return expr; +} + +UniqueFEIRExpr FEIRBuilder::CreateExprIRead(UniqueFEIRType returnType, UniqueFEIRType ptrType, + UniqueFEIRExpr expr, FieldID id /* optional parameters */) { + UniqueFEIRExpr feirExpr = std::make_unique(std::move(returnType), std::move(ptrType), + id, std::move(expr)); + return feirExpr; +} + +UniqueFEIRExpr FEIRBuilder::CreateExprAddrofLabel(const std::string &lbName, UniqueFEIRType exprTy) { + UniqueFEIRExpr expr = std::make_unique(lbName, std::move(exprTy)); + CHECK_NULL_FATAL(expr); + return expr; +} + +UniqueFEIRExpr FEIRBuilder::CreateExprAddrofVar(UniqueFEIRVar srcVar) { + UniqueFEIRExpr expr = std::make_unique(std::move(srcVar)); + return expr; +} + +UniqueFEIRExpr FEIRBuilder::CreateExprAddrofFunc(const std::string &addr) { + UniqueFEIRExpr expr = std::make_unique(addr); + return expr; +} + +UniqueFEIRExpr FEIRBuilder::CreateExprAddrofArray(UniqueFEIRType argTypeNativeArray, + UniqueFEIRExpr argExprArray, std::string argArrayName, + std::list &argExprIndexs) { + UniqueFEIRExpr expr = std::make_unique(std::move(argTypeNativeArray), + std::move(argExprArray), argArrayName, argExprIndexs); + return expr; +} + +UniqueFEIRExpr FEIRBuilder::CreateExprTernary(Opcode op, UniqueFEIRType type, UniqueFEIRExpr cExpr, + UniqueFEIRExpr tExpr, UniqueFEIRExpr fExpr) { + UniqueFEIRExpr expr = std::make_unique(op, std::move(type), std::move(cExpr), + std::move(tExpr), std::move(fExpr)); + return expr; +} + +UniqueFEIRExpr FEIRBuilder::CreateExprConstRefNull() { + return std::make_unique(int64{ 0 }, PTY_ref); +} + +UniqueFEIRExpr FEIRBuilder::CreateExprConstPtrNull() { + return std::make_unique(int64{ 0 }, PTY_ptr); +} + +UniqueFEIRExpr FEIRBuilder::CreateExprConstI8(int8 val) { + return std::make_unique(int64{ val }, PTY_i8); +} + +UniqueFEIRExpr FEIRBuilder::CreateExprConstI16(int16 val) { + return std::make_unique(int64{ val }, PTY_i16); +} + +UniqueFEIRExpr FEIRBuilder::CreateExprConstI32(int32 val) { + return std::make_unique(int64{ val }, PTY_i32); +} + +UniqueFEIRExpr FEIRBuilder::CreateExprConstU32(uint32 val) { + return std::make_unique(val); +} + +UniqueFEIRExpr FEIRBuilder::CreateExprConstI64(int64 val) { + return std::make_unique(val, PTY_i64); +} + +UniqueFEIRExpr FEIRBuilder::CreateExprConstU64(uint64 val) { + return std::make_unique(val, PTY_u64); +} + +UniqueFEIRExpr FEIRBuilder::CreateExprConstF32(float val) { + return std::make_unique(val); +} + +UniqueFEIRExpr FEIRBuilder::CreateExprConstF64(double val) { + return std::make_unique(val); +} + +UniqueFEIRExpr FEIRBuilder::CreateExprConstPtr(int64 val) { + return std::make_unique(val, PTY_ptr); +} + +// Create a const expr of specified prime type with fixed value. +// Note that loss of precision, byte value is only supported. 
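// --------------------------------------------------------------------------------------
// Illustrative sketch (standalone, not part of this patch): the precision caveat noted in
// the comment above. Routing a 64-bit integer payload through float/double, as the
// PTY_f32/PTY_f64 cases of CreateExprConstAnyScalar below do, can silently round the value.
#include <cstdint>
#include <iostream>

int main() {
  const int64_t val = (int64_t{1} << 53) + 1;     // not exactly representable as a double
  const double asF64 = static_cast<double>(val);  // rounds to 2^53
  const float asF32 = static_cast<float>(val);    // rounds even more coarsely
  std::cout << val << " -> " << static_cast<int64_t>(asF64)
            << " / " << static_cast<int64_t>(asF32) << std::endl;
  return 0;
}
// --------------------------------------------------------------------------------------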
+UniqueFEIRExpr FEIRBuilder::CreateExprConstAnyScalar(PrimType primType, int64 val) { + switch (primType) { + case PTY_u1: + case PTY_u8: + case PTY_u16: + case PTY_u32: + case PTY_u64: + case PTY_i8: + case PTY_i16: + case PTY_i32: + case PTY_i64: + case PTY_ptr: + case PTY_a64: + return std::make_unique(val, primType); + case PTY_f128: + // Not Implemented + CHECK_FATAL(false, "Not Implemented"); + return nullptr; + case PTY_f32: + return CreateExprConstF32(static_cast(val)); + case PTY_f64: + return CreateExprConstF64(static_cast(val)); + default: + if (IsPrimitiveVector(primType)) { + return CreateExprVdupAnyVector(primType, val); + } + CHECK_FATAL(false, "unsupported const prime type"); + return nullptr; + } +} + +UniqueFEIRExpr FEIRBuilder::CreateExprVdupAnyVector(PrimType primtype, int64 val) { +MIRIntrinsicID intrinsic; + switch (primtype) { +#define SET_VDUP(TY) \ + case PTY_##TY: \ + intrinsic = INTRN_vector_from_scalar_##TY; \ + break; + + SET_VDUP(v2i64) + SET_VDUP(v4i32) + SET_VDUP(v8i16) + SET_VDUP(v16i8) + SET_VDUP(v2u64) + SET_VDUP(v4u32) + SET_VDUP(v8u16) + SET_VDUP(v16u8) + SET_VDUP(v2f64) + SET_VDUP(v4f32) + SET_VDUP(v2i32) + SET_VDUP(v4i16) + SET_VDUP(v8i8) + SET_VDUP(v2u32) + SET_VDUP(v4u16) + SET_VDUP(v8u8) + SET_VDUP(v2f32) + default: + CHECK_FATAL(false, "Unhandled vector type in CreateExprVdupAnyVector"); + } + UniqueFEIRType feType = FEIRTypeHelper::CreateTypeNative(*GlobalTables::GetTypeTable().GetPrimType(primtype)); + UniqueFEIRExpr valExpr = CreateExprConstAnyScalar(FEUtils::GetVectorElementPrimType(primtype), val); + std::vector> argOpnds; + argOpnds.push_back(std::move(valExpr)); + return std::make_unique(std::move(feType), intrinsic, argOpnds); +} + +UniqueFEIRExpr FEIRBuilder::CreateExprMathUnary(Opcode op, UniqueFEIRVar var0) { + UniqueFEIRExpr opnd0 = CreateExprDRead(std::move(var0)); + return std::make_unique(op, std::move(opnd0)); +} + +UniqueFEIRExpr FEIRBuilder::CreateExprMathUnary(Opcode op, UniqueFEIRExpr expr) { + return std::make_unique(op, std::move(expr)); +} + +UniqueFEIRExpr FEIRBuilder::CreateExprZeroCompare(Opcode op, UniqueFEIRExpr expr) { + CHECK_FATAL(op == OP_ne || op == OP_eq, "Unsupported op in CreateExprZeroCompare."); + if (op == OP_ne && expr->GetKind() == kExprBinary && static_cast(expr.get())->IsComparative()) { + return expr; + } + if (expr->GetKind() == kExprConst) { + FEIRExprConst *constExpr = static_cast(expr.get()); + int64 val; + if (op == OP_ne) { + val = constExpr->GetValue().u64 == 0 ? 0 : 1; + } else { + val = constExpr->GetValue().u64 == 0 ? 1 : 0; + } + return std::make_unique(val, PTY_u1); + } + UniqueFEIRExpr zeroExpr = + (expr->GetPrimType() == PTY_ptr) ? 
CreateExprConstPtrNull() : CreateExprConstAnyScalar(expr->GetPrimType(), 0); + return CreateExprMathBinary(op, std::move(expr), std::move(zeroExpr)); +} + +UniqueFEIRExpr FEIRBuilder::CreateExprMathBinary(Opcode op, UniqueFEIRVar var0, UniqueFEIRVar var1) { + UniqueFEIRExpr opnd0 = CreateExprDRead(std::move(var0)); + UniqueFEIRExpr opnd1 = CreateExprDRead(std::move(var1)); + return std::make_unique(op, std::move(opnd0), std::move(opnd1)); +} + +UniqueFEIRExpr FEIRBuilder::CreateExprMathBinary(Opcode op, UniqueFEIRExpr expr0, UniqueFEIRExpr expr1) { + return std::make_unique(op, std::move(expr0), std::move(expr1)); +} + +UniqueFEIRExpr FEIRBuilder::CreateExprBinary(UniqueFEIRType exprType, Opcode op, + UniqueFEIRExpr expr0, UniqueFEIRExpr expr1) { + return std::make_unique(std::move(exprType), op, std::move(expr0), std::move(expr1)); +} + +UniqueFEIRExpr FEIRBuilder::CreateExprBinary(Opcode op, UniqueFEIRExpr expr0, UniqueFEIRExpr expr1) { + return std::make_unique(op, std::move(expr0), std::move(expr1)); +} + +UniqueFEIRExpr FEIRBuilder::CreateExprSExt(UniqueFEIRVar srcVar) { + return std::make_unique(OP_sext, PTY_i32, + std::make_unique(std::move(srcVar))); +} + +UniqueFEIRExpr FEIRBuilder::CreateExprSExt(UniqueFEIRExpr srcExpr, PrimType dstType) { + return std::make_unique(OP_sext, dstType, std::move(srcExpr)); +} + +UniqueFEIRExpr FEIRBuilder::CreateExprZExt(UniqueFEIRVar srcVar) { + return std::make_unique(OP_zext, PTY_i32, + std::make_unique(std::move(srcVar))); +} + +UniqueFEIRExpr FEIRBuilder::CreateExprZExt(UniqueFEIRExpr srcExpr, PrimType dstType) { + return std::make_unique(OP_zext, dstType, std::move(srcExpr)); +} + +UniqueFEIRExpr FEIRBuilder::CreateExprCvtPrim(UniqueFEIRVar srcVar, PrimType dstType) { + return CreateExprCastPrim(CreateExprDRead(std::move(srcVar)), dstType); +} + +UniqueFEIRExpr FEIRBuilder::CreateExprCvtPrim(UniqueFEIRExpr srcExpr, PrimType dstType) { + UniqueFEIRExpr expr = std::make_unique(OP_cvt, std::move(srcExpr)); + CHECK_NULL_FATAL(expr); + FEIRExprTypeCvt *ptrExpr = static_cast(expr.get()); + ptrExpr->GetType()->SetPrimType(dstType); + return expr; +} + +UniqueFEIRExpr FEIRBuilder::CreateExprCvtPrim(UniqueFEIRExpr srcExpr, PrimType srcType, PrimType dstType) { + UniqueFEIRExpr expr = std::make_unique(OP_cvt, std::move(srcExpr)); + CHECK_NULL_FATAL(expr); + FEIRExprTypeCvt *ptrExpr = static_cast(expr.get()); + ptrExpr->SetSrcPrimType(srcType); + ptrExpr->GetType()->SetPrimType(dstType); + return expr; +} + +UniqueFEIRExpr FEIRBuilder::CreateExprCvtPrim(Opcode argOp, UniqueFEIRExpr srcExpr, PrimType dstType) { + UniqueFEIRExpr expr = std::make_unique(argOp, std::move(srcExpr)); + CHECK_NULL_FATAL(expr); + FEIRExprTypeCvt *ptrExpr = static_cast(expr.get()); + ptrExpr->GetType()->SetPrimType(dstType); + return expr; +} + +UniqueFEIRExpr FEIRBuilder::CreateExprCastPrim(UniqueFEIRExpr srcExpr, PrimType dstType) { + // Handle the case separately for future optimization and deletion. 
+ // bool u1 + if (srcExpr->GetPrimType() == PTY_u1) { + if (IsPrimitiveFloat(dstType)) { + return CreateExprCvtPrim(std::move(srcExpr), PTY_u32, dstType); + } + return srcExpr; + } + + if (dstType == PTY_u1) { + return FEIRBuilder::CreateExprZeroCompare(OP_ne, std::move(srcExpr)); + } + + // Integer to Integer : 8, 16, 32, 64 bits Integer + auto createExprExt = [&srcExpr, &dstType]() { + if (FEUtils::IsUnsignedInteger(dstType)) { + return CreateExprZExt(std::move(srcExpr), dstType); + } else { + return CreateExprSExt(std::move(srcExpr), dstType); + } + }; + + if (FEUtils::IsInteger(srcExpr->GetPrimType()) && FEUtils::IsInteger(dstType)) { + // avoid using OP_cvt for integer types less than 32 bits + if (GetPrimTypeBitSize(dstType) < 32) { + return createExprExt(); + } + + if (GetPrimTypeBitSize(dstType) > GetPrimTypeBitSize(srcExpr->GetPrimType())) { + PrimType srcRegPty = GetRegPrimType(srcExpr->GetPrimType()); + if (srcRegPty == dstType) { + // avoid using OP_cvt for integer types less than 32 bits, but need to explicit transformation + if (GetPrimTypeBitSize(srcExpr->GetPrimType()) < 32) { + return createExprExt(); + } + return srcExpr; + } + return CreateExprCvtPrim(std::move(srcExpr), srcRegPty, dstType); + } + } + + // others + return CreateExprCvtPrim(std::move(srcExpr), dstType); +} + +UniqueFEIRExpr FEIRBuilder::CreateExprJavaNewInstance(UniqueFEIRType type) { + UniqueFEIRExpr expr = std::make_unique(std::move(type)); + CHECK_NULL_FATAL(expr); + return expr; +} + +UniqueFEIRExpr FEIRBuilder::CreateExprJavaNewInstance(UniqueFEIRType type, uint32 argTypeID) { + UniqueFEIRExpr expr = std::make_unique(std::move(type), argTypeID); + CHECK_NULL_FATAL(expr); + return expr; +} + +UniqueFEIRExpr FEIRBuilder::CreateExprJavaNewInstance(UniqueFEIRType type, uint32 argTypeID, bool isRcPermanent) { + UniqueFEIRExpr expr = std::make_unique(std::move(type), argTypeID, isRcPermanent); + CHECK_NULL_FATAL(expr); + return expr; +} + +UniqueFEIRExpr FEIRBuilder::CreateExprJavaNewArray(UniqueFEIRType type, UniqueFEIRExpr exprSize) { + UniqueFEIRExpr expr = std::make_unique(std::move(type), std::move(exprSize)); + CHECK_NULL_FATAL(expr); + return expr; +} + +UniqueFEIRExpr FEIRBuilder::CreateExprJavaNewArray(UniqueFEIRType type, UniqueFEIRExpr exprSize, uint32 typeID) { + UniqueFEIRExpr expr = std::make_unique(std::move(type), std::move(exprSize), typeID); + CHECK_NULL_FATAL(expr); + return expr; +} + +UniqueFEIRExpr FEIRBuilder::CreateExprJavaNewArray(UniqueFEIRType type, UniqueFEIRExpr exprSize, uint32 typeID, + bool isRcPermanent) { + UniqueFEIRExpr expr = std::make_unique( + std::move(type), std::move(exprSize), typeID, isRcPermanent); + CHECK_NULL_FATAL(expr); + return expr; +} + +UniqueFEIRExpr FEIRBuilder::CreateExprJavaArrayLength(UniqueFEIRExpr exprArray) { + UniqueFEIRExpr expr = std::make_unique(std::move(exprArray)); + CHECK_NULL_FATAL(expr); + return expr; +} + +UniqueFEIRStmt FEIRBuilder::CreateStmtDAssign(UniqueFEIRVar dstVar, UniqueFEIRExpr srcExpr, bool hasException) { + std::unique_ptr stmt = std::make_unique(std::move(dstVar), std::move(srcExpr)); + stmt->SetHasException(hasException); + return stmt; +} + +UniqueFEIRStmt FEIRBuilder::CreateStmtDAssignAggField(UniqueFEIRVar dstVar, UniqueFEIRExpr srcExpr, FieldID fieldID) { + UniqueFEIRStmt stmt = std::make_unique(std::move(dstVar), std::move(srcExpr), fieldID); + return stmt; +} + +UniqueFEIRStmt FEIRBuilder::CreateStmtIAssign(UniqueFEIRType dstType, UniqueFEIRExpr dstExpr, + UniqueFEIRExpr srcExpr, FieldID fieldID /* optional 
parameters */) { + UniqueFEIRStmt stmt = std::make_unique( + std::move(dstType), std::move(dstExpr), std::move(srcExpr), fieldID); + return stmt; +} + +UniqueFEIRStmt FEIRBuilder::CreateStmtGoto(uint32 targetLabelIdx) { + UniqueFEIRStmt stmt = std::make_unique(targetLabelIdx); + CHECK_NULL_FATAL(stmt); + return stmt; +} + +UniqueFEIRStmt FEIRBuilder::CreateStmtGoto(const std::string &labelName) { + UniqueFEIRStmt stmt = std::make_unique(labelName); + CHECK_NULL_FATAL(stmt); + return stmt; +} + +UniqueFEIRStmt FEIRBuilder::CreateStmtIGoto(UniqueFEIRExpr targetExpr) { + UniqueFEIRStmt stmt = std::make_unique(std::move(targetExpr)); + CHECK_NULL_FATAL(stmt); + return stmt; +} + +UniqueFEIRStmt FEIRBuilder::CreateStmtCondGoto(uint32 targetLabelIdx, Opcode op, UniqueFEIRExpr expr) { + UniqueFEIRStmt stmt = std::make_unique(op, targetLabelIdx, std::move(expr)); + CHECK_NULL_FATAL(stmt); + return stmt; +} + +UniqueFEIRStmt FEIRBuilder::CreateStmtSwitch(UniqueFEIRExpr expr) { + UniqueFEIRStmt stmt = std::make_unique(std::move(expr)); + CHECK_NULL_FATAL(stmt); + return stmt; +} + +UniqueFEIRStmt FEIRBuilder::CreateStmtIfWithoutElse(UniqueFEIRExpr cond, std::list &thenStmts) { + UniqueFEIRStmt stmt = std::make_unique(std::move(cond), thenStmts); + return stmt; +} + +UniqueFEIRStmt FEIRBuilder::CreateStmtIf(UniqueFEIRExpr cond, + std::list &thenStmts, + std::list &elseStmts) { + UniqueFEIRStmt stmt = std::make_unique(std::move(cond), thenStmts, elseStmts); + return stmt; +} + +UniqueFEIRStmt FEIRBuilder::CreateStmtJavaConstClass(UniqueFEIRVar dstVar, UniqueFEIRType type) { + UniqueFEIRType dstType = FETypeManager::kFEIRTypeJavaClass->Clone(); + dstVar->SetType(std::move(dstType)); + UniqueFEIRStmt stmt = std::make_unique(std::move(dstVar), std::move(type)); + return stmt; +} + +UniqueFEIRStmt FEIRBuilder::CreateStmtJavaConstString(UniqueFEIRVar dstVar, const std::string &strVal) { + UniqueFEIRType dstType = FETypeManager::kFEIRTypeJavaString->Clone(); + dstVar->SetType(std::move(dstType)); + UniqueFEIRStmt stmt = std::make_unique(std::move(dstVar), strVal, + 0, GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(strVal)); + return stmt; +} + +UniqueFEIRStmt FEIRBuilder::CreateStmtJavaCheckCast(UniqueFEIRVar dstVar, UniqueFEIRVar srcVar, UniqueFEIRType type) { + UniqueFEIRExpr expr = CreateExprDRead(std::move(srcVar)); + UniqueFEIRStmt stmt = std::make_unique(std::move(dstVar), std::move(expr), std::move(type), + FEIRStmtJavaTypeCheck::CheckKind::kCheckCast); + return stmt; +} + +UniqueFEIRStmt FEIRBuilder::CreateStmtJavaCheckCast(UniqueFEIRVar dstVar, UniqueFEIRVar srcVar, UniqueFEIRType type, + uint32 argTypeID) { + UniqueFEIRExpr expr = CreateExprDRead(std::move(srcVar)); + UniqueFEIRStmt stmt = std::make_unique(std::move(dstVar), std::move(expr), std::move(type), + FEIRStmtJavaTypeCheck::CheckKind::kCheckCast, + argTypeID); + return stmt; +} + +UniqueFEIRStmt FEIRBuilder::CreateStmtJavaInstanceOf(UniqueFEIRVar dstVar, UniqueFEIRVar srcVar, UniqueFEIRType type) { + UniqueFEIRExpr expr = CreateExprDRead(std::move(srcVar)); + UniqueFEIRStmt stmt = std::make_unique(std::move(dstVar), std::move(expr), std::move(type), + FEIRStmtJavaTypeCheck::CheckKind::kInstanceOf); + return stmt; +} + +UniqueFEIRStmt FEIRBuilder::CreateStmtJavaInstanceOf(UniqueFEIRVar dstVar, UniqueFEIRVar srcVar, UniqueFEIRType type, + uint32 argTypeID) { + UniqueFEIRExpr expr = CreateExprDRead(std::move(srcVar)); + UniqueFEIRStmt stmt = std::make_unique(std::move(dstVar), std::move(expr), std::move(type), + 
FEIRStmtJavaTypeCheck::CheckKind::kInstanceOf, + argTypeID); + return stmt; +} + +UniqueFEIRStmt FEIRBuilder::CreateStmtJavaFillArrayData(UniqueFEIRVar srcVar, const int8 *arrayData, + uint32 size, const std::string &arrayName) { + UniqueFEIRExpr expr = CreateExprDRead(std::move(srcVar)); + UniqueFEIRStmt stmt = std::make_unique(std::move(expr), arrayData, size, arrayName); + return stmt; +} + +std::list FEIRBuilder::CreateStmtArrayStore(UniqueFEIRVar varElem, UniqueFEIRVar varArray, + UniqueFEIRVar varIndex) { + std::list ans; + UniqueFEIRType arrayType = varElem->GetType()->Clone(); + (void)arrayType->ArrayIncrDim(); + UniqueFEIRExpr exprElem = CreateExprDRead(std::move(varElem)); + UniqueFEIRExpr exprArray = CreateExprDRead(std::move(varArray)); + UniqueFEIRExpr exprIndex = CreateExprDRead(std::move(varIndex)); + UniqueFEIRStmt stmt = std::make_unique(std::move(exprElem), std::move(exprArray), + std::move(exprIndex), std::move(arrayType)); + ans.push_back(std::move(stmt)); + return ans; +} + +UniqueFEIRStmt FEIRBuilder::CreateStmtArrayStoreOneStmt(UniqueFEIRVar varElem, UniqueFEIRVar varArray, + UniqueFEIRExpr exprIndex) { + UniqueFEIRType arrayType = varElem->GetType()->Clone(); + (void)arrayType->ArrayIncrDim(); + UniqueFEIRExpr exprElem = CreateExprDRead(std::move(varElem)); + UniqueFEIRExpr exprArray = CreateExprDRead(std::move(varArray)); + UniqueFEIRStmt stmt = std::make_unique(std::move(exprElem), std::move(exprArray), + std::move(exprIndex), std::move(arrayType)); + return stmt; +} + +UniqueFEIRStmt FEIRBuilder::CreateStmtArrayStoreOneStmtForC(UniqueFEIRExpr exprElem, UniqueFEIRExpr exprArray, + UniqueFEIRExpr exprIndex, UniqueFEIRType arrayType) { + UniqueFEIRStmt stmt = std::make_unique(std::move(exprElem), std::move(exprArray), + std::move(exprIndex), std::move(arrayType)); + return stmt; +} + +UniqueFEIRStmt FEIRBuilder::CreateStmtArrayStoreOneStmtForC(UniqueFEIRExpr exprElem, UniqueFEIRExpr exprArray, + UniqueFEIRExpr exprIndex, UniqueFEIRType arrayType, + const std::string &argArrayName) { + UniqueFEIRStmt stmt = std::make_unique(std::move(exprElem), std::move(exprArray), + std::move(exprIndex), std::move(arrayType), + argArrayName); + return stmt; +} + +UniqueFEIRStmt FEIRBuilder::CreateStmtArrayStoreOneStmtForC(UniqueFEIRExpr exprElem, UniqueFEIRExpr exprArray, + std::list exprIndexs, + UniqueFEIRType arrayType, const std::string &argArrayName) { + UniqueFEIRStmt stmt = std::make_unique(std::move(exprElem), std::move(exprArray), + exprIndexs, std::move(arrayType), + argArrayName); + return stmt; +} + +UniqueFEIRStmt FEIRBuilder::CreateStmtArrayStoreOneStmtForC(UniqueFEIRExpr exprElem, UniqueFEIRExpr exprArray, + UniqueFEIRExpr exprIndex, UniqueFEIRType arrayType, + UniqueFEIRType elemType, const std::string &argArrayName) { + UniqueFEIRStmt stmt = std::make_unique(std::move(exprElem), std::move(exprArray), + std::move(exprIndex), std::move(arrayType), + std::move(elemType), argArrayName); + return stmt; +} + +std::list FEIRBuilder::CreateStmtArrayLoad(UniqueFEIRVar varElem, UniqueFEIRVar varArray, + UniqueFEIRVar varIndex) { + std::list ans; + UniqueFEIRExpr exprArray = CreateExprDRead(std::move(varArray)); + UniqueFEIRExpr exprIndex = CreateExprDRead(std::move(varIndex)); + UniqueFEIRType arrayType = varElem->GetType()->Clone(); + (void)arrayType->ArrayIncrDim(); + UniqueFEIRExpr expr = std::make_unique(std::move(exprArray), std::move(exprIndex), + std::move(arrayType)); + UniqueFEIRStmt stmt = CreateStmtDAssign(std::move(varElem), std::move(expr), true); + 
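// --------------------------------------------------------------------------------------
// Illustrative sketch (standalone, not part of this patch): why the statement built just
// above is pushed with std::move below. The Unique* handles used throughout this file are
// owning, move-only smart pointers, so ownership has to be transferred into the list
// explicitly; a plain copy would not compile.
#include <list>
#include <memory>

int main() {
  std::list<std::unique_ptr<int>> ans;
  std::unique_ptr<int> stmt = std::make_unique<int>(42);
  // ans.push_back(stmt);            // ill-formed: unique_ptr cannot be copied
  ans.push_back(std::move(stmt));    // ownership moves into the list; `stmt` is now empty
  return ans.front() != nullptr ? 0 : 1;
}
// --------------------------------------------------------------------------------------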
ans.push_back(std::move(stmt)); + return ans; +} + +UniqueFEIRStmt FEIRBuilder::CreateStmtArrayLength(UniqueFEIRVar varLength, UniqueFEIRVar varArray) { + UniqueFEIRExpr exprArray = CreateExprDRead(std::move(varArray)); + UniqueFEIRExpr exprIntriOp = CreateExprJavaArrayLength(std::move(exprArray)); + UniqueFEIRStmt stmt = std::make_unique(std::move(varLength), std::move(exprIntriOp)); + return stmt; +} + +UniqueFEIRStmt FEIRBuilder::CreateStmtRetype(UniqueFEIRVar varDst, const UniqueFEIRVar &varSrc) { + // ref -> ref : retype + // primitive + // short -> long : cvt GetPrimTypeSize + // long -> short : JAVAMERGE + PrimType dstType = varDst->GetType()->GetPrimType(); + PrimType srcType = varSrc->GetType()->GetPrimType(); + uint32 srcPrmTypeSize = GetPrimTypeSize(varSrc->GetType()->GetPrimType()); + uint32 dstPrmTypeSize = GetPrimTypeSize(dstType); + UniqueFEIRExpr dreadExpr = std::make_unique(varSrc->Clone()); + if (dstType == PTY_ref && srcType == PTY_ref) { + std::unique_ptr ptrTy = FEIRTypeHelper::CreatePointerType(varDst->GetType()->Clone(), PTY_ref); + UniqueFEIRExpr expr = std::make_unique(std::move(ptrTy), OP_retype, std::move(dreadExpr)); + return FEIRBuilder::CreateStmtDAssign(std::move(varDst), std::move(expr), false); + } else if (srcPrmTypeSize < dstPrmTypeSize || (IsPrimitiveFloat(srcType) && IsPrimitiveInteger(dstType)) || + dstType == PTY_ref) { + UniqueFEIRExpr expr = std::make_unique(varDst->GetType()->Clone(), OP_cvt, std::move(dreadExpr)); + return FEIRBuilder::CreateStmtDAssign(std::move(varDst), std::move(expr), false); + } else if (((IsPrimitiveInteger(dstType) || IsPrimitiveFloat(dstType)) && + (IsPrimitiveInteger(srcType) || IsPrimitiveFloat(srcType)) && + IsPrimitiveInteger(srcType) && IsPrimitiveFloat(dstType) && + GetPrimTypeBitSize(srcType) <= GetPrimTypeBitSize(dstType)) || + (IsPrimitiveInteger(srcType) && IsPrimitiveInteger(dstType))) { + std::vector> exprs; + exprs.emplace_back(std::move(dreadExpr)); + UniqueFEIRExpr javaMerge = std::make_unique(varDst->GetType()->Clone(), exprs); + return std::make_unique(std::move(varDst), std::move(javaMerge)); + } else { + WARN(kLncWarn, "Unsafe convert %s to %s", GetPrimTypeName(srcType), GetPrimTypeName(dstType)); + UniqueFEIRExpr expr = std::make_unique(varDst->GetType()->Clone(), OP_cvt, std::move(dreadExpr)); + return FEIRBuilder::CreateStmtDAssign(std::move(varDst), std::move(expr), false); + } +} + +UniqueFEIRStmt FEIRBuilder::CreateStmtComment(const std::string &comment) { + UniqueFEIRStmt stmt = std::make_unique(comment); + return stmt; +} + +UniqueFEIRExpr FEIRBuilder::ReadExprField(UniqueFEIRExpr expr, FieldID fieldID, UniqueFEIRType fieldType) { + FieldID baseID = expr->GetFieldID(); + expr->SetFieldID(baseID + fieldID); + expr->SetFieldType(std::move(fieldType)); + return expr; +} + +UniqueFEIRStmt FEIRBuilder::AssginStmtField(const UniqueFEIRExpr &addrExpr, UniqueFEIRExpr srcExpr, FieldID fieldID) { + UniqueFEIRStmt stmt; + FieldID baseID = addrExpr->GetFieldID(); + if (addrExpr->GetKind() == kExprDRead) { + stmt = CreateStmtDAssignAggField( + static_cast(addrExpr.get())->GetVar()->Clone(), std::move(srcExpr), baseID + fieldID); + } else if (addrExpr->GetKind() == kExprIRead) { + auto ireadExpr = static_cast(addrExpr.get()); + stmt = CreateStmtIAssign(ireadExpr->GetClonedPtrType(), ireadExpr->GetClonedOpnd(), + std::move(srcExpr), baseID + fieldID); + } else if (addrExpr->GetKind() == kExprIAddrof) { + auto iaddrofExpr = static_cast(addrExpr.get()); + stmt = CreateStmtIAssign(iaddrofExpr->GetClonedPtrType(), 
iaddrofExpr->GetClonedOpnd(), + std::move(srcExpr), baseID + fieldID); + } else { + CHECK_FATAL(false, "unsupported expr in AssginStmtField"); + } + return stmt; +} + +bool FEIRBuilder::IsZeroConstExpr(const std::unique_ptr &expr) { + if (expr != nullptr && expr->GetKind() == kExprConst && + static_cast(expr.get())->GetValue().u64 == 0) { + return true; + } + return false; +} + +UniqueFEIRStmt FEIRBuilder::CreateVLAStackRestore(const UniqueFEIRVar &vlaSavedStackVar) { + UniqueFEIRExpr dreadVar = FEIRBuilder::CreateExprDRead(vlaSavedStackVar->Clone()); + std::unique_ptr> argExprList = std::make_unique>(); + argExprList->emplace_back(std::move(dreadVar)); + return std::make_unique(INTRN_C_stack_restore, nullptr, nullptr, + std::move(argExprList)); +} + +std::string FEIRBuilder::EmitVLACleanupStmts(FEFunction &feFunction, const std::string &labelName, const Loc &loc) { + std::list stmts; + bool isVlaCleanup = false; + std::string vlaLabelName = ""; + for (const auto &scope : feFunction.GetScopeStack()) { + if (scope->GetVLASavedStackVar() != nullptr) { + if (!isVlaCleanup) { + // label vla_cleanup + vlaLabelName = FEUtils::GetSequentialName("vla_cleanup."); + auto vlaLabelStmt = std::make_unique(vlaLabelName); + (void)stmts.emplace_back(std::move(vlaLabelStmt)); + } + // vla stack restore + isVlaCleanup = true; + auto stackRestoreStmt = scope->GenVLAStackRestoreStmt(); + (void)stmts.emplace_back(std::move(stackRestoreStmt)); + } + if (scope->IsControllScope()) { + break; + } + } + if (isVlaCleanup) { + // goto last label name + auto gotoStmt = FEIRBuilder::CreateStmtGoto(labelName); + stmts.emplace_back(std::move(gotoStmt)); + for (const auto &stmt : stmts) { + stmt->SetSrcLoc(loc); + } + feFunction.AddVLACleanupStmts(stmts); + } + return vlaLabelName; +} + +void FEIRBuilder::EmitVLACleanupStmts(const FEFunction &feFunction, std::list &stmts) { + for (const auto &scope : feFunction.GetScopeStack()) { + if (scope->GetVLASavedStackVar() != nullptr) { + auto stackRestoreStmt = scope->GenVLAStackRestoreStmt(); + (void)stmts.emplace_back(std::move(stackRestoreStmt)); + } + } +} +} // namespace maple diff --git a/src/hir2mpl/common/src/feir_scope.cpp b/src/hir2mpl/common/src/feir_scope.cpp new file mode 100644 index 0000000000000000000000000000000000000000..f21e59e2d4c84e1521d629f6f3d812ed41a90fda --- /dev/null +++ b/src/hir2mpl/common/src/feir_scope.cpp @@ -0,0 +1,32 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "feir_scope.h" +#include "feir_builder.h" + +namespace maple { +UniqueFEIRScope FEIRScope::Clone() const { + UniqueFEIRScope scope = std::make_unique(id, mirScope); + scope->SetIsControllScope(isControllScope); + if (vlaSavedStackVar != nullptr) { + scope->SetVLASavedStackVar(vlaSavedStackVar->Clone()); + } + return scope; +} + +UniqueFEIRStmt FEIRScope::GenVLAStackRestoreStmt() const { + CHECK_NULL_FATAL(vlaSavedStackVar); + return FEIRBuilder::CreateVLAStackRestore(vlaSavedStackVar->Clone()); +} +} diff --git a/src/hir2mpl/common/src/feir_stmt.cpp b/src/hir2mpl/common/src/feir_stmt.cpp new file mode 100644 index 0000000000000000000000000000000000000000..02f3c3dfc9cab95cfc9a829201e0e66844f66f5c --- /dev/null +++ b/src/hir2mpl/common/src/feir_stmt.cpp @@ -0,0 +1,4443 @@ +/* + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "feir_stmt.h" +#include "opcode_info.h" +#include "literalstrname.h" +#include "mir_type.h" +#include "feir_builder.h" +#include "feir_var_reg.h" +#include "feir_var_name.h" +#include "fe_manager.h" +#include "hir2mpl_env.h" +#include "feir_var_type_scatter.h" +#include "fe_options.h" +#include "feir_type_helper.h" +#include "fe_utils.h" +#include "fe_utils_java.h" +#include "enhance_c_checker.h" +#include "fe_macros.h" +#ifndef ONLY_C +#include "rc_setter.h" +#endif +#include "ast_util.h" + +namespace maple { +std::string GetFEIRNodeKindDescription(FEIRNodeKind kindArg) { + switch (kindArg) { +#define FEIR_NODE_KIND(kind, description) \ + case k##kind: { \ + return description; \ + } +#include "feir_node_kind.def" +#undef FEIR_NODE_KIND + default: { + CHECK_FATAL(false, "Undefined FEIRNodeKind %u", static_cast(kindArg)); + return ""; + } + } +} + +// ---------- FEIRStmt ---------- +std::list FEIRStmt::GenMIRStmtsImpl(MIRBuilder &mirBuilder) const { + return std::list(); +} + +bool FEIRStmt::IsStmtInstImpl() const { + switch (kind) { + case kStmtAssign: + case kStmtNonAssign: + case kStmtDAssign: + case kStmtJavaTypeCheck: + case kStmtJavaConstClass: + case kStmtJavaConstString: + case kStmtJavaMultiANewArray: + case kStmtCallAssign: + case kStmtJavaDynamicCallAssign: + case kStmtIAssign: + case kStmtUseOnly: + case kStmtReturn: + case kStmtBranch: + case kStmtGoto: + case kStmtCondGoto: + case kStmtSwitch: + case kStmtArrayStore: + case kStmtFieldStore: + case kStmtFieldLoad: + return true; + default: + return false; + } +} + +bool FEIRStmt::IsStmtInstComment() const { + return (kind == kStmtPesudoComment); +} + +bool FEIRStmt::ShouldHaveLOC() const { + return (IsStmtInstImpl() || IsStmtInstComment()); +} + +BaseNode *FEIRStmt::ReplaceAddrOfNode(BaseNode *node) const { + switch (node->GetOpCode()) { + case OP_dread: + node->SetOpCode(OP_addrof); + node->SetPrimType(PTY_ptr); + return node; + case OP_iread: + node->SetOpCode(OP_iaddrof); + node->SetPrimType(PTY_ptr); + return node; + default: + return node; + } +} + +std::string FEIRStmt::DumpDotStringImpl() const { + std::stringstream ss; + ss << " " << id << ": " << 
GetFEIRNodeKindDescription(kind); + return ss.str(); +} + +void FEIRStmt::DumpImpl(const std::string &prefix) const { + std::cout << prefix << "FEIRStmt" << id << "(kind=" << GetFEIRNodeKindDescription(kind) << ")\n"; +} + +// ---------- FEIRStmtCheckPoint ---------- +void FEIRStmtCheckPoint::Reset() { + predCPs.clear(); + localUD.clear(); + lastDef.clear(); + cacheUD.clear(); + defs.clear(); + uses.clear(); +} + +void FEIRStmtCheckPoint::RegisterDFGNode(UniqueFEIRVar &var) { + CHECK_NULL_FATAL(var); + if (var->IsDef()) { + defs.push_back(&var); + lastDef[FEIRDFGNode(var)] = &var; + } else { + uses.push_back(&var); + auto it = lastDef.find(FEIRDFGNode(var)); + if (it != lastDef.end()) { + CHECK_FATAL(localUD[&var].insert(it->second).second, "localUD insert failed"); + } + } +} + +void FEIRStmtCheckPoint::RegisterDFGNodes(const std::list &vars) { + for (UniqueFEIRVar *var : vars) { + CHECK_NULL_FATAL(var); + RegisterDFGNode(*var); + } +} + +void FEIRStmtCheckPoint::RegisterDFGNodeFromAllVisibleStmts() { + if (firstVisibleStmt == nullptr) { + return; + } + FELinkListNode *node = static_cast(firstVisibleStmt); + while (node != this) { + FEIRStmt *stmt = static_cast(node); + stmt->RegisterDFGNodes2CheckPoint(*this); + node = node->GetNext(); + } +} + +void FEIRStmtCheckPoint::AddPredCheckPoint(FEIRStmtCheckPoint &stmtCheckPoint) { + if (predCPs.find(&stmtCheckPoint) == predCPs.end()) { + CHECK_FATAL(predCPs.insert(&stmtCheckPoint).second, "pred checkpoints insert error"); + } +} + +std::set &FEIRStmtCheckPoint::CalcuDef(UniqueFEIRVar &use) { + CHECK_NULL_FATAL(use); + auto itLocal = localUD.find(&use); + // search localUD + if (itLocal != localUD.cend()) { + return itLocal->second; + } + // search cacheUD + auto itCache = cacheUD.find(FEIRDFGNode(use)); + if (itCache != cacheUD.cend()) { + return itCache->second; + } + // search by DFS + std::set visitSet; + std::set &result = cacheUD[FEIRDFGNode(use)]; + CalcuDefDFS(result, use, *this, visitSet); + if (result.size() == 0) { + WARN(kLncWarn, "use var %s without def", use->GetNameRaw().c_str()); + } + return result; +} + +void FEIRStmtCheckPoint::CalcuDefDFS(std::set &result, const UniqueFEIRVar &use, + const FEIRStmtCheckPoint &cp, + std::set &visitSet) const { + CHECK_NULL_FATAL(use); + if (visitSet.find(&cp) != visitSet.end()) { + return; + } + CHECK_FATAL(visitSet.insert(&cp).second, "visitSet insert failed"); + auto itLast = cp.lastDef.find(FEIRDFGNode(use)); + if (itLast != cp.lastDef.end()) { + CHECK_FATAL(result.insert(itLast->second).second, "def insert failed"); + return; + } + // optimization by cacheUD + auto itCache = cp.cacheUD.find(FEIRDFGNode(use)); + if (itCache != cp.cacheUD.end()) { + for (UniqueFEIRVar *def : itCache->second) { + CHECK_FATAL(result.insert(def).second, "def insert failed"); + } + if (itCache->second.size() > 0) { + return; + } + } + // optimization by cacheUD (end) + for (const FEIRStmtCheckPoint *pred : cp.predCPs) { + CHECK_NULL_FATAL(pred); + CalcuDefDFS(result, use, *pred, visitSet); + } +} + +std::string FEIRStmtCheckPoint::DumpDotStringImpl() const { + std::stringstream ss; + ss << " " << id << ": " << GetFEIRNodeKindDescription(kind) << " preds: [ "; + for (FEIRStmtCheckPoint *pred : predCPs) { + ss << pred->GetID() << ", "; + } + ss << " ]"; + return ss.str(); +} + +// ---------- FEIRStmtNary ---------- +FEIRStmtNary::FEIRStmtNary(Opcode opIn, std::list> argExprsIn) + : FEIRStmt(kStmtNary), op(opIn), argExprs(std::move(argExprsIn)) {} + +std::list FEIRStmtNary::GenMIRStmtsImpl(MIRBuilder &mirBuilder) 
const { + std::list stmts; + StmtNode *stmt = nullptr; + if (argExprs.size() > 1) { + MapleVector args(mirBuilder.GetCurrentFuncCodeMpAllocator()->Adapter()); + for (const auto &arg : argExprs) { + BaseNode *node = arg->GenMIRNode(mirBuilder); + args.push_back(node); + } + stmt = mirBuilder.CreateStmtNary(op, std::move(args)); + } else if (argExprs.size() == 1) { + // ignore invalid syntax: access content pointed by void pointer + if (op == OP_eval && argExprs.front()->GetKind() == FEIRNodeKind::kExprIRead) { + FEIRExprIRead *ireadFeExpr = static_cast(argExprs.front().get()); + MIRType *mirType = ireadFeExpr->GetClonedPtrType()->GenerateMIRTypeAuto(); + if (ASTUtil::IsVoidPointerType(mirType->GetTypeIndex())) { + return stmts; + } + } + BaseNode *node = argExprs.front()->GenMIRNode(mirBuilder); + if (op == OP_eval && argExprs.front()->IsAddrof()) { + node = ReplaceAddrOfNode(node); // addrof va_list + } + stmt = mirBuilder.CreateStmtNary(op, node); + } else { + CHECK_FATAL(false, "Invalid arg size for MIR StmtNary"); + } + if (stmt != nullptr) { + stmts.emplace_back(stmt); + } + return stmts; +} + +// ---------- FEStmtAssign ---------- +FEIRStmtAssign::FEIRStmtAssign(FEIRNodeKind argKind, std::unique_ptr argVar) + : FEIRStmt(argKind), + hasException(false), + var(std::move(argVar)) {} + +void FEIRStmtAssign::RegisterDFGNodes2CheckPointImpl(FEIRStmtCheckPoint &checkPoint) { + if (var != nullptr) { + var->SetDef(true); + checkPoint.RegisterDFGNode(var); + } +} + +std::string FEIRStmtAssign::DumpDotStringImpl() const { + std::stringstream ss; + ss << " " << id << ": " << GetFEIRNodeKindDescription(kind); + return ss.str(); +} + +// ---------- FEStmtDAssign ---------- +FEIRStmtDAssign::FEIRStmtDAssign(std::unique_ptr argVar, std::unique_ptr argExpr, int32 argFieldID) + : FEIRStmtAssign(FEIRNodeKind::kStmtDAssign, std::move(argVar)), + fieldID(argFieldID) { + SetExpr(std::move(argExpr)); +} + +void FEIRStmtDAssign::RegisterDFGNodes2CheckPointImpl(FEIRStmtCheckPoint &checkPoint) { + expr->RegisterDFGNodes2CheckPoint(checkPoint); + var->SetDef(true); + checkPoint.RegisterDFGNode(var); +} + +bool FEIRStmtDAssign::CalculateDefs4AllUsesImpl(FEIRStmtCheckPoint &checkPoint, FEIRUseDefChain &udChain) { + return expr->CalculateDefs4AllUses(checkPoint, udChain); +} + +void FEIRStmtDAssign::InitTrans4AllVarsImpl() { + switch (expr->GetKind()) { + case FEIRNodeKind::kExprDRead: { + FEIRExprDRead *dRead = static_cast(expr.get()); + UniqueFEIRVarTrans trans1 = std::make_unique(FEIRVarTransKind::kFEIRVarTransDirect, var); + dRead->SetTrans(std::move(trans1)); + var->SetTrans(dRead->CreateTransDirect()); + break; + } + case FEIRNodeKind::kExprArrayLoad: { + FEIRExprArrayLoad *arrayLoad = static_cast(expr.get()); + UniqueFEIRVarTrans trans1 = std::make_unique(FEIRVarTransKind::kFEIRVarTransArrayDimIncr, var); + arrayLoad->SetTrans(std::move(trans1)); + var->SetTrans(arrayLoad->CreateTransArrayDimDecr()); + break; + } + default: + break; + } +} + +std::list FEIRStmtDAssign::GenMIRStmtsImpl(MIRBuilder &mirBuilder) const { + std::list ans; + ASSERT(var != nullptr, "dst var is nullptr"); + ASSERT(expr != nullptr, "src expr is nullptr"); + MIRSymbol *dstSym = var->GenerateMIRSymbol(mirBuilder); + BaseNode *srcNode = expr->GenMIRNode(mirBuilder); + if (fieldID != 0) { + MIRType *mirType = var->GetType()->GenerateMIRTypeAuto(); + CHECK_FATAL((mirType->GetKind() == MIRTypeKind::kTypeStruct || mirType->GetKind() == MIRTypeKind::kTypeUnion), + "If fieldID is not 0, then the variable must be a structure"); + } + 
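// --------------------------------------------------------------------------------------
// Illustrative sketch (standalone, not part of this patch): the list-accumulation idiom
// used by the checking helpers invoked below -- each helper produces its own list of
// generated statements, which is then spliced onto the tail of `ans` in constant time.
#include <iostream>
#include <list>

int main() {
  std::list<int> ans = {1, 2};        // statements generated so far
  std::list<int> generated = {3, 4};  // stands in for a helper's freshly generated stmts
  ans.splice(ans.end(), generated);   // O(1) transfer; `generated` is left empty
  for (int v : ans) {
    std::cout << v << ' ';
  }
  std::cout << '\n';
  return 0;
}
// --------------------------------------------------------------------------------------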
InsertNonnullChecking(mirBuilder, *dstSym, ans); + CheckNonnullArgsAndRetForFuncPtr(mirBuilder); + AssignBoundaryVarAndChecking(mirBuilder, ans); + CheckBoundaryArgsAndRetForFuncPtr(mirBuilder); + StmtNode *mirStmt = mirBuilder.CreateStmtDassign(*dstSym, fieldID, srcNode); + ans.push_back(mirStmt); + ENCChecker::CheckBoundaryLenFinalAssign(mirBuilder, var, fieldID, loc); + ENCChecker::CheckBoundaryLenFinalAddr(mirBuilder, expr, loc); + return ans; +} + +void FEIRStmtDAssign::InsertNonnullChecking(MIRBuilder &mirBuilder, const MIRSymbol &dstSym, + std::list &ans) const { + if (!FEOptions::GetInstance().IsNpeCheckDynamic()) { + return; + } + MIRType *srcType = expr->GetType()->GenerateMIRTypeAuto(); + if (fieldID == 0) { + ENCChecker::CheckNonnullFieldInStruct(*srcType, *dstSym.GetType(), loc); + if (!dstSym.GetAttr(ATTR_nonnull)) { + return; + } + } else { + FieldID tmpID = fieldID; + FieldPair fieldPair = static_cast(dstSym.GetType())->TraverseToFieldRef(tmpID); + MIRType *dstType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(fieldPair.second.first); + ENCChecker::CheckNonnullFieldInStruct(*srcType, *dstType, loc); + if (!fieldPair.second.second.GetAttr(FLDATTR_nonnull)) { + return; + } + } + if (ENCChecker::HasNullExpr(expr)) { + FE_ERR(kLncErr, loc, "null assignment of nonnull pointer"); + return; + } + UniqueFEIRStmt stmt = std::make_unique(OP_assignassertnonnull, expr->Clone()); + std::list stmts = stmt->GenMIRStmts(mirBuilder); + ans.splice(ans.end(), stmts); +} + +std::string FEIRStmtDAssign::DumpDotStringImpl() const { + std::stringstream ss; + ss << " " << id << ": " << GetFEIRNodeKindDescription(kind); + ss << " def : " << var->GetNameRaw() << ", uses : " << expr->DumpDotString() << std::endl; + return ss.str(); +} + +// ---------- FEIRStmtJavaTypeCheck ---------- +FEIRStmtJavaTypeCheck::FEIRStmtJavaTypeCheck(std::unique_ptr argVar, std::unique_ptr argExpr, + std::unique_ptr argType, + FEIRStmtJavaTypeCheck::CheckKind argCheckKind) + : FEIRStmtAssign(FEIRNodeKind::kStmtJavaTypeCheck, std::move(argVar)), + checkKind(argCheckKind), + expr(std::move(argExpr)), + type(std::move(argType)) {} + +FEIRStmtJavaTypeCheck::FEIRStmtJavaTypeCheck(std::unique_ptr argVar, std::unique_ptr argExpr, + std::unique_ptr argType, + FEIRStmtJavaTypeCheck::CheckKind argCheckKind, + uint32 argTypeID) + : FEIRStmtAssign(FEIRNodeKind::kStmtJavaTypeCheck, std::move(argVar)), + checkKind(argCheckKind), + expr(std::move(argExpr)), + type(std::move(argType)), + typeID(argTypeID) {} + +void FEIRStmtJavaTypeCheck::RegisterDFGNodes2CheckPointImpl(FEIRStmtCheckPoint &checkPoint) { + expr->RegisterDFGNodes2CheckPoint(checkPoint); + var->SetDef(true); + checkPoint.RegisterDFGNode(var); +} + +bool FEIRStmtJavaTypeCheck::CalculateDefs4AllUsesImpl(FEIRStmtCheckPoint &checkPoint, FEIRUseDefChain &udChain) { + return expr->CalculateDefs4AllUses(checkPoint, udChain); +} + +std::list FEIRStmtJavaTypeCheck::GenMIRStmtsImpl(MIRBuilder &mirBuilder) const { + std::list ans; + CHECK_FATAL(expr->GetKind() == FEIRNodeKind::kExprDRead, "only support expr dread"); + BaseNode *objNode = expr->GenMIRNode(mirBuilder); + MIRSymbol *ret = var->GenerateLocalMIRSymbol(mirBuilder); + MIRType *mirType = type->GenerateMIRType(); + MIRType *mirPtrType = GlobalTables::GetTypeTable().GetOrCreatePointerType(*mirType, PTY_ref); + MapleVector arguments(mirBuilder.GetCurrentFuncCodeMpAllocator()->Adapter()); + arguments.push_back(objNode); + if (FEOptions::GetInstance().IsAOT()) { + arguments.push_back(mirBuilder.CreateIntConst(typeID, PTY_i32)); 
+ } + if (checkKind == kCheckCast) { + StmtNode *callStmt = mirBuilder.CreateStmtIntrinsicCallAssigned(INTRN_JAVA_CHECK_CAST, std::move(arguments), ret, + mirPtrType->GetTypeIndex()); + ans.push_back(callStmt); + } else { + BaseNode *instanceOf = mirBuilder.CreateExprIntrinsicop(INTRN_JAVA_INSTANCE_OF, OP_intrinsicopwithtype, *mirPtrType, + std::move(arguments)); + instanceOf->SetPrimType(PTY_u1); + DassignNode *stmt = mirBuilder.CreateStmtDassign(*ret, 0, instanceOf); + ans.push_back(stmt); + } + return ans; +} + +std::string FEIRStmtJavaTypeCheck::DumpDotStringImpl() const { + std::stringstream ss; + ss << " " << id << ": " << GetFEIRNodeKindDescription(kind); + ss << "uses :" << expr->DumpDotString(); + return ss.str(); +} + +// ---------- FEIRStmtJavaConstClass ---------- +FEIRStmtJavaConstClass::FEIRStmtJavaConstClass(std::unique_ptr argVar, std::unique_ptr argType) + : FEIRStmtAssign(FEIRNodeKind::kStmtJavaConstClass, std::move(argVar)), + type(std::move(argType)) {} + +void FEIRStmtJavaConstClass::RegisterDFGNodes2CheckPointImpl(FEIRStmtCheckPoint &checkPoint) { + var->SetDef(true); + checkPoint.RegisterDFGNode(var); +} + +std::list FEIRStmtJavaConstClass::GenMIRStmtsImpl(MIRBuilder &mirBuilder) const { + std::list ans; + MIRSymbol *varSym = var->GenerateLocalMIRSymbol(mirBuilder); + MapleVector args(mirBuilder.GetCurrentFuncCodeMpAllocator()->Adapter()); + MIRType *ptrType = type->GenerateMIRTypeAuto(kSrcLangJava); + BaseNode *expr = + mirBuilder.CreateExprIntrinsicop(INTRN_JAVA_CONST_CLASS, OP_intrinsicopwithtype, *ptrType, std::move(args)); + StmtNode *stmt = mirBuilder.CreateStmtDassign(*varSym, 0, expr); + ans.push_back(stmt); + return ans; +} + +// ---------- FEIRStmtJavaConstString ---------- +FEIRStmtJavaConstString::FEIRStmtJavaConstString(std::unique_ptr argVar, const std::string &argStrVal, + uint32 argFileIdx, uint32 argStringID) + : FEIRStmtAssign(FEIRNodeKind::kStmtJavaConstString, std::move(argVar)), + strVal(argStrVal), fileIdx(argFileIdx), stringID(argStringID) {} + +void FEIRStmtJavaConstString::RegisterDFGNodes2CheckPointImpl(FEIRStmtCheckPoint &checkPoint) { + var->SetDef(true); + checkPoint.RegisterDFGNode(var); +} + +std::list FEIRStmtJavaConstString::GenMIRStmtsImpl(MIRBuilder &mirBuilder) const { + std::list ans; + MIRSymbol *literalVal = FEManager::GetJavaStringManager().GetLiteralVar(strVal); + if (literalVal == nullptr) { + literalVal = FEManager::GetJavaStringManager().CreateLiteralVar(mirBuilder, strVal, false); + } + MIRSymbol *literalValPtr = FEManager::GetJavaStringManager().GetLiteralPtrVar(literalVal); + if (literalValPtr == nullptr) { + std::string localStrName = kLocalStringPrefix + std::to_string(fileIdx) + "_" + std::to_string(stringID); + MIRType *typeString = FETypeManager::kFEIRTypeJavaString->GenerateMIRTypeAuto(kSrcLangJava); + MIRSymbol *symbolLocal = mirBuilder.GetOrCreateLocalDecl(localStrName.c_str(), *typeString); + if (!FEOptions::GetInstance().IsAOT()) { + MapleVector args(mirBuilder.GetCurrentFuncCodeMpAllocator()->Adapter()); + args.push_back(mirBuilder.CreateExprAddrof(0, *literalVal)); + StmtNode *stmtCreate = mirBuilder.CreateStmtCallAssigned( + FEManager::GetTypeManager().GetPuIdxForMCCGetOrInsertLiteral(), std::move(args), symbolLocal, + OP_callassigned); + + ans.push_back(stmtCreate); + } + literalValPtr = symbolLocal; + } + MIRSymbol *varDst = var->GenerateLocalMIRSymbol(mirBuilder); + AddrofNode *node = mirBuilder.CreateDread(*literalValPtr, PTY_ptr); + StmtNode *stmt = mirBuilder.CreateStmtDassign(*varDst, 0, node); + 
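// --------------------------------------------------------------------------------------
// Illustrative sketch (standalone, not part of this patch): the get-or-create caching
// applied to literal strings above -- look the literal up first and only build a new
// symbol on a miss, so repeated uses of the same constant share one definition.
#include <iostream>
#include <string>
#include <unordered_map>

int GetOrCreateLiteralId(const std::string &literal) {
  static std::unordered_map<std::string, int> literalIds;  // stands in for the string manager
  auto it = literalIds.find(literal);
  if (it != literalIds.end()) {
    return it->second;                           // cache hit: reuse the existing entry
  }
  int id = static_cast<int>(literalIds.size());  // cache miss: create and remember it
  literalIds.emplace(literal, id);
  return id;
}

int main() {
  std::cout << GetOrCreateLiteralId("hello") << ' ' << GetOrCreateLiteralId("hello") << '\n';  // 0 0
  return 0;
}
// --------------------------------------------------------------------------------------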
ans.push_back(stmt); + return ans; +} + +// ---------- FEIRStmtJavaFillArrayData ---------- +FEIRStmtJavaFillArrayData::FEIRStmtJavaFillArrayData(std::unique_ptr arrayExprIn, const int8 *arrayDataIn, + uint32 sizeIn, const std::string &arrayNameIn) + : FEIRStmtAssign(FEIRNodeKind::kStmtJavaFillArrayData, nullptr), + arrayExpr(std::move(arrayExprIn)), + arrayData(arrayDataIn), + size(sizeIn), + arrayName(arrayNameIn) {} + +void FEIRStmtJavaFillArrayData::RegisterDFGNodes2CheckPointImpl(FEIRStmtCheckPoint &checkPoint) { + arrayExpr->RegisterDFGNodes2CheckPoint(checkPoint); +} + +bool FEIRStmtJavaFillArrayData::CalculateDefs4AllUsesImpl(FEIRStmtCheckPoint &checkPoint, FEIRUseDefChain &udChain) { + return arrayExpr->CalculateDefs4AllUses(checkPoint, udChain); +} + +std::list FEIRStmtJavaFillArrayData::GenMIRStmtsImpl(MIRBuilder &mirBuilder) const { + std::list ans; + PrimType elemPrimType = ProcessArrayElemPrimType(); + MapleVector args(mirBuilder.GetCurrentFuncCodeMpAllocator()->Adapter()); + BaseNode *nodeArray = arrayExpr->GenMIRNode(mirBuilder); + args.push_back(nodeArray); + MIRSymbol *elemDataVar = ProcessArrayElemData(mirBuilder, elemPrimType); + BaseNode *nodeAddrof = mirBuilder.CreateExprAddrof(0, *elemDataVar); + args.push_back(nodeAddrof); + uint64 elemPrimTypeSize = GetPrimTypeSize(elemPrimType); + uint64 val = elemPrimTypeSize * size; + BaseNode *nodebytes = mirBuilder.CreateIntConst(val, PTY_i32); + args.push_back(nodebytes); + StmtNode *stmt = mirBuilder.CreateStmtIntrinsicCallAssigned(INTRN_JAVA_ARRAY_FILL, std::move(args), nullptr); + ans.push_back(stmt); + return ans; +} + +PrimType FEIRStmtJavaFillArrayData::ProcessArrayElemPrimType() const { + const FEIRType &arrayType = arrayExpr->GetTypeRef(); + CHECK_FATAL(arrayType.IsArray(), "var should be array type"); + UniqueFEIRType elemType = arrayType.Clone(); + (void)elemType->ArrayDecrDim(); + PrimType elemPrimType = elemType->GetPrimType(); + return elemPrimType; +} + +MIRSymbol *FEIRStmtJavaFillArrayData::ProcessArrayElemData(const MIRBuilder &mirBuilder, PrimType elemPrimType) const { + // specify size for const array + uint32 sizeIn = size; + MIRType *arrayTypeWithSize = GlobalTables::GetTypeTable().GetOrCreateArrayType( + *GlobalTables::GetTypeTable().GetPrimType(elemPrimType), 1, &sizeIn); + MIRSymbol *arrayVar = mirBuilder.GetOrCreateGlobalDecl(arrayName, *arrayTypeWithSize); + arrayVar->SetAttr(ATTR_readonly); + arrayVar->SetStorageClass(kScFstatic); + MIRAggConst *val = FillArrayElem(mirBuilder, elemPrimType, *arrayTypeWithSize, size); + arrayVar->SetKonst(val); + return arrayVar; +} + +MIRAggConst *FEIRStmtJavaFillArrayData::FillArrayElem(const MIRBuilder &mirBuilder, PrimType elemPrimType, + MIRType &arrayTypeWithSize, uint32 elemSize) const { + MemPool *mp = mirBuilder.GetMirModule().GetMemPool(); + MIRModule &module = mirBuilder.GetMirModule(); + MIRAggConst *val = module.GetMemPool()->New(module, arrayTypeWithSize); + MIRConst *cst = nullptr; + for (uint32 i = 0; i < elemSize; ++i) { + MIRType &elemType = *GlobalTables::GetTypeTable().GetPrimType(elemPrimType); + switch (elemPrimType) { + case PTY_u1: + cst = mp->New((reinterpret_cast(arrayData))[i], elemType); + break; + case PTY_i8: + cst = mp->New((reinterpret_cast(arrayData))[i], elemType); + break; + case PTY_u8: + cst = mp->New((reinterpret_cast(arrayData))[i], elemType); + break; + case PTY_i16: + cst = mp->New((reinterpret_cast(arrayData))[i], elemType); + break; + case PTY_u16: + cst = mp->New((reinterpret_cast(arrayData))[i], elemType); + break; + case 
PTY_i32: + cst = mp->New((reinterpret_cast(arrayData))[i], elemType); + break; + case PTY_u32: + cst = mp->New((reinterpret_cast(arrayData))[i], elemType); + break; + case PTY_i64: + cst = mp->New((reinterpret_cast(arrayData))[i], elemType); + break; + case PTY_u64: + cst = mp->New((reinterpret_cast(arrayData))[i], elemType); + break; + case PTY_f32: + cst = mp->New((reinterpret_cast(arrayData))[i], elemType); + break; + case PTY_f64: + cst = mp->New((reinterpret_cast(arrayData))[i], elemType); + break; + default: + CHECK_FATAL(false, "Unsupported primitive type"); + break; + } + val->PushBack(cst); + } + return val; +} + +// ---------- FEIRStmtJavaMultiANewArray ---------- +UniqueFEIRVar FEIRStmtJavaMultiANewArray::varSize = nullptr; +UniqueFEIRVar FEIRStmtJavaMultiANewArray::varClass = nullptr; +UniqueFEIRType FEIRStmtJavaMultiANewArray::typeAnnotation = nullptr; +FEStructMethodInfo *FEIRStmtJavaMultiANewArray::methodInfoNewInstance = nullptr; + +FEIRStmtJavaMultiANewArray::FEIRStmtJavaMultiANewArray(std::unique_ptr argVar, + std::unique_ptr argElemType, + std::unique_ptr argArrayType) + : FEIRStmtAssign(FEIRNodeKind::kStmtJavaMultiANewArray, std::move(argVar)), + elemType(std::move(argElemType)), + arrayType(std::move(argArrayType)) {} + +void FEIRStmtJavaMultiANewArray::RegisterDFGNodes2CheckPointImpl(FEIRStmtCheckPoint &checkPoint) { + for (const UniqueFEIRExpr &expr : exprSizes) { + expr->RegisterDFGNodes2CheckPoint(checkPoint); + } + var->SetDef(true); + checkPoint.RegisterDFGNode(var); +} + +bool FEIRStmtJavaMultiANewArray::CalculateDefs4AllUsesImpl(FEIRStmtCheckPoint &checkPoint, FEIRUseDefChain &udChain) { + bool success = true; + for (const UniqueFEIRExpr &expr : exprSizes) { + success = success && expr->CalculateDefs4AllUses(checkPoint, udChain); + } + return success; +} + +void FEIRStmtJavaMultiANewArray::AddVarSize(std::unique_ptr argVarSize) { + argVarSize->SetType(FETypeManager::kPrimFEIRTypeI32->Clone()); + UniqueFEIRExpr expr = FEIRBuilder::CreateExprDRead(std::move(argVarSize)); + exprSizes.push_back(std::move(expr)); +} + +void FEIRStmtJavaMultiANewArray::AddVarSizeRev(std::unique_ptr argVarSize) { + argVarSize->SetType(FETypeManager::kPrimFEIRTypeI32->Clone()); + UniqueFEIRExpr expr = FEIRBuilder::CreateExprDRead(std::move(argVarSize)); + exprSizes.push_front(std::move(expr)); +} + +std::list FEIRStmtJavaMultiANewArray::GenMIRStmtsImpl(MIRBuilder &mirBuilder) const { + std::list ans; + // size array fill + MapleVector argsSizeArrayFill(mirBuilder.GetCurrentFuncCodeMpAllocator()->Adapter()); + for (const UniqueFEIRExpr &expr : exprSizes) { + BaseNode *node = expr->GenMIRNode(mirBuilder); + argsSizeArrayFill.push_back(node); + } + MIRSymbol *symSize = GetVarSize()->GenerateLocalMIRSymbol(mirBuilder); + StmtNode *stmtSizeArrayFill = mirBuilder.CreateStmtIntrinsicCallAssigned(INTRN_JAVA_FILL_NEW_ARRAY, + std::move(argsSizeArrayFill), symSize, TyIdx(PTY_i32)); + ans.push_back(stmtSizeArrayFill); + // const class + FEIRStmtJavaConstClass feStmtConstClass(GetVarClass()->Clone(), elemType->Clone()); + std::list stmtsConstClass = feStmtConstClass.GenMIRStmts(mirBuilder); + (void)ans.insert(ans.cend(), stmtsConstClass.cbegin(), stmtsConstClass.cend()); + // invoke newInstance + UniqueFEIRVar varRetCall = var->Clone(); + varRetCall->SetType(FETypeManager::kFEIRTypeJavaObject->Clone()); + FEIRStmtCallAssign feStmtCall(GetMethodInfoNewInstance(), OP_callassigned, varRetCall->Clone(), true); + feStmtCall.AddExprArg(FEIRBuilder::CreateExprDRead(GetVarClass()->Clone())); + 
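+  // The newInstance helper is called with the element class and the size array filled above; its raw object
+  // result is cast to the requested array type by the check-cast emitted below.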
feStmtCall.AddExprArg(FEIRBuilder::CreateExprDRead(GetVarSize()->Clone())); + std::list stmtsCall = feStmtCall.GenMIRStmts(mirBuilder); + (void)ans.insert(ans.end(), stmtsCall.begin(), stmtsCall.end()); + // check cast + var->SetType(arrayType->Clone()); + UniqueFEIRExpr expr = std::make_unique(std::move(varRetCall)); + FEIRStmtJavaTypeCheck feStmtCheck(var->Clone(), std::move(expr), arrayType->Clone(), + FEIRStmtJavaTypeCheck::kCheckCast); + std::list stmtsCheck = feStmtCheck.GenMIRStmts(mirBuilder); + (void)ans.insert(ans.cend(), stmtsCheck.cbegin(), stmtsCheck.cend()); + return ans; +} + +const UniqueFEIRVar &FEIRStmtJavaMultiANewArray::GetVarSize() { + if (varSize != nullptr) { + return varSize; + } + HIR2MPL_PARALLEL_FORBIDDEN(); + GStrIdx varNameIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName("tmpsize"); + UniqueFEIRType varSizeType = FETypeManager::kPrimFEIRTypeI32->Clone(); + (void)varSizeType->ArrayIncrDim(); + varSize = std::make_unique(varNameIdx, std::move(varSizeType), true); + return varSize; +} + +const UniqueFEIRVar &FEIRStmtJavaMultiANewArray::GetVarClass() { + if (varClass != nullptr) { + return varClass; + } + HIR2MPL_PARALLEL_FORBIDDEN(); + GStrIdx varNameIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName("tmpclass"); + varClass = std::make_unique(varNameIdx, FETypeManager::kFEIRTypeJavaClass->Clone(), true); + return varClass; +} + +const UniqueFEIRType &FEIRStmtJavaMultiANewArray::GetTypeAnnotation() { + if (typeAnnotation != nullptr) { + return typeAnnotation; + } + HIR2MPL_PARALLEL_FORBIDDEN(); + typeAnnotation = std::make_unique(PTY_ref); + static_cast(typeAnnotation.get())->LoadFromJavaTypeName("Ljava/lang/annotation/Annotation;", false); + return typeAnnotation; +} + +FEStructMethodInfo &FEIRStmtJavaMultiANewArray::GetMethodInfoNewInstance() { + if (methodInfoNewInstance != nullptr) { + return *methodInfoNewInstance; + } + StructElemNameIdx structElemNameIdx(FEUtilJava::GetMultiANewArrayClassIdx(), + FEUtilJava::GetMultiANewArrayElemIdx(), + FEUtilJava::GetMultiANewArrayTypeIdx(), + FEUtilJava::GetMultiANewArrayFullIdx()); + methodInfoNewInstance = static_cast(FEManager::GetTypeManager().RegisterStructMethodInfo( + structElemNameIdx, kSrcLangJava, true)); + return *methodInfoNewInstance; +} + +std::string FEIRStmtJavaMultiANewArray::DumpDotStringImpl() const { + std::stringstream ss; + ss << " " << id << ": " << GetFEIRNodeKindDescription(kind); + return ss.str(); +} + +// ---------- FEIRStmtUseOnly ---------- +FEIRStmtUseOnly::FEIRStmtUseOnly(FEIRNodeKind argKind, Opcode argOp, std::unique_ptr argExpr) + : FEIRStmt(argKind), + op(argOp) { + if (argExpr != nullptr) { + expr = std::move(argExpr); + } +} + +FEIRStmtUseOnly::FEIRStmtUseOnly(Opcode argOp, std::unique_ptr argExpr) + : FEIRStmtUseOnly(FEIRNodeKind::kStmtUseOnly, argOp, std::move(argExpr)) {} + +void FEIRStmtUseOnly::RegisterDFGNodes2CheckPointImpl(FEIRStmtCheckPoint &checkPoint) { + if (expr != nullptr) { + expr->RegisterDFGNodes2CheckPoint(checkPoint); + } +} + +bool FEIRStmtUseOnly::CalculateDefs4AllUsesImpl(FEIRStmtCheckPoint &checkPoint, FEIRUseDefChain &udChain) { + if (expr != nullptr) { + return expr->CalculateDefs4AllUses(checkPoint, udChain); + } + return true; +} + +std::list FEIRStmtUseOnly::GenMIRStmtsImpl(MIRBuilder &mirBuilder) const { + std::list ans; + ASSERT_NOT_NULL(expr); + if (SkipNonnullChecking(mirBuilder)) { + return ans; + } + BaseNode *srcNode = expr->GenMIRNode(mirBuilder); + StmtNode *mirStmt = mirBuilder.CreateStmtNary(op, srcNode); + 
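+  // A single nary statement carrying the use-only opcode is emitted; assertnonnull-style statements whose
+  // operand already carries a nonnull attribute were skipped above via SkipNonnullChecking.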
ans.push_back(mirStmt); + return ans; +} + +bool FEIRStmtUseOnly::SkipNonnullChecking(MIRBuilder &mirBuilder) const { + if (!kOpcodeInfo.IsAssertNonnull(op)) { + return false; + } + return ENCChecker::HasNonnullAttrInExpr(mirBuilder, expr); +} + +std::string FEIRStmtUseOnly::DumpDotStringImpl() const { + std::stringstream ss; + ss << " " << id << ": " << GetFEIRNodeKindDescription(kind); + if (expr != nullptr) { + ss << expr->DumpDotString(); + } + return ss.str(); +} + +std::list FEIRStmtAssertNonnull::GenMIRStmtsImpl(MIRBuilder &mirBuilder) const { + std::list ans; + ASSERT_NOT_NULL(expr); + if (SkipNonnullChecking(mirBuilder)) { + return ans; + } + BaseNode *srcNode = expr->GenMIRNode(mirBuilder); + StmtNode *mirStmt = mirBuilder.CreateStmtAssertNonnull(op, srcNode, mirBuilder.GetCurrentFunction()->GetNameStrIdx()); + ans.push_back(mirStmt); + return ans; +} + +// ---------- FEIRStmtCallAssertNonnull ---------- +std::list FEIRStmtCallAssertNonnull::GenMIRStmtsImpl(MIRBuilder &mirBuilder) const { + std::list ans; + ASSERT_NOT_NULL(expr); + if (SkipNonnullChecking(mirBuilder)) { + return ans; + } + BaseNode *srcNode = expr->GenMIRNode(mirBuilder); + GStrIdx stridx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(GetFuncName()); + StmtNode *mirStmt = mirBuilder.CreateStmtCallAssertNonnull(op, srcNode, stridx, GetParamIndex(), + mirBuilder.GetCurrentFunction()->GetNameStrIdx()); + ans.push_back(mirStmt); + return ans; +} + +// ---------- FEIRStmtCallAssertBoundary ---------- +std::list FEIRStmtCallAssertBoundary::GenMIRStmtsImpl(MIRBuilder &mirBuilder) const { + std::list stmts; + StmtNode *stmt = nullptr; + auto args = ENCChecker::ReplaceBoundaryChecking(mirBuilder, this); + if (args.size() > 0) { + GStrIdx stridx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(GetFuncName()); + stmt = mirBuilder.CreateStmtCallAssertBoundary(op, std::move(args), stridx, GetParamIndex(), + mirBuilder.GetCurrentFunction()->GetNameStrIdx()); + } + if (stmt != nullptr) { + stmts.emplace_back(stmt); + } + return stmts; +} + +// ---------- FEIRStmtAssertBoundary ---------- +std::list FEIRStmtAssertBoundary::GenMIRStmtsImpl(MIRBuilder &mirBuilder) const { + std::list stmts; + StmtNode *stmt = nullptr; + auto args = ENCChecker::ReplaceBoundaryChecking(mirBuilder, this); + if (args.size() > 0) { + stmt = mirBuilder.CreateStmtAssertBoundary(op, std::move(args), mirBuilder.GetCurrentFunction()->GetNameStrIdx()); + } + if (stmt != nullptr) { + stmts.emplace_back(stmt); + } + return stmts; +} + +// ---------- FEIRStmtReturn ---------- +FEIRStmtReturn::FEIRStmtReturn(std::unique_ptr argExpr) + : FEIRStmtUseOnly(FEIRNodeKind::kStmtReturn, OP_return, std::move(argExpr)) { + isFallThru = true; + } + +std::list FEIRStmtReturn::GenMIRStmtsImpl(MIRBuilder &mirBuilder) const { + std::list ans; + StmtNode *mirStmt = nullptr; + if (expr == nullptr) { + mirStmt = mirBuilder.CreateStmtReturn(nullptr); + } else { + BaseNode *srcNode = expr->GenMIRNode(mirBuilder); + if (mirBuilder.GetCurrentFunction()->IsFirstArgReturn()) { + MIRSymbol *firstArgRetSym = mirBuilder.GetCurrentFunction()->GetFormal(0); + BaseNode *addrNode = mirBuilder.CreateDread(*firstArgRetSym, PTY_ptr); + StmtNode *iNode = mirBuilder.CreateStmtIassign(*firstArgRetSym->GetType(), 0, addrNode, srcNode); + ans.emplace_back(iNode); + mirStmt = mirBuilder.CreateStmtReturn(nullptr); + } else { + InsertNonnullChecking(mirBuilder, ans); + ENCChecker::InsertBoundaryAssignChecking(mirBuilder, ans, expr, loc); + 
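+      // Dynamic nonnull/boundary checks are emitted ahead of the return so they apply to the value being returned.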
ENCChecker::CheckBoundaryLenFinalAddr(mirBuilder, expr, loc); + mirStmt = mirBuilder.CreateStmtReturn(srcNode); + } + } + ans.emplace_back(mirStmt); + return ans; +} + +void FEIRStmtReturn::InsertNonnullChecking(MIRBuilder &mirBuilder, std::list &ans) const { + if (!FEOptions::GetInstance().IsNpeCheckDynamic() || expr == nullptr) { + return; + } + MIRType *srcType = expr->GetType()->GenerateMIRTypeAuto(); + MIRType *dstType = mirBuilder.GetCurrentFunction()->GetReturnType(); + ENCChecker::CheckNonnullFieldInStruct(*srcType, *dstType, loc); + if (!mirBuilder.GetCurrentFunction()->GetAttrs().GetAttr(FUNCATTR_nonnull)) { + return; + } + if (ENCChecker::HasNullExpr(expr)) { + FE_ERR(kLncErr, loc, "%s return nonnull but got null pointer", mirBuilder.GetCurrentFunction()->GetName().c_str()); + return; + } + if (expr->GetPrimType() == PTY_ptr) { + UniqueFEIRStmt stmt = std::make_unique(OP_returnassertnonnull, expr->Clone()); + stmt->SetSrcLoc(expr->GetLoc()); + std::list stmts = stmt->GenMIRStmts(mirBuilder); + ans.splice(ans.end(), stmts); + } +} + +// ---------- FEIRStmtGoto ---------- +FEIRStmtGoto::FEIRStmtGoto(uint32 argLabelIdx) + : FEIRStmt(FEIRNodeKind::kStmtGoto), + labelIdx(argLabelIdx), + stmtTarget(nullptr) {} + +FEIRStmtGoto::~FEIRStmtGoto() { + stmtTarget = nullptr; +} + +std::list FEIRStmtGoto::GenMIRStmtsImpl(MIRBuilder &mirBuilder) const { + std::list ans; + CHECK_NULL_FATAL(stmtTarget); + GotoNode *gotoNode = mirBuilder.CreateStmtGoto(OP_goto, stmtTarget->GetMIRLabelIdx()); + ans.push_back(gotoNode); + return ans; +} + +std::string FEIRStmtGoto::DumpDotStringImpl() const { + std::stringstream ss; + ss << " " << id << ": " << GetFEIRNodeKindDescription(kind); + return ss.str(); +} + +// ---------- FEIRStmtGoto2 ---------- +FEIRStmtGoto2::FEIRStmtGoto2(uint32 qIdx0, uint32 qIdx1) + : FEIRStmt(FEIRNodeKind::kStmtGoto), labelIdxOuter(qIdx0), labelIdxInner(qIdx1) {} + +std::list FEIRStmtGoto2::GenMIRStmtsImpl(MIRBuilder &mirBuilder) const { + std::list stmts; + GotoNode *gotoNode = mirBuilder.CreateStmtGoto( + OP_goto, FEIRStmtPesudoLabel2::GenMirLabelIdx(mirBuilder, labelIdxOuter, labelIdxInner)); + stmts.push_back(gotoNode); + return stmts; +} + +std::pair FEIRStmtGoto2::GetLabelIdx() const { + return std::make_pair(labelIdxOuter, labelIdxInner); +} + +FEIRStmtGotoForC::FEIRStmtGotoForC(const std::string &name) + : FEIRStmt(FEIRNodeKind::kStmtGoto), + labelName(name) {} + +std::list FEIRStmtGotoForC::GenMIRStmtsImpl(MIRBuilder &mirBuilder) const { + std::list ans; + GenVLACleanup(mirBuilder, ans); + LabelIdx label = mirBuilder.GetOrCreateMIRLabel(labelName); + GotoNode *gotoNode = mirBuilder.CreateStmtGoto(OP_goto, label); + ans.push_back(gotoNode); + return ans; +} + +void FEIRStmtGotoForC::GenVLACleanup(MIRBuilder &mirBuilder, std::list &ans) const { + if (vlaSvaedStackVars.empty()) { + return; + } + const auto &labelWithScopes = FEManager::GetCurrentFEFunction().GetLabelWithScopes(); + const auto &iter = labelWithScopes.find(labelName); + if (iter == labelWithScopes.cend()) { + return; + } + const std::map &scopes = iter->second; + for (const auto &pair : vlaSvaedStackVars) { + // The current scope is the same, and the parent scope is also the same between goto stmt and label stmt + const auto &pIter = scopes.find(pair.first); + if (pIter != scopes.cend()) { + CHECK_NULL_FATAL(pIter->second); + const UniqueFEIRVar &vlaStackVar = pIter->second->GetVLASavedStackVar(); + if (vlaStackVar != nullptr) { + break; + } + } + // emit vla stack restore stmt + auto vlaStmt = 
FEIRBuilder::CreateVLAStackRestore(pair.second->Clone())->GenMIRStmts(mirBuilder); + (void)ans.insert(ans.cend(), vlaStmt.cbegin(), vlaStmt.cend()); + } +} + +std::string FEIRStmtGotoForC::DumpDotStringImpl() const { + std::stringstream ss; + ss << " " << id << ": " << GetFEIRNodeKindDescription(kind); + return ss.str(); +} + +// ---------- FEIRStmtIGoto ---------- +FEIRStmtIGoto::FEIRStmtIGoto(UniqueFEIRExpr expr) : FEIRStmt(kStmtIGoto), targetExpr(std::move(expr)) {} + +std::list FEIRStmtIGoto::GenMIRStmtsImpl(MIRBuilder &mirBuilder) const { + std::list stmts; + stmts.emplace_back(mirBuilder.CreateStmtUnary(OP_igoto, targetExpr->GenMIRNode(mirBuilder))); + return stmts; +} + +// ---------- FEIRStmtCondGotoForC ---------- +std::list FEIRStmtCondGotoForC::GenMIRStmtsImpl(MIRBuilder &mirBuilder) const { + std::list ans; + LabelIdx labelID = mirBuilder.GetOrCreateMIRLabel(labelName); + BaseNode *condNode = expr->GenMIRNode(mirBuilder); + CondGotoNode *gotoNode = mirBuilder.CreateStmtCondGoto(condNode, opCode, labelID); + ans.push_back(gotoNode); + return ans; +} + +std::string FEIRStmtCondGotoForC::DumpDotStringImpl() const { + std::stringstream ss; + ss << " " << id << ": " << GetFEIRNodeKindDescription(kind); + return ss.str(); +} + +// ---------- FEIRStmtCondGoto ---------- +FEIRStmtCondGoto::FEIRStmtCondGoto(Opcode argOp, uint32 argLabelIdx, UniqueFEIRExpr argExpr) + : FEIRStmtGoto(argLabelIdx), + op(argOp) { + kind = FEIRNodeKind::kStmtCondGoto; + SetExpr(std::move(argExpr)); +} + +void FEIRStmtCondGoto::RegisterDFGNodes2CheckPointImpl(FEIRStmtCheckPoint &checkPoint) { + expr->RegisterDFGNodes2CheckPoint(checkPoint); +} + +bool FEIRStmtCondGoto::CalculateDefs4AllUsesImpl(FEIRStmtCheckPoint &checkPoint, FEIRUseDefChain &udChain) { + return expr->CalculateDefs4AllUses(checkPoint, udChain); +} + +std::list FEIRStmtCondGoto::GenMIRStmtsImpl(MIRBuilder &mirBuilder) const { + std::list ans; + BaseNode *condNode = expr->GenMIRNode(mirBuilder); + CHECK_NULL_FATAL(stmtTarget); + CondGotoNode *gotoNode = mirBuilder.CreateStmtCondGoto(condNode, op, stmtTarget->GetMIRLabelIdx()); + ans.push_back(gotoNode); + return ans; +} + +std::string FEIRStmtCondGoto::DumpDotStringImpl() const { + std::stringstream ss; + ss << " " << id << ": " << GetFEIRNodeKindDescription(kind); + return ss.str(); +} + +// ---------- FEIRStmtCondGoto2 ---------- +FEIRStmtCondGoto2::FEIRStmtCondGoto2(Opcode argOp, uint32 qIdx0, uint32 qIdx1, UniqueFEIRExpr argExpr) + : FEIRStmtGoto2(qIdx0, qIdx1), op(argOp), expr(std::move(argExpr)) {} + +std::list FEIRStmtCondGoto2::GenMIRStmtsImpl(MIRBuilder &mirBuilder) const { + std::list stmts; + BaseNode *condNode = expr->GenMIRNode(mirBuilder); + CondGotoNode *gotoNode = mirBuilder.CreateStmtCondGoto( + condNode, op, FEIRStmtPesudoLabel2::GenMirLabelIdx(mirBuilder, labelIdxOuter, labelIdxInner)); + stmts.push_back(gotoNode); + return stmts; +} + +// ---------- FEIRStmtSwitch ---------- +FEIRStmtSwitch::FEIRStmtSwitch(UniqueFEIRExpr argExpr) + : FEIRStmt(FEIRNodeKind::kStmtSwitch), + defaultLabelIdx(0), + defaultTarget(nullptr) { + SetExpr(std::move(argExpr)); +} + +FEIRStmtSwitch::~FEIRStmtSwitch() { + defaultTarget = nullptr; +} + +void FEIRStmtSwitch::RegisterDFGNodes2CheckPointImpl(FEIRStmtCheckPoint &checkPoint) { + expr->RegisterDFGNodes2CheckPoint(checkPoint); +} + +bool FEIRStmtSwitch::CalculateDefs4AllUsesImpl(FEIRStmtCheckPoint &checkPoint, FEIRUseDefChain &udChain) { + return expr->CalculateDefs4AllUses(checkPoint, udChain); +} + +bool FEIRStmtSwitch::IsFallThroughImpl() const 
{ + WARN(kLncWarn, "%s:%d stmt[%s] need to be lowed when building bb", + FEManager::GetModule().GetFileNameFromFileNum(loc.fileIdx).c_str(), loc.line, + GetFEIRNodeKindDescription(kind).c_str()); + return false; +} + +std::list FEIRStmtSwitch::GenMIRStmtsImpl(MIRBuilder &mirBuilder) const { + std::list ans; + CaseVector switchTable(mirBuilder.GetCurrentFuncCodeMpAllocator()->Adapter()); + for (const std::pair &targetPair : mapValueTargets) { + CHECK_NULL_FATAL(targetPair.second); + switchTable.push_back(std::make_pair(targetPair.first, targetPair.second->GetMIRLabelIdx())); + } + BaseNode *exprNode = expr->GenMIRNode(mirBuilder); + CHECK_NULL_FATAL(defaultTarget); + SwitchNode *switchNode = mirBuilder.CreateStmtSwitch(exprNode, defaultTarget->GetMIRLabelIdx(), switchTable); + ans.push_back(switchNode); + return ans; +} + +std::string FEIRStmtSwitch::DumpDotStringImpl() const { + std::stringstream ss; + ss << " " << id << ": " << GetFEIRNodeKindDescription(kind); + return ss.str(); +} + +// ---------- FEIRStmtSwitch2 ---------- +FEIRStmtSwitch2::FEIRStmtSwitch2(uint32 outerIdxIn, UniqueFEIRExpr argExpr) + : FEIRStmt(FEIRNodeKind::kStmtSwitch), + outerIdx(outerIdxIn), + defaultLabelIdx(0), + defaultTarget(nullptr) { + SetExpr(std::move(argExpr)); +} + +FEIRStmtSwitch2::~FEIRStmtSwitch2() { + defaultTarget = nullptr; +} + +bool FEIRStmtSwitch2::IsFallThroughImpl() const { + WARN(kLncWarn, "%s:%d stmt[%s] need to be lowed when building bb", + FEManager::GetModule().GetFileNameFromFileNum(loc.fileIdx).c_str(), loc.line, + GetFEIRNodeKindDescription(kind).c_str()); + return false; +} + +std::list FEIRStmtSwitch2::GenMIRStmtsImpl(MIRBuilder &mirBuilder) const { + std::list ans; + CaseVector switchTable(mirBuilder.GetCurrentFuncCodeMpAllocator()->Adapter()); + for (const std::pair &valueLabelPair : mapValueLabelIdx) { + switchTable.emplace_back(valueLabelPair.first, + FEIRStmtPesudoLabel2::GenMirLabelIdx(mirBuilder, outerIdx, valueLabelPair.second)); + } + BaseNode *exprNode = expr->GenMIRNode(mirBuilder); + SwitchNode *switchNode = mirBuilder.CreateStmtSwitch(exprNode, + FEIRStmtPesudoLabel2::GenMirLabelIdx(mirBuilder, outerIdx, defaultLabelIdx), switchTable); + ans.push_back(switchNode); + return ans; +} + +std::string FEIRStmtSwitch2::DumpDotStringImpl() const { + std::stringstream ss; + ss << " " << id << ": " << GetFEIRNodeKindDescription(kind); + return ss.str(); +} + +// ---------- FEIRStmtIf ---------- +FEIRStmtIf::FEIRStmtIf(UniqueFEIRExpr argCondExpr, std::list &argThenStmts) + : FEIRStmt(FEIRNodeKind::kStmtIf) { + SetCondExpr(std::move(argCondExpr)); + hasElse = false; + SetThenStmts(argThenStmts); +} + +FEIRStmtIf::FEIRStmtIf(UniqueFEIRExpr argCondExpr, + std::list &argThenStmts, + std::list &argElseStmts) + : FEIRStmt(FEIRNodeKind::kStmtIf) { + SetCondExpr(std::move(argCondExpr)); + SetThenStmts(argThenStmts); + if (argElseStmts.empty()) { + hasElse = false; + } else { + hasElse = true; + SetElseStmts(argElseStmts); + } +} + +bool FEIRStmtIf::IsFallThroughImpl() const { + WARN(kLncWarn, "%s:%d stmt[%s] need to be lowed when building bb", + FEManager::GetModule().GetFileNameFromFileNum(loc.fileIdx).c_str(), loc.line, + GetFEIRNodeKindDescription(kind).c_str()); + return false; +} + +std::list FEIRStmtIf::GenMIRStmtsImpl(MIRBuilder &mirBuilder) const { + BaseNode *condBase = condExpr->GenMIRNode(mirBuilder); + IfStmtNode *stmt = nullptr; + if (hasElse) { + stmt = mirBuilder.CreateStmtIfThenElse(condBase); + } else { + stmt = mirBuilder.CreateStmtIf(condBase); + } + for (const auto 
&thenStmt : thenStmts) { + for (auto thenNode : thenStmt->GenMIRStmts(mirBuilder)) { + stmt->GetThenPart()->AddStatement(thenNode); + } + } + if (hasElse) { + for (const auto &elseStmt : elseStmts) { + for (auto elseNode : elseStmt->GenMIRStmts(mirBuilder)) { + stmt->GetElsePart()->AddStatement(elseNode); + } + } + } + return std::list({ stmt }); +} + +std::string FEIRStmtIf::DumpDotStringImpl() const { + std::stringstream ss; + ss << " " << id << ": " << GetFEIRNodeKindDescription(kind); + return ss.str(); +} + +// ---------- FEIRStmtSwitchForC ---------- +FEIRStmtSwitchForC::FEIRStmtSwitchForC(UniqueFEIRExpr argCondExpr, bool argHasDefault) + : FEIRStmt(FEIRNodeKind::kStmtSwitch), + expr(std::move(argCondExpr)), + hasDefault(argHasDefault) {} + +void FEIRStmtSwitchForC::RegisterDFGNodes2CheckPointImpl(FEIRStmtCheckPoint &checkPoint) { + expr->RegisterDFGNodes2CheckPoint(checkPoint); +} + +bool FEIRStmtSwitchForC::CalculateDefs4AllUsesImpl(FEIRStmtCheckPoint &checkPoint, FEIRUseDefChain &udChain) { + return expr->CalculateDefs4AllUses(checkPoint, udChain); +} + +std::list FEIRStmtSwitchForC::GenMIRStmtsImpl(MIRBuilder &mirBuilder) const { + std::list ans; + MIRModule &module = mirBuilder.GetMirModule(); + CaseVector *caseVec = module.CurFuncCodeMemPool()->New(module.CurFuncCodeMemPoolAllocator()->Adapter()); + std::string endName = AstSwitchUtil::Instance().CreateEndOrExitLabelName(); + LabelIdx swDefaultLabel = mirBuilder.GetOrCreateMIRLabel(endName); // end label + AstSwitchUtil::Instance().PushNestedCaseVectors(std::pair(caseVec, swDefaultLabel)); + BaseNode *exprNode = expr->GenMIRNode(mirBuilder); + BlockNode *tempBlock = module.CurFuncCodeMemPool()->New(); + CaseVector &switchTable = *AstSwitchUtil::Instance().GetTopOfNestedCaseVectors().first; + for (auto &sub : subStmts) { + for (auto mirStmt : sub->GenMIRStmts(mirBuilder)) { + tempBlock->AddStatement(mirStmt); + } + } + SwitchNode *mirSwitchStmt = mirBuilder.CreateStmtSwitch(exprNode, swDefaultLabel, switchTable); + ans.push_back(mirSwitchStmt); + for (auto &it : tempBlock->GetStmtNodes()) { + ans.emplace_back(&it); + } + if (!hasDefault) { + LabelIdx endLab = mirBuilder.GetOrCreateMIRLabel(endName); + StmtNode *mirSwExitLabelStmt = mirBuilder.CreateStmtLabel(endLab); + ans.push_back(mirSwExitLabelStmt); + } + LabelIdx exitLab = mirBuilder.GetOrCreateMIRLabel(breakLabelName); + StmtNode *mirSwExitLabelStmt = mirBuilder.CreateStmtLabel(exitLab); + ans.push_back(mirSwExitLabelStmt); + AstSwitchUtil::Instance().PopNestedCaseVectors(); + return ans; +} + +std::string FEIRStmtSwitchForC::DumpDotStringImpl() const { + std::stringstream ss; + ss << " " << id << ": " << GetFEIRNodeKindDescription(kind); + return ss.str(); +} + +FEIRStmtCaseForC::FEIRStmtCaseForC(int64 label) + : FEIRStmt(FEIRNodeKind::kStmtCaseForC), + lCaseLabel(label) {} + +void FEIRStmtCaseForC::AddCaseTag2CaseVec(int64 lCaseTag, int64 rCaseTag) { + auto pLabel = std::make_unique(lCaseLabel); + for (int64 csTag = lCaseTag; csTag <= rCaseTag; ++csTag) { + pesudoLabelMap.insert(std::make_pair(csTag, std::move(pLabel))); + } +} + +std::list FEIRStmtCaseForC::GenMIRStmtsImpl(MIRBuilder &mirBuilder) const { + std::list ans; + CaseVector &caseVec = *AstSwitchUtil::Instance().GetTopOfNestedCaseVectors().first; + StmtNode *mirLabelStmt = nullptr; + for (auto &targetPair : GetPesudoLabelMap()) { + targetPair.second->GenerateLabelIdx(mirBuilder); + caseVec.push_back(std::make_pair(targetPair.first, targetPair.second->GetMIRLabelIdx())); + } + for (auto it : caseVec) { + if 
(lCaseLabel == it.first) { + mirLabelStmt = mirBuilder.CreateStmtLabel(it.second); + ans.emplace_back(mirLabelStmt); + } + } + for (auto &sub : subStmts) { + ans.splice(ans.end(), sub.get()->GenMIRStmts(mirBuilder)); + } + return ans; +} + +std::string FEIRStmtCaseForC::DumpDotStringImpl() const { + std::stringstream ss; + ss << " " << id << ": " << GetFEIRNodeKindDescription(kind); + return ss.str(); +} + +FEIRStmtDefaultForC::FEIRStmtDefaultForC() + : FEIRStmt(FEIRNodeKind::kStmtDefaultForC) {} + +std::list FEIRStmtDefaultForC::GenMIRStmtsImpl(MIRBuilder &mirBuilder) const { + std::list ans; + StmtNode *mirLabelStmt = mirBuilder.CreateStmtLabel(AstSwitchUtil::Instance().GetTopOfNestedCaseVectors().second); + ans.emplace_back(mirLabelStmt); + for (auto &sub : subStmts) { + ans.splice(ans.end(), sub.get()->GenMIRStmts(mirBuilder)); + } + return ans; +} + +std::string FEIRStmtDefaultForC::DumpDotStringImpl() const { + std::stringstream ss; + ss << " " << id << ": " << GetFEIRNodeKindDescription(kind); + return ss.str(); +} + +// ---------- FEIRStmtArrayStore ---------- +FEIRStmtArrayStore::FEIRStmtArrayStore(UniqueFEIRExpr argExprElem, UniqueFEIRExpr argExprArray, + UniqueFEIRExpr argExprIndex, UniqueFEIRType argTypeArray) + : FEIRStmt(FEIRNodeKind::kStmtArrayStore), + exprElem(std::move(argExprElem)), + exprArray(std::move(argExprArray)), + exprIndex(std::move(argExprIndex)), + typeArray(std::move(argTypeArray)) {} + +FEIRStmtArrayStore::FEIRStmtArrayStore(UniqueFEIRExpr argExprElem, UniqueFEIRExpr argExprArray, + UniqueFEIRExpr argExprIndex, UniqueFEIRType argTypeArray, + const std::string &argArrayName) + : FEIRStmt(FEIRNodeKind::kStmtArrayStore), + exprElem(std::move(argExprElem)), + exprArray(std::move(argExprArray)), + exprIndex(std::move(argExprIndex)), + typeArray(std::move(argTypeArray)), + arrayName(argArrayName) {} + +FEIRStmtArrayStore::FEIRStmtArrayStore(UniqueFEIRExpr argExprElem, UniqueFEIRExpr argExprArray, + UniqueFEIRExpr argExprIndex, UniqueFEIRType argTypeArray, + UniqueFEIRType argTypeElem, const std::string &argArrayName) + : FEIRStmt(FEIRNodeKind::kStmtArrayStore), + exprElem(std::move(argExprElem)), + exprArray(std::move(argExprArray)), + exprIndex(std::move(argExprIndex)), + typeArray(std::move(argTypeArray)), + typeElem(std::move(argTypeElem)), + arrayName(argArrayName) {} + +FEIRStmtArrayStore::FEIRStmtArrayStore(UniqueFEIRExpr argExprElem, UniqueFEIRExpr argExprArray, + std::list &argExprIndexs, UniqueFEIRType argTypeArray, + const std::string &argArrayName) + : FEIRStmt(FEIRNodeKind::kStmtArrayStore), + exprElem(std::move(argExprElem)), + exprArray(std::move(argExprArray)), + typeArray(std::move(argTypeArray)), + arrayName(argArrayName) { + SetIndexsExprs(argExprIndexs); +} + +FEIRStmtArrayStore::FEIRStmtArrayStore(UniqueFEIRExpr argExprElem, UniqueFEIRExpr argExprArray, + std::list &argExprIndexs, UniqueFEIRType argTypeArray, + UniqueFEIRExpr argExprStruct, UniqueFEIRType argTypeStruct, + const std::string &argArrayName) + : FEIRStmt(FEIRNodeKind::kStmtArrayStore), + exprElem(std::move(argExprElem)), + exprArray(std::move(argExprArray)), + typeArray(std::move(argTypeArray)), + exprStruct(std::move(argExprStruct)), + typeStruct(std::move(argTypeStruct)), + arrayName(argArrayName) { + SetIndexsExprs(argExprIndexs); +} + +void FEIRStmtArrayStore::RegisterDFGNodes2CheckPointImpl(FEIRStmtCheckPoint &checkPoint) { + exprArray->RegisterDFGNodes2CheckPoint(checkPoint); + exprIndex->RegisterDFGNodes2CheckPoint(checkPoint); + 
exprElem->RegisterDFGNodes2CheckPoint(checkPoint); +} + +bool FEIRStmtArrayStore::CalculateDefs4AllUsesImpl(FEIRStmtCheckPoint &checkPoint, FEIRUseDefChain &udChain) { + bool success = true; + success = success && exprArray->CalculateDefs4AllUses(checkPoint, udChain); + success = success && exprIndex->CalculateDefs4AllUses(checkPoint, udChain); + success = success && exprElem->CalculateDefs4AllUses(checkPoint, udChain); + return success; +} + +void FEIRStmtArrayStore::InitTrans4AllVarsImpl() { + CHECK_FATAL(exprArray->GetKind() == kExprDRead, "only support dread expr for exprArray"); + CHECK_FATAL(exprIndex->GetKind() == kExprDRead, "only support dread expr for exprIndex"); + CHECK_FATAL(exprElem->GetKind() == kExprDRead, "only support dread expr for exprElem"); + FEIRExprDRead *exprArrayDRead = static_cast(exprArray.get()); + FEIRExprDRead *exprElemDRead = static_cast(exprElem.get()); + exprArrayDRead->SetTrans(exprElemDRead->CreateTransArrayDimIncr()); + exprElemDRead->SetTrans(exprArrayDRead->CreateTransArrayDimDecr()); +} + +std::list FEIRStmtArrayStore::GenMIRStmtsImpl(MIRBuilder &mirBuilder) const { + CHECK_FATAL(((exprIndex == nullptr) && (exprIndexs.size() != 0))|| + (exprIndex->GetKind() == kExprDRead) || + (exprIndex->GetKind() == kExprConst), "only support dread/const expr for exprIndex"); + MIRType *ptrMIRArrayType = typeArray->GenerateMIRType(false); + BaseNode *arrayExpr = nullptr; + MIRType *mIRElemType = nullptr; + if (FEManager::GetModule().GetSrcLang() == kSrcLangC) { + GenMIRStmtsImplForCPart(mirBuilder, ptrMIRArrayType, &mIRElemType, &arrayExpr); + } else { + BaseNode *addrBase = exprArray->GenMIRNode(mirBuilder); + BaseNode *indexBn = exprIndex->GenMIRNode(mirBuilder); + arrayExpr = mirBuilder.CreateExprArray(*ptrMIRArrayType, addrBase, indexBn); + UniqueFEIRType typeArrayCloned = typeArray->Clone(); + if ((exprIndex->GetKind() != kExprConst) || (!FEOptions::GetInstance().IsAOT())) { + (void)typeArrayCloned->ArrayDecrDim(); + } + mIRElemType = typeArrayCloned->GenerateMIRType(true); + } + BaseNode *elemBn = exprElem->GenMIRNode(mirBuilder); + IassignNode *stmt = nullptr; + if ((FEManager::GetModule().GetSrcLang() == kSrcLangC) || + (exprIndex->GetKind() != kExprConst) || (!FEOptions::GetInstance().IsAOT())) { + MIRType *ptrMIRElemType = GlobalTables::GetTypeTable().GetOrCreatePointerType(*mIRElemType, PTY_ptr); + stmt = mirBuilder.CreateStmtIassign(*ptrMIRElemType, 0, arrayExpr, elemBn); + } else { + reinterpret_cast(arrayExpr)->SetBoundsCheck(false); + stmt = mirBuilder.CreateStmtIassign(*mIRElemType, 0, arrayExpr, elemBn); + } + return std::list({ stmt }); +} + +void FEIRStmtArrayStore::GenMIRStmtsImplForCPart(MIRBuilder &mirBuilder, MIRType *ptrMIRArrayType, + MIRType **mIRElemType, BaseNode **arrayExpr) const { + if (typeElem == nullptr) { + typeElem = std::make_unique(*static_cast(ptrMIRArrayType)->GetElemType()); + } + FieldID fieldID = 0; + // array in struct + if (typeStruct != nullptr) { + fieldID = static_cast(exprArray.get())->GetFieldID(); + MIRType *ptrMIRStructType = typeStruct->GenerateMIRType(false); + MIRStructType* mirStructType = static_cast(ptrMIRStructType); + // for no init, create the struct symbol + (void)mirBuilder.GetOrCreateLocalDecl(arrayName, *mirStructType); + } + + *mIRElemType = typeElem->GenerateMIRType(true); + BaseNode *arrayAddrOfExpr = nullptr; + MIRSymbol *mirSymbol = exprArray->GetVarUses().front()->GenerateMIRSymbol(mirBuilder); + auto mirtype = mirSymbol->GetType(); + if (mirtype->GetKind() == kTypePointer) { + arrayAddrOfExpr = 
exprArray->GenMIRNode(mirBuilder); + } else { + arrayAddrOfExpr = mirBuilder.CreateExprAddrof(fieldID, *mirSymbol); + } + if (exprIndex == nullptr) { + std::vector nds; + nds.push_back(arrayAddrOfExpr); + for (auto &e : exprIndexs) { + BaseNode *no = e->GenMIRNode(mirBuilder); + nds.push_back(no); + } + *arrayExpr = mirBuilder.CreateExprArray(*ptrMIRArrayType, nds); + } else { + BaseNode *indexBn = exprIndex->GenMIRNode(mirBuilder); + *arrayExpr = mirBuilder.CreateExprArray(*ptrMIRArrayType, arrayAddrOfExpr, indexBn); + } +} + +std::string FEIRStmtArrayStore::DumpDotStringImpl() const { + std::stringstream ss; + ss << " " << id << ": " << GetFEIRNodeKindDescription(kind); + ss << " def : {"; + ss << " exprArray : " << exprArray->DumpDotString(); + ss << " exprIndex : " << exprIndex->DumpDotString(); + ss << " exprElem kind : " << GetFEIRNodeKindDescription(exprElem->GetKind()) << " " << exprElem->DumpDotString(); + ss << " } "; + return ss.str(); +} + +// ---------- FEIRStmtFieldStore ---------- +FEIRStmtFieldStore::FEIRStmtFieldStore(UniqueFEIRVar argVarObj, UniqueFEIRVar argVarField, + FEStructFieldInfo &argFieldInfo, bool argIsStatic) + : FEIRStmt(FEIRNodeKind::kStmtFieldStore), + varObj(std::move(argVarObj)), + varField(std::move(argVarField)), + fieldInfo(argFieldInfo), + isStatic(argIsStatic) {} +FEIRStmtFieldStore::FEIRStmtFieldStore(UniqueFEIRVar argVarObj, UniqueFEIRVar argVarField, + FEStructFieldInfo &argFieldInfo, bool argIsStatic, int32 argDexFileHashCode) + : FEIRStmt(FEIRNodeKind::kStmtFieldStore), + varObj(std::move(argVarObj)), + varField(std::move(argVarField)), + fieldInfo(argFieldInfo), + isStatic(argIsStatic), + dexFileHashCode(argDexFileHashCode) {} + +void FEIRStmtFieldStore::RegisterDFGNodes2CheckPointImpl(FEIRStmtCheckPoint &checkPoint) { + if (isStatic) { + RegisterDFGNodes2CheckPointForStatic(checkPoint); + } else { + RegisterDFGNodes2CheckPointForNonStatic(checkPoint); + } +} + +void FEIRStmtFieldStore::RegisterDFGNodes2CheckPointForStatic(FEIRStmtCheckPoint &checkPoint) { + checkPoint.RegisterDFGNode(varField); +} + +void FEIRStmtFieldStore::RegisterDFGNodes2CheckPointForNonStatic(FEIRStmtCheckPoint &checkPoint) { + checkPoint.RegisterDFGNode(varObj); + checkPoint.RegisterDFGNode(varField); +} + +bool FEIRStmtFieldStore::CalculateDefs4AllUsesImpl(FEIRStmtCheckPoint &checkPoint, FEIRUseDefChain &udChain) { + if (isStatic) { + return CalculateDefs4AllUsesForStatic(checkPoint, udChain); + } else { + return CalculateDefs4AllUsesForNonStatic(checkPoint, udChain); + } +} + +bool FEIRStmtFieldStore::CalculateDefs4AllUsesForStatic(FEIRStmtCheckPoint &checkPoint, FEIRUseDefChain &udChain) { + std::set &defs4VarField = checkPoint.CalcuDef(varField); + (void)udChain.insert(std::make_pair(&varField, defs4VarField)); + return (defs4VarField.size() > 0); +} + +bool FEIRStmtFieldStore::CalculateDefs4AllUsesForNonStatic(FEIRStmtCheckPoint &checkPoint, FEIRUseDefChain &udChain) { + std::set &defs4VarObj = checkPoint.CalcuDef(varObj); + (void)udChain.insert(std::make_pair(&varObj, defs4VarObj)); + std::set &defs4VarField = checkPoint.CalcuDef(varField); + (void)udChain.insert(std::make_pair(&varField, defs4VarField)); + return ((defs4VarObj.size() > 0) && (defs4VarField.size() > 0)); +} + +std::list FEIRStmtFieldStore::GenMIRStmtsImpl(MIRBuilder &mirBuilder) const { + // prepare and find root + fieldInfo.Prepare(mirBuilder, isStatic); + if (isStatic) { + return GenMIRStmtsImplForStatic(mirBuilder); + } else { + return GenMIRStmtsImplForNonStatic(mirBuilder); + } +} + +bool 
FEIRStmtFieldStore::NeedMCCForStatic(uint32 &typeID) const { + // check type first + const std::string &actualContainerName = fieldInfo.GetActualContainerName(); + typeID = FEManager::GetTypeManager().GetTypeIDFromMplClassName(actualContainerName, dexFileHashCode); + if (typeID == UINT32_MAX) { + return true; + } + + // check type field second + const std::string &fieldName = GlobalTables::GetStrTable().GetStringFromStrIdx(fieldInfo.GetFieldNameIdx()); + MIRStructType *currStructType = FEManager::GetTypeManager().GetStructTypeFromName(actualContainerName); + const auto &fields = currStructType->GetStaticFields(); + for (auto f : fields) { + const std::string &fieldNameIt = GlobalTables::GetStrTable().GetStringFromStrIdx(f.first); + if (fieldName.compare(fieldNameIt) == 0) { + return false; + } + } + return true; +} + +std::list FEIRStmtFieldStore::GenMIRStmtsImplForStatic(MIRBuilder &mirBuilder) const { + CHECK_FATAL(fieldInfo.GetFieldNameIdx() != 0, "invalid name idx"); + std::list mirStmts; + UniqueFEIRVar varTarget = std::make_unique(fieldInfo.GetFieldNameIdx(), fieldInfo.GetType()->Clone()); + varTarget->SetGlobal(true); + MIRSymbol *symSrc = varTarget->GenerateGlobalMIRSymbol(mirBuilder); + UniqueFEIRExpr exprDRead = FEIRBuilder::CreateExprDRead(varField->Clone()); + UniqueFEIRStmt stmtDAssign = FEIRBuilder::CreateStmtDAssign(std::move(varTarget), std::move(exprDRead)); + mirStmts = stmtDAssign->GenMIRStmts(mirBuilder); + if (!FEOptions::GetInstance().IsNoBarrier() && fieldInfo.IsVolatile()) { + StmtNode *barrier = mirBuilder.GetMirModule().CurFuncCodeMemPool()->New(OP_membarrelease); + mirStmts.emplace_front(barrier); + barrier = mirBuilder.GetMirModule().CurFuncCodeMemPool()->New(OP_membarstoreload); + mirStmts.emplace_back(barrier); + } + TyIdx containerTyIdx = fieldInfo.GetActualContainerType()->GenerateMIRType()->GetTypeIndex(); + if (!mirBuilder.GetCurrentFunction()->IsClinit() || + mirBuilder.GetCurrentFunction()->GetClassTyIdx() != containerTyIdx) { + MapleVector args(mirBuilder.GetCurrentFuncCodeMpAllocator()->Adapter()); + uint32 typeID = UINT32_MAX; + bool needMCCForStatic = false; + if (FEOptions::GetInstance().IsAOT()) { + needMCCForStatic = NeedMCCForStatic(typeID); + if (!needMCCForStatic) { + BaseNode *argNumExpr = mirBuilder.CreateIntConst(typeID, PTY_i32); + args.push_back(argNumExpr); + } else { + const auto &pt = fieldInfo.GetType()->GetPrimType(); + std::map primTypeFuncNameIdxMap; + FEUtils::InitPrimTypeFuncNameIdxMap(primTypeFuncNameIdxMap); + const auto &itorFunc = primTypeFuncNameIdxMap.find(pt); + CHECK_FATAL(itorFunc != primTypeFuncNameIdxMap.cend(), "java type not support %d", pt); + args.push_back(mirBuilder.CreateIntConst(fieldInfo.GetFieldID(), PTY_i32)); + BaseNode *nodeSrc = mirBuilder.CreateExprDread(*symSrc); + args.push_back(nodeSrc); + MIRSymbol *retVarSym = nullptr; + retVarSym = varField->GenerateLocalMIRSymbol(mirBuilder); + StmtNode *stmtMCC = mirBuilder.CreateStmtCallAssigned( + FEManager::GetTypeManager().GetMCCFunction(itorFunc->second)->GetPuidx(), MapleVector(args), + retVarSym, + OP_callassigned); + mirStmts.clear(); + mirStmts.emplace_front(stmtMCC); + } + } + if (!needMCCForStatic) { + StmtNode *stmtClinitCheck = mirBuilder.CreateStmtIntrinsicCall(INTRN_JAVA_CLINIT_CHECK, std::move(args), + containerTyIdx); + mirStmts.emplace_front(stmtClinitCheck); + } + } + return mirStmts; +} + +std::list FEIRStmtFieldStore::GenMIRStmtsImplForNonStatic(MIRBuilder &mirBuilder) const { + std::list ans; + FieldID fieldID = fieldInfo.GetFieldID(); + 
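+  // A non-static store is lowered to an iassign through a pointer to the owning struct type; volatile fields
+  // are additionally bracketed by release/store-load memory barriers below.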
ASSERT(fieldID != 0, "invalid field ID"); + MIRStructType *structType = FEManager::GetTypeManager().GetStructTypeFromName(fieldInfo.GetStructName()); + CHECK_NULL_FATAL(structType); + MIRType *ptrStructType = GlobalTables::GetTypeTable().GetOrCreatePointerType(*structType, PTY_ref); + UniqueFEIRExpr exprDReadObj = FEIRBuilder::CreateExprDRead(varObj->Clone()); + UniqueFEIRExpr exprDReadField = FEIRBuilder::CreateExprDRead(varField->Clone()); + BaseNode *nodeObj = exprDReadObj->GenMIRNode(mirBuilder); + BaseNode *nodeField = exprDReadField->GenMIRNode(mirBuilder); + StmtNode *stmt = mirBuilder.CreateStmtIassign(*ptrStructType, fieldID, nodeObj, nodeField); +#ifndef ONLY_C + if (FEOptions::GetInstance().IsRC()) { + bc::RCSetter::GetRCSetter().CollectInputStmtField(stmt, fieldInfo.GetElemNameIdx()); + } +#endif + ans.emplace_back(stmt); + if (!FEOptions::GetInstance().IsNoBarrier() && fieldInfo.IsVolatile()) { + StmtNode *barrier = mirBuilder.GetMirModule().CurFuncCodeMemPool()->New(OP_membarrelease); + ans.emplace_front(barrier); + barrier = mirBuilder.GetMirModule().CurFuncCodeMemPool()->New(OP_membarstoreload); + ans.emplace_back(barrier); + } + return ans; +} + +std::string FEIRStmtFieldStore::DumpDotStringImpl() const { + std::stringstream ss; + ss << " " << id << ": " << GetFEIRNodeKindDescription(kind); + return ss.str(); +} + +// ---------- FEIRStmtFieldLoad ---------- +FEIRStmtFieldLoad::FEIRStmtFieldLoad(UniqueFEIRVar argVarObj, UniqueFEIRVar argVarField, + FEStructFieldInfo &argFieldInfo, bool argIsStatic) + : FEIRStmtAssign(FEIRNodeKind::kStmtFieldLoad, std::move(argVarField)), + varObj(std::move(argVarObj)), + fieldInfo(argFieldInfo), + isStatic(argIsStatic) {} + +FEIRStmtFieldLoad::FEIRStmtFieldLoad(UniqueFEIRVar argVarObj, UniqueFEIRVar argVarField, + FEStructFieldInfo &argFieldInfo, + bool argIsStatic, int32 argDexFileHashCode) + : FEIRStmtAssign(FEIRNodeKind::kStmtFieldLoad, std::move(argVarField)), + varObj(std::move(argVarObj)), + fieldInfo(argFieldInfo), + isStatic(argIsStatic), + dexFileHashCode(argDexFileHashCode) {} + +void FEIRStmtFieldLoad::RegisterDFGNodes2CheckPointImpl(FEIRStmtCheckPoint &checkPoint) { + if (isStatic) { + RegisterDFGNodes2CheckPointForStatic(checkPoint); + } else { + RegisterDFGNodes2CheckPointForNonStatic(checkPoint); + } +} + +bool FEIRStmtFieldLoad::CalculateDefs4AllUsesImpl(FEIRStmtCheckPoint &checkPoint, FEIRUseDefChain &udChain) { + if (!isStatic) { + std::set &defs = checkPoint.CalcuDef(varObj); + (void)udChain.insert(std::make_pair(&varObj, defs)); + if (defs.size() == 0) { + return false; + } + } + return true; +} + +void FEIRStmtFieldLoad::RegisterDFGNodes2CheckPointForStatic(FEIRStmtCheckPoint &checkPoint) { + var->SetDef(true); + checkPoint.RegisterDFGNode(var); +} + +void FEIRStmtFieldLoad::RegisterDFGNodes2CheckPointForNonStatic(FEIRStmtCheckPoint &checkPoint) { + checkPoint.RegisterDFGNode(varObj); + var->SetDef(true); + checkPoint.RegisterDFGNode(var); +} + +std::list FEIRStmtFieldLoad::GenMIRStmtsImpl(MIRBuilder &mirBuilder) const { + // prepare and find root + fieldInfo.Prepare(mirBuilder, isStatic); + if (isStatic) { + return GenMIRStmtsImplForStatic(mirBuilder); + } else { + return GenMIRStmtsImplForNonStatic(mirBuilder); + } +} + +bool FEIRStmtFieldLoad::NeedMCCForStatic(uint32 &typeID) const { + // check type first + const std::string &actualContainerName = fieldInfo.GetActualContainerName(); + typeID = FEManager::GetTypeManager().GetTypeIDFromMplClassName(actualContainerName, dexFileHashCode); + if (typeID == UINT32_MAX) { + 
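+    // An unresolved type ID means the owning class is unknown for this dex file, so the MCC helper path is used.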
return true; + } + + // check type field second + const std::string &fieldName = GlobalTables::GetStrTable().GetStringFromStrIdx(fieldInfo.GetFieldNameIdx()); + MIRStructType *currStructType = FEManager::GetTypeManager().GetStructTypeFromName(actualContainerName); + if (currStructType == nullptr) { + return true; + } + const auto &fields = currStructType->GetStaticFields(); + if (fields.size() == 0) { + return true; + } + for (auto f : fields) { + const std::string &fieldNameIt = GlobalTables::GetStrTable().GetStringFromStrIdx(f.first); + if (fieldName.compare(fieldNameIt) == 0) { + return false; + } + } + return true; +} + +std::list FEIRStmtFieldLoad::GenMIRStmtsImplForStatic(MIRBuilder &mirBuilder) const { + UniqueFEIRVar varTarget = std::make_unique(fieldInfo.GetFieldNameIdx(), fieldInfo.GetType()->Clone()); + varTarget->SetGlobal(true); + UniqueFEIRExpr exprDRead = FEIRBuilder::CreateExprDRead(std::move(varTarget)); + UniqueFEIRStmt stmtDAssign = FEIRBuilder::CreateStmtDAssign(var->Clone(), std::move(exprDRead)); + std::list mirStmts = stmtDAssign->GenMIRStmts(mirBuilder); + MapleVector args(mirBuilder.GetCurrentFuncCodeMpAllocator()->Adapter()); + + bool needMCCForStatic = false; + if (FEOptions::GetInstance().IsAOT()) { + uint32 typeID = UINT32_MAX; + needMCCForStatic = NeedMCCForStatic(typeID); + if (!needMCCForStatic) { + BaseNode *argNumExpr = mirBuilder.CreateIntConst(typeID, PTY_i32); + args.push_back(argNumExpr); + } else { + auto pt = fieldInfo.GetType()->GetPrimType(); + std::map primTypeFuncNameIdxMap; + FEUtils::InitPrimTypeFuncNameIdxMap(primTypeFuncNameIdxMap); + auto itorFunc = primTypeFuncNameIdxMap.find(pt); + CHECK_FATAL(itorFunc != primTypeFuncNameIdxMap.cend(), "java type not support %d", pt); + args.push_back(mirBuilder.CreateIntConst(fieldInfo.GetFieldID(), PTY_i32)); + MIRSymbol *retVarSym = nullptr; + retVarSym = var->GenerateLocalMIRSymbol(mirBuilder); + StmtNode *stmtMCC = mirBuilder.CreateStmtCallAssigned( + FEManager::GetTypeManager().GetMCCFunction(itorFunc->second)->GetPuidx(), MapleVector(args), + retVarSym, OP_callassigned); + mirStmts.clear(); + mirStmts.emplace_front(stmtMCC); + } + } + if (!needMCCForStatic) { + TyIdx containerTyIdx = fieldInfo.GetActualContainerType()->GenerateMIRType()->GetTypeIndex(); + if (!mirBuilder.GetCurrentFunction()->IsClinit() || + mirBuilder.GetCurrentFunction()->GetClassTyIdx() != containerTyIdx) { + StmtNode *stmtClinitCheck = mirBuilder.CreateStmtIntrinsicCall(INTRN_JAVA_CLINIT_CHECK, std::move(args), + containerTyIdx); + mirStmts.emplace_front(stmtClinitCheck); + } + } + if (!FEOptions::GetInstance().IsNoBarrier() && fieldInfo.IsVolatile()) { + StmtNode *barrier = mirBuilder.GetMirModule().CurFuncCodeMemPool()->New(OP_membaracquire); + mirStmts.emplace_back(barrier); + } + return mirStmts; +} + +std::list FEIRStmtFieldLoad::GenMIRStmtsImplForNonStatic(MIRBuilder &mirBuilder) const { + std::list ans; + FieldID fieldID = fieldInfo.GetFieldID(); + ASSERT(fieldID != 0, "invalid field ID"); + MIRStructType *structType = FEManager::GetTypeManager().GetStructTypeFromName(fieldInfo.GetStructName()); + CHECK_NULL_FATAL(structType); + MIRType *ptrStructType = GlobalTables::GetTypeTable().GetOrCreatePointerType(*structType, PTY_ref); + MIRType *fieldType = fieldInfo.GetType()->GenerateMIRTypeAuto(fieldInfo.GetSrcLang()); + UniqueFEIRExpr exprDReadObj = FEIRBuilder::CreateExprDRead(varObj->Clone()); + BaseNode *nodeObj = exprDReadObj->GenMIRNode(mirBuilder); + BaseNode *nodeVal = mirBuilder.CreateExprIread(*fieldType, 
*ptrStructType, fieldID, nodeObj); + MIRSymbol *valRet = var->GenerateLocalMIRSymbol(mirBuilder); + StmtNode *stmt = mirBuilder.CreateStmtDassign(*valRet, 0, nodeVal); + ans.emplace_back(stmt); + if (!FEOptions::GetInstance().IsNoBarrier() && fieldInfo.IsVolatile()) { + StmtNode *barrier = mirBuilder.GetMirModule().CurFuncCodeMemPool()->New(OP_membaracquire); + ans.emplace_back(barrier); + } + return ans; +} + +std::string FEIRStmtFieldLoad::DumpDotStringImpl() const { + std::stringstream ss; + ss << " " << id << ": " << GetFEIRNodeKindDescription(kind); + return ss.str(); +} + +// ---------- FEIRStmtCallAssign ---------- +std::map FEIRStmtCallAssign::mapOpAssignToOp = FEIRStmtCallAssign::InitMapOpAssignToOp(); +std::map FEIRStmtCallAssign::mapOpToOpAssign = FEIRStmtCallAssign::InitMapOpToOpAssign(); + +FEIRStmtCallAssign::FEIRStmtCallAssign(FEStructMethodInfo &argMethodInfo, Opcode argMIROp, UniqueFEIRVar argVarRet, + bool argIsStatic) + : FEIRStmtAssign(FEIRNodeKind::kStmtCallAssign, std::move(argVarRet)), + methodInfo(argMethodInfo), + mirOp(argMIROp), + isStatic(argIsStatic) {} + +std::map FEIRStmtCallAssign::InitMapOpAssignToOp() { + std::map ans; + ans[OP_callassigned] = OP_call; + ans[OP_virtualcallassigned] = OP_virtualcall; + ans[OP_superclasscallassigned] = OP_superclasscall; + ans[OP_interfacecallassigned] = OP_interfacecall; + return ans; +} + +std::map FEIRStmtCallAssign::InitMapOpToOpAssign() { + std::map ans; + ans[OP_call] = OP_callassigned; + ans[OP_virtualcall] = OP_virtualcallassigned; + ans[OP_superclasscall] = OP_superclasscallassigned; + ans[OP_interfacecall] = OP_interfacecallassigned; + return ans; +} + +void FEIRStmtCallAssign::RegisterDFGNodes2CheckPointImpl(FEIRStmtCheckPoint &checkPoint) { + for (const UniqueFEIRExpr &exprArg : exprArgs) { + exprArg->RegisterDFGNodes2CheckPoint(checkPoint); + } + if (!methodInfo.IsReturnVoid() && (var != nullptr)) { + var->SetDef(true); + checkPoint.RegisterDFGNode(var); + } +} + +bool FEIRStmtCallAssign::CalculateDefs4AllUsesImpl(FEIRStmtCheckPoint &checkPoint, FEIRUseDefChain &udChain) { + bool success = true; + for (const UniqueFEIRExpr &exprArg : exprArgs) { + success = success && exprArg->CalculateDefs4AllUses(checkPoint, udChain); + } + return success; +} + +std::list FEIRStmtCallAssign::GenMIRStmtsImpl(MIRBuilder &mirBuilder) const { + // If the struct is a class, we check it if external type directly. + // If the struct is a array type, we check its baseType if external type. + // FEUtils::GetBaseTypeName returns a class type itself or an arrayType's base type. 
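+  // GetOrCreateClassOrInterfaceType below registers that base name with FETypeFlag::kSrcExtern so externally
+  // referenced classes are known to the type manager.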
+ if (methodInfo.GetSrcLang() == kSrcLangJava) { + std::string baseStructName = FEUtils::GetBaseTypeName(methodInfo.GetStructName()); + bool isCreate = false; + (void)FEManager::GetTypeManager().GetOrCreateClassOrInterfaceType( + baseStructName, false, FETypeFlag::kSrcExtern, isCreate); + } + std::list ans; + StmtNode *stmtCall = nullptr; + // prepare and find root + methodInfo.Prepare(mirBuilder, isStatic); + if (methodInfo.IsJavaPolymorphicCall() || methodInfo.IsJavaDynamicCall()) { + return GenMIRStmtsUseZeroReturn(mirBuilder); + } + MapleVector args(mirBuilder.GetCurrentFuncCodeMpAllocator()->Adapter()); + args.reserve(exprArgs.size()); + size_t index = 0; + const std::string funcName = methodInfo.GetMirFunc()->GetName(); + for (const UniqueFEIRExpr &exprArg : exprArgs) { + InsertNonnullCheckingInArgs(exprArg, index++, mirBuilder, ans, funcName); + ENCChecker::InsertBoundaryAssignChecking(mirBuilder, ans, exprArg, loc); + ENCChecker::CheckBoundaryLenFinalAddr(mirBuilder, exprArg, loc); + BaseNode *node = exprArg->GenMIRNode(mirBuilder); + args.push_back(node); + } + MIRSymbol *retVarSym = nullptr; + if (!methodInfo.IsReturnVoid() && var != nullptr) { + retVarSym = var->GenerateLocalMIRSymbol(mirBuilder); + InsertNonnullInRetVar(*retVarSym); + } + if (retVarSym == nullptr) { + stmtCall = mirBuilder.CreateStmtCall(methodInfo.GetPuIdx(), std::move(args), mirOp); + } else { + stmtCall = mirBuilder.CreateStmtCallAssigned(methodInfo.GetPuIdx(), std::move(args), retVarSym, mirOp); + } + ans.push_back(stmtCall); + return ans; +} + + +void FEIRStmtCallAssign::InsertNonnullInRetVar(MIRSymbol &retVarSym) const { + if (methodInfo.GetMirFunc()->GetFuncAttrs().GetAttr(FUNCATTR_nonnull)) { + TypeAttrs attrs = TypeAttrs(); + attrs.SetAttr(ATTR_nonnull); + retVarSym.AddAttrs(attrs); + } +} + +void FEIRStmtCallAssign::InsertNonnullCheckingInArgs(const UniqueFEIRExpr &expr, size_t index, MIRBuilder &mirBuilder, + std::list &ans, const std::string& funcName) const { + if (!FEOptions::GetInstance().IsNpeCheckDynamic()) { + return; + } + if (index >= methodInfo.GetMirFunc()->GetParamSize()) { // Skip variable parameter + return; + } + MIRType *srcType = expr->GetType()->GenerateMIRTypeAuto(); + MIRType *dstType = methodInfo.GetMirFunc()->GetNthParamType(index); + ENCChecker::CheckNonnullFieldInStruct(*srcType, *dstType, loc); + if (!methodInfo.GetMirFunc()->GetNthParamAttr(index).GetAttr(ATTR_nonnull)) { + return; + } + if (ENCChecker::HasNullExpr(expr)) { + FE_ERR(kLncErr, loc, "null passed to a callee that requires a nonnull argument[the %s argument]", + ENCChecker::GetNthStr(index).c_str()); + return; + } + if (expr->GetPrimType() == PTY_ptr) { + UniqueFEIRStmt stmt = std::make_unique(OP_callassertnonnull, expr->Clone(), + funcName, index); + stmt->SetSrcLoc(expr->GetLoc()); + std::list stmts = stmt->GenMIRStmts(mirBuilder); + ans.splice(ans.end(), stmts); + } +} + +std::list FEIRStmtCallAssign::GenMIRStmtsUseZeroReturn(MIRBuilder &mirBuilder) const { + std::list ans; + if (methodInfo.IsReturnVoid()) { + return ans; + } + const FEIRType *retType = methodInfo.GetReturnType(); + MIRType *mirRetType = retType->GenerateMIRTypeAuto(kSrcLangJava); + MIRSymbol *mirRetSym = var->GenerateLocalMIRSymbol(mirBuilder); + BaseNode *nodeZero; + if (mirRetType->IsScalarType()) { + switch (mirRetType->GetPrimType()) { + case PTY_u1: + case PTY_i8: + case PTY_i16: + case PTY_u16: + case PTY_i32: + nodeZero = mirBuilder.CreateIntConst(0, PTY_i32); + break; + case PTY_i64: + nodeZero = mirBuilder.CreateIntConst(0, PTY_i64); 
+ break; + case PTY_f32: + nodeZero = mirBuilder.CreateFloatConst(0.0f); + break; + case PTY_f64: + nodeZero = mirBuilder.CreateDoubleConst(0.0); + break; + default: + nodeZero = mirBuilder.CreateIntConst(0, PTY_i32); + break; + } + } else { + nodeZero = mirBuilder.CreateIntConst(0, PTY_ref); + } + StmtNode *stmt = mirBuilder.CreateStmtDassign(mirRetSym->GetStIdx(), 0, nodeZero); + ans.push_back(stmt); + return ans; +} + +Opcode FEIRStmtCallAssign::AdjustMIROp() const { + if (methodInfo.IsReturnVoid()) { + auto it = mapOpAssignToOp.find(mirOp); + if (it != mapOpAssignToOp.cend()) { + return it->second; + } + } else { + auto it = mapOpToOpAssign.find(mirOp); + if (it != mapOpToOpAssign.cend()) { + return it->second; + } + } + return mirOp; +} + +std::string FEIRStmtCallAssign::DumpDotStringImpl() const { + std::stringstream ss; + ss << " " << id << ": " << GetFEIRNodeKindDescription(kind); + if (var != nullptr) { + ss << " def : " << var->GetNameRaw() << ", "; + } + if (exprArgs.size() > 0) { + ss << " uses : "; + for (const UniqueFEIRExpr &exprArg : exprArgs) { + ss << exprArg->DumpDotString() << ", "; + } + } + return ss.str(); +} + +// ---------- FEIRStmtICallAssign ---------- +FEIRStmtICallAssign::FEIRStmtICallAssign() + : FEIRStmtAssign(FEIRNodeKind::kStmtICallAssign, nullptr) {} + +void FEIRStmtICallAssign::RegisterDFGNodes2CheckPointImpl(FEIRStmtCheckPoint &checkPoint) { + CHECK_FATAL(false, "NYI"); +} + +bool FEIRStmtICallAssign::CalculateDefs4AllUsesImpl(FEIRStmtCheckPoint &checkPoint, FEIRUseDefChain &udChain) { + bool success = true; + for (const UniqueFEIRExpr &exprArg : exprArgs) { + success = success && exprArg->CalculateDefs4AllUses(checkPoint, udChain); + } + return success; +} + +void FEIRStmtICallAssign::InsertNonnullCheckingInArgs(MIRBuilder &mirBuilder, std::list &ans) const { + if (!FEOptions::GetInstance().IsNpeCheckDynamic() || exprArgs.size() <= 1) { + return; + } + const MIRFuncType *funcType = FEUtils::GetFuncPtrType(*exprArgs.front()->GetType()->GenerateMIRType()); + if (funcType == nullptr) { + return; + } + int idx = -2; // the first arg is function pointer + size_t size = funcType->GetParamAttrsList().size(); + for (const auto &expr : exprArgs) { + ++idx; + if (idx < 0 || idx >= static_cast(size)) { + continue; + } + MIRType *srcType = expr->GetType()->GenerateMIRTypeAuto(); + MIRType *dstType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(funcType->GetNthParamType(idx)); + ENCChecker::CheckNonnullFieldInStruct(*srcType, *dstType, loc); + if (!funcType->GetNthParamAttrs(idx).GetAttr(ATTR_nonnull)) { + continue; + } + if (ENCChecker::HasNullExpr(expr)) { + FE_ERR(kLncErr, loc, "null passed to a callee that requires a nonnull argument[the %s argument]", + ENCChecker::GetNthStr(idx).c_str()); + continue; + } + if (expr->GetPrimType() == PTY_ptr) { + UniqueFEIRStmt stmt = std::make_unique( + OP_callassertnonnull, expr->Clone(), "function_pointer", idx); + stmt->SetSrcLoc(expr->GetLoc()); + std::list stmts = stmt->GenMIRStmts(mirBuilder); + ans.splice(ans.end(), stmts); + } + } +} + +void FEIRStmtICallAssign::InsertNonnullInRetVar(MIRSymbol &retVarSym) const { + if (!FEOptions::GetInstance().IsNpeCheckDynamic() || exprArgs.size() < 1) { + return; + } + const MIRFuncType *funcType = FEUtils::GetFuncPtrType(*exprArgs.front()->GetType()->GenerateMIRType()); + if (funcType == nullptr) { + return; + } + if (funcType->GetRetAttrs().GetAttr(ATTR_nonnull)) { + TypeAttrs attrs = TypeAttrs(); + attrs.SetAttr(ATTR_nonnull); + retVarSym.AddAttrs(attrs); + } +} + +std::list 
FEIRStmtICallAssign::GenMIRStmtsImpl(MIRBuilder &mirBuilder) const { + std::list ans; + StmtNode *stmtICall = nullptr; + MapleVector args(mirBuilder.GetCurrentFuncCodeMpAllocator()->Adapter()); + args.reserve(exprArgs.size()); + for (const UniqueFEIRExpr &exprArg : exprArgs) { + BaseNode *node = exprArg->GenMIRNode(mirBuilder); + ENCChecker::InsertBoundaryAssignChecking(mirBuilder, ans, exprArg, loc); + ENCChecker::CheckBoundaryLenFinalAddr(mirBuilder, exprArg, loc); + args.push_back(node); + } + InsertNonnullCheckingInArgs(mirBuilder, ans); + bool isC = FEManager::GetModule().IsCModule(); + MIRType *pMIRType = nullptr; + if (isC) { + CHECK_FATAL(prototype != nullptr, "cannot find prototype for icall"); + pMIRType = prototype->GenerateMIRTypeAuto(); + CHECK_NULL_FATAL(pMIRType); + } + if (var != nullptr) { + MIRSymbol *retVarSym = var->GenerateLocalMIRSymbol(mirBuilder); + InsertNonnullInRetVar(*retVarSym); + stmtICall = isC ? mirBuilder.CreateStmtIcallprotoAssigned(std::move(args), *retVarSym, pMIRType->GetTypeIndex()) : + mirBuilder.CreateStmtIcallAssigned(std::move(args), *retVarSym); + } else { + stmtICall = isC ? mirBuilder.CreateStmtIcallproto(std::move(args), pMIRType->GetTypeIndex()) : + mirBuilder.CreateStmtIcall(std::move(args)); + } + ans.push_back(stmtICall); + return ans; +} + +std::string FEIRStmtICallAssign::DumpDotStringImpl() const { + std::stringstream ss; + ss << " " << id << ": " << GetFEIRNodeKindDescription(kind); + if (var != nullptr) { + ss << " def : " << var->GetNameRaw() << ", "; + } + if (exprArgs.size() > 0) { + ss << " uses : "; + for (const UniqueFEIRExpr &exprArg : exprArgs) { + ss << exprArg->DumpDotString() << ", "; + } + } + return ss.str(); +} + +// ---------- FEIRStmtIntrinsicCallAssign ---------- +FEIRStmtIntrinsicCallAssign::FEIRStmtIntrinsicCallAssign(MIRIntrinsicID id, UniqueFEIRType typeIn, + UniqueFEIRVar argVarRet) + : FEIRStmtAssign(FEIRNodeKind::kStmtIntrinsicCallAssign, std::move(argVarRet)), + intrinsicId(id), + type(std::move(typeIn)) {} + +FEIRStmtIntrinsicCallAssign::FEIRStmtIntrinsicCallAssign(MIRIntrinsicID id, UniqueFEIRType typeIn, + UniqueFEIRVar argVarRet, + std::unique_ptr> exprListIn) + : FEIRStmtAssign(FEIRNodeKind::kStmtIntrinsicCallAssign, std::move(argVarRet)), + intrinsicId(id), + type(std::move(typeIn)), + exprList(std::move(exprListIn)) {} + +FEIRStmtIntrinsicCallAssign::FEIRStmtIntrinsicCallAssign(MIRIntrinsicID id, const std::string &funcNameIn, + const std::string &protoIN, + std::unique_ptr> argsIn) + : FEIRStmtAssign(FEIRNodeKind::kStmtIntrinsicCallAssign, nullptr), + intrinsicId(id), + funcName(funcNameIn), + proto(protoIN), + polyArgs(std::move(argsIn)) {} +FEIRStmtIntrinsicCallAssign::FEIRStmtIntrinsicCallAssign(MIRIntrinsicID id, const std::string &funcNameIn, + const std::string &protoIN, + std::unique_ptr> argsIn, + uint32 callerClassTypeIDIn, bool isInStaticFuncIn) + : FEIRStmtAssign(FEIRNodeKind::kStmtIntrinsicCallAssign, nullptr), + intrinsicId(id), + funcName(funcNameIn), + proto(protoIN), + polyArgs(std::move(argsIn)), + callerClassTypeID(callerClassTypeIDIn), + isInStaticFunc(isInStaticFuncIn) {} + +FEIRStmtIntrinsicCallAssign::FEIRStmtIntrinsicCallAssign(MIRIntrinsicID id, UniqueFEIRType typeIn, + UniqueFEIRVar argVarRet, uint32 typeIDIn) + : FEIRStmtAssign(FEIRNodeKind::kStmtIntrinsicCallAssign, std::move(argVarRet)), + intrinsicId(id), + type(std::move(typeIn)), + typeID(typeIDIn) {} + +std::string FEIRStmtIntrinsicCallAssign::DumpDotStringImpl() const { + std::stringstream ss; + ss << " " << id << ": " 
<< GetFEIRNodeKindDescription(kind); + return ss.str(); +} + +void FEIRStmtIntrinsicCallAssign::RegisterDFGNodes2CheckPointImpl(FEIRStmtCheckPoint &checkPoint) { + if ((var != nullptr) && (var.get() != nullptr)) { + var->SetDef(true); + checkPoint.RegisterDFGNode(var); + } +} + +std::list FEIRStmtIntrinsicCallAssign::GenMIRStmtsImpl(MIRBuilder &mirBuilder) const { + std::list ans; + StmtNode *stmtCall = nullptr; + if (intrinsicId == INTRN_JAVA_CLINIT_CHECK) { + MapleVector args(mirBuilder.GetCurrentFuncCodeMpAllocator()->Adapter()); + if (FEOptions::GetInstance().IsAOT()) { + BaseNode *argNumExpr = mirBuilder.CreateIntConst(typeID, PTY_i32); + args.push_back(argNumExpr); + } + stmtCall = mirBuilder.CreateStmtIntrinsicCall(INTRN_JAVA_CLINIT_CHECK, std::move(args), + type->GenerateMIRType()->GetTypeIndex()); + } else if (intrinsicId == INTRN_JAVA_FILL_NEW_ARRAY) { + return GenMIRStmtsForFillNewArray(mirBuilder); + } else if (intrinsicId == INTRN_JAVA_POLYMORPHIC_CALL) { + return GenMIRStmtsForInvokePolyMorphic(mirBuilder); + } else if (intrinsicId == INTRN_C_va_start || intrinsicId == INTRN_C_memcpy) { + MapleVector args(mirBuilder.GetCurrentFuncCodeMpAllocator()->Adapter()); + if (exprList != nullptr) { + for (const auto &expr : *exprList) { + BaseNode *node = expr->GenMIRNode(mirBuilder); + if (expr->IsAddrof()) { + node = ReplaceAddrOfNode(node); // addrof va_list + } + args.push_back(node); + } + } + MIRSymbol *retVarSym = nullptr; + if (var != nullptr) { + retVarSym = var->GenerateLocalMIRSymbol(mirBuilder); + stmtCall = mirBuilder.CreateStmtIntrinsicCallAssigned(intrinsicId, std::move(args), retVarSym); + } else { + stmtCall = mirBuilder.CreateStmtIntrinsicCall(intrinsicId, std::move(args), TyIdx(0)); + } + } else { + MapleVector args(mirBuilder.GetCurrentFuncCodeMpAllocator()->Adapter()); + if (exprList != nullptr) { + for (const auto &expr : *exprList) { + BaseNode *node = expr->GenMIRNode(mirBuilder); + args.push_back(node); + } + } + MIRSymbol *retVarSym = nullptr; + if ((var != nullptr) && (var.get() != nullptr)) { + retVarSym = var->GenerateLocalMIRSymbol(mirBuilder); + stmtCall = mirBuilder.CreateStmtIntrinsicCallAssigned(intrinsicId, std::move(args), retVarSym); + } else { + stmtCall = mirBuilder.CreateStmtIntrinsicCall(intrinsicId, std::move(args), TyIdx(0)); + } + } + // other intrinsic call should be implemented + ans.emplace_back(stmtCall); + return ans; +} + +std::list FEIRStmtIntrinsicCallAssign::GenMIRStmtsForFillNewArray(MIRBuilder &mirBuilder) const { + std::list ans; + MapleVector args(mirBuilder.GetCurrentFuncCodeMpAllocator()->Adapter()); + if (exprList != nullptr) { + for (const auto &expr : *exprList) { + BaseNode *node = expr->GenMIRNode(mirBuilder); + args.push_back(node); + } + } + MIRSymbol *retVarSym = nullptr; + if ((var != nullptr) && (var.get() != nullptr)) { + retVarSym = var->GenerateLocalMIRSymbol(mirBuilder); + } + auto *stmtCall = mirBuilder.CreateStmtIntrinsicCallAssigned(INTRN_JAVA_FILL_NEW_ARRAY, std::move(args), retVarSym, + type->GenerateMIRType(true)->GetTypeIndex()); + (void)ans.emplace_back(stmtCall); + return ans; +} + +std::list FEIRStmtIntrinsicCallAssign::GenMIRStmtsForInvokePolyMorphic(MIRBuilder &mirBuilder) const { + std::list ans; + StmtNode *stmtCall = nullptr; + UniqueFEIRVar tmpVar; + bool needRetypeRet = false; + MIRSymbol *retVarSym = nullptr; + if ((var != nullptr) && (var.get() != nullptr)) { + PrimType ptyRet = var->GetTypeRef().GetPrimType(); + needRetypeRet = (ptyRet == PTY_f32 || ptyRet == PTY_f64); + tmpVar = var->Clone(); 
+ if (ptyRet == PTY_f32) { + tmpVar->SetType(std::make_unique(PTY_i32, + GlobalTables::GetStrTable().GetOrCreateStrIdxFromName("I"))); + } else if (ptyRet == PTY_f64) { + tmpVar->SetType(std::make_unique(PTY_i64, + GlobalTables::GetStrTable().GetOrCreateStrIdxFromName("J"))); + } + retVarSym = tmpVar->GenerateLocalMIRSymbol(mirBuilder); + } + MapleVector args(mirBuilder.GetCurrentFuncCodeMpAllocator()->Adapter()); + ConstructArgsForInvokePolyMorphic(mirBuilder, args); + stmtCall = mirBuilder.CreateStmtIntrinsicCallAssigned(INTRN_JAVA_POLYMORPHIC_CALL, std::move(args), retVarSym); + ans.emplace_back(stmtCall); + if (needRetypeRet) { + UniqueFEIRStmt retypeStmt = FEIRBuilder::CreateStmtRetype(var->Clone(), std::move(tmpVar)); + std::list retypeMirStmts = retypeStmt->GenMIRStmts(mirBuilder); + for (auto elem : retypeMirStmts) { + ans.emplace_back(elem); + } + } + return ans; +} + +void FEIRStmtIntrinsicCallAssign::ConstructArgsForInvokePolyMorphic(MIRBuilder &mirBuilder, + MapleVector &intrnCallargs) const { + MIRSymbol *funcNameVal = FEManager::GetJavaStringManager().GetLiteralVar(funcName); + if (funcNameVal == nullptr) { + funcNameVal = FEManager::GetJavaStringManager().CreateLiteralVar(mirBuilder, funcName, false); + } + BaseNode *dreadExprFuncName = mirBuilder.CreateExprAddrof(0, *funcNameVal); + dreadExprFuncName->SetPrimType(PTY_ptr); + intrnCallargs.push_back(dreadExprFuncName); + + MIRSymbol *protoNameVal = FEManager::GetJavaStringManager().GetLiteralVar(proto); + if (protoNameVal == nullptr) { + protoNameVal = FEManager::GetJavaStringManager().CreateLiteralVar(mirBuilder, proto, false); + } + BaseNode *dreadExprProtoName = mirBuilder.CreateExprAddrof(0, *protoNameVal); + dreadExprProtoName->SetPrimType(PTY_ptr); + intrnCallargs.push_back(dreadExprProtoName); + + BaseNode *argNumExpr = mirBuilder.CreateIntConst(static_cast(polyArgs->size() - 1), PTY_i32); + intrnCallargs.push_back(argNumExpr); + + bool isAfterMethodHandle = true; + for (const auto &arg : *polyArgs) { + intrnCallargs.push_back(mirBuilder.CreateExprDread(*(arg->GenerateMIRSymbol(mirBuilder)))); + if (FEOptions::GetInstance().IsAOT() && isAfterMethodHandle) { + if (isInStaticFunc) { + intrnCallargs.push_back(mirBuilder.CreateIntConst(callerClassTypeID, PTY_i32)); + } else { + std::unique_ptr varThisAsLocalVar = std::make_unique(FEUtils::GetThisIdx(), + FEIRTypeDefault(PTY_ref).Clone()); + intrnCallargs.push_back(mirBuilder.CreateExprDread(*(varThisAsLocalVar->GenerateMIRSymbol(mirBuilder)))); + } + isAfterMethodHandle = false; + } + } +} + +// ---------- FEIRExpr ---------- +FEIRExpr::FEIRExpr(FEIRNodeKind argKind) + : kind(argKind), + isNestable(true), + isAddrof(false), + hasException(false), + isBoundaryChecking(false), + isEnhancedChecking(true) { + type = std::make_unique(); +} + +FEIRExpr::FEIRExpr(FEIRNodeKind argKind, std::unique_ptr argType) + : kind(argKind), + isNestable(true), + isAddrof(false), + hasException(false), + isBoundaryChecking(false), + isEnhancedChecking(true), + type(std::move(argType)) {} + +std::unique_ptr FEIRExpr::Clone() { + auto expr = CloneImpl(); + expr->isNestable = IsNestable(); + expr->isAddrof = IsAddrof(); + expr->hasException = HasException(); + expr->isBoundaryChecking = IsBoundaryChecking(); + expr->isEnhancedChecking = IsEnhancedChecking(); + expr->SetLoc(loc); + return expr; +} + +bool FEIRExpr::IsNestableImpl() const { + return isNestable; +} + +bool FEIRExpr::IsAddrofImpl() const { + return isAddrof; +} + +bool FEIRExpr::HasExceptionImpl() const { + return hasException; +} + 
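+// GetVarUsesImpl() defaults to an empty list; expression kinds that actually read variables
+// (dread, binary, ternary, nary and array-load expressions below) override it so that callers
+// such as DumpDotString() and FEIRExprAddrofArray::GenMIRNodeImpl() see every FEIRVar they use.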
+std::vector<FEIRVar*> FEIRExpr::GetVarUsesImpl() const {
+  return std::vector<FEIRVar*>();
+}
+
+std::string FEIRExpr::DumpDotString() const {
+  std::stringstream ss;
+  if (kind == FEIRNodeKind::kExprArrayLoad) {
+    ss << " kExprArrayLoad: ";
+  }
+  std::vector<FEIRVar*> varUses = GetVarUses();
+  ss << "[ ";
+  for (FEIRVar *use : varUses) {
+    ss << use->GetNameRaw() << ", ";
+  }
+  ss << "] ";
+  return ss.str();
+}
+
+// ---------- FEIRExprConst ----------
+FEIRExprConst::FEIRExprConst()
+    : FEIRExpr(FEIRNodeKind::kExprConst) {
+  ASSERT(type != nullptr, "type is nullptr");
+  type->SetPrimType(PTY_i32);
+  value.u64 = 0;
+}
+
+FEIRExprConst::FEIRExprConst(int64 val, PrimType argType)
+    : FEIRExpr(FEIRNodeKind::kExprConst) {
+  ASSERT(type != nullptr, "type is nullptr");
+  type->SetPrimType(argType);
+  value.i64 = val;
+  CheckRawValue2SetZero();
+}
+
+FEIRExprConst::FEIRExprConst(uint64 val, PrimType argType)
+    : FEIRExpr(FEIRNodeKind::kExprConst) {
+  ASSERT(type != nullptr, "type is nullptr");
+  type->SetPrimType(argType);
+  value.u64 = val;
+  CheckRawValue2SetZero();
+}
+
+FEIRExprConst::FEIRExprConst(uint32 val)
+    : FEIRExpr(FEIRNodeKind::kExprConst) {
+  type->SetPrimType(PTY_u32);
+  value.u32 = val;
+  CheckRawValue2SetZero();
+}
+
+FEIRExprConst::FEIRExprConst(float val)
+    : FEIRExpr(FEIRNodeKind::kExprConst) {
+  ASSERT(type != nullptr, "type is nullptr");
+  type->SetPrimType(PTY_f32);
+  value.f32 = val;
+  CheckRawValue2SetZero();
+}
+
+FEIRExprConst::FEIRExprConst(double val)
+    : FEIRExpr(FEIRNodeKind::kExprConst) {
+  ASSERT(type != nullptr, "type is nullptr");
+  type->SetPrimType(PTY_f64);
+  value.f64 = val;
+  CheckRawValue2SetZero();
+}
+
+std::unique_ptr<FEIRExpr> FEIRExprConst::CloneImpl() const {
+  std::unique_ptr<FEIRExpr> expr = std::make_unique<FEIRExprConst>();
+  FEIRExprConst *exprConst = static_cast<FEIRExprConst*>(expr.get());
+  exprConst->value.u64 = value.u64;
+  ASSERT(type != nullptr, "type is nullptr");
+  exprConst->type->SetPrimType(type->GetPrimType());
+  exprConst->CheckRawValue2SetZero();
+  return expr;
+}
+
+BaseNode *FEIRExprConst::GenMIRNodeImpl(MIRBuilder &mirBuilder) const {
+  PrimType primType = GetPrimType();
+  switch (primType) {
+    case PTY_u1:
+    case PTY_u8:
+    case PTY_u16:
+    case PTY_u32:
+      return mirBuilder.CreateIntConst(value.u32, primType);
+    case PTY_u64:
+    case PTY_i8:
+    case PTY_i16:
+    case PTY_i32:
+    case PTY_i64:
+    case PTY_i128:
+    case PTY_u128:
+    case PTY_ref:
+    case PTY_ptr:
+      return mirBuilder.CreateIntConst(static_cast<uint64>(value.i64), primType);
+    case PTY_f32:
+      return mirBuilder.CreateFloatConst(value.f32);
+    case PTY_f64:
+      return mirBuilder.CreateDoubleConst(value.f64);
+    default:
+      ERR(kLncErr, "unsupported const kind");
+      return nullptr;
+  }
+}
+
+uint32 FEIRExprConst::HashImpl() const {
+  return (static_cast<uint32>(kind) << kOpHashShift) + (type->Hash() << kTypeHashShift) +
+         static_cast<uint32>(std::hash<uint64>{}(value.u64));
+}
+
+void FEIRExprConst::CheckRawValue2SetZero() {
+  if (value.u64 == 0) {
+    type->SetZero(true);
+  }
+}
+
+// ---------- FEIRExprSizeOfType ----------
+FEIRExprSizeOfType::FEIRExprSizeOfType(UniqueFEIRType ty)
+    : FEIRExpr(FEIRNodeKind::kExprSizeOfType,
+               FEIRTypeHelper::CreateTypeNative(*GlobalTables::GetTypeTable().GetPrimType(PTY_u32))),
+      feirType(std::move(ty)) {}
+
+std::unique_ptr<FEIRExpr> FEIRExprSizeOfType::CloneImpl() const {
+  std::unique_ptr<FEIRExpr> expr = std::make_unique<FEIRExprSizeOfType>(feirType->Clone());
+  return expr;
+}
+
+BaseNode *FEIRExprSizeOfType::GenMIRNodeImpl(MIRBuilder &mirBuilder) const {
+  return mirBuilder.CreateExprSizeoftype(*(feirType->GenerateMIRTypeAuto()));
+}
+
+// ---------- FEIRExprDRead ----------
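+// FEIRExprDRead wraps a direct read of a FEIR variable. GenMIRNodeImpl() lowers it to a MIR
+// dread of the symbol generated for varSrc; when fieldID is non-zero the variable must be a
+// struct or union, and the recorded field type is used for the read.
+// Minimal usage sketch (assumes an existing UniqueFEIRVar named var):
+//   UniqueFEIRExpr dread = std::make_unique<FEIRExprDRead>(var->Clone());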
+FEIRExprDRead::FEIRExprDRead(std::unique_ptr argVarSrc) + : FEIRExpr(FEIRNodeKind::kExprDRead) { + SetVarSrc(std::move(argVarSrc)); +} + +FEIRExprDRead::FEIRExprDRead(std::unique_ptr argType, std::unique_ptr argVarSrc) + : FEIRExpr(FEIRNodeKind::kExprDRead, std::move(argType)) { + SetVarSrc(std::move(argVarSrc)); +} + +std::unique_ptr FEIRExprDRead::CloneImpl() const { + UniqueFEIRExpr expr = std::make_unique(type->Clone(), varSrc->Clone()); + if (fieldType != nullptr) { + expr->SetFieldType(fieldType->Clone()); + } + expr->SetFieldID(fieldID); + return expr; +} + +BaseNode *FEIRExprDRead::GenMIRNodeImpl(MIRBuilder &mirBuilder) const { + MIRType *type = varSrc->GetType()->GenerateMIRTypeAuto(); + MIRSymbol *symbol = varSrc->GenerateMIRSymbol(mirBuilder); + ASSERT(type != nullptr, "type is nullptr"); + PrimType regType = GetRegPrimType(type->GetPrimType()); + if (regType != type->GetPrimType()) { + type = GlobalTables::GetTypeTable().GetPrimType(regType); + } + AddrofNode *node = mirBuilder.CreateExprDread(*type, *symbol); + if (fieldID != 0) { + CHECK_FATAL((type->GetKind() == MIRTypeKind::kTypeStruct || type->GetKind() == MIRTypeKind::kTypeUnion), + "If fieldID is not 0, then the variable must be a structure"); + CHECK_NULL_FATAL(fieldType); + MIRType *fieldMIRType = fieldType->GenerateMIRTypeAuto(); + regType = GetRegPrimType(fieldMIRType->GetPrimType()); + if (regType != fieldMIRType->GetPrimType()) { + fieldMIRType = GlobalTables::GetTypeTable().GetPrimType(regType); + } + node = mirBuilder.CreateExprDread(*fieldMIRType, fieldID, *symbol); + } + return node; +} + +void FEIRExprDRead::SetVarSrc(std::unique_ptr argVarSrc) { + CHECK_FATAL(argVarSrc != nullptr, "input is nullptr"); + varSrc = std::move(argVarSrc); + SetType(varSrc->GetType()->Clone()); +} + +std::vector FEIRExprDRead::GetVarUsesImpl() const { + return std::vector({ varSrc.get() }); +} + +PrimType FEIRExprDRead::GetPrimTypeImpl() const { + PrimType primType = type->GetPrimType(); + if (primType == PTY_agg && fieldID != 0) { + CHECK_NULL_FATAL(fieldType); + return fieldType->GetPrimType(); + } + return primType; +} + +void FEIRExprDRead::SetTypeImpl(std::unique_ptr argType) { + if (fieldID != 0) { + fieldType = std::move(argType); + } else { + type = std::move(argType); + } +} + +FEIRType *FEIRExprDRead::GetTypeImpl() const { + if (fieldID != 0) { + CHECK_NULL_FATAL(fieldType); + return fieldType.get(); + } + return type.get(); +} + +const FEIRType &FEIRExprDRead::GetTypeRefImpl() const { + return *GetTypeImpl(); +} + +// ---------- FEIRExprIRead ---------- +std::unique_ptr FEIRExprIRead::CloneImpl() const { + std::unique_ptr expr = std::make_unique(type->Clone(), ptrType->Clone(), + fieldID, subExpr->Clone()); + return expr; +} + +std::vector FEIRExprIRead::GetVarUsesImpl() const { + return subExpr->GetVarUses(); +} + +BaseNode *FEIRExprIRead::GenMIRNodeImpl(MIRBuilder &mirBuilder) const { + MIRType *returnType = type->GenerateMIRTypeAuto(); + MIRType *pointerType = ptrType->GenerateMIRTypeAuto(); + BaseNode *node = subExpr->GenMIRNode(mirBuilder); + CHECK_FATAL(pointerType->IsMIRPtrType(), "Must be ptr type!"); + MIRPtrType *mirPtrType = static_cast(pointerType); + MIRType *pointedMirType = mirPtrType->GetPointedType(); + if (fieldID != 0) { + CHECK_FATAL((pointedMirType->GetKind() == MIRTypeKind::kTypeStruct || + pointedMirType->GetKind() == MIRTypeKind::kTypeUnion), + "If fieldID is not 0, then type must specify pointer to a structure"); + } + return mirBuilder.CreateExprIread(*returnType, *mirPtrType, fieldID, node); 
+} + +// ---------- FEIRExprAddrofConstArray ---------- +FEIRExprAddrofConstArray::FEIRExprAddrofConstArray(const std::vector &arrayIn, MIRType *typeIn, + const std::string &strIn) + : FEIRExpr(FEIRNodeKind::kExprAddrof, FEIRTypeHelper::CreateTypeNative(*GlobalTables::GetTypeTable().GetPtrType())), + arrayName(FEOptions::GetInstance().GetFuncInlineSize() != 0 ? FEUtils::GetSequentialName("const_array_") + + FEUtils::GetFileNameHashStr(FEManager::GetModule().GetFileName()) : + FEUtils::GetSequentialName("const_array_")), + elemType(typeIn), + str(strIn) { + std::copy(arrayIn.begin(), arrayIn.end(), std::back_inserter(array)); +} + +std::unique_ptr FEIRExprAddrofConstArray::CloneImpl() const { + std::unique_ptr expr = std::make_unique(arrayName, array, elemType, str); + return expr; +} + +BaseNode *FEIRExprAddrofConstArray::GenMIRNodeImpl(MIRBuilder &mirBuilder) const { + if (!str.empty()) { + MIRModule &module = mirBuilder.GetMirModule(); + UStrIdx strIdx = GlobalTables::GetUStrTable().GetOrCreateStrIdxFromName(str); + return module.GetMemPool()->New(PTY_ptr, strIdx); + } + MIRType *arrayTypeWithSize = GlobalTables::GetTypeTable().GetOrCreateArrayType( + *elemType, static_cast(array.size())); + MIRSymbol *arrayVar = mirBuilder.GetOrCreateGlobalDecl(arrayName, *arrayTypeWithSize); + arrayVar->SetAttr(ATTR_readonly); + arrayVar->SetStorageClass(kScFstatic); + MIRModule &module = mirBuilder.GetMirModule(); + MIRAggConst *val = module.GetMemPool()->New(module, *arrayTypeWithSize); + for (uint32 i = 0; i < array.size(); ++i) { + MIRConst *cst = module.GetMemPool()->New(array[i], *elemType); + val->PushBack(cst); + } + // This interface is only for string literal, 0 is added to the end of the string. + MIRConst *cst0 = module.GetMemPool()->New(0, *elemType); + val->PushBack(cst0); + arrayVar->SetKonst(val); + BaseNode *nodeAddrof = mirBuilder.CreateExprAddrof(0, *arrayVar); + return nodeAddrof; +} + +// ---------- FEIRExprAddrOfLabel --------- +std::unique_ptr FEIRExprAddrOfLabel::CloneImpl() const { + std::unique_ptr expr = std::make_unique(labelName, type->Clone()); + return expr; +} + +BaseNode *FEIRExprAddrOfLabel::GenMIRNodeImpl(MIRBuilder &mirBuilder) const { + LabelIdx lbIdx = mirBuilder.GetOrCreateMIRLabel(labelName); + BaseNode *mirNode = mirBuilder.GetCurrentFuncCodeMp()->New(static_cast(lbIdx)); + mirNode->SetPrimType(PTY_ptr); + return mirNode; +} + +// ---------- FEIRExprIAddrof ---------- +std::unique_ptr FEIRExprIAddrof::CloneImpl() const { + return std::make_unique(ptrType->Clone(), fieldID, subExpr->Clone()); +} + +std::vector FEIRExprIAddrof::GetVarUsesImpl() const { + return subExpr->GetVarUses(); +} + +BaseNode *FEIRExprIAddrof::GenMIRNodeImpl(MIRBuilder &mirBuilder) const { + MIRType *returnType = GlobalTables::GetTypeTable().GetPtrType(); + MIRType *pointerType = ptrType->GenerateMIRTypeAuto(); + BaseNode *node = subExpr->GenMIRNode(mirBuilder); + CHECK_FATAL(pointerType->IsMIRPtrType(), "Must be ptr type!"); + MIRPtrType *mirPtrType = static_cast(pointerType); + MIRType *pointedMirType = mirPtrType->GetPointedType(); + if (fieldID != 0) { + CHECK_FATAL((pointedMirType->GetKind() == MIRTypeKind::kTypeStruct || + pointedMirType->GetKind() == MIRTypeKind::kTypeUnion), + "If fieldID is not 0, then type must specify pointer to a structure"); + } + return mirBuilder.CreateExprIaddrof(*returnType, *mirPtrType, fieldID, node); +} + +// ---------- FEIRExprAddrofVar ---------- +std::unique_ptr FEIRExprAddrofVar::CloneImpl() const { + UniqueFEIRExpr expr = 
std::make_unique(varSrc->Clone()); + expr->SetFieldID(fieldID); + return expr; +} + +BaseNode *FEIRExprAddrofVar::GenMIRNodeImpl(MIRBuilder &mirBuilder) const { + MIRSymbol *varSymbol = varSrc->GenerateMIRSymbol(mirBuilder); + if (cst != nullptr) { + varSymbol->SetKonst(cst); + } + MIRType *type = varSrc->GetType()->GenerateMIRTypeAuto(); + if (fieldID != 0) { + CHECK_FATAL((type->IsMIRStructType() || type->GetKind() == MIRTypeKind::kTypeUnion), + "if fieldID is not 0, then the variable must be a structure"); + } + AddrofNode *node = mirBuilder.CreateExprAddrof(fieldID, *varSymbol); + return node; +} + +std::vector FEIRExprAddrofVar::GetVarUsesImpl() const { + return std::vector({ varSrc.get() }); +} + +// ---------- FEIRExprAddrofFunc ---------- +std::unique_ptr FEIRExprAddrofFunc::CloneImpl() const { + auto expr = std::make_unique(funcAddr); + return expr; +} + +BaseNode *FEIRExprAddrofFunc::GenMIRNodeImpl(MIRBuilder &mirBuilder) const { + GStrIdx strIdx = GlobalTables::GetStrTable().GetStrIdxFromName(funcAddr); + MIRFunction *mirFunc = FEManager::GetTypeManager().GetMIRFunction(strIdx, false); + CHECK_FATAL(mirFunc != nullptr, "can not get MIRFunction"); + return mirBuilder.CreateExprAddroffunc(mirFunc->GetPuidx(), + mirBuilder.GetMirModule().GetMemPool()); +} + +// ---------- FEIRExprAddrofArray ---------- +FEIRExprAddrofArray::FEIRExprAddrofArray(UniqueFEIRType argTypeNativeArray, UniqueFEIRExpr argExprArray, + const std::string &argArrayName, std::list &argExprIndexs) + : FEIRExpr(FEIRNodeKind::kExprAddrofArray, + FEIRTypeHelper::CreateTypeNative(*GlobalTables::GetTypeTable().GetPtrType())), + typeNativeArray(std::move(argTypeNativeArray)), + exprArray(std::move(argExprArray)), + arrayName(argArrayName) { + SetIndexsExprs(argExprIndexs); +} + +std::unique_ptr FEIRExprAddrofArray::CloneImpl() const { + UniqueFEIRType uTypeNativeArray = typeNativeArray->Clone(); + UniqueFEIRExpr uExprArray = exprArray->Clone(); + auto feirExprAddrofArray = std::make_unique(std::move(uTypeNativeArray), + std::move(uExprArray), arrayName, + exprIndexs); + feirExprAddrofArray->SetIndexsExprs(exprIndexs); + return feirExprAddrofArray; +} + +std::vector FEIRExprAddrofArray::GetVarUsesImpl() const { + return exprArray->GetVarUses(); +} + +BaseNode *FEIRExprAddrofArray::GenMIRNodeImpl(MIRBuilder &mirBuilder) const { + MIRType *ptrMIRArrayType = typeNativeArray->GenerateMIRType(false); + BaseNode *nodeAddrof = nullptr; + std::vector nds; + if (!arrayName.empty()) { + std::unique_ptr arrayVar = exprArray->GetVarUses().front()->Clone(); + MIRSymbol *mirSymbol = arrayVar->GenerateMIRSymbol(mirBuilder); + if (ptrMIRArrayType->GetKind() == kTypeArray) { + mirSymbol->SetTyIdx(ptrMIRArrayType->GetTypeIndex()); + nodeAddrof = mirBuilder.CreateExprAddrof(0, *mirSymbol); + } else { + nodeAddrof = mirBuilder.CreateDread(*mirSymbol, PTY_ptr); + } + } else { + nodeAddrof = exprArray->GenMIRNode(mirBuilder); + } + nds.push_back(nodeAddrof); + for (auto &e : exprIndexs) { + BaseNode *no = e->GenMIRNode(mirBuilder); + nds.push_back(no); + } + BaseNode *arrayExpr = mirBuilder.CreateExprArray(*ptrMIRArrayType, nds); + return arrayExpr; +} + +// ---------- FEIRExprRegRead ---------- +FEIRExprRegRead::FEIRExprRegRead(PrimType pty, int32 regNumIn) + : FEIRExpr(FEIRNodeKind::kExprRegRead), prmType(pty), regNum(regNumIn) {} + +std::unique_ptr FEIRExprRegRead::CloneImpl() const { + std::unique_ptr expr = std::make_unique(prmType, regNum); + return expr; +} + +BaseNode *FEIRExprRegRead::GenMIRNodeImpl(MIRBuilder &mirBuilder) const { + 
RegreadNode *node = mirBuilder.CreateExprRegread(prmType, regNum); + return node; +} + +void FEIRExprDRead::RegisterDFGNodes2CheckPointImpl(FEIRStmtCheckPoint &checkPoint) { + checkPoint.RegisterDFGNode(varSrc); +} + +bool FEIRExprDRead::CalculateDefs4AllUsesImpl(FEIRStmtCheckPoint &checkPoint, FEIRUseDefChain &udChain) { + std::set defs = checkPoint.CalcuDef(varSrc); + (void)udChain.insert(std::make_pair(&varSrc, defs)); + return (defs.size() > 0); +} + +// ---------- FEIRExprUnary ---------- +std::map FEIRExprUnary::mapOpNestable = FEIRExprUnary::InitMapOpNestableForExprUnary(); + +FEIRExprUnary::FEIRExprUnary(Opcode argOp, std::unique_ptr argOpnd) + : FEIRExpr(kExprUnary), + op(argOp) { + SetOpnd(std::move(argOpnd)); + SetExprTypeByOp(); +} + +FEIRExprUnary::FEIRExprUnary(std::unique_ptr argType, Opcode argOp, std::unique_ptr argOpnd) + : FEIRExpr(kExprUnary, std::move(argType)), + op(argOp) { + SetOpnd(std::move(argOpnd)); +} + +std::map FEIRExprUnary::InitMapOpNestableForExprUnary() { + std::map ans; + ans[OP_abs] = true; + ans[OP_bnot] = true; + ans[OP_lnot] = true; + ans[OP_neg] = true; + ans[OP_recip] = true; + ans[OP_sqrt] = true; + ans[OP_gcmallocjarray] = false; + return ans; +} + +std::unique_ptr FEIRExprUnary::CloneImpl() const { + std::unique_ptr expr = std::make_unique(type->Clone(), op, opnd->Clone()); + return expr; +} + +BaseNode *FEIRExprUnary::GenMIRNodeImpl(MIRBuilder &mirBuilder) const { + MIRType *mirType = GlobalTables::GetTypeTable().GetTypeFromTyIdx( + TyIdx(static_cast(GetTypeRef().GetPrimType()))); + ASSERT(mirType != nullptr, "mir type is nullptr"); + BaseNode *nodeOpnd = opnd->GenMIRNode(mirBuilder); + BaseNode *expr = mirBuilder.CreateExprUnary(op, *mirType, nodeOpnd); + return expr; +} + +void FEIRExprUnary::RegisterDFGNodes2CheckPointImpl(FEIRStmtCheckPoint &checkPoint) { + opnd->RegisterDFGNodes2CheckPoint(checkPoint); +} + +bool FEIRExprUnary::CalculateDefs4AllUsesImpl(FEIRStmtCheckPoint &checkPoint, FEIRUseDefChain &udChain) { + return opnd->CalculateDefs4AllUses(checkPoint, udChain); +} + +std::vector FEIRExprUnary::GetVarUsesImpl() const { + return opnd->GetVarUses(); +} + +void FEIRExprUnary::SetOpnd(std::unique_ptr argOpnd) { + CHECK_FATAL(argOpnd != nullptr, "opnd is nullptr"); + opnd = std::move(argOpnd); +} + +const UniqueFEIRExpr &FEIRExprUnary::GetOpnd() const { + return opnd; +} + +void FEIRExprUnary::SetExprTypeByOp() { + switch (op) { + case OP_neg: + case OP_bnot: + case OP_lnot: + type->SetPrimType(opnd->GetPrimType()); + break; + default: + break; + } +} + +// ---------- FEIRExprTypeCvt ---------- +std::map FEIRExprTypeCvt::mapOpNestable = FEIRExprTypeCvt::InitMapOpNestableForTypeCvt(); +std::map FEIRExprTypeCvt::funcPtrMapForParseExpr = + FEIRExprTypeCvt::InitFuncPtrMapForParseExpr(); + +FEIRExprTypeCvt::FEIRExprTypeCvt(Opcode argOp, std::unique_ptr argOpnd) + : FEIRExprUnary(argOp, std::move(argOpnd)) {} + +FEIRExprTypeCvt::FEIRExprTypeCvt(std::unique_ptr exprType, Opcode argOp, std::unique_ptr argOpnd) + : FEIRExprUnary(std::move(exprType), argOp, std::move(argOpnd)) {} + +std::unique_ptr FEIRExprTypeCvt::CloneImpl() const { + std::unique_ptr expr = std::make_unique(type->Clone(), op, opnd->Clone()); + static_cast(expr.get())->SetSrcPrimType(srcPrimType); + return expr; +} + +BaseNode *FEIRExprTypeCvt::GenMIRNodeImpl(MIRBuilder &mirBuilder) const { + auto ptrFunc = funcPtrMapForParseExpr.find(op); + ASSERT(ptrFunc != funcPtrMapForParseExpr.cend(), "unsupported op: %s", kOpcodeInfo.GetName(op).c_str()); + return 
(this->*(ptrFunc->second))(mirBuilder); +} + +void FEIRExprTypeCvt::RegisterDFGNodes2CheckPointImpl(FEIRStmtCheckPoint &checkPoint) { + opnd->RegisterDFGNodes2CheckPoint(checkPoint); +} + +bool FEIRExprTypeCvt::CalculateDefs4AllUsesImpl(FEIRStmtCheckPoint &checkPoint, FEIRUseDefChain &udChain) { + return opnd->CalculateDefs4AllUses(checkPoint, udChain); +} + +Opcode FEIRExprTypeCvt::ChooseOpcodeByFromVarAndToVar(const FEIRVar &fromVar, const FEIRVar &toVar) { + if ((fromVar.GetType()->IsRef()) && (toVar.GetType()->IsRef())) { + return OP_retype; + } + return OP_retype; +} + +BaseNode *FEIRExprTypeCvt::GenMIRNodeMode1(MIRBuilder &mirBuilder) const { + // MIR: op () + MIRType *mirTypeDst = + GlobalTables::GetTypeTable().GetTypeFromTyIdx(TyIdx(static_cast(GetTypeRef().GetPrimType()))); + MIRType *mirTypeSrc = nullptr; + if (srcPrimType == PTY_unknown) { + mirTypeSrc = GlobalTables::GetTypeTable().GetTypeFromTyIdx(TyIdx(static_cast(opnd->GetPrimType()))); + } else { + mirTypeSrc = GlobalTables::GetTypeTable().GetTypeFromTyIdx(TyIdx(static_cast(srcPrimType))); + } + BaseNode *nodeOpnd = opnd->GenMIRNode(mirBuilder); + BaseNode *expr = mirBuilder.CreateExprTypeCvt(op, *mirTypeDst, *mirTypeSrc, nodeOpnd); + return expr; +} + +BaseNode *FEIRExprTypeCvt::GenMIRNodeMode2(MIRBuilder &mirBuilder) const { + // MIR: op () + PrimType primTypeSrc = opnd->GetPrimType(); + CHECK_FATAL(IsPrimitiveFloat(primTypeSrc), "from type must be float type"); + return GenMIRNodeMode1(mirBuilder); +} + +BaseNode *FEIRExprTypeCvt::GenMIRNodeMode3(MIRBuilder &mirBuilder) const { + // MIR: retype () + MIRType *mirTypeDst = GetTypeRef().GenerateMIRType(); + MIRType *mirTypeSrc = opnd->GetTypeRef().GenerateMIRTypeAuto(); + BaseNode *nodeOpnd = opnd->GenMIRNode(mirBuilder); + BaseNode *expr = mirBuilder.CreateExprRetype(*mirTypeDst, *mirTypeSrc, nodeOpnd); + return expr; +} + +std::map FEIRExprTypeCvt::InitMapOpNestableForTypeCvt() { + std::map ans; + ans[OP_ceil] = true; + ans[OP_cvt] = true; + ans[OP_floor] = true; + ans[OP_retype] = true; + ans[OP_round] = true; + ans[OP_trunc] = true; + return ans; +} + +std::map FEIRExprTypeCvt::InitFuncPtrMapForParseExpr() { + std::map ans; + ans[OP_ceil] = &FEIRExprTypeCvt::GenMIRNodeMode2; + ans[OP_cvt] = &FEIRExprTypeCvt::GenMIRNodeMode1; + ans[OP_floor] = &FEIRExprTypeCvt::GenMIRNodeMode2; + ans[OP_retype] = &FEIRExprTypeCvt::GenMIRNodeMode3; + ans[OP_round] = &FEIRExprTypeCvt::GenMIRNodeMode2; + ans[OP_trunc] = &FEIRExprTypeCvt::GenMIRNodeMode2; + return ans; +} + +// ---------- FEIRExprExtractBits ---------- +std::map FEIRExprExtractBits::mapOpNestable = FEIRExprExtractBits::InitMapOpNestableForExtractBits(); +std::map FEIRExprExtractBits::funcPtrMapForParseExpr = + FEIRExprExtractBits::InitFuncPtrMapForParseExpr(); + +FEIRExprExtractBits::FEIRExprExtractBits(Opcode argOp, PrimType argPrimType, uint8 argBitOffset, uint8 argBitSize, + std::unique_ptr argOpnd) + : FEIRExprUnary(argOp, std::move(argOpnd)), + bitOffset(argBitOffset), + bitSize(argBitSize) { + CHECK_FATAL(IsPrimitiveInteger(argPrimType), "only integer type is supported"); + type->SetPrimType(argPrimType); +} + +FEIRExprExtractBits::FEIRExprExtractBits(Opcode argOp, PrimType argPrimType, std::unique_ptr argOpnd) + : FEIRExprExtractBits(argOp, argPrimType, 0, 0, std::move(argOpnd)) {} + +std::unique_ptr FEIRExprExtractBits::CloneImpl() const { + std::unique_ptr expr = std::make_unique(op, type->GetPrimType(), bitOffset, bitSize, + opnd->Clone()); + return expr; +} + +BaseNode 
*FEIRExprExtractBits::GenMIRNodeImpl(MIRBuilder &mirBuilder) const { + auto ptrFunc = funcPtrMapForParseExpr.find(op); + ASSERT(ptrFunc != funcPtrMapForParseExpr.cend(), "unsupported op: %s", kOpcodeInfo.GetName(op).c_str()); + return (this->*(ptrFunc->second))(mirBuilder); +} + +void FEIRExprExtractBits::RegisterDFGNodes2CheckPointImpl(FEIRStmtCheckPoint &checkPoint) { + opnd->RegisterDFGNodes2CheckPoint(checkPoint); +} + +bool FEIRExprExtractBits::CalculateDefs4AllUsesImpl(FEIRStmtCheckPoint &checkPoint, FEIRUseDefChain &udChain) { + return opnd->CalculateDefs4AllUses(checkPoint, udChain); +} + +std::map FEIRExprExtractBits::InitMapOpNestableForExtractBits() { + std::map ans; + ans[OP_extractbits] = true; + ans[OP_sext] = true; + ans[OP_zext] = true; + return ans; +} + +std::map FEIRExprExtractBits::InitFuncPtrMapForParseExpr() { + std::map ans; + ans[OP_extractbits] = &FEIRExprExtractBits::GenMIRNodeForExtrabits; + ans[OP_sext] = &FEIRExprExtractBits::GenMIRNodeForExt; + ans[OP_zext] = &FEIRExprExtractBits::GenMIRNodeForExt; + return ans; +} + +BaseNode *FEIRExprExtractBits::GenMIRNodeForExtrabits(MIRBuilder &mirBuilder) const { + ASSERT(opnd != nullptr, "nullptr check"); + PrimType primTypeDst = GetTypeRef().GetPrimType(); + PrimType primTypeSrc = opnd->GetPrimType(); + CHECK_FATAL(FEUtils::IsInteger(primTypeDst), "dst type of extrabits must integer"); + CHECK_FATAL(FEUtils::IsInteger(primTypeSrc), "src type of extrabits must integer"); + uint8 widthDst = FEUtils::GetWidth(primTypeDst); + uint8 widthSrc = FEUtils::GetWidth(primTypeSrc); + CHECK_FATAL(widthDst >= bitSize, "dst width is not enough"); + CHECK_FATAL(widthSrc >= bitOffset + bitSize, "src width is not enough"); + MIRType *mirTypeDst = GetTypeRef().GenerateMIRTypeAuto(); + BaseNode *nodeOpnd = opnd->GenMIRNode(mirBuilder); + BaseNode *expr = mirBuilder.CreateExprExtractbits(op, *mirTypeDst, bitOffset, bitSize, nodeOpnd); + return expr; +} + +BaseNode *FEIRExprExtractBits::GenMIRNodeForExt(MIRBuilder &mirBuilder) const { + ASSERT(opnd != nullptr, "nullptr check"); + PrimType primTypeDst = GetTypeRef().GetPrimType(); + CHECK_FATAL(FEUtils::IsInteger(primTypeDst), "dst type of sext/zext must integer"); + uint8 widthDst = FEUtils::GetWidth(primTypeDst); + // The bit size must be smaller than 32. This parameter is used only when the value of src or dst is smaller than 32. 
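+  // When the destination is 32 bits or wider, the extension width falls back to the width of the
+  // source operand, and the operand is widened as unsigned (zext) or signed (sext) accordingly.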
+ if (widthDst >= 32) { + widthDst = FEUtils::GetWidth(opnd->GetPrimType()); + } + BaseNode *nodeOpnd = opnd->GenMIRNode(mirBuilder); + PrimType extPty; + if (op == OP_zext) { + extPty = GetUnsignedPrimType(GetRegPrimType(nodeOpnd->GetPrimType())); + } else { + extPty = GetSignedPrimType(GetRegPrimType(nodeOpnd->GetPrimType())); + } + MIRType *mirTypeDst = GlobalTables::GetTypeTable().GetPrimType(extPty); + BaseNode *expr = mirBuilder.CreateExprExtractbits(op, *mirTypeDst, 0, widthDst, nodeOpnd); + return expr; +} + +// ---------- FEIRExprBinary ---------- +std::map FEIRExprBinary::funcPtrMapForGenMIRNode = + FEIRExprBinary::InitFuncPtrMapForGenMIRNode(); + +FEIRExprBinary::FEIRExprBinary(Opcode argOp, std::unique_ptr argOpnd0, std::unique_ptr argOpnd1) + : FEIRExpr(FEIRNodeKind::kExprBinary), + op(argOp) { + SetOpnd0(std::move(argOpnd0)); + SetOpnd1(std::move(argOpnd1)); + SetExprTypeByOp(); +} + +FEIRExprBinary::FEIRExprBinary(std::unique_ptr exprType, Opcode argOp, std::unique_ptr argOpnd0, + std::unique_ptr argOpnd1) + : FEIRExpr(FEIRNodeKind::kExprBinary, std::move(exprType)), + op(argOp) { + SetOpnd0(std::move(argOpnd0)); + SetOpnd1(std::move(argOpnd1)); + if (FEManager::GetModule().GetSrcLang() != kSrcLangC) { + SetExprTypeByOp(); + } +} + +std::unique_ptr FEIRExprBinary::CloneImpl() const { + std::unique_ptr expr = std::make_unique(type->Clone(), op, opnd0->Clone(), opnd1->Clone()); + return expr; +} + +BaseNode *FEIRExprBinary::GenMIRNodeImpl(MIRBuilder &mirBuilder) const { + auto ptrFunc = funcPtrMapForGenMIRNode.find(op); + ASSERT(ptrFunc != funcPtrMapForGenMIRNode.cend(), "unsupported op: %s", kOpcodeInfo.GetName(op).c_str()); + return (this->*(ptrFunc->second))(mirBuilder); +} + +void FEIRExprBinary::RegisterDFGNodes2CheckPointImpl(FEIRStmtCheckPoint &checkPoint) { + opnd0->RegisterDFGNodes2CheckPoint(checkPoint); + opnd1->RegisterDFGNodes2CheckPoint(checkPoint); +} + +bool FEIRExprBinary::CalculateDefs4AllUsesImpl(FEIRStmtCheckPoint &checkPoint, FEIRUseDefChain &udChain) { + bool success = true; + success = success && opnd0->CalculateDefs4AllUses(checkPoint, udChain); + success = success && opnd1->CalculateDefs4AllUses(checkPoint, udChain); + return success; +} + +std::vector FEIRExprBinary::GetVarUsesImpl() const { + std::vector ans; + for (FEIRVar *var : opnd0->GetVarUses()) { + ans.push_back(var); + } + for (FEIRVar *var : opnd1->GetVarUses()) { + ans.push_back(var); + } + return ans; +} + +bool FEIRExprBinary::IsNestableImpl() const { + return true; +} + +bool FEIRExprBinary::IsAddrofImpl() const { + return false; +} + +void FEIRExprBinary::SetOpnd0(std::unique_ptr argOpnd) { + CHECK_FATAL(argOpnd != nullptr, "input is nullptr"); + opnd0 = std::move(argOpnd); +} + +void FEIRExprBinary::SetOpnd1(std::unique_ptr argOpnd) { + CHECK_FATAL(argOpnd != nullptr, "input is nullptr"); + opnd1 = std::move(argOpnd); +} + +const std::unique_ptr &FEIRExprBinary::GetOpnd0() const { + return opnd0; +} + +const std::unique_ptr &FEIRExprBinary::GetOpnd1() const { + return opnd1; +} + +bool FEIRExprBinary::IsComparative() const { + switch (op) { + case OP_cmp: + case OP_cmpl: + case OP_cmpg: + case OP_eq: + case OP_ge: + case OP_gt: + case OP_le: + case OP_lt: + case OP_ne: + // Cand and cior do not need to be converted to comparison. 
+ case OP_cand: + case OP_cior: + return true; + default: + return false; + } +} + +std::map FEIRExprBinary::InitFuncPtrMapForGenMIRNode() { + std::map ans; + ans[OP_add] = &FEIRExprBinary::GenMIRNodeNormal; + ans[OP_ashr] = &FEIRExprBinary::GenMIRNodeNormal; + ans[OP_band] = &FEIRExprBinary::GenMIRNodeNormal; + ans[OP_bior] = &FEIRExprBinary::GenMIRNodeNormal; + ans[OP_bxor] = &FEIRExprBinary::GenMIRNodeNormal; + ans[OP_cand] = &FEIRExprBinary::GenMIRNodeNormal; + ans[OP_cior] = &FEIRExprBinary::GenMIRNodeNormal; + ans[OP_cmp] = &FEIRExprBinary::GenMIRNodeCompare; + ans[OP_cmpg] = &FEIRExprBinary::GenMIRNodeCompare; + ans[OP_cmpl] = &FEIRExprBinary::GenMIRNodeCompare; + ans[OP_div] = &FEIRExprBinary::GenMIRNodeNormal; + ans[OP_eq] = &FEIRExprBinary::GenMIRNodeCompareU1; + ans[OP_ge] = &FEIRExprBinary::GenMIRNodeCompareU1; + ans[OP_gt] = &FEIRExprBinary::GenMIRNodeCompareU1; + ans[OP_land] = &FEIRExprBinary::GenMIRNodeNormal; + ans[OP_lior] = &FEIRExprBinary::GenMIRNodeNormal; + ans[OP_le] = &FEIRExprBinary::GenMIRNodeCompareU1; + ans[OP_lshr] = &FEIRExprBinary::GenMIRNodeNormal; + ans[OP_lt] = &FEIRExprBinary::GenMIRNodeCompareU1; + ans[OP_max] = &FEIRExprBinary::GenMIRNodeNormal; + ans[OP_min] = &FEIRExprBinary::GenMIRNodeNormal; + ans[OP_mul] = &FEIRExprBinary::GenMIRNodeNormal; + ans[OP_ne] = &FEIRExprBinary::GenMIRNodeCompareU1; + ans[OP_rem] = &FEIRExprBinary::GenMIRNodeNormal; + ans[OP_shl] = &FEIRExprBinary::GenMIRNodeNormal; + ans[OP_sub] = &FEIRExprBinary::GenMIRNodeNormal; + ans[OP_ror] = &FEIRExprBinary::GenMIRNodeNormal; + return ans; +} + +BaseNode *FEIRExprBinary::GenMIRNodeNormal(MIRBuilder &mirBuilder) const { + MIRType *mirTypeDst = GlobalTables::GetTypeTable().GetTypeFromTyIdx(TyIdx(static_cast(type->GetPrimType()))); + UniqueFEIRExpr opnd0FEExpr = opnd0->Clone(); + UniqueFEIRExpr opnd1FEExpr = opnd1->Clone(); + if (op == OP_cand || op == OP_cior) { + opnd0FEExpr = FEIRBuilder::CreateExprZeroCompare(OP_ne, std::move(opnd0FEExpr)); + opnd1FEExpr = FEIRBuilder::CreateExprZeroCompare(OP_ne, std::move(opnd1FEExpr)); + } + BaseNode *nodeOpnd0 = opnd0FEExpr->GenMIRNode(mirBuilder); + BaseNode *nodeOpnd1 = opnd1FEExpr->GenMIRNode(mirBuilder); + BaseNode *expr = mirBuilder.CreateExprBinary(op, *mirTypeDst, nodeOpnd0, nodeOpnd1); + return expr; +} + +BaseNode *FEIRExprBinary::GenMIRNodeCompare(MIRBuilder &mirBuilder) const { + BaseNode *nodeOpnd0 = opnd0->GenMIRNode(mirBuilder); + BaseNode *nodeOpnd1 = opnd1->GenMIRNode(mirBuilder); + MIRType *mirTypeSrc = GlobalTables::GetTypeTable().GetTypeFromTyIdx( + TyIdx(static_cast(nodeOpnd0->GetPrimType()))); + MIRType *mirTypeDst = GlobalTables::GetTypeTable().GetTypeFromTyIdx(TyIdx(static_cast(type->GetPrimType()))); + BaseNode *expr = mirBuilder.CreateExprCompare(op, *mirTypeDst, *mirTypeSrc, nodeOpnd0, nodeOpnd1); + return expr; +} + +BaseNode *FEIRExprBinary::GenMIRNodeCompareU1(MIRBuilder &mirBuilder) const { + BaseNode *nodeOpnd0 = opnd0->GenMIRNode(mirBuilder); + BaseNode *nodeOpnd1 = opnd1->GenMIRNode(mirBuilder); + PrimType srcType0 = nodeOpnd0->GetPrimType(); + PrimType srcType1 = nodeOpnd1->GetPrimType(); + // We take the bigger one as srcType + PrimType srcType = GetPrimTypeActualBitSize(srcType0) > GetPrimTypeActualBitSize(srcType1) ? 
srcType0 : srcType1; + MIRType *mirTypeSrc = GlobalTables::GetTypeTable().GetTypeFromTyIdx(TyIdx(static_cast(srcType))); + // If the src type is a vector type, there is no need to use u1 to replace + MIRType *mirTypeDst = type->GenerateMIRTypeAuto(); + if (!PrimitiveType(mirTypeDst->GetPrimType()).IsVector()) { + mirTypeDst = GlobalTables::GetTypeTable().GetUInt1(); + } + BaseNode *expr = mirBuilder.CreateExprCompare(op, *mirTypeDst, *mirTypeSrc, nodeOpnd0, nodeOpnd1); + return expr; +} + +void FEIRExprBinary::SetExprTypeByOp() { + switch (op) { + // Normal + case OP_add: + case OP_div: + case OP_max: + case OP_min: + case OP_mul: + case OP_rem: + case OP_sub: + SetExprTypeByOpNormal(); + break; + // Shift + case OP_ashr: + case OP_lshr: + case OP_shl: + case OP_ror: + SetExprTypeByOpShift(); + break; + // Logic + case OP_band: + case OP_bior: + case OP_bxor: + case OP_cand: + case OP_cior: + case OP_land: + case OP_lior: + SetExprTypeByOpLogic(); + break; + // Compare + case OP_cmp: + case OP_cmpl: + case OP_cmpg: + case OP_eq: + case OP_ge: + case OP_gt: + case OP_le: + case OP_lt: + case OP_ne: + SetExprTypeByOpCompare(); + break; + default: + break; + } +} + +void FEIRExprBinary::SetExprTypeByOpNormal() { + PrimType primTypeOpnd0 = opnd0->GetPrimType(); + PrimType primTypeOpnd1 = opnd1->GetPrimType(); + // primTypeOpnd0 == PTY_void for addrof add + if (primTypeOpnd0 == PTY_ptr || primTypeOpnd1 == PTY_ptr || primTypeOpnd0 == PTY_void) { + type->SetPrimType(PTY_ptr); + return; + } + type->SetPrimType(primTypeOpnd0); +} + +void FEIRExprBinary::SetExprTypeByOpShift() { + PrimType primTypeOpnd0 = opnd0->GetPrimType(); + PrimType primTypeOpnd1 = opnd1->GetPrimType(); + CHECK_FATAL(IsPrimitiveInteger(primTypeOpnd0), "logic's opnd0 must be integer"); + CHECK_FATAL(IsPrimitiveInteger(primTypeOpnd1), "logic's opnd1 must be integer"); + type->SetPrimType(primTypeOpnd0); +} + +void FEIRExprBinary::SetExprTypeByOpLogic() { + PrimType primTypeOpnd0 = opnd0->GetPrimType(); + CHECK_FATAL(IsPrimitiveInteger(primTypeOpnd0), "logic's opnds must be integer"); + type->SetPrimType(primTypeOpnd0); +} + +void FEIRExprBinary::SetExprTypeByOpCompare() { + type->SetPrimType(PTY_i32); +} + +// ---------- FEIRExprTernary ---------- +FEIRExprTernary::FEIRExprTernary(Opcode argOp, std::unique_ptr argOpnd0, std::unique_ptr argOpnd1, + std::unique_ptr argOpnd2) + : FEIRExpr(FEIRNodeKind::kExprTernary), + op(argOp) { + SetOpnd(std::move(argOpnd0), 0); + SetOpnd(std::move(argOpnd1), 1); + SetOpnd(std::move(argOpnd2), 2); + SetExprTypeByOp(); +} + +FEIRExprTernary::FEIRExprTernary(Opcode argOp, std::unique_ptr argType, std::unique_ptr argOpnd0, + std::unique_ptr argOpnd1, std::unique_ptr argOpnd2) + : FEIRExpr(FEIRNodeKind::kExprTernary, std::move(argType)), + op(argOp) { + SetOpnd(std::move(argOpnd0), 0); + SetOpnd(std::move(argOpnd1), 1); + SetOpnd(std::move(argOpnd2), 2); +} + +std::unique_ptr FEIRExprTernary::CloneImpl() const { + std::unique_ptr expr = std::make_unique(op, type->Clone(), opnd0->Clone(), opnd1->Clone(), + opnd2->Clone()); + return expr; +} + +BaseNode *FEIRExprTernary::GenMIRNodeImpl(MIRBuilder &mirBuilder) const { + MIRType *mirTypeDst = GlobalTables::GetTypeTable().GetTypeFromTyIdx(TyIdx(static_cast(type->GetPrimType()))); + BaseNode *nodeOpnd0 = opnd0->GenMIRNode(mirBuilder); + BaseNode *nodeOpnd1 = opnd1->GenMIRNode(mirBuilder); + BaseNode *nodeOpnd2 = opnd2->GenMIRNode(mirBuilder); + BaseNode *expr = mirBuilder.CreateExprTernary(op, *mirTypeDst, nodeOpnd0, nodeOpnd1, nodeOpnd2); + return expr; 
+} + +void FEIRExprTernary::RegisterDFGNodes2CheckPointImpl(FEIRStmtCheckPoint &checkPoint) { + opnd0->RegisterDFGNodes2CheckPoint(checkPoint); + opnd1->RegisterDFGNodes2CheckPoint(checkPoint); + opnd2->RegisterDFGNodes2CheckPoint(checkPoint); +} + +bool FEIRExprTernary::CalculateDefs4AllUsesImpl(FEIRStmtCheckPoint &checkPoint, FEIRUseDefChain &udChain) { + bool success = true; + success = success && opnd0->CalculateDefs4AllUses(checkPoint, udChain); + success = success && opnd1->CalculateDefs4AllUses(checkPoint, udChain); + success = success && opnd2->CalculateDefs4AllUses(checkPoint, udChain); + return success; +} + +std::vector FEIRExprTernary::GetVarUsesImpl() const { + std::vector ans; + for (FEIRVar *var : opnd0->GetVarUses()) { + ans.push_back(var); + } + for (FEIRVar *var : opnd1->GetVarUses()) { + ans.push_back(var); + } + for (FEIRVar *var : opnd2->GetVarUses()) { + ans.push_back(var); + } + return ans; +} + +bool FEIRExprTernary::IsNestableImpl() const { + return true; +} + +bool FEIRExprTernary::IsAddrofImpl() const { + return false; +} + +void FEIRExprTernary::SetOpnd(std::unique_ptr argOpnd, uint32 idx) { + CHECK_FATAL(argOpnd != nullptr, "input is nullptr"); + switch (idx) { + case 0: + opnd0 = std::move(argOpnd); + break; + case 1: + opnd1 = std::move(argOpnd); + break; + case 2: + opnd2 = std::move(argOpnd); + break; + default: + CHECK_FATAL(false, "index out of range"); + } +} + +void FEIRExprTernary::SetExprTypeByOp() { + PrimType primTypeOpnd1 = opnd1->GetPrimType(); + type->SetPrimType(primTypeOpnd1); +} + +// ---------- FEIRExprNary ---------- +FEIRExprNary::FEIRExprNary(Opcode argOp) + : FEIRExpr(FEIRNodeKind::kExprNary), + op(argOp) {} + +std::vector FEIRExprNary::GetVarUsesImpl() const { + std::vector ans; + for (const std::unique_ptr &opnd : opnds) { + for (FEIRVar *var : opnd->GetVarUses()) { + ans.push_back(var); + } + } + return ans; +} + +void FEIRExprNary::RegisterDFGNodes2CheckPointImpl(FEIRStmtCheckPoint &checkPoint) { + for (const std::unique_ptr &opnd : opnds) { + opnd->RegisterDFGNodes2CheckPoint(checkPoint); + } +} + +bool FEIRExprNary::CalculateDefs4AllUsesImpl(FEIRStmtCheckPoint &checkPoint, FEIRUseDefChain &udChain) { + bool success = true; + for (const std::unique_ptr &opnd : opnds) { + success = success && opnd->CalculateDefs4AllUses(checkPoint, udChain); + } + return success; +} + +void FEIRExprNary::AddOpnd(std::unique_ptr argOpnd) { + CHECK_FATAL(argOpnd != nullptr, "input opnd is nullptr"); + opnds.push_back(std::move(argOpnd)); +} + +void FEIRExprNary::AddOpnds(const std::vector> &argOpnds) { + for (const std::unique_ptr &opnd : argOpnds) { + ASSERT_NOT_NULL(opnd); + AddOpnd(opnd->Clone()); + } +} + +void FEIRExprNary::ResetOpnd() { + opnds.clear(); +} + +// ---------- FEIRExprIntrinsicop ---------- +FEIRExprIntrinsicop::FEIRExprIntrinsicop(std::unique_ptr exprType, MIRIntrinsicID argIntrinsicID) + : FEIRExprNary(OP_intrinsicop), + intrinsicID(argIntrinsicID) { + kind = FEIRNodeKind::kExprIntrinsicop; + SetType(std::move(exprType)); +} + +FEIRExprIntrinsicop::FEIRExprIntrinsicop(std::unique_ptr exprType, MIRIntrinsicID argIntrinsicID, + std::unique_ptr argParamType) + : FEIRExprNary(OP_intrinsicopwithtype), + intrinsicID(argIntrinsicID) { + kind = FEIRNodeKind::kExprIntrinsicop; + SetType(std::move(exprType)); + paramType = std::move(argParamType); +} + +FEIRExprIntrinsicop::FEIRExprIntrinsicop(std::unique_ptr exprType, MIRIntrinsicID argIntrinsicID, + const std::vector> &argOpnds) + : FEIRExprIntrinsicop(std::move(exprType), argIntrinsicID) { 
+ AddOpnds(argOpnds); +} + +FEIRExprIntrinsicop::FEIRExprIntrinsicop(std::unique_ptr exprType, MIRIntrinsicID argIntrinsicID, + std::unique_ptr argParamType, + const std::vector> &argOpnds) + : FEIRExprIntrinsicop(std::move(exprType), argIntrinsicID, std::move(argParamType)) { + AddOpnds(argOpnds); +} + +FEIRExprIntrinsicop::FEIRExprIntrinsicop(std::unique_ptr exprType, MIRIntrinsicID argIntrinsicID, + std::unique_ptr argParamType, uint32 argTypeID) + : FEIRExprNary(OP_intrinsicopwithtype), + intrinsicID(argIntrinsicID), + typeID(argTypeID) { + kind = FEIRNodeKind::kExprIntrinsicop; + SetType(std::move(exprType)); + paramType = std::move(argParamType); +} + +std::unique_ptr FEIRExprIntrinsicop::CloneImpl() const { + if (op == OP_intrinsicop) { + return std::make_unique(type->Clone(), intrinsicID, opnds); + } else { + CHECK_FATAL(paramType != nullptr, "error: param type is not set"); + return std::make_unique(type->Clone(), intrinsicID, paramType->Clone(), opnds); + } +} + +BaseNode *FEIRExprIntrinsicop::GenMIRNodeImpl(MIRBuilder &mirBuilder) const { + MapleVector args(mirBuilder.GetCurrentFuncCodeMpAllocator()->Adapter()); + for (const auto &e : opnds) { + BaseNode *node = e->GenMIRNode(mirBuilder); + args.emplace_back(node); + } + (void)typeID; + MIRType *ptrType = GlobalTables::GetTypeTable().GetOrCreatePointerType( + *(type->GenerateMIRType()), paramType->IsRef() ? PTY_ref : PTY_ptr); + BaseNode *expr = mirBuilder.CreateExprIntrinsicop(intrinsicID, op, *ptrType, std::move(args)); + return expr; +} + +void FEIRExprIntrinsicop::RegisterDFGNodes2CheckPointImpl(FEIRStmtCheckPoint &checkPoint) { + for (const std::unique_ptr &opnd : opnds) { + opnd->RegisterDFGNodes2CheckPoint(checkPoint); + } +} + +bool FEIRExprIntrinsicop::CalculateDefs4AllUsesImpl(FEIRStmtCheckPoint &checkPoint, FEIRUseDefChain &udChain) { + bool success = true; + for (const std::unique_ptr &opnd : opnds) { + success = success && opnd->CalculateDefs4AllUses(checkPoint, udChain); + } + return success; +} + +bool FEIRExprIntrinsicop::IsNestableImpl() const { + return false; +} + +bool FEIRExprIntrinsicop::IsAddrofImpl() const { + return false; +} + +// ---------- FEIRExprIntrinsicopForC ---------- +FEIRExprIntrinsicopForC::FEIRExprIntrinsicopForC(std::unique_ptr exprType, MIRIntrinsicID argIntrinsicID, + const std::vector> &argOpnds) + : FEIRExprNary(OP_intrinsicop), + intrinsicID(argIntrinsicID) { + kind = FEIRNodeKind::kExprIntrinsicop; + SetType(std::move(exprType)); + AddOpnds(argOpnds); +} + +std::unique_ptr FEIRExprIntrinsicopForC::CloneImpl() const { + return std::make_unique(type->Clone(), intrinsicID, opnds); +} + +BaseNode *FEIRExprIntrinsicopForC::GenMIRNodeImpl(MIRBuilder &mirBuilder) const { + MapleVector args(mirBuilder.GetCurrentFuncCodeMpAllocator()->Adapter()); + for (const auto &e : opnds) { + BaseNode *node = e->GenMIRNode(mirBuilder); + args.emplace_back(node); + } + return mirBuilder.CreateExprIntrinsicop(intrinsicID, op, + *type->GenerateMIRTypeAuto(), MapleVector(args)); +} + +// ---------- FEIRExprJavaMerge ---------------- +FEIRExprJavaMerge::FEIRExprJavaMerge(std::unique_ptr mergedTypeArg, + const std::vector> &argOpnds) + : FEIRExprNary(OP_intrinsicop) { + SetType(std::move(mergedTypeArg)); + AddOpnds(argOpnds); +} + +std::unique_ptr FEIRExprJavaMerge::CloneImpl() const { + return std::make_unique(type->Clone(), opnds); +} + +void FEIRExprJavaMerge::RegisterDFGNodes2CheckPointImpl(FEIRStmtCheckPoint &checkPoint) { + for (const std::unique_ptr &opnd : opnds) { + 
opnd->RegisterDFGNodes2CheckPoint(checkPoint);
+  }
+}
+
+bool FEIRExprJavaMerge::CalculateDefs4AllUsesImpl(FEIRStmtCheckPoint &checkPoint, FEIRUseDefChain &udChain) {
+  bool success = true;
+  for (const std::unique_ptr<FEIRExpr> &opnd : opnds) {
+    success = success && opnd->CalculateDefs4AllUses(checkPoint, udChain);
+  }
+  return success;
+}
+
+BaseNode *FEIRExprJavaMerge::GenMIRNodeImpl(MIRBuilder &mirBuilder) const {
+  MapleVector<BaseNode*> args(mirBuilder.GetCurrentFuncCodeMpAllocator()->Adapter());
+  args.reserve(opnds.size());
+  for (const auto &e : opnds) {
+    BaseNode *node = e->GenMIRNode(mirBuilder);
+    args.emplace_back(node);
+  }
+  // (intrinsicop u1 JAVA_MERGE (dread i32 %Reg0_I))
+  IntrinDesc *intrinDesc = &IntrinDesc::intrinTable[INTRN_JAVA_MERGE];
+  MIRType *retType = intrinDesc->GetReturnType();
+  BaseNode *intr = mirBuilder.CreateExprIntrinsicop(INTRN_JAVA_MERGE, op, *retType, std::move(args));
+  intr->SetPrimType(type->GetPrimType());
+  return intr;
+}
+
+// ---------- FEIRExprJavaNewInstance ----------
+FEIRExprJavaNewInstance::FEIRExprJavaNewInstance(UniqueFEIRType argType)
+    : FEIRExpr(FEIRNodeKind::kExprJavaNewInstance) {
+  SetType(std::move(argType));
+}
+
+FEIRExprJavaNewInstance::FEIRExprJavaNewInstance(UniqueFEIRType argType, uint32 argTypeID)
+    : FEIRExpr(FEIRNodeKind::kExprJavaNewInstance), typeID(argTypeID) {
+  SetType(std::move(argType));
+}
+
+FEIRExprJavaNewInstance::FEIRExprJavaNewInstance(UniqueFEIRType argType, uint32 argTypeID, bool argIsRcPermanent)
+    : FEIRExpr(FEIRNodeKind::kExprJavaNewInstance), typeID(argTypeID), isRcPermanent(argIsRcPermanent) {
+  SetType(std::move(argType));
+}
+
+std::unique_ptr<FEIRExpr> FEIRExprJavaNewInstance::CloneImpl() const {
+  std::unique_ptr<FEIRExpr> expr = std::make_unique<FEIRExprJavaNewInstance>(type->Clone());
+  CHECK_NULL_FATAL(expr);
+  return expr;
+}
+
+BaseNode *FEIRExprJavaNewInstance::GenMIRNodeImpl(MIRBuilder &mirBuilder) const {
+  MIRType *mirType = type->GenerateMIRType(kSrcLangJava, false);
+  BaseNode *expr = nullptr;
+  MIRType *ptrType = GlobalTables::GetTypeTable().GetOrCreatePointerType(*mirType, PTY_ref);
+  Opcode opMalloc = isRcPermanent ? OP_gcpermalloc : OP_gcmalloc;
+  expr = mirBuilder.CreateExprGCMalloc(opMalloc, *ptrType, *mirType);
+  CHECK_NULL_FATAL(expr);
+  return expr;
+}
+
+// ---------- FEIRExprJavaNewArray ----------
+FEIRExprJavaNewArray::FEIRExprJavaNewArray(UniqueFEIRType argArrayType, UniqueFEIRExpr argExprSize)
+    : FEIRExpr(FEIRNodeKind::kExprJavaNewArray) {
+  SetArrayType(std::move(argArrayType));
+  SetExprSize(std::move(argExprSize));
+}
+
+FEIRExprJavaNewArray::FEIRExprJavaNewArray(UniqueFEIRType argArrayType, UniqueFEIRExpr argExprSize, uint32 argTypeID)
+    : FEIRExpr(FEIRNodeKind::kExprJavaNewArray), typeID(argTypeID) {
+  SetArrayType(std::move(argArrayType));
+  SetExprSize(std::move(argExprSize));
+}
+
+FEIRExprJavaNewArray::FEIRExprJavaNewArray(UniqueFEIRType argArrayType, UniqueFEIRExpr argExprSize, uint32 argTypeID,
+                                           bool argIsRcPermanent)
+    : FEIRExpr(FEIRNodeKind::kExprJavaNewArray), typeID(argTypeID), isRcPermanent(argIsRcPermanent) {
+  SetArrayType(std::move(argArrayType));
+  SetExprSize(std::move(argExprSize));
+}
+
+std::unique_ptr<FEIRExpr> FEIRExprJavaNewArray::CloneImpl() const {
+  std::unique_ptr<FEIRExpr> expr = std::make_unique<FEIRExprJavaNewArray>(arrayType->Clone(), exprSize->Clone());
+  CHECK_NULL_FATAL(expr);
+  return expr;
+}
+
+std::vector<FEIRVar*> FEIRExprJavaNewArray::GetVarUsesImpl() const {
+  return exprSize->GetVarUses();
+}
+
+BaseNode *FEIRExprJavaNewArray::GenMIRNodeImpl(MIRBuilder &mirBuilder) const {
+  UniqueFEIRType elemType = FEIRBuilder::CreateArrayElemType(arrayType);
+  MIRType *elemMirType = elemType->GenerateMIRType(kSrcLangJava, false);
+  if (!elemMirType->IsScalarType()) {
+    elemMirType = GlobalTables::GetTypeTable().GetOrCreatePointerType(*elemMirType, PTY_ptr);
+  }
+  MIRType *jarrayType = GlobalTables::GetTypeTable().GetOrCreateJarrayType(*elemMirType);
+  (void)typeID;
+  MIRType *mirType = arrayType->GenerateMIRType(kSrcLangJava, false);
+  MIRType *ptrType = GlobalTables::GetTypeTable().GetOrCreatePointerType(*mirType, PTY_ref);
+  BaseNode *sizeNode = exprSize->GenMIRNode(mirBuilder);
+  Opcode opMalloc = isRcPermanent ? OP_gcpermallocjarray : OP_gcmallocjarray;
+  BaseNode *expr = mirBuilder.CreateExprJarrayMalloc(opMalloc, *ptrType, *jarrayType, sizeNode);
+  CHECK_NULL_FATAL(expr);
+  return expr;
+}
+
+void FEIRExprJavaNewArray::RegisterDFGNodes2CheckPointImpl(FEIRStmtCheckPoint &checkPoint) {
+  exprSize->RegisterDFGNodes2CheckPoint(checkPoint);
+}
+
+bool FEIRExprJavaNewArray::CalculateDefs4AllUsesImpl(FEIRStmtCheckPoint &checkPoint, FEIRUseDefChain &udChain) {
+  return exprSize->CalculateDefs4AllUses(checkPoint, udChain);
+}
+
+// ---------- FEIRExprJavaArrayLength ----------
+FEIRExprJavaArrayLength::FEIRExprJavaArrayLength(UniqueFEIRExpr argExprArray)
+    : FEIRExpr(FEIRNodeKind::kExprJavaArrayLength) {
+  SetExprArray(std::move(argExprArray));
+}
+
+std::unique_ptr<FEIRExpr> FEIRExprJavaArrayLength::CloneImpl() const {
+  UniqueFEIRExpr expr = std::make_unique<FEIRExprJavaArrayLength>(exprArray->Clone());
+  CHECK_NULL_FATAL(expr);
+  return expr;
+}
+
+std::vector<FEIRVar*> FEIRExprJavaArrayLength::GetVarUsesImpl() const {
+  return exprArray->GetVarUses();
+}
+
+BaseNode *FEIRExprJavaArrayLength::GenMIRNodeImpl(MIRBuilder &mirBuilder) const {
+  BaseNode *arrayNode = exprArray->GenMIRNode(mirBuilder);
+  MapleVector<BaseNode*> args(mirBuilder.GetCurrentFuncCodeMpAllocator()->Adapter());
+  args.push_back(arrayNode);
+  MIRType *retType = GlobalTables::GetTypeTable().GetInt32();
+  return mirBuilder.CreateExprIntrinsicop(INTRN_JAVA_ARRAY_LENGTH, OP_intrinsicop, *retType, std::move(args));
+}
+
+void FEIRExprJavaArrayLength::RegisterDFGNodes2CheckPointImpl(FEIRStmtCheckPoint &checkPoint) {
+  exprArray->RegisterDFGNodes2CheckPoint(checkPoint);
+}
+
+bool FEIRExprJavaArrayLength::CalculateDefs4AllUsesImpl(FEIRStmtCheckPoint &checkPoint, FEIRUseDefChain &udChain) {
+  return exprArray->CalculateDefs4AllUses(checkPoint, udChain);
+}
+
+// ---------- FEIRExprArrayLoad ----------
+FEIRExprArrayLoad::FEIRExprArrayLoad(UniqueFEIRExpr argExprArray, UniqueFEIRExpr argExprIndex,
+                                     UniqueFEIRType argTypeArray)
+    : FEIRExpr(FEIRNodeKind::kExprArrayLoad),
+      exprArray(std::move(argExprArray)),
+      exprIndex(std::move(argExprIndex)),
+      typeArray(std::move(argTypeArray)) {}
+
+std::unique_ptr<FEIRExpr> FEIRExprArrayLoad::CloneImpl() const {
+  std::unique_ptr<FEIRExpr> expr = std::make_unique<FEIRExprArrayLoad>(exprArray->Clone(), exprIndex->Clone(),
+                                                                       typeArray->Clone());
+  return expr;
+}
+
+BaseNode *FEIRExprArrayLoad::GenMIRNodeImpl(MIRBuilder &mirBuilder) const {
+  CHECK_FATAL(exprArray->GetKind() == kExprDRead, "only support dread expr for exprArray");
+  CHECK_FATAL(exprIndex->GetKind() == kExprDRead, "only support dread expr for exprIndex");
+  BaseNode *addrBase = exprArray->GenMIRNode(mirBuilder);
+  BaseNode *indexBn = exprIndex->GenMIRNode(mirBuilder);
+  MIRType *ptrMIRArrayType = typeArray->GenerateMIRType(false);
+
+  BaseNode *arrayExpr = mirBuilder.CreateExprArray(*ptrMIRArrayType, addrBase, indexBn);
+  UniqueFEIRType typeElem = typeArray->Clone();
+  (void)typeElem->ArrayDecrDim();
+
+  MIRType *mirElemType = typeElem->GenerateMIRType(true);
+  MIRType *ptrMIRElemType = GlobalTables::GetTypeTable().GetOrCreatePointerType(*mirElemType, PTY_ptr);
+  BaseNode *elemBn = mirBuilder.CreateExprIread(*mirElemType, *ptrMIRElemType, 0, arrayExpr);
+  return elemBn;
+}
+
+std::vector<FEIRVar*> FEIRExprArrayLoad::GetVarUsesImpl() const {
+  std::vector<FEIRVar*> ans;
+  for (FEIRVar *var : exprArray->GetVarUses()) {
+    ans.push_back(var);
+  }
+  for (FEIRVar *var : exprIndex->GetVarUses()) {
+    ans.push_back(var);
+  }
+  return ans;
+}
+
+void FEIRExprArrayLoad::RegisterDFGNodes2CheckPointImpl(FEIRStmtCheckPoint &checkPoint) {
+ 
exprArray->RegisterDFGNodes2CheckPoint(checkPoint); + exprIndex->RegisterDFGNodes2CheckPoint(checkPoint); +} + +bool FEIRExprArrayLoad::CalculateDefs4AllUsesImpl(FEIRStmtCheckPoint &checkPoint, FEIRUseDefChain &udChain) { + bool success = true; + success = success && exprArray->CalculateDefs4AllUses(checkPoint, udChain); + success = success && exprIndex->CalculateDefs4AllUses(checkPoint, udChain); + return success; +} + +// ---------- FEIRExprCStyleCast ---------- +FEIRExprCStyleCast::FEIRExprCStyleCast(MIRType *src, + MIRType *dest, + UniqueFEIRExpr sub, + bool isArr2Pty) + : FEIRExpr(FEIRNodeKind::kExprCStyleCast), + srcType(src), + destType(dest), + subExpr(std::move(sub)), + isArray2Pointer(isArr2Pty) { + CHECK_NULL_FATAL(dest); + type = FEIRTypeHelper::CreateTypeNative(*dest); +} + +std::unique_ptr FEIRExprCStyleCast::CloneImpl() const { + auto expr = std::make_unique(srcType, destType, + subExpr->Clone(), isArray2Pointer); + expr->SetRefName(refName); + return expr; +} + +std::vector FEIRExprCStyleCast::GetVarUsesImpl() const { + return subExpr->GetVarUses(); +} + +BaseNode *FEIRExprCStyleCast::GenMIRNodeImpl(MIRBuilder &mirBuilder) const { + BaseNode *sub = subExpr.get()->GenMIRNode(mirBuilder); + BaseNode *cvt = nullptr; + if (isArray2Pointer) { + auto *arrayType = static_cast(srcType); + ASSERT(arrayType != nullptr, "ERROR:null pointer!"); + ArrayNode *arrayNode = mirBuilder.CreateExprArray(*arrayType); + MIRSymbol *var = subExpr->GetVarUses().front()->GenerateMIRSymbol(mirBuilder); + arrayNode->GetNopnd().push_back(mirBuilder.CreateExprAddrof(0, *var)); + for (uint8 i = 0; i < arrayType->GetDim(); ++i) { + arrayNode->GetNopnd().push_back(mirBuilder.CreateIntConst(0, PTY_i32)); + } + arrayNode->SetNumOpnds(static_cast(arrayType->GetDim() + 1)); + return arrayNode; + } + auto isCvtNeeded = [](const MIRType &fromNode, const MIRType &toNode) { + if (fromNode.EqualTo(toNode)) { + return false; + } + return true; + }; + if (!isCvtNeeded(*srcType, *destType)) { + return sub; + } + if (sub != nullptr && srcType != nullptr && destType != nullptr) { + PrimType fromType = srcType->GetPrimType(); + PrimType toType = destType->GetPrimType(); + if (fromType == toType || toType == PTY_void || destType->GetKind() == kTypeUnion) { + return sub; + } + if (IsPrimitiveFloat(fromType) && IsPrimitiveInteger(toType)) { + if (toType == PTY_u1) { + MIRType *mirTypeU1 = GlobalTables::GetTypeTable().GetUInt1(); + BaseNode *zeroNode = (fromType == PTY_f32) ? 
mirBuilder.CreateFloatConst(0) : mirBuilder.CreateDoubleConst(0); + return mirBuilder.CreateExprCompare(OP_ne, *mirTypeU1, *srcType, sub, zeroNode); + } + cvt = mirBuilder.CreateExprTypeCvt(OP_trunc, *destType, *srcType, sub); + } else { + cvt = mirBuilder.CreateExprTypeCvt(OP_cvt, *destType, *srcType, sub); + } + } + return cvt; +} + +// ---------- FEIRExprAtomic ---------- +FEIRExprAtomic::FEIRExprAtomic(MIRType *ty, MIRType *ref, UniqueFEIRExpr obj, ASTAtomicOp atomOp) + : FEIRExpr(FEIRNodeKind::kExprAtomic), + mirType(ty), + refType(ref), + ptrType(GlobalTables::GetTypeTable().GetOrCreatePointerType(*refType)), + objExpr(std::move(obj)), + atomicOp(atomOp) {} + +std::unique_ptr FEIRExprAtomic::CloneImpl() const { + std::unique_ptr expr = std::make_unique(mirType, refType, objExpr->Clone(), atomicOp); + static_cast(expr.get())->SetVal1Type(val1Type); + if (valExpr1.get() != nullptr) { + static_cast(expr.get())->SetVal1Expr(valExpr1->Clone()); + } + static_cast(expr.get())->SetVal1Type(val2Type); + if (valExpr2.get() != nullptr) { + static_cast(expr.get())->SetVal1Expr(valExpr2->Clone()); + } + return expr; +} + +BaseNode *FEIRExprAtomic::GenMIRNodeImpl(MIRBuilder &mirBuilder) const { + MIRSymbol *retVar = nullptr; + if (atomicOp != kAtomicOpStore) { + retVar = val->GenerateMIRSymbol(mirBuilder); + } + MapleVector args(mirBuilder.GetCurrentFuncCodeMpAllocator()->Adapter()); + BaseNode *objNode = objExpr.get()->GenMIRNode(mirBuilder); + args.emplace_back(objNode); + bool retVoid = false; + if (atomicOp != kAtomicOpLoadN) { + args.emplace_back(valExpr1->GenMIRNode(mirBuilder)); + if (atomicOp == kAtomicOpExchange) { + args.emplace_back(valExpr2->GenMIRNode(mirBuilder)); + } + retVoid = (atomicOp == kAtomicOpExchangeN || + (atomicOp >= kAtomicOpAddFetch && atomicOp <= kAtomicOpFetchNand)) ? false : true; + } + static std::unordered_map intrinsicIDMap = { + {kAtomicOpLoadN, INTRN_C___atomic_load_n}, + {kAtomicOpLoad, INTRN_C___atomic_load}, + {kAtomicOpStoreN, INTRN_C___atomic_store_n}, + {kAtomicOpStore, INTRN_C___atomic_store}, + {kAtomicOpExchangeN, INTRN_C___atomic_exchange_n}, + {kAtomicOpExchange, INTRN_C___atomic_exchange}, + {kAtomicOpAddFetch, INTRN_C___atomic_add_fetch}, + {kAtomicOpSubFetch, INTRN_C___atomic_sub_fetch}, + {kAtomicOpAndFetch, INTRN_C___atomic_and_fetch}, + {kAtomicOpXorFetch, INTRN_C___atomic_xor_fetch}, + {kAtomicOpOrFetch, INTRN_C___atomic_or_fetch}, + {kAtomicOpFetchAdd, INTRN_C___atomic_fetch_add}, + {kAtomicOpFetchSub, INTRN_C___atomic_fetch_sub}, + {kAtomicOpFetchAnd, INTRN_C___atomic_fetch_and}, + {kAtomicOpFetchXor, INTRN_C___atomic_fetch_xor}, + {kAtomicOpFetchOr, INTRN_C___atomic_fetch_or}, + }; + ASSERT(intrinsicIDMap.find(atomicOp) != intrinsicIDMap.end(), "atomic opcode not yet supported!"); + MIRIntrinsicID intrinsicID = intrinsicIDMap[atomicOp]; + args.emplace_back(orderExpr->GenMIRNode(mirBuilder)); + TyIdx typeIndex(0); + if (atomicOp == kAtomicOpStoreN || atomicOp == kAtomicOpExchangeN) { + typeIndex = valExpr1->GetType()->GenerateMIRType()->GetTypeIndex(); + } + if (atomicOp == kAtomicOpExchange || atomicOp == kAtomicOpLoad || atomicOp == kAtomicOpStore) { + UniqueFEIRVar var = static_cast(valExpr1.get())->GetVar()->Clone(); + typeIndex = var->GenerateMIRSymbol(mirBuilder)->GetTyIdx(); + } + return (!retVoid) ? 
mirBuilder.CreateStmtIntrinsicCallAssigned(intrinsicID, std::move(args), retVar, typeIndex) + : mirBuilder.CreateStmtIntrinsicCall(intrinsicID, std::move(args), typeIndex); +} + +// ---------- FEIRStmtPesudoLabel ---------- +FEIRStmtPesudoLabel::FEIRStmtPesudoLabel(uint32 argLabelIdx) + : FEIRStmt(kStmtPesudoLabel), + labelIdx(argLabelIdx), + mirLabelIdx(0) {} + +std::list FEIRStmtPesudoLabel::GenMIRStmtsImpl(MIRBuilder &mirBuilder) const { + std::list ans; + StmtNode *stmtLabel = mirBuilder.CreateStmtLabel(mirLabelIdx); + ans.push_back(stmtLabel); + return ans; +} + +void FEIRStmtPesudoLabel::GenerateLabelIdx(const MIRBuilder &mirBuilder) { + std::stringstream ss; + ss << "label" << HIR2MPLEnv::GetInstance().GetGlobalLabelIdx(); + HIR2MPLEnv::GetInstance().IncrGlobalLabelIdx(); + mirLabelIdx = mirBuilder.GetOrCreateMIRLabel(ss.str()); +} + +std::string FEIRStmtPesudoLabel::DumpDotStringImpl() const { + std::stringstream ss; + ss << " " << id << ": " << GetFEIRNodeKindDescription(kind); + return ss.str(); +} + +// ---------- FEIRStmtPesudoLabel2 ---------- +LabelIdx FEIRStmtPesudoLabel2::GenMirLabelIdx(const MIRBuilder &mirBuilder, uint32 qIdx0, uint32 qIdx1) { + std::string label = "L" + std::to_string(qIdx0) + "_" + std::to_string(qIdx1); + return mirBuilder.GetOrCreateMIRLabel(label); +} + +std::pair FEIRStmtPesudoLabel2::GetLabelIdx() const { + return std::make_pair(labelIdxOuter, labelIdxInner); +} + +std::list FEIRStmtPesudoLabel2::GenMIRStmtsImpl(MIRBuilder &mirBuilder) const { + std::list ans; + StmtNode *stmtLabel = mirBuilder.CreateStmtLabel(GenMirLabelIdx(mirBuilder, labelIdxOuter, labelIdxInner)); + ans.push_back(stmtLabel); + return ans; +} + +// ---------- FEIRStmtPesudoLOC ---------- +FEIRStmtPesudoLOC::FEIRStmtPesudoLOC(uint32 argSrcFileIdx, uint32 argLineNumber) + : FEIRStmt(kStmtPesudoLOC) { + isAuxPre = true; + Loc loc = {argSrcFileIdx, argLineNumber, 0}; + SetSrcLoc(loc); +} + +std::list FEIRStmtPesudoLOC::GenMIRStmtsImpl(MIRBuilder &mirBuilder) const { + return std::list(); +} + +std::string FEIRStmtPesudoLOC::DumpDotStringImpl() const { + std::stringstream ss; + ss << " " << id << ": " << GetFEIRNodeKindDescription(kind); + return ss.str(); +} + +// ---------- FEIRStmtPesudoJavaTry ---------- +FEIRStmtPesudoJavaTry::FEIRStmtPesudoJavaTry() + : FEIRStmt(kStmtPesudoJavaTry) {} + +std::list FEIRStmtPesudoJavaTry::GenMIRStmtsImpl(MIRBuilder &mirBuilder) const { + std::list ans; + MapleVector vec(mirBuilder.GetCurrentFuncCodeMpAllocator()->Adapter()); + for (FEIRStmtPesudoLabel *stmtLabel : catchTargets) { + vec.push_back(stmtLabel->GetMIRLabelIdx()); + } + StmtNode *stmtTry = mirBuilder.CreateStmtTry(vec); + ans.push_back(stmtTry); + return ans; +} + +std::string FEIRStmtPesudoJavaTry::DumpDotStringImpl() const { + std::stringstream ss; + ss << " " << id << ": " << GetFEIRNodeKindDescription(kind); + return ss.str(); +} + +// ---------- FEIRStmtPesudoJavaTry2 ---------- +FEIRStmtPesudoJavaTry2::FEIRStmtPesudoJavaTry2(uint32 outerIdxIn) + : FEIRStmt(kStmtPesudoJavaTry), outerIdx(outerIdxIn) {} + +std::list FEIRStmtPesudoJavaTry2::GenMIRStmtsImpl(MIRBuilder &mirBuilder) const { + std::list ans; + MapleVector vec(mirBuilder.GetCurrentFuncCodeMpAllocator()->Adapter()); + for (uint32 target : catchLabelIdxVec) { + vec.push_back(FEIRStmtPesudoLabel2::GenMirLabelIdx(mirBuilder, outerIdx, target)); + } + StmtNode *stmtTry = mirBuilder.CreateStmtTry(vec); + ans.push_back(stmtTry); + return ans; +} + +std::string FEIRStmtPesudoJavaTry2::DumpDotStringImpl() const { + 
std::stringstream ss; + ss << " " << id << ": " << GetFEIRNodeKindDescription(kind); + return ss.str(); +} + +// ---------- FEIRStmtPesudoEndTry ---------- +FEIRStmtPesudoEndTry::FEIRStmtPesudoEndTry() + : FEIRStmt(kStmtPesudoEndTry) { + isAuxPost = true; +} + +std::list FEIRStmtPesudoEndTry::GenMIRStmtsImpl(MIRBuilder &mirBuilder) const { + std::list ans; + MemPool *mp = mirBuilder.GetCurrentFuncCodeMp(); + ASSERT(mp != nullptr, "mempool is nullptr"); + StmtNode *stmt = mp->New(OP_endtry); + ans.push_back(stmt); + return ans; +} + +std::string FEIRStmtPesudoEndTry::DumpDotStringImpl() const { + std::stringstream ss; + ss << " " << id << ": " << GetFEIRNodeKindDescription(kind); + return ss.str(); +} + +// ---------- FEIRStmtPesudoCatch ---------- +FEIRStmtPesudoCatch::FEIRStmtPesudoCatch(uint32 argLabelIdx) + : FEIRStmtPesudoLabel(argLabelIdx) {} + +std::list FEIRStmtPesudoCatch::GenMIRStmtsImpl(MIRBuilder &mirBuilder) const { + std::list ans; + StmtNode *stmtLabel = mirBuilder.CreateStmtLabel(mirLabelIdx); + ans.push_back(stmtLabel); + MapleVector vec(mirBuilder.GetCurrentFuncCodeMpAllocator()->Adapter()); + for (const UniqueFEIRType &type : catchTypes) { + MIRType *mirType = type->GenerateMIRType(kSrcLangJava, true); + vec.push_back(mirType->GetTypeIndex()); + } + StmtNode *stmtCatch = mirBuilder.CreateStmtCatch(vec); + ans.push_back(stmtCatch); + return ans; +} + +void FEIRStmtPesudoCatch::AddCatchTypeNameIdx(GStrIdx typeNameIdx) { + UniqueFEIRType type = std::make_unique(PTY_ref, typeNameIdx); + catchTypes.push_back(std::move(type)); +} + +std::string FEIRStmtPesudoCatch::DumpDotStringImpl() const { + std::stringstream ss; + ss << " " << id << ": " << GetFEIRNodeKindDescription(kind); + return ss.str(); +} + +// ---------- FEIRStmtPesudoCatch2 ---------- +FEIRStmtPesudoCatch2::FEIRStmtPesudoCatch2(uint32 qIdx0, uint32 qIdx1) + : FEIRStmtPesudoLabel2(qIdx0, qIdx1) {} + +std::list FEIRStmtPesudoCatch2::GenMIRStmtsImpl(MIRBuilder &mirBuilder) const { + std::list ans; + StmtNode *stmtLabel = mirBuilder.CreateStmtLabel( + FEIRStmtPesudoLabel2::GenMirLabelIdx(mirBuilder, GetLabelIdx().first, GetLabelIdx().second)); + ans.push_back(stmtLabel); + MapleVector vec(mirBuilder.GetCurrentFuncCodeMpAllocator()->Adapter()); + for (const UniqueFEIRType &type : catchTypes) { + MIRType *mirType = type->GenerateMIRType(kSrcLangJava, true); + vec.push_back(mirType->GetTypeIndex()); + } + StmtNode *stmtCatch = mirBuilder.CreateStmtCatch(vec); + ans.push_back(stmtCatch); + return ans; +} + +void FEIRStmtPesudoCatch2::AddCatchTypeNameIdx(GStrIdx typeNameIdx) { + UniqueFEIRType type; + if (typeNameIdx == FEUtils::GetVoidIdx()) { + type = std::make_unique(PTY_ref, FEUtilJava::GetJavaThrowableNameMplIdx()); + } else { + type = std::make_unique(PTY_ref, typeNameIdx); + } + catchTypes.push_back(std::move(type)); +} + +std::string FEIRStmtPesudoCatch2::DumpDotStringImpl() const { + std::stringstream ss; + ss << " " << id << ": " << GetFEIRNodeKindDescription(kind); + return ss.str(); +} + +FEIRStmtPesudoSafe::FEIRStmtPesudoSafe(bool isEnd) + : FEIRStmt(kStmtPesudoSafe), end(isEnd) {} + +std::list FEIRStmtPesudoSafe::GenMIRStmtsImpl(MIRBuilder &mirBuilder) const { + std::list ans; + MemPool *mp = mirBuilder.GetCurrentFuncCodeMp(); + ASSERT(mp != nullptr, "mempool is nullptr"); + Opcode op = end ? 
OP_endsafe : OP_safe; + StmtNode *stmt = mp->New(op); + ans.push_back(stmt); + FEFunction &curFEFunc = FEManager::GetCurrentFEFunction(); + if (end) { + if (curFEFunc.GetSafeRegionFlag().empty() || !curFEFunc.GetSafeRegionFlag().top()) { + CHECK_FATAL(false, "pop safe region error"); + } + curFEFunc.GetSafeRegionFlag().pop(); + } else { + curFEFunc.GetSafeRegionFlag().push(true); + } + return ans; +} + +std::string FEIRStmtPesudoSafe::DumpDotStringImpl() const { + std::stringstream ss; + ss << " " << id << ": " << GetFEIRNodeKindDescription(kind); + return ss.str(); +} + +FEIRStmtPesudoUnsafe::FEIRStmtPesudoUnsafe(bool isEnd) + : FEIRStmt(kStmtPesudoUnsafe), end(isEnd) {} + +std::list FEIRStmtPesudoUnsafe::GenMIRStmtsImpl(MIRBuilder &mirBuilder) const { + std::list ans; + MemPool *mp = mirBuilder.GetCurrentFuncCodeMp(); + ASSERT(mp != nullptr, "mempool is nullptr"); + Opcode op = end ? OP_endunsafe : OP_unsafe; + StmtNode *stmt = mp->New(op); + ans.push_back(stmt); + FEFunction &curFEFunc = FEManager::GetCurrentFEFunction(); + if (end) { + if (curFEFunc.GetSafeRegionFlag().empty() || curFEFunc.GetSafeRegionFlag().top()) { + CHECK_FATAL(false, "pop unsafe region error"); + } + curFEFunc.GetSafeRegionFlag().pop(); + } else { + curFEFunc.GetSafeRegionFlag().push(false); + } + return ans; +} + +std::string FEIRStmtPesudoUnsafe::DumpDotStringImpl() const { + std::stringstream ss; + ss << " " << id << ": " << GetFEIRNodeKindDescription(kind); + return ss.str(); +} + +// ---------- FEIRStmtPesudoComment ---------- +FEIRStmtPesudoComment::FEIRStmtPesudoComment(FEIRNodeKind argKind) + : FEIRStmt(argKind) { + isAuxPre = true; +} + +FEIRStmtPesudoComment::FEIRStmtPesudoComment(const std::string &argContent) + : FEIRStmt(kStmtPesudoComment), + content(argContent) { + isAuxPre = true; +} + +std::list FEIRStmtPesudoComment::GenMIRStmtsImpl(MIRBuilder &mirBuilder) const { + std::list ans; + StmtNode *stmt = mirBuilder.CreateStmtComment(content); + ans.push_back(stmt); + return ans; +} + +std::string FEIRStmtPesudoComment::DumpDotStringImpl() const { + std::stringstream ss; + ss << " " << id << ": " << GetFEIRNodeKindDescription(kind); + return ss.str(); +} + +// ---------- FEIRStmtPesudoCommentForInst ---------- +FEIRStmtPesudoCommentForInst::FEIRStmtPesudoCommentForInst() + : FEIRStmtPesudoComment(kStmtPesudoCommentForInst) { + isAuxPre = true; +} + +std::list FEIRStmtPesudoCommentForInst::GenMIRStmtsImpl(MIRBuilder &mirBuilder) const { + std::list ans; + return ans; +} + +std::string FEIRStmtPesudoCommentForInst::DumpDotStringImpl() const { + std::stringstream ss; + ss << " " << id << ": " << GetFEIRNodeKindDescription(kind); + return ss.str(); +} + +// ---------- FEIRStmtIAssign ---------- +std::list FEIRStmtIAssign::GenMIRStmtsImpl(MIRBuilder &mirBuilder) const { + std::list ans; + MIRType *mirType = addrType->GenerateMIRTypeAuto(); + CHECK_FATAL(mirType->IsMIRPtrType(), "Must be ptr type"); + BaseNode *addrNode = addrExpr->GenMIRNode(mirBuilder); + BaseNode *baseNode = baseExpr->GenMIRNode(mirBuilder); + if (fieldID != 0) { + MIRType * baseType = static_cast(mirType)->GetPointedType(); + CHECK_FATAL((baseType->GetKind() == MIRTypeKind::kTypeStruct || baseType->GetKind() == MIRTypeKind::kTypeUnion), + "If fieldID is not 0, then the computed address must correspond to a structure"); + InsertNonnullChecking(mirBuilder, *baseType, ans); + CheckNonnullArgsAndRetForFuncPtr(mirBuilder, *baseType); + CheckBoundaryArgsAndRetForFuncPtr(mirBuilder, *baseType); + } + AssignBoundaryVarAndChecking(mirBuilder, 
ans); + IassignNode *iAssignNode = mirBuilder.CreateStmtIassign(*mirType, fieldID, addrNode, baseNode); + ans.emplace_back(iAssignNode); + ENCChecker::CheckBoundaryLenFinalAssign(mirBuilder, addrType, fieldID, loc); + ENCChecker::CheckBoundaryLenFinalAddr(mirBuilder, addrExpr, loc); + return ans; +} + +void FEIRStmtIAssign::InsertNonnullChecking(MIRBuilder &mirBuilder, const MIRType &baseType, + std::list &ans) const { + if (!FEOptions::GetInstance().IsNpeCheckDynamic()) { + return; + } + FieldID tmpID = fieldID; + FieldPair fieldPair = static_cast(baseType).TraverseToFieldRef(tmpID); + MIRType *srcType = baseExpr->GetType()->GenerateMIRTypeAuto(); + MIRType *dstType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(fieldPair.second.first); + ENCChecker::CheckNonnullFieldInStruct(*srcType, *dstType, loc); + if (fieldPair.second.second.GetAttr(FLDATTR_nonnull)) { + if (ENCChecker::HasNullExpr(baseExpr)) { + FE_ERR(kLncErr, loc, "null assignment of nonnull pointer"); + return; + } + UniqueFEIRStmt stmt = std::make_unique(OP_assignassertnonnull, baseExpr->Clone()); + std::list stmts = stmt->GenMIRStmts(mirBuilder); + ans.splice(ans.end(), stmts); + } +} + +// ---------- FEIRStmtDoWhile ---------- +bool FEIRStmtDoWhile::IsFallThroughImpl() const { + WARN(kLncWarn, "%s:%d stmt[%s] need to be lowed when building bb", + FEManager::GetModule().GetFileNameFromFileNum(loc.fileIdx).c_str(), loc.line, + GetFEIRNodeKindDescription(kind).c_str()); + return false; +} + +std::list FEIRStmtDoWhile::GenMIRStmtsImpl(MIRBuilder &mirBuilder) const { + std::list stmts; + auto *whileStmtNode = mirBuilder.GetCurrentFuncCodeMp()->New(opcode); + BaseNode *mirCond = condExpr->GenMIRNode(mirBuilder); + whileStmtNode->SetOpnd(mirCond, 0); + auto *bodyBlock = mirBuilder.GetCurrentFuncCodeMp()->New(); + for (auto &stmt : bodyStmts) { + for (auto mirStmt : stmt->GenMIRStmts(mirBuilder)) { + bodyBlock->AddStatement(mirStmt); + } + } + whileStmtNode->SetBody(bodyBlock); + stmts.emplace_back(whileStmtNode); + return stmts; +} + +std::list FEIRStmtBreak::GenMIRStmtsImpl(MIRBuilder &mirBuilder) const { + std::list stmts; + CHECK_FATAL(!breakLabelName.empty(), "labelName is null!"); + LabelIdx labelIdx = mirBuilder.GetOrCreateMIRLabel(breakLabelName); + GotoNode *gotoNode = mirBuilder.CreateStmtGoto(OP_goto, labelIdx); + stmts.emplace_back(gotoNode); + return stmts; +} + +std::list FEIRStmtContinue::GenMIRStmtsImpl(MIRBuilder &mirBuilder) const { + std::list stmts; + CHECK_FATAL(!labelName.empty(), "labelName is null!"); + LabelIdx labelIdx = mirBuilder.GetOrCreateMIRLabel(labelName); + GotoNode *gotoNode = mirBuilder.CreateStmtGoto(OP_goto, labelIdx); + stmts.emplace_back(gotoNode); + return stmts; +} + +std::list FEIRStmtLabel::GenMIRStmtsImpl(MIRBuilder &mirBuilder) const { + return std::list{mirBuilder.CreateStmtLabel(mirBuilder.GetOrCreateMIRLabel(labelName))}; +} + +FEIRStmtAtomic::FEIRStmtAtomic(UniqueFEIRExpr expr) + : FEIRStmt(FEIRNodeKind::kStmtAtomic), + atomicExpr(std::move(expr)) {} + +std::list FEIRStmtAtomic::GenMIRStmtsImpl(MIRBuilder &mirBuilder) const { + std::list stmts; + FEIRExprAtomic *atom = static_cast(atomicExpr.get()); + auto stmt = static_cast(atom->GenMIRNode(mirBuilder)); + stmts.emplace_back(stmt); + return stmts; +} + +bool FEIRStmtGCCAsm::HandleConstraintPlusQm(MIRBuilder &mirBuilder, AsmNode *asmNode, uint32 index, + std::list &stmts, std::list &initStmts) const { + if (std::get<1>(outputs[index]) != "+Q" && std::get<1>(outputs[index]) != "+m") { + return false; + } + FieldID fieldID = 
outputsExprs[index]->GetFieldID();
+  MIRSymbol *sym = outputsExprs[index]->GetVarUses().front()->GenerateMIRSymbol(mirBuilder);
+  UniqueFEIRVar asmOut = outputsExprs[index]->GetVarUses().front()->Clone();
+  MIRSymbol *localSym = nullptr;
+  UniqueFEIRVar localAsmOut = nullptr;
+  BaseNode *node;
+  if (outputsExprs[index]->GetKind() == kExprDRead) {
+    if (asmOut->IsGlobal()) {
+      auto pair = HandleGlobalAsmOutOperand(asmOut, fieldID, stmts, mirBuilder);
+      localSym = pair.first;
+      localAsmOut = pair.second->Clone();
+      // '+' means that asm out operand is both read and written, copy the initial value of global var into the
+      // local temp var and then add local temp var into the input list.
+      auto stmt = FEIRBuilder::CreateStmtDAssign(localAsmOut->Clone(), outputsExprs[index]->Clone());
+      std::list<StmtNode*> nodes = stmt->GenMIRStmts(mirBuilder);
+      initStmts.splice(initStmts.end(), nodes);
+    }
+    node = static_cast<BaseNode*>(mirBuilder.CreateExprAddrof(fieldID, localSym != nullptr ? *localSym : *sym));
+  } else if (outputsExprs[index]->GetKind() == kExprIRead) {
+    FEIRExprIRead *iread = static_cast<FEIRExprIRead*>(outputsExprs[index].get());
+    if (iread->GetFieldID() == 0) {
+      node = iread->GetClonedOpnd()->GenMIRNode(mirBuilder);
+    } else {
+      auto addrOfExpr = std::make_unique(iread->GetClonedPtrType(), iread->GetFieldID(),
+                                         iread->GetClonedOpnd());
+      node = addrOfExpr->GenMIRNode(mirBuilder);
+    }
+    auto pair = HandleAsmOutOperandWithPtrType(iread, stmts, mirBuilder);
+    localSym = pair.first;
+    localAsmOut = pair.second->Clone();
+    fieldID = 0;
+  } else {
+    CHECK_FATAL(false, "FEIRStmtGCCAsm NYI.");
+  }
+
+  UStrIdx strIdx = GlobalTables::GetUStrTable().GetOrCreateStrIdxFromName(std::get<1>(outputs[index]));
+  asmNode->PushOpnd(node);
+  asmNode->inputConstraints.emplace_back(strIdx);
+
+  CallReturnPair retPair(localSym != nullptr ?
localSym->GetStIdx() : sym->GetStIdx(), RegFieldPair(fieldID, 0)); + asmNode->asmOutputs.emplace_back(retPair); + asmNode->outputConstraints.emplace_back(strIdx); + return true; +} + +std::pair FEIRStmtGCCAsm::HandleAsmOutOperandWithPtrType(const FEIRExprIRead *ireadExpr, + std::list &stmts, + MIRBuilder &mirBuilder) const { + UniqueFEIRVar localAsmOut = FEIRBuilder::CreateVarNameForC(FEUtils::GetSequentialName("asm_out_"), + ireadExpr->GetClonedRetType()); + MIRSymbol *localSym = localAsmOut->GenerateLocalMIRSymbol(mirBuilder); + UniqueFEIRExpr srcExpr = FEIRBuilder::CreateExprDRead(localAsmOut->Clone()); + auto stmt = FEIRBuilder::CreateStmtIAssign(ireadExpr->GetClonedPtrType(), ireadExpr->GetClonedOpnd(), + std::move(srcExpr), ireadExpr->GetFieldID()); + std::list node = stmt->GenMIRStmts(mirBuilder); + stmts.splice(stmts.end(), node); + return std::make_pair(localSym, localAsmOut->Clone()); +} + +std::pair FEIRStmtGCCAsm::HandleGlobalAsmOutOperand(const UniqueFEIRVar &asmOut, + const FieldID fieldID, + std::list &stmts, + MIRBuilder &mirBuilder) const { + MIRSymbol *localSym = nullptr; + UniqueFEIRExpr srcExpr; + UniqueFEIRStmt stmt; + UniqueFEIRVar localAsmOut = FEIRBuilder::CreateVarNameForC(FEUtils::GetSequentialName("asm_out_"), + asmOut->GetType()->Clone(), false); + localSym = localAsmOut->GenerateLocalMIRSymbol(mirBuilder); + if (fieldID) { + MIRStructType *structType = static_cast(asmOut->GetType()->GenerateMIRTypeAuto()); + FieldPair fieldPair = structType->TraverseToField(fieldID); + UniqueFEIRType fieldType = FEIRTypeHelper::CreateTypeNative(*GlobalTables::GetTypeTable() + .GetTypeFromTyIdx(fieldPair.second.first)); + srcExpr = FEIRBuilder::CreateExprDReadAggField(localAsmOut->Clone(), fieldID, fieldType->Clone()); + stmt = FEIRBuilder::CreateStmtDAssignAggField(asmOut->Clone(), std::move(srcExpr), fieldID); + } else { + srcExpr = FEIRBuilder::CreateExprDRead(localAsmOut->Clone()); + stmt = FEIRBuilder::CreateStmtDAssign(asmOut->Clone(), std::move(srcExpr)); + } + std::list node = stmt->GenMIRStmts(mirBuilder); + stmts.splice(stmts.end(), node); + return std::make_pair(localSym, localAsmOut->Clone()); +} + +std::list FEIRStmtGCCAsm::GenMIRStmtsImpl(MIRBuilder &mirBuilder) const { + std::list stmts; + std::list initStmts; + MemPool *mp = mirBuilder.GetCurrentFuncCodeMp(); + AsmNode *asmNode = mp->New(mirBuilder.GetCurrentFuncCodeMpAllocator()); + asmNode->asmString = asmStr; + for (uint32 i = 0; i < inputs.size(); ++i) { + BaseNode *node = inputsExprs[i]->GenMIRNode(mirBuilder); + asmNode->PushOpnd(node); + UStrIdx strIdx = GlobalTables::GetUStrTable().GetOrCreateStrIdxFromName(inputs[i].second); + asmNode->inputConstraints.emplace_back(strIdx); + } + for (uint32 i = 0; i < outputs.size(); ++i) { + if (HandleConstraintPlusQm(mirBuilder, asmNode, i, stmts, initStmts)) { + continue; + } + FieldID fieldID = 0; + MIRSymbol *sym = nullptr; + MIRSymbol *localSym = nullptr; + UniqueFEIRVar localAsmOut = nullptr; + UniqueFEIRVar asmOut; + if (outputsExprs[i]->GetKind() == kExprDRead) { + FEIRExprDRead *dread = static_cast(outputsExprs[i].get()); + fieldID = dread->GetFieldID(); + sym = dread->GetVarUses().front()->GenerateMIRSymbol(mirBuilder); + asmOut = dread->GetVarUses().front()->Clone(); + if (asmOut->IsGlobal()) { + auto pair = HandleGlobalAsmOutOperand(asmOut, fieldID, stmts, mirBuilder); + localSym = pair.first; + localAsmOut = pair.second->Clone(); + } + } else if (outputsExprs[i]->GetKind() == kExprIRead) { + FEIRExprIRead *iread = static_cast(outputsExprs[i].get()); + 
fieldID = iread->GetFieldID(); + auto pair = HandleAsmOutOperandWithPtrType(iread, stmts, mirBuilder); + localSym = pair.first; + localAsmOut = pair.second->Clone(); + fieldID = 0; + } else { + CHECK_FATAL(false, "FEIRStmtGCCAsm NYI."); + } + + CallReturnPair retPair(localSym != nullptr ? localSym->GetStIdx() : sym->GetStIdx(), RegFieldPair(fieldID, 0)); + asmNode->asmOutputs.emplace_back(retPair); + UStrIdx strIdx = GlobalTables::GetUStrTable().GetOrCreateStrIdxFromName(std::get<1>(outputs[i])); + asmNode->outputConstraints.emplace_back(strIdx); + + // If this is a read/write, copy the initial value into the temp before and added to the input list + if (std::get<2>(outputs[i])) { + auto stmt = FEIRBuilder::CreateStmtDAssign(localAsmOut != nullptr ? localAsmOut->Clone() : asmOut->Clone(), + outputsExprs[i]->Clone()); + std::list node = stmt->GenMIRStmts(mirBuilder); + initStmts.splice(initStmts.end(), node); + + AddrofNode *rNode = mirBuilder.CreateExprDread(localSym != nullptr ? *localSym : *sym); + asmNode->PushOpnd(static_cast(rNode)); + asmNode->inputConstraints.emplace_back(strIdx); + } + } + for (uint32 i = 0; i < clobbers.size(); ++i) { + UStrIdx strIdx = GlobalTables::GetUStrTable().GetOrCreateStrIdxFromName(clobbers[i]); + asmNode->clobberList.emplace_back(strIdx); + } + for (uint32 i = 0; i < labels.size(); ++i) { + LabelIdx label = mirBuilder.GetOrCreateMIRLabel(labels[i]); + asmNode->gotoLabels.emplace_back(label); + } + if (isVolatile) { + asmNode->SetQualifier(kASMvolatile); + } + if (isGoto) { + asmNode->SetQualifier(kASMgoto); + } + stmts.emplace_front(asmNode); + if (!initStmts.empty()) { + stmts.splice(stmts.begin(), initStmts); + } + return stmts; +} +} // namespace maple diff --git a/src/hir2mpl/common/src/feir_type.cpp b/src/hir2mpl/common/src/feir_type.cpp new file mode 100644 index 0000000000000000000000000000000000000000..9052fa4f9de3fbfaf32df7652b7cdafd84a2d65c --- /dev/null +++ b/src/hir2mpl/common/src/feir_type.cpp @@ -0,0 +1,518 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "feir_type.h" +#include +#include "global_tables.h" +#include "mpl_logging.h" +#include "fe_manager.h" +#include "fe_config_parallel.h" +#include "fe_utils.h" + +namespace maple { +// ---------- FEIRType ---------- +std::map> FEIRType::langConfig = FEIRType::InitLangConfig(); + +FEIRType::FEIRType(FEIRTypeKind argKind) + : kind(argKind), isZero(false), srcLang(kSrcLangJava) {} + +void FEIRType::CopyFromImpl(const FEIRType &type) { + kind = type.kind; +} + +bool FEIRType::IsEqualToImpl(const std::unique_ptr &argType) const { + CHECK_NULL_FATAL(argType.get()); + if (kind != argType.get()->kind) { + return false; + } + return IsEqualTo(*(argType.get())); +} + +bool FEIRType::IsEqualToImpl(const FEIRType &argType) const { + if (kind == argType.kind && isZero == argType.isZero) { + return true; + } else { + return false; + } +} + +std::unique_ptr FEIRType::NewType(FEIRTypeKind argKind) { + switch (argKind) { + case kFEIRTypeDefault: + return std::make_unique(); + default: + CHECK_FATAL(false, "unsupported FEIRType Kind"); + return std::make_unique(); + } +} + +std::map> FEIRType::InitLangConfig() { + std::map> ans; + ans[kSrcLangJava] = std::make_tuple(true, PTY_ref); + ans[kSrcLangC] = std::make_tuple(true, PTY_ref); + return ans; +} + +MIRType *FEIRType::GenerateMIRTypeAuto(MIRSrcLang argSrcLang) const { + return GenerateMIRTypeAutoImpl(argSrcLang); +} + +MIRType *FEIRType::GenerateMIRTypeAutoImpl(MIRSrcLang argSrcLang) const { + HIR2MPL_PARALLEL_FORBIDDEN(); + auto it = langConfig.find(argSrcLang); + if (it == langConfig.cend()) { + CHECK_FATAL(false, "unsupported language"); + return nullptr; + } + PrimType pTy = GetPrimType(); + return GenerateMIRType(std::get<0>(it->second), pTy == PTY_begin ? std::get<1>(it->second) : pTy); +} + +// ---------- FEIRTypeDefault ---------- +FEIRTypeDefault::FEIRTypeDefault() + : FEIRTypeDefault(PTY_void, GStrIdx(0), 0) {} + +FEIRTypeDefault::FEIRTypeDefault(PrimType argPrimType) + : FEIRTypeDefault(argPrimType, GStrIdx(0), 0) {} + +FEIRTypeDefault::FEIRTypeDefault(PrimType argPrimType, const GStrIdx &argTypeNameIdx) + : FEIRTypeDefault(argPrimType, argTypeNameIdx, 0) { + std::string typeName = GlobalTables::GetStrTable().GetStringFromStrIdx(argTypeNameIdx); + uint8 typeDim = FEUtils::GetDim(typeName); + if (typeDim != 0) { + dim = typeDim; + typeName = typeName.substr(dim); + typeNameIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(typeName); + primType = FEUtils::GetPrimType(typeNameIdx); + } +} + +FEIRTypeDefault::FEIRTypeDefault(PrimType argPrimType, const GStrIdx &argTypeNameIdx, TypeDim argDim) + : FEIRType(kFEIRTypeDefault), + primType(argPrimType), + typeNameIdx(argTypeNameIdx), + dim(argDim) {} + +void FEIRTypeDefault::CopyFromImpl(const FEIRType &type) { + CHECK_FATAL(type.GetKind() == kFEIRTypeDefault, "invalid FEIRType Kind"); + FEIRType::CopyFromImpl(type); + const FEIRTypeDefault &typeDefault = static_cast(type); + typeNameIdx = typeDefault.typeNameIdx; + dim = typeDefault.dim; +} + +std::unique_ptr FEIRTypeDefault::CloneImpl() const { + std::unique_ptr type = std::make_unique(primType, typeNameIdx, dim); + return type; +} + +MIRType *FEIRTypeDefault::GenerateMIRTypeImpl(bool usePtr, PrimType ptyPtr) const { + HIR2MPL_PARALLEL_FORBIDDEN(); + return GenerateMIRTypeInternal(typeNameIdx, usePtr, ptyPtr); +} + +TypeDim FEIRTypeDefault::ArrayIncrDimImpl(TypeDim delta) { + CHECK_FATAL(FEConstants::kDimMax - dim >= delta, "dim delta is too large"); + dim += delta; + return dim; +} + +TypeDim 
FEIRTypeDefault::ArrayDecrDimImpl(TypeDim delta) { + CHECK_FATAL(dim >= delta, "dim delta is too large"); + dim -= delta; + return dim; +} + +bool FEIRTypeDefault::IsEqualToImpl(const FEIRType &argType) const { + if (!FEIRType::IsEqualToImpl(argType)) { + return false; + } + const FEIRTypeDefault &argTypeDefault = static_cast(argType); + if (typeNameIdx == argTypeDefault.typeNameIdx && dim == argTypeDefault.dim && primType == argTypeDefault.primType) { + return true; + } else { + return false; + } +} + +bool FEIRTypeDefault::IsEqualToImpl(const std::unique_ptr &argType) const { + CHECK_NULL_FATAL(argType.get()); + return IsEqualToImpl(*(argType.get())); +} + +uint32 FEIRTypeDefault::HashImpl() const { + return static_cast( + std::hash{}(primType) + std::hash{}(typeNameIdx) + std::hash{}(dim)); +} + +bool FEIRTypeDefault::IsScalarImpl() const { + return (primType != PTY_ref && IsPrimitiveScalar(primType) && dim == 0); +} + +PrimType FEIRTypeDefault::GetPrimTypeImpl() const { + if (dim == 0) { + return primType; + } else { + return PTY_ref; + } +} + +void FEIRTypeDefault::SetPrimTypeImpl(PrimType pt) { + if (dim == 0) { + primType = pt; + } else { + if (pt == PTY_ref) { + primType = pt; + } else { + WARN(kLncWarn, "dim is set to zero"); + dim = 0; + } + } +} + +void FEIRTypeDefault::LoadFromJavaTypeName(const std::string &typeName, bool inMpl) { + uint32 dimLocal = 0; + std::string baseName = FETypeManager::GetBaseTypeName(typeName, dimLocal, inMpl); + CHECK_FATAL(dimLocal <= FEConstants::kDimMax, "invalid array type %s (dim is too big)", typeName.c_str()); + dim = static_cast(dimLocal); + if (baseName.length() == 1) { + typeNameIdx = GStrIdx(0); + switch (baseName[0]) { + case 'I': + primType = PTY_i32; + break; + case 'J': + primType = PTY_i64; + break; + case 'F': + primType = PTY_f32; + break; + case 'D': + primType = PTY_f64; + break; + case 'Z': + primType = PTY_u1; + break; + case 'B': + primType = PTY_i8; + break; + case 'S': + primType = PTY_i16; + break; + case 'C': + primType = PTY_u16; + break; + case 'V': + primType = PTY_void; + break; + default: + CHECK_FATAL(false, "unsupported java type %s", typeName.c_str()); + } + } else if (baseName[0] == 'L') { + primType = PTY_ref; + baseName = inMpl ? 
baseName : namemangler::EncodeName(baseName); + typeNameIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(baseName); + } +} + +void FEIRTypeDefault::LoadFromASTTypeName(const std::string &typeName) { + const static std::map mapASTTypeNameToType = { + {"bool", PTY_u1}, + {"uint8", PTY_u8}, + {"uint16", PTY_u16}, + {"uint32", PTY_u32}, + {"uint64", PTY_u64}, + {"int8", PTY_i8}, + {"int16", PTY_i16}, + {"int32", PTY_i32}, + {"int64", PTY_i64}, + {"float", PTY_f32}, + {"double", PTY_f64}, + {"void", PTY_void} + }; + auto it = mapASTTypeNameToType.find(typeName); + if (it != mapASTTypeNameToType.end()) { + primType = it->second; + } else { + primType = PTY_ref; + typeNameIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(typeName); + } +} + +MIRType *FEIRTypeDefault::GenerateMIRTypeForPrim() const { + switch (primType) { + case PTY_i8: + return GlobalTables::GetTypeTable().GetInt8(); + case PTY_i16: + return GlobalTables::GetTypeTable().GetInt16(); + case PTY_i32: + return GlobalTables::GetTypeTable().GetInt32(); + case PTY_i64: + return GlobalTables::GetTypeTable().GetInt64(); + case PTY_f32: + return GlobalTables::GetTypeTable().GetFloat(); + case PTY_f64: + return GlobalTables::GetTypeTable().GetDouble(); + case PTY_u1: + return GlobalTables::GetTypeTable().GetUInt1(); + case PTY_u8: + return GlobalTables::GetTypeTable().GetUInt8(); + case PTY_u16: + return GlobalTables::GetTypeTable().GetUInt16(); + case PTY_u32: + return GlobalTables::GetTypeTable().GetUInt32(); + case PTY_u64: + return GlobalTables::GetTypeTable().GetUInt64(); + case PTY_void: + return GlobalTables::GetTypeTable().GetVoid(); + case PTY_a32: + return GlobalTables::GetTypeTable().GetAddr32(); + case PTY_ref: + return GlobalTables::GetTypeTable().GetRef(); + case PTY_ptr: + return GlobalTables::GetTypeTable().GetPtr(); + default: + CHECK_FATAL(false, "unsupported prim type"); + } + return GlobalTables::GetTypeTable().GetDynundef(); +} + +MIRType *FEIRTypeDefault::GenerateMIRTypeInternal(const GStrIdx &argTypeNameIdx, bool usePtr) const { + MIRType *baseType = nullptr; + MIRType *type = nullptr; + if (primType == PTY_ref) { + if (argTypeNameIdx.GetIdx() == 0) { + baseType = GlobalTables::GetTypeTable().GetRef(); + } else { + bool isCreate = false; + baseType = FEManager::GetTypeManager().GetOrCreateClassOrInterfaceType(GStrIdx(argTypeNameIdx), false, + FETypeFlag::kSrcUnknown, isCreate); + } + if (dim > 0) { + baseType = FEManager::GetTypeManager().GetOrCreatePointerType(*baseType); + } + type = FEManager::GetTypeManager().GetOrCreateArrayType(*baseType, dim); + } else { + baseType = GenerateMIRTypeForPrim(); + type = FEManager::GetTypeManager().GetOrCreateArrayType(*baseType, dim); + } + if (IsScalar() || !IsPreciseRefType()) { + return type; + } + return usePtr ? 
FEManager::GetTypeManager().GetOrCreatePointerType(*type) : type; +} + +MIRType *FEIRTypeDefault::GenerateMIRTypeInternal(const GStrIdx &argTypeNameIdx, bool usePtr, PrimType ptyPtr) const { + MIRType *baseType = nullptr; + MIRType *type = nullptr; + bool baseTypeUseNoPtr = (IsScalarPrimType(primType) || argTypeNameIdx == 0); + bool typeUseNoPtr = !IsRef() || (!IsArray() && !IsPrecise()); + if (baseTypeUseNoPtr) { + baseType = GenerateMIRTypeForPrim(); + type = FEManager::GetTypeManager().GetOrCreateArrayType(*baseType, dim, ptyPtr); + } else { + bool isCreate = false; + baseType = FEManager::GetTypeManager().GetOrCreateClassOrInterfaceType(argTypeNameIdx, false, + FETypeFlag::kSrcUnknown, isCreate); + if (dim > 0) { + baseType = FEManager::GetTypeManager().GetOrCreatePointerType(*baseType, ptyPtr); + } + type = FEManager::GetTypeManager().GetOrCreateArrayType(*baseType, dim, ptyPtr); + } + if (typeUseNoPtr) { + return type; + } + return usePtr ? FEManager::GetTypeManager().GetOrCreatePointerType(*type, ptyPtr) : type; +} + +std::string FEIRTypeDefault::GetTypeNameImpl() const { + if (dim == 0) { + return GlobalTables::GetStrTable().GetStringFromStrIdx(typeNameIdx); + } + std::string name; + switch (srcLang) { + case kSrcLangJava: { + for (TypeDim i = 0; i < dim; i++) { + (void)name.append("A"); + } + (void)name.append(GlobalTables::GetStrTable().GetStringFromStrIdx(typeNameIdx)); + return name; + } + default: + CHECK_FATAL(false, "undefined language"); + return ""; + } +} + +// ---------- FEIRTypeByName ---------- +FEIRTypeByName::FEIRTypeByName(PrimType argPrimType, const std::string &argTypeName, TypeDim argDim) + : FEIRTypeDefault(argPrimType, GStrIdx(0), argDim), + typeName(argTypeName) { + kind = kFEIRTypeByName; +} + +std::unique_ptr FEIRTypeByName::CloneImpl() const { + std::unique_ptr newType = std::make_unique(primType, typeName, dim); + return newType; +} + +MIRType *FEIRTypeByName::GenerateMIRTypeImpl(bool usePtr, PrimType ptyPtr) const { + HIR2MPL_PARALLEL_FORBIDDEN(); + GStrIdx nameIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(typeName); + return GenerateMIRTypeInternal(nameIdx, usePtr, ptyPtr); +} + +bool FEIRTypeByName::IsEqualToImpl(const FEIRType &argType) const { + if (!FEIRTypeDefault::IsEqualToImpl(argType)) { + return false; + } + const FEIRTypeByName &argTypeName = static_cast(argType); + if (typeName.compare(argTypeName.typeName) == 0) { + return true; + } else { + return false; + } +} + +uint32 FEIRTypeByName::HashImpl() const { + return static_cast(std::hash{}(typeName)); +} + +bool FEIRTypeByName::IsScalarImpl() const { + return false; +} + +// ---------- FEIRTypePointer ---------- +FEIRTypePointer::FEIRTypePointer(std::unique_ptr argBaseType, PrimType argPrimType) + : FEIRType(kFEIRTypePointer), + primType(argPrimType) { + CHECK_FATAL(argBaseType != nullptr, "input type is nullptr"); + baseType = std::move(argBaseType); +} + +std::unique_ptr FEIRTypePointer::CloneImpl() const { + std::unique_ptr newType = std::make_unique(baseType->Clone(), primType); + return newType; +} + +MIRType *FEIRTypePointer::GenerateMIRTypeImpl(bool usePtr, PrimType ptyPtr) const { + MIRType *mirBaseType = baseType->GenerateMIRType(usePtr, ptyPtr); + return FEManager::GetTypeManager().GetOrCreatePointerType(*mirBaseType, ptyPtr); +} + +bool FEIRTypePointer::IsEqualToImpl(const FEIRType &argType) const { + const FEIRTypePointer &argTypePointer = static_cast(argType); + return baseType->IsEqualTo(argTypePointer.baseType); +} + +uint32 FEIRTypePointer::HashImpl() const { + 
ASSERT(baseType != nullptr, "base type is nullptr"); + return baseType->Hash(); +} + +bool FEIRTypePointer::IsScalarImpl() const { + return false; +} + +TypeDim FEIRTypePointer::ArrayIncrDimImpl(TypeDim delta) { + ASSERT(baseType != nullptr, "base type is nullptr"); + return baseType->ArrayIncrDim(delta); +} + +TypeDim FEIRTypePointer::ArrayDecrDimImpl(TypeDim delta) { + ASSERT(baseType != nullptr, "base type is nullptr"); + return baseType->ArrayDecrDim(delta); +} + +PrimType FEIRTypePointer::GetPrimTypeImpl() const { + return primType; +} + +void FEIRTypePointer::SetPrimTypeImpl(PrimType pt) { + CHECK_FATAL(false, "PrimType %d should not run here", pt); +} + +// ---------- FEIRTypeNative ---------- +FEIRTypeNative::FEIRTypeNative(MIRType &argMIRType) + : FEIRType(kFEIRTypeNative), + mirType(argMIRType) { + kind = kFEIRTypeNative; + // Right now, FEIRTypeNative is only used for c-language. + srcLang = kSrcLangC; +} + +PrimType FEIRTypeNative::GetPrimTypeImpl() const { + return mirType.GetPrimType(); +} + +void FEIRTypeNative::SetPrimTypeImpl(PrimType pt) { + mirType.SetPrimType(pt); +} + +void FEIRTypeNative::CopyFromImpl(const FEIRType &type) { + CHECK_FATAL(type.GetKind() == kFEIRTypeNative, "invalid opration"); + mirType = *(type.GenerateMIRTypeAuto()); +} + +MIRType *FEIRTypeNative::GenerateMIRTypeAutoImpl() const { + return &mirType; +} + +std::unique_ptr FEIRTypeNative::CloneImpl() const { + std::unique_ptr newType = std::make_unique(mirType); + return newType; +} + +MIRType *FEIRTypeNative::GenerateMIRTypeImpl(bool usePtr, PrimType ptyPtr) const { + return &mirType; +} + +bool FEIRTypeNative::IsEqualToImpl(const FEIRType &argType) const { + if (argType.GetKind() != kFEIRTypeNative) { + return false; + } + const FEIRTypeNative &argTypeNative = static_cast(argType); + return &argTypeNative.mirType == &mirType; +} + +uint32 FEIRTypeNative::HashImpl() const { + return static_cast(mirType.GetHashIndex()); +} + +std::string FEIRTypeNative::GetTypeNameImpl() const { + return mirType.GetName(); +} + +bool FEIRTypeNative::IsScalarImpl() const { + return mirType.IsScalarType(); +} + +bool FEIRTypeNative::IsRefImpl() const { + return mirType.GetPrimType() == PTY_ref; +} + +bool FEIRTypeNative::IsArrayImpl() const { + return mirType.GetKind() == kTypeArray; +} + +TypeDim FEIRTypeNative::ArrayIncrDimImpl(TypeDim delta) { + CHECK_FATAL(false, "Should not get here"); + return delta; +} + +TypeDim FEIRTypeNative::ArrayDecrDimImpl(TypeDim delta) { + CHECK_FATAL(false, "Should not get here"); + return delta; +} +} // namespace maple diff --git a/src/hir2mpl/common/src/feir_type_helper.cpp b/src/hir2mpl/common/src/feir_type_helper.cpp new file mode 100644 index 0000000000000000000000000000000000000000..00bd483ebe5c281f7413e945456643bf6270e751 --- /dev/null +++ b/src/hir2mpl/common/src/feir_type_helper.cpp @@ -0,0 +1,143 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "feir_type_helper.h" +#include "fe_type_manager.h" + +namespace maple { +UniqueFEIRType FEIRTypeHelper::CreateTypeByPrimType(PrimType primType, TypeDim dim, bool usePtr) { + UniqueFEIRType type = std::make_unique(primType, GStrIdx(0), dim); + if (usePtr) { + return FEIRTypeHelper::CreatePointerType(std::move(type)); + } else { + return type; + } +} + +UniqueFEIRType FEIRTypeHelper::CreateTypeByJavaName(const std::string &typeName, bool inMpl, bool usePtr) { + uint32 dim = 0; + std::string baseName = FETypeManager::GetBaseTypeName(typeName, dim, inMpl); + CHECK_FATAL(dim <= FEConstants::kDimMax, "invalid array type %s (dim is too big)", typeName.c_str()); + uint8 dim8 = static_cast(dim); + UniqueFEIRType newType; + if (baseName.length() == 1) { + newType = CreateTypeByJavaNamePrim(baseName[0], dim8); + CHECK_FATAL(newType != nullptr, "unsupported java type name %s: ", typeName.c_str()); + } else if (baseName[0] == 'L') { + baseName = inMpl ? baseName : namemangler::EncodeName(baseName); + GStrIdx typeNameIdx = GlobalTables::GetStrTable().GetStrIdxFromName(baseName); + if (typeNameIdx == 0) { + newType = std::make_unique(PTY_ref, baseName, dim8); + } else { + newType = std::make_unique(PTY_ref, typeNameIdx, dim8); + } + } else { + CHECK_FATAL(false, "unsupported java type name %s: ", typeName.c_str()); + return nullptr; + } + if (usePtr) { + return FEIRTypeHelper::CreatePointerType(std::move(newType), PTY_ref); + } else { + return newType; + } +} + +UniqueFEIRType FEIRTypeHelper::CreatePointerType(UniqueFEIRType baseType, PrimType primType) { + UniqueFEIRType newType = std::make_unique(std::move(baseType), primType); + return newType; +} + +UniqueFEIRType FEIRTypeHelper::CreateTypeByJavaNamePrim(char primTypeFlag, uint8 dim8) { + PrimType primType = PTY_unknown; + switch (primTypeFlag) { + case 'I': + primType = PTY_i32; + break; + case 'J': + primType = PTY_i64; + break; + case 'F': + primType = PTY_f32; + break; + case 'D': + primType = PTY_f64; + break; + case 'Z': + primType = PTY_u1; + break; + case 'B': + primType = PTY_i8; + break; + case 'S': + primType = PTY_i16; + break; + case 'C': + primType = PTY_u16; + break; + case 'V': + primType = PTY_void; + break; + default: + return nullptr; + } + return std::make_unique(primType, GStrIdx(0), dim8); +} + +UniqueFEIRType FEIRTypeHelper::CreateTypeByDimIncr(const UniqueFEIRType &srcType, uint8 delta, bool usePtr, + PrimType primType) { + UniqueFEIRType type = srcType->Clone(); + (void)type->ArrayIncrDim(delta); + if (!usePtr || type->GetKind() == FEIRTypeKind::kFEIRTypePointer) { + return type; + } else { + return CreatePointerType(std::move(type), primType); + } +} + +UniqueFEIRType FEIRTypeHelper::CreateTypeByDimDecr(const UniqueFEIRType &srcType, uint8 delta) { + UniqueFEIRType type = srcType->Clone(); + uint8 dim = type->ArrayDecrDim(delta); + if (srcType->GetKind() == FEIRTypeKind::kFEIRTypePointer && dim == 0 && IsPrimitiveScalar(type->GetPrimType())) { + const FEIRTypePointer *ptrType = static_cast(type.get()); + ASSERT(ptrType != nullptr, "nullptr check"); + return ptrType->GetBaseType()->Clone(); + } else { + return type; + } +} + +UniqueFEIRType FEIRTypeHelper::CreateTypeByGetAddress(const UniqueFEIRType &srcType, PrimType primType) { + UniqueFEIRType type = std::make_unique(srcType->Clone(), primType); + return type; +} + +UniqueFEIRType FEIRTypeHelper::CreateTypeByDereferrence(const UniqueFEIRType &srcType) { + CHECK_FATAL(srcType->GetKind() == FEIRTypeKind::kFEIRTypePointer, "input is not pointer type"); + const 
FEIRTypePointer *ptrSrcType = static_cast(srcType.get()); + ASSERT(ptrSrcType != nullptr, "nullptr check"); + return ptrSrcType->GetBaseType()->Clone(); +} + +UniqueFEIRType FEIRTypeHelper::CreateTypeDefault(PrimType primType, const GStrIdx &typeNameIdx, TypeDim dim) { + UniqueFEIRType type = std::make_unique(primType, typeNameIdx, dim); + CHECK_FATAL(type->IsValid(), "invalid type"); + return type; +} + +UniqueFEIRType FEIRTypeHelper::CreateTypeNative(MIRType &mirType) { + UniqueFEIRType type = std::make_unique(mirType); + CHECK_FATAL(type->IsValid(), "invalid type"); + return type; +} +} // namespace maple \ No newline at end of file diff --git a/src/hir2mpl/common/src/feir_type_infer.cpp b/src/hir2mpl/common/src/feir_type_infer.cpp new file mode 100644 index 0000000000000000000000000000000000000000..8fb89a50fccdf2d0b2ee25c6bebea344b39cc023 --- /dev/null +++ b/src/hir2mpl/common/src/feir_type_infer.cpp @@ -0,0 +1,299 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "feir_type_infer.h" +#include "fe_type_hierarchy.h" +#include "feir_type_helper.h" +#include "feir_var_type_scatter.h" +#include "feir_dfg.h" + +namespace maple { +// ---------- FEIRTypeMergeHelper ---------- +FEIRTypeMergeHelper::FEIRTypeMergeHelper() + : firstType(true) {} + +FEIRTypeMergeHelper::FEIRTypeMergeHelper(const UniqueFEIRType &argTypeDefault) + : firstType(true) { + CHECK_FATAL(argTypeDefault.get() != nullptr, "nullptr check"); + ResetTypeDefault(argTypeDefault); +} + +void FEIRTypeMergeHelper::Reset() { + firstType = true; + type.reset(); + error = ""; +} + +void FEIRTypeMergeHelper::ResetTypeDefault(const UniqueFEIRType &argTypeDefault) { + typeDefault = argTypeDefault->Clone(); +} + +bool FEIRTypeMergeHelper::MergeType(const UniqueFEIRType &argType, bool parent) { + if (firstType) { + type = argType->Clone(); + firstType = false; + return true; + } + return MergeType(type, argType, parent); +} + +UniqueFEIRType FEIRTypeMergeHelper::GetResult() const { + return type->Clone(); +} + +void FEIRTypeMergeHelper::SetDefaultType(UniqueFEIRType &typeDst) { + SetType(typeDst, typeDefault); +} + +void FEIRTypeMergeHelper::SetType(UniqueFEIRType &typeDst, const UniqueFEIRType &typeSrc) { + ASSERT(typeDst.get() != nullptr, "nullptr check for type dst"); + ASSERT(typeSrc.get() != nullptr, "nullptr check for type src"); + if (typeDst.get()->GetKind() == typeSrc.get()->GetKind()) { + typeDst.get()->CopyFrom(*(typeSrc.get())); + } else { + typeDst = typeSrc->Clone(); + } +} + +bool FEIRTypeMergeHelper::MergeType(UniqueFEIRType &typeDst, const UniqueFEIRType &typeSrc, bool parent) { + ASSERT(typeDst.get() != nullptr, "nullptr check for type dst"); + ASSERT(typeSrc.get() != nullptr, "nullptr check for type src"); + ASSERT(typeDefault.get() != nullptr, "nullptr check for type default"); + FEIRTypeKind kindDst = typeDst.get()->GetKind(); + FEIRTypeKind kindSrc = typeSrc.get()->GetKind(); + // boundary: equal + if (typeDst->IsEqualTo(typeSrc)) { + return true; + } + // boundary: zero + if 
(typeSrc->IsZero()) { + return true; + } + if (typeDst->IsZero()) { + SetType(typeDst, typeSrc); + return true; + } + if (kindDst == FEIRTypeKind::kFEIRTypePointer && kindSrc == FEIRTypeKind::kFEIRTypePointer) { + FEIRTypePointer *ptrTypeDst = static_cast(typeDst.get()); + FEIRTypePointer *ptrTypeSrc = static_cast(typeSrc.get()); + UniqueFEIRType mergedType = ptrTypeDst->Clone(); + bool success = MergeType(mergedType, ptrTypeSrc->GetBaseType(), parent); + if (success) { + ptrTypeDst->SetBaseType(std::move(mergedType)); + } + return success; + } + if (kindDst == FEIRTypeKind::kFEIRTypePointer && kindSrc != FEIRTypeKind::kFEIRTypePointer) { + return false; + } + if (kindDst != FEIRTypeKind::kFEIRTypePointer && kindSrc == FEIRTypeKind::kFEIRTypePointer) { + return false; + } + // boundary: default + if (typeSrc->IsEqualTo(typeDefault)) { + if (parent) { + SetDefaultType(typeDst); + } + return true; + } + if (typeDst->IsEqualTo(typeDefault)) { + if (!parent) { + SetType(typeDst, typeSrc); + } + return true; + } + // hierarchy check + FEIRTypeDefault *ptrTypeDst = static_cast(typeDst.get()); + FEIRTypeDefault *ptrTypeSrc = static_cast(typeSrc.get()); + GStrIdx idxDst = ptrTypeDst->GetTypeNameIdx(); + GStrIdx idxSrc = ptrTypeSrc->GetTypeNameIdx(); + if (ptrTypeDst->GetDim() != ptrTypeSrc->GetDim()) { + return false; + } + if (ptrTypeDst->GetPrimType() != ptrTypeSrc->GetPrimType()) { + return false; + } + if (FETypeHierarchy::GetInstance().IsParentOf(idxDst, idxSrc)) { + // dst is parent of src + if (!parent) { + (void)ptrTypeDst->SetTypeNameIdx(idxSrc); + } + } else if (FETypeHierarchy::GetInstance().IsParentOf(idxSrc, idxDst)) { + // src is parent of dst + if (parent) { + (void)ptrTypeDst->SetTypeNameIdx(idxSrc); + } + } else { + if (parent) { + SetDefaultType(typeDst); + } else { + return false; + } + } + return true; +} + +// ---------- FEIRTypeInfer ---------- +FEIRTypeInfer::FEIRTypeInfer(MIRSrcLang argSrcLang, const FEIRDefUseChain &argMapDefUse) + : srcLang(argSrcLang), + mapDefUse(argMapDefUse) { + LoadTypeDefault(); +} + +void FEIRTypeInfer::LoadTypeDefault() { + switch (srcLang) { + case kSrcLangJava: + typeDefault = FEIRTypeHelper::CreateTypeByJavaName("Ljava/lang/Object;", false, false); + mergeHelper.ResetTypeDefault(typeDefault); + break; + default: + typeDefault.reset(); + break; + } +} + +void FEIRTypeInfer::Reset() { + visitVars.clear(); + withCircle = false; + first = true; +} + +UniqueFEIRType FEIRTypeInfer::GetTypeForVarUse(const UniqueFEIRVar &varUse) { + CHECK_NULL_FATAL(varUse); + if (varUse->GetType()->IsPrecise()) { + return varUse->GetType()->Clone(); + } + if (varUse->GetTrans().get() == nullptr) { + return typeDefault->Clone(); + } + if (visitVars.find(&varUse) != visitVars.end()) { + withCircle = true; + return UniqueFEIRType(); + } + CHECK_FATAL(visitVars.insert(&varUse).second, "visitVars insert failed"); + bool isFirst = first; + first = false; + UniqueFEIRType ans = GetTypeByTransForVarUse(varUse); + if ((isFirst || (!withCircle)) && ans != nullptr) { + varUse->SetType(ans->Clone()); + return ans->Clone(); + } else { + return UniqueFEIRType(); + } +} + +UniqueFEIRType FEIRTypeInfer::GetTypeForVarDef(UniqueFEIRVar &varDef) { + CHECK_NULL_FATAL(varDef); + if (varDef->GetType()->IsPreciseRefType()) { + return varDef->GetType()->Clone(); + } + if (varDef->GetType()->IsZero()) { + return UniqueFEIRType(); + } + if (visitVars.find(&varDef) != visitVars.end()) { + withCircle = true; + return UniqueFEIRType(); + } + CHECK_FATAL(visitVars.insert(&varDef).second, 
"visitVars insert failed"); + auto it = mapDefUse.find(&varDef); + CHECK_FATAL(it != mapDefUse.end(), "use not found"); + std::unordered_set useTypes; + for (const UniqueFEIRVar *use : it->second) { + CHECK_NULL_FATAL(use); + UniqueFEIRType useType = GetTypeForVarUse(*use); + if (useType != nullptr && (!useType->IsEqualTo(varDef->GetType()))) { + FEIRTypeKey key(useType); + if (useTypes.find(key) == useTypes.end()) { + CHECK_FATAL(useTypes.insert(key).second, "useTypes insert failed"); + } + } + } + if (useTypes.size() > 0) { + FEIRTypeMergeHelper mh(typeDefault); + for (const FEIRTypeKey &typeKey : useTypes) { + UniqueFEIRType defType = typeKey.GetType()->Clone(); + bool success = mh.MergeType(defType, false); + CHECK_FATAL(success, "merge type error"); + } + return mh.GetTypeClone(); + } else { + return UniqueFEIRType(); + } +} + +void FEIRTypeInfer::ProcessVarDef(UniqueFEIRVar &varDef) { + CHECK_NULL_FATAL(varDef); + auto it = mapDefUse.find(&varDef); + if (it == mapDefUse.end()) { + return; + } + std::unordered_set useTypes; + for (const UniqueFEIRVar *use : it->second) { + CHECK_NULL_FATAL(use); + UniqueFEIRType useType = GetTypeForVarUse(*use); + if (useType != nullptr && (!useType->IsEqualTo(varDef->GetType()))) { + FEIRTypeKey key(useType); + if (useTypes.find(key) == useTypes.end()) { + CHECK_FATAL(useTypes.insert(key).second, "useTypes insert failed"); + } + } + } + if (useTypes.size() == 1 && !(varDef->GetType()->IsPrecise())) { + varDef->SetType((*(useTypes.begin())).GetType()->Clone()); + return; + } + if (useTypes.size() > 0) { + std::unique_ptr varNew = + std::make_unique(static_cast(varDef->Clone())); + FEIRVarTypeScatter *ptrVarNew = varNew.get(); + ptrVarNew->SetType(varDef->GetType()->Clone()); + for (const FEIRTypeKey &typeKey : useTypes) { + ptrVarNew->AddScatterType(typeKey.GetType()); + } + varDef.reset(varNew.release()); + } +} + +UniqueFEIRType FEIRTypeInfer::GetTypeByTransForVarUse(const UniqueFEIRVar &varUse) { + CHECK_NULL_FATAL(varUse); + FEIRVarTrans *ptrVarUseTrans = varUse->GetTrans().get(); + CHECK_NULL_FATAL(ptrVarUseTrans); + UniqueFEIRVar &varDef = ptrVarUseTrans->GetVar(); + CHECK_NULL_FATAL(varDef); + UniqueFEIRType defType = GetTypeForVarDef(varDef); + if (defType != nullptr) { + return ptrVarUseTrans->GetType(defType); + } else { + return defType; + } +} + +Opcode FEIRTypeCvtHelper::ChooseCvtOpcodeByFromTypeAndToType(const FEIRType &fromType, const FEIRType &toType) { + if (IsRetypeable(fromType, toType)) { + return OP_retype; + } else if (IsIntCvt2Ref(fromType, toType)) { + return OP_cvt; + } else { + return OP_undef; + } +} + +bool FEIRTypeCvtHelper::IsRetypeable(const FEIRType &fromType, const FEIRType &toType) { + return (fromType.GetPrimType() == toType.GetPrimType()); +} + +bool FEIRTypeCvtHelper::IsIntCvt2Ref(const FEIRType &fromType, const FEIRType &toType) { + return (IsPrimitiveInteger(fromType.GetPrimType()) && toType.IsPreciseRefType()); +} +} // namespace maple diff --git a/src/hir2mpl/common/src/feir_var.cpp b/src/hir2mpl/common/src/feir_var.cpp new file mode 100644 index 0000000000000000000000000000000000000000..082030b15ee2deda6beefc25efdd2c3dcc773125 --- /dev/null +++ b/src/hir2mpl/common/src/feir_var.cpp @@ -0,0 +1,201 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "feir_var.h" +#include "feir_stmt.h" +#include "global_tables.h" +#include "feir_type_helper.h" +#include "fe_config_parallel.h" +#include "enhance_c_checker.h" + +namespace maple { +// ---------- FEIRVarTrans ---------- +FEIRVarTrans::FEIRVarTrans(FEIRVarTransKind argKind, std::unique_ptr &argVar) + : kind(argKind), var(argVar) { + param.dimDelta = 0; + switch (kind) { + case kFEIRVarTransArrayDimIncr: + case kFEIRVarTransArrayDimDecr: + param.dimDelta = 1; + break; + default: + break; + } +} + +FEIRVarTrans::FEIRVarTrans(FEIRVarTransKind argKind, std::unique_ptr &argVar, uint8 dimDelta) + : kind(argKind), var(argVar) { + param.dimDelta = 0; + switch (kind) { + case kFEIRVarTransArrayDimIncr: + case kFEIRVarTransArrayDimDecr: + param.dimDelta = dimDelta; + break; + default: + break; + } +} + +UniqueFEIRType FEIRVarTrans::GetType(const UniqueFEIRType &type, PrimType primType, bool usePtr) { + ASSERT(type != nullptr, "nullptr check"); + UniqueFEIRType typeNew = type->Clone(); + switch (kind) { + case kFEIRVarTransDirect: + break; + case kFEIRVarTransArrayDimIncr: { + (void)typeNew->ArrayIncrDim(param.dimDelta); + if (typeNew->GetKind() != FEIRTypeKind::kFEIRTypePointer && usePtr) { + return FEIRTypeHelper::CreatePointerType(typeNew->Clone(), primType); + } else { + return typeNew; + } + } + case kFEIRVarTransArrayDimDecr: { + (void)typeNew->ArrayDecrDim(param.dimDelta); + if (typeNew->GetKind() == FEIRTypeKind::kFEIRTypePointer) { + FEIRTypePointer *ptrTypeNew = static_cast(typeNew.get()); + ASSERT(ptrTypeNew->GetBaseType() != nullptr, "nullptr check"); + if (ptrTypeNew->GetBaseType()->IsScalar()) { + return ptrTypeNew->GetBaseType()->Clone(); + } + } else if (usePtr && !typeNew->IsScalar()) { + return FEIRTypeHelper::CreatePointerType(typeNew->Clone(), primType); + } else { + return typeNew; + } + break; + } + default: + CHECK_FATAL(false, "unsupported trans kind"); + } + return typeNew; +} + +// ---------- FEIRVar ---------- +FEIRVar::FEIRVar(FEIRVarKind argKind) + : kind(argKind), + isGlobal(false), + isDef(false), + type(std::make_unique()) { + boundaryLenExpr = nullptr; +} + +FEIRVar::FEIRVar(FEIRVarKind argKind, std::unique_ptr argType) + : FEIRVar(argKind) { + boundaryLenExpr = nullptr; + SetType(std::move(argType)); +} + +FEIRVar::~FEIRVar() {} + +std::unique_ptr FEIRVar::Clone() const { + auto var = CloneImpl(); + var->SetGlobal(isGlobal); + var->SetAttrs(genAttrs); + var->SetSectionAttr(sectionAttr); + if (boundaryLenExpr != nullptr) { + var->SetBoundaryLenExpr(boundaryLenExpr->Clone()); + } + var->SetSrcLoc(loc); + return var; +} + +void FEIRVar::SetBoundaryLenExpr(std::unique_ptr expr) { + boundaryLenExpr = std::move(expr); +} + +const std::unique_ptr &FEIRVar::GetBoundaryLenExpr() const { + return boundaryLenExpr; +} + +MIRSymbol *FEIRVar::GenerateGlobalMIRSymbolImpl(MIRBuilder &builder) const { + HIR2MPL_PARALLEL_FORBIDDEN(); + MIRType *mirType = type->GenerateMIRTypeAuto(); + std::string name = GetName(*mirType); + auto attrs = genAttrs.ConvertToTypeAttrs(); + ENCChecker::InsertBoundaryLenExprInAtts(attrs, boundaryLenExpr); + // do not allow extern var override global var + MIRSymbol *gSymbol = 
builder.GetGlobalDecl(name); + if (gSymbol != nullptr && attrs.GetAttr(ATTR_extern)) { + return gSymbol; + } + GStrIdx nameIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(name); + gSymbol = builder.GetOrCreateGlobalDecl(name, *mirType); + // Set global var attr once + std::size_t pos = name.find("_7C"); + if (pos != std::string::npos) { + std::string containerName = name.substr(0, pos); + GStrIdx strIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(containerName); + TyIdx containerTypeIdx = GlobalTables::GetTypeNameTable().GetTyIdxFromGStrIdx(strIdx); + if (containerTypeIdx != TyIdx(0)) { + MIRType *containerType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(containerTypeIdx); + if (containerType->IsStructType()) { + // for external class + MIRStructType *mirContainer = static_cast(containerType); + if (!mirContainer->IsLocal()) { + gSymbol->SetStorageClass(kScExtern); + } + // for not defined field use + if (gSymbol->GetAttrs().GetAttrFlag() == 0) { + auto t = TypeAttrs(); + t.SetAttr(ATTR_static); + gSymbol->AddAttrs(t); + } + for (auto field : mirContainer->GetStaticFields()) { + if (field.first == nameIdx) { + gSymbol->AddAttrs(field.second.second.ConvertToTypeAttrs()); + } + } + } + } + } + if (attrs.GetAttr(ATTR_extern)) { + gSymbol->SetStorageClass(MIRStorageClass::kScExtern); + attrs.ResetAttr(AttrKind::ATTR_extern); + } else { + if (gSymbol->GetStorageClass() == MIRStorageClass::kScInvalid) { + gSymbol->SetStorageClass(MIRStorageClass::kScGlobal); + } + } + return gSymbol; +} + +MIRSymbol *FEIRVar::GenerateLocalMIRSymbolImpl(MIRBuilder &builder) const { + HIR2MPL_PARALLEL_FORBIDDEN(); + MIRType *mirType = type->GenerateMIRTypeAuto(); + std::string name = GetName(*mirType); + MIRSymbol *mirSymbol = builder.GetOrCreateLocalDecl(name, *mirType); + auto attrs = genAttrs.ConvertToTypeAttrs(); + ENCChecker::InsertBoundaryLenExprInAtts(attrs, boundaryLenExpr); + if (attrs.GetAttr(ATTR_static)) { + attrs.ResetAttr(ATTR_static); + mirSymbol->SetStorageClass(MIRStorageClass::kScPstatic); + } + mirSymbol->AddAttrs(attrs); + return mirSymbol; +} + +MIRSymbol *FEIRVar::GenerateMIRSymbolImpl(MIRBuilder &builder) const { + if (isGlobal) { + return GenerateGlobalMIRSymbol(builder); + } else { + return GenerateLocalMIRSymbol(builder); + } +} + +void FEIRVar::SetType(std::unique_ptr argType) { + CHECK_FATAL(argType != nullptr, "input type is nullptr"); + type = std::move(argType); +} +} // namespace maple diff --git a/src/hir2mpl/common/src/feir_var_name.cpp b/src/hir2mpl/common/src/feir_var_name.cpp new file mode 100644 index 0000000000000000000000000000000000000000..8da2483cd47276ead9e44da78d0d2b0c8f1cb8fc --- /dev/null +++ b/src/hir2mpl/common/src/feir_var_name.cpp @@ -0,0 +1,56 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "feir_var_name.h" +#include "feir_stmt.h" + +namespace maple { +// ---------- FEIRVarName ---------- +std::string FEIRVarName::GetNameImpl(const MIRType &mirType) const { + std::stringstream ss; + ASSERT(nameIdx.GetIdx() != 0, "invalid name idx"); + std::string name = GlobalTables::GetStrTable().GetStringFromStrIdx(nameIdx); + ss << name; + if (withType) { + ss << "_"; + if (type->IsScalar()) { + ss << GetPrimTypeName(type->GetPrimType()); + } else { + ss << "R" << mirType.GetTypeIndex().GetIdx(); + } + } + return ss.str(); +} + +std::string FEIRVarName::GetNameRawImpl() const { + return GlobalTables::GetStrTable().GetStringFromStrIdx(nameIdx); +} + +std::unique_ptr FEIRVarName::CloneImpl() const { + return std::make_unique(nameIdx, type->Clone(), withType); +} + +bool FEIRVarName::EqualsToImpl(const std::unique_ptr &var) const { + if (var->GetKind() != kind) { + return false; + } + FEIRVarName *ptrVarName = static_cast(var.get()); + ASSERT(ptrVarName != nullptr, "ptr var is nullptr"); + return ptrVarName->nameIdx == nameIdx && GetType()->IsEqualTo(ptrVarName->GetType()); +} + +uint32 FEIRVarName::HashImpl() const { + return static_cast(std::hash{}(nameIdx)); +} +} // namespace maple \ No newline at end of file diff --git a/src/hir2mpl/common/src/feir_var_reg.cpp b/src/hir2mpl/common/src/feir_var_reg.cpp new file mode 100644 index 0000000000000000000000000000000000000000..c21a9ce99e42b9430cab5c80abca668499ee0fe5 --- /dev/null +++ b/src/hir2mpl/common/src/feir_var_reg.cpp @@ -0,0 +1,103 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "feir_var_reg.h" +#include +#include +#include "mir_type.h" +#include "fe_options.h" + +namespace maple { +std::string FEIRVarReg::GetNameImpl(const MIRType &mirType) const { + thread_local static std::stringstream ss(""); + ss.str(""); + ss << "Reg"; + ss << regNum; + ss << "_"; + if (type->IsPreciseRefType()) { + ss << "R" << mirType.GetTypeIndex().GetIdx(); + } else { + if (type->GetSrcLang() == kSrcLangJava) { + ss << GetPrimTypeJavaName(type->GetPrimType()); + } else { + ss << GetPrimTypeName(type->GetPrimType()); + } + } + return ss.str(); +} + +std::string FEIRVarReg::GetNameRawImpl() const { + thread_local static std::stringstream ss(""); + ss.str(""); + ss << "Reg" << regNum; + return ss.str(); +} + +MIRSymbol *FEIRVarReg::GenerateLocalMIRSymbolImpl(MIRBuilder &builder) const { + HIR2MPL_PARALLEL_FORBIDDEN(); + MIRType *mirType = type->GenerateMIRTypeAuto(); + std::string name = GetName(*mirType); + MIRSymbol *ret = builder.GetOrCreateLocalDecl(name, *mirType); + return ret; +} + +std::unique_ptr FEIRVarReg::CloneImpl() const { + std::unique_ptr var = std::make_unique(regNum, type->Clone()); + return var; +} + +bool FEIRVarReg::EqualsToImpl(const std::unique_ptr &var) const { + if (var->GetKind() != kind) { + return false; + } + FEIRVarReg *ptrVarReg = static_cast(var.get()); + ASSERT(ptrVarReg != nullptr, "ptr var is nullptr"); + return ptrVarReg->regNum == regNum; +} + +uint32 FEIRVarReg::HashImpl() const { + return static_cast(std::hash{}(regNum)); +} + +// ========== FEIRVarAccumulator ========== +std::string FEIRVarAccumulator::GetNameImpl(const MIRType &mirType) const { + thread_local static std::stringstream ss(""); + ss.str(""); + ss << "Reg"; + ss << "_Accumulator"; + ss << "_"; + if (type->IsPreciseRefType()) { + ss << "R" << mirType.GetTypeIndex().GetIdx(); + } else { + if (type->GetSrcLang() == kSrcLangJava) { + ss << GetPrimTypeJavaName(type->GetPrimType()); + } else { + ss << GetPrimTypeName(type->GetPrimType()); + } + } + return ss.str(); +} + +std::string FEIRVarAccumulator::GetNameRawImpl() const { + thread_local static std::stringstream ss(""); + ss.str(""); + ss << "Reg_Accumulator"; + return ss.str(); +} + +std::unique_ptr FEIRVarAccumulator::CloneImpl() const { + std::unique_ptr var = std::make_unique(regNum, type->Clone()); + return var; +} +} // namespace maple \ No newline at end of file diff --git a/src/hir2mpl/common/src/feir_var_type_scatter.cpp b/src/hir2mpl/common/src/feir_var_type_scatter.cpp new file mode 100644 index 0000000000000000000000000000000000000000..ad4d77ab4830033d5011624a945f62069a7619fa --- /dev/null +++ b/src/hir2mpl/common/src/feir_var_type_scatter.cpp @@ -0,0 +1,82 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "feir_var_type_scatter.h" + +namespace maple { +FEIRVarTypeScatter::FEIRVarTypeScatter(UniqueFEIRVar argVar) + : FEIRVar(FEIRVarKind::kFEIRVarTypeScatter) { + ASSERT(argVar != nullptr, "nullptr check"); + ASSERT(argVar->GetKind() != FEIRVarKind::kFEIRVarTypeScatter, "invalid input var type"); + var = std::move(argVar); +} + +void FEIRVarTypeScatter::AddScatterType(const UniqueFEIRType &type) { + if (var->GetType()->IsEqualTo(type)) { + return; + } + FEIRTypeKey key(type); + if (scatterTypes.find(key) == scatterTypes.end()) { + CHECK_FATAL(scatterTypes.insert(key).second, "scatterTypes insert failed"); + } +} + +std::string FEIRVarTypeScatter::GetNameImpl(const MIRType &mirType) const { + return var->GetName(mirType); +} + +std::string FEIRVarTypeScatter::GetNameRawImpl() const { + return var->GetNameRaw(); +} + +std::unique_ptr FEIRVarTypeScatter::CloneImpl() const { + std::unique_ptr ans = std::make_unique(var->Clone()); + FEIRVarTypeScatter *ptrAns = static_cast(ans.get()); + ASSERT(ptrAns != nullptr, "nullptr check"); + for (const FEIRTypeKey &key : scatterTypes) { + ptrAns->AddScatterType(key.GetType()); + } + return std::unique_ptr(ans.release()); +} + +bool FEIRVarTypeScatter::EqualsToImpl(const std::unique_ptr &argVar) const { + return false; +} + +uint32 FEIRVarTypeScatter::HashImpl() const { + return 0; +} + +MIRSymbol *FEIRVarTypeScatter::GenerateGlobalMIRSymbolImpl(MIRBuilder &builder) const { + HIR2MPL_PARALLEL_FORBIDDEN(); + MIRType *mirType = var->GetType()->GenerateMIRTypeAuto(); + std::string name = GetName(*mirType); + return builder.GetOrCreateGlobalDecl(name, *mirType); +} + +MIRSymbol *FEIRVarTypeScatter::GenerateLocalMIRSymbolImpl(MIRBuilder &builder) const { + HIR2MPL_PARALLEL_FORBIDDEN(); + MIRType *mirType = var->GetType()->GenerateMIRTypeAuto(); + std::string name = GetName(*mirType); + return builder.GetOrCreateLocalDecl(name, *mirType); +} + +MIRSymbol *FEIRVarTypeScatter::GenerateMIRSymbolImpl(MIRBuilder &builder) const { + if (isGlobal) { + return GenerateGlobalMIRSymbol(builder); + } else { + return GenerateLocalMIRSymbol(builder); + } +} +} // namespace maple \ No newline at end of file diff --git a/src/hir2mpl/common/src/generic_attrs.cpp b/src/hir2mpl/common/src/generic_attrs.cpp new file mode 100644 index 0000000000000000000000000000000000000000..de7ccba767bcca2de929cc927b4fde5d1efeb615 --- /dev/null +++ b/src/hir2mpl/common/src/generic_attrs.cpp @@ -0,0 +1,104 @@ +/* + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "generic_attrs.h" +#include "global_tables.h" + +namespace maple { +TypeAttrs GenericAttrs::ConvertToTypeAttrs() const { + TypeAttrs attr; + for (uint32 i = 0; i < kMaxATTRNum; ++i) { + if (attrFlag[i] == 0) { + continue; + } + auto tA = static_cast(i); + switch (tA) { +#define TYPE_ATTR +#define ATTR(STR) \ + case GENATTR_##STR: \ + attr.SetAttr(ATTR_##STR); \ + break; +#include "all_attributes.def" +#undef ATTR +#undef TYPE_ATTR + default: + ASSERT(false, "unknown TypeAttrs"); + break; + } + } + if (GetContentFlag(GENATTR_pack)) { + attr.SetPack(static_cast(std::get(contentMap[GENATTR_pack]))); + } + return attr; +} + +FuncAttrs GenericAttrs::ConvertToFuncAttrs() { + FuncAttrs attr; + for (uint32 i = 0; i < kMaxATTRNum; ++i) { + if (attrFlag[i] == 0) { + continue; + } + auto tA = static_cast(i); + switch (tA) { +#define FUNC_ATTR +#define ATTR(STR) \ + case GENATTR_##STR: \ + attr.SetAttr(FUNCATTR_##STR); \ + break; +#include "all_attributes.def" +#undef ATTR +#undef FUNC_ATTR + default: + ASSERT(false, "unknown FuncAttrs"); + break; + } + } + if (GetContentFlag(GENATTR_alias)) { + std::string name = GlobalTables::GetStrTable().GetStringFromStrIdx(std::get(contentMap[GENATTR_alias])); + attr.SetAliasFuncName(name); + } + if (GetContentFlag(GENATTR_constructor_priority)) { + attr.SetConstructorPriority(std::get(contentMap[GENATTR_constructor_priority])); + } + if (GetContentFlag(GENATTR_destructor_priority)) { + attr.SetDestructorPriority(std::get(contentMap[GENATTR_destructor_priority])); + } + return attr; +} + +FieldAttrs GenericAttrs::ConvertToFieldAttrs() { + FieldAttrs attr; + constexpr uint32 maxAttrNum = 128; + for (uint32 i = 0; i < maxAttrNum; ++i) { + if (attrFlag[i] == 0) { + continue; + } + auto tA = static_cast(i); + switch (tA) { +#define FIELD_ATTR +#define ATTR(STR) \ + case GENATTR_##STR: \ + attr.SetAttr(FLDATTR_##STR); \ + break; +#include "all_attributes.def" +#undef ATTR +#undef FIELD_ATTR + default: + ASSERT(false, "unknown FieldAttrs"); + break; + } + } + return attr; +} +} diff --git a/src/hir2mpl/common/src/hir2mpl.cpp b/src/hir2mpl/common/src/hir2mpl.cpp new file mode 100644 index 0000000000000000000000000000000000000000..b7b2febb18e4afcb4fc061560e5563ae9dd15600 --- /dev/null +++ b/src/hir2mpl/common/src/hir2mpl.cpp @@ -0,0 +1,38 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "fe_utils.h" +#include "hir2mpl_compiler.h" +#include "mpl_sighandler.h" +using namespace maple; + +int main(int argc, char **argv) { + SigHandler::EnableAll(); + + MPLTimer timer; + timer.Start(); + HIR2MPLOptions &options = HIR2MPLOptions::GetInstance(); + if (!options.SolveArgs(argc, argv)) { + return static_cast(FEErrno::kCmdParseError); + } + HIR2MPLEnv::GetInstance().Init(); + MIRModule module; + HIR2MPLCompiler compiler(module); + int res = compiler.Run(); + timer.Stop(); + if (FEOptions::GetInstance().IsDumpTime()) { + INFO(kLncInfo, "hir2mpl time: %.2lfms", timer.ElapsedMilliseconds() / 1.0); + } + return res; +} diff --git a/src/hir2mpl/common/src/hir2mpl_compiler.cpp b/src/hir2mpl/common/src/hir2mpl_compiler.cpp new file mode 100644 index 0000000000000000000000000000000000000000..d447d68387eebbf9fbd3643bb3ae189938ddc002 --- /dev/null +++ b/src/hir2mpl/common/src/hir2mpl_compiler.cpp @@ -0,0 +1,377 @@ +/* + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "hir2mpl_compiler.h" +#include +#include "fe_manager.h" +#include "fe_file_type.h" +#include "fe_timer.h" +#include "inline_mplt.h" +#ifndef ONLY_C +#include "rc_setter.h" +#endif + +namespace maple { +HIR2MPLCompiler::HIR2MPLCompiler(MIRModule &argModule) + : module(argModule), + srcLang(kSrcLangJava), + mp(FEUtils::NewMempool("MemPool for HIR2MPLCompiler", false /* isLcalPool */)), + allocator(mp) {} + +HIR2MPLCompiler::~HIR2MPLCompiler() { + mp = nullptr; +} + +void HIR2MPLCompiler::Init() { + FEManager::Init(module); + module.SetFlavor(maple::kFeProduced); + module.GetImportFiles().clear(); +#ifndef ONLY_C + if (FEOptions::GetInstance().IsRC()) { + bc::RCSetter::InitRCSetter(""); + } +#endif +} + +void HIR2MPLCompiler::Release() { + FEManager::Release(); + FEUtils::DeleteMempoolPtr(mp); +} + +int HIR2MPLCompiler::Run() { + bool success = true; + Init(); + CheckInput(); + RegisterCompilerComponent(); + success = success && LoadMplt(); + SetupOutputPathAndName(); + ParseInputs(); + if (!FEOptions::GetInstance().GetXBootClassPath().empty()) { + LoadOnDemandTypes(); + } + PreProcessDecls(); + ProcessDecls(); + ProcessPragmas(); + if (!FEOptions::GetInstance().IsGenMpltOnly()) { + FETypeHierarchy::GetInstance().InitByGlobalTable(); + ProcessFunctions(); +#ifndef ONLY_C + if (FEOptions::GetInstance().IsRC()) { + bc::RCSetter::GetRCSetter().MarkRCAttributes(); + } + } + bc::RCSetter::ReleaseRCSetter(); +#else + } +#endif + FEManager::GetManager().ReleaseStructElemMempool(); + CHECK_FATAL(success, "Compile Error"); + ExportMpltFile(); + ExportMplFile(); + logInfo.PrintUserWarnMessages(); + logInfo.PrintUserErrorMessages(); + int res = logInfo.GetUserErrorsNum() > 0 ? 
FEErrno::kFEError : FEErrno::kNoError; + HIR2MPLEnv::GetInstance().Finish(); + Release(); + return res; +} + +void HIR2MPLCompiler::CheckInput() { + FE_INFO_LEVEL(FEOptions::kDumpLevelInfo, "===== Process HIR2MPLCompiler::CheckInput() ====="); + size_t nInput = 0; + + // check input class files + const std::list &inputClassNames = FEOptions::GetInstance().GetInputClassFiles(); + if (!inputClassNames.empty()) { + nInput += inputClassNames.size(); + if (firstInputName.empty()) { + firstInputName = inputClassNames.front(); + } + } + + // check input jar files + const std::list &inputJarNames = FEOptions::GetInstance().GetInputJarFiles(); + if (!inputJarNames.empty()) { + nInput += inputJarNames.size(); + if (firstInputName.empty()) { + firstInputName = inputJarNames.front(); + } + } + + // check input dex files + const std::vector &inputDexNames = FEOptions::GetInstance().GetInputDexFiles(); + if (!inputDexNames.empty()) { + nInput += inputDexNames.size(); + if (firstInputName.empty()) { + firstInputName = inputDexNames[0]; + } + } + + // check input ast files + const std::vector &inputASTNames = FEOptions::GetInstance().GetInputASTFiles(); + if (!inputASTNames.empty()) { + nInput += inputASTNames.size(); + if (firstInputName.empty()) { + firstInputName = inputASTNames[0]; + } + } + + // check input mast files + const std::vector &inputMASTNames = FEOptions::GetInstance().GetInputMASTFiles(); + if (!inputMASTNames.empty()) { + nInput += inputMASTNames.size(); + if (firstInputName.empty()) { + firstInputName = inputMASTNames[0]; + } + } + + CHECK_FATAL(nInput > 0, "Error occurs: no inputs. exit."); +} + +void HIR2MPLCompiler::SetupOutputPathAndName() { + FE_INFO_LEVEL(FEOptions::kDumpLevelInfo, "===== Process HIR2MPLCompiler::SetupOutputPathAndName() ====="); + // get outputName from option + const std::string &outputName0 = FEOptions::GetInstance().GetOutputName(); + if (!outputName0.empty()) { + outputName = outputName0; + } else { + // use default + outputName = FEFileType::GetName(firstInputName, true); + outputPath = FEFileType::GetPath(firstInputName); + } + const std::string &outputPath0 = FEOptions::GetInstance().GetOutputPath(); + if (!outputPath0.empty()) { + outputPath = outputPath0[outputPath0.size() - 1] == '/' ? 
outputPath0 : (outputPath0 + "/"); + } + FE_INFO_LEVEL(FEOptions::kDumpLevelInfo, "OutputPath: %s", outputPath.c_str()); + FE_INFO_LEVEL(FEOptions::kDumpLevelInfo, "OutputName: %s", outputName.c_str()); + std::string outName = ""; + if (outputPath.empty()) { + outName = outputName; + } else { + outName = outputPath + outputName; + } + FE_INFO_LEVEL(FEOptions::kDumpLevelInfo, "OutputFullName: %s", outName.c_str()); + module.SetFileName(outName); + // mapleall need outName with type, but mplt file no need + size_t lastDot = outName.find_last_of("."); + if (lastDot == std::string::npos) { + outNameWithoutType = outName; + } else { + outNameWithoutType = outName.substr(0, lastDot); + } + std::string mpltName = outNameWithoutType + ".mplt"; + if (srcLang != kSrcLangC) { + GStrIdx strIdx = module.GetMIRBuilder()->GetOrCreateStringIndex(mpltName); + module.GetImportFiles().push_back(strIdx); + } +} + +inline void HIR2MPLCompiler::InsertImportInMpl(const std::list &mplt) const { + for (const std::string &fileName : mplt) { + GStrIdx strIdx = module.GetMIRBuilder()->GetOrCreateStringIndex(fileName); + module.GetImportFiles().push_back(strIdx); + } +} + +bool HIR2MPLCompiler::LoadMplt() { + bool success = true; + FE_INFO_LEVEL(FEOptions::kDumpLevelInfo, "===== Process HIR2MPLCompiler::LoadMplt() ====="); + // load mplt from sys + const std::list &mpltsFromSys = FEOptions::GetInstance().GetInputMpltFilesFromSys(); + success = success && FEManager::GetTypeManager().LoadMplts(mpltsFromSys, FETypeFlag::kSrcMpltSys, + "Load mplt from sys"); + InsertImportInMpl(mpltsFromSys); + // load mplt + const std::list &mplts = FEOptions::GetInstance().GetInputMpltFiles(); + success = success && FEManager::GetTypeManager().LoadMplts(mplts, FETypeFlag::kSrcMplt, "Load mplt"); + InsertImportInMpl(mplts); + // load mplt from apk + const std::list &mpltsFromApk = FEOptions::GetInstance().GetInputMpltFilesFromApk(); + success = success && FEManager::GetTypeManager().LoadMplts(mpltsFromApk, FETypeFlag::kSrcMpltApk, + "Load mplt from apk"); + InsertImportInMpl(mpltsFromApk); + return success; +} + +void HIR2MPLCompiler::ExportMpltFile() { + if (!FEOptions::GetInstance().IsNoMplFile() && srcLang != kSrcLangC) { + FETimer timer; + timer.StartAndDump("Output mplt"); + module.DumpToHeaderFile(!FEOptions::GetInstance().IsGenAsciiMplt()); + timer.StopAndDumpTimeMS("Output mplt"); + } +} + +void HIR2MPLCompiler::ExportMplFile() { + if (!FEOptions::GetInstance().IsNoMplFile() && !FEOptions::GetInstance().IsGenMpltOnly()) { + FETimer timer; + timer.StartAndDump("Output mpl"); + bool emitStructureType = false; + // Currently, struct types cannot be dumped to mplt. + // After mid-end interfaces are optimized, the judgment can be deleted. 
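+    // emitStructureType is forwarded to OutputAsciiMpl below: for C input the struct
+    // definitions are emitted into the .mpl itself, since (as noted above) they cannot yet be dumped into the mplt.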
+ if (srcLang == kSrcLangC) { + emitStructureType = true; + } + module.OutputAsciiMpl("", ".mpl", nullptr, emitStructureType, false); + if (FEOptions::GetInstance().GetFuncInlineSize() != 0 && !FEOptions::GetInstance().GetWPAA()) { + std::unique_ptr modInline = std::make_unique(module); + bool isInlineNeeded = modInline->CollectInlineInfo(FEOptions::GetInstance().GetFuncInlineSize()); + if (isInlineNeeded) { + modInline->DumpInlineCandidateToFile(outNameWithoutType + ".mplt_inline"); + } + } + timer.StopAndDumpTimeMS("Output mpl"); + } +} + +void HIR2MPLCompiler::RegisterCompilerComponent(std::unique_ptr comp) { + CHECK_FATAL(comp != nullptr, "input compiler component is nullptr"); + components.push_back(std::move(comp)); +} + +void HIR2MPLCompiler::ParseInputs() { + FETimer timer; + timer.StartAndDump("HIR2MPLCompiler::ParseInputs()"); + for (const std::unique_ptr &comp : components) { + CHECK_NULL_FATAL(comp); + bool success = comp->ParseInput(); + CHECK_FATAL(success, "Error occurs in HIR2MPLCompiler::ParseInputs(). exit."); + } + timer.StopAndDumpTimeMS("HIR2MPLCompiler::ParseInputs()"); +} + +void HIR2MPLCompiler::LoadOnDemandTypes() { + FETimer timer; + timer.StartAndDump("HIR2MPLCompiler::LoadOnDemandTypes()"); + for (const std::unique_ptr &comp : components) { + CHECK_NULL_FATAL(comp); + bool success = comp->LoadOnDemandType(); + CHECK_FATAL(success, "Error occurs in HIR2MPLCompiler::LoadOnDemandTypes(). exit."); + } + timer.StopAndDumpTimeMS("HIR2MPLCompiler::LoadOnDemandTypes()"); +} + +void HIR2MPLCompiler::PreProcessDecls() { + FETimer timer; + timer.StartAndDump("HIR2MPLCompiler::PreProcessDecls()"); + for (const std::unique_ptr &comp : components) { + ASSERT(comp != nullptr, "nullptr check"); + bool success = comp->PreProcessDecl(); + CHECK_FATAL(success, "Error occurs in HIR2MPLCompiler::PreProcessDecls(). exit."); + } + timer.StopAndDumpTimeMS("HIR2MPLCompiler::PreProcessDecl()"); +} + +void HIR2MPLCompiler::ProcessDecls() { + FETimer timer; + timer.StartAndDump("HIR2MPLCompiler::ProcessDecl()"); + for (const std::unique_ptr &comp : components) { + ASSERT(comp != nullptr, "nullptr check"); + bool success = comp->ProcessDecl(); + CHECK_FATAL(success, "Error occurs in HIR2MPLCompiler::ProcessDecls(). 
exit."); + } + timer.StopAndDumpTimeMS("HIR2MPLCompiler::ProcessDecl()"); +} + +void HIR2MPLCompiler::ProcessPragmas() { + FETimer timer; + timer.StartAndDump("HIR2MPLCompiler::ProcessPragmas()"); + for (const std::unique_ptr &comp : components) { + ASSERT_NOT_NULL(comp); + comp->ProcessPragma(); + } + timer.StopAndDumpTimeMS("HIR2MPLCompiler::ProcessPragmas()"); +} + +void HIR2MPLCompiler::ProcessFunctions() { + FETimer timer; + bool success = true; + timer.StartAndDump("HIR2MPLCompiler::ProcessFunctions()"); + uint32 funcSize = 0; + for (const std::unique_ptr &comp : components) { + ASSERT(comp != nullptr, "nullptr check"); + success = comp->ProcessFunctionSerial() && success; + funcSize += comp->GetFunctionsSize(); + if (!success) { + const std::set &failedFEFunctions = comp->GetCompileFailedFEFunctions(); + compileFailedFEFunctions.insert(failedFEFunctions.cbegin(), failedFEFunctions.cend()); + } + if (FEOptions::GetInstance().IsDumpPhaseTime()) { + comp->DumpPhaseTimeTotal(); + } + comp->ReleaseMemPool(); + } + FEManager::GetTypeManager().MarkExternStructType(); + module.SetNumFuncs(funcSize); + FindMinCompileFailedFEFunctions(); + timer.StopAndDumpTimeMS("HIR2MPLCompiler::ProcessFunctions()"); + CHECK_FATAL(success, "ProcessFunction error"); +} + +void HIR2MPLCompiler::RegisterCompilerComponent() { +#ifndef ONLY_C + if (FEOptions::GetInstance().HasJBC()) { + FEOptions::GetInstance().SetTypeInferKind(FEOptions::TypeInferKind::kNo); + std::unique_ptr jbcCompilerComp = std::make_unique(module); + RegisterCompilerComponent(std::move(jbcCompilerComp)); + } + if (FEOptions::GetInstance().GetInputDexFiles().size() != 0) { + bc::ArkAnnotationProcessor::Process(); + std::unique_ptr bcCompilerComp = + std::make_unique>(module); + RegisterCompilerComponent(std::move(bcCompilerComp)); + } +#endif + if (FEOptions::GetInstance().GetInputASTFiles().size() != 0) { + srcLang = kSrcLangC; + std::unique_ptr astCompilerComp = + std::make_unique>(module); + RegisterCompilerComponent(std::move(astCompilerComp)); + } +#ifdef ENABLE_MAST + if (FEOptions::GetInstance().GetInputMASTFiles().size() != 0) { + srcLang = kSrcLangC; + std::unique_ptr mapleAstCompilerComp = + std::make_unique>(module); + RegisterCompilerComponent(std::move(mapleAstCompilerComp)); + } +#endif + module.SetSrcLang(srcLang); + FEManager::GetTypeManager().SetSrcLang(srcLang); +} + +void HIR2MPLCompiler::FindMinCompileFailedFEFunctions() { + if (compileFailedFEFunctions.size() == 0) { + return; + } + FEFunction *minCompileFailedFEFunction = nullptr; + uint32 minFailedStmtCount = 0; + for (FEFunction *feFunc : compileFailedFEFunctions) { + if (minCompileFailedFEFunction == nullptr) { + minCompileFailedFEFunction = feFunc; + minFailedStmtCount = minCompileFailedFEFunction->GetStmtCount(); + } + uint32 stmtCount = feFunc->GetStmtCount(); + if (stmtCount < minFailedStmtCount) { + minCompileFailedFEFunction = feFunc; + minFailedStmtCount = stmtCount; + } + } + if (minCompileFailedFEFunction != nullptr) { + INFO(kLncWarn, "function compile failed!!! 
the min function is :"); + INFO(kLncWarn, minCompileFailedFEFunction->GetDescription().c_str()); + } +} +} // namespace maple diff --git a/src/hir2mpl/common/src/hir2mpl_compiler_component.cpp b/src/hir2mpl/common/src/hir2mpl_compiler_component.cpp new file mode 100644 index 0000000000000000000000000000000000000000..ff7abd5bb0ef6257f9a299b6296ef9c69fba5fe7 --- /dev/null +++ b/src/hir2mpl/common/src/hir2mpl_compiler_component.cpp @@ -0,0 +1,175 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "hir2mpl_compiler_component.h" +#include +#include "fe_macros.h" +#include "fe_timer.h" +#include "fe_config_parallel.h" +#include "fe_manager.h" + +namespace maple { +// ---------- FEFunctionProcessTask ---------- +FEFunctionProcessTask::FEFunctionProcessTask(std::unique_ptr argFunction) + : function(std::move(argFunction)) {} + +int FEFunctionProcessTask::RunImpl(MplTaskParam *param) { + bool success = function->Process(); + if (success) { + return 1; + } else { + return 0; + } +} + +int FEFunctionProcessTask::FinishImpl(MplTaskParam *param) { + function->Finish(); + return 0; +} + +// ---------- FEFunctionProcessSchedular ---------- +void FEFunctionProcessSchedular::AddFunctionProcessTask(std::unique_ptr function) { + std::unique_ptr task = std::make_unique(std::move(function)); + AddTask(*task.get()); + tasks.push_back(std::move(task)); +} + +void FEFunctionProcessSchedular::CallbackThreadMainStart() { + std::thread::id tid = std::this_thread::get_id(); + if (FEOptions::GetInstance().GetDumpLevel() >= FEOptions::kDumpLevelInfoDebug) { + INFO(kLncInfo, "Start Run Thread (tid=%lx)", tid); + } + FEConfigParallel::GetInstance().RegisterRunThreadID(tid); +} + +// ---------- HIR2MPLCompilerComponent ---------- +HIR2MPLCompilerComponent::HIR2MPLCompilerComponent(MIRModule &argModule, MIRSrcLang argSrcLang) + : funcSize(0), + module(argModule), + srcLang(argSrcLang), + phaseResultTotal(std::make_unique(true)) {} + +bool HIR2MPLCompilerComponent::LoadOnDemandTypeImpl() { + return false; +} + +bool HIR2MPLCompilerComponent::PreProcessDeclImpl() { + FETimer timer; + timer.StartAndDump("HIR2MPLCompilerComponent::PreProcessDecl()"); + FE_INFO_LEVEL(FEOptions::kDumpLevelInfo, "===== Process HIR2MPLCompilerComponent::PreProcessDecl() ====="); + bool success = true; + FEManager::GetJavaStringManager().GenStringMetaClassVar(); + for (FEInputStructHelper *helper : structHelpers) { + ASSERT_NOT_NULL(helper); + success = helper->PreProcessDecl() ? 
success : false; + } + FEManager::GetTypeManager().InitMCCFunctions(); + timer.StopAndDumpTimeMS("HIR2MPLCompilerComponent::PreProcessDecl()"); + return success; +} + +bool HIR2MPLCompilerComponent::ProcessDeclImpl() { + FETimer timer; + timer.StartAndDump("HIR2MPLCompilerComponent::ProcessDecl()"); + FE_INFO_LEVEL(FEOptions::kDumpLevelInfo, "===== Process HIR2MPLCompilerComponent::ProcessDecl() ====="); + bool success = true; + for (FEInputStructHelper *helper : structHelpers) { + ASSERT_NOT_NULL(helper); + success = helper->ProcessDecl() ? success : false; + } + for (FEInputMethodHelper *helper : globalFuncHelpers) { + ASSERT_NOT_NULL(helper); + success = helper->ProcessDecl() ? success : false; + } + for (FEInputGlobalVarHelper *helper : globalVarHelpers) { + ASSERT_NOT_NULL(helper); + success = helper->ProcessDecl() ? success : false; + } + for (FEInputFileScopeAsmHelper *helper : globalFileScopeAsmHelpers) { + ASSERT_NOT_NULL(helper); + success = helper->ProcessDecl() ? success : false; + } + if (FEOptions::GetInstance().IsDbgFriendly()) { + for (FEInputEnumHelper *helper : enumHelpers) { + ASSERT_NOT_NULL(helper); + success = helper->ProcessDecl() ? success : false; + } + } + timer.StopAndDumpTimeMS("HIR2MPLCompilerComponent::ProcessDecl()"); + return success; +} + +bool HIR2MPLCompilerComponent::ProcessFunctionSerialImpl() { + std::stringstream ss; + ss << GetComponentName() << "::ProcessFunctionSerial()"; + FETimer timer; + timer.StartAndDump(ss.str()); + bool success = true; + FE_INFO_LEVEL(FEOptions::kDumpLevelInfo, "===== Process %s =====", ss.str().c_str()); + for (FEInputStructHelper *structHelper : structHelpers) { + ASSERT_NOT_NULL(structHelper); + for (FEInputMethodHelper *methodHelper : structHelper->GetMethodHelpers()) { + ASSERT_NOT_NULL(methodHelper); + std::unique_ptr feFunction = CreatFEFunction(methodHelper); + feFunction->SetSrcFileName(structHelper->GetSrcFileName()); + bool processResult = feFunction->Process(); + if (!processResult) { + (void)compileFailedFEFunctions.insert(feFunction.get()); + } + success = success && processResult; + feFunction->Finish(); + funcSize++; + } + } + timer.StopAndDumpTimeMS(ss.str()); + return success; +} + +bool HIR2MPLCompilerComponent::ProcessFunctionParallelImpl(uint32 nthreads) { + std::stringstream ss; + ss << GetComponentName() << "::ProcessFunctionParallel()"; + FETimer timer; + timer.StartAndDump(ss.str()); + FE_INFO_LEVEL(FEOptions::kDumpLevelInfo, "===== Process %s =====", ss.str().c_str()); + FEFunctionProcessSchedular schedular(ss.str()); + schedular.Init(); + for (FEInputStructHelper *structHelper : structHelpers) { + ASSERT_NOT_NULL(structHelper); + for (FEInputMethodHelper *methodHelper : structHelper->GetMethodHelpers()) { + ASSERT_NOT_NULL(methodHelper); + std::unique_ptr feFunction = CreatFEFunction(methodHelper); + feFunction->SetSrcFileName(structHelper->GetSrcFileName()); + schedular.AddFunctionProcessTask(std::move(feFunction)); + funcSize++; + } + } + schedular.SetDumpTime(FEOptions::GetInstance().IsDumpThreadTime()); + (void)schedular.RunTask(nthreads, true); + timer.StopAndDumpTimeMS(ss.str()); + return true; +} + +std::string HIR2MPLCompilerComponent::GetComponentNameImpl() const { + return "HIR2MPLCompilerComponent"; +} + +bool HIR2MPLCompilerComponent::ParallelableImpl() const { + return false; +} + +void HIR2MPLCompilerComponent::DumpPhaseTimeTotalImpl() const { + CHECK_NULL_FATAL(phaseResultTotal); + phaseResultTotal->DumpMS(); +} +} // namespace maple \ No newline at end of file diff --git 
a/src/hir2mpl/common/src/hir2mpl_env.cpp b/src/hir2mpl/common/src/hir2mpl_env.cpp new file mode 100644 index 0000000000000000000000000000000000000000..98eb5a29cf92ee1f1a6994ba7e507c649fb3c9be --- /dev/null +++ b/src/hir2mpl/common/src/hir2mpl_env.cpp @@ -0,0 +1,57 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "hir2mpl_env.h" +#include "global_tables.h" +#include "mpl_logging.h" +#include "fe_options.h" + +namespace maple { +HIR2MPLEnv HIR2MPLEnv::instance; + +void HIR2MPLEnv::Init() { + srcFileIdxNameMap.clear(); + srcFileIdxNameMap[0] = GStrIdx(0); + globalLabelIdx = 1; +} + +void HIR2MPLEnv::Finish() { + srcFileIdxNameMap.clear(); +} + +uint32 HIR2MPLEnv::NewSrcFileIdx(const GStrIdx &nameIdx) { + size_t idx = srcFileIdxNameMap.size() + 1; // 1: already occupied by VtableImpl.mpl + CHECK_FATAL(idx < UINT32_MAX, "idx is out of range"); + srcFileIdxNameMap[idx] = nameIdx; + return static_cast(idx); +} + +GStrIdx HIR2MPLEnv::GetFileNameIdx(uint32 fileIdx) const { + auto it = srcFileIdxNameMap.find(fileIdx); + if (it == srcFileIdxNameMap.end()) { + return GStrIdx(0); + } else { + return it->second; + } +} + +std::string HIR2MPLEnv::GetFileName(uint32 fileIdx) const { + auto it = srcFileIdxNameMap.find(fileIdx); + if (it == srcFileIdxNameMap.end() || it->second == 0) { + return "unknown"; + } else { + return GlobalTables::GetStrTable().GetStringFromStrIdx(it->second); + } +} +} // namespace maple diff --git a/src/hir2mpl/common/src/hir2mpl_option.cpp b/src/hir2mpl/common/src/hir2mpl_option.cpp new file mode 100644 index 0000000000000000000000000000000000000000..b25bdc9062c3f3171105bac41da60ff089b80424 --- /dev/null +++ b/src/hir2mpl/common/src/hir2mpl_option.cpp @@ -0,0 +1,224 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ + +#include "driver_options.h" + +namespace opts::hir2mpl { + +maplecl::Option help({"--help", "-h"}, + " -h, -help : print usage and exit", + {hir2mplCategory}); + +maplecl::Option version({"--version", "-v"}, + " -v, -version : print version and exit", + {hir2mplCategory}); + +maplecl::Option mpltSys({"--mplt-sys", "-mplt-sys"}, + " -mplt-sys sys1.mplt,sys2.mplt\n" + " : input sys mplt files", + {hir2mplCategory}); + +maplecl::Option mpltApk({"--mplt-apk", "-mplt-apk"}, + " -mplt-apk apk1.mplt,apk2.mplt\n" + " : input apk mplt files", + {hir2mplCategory}); + +maplecl::Option mplt({"--mplt", "-mplt"}, + " -mplt lib1.mplt,lib2.mplt\n" + " : input mplt files", + {hir2mplCategory}); + +maplecl::Option inClass({"--in-class", "-in-class"}, + " -in-class file1.jar,file2.jar\n" + " : input class files", + {hir2mplCategory}); + +maplecl::Option inJar({"--in-jar", "-in-jar"}, + " -in-jar file1.jar,file2.jar\n" + " : input jar files", + {hir2mplCategory}); + +maplecl::Option inDex({"--in-dex", "-in-dex"}, + " -in-dex file1.dex,file2.dex\n" + " : input dex files", + {hir2mplCategory}); + +maplecl::Option inAst({"--in-ast", "-in-ast"}, + " -in-ast file1.ast,file2.ast\n" + " : input ast files", + {hir2mplCategory}); + +maplecl::Option inMast({"--in-mast", "-in-mast"}, + " -in-mast file1.mast,file2.mast\n" + " : input mast files", + {hir2mplCategory}); + +maplecl::Option output({"--output", "-p"}, + " -p, -output : output path", + {hir2mplCategory}); + +maplecl::Option outputName({"--output-name", "-o"}, + " -o, -output-name : output name", + {hir2mplCategory}); + +maplecl::Option mpltOnly({"--t", "-t"}, + " -t : generate mplt only", + {hir2mplCategory}); + +maplecl::Option asciimplt({"--asciimplt", "-asciimplt"}, + " -asciimplt : generate mplt in ascii format", + {hir2mplCategory}); + +maplecl::Option dumpInstComment({"--dump-inst-comment", "-dump-inst-comment"}, + " -dump-inst-comment : dump instruction comment", + {hir2mplCategory}); + +maplecl::Option noMplFile({"--no-mpl-file", "-no-mpl-file"}, + " -no-mpl-file : disable dump mpl file", + {hir2mplCategory}); + +maplecl::Option dumpLevel({"--dump-level", "-d"}, + " -d, -dump-level xx : debug info dump level\n" + " [0] disable\n" + " [1] dump simple info\n" + " [2] dump detail info\n" + " [3] dump debug info", + {hir2mplCategory}); + +maplecl::Option dumpTime({"--dump-time", "-dump-time"}, + " -dump-time : dump time", + {hir2mplCategory}); + +maplecl::Option dumpComment({"--dump-comment", "-dump-comment"}, + " -dump-comment : gen comment stmt", + {hir2mplCategory}); + +maplecl::Option dumpLOC({"--dump-LOC", "-dump-LOC"}, + " -dump-LOC : gen LOC", + {hir2mplCategory}); + +maplecl::Option dbgFriendly({"--g", "-g"}, + " -g : emit debug friendly mpl, including\n" + " no variable renaming\n" + " gen LOC", + {hir2mplCategory}); + +maplecl::Option dumpPhaseTime({"--dump-phase-time", "-dump-phase-time"}, + " -dump-phase-time : dump total phase time", + {hir2mplCategory}); + +maplecl::Option dumpPhaseTimeDetail({"-dump-phase-time-detail", "--dump-phase-time-detail"}, + " -dump-phase-time-detail\n" \ + " : dump phase time for each method", + {hir2mplCategory}); + +maplecl::Option rc({"-rc", "--rc"}, + " -rc : enable rc", + {hir2mplCategory}); + +maplecl::Option nobarrier({"-nobarrier", "--nobarrier"}, + " -nobarrier : no barrier", + {hir2mplCategory}); + +maplecl::Option usesignedchar({"-usesignedchar", "--usesignedchar"}, + " -usesignedchar : use signed char", + {hir2mplCategory}); + +maplecl::Option o2({"-O2", "--O2"}, + " -O2 : enable hir2mpl O2 
optimize", + {hir2mplCategory}); + +maplecl::Option simplifyShortCircuit({"-simplify-short-circuit", "--simplify-short-circuit"}, + " -simplify-short-circuit\n" \ + " : enable simplify short circuit", + {hir2mplCategory}); + +maplecl::Option enableVariableArray({"-enable-variable-array", "--enable-variable-array"}, + " -enable-variable-array\n" \ + " : enable variable array", + {hir2mplCategory}); + +maplecl::Option funcInliceSize({"-func-inline-size", "--func-inline-size"}, + " -func-inline-size : set func inline size", + {hir2mplCategory}); + +maplecl::Option np({"-np", "--np"}, + " -np num : number of threads", + {hir2mplCategory}); + +maplecl::Option dumpThreadTime({"-dump-thread-time", "--dump-thread-time"}, + " -dump-thread-time : dump thread time in mpl schedular", + {hir2mplCategory}); + +maplecl::Option xbootclasspath({"-Xbootclasspath", "--Xbootclasspath"}, + " -Xbootclasspath=bootclasspath\n" \ + " : boot class path list", + {hir2mplCategory}); + +maplecl::Option classloadercontext({"-classloadercontext", "--classloadercontext"}, + " -classloadercontext=pcl\n" \ + " : class loader context \n" \ + " : path class loader", + {hir2mplCategory}); + +maplecl::Option dep({"-dep", "--dep"}, + " -dep=all or func\n" \ + " : [all] collect all dependent types\n" \ + " : [func] collect dependent types in function", + {hir2mplCategory}); + +maplecl::Option depsamename({"-depsamename", "--depsamename"}, + " -DepSameNamePolicy=sys or src\n" \ + " : [sys] load type from sys when on-demand load same name type\n" \ + " : [src] load type from src when on-demand load same name type", + {hir2mplCategory}); + +maplecl::Option npeCheckDynamic({"-npe-check-dynamic", "--npe-check-dynamic"}, + " --npe-check-dynamic : Nonnull pointr dynamic checking", + {hir2mplCategory}); + +maplecl::Option boundaryCheckDynamic({"-boundary-check-dynamic", "--boundary-check-dynamic"}, + " --boundary-check-dynamic\n" \ + " : Boundary dynamic checking", + {hir2mplCategory}); + +maplecl::Option safeRegion({"-safe-region", "--safe-region"}, + " --boundary-check-dynamic\n" \ + " --safe-region : Enable safe region", + {hir2mplCategory}); + +maplecl::Option defaultSafe({"-defaultSafe", "--defaultSafe"}, + " --defaultSafe : treat unmarked function or blocks as safe region by default", + {hir2mplCategory}); + +maplecl::Option dumpFEIRBB({"-dump-bb", "--dump-bb"}, + " -dump-bb : dump basic blocks info", + {hir2mplCategory}); + +maplecl::Option dumpFEIRCFGGraph({"-dump-cfg", "--dump-cfg"}, + " -dump-cfg funcname1,funcname2\n" \ + " : dump cfg graph to dot file", + {hir2mplCategory}); + +maplecl::Option wpaa({"-wpaa", "--wpaa"}, + " -dump-cfg funcname1,funcname2\n" \ + " -wpaa : enable whole program ailas analysis", + {hir2mplCategory}); + +maplecl::Option debug({"-debug", "--debug"}, + " -debug : dump enabled options", + {hir2mplCategory}); + +} diff --git a/src/hir2mpl/common/src/hir2mpl_options.cpp b/src/hir2mpl/common/src/hir2mpl_options.cpp new file mode 100644 index 0000000000000000000000000000000000000000..ed2787cdf44761c4cf47cbb7ed0b030a1e669a4e --- /dev/null +++ b/src/hir2mpl/common/src/hir2mpl_options.cpp @@ -0,0 +1,623 @@ +/* + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "hir2mpl_options.h" +#include +#include +#include +#include +#include +#include +#include "driver_options.h" +#include "file_utils.h" +#include "fe_options.h" +#include "fe_macros.h" +#include "fe_file_type.h" +#include "hir2mpl_option.h" +#include "parser_opt.h" +#include "triple.h" +#include "types_def.h" +#include "version.h" + +namespace maple { + +HIR2MPLOptions::HIR2MPLOptions() { + Init(); +} + +void HIR2MPLOptions::Init() const { + FEOptions::GetInstance().Init(); + bool success = InitFactory(); + CHECK_FATAL(success, "InitFactory failed. Exit."); +} + +bool HIR2MPLOptions::InitFactory() { + RegisterFactoryFunction(&opts::hir2mpl::help, + &HIR2MPLOptions::ProcessHelp); + RegisterFactoryFunction(&opts::hir2mpl::version, + &HIR2MPLOptions::ProcessVersion); + + // input control options + RegisterFactoryFunction(&opts::hir2mpl::mpltSys, + &HIR2MPLOptions::ProcessInputMpltFromSys); + RegisterFactoryFunction(&opts::hir2mpl::mpltApk, + &HIR2MPLOptions::ProcessInputMpltFromApk); + RegisterFactoryFunction(&opts::hir2mpl::mplt, + &HIR2MPLOptions::ProcessInputMplt); + RegisterFactoryFunction(&opts::hir2mpl::inClass, + &HIR2MPLOptions::ProcessInClass); + RegisterFactoryFunction(&opts::hir2mpl::inJar, + &HIR2MPLOptions::ProcessInJar); + RegisterFactoryFunction(&opts::hir2mpl::inDex, + &HIR2MPLOptions::ProcessInDex); + RegisterFactoryFunction(&opts::hir2mpl::inAst, + &HIR2MPLOptions::ProcessInAST); + RegisterFactoryFunction(&opts::hir2mpl::inMast, + &HIR2MPLOptions::ProcessInMAST); + + // output control options + RegisterFactoryFunction(&opts::hir2mpl::output, + &HIR2MPLOptions::ProcessOutputPath); + RegisterFactoryFunction(&opts::hir2mpl::outputName, + &HIR2MPLOptions::ProcessOutputName); + RegisterFactoryFunction(&opts::hir2mpl::mpltOnly, + &HIR2MPLOptions::ProcessGenMpltOnly); + RegisterFactoryFunction(&opts::hir2mpl::asciimplt, + &HIR2MPLOptions::ProcessGenAsciiMplt); + RegisterFactoryFunction(&opts::hir2mpl::dumpInstComment, + &HIR2MPLOptions::ProcessDumpInstComment); + RegisterFactoryFunction(&opts::hir2mpl::noMplFile, + &HIR2MPLOptions::ProcessNoMplFile); + + // debug info control options + RegisterFactoryFunction(&opts::hir2mpl::dumpLevel, + &HIR2MPLOptions::ProcessDumpLevel); + RegisterFactoryFunction(&opts::hir2mpl::dumpTime, + &HIR2MPLOptions::ProcessDumpTime); + RegisterFactoryFunction(&opts::hir2mpl::dumpComment, + &HIR2MPLOptions::ProcessDumpComment); + RegisterFactoryFunction(&opts::hir2mpl::dumpLOC, + &HIR2MPLOptions::ProcessDumpLOC); + RegisterFactoryFunction(&opts::hir2mpl::dbgFriendly, + &HIR2MPLOptions::ProcessDbgFriendly); + RegisterFactoryFunction(&opts::hir2mpl::dumpPhaseTime, + &HIR2MPLOptions::ProcessDumpPhaseTime); + RegisterFactoryFunction(&opts::hir2mpl::dumpPhaseTimeDetail, + &HIR2MPLOptions::ProcessDumpPhaseTimeDetail); + + // general stmt/bb/cfg debug options + RegisterFactoryFunction(&opts::hir2mpl::dumpFEIRBB, + &HIR2MPLOptions::ProcessDumpFEIRBB); + RegisterFactoryFunction(&opts::hir2mpl::dumpFEIRCFGGraph, + &HIR2MPLOptions::ProcessDumpFEIRCFGGraph); + + // multi-thread control options + RegisterFactoryFunction(&opts::hir2mpl::np, + &HIR2MPLOptions::ProcessNThreads); + 
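// -np stores the requested worker-thread count via FEOptions::SetNThreads(); the parallel path (FEFunctionProcessSchedular in hir2mpl_compiler_component.cpp above) runs one FEFunctionProcessTask per method.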
RegisterFactoryFunction(&opts::hir2mpl::dumpThreadTime, + &HIR2MPLOptions::ProcessDumpThreadTime); + + RegisterFactoryFunction(&opts::hir2mpl::rc, + &HIR2MPLOptions::ProcessRC); + RegisterFactoryFunction(&opts::hir2mpl::nobarrier, + &HIR2MPLOptions::ProcessNoBarrier); + + // ast compiler options + RegisterFactoryFunction(&opts::hir2mpl::usesignedchar, + &HIR2MPLOptions::ProcessUseSignedChar); + + // On Demand Type Creation + RegisterFactoryFunction(&opts::hir2mpl::xbootclasspath, + &HIR2MPLOptions::ProcessXbootclasspath); + RegisterFactoryFunction(&opts::hir2mpl::classloadercontext, + &HIR2MPLOptions::ProcessClassLoaderContext); + RegisterFactoryFunction(&opts::hir2mpl::dep, + &HIR2MPLOptions::ProcessCollectDepTypes); + RegisterFactoryFunction(&opts::hir2mpl::depsamename, + &HIR2MPLOptions::ProcessDepSameNamePolicy); + // EnhanceC + RegisterFactoryFunction(&opts::hir2mpl::npeCheckDynamic, + &HIR2MPLOptions::ProcessNpeCheckDynamic); + RegisterFactoryFunction(&opts::hir2mpl::boundaryCheckDynamic, + &HIR2MPLOptions::ProcessBoundaryCheckDynamic); + RegisterFactoryFunction(&opts::hir2mpl::safeRegion, + &HIR2MPLOptions::ProcessSafeRegion); + RegisterFactoryFunction(&opts::hir2mpl::defaultSafe, + &HIR2MPLOptions::ProcessDefaultSafe); + + // O2 does not work, because it generates OP_ror instruction but this instruction is not supported in me + RegisterFactoryFunction(&opts::hir2mpl::o2, + &HIR2MPLOptions::ProcessO2); + RegisterFactoryFunction(&opts::hir2mpl::simplifyShortCircuit, + &HIR2MPLOptions::ProcessSimplifyShortCircuit); + RegisterFactoryFunction(&opts::hir2mpl::enableVariableArray, + &HIR2MPLOptions::ProcessEnableVariableArray); + RegisterFactoryFunction(&opts::hir2mpl::funcInliceSize, + &HIR2MPLOptions::ProcessFuncInlineSize); + RegisterFactoryFunction(&opts::hir2mpl::wpaa, + &HIR2MPLOptions::ProcessWPAA); + + return true; +} + +bool HIR2MPLOptions::SolveOptions(bool isDebug) { + if (opts::target.IsEnabledByUser()) { + Triple::GetTriple().Init(opts::target.GetValue()); + } else { + Triple::GetTriple().Init(); + } + + if (Triple::GetTriple().IsBigEndian()) { + (void)ProcessBigEndian(); + } + + for (const auto &opt : hir2mplCategory.GetEnabledOptions()) { + std::string printOpt; + if (isDebug) { + for (const auto &val : opt->GetRawValues()) { + printOpt += opt->GetName() + " " + val + " "; + } + LogInfo::MapleLogger() << "hir2mpl options: " << printOpt << '\n'; + } + + auto func = CreateProductFunction(opt); + if (func != nullptr) { + if (!func(this, *opt)) { + return false; + } + } + } + + return true; +} + +bool HIR2MPLOptions::SolveArgs(int argc, char **argv) { + maplecl::CommandLine::GetCommandLine().Parse(argc, argv, hir2mplCategory); + bool result = SolveOptions(opts::hir2mpl::debug); + if (!result) { + return result; + } + + std::vector inputs; + for (auto &arg : maplecl::CommandLine::GetCommandLine().badCLArgs) { + if (FileUtils::IsFileExists(arg.first)) { + inputs.push_back(arg.first); + } else { + ERR(kLncErr, "Unknown Option: %s\n", arg.first.c_str()); + DumpUsage(); + return result; + } + } + + if (inputs.size() >= 1) { + ProcessInputFiles(inputs); + return true; + } else { + ERR(kLncErr, "Input File is not specified\n"); + DumpUsage(); + return false; + } +} + +void HIR2MPLOptions::DumpUsage() const { + std::cout << "\n====== Usage: hir2mpl [options] input1 input2 input3 ======\n"; + maplecl::CommandLine::GetCommandLine().HelpPrinter(hir2mplCategory); +} + +void HIR2MPLOptions::DumpVersion() const { + std::cout << "Maple FE Version : " << Version::GetVersionStr() << std::endl; 
+} + +bool HIR2MPLOptions::ProcessHelp(const maplecl::OptionInterface &) const { + DumpUsage(); + return false; +} + +bool HIR2MPLOptions::ProcessVersion(const maplecl::OptionInterface &) const { + DumpVersion(); + return false; +} + +bool HIR2MPLOptions::ProcessInClass(const maplecl::OptionInterface &inClass) const { + std::string arg = inClass.GetCommonValue(); + std::list listFiles = SplitByComma(arg); + for (const std::string &fileName : listFiles) { + FEOptions::GetInstance().AddInputClassFile(fileName); + } + return true; +} + +bool HIR2MPLOptions::ProcessInJar(const maplecl::OptionInterface &inJar) const { + std::string arg = inJar.GetCommonValue(); + std::list listFiles = SplitByComma(arg); + for (const std::string &fileName : listFiles) { + FEOptions::GetInstance().AddInputJarFile(fileName); + } + return true; +} + +bool HIR2MPLOptions::ProcessInDex(const maplecl::OptionInterface &inDex) const { + std::string arg = inDex.GetCommonValue(); + std::list listFiles = SplitByComma(arg); + for (const std::string &fileName : listFiles) { + FEOptions::GetInstance().AddInputDexFile(fileName); + } + return true; +} + +bool HIR2MPLOptions::ProcessInAST(const maplecl::OptionInterface &inAst) const { + std::string arg = inAst.GetCommonValue(); + std::list listFiles = SplitByComma(arg); + for (const std::string &fileName : listFiles) { + FEOptions::GetInstance().AddInputASTFile(fileName); + } + return true; +} + +bool HIR2MPLOptions::ProcessInMAST(const maplecl::OptionInterface &inMast) const { + std::string arg = inMast.GetCommonValue(); + std::list listFiles = SplitByComma(arg); + for (const std::string &fileName : listFiles) { + FEOptions::GetInstance().AddInputMASTFile(fileName); + } + return true; +} + +bool HIR2MPLOptions::ProcessInputMplt(const maplecl::OptionInterface &mplt) const { + std::string arg = mplt.GetCommonValue(); + std::list listFiles = SplitByComma(arg); + for (const std::string &fileName : listFiles) { + FEOptions::GetInstance().AddInputMpltFile(fileName); + } + return true; +} + +bool HIR2MPLOptions::ProcessInputMpltFromSys(const maplecl::OptionInterface &mpltSys) const { + std::string arg = mpltSys.GetCommonValue(); + std::list listFiles = SplitByComma(arg); + for (const std::string &fileName : listFiles) { + FEOptions::GetInstance().AddInputMpltFileFromSys(fileName); + } + return true; +} + +bool HIR2MPLOptions::ProcessInputMpltFromApk(const maplecl::OptionInterface &mpltApk) const { + std::string arg = mpltApk.GetCommonValue(); + std::list listFiles = SplitByComma(arg); + for (const std::string &fileName : listFiles) { + FEOptions::GetInstance().AddInputMpltFileFromApk(fileName); + } + return true; +} + +bool HIR2MPLOptions::ProcessOutputPath(const maplecl::OptionInterface &output) const { + std::string arg = output.GetCommonValue(); + FEOptions::GetInstance().SetOutputPath(arg); + return true; +} + +bool HIR2MPLOptions::ProcessOutputName(const maplecl::OptionInterface &outputName) const { + std::string arg = outputName.GetCommonValue(); + FEOptions::GetInstance().SetOutputName(arg); + return true; +} + +bool HIR2MPLOptions::ProcessGenMpltOnly(const maplecl::OptionInterface &) const { + FEOptions::GetInstance().SetIsGenMpltOnly(true); + return true; +} + +bool HIR2MPLOptions::ProcessGenAsciiMplt(const maplecl::OptionInterface &) const { + FEOptions::GetInstance().SetIsGenAsciiMplt(true); + return true; +} + +bool HIR2MPLOptions::ProcessDumpInstComment(const maplecl::OptionInterface &) const { + FEOptions::GetInstance().EnableDumpInstComment(); + return true; +} + +bool 
HIR2MPLOptions::ProcessNoMplFile(const maplecl::OptionInterface &) const { + FEOptions::GetInstance().SetNoMplFile(); + return true; +} + +bool HIR2MPLOptions::ProcessDumpLevel(const maplecl::OptionInterface &outputName) const { + int arg = outputName.GetCommonValue(); + FEOptions::GetInstance().SetDumpLevel(arg); + return true; +} + +bool HIR2MPLOptions::ProcessDumpTime(const maplecl::OptionInterface &) const { + FEOptions::GetInstance().SetIsDumpTime(true); + return true; +} + +bool HIR2MPLOptions::ProcessDumpComment(const maplecl::OptionInterface &) const { + FEOptions::GetInstance().SetIsDumpComment(true); + return true; +} + +bool HIR2MPLOptions::ProcessDumpLOC(const maplecl::OptionInterface &) const { + FEOptions::GetInstance().SetIsDumpLOC(true); + return true; +} + +bool HIR2MPLOptions::ProcessDbgFriendly(const maplecl::OptionInterface &) const { + FEOptions::GetInstance().SetDbgFriendly(true); + return true; +} + +bool HIR2MPLOptions::ProcessDumpPhaseTime(const maplecl::OptionInterface &) const { + FEOptions::GetInstance().SetIsDumpPhaseTime(true); + return true; +} + +bool HIR2MPLOptions::ProcessDumpPhaseTimeDetail(const maplecl::OptionInterface &) const { + FEOptions::GetInstance().SetIsDumpPhaseTimeDetail(true); + return true; +} + +// java compiler options +bool HIR2MPLOptions::ProcessModeForJavaStaticFieldName(const maplecl::OptionInterface &opt) const { + const std::string &arg = opt.GetCommonValue(); + if (arg.compare("notype") == 0) { + FEOptions::GetInstance().SetModeJavaStaticFieldName(FEOptions::ModeJavaStaticFieldName::kNoType); + } else if (arg.compare("alltype") == 0) { + FEOptions::GetInstance().SetModeJavaStaticFieldName(FEOptions::ModeJavaStaticFieldName::kAllType); + } else if (arg.compare("smart") == 0) { + FEOptions::GetInstance().SetModeJavaStaticFieldName(FEOptions::ModeJavaStaticFieldName::kSmart); + } else { + ERR(kLncErr, "unsupported options: %s", arg.c_str()); + return false; + } + return true; +} + +bool HIR2MPLOptions::ProcessJBCInfoUsePathName(const maplecl::OptionInterface &) const { + FEOptions::GetInstance().SetIsJBCInfoUsePathName(true); + return true; +} + +bool HIR2MPLOptions::ProcessDumpJBCStmt(const maplecl::OptionInterface &) const { + FEOptions::GetInstance().SetIsDumpJBCStmt(true); + return true; +} + +bool HIR2MPLOptions::ProcessDumpJBCAll(const maplecl::OptionInterface &) const { + FEOptions::GetInstance().SetIsDumpJBCAll(true); + return true; +} + +bool HIR2MPLOptions::ProcessDumpJBCErrorOnly(const maplecl::OptionInterface &) const { + FEOptions::GetInstance().SetIsDumpJBCErrorOnly(true); + return true; +} + +bool HIR2MPLOptions::ProcessDumpJBCFuncName(const maplecl::OptionInterface &opt) const { + std::string arg = opt.GetCommonValue(); + while (!arg.empty()) { + size_t pos = arg.find(","); + if (pos != std::string::npos) { + FEOptions::GetInstance().AddDumpJBCFuncName(arg.substr(0, pos)); + arg = arg.substr(pos + 1); + } else { + FEOptions::GetInstance().AddDumpJBCFuncName(arg); + arg = ""; + } + } + return true; +} + +bool HIR2MPLOptions::ProcessEmitJBCLocalVarInfo(const maplecl::OptionInterface &) const { + FEOptions::GetInstance().SetIsEmitJBCLocalVarInfo(true); + return true; +} + +// bc compiler options +bool HIR2MPLOptions::ProcessRC(const maplecl::OptionInterface &) const { + FEOptions::GetInstance().SetRC(true); + return true; +} + +bool HIR2MPLOptions::ProcessNoBarrier(const maplecl::OptionInterface &) const { + FEOptions::GetInstance().SetNoBarrier(true); + return true; +} + +// ast compiler options +bool 
HIR2MPLOptions::ProcessUseSignedChar(const maplecl::OptionInterface &) const { + FEOptions::GetInstance().SetUseSignedChar(true); + return true; +} + +bool HIR2MPLOptions::ProcessBigEndian() const { + FEOptions::GetInstance().SetBigEndian(true); + return true; +} + +// general stmt/bb/cfg debug options +bool HIR2MPLOptions::ProcessDumpFEIRBB(const maplecl::OptionInterface &) const { + FEOptions::GetInstance().SetIsDumpFEIRBB(true); + return true; +} + +bool HIR2MPLOptions::ProcessDumpFEIRCFGGraph(const maplecl::OptionInterface &opt) const { + std::string arg = opt.GetCommonValue(); + std::list funcNameList = SplitByComma(arg); + for (const std::string &funcName : funcNameList) { + FEOptions::GetInstance().AddFuncNameForDumpCFGGraph(funcName); + } + return true; +} + +// multi-thread control options +bool HIR2MPLOptions::ProcessNThreads(const maplecl::OptionInterface &numThreads) const { + uint32_t num = numThreads.GetCommonValue(); + FEOptions::GetInstance().SetNThreads(num); + return true; +} + +bool HIR2MPLOptions::ProcessDumpThreadTime(const maplecl::OptionInterface &) const { + FEOptions::GetInstance().SetDumpThreadTime(true); + return true; +} + +void HIR2MPLOptions::ProcessInputFiles(const std::vector &inputs) const { + FE_INFO_LEVEL(FEOptions::kDumpLevelInfo, "===== Process HIR2MPLOptions::ProcessInputFiles() ====="); + for (const std::string &inputName : inputs) { + FEFileType::FileType type = FEFileType::GetInstance().GetFileTypeByPathName(inputName); + switch (type) { + case FEFileType::kClass: + FE_INFO_LEVEL(FEOptions::kDumpLevelInfoDetail, "CLASS file detected: %s", inputName.c_str()); + FEOptions::GetInstance().AddInputClassFile(inputName); + break; + case FEFileType::kJar: + FE_INFO_LEVEL(FEOptions::kDumpLevelInfoDetail, "JAR file detected: %s", inputName.c_str()); + FEOptions::GetInstance().AddInputJarFile(inputName); + break; + case FEFileType::kDex: + FE_INFO_LEVEL(FEOptions::kDumpLevelInfoDetail, "DEX file detected: %s", inputName.c_str()); + FEOptions::GetInstance().AddInputDexFile(inputName); + break; + case FEFileType::kAST: + FE_INFO_LEVEL(FEOptions::kDumpLevelInfoDetail, "AST file detected: %s", inputName.c_str()); + FEOptions::GetInstance().AddInputASTFile(inputName); + break; + case FEFileType::kMAST: + FE_INFO_LEVEL(FEOptions::kDumpLevelInfoDetail, "MAST file detected: %s", inputName.c_str()); + FEOptions::GetInstance().AddInputMASTFile(inputName); + break; + default: + WARN(kLncErr, "unsupported file format (%s)", inputName.c_str()); + break; + } + } +} + +// Xbootclasspath +bool HIR2MPLOptions::ProcessXbootclasspath(const maplecl::OptionInterface &xbootclasspath) const { + std::string arg = xbootclasspath.GetCommonValue(); + FEOptions::GetInstance().SetXBootClassPath(arg); + return true; +} + +// PCL +bool HIR2MPLOptions::ProcessClassLoaderContext(const maplecl::OptionInterface &classloadercontext) const { + std::string arg = classloadercontext.GetCommonValue(); + FEOptions::GetInstance().SetClassLoaderContext(arg); + return true; +} + +// Dep +bool HIR2MPLOptions::ProcessCollectDepTypes(const maplecl::OptionInterface &dep) const { + const std::string arg = dep.GetCommonValue(); + if (arg.compare("all") == 0) { + FEOptions::GetInstance().SetModeCollectDepTypes(FEOptions::ModeCollectDepTypes::kAll); + } else if (arg.compare("func") == 0) { + FEOptions::GetInstance().SetModeCollectDepTypes(FEOptions::ModeCollectDepTypes::kFunc); + } else { + ERR(kLncErr, "unsupported options: %s", arg.c_str()); + return false; + } + return true; +} + +// SameNamePolicy +bool 
HIR2MPLOptions::ProcessDepSameNamePolicy(const maplecl::OptionInterface &depsamename) const { + const std::string arg = depsamename.GetCommonValue(); + if (arg.compare("sys") == 0) { + FEOptions::GetInstance().SetModeDepSameNamePolicy(FEOptions::ModeDepSameNamePolicy::kSys); + } else if (arg.compare("src") == 0) { + FEOptions::GetInstance().SetModeDepSameNamePolicy(FEOptions::ModeDepSameNamePolicy::kSrc); + } else { + ERR(kLncErr, "unsupported options: %s", arg.c_str()); + return false; + } + return true; +} + +// EnhanceC +bool HIR2MPLOptions::ProcessNpeCheckDynamic(const maplecl::OptionInterface &) const { + FEOptions::GetInstance().SetNpeCheckDynamic(true); + return true; +} + +bool HIR2MPLOptions::ProcessBoundaryCheckDynamic(const maplecl::OptionInterface &) const { + FEOptions::GetInstance().SetBoundaryCheckDynamic(true); + return true; +} + +bool HIR2MPLOptions::ProcessSafeRegion(const maplecl::OptionInterface &) const { + FEOptions::GetInstance().SetSafeRegion(true); + // boundary and npe checking options will be opened, if safe region option is opened + FEOptions::GetInstance().SetNpeCheckDynamic(true); + FEOptions::GetInstance().SetBoundaryCheckDynamic(true); + return true; +} + +bool HIR2MPLOptions::ProcessDefaultSafe(const maplecl::OptionInterface &) const { + FEOptions::GetInstance().SetDefaultSafe(true); + return true; +} + +bool HIR2MPLOptions::ProcessO2(const maplecl::OptionInterface &) const { + FEOptions::GetInstance().SetO2(false); + return true; +} + +bool HIR2MPLOptions::ProcessSimplifyShortCircuit(const maplecl::OptionInterface &) const { + FEOptions::GetInstance().SetSimplifyShortCircuit(true); + return true; +} + +bool HIR2MPLOptions::ProcessEnableVariableArray(const maplecl::OptionInterface &) const { + FEOptions::GetInstance().SetEnableVariableArray(true); + return true; +} + +bool HIR2MPLOptions::ProcessFuncInlineSize(const maplecl::OptionInterface &funcInliceSize) const { + uint32_t size = funcInliceSize.GetCommonValue(); + FEOptions::GetInstance().SetFuncInlineSize(size); + return true; +} + +bool HIR2MPLOptions::ProcessWPAA(const maplecl::OptionInterface &) const { + FEOptions::GetInstance().SetWPAA(true); + FEOptions::GetInstance().SetFuncInlineSize(UINT32_MAX); + return true; +} + +// AOT +bool HIR2MPLOptions::ProcessAOT(const maplecl::OptionInterface &) const { + FEOptions::GetInstance().SetIsAOT(true); + return true; +} + +template +void HIR2MPLOptions::Split(const std::string &s, char delim, Out result) { + std::stringstream ss; + ss.str(s); + std::string item; + while (std::getline(ss, item, delim)) { + *(result++) = item; + } +} + +std::list HIR2MPLOptions::SplitByComma(const std::string &s) { + std::list results; + HIR2MPLOptions::Split(s, ',', std::back_inserter(results)); + return results; +} +} // namespace maple diff --git a/src/hir2mpl/common/src/simple_xml.cpp b/src/hir2mpl/common/src/simple_xml.cpp new file mode 100644 index 0000000000000000000000000000000000000000..ecb6bab9345b227354ed71d2f688274ea50543ec --- /dev/null +++ b/src/hir2mpl/common/src/simple_xml.cpp @@ -0,0 +1,240 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "simple_xml.h" + +namespace maple { +SimpleXMLElem::SimpleXMLElem(MapleAllocator &alloc, const std::string &cat) + : tagID(kXMLIDNone), + tagValue(kXMLValueNone), + allocator(alloc), + catalog(cat), + id(0), + valueString(""), + content(""), + elems(allocator.Adapter()) { + value.raw = 0; + CHECK_FATAL(allocator.GetMemPool(), "mp is null"); +} + +SimpleXMLElem::SimpleXMLElem(MapleAllocator &alloc, const std::string &cat, uint32 argID) + : tagID(kXMLID), + tagValue(kXMLValueNone), + allocator(alloc), + catalog(cat), + id(argID), + valueString(""), + content(""), + elems(allocator.Adapter()) { + value.raw = 0; + CHECK_FATAL(allocator.GetMemPool(), "mp is null"); +} + +void SimpleXMLElem::DumpImpl(std::ostream &os, const std::string &prefix) const { + DumpHead(os, prefix); + if (elems.size() > 0) { + os << std::endl; + for (auto elem : elems) { + elem->Dump(os, prefix + " "); + } + os << prefix; + } else { + if (content != "") { + os << "\"" << XMLString(content) << "\""; + } + } + DumpTail(os, ""); + os << std::endl; +} + +void SimpleXMLElem::DumpHead(std::ostream &os, const std::string &prefix) const { + os << prefix << "<" << catalog; + if (tagID == kXMLID) { + os << " id=\"" << id << "\""; + } + switch (tagValue) { + case kXMLValueNone: + break; + case kXMLValueI8: + os << " value=\"" << static_cast(value.i8) << "\""; + break; + case kXMLValueU8: + os << " value=\"" << static_cast(value.u8) << "\""; + break; + case kXMLValueI16: + os << " value=\"" << static_cast(value.i16) << "\""; + break; + case kXMLValueU16: + os << " value=\"" << static_cast(value.u16) << "\""; + break; + case kXMLValueI32: + os << " value=\"" << value.i32 << "\""; + break; + case kXMLValueU32: + os << " value=\"" << value.u32 << "\""; + break; + case kXMLValueI64: + os << " value=\"" << value.i64 << "\""; + break; + case kXMLValueU64: + os << " value=\"" << value.u64 << "\""; + break; + case kXMLValueFloat: + os << " value=\"" << value.f << "\""; + break; + case kXMLValueDouble: + os << " value=\"" << value.d << "\""; + break; + case kXMLValueString: + os << " value=\"" << XMLString(valueString) << "\""; + break; + default: + break; + } + os << ">"; +} + +void SimpleXMLElem::DumpTail(std::ostream &os, const std::string &prefix) const { + os << prefix << ""; +} + +void SimpleXMLElem::AddElement(SimpleXMLElem &elem) { + elems.push_back(&elem); +} + +SimpleXMLElem *SimpleXMLElem::AddNewElement(const std::string &cat) { + MemPool *mp = allocator.GetMemPool(); + CHECK_NULL_FATAL(mp); + SimpleXMLElem *elem = mp->New(allocator, cat); + AddElement(*elem); + return elem; +} + +SimpleXMLElem *SimpleXMLElem::AddNewElement(const std::string &cat, uint32 argID) { + MemPool *mp = allocator.GetMemPool(); + CHECK_NULL_FATAL(mp); + SimpleXMLElem *elem = mp->New(allocator, cat, argID); + AddElement(*elem); + return elem; +} + +void SimpleXMLElem::SetValue(int8 v) { + tagValue = kXMLValueI8; + value.i8 = v; +} + +void SimpleXMLElem::SetValue(uint8 v) { + tagValue = kXMLValueU8; + value.u8 = v; +} + +void SimpleXMLElem::SetValue(int16 v) { + tagValue = kXMLValueI16; + value.i16 = v; +} + +void SimpleXMLElem::SetValue(uint16 v) { + tagValue = kXMLValueU16; + value.u16 = v; +} 
+ +void SimpleXMLElem::SetValue(int32 v) { + tagValue = kXMLValueI32; + value.i32 = v; +} + +void SimpleXMLElem::SetValue(uint32 v) { + tagValue = kXMLValueU32; + value.u32 = v; +} + +void SimpleXMLElem::SetValue(int64 v) { + tagValue = kXMLValueI64; + value.i64 = v; +} + +void SimpleXMLElem::SetValue(uint64 v) { + tagValue = kXMLValueU64; + value.u64 = v; +} + +void SimpleXMLElem::SetValue(float v) { + tagValue = kXMLValueFloat; + value.f = v; +} + +void SimpleXMLElem::SetValue(double v) { + tagValue = kXMLValueDouble; + value.d = v; +} + +void SimpleXMLElem::SetValue(const std::string &str) { + tagValue = kXMLValueString; + valueString = str; +} + +void SimpleXMLElem::SetContent(const std::string &str) { + content = str; +} + +std::string SimpleXMLElem::XMLString(const std::string &strIn) { + std::string str(strIn); + size_t npos = str.find("<"); + while (npos != std::string::npos) { + (void)str.replace(npos, 1, "<"); + npos = str.find("<"); + } + npos = str.find(">"); + while (npos != std::string::npos) { + (void)str.replace(npos, 1, ">"); + npos = str.find(">"); + } + return str; +} + +SimpleXMLElemMultiLine::SimpleXMLElemMultiLine(MapleAllocator &alloc, const std::string &cat) + : SimpleXMLElem(alloc, cat), lines(alloc.Adapter()) {} + +SimpleXMLElemMultiLine::SimpleXMLElemMultiLine(MapleAllocator &alloc, const std::string &cat, uint32 argID) + : SimpleXMLElem(alloc, cat, argID), lines(alloc.Adapter()) {} + +void SimpleXMLElemMultiLine::AddLine(const std::string &line) { + lines.emplace_back(MapleString(line, allocator.GetMemPool())); +} + +void SimpleXMLElemMultiLine::DumpImpl(std::ostream &os, const std::string &prefix) const { + DumpHead(os, prefix); + if (lines.size() > 0) { + os << std::endl; + for (const MapleString &str : lines) { + os << prefix << " " << str << std::endl; + } + } + DumpTail(os, prefix); + os << std::endl; +} + +SimpleXML::SimpleXML(MapleAllocator &alloc) : allocator(alloc), roots(allocator.Adapter()) {} + +void SimpleXML::DumpImpl(std::ostream &os) const { + os << "" << std::endl; + for (auto it : roots) { + it->Dump(os, ""); + } +} + +void SimpleXML::AddRoot(SimpleXMLElem &elem) { + roots.push_back(&elem); +} +} // namespace maple diff --git a/src/hir2mpl/common/src/simple_zip.cpp b/src/hir2mpl/common/src/simple_zip.cpp new file mode 100644 index 0000000000000000000000000000000000000000..c0abb9046d438f9987499f5e5d776c00d5c48af3 --- /dev/null +++ b/src/hir2mpl/common/src/simple_zip.cpp @@ -0,0 +1,179 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "simple_zip.h" +#include +#include + +namespace maple { +static const uint32 kZipSigLocalFile = 0x04034B50; +static const uint32 kZipSigDataDescriptor = 0x08074B50; +static const uint32 kZipSigCentralDir = 0x02014B50; + +ZipLocalFileHeader::~ZipLocalFileHeader() { + if (extraField != nullptr) { + free(extraField); + extraField = nullptr; + } +} + +std::unique_ptr ZipLocalFileHeader::Parse(BasicIORead &io) { + std::unique_ptr header = std::make_unique(); + header->signature = io.ReadUInt32(); + if (header->signature != kZipSigLocalFile) { + return std::unique_ptr(nullptr); + } + header->minVersion = io.ReadUInt16(); + header->gpFlag = io.ReadUInt16(); + header->compMethod = io.ReadUInt16(); + header->rawTime = io.ReadUInt16(); + header->rawDate = io.ReadUInt16(); + header->crc32 = io.ReadUInt32(); + header->compSize = io.ReadUInt32(); + header->unCompSize = io.ReadUInt32(); + header->lengthFileName = io.ReadUInt16(); + header->lengthExtraField = io.ReadUInt16(); + header->fileName = io.ReadString(header->lengthFileName); + if (header->lengthExtraField > 0) { + header->extraField = static_cast(malloc(header->lengthExtraField)); + CHECK_NULL_FATAL(header->extraField); + io.ReadBufferUInt8(header->extraField, header->lengthExtraField); + } + return header; +} + +std::unique_ptr ZipDataDescriptor::Parse(BasicIORead &io) { + std::unique_ptr desc = std::make_unique(); + desc->signature = io.ReadUInt32(); + CHECK_FATAL(desc->signature == kZipSigDataDescriptor, "invalid zip file: wrong signature for data descriptor"); + desc->crc32 = io.ReadUInt32(); + desc->compSize = io.ReadUInt32(); + desc->unCompSize = io.ReadUInt32(); + return desc; +} + +ZipLocalFile::~ZipLocalFile() { + if (compData != nullptr) { + free(compData); + compData = nullptr; + } + if (unCompData != nullptr) { + free(unCompData); + unCompData = nullptr; + } +} + +std::unique_ptr ZipLocalFile::Parse(BasicIORead &io) { + std::unique_ptr zf = std::make_unique(); + zf->header = ZipLocalFileHeader::Parse(io); + if (zf->header == nullptr) { + return std::unique_ptr(nullptr); + } + CHECK_FATAL((zf->header->GetGPFlag() & 0x1) == 0, "encrypted file is not supported"); + uint32 posDataStart = io.GetPos(); + uint32 posDataEnd = zf->GetDataEndPos(io); + if (!zf->isCompressed) { + zf->ProcessUncompressedFile(io, posDataStart, posDataEnd); + } else { + zf->ProcessCompressedFile(io, posDataStart, posDataEnd); + } + return zf; +} + +uint32 ZipLocalFile::GetDataEndPos(const BasicIORead &io) { + const uint8 offsetSize = 4; + uint32 posDataStart = io.GetPos(); + uint32 posDataEnd = posDataStart; + const uint8 *buf = io.GetSafeBuffer(offsetSize); + while (posDataEnd + offsetSize < io.GetFileLength()) { + uint32 sig = BasicIOEndian::GetUInt32LittleEndian(buf); + if (sig == kZipSigLocalFile) { + break; + } + if (sig == kZipSigCentralDir) { + break; + } + if (sig == kZipSigDataDescriptor) { + isCompressed = true; + break; + } + ++buf; + ++posDataEnd; + } + CHECK_FATAL(posDataEnd + offsetSize < io.GetFileLength(), "invalid zip file: no data descriptor"); + return posDataEnd; +} + +void ZipLocalFile::ProcessUncompressedFile(BasicIORead &io, uint32 start, uint32 end) { + unCompDataSize = end - start; + if (unCompDataSize > 0) { + unCompData = static_cast(malloc(unCompDataSize)); + CHECK_NULL_FATAL(unCompData); + io.ReadBufferUInt8(unCompData, unCompDataSize); + } +} + +void ZipLocalFile::ProcessCompressedFile(BasicIORead &io, uint32 start, uint32 end) { + uint32 compDataLength = static_cast(end - start); + if (compDataLength == 0) { + return; 
+ } + compData = static_cast(malloc(compDataLength)); + CHECK_NULL_FATAL(compData); + io.ReadBufferUInt8(compData, compDataLength); + dataDesc = ZipDataDescriptor::Parse(io); + CHECK_FATAL(compDataLength == dataDesc->GetCompSize(), "invalid zip file: wrong compsize"); + if (dataDesc->GetUnCompSize() > 0) { + unCompData = static_cast(malloc(dataDesc->GetUnCompSize())); + CHECK_NULL_FATAL(unCompData); + z_stream zs; + zs.zalloc = static_cast(0); + zs.zfree = static_cast(0); + int err = inflateInit2(&zs, -MAX_WBITS); + CHECK_FATAL(err == 0, "inflateInit2 error"); + zs.next_in = compData; + zs.avail_in = dataDesc->GetCompSize(); + zs.total_in = 0; + zs.next_out = unCompData; + zs.avail_out = dataDesc->GetUnCompSize(); + zs.total_out = 0; + err = inflate(&zs, Z_NO_FLUSH); + if (err == Z_STREAM_END) { + err = inflateEnd(&zs); + } + unCompDataSize = dataDesc->GetUnCompSize(); + if (err != Z_OK) { + free(unCompData); + unCompData = nullptr; + unCompDataSize = 0; + CHECK_FATAL(false, "inflate failed"); + } + } +} + +SimpleZip::SimpleZip(BasicIOMapFile &file) : BasicIORead(file, false) {} + +SimpleZip::~SimpleZip() {} + +void SimpleZip::ParseFile() { + while (true) { + std::unique_ptr zf = ZipLocalFile::Parse(*this); + if (zf != nullptr) { + files.push_back(std::move(zf)); + } else { + break; + } + } +} +} // namespace maple diff --git a/src/hir2mpl/optimize/include/conditional_operator.h b/src/hir2mpl/optimize/include/conditional_operator.h new file mode 100644 index 0000000000000000000000000000000000000000..4b5eaa68981d9475f6ba08d9dbf7e072affb88ab --- /dev/null +++ b/src/hir2mpl/optimize/include/conditional_operator.h @@ -0,0 +1,31 @@ +/* + * Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef CONDITIONAL_OPERATOR_H +#define CONDITIONAL_OPERATOR_H +#include "feir_stmt.h" +#include "ast_expr.h" + +namespace maple { +// To avoid affecting the unified abstraction processing of ast expr, +// redundant temporary variables of conditional statements are processed separately. +class ConditionalOptimize { + public: + static bool DeleteRedundantTmpVar(const UniqueFEIRExpr &expr, std::list &stmts, + const UniqueFEIRVar &var, PrimType dstPty, FieldID fieldID = 0); + static bool DeleteRedundantTmpVar(const UniqueFEIRExpr &expr, std::list &stmts); + static bool IsCompletedConditional(const UniqueFEIRExpr &expr, std::list &stmts); +}; +} // namespace maple +#endif // CONDITIONAL_OPERATOR_H diff --git a/src/hir2mpl/optimize/include/feir_bb.h b/src/hir2mpl/optimize/include/feir_bb.h new file mode 100644 index 0000000000000000000000000000000000000000..1516bccf2fb81e3e7cc010363721f4988ad2d6a8 --- /dev/null +++ b/src/hir2mpl/optimize/include/feir_bb.h @@ -0,0 +1,181 @@ +/* + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef HIR2MPL_FEIR_BB_H +#define HIR2MPL_FEIR_BB_H +#include +#include "types_def.h" +#include "mempool_allocator.h" +#include "safe_ptr.h" +#include "fe_utils.h" +#include "feir_stmt.h" + +namespace maple { +enum FEIRBBKind : uint8 { + kBBKindDefault, + kBBKindPesudoHead, + kBBKindPesudoTail, + kBBKindExt +}; + +class FEIRBB : public FELinkListNode { + public: + FEIRBB(); + explicit FEIRBB(uint8 argKind); + ~FEIRBB() override; + + uint8 GetBBKind() const { + return kind; + } + + std::string GetBBKindName() const; + + uint32 GetID() const { + return id; + } + + void SetID(uint32 arg) { + id = arg; + } + + const FEIRStmt *GetStmtHead() const { + return stmtHead; + } + + void SetStmtHead(FEIRStmt *stmtHeadIn) { + stmtHead = stmtHeadIn; + } + + const FEIRStmt *GetStmtTail() const { + return stmtTail; + } + + void SetStmtTail(FEIRStmt *stmtTailIn) { + stmtTail = stmtTailIn; + } + + void InsertAndUpdateNewHead(FEIRStmt *newHead) { + stmtHead->InsertBefore(newHead); + stmtHead = newHead; + } + + void InsertAndUpdateNewTail(FEIRStmt *newTail) { + stmtTail->InsertAfter(newTail); + stmtTail = newTail; + } + + const FEIRStmt *GetStmtNoAuxHead() const { + return stmtNoAuxHead; + } + + const FEIRStmt *GetStmtNoAuxTail() const { + return stmtNoAuxTail; + } + + void AddPredBB(FEIRBB *bb) { + if (predBBs.find(bb) == predBBs.end()) { + predBBs.insert(bb); + } + } + + void AddSuccBB(FEIRBB *bb) { + if (succBBs.find(bb) == succBBs.end()) { + succBBs.insert(bb); + } + } + + const std::set &GetPredBBs() const { + return predBBs; + } + + const std::set &GetSuccBBs() const { + return succBBs; + } + + bool IsPredBB(FEIRBB *bb) const { + return predBBs.find(bb) != predBBs.end(); + } + + bool IsSuccBB(FEIRBB *bb) const { + return succBBs.find(bb) != succBBs.end(); + } + + bool IsDead() const { + return predBBs.empty(); + } + + bool IsFallThru() const { + return stmtNoAuxTail->IsFallThru(); + } + + bool IsBranch() const { + return stmtNoAuxTail->IsBranch(); + } + + void SetCheckPointIn(std::unique_ptr argCheckPointIn) { + checkPointIn = std::move(argCheckPointIn); + } + + FEIRStmtCheckPoint &GetCheckPointIn() const { + return *(checkPointIn.get()); + } + + void SetCheckPointOut(std::unique_ptr argCheckPointOut) { + checkPointOut = std::move(argCheckPointOut); + } + + FEIRStmtCheckPoint &GetCheckPointOut() const { + return *(checkPointOut.get());; + } + + void AddCheckPoint(std::unique_ptr checkPoint) { + checkPoints.emplace_back(std::move(checkPoint)); + } + + FEIRStmtCheckPoint *GetLatestCheckPoint() const { + if (checkPoints.empty()) { + return nullptr; + } + return checkPoints.back().get(); + } + + const std::vector> &GetCheckPoints() const { + return checkPoints; + } + + void AppendStmt(FEIRStmt *stmt); + void AddStmtAuxPre(FEIRStmt *stmt); + void AddStmtAuxPost(FEIRStmt *stmt); + bool IsPredBB(uint32 bbID); + bool IsSuccBB(uint32 bbID); + virtual void Dump() const; + + protected: + uint8 kind; + uint32 id; + FEIRStmt *stmtHead; + FEIRStmt *stmtTail; + FEIRStmt *stmtNoAuxHead; + FEIRStmt *stmtNoAuxTail; + std::set predBBs; + std::set succBBs; + + private: + std::unique_ptr checkPointIn; + std::vector> checkPoints; + std::unique_ptr checkPointOut; + std::map 
feirStmtCheckPointMap; +}; +} // namespace maple +#endif // HIR2MPL_FEIR_BB_H \ No newline at end of file diff --git a/src/hir2mpl/optimize/include/feir_cfg.h b/src/hir2mpl/optimize/include/feir_cfg.h new file mode 100644 index 0000000000000000000000000000000000000000..c103f7b36c8dec7bf305c70d22bf3162b209a9eb --- /dev/null +++ b/src/hir2mpl/optimize/include/feir_cfg.h @@ -0,0 +1,75 @@ +/* + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef HIR2MPL_FEIR_CFG_H +#define HIR2MPL_FEIR_CFG_H +#include +#include +#include "feir_bb.h" + +namespace maple { +class FEIRCFG { + public: + FEIRCFG(FEIRStmt *argStmtHead, FEIRStmt *argStmtTail); + + virtual ~FEIRCFG() { + currBBNode = nullptr; + stmtHead = nullptr; + stmtTail = nullptr; + } + + void Init(); + void BuildBB(); + bool BuildCFG(); + void GenerateCFG(); + const FEIRBB *GetHeadBB(); + const FEIRBB *GetNextBB(); + void LabelStmtID() const; + void LabelBBID() const; + bool HasDeadBB() const; + void DumpBBs(); + void DumpCFGGraph(std::ofstream &file); + void DumpCFGGraphForBB(std::ofstream &file, const FEIRBB &bb) const; + void DumpCFGGraphForEdge(std::ofstream &file); + + FEIRBB *GetDummyHead() const { + return bbHead.get(); + } + + FEIRBB *GetDummyTail() const { + return bbTail.get(); + } + + std::unique_ptr NewFEIRBB() const { + return std::make_unique(); + } + + bool IsGeneratedCFG() const { + return isGeneratedCFG; + } + + LLT_PRIVATE: + void AppendAuxStmt(); + FEIRBB *NewBBAppend(); + + bool isGeneratedCFG = false; + FEIRStmt *stmtHead; + FEIRStmt *stmtTail; + FELinkListNode *currBBNode = nullptr; + std::list> listBB; + std::unique_ptr bbHead; + std::unique_ptr bbTail; +}; +} // namespace maple +#endif // HIR2MPL_FEIR_CFG_H \ No newline at end of file diff --git a/src/hir2mpl/optimize/include/feir_dfg.h b/src/hir2mpl/optimize/include/feir_dfg.h new file mode 100644 index 0000000000000000000000000000000000000000..78de62d78b667807a4d443e2d98ab9447a27f487 --- /dev/null +++ b/src/hir2mpl/optimize/include/feir_dfg.h @@ -0,0 +1,40 @@ +/* + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef HIR2MPL_INCLUDE_FEIR_DFG_H +#define HIR2MPL_INCLUDE_FEIR_DFG_H +#include "feir_var.h" + +namespace maple { +// FEIRUseDefChain key is use, value is def set +using FEIRUseDefChain = std::map>; +// FEIRUseDefChain key is def, value is use set +using FEIRDefUseChain = std::map>; + +class FEIRDFG { + public: + FEIRDFG() = default; + ~FEIRDFG() = default; + void CalculateDefUseByUseDef(FEIRDefUseChain &mapDefUse, const FEIRUseDefChain &mapUseDef) const; + void CalculateUseDefByDefUse(FEIRUseDefChain &mapUseDef, const FEIRDefUseChain &mapDefUse) const; + void BuildFEIRUDDU(); + void OutputUseDefChain(); + void OutputDefUseChain(); + + private: + FEIRUseDefChain useDefChain; + FEIRDefUseChain defUseChain; +}; +} // namespace maple +#endif // HIR2MPL_INCLUDE_FEIR_DFG_H \ No newline at end of file diff --git a/src/hir2mpl/optimize/include/feir_lower.h b/src/hir2mpl/optimize/include/feir_lower.h new file mode 100644 index 0000000000000000000000000000000000000000..9dc6db1bf41278dbc8c53060a860574ecb0ae54d --- /dev/null +++ b/src/hir2mpl/optimize/include/feir_lower.h @@ -0,0 +1,55 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ + +#ifndef HIR2MPL_FEIR_LOWER_H +#define HIR2MPL_FEIR_LOWER_H +#include "feir_stmt.h" + +namespace maple { +class FEFunction; // circular dependency exists, no other choice +class FEIRLower { + public: + explicit FEIRLower(FEFunction &funcIn); + void LowerFunc(); + void LowerStmt(FEIRStmt *stmt, FEIRStmt *ptrTail); + void LowerStmt(const std::list &stmts, FEIRStmt *ptrTail); + + FEIRStmt *GetlowerStmtHead() { + return lowerStmtHead; + } + + FEIRStmt *GetlowerStmtTail() { + return lowerStmtTail; + } + + private: + void Init(); + void Clear(); + FEIRStmt *CreateHeadAndTail(); + FEIRStmt *RegisterAuxFEIRStmt(UniqueFEIRStmt stmt); + FEIRStmt *RegisterAndInsertFEIRStmt(UniqueFEIRStmt stmt, FEIRStmt *ptrTail, const Loc loc = {0, 0, 0}); + void LowerIfStmt(FEIRStmtIf &ifStmt, FEIRStmt *ptrTail); + void ProcessLoopStmt(FEIRStmtDoWhile &stmt, FEIRStmt *ptrTail); + void LowerWhileStmt(const FEIRStmtDoWhile &whileStmt, FEIRStmt *bodyHead, FEIRStmt *bodyTail, FEIRStmt *ptrTail); + void LowerDoWhileStmt(const FEIRStmtDoWhile &doWhileStmt, FEIRStmt *bodyHead, FEIRStmt *bodyTail, FEIRStmt *ptrTail); + void CreateAndInsertCondStmt(Opcode op, const FEIRStmtIf &ifStmt, FEIRStmt *head, FEIRStmt *tail, FEIRStmt *ptrTail); + + FEFunction &func; + FEIRStmt *lowerStmtHead; + FEIRStmt *lowerStmtTail; + std::list auxFEIRStmtList; // auxiliary feir stmt list +}; +} // namespace maple +#endif // HIR2MPL_FEIR_LOWER_H diff --git a/src/hir2mpl/optimize/include/ror.h b/src/hir2mpl/optimize/include/ror.h new file mode 100644 index 0000000000000000000000000000000000000000..e582c6ac9de647c74a67bd0c3ca8c30665dc8aa4 --- /dev/null +++ b/src/hir2mpl/optimize/include/ror.h @@ -0,0 +1,43 @@ +/* + * Copyright (c) [2021-2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. 
+ * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef OPTIMIZE_INCLUDE_ROR_H +#define OPTIMIZE_INCLUDE_ROR_H +#include "feir_stmt.h" +#include "opcodes.h" + +namespace maple { +class Ror { + public: + Ror(Opcode opIn, const UniqueFEIRExpr &left, const UniqueFEIRExpr &right) + : op(opIn), lExpr(left), rExpr(right) {} + ~Ror() = default; + UniqueFEIRExpr Emit2FEExpr(); + + private: + bool CheckBaseExpr() const; + bool GetConstVal(const UniqueFEIRExpr &expr); + bool IsRorShlOpnd(const UniqueFEIRExpr &expr); + bool IsRorLshrOpnd(const UniqueFEIRExpr &expr, bool inShl); + + Opcode op; + const UniqueFEIRExpr &lExpr; + const UniqueFEIRExpr &rExpr; + UniqueFEIRExpr rShiftBaseExpr; + UniqueFEIRExpr lShiftBaseExpr; + uint64 constVal = 0; + uint32 bitWidth = 0; +}; +} // namespace maple +#endif // OPTIMIZE_INCLUDE_ROR_H \ No newline at end of file diff --git a/src/hir2mpl/optimize/src/conditional_operator.cpp b/src/hir2mpl/optimize/src/conditional_operator.cpp new file mode 100644 index 0000000000000000000000000000000000000000..0995d38fe2f853cea385d689a8e2559f6cf81652 --- /dev/null +++ b/src/hir2mpl/optimize/src/conditional_operator.cpp @@ -0,0 +1,93 @@ +/* + * Copyright (c) [2021-2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "conditional_operator.h" +#include "feir_builder.h" + +namespace maple { +bool ConditionalOptimize::IsCompletedConditional(const UniqueFEIRExpr &expr, std::list &stmts) { + if (FEOptions::GetInstance().IsNpeCheckDynamic() || FEOptions::GetInstance().IsBoundaryCheckDynamic()) { + return false; + } + + if (expr == nullptr || expr->GetKind() != kExprTernary) { + return false; + } + + if (stmts.empty() || stmts.back()->GetKind() != kStmtIf) { + return false; + } + + FEIRStmtIf *ifStmt = static_cast(stmts.back().get()); + if (ifStmt->GetThenStmt().empty() || ifStmt->GetElseStmt().empty()) { + return false; + } + + return true; +} + +bool ConditionalOptimize::DeleteRedundantTmpVar(const UniqueFEIRExpr &expr, std::list &stmts, + const UniqueFEIRVar &var, PrimType dstPty, FieldID fieldID) { + if (!IsCompletedConditional(expr, stmts)) { + return false; + } + + auto ReplaceBackStmt = [dstPty, fieldID, &var](std::list &stmts, const FEIRStmt &srcStmt) { + auto dassignStmt = static_cast(stmts.back().get()); + UniqueFEIRExpr srcExpr = dassignStmt->GetExpr()->Clone(); + PrimType srcPty = srcExpr->GetPrimType(); + if (srcPty != dstPty && srcPty != PTY_agg && srcPty != PTY_void) { + PrimType newDstPty = dstPty; + if (srcPty == PTY_f32 || srcPty == PTY_f64) { + if (dstPty == PTY_u8 || dstPty == PTY_u16) { + newDstPty = PTY_u32; + } + if (dstPty == PTY_i8 || dstPty == PTY_i16) { + newDstPty = PTY_i32; + } + } + srcExpr = FEIRBuilder::CreateExprCastPrim(std::move(srcExpr), newDstPty); + } + + auto stmt = std::make_unique(var->Clone(), srcExpr->Clone(), fieldID); + stmt->SetSrcLoc(srcStmt.GetSrcLoc()); + stmts.pop_back(); + stmts.emplace_back(std::move(stmt)); + }; + + FEIRStmtIf *ifStmt = static_cast(stmts.back().get()); + ReplaceBackStmt(ifStmt->GetThenStmt(), *ifStmt); + ReplaceBackStmt(ifStmt->GetElseStmt(), *ifStmt); + return true; +} + +bool ConditionalOptimize::DeleteRedundantTmpVar(const UniqueFEIRExpr &expr, std::list &stmts) { + if (!IsCompletedConditional(expr, stmts)) { + return false; + } + + auto ReplaceBackStmt = [](std::list &stmts, const FEIRStmt &srcStmt) { + auto dassignStmt = static_cast(stmts.back().get()); + auto stmt = std::make_unique(dassignStmt->GetExpr()->Clone()); + stmt->SetSrcLoc(srcStmt.GetSrcLoc()); + stmts.pop_back(); + stmts.emplace_back(std::move(stmt)); + }; + + FEIRStmtIf *ifStmt = static_cast(stmts.back().get()); + ReplaceBackStmt(ifStmt->GetThenStmt(), *ifStmt); + ReplaceBackStmt(ifStmt->GetElseStmt(), *ifStmt); + return true; +} +} \ No newline at end of file diff --git a/src/hir2mpl/optimize/src/feir_bb.cpp b/src/hir2mpl/optimize/src/feir_bb.cpp new file mode 100644 index 0000000000000000000000000000000000000000..42786149d5765d210bd7e546c9cf84011d1cd54e --- /dev/null +++ b/src/hir2mpl/optimize/src/feir_bb.cpp @@ -0,0 +1,115 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "feir_bb.h" + +namespace maple { +FEIRBB::FEIRBB(uint8 argKind) + : kind(argKind), + id(0), + stmtHead(nullptr), + stmtTail(nullptr), + stmtNoAuxHead(nullptr), + stmtNoAuxTail(nullptr) {} + +FEIRBB::FEIRBB() + : FEIRBB(FEIRBBKind::kBBKindDefault) {} + +FEIRBB::~FEIRBB() { + stmtHead = nullptr; + stmtTail = nullptr; + stmtNoAuxHead = nullptr; + stmtNoAuxTail = nullptr; +} + +void FEIRBB::AppendStmt(FEIRStmt *stmt) { + if (stmtHead == nullptr) { + stmtHead = stmt; + } + stmtTail = stmt; + if (!stmt->IsAux()) { + if (stmtNoAuxHead == nullptr) { + stmtNoAuxHead = stmt; + } + stmtNoAuxTail = stmt; + } +} + +void FEIRBB::AddStmtAuxPre(FEIRStmt *stmt) { + if (!stmt->IsAuxPre()) { + return; + } + stmtHead = stmt; +} + +void FEIRBB::AddStmtAuxPost(FEIRStmt *stmt) { + if (!stmt->IsAuxPost()) { + return; + } + stmtTail = stmt; +} + +bool FEIRBB::IsPredBB(uint32 bbID) { + for (FEIRBB *bb : predBBs) { + if (bb->GetID() == bbID) { + return true; + } + } + return false; +} + +bool FEIRBB::IsSuccBB(uint32 bbID) { + for (FEIRBB *bb : succBBs) { + if (bb->GetID() == bbID) { + return true; + } + } + return false; +} + +void FEIRBB::Dump() const { + std::cout << "FEIRBB (id=" << id << ", kind=" << GetBBKindName() << + ", preds={"; + for (FEIRBB *bb : predBBs) { + std::cout << bb->GetID() << " "; + } + std::cout << "}, succs={"; + for (FEIRBB *bb : succBBs) { + std::cout << bb->GetID() << " "; + } + std::cout << "})" << std::endl; + FELinkListNode *nodeStmt = stmtHead; + while (nodeStmt != nullptr) { + FEIRStmt *stmt = static_cast(nodeStmt); + stmt->Dump(" "); + if (nodeStmt == stmtTail) { + return; + } + nodeStmt = nodeStmt->GetNext(); + } +} + +std::string FEIRBB::GetBBKindName() const { + switch (kind) { + case kBBKindDefault: + return "Default"; + case kBBKindPesudoHead: + return "PesudoHead"; + case kBBKindPesudoTail: + return "PesudoTail"; + default: + return "unknown"; + } +} +} // namespace maple \ No newline at end of file diff --git a/src/hir2mpl/optimize/src/feir_cfg.cpp b/src/hir2mpl/optimize/src/feir_cfg.cpp new file mode 100644 index 0000000000000000000000000000000000000000..3ddba5d23966a41b502ec4728a9608607fd0be76 --- /dev/null +++ b/src/hir2mpl/optimize/src/feir_cfg.cpp @@ -0,0 +1,252 @@ +/* + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "feir_cfg.h" +#include +#include "mpl_logging.h" + +namespace maple { +FEIRCFG::FEIRCFG(FEIRStmt *argStmtHead, FEIRStmt *argStmtTail) + : stmtHead(argStmtHead), stmtTail(argStmtTail) {} + +void FEIRCFG::Init() { + bbHead = std::make_unique(kBBKindPesudoHead); + bbTail = std::make_unique(kBBKindPesudoTail); + bbHead->SetNext(bbTail.get()); + bbTail->SetPrev(bbHead.get()); +} + +void FEIRCFG::GenerateCFG() { + if (isGeneratedCFG) { + return; + } + Init(); + LabelStmtID(); + BuildBB(); + BuildCFG(); + LabelBBID(); +} + +void FEIRCFG::BuildBB() { + FELinkListNode *nodeStmt = stmtHead->GetNext(); + FEIRBB *currBB = nullptr; + while (nodeStmt != nullptr && nodeStmt != stmtTail) { + FEIRStmt *stmt = static_cast(nodeStmt); + if (!stmt->IsAux()) { + // check start of BB + if (currBB == nullptr || !stmt->GetExtraPreds().empty()) { + currBB = NewBBAppend(); + bbTail->InsertBefore(currBB); + } + CHECK_FATAL(currBB != nullptr, "nullptr check of currBB"); + currBB->AppendStmt(stmt); + // check end of BB + if (!stmt->IsFallThru() || !stmt->GetExtraSuccs().empty()) { + currBB = nullptr; + } + } + nodeStmt = nodeStmt->GetNext(); + } + + AppendAuxStmt(); +} + +void FEIRCFG::AppendAuxStmt() { + FELinkListNode *nodeBB = bbHead->GetNext(); + while (nodeBB != nullptr && nodeBB != bbTail.get()) { + FEIRBB *bb = static_cast(nodeBB); + // add pre + FELinkListNode *nodeStmt = bb->GetStmtHead()->GetPrev(); + while (nodeStmt != nullptr) { + FEIRStmt *stmt = static_cast(nodeStmt); + if (stmt->IsAuxPre()) { + bb->AddStmtAuxPre(stmt); + } else { + break; + } + nodeStmt = nodeStmt->GetPrev(); + } + // add post + nodeStmt = bb->GetStmtTail()->GetNext(); + while (nodeStmt != nullptr) { + FEIRStmt *stmt = static_cast(nodeStmt); + if (stmt->IsAuxPost()) { + bb->AddStmtAuxPost(stmt); + } else { + break; + } + nodeStmt = nodeStmt->GetNext(); + } + nodeBB = nodeBB->GetNext(); + } +} + +FEIRBB *FEIRCFG::NewBBAppend() { + std::unique_ptr bbNew = NewFEIRBB(); + ASSERT(bbNew != nullptr, "nullptr check for bbNew"); + listBB.push_back(std::move(bbNew)); + return listBB.back().get(); +} + +bool FEIRCFG::BuildCFG() { + // build target map + std::map mapTargetStmtBB; + FELinkListNode *nodeBB = bbHead->GetNext(); + while (nodeBB != nullptr && nodeBB != bbTail.get()) { + FEIRBB *bb = static_cast(nodeBB); + const FEIRStmt *locStmtHead = bb->GetStmtNoAuxHead(); + if (locStmtHead != nullptr) { // Additional conditions need to be added + mapTargetStmtBB[locStmtHead] = bb; + } + nodeBB = nodeBB->GetNext(); + } + // link + nodeBB = bbHead->GetNext(); + bool firstBB = true; + while (nodeBB != nullptr && nodeBB != bbTail.get()) { + FEIRBB *bb = static_cast(nodeBB); + if (firstBB) { + bb->AddPredBB(bbHead.get()); + bbHead->AddSuccBB(bb); + firstBB = false; + } + const FEIRStmt *locStmtTail = bb->GetStmtNoAuxTail(); + CHECK_FATAL(locStmtTail != nullptr, "stmt tail is nullptr"); + if (locStmtTail->IsFallThru()) { + FELinkListNode *nodeBBNext = nodeBB->GetNext(); + if (nodeBBNext == nullptr || nodeBBNext == bbTail.get()) { + ERR(kLncErr, "Method without return"); + return false; + } + FEIRBB *bbNext = static_cast(nodeBBNext); + bb->AddSuccBB(bbNext); + bbNext->AddPredBB(bb); + } + for (FEIRStmt *stmt : locStmtTail->GetExtraSuccs()) { + std::map::const_iterator itBB = mapTargetStmtBB.find(stmt); + CHECK_FATAL(itBB != mapTargetStmtBB.cend(), "Target BB is not found"); + FEIRBB *bbNext = itBB->second; + bb->AddSuccBB(bbNext); + bbNext->AddPredBB(bb); + } + nodeBB = nodeBB->GetNext(); + } + isGeneratedCFG = true; + return isGeneratedCFG; 
+} + +const FEIRBB *FEIRCFG::GetHeadBB() { + currBBNode = bbHead->GetNext(); + if (currBBNode == bbTail.get()) { + return nullptr; + } + return static_cast(currBBNode); +} + +const FEIRBB *FEIRCFG::GetNextBB() { + currBBNode = currBBNode->GetNext(); + if (currBBNode == bbTail.get()) { + return nullptr; + } + return static_cast(currBBNode); +} + +void FEIRCFG::LabelStmtID() const { + FELinkListNode *nodeStmt = stmtHead; + uint32 idx = 0; + while (nodeStmt != nullptr) { + FEIRStmt *stmt = static_cast(nodeStmt); + stmt->SetID(idx); + idx++; + nodeStmt = nodeStmt->GetNext(); + } +} + +void FEIRCFG::LabelBBID() const { + FELinkListNode *nodeBB = bbHead.get(); + uint32 idx = 0; + while (nodeBB != nullptr) { + FEIRBB *bb = static_cast(nodeBB); + bb->SetID(idx); + idx++; + nodeBB = nodeBB->GetNext(); + } +} + +bool FEIRCFG::HasDeadBB() const { + FELinkListNode *nodeBB = bbHead->GetNext(); + while (nodeBB != nullptr && nodeBB != bbTail.get()) { + FEIRBB *bb = static_cast(nodeBB); + if (bb->IsDead()) { + return true; + } + nodeBB = nodeBB->GetNext(); + } + return false; +} + +void FEIRCFG::DumpBBs() { + FELinkListNode *nodeBB = bbHead->GetNext(); + while (nodeBB != nullptr && nodeBB != bbTail.get()) { + FEIRBB *bb = static_cast(nodeBB); + bb->Dump(); + nodeBB = nodeBB->GetNext(); + } +} + +void FEIRCFG::DumpCFGGraph(std::ofstream &file) { + FELinkListNode *nodeBB = bbHead->GetNext(); + while (nodeBB != nullptr && nodeBB != bbTail.get()) { + FEIRBB *bb = static_cast(nodeBB); + DumpCFGGraphForBB(file, *bb); + nodeBB = nodeBB->GetNext(); + } + DumpCFGGraphForEdge(file); + file << "}" << std::endl; +} + +void FEIRCFG::DumpCFGGraphForBB(std::ofstream &file, const FEIRBB &bb) const { + file << " BB" << bb.GetID() << " [shape=record,label=\"{\n"; + const FELinkListNode *nodeStmt = bb.GetStmtHead(); + while (nodeStmt != nullptr) { + const FEIRStmt *stmt = static_cast(nodeStmt); + file << " " << stmt->DumpDotString(); + if (nodeStmt == bb.GetStmtTail()) { + file << "\n"; + break; + } else { + file << " |\n"; + } + nodeStmt = nodeStmt->GetNext(); + } + file << " }\"];\n"; +} + +void FEIRCFG::DumpCFGGraphForEdge(std::ofstream &file) { + file << " subgraph cfg_edges {\n"; + file << " edge [color=\"#000000\",weight=0.3,len=3];\n"; + const FELinkListNode *nodeBB = bbHead->GetNext(); + while (nodeBB != nullptr && nodeBB != bbTail.get()) { + const FEIRBB *bb = static_cast(nodeBB); + const FEIRStmt *stmtS = bb->GetStmtTail(); + for (FEIRBB *bbNext : bb->GetSuccBBs()) { + const FEIRStmt *stmtE = bbNext->GetStmtHead(); + file << " BB" << bb->GetID() << ":stmt" << stmtS->GetID() << " -> "; + file << "BB" << bbNext->GetID() << ":stmt" << stmtE->GetID() << "\n"; + } + nodeBB = nodeBB->GetNext(); + } + file << " }\n"; +} +} // namespace maple diff --git a/src/hir2mpl/optimize/src/feir_dfg.cpp b/src/hir2mpl/optimize/src/feir_dfg.cpp new file mode 100644 index 0000000000000000000000000000000000000000..2513b15765931c08918d4d028ece0553a21d2fdb --- /dev/null +++ b/src/hir2mpl/optimize/src/feir_dfg.cpp @@ -0,0 +1,83 @@ +/* + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "feir_dfg.h" + +namespace maple { +void FEIRDFG::CalculateDefUseByUseDef(FEIRDefUseChain &mapDefUse, const FEIRUseDefChain &mapUseDef) const { + mapDefUse.clear(); + for (auto &it : mapUseDef) { + for (UniqueFEIRVar *def : it.second) { + if (mapDefUse[def].find(it.first) == mapDefUse[def].end()) { + mapDefUse[def].insert(it.first); + } + } + } +} + +void FEIRDFG::CalculateUseDefByDefUse(FEIRUseDefChain &mapUseDef, const FEIRDefUseChain &mapDefUse) const { + mapUseDef.clear(); + for (auto &it : mapDefUse) { + for (UniqueFEIRVar *use : it.second) { + if (mapUseDef[use].find(it.first) == mapUseDef[use].end()) { + mapUseDef[use].insert(it.first); + } + } + } +} + +void FEIRDFG::BuildFEIRUDDU() { + CalculateDefUseByUseDef(defUseChain, useDefChain); // build Def-Use Chain +} + +void FEIRDFG::OutputUseDefChain() { + std::cout << "useDefChain : {" << std::endl; + FEIRUseDefChain::const_iterator it = useDefChain.cbegin(); + while (it != useDefChain.cend()) { + UniqueFEIRVar *use = it->first; + std::cout << " use : " << (*use)->GetNameRaw() << "_" << GetPrimTypeName((*use)->GetType()->GetPrimType()); + std::cout << " defs : ["; + const std::set &defs = it->second; + for (UniqueFEIRVar *def : defs) { + std::cout << (*def)->GetNameRaw() << "_" << GetPrimTypeName((*def)->GetType()->GetPrimType()) << ", "; + } + if (defs.size() == 0) { + std::cout << "empty defs"; + } + std::cout << " ]" << std::endl; + it++; + } + std::cout << "}" << std::endl; +} + +void FEIRDFG::OutputDefUseChain() { + std::cout << "defUseChain : {" << std::endl; + FEIRDefUseChain::const_iterator it = defUseChain.cbegin(); + while (it != defUseChain.cend()) { + UniqueFEIRVar *def = it->first; + std::cout << " def : " << (*def)->GetNameRaw() << "_" << GetPrimTypeName((*def)->GetType()->GetPrimType()); + std::cout << " uses : ["; + const std::set &uses = it->second; + for (UniqueFEIRVar *use : uses) { + std::cout << (*use)->GetNameRaw() << "_" << GetPrimTypeName((*use)->GetType()->GetPrimType()) << ", "; + } + if (uses.size() == 0) { + std::cout << "empty uses"; + } + std::cout << " ]" << std::endl; + it++; + } + std::cout << "}" << std::endl; +} +} // namespace maple \ No newline at end of file diff --git a/src/hir2mpl/optimize/src/feir_lower.cpp b/src/hir2mpl/optimize/src/feir_lower.cpp new file mode 100644 index 0000000000000000000000000000000000000000..d732b97468698c187e1cce79efd0a4bb49102810 --- /dev/null +++ b/src/hir2mpl/optimize/src/feir_lower.cpp @@ -0,0 +1,244 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "feir_lower.h" +#include "fe_function.h" +#include "feir_builder.h" + +namespace maple { +FEIRLower::FEIRLower(FEFunction &funcIn) : func(funcIn) { + Init(); +} + +void FEIRLower::Init() { + lowerStmtHead = func.RegisterFEIRStmt(std::make_unique(FEIRNodeKind::kStmtPesudoFuncStart)); + lowerStmtTail = func.RegisterFEIRStmt(std::make_unique(FEIRNodeKind::kStmtPesudoFuncEnd)); + lowerStmtHead->SetNext(lowerStmtTail); + lowerStmtTail->SetPrev(lowerStmtHead); +} + +void FEIRLower::Clear() { + auxFEIRStmtList.clear(); +} + +FEIRStmt *FEIRLower::RegisterAuxFEIRStmt(UniqueFEIRStmt stmt) { + auxFEIRStmtList.push_back(std::move(stmt)); + return auxFEIRStmtList.back().get(); +} + +FEIRStmt *FEIRLower::CreateHeadAndTail() { + FEIRStmt *head = RegisterAuxFEIRStmt(std::make_unique(FEIRNodeKind::kStmtPesudoHead)); + FEIRStmt *tail = RegisterAuxFEIRStmt(std::make_unique(FEIRNodeKind::kStmtPesudoTail)); + head->SetNext(tail); + tail->SetPrev(head); + return head; +} + +FEIRStmt *FEIRLower::RegisterAndInsertFEIRStmt(UniqueFEIRStmt stmt, FEIRStmt *ptrTail, const Loc loc) { + stmt->SetSrcLoc(loc); + FEIRStmt *prtStmt = func.RegisterFEIRStmt(std::move(stmt)); + ptrTail->InsertBefore(prtStmt); + return prtStmt; +} + +void FEIRLower::LowerFunc() { + FELinkListNode *nodeStmt = func.GetFEIRStmtHead()->GetNext(); + if (nodeStmt != func.GetFEIRStmtTail()) { + LowerStmt(static_cast(nodeStmt), lowerStmtTail); + } + Clear(); +} + +void FEIRLower::LowerStmt(const std::list &stmts, FEIRStmt *ptrTail) { + FEIRStmt *tmpHead = CreateHeadAndTail(); + FEIRStmt *tmpTail = static_cast(tmpHead->GetNext()); + for (auto &stmt : stmts) { + tmpTail->InsertBefore(stmt.get()); + } + LowerStmt(static_cast(tmpHead->GetNext()), ptrTail); +} + +void FEIRLower::LowerStmt(FEIRStmt *stmt, FEIRStmt *ptrTail) { + FEIRStmt *nextStmt = stmt; + do { + stmt = nextStmt; + nextStmt = static_cast(stmt->GetNext()); + switch (stmt->GetKind()) { + case kStmtIf: + LowerIfStmt(*static_cast(stmt), ptrTail); + break; + case kStmtDoWhile: + ProcessLoopStmt(*static_cast(stmt), ptrTail); + break; + case kStmtPesudoTail: + case kStmtPesudoFuncEnd: + return; + default: + ptrTail->InsertBefore(stmt); + break; + } + } while (nextStmt != nullptr); +} + +void FEIRLower::LowerIfStmt(FEIRStmtIf &ifStmt, FEIRStmt *ptrTail) { + FEIRStmt *thenHead = nullptr; + FEIRStmt *thenTail = nullptr; + FEIRStmt *elseHead = nullptr; + FEIRStmt *elseTail = nullptr; + if (!ifStmt.GetThenStmt().empty()) { + thenHead = CreateHeadAndTail(); + thenTail = static_cast(thenHead->GetNext()); + LowerStmt(ifStmt.GetThenStmt(), thenTail); + } + if (!ifStmt.GetElseStmt().empty()) { + elseHead = CreateHeadAndTail(); + elseTail = static_cast(elseHead->GetNext()); + LowerStmt(ifStmt.GetElseStmt(), elseTail); + } + if (ifStmt.GetThenStmt().empty() && ifStmt.GetElseStmt().empty()) { + // eval statement + std::list feExprs; + feExprs.emplace_back(ifStmt.GetCondExpr()->Clone()); + (void)RegisterAndInsertFEIRStmt( + std::make_unique(OP_eval, std::move(feExprs)), ptrTail, ifStmt.GetSrcLoc()); + } else if (ifStmt.GetElseStmt().empty()) { + // brfalse + // + // label + CreateAndInsertCondStmt(OP_brfalse, ifStmt, thenHead, thenTail, ptrTail); + } else if (ifStmt.GetThenStmt().empty()) { + // brtrue + // + // label + CreateAndInsertCondStmt(OP_brtrue, ifStmt, elseHead, elseTail, ptrTail); + } else { + // brfalse + // + // goto + // label + // + // label + std::string elseName = FEUtils::CreateLabelName(); + UniqueFEIRStmt condFEStmt = std::make_unique( + ifStmt.GetCondExpr()->Clone(), 
OP_brfalse, elseName); + auto condStmt = RegisterAndInsertFEIRStmt(std::move(condFEStmt), ptrTail, ifStmt.GetSrcLoc()); + // + FELinkListNode::SpliceNodes(thenHead, thenTail, ptrTail); + // goto + std::string endName = FEUtils::CreateLabelName(); + auto gotoStmt = RegisterAndInsertFEIRStmt(FEIRBuilder::CreateStmtGoto(endName), ptrTail); + // label + auto elseLabelStmt = RegisterAndInsertFEIRStmt(std::make_unique(elseName), ptrTail); + // + FELinkListNode::SpliceNodes(elseHead, elseTail, ptrTail); + // label + auto endLabelStmt = RegisterAndInsertFEIRStmt(std::make_unique(endName), ptrTail); + // link bb + condStmt->AddExtraSucc(*elseLabelStmt); + elseLabelStmt->AddExtraPred(*condStmt); + gotoStmt->AddExtraSucc(*endLabelStmt); + endLabelStmt->AddExtraPred(*gotoStmt); + } +} + +// for/dowhile/while stmts +void FEIRLower::ProcessLoopStmt(FEIRStmtDoWhile &stmt, FEIRStmt *ptrTail) { + FEIRStmt *bodyHead = nullptr; + FEIRStmt *bodyTail = nullptr; + if (!stmt.GetBodyStmts().empty()) { + bodyHead = CreateHeadAndTail(); + bodyTail = static_cast(bodyHead->GetNext()); + LowerStmt(stmt.GetBodyStmts(), bodyTail); + } + // for/while + if (stmt.GetOpcode() == OP_while) { + return LowerWhileStmt(stmt, bodyHead, bodyTail, ptrTail); + } + // dowhile + if (stmt.GetOpcode() == OP_dowhile) { + return LowerDoWhileStmt(stmt, bodyHead, bodyTail, ptrTail); + } +} + +/* + * while + * is lowered to : + * label + * brfalse + * + * goto + * label + */ +void FEIRLower::LowerWhileStmt(const FEIRStmtDoWhile &whileStmt, FEIRStmt *bodyHead, + FEIRStmt *bodyTail, FEIRStmt *ptrTail) { + std::string whileLabelName = FEUtils::CreateLabelName(); + // label + auto whileLabelStmt = RegisterAndInsertFEIRStmt(std::make_unique(whileLabelName), ptrTail); + std::string endLabelName = FEUtils::CreateLabelName(); + // brfalse + UniqueFEIRStmt condFEStmt = std::make_unique( + whileStmt.GetCondExpr()->Clone(), OP_brfalse, endLabelName); + auto condStmt = RegisterAndInsertFEIRStmt(std::move(condFEStmt), ptrTail, whileStmt.GetSrcLoc()); + if (bodyHead != nullptr && bodyTail != nullptr) { + // + FELinkListNode::SpliceNodes(bodyHead, bodyTail, ptrTail); + } + // goto + auto gotoStmt = RegisterAndInsertFEIRStmt(FEIRBuilder::CreateStmtGoto(whileLabelName), ptrTail); + // label + auto endLabelStmt = RegisterAndInsertFEIRStmt(std::make_unique(endLabelName), ptrTail); + // link bb + condStmt->AddExtraSucc(*endLabelStmt); + endLabelStmt->AddExtraPred(*condStmt); + gotoStmt->AddExtraSucc(*whileLabelStmt); + whileLabelStmt->AddExtraPred(*gotoStmt); +} + +/* + * dowhile + * is lowered to: + * label + * + * brtrue + */ +void FEIRLower::LowerDoWhileStmt(const FEIRStmtDoWhile &doWhileStmt, FEIRStmt *bodyHead, + FEIRStmt *bodyTail, FEIRStmt *ptrTail) { + std::string bodyLabelName = FEUtils::CreateLabelName(); + // label + auto bodyLabelStmt = RegisterAndInsertFEIRStmt(std::make_unique(bodyLabelName), ptrTail); + if (bodyHead != nullptr && bodyTail != nullptr) { + // + FELinkListNode::SpliceNodes(bodyHead, bodyTail, ptrTail); + } + // brtrue + UniqueFEIRStmt condFEStmt = std::make_unique( + doWhileStmt.GetCondExpr()->Clone(), OP_brtrue, bodyLabelName); + auto condStmt = RegisterAndInsertFEIRStmt(std::move(condFEStmt), ptrTail, doWhileStmt.GetSrcLoc()); + // link bb + condStmt->AddExtraSucc(*bodyLabelStmt); + bodyLabelStmt->AddExtraPred(*condStmt); +} + +void FEIRLower::CreateAndInsertCondStmt(Opcode op, const FEIRStmtIf &ifStmt, + FEIRStmt *head, FEIRStmt *tail, FEIRStmt *ptrTail) { + std::string labelName = FEUtils::CreateLabelName(); + UniqueFEIRStmt 
condFEStmt = std::make_unique(ifStmt.GetCondExpr()->Clone(), op, labelName); + FEIRStmt *condStmt = RegisterAndInsertFEIRStmt(std::move(condFEStmt), ptrTail, ifStmt.GetSrcLoc()); + FELinkListNode::SpliceNodes(head, tail, ptrTail); + FEIRStmt *labelStmt = RegisterAndInsertFEIRStmt(std::make_unique(labelName), ptrTail); + // link bb + condStmt->AddExtraSucc(*labelStmt); + labelStmt->AddExtraPred(*condStmt); +} +} // namespace maple \ No newline at end of file diff --git a/src/hir2mpl/optimize/src/ror.cpp b/src/hir2mpl/optimize/src/ror.cpp new file mode 100644 index 0000000000000000000000000000000000000000..b2abd038c67d455ae195aa5892da7ab3b84f7b02 --- /dev/null +++ b/src/hir2mpl/optimize/src/ror.cpp @@ -0,0 +1,135 @@ +/* + * Copyright (c) [2021-2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "ror.h" +#include "feir_builder.h" + +namespace maple { +UniqueFEIRExpr Ror::Emit2FEExpr() { + if (op != OP_bior || !CheckBaseExpr()) { + return nullptr; + } + + auto lBinExpr = static_cast(lExpr.get()); + auto rBinExpr = static_cast(rExpr.get()); + bitWidth = GetPrimTypeBitSize(lBinExpr->GetOpnd0()->GetPrimType()); + + if (lBinExpr->GetOp() == OP_shl && rBinExpr->GetOp() == OP_lshr) { + if (!IsRorLshrOpnd(rBinExpr->GetOpnd1(), false) || !IsRorShlOpnd(lBinExpr->GetOpnd1())) { + return nullptr; + } + } else if (lBinExpr->GetOp() == OP_lshr && rBinExpr->GetOp() == OP_shl) { + if (!IsRorLshrOpnd(lBinExpr->GetOpnd1(), false) || !IsRorShlOpnd(rBinExpr->GetOpnd1())) { + return nullptr; + } + } + + if (*rShiftBaseExpr != *lShiftBaseExpr) { + return nullptr; + } + + return FEIRBuilder::CreateExprBinary(OP_ror, lBinExpr->GetOpnd0()->Clone(), rShiftBaseExpr->Clone()); +} + +bool Ror::CheckBaseExpr() const { + if (lExpr->GetKind() != kExprBinary || rExpr->GetKind() != kExprBinary) { + return false; + } + + auto lBinExpr = static_cast(lExpr.get()); + auto rBinExpr = static_cast(rExpr.get()); + if (!((lBinExpr->GetOp() == OP_shl && rBinExpr->GetOp() == OP_lshr) || + (lBinExpr->GetOp() == OP_lshr && rBinExpr->GetOp() == OP_shl))) { + return false; + } + + if (*(lBinExpr->GetOpnd0()) != *(rBinExpr->GetOpnd0()) || + !IsUnsignedInteger(lBinExpr->GetOpnd0()->GetPrimType())) { + return false; + } + + return true; +} + +bool Ror::GetConstVal(const UniqueFEIRExpr &expr) { + if (expr->GetKind() != kExprConst && expr->GetKind() != kExprUnary) { + return false; + } + + FEIRExprConst *constExpr; + if (expr->GetKind() == kExprUnary) { + UniqueFEIRExpr opndExpr = static_cast(expr.get())->GetOpnd()->Clone(); + if (opndExpr->GetKind() != kExprConst) { + return false; + } + constExpr = static_cast(opndExpr.get()); + } else { + constExpr = static_cast(expr.get()); + } + + constVal = constExpr->GetValue().u64; + + return true; +} + +bool Ror::IsRorShlOpnd(const UniqueFEIRExpr &expr) { + if (expr->GetKind() != kExprBinary) { + return false; + } + + auto binExpr = static_cast(expr.get()); + if (binExpr->GetOp() != OP_sub || !GetConstVal(binExpr->GetOpnd0()) || constVal != bitWidth) { + return false; + } + + if 
(!IsRorLshrOpnd(binExpr->GetOpnd1(), true)) { + return false; + } + + return true; +} + +bool Ror::IsRorLshrOpnd(const UniqueFEIRExpr &expr, bool inShl) { + if (expr->GetKind() != kExprBinary) { + return false; + } + + auto binExpr = static_cast(expr.get()); + if (binExpr->GetOp() != OP_band) { + return false; + } + + auto SetShiftBaseExpr = [inShl, this](const UniqueFEIRExpr &expr) { + if (inShl) { + lShiftBaseExpr = expr->Clone(); + } else { + rShiftBaseExpr = expr->Clone(); + } + }; + + if (GetConstVal(binExpr->GetOpnd0())) { + SetShiftBaseExpr(binExpr->GetOpnd1()); + } else if (GetConstVal(binExpr->GetOpnd1())) { + SetShiftBaseExpr(binExpr->GetOpnd0()); + } else { + return false; + } + + if (constVal != bitWidth - 1) { + return false; + } + + return true; +} +} diff --git a/src/hir2mpl/test/BUILD.gn b/src/hir2mpl/test/BUILD.gn new file mode 100644 index 0000000000000000000000000000000000000000..ac663e060f93d270191ae1a3aeead4435de503cf --- /dev/null +++ b/src/hir2mpl/test/BUILD.gn @@ -0,0 +1,219 @@ +# +# Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# +configs = [ "${MAPLEALL_ROOT}:mapleallcompilecfg" ] +cflags += [ + "-DMIR_FEATURE_FULL=1", + "-DHIR2MPL_FULL_INFO_DUMP=1", + "-DENABLE_COV_CHECK=1", +] + +cflags_cc -= [ + "-Werror", +] + + +include_dirs_dex = [ + "${ANDROID_ROOT}/dalvik", + "${ANDROID_ROOT}/system/core/include", +] +include_dirs_libdexfile = [ + #for libdexfile -start + "${ANDROID_ROOT}/system/core/liblog/include", + "${ANDROID_ROOT}/system/core/libutils/include", + "${ANDROID_ROOT}/system/core/libnativehelper/include_jni", + "${ANDROID_ROOT}/system/core/base/include", + "${ANDROID_ROOT}/system/core/libziparchive/include", + "${ANDROID_ROOT}/art/libartpalette/include", + "${ANDROID_ROOT}/art/libartbase", + "${ANDROID_ROOT}/art/libdexfile", + "${ANDROID_ROOT}/include", + + #for libdexfile -end +] + +include_directories = include_dirs_libdexfile + include_dirs_dex + +executable("hir2mplUT") { + sources = [] + + include_dirs = include_directories + + deps = [ + ":lib_hir2mpl_test_ast_input_clang", + ":lib_hir2mpl_test_bytecode_input_dex", + ":lib_hir2mpl_test_common", + ":lib_hir2mpl_test_bytecode_input_class", + "${THIRD_PARTY_ROOT}/bounds_checking_function:libHWSecureC", + "${MAPLEALL_ROOT}/maple_ir:libmplir", + "${MAPLEALL_ROOT}/maple_util:libmplutil", + "${MAPLEALL_ROOT}/mempool:libmempool", + "${MAPLEALL_ROOT}/mpl2mpl:libmpl2mpl", + "${HIR2MPL_ROOT}:lib_hir2mpl_ast_input_clang", + "${HIR2MPL_ROOT}:lib_hir2mpl_ast_input_common", + "${HIR2MPL_ROOT}:lib_hir2mpl_common", + "${HIR2MPL_ROOT}:lib_hir2mpl_optimize", + ] + + if (ONLY_C != 1) { + deps += [ + "${HIR2MPL_ROOT}:lib_hir2mpl_bytecode_input_dex", + "${HIR2MPL_ROOT}:lib_hir2mpl_bytecode_input_common", + "${HIR2MPL_ROOT}:lib_hir2mpl_bytecode_input_class", + ] + } + + ldflags = [ + "-rdynamic", + "-L${LLVMLIBDIR}/", + "-Wl,-rpath,${LLVMLIBDIR}/", + "-Wl,-rpath,${MAPLE_ROOT}/build/gtestlib/lib", + ] + + libs = [ + "${MAPLE_ROOT}/build/gtestlib/lib/libgmock.so", + "${MAPLE_ROOT}/build/gtestlib/lib/libgmock_main.so", 
+ "${MAPLE_ROOT}/build/gtestlib/lib/libgtest.so", + "${MAPLE_ROOT}/build/gtestlib/lib/libgtest_main.so", + "z", + "pthread", + ] +} + +include_ast_input_clang_directories = [ + "${HIR2MPL_ROOT}/common/include", + "${HIR2MPL_ROOT}/optimize/include", + "${HIR2MPL_ROOT}/ast_input/clang/include", + "${HIR2MPL_ROOT}/ast_input/common/include", + "${HIR2MPL_ROOT}/test/common", + "${THIRD_PARTY_ROOT}/bounds_checking_function/include", + "${MAPLEALL_ROOT}/maple_ir/include", + "${MAPLEALL_ROOT}/maple_util/include", + "${MAPLEALL_ROOT}/maple_driver/include", + "${MAPLEALL_ROOT}/mempool/include", + "${MAPLE_ROOT}/build/gtestlib/include", +] + +source_set("lib_hir2mpl_test_ast_input_clang") { + sources = [ + "${HIR2MPL_ROOT}/test/ast_input/clang/ast_expr_test.cpp", + "${HIR2MPL_ROOT}/test/ast_input/clang/ast_var_test.cpp", + ] + include_dirs = include_ast_input_clang_directories + output_dir = "${root_out_dir}/ar" +} + +include_bytecode_input_class_directories = [ + "${HIR2MPL_ROOT}/common/include", + "${HIR2MPL_ROOT}/optimize/include", + "${HIR2MPL_ROOT}/bytecode_input/class/include", + "${HIR2MPL_ROOT}/test/common", + "${THIRD_PARTY_ROOT}/bounds_checking_function/include", + "${MAPLEALL_ROOT}/maple_ir/include", + "${MAPLEALL_ROOT}/maple_util/include", + "${MAPLEALL_ROOT}/maple_driver/include", + "${MAPLEALL_ROOT}/mempool/include", + "${MAPLE_ROOT}/build/gtestlib/include", +] + +source_set("lib_hir2mpl_test_bytecode_input_class") { + sources = [ + "${HIR2MPL_ROOT}/test/bytecode_input/class/jbc_class2fe_helper_test.cpp", + "${HIR2MPL_ROOT}/test/bytecode_input/class/jbc_class_const_test.cpp", + "${HIR2MPL_ROOT}/test/bytecode_input/class/jbc_class_test.cpp", + "${HIR2MPL_ROOT}/test/bytecode_input/class/jbc_function_test.cpp", + "${HIR2MPL_ROOT}/test/bytecode_input/class/jbc_opcode_helper_test.cpp", + "${HIR2MPL_ROOT}/test/bytecode_input/class/jbc_stack2fe_helper_test.cpp", + "${HIR2MPL_ROOT}/test/bytecode_input/class/jbc_stack_helper_test.cpp", + "${HIR2MPL_ROOT}/test/bytecode_input/class/jbc_stmt_bb_test.cpp", + "${HIR2MPL_ROOT}/test/bytecode_input/class/jbc_stmt_loc_test.cpp", + "${HIR2MPL_ROOT}/test/bytecode_input/class/jbc_util_test.cpp", + ] + include_dirs = include_bytecode_input_class_directories + output_dir = "${root_out_dir}/ar" +} + +include_common_directories = [ + "${HIR2MPL_ROOT}/common/include", + "${HIR2MPL_ROOT}/optimize/include", + "${HIR2MPL_ROOT}/bytecode_input/class/include", # hir2mplUT.cpp exists + "${HIR2MPL_ROOT}/test/common", + "${THIRD_PARTY_ROOT}/bounds_checking_function/include", + "${MAPLEALL_ROOT}/maple_ir/include", + "${MAPLEALL_ROOT}/maple_util/include", + "${MAPLEALL_ROOT}/maple_driver/include", + "${MAPLEALL_ROOT}/mempool/include", + "${MAPLE_ROOT}/build/gtestlib/include", +] + +source_set("lib_hir2mpl_test_common") { + sources = [ + "${HIR2MPL_ROOT}/test/common/base64_test.cpp", + "${HIR2MPL_ROOT}/test/common/basic_io_test.cpp", + + # "${HIR2MPL_ROOT}/test/common/fe_function_process_schedular_test.cpp", + "${HIR2MPL_ROOT}/test/common/fe_algorithm_test.cpp", + "${HIR2MPL_ROOT}/test/common/fe_file_ops_test.cpp", + "${HIR2MPL_ROOT}/test/common/fe_file_type_test.cpp", + "${HIR2MPL_ROOT}/test/common/feir_cfg_test.cpp", + "${HIR2MPL_ROOT}/test/common/fe_struct_elem_info_test.cpp", + "${HIR2MPL_ROOT}/test/common/fe_type_hierarchy_test.cpp", + "${HIR2MPL_ROOT}/test/common/fe_type_manager_test.cpp", + "${HIR2MPL_ROOT}/test/common/fe_utils_test.cpp", + "${HIR2MPL_ROOT}/test/common/feir_builder_test.cpp", + "${HIR2MPL_ROOT}/test/common/feir_stmt_dfg_test.cpp", + 
"${HIR2MPL_ROOT}/test/common/feir_stmt_test.cpp", + "${HIR2MPL_ROOT}/test/common/feir_test_base.cpp", + "${HIR2MPL_ROOT}/test/common/feir_type_helper_test.cpp", + "${HIR2MPL_ROOT}/test/common/feir_type_infer_test.cpp", + "${HIR2MPL_ROOT}/test/common/feir_type_test.cpp", + "${HIR2MPL_ROOT}/test/common/feir_var_test.cpp", + "${HIR2MPL_ROOT}/test/common/feir_lower_test.cpp", + "${HIR2MPL_ROOT}/test/common/hir2mplUT.cpp", + "${HIR2MPL_ROOT}/test/common/hir2mpl_ut_options.cpp", + "${HIR2MPL_ROOT}/test/common/hir2mpl_ut_regx.cpp", + "${HIR2MPL_ROOT}/test/common/hir2mpl_ut_regx_test.cpp", + ] + include_dirs = include_common_directories + output_dir = "${root_out_dir}/ar" +} + +include_bytecode_input_dex_directories = [ + "${HIR2MPL_ROOT}/common/include", + "${HIR2MPL_ROOT}/optimize/include", + "${HIR2MPL_ROOT}/bytecode_input/common/include", + "${HIR2MPL_ROOT}/bytecode_input/dex/include", + "${HIR2MPL_ROOT}/test/common", + "${THIRD_PARTY_ROOT}/bounds_checking_function/include", + "${MAPLEALL_ROOT}/maple_ir/include", + "${MAPLEALL_ROOT}/maple_util/include", + "${MAPLEALL_ROOT}/maple_driver/include", + "${MAPLEALL_ROOT}/mempool/include", + "${MAPLE_ROOT}/build/gtestlib/include", +] + +source_set("lib_hir2mpl_test_bytecode_input_dex") { + sources = [ + "${HIR2MPL_ROOT}/test/bytecode_input/dex/bc_load_on_demand_type_test.cpp", + "${HIR2MPL_ROOT}/test/bytecode_input/dex/bc_parser_test.cpp", + "${HIR2MPL_ROOT}/test/bytecode_input/dex/bc_util_test.cpp", + "${HIR2MPL_ROOT}/test/bytecode_input/dex/dex_class_test.cpp", + "${HIR2MPL_ROOT}/test/bytecode_input/dex/dex_file_util_test.cpp", + "${HIR2MPL_ROOT}/test/bytecode_input/dex/dex_op_test.cpp", + "${HIR2MPL_ROOT}/test/bytecode_input/dex/dex_reader_test.cpp", + ] + include_dirs = include_bytecode_input_dex_directories + output_dir = "${root_out_dir}/ar" +} + diff --git a/src/hir2mpl/test/CMakeLists.txt b/src/hir2mpl/test/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..ced8cd50ddfe785a786fb31cfe3330db3819d2fd --- /dev/null +++ b/src/hir2mpl/test/CMakeLists.txt @@ -0,0 +1,168 @@ +# +# Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# + +set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DMIR_FEATURE_FULL=1 -DHIR2MPL_FULL_INFO_DUMP=1 -DENABLE_COV_CHECK=1 -fvisibility=hidden") +set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DMIR_FEATURE_FULL=1 -DHIR2MPL_FULL_INFO_DUMP=1 -DENABLE_COV_CHECK=1 -fvisibility=hidden") + +string(REPLACE "-Werror" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}") + +set(include_directories + ${ANDROID_ROOT}/dalvik + ${ANDROID_ROOT}/system/core/include + ${ANDROID_ROOT}/system/core/liblog/include + ${ANDROID_ROOT}/system/core/libutils/include + ${ANDROID_ROOT}/system/core/libnativehelper/include_jni + ${ANDROID_ROOT}/system/core/base/include + ${ANDROID_ROOT}/system/core/libziparchive/include + ${ANDROID_ROOT}/art/libartpalette/include + ${ANDROID_ROOT}/art/libartbase + ${ANDROID_ROOT}/art/libdexfile + ${ANDROID_ROOT}/include + ${HIR2MPL_ROOT}/ast_input/clang/include + ${HIR2MPL_ROOT}/ast_input/common/include + ${HIR2MPL_ROOT}/common/include + ${HIR2MPL_ROOT}/optimize/include + ${HIR2MPL_ROOT}/bytecode_input/class/include # hir2mplUT.cpp exists + ${HIR2MPL_ROOT}/test/common + ${THIRD_PARTY_ROOT}/bounds_checking_function/include + ${MAPLEALL_ROOT}/maple_ir/include + ${MAPLEALL_ROOT}/maple_util/include + ${MAPLEALL_ROOT}/maple_driver/include + ${MAPLEALL_ROOT}/mempool/include + ${MAPLE_ROOT}/build/gtestlib/include + ${HIR2MPL_ROOT}/bytecode_input/common/include + ${HIR2MPL_ROOT}/bytecode_input/dex/include + ) + +set(deps_hir2mplUT + libHWSecureC + libdriver_option + libmplphase + libcommandline + libmplir + libmplutil + libmempool + libmpl2mpl + lib_hir2mpl_ast_input_clang_lib + lib_hir2mpl_ast_input_clang + lib_hir2mpl_ast_input_common + lib_hir2mpl_common + lib_hir2mpl_optimize + ${LLVMLIBDIR}/libclang.so + ${LLVMLIBDIR}/libclang-cpp.so + ${LLVMLIBDIR}/libclangFrontend.a + ${LLVMLIBDIR}/libclangDriver.a + ${LLVMLIBDIR}/libclangSerialization.a + ${LLVMLIBDIR}/libclangParse.a + ${LLVMLIBDIR}/libclangSema.a + ${LLVMLIBDIR}/libclangEdit.a + ${LLVMLIBDIR}/libclangLex.a + ${LLVMLIBDIR}/libclangAnalysis.a + ${LLVMLIBDIR}/libclangAST.a + ${LLVMLIBDIR}/libclangBasic.a + ${LLVMLIBDIR}/libLLVMDemangle.a + ${LLVMLIBDIR}/libLLVMMCParser.a + ${LLVMLIBDIR}/libLLVMMC.a + ${LLVMLIBDIR}/libLLVMBitReader.a + ${LLVMLIBDIR}/libLLVMCore.a + ${LLVMLIBDIR}/libLLVMBinaryFormat.a + ${LLVMLIBDIR}/libLLVMProfileData.a + ${LLVMLIBDIR}/libLLVMOption.a + ${LLVMLIBDIR}/libLLVMSupport.a + ${MAPLE_ROOT}/build/gtestlib/lib/libgmock.so + ${MAPLE_ROOT}/build/gtestlib/lib/libgmock_main.so + ${MAPLE_ROOT}/build/gtestlib/lib/libgtest.so + ${MAPLE_ROOT}/build/gtestlib/lib/libgtest_main.so + z + pthread + ) + +if (NOT ${ONLY_C} STREQUAL "1") + list(APPEND deps_hir2mplUT + lib_hir2mpl_bytecode_input_dex + lib_hir2mpl_bytecode_input_common + lib_hir2mpl_bytecode_input_class + libdexfile + libziparchive + libbase + ${THIRD_PARTY_ROOT}/aosp_modified/system/core/liblog/liblog.a + ) +endif() + +set($CMAKE_EXE_LINKER_FLAGS "CMAKE_EXE_LINKER_FLAGS -rdynamic -rdynamic -L${LLVMLIBDIR} -Wl,-rpath,${LLVMLIBDIR} -Wl,-rpath,${MAPLE_ROOT}/build/gtestlib/lib") + +set(src_hir2mplUT "") +list(APPEND src_hir2mplUT + ${HIR2MPL_ROOT}/test/ast_input/clang/ast_expr_test.cpp + ${HIR2MPL_ROOT}/test/ast_input/clang/ast_var_test.cpp + ) + +list(APPEND src_hir2mplUT + ${HIR2MPL_ROOT}/test/bytecode_input/class/jbc_class2fe_helper_test.cpp + ${HIR2MPL_ROOT}/test/bytecode_input/class/jbc_class_const_test.cpp + ${HIR2MPL_ROOT}/test/bytecode_input/class/jbc_class_test.cpp + ${HIR2MPL_ROOT}/test/bytecode_input/class/jbc_function_test.cpp + 
${HIR2MPL_ROOT}/test/bytecode_input/class/jbc_opcode_helper_test.cpp + ${HIR2MPL_ROOT}/test/bytecode_input/class/jbc_stack2fe_helper_test.cpp + ${HIR2MPL_ROOT}/test/bytecode_input/class/jbc_stack_helper_test.cpp + ${HIR2MPL_ROOT}/test/bytecode_input/class/jbc_stmt_bb_test.cpp + ${HIR2MPL_ROOT}/test/bytecode_input/class/jbc_stmt_loc_test.cpp + ${HIR2MPL_ROOT}/test/bytecode_input/class/jbc_util_test.cpp + ) + +list(APPEND src_hir2mplUT + ${HIR2MPL_ROOT}/test/common/base64_test.cpp + ${HIR2MPL_ROOT}/test/common/basic_io_test.cpp + ${HIR2MPL_ROOT}/test/common/fe_algorithm_test.cpp + ${HIR2MPL_ROOT}/test/common/fe_file_ops_test.cpp + ${HIR2MPL_ROOT}/test/common/fe_file_type_test.cpp + ${HIR2MPL_ROOT}/test/common/feir_cfg_test.cpp + ${HIR2MPL_ROOT}/test/common/fe_struct_elem_info_test.cpp + ${HIR2MPL_ROOT}/test/common/fe_type_hierarchy_test.cpp + ${HIR2MPL_ROOT}/test/common/fe_type_manager_test.cpp + ${HIR2MPL_ROOT}/test/common/fe_utils_test.cpp + ${HIR2MPL_ROOT}/test/common/feir_builder_test.cpp + ${HIR2MPL_ROOT}/test/common/feir_stmt_dfg_test.cpp + ${HIR2MPL_ROOT}/test/common/feir_stmt_test.cpp + ${HIR2MPL_ROOT}/test/common/feir_test_base.cpp + ${HIR2MPL_ROOT}/test/common/feir_type_helper_test.cpp + ${HIR2MPL_ROOT}/test/common/feir_type_infer_test.cpp + ${HIR2MPL_ROOT}/test/common/feir_type_test.cpp + ${HIR2MPL_ROOT}/test/common/feir_var_test.cpp + ${HIR2MPL_ROOT}/test/common/feir_lower_test.cpp + ${HIR2MPL_ROOT}/test/common/hir2mplUT.cpp + ${HIR2MPL_ROOT}/test/common/hir2mpl_ut_options.cpp + ${HIR2MPL_ROOT}/test/common/hir2mpl_ut_regx.cpp + ${HIR2MPL_ROOT}/test/common/hir2mpl_ut_regx_test.cpp + ) + +list(APPEND src_hir2mplUT + ${HIR2MPL_ROOT}/test/bytecode_input/dex/bc_load_on_demand_type_test.cpp + ${HIR2MPL_ROOT}/test/bytecode_input/dex/bc_parser_test.cpp + ${HIR2MPL_ROOT}/test/bytecode_input/dex/bc_util_test.cpp + ${HIR2MPL_ROOT}/test/bytecode_input/dex/dex_class_test.cpp + ${HIR2MPL_ROOT}/test/bytecode_input/dex/dex_file_util_test.cpp + ${HIR2MPL_ROOT}/test/bytecode_input/dex/dex_op_test.cpp + ${HIR2MPL_ROOT}/test/bytecode_input/dex/dex_reader_test.cpp + ) + +#hir2mplUT +add_executable(hir2mplUT "${src_hir2mplUT}") +set_target_properties(hir2mplUT PROPERTIES + COMPILE_FLAGS "" + INCLUDE_DIRECTORIES "${include_directories}" + LINK_LIBRARIES "${deps_hir2mplUT}" + RUNTIME_OUTPUT_DIRECTORY "${MAPLE_BUILD_OUTPUT}/bin" + ) diff --git a/src/hir2mpl/test/ast_input/clang/ast_expr_test.cpp b/src/hir2mpl/test/ast_input/clang/ast_expr_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..e3ca47837205f4eac14d1ebb6a0b235f75370567 --- /dev/null +++ b/src/hir2mpl/test/ast_input/clang/ast_expr_test.cpp @@ -0,0 +1,761 @@ +/* + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include +#include +#include "feir_test_base.h" +#include "fe_manager.h" +#include "hir2mpl_ut_regx.h" +#include "ast_expr.h" +#include "ast_decl_builder.h" + +namespace maple { +class AstExprTest : public FEIRTestBase { + public: + static MemPool *mp; + MapleAllocator allocator; + AstExprTest() : allocator(mp) {} + virtual ~AstExprTest() = default; + + static void SetUpTestCase() { + FEManager::GetManager().GetModule().SetSrcLang(kSrcLangC); + mp = FEUtils::NewMempool("MemPool for JBCFunctionTest", false /* isLcalPool */); + } + + static void TearDownTestCase() { + FEManager::GetManager().GetModule().SetSrcLang(kSrcLangUnknown); + delete mp; + mp = nullptr; + } + + template + std::unique_ptr GetConstASTExpr(PrimType primType, double val) { + std::unique_ptr astExpr = std::make_unique(); + astExpr->SetType(primType); + astExpr->SetVal(val); + return std::move(astExpr); + } +}; +MemPool *AstExprTest::mp = nullptr; + +TEST_F(AstExprTest, IntegerLiteral) { + RedirectCout(); + std::list stmts; + ASTIntegerLiteral *astExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); + astExpr->SetType(GlobalTables::GetTypeTable().GetInt64()); + astExpr->SetVal(256); + UniqueFEIRExpr feExpr = astExpr->Emit2FEExpr(stmts); + ASSERT_EQ(feExpr->GetKind(), kExprConst); + BaseNode *exprConst = feExpr->GenMIRNode(mirBuilder); + exprConst->Dump(); + EXPECT_EQ(GetBufferString(), "constval i64 256\n"); + RestoreCout(); +} + +TEST_F(AstExprTest, ImaginaryLiteral) { + MIRType *elemType = FEManager::GetTypeManager().GetOrCreateTypeFromName("I", FETypeFlag::kSrcUnknown, false); + MIRType *complexType = FEManager::GetTypeManager().GetOrCreateComplexStructType(*elemType); + ASTImaginaryLiteral *astExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); + astExpr->SetComplexType(complexType); + astExpr->SetElemType(elemType); + // create child expr + ASTIntegerLiteral *childExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); + childExpr->SetVal(2); + childExpr->SetType(GlobalTables::GetTypeTable().GetInt32()); + astExpr->SetASTExpr(childExpr); + + std::list stmts; + UniqueFEIRExpr feExpr = astExpr->Emit2FEExpr(stmts); + // check returned expr products + ASSERT_EQ(feExpr->GetKind(), kExprDRead); + BaseNode *exprConst = feExpr->GenMIRNode(mirBuilder); + RedirectCout(); + exprConst->Dump(); + std::string pattern = std::string("dread agg %Complex_[0-9]\n"); + EXPECT_EQ(HIR2MPLUTRegx::Match(GetBufferString(), pattern), true); + // check intermediate stmt products + ASSERT_EQ(stmts.size(), 2); + std::list stmtNodeFront = stmts.front()->GenMIRStmts(mirBuilder); + stmtNodeFront.front()->Dump(); + pattern = std::string("dassign %Complex_[0-9] 1 \\(constval i32 0\\)\n\n"); + EXPECT_EQ(HIR2MPLUTRegx::Match(GetBufferString(), pattern), true); + pattern = std::string("dassign %Complex_[0-9] 1 \\(constval i32 0\\)\n\n"); + std::list stmtNodeSecond = stmts.back()->GenMIRStmts(mirBuilder); + stmtNodeSecond.front()->Dump(); + pattern = std::string("dassign %Complex_[0-9] 2 \\(constval i32 2\\)\n\n"); + EXPECT_EQ(HIR2MPLUTRegx::Match(GetBufferString(), pattern), true); + RestoreCout(); +} + +// ---------- ASTUnaryOperatorExpr ---------- +TEST_F(AstExprTest, ASTUnaryOperatorExpr_1) { + RedirectCout(); + std::list stmts; + + // struct/union/array and pointer need test + PrimType primType = PTY_i32; + MIRType *subType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(primType); + PrimType ouPrimType = PTY_i32; + MIRType *ouType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(ouPrimType); + ASTDeclRefExpr *astRefExpr = 
ASTDeclsBuilder::ASTExprBuilder(allocator); + ASTDecl *astDecl = ASTDeclsBuilder::ASTDeclBuilder(allocator, MapleString("", mp), "aVar", + MapleVector({subType}, allocator.Adapter())); + astRefExpr->SetASTDecl(astDecl); + + // ASTUOMinusExpr + ASTUOMinusExpr *astUOMinusExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); + astUOMinusExpr->SetSubType(subType); + astUOMinusExpr->SetUOType(ouType); + astRefExpr->SetASTDecl(astDecl); + astUOMinusExpr->SetUOExpr(astRefExpr); + UniqueFEIRExpr feExpr = astUOMinusExpr->Emit2FEExpr(stmts); + BaseNode *exprConst = feExpr->GenMIRNode(mirBuilder); + exprConst->Dump(); + EXPECT_EQ(GetBufferString(), "neg i32 (dread i32 %aVar_0_0)\n"); + + // ASTUOPlusExpr + ASTUOPlusExpr *astUOPlusExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); + astUOPlusExpr->SetSubType(subType); + astUOPlusExpr->SetUOType(ouType); + astRefExpr->SetASTDecl(astDecl); + astUOPlusExpr->SetUOExpr(astRefExpr); + feExpr = astUOPlusExpr->Emit2FEExpr(stmts); + exprConst = feExpr->GenMIRNode(mirBuilder); + exprConst->Dump(); + EXPECT_EQ(GetBufferString(), "dread i32 %aVar_0_0\n"); + + // ASTUONotExpr + ASTUONotExpr *astUONotExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); + astUONotExpr->SetSubType(subType); + astUONotExpr->SetUOExpr(astRefExpr); + astUONotExpr->SetUOType(ouType); + feExpr = astUONotExpr->Emit2FEExpr(stmts); + exprConst = feExpr->GenMIRNode(mirBuilder); + exprConst->Dump(); + EXPECT_EQ(GetBufferString(), "bnot i32 (dread i32 %aVar_0_0)\n"); + + // ASTUOLNotExpr + ASTUOLNotExpr *astUOLNotExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); + astUOLNotExpr->SetSubType(subType); + astUOLNotExpr->SetUOExpr(astRefExpr); + astUOLNotExpr->SetUOType(ouType); + feExpr = astUOLNotExpr->Emit2FEExpr(stmts); + exprConst = feExpr->GenMIRNode(mirBuilder); + exprConst->Dump(); + EXPECT_EQ(GetBufferString(), "eq u1 i32 (dread i32 %aVar_0_0, constval i32 0)\n"); + RestoreCout(); +} + +TEST_F(AstExprTest, ASTUnaryOperatorExpr_2) { + RedirectCout(); + std::list stmts; + + // struct/union/array and pointer need test + PrimType primType = PTY_i32; + MIRType *subType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(primType); + ASTDeclRefExpr *astRefExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); + ASTDecl *astDecl = ASTDeclsBuilder::ASTDeclBuilder(allocator, MapleString("", mp), "aVar", + MapleVector({subType}, allocator.Adapter())); + astRefExpr->SetASTDecl(astDecl); + + // ASTUOPostIncExpr + ASTUOPostIncExpr *astUOPostIncExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); + astUOPostIncExpr->SetSubType(subType); + astUOPostIncExpr->SetASTDecl(astDecl); + astUOPostIncExpr->SetUOExpr(astRefExpr); + UniqueFEIRExpr feExpr = astUOPostIncExpr->Emit2FEExpr(stmts); + std::list stmtNodeFront = stmts.front()->GenMIRStmts(mirBuilder); + stmtNodeFront.front()->Dump(); + std::string pattern = std::string("dassign %postinc_[0-9] 0 \\(dread i32 \\%aVar_0_0\\)\n\n"); + EXPECT_EQ(HIR2MPLUTRegx::Match(GetBufferString(), pattern), true); + std::list stmtNodeSecond = stmts.back()->GenMIRStmts(mirBuilder); + stmtNodeSecond.front()->Dump(); + EXPECT_EQ(GetBufferString(), "dassign %aVar_0_0 0 (add i32 (dread i32 %aVar_0_0, constval i32 1))\n\n"); + BaseNode *exprConst = feExpr->GenMIRNode(mirBuilder); + exprConst->Dump(); + pattern = std::string("dread i32 %postinc_[0-9]\n"); + EXPECT_EQ(HIR2MPLUTRegx::Match(GetBufferString(), pattern), true); + stmts.clear(); + + // ASTUOPostDecExpr + ASTUOPostDecExpr *astUOPostDecExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); + astUOPostDecExpr->SetSubType(subType); + 
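+  // Post-decrement is expected to mirror the post-increment case above: for "aVar--" the old value
+  // is saved into a temporary first and only then is the variable updated, roughly
+  //   dassign %postdec_N 0 (dread i32 %aVar_0_0)
+  //   dassign %aVar_0_0 0 (sub i32 (dread i32 %aVar_0_0, constval i32 1))
+  // while the value of the whole expression is dread i32 %postdec_N, as the checks below verify.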
astUOPostDecExpr->SetASTDecl(astDecl); + astUOPostDecExpr->SetUOExpr(astRefExpr); + feExpr = astUOPostDecExpr->Emit2FEExpr(stmts); + stmtNodeFront = stmts.front()->GenMIRStmts(mirBuilder); + stmtNodeFront.front()->Dump(); + pattern = std::string("dassign %postdec_[0-9] 0 \\(dread i32 \\%aVar_0_0\\)\n\n"); + EXPECT_EQ(HIR2MPLUTRegx::Match(GetBufferString(), pattern), true); + stmtNodeSecond = stmts.back()->GenMIRStmts(mirBuilder); + stmtNodeSecond.front()->Dump(); + EXPECT_EQ(GetBufferString(), "dassign %aVar_0_0 0 (sub i32 (dread i32 %aVar_0_0, constval i32 1))\n\n"); + exprConst = feExpr->GenMIRNode(mirBuilder); + exprConst->Dump(); + pattern = std::string("dread i32 %postdec_[0-9]\n"); + EXPECT_EQ(HIR2MPLUTRegx::Match(GetBufferString(), pattern), true); + stmts.clear(); + + // ASTUOPreIncExpr + ASTUOPreIncExpr *astUOPreIncExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); + astUOPreIncExpr->SetSubType(subType); + astUOPreIncExpr->SetASTDecl(astDecl); + astUOPreIncExpr->SetUOExpr(astRefExpr); + feExpr = astUOPreIncExpr->Emit2FEExpr(stmts); + stmtNodeFront = stmts.front()->GenMIRStmts(mirBuilder); + stmtNodeFront.front()->Dump(); + EXPECT_EQ(GetBufferString(), "dassign %aVar_0_0 0 (add i32 (dread i32 %aVar_0_0, constval i32 1))\n\n"); + exprConst = feExpr->GenMIRNode(mirBuilder); + exprConst->Dump(); + EXPECT_EQ(GetBufferString(), "dread i32 %aVar_0_0\n"); + stmts.clear(); + + // ASTUOPreDecExpr + ASTUOPreDecExpr *astUOPreDecExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); + astUOPreDecExpr->SetSubType(subType); + astUOPreDecExpr->SetASTDecl(astDecl); + astUOPreDecExpr->SetUOExpr(astRefExpr); + feExpr = astUOPreDecExpr->Emit2FEExpr(stmts); + stmtNodeFront = stmts.front()->GenMIRStmts(mirBuilder); + stmtNodeFront.front()->Dump(); + EXPECT_EQ(GetBufferString(), "dassign %aVar_0_0 0 (sub i32 (dread i32 %aVar_0_0, constval i32 1))\n\n"); + exprConst = feExpr->GenMIRNode(mirBuilder); + exprConst->Dump(); + EXPECT_EQ(GetBufferString(), "dread i32 %aVar_0_0\n"); + stmts.clear(); + + RestoreCout(); +} + +TEST_F(AstExprTest, ASTUnaryOperatorExpr_3) { + RedirectCout(); + std::list stmts; + + // struct/union/array and pointer need test + PrimType primType = PTY_i32; + MIRType *subType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(primType); + ASTDeclRefExpr *astRefExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); + ASTDecl *astDecl = ASTDeclsBuilder::ASTDeclBuilder(allocator, MapleString("", mp), "aVar", + MapleVector({subType}, allocator.Adapter())); + astRefExpr->SetASTDecl(astDecl); + + // ASTUOAddrOfExpr + ASTUOAddrOfExpr *astUOAddrOfExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); + astUOAddrOfExpr->SetSubType(subType); + astRefExpr->SetASTDecl(astDecl); + astUOAddrOfExpr->SetUOExpr(astRefExpr); + UniqueFEIRExpr feExpr = astUOAddrOfExpr->Emit2FEExpr(stmts); + BaseNode *exprConst = feExpr->GenMIRNode(mirBuilder); + exprConst->Dump(); + EXPECT_EQ(GetBufferString(), "addrof ptr %aVar_0_0\n"); + + // ASTUODerefExpr + ASTUODerefExpr *astUODerefExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); + MIRType *uoType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(PTY_i32); + subType = GlobalTables::GetTypeTable().GetOrCreatePointerType(*uoType, PTY_ptr); + astRefExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); + astDecl = ASTDeclsBuilder::ASTDeclBuilder(allocator, MapleString("", mp), "aVar", + MapleVector({subType}, allocator.Adapter())); + astRefExpr->SetASTDecl(astDecl); + astUODerefExpr->SetUOType(uoType); + astUODerefExpr->SetSubType(subType); + astRefExpr->SetASTDecl(astDecl); + 
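+  // aVar is now an i32* variable, so dereferencing it should lower to an iread of the pointee type
+  // through the pointer value, i.e. "iread i32 <* i32> 0 (dread ptr %aVar_0_0)", as asserted below.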
astUODerefExpr->SetUOExpr(astRefExpr); + feExpr = astUODerefExpr->Emit2FEExpr(stmts); + exprConst = feExpr->GenMIRNode(mirBuilder); + exprConst->Dump(); + EXPECT_EQ(GetBufferString(), "iread i32 <* i32> 0 (dread ptr %aVar_0_0)\n"); + RestoreCout(); +} + +TEST_F(AstExprTest, ASTCharacterLiteral) { + RedirectCout(); + std::list stmts; + ASTCharacterLiteral *astExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); + astExpr->SetVal('a'); + astExpr->SetPrimType(PTY_i32); + UniqueFEIRExpr feExpr = astExpr->Emit2FEExpr(stmts); + ASSERT_EQ(feExpr->GetKind(), kExprConst); + BaseNode *exprConst = feExpr->GenMIRNode(mirBuilder); + exprConst->Dump(); + EXPECT_EQ(GetBufferString(), "constval i32 97\n"); + RestoreCout(); +} + +TEST_F(AstExprTest, ASTParenExpr) { + RedirectCout(); + std::list stmts; + ASTParenExpr *astParenExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); + ASTIntegerLiteral *astIntExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); + astIntExpr->SetVal(2); + astIntExpr->SetType(GlobalTables::GetTypeTable().GetInt32()); + astParenExpr->SetASTExpr(static_cast(astIntExpr)); + UniqueFEIRExpr feExpr = astParenExpr->Emit2FEExpr(stmts); + BaseNode *exprConst = feExpr->GenMIRNode(mirBuilder); + exprConst->Dump(); + EXPECT_EQ(GetBufferString(), "constval i32 2\n"); + RestoreCout(); +} + +TEST_F(AstExprTest, ASTBinaryOperatorExpr_1) { + std::list stmts; + PrimType primType = PTY_i32; + MIRType *subType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(primType); + ASTDeclRefExpr *astRefExpr1 = ASTDeclsBuilder::ASTExprBuilder(allocator); + ASTDecl *astDecl1 = ASTDeclsBuilder::ASTDeclBuilder(allocator, MapleString("", mp), "aVar", + MapleVector({subType}, allocator.Adapter())); + astRefExpr1->SetASTDecl(astDecl1); + ASTDeclRefExpr *astRefExpr2 = ASTDeclsBuilder::ASTExprBuilder(allocator); + ASTDecl *astDecl2 = ASTDeclsBuilder::ASTDeclBuilder(allocator, MapleString("", mp), "bVar", + MapleVector({subType}, allocator.Adapter())); + astRefExpr2->SetASTDecl(astDecl2); + + ASTBinaryOperatorExpr *astBinaryOperatorExpr1 = ASTDeclsBuilder::ASTExprBuilder(allocator); + astBinaryOperatorExpr1->SetLeftExpr(astRefExpr1); + astBinaryOperatorExpr1->SetRightExpr(astRefExpr2); + astBinaryOperatorExpr1->SetOpcode(OP_add); + astBinaryOperatorExpr1->SetRetType(GlobalTables::GetTypeTable().GetInt32()); + UniqueFEIRExpr feExpr1 = astBinaryOperatorExpr1->Emit2FEExpr(stmts); + BaseNode *expr1 = feExpr1->GenMIRNode(mirBuilder); + RedirectCout(); + expr1->Dump(); + EXPECT_EQ(GetBufferString(), "add i32 (dread i32 %aVar_0_0, dread i32 %bVar_0_0)\n"); + + ASTBinaryOperatorExpr *astBinaryOperatorExpr2 = ASTDeclsBuilder::ASTExprBuilder(allocator); + astBinaryOperatorExpr2->SetLeftExpr(astRefExpr1); + astBinaryOperatorExpr2->SetRightExpr(astRefExpr2); + astBinaryOperatorExpr2->SetOpcode(OP_mul); + astBinaryOperatorExpr2->SetRetType(GlobalTables::GetTypeTable().GetInt32()); + UniqueFEIRExpr feExpr2 = astBinaryOperatorExpr2->Emit2FEExpr(stmts); + BaseNode *expr2 = feExpr2->GenMIRNode(mirBuilder); + expr2->Dump(); + EXPECT_EQ(GetBufferString(), "mul i32 (dread i32 %aVar_0_0, dread i32 %bVar_0_0)\n"); + RestoreCout(); +} + +TEST_F(AstExprTest, ASTBinaryOperatorExpr_2) { + std::list stmts; + PrimType primType = PTY_i32; + MIRType *subType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(primType); + ASTDeclRefExpr *astRefExpr1 = ASTDeclsBuilder::ASTExprBuilder(allocator); + ASTDecl *astDecl1 = ASTDeclsBuilder::ASTDeclBuilder(allocator, MapleString("", mp), "aVar", + MapleVector({subType}, allocator.Adapter())); + 
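+  // This case drives ASTAssignExpr: "aVar = bVar" is expected to emit exactly one statement,
+  //   dassign %aVar_0_0 0 (dread i32 %bVar_0_0)
+  // and the value of the assignment expression itself is then a plain dread of the assigned
+  // variable (dread i32 %aVar_0_0), matching the EXPECT checks further down.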
astRefExpr1->SetASTDecl(astDecl1); + ASTDeclRefExpr *astRefExpr2 = ASTDeclsBuilder::ASTExprBuilder(allocator); + ASTDecl *astDecl2 = ASTDeclsBuilder::ASTDeclBuilder(allocator, MapleString("", mp), "bVar", + MapleVector({subType}, allocator.Adapter())); + astRefExpr2->SetASTDecl(astDecl2); + + ASTAssignExpr *astAssignExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); + astAssignExpr->SetLeftExpr(astRefExpr1); + astAssignExpr->SetRightExpr(astRefExpr2); + UniqueFEIRExpr feExpr = astAssignExpr->Emit2FEExpr(stmts); + RedirectCout(); + EXPECT_EQ(stmts.size(), 1); + std::list mirStmts = stmts.front()->GenMIRStmts(mirBuilder); + EXPECT_EQ(mirStmts.size(), 1); + BaseNode *expr = feExpr->GenMIRNode(mirBuilder); + mirStmts.front()->Dump(); + EXPECT_EQ(GetBufferString(), "dassign %aVar_0_0 0 (dread i32 %bVar_0_0)\n\n"); + expr->Dump(); + EXPECT_EQ(GetBufferString(), "dread i32 %aVar_0_0\n"); + RestoreCout(); +} + + // a < b ? 1 : 2 +TEST_F(AstExprTest, ConditionalOperator) { + std::list stmts; + // create ast cond expr + MIRType *subType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(PTY_i32); + ASTDeclRefExpr *astRefExpr1 = ASTDeclsBuilder::ASTExprBuilder(allocator); + ASTDecl *astDecl1 = ASTDeclsBuilder::ASTDeclBuilder(allocator, MapleString("", mp), "aVar", + MapleVector({ subType }, allocator.Adapter())); + astRefExpr1->SetASTDecl(astDecl1); + ASTDeclRefExpr *astRefExpr2 = ASTDeclsBuilder::ASTExprBuilder(allocator); + ASTDecl *astDecl2 = ASTDeclsBuilder::ASTDeclBuilder(allocator, MapleString("", mp), "bVar", + MapleVector({ subType }, allocator.Adapter())); + astRefExpr2->SetASTDecl(astDecl2); + ASTBinaryOperatorExpr *astBinaryOperatorExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); + astBinaryOperatorExpr->SetLeftExpr(astRefExpr1); + astBinaryOperatorExpr->SetRightExpr(astRefExpr2); + astBinaryOperatorExpr->SetOpcode(OP_lt); + astBinaryOperatorExpr->SetRetType(GlobalTables::GetTypeTable().GetInt32()); + // create true expr + ASTIntegerLiteral *trueAstExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); + trueAstExpr->SetType(GlobalTables::GetTypeTable().GetInt32()); + trueAstExpr->SetVal(1); + // create false expr + ASTIntegerLiteral *falseAstExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); + falseAstExpr->SetType(GlobalTables::GetTypeTable().GetInt32()); + falseAstExpr->SetVal(2); + // create ConditionalOperator expr + ASTConditionalOperator *conditionalOperatorExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); + conditionalOperatorExpr->SetCondExpr(astBinaryOperatorExpr); + conditionalOperatorExpr->SetTrueExpr(trueAstExpr); + conditionalOperatorExpr->SetFalseExpr(falseAstExpr); + conditionalOperatorExpr->SetType(GlobalTables::GetTypeTable().GetInt32()); + UniqueFEIRExpr feExpr = conditionalOperatorExpr->Emit2FEExpr(stmts); + EXPECT_EQ(stmts.size(), 1); + RedirectCout(); + feExpr->GenMIRNode(mirBuilder)->Dump(); + std::string pattern = "dread i32 %levVar_7\n"; + EXPECT_EQ(GetBufferString(), pattern); + RestoreCout(); +} + +// a < b ? 
1 : a++ +TEST_F(AstExprTest, ConditionalOperator_NestedExpr) { + std::list stmts; + // create ast cond expr + MIRType *subType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(PTY_i32); + ASTDeclRefExpr *astRefExpr1 = ASTDeclsBuilder::ASTExprBuilder(allocator); + ASTDecl *astDecl1 = ASTDeclsBuilder::ASTDeclBuilder(allocator, MapleString("", mp), "aVar", + MapleVector({ subType }, allocator.Adapter())); + astRefExpr1->SetASTDecl(astDecl1); + ASTDeclRefExpr *astRefExpr2 = ASTDeclsBuilder::ASTExprBuilder(allocator); + ASTDecl *astDecl2 = ASTDeclsBuilder::ASTDeclBuilder(allocator, MapleString("", mp), "bVar", + MapleVector({ subType }, allocator.Adapter())); + astRefExpr2->SetASTDecl(astDecl2); + ASTBinaryOperatorExpr *astBinaryOperatorExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); + astBinaryOperatorExpr->SetLeftExpr(astRefExpr1); + astBinaryOperatorExpr->SetRightExpr(astRefExpr2); + astBinaryOperatorExpr->SetOpcode(OP_lt); + astBinaryOperatorExpr->SetRetType(GlobalTables::GetTypeTable().GetInt32()); + // create true expr + ASTIntegerLiteral *trueAstExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); + trueAstExpr->SetType(GlobalTables::GetTypeTable().GetInt32()); + trueAstExpr->SetVal(1); + // create false expr + ASTUOPostIncExpr *falseAstExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); + falseAstExpr->SetSubType(subType); + falseAstExpr->SetASTDecl(astDecl1); + falseAstExpr->SetUOExpr(astRefExpr1); + // create ConditionalOperator expr + ASTConditionalOperator *conditionalOperatorExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); + conditionalOperatorExpr->SetCondExpr(astBinaryOperatorExpr); + conditionalOperatorExpr->SetTrueExpr(trueAstExpr); + conditionalOperatorExpr->SetFalseExpr(falseAstExpr); + conditionalOperatorExpr->SetType(GlobalTables::GetTypeTable().GetInt32()); + UniqueFEIRExpr feExpr = conditionalOperatorExpr->Emit2FEExpr(stmts); + EXPECT_EQ(stmts.size(), 1); + RedirectCout(); + std::list mirStmts = stmts.front()->GenMIRStmts(mirBuilder); + EXPECT_EQ(mirStmts.size(), 1); + mirStmts.front()->GetOpCode(); + EXPECT_EQ(mirStmts.front()->GetOpCode(), OP_if); + feExpr->GenMIRNode(mirBuilder)->Dump(); + std::string pattern = std::string("dread i32 %levVar_[0-9][0-9]\n"); + EXPECT_EQ(HIR2MPLUTRegx::Match(GetBufferString(), pattern), true); + RestoreCout(); +} + +// a ? 
1 : a++ +TEST_F(AstExprTest, ConditionalOperator_Noncomparative) { + std::list stmts; + // create ast cond expr + MIRType *subType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(PTY_f64); + ASTDeclRefExpr *astRefExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); + ASTDecl *astDecl = ASTDeclsBuilder::ASTDeclBuilder(allocator, MapleString("", mp), "aVar", + MapleVector({ subType }, allocator.Adapter())); + astRefExpr->SetASTDecl(astDecl); + // create true expr + ASTIntegerLiteral *trueAstExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); + trueAstExpr->SetType(GlobalTables::GetTypeTable().GetDouble()); + trueAstExpr->SetVal(0); + // create false expr + ASTUOPostIncExpr *falseAstExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); + falseAstExpr->SetSubType(subType); + falseAstExpr->SetASTDecl(astDecl); + falseAstExpr->SetUOExpr(astRefExpr); + // create ConditionalOperator expr + ASTConditionalOperator *conditionalOperatorExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); + conditionalOperatorExpr->SetCondExpr(astRefExpr); + conditionalOperatorExpr->SetTrueExpr(trueAstExpr); + conditionalOperatorExpr->SetFalseExpr(falseAstExpr); + conditionalOperatorExpr->SetType(GlobalTables::GetTypeTable().GetDouble()); + UniqueFEIRExpr feExpr = conditionalOperatorExpr->Emit2FEExpr(stmts); + EXPECT_EQ(stmts.size(), 1); + RedirectCout(); + std::list mirStmts = stmts.front()->GenMIRStmts(mirBuilder); + EXPECT_EQ(mirStmts.size(), 1); + mirStmts.front()->GetOpCode(); + EXPECT_EQ(mirStmts.front()->GetOpCode(), OP_if); + mirStmts.front()->Dump(); + std::string pattern = + std::string("if \\(ne u1 f64 \\(dread f64 %aVar_0_0, constval f64 0\\)\\)") + HIR2MPLUTRegx::Any(); + EXPECT_EQ(HIR2MPLUTRegx::Match(GetBufferString(), pattern), true); + ClearBufferString(); + feExpr->GenMIRNode(mirBuilder)->Dump(); + pattern = std::string("dread f64 %levVar_[0-9][0-9]\n"); + EXPECT_EQ(HIR2MPLUTRegx::Match(GetBufferString(), pattern), true); + RestoreCout(); +} + +// a < b ?: 1 +TEST_F(AstExprTest, BinaryConditionalOperator) { + std::list stmts; + // create ast cond expr + MIRType *subType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(PTY_i32); + ASTDeclRefExpr *astRefExpr1 = ASTDeclsBuilder::ASTExprBuilder(allocator); + ASTDecl *astDecl1 = ASTDeclsBuilder::ASTDeclBuilder(allocator, MapleString("", mp), "aVar", + MapleVector({ subType }, allocator.Adapter())); + astRefExpr1->SetASTDecl(astDecl1); + ASTDeclRefExpr *astRefExpr2 = ASTDeclsBuilder::ASTExprBuilder(allocator); + ASTDecl *astDecl2 = ASTDeclsBuilder::ASTDeclBuilder(allocator, MapleString("", mp), "bVar", + MapleVector({ subType },allocator.Adapter())); + astRefExpr2->SetASTDecl(astDecl2); + ASTBinaryOperatorExpr *astBinaryOperatorExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); + astBinaryOperatorExpr->SetLeftExpr(astRefExpr1); + astBinaryOperatorExpr->SetRightExpr(astRefExpr2); + astBinaryOperatorExpr->SetOpcode(OP_lt); + astBinaryOperatorExpr->SetRetType(GlobalTables::GetTypeTable().GetInt32()); + // create false expr + ASTIntegerLiteral *falseAstExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); + falseAstExpr->SetType(GlobalTables::GetTypeTable().GetInt32()); + falseAstExpr->SetVal(1); + // create ConditionalOperator expr + ASTBinaryConditionalOperator *operatorExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); + operatorExpr->SetCondExpr(astBinaryOperatorExpr); + operatorExpr->SetFalseExpr(falseAstExpr); + operatorExpr->SetType(GlobalTables::GetTypeTable().GetInt32()); + UniqueFEIRExpr feExpr = operatorExpr->Emit2FEExpr(stmts); + EXPECT_EQ(stmts.size(), 1); + 
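+  // For the GNU binary conditional "a < b ?: 1" the condition must be evaluated only once, so a
+  // single helper statement caches it in a temporary, roughly
+  //   dassign %condVal_N 0 (lt u1 i32 (dread i32 %aVar_0_0, dread i32 %bVar_0_0))
+  // and the result expression reuses that temporary both as the selector and as the true operand:
+  //   select i32 (dread i32 %condVal_N, dread i32 %condVal_N, constval i32 1)
+  // The regex checks below match exactly this shape.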
RedirectCout(); + std::list mirStmts = stmts.front()->GenMIRStmts(mirBuilder); + EXPECT_EQ(mirStmts.size(), 1); + mirStmts.front()->Dump(); + // save conditional var for true expr + std::string pattern = + "dassign %condVal_[0-9][0-9] 0 \\(lt u1 i32 \\(dread i32 %aVar_0_0, dread i32 %bVar_0_0\\)\\)\n\n"; + EXPECT_EQ(HIR2MPLUTRegx::Match(GetBufferString(), pattern), true); + ClearBufferString(); + feExpr->GenMIRNode(mirBuilder)->Dump(); + pattern = "select i32 \\(dread i32 %condVal_[0-9][0-9], dread i32 %condVal_[0-9][0-9], constval i32 1\\)\n"; + EXPECT_EQ(HIR2MPLUTRegx::Match(GetBufferString(), pattern), true); + RestoreCout(); +} + +// a ?: 1 +TEST_F(AstExprTest, BinaryConditionalOperator_Noncomparative) { + std::list stmts; + // create ast cond expr + MIRType *subType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(PTY_i32); + ASTDeclRefExpr *astRefExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); + ASTDecl *astDecl = ASTDeclsBuilder::ASTDeclBuilder(allocator, MapleString("", mp), "aVar", + MapleVector({ subType }, allocator.Adapter())); + astRefExpr->SetASTDecl(astDecl); + // create false expr + ASTIntegerLiteral *falseAstExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); + falseAstExpr->SetType(GlobalTables::GetTypeTable().GetInt32()); + falseAstExpr->SetVal(1); + // create ConditionalOperator expr + ASTBinaryConditionalOperator *operatorExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); + operatorExpr->SetCondExpr(astRefExpr); + operatorExpr->SetFalseExpr(falseAstExpr); + operatorExpr->SetType(GlobalTables::GetTypeTable().GetInt32()); + UniqueFEIRExpr feExpr = operatorExpr->Emit2FEExpr(stmts); + EXPECT_EQ(stmts.size(), 0); + RedirectCout(); + feExpr->GenMIRNode(mirBuilder)->Dump(); + std::string pattern = "select i32 (\n"\ + " ne u1 i32 (dread i32 %aVar_0_0, constval i32 0),\n"\ + " dread i32 %aVar_0_0,\n"\ + " constval i32 1)\n"; + EXPECT_EQ(GetBufferString(), pattern); + RestoreCout(); +} + +// ASTCstyleCastExpr +TEST_F(AstExprTest, ASTCstyleCastExpr) { + RedirectCout(); + std::list stmts; + PrimType srcPrimType = PTY_f32; + MIRType *srcType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(srcPrimType); + ASTDeclRefExpr *astRefExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); + ASTDecl *astDecl = ASTDeclsBuilder::ASTDeclBuilder(allocator, MapleString("", mp), "a", + MapleVector({srcType}, allocator.Adapter())); + astRefExpr->SetASTDecl(astDecl); + ASTCastExpr *imCastExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); + imCastExpr->SetASTExpr(astRefExpr); + PrimType destPrimType = PTY_i32; + MIRType *destType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(destPrimType); + ASTCastExpr *cstCast = ASTDeclsBuilder::ASTExprBuilder(allocator); + cstCast->SetNeededCvt(true); + cstCast->SetASTExpr(imCastExpr); + cstCast->SetSrcType(srcType); + cstCast->SetDstType(destType); + UniqueFEIRExpr feExpr = cstCast->Emit2FEExpr(stmts); + BaseNode *exprConst = feExpr->GenMIRNode(mirBuilder); + exprConst->Dump(); + EXPECT_EQ(GetBufferString(), "trunc i32 f32 (dread f32 %a_0_0)\n"); + RestoreCout(); +} + +TEST_F(AstExprTest, ASTArraySubscriptExpr) { + RedirectCout(); + std::list stmts; + + const std::string &fileName = "hello.ast"; + const std::string &refName = "arr"; + MIRArrayType *arrayType = static_cast( + GlobalTables::GetTypeTable().GetOrCreateArrayType(*GlobalTables::GetTypeTable().GetDouble(), 10)); + ASTDecl *astDecl = ASTDeclsBuilder::ASTDeclBuilder(allocator, MapleString(fileName, mp), refName, + MapleVector({arrayType}, allocator.Adapter())); + + ASTDeclRefExpr *astRefExpr = 
ASTDeclsBuilder::ASTExprBuilder(allocator); + astRefExpr->SetASTDecl(astDecl); // astDecl is var + astRefExpr->SetType(arrayType); + + ASTCastExpr *astImplicitCastExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); + astImplicitCastExpr->SetASTExpr(astRefExpr); + + // Index + ASTIntegerLiteral *astIntIndexExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); + astIntIndexExpr->SetVal(2); + astIntIndexExpr->SetType(GlobalTables::GetTypeTable().GetInt32()); + + // Elem + ASTFloatingLiteral *astFloatingLiteral = ASTDeclsBuilder::ASTExprBuilder(allocator); + astFloatingLiteral->SetVal(3.5); + astFloatingLiteral->SetKind(FloatKind::F64); + + // astArraySubscriptExpr + ASTArraySubscriptExpr *astArraySubscriptExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); + astArraySubscriptExpr->SetBaseExpr(astImplicitCastExpr); + astArraySubscriptExpr->SetIdxExpr(astIntIndexExpr); + astArraySubscriptExpr->SetType(GlobalTables::GetTypeTable().GetPrimType(PTY_f64)); + astArraySubscriptExpr->SetArrayType(arrayType); + + ASTAssignExpr *astAssignExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); + astAssignExpr->SetLeftExpr(astArraySubscriptExpr); + astAssignExpr->SetRightExpr(astFloatingLiteral); + (void)astAssignExpr->Emit2FEExpr(stmts); + std::list mirList = stmts.front()->GenMIRStmts(mirBuilder); + mirList.front()->Dump(); + auto dumpStr = GetBufferString(); + + std::string pattern = std::string("iassign \\<\\* f64\\> 0 \\(\n array 1 ptr \\<\\* \\<\\[10\\] f64\\>\\>") + + std::string(" \\(addrof ptr %arr_0_0, constval i32 2\\), \n constval f64 3.5\\)\n\n"); + EXPECT_EQ(HIR2MPLUTRegx::Match(dumpStr, pattern), true); + RestoreCout(); +} + +TEST_F(AstExprTest, InitListExpr_Array) { + RedirectCout(); + std::list stmts; + + const std::string &fileName = "hello.ast"; + const std::string &refName = "arr"; + MIRArrayType *arrayType = static_cast( + GlobalTables::GetTypeTable().GetOrCreateArrayType(*GlobalTables::GetTypeTable().GetDouble(), 4)); + ASTDecl *astDecl = ASTDeclsBuilder::ASTDeclBuilder(allocator, MapleString(fileName, mp), refName, + MapleVector({arrayType}, allocator.Adapter())); + + ASTDeclRefExpr *astRefExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); + astRefExpr->SetASTDecl(astDecl); // astDecl is var + astRefExpr->SetType(arrayType); + + ASTCastExpr *astImplicitCastExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); + astImplicitCastExpr->SetASTExpr(astRefExpr); + + // Elem 0 + ASTFloatingLiteral *astFloatingLiteral0 = ASTDeclsBuilder::ASTExprBuilder(allocator); + astFloatingLiteral0->SetVal(2.5); + astFloatingLiteral0->SetKind(FloatKind::F64); + + // Elem 1 + ASTFloatingLiteral *astFloatingLiteral1 = ASTDeclsBuilder::ASTExprBuilder(allocator); + astFloatingLiteral1->SetVal(3.5); + astFloatingLiteral1->SetKind(FloatKind::F64); + + // astInitListExpr + ASTInitListExpr *astInitListExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); + astInitListExpr->SetInitExprs(astFloatingLiteral0); + astInitListExpr->SetInitExprs(astFloatingLiteral1); + astInitListExpr->SetInitListType(arrayType); + astInitListExpr->SetInitListVarName("arr"); + + (void)astInitListExpr->Emit2FEExpr(stmts); + std::list mirList1 = stmts.front()->GenMIRStmts(mirBuilder); + mirList1.front()->Dump(); + auto dumpStr1 = GetBufferString(); + EXPECT_EQ(dumpStr1, "iassign <* f64> 0 (\n array 1 ptr <* <[4] f64>> (addrof ptr %arr, constval i32 0), \n" \ + " constval f64 2.5)\n\n"); + + stmts.pop_front(); + std::list mirList2 = stmts.front()->GenMIRStmts(mirBuilder); + mirList2.front()->Dump(); + auto dumpStr2 = GetBufferString(); + 
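+  // The second explicit initializer (3.5) is expected to be stored into element 1 of the array,
+  // again via an iassign whose address is computed with the array operator, as checked here.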
EXPECT_EQ(dumpStr2, "iassign <* f64> 0 (\n array 1 ptr <* <[4] f64>> (addrof ptr %arr, constval i32 1), \n" \ + " constval f64 3.5)\n\n"); + + // mul dim array + uint32 arraySize[2] = {4, 10}; + MIRArrayType *arrayMulDimType = static_cast( + GlobalTables::GetTypeTable().GetOrCreateArrayType(*GlobalTables::GetTypeTable().GetDouble(), 2, arraySize)); + ASTDecl *astMulDimDecl = ASTDeclsBuilder::ASTDeclBuilder(allocator, MapleString(fileName, mp), "xxx", + MapleVector({arrayMulDimType}, allocator.Adapter())); + ASTDeclRefExpr *astMulDimRefExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); + astMulDimRefExpr->SetASTDecl(astMulDimDecl); // astDecl is var + astMulDimRefExpr->SetType(arrayMulDimType); + ASTCastExpr *astMulDimImplicitCastExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); + astMulDimImplicitCastExpr->SetASTExpr(astMulDimRefExpr); + ASTInitListExpr *astMulDimInitListExpr = ASTDeclsBuilder::ASTExprBuilder(allocator); + astMulDimInitListExpr->SetInitExprs(astInitListExpr); + astMulDimInitListExpr->SetInitListType(arrayMulDimType); + astMulDimInitListExpr->SetInitListVarName("mulDimArr"); + + stmts.clear(); + (void)astMulDimInitListExpr->Emit2FEExpr(stmts); + ASSERT_EQ(stmts.size(), 4); + std::list mirMulDimList3 = stmts.front()->GenMIRStmts(mirBuilder); + mirMulDimList3.front()->Dump(); + auto dumpStr3 = GetBufferString(); + std::string pattern3 = "iassign <* f64> 0 (\n array 1 ptr <* <[4] f64>> (\n" \ + " array 1 ptr <* <[4][10] f64>> (addrof ptr %mulDimArr, constval i32 0),\n" \ + " constval i32 0), \n" \ + " constval f64 2.5)\n\n"; + EXPECT_EQ(dumpStr3, pattern3); + + stmts.pop_front(); + std::list mirMulDimList4 = stmts.front()->GenMIRStmts(mirBuilder); + mirMulDimList4.front()->Dump(); + auto dumpStr4 = GetBufferString(); + std::string pattern4 = "iassign <* f64> 0 (\n array 1 ptr <* <[4] f64>> (\n" \ + " array 1 ptr <* <[4][10] f64>> (addrof ptr %mulDimArr, constval i32 0),\n" \ + " constval i32 1), \n" \ + " constval f64 3.5)\n\n"; + EXPECT_EQ(dumpStr4, pattern4); + + stmts.pop_front(); + std::list mirMulDimList5 = stmts.front()->GenMIRStmts(mirBuilder); + mirMulDimList5.front()->Dump(); + auto dumpStr5 = GetBufferString(); + std::string pattern5 = "intrinsiccall C_memset (\n add ptr (\n array 1 ptr <* <[4][10] f64>> " \ + "(addrof ptr %mulDimArr, constval i32 0),\n mul i32 (constval i32 2, constval i32 8))" \ + ",\n constval i32 0,\n mul i32 (constval i32 2, constval i32 8))\n\n"; + EXPECT_EQ(dumpStr5, pattern5); + + + stmts.pop_front(); + std::list mirMulDimList6 = stmts.front()->GenMIRStmts(mirBuilder); + mirMulDimList6.front()->Dump(); + auto dumpStr6 = GetBufferString(); + std::string pattern6 = "intrinsiccall C_memset (\n add ptr (\n addrof ptr %mulDimArr,\n " \ + "mul i32 (constval i32 1, constval i32 80)),\n constval i32 0,\n " \ + "mul i32 (constval i32 3, constval i32 80))\n\n"; + EXPECT_EQ(dumpStr6, pattern6); + RestoreCout(); +} +} // namespace maple diff --git a/src/hir2mpl/test/ast_input/clang/ast_var_test.cpp b/src/hir2mpl/test/ast_input/clang/ast_var_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..1a478db53d247a6184d7ae24e590cd7193190af1 --- /dev/null +++ b/src/hir2mpl/test/ast_input/clang/ast_var_test.cpp @@ -0,0 +1,86 @@ +/* + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include +#include +#include +#include "feir_test_base.h" +#include "feir_var.h" +#include "feir_var_name.h" +#include "feir_type_helper.h" +#include "hir2mpl_ut_regx.h" +#include "feir_builder.h" +#include "ast_decl_builder.h" + +namespace maple { +class FEIRVarNameTest : public FEIRTestBase { + public: + FEIRVarNameTest() = default; + virtual ~FEIRVarNameTest() = default; +}; + +TEST_F(FEIRVarNameTest, FEIRVarInAST) { + GenericAttrs attrs; + attrs.SetAttr(GENATTR_const); + MIRType *type = GlobalTables::GetTypeTable().GetInt32(); + auto astVar = ASTDeclsBuilder::ASTVarBuilder(allocator, MapleString("foo.c", mp), "a", + MapleVector({type}, allocator.Adapter()), attrs); + astVar->SetGlobal(false); + auto feirVar = astVar->Translate2FEIRVar(); + EXPECT_EQ(feirVar->GetKind(), kFEIRVarName); + EXPECT_EQ(feirVar->GetType()->GetKind(), kFEIRTypeNative); + EXPECT_EQ(feirVar->GetType()->GenerateMIRTypeAuto(), type); + EXPECT_EQ(feirVar->GetName(*type).find("a") != std::string::npos, true); // a_0_0 + EXPECT_EQ(feirVar->GetNameRaw().find("a") != std::string::npos, true); + EXPECT_EQ(feirVar->GetType()->IsArray(), false); + MIRSymbol *symbol = feirVar->GenerateMIRSymbol(mirBuilder); + RedirectCout(); + std::string symbolName = symbol->GetName(); + EXPECT_EQ(symbolName.find("a") != std::string::npos, true); + EXPECT_EQ(symbol->GetAttr(ATTR_const), true); + symbol->Dump(false, 0); + std::string symbolDump = GetBufferString(); + std::string strPattern2 = std::string("var \\$") + "a" + HIR2MPLUTRegx::Any(); + EXPECT_EQ(HIR2MPLUTRegx::Match(symbolDump, strPattern2), true); + RestoreCout(); + + MIRType *type1 = GlobalTables::GetTypeTable().GetInt32(); + auto astVar1 = ASTDeclsBuilder::ASTVarBuilder(allocator, MapleString("foo.c", mp), "a", + MapleVector({type1}, allocator.Adapter()), attrs); + astVar1->SetGlobal(false); + auto feirVar1 = astVar1->Translate2FEIRVar(); + EXPECT_EQ(feirVar1->EqualsTo(feirVar), true); + + auto feirVarClone = feirVar->Clone(); + EXPECT_EQ(feirVarClone->EqualsTo(feirVar), true); + + // array type + uint32 arraySize[3] = {3, 4, 5}; + MIRType *arrayType = GlobalTables::GetTypeTable().GetOrCreateArrayType(*type, 3, arraySize); + auto astArrVar = ASTDeclsBuilder::ASTVarBuilder(allocator, MapleString("foo.c", mp), "array", + MapleVector({arrayType}, allocator.Adapter()), attrs); + astArrVar->SetGlobal(true); + auto feirArrVar = astArrVar->Translate2FEIRVar(); + EXPECT_EQ(feirArrVar->GetType()->IsArray(), true); + MIRSymbol *symbolArr = feirArrVar->GenerateMIRSymbol(mirBuilder); + RedirectCout(); + symbolName = symbolArr->GetName(); + EXPECT_EQ(symbolName, "array"); + symbolArr->Dump(false, 0); + symbolDump = GetBufferString(); + strPattern2 = std::string("var \\$") + "array \\<\\[3\\]\\[4\\]\\[5\\] i32\\>" + HIR2MPLUTRegx::Any(); + EXPECT_EQ(HIR2MPLUTRegx::Match(symbolDump, strPattern2), true); + RestoreCout(); +} +} // namespace maple \ No newline at end of file diff --git a/src/hir2mpl/test/bytecode_input/class/JBC0001/Test.class b/src/hir2mpl/test/bytecode_input/class/JBC0001/Test.class new file mode 100644 index 0000000000000000000000000000000000000000..cf22eb34e438f9f941d7d250d399800556b849d5 Binary files /dev/null and 
b/src/hir2mpl/test/bytecode_input/class/JBC0001/Test.class differ diff --git a/src/hir2mpl/test/bytecode_input/class/JBC0001/Test.java b/src/hir2mpl/test/bytecode_input/class/JBC0001/Test.java new file mode 100644 index 0000000000000000000000000000000000000000..d441a9debb157f5d81b473ab6e823419f5269b17 --- /dev/null +++ b/src/hir2mpl/test/bytecode_input/class/JBC0001/Test.java @@ -0,0 +1,12 @@ +public class Test { + public int v1 = 0x12345678; + public static long v2 = 0x1234567890l; + public float v3 = 16.0f; + public static double v4 = -16.0; + + public void func1() { + } + + public static void func2() { + } +} diff --git a/src/hir2mpl/test/bytecode_input/class/jbc_class2fe_helper_test.cpp b/src/hir2mpl/test/bytecode_input/class/jbc_class2fe_helper_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..17db7f2ce755c79a8842036ddbd93f93529355b7 --- /dev/null +++ b/src/hir2mpl/test/bytecode_input/class/jbc_class2fe_helper_test.cpp @@ -0,0 +1,397 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include +#include +#include +#define protected public +#define private public +#include "jbc_class2fe_helper.h" +#undef protected +#undef private +#include "redirect_buffer.h" +#include "fe_options.h" + +namespace maple { +class JBCClass2FEHelperTest : public testing::Test, public RedirectBuffer { + public: + JBCClass2FEHelperTest() + : allocator(mp) {} + ~JBCClass2FEHelperTest() = default; + + static void SetUpTestCase() { + mp = FEUtils::NewMempool("MemPool for JBCClass2FEHelperTest", false /* isLcalPool */); + } + + static void TearDownTestCase() { + delete mp; + mp = nullptr; + } + + protected: + static MemPool *mp; + MapleAllocator allocator; +}; +MemPool *JBCClass2FEHelperTest::mp = nullptr; + +class JBCClassField2FEHelperTest : public testing::Test, public RedirectBuffer { + public: + JBCClassField2FEHelperTest() + : allocator(mp) {} + ~JBCClassField2FEHelperTest() = default; + + static void SetUpTestCase() { + mp = FEUtils::NewMempool("MemPool for JBCClassField2FEHelperTest", false /* isLocalPool */); + } + + static void TearDownTestCase() { + delete mp; + mp = nullptr; + } + + protected: + static MemPool *mp; + MapleAllocator allocator; +}; +MemPool *JBCClassField2FEHelperTest::mp = nullptr; + +class MockJBCClass : public jbc::JBCClass { + public: + explicit MockJBCClass(MapleAllocator &allocator) : jbc::JBCClass(allocator) {} + ~MockJBCClass() = default; + MOCK_CONST_METHOD0(GetClassNameMpl, std::string()); + MOCK_CONST_METHOD0(GetClassNameOrin, std::string()); + MOCK_CONST_METHOD0(GetSuperClassName, std::string()); + MOCK_CONST_METHOD0(GetInterfaceNames, std::vector()); +}; + +class MockJBCClassField : public jbc::JBCClassField { + public: + MockJBCClassField(MapleAllocator &allocator, const jbc::JBCClass &klass) + : jbc::JBCClassField(allocator, klass) {} + ~MockJBCClassField() = default; + MOCK_CONST_METHOD0(GetAccessFlag, uint16()); + MOCK_CONST_METHOD0(IsStatic, bool()); + MOCK_CONST_METHOD0(GetName, std::string()); + 
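+  // Together with GetAccessFlag/IsStatic/GetName above, the mocked GetDescription below lets a test
+  // supply a field's metadata directly instead of parsing it from a real .class file.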
MOCK_CONST_METHOD0(GetDescription, std::string()); +}; + +class MockJBCClass2FEHelper : public JBCClass2FEHelper { + public: + MockJBCClass2FEHelper(MapleAllocator &allocator, jbc::JBCClass &klassIn) + : JBCClass2FEHelper(allocator, klassIn) {} + ~MockJBCClass2FEHelper() = default; + MOCK_CONST_METHOD0(IsStaticFieldProguard, bool()); +}; + +TEST_F(JBCClass2FEHelperTest, PreProcessDecl_SameName) { + ::testing::NiceMock klass(allocator); + JBCClass2FEHelper helper(allocator, klass); + ON_CALL(klass, GetClassNameMpl()) + .WillByDefault(::testing::Return("Ljava_2Flang_2FObject_3B")); + ON_CALL(klass, GetClassNameOrin()) + .WillByDefault(::testing::Return("Ljava/lang/Object;")); + helper.PreProcessDecl(); + EXPECT_EQ(helper.IsSkipped(), true); +} + +TEST_F(JBCClass2FEHelperTest, PreProcessDecl_NotSameName_Class) { + ::testing::NiceMock klass(allocator); + JBCClass2FEHelper helper(allocator, klass); + ON_CALL(klass, GetClassNameMpl()) + .WillByDefault(::testing::Return("LNewClass1InJBCClass2FEHelperTest_3B")); + ON_CALL(klass, GetClassNameOrin()) + .WillByDefault(::testing::Return("LNewClass1InJBCClass2FEHelperTest;")); + helper.PreProcessDecl(); + EXPECT_EQ(helper.IsSkipped(), false); +} + +TEST_F(JBCClass2FEHelperTest, PreProcessDecl_NotSameName_Interface) { + ::testing::NiceMock klass(allocator); + JBCClass2FEHelper helper(allocator, klass); + ON_CALL(klass, GetClassNameMpl()) + .WillByDefault(::testing::Return("LNewInterface1InJBCClass2FEHelperTest_3B")); + ON_CALL(klass, GetClassNameOrin()) + .WillByDefault(::testing::Return("LNewInterface1InJBCClass2FEHelperTest;")); + klass.header.accessFlag = jbc::kAccClassInterface; + helper.PreProcessDecl(); + EXPECT_EQ(helper.IsSkipped(), false); +} + +TEST_F(JBCClass2FEHelperTest, ProcessDeclSuperClassForClass) { + ::testing::NiceMock klass(allocator); + JBCClass2FEHelper helper(allocator, klass); + ON_CALL(klass, GetClassNameMpl()) + .WillByDefault(::testing::Return("LNewClass2InJBCClass2FEHelperTest_3B")); + ON_CALL(klass, GetClassNameOrin()) + .WillByDefault(::testing::Return("LNewClass2InJBCClass2FEHelperTest;")); + ON_CALL(klass, GetSuperClassName()) + .WillByDefault(::testing::Return("Ljava/lang/Object;")); + helper.PreProcessDecl(); + helper.ProcessDeclSuperClass(); + MIRStructType *structType = helper.GetContainer(); + EXPECT_NE(structType, nullptr); + EXPECT_EQ(structType->GetKind(), kTypeClass); + MIRClassType *classType = static_cast(structType); + MIRType *superType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(classType->GetParentTyIdx()); + EXPECT_EQ(superType->GetKind(), kTypeClass); + EXPECT_EQ(superType->GetCompactMplTypeName(), "Ljava_2Flang_2FObject_3B"); +} + +TEST_F(JBCClass2FEHelperTest, ProcessDeclImplementsForClass) { + ::testing::NiceMock klass(allocator); + JBCClass2FEHelper helper(allocator, klass); + ON_CALL(klass, GetClassNameMpl()) + .WillByDefault(::testing::Return("LNewClass3InJBCClass2FEHelperTest_3B")); + ON_CALL(klass, GetClassNameOrin()) + .WillByDefault(::testing::Return("LNewClass3InJBCClass2FEHelperTest;")); + ON_CALL(klass, GetInterfaceNames()) + .WillByDefault(::testing::Return(std::vector({ "LTestInterface1;", "LTestInterface2;" }))); + helper.PreProcessDecl(); + helper.ProcessDeclImplements(); + MIRStructType *structType = helper.GetContainer(); + EXPECT_NE(structType, nullptr); + EXPECT_EQ(structType->GetKind(), kTypeClass); + MIRClassType *classType = static_cast(structType); + const std::vector &interfaces = classType->GetInterfaceImplemented(); + EXPECT_EQ(interfaces.size(), 2); + MIRType 
*interfaceType0 = GlobalTables::GetTypeTable().GetTypeFromTyIdx(interfaces[0]); + EXPECT_EQ(interfaceType0->GetKind(), kTypeInterfaceIncomplete); + EXPECT_EQ(interfaceType0->GetCompactMplTypeName(), "LTestInterface1_3B"); + MIRType *interfaceType1 = GlobalTables::GetTypeTable().GetTypeFromTyIdx(interfaces[1]); + EXPECT_EQ(interfaceType1->GetKind(), kTypeInterfaceIncomplete); + EXPECT_EQ(interfaceType1->GetCompactMplTypeName(), "LTestInterface2_3B"); +} + +TEST_F(JBCClass2FEHelperTest, ProcessDeclSuperClassForInterface) { + ::testing::NiceMock klass(allocator); + klass.header.accessFlag = jbc::kAccClassInterface; + JBCClass2FEHelper helper(allocator, klass); + ON_CALL(klass, GetClassNameMpl()) + .WillByDefault(::testing::Return("LNewInterface2InJBCClass2FEHelperTest_3B")); + ON_CALL(klass, GetClassNameOrin()) + .WillByDefault(::testing::Return("LNewInterface2InJBCClass2FEHelperTest;")); + ON_CALL(klass, GetSuperClassName()) + .WillByDefault(::testing::Return("Ljava/io/Serializable;")); + helper.PreProcessDecl(); + helper.ProcessDeclSuperClass(); + MIRStructType *structType = helper.GetContainer(); + EXPECT_NE(structType, nullptr); + EXPECT_EQ(structType->GetKind(), kTypeInterface); + MIRInterfaceType *interfaceType = static_cast(structType); + const std::vector &parents = interfaceType->GetParentsTyIdx(); + ASSERT_EQ(parents.size(), 1); + MIRType *interfaceType0 = GlobalTables::GetTypeTable().GetTypeFromTyIdx(parents[0]); + EXPECT_EQ(interfaceType0->GetKind(), kTypeInterface); + EXPECT_EQ(interfaceType0->GetCompactMplTypeName(), "Ljava_2Fio_2FSerializable_3B"); +} + +TEST_F(JBCClass2FEHelperTest, ProcessDeclImplementsForInterface) { + ::testing::NiceMock klass(allocator); + klass.header.accessFlag = jbc::kAccClassInterface; + JBCClass2FEHelper helper(allocator, klass); + ON_CALL(klass, GetClassNameMpl()) + .WillByDefault(::testing::Return("LNewInterface3InJBCClass2FEHelperTest_3B")); + ON_CALL(klass, GetClassNameOrin()) + .WillByDefault(::testing::Return("LNewInterface3InJBCClass2FEHelperTest;")); + ON_CALL(klass, GetInterfaceNames()) + .WillByDefault(::testing::Return(std::vector({ "LTestInterface3;", "LTestInterface4;" }))); + helper.PreProcessDecl(); + helper.ProcessDeclImplements(); + MIRStructType *structType = helper.GetContainer(); + EXPECT_NE(structType, nullptr); + EXPECT_EQ(structType->GetKind(), kTypeInterface); + MIRInterfaceType *interfaceType = static_cast(structType); + const std::vector &parents = interfaceType->GetParentsTyIdx(); + EXPECT_EQ(parents.size(), 2); + MIRType *interfaceType0 = GlobalTables::GetTypeTable().GetTypeFromTyIdx(parents[0]); + EXPECT_EQ(interfaceType0->GetKind(), kTypeInterfaceIncomplete); + EXPECT_EQ(interfaceType0->GetCompactMplTypeName(), "LTestInterface3_3B"); + MIRType *interfaceType1 = GlobalTables::GetTypeTable().GetTypeFromTyIdx(parents[1]); + EXPECT_EQ(interfaceType1->GetKind(), kTypeInterfaceIncomplete); + EXPECT_EQ(interfaceType1->GetCompactMplTypeName(), "LTestInterface4_3B"); +} + +TEST_F(JBCClass2FEHelperTest, CreateSymbol_Class) { + ::testing::NiceMock klass(allocator); + JBCClass2FEHelper helper(allocator, klass); + ON_CALL(klass, GetClassNameMpl()) + .WillByDefault(::testing::Return("LNewClass4InJBCClass2FEHelperTest_3B")); + ON_CALL(klass, GetClassNameOrin()) + .WillByDefault(::testing::Return("LNewClass4InJBCClass2FEHelperTest;")); + helper.PreProcessDecl(); + helper.CreateSymbol(); + EXPECT_NE(helper.mirSymbol, nullptr); + RedirectCout(); + helper.mirSymbol->Dump(false, 0); + EXPECT_EQ(GetBufferString(), + "javaclass 
$LNewClass4InJBCClass2FEHelperTest_3B <$LNewClass4InJBCClass2FEHelperTest_3B>\n"); + RestoreCout(); +} + +TEST_F(JBCClass2FEHelperTest, CreateSymbol_Interface) { + ::testing::NiceMock klass(allocator); + JBCClass2FEHelper helper(allocator, klass); + ON_CALL(klass, GetClassNameMpl()) + .WillByDefault(::testing::Return("LNewInterface4InJBCClass2FEHelperTest_3B")); + ON_CALL(klass, GetClassNameOrin()) + .WillByDefault(::testing::Return("LNewInterface4InJBCClass2FEHelperTest;")); + klass.header.accessFlag = jbc::kAccClassInterface; + helper.PreProcessDecl(); + helper.CreateSymbol(); + EXPECT_NE(helper.mirSymbol, nullptr); + RedirectCout(); + helper.mirSymbol->Dump(false, 0); + EXPECT_EQ(GetBufferString(), + "javainterface $LNewInterface4InJBCClass2FEHelperTest_3B <$LNewInterface4InJBCClass2FEHelperTest_3B>\n"); + RestoreCout(); +} + +TEST_F(JBCClass2FEHelperTest, ProcessDecl_Class) { + ::testing::NiceMock klass(allocator); + klass.header.accessFlag = jbc::kAccClassAbstract; + JBCClass2FEHelper helper(allocator, klass); + ON_CALL(klass, GetClassNameMpl()) + .WillByDefault(::testing::Return("LNewClass5InJBCClass2FEHelperTest_3B")); + ON_CALL(klass, GetClassNameOrin()) + .WillByDefault(::testing::Return("LNewClass5InJBCClass2FEHelperTest;")); + ON_CALL(klass, GetSuperClassName()) + .WillByDefault(::testing::Return("Ljava/lang/Object;")); + ON_CALL(klass, GetInterfaceNames()) + .WillByDefault(::testing::Return(std::vector())); + helper.PreProcessDecl(); + helper.ProcessDecl(); + EXPECT_NE(helper.mirSymbol, nullptr); + RedirectCout(); + helper.mirSymbol->Dump(false, 0); + EXPECT_EQ(GetBufferString(), + "javaclass $LNewClass5InJBCClass2FEHelperTest_3B <$LNewClass5InJBCClass2FEHelperTest_3B> abstract\n"); + RestoreCout(); +} + +TEST_F(JBCClass2FEHelperTest, ProcessDecl_Interface) { + ::testing::NiceMock klass(allocator); + klass.header.accessFlag = jbc::kAccClassInterface | jbc::kAccClassPublic; + JBCClass2FEHelper helper(allocator, klass); + ON_CALL(klass, GetClassNameMpl()) + .WillByDefault(::testing::Return("LNewInterface5InJBCClass2FEHelperTest_3B")); + ON_CALL(klass, GetClassNameOrin()) + .WillByDefault(::testing::Return("LNewInterface5InJBCClass2FEHelperTest;")); + ON_CALL(klass, GetSuperClassName()) + .WillByDefault(::testing::Return("Ljava/lang/Object;")); + ON_CALL(klass, GetInterfaceNames()) + .WillByDefault(::testing::Return(std::vector())); + helper.PreProcessDecl(); + helper.ProcessDecl(); + EXPECT_NE(helper.mirSymbol, nullptr); + RedirectCout(); + helper.mirSymbol->Dump(false, 0); + EXPECT_EQ(GetBufferString(), + "javainterface $LNewInterface5InJBCClass2FEHelperTest_3B <$LNewInterface5InJBCClass2FEHelperTest_3B> public\n"); + RestoreCout(); +} + +TEST_F(JBCClassField2FEHelperTest, ProcessDeclWithContainer_Instance) { + ::testing::NiceMock klass(allocator); + ON_CALL(klass, GetClassNameOrin()) + .WillByDefault(::testing::Return("LTestPack/TestClass;")); + ::testing::NiceMock field(allocator, klass); + ON_CALL(field, GetAccessFlag()).WillByDefault(::testing::Return(jbc::kAccFieldPublic)); + ON_CALL(field, IsStatic()).WillByDefault(::testing::Return(false)); + ON_CALL(field, GetName()).WillByDefault(::testing::Return("field")); + ON_CALL(field, GetDescription()).WillByDefault(::testing::Return("Ljava/lang/Object;")); + JBCClassField2FEHelper fieldHelper(allocator, field); + ASSERT_EQ(fieldHelper.ProcessDeclWithContainer(allocator), true); + // check field name + std::string fieldName = GlobalTables::GetStrTable().GetStringFromStrIdx(fieldHelper.mirFieldPair.first); + EXPECT_EQ(fieldName, 
"field"); + // check field type + MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(fieldHelper.mirFieldPair.second.first); + EXPECT_EQ(type->GetKind(), kTypePointer); + EXPECT_EQ(type->GetCompactMplTypeName(), "Ljava_2Flang_2FObject_3B"); + // check field attr + EXPECT_EQ(fieldHelper.mirFieldPair.second.second.GetAttr(FLDATTR_public), true); +} + +TEST_F(JBCClassField2FEHelperTest, ProcessDeclWithContainer_Static) { + ::testing::NiceMock klass(allocator); + ON_CALL(klass, GetClassNameOrin()) + .WillByDefault(::testing::Return("LTestPack/TestClass;")); + ::testing::NiceMock field(allocator, klass); + ON_CALL(field, GetAccessFlag()).WillByDefault(::testing::Return(jbc::kAccFieldProtected)); + ON_CALL(field, IsStatic()).WillByDefault(::testing::Return(true)); + ON_CALL(field, GetName()).WillByDefault(::testing::Return("field_static")); + ON_CALL(field, GetDescription()).WillByDefault(::testing::Return("Ljava/lang/Object;")); + JBCClassField2FEHelper fieldHelper(allocator, field); + ASSERT_EQ(fieldHelper.ProcessDeclWithContainer(allocator), true); + // check field name + std::string fieldName = GlobalTables::GetStrTable().GetStringFromStrIdx(fieldHelper.mirFieldPair.first); + EXPECT_EQ(fieldName, "LTestPack_2FTestClass_3B_7Cfield__static"); + // check field type + MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(fieldHelper.mirFieldPair.second.first); + EXPECT_EQ(type->GetKind(), kTypePointer); + EXPECT_EQ(type->GetCompactMplTypeName(), "Ljava_2Flang_2FObject_3B"); + // check field attr + EXPECT_EQ(fieldHelper.mirFieldPair.second.second.GetAttr(FLDATTR_protected), true); +} + +TEST_F(JBCClassField2FEHelperTest, ProcessDeclWithContainer_Static_AllType) { + ::testing::NiceMock klass(allocator); + ON_CALL(klass, GetClassNameOrin()) + .WillByDefault(::testing::Return("LTestPack/TestClass;")); + ::testing::NiceMock field(allocator, klass); + ON_CALL(field, GetAccessFlag()).WillByDefault(::testing::Return(jbc::kAccFieldPrivate)); + ON_CALL(field, IsStatic()).WillByDefault(::testing::Return(true)); + ON_CALL(field, GetName()).WillByDefault(::testing::Return("field_static")); + ON_CALL(field, GetDescription()).WillByDefault(::testing::Return("Ljava/lang/Object;")); + FEOptions::GetInstance().SetModeJavaStaticFieldName(FEOptions::kAllType); + JBCClassField2FEHelper fieldHelper(allocator, field); + ASSERT_EQ(fieldHelper.ProcessDeclWithContainer(allocator), true); + // check field name + std::string fieldName = GlobalTables::GetStrTable().GetStringFromStrIdx(fieldHelper.mirFieldPair.first); + EXPECT_EQ(fieldName, "LTestPack_2FTestClass_3B_7Cfield__static_7CLjava_2Flang_2FObject_3B"); + // check field type + MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(fieldHelper.mirFieldPair.second.first); + EXPECT_EQ(type->GetKind(), kTypePointer); + EXPECT_EQ(type->GetCompactMplTypeName(), "Ljava_2Flang_2FObject_3B"); + // check field attr + EXPECT_EQ(fieldHelper.mirFieldPair.second.second.GetAttr(FLDATTR_private), true); +} + +TEST_F(JBCClassField2FEHelperTest, ProcessDeclWithContainer_Static_Smart) { + ::testing::NiceMock klass(allocator); + ON_CALL(klass, GetClassNameOrin()) + .WillByDefault(::testing::Return("LTestPack/TestClass;")); + ::testing::NiceMock field(allocator, klass); + ON_CALL(field, GetAccessFlag()).WillByDefault(::testing::Return(jbc::kAccFieldPrivate)); + ON_CALL(field, IsStatic()).WillByDefault(::testing::Return(true)); + ON_CALL(field, GetName()).WillByDefault(::testing::Return("field_static")); + ON_CALL(field, 
GetDescription()).WillByDefault(::testing::Return("Ljava/lang/Object;")); + ::testing::NiceMock klassHelper(allocator, klass); + ON_CALL(klassHelper, IsStaticFieldProguard()).WillByDefault(::testing::Return(true)); + FEOptions::GetInstance().SetModeJavaStaticFieldName(FEOptions::kSmart); + JBCClassField2FEHelper fieldHelper(allocator, field); + ASSERT_EQ(fieldHelper.ProcessDeclWithContainer(allocator), true); + // check field name + std::string fieldName = GlobalTables::GetStrTable().GetStringFromStrIdx(fieldHelper.mirFieldPair.first); + EXPECT_EQ(fieldName, "LTestPack_2FTestClass_3B_7Cfield__static_7CLjava_2Flang_2FObject_3B"); + // check field type + MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(fieldHelper.mirFieldPair.second.first); + EXPECT_EQ(type->GetKind(), kTypePointer); + EXPECT_EQ(type->GetCompactMplTypeName(), "Ljava_2Flang_2FObject_3B"); + // check field attr + EXPECT_EQ(fieldHelper.mirFieldPair.second.second.GetAttr(FLDATTR_private), true); + FEOptions::GetInstance().SetModeJavaStaticFieldName(FEOptions::kNoType); +} +} // namespace maple diff --git a/src/hir2mpl/test/bytecode_input/class/jbc_class_const_test.cpp b/src/hir2mpl/test/bytecode_input/class/jbc_class_const_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..a71cda587c8d460c8d7f9b6f682f9b23ca83ec99 --- /dev/null +++ b/src/hir2mpl/test/bytecode_input/class/jbc_class_const_test.cpp @@ -0,0 +1,25 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include +#include +#include "jbc_class_const.h" + +namespace maple { +namespace jbc { +TEST(JBCClassConst, GetTagName) { + EXPECT_EQ(JBCConstTagName::GetTagName(kConstUTF8), "ConstUTF8"); +} +} // namespace jbc +} // namespace maple \ No newline at end of file diff --git a/src/hir2mpl/test/bytecode_input/class/jbc_class_test.cpp b/src/hir2mpl/test/bytecode_input/class/jbc_class_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..10c3c9fd01f6a8951083f17a0699582106c6febb --- /dev/null +++ b/src/hir2mpl/test/bytecode_input/class/jbc_class_test.cpp @@ -0,0 +1,120 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include +#include +#include "base64.h" +#include "jbc_class.h" +#include "fe_utils.h" + +namespace maple { +namespace jbc { +class JBCClassTest : public testing::Test { + public: + JBCClassTest() + : allocator(mp), + fileJBC0001(nullptr), + ioJBC0001(nullptr) { + LoadFileJBC0001(); + } + + ~JBCClassTest() = default; + + void LoadFileJBC0001() { + std::string base64Str = + "yv66vgAAADQAKQoADQAiAxI0VngJAAwAIwRBgAAACQAMACQFAAAAEjRWeJAJ" + "AAwAJQbAMAAAAAAAAAkADAAmBwAnBwAoAQACdjEBAAFJAQACdjIBAAFKAQAC" + "djMBAAFGAQACdjQBAAFEAQAGPGluaXQ+AQADKClWAQAEQ29kZQEAD0xpbmVO" + "dW1iZXJUYWJsZQEAEkxvY2FsVmFyaWFibGVUYWJsZQEABHRoaXMBAAZMVGVz" + "dDsBAAVmdW5jMQEABWZ1bmMyAQAIPGNsaW5pdD4BAApTb3VyY2VGaWxlAQAJ" + "VGVzdC5qYXZhDAAWABcMAA4ADwwAEgATDAAQABEMABQAFQEABFRlc3QBABBq" + "YXZhL2xhbmcvT2JqZWN0ACEADAANAAAABAABAA4ADwAAAAkAEAARAAAAAQAS" + "ABMAAAAJABQAFQAAAAQAAQAWABcAAQAYAAAAQwACAAEAAAARKrcAASoSArUA" + "AyoSBLUABbEAAAACABkAAAAOAAMAAAABAAQAAgAKAAQAGgAAAAwAAQAAABEA" + "GwAcAAAAAQAdABcAAQAYAAAAKwAAAAEAAAABsQAAAAIAGQAAAAYAAQAAAAgA" + "GgAAAAwAAQAAAAEAGwAcAAAACQAeABcAAQAYAAAAGQAAAAAAAAABsQAAAAEA" + "GQAAAAYAAQAAAAsACAAfABcAAQAYAAAAKQACAAAAAAANFAAGswAIFAAJswAL" + "sQAAAAEAGQAAAAoAAgAAAAMABgAFAAEAIAAAAAIAIQ=="; + size_t length = 0; + uint8 *ptr = Base64::Decode(base64Str, length); + fileJBC0001 = BasicIOMapFile::GenFileInMemory("JBC0001", ptr, length); + ioJBC0001 = mp->New(*fileJBC0001, true); + } + + inline MapleAllocator &GetAllocator() { + return allocator; + } + + protected: + static MemPool *mp; + MapleAllocator allocator; + std::unique_ptr fileJBC0001; + BasicIORead *ioJBC0001; + + static void SetUpTestCase() { + mp = FEUtils::NewMempool("MemPool for JBCClassTest", false /* isLocalPool */); + } + + static void TearDownTestCase() { + delete mp; + mp = nullptr; + } +}; +MemPool *JBCClassTest::mp = nullptr; + +TEST_F(JBCClassTest, ParseFileJBC0001) { + JBCClass klass(allocator); + bool success = klass.ParseFile(*ioJBC0001); + EXPECT_EQ(success, true); + // ConstPoolCount test + uint16 constSize = klass.GetConstPoolCount(); + EXPECT_EQ(constSize, 41); + // GetConstByIdx test + const JBCConst *const0 = klass.GetConstPool().GetConstByIdx(0); + EXPECT_EQ(const0, nullptr); + const JBCConst *const9 = klass.GetConstPool().GetConstByIdx(9); + EXPECT_NE(const9, nullptr); + const JBCConst *const42 = klass.GetConstPool().GetConstByIdx(42); + EXPECT_EQ(const42, nullptr); + // GetConstByIdxWithTag test + const JBCConst *const3 = klass.GetConstPool().GetConstByIdxWithTag(3, kConstFieldRef); + EXPECT_NE(const3, nullptr); + const JBCConst *const11 = klass.GetConstPool().GetConstByIdxWithTag(11, kConstUTF8); + EXPECT_EQ(const11, nullptr); + // GetConstValueByIdx test + const JBCConst *const4 = klass.GetConstPool().GetConstValueByIdx(4); + EXPECT_NE(const4, nullptr); + const JBCConst *const6 = klass.GetConstPool().GetConstValueByIdx(6); + EXPECT_NE(const6, nullptr); + const JBCConst *const8 = klass.GetConstPool().GetConstValueByIdx(8); + EXPECT_EQ(const8, nullptr); + // GetConstValue4ByteByIdx test + const4 = klass.GetConstPool().GetConstValue4ByteByIdx(4); + EXPECT_NE(const4, nullptr); + const6 = klass.GetConstPool().GetConstValue4ByteByIdx(6); + EXPECT_EQ(const6, nullptr); + // GetConstValue8ByteByIdx test + const4 = klass.GetConstPool().GetConstValue8ByteByIdx(4); + EXPECT_EQ(const4, nullptr); + const6 = klass.GetConstPool().GetConstValue8ByteByIdx(6); + EXPECT_NE(const6, nullptr); + // Field Count + EXPECT_EQ(klass.GetFieldCount(), 4); + // Method Count + EXPECT_EQ(klass.GetMethodCount(), 4); + // Attr Count + 
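The constant-pool count of 41 checked in ParseFileJBC0001 above comes straight from the class-file header embedded in the Base64 payload: its first bytes decode to the standard magic / version / constant_pool_count layout. A minimal standalone sketch of reading that header follows; the byte values are the hand-decoded prefix of the "yv66vg..." string above, and ReadU2/ReadU4 are local illustrative helpers, not hir2mpl APIs.

// Standalone sketch, not hir2mpl code: reading the class-file header fields
// that the expectations above (constant-pool count == 41) rely on.
#include <cassert>
#include <cstdint>
#include <vector>

static uint32_t ReadU4(const std::vector<uint8_t> &buf, size_t pos) {
  return (static_cast<uint32_t>(buf[pos]) << 24) | (buf[pos + 1] << 16) |
         (buf[pos + 2] << 8) | buf[pos + 3];
}

static uint16_t ReadU2(const std::vector<uint8_t> &buf, size_t pos) {
  return static_cast<uint16_t>((buf[pos] << 8) | buf[pos + 1]);
}

int main() {
  // Hand-decoded prefix of the Base64 payload embedded in the test above.
  std::vector<uint8_t> head = {0xCA, 0xFE, 0xBA, 0xBE, 0x00, 0x00, 0x00, 0x34, 0x00, 0x29};
  assert(ReadU4(head, 0) == 0xCAFEBABEu);  // magic
  assert(ReadU2(head, 4) == 0);            // minor_version
  assert(ReadU2(head, 6) == 0x34);         // major_version 52, i.e. Java 8
  assert(ReadU2(head, 8) == 41);           // constant_pool_count, as the test expects
  return 0;
}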
EXPECT_EQ(klass.GetAttrCount(), 1); +} +} // namespace jbc +} // namespace maple diff --git a/src/hir2mpl/test/bytecode_input/class/jbc_function_test.cpp b/src/hir2mpl/test/bytecode_input/class/jbc_function_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..d3f5ab958ee83c347276563e05a30a8b1051aa1e --- /dev/null +++ b/src/hir2mpl/test/bytecode_input/class/jbc_function_test.cpp @@ -0,0 +1,166 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include +#include +#include "jbc_function.h" +#include "redirect_buffer.h" +#include "hir2mpl_ut_environment.h" + +namespace maple { +class JBCFunctionTest : public testing::Test, public RedirectBuffer { + public: + static MemPool *mp; + MapleAllocator allocator; + jbc::JBCClass jbcClass; + jbc::JBCClassMethod jbcMethod; + JBCClassMethod2FEHelper jbcMethodHelper; + MIRFunction mirFunction; + JBCFunction jbcFunction; + JBCFunctionTest() + : allocator(mp), + jbcClass(allocator), + jbcMethod(allocator, jbcClass), + jbcMethodHelper(allocator, jbcMethod), + mirFunction(&HIR2MPLUTEnvironment::GetMIRModule(), StIdx(0, 0)), + jbcFunction(jbcMethodHelper, mirFunction, std::make_unique(true)) {} + ~JBCFunctionTest() = default; + + static void SetUpTestCase() { + mp = FEUtils::NewMempool("MemPool for JBCFunctionTest", false /* isLcalPool */); + } + + static void TearDownTestCase() { + delete mp; + mp = nullptr; + } +}; +MemPool *JBCFunctionTest::mp = nullptr; + +TEST_F(JBCFunctionTest, BuildTryInfo_Case1) { + std::map, std::vector> rawInfo; + /* Case1 + * S(0)--E(10)--C(20) + */ + rawInfo[std::make_pair(0, 10)] = std::vector({ 20 }); + std::map outMapStartEnd; + std::map> outMapStartCatch; + jbcFunction.BuildTryInfo(rawInfo, outMapStartEnd, outMapStartCatch); + EXPECT_EQ(outMapStartEnd[0], 10); + ASSERT_EQ(outMapStartCatch[0].size(), 1); + EXPECT_EQ(outMapStartCatch[0][0], 20); +} + +TEST_F(JBCFunctionTest, BuildTryInfo_Case2) { + std::map, std::vector> rawInfo; + /* Case2 + * S(0)--E(10)--C(10) + */ + rawInfo[std::make_pair(0, 10)] = std::vector({ 10 }); + std::map outMapStartEnd; + std::map> outMapStartCatch; + jbcFunction.BuildTryInfo(rawInfo, outMapStartEnd, outMapStartCatch); + EXPECT_EQ(outMapStartEnd[0], 10); + ASSERT_EQ(outMapStartCatch[0].size(), 1); + EXPECT_EQ(outMapStartCatch[0][0], 10); +} + +TEST_F(JBCFunctionTest, BuildTryInfo_Case3) { + std::map, std::vector> rawInfo; + /* Case3 + * S(0)--E(10)--C(5) + */ + rawInfo[std::make_pair(0, 10)] = std::vector({ 5 }); + std::map outMapStartEnd; + std::map> outMapStartCatch; + jbcFunction.BuildTryInfo(rawInfo, outMapStartEnd, outMapStartCatch); + EXPECT_EQ(outMapStartEnd[0], 5); + EXPECT_EQ(outMapStartEnd[5], 10); + ASSERT_EQ(outMapStartCatch[0].size(), 1); + ASSERT_EQ(outMapStartCatch[5].size(), 1); + EXPECT_EQ(outMapStartCatch[0][0], 5); + EXPECT_EQ(outMapStartCatch[5][0], 5); +} + +TEST_F(JBCFunctionTest, BuildTryInfo_Case4) { + std::map, std::vector> rawInfo; + /* Case4 + * S1/S2(0)--E1(10)--E2(20)--C1(25)--C2(30) + */ + rawInfo[std::make_pair(0, 
10)] = std::vector({ 25 }); + rawInfo[std::make_pair(0, 20)] = std::vector({ 30 }); + std::map outMapStartEnd; + std::map> outMapStartCatch; + jbcFunction.BuildTryInfo(rawInfo, outMapStartEnd, outMapStartCatch); + EXPECT_EQ(outMapStartEnd[0], 10); + EXPECT_EQ(outMapStartEnd[10], 20); + ASSERT_EQ(outMapStartCatch[0].size(), 2); + ASSERT_EQ(outMapStartCatch[10].size(), 1); + EXPECT_EQ(outMapStartCatch[0][0], 25); + EXPECT_EQ(outMapStartCatch[0][1], 30); + EXPECT_EQ(outMapStartCatch[10][0], 30); +} + +TEST_F(JBCFunctionTest, BuildTryInfo_Case5) { + std::map, std::vector> rawInfo; + /* Case5 + * S(0)--E(10)--C_1(20)--C_2(30) + */ + rawInfo[std::make_pair(0, 10)] = std::vector({ 20, 30 }); + std::map outMapStartEnd; + std::map> outMapStartCatch; + jbcFunction.BuildTryInfo(rawInfo, outMapStartEnd, outMapStartCatch); + EXPECT_EQ(outMapStartEnd[0], 10); + ASSERT_EQ(outMapStartCatch[0].size(), 2); + EXPECT_EQ(outMapStartCatch[0][0], 20); + EXPECT_EQ(outMapStartCatch[0][1], 30); +} + +TEST_F(JBCFunctionTest, BuildTryInfo_Case6) { + std::map, std::vector> rawInfo; + /* Case6 + * S(0)--C_1(5)--E(10)--C_2(20) + */ + rawInfo[std::make_pair(0, 10)] = std::vector({ 5, 20 }); + std::map outMapStartEnd; + std::map> outMapStartCatch; + jbcFunction.BuildTryInfo(rawInfo, outMapStartEnd, outMapStartCatch); + EXPECT_EQ(outMapStartEnd[0], 5); + EXPECT_EQ(outMapStartEnd[5], 10); + ASSERT_EQ(outMapStartCatch[0].size(), 2); + EXPECT_EQ(outMapStartCatch[0][0], 5); + EXPECT_EQ(outMapStartCatch[0][1], 20); + ASSERT_EQ(outMapStartCatch[5].size(), 2); + EXPECT_EQ(outMapStartCatch[5][0], 5); + EXPECT_EQ(outMapStartCatch[5][1], 20); +} + +TEST_F(JBCFunctionTest, BuildTryInfo_Case7) { + std::map, std::vector> rawInfo; + /* Case7 + * S1(0)-E1(10)-C1(20)-S2(30)-E2(40)-C2(50) + */ + rawInfo[std::make_pair(0, 10)] = std::vector({ 20 }); + rawInfo[std::make_pair(30, 40)] = std::vector({ 50 }); + std::map outMapStartEnd; + std::map> outMapStartCatch; + jbcFunction.BuildTryInfo(rawInfo, outMapStartEnd, outMapStartCatch); + EXPECT_EQ(outMapStartEnd[0], 10); + EXPECT_EQ(outMapStartEnd[30], 40); + ASSERT_EQ(outMapStartCatch[0].size(), 1); + EXPECT_EQ(outMapStartCatch[0][0], 20); + ASSERT_EQ(outMapStartCatch[30].size(), 1); + EXPECT_EQ(outMapStartCatch[30][0], 50); +} +} // namespace maple diff --git a/src/hir2mpl/test/bytecode_input/class/jbc_opcode_helper_test.cpp b/src/hir2mpl/test/bytecode_input/class/jbc_opcode_helper_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..d0886f349bb85bda7e34ffea0139abf089098460 --- /dev/null +++ b/src/hir2mpl/test/bytecode_input/class/jbc_opcode_helper_test.cpp @@ -0,0 +1,180 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include +#include +#include "fe_configs.h" +#include "base64.h" +#include "mempool.h" +#include "mempool_allocator.h" +#include "jbc_class.h" +#include "jbc_opcode.h" +#include "jbc_opcode_helper.h" +#include "redirect_buffer.h" +#include "fe_utils.h" + +namespace maple { +class JBCOpcodeHelperTest : public testing::Test, public RedirectBuffer { + public: + JBCOpcodeHelperTest() + : allocator(mp), + klass(allocator), + method(allocator, klass), + helper(method) {} + ~JBCOpcodeHelperTest() = default; + + static void SetUpTestCase() { + mp = FEUtils::NewMempool("MemPool for JBCOpcodeHelperTest", false /* isLcalPool */); + } + + static void TearDownTestCase() { + delete mp; + mp = nullptr; + } + + LLT_PRIVATE: + static MemPool *mp; + MapleAllocator allocator; + jbc::JBCClass klass; + jbc::JBCClassMethod method; + JBCOpcodeHelper helper; +}; // class JBCOpcodeHelperTest +MemPool *JBCOpcodeHelperTest::mp = nullptr; + +TEST_F(JBCOpcodeHelperTest, GetBaseTypeNamesForOPDefault) { + jbc::JBCOpGoto *op = mp->New(allocator, jbc::kOpGoto, jbc::kOpKindGoto, false); + bool success = false; + std::vector ans = helper.GetBaseTypeNamesForOP(*op, success); + EXPECT_EQ(success, true); + EXPECT_EQ(ans.size(), 0); +} + +TEST_F(JBCOpcodeHelperTest, GetBaseTypeNamesForOPConst_Class) { + uint16 idxConstClass; + (void)klass.constPool.NewConstClass(idxConstClass, "Ljava/lang/Object;"); + jbc::JBCOpConst *op = mp->New(allocator, jbc::kOpLdc, jbc::kOpKindConst, false); + op->SetIndex(idxConstClass); + bool success = false; + std::vector ans = helper.GetBaseTypeNamesForOP(*op, success); + EXPECT_EQ(success, true); + ASSERT_EQ(ans.size(), 1); + EXPECT_EQ(ans[0], "Ljava/lang/Object;"); + op->SetIndex(0xFFFF); + ans = helper.GetBaseTypeNamesForOP(*op, success); + EXPECT_EQ(success, false); + EXPECT_EQ(ans.size(), 0); +} + +TEST_F(JBCOpcodeHelperTest, GetBaseTypeNamesForOPFieldOpr) { + uint16 idxConstRef; + (void)klass.constPool.NewConstRef(idxConstRef, jbc::kConstFieldRef, "LTest;", "field", "Ljava/lang/String;"); + jbc::JBCOpFieldOpr *op = mp->New(allocator, jbc::kOpGetField, jbc::kOpKindFieldOpr, false); + op->SetFieldIdx(idxConstRef); + bool success = false; + std::vector ans = helper.GetBaseTypeNamesForOP(*op, success); + EXPECT_EQ(success, true); + ASSERT_EQ(ans.size(), 1); + EXPECT_EQ(ans[0], "Ljava/lang/String;"); + op->SetFieldIdx(0xFFFF); + ans = helper.GetBaseTypeNamesForOP(*op, success); + EXPECT_EQ(success, false); + EXPECT_EQ(ans.size(), 0); +} + +TEST_F(JBCOpcodeHelperTest, GetBaseTypeNamesForOPInvoke) { + uint16 idxConstRef; + (void)klass.constPool.NewConstRef(idxConstRef, jbc::kConstMethodRef, "LTest;", "method", + "(Ljava/lang/String;IJ)Ljava/lang/Object;"); + jbc::JBCOpInvoke *op = mp->New(allocator, jbc::kOpInvokeVirtual, jbc::kOpKindInvoke, false); + op->SetMethodIdx(idxConstRef); + bool success = false; + std::vector ans = helper.GetBaseTypeNamesForOP(*op, success); + EXPECT_EQ(success, true); + ASSERT_EQ(ans.size(), 4); + EXPECT_EQ(ans[0], "Ljava/lang/Object;"); + EXPECT_EQ(ans[1], "Ljava/lang/String;"); + EXPECT_EQ(ans[2], "I"); + EXPECT_EQ(ans[3], "J"); + op->SetMethodIdx(0xFFFF); + ans = helper.GetBaseTypeNamesForOP(*op, success); + EXPECT_EQ(success, false); + EXPECT_EQ(ans.size(), 0); +} + +TEST_F(JBCOpcodeHelperTest, GetBaseTypeNamesForOPNew) { + uint16 idxConstClass; + (void)klass.constPool.NewConstClass(idxConstClass, "Ljava/lang/Integer;"); + jbc::JBCOpNew *op = mp->New(allocator, jbc::kOpNew, jbc::kOpKindNew, false); + op->SetRefTypeIdx(idxConstClass); + bool success = false; + 
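The GetBaseTypeNamesForOP cases in this file all reduce to splitting JVM descriptors pulled from the constant pool: the invoke case further below expects the return type first and then each parameter of "(Ljava/lang/String;IJ)Ljava/lang/Object;", and the new/newarray cases expect array dimensions stripped down to the element type. A minimal descriptor-splitting sketch under those assumptions is shown here; ReadOneType and BaseTypeNames are illustrative helpers, not the hir2mpl API.

// Standalone sketch of JVM method-descriptor splitting, matching the order
// the invoke test below checks: return type first, then each parameter.
#include <cassert>
#include <string>
#include <vector>

static std::string ReadOneType(const std::string &desc, size_t &pos) {
  while (desc[pos] == '[') {  // strip array dimensions, keep the element type
    ++pos;
  }
  if (desc[pos] == 'L') {     // reference type: up to and including ';'
    size_t end = desc.find(';', pos);
    std::string name = desc.substr(pos, end - pos + 1);
    pos = end + 1;
    return name;
  }
  return std::string(1, desc[pos++]);  // primitive: I, J, F, D, ...
}

static std::vector<std::string> BaseTypeNames(const std::string &desc) {
  std::vector<std::string> names;
  size_t rparen = desc.find(')');
  size_t pos = rparen + 1;
  names.push_back(ReadOneType(desc, pos));  // return type
  pos = 1;                                  // skip '('
  while (pos < rparen) {
    names.push_back(ReadOneType(desc, pos));
  }
  return names;
}

int main() {
  std::vector<std::string> ans = BaseTypeNames("(Ljava/lang/String;IJ)Ljava/lang/Object;");
  assert(ans.size() == 4);
  assert(ans[0] == "Ljava/lang/Object;");
  assert(ans[1] == "Ljava/lang/String;");
  assert(ans[2] == "I" && ans[3] == "J");
  return 0;
}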
std::vector ans = helper.GetBaseTypeNamesForOP(*op, success); + EXPECT_EQ(success, true); + ASSERT_EQ(ans.size(), 1); + EXPECT_EQ(ans[0], "Ljava/lang/Integer;"); + op->SetRefTypeIdx(0xFFFF); + ans = helper.GetBaseTypeNamesForOP(*op, success); + EXPECT_EQ(success, false); + EXPECT_EQ(ans.size(), 0); +} + +TEST_F(JBCOpcodeHelperTest, GetBaseTypeNamesForOPNew_Array) { + uint16 idxConstClass; + (void)klass.constPool.NewConstClass(idxConstClass, "[Ljava/lang/Float;"); + jbc::JBCOpNew *op = mp->New(allocator, jbc::kOpANewArray, jbc::kOpKindNew, false); + op->SetRefTypeIdx(idxConstClass); + bool success = false; + std::vector ans = helper.GetBaseTypeNamesForOP(*op, success); + EXPECT_EQ(success, true); + ASSERT_EQ(ans.size(), 1); + EXPECT_EQ(ans[0], "Ljava/lang/Float;"); + op->SetRefTypeIdx(0xFFFF); + ans = helper.GetBaseTypeNamesForOP(*op, success); + EXPECT_EQ(success, false); + EXPECT_EQ(ans.size(), 0); +} + +TEST_F(JBCOpcodeHelperTest, GetBaseTypeNamesForOPMultiANewArray) { + uint16 idxConstClass; + (void)klass.constPool.NewConstClass(idxConstClass, "[Ljava/lang/Double;"); + jbc::JBCOpMultiANewArray *op = mp->New(allocator, jbc::kOpMultiANewArray, + jbc::kOpKindMultiANewArray, false); + op->SetRefTypeIdx(idxConstClass); + bool success = false; + std::vector ans = helper.GetBaseTypeNamesForOP(*op, success); + EXPECT_EQ(success, true); + ASSERT_EQ(ans.size(), 1); + EXPECT_EQ(ans[0], "Ljava/lang/Double;"); + op->SetRefTypeIdx(0xFFFF); + ans = helper.GetBaseTypeNamesForOP(*op, success); + EXPECT_EQ(success, false); + EXPECT_EQ(ans.size(), 0); +} + +TEST_F(JBCOpcodeHelperTest, GetBaseTypeNamesForOPTypeCheck) { + uint16 idxConstClass; + (void)klass.constPool.NewConstClass(idxConstClass, "Ljava/lang/Object;"); + jbc::JBCOpTypeCheck *op = mp->New(allocator, jbc::kOpCheckCast, + jbc::kOpKindTypeCheck, false); + op->SetTypeIdx(idxConstClass); + bool success = false; + std::vector ans = helper.GetBaseTypeNamesForOP(*op, success); + EXPECT_EQ(success, true); + ASSERT_EQ(ans.size(), 1); + EXPECT_EQ(ans[0], "Ljava/lang/Object;"); + op->SetTypeIdx(0xFFFF); + ans = helper.GetBaseTypeNamesForOP(*op, success); + EXPECT_EQ(success, false); + EXPECT_EQ(ans.size(), 0); +} +} // namespace maple diff --git a/src/hir2mpl/test/bytecode_input/class/jbc_stack2fe_helper_test.cpp b/src/hir2mpl/test/bytecode_input/class/jbc_stack2fe_helper_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..029eeaba81ab295c223b0e263a860216ace9646c --- /dev/null +++ b/src/hir2mpl/test/bytecode_input/class/jbc_stack2fe_helper_test.cpp @@ -0,0 +1,615 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include +#include +#include +#include "jbc_stack_helper.h" +#include "jbc_stack2fe_helper.h" +#include "feir_test_base.h" +#include "hir2mpl_ut_regx.h" + +namespace maple { +class JBCStack2FEHelperTest : public FEIRTestBase { + public: + JBCStackHelper stackHelper; + JBCStack2FEHelper helper; + JBCStack2FEHelperTest() = default; + ~JBCStack2FEHelperTest() = default; +}; // class JBCStack2FEHelperTest + +TEST_F(JBCStack2FEHelperTest, GetRegNumForSlot) { + helper.SetNStacks(8); + helper.SetNSwaps(2); + helper.SetNLocals(4); + helper.SetNArgs(4); + // 0~7: stack + // 8~9: swap + // 10~13: local + // 14~17: arg + EXPECT_EQ(helper.GetRegNumForSlot(0), 14); + EXPECT_EQ(helper.GetRegNumForSlot(2), 16); + EXPECT_EQ(helper.GetRegNumForSlot(4), 10); + EXPECT_EQ(helper.GetRegNumForSlot(6), 12); +} + +TEST_F(JBCStack2FEHelperTest, PushItem_PopItem) { + helper.SetNStacks(3); + EXPECT_EQ(helper.PushItem(std::make_unique(0), PTY_i32), true); + EXPECT_EQ(helper.PushItem(std::make_unique(1), PTY_i64), true); + EXPECT_EQ(helper.DumpStackInJavaFormat(), "IJJ"); + EXPECT_EQ(helper.DumpStackInInternalFormat(), "NWD"); + EXPECT_EQ(helper.PushItem(std::make_unique(3), PTY_f32), false); + UniqueFEIRVar var = helper.PopItem(PTY_i32); + EXPECT_EQ(var, nullptr); + var = helper.PopItem(PTY_i64); + EXPECT_EQ(var->GetNameRaw(), "Reg1"); + EXPECT_EQ(helper.DumpStackInJavaFormat(), "I"); + EXPECT_EQ(helper.DumpStackInInternalFormat(), "N"); + var = helper.PopItem(PTY_i64); + EXPECT_EQ(var, nullptr); + var = helper.PopItem(PTY_i32); + EXPECT_EQ(var->GetNameRaw(), "Reg0"); + EXPECT_EQ(helper.DumpStackInJavaFormat(), ""); + EXPECT_EQ(helper.DumpStackInInternalFormat(), ""); + var = helper.PopItem(PTY_i32); + EXPECT_EQ(var, nullptr); + EXPECT_EQ(helper.PushItem(std::make_unique(0), PTY_i32), true); + var = helper.PopItem(PTY_f32); + EXPECT_EQ(var, nullptr); + EXPECT_EQ(helper.PushItem(std::make_unique(1), PTY_i64), true); + var = helper.PopItem(PTY_f64); + EXPECT_EQ(var, nullptr); +} + +TEST_F(JBCStack2FEHelperTest, GetRegNumForStack) { + helper.SetNStacks(6); + helper.ClearStack(); + EXPECT_EQ(helper.GetRegNumForStack(), 0); + EXPECT_EQ(helper.PushItem(std::make_unique(0), PTY_i64), true); + EXPECT_EQ(helper.GetRegNumForStack(), 2); + helper.ClearStack(); + EXPECT_EQ(helper.PushItem(std::make_unique(1), PTY_i64), true); + EXPECT_EQ(helper.GetRegNumForStack(), 0); +} + +TEST_F(JBCStack2FEHelperTest, Swap) { + helper.SetNStacks(6); + helper.ClearStack(); + EXPECT_EQ(helper.PushItem(std::make_unique(0), PTY_i32), true); + EXPECT_EQ(helper.PushItem(std::make_unique(1), PTY_f32), true); + EXPECT_EQ(helper.DumpStackInJavaFormat(), "IF"); + EXPECT_EQ(helper.DumpStackInInternalFormat(), "NN"); + EXPECT_EQ(helper.Swap(), true); + EXPECT_EQ(helper.DumpStackInJavaFormat(), "FI"); + EXPECT_EQ(helper.DumpStackInInternalFormat(), "NN"); + UniqueFEIRVar var = helper.PopItem(PTY_i32); + EXPECT_EQ(var->GetNameRaw(), "Reg0"); + var = helper.PopItem(PTY_f32); + EXPECT_EQ(var->GetNameRaw(), "Reg1"); + EXPECT_EQ(helper.DumpStackInJavaFormat(), ""); + EXPECT_EQ(helper.DumpStackInInternalFormat(), ""); + EXPECT_EQ(helper.PushItem(std::make_unique(0), PTY_i64), true); + EXPECT_EQ(helper.PushItem(std::make_unique(2), PTY_f64), true); + EXPECT_EQ(helper.DumpStackInJavaFormat(), "JJDD"); + EXPECT_EQ(helper.DumpStackInInternalFormat(), "WDWD"); + EXPECT_EQ(helper.Swap(), false); + helper.ClearStack(); + EXPECT_EQ(helper.PushItem(std::make_unique(0), PTY_i32), true); + EXPECT_EQ(helper.Swap(), false); +} + +TEST_F(JBCStack2FEHelperTest, Pop) { + 
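The GetRegNumForSlot expectations at the top of this file follow from a register layout of consecutive [stack | swap | local | arg] blocks; with nStacks=8, nSwaps=2, nLocals=4, nArgs=4 the test's comment places locals at 10~13 and arguments at 14~17. One consistent reading of the expected values is sketched below. Note the boundary is ambiguous in the test because nLocals == nArgs; the sketch assumes the first nArgs JBC slots hold the incoming arguments, and RegNumForSlot is a local illustration, not JBCStack2FEHelper's implementation.

// Illustrative mapping from a JBC local-variable slot to a register number.
#include <cassert>
#include <cstdint>

struct RegLayout {
  uint32_t nStacks;
  uint32_t nSwaps;
  uint32_t nLocals;
  uint32_t nArgs;
};

static uint32_t RegNumForSlot(const RegLayout &l, uint32_t slot) {
  if (slot < l.nArgs) {
    // Incoming arguments live in the low JBC slots but map to the arg block.
    return l.nStacks + l.nSwaps + l.nLocals + slot;
  }
  // Remaining slots map to the local block.
  return l.nStacks + l.nSwaps + (slot - l.nArgs);
}

int main() {
  RegLayout layout = {8, 2, 4, 4};  // 0~7 stack, 8~9 swap, 10~13 local, 14~17 arg
  assert(RegNumForSlot(layout, 0) == 14);
  assert(RegNumForSlot(layout, 2) == 16);
  assert(RegNumForSlot(layout, 4) == 10);
  assert(RegNumForSlot(layout, 6) == 12);
  return 0;
}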
helper.SetNStacks(6); + helper.ClearStack(); + EXPECT_EQ(helper.PushItem(std::make_unique(0), PTY_i32), true); + EXPECT_EQ(helper.PushItem(std::make_unique(1), PTY_f32), true); + EXPECT_EQ(helper.PushItem(std::make_unique(2), PTY_ref), true); + EXPECT_EQ(helper.DumpStackInJavaFormat(), "IFR"); + EXPECT_EQ(helper.DumpStackInInternalFormat(), "NNN"); + EXPECT_EQ(helper.Pop(jbc::kOpPop), true); + EXPECT_EQ(helper.DumpStackInJavaFormat(), "IF"); + EXPECT_EQ(helper.DumpStackInInternalFormat(), "NN"); + EXPECT_EQ(helper.Pop(jbc::kOpPop2), true); + EXPECT_EQ(helper.DumpStackInJavaFormat(), ""); + EXPECT_EQ(helper.DumpStackInInternalFormat(), ""); + EXPECT_EQ(helper.Pop(jbc::kOpPop), false); + EXPECT_EQ(helper.Pop(jbc::kOpPop2), false); + EXPECT_EQ(helper.Pop(jbc::kOpDup), false); +} + +TEST_F(JBCStack2FEHelperTest, Pop2) { + helper.SetNStacks(6); + helper.ClearStack(); + EXPECT_EQ(helper.PushItem(std::make_unique(0), PTY_i64), true); + EXPECT_EQ(helper.PushItem(std::make_unique(2), PTY_f64), true); + EXPECT_EQ(helper.DumpStackInJavaFormat(), "JJDD"); + EXPECT_EQ(helper.DumpStackInInternalFormat(), "WDWD"); + EXPECT_EQ(helper.Pop(jbc::kOpPop), false); + EXPECT_EQ(helper.Pop(jbc::kOpPop2), true); + EXPECT_EQ(helper.DumpStackInJavaFormat(), "JJ"); + EXPECT_EQ(helper.DumpStackInInternalFormat(), "WD"); + EXPECT_EQ(helper.Pop(jbc::kOpPop2), true); + EXPECT_EQ(helper.DumpStackInJavaFormat(), ""); + EXPECT_EQ(helper.DumpStackInInternalFormat(), ""); + EXPECT_EQ(helper.Pop(jbc::kOpPop2), false); + EXPECT_EQ(helper.Pop(jbc::kOpDup), false); +} + +TEST_F(JBCStack2FEHelperTest, Dup_Correct) { + helper.SetNStacks(6); + helper.ClearStack(); + EXPECT_EQ(helper.PushItem(std::make_unique(0), PTY_i32), true); + EXPECT_EQ(helper.DumpStackInJavaFormat(), "I"); + EXPECT_EQ(helper.DumpStackInInternalFormat(), "N"); + EXPECT_EQ(helper.Dup(jbc::kOpDup), true); + EXPECT_EQ(helper.DumpStackInJavaFormat(), "II"); + EXPECT_EQ(helper.DumpStackInInternalFormat(), "NN"); + PrimType pty; + UniqueFEIRVar var1 = helper.PopItem(false, pty); + EXPECT_EQ(pty, PTY_i32); + EXPECT_EQ(var1->GetNameRaw(), "Reg0"); + UniqueFEIRVar var2 = helper.PopItem(false, pty); + EXPECT_EQ(pty, PTY_i32); + EXPECT_EQ(var2->GetNameRaw(), "Reg0"); +} + +TEST_F(JBCStack2FEHelperTest, Dup_Error_Empty) { + helper.SetNStacks(6); + helper.ClearStack(); + EXPECT_EQ(helper.Dup(jbc::kOpDup), false); +} + +TEST_F(JBCStack2FEHelperTest, Dup_Error_WrongType) { + helper.SetNStacks(6); + helper.ClearStack(); + EXPECT_EQ(helper.PushItem(std::make_unique(0), PTY_i64), true); + EXPECT_EQ(helper.DumpStackInJavaFormat(), "JJ"); + EXPECT_EQ(helper.DumpStackInInternalFormat(), "WD"); + EXPECT_EQ(helper.Dup(jbc::kOpDup), false); +} + +TEST_F(JBCStack2FEHelperTest, DupX1_Correct) { + helper.SetNStacks(6); + helper.ClearStack(); + // before: Reg0_ref, Reg1_i32, Reg2_f32, -> + // after: Reg0_ref, Reg2_f32, Reg1_i32, Reg2_f32, -> + EXPECT_EQ(helper.PushItem(std::make_unique(0), PTY_ref), true); + EXPECT_EQ(helper.PushItem(std::make_unique(1), PTY_i32), true); + EXPECT_EQ(helper.PushItem(std::make_unique(2), PTY_f32), true); + EXPECT_EQ(helper.DumpStackInJavaFormat(), "RIF"); + EXPECT_EQ(helper.DumpStackInInternalFormat(), "NNN"); + EXPECT_EQ(helper.Dup(jbc::kOpDupX1), true); + EXPECT_EQ(helper.DumpStackInJavaFormat(), "RFIF"); + EXPECT_EQ(helper.DumpStackInInternalFormat(), "NNNN"); + PrimType pty; + UniqueFEIRVar var = helper.PopItem(false, pty); + EXPECT_EQ(pty, PTY_f32); + EXPECT_EQ(var->GetNameRaw(), "Reg2"); + UniqueFEIRVar var2 = helper.PopItem(false, pty); + 
EXPECT_EQ(pty, PTY_i32); + EXPECT_EQ(var2->GetNameRaw(), "Reg1"); + UniqueFEIRVar var3 = helper.PopItem(false, pty); + EXPECT_EQ(pty, PTY_f32); + EXPECT_EQ(var3->GetNameRaw(), "Reg2"); +} + +TEST_F(JBCStack2FEHelperTest, DupX1_Error_NotEnough) { + helper.SetNStacks(6); + helper.ClearStack(); + // before: -> + // after: Error + EXPECT_EQ(helper.Dup(jbc::kOpDupX1), false); + // before: -> + // after: Error + EXPECT_EQ(helper.PushItem(std::make_unique(0), PTY_ref), true); + EXPECT_EQ(helper.Dup(jbc::kOpDupX1), false); +} + +TEST_F(JBCStack2FEHelperTest, DupX1_Error_WrongType) { + helper.SetNStacks(6); + helper.ClearStack(); + // before: Reg0_ref, Reg1_i64, -> + // after: Error + EXPECT_EQ(helper.PushItem(std::make_unique(0), PTY_ref), true); + EXPECT_EQ(helper.PushItem(std::make_unique(1), PTY_i64), true); + EXPECT_EQ(helper.Dup(jbc::kOpDupX1), false); +} + +TEST_F(JBCStack2FEHelperTest, DupX2_Correct_S1) { + helper.SetNStacks(6); + helper.ClearStack(); + // before: Reg0_ref, Reg1_ref, Reg2_i32, Reg3_f32, -> + // after: Reg0_ref, Reg3_f32, Reg1_ref, Reg2_i32, Reg3_f32, -> + EXPECT_EQ(helper.PushItem(std::make_unique(0), PTY_ref), true); + EXPECT_EQ(helper.PushItem(std::make_unique(1), PTY_ref), true); + EXPECT_EQ(helper.PushItem(std::make_unique(2), PTY_i32), true); + EXPECT_EQ(helper.PushItem(std::make_unique(3), PTY_f32), true); + EXPECT_EQ(helper.DumpStackInJavaFormat(), "RRIF"); + EXPECT_EQ(helper.DumpStackInInternalFormat(), "NNNN"); + EXPECT_EQ(helper.Dup(jbc::kOpDupX2), true); + EXPECT_EQ(helper.DumpStackInJavaFormat(), "RFRIF"); + EXPECT_EQ(helper.DumpStackInInternalFormat(), "NNNNN"); + PrimType pty; + UniqueFEIRVar var = helper.PopItem(false, pty); + EXPECT_EQ(pty, PTY_f32); + EXPECT_EQ(var->GetNameRaw(), "Reg3"); + UniqueFEIRVar var2 = helper.PopItem(false, pty); + EXPECT_EQ(pty, PTY_i32); + EXPECT_EQ(var2->GetNameRaw(), "Reg2"); + UniqueFEIRVar var3 = helper.PopItem(false, pty); + EXPECT_EQ(pty, PTY_ref); + EXPECT_EQ(var3->GetNameRaw(), "Reg1"); + UniqueFEIRVar var4 = helper.PopItem(false, pty); + EXPECT_EQ(pty, PTY_f32); + EXPECT_EQ(var4->GetNameRaw(), "Reg3"); +} + +TEST_F(JBCStack2FEHelperTest, DupX2_Correct_S2) { + helper.SetNStacks(6); + helper.ClearStack(); + // before: Reg0_ref, Reg1_i64, Reg3_i32, -> + // after: Reg0_ref, Reg3_i32, Reg1_i64, Reg3_i32, -> + EXPECT_EQ(helper.PushItem(std::make_unique(0), PTY_ref), true); + EXPECT_EQ(helper.PushItem(std::make_unique(1), PTY_i64), true); + EXPECT_EQ(helper.PushItem(std::make_unique(3), PTY_i32), true); + EXPECT_EQ(helper.DumpStackInJavaFormat(), "RJJI"); + EXPECT_EQ(helper.DumpStackInInternalFormat(), "NWDN"); + EXPECT_EQ(helper.Dup(jbc::kOpDupX2), true); + EXPECT_EQ(helper.DumpStackInJavaFormat(), "RIJJI"); + EXPECT_EQ(helper.DumpStackInInternalFormat(), "NNWDN"); + PrimType pty; + UniqueFEIRVar var1 = helper.PopItem(false, pty); + EXPECT_EQ(pty, PTY_i32); + EXPECT_EQ(var1->GetNameRaw(), "Reg3"); + UniqueFEIRVar var2 = helper.PopItem(true, pty); + EXPECT_EQ(pty, PTY_i64); + EXPECT_EQ(var2->GetNameRaw(), "Reg1"); + UniqueFEIRVar var3 = helper.PopItem(false, pty); + EXPECT_EQ(pty, PTY_i32); + EXPECT_EQ(var3->GetNameRaw(), "Reg3"); +} + +TEST_F(JBCStack2FEHelperTest, Dup2_Correct_S1) { + helper.SetNStacks(6); + helper.ClearStack(); + // before: Reg0_ref, Reg1_i32, Reg2_f32, -> + // after: Reg0_ref, Reg1_i32, Reg2_f32, Reg1_i32, Reg2_f32, -> + EXPECT_EQ(helper.PushItem(std::make_unique(0), PTY_ref), true); + EXPECT_EQ(helper.PushItem(std::make_unique(1), PTY_i32), true); + EXPECT_EQ(helper.PushItem(std::make_unique(2), PTY_f32), 
true); + EXPECT_EQ(helper.DumpStackInJavaFormat(), "RIF"); + EXPECT_EQ(helper.DumpStackInInternalFormat(), "NNN"); + EXPECT_EQ(helper.Dup(jbc::kOpDup2), true); + EXPECT_EQ(helper.DumpStackInJavaFormat(), "RIFIF"); + EXPECT_EQ(helper.DumpStackInInternalFormat(), "NNNNN"); + PrimType pty; + UniqueFEIRVar var1 = helper.PopItem(false, pty); + EXPECT_EQ(pty, PTY_f32); + EXPECT_EQ(var1->GetNameRaw(), "Reg2"); + UniqueFEIRVar var2 = helper.PopItem(false, pty); + EXPECT_EQ(pty, PTY_i32); + EXPECT_EQ(var2->GetNameRaw(), "Reg1"); + UniqueFEIRVar var3 = helper.PopItem(false, pty); + EXPECT_EQ(pty, PTY_f32); + EXPECT_EQ(var3->GetNameRaw(), "Reg2"); + UniqueFEIRVar var4 = helper.PopItem(false, pty); + EXPECT_EQ(pty, PTY_i32); + EXPECT_EQ(var4->GetNameRaw(), "Reg1"); +} + +TEST_F(JBCStack2FEHelperTest, Dup2_Correct_S2) { + helper.SetNStacks(6); + helper.ClearStack(); + // before: Reg0_ref, Reg1_i64 -> + // after: Reg0_ref, Reg1_i64, Reg1_i64 -> + EXPECT_EQ(helper.PushItem(std::make_unique(0), PTY_ref), true); + EXPECT_EQ(helper.PushItem(std::make_unique(1), PTY_i64), true); + EXPECT_EQ(helper.DumpStackInJavaFormat(), "RJJ"); + EXPECT_EQ(helper.DumpStackInInternalFormat(), "NWD"); + EXPECT_EQ(helper.Dup(jbc::kOpDup2), true); + EXPECT_EQ(helper.DumpStackInJavaFormat(), "RJJJJ"); + EXPECT_EQ(helper.DumpStackInInternalFormat(), "NWDWD"); + PrimType pty; + UniqueFEIRVar var1 = helper.PopItem(true, pty); + EXPECT_EQ(pty, PTY_i64); + EXPECT_EQ(var1->GetNameRaw(), "Reg1"); + UniqueFEIRVar var2 = helper.PopItem(true, pty); + EXPECT_EQ(pty, PTY_i64); + EXPECT_EQ(var2->GetNameRaw(), "Reg1"); +} + +TEST_F(JBCStack2FEHelperTest, Dup2X1_Correct_S1) { + helper.SetNStacks(6); + helper.ClearStack(); + // before: Reg0_ref, Reg1_i32, Reg2_f32, Reg3_ref -> + // after: Reg0_ref, Reg2_f32, Reg3_ref, Reg1_i32, Reg2_f32, Reg3_ref -> + EXPECT_EQ(helper.PushItem(std::make_unique(0), PTY_ref), true); + EXPECT_EQ(helper.PushItem(std::make_unique(1), PTY_i32), true); + EXPECT_EQ(helper.PushItem(std::make_unique(2), PTY_f32), true); + EXPECT_EQ(helper.PushItem(std::make_unique(3), PTY_ref), true); + EXPECT_EQ(helper.DumpStackInJavaFormat(), "RIFR"); + EXPECT_EQ(helper.DumpStackInInternalFormat(), "NNNN"); + EXPECT_EQ(helper.Dup(jbc::kOpDup2X1), true); + EXPECT_EQ(helper.DumpStackInJavaFormat(), "RFRIFR"); + EXPECT_EQ(helper.DumpStackInInternalFormat(), "NNNNNN"); + PrimType pty; + UniqueFEIRVar var1 = helper.PopItemAuto(pty); + EXPECT_EQ(pty, PTY_ref); + EXPECT_EQ(var1->GetNameRaw(), "Reg3"); + UniqueFEIRVar var2 = helper.PopItemAuto(pty); + EXPECT_EQ(pty, PTY_f32); + EXPECT_EQ(var2->GetNameRaw(), "Reg2"); + UniqueFEIRVar var3 = helper.PopItemAuto(pty); + EXPECT_EQ(pty, PTY_i32); + EXPECT_EQ(var3->GetNameRaw(), "Reg1"); + UniqueFEIRVar var4 = helper.PopItemAuto(pty); + EXPECT_EQ(pty, PTY_ref); + EXPECT_EQ(var4->GetNameRaw(), "Reg3"); + UniqueFEIRVar var5 = helper.PopItemAuto(pty); + EXPECT_EQ(pty, PTY_f32); + EXPECT_EQ(var5->GetNameRaw(), "Reg2"); +} + +TEST_F(JBCStack2FEHelperTest, Dup2X1_Correct_S2) { + helper.SetNStacks(6); + helper.ClearStack(); + // before: Reg0_ref, Reg1_i32, Reg2_i64 -> + // after: Reg0_ref, Reg2_i64, Reg1_i32, Reg2_i64 -> + EXPECT_EQ(helper.PushItem(std::make_unique(0), PTY_ref), true); + EXPECT_EQ(helper.PushItem(std::make_unique(1), PTY_i32), true); + EXPECT_EQ(helper.PushItem(std::make_unique(2), PTY_i64), true); + EXPECT_EQ(helper.DumpStackInJavaFormat(), "RIJJ"); + EXPECT_EQ(helper.DumpStackInInternalFormat(), "NNWD"); + EXPECT_EQ(helper.Dup(jbc::kOpDup2X1), true); + 
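The Dup2X1 cases hinge on the JVM's two forms of dup2_x1: either two category-1 values, or one category-2 value (long/double), are re-inserted beneath the next value down. A self-contained sketch of those two forms on a toy stack is given here; SlotValue and Dup2X1 are illustrative names, not part of the helper under test.

// Toy operand-stack model: each logical value carries its computational-type
// category (1 for int/float/ref, 2 for long/double).
#include <cassert>
#include <string>
#include <vector>

struct SlotValue {
  std::string name;
  int category;  // 1: int/float/ref, 2: long/double
};

// Form 1: ..., v3(cat1), v2(cat1), v1(cat1) -> ..., v2, v1, v3, v2, v1
// Form 2: ..., v2(cat1), v1(cat2)           -> ..., v1, v2, v1
static void Dup2X1(std::vector<SlotValue> &stack) {
  SlotValue v1 = stack.back();
  stack.pop_back();
  if (v1.category == 2) {
    SlotValue v2 = stack.back();
    stack.pop_back();
    stack.push_back(v1);
    stack.push_back(v2);
    stack.push_back(v1);
  } else {
    SlotValue v2 = stack.back();
    stack.pop_back();
    SlotValue v3 = stack.back();
    stack.pop_back();
    stack.push_back(v2);
    stack.push_back(v1);
    stack.push_back(v3);
    stack.push_back(v2);
    stack.push_back(v1);
  }
}

int main() {
  // Mirrors Dup2X1_Correct_S2: ref, int, long  ->  ref, long, int, long.
  std::vector<SlotValue> stack = {{"Reg0_ref", 1}, {"Reg1_i32", 1}, {"Reg2_i64", 2}};
  Dup2X1(stack);
  assert(stack.size() == 4);
  assert(stack[1].name == "Reg2_i64");
  assert(stack[2].name == "Reg1_i32");
  assert(stack[3].name == "Reg2_i64");
  return 0;
}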
EXPECT_EQ(helper.DumpStackInJavaFormat(), "RJJIJJ"); + EXPECT_EQ(helper.DumpStackInInternalFormat(), "NWDNWD"); + PrimType pty; + UniqueFEIRVar var1 = helper.PopItemAuto(pty); + EXPECT_EQ(pty, PTY_i64); + EXPECT_EQ(var1->GetNameRaw(), "Reg2"); + UniqueFEIRVar var2 = helper.PopItemAuto(pty); + EXPECT_EQ(pty, PTY_i32); + EXPECT_EQ(var2->GetNameRaw(), "Reg1"); + UniqueFEIRVar var3 = helper.PopItemAuto(pty); + EXPECT_EQ(pty, PTY_i64); + EXPECT_EQ(var3->GetNameRaw(), "Reg2"); +} + +TEST_F(JBCStack2FEHelperTest, Dup2X2_Correct_S1) { + helper.SetNStacks(8); + helper.ClearStack(); + // before: Reg0_ref, Reg1_i32, Reg2_f32, Reg3_i32, Reg4_ref -> + // after: Reg0_ref, Reg3_i32, Reg4_ref, Reg1_i32, Reg2_f32, Reg3_i32, Reg4_ref -> + EXPECT_EQ(helper.PushItem(std::make_unique(0), PTY_ref), true); + EXPECT_EQ(helper.PushItem(std::make_unique(1), PTY_i32), true); + EXPECT_EQ(helper.PushItem(std::make_unique(2), PTY_f32), true); + EXPECT_EQ(helper.PushItem(std::make_unique(3), PTY_i32), true); + EXPECT_EQ(helper.PushItem(std::make_unique(4), PTY_ref), true); + EXPECT_EQ(helper.DumpStackInJavaFormat(), "RIFIR"); + EXPECT_EQ(helper.DumpStackInInternalFormat(), "NNNNN"); + EXPECT_EQ(helper.Dup(jbc::kOpDup2X2), true); + EXPECT_EQ(helper.DumpStackInJavaFormat(), "RIRIFIR"); + EXPECT_EQ(helper.DumpStackInInternalFormat(), "NNNNNNN"); + PrimType pty; + UniqueFEIRVar var1 = helper.PopItemAuto(pty); + EXPECT_EQ(pty, PTY_ref); + EXPECT_EQ(var1->GetNameRaw(), "Reg4"); + UniqueFEIRVar var2 = helper.PopItemAuto(pty); + EXPECT_EQ(pty, PTY_i32); + EXPECT_EQ(var2->GetNameRaw(), "Reg3"); + UniqueFEIRVar var3 = helper.PopItemAuto(pty); + EXPECT_EQ(pty, PTY_f32); + EXPECT_EQ(var3->GetNameRaw(), "Reg2"); + UniqueFEIRVar var4 = helper.PopItemAuto(pty); + EXPECT_EQ(pty, PTY_i32); + EXPECT_EQ(var4->GetNameRaw(), "Reg1"); + UniqueFEIRVar var5 = helper.PopItemAuto(pty); + EXPECT_EQ(pty, PTY_ref); + EXPECT_EQ(var5->GetNameRaw(), "Reg4"); + UniqueFEIRVar var6 = helper.PopItemAuto(pty); + EXPECT_EQ(pty, PTY_i32); + EXPECT_EQ(var6->GetNameRaw(), "Reg3"); +} + +TEST_F(JBCStack2FEHelperTest, Dup2X2_Correct_S2) { + helper.SetNStacks(8); + helper.ClearStack(); + // before: Reg0_ref, Reg1_i32, Reg2_f32, Reg3_i64 -> + // after: Reg0_ref, Reg3_i64, Reg1_i32, Reg2_f32, Reg3_i64 -> + EXPECT_EQ(helper.PushItem(std::make_unique(0), PTY_ref), true); + EXPECT_EQ(helper.PushItem(std::make_unique(1), PTY_i32), true); + EXPECT_EQ(helper.PushItem(std::make_unique(2), PTY_f32), true); + EXPECT_EQ(helper.PushItem(std::make_unique(3), PTY_i64), true); + EXPECT_EQ(helper.DumpStackInJavaFormat(), "RIFJJ"); + EXPECT_EQ(helper.DumpStackInInternalFormat(), "NNNWD"); + EXPECT_EQ(helper.Dup(jbc::kOpDup2X2), true); + EXPECT_EQ(helper.DumpStackInJavaFormat(), "RJJIFJJ"); + EXPECT_EQ(helper.DumpStackInInternalFormat(), "NWDNNWD"); + PrimType pty; + UniqueFEIRVar var1 = helper.PopItemAuto(pty); + EXPECT_EQ(pty, PTY_i64); + EXPECT_EQ(var1->GetNameRaw(), "Reg3"); + UniqueFEIRVar var2 = helper.PopItemAuto(pty); + EXPECT_EQ(pty, PTY_f32); + EXPECT_EQ(var2->GetNameRaw(), "Reg2"); + UniqueFEIRVar var3 = helper.PopItemAuto(pty); + EXPECT_EQ(pty, PTY_i32); + EXPECT_EQ(var3->GetNameRaw(), "Reg1"); + UniqueFEIRVar var4 = helper.PopItemAuto(pty); + EXPECT_EQ(pty, PTY_i64); + EXPECT_EQ(var4->GetNameRaw(), "Reg3"); +} + +TEST_F(JBCStack2FEHelperTest, Dup2X2_Correct_S3) { + helper.SetNStacks(8); + helper.ClearStack(); + // before: Reg0_ref, Reg1_i64, Reg3_i32, Reg4_f32 -> + // after: Reg0_ref, Reg3_i32, Reg4_f32, Reg1_i64, Reg3_i32, Reg4_f32 -> + 
EXPECT_EQ(helper.PushItem(std::make_unique(0), PTY_ref), true); + EXPECT_EQ(helper.PushItem(std::make_unique(1), PTY_i64), true); + EXPECT_EQ(helper.PushItem(std::make_unique(3), PTY_i32), true); + EXPECT_EQ(helper.PushItem(std::make_unique(4), PTY_f32), true); + EXPECT_EQ(helper.DumpStackInJavaFormat(), "RJJIF"); + EXPECT_EQ(helper.DumpStackInInternalFormat(), "NWDNN"); + EXPECT_EQ(helper.Dup(jbc::kOpDup2X2), true); + EXPECT_EQ(helper.DumpStackInJavaFormat(), "RIFJJIF"); + EXPECT_EQ(helper.DumpStackInInternalFormat(), "NNNWDNN"); + PrimType pty; + UniqueFEIRVar var1 = helper.PopItemAuto(pty); + EXPECT_EQ(pty, PTY_f32); + EXPECT_EQ(var1->GetNameRaw(), "Reg4"); + UniqueFEIRVar var2 = helper.PopItemAuto(pty); + EXPECT_EQ(pty, PTY_i32); + EXPECT_EQ(var2->GetNameRaw(), "Reg3"); + UniqueFEIRVar var3 = helper.PopItemAuto(pty); + EXPECT_EQ(pty, PTY_i64); + EXPECT_EQ(var3->GetNameRaw(), "Reg1"); + UniqueFEIRVar var4 = helper.PopItemAuto(pty); + EXPECT_EQ(pty, PTY_f32); + EXPECT_EQ(var4->GetNameRaw(), "Reg4"); + UniqueFEIRVar var5 = helper.PopItemAuto(pty); + EXPECT_EQ(pty, PTY_i32); + EXPECT_EQ(var5->GetNameRaw(), "Reg3"); +} + +TEST_F(JBCStack2FEHelperTest, Dup2X2_Correct_S4) { + helper.SetNStacks(8); + helper.ClearStack(); + // before: Reg0_ref, Reg1_i64, Reg3_f64 -> + // after: Reg0_ref, Reg3_f64, Reg1_i64, Reg3_f64 -> + EXPECT_EQ(helper.PushItem(std::make_unique(0), PTY_ref), true); + EXPECT_EQ(helper.PushItem(std::make_unique(1), PTY_i64), true); + EXPECT_EQ(helper.PushItem(std::make_unique(3), PTY_f64), true); + EXPECT_EQ(helper.DumpStackInJavaFormat(), "RJJDD"); + EXPECT_EQ(helper.DumpStackInInternalFormat(), "NWDWD"); + EXPECT_EQ(helper.Dup(jbc::kOpDup2X2), true); + EXPECT_EQ(helper.DumpStackInJavaFormat(), "RDDJJDD"); + EXPECT_EQ(helper.DumpStackInInternalFormat(), "NWDWDWD"); + PrimType pty; + UniqueFEIRVar var1 = helper.PopItemAuto(pty); + EXPECT_EQ(pty, PTY_f64); + EXPECT_EQ(var1->GetNameRaw(), "Reg3"); + UniqueFEIRVar var2 = helper.PopItemAuto(pty); + EXPECT_EQ(pty, PTY_i64); + EXPECT_EQ(var2->GetNameRaw(), "Reg1"); + UniqueFEIRVar var3 = helper.PopItemAuto(pty); + EXPECT_EQ(pty, PTY_f64); + EXPECT_EQ(var3->GetNameRaw(), "Reg3"); +} + +TEST_F(JBCStack2FEHelperTest, GenerateSwapStmts_Test1) { + helper.SetNStacks(5); + helper.SetNSwaps(5); + helper.ClearStack(); + // stack: Reg0_i32, Ref1_i64, Reg3_f64 + EXPECT_EQ(helper.PushItem(std::make_unique(0, PTY_i32), PTY_i32), true); + EXPECT_EQ(helper.PushItem(std::make_unique(1, PTY_i64), PTY_i64), true); + EXPECT_EQ(helper.PushItem(std::make_unique(3, PTY_f64), PTY_f64), true); + std::list stmts = helper.GenerateSwapStmts(); + ASSERT_EQ(stmts.size(), 3); + std::list::iterator it = stmts.begin(); + std::list mirStmts0 = it->get()->GenMIRStmts(mirBuilder); + it++; + std::list mirStmts1 = it->get()->GenMIRStmts(mirBuilder); + it++; + std::list mirStmts2 = it->get()->GenMIRStmts(mirBuilder); + + RedirectCout(); + // stmt 0: dassign %Reg5_f64 0 (dread f64 %Reg3_f64) + ASSERT_EQ(mirStmts0.size(), 1); + mirStmts0.front()->Dump(); + std::string dumpStr = GetBufferString(); + std::string pattern = std::string("dassign %Reg5_D 0 \\(dread f64 %Reg3_D\\)") + HIR2MPLUTRegx::Any(); + EXPECT_EQ(HIR2MPLUTRegx::Match(dumpStr, pattern), true); + + // stmt 1: dassign %Reg7_i64 0 (dread i64 %Reg1_i64) + ClearBufferString(); + ASSERT_EQ(mirStmts1.size(), 1); + mirStmts1.front()->Dump(); + dumpStr = GetBufferString(); + pattern = std::string("dassign %Reg7_J 0 \\(dread i64 %Reg1_J\\)") + HIR2MPLUTRegx::Any(); + EXPECT_EQ(HIR2MPLUTRegx::Match(dumpStr, pattern), 
true); + + // stmt 2: dassign %Reg9_i32 0 (dread i32 %Reg0_i32) + ClearBufferString(); + ASSERT_EQ(mirStmts2.size(), 1); + mirStmts2.front()->Dump(); + dumpStr = GetBufferString(); + pattern = std::string("dassign %Reg9_I 0 \\(dread i32 %Reg0_I\\)") + HIR2MPLUTRegx::Any(); + EXPECT_EQ(HIR2MPLUTRegx::Match(dumpStr, pattern), true); + RestoreCout(); +} + +TEST_F(JBCStack2FEHelperTest, LoadSwapStack_Test1) { + stackHelper.Reset(); + helper.SetNStacks(5); + helper.SetNSwaps(5); + stackHelper.PushItem(jbc::JBCPrimType::kTypeInt); + stackHelper.PushItem(jbc::JBCPrimType::kTypeLong); + stackHelper.PushItem(jbc::JBCPrimType::kTypeDouble); + bool success; + std::list stmts = helper.LoadSwapStack(stackHelper, success); + EXPECT_EQ(success, true); + ASSERT_EQ(stmts.size(), 3); + std::list::iterator it = stmts.begin(); + std::list mirStmts0 = it->get()->GenMIRStmts(mirBuilder); + it++; + std::list mirStmts1 = it->get()->GenMIRStmts(mirBuilder); + it++; + std::list mirStmts2 = it->get()->GenMIRStmts(mirBuilder); + + // swap detail + // swap stack: Reg9_i32, Reg7_i64, Reg5_f64 + // jbc stack: Reg0_i32, Reg1_i64, Reg3_f64 + + // stack check + PrimType pty; + UniqueFEIRVar var1 = helper.PopItemAuto(pty); + EXPECT_EQ(pty, PTY_f64); + EXPECT_EQ(var1->GetNameRaw(), "Reg3"); + UniqueFEIRVar var2 = helper.PopItemAuto(pty); + EXPECT_EQ(pty, PTY_i64); + EXPECT_EQ(var2->GetNameRaw(), "Reg1"); + UniqueFEIRVar var3 = helper.PopItemAuto(pty); + EXPECT_EQ(pty, PTY_i32); + EXPECT_EQ(var3->GetNameRaw(), "Reg0"); + + // stmts check + RedirectCout(); + // stmt 0: dassign %Reg0_i32 0 (dread i32 %Reg9_i32) + ASSERT_EQ(mirStmts0.size(), 1); + mirStmts0.front()->Dump(); + std::string dumpStr = GetBufferString(); + std::string pattern = std::string("dassign %Reg0_I 0 \\(dread i32 %Reg9_I\\)") + HIR2MPLUTRegx::Any(); + EXPECT_EQ(HIR2MPLUTRegx::Match(dumpStr, pattern), true); + + // stmt 1: dassign %Reg1_i64 0 (dread i64 %Reg7_i64) + ClearBufferString(); + ASSERT_EQ(mirStmts1.size(), 1); + mirStmts1.front()->Dump(); + dumpStr = GetBufferString(); + pattern = std::string("dassign %Reg1_J 0 \\(dread i64 %Reg7_J\\)") + HIR2MPLUTRegx::Any(); + EXPECT_EQ(HIR2MPLUTRegx::Match(dumpStr, pattern), true); + + // stmt 2: dassign %Reg3_f64 0 (dread f64 %Reg5_f64) + ClearBufferString(); + ASSERT_EQ(mirStmts2.size(), 1); + mirStmts2.front()->Dump(); + dumpStr = GetBufferString(); + pattern = std::string("dassign %Reg3_D 0 \\(dread f64 %Reg5_D\\)") + HIR2MPLUTRegx::Any(); + EXPECT_EQ(HIR2MPLUTRegx::Match(dumpStr, pattern), true); + RestoreCout(); +} +} // namespace maple \ No newline at end of file diff --git a/src/hir2mpl/test/bytecode_input/class/jbc_stack_helper_test.cpp b/src/hir2mpl/test/bytecode_input/class/jbc_stack_helper_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..68296f9c655169b2cb16404e0dc08d7e57e9e04d --- /dev/null +++ b/src/hir2mpl/test/bytecode_input/class/jbc_stack_helper_test.cpp @@ -0,0 +1,234 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */
+#include <gtest/gtest.h>
+#include <vector>
+#include "jbc_stack_helper.h"
+
+namespace maple {
+TEST(JBCStackHelper, PushItem_PopItem) {
+  JBCStackHelper stackHelper;
+  stackHelper.PushItem(jbc::JBCPrimType::kTypeInt);
+  EXPECT_EQ(stackHelper.PopItem(jbc::JBCPrimType::kTypeInt), true);
+  stackHelper.Reset();
+  stackHelper.PushItem(jbc::JBCPrimType::kTypeInt);
+  EXPECT_EQ(stackHelper.PopItem(jbc::JBCPrimType::kTypeFloat), false);
+  stackHelper.Reset();
+  stackHelper.PushItem(jbc::JBCPrimType::kTypeLong);
+  EXPECT_EQ(stackHelper.PopItem(jbc::JBCPrimType::kTypeInt), false);
+  stackHelper.Reset();
+  stackHelper.PushItem(jbc::JBCPrimType::kTypeLong);
+  EXPECT_EQ(stackHelper.PopItem(jbc::JBCPrimType::kTypeLong), true);
+  stackHelper.Reset();
+  stackHelper.PushItem(jbc::JBCPrimType::kTypeDouble);
+  EXPECT_EQ(stackHelper.PopItem(jbc::JBCPrimType::kTypeDouble), true);
+  stackHelper.Reset();
+  EXPECT_EQ(stackHelper.PopItem(jbc::JBCPrimType::kTypeDouble), false);
+}
+
+TEST(JBCStackHelper, Dup) {
+  JBCStackHelper stackHelper;
+  stackHelper.PushItem(jbc::JBCPrimType::kTypeInt);
+  EXPECT_EQ(stackHelper.Dup(jbc::kOpDup), true);
+  std::vector<jbc::JBCPrimType> types({ jbc::JBCPrimType::kTypeInt, jbc::JBCPrimType::kTypeInt });
+  EXPECT_EQ(stackHelper.PopItems(types), true);
+
+  stackHelper.Reset();
+  EXPECT_EQ(stackHelper.Dup(jbc::kOpDup), false);
+
+  stackHelper.Reset();
+  stackHelper.PushItem(jbc::JBCPrimType::kTypeLong);
+  EXPECT_EQ(stackHelper.Dup(jbc::kOpDup), false);
+}
+
+TEST(JBCStackHelper, DupX1) {
+  JBCStackHelper stackHelper;
+  stackHelper.PushItem(jbc::JBCPrimType::kTypeInt);
+  stackHelper.PushItem(jbc::JBCPrimType::kTypeFloat);
+  EXPECT_EQ(stackHelper.Dup(jbc::kOpDupX1), true);
+  std::vector<jbc::JBCPrimType> types({ jbc::JBCPrimType::kTypeFloat, jbc::JBCPrimType::kTypeInt,
+                                        jbc::JBCPrimType::kTypeFloat });
+  EXPECT_EQ(stackHelper.PopItems(types), true);
+
+  stackHelper.Reset();
+  EXPECT_EQ(stackHelper.Dup(jbc::kOpDupX1), false);
+
+  stackHelper.Reset();
+  stackHelper.PushItem(jbc::JBCPrimType::kTypeInt);
+  EXPECT_EQ(stackHelper.Dup(jbc::kOpDupX1), false);
+
+  stackHelper.Reset();
+  stackHelper.PushItem(jbc::JBCPrimType::kTypeLong);
+  EXPECT_EQ(stackHelper.Dup(jbc::kOpDupX1), false);
+
+  stackHelper.Reset();
+  stackHelper.PushItem(jbc::JBCPrimType::kTypeInt);
+  stackHelper.PushItem(jbc::JBCPrimType::kTypeLong);
+  EXPECT_EQ(stackHelper.Dup(jbc::kOpDupX1), false);
+
+  stackHelper.Reset();
+  stackHelper.PushItem(jbc::JBCPrimType::kTypeLong);
+  stackHelper.PushItem(jbc::JBCPrimType::kTypeInt);
+  EXPECT_EQ(stackHelper.Dup(jbc::kOpDupX1), false);
+
+  stackHelper.Reset();
+  stackHelper.PushItem(jbc::JBCPrimType::kTypeLong);
+  stackHelper.PushItem(jbc::JBCPrimType::kTypeLong);
+  EXPECT_EQ(stackHelper.Dup(jbc::kOpDupX1), false);
+}
+
+TEST(JBCStackHelper, DupX2) {
+  JBCStackHelper stackHelper;
+  // mode 1: ..., typeI, typeI, typeI ->
+  // input: ..., I, S, B ->
+  // output: ..., B, I, S, B ->
+  stackHelper.PushItem(jbc::JBCPrimType::kTypeInt);
+  stackHelper.PushItem(jbc::JBCPrimType::kTypeShort);
+  stackHelper.PushItem(jbc::JBCPrimType::kTypeByteOrBoolean);
+  EXPECT_EQ(stackHelper.Dup(jbc::kOpDupX2), true);
+  std::vector<jbc::JBCPrimType> types1({ jbc::JBCPrimType::kTypeByteOrBoolean, jbc::JBCPrimType::kTypeInt,
+                                         jbc::JBCPrimType::kTypeShort, jbc::JBCPrimType::kTypeByteOrBoolean });
+  EXPECT_EQ(stackHelper.PopItems(types1), true);
+
+  // mode 2: ..., typeII, typeI ->
+  // input: ..., J, I ->
+  // output: ..., I, J, I ->
+  stackHelper.Reset();
+  stackHelper.PushItem(jbc::JBCPrimType::kTypeLong);
+  stackHelper.PushItem(jbc::JBCPrimType::kTypeInt);
+  EXPECT_EQ(stackHelper.Dup(jbc::kOpDupX2), true);
+  std::vector<jbc::JBCPrimType> types2({ jbc::JBCPrimType::kTypeInt, jbc::JBCPrimType::kTypeLong,
+                                         jbc::JBCPrimType::kTypeInt });
+  EXPECT_EQ(stackHelper.PopItems(types2), true);
+
+  // TODO(UT): add false cases
+}
+
+TEST(JBCStackHelper, Dup2) {
+  JBCStackHelper stackHelper;
+  // mode 1: ..., typeI, typeI ->
+  // input: ..., I, S ->
+  // output: ..., I, S, I, S ->
+  stackHelper.PushItem(jbc::JBCPrimType::kTypeInt);
+  stackHelper.PushItem(jbc::JBCPrimType::kTypeShort);
+  EXPECT_EQ(stackHelper.Dup(jbc::kOpDup2), true);
+  std::vector<jbc::JBCPrimType> types1({ jbc::JBCPrimType::kTypeInt, jbc::JBCPrimType::kTypeShort,
+                                         jbc::JBCPrimType::kTypeInt, jbc::JBCPrimType::kTypeShort });
+  EXPECT_EQ(stackHelper.PopItems(types1), true);
+
+  // mode 2: ..., typeII ->
+  // input: ..., J ->
+  // output: ..., J, J ->
+  stackHelper.Reset();
+  stackHelper.PushItem(jbc::JBCPrimType::kTypeLong);
+  EXPECT_EQ(stackHelper.Dup(jbc::kOpDup2), true);
+  std::vector<jbc::JBCPrimType> types2({ jbc::JBCPrimType::kTypeLong, jbc::JBCPrimType::kTypeLong });
+  EXPECT_EQ(stackHelper.PopItems(types2), true);
+
+  // TODO(UT): add false cases
+}
+
+TEST(JBCStackHelper, Dup2X1) {
+  JBCStackHelper stackHelper;
+  // mode 1: ..., typeI, typeI, typeI ->
+  // input: ..., I, S, B ->
+  // output: ..., S, B, I, S, B ->
+  stackHelper.PushItem(jbc::JBCPrimType::kTypeInt);
+  stackHelper.PushItem(jbc::JBCPrimType::kTypeShort);
+  stackHelper.PushItem(jbc::JBCPrimType::kTypeByteOrBoolean);
+  EXPECT_EQ(stackHelper.Dup(jbc::kOpDup2X1), true);
+  std::vector<jbc::JBCPrimType> types1({ jbc::JBCPrimType::kTypeShort, jbc::JBCPrimType::kTypeByteOrBoolean,
+                                         jbc::JBCPrimType::kTypeInt, jbc::JBCPrimType::kTypeShort,
+                                         jbc::JBCPrimType::kTypeByteOrBoolean });
+  EXPECT_EQ(stackHelper.PopItems(types1), true);
+
+  // mode 2: ..., typeI, typeII ->
+  // input: ..., I, J ->
+  // output: ..., J, I, J ->
+  stackHelper.Reset();
+  stackHelper.PushItem(jbc::JBCPrimType::kTypeInt);
+  stackHelper.PushItem(jbc::JBCPrimType::kTypeLong);
+  EXPECT_EQ(stackHelper.Dup(jbc::kOpDup2X1), true);
+  std::vector<jbc::JBCPrimType> types2({ jbc::JBCPrimType::kTypeLong, jbc::JBCPrimType::kTypeInt,
+                                         jbc::JBCPrimType::kTypeLong });
+  EXPECT_EQ(stackHelper.PopItems(types2), true);
+
+  // TODO(UT): add false cases
+}
+
+TEST(JBCStackHelper, Dup2X2) {
+  JBCStackHelper stackHelper;
+  // mode 1: ..., typeI, typeI, typeI, typeI ->
+  // input: ..., I, S, B, C ->
+  // output: ..., B, C, I, S, B, C ->
+  stackHelper.PushItem(jbc::JBCPrimType::kTypeInt);
+  stackHelper.PushItem(jbc::JBCPrimType::kTypeShort);
+  stackHelper.PushItem(jbc::JBCPrimType::kTypeByteOrBoolean);
+  stackHelper.PushItem(jbc::JBCPrimType::kTypeChar);
+  EXPECT_EQ(stackHelper.Dup(jbc::kOpDup2X2), true);
+  std::vector<jbc::JBCPrimType> types1({ jbc::JBCPrimType::kTypeByteOrBoolean, jbc::JBCPrimType::kTypeChar,
+                                         jbc::JBCPrimType::kTypeInt, jbc::JBCPrimType::kTypeShort,
+                                         jbc::JBCPrimType::kTypeByteOrBoolean, jbc::JBCPrimType::kTypeChar });
+  EXPECT_EQ(stackHelper.PopItems(types1), true);
+
+  // mode 2: ..., typeI, typeI, typeII ->
+  // input: ..., I, S, J ->
+  // output: ..., J, I, S, J ->
+  stackHelper.Reset();
+  stackHelper.PushItem(jbc::JBCPrimType::kTypeInt);
+  stackHelper.PushItem(jbc::JBCPrimType::kTypeShort);
+  stackHelper.PushItem(jbc::JBCPrimType::kTypeLong);
+  EXPECT_EQ(stackHelper.Dup(jbc::kOpDup2X2), true);
+  std::vector<jbc::JBCPrimType> types2({ jbc::JBCPrimType::kTypeLong, jbc::JBCPrimType::kTypeInt,
+                                         jbc::JBCPrimType::kTypeShort, jbc::JBCPrimType::kTypeLong });
+  EXPECT_EQ(stackHelper.PopItems(types2), true);
+
+  // mode 3: ..., typeII, typeI, typeI ->
+  // input: ..., J, I, S ->
// output: ..., I, S, J, I, S -> + stackHelper.Reset(); + stackHelper.PushItem(jbc::JBCPrimType::kTypeLong); + stackHelper.PushItem(jbc::JBCPrimType::kTypeInt); + stackHelper.PushItem(jbc::JBCPrimType::kTypeShort); + EXPECT_EQ(stackHelper.Dup(jbc::kOpDup2X2), true); + std::vector types3({ jbc::JBCPrimType::kTypeInt, jbc::JBCPrimType::kTypeShort, + jbc::JBCPrimType::kTypeLong, jbc::JBCPrimType::kTypeInt, + jbc::JBCPrimType::kTypeShort }); + EXPECT_EQ(stackHelper.PopItems(types3), true); + + // mode 4: ..., typeII, typeII -> + // input: ..., J, D -> + // output: ..., D, J, D -> + stackHelper.Reset(); + stackHelper.PushItem(jbc::JBCPrimType::kTypeLong); + stackHelper.PushItem(jbc::JBCPrimType::kTypeDouble); + EXPECT_EQ(stackHelper.Dup(jbc::kOpDup2X2), true); + std::vector types4({ jbc::JBCPrimType::kTypeDouble, jbc::JBCPrimType::kTypeLong, + jbc::JBCPrimType::kTypeDouble }); + EXPECT_EQ(stackHelper.PopItems(types4), true); + + // TODO(UT): add false cases +} + +TEST(JBCStackHelper, Swap) { + JBCStackHelper stackHelper; + stackHelper.PushItem(jbc::JBCPrimType::kTypeInt); + stackHelper.PushItem(jbc::JBCPrimType::kTypeShort); + EXPECT_EQ(stackHelper.Swap(), true); + std::vector types({ jbc::JBCPrimType::kTypeShort, jbc::JBCPrimType::kTypeInt }); + EXPECT_EQ(stackHelper.PopItems(types), true); + + // TODO(UT): add false cases +} +} // namespace maple \ No newline at end of file diff --git a/src/hir2mpl/test/bytecode_input/class/jbc_stmt_bb_test.cpp b/src/hir2mpl/test/bytecode_input/class/jbc_stmt_bb_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..3bdbbb58d00d19c21f36ae3dc96816008307bd5c --- /dev/null +++ b/src/hir2mpl/test/bytecode_input/class/jbc_stmt_bb_test.cpp @@ -0,0 +1,57 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include +#include "feir_test_base.h" +#include "feir_stmt.h" +#include "feir_var.h" +#include "feir_var_reg.h" +#include "feir_var_name.h" +#include "feir_type_helper.h" +#include "feir_bb.h" +#include "jbc_function.h" +#include "hir2mpl_ut_environment.h" + +#define protected public +#define private public + +namespace maple { +class FEIRStmtBBTest : public FEIRTestBase { + public: + static MemPool *mp; + MapleAllocator allocator; + jbc::JBCClass jbcClass; + jbc::JBCClassMethod jbcMethod; + JBCClassMethod2FEHelper jbcMethodHelper; + MIRFunction mirFunction; + JBCFunction jbcFunction; + FEIRStmtBBTest() + : allocator(mp), + jbcClass(allocator), + jbcMethod(allocator, jbcClass), + jbcMethodHelper(allocator, jbcMethod), + mirFunction(&HIR2MPLUTEnvironment::GetMIRModule(), StIdx(0, 0)), + jbcFunction(jbcMethodHelper, mirFunction, std::make_unique(true)) {} + virtual ~FEIRStmtBBTest() = default; + static void SetUpTestCase() { + mp = FEUtils::NewMempool("MemPool for FEIRStmtBBTest", false /* isLcalPool */); + } + + static void TearDownTestCase() { + delete mp; + mp = nullptr; + } +}; +MemPool *FEIRStmtBBTest::mp = nullptr; +} // namespace maple diff --git a/src/hir2mpl/test/bytecode_input/class/jbc_stmt_loc_test.cpp b/src/hir2mpl/test/bytecode_input/class/jbc_stmt_loc_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..cb3638e4ea34be591f74bf85c2d311a5a3565024 --- /dev/null +++ b/src/hir2mpl/test/bytecode_input/class/jbc_stmt_loc_test.cpp @@ -0,0 +1,87 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include +#include +#include "jbc_function.h" +#include "fe_manager.h" +#include "redirect_buffer.h" +#include "feir_type_helper.h" +#include "hir2mpl_ut_regx.h" +#include "hir2mpl_ut_environment.h" +#include "feir_stmt.h" +#include "feir_var.h" +#include "feir_var_reg.h" +#include "feir_var_name.h" +#include "feir_test_base.h" +namespace maple { +class FEIRStmtLOCTest : public FEIRTestBase { + public: + static MemPool *mp; + MapleAllocator allocator; + jbc::JBCClass jbcClass; + jbc::JBCClassMethod jbcMethod; + JBCClassMethod2FEHelper jbcMethodHelper; + MIRFunction mirFunction; + JBCFunction jbcFunction; + FEIRStmtLOCTest() + : allocator(mp), + jbcClass(allocator), + jbcMethod(allocator, jbcClass), + jbcMethodHelper(allocator, jbcMethod), + mirFunction(&HIR2MPLUTEnvironment::GetMIRModule(), StIdx(0, 0)), + jbcFunction(jbcMethodHelper, mirFunction, std::make_unique(true)) {} + ~FEIRStmtLOCTest() = default; + + static void SetUpTestCase() { + mp = FEUtils::NewMempool("MemPool for FEIRStmtLOCTest", false /* isLcalPool */); + } + + static void TearDownTestCase() { + delete mp; + mp = nullptr; + } +}; +MemPool *FEIRStmtLOCTest::mp = nullptr; + +// ---------- FEIRStmtDAssign ---------- +TEST_F(FEIRStmtLOCTest, GetLOCForStmt) { + std::unique_ptr type = FEIRTypeHelper::CreateTypeByJavaName("Ljava/lang/String;", false, true); + std::unique_ptr dstVar = std::make_unique(0, type->Clone()); + std::unique_ptr srcVar = std::make_unique(1, type->Clone()); + std::unique_ptr exprDRead = std::make_unique(std::move(srcVar)); + std::unique_ptr stmtDAssign = + std::make_unique(std::move(dstVar), std::move(exprDRead)); + + uint32 srcFileIdx = 2; + uint32 lineNumber = 10; + std::unique_ptr stmtPesudoLOC = std::make_unique(srcFileIdx, lineNumber); + jbcFunction.InitImpl(); + jbcFunction.feirStmtTail->InsertBefore(static_cast(stmtPesudoLOC.get())); + jbcFunction.feirStmtTail->InsertBefore(static_cast(stmtDAssign.get())); + const FEIRStmtPesudoLOC *expectedLOC = jbcFunction.GetLOCForStmt(*static_cast(stmtDAssign.get())); + std::list mirStmts = stmtDAssign->GenMIRStmts(mirBuilder); + mirStmts.front()->GetSrcPos().SetFileNum(static_cast(expectedLOC->GetSrcFileIdx())); + mirStmts.front()->GetSrcPos().SetLineNum(expectedLOC->GetSrcFileLineNum()); + RedirectCout(); + mirStmts.front()->Dump(); + std::string dumpStr = GetBufferString(); + std::string pattern = std::string("LOC 2 10\n") + std::string("dassign %Reg0_") + + HIR2MPLUTRegx::RefIndex(HIR2MPLUTRegx::kAnyNumber) + + " 0 \\(dread ref %Reg1_" + HIR2MPLUTRegx::RefIndex(HIR2MPLUTRegx::kAnyNumber) + "\\)" + + HIR2MPLUTRegx::Any(); + EXPECT_EQ(HIR2MPLUTRegx::Match(dumpStr, pattern), true); + RestoreCout(); +} +} // namespace maple diff --git a/src/hir2mpl/test/bytecode_input/class/jbc_util_test.cpp b/src/hir2mpl/test/bytecode_input/class/jbc_util_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..bb50e91065b11f7ade15eb43d8738221a0ecd99f --- /dev/null +++ b/src/hir2mpl/test/bytecode_input/class/jbc_util_test.cpp @@ -0,0 +1,37 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#include <gtest/gtest.h>
+#include <string>
+#include "jbc_util.h"
+#include "fe_utils_java.h"
+
+namespace maple {
+namespace jbc {
+TEST(JBCUtil, ClassInternalNameToFullName) {
+  EXPECT_EQ(JBCUtil::ClassInternalNameToFullName("java/lang/Object"), "Ljava/lang/Object;");
+  EXPECT_EQ(JBCUtil::ClassInternalNameToFullName("[Ljava/lang/Object;"), "[Ljava/lang/Object;");
+}
+
+TEST(JBCUtil, SolveMethodSignature) {
+  std::vector<std::string> ans;
+  ans = FEUtilJava::SolveMethodSignature("func(Ljava/lang/Object;[II)J");
+  ASSERT_EQ(ans.size(), 4);
+  EXPECT_EQ(ans[0], "J");
+  EXPECT_EQ(ans[1], "Ljava/lang/Object;");
+  EXPECT_EQ(ans[2], "[I");
+  EXPECT_EQ(ans[3], "I");
+}
+} // namespace jbc
+} // namespace maple
\ No newline at end of file
diff --git a/src/hir2mpl/test/bytecode_input/dex/bc_load_on_demand_type_test.cpp b/src/hir2mpl/test/bytecode_input/dex/bc_load_on_demand_type_test.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..ae76647ccd0c811b831158d194a80f60fdb95f6a
--- /dev/null
+++ b/src/hir2mpl/test/bytecode_input/dex/bc_load_on_demand_type_test.cpp
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#include <gtest/gtest.h>
+#include <string>
+#include "hir2mpl_ut_environment.h"
+#define private public
+#include "bc_compiler_component.h"
+#undef private
+#include "dexfile_factory.h"
+#include "dex_pragma.h"
+
+namespace maple {
+TEST(BCLoadOnDemandType, Test) {
+  std::string str(getenv("MAPLE_ROOT"));
+  std::string path = str + "/output/aarch64-clang-release/libjava-core/host-x86_64-O2/libcore-all.dex";
+  FEOptions::GetInstance().SetXBootClassPath(path);
+  bc::BCCompilerComponent<bc::DexReader> bcCompiler(HIR2MPLUTEnvironment::GetMIRModule());
+  std::unordered_set<std::string> allDepSet;
+  std::list<std::unique_ptr<bc::BCClass>> klassList;
+  allDepSet.insert("Ljava/lang/String;");
+  std::unordered_set<std::string> allClassSet;
+  bool success = bcCompiler.LoadOnDemandType2BCClass(allDepSet, allClassSet, klassList);
+  EXPECT_EQ(success, true);
+  EXPECT_EQ(klassList.size(), 5);
+  ASSERT_EQ(klassList.front()->GetClassName(false), "Ljava/lang/Object;");
+  for (const std::unique_ptr<bc::BCClass> &klass : klassList) {
+    // Dependent classes were already loaded from mplt, so rename the class names to avoid failures when creating MIRStruct types
+    klass->SetClassName("Temp/" + klass->GetClassName(false));
+    // Annotations are set to nullptr instead of being parsed, because the class name renamed above
+    // no longer matches the klass entry in the local orinNameStrIdxMap.
+ DexFileFactory dexFileFactory; + std::unique_ptr iDexFile = dexFileFactory.NewInstance(); + std::unique_ptr annotationsDirectory = std::make_unique( + HIR2MPLUTEnvironment::GetMIRModule(), *(HIR2MPLUTEnvironment::GetMIRModule().GetMemPool()), *iDexFile, "", nullptr); + klass->SetAnnotationsDirectory(std::move(annotationsDirectory)); + } + std::list> structHelpers; + success = bcCompiler.LoadOnDemandBCClass2FEClass(klassList, structHelpers, false); + EXPECT_EQ(structHelpers.size(), 5); + ASSERT_EQ(structHelpers.front()->GetStructNameOrin(), "Temp/Ljava/lang/Object;"); + ASSERT_EQ(structHelpers.front()->GetContainer()->IsImported(), true); + ASSERT_EQ(structHelpers.front()->GetMethodHelpers().front()->GetMethodName(false, false), ""); +} +} // namespace maple diff --git a/src/hir2mpl/test/bytecode_input/dex/bc_parser_test.cpp b/src/hir2mpl/test/bytecode_input/dex/bc_parser_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..85ae3ff78000f0a5057115e356af5fe2a4c7f063 --- /dev/null +++ b/src/hir2mpl/test/bytecode_input/dex/bc_parser_test.cpp @@ -0,0 +1,61 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include +#include +#include +#include +#include "bc_parser.h" +#include "dex_parser.h" +#include "bc_class.h" +#include "types_def.h" + +namespace maple { +namespace bc { +class BCParserTest : public testing::Test { + public: + BCParserTest() = default; + ~BCParserTest() = default; + template + void Init(uint32 index, std::string fileName, const std::list &classNamesIn) { + bcParser = std::make_unique(index, fileName, classNamesIn); + } + + protected: + std::unique_ptr bcParser; +}; + +TEST_F(BCParserTest, TestDexParser) { + std::string fileName = ""; + std::list classNames; + Init(0, fileName, classNames); + EXPECT_EQ(bcParser->OpenFile(), false); +} + +TEST_F(BCParserTest, TestClassNameMplIdx) { + GStrIdx idx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(namemangler::EncodeName("Ljava/lang/Object;")); + std::string fileName = ""; + std::list classNames; + Init(0, fileName, classNames); + BCClass *bcClass = new BCClass(0, *bcParser.get()); + bcClass->SetClassName("Ljava/lang/Object;"); + bool result = bcClass->GetClassNameMplIdx() == idx; + EXPECT_EQ(result, true); + delete bcClass; +} + +// TODO +// Add UT for ParseHeader(), Verify(), RetrieveClasses(klasses) +} // namespace jbc +} // namespace maple diff --git a/src/hir2mpl/test/bytecode_input/dex/bc_util_test.cpp b/src/hir2mpl/test/bytecode_input/dex/bc_util_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..f94d9326df008b1e41af6b6b2bb7e78c0813370c --- /dev/null +++ b/src/hir2mpl/test/bytecode_input/dex/bc_util_test.cpp @@ -0,0 +1,26 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#include <gtest/gtest.h>
+#include <string>
+#include "bc_util.h"
+
+namespace maple {
+TEST(BCUtil, IsWideType) {
+  ASSERT_EQ(bc::BCUtil::IsWideType(bc::BCUtil::GetIntIdx()), false);
+  ASSERT_EQ(bc::BCUtil::IsWideType(bc::BCUtil::GetFloatIdx()), false);
+  ASSERT_EQ(bc::BCUtil::IsWideType(bc::BCUtil::GetLongIdx()), true);
+  ASSERT_EQ(bc::BCUtil::IsWideType(bc::BCUtil::GetDoubleIdx()), true);
+}
+} // namespace maple
\ No newline at end of file
diff --git a/src/hir2mpl/test/bytecode_input/dex/dex_class_test.cpp b/src/hir2mpl/test/bytecode_input/dex/dex_class_test.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..68a9ff8d592e9de5dafdc0dc95eec7d32ef77a71
--- /dev/null
+++ b/src/hir2mpl/test/bytecode_input/dex/dex_class_test.cpp
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#include <gtest/gtest.h>
+#include <iostream>
+#include <memory>
+#include "global_tables.h"
+#include "dex_class.h"
+
+namespace maple {
+namespace bc {
+TEST(DexClass, TestTryCatch) {
+  std::unique_ptr<BCCatchInfo> catchInfo1 =
+      std::make_unique<BCCatchInfo>(0x0023, BCUtil::GetJavaExceptionNameMplIdx(), false);
+  std::unique_ptr<BCCatchInfo> catchInfo2 = std::make_unique<BCCatchInfo>(0x0021, BCUtil::GetVoidIdx(), true);
+  ASSERT_EQ(catchInfo1->GetHandlerAddr(), 0x0023);
+  std::cout << GlobalTables::GetStrTable().GetStringFromStrIdx(catchInfo1->GetExceptionNameIdx()) << std::endl;
+  ASSERT_EQ(GlobalTables::GetStrTable().GetStringFromStrIdx(catchInfo1->GetExceptionNameIdx()),
+            "Ljava_2Flang_2FException_3B");
+  ASSERT_EQ(catchInfo1->GetIsCatchAll(), false);
+
+  std::unique_ptr<std::list<std::unique_ptr<BCCatchInfo>>> catches =
+      std::make_unique<std::list<std::unique_ptr<BCCatchInfo>>>();
+  catches->push_back(std::move(catchInfo1));
+  catches->push_back(std::move(catchInfo2));
+  std::unique_ptr<BCTryInfo> tryInfo = std::make_unique<BCTryInfo>(0x0003, 0x0012, std::move(catches));
+  ASSERT_EQ(tryInfo->GetStartAddr(), 0x0003);
+  ASSERT_EQ(tryInfo->GetEndAddr(), 0x0012);
+  ASSERT_EQ(tryInfo->GetCatches()->size(), 2);
+}
+
+// Additional UT would be supplied later
+} // namespace bc
+} // namespace maple
\ No newline at end of file
diff --git a/src/hir2mpl/test/bytecode_input/dex/dex_file_util_test.cpp b/src/hir2mpl/test/bytecode_input/dex/dex_file_util_test.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..b61df26db4e52064c0590b99b438038cef91eef5
--- /dev/null
+++ b/src/hir2mpl/test/bytecode_input/dex/dex_file_util_test.cpp
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include +#include +#include "dex_file_util.h" + +namespace maple { +TEST(DexFileUtil, Adler32) { + const uint8 data[11] = {'H', 'e', 'l', 'l', 'o', 'W', 'o', 'r', 'l', 'd', '!'}; + ASSERT_EQ(bc::DexFileUtil::Adler32(data, 11), 0x1974041E); +} +} // namespace maple \ No newline at end of file diff --git a/src/hir2mpl/test/bytecode_input/dex/dex_op_test.cpp b/src/hir2mpl/test/bytecode_input/dex/dex_op_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..17b35d7ee69b6c15a7a0c2de033a0fbc9a27de06 --- /dev/null +++ b/src/hir2mpl/test/bytecode_input/dex/dex_op_test.cpp @@ -0,0 +1,1474 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include +#include +#include "feir_test_base.h" +#include "hir2mpl_ut_regx.h" +#include "fe_utils_java.h" + +#define private public +#include "dex_op.h" +#undef private + +namespace maple { +namespace bc { +class DexOpTest : public FEIRTestBase { + public: + DexOpTest() = default; + virtual ~DexOpTest() = default; +}; + +TEST_F(DexOpTest, TestKindAndWide) { + MemPool *mp = FEUtils::NewMempool("MemPool for Test DexOp", true); + MapleAllocator allocator(mp); + auto dexOp = static_cast(dexOpGeneratorMap[kDexOpMoveResultWide](allocator, 0)); + ASSERT_EQ(dexOp->GetInstKind(), kFallThru); + ASSERT_EQ(dexOp->IsWide(), true); + + dexOp = static_cast(dexOpGeneratorMap[kDexOpPackedSwitch](allocator, 0)); + ASSERT_EQ(dexOp->GetInstKind(), kFallThru | kSwitch); + ASSERT_EQ(dexOp->IsWide(), false); + ASSERT_EQ(dexOp->GetArrayElementTypeFromArrayType("ALava_2Flang_2FObject_3B").compare("Lava_2Flang_2FObject_3B"), 0); + ASSERT_EQ(dexOp->GetArrayElementTypeFromArrayType("AAI").compare("AI"), 0); + delete mp; +} + +class DexOpConstClassUT : public bc::DexOpConstClass { + public: + DexOpConstClassUT(MapleAllocator &allocatorIn, uint32 pcIn, bc::DexOpCode opcodeIn) + : bc::DexOpConstClass(allocatorIn, pcIn, opcodeIn) {} + ~DexOpConstClassUT() = default; + void SetRegTypeInUT(const std::string &name) { + const std::string &mplTypeName = namemangler::EncodeName(name); + mplTypeNameIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(mplTypeName); + std::string refTypeName = bc::BCUtil::kJavaClassName; + vA.regType = allocator.GetMemPool()->New(allocator, vA, + GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(namemangler::EncodeName(refTypeName))); + } +}; + +class DexOpMoveExceptionUT : public bc::DexOpMoveException { + public: + DexOpMoveExceptionUT(MapleAllocator &allocatorIn, uint32 pcIn, bc::DexOpCode opcodeIn) + : bc::DexOpMoveException(allocatorIn, pcIn, opcodeIn) { + instKind = bc::kCatch; + } + ~DexOpMoveExceptionUT() = default; + void SetRegTypeInUT(const 
std::string &name) { + vA.regType = allocator.GetMemPool()->New(allocator, vA, + GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(namemangler::EncodeName(name))); + } +}; + +class DexOpMoveUT : public bc::DexOpMove { + public: + DexOpMoveUT(MapleAllocator &allocatorIn, uint32 pcIn, bc::DexOpCode opcodeIn) + : bc::DexOpMove(allocatorIn, pcIn, opcodeIn) {} + ~DexOpMoveUT() = default; + void SetRegTypeInUT(const std::string &name) { + vB.regType = allocator.GetMemPool()->New(allocator, vB, + GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(namemangler::EncodeName(name))); + vA.regTypeItem = vB.regTypeItem; + } +}; +// ---------- FEIRStmtDAssign-DexOpMove ---------- +TEST_F(DexOpTest, FEIRStmtDAssignDexOpMove) { + std::unique_ptr dexOp = std::make_unique(allocator, 1, bc::kDexOpMove); + dexOp->SetVA(0); + dexOp->SetVB(1); + dexOp->SetRegTypeInUT(bc::BCUtil::kInt); + std::list feStmts = dexOp->EmitToFEIRStmts(); + ASSERT_EQ(feStmts.size(), 1); + std::list mirNodes = feStmts.front()->GenMIRStmts(mirBuilder); + ASSERT_EQ(mirNodes.size(), 1); + RedirectCout(); + mirNodes.front()->Dump(); + std::string dumpStr = GetBufferString(); + std::string pattern = std::string("dassign %Reg0_I") + " 0 \\(dread i32 %Reg1_I" + "\\)" + + HIR2MPLUTRegx::Any(); + EXPECT_EQ(HIR2MPLUTRegx::Match(dumpStr, pattern), true); + RestoreCout(); +} + +// ---------- FEIRStmtDAssign-DexOpMoveException ---------- +TEST_F(DexOpTest, FEIRStmtDAssignDexOpMoveException) { + std::unique_ptr dexOp = + std::make_unique(allocator, 1, bc::kDexOpMoveException); + dexOp->SetVA(0); + dexOp->SetRegTypeInUT("Ljava/lang/Exception;"); + std::list feStmts = dexOp->EmitToFEIRStmts(); + ASSERT_EQ(feStmts.size(), 2); + ASSERT_EQ(feStmts.front()->GetKind(), kStmtPesudoLabel); + std::list mirNodes = feStmts.back()->GenMIRStmts(mirBuilder); + ASSERT_EQ(mirNodes.size(), 1); + RedirectCout(); + mirNodes.front()->Dump(); + std::string dumpStr = GetBufferString(); + std::string pattern = std::string("dassign %Reg0_") + HIR2MPLUTRegx::RefIndex(HIR2MPLUTRegx::kAnyNumber) + + " 0 \\(regread ref %%thrownval" + "\\)" + HIR2MPLUTRegx::Any(); + EXPECT_EQ(HIR2MPLUTRegx::Match(dumpStr, pattern), true); + RestoreCout(); +} + +// ---------- FEIRStmtDAssign-DexOpConstClass ---------- +TEST_F(DexOpTest, FEIRStmtDAssignDexOpConstClass) { + std::unique_ptr dexOp = std::make_unique(allocator, 1, bc::kDexOpConstClass); + dexOp->SetVA(0); + dexOp->SetRegTypeInUT("Ljava/lang/String;"); + std::list feStmts = dexOp->EmitToFEIRStmts(); + ASSERT_EQ(feStmts.size(), 1); + std::list mirNodes = feStmts.front()->GenMIRStmts(mirBuilder); + ASSERT_EQ(mirNodes.size(), 1); + RedirectCout(); + mirNodes.front()->Dump(); + std::string dumpStr = GetBufferString(); + std::string pattern = "dassign %Reg0_" + HIR2MPLUTRegx::RefIndex(HIR2MPLUTRegx::kAnyNumber) + + " 0 \\(intrinsicopwithtype ref \\<\\* \\<\\$Ljava_2Flang_2FString_3B\\>\\> " + "JAVA_CONST_CLASS \\(\\)\\)" + HIR2MPLUTRegx::Any(); + EXPECT_EQ(HIR2MPLUTRegx::Match(dumpStr, pattern), true); + RestoreCout(); +} + +// ---------- FEIRStmt-DexOpMonitor ---------- +class DexOpMonitorUT : public bc::DexOpMonitor { + public: + DexOpMonitorUT(MapleAllocator &allocatorIn, uint32 pcIn, bc::DexOpCode opcodeIn) + : bc::DexOpMonitor(allocatorIn, pcIn, opcodeIn) {} + ~DexOpMonitorUT() = default; + void SetRegTypeInUT(const std::string &name) { + vA.regType = allocator.GetMemPool()->New(allocator, vA, + GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(namemangler::EncodeName(name))); + } +}; +TEST_F(DexOpTest, FEIRStmtDexOpMonitor) 
{ + std::unique_ptr dexOp = std::make_unique(allocator, 1, bc::kDexOpMonitorEnter); + dexOp->SetVA(0); + dexOp->SetRegTypeInUT(bc::BCUtil::kInt); + std::list feStmts = dexOp->EmitToFEIRStmts(); + ASSERT_EQ(feStmts.size(), 1); + std::list mirNodes = feStmts.front()->GenMIRStmts(mirBuilder); + ASSERT_EQ(mirNodes.size(), 1); + RedirectCout(); + mirNodes.front()->Dump(); + std::string dumpStr = GetBufferString(); + std::string pattern = "syncenter \\(dread i32 %Reg0_I, constval i32 2\\)" + HIR2MPLUTRegx::Any(); + EXPECT_EQ(HIR2MPLUTRegx::Match(dumpStr, pattern), true); + RestoreCout(); + dexOp = std::make_unique(allocator, 1, bc::kDexOpMonitorExit); + dexOp->SetVA(0); + dexOp->SetRegTypeInUT(bc::BCUtil::kInt); + feStmts = dexOp->EmitToFEIRStmts(); + ASSERT_EQ(feStmts.size(), 1); + mirNodes = feStmts.front()->GenMIRStmts(mirBuilder); + ASSERT_EQ(mirNodes.size(), 1); + RedirectCout(); + mirNodes.front()->Dump(); + dumpStr = GetBufferString(); + EXPECT_EQ(dumpStr.find("syncexit (dread i32 %Reg0_I)"), 0); + RestoreCout(); +} + +// ---------- FEIRStmt-DexOpIfTest ---------- +class DexOpIfTestUT : public bc::DexOpIfTest { + public: + DexOpIfTestUT(MapleAllocator &allocatorIn, uint32 pcIn, bc::DexOpCode opcodeIn) + : bc::DexOpIfTest(allocatorIn, pcIn, opcodeIn) {} + ~DexOpIfTestUT() = default; + void SetType(const std::string &name) { + vA.regType = allocator.GetMemPool()->New(allocator, vA, + GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(namemangler::EncodeName(name))); + vB.regType = vA.regType; + vB.regTypeItem = vA.regTypeItem; + } + + void SetFuncNameIdx(uint32 idx) { + funcNameIdx = idx; + } +}; + +TEST_F(DexOpTest, FEIRStmtDexOpIfTest) { + std::unique_ptr dexOp = std::make_unique(allocator, 1, bc::kDexOpIfLt); + dexOp->SetVA(0); + dexOp->SetVB(1); + dexOp->SetType("I"); + dexOp->SetFuncNameIdx(111); + std::list feStmts = dexOp->EmitToFEIRStmts(); + ASSERT_EQ(feStmts.size(), 1); + std::list mirNodes = feStmts.front()->GenMIRStmts(mirBuilder); + ASSERT_EQ(mirNodes.size(), 1); + RedirectCout(); + mirNodes.front()->Dump(); + std::string dumpStr = GetBufferString(); + RestoreCout(); + EXPECT_EQ(dumpStr.find("brtrue @L111_0 (lt u1 i32 (dread i32 %Reg0_I, dread i32 %Reg1_I))"), 0); +} + +// ---------- FEIRStmt-DexOpIfTestZ ---------- +class DexOpIfTestZUT : public bc::DexOpIfTestZ { + public: + DexOpIfTestZUT(MapleAllocator &allocatorIn, uint32 pcIn, bc::DexOpCode opcodeIn) + : bc::DexOpIfTestZ(allocatorIn, pcIn, opcodeIn) {} + ~DexOpIfTestZUT() = default; + void SetType(const std::string &name) { + vA.regType = allocator.GetMemPool()->New(allocator, vA, + GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(namemangler::EncodeName(name))); + } + void SetFuncNameIdx(uint32 idx) { + funcNameIdx = idx; + } +}; + +TEST_F(DexOpTest, FEIRStmtDexOpIfTestZ) { + RedirectCout(); + std::unique_ptr dexOpI = std::make_unique(allocator, 1, bc::kDexOpIfEqZ); + dexOpI->SetVA(0); + dexOpI->SetType("I"); + dexOpI->SetFuncNameIdx(111); + std::list feStmts = dexOpI->EmitToFEIRStmts(); + ASSERT_EQ(feStmts.size(), 1); + std::list mirNodes = feStmts.front()->GenMIRStmts(mirBuilder); + ASSERT_EQ(mirNodes.size(), 1); + mirNodes.front()->Dump(); + std::string dumpStr = GetBufferString(); + std::string pattern = std::string("brtrue @L111_0 \\(eq u1 i32 \\(dread i32 %Reg0_I, constval i32 0\\)\\)") + + HIR2MPLUTRegx::Any(); + EXPECT_EQ(HIR2MPLUTRegx::Match(dumpStr, pattern), true); + std::unique_ptr dexOpZ = std::make_unique(allocator, 1, bc::kDexOpIfEqZ); + dexOpZ->SetVA(0); + dexOpZ->SetType("Z"); + 
dexOpZ->SetFuncNameIdx(222); + dexOpZ->EmitToFEIRStmts().front()->GenMIRStmts(mirBuilder).front()->Dump(); + dumpStr = GetBufferString(); + pattern = std::string("brfalse @L222_0 \\(dread u32 %Reg0_Z\\)") + HIR2MPLUTRegx::Any(); + EXPECT_EQ(HIR2MPLUTRegx::Match(dumpStr, pattern), true); + RestoreCout(); +} + +// ---------- FEIRStmt-DexOpInvoke ---------- +class DexOpInvokeUT : public bc::DexOpInvoke { + public: + DexOpInvokeUT(MapleAllocator &allocatorIn, uint32 pcIn, bc::DexOpCode opcodeIn) + : bc::DexOpInvoke(allocatorIn, pcIn, opcodeIn) {} + ~DexOpInvokeUT() = default; + + void ParseImplUT(const bc::BCReader::ClassElem &elem) { + bc::BCReader::ClassElem methodInfo = elem; + retArgsTypeNames = FEUtilJava::SolveMethodSignature(methodInfo.typeName); + bc::DexReg reg; + MapleList argRegNums = argRegs; + ReplaceStringFactory(methodInfo, argRegNums); + structElemNameIdx = allocator.GetMemPool()->New( + methodInfo.className, methodInfo.elemName, methodInfo.typeName); + + std::string typeName; + if (!IsStatic()) { + reg.regNum = argRegNums.front(); + argRegNums.pop_front(); + typeName = methodInfo.className; + typeName = namemangler::EncodeName(typeName); + reg.regType = allocator.GetMemPool()->New(allocator, reg, + GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(typeName)); + argVRegs.emplace_back(reg); + } + for (size_t i = 1; i < retArgsTypeNames.size(); ++i) { + reg.regNum = argRegNums.front(); + argRegNums.pop_front(); + typeName = retArgsTypeNames[i]; + typeName = namemangler::EncodeName(typeName); + reg.regType = allocator.GetMemPool()->New(allocator, reg, + GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(typeName)); + argVRegs.emplace_back(reg); + } + } +}; + +TEST_F(DexOpTest, FEIRStmtDexOpInvoke) { + RedirectCout(); + std::unique_ptr dexOp = std::make_unique(allocator, 1, bc::kDexOpInvokeVirtual); + dexOp->SetVA(3); + dexOp->SetVC(6); + MapleList argVRegNums({6, 7, 8}, allocator.Adapter()); + dexOp->SetArgs(argVRegNums); + bc::BCReader::ClassElem elem; + elem.className = "LTestClass;"; + elem.elemName = "funcName"; + elem.typeName = "(Ljava/lang/String;I)V"; + dexOp->ParseImplUT(elem); + std::list feStmts = dexOp->EmitToFEIRStmts(); + ASSERT_EQ(feStmts.size(), 1); + std::list mirNodes = feStmts.front()->GenMIRStmts(mirBuilder); + ASSERT_EQ(mirNodes.size(), 1); + mirNodes.front()->Dump(); + std::string dumpStr = GetBufferString(); + RestoreCout(); + EXPECT_EQ(dumpStr.find( + "virtualcallassigned <estClass_3B_7CfuncName_7C_28Ljava_2Flang_2FString_3BI_29V (dread ref %Reg6_R"), 0); + EXPECT_EQ(dumpStr.find(", dread ref %Reg7_R", 99) != std::string::npos, true); + EXPECT_EQ(dumpStr.find(", dread i32 %Reg8_I) {}", 99) != std::string::npos, true); +} + +TEST_F(DexOpTest, FEIRStmtDexOpInvoke_StrFac) { + RedirectCout(); + std::unique_ptr dexOp = std::make_unique(allocator, 1, bc::kDexOpInvokeDirect); + dexOp->SetVA(3); + dexOp->SetVC(6); + MapleList argVRegNums({6, 7}, allocator.Adapter()); + dexOp->SetArgs(argVRegNums); + bc::BCReader::ClassElem elem; + elem.className = "Ljava/lang/String;"; + elem.elemName = ""; + elem.typeName = "(Ljava/lang/String;)V"; + dexOp->ParseImplUT(elem); + std::list feStmts = dexOp->EmitToFEIRStmts(); + ASSERT_EQ(feStmts.size(), 1); + std::list mirNodes = feStmts.front()->GenMIRStmts(mirBuilder); + ASSERT_EQ(mirNodes.size(), 1); + mirNodes.front()->Dump(); + std::string dumpStr = GetBufferString(); + std::string pattern = std::string("callassigned &Ljava_2Flang_2FStringFactory_3B_7CnewStringFromString_7C_28Ljava_" + 
"2Flang_2FString_3B_29Ljava_2Flang_2FString_3B \\(dread ref %Reg7_") + + HIR2MPLUTRegx::RefIndex(HIR2MPLUTRegx::kAnyNumber) + + std::string("\\) \\{ dassign %Reg6_") + HIR2MPLUTRegx::RefIndex(HIR2MPLUTRegx::kAnyNumber) + + std::string(" 0 \\}") + HIR2MPLUTRegx::Any(); + EXPECT_EQ(HIR2MPLUTRegx::Match(dumpStr, pattern), true); + RestoreCout(); +} + +// ---------- FEIRStmt-DexOpCheckCast ---------- +class DexOpCheckCastUT : public bc::DexOpCheckCast { + public: + DexOpCheckCastUT(MapleAllocator &allocatorIn, uint32 pcIn, bc::DexOpCode opcodeIn) + : bc::DexOpCheckCast(allocatorIn, pcIn, opcodeIn) {} + ~DexOpCheckCastUT() = default; + void SetRegType(const std::string &name) { + targetTypeNameIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(namemangler::EncodeName(name)); + vA.regType = allocator.GetMemPool()->New(allocator, vA, targetTypeNameIdx); + vDef.regType = allocator.GetMemPool()->New(allocator, vDef, targetTypeNameIdx); + } +}; + +TEST_F(DexOpTest, FEIRStmtDexOpCheckCast) { + std::unique_ptr dexOp = std::make_unique(allocator, 1, bc::kDexOpCheckCast); + dexOp->SetVA(0); + dexOp->SetRegType("Ljava/lang/String;"); + std::list feStmts = dexOp->EmitToFEIRStmts(); + ASSERT_EQ(feStmts.size(), 1); + std::list mirNodes = feStmts.front()->GenMIRStmts(mirBuilder); + ASSERT_EQ(mirNodes.size(), 1); + RedirectCout(); + mirNodes.front()->Dump(); + std::string dumpStr = GetBufferString(); + RestoreCout(); + EXPECT_EQ(dumpStr.find( + "intrinsiccallwithtypeassigned <* <$Ljava_2Flang_2FString_3B>> JAVA_CHECK_CAST (dread ref %Reg0_R") != + std::string::npos, true); + EXPECT_EQ(dumpStr.find(") { dassign %Reg0_R", 96) != std::string::npos, true); +} + +// ---------- FEIRStmt-DexOpInstanceOf ---------- +class DexOpInstanceOfUT : public bc::DexOpInstanceOf { + public: + DexOpInstanceOfUT(MapleAllocator &allocatorIn, uint32 pcIn, bc::DexOpCode opcodeIn) + : bc::DexOpInstanceOf(allocatorIn, pcIn, opcodeIn) {} + ~DexOpInstanceOfUT() = default; + void SetVBType(const std::string &name) { + vB.regType = allocator.GetMemPool()->New(allocator, vB, + GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(namemangler::EncodeName(name))); + } + + void SetTargetType(const std::string &name) { + typeName = name; + } +}; + +TEST_F(DexOpTest, FEIRStmtDexOpInstanceOf) { + std::unique_ptr dexOp = std::make_unique(allocator, 1, bc::kDexOpInstanceOf); + dexOp->SetVA(0); + dexOp->SetVB(1); + dexOp->SetVBType("Ljava/lang/Object;"); + dexOp->SetTargetType("Ljava/lang/String;"); + std::list feStmts = dexOp->EmitToFEIRStmts(); + ASSERT_EQ(feStmts.size(), 1); + std::list mirNodes = feStmts.front()->GenMIRStmts(mirBuilder); + ASSERT_EQ(mirNodes.size(), 1); + RedirectCout(); + mirNodes.front()->Dump(); + std::string dumpStr = GetBufferString(); + RestoreCout(); + EXPECT_EQ(dumpStr.find("dassign %Reg0_Z 0 (intrinsicopwithtype u1 <* <$Ljava_2Flang_2FString_3B>> JAVA_INSTANCE_OF " + "(dread ref %Reg1_R") != std::string::npos, true); +} + +// ---------- FEIRStmt-DexOpArrayLength ---------- +class DexOpArrayLengthUT : public bc::DexOpArrayLength { + public: + DexOpArrayLengthUT(MapleAllocator &allocatorIn, uint32 pcIn, bc::DexOpCode opcodeIn) + : bc::DexOpArrayLength(allocatorIn, pcIn, opcodeIn) {} + ~DexOpArrayLengthUT() = default; + + void SetVBType(const std::string &name) { + vB.regType = allocator.GetMemPool()->New(allocator, vB, + GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(namemangler::EncodeName(name))); + } + + void SetFuncNameIdx(uint32 idx) { + funcNameIdx = idx; + } +}; + +TEST_F(DexOpTest, 
FEIRStmtDexOpArrayLength) { + std::unique_ptr dexOp = std::make_unique(allocator, 1, bc::kDexOpArrayLength); + dexOp->SetVA(0); + dexOp->SetVB(1); + dexOp->SetVBType("[Ljava/lang/Object;"); + std::list feStmts = dexOp->EmitToFEIRStmts(); + ASSERT_EQ(feStmts.size(), 1); + std::list mirNodes = feStmts.front()->GenMIRStmts(mirBuilder); + ASSERT_EQ(mirNodes.size(), 1); + RedirectCout(); + mirNodes.front()->Dump(); + std::string dumpStr = GetBufferString(); + RestoreCout(); + EXPECT_EQ(dumpStr.find("dassign %Reg0_I 0 (intrinsicop i32 JAVA_ARRAY_LENGTH (dread ref %Reg1_R") == 0, true); +} + +// ---------- FEIRStmt-DexOpArrayLengthWithCatch ---------- +TEST_F(DexOpTest, FEIRStmtDexOpArrayLengthWithCatch) { + std::unique_ptr dexOp = std::make_unique(allocator, 1, bc::kDexOpArrayLength); + dexOp->SetVA(0); + dexOp->SetVB(1); + dexOp->SetVBType("[Ljava/lang/Object;"); + dexOp->SetExceptionType(bc::BCUtil::GetJavaExceptionNameMplIdx()); + dexOp->SetInstructionKind(bc::kCatch); + dexOp->SetFuncNameIdx(111); + std::list feStmts = dexOp->EmitToFEIRStmts(); + ASSERT_EQ(feStmts.size(), 2); + std::list mirNodes = feStmts.front()->GenMIRStmts(mirBuilder); + ASSERT_EQ(mirNodes.size(), 2); + RedirectCout(); + mirNodes.front()->Dump(); + std::string dumpStr1 = GetBufferString(); + mirNodes.back()->Dump(); + std::string dumpStr2 = GetBufferString(); + RestoreCout(); + EXPECT_EQ(dumpStr1.find("@L111_1") == 0, true); + EXPECT_EQ(dumpStr2.find("catch { <* <$Ljava_2Flang_2FException_3B>> }") == 0, true); +} + +// ---------- FEIRStmtDAssign-DexOpNewInstance ---------- +class DexOpNewInstanceUT : public bc::DexOpNewInstance { + public: + DexOpNewInstanceUT(MapleAllocator &allocatorIn, uint32 pcIn, bc::DexOpCode opcodeIn) + : bc::DexOpNewInstance(allocatorIn, pcIn, opcodeIn) {} + ~DexOpNewInstanceUT() = default; + void SetVAType(const std::string &name) { + vA.regType = allocator.GetMemPool()->New(allocator, vA, + GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(namemangler::EncodeName(name))); + } +}; + +TEST_F(DexOpTest, DexOpNewInstance) { + std::unique_ptr dexOp = std::make_unique(allocator, 0, bc::kDexOpNewInstance); + dexOp->SetVA(0); + dexOp->SetVAType("Ljava/lang/String;"); + dexOp->SetVB(1); + std::list stmts = dexOp->EmitToFEIRStmts(); + std::list mirStmts = stmts.front()->GenMIRStmts(mirBuilder); + RedirectCout(); + mirStmts.front()->Dump(); + std::string dumpStr = GetBufferString(); + std::string pattern = std::string( + "intrinsiccallwithtype <\\$Ljava_2Flang_2FString_3B> JAVA_CLINIT_CHECK \\(\\)") + + HIR2MPLUTRegx::Any(); + EXPECT_EQ(HIR2MPLUTRegx::Match(dumpStr, pattern), true); + EXPECT_EQ(mirStmts.size(), 1); + RestoreCout(); + mirStmts = stmts.back()->GenMIRStmts(mirBuilder); + RedirectCout(); + mirStmts.back()->Dump(); + dumpStr = GetBufferString(); + pattern = std::string( + "dassign %Reg0_") + HIR2MPLUTRegx::RefIndex(HIR2MPLUTRegx::kAnyNumber) + + std::string(" 0 \\(gcmalloc ref <\\$Ljava_2Flang_2FString_3B>\\)") + + HIR2MPLUTRegx::Any(); + EXPECT_EQ(HIR2MPLUTRegx::Match(dumpStr, pattern), true); + EXPECT_EQ(mirStmts.size(), 1); + RestoreCout(); +} + +TEST_F(DexOpTest, DexOpNewInstance_Permanent) { + RedirectCout(); + std::unique_ptr dexOp = std::make_unique(allocator, 0, bc::kDexOpNewInstance); + dexOp->SetVA(0); + dexOp->SetVAType("Ljava/lang/String;"); + dexOp->SetVB(1); + dexOp->isRcPermanent = true; + dexOp->EmitToFEIRStmts().back()->GenMIRStmts(mirBuilder).back()->Dump(); + std::string dumpStr = GetBufferString(); + std::string pattern = std::string( + "dassign %Reg0_") + 
HIR2MPLUTRegx::RefIndex(HIR2MPLUTRegx::kAnyNumber) + + std::string(" 0 \\(gcpermalloc ref <\\$Ljava_2Flang_2FString_3B>\\)") + + HIR2MPLUTRegx::Any(); + EXPECT_EQ(HIR2MPLUTRegx::Match(dumpStr, pattern), true); + RestoreCout(); +} + +// ---------- FEIRStmt-DexOpConst ---------- +class DexOpConstUT : public bc::DexOpConst { + public: + DexOpConstUT(MapleAllocator &allocatorIn, uint32 pcIn, bc::DexOpCode opcodeIn) + : bc::DexOpConst(allocatorIn, pcIn, opcodeIn) {} + ~DexOpConstUT() = default; + + void SetWideAndVReg(bool isWideIn) { + isWide = isWideIn; + SetVAImpl(0); // reg num + SetVBImpl(123); // const value + SetWideVBImpl(static_cast(-1)); // wide const value + } +}; + +TEST_F(DexOpTest, DexOpConst) { + RedirectCout(); + // const/4 + std::unique_ptr dexOpConst4 = std::make_unique(allocator, 1, bc::kDexOpConst4); + dexOpConst4->SetWideAndVReg(false); + std::list feStmts = dexOpConst4->EmitToFEIRStmts(); + ASSERT_EQ(feStmts.size(), 1); + std::list mirNodes = feStmts.front()->GenMIRStmts(mirBuilder); + ASSERT_EQ(mirNodes.size(), 1); + mirNodes.front()->Dump(); + std::string dumpStr = GetBufferString(); + std::string pattern = std::string("dassign %Reg0_I 0 \\(cvt i32 i8 \\(constval i8 123\\)\\)") + HIR2MPLUTRegx::Any(); + EXPECT_EQ(HIR2MPLUTRegx::Match(dumpStr, pattern), true); + // const/16 + std::unique_ptr dexOpConst16 = std::make_unique(allocator, 1, bc::kDexOpConst16); + dexOpConst16->SetWideAndVReg(false); + dexOpConst16->EmitToFEIRStmts().front()->GenMIRStmts(mirBuilder).front()->Dump(); + dumpStr = GetBufferString(); + pattern = std::string("dassign %Reg0_I 0 \\(cvt i32 i16 \\(constval i16 123\\)\\)") + HIR2MPLUTRegx::Any(); + EXPECT_EQ(HIR2MPLUTRegx::Match(dumpStr, pattern), true); + // const + std::unique_ptr dexOpConst = std::make_unique(allocator, 1, bc::kDexOpConst); + dexOpConst->SetWideAndVReg(false); + dexOpConst->EmitToFEIRStmts().front()->GenMIRStmts(mirBuilder).front()->Dump(); + dumpStr = GetBufferString(); + pattern = std::string("dassign %Reg0_I 0 \\(constval i32 123\\)") + HIR2MPLUTRegx::Any(); + EXPECT_EQ(HIR2MPLUTRegx::Match(dumpStr, pattern), true); + // const/high16 + std::unique_ptr dexOpConstHigh16 = std::make_unique(allocator, 1, bc::kDexOpConstHigh16); + dexOpConstHigh16->SetWideAndVReg(false); + dexOpConstHigh16->EmitToFEIRStmts().front()->GenMIRStmts(mirBuilder).front()->Dump(); + dumpStr = GetBufferString(); + auto pos = dumpStr.find("dassign %Reg0_I 0 (shl i32 ("); + EXPECT_EQ(pos, 0); + pos = dumpStr.find(" cvt i32 i16 (constval i16 123),", 28); + EXPECT_EQ(pos, 29); + pos = dumpStr.find(" constval i32 16))", 65); + EXPECT_EQ(pos, 65); + RestoreCout(); +} + +TEST_F(DexOpTest, DexOpConstWide) { + RedirectCout(); + // const-wide/16 + std::unique_ptr dexOpConstWide16 = std::make_unique(allocator, 1, bc::kDexOpConstWide16); + dexOpConstWide16->SetWideAndVReg(true); + std::list feStmts = dexOpConstWide16->EmitToFEIRStmts(); + ASSERT_EQ(feStmts.size(), 1); + std::list mirNodes = feStmts.front()->GenMIRStmts(mirBuilder); + ASSERT_EQ(mirNodes.size(), 1); + mirNodes.front()->Dump(); + std::string dumpStr = GetBufferString(); + std::string pattern = std::string("dassign %Reg0_J 0 \\(cvt i64 i16 \\(constval i16 123\\)\\)") + HIR2MPLUTRegx::Any(); + EXPECT_EQ(HIR2MPLUTRegx::Match(dumpStr, pattern), true); + // const-wide/32 + std::unique_ptr dexOpConstWide32 = std::make_unique(allocator, 1, bc::kDexOpConstWide32); + dexOpConstWide32->SetWideAndVReg(true); + dexOpConstWide32->EmitToFEIRStmts().front()->GenMIRStmts(mirBuilder).front()->Dump(); + dumpStr = 
GetBufferString(); + pattern = std::string("dassign %Reg0_J 0 \\(cvt i64 i32 \\(constval i32 123\\)\\)") + HIR2MPLUTRegx::Any(); + EXPECT_EQ(HIR2MPLUTRegx::Match(dumpStr, pattern), true); + // const-wide + std::unique_ptr dexOpConstWide = std::make_unique(allocator, 1, bc::kDexOpConstWide); + dexOpConstWide->SetWideAndVReg(true); + dexOpConstWide->EmitToFEIRStmts().front()->GenMIRStmts(mirBuilder).front()->Dump(); + pattern = std::string("dassign %Reg0_J 0 \\(constval i64 -1\\)") + HIR2MPLUTRegx::Any(); + dumpStr = GetBufferString(); + EXPECT_EQ(HIR2MPLUTRegx::Match(dumpStr, pattern), true); + // const-wide/high16 + std::unique_ptr dexOpConstWideHigh16 = + std::make_unique(allocator, 1, bc::kDexOpConstWideHigh16); + dexOpConstWideHigh16->SetWideAndVReg(true); + dexOpConstWideHigh16->EmitToFEIRStmts().front()->GenMIRStmts(mirBuilder).front()->Dump(); + dumpStr = GetBufferString(); + auto pos = dumpStr.find("dassign %Reg0_J 0 (shl i64 ("); + EXPECT_EQ(pos, 0); + pos = dumpStr.find(" cvt i64 i16 (constval i16 123),", 29); + EXPECT_EQ(pos, 29); + pos = dumpStr.find(" constval i32 48))", 65); + EXPECT_EQ(pos, 65); + RestoreCout(); +} + +// ---------- FEIRStmtDAssign-DexOpNewArray ---------- +class DexOpNewArrayUT : public bc::DexOpNewArray { + public: + DexOpNewArrayUT(MapleAllocator &allocatorIn, uint32 pcIn, bc::DexOpCode opcodeIn) + : bc::DexOpNewArray(allocatorIn, pcIn, opcodeIn) {} + ~DexOpNewArrayUT() = default; + void SetVAType(const std::string &name) { + vA.regType = allocator.GetMemPool()->New(allocator, vA, + GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(namemangler::EncodeName(name))); + vB.regType = allocator.GetMemPool()->New(allocator, vB, + GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(bc::BCUtil::kInt)); + } +}; + +TEST_F(DexOpTest, DexOpNewArrayUT) { + std::unique_ptr dexOp = std::make_unique(allocator, 1, bc::kDexOpNewArray); + dexOp->SetVA(0); + dexOp->SetVAType("[Ljava/lang/Object;"); + dexOp->SetVB(1); + dexOp->SetVC(2); + std::list feStmts = dexOp->EmitToFEIRStmts(); + ASSERT_EQ(feStmts.size(), 1); + std::list mirNodes = feStmts.front()->GenMIRStmts(mirBuilder); + ASSERT_EQ(mirNodes.size(), 1); + RedirectCout(); + mirNodes.front()->Dump(); + std::string dumpStr = GetBufferString(); + std::string pattern = std::string( + "dassign %Reg0_") + HIR2MPLUTRegx::RefIndex(HIR2MPLUTRegx::kAnyNumber) + + std::string(" 0 \\(gcmallocjarray ref <\\[\\] <\\* <\\$Ljava_2Flang_2FObject_3B>>> ") + + std::string("\\(dread i32 %Reg1_I\\)\\)") + + HIR2MPLUTRegx::Any(); + EXPECT_EQ(HIR2MPLUTRegx::Match(dumpStr, pattern), true); + RestoreCout(); +} + +TEST_F(DexOpTest, DexOpNewArrayUT_Pernament) { + RedirectCout(); + std::unique_ptr dexOp = std::make_unique(allocator, 1, bc::kDexOpNewArray); + dexOp->SetVA(0); + dexOp->SetVAType("[Ljava/lang/Object;"); + dexOp->SetVB(1); + dexOp->SetVC(2); + dexOp->isRcPermanent = true; + dexOp->EmitToFEIRStmts().front()->GenMIRStmts(mirBuilder).front()->Dump(); + std::string dumpStr = GetBufferString(); + std::string pattern = std::string( + "dassign %Reg0_") + HIR2MPLUTRegx::RefIndex(HIR2MPLUTRegx::kAnyNumber) + + std::string(" 0 \\(gcpermallocjarray ref <\\[\\] <\\* <\\$Ljava_2Flang_2FObject_3B>>> ") + + std::string("\\(dread i32 %Reg1_I\\)\\)") + + HIR2MPLUTRegx::Any(); + EXPECT_EQ(HIR2MPLUTRegx::Match(dumpStr, pattern), true); + RestoreCout(); +} + +// ---------- FEIRExprBinary - binop/2addr ---------- +class DexOpBinaryOp2AddrUT : public bc::DexOpBinaryOp2Addr { + public: + DexOpBinaryOp2AddrUT(MapleAllocator &allocatorIn, uint32 pcIn, 
bc::DexOpCode opcodeIn) + : bc::DexOpBinaryOp2Addr(allocatorIn, pcIn, opcodeIn) {} + ~DexOpBinaryOp2AddrUT() = default; + void SetVAImpl(uint32 num) { + vDef.regNum = num; + vDef.isDef = true; + vA.regNum = num; + defedRegs.emplace_back(&vDef); + usedRegs.emplace_back(&vA); + std::string typeName; // typeName of A, B are same + if (bc::kDexOpAddInt2Addr <= opcode && opcode <= bc::kDexOpUshrInt2Addr) { + typeName = bc::BCUtil::kInt; + } else if (bc::kDexOpAddLong2Addr <= opcode && opcode <= bc::kDexOpUshrLong2Addr) { + typeName = bc::BCUtil::kLong; + } else if (bc::kDexOpAddFloat2Addr <= opcode && opcode <= bc::kDexOpRemFloat2Addr) { + typeName = bc::BCUtil::kFloat; + } else if (bc::kDexOpAddDouble2Addr <= opcode && opcode <= bc::kDexOpRemDouble2Addr) { + typeName = bc::BCUtil::kDouble; + } else { + CHECK_FATAL(false, "Invalid opcode: 0x%x in DexOpBinaryOp2Addr", opcode); + } + GStrIdx usedTypeNameIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(typeName); + vDef.regType = allocator.GetMemPool()->New(allocator, vDef, usedTypeNameIdx); + vA.regType = allocator.GetMemPool()->New(allocator, vA, usedTypeNameIdx); + vB.regType = allocator.GetMemPool()->New(allocator, vB, usedTypeNameIdx); + } +}; + +TEST_F(DexOpTest, DexOpBinaryOp2AddrIntAdd) { + std::unique_ptr dexOp = + std::make_unique(allocator, 0, bc::kDexOpAddInt2Addr); + dexOp->SetVA(0); + dexOp->SetVB(1); + std::list stmts = dexOp->EmitToFEIRStmts(); + std::list mirStmts = stmts.front()->GenMIRStmts(mirBuilder); + RedirectCout(); + mirStmts.front()->Dump(); + std::string dumpStr = GetBufferString(); + std::string expectedStr = "dassign %Reg0_I 0 (add i32 (dread i32 %Reg0_I, dread i32 %Reg1_I))"; + EXPECT_EQ(dumpStr.find(expectedStr) != std::string::npos, true); + RestoreCout(); +} + +TEST_F(DexOpTest, FEIRExprBinaryOf2AddrIntSub) { + std::unique_ptr dexOp = + std::make_unique(allocator, 0, bc::kDexOpSubInt2Addr); + dexOp->SetVA(0); + dexOp->SetVB(1); + std::list stmts = dexOp->EmitToFEIRStmts(); + std::list mirStmts = stmts.front()->GenMIRStmts(mirBuilder); + RedirectCout(); + mirStmts.front()->Dump(); + std::string dumpStr = GetBufferString(); + std::string expectedStr = "dassign %Reg0_I 0 (sub i32 (dread i32 %Reg0_I, dread i32 %Reg1_I))"; + EXPECT_EQ(dumpStr.find(expectedStr) != std::string::npos, true); + RestoreCout(); +} + +TEST_F(DexOpTest, FEIRExprBinaryOf2AddrIntShr) { + std::unique_ptr dexOp = + std::make_unique(allocator, 0, bc::kDexOpShrInt2Addr); + dexOp->SetVA(0); + dexOp->SetVB(1); + std::list stmts = dexOp->EmitToFEIRStmts(); + std::list mirStmts = stmts.front()->GenMIRStmts(mirBuilder); + RedirectCout(); + mirStmts.front()->Dump(); + std::string dumpStr = GetBufferString(); + std::string expectedStr = "dassign %Reg0_I 0 (ashr i32 (dread i32 %Reg0_I, dread i32 %Reg1_I))"; + EXPECT_EQ(dumpStr.find(expectedStr) != std::string::npos, true); + RestoreCout(); +} + +TEST_F(DexOpTest, FEIRExprBinaryOf2AddrLongXor) { + std::unique_ptr dexOp = + std::make_unique(allocator, 0, bc::kDexOpXorLong2Addr); + dexOp->SetVA(0); + dexOp->SetVB(1); + std::list stmts = dexOp->EmitToFEIRStmts(); + std::list mirStmts = stmts.front()->GenMIRStmts(mirBuilder); + RedirectCout(); + mirStmts.front()->Dump(); + std::string dumpStr = GetBufferString(); + std::string expectedStr = "dassign %Reg0_J 0 (bxor i64 (dread i64 %Reg0_J, dread i64 %Reg1_J))"; + EXPECT_EQ(dumpStr.find(expectedStr) != std::string::npos, true); + RestoreCout(); +} + +TEST_F(DexOpTest, FEIRExprBinaryOf2AddrFloatSub) { + std::unique_ptr dexOp = + 
std::make_unique(allocator, 0, bc::kDexOpSubFloat2Addr); + dexOp->SetVA(0); + dexOp->SetVB(1); + std::list stmts = dexOp->EmitToFEIRStmts(); + std::list mirStmts = stmts.front()->GenMIRStmts(mirBuilder); + RedirectCout(); + mirStmts.front()->Dump(); + std::string dumpStr = GetBufferString(); + std::string expectedStr = ""; + EXPECT_EQ(dumpStr.find(expectedStr) != std::string::npos, true); + RestoreCout(); +} + +TEST_F(DexOpTest, FEIRExprBinaryOf2AddrDoubleMul) { + std::unique_ptr dexOp = + std::make_unique(allocator, 0, bc::kDexOpMulDouble2Addr); + dexOp->SetVA(0); + dexOp->SetVB(1); + std::list stmts = dexOp->EmitToFEIRStmts(); + std::list mirStmts = stmts.front()->GenMIRStmts(mirBuilder); + RedirectCout(); + mirStmts.front()->Dump(); + std::string dumpStr = GetBufferString(); + std::string expectedStr = "dassign %Reg0_D 0 (mul f64 (dread f64 %Reg0_D, dread f64 %Reg1_D))"; + EXPECT_EQ(dumpStr.find(expectedStr) != std::string::npos, true); + RestoreCout(); +} + +TEST_F(DexOpTest, FEIRExprBinaryOf2AddrDoubleDiv) { + std::unique_ptr dexOp = + std::make_unique(allocator, 0, bc::kDexOpDivDouble2Addr); + dexOp->SetVA(0); + dexOp->SetVB(1); + std::list stmts = dexOp->EmitToFEIRStmts(); + std::list mirStmts = stmts.front()->GenMIRStmts(mirBuilder); + RedirectCout(); + mirStmts.front()->Dump(); + std::string dumpStr = GetBufferString(); + std::string expectedStr = "dassign %Reg0_D 0 (div f64 (dread f64 %Reg0_D, dread f64 %Reg1_D))"; + EXPECT_EQ(dumpStr.find(expectedStr) != std::string::npos, true); + RestoreCout(); +} + +TEST_F(DexOpTest, FEIRExprBinaryOf2AddrDoubleRem) { + std::unique_ptr dexOp = + std::make_unique(allocator, 0, bc::kDexOpRemDouble2Addr); + dexOp->SetVA(0); + dexOp->SetVB(1); + std::list stmts = dexOp->EmitToFEIRStmts(); + std::list mirStmts = stmts.front()->GenMIRStmts(mirBuilder); + RedirectCout(); + mirStmts.front()->Dump(); + std::string dumpStr = GetBufferString(); + std::string expectedStr = "dassign %Reg0_D 0 (rem f64 (dread f64 %Reg0_D, dread f64 %Reg1_D))"; + EXPECT_EQ(dumpStr.find(expectedStr) != std::string::npos, true); + RestoreCout(); +} + +// ---------- FEIRExprUnary - unop ---------- +class DexOpUnaryOpUT : public bc::DexOpUnaryOp { + public: + DexOpUnaryOpUT(MapleAllocator &allocatorIn, uint32 pcIn, bc::DexOpCode opcodeIn) + : bc::DexOpUnaryOp(allocatorIn, pcIn, opcodeIn) {} + ~DexOpUnaryOpUT() = default; + void SetVAImpl(uint32 num) { + vA.regNum = num; + vA.isDef = true; + defedRegs.emplace_back(&vA); + auto it = GetOpcodeMapForUnary().find(opcode); + CHECK_FATAL(it != GetOpcodeMapForUnary().end(), "Invalid opcode: %u in DexOpUnaryOp", opcode); + mirOp = std::get<0>(it->second); + vA.regType = allocator.GetMemPool()->New(allocator, vA, std::get<1>(it->second)); + vB.regType = allocator.GetMemPool()->New(allocator, vB, std::get<2>(it->second)); + } +}; + +TEST_F(DexOpTest, DexOpunaryOp) { + RedirectCout(); + // OP_cvt + std::unique_ptr dexOp1 = std::make_unique(allocator, 0, bc::kDexOpFloatToInt); + dexOp1->SetVA(0); + dexOp1->SetVB(1); + std::list stmts = dexOp1->EmitToFEIRStmts(); + ASSERT_EQ(stmts.size(), 1); + std::list mirStmts = stmts.front()->GenMIRStmts(mirBuilder); + mirStmts.front()->Dump(); + std::string dumpStr = GetBufferString(); + std::string pattern = std::string("dassign %Reg0_I 0 \\(cvt i32 f32 \\(dread f32 %Reg1_F\\)\\)") + HIR2MPLUTRegx::Any(); + EXPECT_EQ(HIR2MPLUTRegx::Match(dumpStr, pattern), true); + // OP_sext + std::unique_ptr dexOp2 = std::make_unique(allocator, 0, bc::kDexOpIntToByte); + dexOp2->SetVA(0); + dexOp2->SetVB(1); + 
dexOp2->EmitToFEIRStmts().front()->GenMIRStmts(mirBuilder).front()->Dump(); + dumpStr = GetBufferString(); + pattern = std::string("dassign %Reg0_B 0 \\(sext i32 8 \\(dread i32 %Reg1_I\\)\\)") + HIR2MPLUTRegx::Any(); + EXPECT_EQ(HIR2MPLUTRegx::Match(dumpStr, pattern), true); + // OP_zext + std::unique_ptr dexOp3 = std::make_unique(allocator, 0, bc::kDexOpIntToChar); + dexOp3->SetVA(0); + dexOp3->SetVB(1); + dexOp3->EmitToFEIRStmts().front()->GenMIRStmts(mirBuilder).front()->Dump(); + dumpStr = GetBufferString(); + pattern = std::string("dassign %Reg0_C 0 \\(zext u32 16 \\(dread i32 %Reg1_I\\)\\)") + HIR2MPLUTRegx::Any(); + EXPECT_EQ(HIR2MPLUTRegx::Match(dumpStr, pattern), true); + // OP_neg + std::unique_ptr dexOp4 = std::make_unique(allocator, 0, bc::kDexOpNegInt); + dexOp4->SetVA(0); + dexOp4->SetVB(1); + dexOp4->EmitToFEIRStmts().front()->GenMIRStmts(mirBuilder).front()->Dump(); + dumpStr = GetBufferString(); + pattern = std::string("dassign %Reg0_I 0 \\(neg i32 \\(dread i32 %Reg1_I\\)\\)") + HIR2MPLUTRegx::Any(); + EXPECT_EQ(HIR2MPLUTRegx::Match(dumpStr, pattern), true); + // OP_bnot + std::unique_ptr dexOp5 = std::make_unique(allocator, 0, bc::kDexOpNotLong); + dexOp5->SetVA(0); + dexOp5->SetVB(1); + dexOp5->EmitToFEIRStmts().front()->GenMIRStmts(mirBuilder).front()->Dump(); + dumpStr = GetBufferString(); + pattern = std::string("dassign %Reg0_J 0 \\(bnot i64 \\(dread i64 %Reg1_J\\)\\)") + HIR2MPLUTRegx::Any(); + EXPECT_EQ(HIR2MPLUTRegx::Match(dumpStr, pattern), true); + RestoreCout(); +} + +// ---------- FEIRExprBinary - binop ---------- +class DexOpBinaryOpUT : public bc::DexOpBinaryOp { + public: + DexOpBinaryOpUT(MapleAllocator &allocatorIn, uint32 pcIn, bc::DexOpCode opcodeIn) + : bc::DexOpBinaryOp(allocatorIn, pcIn, opcodeIn) {} + ~DexOpBinaryOpUT() = default; + void SetVBImpl(uint32 num) { + vB.regNum = num; + vB.regTypeItem = vA.regTypeItem; + vB.regType = vA.regType; + usedRegs.emplace_back(&vB); + } + + void SetVCImpl(uint32 num) { + vC.regNum = num; + vC.regTypeItem = vA.regTypeItem; + vC.regType = vA.regType; + usedRegs.emplace_back(&vC); + } +}; + +TEST_F(DexOpTest, DexOpBinaryOpOfDoubleRem) { + std::unique_ptr dexOp = std::make_unique(allocator, 0, bc::kDexOpRemDouble); + dexOp->SetVA(0); + dexOp->SetVB(1); + dexOp->SetVC(2); + std::list stmts = dexOp->EmitToFEIRStmts(); + std::list mirStmts = stmts.front()->GenMIRStmts(mirBuilder); + RedirectCout(); + mirStmts.front()->Dump(); + std::string dumpStr = GetBufferString(); + std::string expectedStr = "dassign %Reg0_D 0 (rem f64 (dread f64 %Reg1_D, dread f64 %Reg2_D))"; + EXPECT_EQ(dumpStr.find(expectedStr) != std::string::npos, true); + RestoreCout(); +} + +// ---------- FEIRExprBinary - binop/Lit ---------- +class DexOpBinaryOpLitUT : public bc::DexOpBinaryOpLit { + public: + DexOpBinaryOpLitUT(MapleAllocator &allocatorIn, uint32 pcIn, bc::DexOpCode opcodeIn) + : bc::DexOpBinaryOpLit(allocatorIn, pcIn, opcodeIn) {} + ~DexOpBinaryOpLitUT() = default; + void SetVBImpl(uint32 num) { + vB.regNum = num; + std::string typeName = bc::BCUtil::kInt; + GStrIdx usedTypeNameIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(typeName); + vB.regType = allocator.GetMemPool()->New(allocator, vB, usedTypeNameIdx); + usedRegs.emplace_back(&vB); + } +}; + +TEST_F(DexOpTest, DexOpBinaryOpLiOfRemIntLit16) { + std::unique_ptr dexOp = + std::make_unique(allocator, 0, bc::kDexOpRemIntLit16); + dexOp->SetVA(0); + dexOp->SetVB(1); + dexOp->SetVC(20); + std::list stmts = dexOp->EmitToFEIRStmts(); + std::list mirStmts = 
stmts.front()->GenMIRStmts(mirBuilder); + RedirectCout(); + mirStmts.front()->Dump(); + std::string dumpStr = GetBufferString(); + std::string expectedStr = "dassign %Reg0_I 0 (rem i32 (\n dread i32 %Reg1_I,\n cvt i32 i16 (constval i16 20)))"; + EXPECT_EQ(dumpStr.find(expectedStr) != std::string::npos, true); + RestoreCout(); +} + +TEST_F(DexOpTest, DexOpBinaryOpLiOfRemIntLit8) { + std::unique_ptr dexOp = + std::make_unique(allocator, 0, bc::kDexOpRemIntLit8); + dexOp->SetVA(0); + dexOp->SetVB(1); + dexOp->SetVC(20); + std::list stmts = dexOp->EmitToFEIRStmts(); + std::list mirStmts = stmts.front()->GenMIRStmts(mirBuilder); + RedirectCout(); + mirStmts.front()->Dump(); + std::string dumpStr = GetBufferString(); + std::string expectedStr = "dassign %Reg0_I 0 (rem i32 (\n dread i32 %Reg1_I,\n cvt i32 i8 (constval i8 20)))"; + EXPECT_EQ(dumpStr.find(expectedStr) != std::string::npos, true); + RestoreCout(); +} + +TEST_F(DexOpTest, DexOpBinaryOpLiOfRsubIntLit8) { + std::unique_ptr dexOp = + std::make_unique(allocator, 0, bc::kDexOpRsubIntLit8); + dexOp->SetVA(0); + dexOp->SetVB(1); + dexOp->SetVC(20); + std::list stmts = dexOp->EmitToFEIRStmts(); + std::list mirStmts = stmts.front()->GenMIRStmts(mirBuilder); + RedirectCout(); + mirStmts.front()->Dump(); + std::string dumpStr = GetBufferString(); + std::string expectedStr = "dassign %Reg0_I 0 (sub i32 (\n cvt i32 i8 (constval i8 20),\n dread i32 %Reg1_I))"; + EXPECT_EQ(dumpStr.find(expectedStr) != std::string::npos, true); + RestoreCout(); +} + +class DexOpSgetUT : public bc::DexOpSget { + public: + DexOpSgetUT(MapleAllocator &allocatorIn, uint32 pcIn, bc::DexOpCode opcodeIn) + : bc::DexOpSget(allocatorIn, pcIn, opcodeIn) {} + ~DexOpSgetUT() = default; + std::map opcodeTypeMap = { + {bc::BCUtil::kChar, "C"}, + {bc::BCUtil::kByte, "B"}, + {bc::BCUtil::kShort, "S"}, + {bc::BCUtil::kInt, "I"}, + {bc::BCUtil::kFloat, "F"}, + {bc::BCUtil::kLong, "J"}, + {bc::BCUtil::kDouble, "D"}, + {bc::BCUtil::kJavaObjectName, "Ljava/lang/Object;"}, + }; + std::map opcodeMirMap = { + {bc::BCUtil::kChar, "u32"}, + {bc::BCUtil::kByte, "i32"}, + {bc::BCUtil::kShort, "i32"}, + {bc::BCUtil::kInt, "i32"}, + {bc::BCUtil::kFloat, "f32"}, + {bc::BCUtil::kLong, "i64"}, + {bc::BCUtil::kDouble, "f64"}, + {bc::BCUtil::kDouble, "D"}, + {bc::BCUtil::kJavaObjectName, "ref"}, + }; + void SetFieldInfoArg(std::string typeNameIn) { + bc::BCReader::ClassElem fieldInfo; + fieldInfo.className = "Landroid/icu/text/CurrencyMetaInfo;"; + fieldInfo.elemName = "hasData"; + fieldInfo.typeName = typeNameIn; + structElemNameIdx = allocator.GetMemPool()->New( + fieldInfo.className, fieldInfo.elemName, fieldInfo.typeName); + vA.regType = allocator.GetMemPool()->New(allocator, vA, structElemNameIdx->type); + } +}; + +TEST_F(DexOpTest, DexOpSget) { + std::unique_ptr dexOp = std::make_unique(allocator, 0, bc::kDexOpSget); + dexOp->SetVA(3); + for (std::map::iterator it = dexOp->opcodeTypeMap.begin(); + it != dexOp->opcodeTypeMap.end(); ++it) { + auto mirOp = dexOp->opcodeMirMap.find(it->first); + CHECK_FATAL(mirOp != dexOp->opcodeMirMap.end(), "Invalid opcode"); + + dexOp->SetFieldInfoArg(it->second); + std::list stmts = dexOp->EmitToFEIRStmts(); + std::list mirStmts = stmts.front()->GenMIRStmts(mirBuilder); + RedirectCout(); + mirStmts.front()->Dump(); + std::string dumpStr = GetBufferString(); + std::string pattern = std::string( + "intrinsiccallwithtype <\\$Ljava_2Flang_2FObject_3B> JAVA_CLINIT_CHECK \\(\\)") + + HIR2MPLUTRegx::Any(); + EXPECT_EQ(HIR2MPLUTRegx::Match(dumpStr, pattern), true); + 
EXPECT_EQ(mirStmts.size(), 2); + RestoreCout(); + + mirStmts = stmts.back()->GenMIRStmts(mirBuilder); + RedirectCout(); + mirStmts.back()->Dump(); + dumpStr = GetBufferString(); + if (it->first != bc::BCUtil::kJavaObjectName) { + pattern = std::string("dassign" + std::string(" %Reg3_") + it->second + " 0 \\(dread ") + + std::string(mirOp->second) + " \\$Landroid_2Ficu_2Ftext_2FCurrencyMetaInfo_3B_7ChasData" + + std::string("\\)") + HIR2MPLUTRegx::Any(); + } else { + pattern = std::string("dassign " + std::string("%Reg3_") + HIR2MPLUTRegx::RefIndex(HIR2MPLUTRegx::kAnyNumber)) + + " 0 \\(dread " + mirOp->second + std::string(" \\$Landroid_2Ficu_2Ftext_2FCurrencyMetaInfo_3B_7ChasData\\)") + + HIR2MPLUTRegx::Any(); + } + EXPECT_EQ(HIR2MPLUTRegx::Match(dumpStr, pattern), true); + EXPECT_EQ(mirStmts.size(), 2); + RestoreCout(); + } +} + +class DexOpSputUT : public bc::DexOpSput { + public: + DexOpSputUT(MapleAllocator &allocatorIn, uint32 pcIn, bc::DexOpCode opcodeIn) + : bc::DexOpSput(allocatorIn, pcIn, opcodeIn) {} + ~DexOpSputUT() = default; + std::map opcodeTypeMap = { + {bc::BCUtil::kChar, "C"}, + {bc::BCUtil::kByte, "B"}, + {bc::BCUtil::kShort, "S"}, + {bc::BCUtil::kInt, "I"}, + {bc::BCUtil::kFloat, "F"}, + {bc::BCUtil::kLong, "J"}, + {bc::BCUtil::kDouble, "D"}, + {bc::BCUtil::kJavaObjectName, "Ljava/lang/Object;"}, + }; + + std::map opcodeMirMap = { + {bc::BCUtil::kChar, "u32"}, + {bc::BCUtil::kByte, "i32"}, + {bc::BCUtil::kShort, "i32"}, + {bc::BCUtil::kInt, "i32"}, + {bc::BCUtil::kFloat, "f32"}, + {bc::BCUtil::kLong, "i64"}, + {bc::BCUtil::kDouble, "f64"}, + {bc::BCUtil::kDouble, "D"}, + {bc::BCUtil::kJavaObjectName, "ref"}, + }; + + void SetFieldInfoArg(std::string typeNameIn) { + bc::BCReader::ClassElem fieldInfo; + fieldInfo.className = "Landroid/icu/text/CurrencyMetaInfo;"; + fieldInfo.elemName = "hasData"; + fieldInfo.typeName = typeNameIn; + structElemNameIdx = allocator.GetMemPool()->New( + fieldInfo.className, fieldInfo.elemName, fieldInfo.typeName); + vA.regType = allocator.GetMemPool()->New(allocator, vA, structElemNameIdx->type); + } +}; + +TEST_F(DexOpTest, DexOpSput) { + std::unique_ptr dexOp = std::make_unique(allocator, 0, bc::kDexOpSput); + dexOp->SetVA(3); + for (std::map::iterator it = dexOp->opcodeTypeMap.begin(); + it != dexOp->opcodeTypeMap.end(); ++it) { + auto mirOp = dexOp->opcodeMirMap.find(it->first); + CHECK_FATAL(mirOp != dexOp->opcodeMirMap.end(), "Invalid opcode"); + + dexOp->SetFieldInfoArg(it->second); + std::list stmts = dexOp->EmitToFEIRStmts(); + std::list mirStmts = stmts.front()->GenMIRStmts(mirBuilder); + RedirectCout(); + mirStmts.front()->Dump(); + std::string dumpStr = GetBufferString(); + std::string pattern = std::string( + "intrinsiccallwithtype <\\$Ljava_2Flang_2FObject_3B> JAVA_CLINIT_CHECK \\(\\)") + + HIR2MPLUTRegx::Any(); + EXPECT_EQ(HIR2MPLUTRegx::Match(dumpStr, pattern), true); + EXPECT_EQ(mirStmts.size(), 2); + RestoreCout(); + + mirStmts = stmts.back()->GenMIRStmts(mirBuilder); + RedirectCout(); + mirStmts.back()->Dump(); + dumpStr = GetBufferString(); + if (it->first != bc::BCUtil::kJavaObjectName) { + pattern = std::string( + "dassign \\$Landroid_2Ficu_2Ftext_2FCurrencyMetaInfo_3B_7ChasData 0 \\(dread ") + + std::string(mirOp->second) + std::string(" %Reg3_") + it->second + std::string("\\)") + HIR2MPLUTRegx::Any(); + } else { + pattern = std::string( + "dassign \\$Landroid_2Ficu_2Ftext_2FCurrencyMetaInfo_3B_7ChasData 0 \\(dread ") + std::string(mirOp->second) + + std::string(" %Reg3_") + 
HIR2MPLUTRegx::RefIndex(HIR2MPLUTRegx::kAnyNumber) + + std::string("\\)") + HIR2MPLUTRegx::Any(); + } + EXPECT_EQ(HIR2MPLUTRegx::Match(dumpStr, pattern), true); + EXPECT_EQ(mirStmts.size(), 2); + RestoreCout(); + } +} + +class DexOpCompareUT : public bc::DexOpCompare { + public: + DexOpCompareUT(MapleAllocator &allocatorIn, uint32 pcIn, bc::DexOpCode opcodeIn) + : bc::DexOpCompare(allocatorIn, pcIn, opcodeIn) {} + ~DexOpCompareUT() = default; + void SetRegNum() { + int mockReg0 = 0; + int mockReg1 = 1; + int mockReg2 = 2; + SetVA(mockReg0); + SetVB(mockReg1); + SetVC(mockReg2); + } + + void SetVBImpl(uint32 num) { + vB.regNum = num; + std::string typeName; + if (opcode == bc::kDexOpCmplFloat || opcode == bc::kDexOpCmpgFloat) { + typeName = bc::BCUtil::kFloat; + } else if (opcode == bc::kDexOpCmplDouble || opcode == bc::kDexOpCmpgDouble) { + typeName = bc::BCUtil::kDouble; + } else { + // kDexOpCmpLong + typeName = bc::BCUtil::kLong; + } + GStrIdx usedTypeNameIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(typeName); + vB.regType = allocator.GetMemPool()->New(allocator, vB, usedTypeNameIdx); + usedRegs.emplace_back(&vB); + } + + void SetVCImpl(uint32 num) { + vC = vB; + vC.regNum = num; + vC.regType = vB.regType; + usedRegs.emplace_back(&vC); + } +}; + +TEST_F(DexOpTest, FEIRStmtDexOpCmp) { + RedirectCout(); + std::unique_ptr dexOpCmpLong = std::make_unique(allocator, 1, bc::kDexOpCmpLong); + dexOpCmpLong->SetRegNum(); + std::list feStmts = dexOpCmpLong->EmitToFEIRStmts(); + ASSERT_EQ(feStmts.size(), 1); + std::list mirNodes = feStmts.front()->GenMIRStmts(mirBuilder); + ASSERT_EQ(mirNodes.size(), 1); + mirNodes.front()->Dump(); + std::string dumpStr = GetBufferString(); + std::string pattern = std::string("dassign %Reg0_I 0 \\(cmp i32 i64 \\(dread i64 %Reg1_J, dread i64 %Reg2_J\\)\\)") + + HIR2MPLUTRegx::Any(); + EXPECT_EQ(HIR2MPLUTRegx::Match(dumpStr, pattern), true); + + std::unique_ptr dexOpCmplFloat = std::make_unique(allocator, 1, bc::kDexOpCmplFloat); + dexOpCmplFloat->SetRegNum(); + dexOpCmplFloat->EmitToFEIRStmts().front()->GenMIRStmts(mirBuilder).front()->Dump(); + dumpStr = GetBufferString(); + pattern = std::string("dassign %Reg0_I 0 \\(cmpl i32 f32 \\(dread f32 %Reg1_F, dread f32 %Reg2_F\\)\\)") + + HIR2MPLUTRegx::Any(); + EXPECT_EQ(HIR2MPLUTRegx::Match(dumpStr, pattern), true); + + std::unique_ptr dexOpCmpgFloat = std::make_unique(allocator, 1, bc::kDexOpCmpgFloat); + dexOpCmpgFloat->SetRegNum(); + dexOpCmpgFloat->EmitToFEIRStmts().front()->GenMIRStmts(mirBuilder).front()->Dump(); + dumpStr = GetBufferString(); + pattern = std::string("dassign %Reg0_I 0 \\(cmpg i32 f32 \\(dread f32 %Reg1_F, dread f32 %Reg2_F\\)\\)") + + HIR2MPLUTRegx::Any(); + EXPECT_EQ(HIR2MPLUTRegx::Match(dumpStr, pattern), true); + + std::unique_ptr dexOpCmplDouble = + std::make_unique(allocator, 1, bc::kDexOpCmplDouble); + dexOpCmplDouble->SetRegNum(); + dexOpCmplDouble->EmitToFEIRStmts().front()->GenMIRStmts(mirBuilder).front()->Dump(); + dumpStr = GetBufferString(); + pattern = std::string("dassign %Reg0_I 0 \\(cmpl i32 f64 \\(dread f64 %Reg1_D, dread f64 %Reg2_D\\)\\)") + + HIR2MPLUTRegx::Any(); + EXPECT_EQ(HIR2MPLUTRegx::Match(dumpStr, pattern), true); + + std::unique_ptr dexOpCmpgDouble = + std::make_unique(allocator, 1, bc::kDexOpCmpgDouble); + dexOpCmpgDouble->SetRegNum(); + dexOpCmpgDouble->EmitToFEIRStmts().front()->GenMIRStmts(mirBuilder).front()->Dump(); + dumpStr = GetBufferString(); + pattern = std::string("dassign %Reg0_I 0 \\(cmpg i32 f64 \\(dread f64 %Reg1_D, dread f64 
%Reg2_D\\)\\)") + + HIR2MPLUTRegx::Any(); + EXPECT_EQ(HIR2MPLUTRegx::Match(dumpStr, pattern), true); + RestoreCout(); +} +class DexOpThrowUT : public bc::DexOpThrow { + public: + DexOpThrowUT(MapleAllocator &allocatorIn, uint32 pcIn, bc::DexOpCode opcodeIn) + : bc::DexOpThrow(allocatorIn, pcIn, opcodeIn) {} + ~DexOpThrowUT() = default; + + void SetVA(uint32 num) { + vA.regNum = num; + std::string typeName = bc::BCUtil::kAggregate; + vA.regType = allocator.GetMemPool()->New(allocator, vA, + GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(typeName)); + } +}; + +// ---------- FEIRExprBinary - DexOpThrow ---------- +TEST_F(DexOpTest, DexOpThrow) { + std::unique_ptr dexOp = std::make_unique(allocator, 0, bc::kDexOpThrow); + dexOp->SetVA(3); + std::list stmts = dexOp->EmitToFEIRStmts(); + std::list mirStmts = stmts.front()->GenMIRStmts(mirBuilder); + RedirectCout(); + mirStmts.front()->Dump(); + std::string dumpStr = GetBufferString(); + std::string pattern = std::string( + "throw \\(dread ref %Reg3_") + HIR2MPLUTRegx::RefIndex(HIR2MPLUTRegx::kAnyNumber) + std::string("\\)") + + HIR2MPLUTRegx::Any(); + EXPECT_EQ(HIR2MPLUTRegx::Match(dumpStr, pattern), true); + EXPECT_EQ(mirStmts.size(), 1); + RestoreCout(); +} + +// ---------- FEIRExprBinary - DexOpAget ---------- +class DexOpAgetUT : public bc::DexOpAget { + public: + DexOpAgetUT(MapleAllocator &allocatorIn, uint32 pcIn, bc::DexOpCode opcodeIn) + : bc::DexOpAget(allocatorIn, pcIn, opcodeIn) {} + ~DexOpAgetUT() = default; + void SetType(bc::DexOpCode opcode) { + std::string elemTypeName; + switch (opcode) { + case bc::kDexOpAget: { + elemTypeName = bc::BCUtil::kInt; + break; + } + case bc::kDexOpAgetWide: { + elemTypeName = bc::BCUtil::kWide; + break; + } + case bc::kDexOpAgetObject: { + elemTypeName = bc::BCUtil::kJavaObjectName; + break; + } + case bc::kDexOpAgetBoolean: { + elemTypeName = bc::BCUtil::kBoolean; + break; + } + case bc::kDexOpAgetByte: { + elemTypeName = bc::BCUtil::kByte; + break; + } + case bc::kDexOpAgetChar: { + elemTypeName = bc::BCUtil::kChar; + break; + } + case bc::kDexOpAgetShort: { + elemTypeName = bc::BCUtil::kShort; + break; + } + default: { + CHECK_FATAL(false, "Invalid opcode : 0x%x in DexOpAget", opcode); + break; + } + } + + std::string arrayTypeName = "[" + elemTypeName; + elemTypeName = namemangler::EncodeName(elemTypeName); + arrayTypeName = namemangler::EncodeName(arrayTypeName); + vA.regType = allocator.GetMemPool()->New(allocator, vA, + GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(elemTypeName)); + vB.regType = allocator.GetMemPool()->New(allocator, vB, + GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(arrayTypeName)); + + GStrIdx usedTypeNameIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(bc::BCUtil::kInt); + vC.regType = allocator.GetMemPool()->New(allocator, vC, usedTypeNameIdx); + } +}; + +TEST_F(DexOpTest, DexOpAgetObject) { + std::unique_ptr dexOp = std::make_unique(allocator, 0, bc::kDexOpAgetObject); + dexOp->SetVA(0); + dexOp->SetVB(1); + dexOp->SetVC(2); + dexOp->SetType(bc::kDexOpAgetObject); + std::list stmts = dexOp->EmitToFEIRStmts(); + std::list mirStmts = stmts.front()->GenMIRStmts(mirBuilder); + RedirectCout(); + mirStmts.front()->Dump(); + std::string dumpStr = GetBufferString(); + std::string expect1 = std::string("dassign %Reg0_"); + std::string expect2 = std::string(" 0 (iread ref <* <* <$Ljava_2Flang_2FObject_3B>>> 0 " + "(array 1 ptr <* <[] <* <$Ljava_2Flang_2FObject_3B>>>> (dread ref %Reg1_"); + std::string expect3 = std::string(", dread i32 
%Reg2_I"); + EXPECT_EQ(dumpStr.find(expect1) != std::string::npos, true); + EXPECT_EQ(dumpStr.find(expect2) != std::string::npos, true); + EXPECT_EQ(dumpStr.find(expect3) != std::string::npos, true); + EXPECT_EQ(mirStmts.size(), 1); + RestoreCout(); +} + +TEST_F(DexOpTest, DexOpAgetBoolean) { + std::unique_ptr dexOp = std::make_unique(allocator, 0, bc::kDexOpAgetBoolean); + dexOp->SetVA(0); + dexOp->SetVB(1); + dexOp->SetVC(2); + dexOp->SetType(bc::kDexOpAgetBoolean); + std::list stmts = dexOp->EmitToFEIRStmts(); + std::list mirStmts = stmts.front()->GenMIRStmts(mirBuilder); + RedirectCout(); + mirStmts.front()->Dump(); + std::string dumpStr = GetBufferString(); + + std::string expect1 = std::string("dassign %Reg0_Z"); + std::string expect2 = std::string("0 (iread u32 <* u1> 0 (array 1 ptr <* <[] u1>> (dread ref %Reg1_"); + std::string expect3 = std::string(", dread i32 %Reg2_I"); + EXPECT_EQ(dumpStr.find(expect1) != std::string::npos, true); + EXPECT_EQ(dumpStr.find(expect2) != std::string::npos, true); + EXPECT_EQ(dumpStr.find(expect3) != std::string::npos, true); + + EXPECT_EQ(mirStmts.size(), 1); + RestoreCout(); +} + +// ---------- FEIRExprBinary - DexOpAput ---------- +class DexOpAputUT : public bc::DexOpAput { + public: + DexOpAputUT(MapleAllocator &allocatorIn, uint32 pcIn, bc::DexOpCode opcodeIn) + : bc::DexOpAput(allocatorIn, pcIn, opcodeIn) {} + ~DexOpAputUT() = default; + void SetType(bc::DexOpCode opcode) { + std::string elemTypeName; + switch (opcode) { + case bc::kDexOpAput: { + elemTypeName = bc::BCUtil::kInt; + break; + } + case bc::kDexOpAputWide: { + elemTypeName = bc::BCUtil::kWide; + break; + } + case bc::kDexOpAputObject: { + elemTypeName = bc::BCUtil::kJavaObjectName; + break; + } + case bc::kDexOpAputBoolean: { + elemTypeName = bc::BCUtil::kBoolean; + break; + } + case bc::kDexOpAputByte: { + elemTypeName = bc::BCUtil::kByte; + break; + } + case bc::kDexOpAputChar: { + elemTypeName = bc::BCUtil::kChar; + break; + } + case bc::kDexOpAputShort: { + elemTypeName = bc::BCUtil::kShort; + break; + } + default: { + CHECK_FATAL(false, "Invalid opcode : 0x%x in DexOpAget", opcode); + break; + } + } + + std::string arrayTypeName = "[" + elemTypeName; + elemTypeName = namemangler::EncodeName(elemTypeName); + arrayTypeName = namemangler::EncodeName(arrayTypeName); + vA.regType = allocator.GetMemPool()->New(allocator, vA, + GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(elemTypeName)); + vB.regType = allocator.GetMemPool()->New(allocator, vB, + GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(arrayTypeName)); + GStrIdx usedTypeNameIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(bc::BCUtil::kInt); + vC.regType = allocator.GetMemPool()->New(allocator, vC, usedTypeNameIdx); + } +}; + +TEST_F(DexOpTest, kDexOpAputBoolean) { + std::unique_ptr dexOp = std::make_unique(allocator, 0, bc::kDexOpAputBoolean); + dexOp->SetVA(0); + dexOp->SetVB(1); + dexOp->SetVC(2); + dexOp->SetType(bc::kDexOpAputBoolean); + std::list stmts = dexOp->EmitToFEIRStmts(); + std::list mirStmts = stmts.front()->GenMIRStmts(mirBuilder); + EXPECT_EQ(mirStmts.size(), 1); + RedirectCout(); + mirStmts.front()->Dump(); + std::string dumpStr = GetBufferString(); + std::string expect1 = std::string("iassign <* u1> 0 ("); + std::string expect2 = std::string("array 1 ptr <* <[] u1>> (dread ref %Reg1_R"); + std::string expect3 = std::string(", dread i32 %Reg2_I), "); + std::string expect4 = std::string(" dread u32 %Reg0_Z)"); + EXPECT_EQ(dumpStr.find(expect1) != std::string::npos, true); + 
EXPECT_EQ(dumpStr.find(expect2) != std::string::npos, true); + EXPECT_EQ(dumpStr.find(expect3) != std::string::npos, true); + EXPECT_EQ(dumpStr.find(expect4) != std::string::npos, true); + RestoreCout(); +} + +TEST_F(DexOpTest, DexOpAputObject) { + std::unique_ptr<DexOpAputUT> dexOp = std::make_unique<DexOpAputUT>(allocator, 0, bc::kDexOpAputObject); + dexOp->SetVA(0); + dexOp->SetVB(1); + dexOp->SetVC(2); + dexOp->SetType(bc::kDexOpAputObject); + std::list<UniqueFEIRStmt> stmts = dexOp->EmitToFEIRStmts(); + std::list<StmtNode*> mirStmts = stmts.front()->GenMIRStmts(mirBuilder); + EXPECT_EQ(mirStmts.size(), 1); + RedirectCout(); + mirStmts.front()->Dump(); + std::string dumpStr = GetBufferString(); + std::string expect1 = std::string("iassign <* <* <$Ljava_2Flang_2FObject_3B>>> 0 ("); + std::string expect2 = std::string("array 1 ptr <* <[] <* <$Ljava_2Flang_2FObject_3B>>>> (dread ref %Reg1_R"); + std::string expect3 = std::string(", dread i32 %Reg2_I), "); + std::string expect4 = std::string(" dread ref %Reg0_R"); + EXPECT_EQ(dumpStr.find(expect1) != std::string::npos, true); + EXPECT_EQ(dumpStr.find(expect2) != std::string::npos, true); + EXPECT_EQ(dumpStr.find(expect3) != std::string::npos, true); + EXPECT_EQ(dumpStr.find(expect4) != std::string::npos, true); + RestoreCout(); +} +} // namespace bc +} // namespace maple
diff --git a/src/hir2mpl/test/bytecode_input/dex/dex_reader_test.cpp b/src/hir2mpl/test/bytecode_input/dex/dex_reader_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..abdcf1bfe3f0fb30488f3a6425282798b5df6095 --- /dev/null +++ b/src/hir2mpl/test/bytecode_input/dex/dex_reader_test.cpp @@ -0,0 +1,43 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include <gtest/gtest.h> +#include <memory> +#include <string> +#include "dex_reader.h" +#include "types_def.h" + +namespace maple { +namespace bc { +class DexReaderTest : public testing::Test { + public: + DexReaderTest() : reader(std::make_unique<DexReader>(0, "./hir2mplUT")) {} + + ~DexReaderTest() = default; + + protected: + std::unique_ptr<DexReader> reader; +}; + +TEST_F(DexReaderTest, GetRealInteger) { + uint32 value0 = 0x12345678; + uint32 value1 = 0x78563412; + reader->SetEndianTag(true); // big endian + EXPECT_EQ(reader->GetRealInteger(value0), value1); + + reader->SetEndianTag(false); // little endian + EXPECT_EQ(reader->GetRealInteger(value0), value0); +} +} // namespace bc +} // namespace maple \ No newline at end of file
diff --git a/src/hir2mpl/test/common/base64_test.cpp b/src/hir2mpl/test/common/base64_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..ae122844a2d072bf27148d295b1ba8f238e862f1 --- /dev/null +++ b/src/hir2mpl/test/common/base64_test.cpp @@ -0,0 +1,52 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include +#include +#include "base64.h" + +namespace maple { +TEST(Base64, Encode) { + uint8 buf[] = { 'A', 'B', 'C', 'D' }; + std::string base64Str = Base64::Encode(buf, 4); + EXPECT_EQ(base64Str, "QUJDRA=="); +} + +TEST(Base64, Decode) { + std::string base64Str = "QUJDRA=="; + size_t length = 0; + uint8 *buf = Base64::Decode(base64Str, length); + ASSERT_EQ(length, 4); + EXPECT_EQ(buf[0], 'A'); + EXPECT_EQ(buf[1], 'B'); + EXPECT_EQ(buf[2], 'C'); + EXPECT_EQ(buf[3], 'D'); +} + +TEST(Base64, Decode_Boundary1) { + std::string base64Str = ""; + size_t length = 0; + (void)Base64::Decode(base64Str, length); + ASSERT_EQ(length, 0); +} + +TEST(Base64, Decode_Boundary2) { + std::string base64Str = "QUI="; + size_t length = 0; + uint8 *buf = Base64::Decode(base64Str, length); + ASSERT_EQ(length, 2); + EXPECT_EQ(buf[0], 'A'); + EXPECT_EQ(buf[1], 'B'); +} +} // namespace maple \ No newline at end of file diff --git a/src/hir2mpl/test/common/basic_io_test.cpp b/src/hir2mpl/test/common/basic_io_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..356825fc75ca786983402d160e2f9600be17d4ca --- /dev/null +++ b/src/hir2mpl/test/common/basic_io_test.cpp @@ -0,0 +1,314 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include +#include +#include "basic_io.h" +#include "base64.h" + +namespace maple { +TEST(BasicIOMapFile, GenFileInMemory) { + uint8 buf[] = { 0x00, 0x01, 0x02, 0x03 }; + std::unique_ptr mapFile = BasicIOMapFile::GenFileInMemory("test", buf, sizeof(buf)); + EXPECT_NE(mapFile.get(), nullptr); + EXPECT_EQ(mapFile->GetLength(), 4); +} + +class BasicIOReadTest : public testing::Test { + public: + BasicIOReadTest() { + testFile1 = BasicIOMapFile::GenFileInMemory("test", buf1, sizeof(buf1)); + testFile2 = BasicIOMapFile::GenFileInMemory("test", buf2, sizeof(buf2)); + testFile3 = BasicIOMapFile::GenFileInMemory("test", buf3, sizeof(buf3)); + testFile4 = BasicIOMapFile::GenFileInMemory("test", buf4, sizeof(buf4)); + testFile5 = BasicIOMapFile::GenFileInMemory("test", buf5, sizeof(buf5)); + } + + ~BasicIOReadTest() = default; + + BasicIOMapFile &GetTestFile1() { + CHECK_NULL_FATAL(testFile1.get()); + return *testFile1; + } + + BasicIOMapFile &GetTestFile2() { + CHECK_NULL_FATAL(testFile2.get()); + return *testFile2; + } + + BasicIOMapFile &GetTestFile3() { + CHECK_NULL_FATAL(testFile3.get()); + return *testFile3; + } + + BasicIOMapFile &GetTestFile4() { + CHECK_NULL_FATAL(testFile4.get()); + return *testFile4; + } + + BasicIOMapFile &GetTestFile5() { + CHECK_NULL_FATAL(testFile5.get()); + return *testFile5; + } + + private: + std::unique_ptr testFile1; + std::unique_ptr testFile2; + std::unique_ptr testFile3; + std::unique_ptr testFile4; + std::unique_ptr testFile5; + uint8 buf1[16] = {0x01, 0xFF, 0x02, 0xFE, 0x03, 0xFD, 0x04, 0xFC, 0x05, 0xFB, 0x06, 0xFA, 0x07, 0xF8, 0x08, 0xF7}; + uint8 buf2[12] = {0x3F, 0x80, 0x00, 0x00, 0xBF, 0xF0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}; + uint8 buf3[12] = {0x00, 0x00, 0x80, 0x3F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xF0, 0xBF}; + uint8 buf4[4] = {'A', 'B', 'C', 'D'}; + uint8 buf5[1] = {0x00}; +}; + +TEST_F(BasicIOReadTest, ReadUInt8) { + BasicIORead ioBigEndian(GetTestFile1(), true); + BasicIORead ioLittleEndian(GetTestFile1(), false); + bool success = false; + EXPECT_EQ(ioBigEndian.ReadUInt8(), 0x01); + EXPECT_EQ(ioLittleEndian.ReadUInt8(), 0x01); + EXPECT_EQ(ioBigEndian.ReadUInt8(success), 0xFF); + EXPECT_EQ(success, true); + EXPECT_EQ(ioLittleEndian.ReadUInt8(success), 0xFF); + EXPECT_EQ(success, true); +} + +TEST_F(BasicIOReadTest, ReadInt8) { + BasicIORead ioBigEndian(GetTestFile1(), true); + BasicIORead ioLittleEndian(GetTestFile1(), false); + bool success = false; + EXPECT_EQ(ioBigEndian.ReadInt8(), static_cast(0x01)); + EXPECT_EQ(ioLittleEndian.ReadInt8(), static_cast(0x01)); + EXPECT_EQ(ioBigEndian.ReadInt8(success), static_cast(0xFF)); + EXPECT_EQ(success, true); + EXPECT_EQ(ioLittleEndian.ReadInt8(success), static_cast(0xFF)); + EXPECT_EQ(success, true); +} + +TEST_F(BasicIOReadTest, ReadUInt16) { + BasicIORead ioBigEndian(GetTestFile1(), true); + BasicIORead ioLittleEndian(GetTestFile1(), false); + bool success = false; + EXPECT_EQ(ioBigEndian.ReadUInt16(), 0x01FF); + EXPECT_EQ(ioLittleEndian.ReadUInt16(), 0xFF01); + EXPECT_EQ(ioBigEndian.ReadUInt16(success), 0x02FE); + EXPECT_EQ(success, true); + EXPECT_EQ(ioLittleEndian.ReadUInt16(success), 0xFE02); + EXPECT_EQ(success, true); +} + +TEST_F(BasicIOReadTest, ReadInt16) { + BasicIORead ioBigEndian(GetTestFile1(), true); + BasicIORead ioLittleEndian(GetTestFile1(), false); + bool success = false; + EXPECT_EQ(ioBigEndian.ReadInt16(), static_cast(0x01FF)); + EXPECT_EQ(ioLittleEndian.ReadInt16(), static_cast(0xFF01)); + EXPECT_EQ(ioBigEndian.ReadInt16(success), static_cast(0x02FE)); + 
EXPECT_EQ(success, true); + EXPECT_EQ(ioLittleEndian.ReadInt16(success), static_cast(0xFE02)); + EXPECT_EQ(success, true); +} + +TEST_F(BasicIOReadTest, ReadUInt32) { + BasicIORead ioBigEndian(GetTestFile1(), true); + BasicIORead ioLittleEndian(GetTestFile1(), false); + bool success = false; + EXPECT_EQ(ioBigEndian.ReadUInt32(), 0x01FF02FE); + EXPECT_EQ(ioLittleEndian.ReadUInt32(), 0xFE02FF01); + EXPECT_EQ(ioBigEndian.ReadUInt32(success), 0x03FD04FC); + EXPECT_EQ(success, true); + EXPECT_EQ(ioLittleEndian.ReadUInt32(success), 0xFC04FD03); + EXPECT_EQ(success, true); +} + +TEST_F(BasicIOReadTest, ReadInt32) { + BasicIORead ioBigEndian(GetTestFile1(), true); + BasicIORead ioLittleEndian(GetTestFile1(), false); + bool success = false; + EXPECT_EQ(ioBigEndian.ReadInt32(), static_cast(0x01FF02FE)); + EXPECT_EQ(ioLittleEndian.ReadInt32(), static_cast(0xFE02FF01)); + EXPECT_EQ(ioBigEndian.ReadInt32(success), static_cast(0x03FD04FC)); + EXPECT_EQ(success, true); + EXPECT_EQ(ioLittleEndian.ReadInt32(success), static_cast(0xFC04FD03)); + EXPECT_EQ(success, true); +} + +TEST_F(BasicIOReadTest, ReadUInt64) { + BasicIORead ioBigEndian(GetTestFile1(), true); + BasicIORead ioLittleEndian(GetTestFile1(), false); + bool success = false; + EXPECT_EQ(ioBigEndian.ReadUInt64(), 0x01FF02FE03FD04FC); + EXPECT_EQ(ioLittleEndian.ReadUInt64(), 0xFC04FD03FE02FF01); + EXPECT_EQ(ioBigEndian.ReadUInt64(success), 0x05FB06FA07F808F7); + EXPECT_EQ(success, true); + EXPECT_EQ(ioLittleEndian.ReadUInt64(success), 0xF708F807FA06FB05); + EXPECT_EQ(success, true); +} + +TEST_F(BasicIOReadTest, ReadInt64) { + BasicIORead ioBigEndian(GetTestFile1(), true); + BasicIORead ioLittleEndian(GetTestFile1(), false); + bool success = false; + EXPECT_EQ(ioBigEndian.ReadInt64(), static_cast(0x01FF02FE03FD04FC)); + EXPECT_EQ(ioLittleEndian.ReadInt64(), static_cast(0xFC04FD03FE02FF01)); + EXPECT_EQ(ioBigEndian.ReadInt64(success), static_cast(0x05FB06FA07F808F7)); + EXPECT_EQ(success, true); + EXPECT_EQ(ioLittleEndian.ReadInt64(success), static_cast(0xF708F807FA06FB05)); + EXPECT_EQ(success, true); +} + +TEST_F(BasicIOReadTest, ReadFloat) { + BasicIORead ioBigEndian(GetTestFile2(), true); + BasicIORead ioLittleEndian(GetTestFile3(), false); + EXPECT_EQ(ioBigEndian.ReadFloat(), 1.0f); + EXPECT_EQ(ioLittleEndian.ReadFloat(), 1.0f); + EXPECT_EQ(ioBigEndian.ReadDouble(), -1.0); + EXPECT_EQ(ioLittleEndian.ReadDouble(), -1.0); +} + +TEST_F(BasicIOReadTest, ReadFloat2) { + BasicIORead ioBigEndian(GetTestFile2(), true); + BasicIORead ioLittleEndian(GetTestFile3(), false); + bool success = false; + EXPECT_EQ(ioBigEndian.ReadFloat(success), 1.0f); + EXPECT_EQ(success, true); + EXPECT_EQ(ioLittleEndian.ReadFloat(success), 1.0f); + EXPECT_EQ(success, true); + EXPECT_EQ(ioBigEndian.ReadDouble(success), -1.0); + EXPECT_EQ(success, true); + EXPECT_EQ(ioLittleEndian.ReadDouble(success), -1.0); + EXPECT_EQ(success, true); +} + +TEST_F(BasicIOReadTest, ReadBufferUInt8) { + BasicIORead ioBigEndian(GetTestFile4(), true); + BasicIORead ioLittleEndian(GetTestFile4(), false); + bool success = false; + uint8 buf[2]; + // big endian + ioBigEndian.ReadBufferUInt8(buf, 2); + EXPECT_EQ(buf[0], static_cast('A')); + EXPECT_EQ(buf[1], static_cast('B')); + ioBigEndian.ReadBufferUInt8(buf, 2, success); + EXPECT_EQ(success, true); + EXPECT_EQ(buf[0], static_cast('C')); + EXPECT_EQ(buf[1], static_cast('D')); + ioBigEndian.ReadBufferUInt8(buf, 2, success); + EXPECT_EQ(success, false); + // little endian + ioLittleEndian.ReadBufferUInt8(buf, 2); + EXPECT_EQ(buf[0], 
static_cast('A')); + EXPECT_EQ(buf[1], static_cast('B')); + ioLittleEndian.ReadBufferUInt8(buf, 2, success); + EXPECT_EQ(success, true); + EXPECT_EQ(buf[0], static_cast('C')); + EXPECT_EQ(buf[1], static_cast('D')); + ioLittleEndian.ReadBufferUInt8(buf, 2, success); + EXPECT_EQ(success, false); +} + +TEST_F(BasicIOReadTest, ReadBufferInt8) { + BasicIORead ioBigEndian(GetTestFile4(), true); + BasicIORead ioLittleEndian(GetTestFile4(), false); + bool success = false; + int8 buf[2]; + // big endian + ioBigEndian.ReadBufferInt8(buf, 2); + EXPECT_EQ(buf[0], static_cast('A')); + EXPECT_EQ(buf[1], static_cast('B')); + ioBigEndian.ReadBufferInt8(buf, 2, success); + EXPECT_EQ(success, true); + EXPECT_EQ(buf[0], static_cast('C')); + EXPECT_EQ(buf[1], static_cast('D')); + ioBigEndian.ReadBufferInt8(buf, 2, success); + EXPECT_EQ(success, false); + // little endian + ioLittleEndian.ReadBufferInt8(buf, 2); + EXPECT_EQ(buf[0], static_cast('A')); + EXPECT_EQ(buf[1], static_cast('B')); + ioLittleEndian.ReadBufferInt8(buf, 2, success); + EXPECT_EQ(success, true); + EXPECT_EQ(buf[0], static_cast('C')); + EXPECT_EQ(buf[1], static_cast('D')); + ioLittleEndian.ReadBufferInt8(buf, 2, success); + EXPECT_EQ(success, false); +} + +TEST_F(BasicIOReadTest, ReadBufferChar) { + BasicIORead ioBigEndian(GetTestFile4(), true); + BasicIORead ioLittleEndian(GetTestFile4(), false); + bool success = false; + char buf[2]; + // big endian + ioBigEndian.ReadBufferChar(buf, 2); + EXPECT_EQ(buf[0], 'A'); + EXPECT_EQ(buf[1], 'B'); + ioBigEndian.ReadBufferChar(buf, 2, success); + EXPECT_EQ(success, true); + EXPECT_EQ(buf[0], 'C'); + EXPECT_EQ(buf[1], 'D'); + ioBigEndian.ReadBufferChar(buf, 2, success); + EXPECT_EQ(success, false); + // little endian + ioLittleEndian.ReadBufferChar(buf, 2); + EXPECT_EQ(buf[0], 'A'); + EXPECT_EQ(buf[1], 'B'); + ioLittleEndian.ReadBufferChar(buf, 2, success); + EXPECT_EQ(success, true); + EXPECT_EQ(buf[0], 'C'); + EXPECT_EQ(buf[1], 'D'); + ioLittleEndian.ReadBufferChar(buf, 2, success); + EXPECT_EQ(success, false); +} + +TEST_F(BasicIOReadTest, ReadString) { + BasicIORead ioBigEndian(GetTestFile4(), true); + BasicIORead ioLittleEndian(GetTestFile4(), false); + bool success = false; + // big endian + EXPECT_EQ(ioBigEndian.ReadString(2), "AB"); + EXPECT_EQ(ioBigEndian.ReadString(2, success), "CD"); + EXPECT_EQ(success, true); + EXPECT_EQ(ioBigEndian.ReadString(2, success), ""); + EXPECT_EQ(success, false); + // little endian + EXPECT_EQ(ioLittleEndian.ReadString(2), "AB"); + EXPECT_EQ(ioLittleEndian.ReadString(2, success), "CD"); + EXPECT_EQ(success, true); + EXPECT_EQ(ioLittleEndian.ReadString(2, success), ""); + EXPECT_EQ(success, false); +} + +TEST_F(BasicIOReadTest, Read_BoundaryCheck) { + BasicIORead ioBigEndian(GetTestFile5(), true); + ioBigEndian.ReadUInt8(); + bool success = false; + ioBigEndian.ReadUInt8(success); + EXPECT_EQ(success, false); + ioBigEndian.ReadUInt16(success); + EXPECT_EQ(success, false); + ioBigEndian.ReadUInt32(success); + EXPECT_EQ(success, false); + ioBigEndian.ReadUInt64(success); + EXPECT_EQ(success, false); + ioBigEndian.ReadFloat(success); + EXPECT_EQ(success, false); + ioBigEndian.ReadDouble(success); + EXPECT_EQ(success, false); +} + +} // namespace maple \ No newline at end of file diff --git a/src/hir2mpl/test/common/fe_algorithm_test.cpp b/src/hir2mpl/test/common/fe_algorithm_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..515814b08b4971b95ba79b5baad531b5426e1680 --- /dev/null +++ b/src/hir2mpl/test/common/fe_algorithm_test.cpp @@ -0,0 
+1,226 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include +#include +#include +#include +#include +#include "fe_options.h" +#include "fe_algorithm.h" +#include "mpl_logging.h" + +namespace maple { +class TestNode { + public: + TestNode(int argID, std::set argItems) : id(argID) { + for (int item : argItems) { + items.insert(item); + } + mergeEnd = items.size() > 0; + } + ~TestNode() = default; + + bool Merge(const TestNode &src) { + for (int item : src.GetItems()) { + items.insert(item); + } + return true; + } + + bool MergeEnd() const { + return mergeEnd; + } + + bool CheckExist(int item) const { + return items.find(item) != items.end(); + } + + bool CheckExist(std::set argItems) const { + for (int item : argItems) { + if (items.find(item) == items.end()) { + return false; + } + } + return true; + } + + const std::set &GetItems() const { + return items; + } + + int GetID() const { + return id; + } + + private: + int id; + bool mergeEnd; + std::set items; +}; // class TestNode + +class CorrelativeMergeTest { + public: + CorrelativeMergeTest() = default; + ~CorrelativeMergeTest() = default; +}; + +// Test1 +TEST(CorrelativeMergeTest, Test1) { + std::map> correlation; + std::unique_ptr node0 = std::make_unique(0, std::set({ 0 })); + std::unique_ptr node1 = std::make_unique(1, std::set()); + correlation[node1.get()].insert(node0.get()); + CorrelativeMerge mergeHelper(correlation, &TestNode::Merge, &TestNode::MergeEnd); + mergeHelper.ProcessAll(); + EXPECT_EQ(node1->CheckExist(0), true); +} + +// Test2 +// Graph: doc/images/ut_cases/CorrelativeMerge/Test2.dot +// Image: doc/images/ut_cases/CorrelativeMerge/Test2.png +TEST(CorrelativeMergeTest, Test2) { + std::map> correlation; + std::unique_ptr node0 = std::make_unique(0, std::set({ 0 })); + std::unique_ptr node1 = std::make_unique(1, std::set()); + std::unique_ptr node2 = std::make_unique(2, std::set()); + correlation[node1.get()].insert(node0.get()); + correlation[node1.get()].insert(node2.get()); + correlation[node2.get()].insert(node1.get()); + CorrelativeMerge mergeHelper(correlation, &TestNode::Merge, &TestNode::MergeEnd); + mergeHelper.ProcessAll(); + EXPECT_EQ(node1->CheckExist(0), true); + EXPECT_EQ(node2->CheckExist(0), true); + INFO(kLncInfo, "Visit Count = %d", mergeHelper.GetVisitCount()); +} + +// Test3 +// Graph: doc/images/ut_cases/CorrelativeMerge/Test3.dot +// Image: doc/images/ut_cases/CorrelativeMerge/Test3.png +TEST(CorrelativeMergeTest, Test3) { + std::map> correlation; + std::unique_ptr node0 = std::make_unique(0, std::set({ 0 })); + std::unique_ptr node1 = std::make_unique(1, std::set()); + std::unique_ptr node2 = std::make_unique(2, std::set({ 2 })); + correlation[node1.get()].insert(node0.get()); + correlation[node1.get()].insert(node2.get()); + correlation[node2.get()].insert(node1.get()); + CorrelativeMerge mergeHelper(correlation, &TestNode::Merge, &TestNode::MergeEnd); + mergeHelper.ProcessAll(); + EXPECT_EQ(node1->CheckExist(std::set({ 0, 2 })), true); + INFO(kLncInfo, "Visit 
Count = %d", mergeHelper.GetVisitCount()); +} + +// Test4 +// Graph: doc/images/ut_cases/CorrelativeMerge/Test4.dot +// Image: doc/images/ut_cases/CorrelativeMerge/Test4.png +TEST(CorrelativeMergeTest, Test4) { + std::map> correlation; + std::unique_ptr node0 = std::make_unique(0, std::set({ 0 })); + std::unique_ptr node1 = std::make_unique(1, std::set()); + std::unique_ptr node2 = std::make_unique(2, std::set()); + std::unique_ptr node3 = std::make_unique(3, std::set()); + std::unique_ptr node4 = std::make_unique(4, std::set()); + std::unique_ptr node5 = std::make_unique(5, std::set()); + correlation[node1.get()].insert(node0.get()); + correlation[node2.get()].insert(node1.get()); + correlation[node2.get()].insert(node5.get()); + correlation[node3.get()].insert(node2.get()); + correlation[node4.get()].insert(node3.get()); + correlation[node5.get()].insert(node4.get()); + CorrelativeMerge mergeHelper(correlation, &TestNode::Merge, &TestNode::MergeEnd); + mergeHelper.ProcessAll(); + EXPECT_EQ(node1->CheckExist(0), true); + EXPECT_EQ(node2->CheckExist(0), true); + EXPECT_EQ(node3->CheckExist(0), true); + EXPECT_EQ(node4->CheckExist(0), true); + EXPECT_EQ(node5->CheckExist(0), true); + INFO(kLncInfo, "Visit Count = %d", mergeHelper.GetVisitCount()); +} + +// Test5 +// Graph: doc/images/ut_cases/CorrelativeMerge/Test5.dot +// Image: doc/images/ut_cases/CorrelativeMerge/Test5.png +TEST(CorrelativeMergeTest, Test5) { + std::map> correlation; + std::unique_ptr node0 = std::make_unique(0, std::set({ 0 })); + std::unique_ptr node1 = std::make_unique(1, std::set()); + std::unique_ptr node2 = std::make_unique(2, std::set()); + std::unique_ptr node3 = std::make_unique(3, std::set()); + std::unique_ptr node4 = std::make_unique(4, std::set({ 4 })); + std::unique_ptr node5 = std::make_unique(5, std::set()); + correlation[node1.get()].insert(node0.get()); + correlation[node2.get()].insert(node1.get()); + correlation[node2.get()].insert(node5.get()); + correlation[node3.get()].insert(node2.get()); + correlation[node4.get()].insert(node3.get()); + correlation[node5.get()].insert(node4.get()); + CorrelativeMerge mergeHelper(correlation, &TestNode::Merge, &TestNode::MergeEnd); + mergeHelper.ProcessAll(); + EXPECT_EQ(node1->CheckExist(0), true); + EXPECT_EQ(node2->CheckExist(std::set({ 0, 4 })), true); + EXPECT_EQ(node3->CheckExist(std::set({ 0, 4 })), true); + EXPECT_EQ(node5->CheckExist(4), true); + INFO(kLncInfo, "Visit Count = %d", mergeHelper.GetVisitCount()); +} + +// Test6 +// Graph: doc/images/ut_cases/CorrelativeMerge/Test6.dot +// Image: doc/images/ut_cases/CorrelativeMerge/Test6.png +TEST(CorrelativeMergeTest, Test6) { + std::map> correlation; + std::unique_ptr node0 = std::make_unique(0, std::set({ 0 })); + std::unique_ptr node1 = std::make_unique(1, std::set()); + std::unique_ptr node2 = std::make_unique(2, std::set({ 2 })); + std::unique_ptr node3 = std::make_unique(3, std::set()); + std::unique_ptr node4 = std::make_unique(4, std::set()); + correlation[node1.get()].insert(node0.get()); + correlation[node2.get()].insert(node0.get()); + correlation[node2.get()].insert(node4.get()); + correlation[node3.get()].insert(node1.get()); + correlation[node3.get()].insert(node2.get()); + correlation[node4.get()].insert(node3.get()); + CorrelativeMerge mergeHelper(correlation, &TestNode::Merge, &TestNode::MergeEnd); + mergeHelper.ProcessAll(); + EXPECT_EQ(node1->CheckExist(0), true); + EXPECT_EQ(node3->CheckExist(std::set({ 0, 2 })), true); + EXPECT_EQ(node4->CheckExist(std::set({ 0, 2 })), true); + 
INFO(kLncInfo, "Visit Count = %d", mergeHelper.GetVisitCount()); +} + +// Test7 +// Graph: doc/images/ut_cases/CorrelativeMerge/Test7.dot +// Image: doc/images/ut_cases/CorrelativeMerge/Test7.png +TEST(CorrelativeMergeTest, Test7) { + std::map<TestNode*, std::set<TestNode*>> correlation; + std::unique_ptr<TestNode> node0 = std::make_unique<TestNode>(0, std::set<int>({ 0 })); + std::unique_ptr<TestNode> node1 = std::make_unique<TestNode>(1, std::set<int>()); + std::unique_ptr<TestNode> node2 = std::make_unique<TestNode>(2, std::set<int>()); + std::unique_ptr<TestNode> node3 = std::make_unique<TestNode>(3, std::set<int>()); + std::unique_ptr<TestNode> node4 = std::make_unique<TestNode>(4, std::set<int>()); + std::unique_ptr<TestNode> node5 = std::make_unique<TestNode>(5, std::set<int>()); + correlation[node1.get()].insert(node0.get()); + correlation[node1.get()].insert(node5.get()); + correlation[node2.get()].insert(node1.get()); + correlation[node3.get()].insert(node2.get()); + correlation[node4.get()].insert(node2.get()); + correlation[node5.get()].insert(node3.get()); + correlation[node5.get()].insert(node4.get()); + CorrelativeMerge<TestNode> mergeHelper(correlation, &TestNode::Merge, &TestNode::MergeEnd); + mergeHelper.LoopCheckAll(); + mergeHelper.ProcessOne(*node3, true); + EXPECT_EQ(node3->CheckExist(std::set<int>({ 0 })), true); + INFO(kLncInfo, "Visit Count = %d", mergeHelper.GetVisitCount()); +} +} // namespace maple \ No newline at end of file
diff --git a/src/hir2mpl/test/common/fe_file_ops_test.cpp b/src/hir2mpl/test/common/fe_file_ops_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..c2d94d6f8d93b6606dfb5af76d64859b04f41d7e --- /dev/null +++ b/src/hir2mpl/test/common/fe_file_ops_test.cpp @@ -0,0 +1,53 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details.
+ */ +#include +#include +#include "fe_file_ops.h" + +namespace maple { +TEST(FEFileOps, GetFilePath) { + EXPECT_EQ(FEFileOps::GetFilePath("name"), ""); + EXPECT_EQ(FEFileOps::GetFilePath("/name"), "/"); + EXPECT_EQ(FEFileOps::GetFilePath("path/"), "path/"); + EXPECT_EQ(FEFileOps::GetFilePath("path/name"), "path/"); + EXPECT_EQ(FEFileOps::GetFilePath("/path/name"), "/path/"); + EXPECT_EQ(FEFileOps::GetFilePath("../name"), "../"); +} + +TEST(FEFileOps, GetFileNameWithExt) { + EXPECT_EQ(FEFileOps::GetFileNameWithExt("name"), "name"); + EXPECT_EQ(FEFileOps::GetFileNameWithExt("/name"), "name"); + EXPECT_EQ(FEFileOps::GetFileNameWithExt("path/"), ""); + EXPECT_EQ(FEFileOps::GetFileNameWithExt("path/name"), "name"); + EXPECT_EQ(FEFileOps::GetFileNameWithExt("/path/name"), "name"); + EXPECT_EQ(FEFileOps::GetFileNameWithExt("../name"), "name"); +} + +TEST(FEFileOps, GetFileName) { + EXPECT_EQ(FEFileOps::GetFileName("name.ext"), "name"); + EXPECT_EQ(FEFileOps::GetFileName("name"), "name"); + EXPECT_EQ(FEFileOps::GetFileName(".ext"), ""); + EXPECT_EQ(FEFileOps::GetFileName("/name.ext"), "name"); + EXPECT_EQ(FEFileOps::GetFileName("path/name.ext"), "name"); +} + +TEST(FEFileOps, GetFileExtName) { + EXPECT_EQ(FEFileOps::GetFileExtName("name.ext"), "ext"); + EXPECT_EQ(FEFileOps::GetFileExtName("name"), ""); + EXPECT_EQ(FEFileOps::GetFileExtName(".ext"), "ext"); + EXPECT_EQ(FEFileOps::GetFileExtName("/name.ext"), "ext"); + EXPECT_EQ(FEFileOps::GetFileExtName("path/name.ext"), "ext"); +} +} // namespace maple \ No newline at end of file diff --git a/src/hir2mpl/test/common/fe_file_type_test.cpp b/src/hir2mpl/test/common/fe_file_type_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..e31774815165a9131f80354db786cbab8d89213b --- /dev/null +++ b/src/hir2mpl/test/common/fe_file_type_test.cpp @@ -0,0 +1,45 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include +#include +#include "fe_file_type.h" + +namespace maple { +TEST(FEFileType, GetFileTypeByExtName) { + EXPECT_EQ(FEFileType::GetInstance().GetFileTypeByExtName("jar"), FEFileType::FileType::kJar); + EXPECT_EQ(FEFileType::GetInstance().GetFileTypeByExtName("class"), FEFileType::FileType::kClass); + EXPECT_EQ(FEFileType::GetInstance().GetFileTypeByExtName("dex"), FEFileType::FileType::kDex); + EXPECT_EQ(FEFileType::GetInstance().GetFileTypeByExtName(""), FEFileType::FileType::kUnknownType); + EXPECT_EQ(FEFileType::GetInstance().GetFileTypeByExtName("txt"), FEFileType::FileType::kUnknownType); +} + +TEST(FEFileType, RegisterExtName_Warn) { + FEFileType::GetInstance().RegisterExtName(FEFileType::FileType::kJar, ""); + FEFileType::GetInstance().RegisterExtName(FEFileType::FileType::kUnknownType, "txt"); +} + +TEST(FEFileType, RegisterMagicNumber_Warn) { + FEFileType::GetInstance().RegisterMagicNumber(FEFileType::FileType::kJar, 0); + FEFileType::GetInstance().RegisterMagicNumber(FEFileType::FileType::kUnknownType, 0x12345678); +} + +TEST(FEFileType, GetExtName) { + EXPECT_EQ(FEFileType::GetExtName("test.jar"), "jar"); + EXPECT_EQ(FEFileType::GetExtName("../test.jar"), "jar"); + EXPECT_EQ(FEFileType::GetExtName("../path/test.jar"), "jar"); + EXPECT_EQ(FEFileType::GetExtName("path/test.jar"), "jar"); + EXPECT_EQ(FEFileType::GetExtName("path/test"), ""); +} +} diff --git a/src/hir2mpl/test/common/fe_function_process_schedular_test.cpp b/src/hir2mpl/test/common/fe_function_process_schedular_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..07a39e7b73b86acb60a6764a1b2355526dcde32c --- /dev/null +++ b/src/hir2mpl/test/common/fe_function_process_schedular_test.cpp @@ -0,0 +1,256 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include +#include +#include "hir2mpl_compiler_component.h" +#include +#include +#include + +namespace maple { +class FEFunctionChild : public FEFunction { + public: + FEFunctionChild(std::list &argResult, uint32 argIdx) + : FEFunction(nullptr), + result(argResult), + idx(argIdx) {} + virtual ~FEFunctionChild() = default; + void Process() override { + usleep(rand() % 1000); + } + void EmitToMIR() override { + result.push_back(idx); + } + bool PreProcessTypeNameIdx() override { + return true; + } + void GenerateGeneralStmt(bool &success) override {} + void GenerateGeneralStmtFailCallBack() override {} + void GenerateGeneralDebugInfo() override {} + bool VerifyGeneral() override { + return true; + } + void VerifyGeneralFailCallBack() override {} + void EmitToFEIRStmt() override {} + void GenerateArgVarList() override {} + bool HasThis() override { + return false; + } + + private: + std::list &result; + uint32 idx; +}; // class FEFunctionChild + +TEST(FEFunctionProcessSchedular, RunSingle) { + std::list results; + std::unique_ptr function = std::make_unique(results, 0); + std::unique_ptr task = std::make_unique(function.get()); + task->Run(); + task->Finish(); + EXPECT_EQ(results.size(), 1); +} + +TEST(FEFunctionProcessSchedular, RunDirect) { + std::list> functions; + std::list> tasks; + std::list results; + FEFunctionProcessSchedular schedular("test schedular"); + for (uint32 i = 0; i < 1000; i++) { + functions.push_back(std::make_unique(results, i)); + tasks.push_back(std::make_unique(functions.back().get())); + } + for (const std::unique_ptr &task : tasks) { + task->Run(); + task->Finish(); + } + for (uint32 i = 0; i < 1000; i++) { + EXPECT_EQ(results.front(), i); + results.pop_front(); + } +} + +TEST(FEFunctionProcessSchedular, RunSerial) { + std::list> functions; + std::list> tasks; + std::list results; + FEFunctionProcessSchedular schedular("test schedular"); + for (uint32 i = 0; i < 1000; i++) { + functions.push_back(std::make_unique(results, i)); + tasks.push_back(std::make_unique(functions.back().get())); + } + for (const std::unique_ptr &task : tasks) { + schedular.AddTask(task.get()); + } + schedular.RunTask(1, true); + for (uint32 i = 0; i < 1000; i++) { + EXPECT_EQ(results.front(), i); + results.pop_front(); + } +} + +TEST(FEFunctionProcessSchedular, RunParallel2) { + std::list> functions; + std::list> tasks; + std::list results; + FEFunctionProcessSchedular schedular("test schedular"); + for (uint32 i = 0; i < 1000; i++) { + functions.push_back(std::make_unique(results, i)); + tasks.push_back(std::make_unique(functions.back().get())); + } + for (const std::unique_ptr &task : tasks) { + schedular.AddTask(task.get()); + } + schedular.RunTask(2, true); + for (uint32 i = 0; i < 1000; i++) { + EXPECT_EQ(results.front(), i); + results.pop_front(); + } +} + +TEST(FEFunctionProcessSchedular, RunParallel4) { + std::list> functions; + std::list> tasks; + std::list results; + FEFunctionProcessSchedular schedular("test schedular"); + for (uint32 i = 0; i < 1000; i++) { + functions.push_back(std::make_unique(results, i)); + tasks.push_back(std::make_unique(functions.back().get())); + } + for (const std::unique_ptr &task : tasks) { + schedular.AddTask(task.get()); + } + schedular.RunTask(4, true); + for (uint32 i = 0; i < 1000; i++) { + EXPECT_EQ(results.front(), i); + results.pop_front(); + } +} + +TEST(FEFunctionProcessSchedular, RunParallel8) { + std::list> functions; + std::list> tasks; + std::list results; + FEFunctionProcessSchedular schedular("test schedular"); + for (uint32 i = 
0; i < 1000; i++) { + functions.push_back(std::make_unique(results, i)); + tasks.push_back(std::make_unique(functions.back().get())); + } + for (const std::unique_ptr &task : tasks) { + schedular.AddTask(task.get()); + } + schedular.RunTask(8, true); + for (uint32 i = 0; i < 1000; i++) { + EXPECT_EQ(results.front(), i); + results.pop_front(); + } +} + +TEST(FEFunctionProcessSchedular, RunParallel16) { + std::list> functions; + std::list> tasks; + std::list results; + FEFunctionProcessSchedular schedular("test schedular"); + for (uint32 i = 0; i < 1000; i++) { + functions.push_back(std::make_unique(results, i)); + tasks.push_back(std::make_unique(functions.back().get())); + } + for (const std::unique_ptr &task : tasks) { + schedular.AddTask(task.get()); + } + schedular.RunTask(16, true); + for (uint32 i = 0; i < 1000; i++) { + EXPECT_EQ(results.front(), i); + results.pop_front(); + } +} + +TEST(FEFunctionProcessSchedular, RunParallel32) { + std::list> functions; + std::list> tasks; + std::list results; + FEFunctionProcessSchedular schedular("test schedular"); + for (uint32 i = 0; i < 1000; i++) { + functions.push_back(std::make_unique(results, i)); + tasks.push_back(std::make_unique(functions.back().get())); + } + for (const std::unique_ptr &task : tasks) { + schedular.AddTask(task.get()); + } + schedular.RunTask(32, true); + for (uint32 i = 0; i < 1000; i++) { + EXPECT_EQ(results.front(), i); + results.pop_front(); + } +} + +TEST(FEFunctionProcessSchedular, RunParallel48) { + std::list> functions; + std::list> tasks; + std::list results; + FEFunctionProcessSchedular schedular("test schedular"); + for (uint32 i = 0; i < 1000; i++) { + functions.push_back(std::make_unique(results, i)); + tasks.push_back(std::make_unique(functions.back().get())); + } + for (const std::unique_ptr &task : tasks) { + schedular.AddTask(task.get()); + } + schedular.RunTask(48, true); + for (uint32 i = 0; i < 1000; i++) { + EXPECT_EQ(results.front(), i); + results.pop_front(); + } +} + +TEST(FEFunctionProcessSchedular, RunParallel64) { + std::list> functions; + std::list> tasks; + std::list results; + FEFunctionProcessSchedular schedular("test schedular"); + for (uint32 i = 0; i < 1000; i++) { + functions.push_back(std::make_unique(results, i)); + tasks.push_back(std::make_unique(functions.back().get())); + } + for (const std::unique_ptr &task : tasks) { + schedular.AddTask(task.get()); + } + schedular.RunTask(64, true); + for (uint32 i = 0; i < 1000; i++) { + EXPECT_EQ(results.front(), i); + results.pop_front(); + } +} + +TEST(FEFunctionProcessSchedular, RunParallel128) { + std::list> functions; + std::list> tasks; + std::list results; + FEFunctionProcessSchedular schedular("test schedular"); + schedular.Init(); + for (uint32 i = 0; i < 1000; i++) { + functions.push_back(std::make_unique(results, i)); + tasks.push_back(std::make_unique(functions.back().get())); + } + for (const std::unique_ptr &task : tasks) { + schedular.AddTask(task.get()); + } + schedular.RunTask(128, true); + for (uint32 i = 0; i < 1000; i++) { + EXPECT_EQ(results.front(), i); + results.pop_front(); + } +} +} // namespace maple \ No newline at end of file diff --git a/src/hir2mpl/test/common/fe_struct_elem_info_test.cpp b/src/hir2mpl/test/common/fe_struct_elem_info_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..eaa952d885dc06acf32bc11fa38da6007e868ec2 --- /dev/null +++ b/src/hir2mpl/test/common/fe_struct_elem_info_test.cpp @@ -0,0 +1,58 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights 
reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include +#include +#include "global_tables.h" +#include "namemangler.h" +#include "fe_struct_elem_info.h" +#include "fe_manager.h" +#include "hir2mpl_ut_environment.h" +#include "redirect_buffer.h" + +namespace maple { +class FEStructFieldInfoTest : public testing::Test, public RedirectBuffer { + public: + FEStructFieldInfoTest() + : mirBuilder(&HIR2MPLUTEnvironment::GetMIRModule()) {} + + virtual ~FEStructFieldInfoTest() = default; + + MIRBuilder mirBuilder; +}; + +TEST_F(FEStructFieldInfoTest, FEStructFieldInfo) { + StructElemNameIdx *structElemNameIdx = new StructElemNameIdx("Ljava/lang/Integer;", "MIN_VALUE", "I"); + FEStructFieldInfo info(mirBuilder.GetMirModule().GetMPAllocator(), *structElemNameIdx, kSrcLangJava, true); + std::string structName = GlobalTables::GetStrTable().GetStringFromStrIdx(structElemNameIdx->klass); + std::string elemName = GlobalTables::GetStrTable().GetStringFromStrIdx(structElemNameIdx->elem); + std::string signatureName = GlobalTables::GetStrTable().GetStringFromStrIdx(structElemNameIdx->type); + delete structElemNameIdx; + EXPECT_EQ(structName, namemangler::EncodeName("Ljava/lang/Integer;")); + EXPECT_EQ(elemName, namemangler::EncodeName("MIN_VALUE")); + EXPECT_EQ(signatureName, namemangler::EncodeName("I")); + EXPECT_EQ(info.fieldType->IsScalar(), true); +} + +TEST_F(FEStructFieldInfoTest, SearchStructFieldJava) { + StructElemNameIdx *structElemNameIdx = new StructElemNameIdx("Ljava/lang/Integer;", "MIN_VALUE", "I"); + FEStructFieldInfo info(mirBuilder.GetMirModule().GetMPAllocator(), *structElemNameIdx, kSrcLangJava, true); + MIRStructType *structType = + FEManager::GetTypeManager().GetStructTypeFromName(namemangler::EncodeName("Ljava/lang/Integer;")); + delete structElemNameIdx; + ASSERT_NE(structType, nullptr); + EXPECT_EQ(info.SearchStructFieldJava(*structType, mirBuilder, true), true); + EXPECT_EQ(info.SearchStructFieldJava(*structType, mirBuilder, false), false); +} +} // namespace maple diff --git a/src/hir2mpl/test/common/fe_type_hierarchy_test.cpp b/src/hir2mpl/test/common/fe_type_hierarchy_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..4f5c017bfa13ab76fe837694518ecf68289787dc --- /dev/null +++ b/src/hir2mpl/test/common/fe_type_hierarchy_test.cpp @@ -0,0 +1,31 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */
+#include <gtest/gtest.h>
+#include
+#include "fe_type_hierarchy.h"
+#include "global_tables.h"
+
+namespace maple {
+TEST(FETypeHierarchy, IsParentOf) {
+  GStrIdx idxObject = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName("Ljava_2Flang_2FObject_3B");
+  GStrIdx idxString = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName("Ljava_2Flang_2FString_3B");
+  GStrIdx idxInteger = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName("Ljava_2Flang_2FInteger_3B");
+  EXPECT_EQ(FETypeHierarchy::GetInstance().IsParentOf(idxObject, idxString), true);
+  EXPECT_EQ(FETypeHierarchy::GetInstance().IsParentOf(idxObject, idxInteger), true);
+  EXPECT_EQ(FETypeHierarchy::GetInstance().IsParentOf(idxString, idxInteger), false);
+  EXPECT_EQ(FETypeHierarchy::GetInstance().IsParentOf(idxInteger, idxString), false);
+  EXPECT_EQ(FETypeHierarchy::GetInstance().IsParentOf(idxString, idxString), true);
+}
+} // namespace maple
\ No newline at end of file
diff --git a/src/hir2mpl/test/common/fe_type_manager_test.cpp b/src/hir2mpl/test/common/fe_type_manager_test.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..4817a268531aa8744e0936e473097b36ce9f7ad0
--- /dev/null
+++ b/src/hir2mpl/test/common/fe_type_manager_test.cpp
@@ -0,0 +1,195 @@
+/*
+ * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */ +#include +#include +#include "fe_manager.h" +#include "fe_type_manager.h" +#include "redirect_buffer.h" + +namespace maple { +class FETypeManagerTest : public testing::Test, public RedirectBuffer { + public: + FETypeManagerTest() = default; + ~FETypeManagerTest() = default; +}; + +TEST_F(FETypeManagerTest, GetClassOrInterfaceType) { + MIRStructType *structType = FEManager::GetTypeManager().GetClassOrInterfaceType("Ljava_2Flang_2FObject_3B"); + EXPECT_NE(structType, nullptr); + if (structType != nullptr) { + std::string mplName = structType->GetCompactMplTypeName(); + EXPECT_EQ(mplName, "Ljava_2Flang_2FObject_3B"); + } + MIRStructType *structTypeUnknown = FEManager::GetTypeManager().GetClassOrInterfaceType("LUnknown"); + EXPECT_EQ(structTypeUnknown, nullptr); +} + +TEST_F(FETypeManagerTest, GetClassOrInterfaceTypeFlag) { + FETypeFlag flag = FEManager::GetTypeManager().GetClassOrInterfaceTypeFlag("Ljava_2Flang_2FObject_3B"); + EXPECT_EQ(flag, FETypeFlag::kSrcMpltSys); + FETypeFlag flagUnknown = FEManager::GetTypeManager().GetClassOrInterfaceTypeFlag("LUnknown"); + EXPECT_EQ(flagUnknown, FETypeFlag::kDefault); +} + +TEST_F(FETypeManagerTest, CreateClassOrInterfaceType) { + MIRStructType *structType1 = FEManager::GetTypeManager().CreateClassOrInterfaceType("LNewClass", false, + FETypeFlag::kSrcInput); + EXPECT_NE(structType1, nullptr); + if (structType1 != nullptr) { + std::string mplName = structType1->GetCompactMplTypeName(); + EXPECT_EQ(mplName, "LNewClass"); + EXPECT_EQ(structType1->GetKind(), kTypeClassIncomplete); + } + MIRStructType *structType2 = FEManager::GetTypeManager().CreateClassOrInterfaceType("LNewInterface", true, + FETypeFlag::kSrcInput); + EXPECT_NE(structType2, nullptr); + if (structType2 != nullptr) { + std::string mplName = structType2->GetCompactMplTypeName(); + EXPECT_EQ(mplName, "LNewInterface"); + EXPECT_EQ(structType2->GetKind(), kTypeInterfaceIncomplete); + } +} + +TEST_F(FETypeManagerTest, GetOrCreateClassOrInterfaceType) { + bool isCreate = false; + MIRStructType *structType1 = FEManager::GetTypeManager().GetOrCreateClassOrInterfaceType("Ljava_2Flang_2FObject_3B", + false, + FETypeFlag::kSrcUnknown, + isCreate); + EXPECT_EQ(isCreate, false); + ASSERT_NE(structType1, nullptr); + std::string mplName1 = structType1->GetCompactMplTypeName(); + EXPECT_EQ(mplName1, "Ljava_2Flang_2FObject_3B"); + EXPECT_EQ(structType1->GetKind(), kTypeClass); + MIRStructType *structType2 = FEManager::GetTypeManager().GetOrCreateClassOrInterfaceType("LNewClass2", + false, + FETypeFlag::kSrcUnknown, + isCreate); + EXPECT_EQ(isCreate, true); + ASSERT_NE(structType2, nullptr); + std::string mplName2 = structType2->GetCompactMplTypeName(); + EXPECT_EQ(mplName2, "LNewClass2"); + EXPECT_EQ(structType2->GetKind(), kTypeClassIncomplete); +} + +TEST_F(FETypeManagerTest, GetOrCreateSameType_LoadInputType) { + // bacause flag kSrcInput > kSrcMplt in same name type, type is overridden + bool isCreate = false; + MIRStructType *structType1 = FEManager::GetTypeManager().GetOrCreateClassOrInterfaceType("LSameType1", + false, + FETypeFlag::kSrcMplt, + isCreate); + EXPECT_EQ(isCreate, true); + ASSERT_NE(structType1, nullptr); + std::string mplName1 = structType1->GetCompactMplTypeName(); + EXPECT_EQ(mplName1, "LSameType1"); + EXPECT_EQ(structType1->GetKind(), kTypeClassIncomplete); + + isCreate = false; + MIRStructType *structType2 = FEManager::GetTypeManager().GetOrCreateClassOrInterfaceType("LSameType1", + true, + FETypeFlag::kSrcInput, + isCreate); + EXPECT_EQ(isCreate, true); + 
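+  // kSrcInput outranks kSrcMplt for a same-named type, so this second request re-creates
+  // LSameType1 as an interface; the checks below confirm the override (kTypeInterfaceIncomplete).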
ASSERT_NE(structType2, nullptr); + std::string mplName2 = structType2->GetCompactMplTypeName(); + EXPECT_EQ(mplName2, "LSameType1"); + EXPECT_EQ(structType2->GetKind(), kTypeInterfaceIncomplete); +} + +TEST_F(FETypeManagerTest, GetOrCreateSameType_LoadSysType) { + // bacause flag kSrcMpltSys > kSrcInput in same name type, type is not overridden + bool isCreate = false; + MIRStructType *structType1 = FEManager::GetTypeManager().GetOrCreateClassOrInterfaceType("LSameType2", + false, + FETypeFlag::kSrcMpltSys, + isCreate); + EXPECT_EQ(isCreate, true); + ASSERT_NE(structType1, nullptr); + std::string mplName1 = structType1->GetCompactMplTypeName(); + EXPECT_EQ(mplName1, "LSameType2"); + EXPECT_EQ(structType1->GetKind(), kTypeClassIncomplete); + + isCreate = false; + MIRStructType *structType2 = FEManager::GetTypeManager().GetOrCreateClassOrInterfaceType("LSameType2", + true, + FETypeFlag::kSrcInput, + isCreate); + EXPECT_EQ(isCreate, false); + ASSERT_NE(structType2, nullptr); + std::string mplName2 = structType2->GetCompactMplTypeName(); + EXPECT_EQ(mplName2, "LSameType2"); + EXPECT_EQ(structType2->GetKind(), kTypeClassIncomplete); +} + +TEST_F(FETypeManagerTest, GetOrCreateClassOrInterfacePtrType) { + bool isCreate = false; + MIRType *ptrType = FEManager::GetTypeManager().GetOrCreateClassOrInterfacePtrType("Ljava_2Flang_2FObject_3B", + false, FETypeFlag::kSrcUnknown, + isCreate); + ASSERT_NE(ptrType, nullptr); + RedirectCout(); + ptrType->Dump(0); + EXPECT_EQ(GetBufferString(), "<* <$Ljava_2Flang_2FObject_3B>>"); + RestoreCout(); +} + +TEST_F(FETypeManagerTest, GetOrCreateTypeFromName) { + // Prim Type + MIRType *typePrim = FEManager::GetTypeManager().GetOrCreateTypeFromName("I", + FETypeFlag::kSrcUnknown, false); + ASSERT_NE(typePrim, nullptr); + EXPECT_EQ(typePrim, GlobalTables::GetTypeTable().GetInt32()); + // Object Type + MIRType *typeObject = FEManager::GetTypeManager().GetOrCreateTypeFromName("Ljava_2Flang_2FObject_3B", + FETypeFlag::kSrcUnknown, true); + ASSERT_NE(typeObject, nullptr); + RedirectCout(); + typeObject->Dump(0); + EXPECT_EQ(GetBufferString(), "<* <$Ljava_2Flang_2FObject_3B>>"); + RestoreCout(); + // Array Type + MIRType *typeArray = FEManager::GetTypeManager().GetOrCreateTypeFromName("ALjava_2Flang_2FObject_3B", + FETypeFlag::kSrcUnknown, true); + ASSERT_NE(typeArray, nullptr); + RedirectCout(); + typeArray->Dump(0); + EXPECT_EQ(GetBufferString(), "<* <[] <* <$Ljava_2Flang_2FObject_3B>>>>"); + RestoreCout(); + // Array Type2 + MIRType *typeArray2 = FEManager::GetTypeManager().GetOrCreateTypeFromName("AALjava_2Flang_2FObject_3B", + FETypeFlag::kSrcUnknown, true); + ASSERT_NE(typeArray2, nullptr); + RedirectCout(); + typeArray2->Dump(0); + EXPECT_EQ(GetBufferString(), "<* <[] <* <[] <* <$Ljava_2Flang_2FObject_3B>>>>>>"); + RestoreCout(); +} + +TEST_F(FETypeManagerTest, GetOrCreateComplexStructType) { + MIRType *elemType = GlobalTables::GetTypeTable().GetInt32(); + MIRType *type = FEManager::GetTypeManager().GetOrCreateComplexStructType(*elemType); + ASSERT_NE(type, nullptr); + EXPECT_EQ(type->IsStructType(), true); + std::string mplName = type->GetCompactMplTypeName(); + EXPECT_EQ(mplName, "Complex|I"); + MIRStructType *structType = static_cast(type); + ASSERT_EQ(structType->GetFields().size(), 2); + EXPECT_EQ(GlobalTables::GetStrTable().GetStringFromStrIdx(structType->GetFields()[0].first), "real"); + EXPECT_EQ(structType->GetFields()[0].second.first == PTY_i32, true); + EXPECT_EQ(GlobalTables::GetStrTable().GetStringFromStrIdx(structType->GetFields()[1].first), "imag"); 
+  EXPECT_EQ(structType->GetFields()[1].second.first == PTY_i32, true);
+}
+} // namespace maple
\ No newline at end of file
diff --git a/src/hir2mpl/test/common/fe_utils_test.cpp b/src/hir2mpl/test/common/fe_utils_test.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..b5c39e71cebdef3ecb27c1a9064b20d2bfb63b54
--- /dev/null
+++ b/src/hir2mpl/test/common/fe_utils_test.cpp
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#include <gtest/gtest.h>
+#include
+#include "fe_utils.h"
+
+namespace maple {
+TEST(FEUtils, Split) {
+  std::string str = "A,B,C";
+  std::vector<std::string> results = FEUtils::Split(str, ',');
+  ASSERT_EQ(results.size(), 3);
+  EXPECT_EQ(results[0], "A");
+  EXPECT_EQ(results[1], "B");
+  EXPECT_EQ(results[2], "C");
+}
+} // namespace maple
\ No newline at end of file
diff --git a/src/hir2mpl/test/common/feir_builder_test.cpp b/src/hir2mpl/test/common/feir_builder_test.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..0e0651671f9911dad9ff676c892ffa3dd871f229
--- /dev/null
+++ b/src/hir2mpl/test/common/feir_builder_test.cpp
@@ -0,0 +1,155 @@
+/*
+ * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */ +#include +#include +#include +#include +#include +#include "feir_test_base.h" +#include "feir_stmt.h" +#include "feir_var.h" +#include "feir_var_reg.h" +#include "feir_var_name.h" +#include "feir_type_helper.h" +#include "feir_builder.h" +#include "hir2mpl_ut_regx.h" + +namespace maple { +class FEIRBuilderTest : public FEIRTestBase { + public: + FEIRBuilderTest() = default; + virtual ~FEIRBuilderTest() = default; +}; + +// ---------- FEIRExprConst ---------- +TEST_F(FEIRBuilderTest, CreateExprConst_Any) { + RedirectCout(); + UniqueFEIRExpr expr1 = FEIRBuilder::CreateExprConstAnyScalar(PTY_i8, 127); + expr1->GenMIRNode(mirBuilder)->Dump(); + EXPECT_EQ(GetBufferString(), "constval i8 127\n"); + ClearBufferString(); + UniqueFEIRExpr expr2 = FEIRBuilder::CreateExprConstAnyScalar(PTY_i64, 127); + expr2->GenMIRNode(mirBuilder)->Dump(); + EXPECT_EQ(GetBufferString(), "constval i64 127\n"); + ClearBufferString(); + UniqueFEIRExpr expr3 = FEIRBuilder::CreateExprConstAnyScalar(PTY_f32, 127); + expr3->GenMIRNode(mirBuilder)->Dump(); + EXPECT_EQ(GetBufferString(), "constval f32 127f\n"); + ClearBufferString(); + UniqueFEIRExpr expr4 = FEIRBuilder::CreateExprConstAnyScalar(PTY_f64, 127); + expr4->GenMIRNode(mirBuilder)->Dump(); + EXPECT_EQ(GetBufferString(), "constval f64 127\n"); + ClearBufferString(); + UniqueFEIRExpr expr5 = FEIRBuilder::CreateExprConstAnyScalar(PTY_u8, -1); + expr5->GenMIRNode(mirBuilder)->Dump(); + EXPECT_EQ(GetBufferString(), "constval u8 255\n"); + ClearBufferString(); + UniqueFEIRExpr expr6 = FEIRBuilder::CreateExprConstAnyScalar(PTY_i8, -1); + expr6->GenMIRNode(mirBuilder)->Dump(); + EXPECT_EQ(GetBufferString(), "constval i8 -1\n"); + UniqueFEIRExpr expr7 = FEIRBuilder::CreateExprConstAnyScalar(PTY_v4i32, 0); + expr7->GenMIRNode(mirBuilder)->Dump(); + EXPECT_EQ(GetBufferString(), "intrinsicop v4i32 vector_from_scalar_v4i32 (constval i32 0)\n"); + RestoreCout(); +} + +// ---------- FEIRStmtDAssign ---------- +TEST_F(FEIRBuilderTest, CreateExprDRead) { + UniqueFEIRExpr expr = FEIRBuilder::CreateExprDRead(FEIRBuilder::CreateVarReg(0, PTY_i32)); + BaseNode *mirNode = expr->GenMIRNode(mirBuilder); + RedirectCout(); + mirNode->Dump(); + std::string dumpStr = GetBufferString(); + std::string pattern = std::string("dread i32 %Reg0_I") + HIR2MPLUTRegx::Any(); + EXPECT_EQ(HIR2MPLUTRegx::Match(dumpStr, pattern), true); + RestoreCout(); +} + +// ---------- FEIRStmtRetype ---------- +TEST_F(FEIRBuilderTest, CreateRetypeFloat2Int) { + UniqueFEIRVar dstVar = FEIRBuilder::CreateVarReg(0, PTY_i32); + UniqueFEIRVar srcVar = FEIRBuilder::CreateVarReg(0, PTY_f32); + UniqueFEIRStmt stmt = FEIRBuilder::CreateStmtRetype(std::move(dstVar), std::move(srcVar)); + RedirectCout(); + std::list mirStmts = stmt->GenMIRStmts(mirBuilder); + ASSERT_EQ(mirStmts.size(), 1); + mirStmts.front()->Dump(); + std::string dumpStr = GetBufferString(); + std::string pattern = std::string("dassign %Reg0_I 0 (cvt i32 f32 (dread f32 %Reg0_F))"); + EXPECT_EQ(dumpStr.find(pattern), 0); + RestoreCout(); +} + +TEST_F(FEIRBuilderTest, CreateRetypeShort2Float) { + UniqueFEIRVar dstVar = FEIRBuilder::CreateVarReg(0, PTY_f32); + UniqueFEIRVar srcVar = FEIRBuilder::CreateVarReg(0, PTY_i16); + UniqueFEIRStmt stmt = FEIRBuilder::CreateStmtRetype(std::move(dstVar), std::move(srcVar)); + RedirectCout(); + std::list mirStmts = stmt->GenMIRStmts(mirBuilder); + ASSERT_EQ(mirStmts.size(), 1); + mirStmts.front()->Dump(); + std::string dumpStr = GetBufferString(); + std::string pattern = std::string("dassign %Reg0_F 0 (cvt f32 i16 
(dread i32 %Reg0_S))"); + EXPECT_EQ(dumpStr.find(pattern), 0); + RestoreCout(); +} + +TEST_F(FEIRBuilderTest, CreateRetypeInt2Short) { + UniqueFEIRVar dstVar = FEIRBuilder::CreateVarReg(0, PTY_i16); + UniqueFEIRVar srcVar = FEIRBuilder::CreateVarReg(0, PTY_i32); + UniqueFEIRStmt stmt = FEIRBuilder::CreateStmtRetype(std::move(dstVar), std::move(srcVar)); + RedirectCout(); + std::list mirStmts = stmt->GenMIRStmts(mirBuilder); + ASSERT_EQ(mirStmts.size(), 1); + mirStmts.front()->Dump(); + std::string dumpStr = GetBufferString(); + std::string pattern = std::string("dassign %Reg0_S 0 (intrinsicop i16 JAVA_MERGE (dread i32 %Reg0_I))"); + EXPECT_EQ(dumpStr.find(pattern), 0); + RestoreCout(); +} + +TEST_F(FEIRBuilderTest, CreateRetypeShort2Ref) { + UniqueFEIRVar dstVar = FEIRBuilder::CreateVarReg(0, PTY_ref); + dstVar->SetType(FEIRBuilder::CreateTypeByJavaName("Ljava/lang/String;", false)); + UniqueFEIRVar srcVar = FEIRBuilder::CreateVarReg(0, PTY_i16); + UniqueFEIRStmt stmt = FEIRBuilder::CreateStmtRetype(std::move(dstVar), std::move(srcVar)); + RedirectCout(); + std::list mirStmts = stmt->GenMIRStmts(mirBuilder); + ASSERT_EQ(mirStmts.size(), 1); + mirStmts.front()->Dump(); + std::string dumpStr = GetBufferString(); + EXPECT_EQ(dumpStr.find("dassign %Reg0_R"), 0); + EXPECT_EQ(dumpStr.find(" 0 (cvt ref i16 (dread i32 %Reg0_S))", 15) != std::string::npos, true); + RestoreCout(); +} + +TEST_F(FEIRBuilderTest, CreateRetypeRef2Ref) { + UniqueFEIRVar dstVar = FEIRBuilder::CreateVarReg(0, PTY_ref); + dstVar->SetType(FEIRBuilder::CreateTypeByJavaName("Ljava/lang/String;", false)); + UniqueFEIRVar srcVar = FEIRBuilder::CreateVarReg(0, PTY_ref); + srcVar->SetType(FEIRBuilder::CreateTypeByJavaName("Ljava/lang/Object;", false)); + UniqueFEIRStmt stmt = FEIRBuilder::CreateStmtRetype(std::move(dstVar), std::move(srcVar)); + RedirectCout(); + std::list mirStmts = stmt->GenMIRStmts(mirBuilder); + ASSERT_EQ(mirStmts.size(), 1); + mirStmts.front()->Dump(); + std::string dumpStr = GetBufferString(); + // dassign %Reg0_R46 0 (retype ref <* <$Ljava_2Flang_2FString_3B>> (dread ref %Reg0_R40)) + EXPECT_EQ(dumpStr.find("dassign %Reg0_R"), 0); + EXPECT_EQ(dumpStr.find(" 0 (retype ref <* <$Ljava_2Flang_2FString_3B>> (dread ref %Reg0_R", 15) != std::string::npos, + true); + RestoreCout(); +} +} // namespace maple diff --git a/src/hir2mpl/test/common/feir_cfg_test.cpp b/src/hir2mpl/test/common/feir_cfg_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..d3c328b911675430f818c7321113229bf041176e --- /dev/null +++ b/src/hir2mpl/test/common/feir_cfg_test.cpp @@ -0,0 +1,576 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include +#include +#include "feir_cfg.h" +#include "redirect_buffer.h" +#include "hir2mpl_ut_environment.h" + +namespace maple { +class GeneralStmtHead : public FEIRStmt { + public: + GeneralStmtHead() : FEIRStmt(kStmtPesudo) {} + ~GeneralStmtHead() = default; +}; + +class GeneralStmtTail : public FEIRStmt { + public: + GeneralStmtTail() : FEIRStmt(kStmtPesudo) {} + ~GeneralStmtTail() = default; +}; + +class GeneralStmtAuxPre : public FEIRStmt { + public: + GeneralStmtAuxPre() : FEIRStmt(kStmtPesudo) { + isAuxPre = true; + } + ~GeneralStmtAuxPre() = default; +}; + +class GeneralStmtAuxPost : public FEIRStmt { + public: + GeneralStmtAuxPost() : FEIRStmt(kStmtPesudo) { + isAuxPost = true; + } + ~GeneralStmtAuxPost() = default; +}; + +class FEIRCFGDemo : public FEIRCFG { + public: + FEIRCFGDemo(MapleAllocator &argAllocator, FEIRStmt *argStmtHead, FEIRStmt *argStmtTail) + : FEIRCFG(argStmtHead, argStmtTail), + allocator(argAllocator), + mapIdxStmt(allocator.Adapter()) {} + ~FEIRCFGDemo() = default; + + void LoadGenStmtDemo1(); + void LoadGenStmtDemo2(); + void LoadGenStmtDemo3(); + void LoadGenStmtDemo4(); + void LoadGenStmtDemo5(); + void LoadGenStmtDemo6(); + void LoadGenStmtDemo7(); + void LoadGenStmtDemo8(); + + FEIRStmt *GetStmtByIdx(uint32 idx) { + CHECK_FATAL(mapIdxStmt.find(idx) != mapIdxStmt.end(), "invalid idx"); + return mapIdxStmt[idx]; + } + + template + T *NewTemporaryStmt(uint32 idx) { + FEIRStmt *ptrStmt = allocator.New(); + ptrStmt->SetFallThru(true); + stmtTail->InsertBefore(ptrStmt); + mapIdxStmt[idx] = ptrStmt; + return static_cast(ptrStmt); + } + + private: + MapleAllocator allocator; + MapleMap mapIdxStmt; +}; + +class FEIRCFGTest : public testing::Test, public RedirectBuffer { + public: + static MemPool *mp; + static FEIRStmt *genStmtHead; + static FEIRStmt *genStmtTail; + MapleAllocator allocator; + FEIRCFGDemo demoCFG; + FEIRCFGTest() + : allocator(mp), + demoCFG(allocator, genStmtHead, genStmtTail) {} + ~FEIRCFGTest() = default; + + static void SetUpTestCase() { + mp = FEUtils::NewMempool("MemPool for FEIRCFGTest", false /* isLocalPool */); + genStmtHead = mp->New(); + genStmtTail = mp->New(); + } + + static void TearDownTestCase() { + delete mp; + mp = nullptr; + } + + virtual void SetUp() { + // reset head and tail stmt + genStmtHead->SetNext(genStmtTail); + genStmtTail->SetPrev(genStmtHead); + } +}; +MemPool *FEIRCFGTest::mp = nullptr; +FEIRStmt *FEIRCFGTest::genStmtHead = nullptr; +FEIRStmt *FEIRCFGTest::genStmtTail = nullptr; + +/* GenStmtDemo1:BB + * 0 StmtHead + * 1 Stmt (fallthru = true) + * 2 Stmt (fallthru = false) + * 3 StmtTail + */ +void FEIRCFGDemo::LoadGenStmtDemo1() { + mapIdxStmt.clear(); + (void)NewTemporaryStmt(1); + FEIRStmt *stmt2 = NewTemporaryStmt(2); + stmt2->SetFallThru(false); +} + +TEST_F(FEIRCFGTest, CFGBuildForBB) { + demoCFG.LoadGenStmtDemo1(); + demoCFG.Init(); + demoCFG.LabelStmtID(); + demoCFG.BuildBB(); + bool resultCFG = demoCFG.BuildCFG(); + ASSERT_EQ(resultCFG, true); + demoCFG.LabelBBID(); + FEIRBB *bb1 = static_cast(demoCFG.GetDummyHead()->GetNext()); + EXPECT_EQ(bb1->GetNext(), demoCFG.GetDummyTail()); + EXPECT_EQ(bb1->GetStmtHead()->GetID(), 1); + EXPECT_EQ(bb1->GetStmtTail()->GetID(), 2); + EXPECT_EQ(demoCFG.HasDeadBB(), false); +} + +/* GenStmtDemo2:BB_StmtAux + * 0 StmtHead + * 1 StmtAuxPre + * 2 Stmt (fallthru = true) + * 3 Stmt (fallthru = false) + * 4 StmtAuxPost + * 5 StmtTail + */ +void FEIRCFGDemo::LoadGenStmtDemo2() { + mapIdxStmt.clear(); + (void)NewTemporaryStmt(1); + (void)NewTemporaryStmt(2); + FEIRStmt 
*stmt3 = NewTemporaryStmt(3); + (void)NewTemporaryStmt(4); + stmt3->SetFallThru(false); +} + +TEST_F(FEIRCFGTest, CFGBuildForBB_StmtAux) { + demoCFG.LoadGenStmtDemo2(); + demoCFG.Init(); + demoCFG.LabelStmtID(); + demoCFG.BuildBB(); + bool resultCFG = demoCFG.BuildCFG(); + ASSERT_EQ(resultCFG, true); + demoCFG.LabelBBID(); + FEIRBB *bb1 = static_cast(demoCFG.GetDummyHead()->GetNext()); + EXPECT_EQ(bb1->GetNext(), demoCFG.GetDummyTail()); + EXPECT_EQ(bb1->GetStmtHead()->GetID(), 1); + EXPECT_EQ(bb1->GetStmtTail()->GetID(), 4); + EXPECT_EQ(bb1->GetStmtNoAuxHead()->GetID(), 2); + EXPECT_EQ(bb1->GetStmtNoAuxTail()->GetID(), 3); + EXPECT_EQ(demoCFG.HasDeadBB(), false); +} + +/* GenStmtDemo3:CFG + * --- BB0 --- + * 0 StmtHead + * --- BB1 --- + * 1 StmtAuxPre + * 2 StmtMultiOut (fallthru = true, out = {8}) + * 3 StmtAuxPost + * --- BB2 --- + * 4 StmtAuxPre + * 5 Stmt (fallthru = false) + * 6 StmtAuxPost + * --- BB3 --- + * 7 StmtAuxPre + * 8 StmtMultiIn (fallthru = true, in = {2}) + * 9 Stmt (fallthru = false) + * 10 StmtAuxPos + * --- BB4 --- + * 11 StmtTail + * + * GenStmtDemo3_CFG: + * BB0 + * | + * BB1 + * / \ + * BB2 BB3 + */ +void FEIRCFGDemo::LoadGenStmtDemo3() { + mapIdxStmt.clear(); + // --- BB1 --- + (void)NewTemporaryStmt(1); + FEIRStmt *stmt2 = NewTemporaryStmt(2); + (void)NewTemporaryStmt(3); + // --- BB2 --- + (void)NewTemporaryStmt(4); + FEIRStmt *stmt5 = NewTemporaryStmt(5); + stmt5->SetFallThru(false); + (void)NewTemporaryStmt(6); + // --- BB3 --- + (void)NewTemporaryStmt(7); + FEIRStmt *stmt8 = NewTemporaryStmt(8); + FEIRStmt *stmt9 = NewTemporaryStmt(9); + stmt9->SetFallThru(false); + (void)NewTemporaryStmt(10); + // Link + stmt2->AddExtraSucc(*stmt8); + stmt8->AddExtraPred(*stmt2); +} + +TEST_F(FEIRCFGTest, CFGBuildForCFG) { + demoCFG.LoadGenStmtDemo3(); + demoCFG.LabelStmtID(); + demoCFG.Init(); + demoCFG.BuildBB(); + bool resultCFG = demoCFG.BuildCFG(); + ASSERT_EQ(resultCFG, true); + demoCFG.LabelBBID(); + // Check BB + FEIRBB *bb1 = static_cast(demoCFG.GetDummyHead()->GetNext()); + ASSERT_NE(bb1, demoCFG.GetDummyTail()); + FEIRBB *bb2 = static_cast(bb1->GetNext()); + ASSERT_NE(bb2, demoCFG.GetDummyTail()); + FEIRBB *bb3 = static_cast(bb2->GetNext()); + ASSERT_NE(bb3, demoCFG.GetDummyTail()); + // Check BB's detail + EXPECT_EQ(bb1->GetStmtHead()->GetID(), 1); + EXPECT_EQ(bb1->GetStmtNoAuxHead()->GetID(), 2); + EXPECT_EQ(bb1->GetStmtNoAuxTail()->GetID(), 2); + EXPECT_EQ(bb1->GetStmtTail()->GetID(), 3); + EXPECT_EQ(bb2->GetStmtHead()->GetID(), 4); + EXPECT_EQ(bb2->GetStmtNoAuxHead()->GetID(), 5); + EXPECT_EQ(bb2->GetStmtNoAuxTail()->GetID(), 5); + EXPECT_EQ(bb2->GetStmtTail()->GetID(), 6); + EXPECT_EQ(bb3->GetStmtHead()->GetID(), 7); + EXPECT_EQ(bb3->GetStmtNoAuxHead()->GetID(), 8); + EXPECT_EQ(bb3->GetStmtNoAuxTail()->GetID(), 9); + EXPECT_EQ(bb3->GetStmtTail()->GetID(), 10); + // Check CFG + EXPECT_EQ(bb1->GetPredBBs().size(), 1); + EXPECT_EQ(bb1->IsPredBB(0U), true); + EXPECT_EQ(bb1->GetSuccBBs().size(), 2); + EXPECT_EQ(bb1->IsSuccBB(2), true); + EXPECT_EQ(bb1->IsSuccBB(3), true); + EXPECT_EQ(bb2->GetPredBBs().size(), 1); + EXPECT_EQ(bb2->IsPredBB(1), true); + EXPECT_EQ(bb2->GetSuccBBs().size(), 0); + EXPECT_EQ(bb3->GetPredBBs().size(), 1); + EXPECT_EQ(bb3->IsPredBB(1), true); + EXPECT_EQ(bb3->GetSuccBBs().size(), 0); + EXPECT_EQ(demoCFG.HasDeadBB(), false); +} + +/* GenStmtDemo4:CFG_Fail + * 0 StmtHead + * 1 Stmt (fallthru = true) + * 2 Stmt (fallthru = true) + * 3 StmtTail + */ +void FEIRCFGDemo::LoadGenStmtDemo4() { + mapIdxStmt.clear(); + (void)NewTemporaryStmt(1); + 
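+  // Stmt 2 below also keeps the default fall-through, so control runs off the end of the
+  // statement list; CFGBuildForCFG_Fail expects BuildCFG to report failure for this layout.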
(void)NewTemporaryStmt(2); +} + +TEST_F(FEIRCFGTest, CFGBuildForCFG_Fail) { + demoCFG.Init(); + demoCFG.LoadGenStmtDemo4(); + demoCFG.BuildBB(); + bool resultCFG = demoCFG.BuildCFG(); + ASSERT_EQ(resultCFG, false); +} + +/* GenStmtDemo5:CFG_DeadBB + * --- BB0 --- + * 0 StmtHead + * --- BB1 --- + * 1 Stmt (fallthru = true) + * 2 Stmt (fallthru = false) + * --- BB2 --- + * 3 Stmt (fallthru = false) + * --- BB3 --- + * 4 StmtTail + * + * GenStmtDemo5_CFG: + * BB0 + * | + * BB1 BB2(DeadBB) + */ +void FEIRCFGDemo::LoadGenStmtDemo5() { + mapIdxStmt.clear(); + (void)NewTemporaryStmt(1); + FEIRStmt *stmt2 = NewTemporaryStmt(2); + stmt2->SetFallThru(false); + FEIRStmt *stmt3 = NewTemporaryStmt(3); + stmt3->SetFallThru(false); +} + +TEST_F(FEIRCFGTest, CFGBuildForCFG_DeadBB) { + demoCFG.Init(); + demoCFG.LoadGenStmtDemo5(); + demoCFG.BuildBB(); + bool resultCFG = demoCFG.BuildCFG(); + ASSERT_EQ(resultCFG, true); + demoCFG.LabelStmtID(); + demoCFG.LabelBBID(); + // Check BB + FEIRBB *bb1 = static_cast(demoCFG.GetDummyHead()->GetNext()); + ASSERT_NE(bb1, demoCFG.GetDummyTail()); + FEIRBB *bb2 = static_cast(bb1->GetNext()); + ASSERT_NE(bb2, demoCFG.GetDummyTail()); + // Check BB's detail + EXPECT_EQ(bb1->GetStmtHead()->GetID(), 1); + EXPECT_EQ(bb1->GetStmtTail()->GetID(), 2); + EXPECT_EQ(bb2->GetStmtHead()->GetID(), 3); + EXPECT_EQ(bb2->GetStmtTail()->GetID(), 3); + // Check CFG + EXPECT_EQ(bb1->GetPredBBs().size(), 1); + EXPECT_EQ(bb1->IsPredBB(0U), true); + EXPECT_EQ(bb2->GetSuccBBs().size(), 0); + EXPECT_EQ(demoCFG.HasDeadBB(), true); +} + +/* GenStmtDemo6:CFG + * --- BB0 --- + * 0 StmtHead + * --- BB1 --- + * 1 StmtMultiOut (fallthru = true, out = {3}) + * --- BB2 --- + * 2 Stmt (fallthru = true) + * --- BB3 --- + * 3 StmtMultiIn (fallthru = true, in = {1}) + * 4 Stmt (fallthru = false) + * --- BB4 --- + * 5 StmtTail + * + * GenStmtDemo6_CFG: + * BB0 + * | + * BB1 + * / \ + * BB2 | + * \ / + * BB3 + */ +void FEIRCFGDemo::LoadGenStmtDemo6() { + mapIdxStmt.clear(); + // --- BB1 --- + FEIRStmt *stmt1 = NewTemporaryStmt(1); + // --- BB2 --- + (void)NewTemporaryStmt(2); + // --- BB3 --- + FEIRStmt *stmt3 = NewTemporaryStmt(3); + FEIRStmt *stmt4 = NewTemporaryStmt(4); + stmt4->SetFallThru(false); + // Link + stmt1->AddExtraSucc(*stmt3); + stmt3->AddExtraPred(*stmt1); +} + +TEST_F(FEIRCFGTest, CFGBuildForCFG1) { + demoCFG.Init(); + demoCFG.LoadGenStmtDemo6(); + demoCFG.LabelStmtID(); + demoCFG.BuildBB(); + bool resultCFG = demoCFG.BuildCFG(); + ASSERT_EQ(resultCFG, true); + demoCFG.LabelBBID(); + // Check BB + FEIRBB *bb1 = static_cast(demoCFG.GetDummyHead()->GetNext()); + ASSERT_NE(bb1, demoCFG.GetDummyTail()); + FEIRBB *bb2 = static_cast(bb1->GetNext()); + ASSERT_NE(bb2, demoCFG.GetDummyTail()); + FEIRBB *bb3 = static_cast(bb2->GetNext()); + ASSERT_NE(bb3, demoCFG.GetDummyTail()); + // Check CFG + EXPECT_EQ(bb1->GetPredBBs().size(), 1); + EXPECT_EQ(bb1->IsPredBB(0U), true); + EXPECT_EQ(bb1->GetSuccBBs().size(), 2); + EXPECT_EQ(bb1->IsSuccBB(2), true); + EXPECT_EQ(bb1->IsSuccBB(3), true); + EXPECT_EQ(bb2->GetPredBBs().size(), 1); + EXPECT_EQ(bb2->IsPredBB(1), true); + EXPECT_EQ(bb2->GetSuccBBs().size(), 1); + EXPECT_EQ(bb2->IsSuccBB(3), true); + EXPECT_EQ(bb3->GetPredBBs().size(), 2); + EXPECT_EQ(bb3->IsPredBB(1), true); + EXPECT_EQ(bb3->IsPredBB(2), true); + EXPECT_EQ(demoCFG.HasDeadBB(), false); +} + +/* GenStmtDemo7:CFG + * --- BB0 --- + * 0 StmtHead + * --- BB1 --- + * 1 StmtMultiOut (fallthru = true, out = {5}) + * --- BB2 --- + * 2 Stmt (fallthru = true) + * --- BB3 --- + * 3 StmtMultiIn (fallthru 
= true, in = {6}) + * 4 Stmt (fallthru = false) + * --- BB4 --- + * 5 StmtMultiIn (fallthru = true, in = {1}) + * 6 Stmt (fallthru = false, out = {3}) + * --- BB5 --- + * 7 StmtTail + * + * GenStmtDemo7_CFG: + * BB0 + * | + * BB1 + * / \ + * BB2 BB4 + * \ / + * BB3 + */ +void FEIRCFGDemo::LoadGenStmtDemo7() { + mapIdxStmt.clear(); + // --- BB1 --- + FEIRStmt *stmt1 = NewTemporaryStmt(1); + // --- BB2 --- + (void)NewTemporaryStmt(2); + // --- BB3 --- + FEIRStmt *stmt3 = NewTemporaryStmt(3); + FEIRStmt *stmt4 = NewTemporaryStmt(4); + stmt4->SetFallThru(false); + // --- BB4 --- + FEIRStmt *stmt5 = NewTemporaryStmt(5); + FEIRStmt *stmt6 = NewTemporaryStmt(6); + stmt6->SetFallThru(false); + // Link + stmt1->AddExtraSucc(*stmt5); + stmt5->AddExtraPred(*stmt1); + stmt6->AddExtraSucc(*stmt3); + stmt3->AddExtraPred(*stmt6); +} + +TEST_F(FEIRCFGTest, CFGBuildForCFG2) { + demoCFG.Init(); + demoCFG.LoadGenStmtDemo7(); + demoCFG.LabelStmtID(); + demoCFG.BuildBB(); + bool resultCFG = demoCFG.BuildCFG(); + ASSERT_EQ(resultCFG, true); + demoCFG.LabelBBID(); + // Check BB + FEIRBB *bb1 = static_cast(demoCFG.GetDummyHead()->GetNext()); + ASSERT_NE(bb1, demoCFG.GetDummyTail()); + FEIRBB *bb2 = static_cast(bb1->GetNext()); + ASSERT_NE(bb2, demoCFG.GetDummyTail()); + FEIRBB *bb3 = static_cast(bb2->GetNext()); + ASSERT_NE(bb3, demoCFG.GetDummyTail()); + FEIRBB *bb4 = static_cast(bb3->GetNext()); + ASSERT_NE(bb4, demoCFG.GetDummyTail()); + // Check CFG + EXPECT_EQ(bb2->GetPredBBs().size(), 1); + EXPECT_EQ(bb2->IsPredBB(1), true); + EXPECT_EQ(bb2->GetSuccBBs().size(), 1); + EXPECT_EQ(bb2->IsSuccBB(3), true); + EXPECT_EQ(bb4->GetPredBBs().size(), 1); + EXPECT_EQ(bb4->IsPredBB(1), true); + EXPECT_EQ(bb4->GetSuccBBs().size(), 1); + EXPECT_EQ(bb4->IsSuccBB(3), true); + EXPECT_EQ(bb3->GetPredBBs().size(), 2); + EXPECT_EQ(bb3->IsPredBB(2), true); + EXPECT_EQ(bb3->IsPredBB(4), true); + EXPECT_EQ(demoCFG.HasDeadBB(), false); +} + +/* GenStmtDemo8:CFG + * --- BB0 --- + * 0 StmtHead + * --- BB1 --- + * 1 StmtMultiOut (fallthru = true, out = {6}) + * --- BB2 --- + * 2 StmtMultiIn (fallthru = true, in = {5}) + * 3 Stmt (fallthru = true, out = {4}) + * --- BB3 --- + * 4 Stmt (fallthru = true, in = {3}) + * 5 StmtMultiOut (fallthru = true, in = {2}) + * --- BB4 --- + * 6 StmtMultiIn (fallthru = true, in = {1}) + * 7 Stmt (fallthru = false) + * --- BB5 --- + * 8 StmtTail + * + * GenStmtDemo8_CFG_while: + * BB0 + * | + * BB1 ----- + * | | + * BB2 <- | + * | | | + * BB3 -- | + * | | + * BB4 <---- + */ +void FEIRCFGDemo::LoadGenStmtDemo8() { + mapIdxStmt.clear(); + // --- BB1 --- + FEIRStmt *stmt1 = NewTemporaryStmt(1); + // --- BB2 --- + FEIRStmt *stmt2 = NewTemporaryStmt(2); + FEIRStmt *stmt3 = NewTemporaryStmt(3); + // --- BB3 --- + FEIRStmt *stmt4 = NewTemporaryStmt(4); + FEIRStmt *stmt5 = NewTemporaryStmt(5); + // --- BB4 --- + FEIRStmt *stmt6 = NewTemporaryStmt(6); + FEIRStmt *stmt7 = NewTemporaryStmt(7); + stmt7->SetFallThru(false); + // Link + stmt1->AddExtraSucc(*stmt6); + stmt6->AddExtraPred(*stmt1); + stmt5->AddExtraSucc(*stmt2); + stmt2->AddExtraPred(*stmt5); + stmt3->AddExtraSucc(*stmt4); + stmt4->AddExtraPred(*stmt3); +} + +TEST_F(FEIRCFGTest, CFGBuildForCFG_while) { + demoCFG.Init(); + demoCFG.LoadGenStmtDemo8(); + demoCFG.LabelStmtID(); + demoCFG.BuildBB(); + bool resultCFG = demoCFG.BuildCFG(); + ASSERT_EQ(resultCFG, true); + demoCFG.LabelBBID(); + // Check BB + demoCFG.DumpBBs(); + FEIRBB *bb1 = static_cast(demoCFG.GetDummyHead()->GetNext()); + ASSERT_NE(bb1, demoCFG.GetDummyTail()); + FEIRBB *bb2 = 
static_cast(bb1->GetNext()); + ASSERT_NE(bb2, demoCFG.GetDummyTail()); + FEIRBB *bb3 = static_cast(bb2->GetNext()); + ASSERT_NE(bb3, demoCFG.GetDummyTail()); + FEIRBB *bb4 = static_cast(bb3->GetNext()); + ASSERT_NE(bb4, demoCFG.GetDummyTail()); + // Check CFG + EXPECT_EQ(bb1->GetSuccBBs().size(), 2); + EXPECT_EQ(bb1->IsSuccBB(2), true); + EXPECT_EQ(bb1->IsSuccBB(4), true); + EXPECT_EQ(bb2->GetPredBBs().size(), 2); + EXPECT_EQ(bb2->IsPredBB(1), true); + EXPECT_EQ(bb2->IsPredBB(3), true); + EXPECT_EQ(bb2->GetSuccBBs().size(), 1); + EXPECT_EQ(bb2->IsSuccBB(3), true); + EXPECT_EQ(bb3->GetPredBBs().size(), 1); + EXPECT_EQ(bb3->IsPredBB(2), true); + EXPECT_EQ(bb3->GetSuccBBs().size(), 2); + EXPECT_EQ(bb3->IsSuccBB(2), true); + EXPECT_EQ(bb3->IsSuccBB(4), true); + EXPECT_EQ(bb4->GetPredBBs().size(), 2); + EXPECT_EQ(bb4->IsPredBB(1), true); + EXPECT_EQ(bb4->IsPredBB(3), true); + EXPECT_EQ(demoCFG.HasDeadBB(), false); +} +} // namespace maple diff --git a/src/hir2mpl/test/common/feir_lower_test.cpp b/src/hir2mpl/test/common/feir_lower_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..5348b1b73b6dfd0f7806fda1095354ce5b93ed38 --- /dev/null +++ b/src/hir2mpl/test/common/feir_lower_test.cpp @@ -0,0 +1,215 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include +#include +#include "feir_test_base.h" +#include "hir2mpl_ut_environment.h" +#include "hir2mpl_ut_regx.h" +#include "fe_function.h" +#include "feir_lower.h" +#include "feir_var_reg.h" +#include "feir_builder.h" + +namespace maple { +class FEFunctionDemo : public FEFunction { + public: + FEFunctionDemo(MIRFunction &argMIRFunction) + : FEFunction(argMIRFunction, std::make_unique(true)) {} + ~FEFunctionDemo() = default; + + bool PreProcessTypeNameIdx() override { + return false; + } + + bool GenerateGeneralStmt(const std::string &phaseName) override { + return true; + } + + void GenerateGeneralStmtFailCallBack() override {} + void GenerateGeneralDebugInfo() override {} + bool GenerateArgVarList(const std::string &phaseName) override { + return true; + } + + bool HasThis() override { + return false; + } + + bool IsNative() override { + return false; + } + + bool VerifyGeneral() override { + return false; + } + + void VerifyGeneralFailCallBack() override {} + bool EmitToFEIRStmt(const std::string &phaseName) override { + return true; + } + + bool GenerateAliasVars(const std::string &phaseName) override { + return true; + } + + void LoadGenStmtDemo1(); + void LoadGenStmtDemo2(); + void LoadGenStmtDemo3(); +}; + +class FEIRLowerTest : public FEIRTestBase { + public: + FEFunctionDemo feFunc; + + FEIRLowerTest() + : feFunc(*func) { + feFunc.Init(); + } + ~FEIRLowerTest() = default; +}; + +// ifStmt +void FEFunctionDemo::LoadGenStmtDemo1() { + UniqueFEIRVar varReg = FEIRBuilder::CreateVarReg(0, PTY_u1); + std::unique_ptr exprDReadReg = std::make_unique(std::move(varReg)); + // ThenStmts + UniqueFEIRVar dstVar = FEIRBuilder::CreateVarReg(0, PTY_i32); + UniqueFEIRVar srcVar = std::make_unique(1, PTY_i32); + UniqueFEIRExpr exprDRead = std::make_unique(std::move(srcVar)); + UniqueFEIRStmt stmtDAssign = std::make_unique(std::move(dstVar), exprDRead->Clone()); + std::list thenStmts; + thenStmts.emplace_back(std::move(stmtDAssign)); + // ElseStmts + UniqueFEIRVar dstVar2 = FEIRBuilder::CreateVarReg(0, PTY_f32); + UniqueFEIRStmt stmtDAssign1 = std::make_unique(std::move(dstVar2), std::move(exprDRead)); + std::list elseStmts; + elseStmts.emplace_back(std::move(stmtDAssign1)); + + std::list stmts; + stmts.emplace_back(std::make_unique(std::move(exprDReadReg), thenStmts, elseStmts)); + AppendFEIRStmts(stmts); +} + +// whileStmt/forStmt +void FEFunctionDemo::LoadGenStmtDemo2() { + // conditionExpr: val > 10 + UniqueFEIRVar varReg = FEIRBuilder::CreateVarReg(0, PTY_i32); + UniqueFEIRExpr exprDRead = FEIRBuilder::CreateExprDRead(varReg->Clone()); + UniqueFEIRExpr exprConst = FEIRBuilder::CreateExprConstI32(10); + UniqueFEIRExpr conditionExpr = FEIRBuilder::CreateExprBinary(OP_gt, exprDRead->Clone(), exprConst->Clone()); + // bodyStmts : --val + UniqueFEIRExpr exprConst2 = FEIRBuilder::CreateExprConstI32(1); + UniqueFEIRExpr subExpr = FEIRBuilder::CreateExprBinary(OP_sub, exprDRead->Clone(), exprConst2->Clone()); + UniqueFEIRStmt bodyStmt = FEIRBuilder::CreateStmtDAssign(varReg->Clone(), subExpr->Clone()); + std::list bodayStmts; + bodayStmts.emplace_back(std::move(bodyStmt)); + std::list stmts; + stmts.emplace_back(std::make_unique(OP_while, std::move(conditionExpr), std::move(bodayStmts))); + AppendFEIRStmts(stmts); +} + +// doWhileStmt +void FEFunctionDemo::LoadGenStmtDemo3() { + // bodayStmts --val + UniqueFEIRVar varReg = FEIRBuilder::CreateVarReg(0, PTY_i32); + UniqueFEIRExpr exprDRead = FEIRBuilder::CreateExprDRead(varReg->Clone()); + UniqueFEIRExpr exprConst = 
FEIRBuilder::CreateExprConstI32(1); + UniqueFEIRExpr subExpr = FEIRBuilder::CreateExprBinary(OP_sub, exprDRead->Clone(), exprConst->Clone()); + UniqueFEIRStmt bodyStmt = FEIRBuilder::CreateStmtDAssign(varReg->Clone(), subExpr->Clone()); + std::list bodayStmts; + bodayStmts.emplace_back(std::move(bodyStmt)); + // conditionExpr: val > 10 + UniqueFEIRExpr exprConst2 = FEIRBuilder::CreateExprConstI32(10); + UniqueFEIRExpr conditionExpr = FEIRBuilder::CreateExprBinary(OP_gt, exprDRead->Clone(), exprConst2->Clone()); + std::list stmts; + stmts.emplace_back(std::make_unique(OP_dowhile, std::move(conditionExpr), std::move(bodayStmts))); + AppendFEIRStmts(stmts); +} + +TEST_F(FEIRLowerTest, IfStmtLower) { + feFunc.LoadGenStmtDemo1(); + bool res = feFunc.LowerFunc("fert lower"); + ASSERT_EQ(res, true); + RedirectCout(); + const FEIRStmt *head = feFunc.GetFEIRStmtHead(); + FEIRStmt *stmt = static_cast(head->GetNext()); + while (stmt != nullptr && stmt->GetKind() != kStmtPesudoFuncEnd) { + std::list baseNodes = stmt->GenMIRStmts(mirBuilder); + baseNodes.front()->Dump(); + stmt = static_cast(stmt->GetNext()); + } + std::string pattern = + "brfalse @.* \\(dread u32 %Reg0_Z\\)\n\n"\ + "dassign %Reg0_I 0 \\(dread i32 %Reg1_I\\)\n\n"\ + "goto @.*\n\n"\ + "@.*\n"\ + "dassign %Reg0_F 0 \\(dread i32 %Reg1_I\\)\n\n"\ + "@.*\n"; + ASSERT_EQ(HIR2MPLUTRegx::Match(GetBufferString(), pattern), true); + ASSERT_EQ(static_cast(head->GetNext())->GetLabelName(), + static_cast(feFunc.GetFEIRStmtTail()->GetPrev()->GetPrev()->GetPrev())->GetLabelName()); + ASSERT_EQ(static_cast(head->GetNext()->GetNext()->GetNext())->GetLabelName(), + static_cast(feFunc.GetFEIRStmtTail()->GetPrev())->GetLabelName()); + RestoreCout(); +} + +TEST_F(FEIRLowerTest, WhileAndForStmtLower) { + feFunc.LoadGenStmtDemo2(); + bool res = feFunc.LowerFunc("fert lower"); + ASSERT_EQ(res, true); + RedirectCout(); + const FEIRStmt *head = feFunc.GetFEIRStmtHead(); + FEIRStmt *stmt = static_cast(head->GetNext()); + while (stmt != nullptr && stmt->GetKind() != kStmtPesudoFuncEnd) { + std::list baseNodes = stmt->GenMIRStmts(mirBuilder); + baseNodes.front()->Dump(); + stmt = static_cast(stmt->GetNext()); + } + std::string pattern = + "@.*\n" + "brfalse @.* \\(gt u1 i32 \\(dread i32 %Reg0_I, constval i32 10\\)\\)\n\n"\ + "dassign %Reg0_I 0 \\(sub i32 \\(dread i32 %Reg0_I, constval i32 1\\)\\)\n\n"\ + "goto @.*\n\n"\ + "@.*\n"; + ASSERT_EQ(HIR2MPLUTRegx::Match(GetBufferString(), pattern), true); + ASSERT_EQ(static_cast(head->GetNext()->GetNext())->GetLabelName(), + static_cast(feFunc.GetFEIRStmtTail()->GetPrev())->GetLabelName()); + ASSERT_EQ(static_cast(feFunc.GetFEIRStmtTail()->GetPrev()->GetPrev())->GetLabelName(), + static_cast(head->GetNext())->GetLabelName()); + RestoreCout(); +} + +TEST_F(FEIRLowerTest, DoWhileStmtLower) { + feFunc.LoadGenStmtDemo3(); + bool res = feFunc.LowerFunc("fert lower"); + ASSERT_EQ(res, true); + RedirectCout(); + const FEIRStmt *head = feFunc.GetFEIRStmtHead(); + FEIRStmt *stmt = static_cast(head->GetNext()); + while (stmt != nullptr && stmt->GetKind() != kStmtPesudoFuncEnd) { + std::list baseNodes = stmt->GenMIRStmts(mirBuilder); + baseNodes.front()->Dump(); + stmt = static_cast(stmt->GetNext()); + } + std::string pattern = + "@.*\n" + "dassign %Reg0_I 0 \\(sub i32 \\(dread i32 %Reg0_I, constval i32 1\\)\\)\n\n"\ + "brtrue @.* \\(gt u1 i32 \\(dread i32 %Reg0_I, constval i32 10\\)\\)\n\n"; + ASSERT_EQ(HIR2MPLUTRegx::Match(GetBufferString(), pattern), true); + 
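+  // The lowered do-while starts with a label and ends with a brtrue back to it; the check
+  // below verifies that the branch target and the head label share one name.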
ASSERT_EQ(static_cast(feFunc.GetFEIRStmtTail()->GetPrev())->GetLabelName(), + static_cast(head->GetNext())->GetLabelName()); + RestoreCout(); +} +} // namespace maple diff --git a/src/hir2mpl/test/common/feir_stmt_dfg_test.cpp b/src/hir2mpl/test/common/feir_stmt_dfg_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..1039d3559e1be72aa16f4c3639b8bed8f723d223 --- /dev/null +++ b/src/hir2mpl/test/common/feir_stmt_dfg_test.cpp @@ -0,0 +1,264 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include +#include +#include +#include +#include +#include "feir_test_base.h" +#include "feir_stmt.h" +#include "feir_var.h" +#include "feir_var_reg.h" +#include "feir_var_name.h" +#include "feir_type_helper.h" +#include "feir_builder.h" + +namespace maple { +class FEIRDFGTest : public FEIRTestBase { + public: + std::vector vars; + std::vector checkPoints; + FEIRDFGTest() = default; + virtual ~FEIRDFGTest() = default; + void Init(uint32 nVar, uint32 nCheckPoint) { + vars.clear(); + checkPoints.clear(); + vars.resize(nVar); + for (uint i = 0; i < nCheckPoint; i++) { + checkPoints.push_back(std::make_unique()); + } + } + + void AddVar2DFGNodes(std::list &nodes, uint32 idx, UniqueFEIRVar var, bool isDef) { + CHECK_FATAL(idx < vars.size(), "index out of range"); + var->SetDef(isDef); + vars[idx].reset(var.release()); + nodes.push_back(&vars[idx]); + } + + void BuildDFG(const std::string &str) { + if (str.length() == 0) { + return; + } + std::string item; + std::string remainStr = str; + do { + size_t npos = remainStr.find(','); + item = remainStr.substr(0, npos); + if (item.length() > 0) { + size_t npos2 = item.find("->"); + std::string strSrc = item.substr(0, npos2); + std::string strDst = item.substr(npos2 + 2); + uint32 src = std::atoi(strSrc.c_str()); + uint32 dst = std::atoi(strDst.c_str()); + TransformCheckPoint(checkPoints[dst])->AddPredCheckPoint(*(TransformCheckPoint(checkPoints[src]))); + } + if (npos != std::string::npos) { + remainStr = remainStr.substr(npos + 1); + } else { + remainStr = ""; + } + } while (remainStr.length() > 0); + } + + FEIRStmtCheckPoint *TransformCheckPoint(const UniqueFEIRStmt &stmtCheckPoint) { + if (stmtCheckPoint == nullptr || stmtCheckPoint->GetKind() != FEIRNodeKind::kStmtCheckPoint) { + CHECK_FATAL(false, "invalid input"); + } + return static_cast(stmtCheckPoint.get()); + } +}; + +// ---------- FEIRDFGNode ---------- +TEST_F(FEIRDFGTest, FEIRDFGNode_VarReg_EqualsTo) { + std::unique_ptr varReg1 = FEIRBuilder::CreateVarReg(0, PTY_i32); + std::unique_ptr varReg2 = FEIRBuilder::CreateVarReg(0, PTY_i64); + FEIRDFGNode node1(varReg1); + FEIRDFGNode node2(varReg2); + FEIRDFGNode node11(node1); + FEIRDFGNode node21(node2); + FEIRDFGNode node12 = node11; + FEIRDFGNode node22 = node21; + EXPECT_EQ(node12 == node22, true); +} + +TEST_F(FEIRDFGTest, FEIRDFGNode_VarDiff_EqualsTo) { + std::unique_ptr varReg1 = FEIRBuilder::CreateVarReg(0, PTY_i32); + std::unique_ptr varReg2 = FEIRBuilder::CreateVarName("testVar2", PTY_i32, false, 
false); + (void)GlobalTables::GetStrTable().GetOrCreateStrIdxFromName("testVar3"); + std::unique_ptr varReg3 = FEIRBuilder::CreateVarName("testVar3", PTY_i32, false, false); + FEIRDFGNode node1(varReg1); + FEIRDFGNode node2(varReg2); + FEIRDFGNode node3(varReg3); + EXPECT_EQ(node1 == node2, false); + EXPECT_EQ(node1 == node3, false); + EXPECT_EQ(node2 == node3, false); +} + +TEST_F(FEIRDFGTest, FEIRDFGNode_VarReg_set) { + std::unique_ptr varReg1 = FEIRBuilder::CreateVarReg(0, PTY_i32); + std::unique_ptr varReg2 = FEIRBuilder::CreateVarReg(0, PTY_i64); + std::unique_ptr varReg3 = FEIRBuilder::CreateVarReg(1, PTY_i32); + std::unique_ptr varReg4 = FEIRBuilder::CreateVarReg(0, PTY_u1); + std::unordered_set set1; + set1.insert(FEIRDFGNode(varReg1)); + set1.insert(FEIRDFGNode(varReg2)); + EXPECT_EQ(set1.size(), 1); + set1.insert(FEIRDFGNode(varReg3)); + EXPECT_EQ(set1.size(), 2); + set1.erase(FEIRDFGNode(varReg4)); + EXPECT_EQ(set1.size(), 1); +} + +// ---------- FEIRStmtCheckPoint ---------- +/* + * Test1 + * DFG: hir2mpl/doc/images/ut_cases/DFG/Test1.dot + * Image: hir2mpl/doc/images/ut_cases/DFG/Test1.png + */ +TEST_F(FEIRDFGTest, CheckPointTest1) { + Init(2, 1); + // BB0 + std::list stmt0; + AddVar2DFGNodes(stmt0, 0, FEIRBuilder::CreateVarReg(0, PTY_i32), true); + std::list stmt1; + AddVar2DFGNodes(stmt1, 1, FEIRBuilder::CreateVarReg(0, PTY_i32), false); + TransformCheckPoint(checkPoints[0])->RegisterDFGNodes(stmt0); + TransformCheckPoint(checkPoints[0])->RegisterDFGNodes(stmt1); + // Build DFG + BuildDFG(""); + std::set &defs = TransformCheckPoint(checkPoints[0])->CalcuDef(vars[1]); + ASSERT_EQ(defs.size(), 1); + EXPECT_NE(defs.find(&vars[0]), defs.end()); +} + +/* + * Test2 + * DFG: hir2mpl/doc/images/ut_cases/DFG/Test2.dot + * Image: hir2mpl/doc/images/ut_cases/DFG/Test2.png + */ +TEST_F(FEIRDFGTest, CheckPointTest2) { + Init(2, 2); + // BB0 + std::list stmt0; + AddVar2DFGNodes(stmt0, 0, FEIRBuilder::CreateVarReg(0, PTY_i32), true); + TransformCheckPoint(checkPoints[0])->RegisterDFGNodes(stmt0); + // BB1 + std::list stmt1; + AddVar2DFGNodes(stmt1, 1, FEIRBuilder::CreateVarReg(0, PTY_i32), false); + TransformCheckPoint(checkPoints[1])->RegisterDFGNodes(stmt1); + // Build DFG + BuildDFG("0->1"); + std::set &defs = TransformCheckPoint(checkPoints[1])->CalcuDef(vars[1]); + ASSERT_EQ(defs.size(), 1); + EXPECT_NE(defs.find(&vars[0]), defs.end()); +} + +/* + * Test3 + * DFG: hir2mpl/doc/images/ut_cases/DFG/Test3.dot + * Image: hir2mpl/doc/images/ut_cases/DFG/Test3.png + */ +TEST_F(FEIRDFGTest, Test3) { + Init(4, 4); + // BB0 + std::list stmt0; + AddVar2DFGNodes(stmt0, 0, FEIRBuilder::CreateVarReg(0, PTY_i32), true); + TransformCheckPoint(checkPoints[0])->RegisterDFGNodes(stmt0); + // BB1 + std::list stmt1; + AddVar2DFGNodes(stmt1, 1, FEIRBuilder::CreateVarReg(0, PTY_i32), true); + TransformCheckPoint(checkPoints[1])->RegisterDFGNodes(stmt1); + // BB2 + std::list stmt2; + TransformCheckPoint(checkPoints[2])->RegisterDFGNodes(stmt2); + // BB3 + std::list stmt3; + AddVar2DFGNodes(stmt3, 3, FEIRBuilder::CreateVarReg(0, PTY_i32), false); + TransformCheckPoint(checkPoints[3])->RegisterDFGNodes(stmt3); + // Build DFG + BuildDFG("0->1,0->2,1->3,2->3"); + std::set &defs = TransformCheckPoint(checkPoints[3])->CalcuDef(vars[3]); + ASSERT_EQ(defs.size(), 2); + EXPECT_NE(defs.find(&vars[0]), defs.end()); + EXPECT_NE(defs.find(&vars[1]), defs.end()); +} + +/* + * Test4 + * DFG: hir2mpl/doc/images/ut_cases/DFG/Test4.dot + * Image: hir2mpl/doc/images/ut_cases/DFG/Test4.png + */ +TEST_F(FEIRDFGTest, Test4) { + 
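+  // Same diamond shape as Test3, but here BB2 also defines the register, so BB0's def is
+  // killed on both paths and only the defs in BB1 and BB2 should reach the use in BB3.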
Init(4, 4); + // BB0 + std::list stmt0; + AddVar2DFGNodes(stmt0, 0, FEIRBuilder::CreateVarReg(0, PTY_i32), true); + TransformCheckPoint(checkPoints[0])->RegisterDFGNodes(stmt0); + // BB1 + std::list stmt1; + AddVar2DFGNodes(stmt1, 1, FEIRBuilder::CreateVarReg(0, PTY_i32), true); + TransformCheckPoint(checkPoints[1])->RegisterDFGNodes(stmt1); + // BB2 + std::list stmt2; + AddVar2DFGNodes(stmt2, 2, FEIRBuilder::CreateVarReg(0, PTY_i32), true); + TransformCheckPoint(checkPoints[2])->RegisterDFGNodes(stmt2); + // BB3 + std::list stmt3; + AddVar2DFGNodes(stmt3, 3, FEIRBuilder::CreateVarReg(0, PTY_i32), false); + TransformCheckPoint(checkPoints[3])->RegisterDFGNodes(stmt3); + // Build DFG + BuildDFG("0->1,0->2,1->3,2->3"); + std::set &defs = TransformCheckPoint(checkPoints[3])->CalcuDef(vars[3]); + ASSERT_EQ(defs.size(), 2); + EXPECT_EQ(defs.find(&vars[0]), defs.end()); + EXPECT_NE(defs.find(&vars[1]), defs.end()); + EXPECT_NE(defs.find(&vars[2]), defs.end()); +} + +/* + * Test5 + * DFG: hir2mpl/doc/images/ut_cases/DFG/Test5.dot + * Image: hir2mpl/doc/images/ut_cases/DFG/Test5.png + */ +TEST_F(FEIRDFGTest, Test5) { + Init(4, 4); + // BB0 + std::list stmt0; + AddVar2DFGNodes(stmt0, 0, FEIRBuilder::CreateVarReg(0, PTY_i32), true); + TransformCheckPoint(checkPoints[0])->RegisterDFGNodes(stmt0); + // BB1 + std::list stmt1; + AddVar2DFGNodes(stmt1, 1, FEIRBuilder::CreateVarReg(0, PTY_i32), false); + TransformCheckPoint(checkPoints[1])->RegisterDFGNodes(stmt1); + // BB2 + std::list stmt2; + AddVar2DFGNodes(stmt2, 2, FEIRBuilder::CreateVarReg(0, PTY_i32), true); + TransformCheckPoint(checkPoints[2])->RegisterDFGNodes(stmt2); + // BB3 + std::list stmt3; + AddVar2DFGNodes(stmt3, 3, FEIRBuilder::CreateVarReg(0, PTY_i32), false); + TransformCheckPoint(checkPoints[3])->RegisterDFGNodes(stmt3); + // Build DFG + BuildDFG("0->1,1->2,2->3,2->1"); + std::set &defs1 = TransformCheckPoint(checkPoints[1])->CalcuDef(vars[1]); + ASSERT_EQ(defs1.size(), 2); + EXPECT_NE(defs1.find(&vars[0]), defs1.end()); + EXPECT_NE(defs1.find(&vars[2]), defs1.end()); + std::set &defs3 = TransformCheckPoint(checkPoints[3])->CalcuDef(vars[3]); + ASSERT_EQ(defs3.size(), 1); + EXPECT_NE(defs3.find(&vars[2]), defs3.end()); +} +} // namespace maple \ No newline at end of file diff --git a/src/hir2mpl/test/common/feir_stmt_test.cpp b/src/hir2mpl/test/common/feir_stmt_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..bc26d5867791308a5673e7ae444872f8558fe535 --- /dev/null +++ b/src/hir2mpl/test/common/feir_stmt_test.cpp @@ -0,0 +1,573 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */
+#include <gtest/gtest.h>
+#include <memory>
+#include <string>
+#include <vector>
+#include <list>
+#include "feir_test_base.h"
+#include "feir_stmt.h"
+#include "feir_var.h"
+#include "feir_var_reg.h"
+#include "feir_var_name.h"
+#include "feir_type_helper.h"
+#include "feir_builder.h"
+#include "hir2mpl_ut_regx.h"
+#include "fe_utils_java.h"
+#include "ror.h"
+#define private public
+#undef private
+
+namespace maple {
+class FEIRStmtTest : public FEIRTestBase {
+ public:
+  FEIRStmtTest() = default;
+  virtual ~FEIRStmtTest() = default;
+};
+
+// ---------- FEIRExprConst ----------
+TEST_F(FEIRStmtTest, FEIRExprConst_i64) {
+  std::unique_ptr<FEIRExprConst> exprConst = std::make_unique<FEIRExprConst>(int64{ 0x100 }, PTY_i64);
+  std::unique_ptr<FEIRExpr> exprConst2 = exprConst->Clone();
+  BaseNode *baseNode = exprConst2->GenMIRNode(mirBuilder);
+  RedirectCout();
+  baseNode->Dump();
+  EXPECT_EQ(GetBufferString(), "constval i64 256\n");
+  RestoreCout();
+}
+
+TEST_F(FEIRStmtTest, FEIRExprConst_u64) {
+  std::unique_ptr<FEIRExprConst> exprConst = std::make_unique<FEIRExprConst>(uint64{ 0x100 }, PTY_u64);
+  std::unique_ptr<FEIRExpr> exprConst2 = exprConst->Clone();
+  BaseNode *baseNode = exprConst2->GenMIRNode(mirBuilder);
+  RedirectCout();
+  baseNode->Dump();
+  EXPECT_EQ(GetBufferString(), "constval u64 256\n");
+  RestoreCout();
+}
+
+TEST_F(FEIRStmtTest, FEIRExprConst_f32) {
+  std::unique_ptr<FEIRExprConst> exprConst = std::make_unique<FEIRExprConst>(1.5f);
+  std::unique_ptr<FEIRExpr> exprConst2 = exprConst->Clone();
+  BaseNode *baseNode = exprConst2->GenMIRNode(mirBuilder);
+  RedirectCout();
+  baseNode->Dump();
+  EXPECT_EQ(GetBufferString(), "constval f32 1.5f\n");
+  RestoreCout();
+}
+
+TEST_F(FEIRStmtTest, FEIRExprConst_f64) {
+  std::unique_ptr<FEIRExprConst> exprConst = std::make_unique<FEIRExprConst>(1.5);
+  std::unique_ptr<FEIRExpr> exprConst2 = exprConst->Clone();
+  BaseNode *baseNode = exprConst2->GenMIRNode(mirBuilder);
+  RedirectCout();
+  baseNode->Dump();
+  EXPECT_EQ(GetBufferString(), "constval f64 1.5\n");
+  RestoreCout();
+}
+
+TEST_F(FEIRStmtTest, FEIRExprConstUnsupported) {
+  std::unique_ptr<FEIRExprConst> exprConst = std::make_unique<FEIRExprConst>(int64{ 0 }, PTY_unknown);
+  BaseNode *baseNode = exprConst->GenMIRNode(mirBuilder);
+  EXPECT_EQ(baseNode, nullptr);
+}
+
+// ---------- FEIRExprUnary ----------
+TEST_F(FEIRStmtTest, FEIRExprUnary) {
+  std::unique_ptr<FEIRVar> varReg = FEIRBuilder::CreateVarReg(0, PTY_i32);
+  std::unique_ptr<FEIRExprDRead> exprDRead = std::make_unique<FEIRExprDRead>(std::move(varReg));
+  std::unique_ptr<FEIRExprUnary> expr = std::make_unique<FEIRExprUnary>(OP_neg, std::move(exprDRead));
+  expr->GetType()->SetPrimType(PTY_i32);
+  std::unique_ptr<FEIRExpr> expr2 = expr->Clone();
+  BaseNode *baseNode = expr2->GenMIRNode(mirBuilder);
+  RedirectCout();
+  baseNode->Dump();
+  std::string dumpStr = GetBufferString();
+  std::string pattern = std::string("neg i32 \\(dread i32 %Reg0_I\\)") + HIR2MPLUTRegx::Any();
+  EXPECT_EQ(HIR2MPLUTRegx::Match(dumpStr, pattern), true);
+  RestoreCout();
+  std::vector<FEIRVar*> varUses = expr2->GetVarUses();
+  ASSERT_EQ(varUses.size(), 1);
+}
+
+// ---------- FEIRExprTypeCvt ----------
+TEST_F(FEIRStmtTest, FEIRExprTypeCvtMode1) {
+  std::unique_ptr<FEIRVar> varReg = FEIRBuilder::CreateVarReg(0, PTY_i32);
+  std::unique_ptr<FEIRExprDRead> exprDRead = std::make_unique<FEIRExprDRead>(std::move(varReg));
+  std::unique_ptr<FEIRExprTypeCvt> expr = std::make_unique<FEIRExprTypeCvt>(OP_cvt, std::move(exprDRead));
+  expr->GetType()->SetPrimType(PTY_f32);
+  std::unique_ptr<FEIRExpr> expr2 = expr->Clone();
+  BaseNode *baseNode = expr2->GenMIRNode(mirBuilder);
+  RedirectCout();
+  baseNode->Dump();
+  std::string dumpStr = GetBufferString();
+  std::string pattern = std::string("cvt f32 i32 \\(dread i32 %Reg0_I\\)") + HIR2MPLUTRegx::Any();
+  EXPECT_EQ(HIR2MPLUTRegx::Match(dumpStr, pattern), true);
+  RestoreCout();
+}
+
+TEST_F(FEIRStmtTest, FEIRExprTypeCvtMode2) {
+  std::unique_ptr<FEIRVar> varReg = FEIRBuilder::CreateVarReg(0, PTY_f32);
+  std::unique_ptr<FEIRExprDRead> exprDRead = std::make_unique<FEIRExprDRead>(std::move(varReg));
+  std::unique_ptr<FEIRExprTypeCvt> expr = std::make_unique<FEIRExprTypeCvt>(OP_round, std::move(exprDRead));
+  expr->GetType()->SetPrimType(PTY_i32);
+  std::unique_ptr<FEIRExpr> expr2 = expr->Clone();
+  BaseNode *baseNode = expr2->GenMIRNode(mirBuilder);
+  RedirectCout();
+  baseNode->Dump();
+  std::string dumpStr = GetBufferString();
+  std::string pattern = std::string("round i32 f32 \\(dread f32 %Reg0_F\\)") + HIR2MPLUTRegx::Any();
+  EXPECT_EQ(HIR2MPLUTRegx::Match(dumpStr, pattern), true);
+  RestoreCout();
+}
+
+TEST_F(FEIRStmtTest, FEIRExprTypeCvtMode3) {
+  std::unique_ptr<FEIRType> typeVar = FEIRTypeHelper::CreateTypeByJavaName("Ltest/lang/Object;", false, true);
+  std::unique_ptr<FEIRVarReg> varReg = std::make_unique<FEIRVarReg>(0, std::move(typeVar));
+  std::unique_ptr<FEIRExprDRead> exprDRead = std::make_unique<FEIRExprDRead>(std::move(varReg));
+  std::unique_ptr<FEIRType> typeDst = FEIRTypeHelper::CreateTypeByJavaName("Ltest/lang/String;", false, true);
+  std::unique_ptr<FEIRExprTypeCvt> expr = std::make_unique<FEIRExprTypeCvt>(std::move(typeDst), OP_retype,
+      std::move(exprDRead));
+  std::unique_ptr<FEIRExpr> expr2 = expr->Clone();
+  BaseNode *baseNode = expr2->GenMIRNode(mirBuilder);
+  RedirectCout();
+  baseNode->Dump();
+  std::string dumpStr = GetBufferString();
+  std::string pattern = std::string("retype ref <\\* <\\$Ltest_2Flang_2FString_3B>> \\(dread ref %Reg0_") +
+      HIR2MPLUTRegx::RefIndex(HIR2MPLUTRegx::kAnyNumber) + "\\)" + HIR2MPLUTRegx::Any();
+  EXPECT_EQ(HIR2MPLUTRegx::Match(dumpStr, pattern), true);
+  RestoreCout();
+}
+
+// ---------- FEIRExprExtractBits ----------
+TEST_F(FEIRStmtTest, FEIRExprExtractBits) {
+  std::unique_ptr<FEIRVar> varReg = FEIRBuilder::CreateVarReg(0, PTY_i32);
+  std::unique_ptr<FEIRExprDRead> exprDRead = std::make_unique<FEIRExprDRead>(std::move(varReg));
+  std::unique_ptr<FEIRExprExtractBits> expr =
+      std::make_unique<FEIRExprExtractBits>(OP_extractbits, PTY_i32, 8, 16, std::move(exprDRead));
+  std::unique_ptr<FEIRExpr> expr2 = expr->Clone();
+  BaseNode *baseNode = expr2->GenMIRNode(mirBuilder);
+  RedirectCout();
+  baseNode->Dump();
+  std::string dumpStr = GetBufferString();
+  std::string pattern = std::string("extractbits i32 8 16 \\(dread i32 %Reg0_I\\)") + HIR2MPLUTRegx::Any();
+  EXPECT_EQ(HIR2MPLUTRegx::Match(dumpStr, pattern), true);
+  RestoreCout();
+}
+
+TEST_F(FEIRStmtTest, FEIRExprExtractBits_sext) {
+  std::unique_ptr<FEIRVar> varReg = FEIRBuilder::CreateVarReg(0, PTY_i32);
+  std::unique_ptr<FEIRExprDRead> exprDRead = std::make_unique<FEIRExprDRead>(std::move(varReg));
+  std::unique_ptr<FEIRExprExtractBits> expr =
+      std::make_unique<FEIRExprExtractBits>(OP_sext, PTY_i8, std::move(exprDRead));
+  std::unique_ptr<FEIRExpr> expr2 = expr->Clone();
+  BaseNode *baseNode = expr2->GenMIRNode(mirBuilder);
+  RedirectCout();
+  baseNode->Dump();
+  std::string dumpStr = GetBufferString();
+  std::string pattern = std::string("sext i32 8 \\(dread i32 %Reg0_I\\)") + HIR2MPLUTRegx::Any();
+  EXPECT_EQ(HIR2MPLUTRegx::Match(dumpStr, pattern), true);
+  RestoreCout();
+}
+
+TEST_F(FEIRStmtTest, FEIRExprExtractBits_zext) {
+  std::unique_ptr<FEIRVar> varReg = FEIRBuilder::CreateVarReg(0, PTY_i32);
+  std::unique_ptr<FEIRExprDRead> exprDRead = std::make_unique<FEIRExprDRead>(std::move(varReg));
+  std::unique_ptr<FEIRExprExtractBits> expr =
+      std::make_unique<FEIRExprExtractBits>(OP_zext, PTY_u16, std::move(exprDRead));
+  std::unique_ptr<FEIRExpr> expr2 = expr->Clone();
+  BaseNode *baseNode = expr2->GenMIRNode(mirBuilder);
+  RedirectCout();
+  baseNode->Dump();
+  std::string dumpStr = GetBufferString();
+  std::string pattern = std::string("zext u32 16 \\(dread i32 %Reg0_I\\)") + HIR2MPLUTRegx::Any();
+  EXPECT_EQ(HIR2MPLUTRegx::Match(dumpStr, pattern), true);
+  RestoreCout();
+}
+
+// ---------- FEIRExprBinary ----------
+TEST_F(FEIRStmtTest, FEIRExprBinary_add) {
+  std::unique_ptr<FEIRVar> varReg0 = FEIRBuilder::CreateVarReg(0, PTY_i32);
+  std::unique_ptr<FEIRVar> varReg1 = FEIRBuilder::CreateVarReg(1, PTY_i32);
+  std::unique_ptr<FEIRExprDRead> exprDRead0 = std::make_unique<FEIRExprDRead>(std::move(varReg0));
+  std::unique_ptr<FEIRExprDRead> exprDRead1 = std::make_unique<FEIRExprDRead>(std::move(varReg1));
+  std::unique_ptr<FEIRExprBinary> expr =
+      std::make_unique<FEIRExprBinary>(OP_add, std::move(exprDRead0), std::move(exprDRead1));
+  std::unique_ptr<FEIRExpr> expr2 = expr->Clone();
+  BaseNode *baseNode = expr2->GenMIRNode(mirBuilder);
+  RedirectCout();
+  baseNode->Dump();
+  std::string dumpStr = GetBufferString();
+  std::string pattern = std::string("add i32 \\(dread i32 %Reg0_I, dread i32 %Reg1_I\\)") + HIR2MPLUTRegx::Any();
+  EXPECT_EQ(HIR2MPLUTRegx::Match(dumpStr, pattern), true);
+  RestoreCout();
+  std::vector<FEIRVar*> varUses = expr2->GetVarUses();
+  ASSERT_EQ(varUses.size(), 2);
+  EXPECT_EQ(expr2->IsNestable(), true);
+  EXPECT_EQ(expr2->IsAddrof(), false);
+}
+
+TEST_F(FEIRStmtTest, FEIRExprBinary_cmpg) {
+  std::unique_ptr<FEIRVar> varReg0 = FEIRBuilder::CreateVarReg(0, PTY_f64);
+  std::unique_ptr<FEIRVar> varReg2 = FEIRBuilder::CreateVarReg(2, PTY_f64);
+  std::unique_ptr<FEIRExprDRead> exprDRead0 = std::make_unique<FEIRExprDRead>(std::move(varReg0));
+  std::unique_ptr<FEIRExprDRead> exprDRead2 = std::make_unique<FEIRExprDRead>(std::move(varReg2));
+  std::unique_ptr<FEIRExprBinary> expr =
+      std::make_unique<FEIRExprBinary>(OP_cmpg, std::move(exprDRead0), std::move(exprDRead2));
+  std::unique_ptr<FEIRExpr> expr2 = expr->Clone();
+  BaseNode *baseNode = expr2->GenMIRNode(mirBuilder);
+  RedirectCout();
+  baseNode->Dump();
+  std::string dumpStr = GetBufferString();
+  std::string pattern = std::string("cmpg i32 f64 \\(dread f64 %Reg0_D, dread f64 %Reg2_D\\)") + HIR2MPLUTRegx::Any();
+  EXPECT_EQ(HIR2MPLUTRegx::Match(dumpStr, pattern), true);
+  RestoreCout();
+}
+
+TEST_F(FEIRStmtTest, FEIRExprBinary_lshr) {
+  std::unique_ptr<FEIRVar> varReg0 = FEIRBuilder::CreateVarReg(0, PTY_i32);
+  std::unique_ptr<FEIRVar> varReg1 = FEIRBuilder::CreateVarReg(1, PTY_i8);
+  std::unique_ptr<FEIRExprDRead> exprDRead0 = std::make_unique<FEIRExprDRead>(std::move(varReg0));
+  std::unique_ptr<FEIRExprDRead> exprDRead1 = std::make_unique<FEIRExprDRead>(std::move(varReg1));
+  std::unique_ptr<FEIRExprBinary> expr =
+      std::make_unique<FEIRExprBinary>(OP_lshr, std::move(exprDRead0), std::move(exprDRead1));
+  std::unique_ptr<FEIRExpr> expr2 = expr->Clone();
+  BaseNode *baseNode = expr2->GenMIRNode(mirBuilder);
+  RedirectCout();
+  baseNode->Dump();
+  std::string dumpStr = GetBufferString();
+  std::string pattern = std::string("lshr i32 \\(dread i32 %Reg0_I, dread i32 %Reg1_B\\)") + HIR2MPLUTRegx::Any();
+  EXPECT_EQ(HIR2MPLUTRegx::Match(dumpStr, pattern), true);
+  RestoreCout();
+}
+
+TEST_F(FEIRStmtTest, FEIRExprBinary_band) {
+  std::unique_ptr<FEIRVar> varReg0 = FEIRBuilder::CreateVarReg(0, PTY_i32);
+  std::unique_ptr<FEIRVar> varReg1 = FEIRBuilder::CreateVarReg(1, PTY_i32);
+  std::unique_ptr<FEIRExprDRead> exprDRead0 = std::make_unique<FEIRExprDRead>(std::move(varReg0));
+  std::unique_ptr<FEIRExprDRead> exprDRead1 = std::make_unique<FEIRExprDRead>(std::move(varReg1));
+  std::unique_ptr<FEIRExprBinary> expr =
+      std::make_unique<FEIRExprBinary>(OP_band, std::move(exprDRead0), std::move(exprDRead1));
+  std::unique_ptr<FEIRExpr> expr2 = expr->Clone();
+  BaseNode *baseNode = expr2->GenMIRNode(mirBuilder);
+  RedirectCout();
+  baseNode->Dump();
+  std::string dumpStr = GetBufferString();
+  std::string pattern = std::string("band i32 \\(dread i32 %Reg0_I, dread i32 %Reg1_I\\)") + HIR2MPLUTRegx::Any();
+  EXPECT_EQ(HIR2MPLUTRegx::Match(dumpStr, pattern), true);
+  RestoreCout();
+}
+
+// ---------- FEIRExprTernary ----------
+TEST_F(FEIRStmtTest, FEIRExprTernary_add) {
+  std::unique_ptr<FEIRVar> varReg0 = FEIRBuilder::CreateVarReg(0, PTY_u1);
+  std::unique_ptr<FEIRVar> varReg1 = FEIRBuilder::CreateVarReg(1, PTY_i32);
+  std::unique_ptr<FEIRVar> 
varReg2 = FEIRBuilder::CreateVarReg(2, PTY_i32); + std::unique_ptr exprDRead0 = std::make_unique(std::move(varReg0)); + std::unique_ptr exprDRead1 = std::make_unique(std::move(varReg1)); + std::unique_ptr exprDRead2 = std::make_unique(std::move(varReg2)); + std::unique_ptr expr = + std::make_unique(OP_select, std::move(exprDRead0), std::move(exprDRead1), std::move(exprDRead2)); + std::unique_ptr expr2 = expr->Clone(); + BaseNode *baseNode = expr2->GenMIRNode(mirBuilder); + RedirectCout(); + baseNode->Dump(); + std::string dumpStr = GetBufferString(); + std::string pattern = + std::string("select i32 \\(dread u32 %Reg0_Z, dread i32 %Reg1_I, dread i32 %Reg2_I\\)") + HIR2MPLUTRegx::Any(); + EXPECT_EQ(HIR2MPLUTRegx::Match(dumpStr, pattern), true); + RestoreCout(); + std::vector varUses = expr2->GetVarUses(); + ASSERT_EQ(varUses.size(), 3); + EXPECT_EQ(expr2->IsNestable(), true); + EXPECT_EQ(expr2->IsAddrof(), false); +} + +// ---------- FEIRStmtIf ---------- +TEST_F(FEIRStmtTest, FEIRStmtIf) { + // CondExpr + UniqueFEIRVar varReg = FEIRBuilder::CreateVarReg(0, PTY_u1); + std::unique_ptr exprDReadReg = std::make_unique(std::move(varReg)); + // ThenStmts + UniqueFEIRVar dstVar = FEIRBuilder::CreateVarReg(0, PTY_i32); + UniqueFEIRVar dstVar1 = dstVar->Clone(); + UniqueFEIRVar srcVar = std::make_unique(1, PTY_i32); + UniqueFEIRExpr exprDRead = std::make_unique(std::move(srcVar)); + UniqueFEIRExpr exprDRead1 = exprDRead->Clone(); + UniqueFEIRStmt stmtDAssign = std::make_unique(std::move(dstVar), std::move(exprDRead)); + std::list thenStmts; + thenStmts.emplace_back(std::move(stmtDAssign)); + // ElseStmts + UniqueFEIRVar dstVar2 = dstVar1->Clone(); + UniqueFEIRExpr exprDRead2 = exprDRead1->Clone(); + UniqueFEIRStmt stmtDAssign1 = std::make_unique(std::move(dstVar1), std::move(exprDRead1)); + UniqueFEIRStmt stmtDAssign2 = std::make_unique(std::move(dstVar2), std::move(exprDRead2)); + std::list elseStmts; + elseStmts.emplace_back(std::move(stmtDAssign1)); + elseStmts.emplace_back(std::move(stmtDAssign2)); + + std::unique_ptr stmt = + std::make_unique(std::move(exprDReadReg), thenStmts, elseStmts); + std::list baseNodes = stmt->GenMIRStmts(mirBuilder); + RedirectCout(); + baseNodes.front()->Dump(); + std::string pattern = + "if (dread u32 %Reg0_Z) {\n"\ + " dassign %Reg0_I 0 (dread i32 %Reg1_I)\n"\ + "}\n"\ + "else {\n"\ + " dassign %Reg0_I 0 (dread i32 %Reg1_I)\n"\ + " dassign %Reg0_I 0 (dread i32 %Reg1_I)\n"\ + "}\n\n"; + EXPECT_EQ(GetBufferString(), pattern); + RestoreCout(); +} + +// ---------- FEIRStmtDAssign ---------- +TEST_F(FEIRStmtTest, FEIRStmtDAssign) { + std::unique_ptr type = FEIRTypeHelper::CreateTypeByJavaName("Ljava/lang/String;", false, true); + std::unique_ptr dstVar = std::make_unique(0, type->Clone()); + std::unique_ptr srcVar = std::make_unique(1, type->Clone()); + std::unique_ptr exprDRead = std::make_unique(std::move(srcVar)); + std::unique_ptr stmtDAssign = + std::make_unique(std::move(dstVar), std::move(exprDRead)); + std::list mirNodes = stmtDAssign->GenMIRStmts(mirBuilder); + ASSERT_EQ(mirNodes.size(), 1); + RedirectCout(); + mirNodes.front()->Dump(); + std::string dumpStr = GetBufferString(); + std::string pattern = std::string("dassign %Reg0_") + HIR2MPLUTRegx::RefIndex(HIR2MPLUTRegx::kAnyNumber) + + " 0 \\(dread ref %Reg1_" + HIR2MPLUTRegx::RefIndex(HIR2MPLUTRegx::kAnyNumber) + "\\)" + + HIR2MPLUTRegx::Any(); + EXPECT_EQ(HIR2MPLUTRegx::Match(dumpStr, pattern), true); + RestoreCout(); +} + +// ---------- FEIRStmtIntrinsicCallAssign ---------- +TEST_F(FEIRStmtTest, 
FEIRStmtIntrinsicCallAssign) { + std::string containerName = "Ljava/lang/String;"; + containerName = namemangler::EncodeName(containerName); + GStrIdx containerNameIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(containerName); + std::unique_ptr stmtCall = + std::make_unique(INTRN_JAVA_CLINIT_CHECK, + std::make_unique(PTY_ref, containerNameIdx), + nullptr); + std::list mirNodes = stmtCall->GenMIRStmts(mirBuilder); + ASSERT_EQ(mirNodes.size(), 1); + RedirectCout(); + mirNodes.front()->Dump(); + std::string dumpStr = GetBufferString(); + std::string expected = "intrinsiccallwithtype <$Ljava_2Flang_2FString_3B> JAVA_CLINIT_CHECK ()"; + EXPECT_EQ(dumpStr.find(expected) != std::string::npos, true); + RestoreCout(); +} + +TEST_F(FEIRStmtTest, FEIRStmtIntrinsicCallAssign_FilledNewArray) { + RedirectCout(); + std::string elemName = "Ljava/lang/String;"; + UniqueFEIRType elemType = FEIRTypeHelper::CreateTypeByJavaName(elemName, false, false); + + auto exprRegList = std::make_unique>(); + UniqueFEIRExpr exprReg = FEIRBuilder::CreateExprDRead(FEIRBuilder::CreateVarReg(0, PTY_i32)); + exprRegList->emplace_back(std::move(exprReg)); + + UniqueFEIRType type = FEIRTypeHelper::CreateTypeByPrimType(PTY_i32, 1, false); + std::unique_ptr varReg = FEIRBuilder::CreateVarReg(0, std::move(type)); + + std::unique_ptr stmtCall = std::make_unique( + INTRN_JAVA_FILL_NEW_ARRAY, std::move(elemType), std::move(varReg), std::move(exprRegList)); + std::list mirNodes = stmtCall->GenMIRStmts(mirBuilder); + ASSERT_EQ(mirNodes.size(), 1); + mirNodes.front()->Dump(); + std::string dumpStr = GetBufferString(); + std::string pattern = std::string("intrinsiccallwithtypeassigned <\\* <\\$Ljava_2Flang_2FString_3B>> ") + + std::string("JAVA_FILL_NEW_ARRAY \\(dread i32 %Reg0_I\\) \\{ dassign %Reg0_") + + HIR2MPLUTRegx::RefIndex(HIR2MPLUTRegx::kAnyNumber) + std::string(" 0 \\}") + HIR2MPLUTRegx::Any(); + EXPECT_EQ(HIR2MPLUTRegx::Match(dumpStr, pattern), true); + RestoreCout(); +} + +TEST_F(FEIRStmtTest, FEIRStmtJavaFillArrayData) { + RedirectCout(); + UniqueFEIRType type = FEIRTypeHelper::CreateTypeByPrimType(PTY_i32, 1, false); + std::unique_ptr varReg = FEIRBuilder::CreateVarReg(0, std::move(type)); + int32 arr[] = { 1, 2, 3, 4 }; + const std::string tempName = "const_array_0"; + UniqueFEIRExpr expr = FEIRBuilder::CreateExprDRead(std::move(varReg)); + std::unique_ptr stmt = + std::make_unique(std::move(expr), reinterpret_cast(&arr), 4, tempName); + MIRSymbol *arrayDataVar = stmt->ProcessArrayElemData(mirBuilder, stmt->ProcessArrayElemPrimType()); + arrayDataVar->Dump(true, 0); + EXPECT_EQ(GetBufferString(), "var %const_array_0 fstatic <[4] i32> readonly = [1, 2, 3, 4]\n"); + + std::list mirNodes = stmt->GenMIRStmts(mirBuilder); + ASSERT_EQ(mirNodes.size(), 1); + mirNodes.front()->Dump(); + std::string dumpStr = GetBufferString(); + std::string pattern = std::string("intrinsiccallassigned JAVA_ARRAY_FILL \\(dread ref %Reg0_") + + HIR2MPLUTRegx::RefIndex(HIR2MPLUTRegx::kAnyNumber) + + std::string(", addrof ptr \\$const_array_0, constval i32 16\\)") + + HIR2MPLUTRegx::Any(); + EXPECT_EQ(HIR2MPLUTRegx::Match(dumpStr, pattern), true); + RestoreCout(); +} + +TEST_F(FEIRStmtTest, FEIRExpr_hash) { + // FEIRExprDRead + std::unique_ptr varReg0 = FEIRBuilder::CreateVarReg(0, PTY_i32); + std::unique_ptr varReg1 = FEIRBuilder::CreateVarReg(1, PTY_i8); + UniqueFEIRExpr exprDRead0 = std::make_unique(std::move(varReg0)); + UniqueFEIRExpr exprDRead1 = std::make_unique(std::move(varReg1)); + UniqueFEIRExpr exprDRead2 = 
exprDRead0->Clone(); + EXPECT_EQ(exprDRead0->Hash() == exprDRead1->Hash(), false); + EXPECT_EQ(exprDRead0->Hash() == exprDRead2->Hash(), true); + // FEIRExprIRead + UniqueFEIRVar Var0 = FEIRBuilder::CreateVarNameForC("a", *GlobalTables::GetTypeTable().GetInt32()); + UniqueFEIRVar Var1 = FEIRBuilder::CreateVarNameForC("b", *GlobalTables::GetTypeTable().GetInt32()); + UniqueFEIRVar Var2 = Var0->Clone(); + UniqueFEIRExpr exprDRead10 = std::make_unique(Var0->Clone()); + UniqueFEIRExpr exprDRead11 = std::make_unique(Var1->Clone()); + UniqueFEIRExpr exprDRead12 = exprDRead10->Clone(); + EXPECT_EQ(exprDRead10->Hash() == exprDRead11->Hash(), false); + EXPECT_EQ(exprDRead10->Hash() == exprDRead12->Hash(), true); + UniqueFEIRType retType = FEIRTypeHelper::CreateTypeNative(*GlobalTables::GetTypeTable().GetInt32()); + MIRType *ptr = GlobalTables::GetTypeTable().GetOrCreatePointerType(*GlobalTables::GetTypeTable().GetInt32()); + UniqueFEIRType ptrType = FEIRTypeHelper::CreateTypeNative(*ptr); + UniqueFEIRExpr exprIread0 = FEIRBuilder::CreateExprIRead(retType->Clone(), ptrType->Clone(), exprDRead10->Clone()); + UniqueFEIRExpr exprIread1 = FEIRBuilder::CreateExprIRead(retType->Clone(), ptrType->Clone(), exprDRead11->Clone()); + UniqueFEIRExpr exprIread2 = FEIRBuilder::CreateExprIRead(retType->Clone(), ptrType->Clone(), exprDRead12->Clone()); + UniqueFEIRExpr exprIread3 = exprIread0->Clone(); + EXPECT_EQ(exprIread0->Hash() == exprIread1->Hash(), false); + EXPECT_EQ(exprIread0->Hash() == exprIread2->Hash(), true); + EXPECT_EQ(exprIread0->Hash() == exprIread3->Hash(), true); + // FEIRExprConst + UniqueFEIRExpr exprConst0 = std::make_unique(int64{ 0x100 }, PTY_i64); + UniqueFEIRExpr exprConst1 = std::make_unique(int64{ 0x100 }, PTY_u64); + UniqueFEIRExpr exprConst2 = std::make_unique(int64{ 0x101 }, PTY_u64); + UniqueFEIRExpr exprConst3 = exprConst2->Clone(); + EXPECT_EQ(exprConst0->Hash() == exprConst1->Hash(), false); + EXPECT_EQ(exprConst1->Hash() == exprConst2->Hash(), false); + EXPECT_EQ(exprConst2->Hash() == exprConst3->Hash(), true); + // FEIRExprUnary + UniqueFEIRVar varReg = FEIRBuilder::CreateVarReg(0, PTY_i32); + UniqueFEIRExpr exprDRead = std::make_unique(std::move(varReg)); + UniqueFEIRExpr exprUnary0 = std::make_unique(OP_neg, exprDRead->Clone()); + UniqueFEIRExpr exprUnary1 = std::make_unique(OP_bnot, exprDRead->Clone()); + UniqueFEIRExpr exprUnary2 = exprUnary1->Clone(); + EXPECT_EQ(exprUnary0->Hash() == exprUnary1->Hash(), false); + EXPECT_EQ(exprUnary1->Hash() == exprUnary2->Hash(), true); + // FEIRExprTypeCvt + std::unique_ptr exprCvt0 = std::make_unique(OP_round, exprDRead->Clone()); + exprCvt0->GetType()->SetPrimType(PTY_f32); + UniqueFEIRExpr exprCvt1 = exprCvt0->Clone(); + exprCvt1->GetType()->SetPrimType(PTY_u32); + UniqueFEIRExpr exprCvt2 = exprCvt0->Clone(); + EXPECT_EQ(exprCvt0->Hash() == exprCvt1->Hash(), false); + EXPECT_EQ(exprCvt0->Hash() == exprCvt2->Hash(), true); + // FEIRExprExtractBits + UniqueFEIRExpr exprExtractBits0 = + std::make_unique(OP_extractbits, PTY_i32, 8, 16, exprDRead->Clone()); + UniqueFEIRExpr exprExtractBits1 = + std::make_unique(OP_extractbits, PTY_u32, 8, 16, exprDRead->Clone()); + UniqueFEIRExpr exprExtractBits2 = + std::make_unique(OP_extractbits, PTY_u32, 16, 8, exprDRead->Clone()); + UniqueFEIRExpr exprExtractBits3 = exprExtractBits0->Clone(); + EXPECT_EQ(exprExtractBits0->Hash() == exprConst1->Hash(), false); + EXPECT_EQ(exprExtractBits0->Hash() == exprExtractBits2->Hash(), false); + EXPECT_EQ(exprExtractBits0->Hash() == exprExtractBits3->Hash(), 
true); + // FEIRExprBinary + UniqueFEIRExpr exprBin0 = std::make_unique(OP_add, exprDRead10->Clone(), exprDRead11->Clone()); + UniqueFEIRExpr exprBin1 = std::make_unique(OP_sub, exprDRead10->Clone(), exprDRead11->Clone()); + UniqueFEIRExpr exprBin2 = std::make_unique(OP_add, exprDRead11->Clone(), exprDRead10->Clone()); + UniqueFEIRExpr exprBin3 = exprBin0->Clone(); + EXPECT_EQ(exprBin0->Hash() == exprBin1->Hash(), false); + EXPECT_EQ(exprBin0->Hash() == exprBin2->Hash(), false); + EXPECT_EQ(exprBin0->Hash() == exprBin3->Hash(), true); + UniqueFEIRExpr exprTernary0 = + std::make_unique(OP_select, exprDRead0->Clone(), exprDRead1->Clone(), exprDRead2->Clone()); + UniqueFEIRExpr exprTernary1 = + std::make_unique(OP_select, exprDRead0->Clone(), exprDRead2->Clone(), exprDRead1->Clone()); + UniqueFEIRExpr exprTernary2 = exprTernary0->Clone(); + EXPECT_EQ(exprTernary0->Hash() == exprTernary1->Hash(), false); + EXPECT_EQ(exprTernary0->Hash() == exprTernary2->Hash(), true); + // FEIRExprAddrofVar + UniqueFEIRExpr exprAddrOfVar0 = FEIRBuilder::CreateExprAddrofVar(Var0->Clone()); + UniqueFEIRExpr exprAddrOfVar1 = FEIRBuilder::CreateExprAddrofVar(Var1->Clone()); + UniqueFEIRExpr exprAddrOfVar2 = exprAddrOfVar0->Clone(); + EXPECT_EQ(exprAddrOfVar0->Hash() == exprAddrOfVar1->Hash(), false); + EXPECT_EQ(exprAddrOfVar0->Hash() == exprAddrOfVar2->Hash(), true); + // FEIRExprIAddrof + UniqueFEIRExpr exprAddrIAddrof0 = std::make_unique( + FEIRTypeHelper::CreateTypeNative(*GlobalTables::GetTypeTable().GetAddr64()), 1, exprDRead10->Clone()); + UniqueFEIRExpr exprAddrIAddrof1 = std::make_unique( + FEIRTypeHelper::CreateTypeNative(*GlobalTables::GetTypeTable().GetAddr64()), 2, exprDRead10->Clone()); + UniqueFEIRExpr exprAddrIAddrof2 = std::make_unique( + FEIRTypeHelper::CreateTypeNative(*GlobalTables::GetTypeTable().GetAddr64()), 1, exprDRead11->Clone()); + UniqueFEIRExpr exprAddrIAddrof3 = exprAddrIAddrof0->Clone(); + EXPECT_EQ(exprAddrIAddrof0->Hash() == exprAddrIAddrof1->Hash(), false); + EXPECT_EQ(exprAddrIAddrof0->Hash() == exprAddrIAddrof2->Hash(), false); + EXPECT_EQ(exprAddrIAddrof0->Hash() == exprAddrIAddrof3->Hash(), true); + // FEIRExprAddrofFunc + UniqueFEIRExpr exprFunc0 = FEIRBuilder::CreateExprAddrofFunc("func1"); + UniqueFEIRExpr exprFunc1 = FEIRBuilder::CreateExprAddrofFunc("func2"); + UniqueFEIRExpr exprFunc2 = exprFunc0->Clone(); + EXPECT_EQ(exprFunc0->Hash() == exprFunc1->Hash(), false); + EXPECT_EQ(exprFunc0->Hash() == exprFunc2->Hash(), true); + // FEIRExprAddrofArray + uint32_t sizeArray[2] = {3, 3}; + MIRType *arrType = GlobalTables::GetTypeTable().GetOrCreateArrayType( + *GlobalTables::GetTypeTable().GetAddr64(), 2, sizeArray); + UniqueFEIRType arrayFEType = FEIRTypeHelper::CreateTypeNative(*arrType); + std::list exprs0; + exprs0.emplace_back( FEIRBuilder::CreateExprConstU32(1)); + exprs0.emplace_back( FEIRBuilder::CreateExprConstU32(2)); + std::list exprs1; + exprs1.emplace_back( FEIRBuilder::CreateExprConstU32(2)); + exprs1.emplace_back( FEIRBuilder::CreateExprConstU32(1)); + auto exprAddrOfArr0 = FEIRBuilder::CreateExprAddrofArray(arrayFEType->Clone(), exprAddrOfVar0->Clone(), "", exprs0); + auto exprAddrOfArr1 = FEIRBuilder::CreateExprAddrofArray(arrayFEType->Clone(), exprAddrOfVar1->Clone(), "", exprs0); + auto exprAddrOfArr2 = FEIRBuilder::CreateExprAddrofArray(arrayFEType->Clone(), exprAddrOfVar1->Clone(), "", exprs1); + auto exprAddrOfArr3 = exprAddrOfArr0->Clone(); + EXPECT_EQ(exprAddrOfArr0->Hash() == exprAddrOfArr1->Hash(), false); + EXPECT_EQ(exprAddrOfArr0->Hash() == 
exprAddrOfArr2->Hash(), false); + EXPECT_EQ(exprAddrOfArr0->Hash() == exprAddrOfArr3->Hash(), true); +} + +TEST_F(FEIRStmtTest, FEIRStmtRor) { + RedirectCout(); + MIRType *type = GlobalTables::GetTypeTable().GetUInt64(); + UniqueFEIRVar baseVar = FEIRBuilder::CreateVarNameForC("a", *type, false, false); + UniqueFEIRExpr baseExpr = FEIRBuilder::CreateExprDRead(std::move(baseVar)); + UniqueFEIRVar baseShiftVar = FEIRBuilder::CreateVarNameForC("b", *type, false, false); + UniqueFEIRExpr baseShiftExpr = FEIRBuilder::CreateExprDRead(std::move(baseShiftVar)); + UniqueFEIRExpr constExpr1 = FEIRBuilder::CreateExprConstI32(64); + UniqueFEIRExpr constExpr2 = FEIRBuilder::CreateExprConstI32(63); + UniqueFEIRExpr andExpr = FEIRBuilder::CreateExprBinary(OP_band, baseShiftExpr->Clone(), constExpr2->Clone()); + UniqueFEIRExpr leftExpr = FEIRBuilder::CreateExprBinary(OP_lshr, baseExpr->Clone(), andExpr->Clone()); + UniqueFEIRExpr subExpr = FEIRBuilder::CreateExprBinary(OP_sub, constExpr1->Clone(), andExpr->Clone()); + UniqueFEIRExpr rightExpr = FEIRBuilder::CreateExprBinary(OP_shl, baseExpr->Clone(), subExpr->Clone()); + UniqueFEIRExpr orExpr = FEIRBuilder::CreateExprBinary(OP_bior, leftExpr->Clone(), rightExpr->Clone()); + // ror optimize + auto orExprPtr = static_cast(orExpr.get()); + Ror ror(orExprPtr->GetOp(), orExprPtr->GetOpnd0(), orExprPtr->GetOpnd1()); + UniqueFEIRExpr target = ror.Emit2FEExpr(); + BaseNode *node = target->GenMIRNode(mirBuilder); + node->Dump(); + std::string dumpStr = GetBufferString(); + std::string pattern = std::string("ror u64 \\(dread u64 %a, dread u64 %b\\)") + HIR2MPLUTRegx::Any(); + EXPECT_EQ(HIR2MPLUTRegx::Match(dumpStr, pattern), true); + RestoreCout(); +} +} // namespace maple diff --git a/src/hir2mpl/test/common/feir_test_base.cpp b/src/hir2mpl/test/common/feir_test_base.cpp new file mode 100644 index 0000000000000000000000000000000000000000..888d2bde82159348830d14fb8cd95c3a187b561e --- /dev/null +++ b/src/hir2mpl/test/common/feir_test_base.cpp @@ -0,0 +1,41 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "feir_test_base.h" +#include "hir2mpl_ut_environment.h" +#include "fe_utils.h" +#include "fe_manager.h" + +namespace maple { +MemPool *FEIRTestBase::mp = nullptr; + +FEIRTestBase::FEIRTestBase() + : allocator(mp), + mirBuilder(&HIR2MPLUTEnvironment::GetMIRModule()) { + func = FEManager::GetTypeManager().GetMIRFunction("mock_func", false); + if (func == nullptr) { + func = FEManager::GetTypeManager().CreateFunction("mock_func", "void", std::vector{}, false, false); + } + mirBuilder.SetCurrentFunction(*func); +} + +void FEIRTestBase::SetUpTestCase() { + mp = FEUtils::NewMempool("MemPool for FEIRTestBase", false /* isLocalPool */); +} + +void FEIRTestBase::TearDownTestCase() { + delete mp; + mp = nullptr; +} +} // namespace maple diff --git a/src/hir2mpl/test/common/feir_test_base.h b/src/hir2mpl/test/common/feir_test_base.h new file mode 100644 index 0000000000000000000000000000000000000000..a2cacebe195855b049af7460e74fb7a6b478e189 --- /dev/null +++ b/src/hir2mpl/test/common/feir_test_base.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include +#include +#include "redirect_buffer.h" +#include "global_tables.h" +#include "mir_module.h" +#include "mir_builder.h" + +namespace maple { +class FEIRTestBase : public testing::Test, public RedirectBuffer { + public: + static MemPool *mp; + MapleAllocator allocator; + MIRBuilder mirBuilder; + MIRFunction *func; + FEIRTestBase(); + virtual ~FEIRTestBase() = default; + + static void SetUpTestCase(); + static void TearDownTestCase(); +}; +} // namespace maple \ No newline at end of file diff --git a/src/hir2mpl/test/common/feir_type_helper_test.cpp b/src/hir2mpl/test/common/feir_type_helper_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..75c3151aa4b163230a1ddb0b8ac19dedda444892 --- /dev/null +++ b/src/hir2mpl/test/common/feir_type_helper_test.cpp @@ -0,0 +1,184 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include +#include +#include +#include "redirect_buffer.h" +#include "global_tables.h" +#include "mir_module.h" +#include "feir_type_helper.h" + +namespace maple { +class FEIRTypeHelperTest : public testing::Test, public RedirectBuffer { + public: + FEIRTypeHelperTest() = default; + ~FEIRTypeHelperTest() = default; +}; + +TEST_F(FEIRTypeHelperTest, CreateTypeByPrimType_i32) { + std::unique_ptr type1 = FEIRTypeHelper::CreateTypeByPrimType(PTY_i32, 0, false); + std::unique_ptr type2 = FEIRTypeHelper::CreateTypeByPrimType(PTY_i32, 0, true); + RedirectCout(); + type1->GenerateMIRType()->Dump(0); + EXPECT_EQ(GetBufferString(), "i32"); + ClearBufferString(); + type2->GenerateMIRType()->Dump(0); + EXPECT_EQ(GetBufferString(), "<* i32>"); + RestoreCout(); +} + +TEST_F(FEIRTypeHelperTest, CreateTypeByJavaName_J) { + std::unique_ptr type1 = FEIRTypeHelper::CreateTypeByJavaName("J", false, false); + std::unique_ptr type2 = FEIRTypeHelper::CreateTypeByJavaName("J", false, true); + RedirectCout(); + type1->GenerateMIRType()->Dump(0); + EXPECT_EQ(GetBufferString(), "i64"); + ClearBufferString(); + type2->GenerateMIRType()->Dump(0); + EXPECT_EQ(GetBufferString(), "<* i64>"); + RestoreCout(); +} + +TEST_F(FEIRTypeHelperTest, CreateTypeByJavaName_PrimType) { + RedirectCout(); + FEIRTypeHelper::CreateTypeByJavaName("I", false, false)->GenerateMIRType()->Dump(0); + EXPECT_EQ(GetBufferString(), "i32"); + ClearBufferString(); + FEIRTypeHelper::CreateTypeByJavaName("J", false, false)->GenerateMIRType()->Dump(0); + EXPECT_EQ(GetBufferString(), "i64"); + ClearBufferString(); + FEIRTypeHelper::CreateTypeByJavaName("F", false, false)->GenerateMIRType()->Dump(0); + EXPECT_EQ(GetBufferString(), "f32"); + ClearBufferString(); + FEIRTypeHelper::CreateTypeByJavaName("D", false, false)->GenerateMIRType()->Dump(0); + EXPECT_EQ(GetBufferString(), "f64"); + ClearBufferString(); + FEIRTypeHelper::CreateTypeByJavaName("Z", false, false)->GenerateMIRType()->Dump(0); + EXPECT_EQ(GetBufferString(), "u1"); + ClearBufferString(); + FEIRTypeHelper::CreateTypeByJavaName("B", false, false)->GenerateMIRType()->Dump(0); + EXPECT_EQ(GetBufferString(), "i8"); + ClearBufferString(); + FEIRTypeHelper::CreateTypeByJavaName("S", false, false)->GenerateMIRType()->Dump(0); + EXPECT_EQ(GetBufferString(), "i16"); + ClearBufferString(); + FEIRTypeHelper::CreateTypeByJavaName("C", false, false)->GenerateMIRType()->Dump(0); + EXPECT_EQ(GetBufferString(), "u16"); + ClearBufferString(); + FEIRTypeHelper::CreateTypeByJavaName("V", false, false)->GenerateMIRType()->Dump(0); + EXPECT_EQ(GetBufferString(), "void"); + RestoreCout(); +} + +TEST_F(FEIRTypeHelperTest, CreateTypeByJavaName_Object) { + std::unique_ptr type1 = FEIRTypeHelper::CreateTypeByJavaName("Ljava/lang/Object;", false, false); + std::unique_ptr type2 = FEIRTypeHelper::CreateTypeByJavaName("Ljava/lang/Object;", false, true); + std::unique_ptr type3 = FEIRTypeHelper::CreateTypeByJavaName("[Ljava/lang/Object;", false, false); + std::unique_ptr type4 = FEIRTypeHelper::CreateTypeByJavaName("[Ljava/lang/Object;", false, true); + RedirectCout(); + type1->GenerateMIRType()->Dump(0); + EXPECT_EQ(GetBufferString(), "<$Ljava_2Flang_2FObject_3B>"); + ClearBufferString(); + type2->GenerateMIRType()->Dump(0); + EXPECT_EQ(GetBufferString(), "<* <$Ljava_2Flang_2FObject_3B>>"); + ClearBufferString(); + type3->GenerateMIRType()->Dump(0); + EXPECT_EQ(GetBufferString(), "<[] <* <$Ljava_2Flang_2FObject_3B>>>"); + ClearBufferString(); + type4->GenerateMIRType()->Dump(0); + EXPECT_EQ(GetBufferString(), 
"<* <[] <* <$Ljava_2Flang_2FObject_3B>>>>"); + RestoreCout(); +} + +TEST_F(FEIRTypeHelperTest, CreateTypeByJavaName_Undefined) { + std::unique_ptr type1 = FEIRTypeHelper::CreateTypeByJavaName("LUndefine4FEIRTypeHelper;", false, false); + std::unique_ptr type2 = FEIRTypeHelper::CreateTypeByJavaName("LUndefine4FEIRTypeHelper;", false, true); + RedirectCout(); + type1->GenerateMIRType()->Dump(0); + EXPECT_EQ(GetBufferString(), "<$LUndefine4FEIRTypeHelper_3B>"); + ClearBufferString(); + type2->GenerateMIRType()->Dump(0); + EXPECT_EQ(GetBufferString(), "<* <$LUndefine4FEIRTypeHelper_3B>>"); + RestoreCout(); +} + +TEST_F(FEIRTypeHelperTest, CreateTypeByDimIncr) { + UniqueFEIRType type1 = FEIRTypeHelper::CreateTypeByPrimType(PTY_i32, 0, false); + UniqueFEIRType type1A = FEIRTypeHelper::CreateTypeByDimIncr(type1, 1); + UniqueFEIRType type1AP = FEIRTypeHelper::CreateTypeByDimIncr(type1, 1, true); + UniqueFEIRType type2 = FEIRTypeHelper::CreateTypeByPrimType(PTY_i32, 1, true); + UniqueFEIRType type2A = FEIRTypeHelper::CreateTypeByDimIncr(type2, 2); + UniqueFEIRType type2AP = FEIRTypeHelper::CreateTypeByDimIncr(type2, 2, true); + RedirectCout(); + type1->GenerateMIRType()->Dump(0); + EXPECT_EQ(GetBufferString(), "i32"); + ClearBufferString(); + type1A->GenerateMIRType()->Dump(0); + EXPECT_EQ(GetBufferString(), "<[] i32>"); + ClearBufferString(); + type1AP->GenerateMIRType()->Dump(0); + EXPECT_EQ(GetBufferString(), "<* <[] i32>>"); + ClearBufferString(); + type2->GenerateMIRType()->Dump(0); + EXPECT_EQ(GetBufferString(), "<* <[] i32>>"); + ClearBufferString(); + type2A->GenerateMIRType()->Dump(0); + EXPECT_EQ(GetBufferString(), "<* <[] <* <[] <* <[] i32>>>>>>"); + ClearBufferString(); + type2AP->GenerateMIRType()->Dump(0); + EXPECT_EQ(GetBufferString(), "<* <[] <* <[] <* <[] i32>>>>>>"); + RestoreCout(); +} + +TEST_F(FEIRTypeHelperTest, CreateTypeByDimDecr) { + UniqueFEIRType type2AP = FEIRTypeHelper::CreateTypeByPrimType(PTY_i32, 2, true); + UniqueFEIRType type1AP = FEIRTypeHelper::CreateTypeByDimDecr(type2AP, 1); + UniqueFEIRType type0 = FEIRTypeHelper::CreateTypeByDimDecr(type2AP, 2); + RedirectCout(); + type2AP->GenerateMIRType()->Dump(0); + EXPECT_EQ(GetBufferString(), "<* <[] <* <[] i32>>>>"); + ClearBufferString(); + type1AP->GenerateMIRType()->Dump(0); + EXPECT_EQ(GetBufferString(), "<* <[] i32>>"); + ClearBufferString(); + type0->GenerateMIRType()->Dump(0); + EXPECT_EQ(GetBufferString(), "i32"); + RestoreCout(); +} + +TEST_F(FEIRTypeHelperTest, CreateTypeByGetAddress) { + UniqueFEIRType type0 = FEIRTypeHelper::CreateTypeByPrimType(PTY_i32, 0, false); + UniqueFEIRType type0P = FEIRTypeHelper::CreateTypeByGetAddress(type0); + RedirectCout(); + type0->GenerateMIRType()->Dump(0); + EXPECT_EQ(GetBufferString(), "i32"); + ClearBufferString(); + type0P->GenerateMIRType()->Dump(0); + EXPECT_EQ(GetBufferString(), "<* i32>"); + RestoreCout(); +} + +TEST_F(FEIRTypeHelperTest, CreateTypeByDereferrence) { + UniqueFEIRType type0P = FEIRTypeHelper::CreateTypeByPrimType(PTY_i32, 0, true); + UniqueFEIRType type0 = FEIRTypeHelper::CreateTypeByDereferrence(type0P); + RedirectCout(); + type0P->GenerateMIRType()->Dump(0); + EXPECT_EQ(GetBufferString(), "<* i32>"); + ClearBufferString(); + type0->GenerateMIRType()->Dump(0); + EXPECT_EQ(GetBufferString(), "i32"); + RestoreCout(); +} +} // namespace maple \ No newline at end of file diff --git a/src/hir2mpl/test/common/feir_type_infer_test.cpp b/src/hir2mpl/test/common/feir_type_infer_test.cpp new file mode 100644 index 
0000000000000000000000000000000000000000..2c523349d59269f54a6a4c900409e605e72f28b9 --- /dev/null +++ b/src/hir2mpl/test/common/feir_type_infer_test.cpp @@ -0,0 +1,318 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include +#include +#include +#include "redirect_buffer.h" +#include "global_tables.h" +#include "mir_module.h" +#define protected public +#define private public +#include "feir_type_helper.h" +#include "feir_builder.h" +#include "feir_var_type_scatter.h" +#include "feir_dfg.h" +#include "namemangler.h" +#include "fe_type_hierarchy.h" +#include "feir_type_infer.h" + +namespace maple { +class FEIRTypeHelperTest : public testing::Test, public RedirectBuffer { + public: + FEIRTypeHelperTest() = default; + ~FEIRTypeHelperTest() = default; +}; + +class FEIRTypeInferTest : public testing::Test, public RedirectBuffer { + public: + FEIRTypeInferTest() {}; + ~FEIRTypeInferTest() = default; +}; + +class FEIRTypeCvtHelperTest : public testing::Test, public RedirectBuffer { + public: + FEIRTypeCvtHelperTest() {}; + ~FEIRTypeCvtHelperTest() = default; +}; + +TEST_F(FEIRTypeHelperTest, MergeType_Parent) { + GStrIdx type0StrIdx = + GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(namemangler::EncodeName("Ljava/lang/Integer;")); + GStrIdx type1StrIdx = + GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(namemangler::EncodeName("Ljava/lang/Number;")); + FETypeHierarchy::GetInstance().AddParentChildRelation(type1StrIdx, type0StrIdx); + UniqueFEIRType type0 = FEIRTypeHelper::CreateTypeByJavaName("Ljava/lang/Integer;", false, false); + UniqueFEIRType type1 = FEIRTypeHelper::CreateTypeByJavaName("Ljava/lang/Number;", false, false); + UniqueFEIRType type2 = FEIRTypeHelper::CreateTypeByJavaName("Ljava/lang/String;", false, false); + UniqueFEIRType type3 = FEIRTypeHelper::CreateTypeByJavaName("[Ljava/lang/String;", false, false); + UniqueFEIRType type4 = FEIRTypeHelper::CreateTypeByJavaName("I", false, false); + UniqueFEIRType type5 = FEIRTypeHelper::CreateTypeByJavaName("J", false, false); + UniqueFEIRType type6 = FEIRTypeHelper::CreateTypeByJavaName("LFEIRTypeHelperTest", false, false); + UniqueFEIRType typeDefault = FEIRTypeHelper::CreateTypeByJavaName("Ljava/lang/Object;", false, false); + UniqueFEIRType typeResult; + FEIRTypeMergeHelper typeMergeHelper(typeDefault); + // merge type0 and type1, expect Ljava/lang/Number; + typeResult.reset(type0->Clone().release()); + EXPECT_EQ(typeMergeHelper.MergeType(typeResult, type1), true); + EXPECT_EQ(typeResult->IsEqualTo(type1), true); + + // merge type1 and type0, expect Ljava/lang/Number; + typeResult.reset(type1->Clone().release()); + EXPECT_EQ(typeMergeHelper.MergeType(typeResult, type0), true); + EXPECT_EQ(typeResult->IsEqualTo(type1), true); + + // merge type0 and type2, expect Ljava/lang/Object; + typeResult.reset(type0->Clone().release()); + EXPECT_EQ(typeMergeHelper.MergeType(typeResult, type2), true); + EXPECT_EQ(typeResult->IsEqualTo(typeDefault), true); + + // merge type1 and type2, expect 
Ljava/lang/Object; + typeResult.reset(type1->Clone().release()); + EXPECT_EQ(typeMergeHelper.MergeType(typeResult, type2), true); + EXPECT_EQ(typeResult->IsEqualTo(typeDefault), true); + + // merge diff dim + typeResult.reset(type2->Clone().release()); + EXPECT_EQ(typeMergeHelper.MergeType(typeResult, type3), false); + + // merge diff PrimType + typeResult.reset(type4->Clone().release()); + EXPECT_EQ(typeMergeHelper.MergeType(typeResult, type5), false); + + // merge type0 and type6, expect Ljava/lang/Object; + typeResult.reset(type0->Clone().release()); + EXPECT_EQ(typeMergeHelper.MergeType(typeResult, type6), true); + EXPECT_EQ(typeResult->IsEqualTo(typeDefault), true); + + // merge type6 and type0, expect Ljava/lang/Object; + typeResult.reset(type6->Clone().release()); + EXPECT_EQ(typeMergeHelper.MergeType(typeResult, type0), true); + EXPECT_EQ(typeResult->IsEqualTo(typeDefault), true); +} + +TEST_F(FEIRTypeHelperTest, MergeType_Child) { + UniqueFEIRType type0 = FEIRTypeHelper::CreateTypeByJavaName("Ljava/lang/Integer;", false, false); + UniqueFEIRType type1 = FEIRTypeHelper::CreateTypeByJavaName("Ljava/lang/Number;", false, false); + UniqueFEIRType type2 = FEIRTypeHelper::CreateTypeByJavaName("Ljava/lang/String;", false, false); + UniqueFEIRType typeDefault = FEIRTypeHelper::CreateTypeByJavaName("Ljava/lang/Object;", false, false); + UniqueFEIRType typeResult; + FEIRTypeMergeHelper typeMergeHelper(typeDefault); + // merge type0 and type1, expect Ljava/lang/Integer; + typeResult.reset(type0->Clone().release()); + EXPECT_EQ(typeMergeHelper.MergeType(typeResult, type1, false), true); + EXPECT_EQ(typeResult->IsEqualTo(type0), true); + + // merge type1 and type0, expect Ljava/lang/Integer; + typeResult.reset(type1->Clone().release()); + EXPECT_EQ(typeMergeHelper.MergeType(typeResult, type0, false), true); + EXPECT_EQ(typeResult->IsEqualTo(type0), true); + + // merge type0 and type2, expect error + typeResult.reset(type0->Clone().release()); + EXPECT_EQ(typeMergeHelper.MergeType(typeResult, type2, false), false); +} + +// +// TypeInferTest: Test1 +// DFG: hir2mpl/doc/images/ut_cases/TypeInfer/Test1.dot +// Image: hir2mpl/doc/images/ut_cases/TypeInfer/Test1.png +// UseDef: +// var1 -> var0 +// DefUse: +// var0 -> var1 +// +TEST_F(FEIRTypeInferTest, Test1) { + std::map> mapDefUse; + UniqueFEIRVar var0 = FEIRBuilder::CreateVarReg(0, PTY_ref, false); + UniqueFEIRVar var1 = FEIRBuilder::CreateVarReg(0, PTY_ref, false); + UniqueFEIRType type0 = FEIRTypeHelper::CreateTypeByJavaName("Ljava/lang/String;", false, true); + UniqueFEIRType type1 = FEIRTypeHelper::CreateTypeByJavaName("Ljava/lang/Object;", false, true); + var0->SetType(type0->Clone()); + var1->SetType(type1->Clone()); + mapDefUse[&var0].insert(&var1); + std::unique_ptr typeInfer = std::make_unique(kSrcLangJava, mapDefUse); + EXPECT_EQ(typeInfer->GetTypeForVarUse(var1)->IsEqualTo(type1), true); + EXPECT_EQ(typeInfer->GetTypeForVarDef(var0)->IsEqualTo(type0), true); + typeInfer->ProcessVarDef(var0); + EXPECT_EQ(var0->GetKind(), FEIRVarKind::kFEIRVarTypeScatter); + FEIRVarTypeScatter *ptrVar0 = static_cast(var0.get()); + EXPECT_EQ(ptrVar0->GetType()->IsEqualTo(type0), true); + EXPECT_EQ(ptrVar0->GetScatterTypes().size(), 1); + EXPECT_NE(ptrVar0->GetScatterTypes().find(FEIRTypeKey(type1)), ptrVar0->GetScatterTypes().end()); +} + +// +// TypeInferTest: Test2 +// DFG: hir2mpl/doc/images/ut_cases/TypeInfer/Test2.dot +// Image: hir2mpl/doc/images/ut_cases/TypeInfer/Test2.png +// UseDef: +// var1 -> var0 +// var3 -> var2 +// DefUse: +// var0 -> 
var1 +// var2 -> var3 +// +TEST_F(FEIRTypeInferTest, Test2) { + std::map> mapDefUse; + UniqueFEIRVar var0 = FEIRBuilder::CreateVarReg(0, PTY_ref, false); + UniqueFEIRVar var1 = FEIRBuilder::CreateVarReg(0, PTY_ref, false); + UniqueFEIRVar var2 = FEIRBuilder::CreateVarReg(1, PTY_ref, false); + UniqueFEIRVar var3 = FEIRBuilder::CreateVarReg(1, PTY_ref, false); + UniqueFEIRType type0 = FEIRTypeHelper::CreateTypeByJavaName("Ljava/lang/String;", false, true); + UniqueFEIRType type3 = FEIRTypeHelper::CreateTypeByJavaName("Ljava/lang/Object;", false, true); + UniqueFEIRVarTrans trans0 = std::make_unique(FEIRVarTransKind::kFEIRVarTransDirect, var1); + UniqueFEIRVarTrans trans1 = std::make_unique(FEIRVarTransKind::kFEIRVarTransDirect, var2); + var1->SetTrans(std::move(trans1)); + var2->SetTrans(std::move(trans0)); + var0->SetType(type0->Clone()); + var3->SetType(type3->Clone()); + mapDefUse[&var0].insert(&var1); + mapDefUse[&var2].insert(&var3); + std::unique_ptr typeInfer = std::make_unique(kSrcLangJava, mapDefUse); + EXPECT_EQ(typeInfer->GetTypeForVarUse(var1)->IsEqualTo(type3), true); + typeInfer->Reset(); + EXPECT_EQ(typeInfer->GetTypeForVarUse(var3)->IsEqualTo(type3), true); + + typeInfer->ProcessVarDef(var0); + typeInfer->ProcessVarDef(var2); + EXPECT_EQ(var0->GetKind(), FEIRVarKind::kFEIRVarTypeScatter); + EXPECT_EQ(var2->GetKind(), FEIRVarKind::kFEIRVarReg); +} + +// +// TypeInferTest: Test3 +// DFG: hir2mpl/doc/images/ut_cases/TypeInfer/Test3.dot +// Image: hir2mpl/doc/images/ut_cases/TypeInfer/Test3.png +// UseDef: +// var1 -> {var0, var5} +// var3 -> var2 +// var4 -> var2 +TEST_F(FEIRTypeInferTest, Test3) { + std::map> mapDefUse; + UniqueFEIRVar var0 = FEIRBuilder::CreateVarReg(0, PTY_ref, false); + UniqueFEIRVar var1 = FEIRBuilder::CreateVarReg(0, PTY_ref, false); + UniqueFEIRVar var2 = FEIRBuilder::CreateVarReg(1, PTY_ref, false); + UniqueFEIRVar var3 = FEIRBuilder::CreateVarReg(1, PTY_ref, false); + UniqueFEIRVar var4 = FEIRBuilder::CreateVarReg(1, PTY_ref, false); + UniqueFEIRVar var5 = FEIRBuilder::CreateVarReg(0, PTY_ref, false); + UniqueFEIRType type0 = FEIRTypeHelper::CreateTypeByJavaName("Ljava/lang/String;", false, true); + UniqueFEIRVarTrans transFrom1To2 = std::make_unique(FEIRVarTransKind::kFEIRVarTransDirect, var1); + UniqueFEIRVarTrans transFrom2To1 = std::make_unique(FEIRVarTransKind::kFEIRVarTransDirect, var2); + UniqueFEIRVarTrans transFrom4To5 = std::make_unique(FEIRVarTransKind::kFEIRVarTransDirect, var4); + UniqueFEIRVarTrans transFrom5To4 = std::make_unique(FEIRVarTransKind::kFEIRVarTransDirect, var5); + var1->SetTrans(std::move(transFrom2To1)); + var2->SetTrans(std::move(transFrom1To2)); + var4->SetTrans(std::move(transFrom5To4)); + var5->SetTrans(std::move(transFrom4To5)); + var3->SetType(type0->Clone()); + mapDefUse[&var0].insert(&var1); + mapDefUse[&var5].insert(&var1); + mapDefUse[&var2].insert(&var3); + mapDefUse[&var2].insert(&var4); + std::unique_ptr typeInfer = std::make_unique(kSrcLangJava, mapDefUse); + typeInfer->Reset(); + EXPECT_EQ(typeInfer->GetTypeForVarUse(var1)->IsEqualTo(type0), true); + typeInfer->Reset(); + EXPECT_EQ(typeInfer->GetTypeForVarUse(var3)->IsEqualTo(type0), true); + typeInfer->Reset(); + EXPECT_EQ(typeInfer->GetTypeForVarUse(var4)->IsEqualTo(type0), true); + typeInfer->ProcessVarDef(var0); + typeInfer->ProcessVarDef(var2); + typeInfer->ProcessVarDef(var5); + EXPECT_EQ(var0->GetKind(), FEIRVarKind::kFEIRVarReg); + EXPECT_EQ(var2->GetKind(), FEIRVarKind::kFEIRVarReg); + EXPECT_EQ(var5->GetKind(), FEIRVarKind::kFEIRVarReg); + 
EXPECT_EQ(var0->GetType()->IsEqualTo(type0), true); + EXPECT_EQ(var2->GetType()->IsEqualTo(type0), true); + EXPECT_EQ(var5->GetType()->IsEqualTo(type0), true); +} + +// +// TypeInferTest: Test4 +// DFG: hir2mpl/doc/images/ut_cases/TypeInfer/Test4.dot +// Image: hir2mpl/doc/images/ut_cases/TypeInfer/Test4.png +// UseDef: +// var1 -> var0 +// var3 -> var2 +// +TEST_F(FEIRTypeInferTest, Test4) { + std::map> mapDefUse; + UniqueFEIRVar var0 = FEIRBuilder::CreateVarReg(0, PTY_ref, false); + UniqueFEIRVar var1 = FEIRBuilder::CreateVarReg(0, PTY_ref, false); + UniqueFEIRVar var2 = FEIRBuilder::CreateVarReg(1, PTY_ref, false); + UniqueFEIRVar var3 = FEIRBuilder::CreateVarReg(1, PTY_ref, false); + UniqueFEIRType type0 = FEIRTypeHelper::CreateTypeByJavaName("Ljava/lang/Object;", false, true); + UniqueFEIRType type1 = FEIRTypeHelper::CreateTypeByJavaName("[Ljava/lang/Object;", false, true); + UniqueFEIRVarTrans transFrom1To2 = std::make_unique(FEIRVarTransKind::kFEIRVarTransArrayDimDecr, var1); + UniqueFEIRVarTrans transFrom2To1 = std::make_unique(FEIRVarTransKind::kFEIRVarTransArrayDimIncr, var2); + var3->SetType(type0->Clone()); + var1->SetTrans(std::move(transFrom2To1)); + var2->SetTrans(std::move(transFrom1To2)); + mapDefUse[&var0].insert(&var1); + mapDefUse[&var2].insert(&var3); + std::unique_ptr typeInfer = std::make_unique(kSrcLangJava, mapDefUse); + typeInfer->Reset(); + EXPECT_EQ(typeInfer->GetTypeForVarUse(var1)->IsEqualTo(type1), true); + typeInfer->ProcessVarDef(var0); + typeInfer->ProcessVarDef(var2); + EXPECT_EQ(var0->GetType()->IsEqualTo(type1), true); + EXPECT_EQ(var2->GetType()->IsEqualTo(type0), true); +} + +TEST_F(FEIRTypeCvtHelperTest, IsRetypeable) { + UniqueFEIRType type0 = FEIRTypeHelper::CreateTypeByJavaName("Ljava/lang/Object;", false, true); + UniqueFEIRType type1 = FEIRTypeHelper::CreateTypeByJavaName("Ljava/lang/String;", false, true); + UniqueFEIRType type2 = FEIRTypeHelper::CreateTypeByJavaName("I", false, false); + bool isRetypeable = FEIRTypeCvtHelper::IsRetypeable(*type0, *type1); + EXPECT_EQ(isRetypeable, true); + isRetypeable = FEIRTypeCvtHelper::IsRetypeable(*type1, *type0); + EXPECT_EQ(isRetypeable, true); + isRetypeable = FEIRTypeCvtHelper::IsRetypeable(*type0, *type2); + EXPECT_EQ(isRetypeable, false); + isRetypeable = FEIRTypeCvtHelper::IsRetypeable(*type2, *type0); + EXPECT_EQ(isRetypeable, false); + isRetypeable = FEIRTypeCvtHelper::IsRetypeable(*type1, *type2); + EXPECT_EQ(isRetypeable, false); + isRetypeable = FEIRTypeCvtHelper::IsRetypeable(*type2, *type1); + EXPECT_EQ(isRetypeable, false); +} + +TEST_F(FEIRTypeCvtHelperTest, IsIntCvt2Ref) { + UniqueFEIRType type0 = FEIRTypeHelper::CreateTypeByJavaName("Ljava/lang/Object;", false, true); + UniqueFEIRType type1 = FEIRTypeHelper::CreateTypeByJavaName("Ljava/lang/String;", false, true); + UniqueFEIRType type2 = FEIRTypeHelper::CreateTypeByJavaName("I", false, false); + bool isIntCvt2Ref = FEIRTypeCvtHelper::IsIntCvt2Ref(*type2, *type0); + EXPECT_EQ(isIntCvt2Ref, true); + isIntCvt2Ref = FEIRTypeCvtHelper::IsIntCvt2Ref(*type2, *type1); + EXPECT_EQ(isIntCvt2Ref, true); +} + +TEST_F(FEIRTypeCvtHelperTest, ChooseCvtOpcodeByFromTypeAndToType) { + UniqueFEIRType type0 = FEIRTypeHelper::CreateTypeByJavaName("Ljava/lang/Object;", false, true); + UniqueFEIRType type1 = FEIRTypeHelper::CreateTypeByJavaName("Ljava/lang/String;", false, true); + UniqueFEIRType type2 = FEIRTypeHelper::CreateTypeByJavaName("I", false, false); + UniqueFEIRType type3 = FEIRTypeHelper::CreateTypeByJavaName("F", false, false); + Opcode opcode = 
FEIRTypeCvtHelper::ChooseCvtOpcodeByFromTypeAndToType(*type0, *type1); + EXPECT_EQ((opcode == OP_retype), true); + opcode = FEIRTypeCvtHelper::ChooseCvtOpcodeByFromTypeAndToType(*type1, *type0); + EXPECT_EQ((opcode == OP_retype), true); + opcode = FEIRTypeCvtHelper::ChooseCvtOpcodeByFromTypeAndToType(*type2, *type0); + EXPECT_EQ((opcode == OP_cvt), true); + opcode = FEIRTypeCvtHelper::ChooseCvtOpcodeByFromTypeAndToType(*type2, *type1); + EXPECT_EQ((opcode == OP_cvt), true); + opcode = FEIRTypeCvtHelper::ChooseCvtOpcodeByFromTypeAndToType(*type2, *type3); + EXPECT_EQ((opcode == OP_undef), true); +} +} // namespace maple diff --git a/src/hir2mpl/test/common/feir_type_test.cpp b/src/hir2mpl/test/common/feir_type_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..267bbcb835ecf668bb164435f77166d7ceb97b25 --- /dev/null +++ b/src/hir2mpl/test/common/feir_type_test.cpp @@ -0,0 +1,324 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include +#include +#include +#include "feir_type.h" +#include "feir_type_helper.h" +#include "redirect_buffer.h" +#include "global_tables.h" +#include "mir_module.h" + +namespace maple { +class FEIRTypeDefaultTest : public testing::Test, public RedirectBuffer { + public: + FEIRTypeDefaultTest() = default; + ~FEIRTypeDefaultTest() = default; +}; + +TEST(FEIRTypeDefault, IsScalarPrimType) { + EXPECT_EQ(FEIRTypeDefault::IsScalarPrimType(PTY_i32), true); + EXPECT_EQ(FEIRTypeDefault::IsScalarPrimType(PTY_ref), false); +} + +TEST_F(FEIRTypeDefaultTest, FEIRTypeDefaultTest_Ref_Array_Precise_Valid) { + GStrIdx idxZero = GStrIdx(0); + GStrIdx idxNonZero = GStrIdx(100); + + FEIRTypeDefault typeScalar(PTY_i32, idxZero, 0); + EXPECT_EQ(typeScalar.IsRef(), false); + EXPECT_EQ(typeScalar.IsArray(), false); + EXPECT_EQ(typeScalar.IsPrecise(), true); + EXPECT_EQ(typeScalar.IsValid(), true); + + FEIRTypeDefault typeScalarArray(PTY_i32, idxZero, 1); + EXPECT_EQ(typeScalarArray.IsRef(), true); + EXPECT_EQ(typeScalarArray.IsArray(), true); + EXPECT_EQ(typeScalarArray.IsPrecise(), true); + EXPECT_EQ(typeScalarArray.IsValid(), true); + + FEIRTypeDefault typeRefNP(PTY_ref, idxZero, 0); + EXPECT_EQ(typeRefNP.IsRef(), true); + EXPECT_EQ(typeRefNP.IsArray(), false); + EXPECT_EQ(typeRefNP.IsPrecise(), false); + EXPECT_EQ(typeRefNP.IsValid(), true); + + FEIRTypeDefault typeRefNPArray(PTY_ref, idxZero, 1); + EXPECT_EQ(typeRefNPArray.IsRef(), true); + EXPECT_EQ(typeRefNPArray.IsArray(), true); + EXPECT_EQ(typeRefNPArray.IsPrecise(), false); + EXPECT_EQ(typeRefNPArray.IsValid(), true); + + FEIRTypeDefault typeRef(PTY_ref, idxNonZero, 0); + EXPECT_EQ(typeRef.IsRef(), true); + EXPECT_EQ(typeRef.IsArray(), false); + EXPECT_EQ(typeRef.IsPrecise(), true); + EXPECT_EQ(typeRef.IsValid(), true); + + FEIRTypeDefault typeRefArray(PTY_ref, idxNonZero, 1); + EXPECT_EQ(typeRefArray.IsRef(), true); + EXPECT_EQ(typeRefArray.IsArray(), true); + EXPECT_EQ(typeRefArray.IsPrecise(), true); + EXPECT_EQ(typeRefArray.IsValid(), true); + + FEIRTypeDefault 
typeInvalid(PTY_i32, idxNonZero, 0); + EXPECT_EQ(typeInvalid.IsValid(), false); + + FEIRTypeDefault typeInvalidArray(PTY_i32, idxNonZero, 1); + EXPECT_EQ(typeInvalidArray.IsValid(), false); +} + +TEST_F(FEIRTypeDefaultTest, FEIRTypeDefaultTest_default) { + FEIRTypeDefault type; + RedirectCout(); + type.GenerateMIRType(kSrcLangJava, false)->Dump(0); + EXPECT_EQ(GetBufferString(), "void"); + ClearBufferString(); + type.GenerateMIRType(kSrcLangJava, true)->Dump(0); + EXPECT_EQ(GetBufferString(), "void"); + RestoreCout(); +} + +TEST_F(FEIRTypeDefaultTest, FEIRTypeDefaultTest_void) { + FEIRTypeDefault type(PTY_void); + RedirectCout(); + type.GenerateMIRType(kSrcLangJava, false)->Dump(0); + EXPECT_EQ(GetBufferString(), "void"); + ClearBufferString(); + type.GenerateMIRType(kSrcLangJava, true)->Dump(0); + EXPECT_EQ(GetBufferString(), "void"); + RestoreCout(); +} + +TEST_F(FEIRTypeDefaultTest, FEIRTypeDefaultTest_ref) { + FEIRTypeDefault type(PTY_ref); + RedirectCout(); + type.GenerateMIRType(kSrcLangJava, false)->Dump(0); + EXPECT_EQ(GetBufferString(), "ref"); + ClearBufferString(); + type.GenerateMIRType(kSrcLangJava, true)->Dump(0); + EXPECT_EQ(GetBufferString(), "ref"); + RestoreCout(); +} + +TEST_F(FEIRTypeDefaultTest, FEIRTypeDefaultTest_refArray) { + std::string typeName = "Ljava_2Flang_2FString_3B"; + GStrIdx gStrIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(typeName); + FEIRTypeDefault type(PTY_ref, gStrIdx, 1); + RedirectCout(); + type.GenerateMIRType(kSrcLangJava, false)->Dump(0); + EXPECT_EQ(GetBufferString(), "<[] <* <$Ljava_2Flang_2FString_3B>>>"); + ClearBufferString(); + type.GenerateMIRType(kSrcLangJava, true)->Dump(0); + EXPECT_EQ(GetBufferString(), "<* <[] <* <$Ljava_2Flang_2FString_3B>>>>"); + RestoreCout(); +} + +TEST_F(FEIRTypeDefaultTest, FEIRTypeDefaultTest_Object) { + GStrIdx typeNameIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName("Ljava_2Flang_2FObject_3B"); + FEIRTypeDefault type(PTY_ref, typeNameIdx, 0); + RedirectCout(); + type.GenerateMIRType(kSrcLangJava, false)->Dump(0); + EXPECT_EQ(GetBufferString(), "<$Ljava_2Flang_2FObject_3B>"); + ClearBufferString(); + type.GenerateMIRType(kSrcLangJava, true)->Dump(0); + EXPECT_EQ(GetBufferString(), "<* <$Ljava_2Flang_2FObject_3B>>"); + RestoreCout(); +} + +TEST_F(FEIRTypeDefaultTest, LoadFromJavaTypeName_V) { + FEIRTypeDefault type; + type.LoadFromJavaTypeName("V"); + RedirectCout(); + type.GenerateMIRType(kSrcLangJava, false)->Dump(0); + EXPECT_EQ(GetBufferString(), "void"); + ClearBufferString(); + type.GenerateMIRType(kSrcLangJava, true)->Dump(0); + EXPECT_EQ(GetBufferString(), "void"); + RestoreCout(); +} + +TEST_F(FEIRTypeDefaultTest, LoadFromJavaTypeName_I) { + FEIRTypeDefault type; + type.LoadFromJavaTypeName("I"); + RedirectCout(); + type.GenerateMIRType(kSrcLangJava, false)->Dump(0); + EXPECT_EQ(GetBufferString(), "i32"); + ClearBufferString(); + type.GenerateMIRType(kSrcLangJava, true)->Dump(0); + EXPECT_EQ(GetBufferString(), "i32"); + RestoreCout(); +} + +TEST_F(FEIRTypeDefaultTest, LoadFromJavaTypeName_J) { + FEIRTypeDefault type; + type.LoadFromJavaTypeName("J"); + RedirectCout(); + type.GenerateMIRType(kSrcLangJava, false)->Dump(0); + EXPECT_EQ(GetBufferString(), "i64"); + ClearBufferString(); + type.GenerateMIRType(kSrcLangJava, true)->Dump(0); + EXPECT_EQ(GetBufferString(), "i64"); + RestoreCout(); +} + +TEST_F(FEIRTypeDefaultTest, LoadFromJavaTypeName_F) { + FEIRTypeDefault type; + type.LoadFromJavaTypeName("F"); + RedirectCout(); + type.GenerateMIRType(kSrcLangJava, false)->Dump(0); 
+ EXPECT_EQ(GetBufferString(), "f32"); + ClearBufferString(); + type.GenerateMIRType(kSrcLangJava, true)->Dump(0); + EXPECT_EQ(GetBufferString(), "f32"); + RestoreCout(); +} + +TEST_F(FEIRTypeDefaultTest, LoadFromJavaTypeName_D) { + FEIRTypeDefault type; + type.LoadFromJavaTypeName("D"); + RedirectCout(); + type.GenerateMIRType(kSrcLangJava, false)->Dump(0); + EXPECT_EQ(GetBufferString(), "f64"); + ClearBufferString(); + type.GenerateMIRType(kSrcLangJava, true)->Dump(0); + EXPECT_EQ(GetBufferString(), "f64"); + RestoreCout(); +} + +TEST_F(FEIRTypeDefaultTest, LoadFromJavaTypeName_Z) { + FEIRTypeDefault type; + type.LoadFromJavaTypeName("Z"); + RedirectCout(); + type.GenerateMIRType(kSrcLangJava, false)->Dump(0); + EXPECT_EQ(GetBufferString(), "u1"); + ClearBufferString(); + type.GenerateMIRType(kSrcLangJava, true)->Dump(0); + EXPECT_EQ(GetBufferString(), "u1"); + RestoreCout(); +} + +TEST_F(FEIRTypeDefaultTest, LoadFromJavaTypeName_B) { + FEIRTypeDefault type; + type.LoadFromJavaTypeName("B"); + RedirectCout(); + type.GenerateMIRType(kSrcLangJava, false)->Dump(0); + EXPECT_EQ(GetBufferString(), "i8"); + ClearBufferString(); + type.GenerateMIRType(kSrcLangJava, true)->Dump(0); + EXPECT_EQ(GetBufferString(), "i8"); + RestoreCout(); +} + +TEST_F(FEIRTypeDefaultTest, LoadFromJavaTypeName_C) { + FEIRTypeDefault type; + type.LoadFromJavaTypeName("C"); + RedirectCout(); + type.GenerateMIRType(kSrcLangJava, false)->Dump(0); + EXPECT_EQ(GetBufferString(), "u16"); + ClearBufferString(); + type.GenerateMIRType(kSrcLangJava, true)->Dump(0); + EXPECT_EQ(GetBufferString(), "u16"); + RestoreCout(); +} + +TEST_F(FEIRTypeDefaultTest, LoadFromJavaTypeName_S) { + FEIRTypeDefault type; + type.LoadFromJavaTypeName("S"); + RedirectCout(); + type.GenerateMIRType(kSrcLangJava, false)->Dump(0); + EXPECT_EQ(GetBufferString(), "i16"); + ClearBufferString(); + type.GenerateMIRType(kSrcLangJava, true)->Dump(0); + EXPECT_EQ(GetBufferString(), "i16"); + RestoreCout(); +} + +TEST_F(FEIRTypeDefaultTest, LoadFromJavaTypeName_Object) { + FEIRTypeDefault type; + type.LoadFromJavaTypeName("Ljava_2Flang_2FObject_3B"); + RedirectCout(); + type.GenerateMIRType(kSrcLangJava, false)->Dump(0); + EXPECT_EQ(GetBufferString(), "<$Ljava_2Flang_2FObject_3B>"); + ClearBufferString(); + type.GenerateMIRType(kSrcLangJava, true)->Dump(0); + EXPECT_EQ(GetBufferString(), "<* <$Ljava_2Flang_2FObject_3B>>"); + RestoreCout(); +} + +TEST_F(FEIRTypeDefaultTest, LoadFromJavaTypeName_AI) { + FEIRTypeDefault type; + type.LoadFromJavaTypeName("AI"); + RedirectCout(); + type.GenerateMIRType(kSrcLangJava, false)->Dump(0); + EXPECT_EQ(GetBufferString(), "<[] i32>"); + ClearBufferString(); + type.GenerateMIRType(kSrcLangJava, true)->Dump(0); + EXPECT_EQ(GetBufferString(), "<* <[] i32>>"); + ClearBufferString(); + type.GenerateMIRTypeAuto(kSrcLangJava)->Dump(0); + EXPECT_EQ(GetBufferString(), "<* <[] i32>>"); + RestoreCout(); +} + +TEST_F(FEIRTypeDefaultTest, UseFEIRTypeKey) { + std::unordered_set testSet; + UniqueFEIRType type1 = FEIRTypeHelper::CreateTypeByJavaName("Ljava/lang/Object;", false, false); + UniqueFEIRType type2 = type1->Clone(); + UniqueFEIRType type3 = FEIRTypeHelper::CreateTypeByJavaName("Ljava/lang/Object;", false, true); + UniqueFEIRType type4 = type3->Clone(); + testSet.insert(FEIRTypeKey(type1)); + testSet.insert(FEIRTypeKey(type2)); + testSet.insert(FEIRTypeKey(type3)); + testSet.insert(FEIRTypeKey(type4)); + EXPECT_EQ(testSet.size(), 2); + EXPECT_NE(testSet.find(FEIRTypeKey(type1)), testSet.end()); + 
EXPECT_NE(testSet.find(FEIRTypeKey(type2)), testSet.end()); + EXPECT_NE(testSet.find(FEIRTypeKey(type3)), testSet.end()); + EXPECT_NE(testSet.find(FEIRTypeKey(type4)), testSet.end()); +} + +TEST_F(FEIRTypeDefaultTest, FEIRTypeNative_create) { + RedirectCout(); + MIRType *mirType = GlobalTables::GetTypeTable().GetInt32(); + FEIRTypeNative type(*mirType); + EXPECT_EQ(type.IsRef(), false); + EXPECT_EQ(type.IsArray(), false); + EXPECT_EQ(type.IsPrecise(), true); + EXPECT_EQ(type.IsValid(), true); + type.GenerateMIRType()->Dump(0); + EXPECT_EQ(GetBufferString(), "i32"); + + MIRType *mirType1 = GlobalTables::GetTypeTable().GetRef(); + FEIRTypeNative type1(*mirType1); + EXPECT_EQ(type1.IsRef(), true); + EXPECT_EQ(type1.IsArray(), false); + EXPECT_EQ(type1.IsPrecise(), true); + EXPECT_EQ(type1.IsValid(), true); + type1.GenerateMIRType()->Dump(0); + EXPECT_EQ(GetBufferString(), "ref"); + RestoreCout(); +} + +TEST_F(FEIRTypeDefaultTest, FEIRTypeNative_equal) { + UniqueFEIRType type1 = std::make_unique<FEIRTypeNative>(*GlobalTables::GetTypeTable().GetInt32()); + UniqueFEIRType type2 = std::make_unique<FEIRTypeNative>(*GlobalTables::GetTypeTable().GetInt64()); + UniqueFEIRType type3 = std::make_unique<FEIRTypeNative>(*GlobalTables::GetTypeTable().GetRef()); + UniqueFEIRType type4 = type1->Clone(); + EXPECT_EQ(type1->IsEqualTo(type2), false); + EXPECT_EQ(type1->IsEqualTo(type3), false); + EXPECT_EQ(type1->IsEqualTo(type4), true); +} +} // namespace maple \ No newline at end of file diff --git a/src/hir2mpl/test/common/feir_var_test.cpp b/src/hir2mpl/test/common/feir_var_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..ea700416627f205bfa8da7a25a40439f5db2cf25 --- /dev/null +++ b/src/hir2mpl/test/common/feir_var_test.cpp @@ -0,0 +1,187 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include +#include +#include +#include "feir_test_base.h" +#include "feir_var.h" +#include "feir_var_reg.h" +#include "feir_var_name.h" +#include "feir_type_helper.h" +#include "hir2mpl_ut_regx.h" +#include "feir_builder.h" + +namespace maple { +class FEIRVarTest : public FEIRTestBase { + public: + FEIRVarTest() = default; + virtual ~FEIRVarTest() = default; +}; + +TEST_F(FEIRVarTest, FEIRVarReg) { + std::unique_ptr<FEIRType> type = FEIRTypeHelper::CreateTypeByJavaName("Ltest/lang/Object;", false, true); + FEIRVarReg varReg(1, std::move(type)); + varReg.SetGlobal(false); + MIRSymbol *symbol = varReg.GenerateMIRSymbol(mirBuilder); + RedirectCout(); + std::string symbolName = symbol->GetName(); + std::string strPattern = HIR2MPLUTRegx::RegName(1) + "_" + HIR2MPLUTRegx::RefIndex(HIR2MPLUTRegx::kAnyNumber); + EXPECT_EQ(HIR2MPLUTRegx::Match(symbolName, strPattern), true); + symbol->Dump(true, 0); + std::string symbolDump = GetBufferString(); + std::string strPattern2 = "var %" + HIR2MPLUTRegx::RegName(1) + "_" + HIR2MPLUTRegx::RefIndex(HIR2MPLUTRegx::kAnyNumber) + + HIR2MPLUTRegx::Any(); + EXPECT_EQ(HIR2MPLUTRegx::Match(symbolDump, strPattern2), true); + RestoreCout(); +} + +TEST_F(FEIRVarTest, FEIRVarName) { + std::unique_ptr<FEIRType> type = FEIRTypeHelper::CreateTypeByJavaName("Ltest/lang/Object;", false, true); + GStrIdx nameIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName("Ltest_2Flang_2FObject_3B_7Cklass"); + FEIRVarName varName(nameIdx, std::move(type)); + varName.SetGlobal(true); + MIRSymbol *symbol = varName.GenerateMIRSymbol(mirBuilder); + RedirectCout(); + std::string symbolName = symbol->GetName(); + EXPECT_EQ(symbolName, "Ltest_2Flang_2FObject_3B_7Cklass"); + symbol->Dump(false, 0); + std::string symbolDump = GetBufferString(); + std::string strPattern2 = std::string("var \\$") + "Ltest_2Flang_2FObject_3B_7Cklass" + HIR2MPLUTRegx::Any(); + EXPECT_EQ(HIR2MPLUTRegx::Match(symbolDump, strPattern2), true); + RestoreCout(); +} + +TEST_F(FEIRVarTest, FEIRVarTransDirect) { + UniqueFEIRVar var = FEIRBuilder::CreateVarReg(0, PTY_ref, false); + FEIRVarTrans trans(FEIRVarTransKind::kFEIRVarTransDirect, var); + RedirectCout(); + UniqueFEIRType type1 = FEIRTypeHelper::CreateTypeByJavaName("Ljava/lang/Object;", false, false); + UniqueFEIRType type1T = trans.GetType(type1); + type1T->GenerateMIRType()->Dump(0); + EXPECT_EQ(GetBufferString(), "<$Ljava_2Flang_2FObject_3B>"); + ClearBufferString(); + + UniqueFEIRType type2 = FEIRTypeHelper::CreateTypeByJavaName("Ltest/lang/Object;", false, true); + UniqueFEIRType type2T = trans.GetType(type2); + type2T->GenerateMIRType()->Dump(0); + EXPECT_EQ(GetBufferString(), "<* <$Ltest_2Flang_2FObject_3B>>"); + ClearBufferString(); + + UniqueFEIRType type3 = FEIRTypeHelper::CreateTypeByJavaName("I", false, false); + UniqueFEIRType type3T = trans.GetType(type3); + type3T->GenerateMIRType()->Dump(0); + EXPECT_EQ(GetBufferString(), "i32"); + ClearBufferString(); + + UniqueFEIRType type4 = FEIRTypeHelper::CreateTypeByJavaName("[I", false, true); + UniqueFEIRType type4T = trans.GetType(type4); + type4T->GenerateMIRType()->Dump(0); + EXPECT_EQ(GetBufferString(), "<* <[] i32>>"); + ClearBufferString(); + RestoreCout(); +} + +TEST_F(FEIRVarTest, FEIRVarTransArrayDimIncr) { + UniqueFEIRVar var = FEIRBuilder::CreateVarReg(0, PTY_ref, false); + FEIRVarTrans trans(FEIRVarTransKind::kFEIRVarTransArrayDimIncr, var); + RedirectCout(); + UniqueFEIRType type1 = FEIRTypeHelper::CreateTypeByJavaName("Ljava/lang/Object;", false, false); + UniqueFEIRType type1T = trans.GetType(type1); 
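+  // Calling GetType with a trailing false argument is expected to yield the grown array type without the outer pointer wrapping (compare the two dumps checked below).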
+ UniqueFEIRType type1T2 = trans.GetType(type1, PTY_ref, false); + type1T->GenerateMIRType()->Dump(0); + EXPECT_EQ(GetBufferString(), "<* <[] <* <$Ljava_2Flang_2FObject_3B>>>>"); + ClearBufferString(); + type1T2->GenerateMIRType()->Dump(0); + EXPECT_EQ(GetBufferString(), "<[] <* <$Ljava_2Flang_2FObject_3B>>>"); + ClearBufferString(); + + UniqueFEIRType type2 = FEIRTypeHelper::CreateTypeByJavaName("[Ljava/lang/Object;", false, true); + UniqueFEIRType type2T = trans.GetType(type2); + UniqueFEIRType type2T2 = trans.GetType(type2, PTY_ref, false); + type2T->GenerateMIRType()->Dump(0); + EXPECT_EQ(GetBufferString(), "<* <[] <* <[] <* <$Ljava_2Flang_2FObject_3B>>>>>>"); + ClearBufferString(); + type2T2->GenerateMIRType()->Dump(0); + EXPECT_EQ(GetBufferString(), "<* <[] <* <[] <* <$Ljava_2Flang_2FObject_3B>>>>>>"); + ClearBufferString(); + + UniqueFEIRType type3 = FEIRTypeHelper::CreateTypeByJavaName("I", false, false); + UniqueFEIRType type3T = trans.GetType(type3); + UniqueFEIRType type3T2 = trans.GetType(type3, PTY_ref, false); + type3T->GenerateMIRType()->Dump(0); + EXPECT_EQ(GetBufferString(), "<* <[] i32>>"); + ClearBufferString(); + type3T2->GenerateMIRType()->Dump(0); + EXPECT_EQ(GetBufferString(), "<[] i32>"); + ClearBufferString(); + + UniqueFEIRType type4 = FEIRTypeHelper::CreateTypeByJavaName("[I", false, true); + UniqueFEIRType type4T = trans.GetType(type4); + UniqueFEIRType type4T2 = trans.GetType(type4, PTY_ref, false); + type4T->GenerateMIRType()->Dump(0); + EXPECT_EQ(GetBufferString(), "<* <[] <* <[] i32>>>>"); + ClearBufferString(); + type4T2->GenerateMIRType()->Dump(0); + EXPECT_EQ(GetBufferString(), "<* <[] <* <[] i32>>>>"); + ClearBufferString(); + RestoreCout(); +} + +TEST_F(FEIRVarTest, FEIRVarTransArrayDimDecr) { + UniqueFEIRVar var = FEIRBuilder::CreateVarReg(0, PTY_ref, false); + FEIRVarTrans trans(FEIRVarTransKind::kFEIRVarTransArrayDimDecr, var); + RedirectCout(); + UniqueFEIRType type1 = FEIRTypeHelper::CreateTypeByJavaName("[Ljava/lang/Object;", false, false); + UniqueFEIRType type1T = trans.GetType(type1); + UniqueFEIRType type1T2 = trans.GetType(type1, PTY_ref, false); + type1T->GenerateMIRType()->Dump(0); + EXPECT_EQ(GetBufferString(), "<* <$Ljava_2Flang_2FObject_3B>>"); + ClearBufferString(); + type1T2->GenerateMIRType()->Dump(0); + EXPECT_EQ(GetBufferString(), "<$Ljava_2Flang_2FObject_3B>"); + ClearBufferString(); + + UniqueFEIRType type2 = FEIRTypeHelper::CreateTypeByJavaName("[[Ljava/lang/Object;", false, true); + UniqueFEIRType type2T = trans.GetType(type2); + UniqueFEIRType type2T2 = trans.GetType(type2, PTY_ref, false); + type2T->GenerateMIRType()->Dump(0); + EXPECT_EQ(GetBufferString(), "<* <[] <* <$Ljava_2Flang_2FObject_3B>>>>"); + ClearBufferString(); + type2T2->GenerateMIRType()->Dump(0); + EXPECT_EQ(GetBufferString(), "<* <[] <* <$Ljava_2Flang_2FObject_3B>>>>"); + ClearBufferString(); + + UniqueFEIRType type3 = FEIRTypeHelper::CreateTypeByJavaName("[I", false, false); + UniqueFEIRType type3T = trans.GetType(type3); + UniqueFEIRType type3T2 = trans.GetType(type3, PTY_ref, false); + type3T->GenerateMIRType()->Dump(0); + EXPECT_EQ(GetBufferString(), "i32"); + ClearBufferString(); + type3T2->GenerateMIRType()->Dump(0); + EXPECT_EQ(GetBufferString(), "i32"); + ClearBufferString(); + + UniqueFEIRType type4 = FEIRTypeHelper::CreateTypeByJavaName("[[I", false, false); + UniqueFEIRType type4T = trans.GetType(type4); + UniqueFEIRType type4T2 = trans.GetType(type4, PTY_ref, false); + type4T->GenerateMIRType()->Dump(0); + EXPECT_EQ(GetBufferString(), "<* <[] 
i32>>"); + ClearBufferString(); + type4T2->GenerateMIRType()->Dump(0); + EXPECT_EQ(GetBufferString(), "<[] i32>"); + ClearBufferString(); + RestoreCout(); +} +} // namespace maple \ No newline at end of file diff --git a/src/hir2mpl/test/common/hir2mplUT.cpp b/src/hir2mpl/test/common/hir2mplUT.cpp new file mode 100644 index 0000000000000000000000000000000000000000..63d4d71783ae549bf016efff769fe6d68a7d32e6 --- /dev/null +++ b/src/hir2mpl/test/common/hir2mplUT.cpp @@ -0,0 +1,89 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include +#include +#include "mpl_logging.h" +#include "mir_module.h" +#include "hir2mpl_ut_options.h" +#include "basic_io.h" +#include "base64.h" +#include "jbc_input.h" +#include "fe_manager.h" +#include "fe_file_type.h" +#include "fe_type_hierarchy.h" +#include "hir2mpl_ut_environment.h" + +using namespace maple; +void GenBase64ForFile(const std::string &fileName) { + BasicIOMapFile file(fileName); + if (!file.OpenAndMap()) { + FATAL(kLncFatal, "Can not open the input file %s for base64 convert.", fileName.c_str()); + } + std::string base64Str = Base64::Encode(file.GetPtr(), file.GetLength()); + size_t length = base64Str.length(); + size_t start = 0; + const size_t width = 60; + while (start < length) { + std::cout << "\"" << base64Str.substr(start, width) << "\"" << std::endl; + start += width; + } + file.Close(); +} + +int main(int argc, char **argv) { + ::testing::AddGlobalTestEnvironment(new HIR2MPLUTEnvironment); + FEManager::Init(HIR2MPLUTEnvironment::GetMIRModule()); + HIR2MPLUTOptions options; + if (!options.SolveArgs(argc, argv)) { + return 0; + } + if (options.GetRunAll()) { + if (argc > 1) { + argc--; + argv++; + } + testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); + } + if (options.GetRunAllWithCore()) { + CHECK_FATAL(argc > 2, "In RunAllWithCore mode, argc must larger than 2"); + argc -= 2; + argv += 2; + FEManager::GetTypeManager().LoadMplt(options.GetCoreMpltName(), FETypeFlag::kSrcMpltSys); + FETypeHierarchy::GetInstance().InitByGlobalTable(); + testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); + } + HIR2MPLUTEnvironment::GetMIRModule().SetFlavor(maple::kFeProduced); + HIR2MPLUTEnvironment::GetMIRModule().SetSrcLang(maple::kSrcLangJava); + if (options.GetGenBase64()) { + GenBase64ForFile(options.GetBase64SrcFileName()); + } + jbc::JBCInput jbcInput(HIR2MPLUTEnvironment::GetMIRModule()); + if (options.GetClassFileList().size() > 0) { + jbcInput.ReadClassFiles(options.GetClassFileList()); + } + if (options.GetJarFileList().size() > 0) { + jbcInput.ReadJarFiles(options.GetJarFileList()); + } + if (options.GetMpltFileList().size() > 0) { + if (FEManager::GetTypeManager().LoadMplts(options.GetMpltFileList(), FETypeFlag::kSrcMplt, "Load mplt")) { + INFO(kLncInfo, "Load mplt...success"); + } else { + INFO(kLncInfo, "Load mplt...failed"); + } + } + return 0; +} diff --git a/src/hir2mpl/test/common/hir2mpl_ut.h b/src/hir2mpl/test/common/hir2mpl_ut.h new file mode 100644 index 
0000000000000000000000000000000000000000..fdf9da4b4d24af8b5bb70a179debc4b725c382ee --- /dev/null +++ b/src/hir2mpl/test/common/hir2mpl_ut.h @@ -0,0 +1,21 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef HIR2MPLUT_INCLUDE_HIR2MPL_UT_H +#define HIR2MPLUT_INCLUDE_HIR2MPL_UT_H +#include "mir_module.h" + +extern maple::MIRModule &module; +extern HIR2MPLUTOptions &options; +#endif // HIR2MPLUT_INCLUDE_HIR2MPL_UT_H \ No newline at end of file diff --git a/src/hir2mpl/test/common/hir2mpl_ut_environment.h b/src/hir2mpl/test/common/hir2mpl_ut_environment.h new file mode 100644 index 0000000000000000000000000000000000000000..46f5dae9ddd3c6436d79a52c73ea63c55ea4ad14 --- /dev/null +++ b/src/hir2mpl/test/common/hir2mpl_ut_environment.h @@ -0,0 +1,38 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef HIR2MPL_TEST_HIR2MPL_UT_ENVIRONMENT_H +#define HIR2MPL_TEST_HIR2MPL_UT_ENVIRONMENT_H +#include +#include +#include "mir_module.h" +#include "mir_nodes.h" + +namespace maple { +class HIR2MPLUTEnvironment : public ::testing::Environment { + public: + HIR2MPLUTEnvironment() = default; + ~HIR2MPLUTEnvironment() = default; + + static MIRModule &GetMIRModule() { + static MIRModule module("hir2mplUT"); + return module; + } + + void SetUp() override { + theMIRModule = &GetMIRModule(); + } +}; +} // namespace maple +#endif // HIR2MPL_TEST_HIR2MPL_UT_ENVIRONMENT_H \ No newline at end of file diff --git a/src/hir2mpl/test/common/hir2mpl_ut_options.cpp b/src/hir2mpl/test/common/hir2mpl_ut_options.cpp new file mode 100644 index 0000000000000000000000000000000000000000..5d7fb3d74a05ce8c28e9928ca270df297ef44dfc --- /dev/null +++ b/src/hir2mpl/test/common/hir2mpl_ut_options.cpp @@ -0,0 +1,124 @@ +/* + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "cl_option.h" +#include "driver_options.h" +#include "hir2mpl_ut_options.h" +#include +#include "mpl_logging.h" +#include "parser_opt.h" +#include "triple.h" + +namespace maple { + +namespace opts::hir2mplut { + +static maplecl::OptionCategory hir2mplUTCategory; + +maplecl::Option<bool> help({"--help", "-h"}, + " -h, -help : print usage and exit", + {hir2mplUTCategory}); +maplecl::Option<std::string> genBase64({"--gen-base64", "-gen-base64"}, + " -gen-base64 file.xx : generate base64 string for file.xx", + {hir2mplUTCategory}); +maplecl::Option<std::string> mplt({"--mplt", "-mplt"}, + " -mplt lib1.mplt,lib2.mplt\n" + " : input mplt files", + {hir2mplUTCategory}); + +maplecl::Option<std::string> inClass({"--in-class", "-in-class"}, + " -in-class file1.jar,file2.jar\n" + " : input class files", + {hir2mplUTCategory}); + +maplecl::Option<std::string> inJar({"--in-jar", "-in-jar"}, + " -in-jar file1.jar,file2.jar\n" + " : input jar files", + {hir2mplUTCategory}); +} + +HIR2MPLUTOptions::HIR2MPLUTOptions() + : runAll(false), + runAllWithCore(false), + genBase64(false), + base64SrcFileName(""), + coreMpltName("") {} + +void HIR2MPLUTOptions::DumpUsage() const { + std::cout << "========================================\n" + << " Run gtest: hir2mplUT\n" + << " Run gtest: hir2mplUT test [ options for gtest ]\n" + << " Run ext mode: hir2mplUT ext [ options ]\n" + << "========= options for ext mode =========\n"; + maplecl::CommandLine::GetCommandLine().HelpPrinter(opts::hir2mplut::hir2mplUTCategory); + exit(1); +} + +bool HIR2MPLUTOptions::SolveArgs(int argc, char **argv) { + if (argc == 1) { + runAll = true; + return true; + } + if (std::string(argv[1]).compare("test") == 0) { + runAll = true; + return true; + } + if (std::string(argv[1]).compare("testWithMplt") == 0) { + runAllWithCore = true; + CHECK_FATAL(argc > 2, "In TestWithMplt mode, core.mplt must be specified"); + coreMpltName = argv[2]; + return true; + } + if (std::string(argv[1]).compare("ext") != 0) { + FATAL(kLncFatal, "Undefined mode"); + return false; + } + runAll = false; + + maplecl::CommandLine::GetCommandLine().Parse(argc, argv, opts::hir2mplut::hir2mplUTCategory); + + if (opts::hir2mplut::help) { + DumpUsage(); + return false; + } + + if (opts::hir2mplut::genBase64.IsEnabledByUser()) { + base64SrcFileName = opts::hir2mplut::genBase64; + } + + if (opts::hir2mplut::inClass.IsEnabledByUser()) { + Split(opts::hir2mplut::inClass, ',', std::back_inserter(classFileList)); + } + + if (opts::hir2mplut::inJar.IsEnabledByUser()) { + Split(opts::hir2mplut::inJar, ',', std::back_inserter(jarFileList)); + } + + if (opts::hir2mplut::mplt.IsEnabledByUser()) { + Split(opts::hir2mplut::mplt, ',', std::back_inserter(mpltFileList)); + } + + return true; +} + +template <class Out> +void HIR2MPLUTOptions::Split(const std::string &s, char delim, Out result) { + std::stringstream ss; + ss.str(s); + std::string item; + while (std::getline(ss, item, delim)) { + *(result++) = item; + } +} +} // namespace maple diff --git a/src/hir2mpl/test/common/hir2mpl_ut_options.h b/src/hir2mpl/test/common/hir2mpl_ut_options.h new file mode 100644 index 0000000000000000000000000000000000000000..06df9793cd4f2d3d8cc6d8c7798e74ffe5f92e87 --- /dev/null +++ b/src/hir2mpl/test/common/hir2mpl_ut_options.h @@ -0,0 +1,79 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef HIR2MPL_INCLUDE_HIR2MPL_UT_OPTIONS_H +#define HIR2MPL_INCLUDE_HIR2MPL_UT_OPTIONS_H +#include <list> +#include <string> + +namespace maple { +class HIR2MPLUTOptions { + public: + HIR2MPLUTOptions(); + ~HIR2MPLUTOptions() = default; + void DumpUsage() const; + bool SolveArgs(int argc, char **argv); + template <class Out> + static void Split(const std::string &s, char delim, Out result); + + static HIR2MPLUTOptions &GetInstance() { + static HIR2MPLUTOptions options; + return options; + } + + bool GetRunAll() const { + return runAll; + } + + bool GetRunAllWithCore() const { + return runAllWithCore; + } + + bool GetGenBase64() const { + return genBase64; + } + + std::string GetBase64SrcFileName() const { + return base64SrcFileName; + } + + std::string GetCoreMpltName() const { + return coreMpltName; + } + + const std::list<std::string> &GetClassFileList() const { + return classFileList; + } + + const std::list<std::string> &GetJarFileList() const { + return jarFileList; + } + + const std::list<std::string> &GetMpltFileList() const { + return mpltFileList; + } + + private: + bool runAll; + bool runAllWithCore; + bool genBase64; + std::string base64SrcFileName; + std::string coreMpltName; + std::list<std::string> classFileList; + std::list<std::string> jarFileList; + std::list<std::string> mpltFileList; +}; +} // namespace maple + +#endif // HIR2MPL_INCLUDE_HIR2MPL_UT_OPTIONS_H \ No newline at end of file diff --git a/src/hir2mpl/test/common/hir2mpl_ut_regx.cpp b/src/hir2mpl/test/common/hir2mpl_ut_regx.cpp new file mode 100644 index 0000000000000000000000000000000000000000..19f9a4d79ec223ebdea9085969f51816b6452688 --- /dev/null +++ b/src/hir2mpl/test/common/hir2mpl_ut_regx.cpp @@ -0,0 +1,59 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "hir2mpl_ut_regx.h" +#include +#include + +namespace maple { +bool HIR2MPLUTRegx::Match(const std::string &str, const std::string &pattern) { + if (std::regex_match(str, std::regex(pattern))) { + return true; + } else { + std::cerr << "Pattern: " << pattern << std::endl; + std::cerr << "String: " << str << std::endl; + return false; + } +} + +std::string HIR2MPLUTRegx::RegName(uint32 regNum) { + std::stringstream ss; + if (regNum == kAnyNumber) { + ss << "Reg[0-9]+"; + } else { + ss << "Reg" << regNum; + } + return ss.str(); +} + +std::string HIR2MPLUTRegx::RefIndex(uint32 typeIdx) { + std::stringstream ss; + if (typeIdx == kAnyNumber) { + ss << "R[0-9]+"; + } else { + ss << "R" << typeIdx; + } + return ss.str(); +} + +std::string HIR2MPLUTRegx::AnyNum(uint32 typeIdx) { + std::stringstream ss; + if (typeIdx == kAnyNumber) { + ss << "[0-9]+"; + } else { + ss << "" << typeIdx; + } + return ss.str(); +} +} // namespace maple \ No newline at end of file diff --git a/src/hir2mpl/test/common/hir2mpl_ut_regx.h b/src/hir2mpl/test/common/hir2mpl_ut_regx.h new file mode 100644 index 0000000000000000000000000000000000000000..ce67fc974112fa22897a49773929c40946c19cf1 --- /dev/null +++ b/src/hir2mpl/test/common/hir2mpl_ut_regx.h @@ -0,0 +1,36 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef HIR2MPL_UT_INCLUDE_HIR2MPL_UT_REGX_H +#define HIR2MPL_UT_INCLUDE_HIR2MPL_UT_REGX_H +#include +#include +#include "types_def.h" + +namespace maple { +class HIR2MPLUTRegx { + public: + static const uint32 kAnyNumber = 0xFFFFFFFF; + HIR2MPLUTRegx() = default; + ~HIR2MPLUTRegx() = default; + static bool Match(const std::string &str, const std::string &pattern); + static std::string RegName(uint32 regNum); + static std::string RefIndex(uint32 typeIdx); + static std::string AnyNum(uint32 typeIdx); + static std::string Any() { + return "(.|\r|\n)*"; + } +}; +} // namespace maple +#endif // HIR2MPL_UT_INCLUDE_HIR2MPL_UT_REGX_H \ No newline at end of file diff --git a/src/hir2mpl/test/common/hir2mpl_ut_regx_test.cpp b/src/hir2mpl/test/common/hir2mpl_ut_regx_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..9e438b45e5edf333b01ccf7d5210aa8dd73061d6 --- /dev/null +++ b/src/hir2mpl/test/common/hir2mpl_ut_regx_test.cpp @@ -0,0 +1,43 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include +#include +#include "hir2mpl_ut_regx.h" + +namespace maple { +TEST(HIR2MPLUTRegx, Any) { + std::string pattern = HIR2MPLUTRegx::Any(); + std::string str = "\n"; + EXPECT_EQ(HIR2MPLUTRegx::Match(str, pattern), true); +} + +TEST(HIR2MPLUTRegx, RegName) { + std::string patternAny = HIR2MPLUTRegx::RegName(HIR2MPLUTRegx::kAnyNumber); + std::string pattern100 = HIR2MPLUTRegx::RegName(100); + EXPECT_EQ(HIR2MPLUTRegx::Match("Reg100", patternAny), true); + EXPECT_EQ(HIR2MPLUTRegx::Match("Reg100", pattern100), true); + EXPECT_EQ(HIR2MPLUTRegx::Match("Reg1000", patternAny), true); + EXPECT_EQ(HIR2MPLUTRegx::Match("Reg1000", pattern100), false); +} + +TEST(HIR2MPLUTRegx, RefIndex) { + std::string patternAny = HIR2MPLUTRegx::RefIndex(HIR2MPLUTRegx::kAnyNumber); + std::string pattern100 = HIR2MPLUTRegx::RefIndex(100); + EXPECT_EQ(HIR2MPLUTRegx::Match("R100", patternAny), true); + EXPECT_EQ(HIR2MPLUTRegx::Match("R100", pattern100), true); + EXPECT_EQ(HIR2MPLUTRegx::Match("R1000", patternAny), true); + EXPECT_EQ(HIR2MPLUTRegx::Match("R1000", pattern100), false); +} +} // namespace maple \ No newline at end of file diff --git a/src/hir2mpl/test/common/redirect_buffer.h b/src/hir2mpl/test/common/redirect_buffer.h new file mode 100644 index 0000000000000000000000000000000000000000..ffbe80810b9d0db3296ceb87a4b8e817070d03df --- /dev/null +++ b/src/hir2mpl/test/common/redirect_buffer.h @@ -0,0 +1,70 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef HIR2MPL_INCLUDE_REDIRECT_BUFFER_H +#define HIR2MPL_INCLUDE_REDIRECT_BUFFER_H +#include +#include + +namespace maple { +class RedirectBuffer { + public: + RedirectBuffer() : sbuf(std::cout.rdbuf()), outByErr(false) {} + ~RedirectBuffer() = default; + + void RedirectCout(bool outByErrIn = false) { + sbuf = RedirectCoutBuf(ss.rdbuf()); + ss.str(""); + outByErr = outByErrIn; + } + + void RestoreCout() { + ss.str(""); + RedirectCoutBuf(sbuf); + } + + std::string GetBufferString() { + std::string str = ss.str(); + ss.str(""); + if (outByErr) { + std::cerr << str; + } + return str; + } + + void ClearBufferString() { + ss.str(""); + } + + std::string RemoveLastReturnChar(const std::string &input) { + size_t length = input.length(); + while (input[length - 1] == '\n') { + length--; + } + return input.substr(0, length); + } + + private: + std::stringstream ss; + std::streambuf *sbuf; + bool outByErr; + + std::streambuf *RedirectCoutBuf(std::streambuf *newBuf) { + std::streambuf *streamBuf = std::cout.rdbuf(); + std::cout.rdbuf(newBuf); + return streamBuf; + } +}; +} // namespace maple +#endif // HIR2MPL_INCLUDE_REDIRECT_BUFFER_H \ No newline at end of file diff --git a/src/hir2mpl/test/cov_check.sh b/src/hir2mpl/test/cov_check.sh new file mode 100644 index 0000000000000000000000000000000000000000..f1af497bfc7a40feb015bd3e5c362cf2a737c0a5 --- /dev/null +++ b/src/hir2mpl/test/cov_check.sh @@ -0,0 +1,30 @@ +#!/bin/bash +# +# Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. 
+# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# +CORE_OJ_JAR=${MAPLE_ROOT}/android/emui-10.0/out/target/common/obj/JAVA_LIBRARIES/core-oj_intermediates/classes.jar +CORE_LIBART_JAR=${MAPLE_ROOT}/android/emui-10.0/out/target/common/obj/JAVA_LIBRARIES/core-libart_intermediates/classes.jar + +rm -rf ${MAPLE_ROOT}/report +${MAPLE_ROOT}/out/target/product/maple_arm64/bin/hir2mplUT ext -gen-base64 xxx.file +${MAPLE_ROOT}/out/target/product/maple_arm64/bin/hir2mplUT ext -gen-base64 ${MAPLE_ROOT}/hir2mpl/test/jbc_input/JBC0001/Test.class +${MAPLE_ROOT}/out/target/product/maple_arm64/bin/hir2mplUT ext -in-class xxx.class +${MAPLE_ROOT}/out/target/product/maple_arm64/bin/hir2mplUT ext -in-class ${MAPLE_ROOT}/hir2mpl/test/jbc_input/JBC0001/Test.class +${MAPLE_ROOT}/out/target/product/maple_arm64/bin/hir2mplUT ext -in-jar xxx.jar +${MAPLE_ROOT}/out/target/product/maple_arm64/bin/hir2mplUT ext -in-jar ${CORE_OJ_JAR},${CORE_LIBART_JAR} +${MAPLE_ROOT}/out/target/product/maple_arm64/bin/hir2mplUT ext -mplt ${MAPLE_ROOT}/out/target/product/maple_arm64-clang-release/lib/host-x86_64-HIR2MPL_DEXO0/libcore-all.mplt +# ${MAPLE_ROOT}/out/target/product/maple_arm64/bin/hir2mplUT test +${MAPLE_ROOT}/out/target/product/maple_arm64/bin/hir2mplUT testWithMplt ${MAPLE_ROOT}/out/target/product/maple_arm64-clang-release/lib/host-x86_64-HIR2MPL_DEXO0/libcore-all.mplt +bash ${MAPLE_ROOT}/zeiss/prebuilt/tools/coverage_check/coverage_check.sh hir2mpl + diff --git a/src/hir2mpl/test/hir2mplUT_check.sh b/src/hir2mpl/test/hir2mplUT_check.sh new file mode 100644 index 0000000000000000000000000000000000000000..9b9c8bdd9f5dccf76fd5d879887090999f9a6d40 --- /dev/null +++ b/src/hir2mpl/test/hir2mplUT_check.sh @@ -0,0 +1,24 @@ +#!/bin/bash +# +# Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# +set -e + +rm -rf ${MAPLE_ROOT}/report +${MAPLE_ROOT}/output/aarch64-clang-release/bin/hir2mplUT ext -gen-base64 ${MAPLE_ROOT}/src/hir2mpl/test/bytecode_input/class/JBC0001/Test.class +${MAPLE_ROOT}/output/aarch64-clang-release/bin/hir2mplUT ext -in-class ${MAPLE_ROOT}/src/hir2mpl/test/bytecode_input/class/JBC0001/Test.class +${MAPLE_ROOT}/output/aarch64-clang-release/bin/hir2mplUT ext -mplt ${MAPLE_ROOT}/output/aarch64-clang-release/libjava-core/host-x86_64-O2/libcore-all.mplt +${MAPLE_ROOT}/output/aarch64-clang-release/bin/hir2mplUT testWithMplt ${MAPLE_ROOT}/output/aarch64-clang-release/libjava-core/host-x86_64-O2/libcore-all.mplt + + diff --git a/src/hir2mpl/test/ops_ut_check.sh b/src/hir2mpl/test/ops_ut_check.sh new file mode 100644 index 0000000000000000000000000000000000000000..36dcf8ccc32dcea69d10edfebe5d90bf72a3ed67 --- /dev/null +++ b/src/hir2mpl/test/ops_ut_check.sh @@ -0,0 +1,24 @@ +#!/bin/bash +# +# Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# +set -e + +rm -rf ${MAPLE_ROOT}/report +${MAPLE_ROOT}/out/target/product/maple_arm64-clang-release/bin/hir2mplUT ext -gen-base64 ${MAPLE_ROOT}/hir2mpl/test/jbc_input/JBC0001/Test.class +${MAPLE_ROOT}/out/target/product/maple_arm64-clang-release/bin/hir2mplUT ext -in-class ${MAPLE_ROOT}/hir2mpl/test/jbc_input/JBC0001/Test.class +${MAPLE_ROOT}/out/target/product/maple_arm64-clang-release/bin/hir2mplUT ext -mplt ${MAPLE_ROOT}/out/target/product/maple_arm64-clang-release/lib/host-x86_64-OPS_O2/libcore-all.mplt +${MAPLE_ROOT}/out/target/product/maple_arm64-clang-release/bin/hir2mplUT testWithMplt ${MAPLE_ROOT}/out/target/product/maple_arm64-clang-release/lib/host-x86_64-OPS_O2/libcore-all.mplt + + diff --git a/src/mapleall/BUILD.gn b/src/mapleall/BUILD.gn new file mode 100644 index 0000000000000000000000000000000000000000..73605d9877d2425298c4152e1e56be1145fb713c --- /dev/null +++ b/src/mapleall/BUILD.gn @@ -0,0 +1,123 @@ +# +# Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# +config("mapleallcompilecfg") { + cflags_cc = [] + cflags_cc += [ + "-std=c++17", + "-fno-common", + ] + + if (ASAN == 1) { + cflags_cc += [ + "-fsanitize=address" + ] + libs = [ + "${LLVMLIBDIR}/libclang_rt.asan-x86_64.a" + ] + } + + if (TARGET == "aarch64") { + cflags_cc += [ + "-DTARGAARCH64", + "-DMAPLE_ROOT=\"${MAPLE_ROOT}\"", + ] + } + + if (TARGET == "x86_64") { + cflags_cc += [ + "-DTARGX86_64", + "-DMAPLE_ROOT=\"${MAPLE_ROOT}\"", + ] + } + + if (TARGET == "riscv64") { + cflags_cc += [ + "-DTARGRISCV64", + "-DMAPLE_ROOT=\"${MAPLE_ROOT}\"", + ] + } + + if (TARGET == "ark") { + cflags_cc += [ + "-DTARGARK", + "-DMAPLE_ROOT=\"${MAPLE_ROOT}\"", + ] + } + + if (GN_BUILD_TYPE == "DEBUG") { + cflags_c += [ "-DDEBUG" ] + cflags_cc += [ "-DDEBUG" ] + } + + if (HOST_ARCH == 64) { + ldflags = [] + ldflags += [ + "-fPIC", + "-rdynamic", + "-lpthread", + "-Wl,-z,relro", + "-Wl,-z,now", + "-Wl,-z,noexecstack", + "-pie", + ] + } + + if (ASAN == 1) { + ldflags += ["-ldl"] + } + if (COV == 1) { + ldflags += ["--coverage"] + cflags_cc += [ + "-fprofile-arcs", + "-ftest-coverage" + ] + } + if (GPROF == 1) { + ldflags += ["-pg"] + cflags_cc += ["-pg"] + } + + if (MAJOR_VERSION != "") { + cflags_cc += [ "-DMAJOR_VERSION=${MAJOR_VERSION}", ] + } + + if (MINOR_VERSION != "") { + cflags_cc += [ "-DMINOR_VERSION=${MINOR_VERSION}", ] + } + + if (RELEASE_VERSION != "") { + cflags_cc += [ "-DRELEASE_VERSION=\"${RELEASE_VERSION}\"", ] + } + + if (BUILD_VERSION != "") { + cflags_cc += [ "-DBUILD_VERSION=${BUILD_VERSION}", ] + } + + if (GIT_REVISION != "") { + cflags_cc += [ "-DGIT_REVISION=\"${GIT_REVISION}\"", ] + } +} + +group("maple") { + deps = [ "${MAPLEALL_ROOT}/maple_driver:maple" ] +} + +group("irbuild") { + deps = [ "${MAPLEALL_ROOT}/maple_ir:irbuild" ] +} + +group("maplegen") { + deps = [ "${MAPLEALL_ROOT}/maple_be:maplegen" ] +} diff --git a/src/mapleall/CMakeLists.txt b/src/mapleall/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..a581092ca23f035569113e65834fe799ea64a822 --- /dev/null +++ b/src/mapleall/CMakeLists.txt @@ -0,0 +1,43 @@ +# +# Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# + +# Version Info +if(NOT ${MAJOR_VERSION} STREQUAL "") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DMAJOR_VERSION=${MAJOR_VERSION}") +endif() +if(NOT ${MINOR_VERSION} STREQUAL "") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DMINOR_VERSION=${MINOR_VERSION}") +endif() +if(NOT ${RELEASE_VERSION} STREQUAL "") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DRELEASE_VERSION=\\\"${RELEASE_VERSION}\\\"") +endif() +if(NOT ${BUILD_VERSION} STREQUAL "") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DBUILD_VERSION=${BUILD_VERSION}") +endif() +if(NOT ${GIT_REVISION} STREQUAL "") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DGIT_REVISION=\\\"${GIT_REVISION}\\\"") +endif() + + +add_subdirectory(maple_util) +add_subdirectory(mempool) +add_subdirectory(maple_ipa) +add_subdirectory(maple_phase) +add_subdirectory(mpl2mpl) +add_subdirectory(maple_me) +add_subdirectory(maple_be) +add_subdirectory(maple_driver) +add_subdirectory(maple_ir) +add_subdirectory(maple_pgo) diff --git a/src/mapleall/bin/dex2mpl b/src/mapleall/bin/dex2mpl new file mode 100755 index 0000000000000000000000000000000000000000..3393eaf9df84834196ddeee3e5c6c636a7679ec5 Binary files /dev/null and b/src/mapleall/bin/dex2mpl differ diff --git a/src/mapleall/bin/java2jar b/src/mapleall/bin/java2jar new file mode 100755 index 0000000000000000000000000000000000000000..c8d50f6f0e8a207ff57b0eb23442f6015c6dc97d --- /dev/null +++ b/src/mapleall/bin/java2jar @@ -0,0 +1,20 @@ +#!/bin/bash +# +# Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# +OUTPUT=$1 +CORE_ALL_JAR=$2 +shift 2 +javac -g -d . -bootclasspath ${CORE_ALL_JAR} $@ +jar -cvf ${OUTPUT} *.class diff --git a/src/mapleall/maple_be/BUILD.gn b/src/mapleall/maple_be/BUILD.gn new file mode 100644 index 0000000000000000000000000000000000000000..0abb44fdd19f8d8950c1bea101beee2943448be6 --- /dev/null +++ b/src/mapleall/maple_be/BUILD.gn @@ -0,0 +1,355 @@ +# +# Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# +include_directories = [ + "${MAPLEALL_ROOT}/maple_be/include/cg", + "${MAPLEALL_ROOT}/maple_be/include/ad", + "${MAPLE_BUILD_OUTPUT}/common/target", + "${MAPLEALL_ROOT}/maple_pgo/include", + "${MAPLEALL_ROOT}/maple_be/include/ad/target", + "${MAPLEALL_ROOT}/maple_be/include/be", + "${MAPLEALL_ROOT}/maple_driver/include", + "${MAPLEALL_ROOT}/maple_util/include", + "${MAPLEALL_ROOT}/maple_ir/include", + "${MAPLEALL_ROOT}/maple_me/include", + "${MAPLEALL_ROOT}/mpl2mpl/include", + "${MAPLEALL_ROOT}/mempool/include", + "${MAPLEALL_ROOT}/maple_ipa/include", + "${MAPLEALL_ROOT}/maple_ipa/include/old", + "${MAPLEALL_ROOT}/maple_phase/include", + "${THIRD_PARTY_ROOT}/bounds_checking_function/include", + "${THIRD_PARTY_ROOT}/llvm_modified/llvm/include/llvm/BinaryFormat", +] + +deps_libcg = [ "${MAPLEALL_ROOT}/maple_pgo:libmplpgo" ] + +deps_libmplbe = [ ":libcglowerer" ] + +if (TARGET == "aarch64") { + include_directories += [ + "${MAPLEALL_ROOT}/maple_be/include/cg/aarch64", + "${MAPLEALL_ROOT}/maple_be/include/be/aarch64", + ] + deps_libcg += [ ":libcgaarch64", + ":libcgphases", + ] +} + +if (TARGET == "x86_64") { + include_directories += [ + "${MAPLEALL_ROOT}/maple_be/include/cg/x86_64", + "${MAPLEALL_ROOT}/maple_be/include/be/x86_64", + ] + deps_libcg += [ ":libcgx8664", + ":libcgx86phases" + ] +} + +if (TARGET == "riscv64") { + include_directories += [ + "${MAPLEALL_ROOT}/maple_be/include/cg/riscv64", + "${MAPLEALL_ROOT}/maple_be/include/be/riscv64", + ] + deps_libcg += [ ":libcgriscv64" ] +} + +if (TARGET == "ark") { + include_directories += [ + "${MAPLEALL_ROOT}/maple_be/include/cg/ark", + "${MAPLEALL_ROOT}/maple_be/include/be/ark", + ] + deps_libcg += [ ":libcgark" ] +} + +src_libmplad = [ "src/ad/mad.cpp" ] + +src_libcglowerer = [ + "src/be/bbt.cpp", + "src/be/trycatchblockslower.cpp", + "src/be/lower.cpp", +] + +src_libmplbe = [ + "src/be/becommon.cpp", + "src/be/switch_lowerer.cpp", + "src/be/rt.cpp", +] + +src_libcgaarch64 = [ + "src/cg/aarch64/aarch64_abi.cpp", + "src/cg/aarch64/aarch64_call_conv.cpp", + "src/cg/aarch64/mpl_atomic.cpp", + "src/cg/aarch64/aarch64_cfi_generator.cpp", + "src/cg/aarch64/aarch64_cgfunc.cpp", + "src/cg/aarch64/aarch64_dependence.cpp", + "src/cg/aarch64/aarch64_data_dep_base.cpp", + "src/cg/aarch64/aarch64_ebo.cpp", + "src/cg/aarch64/aarch64_emitter.cpp", + "src/cg/aarch64/aarch64_fixshortbranch.cpp", + "src/cg/aarch64/aarch64_global.cpp", + "src/cg/aarch64/aarch64_proepilog.cpp", + "src/cg/aarch64/aarch64_operand.cpp", + "src/cg/aarch64/aarch64_color_ra.cpp", + "src/cg/aarch64/aarch64_reg_info.cpp", + "src/cg/aarch64/aarch64_ssa.cpp", + "src/cg/aarch64/aarch64_prop.cpp", + "src/cg/aarch64/aarch64_pgo_gen.cpp", + "src/cg/aarch64/aarch64_dce.cpp", + "src/cg/aarch64/aarch64_phi_elimination.cpp", + "src/cg/aarch64/aarch64_reg_coalesce.cpp", + "src/cg/aarch64/aarch64_ico.cpp", + "src/cg/aarch64/aarch64_insn.cpp", + "src/cg/aarch64/aarch64_isa.cpp", + "src/cg/aarch64/aarch64_memlayout.cpp", + "src/cg/aarch64/aarch64_args.cpp", + "src/cg/aarch64/aarch64_live.cpp", + "src/cg/aarch64/aarch64_yieldpoint.cpp", + "src/cg/aarch64/aarch64_offset_adjust.cpp", + "src/cg/aarch64/aarch64_optimize_common.cpp", + "src/cg/aarch64/aarch64_peep.cpp", + "src/cg/aarch64/aarch64_reaching.cpp", + "src/cg/aarch64/aarch64_schedule.cpp", + "src/cg/aarch64/aarch64_strldr.cpp", + "src/cg/aarch64/aarch64_ra_opt.cpp", + "src/cg/aarch64/aarch64_tailcall.cpp", + "src/cg/aarch64/aarch64_alignment.cpp", + "src/cg/aarch64/aarch64_regsaves.cpp", + "src/cg/aarch64/aarch64_utils.cpp", + 
"src/cg/aarch64/aarch64_cg.cpp", + "src/cg/aarch64/aarch64_validbit_opt.cpp", + "src/cg/aarch64/aarch64_rce.cpp", + "src/cg/aarch64/aarch64_cfgo.cpp", + "src/cg/aarch64/aarch64_isolate_fastpath.cpp", + "src/cg/aarch64/aarch64_rematerialize.cpp", +] + +src_libcgx86phases = [ + "src/cg/peep.cpp", + "src/cg/alignment.cpp", + "src/cg/reaching.cpp", + "src/cg/local_opt.cpp", + "src/cg/cfgo.cpp", +] + +src_libcgx8664 = [ + "src/cg/x86_64/x64_cg.cpp", + "src/cg/x86_64/x64_MPIsel.cpp", + "src/cg/x86_64/x64_cgfunc.cpp", + "src/cg/x86_64/x64_memlayout.cpp", + "src/cg/x86_64/x64_emitter.cpp", + "src/cg/x86_64/x64_abi.cpp", + "src/cg/x86_64/x64_call_conv.cpp", + "src/cg/x86_64/x64_standardize.cpp", + "src/cg/x86_64/x64_live.cpp", + "src/cg/x86_64/x64_reg_info.cpp", + "src/cg/x86_64/x64_proepilog.cpp", + "src/cg/x86_64/x64_args.cpp", + "src/cg/x86_64/x64_peep.cpp", + "src/cg/x86_64/x64_reaching.cpp", + "src/cg/x86_64/x64_local_opt.cpp", + "src/cg/x86_64/x64_cfgo.cpp", + "src/cg/x86_64/x64_isa.cpp", + "src/cg/x86_64/x64_optimize_common.cpp", +] + +src_libcgriscv64 = [ + "src/cg/riscv64/mpl_atomic.cpp", + "src/cg/riscv64/riscv64_abi.cpp", + "src/cg/riscv64/riscv64_args.cpp", + "src/cg/riscv64/riscv64_cg.cpp", + "src/cg/riscv64/riscv64_cgfunc.cpp", + "src/cg/riscv64/riscv64_color_ra.cpp", + "src/cg/riscv64/riscv64_dependence.cpp", + "src/cg/riscv64/riscv64_ebo.cpp", + "src/cg/riscv64/riscv64_emitter.cpp", + "src/cg/riscv64/riscv64_fixshortbranch.cpp", + "src/cg/riscv64/riscv64_global.cpp", + "src/cg/riscv64/riscv64_ico.cpp", + "src/cg/riscv64/riscv64_immediate.cpp", + "src/cg/riscv64/riscv64_insn.cpp", + "src/cg/riscv64/riscv64_isa.cpp", + "src/cg/riscv64/riscv64_live.cpp", + "src/cg/riscv64/riscv64_lsra.cpp", + "src/cg/riscv64/riscv64_memlayout.cpp", + "src/cg/riscv64/riscv64_offset_adjust.cpp", + "src/cg/riscv64/riscv64_operand.cpp", + "src/cg/riscv64/riscv64_optimize_common.cpp", + "src/cg/riscv64/riscv64_peep.cpp", + "src/cg/riscv64/riscv64_proepilog.cpp", + "src/cg/riscv64/riscv64_reaching.cpp", + "src/cg/riscv64/riscv64_reg_alloc.cpp", + "src/cg/riscv64/riscv64_schedule.cpp", + "src/cg/riscv64/riscv64_strldr.cpp", + "src/cg/riscv64/riscv64_yieldpoint.cpp", + "src/cg/riscv64/riscv64_ra_opt.cpp", +] + +src_libcgark = [ "src/cg/ark/foo.cpp" ] + +src_libcgphases = [ + "src/cg/cfi_generator.cpp", + "src/cg/cfgo.cpp", + "src/cg/local_opt.cpp", + "src/cg/ebo.cpp", + "src/cg/ra_opt.cpp", + "src/cg/tailcall.cpp", + "src/cg/cg_ssa.cpp", + "src/cg/cg_prop.cpp", + "src/cg/cg_dce.cpp", + "src/cg/cg_phi_elimination.cpp", + "src/cg/reg_coalesce.cpp", + "src/cg/global.cpp", + "src/cg/ico.cpp", + "src/cg/peep.cpp", + "src/cg/pressure.cpp", + "src/cg/reaching.cpp", + "src/cg/schedule.cpp", + "src/cg/strldr.cpp", + "src/cg/cg_dominance.cpp", + "src/cg/cg_pre.cpp", + "src/cg/cg_occur.cpp", + "src/cg/cg_ssu_pre.cpp", + "src/cg/cg_ssa_pre.cpp", + "src/cg/cg_pgo_gen.cpp", + "src/cg/cg_pgo_use.cpp", + "src/cg/regsaves.cpp", + "src/cg/cg_critical_edge.cpp", + "src/cg/alignment.cpp", + "src/cg/cg_validbit_opt.cpp", + "src/cg/cg_rce.cpp", + "src/cg/rematerialize.cpp", + "src/cg/control_dep_analysis.cpp", + "src/cg/data_dep_base.cpp", + "src/cg/data_dep_analysis.cpp", + "src/cg/global_schedule.cpp", +] + +src_libcg = [ + "src/cg/args.cpp", + "src/cg/cg_irbuilder.cpp", + "src/cg/cfi.cpp", + "src/cg/cgbb.cpp", + "src/cg/operand.cpp", + "src/cg/cgfunc.cpp", + "src/cg/cg_cfg.cpp", + "src/cg/cg_option.cpp", + "src/cg/cg_options.cpp", + "src/cg/dbg.cpp", + "src/cg/optimize_common.cpp", + "src/cg/eh_func.cpp", + "src/cg/emit.cpp", + 
"src/cg/live.cpp", + "src/cg/loop.cpp", + "src/cg/isel.cpp", + "src/cg/standardize.cpp", + "src/cg/memlayout.cpp", + "src/cg/yieldpoint.cpp", + "src/cg/label_creation.cpp", + "src/cg/offset_adjust.cpp", + "src/cg/reg_alloc.cpp", + "src/cg/reg_alloc_basic.cpp", + "src/cg/reg_alloc_lsra.cpp", + "src/cg/proepilog.cpp", + "src/cg/isolate_fastpath.cpp", + "src/cg/cg.cpp", + "src/cg/isa.cpp", + "src/cg/insn.cpp", + "src/cg/cg_phasemanager.cpp", +] + +cflags_cc -= [ "-DRC_NO_MMAP" ] +cflags_cc -= [ "-DMIR_JAVA=1" ] + +configs = [ "${MAPLEALL_ROOT}:mapleallcompilecfg" ] + +static_library("libmplad") { + sources = src_libmplad + include_dirs = include_directories + output_dir = "${root_out_dir}/lib/${HOST_ARCH}" + + cflags_cc += [ "-DRC_NO_MMAP" ] +} + +source_set("libcglowerer") { + sources = src_libcglowerer + include_dirs = include_directories +} + +static_library("libmplbe") { + sources = src_libmplbe + deps = deps_libmplbe + include_dirs = include_directories + output_dir = "${root_out_dir}/lib/${HOST_ARCH}" +} + +source_set("libcgaarch64") { + sources = src_libcgaarch64 + include_dirs = include_directories +} + +source_set("libcgx8664") { + sources = src_libcgx8664 + include_dirs = include_directories +} + +source_set("libcgx86phases") { + sources = src_libcgx86phases + include_dirs = include_directories +} + +source_set("libcgriscv64") { + sources = src_libcgriscv64 + include_dirs = include_directories +} + +source_set("libcgark") { + sources = src_libcgark + include_dirs = include_directories +} + +source_set("libcgphases") { + sources = src_libcgphases + include_dirs = include_directories +} + +static_library("libcg") { + sources = src_libcg + include_dirs = include_directories + deps = deps_libcg + output_dir = "${root_out_dir}/lib/${HOST_ARCH}" +} + +executable("maplegen") { + sources = [ + "${MAPLEALL_ROOT}/maple_be/mdgen/src/mdgenerator.cpp", + "${MAPLEALL_ROOT}/maple_be/mdgen/src/mdlexer.cpp", + "${MAPLEALL_ROOT}/maple_be/mdgen/src/mdmain.cpp", + "${MAPLEALL_ROOT}/maple_be/mdgen/src/mdparser.cpp", + "${MAPLEALL_ROOT}/maple_be/mdgen/src/mdrecord.cpp", + ] + deps = [ + "${MAPLEALL_ROOT}/maple_util:libmplutil", + "${MAPLEALL_ROOT}/mempool:libmempool", + "${THIRD_PARTY_ROOT}/bounds_checking_function:libHWSecureC", + ] + + include_dirs = [ + "${MAPLEALL_ROOT}/maple_be/mdgen/include", + "${MAPLEALL_ROOT}/maple_util/include", + "${MAPLEALL_ROOT}/maple_ir/include", + "${MAPLEALL_ROOT}/mpl2mpl/include", + "${MAPLEALL_ROOT}/mempool/include", + "${THIRD_PARTY_ROOT}/bounds_checking_function/include", + ] +} diff --git a/src/mapleall/maple_be/CMakeLists.txt b/src/mapleall/maple_be/CMakeLists.txt new file mode 100755 index 0000000000000000000000000000000000000000..15909177b84f6e2cf0ffa95765f9b42cc4c6f16e --- /dev/null +++ b/src/mapleall/maple_be/CMakeLists.txt @@ -0,0 +1,354 @@ +# +# Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# + +set(deps_maple + libcglowerer + libdriver_option + libmaple_driver + libmplphase + libcommandline + libmplutil + libmempool + libmaplepgo + libHWSecureC + libcg + libmplad + libmplbe + libmplipa + libmplir + libmplme + libmplmewpo + libmpl2mpl + libmaple +) + +set(inc_dirs + ${MAPLEALL_ROOT}/maple_be/include/cg + ${MAPLEALL_ROOT}/maple_be/include/ad + ${MAPLE_BUILD_OUTPUT}/common/target + ${MAPLEALL_ROOT}/maple_be/include/ad/target + ${MAPLEALL_ROOT}/maple_be/include/be + ${MAPLEALL_ROOT}/maple_driver/include + ${MAPLEALL_ROOT}/maple_pgo/include + ${MAPLEALL_ROOT}/maple_util/include + ${MAPLEALL_ROOT}/maple_ir/include + ${MAPLEALL_ROOT}/maple_me/include + ${MAPLEALL_ROOT}/mpl2mpl/include + ${MAPLEALL_ROOT}/mempool/include + ${MAPLEALL_ROOT}/maple_ipa/include + ${MAPLEALL_ROOT}/maple_ipa/include/old + ${MAPLEALL_ROOT}/maple_phase/include + ${THIRD_PARTY_ROOT}/bounds_checking_function/include + ${THIRD_PARTY_ROOT}/llvm_modified/llvm/include/llvm/BinaryFormat +) + +set(src_maple "") +set(deps_libmplbe "libcglowerer") + +if(${TARGET} STREQUAL "aarch64" OR ${TARGET} STREQUAL "aarch64_ilp32") + list(APPEND inc_dirs + ${MAPLEALL_ROOT}/maple_be/include/cg/aarch64 + ${MAPLEALL_ROOT}/maple_be/include/be/aarch64 + ) + list(APPEND src_maple + src/cg/aarch64/aarch64_abi.cpp + src/cg/aarch64/aarch64_call_conv.cpp + src/cg/aarch64/mpl_atomic.cpp + src/cg/aarch64/aarch64_cfi_generator.cpp + src/cg/aarch64/aarch64_cgfunc.cpp + src/cg/aarch64/aarch64_dependence.cpp + src/cg/aarch64/aarch64_data_dep_base.cpp + src/cg/aarch64/aarch64_ebo.cpp + src/cg/aarch64/aarch64_emitter.cpp + src/cg/aarch64/aarch64_fixshortbranch.cpp + src/cg/aarch64/aarch64_global.cpp + src/cg/aarch64/aarch64_proepilog.cpp + src/cg/aarch64/aarch64_operand.cpp + src/cg/aarch64/aarch64_color_ra.cpp + src/cg/aarch64/aarch64_reg_info.cpp + src/cg/aarch64/aarch64_ssa.cpp + src/cg/aarch64/aarch64_prop.cpp + src/cg/aarch64/aarch64_dce.cpp + src/cg/aarch64/aarch64_phi_elimination.cpp + src/cg/aarch64/aarch64_reg_coalesce.cpp + src/cg/aarch64/aarch64_ico.cpp + src/cg/aarch64/aarch64_insn.cpp + src/cg/aarch64/aarch64_isa.cpp + src/cg/aarch64/aarch64_memlayout.cpp + src/cg/aarch64/aarch64_args.cpp + src/cg/aarch64/aarch64_live.cpp + src/cg/aarch64/aarch64_yieldpoint.cpp + src/cg/aarch64/aarch64_offset_adjust.cpp + src/cg/aarch64/aarch64_optimize_common.cpp + src/cg/aarch64/aarch64_peep.cpp + src/cg/aarch64/aarch64_reaching.cpp + src/cg/aarch64/aarch64_schedule.cpp + src/cg/aarch64/aarch64_strldr.cpp + src/cg/aarch64/aarch64_ra_opt.cpp + src/cg/aarch64/aarch64_tailcall.cpp + src/cg/aarch64/aarch64_alignment.cpp + src/cg/aarch64/aarch64_regsaves.cpp + src/cg/aarch64/aarch64_utils.cpp + src/cg/aarch64/aarch64_cg.cpp + src/cg/aarch64/aarch64_validbit_opt.cpp + src/cg/aarch64/aarch64_rce.cpp + src/cg/aarch64/aarch64_cfgo.cpp + src/cg/aarch64/aarch64_pgo_gen.cpp + src/cg/aarch64/aarch64_isolate_fastpath.cpp + src/cg/aarch64/aarch64_rematerialize.cpp + src/cg/cfi_generator.cpp + src/cg/cfgo.cpp + src/cg/local_opt.cpp + src/cg/ebo.cpp + src/cg/ra_opt.cpp + src/cg/tailcall.cpp + src/cg/cg_pgo_gen.cpp + src/cg/cg_pgo_use.cpp + src/cg/cg_ssa.cpp + src/cg/cg_prop.cpp + src/cg/cg_dce.cpp + src/cg/cg_phi_elimination.cpp + src/cg/reg_coalesce.cpp + src/cg/global.cpp + src/cg/ico.cpp + src/cg/peep.cpp + src/cg/pressure.cpp + src/cg/reaching.cpp + src/cg/schedule.cpp + src/cg/strldr.cpp + src/cg/cg_dominance.cpp + src/cg/cg_pre.cpp + src/cg/cg_occur.cpp + src/cg/cg_ssu_pre.cpp + src/cg/cg_ssa_pre.cpp + src/cg/regsaves.cpp + src/cg/cg_critical_edge.cpp + 
src/cg/alignment.cpp + src/cg/cg_validbit_opt.cpp + src/cg/cg_rce.cpp + src/cg/rematerialize.cpp + src/cg/control_dep_analysis.cpp + src/cg/data_dep_base.cpp + src/cg/data_dep_analysis.cpp + src/cg/global_schedule.cpp + ) +endif() + +if(${TARGET} STREQUAL "x86_64") + list(APPEND inc_dirs + ${MAPLEALL_ROOT}/maple_be/include/cg/x86_64 + ${MAPLEALL_ROOT}/maple_be/include/be/x86_64 + ) + list(APPEND src_maple + src/cg/x86_64/x64_cg.cpp + src/cg/x86_64/x64_MPIsel.cpp + src/cg/x86_64/x64_cgfunc.cpp + src/cg/x86_64/x64_memlayout.cpp + src/cg/x86_64/x64_emitter.cpp + src/cg/x86_64/x64_abi.cpp + src/cg/x86_64/x64_call_conv.cpp + src/cg/x86_64/x64_standardize.cpp + src/cg/x86_64/x64_live.cpp + src/cg/x86_64/x64_reg_info.cpp + src/cg/x86_64/x64_proepilog.cpp + src/cg/x86_64/x64_args.cpp + src/cg/x86_64/x64_peep.cpp + src/cg/x86_64/x64_reaching.cpp + src/cg/x86_64/x64_local_opt.cpp + src/cg/x86_64/x64_cfgo.cpp + src/cg/x86_64/x64_isa.cpp + src/cg/x86_64/x64_optimize_common.cpp + src/cg/peep.cpp + src/cg/alignment.cpp + src/cg/reaching.cpp + src/cg/local_opt.cpp + src/cg/cfgo.cpp + ) +endif() + +if(${TARGET} STREQUAL "riscv64") + list(APPEND inc_dirs + ${MAPLEALL_ROOT}/maple_be/include/cg/riscv64 + ${MAPLEALL_ROOT}/maple_be/include/be/riscv64 + ) + list(APPEND src_maple + src/cg/riscv64/mpl_atomic.cpp + src/cg/riscv64/riscv64_abi.cpp + src/cg/riscv64/riscv64_args.cpp + src/cg/riscv64/riscv64_cg.cpp + src/cg/riscv64/riscv64_cgfunc.cpp + src/cg/riscv64/riscv64_color_ra.cpp + src/cg/riscv64/riscv64_dependence.cpp + src/cg/riscv64/riscv64_ebo.cpp + src/cg/riscv64/riscv64_emitter.cpp + src/cg/riscv64/riscv64_fixshortbranch.cpp + src/cg/riscv64/riscv64_global.cpp + src/cg/riscv64/riscv64_ico.cpp + src/cg/riscv64/riscv64_immediate.cpp + src/cg/riscv64/riscv64_insn.cpp + src/cg/riscv64/riscv64_isa.cpp + src/cg/riscv64/riscv64_live.cpp + src/cg/riscv64/riscv64_lsra.cpp + src/cg/riscv64/riscv64_memlayout.cpp + src/cg/riscv64/riscv64_offset_adjust.cpp + src/cg/riscv64/riscv64_operand.cpp + src/cg/riscv64/riscv64_optimize_common.cpp + src/cg/riscv64/riscv64_peep.cpp + src/cg/riscv64/riscv64_proepilog.cpp + src/cg/riscv64/riscv64_reaching.cpp + src/cg/riscv64/riscv64_reg_alloc.cpp + src/cg/riscv64/riscv64_schedule.cpp + src/cg/riscv64/riscv64_strldr.cpp + src/cg/riscv64/riscv64_yieldpoint.cpp + src/cg/riscv64/riscv64_ra_opt.cpp + ) +endif() + +if(${TARGET} STREQUAL "ark") + list(APPEND inc_dirs + ${MAPLEALL_ROOT}/maple_be/include/cg/ark + ${MAPLEALL_ROOT}/maple_be/include/be/ark + ) + list(APPEND src_maple + src/cg/ark/foo.cpp + ) +endif() + +set(src_libmplad "src/ad/mad.cpp") +set(src_libcglowerer + src/be/bbt.cpp + src/be/trycatchblockslower.cpp + src/be/lower.cpp +) +set(src_libmplbe + src/be/becommon.cpp + src/be/switch_lowerer.cpp + src/be/rt.cpp +) +set(src_libcg + src/cg/args.cpp + src/cg/cg_irbuilder.cpp + src/cg/cfi.cpp + src/cg/cgbb.cpp + src/cg/operand.cpp + src/cg/cgfunc.cpp + src/cg/cg_cfg.cpp + src/cg/cg_option.cpp + src/cg/cg_options.cpp + src/cg/dbg.cpp + src/cg/optimize_common.cpp + src/cg/eh_func.cpp + src/cg/emit.cpp + src/cg/live.cpp + src/cg/loop.cpp + src/cg/isel.cpp + src/cg/standardize.cpp + src/cg/memlayout.cpp + src/cg/yieldpoint.cpp + src/cg/label_creation.cpp + src/cg/offset_adjust.cpp + src/cg/reg_alloc.cpp + src/cg/reg_alloc_basic.cpp + src/cg/reg_alloc_lsra.cpp + src/cg/proepilog.cpp + src/cg/isolate_fastpath.cpp + src/cg/cg.cpp + src/cg/isa.cpp + src/cg/insn.cpp + src/cg/cg_phasemanager.cpp +) + +string(REPLACE "-DRC_NO_MMAP" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}") +string(REPLACE 
"-DMIR_JAVA=1" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}") + +# libmplad +add_library(libmplad STATIC ${src_libmplad}) +target_include_directories(libmplad PRIVATE ${inc_dirs}) +set_target_properties(libmplad PROPERTIES + ARCHIVE_OUTPUT_DIRECTORY ${MAPLE_BUILD_OUTPUT}/lib/${HOST_ARCH} + COMPILE_FLAGS "${CMAKE_CXX_FLAGS} -DRC_NO_MMAP" +) + +# maplegen +set(src_maplegen + ${MAPLEALL_ROOT}/maple_be/mdgen/src/mdgenerator.cpp + ${MAPLEALL_ROOT}/maple_be/mdgen/src/mdlexer.cpp + ${MAPLEALL_ROOT}/maple_be/mdgen/src/mdmain.cpp + ${MAPLEALL_ROOT}/maple_be/mdgen/src/mdparser.cpp + ${MAPLEALL_ROOT}/maple_be/mdgen/src/mdrecord.cpp +) +set(inc_maplegen + ${MAPLEALL_ROOT}/maple_be/mdgen/include + ${MAPLEALL_ROOT}/maple_util/include + ${MAPLEALL_ROOT}/maple_ir/include + ${MAPLEALL_ROOT}/mpl2mpl/include + ${MAPLEALL_ROOT}/mempool/include + ${THIRD_PARTY_ROOT}/bounds_checking_function/include +) +set(deps_maplegen + libmplutil + libmempool + libHWSecureC +) +add_executable(maplegen "${src_maplegen}") +set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -ldl") +set_target_properties(maplegen PROPERTIES + COMPILE_FLAGS "" + INCLUDE_DIRECTORIES "${inc_maplegen}" + LINK_LIBRARIES "${deps_maplegen}" + RUNTIME_OUTPUT_DIRECTORY "${MAPLE_BUILD_OUTPUT}/bin" +) + +#libcg +add_library(libcg STATIC ${src_libcg}) +set_target_properties(libcg PROPERTIES + COMPILE_FLAGS "" + INCLUDE_DIRECTORIES "${inc_dirs}" + LINK_LIBRARIES "" + ARCHIVE_OUTPUT_DIRECTORY ${MAPLE_BUILD_OUTPUT}/lib/${HOST_ARCH} +) + +#libmplbe +add_library(libmplbe STATIC ${src_libmplbe}) +set_target_properties(libmplbe PROPERTIES + COMPILE_FLAGS "" + INCLUDE_DIRECTORIES "${inc_dirs}" + LINK_LIBRARIES "${deps_libmplbe}" + ARCHIVE_OUTPUT_DIRECTORY ${MAPLE_BUILD_OUTPUT}/lib/${HOST_ARCH} +) + +# libcglowerer +add_library(libcglowerer STATIC ${src_libcglowerer}) +set_target_properties(libcglowerer PROPERTIES + INCLUDE_DIRECTORIES "${inc_dirs}" + ARCHIVE_OUTPUT_DIRECTORY ${MAPLE_BUILD_OUTPUT}/lib/${HOST_ARCH} +) + + +#maple +add_executable(maple "${src_maple}") +set_target_properties(maple PROPERTIES + COMPILE_FLAGS "" + INCLUDE_DIRECTORIES "${inc_dirs}" + LINK_LIBRARIES "${deps_maple}" + RUNTIME_OUTPUT_DIRECTORY "${MAPLE_BUILD_OUTPUT}/bin" +) + +message("deps_maplegen: ${deps_maplegen}") + + + + + diff --git a/src/mapleall/maple_be/include/ad/cortex_a55/sched_cortex_a55.td b/src/mapleall/maple_be/include/ad/cortex_a55/sched_cortex_a55.td new file mode 100644 index 0000000000000000000000000000000000000000..bb1127d737296c7a1e95732956f677fc8495b33f --- /dev/null +++ b/src/mapleall/maple_be/include/ad/cortex_a55/sched_cortex_a55.td @@ -0,0 +1,171 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan PSL v1. + * You can use this software according to the terms and conditions of the Mulan PSL v1. + * You may obtain a copy of Mulan PSL v1 at: + * + * http://license.coscl.org.cn/MulanPSL + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v1 for more details. + */ +DefType UnitType = Primary, And, Or; +DefType BypassType = Accumulator, Store, AluShift; + +// Architecture name +Class ArchitectureName ; +// Parallelism number +Class Parallelism ; + +Def ArchitectureName {cortex_a55}; +Def Parallelism {2}; + +// class parameters can be set as default. 
+// default parameters can only be placed at the end +// Class Unit :Name +Class Unit :string ; +// Class Reservation :Name +Class Reservation : string ; +// AnonClass Bypass : BypassNum, fromTypeReservation, toTypeReservation, BypassType +Class Bypass ; + +Def Unit : kUnitIdSlot0 {Primary}; +Def Unit : kUnitIdSlot1 {Primary}; +Def Unit : kUnitIdAgen {Primary}; +Def Unit : kUnitIdHazard {Primary}; +Def Unit : kUnitIdCrypto {Primary}; +Def Unit : kUnitIdMul {Primary}; +Def Unit : kUnitIdDiv {Primary}; +Def Unit : kUnitIdBranch {Primary}; +Def Unit : kUnitIdStAgu {Primary}; +Def Unit : kUnitIdLdAgu {Primary}; +Def Unit : kUnitIdFpAluLo {Primary}; +Def Unit : kUnitIdFpAluHi {Primary}; +Def Unit : kUnitIdFpMulLo {Primary}; +Def Unit : kUnitIdFpMulHi {Primary}; +Def Unit : kUnitIdFpDivLo {Primary}; +Def Unit : kUnitIdFpDivHi {Primary}; + +Def Unit : kUnitIdSlotS {Or, [kUnitIdSlot0, kUnitIdSlot1]}; +Def Unit : kUnitIdFpAluS {Or, [kUnitIdFpAluLo, kUnitIdFpAluHi]}; +Def Unit : kUnitIdFpMulS {Or, [kUnitIdFpMulLo, kUnitIdFpMulHi]}; +Def Unit : kUnitIdFpDivS {Or, [kUnitIdFpDivLo, kUnitIdFpDivHi]}; + +Def Unit : kUnitIdSlotD {And, [kUnitIdSlot0, kUnitIdSlot1]}; +Def Unit : kUnitIdFpAluD {And, [kUnitIdFpAluLo, kUnitIdFpAluHi]}; +Def Unit : kUnitIdFpMulD {And, [kUnitIdFpMulLo, kUnitIdFpMulHi]}; +Def Unit : kUnitIdFpDivD {And, [kUnitIdFpDivLo, kUnitIdFpDivHi]}; +Def Unit : kUnitIdSlotSHazard {And, [kUnitIdSlotS, kUnitIdHazard]}; +Def Unit : kUnitIdSlotSMul {And, [kUnitIdSlotS, kUnitIdMul]}; +Def Unit : kUnitIdSlotSBranch {And, [kUnitIdSlotS, kUnitIdBranch]}; +Def Unit : kUnitIdSlotSAgen {And, [kUnitIdSlotS, kUnitIdAgen]}; +Def Unit : kUnitIdSlotDAgen {And, [kUnitIdSlot0, kUnitIdSlot1, kUnitIdAgen]}; +Def Unit : kUnitIdSlot0LdAgu {And, [kUnitIdSlot0, kUnitIdLdAgu]}; +Def Unit : kUnitIdSlot0StAgu {And, [kUnitIdSlot0, kUnitIdStAgu]}; +Def Unit : nothing {}; + +Def Reservation : kLtUndef {0}; +Def Reservation : kLtShift {2, [kUnitIdSlotS]}; +Def Reservation : kLtShiftReg {2, [ kUnitIdSlotS, kUnitIdHazard]}; +Def Reservation : kLtAlu {3, [kUnitIdSlotS]}; +Def Reservation : kLtAluShift {3, [kUnitIdSlotS]}; +Def Reservation : kLtAluShiftReg {3, [kUnitIdSlotS, kUnitIdHazard]}; +Def Reservation : kLtAluExtr {3, [kUnitIdSlot1]}; +Def Reservation : kLtMul {4, [kUnitIdSlotS, kUnitIdMul]}; +Def Reservation : kLtDiv {4, [kUnitIdSlot0, kUnitIdDiv, kUnitIdDiv]}; +Def Reservation : kLtLoad1 {4, [kUnitIdSlotSAgen, kUnitIdLdAgu]}; +Def Reservation : kLtStore1 {2, [kUnitIdSlotSAgen, kUnitIdStAgu]}; +Def Reservation : kLtLoad2 {4, [kUnitIdSlotDAgen, kUnitIdSlot0LdAgu, kUnitIdLdAgu]}; +Def Reservation : kLtStore2 {2, [ kUnitIdSlotSAgen, kUnitIdStAgu]}; +Def Reservation : kLtLoad3plus {6, [kUnitIdSlotDAgen, kUnitIdSlot0LdAgu, kUnitIdLdAgu]}; +Def Reservation : kLtStore3plus {2, [kUnitIdSlotDAgen, kUnitIdSlot0StAgu, kUnitIdStAgu]}; +Def Reservation : kLtBranch {0, [kUnitIdSlotSBranch]}; +Def Reservation : kLtFpalu {4, [kUnitIdSlotS, kUnitIdFpAluS]}; +Def Reservation : kLtFconst {2, [kUnitIdSlotS, kUnitIdFpAluS]}; +Def Reservation : kLtFpmul {4, [kUnitIdSlotS, kUnitIdFpMulS]}; +Def Reservation : kLtFpmac {8, [kUnitIdSlotS, kUnitIdFpMulS, nothing, nothing, nothing, kUnitIdFpAluS]}; +Def Reservation : kLtR2f {2, [kUnitIdSlotS, kUnitIdFpAluS]}; +Def Reservation : kLtF2r {4, [kUnitIdSlotS, kUnitIdFpAluS]}; +Def Reservation : kLtR2fCvt {4, [kUnitIdSlotS, kUnitIdFpAluS]}; +Def Reservation : kLtF2rCvt {5, [kUnitIdSlotS, kUnitIdFpAluS]}; +Def Reservation : kLtFFlags {5, [kUnitIdSlotS]}; +Def Reservation : kLtFLoad64 {3, 
[kUnitIdSlotSAgen, kUnitIdLdAgu]}; +Def Reservation : kLtFLoadMany {4, [kUnitIdSlotDAgen, kUnitIdSlot0LdAgu, kUnitIdLdAgu]}; +Def Reservation : kLtFStore64 {0, [kUnitIdSlotSAgen, kUnitIdStAgu]}; +Def Reservation : kLtFStoreMany {0, [kUnitIdSlotSAgen, kUnitIdSlot0StAgu, kUnitIdStAgu]}; +Def Reservation : kLtAdvsimdAlu {4, [kUnitIdSlotS, kUnitIdFpAluS]}; +Def Reservation : kLtAdvsimdAluQ {4, [kUnitIdSlot0, kUnitIdFpAluD]}; +Def Reservation : kLtAdvsimdMul {4, [kUnitIdSlotS, kUnitIdFpMulS]}; +Def Reservation : kLtAdvsimdMulQ {4, [kUnitIdSlot0, kUnitIdFpMulD]}; +Def Reservation : kLtAdvsimdDivS {14, [kUnitIdSlot0, kUnitIdFpMulS, kUnitIdFpDivS]}; +Def Reservation : kLtAdvsimdDivD {29, [kUnitIdSlot0, kUnitIdFpMulS, kUnitIdFpDivS]}; +Def Reservation : kLtAdvsimdDivSQ {14, [kUnitIdSlotD, kUnitIdFpMulD, kUnitIdFpDivD]}; +Def Reservation : kLtAdvsimdDivdQ {29, [kUnitIdSlotD, kUnitIdFpMulD, kUnitIdFpDivD]}; +Def Reservation : kLtCryptoAese {3, [kUnitIdSlot0]}; +Def Reservation : kLtCryptoAesmc {3, [kUnitIdSlotS]}; +Def Reservation : kLtClinit {14, [kUnitIdSlotS, nothing, kUnitIdSlotDAgen, kUnitIdSlot0LdAgu, + kUnitIdLdAgu, nothing, kUnitIdSlotDAgen, kUnitIdSlot0LdAgu, + kUnitIdLdAgu, nothing, kUnitIdSlotDAgen, kUnitIdSlot0LdAgu, + kUnitIdLdAgu]}; +Def Reservation : kLtAdrpLdr {6, [kUnitIdSlotS, nothing, kUnitIdSlotDAgen, kUnitIdSlot0LdAgu, + kUnitIdLdAgu]}; +Def Reservation : kLtClinitTail {8, [kUnitIdSlotDAgen, kUnitIdSlot0LdAgu, kUnitIdLdAgu, nothing, + kUnitIdSlotDAgen, kUnitIdSlot0LdAgu, kUnitIdLdAgu]}; + +Def Bypass {0, [kLtShift, kLtShiftReg], [kLtAlu]}; +Def Bypass {1, [kLtShift], [kLtShift, kLtShiftReg, kLtAluShift, kLtAluShiftReg]}; +Def Bypass {1, [kLtShiftReg], [kLtShift, kLtShiftReg, kLtAluShift, kLtAluShiftReg]}; +Def Bypass {1, [kLtAlu, kLtAluShift, kLtAluShiftReg], [kLtAlu]}; +Def Bypass {1, [kLtAlu, kLtAluShift, kLtAluShiftReg, kLtAluExtr], [kLtAluShift], AluShift}; +Def Bypass {1, [kLtAlu, kLtAluShift, kLtAluShiftReg, kLtAluExtr], [kLtAluShiftReg], AluShift}; +Def Bypass {2, [kLtAlu, kLtAluShift, kLtAluShiftReg, kLtAluExtr], [kLtAluShift]}; +Def Bypass {2, [kLtAlu, kLtAluShift, kLtAluShiftReg, kLtAluExtr], [kLtAluShiftReg]}; +Def Bypass {2, [kLtAlu, kLtAluShift, kLtAluShiftReg, kLtAluExtr], [kLtAluExtr]}; +Def Bypass {2, [kLtAlu, kLtAluShift, kLtAluShiftReg, kLtAluExtr], [kLtShift]}; +Def Bypass {2, [kLtAlu, kLtAluShift, kLtAluShiftReg, kLtAluExtr], [kLtShiftReg]}; +Def Bypass {2, [kLtMul], [kLtMul], Accumulator}; +Def Bypass {2, [kLtMul], [kLtAlu]}; +Def Bypass {3, [kLtMul], [kLtAluShift, kLtAluShiftReg, kLtAluExtr, kLtShift, kLtShiftReg]}; +Def Bypass {2, [kLtLoad1], [kLtAlu]}; +Def Bypass {3, [kLtLoad1], [kLtAluShift, kLtAluShiftReg, kLtAluExtr, kLtShift, kLtShiftReg]}; +Def Bypass {3, [kLtLoad2], [kLtAlu]}; +Def Bypass {0, [kLtAlu], [kLtStore1, kLtStore2, kLtStore3plus], Store}; +Def Bypass {0, [kLtAluShift], [kLtStore1, kLtStore2, kLtStore3plus], Store}; +Def Bypass {0, [kLtAluShiftReg], [kLtStore1, kLtStore2, kLtStore3plus], Store}; +Def Bypass {0, [kLtAluExtr], [ kLtStore1, kLtStore2, kLtStore3plus], Store}; +Def Bypass {0, [kLtShift], [kLtStore1, kLtStore2, kLtStore3plus], Store}; +Def Bypass {0, [kLtShiftReg], [kLtStore1, kLtStore2, kLtStore3plus], Store}; +Def Bypass {1, [kLtMul], [kLtStore1, kLtStore2, kLtStore3plus], Store}; +Def Bypass {1, [kLtLoad1], [kLtStore1, kLtStore2, kLtStore3plus], Store}; +Def Bypass {1, [kLtLoad2], [kLtStore1, kLtStore2, kLtStore3plus], Store}; +Def Bypass {1, [kLtLoad3plus], [kLtStore1, kLtStore2, kLtStore3plus], Store}; +Def Bypass {0, 
[kLtAlu, kLtAluShift, kLtAluShiftReg, kLtAluExtr, kLtShift, kLtShiftReg], [kLtR2f]}; +Def Bypass {1, [kLtMul, kLtLoad1, kLtLoad2], [kLtR2f]}; +Def Bypass {2, [kLtAlu, kLtAluShift, kLtAluShiftReg, kLtAluExtr], [kLtR2fCvt]}; +Def Bypass {3, [kLtMul, kLtLoad1, kLtLoad2], [kLtR2fCvt]}; +Def Bypass {0, [kLtAlu, kLtAluShift, kLtAluShiftReg, kLtAluExtr, kLtShift, kLtShiftReg], [kLtBranch]}; +Def Bypass {1, [kLtFpalu, kLtFpmul, kLtR2f, kLtR2fCvt, kLtFconst], [kLtFpmac], Accumulator}; +Def Bypass {1, [kLtFLoad64, kLtFLoadMany], [kLtFpmac]}; +Def Bypass {4, [kLtFpmac], [kLtFpmac], Accumulator}; +Def Bypass {0, [kLtCryptoAese], [kLtCryptoAesmc]}; +Def Bypass {1, [kLtShiftReg], [kLtClinit]}; +Def Bypass {2, [kLtAlu, kLtAluShift, kLtAluExtr], [kLtClinit]}; +Def Bypass {3, [kLtMul, kLtLoad1], [kLtClinit]}; +Def Bypass {13, [kLtAlu], [kLtClinit]}; +Def Bypass {11, [kLtClinit], [kLtStore1, kLtStore3plus], Store}; +Def Bypass {11, [kLtClinit], [kLtR2f]}; +Def Bypass {13, [kLtClinit], [kLtR2fCvt]}; +Def Bypass {1, [kLtShiftReg], [kLtAdrpLdr]}; +Def Bypass {2, [kLtAlu, kLtAluShift, kLtAluExtr], [kLtAdrpLdr]}; +Def Bypass {3, [kLtMul, kLtLoad1], [kLtAdrpLdr]}; +Def Bypass {5, [kLtAdrpLdr], [kLtAlu]}; +Def Bypass {3, [kLtAdrpLdr], [kLtStore1, kLtStore3plus], Store}; +Def Bypass {3, [kLtAdrpLdr], [kLtR2f]}; +Def Bypass {5, [kLtAdrpLdr], [kLtR2fCvt]}; +Def Bypass {7, [kLtClinitTail], [kLtAlu]}; +Def Bypass {5, [kLtClinitTail], [kLtStore1, kLtStore3plus], Store}; +Def Bypass {5, [kLtClinitTail], [kLtR2f]}; +Def Bypass {7, [kLtClinitTail], [kLtR2fCvt]}; diff --git a/src/mapleall/maple_be/include/ad/mad.h b/src/mapleall/maple_be/include/ad/mad.h new file mode 100644 index 0000000000000000000000000000000000000000..9e28dc6ef0dc28fa7d8948b20773e32c395ee6f1 --- /dev/null +++ b/src/mapleall/maple_be/include/ad/mad.h @@ -0,0 +1,249 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MAPLEBE_INCLUDE_AD_MAD_H +#define MAPLEBE_INCLUDE_AD_MAD_H +#include +#include "types_def.h" +#include "mpl_logging.h" +#include "insn.h" + +namespace maplebe { +enum UnitId : maple::uint32 { +#include "mplad_unit_id.def" + kUnitIdLast +}; + +enum UnitType : maple::uint8 { + kUnitTypePrimart, + kUnitTypeOr, + kUnitTypeAnd, + KUnitTypeNone +}; + +enum RealUnitKind : maple::uint32 { + kUnitKindUndef, +#include "mplad_unit_kind.def" + kUnitKindLast = 13 +}; + +enum SlotType : maple::uint8 { + kSlotNone, + kSlot0, + kSlot1, + kSlotAny, + kSlots, +}; + +/* machine model */ +enum LatencyType : maple::uint32 { + /* LT: latency */ +#include "mplad_latency_type.def" + kLtLast, +}; + +class Unit { + public: + explicit Unit(enum UnitId theUnitId); + Unit(enum UnitType theUnitType, enum UnitId theUnitId, int numOfUnits, ...); + ~Unit() = default; + + enum UnitType GetUnitType() const { + return unitType; + } + + enum UnitId GetUnitId() const { + return unitId; + }; + + const std::vector &GetCompositeUnits() const; + + std::string GetName() const; + bool IsFree(uint32 cycle) const; + void Occupy(const Insn &insn, uint32 cycle); + void Release(); + void AdvanceCycle(); + void Dump(int indent = 0) const; + maple::uint32 GetOccupancyTable() const; + + void SetOccupancyTable(maple::uint32 table) { + occupancyTable = table; + } + + private: + void PrintIndent(int indent) const; + + enum UnitId unitId; + enum UnitType unitType; + maple::uint32 occupancyTable; + std::vector compositeUnits; +}; + +class Reservation { + public: + Reservation(LatencyType t, int l, int n, ...); + ~Reservation() = default; + + bool IsEqual(maple::uint32 typ) const { + return typ == type; + } + + int GetLatency() const { + return latency; + } + + uint32 GetUnitNum() const { + return unitNum; + } + + enum SlotType GetSlot() const { + return slot; + } + + const std::string &GetSlotName() const; + + Unit * const *GetUnit() const { + return units; + } + + private: + static const int kMaxUnit = 13; + LatencyType type; + int latency; + uint32 unitNum; + Unit *units[kMaxUnit]; + enum SlotType slot = kSlotNone; + + SlotType GetSlotType(UnitId unitID) const; +}; + +class Bypass { + public: + Bypass(LatencyType d, LatencyType u, int l) : def(d), use(u), latency(l) {} + virtual ~Bypass() = default; + + virtual bool CanBypass(const Insn &defInsn, const Insn &useInsn) const; + + int GetLatency() const { + return latency; + } + + LatencyType GetDefType() const { + return def; + } + + LatencyType GetUseType() const { + return use; + } + + private: + LatencyType def; + LatencyType use; + int latency; +}; + +class MAD { + public: + MAD() { + InitUnits(); + InitParallelism(); + InitReservation(); + InitBypass(); + } + + ~MAD(); + + using BypassVector = std::vector; + + void InitUnits() const; + void InitParallelism() const; + void InitReservation() const; + void InitBypass() const; + bool IsSlot0Free() const; + bool IsFullIssued() const; + int GetLatency(const Insn &def, const Insn &use) const; + int DefaultLatency(const Insn &insn) const; + Reservation *FindReservation(const Insn &insn) const; + void AdvanceCycle() const; + void ReleaseAllUnits() const; + void SaveStates(std::vector &occupyTable, int size) const; + void RestoreStates(std::vector &occupyTable, int size) const; + + int GetMaxParallelism() const { + return parallelism; + } + + const Unit *GetUnitByUnitId(enum UnitId uId) const { + CHECK_FATAL(!allUnits.empty(), "CHECK_CONTAINER_EMPTY"); + return allUnits[uId]; + } + + static void AddUnit(Unit &u) { + 
allUnits.emplace_back(&u); + } + + static maple::uint32 GetAllUnitsSize() { + return allUnits.size(); + } + + static void AddReservation(Reservation &rev) { + allReservations.emplace_back(&rev); + } + + static void AddBypass(Bypass &bp) { + ASSERT(bp.GetDefType() < kLtLast, "out of range"); + ASSERT(bp.GetUseType() < kLtLast, "out of range"); + (bypassArrays[bp.GetDefType()][bp.GetUseType()]).push_back(&bp); + } + + protected: + static void SetMaxParallelism(int num) { + parallelism = num; + } + + int BypassLatency(const Insn &def, const Insn &use) const; + + private: + static int parallelism; + static std::vector allUnits; + static std::vector allReservations; + static std::array, kLtLast> bypassArrays; +}; + +class AluShiftBypass : public Bypass { + public: + AluShiftBypass(LatencyType d, LatencyType u, int l) : Bypass(d, u, l) {} + ~AluShiftBypass() override = default; + + bool CanBypass(const Insn &defInsn, const Insn &useInsn) const override; +}; + +class AccumulatorBypass : public Bypass { + public: + AccumulatorBypass(LatencyType d, LatencyType u, int l) : Bypass(d, u, l) {} + ~AccumulatorBypass() override = default; + + bool CanBypass(const Insn &defInsn, const Insn &useInsn) const override; +}; + +class StoreBypass : public Bypass { + public: + StoreBypass(LatencyType d, LatencyType u, int l) : Bypass(d, u, l) {} + ~StoreBypass() override = default; + + bool CanBypass(const Insn &defInsn, const Insn &useInsn) const override; +}; +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_AD_MAD_H */ diff --git a/src/mapleall/maple_be/include/ad/target/mplad_unit_kind.def b/src/mapleall/maple_be/include/ad/target/mplad_unit_kind.def new file mode 100644 index 0000000000000000000000000000000000000000..0c56044925a600296d4fbf96bbd2944e8d1d7780 --- /dev/null +++ b/src/mapleall/maple_be/include/ad/target/mplad_unit_kind.def @@ -0,0 +1,28 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan PSL v1. + * You can use this software according to the terms and conditions of the Mulan PSL v1. + * You may obtain a copy of Mulan PSL v1 at: + * + * http://license.coscl.org.cn/MulanPSL + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v1 for more details. + */ +/* cortex_a55 function unit ID definition: */ +kUnitKindSlot0 = 1, +kUnitKindAgen = 2, +kUnitKindHazard = 4, +kUnitKindCrypto = 8, +kUnitKindMul = 16, +kUnitKindDiv = 32, +kUnitKindBranch = 64, +kUnitKindStAgu = 128, +kUnitKindLdAgu = 256, +kUnitKindFpAlu = 512, +kUnitKindFpMul = 1024, +kUnitKindFpDiv = 2048, + diff --git a/src/mapleall/maple_be/include/be/array_base_name.def b/src/mapleall/maple_be/include/be/array_base_name.def new file mode 100644 index 0000000000000000000000000000000000000000..42c8905f3c212fffdd8d3b8bc0b9582b9ceaffe6 --- /dev/null +++ b/src/mapleall/maple_be/include/be/array_base_name.def @@ -0,0 +1,17 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan PSL v1. + * You can use this software according to the terms and conditions of the Mulan PSL v1. 
+ * You may obtain a copy of Mulan PSL v1 at: + * + * http://license.coscl.org.cn/MulanPSL + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v1 for more details. + */ +"ALjava_2Flang_2FObject_3B", +"ALjava_2Flang_2FClass_3B", +"ALjava_2Flang_2FString_3B" diff --git a/src/mapleall/maple_be/include/be/array_klass_name.def b/src/mapleall/maple_be/include/be/array_klass_name.def new file mode 100644 index 0000000000000000000000000000000000000000..3812b8c1224c82c5ea1c73995bfc9bbdeb916063 --- /dev/null +++ b/src/mapleall/maple_be/include/be/array_klass_name.def @@ -0,0 +1,26 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan PSL v1. + * You can use this software according to the terms and conditions of the Mulan PSL v1. + * You may obtain a copy of Mulan PSL v1 at: + * + * http://license.coscl.org.cn/MulanPSL + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v1 for more details. + */ +"ALjava_2Flang_2FObject_3B", +"ALjava_2Flang_2FClass_3B", +"ALjava_2Flang_2FString_3B", +"ALjava_2Futil_2FFormatter_24Flags_3B", +"ALjava_2Futil_2FHashMap_24Node_3B", +"ALjava_2Futil_2FFormatter_24FormatString_3B", +"ALjava_2Flang_2FCharSequence_3B", +"ALjava_2Flang_2FThreadLocal_24ThreadLocalMap_24Entry_3B", +"ALjava_2Futil_2FHashtable_24HashtableEntry_3B", +"ALlibcore_2Freflect_2FAnnotationMember_3B", +"ALsun_2Fsecurity_2Futil_2FDerValue_3B", +"ALsun_2Fsecurity_2Fx509_2FAVA_3B" \ No newline at end of file diff --git a/src/mapleall/maple_be/include/be/bbt.h b/src/mapleall/maple_be/include/be/bbt.h new file mode 100644 index 0000000000000000000000000000000000000000..cdae745afdd06eb56d8a279ebf6aa9bb416b1a56 --- /dev/null +++ b/src/mapleall/maple_be/include/be/bbt.h @@ -0,0 +1,140 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_BE_BBT_H +#define MAPLEBE_INCLUDE_BE_BBT_H +/* MapleIR headers. */ +#include "mir_nodes.h" +#include "mir_lower.h" +namespace maplebe { +using namespace maple; + +class BBT { + /* + * if stmt is a switch/rangegoto, succs gets defined, and condJumpBranch == fallthruBranch == nullptr. + * otherwise, succs.size() ==0 && + * 1. for cond br stmt, both condJumpBranch and fallthruBranch are defined. + * 2. if bb ends with 'throw', both fields get nullptr. + * 3. 
for the others, condJumpBranch == nullptr && only fallthruBranch is defined + */ + public: + enum BBTType : uint8 { + kBBPlain, + kBBTry, + kBBEndTry, + kBBCatch + }; + + BBT(StmtNode *s, StmtNode *e, MemPool *memPool) + : alloc(memPool), + type(kBBPlain), + succs(alloc.Adapter()), + labelIdx(MIRLabelTable::GetDummyLabel()), + firstStmt(s != nullptr ? s : e), + lastStmt(e) {} + + ~BBT() = default; + + void Extend(const StmtNode *sNode, StmtNode *eNode) { + CHECK_FATAL(lastStmt != nullptr, "nullptr check"); + CHECK_FATAL(sNode != nullptr ? lastStmt->GetNext() == sNode : lastStmt->GetNext() == eNode, "Extend fail"); + lastStmt = eNode; + } + + void SetLabelIdx(LabelIdx li) { + labelIdx = li; + } + + bool IsLabeled() const { + return labelIdx != MIRLabelTable::GetDummyLabel(); + } + + LabelIdx GetLabelIdx() const { + return labelIdx; + } + + void SetType(BBTType t, StmtNode &k) { + type = t; + keyStmt = &k; + } + + bool IsTry() const { + return type == kBBTry; + } + + bool IsEndTry() const { + return type == kBBEndTry; + } + + bool IsCatch() const { + return type == kBBCatch; + } + + void AddSuccs(BBT *bb) { + succs.emplace_back(bb); + } + + void SetCondJumpBranch(BBT *bb) { + condJumpBranch = bb; + } + + BBT *GetCondJumpBranch() { + return condJumpBranch; + } + void SetFallthruBranch(BBT *bb) { + fallthruBranch = bb; + } + + BBT *GetFallthruBranch() { + return fallthruBranch; + } + + StmtNode *GetFirstStmt() { + return firstStmt; + } + + void SetFirstStmt(StmtNode &stmt) { + firstStmt = &stmt; + } + + StmtNode *GetLastStmt() { + return lastStmt; + } + + void SetLastStmt(StmtNode &stmt) { + lastStmt = &stmt; + } + + StmtNode *GetKeyStmt() { + return keyStmt; + } + +#if DEBUG + void Dump(const MIRModule &mod) const; + static void ValidateStmtList(StmtNode *head, StmtNode *detached = nullptr); +#endif + private: + MapleAllocator alloc; + BBTType type; + BBT *condJumpBranch = nullptr; + BBT *fallthruBranch = nullptr; + MapleVector succs; + LabelIdx labelIdx; + StmtNode *firstStmt; + StmtNode *lastStmt; + StmtNode *keyStmt = nullptr; +}; +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_BE_BBT_H */ \ No newline at end of file diff --git a/src/mapleall/maple_be/include/be/becommon.h b/src/mapleall/maple_be/include/be/becommon.h new file mode 100644 index 0000000000000000000000000000000000000000..17c8b24494949898ed097a13d063262123622ea5 --- /dev/null +++ b/src/mapleall/maple_be/include/be/becommon.h @@ -0,0 +1,245 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_BE_BECOMMON_H +#define MAPLEBE_INCLUDE_BE_BECOMMON_H +/* C++ headers. */ +#include +#include +/* Basic Maple-independent utility functions */ +#include "common_utils.h" +/* MapleIR headers. 
*/ +#include "mir_nodes.h" /* maple_ir/include, for BaseNode */ +#include "mir_type.h" /* maple_ir/include, for MIRType */ +#include "mir_module.h" /* maple_ir/include, for mirModule */ + +namespace maplebe { +using namespace maple; + +enum BitsPerByte : uint8 { + kBitsPerByte = 8, + kLog2BitsPerByte = 3 +}; + +class JClassFieldInfo { /* common java class field info */ + public: + /* constructors */ + JClassFieldInfo() : isRef(false), isUnowned(false), isWeak(false), offset(0) {} + + JClassFieldInfo(bool isRef, bool isUnowned, bool isWeak, uint32 offset) + : isRef(isRef), isUnowned(isUnowned), isWeak(isWeak), offset(offset) {} + + ~JClassFieldInfo() = default; + + bool IsRef() const { + return isRef; + } + + bool IsUnowned() const { + return isUnowned; + } + + bool IsWeak() const { + return isWeak; + } + + uint32 GetOffset() const { + return offset; + } + + private: + bool isRef; /* used to generate object-map */ + bool isUnowned; /* used to mark unowned fields for RC */ + bool isWeak; /* used to mark weak fields for RC */ + uint32 offset; /* offset from the start of the java object */ +}; + +using JClassLayout = MapleVector; /* java class layout info */ + +class BECommon { + public: + explicit BECommon(MIRModule &mod); + + ~BECommon() = default; + + void LowerTypeAttribute(MIRType &ty); + + void LowerJavaTypeAttribute(MIRType &ty); + + void LowerJavaVolatileInClassType(MIRClassType &ty); + + void LowerJavaVolatileForSymbol(MIRSymbol &sym) const; + + void ComputeTypeSizesAligns(MIRType &ty, uint8 align = 0); + + void GenFieldOffsetMap(const std::string &className); + + void GenFieldOffsetMap(MIRClassType &classType, FILE &outFile); + + void GenObjSize(const MIRClassType &classType, FILE &outFile) const; + + std::pair GetFieldOffset(MIRStructType &structType, FieldID fieldID); + + bool IsRefField(MIRStructType &structType, FieldID fieldID) const; + + /* some class may has incomplete type definition. provide an interface to check them. */ + bool HasJClassLayout(MIRClassType &klass) const { + return (jClassLayoutTable.find(&klass) != jClassLayoutTable.end()); + } + + const JClassLayout &GetJClassLayout(MIRClassType &klass) const { + return *(jClassLayoutTable.at(&klass)); + } + + void AddNewTypeAfterBecommon(uint32 oldTypeTableSize, uint32 newTypeTableSize); + + void AddElementToJClassLayout(MIRClassType &klass, JClassFieldInfo info); + + bool HasFuncReturnType(MIRFunction &func) const { + return (funcReturnType.find(&func) != funcReturnType.end()); + } + + const TyIdx GetFuncReturnType(MIRFunction &func) const { + return (funcReturnType.at(&func)); + } + + void AddElementToFuncReturnType(MIRFunction &func, const TyIdx tyIdx); + + MIRType *BeGetOrCreatePointerType(const MIRType &pointedType); + + MIRType *BeGetOrCreateFunctionType(TyIdx tyIdx, const std::vector &vecTy, + const std::vector &vecAt); + + BaseNode *GetAddressOfNode(const BaseNode &node); + + bool CallIsOfAttr(FuncAttrKind attr, const StmtNode *narynode) const; + + PrimType GetAddressPrimType() const { + return GetLoweredPtrType(); + } + + /* update typeSizeTable and typeAlignTable when new type is created */ + void UpdateTypeTable(MIRType &ty) { + if (!TyIsInSizeAlignTable(ty)) { + AddAndComputeSizeAlign(ty); + } + } + + /* Global type table might be updated during lowering for C/C++. 
*/ + void FinalizeTypeTable(const MIRType &ty); + + uint32 GetFieldIdxIncrement(const MIRType &ty) const { + if (ty.GetKind() == kTypeClass) { + /* number of fields + 2 */ + return static_cast(ty).GetFieldsSize() + 2; + } else if (ty.GetKind() == kTypeStruct) { + /* number of fields + 1 */ + return static_cast(ty).GetFieldsSize() + 1; + } + return 1; + } + + MIRModule &GetMIRModule() const { + return mirModule; + } + + uint64 GetTypeSize(uint32 idx) const { + return typeSizeTable.at(idx); + } + uint32 GetSizeOfTypeSizeTable() const { + return typeSizeTable.size(); + } + bool IsEmptyOfTypeSizeTable() const { + return typeSizeTable.empty(); + } + void SetTypeSize(uint32 idx, uint64 value) { + typeSizeTable.at(idx) = value; + } + void AddTypeSize(uint64 value) { + typeSizeTable.emplace_back(value); + } + + void AddTypeSizeAndAlign(const TyIdx tyIdx, uint64 value) { + if (typeSizeTable.size() == tyIdx) { + typeSizeTable.emplace_back(value); + typeAlignTable.emplace_back(value); + } else { + CHECK_FATAL(typeSizeTable.size() > tyIdx, "there are some types haven't set type size and align, %d"); + } + } + + uint8 GetTypeAlign(uint32 idx) const { + return typeAlignTable.at(idx); + } + size_t GetSizeOfTypeAlignTable() const { + return typeAlignTable.size(); + } + bool IsEmptyOfTypeAlignTable() const { + return typeAlignTable.empty(); + } + void SetTypeAlign(uint32 idx, uint8 value) { + typeAlignTable.at(idx) = value; + } + void AddTypeAlign(uint8 value) { + typeAlignTable.emplace_back(value); + } + + bool GetHasFlexibleArray(uint32 idx) const { + return typeHasFlexibleArray.at(idx); + } + void SetHasFlexibleArray(uint32 idx, bool value) { + typeHasFlexibleArray.at(idx) = value; + } + + FieldID GetStructFieldCount(uint32 idx) const { + return structFieldCountTable.at(idx); + } + uint32 GetSizeOfStructFieldCountTable() const { + return structFieldCountTable.size(); + } + void SetStructFieldCount(uint32 idx, FieldID value) { + structFieldCountTable.at(idx) = value; + } + void AppendStructFieldCount(uint32 idx, FieldID value) { + structFieldCountTable.at(idx) += value; + } + + private: + bool TyIsInSizeAlignTable(const MIRType &ty) const; + void AddAndComputeSizeAlign(MIRType &ty); + void ComputeStructTypeSizesAligns(MIRType &ty, const TyIdx &tyIdx); + void ComputeClassTypeSizesAligns(MIRType &ty, const TyIdx &tyIdx, uint8 align = 0); + void ComputeArrayTypeSizesAligns(MIRType &ty, const TyIdx &tyIdx); + void ComputeFArrayOrJArrayTypeSizesAligns(MIRType &ty, const TyIdx &tyIdx); + + MIRModule &mirModule; + MapleVector typeSizeTable; /* index is TyIdx */ + MapleVector typeAlignTable; /* index is TyIdx */ + MapleVector typeHasFlexibleArray; /* struct with flexible array */ + /* + * gives number of fields inside + * each struct inclusive of nested structs, for speeding up + * traversal for locating the field for a given fieldID + */ + MapleVector structFieldCountTable; + /* + * a lookup table for class layout. the vector is indexed by field-id + * Note: currently only for java class types. 
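 *
 * Editor's note (illustrative sketch, not part of this patch): the table is meant to be
 * read through the guarded accessors declared above, e.g.
 *   if (beCommon.HasJClassLayout(klass)) {
 *     const JClassLayout &layout = beCommon.GetJClassLayout(klass);
 *     // each JClassFieldInfo entry exposes IsRef()/GetOffset() for one field
 *   }
 * `beCommon` and `klass` are hypothetical names for a BECommon instance and a MIRClassType.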
+ */ + MapleUnorderedMap jClassLayoutTable; + MapleUnorderedMap funcReturnType; +}; /* class BECommon */ +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_BE_BECOMMON_H */ diff --git a/src/mapleall/maple_be/include/be/common_utils.h b/src/mapleall/maple_be/include/be/common_utils.h new file mode 100644 index 0000000000000000000000000000000000000000..04cec84218a8e062bd62dd8cef5890e83b47e5ee --- /dev/null +++ b/src/mapleall/maple_be/include/be/common_utils.h @@ -0,0 +1,319 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_BE_COMMON_UTILS_H +#define MAPLEBE_INCLUDE_BE_COMMON_UTILS_H +#include "types_def.h" +#include "mpl_logging.h" + +namespace maplebe { +using namespace maple; +constexpr uint32 kOffsetAlignmentOf8Bit = 0; +constexpr uint32 kOffsetAlignmentOf16Bit = 1; +constexpr uint32 kOffsetAlignmentOf32Bit = 2; +constexpr uint32 kOffsetAlignmentOf64Bit = 3; +constexpr uint32 kOffsetAlignmentOf128Bit = 4; +constexpr uint32 kBaseOffsetAlignment = 3; +/* + * The constexpr implementations, without assertions. Suitable for using in + * constants. + */ +constexpr uint32 k1FConst = 31; +constexpr uint32 k0BitSize = 0; +constexpr uint32 k1BitSize = 1; +constexpr uint32 k2BitSize = 2; +constexpr uint32 k3BitSize = 3; +constexpr uint32 k4BitSize = 4; +constexpr uint32 k5BitSize = 5; +constexpr uint32 k6BitSize = 6; +constexpr uint32 k7BitSize = 7; +constexpr uint32 k8BitSize = 8; +constexpr uint32 k16BitSize = 16; +constexpr uint32 k24BitSize = 24; +constexpr uint32 k32BitSize = 32; +constexpr uint32 k48BitSize = 48; +constexpr uint32 k56BitSize = 56; +constexpr uint32 k64BitSize = 64; +constexpr uint32 k128BitSize = 128; +constexpr uint32 k256BitSize = 256; +constexpr uint32 k512BitSize = 512; +constexpr uint32 k1024BitSize = 1024; +constexpr uint32 k2048BitSize = 2048; + +constexpr int32 k1FConstInt = 31; +constexpr int32 k0BitSizeInt = 0; +constexpr int32 k1BitSizeInt = 1; +constexpr int32 k2BitSizeInt = 2; +constexpr int32 k3BitSizeInt = 3; +constexpr int32 k4BitSizeInt = 4; +constexpr int32 k5BitSizeInt = 5; +constexpr int32 k6BitSizeInt = 6; +constexpr int32 k7BitSizeInt = 7; +constexpr int32 k8BitSizeInt = 8; +constexpr int32 k16BitSizeInt = 16; +constexpr int32 k24BitSizeInt = 24; +constexpr int32 k32BitSizeInt = 32; +constexpr int32 k48BitSizeInt = 48; +constexpr int32 k56BitSizeInt = 56; +constexpr int32 k64BitSizeInt = 64; +constexpr int32 k128BitSizeInt = 128; +constexpr int32 k256BitSizeInt = 256; +constexpr int32 k512BitSizeInt = 512; +constexpr int32 k1024BitSizeInt = 1024; + +constexpr int32 kNegative256BitSize = -256; +constexpr int32 kNegative512BitSize = -512; +constexpr int32 kNegative1024BitSize = -1024; + +constexpr uint32 k1ByteSize = 1; +constexpr uint32 k2ByteSize = 2; +constexpr uint32 k3ByteSize = 3; +constexpr uint32 k4ByteSize = 4; +constexpr uint32 k8ByteSize = 8; +constexpr uint32 k9ByteSize = 9; +constexpr uint32 k12ByteSize = 12; +constexpr uint32 k14ByteSize = 14; +constexpr uint32 k15ByteSize = 15; 
+constexpr uint32 k16ByteSize = 16; +constexpr uint32 k32ByteSize = 32; + +constexpr int32 k1ByteSizeInt = 1; +constexpr int32 k2ByteSizeInt = 2; +constexpr int32 k3ByteSizeInt = 3; +constexpr int32 k4ByteSizeInt = 4; +constexpr int32 k8ByteSizeInt = 8; +constexpr int32 k9ByteSizeInt = 9; +constexpr int32 k12ByteSizeInt = 12; +constexpr int32 k14ByteSizeInt = 14; +constexpr int32 k15ByteSizeInt = 15; +constexpr int32 k16ByteSizeInt = 16; +constexpr int32 k32ByteSizeInt = 32; + +constexpr uint32 k1EightBytesSize = 8; +constexpr uint32 k2EightBytesSize = 16; +constexpr uint32 k3EightBytesSize = 24; +constexpr uint32 k4EightBytesSize = 32; + +constexpr uint32 k4BitShift = 2; /* 4 is 1 << 2; */ +constexpr uint32 k8BitShift = 3; /* 8 is 1 << 3; */ +constexpr uint32 k16BitShift = 4; /* 16 is 1 << 4 */ + +constexpr uint32 kDwordSizeTwo = 2; + +constexpr uint32 k4ByteFloatSize = 4; +constexpr uint32 k8ByteDoubleSize = 8; + +/* Storage location of operands in one insn */ +constexpr int32 kInsnFirstOpnd = 0; +constexpr int32 kInsnSecondOpnd = 1; +constexpr int32 kInsnThirdOpnd = 2; +constexpr int32 kInsnFourthOpnd = 3; +constexpr int32 kInsnFifthOpnd = 4; +constexpr int32 kInsnSixthOpnd = 5; +constexpr int32 kInsnSeventhOpnd = 6; +constexpr int32 kInsnEighthOpnd = 7; +constexpr int32 kInsnMaxOpnd = 8; + +/* Reg of CCLocInfo */ +constexpr uint32 kFirstReg = 0; +constexpr uint32 kSecondReg = 1; +constexpr uint32 kThirdReg = 2; +constexpr uint32 kFourthReg = 3; + +/* inline asm operand designations */ +constexpr uint32 kAsmStringOpnd = 0; +constexpr uint32 kAsmOutputListOpnd = 1; +constexpr uint32 kAsmClobberListOpnd = 2; +constexpr uint32 kAsmInputListOpnd = 3; +constexpr uint32 kAsmOutputConstraintOpnd = 4; +constexpr uint32 kAsmInputConstraintOpnd = 5; +constexpr uint32 kAsmOutputRegPrefixOpnd = 6; +constexpr uint32 kAsmInputRegPrefixOpnd = 7; + +/* Number of registers */ +constexpr uint32 kOneRegister = 1; +constexpr uint32 kTwoRegister = 2; +constexpr uint32 kThreeRegister = 3; +constexpr uint32 kFourRegister = 4; + +/* Size of struct for memcpy */ +constexpr uint32 kParmMemcpySize = 40; + +/* const value used in cfg */ +constexpr uint32 kSuccSizeOfIfBB = 2; + +/* Check whether the value is an even number. */ +constexpr int32 kDivide2 = 2; +constexpr int32 kRegNum2 = 2; +constexpr int32 kStepNum2 = 2; +constexpr int32 kSign4ByteSize = 4; + +/* alignment in bytes of uint8 */ +constexpr uint8 kAlignOfU8 = 3; + +/* + * if the number of local refvar is less than 12, use stp or str to init local refvar + * else call function MCC_InitializeLocalStackRef to init. 
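 *
 * Editor's note (illustrative sketch, not part of this patch) of the policy described above,
 * with `numLocalRefVars` as a hypothetical name for the count being tested:
 *   if (numLocalRefVars < kRefNum12) {
 *     // emit stp/str instructions to initialize the reference slots inline
 *   } else {
 *     // emit a call to MCC_InitializeLocalStackRef instead
 *   }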
+ */
+constexpr int32 kRefNum12 = 12;
+
+/* mod function max argument size */
+constexpr uint32 kMaxModFuncArgSize = 8;
+
+/* string length of special name "__EARetTemp__" */
+constexpr int32 kEARetTempNameSize = 10;
+
+/*
+ * Aarch64 data processing instructions have 12 bits of space for values in their instruction word.
+ * This is arranged as a four-bit rotate value and an eight-bit immediate value:
+ */
+constexpr uint32 kMaxImmVal5Bits = 5;
+constexpr uint32 kMaxImmVal6Bits = 6;
+constexpr uint32 kMaxImmVal8Bits = 8;
+constexpr uint32 kMaxImmVal12Bits = 12;
+constexpr uint32 kMaxImmVal13Bits = 13;
+constexpr uint32 kMaxImmVal16Bits = 16;
+
+constexpr int32 kMaxPimm8 = 4095;
+constexpr int32 kMaxPimm16 = 8190;
+constexpr int32 kMaxPimm32 = 16380;
+constexpr int32 kMaxPimm64 = 32760;
+constexpr int32 kMaxPimm128 = 65520;
+
+constexpr int32 kMaxPimm[k5BitSize] = {kMaxPimm8, kMaxPimm16, kMaxPimm32, kMaxPimm64, kMaxPimm128};
+constexpr int32 kMaxPairPimm[k3BitSize] = {k256BitSize, k512BitSize, k512BitSize};
+
+constexpr int32 kMaxSimm32 = 255;
+constexpr int32 kMaxSimm32Pair = 252;
+constexpr int32 kMinSimm32 = kNegative256BitSize;
+constexpr int32 kMaxSimm64Pair = 504;
+constexpr int32 kMinSimm64 = kNegative512BitSize;
+
+constexpr int32 kMax12UnsignedImm = 4096;
+constexpr int32 kMax13UnsignedImm = 8192;
+constexpr int32 kMax16UnsignedImm = 65535;
+
+/* Dedicated for Vector */
+constexpr int32 kMinImmVal = -128;
+constexpr int32 kMaxImmVal = 255;
+
+/* aarch64 assembly takes up to 24 bits */
+constexpr uint32 kMaxImmVal24Bits = 24;
+
+constexpr uint32 kDecimalMax = 10;
+
+constexpr double kMicroSecPerMilliSec = 1000.0;
+
+constexpr double kPercent = 100.0;
+
+constexpr int32 kZeroAsciiNum = 48;
+
+enum ConditionCode : uint8 {
+  CC_EQ, /* equal */
+  CC_NE, /* not equal */
+  CC_CS, /* carry set (== HS) */
+  CC_HS, /* unsigned higher or same (== CS) */
+  CC_CC, /* carry clear (== LO) */
+  CC_LO, /* unsigned lower (== CC) */
+  CC_MI, /* minus or negative result */
+  CC_PL, /* positive or zero result */
+  CC_VS, /* overflow */
+  CC_VC, /* no overflow */
+  CC_HI, /* unsigned higher */
+  CC_LS, /* unsigned lower or same */
+  CC_GE, /* signed greater than or equal */
+  CC_LT, /* signed less than */
+  CC_GT, /* signed greater than */
+  CC_LE, /* signed less than or equal */
+  CC_AL, /* always, this is the default. usually omitted.
*/ + kCcLast +}; + +inline ConditionCode GetReverseCC(ConditionCode cc) { + switch (cc) { + case CC_NE: + return CC_EQ; + case CC_EQ: + return CC_NE; + case CC_HS: + return CC_LO; + case CC_LO: + return CC_HS; + case CC_MI: + return CC_PL; + case CC_PL: + return CC_MI; + case CC_VS: + return CC_VC; + case CC_VC: + return CC_VS; + case CC_HI: + return CC_LS; + case CC_LS: + return CC_HI; + case CC_LT: + return CC_GE; + case CC_GE: + return CC_LT; + case CC_GT: + return CC_LE; + case CC_LE: + return CC_GT; + default: + CHECK_FATAL(0, "unknown condition code"); + } + return kCcLast; +} + +inline bool IsPowerOf2Const(uint64 i) { + return (i & (i - 1)) == 0; +} + +inline uint64 RoundUpConst(uint64 offset, uint64 align) { + return (-align) & (offset + align - 1); +} + +inline bool IsPowerOf2(uint64 i) { + return IsPowerOf2Const(i); +} + +/* align must be a power of 2 */ +inline uint64 RoundUp(uint64 offset, uint64 align) { + if (align == 0) { + return offset; + } + ASSERT(IsPowerOf2(align), "align must be power of 2!"); + return RoundUpConst(offset, align); +} + +inline uint64 RoundDownConst(uint64 offset, uint64 align) { + return (UINT64_MAX - align + 1) & offset; +} + +// align must be a power of 2 +inline uint64 RoundDown(uint64 offset, uint64 align) { + if (align == 0) { + return offset; + } + ASSERT(IsPowerOf2(align), "align must be power of 2!"); + return RoundDownConst(offset, align); +} + +inline bool IsAlignedTo(uint64 offset, uint64 align) { + ASSERT(IsPowerOf2(align), "align must be power of 2!"); + return (offset & (align - 1)) == 0; +} +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_BE_COMMON_UTILS_H */ diff --git a/src/mapleall/maple_be/include/be/lower.h b/src/mapleall/maple_be/include/be/lower.h new file mode 100644 index 0000000000000000000000000000000000000000..eee0bb15588677821b2a10126b8141a1b4a64284 --- /dev/null +++ b/src/mapleall/maple_be/include/be/lower.h @@ -0,0 +1,347 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_BE_LOWERER_H +#define MAPLEBE_INCLUDE_BE_LOWERER_H +/* C++ headers. */ +#include +#include +#include +#include +#include +#include +#include "intrinsics.h" /* For IntrinDesc. This includes 'intrinsic_op.h' as well */ +#include "becommon.h" +#include "cg.h" +#include "bbt.h" +/* MapleIR headers. 
*/ +#include "mir_nodes.h" +#include "mir_module.h" +#include "mir_function.h" +#include "mir_lower.h" +#include "simplify.h" + +namespace maplebe { +class CGLowerer { + enum Option : uint64 { + kUndefined = 0, + kGenEh = 1ULL << 0, + kVerboseCG = 1ULL << 1, + }; + + using BuiltinFunctionID = uint32; + using OptionFlag = uint64; + public: + CGLowerer(MIRModule &mod, BECommon &common, MemPool &memPool, MIRFunction *func = nullptr) + : mirModule(mod), + beCommon(common), + simplifyOp(memPool) { + SetOptions(kGenEh); + mirBuilder = mod.GetMIRBuilder(); + SetCurrentFunc(func); + } + + CGLowerer(MIRModule &mod, BECommon &common, MemPool &memPool, bool genEh, bool verboseCG) + : mirModule(mod), + beCommon(common), + simplifyOp(memPool) { + OptionFlag option = 0; + if (genEh) { + option |= kGenEh; + } + if (verboseCG) { + option |= kVerboseCG; + } + SetOptions(option); + mirBuilder = mod.GetMIRBuilder(); + SetCurrentFunc(nullptr); + } + + ~CGLowerer() { + mirBuilder = nullptr; + currentBlock = nullptr; + } + + MIRFunction *RegisterFunctionVoidStarToVoid(BuiltinFunctionID id, const std::string &name, + const std::string ¶mName); + + void RegisterBuiltIns(); + + void LowerFunc(MIRFunction &func); + + BaseNode *LowerIntrinsicop(const BaseNode &parent, IntrinsicopNode &intrinNode, BlockNode &newBlk); + + BaseNode *LowerIntrinsicopwithtype(const BaseNode &parent, IntrinsicopNode &intrinNode, BlockNode &blk); + + StmtNode *LowerIntrinsicMplClearStack(const IntrinsiccallNode &intrinCall, BlockNode &newBlk); + + StmtNode *LowerIntrinsicRCCall(const IntrinsiccallNode &intrinCall); + + void LowerArrayStore(const IntrinsiccallNode &intrinCall, BlockNode &newBlk); + + StmtNode *LowerDefaultIntrinsicCall(IntrinsiccallNode &intrinCall, MIRSymbol &st, MIRFunction &fn); + + StmtNode *LowerIntrinsicMplCleanupLocalRefVarsSkip(IntrinsiccallNode &intrinCall); + + StmtNode *LowerIntrinsiccall(IntrinsiccallNode &intrinCall, BlockNode &newBlk); + + StmtNode *LowerSyncEnterSyncExit(StmtNode &stmt); + + MIRFunction *GetCurrentFunc() const { + return mirModule.CurFunction(); + } + + BaseNode *LowerExpr(BaseNode &parent, BaseNode &expr, BlockNode &blkNode); + + BaseNode *LowerDread(DreadNode &dread); + + BaseNode *LowerIread(IreadNode &iread) { + /* use PTY_u8 for boolean type in dread/iread */ + if (iread.GetPrimType() == PTY_u1) { + iread.SetPrimType(PTY_u8); + } + return (iread.GetFieldID() == 0 ? &iread : LowerIreadBitfield(iread)); + } + + BaseNode *LowerCastExpr(BaseNode &expr) const; + + BaseNode *ExtractSymbolAddress(const StIdx &stIdx); + BaseNode *LowerDreadToThreadLocal(BaseNode &expr); + StmtNode *LowerDassignToThreadLocal(StmtNode &stmt); + + void LowerDassign(DassignNode &dsNode, BlockNode &newBlk); + + void LowerResetStmt(StmtNode &stmt, BlockNode &block); + + void LowerIassign(IassignNode &iassign, BlockNode &newBlk); + + void LowerRegassign(RegassignNode ®Node, BlockNode &newBlk); + + void AddElemToPrintf(MapleVector &argsPrintf, int num, ...) 
const; + + bool CheckSwitchTableContinuous(SwitchNode &stmt) const; + + bool IsSwitchToRangeGoto(const BlockNode &blk) const; + + std::string AssertBoundaryGetFileName(StmtNode &stmt) { + size_t pos = mirModule.GetFileNameFromFileNum(stmt.GetSrcPos().FileNum()).rfind('/'); + return mirModule.GetFileNameFromFileNum(stmt.GetSrcPos().FileNum()).substr(pos + 1); + } + + std::string GetFileNameSymbolName(const std::string &fileName) const; + + void SwitchAssertBoundary(StmtNode &stmt, MapleVector &argsPrintf); + + void LowerAssertBoundary(StmtNode &stmt, BlockNode &block, BlockNode &newBlk, std::vector &abortNode); + + StmtNode *LowerIntrinsicopDassign(const DassignNode &dsNode, IntrinsicopNode &intrinNode, BlockNode &newBlk); + + void LowerGCMalloc(const BaseNode &node, const GCMallocNode &gcmalloc, BlockNode &blkNode, bool perm = false); + + std::string GetNewArrayFuncName(const uint32 elemSize, const bool perm) const; + + void LowerJarrayMalloc(const StmtNode &stmt, const JarrayMallocNode &node, BlockNode &blkNode, bool perm = false); + + BaseNode *LowerAddrof(AddrofNode &addrof) const { + return &addrof; + } + + BaseNode *LowerIaddrof(const IreadNode &iaddrof); + BaseNode *SplitBinaryNodeOpnd1(BinaryNode &bNode, BlockNode &blkNode); + BaseNode *SplitTernaryNodeResult(TernaryNode &tNode, BaseNode &parent, BlockNode &blkNode); + bool IsComplexSelect(const TernaryNode &tNode) const; + int32 FindTheCurrentStmtFreq(const StmtNode *stmt) const; + BaseNode *LowerComplexSelect(const TernaryNode &tNode, BaseNode &parent, BlockNode &blkNode); + BaseNode *LowerFarray(ArrayNode &array); + BaseNode *LowerArrayDim(ArrayNode &array, int32 dim); + BaseNode *LowerArrayForLazyBiding(BaseNode &baseNode, BaseNode &offsetNode, const BaseNode &parent); + BaseNode *LowerArray(ArrayNode &array, const BaseNode &parent); + BaseNode *LowerCArray(ArrayNode &array); + + DassignNode *SaveReturnValueInLocal(StIdx stIdx, uint16 fieldID); + void LowerCallStmt(StmtNode &stmt, StmtNode *&nextStmt, BlockNode &newBlk, MIRType *retty = nullptr, + bool uselvar = false, bool isIntrinAssign = false); + BlockNode *LowerIntrinsiccallAassignedToAssignStmt(IntrinsiccallNode &intrinsicCall); + BlockNode *LowerCallAssignedStmt(StmtNode &stmt, bool uselvar = false); + bool LowerStructReturn(BlockNode &newBlk, StmtNode *stmt, StmtNode *&nextStmt, bool &lvar, BlockNode *oldBlk); + BlockNode *LowerMemop(StmtNode &stmt); + + BaseNode *LowerRem(BaseNode &expr, BlockNode &blk); + + BaseNode *LowerExtractBits(ExtractbitsNode &extr) const; + + void LowerStmt(StmtNode &stmt, BlockNode &newBlk); + + void LowerSwitchOpnd(StmtNode &stmt, BlockNode &newBlk); + + MIRSymbol *CreateNewRetVar(const MIRType &ty, const std::string &prefix); + + void RegisterExternalLibraryFunctions(); + + BlockNode *LowerBlock(BlockNode &block); + + void SimplifyBlock(BlockNode &block) const; + + void LowerTryCatchBlocks(BlockNode &body); + +#if TARGARM32 || TARGAARCH64 || TARGRISCV64 || TARGX86_64 + BlockNode *LowerReturnStructUsingFakeParm(NaryStmtNode &retNode); +#endif + BlockNode *LowerReturn(NaryStmtNode &retNode); + void LowerEntry(MIRFunction &func); + + StmtNode *LowerCall( + CallNode &callNode, StmtNode *&nextStmt, BlockNode &newBlk, MIRType *retTy = nullptr, bool uselvar = false); + void SplitCallArg(CallNode &callNode, BaseNode *newOpnd, size_t i, BlockNode &newBlk); + + void CleanupBranches(MIRFunction &func) const; + + void LowerTypePtr(BaseNode &node) const; + + BaseNode *GetBitField(int32 byteOffset, BaseNode *baseAddr, PrimType fieldPrimType); + StmtNode 
*WriteBitField(const std::pair &byteBitOffsets, const MIRBitFieldType *fieldType, + BaseNode *baseAddr, BaseNode *rhs, BlockNode *block); + BaseNode *ReadBitField(const std::pair &byteBitOffsets, const MIRBitFieldType *fieldType, + BaseNode *baseAddr); + BaseNode *LowerDreadBitfield(DreadNode &dread); + BaseNode *LowerIreadBitfield(IreadNode &iread); + StmtNode *LowerDassignBitfield(DassignNode &dassign, BlockNode &newBlk); + StmtNode *LowerIassignBitfield(IassignNode &iassign, BlockNode &newBlk); + + void LowerAsmStmt(AsmNode *asmNode, BlockNode *newBlk); + + bool ShouldOptarray() const { + ASSERT(mirModule.CurFunction() != nullptr, "nullptr check"); + return MIRLower::ShouldOptArrayMrt(*mirModule.CurFunction()); + } + + BaseNode *NodeConvert(PrimType mType, BaseNode &expr); + /* Lower pointer/reference types if found in pseudo registers. */ + void LowerPseudoRegs(const MIRFunction &func) const; + + /* A pseudo register refers to a symbol when DreadNode is converted to RegreadNode. */ + StIdx GetSymbolReferredToByPseudoRegister(PregIdx regNO) const { + (void)regNO; + return StIdx(); + } + + void SetOptions(OptionFlag option) { + options = option; + } + + void SetCheckLoadStore(bool value) { + checkLoadStore = value; + } + + /* if it defines a built-in to use for the given intrinsic, return the name. otherwise, return nullptr */ + PUIdx GetBuiltinToUse(BuiltinFunctionID id) const; + void InitArrayClassCacheTableIndex(); + + MIRModule &mirModule; + BECommon &beCommon; + BlockNode *currentBlock = nullptr; /* current block for lowered statements to be inserted to */ + bool checkLoadStore = false; + int64 seed = 0; + SimplifyOp simplifyOp; + static const std::string kIntrnRetValPrefix; + static const std::string kUserRetValPrefix; + + static constexpr PUIdx kFuncNotFound = PUIdx(-1); + static constexpr int kThreeDimArray = 3; + static constexpr int kNodeThirdOpnd = 2; + static constexpr int kMCCSyncEnterFast0 = 0; + static constexpr int kMCCSyncEnterFast1 = 1; + static constexpr int kMCCSyncEnterFast2 = 2; + static constexpr int kMCCSyncEnterFast3 = 3; + + void BuildLabel2FreqMap(); + + std::unordered_map &GetLabel2Freq() { + return l2fMap; + } + + protected: + /* + * true if the lower level (e.g. mplcg) can handle the intrinsic directly. + * For example, the INTRN_MPL_ATOMIC_EXCHANGE_PTR can be directly handled by mplcg, + * and generate machine code sequences not containing any function calls. + * Such intrinsics will bypass the lowering of "assigned", + * and let mplcg handle the intrinsic results which are not return values. 
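 *
 * Editor's note (illustrative sketch, not part of this patch): a lowering pass could consult
 * this predicate roughly as follows, using LowerIntrinsiccall() declared above; GetIntrinsic()
 * and BlockNode::AddStatement() are assumed helper names:
 *   if (IsIntrinsicCallHandledAtLowerLevel(intrinCall.GetIntrinsic())) {
 *     newBlk.AddStatement(&intrinCall);              // keep the call; mplcg expands it inline
 *   } else {
 *     (void)LowerIntrinsiccall(intrinCall, newBlk);  // expand into an ordinary runtime call here
 *   }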
+ */ + bool IsIntrinsicCallHandledAtLowerLevel(MIRIntrinsicID intrinsic) const; + + bool IsIntrinsicOpHandledAtLowerLevel(MIRIntrinsicID intrinsic) const; + + private: + + void SetCurrentFunc(MIRFunction *func) { + mirModule.SetCurFunction(func); + simplifyOp.SetFunction(func); + if (func != nullptr) { + const std::string &dumpFunc = CGOptions::GetDumpFunc(); + const bool debug = CGOptions::GetDumpPhases().find("cglower") != CGOptions::GetDumpPhases().end() && + (dumpFunc == "*" || dumpFunc == func->GetName()); + simplifyOp.SetDebug(debug); + } + } + + bool ShouldAddAdditionalComment() const { + return (options & kVerboseCG) != 0; + } + + bool GenerateExceptionHandlingCode() const { + return (options & kGenEh) != 0; + } + + BaseNode *MergeToCvtType(PrimType dType, PrimType sType, BaseNode &src) const; + BaseNode *LowerJavascriptIntrinsicop(IntrinsicopNode &intrinNode, const IntrinDesc &desc); + StmtNode *CreateStmtCallWithReturnValue(const IntrinsicopNode &intrinNode, const MIRSymbol &ret, PUIdx bFunc, + BaseNode *extraInfo = nullptr) const; + StmtNode *CreateStmtCallWithReturnValue(const IntrinsicopNode &intrinNode, PregIdx retPregIdx, PUIdx bFunc, + BaseNode *extraInfo = nullptr) const; + BaseNode *LowerIntrinsicop(const BaseNode &parent, IntrinsicopNode &intrinNode); + BaseNode *LowerIntrinJavaMerge(const BaseNode &parent, IntrinsicopNode &intrinNode) const; + BaseNode *LowerIntrinJavaArrayLength(const BaseNode &parent, IntrinsicopNode &intrinNode); + BaseNode *LowerIntrinsicopWithType(const BaseNode &parent, IntrinsicopNode &intrinNode); + + MIRType *GetArrayNodeType(BaseNode &baseNode); + IreadNode &GetLenNode(BaseNode &opnd0); + LabelIdx GetLabelIdx(MIRFunction &curFunc) const; + void ProcessArrayExpr(BaseNode &expr, BlockNode &blkNode); + void ProcessClassInfo(MIRType &classType, bool &classInfoFromRt, std::string &classInfo) const; + StmtNode *GenCallNode(const StmtNode &stmt, PUIdx &funcCalled, CallNode& origCall); + StmtNode *GenIntrinsiccallNode(const StmtNode &stmt, PUIdx &funcCalled, bool &handledAtLowerLevel, + IntrinsiccallNode &origCall); + StmtNode *GenIcallNode(PUIdx &funcCalled, IcallNode &origCall); + BlockNode *GenBlockNode(StmtNode &newCall, const CallReturnVector &p2nRets, const Opcode &opcode, + const PUIdx &funcCalled, bool handledAtLowerLevel, bool uselvar); + BaseNode *GetClassInfoExprFromRuntime(const std::string &classInfo); + BaseNode *GetClassInfoExprFromArrayClassCache(const std::string &classInfo); + BaseNode *GetClassInfoExpr(const std::string &classInfo) const; + BaseNode *GetBaseNodeFromCurFunc(MIRFunction &curFunc, bool isFromJarray); + + OptionFlag options = 0; + bool needBranchCleanup = false; + bool hasTry = false; + + static std::vector> builtinFuncIDs; + MIRBuilder *mirBuilder = nullptr; + uint32 labelIdx = 0; + static std::unordered_map intrinFuncIDs; + static std::unordered_map arrayClassCacheIndex; + std::unordered_map l2fMap; // Map label to frequency on profileUse +}; +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_BE_LOWERER_H */ diff --git a/src/mapleall/maple_be/include/be/rt.h b/src/mapleall/maple_be/include/be/rt.h new file mode 100644 index 0000000000000000000000000000000000000000..d0a235a2f39be74ef548c64f1296095588498304 --- /dev/null +++ b/src/mapleall/maple_be/include/be/rt.h @@ -0,0 +1,88 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_BE_RT_H +#define MAPLEBE_INCLUDE_BE_RT_H + +#include +#include + +namespace maplebe { +/* + * This class contains constants about the ABI of the runtime, such as symbols + * for GC-related metadata in generated binary files. + */ +class RTSupport { + public: + static RTSupport &GetRTSupportInstance() { + static RTSupport rtSupport; + return rtSupport; + } + uint64_t GetObjectAlignment() const { + return kObjectAlignment; + } + int64_t GetArrayContentOffset() const { + return kArrayContentOffset; + } + int64_t GetArrayLengthOffset() const { + return kArrayLengthOffset; + } + uint64_t GetFieldSize() const { + return kRefFieldSize; + } + uint64_t GetFieldAlign() const { + return kRefFieldAlign; + } + + protected: + uint64_t kObjectAlignment = 0; /* Word size. Suitable for all Java types. */ + uint64_t kObjectHeaderSize = 0; /* java object header used by MM. */ + +#ifdef USE_32BIT_REF + uint32_t kRefFieldSize = 0; /* reference field in java object */ + uint32_t kRefFieldAlign = 0; +#else + uint32_t kRefFieldSize = 0; /* reference field in java object */ + uint32_t kRefFieldAlign = 0; +#endif /* USE_32BIT_REF */ + /* The array length offset is fixed since CONTENT_OFFSET is fixed to simplify code */ + int64_t kArrayLengthOffset = 0; /* shadow + monitor + [padding] */ + /* The array content offset is aligned to 8B to alow hosting of size-8B elements */ + int64_t kArrayContentOffset = 0; /* fixed */ + int64_t kGcTibOffset = 0; + int64_t kGcTibOffsetAbs = 0; + + private: + RTSupport() { + kObjectAlignment = 8; + kObjectHeaderSize = 8; +#ifdef USE_32BIT_REF + kRefFieldSize = 4; + kRefFieldAlign = 4; +#else + kRefFieldSize = 8; + kRefFieldAlign = 8; +#endif /* USE_32BIT_REF */ + kArrayLengthOffset = 12; + kArrayContentOffset = 16; + kGcTibOffset = -8; + kGcTibOffsetAbs = -kGcTibOffset; + } + static const std::string kObjectMapSectionName; + static const std::string kGctibLabelArrayOfObject; + static const std::string kGctibLabelJavaObject; +}; +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_BE_RT_H */ \ No newline at end of file diff --git a/src/mapleall/maple_be/include/be/switch_lowerer.h b/src/mapleall/maple_be/include/be/switch_lowerer.h new file mode 100644 index 0000000000000000000000000000000000000000..18450a1b39e0dd1d797054c80a519f8255932cd8 --- /dev/null +++ b/src/mapleall/maple_be/include/be/switch_lowerer.h @@ -0,0 +1,79 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MAPLEBE_INCLUDE_BE_SWITCH_LOWERER_H +#define MAPLEBE_INCLUDE_BE_SWITCH_LOWERER_H +#include "mir_nodes.h" +#include "mir_module.h" +#include "lower.h" + +namespace maplebe { +using namespace maple; +class BELowerer; + +class SwitchLowerer { + public: + SwitchLowerer(maple::MIRModule &mod, maple::SwitchNode &stmt, + CGLowerer *lower, maple::MapleAllocator &allocator) + : mirModule(mod), + stmt(&stmt), + cgLowerer(lower), + switchItems(allocator.Adapter()), + ownAllocator(&allocator) {} + + SwitchLowerer(maple::MIRModule &mod, maple::SwitchNode &stmt, + maple::MapleAllocator &allocator) + : mirModule(mod), + stmt(&stmt), + switchItems(allocator.Adapter()), + ownAllocator(&allocator) {} + + ~SwitchLowerer() = default; + + maple::BlockNode *LowerSwitch(LabelIdx newLabelIdx = 0); + + private: + using Cluster = std::pair; + using SwitchItem = std::pair; + + maple::MIRModule &mirModule; + maple::SwitchNode *stmt; + CGLowerer *cgLowerer; + /* + * the original switch table is sorted and then each dense (in terms of the + * case tags) region is condensed into 1 switch item; in the switchItems + * table, each item either corresponds to an original entry in the original + * switch table (pair's second is 0), or to a dense region (pair's second + * gives the upper limit of the dense range) + */ + maple::MapleVector switchItems; /* uint32 is index in switchTable */ + maple::MapleAllocator *ownAllocator; + const maple::int32 kClusterSwitchCutoff = 5; + const float kClusterSwitchDensityHigh = 0.4; + const float kClusterSwitchDensityLow = 0.2; + const maple::int32 kMaxRangeGotoTableSize = 127; + bool jumpToDefaultBlockGenerated = false; + + void FindClusters(MapleVector &clusters) const; + void InitSwitchItems(MapleVector &clusters); + maple::RangeGotoNode *BuildRangeGotoNode(int32 startIdx, int32 endIdx, LabelIdx newLabelIdx); + maple::CompareNode *BuildCmpNode(Opcode opCode, uint32 idx); + maple::GotoNode *BuildGotoNode(int32 idx); + maple::CondGotoNode *BuildCondGotoNode(int32 idx, Opcode opCode, BaseNode &cond); + maple::BlockNode *BuildCodeForSwitchItems(int32 start, int32 end, bool lowBlockNodeChecked, + bool highBlockNodeChecked, LabelIdx newLabelIdx = 0); +}; +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_BE_SWITCH_LOWERER_H */ diff --git a/src/mapleall/maple_be/include/be/try_catch.h b/src/mapleall/maple_be/include/be/try_catch.h new file mode 100644 index 0000000000000000000000000000000000000000..45387aeba275a5280de6830e066d6baf6359a472 --- /dev/null +++ b/src/mapleall/maple_be/include/be/try_catch.h @@ -0,0 +1,155 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_BE_TRY_CATCH_H +#define MAPLEBE_INCLUDE_BE_TRY_CATCH_H +#include "bbt.h" +/* MapleIR headers. 
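[Editorial note, not part of the patch: a small sketch of how the density thresholds in SwitchLowerer can be applied when deciding whether a sorted run of case tags is condensed into a single range-goto item. The helper below is a hypothetical illustration under that reading, not the actual FindClusters/InitSwitchItems logic.]

#include <cstddef>
#include <cstdint>
#include <vector>

namespace switch_sketch {
constexpr int32_t kClusterSwitchCutoff = 5;
constexpr float kClusterSwitchDensityHigh = 0.4f;

// True if the case tags in [first, last] of a sorted case table are numerous
// and dense enough to be worth one range-goto (jump table) item.
bool IsDenseCluster(const std::vector<int64_t> &sortedTags, size_t first, size_t last) {
  int64_t count = static_cast<int64_t>(last - first + 1);
  if (count < kClusterSwitchCutoff) {
    return false;  // too few cases; a compare chain is cheaper
  }
  int64_t range = sortedTags[last] - sortedTags[first] + 1;
  float density = static_cast<float>(count) / static_cast<float>(range);
  return density >= kClusterSwitchDensityHigh;
}
}  // namespace switch_sketch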
*/ +#include "mir_nodes.h" +#include "mir_lower.h" + +namespace maplebe { +using namespace maple; +class TryEndTryBlock { + public: + explicit TryEndTryBlock(MemPool &memPool) + : allocator(&memPool), enclosedBBs(allocator.Adapter()), + labeledBBsInTry(allocator.Adapter()), bbsToRelocate(allocator.Adapter()) {} + + ~TryEndTryBlock() = default; + + void Init() { + startTryBB = nullptr; + endTryBB = nullptr; + tryStmt = nullptr; + enclosedBBs.clear(); + labeledBBsInTry.clear(); + bbsToRelocate.clear(); + } + + void Reset(BBT &startBB) { + startTryBB = &startBB; + CHECK_NULL_FATAL(startTryBB->GetKeyStmt()); + tryStmt = startTryBB->GetKeyStmt(); + CHECK_FATAL(tryStmt->GetOpCode() == OP_try, "expect OPT_try"); + endTryBB = nullptr; + enclosedBBs.clear(); + labeledBBsInTry.clear(); + bbsToRelocate.clear(); + } + + void SetStartTryBB(BBT *bb) { + startTryBB = bb; + } + + BBT *GetStartTryBB() { + return startTryBB; + } + + void SetEndTryBB(BBT *bb) { + endTryBB = bb; + } + + BBT *GetEndTryBB() { + return endTryBB; + } + + StmtNode *GetTryStmtNode() { + return tryStmt; + } + + MapleVector &GetEnclosedBBs() { + return enclosedBBs; + } + + size_t GetEnclosedBBsSize() const { + return enclosedBBs.size(); + } + + const BBT *GetEnclosedBBsElem(size_t index) const{ + ASSERT(index < enclosedBBs.size(), "out of range"); + return enclosedBBs[index]; + } + + void PushToEnclosedBBs(BBT &bb) { + enclosedBBs.emplace_back(&bb); + } + + MapleVector &GetLabeledBBsInTry() { + return labeledBBsInTry; + } + + MapleVector &GetBBsToRelocate() { + return bbsToRelocate; + } + + private: + MapleAllocator allocator; + BBT *startTryBB = nullptr; + BBT *endTryBB = nullptr; + StmtNode *tryStmt = nullptr; + MapleVector enclosedBBs; + MapleVector labeledBBsInTry; + MapleVector bbsToRelocate; +}; + +class TryCatchBlocksLower { + public: + TryCatchBlocksLower(MemPool &memPool, BlockNode &body, MIRModule &mirModule) + : memPool(memPool), allocator(&memPool), body(body), mirModule(mirModule), + tryEndTryBlock(memPool), bbList(allocator.Adapter()), prevBBOfTry(allocator.Adapter()), + firstStmtToBBMap(allocator.Adapter()), catchesSeenSoFar(allocator.Adapter()) {} + + ~TryCatchBlocksLower() = default; + void RecoverBasicBlock(); + void TraverseBBList(); + void CheckTryCatchPattern() const; + + void SetGenerateEHCode(bool val) { + generateEHCode = val; + } + + private: + MemPool &memPool; + MapleAllocator allocator; + BlockNode &body; + MIRModule &mirModule; + TryEndTryBlock tryEndTryBlock; + StmtNode *bodyFirst = nullptr; + bool bodyEndWithEndTry = false; + bool generateEHCode = false; + MapleVector bbList; + MapleUnorderedMap prevBBOfTry; + MapleUnorderedMap firstStmtToBBMap; + MapleVector catchesSeenSoFar; + + void ProcessEnclosedBBBetweenTryEndTry(); + void ConnectRemainBB(); + BBT *FindInsertAfterBB(); + void PlaceRelocatedBB(BBT &insertAfter); + void PalceCatchSeenSofar(BBT &insertAfter); + BBT *CreateNewBB(StmtNode *first, StmtNode *last); + bool CheckAndProcessCatchNodeInCurrTryBlock(BBT &origLowerBB, LabelIdx ebbLabel, uint32 index); + BBT *CollectCatchAndFallthruUntilNextCatchBB(BBT *&lowerBB, uint32 &nextEnclosedIdx, std::vector &currBBThread); + void WrapCatchWithTryEndTryBlock(std::vector &currBBThread, BBT *&nextBBThreadHead, + uint32 &nextEnclosedIdx, bool hasMoveEndTry); + void SwapEndTryBBAndCurrBBThread(const std::vector &currBBThread, + bool &hasMoveEndTry, const BBT *nextBBThreadHead); + void ProcessThreadTail(BBT &threadTail, BBT * const &nextBBThreadHead, bool hasMoveEndTry); + static StmtNode 
*MoveCondGotoIntoTry(BBT &jtBB, BBT &condbrBB, const MapleVector &labeledBBsInTry); + static BBT *FindTargetBBlock(LabelIdx idx, const std::vector &bbs); +}; +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_BE_TRY_CATCH_H */ \ No newline at end of file diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_MPISel.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_MPISel.h new file mode 100644 index 0000000000000000000000000000000000000000..aac9a9bd877bf032971be7dfea369b828e65b5c2 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_MPISel.h @@ -0,0 +1,14 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_abi.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_abi.h new file mode 100644 index 0000000000000000000000000000000000000000..9f3118688e087c0cd4d0e3237c6b44c52294ef80 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_abi.h @@ -0,0 +1,53 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_ABI_H +#define MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_ABI_H + +#include "aarch64_isa.h" +#include "types_def.h" +#include "becommon.h" + +namespace maplebe { +using namespace maple; + +namespace AArch64Abi { +constexpr uint32 kNumIntParmRegs = 8; +constexpr uint32 kNumFloatParmRegs = 8; +constexpr int32 kYieldPointReservedReg = 19; +constexpr uint32 kNormalUseOperandNum = 3; +constexpr uint32 kMaxInstrForCondBr = 260000; // approximately less than (2^18); + +constexpr AArch64reg kIntReturnRegs[kNumIntParmRegs] = { R0, R1, R2, R3, R4, R5, R6, R7 }; +constexpr AArch64reg kFloatReturnRegs[kNumFloatParmRegs] = { V0, V1, V2, V3, V4, V5, V6, V7 }; +constexpr AArch64reg kIntParmRegs[kNumIntParmRegs] = { R0, R1, R2, R3, R4, R5, R6, R7 }; +constexpr AArch64reg kFloatParmRegs[kNumFloatParmRegs] = { V0, V1, V2, V3, V4, V5, V6, V7 }; + +/* + * Refer to ARM IHI 0055C_beta: Procedure Call Standard for + * ARM 64-bit Architecture. 
Section 5.5 + */ +bool IsAvailableReg(AArch64reg reg); +bool IsCalleeSavedReg(AArch64reg reg); +bool IsCallerSaveReg(AArch64reg regNO); +bool IsParamReg(AArch64reg reg); +bool IsSpillReg(AArch64reg reg); +bool IsExtraSpillReg(AArch64reg reg); +bool IsSpillRegInRA(AArch64reg regNO, bool has3RegOpnd); +PrimType IsVectorArrayType(MIRType *ty, uint32 &arraySize); +} /* namespace AArch64Abi */ + +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_ABI_H */ diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_alignment.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_alignment.h new file mode 100644 index 0000000000000000000000000000000000000000..34ce4f7e0bbf8adf4ee6b1d984a19c6248221195 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_alignment.h @@ -0,0 +1,66 @@ +/* + * Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ + +#ifndef MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_ALIGNMENT_H +#define MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_ALIGNMENT_H + +#include "alignment.h" +#include "aarch64_cgfunc.h" + +namespace maplebe { +constexpr uint32 kAlignRegionPower = 4; +constexpr uint32 kAlignInsnLength = 4; +constexpr uint32 kAlignMaxNopNum = 1; + +struct AArch64AlignInfo { + /* if bb size in (16byte, 96byte) , the bb need align */ + uint32 alignMinBBSize = 16; + uint32 alignMaxBBSize = 96; + /* default loop & jump align power, related to the target machine. eg. 2^5 */ + uint32 loopAlign = 4; + uint32 jumpAlign = 5; + /* record func_align_power in CGFunc */ +}; + +class AArch64AlignAnalysis : public AlignAnalysis { + public: + AArch64AlignAnalysis(CGFunc &func, MemPool &memPool) : AlignAnalysis(func, memPool) { + aarFunc = static_cast(&func); + } + ~AArch64AlignAnalysis() override = default; + + void FindLoopHeader() override; + void FindJumpTarget() override; + void ComputeLoopAlign() override; + void ComputeJumpAlign() override; + void ComputeCondBranchAlign() override; + bool MarkCondBranchAlign(); + bool MarkShortBranchSplit(); + void AddNopAfterMark(); + void UpdateInsnId(); + uint32 GetAlignRange(uint32 alignedVal, uint32 addr) const; + + /* filter condition */ + bool IsIncludeCall(BB &bb) override; + bool IsInSizeRange(BB &bb) override; + bool HasFallthruEdge(BB &bb) override; + bool IsInSameAlignedRegion(uint32 addr1, uint32 addr2, uint32 alignedRegionSize) const; + + private: + AArch64CGFunc *aarFunc = nullptr; +}; +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_ALIGNMENT_H */ diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_args.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_args.h new file mode 100644 index 0000000000000000000000000000000000000000..e8c10d13de4e7e7ec91b6c79ca17797ed2882c25 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_args.h @@ -0,0 +1,64 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. 
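[Editorial note, not part of the patch: a short sketch of the alignment arithmetic AArch64AlignAnalysis relies on, in the spirit of GetAlignRange and IsInSameAlignedRegion: padding in front of a loop head or jump target up to the next 2^p boundary, expressed in 4-byte nops, plus the same-region check. Helper names are illustrative.]

#include <cstdint>

namespace align_sketch {
constexpr uint32_t kAlignInsnLength = 4;  // one A64 instruction is 4 bytes

// Bytes of padding needed to move `addr` up to the next 2^alignPower boundary.
inline uint32_t PaddingBytes(uint32_t addr, uint32_t alignPower) {
  uint32_t region = 1U << alignPower;  // e.g. 2^5 = 32 for jump targets
  return (region - (addr & (region - 1))) & (region - 1);
}

// Number of nop instructions emitted for that padding.
inline uint32_t PaddingNops(uint32_t addr, uint32_t alignPower) {
  return PaddingBytes(addr, alignPower) / kAlignInsnLength;
}

// Two addresses fall into the same aligned region iff they share the region index.
inline bool InSameAlignedRegion(uint32_t addr1, uint32_t addr2, uint32_t regionSize) {
  return (addr1 / regionSize) == (addr2 / regionSize);
}
}  // namespace align_sketch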
+ * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_ARGS_H +#define MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_ARGS_H + +#include "args.h" +#include "aarch64_cgfunc.h" + +namespace maplebe { +using namespace maple; + +struct ArgInfo { + AArch64reg reg; + MIRType *mirTy; + uint32 symSize; + uint32 stkSize; + RegType regType; + MIRSymbol *sym; + const AArch64SymbolAlloc *symLoc; + uint8 memPairSecondRegSize; /* struct arg requiring two regs, size of 2nd reg */ + bool doMemPairOpt; + bool createTwoStores; + bool isTwoRegParm; +}; + +class AArch64MoveRegArgs : public MoveRegArgs { + public: + explicit AArch64MoveRegArgs(CGFunc &func) : MoveRegArgs(func) {} + ~AArch64MoveRegArgs() override = default; + void Run() override; + + private: + RegOperand *baseReg = nullptr; + const MemSegment *lastSegment = nullptr; + void CollectRegisterArgs(std::map &argsList, std::vector &indexList, + std::map &pairReg, std::vector &numFpRegs, + std::vector &fpSize) const; + ArgInfo GetArgInfo(std::map &argsList, std::vector &numFpRegs, + std::vector &fpSize, uint32 argIndex) const; + bool IsInSameSegment(const ArgInfo &firstArgInfo, const ArgInfo &secondArgInfo) const; + void GenOneInsn(const ArgInfo &argInfo, RegOperand &baseOpnd, uint32 stBitSize, AArch64reg dest, int32 offset) const; + void GenerateStpInsn(const ArgInfo &firstArgInfo, const ArgInfo &secondArgInfo); + void GenerateStrInsn(const ArgInfo &argInfo, AArch64reg reg2, uint32 numFpRegs, uint32 fpSize); + void MoveRegisterArgs(); + void MoveVRegisterArgs() const; + void MoveLocalRefVarToRefLocals(MIRSymbol &mirSym) const; + void LoadStackArgsToVReg(MIRSymbol &mirSym) const; + void MoveArgsToVReg(const CCLocInfo &ploc, MIRSymbol &mirSym) const; +}; +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_ARGS_H */ diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_call_conv.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_call_conv.h new file mode 100644 index 0000000000000000000000000000000000000000..d85dafee8c740eb875f9cee7d75c6897ded7c876 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_call_conv.h @@ -0,0 +1,120 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_CALL_CONV_H +#define MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_CALL_CONV_H + +#include "types_def.h" +#include "becommon.h" +#include "call_conv.h" + +namespace maplebe { +using namespace maple; + +/* + * We use the names used in ARM IHI 0055C_beta. $ 5.4.2. 
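[Editorial note, not part of the patch: a hypothetical sketch of the kind of adjacency test behind IsInSameSegment/GenerateStpInsn, namely that two incoming register arguments can be stored with a single stp only when their home slots sit in the same segment, have the same size, and are contiguous. The struct below is a simplified stand-in for ArgInfo, not its real layout.]

#include <cstdint>

namespace args_sketch {
struct SimpleArgInfo {
  uint32_t slotSize;  // size of the argument's home slot, in bytes
  int32_t offset;     // offset of the slot within its memory segment
  int32_t segmentId;  // which stack segment the slot belongs to
};

// True if the two arguments' stores can be fused into one stp.
inline bool CanPairWithStp(const SimpleArgInfo &first, const SimpleArgInfo &second) {
  return first.segmentId == second.segmentId &&
         first.slotSize == second.slotSize &&
         second.offset == first.offset + static_cast<int32_t>(first.slotSize);
}
}  // namespace args_sketch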
+ * nextGeneralRegNO (= _int_parm_num) : Next General-purpose Register number + * nextFloatRegNO (= _float_parm_num): Next SIMD and Floating-point Register Number + * nextStackArgAdress (= _last_memOffset): Next Stacked Argument Address + * for processing an incoming or outgoing parameter list + */ +class AArch64CallConvImpl { + public: + explicit AArch64CallConvImpl(BECommon &be) : beCommon(be) {} + + ~AArch64CallConvImpl() = default; + + /* Return size of aggregate structure copy on stack. */ + int32 LocateNextParm(MIRType &mirType, CCLocInfo &pLoc, bool isFirst = false, MIRFunction *tFunc = nullptr); + + int32 LocateRetVal(MIRType &retType, CCLocInfo &pLoc); + + void InitCCLocInfo(CCLocInfo &pLoc) const; + + /* for lmbc */ + uint32 FloatParamRegRequired(MIRStructType &structType, uint32 &fpSize); + + /* return value related */ + void InitReturnInfo(MIRType &retTy, CCLocInfo &ccLocInfo); + + void SetupSecondRetReg(const MIRType &retTy2, CCLocInfo &pLoc) const; + + void SetupToReturnThroughMemory(CCLocInfo &pLoc) const { + pLoc.regCount = 1; + pLoc.reg0 = R8; + pLoc.primTypeOfReg0 = PTY_u64; + } + + private: + BECommon &beCommon; + uint64 paramNum = 0; /* number of all types of parameters processed so far */ + uint32 nextGeneralRegNO = 0; /* number of integer parameters processed so far */ + uint32 nextFloatRegNO = 0; /* number of float parameters processed so far */ + int32 nextStackArgAdress = 0; + + AArch64reg AllocateGPRegister() { + return (nextGeneralRegNO < AArch64Abi::kNumIntParmRegs) ? AArch64Abi::kIntParmRegs[nextGeneralRegNO++] : kRinvalid; + } + + void AllocateTwoGPRegisters(CCLocInfo &pLoc) { + if ((nextGeneralRegNO + 1) < AArch64Abi::kNumIntParmRegs) { + pLoc.reg0 = AArch64Abi::kIntParmRegs[nextGeneralRegNO++]; + pLoc.reg1 = AArch64Abi::kIntParmRegs[nextGeneralRegNO++]; + } else { + pLoc.reg0 = kRinvalid; + } + } + + AArch64reg AllocateSIMDFPRegister() { + return (nextFloatRegNO < AArch64Abi::kNumFloatParmRegs) ? 
AArch64Abi::kFloatParmRegs[nextFloatRegNO++] : kRinvalid; + } + + void AllocateNSIMDFPRegisters(CCLocInfo &ploc, uint32 num) { + if ((nextFloatRegNO + num - 1) < AArch64Abi::kNumFloatParmRegs) { + switch (num) { + case kOneRegister: + ploc.reg0 = AArch64Abi::kFloatParmRegs[nextFloatRegNO++]; + break; + case kTwoRegister: + ploc.reg0 = AArch64Abi::kFloatParmRegs[nextFloatRegNO++]; + ploc.reg1 = AArch64Abi::kFloatParmRegs[nextFloatRegNO++]; + break; + case kThreeRegister: + ploc.reg0 = AArch64Abi::kFloatParmRegs[nextFloatRegNO++]; + ploc.reg1 = AArch64Abi::kFloatParmRegs[nextFloatRegNO++]; + ploc.reg2 = AArch64Abi::kFloatParmRegs[nextFloatRegNO++]; + break; + case kFourRegister: + ploc.reg0 = AArch64Abi::kFloatParmRegs[nextFloatRegNO++]; + ploc.reg1 = AArch64Abi::kFloatParmRegs[nextFloatRegNO++]; + ploc.reg2 = AArch64Abi::kFloatParmRegs[nextFloatRegNO++]; + ploc.reg3 = AArch64Abi::kFloatParmRegs[nextFloatRegNO++]; + break; + default: + CHECK_FATAL(0, "AllocateNSIMDFPRegisters: unsupported"); + } + } else { + ploc.reg0 = kRinvalid; + } + } + + void RoundNGRNUpToNextEven() { + nextGeneralRegNO = (nextGeneralRegNO + 1U) & ~1U; + } + + int32 ProcessPtyAggWhenLocateNextParm(MIRType &mirType, CCLocInfo &pLoc, uint64 &typeSize, int32 typeAlign); +}; +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_CALL_CONV_H */ diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_cfgo.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_cfgo.h new file mode 100644 index 0000000000000000000000000000000000000000..9eff8f9cd2d504753cc02bb7132a0fc17d9e7b3f --- /dev/null +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_cfgo.h @@ -0,0 +1,40 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_CFGO_H +#define MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_CFGO_H + +#include "cfgo.h" + +namespace maplebe { +class AArch64CFGOptimizer : public CFGOptimizer { + public: + AArch64CFGOptimizer(CGFunc &func, MemPool &memPool) + : CFGOptimizer(func, memPool) {} + ~AArch64CFGOptimizer() = default; + void InitOptimizePatterns() override; +}; + +class AArch64FlipBRPattern : public FlipBRPattern { + public: + explicit AArch64FlipBRPattern(CGFunc &func) : FlipBRPattern(func) {} + ~AArch64FlipBRPattern() = default; + + private: + uint32 GetJumpTargetIdx(const Insn &insn) override; + MOperator FlipConditionOp(MOperator flippedOp) override; +}; +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_CFGO_H */ diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_cfi_generator.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_cfi_generator.h new file mode 100644 index 0000000000000000000000000000000000000000..dbd1320d7fedae57964d169bd81e42ae5d1b8c8a --- /dev/null +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_cfi_generator.h @@ -0,0 +1,41 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. 
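[Editorial note, not part of the patch: a simplified, self-contained sketch of the AAPCS64-style bookkeeping AArch64CallConvImpl performs for integer arguments: consume x0..x7 through the next-register counter, round that counter up to an even number for arguments needing a register pair (as in RoundNGRNUpToNextEven), and fall back to the stacked-argument address once the registers run out. The types and the fixed 8-byte stack granularity are simplifying assumptions.]

#include <cstdint>

namespace cc_sketch {
constexpr uint32_t kNumIntParmRegs = 8;  // x0..x7

struct Loc {
  int32_t reg0 = -1;      // first register index, or -1 if passed on the stack
  int32_t stackOff = -1;  // byte offset of the stacked argument, if any
};

class IntArgAllocator {
 public:
  // Allocate one integer/pointer argument of `size` bytes (8 or 16 here).
  Loc Next(uint32_t size) {
    Loc loc;
    uint32_t regsNeeded = (size > 8) ? 2 : 1;
    if (regsNeeded == 2) {
      ngrn = (ngrn + 1U) & ~1U;  // round NGRN up to the next even register
    }
    if (ngrn + regsNeeded <= kNumIntParmRegs) {
      loc.reg0 = static_cast<int32_t>(ngrn);
      ngrn += regsNeeded;
    } else {
      ngrn = kNumIntParmRegs;    // registers are exhausted from now on
      nsaa = (nsaa + 7U) & ~7U;  // align the next stacked argument address
      loc.stackOff = static_cast<int32_t>(nsaa);
      nsaa += size;
    }
    return loc;
  }

 private:
  uint32_t ngrn = 0;  // next general-purpose register number
  uint32_t nsaa = 0;  // next stacked argument address, relative to the arg area
};
}  // namespace cc_sketch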
+ * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_CFI_GENERATOR_H +#define MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_CFI_GENERATOR_H + +#include "cfi_generator.h" + +namespace maplebe { +class AArch64GenCfi : public GenCfi { + public: + explicit AArch64GenCfi(CGFunc &func) : GenCfi(func) { + useFP = func.UseFP(); + if (func.GetMirModule().GetFlavor() == MIRFlavor::kFlavorLmbc) { + stackBaseReg = RFP; + } else { + stackBaseReg = useFP ? R29 : RSP; + } + } + ~AArch64GenCfi() = default; + + private: + void GenerateRegisterSaveDirective(BB &bb) override; + void GenerateRegisterRestoreDirective(BB &bb) override; + + /* frame pointer(x29) is available as a general-purpose register if useFP is set as false */ + AArch64reg stackBaseReg = RFP; +}; +} /* namespace maplebe */ +#endif /* MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_CFI_GENERATOR_H */ diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_cg.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_cg.h new file mode 100644 index 0000000000000000000000000000000000000000..6555183b509bb48768683bda9c56e1b62eb9ef53 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_cg.h @@ -0,0 +1,251 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_CG_H +#define MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_CG_H + +#include "cg.h" +#include "aarch64_cgfunc.h" +#include "aarch64_ssa.h" +#include "aarch64_phi_elimination.h" +#include "aarch64_prop.h" +#include "aarch64_dce.h" +#include "aarch64_live.h" +#include "aarch64_reaching.h" +#include "aarch64_args.h" +#include "aarch64_alignment.h" +#include "aarch64_validbit_opt.h" +#include "aarch64_reg_coalesce.h" +#include "aarch64_rce.h" +#include "aarch64_tailcall.h" +#include "aarch64_cfgo.h" +#include "aarch64_rematerialize.h" +#include "aarch64_pgo_gen.h" + +namespace maplebe { +constexpr int64 kShortBRDistance = (8 * 1024); +constexpr int64 kNegativeImmLowerLimit = -4096; +constexpr int32 kIntRegTypeNum = 5; +constexpr uint32 kAlignPseudoSize = 3; +constexpr uint32 kInsnSize = 4; +constexpr uint32 kAlignMovedFlag = 31; + +/* Supporting classes for GCTIB merging */ +class GCTIBKey { + public: + GCTIBKey(MapleAllocator &allocator, uint32 rcHeader, std::vector &patternWords) + : header(rcHeader), bitMapWords(allocator.Adapter()) { + (void)bitMapWords.insert(bitMapWords.cbegin(), patternWords.cbegin(), patternWords.cend()); + } + + ~GCTIBKey() = default; + + uint32 GetHeader() const { + return header; + } + + const MapleVector &GetBitmapWords() const { + return bitMapWords; + } + + private: + uint32 header; + MapleVector bitMapWords; +}; + +class Hasher { + public: + size_t operator()(const GCTIBKey *key) const { + CHECK_NULL_FATAL(key); + size_t hash = key->GetHeader(); + return hash; + } +}; + +class EqualFn { + public: + bool operator()(const GCTIBKey *firstKey, const GCTIBKey *secondKey) const { + CHECK_NULL_FATAL(firstKey); + CHECK_NULL_FATAL(secondKey); + const MapleVector &firstWords = firstKey->GetBitmapWords(); + const MapleVector &secondWords = secondKey->GetBitmapWords(); + + if ((firstKey->GetHeader() != secondKey->GetHeader()) || (firstWords.size() != secondWords.size())) { + return false; + } + + for (size_t i = 0; i < firstWords.size(); ++i) { + if (firstWords[i] != secondWords[i]) { + return false; + } + } + return true; + } +}; + +class GCTIBPattern { + public: + GCTIBPattern(GCTIBKey &patternKey, MemPool &mp) : name(&mp) { + key = &patternKey; + id = GetId(); + name = GCTIB_PREFIX_STR + std::string("PTN_") + std::to_string(id); + } + + ~GCTIBPattern() = default; + + int GetId() const { + static int createNum = 0; + return createNum++; + } + + std::string GetName() const { + ASSERT(!name.empty(), "null name check!"); + return std::string(name.c_str()); + } + + void SetName(const std::string &ptnName) { + name = ptnName; + } + + private: + int id = 0; + MapleString name; + GCTIBKey *key = nullptr; +}; + +/* sub Target info & implement */ +class AArch64CG : public CG { + public: + AArch64CG(MIRModule &mod, const CGOptions &opts, const std::vector &nameVec, + const std::unordered_map> &patternMap) + : CG(mod, opts), + ehExclusiveNameVec(nameVec), + cyclePatternMap(patternMap), + keyPatternMap(allocator.Adapter()), + symbolPatternMap(allocator.Adapter()) {} + + ~AArch64CG() override = default; + + CGFunc *CreateCGFunc(MIRModule &mod, MIRFunction &mirFunc, BECommon &bec, MemPool &memPool, + StackMemPool &stackMp, MapleAllocator &mallocator, uint32 funcId) override { + return memPool.New(mod, *this, mirFunc, bec, memPool, stackMp, mallocator, funcId); + } + + MemLayout *CreateMemLayout(MemPool &mp, BECommon &b, MIRFunction &f, + MapleAllocator &mallocator) const override { + return mp.New(b, f, mallocator); + } + + RegisterInfo 
*CreateRegisterInfo(MemPool &mp, MapleAllocator &mallocator) const override { + return mp.New(mallocator); + } + + void EnrollTargetPhases(MaplePhaseManager *pm) const override; + + const std::unordered_map> &GetCyclePatternMap() const { + return cyclePatternMap; + } + + void GenerateObjectMaps(BECommon &beCommon) override; + + bool IsExclusiveFunc(MIRFunction &mirFunc) override; + + void FindOrCreateRepresentiveSym(std::vector &bitmapWords, uint32 rcHeader, const std::string &name); + + void CreateRefSymForGlobalPtn(GCTIBPattern &ptn) const; + + Insn &BuildPhiInsn(RegOperand &defOpnd, Operand &listParam) override; + + PhiOperand &CreatePhiOperand(MemPool &mp, MapleAllocator &mAllocator) override; + + std::string FindGCTIBPatternName(const std::string &name) const override; + + LiveAnalysis *CreateLiveAnalysis(MemPool &mp, CGFunc &f) const override { + return mp.New(f, mp); + } + ReachingDefinition *CreateReachingDefinition(MemPool &mp, CGFunc &f) const override { + return mp.New(f, mp); + } + MoveRegArgs *CreateMoveRegArgs(MemPool &mp, CGFunc &f) const override { + return mp.New(f); + } + AlignAnalysis *CreateAlignAnalysis(MemPool &mp, CGFunc &f) const override { + return mp.New(f, mp); + } + CGSSAInfo *CreateCGSSAInfo(MemPool &mp, CGFunc &f, DomAnalysis &da, MemPool &tmp) const override { + return mp.New(f, da, mp, tmp); + } + LiveIntervalAnalysis *CreateLLAnalysis(MemPool &mp, CGFunc &f) const override { + return mp.New(f, mp); + } + PhiEliminate *CreatePhiElimintor(MemPool &mp, CGFunc &f, CGSSAInfo &ssaInfo) const override { + return mp.New(f, ssaInfo, mp); + } + CGProp *CreateCGProp(MemPool &mp, CGFunc &f, CGSSAInfo &ssaInfo, LiveIntervalAnalysis &ll) const override { + return mp.New(mp, f, ssaInfo, ll); + } + CGDce *CreateCGDce(MemPool &mp, CGFunc &f, CGSSAInfo &ssaInfo) const override { + return mp.New(mp, f, ssaInfo); + } + ValidBitOpt *CreateValidBitOpt(MemPool &mp, CGFunc &f, CGSSAInfo &ssaInfo) const override { + return mp.New(f, ssaInfo); + } + RedundantComputeElim *CreateRedundantCompElim(MemPool &mp, CGFunc &f, CGSSAInfo &ssaInfo) const override { + return mp.New(f, ssaInfo, mp); + } + TailCallOpt *CreateCGTailCallOpt(MemPool &mp, CGFunc &f) const override { + return mp.New(mp, f); + } + CFGOptimizer *CreateCFGOptimizer(MemPool &mp, CGFunc &f) const override { + return mp.New(f, mp); + } + Rematerializer *CreateRematerializer(MemPool &mp) const override { + return mp.New(); + } + /* Return the copy operand id of reg1 if it is an insn who just do copy from reg1 to reg2. + * i. mov reg2, reg1 + * ii. add/sub reg2, reg1, 0/zero register + * iii. 
mul reg2, reg1, 1 + */ + bool IsEffectiveCopy(Insn &insn) const final; + bool IsTargetInsn(MOperator mOp) const final; + bool IsClinitInsn(MOperator mOp) const final; + void DumpTargetOperand(Operand &opnd, const OpndDesc &opndDesc) const final; + const InsnDesc &GetTargetMd(MOperator mOp) const final { + return kMd[mOp]; + } + CGProfGen *CreateCGProfGen(MemPool &mp, CGFunc &f) const override { + return mp.New(f, mp); + }; + + static const InsnDesc kMd[kMopLast]; + enum : uint8 { + kR8List, + kR16List, + kR32List, + kR64List, + kV64List + }; + static std::array, kIntRegTypeNum> intRegNames; + static std::array vectorRegNames; + + private: + const std::vector &ehExclusiveNameVec; + const std::unordered_map> &cyclePatternMap; + MapleUnorderedMap keyPatternMap; + MapleUnorderedMap symbolPatternMap; +}; +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_CG_H */ diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_cgfunc.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_cgfunc.h new file mode 100644 index 0000000000000000000000000000000000000000..c62db13473bab7d9c01d2c0bd01a85d7b8cb64f1 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_cgfunc.h @@ -0,0 +1,969 @@ +/* + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
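[Editorial note, not part of the patch: a sketch of the GCTIB merging that GCTIBKey, Hasher and EqualFn support. Keys hash on the reference-count header only, equality also compares the bitmap words, and each distinct pattern maps to one representative symbol that every class with that pattern reuses. Standard containers and the naming here are stand-ins for the Maple allocator types and GCTIB_PREFIX_STR.]

#include <cstddef>
#include <cstdint>
#include <string>
#include <unordered_map>
#include <vector>

namespace gctib_sketch {
struct Key {
  uint32_t header;
  std::vector<uint64_t> bitMapWords;
  bool operator==(const Key &rhs) const {
    return header == rhs.header && bitMapWords == rhs.bitMapWords;
  }
};

struct KeyHash {
  size_t operator()(const Key &key) const {
    return key.header;  // same choice as Hasher above: hash on the header only
  }
};

// One representative symbol name per distinct (header, bitmap) pattern.
using PatternMap = std::unordered_map<Key, std::string, KeyHash>;

std::string FindOrCreatePattern(PatternMap &patterns, const Key &key) {
  auto it = patterns.find(key);
  if (it != patterns.end()) {
    return it->second;  // merged: reuse the existing representative symbol
  }
  // The real code builds the name from GCTIB_PREFIX_STR plus a running id.
  std::string name = "PTN_" + std::to_string(patterns.size());
  patterns.emplace(key, name);
  return name;
}
}  // namespace gctib_sketch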
+ */ +#ifndef MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_CGFUNC_H +#define MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_CGFUNC_H + +#include "cgfunc.h" +#include "call_conv.h" +#include "mpl_atomic.h" +#include "aarch64_abi.h" +#include "aarch64_operand.h" +#include "aarch64_insn.h" +#include "aarch64_memlayout.h" +#include "aarch64_reg_info.h" +#include "aarch64_optimize_common.h" +#include "aarch64_call_conv.h" + +namespace maplebe { +class LmbcArgInfo { + public: + explicit LmbcArgInfo(MapleAllocator &mallocator) + : lmbcCallArgs(mallocator.Adapter()), + lmbcCallArgTypes(mallocator.Adapter()), + lmbcCallArgOffsets(mallocator.Adapter()), + lmbcCallArgNumOfRegs(mallocator.Adapter()) {} + MapleVector lmbcCallArgs; + MapleVector lmbcCallArgTypes; + MapleVector lmbcCallArgOffsets; + MapleVector lmbcCallArgNumOfRegs; // # of regs needed to complete struct + int32 lmbcTotalStkUsed = -1; // remove when explicit addr for large agg is available +}; + +class AArch64CGFunc : public CGFunc { + public: + AArch64CGFunc(MIRModule &mod, CG &c, MIRFunction &f, BECommon &b, + MemPool &memPool, StackMemPool &stackMp, MapleAllocator &mallocator, uint32 funcId) + : CGFunc(mod, c, f, b, memPool, stackMp, mallocator, funcId), + calleeSavedRegs(mallocator.Adapter()), + proEpilogSavedRegs(mallocator.Adapter()), + phyRegOperandTable(mallocator.Adapter()), + hashLabelOpndTable(mallocator.Adapter()), + hashOfstOpndTable(mallocator.Adapter()), + hashMemOpndTable(mallocator.Adapter()), + memOpndsRequiringOffsetAdjustment(mallocator.Adapter()), + memOpndsForStkPassedArguments(mallocator.Adapter()), + immOpndsRequiringOffsetAdjustment(mallocator.Adapter()), + immOpndsRequiringOffsetAdjustmentForRefloc(mallocator.Adapter()) { + uCatch.regNOCatch = 0; + SetUseFP(CGOptions::UseFramePointer() || HasVLAOrAlloca() || !f.GetModule()->IsCModule() || + f.GetModule()->GetFlavor() == MIRFlavor::kFlavorLmbc); + } + + ~AArch64CGFunc() override = default; + + uint32 GetRefCount() const { + return refCount; + } + + int32 GetBeginOffset() const { + return beginOffset; + } + + MOperator PickMovBetweenRegs(PrimType destType, PrimType srcType) const; + MOperator PickMovInsn(const RegOperand &lhs, const RegOperand &rhs) const; + + regno_t NewVRflag() override { + ASSERT(maxRegCount > kRFLAG, "CG internal error."); + constexpr uint8 size = 4; + if (maxRegCount <= kRFLAG) { + maxRegCount += (kRFLAG + kVRegisterNumber); + vRegTable.resize(maxRegCount); + } + new (&vRegTable[kRFLAG]) VirtualRegNode(kRegTyCc, size); + return kRFLAG; + } + + MIRType *LmbcGetAggTyFromCallSite(StmtNode *stmt, std::vector **parmList) const; + RegOperand &GetOrCreateResOperand(const BaseNode &parent, PrimType primType); + MIRStructType *GetLmbcStructArgType(BaseNode &stmt, size_t argNo); + + void IntrinsifyGetAndAddInt(ListOperand &srcOpnds, PrimType pty); + void IntrinsifyGetAndSetInt(ListOperand &srcOpnds, PrimType pty); + void IntrinsifyCompareAndSwapInt(ListOperand &srcOpnds, PrimType pty); + void IntrinsifyStringIndexOf(ListOperand &srcOpnds, const MIRSymbol &funcSym); + void GenSaveMethodInfoCode(BB &bb) override; + void DetermineReturnTypeofCall() override; + void HandleRCCall(bool begin, const MIRSymbol *retRef = nullptr) override; + bool GenRetCleanup(const IntrinsiccallNode *cleanupNode, bool forEA = false); + void HandleRetCleanup(NaryStmtNode &retNode) override; + void MergeReturn() override; + RegOperand *ExtractNewMemBase(const MemOperand &memOpnd); + void SelectDassign(DassignNode &stmt, Operand &opnd0) override; + void SelectDassignoff(DassignoffNode &stmt, Operand 
&opnd0) override; + void SelectRegassign(RegassignNode &stmt, Operand &opnd0) override; + void SelectAbort() override; + void SelectAssertNull(UnaryStmtNode &stmt) override; + void SelectAsm(AsmNode &node) override; + MemOperand *GenLargeAggFormalMemOpnd(const MIRSymbol &sym, uint32 align, int64 offset, + bool needLow12 = false); + MemOperand *FixLargeMemOpnd(MemOperand &memOpnd, uint32 align); + MemOperand *FixLargeMemOpnd(MOperator mOp, MemOperand &memOpnd, uint32 dSize, uint32 opndIdx); + uint32 LmbcFindTotalStkUsed(std::vector *paramList); + uint32 LmbcTotalRegsUsed(); + bool LmbcSmallAggForRet(const BaseNode &bNode, Operand *src, int32 offset = 0, bool skip1 = false); + bool LmbcSmallAggForCall(BlkassignoffNode &bNode, const Operand *src, std::vector **parmList); + bool GetNumReturnRegsForIassignfpoff(MIRType *rType, PrimType &primType, uint32 &numRegs); + void GenIassignfpoffStore(Operand &srcOpnd, int32 offset, uint32 byteSize, PrimType primType); + void SelectAggDassign(DassignNode &stmt) override; + void SelectIassign(IassignNode &stmt) override; + void SelectIassignoff(IassignoffNode &stmt) override; + void SelectIassignfpoff(IassignFPoffNode &stmt, Operand &opnd) override; + void SelectIassignspoff(PrimType pTy, int32 offset, Operand &opnd) override; + void SelectBlkassignoff(BlkassignoffNode &bNode, Operand *src) override; + void SelectAggIassign(IassignNode &stmt, Operand &addrOpnd) override; + void SelectReturnSendOfStructInRegs(BaseNode *x) override; + void SelectReturn(Operand *opnd0) override; + void SelectIgoto(Operand *opnd0) override; + void SelectCondGoto(CondGotoNode &stmt, Operand &opnd0, Operand &opnd1) override; + void SelectCondGoto(LabelOperand &targetOpnd, Opcode jmpOp, Opcode cmpOp, Operand &origOpnd0, + Operand &origOpnd1, PrimType primType, bool signedCond); + void SelectCondSpecialCase1(CondGotoNode &stmt, BaseNode &expr) override; + void SelectCondSpecialCase2(const CondGotoNode &stmt, BaseNode &expr) override; + void SelectGoto(GotoNode &stmt) override; + void SelectCall(CallNode &callNode) override; + void SelectIcall(IcallNode &icallNode, Operand &srcOpnd) override; + void SelectIntrinCall(IntrinsiccallNode &intrinsicCallNode) override; + Operand *SelectIntrinsicOpWithOneParam(IntrinsicopNode &intrnNode, std::string name) override; + Operand *SelectIntrinsicOpWithNParams(IntrinsicopNode &intrnNode, PrimType retType, const std::string &name) override; + Operand *SelectCclz(IntrinsicopNode &intrnNode) override; + Operand *SelectCctz(IntrinsicopNode &intrnNode) override; + Operand *SelectCpopcount(IntrinsicopNode &intrnNode) override; + Operand *SelectCparity(IntrinsicopNode &intrnNode) override; + Operand *SelectCclrsb(IntrinsicopNode &intrnNode) override; + Operand *SelectCisaligned(IntrinsicopNode &intrnNode) override; + Operand *SelectCalignup(IntrinsicopNode &intrnNode) override; + Operand *SelectCaligndown(IntrinsicopNode &intrnNode) override; + Operand *SelectCSyncFetch(IntrinsicopNode &intrinopNode, Opcode op, bool fetchBefore) override; + Operand *SelectCSyncBoolCmpSwap(IntrinsicopNode &intrinopNode) override; + Operand *SelectCSyncValCmpSwap(IntrinsicopNode &intrinopNode) override; + Operand *SelectCSyncLockTestSet(IntrinsicopNode &intrinopNode, PrimType pty) override; + Operand *SelectCSyncSynchronize(IntrinsicopNode &intrinopNode) override; + AArch64isa::MemoryOrdering PickMemOrder(std::memory_order memOrder, bool isLdr) const; + Operand *SelectCAtomicLoadN(IntrinsicopNode &intrinsicopNode) override; + Operand *SelectCAtomicExchangeN(const 
IntrinsiccallNode &intrinsiccallNode) override; + Operand *SelectAtomicLoad(Operand &addrOpnd, PrimType primType, AArch64isa::MemoryOrdering memOrder); + Operand *SelectCAtomicFetch(IntrinsicopNode &intrinopNode, Opcode op, bool fetchBefore) override; + Operand *SelectCReturnAddress(IntrinsicopNode &intrinopNode) override; + void SelectCAtomicExchange(const IntrinsiccallNode &intrinsiccallNode) override; + void SelectMembar(StmtNode &membar) override; + void SelectComment(CommentNode &comment) override; + + void HandleCatch() override; + Operand *SelectDread(const BaseNode &parent, AddrofNode &expr) override; + RegOperand *SelectRegread(RegreadNode &expr) override; + + void SelectAddrof(Operand &result, StImmOperand &stImm, FieldID field = 0); + void SelectAddrof(Operand &result, MemOperand &memOpnd, FieldID field = 0); + Operand *SelectCSyncCmpSwap(const IntrinsicopNode &intrinopNode, bool retBool = false); + Operand *SelectAddrof(AddrofNode &expr, const BaseNode &parent, bool isAddrofoff) override; + Operand *SelectAddrofoff(AddrofoffNode &expr, const BaseNode &parent) override; + Operand &SelectAddrofFunc(AddroffuncNode &expr, const BaseNode &parent) override; + Operand &SelectAddrofLabel(AddroflabelNode &expr, const BaseNode &parent) override; + + PrimType GetDestTypeFromAggSize(uint32 bitSize) const; + + Operand *SelectIread(const BaseNode &parent, IreadNode &expr, + int extraOffset = 0, PrimType finalBitFieldDestType = kPtyInvalid) override; + Operand *SelectIreadoff(const BaseNode &parent, IreadoffNode &ireadoff) override; + Operand *SelectIreadfpoff(const BaseNode &parent, IreadFPoffNode &ireadoff) override; + Operand *SelectIntConst(MIRIntConst &intConst) override; + Operand *HandleFmovImm(PrimType stype, int64 val, MIRConst &mirConst, const BaseNode &parent); + Operand *SelectFloatConst(MIRFloatConst &floatConst, const BaseNode &parent) override; + Operand *SelectDoubleConst(MIRDoubleConst &doubleConst, const BaseNode &parent) override; + Operand *SelectStrConst(MIRStrConst &strConst) override; + Operand *SelectStr16Const(MIRStr16Const &str16Const) override; + + void SelectAdd(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) override; + Operand *SelectAdd(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) override; + Operand &SelectCGArrayElemAdd(BinaryNode &node, const BaseNode &parent) override; + void SelectMadd(Operand &resOpnd, Operand &opndM0, Operand &opndM1, Operand &opnd1, PrimType primType) override; + Operand *SelectMadd(BinaryNode &node, Operand &opndM0, Operand &opndM1, Operand &opnd1, + const BaseNode &parent) override; + Operand *SelectRor(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) override; + Operand *SelectShift(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) override; + Operand *SelectSub(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) override; + void SelectSub(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) override; + Operand *SelectBand(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) override; + void SelectBand(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) override; + Operand *SelectBior(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) override; + void SelectBior(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) override; + Operand *SelectBxor(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) 
override; + void SelectBxor(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) override; + + void SelectBxorShift(Operand &resOpnd, Operand *opnd0, Operand *opnd1, Operand &opnd2, PrimType primType); + Operand *SelectLand(BinaryNode &node, Operand &lhsOpnd, Operand &rhsOpnd, const BaseNode &parent) override; + Operand *SelectLor(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent, + bool parentIsBr = false) override; + Operand *SelectMin(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) override; + void SelectMin(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) override; + Operand *SelectMax(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) override; + void SelectMax(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) override; + void SelectFMinFMax(Operand &resOpnd, Operand &opnd0, Operand &opnd1, bool is64Bits, bool isMin); + void SelectCmpOp(Operand &resOpnd, Operand &lhsOpnd, Operand &rhsOpnd, + Opcode opcode, PrimType primType, const BaseNode &parent); + + Operand *SelectCmpOp(CompareNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) override; + + void SelectAArch64Cmp(Operand &o0, Operand &o1, bool isIntType, uint32 dsize); + void SelectTargetFPCmpQuiet(Operand &o0, Operand &o1, uint32 dsize); + void SelectAArch64CCmp(Operand &o, Operand &i, Operand &nzcv, CondOperand &cond, bool is64Bits); + void SelectAArch64CSet(Operand &res, CondOperand &cond, bool is64Bits); + void SelectAArch64CSINV(Operand &res, Operand &o0, Operand &o1, CondOperand &cond, bool is64Bits); + void SelectAArch64CSINC(Operand &res, Operand &o0, Operand &o1, CondOperand &cond, bool is64Bits); + void SelectShift(Operand &resOpnd, Operand &opnd0, Operand &opnd1, ShiftDirection direct, PrimType primType); + Operand *SelectMpy(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) override; + void SelectMpy(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) override; + /* method description contains method information which is metadata for reflection. 
*/ + MemOperand *AdjustMemOperandIfOffsetOutOfRange(MemOperand *memOpnd, regno_t vrNum, bool isDest, Insn &insn, + AArch64reg regNum, bool &isOutOfRange); + void SelectAddAfterInsn(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType, bool isDest, Insn &insn); + bool IsImmediateOffsetOutOfRange(const MemOperand &memOpnd, uint32 bitLen); + bool IsOperandImmValid(MOperator mOp, Operand *o, uint32 opndIdx); + Operand *SelectRem(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) override; + void SelectDiv(Operand &resOpnd, Operand &origOpnd0, Operand &opnd1, PrimType primType) override; + Operand *SelectDiv(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) override; + Operand *SelectAbsSub(Insn &lastInsn, const UnaryNode &node, Operand &newOpnd0); + Operand *SelectAbs(UnaryNode &node, Operand &opnd0) override; + Operand *SelectBnot(UnaryNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectBswap(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectExtractbits(ExtractbitsNode &node, Operand &srcOpnd, const BaseNode &parent) override; + Operand *SelectRegularBitFieldLoad(ExtractbitsNode &node, const BaseNode &parent) override; + Operand *SelectDepositBits(DepositbitsNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) override; + Operand *SelectLnot(UnaryNode &node, Operand &srcOpnd, const BaseNode &parent) override; + Operand *SelectNeg(UnaryNode &node, Operand &opnd0, const BaseNode &parent) override; + void SelectNeg(Operand &dest, Operand &srcOpnd, PrimType primType); + void SelectMvn(Operand &dest, Operand &src, PrimType primType); + Operand *SelectRecip(UnaryNode &node, Operand &src, const BaseNode &parent) override; + Operand *SelectSqrt(UnaryNode &node, Operand &src, const BaseNode &parent) override; + Operand *SelectCeil(TypeCvtNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectFloor(TypeCvtNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectRetype(TypeCvtNode &node, Operand &opnd0) override; + Operand *SelectRound(TypeCvtNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCvt(const BaseNode &parent, TypeCvtNode &node, Operand &opnd0) override; + Operand *SelectTrunc(TypeCvtNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectSelect(TernaryNode &node, Operand &opnd0, Operand &trueOpnd, Operand &falseOpnd, + const BaseNode &parent, bool hasCompare = false) override; + Operand *SelectMalloc(UnaryNode &node, Operand &opnd0) override; + Operand *SelectAlloca(UnaryNode &node, Operand &opnd0) override; + Operand *SelectGCMalloc(GCMallocNode &node) override; + Operand *SelectJarrayMalloc(JarrayMallocNode &node, Operand &opnd0) override; + void SelectSelect(Operand &resOpnd, Operand &condOpnd, Operand &trueOpnd, Operand &falseOpnd, PrimType dtype, + PrimType ctype, bool hasCompare = false, ConditionCode cc = CC_NE); + void SelectAArch64Select(Operand &dest, Operand &opnd0, Operand &opnd1, CondOperand &cond, bool isIntType, + uint32 is64bits); + void SelectRangeGoto(RangeGotoNode &rangeGotoNode, Operand &srcOpnd) override; + Operand *SelectLazyLoad(Operand &opnd0, PrimType primType) override; + Operand *SelectLazyLoadStatic(MIRSymbol &st, int64 offset, PrimType primType) override; + Operand *SelectLoadArrayClassCache(MIRSymbol &st, int64 offset, PrimType primType) override; + RegOperand &SelectCopy(Operand &src, PrimType stype, PrimType dtype) override; 
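[Editorial note, not part of the patch: a simplified model of what IsImmediateOffsetOutOfRange and SplitOffsetWithAddInstruction guard against. The base-plus-unsigned-immediate load/store form only encodes offsets that are non-negative multiples of the access size within a 12-bit scaled field, so larger offsets are split into an add on a scratch base register plus a small remaining offset. Negative and unscaled addressing forms are ignored here for brevity.]

#include <cstdint>

namespace memofst_sketch {
// True if `offset` fits the unsigned-scaled immediate form for a `byteSize`-byte access.
inline bool FitsUnsignedScaled(int64_t offset, uint32_t byteSize) {
  if (offset < 0 || (offset % byteSize) != 0) {
    return false;
  }
  return (offset / byteSize) < (1 << 12);  // 12-bit scaled immediate field
}

// Split an over-large offset into the part moved into an add instruction and
// the part that stays in the memory operand.
inline void SplitOffset(int64_t offset, uint32_t byteSize, int64_t &addPart, int64_t &memPart) {
  const int64_t limit = static_cast<int64_t>(byteSize) << 12;  // first offset that no longer fits
  addPart = (offset / limit) * limit;  // becomes: add scratch, base, #addPart
  memPart = offset - addPart;          // stays as:  [scratch, #memPart]
}
}  // namespace memofst_sketch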
+ void SelectCopy(Operand &dest, PrimType dtype, Operand &src, PrimType stype); + void SelectCopyImm(Operand &dest, PrimType dType, ImmOperand &src, PrimType sType); + void SelectCopyImm(Operand &dest, ImmOperand &src, PrimType dtype); + void SelectLibCall(const std::string &funcName, std::vector &opndVec, PrimType primType, + PrimType retPrimType, bool is2ndRet = false); + void SelectLibCallNArg(const std::string &funcName, std::vector &opndVec, std::vector pt, + PrimType retPrimType, bool is2ndRet); + bool IsRegRematCand(const RegOperand ®) const; + void ClearRegRematInfo(const RegOperand ®) const; + bool IsRegSameRematInfo(const RegOperand ®Dest, const RegOperand ®Src) const; + void ReplaceOpndInInsn(RegOperand ®Dest, RegOperand ®Src, Insn &insn, regno_t destNO) override; + void CleanupDeadMov(bool dumpInfo) override; + void GetRealCallerSaveRegs(const Insn &insn, std::set &realSaveRegs) override; + Operand &GetTargetRetOperand(PrimType primType, int32 sReg) override; + Operand &GetOrCreateRflag() override; + const Operand *GetRflag() const override; + RegOperand &GetOrCreatevaryreg(); + RegOperand &CreateRegisterOperandOfType(PrimType primType); + RegOperand &CreateRegisterOperandOfType(RegType regType, uint32 byteLen); + RegOperand &CreateRflagOperand(); + RegOperand &GetOrCreateSpecialRegisterOperand(PregIdx sregIdx, PrimType primType); + MemOperand *GetOrCreatSpillMem(regno_t vrNum) override; + void FreeSpillRegMem(regno_t vrNum) override; + RegOperand &GetOrCreatePhysicalRegisterOperand(AArch64reg regNO, uint32 size, RegType kind, uint32 flag = 0); + RegOperand &GetOrCreatePhysicalRegisterOperand(std::string &asmAttr); + RegOperand *CreateVirtualRegisterOperand(regno_t vRegNO, uint32 size, RegType kind, uint32 flg = 0) const; + RegOperand &CreateVirtualRegisterOperand(regno_t vRegNO) override; + RegOperand &GetOrCreateVirtualRegisterOperand(regno_t vRegNO) override; + RegOperand &GetOrCreateVirtualRegisterOperand(RegOperand ®Opnd) override; + const LabelOperand *GetLabelOperand(LabelIdx labIdx) const override; + LabelOperand &GetOrCreateLabelOperand(LabelIdx labIdx) override; + LabelOperand &GetOrCreateLabelOperand(BB &bb) override; + uint32 GetAggCopySize(uint32 offset1, uint32 offset2, uint32 alignment) const; + + RegOperand *SelectVectorAddLong(PrimType rType, Operand *o1, Operand *o2, PrimType otyp, bool isLow) override; + RegOperand *SelectVectorAddWiden(Operand *o1, PrimType otyp1, Operand *o2, PrimType otyp2, bool isLow) override; + RegOperand *SelectVectorAbs(PrimType rType, Operand *o1) override; + RegOperand *SelectVectorBinOp(PrimType rType, Operand *o1, PrimType oty1, Operand *o2, + PrimType oty2, Opcode opc) override; + RegOperand *SelectVectorBitwiseOp(PrimType rType, Operand *o1, PrimType oty1, Operand *o2, + PrimType oty2, Opcode opc) override; + RegOperand *SelectVectorCompare(Operand *o1, PrimType oty1, Operand *o2, PrimType oty2, Opcode opc) override; + RegOperand *SelectVectorCompareZero(Operand *o1, PrimType oty1, Operand *o2, Opcode opc) override; + RegOperand *SelectOneElementVectorCopy(Operand *src, PrimType sType); + RegOperand *SelectVectorImmMov(PrimType rType, Operand *src, PrimType sType); + RegOperand *SelectVectorRegMov(PrimType rType, Operand *src, PrimType sType); + RegOperand *SelectVectorFromScalar(PrimType rType, Operand *src, PrimType sType) override; + RegOperand *SelectVectorGetElement(PrimType rType, Operand *src, PrimType sType, int32 lane) override; + RegOperand *SelectVectorDup(PrimType rType, Operand *src, bool getLow) override; + 
RegOperand *SelectVectorAbsSubL(PrimType rType, Operand *o1, Operand *o2, PrimType oTy, bool isLow) override; + RegOperand *SelectVectorMadd(Operand *o1, PrimType oTyp1, Operand *o2, PrimType oTyp2, + Operand *o3, PrimType oTyp3) override; + RegOperand *SelectVectorMerge(PrimType rType, Operand *o1, Operand *o2, int32 index) override; + RegOperand *SelectVectorMull(PrimType rType, Operand *o1, PrimType oTyp1, + Operand *o2, PrimType oTyp2, bool isLow) override; + RegOperand *SelectVectorNarrow(PrimType rType, Operand *o1, PrimType otyp) override; + RegOperand *SelectVectorNarrow2(PrimType rType, Operand *o1, PrimType oty1, Operand *o2, PrimType oty2) override; + RegOperand *SelectVectorNeg(PrimType rType, Operand *o1) override; + RegOperand *SelectVectorNot(PrimType rType, Operand *o1) override; + RegOperand *SelectVectorPairwiseAdalp(Operand *src1, PrimType sty1, Operand *src2, PrimType sty2) override; + RegOperand *SelectVectorPairwiseAdd(PrimType rType, Operand *src, PrimType sType) override; + RegOperand *SelectVectorReverse(PrimType rType, Operand *src, PrimType sType, uint32 size) override; + RegOperand *SelectVectorSetElement(Operand *eOpnd, PrimType eType, Operand *vOpnd, + PrimType vType, int32 lane) override; + RegOperand *SelectVectorSelect(Operand &cond, PrimType rType, Operand &o0, Operand &o1); + RegOperand *SelectVectorShift(PrimType rType, Operand *o1, PrimType oty1, Operand *o2, + PrimType oty2, Opcode opc) override; + RegOperand *SelectVectorShiftImm(PrimType rType, Operand *o1, Operand *imm, int32 sVal, Opcode opc) override; + RegOperand *SelectVectorShiftRNarrow(PrimType rType, Operand *o1, PrimType oType, Operand *o2, bool isLow) override; + RegOperand *SelectVectorSubWiden(PrimType resType, Operand *o1, PrimType otyp1, + Operand *o2, PrimType otyp2, bool isLow, bool isWide) override; + RegOperand *SelectVectorSum(PrimType rType, Operand *o1, PrimType oType) override; + RegOperand *SelectVectorTableLookup(PrimType rType, Operand *o1, Operand *o2) override; + RegOperand *SelectVectorWiden(PrimType rType, Operand *o1, PrimType otyp, bool isLow) override; + RegOperand *SelectVectorMovNarrow(PrimType rType, Operand *opnd, PrimType oType) override; + + void SelectVectorCvt(Operand *res, PrimType rType, Operand *o1, PrimType oType); + void SelectVectorZip(PrimType rType, Operand *o1, Operand *o2); + void SelectStackSave(); + void SelectStackRestore(const IntrinsiccallNode &intrnNode); + + void PrepareVectorOperands(Operand **o1, PrimType &oty1, Operand **o2, PrimType &oty2); + RegOperand *AdjustOneElementVectorOperand(PrimType oType, RegOperand *opnd); + bool DistanceCheck(const BB &bb, LabelIdx targLabIdx, uint32 targId) const; + + PrimType FilterOneElementVectorType(PrimType origTyp) const { + PrimType nType = origTyp; + if (origTyp == PTY_i64 || origTyp == PTY_u64) { + nType = PTY_f64; + } + return nType; + } + + ImmOperand &CreateImmOperand(PrimType ptyp, int64 val) override { + if (ptyp == PTY_f32 || ptyp == PTY_f64) { + ASSERT(val == 0, "val must be 0!"); + return CreateImmOperand(Operand::kOpdFPImmediate, 0, static_cast(GetPrimTypeBitSize(ptyp)), false); + } + return CreateImmOperand(val, GetPrimTypeBitSize(ptyp), IsSignedInteger(ptyp)); + } + + + const Operand *GetFloatRflag() const override { + return nullptr; + } + /* create an integer immediate operand */ + ImmOperand &CreateImmOperand(int64 val, uint32 size, bool isSigned, VaryType varyType = kNotVary, + bool isFmov = false) const { + return *memPool->New(val, size, isSigned, varyType, isFmov); + } + + ImmOperand 
&CreateImmOperand(Operand::OperandType type, int64 val, uint32 size, bool isSigned) { + return *memPool->New(type, val, size, isSigned); + } + + ListOperand *CreateListOpnd(MapleAllocator &allocator) { + return memPool->New(allocator); + } + + OfstOperand &GetOrCreateOfstOpnd(uint64 offset, uint32 size); + + OfstOperand &CreateOfstOpnd(uint64 offset, uint32 size) const { + return *memPool->New(offset, size); + } + + OfstOperand &CreateOfstOpnd(const MIRSymbol &mirSymbol, int32 relocs) const { + return *memPool->New(mirSymbol, 0, relocs); + } + + OfstOperand &CreateOfstOpnd(const MIRSymbol &mirSymbol, int64 offset, int32 relocs) const { + return *memPool->New(mirSymbol, 0, offset, relocs); + } + + StImmOperand &CreateStImmOperand(const MIRSymbol &mirSymbol, int64 offset, int32 relocs) const { + return *memPool->New(mirSymbol, offset, relocs); + } + + RegOperand &GetOrCreateFramePointerRegOperand() override { + return GetOrCreateStackBaseRegOperand(); + } + + RegOperand &GetOrCreateStackBaseRegOperand() override { + AArch64reg reg; + if (GetMirModule().GetFlavor() == MIRFlavor::kFlavorLmbc) { + reg = RSP; + } else { + reg = RFP; + } + return GetOrCreatePhysicalRegisterOperand(reg, GetPointerSize() * kBitsPerByte, kRegTyInt); + } + + RegOperand &GenStructParamIndex(RegOperand &base, const BaseNode &indexExpr, int shift, PrimType baseType); + void SelectAddrofAfterRa(Operand &result, StImmOperand &stImm, std::vector& rematInsns); + MemOperand &GetOrCreateMemOpndAfterRa(const MIRSymbol &symbol, int32 offset, uint32 size, + bool needLow12, RegOperand *regOp, std::vector& rematInsns); + + MemOperand &GetOrCreateMemOpnd(const MIRSymbol &symbol, int64 offset, uint32 size, bool forLocalRef = false, + bool needLow12 = false, RegOperand *regOp = nullptr); + + MemOperand &HashMemOpnd(MemOperand &tMemOpnd); + + MemOperand &GetOrCreateMemOpnd(MemOperand::AArch64AddressingMode mode, uint32 size, RegOperand *base, + RegOperand *index, ImmOperand *offset, const MIRSymbol *st); + + MemOperand &GetOrCreateMemOpnd(MemOperand::AArch64AddressingMode mode, uint32 size, RegOperand *base, + RegOperand *index, int32 shift, bool isSigned = false); + + MemOperand &GetOrCreateMemOpnd(MemOperand &oldMem); + + MemOperand &CreateMemOpnd(AArch64reg reg, int64 offset, uint32 size) { + RegOperand &baseOpnd = GetOrCreatePhysicalRegisterOperand(reg, GetPointerSize() * kBitsPerByte, kRegTyInt); + return CreateMemOpnd(baseOpnd, offset, size); + } + + MemOperand &CreateMemOpnd(RegOperand &baseOpnd, int64 offset, uint32 size); + + MemOperand &CreateMemOpnd(RegOperand &baseOpnd, int64 offset, uint32 size, const MIRSymbol &sym) const; + + MemOperand &CreateMemOpnd(PrimType ptype, const BaseNode &parent, BaseNode &addrExpr, int64 offset = 0, + AArch64isa::MemoryOrdering memOrd = AArch64isa::kMoNone); + + MemOperand *CreateMemOpndOrNull(PrimType ptype, const BaseNode &parent, BaseNode &addrExpr, int64 offset = 0, + AArch64isa::MemoryOrdering memOrd = AArch64isa::kMoNone); + + CondOperand &GetCondOperand(ConditionCode op) const { + return ccOperands[op]; + } + + BitShiftOperand *GetLogicalShiftLeftOperand(uint32 shiftAmount, bool is64bits) const; + + BitShiftOperand &CreateBitShiftOperand(BitShiftOperand::ShiftOp op, uint32 amount, int32 bitLen) const { + return *memPool->New(op, amount, bitLen); + } + + ExtendShiftOperand &CreateExtendShiftOperand(ExtendShiftOperand::ExtendOp op, uint32 amount, int32 bitLen) const { + return *memPool->New(op, amount, bitLen); + } + + void SplitMovImmOpndInstruction(int64 immVal, RegOperand &destReg, 
Insn *curInsn = nullptr); + + Operand &GetOrCreateFuncNameOpnd(const MIRSymbol &symbol) const; + void GenerateYieldpoint(BB &bb) override; + Operand &ProcessReturnReg(PrimType primType, int32 sReg) override; + void GenerateCleanupCode(BB &bb) override; + bool NeedCleanup() override; + void GenerateCleanupCodeForExtEpilog(BB &bb) override; + uint32 FloatParamRegRequired(MIRStructType *structType, uint32 &fpSize) override; + void AssignLmbcFormalParams() override; + void LmbcGenSaveSpForAlloca() override; + MemOperand *GenLmbcFpMemOperand(int32 offset, uint32 byteSize, AArch64reg baseRegno = RFP); + RegOperand *GenLmbcParamLoad(int32 offset, uint32 byteSize, RegType regType, + PrimType primType, AArch64reg baseRegno = RFP); + RegOperand *LmbcStructReturnLoad(int32 offset); + RegOperand *GetBaseReg(const SymbolAlloc &symAlloc) override; + int32 GetBaseOffset(const SymbolAlloc &symbolAlloc) override; + + Operand &CreateCommentOperand(const std::string &s) const { + return *memPool->New(s, *memPool); + } + + Operand &CreateCommentOperand(const MapleString &s) const { + return *memPool->New(s.c_str(), *memPool); + } + + Operand &CreateStringOperand(const std::string &s) const { + return *memPool->New(s, *memPool); + } + + Operand &CreateStringOperand(const MapleString &s) const { + return *memPool->New(s.c_str(), *memPool); + } + + void AddtoCalleeSaved(regno_t reg) override { + if (!UseFP() && reg == R29) { + reg = RFP; + } + if (find(calleeSavedRegs.begin(), calleeSavedRegs.end(), reg) != calleeSavedRegs.end()) { + return; + } + (void)calleeSavedRegs.emplace_back(static_cast(reg)); + ASSERT((AArch64isa::IsGPRegister(static_cast(reg)) || + AArch64isa::IsFPSIMDRegister(static_cast(reg))), + "Int or FP registers are expected"); + if (AArch64isa::IsGPRegister(static_cast(reg))) { + ++numIntregToCalleeSave; + } else { + ++numFpregToCalleeSave; + } + } + + uint32 SizeOfCalleeSaved() const { + /* npairs = num / 2 + num % 2 */ + uint32 nPairs = (numIntregToCalleeSave >> 1) + (numIntregToCalleeSave & 0x1); + nPairs += (numFpregToCalleeSave >> 1) + (numFpregToCalleeSave & 0x1); + return (nPairs * (kIntregBytelen << 1)); + } + + void DBGFixCallFrameLocationOffsets() override; + + void NoteFPLRAddedToCalleeSavedList() { + fplrAddedToCalleeSaved = true; + } + + bool IsFPLRAddedToCalleeSavedList() const { + return fplrAddedToCalleeSaved; + } + + bool IsIntrnCallForC() const { + return isIntrnCallForC; + } + + bool UsedStpSubPairForCallFrameAllocation() const { + return usedStpSubPairToAllocateCallFrame; + } + void SetUsedStpSubPairForCallFrameAllocation(bool val) { + usedStpSubPairToAllocateCallFrame = val; + } + + const MapleVector &GetCalleeSavedRegs() const { + return calleeSavedRegs; + } + + Insn *GetYieldPointInsn() { + return yieldPointInsn; + } + + const Insn *GetYieldPointInsn() const { + return yieldPointInsn; + } + + IntrinsiccallNode *GetCleanEANode() { + return cleanEANode; + } + + MemOperand &CreateStkTopOpnd(uint32 offset, uint32 size); + MemOperand *CreateStackMemOpnd(regno_t preg, int32 offset, uint32 size) const; + MemOperand *CreateMemOperand(MemOperand::AArch64AddressingMode mode, uint32 size, + RegOperand &base, RegOperand *index, ImmOperand *offset, const MIRSymbol *symbol) const; + MemOperand *CreateMemOperand(MemOperand::AArch64AddressingMode mode, uint32 size, + RegOperand &base, RegOperand &index, ImmOperand *offset, const MIRSymbol &symbol, bool noExtend) const; + MemOperand *CreateMemOperand(MemOperand::AArch64AddressingMode mode, uint32 dSize, + RegOperand &base, RegOperand 
&indexOpnd, uint32 shift, bool isSigned = false) const; + MemOperand *CreateMemOperand(MemOperand::AArch64AddressingMode mode, uint32 dSize, const MIRSymbol &sym) const; + + /* if offset < 0, allocation; otherwise, deallocation */ + MemOperand &CreateCallFrameOperand(int32 offset, uint32 size) const; + + void AppendCall(const MIRSymbol &funcSymbol); + Insn &AppendCall(const MIRSymbol &sym, ListOperand &srcOpnds); + + static constexpr uint32 kDwarfFpRegBegin = 64; + static constexpr int32 kBitLenOfShift64Bits = 6; /* for 64 bits register, shift amount is 0~63, use 6 bits to store */ + static constexpr int32 kBitLenOfShift32Bits = 5; /* for 32 bits register, shift amount is 0~31, use 5 bits to store */ + static constexpr int32 kHighestBitOf64Bits = 63; /* 63 is highest bit of a 64 bits number */ + static constexpr int32 kHighestBitOf32Bits = 31; /* 31 is highest bit of a 32 bits number */ + static constexpr int32 k16ValidBit = 16; + + /* CFI directives related stuffs */ + Operand &CreateCfiRegOperand(uint32 reg, uint32 size) override { + /* + * DWARF for ARM Architecture (ARM IHI 0040B) 3.1 Table 1 + * Having kRinvalid=0 (see arm32_isa.h) means + * each register gets assigned an id number one greater than + * its physical number + */ + if (reg < V0) { + return *memPool->New((reg - R0), size); + } else { + return *memPool->New((reg - V0) + kDwarfFpRegBegin, size); + } + } + + void SetCatchRegno(regno_t regNO) { + uCatch.regNOCatch = regNO; + } + + regno_t GetCatchRegno() const { + return uCatch.regNOCatch; + } + + void SetCatchOpnd(Operand &opnd) { + uCatch.opndCatch = &opnd; + } + + AArch64reg GetReturnRegisterNumber(); + + MOperator PickStInsn(uint32 bitSize, PrimType primType, + AArch64isa::MemoryOrdering memOrd = AArch64isa::kMoNone) const; + MOperator PickLdInsn(uint32 bitSize, PrimType primType, + AArch64isa::MemoryOrdering memOrd = AArch64isa::kMoNone) const; + MOperator PickExtInsn(PrimType dtype, PrimType stype) const; + + bool CheckIfSplitOffsetWithAdd(const MemOperand &memOpnd, uint32 bitLen) const; + RegOperand *GetBaseRegForSplit(uint32 baseRegNum); + + MemOperand &ConstraintOffsetToSafeRegion(uint32 bitLen, const MemOperand &memOpnd, const MIRSymbol *symbol); + MemOperand &SplitOffsetWithAddInstruction(const MemOperand &memOpnd, uint32 bitLen, + uint32 baseRegNum = AArch64reg::kRinvalid, bool isDest = false, + Insn *insn = nullptr, bool forPair = false); + ImmOperand &SplitAndGetRemained(const MemOperand &memOpnd, uint32 bitLen, int64 ofstVal, bool forPair = false); + MemOperand &CreateReplacementMemOperand(uint32 bitLen, RegOperand &baseReg, int64 offset); + + bool OpndHasStackLoadStore(Insn &insn, Operand &opnd); + bool HasStackLoadStore(); + + MemOperand &LoadStructCopyBase(const MIRSymbol &symbol, int64 offset, int dataSize); + + int32 GetSplitBaseOffset() const { + return splitStpldpBaseOffset; + } + void SetSplitBaseOffset(int32 val) { + splitStpldpBaseOffset = val; + } + + Insn &CreateCfiRestoreInsn(uint32 reg, uint32 size) { + return GetInsnBuilder()->BuildCfiInsn(cfi::OP_CFI_restore).AddOpndChain(CreateCfiRegOperand(reg, size)); + } + + Insn &CreateCfiOffsetInsn(uint32 reg, int64 val, uint32 size) { + return GetInsnBuilder()->BuildCfiInsn(cfi::OP_CFI_offset). + AddOpndChain(CreateCfiRegOperand(reg, size)). + AddOpndChain(CreateCfiImmOperand(val, size)); + } + Insn &CreateCfiDefCfaInsn(uint32 reg, int64 val, uint32 size) { + return GetInsnBuilder()->BuildCfiInsn(cfi::OP_CFI_def_cfa). + AddOpndChain(CreateCfiRegOperand(reg, size)). 
+ AddOpndChain(CreateCfiImmOperand(val, size)); + } + + InsnVisitor *NewInsnModifier() override { + return memPool->New(*this); + } + + RegType GetRegisterType(regno_t reg) const override; + + uint32 MaxCondBranchDistance() override { + return AArch64Abi::kMaxInstrForCondBr; + } + + void InsertJumpPad(Insn *insn) override; + + MapleVector &GetProEpilogSavedRegs() { + return proEpilogSavedRegs; + } + + uint32 GetDefaultAlignPow() const { + return alignPow; + } + + LmbcArgInfo *GetLmbcArgInfo() { + return lmbcArgInfo; + } + + void SetLmbcArgInfo(LmbcArgInfo *p) { + lmbcArgInfo = p; + } + + void SetLmbcArgInfo(RegOperand *reg, PrimType pTy, int32 ofst, int32 regs) const { + (void)GetLmbcCallArgs().emplace_back(reg); + (void)GetLmbcCallArgTypes().emplace_back(pTy); + (void)GetLmbcCallArgOffsets().emplace_back(ofst); + (void)GetLmbcCallArgNumOfRegs().emplace_back(regs); + } + + void ResetLmbcArgInfo() const { + GetLmbcCallArgs().clear(); + GetLmbcCallArgTypes().clear(); + GetLmbcCallArgOffsets().clear(); + GetLmbcCallArgNumOfRegs().clear(); + } + + MapleVector &GetLmbcCallArgs() const { + return lmbcArgInfo->lmbcCallArgs; + } + + MapleVector &GetLmbcCallArgTypes() const { + return lmbcArgInfo->lmbcCallArgTypes; + } + + MapleVector &GetLmbcCallArgOffsets() const { + return lmbcArgInfo->lmbcCallArgOffsets; + } + + MapleVector &GetLmbcCallArgNumOfRegs() const { + return lmbcArgInfo->lmbcCallArgNumOfRegs; + } + + int32 GetLmbcTotalStkUsed() const { + return lmbcArgInfo->lmbcTotalStkUsed; + } + + void SetLmbcTotalStkUsed(int32 offset) const { + lmbcArgInfo->lmbcTotalStkUsed = offset; + } + + void SetLmbcCallReturnType(MIRType *ty) { + lmbcCallReturnType = ty; + } + + MIRType *GetLmbcCallReturnType() { + return lmbcCallReturnType; + } + + bool IsSPOrFP(const RegOperand &opnd) const override; + bool IsReturnReg(const RegOperand &opnd) const override; + bool IsSaveReg(const RegOperand ®, MIRType &mirType, BECommon &cgBeCommon) const override; + + RegOperand &GetZeroOpnd(uint32 bitLen) override; + + private: + enum RelationOperator : uint8 { + kAND, + kIOR, + kEOR + }; + + enum RelationOperatorOpndPattern : uint8 { + kRegReg, + kRegImm + }; + + enum RoundType : uint8 { + kCeil, + kFloor, + kRound + }; + + static constexpr int32 kMaxMovkLslEntries = 8; + using MovkLslOperandArray = std::array; + + MapleVector calleeSavedRegs; + MapleVector proEpilogSavedRegs; + uint32 refCount = 0; /* Ref count number. 0 if function don't have "bl MCC_InitializeLocalStackRef" */ + int32 beginOffset = 0; /* Begin offset based x29. */ + Insn *yieldPointInsn = nullptr; /* The insn of yield point at the entry of the func. */ + IntrinsiccallNode *cleanEANode = nullptr; + + MapleUnorderedMap phyRegOperandTable; /* machine register operand table */ + MapleUnorderedMap hashLabelOpndTable; + MapleUnorderedMap hashOfstOpndTable; + MapleUnorderedMap hashMemOpndTable; + /* + * Local variables, formal parameters that are passed via registers + * need offset adjustment after callee-saved registers are known. + */ + MapleUnorderedMap memOpndsRequiringOffsetAdjustment; + MapleUnorderedMap memOpndsForStkPassedArguments; + MapleUnorderedMap immOpndsRequiringOffsetAdjustment; + MapleUnorderedMap immOpndsRequiringOffsetAdjustmentForRefloc; + union { + regno_t regNOCatch; /* For O2. */ + Operand *opndCatch; /* For O0-O1. 
*/ + } uCatch; + enum FpParamState { + kNotFp, + kFp32Bit, + kFp64Bit, + kStateUnknown, + }; + Operand *rcc = nullptr; + RegOperand *vary = nullptr; + RegOperand *fsp = nullptr; /* used to point the address of local variables and formal parameters */ + + static CondOperand ccOperands[kCcLast]; + static MovkLslOperandArray movkLslOperands; + uint32 numIntregToCalleeSave = 0; + uint32 numFpregToCalleeSave = 0; + bool fplrAddedToCalleeSaved = false; + bool isIntrnCallForC = false; + bool usedStpSubPairToAllocateCallFrame = false; + int32 splitStpldpBaseOffset = 0; + regno_t methodHandleVreg = -1; + uint32 alignPow = 5; /* function align pow defaults to 5 i.e. 2^5 */ + LmbcArgInfo *lmbcArgInfo = nullptr; + MIRType *lmbcCallReturnType = nullptr; + + void SelectLoadAcquire(Operand &dest, PrimType dtype, Operand &src, PrimType stype, + AArch64isa::MemoryOrdering memOrd, bool isDirect); + void SelectStoreRelease(Operand &dest, PrimType dtype, Operand &src, PrimType stype, + AArch64isa::MemoryOrdering memOrd, bool isDirect); + MOperator PickJmpInsn(Opcode brOp, Opcode cmpOp, bool isFloat, bool isSigned) const; + bool IsFrameReg(const RegOperand &opnd) const override; + + PrimType GetOperandTy(bool isIntty, uint32 dsize, bool isSigned) const { + ASSERT(!isSigned || isIntty, ""); + return (isIntty ? ((dsize == k64BitSize) ? (isSigned ? PTY_i64 : PTY_u64) : (isSigned ? PTY_i32 : PTY_u32)) + : ((dsize == k64BitSize) ? PTY_f64 : PTY_f32)); + } + + RegOperand &LoadIntoRegister(Operand &o, bool isIntty, uint32 dsize, bool asSigned = false) { + PrimType pTy; + if (o.GetKind() == Operand::kOpdRegister && static_cast(o).GetRegisterType() == kRegTyFloat) { + // f128 is a vector placeholder, no use for now + pTy = dsize == k32BitSize ? PTY_f32 : (dsize == k64BitSize ? PTY_f64 : PTY_f128); + } else { + pTy = GetOperandTy(isIntty, dsize, asSigned); + } + return LoadIntoRegister(o, pTy); + } + + RegOperand &LoadIntoRegister(Operand &o, PrimType oty) { + return (o.IsRegister() ? static_cast(o) : SelectCopy(o, oty, oty)); + } + + RegOperand &LoadIntoRegister(Operand &o, PrimType dty, PrimType sty) { + return (o.IsRegister() ? 
static_cast(o) : SelectCopy(o, sty, dty)); + } + + void CreateCallStructParamPassByStack(int32 symSize, const MIRSymbol *sym, RegOperand *addrOpnd, int32 baseOffset); + RegOperand *SelectParmListDreadAccessField(const MIRSymbol &sym, FieldID fieldID, const CCLocInfo &ploc, + int32 offset, uint32 parmNum); + void CreateCallStructParamPassByReg(regno_t regno, MemOperand &memOpnd, ListOperand &srcOpnds, + FpParamState state); + void CreateCallStructParamMemcpy(const MIRSymbol &sym, uint32 structSize, int32 copyOffset, int32 fromOffset); + void CreateCallStructParamMemcpy(RegOperand &addropnd, uint32 structSize, int32 copyOffset, int32 fromOffset); + RegOperand *CreateCallStructParamCopyToStack(uint32 numMemOp, const MIRSymbol *sym, RegOperand *addrOpd, + int32 copyOffset, int32 fromOffset, const CCLocInfo &ploc); + RegOperand *LoadIreadAddrForSamllAgg(BaseNode &iread); + void SelectParmListDreadSmallAggregate(const MIRSymbol &sym, MIRType &structType, + ListOperand &srcOpnds, + int32 offset, AArch64CallConvImpl &parmLocator, FieldID fieldID); + void SelectParmListIreadSmallAggregate(BaseNode &iread, MIRType &structType, ListOperand &srcOpnds, + int32 offset, AArch64CallConvImpl &parmLocator); + void SelectParmListDreadLargeAggregate(const MIRSymbol &sym, MIRType &structType, + ListOperand &srcOpnds, + AArch64CallConvImpl &parmLocator, int32 &structCopyOffset, int32 fromOffset); + void SelectParmListIreadLargeAggregate(const IreadNode &iread, MIRType &structType, ListOperand &srcOpnds, + AArch64CallConvImpl &parmLocator, int32 &structCopyOffset, int32 fromOffset); + void CreateCallStructMemcpyToParamReg(MIRType &structType, int32 structCopyOffset, AArch64CallConvImpl &parmLocator, + ListOperand &srcOpnds); + void GenAggParmForDread(const BaseNode &parent, ListOperand &srcOpnds, AArch64CallConvImpl &parmLocator, + int32 &structCopyOffset, size_t argNo); + void GenAggParmForIread(const BaseNode &parent, ListOperand &srcOpnds, + AArch64CallConvImpl &parmLocator, int32 &structCopyOffset, size_t argNo); + void GenAggParmForIreadoff(BaseNode &parent, ListOperand &srcOpnds, + AArch64CallConvImpl &parmLocator, int32 &structCopyOffset, size_t argNo); + void GenAggParmForIreadfpoff(BaseNode &parent, ListOperand &srcOpnds, + AArch64CallConvImpl &parmLocator, int32 &structCopyOffset, size_t argNo); + void SelectParmListForAggregate(BaseNode &parent, ListOperand &srcOpnds, + AArch64CallConvImpl &parmLocator, int32 &structCopyOffset, size_t argNo); + size_t SelectParmListGetStructReturnSize(StmtNode &naryNode); + bool MarkParmListCall(BaseNode &expr); + void GenLargeStructCopyForDread(BaseNode &argExpr, int32 &structCopyOffset); + void GenLargeStructCopyForIread(BaseNode &argExpr, int32 &structCopyOffset); + void GenLargeStructCopyForIreadfpoff(BaseNode &parent, BaseNode &argExpr, int32 &structCopyOffset, size_t argNo); + void GenLargeStructCopyForIreadoff(BaseNode &parent, BaseNode &argExpr, int32 &structCopyOffset, size_t argNo); + void SelectParmListPreprocessLargeStruct(BaseNode &parent, BaseNode &argExpr, int32 &structCopyOffset, size_t argNo); + void SelectParmListPreprocess(StmtNode &naryNode, size_t start, std::set &specialArgs); + void SelectParmList(StmtNode &naryNode, ListOperand &srcOpnds, bool isCallNative = false); + Operand *SelectClearStackCallParam(const AddrofNode &expr, int64 &offsetValue); + void SelectClearStackCallParmList(const StmtNode &naryNode, ListOperand &srcOpnds, + std::vector &stackPostion); + void SelectRem(Operand &resOpnd, Operand &lhsOpnd, Operand &rhsOpnd, PrimType 
primType, bool isSigned, bool is64Bits); + void SelectCvtInt2Int(const BaseNode *parent, Operand *&resOpnd, Operand *opnd0, PrimType fromType, PrimType toType); + void SelectCvtFloat2Float(Operand &resOpnd, Operand &srcOpnd, PrimType fromType, PrimType toType); + void SelectCvtFloat2Int(Operand &resOpnd, Operand &srcOpnd, PrimType itype, PrimType ftype); + void SelectCvtInt2Float(Operand &resOpnd, Operand &origOpnd0, PrimType toType, PrimType fromType); + Operand *SelectRelationOperator(RelationOperator operatorCode, const BinaryNode &node, Operand &opnd0, + Operand &opnd1, const BaseNode &parent); + void SelectRelationOperator(RelationOperator operatorCode, Operand &resOpnd, Operand &opnd0, Operand &opnd1, + PrimType primType); + MOperator SelectRelationMop(RelationOperator operatorCode, RelationOperatorOpndPattern opndPattern, bool is64Bits, + bool isBitmaskImmediate, bool isBitNumLessThan16) const; + Operand *SelectMinOrMax(bool isMin, const BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent); + void SelectMinOrMax(bool isMin, Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType); + Operand *SelectRoundLibCall(RoundType roundType, const TypeCvtNode &node, Operand &opnd0); + Operand *SelectRoundOperator(RoundType roundType, const TypeCvtNode &node, Operand &opnd0, const BaseNode &parent); + Operand *SelectAArch64ffs(Operand &argOpnd, PrimType argType); + Operand *SelectAArch64align(const IntrinsicopNode &intrnNode, bool isUp /* false for align down */); + int64 GetOrCreatSpillRegLocation(regno_t vrNum) { + AArch64SymbolAlloc *symLoc = static_cast(GetMemlayout()->GetLocOfSpillRegister(vrNum)); + return static_cast(GetBaseOffset(*symLoc)); + } + void SelectCopyMemOpnd(Operand &dest, PrimType dtype, uint32 dsize, Operand &src, PrimType stype); + void SelectCopyRegOpnd(Operand &dest, PrimType dtype, Operand::OperandType opndType, uint32 dsize, Operand &src, + PrimType stype); + bool GenerateCompareWithZeroInstruction(Opcode jmpOp, Opcode cmpOp, bool is64Bits, PrimType primType, + LabelOperand &targetOpnd, Operand &opnd0); + void GenCVaStartIntrin(RegOperand &opnd, uint32 stkSize); + void SelectCVaStart(const IntrinsiccallNode &intrnNode); + void SelectCAtomicStoreN(const IntrinsiccallNode &intrinsiccallNode); + void SelectCAtomicStore(const IntrinsiccallNode &intrinsiccall); + void SelectCAtomicLoad(const IntrinsiccallNode &intrinsiccall); + void SelectCSyncLockRelease(const IntrinsiccallNode &intrinsiccall, PrimType primType); + void SelectAtomicStore(Operand &srcOpnd, Operand &addrOpnd, PrimType primType, AArch64isa::MemoryOrdering memOrder); + void SelectAddrofThreadLocal(Operand &result, StImmOperand &stImm); + void SelectCTlsLocalDesc(Operand &result, StImmOperand &stImm); + void SelectCTlsGlobalDesc(Operand &result, StImmOperand &stImm); + void SelectMPLClinitCheck(const IntrinsiccallNode &intrnNode); + void SelectMPLProfCounterInc(const IntrinsiccallNode &intrnNode); + void SelectArithmeticAndLogical(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType, Opcode op); + + Operand *SelectAArch64CAtomicFetch(const IntrinsicopNode &intrinopNode, Opcode op, bool fetchBefore); + Operand *SelectAArch64CSyncFetch(const IntrinsicopNode &intrinopNode, Opcode op, bool fetchBefore); + /* Helper functions for translating complex Maple IR instructions/inrinsics */ + void SelectDassign(StIdx stIdx, FieldID fieldId, PrimType rhsPType, Operand &opnd0); + LabelIdx CreateLabeledBB(StmtNode &stmt); + void SaveReturnValueInLocal(CallReturnVector &retVals, 
size_t index, PrimType primType, Operand &value, + StmtNode &parentStmt); + /* Translation for load-link store-conditional, and atomic RMW operations. */ + MemOrd OperandToMemOrd(Operand &opnd) const; + MOperator PickLoadStoreExclInsn(uint32 byteP2Size, bool store, bool acqRel) const; + RegOperand *SelectLoadExcl(PrimType valPrimType, MemOperand &loc, bool acquire); + RegOperand *SelectStoreExcl(PrimType valPty, MemOperand &loc, RegOperand &newVal, bool release); + + MemOperand *GetPseudoRegisterSpillMemoryOperand(PregIdx i) override; + void ProcessLazyBinding() override; + bool CanLazyBinding(const Insn &ldrInsn) const; + void ConvertAdrpl12LdrToLdr(); + void ConvertAdrpLdrToIntrisic(); + bool IsStoreMop(MOperator mOp) const; + bool IsImmediateValueInRange(MOperator mOp, int64 immVal, bool is64Bits, + bool isIntactIndexed, bool isPostIndexed, bool isPreIndexed) const; + Insn &GenerateGlobalLongCallAfterInsn(const MIRSymbol &func, ListOperand &srcOpnds); + Insn &GenerateLocalLongCallAfterInsn(const MIRSymbol &func, ListOperand &srcOpnds); + bool IsDuplicateAsmList(const MIRSymbol &sym) const; + RegOperand *CheckStringIsCompressed(BB &bb, RegOperand &str, int32 countOffset, PrimType countPty, + LabelIdx jumpLabIdx); + RegOperand *CheckStringLengthLessThanEight(BB &bb, RegOperand &countOpnd, PrimType countPty, LabelIdx jumpLabIdx); + void GenerateIntrnInsnForStrIndexOf(BB &bb, RegOperand &srcString, RegOperand &patternString, + RegOperand &srcCountOpnd, RegOperand &patternLengthOpnd, + PrimType countPty, LabelIdx jumpLabIdx); + MemOperand *CheckAndCreateExtendMemOpnd(PrimType ptype, const BaseNode &addrExpr, int64 offset, + AArch64isa::MemoryOrdering memOrd); + MemOperand &CreateNonExtendMemOpnd(PrimType ptype, const BaseNode &parent, BaseNode &addrExpr, int64 offset); + std::string GenerateMemOpndVerbose(const Operand &src) const; + RegOperand *PrepareMemcpyParamOpnd(bool isLo12, const MIRSymbol &symbol, int64 offsetVal, RegOperand &baseReg); + RegOperand *PrepareMemcpyParamOpnd(int64 offset, Operand &exprOpnd); + RegOperand *PrepareMemcpyParamOpnd(uint64 copySize); + Insn *AggtStrLdrInsert(bool bothUnion, Insn *lastStrLdr, Insn &newStrLdr); + MemOperand &CreateMemOpndForStatic(const MIRSymbol &symbol, int64 offset, uint32 size, bool needLow12, + RegOperand *regOp); + LabelIdx GetLabelInInsn(Insn &insn) override { + return static_cast(insn.GetOperand(AArch64isa::GetJumpTargetIdx(insn))).GetLabelIndex(); + } +}; +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_CGFUNC_H */ diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_color_ra.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_color_ra.h new file mode 100644 index 0000000000000000000000000000000000000000..2c9457547e892563e512c15b2e2cc5c8c0f83903 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_color_ra.h @@ -0,0 +1,1447 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_COLOR_RA_H +#define MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_COLOR_RA_H + +#include "reg_alloc.h" +#include "operand.h" +#include "cgfunc.h" +#include "loop.h" +#include "cg_dominance.h" +#include "cg_pre.h" +#include "rematerialize.h" + +namespace maplebe { +#define RESERVED_REGS + +#define USE_LRA +#define USE_SPLIT +#undef USE_BB_FREQUENCY +#define OPTIMIZE_FOR_PROLOG +#define REUSE_SPILLMEM +#undef COLOR_SPLIT +#define MOVE_COALESCE + +/* for robust test */ +#undef CONSISTENT_MEMOPND +#undef RANDOM_PRIORITY + +constexpr uint32 kU64 = sizeof(uint64) * CHAR_BIT; + +template > +inline bool FindNotIn(const std::set &set, const T &item) { + return set.find(item) == set.end(); +} + +template > +inline bool FindNotIn(const std::unordered_set &set, const T &item) { + return set.find(item) == set.end(); +} + +template +inline bool FindNotIn(const MapleSet &set, const T &item) { + return set.find(item) == set.end(); +} + +template +inline bool FindNotIn(const MapleUnorderedSet &set, const T &item) { + return set.find(item) == set.end(); +} + +template +inline bool FindNotIn(const MapleList &list, const T &item) { + return std::find(list.begin(), list.end(), item) == list.end(); +} + +template > +inline bool FindIn(const std::set &set, const T &item) { + return set.find(item) != set.end(); +} + +template > +inline bool FindIn(const std::unordered_set &set, const T &item) { + return set.find(item) != set.end(); +} + +template +inline bool FindIn(const MapleSet &set, const T &item) { + return set.find(item) != set.end(); +} + +template +inline bool FindIn(const MapleUnorderedSet &set, const T &item) { + return set.find(item) != set.end(); +} + +template +inline bool FindIn(const MapleList &list, const T &item) { + return std::find(list.begin(), list.end(), item) != list.end(); +} + +inline bool IsBitArrElemSet(const uint64 *vec, const uint32 num) { + size_t index = num / kU64; + uint64 bit = num % kU64; + return vec[index] & (1ULL << bit); +} + +inline bool IsBBsetOverlap(const uint64 *vec1, const uint64 *vec2, uint32 bbBuckets) { + for (uint32 i = 0; i < bbBuckets; ++i) { + if ((vec1[i] & vec2[i]) != 0) { + return true; + } + } + return false; +} + +/* For each bb, record info pertain to allocation */ +/* + * This is per bb per LR. + * LU info is particular to a bb in a LR. 
+ */ +class LiveUnit { + public: + LiveUnit() = default; + ~LiveUnit() = default; + + void PrintLiveUnit() const; + + uint32 GetBegin() const { + return begin; + } + + void SetBegin(uint32 val) { + begin = val; + } + + uint32 GetEnd() const { + return end; + } + + void SetEnd(uint32 endVal) { + this->end = endVal; + } + + bool HasCall() const { + return hasCall; + } + + void SetHasCall(bool hasCallVal) { + this->hasCall = hasCallVal; + } + + uint32 GetDefNum() const { + return defNum; + } + + void SetDefNum(uint32 defNumVal) { + this->defNum = defNumVal; + } + + void IncDefNum() { + ++defNum; + } + + uint32 GetUseNum() const { + return useNum; + } + + void SetUseNum(uint32 useNumVal) { + this->useNum = useNumVal; + } + + void IncUseNum() { + ++useNum; + } + + bool NeedReload() const { + return needReload; + } + + void SetNeedReload(bool needReloadVal) { + this->needReload = needReloadVal; + } + + bool NeedRestore() const { + return needRestore; + } + + void SetNeedRestore(bool needRestoreVal) { + this->needRestore = needRestoreVal; + } + + private: + uint32 begin = 0; /* first encounter in bb */ + uint32 end = 0; /* last encounter in bb */ + bool hasCall = false; /* bb has a call */ + uint32 defNum = 0; + uint32 useNum = 0; /* used for priority calculation */ + bool needReload = false; + bool needRestore = false; +}; + +struct SortedBBCmpFunc { + bool operator()(const BB *lhs, const BB *rhs) const { + return (lhs->GetLevel() < rhs->GetLevel()); + } +}; + +enum RefType : uint8 { + kIsUse = 0x1, + kIsDef = 0x2, + kIsCall = 0x4, +}; + +/* LR is for each global vreg. */ +class LiveRange { + public: + explicit LiveRange(uint32 maxRegNum, MapleAllocator &allocator) + : lrAlloca(&allocator), + pregveto(maxRegNum, false, allocator.Adapter()), + callDef(maxRegNum, false, allocator.Adapter()), + forbidden(maxRegNum, false, allocator.Adapter()), + prefs(allocator.Adapter()), + refMap(allocator.Adapter()), + luMap(allocator.Adapter()) {} + + ~LiveRange() = default; + + regno_t GetRegNO() const { + return regNO; + } + + void SetRegNO(regno_t val) { + regNO = val; + } + + uint32 GetID() const { + return id; + } + + void SetID(uint32 idVal) { + this->id = idVal; + } + + regno_t GetAssignedRegNO() const { + return assignedRegNO; + } + + void SetAssignedRegNO(regno_t regno) { + assignedRegNO = regno; + } + + uint32 GetNumCall() const { + return numCall; + } + + void SetNumCall(uint32 num) { + numCall = num; + } + + void IncNumCall() { + ++numCall; + } + + RegType GetRegType() const { + return regType; + } + + void SetRegType(RegType regTy) { + this->regType = regTy; + } + + float GetPriority() const { + return priority; + } + + void SetPriority(float priorityVal) { + this->priority = priorityVal; + } + + bool IsMustAssigned() const { + return mustAssigned; + } + + void SetMustAssigned() { + mustAssigned = true; + } + + void SetBBBuckets(uint32 bucketNum) { + bbBuckets = bucketNum; + } + + void SetRegBuckets(uint32 bucketNum) { + regBuckets = bucketNum; + } + + uint32 GetNumBBMembers() const { + return numBBMembers; + } + + void IncNumBBMembers() { + ++numBBMembers; + } + + void DecNumBBMembers() { + --numBBMembers; + } + + void InitBBMember(MemPool &memPool, size_t size) { + bbMember = memPool.NewArray(size); + errno_t ret = memset_s(bbMember, size * sizeof(uint64), 0, size * sizeof(uint64)); + CHECK_FATAL(ret == EOK, "call memset_s failed"); + } + + uint64 *GetBBMember() { + return bbMember; + } + + const uint64 *GetBBMember() const { + return bbMember; + } + + uint64 GetBBMemberElem(int32 index) const { 
+ return bbMember[index]; + } + + void SetBBMemberElem(int32 index, uint64 elem) const { + bbMember[index] = elem; + } + + void SetMemberBitArrElem(uint32 bbID) { + uint32 index = bbID / kU64; + uint64 bit = bbID % kU64; + uint64 mask = 1ULL << bit; + if ((GetBBMemberElem(index) & mask) == 0) { + IncNumBBMembers(); + SetBBMemberElem(index, GetBBMemberElem(index) | mask); + } + } + + void UnsetMemberBitArrElem(uint32 bbID) { + uint32 index = bbID / kU64; + uint64 bit = bbID % kU64; + uint64 mask = 1ULL << bit; + if ((GetBBMemberElem(index) & mask) != 0) { + DecNumBBMembers(); + SetBBMemberElem(index, GetBBMemberElem(index) & (~mask)); + } + } + + void SetConflictBitArrElem(regno_t regno) { + uint32 index = regno / kU64; + uint64 bit = regno % kU64; + uint64 mask = 1ULL << bit; + if ((GetBBConflictElem(index) & mask) == 0) { + IncNumBBConflicts(); + SetBBConflictElem(index, GetBBConflictElem(index) | mask); + } + } + + void UnsetConflictBitArrElem(regno_t regno) { + uint32 index = regno / kU64; + uint64 bit = regno % kU64; + uint64 mask = 1ULL << bit; + if ((GetBBConflictElem(index) & mask) != 0) { + DecNumBBConflicts(); + SetBBConflictElem(index, GetBBConflictElem(index) & (~mask)); + } + } + + void InitPregveto() { + pregveto.assign(pregveto.size(), false); + callDef.assign(callDef.size(), false); + } + + bool GetPregveto(regno_t regno) const { + return pregveto[regno]; + } + + size_t GetPregvetoSize() const { + return numPregveto; + } + + void InsertElemToPregveto(regno_t regno) { + if (!pregveto[regno]) { + pregveto[regno] = true; + ++numPregveto; + } + } + + bool GetCallDef(regno_t regno) const { + return callDef[regno]; + } + + void InsertElemToCallDef(regno_t regno) { + if (!callDef[regno]) { + callDef[regno] = true; + ++numCallDef; + } + } + + void SetCrossCall() { + crossCall = true; + } + + bool GetCrossCall() const { + return crossCall; + } + + void InitForbidden() { + forbidden.assign(forbidden.size(), false); + } + + const MapleVector &GetForbidden() const { + return forbidden; + } + + bool GetForbidden(regno_t regno) const { + return forbidden[regno]; + } + + size_t GetForbiddenSize() const { + return numForbidden; + } + + void InsertElemToForbidden(regno_t regno) { + if (!forbidden[regno]) { + forbidden[regno] = true; + ++numForbidden; + } + } + + void EraseElemFromForbidden(regno_t regno) { + if (forbidden[regno]) { + forbidden[regno] = false; + --numForbidden; + } + } + + void ClearForbidden() { + forbidden.clear(); + } + + uint32 GetNumBBConflicts() const { + return numBBConflicts; + } + + void IncNumBBConflicts() { + ++numBBConflicts; + } + + void DecNumBBConflicts() { + --numBBConflicts; + } + + void InitBBConflict(MemPool &memPool, size_t size) { + bbConflict = memPool.NewArray(size); + errno_t ret = memset_s(bbConflict, size * sizeof(uint64), 0, size * sizeof(uint64)); + CHECK_FATAL(ret == EOK, "call memset_s failed"); + } + + const uint64 *GetBBConflict() const { + return bbConflict; + } + + uint64 GetBBConflictElem(int32 index) const { + ASSERT(index < regBuckets, "out of bbConflict"); + return bbConflict[index]; + } + + void SetBBConflictElem(int32 index, uint64 elem) const { + ASSERT(index < regBuckets, "out of bbConflict"); + bbConflict[index] = elem; + } + + void SetOldConflict(uint64 *conflict) { + oldConflict = conflict; + } + + const uint64 *GetOldConflict() const { + return oldConflict; + } + + const MapleSet &GetPrefs() const { + return prefs; + } + + void InsertElemToPrefs(regno_t regno) { + (void)prefs.insert(regno); + } + + const MapleMap*> GetRefs() const 
{ + return refMap; + } + + const MapleMap GetRefs(uint32 bbId) const { + return *(refMap.find(bbId)->second); + } + + void AddRef(uint32 bbId, uint32 pos, uint32 mark) { + if (refMap.find(bbId) == refMap.end()) { + auto point = lrAlloca->New>(lrAlloca->Adapter()); + (void)point->emplace(std::pair(pos, mark)); + (void)refMap.emplace(std::pair*>(bbId, point)); + } else { + auto &bbPoint = (refMap.find(bbId))->second; + if (bbPoint->find(pos) == bbPoint->end()) { + (void)bbPoint->emplace(std::pair(pos, mark)); + } else { + auto posVal = bbPoint->find(pos)->second; + (void)bbPoint->erase(bbPoint->find(pos)); + (void)bbPoint->emplace(std::pair(pos, posVal | mark)); + } + } + } + + const MapleMap &GetLuMap() const { + return luMap; + } + + MapleMap::iterator FindInLuMap(uint32 index) { + return luMap.find(index); + } + + MapleMap::iterator EndOfLuMap() { + return luMap.end(); + } + + MapleMap::const_iterator EraseLuMap(MapleMap::const_iterator it) { + return luMap.erase(it); + } + + void SetElemToLuMap(uint32 key, LiveUnit &value) { + luMap[key] = &value; + } + + LiveUnit *GetLiveUnitFromLuMap(uint32 key) { + return luMap[key]; + } + + const LiveUnit *GetLiveUnitFromLuMap(uint32 key) const { + auto it = luMap.find(key); + ASSERT(it != luMap.end(), "can't find live unit"); + return it->second; + } + + const LiveRange *GetSplitLr() const { + return splitLr; + } + + void SetSplitLr(LiveRange &lr) { + splitLr = &lr; + } + +#ifdef OPTIMIZE_FOR_PROLOG + uint32 GetNumDefs() const { + return numDefs; + } + + void IncNumDefs() { + ++numDefs; + } + + void SetNumDefs(uint32 val) { + numDefs = val; + } + + uint32 GetNumUses() const { + return numUses; + } + + void IncNumUses() { + ++numUses; + } + + void SetNumUses(uint32 val) { + numUses = val; + } + + uint32 GetFrequency() const { + return frequency; + } + + void SetFrequency(uint32 frequencyVal) { + this->frequency = frequencyVal; + } +#endif /* OPTIMIZE_FOR_PROLOG */ + + MemOperand *GetSpillMem() { + return spillMem; + } + + const MemOperand *GetSpillMem() const { + return spillMem; + } + + void SetSpillMem(MemOperand& memOpnd) { + spillMem = &memOpnd; + } + + regno_t GetSpillReg() const { + return spillReg; + } + + void SetSpillReg(regno_t spillRegister) { + this->spillReg = spillRegister; + } + + uint32 GetSpillSize() const { + return spillSize; + } + + void SetSpillSize(uint32 size) { + spillSize = size; + } + + bool IsSpilled() const { + return spilled; + } + + void SetSpilled(bool spill) { + spilled = spill; + } + + bool HasDefUse() const { + return hasDefUse; + } + + void SetDefUse() { + hasDefUse = true; + } + + bool GetProcessed() const { + return proccessed; + } + + void SetProcessed() { + proccessed = true; + } + + bool IsNonLocal() const { + return isNonLocal; + } + + void SetIsNonLocal(bool isNonLocalVal) { + this->isNonLocal = isNonLocalVal; + } + + void SetRematLevel(RematLevel val) { + rematerializer->SetRematLevel(val); + } + + RematLevel GetRematLevel() const { + return rematerializer->GetRematLevel(); + } + + Opcode GetOp() const { + return rematerializer->GetOp(); + } + + void SetRematerializable(const MIRConst *c) { + rematerializer->SetRematerializable(c); + } + + void SetRematerializable(Opcode opcode, const MIRSymbol *symbol, FieldID fieldId, bool addrUp) { + rematerializer->SetRematerializable(opcode, symbol, fieldId, addrUp); + } + + void CopyRematerialization(const LiveRange &lr) { + *rematerializer = *lr.GetRematerializer(); + } + + bool GetIsSpSave() const { + return isSpSave; + } + + void SetIsSpSave() { + isSpSave = true; + 
} + + bool IsRematerializable(CGFunc &cgFunc, RematLevel rematLev) const { + return rematerializer->IsRematerializable(cgFunc, rematLev, *this); + } + std::vector Rematerialize(CGFunc &cgFunc, RegOperand ®Op) { + return rematerializer->Rematerialize(cgFunc, regOp, *this); + } + + void SetRematerializer(Rematerializer *remat) { + rematerializer = remat; + } + + Rematerializer *GetRematerializer() const { + return rematerializer; + } + + private: + MapleAllocator *lrAlloca; + regno_t regNO = 0; + uint32 id = 0; /* for priority tie breaker */ + regno_t assignedRegNO = 0; /* color assigned */ + uint32 numCall = 0; + RegType regType = kRegTyUndef; + float priority = 0.0; + bool mustAssigned = false; + uint32 bbBuckets = 0; /* size of bit array for bb (each bucket == 64 bits) */ + uint32 regBuckets = 0; /* size of bit array for reg (each bucket == 64 bits) */ + uint32 numBBMembers = 0; /* number of bits set in bbMember */ + uint64 *bbMember = nullptr; /* Same as smember, but use bit array */ + + MapleBitVector pregveto; /* pregs cannot be assigned -- SplitLr may clear forbidden */ + MapleBitVector callDef; /* pregs cannot be assigned -- SplitLr may clear forbidden */ + MapleBitVector forbidden; /* pregs cannot be assigned */ + uint32 numPregveto = 0; + uint32 numCallDef = 0; + uint32 numForbidden = 0; + bool crossCall = false; + + uint32 numBBConflicts = 0; /* number of bits set in bbConflict */ + uint64 *bbConflict = nullptr; /* vreg interference from graph neighbors (bit) */ + uint64 *oldConflict = nullptr; + MapleSet prefs; /* pregs that prefer */ + MapleMap*> refMap; + MapleMap luMap; /* info for each bb */ + LiveRange *splitLr = nullptr; /* The 1st part of the split */ +#ifdef OPTIMIZE_FOR_PROLOG + uint32 numDefs = 0; + uint32 numUses = 0; + uint32 frequency = 0; +#endif /* OPTIMIZE_FOR_PROLOG */ + MemOperand *spillMem = nullptr; /* memory operand used for spill, if any */ + regno_t spillReg = 0; /* register operand for spill at current point */ + uint32 spillSize = 0; /* 32 or 64 bit spill */ + bool spilled = false; /* color assigned */ + bool hasDefUse = false; /* has regDS */ + bool proccessed = false; + bool isNonLocal = false; + bool isSpSave = false; /* contain SP in case of alloca */ + Rematerializer *rematerializer = nullptr; +}; + +/* One per bb, to communicate local usage to global RA */ +class LocalRaInfo { + public: + explicit LocalRaInfo(MapleAllocator &allocator) + : defCnt(allocator.Adapter()), + useCnt(allocator.Adapter()) {} + + ~LocalRaInfo() = default; + + const MapleMap &GetDefCnt() const { + return defCnt; + } + + uint16 GetDefCntElem(regno_t regNO) { + return defCnt[regNO]; + } + + void SetDefCntElem(regno_t key, uint16 value) { + defCnt[key] = value; + } + + const MapleMap &GetUseCnt() const { + return useCnt; + } + + uint16 GetUseCntElem(regno_t regNO) { + return useCnt[regNO]; + } + + void SetUseCntElem(regno_t key, uint16 value) { + useCnt[key] = value; + } + + private: + MapleMap defCnt; + MapleMap useCnt; +}; + +/* For each bb, record info pertain to allocation */ +class BBAssignInfo { + public: + explicit BBAssignInfo(uint32 maxRegNum, MapleAllocator &allocator) + : globalsAssigned(maxRegNum, false, allocator.Adapter()), + regMap(allocator.Adapter()) {} + + ~BBAssignInfo() = default; + + uint32 GetLocalRegsNeeded() const { + return localRegsNeeded; + } + + void SetLocalRegsNeeded(uint32 num) { + localRegsNeeded = num; + } + + void InitGlobalAssigned() { + globalsAssigned.assign(globalsAssigned.size(), false); + } + + bool GetGlobalsAssigned(regno_t regNO) const 
{ + return globalsAssigned[regNO]; + } + + void InsertElemToGlobalsAssigned(regno_t regNO) { + globalsAssigned[regNO] = true; + } + + void EraseElemToGlobalsAssigned(regno_t regNO) { + globalsAssigned[regNO] = false; + } + + const MapleMap &GetRegMap() const { + return regMap; + } + + bool HasRegMap(regno_t regNOKey) const { + return (regMap.find(regNOKey) != regMap.end()); + } + + regno_t GetRegMapElem(regno_t regNO) { + return regMap[regNO]; + } + + void SetRegMapElem(regno_t regNOKey, regno_t regNOValue) { + regMap[regNOKey] = regNOValue; + } + + private: + uint32 localRegsNeeded = 0; /* num local reg needs for each bb */ + MapleBitVector globalsAssigned; /* globals used in a bb */ + MapleMap regMap; /* local vreg to preg mapping */ +}; + +class FinalizeRegisterInfo { + public: + explicit FinalizeRegisterInfo(MapleAllocator &allocator) + : defOperands(allocator.Adapter()), + useOperands(allocator.Adapter()), + useDefOperands(allocator.Adapter()) {} + + ~FinalizeRegisterInfo() = default; + void ClearInfo() { + memOperandIdx = 0; + baseOperand = nullptr; + offsetOperand = nullptr; + defOperands.clear(); + useOperands.clear(); + useDefOperands.clear(); + } + + void SetBaseOperand(Operand &opnd, uint32 idx) { + baseOperand = &opnd; + memOperandIdx = idx; + } + + void SetOffsetOperand(Operand &opnd) { + offsetOperand = &opnd; + } + + void SetDefOperand(Operand &opnd, uint32 idx) { + defOperands.emplace_back(idx, &opnd); + } + + void SetUseOperand(Operand &opnd, uint32 idx) { + useOperands.emplace_back(idx, &opnd); + } + + void SetUseDefOperand(Operand &opnd, uint32 idx) { + useDefOperands.emplace_back(idx, &opnd); + } + + int32 GetMemOperandIdx() const { + return memOperandIdx; + } + + const Operand *GetBaseOperand() const { + return baseOperand; + } + + const Operand *GetOffsetOperand() const { + return offsetOperand; + } + + const MapleVector> &GetDefOperands() const { + return defOperands; + } + + const MapleVector> &GetUseOperands() const { + return useOperands; + } + + const MapleVector> &GetUseDefOperands() const { + return useDefOperands; + } + private: + uint32 memOperandIdx = 0; + Operand *baseOperand = nullptr; + Operand *offsetOperand = nullptr; + MapleVector> defOperands; + MapleVector> useOperands; + MapleVector> useDefOperands; +}; + +class LocalRegAllocator { + public: + LocalRegAllocator(CGFunc &cgFunc, MapleAllocator &allocator) + : regInfo(cgFunc.GetTargetRegInfo()), + regAssigned(allocator.Adapter()), + regSpilled(allocator.Adapter()), + regAssignmentMap(allocator.Adapter()), + pregUsed(allocator.Adapter()), + pregs(allocator.Adapter()), + useInfo(allocator.Adapter()), + defInfo(allocator.Adapter()) { + regAssigned.resize(cgFunc.GetMaxRegNum(), false); + regSpilled.resize(cgFunc.GetMaxRegNum(), false); + pregUsed.resize(regInfo->GetAllRegNum(), false); + pregs.resize(regInfo->GetAllRegNum(), false); + } + + ~LocalRegAllocator() = default; + + void ClearLocalRaInfo() { + regSpilled.assign(regSpilled.size(), false); + regAssigned.assign(regAssigned.size(), false); + regAssignmentMap.clear(); + pregUsed.assign(pregUsed.size(), false); + } + + bool IsInRegAssigned(regno_t regNO) const { + return regAssigned[regNO]; + } + + void SetRegAssigned(regno_t regNO) { + regAssigned[regNO] = true; + } + + regno_t GetRegAssignmentItem(regno_t regKey) const { + auto iter = regAssignmentMap.find(regKey); + ASSERT(iter != regAssignmentMap.end(), "error reg assignmemt"); + return iter->second; + } + + void SetRegAssignmentMap(regno_t regKey, regno_t regValue) { + regAssignmentMap[regKey] = 
regValue; + } + + /* only for HandleLocalRaDebug */ + const MapleBitVector &GetPregUsed() const { + return pregUsed; + } + + void SetPregUsed(regno_t regNO) { + if (!pregUsed[regNO]) { + pregUsed[regNO] = true; + ++numPregUsed; + } + } + + bool IsInRegSpilled(regno_t regNO) const { + return regSpilled[regNO]; + } + + void SetRegSpilled(regno_t regNO) { + regSpilled[regNO] = true; + } + + const MapleBitVector &GetPregs() const { + return pregs; + } + + void SetPregs(regno_t regNO) { + pregs[regNO] = true; + } + + void ClearPregs(regno_t regNO) { + pregs[regNO] = false; + } + + bool IsPregAvailable(regno_t regNO) const { + return pregs[regNO]; + } + + void InitPregs(bool hasYield, const MapleSet &intSpillRegSet, + const MapleSet &fpSpillRegSet) { + for (regno_t regNO : regInfo->GetAllRegs()) { + SetPregs(regNO); + } + for (regno_t regNO : intSpillRegSet) { + ClearPregs(regNO); + } + for (regno_t regNO : fpSpillRegSet) { + ClearPregs(regNO); + } + if (hasYield) { + ClearPregs(regInfo->GetYieldPointReg()); + } +#ifdef RESERVED_REGS + ClearPregs(regInfo->GetReservedSpillReg()); + ClearPregs(regInfo->GetSecondReservedSpillReg()); +#endif /* RESERVED_REGS */ + } + + const MapleMap &GetRegAssignmentMap() const { + return regAssignmentMap; + } + + const MapleMap &GetUseInfo() const { + return useInfo; + } + + void SetUseInfoElem(regno_t regNO, uint16 info) { + useInfo[regNO] = info; + } + + void IncUseInfoElem(regno_t regNO) { + if (useInfo.find(regNO) != useInfo.end()) { + ++useInfo[regNO]; + } + } + + uint16 GetUseInfoElem(regno_t regNO) { + return useInfo[regNO]; + } + + void ClearUseInfo() { + useInfo.clear(); + } + + const MapleMap &GetDefInfo() const { + return defInfo; + } + + void SetDefInfoElem(regno_t regNO, uint16 info) { + defInfo[regNO] = info; + } + + uint16 GetDefInfoElem(regno_t regNO) { + return defInfo[regNO]; + } + + void IncDefInfoElem(regno_t regNO) { + if (defInfo.find(regNO) != defInfo.end()) { + ++defInfo[regNO]; + } + } + + void ClearDefInfo() { + defInfo.clear(); + } + + uint32 GetNumPregUsed() const { + return numPregUsed; + } + + private: + RegisterInfo *regInfo = nullptr; + /* The following local vars keeps track of allocation information in bb. 
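+   * Note: regAssigned/regSpilled/regAssignmentMap are indexed by virtual register number
+   * (sized from cgFunc.GetMaxRegNum()), while pregUsed/pregs are sized by the target's
+   * physical register count (regInfo->GetAllRegNum()); see the constructor above.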
*/ + MapleBitVector regAssigned; /* in this set if vreg is assigned */ + MapleBitVector regSpilled; /* on this list if vreg is spilled */ + MapleMap regAssignmentMap; /* vreg -> preg map, which preg is the vreg assigned */ + MapleBitVector pregUsed; /* pregs used in bb */ + MapleBitVector pregs; /* available regs for assignement */ + MapleMap useInfo; /* copy of local ra info for useCnt */ + MapleMap defInfo; /* copy of local ra info for defCnt */ + + uint32 numPregUsed = 0; +}; + +class SplitBBInfo { + public: + SplitBBInfo() = default; + + ~SplitBBInfo() = default; + + BB *GetCandidateBB() { + return candidateBB; + } + + const BB *GetCandidateBB() const { + return candidateBB; + } + + const BB *GetStartBB() const { + return startBB; + } + + void SetCandidateBB(BB &bb) { + candidateBB = &bb; + } + + void SetStartBB(BB &bb) { + startBB = &bb; + } + + private: + BB *candidateBB = nullptr; + BB *startBB = nullptr; +}; + +class GraphColorRegAllocator : public RegAllocator { + public: + GraphColorRegAllocator(CGFunc &cgFunc, MemPool &memPool, DomAnalysis &dom) + : RegAllocator(cgFunc, memPool), + domInfo(dom), + bbVec(alloc.Adapter()), + vregLive(alloc.Adapter()), + pregLive(alloc.Adapter()), + lrMap(alloc.Adapter()), + localRegVec(alloc.Adapter()), + bbRegInfo(alloc.Adapter()), + unconstrained(alloc.Adapter()), + unconstrainedPref(alloc.Adapter()), + constrained(alloc.Adapter()), + mustAssigned(alloc.Adapter()), +#ifdef OPTIMIZE_FOR_PROLOG + intDelayed(alloc.Adapter()), + fpDelayed(alloc.Adapter()), +#endif /* OPTIMIZE_FOR_PROLOG */ + intCallerRegSet(alloc.Adapter()), + intCalleeRegSet(alloc.Adapter()), + intSpillRegSet(alloc.Adapter()), + fpCallerRegSet(alloc.Adapter()), + fpCalleeRegSet(alloc.Adapter()), + fpSpillRegSet(alloc.Adapter()), + intCalleeUsed(alloc.Adapter()), + fpCalleeUsed(alloc.Adapter()) { + constexpr uint32 kNumInsnThreashold = 30000; + numVregs = cgFunc.GetMaxVReg(); + localRegVec.resize(cgFunc.NumBBs()); + bbRegInfo.resize(cgFunc.NumBBs()); + if (CGOptions::DoMultiPassColorRA() && cgFunc.GetMirModule().IsCModule()) { + uint32 cnt = 0; + FOR_ALL_BB(bb, &cgFunc) { + FOR_BB_INSNS(insn, bb) { + ++cnt; + } + } + ASSERT(cnt <= cgFunc.GetTotalNumberOfInstructions(), "Incorrect insn count"); + if (cnt <= kNumInsnThreashold) { + doMultiPass = true; + doLRA = false; + doOptProlog = false; + } + } + } + + ~GraphColorRegAllocator() override = default; + + bool AllocateRegisters() override; + + enum SpillMemCheck : uint8 { + kSpillMemPre, + kSpillMemPost, + }; + + LiveRange *GetLiveRange(regno_t regNO) { + MapleMap::const_iterator it = lrMap.find(regNO); + if (it != lrMap.cend()) { + return it->second; + } else { + return nullptr; + } + } + LiveRange *GetLiveRange(regno_t regNO) const { + auto it = lrMap.find(regNO); + if (it != lrMap.end()) { + return it->second; + } else { + return nullptr; + } + } + const MapleMap &GetLrMap() const { + return lrMap; + } + Insn *SpillOperand(Insn &insn, const Operand &opnd, bool isDef, RegOperand &phyOpnd, bool forCall = false); + private: + struct SetLiveRangeCmpFunc { + bool operator()(const LiveRange *lhs, const LiveRange *rhs) const { + if (fabs(lhs->GetPriority() - rhs->GetPriority()) <= 1e-6) { + /* + * This is to ensure the ordering is consistent as the reg# + * differs going through VtableImpl.mpl file. 
+ */ + if (lhs->GetID() == rhs->GetID()) { + return lhs->GetRegNO() < rhs->GetRegNO(); + } else { + return lhs->GetID() < rhs->GetID(); + } + } + return (lhs->GetPriority() > rhs->GetPriority()); + } + }; + + template + void ForEachBBArrElem(const uint64 *vec, Func functor) const; + + template + void ForEachBBArrElemWithInterrupt(const uint64 *vec, Func functor) const; + + template + void ForEachRegArrElem(const uint64 *vec, Func functor) const; + + void PrintLiveUnitMap(const LiveRange &lr) const; + void PrintLiveRangeConflicts(const LiveRange &lr) const; + void PrintLiveBBBit(const LiveRange &lr) const; + void PrintLiveRange(const LiveRange &lr, const std::string &str) const; + void PrintLiveRanges() const; + void PrintLocalRAInfo(const std::string &str) const; + void PrintBBAssignInfo() const; + void PrintBBs() const; + + void InitFreeRegPool(); + LiveRange *NewLiveRange(); + void CalculatePriority(LiveRange &lr) const; + bool CreateLiveRangeHandleLocal(regno_t regNO, const BB &bb, bool isDef); + LiveRange *CreateLiveRangeAllocateAndUpdate(regno_t regNO, const BB &bb, bool isDef, uint32 currId); + void CreateLiveRange(regno_t regNO, const BB &bb, bool isDef, uint32 currId, bool updateCount); + bool SetupLiveRangeByOpHandlePhysicalReg(const RegOperand ®Opnd, Insn &insn, regno_t regNO, bool isDef); + void SetupLiveRangeByOp(Operand &op, Insn &insn, bool isDef, uint32 &numUses); + void SetupLiveRangeByRegNO(regno_t liveOut, BB &bb, uint32 currPoint); + bool UpdateInsnCntAndSkipUseless(Insn &insn, uint32 &currPoint) const; + void UpdateCallInfo(uint32 bbId, uint32 currPoint, const Insn &insn); + void ClassifyOperand(std::unordered_set &pregs, std::unordered_set &vregs, + const Operand &opnd) const; + void SetOpndConflict(const Insn &insn, bool onlyDef); + void UpdateOpndConflict(const Insn &insn, bool multiDef); + void SetLrMustAssign(const RegOperand *regOpnd); + void SetupMustAssignedLiveRanges(const Insn &insn); + void ComputeLiveRangesForEachDefOperand(Insn &insn, bool &multiDef); + void ComputeLiveRangesForEachUseOperand(Insn &insn); + void ComputeLiveRangesUpdateIfInsnIsCall(const Insn &insn); + void ComputeLiveRangesUpdateLiveUnitInsnRange(BB &bb, uint32 currPoint); + void ComputeLiveRanges(); + MemOperand *CreateSpillMem(uint32 spillIdx, SpillMemCheck check); + bool CheckOverlap(uint64 val, uint32 i, LiveRange &lr1, LiveRange &lr2) const; + void CheckInterference(LiveRange &lr1, LiveRange &lr2) const; + void BuildInterferenceGraphSeparateIntFp(std::vector &intLrVec, std::vector &fpLrVec); + void BuildInterferenceGraph(); + void SetBBInfoGlobalAssigned(uint32 bbID, regno_t regNO); + bool HaveAvailableColor(const LiveRange &lr, uint32 num) const; + void Separate(); + void SplitAndColorForEachLr(MapleVector &targetLrVec); + void SplitAndColor(); + void ColorForOptPrologEpilog(); + bool IsLocalReg(regno_t regNO) const; + bool IsLocalReg(const LiveRange &lr) const; + void HandleLocalRaDebug(regno_t regNO, const LocalRegAllocator &localRa, bool isInt) const; + void HandleLocalRegAssignment(regno_t regNO, LocalRegAllocator &localRa, bool isInt); + void UpdateLocalRegDefUseCount(regno_t regNO, LocalRegAllocator &localRa, bool isDef) const; + void UpdateLocalRegConflict(regno_t regNO, const LocalRegAllocator &localRa); + void HandleLocalReg(Operand &op, LocalRegAllocator &localRa, const BBAssignInfo *bbInfo, bool isDef, bool isInt); + void LocalRaRegSetEraseReg(LocalRegAllocator &localRa, regno_t regNO) const; + bool LocalRaInitRegSet(LocalRegAllocator &localRa, uint32 bbId); + void 
LocalRaInitAllocatableRegs(LocalRegAllocator &localRa, uint32 bbId); + void LocalRaForEachDefOperand(const Insn &insn, LocalRegAllocator &localRa, const BBAssignInfo *bbInfo); + void LocalRaForEachUseOperand(const Insn &insn, LocalRegAllocator &localRa, const BBAssignInfo *bbInfo); + void LocalRaPrepareBB(BB &bb, LocalRegAllocator &localRa); + void LocalRaFinalAssignment(const LocalRegAllocator &localRa, BBAssignInfo &bbInfo); + void LocalRaDebug(const BB &bb, const LocalRegAllocator &localRa) const; + void LocalRegisterAllocator(bool doAllocate); + MemOperand *GetSpillOrReuseMem(LiveRange &lr, uint32 regSize, bool &isOutOfRange, Insn &insn, bool isDef); + void SpillOperandForSpillPre(Insn &insn, const Operand &opnd, RegOperand &phyOpnd, uint32 spillIdx, bool needSpill); + void SpillOperandForSpillPost(Insn &insn, const Operand &opnd, RegOperand &phyOpnd, + uint32 spillIdx, bool needSpill); + MemOperand *GetConsistentReuseMem(const uint64 *conflict, const std::set &usedMemOpnd, uint32 size, + RegType regType); + MemOperand *GetCommonReuseMem(const uint64 *conflict, const std::set &usedMemOpnd, uint32 size, + RegType regType); + MemOperand *GetReuseMem(uint32 vregNO, uint32 size, RegType regType); + MemOperand *GetSpillMem(uint32 vregNO, bool isDest, Insn &insn, regno_t regNO, + bool &isOutOfRange); + bool SetAvailableSpillReg(std::unordered_set &cannotUseReg, LiveRange &lr, + MapleBitVector &usedRegMask); + void CollectCannotUseReg(std::unordered_set &cannotUseReg, const LiveRange &lr, Insn &insn); + regno_t PickRegForSpill(MapleBitVector &usedRegMask, RegType regType, uint32 spillIdx, + bool &needSpillLr); + bool SetRegForSpill(LiveRange &lr, Insn &insn, uint32 spillIdx, MapleBitVector &usedRegMask, + bool isDef); + bool GetSpillReg(Insn &insn, LiveRange &lr, const uint32 &spillIdx, MapleBitVector &usedRegMask, + bool isDef); + RegOperand *GetReplaceOpndForLRA(Insn &insn, const Operand &opnd, uint32 &spillIdx, + MapleBitVector &usedRegMask, bool isDef); + RegOperand *GetReplaceUseDefOpndForLRA(Insn &insn, const Operand &opnd, uint32 &spillIdx, + MapleBitVector &usedRegMask); + bool EncountPrevRef(const BB &pred, LiveRange &lr, bool isDef, std::vector& visitedMap); + bool FoundPrevBeforeCall(Insn &insn, LiveRange &lr, bool isDef); + bool EncountNextRef(const BB &succ, LiveRange &lr, bool isDef, std::vector& visitedMap); + bool FoundNextBeforeCall(Insn &insn, LiveRange &lr, bool isDef); + bool HavePrevRefInCurBB(Insn &insn, LiveRange &lr, bool &contSearch) const; + bool HaveNextDefInCurBB(Insn &insn, LiveRange &lr, bool &contSearch) const; + bool NeedCallerSave(Insn &insn, LiveRange &lr, bool isDef); + RegOperand *GetReplaceOpnd(Insn &insn, const Operand &opnd, uint32 &spillIdx, + MapleBitVector &usedRegMask, bool isDef); + RegOperand *GetReplaceUseDefOpnd(Insn &insn, const Operand &opnd, uint32 &spillIdx, + MapleBitVector &usedRegMask); + void MarkCalleeSaveRegs(); + void MarkUsedRegs(Operand &opnd, MapleBitVector &usedRegMask); + bool FinalizeRegisterPreprocess(FinalizeRegisterInfo &fInfo, const Insn &insn, + MapleBitVector &usedRegMask); + void SplitVregAroundLoop(const CGFuncLoops &loop, const std::vector &lrs, + BB &headerPred, BB &exitSucc, const std::set &cands); + bool LoopNeedSplit(const CGFuncLoops &loop, std::set &cands); + bool LrGetBadReg(const LiveRange &lr) const; + void AnalysisLoopPressureAndSplit(const CGFuncLoops &loop); + void AnalysisLoop(const CGFuncLoops &loop); + void OptCallerSave(); + void FinalizeRegisters(); + void GenerateSpillFillRegs(const Insn &insn); + 
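+  /*
+   * Inferred from the surrounding declarations (not normative): GenerateSpillFillRegs
+   * chooses scratch physical registers for the spilled operands of `insn`, and
+   * CreateSpillFillCode then emits the corresponding reload/store code around `insn`,
+   * with `spillCnt` distinguishing multiple spilled operands of the same instruction.
+   */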
RegOperand *CreateSpillFillCode(const RegOperand &opnd, Insn &insn, uint32 spillCnt, bool isdef = false); + bool SpillLiveRangeForSpills(); + void FinalizeSpSaveReg(); + + MapleVector::iterator GetHighPriorityLr(MapleVector &lrSet) const; + void UpdateForbiddenForNeighbors(const LiveRange &lr) const; + void UpdatePregvetoForNeighbors(const LiveRange &lr) const; + regno_t FindColorForLr(const LiveRange &lr) const; + regno_t TryToAssignCallerSave(const LiveRange &lr) const; + bool ShouldUseCallee(LiveRange &lr, const MapleSet &calleeUsed, + const MapleVector &delayed) const; + void AddCalleeUsed(regno_t regNO, RegType regType); + bool AssignColorToLr(LiveRange &lr, bool isDelayed = false); + void PruneLrForSplit(LiveRange &lr, BB &bb, bool remove, std::set &candidateInLoop, + std::set &defInLoop); + bool UseIsUncovered(const BB &bb, const BB &startBB, std::vector &visitedBB); + void FindUseForSplit(LiveRange &lr, SplitBBInfo &bbInfo, bool &remove, + std::set &candidateInLoop, + std::set &defInLoop); + void FindBBSharedInSplit(LiveRange &lr, + const std::set &candidateInLoop, + std::set &defInLoop); + void ComputeBBForNewSplit(LiveRange &newLr, LiveRange &origLr); + void ClearLrBBFlags(const std::set &member) const; + void ComputeBBForOldSplit(LiveRange &newLr, LiveRange &origLr); + bool LrCanBeColored(const LiveRange &lr, const BB &bbAdded, std::unordered_set &conflictRegs); + void MoveLrBBInfo(LiveRange &oldLr, LiveRange &newLr, BB &bb) const; + bool ContainsLoop(const CGFuncLoops &loop, const std::set &loops) const; + void GetAllLrMemberLoops(LiveRange &lr, std::set &loops); + bool SplitLrShouldSplit(LiveRange &lr); + bool SplitLrFindCandidateLr(LiveRange &lr, LiveRange &newLr, std::unordered_set &conflictRegs); + void SplitLrHandleLoops(LiveRange &lr, LiveRange &newLr, const std::set &origLoops, + const std::set &newLoops); + void SplitLrFixNewLrCallsAndRlod(LiveRange &newLr, const std::set &origLoops); + void SplitLrFixOrigLrCalls(LiveRange &lr) const; + void SplitLrUpdateInterference(LiveRange &lr); + void SplitLrUpdateRegInfo(const LiveRange &origLr, LiveRange &newLr, + std::unordered_set &conflictRegs) const ; + void SplitLrErrorCheckAndDebug(const LiveRange &origLr) const; + void SplitLr(LiveRange &lr); + + static constexpr uint16 kMaxUint16 = 0x7fff; + + DomAnalysis &domInfo; + MapleVector bbVec; + MapleUnorderedSet vregLive; + MapleUnorderedSet pregLive; + MapleMap lrMap; + MapleVector localRegVec; /* local reg info for each bb, no local reg if null */ + MapleVector bbRegInfo; /* register assignment info for each bb */ + MapleVector unconstrained; + MapleVector unconstrainedPref; + MapleVector constrained; + MapleVector mustAssigned; +#ifdef OPTIMIZE_FOR_PROLOG + MapleVector intDelayed; + MapleVector fpDelayed; +#endif /* OPTIMIZE_FOR_PROLOG */ + MapleSet intCallerRegSet; /* integer caller saved */ + MapleSet intCalleeRegSet; /* callee */ + MapleSet intSpillRegSet; /* spill */ + MapleSet fpCallerRegSet; /* float caller saved */ + MapleSet fpCalleeRegSet; /* callee */ + MapleSet fpSpillRegSet; /* spill */ + MapleSet intCalleeUsed; + MapleSet fpCalleeUsed; + Bfs *bfs = nullptr; + + uint32 bbBuckets = 0; /* size of bit array for bb (each bucket == 64 bits) */ + uint32 regBuckets = 0; /* size of bit array for reg (each bucket == 64 bits) */ + uint32 intRegNum = 0; /* total available int preg */ + uint32 fpRegNum = 0; /* total available fp preg */ + uint32 numVregs = 0; /* number of vregs when starting */ + /* For spilling of spill register if there are none available + * Example, all 3 
operands spilled + * sp_reg1 -> [spillMemOpnds[1]] + * sp_reg2 -> [spillMemOpnds[2]] + * ld sp_reg1 <- [addr-reg2] + * ld sp_reg2 <- [addr-reg3] + * reg1 <- reg2, reg3 sp_reg1 <- sp_reg1, sp_reg2 + * st sp_reg1 -> [addr-reg1] + * sp_reg1 <- [spillMemOpnds[1]] + * sp_reg2 <- [spillMemOpnds[2]] + */ + std::array spillMemOpnds = { nullptr }; + bool operandSpilled[kSpillMemOpndNum]; + bool needExtraSpillReg = false; +#ifdef USE_LRA + bool doLRA = true; +#else + bool doLRA = false; +#endif +#ifdef OPTIMIZE_FOR_PROLOG + bool doOptProlog = true; +#else + bool doOptProlog = false; +#endif + bool hasSpill = false; + bool doMultiPass = false; +}; + +class CallerSavePre : public CGPre { + public: + CallerSavePre(GraphColorRegAllocator * regAlloc, CGFunc &cgfunc, DomAnalysis &currDom, + MemPool &memPool, MemPool &mp2, PreKind kind, uint32 limit) + : CGPre(currDom, memPool, mp2, kind, limit), + func(&cgfunc), + regAllocator(regAlloc), + loopHeadBBs(ssaPreAllocator.Adapter()) {} + + ~CallerSavePre() = default; + + void ApplySSAPRE(); + void SetDump(bool val) { + dump = val; + } + private: + void CodeMotion() ; + void UpdateLoadSite(CgOccur *occ); + void CalLoadSites(); + void ComputeAvail(); + void Rename1(); + void ComputeVarAndDfPhis() override; + void BuildWorkList() override; + void DumpWorkCandAndOcc(); + + BB *GetBB(uint32 id) const override { + return func->GetBBFromID(id); + } + + PUIdx GetPUIdx() const override { + return func->GetFunction().GetPuidx(); + } + + bool IsLoopHeadBB(uint32 bbId) const { + return loopHeadBBs.find(bbId) != loopHeadBBs.end(); + } + CGFunc *func; + bool dump = false; + LiveRange *workLr = nullptr; + GraphColorRegAllocator *regAllocator; + MapleSet loopHeadBBs; + bool beyondLimit = false; +}; +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_COLOR_RA_H */ diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_data_dep_base.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_data_dep_base.h new file mode 100644 index 0000000000000000000000000000000000000000..2a90a4d25e7914d9ee963dfe21b22bb0fd88baa5 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_data_dep_base.h @@ -0,0 +1,65 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
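
A note on the bbBuckets / regBuckets fields of GraphColorRegAllocator above: the allocator sizes its per-live-range bit arrays in 64-bit buckets ("each bucket == 64 bits"), one bit per basic block or register. A minimal standalone sketch of that bucket arithmetic (illustrative helpers only, not the MapleBE API):

#include <cstdint>
#include <vector>

// One bucket covers 64 ids, so a set over n ids needs (n + 63) / 64 buckets.
inline uint32_t BucketsFor(uint32_t n) { return (n + 63) / 64; }
inline void SetMember(std::vector<uint64_t> &set, uint32_t id) { set[id / 64] |= uint64_t{1} << (id % 64); }
inline bool IsMember(const std::vector<uint64_t> &set, uint32_t id) { return ((set[id / 64] >> (id % 64)) & 1) != 0; }

// For example, a live range's basic-block membership would be sized as:
//   std::vector<uint64_t> bbMember(BucketsFor(numBBs), 0);
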
+ */ +#ifndef MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_DATA_DEP_BASE_H +#define MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_DATA_DEP_BASE_H + +#include "aarch64_operand.h" +#include "cgfunc.h" +#include "data_dep_base.h" + +namespace maplebe { +class AArch64DataDepBase : public DataDepBase { + public: + AArch64DataDepBase(MemPool &mp, CGFunc &func, MAD &mad) : DataDepBase(mp, func, mad) {} + ~AArch64DataDepBase() override = default; + + void CombineClinit(DepNode &firstNode, DepNode &secondNode, bool isAcrossSeparator) override; + void CombineMemoryAccessPair(DepNode &firstNode, DepNode &secondNode, bool useFirstOffset) override; + bool IsFrameReg(const RegOperand &opnd) const override; + void AnalysisAmbiInsns(BB &bb) override; + void BuildDepsMemBar(Insn &insn) override; + void BuildDepsUseMem(Insn &insn, MemOperand &aarchMemOpnd) override; + void BuildDepsDefMem(Insn &insn, MemOperand &aarchMemOpnd) override; + void BuildDepsAccessStImmMem(Insn &insn, bool isDest) override; + void BuildCallerSavedDeps(Insn &insn) override; + void BuildStackPassArgsDeps(Insn &insn) override; + void BuildDepsDirtyStack(Insn &insn) override; + void BuildDepsUseStack(Insn &insn) override; + void BuildDepsDirtyHeap(Insn &insn) override; + void BuildOpndDependency(Insn &insn) override; + void BuildSpecialInsnDependency(Insn &insn, const MapleVector &nodes) override; + void UpdateRegUseAndDef(Insn &insn, const DepNode &depNode, MapleVector &nodes) override; + DepNode *BuildSeparatorNode() override; + void BuildInterBlockMemDefUseDependency(DepNode &depNode, MemOperand &memOpnd, + MemOperand *nextMemOpnd, bool isMemDef) override; + void BuildPredPathMemDefDependencyDFS(BB &curBB, std::vector &visited, DepNode &depNode, + MemOperand &memOpnd, MemOperand *nextMemOpnd) override; + void BuildPredPathMemUseDependencyDFS(BB &curBB, std::vector &visited, DepNode &depNode, + MemOperand &memOpnd, MemOperand *nextMemOpnd) override; + + void BuildAntiDepsDefStackMem(Insn &insn, MemOperand &memOpnd, const MemOperand *nextMemOpnd); + bool NeedBuildDepsMem(const MemOperand &memOpnd, + const MemOperand *nextMemOpnd, const Insn &memInsn) const; + + protected: + MemOperand *GetNextMemOperand(const Insn &insn, const MemOperand &aarchMemOpnd) const; + void BuildMemOpndDependency(Insn &insn, Operand &opnd, const OpndDesc ®Prop); + MemOperand *BuildNextMemOperandByByteSize(const MemOperand &aarchMemOpnd, uint32 byteSize) const; + void ReplaceDepNodeWithNewInsn(DepNode &firstNode, DepNode &secondNode, Insn& newInsn, bool isFromClinit) const; + void ClearDepNodeInfo(DepNode &depNode) const; +}; +} + +#endif /* MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_DATA_DEP_BASE_H */ diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_dce.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_dce.h new file mode 100644 index 0000000000000000000000000000000000000000..4d78f79cbae3d788e8916f3c967d6158fcf81bc0 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_dce.h @@ -0,0 +1,42 @@ +/* +* Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +* +* OpenArkCompiler is licensed under Mulan PSL v2. +* You can use this software according to the terms and conditions of the Mulan PSL v2. +* You may obtain a copy of Mulan PSL v2 at: +* +* http://license.coscl.org.cn/MulanPSL2 +* +* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +* FIT FOR A PARTICULAR PURPOSE. +* See the Mulan PSL v2 for more details. 
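
NeedBuildDepsMem and BuildNextMemOperandByByteSize above decide whether two memory accesses need a dependence edge; at its core this is an interval-overlap test for accesses off the same base register. A minimal sketch of such a test (illustrative types only; the real check is more conservative about unknown offsets and about distinct bases that may still alias):

#include <cstdint>

struct MemRef {
  uint32_t baseRegNO;  // register number of the base
  int64_t offset;      // byte offset from the base
  uint32_t byteSize;   // access width in bytes
};

// Conservative "may the two accesses touch the same bytes?" test.
inline bool MayOverlap(const MemRef &a, const MemRef &b) {
  if (a.baseRegNO != b.baseRegNO) {
    return true;  // different bases: assume possible aliasing
  }
  return a.offset < b.offset + static_cast<int64_t>(b.byteSize) &&
         b.offset < a.offset + static_cast<int64_t>(a.byteSize);
}
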
+*/ + +#ifndef MAPLEBE_INCLUDE_AARCH64_DCE_H +#define MAPLEBE_INCLUDE_AARCH64_DCE_H + +#include "cg_dce.h" +namespace maplebe { +class AArch64Dce : public CGDce { + public: + AArch64Dce(MemPool &mp, CGFunc &f, CGSSAInfo &sInfo) : CGDce(mp, f, sInfo) {} + ~AArch64Dce() override = default; + + private: + bool RemoveUnuseDef(VRegVersion &defVersion) override; +}; + +class A64DeleteRegUseVisitor : public DeleteRegUseVisitor { + public: + A64DeleteRegUseVisitor(CGSSAInfo &cgSSAInfo, uint32 dInsnID) : DeleteRegUseVisitor(cgSSAInfo, dInsnID) {} + ~A64DeleteRegUseVisitor() override = default; + + private: + void Visit(RegOperand *v) final; + void Visit(ListOperand *v) final; + void Visit(MemOperand *v) final; + void Visit(PhiOperand *v) final; +}; +} +#endif /* MAPLEBE_INCLUDE_AARCH64_DCE_H */ diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_dependence.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_dependence.h new file mode 100644 index 0000000000000000000000000000000000000000..a8626fece0583d66a6c5383e0812341b53a6aca9 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_dependence.h @@ -0,0 +1,108 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_DEPENDENCE_H +#define MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_DEPENDENCE_H + +#include "aarch64_operand.h" +#include "cgfunc.h" +#include "dependence.h" + +namespace maplebe { +class AArch64DepAnalysis : public DepAnalysis { + public: + AArch64DepAnalysis(CGFunc &func, MemPool &mp, MAD &mad, bool beforeRA); + + ~AArch64DepAnalysis() override = default; + + void Run(BB &bb, MapleVector &nodes) override; + const std::string &GetDepTypeName(DepType depType) const override; + void DumpDepNode(DepNode &node) const override; + void DumpDepLink(DepLink &link, const DepNode *node) const override; + + protected: + void Init(BB &bb, MapleVector &nodes) override; + void ClearAllDepData() override; + void AnalysisAmbiInsns(BB &bb) override; + void AppendRegUseList(Insn &insn, regno_t regNO) override; + void AddDependence(DepNode &fromNode, DepNode &toNode, DepType depType) override; + void RemoveSelfDeps(Insn &insn) override; + void CombineClinit(DepNode &firstNode, DepNode &secondNode, bool isAcrossSeparator) override; + void CombineDependence(DepNode &firstNode, DepNode &secondNode, bool isAcrossSeparator, + bool isMemCombine = false) override; + void CombineMemoryAccessPair(DepNode &firstNode, DepNode &secondNode, bool useFirstOffset) override; + void BuildDepsUseReg(Insn &insn, regno_t regNO) override; + void BuildDepsDefReg(Insn &insn, regno_t regNO) override; + void BuildDepsAmbiInsn(Insn &insn) override; + void BuildDepsMayThrowInsn(Insn &insn) override; + bool NeedBuildDepsMem(const MemOperand &memOpnd, const MemOperand *nextMemOpnd, const Insn &memInsn) const; + void BuildDepsUseMem(Insn &insn, MemOperand &aarchMemOpnd) override; + void BuildDepsDefMem(Insn &insn, MemOperand &aarchMemOpnd) override; + void BuildAntiDepsDefStackMem(Insn &insn, MemOperand 
&memOpnd, const MemOperand *nextMemOpnd); + void BuildOutputDepsDefStackMem(Insn &insn, MemOperand &memOpnd, const MemOperand *nextMemOpnd); + void BuildDepsMemBar(Insn &insn) override; + void BuildDepsSeparator(DepNode &newSepNode, MapleVector &nodes) override; + void BuildDepsControlAll(DepNode &depNode, const MapleVector &nodes) override; + void BuildDepsAccessStImmMem(Insn &insn, bool isDest) override; + void BuildCallerSavedDeps(Insn &insn) override; + void BuildDepsBetweenControlRegAndCall(Insn &insn, bool isDest) override; + void BuildStackPassArgsDeps(Insn &insn) override; + void BuildDepsDirtyStack(Insn &insn) override; + void BuildDepsUseStack(Insn &insn) override; + void BuildDepsDirtyHeap(Insn &insn) override; + DepNode *BuildSeparatorNode() override; + bool IfInAmbiRegs(regno_t regNO) const override; + bool IsFrameReg(const RegOperand &opnd) const override; + + private: + MemOperand *GetNextMemOperand(const Insn &insn, const MemOperand &aarchMemOpnd) const; + void BuildMemOpndDependency(Insn &insn, Operand &opnd, const OpndDesc ®Prop); + void BuildOpndDependency(Insn &insn); + void BuildSpecialInsnDependency(Insn &insn, DepNode &depNode, const MapleVector &nodes); + void SeperateDependenceGraph(MapleVector &nodes, uint32 &nodeSum); + DepNode *GenerateDepNode(Insn &insn, MapleVector &nodes, int32 nodeSum, + const MapleVector &comments); + void BuildAmbiInsnDependency(Insn &insn); + void BuildMayThrowInsnDependency(Insn &insn); + void UpdateRegUseAndDef(Insn &insn, const DepNode &depNode, MapleVector &nodes); + void UpdateStackAndHeapDependency(DepNode &depNode, Insn &insn, const Insn &locInsn); + MemOperand *BuildNextMemOperandByByteSize(const MemOperand &aarchMemOpnd, uint32 byteSize) const; + void AddDependence4InsnInVectorByType(MapleVector &insns, Insn &insn, const DepType &type); + void AddDependence4InsnInVectorByTypeAndCmp(MapleVector &insns, Insn &insn, const DepType &type); + void ReplaceDepNodeWithNewInsn(DepNode &firstNode, DepNode &secondNode, Insn &newInsn, bool isFromClinit) const; + void ClearDepNodeInfo(DepNode &depNode) const; + void AddEndSeparatorNode(MapleVector &nodes); + + Insn **regDefs = nullptr; + RegList **regUses = nullptr; + Insn *memBarInsn = nullptr; + bool hasAmbiRegs = false; + Insn *lastCallInsn = nullptr; + uint32 separatorIndex = 0; + Insn *lastFrameDef = nullptr; + MapleVector stackUses; + MapleVector stackDefs; + MapleVector heapUses; + MapleVector heapDefs; + MapleVector mayThrows; + /* instructions that can not across may throw instructions. */ + MapleVector ambiInsns; + /* register number that catch bb and cleanup bb uses. */ + MapleSet ehInRegs; + /* the bb to be scheduling currently */ + BB *curBB = nullptr; +}; +} // namespace maplebe + +#endif /* MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_DEPENDENCE_H */ diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_ebo.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_ebo.h new file mode 100644 index 0000000000000000000000000000000000000000..224ca456a8c15d75391930fe8f9025535fa846cf --- /dev/null +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_ebo.h @@ -0,0 +1,93 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_EBO_H +#define MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_EBO_H + +#include "ebo.h" +#include "aarch64_operand.h" +#include "aarch64_cgfunc.h" + +namespace maplebe { + +class AArch64Ebo : public Ebo { + public: + AArch64Ebo(CGFunc &func, MemPool &memPool, LiveAnalysis *live, bool before, const std::string &phase) + : Ebo(func, memPool, live, before, phase), + callerSaveRegTable(eboAllocator.Adapter()) { + a64CGFunc = static_cast(cgFunc); + } + + enum ExtOpTable : uint8; + + ~AArch64Ebo() override = default; + + protected: + MapleVector callerSaveRegTable; + AArch64CGFunc *a64CGFunc = nullptr; + int32 GetOffsetVal(const MemOperand &memOpnd) const override; + OpndInfo *OperandInfoDef(BB &currentBB, Insn &currentInsn, Operand &localOpnd) override; + const RegOperand &GetRegOperand(const Operand &opnd) const override; + bool IsGlobalNeeded(Insn &insn) const override; + bool IsDecoupleStaticOp(Insn &insn) const override; + bool OperandEqSpecial(const Operand &op1, const Operand &op2) const override; + bool DoConstProp(Insn &insn, uint32 idx, Operand &opnd) override; + bool Csel2Cset(Insn &insn, const MapleVector &opnds) override; + bool SimplifyConstOperand(Insn &insn, const MapleVector &opnds, + const MapleVector &opndInfo) override; + void BuildCallerSaveRegisters() override; + void DefineAsmRegisters(InsnInfo &insnInfo) override; + void DefineCallerSaveRegisters(InsnInfo &insnInfo) override; + void DefineReturnUseRegister(Insn &insn) override; + void DefineCallUseSpecialRegister(Insn &insn) override; + void DefineClinitSpecialRegisters(InsnInfo &insnInfo) override; + bool CombineExtensionAndLoad(Insn *insn, const MapleVector &origInfos, + ExtOpTable idx, bool is64bits) const; + bool SpecialSequence(Insn &insn, const MapleVector &origInfos) override; + bool IsMovToSIMDVmov(Insn &insn, const Insn &replaceInsn) const override; + bool IsPseudoRet(Insn &insn) const override; + bool ChangeLdrMop(Insn &insn, const Operand &opnd) const override; + bool IsAdd(const Insn &insn) const override; + bool IsFmov(const Insn &insn) const override; + bool IsClinitCheck(const Insn &insn) const override; + bool IsLastAndBranch(BB &bb, Insn &insn) const override; + bool IsSameRedefine(BB &bb, Insn &insn, OpndInfo &opndInfo) const override; + bool ResIsNotDefAndUse(Insn &insn) const override; + bool LiveOutOfBB(const Operand &opnd, const BB &bb) const override; + bool IsInvalidReg(const RegOperand &opnd) const override; + bool IsZeroRegister(const Operand &opnd) const override; + bool IsConstantImmOrReg(const Operand &opnd) const override; + bool OperandLiveAfterInsn(const RegOperand &regOpnd, Insn &insn) const; + bool ValidPatternForCombineExtAndLoad(OpndInfo *prevOpndInfo, Insn *insn, MOperator newMop, MOperator oldMop, + const RegOperand& opnd) const; + + private: + /* The number of elements in callerSaveRegTable must be less than 45.
*/ + static constexpr int32 kMaxCallerSaveReg = 45; + MOperator ExtLoadSwitchBitSize(MOperator lowMop) const; + bool CheckCondCode(const CondOperand &cond) const; + bool CombineMultiplyAdd(Insn *insn, const Insn *prevInsn, InsnInfo *insnInfo, Operand *addOpnd, + bool is64bits, bool isFp) const; + bool CheckCanDoMadd(Insn *insn, OpndInfo *opndInfo, int32 pos, bool is64bits, bool isFp) const; + bool CombineMultiplySub(Insn *insn, OpndInfo *opndInfo, bool is64bits, bool isFp) const; + bool CombineMultiplyNeg(Insn *insn, OpndInfo *opndInfo, bool is64bits, bool isFp) const; + bool SimplifyBothConst(BB &bb, Insn &insn, const ImmOperand &immOperand0, const ImmOperand &immOperand1, + uint32 opndSize) const; + ConditionCode GetReverseCond(const CondOperand &cond) const; + bool CombineLsrAnd(Insn &insn, const OpndInfo &opndInfo, bool is64bits, bool isFp) const; + bool CombineExtAnd(Insn &insn, const OpndInfo &opndInfo, bool isFp, int64 immVal) const; +}; +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_EBO_H */ diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_emitter.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_emitter.h new file mode 100644 index 0000000000000000000000000000000000000000..d34cbd4b6e3ed07f53c62ff335e2769c8bdc71b0 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_emitter.h @@ -0,0 +1,69 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_EMITTER_H +#define MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_EMITTER_H + +#include "asm_emit.h" + +namespace maplebe { +using namespace maple; + +class AArch64AsmEmitter : public AsmEmitter { + public: + AArch64AsmEmitter(CG &cg, const std::string &asmFileName) : AsmEmitter(cg, asmFileName) {} + ~AArch64AsmEmitter() = default; + + void EmitRefToMethodDesc(FuncEmitInfo &funcEmitInfo, Emitter &emitter) override; + void EmitRefToMethodInfo(FuncEmitInfo &funcEmitInfo, Emitter &emitter) override; + void EmitMethodDesc(FuncEmitInfo &funcEmitInfo, Emitter &emitter) override; + void EmitFastLSDA(FuncEmitInfo &funcEmitInfo) override; + void EmitFullLSDA(FuncEmitInfo &funcEmitInfo) override; + void EmitBBHeaderLabel(FuncEmitInfo &funcEmitInfo, const std::string &name, LabelIdx labIdx) override; + void EmitJavaInsnAddr(FuncEmitInfo &funcEmitInfo) override; + void RecordRegInfo(FuncEmitInfo &funcEmitInfo) const; + void Run(FuncEmitInfo &funcEmitInfo) override; + + private: + /* cfi & dbg need target info ? 
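
The CombineMultiplyAdd / CheckCanDoMadd / CombineMultiplySub hooks declared for AArch64Ebo above fold a multiply whose result feeds the following add or sub into a single madd/msub-style operation. A toy, self-contained version of the madd case (ToyInsn is an illustrative type, not the MapleBE Insn class; the real pass additionally checks that the product has no other uses and is not redefined in between):

#include <cstdint>
#include <optional>

enum class Op { kMul, kAdd, kMadd };
struct ToyInsn { Op op; uint32_t dst, src1, src2, src3; };

// Returns the fused instruction (dst = src1 * src2 + src3) when the add consumes the product.
std::optional<ToyInsn> TryFormMadd(const ToyInsn &mul, const ToyInsn &add) {
  if (mul.op != Op::kMul || add.op != Op::kAdd) {
    return std::nullopt;
  }
  if (add.src1 != mul.dst && add.src2 != mul.dst) {
    return std::nullopt;  // the add does not use the multiply result
  }
  uint32_t accumulated = (add.src1 == mul.dst) ? add.src2 : add.src1;
  return ToyInsn{Op::kMadd, add.dst, mul.src1, mul.src2, accumulated};
}
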
*/ + void EmitAArch64CfiInsn(Emitter &emitter, const Insn &insn) const; + void EmitAArch64DbgInsn(FuncEmitInfo &funcEmitInfo, Emitter &emitter, const Insn &insn) const; + + void EmitAArch64Insn(Emitter &emitter, Insn &insn) const; + void EmitClinit(Emitter &emitter, const Insn &insn) const; + void EmitAdrpLdr(Emitter &emitter, const Insn &insn) const; + void EmitCounter(Emitter &emitter, const Insn &insn) const; + void EmitCCounter(Emitter &emitter, const Insn &insn) const; + void EmitInlineAsm(Emitter &emitter, const Insn &insn) const; + void EmitClinitTail(Emitter &emitter, const Insn &insn) const; + void EmitLazyLoad(Emitter &emitter, const Insn &insn) const; + void EmitAdrpLabel(Emitter &emitter, const Insn &insn) const; + void EmitLazyLoadStatic(Emitter &emitter, const Insn &insn) const; + void EmitArrayClassCacheLoad(Emitter &emitter, const Insn &insn) const; + void EmitGetAndAddInt(Emitter &emitter, const Insn &insn) const; + void EmitGetAndSetInt(Emitter &emitter, const Insn &insn) const; + void EmitCompareAndSwapInt(Emitter &emitter, const Insn &insn) const; + void EmitStringIndexOf(Emitter &emitter, const Insn &insn) const; + void EmitLazyBindingRoutine(Emitter &emitter, const Insn &insn) const; + void EmitCheckThrowPendingException(Emitter &emitter) const; + void EmitCTlsDescRel(Emitter &emitter, const Insn &insn) const; + void EmitCTlsDescCall(Emitter &emitter, const Insn &insn) const; + void EmitSyncLockTestSet(Emitter &emitter, const Insn &insn) const; + + void PrepareVectorOperand(RegOperand *regOpnd, uint32 &compositeOpnds, Insn &insn) const; + bool CheckInsnRefField(const Insn &insn, size_t opndIndex) const; +}; +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_EMITTER_H */ diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_fixshortbranch.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_fixshortbranch.h new file mode 100644 index 0000000000000000000000000000000000000000..8fd3b9fdaf445739fce944758f720352983ea3a3 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_fixshortbranch.h @@ -0,0 +1,38 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_FIXSHORTBRANCH_H +#define MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_FIXSHORTBRANCH_H + +#include "aarch64_cg.h" +#include "optimize_common.h" +#include "mir_builder.h" + +namespace maplebe { +class AArch64FixShortBranch { + public: + explicit AArch64FixShortBranch(CGFunc *cf) : cgFunc(cf) {} + ~AArch64FixShortBranch() = default; + void FixShortBranches() const; + + private: + CGFunc *cgFunc = nullptr; + uint32 CalculateAlignRange(const BB &bb, uint32 addr) const; + void SetInsnId() const; +}; /* class AArch64ShortBranch */ + +MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgFixShortBranch, maplebe::CGFunc) +MAPLE_FUNC_PHASE_DECLARE_END +} /* namespace maplebe */ +#endif /* MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_FIXSHORTBRANCH_H */ diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_fp_simd_regs.def b/src/mapleall/maple_be/include/cg/aarch64/aarch64_fp_simd_regs.def new file mode 100644 index 0000000000000000000000000000000000000000..c630b95c7775a55522b66f291463b7133a0007cd --- /dev/null +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_fp_simd_regs.def @@ -0,0 +1,75 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan PSL v1. + * You can use this software according to the terms and conditions of the Mulan PSL v1. + * You may obtain a copy of Mulan PSL v1 at: + * + * http://license.coscl.org.cn/MulanPSL + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v1 for more details. + */ +/* + * ARM Compiler armasm User Guide version 6.6. + * http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.dui0473j/deb1353594352617.html + * (retrieved on 3/24/2017) + */ +/* + * ID, 128 bit vector prefix, followed by scalar prefixes + * scalar prefixes: 8-bit, 16-bit, 32-bit, 64-bit, 128-bit, canBeAssigned, isCalleeSave, isParam, isSpill, isExtraSpill + * (e.g., we use D0 when V0 contains a 64-bit scalar FP number (aka, double)) + */ +FP_SIMD_REG(0 , "V", "B", "H", "S", "D", "Q", true, false, true, false, false) +FP_SIMD_REG(1 , "V", "B", "H", "S", "D", "Q", true, false, true, false, false) +FP_SIMD_REG(2 , "V", "B", "H", "S", "D", "Q", true, false, true, false, false) +FP_SIMD_REG(3 , "V", "B", "H", "S", "D", "Q", true, false, true, false, false) +FP_SIMD_REG(4 , "V", "B", "H", "S", "D", "Q", true, false, true, false, false) +FP_SIMD_REG(5 , "V", "B", "H", "S", "D", "Q", true, false, true, false, false) +FP_SIMD_REG(6 , "V", "B", "H", "S", "D", "Q", true, false, true, false, false) +FP_SIMD_REG(7 , "V", "B", "H", "S", "D", "Q", true, false, true, false, false) +FP_SIMD_REG(8 , "V", "B", "H", "S", "D", "Q", true, true, false, false, false) +FP_SIMD_REG(9 , "V", "B", "H", "S", "D", "Q", true, true, false, false, false) +FP_SIMD_REG(10, "V", "B", "H", "S", "D", "Q", true, true, false, false, false) +FP_SIMD_REG(11, "V", "B", "H", "S", "D", "Q", true, true, false, false, false) +FP_SIMD_REG(12, "V", "B", "H", "S", "D", "Q", true, true, false, false, false) +FP_SIMD_REG(13, "V", "B", "H", "S", "D", "Q", true, true, false, false, false) +FP_SIMD_REG(14, "V", "B", "H", "S", "D", "Q", true, true, false, false, false) +FP_SIMD_REG(15, "V", "B", "H", "S", "D", "Q", true, true, false, false, false) +FP_SIMD_REG(16, "V", "B", "H", "S", "D", "Q", true, false, false, false, false) +FP_SIMD_REG(17, "V", "B", "H", "S", 
"D", "Q", true, false, false, false, false) +FP_SIMD_REG(18, "V", "B", "H", "S", "D", "Q", true, false, false, false, false) +FP_SIMD_REG(19, "V", "B", "H", "S", "D", "Q", true, false, false, false, false) +FP_SIMD_REG(20, "V", "B", "H", "S", "D", "Q", true, false, false, false, false) +FP_SIMD_REG(21, "V", "B", "H", "S", "D", "Q", true, false, false, false, false) +FP_SIMD_REG(22, "V", "B", "H", "S", "D", "Q", true, false, false, false, false) +FP_SIMD_REG(23, "V", "B", "H", "S", "D", "Q", true, false, false, false, false) +FP_SIMD_REG(24, "V", "B", "H", "S", "D", "Q", true, false, false, false, false) +FP_SIMD_REG(25, "V", "B", "H", "S", "D", "Q", true, false, false, false, false) +FP_SIMD_REG(26, "V", "B", "H", "S", "D", "Q", true, false, false, false, false) +FP_SIMD_REG(27, "V", "B", "H", "S", "D", "Q", true, false, false, false, false) +FP_SIMD_REG(28, "V", "B", "H", "S", "D", "Q", true, false, false, false, false) +FP_SIMD_REG(29, "V", "B", "H", "S", "D", "Q", true, false, false, false, true) +FP_SIMD_REG(30, "V", "B", "H", "S", "D", "Q", true, false, false, true, false) +FP_SIMD_REG(31, "V", "B", "H", "S", "D", "Q", true, false, false, true, false) + +/* Alias ID */ +FP_SIMD_REG_ALIAS(0) +FP_SIMD_REG_ALIAS(1) +FP_SIMD_REG_ALIAS(2) +FP_SIMD_REG_ALIAS(3) +FP_SIMD_REG_ALIAS(4) +FP_SIMD_REG_ALIAS(5) +FP_SIMD_REG_ALIAS(6) +FP_SIMD_REG_ALIAS(7) + +/* FP_SIMD_REG_ALIAS_64BIT_SCALAR(0) */ +/* FP_SIMD_REG_ALIAS_64BIT_SCALAR(1) */ +/* FP_SIMD_REG_ALIAS_64BIT_SCALAR(2) */ +/* FP_SIMD_REG_ALIAS_64BIT_SCALAR(3) */ +/* FP_SIMD_REG_ALIAS_64BIT_SCALAR(4) */ +/* FP_SIMD_REG_ALIAS_64BIT_SCALAR(5) */ +/* FP_SIMD_REG_ALIAS_64BIT_SCALAR(6) */ +/* FP_SIMD_REG_ALIAS_64BIT_SCALAR(7) */ \ No newline at end of file diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_global.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_global.h new file mode 100644 index 0000000000000000000000000000000000000000..dea57f8a82c2d00aea5339e480f96a0cff7e86c9 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_global.h @@ -0,0 +1,484 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_GLOBAL_H +#define MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_GLOBAL_H + +#include "aarch64_opt_utiles.h" +#include "global.h" +#include "aarch64_operand.h" + +namespace maplebe { +using namespace maple; + +class AArch64GlobalOpt : public GlobalOpt { + public: + explicit AArch64GlobalOpt(CGFunc &func) : GlobalOpt(func) {} + ~AArch64GlobalOpt() override = default; + void Run() override; +}; + +class OptimizeManager { + public: + explicit OptimizeManager(CGFunc &cgFunc) : cgFunc(cgFunc) {} + ~OptimizeManager() = default; + template + void Optimize() { + OptimizePattern optPattern(cgFunc); + optPattern.Run(); + } + private: + CGFunc &cgFunc; +}; + +class OptimizePattern { + public: + explicit OptimizePattern(CGFunc &cgFunc) : cgFunc(cgFunc) {} + virtual ~OptimizePattern() = default; + virtual bool CheckCondition(Insn &insn) = 0; + virtual void Optimize(Insn &insn) = 0; + virtual void Run() = 0; + bool OpndDefByOne(Insn &insn, int32 useIdx) const; + bool OpndDefByZero(Insn &insn, int32 useIdx) const; + bool OpndDefByOneOrZero(Insn &insn, int32 useIdx) const; + void ReplaceAllUsedOpndWithNewOpnd(const InsnSet &useInsnSet, uint32 regNO, + Operand &newOpnd, bool updateInfo) const; + + static bool InsnDefOne(const Insn &insn); + static bool InsnDefZero(const Insn &insn); + static bool InsnDefOneOrZero(const Insn &insn); + + std::string PhaseName() const { + return "globalopt"; + } + protected: + virtual void Init() = 0; + CGFunc &cgFunc; +}; + +/* + * Do Forward prop when insn is mov + * mov xx, x1 + * ... // BBs and x1 is live + * mOp yy, xx + * + * => + * mov x1, x1 + * ... // BBs and x1 is live + * mOp yy, x1 + */ +class ForwardPropPattern : public OptimizePattern { + public: + explicit ForwardPropPattern(CGFunc &cgFunc) : OptimizePattern(cgFunc) {} + ~ForwardPropPattern() override = default; + bool CheckCondition(Insn &insn) final; + void Optimize(Insn &insn) final; + void Run() final; + + protected: + void Init() final; + private: + InsnSet firstRegUseInsnSet; + void RemoveMopUxtwToMov(Insn &insn); + bool IsUseInsnSetValid(Insn &insn, regno_t firstRegNO, regno_t secondRegNO); + std::set modifiedBB; +}; + +/* + * Do back propagate of vreg/preg when encount following insn: + * + * mov vreg/preg1, vreg2 + * + * back propagate reg1 to all vreg2's use points and def points, when all of them is in same bb + */ +class BackPropPattern : public OptimizePattern { + public: + explicit BackPropPattern(CGFunc &cgFunc) : OptimizePattern(cgFunc) {} + ~BackPropPattern() override { + firstRegOpnd = nullptr; + secondRegOpnd = nullptr; + defInsnForSecondOpnd = nullptr; + } + bool CheckCondition(Insn &insn) final; + void Optimize(Insn &insn) final; + void Run() final; + + protected: + void Init() final; + + private: + bool CheckAndGetOpnd(const Insn &insn); + bool DestOpndHasUseInsns(Insn &insn); + bool DestOpndLiveOutToEHSuccs(Insn &insn) const; + bool CheckSrcOpndDefAndUseInsns(Insn &insn); + bool CheckSrcOpndDefAndUseInsnsGlobal(Insn &insn); + bool CheckPredefineInsn(Insn &insn); + bool CheckRedefineInsn(Insn &insn); + bool CheckReplacedUseInsn(Insn &insn); + RegOperand *firstRegOpnd = nullptr; + RegOperand *secondRegOpnd = nullptr; + uint32 firstRegNO = 0; + uint32 secondRegNO = 0; + InsnSet srcOpndUseInsnSet; + Insn *defInsnForSecondOpnd = nullptr; + bool globalProp = false; +}; + +/* + * when w0 has only one valid bit, these tranformation will be done + * cmp w0, #0 + * cset w1, NE --> mov w1, w0 + * + * cmp w0, #0 + * cset w1, EQ --> eor w1, w0, 1 + * + * cmp w0, #1 
+ * cset w1, NE --> eor w1, w0, 1 + * + * cmp w0, #1 + * cset w1, EQ --> mov w1, w0 + * + * cmp w0, #0 + * cset w0, NE -->null + * + * cmp w0, #1 + * cset w0, EQ -->null + * + * condition: + * 1. the first operand of cmp instruction must has only one valid bit + * 2. the second operand of cmp instruction must be 0 or 1 + * 3. flag register of cmp isntruction must not be used later + */ +class CmpCsetPattern : public OptimizePattern { + public: + explicit CmpCsetPattern(CGFunc &cgFunc) : OptimizePattern(cgFunc) {} + ~CmpCsetPattern() override { + nextInsn = nullptr; + cmpFirstOpnd = nullptr; + cmpSecondOpnd = nullptr; + csetFirstOpnd = nullptr; + } + bool CheckCondition(Insn &insn) final; + void Optimize(Insn &insn) final; + void Run() final; + + protected: + void Init() final; + + private: + Insn *nextInsn = nullptr; + int64 cmpConstVal = 0; + Operand *cmpFirstOpnd = nullptr; + Operand *cmpSecondOpnd = nullptr; + Operand *csetFirstOpnd = nullptr; +}; + +/* + * mov w5, #1 + * ... --> cset w5, NE + * mov w0, #0 + * csel w5, w5, w0, NE + * + * mov w5, #0 + * ... --> cset w5,EQ + * mov w0, #1 + * csel w5, w5, w0, NE + * + * condition: + * 1.all define points of w5 are defined by: mov w5, #1(#0) + * 2.all define points of w0 are defined by: mov w0, #0(#1) + * 3.w0 will not be used after: csel w5, w5, w0, NE(EQ) + */ +class CselPattern : public OptimizePattern { + public: + explicit CselPattern(CGFunc &cgFunc) : OptimizePattern(cgFunc) {} + ~CselPattern() override = default; + bool CheckCondition(Insn &insn) final; + void Optimize(Insn &insn) final; + void Run() final; + + protected: + void Init() final {} +}; + +/* + * uxtb w0, w0 --> null + * uxth w0, w0 --> null + * + * condition: + * 1. validbits(w0)<=8,16,32 + * 2. the first operand is same as the second operand + * + * uxtb w0, w1 --> null + * uxth w0, w1 --> null + * + * condition: + * 1. validbits(w1)<=8,16,32 + * 2. the use points of w0 has only one define point, that is uxt w0, w1 + */ +class RedundantUxtPattern : public OptimizePattern { + public: + explicit RedundantUxtPattern(CGFunc &cgFunc) : OptimizePattern(cgFunc) {} + ~RedundantUxtPattern() override { + secondOpnd = nullptr; + } + bool CheckCondition(Insn &insn) final; + void Optimize(Insn &insn) final; + void Run() final; + + protected: + void Init() final; + + private: + uint32 GetMaximumValidBit(Insn &insn, uint8 index, InsnSet &visitedInsn) const; + static uint32 GetInsnValidBit(const Insn &insn); + InsnSet useInsnSet; + uint32 firstRegNO = 0; + Operand *secondOpnd = nullptr; +}; + +/* + * bl MCC_NewObj_flexible_cname bl MCC_NewObj_flexible_cname + * mov x21, x0 // [R203] + * str x0, [x29,#16] // local var: Reg0_R6340 [R203] --> str x0, [x29,#16] // local var: Reg0_R6340 [R203] + * ... (has call) ... 
(has call) + * mov x2, x21 // use of x21 ldr x2, [x29, #16] + * bl *** bl *** + */ +class LocalVarSaveInsnPattern : public OptimizePattern { + public: + explicit LocalVarSaveInsnPattern(CGFunc &cgFunc) : OptimizePattern(cgFunc) {} + ~LocalVarSaveInsnPattern() override { + firstInsnSrcOpnd = nullptr; + firstInsnDestOpnd = nullptr; + secondInsnSrcOpnd = nullptr; + secondInsnDestOpnd = nullptr; + useInsn = nullptr; + secondInsn = nullptr; + } + bool CheckCondition(Insn &firstInsn) final; + void Optimize(Insn &insn) final; + void Run() final; + + protected: + void Init() final; + + private: + bool CheckFirstInsn(const Insn &firstInsn); + bool CheckSecondInsn(); + bool CheckAndGetUseInsn(Insn &firstInsn); + bool CheckLiveRange(const Insn &firstInsn); + Operand *firstInsnSrcOpnd = nullptr; + Operand *firstInsnDestOpnd = nullptr; + Operand *secondInsnSrcOpnd = nullptr; + Operand *secondInsnDestOpnd = nullptr; + Insn *useInsn = nullptr; + Insn *secondInsn = nullptr; +}; + +class ExtendShiftOptPattern : public OptimizePattern { + public: + explicit ExtendShiftOptPattern(CGFunc &cgFunc) : OptimizePattern(cgFunc) {} + ~ExtendShiftOptPattern() override { + defInsn = nullptr; + newInsn = nullptr; + } + bool CheckCondition(Insn &insn) final; + void Optimize(Insn &insn) final; + void Run() final; + void DoExtendShiftOpt(Insn &insn); + + protected: + void Init() final; + + private: + void SelectExtendOrShift(const Insn &def); + bool CheckDefUseInfo(Insn &use, uint32 size); + SuffixType CheckOpType(const Operand &lastOpnd) const; + void ReplaceUseInsn(Insn &use, const Insn &def, uint32 amount); + void SetExMOpType(const Insn &use); + void SetLsMOpType(const Insn &use); + + MOperator replaceOp = 0; + uint32 replaceIdx = 0; + ExtendShiftOperand::ExtendOp extendOp = ExtendShiftOperand::kUndef; + BitShiftOperand::ShiftOp shiftOp = BitShiftOperand::kUndef; + Insn *defInsn = nullptr; + Insn *newInsn = nullptr; + bool optSuccess = false; + bool removeDefInsn = false; + ExMOpType exMOpType = kExUndef; + LsMOpType lsMOpType = kLsUndef; +}; + +/* + * This pattern do: + * 1) + * uxtw vreg:Rm validBitNum:[64], vreg:Rn validBitNum:[32] + * ------> + * mov vreg:Rm validBitNum:[64], vreg:Rn validBitNum:[32] + * 2) + * ldrh R201, [...] + * and R202, R201, #65520 + * uxth R203, R202 + * -------> + * ldrh R201, [...] 
+ * and R202, R201, #65520 + * mov R203, R202 + */ +class ExtenToMovPattern : public OptimizePattern { + public: + explicit ExtenToMovPattern(CGFunc &cgFunc) : OptimizePattern(cgFunc) {} + ~ExtenToMovPattern() override = default; + bool CheckCondition(Insn &insn) final; + void Optimize(Insn &insn) final; + void Run() final; + + protected: + void Init() final; + + private: + bool CheckHideUxtw(const Insn &insn, regno_t regno) const; + bool CheckUxtw(Insn &insn); + bool BitNotAffected(Insn &insn, uint32 validNum); /* check whether significant bits are affected */ + bool CheckSrcReg(Insn &insn, regno_t srcRegNo, uint32 validNum, std::vector &checkedInsns); + + MOperator replaceMop = MOP_undef; +}; + +class SameDefPattern : public OptimizePattern { + public: + explicit SameDefPattern(CGFunc &cgFunc) : OptimizePattern(cgFunc) {} + ~SameDefPattern() override { + currInsn = nullptr; + sameInsn = nullptr; + } + bool CheckCondition(Insn &insn) final; + void Optimize(Insn &insn) final; + void Run() final; + + protected: + void Init() final; + + private: + bool IsSameDef(); + bool SrcRegIsRedefined(regno_t regNo); + bool IsSameOperand(Operand &opnd0, Operand &opnd1); + + Insn *currInsn = nullptr; + Insn *sameInsn = nullptr; +}; + +/* + * and r0, r0, #4 (the imm is n power of 2) + * ... (r0 is not used) + * cbz r0, .Label + * ===> tbz r0, #2, .Label + * + * and r0, r0, #4 (the imm is n power of 2) + * ... (r0 is not used) + * cbnz r0, .Label + * ===> tbnz r0, #2, .Label + */ +class AndCbzPattern : public OptimizePattern { + public: + explicit AndCbzPattern(CGFunc &cgFunc) : OptimizePattern(cgFunc) {} + ~AndCbzPattern() override { + prevInsn = nullptr; + } + bool CheckCondition(Insn &insn) final; + void Optimize(Insn &insn) final; + void Run() final; + + protected: + void Init() final; + + private: + int64 CalculateLogValue(int64 val) const; + bool IsAdjacentArea(Insn &prev, Insn &curr) const; + Insn *prevInsn = nullptr; +}; + +/* + * [arithmetic operation] + * add/sub/ R202, R201, #1 add/sub/ R202, R201, #1 + * ... ... + * add/sub/ R203, R201, #1 ---> mov R203, R202 + * + * [copy operation] + * mov R201, #1 mov R201, #1 + * ... ... + * mov R202, #1 ---> mov R202, R201 + * + * The pattern finds the insn with the same rvalue as the current insn, + * then prop its lvalue, and replaces the current insn with movrr insn. + * The mov can be prop in forwardprop or backprop. + * + * conditions: + * 1. in same BB + * 2. rvalue is not defined between two insns + * 3. 
lvalue is not defined between two insns + */ +class SameRHSPropPattern : public OptimizePattern { + public: + explicit SameRHSPropPattern(CGFunc &cgFunc) : OptimizePattern(cgFunc) {} + ~SameRHSPropPattern() override { + prevInsn = nullptr; + } + bool CheckCondition(Insn &insn) final; + void Optimize(Insn &insn) final; + void Run() final; + + protected: + void Init() final; + + private: + bool IsSameOperand(Operand *opnd1, Operand *opnd2) const; + bool FindSameRHSInsnInBB(Insn &insn); + Insn *prevInsn = nullptr; + std::vector candidates; +}; + +/* + * ldr r0, [r19, 8] + * ldrh r1, [r19, 16] + * ldrh r2, [r19, 18] (r0,r1,r2 are call parameters) + * ====> + * ldp x0, x1 [r19, 8] + * ubfx x2, x1, 16, 16 + * + * we do this pattern because parameters can be passed without the unused high bit is cleared + */ +class ContinuousLdrPattern : public OptimizePattern { + public: + explicit ContinuousLdrPattern(CGFunc &cgFunc) : OptimizePattern(cgFunc) {} + bool CheckCondition(Insn &insn) final; + void Optimize(Insn &insn) final; + void Run() final; + + protected: + void Init() final {} + + private: + static bool IsMopMatch(const Insn &insn); + bool IsUsedBySameCall(Insn &insn1, Insn &insn2, Insn &insn3) const; + static bool IsMemValid(const MemOperand &memopnd); + static bool IsImmValid(MOperator mop, const ImmOperand &imm); + static int64 GetMemOffsetValue(const Insn &insn); + + std::vector insnList; +}; +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_GLOBAL_H */ diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_ico.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_ico.h new file mode 100755 index 0000000000000000000000000000000000000000..1d198f420c42fc59ac1cab2d0b6e343bb5d6cc17 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_ico.h @@ -0,0 +1,137 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
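
Regarding the ContinuousLdrPattern comment above: after the ldp loads eight bytes at [r19, 8] into x0 and the next eight at [r19, 16] into x1, the halfword originally fetched by ldrh r2, [r19, 18] occupies bits [16, 31] of x1 on a little-endian target, so ubfx x2, x1, 16, 16 recovers the same zero-extended value. A small model of that extraction (illustrative helper; assumes 1 <= width <= 63):

#include <cstdint>

// ubfx dst, src, lsb, width == take `width` bits of src starting at bit `lsb`, zero-extended.
inline uint64_t Ubfx(uint64_t src, unsigned lsb, unsigned width) {
  return (src >> lsb) & ((uint64_t{1} << width) - 1);
}

// Ubfx(x1, 16, 16) reproduces the value of the original `ldrh r2, [r19, 18]`.
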
+ */ +#ifndef MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_ICO_H +#define MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_ICO_H +#include "ico.h" +#include "aarch64_isa.h" +#include "optimize_common.h" +#include "live.h" + +namespace maplebe { +class AArch64IfConversionOptimizer : public IfConversionOptimizer { + public: + AArch64IfConversionOptimizer(CGFunc &func, MemPool &memPool) : IfConversionOptimizer(func, memPool) {} + + ~AArch64IfConversionOptimizer() override = default; + void InitOptimizePatterns() override; +}; + +class AArch64ICOPattern : public ICOPattern { + public: + explicit AArch64ICOPattern(CGFunc &func) : ICOPattern(func) {} + ~AArch64ICOPattern() override = default; + protected: + ConditionCode Encode(MOperator mOp, bool inverse) const; + Insn *BuildCmpInsn(const Insn &condBr) const; + Insn *BuildCcmpInsn(ConditionCode ccCode, const Insn *cmpInsn) const; + Insn *BuildCondSet(const Insn &branch, RegOperand ®, bool inverse) const; + Insn *BuildCondSel(const Insn &branch, MOperator mOp, RegOperand &dst, RegOperand &src1, RegOperand &src2) const; + static uint32 GetNZCV(ConditionCode ccCode, bool inverse); + bool CheckMop(MOperator mOperator) const; +}; + +/* If-Then-Else pattern */ +class AArch64ICOIfThenElsePattern : public AArch64ICOPattern { + public: + enum DefUseKind : uint8 { + kUse, + kDef, + KUseOrDef + }; + + explicit AArch64ICOIfThenElsePattern(CGFunc &func) : AArch64ICOPattern(func) {} + ~AArch64ICOIfThenElsePattern() override = default; + bool Optimize(BB &curBB) override; + protected: + bool BuildCondMovInsn(const BB &bb, const std::map> &ifDestSrcMap, + const std::map> &elseDestSrcMap, bool elseBBIsProcessed, + std::vector &generateInsn); + bool DoOpt(BB *ifBB, BB *elseBB, BB &joinBB); + void GenerateInsnForImm(const Insn &branchInsn, Operand &ifDest, Operand &elseDest, RegOperand &destReg, + std::vector &generateInsn) const; + Operand *GetDestReg(const std::map> &destSrcMap, const RegOperand &destReg) const; + void GenerateInsnForReg(const Insn &branchInsn, Operand &ifDest, Operand &elseDest, RegOperand &destReg, + std::vector &generateInsn) const; + RegOperand *GenerateRegAndTempInsn(Operand &dest, const RegOperand &destReg, std::vector &generateInsn) const; + bool CheckHasSameDest(std::vector &lInsn, std::vector &rInsn) const; + bool CheckModifiedRegister(Insn &insn, std::map> &destSrcMap, + std::vector &src, std::map &dest2InsnMap, Insn **toBeRremovedOutOfCurrBB) const; + bool CheckCondMoveBB(BB *bb, std::map> &destSrcMap, + std::vector &destRegs, std::vector &setInsn, Insn **toBeRremovedOutOfCurrBB) const; + bool CheckModifiedInCmpInsn(const Insn &insn) const; + bool DoHostBeforeDoCselOpt(BB &ifBB, BB &elseBB); + void UpdateTemps(std::vector &destRegs, std::vector &setInsn, + std::map> &destSrcMap, const Insn &oldInsn, Insn *newInsn); + Insn *MoveSetInsn2CmpBB(Insn &toBeRremoved2CmpBB, BB &currBB, + std::vector &anotherBranchDestRegs, std::map> &destSrcMap); + void RevertMoveInsns(BB *bb, Insn *prevInsnInBB, Insn *newInsnOfBB, + Insn *insnInBBToBeRremovedOutOfCurrBB); + bool IsExpansionMOperator(const Insn &insn) const; + bool IsMovMOperator(const Insn &insn) const; + bool IsEorMOperator(const Insn &insn) const; + bool IsShiftMOperator(const Insn &insn) const; + bool Has2SrcOpndSetInsn(const Insn &insn) const; + bool IsSetInsnMOperator(const Insn &insn) const; + bool IsSetInsn(const Insn &insn, Operand **dest, std::vector &src) const; + + private: + BB *cmpBB = nullptr; + Insn *cmpInsn = nullptr; + Operand *flagOpnd = nullptr; +}; + +/* If( cmp || cmp ) then or If( cmp && cmp 
) then + * cmp w4, #1 + * beq .L.886__1(branch1) cmp w4, #1 + * .L.886__2: => ccmp w4, #4, #4, NE + * cmp w4, #4 beq .L.886__1 + * beq .L.886__1(branch2) + * */ +class AArch64ICOSameCondPattern : public AArch64ICOPattern { + public: + explicit AArch64ICOSameCondPattern(CGFunc &func) : AArch64ICOPattern(func) {} + ~AArch64ICOSameCondPattern() override = default; + bool Optimize(BB &secondIfBB) override; + protected: + bool DoOpt(BB *firstIfBB, BB &secondIfBB) const; +}; + +/* If-Then MorePreds pattern + * + * .L.891__92: .L.891__92: + * cmp x4, w0, UXTW cmp x4, w0, UXTW + * bls .L.891__41 csel x0, x2, x0, LS + * .L.891__42: bls .L.891__94 + * sub x0, x4, w0, UXTW =====> .L.891__42: + * cmp x0, x2 sub x0, x4, w0, UXTW + * bls .L.891__41 cmp x0, x2 + * ...... csel x0, x2, x0, LS + * .L.891__41: bls .L.891__94 + * mov x0, x2 + * b .L.891__94 + * */ +class AArch64ICOMorePredsPattern : public AArch64ICOPattern { + public: + explicit AArch64ICOMorePredsPattern(CGFunc &func) : AArch64ICOPattern(func) {} + ~AArch64ICOMorePredsPattern() override = default; + bool Optimize(BB &curBB) override; + protected: + bool DoOpt(BB &gotoBB) const; + bool CheckGotoBB(BB &gotoBB, std::vector &movInsn) const; + bool MovToCsel(std::vector &movInsn, std::vector &cselInsn, const Insn &branchInsn) const; +}; +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_ICO_H */ diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_insn.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_insn.h new file mode 100644 index 0000000000000000000000000000000000000000..0fe036b2124890616b96c41421c915f9240c4c79 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_insn.h @@ -0,0 +1,74 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
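
The ccmp rewrite shown for AArch64ICOSameCondPattern above works because ccmp either performs its comparison (when the condition established by the first cmp holds) or forces the flags to its literal NZCV immediate. A minimal model of that behaviour (illustrative; the overflow flag is simplified away since only Z matters for the EQ example):

#include <cstdint>

struct Flags { bool n, z, c, v; };

inline Flags Cmp(uint64_t a, uint64_t b) {
  uint64_t r = a - b;
  return {(r >> 63) != 0, r == 0, a >= b, false /* overflow omitted in this sketch */};
}

// ccmp a, b, #nzcv, cond: the real compare if `cond` held, otherwise the immediate flags.
inline Flags Ccmp(uint64_t a, uint64_t b, uint8_t nzcv, bool condHolds) {
  if (condHolds) {
    return Cmp(a, b);
  }
  return {(nzcv & 8) != 0, (nzcv & 4) != 0, (nzcv & 2) != 0, (nzcv & 1) != 0};
}

// cmp w4,#1 ; ccmp w4,#4,#4,NE ; beq L   ==>   branch taken iff (w4 == 1 || w4 == 4):
// if w4 != 1 the second compare runs (Z set iff w4 == 4); if w4 == 1 the immediate #4 sets Z.
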
+ */ +#ifndef MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_INSN_H +#define MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_INSN_H + +#include "aarch64_isa.h" +#include "insn.h" +#include "string_utils.h" +#include "aarch64_operand.h" +#include "common_utils.h" +namespace maplebe { +class A64OpndEmitVisitor : public OpndEmitVisitor { + public: + A64OpndEmitVisitor(Emitter &emitter, const OpndDesc *operandProp) + : OpndEmitVisitor(emitter), + opndProp(operandProp) {} + ~A64OpndEmitVisitor() override { + opndProp = nullptr; + } + + void Visit(RegOperand *v) final; + void Visit(ImmOperand *v) final; + void Visit(MemOperand *v) final; + void Visit(CondOperand *v) final; + void Visit(StImmOperand *v) final; + void Visit(BitShiftOperand *v) final; + void Visit(ExtendShiftOperand *v) final; + void Visit(LabelOperand *v) final; + void Visit(FuncNameOperand *v) final; + void Visit(CommentOperand *v) final; + void Visit(OfstOperand *v) final; + void Visit(ListOperand *v) final; + + private: + void EmitVectorOperand(const RegOperand &v); + void EmitIntReg(const RegOperand &v, int32 opndSz = kMaxSimm32); + void Visit(const MIRSymbol &symbol, int64 offset); + + const OpndDesc *opndProp; +}; + +class A64OpndDumpVisitor : public OpndDumpVisitor { + public: + A64OpndDumpVisitor(const OpndDesc &operandDesc) : OpndDumpVisitor(operandDesc) {} + ~A64OpndDumpVisitor() override = default; + + void Visit(RegOperand *v) final; + void Visit(ImmOperand *v) final; + void Visit(MemOperand *a64v) final; + void Visit(ListOperand *v) final; + void Visit(CondOperand *v) final; + void Visit(StImmOperand *v) final; + void Visit(BitShiftOperand *v) final; + void Visit(ExtendShiftOperand *v) final; + void Visit(LabelOperand *v) final; + void Visit(FuncNameOperand *v) final; + void Visit(PhiOperand *v) final; + void Visit(CommentOperand *v) final; +}; +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_INSN_H */ diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_int_regs.def b/src/mapleall/maple_be/include/cg/aarch64/aarch64_int_regs.def new file mode 100644 index 0000000000000000000000000000000000000000..d4b00c71a43c654d89cc34a46e8583d7e864b8c4 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_int_regs.def @@ -0,0 +1,77 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan PSL v1. + * You can use this software according to the terms and conditions of the Mulan PSL v1. + * You may obtain a copy of Mulan PSL v1 at: + * + * http://license.coscl.org.cn/MulanPSL + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v1 for more details. + */ +/* + * ARM Compiler armasm User Guide version 6.6. + * http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.dui0473j/deb1353594352617.html + * (retrieved on 3/24/2017) + * + * $ 4.1 Registers in AArch64 state + * + * There is no register named W31 or X31. + * Depending on the instruction, register 31 is either the stack + * pointer or the zero register. When used as the stack pointer, + * you refer to it as "SP". When used as the zero register, you refer + * to it as WZR in a 32-bit context or XZR in a 64-bit context. + * The zero register returns 0 when read and discards data when + * written (e.g., when setting the status register for testing). 
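
As the note above explains, encoding 31 has no name of its own: it prints as SP when the instruction uses it as a stack pointer or base and as WZR/XZR otherwise, so an emitter needs the operand's role as well as its number. A tiny naming sketch (hypothetical helper, not the A64OpndEmitVisitor interface):

#include <string>

std::string NameForReg31(bool usedAsStackPointer, bool is64Bit) {
  if (usedAsStackPointer) {
    return is64Bit ? "sp" : "wsp";
  }
  return is64Bit ? "xzr" : "wzr";
}
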
+ */ +/* ID, 32-bit prefix, 64-bit prefix, canBeAssigned, isCalleeSave, isParam, isSpill, isExtraSpill */ +INT_REG(0 , "W", "X", true, false, true, false, false) +INT_REG(1 , "W", "X", true, false, true, false, false) +INT_REG(2 , "W", "X", true, false, true, false, false) +INT_REG(3 , "W", "X", true, false, true, false, false) +INT_REG(4 , "W", "X", true, false, true, false, false) +INT_REG(5 , "W", "X", true, false, true, false, false) +INT_REG(6 , "W", "X", true, false, true, false, false) +INT_REG(7 , "W", "X", true, false, true, false, false) +INT_REG(8 , "W", "X", true, false, false, false, false) +INT_REG(9 , "W", "X", true, false, false, false, false) +INT_REG(10, "W", "X", true, false, false, false, false) +INT_REG(11, "W", "X", true, false, false, false, false) +INT_REG(12, "W", "X", true, false, false, false, false) +INT_REG(13, "W", "X", true, false, false, false, false) +INT_REG(14, "W", "X", true, false, false, false, false) +INT_REG(15, "W", "X", true, false, false, false, true) +INT_REG(16, "W", "X", true, false, false, true, false) +INT_REG(17, "W", "X", true, false, false, true, false) +INT_REG(18, "W", "X", true, false, false, false, false) +INT_REG(19, "W", "X", true, true, false, false, false) +INT_REG(20, "W", "X", true, true, false, false, false) +INT_REG(21, "W", "X", true, true, false, false, false) +INT_REG(22, "W", "X", true, true, false, false, false) +INT_REG(23, "W", "X", true, true, false, false, false) +INT_REG(24, "W", "X", true, true, false, false, false) +INT_REG(25, "W", "X", true, true, false, false, false) +INT_REG(26, "W", "X", true, true, false, false, false) +INT_REG(27, "W", "X", true, true, false, false, false) +INT_REG(28, "W", "X", true, true, false, false, false) +INT_REG(29, "W", "X", true, true, false, false, false) +INT_REG(30, "W", "X", false, true, false, false, false) +INT_REG(31, "W", "X", false, true, false, false, false) +/* + * Refer to ARM Compiler armasm User Guide version 6.6. $4.5 Predeclared core register names in AArch64 state + * We should not use "W" prefix in 64-bit context, though!! + */ +INT_REG(SP, "W", "" , false, false, false, false, false) +INT_REG(ZR, "W", "X", false, false, false, false, false) + +/* Alias ID, ID, 32-bit prefix, 64-bit prefix */ +INT_REG_ALIAS(FP, 31, "", "" ) +INT_REG_ALIAS(LR, 30, "", "" ) + +/* R19 is reserved for yieldpoint */ +INT_REG_ALIAS(YP, 19, "", "" ) + +INT_REG_ALIAS(LAST_INT_REG, 31, "", "" ) diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_isa.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_isa.h new file mode 100644 index 0000000000000000000000000000000000000000..ae170bf012f678f12f133667f4966841c90e4bb9 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_isa.h @@ -0,0 +1,180 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_ISA_H +#define MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_ISA_H + +#include "isa.h" + +#define DEFINE_MOP(op, ...) 
op, +enum AArch64MOP_t : maple::uint32 { +#include "abstract_mmir.def" +#include "aarch64_md.def" +#include "aarch64_mem_md.def" + kMopLast +}; +#undef DEFINE_MOP + +namespace maplebe { +/* + * ARM Architecture Reference Manual (for ARMv8) + * D1.8.2 + */ +constexpr uint32 kAarch64StackPtrAlignment = 16; +constexpr int32 kAarch64StackPtrAlignmentInt = 16; + +constexpr int32 kOffsetAlign = 8; +constexpr uint32 kIntregBytelen = 8; /* 64-bit */ +constexpr uint32 kFpregBytelen = 8; /* only lower 64 bits are used */ +constexpr int kSizeOfFplr = 16; + +enum StpLdpImmBound : int { + kStpLdpImm64LowerBound = -512, + kStpLdpImm64UpperBound = 504, + kStpLdpImm32LowerBound = -256, + kStpLdpImm32UpperBound = 252 +}; + +enum StrLdrPerPostBound : int64 { + kStrLdrPerPostLowerBound = -256, + kStrLdrPerPostUpperBound = 255 +}; + +constexpr int64 kStrAllLdrAllImmLowerBound = 0; +enum StrLdrImmUpperBound : int64 { + kStrLdrImm32UpperBound = 16380, /* must be a multiple of 4 */ + kStrLdrImm64UpperBound = 32760, /* must be a multiple of 8 */ + kStrbLdrbImmUpperBound = 4095, + kStrhLdrhImmUpperBound = 8190 +}; + +/* + * ARM Compiler armasm User Guide version 6.6. + * http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.dui0473j/deb1353594352617.html + * (retrieved on 3/24/2017) + * + * $ 4.1 Registers in AArch64 state + * ...When you use the 32-bit form of an instruction, the upper + * 32 bits of the source registers are ignored and + * the upper 32 bits of the destination register are set to zero. + * ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + * + * There is no register named W31 or X31. + * Depending on the instruction, register 31 is either the stack + * pointer or the zero register. When used as the stack pointer, + * you refer to it as "SP". When used as the zero register, you refer + * to it as WZR in a 32-bit context or XZR in a 64-bit context. + * The zero register returns 0 when read and discards data when + * written (e.g., when setting the status register for testing). 
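
The AArch64MOP_t enum above and the AArch64reg enum that follows are both generated with the same X-macro technique: a client defines the macro to extract the columns it needs, includes the .def table, then undefines the macro. A standalone miniature of that pattern over aarch64_int_regs.def (assumes the .def file is on the include path; RegInfo is an illustrative type, not a MapleBE one):

#include <cstdint>
#include <string>
#include <vector>

struct RegInfo {
  std::string name64;   // e.g. "X0", "X19", "SP", "XZR"
  bool isCalleeSave;
};

std::vector<RegInfo> BuildIntRegTable() {
  std::vector<RegInfo> table;
  // Aliases (FP, LR, YP) name existing registers, so they add no new entries here.
#define INT_REG(ID, PREF32, PREF64, canBeAssigned, isCalleeSave, isParam, isSpill, isExtraSpill) \
  table.push_back({std::string(PREF64) + #ID, isCalleeSave});
#define INT_REG_ALIAS(ALIAS, ID, PREF32, PREF64)
#include "aarch64_int_regs.def"
#undef INT_REG
#undef INT_REG_ALIAS
  return table;
}
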
+ */ +enum AArch64reg : uint32 { + kRinvalid = kInvalidRegNO, +/* integer registers */ +#define INT_REG(ID, PREF32, PREF64, canBeAssigned, isCalleeSave, isParam, isSpill, isExtraSpill) R##ID, +#define INT_REG_ALIAS(ALIAS, ID, PREF32, PREF64) +#include "aarch64_int_regs.def" +#undef INT_REG +#undef INT_REG_ALIAS +/* fp-simd registers */ +#define FP_SIMD_REG(ID, PV, P8, P16, P32, P64, P128, canBeAssigned, isCalleeSave, isParam, isSpill, isExtraSpill) V##ID, +#define FP_SIMD_REG_ALIAS(ID) +#include "aarch64_fp_simd_regs.def" +#undef FP_SIMD_REG +#undef FP_SIMD_REG_ALIAS + kMaxRegNum, + kRFLAG, + kAllRegNum, +/* alias */ +#define INT_REG(ID, PREF32, PREF64, canBeAssigned, isCalleeSave, isParam, isSpill, isExtraSpill) +#define INT_REG_ALIAS(ALIAS, ID, PREF32, PREF64) R##ALIAS = R##ID, +#include "aarch64_int_regs.def" +#undef INT_REG +#undef INT_REG_ALIAS +#define FP_SIMD_REG(ID, PV, P8, P16, P32, P64, P128, canBeAssigned, isCalleeSave, isParam, isSpill, isExtraSpill) +#define FP_SIMD_REG_ALIAS(ID) S##ID = V##ID, +#include "aarch64_fp_simd_regs.def" +#undef FP_SIMD_REG +#undef FP_SIMD_REG_ALIAS +#define FP_SIMD_REG(ID, PV, P8, P16, P32, P64, P128, canBeAssigned, isCalleeSave, isParam, isSpill, isExtraSpill) +#define FP_SIMD_REG_ALIAS(ID) D##ID = V##ID, +#include "aarch64_fp_simd_regs.def" +#undef FP_SIMD_REG +#undef FP_SIMD_REG_ALIAS +}; + +class Insn; + +namespace AArch64isa { +static inline bool IsGPRegister(AArch64reg r) { + return R0 <= r && r <= RZR; +} + +static inline bool IsFPSIMDRegister(AArch64reg r) { + return V0 <= r && r <= V31; +} + +static inline bool IsPhysicalRegister(regno_t r) { + return r < kMaxRegNum; +} + +static inline RegType GetRegType(AArch64reg r) { + if (IsGPRegister(r)) { + return kRegTyInt; + } + if (IsFPSIMDRegister(r)) { + return kRegTyFloat; + } + ASSERT(false, "No suitable register type to return?"); + return kRegTyUndef; +} + +enum MemoryOrdering : uint32 { + kMoNone = 0, + kMoAcquire = 1ULL, /* ARMv8 */ + kMoAcquireRcpc = (1ULL << 1), /* ARMv8.3 */ + kMoLoacquire = (1ULL << 2), /* ARMv8.1 */ + kMoRelease = (1ULL << 3), /* ARMv8 */ + kMoLorelease = (1ULL << 4) /* ARMv8.1 */ +}; + +static inline bool IsPseudoInstruction(MOperator mOp) { + return (mOp >= MOP_pseudo_param_def_x && mOp <= MOP_pseudo_eh_def_x); +} + +/* + * Precondition: The given insn is a jump instruction. + * Get the jump target label operand index from the given instruction. + * Note: MOP_xbr is a jump instruction, but the target is unknown at compile time, + * because a register instead of label. So we don't take it as a branching instruction. + * However for special long range branch patch, the label is installed in this case. + */ +uint32 GetJumpTargetIdx(const Insn &insn); + +MOperator FlipConditionOp(MOperator flippedOp); +} /* namespace AArch64isa */ + +/* + * We save callee-saved registers from lower stack area to upper stack area. + * If possible, we store a pair of registers (int/int and fp/fp) in the stack. + * The Stack Pointer has to be aligned at 16-byte boundary. 
+ * On AArch64, kIntregBytelen == 8 (see the above)
+ */
+inline void GetNextOffsetCalleeSaved(int &offset) {
+  offset += (kIntregBytelen << 1);
+}
+
+MOperator GetMopPair(MOperator mop);
+}  /* namespace maplebe */
+
+#endif  /* MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_ISA_H */
diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_isolate_fastpath.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_isolate_fastpath.h
new file mode 100644
index 0000000000000000000000000000000000000000..e2248dbbbc0d485695042ae7f12e30501b3cdc5e
--- /dev/null
+++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_isolate_fastpath.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#ifndef MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_ISOLATE_FASTPATH_H
+#define MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_ISOLATE_FASTPATH_H
+
+#include "isolate_fastpath.h"
+#include "aarch64_cgfunc.h"
+#include "aarch64_operand.h"
+#include "aarch64_insn.h"
+
+namespace maplebe {
+using namespace maple;
+
+class AArch64IsolateFastPath : public IsolateFastPath {
+ public:
+  explicit AArch64IsolateFastPath(CGFunc &func)
+      : IsolateFastPath(func) {}
+  ~AArch64IsolateFastPath() override = default;
+
+  void Run() override;
+
+ private:
+  bool InsertOpndRegs(Operand &op, std::set<regno_t> &vecRegs) const;
+  bool InsertInsnRegs(Insn &insn, bool insertSource, std::set<regno_t> &vecSourceRegs,
+                      bool insertTarget, std::set<regno_t> &vecTargetRegs) const;
+  bool FindRegs(Operand &op, std::set<regno_t> &vecRegs) const;
+  bool BackwardFindDependency(BB &ifbb, std::set<regno_t> &vecReturnSourceRegs, std::list<Insn*> &existingInsns,
+                              std::list<Insn*> &moveInsns) const;
+  void IsolateFastPathOpt();
+
+  void SetFastPathReturnBB(BB *bb) {
+    bb->SetFastPathReturn(true);
+    fastPathReturnBB = bb;
+  }
+  BB *GetFastPathReturnBB() {
+    return fastPathReturnBB;
+  }
+
+  BB *fastPathReturnBB = nullptr;
+};
+}  /* namespace maplebe */
+
+#endif  /* MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_ISOLATE_FASTPATH_H */
diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_live.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_live.h
new file mode 100644
index 0000000000000000000000000000000000000000..681c94831d808194784a89d6580067e241c2cbb5
--- /dev/null
+++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_live.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */ +#ifndef MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_LIVE_H +#define MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_LIVE_H + +#include "live.h" + +namespace maplebe { +class AArch64LiveAnalysis : public LiveAnalysis { + public: + AArch64LiveAnalysis(CGFunc &func, MemPool &memPool) : LiveAnalysis(func, memPool) {} + ~AArch64LiveAnalysis() override = default; + bool CleanupBBIgnoreReg(regno_t reg) override; + void InitEhDefine(BB &bb) override; + void GenerateReturnBBDefUse(BB &bb) const override; + void ProcessCallInsnParam(BB &bb, const Insn &insn) const override; +}; +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_LIVE_H */ diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_md.def b/src/mapleall/maple_be/include/cg/aarch64/aarch64_md.def new file mode 100644 index 0000000000000000000000000000000000000000..4e84c643543b3d350db377827fc6d59adeada237 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_md.def @@ -0,0 +1,1212 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan PSL v1. + * You can use this software according to the terms and conditions of the Mulan PSL v1. + * You may obtain a copy of Mulan PSL v1 at: + * + * http://license.coscl.org.cn/MulanPSL + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v1 for more details. + */ +/* InsnDesc format: + * {mop, opndMD, properties, latency, name, format, atomicNum, validFunc(nullptr)} + */ + +/* AARCH64 MOVES */ +/* MOP_xmovrr */ +DEFINE_MOP(MOP_xmovrr, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS},ISMOVE,kLtAlu,"mov","0,1",1) +/* MOP_wmovrr */ +DEFINE_MOP(MOP_wmovrr, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS},ISMOVE,kLtAlu,"mov","0,1",1) +/* MOP_wmovri32 */ +DEFINE_MOP(MOP_wmovri32, {&OpndDesc::Reg32ID,&OpndDesc::Imm32},ISMOVE,kLtAlu,"mov","0,1",1) +/* MOP_xmovri64 */ +DEFINE_MOP(MOP_xmovri64, {&OpndDesc::Reg64ID,&OpndDesc::Imm64},ISMOVE,kLtAlu,"mov","0,1",1) +/* MOP_xmovrr_uxtw -- Remove Redundant uxtw -- used in globalopt:UxtwMovPattern */ +DEFINE_MOP(MOP_xmovrr_uxtw, {&OpndDesc::Reg64ID,&OpndDesc::Reg32IS},ISMOVE,kLtAlu,"mov","0,1",1) + +/* MOP_xvmovsr */ +DEFINE_MOP(MOP_xvmovsr, {&OpndDesc::Reg32FD,&OpndDesc::Reg32IS},ISMOVE,kLtR2f,"fmov","0,1",1) +/* MOP_xvmovdr */ +DEFINE_MOP(MOP_xvmovdr, {&OpndDesc::Reg64FD,&OpndDesc::Reg64IS},ISMOVE,kLtR2f,"fmov","0,1",1) +/* MOP_xvmovrs */ +DEFINE_MOP(MOP_xvmovrs, {&OpndDesc::Reg32ID,&OpndDesc::Reg32FS},ISMOVE,kLtF2r,"fmov","0,1",1) +/* MOP_xvmovrd */ +DEFINE_MOP(MOP_xvmovrd, {&OpndDesc::Reg64ID,&OpndDesc::Reg64FS},ISMOVE,kLtF2r,"fmov","0,1",1) +/* MOP_xvmovs */ +DEFINE_MOP(MOP_xvmovs, {&OpndDesc::Reg32FD,&OpndDesc::Reg32FS},ISMOVE,kLtFpalu,"fmov","0,1",1) +/* MOP_xvmovd */ +DEFINE_MOP(MOP_xvmovd, {&OpndDesc::Reg64FD,&OpndDesc::Reg64FS},ISMOVE,kLtFpalu,"fmov","0,1",1) + +/* Vector SIMD mov */ +/* MOP_xmovrv */ +DEFINE_MOP(MOP_xvmovrv, {&OpndDesc::Reg32ID,&OpndDesc::Reg32FS},ISMOVE,kLtF2r,"mov","0,1",1) + +/* MOP_xadrp */ +DEFINE_MOP(MOP_xadrp, {&OpndDesc::Reg64ID,&OpndDesc::LiteralSrc},ISLOADADDR,kLtShift,"adrp","0,1",1) +/* MOP_xadr */ +DEFINE_MOP(MOP_xadri64, {&OpndDesc::Reg64ID,&OpndDesc::Imm64},ISLOADADDR,kLtShift,"adr","0,1",1) +/* MOP_xadrpl12 */ + +DEFINE_MOP(MOP_xadrpl12, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Literal12Src},ISLOADADDR,kLtAlu,"add","0,1,2",1) + +/* AARCH64 
Arithmetic: add */ +/* MOP_xaddrrr */ +DEFINE_MOP(MOP_xaddrrr, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},0,kLtAlu,"add","0,1,2",1) +/* MOP_xaddrrrs */ +DEFINE_MOP(MOP_xaddrrrs, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Bitshift64},0,kLtAluShift,"add","0,1,2,3",1) +/* MOP_xxwaddrrre */ +DEFINE_MOP(MOP_xxwaddrrre, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg32IS,&OpndDesc::Extendshift64},0,kLtAluShift,"add","0,1,2,3",1) +/* MOP_xaddrri24 */ +DEFINE_MOP(MOP_xaddrri24, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Imm12,&OpndDesc::Lsl12},0,kLtShift,"add","0,1,2,3",1,Imm12BitValid) +/* MOP_xaddrri12 */ +DEFINE_MOP(MOP_xaddrri12, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Imm12},0,kLtAlu,"add","0,1,2",1,Imm12BitValid) +/* MOP_waddrrr */ +DEFINE_MOP(MOP_waddrrr, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Reg32IS},0,kLtAlu,"add","0,1,2",1) +/* MOP_waddrrrs */ +DEFINE_MOP(MOP_waddrrrs, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Reg32IS,&OpndDesc::Bitshift32},0,kLtAluShift,"add","0,1,2,3",1) +/* MOP_xxwaddrrre */ +DEFINE_MOP(MOP_wwwaddrrre, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Reg32IS,&OpndDesc::Extendshift64},0,kLtAluShift,"add","0,1,2,3",1) +/* MOP_waddrri24 */ +DEFINE_MOP(MOP_waddrri24, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Imm12,&OpndDesc::Lsl12},0,kLtAluShift,"add","0,1,2,3",1,Imm12BitValid) +/* MOP_waddrri12 */ +DEFINE_MOP(MOP_waddrri12, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Imm12},0,kLtAlu,"add","0,1,2",1,Imm12BitValid) +/* MOP_dadd */ +DEFINE_MOP(MOP_dadd, {&OpndDesc::Reg64FD,&OpndDesc::Reg64FS,&OpndDesc::Reg64FS},0,kLtFpalu,"fadd","0,1,2",1) +/* MOP_sadd */ +DEFINE_MOP(MOP_sadd, {&OpndDesc::Reg32FD,&OpndDesc::Reg32FS,&OpndDesc::Reg32FS},0,kLtFpalu,"fadd","0,1,2",1) + +/* AARCH64 Arithmetic: sub/subs */ +/* MOP newly add to the following group should be related pairs with such order :{ sub, subs } */ +/* MOP_xsubrrr */ +DEFINE_MOP(MOP_xsubrrr, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},0,kLtAlu,"sub","0,1,2",1) +/* MOP_xsubsrrr */ +DEFINE_MOP(MOP_xsubsrrr, {&OpndDesc::CCD, &OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},0,kLtAlu,"subs","1,2,3",1) +/* MOP_xsubrrrs */ +DEFINE_MOP(MOP_xsubrrrs, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Bitshift64},0,kLtAluShift,"sub","0,1,2,3",1) +/* MOP_xsubsrrrs */ +DEFINE_MOP(MOP_xsubsrrrs, {&OpndDesc::CCD, &OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Bitshift64},0,kLtAluShift,"subs","1,2,3,4",1) +/* MOP_xsubrri24 */ +DEFINE_MOP(MOP_xsubrri24, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Imm12,&OpndDesc::Lsl12},0,kLtAluShift,"sub","0,1,2,3",1,Imm12BitValid) +/* MOP_xsubsrri24 */ +DEFINE_MOP(MOP_xsubsrri24, {&OpndDesc::CCD, &OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Imm12,&OpndDesc::Lsl12},0,kLtAluShift,"subs","1,2,3,4",1,Imm12BitValid) +/* MOP_xsubrri12 */ +DEFINE_MOP(MOP_xsubrri12, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Imm12},0,kLtAlu,"sub","0,1,2",1,Imm12BitValid) +/* MOP_xsubsrri12 */ +DEFINE_MOP(MOP_xsubsrri12, {&OpndDesc::CCD, &OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Imm12},0,kLtAlu,"subs","1,2,3",1,Imm12BitValid) +/* MOP_wsubrrr */ +DEFINE_MOP(MOP_wsubrrr, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Reg32IS},0,kLtAlu,"sub","0,1,2",1) +/* MOP_wsubsrrr */ +DEFINE_MOP(MOP_wsubsrrr, {&OpndDesc::CCD,&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Reg32IS},0,kLtAlu,"subs","1,2,3",1) +/* MOP_wsubrrrs */ +DEFINE_MOP(MOP_wsubrrrs, 
{&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Reg32IS,&OpndDesc::Bitshift32},0,kLtAluShift,"sub","0,1,2,3",1) +/* MOP_wsubsrrrs */ +DEFINE_MOP(MOP_wsubsrrrs, {&OpndDesc::CCD,&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Reg32IS,&OpndDesc::Bitshift32},0,kLtAluShift,"subs","1,2,3,4",1) +/* MOP_wsubrri24 */ +DEFINE_MOP(MOP_wsubrri24, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Imm12,&OpndDesc::Lsl12},0,kLtAluShift,"sub","0,1,2,3",1,Imm12BitValid) +/* MOP_wsubsrri24 */ +DEFINE_MOP(MOP_wsubsrri24, {&OpndDesc::CCD,&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Imm12,&OpndDesc::Lsl12},0,kLtAluShift,"subs","1,2,3,4",1,Imm12BitValid) +/* MOP_wsubrri12 */ +DEFINE_MOP(MOP_wsubrri12, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Imm12},0,kLtAlu,"sub","0,1,2",1,Imm12BitValid) +/* MOP_wsubsrri12 */ +DEFINE_MOP(MOP_wsubsrri12, {&OpndDesc::CCD,&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Imm12},0,kLtAlu,"subs","1,2,3",1,Imm12BitValid) + +/* AARCH64 Arithmetic: sub */ +/* MOP_xxwsubrrre */ +DEFINE_MOP(MOP_xxwsubrrre, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg32IS,&OpndDesc::Extendshift64},0,kLtAluShift,"sub","0,1,2,3",1) +/* MOP_wwwsubrrre */ +DEFINE_MOP(MOP_wwwsubrrre, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Reg32IS,&OpndDesc::Extendshift64},0,kLtAluShift,"sub","0,1,2,3",1) +/* MOP_dsub */ +DEFINE_MOP(MOP_dsub, {&OpndDesc::Reg64FD,&OpndDesc::Reg64FS,&OpndDesc::Reg64FS},0,kLtFpalu,"fsub","0,1,2",1) +/* MOP_ssub */ +DEFINE_MOP(MOP_ssub, {&OpndDesc::Reg32FD,&OpndDesc::Reg32FS,&OpndDesc::Reg32FS},0,kLtFpalu,"fsub","0,1,2",1) + +/* AARCH64 Arithmetic: multiply */ +/* MOP_Tbxmulrrr */ +DEFINE_MOP(MOP_xmulrrr, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},0,kLtMul,"mul","0,1,2",1) +/* MOP_wmulrrr */ +DEFINE_MOP(MOP_wmulrrr, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Reg32IS},0,kLtMul,"mul","0,1,2",1) +/* MOP_Tbxvmuls */ +DEFINE_MOP(MOP_xvmuls, {&OpndDesc::Reg32FD,&OpndDesc::Reg32FS,&OpndDesc::Reg32FS},0,kLtFpmul,"fmul","0,1,2",1) +/* MOP_Tbxvmuld */ +DEFINE_MOP(MOP_xvmuld, {&OpndDesc::Reg64FD,&OpndDesc::Reg64FS,&OpndDesc::Reg64FS},0,kLtFpmul,"fmul","0,1,2",1) +/*MOP_xsmullrrr */ +DEFINE_MOP(MOP_xsmullrrr, {&OpndDesc::Reg64ID,&OpndDesc::Reg32IS,&OpndDesc::Reg32IS},0,kLtMul,"smull","0,1,2",1) + +/* AARCH64 Arithmetic: multiply first then add */ +/* MOP_xmaddrrrr */ +DEFINE_MOP(MOP_xmaddrrrr, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},0,kLtMul,"madd","0,1,2,3",1) +/* MOP_wmaddrrrr */ +DEFINE_MOP(MOP_wmaddrrrr, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Reg32IS,&OpndDesc::Reg32IS},0,kLtMul,"madd","0,1,2,3",1) + +/* AARCH64 leading zeros, reverse bits (for trailing zeros) */ +/* MOP_wclz */ +DEFINE_MOP(MOP_wclz, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS},0,kLtAlu,"clz","0,1", 1) +/* MOP_xclz */ +DEFINE_MOP(MOP_xclz, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS},0,kLtAlu,"clz","0,1", 1) +/* MOP_wrbit */ +DEFINE_MOP(MOP_wrbit, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS},0,kLtAlu,"rbit","0,1", 1) +/* MOP_xrbit */ +DEFINE_MOP(MOP_xrbit, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS},0,kLtAlu,"rbit","0,1", 1) + +/* AARCH64 Conversions */ +/* MOP_xsxtb32 */ +DEFINE_MOP(MOP_xsxtb32, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS},ISCONVERSION,kLtAluShift,"sxtb","0,1",1) +/* MOP_xsxtb64 */ +DEFINE_MOP(MOP_xsxtb64, {&OpndDesc::Reg64ID,&OpndDesc::Reg32IS},ISCONVERSION,kLtAluShift,"sxtb","0,1",1) +/* MOP_xsxth32 */ +DEFINE_MOP(MOP_xsxth32, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS},ISCONVERSION,kLtAluShift,"sxth","0,1",1) +/* MOP_xsxth64 */ 
+DEFINE_MOP(MOP_xsxth64, {&OpndDesc::Reg64ID,&OpndDesc::Reg32IS},ISCONVERSION,kLtAluShift,"sxth","0,1",1) +/* MOP_xsxtw64 */ +DEFINE_MOP(MOP_xsxtw64, {&OpndDesc::Reg64ID,&OpndDesc::Reg32IS},ISCONVERSION,kLtAluShift,"sxtw","0,1",1) + +/* MOP_xuxtb32 */ +DEFINE_MOP(MOP_xuxtb32, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS},ISCONVERSION,kLtAluShift,"uxtb","0,1",1) +/* MOP_xuxth32 */ +DEFINE_MOP(MOP_xuxth32, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS},ISCONVERSION,kLtAluShift,"uxth","0,1",1) +/* MOP_xuxtw64 Same as mov w0,w0 */ +DEFINE_MOP(MOP_xuxtw64, {&OpndDesc::Reg64ID,&OpndDesc::Reg32IS},ISCONVERSION,kLtAluShift,"uxtw","0,1",1) + +/* + * User-defined pseudo-instruction, which is used in validbit opt based on ssa, + * will recover to mov after validbit opt + */ +DEFINE_MOP(MOP_wuxtb_vb, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS},0,kLtAluShift,"//uxtb_vb","0,1",1) +DEFINE_MOP(MOP_wuxth_vb, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS},0,kLtAluShift,"//uxth_vb","0,1",1) + +/* MOP_xvcvtfd */ +DEFINE_MOP(MOP_xvcvtfd, {&OpndDesc::Reg32FD,&OpndDesc::Reg64FS},ISCONVERSION,kLtFpalu,"fcvt","0,1",1) +/* MOP_xvcvtdf */ +DEFINE_MOP(MOP_xvcvtdf, {&OpndDesc::Reg64FD,&OpndDesc::Reg32FS},ISCONVERSION,kLtFpalu,"fcvt","0,1",1) + +/* MOP_vcvtrf fcvtzs w,s */ +DEFINE_MOP(MOP_vcvtrf, {&OpndDesc::Reg32ID,&OpndDesc::Reg32FS},ISCONVERSION,kLtF2rCvt,"fcvtzs","0,1",1) +/* MOP_xvcvtrf fcvtzs x,s */ +DEFINE_MOP(MOP_xvcvtrf, {&OpndDesc::Reg64ID,&OpndDesc::Reg32FS},ISCONVERSION,kLtF2rCvt,"fcvtzs","0,1",1) +/* MOP_vcvturf fcvtzu w,s */ +DEFINE_MOP(MOP_vcvturf, {&OpndDesc::Reg32ID,&OpndDesc::Reg32FS},ISCONVERSION,kLtF2rCvt,"fcvtzu","0,1",1) +/* MOP_xvcvturf fcvtzu x,s */ +DEFINE_MOP(MOP_xvcvturf, {&OpndDesc::Reg64ID,&OpndDesc::Reg32FS},ISCONVERSION,kLtF2rCvt,"fcvtzu","0,1",1) + +/* MOP_vcvtas fcvtas w,s (for round) */ +DEFINE_MOP(MOP_vcvtas, {&OpndDesc::Reg32ID,&OpndDesc::Reg32FS},ISCONVERSION,kLtF2rCvt,"fcvtas","0,1",1) +/* MOP_xvcvtas fcvtas x,s */ +DEFINE_MOP(MOP_xvcvtas, {&OpndDesc::Reg64ID,&OpndDesc::Reg64FS},ISCONVERSION,kLtF2rCvt,"fcvtas","0,1",1) +/* MOP_vcvtms fcvtms w,s (for floor) */ +DEFINE_MOP(MOP_vcvtms, {&OpndDesc::Reg32ID,&OpndDesc::Reg32FS},ISCONVERSION,kLtF2rCvt,"fcvtms","0,1",1) +/* MOP_xvcvtms fcvtms x,s */ +DEFINE_MOP(MOP_xvcvtms, {&OpndDesc::Reg64ID,&OpndDesc::Reg64FS},ISCONVERSION,kLtF2rCvt,"fcvtms","0,1",1) +/* MOP_vcvtps fcvtps w,s (for ceil) */ +DEFINE_MOP(MOP_vcvtps, {&OpndDesc::Reg32ID,&OpndDesc::Reg32FS},ISCONVERSION,kLtF2rCvt,"fcvtps","0,1",1) +/* MOP_xvcvtps fcvtps x,d */ +DEFINE_MOP(MOP_xvcvtps, {&OpndDesc::Reg64ID,&OpndDesc::Reg64FS},ISCONVERSION,kLtF2rCvt,"fcvtps","0,1",1) + +/* MOP_vcvtrd fcvtzs w,d */ +DEFINE_MOP(MOP_vcvtrd, {&OpndDesc::Reg32ID,&OpndDesc::Reg64FS},ISCONVERSION,kLtF2rCvt,"fcvtzs","0,1",1) +/* MOP_xvcvtrd fcvtzs x,d */ +DEFINE_MOP(MOP_xvcvtrd, {&OpndDesc::Reg64ID,&OpndDesc::Reg64FS},ISCONVERSION,kLtF2rCvt,"fcvtzs","0,1",1) +/* MOP_vcvturd fcvtzu w,d */ +DEFINE_MOP(MOP_vcvturd, {&OpndDesc::Reg32ID,&OpndDesc::Reg64FS},ISCONVERSION,kLtF2rCvt,"fcvtzu","0,1",1) +/* MOP_xvcvturd fcvtzu x,d */ +DEFINE_MOP(MOP_xvcvturd, {&OpndDesc::Reg64ID,&OpndDesc::Reg64FS},ISCONVERSION,kLtF2rCvt,"fcvtzu","0,1",1) + +/* MOP_vcvtfr scvtf s,w */ +DEFINE_MOP(MOP_vcvtfr, {&OpndDesc::Reg32FD,&OpndDesc::Reg32IS},ISCONVERSION,kLtR2fCvt,"scvtf","0,1",1) +/* MOP_xvcvtfr scvtf s,x */ +DEFINE_MOP(MOP_xvcvtfr, {&OpndDesc::Reg32FD,&OpndDesc::Reg64IS},ISCONVERSION,kLtR2fCvt,"scvtf","0,1",1) +/* MOP_vcvtufr ucvtf s,w */ +DEFINE_MOP(MOP_vcvtufr, {&OpndDesc::Reg32FD,&OpndDesc::Reg32IS},ISCONVERSION,kLtR2fCvt,"ucvtf","0,1",1) +/* 
MOP_xvcvtufr ucvtf s,x */ +DEFINE_MOP(MOP_xvcvtufr, {&OpndDesc::Reg32FD,&OpndDesc::Reg64IS},ISCONVERSION,kLtR2fCvt,"ucvtf","0,1",1) + +/* MOP_vcvtdr scvtf d,w */ +DEFINE_MOP(MOP_vcvtdr, {&OpndDesc::Reg64FD,&OpndDesc::Reg32IS},ISCONVERSION,kLtR2fCvt,"scvtf","0,1",1) +/* MOP_xvcvtdr scvtf d,x */ +DEFINE_MOP(MOP_xvcvtdr, {&OpndDesc::Reg64FD,&OpndDesc::Reg64IS},ISCONVERSION,kLtR2fCvt,"scvtf","0,1",1) +/* MOP_vcvtudr ucvtf d,w */ +DEFINE_MOP(MOP_vcvtudr, {&OpndDesc::Reg64FD,&OpndDesc::Reg32IS},ISCONVERSION,kLtR2fCvt,"ucvtf","0,1",1) +/* MOP_xvcvtudr ucvtf d,x */ +DEFINE_MOP(MOP_xvcvtudr, {&OpndDesc::Reg64FD,&OpndDesc::Reg64IS},ISCONVERSION,kLtR2fCvt,"ucvtf","0,1",1) + +/* MOP_xcsel */ +DEFINE_MOP(MOP_wcselrrrc, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Reg32IS,&OpndDesc::Cond,&OpndDesc::CCS},ISCONDDEF,kLtAlu,"csel","0,1,2,3",1) +DEFINE_MOP(MOP_xcselrrrc, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Cond,&OpndDesc::CCS},ISCONDDEF,kLtAlu,"csel","0,1,2,3",1) + +/* MOP_xcset -- all conditions minus AL & NV */ +DEFINE_MOP(MOP_wcsetrc, {&OpndDesc::Reg32ID,&OpndDesc::Cond,&OpndDesc::CCS},ISCONDDEF,kLtAlu,"cset","0,1",1) +DEFINE_MOP(MOP_xcsetrc, {&OpndDesc::Reg64ID,&OpndDesc::Cond,&OpndDesc::CCS},ISCONDDEF,kLtAlu,"cset","0,1",1) + +/* MOP_xcinc */ +DEFINE_MOP(MOP_wcincrc, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Cond,&OpndDesc::CCS},ISCONDDEF,kLtAlu,"cinc","0,1,2",1) +DEFINE_MOP(MOP_xcincrc, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Cond,&OpndDesc::CCS},ISCONDDEF,kLtAlu,"cinc","0,1,2",1) + +/* MOP_xcsinc */ +DEFINE_MOP(MOP_wcsincrrrc, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Reg32IS,&OpndDesc::Cond,&OpndDesc::CCS},ISCONDDEF,kLtAlu,"csinc","0,1,2,3",1) +DEFINE_MOP(MOP_xcsincrrrc, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Cond,&OpndDesc::CCS},ISCONDDEF,kLtAlu,"csinc","0,1,2,3",1) + +/* MOP_xcsinv */ +DEFINE_MOP(MOP_wcsinvrrrc, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Reg32IS,&OpndDesc::Cond,&OpndDesc::CCS},ISCONDDEF,kLtAlu,"csinv","0,1,2,3",1) +DEFINE_MOP(MOP_xcsinvrrrc, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Cond,&OpndDesc::CCS},ISCONDDEF,kLtAlu,"csinv","0,1,2,3",1) + +/* MOP_xandrrr */ +DEFINE_MOP(MOP_xandrrr, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},0,kLtAlu,"and","0,1,2",1) +/* MOP_xandrrrs */ +DEFINE_MOP(MOP_xandrrrs, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Bitshift64},0,kLtAluShift,"and","0,1,2,3",1) +/* MOP_xandrri13 */ +DEFINE_MOP(MOP_xandrri13, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Imm13},0,kLtAlu,"and","0,1,2",1,Imm13BitMaskValid) +/* MOP_wandrrr */ +DEFINE_MOP(MOP_wandrrr, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Reg32IS},0,kLtAlu,"and","0,1,2",1) +/* MOP_wandrrrs */ +DEFINE_MOP(MOP_wandrrrs, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Reg32IS,&OpndDesc::Bitshift32},0,kLtAluShift,"and","0,1,2,3",1) +/* MOP_wandrri12 */ +DEFINE_MOP(MOP_wandrri12, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Imm12},0,kLtAlu,"and","0,1,2",1,Imm12BitMaskValid) + +/* MOP_xbicrrr */ +DEFINE_MOP(MOP_xbicrrr, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},0,kLtAlu,"bic","0,1,2",1) +/* MOP_wbicrrr */ +DEFINE_MOP(MOP_wbicrrr, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Reg32IS},0,kLtAlu,"bic","0,1,2",1) + +/* MOP_xiorrrr */ +DEFINE_MOP(MOP_xiorrrr, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},0,kLtAlu,"orr","0,1,2",1) +/* MOP_xiorrrrs */ +DEFINE_MOP(MOP_xiorrrrs, 
{&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Bitshift64},0,kLtAlu,"orr","0,1,2,3",1) +/* MOP_xiorrri13 */ +DEFINE_MOP(MOP_xiorrri13, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Imm13},0,kLtAlu,"orr","0,1,2",1,Imm13BitMaskValid) +/* MOP_wiorrrr */ +DEFINE_MOP(MOP_wiorrrr, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Reg32IS},0,kLtAlu,"orr","0,1,2",1) +/* MOP_wiorrrrs */ +DEFINE_MOP(MOP_wiorrrrs, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Reg32IS,&OpndDesc::Bitshift32},0,kLtAlu,"orr","0,1,2,3",1) +/* MOP_wiorrri12 */ +DEFINE_MOP(MOP_wiorrri12, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Imm12},0,kLtAlu,"orr","0,1,2",1,Imm12BitMaskValid) + +/* MOP_xeorrrr */ +DEFINE_MOP(MOP_xeorrrr, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},0,kLtAlu,"eor","0,1,2",1) +/* MOP_xeorrrrs */ +DEFINE_MOP(MOP_xeorrrrs, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Bitshift64},0,kLtAlu,"eor","0,1,2,3",1) +/* MOP_xeorrri13 */ +DEFINE_MOP(MOP_xeorrri13, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Imm13},0,kLtAlu,"eor","0,1,2",1,Imm13BitMaskValid) +/* MOP_weorrrr */ +DEFINE_MOP(MOP_weorrrr, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Reg32IS},0,kLtAlu,"eor","0,1,2",1) +/* MOP_weorrrrs */ +DEFINE_MOP(MOP_weorrrrs, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Reg32IS,&OpndDesc::Bitshift32},0,kLtAlu,"eor","0,1,2,3",1) +/* MOP_weorrri12 */ +DEFINE_MOP(MOP_weorrri12, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Imm12},0,kLtAlu,"eor","0,1,2",1,Imm12BitMaskValid) + +/* MOP_xnotrr */ +DEFINE_MOP(MOP_xnotrr, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS},0,kLtAlu,"mvn","0,1",1) +/* MOP_wnotrr */ +DEFINE_MOP(MOP_wnotrr, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS},0,kLtAlu,"mvn","0,1",1) +/* MOP_vnotui */ +DEFINE_MOP(MOP_vnotui, {&OpndDesc::Reg64VD,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"mvni","0,1",1) +/* MOP_vnotvi */ +DEFINE_MOP(MOP_vnotvi, {&OpndDesc::Reg128VD,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"mvni","0,1",1) + +/* MOP_xrevrr */ +DEFINE_MOP(MOP_xrevrr, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS},0,kLtAlu,"rev","0,1",1) +/* MOP_wrevrr */ +DEFINE_MOP(MOP_wrevrr, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS},0,kLtAlu,"rev","0,1",1) +/* MOP_xrevrr */ +DEFINE_MOP(MOP_wrevrr16, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS},0,kLtAlu,"rev16","0,1",1) + +/* MOP_wfmaxrrr */ +DEFINE_MOP(MOP_wfmaxrrr, {&OpndDesc::Reg32FD,&OpndDesc::Reg32FS,&OpndDesc::Reg32FS},0,kLtFpalu,"fmax","0,1,2",1) +/* MOP_xfmaxrrr */ +DEFINE_MOP(MOP_xfmaxrrr, {&OpndDesc::Reg64FD,&OpndDesc::Reg64FS,&OpndDesc::Reg64FS},0,kLtFpalu,"fmax","0,1,2",1) +/* MOP_wfminrrr */ +DEFINE_MOP(MOP_wfminrrr, {&OpndDesc::Reg32FD,&OpndDesc::Reg32FS,&OpndDesc::Reg32FS},0,kLtFpalu,"fmin","0,1,2",1) +/* MOP_xfminrrr */ +DEFINE_MOP(MOP_xfminrrr, {&OpndDesc::Reg64FD,&OpndDesc::Reg64FS,&OpndDesc::Reg64FS},0,kLtFpalu,"fmin","0,1,2",1) + +/* MOP_wsdivrrr */ +DEFINE_MOP(MOP_wsdivrrr, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Reg32IS},CANTHROW,kLtDiv,"sdiv","0,1,2",1) +/* MOP_xsdivrrr */ +DEFINE_MOP(MOP_xsdivrrr, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},CANTHROW,kLtDiv,"sdiv","0,1,2",1) +/* MOP_wudivrrr */ +DEFINE_MOP(MOP_wudivrrr, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Reg32IS},CANTHROW,kLtDiv,"udiv","0,1,2",1) +/* MOP_xudivrrr */ +DEFINE_MOP(MOP_xudivrrr, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},CANTHROW,kLtDiv,"udiv","0,1,2",1) + +/* MOP_wmsubrrrr */ +DEFINE_MOP(MOP_wmsubrrrr, 
{&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Reg32IS,&OpndDesc::Reg32IS},0,kLtMul,"msub","0,1,2,3",1) +/* MOP_xmsubrrrr */ +DEFINE_MOP(MOP_xmsubrrrr, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},0,kLtMul,"msub","0,1,2,3",1) + +/* MOP_wmnegrrr */ +DEFINE_MOP(MOP_wmnegrrr, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Reg32IS},0,kLtMul,"mneg","0,1,2",1) +/* MOP_xmnegrrr */ +DEFINE_MOP(MOP_xmnegrrr, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},0,kLtMul,"mneg","0,1,2",1) + +/* MOP_wubfxrri5i5 */ +DEFINE_MOP(MOP_wubfxrri5i5, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Imm5,&OpndDesc::Imm5},0,kLtAluShift,"ubfx","0,1,2,3",1) +/* MOP_xubfxrri6i6 */ +DEFINE_MOP(MOP_xubfxrri6i6, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Imm6,&OpndDesc::Imm6},0,kLtAluShift,"ubfx","0,1,2,3",1) + +/* MOP_wsbfxrri5i5 -- Signed Bitfield Extract */ +DEFINE_MOP(MOP_wsbfxrri5i5, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Imm5,&OpndDesc::Imm5},0,kLtAluShift,"sbfx","0,1,2,3",1) +/* MOP_xsbfxrri6i6 */ +DEFINE_MOP(MOP_xsbfxrri6i6, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Imm6,&OpndDesc::Imm6},0,kLtAluShift,"sbfx","0,1,2,3",1) + +/* MOP_wubfizrri5i5 -- Unsigned Bitfield Insert in Zero */ +DEFINE_MOP(MOP_wubfizrri5i5, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Imm5,&OpndDesc::Imm5},0,kLtAluShift,"ubfiz","0,1,2,3",1) +/* MOP_xubfizrri6i6 */ +DEFINE_MOP(MOP_xubfizrri6i6, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Imm6,&OpndDesc::Imm6},0,kLtAluShift,"ubfiz","0,1,2,3",1) + +/* MOP_xsbfizrri6i6 Signed Bitfield Insert in Zero */ +DEFINE_MOP(MOP_xsbfizrri6i6, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Imm6,&OpndDesc::Imm6},0,kLtAluShift,"sbfiz","0,1,2,3",1) + +/* MOP_wbfirri5i5 -- Bitfield Insert */ +DEFINE_MOP(MOP_wbfirri5i5, {&OpndDesc::Reg32IDS,&OpndDesc::Reg32IS,&OpndDesc::Imm5,&OpndDesc::Imm5},ISMOVE,kLtAluShift,"bfi","0,1,2,3",1) +/* MOP_xbfirri6i6 */ +DEFINE_MOP(MOP_xbfirri6i6, {&OpndDesc::Reg64IDS,&OpndDesc::Reg64IS,&OpndDesc::Imm6,&OpndDesc::Imm6},ISMOVE,kLtAluShift,"bfi","0,1,2,3",1) + +/* MOP_xlslrri6,--- Logical Shift Left */ +DEFINE_MOP(MOP_xlslrri6, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Imm6},0,kLtAluShift,"lsl","0,1,2",1) +/* MOP_wlslrri5 */ +DEFINE_MOP(MOP_wlslrri5, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Imm8},0,kLtAluShift,"lsl","0,1,2",1) +/* MOP_xasrrri6, */ +DEFINE_MOP(MOP_xasrrri6, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Imm6},0,kLtAluShift,"asr","0,1,2",1) +/* MOP_wasrrri5 */ +DEFINE_MOP(MOP_wasrrri5, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Imm8},0,kLtAluShift,"asr","0,1,2",1) +/* MOP_xlsrrri6, */ +DEFINE_MOP(MOP_xlsrrri6, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Imm6},0,kLtAluShift,"lsr","0,1,2",1) +/* MOP_wlsrrri5 */ +DEFINE_MOP(MOP_wlsrrri5, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Imm8},0,kLtAluShift,"lsr","0,1,2",1) +/* MOP_xlslrrr, */ +DEFINE_MOP(MOP_xlslrrr, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},0,kLtAluShiftReg,"lsl","0,1,2",1) +/* MOP_wlslrrr */ +DEFINE_MOP(MOP_wlslrrr, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Reg32IS},0,kLtAluShiftReg,"lsl","0,1,2",1) +/* MOP_xasrrrr, */ +DEFINE_MOP(MOP_xasrrrr, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},0,kLtAluShiftReg,"asr","0,1,2",1) +/* MOP_wasrrrr */ +DEFINE_MOP(MOP_wasrrrr, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Reg32IS},0,kLtAluShiftReg,"asr","0,1,2",1) +/* MOP_xlsrrrr, */ +DEFINE_MOP(MOP_xlsrrrr, 
{&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},0,kLtAluShiftReg,"lsr","0,1,2",1) +/* MOP_wlsrrrr */ +DEFINE_MOP(MOP_wlsrrrr, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Reg32IS},0,kLtAluShiftReg,"lsr","0,1,2",1) +/* MOP_xrorrrr */ +DEFINE_MOP(MOP_xrorrrr, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},0,kLtAluShiftReg,"ror","0,1,2",1) +/* MOP_wrorrrr */ +DEFINE_MOP(MOP_wrorrrr, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Reg32IS},0,kLtAluShiftReg,"ror","0,1,2",1) +/* MOP_wtstri32 */ +DEFINE_MOP(MOP_wtstri32, {&OpndDesc::CCD,&OpndDesc::Reg32ID,&OpndDesc::Imm32},0,kLtAlu,"tst","1,2",1) +/* MOP_xtstri64 */ +DEFINE_MOP(MOP_xtstri64, {&OpndDesc::CCD,&OpndDesc::Reg64ID,&OpndDesc::Imm64},0,kLtAlu,"tst","1,2",1) +/* MOP_wtstrr */ +DEFINE_MOP(MOP_wtstrr, {&OpndDesc::CCD,&OpndDesc::Reg32ID,&OpndDesc::Reg32IS},0,kLtAlu,"tst","1,2",1) +/* MOP_xtstrr */ +DEFINE_MOP(MOP_xtstrr, {&OpndDesc::CCD,&OpndDesc::Reg64ID,&OpndDesc::Reg64IS},0,kLtAlu,"tst","1,2",1) +/* MOP_wextrrrri5 -- Extracts a register from a pair of registers */ +DEFINE_MOP(MOP_wextrrrri5, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Reg32IS,&OpndDesc::Imm5},0,kLtAluShift,"extr","0,1,2,3",1) +/* MOP_xextrrrri6 */ +DEFINE_MOP(MOP_xextrrrri6, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Imm6},0,kLtAluShift,"extr","0,1,2,3",1) + +/* MOP_wsfmovri imm8->s */ +DEFINE_MOP(MOP_wsfmovri, {&OpndDesc::Reg32FD,&OpndDesc::Imm8},ISMOVE,kLtFconst,"fmov","0,1",1) +/* MOP_xdfmovri imm8->d */ +DEFINE_MOP(MOP_xdfmovri, {&OpndDesc::Reg64FD,&OpndDesc::Imm8},ISMOVE,kLtFconst,"fmov","0,1",1) + +/* MOP_xcsneg -- Conditional Select Negation */ +DEFINE_MOP(MOP_wcsnegrrrc, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Reg32IS,&OpndDesc::Cond,&OpndDesc::CCS},ISCONDDEF,kLtAlu,"csneg","0,1,2,3",1) +DEFINE_MOP(MOP_xcsnegrrrc, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Cond,&OpndDesc::CCS},ISCONDDEF,kLtAlu,"csneg","0,1,2,3",1) +DEFINE_MOP(MOP_wcnegrrrc, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Cond,&OpndDesc::CCS},ISCONDDEF,kLtAlu,"cneg","0,1,2",1) +DEFINE_MOP(MOP_xcnegrrrc, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Cond,&OpndDesc::CCS},ISCONDDEF,kLtAlu,"cneg","0,1,2",1) + +/* MOP_sabsrr */ +DEFINE_MOP(MOP_sabsrr, {&OpndDesc::Reg32FD,&OpndDesc::Reg32FS},0,kLtFpalu,"fabs","0,1",1) +/* MOP_dabsrr */ +DEFINE_MOP(MOP_dabsrr, {&OpndDesc::Reg64FD,&OpndDesc::Reg64FS},0,kLtFpalu,"fabs","0,1",1) + +/* MOP_winegrr */ +DEFINE_MOP(MOP_winegrr, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS},0,kLtAlu,"neg","0,1",1) +/* MOP_winegrre */ +DEFINE_MOP(MOP_winegrrs, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Bitshift32},0,kLtAlu,"neg","0,1,2",1) +/* neg MOP_xinegrr */ +DEFINE_MOP(MOP_xinegrr, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS},0,kLtAlu,"neg","0,1",1) +/* neg MOP_xinegrrs */ +DEFINE_MOP(MOP_xinegrrs, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Bitshift64},0,kLtAlu,"neg","0,1,2",1) +/* neg f32 */ +DEFINE_MOP(MOP_wfnegrr, {&OpndDesc::Reg32FD,&OpndDesc::Reg32FS},0,kLtFpalu,"fneg","0,1",1) +/* neg f64 */ +DEFINE_MOP(MOP_xfnegrr, {&OpndDesc::Reg64FD,&OpndDesc::Reg64FS},0,kLtFpalu,"fneg","0,1",1) + +/* MOP_sdivrrr */ +DEFINE_MOP(MOP_sdivrrr, {&OpndDesc::Reg32FD,&OpndDesc::Reg32FS,&OpndDesc::Reg32FS},CANTHROW,kLtAdvsimdDivS,"fdiv","0,1,2",1) +/* MOP_ddivrrr */ +DEFINE_MOP(MOP_ddivrrr, {&OpndDesc::Reg64FD,&OpndDesc::Reg64FS,&OpndDesc::Reg64FS},CANTHROW,kLtAdvsimdDivD,"fdiv","0,1,2",1) + +/* MOP_smadd */ +DEFINE_MOP(MOP_smadd, 
{&OpndDesc::Reg32FD,&OpndDesc::Reg32FS,&OpndDesc::Reg32FS,&OpndDesc::Reg32FS},CANTHROW,kLtFpmul,"fmadd","0,1,2,3",1) +/* MOP_dmadd */ +DEFINE_MOP(MOP_dmadd, {&OpndDesc::Reg64FD,&OpndDesc::Reg64FS,&OpndDesc::Reg64FS,&OpndDesc::Reg64FS},CANTHROW,kLtFpmul,"fmadd","0,1,2,3",1) + +/* MOP_smsub */ +DEFINE_MOP(MOP_smsub, {&OpndDesc::Reg32FD,&OpndDesc::Reg32FS,&OpndDesc::Reg32FS,&OpndDesc::Reg32FS},CANTHROW,kLtFpmul,"fmsub","0,1,2,3",1) +/* MOP_dmsub */ +DEFINE_MOP(MOP_dmsub, {&OpndDesc::Reg64FD,&OpndDesc::Reg64FS,&OpndDesc::Reg64FS,&OpndDesc::Reg64FS},CANTHROW,kLtFpmul,"fmsub","0,1,2,3",1) + +/* MOP_snmul */ +DEFINE_MOP(MOP_snmul, {&OpndDesc::Reg32FD,&OpndDesc::Reg32FS,&OpndDesc::Reg32FS},CANTHROW,kLtFpmul,"fnmul","0,1,2",1) +/* MOP_dnmul */ +DEFINE_MOP(MOP_dnmul, {&OpndDesc::Reg64FD,&OpndDesc::Reg64FS,&OpndDesc::Reg64FS},CANTHROW,kLtFpmul,"fnmul","0,1,2",1) + +/* MOP_hcselrrrc --- Floating-point Conditional Select */ +DEFINE_MOP(MOP_hcselrrrc, {&OpndDesc::Reg16FD,&OpndDesc::Reg16FS,&OpndDesc::Reg16FS,&OpndDesc::Cond,&OpndDesc::CCS},ISCONDDEF,kLtFpalu,"fcsel","0,1,2,3",1) +/* MOP_scselrrrc */ +DEFINE_MOP(MOP_scselrrrc, {&OpndDesc::Reg32FD,&OpndDesc::Reg32FS,&OpndDesc::Reg32FS,&OpndDesc::Cond,&OpndDesc::CCS},ISCONDDEF,kLtFpalu,"fcsel","0,1,2,3",1) +/* MOP_dcselrrrc */ +DEFINE_MOP(MOP_dcselrrrc, {&OpndDesc::Reg64FD,&OpndDesc::Reg64FS,&OpndDesc::Reg64FS,&OpndDesc::Cond,&OpndDesc::CCS},ISCONDDEF,kLtFpalu,"fcsel","0,1,2,3",1) + +/* MOP_wldli -- load 32-bit literal */ +DEFINE_MOP(MOP_wldli, {&OpndDesc::Reg32ID,&OpndDesc::AddressName},ISLOAD|CANTHROW,kLtLoad1,"ldr","0,1",1) +/* MOP_xldli -- load 64-bit literal */ +DEFINE_MOP(MOP_xldli, {&OpndDesc::Reg64ID,&OpndDesc::AddressName},ISLOAD|CANTHROW,kLtLoad2,"ldr","0,1",1) +/* MOP_sldli -- load 32-bit literal */ +DEFINE_MOP(MOP_sldli, {&OpndDesc::Reg32FD,&OpndDesc::AddressName},ISLOAD|CANTHROW,kLtLoad1,"ldr","0,1",1) +/* MOP_dldli -- load 64-bit literal */ +DEFINE_MOP(MOP_dldli, {&OpndDesc::Reg64FD,&OpndDesc::AddressName},ISLOAD|CANTHROW,kLtLoad2,"ldr","0,1",1) + +/* AArch64 branches/calls */ +/* MOP_xbl -- branch with link (call); this is a special definition */ +DEFINE_MOP(MOP_xbl, {&OpndDesc::AddressName,&OpndDesc::ListSrc},ISCALL|CANTHROW,kLtBranch,"bl","0",1) +/* MOP_xblr -- branch with link (call) to register; this is a special definition */ +DEFINE_MOP(MOP_xblr, {&OpndDesc::Reg64IS,&OpndDesc::ListSrc},ISCALL|CANTHROW,kLtBranch,"blr","0",1) + +/* Tls descriptor */ +/* + * add x0, #:tprel_hi12:symbol, lsl #12 + * add x0, #:tprel_lo12_nc:symbol + */ +DEFINE_MOP(MOP_tls_desc_rel, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::LiteralSrc},SPINTRINSIC,kLtAlu,"tlsdescrel","0,1",2) + +/* + * adrp x0, , :tlsdesc:symbol + * ldr x1, [x0, #tlsdesc_lo12:symbol]] + * add x0, #tlsdesc_lo12:symbol + * .tlsdesccall symbol + * blr x1 + */ +DEFINE_MOP(MOP_tls_desc_call, {&OpndDesc::Reg64ID,&OpndDesc::Reg64ID,&OpndDesc::ListSrc},ISCALL|CANTHROW|SPINTRINSIC,kLtBranch,"tlsdesccall","0,1",2) + +/* System register access */ +/* MOP_mrs */ +DEFINE_MOP(MOP_mrs, {&OpndDesc::Reg64ID,&OpndDesc::String0S},ISMOVE,kLtAlu,"mrs","0,1",1) + + +/* Inline asm */ +/* Number of instructions generated by inline asm is arbitrary. Use a large number here. 
*/ +/* asm string, output list, clobber list, input list, output constraint, input constraint, out reg prefix, in reg prefix */ +DEFINE_MOP(MOP_asm, {&OpndDesc::String0S,&OpndDesc::ListDest,&OpndDesc::ListDest,&OpndDesc::ListSrc,&OpndDesc::ListSrc,&OpndDesc::ListSrc,&OpndDesc::ListSrc,&OpndDesc::ListSrc},INLINEASM|CANTHROW|HASACQUIRE|HASRELEASE,kLtUndef,"asm","0,1,2,3",100) + +/* c sync builtins */ +/* + * intrinsic_sync_lock_test_setI w0, w1, x2, w3, lable1 + * label1: + * ldxr w0, [x2] + * stxr w1, w3, [x2] + * cbnz w1, label1 + * dmb ish + */ +DEFINE_MOP(MOP_sync_lock_test_setI, {&OpndDesc::Reg32ID,&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Reg32IS,&OpndDesc::AddressName},HASLOOP|CANTHROW|SPINTRINSIC,kLtBranch,"intrinsic_sync_lock_test_setI","0,1,2,3,4",5) + +/* + * intrinsic_sync_lock_test_setL x0, w1, x2, x3, lable1 + * label1: + * ldxr x0, [x2] + * stxr w1, x3, [x2] + * cbnz w1, label1 + * dmb ish + */ +DEFINE_MOP(MOP_sync_lock_test_setL, {&OpndDesc::Reg64ID,&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::AddressName},HASLOOP|CANTHROW|SPINTRINSIC,kLtBranch,"intrinsic_sync_lock_test_setL","0,1,2,3,4",5) + +/* AARCH64 LOADS */ +/* MOP_wldrsb --- Load Register Signed Byte */ +DEFINE_MOP(MOP_wldrsb, {&OpndDesc::Reg32ID,&OpndDesc::Mem8S},ISLOAD|CANTHROW,kLtLoad1,"ldrsb","0,1",1,StrLdr8ImmValid) +/* MOP_xldrsb --- Load Register Signed Byte */ +DEFINE_MOP(MOP_xldrsb, {&OpndDesc::Reg64ID,&OpndDesc::Mem8S},ISLOAD|CANTHROW,kLtLoad1,"ldrsb","0,1",1,StrLdr8ImmValid) +/* MOP_wldrb */ +DEFINE_MOP(MOP_wldrb, {&OpndDesc::Reg32ID,&OpndDesc::Mem8S},ISLOAD|CANTHROW,kLtLoad1,"ldrb","0,1",1,StrLdr8ImmValid) +/* MOP_wldrsh --- Load Register Signed Halfword */ +DEFINE_MOP(MOP_wldrsh, {&OpndDesc::Reg32ID,&OpndDesc::Mem16S},ISLOAD|CANTHROW,kLtLoad1,"ldrsh","0,1",1,StrLdr16ImmValid) +/* MOP_xldrsh --- Load Register Signed Halfword */ +DEFINE_MOP(MOP_xldrsh, {&OpndDesc::Reg64ID,&OpndDesc::Mem16S},ISLOAD|CANTHROW,kLtLoad1,"ldrsh","0,1",1,StrLdr16ImmValid) +/* MOP_xldrsw --- Load Register Signed Word */ +DEFINE_MOP(MOP_xldrsw, {&OpndDesc::Reg64ID,&OpndDesc::Mem32S},ISLOAD|CANTHROW,kLtLoad1,"ldrsw","0,1",1,StrLdr32ImmValid) +/* MOP_wldrh */ +DEFINE_MOP(MOP_wldrh, {&OpndDesc::Reg32ID, &OpndDesc::Mem16S},ISLOAD|CANTHROW,kLtLoad1,"ldrh","0,1",1,StrLdr16ImmValid) +/* MOP_wldr */ +DEFINE_MOP(MOP_wldr, {&OpndDesc::Reg32ID,&OpndDesc::Mem32S},ISLOAD|CANTHROW,kLtLoad1,"ldr","0,1",1,StrLdr32ImmValid) +/* MOP_xldr */ +DEFINE_MOP(MOP_xldr, {&OpndDesc::Reg64ID,&OpndDesc::Mem64S},ISLOAD|CANTHROW,kLtLoad2,"ldr","0,1",1,StrLdr64ImmValid) +/* MOP_bldr */ +DEFINE_MOP(MOP_bldr, {&OpndDesc::Reg8FD,&OpndDesc::Mem8S},ISLOAD|CANTHROW,kLtFLoad64,"ldr","0,1",1,StrLdr8ImmValid) +/* MOP_hldr */ +DEFINE_MOP(MOP_hldr, {&OpndDesc::Reg16FD,&OpndDesc::Mem16S},ISLOAD|CANTHROW,kLtFLoad64,"ldr","0,1",1,StrLdr16ImmValid) +/* MOP_sldr */ +DEFINE_MOP(MOP_sldr, {&OpndDesc::Reg32FD,&OpndDesc::Mem32S},ISLOAD|CANTHROW,kLtFLoadMany,"ldr","0,1",1,StrLdr32ImmValid) +/* MOP_dldr */ +DEFINE_MOP(MOP_dldr, {&OpndDesc::Reg64FD,&OpndDesc::Mem64S},ISLOAD|CANTHROW,kLtFLoadMany,"ldr","0,1",1,StrLdr64ImmValid) +/* MOP_qldr */ +DEFINE_MOP(MOP_qldr, {&OpndDesc::Reg128VD,&OpndDesc::Mem128S},ISLOAD|CANTHROW,kLtFLoadMany,"ldr","0,1",1,StrLdr128ImmValid) + +/* AArch64 LDP/LDPSW */ +/* MOP_wldp */ +DEFINE_MOP(MOP_wldp, {&OpndDesc::Reg32ID,&OpndDesc::Reg32ID,&OpndDesc::Mem32S},ISLOAD|ISLOADPAIR|CANTHROW,kLtLoad2,"ldp","0,1,2",1,StrLdr32PairImmValid) +/* MOP_xldp */ +DEFINE_MOP(MOP_xldp, 
{&OpndDesc::Reg64ID,&OpndDesc::Reg64ID,&OpndDesc::Mem64S},ISLOAD|ISLOADPAIR|CANTHROW,kLtLoad3plus,"ldp","0,1,2",1,StrLdr64PairImmValid) +/* MOP_xldpsw */ +DEFINE_MOP(MOP_xldpsw, {&OpndDesc::Reg64ID,&OpndDesc::Reg64ID,&OpndDesc::Mem32S},ISLOAD|ISLOADPAIR|CANTHROW,kLtLoad2,"ldpsw","0,1,2",1,StrLdr32PairImmValid) +/* MOP_sldp */ +DEFINE_MOP(MOP_sldp, {&OpndDesc::Reg32FD,&OpndDesc::Reg32FD,&OpndDesc::Mem32S},ISLOAD|ISLOADPAIR|CANTHROW,kLtFLoad64,"ldp","0,1,2",1,StrLdr32PairImmValid) +/* MOP_dldp */ +DEFINE_MOP(MOP_dldp, {&OpndDesc::Reg64FD,&OpndDesc::Reg64FD,&OpndDesc::Mem64S},ISLOAD|ISLOADPAIR|CANTHROW,kLtFLoadMany,"ldp","0,1,2",1,StrLdr64PairImmValid) +/* MOP_qldp */ +DEFINE_MOP(MOP_qldp, {&OpndDesc::Reg128VD,&OpndDesc::Reg128VD,&OpndDesc::Mem128S},ISLOAD|ISLOADPAIR|CANTHROW,kLtFLoadMany,"ldp","0,1,2",1,StrLdr128PairImmValid) + +/* AARCH64 Load with Acquire semantics */ +/* MOP_wldarb */ +DEFINE_MOP(MOP_wldarb, {&OpndDesc::Reg32ID,&OpndDesc::Mem8S},ISLOAD|ISATOMIC|HASACQUIRE|CANTHROW,kLtLoad1,"ldarb","0,1",1,StrLdr8ImmValid) +/* MOP_wldarh */ +DEFINE_MOP(MOP_wldarh, {&OpndDesc::Reg32ID, &OpndDesc::Mem16S},ISLOAD|ISATOMIC|HASACQUIRE|CANTHROW,kLtLoad1,"ldarh","0,1",1,StrLdr16ImmValid) +/* MOP_wldar */ +DEFINE_MOP(MOP_wldar, {&OpndDesc::Reg32ID,&OpndDesc::Mem32S},ISLOAD|ISATOMIC|HASACQUIRE|CANTHROW,kLtLoad1,"ldar","0,1",1,StrLdr32ImmValid) +/* MOP_xldar */ +DEFINE_MOP(MOP_xldar, {&OpndDesc::Reg64ID,&OpndDesc::Mem64S},ISLOAD|ISATOMIC|HASACQUIRE|CANTHROW,kLtLoad1,"ldar","0,1",1,StrLdr64ImmValid) + +/* MOP_wmovkri16 */ +DEFINE_MOP(MOP_wmovkri16, {&OpndDesc::Reg32IDS,&OpndDesc::Imm16,&OpndDesc::Lsl4},ISMOVE,kLtShift,"movk","0,1,2",1,Imm16BitValid) +/* MOP_xmovkri16 */ +DEFINE_MOP(MOP_xmovkri16, {&OpndDesc::Reg64IDS,&OpndDesc::Imm16,&OpndDesc::Lsl6},ISMOVE,kLtShift,"movk","0,1,2",1,Imm16BitValid) + +/* MOP_wmovzri16 */ +DEFINE_MOP(MOP_wmovzri16, {&OpndDesc::Reg32ID,&OpndDesc::Imm16,&OpndDesc::Lsl4},ISMOVE,kLtShift,"movz","0,1,2",1,Imm16BitValid) +/* MOP_xmovzri16 */ +DEFINE_MOP(MOP_xmovzri16, {&OpndDesc::Reg64ID,&OpndDesc::Imm16,&OpndDesc::Lsl6},ISMOVE,kLtShift,"movz","0,1,2",1,Imm16BitValid) + +/* MOP_wmovnri16 */ +DEFINE_MOP(MOP_wmovnri16, {&OpndDesc::Reg32ID,&OpndDesc::Imm16,&OpndDesc::Lsl4},ISMOVE,kLtShift,"movn","0,1,2",1,Imm16BitValid) +/* MOP_xmovnri16 */ +DEFINE_MOP(MOP_xmovnri16, {&OpndDesc::Reg64ID,&OpndDesc::Imm16,&OpndDesc::Lsl6},ISMOVE,kLtShift,"movn","0,1,2",1,Imm16BitValid) + +/* AARCH64 Load exclusive with/without acquire semantics */ +DEFINE_MOP(MOP_wldxrb, {&OpndDesc::Reg32ID,&OpndDesc::Mem8S},ISLOAD|ISATOMIC|CANTHROW,kLtLoad1,"ldxrb","0,1",1,StrLdr8ImmValid) +DEFINE_MOP(MOP_wldxrh, {&OpndDesc::Reg32ID,&OpndDesc::Mem16S},ISLOAD|ISATOMIC|CANTHROW,kLtLoad1,"ldxrh","0,1",1,StrLdr16ImmValid) +DEFINE_MOP(MOP_wldxr, {&OpndDesc::Reg32ID,&OpndDesc::Mem32S},ISLOAD|ISATOMIC|CANTHROW,kLtLoad1,"ldxr","0,1",1,StrLdr32ImmValid) +DEFINE_MOP(MOP_xldxr, {&OpndDesc::Reg64ID,&OpndDesc::Mem64S},ISLOAD|ISATOMIC|CANTHROW,kLtLoad1,"ldxr","0,1",1,StrLdr64ImmValid) + +DEFINE_MOP(MOP_wldaxrb,{&OpndDesc::Reg32ID,&OpndDesc::Mem8S},ISLOAD|ISATOMIC|HASACQUIRE|CANTHROW,kLtLoad1,"ldaxrb","0,1",1,StrLdr8ImmValid) +DEFINE_MOP(MOP_wldaxrh,{&OpndDesc::Reg32ID,&OpndDesc::Mem16S},ISLOAD|ISATOMIC|HASACQUIRE|CANTHROW,kLtLoad1,"ldaxrh","0,1",1,StrLdr16ImmValid) +DEFINE_MOP(MOP_wldaxr, {&OpndDesc::Reg32ID,&OpndDesc::Mem32S},ISLOAD|ISATOMIC|HASACQUIRE|CANTHROW,kLtLoad1,"ldaxr","0,1",1,StrLdr32ImmValid) +DEFINE_MOP(MOP_xldaxr, 
{&OpndDesc::Reg64ID,&OpndDesc::Mem64S},ISLOAD|ISATOMIC|HASACQUIRE|CANTHROW,kLtLoad1,"ldaxr","0,1",1,StrLdr64ImmValid) + +DEFINE_MOP(MOP_wldaxp, {&OpndDesc::Reg32ID,&OpndDesc::Reg32ID,&OpndDesc::Mem32S},ISLOAD|ISLOADPAIR|ISATOMIC|HASACQUIRE|CANTHROW,kLtLoad1,"ldaxp","0,1,2",1,StrLdr32ImmValid) +DEFINE_MOP(MOP_xldaxp, {&OpndDesc::Reg64ID,&OpndDesc::Reg64ID,&OpndDesc::Mem64S},ISLOAD|ISLOADPAIR|ISATOMIC|HASACQUIRE|CANTHROW,kLtLoad1,"ldaxp","0,1,2",1,StrLdr64ImmValid) + +/* MOP_vsqrts */ +DEFINE_MOP(MOP_vsqrts, {&OpndDesc::Reg32FD,&OpndDesc::Reg32FS},CANTHROW,kLtAdvsimdDivS,"fsqrt","0,1",1) +/* MOP_vsqrtd */ +DEFINE_MOP(MOP_vsqrtd, {&OpndDesc::Reg64FD,&OpndDesc::Reg64FS},CANTHROW,kLtAdvsimdDivD,"fsqrt","0,1",1) + + +/* # Non Definitions */ +/* # As far as register allocation is concerned, the instructions below are non-definitions. */ + +/* MOP_bcs */ +DEFINE_MOP(MOP_bcs, {&OpndDesc::CCS,&OpndDesc::AddressName},ISCONDBRANCH,kLtBranch,"bcs","1",1) +/* MOP_bcc */ +DEFINE_MOP(MOP_bcc, {&OpndDesc::CCS,&OpndDesc::AddressName},ISCONDBRANCH,kLtBranch,"bcc","1",1) +/* MOP_beq */ +DEFINE_MOP(MOP_beq, {&OpndDesc::CCS,&OpndDesc::AddressName},ISCONDBRANCH,kLtBranch,"beq","1",1) +/* MOP_bne */ +DEFINE_MOP(MOP_bne, {&OpndDesc::CCS,&OpndDesc::AddressName},ISCONDBRANCH,kLtBranch,"bne","1",1) +/* MOP_blt */ +DEFINE_MOP(MOP_blt, {&OpndDesc::CCS,&OpndDesc::AddressName},ISCONDBRANCH,kLtBranch,"blt","1",1) +/* MOP_ble */ +DEFINE_MOP(MOP_ble, {&OpndDesc::CCS,&OpndDesc::AddressName},ISCONDBRANCH,kLtBranch,"ble","1",1) +/* MOP_bgt */ +DEFINE_MOP(MOP_bgt, {&OpndDesc::CCS,&OpndDesc::AddressName},ISCONDBRANCH,kLtBranch,"bgt","1",1) +/* MOP_bge */ +DEFINE_MOP(MOP_bge, {&OpndDesc::CCS,&OpndDesc::AddressName},ISCONDBRANCH,kLtBranch,"bge","1",1) +/* MOP_blo equal to MOP_blt for unsigned comparison */ +DEFINE_MOP(MOP_blo, {&OpndDesc::CCS,&OpndDesc::AddressName},ISCONDBRANCH,kLtBranch,"blo","1",1) +/* MOP_bls equal to MOP_bls for unsigned comparison */ +DEFINE_MOP(MOP_bls, {&OpndDesc::CCS,&OpndDesc::AddressName},ISCONDBRANCH,kLtBranch,"bls","1",1) +/* MOP_bhs equal to MOP_bge for unsigned comparison */ +DEFINE_MOP(MOP_bhs, {&OpndDesc::CCS,&OpndDesc::AddressName},ISCONDBRANCH,kLtBranch,"bhs","1",1) +/* MOP_bhi equal to MOP_bgt for float comparison */ +DEFINE_MOP(MOP_bhi, {&OpndDesc::CCS,&OpndDesc::AddressName},ISCONDBRANCH,kLtBranch,"bhi","1",1) +/* MOP_bpl equal to MOP_bge for float comparison */ +DEFINE_MOP(MOP_bpl, {&OpndDesc::CCS,&OpndDesc::AddressName},ISCONDBRANCH,kLtBranch,"bpl","1",1) +DEFINE_MOP(MOP_bmi, {&OpndDesc::CCS,&OpndDesc::AddressName},ISCONDBRANCH,kLtBranch,"bmi","1",1) +DEFINE_MOP(MOP_bvc, {&OpndDesc::CCS,&OpndDesc::AddressName},ISCONDBRANCH,kLtBranch,"bvc","1",1) +DEFINE_MOP(MOP_bvs, {&OpndDesc::CCS,&OpndDesc::AddressName},ISCONDBRANCH,kLtBranch,"bvs","1",1) + +/* MOP_xret AARCH64 Specific */ +DEFINE_MOP(MOP_xret, {},CANTHROW,kLtBranch,"ret","",1) +/* MOP_clrex AARCH64 Specific */ +DEFINE_MOP(MOP_clrex, {},CANTHROW,kLtBranch,"clrex","",1) + +/* AARCH64 Floating-Point COMPARES signaling versions */ +/* MOP_hcmperi -- AArch64 cmp has no dest operand */ +DEFINE_MOP(MOP_hcmperi, {&OpndDesc::CCD, &OpndDesc::Reg16FS,&OpndDesc::FpImm8},0,kLtFpalu,"fcmpe","1,2",1) +/* MOP_hcmperr -- register, shifted register, AArch64 cmp has no dest operand */ +DEFINE_MOP(MOP_hcmperr, {&OpndDesc::CCD, &OpndDesc::Reg16FS,&OpndDesc::Reg16FS},0,kLtFpalu,"fcmpe","1,2",1) + +/* MOP_scmperi -- AArch64 cmp has no dest operand */ +DEFINE_MOP(MOP_scmperi, {&OpndDesc::CCD, &OpndDesc::Reg32FS,&OpndDesc::FpImm8},0,kLtFpalu,"fcmpe","1,2",1) +/* 
MOP_scmperr */ +DEFINE_MOP(MOP_scmperr, {&OpndDesc::CCD, &OpndDesc::Reg32FS,&OpndDesc::Reg32FS},0,kLtFpalu,"fcmpe","1,2",1) + +/* MOP_dcmperi -- AArch64 cmp has no dest operand */ +DEFINE_MOP(MOP_dcmperi, {&OpndDesc::CCD, &OpndDesc::Reg64FS,&OpndDesc::FpImm8},0,kLtFpalu,"fcmpe","1,2",1) +/* MOP_dcmperr */ +DEFINE_MOP(MOP_dcmperr, {&OpndDesc::CCD, &OpndDesc::Reg64FS,&OpndDesc::Reg64FS},0,kLtFpalu,"fcmpe","1,2",1) + +/* AARCH64 Floating-Point COMPARES non-signaling (quiet) versions */ +/* MOP_hcmpqri -- AArch64 cmp has no dest operand */ +DEFINE_MOP(MOP_hcmpqri, {&OpndDesc::CCD, &OpndDesc::Reg16FS,&OpndDesc::FpImm8},0,kLtFpalu,"fcmp","1,2",1) +/* MOP_hcmpqrr -- register, shifted register, AArch64 cmp has no dest operand */ +DEFINE_MOP(MOP_hcmpqrr, {&OpndDesc::CCD, &OpndDesc::Reg16FS,&OpndDesc::Reg16FS},0,kLtFpalu,"fcmp","1,2",1) + +/* MOP_scmpqri -- AArch64 cmp has no dest operand */ +DEFINE_MOP(MOP_scmpqri, {&OpndDesc::CCD, &OpndDesc::Reg32FS,&OpndDesc::FpImm8},0,kLtFpalu,"fcmp","1,2",1) +/* MOP_scmpqrr */ +DEFINE_MOP(MOP_scmpqrr, {&OpndDesc::CCD, &OpndDesc::Reg32FS,&OpndDesc::Reg32FS},0,kLtFpalu,"fcmp","1,2",1) + +/* MOP_dcmpqri -- AArch64 cmp has no dest operand */ +DEFINE_MOP(MOP_dcmpqri, {&OpndDesc::CCD, &OpndDesc::Reg64FS,&OpndDesc::FpImm8},0,kLtFpalu,"fcmp","1,2",1) +/* MOP_dcmpqrr */ +DEFINE_MOP(MOP_dcmpqrr, {&OpndDesc::CCD, &OpndDesc::Reg64FS,&OpndDesc::Reg64FS},0,kLtFpalu,"fcmp","1,2",1) + +/* AARCH64 Integer COMPARES */ +/* MOP_wcmpri -- AArch64 cmp has no dest operand */ +DEFINE_MOP(MOP_wcmpri, {&OpndDesc::CCD, &OpndDesc::Reg32IS,&OpndDesc::Imm12},0,kLtAlu,"cmp","1,2",1,Imm12BitValid) +/* MOP_wcmprr -- register, shifted register, AArch64 cmp has no dest operand */ +DEFINE_MOP(MOP_wcmprr, {&OpndDesc::CCD, &OpndDesc::Reg32IS,&OpndDesc::Reg32IS},0,kLtAlu,"cmp","1,2",1) +/* MOP_wcmprrs -- register, shifted register, AArch64 cmp has no dest operand */ +DEFINE_MOP(MOP_wcmprrs, {&OpndDesc::CCD, &OpndDesc::Reg32IS,&OpndDesc::Reg32IS,&OpndDesc::Bitshift32},0,kLtAlu,"cmp","1,2,3",1) +/* MOP_wwcmprre -- register, shifted register, AArch64 cmp has no dest operand */ +DEFINE_MOP(MOP_wwcmprre, {&OpndDesc::CCD, &OpndDesc::Reg32IS,&OpndDesc::Reg32IS,&OpndDesc::Extendshift64},0,kLtAlu,"cmp","1,2,3",1) +/* MOP_xcmpri -- AArch64 cmp has no dest operand */ +DEFINE_MOP(MOP_xcmpri, {&OpndDesc::CCD, &OpndDesc::Reg64IS,&OpndDesc::Imm16},0,kLtAlu,"cmp","1,2",1,Imm16BitValid) +/* MOP_xcmprr -- register, shifted register, AArch64 cmp has no dest operand */ +DEFINE_MOP(MOP_xcmprr, {&OpndDesc::CCD, &OpndDesc::Reg64IS,&OpndDesc::Reg64IS},0,kLtAlu,"cmp","1,2",1) +/* MOP_xcmprrs -- register, shifted register, AArch64 cmp has no dest operand */ +DEFINE_MOP(MOP_xcmprrs, {&OpndDesc::CCD, &OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Bitshift64},0,kLtAlu,"cmp","1,2,3",1) +/* MOP_xwcmprre -- register, shifted register, AArch64 cmp has no dest operand */ +DEFINE_MOP(MOP_xwcmprre, {&OpndDesc::CCD, &OpndDesc::Reg64IS,&OpndDesc::Reg32IS,&OpndDesc::Extendshift64},0,kLtAlu,"cmp","1,2,3",1) + +/* MOP_wccmpriic -- AArch64 cmp has no dest operand */ +DEFINE_MOP(MOP_wccmpriic, {&OpndDesc::CCD, &OpndDesc::Reg32IS,&OpndDesc::Imm5,&OpndDesc::Imm4,&OpndDesc::Cond,&OpndDesc::CCS},0,kLtAlu,"ccmp","1,2,3,4",1) +/* MOP_wccmprric -- register, shifted register, AArch64 cmp has no dest operand */ +DEFINE_MOP(MOP_wccmprric, {&OpndDesc::CCD, &OpndDesc::Reg32IS,&OpndDesc::Reg32IS,&OpndDesc::Imm4,&OpndDesc::Cond,&OpndDesc::CCS},0,kLtAlu,"ccmp","1,2,3,4",1) +/* MOP_xccmpriic -- AArch64 cmp has no dest operand */ +DEFINE_MOP(MOP_xccmpriic, 
{&OpndDesc::CCD, &OpndDesc::Reg64IS,&OpndDesc::Imm5,&OpndDesc::Imm4,&OpndDesc::Cond,&OpndDesc::CCS},0,kLtAlu,"ccmp","1,2,3,4",1) +/* MOP_xccmprric -- register, shifted register, AArch64 cmp has no dest operand */ +DEFINE_MOP(MOP_xccmprric, {&OpndDesc::CCD, &OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Imm4,&OpndDesc::Cond,&OpndDesc::CCS},0,kLtAlu,"ccmp","1,2,3,4",1) + +/* MOP_wcmnri -- AArch64 cmp has no dest operand */ +DEFINE_MOP(MOP_wcmnri, {&OpndDesc::CCD, &OpndDesc::Reg32IS,&OpndDesc::Imm12},0,kLtAlu,"cmn","1,2",1,Imm12BitValid) +/* MOP_wcmnrr -- register, shifted register, AArch64 cmp has no dest operand */ +DEFINE_MOP(MOP_wcmnrr, {&OpndDesc::CCD, &OpndDesc::Reg32IS,&OpndDesc::Reg32IS},0,kLtAlu,"cmn","1,2",1) +/* MOP_wcmnrrs -- register, shifted register, AArch64 cmp has no dest operand */ +DEFINE_MOP(MOP_wcmnrrs, {&OpndDesc::CCD, &OpndDesc::Reg32IS,&OpndDesc::Reg32IS,&OpndDesc::Bitshift32},0,kLtAlu,"cmn","1,2,3",1) +/* MOP_wwcmnrre -- register, shifted register, AArch64 cmp has no dest operand */ +DEFINE_MOP(MOP_wwcmnrre, {&OpndDesc::CCD, &OpndDesc::Reg32IS,&OpndDesc::Reg32IS,&OpndDesc::Extendshift64},0,kLtAlu,"cmn","1,2,3",1) +/* MOP_xcmnri -- AArch64 cmp has no dest operand */ +DEFINE_MOP(MOP_xcmnri, {&OpndDesc::CCD, &OpndDesc::Reg64IS,&OpndDesc::Imm16},0,kLtAlu,"cmn","1,2",1,Imm16BitValid) +/* MOP_xcmnrr -- register, shifted register, AArch64 cmp has no dest operand */ +DEFINE_MOP(MOP_xcmnrr, {&OpndDesc::CCD, &OpndDesc::Reg64IS,&OpndDesc::Reg64IS},0,kLtAlu,"cmn","1,2",1) +/* MOP_xcmnrrs -- register, shifted register, AArch64 cmp has no dest operand */ +DEFINE_MOP(MOP_xcmnrrs, {&OpndDesc::CCD, &OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Bitshift64},0,kLtAlu,"cmn","1,2,3",1) +/* MOP_xwcmnrre -- register, shifted register, AArch64 cmp has no dest operand */ +DEFINE_MOP(MOP_xwcmnrre, {&OpndDesc::CCD, &OpndDesc::Reg64IS,&OpndDesc::Reg32IS,&OpndDesc::Extendshift64},0,kLtAlu,"cmn","1,2,3",1) + +/* AArch64 branches */ +/* MOP_xbr -- branch to register */ +DEFINE_MOP(MOP_xbr, {&OpndDesc::Reg64IS,&OpndDesc::LiteralSrc},ISUNCONDBRANCH,kLtBranch,"br","0",1) +/* MOP_Tbbuncond */ +DEFINE_MOP(MOP_xuncond, {&OpndDesc::AddressName},ISUNCONDBRANCH,kLtBranch,"b","0",1) + +/* MOP_wcbnz --- Compare and Branch on Nonzero */ +DEFINE_MOP(MOP_wcbnz, {&OpndDesc::Reg32IS,&OpndDesc::AddressName},ISCONDBRANCH,kLtBranch,"cbnz","0,1",1) +/* MOP_xcbnz */ +DEFINE_MOP(MOP_xcbnz, {&OpndDesc::Reg64IS,&OpndDesc::AddressName},ISCONDBRANCH,kLtBranch,"cbnz","0,1",1) +/* MOP_wcbz --- Compare and Branch on zero */ +DEFINE_MOP(MOP_wcbz, {&OpndDesc::Reg32IS,&OpndDesc::AddressName},ISCONDBRANCH,kLtBranch,"cbz","0,1",1) +/* MOP_xcbz */ +DEFINE_MOP(MOP_xcbz, {&OpndDesc::Reg64IS,&OpndDesc::AddressName},ISCONDBRANCH,kLtBranch,"cbz","0,1",1) + +/* MOP_wtbnz --- Test bit and Branch if Nonzero */ +DEFINE_MOP(MOP_wtbnz, {&OpndDesc::Reg32IS,&OpndDesc::Imm8,&OpndDesc::AddressName},ISCONDBRANCH,kLtBranch,"tbnz","0,1,2",1) +/* MOP_xtbnz */ +DEFINE_MOP(MOP_xtbnz, {&OpndDesc::Reg64IS,&OpndDesc::Imm8,&OpndDesc::AddressName},ISCONDBRANCH,kLtBranch,"tbnz","0,1,2",1) +/* MOP_wtbz --- Test bit and Branch if Zero */ +DEFINE_MOP(MOP_wtbz, {&OpndDesc::Reg32IS,&OpndDesc::Imm8,&OpndDesc::AddressName},ISCONDBRANCH,kLtBranch,"tbz","0,1,2",1) +/* MOP_xtbz */ +DEFINE_MOP(MOP_xtbz, {&OpndDesc::Reg64IS,&OpndDesc::Imm8,&OpndDesc::AddressName},ISCONDBRANCH,kLtBranch,"tbz","0,1,2",1) + +/* AARCH64 STORES */ +/* MOP_wstrb -- Store Register Byte */ +DEFINE_MOP(MOP_wstrb, 
{&OpndDesc::Reg32IS,&OpndDesc::Mem8D},ISSTORE|CANTHROW,kLtStore1,"strb","0,1",1,StrLdr8ImmValid) +/* MOP_wstrh -- Store Register Halfword */ +DEFINE_MOP(MOP_wstrh, {&OpndDesc::Reg32IS,&OpndDesc::Mem16D},ISSTORE|CANTHROW,kLtStore1,"strh","0,1",1,StrLdr16ImmValid) +/* MOP_wstr -- Store Register Word */ +DEFINE_MOP(MOP_wstr, {&OpndDesc::Reg32IS,&OpndDesc::Mem32D},ISSTORE|CANTHROW,kLtStore1,"str","0,1",1,StrLdr32ImmValid) +/* MOP_xstr -- Store Register Double word */ +DEFINE_MOP(MOP_xstr, {&OpndDesc::Reg64IS,&OpndDesc::Mem64D},ISSTORE|CANTHROW,kLtStore2,"str","0,1",1,StrLdr64ImmValid) + +/* MOP_sstr -- Store Register SIMD/FP Float */ +DEFINE_MOP(MOP_sstr, {&OpndDesc::Reg32FS,&OpndDesc::Mem32D},ISSTORE|CANTHROW,kLtStore2,"str","0,1",1,StrLdr32ImmValid) +/* MOP_dstr -- Store Register SIMD/FP Double */ +DEFINE_MOP(MOP_dstr, {&OpndDesc::Reg64FS,&OpndDesc::Mem64D},ISSTORE|CANTHROW,kLtStore3plus,"str","0,1",1,StrLdr64ImmValid) +/* MOP_qstr -- Store Register SIMD/FP Double */ +DEFINE_MOP(MOP_qstr, {&OpndDesc::Reg128VS,&OpndDesc::Mem128D},ISSTORE|CANTHROW,kLtStore3plus,"str","0,1",1,StrLdr128ImmValid) + +/* AArch64 STP. */ +/* MOP_wstp */ +DEFINE_MOP(MOP_wstp, {&OpndDesc::Reg32IS,&OpndDesc::Reg32IS,&OpndDesc::Mem32D},ISSTORE|ISSTOREPAIR|CANTHROW,kLtStore2,"stp","0,1,2",1,StrLdr32PairImmValid) +/* MOP_xstp */ +DEFINE_MOP(MOP_xstp, {&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Mem64D},ISSTORE|ISSTOREPAIR|CANTHROW,kLtStore3plus,"stp","0,1,2",1,StrLdr64PairImmValid) +/* AArch64 does not define STPSW. It has no practical value. */ +/* MOP_sstp */ +DEFINE_MOP(MOP_sstp, {&OpndDesc::Reg32FS,&OpndDesc::Reg32FS,&OpndDesc::Mem32D},ISSTORE|ISSTOREPAIR|CANTHROW,kLtAdvsimdMulQ,"stp","0,1,2",1,StrLdr32PairImmValid) +/* MOP_dstp */ +DEFINE_MOP(MOP_dstp, {&OpndDesc::Reg64FS,&OpndDesc::Reg64FS,&OpndDesc::Mem64D},ISSTORE|ISSTOREPAIR|CANTHROW,kLtAdvsimdMulQ,"stp","0,1,2",1,StrLdr64PairImmValid) +/* MOP_qstp */ +DEFINE_MOP(MOP_qstp, {&OpndDesc::Reg128VS,&OpndDesc::Reg128VS,&OpndDesc::Mem128D},ISSTORE|ISSTOREPAIR|CANTHROW,kLtAdvsimdMulQ,"stp","0,1,2",1,StrLdr128PairImmValid) + +/* AARCH64 Store with Release semantics */ +/* MOP_wstlrb -- Store-Release Register Byte */ +DEFINE_MOP(MOP_wstlrb, {&OpndDesc::Reg32IS,&OpndDesc::Mem8D},ISSTORE|ISATOMIC|HASRELEASE|CANTHROW,kLtStore1,"stlrb","0,1",1,StrLdr8ImmValid) +/* MOP_wstlrh -- Store-Release Register Halfword */ +DEFINE_MOP(MOP_wstlrh, {&OpndDesc::Reg32IS,&OpndDesc::Mem16D},ISSTORE|ISATOMIC|HASRELEASE|CANTHROW,kLtStore1,"stlrh","0,1",1,StrLdr16ImmValid) +/* MOP_wstlr -- Store-Release Register Word */ +DEFINE_MOP(MOP_wstlr, {&OpndDesc::Reg32IS,&OpndDesc::Mem32D},ISSTORE|ISATOMIC|HASRELEASE|CANTHROW,kLtStore1,"stlr","0,1",1,StrLdr32ImmValid) +/* MOP_xstlr -- Store-Release Register Double word */ +DEFINE_MOP(MOP_xstlr, {&OpndDesc::Reg64IS,&OpndDesc::Mem64D},ISSTORE|ISATOMIC|HASRELEASE|CANTHROW,kLtStore1,"stlr","0,1",1,StrLdr64ImmValid) + +/* AARCH64 Store exclusive with/without release semantics */ +DEFINE_MOP(MOP_wstxrb, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Mem8D},ISSTORE|ISATOMIC|CANTHROW,kLtStore1,"stxrb","0,1,2",1,StrLdr8ImmValid) +DEFINE_MOP(MOP_wstxrh, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Mem16D},ISSTORE|ISATOMIC|CANTHROW,kLtStore1,"stxrh","0,1,2",1,StrLdr16ImmValid) +DEFINE_MOP(MOP_wstxr, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Mem32D},ISSTORE|ISATOMIC|CANTHROW,kLtStore1,"stxr","0,1,2",1,StrLdr32ImmValid) +DEFINE_MOP(MOP_xstxr, 
{&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Mem64D},ISSTORE|ISATOMIC|CANTHROW,kLtStore1,"stxr","0,1,2",1,StrLdr64ImmValid) + +DEFINE_MOP(MOP_wstlxrb,{&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Mem8D},ISSTORE|ISATOMIC|HASRELEASE|CANTHROW,kLtStore1,"stlxrb","0,1,2",1,StrLdr8ImmValid) +DEFINE_MOP(MOP_wstlxrh,{&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Mem16D},ISSTORE|ISATOMIC|HASRELEASE|CANTHROW,kLtStore1,"stlxrh","0,1,2",1,StrLdr16ImmValid) +DEFINE_MOP(MOP_wstlxr, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Mem32D},ISSTORE|ISATOMIC|HASRELEASE|CANTHROW,kLtStore1,"stlxr","0,1,2",1,StrLdr32ImmValid) +DEFINE_MOP(MOP_xstlxr, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Mem64D},ISSTORE|ISATOMIC|HASRELEASE|CANTHROW,kLtStore1,"stlxr","0,1,2",1,StrLdr64ImmValid) + +DEFINE_MOP(MOP_wstlxp, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Reg32IS,&OpndDesc::Mem64D},ISSTORE|ISSTOREPAIR|ISATOMIC|HASRELEASE|CANTHROW,kLtStore1,"stlxp","0,1,2,3",1,StrLdr64ImmValid) +DEFINE_MOP(MOP_xstlxp, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Mem64D},ISSTORE|ISSTOREPAIR|ISATOMIC|HASRELEASE|CANTHROW,kLtStore1,"stlxp","0,1,2,3",1,StrLdr64ImmValid) + +/* Memory barriers */ +/* MOP_dmb_ishld */ +DEFINE_MOP(MOP_dmb_ishld, {}, HASACQUIRE|ISDMB,kLtBranch, "dmb\tishld", "",1) +/* MOP_dmb_ishst */ +DEFINE_MOP(MOP_dmb_ishst, {}, HASRELEASE|ISDMB,kLtBranch, "dmb\tishst", "",1) +/* MOP_dmb_ish */ +DEFINE_MOP(MOP_dmb_ish, {}, HASACQUIRE|HASRELEASE|ISDMB,kLtBranch, "dmb\tish", "",1) + +/* Neon simd, r:nonvector reg, u:64b vector reg, v:128b vector reg */ +/* Following ISMOVE vector instructions must be in a group, starting with vmovui and end with vmovvv */ +DEFINE_MOP(MOP_vmovui, {&OpndDesc::Reg64VD,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"movi","0,1",1) +DEFINE_MOP(MOP_vmovvi, {&OpndDesc::Reg128VD,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"movi","0,1",1) +DEFINE_MOP(MOP_vmovuu, {&OpndDesc::Reg64VD,&OpndDesc::Reg64VS},ISMOVE|ISVECTOR,kLtFpalu,"mov","0,1",1) +DEFINE_MOP(MOP_vmovvv, {&OpndDesc::Reg128VD,&OpndDesc::Reg128VS},ISMOVE|ISVECTOR,kLtFpalu,"mov","0,1",1) +DEFINE_MOP(MOP_vwmovru, {&OpndDesc::Reg32ID,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"umov","0,1",1) +DEFINE_MOP(MOP_vwmovrv, {&OpndDesc::Reg32ID,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"umov","0,1",1) +DEFINE_MOP(MOP_vxmovrv, {&OpndDesc::Reg64ID,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"umov","0,1",1) +DEFINE_MOP(MOP_vwdupur, {&OpndDesc::Reg64VD,&OpndDesc::Reg32IS},ISVECTOR|SPINTRINSIC,kLtFpalu,"dup","0,1",1) +DEFINE_MOP(MOP_vwdupvr, {&OpndDesc::Reg128VD,&OpndDesc::Reg32IS},ISVECTOR|SPINTRINSIC,kLtFpalu,"dup","0,1",1) +DEFINE_MOP(MOP_vxdupur, {&OpndDesc::Reg64VD,&OpndDesc::Reg64IS},ISVECTOR|SPINTRINSIC,kLtFpalu,"dup","0,1",1) +DEFINE_MOP(MOP_vxdupvr, {&OpndDesc::Reg128VD,&OpndDesc::Reg64IS},ISVECTOR|SPINTRINSIC,kLtFpalu,"dup","0,1",1) +DEFINE_MOP(MOP_vduprv, {&OpndDesc::Reg64FD,&OpndDesc::Reg128VS},ISVECTOR|SPINTRINSIC,kLtFpalu,"dup","0,1",1) +DEFINE_MOP(MOP_vextuuui,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"ext","0,1,2,3",1) +DEFINE_MOP(MOP_vextvvvi,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"ext","0,1,2,3",1) +DEFINE_MOP(MOP_vsabdlvuu,{&OpndDesc::Reg128VD,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtAlu,"sabdl","0,1,2",1) +DEFINE_MOP(MOP_vuabdlvuu,{&OpndDesc::Reg128VD,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtAlu,"uabdl","0,1,2",1) 
+DEFINE_MOP(MOP_vsabdl2vvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"sabdl2","0,1,2",1) +DEFINE_MOP(MOP_vuabdl2vvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"uabdl2","0,1,2",1) +DEFINE_MOP(MOP_vspadaluu,{&OpndDesc::Reg64VDS,&OpndDesc::Reg64VS},ISVECTOR,kLtAlu,"sadalp","0,1",1) +DEFINE_MOP(MOP_vspadalvv,{&OpndDesc::Reg128VDS,&OpndDesc::Reg128VS},ISVECTOR,kLtAlu,"sadalp","0,1",1) +DEFINE_MOP(MOP_vupadaluu,{&OpndDesc::Reg64VDS,&OpndDesc::Reg64VS},ISVECTOR,kLtAlu,"uadalp","0,1",1) +DEFINE_MOP(MOP_vupadalvv,{&OpndDesc::Reg128VDS,&OpndDesc::Reg128VS},ISVECTOR,kLtAlu,"uadalp","0,1",1) +DEFINE_MOP(MOP_vspadduu,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS},ISVECTOR,kLtAlu,"saddlp","0,1",1) +DEFINE_MOP(MOP_vspaddvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS},ISVECTOR,kLtAlu,"saddlp","0,1",1) +DEFINE_MOP(MOP_vupadduu,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS},ISVECTOR,kLtAlu,"uaddlp","0,1",1) +DEFINE_MOP(MOP_vupaddvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS},ISVECTOR,kLtAlu,"uaddlp","0,1",1) +DEFINE_MOP(MOP_vwinsur, {&OpndDesc::Reg64VDS,&OpndDesc::Reg32IS},ISVECTOR|SPINTRINSIC,kLtFpalu,"ins","0,1",1) +DEFINE_MOP(MOP_vxinsur, {&OpndDesc::Reg64VDS,&OpndDesc::Reg64IS},ISVECTOR|SPINTRINSIC,kLtFpalu,"ins","0,1",1) +DEFINE_MOP(MOP_vwinsvr, {&OpndDesc::Reg128VDS,&OpndDesc::Reg32IS},ISVECTOR|SPINTRINSIC,kLtFpalu,"ins","0,1",1) +DEFINE_MOP(MOP_vxinsvr, {&OpndDesc::Reg128VDS,&OpndDesc::Reg64IS},ISVECTOR|SPINTRINSIC,kLtFpalu,"ins","0,1",1) +DEFINE_MOP(MOP_vrev16dd,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"rev16","0,1",1) +DEFINE_MOP(MOP_vrev32dd,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"rev32","0,1",1) +DEFINE_MOP(MOP_vrev64dd,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"rev64","0,1",1) +DEFINE_MOP(MOP_vrev16qq,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"rev16","0,1",1) +DEFINE_MOP(MOP_vrev32qq,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"rev32","0,1",1) +DEFINE_MOP(MOP_vrev64qq,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"rev64","0,1",1) +DEFINE_MOP(MOP_vbaddvru,{&OpndDesc::Reg8FD,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"addv","0,1",1) +DEFINE_MOP(MOP_vhaddvru,{&OpndDesc::Reg16FD,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"addv","0,1",1) +DEFINE_MOP(MOP_vsaddvru,{&OpndDesc::Reg32FD,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"addv","0,1",1) +DEFINE_MOP(MOP_vbaddvrv,{&OpndDesc::Reg8FD,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"addv","0,1",1) +DEFINE_MOP(MOP_vhaddvrv,{&OpndDesc::Reg16FD,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"addv","0,1",1) +DEFINE_MOP(MOP_vsaddvrv,{&OpndDesc::Reg32FD,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"addv","0,1",1) +DEFINE_MOP(MOP_vdaddvrv,{&OpndDesc::Reg64FD,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"addp","0,1",1) + +DEFINE_MOP(MOP_vzcmequu,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"cmeq","0,1,2",1) +DEFINE_MOP(MOP_vzcmgtuu,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"cmgt","0,1,2",1) +DEFINE_MOP(MOP_vzcmgeuu,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"cmge","0,1,2",1) +DEFINE_MOP(MOP_vzcmltuu,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"cmlt","0,1,2",1) +DEFINE_MOP(MOP_vzcmleuu,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"cmle","0,1,2",1) +DEFINE_MOP(MOP_vzcmeqvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"cmeq","0,1,2",1) 
+DEFINE_MOP(MOP_vzcmgtvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"cmgt","0,1,2",1) +DEFINE_MOP(MOP_vzcmgevv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"cmge","0,1,2",1) +DEFINE_MOP(MOP_vzcmltvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"cmlt","0,1,2",1) +DEFINE_MOP(MOP_vzcmlevv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"cmle","0,1,2",1) +DEFINE_MOP(MOP_vcmequuu,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"cmeq","0,1,2",1) +DEFINE_MOP(MOP_vcmgeuuu,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"cmge","0,1,2",1) +DEFINE_MOP(MOP_vcmgtuuu,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"cmgt","0,1,2",1) +DEFINE_MOP(MOP_vcmhiuuu,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"cmhi","0,1,2",1) +DEFINE_MOP(MOP_vcmhsuuu,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"cmhs","0,1,2",1) +DEFINE_MOP(MOP_vcmeqvvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"cmeq","0,1,2",1) +DEFINE_MOP(MOP_vcmgevvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"cmge","0,1,2",1) +DEFINE_MOP(MOP_vcmgtvvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"cmgt","0,1,2",1) +DEFINE_MOP(MOP_vcmhivvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"cmhi","0,1,2",1) +DEFINE_MOP(MOP_vcmhsvvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"cmhs","0,1,2",1) +DEFINE_MOP(MOP_vbsluuu,{&OpndDesc::Reg64VDS,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"bsl","0,1,2",1) +DEFINE_MOP(MOP_vbslvvv,{&OpndDesc::Reg128VDS,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"bsl","0,1,2",1) + +DEFINE_MOP(MOP_vshluuu, {&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"sshl","0,1,2",1) +DEFINE_MOP(MOP_vshlvvv, {&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"sshl","0,1,2",1) +DEFINE_MOP(MOP_vushluuu,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"ushl","0,1,2",1) +DEFINE_MOP(MOP_vushlvvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"ushl","0,1,2",1) + +DEFINE_MOP(MOP_vushluui,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"shl","0,1,2",1) +DEFINE_MOP(MOP_vushlvvi,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"shl","0,1,2",1) +DEFINE_MOP(MOP_vushruui,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"ushr","0,1,2",1) +DEFINE_MOP(MOP_vushrvvi,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"ushr","0,1,2",1) + +DEFINE_MOP(MOP_vshllvvi,{&OpndDesc::Reg128VD,&OpndDesc::Reg64VS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"shll","0,1,2",1) +DEFINE_MOP(MOP_vushllvvi,{&OpndDesc::Reg128VD,&OpndDesc::Reg64VS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"ushll","0,1,2",1) +DEFINE_MOP(MOP_vxtnuv, {&OpndDesc::Reg64VD,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"xtn","0,1",1) +DEFINE_MOP(MOP_vsxtlvu, {&OpndDesc::Reg128VD,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"sxtl","0,1",1) +DEFINE_MOP(MOP_vuxtlvu, {&OpndDesc::Reg128VD,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"uxtl","0,1",1) +DEFINE_MOP(MOP_vxtn2uv, {&OpndDesc::Reg64VDS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"xtn2","0,1",1) 
+DEFINE_MOP(MOP_vsxtl2vv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"sxtl2","0,1",1) +DEFINE_MOP(MOP_vuxtl2vv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"uxtl2","0,1",1) + +DEFINE_MOP(MOP_vshruui, {&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"sshr","0,1,2",1) +DEFINE_MOP(MOP_vshrvvi, {&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"sshr","0,1,2",1) +DEFINE_MOP(MOP_vshrnuvi,{&OpndDesc::Reg64VD,&OpndDesc::Reg128VS,&OpndDesc::Imm8},ISVECTOR,kLtFpalu,"shrn","0,1,2",1) + +DEFINE_MOP(MOP_vtbl1vvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"tbl","0,1,2",1) +DEFINE_MOP(MOP_vsmaddvvv,{&OpndDesc::Reg128VDS,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"smlal","0,1,2",1) +DEFINE_MOP(MOP_vumaddvvv,{&OpndDesc::Reg128VDS,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"umlal","0,1,2",1) +DEFINE_MOP(MOP_vsmullvvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"smull","0,1,2",1) +DEFINE_MOP(MOP_vumullvvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"umull","0,1,2",1) +DEFINE_MOP(MOP_vsmull2vvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"smull2","0,1,2",1) +DEFINE_MOP(MOP_vumull2vvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"umull2","0,1,2",1) +DEFINE_MOP(MOP_vabsuu, {&OpndDesc::Reg64VD,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"abs","0,1",1) +DEFINE_MOP(MOP_vabsvv, {&OpndDesc::Reg128VD,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"abs","0,1",1) +DEFINE_MOP(MOP_vadduuu, {&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"add","0,1,2",1) +DEFINE_MOP(MOP_vsaddlvuu,{&OpndDesc::Reg128VD,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"saddl","0,1,2",1) +DEFINE_MOP(MOP_vuaddlvuu,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"uaddl","0,1,2",1) +DEFINE_MOP(MOP_vsaddl2vvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"saddl2","0,1,2",1) +DEFINE_MOP(MOP_vuaddl2vvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"uaddl2","0,1,2",1) +DEFINE_MOP(MOP_vsaddwvvu,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"saddw","0,1,2",1) +DEFINE_MOP(MOP_vuaddwvvu,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"uaddw","0,1,2",1) +DEFINE_MOP(MOP_vsaddw2vvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"saddw2","0,1,2",1) +DEFINE_MOP(MOP_vuaddw2vvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"uaddw2","0,1,2",1) +DEFINE_MOP(MOP_vaddvvv, {&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"add","0,1,2",1) +DEFINE_MOP(MOP_vmuluuu, {&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"mul","0,1,2",1) +DEFINE_MOP(MOP_vmulvvv, {&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"mul","0,1,2",1) +DEFINE_MOP(MOP_vsubuuu, {&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"sub","0,1,2",1) +DEFINE_MOP(MOP_vsubvvv, {&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"sub","0,1,2",1) +DEFINE_MOP(MOP_vanduuu, {&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"and","0,1,2",1) +DEFINE_MOP(MOP_vandvvv, 
{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"and","0,1,2",1)
+DEFINE_MOP(MOP_voruuu, {&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"orr","0,1,2",1)
+DEFINE_MOP(MOP_vorvvv, {&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"orr","0,1,2",1)
+DEFINE_MOP(MOP_vxoruuu, {&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"eor","0,1,2",1)
+DEFINE_MOP(MOP_vxorvvv, {&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"eor","0,1,2",1)
+DEFINE_MOP(MOP_vnotuu, {&OpndDesc::Reg64VD,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"not","0,1",1)
+DEFINE_MOP(MOP_vnotvv, {&OpndDesc::Reg128VD,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"not","0,1",1)
+DEFINE_MOP(MOP_vneguu, {&OpndDesc::Reg64VD,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"neg","0,1",1)
+DEFINE_MOP(MOP_vnegvv, {&OpndDesc::Reg128VD,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"neg","0,1",1)
+DEFINE_MOP(MOP_vssublvuu,{&OpndDesc::Reg128VD,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"ssubl","0,1,2",1)
+DEFINE_MOP(MOP_vusublvuu,{&OpndDesc::Reg128VD,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"usubl","0,1,2",1)
+DEFINE_MOP(MOP_vssubl2vvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"ssubl2","0,1,2",1)
+DEFINE_MOP(MOP_vusubl2vvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"usubl2","0,1,2",1)
+DEFINE_MOP(MOP_vssubwvvu,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"ssubw","0,1,2",1)
+DEFINE_MOP(MOP_vusubwvvu,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"usubw","0,1,2",1)
+DEFINE_MOP(MOP_vssubw2vvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"ssubw2","0,1,2",1)
+DEFINE_MOP(MOP_vusubw2vvv,{&OpndDesc::Reg128VD,&OpndDesc::Reg128VS,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"usubw2","0,1,2",1)
+DEFINE_MOP(MOP_vzip1vvv,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"zip1","0,1,2",1)
+DEFINE_MOP(MOP_vzip2vvv,{&OpndDesc::Reg64VD,&OpndDesc::Reg64VS,&OpndDesc::Reg64VS},ISVECTOR,kLtFpalu,"zip2","0,1,2",1)
+DEFINE_MOP(MOP_vsqxtnuv,{&OpndDesc::Reg64VD,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"sqxtn","0,1",1)
+DEFINE_MOP(MOP_vuqxtnuv,{&OpndDesc::Reg64VD,&OpndDesc::Reg128VS},ISVECTOR,kLtFpalu,"uqxtn","0,1",1)
+
+/*
+ * MOP_clinit
+ * will be emitted as four instructions in a row:
+ * adrp xd, :got:__classinfo__Ljava_2Futil_2Fconcurrent_2Fatomic_2FAtomicInteger_3B
+ * ldr xd, [xd,#:got_lo12:__classinfo__Ljava_2Futil_2Fconcurrent_2Fatomic_2FAtomicInteger_3B]
+ * ldr xd, [xd,#112]
+ * ldr wzr, [xd]
+ */
+DEFINE_MOP(MOP_clinit, {&OpndDesc::Reg64ID,&OpndDesc::LiteralSrc},ISATOMIC|CANTHROW,kLtClinit,"intrinsic_clinit","0,1",4)
+
+/*
+ * MOP_counter
+ * will be emitted as four instructions in a row:
+ * adrp x1, :got:__profile_table + idx
+ * ldr w17, [x1,#:got_lo12:__profile_table]
+ * add w17, w17, #1
+ * str w17, [x1,#:got_lo12:__profile_table]
+ */
+DEFINE_MOP(MOP_counter, {&OpndDesc::Reg64ID,&OpndDesc::LiteralSrc},ISATOMIC|CANTHROW,kLtClinit,"intrinsic_counter","0,1", 4)
+
+/*
+ * MOP_c_counter
+ * will be emitted as seven instructions in a row:
+ * str x30, [sp, #-16]!
+ * adrp x16, :got:__profile_table
+ * ldr x16, [x16,#:got_lo12:__profile_table]
+ * ldr x30, [x16 + offset]
+ * add x30, x30, #1
+ * str w30, [x16 + offset]
+ * ldr x30, [sp], #16
+ */
+DEFINE_MOP(MOP_c_counter, {&OpndDesc::LiteralSrc,&OpndDesc::Imm64},ISATOMIC|CANTHROW,kLtClinit,"intrinsic_counter","0,1", 8)
+
+/*
+ * will be emitted as two instructions in a row:
+ * ldr wd, [xs] // xd and xs should be different registers
+ * ldr wd, [xd]
+ */
+DEFINE_MOP(MOP_lazy_ldr, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS},ISATOMIC|CANTHROW|SPINTRINSIC,kLtClinitTail,"intrinsic_lazyload","0,1",2)
+
+/*
+ * will be emitted as three instructions in a row:
+ * adrp xd, :got:__staticDecoupleValueOffset$$xxx+offset
+ * ldr xd, [xd,#:got_lo12:__staticDecoupleValueOffset$$xx+offset]
+ * ldr xzr, [xd]
+ */
+DEFINE_MOP(MOP_lazy_ldr_static, {&OpndDesc::Reg64ID,&OpndDesc::LiteralSrc},ISATOMIC|CANTHROW,kLtAdrpLdr,"intrinsic_lazyloadstatic","0,1",3)
+
+/* A pseudo instruction that follows MOP_lazy_ldr, to make sure xs and xd are allocated to different physical registers. */
+DEFINE_MOP(MOP_lazy_tail, {&OpndDesc::Reg32IS,&OpndDesc::Reg64IS},0,kLtUndef,"pseudo_lazy_tail","",0)
+
+/* will be emitted as two instructions in a row:
+ * adrp xd, _PTR__cinf_Ljava_2Flang_2FSystem_3B
+ * ldr xd, [xd, #:lo12:_PTR__cinf_Ljava_2Flang_2FSystem_3B]
+ * MOP_adrp_ldr
+ */
+DEFINE_MOP(MOP_adrp_ldr, {&OpndDesc::Reg64ID, &OpndDesc::LiteralSrc},ISATOMIC|CANTHROW,kLtAdrpLdr,"intrinsic_adrpldr","0,1",2)
+
+/* will be emitted as two instructions in a row:
+ * adrp xd, label
+ * add xd, xd, #:lo12:label
+ */
+DEFINE_MOP(MOP_adrp_label, {&OpndDesc::Reg64ID, &OpndDesc::Imm64},0,kLtAlu,"intrinsic_adrplabel","0,1", 2)
+
+/*
+ * will be emitted as three instructions in a row:
+ * adrp xd, :got:__arrayClassCacheTable$$xxx+offset
+ * ldr xd, [xd,#:got_lo12:__arrayClassCacheTable$$xx+offset]
+ * ldr xzr, [xd]
+ */
+DEFINE_MOP(MOP_arrayclass_cache_ldr, {&OpndDesc::Reg64ID,&OpndDesc::LiteralSrc},ISATOMIC|CANTHROW,kLtAdrpLdr,"intrinsic_loadarrayclass","0,1",3)
+
+/*
+ * ldr x17, [xs,#112]
+ * ldr wzr, [x17]
+ */
+DEFINE_MOP(MOP_clinit_tail, {&OpndDesc::Reg64IS},ISATOMIC|CANTHROW|SPINTRINSIC,kLtClinitTail,"intrinsic_clinit_tail","0",2)
+
+/*
+ * intrinsic Unsafe.getAndAddInt
+ * intrinsic_get_add_int w0, xt, wt, ws, x1, x2, w3, label
+ * add xt, x1, x2
+ * label:
+ * ldaxr w0, [xt]
+ * add wt, w0, w3
+ * stlxr ws, wt, [xt]
+ * cbnz ws, label
+ */
+DEFINE_MOP(MOP_get_and_addI, {&OpndDesc::Reg32ID,&OpndDesc::Reg64ID,&OpndDesc::Reg32ID,&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Reg32IS,&OpndDesc::AddressName},HASLOOP|CANTHROW|SPINTRINSIC,kLtBranch,"intrinsic_get_add_int","",5)
+/*
+ * intrinsic Unsafe.getAndAddLong
+ * intrinsic_get_add_long x0, xt, xs, ws, x1, x2, x3, label
+ * add xt, x1, x2
+ * label:
+ * ldaxr x0, [xt]
+ * add xs, x0, x3
+ * stlxr ws, xs, [xt]
+ * cbnz ws, label
+ */
+DEFINE_MOP(MOP_get_and_addL, {&OpndDesc::Reg64ID,&OpndDesc::Reg64ID,&OpndDesc::Reg64ID,&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::AddressName},HASLOOP|CANTHROW|SPINTRINSIC,kLtBranch,"intrinsic_get_add_long","",5)
+
+/*
+ * intrinsic Unsafe.getAndSetInt
+ * intrinsic_get_set_int w0, xt, x1, x2, w3, label
+ * add xt, x1, x2
+ * label:
+ * ldaxr w0, [xt]
+ * stlxr w2, w3, [xt]
+ * cbnz w2, label
+ */
+DEFINE_MOP(MOP_get_and_setI, 
{&OpndDesc::Reg32ID,&OpndDesc::Reg64ID,&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Reg32IS,&OpndDesc::AddressName},HASLOOP|CANTHROW|SPINTRINSIC,kLtBranch,"intrinsic_get_set_int","0,1,2,3,4",4) +/* + * intrinsic Unsafe.getAndSetLong + * intrinsic_get_set_long x0, x1, x2, x3, label + * add xt, x1, x2 + * label: + * ldaxr x0, [xt] + * stlxr w2, x3, [xt] + * cbnz w2, label + */ +DEFINE_MOP(MOP_get_and_setL, {&OpndDesc::Reg64ID,&OpndDesc::Reg64ID,&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::AddressName},HASLOOP|CANTHROW|SPINTRINSIC,kLtBranch,"intrinsic_get_set_long","0,1,2,3,4",4) + +/* + * intrinsic Unsafe.compareAndSwapInt + * intrinsic_compare_swap_int x0, xt, ws, x1, x2, w3, w4, lable1, label2 + * add xt, x1, x2 + * label1: + * ldaxr ws, [xt] + * cmp ws, w3 + * b.ne label2 + * stlxr ws, w4, [xt] + * cbnz ws, label1 + * label2: + * cset x0, eq + */ +DEFINE_MOP(MOP_compare_and_swapI, {&OpndDesc::Reg64ID,&OpndDesc::Reg64ID,&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Reg32IS,&OpndDesc::Reg32IS,&OpndDesc::AddressName,&OpndDesc::AddressName},HASLOOP|CANTHROW|SPINTRINSIC,kLtBranch,"intrinsic_compare_swap_int","0,1,2,3,4,5,6",7) +/* + * intrinsic Unsafe.compareAndSwapLong + * intrinsic_compare_swap_long x0, xt, xs, x1, x2, x3, x4, lable1, label2 + * add xt, x1, x2 + * label1: + * ldaxr xs, [xt] + * cmp xs, x3 + * b.ne label2 + * stlxr ws, x4, [xt] + * cbnz ws, label1 + * label2: + * cset x0, eq + */ +DEFINE_MOP(MOP_compare_and_swapL, {&OpndDesc::Reg64ID,&OpndDesc::Reg64ID,&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::AddressName,&OpndDesc::AddressName},HASLOOP|CANTHROW|SPINTRINSIC,kLtBranch,"intrinsic_compare_swap_long","0,1,2,3,4,5,6",7) + +/* + * intrinsic String.indexOf(Ljava/lang/String;)I + * intrinsic_string_indexof w0, x1, w2, x3, w4, x5, x6, x7, x8, x9, w10, Label.FIRST_LOOP, Label.STR2_NEXT, Label.STR1_LOOP, Label.STR1_NEXT, Label.LAST_WORD, Label.NOMATCH, Label.RET + * cmp w4, w2 + * b.gt .Label.NOMATCH + * sub w2, w2, w4 + * sub w4, w4, #8 + * mov w10, w2 + * uxtw x4, w4 + * uxtw x2, w2 + * add x3, x3, x4 + * add x1, x1, x2 + * neg x4, x4 + * neg x2, x2 + * ldr x5, [x3,x4] + * .Label.FIRST_LOOP: + * ldr x7, [x1,x2] + * cmp x5, x7 + * b.eq .Label.STR1_LOOP + * .Label.STR2_NEXT: + * adds x2, x2, #1 + * b.le .Label.FIRST_LOOP + * b .Label.NOMATCH + * .Label.STR1_LOOP: + * adds x8, x4, #8 + * add x9, x2, #8 + * b.ge .Label.LAST_WORD + * .Label.STR1_NEXT: + * ldr x6, [x3,x8] + * ldr x7, [x1,x9] + * cmp x6, x7 + * b.ne .Label.STR2_NEXT + * adds x8, x8, #8 + * add x9, x9, #8 + * b.lt .Label.STR1_NEXT + * .Label.LAST_WORD: + * ldr x6, [x3] + * sub x9, x1, x4 + * ldr x7, [x9,x2] + * cmp x6, x7 + * b.ne .Label.STR2_NEXT + * add w0, w10, w2 + * b .Label.RET + * .Label.NOMATCH: + * mov w0, #-1 + * .Label.RET: + */ +DEFINE_MOP(MOP_string_indexof, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IDS,&OpndDesc::Reg32IDS,&OpndDesc::Reg64IDS,&OpndDesc::Reg32IDS,&OpndDesc::Reg64ID,&OpndDesc::Reg64ID,&OpndDesc::Reg64ID,&OpndDesc::Reg64ID,&OpndDesc::Reg64ID,&OpndDesc::Reg32ID,&OpndDesc::AddressName,&OpndDesc::AddressName,&OpndDesc::AddressName,&OpndDesc::AddressName,&OpndDesc::AddressName,&OpndDesc::AddressName,&OpndDesc::AddressName},HASLOOP|CANTHROW|SPINTRINSIC,kLtBranch,"intrinsic_string_indexof","0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17",36) + +/* MOP_tail_call_opt_xbl -- branch without link (call); this is a special definition */ +DEFINE_MOP(MOP_tail_call_opt_xbl, 
{&OpndDesc::AddressName,&OpndDesc::ListSrc},CANTHROW|ISTAILCALL,kLtBranch,"b","0", 1)
+/* MOP_tail_call_opt_xblr -- branch without link (call) to register; this is a special definition */
+DEFINE_MOP(MOP_tail_call_opt_xblr, {&OpndDesc::Reg64IS,&OpndDesc::ListSrc},CANTHROW|ISTAILCALL,kLtBranch,"br","0", 1)
+
+/* MOP_pseudo_param_def_x, */
+DEFINE_MOP(MOP_pseudo_param_def_x, {&OpndDesc::Reg64ID},0,kLtUndef,"//MOP_pseudo_param_def","0", 0)
+
+/* MOP_pseudo_param_def_w, */
+DEFINE_MOP(MOP_pseudo_param_def_w, {&OpndDesc::Reg32ID},0,kLtUndef,"//MOP_pseudo_param_def","0", 0)
+
+/* MOP_pseudo_param_def_d, */
+DEFINE_MOP(MOP_pseudo_param_def_d, {&OpndDesc::Reg64FD},0,kLtUndef,"//MOP_pseudo_param_def","0", 0)
+
+/* MOP_pseudo_param_def_s, */
+DEFINE_MOP(MOP_pseudo_param_def_s, {&OpndDesc::Reg32FD},0,kLtUndef,"//MOP_pseudo_param_def","0", 0)
+
+/* MOP_pseudo_param_store_x, */
+DEFINE_MOP(MOP_pseudo_param_store_x, {&OpndDesc::Mem64D},0,kLtUndef,"//MOP_pseudo_param_store_x","0", 0)
+
+/* MOP_pseudo_param_store_w, */
+DEFINE_MOP(MOP_pseudo_param_store_w, {&OpndDesc::Mem32D},0,kLtUndef,"//MOP_pseudo_param_store_w","0", 0)
+
+/* MOP_pseudo_ref_init_x, */
+DEFINE_MOP(MOP_pseudo_ref_init_x, {&OpndDesc::Mem64D},0,kLtUndef,"//MOP_pseudo_ref_init_x","0", 0)
+
+/* MOP_pseudo_ret_int, */
+DEFINE_MOP(MOP_pseudo_ret_int, {&OpndDesc::Reg64IS},0,kLtUndef,"//MOP_pseudo_ret_int","", 0)
+
+/* MOP_pseudo_ret_float, */
+DEFINE_MOP(MOP_pseudo_ret_float, {&OpndDesc::Reg64FS},0,kLtUndef,"//MOP_pseudo_ret_float","", 0)
+
+/* When an exception occurs, R0 and R1 may be defined by runtime code. */
+/* MOP_pseudo_eh_def_x, */
+DEFINE_MOP(MOP_pseudo_eh_def_x, {&OpndDesc::Reg64ID},0,kLtUndef,"//MOP_pseudo_eh_def_x","0", 0)
+
+/* MOP_nop */
+DEFINE_MOP(MOP_nop, {},ISNOP,kLtAlu,"nop","", 1)
+
+/* phi node for SSA form */
+/* MOP_xphirr */
+DEFINE_MOP(MOP_xphirr, {&OpndDesc::Reg64ID,&OpndDesc::ListSrc},ISPHI,kLtAlu,"//phi","0,1",1)
+/* MOP_wphirr */
+DEFINE_MOP(MOP_wphirr, {&OpndDesc::Reg32ID,&OpndDesc::ListSrc},ISPHI,kLtAlu,"//phi","0,1",1)
+/* MOP_xvphis */
+DEFINE_MOP(MOP_xvphis, {&OpndDesc::Reg32FD,&OpndDesc::ListSrc},ISPHI,kLtFpalu,"//phi","0,1",1)
+/* MOP_xvphid */
+DEFINE_MOP(MOP_xvphid, {&OpndDesc::Reg64FD,&OpndDesc::ListSrc},ISPHI,kLtFpalu,"//phi","0,1",1)
+/* MOP_xvphivd */
+DEFINE_MOP(MOP_xvphivd, {&OpndDesc::Reg128VD,&OpndDesc::ListSrc},ISPHI,kLtFpalu,"movi","0,1",1)
+
+/* A pseudo instruction used for separating the dependence graph. */
+/* MOP_pseudo_dependence_seperator, */
+DEFINE_MOP(MOP_pseudo_dependence_seperator, {},0,kLtUndef,"//MOP_pseudo_dependence_seperator","0", 0)
+
+
+/* A pseudo instruction used for replacing MOP_clinit_tail after clinit merge in scheduling. 
*/ +/* MOP_pseudo_none, */ +DEFINE_MOP(MOP_pseudo_none, {},0,kLtUndef,"//MOP_pseudo_none","0", 0) + +/* end of AArch64 instructions */ diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_mem_md.def b/src/mapleall/maple_be/include/cg/aarch64/aarch64_mem_md.def new file mode 100644 index 0000000000000000000000000000000000000000..574452818de95db43195c10463c938a729ebb53e --- /dev/null +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_mem_md.def @@ -0,0 +1,356 @@ +/* Load Register Signed Byte */ +DEFINE_MOP(MOP_wldrsb_r, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS},ISLOAD|CANTHROW,kLtLoad1,"ldrsb","0,1",1,StrLdr8ImmValid) +DEFINE_MOP(MOP_wldrsb_ri, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISLOAD|CANTHROW,kLtLoad1,"ldrsb","0,1",1,StrLdr8ImmValid) +DEFINE_MOP(MOP_wldrsb_rr, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},ISLOAD|CANTHROW,kLtLoad1,"ldrsb","0,1",1,StrLdr8ImmValid) +DEFINE_MOP(MOP_wldrsb_rex, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Reg32IS,&OpndDesc::Extendshift64},ISLOAD|CANTHROW,kLtLoad1,"ldrsb","0,1",1,StrLdr8ImmValid) +DEFINE_MOP(MOP_wldrsb_rls, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Bitshift64},ISLOAD|CANTHROW,kLtLoad1,"ldrsb","0,1",1,StrLdr8ImmValid) +DEFINE_MOP(MOP_wldrsb_rlo, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::AddressName,&OpndDesc::Imm8},ISLOAD|CANTHROW,kLtLoad1,"ldrsb","0,1",1,StrLdr8ImmValid) +DEFINE_MOP(MOP_wldrsb_pri, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISLOAD|CANTHROW,kLtLoad1,"ldrsb","0,1",1,StrLdr8ImmValid) +DEFINE_MOP(MOP_wldrsb_poi, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISLOAD|CANTHROW,kLtLoad1,"ldrsb","0,1",1,StrLdr8ImmValid) +DEFINE_MOP(MOP_wldrsb_l, {&OpndDesc::Reg32ID,&OpndDesc::AddressName},ISLOAD|CANTHROW,kLtLoad1,"ldrsb","0,1",1,StrLdr8ImmValid) +/* ldrsb */ +DEFINE_MOP(MOP_xldrsb_r, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS},ISLOAD|CANTHROW,kLtLoad1,"ldrsb","0,1",1,StrLdr8ImmValid) +DEFINE_MOP(MOP_xldrsb_ri, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISLOAD|CANTHROW,kLtLoad1,"ldrsb","0,1",1,StrLdr8ImmValid) +DEFINE_MOP(MOP_xldrsb_rr, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},ISLOAD|CANTHROW,kLtLoad1,"ldrsb","0,1",1,StrLdr8ImmValid) +DEFINE_MOP(MOP_xldrsb_rex, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg32IS,&OpndDesc::Extendshift64},ISLOAD|CANTHROW,kLtLoad1,"ldrsb","0,1",1,StrLdr8ImmValid) +DEFINE_MOP(MOP_xldrsb_rls, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Bitshift64},ISLOAD|CANTHROW,kLtLoad1,"ldrsb","0,1",1,StrLdr8ImmValid) +DEFINE_MOP(MOP_xldrsb_rlo, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::AddressName,&OpndDesc::Imm8},ISLOAD|CANTHROW,kLtLoad1,"ldrsb","0,1",1,StrLdr8ImmValid) +DEFINE_MOP(MOP_xldrsb_pri, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISLOAD|CANTHROW,kLtLoad1,"ldrsb","0,1",1,StrLdr8ImmValid) +DEFINE_MOP(MOP_xldrsb_poi, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISLOAD|CANTHROW,kLtLoad1,"ldrsb","0,1",1,StrLdr8ImmValid) +DEFINE_MOP(MOP_xldrsb_l, {&OpndDesc::Reg64ID,&OpndDesc::AddressName},ISLOAD|CANTHROW,kLtLoad1,"ldrsb","0,1",1,StrLdr8ImmValid) +/* ldrb */ +DEFINE_MOP(MOP_wldrb_r, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS},ISLOAD|CANTHROW,kLtLoad1,"ldrb","0,1",1,StrLdr8ImmValid) +DEFINE_MOP(MOP_wldrb_ri, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISLOAD|CANTHROW,kLtLoad1,"ldrb","0,1",1,StrLdr8ImmValid) +DEFINE_MOP(MOP_wldrb_rr, 
{&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},ISLOAD|CANTHROW,kLtLoad1,"ldrb","0,1",1,StrLdr8ImmValid)
+DEFINE_MOP(MOP_wldrb_rex, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Reg32IS,&OpndDesc::Extendshift64},ISLOAD|CANTHROW,kLtLoad1,"ldrb","0,1",1,StrLdr8ImmValid)
+DEFINE_MOP(MOP_wldrb_rls, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Bitshift64},ISLOAD|CANTHROW,kLtLoad1,"ldrb","0,1",1,StrLdr8ImmValid)
+DEFINE_MOP(MOP_wldrb_rlo, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::AddressName,&OpndDesc::Imm8},ISLOAD|CANTHROW,kLtLoad1,"ldrb","0,1",1,StrLdr8ImmValid)
+DEFINE_MOP(MOP_wldrb_pri, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISLOAD|CANTHROW,kLtLoad1,"ldrb","0,1",1,StrLdr8ImmValid)
+DEFINE_MOP(MOP_wldrb_poi, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISLOAD|CANTHROW,kLtLoad1,"ldrb","0,1",1,StrLdr8ImmValid)
+DEFINE_MOP(MOP_wldrb_l, {&OpndDesc::Reg32ID,&OpndDesc::AddressName},ISLOAD|CANTHROW,kLtLoad1,"ldrb","0,1",1,StrLdr8ImmValid)
+
+/* Load Register Signed Halfword */
+DEFINE_MOP(MOP_wldrsh_r, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS},ISLOAD|CANTHROW,kLtLoad1,"ldrsh","0,1",1,StrLdr16ImmValid)
+DEFINE_MOP(MOP_wldrsh_ri, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISLOAD|CANTHROW,kLtLoad1,"ldrsh","0,1",1,StrLdr16ImmValid)
+DEFINE_MOP(MOP_wldrsh_rr, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},ISLOAD|CANTHROW,kLtLoad1,"ldrsh","0,1",1,StrLdr16ImmValid)
+DEFINE_MOP(MOP_wldrsh_rex, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Reg32IS,&OpndDesc::Extendshift64},ISLOAD|CANTHROW,kLtLoad1,"ldrsh","0,1",1,StrLdr16ImmValid)
+DEFINE_MOP(MOP_wldrsh_rls, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Bitshift64},ISLOAD|CANTHROW,kLtLoad1,"ldrsh","0,1",1,StrLdr16ImmValid)
+DEFINE_MOP(MOP_wldrsh_rlo, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::AddressName,&OpndDesc::Imm8},ISLOAD|CANTHROW,kLtLoad1,"ldrsh","0,1",1,StrLdr16ImmValid)
+DEFINE_MOP(MOP_wldrsh_pri, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISLOAD|CANTHROW,kLtLoad1,"ldrsh","0,1",1,StrLdr16ImmValid)
+DEFINE_MOP(MOP_wldrsh_poi, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISLOAD|CANTHROW,kLtLoad1,"ldrsh","0,1",1,StrLdr16ImmValid)
+DEFINE_MOP(MOP_wldrsh_l, {&OpndDesc::Reg32ID,&OpndDesc::AddressName},ISLOAD|CANTHROW,kLtLoad1,"ldrsh","0,1",1,StrLdr16ImmValid)
+/* ldrsh */
+DEFINE_MOP(MOP_xldrsh_r, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS},ISLOAD|CANTHROW,kLtLoad1,"ldrsh","0,1",1,StrLdr16ImmValid)
+DEFINE_MOP(MOP_xldrsh_ri, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISLOAD|CANTHROW,kLtLoad1,"ldrsh","0,1",1,StrLdr16ImmValid)
+DEFINE_MOP(MOP_xldrsh_rr, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},ISLOAD|CANTHROW,kLtLoad1,"ldrsh","0,1",1,StrLdr16ImmValid)
+DEFINE_MOP(MOP_xldrsh_rex, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg32IS,&OpndDesc::Extendshift64},ISLOAD|CANTHROW,kLtLoad1,"ldrsh","0,1",1,StrLdr16ImmValid)
+DEFINE_MOP(MOP_xldrsh_rls, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Bitshift64},ISLOAD|CANTHROW,kLtLoad1,"ldrsh","0,1",1,StrLdr16ImmValid)
+DEFINE_MOP(MOP_xldrsh_rlo, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::AddressName,&OpndDesc::Imm8},ISLOAD|CANTHROW,kLtLoad1,"ldrsh","0,1",1,StrLdr16ImmValid)
+DEFINE_MOP(MOP_xldrsh_pri, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISLOAD|CANTHROW,kLtLoad1,"ldrsh","0,1",1,StrLdr16ImmValid)
+DEFINE_MOP(MOP_xldrsh_poi, 
{&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISLOAD|CANTHROW,kLtLoad1,"ldrsh","0,1",1,StrLdr16ImmValid) +DEFINE_MOP(MOP_xldrsh_l, {&OpndDesc::Reg64ID,&OpndDesc::AddressName},ISLOAD|CANTHROW,kLtLoad1,"ldrsh","0,1",1,StrLdr16ImmValid) +/* ldrh */ +DEFINE_MOP(MOP_wldrh_r, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS},ISLOAD|CANTHROW,kLtLoad1,"ldrh","0,1",1,StrLdr16ImmValid) +DEFINE_MOP(MOP_wldrh_ri, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISLOAD|CANTHROW,kLtLoad1,"ldrh","0,1",1,StrLdr16ImmValid) +DEFINE_MOP(MOP_wldrh_rr, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},ISLOAD|CANTHROW,kLtLoad1,"ldrh","0,1",1,StrLdr16ImmValid) +DEFINE_MOP(MOP_wldrh_rex, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Reg32IS,&OpndDesc::Extendshift64},ISLOAD|CANTHROW,kLtLoad1,"ldrh","0,1",1,StrLdr16ImmValid) +DEFINE_MOP(MOP_wldrh_rls, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Bitshift64},ISLOAD|CANTHROW,kLtLoad1,"ldrh","0,1",1,StrLdr16ImmValid) +DEFINE_MOP(MOP_wldrh_rlo, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::AddressName,&OpndDesc::Imm8},ISLOAD|CANTHROW,kLtLoad1,"ldrh","0,1",1,StrLdr16ImmValid) +DEFINE_MOP(MOP_wldrh_pri, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISLOAD|CANTHROW,kLtLoad1,"ldrh","0,1",1,StrLdr16ImmValid) +DEFINE_MOP(MOP_wldrh_poi, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISLOAD|CANTHROW,kLtLoad1,"ldrh","0,1",1,StrLdr16ImmValid) +DEFINE_MOP(MOP_wldrh_l, {&OpndDesc::Reg32ID,&OpndDesc::AddressName},ISLOAD|CANTHROW,kLtLoad1,"ldrh","0,1",1,StrLdr16ImmValid) + +/* Load Register Signed Word */ +DEFINE_MOP(MOP_xldrsw_r, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS},ISLOAD|CANTHROW,kLtLoad1,"ldrsw","0,1",1,StrLdr32ImmValid) +DEFINE_MOP(MOP_xldrsw_ri, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISLOAD|CANTHROW,kLtLoad1,"ldrsw","0,1",1,StrLdr32ImmValid) +DEFINE_MOP(MOP_xldrsw_rr, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},ISLOAD|CANTHROW,kLtLoad1,"ldrsw","0,1",1,StrLdr32ImmValid) +DEFINE_MOP(MOP_xldrsw_rex, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg32IS,&OpndDesc::Extendshift64},ISLOAD|CANTHROW,kLtLoad1,"ldrsw","0,1",1,StrLdr32ImmValid) +DEFINE_MOP(MOP_xldrsw_rls, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Bitshift64},ISLOAD|CANTHROW,kLtLoad1,"ldrsw","0,1",1,StrLdr32ImmValid) +DEFINE_MOP(MOP_xldrsw_rlo, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::AddressName,&OpndDesc::Imm8},ISLOAD|CANTHROW,kLtLoad1,"ldrsw","0,1",1,StrLdr32ImmValid) +DEFINE_MOP(MOP_xldrsw_pri, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISLOAD|CANTHROW,kLtLoad1,"ldrsw","0,1",1,StrLdr32ImmValid) +DEFINE_MOP(MOP_xldrsw_poi, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISLOAD|CANTHROW,kLtLoad1,"ldrsw","0,1",1,StrLdr32ImmValid) +DEFINE_MOP(MOP_xldrsw_l, {&OpndDesc::Reg64ID,&OpndDesc::AddressName},ISLOAD|CANTHROW,kLtLoad1,"ldrsw","0,1",1,StrLdr32ImmValid) + +/* ldr to w */ +DEFINE_MOP(MOP_wldr_r, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS},ISLOAD|CANTHROW,kLtLoad1,"ldr","0,1",1,StrLdr32ImmValid) +DEFINE_MOP(MOP_wldr_ri, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISLOAD|CANTHROW,kLtLoad1,"ldr","0,1",1,StrLdr32ImmValid) +DEFINE_MOP(MOP_wldr_rr, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},ISLOAD|CANTHROW,kLtLoad1,"ldr","0,1",1,StrLdr32ImmValid) +DEFINE_MOP(MOP_wldr_rex, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Reg32IS,&OpndDesc::Extendshift64},ISLOAD|CANTHROW,kLtLoad1,"ldr","0,1",1,StrLdr32ImmValid) +DEFINE_MOP(MOP_wldr_rls, 
{&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Bitshift64},ISLOAD|CANTHROW,kLtLoad1,"ldr","0,1",1,StrLdr32ImmValid) +DEFINE_MOP(MOP_wldr_rlo, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::AddressName,&OpndDesc::Imm8},ISLOAD|CANTHROW,kLtLoad1,"ldr","0,1",1,StrLdr32ImmValid) +DEFINE_MOP(MOP_wldr_pri, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISLOAD|CANTHROW,kLtLoad1,"ldr","0,1",1,StrLdr32ImmValid) +DEFINE_MOP(MOP_wldr_poi, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISLOAD|CANTHROW,kLtLoad1,"ldr","0,1",1,StrLdr32ImmValid) +DEFINE_MOP(MOP_wldr_l, {&OpndDesc::Reg32ID,&OpndDesc::AddressName},ISLOAD|CANTHROW,kLtLoad1,"ldr","0,1",1,StrLdr32ImmValid) +/* ldr to x */ +DEFINE_MOP(MOP_xldr_r, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS},ISLOAD|CANTHROW,kLtLoad1,"ldr","0,1",1,StrLdr32ImmValid) +DEFINE_MOP(MOP_xldr_ri, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISLOAD|CANTHROW,kLtLoad1,"ldr","0,1",1,StrLdr32ImmValid) +DEFINE_MOP(MOP_xldr_rr, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},ISLOAD|CANTHROW,kLtLoad1,"ldr","0,1",1,StrLdr32ImmValid) +DEFINE_MOP(MOP_xldr_rex, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Reg32IS,&OpndDesc::Extendshift64},ISLOAD|CANTHROW,kLtLoad1,"ldr","0,1",1,StrLdr32ImmValid) +DEFINE_MOP(MOP_xldr_rls, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Bitshift64},ISLOAD|CANTHROW,kLtLoad1,"ldr","0,1",1,StrLdr32ImmValid) +DEFINE_MOP(MOP_xldr_rlo, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::AddressName,&OpndDesc::Imm8},ISLOAD|CANTHROW,kLtLoad1,"ldr","0,1",1,StrLdr32ImmValid) +DEFINE_MOP(MOP_xldr_pri, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISLOAD|CANTHROW,kLtLoad1,"ldr","0,1",1,StrLdr32ImmValid) +DEFINE_MOP(MOP_xldr_poi, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISLOAD|CANTHROW,kLtLoad1,"ldr","0,1",1,StrLdr32ImmValid) +DEFINE_MOP(MOP_xldr_l, {&OpndDesc::Reg32ID,&OpndDesc::AddressName},ISLOAD|CANTHROW,kLtLoad1,"ldr","0,1",1,StrLdr32ImmValid) + +/* ldr to float reg */ +DEFINE_MOP(MOP_sldr_r, {&OpndDesc::Reg32FD,&OpndDesc::Reg64IS},ISLOAD|CANTHROW,kLtFLoadMany,"ldr","0,1",1,StrLdr32ImmValid) +DEFINE_MOP(MOP_sldr_ri, {&OpndDesc::Reg32FD,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISLOAD|CANTHROW,kLtFLoadMany,"ldr","0,1",1,StrLdr32ImmValid) +DEFINE_MOP(MOP_sldr_rr, {&OpndDesc::Reg32FD,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},ISLOAD|CANTHROW,kLtFLoadMany,"ldr","0,1",1,StrLdr32ImmValid) +DEFINE_MOP(MOP_sldr_rex, {&OpndDesc::Reg32FD,&OpndDesc::Reg64IS,&OpndDesc::Reg32IS,&OpndDesc::Extendshift64},ISLOAD|CANTHROW,kLtFLoadMany,"ldr","0,1",1,StrLdr32ImmValid) +DEFINE_MOP(MOP_sldr_rls, {&OpndDesc::Reg32FD,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Bitshift64},ISLOAD|CANTHROW,kLtFLoadMany,"ldr","0,1",1,StrLdr32ImmValid) +DEFINE_MOP(MOP_sldr_rlo, {&OpndDesc::Reg32FD,&OpndDesc::Reg64IS,&OpndDesc::AddressName,&OpndDesc::Imm8},ISLOAD|CANTHROW,kLtFLoadMany,"ldr","0,1",1,StrLdr32ImmValid) +DEFINE_MOP(MOP_sldr_pri, {&OpndDesc::Reg32FD,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISLOAD|CANTHROW,kLtFLoadMany,"ldr","0,1",1,StrLdr32ImmValid) +DEFINE_MOP(MOP_sldr_poi, {&OpndDesc::Reg32FD,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISLOAD|CANTHROW,kLtFLoadMany,"ldr","0,1",1,StrLdr32ImmValid) +DEFINE_MOP(MOP_sldr_l, {&OpndDesc::Reg32FD,&OpndDesc::AddressName},ISLOAD|CANTHROW,kLtFLoadMany,"ldr","0,1",1,StrLdr32ImmValid) + +DEFINE_MOP(MOP_dldr_r, {&OpndDesc::Reg64FD,&OpndDesc::Reg64IS},ISLOAD|CANTHROW,kLtFLoadMany,"ldr","0,1",1,StrLdr64ImmValid) +DEFINE_MOP(MOP_dldr_ri, 
{&OpndDesc::Reg64FD,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISLOAD|CANTHROW,kLtFLoadMany,"ldr","0,1",1,StrLdr64ImmValid) +DEFINE_MOP(MOP_dldr_rr, {&OpndDesc::Reg64FD,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},ISLOAD|CANTHROW,kLtFLoadMany,"ldr","0,1",1,StrLdr64ImmValid) +DEFINE_MOP(MOP_dldr_rex, {&OpndDesc::Reg64FD,&OpndDesc::Reg64IS,&OpndDesc::Reg32IS,&OpndDesc::Extendshift64},ISLOAD|CANTHROW,kLtFLoadMany,"ldr","0,1",1,StrLdr64ImmValid) +DEFINE_MOP(MOP_dldr_rls, {&OpndDesc::Reg64FD,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Bitshift64},ISLOAD|CANTHROW,kLtFLoadMany,"ldr","0,1",1,StrLdr64ImmValid) +DEFINE_MOP(MOP_dldr_rlo, {&OpndDesc::Reg64FD,&OpndDesc::Reg64IS,&OpndDesc::AddressName,&OpndDesc::Imm8},ISLOAD|CANTHROW,kLtFLoadMany,"ldr","0,1",1,StrLdr64ImmValid) +DEFINE_MOP(MOP_dldr_pri, {&OpndDesc::Reg64FD,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISLOAD|CANTHROW,kLtFLoadMany,"ldr","0,1",1,StrLdr64ImmValid) +DEFINE_MOP(MOP_dldr_poi, {&OpndDesc::Reg64FD,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISLOAD|CANTHROW,kLtFLoadMany,"ldr","0,1",1,StrLdr64ImmValid) +DEFINE_MOP(MOP_dldr_l, {&OpndDesc::Reg64FD,&OpndDesc::AddressName},ISLOAD|CANTHROW,kLtFLoadMany,"ldr","0,1",1,StrLdr64ImmValid) + +DEFINE_MOP(MOP_qldr_r, {&OpndDesc::Reg128VD,&OpndDesc::Reg64IS},ISLOAD|CANTHROW,kLtFLoadMany,"ldr","0,1",1,StrLdr128ImmValid) +DEFINE_MOP(MOP_qldr_ri, {&OpndDesc::Reg128VD,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISLOAD|CANTHROW,kLtFLoadMany,"ldr","0,1",1,StrLdr128ImmValid) +DEFINE_MOP(MOP_qldr_rr, {&OpndDesc::Reg128VD,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},ISLOAD|CANTHROW,kLtFLoadMany,"ldr","0,1",1,StrLdr128ImmValid) +DEFINE_MOP(MOP_qldr_rex, {&OpndDesc::Reg128VD,&OpndDesc::Reg64IS,&OpndDesc::Reg32IS,&OpndDesc::Extendshift64},ISLOAD|CANTHROW,kLtFLoadMany,"ldr","0,1",1,StrLdr128ImmValid) +DEFINE_MOP(MOP_qldr_rls, {&OpndDesc::Reg128VD,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Bitshift64},ISLOAD|CANTHROW,kLtFLoadMany,"ldr","0,1",1,StrLdr128ImmValid) +DEFINE_MOP(MOP_qldr_rlo, {&OpndDesc::Reg128VD,&OpndDesc::Reg64IS,&OpndDesc::AddressName,&OpndDesc::Imm8},ISLOAD|CANTHROW,kLtFLoadMany,"ldr","0,1",1,StrLdr128ImmValid) +DEFINE_MOP(MOP_qldr_pri, {&OpndDesc::Reg128VD,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISLOAD|CANTHROW,kLtFLoadMany,"ldr","0,1",1,StrLdr128ImmValid) +DEFINE_MOP(MOP_qldr_poi, {&OpndDesc::Reg128VD,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISLOAD|CANTHROW,kLtFLoadMany,"ldr","0,1",1,StrLdr128ImmValid) +DEFINE_MOP(MOP_qldr_l, {&OpndDesc::Reg128VD,&OpndDesc::AddressName},ISLOAD|CANTHROW,kLtFLoadMany,"ldr","0,1",1,StrLdr128ImmValid) + +/* ldr to pair reg */ +DEFINE_MOP(MOP_wldp_r, {&OpndDesc::Reg32ID,&OpndDesc::Reg32ID,&OpndDesc::Reg64IS},ISLOAD|ISLOADPAIR|CANTHROW,kLtLoad2,"ldp","0,1,2",1,StrLdr32PairImmValid) +DEFINE_MOP(MOP_wldp_ri, {&OpndDesc::Reg32ID,&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISLOAD|ISLOADPAIR|CANTHROW,kLtLoad2,"ldp","0,1,2",1,StrLdr32PairImmValid) +DEFINE_MOP(MOP_wldp_rr, {&OpndDesc::Reg32ID,&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},ISLOAD|ISLOADPAIR|CANTHROW,kLtLoad2,"ldp","0,1,2",1,StrLdr32PairImmValid) +DEFINE_MOP(MOP_wldp_rex, {&OpndDesc::Reg32ID,&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Reg32IS,&OpndDesc::Extendshift64},ISLOAD|ISLOADPAIR|CANTHROW,kLtLoad2,"ldp","0,1,2",1,StrLdr32PairImmValid) +DEFINE_MOP(MOP_wldp_rls, {&OpndDesc::Reg32ID,&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Bitshift64},ISLOAD|ISLOADPAIR|CANTHROW,kLtLoad2,"ldp","0,1,2",1,StrLdr32PairImmValid) +DEFINE_MOP(MOP_wldp_rlo, 
{&OpndDesc::Reg32ID,&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::AddressName,&OpndDesc::Imm8},ISLOAD|ISLOADPAIR|CANTHROW,kLtLoad2,"ldp","0,1,2",1,StrLdr32PairImmValid) +DEFINE_MOP(MOP_wldp_pri, {&OpndDesc::Reg32ID,&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISLOAD|ISLOADPAIR|CANTHROW,kLtLoad2,"ldp","0,1,2",1,StrLdr32PairImmValid) +DEFINE_MOP(MOP_wldp_poi, {&OpndDesc::Reg32ID,&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISLOAD|ISLOADPAIR|CANTHROW,kLtLoad2,"ldp","0,1,2",1,StrLdr32PairImmValid) +DEFINE_MOP(MOP_wldp_l, {&OpndDesc::Reg32ID,&OpndDesc::Reg32ID,&OpndDesc::AddressName},ISLOAD|ISLOADPAIR|CANTHROW,kLtLoad2,"ldp","0,1,2",1,StrLdr32PairImmValid) + +DEFINE_MOP(MOP_xldp_r, {&OpndDesc::Reg64ID,&OpndDesc::Reg64ID,&OpndDesc::Reg64IS},ISLOAD|ISLOADPAIR|CANTHROW,kLtLoad3plus,"ldp","0,1,2",1,StrLdr64PairImmValid) +DEFINE_MOP(MOP_xldp_ri, {&OpndDesc::Reg64ID,&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISLOAD|ISLOADPAIR|CANTHROW,kLtLoad3plus,"ldp","0,1,2",1,StrLdr64PairImmValid) +DEFINE_MOP(MOP_xldp_rr, {&OpndDesc::Reg64ID,&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},ISLOAD|ISLOADPAIR|CANTHROW,kLtLoad3plus,"ldp","0,1,2",1,StrLdr64PairImmValid) +DEFINE_MOP(MOP_xldp_rex, {&OpndDesc::Reg64ID,&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg32IS,&OpndDesc::Extendshift64},ISLOAD|ISLOADPAIR|CANTHROW,kLtLoad3plus,"ldp","0,1,2",1,StrLdr64PairImmValid) +DEFINE_MOP(MOP_xldp_rls, {&OpndDesc::Reg64ID,&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Bitshift64},ISLOAD|ISLOADPAIR|CANTHROW,kLtLoad3plus,"ldp","0,1,2",1,StrLdr64PairImmValid) +DEFINE_MOP(MOP_xldp_rlo, {&OpndDesc::Reg64ID,&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::AddressName,&OpndDesc::Imm8},ISLOAD|ISLOADPAIR|CANTHROW,kLtLoad3plus,"ldp","0,1,2",1,StrLdr64PairImmValid) +DEFINE_MOP(MOP_xldp_pri, {&OpndDesc::Reg64ID,&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISLOAD|ISLOADPAIR|CANTHROW,kLtLoad3plus,"ldp","0,1,2",1,StrLdr64PairImmValid) +DEFINE_MOP(MOP_xldp_poi, {&OpndDesc::Reg64ID,&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISLOAD|ISLOADPAIR|CANTHROW,kLtLoad3plus,"ldp","0,1,2",1,StrLdr64PairImmValid) +DEFINE_MOP(MOP_xldp_l, {&OpndDesc::Reg64ID,&OpndDesc::Reg64ID,&OpndDesc::AddressName},ISLOAD|ISLOADPAIR|CANTHROW,kLtLoad3plus,"ldp","0,1,2",1,StrLdr64PairImmValid) + +DEFINE_MOP(MOP_xldpsw_r, {&OpndDesc::Reg64ID,&OpndDesc::Reg64ID,&OpndDesc::Reg64IS},ISLOAD|ISLOADPAIR|CANTHROW,kLtLoad2,"ldpsw","0,1,2",1,StrLdr32PairImmValid) +DEFINE_MOP(MOP_xldpsw_ri, {&OpndDesc::Reg64ID,&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISLOAD|ISLOADPAIR|CANTHROW,kLtLoad2,"ldpsw","0,1,2",1,StrLdr32PairImmValid) +DEFINE_MOP(MOP_xldpsw_rr, {&OpndDesc::Reg64ID,&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},ISLOAD|ISLOADPAIR|CANTHROW,kLtLoad2,"ldpsw","0,1,2",1,StrLdr32PairImmValid) +DEFINE_MOP(MOP_xldpsw_rex, {&OpndDesc::Reg64ID,&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg32IS,&OpndDesc::Extendshift64},ISLOAD|ISLOADPAIR|CANTHROW,kLtLoad2,"ldpsw","0,1,2",1,StrLdr32PairImmValid) +DEFINE_MOP(MOP_xldpsw_rls, {&OpndDesc::Reg64ID,&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Bitshift64},ISLOAD|ISLOADPAIR|CANTHROW,kLtLoad2,"ldpsw","0,1,2",1,StrLdr32PairImmValid) +DEFINE_MOP(MOP_xldpsw_rlo, {&OpndDesc::Reg64ID,&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::AddressName,&OpndDesc::Imm8},ISLOAD|ISLOADPAIR|CANTHROW,kLtLoad2,"ldpsw","0,1,2",1,StrLdr32PairImmValid) +DEFINE_MOP(MOP_xldpsw_pri, 
{&OpndDesc::Reg64ID,&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISLOAD|ISLOADPAIR|CANTHROW,kLtLoad2,"ldpsw","0,1,2",1,StrLdr32PairImmValid) +DEFINE_MOP(MOP_xldpsw_poi, {&OpndDesc::Reg64ID,&OpndDesc::Reg64ID,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISLOAD|ISLOADPAIR|CANTHROW,kLtLoad2,"ldpsw","0,1,2",1,StrLdr32PairImmValid) +DEFINE_MOP(MOP_xldpsw_l, {&OpndDesc::Reg64ID,&OpndDesc::Reg64ID,&OpndDesc::AddressName},ISLOAD|ISLOADPAIR|CANTHROW,kLtLoad2,"ldpsw","0,1,2",1,StrLdr32PairImmValid) + +DEFINE_MOP(MOP_sldp_r, {&OpndDesc::Reg32FD,&OpndDesc::Reg32FD,&OpndDesc::Reg64IS},ISLOAD|ISLOADPAIR|CANTHROW,kLtFLoad64,"ldp","0,1,2",1,StrLdr32PairImmValid) +DEFINE_MOP(MOP_sldp_ri, {&OpndDesc::Reg32FD,&OpndDesc::Reg32FD,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISLOAD|ISLOADPAIR|CANTHROW,kLtFLoad64,"ldp","0,1,2",1,StrLdr32PairImmValid) +DEFINE_MOP(MOP_sldp_rr, {&OpndDesc::Reg32FD,&OpndDesc::Reg32FD,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},ISLOAD|ISLOADPAIR|CANTHROW,kLtFLoad64,"ldp","0,1,2",1,StrLdr32PairImmValid) +DEFINE_MOP(MOP_sldp_rex, {&OpndDesc::Reg32FD,&OpndDesc::Reg32FD,&OpndDesc::Reg64IS,&OpndDesc::Reg32IS,&OpndDesc::Extendshift64},ISLOAD|ISLOADPAIR|CANTHROW,kLtFLoad64,"ldp","0,1,2",1,StrLdr32PairImmValid) +DEFINE_MOP(MOP_sldp_rls, {&OpndDesc::Reg32FD,&OpndDesc::Reg32FD,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Bitshift64},ISLOAD|ISLOADPAIR|CANTHROW,kLtFLoad64,"ldp","0,1,2",1,StrLdr32PairImmValid) +DEFINE_MOP(MOP_sldp_rlo, {&OpndDesc::Reg32FD,&OpndDesc::Reg32FD,&OpndDesc::Reg64IS,&OpndDesc::AddressName,&OpndDesc::Imm8},ISLOAD|ISLOADPAIR|CANTHROW,kLtFLoad64,"ldp","0,1,2",1,StrLdr32PairImmValid) +DEFINE_MOP(MOP_sldp_pri, {&OpndDesc::Reg32FD,&OpndDesc::Reg32FD,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISLOAD|ISLOADPAIR|CANTHROW,kLtFLoad64,"ldp","0,1,2",1,StrLdr32PairImmValid) +DEFINE_MOP(MOP_sldp_poi, {&OpndDesc::Reg32FD,&OpndDesc::Reg32FD,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISLOAD|ISLOADPAIR|CANTHROW,kLtFLoad64,"ldp","0,1,2",1,StrLdr32PairImmValid) +DEFINE_MOP(MOP_sldp_l, {&OpndDesc::Reg32FD,&OpndDesc::Reg32FD,&OpndDesc::AddressName},ISLOAD|ISLOADPAIR|CANTHROW,kLtFLoad64,"ldp","0,1,2",1,StrLdr32PairImmValid) + +DEFINE_MOP(MOP_dldp_r, {&OpndDesc::Reg64FD,&OpndDesc::Reg64FD,&OpndDesc::Reg64IS},ISLOAD|ISLOADPAIR|CANTHROW,kLtFLoadMany,"ldp","0,1,2",1,StrLdr64PairImmValid) +DEFINE_MOP(MOP_dldp_ri, {&OpndDesc::Reg64FD,&OpndDesc::Reg64FD,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISLOAD|ISLOADPAIR|CANTHROW,kLtFLoadMany,"ldp","0,1,2",1,StrLdr64PairImmValid) +DEFINE_MOP(MOP_dldp_rr, {&OpndDesc::Reg64FD,&OpndDesc::Reg64FD,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},ISLOAD|ISLOADPAIR|CANTHROW,kLtFLoadMany,"ldp","0,1,2",1,StrLdr64PairImmValid) +DEFINE_MOP(MOP_dldp_rex, {&OpndDesc::Reg64FD,&OpndDesc::Reg64FD,&OpndDesc::Reg64IS,&OpndDesc::Reg32IS,&OpndDesc::Extendshift64},ISLOAD|ISLOADPAIR|CANTHROW,kLtFLoadMany,"ldp","0,1,2",1,StrLdr64PairImmValid) +DEFINE_MOP(MOP_dldp_rls, {&OpndDesc::Reg64FD,&OpndDesc::Reg64FD,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Bitshift64},ISLOAD|ISLOADPAIR|CANTHROW,kLtFLoadMany,"ldp","0,1,2",1,StrLdr64PairImmValid) +DEFINE_MOP(MOP_dldp_rlo, {&OpndDesc::Reg64FD,&OpndDesc::Reg64FD,&OpndDesc::Reg64IS,&OpndDesc::AddressName,&OpndDesc::Imm8},ISLOAD|ISLOADPAIR|CANTHROW,kLtFLoadMany,"ldp","0,1,2",1,StrLdr64PairImmValid) +DEFINE_MOP(MOP_dldp_pri, {&OpndDesc::Reg64FD,&OpndDesc::Reg64FD,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISLOAD|ISLOADPAIR|CANTHROW,kLtFLoadMany,"ldp","0,1,2",1,StrLdr64PairImmValid) +DEFINE_MOP(MOP_dldp_poi, 
{&OpndDesc::Reg64FD,&OpndDesc::Reg64FD,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISLOAD|ISLOADPAIR|CANTHROW,kLtFLoadMany,"ldp","0,1,2",1,StrLdr64PairImmValid) +DEFINE_MOP(MOP_dldp_l, {&OpndDesc::Reg64FD,&OpndDesc::Reg64FD,&OpndDesc::AddressName},ISLOAD|ISLOADPAIR|CANTHROW,kLtFLoadMany,"ldp","0,1,2",1,StrLdr64PairImmValid) + +DEFINE_MOP(MOP_qldp_r, {&OpndDesc::Reg128VD,&OpndDesc::Reg128VD,&OpndDesc::Reg64IS},ISLOAD|ISLOADPAIR|CANTHROW,kLtFLoadMany,"ldp","0,1,2",1,StrLdr128PairImmValid) +DEFINE_MOP(MOP_qldp_ri, {&OpndDesc::Reg128VD,&OpndDesc::Reg128VD,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISLOAD|ISLOADPAIR|CANTHROW,kLtFLoadMany,"ldp","0,1,2",1,StrLdr128PairImmValid) +DEFINE_MOP(MOP_qldp_rr, {&OpndDesc::Reg128VD,&OpndDesc::Reg128VD,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},ISLOAD|ISLOADPAIR|CANTHROW,kLtFLoadMany,"ldp","0,1,2",1,StrLdr128PairImmValid) +DEFINE_MOP(MOP_qldp_rex, {&OpndDesc::Reg128VD,&OpndDesc::Reg128VD,&OpndDesc::Reg64IS,&OpndDesc::Reg32IS,&OpndDesc::Extendshift64},ISLOAD|ISLOADPAIR|CANTHROW,kLtFLoadMany,"ldp","0,1,2",1,StrLdr128PairImmValid) +DEFINE_MOP(MOP_qldp_rls, {&OpndDesc::Reg128VD,&OpndDesc::Reg128VD,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Bitshift64},ISLOAD|ISLOADPAIR|CANTHROW,kLtFLoadMany,"ldp","0,1,2",1,StrLdr128PairImmValid) +DEFINE_MOP(MOP_qldp_rlo, {&OpndDesc::Reg128VD,&OpndDesc::Reg128VD,&OpndDesc::Reg64IS,&OpndDesc::AddressName,&OpndDesc::Imm8},ISLOAD|ISLOADPAIR|CANTHROW,kLtFLoadMany,"ldp","0,1,2",1,StrLdr128PairImmValid) +DEFINE_MOP(MOP_qldp_pri, {&OpndDesc::Reg128VD,&OpndDesc::Reg128VD,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISLOAD|ISLOADPAIR|CANTHROW,kLtFLoadMany,"ldp","0,1,2",1,StrLdr128PairImmValid) +DEFINE_MOP(MOP_qldp_poi, {&OpndDesc::Reg128VD,&OpndDesc::Reg128VD,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISLOAD|ISLOADPAIR|CANTHROW,kLtFLoadMany,"ldp","0,1,2",1,StrLdr128PairImmValid) +DEFINE_MOP(MOP_qldp_l, {&OpndDesc::Reg128VD,&OpndDesc::Reg128VD,&OpndDesc::AddressName},ISLOAD|ISLOADPAIR|CANTHROW,kLtFLoadMany,"ldp","0,1,2",1,StrLdr128PairImmValid) + +/* Load with Acquire semantics */ +DEFINE_MOP(MOP_wldarb_r, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS},ISLOAD|ISATOMIC|HASACQUIRE|CANTHROW,kLtLoad1,"ldarb","0,1",1,StrLdr8ImmValid) +DEFINE_MOP(MOP_wldarh_r, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS},ISLOAD|ISATOMIC|HASACQUIRE|CANTHROW,kLtLoad1,"ldarh","0,1",1,StrLdr16ImmValid) +DEFINE_MOP(MOP_wldar_r, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS},ISLOAD|ISATOMIC|HASACQUIRE|CANTHROW,kLtLoad1,"ldar","0,1",1,StrLdr32ImmValid) +DEFINE_MOP(MOP_xldar_r, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS},ISLOAD|ISATOMIC|HASACQUIRE|CANTHROW,kLtLoad1,"ldar","0,1",1,StrLdr64ImmValid) + +/* Load exclusive with/without acquire semantics */ +DEFINE_MOP(MOP_wldxrb_r, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS},ISLOAD|ISATOMIC|CANTHROW,kLtLoad1,"ldxrb","0,1",1,StrLdr8ImmValid) +DEFINE_MOP(MOP_wldxrh_r, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS},ISLOAD|ISATOMIC|CANTHROW,kLtLoad1,"ldxrh","0,1",1,StrLdr16ImmValid) +DEFINE_MOP(MOP_wldxr_r, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS},ISLOAD|ISATOMIC|CANTHROW,kLtLoad1,"ldxr","0,1",1,StrLdr32ImmValid) +DEFINE_MOP(MOP_xldxr_r, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS},ISLOAD|ISATOMIC|CANTHROW,kLtLoad1,"ldxr","0,1",1,StrLdr64ImmValid) + +DEFINE_MOP(MOP_wldaxrb_r, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS},ISLOAD|ISATOMIC|HASACQUIRE|CANTHROW,kLtLoad1,"ldaxrb","0,1",1,StrLdr8ImmValid) +DEFINE_MOP(MOP_wldaxrh_r, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS},ISLOAD|ISATOMIC|HASACQUIRE|CANTHROW,kLtLoad1,"ldaxrh","0,1",1,StrLdr16ImmValid) +DEFINE_MOP(MOP_wldaxr_r, 
{&OpndDesc::Reg32ID,&OpndDesc::Reg64IS},ISLOAD|ISATOMIC|HASACQUIRE|CANTHROW,kLtLoad1,"ldaxr","0,1",1,StrLdr32ImmValid) +DEFINE_MOP(MOP_xldaxr_r, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS},ISLOAD|ISATOMIC|HASACQUIRE|CANTHROW,kLtLoad1,"ldaxr","0,1",1,StrLdr64ImmValid) +DEFINE_MOP(MOP_wldaxp_r, {&OpndDesc::Reg32ID,&OpndDesc::Reg32ID,&OpndDesc::Reg64IS},ISLOAD|ISLOADPAIR|ISATOMIC|HASACQUIRE|CANTHROW,kLtLoad2,"ldaxp","0,1,2",1,StrLdr32ImmValid) +DEFINE_MOP(MOP_xldaxp_r, {&OpndDesc::Reg64ID,&OpndDesc::Reg64ID,&OpndDesc::Reg64IS},ISLOAD|ISLOADPAIR|ISATOMIC|HASACQUIRE|CANTHROW,kLtLoad2,"ldaxp","0,1,2",1,StrLdr64ImmValid) + +/* Store Register Byte */ +DEFINE_MOP(MOP_wstrb_r, {&OpndDesc::Reg32IS,&OpndDesc::Reg64IS},ISSTORE|CANTHROW,kLtStore1,"strb","0,1",1,StrLdr8ImmValid) +DEFINE_MOP(MOP_wstrb_ri, {&OpndDesc::Reg32IS,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISSTORE|CANTHROW,kLtStore1,"strb","0,1",1,StrLdr8ImmValid) +DEFINE_MOP(MOP_wstrb_rr, {&OpndDesc::Reg32IS,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},ISSTORE|CANTHROW,kLtStore1,"strb","0,1",1,StrLdr8ImmValid) +DEFINE_MOP(MOP_wstrb_rex, {&OpndDesc::Reg32IS,&OpndDesc::Reg64IS,&OpndDesc::Reg32IS,&OpndDesc::Extendshift64},ISSTORE|CANTHROW,kLtStore1,"strb","0,1",1,StrLdr8ImmValid) +DEFINE_MOP(MOP_wstrb_rls, {&OpndDesc::Reg32IS,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Bitshift64},ISSTORE|CANTHROW,kLtStore1,"strb","0,1",1,StrLdr8ImmValid) +DEFINE_MOP(MOP_wstrb_rlo, {&OpndDesc::Reg32IS,&OpndDesc::Reg64IS,&OpndDesc::AddressName,&OpndDesc::Imm8},ISSTORE|CANTHROW,kLtStore1,"strb","0,1",1,StrLdr8ImmValid) +DEFINE_MOP(MOP_wstrb_pri, {&OpndDesc::Reg32IS,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISSTORE|CANTHROW,kLtStore1,"strb","0,1",1,StrLdr8ImmValid) +DEFINE_MOP(MOP_wstrb_poi, {&OpndDesc::Reg32IS,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISSTORE|CANTHROW,kLtStore1,"strb","0,1",1,StrLdr8ImmValid) +DEFINE_MOP(MOP_wstrb_l, {&OpndDesc::Reg32IS,&OpndDesc::AddressName},ISSTORE|CANTHROW,kLtStore1,"strb","0,1",1,StrLdr8ImmValid) + +/* Store Register Halfword */ +DEFINE_MOP(MOP_wstrh_r, {&OpndDesc::Reg32IS,&OpndDesc::Reg64IS},ISSTORE|CANTHROW,kLtStore1,"strh","0,1",1,StrLdr16ImmValid) +DEFINE_MOP(MOP_wstrh_ri, {&OpndDesc::Reg32IS,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISSTORE|CANTHROW,kLtStore1,"strh","0,1",1,StrLdr16ImmValid) +DEFINE_MOP(MOP_wstrh_rr, {&OpndDesc::Reg32IS,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},ISSTORE|CANTHROW,kLtStore1,"strh","0,1",1,StrLdr16ImmValid) +DEFINE_MOP(MOP_wstrh_rex, {&OpndDesc::Reg32IS,&OpndDesc::Reg64IS,&OpndDesc::Reg32IS,&OpndDesc::Extendshift64},ISSTORE|CANTHROW,kLtStore1,"strh","0,1",1,StrLdr16ImmValid) +DEFINE_MOP(MOP_wstrh_rls, {&OpndDesc::Reg32IS,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Bitshift64},ISSTORE|CANTHROW,kLtStore1,"strh","0,1",1,StrLdr16ImmValid) +DEFINE_MOP(MOP_wstrh_rlo, {&OpndDesc::Reg32IS,&OpndDesc::Reg64IS,&OpndDesc::AddressName,&OpndDesc::Imm8},ISSTORE|CANTHROW,kLtStore1,"strh","0,1",1,StrLdr16ImmValid) +DEFINE_MOP(MOP_wstrh_pri, {&OpndDesc::Reg32IS,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISSTORE|CANTHROW,kLtStore1,"strh","0,1",1,StrLdr16ImmValid) +DEFINE_MOP(MOP_wstrh_poi, {&OpndDesc::Reg32IS,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISSTORE|CANTHROW,kLtStore1,"strh","0,1",1,StrLdr16ImmValid) +DEFINE_MOP(MOP_wstrh_l, {&OpndDesc::Reg32IS,&OpndDesc::AddressName},ISSTORE|CANTHROW,kLtStore1,"strh","0,1",1,StrLdr16ImmValid) + +/* Store Register Word */ +DEFINE_MOP(MOP_wstr_r, {&OpndDesc::Reg32IS,&OpndDesc::Reg64IS},ISSTORE|CANTHROW,kLtStore1,"str","0,1",1,StrLdr32ImmValid) +DEFINE_MOP(MOP_wstr_ri, 
{&OpndDesc::Reg32IS,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISSTORE|CANTHROW,kLtStore1,"str","0,1",1,StrLdr32ImmValid) +DEFINE_MOP(MOP_wstr_rr, {&OpndDesc::Reg32IS,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},ISSTORE|CANTHROW,kLtStore1,"str","0,1",1,StrLdr32ImmValid) +DEFINE_MOP(MOP_wstr_rex, {&OpndDesc::Reg32IS,&OpndDesc::Reg64IS,&OpndDesc::Reg32IS,&OpndDesc::Extendshift64},ISSTORE|CANTHROW,kLtStore1,"str","0,1",1,StrLdr32ImmValid) +DEFINE_MOP(MOP_wstr_rls, {&OpndDesc::Reg32IS,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Bitshift64},ISSTORE|CANTHROW,kLtStore1,"str","0,1",1,StrLdr32ImmValid) +DEFINE_MOP(MOP_wstr_rlo, {&OpndDesc::Reg32IS,&OpndDesc::Reg64IS,&OpndDesc::AddressName,&OpndDesc::Imm8},ISSTORE|CANTHROW,kLtStore1,"str","0,1",1,StrLdr32ImmValid) +DEFINE_MOP(MOP_wstr_pri, {&OpndDesc::Reg32IS,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISSTORE|CANTHROW,kLtStore1,"str","0,1",1,StrLdr32ImmValid) +DEFINE_MOP(MOP_wstr_poi, {&OpndDesc::Reg32IS,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISSTORE|CANTHROW,kLtStore1,"str","0,1",1,StrLdr32ImmValid) +DEFINE_MOP(MOP_wstr_l, {&OpndDesc::Reg32IS,&OpndDesc::AddressName},ISSTORE|CANTHROW,kLtStore1,"str","0,1",1,StrLdr32ImmValid) + +/* Store Register Double word */ +DEFINE_MOP(MOP_xstr_r, {&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},ISSTORE|CANTHROW,kLtStore2,"str","0,1",1,StrLdr64ImmValid) +DEFINE_MOP(MOP_xstr_ri, {&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISSTORE|CANTHROW,kLtStore2,"str","0,1",1,StrLdr64ImmValid) +DEFINE_MOP(MOP_xstr_rr, {&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},ISSTORE|CANTHROW,kLtStore2,"str","0,1",1,StrLdr64ImmValid) +DEFINE_MOP(MOP_xstr_rex, {&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Reg32IS,&OpndDesc::Extendshift64},ISSTORE|CANTHROW,kLtStore2,"str","0,1",1,StrLdr64ImmValid) +DEFINE_MOP(MOP_xstr_rls, {&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Bitshift64},ISSTORE|CANTHROW,kLtStore2,"str","0,1",1,StrLdr64ImmValid) +DEFINE_MOP(MOP_xstr_rlo, {&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::AddressName,&OpndDesc::Imm8},ISSTORE|CANTHROW,kLtStore2,"str","0,1",1,StrLdr64ImmValid) +DEFINE_MOP(MOP_xstr_pri, {&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISSTORE|CANTHROW,kLtStore2,"str","0,1",1,StrLdr64ImmValid) +DEFINE_MOP(MOP_xstr_poi, {&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISSTORE|CANTHROW,kLtStore2,"str","0,1",1,StrLdr64ImmValid) +DEFINE_MOP(MOP_xstr_l, {&OpndDesc::Reg64IS,&OpndDesc::AddressName},ISSTORE|CANTHROW,kLtStore2,"str","0,1",1,StrLdr64ImmValid) + +/* Store Register SIMD/FP Float */ +DEFINE_MOP(MOP_sstr_r, {&OpndDesc::Reg32FS,&OpndDesc::Reg64IS},ISSTORE|CANTHROW,kLtStore2,"str","0,1",1,StrLdr32ImmValid) +DEFINE_MOP(MOP_sstr_ri, {&OpndDesc::Reg32FS,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISSTORE|CANTHROW,kLtStore2,"str","0,1",1,StrLdr32ImmValid) +DEFINE_MOP(MOP_sstr_rr, {&OpndDesc::Reg32FS,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},ISSTORE|CANTHROW,kLtStore2,"str","0,1",1,StrLdr32ImmValid) +DEFINE_MOP(MOP_sstr_rex, {&OpndDesc::Reg32FS,&OpndDesc::Reg64IS,&OpndDesc::Reg32IS,&OpndDesc::Extendshift64},ISSTORE|CANTHROW,kLtStore2,"str","0,1",1,StrLdr32ImmValid) +DEFINE_MOP(MOP_sstr_rls, {&OpndDesc::Reg32FS,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Bitshift64},ISSTORE|CANTHROW,kLtStore2,"str","0,1",1,StrLdr32ImmValid) +DEFINE_MOP(MOP_sstr_rlo, {&OpndDesc::Reg32FS,&OpndDesc::Reg64IS,&OpndDesc::AddressName,&OpndDesc::Imm8},ISSTORE|CANTHROW,kLtStore2,"str","0,1",1,StrLdr32ImmValid) +DEFINE_MOP(MOP_sstr_pri, 
{&OpndDesc::Reg32FS,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISSTORE|CANTHROW,kLtStore2,"str","0,1",1,StrLdr32ImmValid) +DEFINE_MOP(MOP_sstr_poi, {&OpndDesc::Reg32FS,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISSTORE|CANTHROW,kLtStore2,"str","0,1",1,StrLdr32ImmValid) +DEFINE_MOP(MOP_sstr_l, {&OpndDesc::Reg32FS,&OpndDesc::AddressName},ISSTORE|CANTHROW,kLtStore2,"str","0,1",1,StrLdr32ImmValid) + +/* Store Register SIMD/FP Double */ +DEFINE_MOP(MOP_dstr_r, {&OpndDesc::Reg64FS,&OpndDesc::Reg64IS},ISSTORE|CANTHROW,kLtStore3plus,"str","0,1",1,StrLdr64ImmValid) +DEFINE_MOP(MOP_dstr_ri, {&OpndDesc::Reg64FS,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISSTORE|CANTHROW,kLtStore3plus,"str","0,1",1,StrLdr64ImmValid) +DEFINE_MOP(MOP_dstr_rr, {&OpndDesc::Reg64FS,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},ISSTORE|CANTHROW,kLtStore3plus,"str","0,1",1,StrLdr64ImmValid) +DEFINE_MOP(MOP_dstr_rex, {&OpndDesc::Reg64FS,&OpndDesc::Reg64IS,&OpndDesc::Reg32IS,&OpndDesc::Extendshift64},ISSTORE|CANTHROW,kLtStore3plus,"str","0,1",1,StrLdr64ImmValid) +DEFINE_MOP(MOP_dstr_rls, {&OpndDesc::Reg64FS,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Bitshift64},ISSTORE|CANTHROW,kLtStore3plus,"str","0,1",1,StrLdr64ImmValid) +DEFINE_MOP(MOP_dstr_rlo, {&OpndDesc::Reg64FS,&OpndDesc::Reg64IS,&OpndDesc::AddressName,&OpndDesc::Imm8},ISSTORE|CANTHROW,kLtStore3plus,"str","0,1",1,StrLdr64ImmValid) +DEFINE_MOP(MOP_dstr_pri, {&OpndDesc::Reg64FS,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISSTORE|CANTHROW,kLtStore3plus,"str","0,1",1,StrLdr64ImmValid) +DEFINE_MOP(MOP_dstr_poi, {&OpndDesc::Reg64FS,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISSTORE|CANTHROW,kLtStore3plus,"str","0,1",1,StrLdr64ImmValid) +DEFINE_MOP(MOP_dstr_l, {&OpndDesc::Reg64FS,&OpndDesc::AddressName},ISSTORE|CANTHROW,kLtStore3plus,"str","0,1",1,StrLdr64ImmValid) + +/* MOP_qstr -- Store Register SIMD/FP Double */ +DEFINE_MOP(MOP_qstr_r, {&OpndDesc::Reg128VS,&OpndDesc::Reg64IS},ISSTORE|CANTHROW,kLtStore3plus,"str","0,1",1,StrLdr128ImmValid) +DEFINE_MOP(MOP_qstr_ri, {&OpndDesc::Reg128VS,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISSTORE|CANTHROW,kLtStore3plus,"str","0,1",1,StrLdr128ImmValid) +DEFINE_MOP(MOP_qstr_rr, {&OpndDesc::Reg128VS,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},ISSTORE|CANTHROW,kLtStore3plus,"str","0,1",1,StrLdr128ImmValid) +DEFINE_MOP(MOP_qstr_rex, {&OpndDesc::Reg128VS,&OpndDesc::Reg64IS,&OpndDesc::Reg32IS,&OpndDesc::Extendshift64},ISSTORE|CANTHROW,kLtStore3plus,"str","0,1",1,StrLdr128ImmValid) +DEFINE_MOP(MOP_qstr_rls, {&OpndDesc::Reg128VS,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Bitshift64},ISSTORE|CANTHROW,kLtStore3plus,"str","0,1",1,StrLdr128ImmValid) +DEFINE_MOP(MOP_qstr_rlo, {&OpndDesc::Reg128VS,&OpndDesc::Reg64IS,&OpndDesc::AddressName,&OpndDesc::Imm8},ISSTORE|CANTHROW,kLtStore3plus,"str","0,1",1,StrLdr128ImmValid) +DEFINE_MOP(MOP_qstr_pri, {&OpndDesc::Reg128VS,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISSTORE|CANTHROW,kLtStore3plus,"str","0,1",1,StrLdr128ImmValid) +DEFINE_MOP(MOP_qstr_poi, {&OpndDesc::Reg128VS,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISSTORE|CANTHROW,kLtStore3plus,"str","0,1",1,StrLdr128ImmValid) +DEFINE_MOP(MOP_qstr_l, {&OpndDesc::Reg128VS,&OpndDesc::AddressName},ISSTORE|CANTHROW,kLtStore3plus,"str","0,1",1,StrLdr128ImmValid) + +/* store to pair reg */ +DEFINE_MOP(MOP_wstp_r, {&OpndDesc::Reg32IS,&OpndDesc::Reg32IS,&OpndDesc::Reg64IS},ISSTORE|ISSTOREPAIR|CANTHROW,kLtStore2,"stp","0,1,2",1,StrLdr32PairImmValid) +DEFINE_MOP(MOP_wstp_ri, {&OpndDesc::Reg32IS,&OpndDesc::Reg32IS,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISSTORE|ISSTOREPAIR|CANTHROW,kLtStore2,"stp","0,1,2",1,StrLdr32PairImmValid) 
+DEFINE_MOP(MOP_wstp_rr, {&OpndDesc::Reg32IS,&OpndDesc::Reg32IS,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},ISSTORE|ISSTOREPAIR|CANTHROW,kLtStore2,"stp","0,1,2",1,StrLdr32PairImmValid) +DEFINE_MOP(MOP_wstp_rex, {&OpndDesc::Reg32IS,&OpndDesc::Reg32IS,&OpndDesc::Reg64IS,&OpndDesc::Reg32IS,&OpndDesc::Extendshift64},ISSTORE|ISSTOREPAIR|CANTHROW,kLtStore2,"stp","0,1,2",1,StrLdr32PairImmValid) +DEFINE_MOP(MOP_wstp_rls, {&OpndDesc::Reg32IS,&OpndDesc::Reg32IS,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Bitshift64},ISSTORE|ISSTOREPAIR|CANTHROW,kLtStore2,"stp","0,1,2",1,StrLdr32PairImmValid) +DEFINE_MOP(MOP_wstp_rlo, {&OpndDesc::Reg32IS,&OpndDesc::Reg32IS,&OpndDesc::Reg64IS,&OpndDesc::AddressName,&OpndDesc::Imm8},ISSTORE|ISSTOREPAIR|CANTHROW,kLtStore2,"stp","0,1,2",1,StrLdr32PairImmValid) +DEFINE_MOP(MOP_wstp_pri, {&OpndDesc::Reg32IS,&OpndDesc::Reg32IS,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISSTORE|ISSTOREPAIR|CANTHROW,kLtStore2,"stp","0,1,2",1,StrLdr32PairImmValid) +DEFINE_MOP(MOP_wstp_poi, {&OpndDesc::Reg32IS,&OpndDesc::Reg32IS,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISSTORE|ISSTOREPAIR|CANTHROW,kLtStore2,"stp","0,1,2",1,StrLdr32PairImmValid) +DEFINE_MOP(MOP_wstp_l, {&OpndDesc::Reg32IS,&OpndDesc::Reg32IS,&OpndDesc::AddressName},ISSTORE|ISSTOREPAIR|CANTHROW,kLtStore2,"stp","0,1,2",1,StrLdr32PairImmValid) + +/* MOP_xstp */ +DEFINE_MOP(MOP_xstp_r, {&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},ISSTORE|ISSTOREPAIR|CANTHROW,kLtStore3plus,"stp","0,1,2",1,StrLdr32PairImmValid) +DEFINE_MOP(MOP_xstp_ri, {&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISSTORE|ISSTOREPAIR|CANTHROW,kLtStore3plus,"stp","0,1,2",1,StrLdr32PairImmValid) +DEFINE_MOP(MOP_xstp_rr, {&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},ISSTORE|ISSTOREPAIR|CANTHROW,kLtStore3plus,"stp","0,1,2",1,StrLdr32PairImmValid) +DEFINE_MOP(MOP_xstp_rex, {&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Reg32IS,&OpndDesc::Extendshift64},ISSTORE|ISSTOREPAIR|CANTHROW,kLtStore3plus,"stp","0,1,2",1,StrLdr32PairImmValid) +DEFINE_MOP(MOP_xstp_rls, {&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Bitshift64},ISSTORE|ISSTOREPAIR|CANTHROW,kLtStore3plus,"stp","0,1,2",1,StrLdr32PairImmValid) +DEFINE_MOP(MOP_xstp_rlo, {&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::AddressName,&OpndDesc::Imm8},ISSTORE|ISSTOREPAIR|CANTHROW,kLtStore3plus,"stp","0,1,2",1,StrLdr32PairImmValid) +DEFINE_MOP(MOP_xstp_pri, {&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISSTORE|ISSTOREPAIR|CANTHROW,kLtStore3plus,"stp","0,1,2",1,StrLdr32PairImmValid) +DEFINE_MOP(MOP_xstp_poi, {&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISSTORE|ISSTOREPAIR|CANTHROW,kLtStore3plus,"stp","0,1,2",1,StrLdr32PairImmValid) +DEFINE_MOP(MOP_xstp_l, {&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::AddressName},ISSTORE|ISSTOREPAIR|CANTHROW,kLtStore3plus,"stp","0,1,2",1,StrLdr32PairImmValid) + +/* AArch64 does not define STPSW. It has no practical value. 
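A store of a W register already writes only the low 32 bits, so a sign-extending store pair would be indistinguishable from STP of W registers; only the load form (LDPSW) needs to exist.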
*/ +/* MOP_sstp */ +DEFINE_MOP(MOP_sstp_r, {&OpndDesc::Reg32FS,&OpndDesc::Reg32FS,&OpndDesc::Reg64IS},ISSTORE|ISSTOREPAIR|CANTHROW,kLtAdvsimdMulQ,"stp","0,1,2",1,StrLdr32PairImmValid) +DEFINE_MOP(MOP_sstp_ri, {&OpndDesc::Reg32FS,&OpndDesc::Reg32FS,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISSTORE|ISSTOREPAIR|CANTHROW,kLtAdvsimdMulQ,"stp","0,1,2",1,StrLdr32PairImmValid) +DEFINE_MOP(MOP_sstp_rr, {&OpndDesc::Reg32FS,&OpndDesc::Reg32FS,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},ISSTORE|ISSTOREPAIR|CANTHROW,kLtAdvsimdMulQ,"stp","0,1,2",1,StrLdr32PairImmValid) +DEFINE_MOP(MOP_sstp_rex, {&OpndDesc::Reg32FS,&OpndDesc::Reg32FS,&OpndDesc::Reg64IS,&OpndDesc::Reg32IS,&OpndDesc::Extendshift64},ISSTORE|ISSTOREPAIR|CANTHROW,kLtAdvsimdMulQ,"stp","0,1,2",1,StrLdr32PairImmValid) +DEFINE_MOP(MOP_sstp_rls, {&OpndDesc::Reg32FS,&OpndDesc::Reg32FS,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Bitshift64},ISSTORE|ISSTOREPAIR|CANTHROW,kLtAdvsimdMulQ,"stp","0,1,2",1,StrLdr32PairImmValid) +DEFINE_MOP(MOP_sstp_rlo, {&OpndDesc::Reg32FS,&OpndDesc::Reg32FS,&OpndDesc::Reg64IS,&OpndDesc::AddressName,&OpndDesc::Imm8},ISSTORE|ISSTOREPAIR|CANTHROW,kLtAdvsimdMulQ,"stp","0,1,2",1,StrLdr32PairImmValid) +DEFINE_MOP(MOP_sstp_pri, {&OpndDesc::Reg32FS,&OpndDesc::Reg32FS,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISSTORE|ISSTOREPAIR|CANTHROW,kLtAdvsimdMulQ,"stp","0,1,2",1,StrLdr32PairImmValid) +DEFINE_MOP(MOP_sstp_poi, {&OpndDesc::Reg32FS,&OpndDesc::Reg32FS,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISSTORE|ISSTOREPAIR|CANTHROW,kLtAdvsimdMulQ,"stp","0,1,2",1,StrLdr32PairImmValid) +DEFINE_MOP(MOP_sstp_l, {&OpndDesc::Reg32FS,&OpndDesc::Reg32FS,&OpndDesc::AddressName},ISSTORE|ISSTOREPAIR|CANTHROW,kLtAdvsimdMulQ,"stp","0,1,2",1,StrLdr32PairImmValid) + +/* MOP_dstp */ +DEFINE_MOP(MOP_dstp_r, {&OpndDesc::Reg64FS,&OpndDesc::Reg64FS,&OpndDesc::Reg64IS},ISSTORE|ISSTOREPAIR|CANTHROW,kLtAdvsimdMulQ,"stp","0,1,2",1,StrLdr64PairImmValid) +DEFINE_MOP(MOP_dstp_ri, {&OpndDesc::Reg64FS,&OpndDesc::Reg64FS,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISSTORE|ISSTOREPAIR|CANTHROW,kLtAdvsimdMulQ,"stp","0,1,2",1,StrLdr64PairImmValid) +DEFINE_MOP(MOP_dstp_rr, {&OpndDesc::Reg64FS,&OpndDesc::Reg64FS,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},ISSTORE|ISSTOREPAIR|CANTHROW,kLtAdvsimdMulQ,"stp","0,1,2",1,StrLdr64PairImmValid) +DEFINE_MOP(MOP_dstp_rex, {&OpndDesc::Reg64FS,&OpndDesc::Reg64FS,&OpndDesc::Reg64IS,&OpndDesc::Reg32IS,&OpndDesc::Extendshift64},ISSTORE|ISSTOREPAIR|CANTHROW,kLtAdvsimdMulQ,"stp","0,1,2",1,StrLdr64PairImmValid) +DEFINE_MOP(MOP_dstp_rls, {&OpndDesc::Reg64FS,&OpndDesc::Reg64FS,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Bitshift64},ISSTORE|ISSTOREPAIR|CANTHROW,kLtAdvsimdMulQ,"stp","0,1,2",1,StrLdr64PairImmValid) +DEFINE_MOP(MOP_dstp_rlo, {&OpndDesc::Reg64FS,&OpndDesc::Reg64FS,&OpndDesc::Reg64IS,&OpndDesc::AddressName,&OpndDesc::Imm8},ISSTORE|ISSTOREPAIR|CANTHROW,kLtAdvsimdMulQ,"stp","0,1,2",1,StrLdr64PairImmValid) +DEFINE_MOP(MOP_dstp_pri, {&OpndDesc::Reg64FS,&OpndDesc::Reg64FS,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISSTORE|ISSTOREPAIR|CANTHROW,kLtAdvsimdMulQ,"stp","0,1,2",1,StrLdr64PairImmValid) +DEFINE_MOP(MOP_dstp_poi, {&OpndDesc::Reg64FS,&OpndDesc::Reg64FS,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISSTORE|ISSTOREPAIR|CANTHROW,kLtAdvsimdMulQ,"stp","0,1,2",1,StrLdr64PairImmValid) +DEFINE_MOP(MOP_dstp_l, {&OpndDesc::Reg64FS,&OpndDesc::Reg64FS,&OpndDesc::AddressName},ISSTORE|ISSTOREPAIR|CANTHROW,kLtAdvsimdMulQ,"stp","0,1,2",1,StrLdr64PairImmValid) + +/* MOP_qstp */ +DEFINE_MOP(MOP_qstp_r, 
{&OpndDesc::Reg128VS,&OpndDesc::Reg128VS,&OpndDesc::Reg64IS},ISSTORE|ISSTOREPAIR|CANTHROW,kLtAdvsimdMulQ,"stp","0,1,2",1,StrLdr128PairImmValid) +DEFINE_MOP(MOP_qstp_ri, {&OpndDesc::Reg128VS,&OpndDesc::Reg128VS,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISSTORE|ISSTOREPAIR|CANTHROW,kLtAdvsimdMulQ,"stp","0,1,2",1,StrLdr128PairImmValid) +DEFINE_MOP(MOP_qstp_rr, {&OpndDesc::Reg128VS,&OpndDesc::Reg128VS,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},ISSTORE|ISSTOREPAIR|CANTHROW,kLtAdvsimdMulQ,"stp","0,1,2",1,StrLdr128PairImmValid) +DEFINE_MOP(MOP_qstp_rex, {&OpndDesc::Reg128VS,&OpndDesc::Reg128VS,&OpndDesc::Reg64IS,&OpndDesc::Reg32IS,&OpndDesc::Extendshift64},ISSTORE|ISSTOREPAIR|CANTHROW,kLtAdvsimdMulQ,"stp","0,1,2",1,StrLdr128PairImmValid) +DEFINE_MOP(MOP_qstp_rls, {&OpndDesc::Reg128VS,&OpndDesc::Reg128VS,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Bitshift64},ISSTORE|ISSTOREPAIR|CANTHROW,kLtAdvsimdMulQ,"stp","0,1,2",1,StrLdr128PairImmValid) +DEFINE_MOP(MOP_qstp_rlo, {&OpndDesc::Reg128VS,&OpndDesc::Reg128VS,&OpndDesc::Reg64IS,&OpndDesc::AddressName,&OpndDesc::Imm8},ISSTORE|ISSTOREPAIR|CANTHROW,kLtAdvsimdMulQ,"stp","0,1,2",1,StrLdr128PairImmValid) +DEFINE_MOP(MOP_qstp_pri, {&OpndDesc::Reg128VS,&OpndDesc::Reg128VS,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISSTORE|ISSTOREPAIR|CANTHROW,kLtAdvsimdMulQ,"stp","0,1,2",1,StrLdr128PairImmValid) +DEFINE_MOP(MOP_qstp_poi, {&OpndDesc::Reg128VS,&OpndDesc::Reg128VS,&OpndDesc::Reg64IS,&OpndDesc::Imm8},ISSTORE|ISSTOREPAIR|CANTHROW,kLtAdvsimdMulQ,"stp","0,1,2",1,StrLdr128PairImmValid) +DEFINE_MOP(MOP_qstp_l, {&OpndDesc::Reg128VS,&OpndDesc::Reg128VS,&OpndDesc::AddressName},ISSTORE|ISSTOREPAIR|CANTHROW,kLtAdvsimdMulQ,"stp","0,1,2",1,StrLdr128PairImmValid) + +/* AARCH64 Store with Release semantics */ +/* MOP_wstlrb -- Store-Release Register Byte */ +DEFINE_MOP(MOP_wstlrb_r, {&OpndDesc::Reg32IS,&OpndDesc::Reg64IS},ISSTORE|ISATOMIC|HASRELEASE|CANTHROW,kLtStore1,"stlrb","0,1",1,StrLdr8ImmValid) +/* MOP_wstlrh -- Store-Release Register Halfword */ +DEFINE_MOP(MOP_wstlrh_r, {&OpndDesc::Reg32IS,&OpndDesc::Reg64IS},ISSTORE|ISATOMIC|HASRELEASE|CANTHROW,kLtStore1,"stlrh","0,1",1,StrLdr16ImmValid) +/* MOP_wstlr -- Store-Release Register Word */ +DEFINE_MOP(MOP_wstlr_r, {&OpndDesc::Reg32IS,&OpndDesc::Reg64IS},ISSTORE|ISATOMIC|HASRELEASE|CANTHROW,kLtStore1,"stlr","0,1",1,StrLdr32ImmValid) +/* MOP_xstlr -- Store-Release Register Double word */ +DEFINE_MOP(MOP_xstlr_r, {&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},ISSTORE|ISATOMIC|HASRELEASE|CANTHROW,kLtStore1,"stlr","0,1",1,StrLdr64ImmValid) + +DEFINE_MOP(MOP_wstxrb_r, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Reg64IS},ISSTORE|ISATOMIC|HASRELEASE|CANTHROW,kLtStore1,"stxrb","0,1,2",1,StrLdr8ImmValid) +DEFINE_MOP(MOP_wstxrh_r, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Reg64IS},ISSTORE|ISATOMIC|HASRELEASE|CANTHROW,kLtStore1,"stxrh","0,1,2",1,StrLdr16ImmValid) +DEFINE_MOP(MOP_wstxr_r, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Reg64IS},ISSTORE|ISATOMIC|HASRELEASE|CANTHROW,kLtStore1,"stxr","0,1,2",1,StrLdr32ImmValid) +DEFINE_MOP(MOP_xstxr_r, {&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},ISSTORE|ISATOMIC|HASRELEASE|CANTHROW,kLtStore1,"stxr","0,1,2",1,StrLdr64ImmValid) + +DEFINE_MOP(MOP_wstlxp_r, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS,&OpndDesc::Reg32IS,&OpndDesc::Reg64IS},ISSTORE|ISSTOREPAIR|ISATOMIC|HASRELEASE|CANTHROW,kLtStore1,"stlxp","0,1,2,3",1,StrLdr64ImmValid) +DEFINE_MOP(MOP_xstlxp_r, 
{&OpndDesc::Reg32ID,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS,&OpndDesc::Reg64IS},ISSTORE|ISSTOREPAIR|ISATOMIC|HASRELEASE|CANTHROW,kLtStore1,"stlxp","0,1,2,3",1,StrLdr64ImmValid) + diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_memlayout.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_memlayout.h new file mode 100644 index 0000000000000000000000000000000000000000..0b3a1d7942b4a194c52bec19c6cfe0642d7660fe --- /dev/null +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_memlayout.h @@ -0,0 +1,214 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_MEMLAYOUT_H +#define MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_MEMLAYOUT_H + +#include "memlayout.h" +#include "aarch64_abi.h" + +namespace maplebe { +class AArch64SymbolAlloc : public SymbolAlloc { + public: + AArch64SymbolAlloc() = default; + + ~AArch64SymbolAlloc() = default; + + void SetRegisters(AArch64reg r0, AArch64reg r1, AArch64reg r2, AArch64reg r3) { + reg0 = r0; + reg1 = r1; + reg2 = r2; + reg3 = r3; + } + + inline bool IsRegister() const { + return reg0 != kRinvalid; + } + + private: + AArch64reg reg0 = kRinvalid; + AArch64reg reg1 = kRinvalid; + AArch64reg reg2 = kRinvalid; + AArch64reg reg3 = kRinvalid; +}; + +/* + * On AArch64, stack frames are structured as follows: + * + * The stack grows downward -- full descending (SP points + * to a filled slot). + * + * Any of the parts of a frame is optional, i.e., it is + * possible to write a caller-callee pair in such a way + * that the particular part is absent in the frame. + * + * Before a call is made, the frame looks like: + * | | + * ||----------------------------| + * | args passed on the stack | (we call them up-formals) + * ||----------------------------|<- Stack Pointer + * | | + * + * V1. + * Right after a call is made + * | | + * ||----------------------------| + * | args passed on the stack | + * ||----------------------------|<- Stack Pointer + * | PREV_FP, PREV_LR | + * ||----------------------------|<- Frame Pointer + * + * After the prologue has run, + * | | + * ||----------------------------| + * | args passed on the stack | + * ||----------------------------| + * | PREV_FP, PREV_LR | + * ||----------------------------|<- Frame Pointer + * | callee-saved registers | + * ||----------------------------| + * | empty space. should have | + * | at least 16-byte alignment | + * ||----------------------------| + * | local variables | + * ||----------------------------| + * | variable-sized local vars | + * | (VLAs) | + * ||----------------------------|<- Stack Pointer + * + * callee-saved registers include + * 1. R19-R28 + * 2. R8 if return value needs to be returned + * thru memory and callee wants to use R8 + * 3. we don't need to save R19 if it is used + * as base register for PIE. + * 4. V8-V15 + * + * V2. 
(this way, we may be able to save + * on SP modifying instruction) + * Right after a call is made + * | | + * ||----------------------------| + * | args passed on the stack | + * ||----------------------------|<- Stack Pointer + * | | + * | empty space | + * | | + * ||----------------------------| + * | PREV_FP, PREV_LR | + * ||----------------------------|<- Frame Pointer + * + * After the prologue has run, + * | | + * ||----------------------------| + * | args passed on the stack | + * ||----------------------------| + * | callee-saved registers | + * | including those used for | + * | parameter passing | + * ||----------------------------| + * | empty space. should have | + * | at least 16-byte alignment | + * ||----------------------------| + * | local variables | + * ||----------------------------| + * | PREV_FP, PREV_LR | + * ||----------------------------|<- Frame Pointer + * | variable-sized local vars | + * | (VLAs) | + * ||----------------------------| + * | args to pass through stack | + * ||----------------------------| + */ +class AArch64MemLayout : public MemLayout { + public: + AArch64MemLayout(BECommon &b, MIRFunction &f, MapleAllocator &mallocator) + : MemLayout(b, f, mallocator, kAarch64StackPtrAlignment) {} + + ~AArch64MemLayout() override = default; + + /* + * Returns stack space required for a call + * which is used to pass arguments that cannot be + * passed through registers + */ + uint32 ComputeStackSpaceRequirementForCall(StmtNode &stmt, int32 &aggCopySize, bool isIcall) override; + + void LayoutStackFrame(int32 &structCopySize, int32 &maxParmStackSize) override; + + void AssignSpillLocationsToPseudoRegisters() override; + + SymbolAlloc *AssignLocationToSpillReg(regno_t vrNum) override; + + uint64 StackFrameSize() const; + + uint32 RealStackFrameSize() const; + + const MemSegment &Locals() const { + return segLocals; + } + + uint32 GetSizeOfSpillReg() const { + return segSpillReg.GetSize(); + } + + uint32 GetSizeOfLocals() const { + return segLocals.GetSize(); + } + + void SetSizeOfGRSaveArea(uint32 sz) { + segGrSaveArea.SetSize(sz); + } + + uint32 GetSizeOfGRSaveArea() const { + return segGrSaveArea.GetSize(); + } + + inline void SetSizeOfVRSaveArea(uint32 sz) { + segVrSaveArea.SetSize(sz); + } + + uint32 GetSizeOfVRSaveArea() const { + return segVrSaveArea.GetSize(); + } + + uint32 GetSizeOfRefLocals() const { + return segRefLocals.GetSize(); + } + + int32 GetRefLocBaseLoc() const; + int32 GetGRSaveAreaBaseLoc() const; + int32 GetVRSaveAreaBaseLoc() const; + int32 GetCalleeSaveBaseLoc() const override; + + private: + MemSegment segRefLocals = MemSegment(kMsRefLocals); + /* callee saved register R19-R28 (10) */ + MemSegment segSpillReg = MemSegment(kMsSpillReg); + MemSegment segLocals = MemSegment(kMsLocals); /* these are accessed via Frame Pointer */ + MemSegment segGrSaveArea = MemSegment(kMsGrSaveArea); + MemSegment segVrSaveArea = MemSegment(kMsVrSaveArea); + int32 fixStackSize = 0; + void SetSizeAlignForTypeIdx(uint32 typeIdx, uint32 &size, uint32 &align) const; + void SetSegmentSize(AArch64SymbolAlloc &symbolAlloc, MemSegment &segment, uint32 typeIdx) const; + void LayoutVarargParams(); + void LayoutFormalParams(); + void LayoutActualParams(); + void LayoutLocalVariables(std::vector &tempVar, std::vector &returnDelays); + void LayoutEAVariales(std::vector &tempVar); + void LayoutReturnRef(std::vector &returnDelays, int32 &structCopySize, int32 &maxParmStackSize); +}; +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_MEMLAYOUT_H */ 
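The frame layout described in the comment above can be made concrete with a small standalone sketch. This is an illustration only: the struct, its field names, and the example sizes are assumptions for the sketch, not the actual AArch64MemLayout code. It shows how the segments listed above add up to a frame size that is rounded to the 16-byte stack-pointer alignment AArch64 requires.

#include <cstdint>
#include <iostream>

namespace frame_sketch {
constexpr uint64_t kStackPtrAlignment = 16;  // SP must stay 16-byte aligned on AArch64

// Round a byte count up to the next multiple of 'align' (align must be a power of two).
constexpr uint64_t RoundUp(uint64_t size, uint64_t align) {
  return (size + align - 1) & ~(align - 1);
}

// Hypothetical per-segment byte sizes mirroring the segments named in the layout comment.
struct FrameSketch {
  uint64_t outgoingArgs;   // args to pass through the stack
  uint64_t calleeSaved;    // R19-R28 / V8-V15 plus PREV_FP, PREV_LR
  uint64_t grSaveArea;     // varargs general-register save area
  uint64_t vrSaveArea;     // varargs vector-register save area
  uint64_t refLocals;
  uint64_t locals;
  uint64_t spills;

  uint64_t Total() const {
    const uint64_t raw = outgoingArgs + calleeSaved + grSaveArea + vrSaveArea +
                         refLocals + locals + spills;
    return RoundUp(raw, kStackPtrAlignment);
  }
};
}  // namespace frame_sketch

int main() {
  const frame_sketch::FrameSketch f{16, 96, 0, 0, 8, 40, 24};
  std::cout << "frame size: " << f.Total() << " bytes\n";  // 184 rounds up to 192
  return 0;
}

The real StackFrameSize/RealStackFrameSize computation will differ in detail (per-segment alignment, varargs handling, and so on); the sketch only captures how the segments compose under the 16-byte alignment rule.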
diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_offset_adjust.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_offset_adjust.h new file mode 100644 index 0000000000000000000000000000000000000000..28aa82bdfd3cdc5d15f69e0db220cf1dc777a611 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_offset_adjust.h @@ -0,0 +1,41 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_OFFSET_ADJUST_H +#define MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_OFFSET_ADJUST_H + +#include "offset_adjust.h" +#include "aarch64_cgfunc.h" + +namespace maplebe { +using namespace maple; + +class AArch64FPLROffsetAdjustment : public FrameFinalize { + public: + explicit AArch64FPLROffsetAdjustment(CGFunc &func) : FrameFinalize(func) {} + + ~AArch64FPLROffsetAdjustment() override = default; + + void Run() override; + + private: + void AdjustmentOffsetForOpnd(Insn &insn, AArch64CGFunc &aarchCGFunc) const; + void AdjustmentOffsetForImmOpnd(Insn &insn, uint32 index, AArch64CGFunc &aarchCGFunc) const; + void AdjustmentOffsetForFPLR(); + /* frame pointer(x29) is available as a general-purpose register if useFP is set as false */ + void AdjustmentStackPointer(Insn &insn, AArch64CGFunc &aarchCGFunc) const; +}; +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_OFFSET_ADJUST_H */ \ No newline at end of file diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_operand.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_operand.h new file mode 100644 index 0000000000000000000000000000000000000000..c04b3775bc1ec02cce0d4e434ff7c1ec9132f6aa --- /dev/null +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_operand.h @@ -0,0 +1,49 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_OPERAND_H +#define MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_OPERAND_H + +#include +#include +#include +#include "aarch64_isa.h" +#include "operand.h" +#include "cg.h" +#include "emit.h" +#include "common_utils.h" + +namespace std { +template<> /* function-template-specialization */ +class std::hash { + public: + size_t operator()(const maplebe::MemOperand &x) const { + std::size_t seed = 0; + hash_combine(seed, x.GetAddrMode()); + hash_combine(seed, x.GetSize()); + maplebe::RegOperand *xb = x.GetBaseRegister(); + maplebe::RegOperand *xi = x.GetIndexRegister(); + if (xb != nullptr) { + hash_combine(seed, xb->GetRegisterNumber()); + hash_combine(seed, xb->GetSize()); + } + if (xi != nullptr) { + hash_combine(seed, xi->GetRegisterNumber()); + hash_combine(seed, xi->GetSize()); + } + return seed; + } +}; +} +#endif /* MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_OPERAND_H */ diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_opt_utiles.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_opt_utiles.h new file mode 100644 index 0000000000000000000000000000000000000000..52aa468d8e5d6f178460fed68a13f90bd3a46d89 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_opt_utiles.h @@ -0,0 +1,71 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef OPENARKCOMPILER_AARCH64_OPT_UTILES_H +#define OPENARKCOMPILER_AARCH64_OPT_UTILES_H +#include "types_def.h" + +namespace maplebe { +using namespace maple; +enum ExMOpType : uint8 { + kExUndef, + kExAdd, /* MOP_xaddrrr | MOP_xxwaddrrre | MOP_xaddrrrs */ + kEwAdd, /* MOP_waddrrr | MOP_wwwaddrrre | MOP_waddrrrs */ + kExSub, /* MOP_xsubrrr | MOP_xxwsubrrre | MOP_xsubrrrs */ + kEwSub, /* MOP_wsubrrr | MOP_wwwsubrrre | MOP_wsubrrrs */ + kExCmn, /* MOP_xcmnrr | MOP_xwcmnrre | MOP_xcmnrrs */ + kEwCmn, /* MOP_wcmnrr | MOP_wwcmnrre | MOP_wcmnrrs */ + kExCmp, /* MOP_xcmprr | MOP_xwcmprre | MOP_xcmprrs */ + kEwCmp, /* MOP_wcmprr | MOP_wwcmprre | MOP_wcmprrs */ +}; + +enum LsMOpType : uint8 { + kLsUndef, + kLxAdd, /* MOP_xaddrrr | MOP_xaddrrrs */ + kLwAdd, /* MOP_waddrrr | MOP_waddrrrs */ + kLxSub, /* MOP_xsubrrr | MOP_xsubrrrs */ + kLwSub, /* MOP_wsubrrr | MOP_wsubrrrs */ + kLxCmn, /* MOP_xcmnrr | MOP_xcmnrrs */ + kLwCmn, /* MOP_wcmnrr | MOP_wcmnrrs */ + kLxCmp, /* MOP_xcmprr | MOP_xcmprrs */ + kLwCmp, /* MOP_wcmprr | MOP_wcmprrs */ + kLxEor, /* MOP_xeorrrr | MOP_xeorrrrs */ + kLwEor, /* MOP_weorrrr | MOP_weorrrrs */ + kLxNeg, /* MOP_xinegrr | MOP_xinegrrs */ + kLwNeg, /* MOP_winegrr | MOP_winegrrs */ + kLxIor, /* MOP_xiorrrr | MOP_xiorrrrs */ + kLwIor, /* MOP_wiorrrr | MOP_wiorrrrs */ +}; + +enum SuffixType : uint8 { + kNoSuffix, /* no suffix or do not perform the optimization. 
*/ + kLSL, /* logical shift left */ + kLSR, /* logical shift right */ + kASR, /* arithmetic shift right */ + kROR, /* rotate shift right */ + kExten /* ExtendOp */ +}; + +inline constexpr uint32 kExtenAddShiftNum = 6; +inline SuffixType kDoOptimizeTable[kExtenAddShiftNum][kExtenAddShiftNum] = { + { kNoSuffix, kLSL, kLSR, kASR, kROR, kExten }, /* useType == kNoSuffix */ + { kNoSuffix, kLSL, kNoSuffix, kNoSuffix, kNoSuffix, kExten }, /* useType == kLSL */ + { kNoSuffix, kNoSuffix, kLSR, kNoSuffix, kNoSuffix, kNoSuffix }, /* useType == kLSR */ + { kNoSuffix, kNoSuffix, kNoSuffix, kASR, kNoSuffix, kNoSuffix }, /* useType == kASR */ + { kNoSuffix, kNoSuffix, kNoSuffix, kNoSuffix, kROR, kNoSuffix }, /* useType == kROR */ + { kNoSuffix, kNoSuffix, kNoSuffix, kNoSuffix, kNoSuffix, kExten }, /* useType == kExten */ +}; +} + +#endif /* OPENARKCOMPILER_AARCH64_OPT_UTILES_H */ diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_optimize_common.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_optimize_common.h new file mode 100644 index 0000000000000000000000000000000000000000..52ab2539cefc57769d8ccfea5859f4bccd33490b --- /dev/null +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_optimize_common.h @@ -0,0 +1,47 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_OPTIMIZE_COMMON_H +#define MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_OPTIMIZE_COMMON_H + +#include "aarch64_isa.h" +#include "optimize_common.h" + +namespace maplebe { +using namespace maple; + + +class AArch64InsnVisitor : public InsnVisitor { + public: + explicit AArch64InsnVisitor(CGFunc &func) : InsnVisitor(func) {} + + ~AArch64InsnVisitor() override = default; + + void ModifyJumpTarget(maple::LabelIdx targetLabel, BB &bb) override; + void ModifyJumpTarget(Operand &targetOperand, BB &bb) override; + void ModifyJumpTarget(BB &newTarget, BB &bb) override; + /* Check if it requires to add extra gotos when relocate bb */ + Insn *CloneInsn(Insn &originalInsn) override; + LabelIdx GetJumpLabel(const Insn &insn) const override; + bool IsCompareInsn(const Insn &insn) const override; + bool IsCompareAndBranchInsn(const Insn &insn) const override; + bool IsAddOrSubInsn(const Insn &insn) const override; + RegOperand *CreateVregFromReg(const RegOperand &pReg) override; + void ReTargetSuccBB(BB &bb, LabelIdx newTarget) const override; + void FlipIfBB(BB &bb, LabelIdx ftLabel) const override; + BB *CreateGotoBBAfterCondBB(BB &bb, BB &fallthru, bool isTargetFallthru) const override; +}; +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_OPTIMIZE_COMMON_H */ diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_peep.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_peep.h new file mode 100644 index 0000000000000000000000000000000000000000..d55c8ab75b5a1de5a0330da98034b2bbc01c9b07 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_peep.h @@ -0,0 +1,1641 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_PEEP_H +#define MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_PEEP_H + +#include +#include "peep.h" +#include "aarch64_cg.h" +#include "optimize_common.h" +#include "mir_builder.h" + +namespace maplebe { +class AArch64CGPeepHole : CGPeepHole { + public: + /* normal constructor */ + AArch64CGPeepHole(CGFunc &f, MemPool *memPool) : CGPeepHole(f, memPool) {}; + /* constructor for ssa */ + AArch64CGPeepHole(CGFunc &f, MemPool *memPool, CGSSAInfo *cgssaInfo) : CGPeepHole(f, memPool, cgssaInfo) {}; + ~AArch64CGPeepHole() = default; + + void Run() override; + bool DoSSAOptimize(BB &bb, Insn &insn) override; + void DoNormalOptimize(BB &bb, Insn &insn) override; +}; + +/* +* i. cmp x0, x1 +* cset w0, EQ ===> cmp x0, x1 +* cmp w0, #0 cset w0, EQ +* cset w0, NE +* +* ii. 
cmp x0, x1 +* cset w0, EQ ===> cmp x0, x1 +* cmp w0, #0 cset w0, NE +* cset w0, EQ +*/ +class ContinuousCmpCsetPattern : public CGPeepPattern { + public: + ContinuousCmpCsetPattern(CGFunc &cgFunc, BB &currBB, Insn &currInsn, CGSSAInfo &info) + : CGPeepPattern(cgFunc, currBB, currInsn, info) {} + ~ContinuousCmpCsetPattern() override = default; + void Run(BB &bb, Insn &insn) override; + bool CheckCondition(Insn &insn) override; + std::string GetPatternName() override { + return "ContinuousCmpCsetPattern"; + } + + private: + bool CheckCondCode(const CondOperand &condOpnd) const; + Insn *prevCmpInsn = nullptr; + Insn *prevCsetInsn1 = nullptr; + Insn *prevCmpInsn1 = nullptr; + bool reverse = false; +}; + +/* + * Example 1) + * mov w5, #1 + * ... + * mov w0, #0 + * csel w5, w5, w0, NE ===> cset w5, NE + * + * Example 2) + * mov w5, #0 + * ... + * mov w0, #1 + * csel w5, w5, w0, NE ===> cset w5,EQ + * + * conditions: + * 1. mov_imm1 value is 0(1) && mov_imm value is 1(0) + */ +class CselToCsetPattern : public CGPeepPattern { + public: + CselToCsetPattern(CGFunc &cgFunc, BB &currBB, Insn &currInsn, CGSSAInfo &info) + : CGPeepPattern(cgFunc, currBB, currInsn, info) {} + ~CselToCsetPattern() override = default; + void Run(BB &bb, Insn &insn) override; + bool CheckCondition(Insn &insn) override; + std::string GetPatternName() override { + return "CselToCsetPattern"; + } + + private: + bool IsOpndDefByZero(const Insn &insn) const; + bool IsOpndDefByOne(const Insn &insn) const; + Insn *prevMovInsn1 = nullptr; + Insn *prevMovInsn2 = nullptr; +}; + +/* + * cset w0, HS + * add w2, w1, w0 ===> cinc w2, w1, hs + * + * cset w0, HS + * add w2, w0, w1 ===> cinc w2, w1, hs + */ +class CsetToCincPattern : public CGPeepPattern { + public: + CsetToCincPattern(CGFunc &cgFunc, BB &currBB, Insn &currInsn, CGSSAInfo &info) + : CGPeepPattern(cgFunc, currBB, currInsn, info) {} + ~CsetToCincPattern() override = default; + void Run(BB &bb, Insn &insn) override; + bool CheckCondition(Insn &insn) override; + bool CheckDefInsn(const RegOperand &opnd, Insn &insn); + bool CheckRegTyCc(const Insn &tempDefInsn, Insn &insn); + std::string GetPatternName() override { + return "CsetToCincPattern"; + } + + private: + Insn *defInsn = nullptr; + int32 csetOpnd1 = 0; +}; + +/* + * combine cset & cbz/cbnz ---> beq/bne + * Example 1) + * cset w0, EQ or cset w0, NE + * cbnz w0, .label cbnz w0, .label + * ===> beq .label ===> bne .label + * + * Case: same conditon_code + * + * Example 2) + * cset w0, EQ or cset w0, NE + * cbz w0, .label cbz w0, .label + * ===> bne .label ===> beq .label + * + * Case: reversed condition_code + */ +class CsetCbzToBeqPattern : public CGPeepPattern { + public: + CsetCbzToBeqPattern(CGFunc &cgFunc, BB &currBB, Insn &currInsn, CGSSAInfo &info) + : CGPeepPattern(cgFunc, currBB, currInsn, info) {} + ~CsetCbzToBeqPattern() override = default; + std::string GetPatternName() override { + return "CsetCbzToBeqPattern"; + } + bool CheckCondition(Insn &insn) override; + void Run(BB &bb, Insn &insn) override; + + private: + MOperator SelectNewMop(ConditionCode condCode, bool inverse) const; + Insn *prevInsn = nullptr; +}; + +/* + * combine neg & cmp --> cmn + * Example 1) + * neg x0, x6 + * cmp x2, x0 ---> (currInsn) + * ===> cmn x2, x6 + * + * Example 2) + * neg x0, x6, LSL #5 + * cmp x2, x0 ---> (currInsn) + * ===> cmn x2, x6, LSL #5 + * + * Conditions: + * 1. 
neg_amount_val is valid in cmn amount range + */ +class NegCmpToCmnPattern : public CGPeepPattern { + public: + NegCmpToCmnPattern(CGFunc &cgFunc, BB &currBB, Insn &currInsn, CGSSAInfo &info) + : CGPeepPattern(cgFunc, currBB, currInsn, info) {} + ~NegCmpToCmnPattern() override = default; + void Run(BB &bb, Insn &insn) override; + bool CheckCondition(Insn &insn) override; + std::string GetPatternName() override { + return "NegCmpToCmnPattern"; + } + + private: + Insn *prevInsn = nullptr; +}; + +/* + * case: + * ldr R261(32), [R197, #300] + * ldr R262(32), [R208, #12] + * cmp (CC) R261, R262 + * bne lable175. + * ldr R264(32), [R197, #304] + * ldr R265(32), [R208, #16] + * cmp (CC) R264, R265 + * bne lable175. + * ====> + * ldr R261(64), [R197, #300] + * ldr R262(64), [R208, #12] + * cmp (CC) R261, R262 + * bne lable175. + */ +class LdrCmpPattern : public CGPeepPattern { + public: + LdrCmpPattern(CGFunc &cgFunc, BB &currBB, Insn &currInsn) : CGPeepPattern(cgFunc, currBB, currInsn) {} + ~LdrCmpPattern() override { + prevLdr1 = nullptr; + prevLdr2 = nullptr; + ldr1 = nullptr; + ldr2 = nullptr; + prevCmp = nullptr; + bne1 = nullptr; + bne2 = nullptr; + } + void Run(BB &bb, Insn &insn) override; + bool CheckCondition(Insn &insn) override; + std::string GetPatternName() override { + return "LdrCmpPattern"; + } + + private: + bool IsLdr(const Insn *insn) const { + if (insn == nullptr) { + return false; + } + return insn->GetMachineOpcode() == MOP_wldr; + } + + bool IsCmp(const Insn *insn) const { + if (insn == nullptr) { + return false; + } + return insn->GetMachineOpcode() == MOP_wcmprr; + } + + bool IsBne(const Insn *insn) const { + if (insn == nullptr) { + return false; + } + return insn->GetMachineOpcode() == MOP_bne; + } + + bool SetInsns(); + bool CheckInsns() const; + bool MemOffet4Bit(const MemOperand &m1, const MemOperand &m2) const; + Insn *prevLdr1 = nullptr; + Insn *prevLdr2 = nullptr; + Insn *ldr1 = nullptr; + Insn *ldr2 = nullptr; + Insn *prevCmp = nullptr; + Insn *bne1 = nullptr; + Insn *bne2 = nullptr; +}; + +/* + * combine {sxtw / uxtw} & lsl ---> {sbfiz / ubfiz} + * sxtw x1, w0 + * lsl x2, x1, #3 ===> sbfiz x2, x0, #3, #32 + * + * uxtw x1, w0 + * lsl x2, x1, #3 ===> ubfiz x2, x0, #3, #32 + */ +class ExtLslToBitFieldInsertPattern : public CGPeepPattern { + public: + ExtLslToBitFieldInsertPattern(CGFunc &cgFunc, BB &currBB, Insn &currInsn, CGSSAInfo &info) + : CGPeepPattern(cgFunc, currBB, currInsn, info) {} + ~ExtLslToBitFieldInsertPattern() override = default; + std::string GetPatternName() override { + return "ExtLslToBitFieldInsertPattern"; + } + bool CheckCondition(Insn &insn) override; + void Run(BB &bb, Insn &insn) override; + + private: + Insn *prevInsn = nullptr; +}; + +/* + * Optimize the following patterns: + * Example 1) + * and w0, w6, #1 ====> tbz w6, #0, .label + * cmp w0, #1 + * bne .label + * + * and w0, w6, #16 ====> tbz w6, #4, .label + * cmp w0, #16 + * bne .label + * + * and w0, w6, #32 ====> tbnz w6, #5, .label + * cmp w0, #32 + * beq .label + * + * Conditions: + * 1. cmp_imm value == and_imm value + * 2. (and_imm value is (1 << n)) && (cmp_imm value is (1 << n)) + * + * Example 2) + * and x0, x6, #32 ====> tbz x6, #5, .label + * cmp x0, #0 + * beq .label + * + * and x0, x6, #32 ====> tbnz x6, #5, .label + * cmp x0, #0 + * bne .labelSimplifyMulArithmeticPattern + * + * Conditions: + * 1. (cmp_imm value is 0) || (cmp_imm == and_imm) + * 2. 
and_imm value is (1 << n) + */ +class AndCmpBranchesToTbzPattern : public CGPeepPattern { + public: + AndCmpBranchesToTbzPattern(CGFunc &cgFunc, BB &currBB, Insn &currInsn, CGSSAInfo &info) + : CGPeepPattern(cgFunc, currBB, currInsn, info) {} + ~AndCmpBranchesToTbzPattern() override = default; + void Run(BB &bb, Insn &insn) override; + bool CheckCondition(Insn &insn) override; + std::string GetPatternName() override { + return "AndCmpBranchesToTbzPattern"; + } + + private: + bool CheckAndSelectPattern(const Insn &currInsn); + Insn *prevAndInsn = nullptr; + Insn *prevCmpInsn = nullptr; + MOperator newMop = MOP_undef; + int64 tbzImmVal = -1; +}; + +/* + * optimize the following patterns: + * Example 1) + * cmp w1, wzr + * bge .label ====> tbz w1, #31, .label + * + * cmp wzr, w1 + * ble .label ====> tbz w1, #31, .label + * + * cmp w1,wzr + * blt .label ====> tbnz w1, #31, .label + * + * cmp wzr, w1 + * bgt .label ====> tbnz w1, #31, .label + * + * + * Example 2) + * cmp w1, #0 + * bge .label ====> tbz w1, #31, .label + * + * cmp w1, #0 + * blt .label ====> tbnz w1, #31, .label + */ +class ZeroCmpBranchesToTbzPattern : public CGPeepPattern { + public: + ZeroCmpBranchesToTbzPattern(CGFunc &cgFunc, BB &currBB, Insn &currInsn, CGSSAInfo &info) + : CGPeepPattern(cgFunc, currBB, currInsn, info) {} + ~ZeroCmpBranchesToTbzPattern() override = default; + void Run(BB &bb, Insn &insn) override; + bool CheckCondition(Insn &insn) override; + std::string GetPatternName() override { + return "ZeroCmpBranchesToTbzPattern"; + } + + private: + bool CheckAndSelectPattern(const Insn &currInsn); + Insn *prevInsn = nullptr; + MOperator newMop = MOP_undef; + RegOperand *regOpnd = nullptr; +}; + +/* + * mvn w3, w3 ====> bic w3, w5, w3 + * and w3, w5, w3 + * ====> + * mvn x3, x3 ====> bic x3, x5, x3 + * and x3, x5, x3 + */ +class MvnAndToBicPattern : public CGPeepPattern { + public: + MvnAndToBicPattern(CGFunc &cgFunc, BB &currBB, Insn &currInsn, CGSSAInfo &info) + : CGPeepPattern(cgFunc, currBB, currInsn, info) {} + ~MvnAndToBicPattern() override = default; + void Run(BB &bb, Insn &insn) override; + bool CheckCondition(Insn &insn) override; + std::string GetPatternName() override { + return "MvnAndToBicPattern"; + } + + private: + Insn *prevInsn1 = nullptr; + Insn *prevInsn2 = nullptr; + bool op1IsMvnDef = false; + bool op2IsMvnDef = false; +}; + +/* + * and r0, r1, #4 (the imm is n power of 2) + * ... + * cbz r0, .Label + * ===> tbz r1, #2, .Label + * + * and r0, r1, #4 (the imm is n power of 2) + * ... 
+ * cbnz r0, .Label + * ===> tbnz r1, #2, .Label + */ +class AndCbzToTbzPattern : public CGPeepPattern { + public: + AndCbzToTbzPattern(CGFunc &cgFunc, BB &currBB, Insn &currInsn, CGSSAInfo &info) + : CGPeepPattern(cgFunc, currBB, currInsn, info) {} + AndCbzToTbzPattern(CGFunc &cgFunc, BB &currBB, Insn &currInsn) + : CGPeepPattern(cgFunc, currBB, currInsn) {} + ~AndCbzToTbzPattern() override = default; + void Run(BB &bb, Insn &insn) override; + bool CheckCondition(Insn &insn) override; + std::string GetPatternName() override { + return "AndCbzToTbzPattern"; + } + + private: + Insn *prevInsn = nullptr; +}; + +class CombineSameArithmeticPattern : public CGPeepPattern { + public: + CombineSameArithmeticPattern(CGFunc &cgFunc, BB &currBB, Insn &currInsn, CGSSAInfo &info) + : CGPeepPattern(cgFunc, currBB, currInsn, info) {} + ~CombineSameArithmeticPattern() override { + prevInsn = nullptr; + newImmOpnd = nullptr; + } + void Run(BB &bb, Insn &insn) override; + bool CheckCondition(Insn &insn) override; + std::string GetPatternName() override { + return "CombineSameArithmeticPattern"; + } + + private: + std::vector validMops = {MOP_wlsrrri5, MOP_xlsrrri6, MOP_wasrrri5, MOP_xasrrri6, MOP_wlslrri5, + MOP_xlslrri6, MOP_waddrri12, MOP_xaddrri12, MOP_wsubrri12, MOP_xsubrri12}; + Insn *prevInsn = nullptr; + ImmOperand *newImmOpnd = nullptr; +}; + +/* + * Specific Extension Elimination, includes sxt[b|h|w] & uxt[b|h|w]. There are scenes: + * 1. PrevInsn is mov + * Example 1) + * mov w0, #imm or mov w0, #imm + * sxt{} w0, w0 uxt{} w0, w0 + * ===> mov w0, #imm ===> mov w0, #imm + * mov w0, w0 mov w0, w0 + * + * Example 2) + * mov w0, R0 + * uxt{} w0, w0 + * ===> mov w0, R0 + * mov w0, w0 + * + * Conditions: + * 1) #imm is not out of range depend on extention valid bits. + * 2) [mov w0, R0] is return value of call and return size is not of range + * 3) mov.destOpnd.size = ext.destOpnd.size + * + * + * 2. PrevInsn is ldr[b|h|sb|sh] + * Example 1) + * ldrb x1, [] + * and x1, x1, #imm + * ===> ldrb x1, [] + * mov x1, x1 + * + * Example 2) + * ldrb x1, [] or ldrb x1, [] or ldrsb x1, [] or ldrsb x1, [] or + * sxtb x1, x1 uxtb x1, x1 sxtb x1, x1 uxtb x1, x1 + * ===> ldrsb x1, [] ===> ldrb x1, [] ===> ldrsb x1, [] ===> ldrb x1, [] + * mov x1, x1 mov x1, x1 mov x1, x1 mov x1, x1 + * + * ldrh x1, [] or ldrh x1, [] or ldrsh x1, [] or ldrsh x1, [] or + * sxth x1, x1 uxth x1, x1 sxth x1, x1 uxth x1, x1 + * ===> ldrsh x1, [] ===> ldrh x1, [] ===> ldrsh x1, [] ===> ldrb x1, [] + * mov x1, x1 mov x1, x1 mov x1, x1 mov x1, x1 + * + * ldrsw x1, [] or ldrsw x1, [] + * sxtw x1, x1 uxtw x1, x1 + * ===> ldrsw x1, [] ===> no change + * mov x1, x1 + * + * Example 3) + * ldrb x1, [] or ldrb x1, [] or ldrsb x1, [] or ldrsb x1, [] or + * sxth x1, x1 uxth x1, x1 sxth x1, x1 uxth x1, x1 + * ===> ldrb x1, [] ===> ldrb x1, [] ===> ldrsb x1, [] ===> no change + * mov x1, x1 mov x1, x1 mov x1, x1 + * + * ldrb x1, [] or ldrh x1, [] or ldrsb x1, [] or ldrsh x1, [] or + * sxtw x1, x1 sxtw x1, x1 sxtw x1, x1 sxtw x1, x1 + * ===> ldrb x1, [] ===> ldrh x1, [] ===> ldrsb x1, [] ===> ldrsh x1, [] + * mov x1, x1 mov x1, x1 mov x1, x1 mov x1, x1 + * + * ldr x1, [] + * sxtw x1, x1 + * ===> ldrsw x1, [] + * mov x1, x1 + * + * Cases: + * 1) extension size == load size -> change the load type or eliminate the extension + * 2) extension size > load size -> possibly eliminating the extension + * + * + * 3. 
PrevInsn is same sxt / uxt + * Example 1) + * sxth x1, x2 + * sxth x3, x1 + * ===> sxth x1, x2 + * mov x3, x1 + * + * Example 2) + * sxtb x1, x2 or uxtb w0, w0 + * sxth x3, x1 uxth w0, w0 + * ===> sxtb x1, x2 ===> uxtb w0, w0 + * mov x3, x1 mov x0, x0 + * + * Conditions: + * 1) ext1.destOpnd.size == ext2.destOpnd.size + * 2) ext1.destOpnd.regNo == ext2.destOpnd.regNo + * === prop ext1.destOpnd to ext2.srcOpnd, transfer ext2 to mov + * + * Cases: + * 1) ext1 type == ext2 type ((sxth32 & sxth32) || (sxth64 & sxth64) || ...) + * 2) ext1 type < ext2 type ((sxtb32 & sxth32) || (sxtb64 & sxth64) || (sxtb64 & sxtw64) || + * (sxth64 & sxtw64) || (uxtb32 & uxth32)) + */ +class ElimSpecificExtensionPattern : public CGPeepPattern { + public: + ElimSpecificExtensionPattern(CGFunc &cgFunc, BB &currBB, Insn &currInsn, CGSSAInfo &info) + : CGPeepPattern(cgFunc, currBB, currInsn, info) {} + ~ElimSpecificExtensionPattern() override = default; + void Run(BB &bb, Insn &insn) override; + bool CheckCondition(Insn &insn) override; + std::string GetPatternName() override { + return "ElimSpecificExtensionPattern"; + } + + protected: + enum SpecificExtType : uint8 { + EXTUNDEF = 0, + SXTB, + SXTH, + SXTW, + UXTB, + UXTH, + UXTW, + SETS /* SETS */ + }; + enum OptSceneType : uint8 { + kSceneUndef = 0, + kSceneMov, + kSceneLoad, + kSceneSameExt + }; + static constexpr uint8 kPrevLoadPatternNum = 6; + static constexpr uint8 kPrevLoadMappingNum = 2; + static constexpr uint8 kValueTypeNum = 2; + static constexpr uint64 kInvalidValue = 0; + static constexpr uint8 kSameExtPatternNum = 4; + static constexpr uint8 kSameExtMappingNum = 2; + uint64 extValueRangeTable[SETS][kValueTypeNum] = { + /* {minValue, maxValue} */ + {kInvalidValue, kInvalidValue}, /* UNDEF */ + {0xFFFFFFFFFFFFFF80, 0x7F}, /* SXTB */ + {0xFFFFFFFFFFFF8000, 0x7FFF}, /* SXTH */ + {0xFFFFFFFF80000000, kInvalidValue}, /* SXTW */ + {0xFFFFFFFFFFFFFF00, kInvalidValue}, /* UXTB */ + {0xFFFFFFFFFFFF0000, kInvalidValue}, /* UXTH */ + {kInvalidValue, kInvalidValue} /* UXTW */ + }; + MOperator loadMappingTable[SETS][kPrevLoadPatternNum][kPrevLoadMappingNum] = { + /* {prevOrigMop, prevNewMop} */ + {{MOP_undef, MOP_undef}, {MOP_undef, MOP_undef}, {MOP_undef, MOP_undef}, {MOP_undef, MOP_undef}, + {MOP_undef, MOP_undef}, {MOP_undef, MOP_undef}}, /* UNDEF */ + {{MOP_wldrb, MOP_wldrsb}, {MOP_wldrsb, MOP_wldrsb}, {MOP_wldr, MOP_wldrsb}, {MOP_undef, MOP_undef}, + {MOP_undef, MOP_undef}, {MOP_undef, MOP_undef}}, /* SXTB */ + {{MOP_wldrh, MOP_wldrsh}, {MOP_wldrb, MOP_wldrb}, {MOP_wldrsb, MOP_wldrsb}, {MOP_wldrsh, MOP_wldrsh}, + {MOP_undef, MOP_undef}, {MOP_undef, MOP_undef}}, /* SXTH */ + {{MOP_wldrh, MOP_wldrh}, {MOP_wldrsh, MOP_wldrsh}, {MOP_wldrb, MOP_wldrb}, {MOP_wldrsb, MOP_wldrsb}, + {MOP_wldr, MOP_xldrsw}, {MOP_xldrsw, MOP_xldrsw}}, /* SXTW */ + {{MOP_wldrb, MOP_wldrb}, {MOP_wldrsb, MOP_wldrb}, {MOP_undef, MOP_undef}, {MOP_undef, MOP_undef}, + {MOP_undef, MOP_undef}, {MOP_undef, MOP_undef}}, /* UXTB */ + {{MOP_wldrh, MOP_wldrh}, {MOP_wldrb, MOP_wldrb}, {MOP_wldr, MOP_wldrh}, {MOP_undef, MOP_undef}, + {MOP_undef, MOP_undef}, {MOP_undef, MOP_undef}}, /* UXTH */ + {{MOP_wldr, MOP_wldr}, {MOP_wldrh, MOP_wldrh}, {MOP_wldrb, MOP_wldrb}, {MOP_undef, MOP_undef}, + {MOP_undef, MOP_undef}, {MOP_undef, MOP_undef}} /* UXTW */ + }; + MOperator sameExtMappingTable[SETS][kSameExtPatternNum][kSameExtMappingNum] = { + /* {prevMop, currMop} */ + {{MOP_undef, MOP_undef}, {MOP_undef, MOP_undef}, {MOP_undef, MOP_undef}, + {MOP_undef, MOP_undef}}, /* UNDEF */ + {{MOP_xsxtb32, MOP_xsxtb32}, 
{MOP_xsxtb64, MOP_xsxtb64}, {MOP_undef, MOP_undef}, + {MOP_undef, MOP_undef}}, /* SXTB */ + {{MOP_xsxtb32, MOP_xsxth32}, {MOP_xsxtb64, MOP_xsxth64}, {MOP_xsxth32, MOP_xsxth32}, + {MOP_xsxth64, MOP_xsxth64}}, /* SXTH */ + {{MOP_xsxtb64, MOP_xsxtw64}, {MOP_xsxth64, MOP_xsxtw64}, {MOP_xsxtw64, MOP_xsxtw64}, + {MOP_undef, MOP_undef}}, /* SXTW */ + {{MOP_xuxtb32, MOP_xuxtb32}, {MOP_undef, MOP_undef}, {MOP_undef, MOP_undef}, + {MOP_undef, MOP_undef}}, /* UXTB */ + {{MOP_xuxtb32, MOP_xuxth32}, {MOP_xuxth32, MOP_xuxth32}, {MOP_undef, MOP_undef}, + {MOP_undef, MOP_undef}}, /* UXTH */ + {{MOP_xuxtw64, MOP_xuxtw64}, {MOP_undef, MOP_undef}, {MOP_undef, MOP_undef}, + {MOP_undef, MOP_undef}} /* UXTW */ + }; + + private: + void SetSpecificExtType(const Insn &currInsn); + void SetOptSceneType(); + bool IsValidLoadExtPattern(MOperator oldMop, MOperator newMop) const; + MOperator SelectNewLoadMopByBitSize(MOperator lowBitMop) const; + void ElimExtensionAfterLoad(Insn &insn); + void ElimExtensionAfterMov(Insn &insn); + void ElimExtensionAfterSameExt(Insn &insn); + void ReplaceExtWithMov(Insn &currInsn); + Insn *prevInsn = nullptr; + SpecificExtType extTypeIdx = EXTUNDEF; + OptSceneType sceneType = kSceneUndef; + bool is64Bits = false; +}; + +/* + * We optimize the following pattern in this function: + * if w0's valid bits is one + * uxtb w0, w0 + * eor w0, w0, #1 + * cbz w0, .label + * => + * tbnz w0, .label + * if there exists uxtb w0, w0 and w0's valid bits is + * less than 8, eliminate it. + */ +class OneHoleBranchPattern : public CGPeepPattern { + public: + explicit OneHoleBranchPattern(CGFunc &cgFunc, BB &currBB, Insn &currInsn, CGSSAInfo &info) + : CGPeepPattern(cgFunc, currBB, currInsn, info) {} + ~OneHoleBranchPattern() override = default; + void Run(BB &bb, Insn &insn) override; + bool CheckCondition(Insn &insn) override; + std::string GetPatternName() override { + return "OneHoleBranchPattern"; + } + + private: + void FindNewMop(const BB &bb, const Insn &insn); + bool CheckPrePrevInsn(); + Insn *prevInsn = nullptr; + Insn *prePrevInsn = nullptr; + MOperator newOp = MOP_undef; +}; + +/* + * Combine logical shift and orr to [extr wd, wn, wm, #lsb / extr xd, xn, xm, #lsb] + * Example 1) + * lsr w5, w6, #16 + * lsl w4, w7, #16 + * orr w5, w5, w4 ---> (currInsn) + * ===> extr w5, w6, w7, #16 + * + * Example 2) + * lsr w5, w6, #16 + * orr w5, w5, w4, LSL #16 ---> (currInsn) + * ===> extr w5, w6, w4, #16 + * + * Example 3) + * lsl w4, w7, #16 + * orr w5, w4, w5, LSR #16 ---> (currInsn) + * ===> extr w5, w5, w7, #16 + * + * Conditions: + * 1. (def[wn] is lsl) & (def[wm] is lsr) + * 2. lsl_imm + lsr_imm == curr type size (32 or 64) + * 3. is64bits ? (extr_imm in range [0, 63]) : (extr_imm in range [0, 31]) + * 4. extr_imm = lsr_imm + */ +class LogicShiftAndOrrToExtrPattern : public CGPeepPattern { + public: + LogicShiftAndOrrToExtrPattern(CGFunc &cgFunc, BB &currBB, Insn &currInsn, CGSSAInfo &info) + : CGPeepPattern(cgFunc, currBB, currInsn, info) {} + ~LogicShiftAndOrrToExtrPattern() override = default; + void Run(BB &bb, Insn &insn) override; + bool CheckCondition(Insn &insn) override; + std::string GetPatternName() override { + return "LogicShiftAndOrrToExtrPattern"; + } + + private: + Insn *prevLsrInsn = nullptr; + Insn *prevLslInsn = nullptr; + int64 shiftValue = 0; + bool is64Bits = false; +}; + +/* + * Simplify Mul and Basic Arithmetic. There are three scenes: + * 1. 
currInsn is add: + * Example 1) + * mul x1, x1, x2 or mul x0, x1, x2 + * add x0, x0, x1 add x0, x0, x1 + * ===> madd x0, x1, x2, x0 ===> madd x0, x1, x2, x1 + * + * Example 2) + * fmul d1, d1, d2 or fmul d0, d1, d2 + * fadd d0, d0, d1 fadd d0, d0, d1 + * ===> fmadd d0, d1, d2, d0 ===> fmadd d0, d1, d2, d1 + * + * cases: addInsn second opnd || addInsn third opnd + * + * + * 2. currInsn is sub: + * Example 1) Example 2) + * mul x1, x1, x2 fmul d1, d1, d2 + * sub x0, x0, x1 fsub d0, d0, d1 + * ===> msub x0, x1, x2, x0 ===> fmsub d0, d1, d2, d0 + * + * cases: subInsn third opnd + * + * 3. currInsn is neg: + * Example 1) Example 2) + * mul x1, x1, x2 fmul d1, d1, d2 + * neg x0, x1 fneg d0, d1 + * ===> mneg x0, x1, x2 ===> fnmul d0, d1, d2 + * + * cases: negInsn second opnd + */ +class SimplifyMulArithmeticPattern : public CGPeepPattern { + public: + SimplifyMulArithmeticPattern(CGFunc &cgFunc, BB &currBB, Insn &currInsn, CGSSAInfo &info) + : CGPeepPattern(cgFunc, currBB, currInsn, info) {} + ~SimplifyMulArithmeticPattern() override = default; + void Run(BB &bb, Insn &insn) override; + bool CheckCondition(Insn &insn) override; + std::string GetPatternName() override { + return "SimplifyMulArithmeticPattern"; + } + + protected: + enum ArithmeticType : uint8 { + kUndef = 0, + kAdd, + kFAdd, + kSub, + kFSub, + kNeg, + kFNeg, + kArithmeticTypeSize + }; + static constexpr uint8 newMopNum = 2; + MOperator curMop2NewMopTable[kArithmeticTypeSize][newMopNum] = { + /* {32bit_mop, 64bit_mop} */ + {MOP_undef, MOP_undef}, /* kUndef */ + {MOP_wmaddrrrr, MOP_xmaddrrrr}, /* kAdd */ + {MOP_smadd, MOP_dmadd}, /* kFAdd */ + {MOP_wmsubrrrr, MOP_xmsubrrrr}, /* kSub */ + {MOP_smsub, MOP_dmsub}, /* kFSub */ + {MOP_wmnegrrr, MOP_xmnegrrr}, /* kNeg */ + {MOP_snmul, MOP_dnmul} /* kFNeg */ + }; + + private: + void SetArithType(const Insn &currInsn); + void DoOptimize(BB &currBB, Insn &currInsn); + ArithmeticType arithType = kUndef; + int32 validOpndIdx = -1; + Insn *prevInsn = nullptr; + bool isFloat = false; +}; + +/* + * Example 1) + * lsr w0, w1, #6 + * and w0, w0, #1 ---> (currInsn) + * ===> ubfx w0, w1, #6, #1 + * + * Conditions: + * 1. and_imm value is (1 << n -1) + * 2. is64bits ? (ubfx_imm_lsb in range [0, 63]) : (ubfx_imm_lsb in range [0, 31]) + * 3. is64bits ? ((ubfx_imm_lsb + ubfx_imm_width) in range [1, 32]) : ((ubfx_imm_lsb + ubfx_imm_width) in range [1, 64]) + */ +class LsrAndToUbfxPattern : public CGPeepPattern { + public: + LsrAndToUbfxPattern(CGFunc &cgFunc, BB &currBB, Insn &currInsn, CGSSAInfo &info) + : CGPeepPattern(cgFunc, currBB, currInsn, info) {} + ~LsrAndToUbfxPattern() override = default; + void Run(BB &bb, Insn &insn) override; + bool CheckCondition(Insn &insn) override; + std::string GetPatternName() override { + return "LsrAndToUbfxPattern"; + } + + private: + Insn *prevInsn = nullptr; +}; + +/* + * lsl w1, w2, #m + * and w3, w1, #2^n-1 ---> if n > m : ubfiz w3, w2, #m, #n-m + * + * and w1, w2, #2^n-1 ---> ubfiz w3, w2, #m, #n + * lsl w3, w1, #m + * Exclude the scenarios that can be optimized by prop. 
+ */ +class LslAndToUbfizPattern : public CGPeepPattern { + public: + LslAndToUbfizPattern(CGFunc &cgFunc, BB &currBB, Insn &currInsn, CGSSAInfo &info) + : CGPeepPattern(cgFunc, currBB, currInsn, info) {} + ~LslAndToUbfizPattern() override = default; + void Run(BB &bb, Insn &insn) override; + bool CheckCondition(Insn &insn) override; + Insn *BuildNewInsn(const Insn &andInsn, const Insn &lslInsn, const Insn &useInsn); + bool CheckUseInsnMop(const Insn &useInsn); + std::string GetPatternName() override { + return "LslAndToUbfizPattern"; + } + + private: + Insn *defInsn = nullptr; +}; + +/* + * Optimize the following patterns: + * orr w21, w0, #0 ====> mov w21, w0 + * orr w21, #0, w0 ====> mov w21, w0 + */ +class OrrToMovPattern : public CGPeepPattern { + public: + explicit OrrToMovPattern(CGFunc &cgFunc, BB &currBB, Insn &currInsn, CGSSAInfo &info) + : CGPeepPattern(cgFunc, currBB, currInsn, info) {} + ~OrrToMovPattern() override = default; + void Run(BB &bb, Insn &insn) override; + bool CheckCondition(Insn &insn) override; + std::string GetPatternName() override { + return "OrrToMovPattern"; + } + + private: + MOperator newMop = MOP_undef; + RegOperand *reg2 = nullptr; +}; + +/* + * Optimize the following patterns: + * ubfx x201, x202, #0, #32 + * ====> + * uxtw x201, w202 + */ +class UbfxToUxtwPattern : public CGPeepPattern { + public: + UbfxToUxtwPattern(CGFunc &cgFunc, BB &currBB, Insn &currInsn) : CGPeepPattern(cgFunc, currBB, currInsn) {} + ~UbfxToUxtwPattern() override = default; + void Run(BB &bb, Insn &insn) override; + bool CheckCondition(Insn &insn) override; + std::string GetPatternName() override { + return "UbfxToUxtwPattern"; + } +}; + +/* + * Optimize the following patterns: + * ubfx w0, w0, #2, #1 + * cbz w0, .L.3434__292 ====> tbz w0, #2, .L.3434__292 + * ------------------------------- + * ubfx w0, w0, #2, #1 + * cnbz w0, .L.3434__292 ====> tbnz w0, #2, .L.3434__292 + * ------------------------------- + * ubfx x0, x0, #2, #1 + * cbz x0, .L.3434__292 ====> tbz x0, #2, .L.3434__292 + * ------------------------------- + * ubfx x0, x0, #2, #1 + * cnbz x0, .L.3434__292 ====> tbnz x0, #2, .L.3434__292 + */ +class UbfxAndCbzToTbzPattern : public CGPeepPattern { + public: + UbfxAndCbzToTbzPattern(CGFunc &cgFunc, BB &currBB, Insn &currInsn, CGSSAInfo &info) + : CGPeepPattern(cgFunc, currBB, currInsn, info) {} + ~UbfxAndCbzToTbzPattern() override { + useInsn = nullptr; + } + void Run(BB &bb, Insn &insn) override; + bool CheckCondition(Insn &insn) override; + std::string GetPatternName() override { + return "UbfxAndCbzToTbzPattern"; + } + private: + Insn *useInsn = nullptr; + MOperator newMop = MOP_undef; +}; + +/* + * Looking for identical mem insn to eliminate. + * If two back-to-back is: + * 1. str + str + * 2. str + ldr + * And the [MEM] is pattern of [base + offset] + * 1. The [MEM] operand is exactly same then first + * str can be eliminate. + * 2. 
The [MEM] operand is exactly same and src opnd + * of str is same as the dest opnd of ldr then + * ldr can be eliminate + */ +class RemoveIdenticalLoadAndStorePattern : public CGPeepPattern { + public: + RemoveIdenticalLoadAndStorePattern(CGFunc &cgFunc, BB &currBB, Insn &currInsn) + : CGPeepPattern(cgFunc, currBB, currInsn) {} + ~RemoveIdenticalLoadAndStorePattern() override { + nextInsn = nullptr; + } + void Run(BB &bb, Insn &insn) override; + bool CheckCondition(Insn &insn) override; + std::string GetPatternName() override { + return "RemoveIdenticalLoadAndStorePattern"; + } + + private: + bool IsMemOperandsIdentical(const Insn &insn1, const Insn &insn2) const; + Insn *nextInsn = nullptr; +}; + +/* Remove redundant mov which src and dest opnd is exactly same */ +class RemoveMovingtoSameRegPattern : public CGPeepPattern { + public: + RemoveMovingtoSameRegPattern(CGFunc &cgFunc, BB &currBB, Insn &currInsn) + : CGPeepPattern(cgFunc, currBB, currInsn) {} + ~RemoveMovingtoSameRegPattern() override = default; + void Run(BB &bb, Insn &insn) override; + bool CheckCondition(Insn &insn) override; + std::string GetPatternName() override { + return "RemoveMovingtoSameRegPattern"; + } +}; + +/* Remove redundant mov which src and dest opnd is exactly same */ +class RemoveMovingtoSameRegAArch64 : public PeepPattern { + public: + explicit RemoveMovingtoSameRegAArch64(CGFunc &cgFunc) : PeepPattern(cgFunc) {} + ~RemoveMovingtoSameRegAArch64() override = default; + void Run(BB &bb, Insn &insn) override; +}; + +/* + * Combining 2 STRs into 1 stp or 2 LDRs into 1 ldp, when they are + * back to back and the [MEM] they access is conjointed. + */ +class CombineContiLoadAndStorePattern : public CGPeepPattern { + public: + CombineContiLoadAndStorePattern(CGFunc &cgFunc, BB &currBB, Insn &currInsn) + : CGPeepPattern(cgFunc, currBB, currInsn) { + doAggressiveCombine = cgFunc.GetMirModule().IsCModule(); + } + ~CombineContiLoadAndStorePattern() override { + memOpnd = nullptr; + } + void Run(BB &bb, Insn &insn) override; + bool CheckCondition(Insn &insn) override; + std::string GetPatternName() override { + return "CombineContiLoadAndStorePattern"; + } + + private: + std::vector FindPrevStrLdr(Insn &insn, regno_t destRegNO, regno_t memBaseRegNO, int64 baseOfst) const; + /* + * avoid the following situation: + * str x2, [x19, #8] + * mov x0, x19 + * bl foo (change memory) + * str x21, [x19, #16] + */ + bool IsRegNotSameMemUseInInsn(const Insn &insn, regno_t regNO, bool isStore, int64 baseOfst) const; + void RemoveInsnAndKeepComment(BB &bb, Insn &insn, Insn &prevInsn) const; + MOperator GetMopHigherByte(MOperator mop) const; + bool SplitOfstWithAddToCombine(const Insn &curInsn, Insn &combineInsn, const MemOperand &memOperand) const; + Insn *FindValidSplitAddInsn(Insn &combineInsn, const RegOperand &baseOpnd) const; + bool FindTmpRegOnlyUseAfterCombineInsn(const Insn &curInsn) const; + bool PlaceSplitAddInsn(const Insn &curInsn, Insn &combineInsn, const MemOperand &memOperand, + RegOperand &baseOpnd, uint32 bitLen) const; + bool doAggressiveCombine = false; + MemOperand *memOpnd = nullptr; +}; + +/* + * add xt, xn, #imm add xt, xn, xm + * ldr xd, [xt] ldr xd, [xt] + * =====================> + * ldr xd, [xn, #imm] ldr xd, [xn, xm] + * + * load/store can do extend shift as well + */ +class EnhanceStrLdrAArch64 : public PeepPattern { + public: + explicit EnhanceStrLdrAArch64(CGFunc &cgFunc) : PeepPattern(cgFunc) {} + ~EnhanceStrLdrAArch64() override = default; + void Run(BB &bb, Insn &insn) override; + + private: + bool 
IsEnhanceAddImm(MOperator prevMop) const; +}; + +/* Eliminate the sxt[b|h|w] w0, w0;, when w0 is satisify following: + * i) mov w0, #imm (#imm is not out of range) + * ii) ldrs[b|h] w0, [MEM] + */ +class EliminateSpecifcSXTPattern : public CGPeepPattern { + public: + EliminateSpecifcSXTPattern(CGFunc &cgFunc, BB &currBB, Insn &currInsn) + : CGPeepPattern(cgFunc, currBB, currInsn) {} + ~EliminateSpecifcSXTPattern() override { + prevInsn = nullptr; + } + void Run(BB &bb, Insn &insn) override; + bool CheckCondition(Insn &insn) override; + std::string GetPatternName() override { + return "EliminateSpecifcSXTPattern"; + } + + private: + Insn *prevInsn = nullptr; +}; + +/* Eliminate the uxt[b|h|w] w0, w0;when w0 is satisify following: + * i) mov w0, #imm (#imm is not out of range) + * ii) mov w0, R0(Is return value of call and return size is not of range) + * iii)w0 is defined and used by special load insn and uxt[] pattern + */ +class EliminateSpecifcUXTAArch64 : public PeepPattern { + public: + explicit EliminateSpecifcUXTAArch64(CGFunc &cgFunc) : PeepPattern(cgFunc) {} + ~EliminateSpecifcUXTAArch64() override = default; + void Run(BB &bb, Insn &insn) override; +}; + +/* fmov ireg1 <- freg1 previous insn + * fmov ireg2 <- freg1 current insn + * use ireg2 may or may not be present + * => + * fmov ireg1 <- freg1 previous insn + * mov ireg2 <- ireg1 current insn + * use ireg1 may or may not be present + */ +class FmovRegPattern : public CGPeepPattern { + public: + FmovRegPattern(CGFunc &cgFunc, BB &currBB, Insn &currInsn) : CGPeepPattern(cgFunc, currBB, currInsn) {} + ~FmovRegPattern() override { + prevInsn = nullptr; + nextInsn = nullptr; + } + void Run(BB &bb, Insn &insn) override; + bool CheckCondition(Insn &insn) override; + std::string GetPatternName() override { + return "FmovRegPattern"; + } + + private: + Insn *prevInsn = nullptr; + Insn *nextInsn = nullptr; +}; + +/* sbfx ireg1, ireg2, 0, 32 + * use ireg1.32 + * => + * sbfx ireg1, ireg2, 0, 32 + * use ireg2.32 + */ +class SbfxOptPattern : public CGPeepPattern { + public: + SbfxOptPattern(CGFunc &cgFunc, BB &currBB, Insn &currInsn) : CGPeepPattern(cgFunc, currBB, currInsn) {} + ~SbfxOptPattern() override { + nextInsn = nullptr; + } + void Run(BB &bb, Insn &insn) override; + bool CheckCondition(Insn &insn) override; + std::string GetPatternName() override { + return "SbfxOptPattern"; + } + + private: + Insn *nextInsn = nullptr; + bool toRemove = false; + std::vector cands; +}; + +/* cbnz x0, labelA + * mov x0, 0 + * b return-bb + * labelA: + * => + * cbz x0, return-bb + * labelA: + */ +class CbnzToCbzPattern : public CGPeepPattern { + public: + CbnzToCbzPattern(CGFunc &cgFunc, BB &currBB, Insn &currInsn) : CGPeepPattern(cgFunc, currBB, currInsn) {} + ~CbnzToCbzPattern() override { + nextBB = nullptr; + movInsn = nullptr; + brInsn = nullptr; + } + void Run(BB &bb, Insn &insn) override; + bool CheckCondition(Insn &insn) override; + std::string GetPatternName() override { + return "CbnzToCbzPattern"; + } + + private: + BB *nextBB = nullptr; + Insn *movInsn = nullptr; + Insn *brInsn = nullptr; +}; + +/* When exist load after load or load after store, and [MEM] is + * totally same. Then optimize them. 
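+ * An illustrative sketch only (registers and offsets are arbitrary; the second access
+ * is assumed to be rewritten as a register move when the destinations differ):
+ * str x1, [x0, #8]           str x1, [x0, #8]
+ * ldr x2, [x0, #8]   ====>   mov x2, x1
+ *
+ * ldr x1, [x0, #8]           ldr x1, [x0, #8]
+ * ldr x2, [x0, #8]   ====>   mov x2, x1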
+ */ +class ContiLDRorSTRToSameMEMPattern : public CGPeepPattern { + public: + ContiLDRorSTRToSameMEMPattern(CGFunc &cgFunc, BB &currBB, Insn &currInsn) + : CGPeepPattern(cgFunc, currBB, currInsn) {} + ~ContiLDRorSTRToSameMEMPattern() override { + prevInsn = nullptr; + } + void Run(BB &bb, Insn &insn) override; + bool CheckCondition(Insn &insn) override; + std::string GetPatternName() override { + return "ContiLDRorSTRToSameMEMPattern"; + } + + private: + Insn *prevInsn = nullptr; + bool loadAfterStore = false; + bool loadAfterLoad = false; +}; + +/* + * Remove following patterns: + * mov x1, x0 + * bl MCC_IncDecRef_NaiveRCFast + */ +class RemoveIncDecRefPattern : public CGPeepPattern { + public: + RemoveIncDecRefPattern(CGFunc &cgFunc, BB &currBB, Insn &currInsn) + : CGPeepPattern(cgFunc, currBB, currInsn) {} + ~RemoveIncDecRefPattern() override { + prevInsn = nullptr; + } + void Run(BB &bb, Insn &insn) override; + bool CheckCondition(Insn &insn) override; + std::string GetPatternName() override { + return "RemoveIncDecRefPattern"; + } + + private: + Insn *prevInsn = nullptr; +}; + +/* + * When GCONLY is enabled, the read barriers can be inlined. + * we optimize it with the following pattern: + * #if USE_32BIT_REF + * bl MCC_LoadRefField -> ldr w0, [x1] + * bl MCC_LoadVolatileField -> ldar w0, [x1] + * bl MCC_LoadRefStatic -> ldr w0, [x0] + * bl MCC_LoadVolatileStaticField -> ldar w0, [x0] + * bl MCC_Dummy -> omitted + * #else + * bl MCC_LoadRefField -> ldr x0, [x1] + * bl MCC_LoadVolatileField -> ldar x0, [x1] + * bl MCC_LoadRefStatic -> ldr x0, [x0] + * bl MCC_LoadVolatileStaticField -> ldar x0, [x0] + * bl MCC_Dummy -> omitted + * #endif + * + * if we encounter a tail call optimized read barrier call, + * such as: + * b MCC_LoadRefField + * a return instruction will be added just after the load: + * ldr w0, [x1] + * ret + */ +class InlineReadBarriersPattern : public CGPeepPattern { + public: + InlineReadBarriersPattern(CGFunc &cgFunc, BB &currBB, Insn &currInsn) + : CGPeepPattern(cgFunc, currBB, currInsn) {} + ~InlineReadBarriersPattern() override = default; + void Run(BB &bb, Insn &insn) override; + bool CheckCondition(Insn &insn) override; + std::string GetPatternName() override { + return "InlineReadBarriersPattern"; + } +}; + +/* + * mov w1, #34464 + * movk w1, #1, LSL #16 + * sdiv w2, w0, w1 + * ========> + * mov w1, #34464 // may deleted if w1 not live anymore. + * movk w1, #1, LSL #16 // may deleted if w1 not live anymore. 
+ * mov w16, #0x588f + * movk w16, #0x4f8b, LSL #16 + * smull x16, w0, w16 + * asr x16, x16, #32 + * add x16, x16, w0, SXTW + * asr x16, x16, #17 + * add x2, x16, x0, LSR #31 + */ +class ReplaceDivToMultiPattern : public CGPeepPattern { + public: + ReplaceDivToMultiPattern(CGFunc &cgFunc, BB &currBB, Insn &currInsn) : CGPeepPattern(cgFunc, currBB, currInsn) {} + ~ReplaceDivToMultiPattern() override { + prevInsn = nullptr; + prePrevInsn = nullptr; + } + void Run(BB &bb, Insn &insn) override; + bool CheckCondition(Insn &insn) override; + std::string GetPatternName() override { + return "ReplaceDivToMultiPattern"; + } + + private: + Insn *prevInsn = nullptr; + Insn *prePrevInsn = nullptr; +}; + +/* + * Optimize the following patterns: + * and w0, w0, #imm ====> tst w0, #imm + * cbz/cbnz .label beq/bne .label + */ +class AndCbzBranchesToTstAArch64 : public PeepPattern { + public: + explicit AndCbzBranchesToTstAArch64(CGFunc &cgFunc) : PeepPattern(cgFunc) {} + ~AndCbzBranchesToTstAArch64() override = default; + void Run(BB &bb, Insn &insn) override; +}; + +/* + * Optimize the following patterns: + * and w0, w0, #1 ====> and w0, w0, #1 + * cmp w0, #1 + * cset w0, EQ + * + * and w0, w0, #1 ====> and w0, w0, #1 + * cmp w0, #0 + * cset w0, NE + * --------------------------------------------------- + * and w0, w0, #imm ====> ubfx w0, w0, pos, size + * cmp w0, #imm + * cset w0, EQ + * + * and w0, w0, #imm ====> ubfx w0, w0, pos, size + * cmp w0, #0 + * cset w0, NE + * conditions: + * imm is pos power of 2 + * + * --------------------------------------------------- + * and w0, w0, #1 ====> and wn, w0, #1 + * cmp w0, #1 + * cset wn, EQ # wn != w0 && w0 is not live after cset + * + * and w0, w0, #1 ====> and wn, w0, #1 + * cmp w0, #0 + * cset wn, NE # wn != w0 && w0 is not live after cset + * --------------------------------------------------- + * and w0, w0, #imm ====> ubfx wn, w0, pos, size + * cmp w0, #imm + * cset wn, EQ # wn != w0 && w0 is not live after cset + * + * and w0, w0, #imm ====> ubfx wn, w0, pos, size + * cmp w0, #0 + * cset wn, NE # wn != w0 && w0 is not live after cset + * conditions: + * imm is pos power of 2 and w0 is not live after cset + */ +class AndCmpBranchesToCsetAArch64 : public PeepPattern { + public: + explicit AndCmpBranchesToCsetAArch64(CGFunc &cgFunc) : PeepPattern(cgFunc) {} + ~AndCmpBranchesToCsetAArch64() override = default; + void Run(BB &bb, Insn &insn) override; + + private: + Insn *FindPreviousCmp(Insn &insn) const; +}; + +/* + * cbnz w0, @label + * .... + * mov w0, #0 (elseBB) -->this instruction can be deleted + * + * cbz w0, @label + * .... + * mov w0, #0 (ifBB) -->this instruction can be deleted + * + * condition: + * 1.there is not predefine points of w0 in elseBB(ifBB) + * 2.the first opearnd of cbnz insn is same as the first Operand of mov insn + * 3.w0 is defined by move 0 + * 4.all preds of elseBB(ifBB) end with cbnz or cbz + * + * NOTE: if there are multiple preds and there is not define point of w0 in one pred, + * (mov w0, 0) can't be deleted, avoiding use before def. 
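+ *
+ * A minimal single-predecessor sketch (register and label are illustrative):
+ * cbnz w0, .L_nonzero
+ * ...                  // fall-through (elseBB): w0 can only be zero here
+ * mov  w0, #0          // redundant on this path, so it can be deleted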
+ */ +class DeleteMovAfterCbzOrCbnzAArch64 : public PeepPattern { + public: + explicit DeleteMovAfterCbzOrCbnzAArch64(CGFunc &cgFunc) : PeepPattern(cgFunc) { + cgcfg = cgFunc.GetTheCFG(); + cgcfg->InitInsnVisitor(cgFunc); + } + ~DeleteMovAfterCbzOrCbnzAArch64() override = default; + void Run(BB &bb, Insn &insn) override; + + private: + bool PredBBCheck(BB &bb, bool checkCbz, const Operand &opnd) const; + bool OpndDefByMovZero(const Insn &insn) const; + bool NoPreDefine(Insn &testInsn) const; + void ProcessBBHandle(BB *processBB, const BB &bb, const Insn &insn) const; + CGCFG *cgcfg = nullptr; +}; + +/* + * We optimize the following pattern in this function: + * movz x0, #11544, LSL #0 + * movk x0, #21572, LSL #16 + * movk x0, #8699, LSL #32 + * movk x0, #16393, LSL #48 + * => + * ldr x0, label_of_constant_1 + */ +class LoadFloatPointPattern : public CGPeepPattern { + public: + LoadFloatPointPattern(CGFunc &cgFunc, BB &currBB, Insn &currInsn) : CGPeepPattern(cgFunc, currBB, currInsn) {} + ~LoadFloatPointPattern() override = default; + void Run(BB &bb, Insn &insn) override; + bool CheckCondition(Insn &insn) override; + std::string GetPatternName() override { + return "LoadFloatPointPattern"; + } + private: + bool FindLoadFloatPoint(Insn &insn); + bool IsPatternMatch(); + std::vector optInsn; +}; + +/* + * Optimize the following patterns: + * ldr w0, [x21,#68] ldr w0, [x21,#68] + * mov w1, #-1 mov w1, #-1 + * cmp w0, w1 ====> cmn w0, #-1 + */ +class ReplaceCmpToCmnAArch64 : public PeepPattern { + public: + explicit ReplaceCmpToCmnAArch64(CGFunc &cgFunc) : PeepPattern(cgFunc) {} + ~ReplaceCmpToCmnAArch64() override = default; + void Run(BB &bb, Insn &insn) override; +}; + +/* + * Remove following patterns: + * mov x0, XX + * mov x1, XX + * bl MCC_IncDecRef_NaiveRCFast + */ +class RemoveIncRefPattern : public CGPeepPattern { + public: + RemoveIncRefPattern(CGFunc &cgFunc, BB &currBB, Insn &currInsn) + : CGPeepPattern(cgFunc, currBB, currInsn) {} + ~RemoveIncRefPattern() override { + insnMov2 = nullptr; + insnMov1 = nullptr; + } + void Run(BB &bb, Insn &insn) override; + bool CheckCondition(Insn &insn) override; + std::string GetPatternName() override { + return "RemoveIncRefPattern"; + } + + private: + Insn *insnMov2 = nullptr; + Insn *insnMov1 = nullptr; +}; + +/* + * opt long int compare with 0 + * *cmp x0, #0 + * csinv w0, wzr, wzr, GE + * csinc w0, w0, wzr, LE + * cmp w0, #0 + * => + * cmp x0, #0 + */ +class LongIntCompareWithZPattern : public CGPeepPattern { + public: + LongIntCompareWithZPattern(CGFunc &cgFunc, BB &currBB, Insn &currInsn) + : CGPeepPattern(cgFunc, currBB, currInsn) {} + ~LongIntCompareWithZPattern() override = default; + void Run(BB &bb, Insn &insn) override; + bool CheckCondition(Insn &insn) override; + std::string GetPatternName() override { + return "LongIntCompareWithZPattern"; + } + + private: + bool FindLondIntCmpWithZ(Insn &insn); + bool IsPatternMatch(); + std::vector optInsn; +}; + +/* + * add x0, x1, #:lo12:Ljava_2Futil_2FLocale_241_3B_7C_24SwitchMap_24java_24util_24Locale_24Category + * ldr x2, [x0] + * ==> + * ldr x2, [x1, #:lo12:Ljava_2Futil_2FLocale_241_3B_7C_24SwitchMap_24java_24util_24Locale_24Category] + */ +class ComplexMemOperandAArch64 : public PeepPattern { + public: + explicit ComplexMemOperandAArch64(CGFunc &cgFunc) : PeepPattern(cgFunc) {} + ~ComplexMemOperandAArch64() override = default; + void Run(BB &bb, Insn &insn) override; +}; + +/* + * add x0, x1, x0 + * ldr x2, [x0] + * ==> + * ldr x2, [x1, x0] + */ +class ComplexMemOperandPreAddAArch64 : 
public PeepPattern { + public: + explicit ComplexMemOperandPreAddAArch64(CGFunc &cgFunc) : PeepPattern(cgFunc) {} + ~ComplexMemOperandPreAddAArch64() override = default; + void Run(BB &bb, Insn &insn) override; +}; + +/* + * mov R0, vreg1 / R0 mov R0, vreg1 + * add vreg2, vreg1, #imm1 add vreg2, vreg1, #imm1 + * mov R1, vreg2 mov R1, vreg2 + * mov R2, vreg3 mov R2, vreg3 + * ... ... + * mov R0, vreg1 + * add vreg4, vreg1, #imm2 -> str vreg5, [vreg1, #imm2] + * mov R1, vreg4 + * mov R2, vreg5 + */ +class WriteFieldCallPattern : public CGPeepPattern { + public: + struct WriteRefFieldParam { + Operand *objOpnd = nullptr; + RegOperand *fieldBaseOpnd = nullptr; + int64 fieldOffset = 0; + Operand *fieldValue = nullptr; + }; + WriteFieldCallPattern(CGFunc &cgFunc, BB &currBB, Insn &currInsn) + : CGPeepPattern(cgFunc, currBB, currInsn) {} + ~WriteFieldCallPattern() override { + prevCallInsn = nullptr; + nextInsn = nullptr; + } + void Run(BB &bb, Insn &insn) override; + bool CheckCondition(Insn &insn) override; + std::string GetPatternName() override { + return "WriteFieldCallPattern"; + } + + private: + bool hasWriteFieldCall = false; + Insn *prevCallInsn = nullptr; + Insn *nextInsn = nullptr; + WriteRefFieldParam firstCallParam; + WriteRefFieldParam currentCallParam; + std::vector paramDefInsns; + bool WriteFieldCallOptPatternMatch(const Insn &writeFieldCallInsn, WriteRefFieldParam ¶m); + bool IsWriteRefFieldCallInsn(const Insn &insn) const; +}; + +/* + * Remove following patterns: + * mov x0, xzr/#0 + * bl MCC_DecRef_NaiveRCFast + */ +class RemoveDecRefPattern : public CGPeepPattern { + public: + RemoveDecRefPattern(CGFunc &cgFunc, BB &currBB, Insn &currInsn) + : CGPeepPattern(cgFunc, currBB, currInsn) {} + ~RemoveDecRefPattern() override { + prevInsn = nullptr; + } + void Run(BB &bb, Insn &insn) override; + bool CheckCondition(Insn &insn) override; + std::string GetPatternName() override { + return "RemoveDecRefPattern"; + } + + private: + Insn *prevInsn = nullptr; +}; + +/* + * Replace following pattern: + * mov x1, xzr + * bl MCC_IncDecRef_NaiveRCFast + * => + * bl MCC_IncRef_NaiveRCFast + */ +class ReplaceIncDecWithIncPattern : public CGPeepPattern { + public: + ReplaceIncDecWithIncPattern(CGFunc &cgFunc, BB &currBB, Insn &currInsn) + : CGPeepPattern(cgFunc, currBB, currInsn) {} + ~ReplaceIncDecWithIncPattern() override { + prevInsn = nullptr; + target = nullptr; + } + void Run(BB &bb, Insn &insn) override; + bool CheckCondition(Insn &insn) override; + std::string GetPatternName() override { + return "ReplaceIncDecWithIncPattern"; + } + + private: + Insn *prevInsn = nullptr; + FuncNameOperand *target = nullptr; +}; + +/* + * Replace following pattern: + * sxtw x1, w0 + * lsl x2, x1, #3 ====> sbfiz x2, x0, #3, #32 + * + * uxtw x1, w0 + * lsl x2, x1, #3 ====> ubfiz x2, x0, #3, #32 + */ +class ComplexExtendWordLslAArch64 : public PeepPattern { + public: + explicit ComplexExtendWordLslAArch64(CGFunc &cgFunc) : PeepPattern(cgFunc) {} + ~ComplexExtendWordLslAArch64() override = default; + void Run(BB &bb, Insn &insn) override; + + private: + bool IsExtendWordLslPattern(const Insn &insn) const; +}; + +class AArch64PeepHole : public PeepPatternMatch { + public: + AArch64PeepHole(CGFunc &oneCGFunc, MemPool *memPool) : PeepPatternMatch(oneCGFunc, memPool) {} + ~AArch64PeepHole() override = default; + void InitOpts() override; + void Run(BB &bb, Insn &insn) override; + + private: + enum PeepholeOpts : int32 { + kRemoveIdenticalLoadAndStoreOpt = 0, + kRemoveMovingtoSameRegOpt, + 
kCombineContiLoadAndStoreOpt, + kEliminateSpecifcSXTOpt, + kEliminateSpecifcUXTOpt, + kFmovRegOpt, + kCbnzToCbzOpt, + kCsetCbzToBeqOpt, + kContiLDRorSTRToSameMEMOpt, + kRemoveIncDecRefOpt, + kInlineReadBarriersOpt, + kReplaceDivToMultiOpt, + kAndCmpBranchesToCsetOpt, + kAndCmpBranchesToTstOpt, + kAndCbzBranchesToTstOpt, + kZeroCmpBranchesOpt, + kCselZeroOneToCsetOpt, + kPeepholeOptsNum + }; +}; + +class AArch64PeepHole0 : public PeepPatternMatch { + public: + AArch64PeepHole0(CGFunc &oneCGFunc, MemPool *memPool) : PeepPatternMatch(oneCGFunc, memPool) {} + ~AArch64PeepHole0() override = default; + void InitOpts() override; + void Run(BB &bb, Insn &insn) override; + + private: + enum PeepholeOpts : int32 { + kRemoveIdenticalLoadAndStoreOpt = 0, + kCmpCsetOpt, + kComplexMemOperandOptAdd, + kDeleteMovAfterCbzOrCbnzOpt, + kRemoveSxtBeforeStrOpt, + kRemoveMovingtoSameRegOpt, + kPeepholeOptsNum + }; +}; + +class AArch64PrePeepHole : public PeepPatternMatch { + public: + AArch64PrePeepHole(CGFunc &oneCGFunc, MemPool *memPool) : PeepPatternMatch(oneCGFunc, memPool) {} + ~AArch64PrePeepHole() override = default; + void InitOpts() override; + void Run(BB &bb, Insn &insn) override; + + private: + enum PeepholeOpts : int32 { + kOneHoleBranchesPreOpt = 0, + kLoadFloatPointOpt, + kReplaceOrrToMovOpt, + kReplaceCmpToCmnOpt, + kRemoveIncRefOpt, + kLongIntCompareWithZOpt, + kComplexMemOperandOpt, + kComplexMemOperandPreOptAdd, + kComplexMemOperandOptLSL, + kComplexMemOperandOptLabel, + kWriteFieldCallOpt, + kDuplicateExtensionOpt, + kEnhanceStrLdrAArch64Opt, + kUbfxToUxtw, + kPeepholeOptsNum + }; +}; + +class AArch64PrePeepHole1 : public PeepPatternMatch { + public: + AArch64PrePeepHole1(CGFunc &oneCGFunc, MemPool *memPool) : PeepPatternMatch(oneCGFunc, memPool) {} + ~AArch64PrePeepHole1() override = default; + void InitOpts() override; + void Run(BB &bb, Insn &insn) override; + + private: + enum PeepholeOpts : int32 { + kRemoveDecRefOpt = 0, + kComputationTreeOpt, + kOneHoleBranchesOpt, + kReplaceIncDecWithIncOpt, + kAndCmpBranchesToTbzOpt, + kComplexExtendWordLslOpt, + kPeepholeOptsNum + }; +}; +} /* namespace maplebe */ +#endif /* MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_PEEP_H */ diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_pgo_gen.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_pgo_gen.h new file mode 100644 index 0000000000000000000000000000000000000000..4f0d40722e4b9713258ca4f45f763306381c764e --- /dev/null +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_pgo_gen.h @@ -0,0 +1,30 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MAPLEBE_CG_INCLUDE_AARCH64_PGO_GEN_H +#define MAPLEBE_CG_INCLUDE_AARCH64_PGO_GEN_H + +#include "cg_pgo_gen.h" +namespace maplebe { +class AArch64ProfGen : public CGProfGen { + public: + AArch64ProfGen(CGFunc &curF, MemPool &mp) : CGProfGen(curF, mp) {} + + void CreateClearIcall(BB &bb, const std::string &symName) override; + void CreateIcallForWeakSymbol(BB &bb, const std::string &symName) override ; + void InstrumentBB(BB &bb, MIRSymbol &countTab, uint32 offset) override; +}; + +} +#endif // MAPLEBE_CG_INCLUDE_AARCH64_PGO_GEN_H \ No newline at end of file diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_phases.def b/src/mapleall/maple_be/include/cg/aarch64/aarch64_phases.def new file mode 100644 index 0000000000000000000000000000000000000000..48e835dbb3777c4e84a5504bdf9a556bec44e49b --- /dev/null +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_phases.def @@ -0,0 +1,76 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan PSL v1. + * You can use this software according to the terms and conditions of the Mulan PSL v1. + * You may obtain a copy of Mulan PSL v1 at: + * + * http://license.coscl.org.cn/MulanPSL + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v1 for more details. + */ + ADDTARGETPHASE("layoutstackframe", true); + ADDTARGETPHASE("createstartendlabel", true); + ADDTARGETPHASE("buildehfunc", !GetMIRModule()->IsCModule()); + ADDTARGETPHASE("handlefunction", true); + ADDTARGETPHASE("moveargs", true); + /* SSA PHASES */ + ADDTARGETPHASE("cgssaconstruct", CGOptions::DoCGSSA()); + ADDTARGETPHASE("cgcopyprop", CGOptions::DoCGSSA()); + ADDTARGETPHASE("cgpeephole", CGOptions::DoCGSSA()); + ADDTARGETPHASE("cgvalidbitopt", CGOptions::DoCGSSA()); + ADDTARGETPHASE("cgredundantcompelim", CGOptions::DoCGSSA()); + ADDTARGETPHASE("cgtargetprop", CGOptions::DoCGSSA()); + ADDTARGETPHASE("cgdeadcodeelimination", CGOptions::DoCGSSA()); + ADDTARGETPHASE("cgsplitcriticaledge", CGOptions::DoCGSSA()); + ADDTARGETPHASE("cgphielimination", CGOptions::DoCGSSA()); + ADDTARGETPHASE("cgregcoalesce", CGOptions::DoCGSSA()); + /* Normal OPT PHASES */ + ADDTARGETPHASE("cgprepeephole", CGOptions::DoPrePeephole()); + ADDTARGETPHASE("ebo", CGOptions::DoEBO()); + ADDTARGETPHASE("prepeephole", CGOptions::DoPrePeephole()) + ADDTARGETPHASE("ico", CGOptions::DoICO()) + ADDTARGETPHASE("cfgo", !GetMIRModule()->IsCModule() && CGOptions::DoCFGO()); + + if (GetMIRModule()->GetFlavor() != MIRFlavor::kFlavorLmbc) { + ADDTARGETPHASE("storeloadopt", CGOptions::DoStoreLoadOpt() && !CGOptions::DoCGSSA()); + ADDTARGETPHASE("globalopt", CGOptions::DoGlobalOpt()); + } + ADDTARGETPHASE("clearrdinfo", (CGOptions::DoStoreLoadOpt()) || CGOptions::DoGlobalOpt()); + + ADDTARGETPHASE("prepeephole1", CGOptions::DoPrePeephole()); + ADDTARGETPHASE("ebo1", CGOptions::DoEBO()); + ADDTARGETPHASE("globalschedule", false); + ADDTARGETPHASE("prescheduling", !GetMIRModule()->IsJavaModule() && CGOptions::DoPreSchedule()); + ADDTARGETPHASE("raopt", CGOptions::DoPreLSRAOpt()); + ADDTARGETPHASE("cgsplitcriticaledge", GetMIRModule()->IsCModule()); + ADDTARGETPHASE("regalloc", true); + ADDTARGETPHASE("tailcallopt", GetMIRModule()->IsCModule()); + ADDTARGETPHASE("regsaves", GetMIRModule()->IsCModule() && CGOptions::DoRegSavesOpt()); + if 
(GetMIRModule()->GetFlavor() != MIRFlavor::kFlavorLmbc) { + ADDTARGETPHASE("storeloadopt", GetMIRModule()->IsCModule() && CGOptions::DoStoreLoadOpt()); + ADDTARGETPHASE("globalopt", CGOptions::DoCGSSA()); + } + ADDTARGETPHASE("clearrdinfo", GetMIRModule()->IsCModule() && (CGOptions::DoStoreLoadOpt() || CGOptions::DoGlobalOpt())); + ADDTARGETPHASE("isolatefastpath", CGOptions::DoIsolateFastPath()); + ADDTARGETPHASE("generateproepilog", true); + ADDTARGETPHASE("framefinalize", true); + ADDTARGETPHASE("cfgo", GetMIRModule()->IsCModule() && CGOptions::DoCFGO()); + ADDTARGETPHASE("peephole0", CGOptions::DoPeephole()) + ADDTARGETPHASE("postebo", CGOptions::DoEBO()); + ADDTARGETPHASE("postcfgo", CGOptions::DoCFGO()); + ADDTARGETPHASE("cgpostpeephole", CGOptions::DoPeephole()) + ADDTARGETPHASE("peephole", CGOptions::DoPeephole()) + ADDTARGETPHASE("gencfi", !GetMIRModule()->IsCModule() || CGOptions::GetInstance().IsUnwindTables() || GetMIRModule()->IsWithDbgInfo()); + ADDTARGETPHASE("dbgfixcallframeoffsets", true); + ADDTARGETPHASE("yieldpoint", GetMIRModule()->IsJavaModule() && CGOptions::IsInsertYieldPoint()); + ADDTARGETPHASE("scheduling", CGOptions::DoSchedule()); + ADDTARGETPHASE("cgsplitcriticaledge", CGOptions::DoLiteProfGen()); + ADDTARGETPHASE("cgpgogen", CGOptions::DoLiteProfGen()); + ADDTARGETPHASE("cgpgouse", CGOptions::DoLiteProfUse()); + ADDTARGETPHASE("alignanalysis", GetMIRModule()->IsCModule() && CGOptions::DoAlignAnalysis() && !CGOptions::DoLiteProfGen()); + ADDTARGETPHASE("fixshortbranch", true); + ADDTARGETPHASE("cgemit", true); diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_phi_elimination.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_phi_elimination.h new file mode 100644 index 0000000000000000000000000000000000000000..6276be4cd35272b9810b767c5168ac1cf7abdc78 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_phi_elimination.h @@ -0,0 +1,51 @@ +/* + * Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ + +#ifndef MAPLEBE_CG_INCLUDE_AARCH64_PHI_ELIMINATION_H +#define MAPLEBE_CG_INCLUDE_AARCH64_PHI_ELIMINATION_H +#include "cg_phi_elimination.h" +namespace maplebe { +class AArch64PhiEliminate : public PhiEliminate { + public: + AArch64PhiEliminate(CGFunc &f, CGSSAInfo &ssaAnalysisResult, MemPool &mp) : PhiEliminate(f, ssaAnalysisResult, mp) {} + ~AArch64PhiEliminate() override = default; + RegOperand &GetCGVirtualOpearnd(RegOperand &ssaOpnd, const Insn &curInsn /* for remat */); + + private: + void ReCreateRegOperand(Insn &insn) override; + Insn &CreateMov(RegOperand &destOpnd, RegOperand &fromOpnd) override; + void MaintainRematInfo(RegOperand &destOpnd, RegOperand &fromOpnd, bool isCopy) override; + RegOperand &CreateTempRegForCSSA(RegOperand &oriOpnd) override; + void AppendMovAfterLastVregDef(BB &bb, Insn &movInsn) const override; +}; + +class A64OperandPhiElmVisitor : public OperandPhiElmVisitor { + public: + A64OperandPhiElmVisitor(AArch64PhiEliminate *a64PhiElm, Insn &cInsn, uint32 idx) + : a64PhiEliminator(a64PhiElm), + insn(&cInsn), + idx(idx) {}; + ~A64OperandPhiElmVisitor() override = default; + void Visit(RegOperand *v) final; + void Visit(ListOperand *v) final; + void Visit(MemOperand *a64MemOpnd) final; + + private: + AArch64PhiEliminate *a64PhiEliminator; + Insn *insn; + uint32 idx; +}; +} +#endif // MAPLEBE_CG_INCLUDE_AARCH64_PHI_ELIMINATION_H diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_proepilog.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_proepilog.h new file mode 100644 index 0000000000000000000000000000000000000000..aef5b287716a2842c2ac5cf37e668fa01fbac347 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_proepilog.h @@ -0,0 +1,95 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_PROEPILOG_H +#define MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_PROEPILOG_H + +#include "proepilog.h" +#include "cg.h" +#include "operand.h" +#include "aarch64_cgfunc.h" +#include "aarch64_operand.h" +#include "aarch64_insn.h" + +namespace maplebe { +using namespace maple; + +class AArch64GenProEpilog : public GenProEpilog { + public: + AArch64GenProEpilog(CGFunc &func, MemPool &memPool) + : GenProEpilog(func), + tmpAlloc(&memPool) { + useFP = func.UseFP(); + if (func.GetMirModule().GetFlavor() == MIRFlavor::kFlavorLmbc) { + stackBaseReg = RFP; + } else { + stackBaseReg = useFP ? 
R29 : RSP; + } + } + + ~AArch64GenProEpilog() override = default; + + bool NeedProEpilog() override; + static MemOperand *SplitStpLdpOffsetForCalleeSavedWithAddInstruction( + CGFunc &cgFunc, const MemOperand &mo, uint32 bitLen, AArch64reg baseRegNum = AArch64reg::kRinvalid); + static void AppendInstructionPushPair(CGFunc &cgFunc, AArch64reg reg0, AArch64reg reg1, RegType rty, int32 offset); + static void AppendInstructionPushSingle(CGFunc &cgFunc, AArch64reg reg, RegType rty, int32 offset); + static void AppendInstructionPopSingle(CGFunc &cgFunc, AArch64reg reg, RegType rty, int32 offset); + static void AppendInstructionPopPair(CGFunc &cgFunc, AArch64reg reg0, AArch64reg reg1, RegType rty, int32 offset); + void Run() override; + private: + MemOperand *GetDownStack(); + void GenStackGuard(); + void AddStackGuard(BB &bb); + BB &GenStackGuardCheckInsn(BB &bb); + void AppendInstructionAllocateCallFrame(AArch64reg reg0, AArch64reg reg1, RegType rty); + void AppendInstructionAllocateCallFrameDebug(AArch64reg reg0, AArch64reg reg1, RegType rty); + void GeneratePushRegs(); + void GeneratePushUnnamedVarargRegs(); + void AppendInstructionStackCheck(AArch64reg reg, RegType rty, int32 offset); + void GenerateProlog(BB &bb); + + void GenerateRet(BB &bb); + bool TestPredsOfRetBB(const BB &exitBB); + void AppendInstructionDeallocateCallFrame(AArch64reg reg0, AArch64reg reg1, RegType rty); + void AppendInstructionDeallocateCallFrameDebug(AArch64reg reg0, AArch64reg reg1, RegType rty); + void GeneratePopRegs(); + void AppendJump(const MIRSymbol &funcSymbol); + void GenerateEpilog(BB &bb); + void GenerateEpilogForCleanup(BB &bb); + void AppendBBtoEpilog(BB &epilogBB, BB &newBB); + + Insn &CreateAndAppendInstructionForAllocateCallFrame(int64 argsToStkPassSize, AArch64reg reg0, AArch64reg reg1, + RegType rty); + Insn &AppendInstructionForAllocateOrDeallocateCallFrame(int64 argsToStkPassSize, AArch64reg reg0, AArch64reg reg1, + RegType rty, bool isAllocate); + void SetFastPathReturnBB(BB *bb) { + bb->SetFastPathReturn(true); + fastPathReturnBB = bb; + } + BB *GetFastPathReturnBB() { + return fastPathReturnBB; + } + MapleAllocator tmpAlloc; + static constexpr const int32 kOffset8MemPos = 8; + static constexpr const int32 kOffset16MemPos = 16; + + BB *fastPathReturnBB = nullptr; + bool useFP = true; + /* frame pointer(x29) is available as a general-purpose register if useFP is set as false */ + AArch64reg stackBaseReg = RFP; +}; +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_PROEPILOG_H */ diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_prop.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_prop.h new file mode 100644 index 0000000000000000000000000000000000000000..721a2260d59768825972a28d21d1490421efa7ad --- /dev/null +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_prop.h @@ -0,0 +1,495 @@ +/* + * Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ + +#ifndef MAPLEBE_INCLUDE_AARCH64_PROP_H +#define MAPLEBE_INCLUDE_AARCH64_PROP_H + +#include "aarch64_opt_utiles.h" +#include "cg_prop.h" +#include "aarch64_cgfunc.h" +#include "aarch64_strldr.h" +namespace maplebe { +class AArch64Prop : public CGProp { + public: + AArch64Prop(MemPool &mp, CGFunc &f, CGSSAInfo &sInfo, LiveIntervalAnalysis &ll) + : CGProp(mp, f, sInfo, ll) {} + ~AArch64Prop() override = default; + + /* do not extend life range */ + static bool IsInLimitCopyRange(VRegVersion *toBeReplaced); + private: + void CopyProp() override; + /* + * for aarch64 + * 1. extended register prop + * 2. shift register prop + * 3. add/ext/shf prop -> str/ldr + * 4. const prop + */ + void TargetProp(Insn &insn) override; + void PropPatternOpt() override; +}; + +class A64StrLdrProp { + public: + A64StrLdrProp(MemPool &mp, CGFunc &f, CGSSAInfo &sInfo, Insn &insn, CGDce &dce) + : cgFunc(&f), + ssaInfo(&sInfo), + curInsn(&insn), + a64StrLdrAlloc(&mp), + replaceVersions(a64StrLdrAlloc.Adapter()), + cgDce(&dce) {} + ~A64StrLdrProp() {} + + void Init() { + defInsn = nullptr; + } + void DoOpt(); + + private: + MemOperand *StrLdrPropPreCheck(const Insn &insn, MemPropMode prevMod = kUndef); + static MemPropMode SelectStrLdrPropMode(const MemOperand &currMemOpnd); + bool ReplaceMemOpnd(const MemOperand &currMemOpnd); + MemOperand *SelectReplaceMem(const MemOperand &currMemOpnd); + RegOperand *GetReplaceReg(RegOperand &a64Reg); + MemOperand *HandleArithImmDef(RegOperand &replace, Operand *oldOffset, int64 defVal, uint32 memSize) const; + MemOperand *SelectReplaceExt(RegOperand &base, uint32 amount, bool isSigned, uint32 memSize); + bool CheckNewMemOffset(const Insn &insn, MemOperand *newMemOpnd, uint32 opndIdx) const; + void DoMemReplace(const RegOperand &replacedReg, MemOperand &newMem, Insn &useInsn); + uint32 GetMemOpndIdx(MemOperand *newMemOpnd, const Insn &insn) const; + Insn *GetDefInsn(const RegOperand ®Opnd, std::vector &allUseInsns); + bool IsSameOpndsOfInsn(const Insn &insn1, const Insn &insn2, uint32 opndIdx); + bool IsPhiInsnValid(const Insn &phiInsn); + bool CheckSameReplace(const RegOperand &replacedReg, const MemOperand *memOpnd) const; + + CGFunc *cgFunc; + CGSSAInfo *ssaInfo; + Insn *curInsn; + MapleAllocator a64StrLdrAlloc; + MapleMap replaceVersions; + MemPropMode memPropMode = kUndef; + CGDce *cgDce = nullptr; + Insn *defInsn = nullptr; +}; + +enum ArithmeticType { + kAArch64Add, + kAArch64Sub, + kAArch64Logic, + kUndefArith +}; + +class A64ConstProp { + public: + A64ConstProp(MemPool &mp, CGFunc &f, CGSSAInfo &sInfo, Insn &insn) + : constPropMp(&mp), + cgFunc(&f), + ssaInfo(&sInfo), + curInsn(&insn) {} + void DoOpt(); + /* false : default lsl #0 true: lsl #12 (only support 12 bit left shift in aarch64) */ + static MOperator GetRegImmMOP(MOperator regregMop, bool withLeftShift); + static MOperator GetReversalMOP(MOperator arithMop); + static MOperator GetFoldMopAndVal(int64 &newVal, int64 constVal, const Insn &arithInsn); + + private: + bool ConstProp(DUInsnInfo &useDUInfo, ImmOperand &constOpnd); + /* use xzr/wzr in aarch64 to shrink register live range */ + void ZeroRegProp(DUInsnInfo &useDUInfo, RegOperand &toReplaceReg) const; + + /* replace old Insn with new Insn, update ssa info automatically */ + void ReplaceInsnAndUpdateSSA(Insn &oriInsn, Insn &newInsn) const; + ImmOperand *CanDoConstFold(const ImmOperand &value1, const ImmOperand &value2, ArithmeticType aT, bool is64Bit) const; + + /* optimization */ + bool MovConstReplace(DUInsnInfo &useDUInfo, ImmOperand &constOpnd) 
const; + bool ArithConstReplaceForOneOpnd(Insn &useInsn, DUInsnInfo &useDUInfo, + ImmOperand &constOpnd, ArithmeticType aT); + bool ArithmeticConstReplace(DUInsnInfo &useDUInfo, ImmOperand &constOpnd, ArithmeticType aT); + bool ArithmeticConstFold(DUInsnInfo &useDUInfo, const ImmOperand &constOpnd, ArithmeticType aT) const; + bool ShiftConstReplace(DUInsnInfo &useDUInfo, const ImmOperand &constOpnd); + bool BitInsertReplace(DUInsnInfo &useDUInfo, const ImmOperand &constOpnd) const; + + MemPool *constPropMp; + CGFunc *cgFunc; + CGSSAInfo *ssaInfo; + Insn *curInsn; +}; + +class CopyRegProp : public PropOptimizePattern { + public: + CopyRegProp(CGFunc &cgFunc, CGSSAInfo *cgssaInfo, LiveIntervalAnalysis *ll) + : PropOptimizePattern(cgFunc, cgssaInfo, ll) {} + ~CopyRegProp() override { + destVersion = nullptr; + srcVersion = nullptr; + } + bool CheckCondition(Insn &insn) final; + void Optimize(Insn &insn) final; + void Run() final; + + protected: + void Init() final { + destVersion = nullptr; + srcVersion = nullptr; + } + private: + bool IsValidCopyProp(const RegOperand &dstReg, const RegOperand &srcReg) const; + void VaildateImplicitCvt(RegOperand &destReg, const RegOperand &srcReg, Insn &movInsn); + bool IsNotSpecialOptimizedInsn(const Insn &insn); + void ReplaceAllUseForCopyProp(); + VRegVersion *destVersion = nullptr; + VRegVersion *srcVersion = nullptr; +}; + +class RedundantPhiProp : public PropOptimizePattern { + public: + RedundantPhiProp(CGFunc &cgFunc, CGSSAInfo *cgssaInfo) : PropOptimizePattern(cgFunc, cgssaInfo) {} + ~RedundantPhiProp() override { + destVersion = nullptr; + srcVersion = nullptr; + } + bool CheckCondition(Insn &insn) final; + void Optimize(Insn &insn) final; + void Run() final; + + protected: + void Init() final { + destVersion = nullptr; + srcVersion = nullptr; + } + + private: + VRegVersion *destVersion = nullptr; + VRegVersion *srcVersion = nullptr; +}; + +class ValidBitNumberProp : public PropOptimizePattern { + public: + ValidBitNumberProp(CGFunc &cgFunc, CGSSAInfo *cgssaInfo) : PropOptimizePattern(cgFunc, cgssaInfo) {} + ~ValidBitNumberProp() override { + destVersion = nullptr; + srcVersion = nullptr; + } + bool CheckCondition(Insn &insn) final; + void Optimize(Insn &insn) final; + void Run() final; + + protected: + void Init() final { + destVersion = nullptr; + srcVersion = nullptr; + } + private: + bool IsImplicitUse(const RegOperand &dstOpnd, const RegOperand &srcOpnd) const; + VRegVersion *destVersion = nullptr; + VRegVersion *srcVersion = nullptr; +}; + +/* + * frame pointer and stack pointer will not be varied in function body + * treat them as const + */ +class FpSpConstProp : public PropOptimizePattern { + public: + FpSpConstProp(CGFunc &cgFunc, CGSSAInfo *cgssaInfo) : PropOptimizePattern(cgFunc, cgssaInfo) {} + ~FpSpConstProp() override { + fpSpBase = nullptr; + shiftOpnd = nullptr; + replaced = nullptr; + } + bool CheckCondition(Insn &insn) final; + void Optimize(Insn &insn) final; + void Run() final; + + protected: + void Init() final { + fpSpBase = nullptr; + shiftOpnd = nullptr; + aT = kUndefArith; + replaced = nullptr; + } + + private: + bool GetValidSSAInfo(Operand &opnd); + void PropInMem(DUInsnInfo &useDUInfo, Insn &useInsn); + void PropInArith(DUInsnInfo &useDUInfo, Insn &useInsn, ArithmeticType curAT); + void PropInCopy(DUInsnInfo &useDUInfo, Insn &useInsn, MOperator oriMop); + int64 ArithmeticFold(int64 valInUse, ArithmeticType useAT) const; + + RegOperand *fpSpBase = nullptr; + ImmOperand *shiftOpnd = nullptr; + ArithmeticType aT = 
kUndefArith; + VRegVersion *replaced = nullptr; +}; + +/* + * This pattern do: + * 1) + * uxtw vreg:Rm validBitNum:[64], vreg:Rn validBitNum:[32] + * ------> + * mov vreg:Rm validBitNum:[64], vreg:Rn validBitNum:[32] + * 2) + * ldrh R201, [...] + * and R202, R201, #65520 + * uxth R203, R202 + * -------> + * ldrh R201, [...] + * and R202, R201, #65520 + * mov R203, R202 + */ +class ExtendMovPattern : public PropOptimizePattern { + public: + ExtendMovPattern(CGFunc &cgFunc, CGSSAInfo *cgssaInfo) : PropOptimizePattern(cgFunc, cgssaInfo) {} + ~ExtendMovPattern() override = default; + bool CheckCondition(Insn &insn) final; + void Optimize(Insn &insn) final; + void Run() final; + + protected: + void Init() final; + + private: + bool BitNotAffected(const Insn &insn, uint32 validNum); /* check whether significant bits are affected */ + bool CheckSrcReg(regno_t srcRegNo, uint32 validNum); + + MOperator replaceMop = MOP_undef; +}; + +class ExtendShiftPattern : public PropOptimizePattern { + public: + ExtendShiftPattern(CGFunc &cgFunc, CGSSAInfo *cgssaInfo) : PropOptimizePattern(cgFunc, cgssaInfo) {} + ~ExtendShiftPattern() override { + defInsn = nullptr; + newInsn = nullptr; + curInsn = nullptr; + } + bool IsSwapInsn(const Insn &insn) const; + void SwapOpnd(Insn &insn); + bool CheckAllOpndCondition(Insn &insn); + bool CheckCondition(Insn &insn) final; + void Optimize(Insn &insn) final; + void Run() final; + void DoExtendShiftOpt(Insn &insn); + + protected: + void Init() final; + + private: + void SelectExtendOrShift(const Insn &def); + SuffixType CheckOpType(const Operand &lastOpnd) const; + void ReplaceUseInsn(Insn &use, const Insn &def, uint32 amount); + void SetExMOpType(const Insn &use); + void SetLsMOpType(const Insn &use); + + MOperator replaceOp = 0; + uint32 replaceIdx = 0; + ExtendShiftOperand::ExtendOp extendOp = ExtendShiftOperand::kUndef; + BitShiftOperand::ShiftOp shiftOp = BitShiftOperand::kUndef; + Insn *defInsn = nullptr; + Insn *newInsn = nullptr; + Insn *curInsn = nullptr; + bool optSuccess = false; + ExMOpType exMOpType = kExUndef; + LsMOpType lsMOpType = kLsUndef; + bool is64BitSize = false; +}; + +/* + * For example: + * 1) lsr R102, R101, #1 + * lsr R103, R102, #6 ====> lsr R103, R101, #7 + * + * 2) add R102, R101, #1 + * sub R103, R102, #1 ====> Replace R103 with R101 + * + * Condition: + * the immVal can not be negative + */ +class A64ConstFoldPattern : public PropOptimizePattern { + public: + A64ConstFoldPattern(CGFunc &cgFunc, CGSSAInfo *ssaInfo) + : PropOptimizePattern(cgFunc, ssaInfo) {} + ~A64ConstFoldPattern() override { + defInsn = nullptr; + dstOpnd = nullptr; + srcOpnd = nullptr; + } + bool CheckCondition(Insn &insn) final; + void Optimize(Insn &insn) final; + void Run() final; + + protected: + void Init() final { + defInsn = nullptr; + dstOpnd = nullptr; + srcOpnd = nullptr; + defDstOpnd = nullptr; + useFoldType = kFoldUndef; + defFoldType = kFoldUndef; + optType = kOptUndef; + is64Bit = false; + } + + private: + enum FoldType : uint8 { + kAdd, + kSub, + kLsr, + kLsl, + kAsr, + kAnd, + kOrr, + kEor, + kFoldUndef + }; + enum OptType : uint8 { + kNegativeDef, /* negative the immVal of defInsn */ + kNegativeUse, /* negative the immVal of useInsn */ + kNegativeBoth, /* negative the immVal of both defInsn and useInsn */ + kPositive, /* do not change the immVal of both defInsn and useInsn */ + kLogicalAnd, /* for kAnd */ + kLogicalOrr, /* for kOrr */ + kLogicalEor, /* for kEor */ + kOptUndef + }; + constexpr static uint32 kFoldTypeSize = 8; + OptType 
constFoldTable[kFoldTypeSize][kFoldTypeSize] = { + /* defInsn: kAdd kSub kLsr kLsl kAsr kAnd kOrr kEor */ + {kPositive, kNegativeDef, kOptUndef, kOptUndef, kOptUndef, kOptUndef, + kOptUndef, kOptUndef}, /* useInsn == kAdd */ + {kNegativeUse, kNegativeBoth, kOptUndef, kOptUndef, kOptUndef, kOptUndef, + kOptUndef, kOptUndef}, /* useInsn == kSub */ + {kOptUndef, kOptUndef, kPositive, kOptUndef, kOptUndef, kOptUndef, + kOptUndef, kOptUndef}, /* useInsn == kLsr */ + {kOptUndef, kOptUndef, kOptUndef, kPositive, kOptUndef, kOptUndef, + kOptUndef, kOptUndef}, /* useInsn == kLsl */ + {kOptUndef, kOptUndef, kOptUndef, kOptUndef, kPositive, kOptUndef, + kOptUndef, kOptUndef}, /* useInsn == kAsr */ + {kOptUndef, kOptUndef, kOptUndef, kOptUndef, kOptUndef, kLogicalAnd, + kOptUndef, kOptUndef}, /* useInsn == kAnd */ + {kOptUndef, kOptUndef, kOptUndef, kOptUndef, kOptUndef, kOptUndef, + kLogicalOrr, kOptUndef}, /* useInsn == kOrr */ + {kOptUndef, kOptUndef, kOptUndef, kOptUndef, kOptUndef, kOptUndef, + kOptUndef, kLogicalEor}, /* useInsn == kEor */ + }; + + std::pair SelectFoldTypeAndCheck64BitSize(const Insn &insn) const; + ImmOperand &GetNewImmOpnd(const ImmOperand &immOpnd, int64 newImmVal) const; + MOperator GetNewMop(bool isNegativeVal, MOperator curMop) const; + int64 GetNewImmVal(const Insn &insn, const ImmOperand &defImmOpnd) const; + void ReplaceWithNewInsn(Insn &insn, const ImmOperand &immOpnd, int64 newImmVal); + bool IsDefInsnValid(const Insn &curInsn, const Insn &validDefInsn); + bool IsPhiInsnValid(const Insn &curInsn, const Insn &phiInsn); + bool IsCompleteOptimization(); + Insn *defInsn = nullptr; + RegOperand *dstOpnd = nullptr; + RegOperand *srcOpnd = nullptr; + RegOperand *defDstOpnd = nullptr; + FoldType useFoldType = kFoldUndef; + FoldType defFoldType = kFoldUndef; + OptType optType = kOptUndef; + bool is64Bit = false; + using TypeAndSize = std::pair; +}; + +/* + * optimization for call convention + * example: + * [BB26] [BB43] + * sub R287, R101, R275 sub R279, R101, R275 + * \ / + * \ / + * [BB27] + * <---- insert new phi: R403, (R275 <26>, R275 <43>) + * old phi: R297, (R287 <26>, R279 <43>) + * / \ + * / \ + * [BB28] \ + * sub R310, R101, R309 \ + * | \ + * | \ + * [BB17] [BB29] [BB44] + * sub R314, R101, R275 | / + * \ | / + * \ | / + * \ | / + * \ | / + * [BB18] + * <---- insert new phi: R404, (R275 <17>, R309 <29>, R403 <44>) + * old phi: R318, (R314 <17>, R310 <29>, R297 <44>) + * mov R1, R318 ====> sub R1, R101, R404 + * / \ + * / \ + * / \ + * [BB19] [BB34] + * sub R336, R101, R335 / + * \ / + * \ / + * \ / + * [BB20] + * <---- insert new phi: R405, (R335 <19>, R404<34>) + * old phi: R340, (R336 <19>, R318 <34>) + * mov R1, R340 ====> sub R1, R101, R405 + */ +class A64PregCopyPattern : public PropOptimizePattern { + public: + A64PregCopyPattern(CGFunc &cgFunc, CGSSAInfo *cgssaInfo) : PropOptimizePattern(cgFunc, cgssaInfo) {} + ~A64PregCopyPattern() override { + firstPhiInsn = nullptr; + } + bool CheckCondition(Insn &insn) override; + void Optimize(Insn &insn) override; + void Run() override; + + protected: + void Init() override { + validDefInsns.clear(); + firstPhiInsn = nullptr; + differIdx = -1; + differOrigNO = 0; + isCrossPhi = false; + } + + private: + bool CheckUselessDefInsn(const Insn *defInsn) const; + bool CheckValidDefInsn(const Insn *defInsn); + bool CheckMultiUsePoints(const Insn *defInsn) const; + bool CheckPhiCaseCondition(Insn &defInsn); + bool DFSFindValidDefInsns(Insn *curDefInsn, std::vector &visitedPhiDefs, + std::unordered_map &visited); + Insn 
&CreateNewPhiInsn(std::unordered_map &newPhiList, Insn *curInsn); + RegOperand &DFSBuildPhiInsn(Insn *curInsn, std::unordered_map &visited); + RegOperand *CheckAndGetExistPhiDef(Insn &phiInsn, std::vector &validDifferRegNOs) const; + std::vector validDefInsns; + Insn *firstPhiInsn = nullptr; + int differIdx = -1; + regno_t differOrigNO = 0; + bool isCrossPhi = false; +}; + +class A64ReplaceRegOpndVisitor : public ReplaceRegOpndVisitor { + public: + A64ReplaceRegOpndVisitor(CGFunc &f, Insn &cInsn, uint32 cIdx, RegOperand &oldRegister, RegOperand &newRegister) + : ReplaceRegOpndVisitor(f, cInsn, cIdx, oldRegister, newRegister) {} + ~A64ReplaceRegOpndVisitor() override = default; + private: + void Visit(RegOperand *v) final; + void Visit(ListOperand *v) final; + void Visit(MemOperand *a64memOpnd) final; + void Visit(PhiOperand *v) final; +}; +} +#endif /* MAPLEBE_INCLUDE_AARCH64_PROP_H */ diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_ra_opt.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_ra_opt.h new file mode 100644 index 0000000000000000000000000000000000000000..9ab20564714caf4e764a3447ec89c48227b80b6e --- /dev/null +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_ra_opt.h @@ -0,0 +1,176 @@ +/* + * Copyright (c) [2021] Futurewei Technologies Co., Ltd. All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan Permissive Software License v2. + * You can use this software according to the terms and conditions of the MulanPSL - 2.0. + * You may obtain a copy of MulanPSL - 2.0 at: + * + * https://opensource.org/licenses/MulanPSL-2.0 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the MulanPSL - 2.0 for more details. 
+ */ + +#ifndef MAPLEBE_INCLUDE_CG_AARCH64RAOPT_H +#define MAPLEBE_INCLUDE_CG_AARCH64RAOPT_H + +#include "cg.h" +#include "ra_opt.h" +#include "aarch64_cg.h" +#include "aarch64_insn.h" +#include "aarch64_operand.h" + +namespace maplebe { +class X0OptInfo { + public: + X0OptInfo() : movSrc(nullptr), replaceReg(0), renameInsn(nullptr), renameOpnd(nullptr), renameReg(0) {} + ~X0OptInfo() = default; + + inline RegOperand *GetMovSrc() const { + return movSrc; + } + + inline regno_t GetReplaceReg() const { + return replaceReg; + } + + inline Insn *GetRenameInsn() const { + return renameInsn; + } + + inline Operand *GetRenameOpnd() const { + return renameOpnd; + } + + inline regno_t GetRenameReg() const { + return renameReg; + } + + inline void SetMovSrc(RegOperand *srcReg) { + movSrc = srcReg; + } + + inline void SetReplaceReg(regno_t regno) { + replaceReg = regno; + } + + inline void SetRenameInsn(Insn *insn) { + renameInsn = insn; + } + + inline void ResetRenameInsn() { + renameInsn = nullptr; + } + + inline void SetRenameOpnd(Operand *opnd) { + renameOpnd = opnd; + } + + inline void SetRenameReg(regno_t regno) { + renameReg = regno; + } + + private: + RegOperand *movSrc; + regno_t replaceReg; + Insn *renameInsn; + Operand *renameOpnd; + regno_t renameReg; +}; + +class RaX0Opt { + public: + explicit RaX0Opt(CGFunc* func) : cgFunc(func) {} + ~RaX0Opt() = default; + + bool PropagateX0CanReplace(Operand *opnd, regno_t replaceReg) const; + bool PropagateRenameReg(Insn *nInsn, const X0OptInfo &optVal) const; + bool PropagateX0DetectX0(const Insn *insn, X0OptInfo &optVal) const; + bool PropagateX0DetectRedefine(const InsnDesc *md, const Insn *ninsn, const X0OptInfo &optVal, uint32 index) const; + bool PropagateX0Optimize(const BB *bb, const Insn *insn, X0OptInfo &optVal) const; + bool PropagateX0ForCurrBb(BB *bb, const X0OptInfo &optVal) const; + void PropagateX0ForNextBb(BB *nextBb, const X0OptInfo &optVal) const; + void PropagateX0(); + + private: + CGFunc *cgFunc; +}; + +class ParamRegOpt { + public: + ParamRegOpt(CGFunc *func, DomAnalysis *dom) : cgFunc(func), domInfo(dom) {} + ~ParamRegOpt() = default; + + void HandleParamReg(); + void CollectRefBBs(RegOperand &movDest, std::set &refBBs); + void TryToSplitParamReg(RegOperand &movDest, Insn &posInsn); + BB* GetCommondDom(std::set &refBBs); + bool DominatorAll(uint32 domBB, std::set &refBBs) const; + void SplitAtDomBB(RegOperand &movDest, BB &domBB, Insn &posInsn) const; + void SetDumpInfo(bool val) { + dumpInfo = val; + } + + private: + CGFunc *cgFunc; + DomAnalysis *domInfo; + bool dumpInfo = false; +}; + +class VregRenameInfo { + public: + VregRenameInfo() = default; + + ~VregRenameInfo() = default; + + BB *firstBBLevelSeen = nullptr; + BB *lastBBLevelSeen = nullptr; + uint32 numDefs = 0; + uint32 numUses = 0; + uint32 numInnerDefs = 0; + uint32 numInnerUses = 0; + uint32 largestUnusedDistance = 0; + uint8 innerMostloopLevelSeen = 0; +}; + +class VregRename { + public: + VregRename(CGFunc *func, MemPool *pool) : cgFunc(func), memPool(pool), alloc(pool), renameInfo(alloc.Adapter()) { + renameInfo.resize(cgFunc->GetMaxRegNum()); + ccRegno = static_cast(&cgFunc->GetOrCreateRflag())->GetRegisterNumber(); + } + ~VregRename() = default; + + void PrintRenameInfo(regno_t regno) const; + void PrintAllRenameInfo() const; + + void RenameFindLoopVregs(const CGFuncLoops *loop); + void RenameFindVregsToRename(const CGFuncLoops *loop); + bool IsProfitableToRename(const VregRenameInfo *info) const; + void RenameProfitableVreg(RegOperand *ropnd, const 
CGFuncLoops *loop); + void RenameGetFuncVregInfo(); + void UpdateVregInfo(regno_t vreg, BB *bb, bool isInner, bool isDef); + void VregLongLiveRename(); + + CGFunc *cgFunc = nullptr; + MemPool *memPool = nullptr; + MapleAllocator alloc; + Bfs *bfs = nullptr; + MapleVector renameInfo; + uint32 maxRegnoSeen = 0; + regno_t ccRegno = 0; +}; + +class AArch64RaOpt : public RaOpt { + public: + AArch64RaOpt(CGFunc &func, MemPool &pool) : RaOpt(func, pool) {} + ~AArch64RaOpt() override = default; + void Run() override; + + private: +}; +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_AARCH64RAOPT_H */ diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_rce.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_rce.h new file mode 100644 index 0000000000000000000000000000000000000000..ceb6ea0b38ac900211e85f80abc38df5dbfbe60c --- /dev/null +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_rce.h @@ -0,0 +1,141 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_CG_AARCH64_RCE_H +#define MAPLEBE_INCLUDE_CG_AARCH64_RCE_H + +#include "cg_rce.h" +#include "aarch64_cgfunc.h" + +namespace maplebe { +static std::size_t g_hashSeed = 0; +using InsnPtr = Insn*; +/* + * Redundancy elimination with the same right value + * Example: + * mov R140, #31161 + * movk R140(use){implicit-def: R141}, #40503, LSL #16 + * ... + * mov R149, #31161 + * movk R149(use){implicit-def: R150}, #40503, LSL #16 ===> redundant + * + * 1) do not support vector ops & mem ops currently + */ +class AArch64RedundantComputeElim : public RedundantComputeElim { + public: + AArch64RedundantComputeElim(CGFunc &f, CGSSAInfo &info, MemPool &mp) : + RedundantComputeElim(f, info, mp), candidates(rceAlloc.Adapter()) {} + ~AArch64RedundantComputeElim() override {} + + void Run() override; + void DumpHash() const; + + private: + struct InsnRHSHash { + std::size_t operator()(const InsnPtr &insn) const { + std::string hashS = (insn->IsVectorOp() ? 
std::to_string(++g_hashSeed) : + std::to_string(insn->GetMachineOpcode())); + uint32 opndNum = insn->GetOperandSize(); + hashS += std::to_string(opndNum); + for (uint i = 0; i < opndNum; ++i) { + Operand &opnd = insn->GetOperand(i); + Operand::OperandType opndKind = opnd.GetKind(); + if (opnd.IsRegister() && !static_cast(opnd).IsPhysicalRegister() && !insn->OpndIsDef(i)) { + hashS += static_cast(opnd).GetHashContent(); + } else if (opndKind == Operand::kOpdImmediate) { + hashS += static_cast(opnd).GetHashContent(); + } else if (opndKind == Operand::kOpdExtend) { + hashS += static_cast(opnd).GetHashContent(); + } else if (opndKind == Operand::kOpdShift) { + hashS += static_cast(opnd).GetHashContent(); + } else if (opnd.IsRegister() && insn->OpndIsDef(i) && insn->OpndIsUse(i)) { + hashS += static_cast(opnd).GetHashContent(); + } else if (opnd.IsRegister() && insn->OpndIsDef(i)) { + continue; + } else { + hashS += std::to_string(++g_hashSeed); + } + } + return std::hash{}(hashS); + } + }; + + struct InsnRHSEqual { + bool operator()(const InsnPtr &insn1, const InsnPtr &insn2) const { + if (insn1->GetMachineOpcode() != insn2->GetMachineOpcode()) { + return false; + } + if (insn1->IsVectorOp()) { + return false; + } + uint32 opndNum1 = insn1->GetOperandSize(); + uint32 opndNum2 = insn2->GetOperandSize(); + if (opndNum1 != opndNum2) { + return false; + } + for (uint32 i = 0; i < opndNum1; ++i) { + Operand &opnd1 = insn1->GetOperand(i); + Operand::OperandType opk1 = opnd1.GetKind(); + Operand &opnd2 = insn2->GetOperand(i); + /* There are cases that the operand is both def and use */ + if (opnd1.IsRegister() && opnd2.IsRegister() && insn1->OpndIsDef(i) && insn2->OpndIsDef(i) && + !insn1->OpndIsUse(i) && !insn2->OpndIsUse(i)) { + continue; + } + if (!opnd1.BasicEquals(opnd2)) { + return false; + } + if (opk1 == Operand::kOpdRegister) { + if (!static_cast(opnd1).Equals(opnd2)) { + return false; + } + if (static_cast(opnd1).IsPhysicalRegister() || + static_cast(opnd2).IsPhysicalRegister()) { + return false; + } + } else if (opk1 == Operand::kOpdImmediate) { + if (!static_cast(opnd1).Equals(opnd2)) { + return false; + } + } else if (opk1 == Operand::kOpdExtend) { + if (!static_cast(opnd1).Equals(opnd2)) { + return false; + } + } else if (opk1 == Operand::kOpdShift) { + if (!static_cast(opnd1).Equals(opnd2)) { + return false; + } + } else { + return false; + } + } + return true; + } + }; + + bool DoOpt(BB *bb); + bool IsBothDefUseCase(VRegVersion &version) const; + bool CheckFakeOptmization(const Insn &existInsn) const; + void CheckCondition(const Insn &existInsn, const Insn &curInsn); + void CheckBothDefAndUseChain(RegOperand *curDstOpnd, RegOperand *existDstOpnd); + std::size_t ComputeDefUseHash(const Insn &insn, const RegOperand *replaceOpnd) const; + DUInsnInfo *GetDefUseInsnInfo(VRegVersion &defVersion); + MOperator GetNewMop(const RegOperand &curDstOpnd, const RegOperand &existDstOpnd) const; + void Optimize(BB &curBB, Insn &curInsn, RegOperand &curDstOpnd, RegOperand &existDstOpnd) const; + MapleUnorderedSet candidates; + bool doOpt = true; + bool isBothDefUse = false; +}; +} /* namespace maplebe */ +#endif /* MAPLEBE_INCLUDE_CG_AARCH64_RCE_H */ diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_reaching.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_reaching.h new file mode 100644 index 0000000000000000000000000000000000000000..5f0363fdd69b9673e8c0bd41f93108c07c5cc215 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_reaching.h @@ -0,0 +1,74 @@ +/* + * 
Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_REACHING_H +#define MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_REACHING_H + +#include "reaching.h" +#include "aarch64_operand.h" + +namespace maplebe { +class AArch64ReachingDefinition : public ReachingDefinition { + public: + AArch64ReachingDefinition(CGFunc &func, MemPool &memPool) : ReachingDefinition(func, memPool) {} + ~AArch64ReachingDefinition() override = default; + std::vector FindRegDefBetweenInsnGlobal(uint32 regNO, Insn *startInsn, Insn *endInsn) const final; + std::vector FindMemDefBetweenInsn(uint32 offset, const Insn *startInsn, Insn *endInsn) const final; + bool FindRegUseBetweenInsn(uint32 regNO, Insn *startInsn, Insn *endInsn, InsnSet ®UseInsnSet) const final; + bool FindRegUseBetweenInsnGlobal(uint32 regNO, Insn *startInsn, Insn *endInsn, BB* movBB) const final; + bool FindMemUseBetweenInsn(uint32 offset, Insn *startInsn, const Insn *endInsn, + InsnSet &memUseInsnSet) const final; + bool HasRegDefBetweenInsnGlobal(uint32 regNO, Insn &startInsn, Insn &endInsn) const; + bool DFSFindRegDefBetweenBB(const BB &startBB, const BB &endBB, uint32 regNO, + std::vector &visitedBB) const; + InsnSet FindDefForRegOpnd(Insn &insn, uint32 indexOrRegNO, bool isRegNO = false) const final; + InsnSet FindDefForMemOpnd(Insn &insn, uint32 indexOrOffset, bool isOffset = false) const final; + InsnSet FindUseForMemOpnd(Insn &insn, uint8 index, bool secondMem = false) const final; + bool FindRegUsingBetweenInsn(uint32 regNO, Insn *startInsn, const Insn *endInsn) const; + + protected: + void InitStartGen() final; + void InitEhDefine(BB &bb) final; + void InitGenUse(BB &bb, bool firstTime = true) final; + void GenAllAsmDefRegs(BB &bb, Insn &insn, uint32 index) final; + void GenAllAsmUseRegs(BB &bb, Insn &insn, uint32 index) final; + void GenAllCallerSavedRegs(BB &bb, Insn &insn) final; + bool IsRegKilledByCallInsn(const Insn &insn, regno_t regNO) const final; + bool KilledByCallBetweenInsnInSameBB(const Insn &startInsn, const Insn &endInsn, regno_t regNO) const final; + void AddRetPseudoInsn(BB &bb) final; + void AddRetPseudoInsns() final; + bool IsCallerSavedReg(uint32 regNO) const final; + void FindRegDefInBB(uint32 regNO, BB &bb, InsnSet &defInsnSet) const final; + void FindMemDefInBB(uint32 offset, BB &bb, InsnSet &defInsnSet) const final; + void DFSFindDefForRegOpnd(const BB &startBB, uint32 regNO, std::vector &visitedBB, + InsnSet &defInsnSet) const final; + void DFSFindDefForMemOpnd(const BB &startBB, uint32 offset, std::vector &visitedBB, + InsnSet &defInsnSet) const final; + int32 GetStackSize() const final; + + private: + void InitInfoForMemOperand(Insn &insn, Operand &opnd, bool isDef); + void InitInfoForListOpnd(const BB &bb, const Operand &opnd); + void InitInfoForConditionCode(const BB &bb); + void InitInfoForRegOpnd(const BB &bb, Operand &opnd, bool isDef); + void InitMemInfoForClearStackCall(Insn &callInsn); + inline bool CallInsnClearDesignateStackRef(const Insn &callInsn, int64 
offset) const; + int64 GetEachMemSizeOfPair(MOperator opCode) const; + bool DFSFindRegInfoBetweenBB(const BB startBB, const BB &endBB, uint32 regNO, std::vector &visitedBB, + std::list &pathStatus, DumpType infoType) const; + bool DFSFindRegDomianBetweenBB(const BB startBB, uint32 regNO, std::vector &visitedBB) const; +}; +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_REACHING_H */ diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_reg_coalesce.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_reg_coalesce.h new file mode 100644 index 0000000000000000000000000000000000000000..4698d4b2a83c5f49bcab80a71934877f1cfd68bd --- /dev/null +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_reg_coalesce.h @@ -0,0 +1,57 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_REGCOALESCE_H +#define MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_REGCOALESCE_H +#include "reg_coalesce.h" +#include "aarch64_isa.h" +#include "live.h" + +namespace maplebe { +class AArch64LiveIntervalAnalysis : public LiveIntervalAnalysis { + public: + AArch64LiveIntervalAnalysis(CGFunc &func, MemPool &memPool) + : LiveIntervalAnalysis(func, memPool), + vregLive(alloc.Adapter()), + candidates(alloc.Adapter()) {} + + ~AArch64LiveIntervalAnalysis() override = default; + + void ComputeLiveIntervals() override; + bool IsUnconcernedReg(const RegOperand ®Opnd) const; + LiveInterval *GetOrCreateLiveInterval(regno_t regNO); + void UpdateCallInfo(); + void SetupLiveIntervalByOp(const Operand &op, Insn &insn, bool isDef); + void ComputeLiveIntervalsForEachDefOperand(Insn &insn); + void ComputeLiveIntervalsForEachUseOperand(Insn &insn); + void SetupLiveIntervalInLiveOut(regno_t liveOut, const BB &bb, uint32 currPoint); + void CoalesceRegPair(RegOperand ®Dest, RegOperand ®Src); + void CoalesceRegisters() override; + void CollectMoveForEachBB(BB &bb, std::vector &movInsns) const; + void CoalesceMoves(std::vector &movInsns, bool phiOnly); + void CheckInterference(LiveInterval &li1, LiveInterval &li2) const; + void CollectCandidate(); + std::string PhaseName() const { + return "regcoalesce"; + } + + private: + static bool IsRegistersCopy(Insn &insn); + MapleUnorderedSet vregLive; + MapleSet candidates; +}; + +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_REGCOALESCE_H */ diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_reg_info.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_reg_info.h new file mode 100644 index 0000000000000000000000000000000000000000..0af05c9890574241f64e282a2a2039c7e532f39e --- /dev/null +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_reg_info.h @@ -0,0 +1,143 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_REG_INFO_H +#define MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_REG_INFO_H +#include "reg_info.h" +#include "aarch64_operand.h" +#include "aarch64_insn.h" +#include "aarch64_abi.h" + +namespace maplebe { + +class AArch64RegInfo : public RegisterInfo { + public: + explicit AArch64RegInfo(MapleAllocator &mallocator) : RegisterInfo(mallocator) {} + + ~AArch64RegInfo() override = default; + + bool IsGPRegister(regno_t regNO) const override { + return AArch64isa::IsGPRegister(static_cast(regNO)); + } + /* phys reg which can be pre-Assignment */ + bool IsPreAssignedReg(regno_t regNO) const override { + return AArch64Abi::IsParamReg(static_cast(regNO)); + } + regno_t GetIntRetReg(uint32 idx) override { + CHECK_FATAL(idx < AArch64Abi::kNumIntParmRegs, "index out of range in IntRetReg"); + return AArch64Abi::kIntReturnRegs[idx]; + } + regno_t GetFpRetReg(uint32 idx) override { + CHECK_FATAL(idx < AArch64Abi::kNumFloatParmRegs, "index out of range in FloatRetReg"); + return AArch64Abi::kFloatReturnRegs[idx]; + } + bool IsAvailableReg(regno_t regNO) const override { + return AArch64Abi::IsAvailableReg(static_cast(regNO)); + } + /* Those registers can not be overwrite. */ + bool IsUntouchableReg(regno_t regNO) const override{ + if ((regNO == RSP) || (regNO == RFP) || regNO == RZR) { + return true; + } + /* when yieldpoint is enabled, the RYP(x19) can not be used. */ + if (GetCurrFunction()->GetCG()->GenYieldPoint() && (regNO == RYP)) { + return true; + } + return false; + } + uint32 GetIntRegsParmsNum() override { + return AArch64Abi::kNumIntParmRegs; + } + uint32 GetFloatRegsParmsNum() override { + return AArch64Abi::kNumFloatParmRegs; + } + uint32 GetIntRetRegsNum() override { + return AArch64Abi::kNumIntParmRegs; + } + uint32 GetFpRetRegsNum() override { + return AArch64Abi::kNumFloatParmRegs; + } + uint32 GetNormalUseOperandNum() override { + return AArch64Abi::kNormalUseOperandNum; + } + uint32 GetIntParamRegIdx(regno_t regNO) const override { + return static_cast(regNO - R0); + } + uint32 GetFpParamRegIdx(regno_t regNO) const override { + return static_cast(regNO - V0); + } + regno_t GetLastParamsIntReg() override { + return R7; + } + regno_t GetLastParamsFpReg() override { + return V7; + } + uint32 GetAllRegNum() override { + return kAllRegNum; + } + regno_t GetInvalidReg() override { + return kRinvalid; + } + bool IsVirtualRegister(const RegOperand ®Opnd) override { + return regOpnd.GetRegisterNumber() > kAllRegNum; + } + bool IsVirtualRegister(regno_t regno) override { + return regno > kAllRegNum; + } + regno_t GetReservedSpillReg() override { + return R16; + } + regno_t GetSecondReservedSpillReg() override { + return R17; + } + regno_t GetYieldPointReg() const override { + return RYP; + } + regno_t GetStackPointReg() const override { + return RSP; + } + + void Init() override; + void Fini() override; + void SaveCalleeSavedReg(MapleSet savedRegs) override; + bool IsSpecialReg(regno_t regno) const override; + bool IsCalleeSavedReg(regno_t regno) const override; + bool IsYieldPointReg(regno_t regno) const override; + bool IsUnconcernedReg(regno_t regNO) const override; + bool IsUnconcernedReg(const RegOperand 
®Opnd) const override; + bool IsFramePointReg(regno_t regNO) const override { + return (regNO == RFP); + } + bool IsReservedReg(regno_t regNO, bool doMultiPass) const override; + bool IsSpillRegInRA(regno_t regNO, bool has3RegOpnd) override; + RegOperand *GetOrCreatePhyRegOperand(regno_t regNO, uint32 size, RegType kind, uint32 flag) override; + Insn *BuildStrInsn(uint32 regSize, PrimType stype, RegOperand &phyOpnd, MemOperand &memOpnd) override; + Insn *BuildLdrInsn(uint32 regSize, PrimType stype, RegOperand &phyOpnd, MemOperand &memOpnd) override; + MemOperand *AdjustMemOperandIfOffsetOutOfRange(MemOperand *memOpnd, regno_t vrNum, + bool isDest, Insn &insn, regno_t regNum, bool &isOutOfRange) override; + + regno_t GetIntSpillFillReg(size_t idx) const override { + static regno_t intRegs[kSpillMemOpndNum] = { R10, R11, R12, R13 }; + ASSERT(idx < kSpillMemOpndNum, "index out of range"); + return intRegs[idx]; + } + regno_t GetFpSpillFillReg(size_t idx) const override { + static regno_t fpRegs[kSpillMemOpndNum] = { V16, V17, V18, V19 }; + ASSERT(idx < kSpillMemOpndNum, "index out of range"); + return fpRegs[idx]; + } +}; +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_REG_INFO_H */ diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_regsaves.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_regsaves.h new file mode 100644 index 0000000000000000000000000000000000000000..a0be8767f9ad7a78acad231d614a70324d237e6b --- /dev/null +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_regsaves.h @@ -0,0 +1,267 @@ +/* + * Copyright (c) [2022] Futurewei Technologies Co., Ltd. All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan Permissive Software License v2. + * You can use this software according to the terms and conditions of the MulanPSL - 2.0. + * You may obtain a copy of MulanPSL - 2.0 at: + * + * https://opensource.org/licenses/MulanPSL-2.0 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the MulanPSL - 2.0 for more details. 
+ */ + +#ifndef MAPLEBE_INCLUDE_CG_AARCH64REGSAVESOPT_H +#define MAPLEBE_INCLUDE_CG_AARCH64REGSAVESOPT_H + +#include "cg.h" +#include "regsaves.h" +#include "aarch64_cg.h" +#include "aarch64_insn.h" +#include "aarch64_operand.h" + +namespace maplebe { + +#define BBID uint32 + +/* Saved callee-save reg info */ +class SavedRegInfo { + public: + bool insertAtLastMinusOne = false; + explicit SavedRegInfo(MapleAllocator &alloc) + : saveSet(alloc.Adapter()), + restoreEntrySet(alloc.Adapter()), + restoreExitSet(alloc.Adapter()) {} + + bool ContainSaveReg(regno_t r) { + if (saveSet.find(r) != saveSet.end()) { + return true; + } + return false; + } + + bool ContainEntryReg(regno_t r) { + if (restoreEntrySet.find(r) != restoreEntrySet.end()) { + return true; + } + return false; + } + + bool ContainExitReg(regno_t r) { + if (restoreExitSet.find(r) != restoreExitSet.end()) { + return true; + } + return false; + } + + void InsertSaveReg(regno_t r) { + (void)saveSet.insert(r); + } + + void InsertEntryReg(regno_t r) { + (void)restoreEntrySet.insert(r); + } + + void InsertExitReg(regno_t r) { + (void)restoreExitSet.insert(r); + } + + MapleSet &GetSaveSet() { + return saveSet; + } + + MapleSet &GetEntrySet() { + return restoreEntrySet; + } + + MapleSet &GetExitSet() { + return restoreExitSet; + } + + void RemoveSaveReg(regno_t r) { + (void)saveSet.erase(r); + } + + private: + MapleSet saveSet; + MapleSet restoreEntrySet; + MapleSet restoreExitSet; +}; + +/* BBs info for saved callee-saved reg */ +class SavedBBInfo { + public: + explicit SavedBBInfo(MapleAllocator &alloc) : bbList (alloc.Adapter()) {} + + MapleSet &GetBBList() { + return bbList; + } + + void InsertBB(BB *bb) { + (void)bbList.insert(bb); + } + + void RemoveBB(BB *bb) { + (void)bbList.erase(bb); + } + + private: + MapleSet bbList; +}; + +class AArch64RegSavesOpt : public RegSavesOpt { + public: + AArch64RegSavesOpt(CGFunc &func, MemPool &pool, DomAnalysis &dom, PostDomAnalysis &pdom) + : RegSavesOpt(func, pool), + domInfo(&dom), + pDomInfo(&pdom), + bbSavedRegs(alloc.Adapter()), + regSavedBBs(alloc.Adapter()), + regOffset(alloc.Adapter()), + visited(alloc.Adapter()), + id2bb(alloc.Adapter()) { + bbSavedRegs.resize(func.NumBBs()); + for (size_t i = 0; i < bbSavedRegs.size(); ++i) { + bbSavedRegs[i] = nullptr; + } + regSavedBBs.resize(sizeof(CalleeBitsType)<<3); + for (size_t i = 0; i < regSavedBBs.size(); ++i) { + regSavedBBs[i] = nullptr; + } + } + ~AArch64RegSavesOpt() override = default; + + using CalleeBitsType = uint64 ; + + void InitData(); + void CollectLiveInfo(const BB &bb, const Operand &opnd, bool isDef, bool isUse); + void GenerateReturnBBDefUse(const BB &bb); + void ProcessCallInsnParam(BB &bb); + void ProcessAsmListOpnd(const BB &bb, const Operand &opnd, uint32 idx); + void ProcessListOpnd(const BB &bb, const Operand &opnd); + void ProcessMemOpnd(const BB &bb, Operand &opnd); + void ProcessCondOpnd(const BB &bb); + void ProcessOperands(const Insn &insn, const BB &bb); + void GenAccDefs(); + void GenRegDefUse(); + bool CheckForUseBeforeDefPath(); + void PrintBBs() const; + int CheckCriteria(BB *bb, regno_t reg) const; + void CheckCriticalEdge(BB *bb, AArch64reg reg); + bool AlreadySavedInDominatorList(const BB *bb, regno_t reg) const; + BB* FindLoopDominator(BB *bb, regno_t reg, bool *done); + void CheckAndRemoveBlksFromCurSavedList(SavedBBInfo *sp, BB *bbDom, regno_t reg); + void DetermineCalleeSaveLocationsDoms(); + void RevertToRestoreAtEpilog(AArch64reg reg); + void DetermineCalleeSaveLocationsPre(); + void 
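+  /* counterpart of the save-placement methods above: decides where each callee-saved
+     register is restored, presumably driven by the post-dominator info (pDomInfo) */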
DetermineCalleeRestoreLocations(); + int32 FindCalleeBase() const; + void SetupRegOffsets(); + void InsertCalleeSaveCode(); + void InsertCalleeRestoreCode(); + void PrintSaveLocs(AArch64reg reg); + void Run() override; + + DomAnalysis *GetDomInfo() const { + return domInfo; + } + + PostDomAnalysis *GetPostDomInfo() const { + return pDomInfo; + } + + Bfs *GetBfs() const { + return bfs; + } + + CalleeBitsType *GetCalleeBitsDef() { + return calleeBitsDef; + } + + CalleeBitsType *GetCalleeBitsUse() { + return calleeBitsUse; + } + + CalleeBitsType *GetCalleeBitsAcc() { + return calleeBitsAcc; + } + + CalleeBitsType GetBBCalleeBits(CalleeBitsType *data, BBID bid) const { + return data[bid]; + } + + void SetCalleeBit(CalleeBitsType *dest, BBID bidD, CalleeBitsType src) { + dest[bidD] = src; + } + + void SetCalleeBit(CalleeBitsType *data, BBID bid, regno_t reg) { + CalleeBitsType mask = 1ULL << RegBitMap(reg); + if ((GetBBCalleeBits(data, bid) & mask) == 0) { + data[bid] = GetBBCalleeBits(data, bid) | mask; + } + } + + void ResetCalleeBit(CalleeBitsType * data, BBID bid, regno_t reg) { + CalleeBitsType mask = 1ULL << RegBitMap(reg); + data[bid] = GetBBCalleeBits(data, bid) & ~mask; + } + + bool IsCalleeBitSet(CalleeBitsType * data, BBID bid, regno_t reg) const { + CalleeBitsType mask = 1ULL << RegBitMap(reg); + return GetBBCalleeBits(data, bid) & mask; + } + + /* AArch64 specific callee-save registers bit positions + 0 9 10 33 -- position + R19 .. R28 V8 .. V15 V16 .. V31 -- regs */ + uint32 RegBitMap(regno_t reg) const { + uint32 r; + if (reg <= R28) { + r = (reg - R19); + } else { + r = ((R28 - R19) + 1) + (reg - V8); + } + return r; + } + + regno_t ReverseRegBitMap(uint32 reg) const { + if (reg < 10) { + return static_cast(R19 + reg); + } else { + return static_cast((V8 + reg) - (R28 - R19 + 1)); + } + } + + SavedRegInfo *GetbbSavedRegsEntry(BBID bid) { + if (bbSavedRegs[bid] == nullptr) { + bbSavedRegs[bid] = memPool->New(alloc); + } + return bbSavedRegs[bid]; + } + + void SetId2bb(BB *bb) { + id2bb[bb->GetId()] = bb; + } + + BB *GetId2bb(BBID bid) { + return id2bb[bid]; + } + + private: + DomAnalysis *domInfo; + PostDomAnalysis *pDomInfo; + Bfs *bfs = nullptr; + CalleeBitsType *calleeBitsDef = nullptr; + CalleeBitsType *calleeBitsUse = nullptr; + CalleeBitsType *calleeBitsAcc = nullptr; + MapleVector bbSavedRegs; /* set of regs to be saved in a BB */ + MapleVector regSavedBBs; /* set of BBs to be saved for a reg */ + MapleMap regOffset; /* save offset of each register */ + MapleSet visited; /* temp */ + MapleMap id2bb; /* bbid to bb* mapping */ +}; +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_AARCH64REGSAVESOPT_H */ diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_rematerialize.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_rematerialize.h new file mode 100644 index 0000000000000000000000000000000000000000..1f35c2b9bf2240c5d7399906e37e17f26c2b2d67 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_rematerialize.h @@ -0,0 +1,40 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_REMATERIALIZE_H +#define MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_REMATERIALIZE_H + +#include "rematerialize.h" + +namespace maplebe { +class AArch64Rematerializer : public Rematerializer { + public: + AArch64Rematerializer() = default; + virtual ~AArch64Rematerializer() = default; + private: + bool IsRematerializableForConstval(int64 val, uint32 bitLen) const override; + bool IsRematerializableForDread(int32 offset) const override; + + std::vector RematerializeForConstval(CGFunc &cgFunc, RegOperand ®Op, + const LiveRange &lr) override; + + std::vector RematerializeForAddrof(CGFunc &cgFunc, RegOperand ®Op, + int32 offset) override; + + std::vector RematerializeForDread(CGFunc &cgFunc, RegOperand ®Op, + int32 offset, PrimType type) override; +}; +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_REMATERIALIZE_H */ diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_schedule.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_schedule.h new file mode 100644 index 0000000000000000000000000000000000000000..dc22d8b88f2d34654abc1484d5402d8c75ac73b5 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_schedule.h @@ -0,0 +1,291 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_SCHEDULE_H +#define MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_SCHEDULE_H + +#include "schedule.h" +#include "aarch64_operand.h" + +namespace maplebe { +enum RegisterType : uint8 { + kRegisterUndef, + kRegisterInt, + kRegisterFloat, + kRegisterCc, + kRegisterLast, +}; + +class ScheduleProcessInfo { + public: + explicit ScheduleProcessInfo(uint32 size) { + availableReadyList.reserve(size); + scheduledNodes.reserve(size); + } + + virtual ~ScheduleProcessInfo() = default; + + uint32 GetLastUpdateCycle() const { + return lastUpdateCycle; + } + + void SetLastUpdateCycle(uint32 updateCycle) { + lastUpdateCycle = updateCycle; + } + + uint32 GetCurrCycle() const { + return currCycle; + } + + void IncCurrCycle() { + ++currCycle; + } + + void DecAdvanceCycle() { + advanceCycle--; + } + + uint32 GetAdvanceCycle() const { + return advanceCycle; + } + + void SetAdvanceCycle(uint32 cycle) { + advanceCycle = cycle; + } + + void ClearAvailableReadyList() { + availableReadyList.clear(); + } + + void PushElemIntoAvailableReadyList(DepNode *node) { + availableReadyList.emplace_back(node); + } + + size_t SizeOfAvailableReadyList() const { + return availableReadyList.size(); + } + + bool AvailableReadyListIsEmpty() const { + return availableReadyList.empty(); + } + + void SetAvailableReadyList(const std::vector &tempReadyList) { + availableReadyList = tempReadyList; + } + + const std::vector &GetAvailableReadyList() const { + return availableReadyList; + } + + const std::vector &GetAvailableReadyList() { + return availableReadyList; + } + + void PushElemIntoScheduledNodes(DepNode *node) { + node->SetState(kScheduled); + node->SetSchedCycle(currCycle); + node->OccupyUnits(); + scheduledNodes.emplace_back(node); + } + + bool IsFirstSeparator() const { + return isFirstSeparator; + } + + void ResetIsFirstSeparator() { + isFirstSeparator = false; + } + + size_t SizeOfScheduledNodes() const { + return scheduledNodes.size(); + } + + const std::vector &GetScheduledNodes() const { + return scheduledNodes; + } + + private: + std::vector availableReadyList; + std::vector scheduledNodes; + uint32 lastUpdateCycle = 0; + uint32 currCycle = 0; + uint32 advanceCycle = 0; + bool isFirstSeparator = true; +}; + + +class AArch64ScheduleProcessInfo : public ScheduleProcessInfo { + public: + explicit AArch64ScheduleProcessInfo(uint32 size) : ScheduleProcessInfo(size) {} + ~AArch64ScheduleProcessInfo() override = default; + + /* recover register type which is not recorded in live analysis */ + static RegType GetRegisterType(CGFunc &f, regno_t regNO); + void VaryLiveRegSet(CGFunc &f, regno_t regNO, bool isInc); + void VaryFreeRegSet(CGFunc &f, std::set regNOs, DepNode &node); + + uint32 GetFreeIntRegs(DepNode &node) { + return (freeIntRegNodeSet.count(&node)) > 0 ? freeIntRegNodeSet.find(&node)->second : 0; + } + void IncFreeIntRegNode(DepNode &node) { + if (freeIntRegNodeSet.count(&node) == 0) { + freeIntRegNodeSet.emplace(std::pair(&node, 1)); + } else { + freeIntRegNodeSet.find(&node)->second++; + } + } + const std::map &GetFreeIntRegNodeSet() const { + return freeIntRegNodeSet; + } + void IncFreeFpRegNode(DepNode &node) { + if (freeFpRegNodeSet.count(&node) == 0) { + freeFpRegNodeSet.emplace(std::pair(&node, 1)); + } else { + freeFpRegNodeSet.find(&node)->second++; + } + } + uint32 GetFreeFpRegs(DepNode &node) { + return (freeFpRegNodeSet.count(&node)) > 0 ? 
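+    /* number of FP registers that would be released by scheduling this node;
+       nodes without an entry in freeFpRegNodeSet release none */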
freeFpRegNodeSet.find(&node)->second : 0; + } + const std::map &GetFreeFpRegNodeSet() const { + return freeFpRegNodeSet; + } + + void ClearALLFreeRegNodeSet() { + freeIntRegNodeSet.clear(); + freeFpRegNodeSet.clear(); + } + + size_t FindIntLiveReg(regno_t reg) const { + return intLiveRegSet.count(reg); + } + void IncIntLiveRegSet(regno_t reg) { + intLiveRegSet.emplace(reg); + } + void DecIntLiveRegSet(regno_t reg) { + intLiveRegSet.erase(reg); + } + size_t FindFpLiveReg(regno_t reg) const { + return fpLiveRegSet.count(reg); + } + void IncFpLiveRegSet(regno_t reg) { + fpLiveRegSet.emplace(reg); + } + void DecFpLiveRegSet(regno_t reg) { + fpLiveRegSet.erase(reg); + } + + size_t SizeOfIntLiveRegSet() const { + return intLiveRegSet.size(); + } + + size_t SizeOfCalleeSaveLiveRegister(bool isInt) { + size_t num = 0; + if (isInt) { + for (auto regNO : intLiveRegSet) { + if (regNO > static_cast(R19)) { + num++; + } + } + } else { + for (auto regNO : fpLiveRegSet) { + if (regNO > static_cast(V16)) { + num++; + } + } + } + return num; + } + + size_t SizeOfFpLiveRegSet() const { + return fpLiveRegSet.size(); + } + private: + std::set intLiveRegSet; + std::set fpLiveRegSet; + std::map freeIntRegNodeSet; + std::map freeFpRegNodeSet; +}; + +class AArch64Schedule : public Schedule { + public: + AArch64Schedule(CGFunc &func, MemPool &memPool, LiveAnalysis &live, const std::string &phaseName) + : Schedule(func, memPool, live, phaseName) { + intCalleeSaveThreshold = func.UseFP() ? intCalleeSaveThresholdBase : intCalleeSaveThresholdEnhance; + } + ~AArch64Schedule() override = default; + protected: + void DumpDepGraph(const MapleVector &nodes) const; + void DumpScheduleResult(const MapleVector &nodes, SimulateType type) const; + void GenerateDot(const BB &bb, const MapleVector &nodes) const; + void EraseNodeFromNodeList(const DepNode &target, MapleVector &nodeList) override; + void FindAndCombineMemoryAccessPair(const std::vector &memList) override; + void RegPressureScheduling(BB &bb, MapleVector &nodes) override; + + private: + enum CSRResult : uint8 { + kNode1, + kNode2, + kDoCSP /* can do csp further */ + }; + void Init() override; + void MemoryAccessPairOpt() override; + void ClinitPairOpt() override; + uint32 DoSchedule() override; + uint32 DoBruteForceSchedule() override; + uint32 SimulateOnly() override; + void UpdateBruteForceSchedCycle() override; + void IterateBruteForce(DepNode &targetNode, MapleVector &readyList, uint32 currCycle, + MapleVector &scheduledNodes, uint32 &maxCycleCount, + MapleVector &optimizedScheduledNodes) override; + bool CanCombine(const Insn &insn) const override; + void ListScheduling(bool beforeRA) override; + void BruteForceScheduling(const BB &bb); + void SimulateScheduling(const BB &bb); + void FinalizeScheduling(BB &bb, const DataDepBase &dataDepBase) override; + uint32 ComputeEstart(uint32 cycle) override; + void ComputeLstart(uint32 maxEstart) override; + void UpdateELStartsOnCycle(uint32 cycle) override; + void RandomTest() override; + void EraseNodeFromReadyList(const DepNode &target) override; + uint32 GetNextSepIndex() const override; + void CountUnitKind(const DepNode &depNode, uint32 array[], const uint32 arraySize) const override; + static bool IfUseUnitKind(const DepNode &depNode, uint32 index); + void UpdateReadyList(DepNode &targetNode, MapleVector &readyList, bool updateEStart) override; + void UpdateScheduleProcessInfo(AArch64ScheduleProcessInfo &info) const; + void UpdateAdvanceCycle(AArch64ScheduleProcessInfo &scheduleInfo, const DepNode &targetNode) 
const; + bool CheckSchedulable(AArch64ScheduleProcessInfo &info) const; + void SelectNode(AArch64ScheduleProcessInfo &scheduleInfo); + static void DumpDebugInfo(const ScheduleProcessInfo &scheduleInfo); + bool CompareDepNode(DepNode &node1, DepNode &node2, AArch64ScheduleProcessInfo &scheduleInfo) const; + void CalculateMaxUnitKindCount(ScheduleProcessInfo &scheduleInfo) const; + void UpdateReleaseRegInfo(AArch64ScheduleProcessInfo &scheduleInfo); + std::set CanFreeRegister(const DepNode &node) const; + void UpdateLiveRegSet(AArch64ScheduleProcessInfo &scheduleInfo, const DepNode& node); + void InitLiveRegSet(AArch64ScheduleProcessInfo &scheduleInfo); + int CalSeriesCycles(const MapleVector &nodes) const; + CSRResult DoCSR(DepNode &node1, DepNode &node2, AArch64ScheduleProcessInfo &scheduleInfo) const; + AArch64Schedule::CSRResult ScheduleCrossCall(const DepNode &node1, const DepNode &node2) const; + int intCalleeSaveThreshold = 0; + + static uint32 maxUnitIndex; + static int intRegPressureThreshold; + static int fpRegPressureThreshold; + static int intCalleeSaveThresholdBase; + static int intCalleeSaveThresholdEnhance; + static int fpCalleeSaveThreshold; +}; +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_SCHEDULE_H */ diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_ssa.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_ssa.h new file mode 100644 index 0000000000000000000000000000000000000000..6f04d94d96d18d446ead6e434c7b9acac3979358 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_ssa.h @@ -0,0 +1,101 @@ +/* + * Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MAPLEBE_CG_INCLUDE_AARCH64_SSA_H +#define MAPLEBE_CG_INCLUDE_AARCH64_SSA_H + +#include "cg_ssa.h" +#include "aarch64_insn.h" + +namespace maplebe { +class AArch64CGSSAInfo : public CGSSAInfo { + public: + AArch64CGSSAInfo(CGFunc &f, DomAnalysis &da, MemPool &mp, MemPool &tmp) : CGSSAInfo(f, da, mp, tmp) {} + ~AArch64CGSSAInfo() override = default; + void DumpInsnInSSAForm(const Insn &insn) const override; + RegOperand *GetRenamedOperand(RegOperand &vRegOpnd, bool isDef, Insn &curInsn, uint32 idx) override; + MemOperand *CreateMemOperand(MemOperand &memOpnd, bool isOnSSA) const; /* Second input parameter:false = on cgfunc */ + void ReplaceInsn(Insn &oriInsn, Insn &newInsn) override; + void ReplaceAllUse(VRegVersion *toBeReplaced, VRegVersion *newVersion) override; + void CreateNewInsnSSAInfo(Insn &newInsn) override; + void CheckAsmDUbinding(Insn &insn, const VRegVersion *toBeReplaced, VRegVersion *newVersion); + + private: + void RenameInsn(Insn &insn) override; + VRegVersion *RenamedOperandSpecialCase(RegOperand &vRegOpnd, Insn &curInsn, uint32 idx); + RegOperand *CreateSSAOperand(RegOperand &virtualOpnd) override; +}; + +class A64SSAOperandRenameVisitor : public SSAOperandVisitor { + public: + A64SSAOperandRenameVisitor(AArch64CGSSAInfo &cssaInfo, Insn &cInsn, const OpndDesc &cProp, uint32 idx) + : SSAOperandVisitor(cInsn, cProp, idx), ssaInfo(&cssaInfo) {} + ~A64SSAOperandRenameVisitor() override = default; + void Visit(RegOperand *v) final; + void Visit(ListOperand *v) final; + void Visit(MemOperand *v) final; + + private: + AArch64CGSSAInfo *ssaInfo; +}; + +class A64OpndSSAUpdateVsitor : public SSAOperandVisitor, + public OperandVisitor { + public: + explicit A64OpndSSAUpdateVsitor(AArch64CGSSAInfo &cssaInfo) : ssaInfo(&cssaInfo) {} + ~A64OpndSSAUpdateVsitor() override = default; + void MarkIncrease() { + isDecrease = false; + }; + void MarkDecrease() { + isDecrease = true; + }; + bool HasDeleteDef() const { + return !deletedDef.empty(); + } + void Visit(RegOperand *v) final; + void Visit(ListOperand *v) final; + void Visit(MemOperand *v) final; + void Visit(PhiOperand *v) final; + + bool IsPhi() const { + return isPhi; + } + + void SetPhi(bool flag) { + isPhi = flag; + } + + private: + void UpdateRegUse(uint32 ssaIdx); + void UpdateRegDef(uint32 ssaIdx); + AArch64CGSSAInfo *ssaInfo; + bool isDecrease = false; + std::set deletedDef; + bool isPhi = false; +}; + +class A64SSAOperandDumpVisitor : public SSAOperandDumpVisitor { + public: + explicit A64SSAOperandDumpVisitor(const MapleUnorderedMap &allssa) + : SSAOperandDumpVisitor(allssa) {} + ~A64SSAOperandDumpVisitor() override = default; + void Visit(RegOperand *v) final; + void Visit(ListOperand *v) final; + void Visit(MemOperand *v) final; + void Visit(PhiOperand *v) final; +}; +} + +#endif // MAPLEBE_CG_INCLUDE_AARCH64_SSA_H diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_strldr.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_strldr.h new file mode 100644 index 0000000000000000000000000000000000000000..f826c555c05cfd474ec084cbf0e57d0047edc4e2 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_strldr.h @@ -0,0 +1,81 @@ + /* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_STRLDR_H +#define MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_STRLDR_H + +#include "strldr.h" +#include "aarch64_reaching.h" +#include "aarch64_operand.h" + +namespace maplebe { +using namespace maple; +enum MemPropMode : uint8 { + kUndef, + kPropBase, + kPropOffset, + kPropSignedExtend, + kPropUnsignedExtend, + kPropShift +}; + +class AArch64StoreLoadOpt : public StoreLoadOpt { + public: + AArch64StoreLoadOpt(CGFunc &func, MemPool &memPool) + : StoreLoadOpt(func, memPool), localAlloc(&memPool), str2MovMap(localAlloc.Adapter()) {} + ~AArch64StoreLoadOpt() override = default; + void Run() final; + void DoStoreLoadOpt(); + void DoLoadZeroToMoveTransfer(const Insn &strInsn, short strSrcIdx, + const InsnSet &memUseInsnSet) const; + void DoLoadToMoveTransfer(Insn &strInsn, short strSrcIdx, + short memSeq, const InsnSet &memUseInsnSet); + bool CheckStoreOpCode(MOperator opCode) const; + static bool CheckNewAmount(const Insn &insn, uint32 newAmount); + + private: + void StrLdrIndexModeOpt(Insn &currInsn); + bool CheckReplaceReg(Insn &defInsn, Insn &currInsn, InsnSet &replaceRegDefSet, regno_t replaceRegNo); + bool CheckDefInsn(Insn &defInsn, Insn &currInsn); + bool CheckNewMemOffset(const Insn &insn, MemOperand *newMemOpnd, uint32 opndIdx); + MemOperand *HandleArithImmDef(RegOperand &replace, Operand *oldOffset, int64 defVal); + MemOperand *SelectReplaceMem(Insn &defInsn, Insn &curInsn, RegOperand &base, Operand *offset); + MemOperand *SelectReplaceExt(const Insn &defInsn, RegOperand &base, bool isSigned); + bool CanDoMemProp(const Insn *insn); + bool CanDoIndexOpt(const MemOperand &currMemOpnd); + void MemPropInit(); + void SelectPropMode(const MemOperand &currMemOpnd); + int64 GetOffsetForNewIndex(Insn &defInsn, Insn &insn, regno_t baseRegNO, uint32 memOpndSize) const; + MemOperand *SelectIndexOptMode(Insn &insn, const MemOperand &curMemOpnd); + bool ReplaceMemOpnd(Insn &insn, regno_t regNo, RegOperand &base, Operand *offset); + void MemProp(Insn &insn); + void ProcessStrPair(Insn &insn); + void ProcessStr(Insn &insn); + void GenerateMoveLiveInsn(RegOperand &resRegOpnd, RegOperand &srcRegOpnd, + Insn &ldrInsn, Insn &strInsn, short memSeq); + void GenerateMoveDeadInsn(RegOperand &resRegOpnd, RegOperand &srcRegOpnd, + Insn &ldrInsn, Insn &strInsn, short memSeq); + bool HasMemBarrier(const Insn &ldrInsn, const Insn &strInsn) const; + bool IsAdjacentBB(Insn &defInsn, Insn &curInsn) const; + MapleAllocator localAlloc; + /* the max number of mov insn to optimize. */ + static constexpr uint8 kMaxMovNum = 2; + MapleMap str2MovMap; + MemPropMode propMode = kUndef; + uint32 amount = 0; + bool removeDefInsn = false; +}; +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_STRLDR_H */ diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_tailcall.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_tailcall.h new file mode 100644 index 0000000000000000000000000000000000000000..9eba2c0e42c64c8116672e41c9a4013d1b4c7a24 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_tailcall.h @@ -0,0 +1,46 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co., Ltd. All rights reserved. 
+ * + * OpenArkCompiler is licensed under the Mulan Permissive Software License v2. + * You can use this software according to the terms and conditions of the MulanPSL - 2.0. + * You may obtain a copy of MulanPSL - 2.0 at: + * + * https://opensource.org/licenses/MulanPSL-2.0 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the MulanPSL - 2.0 for more details. + */ + +#ifndef MAPLEBE_INCLUDE_CG_AARCH64TAILCALL_H +#define MAPLEBE_INCLUDE_CG_AARCH64TAILCALL_H + +#include "cg.h" +#include "tailcall.h" +#include "aarch64_insn.h" +#include "aarch64_operand.h" + +namespace maplebe { +class AArch64TailCallOpt : public TailCallOpt { + public: + AArch64TailCallOpt(MemPool &pool, CGFunc &func) : + TailCallOpt(pool, func) {} + ~AArch64TailCallOpt() override = default; + bool IsFuncNeedFrame(Insn &callInsn) const override; + bool InsnIsCallCand(Insn &insn) const override; + bool InsnIsLoadPair(Insn &insn) const override; + bool InsnIsMove(Insn &insn) const override; + bool InsnIsIndirectCall(Insn &insn) const override; + bool InsnIsCall(Insn &insn) const override; + bool InsnIsUncondJump(Insn &insn) const override; + bool InsnIsAddWithRsp(Insn &insn) const override; + bool OpndIsStackRelatedReg(RegOperand &opnd) const override; + bool OpndIsR0Reg(RegOperand &opnd) const override; + bool OpndIsCalleeSaveReg(RegOperand &opnd) const override; + bool IsAddOrSubOp(MOperator mOp) const override; + void ReplaceInsnMopWithTailCall(Insn &insn) override; +}; +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_AARCH64TAILCALL_H */ diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_utils.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_utils.h new file mode 100644 index 0000000000000000000000000000000000000000..3379b9be1d3ac2a734185c7d632bfaccdf8eb80d --- /dev/null +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_utils.h @@ -0,0 +1,38 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ + +#ifndef MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_UTILS_H +#define MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_UTILS_H + +#include "aarch64_cg.h" +#include "aarch64_operand.h" +#include "aarch64_cgfunc.h" + +namespace maplebe { + +/** + * Get or create new memory operand for load instruction loadIns for which + * machine opcode will be replaced with newLoadMop. 
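+ * Callers are expected to check the result for nullptr before committing to the
+ * opcode change, since nullptr means no suitable memory operand could be built.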
+ * + * @param loadIns load instruction + * @param newLoadMop new opcode for load instruction + * @return memory operand for new load machine opcode + * or nullptr if memory operand can't be obtained + */ +MemOperand *GetOrCreateMemOperandForNewMOP(CGFunc &cgFunc, + const Insn &loadIns, MOperator newLoadMop); +} // namespace maplebe + +#endif // MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_UTILS_H diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_validbit_opt.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_validbit_opt.h new file mode 100644 index 0000000000000000000000000000000000000000..8e5350314c6e38c5ac4bf460a5e8614e746dd1bf --- /dev/null +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_validbit_opt.h @@ -0,0 +1,163 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_CG_AARCH64_VALIDBIT_OPT_H +#define MAPLEBE_INCLUDE_CG_AARCH64_VALIDBIT_OPT_H + +#include "cg_validbit_opt.h" +#include "operand.h" +#include "aarch64_cgfunc.h" + +namespace maplebe { +class AArch64ValidBitOpt : public ValidBitOpt { + public: + AArch64ValidBitOpt(CGFunc &f, CGSSAInfo &info) : ValidBitOpt(f, info) {} + ~AArch64ValidBitOpt() override = default; + + void DoOpt(BB &bb, Insn &insn) override; + void SetValidBits(Insn &insn) override; + bool SetPhiValidBits(Insn &insn) override; +}; + +/* + * Example 1) + * def w9 def w9 + * ... ... 
+ * and w4, w9, #255 ===> mov w4, w9 + * + * Example 2) + * and w6[16], w0[16], #FF00[16] mov w6, w0 + * asr w6, w6[16], #8[4] ===> asr w6, w6 + */ +class AndValidBitPattern : public ValidBitPattern { + public: + AndValidBitPattern(CGFunc &cgFunc, CGSSAInfo &info) : ValidBitPattern(cgFunc, info) {} + ~AndValidBitPattern() override { + desReg = nullptr; + srcReg = nullptr; + } + void Run(BB &bb, Insn &insn) override; + bool CheckCondition(Insn &insn) override; + std::string GetPatternName() override { + return "AndValidBitPattern"; + } + + private: + bool CheckImmValidBit(int64 andImm, uint32 andImmVB, int64 shiftImm) const; + MOperator newMop = MOP_undef; + RegOperand *desReg = nullptr; + RegOperand *srcReg = nullptr; +}; + +/* + * Example 1) + * uxth w1[16], w2[16] / uxtb w1[8], w2[8] + * ===> + * mov w1, w2 + * + * Example 2) + * ubfx w1, w2[16], #0, #16 / sbfx w1, w2[16], #0, #16 + * ===> + * mov w1, w2 + */ +class ExtValidBitPattern : public ValidBitPattern { + public: + ExtValidBitPattern(CGFunc &cgFunc, CGSSAInfo &info) : ValidBitPattern(cgFunc, info) {} + ~ExtValidBitPattern() override { + newDstOpnd = nullptr; + newSrcOpnd = nullptr; + } + void Run(BB &bb, Insn &insn) override; + bool CheckCondition(Insn &insn) override; + std::string GetPatternName() override { + return "ExtValidBitPattern"; + } + + private: + RegOperand *newDstOpnd = nullptr; + RegOperand *newSrcOpnd = nullptr; + MOperator newMop = MOP_undef; +}; + +/* + * cmp w0, #0 + * cset w1, NE --> mov w1, w0 + * + * cmp w0, #0 + * cset w1, EQ --> eor w1, w0, 1 + * + * cmp w0, #1 + * cset w1, NE --> eor w1, w0, 1 + * + * cmp w0, #1 + * cset w1, EQ --> mov w1, w0 + * + * cmp w0, #0 + * cset w0, NE -->null + * + * cmp w0, #1 + * cset w0, EQ -->null + * + * condition: + * 1. the first operand of cmp instruction must has only one valid bit + * 2. the second operand of cmp instruction must be 0 or 1 + * 3. 
flag register of cmp isntruction must not be used later + */ +class CmpCsetVBPattern : public ValidBitPattern { + public: + CmpCsetVBPattern(CGFunc &cgFunc, CGSSAInfo &info) : ValidBitPattern(cgFunc, info) {} + ~CmpCsetVBPattern() override { + cmpInsn = nullptr; + } + void Run(BB &bb, Insn &csetInsn) override; + bool CheckCondition(Insn &csetInsn) override; + std::string GetPatternName() override { + return "CmpCsetPattern"; + }; + + private: + bool IsContinuousCmpCset(const Insn &curInsn) const; + bool OpndDefByOneValidBit(const Insn &defInsn) const; + Insn *cmpInsn = nullptr; + int64 cmpConstVal = -1; +}; + +/* + * cmp w0[16], #32768 + * bge label ===> tbnz w0, #15, label + * + * bge / blt + */ +class CmpBranchesPattern : public ValidBitPattern { + public: + CmpBranchesPattern(CGFunc &cgFunc, CGSSAInfo &info) : ValidBitPattern(cgFunc, info) {} + ~CmpBranchesPattern() override { + prevCmpInsn = nullptr; + } + void Run(BB &bb, Insn &insn) override; + bool CheckCondition(Insn &insn) override; + std::string GetPatternName() override { + return "CmpBranchesPattern"; + }; + + private: + void SelectNewMop(MOperator mop); + Insn *prevCmpInsn = nullptr; + int64 newImmVal = -1; + MOperator newMop = MOP_undef; + bool is64Bit = false; +}; +} /* namespace maplebe */ +#endif /* MAPLEBE_INCLUDE_CG_AARCH64_VALIDBIT_OPT_H */ + diff --git a/src/mapleall/maple_be/include/cg/aarch64/aarch64_yieldpoint.h b/src/mapleall/maple_be/include/cg/aarch64/aarch64_yieldpoint.h new file mode 100644 index 0000000000000000000000000000000000000000..549c50a80c8abcb1717c414685a41111c4793f1f --- /dev/null +++ b/src/mapleall/maple_be/include/cg/aarch64/aarch64_yieldpoint.h @@ -0,0 +1,36 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_YIELDPOINT_H +#define MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_YIELDPOINT_H + +#include "yieldpoint.h" + +namespace maplebe { +using namespace maple; + +class AArch64YieldPointInsertion : public YieldPointInsertion { + public: + explicit AArch64YieldPointInsertion(CGFunc &func) : YieldPointInsertion(func) {} + + ~AArch64YieldPointInsertion() override = default; + + void Run() override; + + private: + void InsertYieldPoint() const; +}; +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_YIELDPOINT_H */ \ No newline at end of file diff --git a/src/mapleall/maple_be/include/cg/aarch64/mpl_atomic.h b/src/mapleall/maple_be/include/cg/aarch64/mpl_atomic.h new file mode 100644 index 0000000000000000000000000000000000000000..e034f0e9f729321e8651106f0c3a7fec4fe96e51 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/aarch64/mpl_atomic.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_CG_AARCH64_MPL_ATOMIC_H +#define MAPLEBE_INCLUDE_CG_AARCH64_MPL_ATOMIC_H + +#include "types_def.h" + +namespace maple { +enum class MemOrd : uint32 { + kNotAtomic = 0, +#define ATTR(STR) STR, +#include "memory_order_attrs.def" +#undef ATTR +}; + +MemOrd MemOrdFromU32(uint32 val); + +bool MemOrdIsAcquire(MemOrd ord); + +bool MemOrdIsRelease(MemOrd ord); +} /* namespace maple */ + +#endif /* MAPLEBE_INCLUDE_CG_AARCH64_MPL_ATOMIC_H */ \ No newline at end of file diff --git a/src/mapleall/maple_be/include/cg/abi.h b/src/mapleall/maple_be/include/cg/abi.h new file mode 100644 index 0000000000000000000000000000000000000000..57c4ad728c7d19b09d0c3be87db131df3fb0fc3d --- /dev/null +++ b/src/mapleall/maple_be/include/cg/abi.h @@ -0,0 +1,39 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_CG_ABI_H +#define MAPLEBE_INCLUDE_CG_ABI_H + +#include +#include "types_def.h" +#include "operand.h" + +namespace maplebe { +enum ArgumentClass : uint8 { + kNoClass, + kIntegerClass, + kFloatClass, + kPointerClass, + kVectorClass, + kMemoryClass, + kShortVectorClass, + kCompositeTypeHFAClass, /* Homegeneous Floating-point Aggregates for AArch64 */ + kCompositeTypeHVAClass, /* Homegeneous Short-Vector Aggregates for AArch64 */ +}; + +using regno_t = uint32_t; + +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_ABI_H */ diff --git a/src/mapleall/maple_be/include/cg/abstract_mmir.def b/src/mapleall/maple_be/include/cg/abstract_mmir.def new file mode 100644 index 0000000000000000000000000000000000000000..c2d595600a3471bde05c1df9a8928994e121d767 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/abstract_mmir.def @@ -0,0 +1,149 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan PSL v1. + * You can use this software according to the terms and conditions of the Mulan PSL v1. + * You may obtain a copy of Mulan PSL v1 at: + * + * http://license.coscl.org.cn/MulanPSL + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v1 for more details. 
+ */ + + /* Abstract Maple Machine IR */ + /* {mop, opnds, prop, latency, name, format, length} */ + DEFINE_MOP(MOP_undef, {}, ISABSTRACT,0,"","",0) + + /* conversion between all types and registers */ + DEFINE_MOP(MOP_copy_ri_8, {&OpndDesc::Reg8ID,&OpndDesc::Imm32},ISABSTRACT|ISMOVE,0,"copy_ri_8","",1) + DEFINE_MOP(MOP_copy_rr_8, {&OpndDesc::Reg8ID,&OpndDesc::Reg8IS},ISABSTRACT|ISMOVE,0,"copy_rr_8","",1) + DEFINE_MOP(MOP_copy_ri_16, {&OpndDesc::Reg16ID,&OpndDesc::Imm32},ISABSTRACT|ISMOVE,0,"copy_ri_16","",1) + DEFINE_MOP(MOP_copy_rr_16, {&OpndDesc::Reg16ID,&OpndDesc::Reg16IS},ISABSTRACT|ISMOVE,0,"copy_rr_16","",1) + DEFINE_MOP(MOP_copy_ri_32, {&OpndDesc::Reg32ID,&OpndDesc::Imm32},ISABSTRACT|ISMOVE,0,"copy_ri_32","",1) + DEFINE_MOP(MOP_copy_rr_32, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS},ISABSTRACT|ISMOVE,0,"copy_rr_32","",1) + DEFINE_MOP(MOP_copy_ri_64, {&OpndDesc::Reg64ID,&OpndDesc::Imm64},ISABSTRACT|ISMOVE,0,"copy_ri_64","",1) + DEFINE_MOP(MOP_copy_rr_64, {&OpndDesc::Reg64ID, &OpndDesc::Reg64IS},ISABSTRACT|ISMOVE,0,"copy_rr_64","",1) + + DEFINE_MOP(MOP_copy_fi_8, {&OpndDesc::Reg8FD,&OpndDesc::Imm32},ISABSTRACT|ISMOVE,0,"copy_fi_8","",1) + DEFINE_MOP(MOP_copy_ff_8, {&OpndDesc::Reg8FD,&OpndDesc::Reg8FS},ISABSTRACT|ISMOVE,0,"copy_ff_8","",1) + DEFINE_MOP(MOP_copy_fi_16, {&OpndDesc::Reg16FD,&OpndDesc::Imm32},ISABSTRACT|ISMOVE,0,"copy_fi_16","",1) + DEFINE_MOP(MOP_copy_ff_16, {&OpndDesc::Reg16FD,&OpndDesc::Reg16FS},ISABSTRACT|ISMOVE,0,"copy_ff_16","",1) + DEFINE_MOP(MOP_copy_fi_32, {&OpndDesc::Reg32FD,&OpndDesc::Imm32},ISABSTRACT|ISMOVE,0,"copy_fi_32","",1) + DEFINE_MOP(MOP_copy_ff_32, {&OpndDesc::Reg32FD,&OpndDesc::Reg32FS},ISABSTRACT|ISMOVE,0,"copy_ff_32","",1) + DEFINE_MOP(MOP_copy_fi_64, {&OpndDesc::Reg64FD,&OpndDesc::Imm64},ISABSTRACT|ISMOVE,0,"copy_fi_64","",1) + DEFINE_MOP(MOP_copy_ff_64, {&OpndDesc::Reg64FD, &OpndDesc::Reg64FS},ISABSTRACT|ISMOVE,0,"copy_ff_64","",1) + + /* register extend */ + DEFINE_MOP(MOP_zext_rr_16_8, {&OpndDesc::Reg16ID,&OpndDesc::Reg8IS},ISABSTRACT|ISCONVERSION,0,"zext_r16_r8","",1) + DEFINE_MOP(MOP_sext_rr_16_8, {&OpndDesc::Reg16ID,&OpndDesc::Reg8IS},ISABSTRACT|ISCONVERSION,0,"sext_r16_r8","",1) + DEFINE_MOP(MOP_zext_rr_32_8, {&OpndDesc::Reg32ID,&OpndDesc::Reg8IS},ISABSTRACT|ISCONVERSION,0,"zext_r32_r8","",1) + DEFINE_MOP(MOP_sext_rr_32_8, {&OpndDesc::Reg32ID,&OpndDesc::Reg8IS},ISABSTRACT|ISCONVERSION,0,"sext_r32_r8","",1) + DEFINE_MOP(MOP_zext_rr_32_16, {&OpndDesc::Reg32ID,&OpndDesc::Reg16IS},ISABSTRACT|ISCONVERSION,0,"zext_r32_r16","",1) + DEFINE_MOP(MOP_sext_rr_32_16, {&OpndDesc::Reg32ID,&OpndDesc::Reg16IS},ISABSTRACT|ISCONVERSION,0,"sext_r32_r16","",1) + + DEFINE_MOP(MOP_zext_rr_64_8, {&OpndDesc::Reg64ID,&OpndDesc::Reg8IS},ISABSTRACT|ISCONVERSION,0,"zext_r64_r8","",1) + DEFINE_MOP(MOP_sext_rr_64_8, {&OpndDesc::Reg64ID,&OpndDesc::Reg8IS},ISABSTRACT|ISCONVERSION,0,"sext_r64_r8","",1) + DEFINE_MOP(MOP_zext_rr_64_16, {&OpndDesc::Reg64ID,&OpndDesc::Reg16IS},ISABSTRACT|ISCONVERSION,0,"zext_r64_r16","",1) + DEFINE_MOP(MOP_sext_rr_64_16, {&OpndDesc::Reg64ID,&OpndDesc::Reg16IS},ISABSTRACT|ISCONVERSION,0,"sext_r64_r16","",1) + DEFINE_MOP(MOP_zext_rr_64_32, {&OpndDesc::Reg64ID,&OpndDesc::Reg32IS},ISABSTRACT|ISCONVERSION,0,"zext_r64_r32","",1) + DEFINE_MOP(MOP_sext_rr_64_32, {&OpndDesc::Reg64ID,&OpndDesc::Reg32IS},ISABSTRACT|ISCONVERSION,0,"sext_r64_r32","",1) + +/* int2float conversion */ + DEFINE_MOP(MOP_cvt_fr_u32, {&OpndDesc::Reg32FD,&OpndDesc::Reg32IS},ISABSTRACT|ISCONVERSION,0,"cvt_fr_u32","",1) + DEFINE_MOP(MOP_cvt_fr_u64, 
{&OpndDesc::Reg64FD,&OpndDesc::Reg64IS},ISABSTRACT|ISCONVERSION,0,"cvt_fr_u64","",1) + DEFINE_MOP(MOP_cvt_fr_i32, {&OpndDesc::Reg32FD,&OpndDesc::Reg32IS},ISABSTRACT|ISCONVERSION,0,"cvt_fr_i32","",1) + DEFINE_MOP(MOP_cvt_fr_i64, {&OpndDesc::Reg64FD,&OpndDesc::Reg64IS},ISABSTRACT|ISCONVERSION,0,"cvt_fr_i64","",1) + +/* float2int conversion */ + DEFINE_MOP(MOP_cvt_rf_u32, {&OpndDesc::Reg32ID,&OpndDesc::Reg32FS},ISABSTRACT|ISCONVERSION,0,"cvt_rf_u32","",1) + DEFINE_MOP(MOP_cvt_rf_u64, {&OpndDesc::Reg64ID,&OpndDesc::Reg64FS},ISABSTRACT|ISCONVERSION,0,"cvt_rf_u64","",1) + DEFINE_MOP(MOP_cvt_rf_i32, {&OpndDesc::Reg32ID,&OpndDesc::Reg32FS},ISABSTRACT|ISCONVERSION,0,"cvt_rf_i32","",1) + DEFINE_MOP(MOP_cvt_rf_i64, {&OpndDesc::Reg64ID,&OpndDesc::Reg64FS},ISABSTRACT|ISCONVERSION,0,"cvt_rf_i64","",1) + + /* float conversion */ + DEFINE_MOP(MOP_cvt_ff_64_32, {&OpndDesc::Reg64FD,&OpndDesc::Reg32FS},ISABSTRACT|ISCONVERSION,0,"cvt_ff_64_32","",1) + DEFINE_MOP(MOP_cvt_ff_32_64, {&OpndDesc::Reg32FD,&OpndDesc::Reg64IS},ISABSTRACT|ISCONVERSION,0,"cvt_ff_32_64","",1) + + /* Support transformation between memory and registers */ + DEFINE_MOP(MOP_str_8, {&OpndDesc::Reg8IS,&OpndDesc::Mem8D},ISABSTRACT|ISSTORE,0,"str_8","",1) + DEFINE_MOP(MOP_str_16, {&OpndDesc::Reg16IS,&OpndDesc::Mem16D},ISABSTRACT|ISSTORE,0,"str_16","",1) + DEFINE_MOP(MOP_str_32, {&OpndDesc::Reg32IS,&OpndDesc::Mem32D},ISABSTRACT|ISSTORE,0,"str_32","",1) + DEFINE_MOP(MOP_str_64, {&OpndDesc::Reg64IS,&OpndDesc::Mem64D},ISABSTRACT|ISSTORE,0,"str_64","",1) + DEFINE_MOP(MOP_load_8, {&OpndDesc::Reg8ID,&OpndDesc::Mem8S},ISABSTRACT|ISLOAD,0,"load_8","",1) + DEFINE_MOP(MOP_load_16, {&OpndDesc::Reg16ID,&OpndDesc::Mem16S},ISABSTRACT|ISLOAD,0,"load_16","",1) + DEFINE_MOP(MOP_load_32, {&OpndDesc::Reg32ID,&OpndDesc::Mem32S},ISABSTRACT|ISLOAD,0,"load_32","",1) + DEFINE_MOP(MOP_load_64, {&OpndDesc::Reg64ID,&OpndDesc::Mem64S},ISABSTRACT|ISLOAD,0,"load_64","",1) + DEFINE_MOP(MOP_str_f_8, {&OpndDesc::Reg8FS,&OpndDesc::Mem8D},ISABSTRACT|ISSTORE,0,"str_f_8","",1) + DEFINE_MOP(MOP_str_f_16, {&OpndDesc::Reg16FS,&OpndDesc::Mem16D},ISABSTRACT|ISSTORE,0,"str_f_16","",1) + DEFINE_MOP(MOP_str_f_32, {&OpndDesc::Reg32FS,&OpndDesc::Mem32D},ISABSTRACT|ISSTORE,0,"str_f_32","",1) + DEFINE_MOP(MOP_str_f_64, {&OpndDesc::Reg64FS,&OpndDesc::Mem64D},ISABSTRACT|ISSTORE,0,"str_f_64","",1) + DEFINE_MOP(MOP_load_f_8, {&OpndDesc::Reg8FD,&OpndDesc::Mem8S},ISABSTRACT|ISLOAD,0,"load_f_8","",1) + DEFINE_MOP(MOP_load_f_16, {&OpndDesc::Reg16FD,&OpndDesc::Mem16S},ISABSTRACT|ISLOAD,0,"load_f_16","",1) + DEFINE_MOP(MOP_load_f_32, {&OpndDesc::Reg32FD,&OpndDesc::Mem32S},ISABSTRACT|ISLOAD,0,"load_f_32","",1) + DEFINE_MOP(MOP_load_f_64, {&OpndDesc::Reg64FD,&OpndDesc::Mem64S},ISABSTRACT|ISLOAD,0,"load_f_64","",1) + + /* Support three address basic operations */ + DEFINE_MOP(MOP_add_8, {&OpndDesc::Reg8ID,&OpndDesc::Reg8IS, &OpndDesc::Reg8IS},ISABSTRACT|ISBASICOP,0,"add_8","",1) + DEFINE_MOP(MOP_add_16, {&OpndDesc::Reg16ID,&OpndDesc::Reg16IS, &OpndDesc::Reg16IS},ISABSTRACT|ISBASICOP,0,"add_16","",1) + DEFINE_MOP(MOP_add_32, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS, &OpndDesc::Reg32IS},ISABSTRACT|ISBASICOP,0,"add_32","",1) + DEFINE_MOP(MOP_add_64, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS, &OpndDesc::Reg64IS},ISABSTRACT|ISBASICOP,0,"add_64","",1) + DEFINE_MOP(MOP_sub_8, {&OpndDesc::Reg8ID,&OpndDesc::Reg8IS, &OpndDesc::Reg8IS},ISABSTRACT|ISBASICOP,0,"sub_8","",1) + DEFINE_MOP(MOP_sub_16, {&OpndDesc::Reg16ID,&OpndDesc::Reg16IS, &OpndDesc::Reg16IS},ISABSTRACT|ISBASICOP,0,"sub_16","",1) + DEFINE_MOP(MOP_sub_32, 
{&OpndDesc::Reg32ID,&OpndDesc::Reg32IS, &OpndDesc::Reg32IS},ISABSTRACT|ISBASICOP,0,"sub_32","",1) + DEFINE_MOP(MOP_sub_64, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS, &OpndDesc::Reg64IS},ISABSTRACT|ISBASICOP,0,"sub_64","",1) + DEFINE_MOP(MOP_or_8, {&OpndDesc::Reg8ID,&OpndDesc::Reg8IS, &OpndDesc::Reg8IS},ISABSTRACT|ISBASICOP,0,"or_8","",1) + DEFINE_MOP(MOP_or_16, {&OpndDesc::Reg16ID,&OpndDesc::Reg16IS, &OpndDesc::Reg16IS},ISABSTRACT|ISBASICOP,0,"or_16","",1) + DEFINE_MOP(MOP_or_32, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS, &OpndDesc::Reg32IS},ISABSTRACT|ISBASICOP,0,"or_32","",1) + DEFINE_MOP(MOP_or_64, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS, &OpndDesc::Reg64IS},ISABSTRACT|ISBASICOP,0,"or_64","",1) + DEFINE_MOP(MOP_xor_8, {&OpndDesc::Reg8ID,&OpndDesc::Reg8IS, &OpndDesc::Reg8IS},ISABSTRACT|ISBASICOP,0,"xor_8","",1) + DEFINE_MOP(MOP_xor_16, {&OpndDesc::Reg16ID,&OpndDesc::Reg16IS, &OpndDesc::Reg16IS},ISABSTRACT|ISBASICOP,0,"xor_16","",1) + DEFINE_MOP(MOP_xor_32, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS, &OpndDesc::Reg32IS},ISABSTRACT|ISBASICOP,0,"xor_32","",1) + DEFINE_MOP(MOP_xor_64, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS, &OpndDesc::Reg64IS},ISABSTRACT|ISBASICOP,0,"xor_64","",1) + DEFINE_MOP(MOP_and_8, {&OpndDesc::Reg8ID,&OpndDesc::Reg8IS, &OpndDesc::Reg8IS},ISABSTRACT|ISBASICOP,0,"and_8","",1) + DEFINE_MOP(MOP_and_16, {&OpndDesc::Reg16ID,&OpndDesc::Reg16IS, &OpndDesc::Reg16IS},ISABSTRACT|ISBASICOP,0,"and_16","",1) + DEFINE_MOP(MOP_and_32, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS, &OpndDesc::Reg32IS},ISABSTRACT|ISBASICOP,0,"and_32","",1) + DEFINE_MOP(MOP_and_64, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS, &OpndDesc::Reg64IS},ISABSTRACT|ISBASICOP,0,"and_64","",1) + + /* Support three address basic operations (Floating point) */ + DEFINE_MOP(MOP_add_f_8, {&OpndDesc::Reg8FD,&OpndDesc::Reg8FS, &OpndDesc::Reg8FS},ISABSTRACT|ISBASICOP,0,"add_8","",1) + DEFINE_MOP(MOP_add_f_16, {&OpndDesc::Reg16FD,&OpndDesc::Reg16FS, &OpndDesc::Reg16FS},ISABSTRACT|ISBASICOP,0,"add_16","",1) + DEFINE_MOP(MOP_add_f_32, {&OpndDesc::Reg32FD,&OpndDesc::Reg32FS, &OpndDesc::Reg32FS},ISABSTRACT|ISBASICOP,0,"add_32","",1) + DEFINE_MOP(MOP_add_f_64, {&OpndDesc::Reg64FD,&OpndDesc::Reg64FS, &OpndDesc::Reg64FS},ISABSTRACT|ISBASICOP,0,"add_64","",1) + DEFINE_MOP(MOP_sub_f_8, {&OpndDesc::Reg8FD,&OpndDesc::Reg8FS, &OpndDesc::Reg8FS},ISABSTRACT|ISBASICOP,0,"sub_8","",1) + DEFINE_MOP(MOP_sub_f_16, {&OpndDesc::Reg16FD,&OpndDesc::Reg16FS, &OpndDesc::Reg16FS},ISABSTRACT|ISBASICOP,0,"sub_16","",1) + DEFINE_MOP(MOP_sub_f_32, {&OpndDesc::Reg32FD,&OpndDesc::Reg32FS, &OpndDesc::Reg32FS},ISABSTRACT|ISBASICOP,0,"sub_32","",1) + DEFINE_MOP(MOP_sub_f_64, {&OpndDesc::Reg64FD,&OpndDesc::Reg64FS, &OpndDesc::Reg64FS},ISABSTRACT|ISBASICOP,0,"sub_64","",1) + + /* shift -- shl/ashr/lshr */ + DEFINE_MOP(MOP_shl_8, {&OpndDesc::Reg8ID,&OpndDesc::Reg8IS, &OpndDesc::Reg8IS},ISABSTRACT|ISSHIFT,0,"shl_8","",1) + DEFINE_MOP(MOP_shl_16, {&OpndDesc::Reg16ID,&OpndDesc::Reg16IS, &OpndDesc::Reg16IS},ISABSTRACT|ISSHIFT,0,"shl_16","",1) + DEFINE_MOP(MOP_shl_32, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS, &OpndDesc::Reg32IS},ISABSTRACT|ISSHIFT,0,"shl_32","",1) + DEFINE_MOP(MOP_shl_64, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS, &OpndDesc::Reg64IS},ISABSTRACT|ISSHIFT,0,"shl_64","",1) + DEFINE_MOP(MOP_ashr_8, {&OpndDesc::Reg8ID,&OpndDesc::Reg8IS, &OpndDesc::Reg8IS},ISABSTRACT|ISSHIFT,0,"ashr_8","",1) + DEFINE_MOP(MOP_ashr_16, {&OpndDesc::Reg16ID,&OpndDesc::Reg16IS, &OpndDesc::Reg16IS},ISABSTRACT|ISSHIFT,0,"ashr_16","",1) + DEFINE_MOP(MOP_ashr_32, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS, 
&OpndDesc::Reg32IS},ISABSTRACT|ISSHIFT,0,"ashr_32","",1) + DEFINE_MOP(MOP_ashr_64, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS, &OpndDesc::Reg64IS},ISABSTRACT|ISSHIFT,0,"ashr_64","",1) + DEFINE_MOP(MOP_lshr_8, {&OpndDesc::Reg8ID,&OpndDesc::Reg8IS, &OpndDesc::Reg8IS},ISABSTRACT|ISSHIFT,0,"lshr_8","",1) + DEFINE_MOP(MOP_lshr_16, {&OpndDesc::Reg16ID,&OpndDesc::Reg16IS, &OpndDesc::Reg16IS},ISABSTRACT|ISSHIFT,0,"lshr_16","",1) + DEFINE_MOP(MOP_lshr_32, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS, &OpndDesc::Reg32IS},ISABSTRACT|ISSHIFT,0,"lshr_32","",1) + DEFINE_MOP(MOP_lshr_64, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS, &OpndDesc::Reg64IS},ISABSTRACT|ISSHIFT,0,"lshr_64","",1) + + /* Support two address basic operations */ + DEFINE_MOP(MOP_neg_8, {&OpndDesc::Reg8ID,&OpndDesc::Reg8IS},ISABSTRACT|ISUNARYOP,0,"neg_8","",1) + DEFINE_MOP(MOP_neg_16, {&OpndDesc::Reg16ID,&OpndDesc::Reg16IS},ISABSTRACT|ISUNARYOP,0,"neg_16","",1) + DEFINE_MOP(MOP_neg_32, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS},ISABSTRACT|ISUNARYOP,0,"neg_32","",1) + DEFINE_MOP(MOP_neg_64, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS},ISABSTRACT|ISUNARYOP,0,"neg_64","",1) + DEFINE_MOP(MOP_neg_f_8, {&OpndDesc::Reg8FD,&OpndDesc::Reg8FS},ISABSTRACT|ISUNARYOP,0,"neg_f_8","",1) + DEFINE_MOP(MOP_neg_f_16, {&OpndDesc::Reg16FD,&OpndDesc::Reg16FS},ISABSTRACT|ISUNARYOP,0,"neg_f_16","",1) + DEFINE_MOP(MOP_neg_f_32, {&OpndDesc::Reg32FD,&OpndDesc::Reg32FS},ISABSTRACT|ISUNARYOP,0,"neg_f_32","",1) + DEFINE_MOP(MOP_neg_f_64, {&OpndDesc::Reg64FD,&OpndDesc::Reg64FS},ISABSTRACT|ISUNARYOP,0,"neg_f_64","",1) + DEFINE_MOP(MOP_not_8, {&OpndDesc::Reg8ID,&OpndDesc::Reg8IS},ISABSTRACT|ISUNARYOP,0,"not_8","",1) + DEFINE_MOP(MOP_not_16, {&OpndDesc::Reg16ID,&OpndDesc::Reg16IS},ISABSTRACT|ISUNARYOP,0,"not_16","",1) + DEFINE_MOP(MOP_not_32, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS},ISABSTRACT|ISUNARYOP,0,"not_32","",1) + DEFINE_MOP(MOP_not_64, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS},ISABSTRACT|ISUNARYOP,0,"not_64","",1) + + /* MOP_comment */ + DEFINE_MOP(MOP_comment, {&OpndDesc::String0S},ISABSTRACT,0,"//","0", 0) \ No newline at end of file diff --git a/src/mapleall/maple_be/include/cg/alignment.h b/src/mapleall/maple_be/include/cg/alignment.h new file mode 100644 index 0000000000000000000000000000000000000000..8c588039992dbb776a8444eaf221544b96965d21 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/alignment.h @@ -0,0 +1,89 @@ +/* + * Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ + +#ifndef MAPLEBE_INCLUDE_CG_ALIGNMENT_H +#define MAPLEBE_INCLUDE_CG_ALIGNMENT_H + +#include "cg_phase.h" +#include "maple_phase.h" +#include "cgbb.h" +#include "loop.h" + +namespace maplebe { +class AlignAnalysis { + public: + AlignAnalysis(CGFunc &func, MemPool &memP) + : cgFunc(&func), + alignAllocator(&memP), + loopHeaderBBs(alignAllocator.Adapter()), + jumpTargetBBs(alignAllocator.Adapter()), + alignInfos(alignAllocator.Adapter()), + sameTargetBranches(alignAllocator.Adapter()) {} + + virtual ~AlignAnalysis() = default; + + void AnalysisAlignment(); + void Dump(); + virtual void FindLoopHeader() = 0; + virtual void FindJumpTarget() = 0; + virtual void ComputeLoopAlign() = 0; + virtual void ComputeJumpAlign() = 0; + virtual void ComputeCondBranchAlign() = 0; + + /* filter condition */ + virtual bool IsIncludeCall(BB &bb) = 0; + virtual bool IsInSizeRange(BB &bb) = 0; + virtual bool HasFallthruEdge(BB &bb) = 0; + + std::string PhaseName() const { + return "alignanalysis"; + } + const MapleUnorderedSet &GetLoopHeaderBBs() const { + return loopHeaderBBs; + } + const MapleUnorderedSet &GetJumpTargetBBs() const { + return jumpTargetBBs; + } + const MapleUnorderedMap &GetAlignInfos() const { + return alignInfos; + } + uint32 GetAlignPower(BB &bb) { + return alignInfos[&bb]; + } + + void InsertLoopHeaderBBs(BB &bb) { + loopHeaderBBs.insert(&bb); + } + void InsertJumpTargetBBs(BB &bb) { + jumpTargetBBs.insert(&bb); + } + void InsertAlignInfos(BB &bb, uint32 power) { + alignInfos[&bb] = power; + } + + protected: + CGFunc *cgFunc; + MapleAllocator alignAllocator; + MapleUnorderedSet loopHeaderBBs; + MapleUnorderedSet jumpTargetBBs; + MapleUnorderedMap alignInfos; + MapleUnorderedMap sameTargetBranches; +}; + +MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgAlignAnalysis, maplebe::CGFunc) +MAPLE_FUNC_PHASE_DECLARE_END +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_ALIGNMENT_H */ diff --git a/src/mapleall/maple_be/include/cg/args.h b/src/mapleall/maple_be/include/cg/args.h new file mode 100644 index 0000000000000000000000000000000000000000..33e444b30e99358f3614fdd072544dd975d9185d --- /dev/null +++ b/src/mapleall/maple_be/include/cg/args.h @@ -0,0 +1,46 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MAPLEBE_INCLUDE_CG_ARGS_H +#define MAPLEBE_INCLUDE_CG_ARGS_H + +#include "cgfunc.h" +#include "cg_phase.h" + +namespace maplebe { +class MoveRegArgs { + public: + explicit MoveRegArgs(CGFunc &func) : cgFunc(&func) {} + + virtual ~MoveRegArgs() = default; + + virtual void Run() {} + + std::string PhaseName() const { + return "moveargs"; + } + + const CGFunc *GetCGFunc() const { + return cgFunc; + } + + protected: + CGFunc *cgFunc; +}; + +MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgMoveRegArgs, maplebe::CGFunc) +MAPLE_FUNC_PHASE_DECLARE_END +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_ARGS_H */ diff --git a/src/mapleall/maple_be/include/cg/asm_emit.h b/src/mapleall/maple_be/include/cg/asm_emit.h new file mode 100644 index 0000000000000000000000000000000000000000..88236bc007af2f9de5f87e9dc1aaa61090fbe378 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/asm_emit.h @@ -0,0 +1,43 @@ +/* + * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_CG_ASM_EMIT_H +#define MAPLEBE_INCLUDE_CG_ASM_EMIT_H + +#include "emit.h" + +namespace maplebe { +class AsmFuncEmitInfo : public FuncEmitInfo { + public: + explicit AsmFuncEmitInfo(CGFunc &func) : FuncEmitInfo(func) {} + virtual ~AsmFuncEmitInfo() = default; +}; + +class AsmEmitter : public Emitter { + protected: + AsmEmitter(CG &cg, const std::string &asmFileName) : Emitter(cg, asmFileName) {} + virtual ~AsmEmitter() = default; + + virtual void EmitRefToMethodDesc(FuncEmitInfo &funcEmitInfo, Emitter &emitter) = 0; + virtual void EmitRefToMethodInfo(FuncEmitInfo &funcEmitInfo, Emitter &emitter) = 0; + virtual void EmitMethodDesc(FuncEmitInfo &funcEmitInfo, Emitter &emitter) = 0; + virtual void EmitFastLSDA(FuncEmitInfo &funcEmitInfo) = 0; + virtual void EmitFullLSDA(FuncEmitInfo &funcEmitInfo) = 0; + virtual void EmitBBHeaderLabel(FuncEmitInfo &funcEmitInfo, const std::string &name, LabelIdx labIdx) = 0; + virtual void EmitJavaInsnAddr(FuncEmitInfo &funcEmitInfo) = 0; + virtual void Run(FuncEmitInfo &funcEmitInfo) = 0; +}; +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_ASM_EMIT_H */ diff --git a/src/mapleall/maple_be/include/cg/asm_info.h b/src/mapleall/maple_be/include/cg/asm_info.h new file mode 100644 index 0000000000000000000000000000000000000000..f58f6c6f56314c5889872f4779154bbd3b969cc0 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/asm_info.h @@ -0,0 +1,211 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MAPLEBE_INCLUDE_CG_ASM_INFO_H +#define MAPLEBE_INCLUDE_CG_ASM_INFO_H + +#include "maple_string.h" + +namespace maplebe { +enum AsmLabel : uint8 { + kAsmGlbl, + kAsmLocal, + kAsmWeak, + kAsmBss, + kAsmComm, + kAsmData, + kAsmAlign, + kAsmSyname, + kAsmZero, + kAsmByte, + kAsmShort, + kAsmValue, + kAsmLong, + kAsmQuad, + kAsmSize, + kAsmType, + kAsmText, + kAsmHidden +}; + +class AsmInfo { + public: + const MapleString &GetCmnt() const { + return asmCmnt; + } + + const MapleString &GetAtobt() const { + return asmAtObt; + } + + const MapleString &GetFile() const { + return asmFile; + } + + const MapleString &GetSection() const { + return asmSection; + } + + const MapleString &GetRodata() const { + return asmRodata; + } + + const MapleString &GetGlobal() const { + return asmGlobal; + } + + const MapleString &GetLocal() const { + return asmLocal; + } + + const MapleString &GetWeak() const { + return asmWeak; + } + + const MapleString &GetBss() const { + return asmBss; + } + + const MapleString &GetComm() const { + return asmComm; + } + + const MapleString &GetData() const { + return asmData; + } + + const MapleString &GetAlign() const { + return asmAlign; + } + + const MapleString &GetZero() const { + return asmZero; + } + + const MapleString &GetByte() const { + return asmByte; + } + + const MapleString &GetShort() const { + return asmShort; + } + + const MapleString &GetValue() const { + return asmValue; + } + + const MapleString &GetLong() const { + return asmLong; + } + + const MapleString &GetQuad() const { + return asmQuad; + } + + const MapleString &GetSize() const { + return asmSize; + } + + const MapleString &GetType() const { + return asmType; + } + + const MapleString &GetHidden() const { + return asmHidden; + } + + const MapleString &GetText() const { + return asmText; + } + + const MapleString &GetSet() const { + return asmSet; + } + + const MapleString &GetWeakref() const { + return asmWeakref; + } + + explicit AsmInfo(MemPool &memPool) +#if TARGX86 || TARGX86_64 + : asmCmnt("\t//\t", &memPool), +#elif TARGARM32 + : asmCmnt("\t@\t", &memPool), +#else + : asmCmnt("\t#\t", &memPool), +#endif + + asmAtObt("\t%object\t", &memPool), + asmFile("\t.file\t", &memPool), + asmSection("\t.section\t", &memPool), + asmRodata(".rodata\t", &memPool), + asmGlobal("\t.global\t", &memPool), + asmLocal("\t.local\t", &memPool), + asmWeak("\t.weak\t", &memPool), + asmBss("\t.bss\t", &memPool), + asmComm("\t.comm\t", &memPool), + asmData("\t.data\t", &memPool), + asmAlign("\t.align\t", &memPool), + asmZero("\t.zero\t", &memPool), + asmByte("\t.byte\t", &memPool), + asmShort("\t.short\t", &memPool), +#ifdef TARGARM32 + asmValue("\t.short\t", &memPool), +#else + asmValue("\t.value\t", &memPool), +#endif +#ifdef TARGARM32 + asmLong("\t.word\t", &memPool), +#else + asmLong("\t.long\t", &memPool), +#endif + asmQuad("\t.quad\t", &memPool), + asmSize("\t.size\t", &memPool), + asmType("\t.type\t", &memPool), + asmHidden("\t.hidden\t", &memPool), + asmText("\t.text\t", &memPool), + asmSet("\t.set\t", &memPool), + asmWeakref("\t.weakref\t", &memPool) {} + + ~AsmInfo() = default; + + private: + MapleString asmCmnt; + MapleString asmAtObt; + MapleString asmFile; + MapleString asmSection; + MapleString asmRodata; + MapleString asmGlobal; + MapleString asmLocal; + MapleString asmWeak; + MapleString asmBss; + MapleString asmComm; + MapleString asmData; + MapleString asmAlign; + MapleString asmZero; + MapleString asmByte; + MapleString asmShort; + MapleString asmValue; + MapleString asmLong; + 
MapleString asmQuad; + MapleString asmSize; + MapleString asmType; + MapleString asmHidden; + MapleString asmText; + MapleString asmSet; + MapleString asmWeakref; +}; +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_ASM_INFO_H */ diff --git a/src/mapleall/maple_be/include/cg/call_conv.h b/src/mapleall/maple_be/include/cg/call_conv.h new file mode 100644 index 0000000000000000000000000000000000000000..19661f0e3a28bdd92175c46daffad0a7d3eec1b5 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/call_conv.h @@ -0,0 +1,182 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_CG_CALL_CONV_H +#define MAPLEBE_INCLUDE_CG_CALL_CONV_H + +#include "types_def.h" +#include "becommon.h" + +namespace maplebe { +using namespace maple; + +/* for specifying how a parameter is passed */ +struct CCLocInfo { + regno_t reg0 = 0; /* 0 means parameter is stored on the stack */ + regno_t reg1 = 0; + regno_t reg2 = 0; /* can have up to 4 single precision fp registers */ + regno_t reg3 = 0; /* for small structure return. */ + int32 memOffset = 0; + int32 memSize = 0; + uint32 fpSize = 0; + uint32 numFpPureRegs = 0; + uint8 regCount = 0; /* number of registers <= 2 storing the return value */ + PrimType primTypeOfReg0; /* the primitive type stored in reg0 */ + PrimType primTypeOfReg1; /* the primitive type stored in reg1 */ + PrimType primTypeOfReg2; + PrimType primTypeOfReg3; + uint8 GetRegCount() const { + return regCount; + } + + PrimType GetPrimTypeOfReg0() const { + return primTypeOfReg0; + } + + PrimType GetPrimTypeOfReg1() const { + return primTypeOfReg1; + } + + PrimType GetPrimTypeOfReg2() const { + return primTypeOfReg2; + } + + PrimType GetPrimTypeOfReg3() const { + return primTypeOfReg3; + } + + regno_t GetReg0() const { + return reg0; + } + + regno_t GetReg1() const { + return reg1; + } + + regno_t GetReg2() const { + return reg2; + } + + regno_t GetReg3() const { + return reg3; + } +}; + +class LmbcFormalParamInfo { + public: + LmbcFormalParamInfo(PrimType pType, uint32 ofst, uint32 sz) + : type(nullptr), primType(pType), offset(ofst), onStackOffset(0), size(sz), regNO(0), vregNO(0), numRegs(0), + fpSize(0), isReturn(false), isPureFloat(false), isOnStack(false), hasRegassign(false) {} + + ~LmbcFormalParamInfo() = default; + + MIRStructType *GetType() { + return type; + } + void SetType(MIRStructType *ty) { + type = ty; + } + PrimType GetPrimType() const { + return primType; + } + void SetPrimType(PrimType pType) { + primType = pType; + } + uint32 GetOffset() const { + return offset; + } + void SetOffset(uint32 ofs) { + offset = ofs; + } + uint32 GetOnStackOffset() const { + return onStackOffset; + } + void SetOnStackOffset(uint32 ofs) { + onStackOffset = ofs; + } + uint32 GetSize() const { + return size; + } + void SetSize(uint32 sz) { + size = sz; + } + regno_t GetRegNO() const { + return regNO; + } + void SetRegNO(regno_t reg) { + regNO = reg; + } + regno_t GetVregNO() const { + return vregNO; + } + void SetVregNO(regno_t reg) { + vregNO = reg; + 
} + uint32 GetNumRegs() const { + return numRegs; + } + void SetNumRegs(uint32 num) { + numRegs = num; + } + uint32 GetFpSize() const { + return fpSize; + } + void SetFpSize(uint32 sz) { + fpSize = sz; + } + bool IsReturn() const { + return isReturn; + } + void SetIsReturn() { + isReturn = true; + } + bool IsPureFloat() const { + return isPureFloat; + } + void SetIsPureFloat() { + isPureFloat = true; + } + bool IsInReg() const { + return !isOnStack ; + } + bool IsOnStack() const { + return isOnStack; + } + void SetIsOnStack() { + isOnStack = true; + } + bool HasRegassign() const { + return hasRegassign; + } + void SetHasRegassign() { + hasRegassign = true; + } + private: + MIRStructType *type; + PrimType primType; + uint32 offset; + uint32 onStackOffset; /* stack location if isOnStack */ + uint32 size; /* size primtype or struct */ + regno_t regNO = 0; /* param reg num or starting reg num if numRegs > 0 */ + regno_t vregNO = 0; /* if no explicit regassing from IR, create move from param reg */ + uint32 numRegs = 0; /* number of regs for struct param */ + uint32 fpSize = 0; /* size of fp param if isPureFloat */ + bool isReturn; + bool isPureFloat = false; + bool isOnStack; /* large struct is passed by a copy on stack */ + bool hasRegassign; +}; +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_CALL_CONV_H */ diff --git a/src/mapleall/maple_be/include/cg/cfgo.h b/src/mapleall/maple_be/include/cg/cfgo.h new file mode 100644 index 0000000000000000000000000000000000000000..340078e4f97469cafccab4a8652c19d71ff7c781 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/cfgo.h @@ -0,0 +1,156 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MAPLEBE_INCLUDE_CG_CFGO_H +#define MAPLEBE_INCLUDE_CG_CFGO_H +#include "cg_cfg.h" +#include "optimize_common.h" + +namespace maplebe { + +enum CfgoPhase : maple::uint8 { + kCfgoDefault, + kCfgoPreRegAlloc, + kCfgoPostRegAlloc, + kPostCfgo, +}; + +class ChainingPattern : public OptimizationPattern { + public: + explicit ChainingPattern(CGFunc &func) : OptimizationPattern(func) { + patternName = "BB Chaining"; + dotColor = kCfgoChaining; + } + + virtual ~ChainingPattern() = default; + bool Optimize(BB &curBB) override; + + protected: + bool NoInsnBetween(const BB &from, const BB &to) const; + bool DoSameThing(const BB &bb1, const Insn &last1, const BB &bb2, const Insn &last2) const; + bool MergeFallthuBB(BB &curBB); + bool MergeGotoBB(BB &curBB, BB &sucBB); + bool MoveSuccBBAsCurBBNext(BB &curBB, BB &sucBB); + bool RemoveGotoInsn(BB &curBB, BB &sucBB); + bool ClearCurBBAndResetTargetBB(BB &curBB, BB &sucBB); +}; + +class SequentialJumpPattern : public OptimizationPattern { + public: + explicit SequentialJumpPattern(CGFunc &func) : OptimizationPattern(func) { + patternName = "Sequential Jump"; + dotColor = kCfgoSj; + } + + virtual ~SequentialJumpPattern() = default; + bool Optimize(BB &curBB) override; + + protected: + void SkipSucBB(BB &curBB, BB &sucBB) const; + void UpdateSwitchSucc(BB &curBB, BB &sucBB) const; +}; + +class FlipBRPattern : public OptimizationPattern { + public: + explicit FlipBRPattern(CGFunc &func) : OptimizationPattern(func) { + patternName = "Condition Flip"; + dotColor = kCfgoFlipCond; + } + + virtual ~FlipBRPattern() = default; + bool Optimize(BB &curBB) override; + + CfgoPhase GetPhase() const { + return phase; + } + void SetPhase(CfgoPhase val) { + phase = val; + } + CfgoPhase phase = kCfgoDefault; + + protected: + void RelocateThrowBB(BB &curBB); + private: + virtual uint32 GetJumpTargetIdx(const Insn &insn) = 0; + virtual MOperator FlipConditionOp(MOperator flippedOp) = 0; +}; + +/* This class represents the scenario that the BB is unreachable. */ +class UnreachBBPattern : public OptimizationPattern { + public: + explicit UnreachBBPattern(CGFunc &func) : OptimizationPattern(func) { + patternName = "Unreachable BB"; + dotColor = kCfgoUnreach; + func.GetTheCFG()->FindAndMarkUnreachable(*cgFunc); + } + + virtual ~UnreachBBPattern() = default; + bool Optimize(BB &curBB) override; +}; + +/* + * This class represents the scenario that a common jump BB can be duplicated + * to one of its another predecessor. + */ +class DuplicateBBPattern : public OptimizationPattern { + public: + explicit DuplicateBBPattern(CGFunc &func) : OptimizationPattern(func) { + patternName = "Duplicate BB"; + dotColor = kCfgoDup; + } + + virtual ~DuplicateBBPattern() = default; + bool Optimize(BB &curBB) override; + + private: + static constexpr int kThreshold = 10; +}; + +/* + * This class represents the scenario that a BB contains nothing. 
+ */ +class EmptyBBPattern : public OptimizationPattern { + public: + explicit EmptyBBPattern(CGFunc &func) : OptimizationPattern(func) { + patternName = "Empty BB"; + dotColor = kCfgoEmpty; + } + + virtual ~EmptyBBPattern() = default; + bool Optimize(BB &curBB) override; +}; + +class CFGOptimizer : public Optimizer { + public: + CFGOptimizer(CGFunc &func, MemPool &memPool) : Optimizer(func, memPool) { + name = "CFGO"; + } + + virtual ~CFGOptimizer() = default; + CfgoPhase GetPhase() const { + return phase; + } + void SetPhase(CfgoPhase val) { + phase = val; + } + CfgoPhase phase = kCfgoDefault; +}; + +MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgCfgo, maplebe::CGFunc) +MAPLE_FUNC_PHASE_DECLARE_END +MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgPostCfgo, maplebe::CGFunc) +MAPLE_FUNC_PHASE_DECLARE_END +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_CFGO_H */ diff --git a/src/mapleall/maple_be/include/cg/cfi.def b/src/mapleall/maple_be/include/cg/cfi.def new file mode 100644 index 0000000000000000000000000000000000000000..2c361aa64d2d69f53b53af4067d51cbf8d2da196 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/cfi.def @@ -0,0 +1,53 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan PSL v1. + * You can use this software according to the terms and conditions of the Mulan PSL v1. + * You may obtain a copy of Mulan PSL v1 at: + * + * http://license.coscl.org.cn/MulanPSL + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v1 for more details. + */ +/* Binutiles 2.28 */ +/* https://sourceware.org/binutils/docs-2.28/as/CFI-directives.html#CFI-directives */ +CFI_DEFINE( sections, , 1, List, Undef, Undef ) +CFI_DEFINE( startproc, , 0, Undef, Undef, Undef ) +CFI_DEFINE( startproc, _simple, 1, String, Undef, Undef ) /* "simple" */ +CFI_DEFINE( endproc, , 0, Undef, Undef, Undef ) +CFI_DEFINE( personality, _default, 1, Immediate, Undef, Undef ) +CFI_DEFINE( personality, _symbol, 2, Immediate, String, Undef ) +CFI_DEFINE( personality, _constant, 2, Immediate, Immediate, Undef ) +CFI_DEFINE( personality_id, , 1, StImmediate, Undef, Undef ) +CFI_DEFINE( fde_data, , 1, List, Undef, Undef ) +CFI_DEFINE( lsda, _default, 1, Immediate, Undef, Undef ) +CFI_DEFINE( lsda, _label, 2, Immediate, BBAddress, Undef ) +CFI_DEFINE( lsda, _constant, 2, Immediate, Immediate, Undef ) +CFI_DEFINE( inline_lsda, , 0, Undef, Undef, Undef ) +CFI_DEFINE( inline_lsda, _align, 1, Immediate, Undef, Undef ) /* power of 2 */ +CFI_DEFINE( def_cfa, , 2, Register, Immediate, Undef ) +CFI_DEFINE( def_cfa_register, , 1, Register, Undef, Undef ) +CFI_DEFINE( def_cfa_offset, , 1, Immediate, Undef, Undef ) +CFI_DEFINE( adjust_cfa_offset, , 1, Immediate, Undef, Undef ) +CFI_DEFINE( offset, , 2, Register, Immediate, Undef ) +CFI_DEFINE( val_offset, , 2, Register, Immediate, Undef ) +CFI_DEFINE( rel_offset, , 2, Register, Immediate, Undef ) +CFI_DEFINE( register, , 2, Register, Register, Undef ) +CFI_DEFINE( restore, , 1, Register, Undef, Undef ) +CFI_DEFINE( undefined, , 1, Register, Undef, Undef ) +CFI_DEFINE( same_value, , 1, Register, Undef, Undef ) +CFI_DEFINE( remember_state, , 0, Undef, Undef, Undef ) +CFI_DEFINE( restore_state, , 0, Undef, Undef, Undef ) +CFI_DEFINE( return_column, , 1, Register, Undef, Undef ) +CFI_DEFINE( signal_frame, , 0, Undef, Undef, Undef ) +CFI_DEFINE( 
window_save, , 0, Undef, Undef, Undef )
+CFI_DEFINE( escape, , 2, StImmediate, List /*expression[, ...]*/, Undef )
+CFI_DEFINE( val_encoded_addr, , 3, Register, Immediate, StImmediate )
+
+ARM_DIRECTIVES_DEFINE( save, , 1, List, Undef, Undef )
+ARM_DIRECTIVES_DEFINE( vsave, , 1, List, Undef, Undef )
+ARM_DIRECTIVES_DEFINE( setfp, , 3, Register, Register, Immediate )
+ARM_DIRECTIVES_DEFINE( pad, , 1, Immediate, Undef, Undef )
diff --git a/src/mapleall/maple_be/include/cg/cfi.h b/src/mapleall/maple_be/include/cg/cfi.h
new file mode 100644
index 0000000000000000000000000000000000000000..7f90ae5d86db819aae8e08e485f9688ddd9449a3
--- /dev/null
+++ b/src/mapleall/maple_be/include/cg/cfi.h
@@ -0,0 +1,266 @@
+/*
+ * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#ifndef MAPLEBE_INCLUDE_CG_CFI_H
+#define MAPLEBE_INCLUDE_CG_CFI_H
+
+#include "insn.h"
+#include "mempool_allocator.h"
+#include "mir_symbol.h"
+#include "operand.h"
+#include "common_utils.h"
+
+/*
+ * Reference:
+ * GNU Binutils. AS documentation
+ * https://sourceware.org/binutils/docs-2.28/as/index.html
+ *
+ * CFI blog
+ * https://www.imperialviolet.org/2017/01/18/cfi.html
+ *
+ * System V Application Binary Interface
+ * AMD64 Architecture Processor Supplement. Draft Version 0.99.7
+ * https://www.uclibc.org/docs/psABI-x86_64.pdf $ 3.7 Figure 3.36
+ * (RBP->6, RSP->7)
+ *
+ * System V Application Binary Interface
+ * Intel386 Architecture Processor Supplement.
Version 1.0 + * https://www.uclibc.org/docs/psABI-i386.pdf $ 2.5 Table 2.14 + * (EBP->5, ESP->4) + * + * DWARF for ARM Architecture (ARM IHI 0040B) + * infocenter.arm.com/help/topic/com.arm.doc.ihi0040b/IHI0040B_aadwarf.pdf + * $ 3.1 Table 1 + * (0-15 -> R0-R15) + */ +namespace cfi { +using namespace maple; + +enum CfiOpcode : uint8 { +#define CFI_DEFINE(k, sub, n, o0, o1, o2) OP_CFI_##k##sub, +#define ARM_DIRECTIVES_DEFINE(k, sub, n, o0, o1, o2) OP_ARM_DIRECTIVES_##k##sub, +#include "cfi.def" +#undef CFI_DEFINE +#undef ARM_DIRECTIVES_DEFINE + kOpCfiLast +}; + +class CfiInsn : public maplebe::Insn { + public: + CfiInsn(MemPool &memPool, maplebe::MOperator op) : Insn(memPool, op) {} + + CfiInsn(MemPool &memPool, maplebe::MOperator op, maplebe::Operand &opnd0) : Insn(memPool, op, opnd0) {} + + CfiInsn(MemPool &memPool, maplebe::MOperator op, maplebe::Operand &opnd0, maplebe::Operand &opnd1) + : Insn(memPool, op, opnd0, opnd1) {} + + CfiInsn(MemPool &memPool, maplebe::MOperator op, maplebe::Operand &opnd0, maplebe::Operand &opnd1, + maplebe::Operand &opnd2) + : Insn(memPool, op, opnd0, opnd1, opnd2) {} + + ~CfiInsn() = default; + + bool IsMachineInstruction() const override { + return false; + } + + void Dump() const override; + +#if DEBUG + void Check() const override; +#endif + + bool IsCfiInsn() const override { + return true; + } + + bool IsTargetInsn() const override { + return false; + } + + bool IsRegDefined(maplebe::regno_t regNO) const override { + CHECK_FATAL(false, "cfi do not def regs"); + return false; + } + + std::set GetDefRegs() const override{ + CHECK_FATAL(false, "cfi do not def regs"); + return std::set(); + } + + uint32 GetBothDefUseOpnd() const override { + return maplebe::kInsnMaxOpnd; + } + + private: + CfiInsn &operator=(const CfiInsn&); +}; + +class RegOperand : public maplebe::OperandVisitable { + public: + RegOperand(uint32 no, uint32 size) : OperandVisitable(kOpdRegister, size), regNO(no) {} + + ~RegOperand() = default; + using OperandVisitable::OperandVisitable; + + uint32 GetRegisterNO() const { + return regNO; + } + Operand *Clone(MemPool &memPool) const override { + Operand *opnd = memPool.Clone(*this); + return opnd; + } + + void Dump() const override; + + bool Less(const Operand &right) const override { + (void)right; + return false; + } + + private: + uint32 regNO; +}; + +class ImmOperand : public maplebe::OperandVisitable { + public: + ImmOperand(int64 val, uint32 size) : OperandVisitable(kOpdImmediate, size), val(val) {} + + ~ImmOperand() = default; + using OperandVisitable::OperandVisitable; + + Operand *Clone(MemPool &memPool) const override { + Operand *opnd = memPool.Clone(*this); + return opnd; + } + int64 GetValue() const { + return val; + } + + void Dump() const override; + + bool Less(const Operand &right) const override { + (void)right; + return false; + } + + private: + int64 val; +}; + +class SymbolOperand : public maplebe::OperandVisitable { + public: + SymbolOperand(const maple::MIRSymbol &mirSymbol, uint8 size) + : OperandVisitable(kOpdStImmediate, size), + symbol(&mirSymbol) {} + ~SymbolOperand() = default; + using OperandVisitable::OperandVisitable; + + Operand *Clone(MemPool &memPool) const override { + Operand *opnd = memPool.Clone(*this); + return opnd; + } + + bool Less(const Operand &right) const override { + (void)right; + return false; + } + + void Dump() const override { + LogInfo::MapleLogger() << "symbol is : " << symbol->GetName(); + } + + private: + const maple::MIRSymbol *symbol; +}; + +class StrOperand : public 
maplebe::OperandVisitable { + public: + StrOperand(const std::string &str, MemPool &memPool) : OperandVisitable(kOpdString, 0), str(str, &memPool) {} + + ~StrOperand() = default; + using OperandVisitable::OperandVisitable; + + Operand *Clone(MemPool &memPool) const override { + Operand *opnd = memPool.Clone(*this); + return opnd; + } + + bool Less(const Operand &right) const override { + (void)right; + return false; + } + + const MapleString &GetStr() const { + return str; + } + + void Dump() const override; + + private: + const MapleString str; +}; + +class LabelOperand : public maplebe::OperandVisitable { + public: + LabelOperand(const std::string &parent, LabelIdx labIdx, MemPool &memPool) + : OperandVisitable(kOpdBBAddress, 0), parentFunc(parent, &memPool), labelIndex(labIdx) {} + + ~LabelOperand() = default; + using OperandVisitable::OperandVisitable; + + Operand *Clone(MemPool &memPool) const override { + Operand *opnd = memPool.Clone(*this); + return opnd; + } + + bool Less(const Operand &right) const override { + (void)right; + return false; + } + + void Dump() const override; + + const MapleString &GetParentFunc() const { + return parentFunc; + } + LabelIdx GetIabelIdx() const { + return labelIndex; + }; + + private: + const MapleString parentFunc; + LabelIdx labelIndex; +}; + +class CFIOpndEmitVisitor : public maplebe::OperandVisitorBase, + public maplebe::OperandVisitors { + public: + explicit CFIOpndEmitVisitor(maplebe::Emitter &asmEmitter): emitter(asmEmitter) {} + virtual ~CFIOpndEmitVisitor() = default; + protected: + maplebe::Emitter &emitter; + private: + void Visit(RegOperand *v) final; + void Visit(ImmOperand *v) final; + void Visit(SymbolOperand *v) final; + void Visit(StrOperand *v) final; + void Visit(LabelOperand *v) final; +}; +} /* namespace cfi */ +#endif /* MAPLEBE_INCLUDE_CG_CFI_H */ diff --git a/src/mapleall/maple_be/include/cg/cfi_generator.h b/src/mapleall/maple_be/include/cg/cfi_generator.h new file mode 100644 index 0000000000000000000000000000000000000000..1844935ae3569ed9d62d8b82b000cfec54efb2b8 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/cfi_generator.h @@ -0,0 +1,66 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */
+#ifndef MAPLEBE_INCLUDE_CG_CFI_GENERATOR_H
+#define MAPLEBE_INCLUDE_CG_CFI_GENERATOR_H
+#include "cg_phase.h"
+#include "cfi.h"
+#include "cgfunc.h"
+#include "insn.h"
+#include "aarch64_insn.h"
+#include "aarch64_operand.h"
+
+namespace maplebe {
+class GenCfi {
+ public:
+  explicit GenCfi(CGFunc &func) : cg(*func.GetCG()), cgFunc(func) {}
+  virtual ~GenCfi() = default;
+
+  void Run();
+
+ protected:
+  void InsertCFIDefCfaOffset(BB &bb, Insn &insn, int32 &cfiOffset); /* cfiOffset in-out */
+  Insn &FindStackDefNextInsn(BB &bb) const;
+
+  /* CFI related routines */
+  int64 GetOffsetFromCFA() const {
+    return offsetFromCfa;
+  }
+
+  /* Add the increment (which can be negative) and return the new value */
+  int64 AddtoOffsetFromCFA(int64 delta) {
+    offsetFromCfa += delta;
+    return offsetFromCfa;
+  }
+
+  CG &cg;
+  CGFunc &cgFunc;
+  /* SP offset from Call Frame Address */
+  int64 offsetFromCfa = 0;
+  bool useFP = true;
+
+  static constexpr const int32 kOffset8MemPos = 8;
+
+ private:
+  void GenerateStartDirective(BB &bb);
+  void GenerateEndDirective(BB &bb);
+  void GenerateRegisterStateDirective(BB &bb);
+  virtual void GenerateRegisterSaveDirective(BB &bb) {}
+  virtual void GenerateRegisterRestoreDirective(BB &bb) {}
+
+  /* Inserts the start location information for each function in debugging mode */
+  void InsertFirstLocation(BB &bb);
+};
+} /* namespace maplebe */
+#endif /* MAPLEBE_INCLUDE_CG_CFI_GENERATOR_H */
diff --git a/src/mapleall/maple_be/include/cg/cg.h b/src/mapleall/maple_be/include/cg/cg.h
new file mode 100644
index 0000000000000000000000000000000000000000..8cc3b4e74ad85172c008d7f10cf07ac534bcec7a
--- /dev/null
+++ b/src/mapleall/maple_be/include/cg/cg.h
@@ -0,0 +1,392 @@
+/*
+ * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#ifndef MAPLEBE_INCLUDE_CG_CG_H
+#define MAPLEBE_INCLUDE_CG_CG_H
+
+/* C++ headers. */
+#include
+/* MapleIR headers.
*/ +#include "operand.h" +#include "insn.h" +#include "cgfunc.h" +#include "live.h" +#include "cg_option.h" +#include "opcode_info.h" +#include "global_tables.h" +#include "mir_function.h" +#include "mad.h" + +namespace maplebe { +#define ADDTARGETPHASE(PhaseName, condition) \ + if (!CGOptions::IsSkipPhase(PhaseName)) { \ + pm->AddPhase(PhaseName, condition); \ + } +/* subtarget opt phase -- cyclic Dependency, use Forward declaring */ +class CGSSAInfo; +class PhiEliminate; +class DomAnalysis; +class CGProp; +class CGDce; +class AlignAnalysis; +class MoveRegArgs; +class MPISel; +class Standardize; +class LiveIntervalAnalysis; +class ValidBitOpt; +class CG; +class LocalOpt; +class CFGOptimizer; +class RedundantComputeElim; +class TailCallOpt; +class Rematerializer; +class CGProfGen; + +class Globals { + public: + static Globals *GetInstance() { + static Globals instance; + return &instance; + } + + ~Globals() = default; + + void SetBECommon(BECommon &bc) { + beCommon = &bc; + } + + BECommon *GetBECommon() { + return beCommon; + } + + const BECommon *GetBECommon() const { + return beCommon; + } + + void SetMAD(MAD &m) { + mad = &m; + } + + MAD *GetMAD() { + return mad; + } + + const MAD *GetMAD() const { + return mad; + } + + void SetOptimLevel(int32 opLevel) { + optimLevel = opLevel; + } + + int32 GetOptimLevel() const { + return optimLevel; + } + + void SetTarget(CG &target); + const CG *GetTarget() const ; + + private: + BECommon *beCommon = nullptr; + MAD *mad = nullptr; + int32 optimLevel = 0; + CG *cg = nullptr; + Globals() = default; +}; + +class CG { + public: + using GenerateFlag = uint64; + + public: + CG(MIRModule &mod, const CGOptions &cgOptions) + : memPool(memPoolCtrler.NewMemPool("maplecg mempool", false /* isLocalPool */)), + allocator(memPool), + mirModule(&mod), + emitter(nullptr), + labelOrderCnt(0), + cgOption(cgOptions), + fileGP(nullptr) { + const std::string &internalNameLiteral = namemangler::GetInternalNameLiteral(namemangler::kJavaLangObjectStr); + GStrIdx strIdxFromName = GlobalTables::GetStrTable().GetStrIdxFromName(internalNameLiteral); + isLibcore = (GlobalTables::GetGsymTable().GetSymbolFromStrIdx(strIdxFromName) != nullptr); + DefineDebugTraceFunctions(); + isLmbc = (mirModule->GetFlavor() == MIRFlavor::kFlavorLmbc); + } + + virtual ~CG(); + + /* enroll all code generator phases for target machine */ + virtual void EnrollTargetPhases(MaplePhaseManager *pm) const = 0; + + void GenExtraTypeMetadata(const std::string &classListFileName, const std::string &outputBaseName); + void GenPrimordialObjectList(const std::string &outputBaseName); + const std::string ExtractFuncName(const std::string &str) const; + + virtual Insn &BuildPhiInsn(RegOperand &defOpnd, Operand &listParam) = 0; + virtual PhiOperand &CreatePhiOperand(MemPool &mp, MapleAllocator &mAllocator) = 0; + + virtual CGFunc *CreateCGFunc(MIRModule &mod, MIRFunction&, BECommon&, MemPool&, StackMemPool&, + MapleAllocator&, uint32) = 0; + + bool IsExclusiveEH() const { + return CGOptions::IsExclusiveEH(); + } + + virtual bool IsExclusiveFunc(MIRFunction &mirFunc) = 0; + + /* NOTE: Consider making be_common a field of CG. 
*/ + virtual void GenerateObjectMaps(BECommon &beCommon) = 0; + + /* Used for GCTIB pattern merging */ + virtual std::string FindGCTIBPatternName(const std::string &name) const = 0; + + bool GenerateVerboseAsm() const { + return cgOption.GenerateVerboseAsm(); + } + + bool GenerateVerboseCG() const { + return cgOption.GenerateVerboseCG(); + } + + bool DoPrologueEpilogue() const { + return cgOption.DoPrologueEpilogue(); + } + + bool DoTailCall() const { + return cgOption.DoTailCall(); + } + + bool DoCheckSOE() const { + return cgOption.DoCheckSOE(); + } + + bool GenerateDebugFriendlyCode() const { + return cgOption.GenerateDebugFriendlyCode(); + } + + int32 GetOptimizeLevel() const { + return cgOption.GetOptimizeLevel(); + } + + bool UseFastUnwind() const { + return true; + } + + bool IsStackProtectorStrong() const { + return cgOption.IsStackProtectorStrong(); + } + + bool IsStackProtectorAll() const { + return cgOption.IsStackProtectorAll(); + } + + bool IsUnwindTables() const { + return cgOption.IsUnwindTables(); + } + + bool InstrumentWithDebugTraceCall() const { + return cgOption.InstrumentWithDebugTraceCall(); + } + + bool DoPatchLongBranch() const { + return cgOption.DoPatchLongBranch(); + } + + uint8 GetRematLevel() const { + return CGOptions::GetRematLevel(); + } + + bool GenYieldPoint() const { + return cgOption.GenYieldPoint(); + } + + bool GenLocalRC() const { + return cgOption.GenLocalRC(); + } + + bool GenerateExceptionHandlingCode() const { + return cgOption.GenerateExceptionHandlingCode(); + } + + bool DoConstFold() const { + return cgOption.DoConstFold(); + } + + void AddStackGuardvar() const; + void DefineDebugTraceFunctions(); + MIRModule *GetMIRModule() { + return mirModule; + } + + void SetEmitter(Emitter &emitterVal) { + this->emitter = &emitterVal; + } + + Emitter *GetEmitter() const { + return emitter; + } + + void IncreaseLabelOrderCnt() { + labelOrderCnt++; + } + + LabelIDOrder GetLabelOrderCnt() const { + return labelOrderCnt; + } + + const CGOptions &GetCGOptions() const { + return cgOption; + } + + void UpdateCGOptions(const CGOptions &newOption) { + cgOption.SetOptionFlag(newOption.GetOptionFlag()); + } + + bool IsLibcore() const { + return isLibcore; + } + + bool IsLmbc() const { + return isLmbc; + } + + MIRSymbol *GetDebugTraceEnterFunction() { + return dbgTraceEnter; + } + + const MIRSymbol *GetDebugTraceEnterFunction() const { + return dbgTraceEnter; + } + + MIRSymbol *GetProfileFunction() { + return dbgFuncProfile; + } + + const MIRSymbol *GetProfileFunction() const { + return dbgFuncProfile; + } + + const MIRSymbol *GetDebugTraceExitFunction() const { + return dbgTraceExit; + } + + /* Init SubTarget Info */ + virtual MemLayout *CreateMemLayout(MemPool &mp, BECommon &b, + MIRFunction &f, MapleAllocator &mallocator) const = 0; + virtual RegisterInfo *CreateRegisterInfo(MemPool &mp, MapleAllocator &mallocator) const = 0; + + /* Init SubTarget phase */ + virtual LiveAnalysis *CreateLiveAnalysis(MemPool &mp, CGFunc &f) const = 0; + virtual ReachingDefinition *CreateReachingDefinition(MemPool &mp, CGFunc &f) const { + return nullptr; + }; + virtual MoveRegArgs *CreateMoveRegArgs(MemPool &mp, CGFunc &f) const = 0; + virtual AlignAnalysis *CreateAlignAnalysis(MemPool &mp, CGFunc &f) const = 0; + virtual MPISel *CreateMPIsel(MemPool &mp, CGFunc &f) const { + return nullptr; + } + virtual Standardize *CreateStandardize(MemPool &mp, CGFunc &f) const { + return nullptr; + } + + /* Init SubTarget optimization */ + virtual CGSSAInfo *CreateCGSSAInfo(MemPool &mp, CGFunc 
&f, DomAnalysis &da, MemPool &tmp) const = 0; + virtual LiveIntervalAnalysis *CreateLLAnalysis(MemPool &mp, CGFunc &f) const = 0; + virtual PhiEliminate *CreatePhiElimintor(MemPool &mp, CGFunc &f, CGSSAInfo &ssaInfo) const = 0; + virtual CGProp *CreateCGProp(MemPool &mp, CGFunc &f, CGSSAInfo &ssaInfo, LiveIntervalAnalysis &ll) const = 0; + virtual CGDce *CreateCGDce(MemPool &mp, CGFunc &f, CGSSAInfo &ssaInfo) const = 0; + virtual ValidBitOpt *CreateValidBitOpt(MemPool &mp, CGFunc &f, CGSSAInfo &ssaInfo) const = 0; + virtual RedundantComputeElim *CreateRedundantCompElim(MemPool &mp, CGFunc &f, CGSSAInfo &ssaInfo) const = 0; + virtual TailCallOpt *CreateCGTailCallOpt(MemPool &mp, CGFunc &f) const = 0; + virtual LocalOpt *CreateLocalOpt(MemPool &mp, CGFunc &f, ReachingDefinition&) const { + return nullptr; + }; + virtual CFGOptimizer *CreateCFGOptimizer(MemPool &mp, CGFunc &f) const { + return nullptr; + } + virtual CGProfGen *CreateCGProfGen(MemPool &mp, CGFunc &f) const { + return nullptr; + } + + virtual Rematerializer *CreateRematerializer(MemPool &mp) const { + return nullptr; + } + + /* Object map generation helper */ + std::vector GetReferenceOffsets64(const BECommon &beCommon, MIRStructType &structType) const; + + void SetGP(MIRSymbol *sym) { + fileGP = sym; + } + MIRSymbol *GetGP() const { + return fileGP; + } + + static bool IsInFuncWrapLabels(MIRFunction *func) { + return funcWrapLabels.find(func) != funcWrapLabels.end(); + } + + static void SetFuncWrapLabels(MIRFunction *func, const std::pair labels) { + if (!IsInFuncWrapLabels(func)) { + funcWrapLabels[func] = labels; + } + } + + static std::map> &GetFuncWrapLabels() { + return funcWrapLabels; + } + static void SetCurCGFunc(CGFunc &cgFunc) { + currentCGFunction = &cgFunc; + } + + static const CGFunc *GetCurCGFunc() { + return currentCGFunction; + } + + static CGFunc *GetCurCGFuncNoConst() { + return currentCGFunction; + } + + virtual const InsnDesc &GetTargetMd(MOperator mOp) const = 0; + virtual bool IsEffectiveCopy(Insn &insn) const = 0; + virtual bool IsTargetInsn(MOperator mOp) const = 0; + virtual bool IsClinitInsn(MOperator mOp) const = 0; + virtual void DumpTargetOperand(Operand &opnd, const OpndDesc &opndDesc) const = 0; + + protected: + MIRModule *GetMIRModule() const { + return mirModule; + } + + MemPool *memPool = nullptr; + MapleAllocator allocator; + + private: + MIRModule *mirModule = nullptr; + Emitter *emitter = nullptr; + LabelIDOrder labelOrderCnt; + static CGFunc *currentCGFunction; /* current cg function being compiled */ + CGOptions cgOption; + MIRSymbol *dbgTraceEnter = nullptr; + MIRSymbol *dbgTraceExit = nullptr; + MIRSymbol *dbgFuncProfile = nullptr; + MIRSymbol *fileGP; /* for lmbc, one local %GP per file */ + static std::map> funcWrapLabels; + bool isLibcore = false; + bool isLmbc = false; +}; /* class CG */ +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_CG_H */ diff --git a/src/mapleall/maple_be/include/cg/cg_cdg.h b/src/mapleall/maple_be/include/cg/cg_cdg.h new file mode 100644 index 0000000000000000000000000000000000000000..cd2a5c48c37ad4af2c4fbc1f31ddb76cff6e76f7 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/cg_cdg.h @@ -0,0 +1,602 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ + +/* + * The file defines the data structures of Control Dependence Graph(CDG). + */ +#ifndef MAPLEBE_INCLUDE_CG_CG_CDG_H +#define MAPLEBE_INCLUDE_CG_CG_CDG_H + +#include +#include "cgfunc.h" +#include "mpl_number.h" +#include "deps.h" +#include "mempool_allocator.h" + +namespace maplebe { +class CDGNode; +class CDGEdge; +class CDGRegion; +using CDGNodeId = utils::Index; +using CDGRegionId = utils::Index; + +/* Encapsulation of BB */ +class CDGNode { + public: + CDGNode(CDGNodeId nId, BB &bb, MapleAllocator &alloc) + : id(nId), bb(&bb), outEdges(alloc.Adapter()), inEdges(alloc.Adapter()), + topoPreds(alloc.Adapter()), lastComments(alloc.Adapter()), predCDGNodes(alloc.Adapter()) {} + virtual ~CDGNode() = default; + + BB *GetBB() { + return bb; + } + + BB *GetBB() const { + return bb; + } + + CDGNodeId GetNodeId() { + return id; + } + + CDGNodeId GetNodeId() const { + return id; + } + + void SetNodeId(CDGNodeId nId) { + id = nId; + } + + CDGRegion *GetRegion() { + return region; + } + + CDGRegion *GetRegion() const { + return region; + } + + void SetRegion(CDGRegion &cdgRegion) { + region = &cdgRegion; + } + + bool IsEntryNode() const { + return isEntryNode; + } + + void SetEntryNode() { + isEntryNode = true; + } + + bool IsExitNode() const { + return isExitNode; + } + + void SetExitNode() { + isExitNode = true; + } + + void AddOutEdges(CDGEdge *edge) { + outEdges.emplace_back(edge); + } + + MapleVector &GetAllOutEdges() { + return outEdges; + } + + void AddInEdges(CDGEdge *edge) { + inEdges.emplace_back(edge); + } + + MapleVector &GetAllInEdges() { + return inEdges; + } + + std::size_t GetOutEdgesNum() const { + return outEdges.size(); + } + + std::size_t GetInEdgesNum() const { + return inEdges.size(); + } + + bool HasAmbiRegs() { + return hasAmbiRegs; + } + + void SetHasAmbiRegs(bool flag) { + hasAmbiRegs = flag; + } + + Insn *GetMembarInsn() { + return membarInsn; + } + + void SetMembarInsn(Insn *insn) { + membarInsn = insn; + } + + Insn *GetLastCallInsn() { + return lastCallInsn; + } + + void SetLastCallInsn(Insn *callInsn) { + lastCallInsn = callInsn; + } + + Insn *GetLastFrameDefInsn() { + return lastFrameDef; + } + + void SetLastFrameDefInsn(Insn *frameInsn) { + lastFrameDef = frameInsn; + } + + MapleVector &GetTopoPreds() { + return topoPreds; + } + + void AddTopoPred(CDGNode *pred) { + (void)topoPreds.emplace_back(pred); + } + + void RemoveTopoPred(CDGNode *pred) { + auto it = std::find(topoPreds.begin(), topoPreds.end(), pred); + if (it != topoPreds.end()) { + (void)topoPreds.erase(it); + } + } + + void InitDataDepInfo(MemPool &tmpMp, MapleAllocator &tmpAlloc, uint32 maxRegNum) { + regDefs = tmpMp.New>(tmpAlloc.Adapter()); + regUses = tmpMp.New>(tmpAlloc.Adapter()); + stackUses = tmpMp.New>(tmpAlloc.Adapter()); + stackDefs = tmpMp.New>(tmpAlloc.Adapter()); + heapUses = tmpMp.New>(tmpAlloc.Adapter()); + heapDefs = tmpMp.New>(tmpAlloc.Adapter()); + mayThrows = tmpMp.New>(tmpAlloc.Adapter()); + ambiInsns = tmpMp.New>(tmpAlloc.Adapter()); + pseudoSepNodes = tmpMp.New>(tmpAlloc.Adapter()); + ehInRegs = tmpMp.New>(tmpAlloc.Adapter()); + + regDefs->resize(maxRegNum); + regUses->resize(maxRegNum); + } + + void ClearDataDepInfo() { + 
membarInsn = nullptr; + lastCallInsn = nullptr; + lastFrameDef = nullptr; + lastComments.clear(); + predCDGNodes.clear(); + + regDefs = nullptr; + regUses = nullptr; + stackUses = nullptr; + stackDefs = nullptr; + heapUses = nullptr; + heapDefs = nullptr; + mayThrows = nullptr; + ambiInsns = nullptr; + pseudoSepNodes = nullptr; + ehInRegs = nullptr; + } + + void ClearDepDataVec() { + membarInsn = nullptr; + lastCallInsn = nullptr; + lastFrameDef = nullptr; + + for (auto ®Def : *regDefs) { + regDef = nullptr; + } + for (auto ®Use : *regUses) { + regUse = nullptr; + } + + stackUses->clear(); + stackDefs->clear(); + heapUses->clear(); + heapDefs->clear(); + mayThrows->clear(); + ambiInsns->clear(); + } + + Insn *GetLatestDefInsn(regno_t regNO) { + return (*regDefs)[regNO]; + } + + void SetLatestDefInsn(regno_t regNO, Insn *defInsn) { + (*regDefs)[regNO] = defInsn; + } + + RegList *GetUseInsnChain(regno_t regNO) { + return (*regUses)[regNO]; + } + + void AppendUseInsnChain(regno_t regNO, Insn *useInsn, MemPool &mp, bool beforeRA) { + CHECK_FATAL(useInsn != nullptr, "invalid useInsn"); + auto *newUse = mp.New(); + newUse->insn = useInsn; + newUse->next = nullptr; + + RegList *headUse = (*regUses)[regNO]; + if (headUse == nullptr) { + (*regUses)[regNO] = newUse; + if (beforeRA) { + Insn *defInsn = (*regDefs)[regNO]; + if (defInsn != nullptr) { + DepNode *depNode = defInsn->GetDepNode(); + depNode->SetRegDefs(regNO, newUse); + } + } + } else { + while (headUse->next != nullptr) { + headUse = headUse->next; + } + headUse->next = newUse; + } + } + + void ClearUseInsnChain(regno_t regNO) { + (*regUses)[regNO] = nullptr; + } + + MapleVector &GetStackUseInsns() { + return *stackUses; + } + + void AddStackUseInsn(Insn *stackInsn) { + stackUses->emplace_back(stackInsn); + } + + MapleVector &GetStackDefInsns() { + return *stackDefs; + } + + void AddStackDefInsn(Insn *stackInsn) { + stackDefs->emplace_back(stackInsn); + } + + MapleVector &GetHeapUseInsns() { + return *heapUses; + } + + void AddHeapUseInsn(Insn *heapInsn) { + heapUses->emplace_back(heapInsn); + } + + MapleVector &GetHeapDefInsns() { + return *heapDefs; + } + + void AddHeapDefInsn(Insn *heapInsn) { + heapDefs->emplace_back(heapInsn); + } + + MapleVector &GetMayThrowInsns() { + return *mayThrows; + } + + void AddMayThrowInsn(Insn *throwInsn) { + mayThrows->emplace_back(throwInsn); + } + + MapleVector &GetAmbiguousInsns() { + return *ambiInsns; + } + + void AddAmbiguousInsn(Insn *ambiInsn) { + ambiInsns->emplace_back(ambiInsn); + } + + MapleVector &GetLastComments() { + return lastComments; + } + + void AddCommentInsn(Insn *commentInsn) { + lastComments.emplace_back(commentInsn); + } + + void CopyAndClearComments(MapleVector &comments) { + lastComments = comments; + comments.clear(); + } + + void ClearLastComments() { + lastComments.clear(); + } + + void AddPseudoSepNodes(DepNode *node) { + pseudoSepNodes->emplace_back(node); + } + + MapleSet &GetEhInRegs() { + return *ehInRegs; + } + + bool operator!=(const CDGNode &node) { + if (this != &node) { + return true; + } + if (this->id != node.GetNodeId() || this->bb != node.GetBB() || this->region != node.GetRegion()) { + return true; + } + if (this->GetInEdgesNum() != node.GetInEdgesNum() || this->GetOutEdgesNum() != node.GetOutEdgesNum()) { + return true; + } + return false; + } + + private: + CDGNodeId id; // same to bbId + BB *bb = nullptr; + CDGRegion *region = nullptr; + bool isEntryNode = false; + bool isExitNode = false; + MapleVector outEdges; + MapleVector inEdges; + /* + * The 
following structures are used to record data flow infos in building data dependence among insns + */ + bool hasAmbiRegs = false; + Insn *membarInsn = nullptr; + Insn *lastCallInsn = nullptr; + Insn *lastFrameDef = nullptr; + MapleVector topoPreds; // For visit nodes by topological order in the region, it will change dynamically + MapleVector *regDefs = nullptr; // the index is regNO, record the latest defInsn in the curBB + MapleVector *regUses = nullptr; // the index is regNO + MapleVector *stackUses = nullptr; + MapleVector *stackDefs = nullptr; + MapleVector *heapUses = nullptr; + MapleVector *heapDefs = nullptr; + MapleVector *mayThrows = nullptr; + MapleVector *ambiInsns = nullptr; + MapleVector *pseudoSepNodes = nullptr; + MapleSet *ehInRegs = nullptr; + MapleVector lastComments; + MapleVector predCDGNodes; // predecessor nodes of the curBB in CFG +}; + +class CDGEdge { + public: + CDGEdge(CDGNode &from, CDGNode &to, int32 cond) : fromNode(from), toNode(to), condition(cond) {} + virtual ~CDGEdge() = default; + + CDGNode &GetFromNode() { + return fromNode; + } + + CDGNode &GetFromNode() const { + return fromNode; + } + + void SetFromNode(CDGNode &from) { + fromNode = from; + } + + CDGNode &GetToNode() { + return toNode; + } + + CDGNode &GetToNode() const { + return toNode; + } + + void SetToNode(CDGNode &to) { + toNode = to; + } + + int32 GetCondition() const { + return condition; + } + + void SetCondition(int32 cond) { + condition = cond; + } + + /* for checking same control dependence */ + bool operator==(const CDGEdge &edge) { + if (this == &edge) { + return true; + } + if (this->fromNode != edge.GetFromNode()) { + return false; + } + if (this->toNode != edge.GetToNode()) { + return false; + } + if (this->condition != edge.GetCondition()) { + return false; + } + return true; + } + + private: + CDGNode &fromNode; + CDGNode &toNode; + /* + * allocate different COND number to different succ edges of the same fromBB + * default value is -1 indicated no cond. + */ + int32 condition; +}; + +/* + * A region consists of nodes with the same control dependence sets + */ +class CDGRegion { + public: + CDGRegion(CDGRegionId rId, MapleAllocator &alloc) + : id(rId), memberNodes(alloc.Adapter()), cdEdges(alloc.Adapter()) {} + virtual ~CDGRegion() = default; + + CDGRegionId GetRegionId() { + return id; + } + + MapleVector &GetRegionNodes() { + return memberNodes; + } + + std::size_t GetRegionNodeSize() const { + return memberNodes.size(); + } + + // Ensure the node is unique + void AddCDGNode(CDGNode *node) { + if (std::find(memberNodes.cbegin(), memberNodes.cend(), node) == memberNodes.cend()) { + memberNodes.emplace_back(node); + } + } + + void RemoveCDGNode(CDGNode *node) { + auto it = std::find(memberNodes.begin(), memberNodes.end(), node); + if (it == memberNodes.end()) { + return; + } + (void)memberNodes.erase(it); + } + + void ClearMemberNodes() { + memberNodes.clear(); + } + + MapleVector &GetCDEdges() { + return cdEdges; + } + + void AddCDEdge(CDGEdge *edge) { + cdEdges.emplace_back(edge); + } + + void AddCDEdgeSet(MapleVector &cds) { + for (auto &cd : cds) { + cdEdges.emplace_back(cd); + } + } + + uint32 GetMaxBBIdInRegion() { + uint32 maxId = 0; + for (auto node : memberNodes) { + maxId = (node->GetNodeId() > maxId ? 
static_cast(node->GetNodeId()) : maxId); + } + return maxId; + } + + private: + CDGRegionId id; + MapleVector memberNodes; // the nodes in CDGRegion by out-of-order + MapleVector cdEdges; // the control dependence sets of the parent node +}; + +/* + * Forward Control Dependence Graph + * which does not compute the control dependence on the back edges + */ +class FCDG { + public: + FCDG(CGFunc &f, MapleAllocator &alloc) + : nodes(f.GetAllBBSize(), alloc.Adapter()), + fcds(alloc.Adapter()), regions(f.GetAllBBSize() + 1, alloc.Adapter()) {} + virtual ~FCDG() = default; + + MapleVector &GetAllFCDGNodes() { + return nodes; + } + + std::size_t GetFCDGNodeSize() const { + return nodes.size(); + } + + CDGNode *GetCDGNodeFromId(CDGNodeId id) { + return nodes[id]; + } + + MapleVector &GetAllFCDGEdges() { + return fcds; + } + + MapleVector &GetAllRegions() { + return regions; + } + + CDGRegion *GetRegionFromId(CDGRegionId id) { + return regions[id]; + } + + // Ensure the node is unique + void AddFCDGNode(CDGNode &node) { + if (nodes[node.GetNodeId()] == nullptr) { + nodes[node.GetNodeId()] = &node; + } + } + + void AddFCDGEdge(CDGEdge *edge) { + fcds.emplace_back(edge); + } + + // Ensure the region is unique + void AddRegion(CDGRegion ®ion) { + if (regions[region.GetRegionId()] == nullptr) { + regions[region.GetRegionId()] = ®ion; + } + } + + void RemoveRegionById(CDGRegionId id) { + regions[id] = nullptr; + } + + /* Provide interfaces for global scheduling */ + + private: + MapleVector nodes; // all CDGNodes in FCDG that use nodeId as the index + MapleVector fcds; // all forward-control-dependence in FCDG + MapleVector regions; // all regions in FCDG that use CDGRegionId as the index +}; + +struct CDGOutEdgeComparator { + bool operator()(const CDGEdge &outEdge1, const CDGEdge &outEdge2) const { + const CDGNode &toNode1 = outEdge1.GetToNode(); + const CDGNode &toNode2 = outEdge2.GetToNode(); + return toNode1.GetNodeId() < toNode2.GetNodeId(); + } +}; + +struct CDGInEdgeComparator { + bool operator()(const CDGEdge &inEdge1, const CDGEdge &inEdge2) const { + const CDGNode &fromNode1 = inEdge1.GetFromNode(); + const CDGNode &fromNode2 = inEdge2.GetToNode(); + return fromNode1.GetNodeId() < fromNode2.GetNodeId(); + } +}; +} /* namespace maplebe */ + +namespace std { +template <> +struct hash { + size_t operator()(const maplebe::CDGNodeId &nId) const { + return nId; + } +}; + +template <> +struct hash { + size_t operator()(const maplebe::CDGRegionId &rId) const { + return rId; + } +}; +} +#endif /* MAPLEBE_INCLUDE_CG_CG_CDG_H */ diff --git a/src/mapleall/maple_be/include/cg/cg_cfg.h b/src/mapleall/maple_be/include/cg/cg_cfg.h new file mode 100644 index 0000000000000000000000000000000000000000..b2b3398408ae77ab144a3a218541d70a8f4a4ad8 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/cg_cfg.h @@ -0,0 +1,135 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
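/*
 * Illustrative sketch (not part of cg_cdg.h above): a consumer of the FCDG
 * typically walks its regions and the CDGNodes inside each region. `fcdg` is
 * an assumed, already-built FCDG; the element types of the Maple containers
 * (CDGRegion*, CDGNode*) are inferred from the accessors above.
 *
 *   for (CDGRegion *region : fcdg.GetAllRegions()) {
 *     if (region == nullptr) {  // region slots may be empty or removed
 *       continue;
 *     }
 *     for (CDGNode *node : region->GetRegionNodes()) {
 *       BB *bb = node->GetBB();  // every CDGNode wraps exactly one BB
 *       (void)bb;                // ... analyze/schedule the insns of bb here
 *     }
 *   }
 */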
+ */ +#ifndef MAPLEBE_INCLUDE_CG_CG_CFG_H +#define MAPLEBE_INCLUDE_CG_CG_CFG_H +#include "eh_func.h" +#include "cgbb.h" + +namespace maplebe { +class InsnVisitor { + public: + explicit InsnVisitor(CGFunc &func) : cgFunc(&func) {} + + virtual ~InsnVisitor() = default; + CGFunc *GetCGFunc() const { + return cgFunc; + } + + /* + * Precondition: + * The last instruction in bb is either conditional or unconditional jump. + * + * The jump target of bb is modified to the location specified by targetLabel. + */ + virtual void ModifyJumpTarget(LabelIdx targetLabel, BB &bb) = 0; + + /* + * Precondition: + * The last instruction in bb is either conditional or unconditional jump. + * + * The jump target of bb is modified to the location specified by targetOperand. + */ + virtual void ModifyJumpTarget(Operand &targetOperand, BB &bb) = 0; + + /* + * Precondition: + * The last instruction in bb is either a conditional or an unconditional jump. + * The last instruction in newTarget is an unconditional jump. + * + * The jump target of bb is modified to newTarget's jump target. + */ + virtual void ModifyJumpTarget(BB &newTarget, BB &bb) = 0; + /* Check if it requires to add extra gotos when relocate bb */ + virtual Insn *CloneInsn(Insn &originalInsn) = 0; + /* Create a new virtual register operand which has the same type and size as the given one. */ + virtual RegOperand *CreateVregFromReg(const RegOperand ®) = 0; + virtual LabelIdx GetJumpLabel(const Insn &insn) const = 0; + virtual bool IsCompareInsn(const Insn &insn) const = 0; + virtual bool IsCompareAndBranchInsn(const Insn &insn) const = 0; + virtual bool IsAddOrSubInsn(const Insn &insn) const = 0; + + virtual void ReTargetSuccBB(BB &bb, LabelIdx newTarget) const = 0; + virtual void FlipIfBB(BB &bb, LabelIdx ftLabel) const = 0; + virtual BB *CreateGotoBBAfterCondBB(BB &bb, BB &fallthru, bool isTargetFallthru) const = 0; + + private: + CGFunc *cgFunc; +}; /* class InsnVisitor; */ + +class CGCFG { + public: + explicit CGCFG(CGFunc &cgFunc) : cgFunc(&cgFunc) {} + + ~CGCFG() = default; + + void BuildCFG(); + void CheckCFG(); + void CheckCFGFreq(); + + void InitInsnVisitor(CGFunc &func) const; + InsnVisitor *GetInsnModifier() const { + return insnVisitor; + } + + static bool AreCommentAllPreds(const BB &bb); + bool CanMerge(const BB &merger, const BB &mergee) const; + bool BBJudge(const BB &first, const BB &second) const; + /* + * Merge all instructions in mergee into merger, each BB's successors and + * predecessors should be modified accordingly. + */ + static void MergeBB(BB &merger, BB &mergee, CGFunc &func); + + /* + * Remove a BB from its position in the CFG. + * Prev, next, preds and sucs are all modified accordingly. 
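 *
 * Illustrative call sequence (assumed caller code, not from this header): a
 * CFG cleanup pass typically pairs the helpers declared above, e.g.
 *   if (cgcfg.CanMerge(*merger, *mergee)) {
 *     CGCFG::MergeBB(*merger, *mergee, func);
 *   }
 * and then calls RemoveBB() below on blocks that have become unreachable.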
+ */ + void RemoveBB(BB &curBB, bool isGotoIf = false) const; + /* Skip the successor of bb, directly jump to bb's successor'ssuccessor */ + void RetargetJump(BB &srcBB, BB &targetBB) const; + + /* Loop up if the given label is in the exception tables in LSDA */ + static bool InLSDA(LabelIdx label, const EHFunc *ehFunc); + static bool InSwitchTable(LabelIdx label, const CGFunc &func); + + RegOperand *CreateVregFromReg(const RegOperand &pReg) const; + Insn *CloneInsn(Insn &originalInsn) const; + static BB *GetTargetSuc(BB &curBB, bool branchOnly = false, bool isGotoIf = false); + bool IsCompareAndBranchInsn(const Insn &insn) const; + bool IsAddOrSubInsn(const Insn &insn) const; + + Insn *FindLastCondBrInsn(BB &bb) const; + static void FindAndMarkUnreachable(CGFunc &func); + void FlushUnReachableStatusAndRemoveRelations(BB &bb, const CGFunc &func) const; + void MarkLabelTakenBB() const; + void UnreachCodeAnalysis() const; + void FindWillExitBBs(BB *bb, std::set *visitedBBs); + void WontExitAnalysis(); + BB *FindLastRetBB(); + + void UpdatePredsSuccsAfterSplit(BB &pred, BB &succ, BB &newBB) const; + BB *BreakCriticalEdge(BB &pred, BB &succ); + void ReverseCriticalEdge(BB &cbb); + /* cgcfgvisitor */ + private: + CGFunc *cgFunc = nullptr; + static InsnVisitor *insnVisitor; + static void MergeBB(BB &merger, BB &mergee); +}; /* class CGCFG */ +MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgHandleCFG, maplebe::CGFunc) +MAPLE_FUNC_PHASE_DECLARE_END +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_CG_CFG_H */ diff --git a/src/mapleall/maple_be/include/cg/cg_critical_edge.h b/src/mapleall/maple_be/include/cg/cg_critical_edge.h new file mode 100644 index 0000000000000000000000000000000000000000..d0c7159a18539f1cf675e04f7e29b2c1a82c808e --- /dev/null +++ b/src/mapleall/maple_be/include/cg/cg_critical_edge.h @@ -0,0 +1,49 @@ +/* + * Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_CG_CRITICAL_EDGE_H +#define MAPLEBE_INCLUDE_CG_CRITICAL_EDGE_H + +#include "cg.h" +#include "cgbb.h" +#include "insn.h" + +namespace maplebe { +class CriticalEdge { + public: + CriticalEdge(CGFunc &func, MemPool &mem) + : cgFunc(&func), + alloc(&mem), + criticalEdges(alloc.Adapter()), + newBBcreated(alloc.Adapter()) {} + + ~CriticalEdge() = default; + + void CollectCriticalEdges(); + void SplitCriticalEdges(); + MapleSet CopyNewBBInfo() { + return newBBcreated; + } + + private: + CGFunc *cgFunc; + MapleAllocator alloc; + MapleVector> criticalEdges; + MapleSet newBBcreated; +}; + +MAPLE_FUNC_PHASE_DECLARE(CgCriticalEdge, maplebe::CGFunc) +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_CRITICAL_EDGE_H */ diff --git a/src/mapleall/maple_be/include/cg/cg_dce.h b/src/mapleall/maple_be/include/cg/cg_dce.h new file mode 100644 index 0000000000000000000000000000000000000000..af9fd918abe75023c14c80ffe86490d1e8cdab45 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/cg_dce.h @@ -0,0 +1,59 @@ +/* +* Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. 
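/*
 * Illustrative sketch (assumed driver code, not from cg_critical_edge.h above):
 * the critical-edge phase first collects the critical edges of the CFG, then
 * splits them; the newly created blocks can be queried afterwards. `cgFunc` and
 * `memPool` are assumed to be provided by the enclosing phase.
 *
 *   CriticalEdge ce(cgFunc, memPool);
 *   ce.CollectCriticalEdges();
 *   ce.SplitCriticalEdges();
 *   auto newBBs = ce.CopyNewBBInfo();  // info about the BBs created by splitting
 */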
+* +* OpenArkCompiler is licensed under Mulan PSL v2. +* You can use this software according to the terms and conditions of the Mulan PSL v2. +* You may obtain a copy of Mulan PSL v2 at: +* +* http://license.coscl.org.cn/MulanPSL2 +* +* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +* FIT FOR A PARTICULAR PURPOSE. +* See the Mulan PSL v2 for more details. +*/ + +#ifndef MAPLEBE_INCLUDE_CG_DCE_H +#define MAPLEBE_INCLUDE_CG_DCE_H +#include "cgfunc.h" +#include "cg_ssa.h" + +namespace maplebe { +/* dead code elimination */ +class CGDce { + public: + CGDce(MemPool &mp, CGFunc &f, CGSSAInfo &sInfo) : memPool(&mp), cgFunc(&f), ssaInfo(&sInfo) {} + virtual ~CGDce() = default; + + void DoDce(); + /* provide public use in ssa opt */ + virtual bool RemoveUnuseDef(VRegVersion &defVersion) = 0; + CGSSAInfo *GetSSAInfo() { + return ssaInfo; + } + + protected: + MemPool *memPool; + CGFunc *cgFunc; + CGSSAInfo *ssaInfo; +}; + +class DeleteRegUseVisitor : public OperandVisitorBase, + public OperandVisitors, + public OperandVisitor { + public: + DeleteRegUseVisitor(CGSSAInfo &cgSSAInfo, uint32 dInsnID) : deleteInsnId(dInsnID), ssaInfo(&cgSSAInfo) {} + virtual ~DeleteRegUseVisitor() = default; + + protected: + CGSSAInfo *GetSSAInfo() { + return ssaInfo; + } + uint32 deleteInsnId; + private: + CGSSAInfo *ssaInfo; +}; + +MAPLE_FUNC_PHASE_DECLARE(CgDce, maplebe::CGFunc) +} +#endif /* MAPLEBE_INCLUDE_CG_DCE_H */ diff --git a/src/mapleall/maple_be/include/cg/cg_dominance.h b/src/mapleall/maple_be/include/cg/cg_dominance.h new file mode 100644 index 0000000000000000000000000000000000000000..1e7c370cf083701e302368cc012bc132783ac189 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/cg_dominance.h @@ -0,0 +1,286 @@ +/* + * Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
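/*
 * Illustrative sketch (assumed target-specific code, not from cg_dce.h above):
 * CGDce is used through a subclass that knows how to erase a dead definition in
 * the target's SSA form; the driver then just calls DoDce(). The class name and
 * the removal logic below are placeholders.
 *
 *   class TargetCGDce : public CGDce {
 *    public:
 *     TargetCGDce(MemPool &mp, CGFunc &f, CGSSAInfo &info) : CGDce(mp, f, info) {}
 *     bool RemoveUnuseDef(VRegVersion &defVersion) override {
 *       // delete the insn defining `defVersion` when it has no remaining uses
 *       return false;  // placeholder: report whether anything was removed
 *     }
 *   };
 *
 *   TargetCGDce dce(memPool, cgFunc, ssaInfo);
 *   dce.DoDce();
 */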
+ */ +#ifndef MAPLEBE_INCLUDE_CG_DOM_H +#define MAPLEBE_INCLUDE_CG_DOM_H + +#include "cg_phase.h" +#include "insn.h" +#include "cgbb.h" +#include "datainfo.h" +#include "maple_phase.h" + +namespace maplebe { +class DominanceBase : public AnalysisResult { + public: + DominanceBase(CGFunc &func, MemPool &memPool, MemPool &tmpPool, MapleVector &bbVec, BB &commonEntryBB, + BB &commonExitBB) + : AnalysisResult(&memPool), + domAllocator(&memPool), + tmpAllocator(&tmpPool), + bbVec(bbVec), + cgFunc(func), + commonEntryBB(commonEntryBB), + commonExitBB(commonExitBB) {} + + ~DominanceBase() override = default; + + BB &GetCommonEntryBB() const { + return commonEntryBB; + } + + BB &GetCommonExitBB() const { + return commonExitBB; + } + + protected: + bool CommonEntryBBIsPred(const BB &bb) const; + MapleAllocator domAllocator; // stores the analysis results + MapleAllocator tmpAllocator; // can be freed after dominator computation + MapleVector &bbVec; + CGFunc &cgFunc; + BB &commonEntryBB; + BB &commonExitBB; +}; + +class DomAnalysis : public DominanceBase { + public: + DomAnalysis(CGFunc &func, MemPool &memPool, MemPool &tmpPool, MapleVector &bbVec, BB &commonEntryBB, + BB &commonExitBB) + : DominanceBase(func, memPool, tmpPool, bbVec, commonEntryBB, commonExitBB), + postOrderIDVec(bbVec.size() + 1, -1, tmpAllocator.Adapter()), + reversePostOrder(tmpAllocator.Adapter()), + doms(bbVec.size() + 1, nullptr, domAllocator.Adapter()), + domFrontier(bbVec.size() + 1, MapleVector(domAllocator.Adapter()), domAllocator.Adapter()), + domChildren(bbVec.size() + 1, MapleVector(domAllocator.Adapter()), domAllocator.Adapter()), + iterDomFrontier(bbVec.size() + 1, MapleSet(domAllocator.Adapter()), domAllocator.Adapter()), + dtPreOrder(bbVec.size() + 1, 0, domAllocator.Adapter()), + dtDfn(bbVec.size() + 1, -1, domAllocator.Adapter()), + dtDfnOut(bbVec.size() + 1, -1, domAllocator.Adapter()) {} + ~DomAnalysis() override = default; + + void Compute(); + void Dump(); + + void GenPostOrderID(); + void ComputeDominance(); + void ComputeDomFrontiers(); + void ComputeDomChildren(); + void GetIterDomFrontier(const BB *bb, MapleSet *dfset, uint32 bbidMarker, std::vector &visitedMap); + void ComputeIterDomFrontiers(); + uint32 ComputeDtPreorder(const BB &bb, uint32 &num); + bool Dominate(const BB &bb1, const BB &bb2); // true if bb1 dominates bb2 + + MapleVector &GetReversePostOrder() { + return reversePostOrder; + } + + MapleVector &GetDtPreOrder() { + return dtPreOrder; + } + + uint32 GetDtPreOrderItem(size_t idx) const { + return dtPreOrder[idx]; + } + + size_t GetDtPreOrderSize() const { + return dtPreOrder.size(); + } + + uint32 GetDtDfnItem(size_t idx) const { + return dtDfn[idx]; + } + + size_t GetDtDfnSize() const { + return dtDfn.size(); + } + + BB *GetDom(uint32 id) { + ASSERT(id < doms.size(), "bbid out of range"); + return doms[id]; + } + void SetDom(uint32 id, BB *bb) { + ASSERT(id < doms.size(), "bbid out of range"); + doms[id] = bb; + } + size_t GetDomsSize() const { + return doms.size(); + } + + auto &GetDomFrontier(size_t idx) { + return domFrontier[idx]; + } + bool HasDomFrontier(uint32 id, uint32 frontier) const { + return std::find(domFrontier[id].begin(), domFrontier[id].end(), frontier) != domFrontier[id].end(); + } + + size_t GetDomFrontierSize() const { + return domFrontier.size(); + } + + auto &GetDomChildren() { + return domChildren; + } + + auto &GetDomChildren(size_t idx) { + return domChildren[idx]; + } + + auto &GetIdomFrontier(uint32 idx) { + return iterDomFrontier[idx]; + } + + size_t 
GetDomChildrenSize() const { + return domChildren.size(); + } + + private: + void PostOrderWalk(const BB &bb, int32 &pid, MapleVector &visitedMap); + BB *Intersect(BB &bb1, const BB &bb2); + + MapleVector postOrderIDVec; // index is bb id + MapleVector reversePostOrder; // an ordering of the BB in reverse postorder + MapleVector doms; // index is bb id; immediate dominator for each BB + MapleVector> domFrontier; // index is bb id + MapleVector> domChildren; // index is bb id; for dom tree + MapleVector> iterDomFrontier; + MapleVector dtPreOrder; // ordering of the BBs in a preorder traversal of the dominator tree + MapleVector dtDfn; // gives position of each BB in dt_preorder + MapleVector dtDfnOut; // max position of all nodes in the sub tree of each BB in dt_preorder +}; + +class PostDomAnalysis : public DominanceBase { + public: + PostDomAnalysis(CGFunc &func, MemPool &memPool, MemPool &tmpPool, MapleVector &bbVec, BB &commonEntryBB, + BB &commonExitBB) + : DominanceBase(func, memPool, tmpPool, bbVec, commonEntryBB, commonExitBB), + pdomPostOrderIDVec(bbVec.size() + 1, -1, tmpAllocator.Adapter()), + pdomReversePostOrder(tmpAllocator.Adapter()), + pdoms(bbVec.size() + 1, nullptr, domAllocator.Adapter()), + pdomFrontier(bbVec.size() + 1, MapleVector(domAllocator.Adapter()), domAllocator.Adapter()), + pdomChildren(bbVec.size() + 1, MapleVector(domAllocator.Adapter()), domAllocator.Adapter()), + iterPdomFrontier(bbVec.size() + 1, MapleSet(domAllocator.Adapter()), domAllocator.Adapter()), + pdtPreOrder(bbVec.size() + 1, 0, domAllocator.Adapter()), + pdtDfn(bbVec.size() + 1, -1, domAllocator.Adapter()), + pdtDfnOut(bbVec.size() + 1, -1, domAllocator.Adapter()) {} + + ~PostDomAnalysis() override = default; + void Compute(); + void PdomGenPostOrderID(); + void ComputePostDominance(); + void ComputePdomFrontiers(); + void ComputePdomChildren(); + void GetIterPdomFrontier(const BB *bb, MapleSet *dfset, uint32 bbidMarker, std::vector &visitedMap); + void ComputeIterPdomFrontiers(); + uint32 ComputePdtPreorder(const BB &bb, uint32 &num); + bool PostDominate(const BB &bb1, const BB &bb2); // true if bb1 postdominates bb2 + void Dump(); + void GeneratePdomTreeDot(); + + auto &GetPdomFrontierItem(size_t idx) { + return pdomFrontier[idx]; + } + + size_t GetPdomFrontierSize() const { + return pdomFrontier.size(); + } + + auto &GetIpdomFrontier(uint32 idx) { + return iterPdomFrontier[idx]; + } + + auto &GetPdomChildrenItem(size_t idx) { + return pdomChildren[idx]; + } + + std::size_t GetPdomTreeVecSize() const { + return pdomChildren.size(); + } + + void ResizePdtPreOrder(size_t n) { + pdtPreOrder.resize(n); + } + + uint32 GetPdtPreOrderItem(size_t idx) const { + return pdtPreOrder[idx]; + } + + size_t GetPdtPreOrderSize() const { + return pdtPreOrder.size(); + } + + uint32 GetPdtDfnItem(size_t idx) const { + return pdtDfn[idx]; + } + + int32 GetPdomPostOrderIDVec(size_t idx) const { + return pdomPostOrderIDVec[idx]; + } + + BB *GetPdomReversePostOrder(size_t idx) { + return pdomReversePostOrder[idx]; + } + + MapleVector &GetPdomReversePostOrder() { + return pdomReversePostOrder; + } + + size_t GetPdomReversePostOrderSize() const { + return pdomReversePostOrder.size(); + } + + bool HasPdomFrontier(uint32 id, uint32 frontier) const { + return std::find(pdomFrontier[id].begin(), pdomFrontier[id].end(), frontier) != pdomFrontier[id].end(); + } + + BB *GetPdom(uint32 id) { + ASSERT(id < pdoms.size(), "bbid out of range"); + return pdoms[id]; + } + void SetPdom(uint32 id, BB *bb) { + ASSERT(id < 
pdoms.size(), "bbid out of range"); + pdoms[id] = bb; + } + + private: + void PdomPostOrderWalk(const BB &bb, int32 &pid, MapleVector &visitedMap); + BB *PdomIntersect(BB &bb1, const BB &bb2); + + MapleVector pdomPostOrderIDVec; // index is bb id + MapleVector pdomReversePostOrder; // an ordering of the BB in reverse postorder + MapleVector pdoms; // index is bb id; immediate dominator for each BB + MapleVector> pdomFrontier; // index is bb id + MapleVector> pdomChildren; // index is bb id; for pdom tree + MapleVector> iterPdomFrontier; + MapleVector pdtPreOrder; // ordering of the BBs in a preorder traversal of the post-dominator tree + MapleVector pdtDfn; // gives position of each BB in pdt_preorder + MapleVector pdtDfnOut; // max position of all nodes in the sub tree of each BB in pdt_preorder +}; + +MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgDomAnalysis, maplebe::CGFunc); + DomAnalysis *GetResult() { + return domAnalysis; + } + DomAnalysis *domAnalysis = nullptr; +MAPLE_FUNC_PHASE_DECLARE_END + +MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgPostDomAnalysis, maplebe::CGFunc); + PostDomAnalysis *GetResult() { + return pdomAnalysis; + } + PostDomAnalysis *pdomAnalysis = nullptr; +MAPLE_FUNC_PHASE_DECLARE_END +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_DOM_H */ diff --git a/src/mapleall/maple_be/include/cg/cg_irbuilder.h b/src/mapleall/maple_be/include/cg/cg_irbuilder.h new file mode 100644 index 0000000000000000000000000000000000000000..720936143004afc0433d533dabe2f992e297e881 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/cg_irbuilder.h @@ -0,0 +1,91 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
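/*
 * Illustrative sketch (assumed phase code, not from cg_dominance.h above): both
 * analyses are constructed per CGFunc, computed once and then queried. cgFunc,
 * memPool, tmpPool, bbVec, entryBB and exitBB are assumed to come from the
 * enclosing phase; the element type of GetReversePostOrder() is assumed BB*.
 *
 *   DomAnalysis dom(cgFunc, memPool, tmpPool, bbVec, entryBB, exitBB);
 *   dom.Compute();
 *   if (dom.Dominate(*defBB, *useBB)) {
 *     // every path from the entry to useBB passes through defBB
 *   }
 *   for (BB *bb : dom.GetReversePostOrder()) {
 *     // visit blocks in reverse postorder, e.g. for forward data-flow problems
 *   }
 */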
+ */ + +#ifndef MAPLEBE_INCLUDE_CG_IRBUILDER_H +#define MAPLEBE_INCLUDE_CG_IRBUILDER_H + +#include "insn.h" +#include "operand.h" + +namespace maplebe { +class InsnBuilder { + public: + explicit InsnBuilder(MemPool &memPool) : mp(&memPool) {} + virtual ~InsnBuilder() { + mp = nullptr; + } + + template + Insn &BuildInsn(MOperator opCode) { + return BuildInsn(opCode, Target::kMd[opCode]); + } + Insn &BuildInsn(MOperator opCode, const InsnDesc &idesc); + Insn &BuildInsn(MOperator opCode, Operand &o0); + Insn &BuildInsn(MOperator opCode, Operand &o0, Operand &o1); + Insn &BuildInsn(MOperator opCode, Operand &o0, Operand &o1, Operand &o2); + Insn &BuildInsn(MOperator opCode, Operand &o0, Operand &o1, Operand &o2, Operand &o3); + Insn &BuildInsn(MOperator opCode, Operand &o0, Operand &o1, Operand &o2, Operand &o3, Operand &o4); + Insn &BuildInsn(MOperator opCode, std::vector &opnds); + + Insn &BuildCfiInsn(MOperator opCode); + Insn &BuildDbgInsn(MOperator opCode); + Insn &BuildCommentInsn(CommentOperand &comment); + VectorInsn &BuildVectorInsn(MOperator opCode, const InsnDesc &idesc); + + uint32 GetCreatedInsnNum () const { + return createdInsnNum; + } + protected: + MemPool *mp; + private: + void IncreaseInsnNum() { + createdInsnNum++; + } + uint32 createdInsnNum = 0; +}; + +constexpr uint32 kBaseVirtualRegNO = 200; /* avoid conflicts between virtual and physical */ +class OperandBuilder { + public: + explicit OperandBuilder(MemPool &mp, uint32 mirPregNum = 0) + : alloc(&mp), virtualRegNum(mirPregNum) {} + + /* create an operand in cgfunc when no mempool is supplied */ + ImmOperand &CreateImm(uint32 size, int64 value, MemPool *mp = nullptr); + ImmOperand &CreateImm(const MIRSymbol &symbol, int64 offset, int32 relocs, MemPool *mp = nullptr); + MemOperand &CreateMem(uint32 size, MemPool *mp = nullptr); + MemOperand &CreateMem(RegOperand &baseOpnd, int64 offset, uint32 size); + RegOperand &CreateVReg(uint32 size, RegType type, MemPool *mp = nullptr); + RegOperand &CreateVReg(regno_t vRegNO, uint32 size, RegType type, MemPool *mp = nullptr); + RegOperand &CreatePReg(regno_t pRegNO, uint32 size, RegType type, MemPool *mp = nullptr); + ListOperand &CreateList(MemPool *mp = nullptr); + FuncNameOperand &CreateFuncNameOpnd(MIRSymbol &symbol, MemPool *mp = nullptr); + LabelOperand &CreateLabel(const char *parent, LabelIdx idx, MemPool *mp = nullptr); + CommentOperand &CreateComment(const std::string &s, MemPool *mp = nullptr); + CommentOperand &CreateComment(const MapleString &s, MemPool *mp = nullptr); + + uint32 GetCurrentVRegNum() const { + return virtualRegNum; + } + + protected: + MapleAllocator alloc; + + private: + uint32 virtualRegNum = 0; + /* reg bank for multiple use */ +}; +} +#endif // MAPLEBE_INCLUDE_CG_IRBUILDER_H diff --git a/src/mapleall/maple_be/include/cg/cg_occur.h b/src/mapleall/maple_be/include/cg/cg_occur.h new file mode 100644 index 0000000000000000000000000000000000000000..6db5e0fdd393eca1f81973d776e8b5688c8463df --- /dev/null +++ b/src/mapleall/maple_be/include/cg/cg_occur.h @@ -0,0 +1,499 @@ +/* + * Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
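/*
 * Illustrative sketch (assumed backend code, not from cg_irbuilder.h above):
 * operands come from OperandBuilder and are assembled into an Insn through
 * InsnBuilder. kMopAddRRI, k64BitSize and kRegTyInt stand in for a concrete
 * target's machine opcode, operand size and register type.
 *
 *   OperandBuilder opndBuilder(memPool);
 *   InsnBuilder insnBuilder(memPool);
 *   RegOperand &dst = opndBuilder.CreateVReg(k64BitSize, kRegTyInt);
 *   ImmOperand &one = opndBuilder.CreateImm(k64BitSize, 1);
 *   Insn &add = insnBuilder.BuildInsn(kMopAddRRI, dst, dst, one);
 */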
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_CG_INCLUDE_CGOCCUR_H +#define MAPLEBE_CG_INCLUDE_CGOCCUR_H +#include "cg_dominance.h" + +// the data structures that represent occurrences and work candidates for PRE +namespace maplebe { +enum OccType { + kOccUndef, + kOccReal, + kOccDef, + kOccStore, + kOccPhiocc, + kOccPhiopnd, + kOccExit, + kOccUse, // for use appearances when candidate is dassign + kOccMembar, // for representing occurrence of memory barriers (use CgRealOcc) +}; + +class CgOccur { + public: + CgOccur(OccType ty, BB *bb, Insn *insn, Operand *opnd) + : occTy(ty), + cgBB(bb), + insn(insn), + opnd(opnd) {} + + CgOccur(OccType ty, int cId, BB &bb, CgOccur *df) : occTy(ty), classID(cId), cgBB(&bb), def(df) {} + virtual ~CgOccur() = default; + + bool IsDominate(DomAnalysis &dom, CgOccur &occ); + const BB *GetBB() const { + return cgBB; + } + + BB *GetBB() { + return cgBB; + } + + void SetBB(BB &bb) { + cgBB = &bb; + } + + OccType GetOccType() const { + return occTy; + } + + int GetClassID() const { + return classID; + } + + void SetClassID(int id) { + classID = id; + } + + const CgOccur *GetDef() const { + return def; + } + + CgOccur *GetDef() { + return def; + } + + void SetDef(CgOccur *define) { + def = define; + } + + const Insn *GetInsn() const { + return insn; + } + + Insn *GetInsn() { + return insn; + } + + const Operand *GetOperand() const { + return opnd; + } + + Operand *GetOperand() { + return opnd; + } + + bool Processed() const { + return processed; + } + + void SetProcessed(bool val) { + processed = val; + } + + virtual CgOccur *GetPrevVersionOccur() { + CHECK_FATAL(false, "has no prev version occur"); + } + + virtual void SetPrevVersionOccur(CgOccur*) { + CHECK_FATAL(false, "has no prev version occur"); + } + + virtual void Dump() const { + if (occTy == kOccExit) { + LogInfo::MapleLogger() << "ExitOcc at bb " << GetBB()->GetId() << std::endl; + } + }; + + private: + OccType occTy = kOccUndef; // kinds of occ + int classID = 0; // class id + BB *cgBB = nullptr; // the BB it occurs in + Insn *insn = nullptr; + Operand *opnd = nullptr; + CgOccur *def = nullptr; + bool processed = false; +}; + +class CgUseOcc : public CgOccur { + public: + CgUseOcc(BB *bb, Insn *insn, Operand *opnd) + : CgOccur(kOccUse, bb, insn, opnd), + needReload(false) {} + + ~CgUseOcc() = default; + + bool Reload() const { + return needReload; + } + + void SetReload(bool val) { + needReload = val; + } + + CgOccur *GetPrevVersionOccur() override { + return prevVersion; + } + + void SetPrevVersionOccur(CgOccur *val) override { + prevVersion = val; + } + + void Dump() const override { + LogInfo::MapleLogger() << "UseOcc " << GetClassID() << " at bb " << GetBB()->GetId() << ": " + << (needReload ? 
"need-reload, " : "not need-reload, ") + << "\n"; + } + private: + bool needReload = false; + CgOccur *prevVersion = nullptr; +}; + +class CgStoreOcc : public CgOccur { + public: + CgStoreOcc(BB *bb, Insn *insn, Operand *opnd) : CgOccur(kOccStore, bb, insn, opnd) {} + ~CgStoreOcc() = default; + + bool Reload() const { + return needReload; + } + + void SetReload(bool val) { + needReload = val; + } + + CgOccur *GetPrevVersionOccur() override { + return prevVersion; + } + + void SetPrevVersionOccur(CgOccur *val) override { + prevVersion = val; + } + + void Dump() const override { + LogInfo::MapleLogger() << "StoreOcc " << GetClassID() << " at bb " << GetBB()->GetId() << ": " + << (needReload ? "reload, " : "not reload, ") + << "\n"; + } + private: + bool needReload = false; + CgOccur *prevVersion = nullptr; +}; + +class CgDefOcc : public CgOccur { + public: + CgDefOcc(BB *bb, Insn *insn, Operand *opnd) : CgOccur(kOccDef, bb, insn, opnd) {} + ~CgDefOcc() = default; + + bool Loaded() const { + return needStore; + } + + void SetLoaded(bool val) { + needStore = val; + } + + CgOccur *GetPrevVersionOccur() override { + return prevVersion; + } + + void SetPrevVersionOccur(CgOccur *val) override { + prevVersion = val; + } + + void Dump() const override { + LogInfo::MapleLogger() << "DefOcc " << GetClassID() << " at bb " << GetBB()->GetId() << ": " + << (needStore ? "store" : "not store") + << "\n"; + } + private: + bool needStore = false; + CgOccur *prevVersion = nullptr; +}; + + +class CgPhiOpndOcc; +enum AvailState { + kFullyAvailable, + kPartialAvailable, + kNotAvailable +}; +class CgPhiOcc : public CgOccur { + public: + CgPhiOcc(BB &bb, Operand *opnd, MapleAllocator &alloc) + : CgOccur(kOccPhiocc, 0, bb, nullptr), + regOpnd(opnd), + isDownSafe(!bb.IsCatch()), + phiOpnds(alloc.Adapter()) {} + + virtual ~CgPhiOcc() = default; + + bool IsDownSafe() const { + return isDownSafe; + } + + void SetIsDownSafe(bool downSafe) { + isDownSafe = downSafe; + } + + const MapleVector &GetPhiOpnds() const { + return phiOpnds; + } + + MapleVector &GetPhiOpnds() { + return phiOpnds; + } + + Operand *GetOpnd() { + return regOpnd; + } + + CgPhiOpndOcc *GetPhiOpnd(size_t idx) { + ASSERT(idx < phiOpnds.size(), "out of range in CgPhiOcc::GetPhiOpnd"); + return phiOpnds.at(idx); + } + + const CgPhiOpndOcc *GetPhiOpnd(size_t idx) const { + ASSERT(idx < phiOpnds.size(), "out of range in CgPhiOcc::GetPhiOpnd"); + return phiOpnds.at(idx); + } + + void AddPhiOpnd(CgPhiOpndOcc &opnd) { + phiOpnds.push_back(&opnd); + } + + CgOccur *GetPrevVersionOccur() override { + return prevVersion; + } + + void SetPrevVersionOccur(CgOccur *val) override { + prevVersion = val; + } + + bool IsFullyAvailable() const { + return availState == kFullyAvailable; + } + + bool IsPartialAvailable() const { + return availState == kPartialAvailable; + } + + bool IsNotAvailable() const { + return availState == kNotAvailable; + } + + void SetAvailability(AvailState val) { + availState = val; + } + + void Dump() const override { + LogInfo::MapleLogger() << "PhiOcc " << GetClassID() << " at bb " << GetBB()->GetId() << ": " + << (isDownSafe ? "downsafe, " : "not downsafe, ") + << (availState == kNotAvailable ? "not avail" + : (availState == kPartialAvailable ? 
"part avail" : "fully avail")) + << "\n"; + } + + private: + Operand *regOpnd; + bool isDownSafe = true; // default is true + AvailState availState = kFullyAvailable; + MapleVector phiOpnds; + CgOccur *prevVersion = nullptr; +}; + +class CgPhiOpndOcc : public CgOccur { + public: + CgPhiOpndOcc(BB *bb, Operand *opnd, CgPhiOcc *defPhi) + : CgOccur(kOccPhiopnd, bb, nullptr, opnd), + hasRealUse(false), + phiOcc(defPhi) {} + + ~CgPhiOpndOcc() = default; + + bool HasRealUse() const { + return hasRealUse; + } + + void SetHasRealUse(bool realUse) { + hasRealUse = realUse; + } + + const CgPhiOcc *GetPhiOcc() const { + return phiOcc; + } + + CgPhiOcc *GetPhiOcc() { + return phiOcc; + } + + void SetPhiOcc(CgPhiOcc &occ) { + phiOcc = &occ; + } + + bool Reload() const { + return reload; + } + void SetReload(bool val) { + reload = val; + } + + void Dump() const override { + LogInfo::MapleLogger() << "PhiOpndOcc " << GetClassID() << " at bb " << GetBB()->GetId() << ": " + << (hasRealUse ? "hasRealUse, " : "not hasRealUse, ") + << (reload ? "reload" : "not reload") << std::endl; + } + + private: + bool hasRealUse; + bool reload = false; + CgPhiOcc *phiOcc = nullptr; // its lhs +}; + +// each singly linked list represents each bucket in workCandHashTable +class PreWorkCand { + public: + PreWorkCand(MapleAllocator &alloc, Operand *curOpnd, PUIdx pIdx) + : next(nullptr), + allOccs(alloc.Adapter()), + realOccs(alloc.Adapter()), + phiOccs(alloc.Adapter()), + theOperand(curOpnd), + puIdx(pIdx), + redo2HandleCritEdges(false) { + ASSERT(pIdx != 0, "PreWorkCand: initial puIdx cannot be 0"); + } + + virtual ~PreWorkCand() = default; + + void AddRealOccAsLast(CgOccur &occ, PUIdx pIdx) { + realOccs.push_back(&occ); // add as last + ASSERT(pIdx != 0, "puIdx of realocc cannot be 0"); + if (pIdx != puIdx) { + puIdx = 0; + } + } + + const PreWorkCand *GetNext() const { + return next; + } + + PreWorkCand *GetNext() { + return next; + } + + void SetNext(PreWorkCand &workCand) { + next = &workCand; + } + + int32 GetIndex() const { + return index; + } + + void SetIndex(int idx) { + index = idx; + } + + const MapleVector &GetRealOccs() const { + return realOccs; + } + + MapleVector &GetRealOccs() { + return realOccs; + } + + const CgOccur *GetRealOcc(size_t idx) const { + ASSERT(idx < realOccs.size(), "out of range in PreWorkCand::GetRealOccAt"); + return realOccs.at(idx); + } + + CgOccur *GetRealOcc(size_t idx) { + ASSERT(idx < realOccs.size(), "out of range in PreWorkCand::GetRealOccAt"); + return realOccs.at(idx); + } + + const MapleVector &PhiOccs() const { + return phiOccs; + } + + MapleVector &PhiOccs() { + return phiOccs; + } + + const Operand *GetTheOperand() const { + return theOperand; + } + + Operand *GetTheOperand() { + return theOperand; + } + + void SetTheOperand(Operand &expr) { + theOperand = &expr; + } + + PUIdx GetPUIdx() const { + return puIdx; + } + + void SetPUIdx(PUIdx idx) { + puIdx = idx; + } + + bool Redo2HandleCritEdges() const { + return redo2HandleCritEdges; + } + + void SetRedo2HandleCritEdges(bool redo) { + redo2HandleCritEdges = redo; + } + + private: + PreWorkCand *next; + int32 index = 0; + MapleVector allOccs; + MapleVector realOccs; // maintained in order of dt_preorder + MapleVector phiOccs; + Operand *theOperand; // the expression of this workcand + PUIdx puIdx; // if 0, its occ span multiple PUs; initial value must + // puIdx cannot be 0 if hasLocalOpnd is true + bool redo2HandleCritEdges : 1; // redo to make critical edges affect canbevail +}; + +class PreWorkCandHashTable { + public: + 
static const uint32 workCandHashLength = 229; + static uint32 ComputeWorkCandHashIndex(const Operand &opnd); + static uint32 ComputeStmtWorkCandHashIndex(const Insn &insn); + + PreWorkCandHashTable() = default; + ~PreWorkCandHashTable() = default; + + std::array &GetWorkcandHashTable() { + return workCandHashTable; + } + + PreWorkCand *GetWorkcandFromIndex(size_t idx) { + return workCandHashTable[idx]; + } + + void SetWorkCandAt(size_t idx, PreWorkCand &workCand) { + workCandHashTable[idx] = &workCand; + } + + private: + std::array workCandHashTable; +}; +} // namespace maple +#endif // MAPLEBE_CG_INCLUDE_CGOCCUR_H diff --git a/src/mapleall/maple_be/include/cg/cg_option.h b/src/mapleall/maple_be/include/cg/cg_option.h new file mode 100644 index 0000000000000000000000000000000000000000..3ddc69b6366e17906d37bfdcbcee6116eeeec7eb --- /dev/null +++ b/src/mapleall/maple_be/include/cg/cg_option.h @@ -0,0 +1,1442 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_CG_CG_OPTION_H +#define MAPLEBE_INCLUDE_CG_CG_OPTION_H +#include +#include +#include "mempool.h" +#include "mempool_allocator.h" +#include "mir_module.h" +#include "types_def.h" + +namespace maplebe { +using namespace maple; +struct Range { + bool enable; + uint64 begin; + uint64 end; +}; + +class CGOptions { + public: + enum OptionEnum : uint64 { + kUndefined = 0ULL, + kDoCg = 1ULL << 0, + kDoLinearScanRegAlloc = 1ULL << 1, + kDoColorRegAlloc = 1ULL << 2, + kConstFold = 1ULL << 3, + kGenPic = 1ULL << 4, + kGenPie = 1ULL << 5, + kVerboseAsm = 1ULL << 6, + kGenInsertCall = 1ULL << 7, + kAddDebugTrace = 1ULL << 8, + kGenYieldPoint = 1ULL << 9, + kGenLocalRc = 1ULL << 10, + kProEpilogueOpt = 1ULL << 11, + kVerboseCG = 1ULL << 12, + kDebugFriendly = 1ULL << 20, + kWithLoc = 1ULL << 21, + kWithDwarf = 1ULL << 22, + kWithMpl = 1ULL << 23, + kWithSrc = 1ULL << 24, + kWithAsm = 1ULL << 25, + kWithProfileCode = 1ULL << 30, + kUseStackProtectorStrong = 1ULL << 31, + kUseStackProtectorAll = 1ULL << 32, + kSoeCheckInsert = 1ULL << 33, + kAddFuncProfile = 1ULL << 34, + kPatchLongBranch = 1ULL << 35, + kTailCallOpt = 1ULL << 36, + kUseUnwindTables = 1ULL << 37, + /* undocumented */ + kDumpCFG = 1ULL << 61, + kDumpCgir = 1ULL << 62, + kSuppressFileInfo = 1ULL << 63, + }; + + using OptionFlag = uint64; + + enum GenerateEnum : uint64 { + kCMacroDef = 1ULL << 0, + kGctib = 1ULL << 1, + kGrootList = 1ULL << 2, + kPrimorList = 1ULL << 3, + }; + + using GenerateFlag = uint64; + + enum OptimizeLevel : uint8 { + kLevel0 = 0, + kLevelLiteCG = 1, + kLevel1 = 2, + kLevel2 = 3, + }; + + enum ABIType : uint8 { + kABIHard, + kABISoft, + kABISoftFP + }; + + enum EmitFileType : uint8 { + kAsm, + kObj, + kEmitNone, + }; + /* + * The default CG option values are: + * Don't BE_QUITE; verbose, + * DO CG and generate .s file as output, + * Generate EH, + * Use frame pointer, + * Generate CFI directives, + * DO peephole optimization, + * Generate position-independent executable, + * Don't insert debug comments in .s file, 
+ * Don't insert a call to the named (instrumentation) + * function at each function entry. + */ + static const OptionFlag kDefaultOptions = OptionFlag( +#if TARGAARCH64 || TARGARM32 || TARGRISCV64 + kDoCg | kGenPie | kDoColorRegAlloc +#else + kDoCg +#endif + ); + + /* + * The default metadata generation flags values are: + * Generate .macros.def for C preprocessors. + * Generate .groots.txt for GC. + * Generate .primordials.txt for GC. + * Generate yieldpoints for GC. + * Do not generate separate GCTIB file. + */ + static const GenerateFlag kDefaultGflags = GenerateFlag(0); + + public: + static CGOptions &GetInstance(); + virtual ~CGOptions() = default; + bool SolveOptions(bool isDebug); + void DecideMplcgRealLevel(bool isDebug); + + void DumpOptions(); + std::vector &GetSequence() { + return phaseSequence; + } + + template + void SetOrClear(T &dest, uint64 flag, bool truth) const { + if (truth) { + dest |= flag; + } else { + dest &= ~flag; + } + } + + void ParseExclusiveFunc(const std::string &fileName); + void ParseCyclePattern(const std::string &fileName) const; + + void EnableO0(); + void EnableO1(); + void EnableO2(); + void EnableLiteCG(); + + bool GenDef() const { + return generateFlag & kCMacroDef; + } + + bool GenGctib() const { + return generateFlag & kGctib; + } + + bool GenGrootList() const { + return generateFlag & kGrootList; + } + + bool GenPrimorList() const { + return generateFlag & kPrimorList; + } + + bool GenYieldPoint() const { + return generateFlag & kGenYieldPoint; + } + + bool GenLocalRC() const { + return ((generateFlag & kGenLocalRc) != 0) && !gcOnly; + } + + bool DoConstFold() const { + return options & kConstFold; + } + + bool DoEmitCode() const { + return (options & kDoCg) != 0; + } + + bool GenerateExceptionHandlingCode() const { + return true; + } + + bool DoLinearScanRegisterAllocation() const { + return (options & kDoLinearScanRegAlloc) != 0; + } + bool DoColoringBasedRegisterAllocation() const { + return (options & kDoColorRegAlloc) != 0; + } + + bool GeneratePositionIndependentExecutable() const { + return (options & kGenPie) != 0; + } + + bool GenerateVerboseAsm() const { + return (options & kVerboseAsm) != 0; + } + + bool GenerateVerboseCG() const { + return (options & kVerboseCG) != 0; + } + + bool GenerateDebugFriendlyCode() const { + return true; + } + + bool DoPrologueEpilogue() const { + return (options & kProEpilogueOpt) != 0; + } + + bool IsStackProtectorStrong() const { + return (options & kUseStackProtectorStrong) != 0; + } + + bool IsStackProtectorAll() const { + return (options & kUseStackProtectorAll) != 0; + } + + bool IsUnwindTables() const { + return (options & kUseUnwindTables) != 0; + } + + bool WithLoc() const { + return (options & kWithLoc) != 0; + } + + bool WithDwarf() const { + return (options & kWithDwarf) != 0; + } + + bool WithSrc() const { + return (options & kWithSrc) != 0; + } + + bool WithMpl() const { + return (options & kWithMpl) != 0; + } + + bool WithAsm() const { + return (options & kWithAsm) != 0; + } + + bool InstrumentWithDebugTraceCall() const { + return (options & kAddDebugTrace) != 0; + } + + bool DoPatchLongBranch() const { + return (options & kPatchLongBranch) != 0; + } + + bool DoTailCall() const { + return (options & kTailCallOpt) != 0; + } + + bool DoCheckSOE() const { + return (options & kSoeCheckInsert) != 0; + } + + bool SuppressFileInfo() const { + return (options & kSuppressFileInfo) != 0; + } + + bool DoDumpCFG() const { + return (options & kDumpCFG) != 0; + } + + void SetDefaultOptions(const MIRModule 
&mod); + static bool DumpPhase(const std::string &phase); + static bool FuncFilter(const std::string &name); + void SplitPhases(const std::string &str, std::unordered_set &set) const; + void SetRange(const std::string &str, const std::string &cmd, Range &subRange) const; + void SetTargetMachine(const std::string &str); + + int32 GetOptimizeLevel() const { + return optimizeLevel; + } + + bool IsRunCG() const { + return runCGFlag; + } + + void SetRunCGFlag(bool cgFlag) { + runCGFlag = cgFlag; + } + + bool IsGenerateObjectMap() const { + return generateObjectMap; + } + + void SetGenerateObjectMap(bool flag) { + generateObjectMap = flag; + } + + void SetParserOption(uint32 option) { + parserOption |= option; + } + + uint32 GetParserOption() const { + return parserOption; + } + + GenerateFlag &GetGenerateFlags() { + return generateFlag; + } + + const GenerateFlag &GetGenerateFlags() const { + return generateFlag; + } + + void SetGenerateFlags(GenerateFlag flag) { + generateFlag |= flag; + } + + void SetOption(OptionFlag opFlag) { + options |= opFlag; + } + + void ClearOption(OptionFlag opFlag) { + options &= ~opFlag; + } + + const std::string &GetClassListFile() const { + return classListFile; + } + + void SetClassListFile(const std::string &classList) { + classListFile = classList; + } + + void SetEHExclusiveFile(const std::string &ehExclusive) { + ehExclusiveFile = ehExclusive; + } + + void SetCyclePatternFile(const std::string &cyclePattern) { + cyclePatternFile = cyclePattern; + } + + static bool IsQuiet() { + return quiet; + } + + static void SetQuiet(bool flag) { + quiet = flag; + } + + static std::unordered_set &GetDumpPhases() { + return dumpPhases; + } + + static std::unordered_set &GetSkipPhases() { + return skipPhases; + } + + static bool IsSkipPhase(const std::string &phaseName) { + return !(skipPhases.find(phaseName) == skipPhases.end()); + } + + const std::vector &GetEHExclusiveFunctionNameVec() const { + return ehExclusiveFunctionName; + } + + static const std::unordered_map> &GetCyclePatternMap() { + return cyclePatternMap; + } + + static bool IsSkipFromPhase(const std::string &phaseName) { + return skipFrom.compare(phaseName) == 0; + } + + static const std::string GetSkipFromPhase() { + return skipFrom; + } + + static void SetSkipFrom(const std::string &phaseName) { + skipFrom = phaseName; + } + + static bool IsSkipAfterPhase(const std::string &phaseName) { + return skipAfter.compare(phaseName) == 0; + } + + static const std::string GetSkipAfterPhase() { + return skipAfter; + } + + static void SetSkipAfter(const std::string &phaseName) { + skipAfter = phaseName; + } + + static const std::string &GetDumpFunc() { + return dumpFunc; + } + + static bool IsDumpFunc(const std::string &func) { + return ((dumpFunc.compare("*") == 0) || (func.find(CGOptions::dumpFunc.c_str()) != std::string::npos)); + } + + static void SetDumpFunc(const std::string &func) { + dumpFunc = func; + } + static size_t FindIndexInProfileData(char data) { + return profileData.find(data); + } + + static void SetProfileData(const std::string &path) { + profileData = path; + } + + static std::string &GetProfileData() { + return profileData; + } + + static const std::string GetProfileDataSubStr(size_t begin, size_t end) { + return profileData.substr(begin, end); + } + + static const std::string GetProfileDataSubStr(size_t position) { + return profileData.substr(position); + } + + static bool IsProfileDataEmpty() { + return profileData.empty(); + } + + static const std::string &GetProfileFuncData() { + return 
profileFuncData; + } + + static bool IsProfileFuncDataEmpty() { + return profileFuncData.empty(); + } + + static void SetProfileFuncData(const std::string &data) { + profileFuncData = data; + } + + static const std::string &GetProfileClassData() { + return profileClassData; + } + + static void SetProfileClassData(const std::string &data) { + profileClassData = data; + } + + static const std::string &GetDuplicateAsmFile() { + return duplicateAsmFile; + } + + static bool IsDuplicateAsmFileEmpty() { + if (duplicateAsmFile.empty()) { + return true; + } + struct stat buffer; + if (stat(duplicateAsmFile.c_str(), &buffer) != 0) { + return true; + } + return false; + } + + static void SetDuplicateAsmFile(const std::string &fileName) { + duplicateAsmFile = fileName; + } + + static bool UseRange() { + return range.enable; + } + static const std::string &GetFastFuncsAsmFile() { + return fastFuncsAsmFile; + } + + static bool IsFastFuncsAsmFileEmpty() { + return fastFuncsAsmFile.empty(); + } + + static void SetFastFuncsAsmFile(const std::string &fileName) { + fastFuncsAsmFile = fileName; + } + + static Range &GetRange() { + return range; + } + + static uint64 GetRangeBegin() { + return range.begin; + } + + static uint64 GetRangeEnd() { + return range.end; + } + + static Range &GetSpillRanges() { + return spillRanges; + } + + static uint64 GetSpillRangesBegin() { + return spillRanges.begin; + } + + static uint64 GetSpillRangesEnd() { + return spillRanges.end; + } + + static uint64 GetLSRABBOptSize() { + return lsraBBOptSize; + } + + static void SetLSRABBOptSize(uint64 size) { + lsraBBOptSize = size; + } + + static void SetLSRAInsnOptSize(uint64 size) { + lsraInsnOptSize = size; + } + + static uint64 GetOverlapNum() { + return overlapNum; + } + + static void SetOverlapNum(uint64 num) { + overlapNum = num; + } + + static uint8 GetRematLevel() { + return rematLevel; + } + + static bool OptimizeForSize() { + return optForSize; + } + + static void SetRematLevel(uint8 level) { + rematLevel = level; + } + + static uint8 GetFastAllocMode() { + return fastAllocMode; + } + + static void SetFastAllocMode(uint8 mode) { + fastAllocMode = mode; + } + + static void EnableBarriersForVolatile() { + useBarriersForVolatile = true; + } + + static void DisableBarriersForVolatile() { + useBarriersForVolatile = false; + } + + static bool UseBarriersForVolatile() { + return useBarriersForVolatile; + } + static void EnableFastAlloc() { + fastAlloc = true; + } + + static bool IsFastAlloc() { + return fastAlloc; + } + + static bool IsEnableTimePhases() { + return timePhases; + } + + static void EnableTimePhases() { + timePhases = true; + } + + static void DisableTimePhases() { + timePhases = false; + } + + static void EnableInRange() { + inRange = true; + } + + static void DisableInRange() { + inRange = false; + } + + static bool IsInRange() { + return inRange; + } + + static void EnableEBO() { + doEBO = true; + } + + static void DisableEBO() { + doEBO = false; + } + + static bool DoEBO() { + return doEBO; + } + + static void DisableCGSSA() { + doCGSSA = false; + } + + static void EnableCGSSA() { + doCGSSA = true; + } + + static bool DoCGSSA() { + return doCGSSA && !flavorLmbc; + } + + static void DisableIPARA() { + doIPARA = false; + } + + static bool DoIPARA() { + return doIPARA; + } + + static void EnableCFGO() { + doCFGO = true; + } + + static void DisableCFGO() { + doCFGO = false; + } + + static bool DoCFGO() { + return doCFGO; + } + + static void EnableRegSavesOpt() { + doRegSavesOpt = true; + } + + static void 
DisableRegSavesOpt() { + doRegSavesOpt = false; + } + + static bool DoRegSavesOpt() { + return doRegSavesOpt; + } + + static void EnableSsaPreSave() { + useSsaPreSave = true; + } + + static void DisableSsaPreSave() { + useSsaPreSave = false; + } + + static bool UseSsaPreSave() { + return useSsaPreSave; + } + static void EnableSsuPreRestore() { + useSsuPreRestore = true; + } + + static void DisableSsuPreRestore() { + useSsuPreRestore = false; + } + + static bool UseSsuPreRestore() { + return useSsuPreRestore; + } + + static void EnableICO() { + doICO = true; + } + + static void DisableICO() { + doICO = false; + } + + static bool DoICO() { + return doICO; + } + + static bool DoIsolateFastPath() { + return (CGOptions::GetInstance().DoPrologueEpilogue()) && + (CGOptions::GetInstance().GetOptimizeLevel() == CGOptions::kLevel2); + } + + static void EnableStoreLoadOpt() { + doStoreLoadOpt = true; + } + + static void DisableStoreLoadOpt() { + doStoreLoadOpt = false; + } + + static bool DoStoreLoadOpt() { + return doStoreLoadOpt; + } + + static void EnableGlobalOpt() { + doGlobalOpt = true; + } + + static void DisableGlobalOpt() { + doGlobalOpt = false; + } + + static void EnableHotColdSplit() { + enableHotColdSplit = true; + } + + static void DisableHotColdSplit() { + enableHotColdSplit = false; + } + + static bool DoEnableHotColdSplit() { + return enableHotColdSplit; + } + + static bool DoGlobalOpt() { + return doGlobalOpt; + } + + static void EnableAlignAnalysis() { + doAlignAnalysis = true; + } + + static void DisableAlignAnalysis() { + doAlignAnalysis = false; + } + + static bool DoAlignAnalysis() { + return doAlignAnalysis; + } + + static void EnableCondBrAlign() { + doCondBrAlign = true; + } + + static void DisableCondBrAlign() { + doCondBrAlign = false; + } + + static bool DoCondBrAlign() { + return doCondBrAlign; + } + + static void EnableBigEndianInCG() { + cgBigEndian = true; + } + + static void DisableBigEndianInCG() { + cgBigEndian = false; + } + + static bool IsBigEndian() { + return cgBigEndian; + } + + static void EnableArm64ilp32() { + arm64ilp32 = true; + } + + static void DisableArm64ilp32() { + arm64ilp32 = false; + } + + static bool IsArm64ilp32() { + return arm64ilp32; + } + + static bool IsTargetX86_64() { + return targetArch == "x86_64"; + }; + + static void EnableVregRename() { + doVregRename = true; + } + + static void DisableVregRename() { + doVregRename = false; + } + + static bool DoVregRename() { + return doVregRename; + } + + static void EnableMultiPassColorRA() { + doMultiPassColorRA = true; + } + + static void DisableMultiPassColorRA() { + doMultiPassColorRA = false; + } + + static bool DoMultiPassColorRA() { + return doMultiPassColorRA; + } + + static void EnablePreLSRAOpt() { + doPreLSRAOpt = true; + } + + static void DisablePreLSRAOpt() { + doPreLSRAOpt = false; + } + + static bool DoPreLSRAOpt() { + return doPreLSRAOpt; + } + + static void EnableLocalRefSpill() { + doLocalRefSpill = true; + } + + static void DisableLocalRefSpill() { + doLocalRefSpill = false; + } + + static bool DoLocalRefSpill() { + return doLocalRefSpill; + } + + static void EnableCalleeToSpill() { + doCalleeToSpill = true; + } + + static void DisableCalleeToSpill() { + doCalleeToSpill = false; + } + + static bool DoCalleeToSpill() { + return doCalleeToSpill; + } + + static void EnablePrePeephole() { + doPrePeephole = true; + } + + static void DisablePrePeephole() { + doPrePeephole = false; + } + + static bool DoPrePeephole() { + return doPrePeephole; + } + + static void EnablePeephole() { + 
doPeephole = true; + } + + static void DisablePeephole() { + doPeephole = false; + } + + static bool DoPeephole() { + return doPeephole; + } + + static void EnableRetMerge() { + doRetMerge = true; + } + + static void DisableRetMerge() { + doRetMerge = false; + } + + static bool DoRetMerge() { + return doRetMerge; + } + + static void EnablePreSchedule() { + doPreSchedule = true; + } + + static void DisablePreSchedule() { + doPreSchedule = false; + } + + static bool DoPreSchedule() { + return doPreSchedule; + } + + static void EnableSchedule() { + doSchedule = true; + } + + static void DisableSchedule() { + doSchedule = false; + } + + static bool DoSchedule() { + return doSchedule; + } + static void EnableWriteRefFieldOpt() { + doWriteRefFieldOpt = true; + } + + static void DisableWriteRefFieldOpt() { + doWriteRefFieldOpt = false; + } + static bool DoWriteRefFieldOpt() { + return doWriteRefFieldOpt; + } + + static void EnableDumpOptimizeCommonLog() { + dumpOptimizeCommonLog = true; + } + + static void DisableDumpOptimizeCommonLog() { + dumpOptimizeCommonLog = false; + } + + static bool IsDumpOptimizeCommonLog() { + return dumpOptimizeCommonLog; + } + + static void EnableCheckArrayStore() { + checkArrayStore = true; + } + + static void DisableCheckArrayStore() { + checkArrayStore = false; + } + + static bool IsCheckArrayStore() { + return checkArrayStore; + } + + static void EnableExclusiveEH() { + exclusiveEH = true; + } + + static bool IsExclusiveEH() { + return exclusiveEH; + } + + static void EnablePIC() { + doPIC = true; + } + + static void DisablePIC() { + doPIC = false; + } + + static bool IsPIC() { + return doPIC; + } + + static void EnableNoDupBB() { + noDupBB = true; + } + + static void DisableNoDupBB() { + noDupBB = false; + } + + static bool IsNoDupBB() { + return noDupBB; + } + + static void EnableNoCalleeCFI() { + noCalleeCFI = true; + } + + static void DisableNoCalleeCFI() { + noCalleeCFI = false; + } + + static bool IsNoCalleeCFI() { + return noCalleeCFI; + } + + static void EnableEmitCyclePattern() { + emitCyclePattern = true; + } + + static bool IsInsertYieldPoint() { + return insertYieldPoint; + } + + static void EnableMapleLinker() { + mapleLinker = true; + } + + static void DisableMapleLinker() { + mapleLinker = false; + } + + static bool IsMapleLinker() { + return mapleLinker; + } + static void EnableReplaceASM() { + replaceASM = true; + } + + static void DisableReplaceASM() { + replaceASM = false; + } + + static bool IsReplaceASM() { + return replaceASM; + } + + static void EnableGeneralRegOnly() { + generalRegOnly = true; + } + + static void DisableGeneralRegOnly() { + generalRegOnly = false; + } + + static bool UseGeneralRegOnly() { + return generalRegOnly; + } + + static void EnablePrintFunction() { + printFunction = true; + } + + static void DisablePrintFunction() { + printFunction = false; + } + + static bool IsPrintFunction() { + return printFunction; + } + + static std::string &GetGlobalVarProFile() { + return globalVarProfile; + } + + static bool IsGlobalVarProFileEmpty() { + return globalVarProfile.empty(); + } + + static bool IsEmitBlockMarker() { + return emitBlockMarker; + } + + static void EnableNativeOpt() { + nativeOpt = true; + } + + static void DisableNativeOpt() { + nativeOpt = false; + } + + static bool IsNativeOpt() { + return nativeOpt; + } + + static void EnableLazyBinding() { + lazyBinding = true; + } + + static void DisableLazyBinding() { + lazyBinding = false; + } + + static bool IsLazyBinding() { + return lazyBinding; + } + + static void 
EnableHotFix() { + hotFix = true; + } + + static void DisableHotFix() { + hotFix = false; + } + + static bool IsHotFix() { + return hotFix; + } + + static void EnableDebugSched() { + debugSched = true; + } + + static void DisableDebugSched() { + debugSched = false; + } + + static bool IsDebugSched() { + return debugSched; + } + + static void EnableDruteForceSched() { + bruteForceSched = true; + } + + static void DisableDruteForceSched() { + bruteForceSched = false; + } + + static bool IsDruteForceSched() { + return bruteForceSched; + } + + static void EnableSimulateSched() { + simulateSched = true; + } + + static void DisableSimulateSched() { + simulateSched = false; + } + + static bool IsSimulateSched() { + return simulateSched; + } + + static void SetABIType(const std::string &type) { + if (type == "hard") { + abiType = kABIHard; + } else if (type == "soft") { + CHECK_FATAL(false, "float-abi=soft is not supported Currently."); + } else if (type == "softfp") { + abiType = kABISoftFP; + } else { + CHECK_FATAL(false, "unexpected abi-type, only hard, soft and softfp are supported"); + } + } + + static ABIType GetABIType() { + return abiType; + } + + static void SetEmitFileType(const std::string &type) { + if (type == "asm") { + emitFileType = kAsm; + } else if (type == "obj") { + emitFileType = kObj; + } else if (type == "null") { + emitFileType = kEmitNone; + CHECK_FATAL(false, "null is not supported Currently."); + } else { + CHECK_FATAL(false, "unexpected file-type, only asm, obj, and null are supported"); + } + } + + static EmitFileType GetEmitFileType() { + return emitFileType; + } + + static void EnableLongCalls() { + genLongCalls = true; + } + + static void DisableLongCalls() { + genLongCalls = false; + } + + static bool IsLongCalls() { + return genLongCalls; + } + + static void EnableFunctionSections() { + functionSections = true; + } + + static void DisableFunctionSections() { + functionSections = false; + } + + static bool IsFunctionSections() { + return functionSections; + } + + static void EnableFramePointer() { + useFramePointer = true; + } + + static void DisableFramePointer() { + useFramePointer = false; + } + + static bool UseFramePointer() { + return useFramePointer; + } + + static void EnableGCOnly() { + gcOnly = true; + } + + static void DisableGCOnly() { + gcOnly = false; + } + + static bool IsGCOnly() { + return gcOnly; + } + + const OptionFlag &GetOptionFlag() const { + return options; + } + + void SetOptionFlag(const OptionFlag &flag) { + options = flag; + } + + static void EnableFastMath() { + fastMath = true; + } + + static void DisableFastMath() { + fastMath = false; + } + + static bool IsFastMath() { + return fastMath; + } + + static void EnableCommon() { + noCommon = false; + } + + static void DisableCommon() { + noCommon = true; + } + + static bool IsNoCommon() { + return noCommon; + } + + static void EnableFlavorLmbc() { + flavorLmbc = true; + } + + static void SetAlignMinBBSize(uint32 minBBSize) { + alignMinBBSize = minBBSize; + } + + static uint32 GetAlignMinBBSize() { + return alignMinBBSize; + } + + static void SetAlignMaxBBSize(uint32 maxBBSize) { + alignMaxBBSize = maxBBSize; + } + + static uint32 GetAlignMaxBBSize() { + return alignMaxBBSize; + } + + static void SetLoopAlignPow(uint32 loopPow) { + loopAlignPow = loopPow; + } + + static uint32 GetLoopAlignPow() { + return loopAlignPow; + } + + static void SetJumpAlignPow(uint32 jumpPow) { + jumpAlignPow = jumpPow; + } + + static uint32 GetJumpAlignPow() { + return jumpAlignPow; + } + + static void 
SetFuncAlignPow(uint32 funcPow) { + funcAlignPow = funcPow; + } + + static uint32 GetFuncAlignPow() { + return funcAlignPow; + } + + static bool DoLiteProfGen() { + return liteProfGen; + } + + static void EnableLiteProfGen() { + liteProfGen = true; + } + + static void DisableLiteProfGen() { + liteProfGen = false; + } + + static bool DoLiteProfUse() { + return liteProfUse; + } + + static void EnableLiteProfUse() { + liteProfUse = true; + } + + static void SetLiteProfile(std::string pgofile) { + liteProfile = pgofile; + } + + static std::string GetLiteProfile() { + return liteProfile; + } + + static void SetLitePgoOutputFunction(std::string iofile) { + litePgoOutputFunction = iofile; + } + + static std::string& GetLitePgoOutputFunction() { + return litePgoOutputFunction; + } + + static void SetInstrumentationWhiteList(std::string pgoWhiteList) { + instrumentationWhiteList = pgoWhiteList; + } + + static std::string& GetInstrumentationWhiteList() { + return instrumentationWhiteList; + } + + private: + std::vector phaseSequence; + bool runCGFlag = true; + bool generateObjectMap = true; + uint32 parserOption = 0; + int32 optimizeLevel = 0; + + GenerateFlag generateFlag = 0; + OptionFlag options = kUndefined; + + std::string classListFile; + std::string ehExclusiveFile; + std::string cyclePatternFile; + /* we don't do exception handling in this list */ + std::vector ehExclusiveFunctionName; + + static bool quiet; + static std::string targetArch; + static std::unordered_set dumpPhases; + static std::unordered_set skipPhases; + static std::unordered_map> cyclePatternMap; + static std::string skipFrom; + static std::string skipAfter; + static std::string dumpFunc; + static std::string duplicateAsmFile; + static bool optForSize; + static bool enableHotColdSplit; + static bool useBarriersForVolatile; + static bool timePhases; + static bool cgBigEndian; + static bool doEBO; + static bool doCGSSA; + static bool doIPARA; + static bool doCFGO; + static bool doICO; + static bool doStoreLoadOpt; + static bool doGlobalOpt; + static bool doVregRename; + static bool doMultiPassColorRA; + static bool doPrePeephole; + static bool doPeephole; + static bool doRetMerge; + static bool doSchedule; + static bool doAlignAnalysis; + static bool doCondBrAlign; + static bool doWriteRefFieldOpt; + static bool doRegSavesOpt; + static bool useSsaPreSave; + static bool useSsuPreRestore; + static bool dumpOptimizeCommonLog; + static bool checkArrayStore; + static bool exclusiveEH; + static bool doPIC; + static bool noDupBB; + static bool noCalleeCFI; + static bool emitCyclePattern; + static bool insertYieldPoint; + static bool mapleLinker; + static bool printFunction; + static std::string globalVarProfile; + static bool nativeOpt; + static bool lazyBinding; + static bool arm64ilp32; + static bool hotFix; + /* if true dump scheduling information */ + static bool debugSched; + /* if true do BruteForceSchedule */ + static bool bruteForceSched; + /* if true do SimulateSched */ + static bool simulateSched; + static ABIType abiType; + static EmitFileType emitFileType; + /* if true generate adrp/ldr/blr */ + static bool genLongCalls; + static bool functionSections; + static bool useFramePointer; + static bool gcOnly; + static bool doPreSchedule; + static bool emitBlockMarker; + static Range range; + static bool inRange; + static bool doPatchLongBranch; + static std::string profileData; + static std::string profileFuncData; + static std::string profileClassData; + static std::string fastFuncsAsmFile; + static Range spillRanges; + 
static uint64 lsraBBOptSize; + static uint64 lsraInsnOptSize; + static uint64 overlapNum; + static uint8 rematLevel; + static uint8 fastAllocMode; + static bool fastAlloc; + static bool doPreLSRAOpt; + static bool doLocalRefSpill; + static bool doCalleeToSpill; + static bool replaceASM; + static bool generalRegOnly; + static bool fastMath; + static bool noCommon; + static bool flavorLmbc; + static uint32 alignMinBBSize; + static uint32 alignMaxBBSize; + static uint32 loopAlignPow; + static uint32 jumpAlignPow; + static uint32 funcAlignPow; + static bool liteProfGen; + static bool liteProfUse; + static std::string litePgoOutputFunction; + static std::string instrumentationWhiteList; + static std::string liteProfile; +}; +} /* namespace maplebe */ + +#define SET_FIND(SET, NAME) ((SET).find(NAME)) +#define SET_END(SET) ((SET).end()) +#define IS_STR_IN_SET(SET, NAME) (SET_FIND(SET, NAME) != SET_END(SET)) + +#define CG_DEBUG_FUNC(f) \ + (!maplebe::CGOptions::GetDumpPhases().empty() && maplebe::CGOptions::IsDumpFunc((f).GetName()) && \ + maplebe::CGOptions::GetDumpPhases().find(PhaseName()) != maplebe::CGOptions::GetDumpPhases().end()) +#ifndef TRACE_PHASE +#define TRACE_PHASE (IS_STR_IN_SET(maplebe::CGOptions::GetDumpPhases(), PhaseName())) +#endif + +#endif /* MAPLEBE_INCLUDE_CG_CG_OPTION_H */ diff --git a/src/mapleall/maple_be/include/cg/cg_options.h b/src/mapleall/maple_be/include/cg/cg_options.h new file mode 100644 index 0000000000000000000000000000000000000000..9b37bf93bd6082c1f51ee35e0431468e1eafedec --- /dev/null +++ b/src/mapleall/maple_be/include/cg/cg_options.h @@ -0,0 +1,133 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ + +#ifndef MAPLE_BE_INCLUDE_CG_OPTIONS_H +#define MAPLE_BE_INCLUDE_CG_OPTIONS_H + +#include "cl_option.h" + +namespace opts::cg { + +extern maplecl::Option pie; +extern maplecl::Option fpic; +extern maplecl::Option verboseAsm; +extern maplecl::Option verboseCg; +extern maplecl::Option maplelinker; +extern maplecl::Option quiet; +extern maplecl::Option cg; +extern maplecl::Option replaceAsm; +extern maplecl::Option generalRegOnly; +extern maplecl::Option lazyBinding; +extern maplecl::Option hotFix; +extern maplecl::Option ebo; +extern maplecl::Option cfgo; +extern maplecl::Option ico; +extern maplecl::Option storeloadopt; +extern maplecl::Option globalopt; +extern maplecl::Option hotcoldsplit; +extern maplecl::Option prelsra; +extern maplecl::Option lsraLvarspill; +extern maplecl::Option lsraOptcallee; +extern maplecl::Option calleeregsPlacement; +extern maplecl::Option ssapreSave; +extern maplecl::Option ssupreRestore; +extern maplecl::Option prepeep; +extern maplecl::Option peep; +extern maplecl::Option preschedule; +extern maplecl::Option schedule; +extern maplecl::Option retMerge; +extern maplecl::Option vregRename; +extern maplecl::Option fullcolor; +extern maplecl::Option writefieldopt; +extern maplecl::Option dumpOlog; +extern maplecl::Option nativeopt; +extern maplecl::Option objmap; +extern maplecl::Option yieldpoint; +extern maplecl::Option proepilogue; +extern maplecl::Option localRc; +extern maplecl::Option addDebugTrace; +extern maplecl::Option classListFile; +extern maplecl::Option genCMacroDef; +extern maplecl::Option genGctibFile; +extern maplecl::Option unwindTables; +extern maplecl::Option debug; +extern maplecl::Option gdwarf; +extern maplecl::Option gsrc; +extern maplecl::Option gmixedsrc; +extern maplecl::Option gmixedasm; +extern maplecl::Option profile; +extern maplecl::Option withRaLinearScan; +extern maplecl::Option withRaGraphColor; +extern maplecl::Option patchLongBranch; +extern maplecl::Option constFold; +extern maplecl::Option ehExclusiveList; +extern maplecl::Option o0; +extern maplecl::Option o1; +extern maplecl::Option o2; +extern maplecl::Option os; +extern maplecl::Option olitecg; +extern maplecl::Option lsraBb; +extern maplecl::Option lsraInsn; +extern maplecl::Option lsraOverlap; +extern maplecl::Option remat; +extern maplecl::Option suppressFileinfo; +extern maplecl::Option dumpCfg; +extern maplecl::Option target; +extern maplecl::Option dumpPhases; +extern maplecl::Option skipPhases; +extern maplecl::Option skipFrom; +extern maplecl::Option skipAfter; +extern maplecl::Option dumpFunc; +extern maplecl::Option timePhases; +extern maplecl::Option useBarriersForVolatile; +extern maplecl::Option range; +extern maplecl::Option fastAlloc; +extern maplecl::Option spillRange; +extern maplecl::Option dupBb; +extern maplecl::Option calleeCfi; +extern maplecl::Option printFunc; +extern maplecl::Option cyclePatternList; +extern maplecl::Option duplicateAsmList; +extern maplecl::Option duplicateAsmList2; +extern maplecl::Option blockMarker; +extern maplecl::Option soeCheck; +extern maplecl::Option checkArraystore; +extern maplecl::Option debugSchedule; +extern maplecl::Option bruteforceSchedule; +extern maplecl::Option simulateSchedule; +extern maplecl::Option crossLoc; +extern maplecl::Option floatAbi; +extern maplecl::Option filetype; +extern maplecl::Option longCalls; +extern maplecl::Option functionSections; +extern maplecl::Option omitFramePointer; +extern maplecl::Option fastMath; +extern maplecl::Option tailcall; +extern maplecl::Option alignAnalysis; 
+extern maplecl::Option cgSsa; +extern maplecl::Option common; +extern maplecl::Option condbrAlign; +extern maplecl::Option alignMinBbSize; +extern maplecl::Option alignMaxBbSize; +extern maplecl::Option loopAlignPow; +extern maplecl::Option jumpAlignPow; +extern maplecl::Option funcAlignPow; +extern maplecl::Option litePgoGen; +extern maplecl::Option litePgoOutputFunc; +extern maplecl::Option instrumentationFile; +extern maplecl::Option litePgoFile; +} + +#endif /* MAPLE_BE_INCLUDE_CG_OPTIONS_H */ diff --git a/src/mapleall/maple_be/include/cg/cg_pgo_gen.h b/src/mapleall/maple_be/include/cg/cg_pgo_gen.h new file mode 100644 index 0000000000000000000000000000000000000000..cf8647e17198f26c817365b97b1c5a51308d0adf --- /dev/null +++ b/src/mapleall/maple_be/include/cg/cg_pgo_gen.h @@ -0,0 +1,42 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_CG_INCLUDE_CG_PGO_GEN_H +#define MAPLEBE_CG_INCLUDE_CG_PGO_GEN_H + +#include "cgfunc.h" +#include "instrument.h" +namespace maplebe { +class CGProfGen { + public: + CGProfGen(CGFunc &curF, MemPool &mp) + : f(&curF), + instrumenter(mp) {} + + void InstrumentFunction(); + void CreateProfileCalls(); + virtual void CreateIcallForWeakSymbol(BB &bb, const std::string &symName) = 0; + virtual void CreateClearIcall(BB &bb, const std::string &symName) = 0; + virtual void InstrumentBB(BB &bb, MIRSymbol &countTab, uint32 offset) = 0; + protected: + CGFunc *f; + private: + static uint64 counterIdx; + PGOInstrumentTemplate> instrumenter; +}; + +MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgPgoGen, maplebe::CGFunc) +MAPLE_FUNC_PHASE_DECLARE_END +} +#endif // MAPLEBE_CG_INCLUDE_CG_PGO_GEN_H diff --git a/src/mapleall/maple_be/include/cg/cg_pgo_use.h b/src/mapleall/maple_be/include/cg/cg_pgo_use.h new file mode 100644 index 0000000000000000000000000000000000000000..97d5da631b6ea8ac2ed31d484610f45ba3e36338 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/cg_pgo_use.h @@ -0,0 +1,172 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MAPLEBE_CG_INCLUDE_CG_PGO_USE_H +#define MAPLEBE_CG_INCLUDE_CG_PGO_USE_H +#include "cgfunc.h" +#include "instrument.h" +#include "cg_dominance.h" +namespace maplebe { +class BBChain { + public: + using iterator = MapleVector::iterator; + using const_iterator = MapleVector::const_iterator; + BBChain(MapleAllocator &alloc, MapleVector &bb2chain, BB &bb, uint32 inputId) + : id(inputId), + bbVec(1, &bb, alloc.Adapter()), + bb2chain(bb2chain) { + bb2chain[bb.GetId()] = this; + } + + iterator begin() { + return bbVec.begin(); + } + const_iterator begin() const { + return bbVec.begin(); + } + iterator end() { + return bbVec.end(); + } + const_iterator end() const { + return bbVec.end(); + } + + bool empty() const { + return bbVec.empty(); + } + + size_t size() const { + return bbVec.size(); + } + + uint32 GetId() const { + return id; + } + + BB *GetHeader() { + CHECK_FATAL(!bbVec.empty(), "cannot get header from a empty bb chain"); + return bbVec.front(); + } + BB *GetTail() { + CHECK_FATAL(!bbVec.empty(), "cannot get tail from a empty bb chain"); + return bbVec.back(); + } + + bool FindBB(BB &bb) { + auto fIt = std::find(bbVec.begin(), bbVec.end(), &bb); + return fIt != bbVec.end(); + } + + // update unlaidPredCnt if needed. The chain is ready to layout only if unlaidPredCnt == 0 + bool IsReadyToLayout(const MapleVector *context) { + MayRecalculateUnlaidPredCnt(context); + return (unlaidPredCnt == 0); + } + + // Merge src chain to this one + void MergeFrom(BBChain *srcChain); + + void UpdateSuccChainBeforeMerged(const BBChain &destChain, const MapleVector *context, + MapleSet &readyToLayoutChains); + + void Dump() const { + LogInfo::MapleLogger() << "bb chain with " << bbVec.size() << " blocks: "; + for (BB *bb : bbVec) { + LogInfo::MapleLogger() << bb->GetId() << " "; + } + LogInfo::MapleLogger() << std::endl; + } + + void DumpOneLine() const { + for (BB *bb : bbVec) { + LogInfo::MapleLogger() << bb->GetId() << " "; + } + } + + private: + void MayRecalculateUnlaidPredCnt(const MapleVector *context); + + uint32 id = 0; + MapleVector bbVec; + MapleVector &bb2chain; + uint32 unlaidPredCnt = 0; // how many predecessors are not laid out + bool isCacheValid = false; // whether unlaidPredCnt is trustable +}; + +class CGProfUse { + public: + CGProfUse(CGFunc &curF, MemPool &mp, DomAnalysis *dom, MapleSet &newbbinsplit) + : f(&curF), + mp(&mp), + puAlloc(&mp), + domInfo(dom), + bbSplit(newbbinsplit), + instrumenter(mp), + bb2chain(puAlloc.Adapter()), + readyToLayoutChains(puAlloc.Adapter()), + layoutBBs(puAlloc.Adapter()), + laidOut(puAlloc.Adapter()) {} + + bool ApplyPGOData(); + void LayoutBBwithProfile(); + protected: + CGFunc *f; + MemPool *mp; + MapleAllocator puAlloc; + DomAnalysis *domInfo = nullptr; + MapleSet bbSplit; + private: + PGOInstrumentTemplate> instrumenter; + std::unordered_map*> bbProfileInfo; + + void ApplyOnBB(); + void InitBBEdgeInfo(); + /* compute all edge freq in the cfg without consider exception */ + void ComputeEdgeFreq(); + /* If all input edges or output edges determined, caculate BB freq */ + void ComputeBBFreq(BBUseInfo &bbInfo, bool &change); + uint64 SumEdgesCount(const MapleVector*> &edges) const; + BBUseInfo *GetOrCreateBBUseInfo(const maplebe::BB &bb, bool notCreate = false); + void SetEdgeCount(maple::BBUseEdge &e, size_t count); + + /* functions && members for PGO layout */ + void BuildChainForFunc(); + void BuildChainForLoops(); + void BuildChainForLoop(CGFuncLoops &loop, MapleVector *context); + void InitBBChains(); + void DoBuildChain(const BB 
&header, BBChain &chain, const MapleVector *context); + BB *GetBestSucc(BB &bb, const BBChain &chain, const MapleVector *context, bool considerBetterPred); + BB *FindBestStartBBForLoop(CGFuncLoops &loop, const MapleVector *context); + + bool IsBBInCurrContext(const BB &bb, const MapleVector *context) const; + bool IsCandidateSucc(const BB &bb, const BB &succ, const MapleVector *context); + bool HasBetterLayoutPred(const BB &bb, const BB &succ) const; + + void AddBBProf(BB &bb); + void AddBB(BB &bb); + void ReTargetSuccBB(BB &bb, BB &fallthru); + void ChangeToFallthruFromGoto(BB &bb); + + MapleVector bb2chain; + MapleSet readyToLayoutChains; + bool debugChainLayout = false; + uint32 rpoSearchPos = 0; // reverse post order search beginning position + MapleVector layoutBBs; // gives the determined layout order + MapleVector laidOut; // indexed by bbid to tell if has been laid out +}; + +MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgPgoUse, maplebe::CGFunc) +MAPLE_FUNC_PHASE_DECLARE_END +} +#endif // MAPLEBE_CG_INCLUDE_CG_PGO_USE_H \ No newline at end of file diff --git a/src/mapleall/maple_be/include/cg/cg_phase.h b/src/mapleall/maple_be/include/cg/cg_phase.h new file mode 100644 index 0000000000000000000000000000000000000000..5c0fe56006505489b092d98a82c7604d61b62023 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/cg_phase.h @@ -0,0 +1,24 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_CG_CG_PHASE_H +#define MAPLEBE_INCLUDE_CG_CG_PHASE_H + +namespace maple {} +namespace maplebe { +using namespace maple; +class CGFunc; +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_CG_PHASE_H */ diff --git a/src/mapleall/maple_be/include/cg/cg_phasemanager.h b/src/mapleall/maple_be/include/cg/cg_phasemanager.h new file mode 100644 index 0000000000000000000000000000000000000000..411532ec236e584c5ccb06b4da4ea9bee0a48022 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/cg_phasemanager.h @@ -0,0 +1,82 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MAPLEBE_INCLUDE_CG_CG_PHASEMANAGER_H +#define MAPLEBE_INCLUDE_CG_CG_PHASEMANAGER_H +#include +#include "mempool.h" +#include "mempool_allocator.h" +#include "mir_module.h" +#include "mir_lower.h" +#include "lower.h" +#include "constantfold.h" +#include "cgfunc.h" +#include "cg_phase.h" +#include "cg_option.h" +namespace maplebe { +using CgFuncOptTy = MapleFunctionPhase; + +/* =================== new phase manager =================== */ +class CgFuncPM : public FunctionPM { + public: + explicit CgFuncPM(MemPool *mp) : FunctionPM(mp, &id) {} + PHASECONSTRUCTOR(CgFuncPM); + std::string PhaseName() const override; + ~CgFuncPM() override { + cgOptions = nullptr; + cg = nullptr; + beCommon = nullptr; + if (CGOptions::IsEnableTimePhases()) { + DumpPhaseTime(); + } + } + bool PhaseRun(MIRModule &m) override; + + void SetCGOptions(CGOptions *curCGOptions) { + cgOptions = curCGOptions; + } + + CG *GetCG() { + return cg; + } + BECommon *GetBECommon() { + return beCommon; + } + private: + bool FuncLevelRun(CGFunc &cgFunc, AnalysisDataManager &serialADM); + void GenerateOutPutFile(MIRModule &m) const; + void CreateCGAndBeCommon(MIRModule &m); + void PrepareLower(MIRModule &m); + void PostOutPut(MIRModule &m) const; + void DoFuncCGLower(const MIRModule &m, MIRFunction &mirFunc) const; + /* Tool functions */ + void DumpFuncCGIR(const CGFunc &f, const std::string &phaseName) const; + /* For Emit */ + void InitProfile(MIRModule &m) const; + void EmitGlobalInfo(MIRModule &m) const; + void EmitDuplicatedAsmFunc(MIRModule &m) const; + void EmitDebugInfo(const MIRModule &m) const; + void EmitFastFuncs(const MIRModule &m) const; + bool IsFramework(MIRModule &m) const; + void SweepUnusedStaticSymbol(MIRModule &m) const; + + CG *cg = nullptr; + BECommon *beCommon = nullptr; + MIRLower *mirLower = nullptr; + CGLowerer *cgLower = nullptr; + /* module options */ + CGOptions *cgOptions = nullptr; +}; +} /* namespace maplebe */ +#endif /* MAPLEBE_INCLUDE_CG_CG_PHASEMANAGER_H */ diff --git a/src/mapleall/maple_be/include/cg/cg_phi_elimination.h b/src/mapleall/maple_be/include/cg/cg_phi_elimination.h new file mode 100644 index 0000000000000000000000000000000000000000..1fe8d22ab948a2e3c5d17315e6a05a646edf27da --- /dev/null +++ b/src/mapleall/maple_be/include/cg/cg_phi_elimination.h @@ -0,0 +1,77 @@ +/* + * Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MAPLEBE_CG_INCLUDE_CG_PHI_ELIMINATE_H +#define MAPLEBE_CG_INCLUDE_CG_PHI_ELIMINATE_H + +#include "cgfunc.h" +#include "cg_ssa.h" + +namespace maplebe { +class PhiEliminate { + public: + PhiEliminate(CGFunc &f, CGSSAInfo &ssaAnalysisResult, MemPool &mp) + : cgFunc(&f), + ssaInfo(&ssaAnalysisResult), + phiEliAlloc(&mp), + eliminatedBB(phiEliAlloc.Adapter()), + replaceVreg(phiEliAlloc.Adapter()), + remateInfoAfterSSA(phiEliAlloc.Adapter()) { + tempRegNO = static_cast(GetSSAInfo()->GetAllSSAOperands().size()) + CGSSAInfo::ssaRegNObase; + } + virtual ~PhiEliminate() = default; + CGSSAInfo *GetSSAInfo() { + return ssaInfo; + } + void TranslateTSSAToCSSA(); + /* move ssaRegOperand from ssaInfo to cgfunc */ + virtual void ReCreateRegOperand(Insn &insn) = 0; + + protected: + virtual Insn &CreateMov(RegOperand &destOpnd, RegOperand &fromOpnd) = 0; + virtual void MaintainRematInfo(RegOperand &destOpnd, RegOperand &fromOpnd, bool isCopy) = 0; + virtual void AppendMovAfterLastVregDef(BB &bb, Insn &movInsn) const = 0; + void UpdateRematInfo(); + regno_t GetAndIncreaseTempRegNO(); + regno_t RecursiveBothDU(RegOperand &ssaOpnd); + RegOperand *MakeRoomForNoDefVreg(RegOperand &conflictReg); + void RecordRematInfo(regno_t vRegNO, PregIdx pIdx); + PregIdx FindRematInfo(regno_t vRegNO) { + return (remateInfoAfterSSA.count(vRegNO)) > 0 ? remateInfoAfterSSA[vRegNO] : -1; + } + CGFunc *cgFunc; + CGSSAInfo *ssaInfo; + MapleAllocator phiEliAlloc; + + private: + void PlaceMovInPredBB(uint32 predBBId, Insn &movInsn) const; + virtual RegOperand &CreateTempRegForCSSA(RegOperand &oriOpnd) = 0; + MapleSet eliminatedBB; + /* + * noDef Vregs occupy the vregno_t which is used for ssa re_creating + * first : conflicting VReg with noDef VReg second : new_Vreg opnd to replace occupied Vreg + */ + MapleUnorderedMap replaceVreg; + regno_t tempRegNO = 0; /* use for create mov insn for phi */ + MapleMap remateInfoAfterSSA; +}; + +class OperandPhiElmVisitor : public OperandVisitorBase, + public OperandVisitors { +}; + +MAPLE_FUNC_PHASE_DECLARE(CgPhiElimination, maplebe::CGFunc) +} + +#endif // MAPLEBE_CG_INCLUDE_CG_PHI_ELIMINATE_H diff --git a/src/mapleall/maple_be/include/cg/cg_pre.h b/src/mapleall/maple_be/include/cg/cg_pre.h new file mode 100644 index 0000000000000000000000000000000000000000..40e80d65fa1687860f0c12b7b427fcc236ba19e2 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/cg_pre.h @@ -0,0 +1,116 @@ +/* + * Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MAPLEBE_CG_INCLUDE_CGPRE_H +#define MAPLEBE_CG_INCLUDE_CGPRE_H +#include "cg_occur.h" +#include "cg_dominance.h" +#include "cgfunc.h" + +namespace maplebe { +enum PreKind { + kExprPre, + kStmtPre, + kLoadPre, + kAddrPre +}; + +class CGPre { + public: + CGPre(DomAnalysis &currDom, MemPool &memPool, MemPool &mp2, PreKind kind, uint32 limit) + : dom(&currDom), + ssaPreMemPool(&memPool), + ssaPreAllocator(&memPool), + perCandMemPool(&mp2), + perCandAllocator(&mp2), + workList(ssaPreAllocator.Adapter()), + preKind(kind), + allOccs(ssaPreAllocator.Adapter()), + phiOccs(ssaPreAllocator.Adapter()), + exitOccs(ssaPreAllocator.Adapter()), + preLimit(limit), + dfPhiDfns(std::less(), ssaPreAllocator.Adapter()), + varPhiDfns(std::less(), ssaPreAllocator.Adapter()), + temp2LocalRefVarMap(ssaPreAllocator.Adapter()) { + preWorkCandHashTable.GetWorkcandHashTable().fill(nullptr); + } + + virtual ~CGPre() = default; + + const MapleVector &GetRealOccList() const { + return workCand->GetRealOccs(); + } + + virtual BB *GetBB(uint32 id) const = 0; + virtual PUIdx GetPUIdx() const = 0; + virtual void SetCurFunction(PUIdx) const {} + + void GetIterDomFrontier(const BB *bb, MapleSet *dfset) const { + for (uint32 bbid : dom->GetIdomFrontier(bb->GetId())) { + (void)dfset->insert(dom->GetDtDfnItem(bbid)); + } + } + + PreWorkCand* GetWorkCand() const { + return workCand; + } + // compute downsafety for each PHI + static void ResetDS(CgPhiOcc *phiOcc); + void ComputeDS(); + + protected: + virtual void ComputeVarAndDfPhis() = 0; + virtual void CreateSortedOccs(); + CgOccur *CreateRealOcc(Insn &insn, Operand &opnd, OccType occType); + virtual void BuildWorkList() = 0; + /* for stmt pre only */ + void CreateExitOcc(BB &bb) { + CgOccur *exitOcc = ssaPreMemPool->New(kOccExit, 0, bb, nullptr); + exitOccs.push_back(exitOcc); + } + + DomAnalysis *dom; + MemPool *ssaPreMemPool; + MapleAllocator ssaPreAllocator; + MemPool *perCandMemPool; + MapleAllocator perCandAllocator; + MapleList workList; + PreWorkCand *workCand = nullptr; // the current PreWorkCand + PreKind preKind; + + // PRE work candidates; incremented by 2 for each tree; + // purpose is to avoid processing a node the third time + // inside a tree (which is a DAG) + // the following 3 lists are all maintained in order of dt_preorder + MapleVector allOccs; // cleared at start of each workcand + MapleVector phiOccs; // cleared at start of each workcand + MapleVector exitOccs; // this is shared by all workcands + uint32 preLimit; // set by command-line option to limit the number of candidates optimized (for debugging purpose) + // step 1 phi insertion data structures + // following are set of BBs in terms of their dfn's; index into + // dominance->pdt_preorder to get their bbid's + MapleSet dfPhiDfns; // phis inserted due to dominance frontiers + MapleSet varPhiDfns; // phis inserted due to the var operands + // step 2 renaming data structures + uint32 classCount = 0; // count class created during renaming + // is index into workCand->realOccs + // step 6 codemotion data structures + MapleMap temp2LocalRefVarMap; + int32 reBuiltOccIndex = -1; // stores the size of worklist every time when try to add new worklist, update before + // each code motion + uint32 strIdxCount = 0; // ssapre will create a lot of temp variables if using var to store redundances, start from 0 + PreWorkCandHashTable preWorkCandHashTable; +}; +} // namespace maple +#endif // MAPLEBE_CG_INCLUDE_CGPRE_H diff --git a/src/mapleall/maple_be/include/cg/cg_prop.h 
b/src/mapleall/maple_be/include/cg/cg_prop.h new file mode 100644 index 0000000000000000000000000000000000000000..885c84273d39156366e30a9fcde8d3e603e67cf5 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/cg_prop.h @@ -0,0 +1,129 @@ +/* + * Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ + +#ifndef MAPLEBE_INCLUDE_CG_PROP_H +#define MAPLEBE_INCLUDE_CG_PROP_H + +#include "cgfunc.h" +#include "cg_ssa.h" +#include "cg_dce.h" +#include "cg.h" +#include "reg_coalesce.h" + +namespace maplebe { +class CGProp { + public: + CGProp(MemPool &mp, CGFunc &f, CGSSAInfo &sInfo, LiveIntervalAnalysis &ll) + : memPool(&mp), + cgFunc(&f), + propAlloc(&mp), + ssaInfo(&sInfo), + regll(&ll) { + cgDce = f.GetCG()->CreateCGDce(mp, f, sInfo); + } + virtual ~CGProp() = default; + + void DoCopyProp(); + void DoTargetProp(); + + protected: + MemPool *memPool; + CGFunc *cgFunc; + MapleAllocator propAlloc; + CGSSAInfo *GetSSAInfo() { + return ssaInfo; + } + CGDce *GetDce() { + return cgDce; + } + LiveIntervalAnalysis *GetRegll() { + return regll; + } + + private: + virtual void CopyProp() = 0; + virtual void TargetProp(Insn &insn) = 0; + virtual void PropPatternOpt() = 0; + CGSSAInfo *ssaInfo; + CGDce *cgDce = nullptr; + LiveIntervalAnalysis *regll; +}; + +class PropOptimizeManager { + public: + ~PropOptimizeManager() = default; + template + void Optimize(CGFunc &cgFunc, CGSSAInfo *cgssaInfo, LiveIntervalAnalysis *ll) const { + PropOptimizePattern optPattern(cgFunc, cgssaInfo, ll); + optPattern.Run(); + } + template + void Optimize(CGFunc &cgFunc, CGSSAInfo *cgssaInfo) const { + PropOptimizePattern optPattern(cgFunc, cgssaInfo); + optPattern.Run(); + } +}; + +class PropOptimizePattern { + public: + PropOptimizePattern(CGFunc &cgFunc, CGSSAInfo *cgssaInfo, LiveIntervalAnalysis *ll) + : cgFunc(cgFunc), + optSsaInfo(cgssaInfo), + regll(ll) {} + + PropOptimizePattern(CGFunc &cgFunc, CGSSAInfo *cgssaInfo) + : cgFunc(cgFunc), + optSsaInfo(cgssaInfo) {} + virtual ~PropOptimizePattern() = default; + virtual bool CheckCondition(Insn &insn) = 0; + virtual void Optimize(Insn &insn) = 0; + virtual void Run() = 0; + + protected: + std::string PhaseName() const { + return "propopt"; + } + virtual void Init() = 0; + Insn *FindDefInsn(const VRegVersion *useVersion) const; + + CGFunc &cgFunc; + CGSSAInfo *optSsaInfo = nullptr; + LiveIntervalAnalysis *regll = nullptr; +}; + +class ReplaceRegOpndVisitor : public OperandVisitorBase, + public OperandVisitors, + public OperandVisitor { + public: + ReplaceRegOpndVisitor(CGFunc &f, Insn &cInsn, uint32 cIdx, RegOperand &oldR, RegOperand &newR) + : cgFunc(&f), + insn(&cInsn), + idx(cIdx), + oldReg(&oldR), + newReg(&newR) {} + virtual ~ReplaceRegOpndVisitor() = default; + + protected: + CGFunc *cgFunc; + Insn *insn; + uint32 idx; + RegOperand *oldReg; + RegOperand *newReg; +}; + +MAPLE_FUNC_PHASE_DECLARE(CgCopyProp, maplebe::CGFunc) +MAPLE_FUNC_PHASE_DECLARE(CgTargetProp, maplebe::CGFunc) +} +#endif /* MAPLEBE_INCLUDE_CG_PROP_H */ diff --git 
a/src/mapleall/maple_be/include/cg/cg_rce.h b/src/mapleall/maple_be/include/cg/cg_rce.h new file mode 100644 index 0000000000000000000000000000000000000000..2fc3d2e11c0ede5a07756350262cbdf8476de723 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/cg_rce.h @@ -0,0 +1,47 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_CG_RCE_H +#define MAPLEBE_INCLUDE_CG_RCE_H + +#include "cg.h" +#include "cgfunc.h" +#include "cgbb.h" +#include "cg_ssa.h" + +namespace maplebe { +#define CG_RCE_DUMP CG_DEBUG_FUNC(*cgFunc) +using InsnSet = std::set; +static uint32 g_count = 0; +class RedundantComputeElim { + public: + RedundantComputeElim(CGFunc &f, CGSSAInfo &info, MemPool &mp) : + cgFunc(&f), ssaInfo(&info), rceAlloc(&mp) {} + virtual ~RedundantComputeElim() = default; + + std::string PhaseName() const { + return "cgredundantcompelim"; + } + + virtual void Run() = 0; + void Dump(const Insn *insn1, const Insn *insn2) const; + + protected: + CGFunc *cgFunc; + CGSSAInfo *ssaInfo; + MapleAllocator rceAlloc; +}; +MAPLE_FUNC_PHASE_DECLARE(CgRedundantCompElim, maplebe::CGFunc) +} /* namespace maplebe */ +#endif /* MAPLEBE_INCLUDE_CG_RCE_H */ diff --git a/src/mapleall/maple_be/include/cg/cg_ssa.h b/src/mapleall/maple_be/include/cg/cg_ssa.h new file mode 100644 index 0000000000000000000000000000000000000000..1853481ac4021bbd4f0bbe1604d1bc7e65f882b2 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/cg_ssa.h @@ -0,0 +1,301 @@ +/* + * Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MAPLEBE_CG_INCLUDE_CG_SSA_H +#define MAPLEBE_CG_INCLUDE_CG_SSA_H + +#include "cgfunc.h" +#include "cg_dominance.h" +#include "live.h" +#include "operand.h" +#include "visitor_common.h" + +namespace maplebe { +class CGSSAInfo; +enum SSAOpndDefBy { + kDefByNo, + kDefByInsn, + kDefByPhi +}; + +/* precise def/use info in machine instrcution */ +class DUInsnInfo { + public: + DUInsnInfo(Insn *cInsn, uint32 cIdx, MapleAllocator &alloc) : insn(cInsn), defUseInfo(alloc.Adapter()) { + IncreaseDU(cIdx); + } + void IncreaseDU(uint32 idx) { + if (defUseInfo.count(idx) == 0) { + defUseInfo[idx] = 0; + } + defUseInfo[idx]++; + } + void DecreaseDU(uint32 idx) { + ASSERT(defUseInfo[idx] > 0, "no def/use any more"); + defUseInfo[idx]--; + } + void ClearDU(uint32 idx) { + ASSERT(defUseInfo.count(idx), "no def/use find"); + defUseInfo[idx] = 0; + } + bool HasNoDU() { + for (auto &it : std::as_const(defUseInfo)) { + if (it.second != 0) { + return false; + } + } + return true; + } + Insn *GetInsn() { + return insn; + } + MapleMap& GetOperands() { + return defUseInfo; + } + private: + Insn *insn; + /* operand idx --- count */ + MapleMap defUseInfo; +}; + +class VRegVersion { + public: + VRegVersion(const MapleAllocator &alloc, RegOperand &vReg, uint32 vIdx, regno_t vregNO) + : versionAlloc(alloc), + ssaRegOpnd(&vReg), + versionIdx(vIdx), + originalRegNO(vregNO), + useInsnInfos(versionAlloc.Adapter()) {} + void SetDefInsn(DUInsnInfo *duInfo, SSAOpndDefBy defTy) { + defInsnInfo = duInfo; + defType = defTy; + } + DUInsnInfo *GetDefInsnInfo() const { + return defInsnInfo; + } + SSAOpndDefBy GetDefType() const { + return defType; + } + RegOperand *GetSSAvRegOpnd(bool isDef = true) { + if (!isDef) { + return implicitCvtedRegOpnd; + } + return ssaRegOpnd; + } + uint32 GetVersionIdx() const { + return versionIdx; + } + regno_t GetOriginalRegNO() const { + return originalRegNO; + } + void AddUseInsn(CGSSAInfo &ssaInfo, Insn &useInsn, uint32 idx); + /* elimate dead use */ + void CheckDeadUse(const Insn &useInsn); + void RemoveUseInsn(const Insn &useInsn, uint32 idx); + MapleUnorderedMap &GetAllUseInsns() { + return useInsnInfos; + } + void MarkDeleted() { + deleted = true; + } + void MarkRecovery() { + deleted = false; + } + bool IsDeleted() const { + return deleted; + } + void SetImplicitCvt() { + hasImplicitCvt = true; + } + bool HasImplicitCvt() const { + return hasImplicitCvt; + } + + private: + MapleAllocator versionAlloc; + /* if this version has implicit conversion, it refers to def reg */ + RegOperand *ssaRegOpnd; + RegOperand *implicitCvtedRegOpnd = nullptr; + uint32 versionIdx; + regno_t originalRegNO; + DUInsnInfo *defInsnInfo = nullptr; + SSAOpndDefBy defType = kDefByNo; + /* insn ID -> insn* & operand Idx */ + // --> vector? 
+ MapleUnorderedMap useInsnInfos; + bool deleted = false; + /* + * def reg (size:64) or def reg (size:32) --> + * all use reg (size:32) all use reg (size:64) + * do not support single use which has implicit conversion yet + * support single use in defUseInfo in future + */ + bool hasImplicitCvt = false; +}; + +class CGSSAInfo { + public: + CGSSAInfo(CGFunc &f, DomAnalysis &da, MemPool &mp, MemPool &tmp) + : cgFunc(&f), + memPool(&mp), + tempMp(&tmp), + ssaAlloc(&mp), + domInfo(&da), + renamedBBs(ssaAlloc.Adapter()), + vRegDefCount(ssaAlloc.Adapter()), + vRegStk(ssaAlloc.Adapter()), + allSSAOperands(ssaAlloc.Adapter()), + noDefVRegs(ssaAlloc.Adapter()), + reversePostOrder(ssaAlloc.Adapter()), + safePropInsns(ssaAlloc.Adapter()) {} + virtual ~CGSSAInfo() = default; + void ConstructSSA(); + VRegVersion *FindSSAVersion(regno_t ssaRegNO); /* Get specific ssa info */ + Insn *GetDefInsn(const RegOperand &useReg); + virtual void ReplaceInsn(Insn &oriInsn, Insn &newInsn) = 0; /* replace insn & update ssaInfo */ + virtual void ReplaceAllUse(VRegVersion *toBeReplaced, VRegVersion *newVersion) = 0; + virtual void CreateNewInsnSSAInfo(Insn &newInsn) = 0; + PhiOperand &CreatePhiOperand(); + + DUInsnInfo *CreateDUInsnInfo(Insn *cInsn, uint32 idx) { + return memPool->New(cInsn, idx, ssaAlloc); + } + const MapleUnorderedMap &GetAllSSAOperands() const { + return allSSAOperands; + } + bool IsNoDefVReg(regno_t vRegNO) const { + return noDefVRegs.find(vRegNO) != noDefVRegs.end(); + } + uint32 GetVersionNOOfOriginalVreg(regno_t vRegNO) { + if (vRegDefCount.count(vRegNO) > 0) { + return vRegDefCount[vRegNO]; + } + ASSERT(false, " original vreg is not existed"); + return 0; + } + MapleVector &GetReversePostOrder() { + return reversePostOrder; + } + void InsertSafePropInsn(uint32 insnId) { + (void)safePropInsns.emplace_back(insnId); + } + MapleVector &GetSafePropInsns() { + return safePropInsns; + } + void DumpFuncCGIRinSSAForm() const; + virtual void DumpInsnInSSAForm(const Insn &insn) const = 0; + static uint32 ssaRegNObase; + + protected: + VRegVersion *CreateNewVersion(RegOperand &virtualOpnd, Insn &defInsn, uint32 idx, bool isDefByPhi = false); + virtual RegOperand *CreateSSAOperand(RegOperand &virtualOpnd) = 0; + bool IncreaseSSAOperand(regno_t vRegNO, VRegVersion *vst); + uint32 IncreaseVregCount(regno_t vRegNO); + VRegVersion *GetVersion(const RegOperand &virtualOpnd); + MapleUnorderedMap &GetPrivateAllSSAOperands() { + return allSSAOperands; + } + void AddNoDefVReg(regno_t noDefVregNO) { + ASSERT(!noDefVRegs.count(noDefVregNO), "duplicate no def Reg, please check"); + noDefVRegs.emplace(noDefVregNO); + } + void MarkInsnsInSSA(Insn &insn); + CGFunc *cgFunc = nullptr; + MemPool *memPool = nullptr; + MemPool *tempMp = nullptr; + MapleAllocator ssaAlloc; + + private: + void InsertPhiInsn(); + void RenameVariablesForBB(uint32 bbID); + void RenameBB(BB &bb); + void RenamePhi(BB &bb); + virtual void RenameInsn(Insn &insn) = 0; + /* build ssa on virtual register only */ + virtual RegOperand *GetRenamedOperand(RegOperand &vRegOpnd, bool isDef, Insn &curInsn, uint32 idx) = 0; + void RenameSuccPhiUse(const BB &bb); + void PrunedPhiInsertion(const BB &bb, RegOperand &virtualOpnd); + + void AddRenamedBB(uint32 bbID) { + ASSERT(!renamedBBs.count(bbID), "cgbb has been renamed already"); + renamedBBs.emplace(bbID); + } + bool IsBBRenamed(uint32 bbID) const { + return renamedBBs.count(bbID); + } + void SetReversePostOrder(); + + DomAnalysis *domInfo = nullptr; + MapleSet renamedBBs; + /* original regNO - number of 
definitions (start from 0) */ + MapleMap vRegDefCount; + /* original regNO - ssa version stk */ + MapleMap> vRegStk; + /* ssa regNO - ssa virtual operand version */ + MapleUnorderedMap allSSAOperands; + /* For virtual registers which do not have definition */ + MapleSet noDefVRegs; + /* only save bb_id to reduce space */ + MapleVector reversePostOrder; + /* destSize < srcSize but can be propagated */ + MapleVector safePropInsns; + int32 insnCount = 0; +}; + +class SSAOperandVisitor : public OperandVisitorBase, + public OperandVisitors { + public: + SSAOperandVisitor(Insn &cInsn, const OpndDesc &cDes, uint32 idx) : insn(&cInsn), opndDes(&cDes), idx(idx) {} + SSAOperandVisitor() = default; + virtual ~SSAOperandVisitor() = default; + void SetInsnOpndInfo(Insn &cInsn, const OpndDesc &cDes, uint32 index) { + insn = &cInsn; + opndDes = &cDes; + this->idx = index; + } + + protected: + Insn *insn = nullptr; + const OpndDesc *opndDes = nullptr; + uint32 idx = 0; +}; + +class SSAOperandDumpVisitor : public OperandVisitorBase, + public OperandVisitors, + public OperandVisitor { + public: + explicit SSAOperandDumpVisitor(const MapleUnorderedMap &allssa) : allSSAOperands(allssa) {} + virtual ~SSAOperandDumpVisitor() = default; + void SetHasDumped() { + hasDumped = true; + } + bool HasDumped() const { + return hasDumped; + } + bool hasDumped = false; + protected: + const MapleUnorderedMap &allSSAOperands; +}; + +MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgSSAConstruct, maplebe::CGFunc); +CGSSAInfo *GetResult() { + return ssaInfo; +} +CGSSAInfo *ssaInfo = nullptr; + private: + void GetAnalysisDependence(maple::AnalysisDep &aDep) const override; +MAPLE_FUNC_PHASE_DECLARE_END +} + +#endif // MAPLEBE_CG_INCLUDE_CG_SSA_H diff --git a/src/mapleall/maple_be/include/cg/cg_ssa_pre.h b/src/mapleall/maple_be/include/cg/cg_ssa_pre.h new file mode 100644 index 0000000000000000000000000000000000000000..988b979456fce3f9e0f351aa06463976d2f95004 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/cg_ssa_pre.h @@ -0,0 +1,209 @@ +/* + * Copyright (c) [2022] Futurewei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_CG_INCLUDE_CG_SSU_PRE_H +#define MAPLEBE_CG_INCLUDE_CG_SSU_PRE_H +#include +#include "mempool.h" +#include "mempool_allocator.h" +#include "cg_dominance.h" + +// Use SSAPRE to determine where to insert saves for callee-saved registers. +// The external interface is DoSavePlacementOpt(). Class SsaPreWorkCand is used +// as input/output interface. + +namespace maplebe { + +using BBId = uint32; + +// This must have been constructed by the caller of DoSavePlacementOpt() and +// passed to it as parameter. The caller of DoSavePlacementOpt() describes +// the problem via occBBs. DoSavePlacementOpt()'s outputs are returned to the +// caller by setting saveAtEntryBBs. 
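A minimal sketch of how a caller might drive DoSavePlacementOpt() for one callee-saved register, following the input/output interface described in the comment above. It assumes the MapleSet members hold BBId values (the container template arguments are elided in the patch text as shown), and the PlaceCalleeSaves driver itself is hypothetical, introduced only for illustration.

#include <vector>
#include "cg_ssa_pre.h"

namespace maplebe {
// Hypothetical driver, for illustration only: place saves for one
// callee-saved register whose occurrences are the BBs in bbsUsingReg.
void PlaceCalleeSaves(CGFunc &f, DomAnalysis &dom, MemPool &mp,
                      const std::vector<BBId> &bbsUsingReg) {
  MapleAllocator alloc(&mp);
  SsaPreWorkCand cand(&alloc);
  // Input: ids of the BBs in which the callee-saved register appears.
  for (BBId id : bbsUsingReg) {
    (void)cand.occBBs.insert(id);
  }
  DoSavePlacementOpt(&f, &dom, &cand);
  if (cand.saveAtProlog) {
    return;  // shrink-wrapping not possible; save in the prologue as before
  }
  // Output: a save should be emitted at the entry of each selected BB.
  for (BBId id : cand.saveAtEntryBBs) {
    (void)id;  // emit the save of the register at the entry of BB `id` here
  }
}
}  // namespace maplebe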
+class SsaPreWorkCand { + public: + explicit SsaPreWorkCand(MapleAllocator *alloc) : occBBs(alloc->Adapter()), saveAtEntryBBs(alloc->Adapter()) {} + // inputs + MapleSet occBBs; // Id's of BBs with appearances of the callee-saved reg + // outputs + MapleSet saveAtEntryBBs; // Id's of BBs to insert saves of the register at BB entry + bool saveAtProlog = false; // if true, no shrinkwrapping can be done and + // the other outputs can be ignored +}; + +extern void DoSavePlacementOpt(CGFunc *f, DomAnalysis *dom, SsaPreWorkCand *workCand); + +enum AOccType { + kAOccUndef, + kAOccReal, + kAOccPhi, + kAOccPhiOpnd, + kAOccExit, +}; + +class Occ { + public: + Occ(AOccType ty, BB *bb) : occTy(ty), cgbb(bb) {} + virtual ~Occ() = default; + + virtual void Dump() const = 0; + bool IsDominate(DomAnalysis *dom, const Occ *occ) const { + return dom->Dominate(*cgbb, *occ->cgbb); + } + + AOccType occTy; + uint32 classId = 0; + BB *cgbb; // the BB it occurs in + Occ *def = nullptr; // points to its single def +}; + +class RealOcc : public Occ { + public: + explicit RealOcc(BB *bb): Occ(kAOccReal, bb) {} + virtual ~RealOcc() = default; + + void Dump() const override { + LogInfo::MapleLogger() << "RealOcc at bb" << cgbb->GetId(); + LogInfo::MapleLogger() << " classId" << classId; + } + + bool redundant = true; +}; + +class PhiOcc; + +class PhiOpndOcc : public Occ { + public: + explicit PhiOpndOcc(BB *bb): Occ(kAOccPhiOpnd, bb) {} + virtual ~PhiOpndOcc() = default; + + void Dump() const override { + LogInfo::MapleLogger() << "PhiOpndOcc at bb" << cgbb->GetId() << " classId" << classId; + } + + PhiOcc *defPhiOcc = nullptr; // its lhs definition + bool hasRealUse = false; + bool insertHere = false; +}; + +class PhiOcc : public Occ { + public: + PhiOcc(BB *bb, MapleAllocator &alloc) + : Occ(kAOccPhi, bb), phiOpnds(alloc.Adapter()) {} + virtual ~PhiOcc() = default; + + bool WillBeAvail() const { + return isCanBeAvail && !isLater; + } + + void Dump() const override { + LogInfo::MapleLogger() << "PhiOcc at bb" << cgbb->GetId() << " classId" << classId << " Phi["; + for (size_t i = 0; i < phiOpnds.size(); i++) { + phiOpnds[i]->Dump(); + if (i != phiOpnds.size() - 1) { + LogInfo::MapleLogger() << ", "; + } + } + LogInfo::MapleLogger() << "]"; + } + + bool isDownsafe = true; + bool speculativeDownsafe = false; // true if set to downsafe via speculation + bool isCanBeAvail = true; + bool isLater = true; + MapleVector phiOpnds; +}; + +class ExitOcc : public Occ { + public: + explicit ExitOcc(BB *bb) : Occ(kAOccExit, bb) {} + virtual ~ExitOcc() = default; + + void Dump() const override { + LogInfo::MapleLogger() << "ExitOcc at bb" << cgbb->GetId(); + } +}; + +class SSAPre { + public: + SSAPre(CGFunc *cgfunc, DomAnalysis *dm, MemPool *memPool, SsaPreWorkCand *wkcand, bool aeap, bool enDebug) + : cgFunc(cgfunc), + dom(dm), + preMp(memPool), + preAllocator(memPool), + workCand(wkcand), + fullyAntBBs(cgfunc->GetAllBBs().size(), true, preAllocator.Adapter()), + phiDfns(std::less(), preAllocator.Adapter()), + classCount(0), + realOccs(preAllocator.Adapter()), + allOccs(preAllocator.Adapter()), + phiOccs(preAllocator.Adapter()), + exitOccs(preAllocator.Adapter()), + asEarlyAsPossible(aeap), + enabledDebug(enDebug) {} + ~SSAPre() = default; + + void ApplySSAPre(); + + private: + // step 6 methods + void CodeMotion(); + // step 5 methods + void Finalize(); + // step 4 methods + void ResetCanBeAvail(PhiOcc *phi) const; + void ComputeCanBeAvail() const; + void ResetLater(PhiOcc *phi) const; + void ComputeLater() const; + // step 3 
methods + void ResetDownsafe(const PhiOpndOcc *phiOpnd) const; + void ComputeDownsafe() const; + // step 2 methods + void Rename(); + // step 1 methods + void GetIterDomFrontier(const BB *bb, MapleSet *dfset) const { + for (BBId bbid : dom->GetIdomFrontier(bb->GetId())) { + (void)dfset->insert(dom->GetDtDfnItem(bbid)); + } + } + void FormPhis(); + void CreateSortedOccs(); + // step 0 methods + void PropagateNotAnt(BB *bb, std::set *visitedBBs); + void FormRealsNExits(); + + CGFunc *cgFunc; + DomAnalysis *dom; + MemPool *preMp; + MapleAllocator preAllocator; + SsaPreWorkCand *workCand; + // step 0 + MapleVector fullyAntBBs; // index is BBid; true if occ is fully anticipated at BB entry + // step 1 phi insertion data structures: + MapleSet phiDfns; // set by FormPhis(); set of BBs in terms of their + // dfn's; index into dominance->dt_preorder to get + // their bbid's + // step 2 renaming + uint32 classCount; // for assigning new class id + // the following 4 lists are all maintained in order of dt_preorder + MapleVector realOccs; + MapleVector allOccs; + MapleVector phiOccs; + MapleVector exitOccs; + bool asEarlyAsPossible; + bool enabledDebug; +}; + +}; // namespace maplabe +#endif // MAPLEBE_CG_INCLUDE_CG_SSA_PRE_H diff --git a/src/mapleall/maple_be/include/cg/cg_ssu_pre.h b/src/mapleall/maple_be/include/cg/cg_ssu_pre.h new file mode 100644 index 0000000000000000000000000000000000000000..26252c241e7264fcf1b7f51578124ff423295352 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/cg_ssu_pre.h @@ -0,0 +1,232 @@ +/* + * Copyright (c) [2021] Futurewei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_CG_INCLUDE_CGSSUPRE_H +#define MAPLEBE_CG_INCLUDE_CGSSUPRE_H +#include +#include "mempool.h" +#include "mempool_allocator.h" +#include "cg_dominance.h" +#include "cg_ssa_pre.h" + +// Use SSUPRE to determine where to insert restores for callee-saved registers. +// The external interface is DoRestorePlacementOpt(). Class SPreWorkCand is used +// as input/output interface. + +namespace maplebe { + +// This must have been constructed by the caller of DoRestorePlacementOpt() and +// passed to it as parameter. The caller of DoRestorePlacementOpt() describes +// the problem via occBBs and saveBBs. DoRestorePlacementOpt()'s outputs are +// returned to the caller by setting restoreAtEntryBBs and restoreAtExitBBs. 
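A matching sketch for the restore side, continuing the hypothetical PlaceCalleeSaves driver shown under cg_ssa_pre.h: the BBs chosen by DoSavePlacementOpt() feed the saveBBs input here. The BBId element type of the MapleSet members is again an assumption, since the template arguments are elided in the patch text.

#include <vector>
#include "cg_ssu_pre.h"

namespace maplebe {
// Hypothetical driver, for illustration only: place restores for the same
// callee-saved register after save placement has run.
void PlaceCalleeRestores(CGFunc &f, PostDomAnalysis &pdom, MemPool &mp,
                         const SsaPreWorkCand &saveResult,
                         const std::vector<BBId> &bbsUsingReg) {
  MapleAllocator alloc(&mp);
  SPreWorkCand cand(&alloc);
  for (BBId id : bbsUsingReg) {
    (void)cand.occBBs.insert(id);  // BBs where the register appears
  }
  for (BBId id : saveResult.saveAtEntryBBs) {
    (void)cand.saveBBs.insert(id);  // BBs where a save was placed
  }
  DoRestorePlacementOpt(&f, &pdom, &cand);
  if (cand.restoreAtEpilog) {
    return;  // no shrink-wrapping; restore in the epilogue as usual
  }
  for (BBId id : cand.restoreAtEntryBBs) {
    (void)id;  // emit a restore at the entry of BB `id` here
  }
  for (BBId id : cand.restoreAtExitBBs) {
    (void)id;  // emit a restore at the exit of BB `id` here
  }
}
}  // namespace maplebe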
+class SPreWorkCand { + public: + explicit SPreWorkCand(MapleAllocator *alloc) + : occBBs(alloc->Adapter()), + saveBBs(alloc->Adapter()), + restoreAtEntryBBs(alloc->Adapter()), + restoreAtExitBBs(alloc->Adapter()) {} + // inputs + MapleSet occBBs; // Id's of BBs with appearances of the callee-saved reg + MapleSet saveBBs; // Id's of BBs with saves of the callee-saved reg + // outputs + MapleSet restoreAtEntryBBs; // Id's of BBs to insert restores of the register at BB entry + MapleSet restoreAtExitBBs; // Id's of BBs to insert restores of the register at BB exit + bool restoreAtEpilog = false; // if true, no shrinkwrapping can be done and + // the other outputs can be ignored +}; + +extern void DoRestorePlacementOpt(CGFunc *f, PostDomAnalysis *pdom, SPreWorkCand *workCand); + +enum SOccType { + kSOccUndef, + kSOccReal, + kSOccLambda, + kSOccLambdaRes, + kSOccEntry, + kSOccKill, +}; + +class SOcc { + public: + SOcc(SOccType ty, BB *bb) : occTy(ty), cgbb(bb) {} + virtual ~SOcc() = default; + + virtual void Dump() const = 0; + bool IsPostDominate(PostDomAnalysis *pdom, const SOcc *occ) const { + return pdom->PostDominate(*cgbb, *occ->cgbb); + } + + SOccType occTy; + uint32 classId = 0; + BB *cgbb; // the BB it occurs in + SOcc *use = nullptr; // points to its single use +}; + +class SRealOcc : public SOcc { + public: + explicit SRealOcc(BB *bb) : SOcc(kSOccReal, bb) {} + virtual ~SRealOcc() = default; + + void Dump() const override { + LogInfo::MapleLogger() << "RealOcc at bb" << cgbb->GetId(); + LogInfo::MapleLogger() << " classId" << classId; + } + + bool redundant = true; +}; + +class SLambdaOcc; + +class SLambdaResOcc : public SOcc { + public: + explicit SLambdaResOcc(BB *bb): SOcc(kSOccLambdaRes, bb) {} + virtual ~SLambdaResOcc() = default; + + void Dump() const override { + LogInfo::MapleLogger() << "LambdaResOcc at bb" << cgbb->GetId() << " classId" << classId; + } + + + SLambdaOcc *useLambdaOcc = nullptr; // its rhs use + bool hasRealUse = false; + bool insertHere = false; +}; + +class SLambdaOcc : public SOcc { + public: + SLambdaOcc(BB *bb, MapleAllocator &alloc) + : SOcc(kSOccLambda, bb), lambdaRes(alloc.Adapter()) {} + virtual ~SLambdaOcc() = default; + + bool WillBeAnt() const { + return isCanBeAnt && !isEarlier; + } + + void Dump() const override { + LogInfo::MapleLogger() << "LambdaOcc at bb" << cgbb->GetId() << " classId" << classId << " Lambda["; + for (size_t i = 0; i < lambdaRes.size(); i++) { + lambdaRes[i]->Dump(); + if (i != lambdaRes.size() - 1) { + LogInfo::MapleLogger() << ", "; + } + } + LogInfo::MapleLogger() << "]"; + } + + + bool isUpsafe = true; + bool isCanBeAnt = true; + bool isEarlier = true; + MapleVector lambdaRes; +}; + +class SEntryOcc : public SOcc { + public: + explicit SEntryOcc(BB *bb) : SOcc(kSOccEntry, bb) {} + virtual ~SEntryOcc() = default; + + void Dump() const override { + LogInfo::MapleLogger() << "EntryOcc at bb" << cgbb->GetId(); + } +}; + +class SKillOcc : public SOcc { + public: + explicit SKillOcc(BB *bb) : SOcc(kSOccKill, bb) {} + virtual ~SKillOcc() = default; + + void Dump() const override { + LogInfo::MapleLogger() << "KillOcc at bb" << cgbb->GetId(); + } +}; + +class SSUPre { + public: + SSUPre(CGFunc *cgfunc, PostDomAnalysis *pd, MemPool *memPool, SPreWorkCand *wkcand, bool alap, bool enDebug) + : cgFunc(cgfunc), + pdom(pd), + spreMp(memPool), + spreAllocator(memPool), + workCand(wkcand), + fullyAvailBBs(cgfunc->GetAllBBs().size(), true, spreAllocator.Adapter()), + lambdaDfns(std::less(), spreAllocator.Adapter()), + classCount(0), + 
realOccs(spreAllocator.Adapter()),
+        allOccs(spreAllocator.Adapter()),
+        lambdaOccs(spreAllocator.Adapter()),
+        entryOccs(spreAllocator.Adapter()),
+        asLateAsPossible(alap),
+        enabledDebug(enDebug) {
+    CreateEntryOcc(cgfunc->GetFirstBB());
+  }
+  ~SSUPre() = default;
+
+  void ApplySSUPre();
+
+ private:
+  // step 6 methods
+  void CodeMotion();
+  // step 5 methods
+  void Finalize();
+  // step 4 methods
+  void ResetCanBeAnt(SLambdaOcc *lambda) const;
+  void ComputeCanBeAnt() const;
+  void ResetEarlier(SLambdaOcc *lambda) const;
+  void ComputeEarlier() const;
+  // step 3 methods
+  void ResetUpsafe(const SLambdaResOcc *lambdaRes) const;
+  void ComputeUpsafe() const;
+  // step 2 methods
+  void Rename();
+  // step 1 methods
+  void GetIterPdomFrontier(const BB *bb, MapleSet<uint32> *pdfset) const {
+    for (BBId bbid : pdom->GetIpdomFrontier(bb->GetId())) {
+      (void)pdfset->insert(pdom->GetPdtDfnItem(bbid));
+    }
+  }
+  void FormLambdas();
+  void CreateSortedOccs();
+  // step 0 methods
+  void CreateEntryOcc(BB *bb) {
+    SEntryOcc *entryOcc = spreMp->New<SEntryOcc>(bb);
+    entryOccs.push_back(entryOcc);
+  }
+  void PropagateNotAvail(BB *bb, std::set<BB*> *visitedBBs);
+  void FormReals();
+
+  CGFunc *cgFunc;
+  PostDomAnalysis *pdom;
+  MemPool *spreMp;
+  MapleAllocator spreAllocator;
+  SPreWorkCand *workCand;
+  // step 0
+  MapleVector<bool> fullyAvailBBs;  // index is BBid; true if occ is fully available at BB exit
+  // step 1 lambda insertion data structures:
+  MapleSet<uint32> lambdaDfns;  // set by FormLambdas(); set of BBs in terms of
+                                // their dfn's; index into
+                                // dominance->pdt_preorder to get their bbid's
+  // step 2 renaming
+  uint32 classCount;  // for assigning new class id
+  // the following 4 lists are all maintained in order of pdt_preorder
+  MapleVector<SOcc*> realOccs;  // both real and kill occurrences
+  MapleVector<SOcc*> allOccs;
+  MapleVector<SLambdaOcc*> lambdaOccs;
+  MapleVector<SEntryOcc*> entryOccs;
+  bool asLateAsPossible;
+  bool enabledDebug;
+};
+
+};  // namespace maplebe
+#endif  // MAPLEBE_CG_INCLUDE_CGSSUPRE_H
diff --git a/src/mapleall/maple_be/include/cg/cg_validbit_opt.h b/src/mapleall/maple_be/include/cg/cg_validbit_opt.h
new file mode 100644
index 0000000000000000000000000000000000000000..d60abdf83300e061da7dce433c2199f436cf3bba
--- /dev/null
+++ b/src/mapleall/maple_be/include/cg/cg_validbit_opt.h
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */ +#ifndef MAPLEBE_INCLUDE_CG_VALIDBIT_OPT_H +#define MAPLEBE_INCLUDE_CG_VALIDBIT_OPT_H + +#include "cg.h" +#include "cgfunc.h" +#include "bb.h" +#include "insn.h" +#include "cg_ssa.h" + +namespace maplebe { +#define CG_VALIDBIT_OPT_DUMP CG_DEBUG_FUNC(*cgFunc) +class ValidBitPattern { + public: + ValidBitPattern(CGFunc &f, CGSSAInfo &info) : cgFunc(&f), ssaInfo(&info) {} + virtual ~ValidBitPattern() { + cgFunc = nullptr; + ssaInfo = nullptr; + } + std::string PhaseName() const { + return "cgvalidbitopt"; + } + + virtual std::string GetPatternName() = 0; + virtual bool CheckCondition(Insn &insn) = 0; + virtual void Run(BB &bb, Insn &insn) = 0; + InsnSet GetAllUseInsn(const RegOperand &defReg); + void DumpAfterPattern(std::vector &prevInsns, const Insn *replacedInsn, const Insn *newInsn); + + protected: + CGFunc *cgFunc; + CGSSAInfo *ssaInfo; +}; + +class ValidBitOpt { + public: + ValidBitOpt(CGFunc &f, CGSSAInfo &info) : cgFunc(&f), ssaInfo(&info) {} + virtual ~ValidBitOpt() { + cgFunc = nullptr; + ssaInfo = nullptr; + } + void Run(); + static uint32 GetImmValidBit(int64 value, uint32 size) { + if (value < 0) { + return size; + } else if (value == 0) { + return k1BitSize; + } + uint32 pos = 0; + constexpr uint64 mask = 1; + for (uint32 i = 0; i <= k8BitSize * sizeof(int64); ++i) { + if ((static_cast(value) & mask) == mask) { + pos = i + 1; + } + value = value / 2; + } + return pos; + } + + static int64 GetLogValueAtBase2(int64 val) { + return (__builtin_popcountll(static_cast(val)) == 1) ? (__builtin_ffsll(val) - 1) : -1; + } + + template + void Optimize(BB &bb, Insn &insn) const { + VBOpt opt(*cgFunc, *ssaInfo); + opt.Run(bb, insn); + } + virtual void DoOpt(BB &bb, Insn &insn) = 0; + void RectifyValidBitNum(); + void RecoverValidBitNum(); + virtual void SetValidBits(Insn &insn) = 0; + virtual bool SetPhiValidBits(Insn &insn) = 0; + + protected: + CGFunc *cgFunc; + CGSSAInfo *ssaInfo; +}; +MAPLE_FUNC_PHASE_DECLARE(CgValidBitOpt, maplebe::CGFunc) +} /* namespace maplebe */ +#endif /* MAPLEBE_INCLUDE_CG_VALIDBIT_OPT_H */ diff --git a/src/mapleall/maple_be/include/cg/cgbb.h b/src/mapleall/maple_be/include/cg/cgbb.h new file mode 100644 index 0000000000000000000000000000000000000000..91ac332c646d8a2c741cd536b015066135af6bf4 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/cgbb.h @@ -0,0 +1,933 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_CG_CGBB_H +#define MAPLEBE_INCLUDE_CG_CGBB_H + +#if TARGAARCH64 +#include "aarch64/aarch64_isa.h" +#elif TARGX86_64 +#include "isa.h" +#endif +#include "insn.h" +#include "sparse_datainfo.h" + +/* Maple IR headers */ +#include "mir_nodes.h" +#include "mir_symbol.h" + +/* Maple MP header */ +#include "mempool_allocator.h" + +namespace maplebe { +/* For get bb */ +#define FIRST_BB_OF_FUNC(FUNC) ((FUNC)->GetFirstBB()) +#define LAST_BB_OF_FUNC(FUNC) ((FUNC)->GetLastBB()) + +/* For iterating over basic blocks. 
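+   A typical whole-function walk with the macros below could look like this
+   (illustrative only; cgFunc is an assumed CGFunc* name):
+     FOR_ALL_BB(bb, cgFunc) {
+       FOR_BB_INSNS(insn, bb) {
+         // visit insn
+       }
+     }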
*/ +#define FOR_BB_BETWEEN(BASE, FROM, TO, DIR) for (BB * (BASE) = (FROM); (BASE) != (TO); (BASE) = (BASE)->DIR()) +#define FOR_BB_BETWEEN_CONST(BASE, FROM, TO, DIR) \ + for (const BB * (BASE) = (FROM); (BASE) != (TO); (BASE) = (BASE)->DIR()) + +#define FOR_ALL_BB_CONST(BASE, FUNC) FOR_BB_BETWEEN_CONST(BASE, FIRST_BB_OF_FUNC(FUNC), nullptr, GetNext) +#define FOR_ALL_BB(BASE, FUNC) FOR_BB_BETWEEN(BASE, FIRST_BB_OF_FUNC(FUNC), nullptr, GetNext) +#define FOR_ALL_BB_REV_CONST(BASE, FUNC) FOR_BB_BETWEEN_CONST(BASE, LAST_BB_OF_FUNC(FUNC), nullptr, GetPrev) +#define FOR_ALL_BB_REV(BASE, FUNC) FOR_BB_BETWEEN(BASE, LAST_BB_OF_FUNC(FUNC), nullptr, GetPrev) + +/* For get insn */ +#define FIRST_INSN(BLOCK) (BLOCK)->GetFirstInsn() +#define LAST_INSN(BLOCK) (BLOCK)->GetLastInsn() +#define NEXT_INSN(INSN) (INSN)->GetNext() +#define PREV_INSN(INSN) (INSN)->GetPrev() + +/* For iterating over insns in basic block. */ +#define FOR_INSN_BETWEEN(INSN, FROM, TO, DIR) \ + for (Insn * (INSN) = (FROM); (INSN) != nullptr && (INSN) != (TO); (INSN) = (INSN)->DIR) + +#define FOR_BB_INSNS(INSN, BLOCK) \ + for (Insn * (INSN) = FIRST_INSN(BLOCK); (INSN) != nullptr; (INSN) = (INSN)->GetNext()) +#define FOR_BB_INSNS_CONST(INSN, BLOCK) \ + for (const Insn * (INSN) = FIRST_INSN(BLOCK); (INSN) != nullptr; (INSN) = (INSN)->GetNext()) + +#define FOR_BB_INSNS_REV(INSN, BLOCK) \ + for (Insn * (INSN) = LAST_INSN(BLOCK); (INSN) != nullptr; (INSN) = (INSN)->GetPrev()) + +/* For iterating over insns in basic block when we might remove the current insn. */ +#define FOR_BB_INSNS_SAFE(INSN, BLOCK, NEXT) \ + for (Insn * (INSN) = FIRST_INSN(BLOCK), *(NEXT) = (INSN) ? NEXT_INSN(INSN) : nullptr; (INSN) != nullptr; \ + (INSN) = (NEXT), (NEXT) = (INSN) ? NEXT_INSN(INSN) : nullptr) + +#define FOR_BB_INSNS_REV_SAFE(INSN, BLOCK, NEXT) \ + for (Insn * (INSN) = LAST_INSN(BLOCK), *(NEXT) = (INSN) ? PREV_INSN(INSN) : nullptr; (INSN) != nullptr; \ + (INSN) = (NEXT), (NEXT) = (INSN) ? PREV_INSN(INSN) : nullptr) + +class CGFuncLoops; +class CGFunc; +class CDGNode; + +class BB { + public: + enum BBKind : uint8 { + kBBFallthru, /* default */ + kBBIf, /* conditional branch */ + kBBGoto, /* unconditional branch */ + kBBIgoto, + kBBReturn, + kBBNoReturn, + kBBIntrinsic, /* BB created by inlining intrinsics; shares a lot with BB_if */ + kBBRangeGoto, + kBBThrow, /* For call __java_throw_* and call exit, which will run out of function. 
*/ + kBBLast + }; + + BB(uint32 bbID, MapleAllocator &mallocator) + : id(bbID), + kind(kBBFallthru), /* kBBFallthru default kind */ + labIdx(MIRLabelTable::GetDummyLabel()), + preds(mallocator.Adapter()), + succs(mallocator.Adapter()), + ehPreds(mallocator.Adapter()), + ehSuccs(mallocator.Adapter()), + loopPreds(mallocator.Adapter()), + loopSuccs(mallocator.Adapter()), + succsFreq(mallocator.Adapter()), + liveInRegNO(mallocator.Adapter()), + liveOutRegNO(mallocator.Adapter()), + callInsns(mallocator.Adapter()), + rangeGotoLabelVec(mallocator.Adapter()), + phiInsnList(mallocator.Adapter()) {} + + virtual ~BB() = default; + + virtual BB *Clone(MemPool &memPool) const { + BB *bb = memPool.Clone(*this); + return bb; + } + + void Dump() const; + bool IsCommentBB() const; + bool IsEmptyOrCommentOnly() const; + bool IsSoloGoto() const; + BB* GetValidPrev(); + + bool IsEmpty() const { + if (lastInsn == nullptr) { + CHECK_FATAL(firstInsn == nullptr, "firstInsn must be nullptr"); + return true; + } else { + CHECK_FATAL(firstInsn != nullptr, "firstInsn must not be nullptr"); + return false; + } + } + + const std::string &GetKindName() const { + ASSERT(kind < kBBLast, "out of range in GetKindName"); + return bbNames[kind]; + } + + void SetKind(BBKind bbKind) { + kind = bbKind; + } + + BBKind GetKind() const { + return kind; + } + + void AddLabel(LabelIdx idx) { + labIdx = idx; + } + + void AppendBB(BB &bb) { + bb.prev = this; + bb.next = next; + if (next != nullptr) { + next->prev = &bb; + } + next = &bb; + } + + void PrependBB(BB &bb) { + bb.next = this; + bb.prev = this->prev; + if (this->prev != nullptr) { + this->prev->next = &bb; + } + this->prev = &bb; + } + + Insn *InsertInsnBefore(Insn &existing, Insn &newInsn); + + /* returns newly inserted instruction */ + Insn *InsertInsnAfter(Insn &existing, Insn &newInsn); + + void InsertInsnBegin(Insn &insn) { + if (lastInsn == nullptr) { + firstInsn = lastInsn = &insn; + insn.SetNext(nullptr); + insn.SetPrev(nullptr); + insn.SetBB(this); + } else { + InsertInsnBefore(*firstInsn, insn); + } + } + + void AppendInsn(Insn &insn) { + if (firstInsn != nullptr && lastInsn != nullptr) { + InsertInsnAfter(*lastInsn, insn); + } else { + firstInsn = lastInsn = &insn; + insn.SetNext(nullptr); + insn.SetPrev(nullptr); + insn.SetBB(this); + } + internalFlag1++; + } + + void ReplaceInsn(Insn &insn, Insn &newInsn); + + void RemoveInsn(Insn &insn); + + void RemoveInsnPair(Insn &insn, const Insn &nextInsn); + + void RemoveInsnSequence(Insn &insn, const Insn &nextInsn); + + /* append all insns from bb into this bb */ + void AppendBBInsns(BB &bb); + + /* append all insns from bb into this bb */ + void InsertAtBeginning(BB &bb); + void InsertAtEnd(BB &bb); + void InsertAtEndMinus1(BB &bb); + + /* clear BB but don't remove insns of this */ + void ClearInsns() { + firstInsn = lastInsn = nullptr; + } + + uint32 NumPreds() const { + return static_cast(preds.size()); + } + + bool IsPredecessor(const BB &predBB) { + for (const BB *bb : std::as_const(preds)) { + if (bb == &predBB) { + return true; + } + } + return false; + } + + bool IsBackEdgeDest() const { + return !loopPreds.empty(); + } + + void RemoveFromPredecessorList(const BB &bb) { + for (auto i = preds.begin(); i != preds.end(); ++i) { + if (*i == &bb) { + preds.erase(i); + return; + } + } + CHECK_FATAL(false, "request to remove a non-existent element?"); + } + + void RemoveFromSuccessorList(const BB &bb) { + for (auto i = succs.begin(); i != succs.end(); ++i) { + if (*i == &bb) { + succs.erase(i); + return; + } + } + 
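+    // Reaching this point means bb was not found among the successors; as in
+    // RemoveFromPredecessorList() above, that is treated as an internal error.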
CHECK_FATAL(false, "request to remove a non-existent element?"); + } + + uint32 NumSuccs() const { + return static_cast(succs.size()); + } + + bool HasCall() const { + return hasCall; + } + + void SetHasCall() { + hasCall = true; + } + + /* Number of instructions excluding DbgInsn and comments */ + int32 NumInsn() const; + uint32 GetId() const { + return id; + } + uint32 GetLevel() const { + return level; + } + void SetLevel(uint32 arg) { + level = arg; + } + uint32 GetFrequency() const { + return frequency; + } + void SetFrequency(uint32 arg) { + frequency = arg; + } + BB *GetNext() { + return next; + } + const BB *GetNext() const { + return next; + } + BB *GetPrev() { + return prev; + } + const BB *GetPrev() const { + return prev; + } + void SetNext(BB *arg) { + next = arg; + } + void SetPrev(BB *arg) { + prev = arg; + } + LabelIdx GetLabIdx() const { + return labIdx; + } + void SetLabIdx(LabelIdx arg) { + labIdx = arg; + } + StmtNode *GetFirstStmt() { + return firstStmt; + } + const StmtNode *GetFirstStmt() const { + return firstStmt; + } + void SetFirstStmt(StmtNode &arg) { + firstStmt = &arg; + } + StmtNode *GetLastStmt() { + return lastStmt; + } + const StmtNode *GetLastStmt() const { + return lastStmt; + } + void SetLastStmt(StmtNode &arg) { + lastStmt = &arg; + } + Insn *GetFirstInsn() { + return firstInsn; + } + const Insn *GetFirstInsn() const { + return firstInsn; + } + + void SetFirstInsn(Insn *arg) { + firstInsn = arg; + } + Insn *GetFirstMachineInsn() { + FOR_BB_INSNS(insn, this) { + if (insn->IsMachineInstruction()) { + return insn; + } + } + return nullptr; + } + Insn *GetLastMachineInsn() { + FOR_BB_INSNS_REV(insn, this) { +#if TARGAARCH64 + if (insn->IsMachineInstruction() && !AArch64isa::IsPseudoInstruction(insn->GetMachineOpcode())) { +#elif TARGX86_64 + if (insn->IsMachineInstruction()) { +#endif + return insn; + } + } + return nullptr; + } + Insn *GetLastInsn() { + return lastInsn; + } + const Insn *GetLastInsn() const { + return lastInsn; + } + void SetLastInsn(Insn *arg) { + lastInsn = arg; + } + bool IsLastInsn(const Insn *insn) const{ + return (lastInsn == insn); + } + void InsertPred(const MapleList::iterator &it, BB &bb) { + preds.insert(it, &bb); + } + void InsertSucc(const MapleList::iterator &it, BB &bb) { + succs.insert(it, &bb); + } + const MapleList &GetPreds() const { + return preds; + } + const MapleList &GetSuccs() const { + return succs; + } + const std::size_t GetSuccsSize() const { + return succs.size(); + } + const MapleList &GetEhPreds() const { + return ehPreds; + } + const MapleList &GetEhSuccs() const { + return ehSuccs; + } + const MapleList &GetLoopPreds() const { + return loopPreds; + } + MapleList &GetLoopSuccs() { + return loopSuccs; + } + const MapleList &GetLoopSuccs() const { + return loopSuccs; + } + MapleList::iterator GetPredsBegin() { + return preds.begin(); + } + MapleList::iterator GetSuccsBegin() { + return succs.begin(); + } + MapleList::iterator GetEhPredsBegin() { + return ehPreds.begin(); + } + MapleList::iterator GetLoopSuccsBegin() { + return loopSuccs.begin(); + } + MapleList::iterator GetPredsEnd() { + return preds.end(); + } + MapleList::iterator GetSuccsEnd() { + return succs.end(); + } + MapleList::iterator GetEhPredsEnd() { + return ehPreds.end(); + } + MapleList::iterator GetLoopSuccsEnd() { + return loopSuccs.end(); + } + void PushBackPreds(BB &bb) { + preds.push_back(&bb); + } + void PushBackSuccs(BB &bb) { + succs.push_back(&bb); + } + void PushBackEhPreds(BB &bb) { + ehPreds.push_back(&bb); + } + void 
PushBackEhSuccs(BB &bb) { + ehSuccs.push_back(&bb); + } + void PushBackLoopPreds(BB &bb) { + loopPreds.push_back(&bb); + } + void PushBackLoopSuccs(BB &bb) { + loopSuccs.push_back(&bb); + } + void PushFrontPreds(BB &bb) { + preds.push_front(&bb); + } + void PushFrontSuccs(BB &bb) { + succs.push_front(&bb); + } + void ErasePreds(MapleList::const_iterator it) { + preds.erase(it); + } + void EraseSuccs(MapleList::const_iterator it) { + succs.erase(it); + } + void RemovePreds(BB &bb) { + preds.remove(&bb); + } + void RemoveSuccs(BB &bb) { + succs.remove(&bb); + } + void RemoveEhPreds(BB &bb) { + ehPreds.remove(&bb); + } + void RemoveEhSuccs(BB &bb) { + ehSuccs.remove(&bb); + } + void ClearPreds() { + preds.clear(); + } + void ClearSuccs() { + succs.clear(); + } + void ClearEhPreds() { + ehPreds.clear(); + } + void ClearEhSuccs() { + ehSuccs.clear(); + } + void ClearLoopPreds() { + loopPreds.clear(); + } + void ClearLoopSuccs() { + loopSuccs.clear(); + } + const MapleSet &GetLiveInRegNO() const { + return liveInRegNO; + } + MapleSet &GetLiveInRegNO() { + return liveInRegNO; + } + void InsertLiveInRegNO(regno_t arg) { + (void)liveInRegNO.insert(arg); + } + void EraseLiveInRegNO(MapleSet::iterator it) { + liveInRegNO.erase(it); + } + void EraseLiveInRegNO(regno_t arg) { + liveInRegNO.erase(arg); + } + void ClearLiveInRegNO() { + liveInRegNO.clear(); + } + const MapleSet &GetLiveOutRegNO() const { + return liveOutRegNO; + } + MapleSet &GetLiveOutRegNO() { + return liveOutRegNO; + } + void InsertLiveOutRegNO(regno_t arg) { + (void)liveOutRegNO.insert(arg); + } + void EraseLiveOutRegNO(MapleSet::iterator it) { + liveOutRegNO.erase(it); + } + void ClearLiveOutRegNO() { + liveOutRegNO.clear(); + } + CGFuncLoops *GetLoop() const { + return loop; + } + void SetLoop(CGFuncLoops &arg) { + loop = &arg; + } + bool GetLiveInChange() const { + return liveInChange; + } + void SetLiveInChange(bool arg) { + liveInChange = arg; + } + bool GetCritical() const { + return isCritical; + } + void SetCritical(bool arg) { + isCritical = arg; + } + bool HasCriticalEdge(); + bool GetInsertUse() const { + return insertUse; + } + void SetInsertUse(bool arg) { + insertUse = arg; + } + bool IsUnreachable() const { + return unreachable; + } + void SetUnreachable(bool arg) { + unreachable = arg; + } + bool IsWontExit() const { + return wontExit; + } + void SetWontExit(bool arg) { + wontExit = arg; + } + void SetFastPathReturn(bool arg) { + fastPathReturn = arg; + } + bool IsFastPathReturn() const { + return fastPathReturn; + } + bool IsCatch() const { + return isCatch; + } + void SetIsCatch(bool arg) { + isCatch = arg; + } + bool IsCleanup() const { + return isCleanup; + } + void SetIsCleanup(bool arg) { + isCleanup = arg; + } + bool IsProEpilog() const { + return isProEpilog; + } + void SetIsProEpilog(bool arg) { + isProEpilog = arg; + } + bool IsLabelTaken() const { + return labelTaken; + } + void SetLabelTaken() { + labelTaken = true; + } + bool GetHasCfi() const { + return hasCfi; + } + void SetHasCfi() { + hasCfi = true; + } + bool IsNeedRestoreCfi() const { + return needRestoreCfi; + } + void SetNeedRestoreCfi(bool flag) { + needRestoreCfi = flag; + } + long GetInternalFlag1() const { + return internalFlag1; + } + void SetInternalFlag1(long arg) { + internalFlag1 = arg; + } + long GetInternalFlag2() const { + return internalFlag2; + } + void SetInternalFlag2(long arg) { + internalFlag2 = arg; + } + long GetInternalFlag3() const { + return internalFlag3; + } + void SetInternalFlag3(long arg) { + internalFlag3 = arg; + } + 
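+  // Illustrative sketch (not part of the original header): a CFG transformation
+  // that routes the edge pred -> succ through a new block has to keep the
+  // pred/succ lists and the emission order consistent by hand.  Assuming newBB
+  // came from CGFunc::CreateNewBB(), that could look roughly like:
+  //
+  //   pred.RemoveSuccs(succ);        succ.RemovePreds(pred);
+  //   pred.PushBackSuccs(*newBB);    newBB->PushBackPreds(pred);
+  //   newBB->PushBackSuccs(succ);    succ.PushBackPreds(*newBB);
+  //   pred.AppendBB(*newBB);         // link newBB after pred in emission order
+  //
+  // Exception edges (ehPreds/ehSuccs) need the same bookkeeping when the edge
+  // being split is an EH edge.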
bool IsAtomicBuiltInBB() const { + return isAtomicBuiltIn; + } + void SetAtomicBuiltIn() { + isAtomicBuiltIn = true; + } + const MapleList &GetCallInsns() const { + return callInsns; + } + void PushBackCallInsns(Insn &insn) { + callInsns.push_back(&insn); + } + void ClearCallInsns() { + callInsns.clear(); + } + const MapleVector &GetRangeGotoLabelVec() const { + return rangeGotoLabelVec; + } + void SetRangeGotoLabel(uint32 index, LabelIdx labelIdx) { + rangeGotoLabelVec[index] = labelIdx; + } + void PushBackRangeGotoLabel(LabelIdx labelIdx) { + rangeGotoLabelVec.emplace_back(labelIdx); + } + void AddPhiInsn(regno_t regNO, Insn &insn) { + ASSERT(!phiInsnList.count(regNO), "repeat phiInsn"); + phiInsnList.emplace(std::pair(regNO, &insn)); + } + void RemovePhiInsn(regno_t regNO) { + ASSERT(phiInsnList.count(regNO), "no such insn"); + phiInsnList.erase(regNO); + } + bool HasPhiInsn(regno_t regNO) { + return phiInsnList.find(regNO) != phiInsnList.end(); + } + MapleMap &GetPhiInsns() { + return phiInsnList; + } + bool IsInPhiList(regno_t regNO); + bool IsInPhiDef(regno_t regNO); + const Insn *GetFirstLoc() const { + return firstLoc; + } + void SetFirstLoc(const Insn &arg) { + firstLoc = &arg; + } + const Insn *GetLastLoc() const { + return lastLoc; + } + void SetLastLoc(const Insn *arg) { + lastLoc = arg; + } + SparseDataInfo *GetLiveIn() { + return liveIn; + } + const SparseDataInfo *GetLiveIn() const { + return liveIn; + } + void SetLiveIn(SparseDataInfo &arg) { + liveIn = &arg; + } + void SetLiveInBit(uint32 arg) const { + liveIn->SetBit(arg); + } + void SetLiveInInfo(const SparseDataInfo &arg) const { + *liveIn = arg; + } + void LiveInOrBits(const SparseDataInfo &arg) const { + liveIn->OrBits(arg); + } + void LiveInEnlargeCapacity(uint32 arg) const { + liveIn->EnlargeCapacityToAdaptSize(arg); + } + void LiveInClearDataInfo() { + liveIn->ClearDataInfo(); + liveIn = nullptr; + } + SparseDataInfo *GetLiveOut() { + return liveOut; + } + const SparseDataInfo *GetLiveOut() const { + return liveOut; + } + void SetLiveOut(SparseDataInfo &arg) { + liveOut = &arg; + } + void SetLiveOutBit(uint32 arg) const { + liveOut->SetBit(arg); + } + void LiveOutOrBits(const SparseDataInfo &arg) const { + liveOut->OrBits(arg); + } + void LiveOutEnlargeCapacity(uint32 arg) const { + liveOut->EnlargeCapacityToAdaptSize(arg); + } + void LiveOutClearDataInfo() { + liveOut->ClearDataInfo(); + liveOut = nullptr; + } + const SparseDataInfo *GetDef() const { + return def; + } + void SetDef(SparseDataInfo &arg) { + def = &arg; + } + void SetDefBit(uint32 arg) const { + def->SetBit(arg); + } + void DefResetAllBit() const { + def->ResetAllBit(); + } + void DefResetBit(uint32 arg) const { + def->ResetBit(arg); + } + void DefClearDataInfo() { + def->ClearDataInfo(); + def = nullptr; + } + const SparseDataInfo *GetUse() const { + return use; + } + void SetUse(SparseDataInfo &arg) { + use = &arg; + } + void SetUseBit(uint32 arg) const { + use->SetBit(arg); + } + void UseResetAllBit() const { + use->ResetAllBit(); + } + void UseResetBit(uint32 arg) const { + use->ResetBit(arg); + } + void UseClearDataInfo() { + use->ClearDataInfo(); + use = nullptr; + } + void SetNeedAlign(bool flag) { + needAlign = flag; + } + bool IsBBNeedAlign() const { + return needAlign; + } + void SetAlignPower(uint32 power) { + alignPower = power; + } + uint32 GetAlignPower() const { + return alignPower; + } + void SetAlignNopNum(uint32 num) { + alignNopNum = num; + } + uint32 GetAlignNopNum() const { + return alignNopNum; + } + CDGNode *GetCDGNode() { + 
return cdgNode; + } + void SetCDGNode(CDGNode *node) { + cdgNode = node; + } + + void InitEdgeFreq() { + succsFreq.resize(succs.size()); + } + + uint64 GetEdgeFreq(const BB &bb) const { + auto iter = std::find(succs.begin(), succs.end(), &bb); + if (iter == std::end(succs) || succs.size() > succsFreq.size()) { + return 0; + } + CHECK_FATAL(iter != std::end(succs), "%d is not the successor of %d", bb.GetId(), this->GetId()); + CHECK_FATAL(succs.size() == succsFreq.size(), "succfreq size doesn't match succ size"); + const size_t idx = static_cast(std::distance(succs.begin(), iter)); + return succsFreq[idx]; + } + + uint64 GetEdgeFreq(size_t idx) const { + if (idx >= succsFreq.size()) { + return 0; + } + CHECK_FATAL(idx < succsFreq.size(), "out of range in BB::GetEdgeFreq"); + CHECK_FATAL(succs.size() == succsFreq.size(), "succfreq size doesn't match succ size"); + return succsFreq[idx]; + } + + void SetEdgeFreq(const BB &bb, uint64 freq) { + auto iter = std::find(succs.begin(), succs.end(), &bb); + CHECK_FATAL(iter != std::end(succs), "%d is not the successor of %d", bb.GetId(), this->GetId()); + CHECK_FATAL(succs.size() == succsFreq.size(), "succfreq size %d doesn't match succ size %d", succsFreq.size(), + succs.size()); + const size_t idx = static_cast(std::distance(succs.begin(), iter)); + succsFreq[idx] = freq; + } + + private: + static const std::string bbNames[kBBLast]; + uint32 id; + uint32 level = 0; + uint32 frequency = 0; + BB *prev = nullptr; /* Doubly linked list of BBs; */ + BB *next = nullptr; + /* They represent the order in which blocks are to be emitted. */ + BBKind kind = kBBFallthru; /* The BB's last statement (i.e. lastStmt) determines */ + /* what type this BB has. By default, kBbFallthru */ + LabelIdx labIdx; + StmtNode *firstStmt = nullptr; + StmtNode *lastStmt = nullptr; + Insn *firstInsn = nullptr; /* the first instruction */ + Insn *lastInsn = nullptr; /* the last instruction */ + MapleList preds; /* preds, succs represent CFG */ + MapleList succs; + MapleList ehPreds; + MapleList ehSuccs; + MapleList loopPreds; + MapleList loopSuccs; + + MapleVector succsFreq; + + /* this is for live in out analysis */ + MapleSet liveInRegNO; + MapleSet liveOutRegNO; + CGFuncLoops *loop = nullptr; + bool liveInChange = false; + bool isCritical = false; + bool insertUse = false; + bool hasCall = false; + bool unreachable = false; + bool wontExit = false; + bool fastPathReturn = false; + bool isCatch = false; /* part of the catch bb, true does might also mean it is unreachable */ + /* + * Since isCatch is set early and unreachable detected later, there + * are some overlap here. + */ + bool isCleanup = false; /* true if the bb is cleanup bb. otherwise, false. */ + bool isProEpilog = false; /* Temporary tag for modifying prolog/epilog bb. */ + bool labelTaken = false; /* Block label is taken indirectly and can be used to jump to it. */ + bool hasCfi = false; /* bb contain cfi directive. */ + bool needRestoreCfi = false; /* add cfi insn to current bb if true */ + /* + * Different meaning for each data flow analysis. + * For HandleFunction(), rough estimate of num of insn created. + * For cgbb.cpp, track insn count during code selection. + * For cgbb.cpp, bb is traversed during BFS ordering. + * For aarchregalloc.cpp, the bb is part of cleanup at end of function. + * For aarchcolorra.cpp, the bb is part of cleanup at end of function. + * also used for live range splitting. + * For live analysis, it indicates if bb is cleanupbb. 
+ */ + long internalFlag1 = 0; + + /* + * Different meaning for each data flow analysis. + * For cgbb.cpp, bb is levelized to be 1 more than largest predecessor. + * For aarchcolorra.cpp, used for live range splitting pruning of bb. + */ + long internalFlag2 = 0; + + /* + * Different meaning for each data flow analysis. + * For cgfunc.cpp, it temporarily marks for catch bb discovery. + * For live analysis, it indicates if bb is visited. + * For peephole, used for live-out checking of bb. + */ + long internalFlag3 = 0; + MapleList callInsns; + MapleVector rangeGotoLabelVec; + + /* bb support for SSA analysis */ + MapleMap phiInsnList; + + /* includes Built-in functions for atomic memory access */ + bool isAtomicBuiltIn = false; + + const Insn *firstLoc = nullptr; + const Insn *lastLoc = nullptr; + SparseDataInfo *liveIn = nullptr; + SparseDataInfo *liveOut = nullptr; + SparseDataInfo *def = nullptr; + SparseDataInfo *use = nullptr; + + bool needAlign = false; + uint32 alignPower = 0; + uint32 alignNopNum = 0; + + CDGNode *cdgNode = nullptr; +}; /* class BB */ + +struct BBIdCmp { + bool operator()(const BB *lhs, const BB *rhs) const { + CHECK_FATAL(lhs != nullptr, "null ptr check"); + CHECK_FATAL(rhs != nullptr, "null ptr check"); + return (lhs->GetId() < rhs->GetId()); + } +}; + +class Bfs { + public: + Bfs(CGFunc &cgFunc, MemPool &memPool) + : cgfunc(&cgFunc), + memPool(&memPool), + alloc(&memPool), + visitedBBs(alloc.Adapter()), + sortedBBs(alloc.Adapter()) {} + ~Bfs() = default; + + bool AllPredBBVisited(const BB &bb, long &level) const; + BB *MarkStraightLineBBInBFS(BB *bb); + BB *SearchForStraightLineBBs(BB &bb); + void BFS(BB &curBB); + void ComputeBlockOrder(); + + CGFunc *cgfunc; + MemPool *memPool; + MapleAllocator alloc; + MapleVector visitedBBs; + MapleVector sortedBBs; +}; +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_CGBB_H */ diff --git a/src/mapleall/maple_be/include/cg/cgfunc.h b/src/mapleall/maple_be/include/cg/cgfunc.h new file mode 100644 index 0000000000000000000000000000000000000000..3702303fa9408f3d56fd6ecccb60ecebd766a8e2 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/cgfunc.h @@ -0,0 +1,1461 @@ +/* + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_CG_CGFUNC_H +#define MAPLEBE_INCLUDE_CG_CGFUNC_H + +#include "becommon.h" +#include "operand.h" +#include "eh_func.h" +#include "memlayout.h" +#include "reg_info.h" +#include "cgbb.h" +#include "cfi.h" +#include "dbg.h" +#include "reaching.h" +#include "cg_cfg.h" +#include "cg_irbuilder.h" +#include "call_conv.h" +/* MapleIR headers. 
*/ +#include "mir_parser.h" +#include "mir_function.h" +#include "debug_info.h" +#include "maple_phase_manager.h" + +/* Maple MP header */ +#include "mempool_allocator.h" + +namespace maplebe { +constexpr int32 kBBLimit = 100000; +constexpr int32 kFreqBase = 100000; +struct MemOpndCmp { + bool operator()(const MemOperand *lhs, const MemOperand *rhs) const { + CHECK_FATAL(lhs != nullptr, "null ptr check"); + CHECK_FATAL(rhs != nullptr, "null ptr check"); + if (lhs == rhs) { + return false; + } + return (lhs->Less(*rhs)); + } +}; + +class VirtualRegNode { + public: + VirtualRegNode() = default; + + VirtualRegNode(RegType type, uint32 size) + : regType(type), size(size), regNO(kInvalidRegNO) {} + + virtual ~VirtualRegNode() = default; + + void AssignPhysicalRegister(regno_t phyRegNO) { + regNO = phyRegNO; + } + + RegType GetType() const { + return regType; + } + + uint32 GetSize() const { + return size; + } + + private: + RegType regType = kRegTyUndef; + uint32 size = 0; /* size in bytes */ + regno_t regNO = kInvalidRegNO; /* physical register assigned by register allocation */ +}; + +class SpillMemOperandSet { + public: + explicit SpillMemOperandSet(MapleAllocator &mallocator) : reuseSpillLocMem(mallocator.Adapter()) {} + + virtual ~SpillMemOperandSet() = default; + + void Add(MemOperand &op) { + (void)reuseSpillLocMem.insert(&op); + } + + void Remove(MemOperand &op) { + reuseSpillLocMem.erase(&op); + } + + MemOperand *GetOne() { + if (!reuseSpillLocMem.empty()) { + MemOperand *res = *reuseSpillLocMem.begin(); + reuseSpillLocMem.erase(res); + return res; + } + return nullptr; + } + + private: + MapleSet reuseSpillLocMem; +}; + +#if TARGARM32 +class LiveRange; +#endif /* TARGARM32 */ +constexpr uint32 kVRegisterNumber = 80; +constexpr uint32 kNumBBOptReturn = 30; +class CGFunc { + public: + enum ShiftDirection : uint8 { + kShiftLeft, + kShiftAright, + kShiftLright + }; + + CGFunc(MIRModule &mod, CG &cg, MIRFunction &mirFunc, BECommon &beCommon, MemPool &memPool, + StackMemPool &stackMp, MapleAllocator &allocator, uint32 funcId); + virtual ~CGFunc(); + + const std::string &GetName() const { + return func.GetName(); + } + + const MapleMap &GetLabelAndValueMap() const { + return labelMap; + } + + void InsertLabelMap(LabelIdx idx, uint64 value) { + ASSERT(labelMap.find(idx) == labelMap.end(), "idx already exist"); + labelMap[idx] = value; + } + + void LayoutStackFrame() { + CHECK_FATAL(memLayout != nullptr, "memLayout should has been initialized in constructor"); + memLayout->LayoutStackFrame(structCopySize, maxParamStackSize); + } + + bool HasCall() const { + return func.HasCall(); + } + + bool HasVLAOrAlloca() const { + return hasVLAOrAlloca; + } + + void SetHasVLAOrAlloca(bool val) { + hasVLAOrAlloca = val; + } + + bool HasAlloca() const { + return hasAlloca; + } + + void SetHasAlloca(bool val) { + hasAlloca = val; + } + + void SetRD(ReachingDefinition *paramRd) { + reachingDef = paramRd; + } + + InsnBuilder *GetInsnBuilder() { + return insnBuilder; + } + OperandBuilder *GetOpndBuilder() { + return opndBuilder; + } + + bool GetRDStatus() const { + return (reachingDef != nullptr); + } + + ReachingDefinition *GetRD() { + return reachingDef; + } + + EHFunc *BuildEHFunc(); + void NeedStackProtect(); + virtual void GenSaveMethodInfoCode(BB &bb) = 0; + virtual void GenerateCleanupCode(BB &bb) = 0; + virtual bool NeedCleanup() = 0; + virtual void GenerateCleanupCodeForExtEpilog(BB &bb) = 0; + virtual MemOperand *GetOrCreatSpillMem(regno_t vrNum) = 0; + virtual void FreeSpillRegMem(regno_t vrNum) = 0; 
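+  // Illustrative note (not part of the original header): GetOrCreatSpillMem() and
+  // FreeSpillRegMem() are presumably the hooks a register allocator uses to obtain
+  // and release a stack slot for a spilled virtual register.  A sketch of a spill,
+  // with vregNO and cgFunc as assumed names:
+  //
+  //   MemOperand *slot = cgFunc->GetOrCreatSpillMem(vregNO);  // reuse or create the slot
+  //   // ... emit a target-specific store after the def and a load before each use ...
+  //   cgFunc->FreeSpillRegMem(vregNO);  // give the slot back once the live range is gone
+  //
+  // The actual store/load selection lives in the target subclasses; only the slot
+  // bookkeeping sits behind these two pure virtual methods.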
+ void CreateLmbcFormalParamInfo(); + virtual uint32 FloatParamRegRequired(MIRStructType *structType, uint32 &fpSize) = 0; + virtual void AssignLmbcFormalParams() = 0; + LmbcFormalParamInfo *GetLmbcFormalParamInfo(uint32 offset); + virtual void LmbcGenSaveSpForAlloca() = 0; + void RemoveUnreachableBB(); + void GenerateLoc(StmtNode *stmt, SrcPosition &lastSrcPos, SrcPosition &lastMplPos); + void GenerateScopeLabel(StmtNode *stmt, SrcPosition &lastSrcPos, bool &posDone); + int32 GetFreqFromStmt(uint32 stmtId); + void GenerateInstruction(); + bool MemBarOpt(const StmtNode &membar); + void UpdateCallBBFrequency(); + void HandleFunction(); + void MakeupScopeLabels(BB &bb); + void ProcessExitBBVec(); + void AddCommonExitBB(); + virtual void MergeReturn() = 0; + void TraverseAndClearCatchMark(BB &bb); + void MarkCatchBBs(); + void MarkCleanupBB() const; + void SetCleanupLabel(BB &cleanupEntry); + bool ExitbbNotInCleanupArea(const BB &bb) const; + uint32 GetMaxRegNum() const { + return maxRegCount; + }; + void DumpCFG() const; + void DumpBBInfo(const BB *bb) const; + void DumpCGIR() const; + void DumpLoop() const; + void ClearLoopInfo(); + Operand *HandleExpr(const BaseNode &parent, BaseNode &expr); + virtual void DetermineReturnTypeofCall() = 0; + /* handle rc reset */ + virtual void HandleRCCall(bool begin, const MIRSymbol *retRef = nullptr) = 0; + virtual void HandleRetCleanup(NaryStmtNode &retNode) = 0; + /* select stmt */ + virtual void SelectDassign(DassignNode &stmt, Operand &opnd0) = 0; + virtual void SelectDassignoff(DassignoffNode &stmt, Operand &opnd0) = 0; + virtual void SelectRegassign(RegassignNode &stmt, Operand &opnd0) = 0; + virtual void SelectAbort() = 0; + virtual void SelectAssertNull(UnaryStmtNode &stmt) = 0; + virtual void SelectAsm(AsmNode &node) = 0; + virtual void SelectAggDassign(DassignNode &stmt) = 0; + virtual void SelectIassign(IassignNode &stmt) = 0; + virtual void SelectIassignoff(IassignoffNode &stmt) = 0; + virtual void SelectIassignfpoff(IassignFPoffNode &stmt, Operand &opnd) = 0; + virtual void SelectIassignspoff(PrimType pTy, int32 offset, Operand &opnd) = 0; + virtual void SelectBlkassignoff(BlkassignoffNode &bNode, Operand *src) = 0; + virtual void SelectAggIassign(IassignNode &stmt, Operand &lhsAddrOpnd) = 0; + virtual void SelectReturnSendOfStructInRegs(BaseNode *x) = 0; + virtual void SelectReturn(Operand *opnd) = 0; + virtual void SelectIgoto(Operand *opnd0) = 0; + virtual void SelectCondGoto(CondGotoNode &stmt, Operand &opnd0, Operand &opnd1) = 0; + virtual void SelectCondSpecialCase1(CondGotoNode &stmt, BaseNode &opnd0) = 0; + virtual void SelectCondSpecialCase2(const CondGotoNode &stmt, BaseNode &opnd0) = 0; + virtual void SelectGoto(GotoNode &stmt) = 0; + virtual void SelectCall(CallNode &callNode) = 0; + virtual void SelectIcall(IcallNode &icallNode, Operand &fptrOpnd) = 0; + virtual void SelectIntrinCall(IntrinsiccallNode &intrinsiccallNode) = 0; + virtual Operand *SelectIntrinsicOpWithOneParam(IntrinsicopNode &intrinsicopNode, std::string name) = 0; + virtual Operand *SelectIntrinsicOpWithNParams(IntrinsicopNode &intrinsicopNode, PrimType retType, + const std::string &name) = 0; + virtual Operand *SelectCclz(IntrinsicopNode &intrinsicopNode) = 0; + virtual Operand *SelectCctz(IntrinsicopNode &intrinsicopNode) = 0; + virtual Operand *SelectCpopcount(IntrinsicopNode &intrinsicopNode) = 0; + virtual Operand *SelectCparity(IntrinsicopNode &intrinsicopNode) = 0; + virtual Operand *SelectCclrsb(IntrinsicopNode &intrinsicopNode) = 0; + virtual Operand 
*SelectCisaligned(IntrinsicopNode &intrinsicopNode) = 0; + virtual Operand *SelectCalignup(IntrinsicopNode &intrinsicopNode) = 0; + virtual Operand *SelectCaligndown(IntrinsicopNode &intrinsicopNode) = 0; + virtual Operand *SelectCSyncFetch(IntrinsicopNode &intrinsicopNode, Opcode op, bool fetchBefore) = 0; + virtual Operand *SelectCSyncBoolCmpSwap(IntrinsicopNode &intrinsicopNode) = 0; + virtual Operand *SelectCSyncValCmpSwap(IntrinsicopNode &intrinsicopNode) = 0; + virtual Operand *SelectCSyncLockTestSet(IntrinsicopNode &intrinsicopNode, PrimType pty) = 0; + virtual Operand *SelectCSyncSynchronize(IntrinsicopNode &intrinsicopNode) = 0; + virtual Operand *SelectCAtomicLoadN(IntrinsicopNode &intrinsicopNode) = 0; + virtual Operand *SelectCAtomicExchangeN(const IntrinsiccallNode &intrinsiccallNode) = 0; + virtual Operand *SelectCAtomicFetch(IntrinsicopNode &intrinsicopNode, Opcode op, bool fetchBefore) = 0; + virtual Operand *SelectCReturnAddress(IntrinsicopNode &intrinsicopNode) = 0; + virtual void SelectCAtomicExchange(const IntrinsiccallNode &intrinsiccallNode) = 0; + virtual void SelectMembar(StmtNode &membar) = 0; + virtual void SelectComment(CommentNode &comment) = 0; + virtual void HandleCatch() = 0; + + /* select expr */ + virtual Operand *SelectDread(const BaseNode &parent, AddrofNode &expr) = 0; + virtual RegOperand *SelectRegread(RegreadNode &expr) = 0; + virtual Operand *SelectAddrof(AddrofNode &expr, const BaseNode &parent, bool isAddrofoff) = 0; + virtual Operand *SelectAddrofoff(AddrofoffNode &expr, const BaseNode &parent) = 0; + virtual Operand &SelectAddrofFunc(AddroffuncNode &expr, const BaseNode &parent) = 0; + virtual Operand &SelectAddrofLabel(AddroflabelNode &expr, const BaseNode &parent) = 0; + virtual Operand *SelectIread(const BaseNode &parent, IreadNode &expr, + int extraOffset = 0, PrimType finalBitFieldDestType = kPtyInvalid) = 0; + virtual Operand *SelectIreadoff(const BaseNode &parent, IreadoffNode &ireadoff) = 0; + virtual Operand *SelectIreadfpoff(const BaseNode &parent, IreadFPoffNode &ireadoff) = 0; + virtual Operand *SelectIntConst(MIRIntConst &intConst) = 0; + virtual Operand *SelectFloatConst(MIRFloatConst &floatConst, const BaseNode &parent) = 0; + virtual Operand *SelectDoubleConst(MIRDoubleConst &doubleConst, const BaseNode &parent) = 0; + virtual Operand *SelectStrConst(MIRStrConst &strConst) = 0; + virtual Operand *SelectStr16Const(MIRStr16Const &strConst) = 0; + virtual void SelectAdd(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) = 0; + virtual Operand *SelectAdd(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) = 0; + virtual void SelectMadd(Operand &resOpnd, Operand &opndM0, Operand &opndM1, Operand &opnd1, PrimType primType) = 0; + virtual Operand *SelectMadd(BinaryNode &node, Operand &opndM0, Operand &opndM1, Operand &opnd1, + const BaseNode &parent) = 0; + virtual Operand *SelectRor(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) = 0; + virtual Operand &SelectCGArrayElemAdd(BinaryNode &node, const BaseNode &parent) = 0; + virtual Operand *SelectShift(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) = 0; + virtual void SelectMpy(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) = 0; + virtual Operand *SelectMpy(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) = 0; + virtual Operand *SelectRem(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) = 0; + virtual void SelectDiv(Operand 
&resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) = 0; + virtual Operand *SelectDiv(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) = 0; + virtual Operand *SelectSub(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) = 0; + virtual void SelectSub(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) = 0; + virtual Operand *SelectBand(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) = 0; + virtual void SelectBand(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) = 0; + virtual Operand *SelectLand(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) = 0; + virtual Operand *SelectLor(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent, + bool parentIsBr = false) = 0; + virtual void SelectMin(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) = 0; + virtual Operand *SelectMin(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) = 0; + virtual void SelectMax(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) = 0; + virtual Operand *SelectMax(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) = 0; + virtual Operand *SelectCmpOp(CompareNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) = 0; + virtual Operand *SelectBior(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) = 0; + virtual void SelectBior(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) = 0; + virtual Operand *SelectBxor(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) = 0; + virtual void SelectBxor(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) = 0; + virtual Operand *SelectAbs(UnaryNode &node, Operand &opnd0) = 0; + virtual Operand *SelectBnot(UnaryNode &node, Operand &opnd0, const BaseNode &parent) = 0; + virtual Operand *SelectBswap(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) = 0; + virtual Operand *SelectExtractbits(ExtractbitsNode &node, Operand &opnd0, const BaseNode &parent) = 0; + virtual Operand *SelectDepositBits(DepositbitsNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) = 0; + virtual Operand *SelectRegularBitFieldLoad(ExtractbitsNode &node, const BaseNode &parent) = 0; + virtual Operand *SelectLnot(UnaryNode &node, Operand &opnd0, const BaseNode &parent) = 0; + virtual Operand *SelectNeg(UnaryNode &node, Operand &opnd0, const BaseNode &parent) = 0; + virtual Operand *SelectRecip(UnaryNode &node, Operand &opnd0, const BaseNode &parent) = 0; + virtual Operand *SelectSqrt(UnaryNode &node, Operand &opnd0, const BaseNode &parent) = 0; + virtual Operand *SelectCeil(TypeCvtNode &node, Operand &opnd0, const BaseNode &parent) = 0; + virtual Operand *SelectFloor(TypeCvtNode &node, Operand &opnd0, const BaseNode &parent) = 0; + virtual Operand *SelectRetype(TypeCvtNode &node, Operand &opnd0) = 0; + virtual Operand *SelectRound(TypeCvtNode &node, Operand &opnd0, const BaseNode &parent) = 0; + virtual Operand *SelectCvt(const BaseNode &parent, TypeCvtNode &node, Operand &opnd0) = 0; + virtual Operand *SelectTrunc(TypeCvtNode &node, Operand &opnd0, const BaseNode &parent) = 0; + virtual Operand *SelectSelect(TernaryNode &node, Operand &cond, Operand &opnd0, Operand &opnd1, + const BaseNode &parent, bool hasCompare = false) = 0; + virtual Operand *SelectMalloc(UnaryNode &call, Operand &opnd0) = 0; + virtual RegOperand &SelectCopy(Operand &src, PrimType 
srcType, PrimType dstType) = 0; + virtual Operand *SelectAlloca(UnaryNode &call, Operand &opnd0) = 0; + virtual Operand *SelectGCMalloc(GCMallocNode &call) = 0; + virtual Operand *SelectJarrayMalloc(JarrayMallocNode &call, Operand &opnd0) = 0; + virtual void SelectRangeGoto(RangeGotoNode &rangeGotoNode, Operand &opnd0) = 0; + virtual Operand *SelectLazyLoad(Operand &opnd0, PrimType primType) = 0; + virtual Operand *SelectLazyLoadStatic(MIRSymbol &st, int64 offset, PrimType primType) = 0; + virtual Operand *SelectLoadArrayClassCache(MIRSymbol &st, int64 offset, PrimType primType) = 0; + virtual void GenerateYieldpoint(BB &bb) = 0; + virtual Operand &ProcessReturnReg(PrimType primType, int32 sReg) = 0; + + virtual Operand &GetOrCreateRflag() = 0; + virtual const Operand *GetRflag() const = 0; + virtual const Operand *GetFloatRflag() const = 0; + virtual const LabelOperand *GetLabelOperand(LabelIdx labIdx) const = 0; + virtual LabelOperand &GetOrCreateLabelOperand(LabelIdx labIdx) = 0; + virtual LabelOperand &GetOrCreateLabelOperand(BB &bb) = 0; + virtual RegOperand &CreateVirtualRegisterOperand(regno_t vRegNO) = 0; + virtual RegOperand &GetOrCreateVirtualRegisterOperand(regno_t vRegNO) = 0; + virtual RegOperand &GetOrCreateVirtualRegisterOperand(RegOperand ®Opnd) = 0; + virtual RegOperand &GetOrCreateFramePointerRegOperand() = 0; + virtual RegOperand &GetOrCreateStackBaseRegOperand() = 0; + virtual RegOperand *GetBaseReg(const SymbolAlloc &symAlloc) = 0; + virtual int32 GetBaseOffset(const SymbolAlloc &symbolAlloc) = 0; + virtual RegOperand &GetZeroOpnd(uint32 size) = 0; + virtual Operand &CreateCfiRegOperand(uint32 reg, uint32 size) = 0; + virtual Operand &GetTargetRetOperand(PrimType primType, int32 sReg) = 0; + virtual Operand &CreateImmOperand(PrimType primType, int64 val) = 0; + virtual void ReplaceOpndInInsn(RegOperand ®Dest, RegOperand ®Src, Insn &insn, regno_t destNO) = 0; + virtual void CleanupDeadMov(bool dump) = 0; + virtual void GetRealCallerSaveRegs(const Insn &insn, std::set &realCallerSave) = 0; + + /* ra */ + virtual void AddtoCalleeSaved(regno_t reg) = 0; + + virtual bool IsFrameReg(const RegOperand &opnd) const = 0; + virtual bool IsSPOrFP(const RegOperand &opnd) const = 0; + virtual bool IsReturnReg(const RegOperand &opnd) const = 0; + virtual bool IsSaveReg(const RegOperand ®, MIRType &mirType, BECommon &cgBeCommon) const = 0; + + /* For Neon intrinsics */ + virtual RegOperand *SelectVectorAddLong(PrimType rTy, Operand *o1, Operand *o2, PrimType oty, bool isLow) = 0; + virtual RegOperand *SelectVectorAddWiden(Operand *o1, PrimType oty1, Operand *o2, PrimType oty2, bool isLow) = 0; + virtual RegOperand *SelectVectorAbs(PrimType rType, Operand *o1) = 0; + virtual RegOperand *SelectVectorBinOp(PrimType rType, Operand *o1, PrimType oTyp1, Operand *o2, + PrimType oTyp2, Opcode opc) = 0; + virtual RegOperand *SelectVectorBitwiseOp(PrimType rType, Operand *o1, PrimType oty1, Operand *o2, + PrimType oty2, Opcode opc) = 0;; + virtual RegOperand *SelectVectorCompareZero(Operand *o1, PrimType oty1, Operand *o2, Opcode opc) = 0; + virtual RegOperand *SelectVectorCompare(Operand *o1, PrimType oty1, Operand *o2, PrimType oty2, Opcode opc) = 0; + virtual RegOperand *SelectVectorFromScalar(PrimType pType, Operand *opnd, PrimType sType) = 0; + virtual RegOperand *SelectVectorDup(PrimType rType, Operand *src, bool getLow) = 0; + virtual RegOperand *SelectVectorGetElement(PrimType rType, Operand *src, PrimType sType, int32 lane) = 0; + virtual RegOperand *SelectVectorAbsSubL(PrimType rType, 
Operand *o1, Operand *o2, PrimType oTy, bool isLow) = 0; + virtual RegOperand *SelectVectorMadd(Operand *o1, PrimType oTyp1, Operand *o2, PrimType oTyp2, Operand *o3, + PrimType oTyp3) = 0; + virtual RegOperand *SelectVectorMerge(PrimType rTyp, Operand *o1, Operand *o2, int32 iNum) = 0; + virtual RegOperand *SelectVectorMull(PrimType rType, Operand *o1, PrimType oTyp1, + Operand *o2, PrimType oTyp2, bool isLow) = 0; + virtual RegOperand *SelectVectorNarrow(PrimType rType, Operand *o1, PrimType otyp) = 0; + virtual RegOperand *SelectVectorNarrow2(PrimType rType, Operand *o1, PrimType oty1, Operand *o2, PrimType oty2) = 0; + virtual RegOperand *SelectVectorNeg(PrimType rType, Operand *o1) = 0; + virtual RegOperand *SelectVectorNot(PrimType rType, Operand *o1) = 0; + + virtual RegOperand *SelectVectorPairwiseAdalp(Operand *src1, PrimType sty1, Operand *src2, PrimType sty2) = 0; + virtual RegOperand *SelectVectorPairwiseAdd(PrimType rType, Operand *src, PrimType sType) = 0; + virtual RegOperand *SelectVectorReverse(PrimType rtype, Operand *src, PrimType stype, uint32 size) = 0; + virtual RegOperand *SelectVectorSetElement(Operand *eOp, PrimType eTyp, Operand *vOpd, PrimType vTyp, int32 lane) = 0; + virtual RegOperand *SelectVectorShift(PrimType rType, Operand *o1, + PrimType oty1, Operand *o2, PrimType oty2, Opcode opc) = 0; + virtual RegOperand *SelectVectorShiftImm(PrimType rType, Operand *o1, Operand *imm, int32 sVal, Opcode opc) = 0; + virtual RegOperand *SelectVectorShiftRNarrow(PrimType rType, Operand *o1, PrimType oType, + Operand *o2, bool isLow) = 0; + virtual RegOperand *SelectVectorSubWiden(PrimType resType, Operand *o1, PrimType otyp1, + Operand *o2, PrimType otyp2, bool isLow, bool isWide) = 0; + virtual RegOperand *SelectVectorSum(PrimType rtype, Operand *o1, PrimType oType) = 0; + virtual RegOperand *SelectVectorTableLookup(PrimType rType, Operand *o1, Operand *o2) = 0; + virtual RegOperand *SelectVectorWiden(PrimType rType, Operand *o1, PrimType otyp, bool isLow) = 0; + virtual RegOperand *SelectVectorMovNarrow(PrimType rType, Operand *opnd, PrimType oType) = 0; + + /* For ebo issue. */ + virtual Operand *GetTrueOpnd() { + return nullptr; + } + virtual void ClearUnreachableGotInfos(BB &bb) { + (void)bb; + }; + virtual void ClearUnreachableConstInfos(BB &bb) { + (void)bb; + }; + LabelIdx CreateLabel(); + + RegOperand *GetVirtualRegisterOperand(regno_t vRegNO) { + auto it = vRegOperandTable.find(vRegNO); + return it == vRegOperandTable.end() ? nullptr : it->second; + } + + Operand &CreateCfiImmOperand(int64 val, uint32 size) const { + return *memPool->New(val, size); + } + + Operand &CreateCfiStrOperand(const std::string &str) const { + return *memPool->New(str, *memPool); + } + + bool IsSpecialPseudoRegister(PregIdx spr) const { + return spr < 0; + } + + regno_t NewVReg(RegType regType, uint32 size) { + if (CGOptions::UseGeneralRegOnly()) { + CHECK_FATAL(regType != kRegTyFloat, "cannot use float | SIMD register with --general-reg-only"); + } + /* when vRegCount reach to maxRegCount, maxRegCount limit adds 80 every time */ + /* and vRegTable increases 80 elements. 
*/ + if (vRegCount >= maxRegCount) { + ASSERT(vRegCount < maxRegCount + 1, "MAINTIAN FAILED"); + maxRegCount += kRegIncrStepLen; + vRegTable.resize(maxRegCount); + } +#if TARGAARCH64 || TARGX86_64 || TARGRISCV64 + if (size < k4ByteSize) { + size = k4ByteSize; + } +#if TARGAARCH64 + /* cannot handle 128 size register */ + if (regType == kRegTyInt && size > k8ByteSize) { + size = k8ByteSize; + } +#endif + ASSERT(size == k4ByteSize || size == k8ByteSize || size == k16ByteSize, "check size"); +#endif + new (&vRegTable[vRegCount]) VirtualRegNode(regType, size); + return vRegCount++; + } + + virtual regno_t NewVRflag() { + return 0; + } + + RegType GetRegTyFromPrimTy(PrimType primType) const { + switch (primType) { + case PTY_u1: + case PTY_i8: + case PTY_u8: + case PTY_i16: + case PTY_u16: + case PTY_i32: + case PTY_u32: + case PTY_i64: + case PTY_u64: + case PTY_a32: + case PTY_a64: + case PTY_ptr: + case PTY_i128: + case PTY_u128: + case PTY_agg: + return kRegTyInt; + case PTY_f32: + case PTY_f64: + case PTY_v2i32: + case PTY_v2u32: + case PTY_v2i64: + case PTY_v2u64: + case PTY_v2f32: + case PTY_v2f64: + case PTY_v4i16: + case PTY_v4u16: + case PTY_v4i32: + case PTY_v4u32: + case PTY_v4f32: + case PTY_v8i8: + case PTY_v8u8: + case PTY_v8i16: + case PTY_v8u16: + case PTY_v16i8: + case PTY_v16u8: + return kRegTyFloat; + default: + ASSERT(false, "Unexpected pty"); + return kRegTyUndef; + } + } + + /* return Register Type */ + virtual RegType GetRegisterType(regno_t rNum) const { + CHECK(rNum < vRegTable.size(), "index out of range in GetVRegSize"); + return vRegTable[rNum].GetType(); + } + +#if TARGX86_64 + uint32 GetMaxVReg() const { + return vRegCount + opndBuilder->GetCurrentVRegNum(); + } +#else + uint32 GetMaxVReg() const { + return vRegCount; + } +#endif + + uint32 GetSSAvRegCount() const { + return ssaVRegCount; + } + + void SetSSAvRegCount(uint32 count) { + ssaVRegCount = count; + } + + uint32 GetVRegSize(regno_t vregNum) { + CHECK(vregNum < vRegTable.size(), "index out of range in GetVRegSize"); + return GetOrCreateVirtualRegisterOperand(vregNum).GetSize() / kBitsPerByte; + } + + MIRSymbol *GetRetRefSymbol(BaseNode &expr); + + void PatchLongBranch(); + + virtual uint32 MaxCondBranchDistance() { + return INT_MAX; + } + + virtual void InsertJumpPad(Insn *) { + return; + } + + virtual LabelIdx GetLabelInInsn(Insn &insn) { + return 0; + } + + Operand *CreateDbgImmOperand(int64 val) const { + return memPool->New(val); + } + + uint32 NumBBs() const { + return bbCnt; + } + +#if DEBUG + StIdx GetLocalVarReplacedByPreg(PregIdx reg) { + auto it = pregsToVarsMap->find(reg); + return it != pregsToVarsMap->end() ? 
it->second : StIdx(); + } +#endif + + void IncTotalNumberOfInstructions() { + totalInsns++; + } + + void DecTotalNumberOfInstructions() { + totalInsns--; + } + + uint32 GetTotalNumberOfInstructions() const { + return totalInsns + insnBuilder->GetCreatedInsnNum(); + } + + int32 GetStructCopySize() const { + return structCopySize; + } + + int32 GetMaxParamStackSize() const { + return maxParamStackSize; + } + + virtual void ProcessLazyBinding() = 0; + + /* Debugging support */ + void SetDebugInfo(DebugInfo *dbgInfo) { + debugInfo = dbgInfo; + } + + void AddDIESymbolLocation(const MIRSymbol *sym, SymbolAlloc *loc, bool isParam); + + virtual void DBGFixCallFrameLocationOffsets() {}; + + /* Get And Set private members */ + CG *GetCG() { + return cg; + } + + const CG *GetCG() const { + return cg; + } + + MIRModule &GetMirModule() { + return mirModule; + } + + const MIRModule &GetMirModule() const { + return mirModule; + } + + template + MIRConst *NewMirConst(T &mirConst) { + MIRConst *newConst = mirModule.GetMemPool()->New(mirConst.GetValue(), mirConst.GetType()); + return newConst; + } + + uint32 GetMIRSrcFileEndLineNum() const { + auto &srcFileInfo = mirModule.GetSrcFileInfo(); + if (!srcFileInfo.empty()) { + return srcFileInfo.back().second; + } else { + return 0; + } + } + + MIRFunction &GetFunction() { + return func; + } + + const MIRFunction &GetFunction() const { + return func; + } + + EHFunc *GetEHFunc() { + return ehFunc; + } + + const EHFunc *GetEHFunc() const { + return ehFunc; + } + + void SetEHFunc(EHFunc &ehFunction) { + ehFunc = &ehFunction; + } + + uint32 GetLabelIdx() const { + return labelIdx; + } + + void SetLabelIdx(uint32 idx) { + labelIdx = idx; + } + + LabelNode *GetStartLabel() { + return startLabel; + } + + const LabelNode *GetStartLabel() const { + return startLabel; + } + + void SetStartLabel(LabelNode &label) { + startLabel = &label; + } + + LabelNode *GetEndLabel() { + return endLabel; + } + + const LabelNode *GetEndLabel() const { + return endLabel; + } + + void SetEndLabel(LabelNode &label) { + endLabel = &label; + } + + LabelNode *GetCleanupLabel() { + return cleanupLabel; + } + + const LabelNode *GetCleanupLabel() const { + return cleanupLabel; + } + + void SetCleanupLabel(LabelNode &node) { + cleanupLabel = &node; + } + + const LabelNode *GetReturnLabel() const { + return returnLabel; + } + + void SetReturnLabel(LabelNode &label) { + returnLabel = &label; + } + + BB *GetFirstBB() { + return firstBB; + } + + const BB *GetFirstBB() const { + return firstBB; + } + + void SetFirstBB(BB &bb) { + firstBB = &bb; + } + + BB *GetCleanupBB() { + return cleanupBB; + } + + const BB *GetCleanupBB() const { + return cleanupBB; + } + + void SetCleanupBB(BB &bb) { + cleanupBB = &bb; + cleanupBB->SetIsCleanup(true); + } + + BB *GetLastBB() { + return lastBB; + } + + const BB *GetLastBB() const { + return lastBB; + } + + void SetLastBB(BB &bb) { + lastBB = &bb; + } + + BB *GetReturnBB() { + return returnBB; + } + + const BB *GetReturnBB() const { + return returnBB; + } + + void SetPrologureBB(BB &bb) { + prologureBB = &bb; + } + + BB *GetPrologureBB() { + return prologureBB != nullptr ? 
prologureBB : firstBB; + } + + void SetReturnBB(BB &bb) { + returnBB = &bb; + returnBB->SetKind(BB::kBBReturn); + GetExitBBsVec().emplace_back(returnBB); + } + + BB *GetCurBB() { + return curBB; + } + + const BB *GetCurBB() const { + return curBB; + } + + void SetCurBB(BB &bb) { + curBB = &bb; + } + + BB *GetDummyBB() { + return dummyBB; + } + + const BB *GetDummyBB() const { + return dummyBB; + } + + BB *GetCommonExitBB() { + return commonExitBB; + } + + LabelIdx GetFirstCGGenLabelIdx() const { + return firstCGGenLabelIdx; + } + + MapleVector &GetExitBBsVec() { + return exitBBVec; + } + + const MapleVector GetExitBBsVec() const { + return exitBBVec; + } + + size_t ExitBBsVecSize() const { + return exitBBVec.size(); + } + + bool IsExitBBsVecEmpty() const { + return exitBBVec.empty(); + } + + void EraseExitBBsVec(MapleVector::iterator it) { + exitBBVec.erase(it); + } + + void PushBackExitBBsVec(BB &bb) { + exitBBVec.emplace_back(&bb); + } + + void ClearExitBBsVec() { + exitBBVec.clear(); + } + + bool IsExtendReg(regno_t vregNum) { + return extendSet.find(vregNum) != extendSet.end(); + } + + void InsertExtendSet(regno_t vregNum) { + (void)extendSet.insert(vregNum); + } + + void RemoveFromExtendSet(regno_t vregNum) { + (void)extendSet.erase(vregNum); + } + + bool IsExitBB(const BB ¤tBB) { + for (BB *exitBB : exitBBVec) { + if (exitBB == ¤tBB) { + return true; + } + } + return false; + } + + BB *GetExitBB(int32 index) { + return exitBBVec.at(index); + } + + const BB *GetExitBB(int32 index) const { + return exitBBVec.at(index); + } + + void PushBackNoReturnCallBBsVec(BB &bb) { + noReturnCallBBVec.emplace_back(&bb); + } + + void SetLab2BBMap(int32 index, BB &bb) { + lab2BBMap[index] = &bb; + } + + BB *GetBBFromLab2BBMap(uint32 index) { + return lab2BBMap[index]; + } + + MapleUnorderedMap &GetLab2BBMap() { + return lab2BBMap; + } + + void DumpCFGToDot(const std::string &fileNamePrefix); + + BECommon &GetBecommon() { + return beCommon; + } + + const BECommon GetBecommon() const { + return beCommon; + } + + MemLayout *GetMemlayout() { + return memLayout; + } + + const MemLayout *GetMemlayout() const { + return memLayout; + } + + void SetMemlayout(MemLayout &layout) { + memLayout = &layout; + } + + RegisterInfo *GetTargetRegInfo() { + return targetRegInfo; + } + + void SetTargetRegInfo(RegisterInfo ®Info) { + targetRegInfo = ®Info; + } + + MemPool *GetMemoryPool() { + return memPool; + } + + const MemPool *GetMemoryPool() const { + return memPool; + } + + StackMemPool &GetStackMemPool() { + return stackMp; + } + + MapleAllocator *GetFuncScopeAllocator() { + return funcScopeAllocator; + } + + const MapleAllocator *GetFuncScopeAllocator() const { + return funcScopeAllocator; + } + + const MapleMap GetEmitStVec() const { + return emitStVec; + } + + MIRSymbol* GetEmitSt(uint32 id) { + return emitStVec[id]; + } + + void AddEmitSt(uint32 id, MIRSymbol &symbol) { + CHECK_FATAL(symbol.GetKonst()->GetKind() == kConstAggConst, "not a kConstAggConst"); + MIRAggConst *arrayConst = safe_cast(symbol.GetKonst()); + for (size_t i = 0; i < arrayConst->GetConstVec().size(); ++i) { + CHECK_FATAL(arrayConst->GetConstVecItem(i)->GetKind() == kConstLblConst, "not a kConstLblConst"); + MIRLblConst *lblConst = safe_cast(arrayConst->GetConstVecItem(i)); + ++switchLabelCnt[lblConst->GetValue()]; + } + emitStVec[id] = &symbol; + } + + void UpdateEmitSt(BB &bb, LabelIdx oldLabelIdx, LabelIdx newLabelIdx) { + MIRSymbol *st = GetEmitSt(bb.GetId()); + MIRAggConst *arrayConst = safe_cast(st->GetKonst()); + MIRType *etype = 
GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_a64)); + MIRConst *mirConst = GetMemoryPool()->New(newLabelIdx, + GetFunction().GetPuidx(), *etype); + for (size_t i = 0; i < arrayConst->GetConstVec().size(); ++i) { + CHECK_FATAL(arrayConst->GetConstVecItem(i)->GetKind() == kConstLblConst, "not a kConstLblConst"); + MIRLblConst *lblConst = safe_cast(arrayConst->GetConstVecItem(i)); + if (oldLabelIdx == lblConst->GetValue()) { + arrayConst->SetConstVecItem(i, *mirConst); + ++switchLabelCnt[newLabelIdx]; + + CHECK_FATAL(switchLabelCnt[oldLabelIdx] > 0, "error labelIdx"); + --switchLabelCnt[oldLabelIdx]; + if (switchLabelCnt[oldLabelIdx] == 0) { + (void)switchLabelCnt.erase(oldLabelIdx); + } + } + } + } + + void DeleteEmitSt(uint32 id) { + MIRSymbol &symbol = *emitStVec[id]; + CHECK_FATAL(symbol.GetKonst()->GetKind() == kConstAggConst, "not a kConstAggConst"); + MIRAggConst *arrayConst = safe_cast(symbol.GetKonst()); + for (size_t i = 0; i < arrayConst->GetConstVec().size(); ++i) { + CHECK_FATAL(arrayConst->GetConstVecItem(i)->GetKind() == kConstLblConst, "not a kConstLblConst"); + MIRLblConst *lblConst = safe_cast(arrayConst->GetConstVecItem(i)); + + LabelIdx labelIdx = lblConst->GetValue(); + CHECK_FATAL(switchLabelCnt[labelIdx] > 0, "error labelIdx"); + --switchLabelCnt[labelIdx]; + if (switchLabelCnt[labelIdx] == 0) { + (void)switchLabelCnt.erase(labelIdx); + } + } + (void)emitStVec.erase(id); + } + + bool InSwitchTable(LabelIdx label) const { + if (switchLabelCnt.empty()) { + return false; + } + return (switchLabelCnt.find(label) != switchLabelCnt.end()); + } + + MapleVector &GetLoops() { + return loops; + } + + const MapleVector GetLoops() const { + return loops; + } + + void PushBackLoops(CGFuncLoops &loop) { + loops.emplace_back(&loop); + } + + MapleVector &GetLmbcParamVec() { + return lmbcParamVec; + } + + void IncLmbcArgsInRegs(RegType ty) { + if (ty == kRegTyInt) { + lmbcIntArgs++; + } else { + lmbcFpArgs++; + } + } + + int32 GetLmbcArgsInRegs(RegType ty) const { + return ty == kRegTyInt ? 
lmbcIntArgs : lmbcFpArgs; + } + + void ResetLmbcArgsInRegs() { + lmbcIntArgs = 0; + lmbcFpArgs = 0; + } + + void IncLmbcTotalArgs() { + lmbcTotalArgs++; + } + + uint32 GetLmbcTotalArgs() const { + return lmbcTotalArgs; + } + + void ResetLmbcTotalArgs() { + lmbcTotalArgs = 0; + } + + void SetSpSaveReg(regno_t reg) { + spSaveReg = reg; + } + + regno_t GetSpSaveReg() { + return spSaveReg; + } + + MapleVector &GetAllBBs() { + return bbVec; + } + + std::size_t GetAllBBSize() const { + return bbVec.size(); + } + + BB *GetBBFromID(uint32 id) { + return bbVec[id]; + } + void ClearBBInVec(uint32 id) { + bbVec[id] = nullptr; + } + +#if TARGARM32 + MapleVector &GetSortedBBs() { + return sortedBBs; + } + + const MapleVector &GetSortedBBs() const { + return sortedBBs; + } + + void SetSortedBBs(const MapleVector &bbVec) { + sortedBBs = bbVec; + } + + MapleVector &GetLrVec() { + return lrVec; + } + + const MapleVector &GetLrVec() const { + return lrVec; + } + + void SetLrVec(const MapleVector &newLrVec) { + lrVec = newLrVec; + } +#endif /* TARGARM32 */ + + CGCFG *GetTheCFG() { + return theCFG; + } + + void SetTheCFG(CGCFG *cfg) { + theCFG = cfg; + } + + const CGCFG *GetTheCFG() const { + return theCFG; + } + + regno_t GetVirtualRegNOFromPseudoRegIdx(PregIdx idx) const { + return regno_t(idx + firstMapleIrVRegNO); + } + + bool GetHasProEpilogue() const { + return hasProEpilogue; + } + + void SetHasProEpilogue(bool state) { + hasProEpilogue = state; + } + + int32 GetDbgCallFrameOffset() const { + return dbgCallFrameOffset; + } + + void SetDbgCallFrameOffset(int32 val) { + dbgCallFrameOffset = val; + } + + BB *CreateNewBB() { + BB *bb = memPool->New(bbCnt++, *funcScopeAllocator); + bbVec.emplace_back(bb); + return bb; + } + + BB *CreateNewBB(bool unreachable, BB::BBKind kind, uint32 frequencyVal) { + BB *newBB = CreateNewBB(); + newBB->SetKind(kind); + newBB->SetUnreachable(unreachable); + newBB->SetFrequency(frequencyVal); + return newBB; + } + + BB *CreateNewBB(LabelIdx label, bool unreachable, BB::BBKind kind, uint32 frequencyVal) { + BB *newBB = CreateNewBB(unreachable, kind, frequencyVal); + newBB->AddLabel(label); + SetLab2BBMap(label, *newBB); + return newBB; + } + + void UpdateFrequency(const StmtNode &stmt) { + bool withFreqInfo = func.HasFreqMap() && !func.GetLastFreqMap().empty(); + if (!withFreqInfo) { + return; + } + auto it = func.GetLastFreqMap().find(stmt.GetStmtID()); + if (it != func.GetLastFreqMap().end()) { + frequency = it->second; + } + } + + BB *StartNewBBImpl(bool stmtIsCurBBLastStmt, StmtNode &stmt) { + BB *newBB = CreateNewBB(); + ASSERT(newBB != nullptr, "newBB should not be nullptr"); + if (stmtIsCurBBLastStmt) { + ASSERT(curBB != nullptr, "curBB should not be nullptr"); + curBB->SetLastStmt(stmt); + curBB->AppendBB(*newBB); + newBB->SetFirstStmt(*stmt.GetNext()); + } else { + newBB->SetFirstStmt(stmt); + if (curBB != nullptr) { + if (stmt.GetPrev() != nullptr) { + ASSERT(stmt.GetPrev()->GetNext() == &stmt, " the next of stmt's prev should be stmt self"); + } + curBB->SetLastStmt(*stmt.GetPrev()); + curBB->AppendBB(*newBB); + } + } + return newBB; + } + + BB *StartNewBB(StmtNode &stmt) { + BB *bb = curBB; + if (stmt.GetNext() != nullptr && stmt.GetNext()->GetOpCode() != OP_label) { + bb = StartNewBBImpl(true, stmt); + } + return bb; + } + + void SetCurBBKind(BB::BBKind bbKind) const { + curBB->SetKind(bbKind); + } + + void SetVolStore(bool val) { + isVolStore = val; + } + + void SetVolReleaseInsn(Insn *insn) { + volReleaseInsn = insn; + } + + bool IsAfterRegAlloc() const { + 
return isAfterRegAlloc; + } + + void SetIsAfterRegAlloc() { + isAfterRegAlloc = true; + } + + const MapleString &GetShortFuncName() const { + return shortFuncName; + } + + size_t GetLSymSize() const { + return lSymSize; + } + + bool HasTakenLabel() const{ + return hasTakenLabel; + } + + void SetHasTakenLabel() { + hasTakenLabel = true; + } + + virtual InsnVisitor *NewInsnModifier() = 0; + + MapleVector &GetDbgCallFrameLocations(bool isParam) { + return isParam ? dbgParamCallFrameLocations : dbgLocalCallFrameLocations; + } + + bool HasAsm() const { + return hasAsm; + } + + uint32 GetUniqueID() const { + return func.GetPuidx(); + } + void SetUseFP(bool canUseFP) { + useFP = canUseFP; + } + + bool UseFP() const { + return useFP; + } + + void SetSeenFP(bool seen) { + seenFP = seen; + } + + bool SeenFP() const { + return seenFP; + } + + void UpdateAllRegisterVregMapping(MapleMap &newMap); + + void RegisterVregMapping(regno_t vRegNum, PregIdx pidx) { + vregsToPregsMap[vRegNum] = pidx; + } + + uint32 GetFirstMapleIrVRegNO() const { + return firstMapleIrVRegNO; + } + + void SetHasAsm() { + hasAsm = true; + } + + void SetStackProtectInfo(StackProtectKind kind) { + stackProtectInfo |= kind; + } + + uint8 GetStackProtectInfo() const { + return stackProtectInfo; + } + + void SetNeedStackProtect(bool val) { + needStackProtect = val; + } + + bool GetNeedStackProtect() const { + return needStackProtect; + } + + MIRPreg *GetPseudoRegFromVirtualRegNO(const regno_t vRegNO, bool afterSSA = false) const { + PregIdx pri = afterSSA ? VRegNOToPRegIdx(vRegNO) : GetPseudoRegIdxFromVirtualRegNO(vRegNO); + if (pri == -1) { + return nullptr; + } + return GetFunction().GetPregTab()->PregFromPregIdx(pri); + } + + protected: + uint32 firstMapleIrVRegNO = 200; /* positioned after physical regs */ + uint32 firstNonPregVRegNO; + uint32 vRegCount; /* for assigning a number for each CG virtual register */ + uint32 ssaVRegCount = 0; /* vreg count in ssa */ + uint32 maxRegCount; /* for the current virtual register number limit */ + size_t lSymSize; /* size of local symbol table imported */ + MapleVector vRegTable; /* table of CG's virtual registers indexed by v_reg no */ + MapleVector bbVec; + MapleUnorderedMap vRegOperandTable; + MapleUnorderedMap pRegSpillMemOperands; + MapleUnorderedMap spillRegMemOperands; + MapleUnorderedMap reuseSpillLocMem; + LabelIdx firstCGGenLabelIdx; + MapleMap labelMap; +#if DEBUG + MapleMap *pregsToVarsMap = nullptr; +#endif + MapleMap vregsToPregsMap; + uint32 totalInsns = 0; + int32 structCopySize = 0; + int32 maxParamStackSize = 0; + static constexpr int kRegIncrStepLen = 80; /* reg number increate step length */ + + bool hasVLAOrAlloca = false; + bool hasAlloca = false; + bool hasProEpilogue = false; + bool isVolLoad = false; + bool isVolStore = false; + bool isAfterRegAlloc = false; + bool isAggParamInReg = false; + bool hasTakenLabel = false; + uint32 frequency = 0; + DebugInfo *debugInfo = nullptr; /* debugging info */ + MapleVector dbgParamCallFrameLocations; + MapleVector dbgLocalCallFrameLocations; + RegOperand *aggParamReg = nullptr; + ReachingDefinition *reachingDef = nullptr; + + int32 dbgCallFrameOffset = 0; + CG *cg; + MIRModule &mirModule; + MemPool *memPool; + StackMemPool &stackMp; + + PregIdx GetPseudoRegIdxFromVirtualRegNO(const regno_t vRegNO) const { + if (IsVRegNOForPseudoRegister(vRegNO)) { + return PregIdx(vRegNO - firstMapleIrVRegNO); + } + return VRegNOToPRegIdx(vRegNO); + } + + bool IsVRegNOForPseudoRegister(regno_t vRegNum) const { + /* 0 is not allowed for preg index 
*/ + uint32 n = static_cast(vRegNum); + return (firstMapleIrVRegNO < n && n < firstNonPregVRegNO); + } + + PregIdx VRegNOToPRegIdx(regno_t vRegNum) const { + auto it = vregsToPregsMap.find(vRegNum); + if (it == vregsToPregsMap.end()) { + return PregIdx(-1); + } + return it->second; + } + + VirtualRegNode &GetVirtualRegNodeFromPseudoRegIdx(PregIdx idx) { + return vRegTable.at(GetVirtualRegNOFromPseudoRegIdx(idx)); + } + + PrimType GetTypeFromPseudoRegIdx(PregIdx idx) { + VirtualRegNode &vRegNode = GetVirtualRegNodeFromPseudoRegIdx(idx); + RegType regType = vRegNode.GetType(); + ASSERT(regType == kRegTyInt || regType == kRegTyFloat, ""); + uint32 size = vRegNode.GetSize(); /* in bytes */ + ASSERT(size == sizeof(int32) || size == sizeof(int64), ""); + return (regType == kRegTyInt ? (size == sizeof(int32) ? PTY_i32 : PTY_i64) + : (size == sizeof(float) ? PTY_f32 : PTY_f64)); + } + + int64 GetPseudoRegisterSpillLocation(PregIdx idx) { + const SymbolAlloc *symLoc = memLayout->GetSpillLocOfPseduoRegister(idx); + return static_cast(GetBaseOffset(*symLoc)); + } + + virtual MemOperand *GetPseudoRegisterSpillMemoryOperand(PregIdx idx) = 0; + + uint32 GetSpillLocation(uint32 size) { + uint32 offset = RoundUp(nextSpillLocation, static_cast(size)); + nextSpillLocation = offset + size; + return offset; + } + + /* See if the symbol is a structure parameter that requires a copy. */ + bool IsParamStructCopy(const MIRSymbol &symbol) { + if (symbol.GetStorageClass() == kScFormal && + GetBecommon().GetTypeSize(symbol.GetTyIdx().GetIdx()) > k16ByteSize) { + return true; + } + return false; + } + + private: + CGFunc &operator=(const CGFunc &cgFunc); + CGFunc(const CGFunc&); + StmtNode *HandleFirstStmt(); + bool CheckSkipMembarOp(const StmtNode &stmt); + MIRFunction &func; + EHFunc *ehFunc = nullptr; + + InsnBuilder *insnBuilder = nullptr; + OperandBuilder *opndBuilder = nullptr; + + uint32 bbCnt = 0; + uint32 labelIdx = 0; /* local label index number */ + LabelNode *startLabel = nullptr; /* start label of the function */ + LabelNode *returnLabel = nullptr; /* return label of the function */ + LabelNode *cleanupLabel = nullptr; /* label to indicate the entry of cleanup code. */ + LabelNode *endLabel = nullptr; /* end label of the function */ + + BB *firstBB = nullptr; + BB *prologureBB = nullptr; /* the BB to placing prologure's instructions(callee save/cfi) */ + BB *returnBB = nullptr; + BB *cleanupBB = nullptr; + BB *lastBB = nullptr; + BB *curBB = nullptr; + BB *dummyBB; /* use this bb for add some instructions to bb that is no curBB. */ + BB *commonExitBB = nullptr; /* this post-dominate all BBs */ + + Insn *volReleaseInsn = nullptr; /* use to record the release insn for volatile strore */ + MapleVector exitBBVec; + MapleVector noReturnCallBBVec; + MapleSet extendSet; /* use to mark regs which spilled 32 bits but loaded 64 bits. */ + MapleUnorderedMap lab2BBMap; + BECommon &beCommon; + MemLayout *memLayout = nullptr; + RegisterInfo *targetRegInfo = nullptr; + MapleAllocator *funcScopeAllocator; + MapleMap emitStVec; /* symbol that needs to be emit as a local symbol. 
i.e, switch table */ + MapleUnorderedMap switchLabelCnt; /* label in switch table */ +#if TARGARM32 + MapleVector sortedBBs; + MapleVector lrVec; +#endif /* TARGARM32 */ + MapleVector loops; + MapleVector lmbcParamVec; + CGCFG *theCFG = nullptr; + MapleSet scpIdSet; + const MapleString shortFuncName; + int32 lmbcIntArgs = 0; + int32 lmbcFpArgs = 0; + uint32 lmbcTotalArgs = 0; + uint32 nextSpillLocation = 0; + regno_t spSaveReg = 0; + + bool hasAsm = false; + bool useFP = true; + bool seenFP = true; + + /* save stack protect kinds which can trigger stack protect */ + uint8 stackProtectInfo = 0; + bool needStackProtect = false; +}; /* class CGFunc */ + +MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgLayoutFrame, maplebe::CGFunc) +MAPLE_FUNC_PHASE_DECLARE_END +MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgHandleFunction, maplebe::CGFunc) +MAPLE_FUNC_PHASE_DECLARE_END +MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgFixCFLocOsft, maplebe::CGFunc) +MAPLE_FUNC_PHASE_DECLARE_END +MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgGenCfi, maplebe::CGFunc) +MAPLE_FUNC_PHASE_DECLARE_END +MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgEmission, maplebe::CGFunc) +MAPLE_FUNC_PHASE_DECLARE_END +MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgGenProEpiLog, maplebe::CGFunc) +MAPLE_FUNC_PHASE_DECLARE_END +MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgIsolateFastPath, maplebe::CGFunc) +MAPLE_FUNC_PHASE_DECLARE_END +} /* namespace maplebe */ +#endif /* MAPLEBE_INCLUDE_CG_CGFUNC_H */ diff --git a/src/mapleall/maple_be/include/cg/control_dep_analysis.h b/src/mapleall/maple_be/include/cg/control_dep_analysis.h new file mode 100644 index 0000000000000000000000000000000000000000..bc572bfee46f5ca4920a6e02b6a022167b9edd2b --- /dev/null +++ b/src/mapleall/maple_be/include/cg/control_dep_analysis.h @@ -0,0 +1,116 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MAPLEBE_INCLUDE_CG_PDG_ANALYSIS_H +#define MAPLEBE_INCLUDE_CG_PDG_ANALYSIS_H + +#include "cfg_mst.h" +#include "instrument.h" +#include "cg_cdg.h" +#include "cg_dominance.h" +#include "data_dep_base.h" +#include "loop.h" + +namespace maplebe { +/* Analyze Control Dependence */ +class ControlDepAnalysis { + public: + ControlDepAnalysis(CGFunc &func, MemPool &memPool, MemPool &tmpPool, PostDomAnalysis &pd, + CFGMST, maplebe::BB> &cfgmst) + : cgFunc(func), pdom(&pd), cfgMST(&cfgmst), cdgMemPool(memPool), tmpMemPool(&tmpPool), cdgAlloc(&memPool), + tmpAlloc(&tmpPool), nonPdomEdges(tmpAlloc.Adapter()), curCondNumOfBB(tmpAlloc.Adapter()) {} + ControlDepAnalysis(CGFunc &func, MemPool &memPool) + : cgFunc(func), cdgMemPool(memPool), cdgAlloc(&memPool), tmpAlloc(&memPool), + nonPdomEdges(cdgAlloc.Adapter()), curCondNumOfBB(cdgAlloc.Adapter()) {} + virtual ~ControlDepAnalysis() = default; + + /* The entry of analysis */ + void Run(); + + /* Interface for obtaining PDGAnalysis infos */ + FCDG *GetFCDG() { + return fcdg; + } + + /* Print forward-control-dependence-graph in dot syntax */ + void GenerateFCDGDot() const; + /* Print control-flow-graph with condition at edges in dot syntax */ + void GenerateCFGDot() const; + void CreateAllCDGNodes(); + + protected: + void BuildCFGInfo(); + void ConstructFCDG(); + void ComputeRegions(); + void ComputeRegionForCurNode(uint32 curBBId, std::vector &visited); + void CreateAndDivideRegion(uint32 pBBId); + void ComputeRegionForNonDepNodes(); + CDGRegion *FindExistRegion(CDGNode &node); + bool IsISEqualToCDs(CDGNode &parent, CDGNode &child); + void MergeRegions(CDGNode &mergeNode, CDGNode &candiNode); + + CDGEdge *BuildControlDependence(const BB &fromBB, const BB &toBB, int32 condition); + CDGRegion *CreateFCDGRegion(CDGNode &curNode); + + void AddNonPdomEdges(BBEdge *bbEdge) { + nonPdomEdges.emplace_back(bbEdge); + } + + uint32 GetAndAccSuccedCondNum(uint32 bbId) { + auto pair = curCondNumOfBB.try_emplace(bbId, 0); + if (pair.second) { + return 0; + } else { + uint32 curNum = pair.first->second; + pair.first->second = curNum + 1; + return curNum; + } + } + + static bool IsSameControlDependence(const CDGEdge &edge1, const CDGEdge &edge2) { + CDGNode &fromNode1 = edge1.GetFromNode(); + CDGNode &fromNode2 = edge2.GetFromNode(); + if (fromNode1.GetNodeId() != fromNode2.GetNodeId()) { + return false; + } + if (edge1.GetCondition() != edge2.GetCondition()) { + return false; + } + return true; + } + + CGFunc &cgFunc; + PostDomAnalysis *pdom = nullptr; + CFGMST, maplebe::BB> *cfgMST = nullptr; + MemPool &cdgMemPool; + MemPool *tmpMemPool = nullptr; + MapleAllocator cdgAlloc; + MapleAllocator tmpAlloc; + MapleVector*> nonPdomEdges; + MapleUnorderedMap curCondNumOfBB; // + FCDG *fcdg = nullptr; + uint32 lastRegionId = 0; +}; + +MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgControlDepAnalysis, maplebe::CGFunc); +ControlDepAnalysis *GetResult() { + return cda; +} +ControlDepAnalysis *cda = nullptr; + private: + void GetAnalysisDependence(maple::AnalysisDep &aDep) const override; +MAPLE_FUNC_PHASE_DECLARE_END +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_PDG_ANALYSIS_H */ diff --git a/src/mapleall/maple_be/include/cg/data_dep_analysis.h b/src/mapleall/maple_be/include/cg/data_dep_analysis.h new file mode 100644 index 0000000000000000000000000000000000000000..d8902784808b9d00433c044b805baea9db29dceb --- /dev/null +++ b/src/mapleall/maple_be/include/cg/data_dep_analysis.h @@ -0,0 +1,96 @@ +/* +* Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. 
+* +* OpenArkCompiler is licensed under Mulan PSL v2. +* You can use this software according to the terms and conditions of the Mulan PSL v2. +* You may obtain a copy of Mulan PSL v2 at: +* +* http://license.coscl.org.cn/MulanPSL2 +* +* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +* FIT FOR A PARTICULAR PURPOSE. +* See the Mulan PSL v2 for more details. +*/ + +/* + * Build intra/inter block data dependence graph. + * 1: Build data dependence nodes + * 2: Build edges between dependence nodes. Edges are: + * 2.1) True dependence + * 2.2) Anti dependence + * 2.3) Output dependence + * 2.4) Barrier dependence + */ +#ifndef MAPLEBE_INCLUDE_CG_DATA_DEP_ANALYSIS_H +#define MAPLEBE_INCLUDE_CG_DATA_DEP_ANALYSIS_H + +#include "data_dep_base.h" +#include "mempool.h" +#include "cg_cdg.h" + +namespace maplebe { +/* Analyze IntraBlock Data Dependence */ +class IntraDataDepAnalysis { + public: + IntraDataDepAnalysis(MemPool &mp, CGFunc &f, DataDepBase &dataDepBase) + : intraMp(mp), intraAlloc(&mp), cgFunc(f), ddb(dataDepBase) {} + virtual ~IntraDataDepAnalysis() = default; + + void Run(BB &bb, MapleVector &dataNodes); + void InitCurNodeInfo(MemPool &tmpMp, MapleAllocator &tmpAlloc, BB &bb, MapleVector &dataNodes); + void ClearCurNodeInfo(MemPool *tmpMp, MapleAllocator *tmpAlloc); + void AddEndSeparatorNode(BB &bb, MapleVector &nodes); + + private: + MemPool &intraMp; + MapleAllocator intraAlloc; + CGFunc &cgFunc; + DataDepBase &ddb; +}; + +/* Analyze InterBlock Data Dependence */ +class InterDataDepAnalysis { + public: + InterDataDepAnalysis(CGFunc &f, MemPool &memPool, DataDepBase &dataDepBase) + : cgFunc(f), interAlloc(&memPool), ddb(dataDepBase), + readyNodes(interAlloc.Adapter()), restNodes(interAlloc.Adapter()) {} + virtual ~InterDataDepAnalysis() = default; + + void AddReadyNode(CDGNode *node) { + (void)readyNodes.emplace_back(node); + } + void RemoveReadyNode(CDGNode *node) { + auto it = std::find(readyNodes.begin(), readyNodes.end(), node); + if (it != readyNodes.end()) { + (void)readyNodes.erase(it); + } + } + void InitRestNodes(MapleVector &nodes) { + restNodes = nodes; + } + void RemoveRestNode(CDGNode *node) { + auto it = std::find(restNodes.begin(), restNodes.end(), node); + if (it != restNodes.end()) { + (void)restNodes.erase(it); + } + } + + void Run(CDGRegion ®ion, MapleVector &dataNodes); + void GlobalInit(MapleVector &dataNodes); + void LocalInit(BB &bb, CDGNode &cdgNode, MapleVector &dataNodes, std::size_t idx); + void GenerateInterDDGDot(MapleVector &dataNodes); + + protected: + void ComputeTopologicalOrderInRegion(CDGRegion ®ion); + + private: + CGFunc &cgFunc; + MapleAllocator interAlloc; + DataDepBase &ddb; + MapleVector readyNodes; + MapleVector restNodes; +}; +} /* namespace maplebe */ + +#endif // MAPLEBE_INCLUDE_CG_DATA_DEP_ANALYSIS_H diff --git a/src/mapleall/maple_be/include/cg/data_dep_base.h b/src/mapleall/maple_be/include/cg/data_dep_base.h new file mode 100644 index 0000000000000000000000000000000000000000..c1d41a3dc46263028e5017d06c4ca0e166d41a91 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/data_dep_base.h @@ -0,0 +1,145 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_CG_DATA_DEP_BASE_H +#define MAPLEBE_INCLUDE_CG_DATA_DEP_BASE_H + +#include "deps.h" +#include "cgbb.h" +#include "cg_cdg.h" + +namespace maplebe { +using namespace maple; +constexpr maple::uint32 kMaxDependenceNum = 200; + +class DataDepBase { + public: + DataDepBase(MemPool &memPool, CGFunc &func, MAD &mad) + : memPool(memPool), alloc(&memPool), cgFunc(func), mad(mad), beforeRA(!cgFunc.IsAfterRegAlloc()) {} + virtual ~DataDepBase() = default; + + enum DataFlowInfoType : uint8 { + kDataFlowUndef, + kMembar, + kLastCall, + kLastFrameDef, + kStackUses, + kStackDefs, + kHeapUses, + kHeapDefs, + kMayThrows, + kAmbiguous, + }; + + uint32 GetSeparatorIndex() const { + return separatorIndex; + } + void SetSeparatorIndex(uint32 index) { + separatorIndex = index; + } + void SetCDGNode(CDGNode *cdgNode) { + curCDGNode = cdgNode; + } + CDGNode *GetCDGNode() { + return curCDGNode; + } + void SetLastFrameDefInsn(Insn *insn) { + curCDGNode->SetLastFrameDefInsn(insn); + } + void CopyAndClearComments(MapleVector &comments) { + curCDGNode->CopyAndClearComments(comments); + } + MapleVector &GetLastComments() const { + return curCDGNode->GetLastComments(); + } + void ClearLastComments() const { + curCDGNode->ClearLastComments(); + } + + void ProcessNonMachineInsn(Insn &insn, MapleVector &comments, MapleVector &dataNodes, + const Insn *&locInsn); + + void AddDependence4InsnInVectorByType(MapleVector &insns, Insn &insn, const DepType &type); + void AddDependence4InsnInVectorByTypeAndCmp(MapleVector &insns, Insn &insn, const DepType &type); + + void DumpDepNode(const DepNode &node) const; + void DumpDepLink(const DepLink &link, const DepNode *node) const; + const std::string &GetDepTypeName(DepType depType) const; + void CombineDependence(DepNode &firstNode, const DepNode &secondNode, bool isAcrossSeparator, + bool isMemCombine = false); + + bool IsIntraBlockAnalysis() const; + bool IfInAmbiRegs(regno_t regNO) const; + void AddDependence(DepNode &fromNode, DepNode &toNode, DepType depType); + void RemoveSelfDeps(Insn &insn); + void BuildDepsUseReg(Insn &insn, regno_t regNO); + void BuildDepsDefReg(Insn &insn, regno_t regNO); + void BuildDepsAmbiInsn(Insn &insn); + void BuildAmbiInsnDependency(Insn &insn); + void BuildDepsMayThrowInsn(Insn &insn); + void BuildMayThrowInsnDependency(Insn &insn); + void BuildDepsSeparator(DepNode &newSepNode, MapleVector &nodes); + void BuildDepsControlAll(Insn &insn, const MapleVector &nodes); + void BuildDepsBetweenControlRegAndCall(Insn &insn, bool isDest); + void BuildDepsLastCallInsn(Insn &insn); + void SeparateDependenceGraph(MapleVector &nodes, uint32 &nodeSum); + DepNode *GenerateDepNode(Insn &insn, MapleVector &nodes, uint32 &nodeSum, MapleVector &comments); + void UpdateStackAndHeapDependency(DepNode &depNode, Insn &insn, const Insn &locInsn); + void BuildSeparatorNodeDependency(MapleVector &dataNodes, Insn &insn); + void BuildInterBlockDefUseDependency(DepNode &curDepNode, regno_t regNO, DepType depType, bool isDef); + void BuildPredPathDefDependencyDFS(BB &curBB, std::vector &visited, DepNode &depNode, + regno_t regNO, DepType depType); + void BuildPredPathUseDependencyDFS(BB &curBB, 
std::vector &visited, DepNode &depNode, + regno_t regNO, DepType depType); + void BuildInterBlockSpecialDataInfoDependency(DepNode &curDepNode, bool needCmp, DepType depType, + DataDepBase::DataFlowInfoType infoType); + void BuildPredPathSpecialDataInfoDependencyDFS(BB &curBB, std::vector &visited, bool needCmp, DepNode &depNode, + DepType depType, DataDepBase::DataFlowInfoType infoType); + + virtual void CombineClinit(DepNode &firstNode, DepNode &secondNode, bool isAcrossSeparator) = 0; + virtual void CombineMemoryAccessPair(DepNode &firstNode, DepNode &secondNode, bool useFirstOffset) = 0; + virtual bool IsFrameReg(const RegOperand&) const = 0; + virtual void AnalysisAmbiInsns(BB &bb) = 0; + virtual void BuildDepsMemBar(Insn &insn) = 0; + virtual void BuildDepsUseMem(Insn &insn, MemOperand &memOpnd) = 0; + virtual void BuildDepsDefMem(Insn &insn, MemOperand &memOpnd) = 0; + virtual void BuildDepsAccessStImmMem(Insn &insn, bool isDest) = 0; + virtual void BuildCallerSavedDeps(Insn &insn) = 0; + virtual void BuildStackPassArgsDeps(Insn &insn) = 0; + virtual void BuildDepsDirtyStack(Insn &insn) = 0; + virtual void BuildDepsUseStack(Insn &insn) = 0; + virtual void BuildDepsDirtyHeap(Insn &insn) = 0; + virtual void BuildOpndDependency(Insn &insn) = 0; + virtual void BuildSpecialInsnDependency(Insn &insn, const MapleVector &nodes) = 0; + virtual void UpdateRegUseAndDef(Insn &insn, const DepNode &depNode, MapleVector &nodes) = 0; + virtual DepNode *BuildSeparatorNode() = 0; + virtual void BuildInterBlockMemDefUseDependency(DepNode &depNode, MemOperand &memOpnd, + MemOperand *nextMemOpnd, bool isMemDef) = 0; + virtual void BuildPredPathMemDefDependencyDFS(BB &curBB, std::vector &visited, DepNode &depNode, + MemOperand &memOpnd, MemOperand *nextMemOpnd) = 0; + virtual void BuildPredPathMemUseDependencyDFS(BB &curBB, std::vector &visited, DepNode &depNode, + MemOperand &memOpnd, MemOperand *nextMemOpnd) = 0; + + protected: + MemPool &memPool; + MapleAllocator alloc; + CGFunc &cgFunc; + MAD &mad; + CDGNode *curCDGNode = nullptr; + bool beforeRA; + uint32 separatorIndex = 0; +}; +} + +#endif /* MAPLEBE_INCLUDE_CG_DATA_DEP_BASE_H */ diff --git a/src/mapleall/maple_be/include/cg/datainfo.h b/src/mapleall/maple_be/include/cg/datainfo.h new file mode 100644 index 0000000000000000000000000000000000000000..108a19be5e53c167ae6d9c02b1b5d238bd6e74a9 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/datainfo.h @@ -0,0 +1,217 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MAPLEBE_INCLUDE_CG_DATAINFO_H +#define MAPLEBE_INCLUDE_CG_DATAINFO_H +#include "maple_string.h" +#include "common_utils.h" +#include "mempool.h" +#include "mempool_allocator.h" + +namespace maplebe { +class DataInfo { + public: + DataInfo(uint32 bitNum, MapleAllocator &alloc) + : info((bitNum / kWordSize + 1), 0, alloc.Adapter()) {} + DataInfo(const DataInfo &other, MapleAllocator &alloc) : info(other.info, alloc.Adapter()) {} + DataInfo &Clone(MapleAllocator &alloc) { + auto *dataInfo = alloc.New(*this, alloc); + return *dataInfo; + } + + ~DataInfo() = default; + + void SetBit(int64 bitNO) { + ASSERT(bitNO < info.size() * kWordSize, "Out of Range"); + info[static_cast(bitNO / kWordSize)] |= (1ULL << static_cast((bitNO % kWordSize))); + } + + void ResetBit(uint32 bitNO) { + info[bitNO / kWordSize] &= (~(1ULL << (bitNO % kWordSize))); + } + + bool TestBit(uint32 bitNO) const { + return (info[bitNO / kWordSize] & (1ULL << (bitNO % kWordSize))) != 0ULL; + } + + const uint64 &GetElem(uint32 index) const { + ASSERT(index < info.size(), "out of range"); + return info[index]; + } + + void SetElem(uint32 index, uint64 val) { + ASSERT(index < info.size(), "out of range"); + info[index] = val; + } + + bool NoneBit() const { + for (auto &data : info) { + if (data != 0ULL) { + return false; + } + } + return true; + } + + size_t Size() const { + return info.size() * kWordSize; + } + + const MapleVector &GetInfo() const { + return info; + } + + bool IsEqual(const DataInfo &secondInfo) const { + auto infoSize = static_cast(info.size()); + ASSERT(infoSize == secondInfo.GetInfo().size(), "two dataInfo's size different"); + for (uint32 i = 0; i != infoSize; ++i) { + if (info[i] != secondInfo.GetElem(i)) { + return false; + } + } + return true; + } + + bool IsEqual(const MapleVector &liveInfoBak) const { + size_t infoSize = info.size(); + ASSERT(infoSize == liveInfoBak.size(), "two dataInfo's size different"); + for (size_t i = 0; i != infoSize; ++i) { + if (info[i] != liveInfoBak[i]) { + return false; + } + } + return true; + } + + void AndBits(const DataInfo &secondInfo) { + auto infoSize = static_cast(info.size()); + ASSERT(infoSize == secondInfo.GetInfo().size(), "two dataInfo's size different"); + for (uint32 i = 0; i != infoSize; ++i) { + info[i] &= secondInfo.GetElem(i); + } + } + + void OrBits(const DataInfo &secondInfo) { + auto infoSize = static_cast(info.size()); + ASSERT(infoSize == secondInfo.GetInfo().size(), "two dataInfo's size different"); + for (uint32 i = 0; i != infoSize; i++) { + info[i] |= secondInfo.GetElem(i); + } + } + + bool OrBitsCheck(const DataInfo &secondInfo) { + auto infoSize = static_cast(info.size()); + ASSERT(infoSize == secondInfo.GetInfo().size(), "two dataInfo's size different"); + bool changed = false; + for (uint32 i = 0; i != infoSize; i++) { + if (info[i] != secondInfo.GetElem(i)) { + changed = true; + info[i] |= secondInfo.GetElem(i); + } + } + return changed; + } + + void OrDesignateBits(const DataInfo &secondInfo, uint32 infoIndex) { + ASSERT(infoIndex < secondInfo.GetInfo().size(), "out of secondInfo's range"); + ASSERT(infoIndex < info.size(), "out of secondInfo's range"); + info[infoIndex] |= secondInfo.GetElem(infoIndex); + } + + void EorBits(const DataInfo &secondInfo) { + auto infoSize = static_cast(info.size()); + ASSERT(infoSize == secondInfo.GetInfo().size(), "two dataInfo's size different"); + for (uint32 i = 0; i != infoSize; i++) { + info[i] ^= secondInfo.GetElem(i); + } + } + + /* if bit in secondElem is 1, bit in current DataInfo is set 
0 */ + void Difference(const DataInfo &secondInfo) { + auto infoSize = static_cast(info.size()); + ASSERT(infoSize == secondInfo.GetInfo().size(), "two dataInfo's size different"); + for (uint32 i = 0; i != infoSize; i++) { + info[i] &= (~(secondInfo.GetElem(i))); + } + } + + void ResetAllBit() { + for (auto &data : info) { + data = 0ULL; + } + } + + void EnlargeCapacityToAdaptSize(uint32 bitNO) { + /* add one more size for each enlarge action */ + auto sizeToEnlarge = static_cast((bitNO / kWordSize + 1) - info.size()); + for (int32 i = 0; i < sizeToEnlarge; i++) { + info.emplace_back(0ULL); + } + } + + void GetNonZeroElemsIndex(std::set &index) { + auto infoSize = static_cast(info.size()); + for (int32 i = 0; i < infoSize; i++) { + if (info[i] != 0ULL) { + (void)index.insert(i); + } + } + } + + template + void GetBitsOfInfo(T &wordRes) const { + wordRes.clear(); + for (size_t i = 0; i != info.size(); ++i) { + uint32 result = 0; + uint64 word = info[i]; + uint32 offset = 0; + uint32 baseWord = 0; + bool firstTime = true; + while (word > 0) { + int32 index = __builtin_ffsll(static_cast(word)); + if (index == 0) { + continue; + } + if (index == k64BitSize) { + /* when the highest bit is 1, the shift operation will cause error, need special treatment. */ + result = i * kWordSize + (index - 1); + (void)wordRes.insert(result); + break; + } + if (firstTime) { + offset = static_cast(index - 1); + baseWord = i * kWordSize; + firstTime = false; + } else { + offset = static_cast(index); + baseWord = 0; + } + result += baseWord + offset; + (void)wordRes.insert(result); + word = word >> static_cast(index); + } + } + } + + void ClearDataInfo() { + info.clear(); + } + + private: + /* long type has 8 bytes, 64 bits */ + static constexpr int32 kWordSize = 64; + MapleVector info; +}; +} /* namespace maplebe */ +#endif /* MAPLEBE_INCLUDE_CG_INSN_H */ diff --git a/src/mapleall/maple_be/include/cg/dbg.def b/src/mapleall/maple_be/include/cg/dbg.def new file mode 100644 index 0000000000000000000000000000000000000000..e3f6760b783b4d2299d90a6850867f541172f1ca --- /dev/null +++ b/src/mapleall/maple_be/include/cg/dbg.def @@ -0,0 +1,17 @@ +/* + * Copyright (c) [2020-2022] Futurewei Technologies, Inc. All rights reverved. + * + * OpenArkCompiler is licensed under the Mulan PSL v1. + * You can use this software according to the terms and conditions of the Mulan PSL v1. + * You may obtain a copy of Mulan PSL v1 at: + * + * http://license.coscl.org.cn/MulanPSL + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v1 for more details. + */ +/* .loc fileNum lineNum */ +DBG_DEFINE(loc, , 3, Immediate, Immediate, Immediate) +DBG_DEFINE(scope, , 2, Immediate, Immediate, Undef) diff --git a/src/mapleall/maple_be/include/cg/dbg.h b/src/mapleall/maple_be/include/cg/dbg.h new file mode 100644 index 0000000000000000000000000000000000000000..0ef3cb45615957be9f466c698b0833db5b14d05b --- /dev/null +++ b/src/mapleall/maple_be/include/cg/dbg.h @@ -0,0 +1,131 @@ +/* + * Copyright (c) [2020-2022] Futurewei Technologies, Inc. All rights reverved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_CG_DBG_H +#define MAPLEBE_INCLUDE_CG_DBG_H + +#include "insn.h" +#include "mempool_allocator.h" +#include "mir_symbol.h" +#include "debug_info.h" + +namespace mpldbg { +using namespace maple; + +/* https://sourceware.org/binutils/docs-2.28/as/Loc.html */ +enum LocOpt { kBB, kProEnd, kEpiBeg, kIsStmt, kIsa, kDisc }; + +enum DbgOpcode : uint8 { +#define DBG_DEFINE(k, sub, n, o0, o1, o2) OP_DBG_##k##sub, +#define ARM_DIRECTIVES_DEFINE(k, sub, n, o0, o1, o2) OP_ARM_DIRECTIVES_##k##sub, +#include "dbg.def" +#undef DBG_DEFINE +#undef ARM_DIRECTIVES_DEFINE + kOpDbgLast +}; + +class DbgInsn : public maplebe::Insn { + public: + DbgInsn(MemPool &memPool, maplebe::MOperator op) : Insn(memPool, op) {} + + DbgInsn(MemPool &memPool, maplebe::MOperator op, maplebe::Operand &opnd0) : Insn(memPool, op, opnd0) {} + + DbgInsn(MemPool &memPool, maplebe::MOperator op, maplebe::Operand &opnd0, maplebe::Operand &opnd1) + : Insn(memPool, op, opnd0, opnd1) {} + + DbgInsn(MemPool &memPool, maplebe::MOperator op, maplebe::Operand &opnd0, maplebe::Operand &opnd1, + maplebe::Operand &opnd2) + : Insn(memPool, op, opnd0, opnd1, opnd2) {} + + ~DbgInsn() = default; + + bool IsMachineInstruction() const override { + return false; + } + + void Dump() const override; + +#if DEBUG + void Check() const override; +#endif + + bool IsTargetInsn() const override{ + return false; + } + + bool IsDbgInsn() const override { + return true; + } + + bool IsRegDefined(maplebe::regno_t regNO) const override { + CHECK_FATAL(false, "dbg insn do not def regs"); + return false; + } + + std::set GetDefRegs() const override{ + CHECK_FATAL(false, "dbg insn do not def regs"); + return std::set(); + } + + uint32 GetBothDefUseOpnd() const override { + return maplebe::kInsnMaxOpnd; + } + + uint32 GetLoc() const; + + private: + DbgInsn &operator=(const DbgInsn&); +}; + +class ImmOperand : public maplebe::OperandVisitable { + public: + explicit ImmOperand(int64 val) : OperandVisitable(kOpdImmediate, 32), val(val) {} + + ~ImmOperand() = default; + using OperandVisitable::OperandVisitable; + + Operand *Clone(MemPool &memPool) const override { + Operand *opnd = memPool.Clone(*this); + return opnd; + } + + void Dump() const override; + + bool Less(const Operand &right) const override { + (void)right; + return false; + } + + int64 GetVal() const { + return val; + } + + private: + int64 val; +}; + +class DBGOpndEmitVisitor : public maplebe::OperandVisitorBase, + public maplebe::OperandVisitor { + public: + explicit DBGOpndEmitVisitor(maplebe::Emitter &asmEmitter): emitter(asmEmitter) {} + virtual ~DBGOpndEmitVisitor() = default; + protected: + maplebe::Emitter &emitter; + private: + void Visit(ImmOperand *v) final; +}; + +} /* namespace mpldbg */ + +#endif /* MAPLEBE_INCLUDE_CG_DBG_H */ diff --git a/src/mapleall/maple_be/include/cg/dependence.h b/src/mapleall/maple_be/include/cg/dependence.h new file mode 100644 index 0000000000000000000000000000000000000000..ba92b5651d2906e0978b8fb50e7f5891b9ab64d2 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/dependence.h @@ -0,0 +1,84 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. 
+ * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_CG_DEPENDENCE_H +#define MAPLEBE_INCLUDE_CG_DEPENDENCE_H + +#include "cgbb.h" +#include "deps.h" + +namespace maplebe { +using namespace maple; +namespace { +constexpr maple::uint32 kMaxDependenceNum = 200; +}; + +class DepAnalysis { + public: + DepAnalysis(CGFunc &func, MemPool &memPool, MAD &mad, bool beforeRA) + : cgFunc(func), memPool(memPool), alloc(&memPool), beforeRA(beforeRA), mad(mad), lastComments(alloc.Adapter()) {} + + virtual ~DepAnalysis() = default; + + virtual void Run(BB &bb, MapleVector &nodes) = 0; + + const MapleVector &GetLastComments() const { + return lastComments; + } + virtual void CombineClinit(DepNode &firstNode, DepNode &secondNode, bool isAcrossSeparator) = 0; + virtual void CombineDependence(DepNode &firstNode, DepNode &secondNode, bool isAcrossSeparator, + bool isMemCombine = false) = 0; + virtual void CombineMemoryAccessPair(DepNode &firstNode, DepNode &secondNode, bool useFirstOffset) = 0; + + virtual const std::string &GetDepTypeName(DepType depType) const = 0; + virtual void DumpDepNode(DepNode &node) const = 0; + virtual void DumpDepLink(DepLink &link, const DepNode *node) const = 0; + + protected: + CGFunc &cgFunc; + MemPool &memPool; + MapleAllocator alloc; + bool beforeRA; + MAD &mad; + MapleVector lastComments; + + virtual void Init(BB &bb, MapleVector &nodes) = 0; + virtual void ClearAllDepData() = 0; + virtual void AnalysisAmbiInsns(BB &bb) = 0; + virtual void AppendRegUseList(Insn &insn, regno_t regNO) = 0; + virtual void AddDependence(DepNode &fromNode, DepNode &toNode, DepType depType) = 0; + virtual void RemoveSelfDeps(Insn &insn) = 0; + virtual void BuildDepsUseReg(Insn &insn, regno_t regNO) = 0; + virtual void BuildDepsDefReg(Insn &insn, regno_t regNO) = 0; + virtual void BuildDepsAmbiInsn(Insn &insn) = 0; + virtual void BuildDepsMayThrowInsn(Insn &insn) = 0; + virtual void BuildDepsUseMem(Insn &insn, MemOperand &memOpnd) = 0; + virtual void BuildDepsDefMem(Insn &insn, MemOperand &memOpnd) = 0; + virtual void BuildDepsMemBar(Insn &insn) = 0; + virtual void BuildDepsSeparator(DepNode &newSepNode, MapleVector &nodes) = 0; + virtual void BuildDepsControlAll(DepNode &depNode, const MapleVector &nodes) = 0; + virtual void BuildDepsAccessStImmMem(Insn &insn, bool isDest) = 0; + virtual void BuildCallerSavedDeps(Insn &insn) = 0; + virtual void BuildDepsBetweenControlRegAndCall(Insn &insn, bool isDest) = 0; + virtual void BuildStackPassArgsDeps(Insn &insn) = 0; + virtual void BuildDepsDirtyStack(Insn &insn) = 0; + virtual void BuildDepsUseStack(Insn &insn) = 0; + virtual void BuildDepsDirtyHeap(Insn &insn) = 0; + virtual DepNode *BuildSeparatorNode() = 0; + virtual bool IfInAmbiRegs(regno_t regNO) const = 0; + virtual bool IsFrameReg(const RegOperand &) const = 0; +}; +} // namespace maplebe + +#endif /* MAPLEBE_INCLUDE_CG_DEPENDENCE_H */ diff --git a/src/mapleall/maple_be/include/cg/deps.h b/src/mapleall/maple_be/include/cg/deps.h new file mode 100644 index 
0000000000000000000000000000000000000000..1a71c17c0600fe1806cc583556c6b6f964c5400d --- /dev/null +++ b/src/mapleall/maple_be/include/cg/deps.h @@ -0,0 +1,465 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_CG_DEPS_H +#define MAPLEBE_INCLUDE_CG_DEPS_H + +#include +#include "mad.h" +#include "pressure.h" +namespace maplebe { +#define PRINT_STR_VAL(STR, VAL) \ + LogInfo::MapleLogger() << std::left << std::setw(12) << STR << VAL << " | "; +#define PRINT_VAL(VAL) \ + LogInfo::MapleLogger() << std::left << std::setw(12) << VAL << " | "; + +enum DepType : uint8 { + kDependenceTypeTrue, + kDependenceTypeOutput, + kDependenceTypeAnti, + kDependenceTypeControl, + kDependenceTypeMembar, + kDependenceTypeThrow, + kDependenceTypeSeparator, + kDependenceTypeNone +}; + +inline const std::array kDepTypeName = { + "true-dep", + "output-dep", + "anti-dep", + "control-dep", + "membar-dep", + "throw-dep", + "separator-dep", + "none-dep", +}; + +enum NodeType : uint8 { + kNodeTypeNormal, + kNodeTypeSeparator, + kNodeTypeEmpty +}; + +enum ScheduleState : uint8 { + kNormal, + kReady, + kScheduled, +}; + +class DepNode; + +class DepLink { + public: + DepLink(DepNode &fromNode, DepNode &toNode, DepType typ) : from(fromNode), to(toNode), depType(typ), latency(0) {} + virtual ~DepLink() = default; + + DepNode &GetFrom() const { + return from; + } + DepNode &GetTo() const { + return to; + } + void SetDepType(DepType dType) { + depType = dType; + } + DepType GetDepType() const { + return depType; + } + void SetLatency(uint32 lat) { + latency = lat; + } + uint32 GetLatency() const { + return latency; + } + + private: + DepNode &from; + DepNode &to; + DepType depType; + uint32 latency; +}; + +class DepNode { + public: + bool CanBeScheduled() const; + void OccupyUnits(); + uint32 GetUnitKind() const; + + DepNode(Insn &insn, MapleAllocator &alloc) + : insn(&insn), units(nullptr), reservation(nullptr), unitNum(0), + eStart(0), lStart(0), visit(0), type(kNodeTypeNormal), state(kNormal), index(0), simulateCycle(0), + schedCycle(0), bruteForceSchedCycle(0), validPredsSize(0), validSuccsSize(0), + preds(alloc.Adapter()), succs(alloc.Adapter()), comments(alloc.Adapter()), + cfiInsns(alloc.Adapter()), clinitInsns(alloc.Adapter()), locInsn(nullptr), useRegnos(alloc.Adapter()), + defRegnos(alloc.Adapter()), regPressure(nullptr) {} + + DepNode(Insn &insn, MapleAllocator &alloc, Unit * const *unit, uint32 num, Reservation &rev) + : insn(&insn), units(unit), + reservation(&rev), unitNum(num), eStart(0), lStart(0), visit(0), type(kNodeTypeNormal), state(kNormal), + index(0), simulateCycle(0), schedCycle(0), bruteForceSchedCycle(0), validPredsSize(0), validSuccsSize(0), + preds(alloc.Adapter()), succs(alloc.Adapter()), comments(alloc.Adapter()), cfiInsns(alloc.Adapter()), + clinitInsns(alloc.Adapter()), locInsn(nullptr), useRegnos(alloc.Adapter()), defRegnos(alloc.Adapter()), + regPressure(nullptr) {} + + virtual ~DepNode() = default; + + Insn *GetInsn() const { + return insn; + } + 
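+  /*
+   * Usage sketch: a dependence edge between two DepNodes is normally recorded
+   * on both endpoints through a shared DepLink. The names below (defNode,
+   * useNode, memPool) and the latency value are placeholders:
+   *   DepLink *link = memPool.New<DepLink>(defNode, useNode, kDependenceTypeTrue);
+   *   link->SetLatency(1);        // illustrative latency
+   *   defNode.AddSucc(*link);     // edge leaves the defining node
+   *   useNode.AddPred(*link);     // and enters the using node
+   */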
void SetInsn(Insn &rvInsn) { + insn = &rvInsn; + } + void SetUnits(Unit * const *unit) { + units = unit; + } + const Unit *GetUnitByIndex(uint32 idx) const { + ASSERT(index < unitNum, "out of units"); + return units[idx]; + } + Reservation *GetReservation() const { + return reservation; + } + void SetReservation(Reservation &rev) { + reservation = &rev; + } + uint32 GetUnitNum() const { + return unitNum; + } + void SetUnitNum(uint32 num) { + unitNum = num; + } + uint32 GetEStart() const { + return eStart; + } + void SetEStart(uint32 start) { + eStart = start; + } + uint32 GetLStart() const { + return lStart; + } + void SetLStart(uint32 start) { + lStart = start; + } + uint32 GetVisit() const { + return visit; + } + void SetVisit(uint32 visitVal) { + visit = visitVal; + } + void IncreaseVisit() { + ++visit; + } + NodeType GetType() const { + return type; + } + void SetType(NodeType nodeType) { + type = nodeType; + } + ScheduleState GetState() const { + return state; + } + void SetState(ScheduleState scheduleState) { + state = scheduleState; + } + uint32 GetIndex() const { + return index; + } + void SetIndex(uint32 idx) { + index = idx; + } + void SetSchedCycle(uint32 cycle) { + schedCycle = cycle; + } + uint32 GetSchedCycle() const { + return schedCycle; + } + void SetSimulateCycle(uint32 cycle) { + simulateCycle = cycle; + } + uint32 GetSimulateCycle() const { + return simulateCycle; + } + void SetBruteForceSchedCycle(uint32 cycle) { + bruteForceSchedCycle = cycle; + } + uint32 GetBruteForceSchedCycle() const { + return bruteForceSchedCycle; + } + void SetValidPredsSize(uint32 validSize) { + validPredsSize = validSize; + } + uint32 GetValidPredsSize() const { + return validPredsSize; + } + void DescreaseValidPredsSize() { + --validPredsSize; + } + void IncreaseValidPredsSize() { + ++validPredsSize; + } + uint32 GetValidSuccsSize() const { + return validSuccsSize; + } + void SetValidSuccsSize(uint32 size) { + validSuccsSize = size; + } + const MapleVector &GetPreds() const { + return preds; + } + void ReservePreds(size_t size) { + preds.reserve(size); + } + void AddPred(DepLink &depLink) { + preds.emplace_back(&depLink); + } + void RemovePred() { + preds.pop_back(); + } + const MapleVector &GetSuccs() const{ + return succs; + } + void ReserveSuccs(size_t size) { + succs.reserve(size); + } + void AddSucc(DepLink &depLink) { + succs.emplace_back(&depLink); + } + void RemoveSucc() { + succs.pop_back(); + } + const MapleVector &GetComments() const { + return comments; + } + void SetComments(MapleVector com) { + comments = com; + } + void AddComments(Insn &addInsn) { + (void)comments.emplace_back(&addInsn); + } + void ClearComments() { + comments.clear(); + } + const MapleVector &GetCfiInsns() const { + return cfiInsns; + } + void SetCfiInsns(MapleVector insns) { + cfiInsns = insns; + } + void AddCfiInsn(Insn &curInsn) { + (void)cfiInsns.emplace_back(&curInsn); + } + void ClearCfiInsns() { + cfiInsns.clear(); + } + const MapleVector &GetClinitInsns() const { + return clinitInsns; + } + void SetClinitInsns(MapleVector insns) { + clinitInsns = insns; + } + void AddClinitInsn(Insn &addInsn) { + (void)clinitInsns.emplace_back(&addInsn); + } + const RegPressure *GetRegPressure() const { + return regPressure; + } + void SetRegPressure(RegPressure &pressure) { + regPressure = &pressure; + } + void DumpRegPressure() const { + if (regPressure) { + regPressure->DumpRegPressure(); + } + } + void InitPressure() const { + regPressure->InitPressure(); + } + const MapleVector &GetPressure() const { + return 
regPressure->GetPressure(); + } + + void IncPressureByIndex(int32 idx) const { + regPressure->IncPressureByIndex(static_cast(idx)); + } + void DecPressureByIndex(int32 idx) const { + regPressure->DecPressureByIndex(static_cast(idx)); + } + + const MapleVector &GetDeadDefNum() const { + return regPressure->GetDeadDefNum(); + } + void IncDeadDefByIndex(int32 idx) const { + regPressure->IncDeadDefByIndex(static_cast(idx)); + } + + void SetRegUses(RegList ®List) const { + regPressure->SetRegUses(®List); + } + void SetRegDefs(size_t idx, RegList *regList) const { + regPressure->SetRegDefs(idx, regList); + } + + int32 GetIncPressure() const { + return regPressure->GetIncPressure(); + } + void SetIncPressure(bool value) const { + regPressure->SetIncPressure(value); + } + int32 GetMaxDepth() const { + return regPressure->GetMaxDepth(); + } + void SetMaxDepth(int32 value) const { + regPressure->SetMaxDepth(value); + } + int32 GetNear() const { + return regPressure->GetNear(); + } + void SetNear(int32 value) const { + regPressure->SetNear(value); + } + int32 GetPriority() const { + return regPressure->GetPriority(); + } + void SetPriority(int32 value) const { + regPressure->SetPriority(value); + } + RegList *GetRegUses(size_t idx) const { + return regPressure->GetRegUses(idx); + } + void InitRegUsesSize(size_t size) const { + regPressure->InitRegUsesSize(size); + } + RegList *GetRegDefs(size_t idx) const { + return regPressure->GetRegDefs(idx); + } + void InitRegDefsSize(size_t size) const { + regPressure->InitRegDefsSize(size); + } + + void SetNumCall(int32 value) const { + regPressure->SetNumCall(value); + } + + int32 GetNumCall() const { + return regPressure->GetNumCall(); + } + + void SetHasNativeCallRegister(bool value) const { + regPressure->SetHasNativeCallRegister(value); + } + + bool GetHasNativeCallRegister() const { + return regPressure->GetHasNativeCallRegister(); + } + + const Insn *GetLocInsn() const { + return locInsn; + } + void SetLocInsn(const Insn &locationInsn) { + locInsn = &locationInsn; + } + + /* printf dep-node's information of scheduling */ + void DumpSchedInfo() const { + PRINT_STR_VAL("estart: ", eStart); + PRINT_STR_VAL("lstart: ", lStart); + PRINT_STR_VAL("visit: ", visit); + PRINT_STR_VAL("state: ", state); + PRINT_STR_VAL("index: ", index); + PRINT_STR_VAL("validPredsSize: ", validPredsSize); + PRINT_STR_VAL("validSuccsSize: ", validSuccsSize); + LogInfo::MapleLogger() << '\n'; + + constexpr int32 width = 12; + LogInfo::MapleLogger() << std::left << std::setw(width) << "usereg: "; + for (const auto &useReg : useRegnos) { + LogInfo::MapleLogger() << "R" << useReg << " "; + } + LogInfo::MapleLogger() << "\n"; + LogInfo::MapleLogger() << std::left << std::setw(width) << "defreg: "; + for (const auto &defReg : defRegnos) { + LogInfo::MapleLogger() << "R" << defReg << " "; + } + LogInfo::MapleLogger() << "\n"; + } + + void SetHasPreg(bool value) const { + regPressure->SetHasPreg(value); + } + + bool GetHasPreg() const { + return regPressure->GetHasPreg(); + } + + void AddUseReg(regno_t reg) { + useRegnos.emplace_back(reg); + } + + const MapleVector &GetUseRegnos() const { + return useRegnos; + } + + void AddDefReg(regno_t reg) { + defRegnos.emplace_back(reg); + } + + const MapleVector &GetDefRegnos() const { + return defRegnos; + } + + private: + Insn *insn; + Unit * const *units; + Reservation *reservation; + uint32 unitNum; + uint32 eStart; + uint32 lStart; + uint32 visit; + NodeType type; + ScheduleState state; + uint32 index; + uint32 simulateCycle; + uint32 
schedCycle; + uint32 bruteForceSchedCycle; + + /* For scheduling, denotes unscheduled preds/succs number. */ + uint32 validPredsSize; + uint32 validSuccsSize; + + /* Dependence links. */ + MapleVector preds; + MapleVector succs; + + /* Non-machine instructions prior to insn, such as comments. */ + MapleVector comments; + + /* Non-machine instructions which follows insn, such as cfi instructions. */ + MapleVector cfiInsns; + + /* Special instructions which follows insn, such as clinit instructions. */ + MapleVector clinitInsns; + + /* loc insn which indicate insn location in source file */ + const Insn *locInsn; + + MapleVector useRegnos; + MapleVector defRegnos; + + /* For register pressure analysis */ + RegPressure *regPressure; +}; +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_DEPS_H */ diff --git a/src/mapleall/maple_be/include/cg/ebo.h b/src/mapleall/maple_be/include/cg/ebo.h new file mode 100644 index 0000000000000000000000000000000000000000..d30cc91e7daeaf18690b1d9df1f75586526fd089 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/ebo.h @@ -0,0 +1,254 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_CG_EBO_H +#define MAPLEBE_INCLUDE_CG_EBO_H + +#include "cg_phase.h" +#include "cgbb.h" +#include "live.h" +#include "loop.h" + +namespace maplebe { +namespace { +constexpr uint32 kEboDefaultMemHash = 0; +constexpr uint32 kEboNoAliasMemHash = 1; +constexpr uint32 kEboSpillMemHash = 2; +constexpr uint32 kEboCopyInsnHash = 3; +constexpr uint32 kEboReservedInsnHash = 4; +constexpr uint32 kEboMaxExpInsnHash = 1024; +constexpr uint32 kEboMaxOpndHash = 521; +constexpr uint32 kEboMaxInsnHash = kEboReservedInsnHash + kEboMaxExpInsnHash; +}; + +#define EBO_EXP_INSN_HASH(val) ((kEboMaxExpInsnHash - 1ULL) & (static_cast(val) >> 6)) + +/* forward decls */ +class InsnInfo; + +struct OpndInfo { + explicit OpndInfo(Operand &opnd) : opnd(&opnd) {} + + virtual ~OpndInfo() = default; + + int32 hashVal = 0; /* Mem operand is placed in hash table, this is the hashVal of it, and otherwise -1. */ + Operand *opnd; /* Operand */ + Operand *replacementOpnd = nullptr; /* Rename opnd with this new name. */ + OpndInfo *replacementInfo = nullptr; /* Rename opnd with this info. */ + BB *bb = nullptr; /* The Definining bb. */ + Insn *insn = nullptr; /* The Defining insn. */ + InsnInfo *insnInfo = nullptr; + bool redefinedInBB = false; /* A following definition exisit in bb. */ + bool redefined = false; /* A following definition exisit. */ + Insn *redefinedInsn = nullptr; /* Next defined insn if redefinedInBB is true */ +#if TARGARM32 + bool mayReDef = false; +#endif + OpndInfo *same = nullptr; /* Other definitions of the same operand. */ + OpndInfo *prev = nullptr; + OpndInfo *next = nullptr; + OpndInfo *hashNext = nullptr; + int32 refCount = 0; /* Number of references to the operand. 
*/ +}; + +struct MemOpndInfo : public OpndInfo { + explicit MemOpndInfo(Operand &opnd) : OpndInfo(opnd) {} + + ~MemOpndInfo() override = default; + + OpndInfo *GetBaseInfo() const { + return base; + } + + OpndInfo *GetOffsetInfo() const{ + return offset; + } + + void SetBaseInfo(OpndInfo &baseInfo) { + base = &baseInfo; + } + + void SetOffsetInfo(OpndInfo &offInfo) { + offset = &offInfo; + } + + private: + OpndInfo *base = nullptr; + OpndInfo *offset = nullptr; +}; + +class InsnInfo { + public: + InsnInfo(MemPool &memPool, Insn &insn) + : alloc(&memPool), bb(insn.GetBB()), insn(&insn), result(alloc.Adapter()), + origOpnd(alloc.Adapter()), optimalOpnd(alloc.Adapter()) {} + + virtual ~InsnInfo() = default; + MapleAllocator alloc; + uint32 hashIndex = 0; + bool mustNotBeRemoved = false; /* Some condition requires this insn. */ + BB *bb; /* The defining bb. */ + Insn *insn; /* The defining insn. */ + InsnInfo *same = nullptr; /* Other insns with the same hash value. */ + InsnInfo *prev = nullptr; + InsnInfo *next = nullptr; + MapleVector result; /* Result array. */ + MapleVector origOpnd; + MapleVector optimalOpnd; +}; + +class Ebo { + public: + Ebo(CGFunc &func, MemPool &memPool, LiveAnalysis *live, bool before, const std::string &phase) + : cgFunc(&func), + beforeRegAlloc(before), + phaseName(phase), + live(live), + eboMp(&memPool), + eboAllocator(&memPool), + visitedBBs(eboAllocator.Adapter()), + vRegInfo(eboAllocator.Adapter()), + exprInfoTable(eboAllocator.Adapter()), + insnInfoTable(eboAllocator.Adapter()) {} + + virtual ~Ebo() = default; + + MemOpndInfo *GetMemInfo(InsnInfo &insnInfo) const; + void SetInsnInfo(uint32 hashVal, InsnInfo &info) { + ASSERT(hashVal < insnInfoTable.size(), "hashVal out of insnInfoTable range"); + insnInfoTable.at(hashVal) = &info; + } + + void IncRef(OpndInfo &info) const { + ++info.refCount; + } + + void DecRef(OpndInfo &info) const { + --info.refCount; + } + + void EnlargeSpaceForLA(Insn &csetInsn) const; + bool IsSaveReg(const Operand &opnd) const; + bool IsFrameReg(Operand &opnd) const; + bool OperandEqual(const Operand &op1, const Operand &op2) const; + Operand *GetZeroOpnd(uint32 size) const; + bool IsPhysicalReg(const Operand &opnd) const; + bool HasAssignedReg(const Operand &opnd) const; + bool IsOfSameClass(const Operand &op0, const Operand &op1) const; + bool OpndAvailableInBB(const BB &bb, OpndInfo *info) const; + bool ForwardPropCheck(const Operand *opndReplace, const OpndInfo &opndInfo, const Operand &opnd, Insn &insn); + bool RegForwardCheck(Insn &insn, const Operand &opnd, const Operand *opndReplace, Operand &oldOpnd, + const OpndInfo *tmpInfo) const; + bool IsNotVisited(const BB &bb) { + return !visitedBBs.at(bb.GetId()); + }; + + void SetBBVisited(const BB &bb) { + visitedBBs.at(bb.GetId()) = true; + }; + + void UpdateOpndInfo(const Operand &opnd, OpndInfo &opndInfo, OpndInfo *newInfo, int32 hashVal); + void SetOpndInfo(const Operand &opnd, OpndInfo *opndInfo, int32 hashVal); + bool RegistersIdentical(const Operand &op0, const Operand &op1) const; + OpndInfo *GetOpndInfo(const Operand &opnd, int32 hashVal) const; + OpndInfo *GetNewOpndInfo(BB &bb, Insn *insn, Operand &opnd, int32 hashVal); + OpndInfo *OperandInfoUse(BB ¤tBB, Operand &localOpnd); + InsnInfo *GetNewInsnInfo(Insn &insn); + int32 ComputeOpndHash(const Operand &opnd) const; + uint32 ComputeHashVal( Insn &insn, const MapleVector &opndInfos) const; + void MarkOpndLiveIntoBB(const Operand &opnd, BB &into, BB &def) const; + void RemoveInsn(InsnInfo &info) const; + void 
RemoveUses(uint32 opndNum, const MapleVector &origInfo) const; + void HashInsn(Insn &insn, const MapleVector &origInfo, const MapleVector &opndInfos); + void BuildAllInfo(BB &bb); + InsnInfo *LocateInsnInfo(const OpndInfo &info) const; + void RemoveUnusedInsns(BB &bb, bool normal); + void UpdateNextInfo(const OpndInfo &opndInfo); + void BackupOpndInfoList(OpndInfo *saveLast); + void BackupInsnInfoList(InsnInfo *saveLast); + void AddBB2EB(BB &bb); + void EboInit(); + void EboProcessSingleBB(); + void EboProcess(); + void Run(); + std::string PhaseName() const { + return phaseName; + } + + protected: + CGFunc *cgFunc; + bool beforeRegAlloc; /* True if perform Ebo before register allocation. */ + virtual OpndInfo *OperandInfoDef(BB ¤tBB, Insn ¤tInsn, Operand &localOpnd) = 0; + virtual const RegOperand &GetRegOperand(const Operand &opnd) const = 0; + virtual bool IsGlobalNeeded(Insn &insn) const = 0; + virtual bool IsDecoupleStaticOp(Insn &insn) const = 0; + virtual bool IsFmov(const Insn &insn) const = 0; + virtual bool SpecialSequence(Insn &insn, const MapleVector &origInfos) = 0; + virtual bool DoConstProp(Insn &insn, uint32 i, Operand &opnd) = 0; + virtual bool Csel2Cset(Insn &insn, const MapleVector &opnds) = 0; + virtual bool SimplifyConstOperand(Insn &insn, const MapleVector &opnds, + const MapleVector &opndInfo) = 0; + virtual int32 GetOffsetVal(const MemOperand &mem) const = 0; + virtual bool OperandEqSpecial(const Operand &op1, const Operand &op2) const = 0; + virtual void BuildCallerSaveRegisters() = 0; + virtual void DefineAsmRegisters(InsnInfo &insnInfo) = 0; + virtual void DefineCallerSaveRegisters(InsnInfo &insnInfo) = 0; + virtual void DefineReturnUseRegister(Insn &insn) = 0; + virtual void DefineCallUseSpecialRegister(Insn &insn) = 0; + virtual void DefineClinitSpecialRegisters(InsnInfo &insnInfo) = 0; + virtual bool IsMovToSIMDVmov(Insn &insn, const Insn &replaceInsn) const = 0; + virtual bool IsPseudoRet(Insn &insn) const = 0; + virtual bool ChangeLdrMop(Insn &insn, const Operand &opnd) const = 0; + virtual bool IsAdd(const Insn &insn) const = 0; + virtual bool IsClinitCheck(const Insn &insn) const = 0; + virtual bool IsLastAndBranch(BB &bb, Insn &insn) const = 0; + virtual bool IsSameRedefine(BB &bb, Insn &insn, OpndInfo &opndInfo) const = 0; + virtual bool ResIsNotDefAndUse(Insn &insn) const = 0; + virtual bool LiveOutOfBB(const Operand &opnd, const BB &bb) const = 0; + virtual bool IsInvalidReg(const RegOperand &opnd) const = 0; + virtual bool IsZeroRegister(const Operand &opnd) const = 0; + virtual bool IsConstantImmOrReg(const Operand &opnd) const = 0; + OpndInfo *BuildMemOpndInfo(BB &bb, Insn &insn, Operand &opnd, uint32 opndIndex); + OpndInfo *BuildOperandInfo(BB &bb, Insn &insn, Operand &opnd, uint32 opndIndex, MapleVector &origInfos); + bool ForwardPropagateOpnd(Insn &insn, Operand *&opnd, uint32 opndIndex, OpndInfo *&opndInfo, + MapleVector &origInfos); + void SimplifyInsn(Insn &insn, bool &insnReplaced, bool opndsConstant, const MapleVector &opnds, + const MapleVector &opndInfos, const MapleVector &origInfos); + void FindRedundantInsns(BB &bb, Insn *&insn, const Insn *prev, bool insnReplaced, + MapleVector &opnds, MapleVector &opndInfos, + const MapleVector &origInfos); + void PreProcessSpecialInsn(Insn &insn); + + std::string phaseName; + LiveAnalysis *live; + uint32 bbNum = 0; /* bb numbers for an extend block. 
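Here an extended block is a single-entry group of basic blocks (added via AddBB2EB) that EBO optimizes as one unit; this field counts the BBs in the current group.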
*/ + MemPool *eboMp; + MapleAllocator eboAllocator; + MapleVector visitedBBs; + OpndInfo *firstOpndInfo = nullptr; + OpndInfo *lastOpndInfo = nullptr; + InsnInfo *firstInsnInfo = nullptr; + InsnInfo *lastInsnInfo = nullptr; + MapleUnorderedMap vRegInfo; + MapleVector exprInfoTable; + MapleVector insnInfoTable; + bool optSuccess = false; +}; + +MAPLE_FUNC_PHASE_DECLARE(CgEbo0, maplebe::CGFunc) +MAPLE_FUNC_PHASE_DECLARE(CgEbo1, maplebe::CGFunc) +MAPLE_FUNC_PHASE_DECLARE(CgPostEbo, maplebe::CGFunc) +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_EBO_H */ diff --git a/src/mapleall/maple_be/include/cg/eh_func.h b/src/mapleall/maple_be/include/cg/eh_func.h new file mode 100644 index 0000000000000000000000000000000000000000..c997b73ef0191d00879c2d7f8d9e03c9ef29f9f5 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/eh_func.h @@ -0,0 +1,204 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_EH_EH_FUNC_H +#define MAPLEBE_INCLUDE_EH_EH_FUNC_H +#include "mir_parser.h" +#include "mir_function.h" +#include "lsda.h" +#include "cg_phase.h" +#include "maple_phase.h" + +namespace maplebe { +class EHTry { + public: + EHTry(MapleAllocator &alloc, TryNode &tryNode) + : tryNode(&tryNode), + catchVec(alloc.Adapter()) {} + ~EHTry() = default; + + TryNode *GetTryNode() const { + return tryNode; + } + + void SetEndtryNode(StmtNode &endtryNode) { + this->endTryNode = &endtryNode; + } + + StmtNode *GetEndtryNode() { + return endTryNode; + } + + void SetFallthruGoto(StmtNode *fallthruGoto) { + this->fallThroughGoto = fallthruGoto; + } + + StmtNode *GetFallthruGoto() { + return fallThroughGoto; + } + + size_t GetCatchVecSize() const { + return catchVec.size(); + } + + void PushBackCatchVec(CatchNode &catchNode) { + catchVec.emplace_back(&catchNode); + } + + CatchNode *GetCatchNodeAt(size_t pos) const { + CHECK_FATAL(pos < GetCatchVecSize(), "pos is out of range."); + return catchVec.at(pos); + } + + void SetLSDACallSite(LSDACallSite &lsdaCallSiteVal) { + this->lsdaCallSite = &lsdaCallSiteVal; + } + + void SetCSAction(uint32 action) const { + lsdaCallSite->csAction = action; + } + + void DumpEHTry(const MIRModule &mirModule); + + private: + TryNode *tryNode; + StmtNode *endTryNode = nullptr; + StmtNode *fallThroughGoto = nullptr; /* no throw in the try block, the goto stmt to the fall through */ + MapleVector catchVec; + LSDACallSite *lsdaCallSite = nullptr; /* one try has a callsite */ +}; + +class EHThrow { + public: + explicit EHThrow(UnaryStmtNode &rtNode) + : rethrow(&rtNode) {} + ~EHThrow() = default; + + bool IsUnderTry() const { + return javaTry != nullptr; + } + + bool HasLSDA() const { + return startLabel != nullptr; + } + + const UnaryStmtNode *GetRethrow() const { + return rethrow; + } + + void SetJavaTry(EHTry *javaTryVal) { + this->javaTry = javaTryVal; + } + + LabelNode *GetStartLabel() { + return startLabel; + } + + LabelNode *GetEndLabel() { + return endLabel; + } + + void Lower(CGFunc &cgFunc); + void ConvertThrowToRethrow(CGFunc &cgFunc) 
const; + void ConvertThrowToRuntime(CGFunc &cgFunc, BaseNode &arg) const; + + private: + UnaryStmtNode *rethrow; /* must be a throw stmt */ + EHTry *javaTry = nullptr; /* the try statement wrapping this throw */ + LabelNode *startLabel = nullptr; /* the label that "MCC_RethrowException" or "MCC_ThrowException" begin */ + LabelNode *endLabel = nullptr; /* the label that "MCC_RethrowException" or "MCC_ThrowException" end */ +}; + +class EHFunc { + public: + static constexpr uint8 kTypeEncoding = 0x9b; /* same thing as LSDAHeader.kTypeEncoding */ + explicit EHFunc(CGFunc &func); + ~EHFunc() = default; + + void CollectEHInformation(std::vector> &catchVec); + void InsertEHSwitchTable(); + void CreateLSDA(); + bool NeedFullLSDA() const; + bool NeedFastLSDA() const; + void InsertCxaAfterEachCatch(const std::vector> &catchVec) const; + void GenerateCleanupLabel(); + void MergeCatchToTry(const std::vector> &catchVec); + void BuildEHTypeTable(const std::vector> &catchVec); + void LowerThrow(); /* for non-personality function */ + void CreateTypeInfoSt() const; + void DumpEHFunc() const; + + bool HasThrow() const { + return !rethrowVec.empty(); + } + + void AddTry(EHTry &ehTry) { + tryVec.emplace_back(&ehTry); + } + + size_t GetEHTyTableSize() const { + return ehTyTable.size(); + } + + TyIdx &GetEHTyTableMember(size_t index) { + CHECK_FATAL(index < ehTyTable.size(), "out of ehTyTable"); + return ehTyTable[index]; + } + + LSDAHeader *GetLSDAHeader() { + return lsdaHeader; + } + + LSDACallSiteTable *GetLSDACallSiteTable() { + return lsdaCallSiteTable; + } + + const LSDACallSiteTable *GetLSDACallSiteTable() const { + return lsdaCallSiteTable; + } + + const LSDAActionTable *GetLSDAActionTable() const { + return lsdaActionTable; + } + + void AddRethrow(EHThrow &rethrow) { + rethrowVec.emplace_back(&rethrow); + } + + private: + void CreateLSDAAction(); + void InsertDefaultLabelAndAbortFunc(BlockNode &blkNode, SwitchNode &switchNode, + const StmtNode &beforeEndLabel) const; + void FillSwitchTable(SwitchNode &switchNode, const EHTry &ehTry); + void CreateLSDAHeader(); + void FillLSDACallSiteTable(); + LabelIdx CreateLabel(const std::string &cstr); + bool HasTry() const; + + CGFunc *cgFunc; + LabelIdx labelIdx = 0; + MapleVector tryVec; /* try stmt node */ + MapleVector ehTyTable; /* the type that would emit in LSDA */ + MapleMap ty2IndexTable; /* use the TyIdx to get the index of ehTyTable; */ + LSDAHeader *lsdaHeader = nullptr; + LSDACallSiteTable *lsdaCallSiteTable = nullptr; + LSDAActionTable *lsdaActionTable = nullptr; + MapleVector rethrowVec; /* EHRethrow */ +}; + +MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgBuildEHFunc, maplebe::CGFunc) +MAPLE_FUNC_PHASE_DECLARE_END +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_EH_EH_FUNC_H */ \ No newline at end of file diff --git a/src/mapleall/maple_be/include/cg/emit.h b/src/mapleall/maple_be/include/cg/emit.h new file mode 100644 index 0000000000000000000000000000000000000000..4ae714f7b36c1f5cc59f100718c702aea93381c2 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/emit.h @@ -0,0 +1,404 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_CG_EMIT_H +#define MAPLEBE_INCLUDE_CG_EMIT_H + +/* C++ headers */ +#include +#include +#include +#include +#include "isa.h" +#include "lsda.h" +#include "asm_info.h" +#include "cg.h" + +/* Maple IR headers */ +#include "mir_module.h" +#include "mir_const.h" +#include "mempool_allocator.h" +#include "muid_replacement.h" +#include "namemangler.h" +#include "debug_info.h" +#include "alignment.h" + +namespace maple { +const char *GetDwTagName(unsigned n); +const char *GetDwFormName(unsigned n); +const char *GetDwAtName(unsigned n); +const char *GetDwOpName(unsigned n); +} /* namespace maple */ + +#if TARGRISCV64 +#define CMNT "\t# " +#else +#define CMNT "\t// " +#endif +#define TEXT_BEGIN text0 +#define TEXT_END etext0 +#define DEBUG_INFO_0 debug_info0 +#define DEBUG_ABBREV_0 debug_abbrev0 +#define DEBUG_LINE_0 debug_line0 +#define DEBUG_STR_LABEL ASF + +namespace maplebe { +constexpr int32 kSizeOfDecoupleStaticStruct = 4; +constexpr uint32 kHugeSoInsnCountThreshold = 0x1f00000; /* 124M (4bytes per Insn), leave 4M rooms for 128M */ +constexpr char kHugeSoPostFix[] = "$$hugeso_"; +constexpr char kDebugMapleThis[] = "_this"; +constexpr uint32 kDwarfVersion = 4; +constexpr uint32 kSizeOfPTR = 8; +class StructEmitInfo { + public: + /* default ctor */ + StructEmitInfo() = default; + + ~StructEmitInfo() = default; + + uint16 GetNextFieldOffset() const { + return nextFieldOffset; + } + + void SetNextFieldOffset(uint16 offset) { + nextFieldOffset = offset; + } + + void IncreaseNextFieldOffset(uint16 value) { + nextFieldOffset += value; + } + + uint8 GetCombineBitFieldWidth() const { + return combineBitFieldWidth; + } + + void SetCombineBitFieldWidth(uint8 offset) { + combineBitFieldWidth = offset; + } + + void IncreaseCombineBitFieldWidth(uint8 value) { + combineBitFieldWidth += value; + } + + void DecreaseCombineBitFieldWidth(uint8 value) { + combineBitFieldWidth -= value; + } + + uint64 GetCombineBitFieldValue() const { + return combineBitFieldValue; + } + + void SetCombineBitFieldValue(uint64 value) { + combineBitFieldValue = value; + } + + uint64 GetTotalSize() const { + return totalSize; + } + + void SetTotalSize(uint64 value) { + totalSize = value; + } + + void IncreaseTotalSize(uint64 value) { + totalSize += value; + } + + private: + /* Next field offset in struct. */ + uint16 nextFieldOffset = 0; + uint8 combineBitFieldWidth = 0; + uint64 combineBitFieldValue = 0; + /* Total size emitted in current struct. 
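Presumably the byte count already written for the current aggregate, which the emitter can compare against the type size to decide how much zero padding (EmitNullConstant) still needs to be emitted.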
*/ + uint64 totalSize = 0; +}; + +class FuncEmitInfo { + public: + CGFunc &GetCGFunc() { + return cgFunc; + } + + const CGFunc &GetCGFunc() const { + return cgFunc; + } + + protected: + explicit FuncEmitInfo(CGFunc &func) : cgFunc(func) {} + ~FuncEmitInfo() = default; + + private: + CGFunc &cgFunc; +}; + +class Emitter { + public: + void CloseOutput() { + if (outStream.is_open()) { + outStream.close(); + } + rangeIdx2PrefixStr.clear(); + hugeSoTargets.clear(); + labdie2labidxTable.clear(); + fileMap.clear(); + } + + MOperator GetCurrentMOP() const { + return currentMop; + } + + void SetCurrentMOP(const MOperator &mOp) { + currentMop = mOp; + } + + void EmitAsmLabel(AsmLabel label); + void EmitAsmLabel(const MIRSymbol &mirSymbol, AsmLabel label); + void EmitFileInfo(const std::string &fileName); + /* a symbol start/end a block */ + void EmitBlockMarker(const std::string &markerName, const std::string §ionName, + bool withAddr, const std::string &addrName = ""); + void EmitNullConstant(uint64 size); + void EmitCombineBfldValue(StructEmitInfo &structEmitInfo); + void EmitBitFieldConstant(StructEmitInfo &structEmitInfo, MIRConst &mirConst, const MIRType *nextType, + uint64 fieldOffset); + void EmitScalarConstant(MIRConst &mirConst, bool newLine = true, bool flag32 = false, bool isIndirect = false); + void EmitStr(const std::string& mplStr, bool emitAscii = false, bool emitNewline = false); + void EmitStrConstant(const MIRStrConst &mirStrConst, bool isIndirect = false); + void EmitStr16Constant(const MIRStr16Const &mirStr16Const); + void EmitIntConst(const MIRSymbol &mirSymbol, MIRAggConst &aggConst, uint32 itabConflictIndex, + const std::map &strIdx2Type, size_t idx); + void EmitAddrofFuncConst(const MIRSymbol &mirSymbol, MIRConst &elemConst, size_t idx); + void EmitAddrofSymbolConst(const MIRSymbol &mirSymbol, MIRConst &elemConst, size_t idx); + void EmitConstantTable(const MIRSymbol &mirSymbol, MIRConst &mirConst, + const std::map &strIdx2Type); + void EmitClassInfoSequential(const MIRSymbol &mirSymbol, const std::map &strIdx2Type, + const std::string §ionName); + void EmitMethodFieldSequential(const MIRSymbol &mirSymbol, const std::map &strIdx2Type, + const std::string §ionName); + void EmitLiterals(std::vector> &literals, + const std::map &strIdx2Type); + void EmitFuncLayoutInfo(const MIRSymbol &layout); + void EmitGlobalVars(std::vector> &globalVars); + void EmitGlobalVar(const MIRSymbol &globalVar); + void EmitStaticFields(const std::vector &fields); + void EmitLiteral(const MIRSymbol &literal, const std::map &strIdx2Type); + void EmitStringPointers(); + void GetHotAndColdMetaSymbolInfo(const std::vector &mirSymbolVec, + std::vector &hotFieldInfoSymbolVec, + std::vector &coldFieldInfoSymbolVec, const std::string &prefixStr, + bool forceCold = false) const; + void EmitMetaDataSymbolWithMarkFlag(const std::vector &mirSymbolVec, + const std::map &strIdx2Type, + const std::string &prefixStr, const std::string §ionName, + bool isHotFlag); + void EmitMethodDeclaringClass(const MIRSymbol &mirSymbol, const std::string §ionName); + void MarkVtabOrItabEndFlag(const std::vector &mirSymbolVec) const; + void EmitArrayConstant(MIRConst &mirConst); + void EmitStructConstant(MIRConst &mirConst); + void EmitStructConstant(MIRConst &mirConst, uint32 &subStructFieldCounts); + void EmitVectorConstant(MIRConst &mirConst); + void EmitLocalVariable(const CGFunc &cgFunc); + void EmitUninitializedSymbolsWithPrefixSection(const MIRSymbol &symbol, const std::string §ionName); + void EmitUninitializedSymbol(const 
MIRSymbol &mirSymbol); + void EmitGlobalVariable(); + void EmitGlobalRootList(const MIRSymbol &mirSymbol); + void EmitMuidTable(const std::vector &vec, const std::map &strIdx2Type, + const std::string §ionName); + MIRAddroffuncConst *GetAddroffuncConst(const MIRSymbol &mirSymbol, MIRAggConst &aggConst) const; + int64 GetFieldOffsetValue(const std::string &className, const MIRIntConst &intConst, + const std::map &strIdx2Type) const; + + Emitter &Emit(int64 val) { + outStream << val; + return *this; + } + + Emitter &Emit(const IntVal& val) { + outStream << val.GetExtValue(); + return *this; + } + + Emitter &Emit(const MapleString &str) { + ASSERT(str.c_str() != nullptr, "nullptr check"); + outStream << str; + return *this; + } + + Emitter &Emit(const std::string &str) { + outStream << str; + return *this; + } + + void EmitLabelRef(LabelIdx labIdx); + void EmitStmtLabel(LabelIdx labIdx); + void EmitLabelPair(const LabelPair &pairLabel); + void EmitLabelForFunc(const MIRFunction *func, LabelIdx labIdx); + + /* Emit signed/unsigned integer literals in decimal or hexadecimal */ + void EmitDecSigned(int64 num); + void EmitDecUnsigned(uint64 num); + void EmitHexUnsigned(uint64 num); + + /* Dwarf debug info */ + void FillInClassByteSize(DBGDie *die, DBGDieAttr *byteSizeAttr) const; + void SetupDBGInfo(DebugInfo *mirdi); + void ApplyInPrefixOrder(DBGDie *die, const std::function &func); + void AddLabelDieToLabelIdxMapping(DBGDie *lblDie, LabelIdx lblIdx); + LabelIdx GetLabelIdxForLabelDie(DBGDie *lblDie); + void EmitDIHeader(); + void EmitDIFooter(); + void EmitDIHeaderFileInfo(); + void EmitDIDebugInfoSection(DebugInfo *mirdi); + void EmitDIDebugAbbrevSection(DebugInfo *mirdi); + void EmitDIDebugARangesSection(); + void EmitDIDebugRangesSection(); + void EmitDIDebugLineSection(); + void EmitDIDebugStrSection(); + void EmitDIAttrValue(DBGDie *die, DBGDieAttr *attr, DwAt attrName, DwTag tagName, DebugInfo *di); + void EmitDIFormSpecification(unsigned int dwform); + void EmitDIFormSpecification(const DBGDieAttr *attr) { + EmitDIFormSpecification(attr->GetDwForm()); + } + +#if 1 /* REQUIRE TO SEPERATE TARGAARCH64 TARGARM32 */ +/* Following code is under TARGAARCH64 condition */ + void EmitHugeSoRoutines(bool lastRoutine = false); + void EmitInlineAsmSection(); + + uint64 GetJavaInsnCount() const { + return javaInsnCount; + } + + uint64 GetFuncInsnCount() const { + return funcInsnCount; + } + + MapleMap &GetFileMap() { + return fileMap; + } + + void SetFileMapValue(uint32_t n, const std::string &file) { + fileMap[n] = file; + } + + CG *GetCG() const { + return cg; + } + + void ClearFuncInsnCount() { + funcInsnCount = 0; + } + + void IncreaseJavaInsnCount(uint64 n = 1, bool alignToQuad = false) { + if (alignToQuad) { + javaInsnCount = (javaInsnCount + 1) & (~0x1UL); + funcInsnCount = (funcInsnCount + 1) & (~0x1UL); + } + javaInsnCount += n; + funcInsnCount += n; +#ifdef EMIT_INSN_COUNT + Emit(" /* InsnCount: "); + Emit(javaInsnCount *); + Emit("*/ "); +#endif + } + + bool NeedToDealWithHugeSo() const { + return javaInsnCount > kHugeSoInsnCountThreshold; + } + + std::string HugeSoPostFix() const { + return std::string(kHugeSoPostFix) + std::to_string(hugeSoSeqence); + } + + void InsertHugeSoTarget(const std::string &target) { + (void)hugeSoTargets.insert(target); + } +#endif + + void InsertLabdie2labidxTable(DBGDie *lbldie, LabelIdx lab) { + if (labdie2labidxTable.find(lbldie) == labdie2labidxTable.end()) { + labdie2labidxTable[lbldie] = lab; + } + } + + protected: + Emitter(CG &cg, const std::string 
&fileName) + : cg(&cg), + rangeIdx2PrefixStr(cg.GetMIRModule()->GetMPAllocator().Adapter()), + arraySize(0), + isFlexibleArray(false), + stringPtr(cg.GetMIRModule()->GetMPAllocator().Adapter()), + localStrPtr(cg.GetMIRModule()->GetMPAllocator().Adapter()), + hugeSoTargets(cg.GetMIRModule()->GetMPAllocator().Adapter()), + labdie2labidxTable(std::less(), cg.GetMIRModule()->GetMPAllocator().Adapter()), + fileMap(std::less(), cg.GetMIRModule()->GetMPAllocator().Adapter()) { + outStream.open(fileName, std::ios::trunc); + MIRModule &mirModule = *cg.GetMIRModule(); + memPool = mirModule.GetMemPool(); + asmInfo = memPool->New(*memPool); + } + + ~Emitter() = default; + + private: + AsmLabel GetTypeAsmInfoName(PrimType primType) const; + void EmitDWRef(const std::string &name); + void InitRangeIdx2PerfixStr(); + void EmitAddressString(const std::string &address); + void EmitAliasAndRef(const MIRSymbol &sym); /* handle function symbol which has alias and weak ref */ + + CG *cg; + MOperator currentMop = UINT_MAX; + MapleUnorderedMap rangeIdx2PrefixStr; + const AsmInfo *asmInfo = nullptr; + std::ofstream outStream; + MemPool *memPool = nullptr; + uint32 arraySize; + bool isFlexibleArray; + MapleSet stringPtr; + MapleVector localStrPtr; +#if 1 /* REQUIRE TO SEPERATE TARGAARCH64 TARGARM32 */ +/* Following code is under TARGAARCH64 condition */ + uint64 javaInsnCount = 0; + uint64 funcInsnCount = 0; + MapleSet hugeSoTargets; + uint32 hugeSoSeqence = 2; +#endif + MapleMap labdie2labidxTable; + MapleMap fileMap; +}; + +class OpndEmitVisitor : public OperandVisitorBase, + public OperandVisitors { + public: + explicit OpndEmitVisitor(Emitter &asmEmitter): emitter(asmEmitter) {} + virtual ~OpndEmitVisitor() = default; + protected: + Emitter &emitter; +}; +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_EMIT_H */ diff --git a/src/mapleall/maple_be/include/cg/framewhitelist.def b/src/mapleall/maple_be/include/cg/framewhitelist.def new file mode 100644 index 0000000000000000000000000000000000000000..59b30fc3b36a9c5db47ae22f21ead7d381560f45 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/framewhitelist.def @@ -0,0 +1,51 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan PSL v1. + * You can use this software according to the terms and conditions of the Mulan PSL v1. + * You may obtain a copy of Mulan PSL v1 at: + * + * http://license.coscl.org.cn/MulanPSL + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v1 for more details. 
+ */ +"Ldalvik_2Fsystem_2FVMStack_3B_7CgetStackClass1_7C_28_29Ljava_2Flang_2FClass_3B", +"Ldalvik_2Fsystem_2FVMStack_3B_7CgetStackClass2_7C_28_29Ljava_2Flang_2FClass_3B", +"Ljava_2Flang_2FClass_3B_7CnewInstance_7C_28_29Ljava_2Flang_2FObject_3B", +"Ljava_2Flang_2Freflect_2FConstructor_3B_7CnewInstance_7C_28ALjava_2Flang_2FObject_3B_29Ljava_2Flang_2FObject_3B", +"Ljava_2Flang_2Freflect_2FField_3B_7CgetShort_7C_28Ljava_2Flang_2FObject_3B_29S", +"Ljava_2Flang_2Freflect_2FField_3B_7CgetInt_7C_28Ljava_2Flang_2FObject_3B_29I", +"Ljava_2Flang_2Freflect_2FField_3B_7CgetFloat_7C_28Ljava_2Flang_2FObject_3B_29F", +"Ljava_2Flang_2Freflect_2FField_3B_7CgetDouble_7C_28Ljava_2Flang_2FObject_3B_29D", +"Ljava_2Flang_2Freflect_2FField_3B_7CgetChar_7C_28Ljava_2Flang_2FObject_3B_29C", +"Ljava_2Flang_2Freflect_2FField_3B_7CgetByte_7C_28Ljava_2Flang_2FObject_3B_29B", +"Ljava_2Flang_2Freflect_2FField_3B_7CgetBoolean_7C_28Ljava_2Flang_2FObject_3B_29Z", +"Ljava_2Flang_2Freflect_2FField_3B_7CgetLong_7C_28Ljava_2Flang_2FObject_3B_29J", +"Ljava_2Flang_2Freflect_2FField_3B_7Cget_7C_28Ljava_2Flang_2FObject_3B_29Ljava_2Flang_2FObject_3B", +"Ljava_2Flang_2Freflect_2FField_3B_7CsetBoolean_7C_28Ljava_2Flang_2FObject_3BZ_29V", +"Ljava_2Flang_2Freflect_2FField_3B_7Cset_7C_28Ljava_2Flang_2FObject_3BLjava_2Flang_2FObject_3B_29V", +"Ljava_2Flang_2Freflect_2FField_3B_7CsetShort_7C_28Ljava_2Flang_2FObject_3BS_29V", +"Ljava_2Flang_2Freflect_2FField_3B_7CsetLong_7C_28Ljava_2Flang_2FObject_3BJ_29V", +"Ljava_2Flang_2Freflect_2FField_3B_7CsetInt_7C_28Ljava_2Flang_2FObject_3BI_29V", +"Ljava_2Flang_2Freflect_2FField_3B_7CsetFloat_7C_28Ljava_2Flang_2FObject_3BF_29V", +"Ljava_2Flang_2Freflect_2FField_3B_7CsetDouble_7C_28Ljava_2Flang_2FObject_3BD_29V", +"Ljava_2Flang_2Freflect_2FField_3B_7CsetChar_7C_28Ljava_2Flang_2FObject_3BC_29V", +"Ljava_2Flang_2Freflect_2FField_3B_7CsetByte_7C_28Ljava_2Flang_2FObject_3BB_29V", +"LThrowableNativeUncover_3B_7Cmain_7C_28ALjava_2Flang_2FString_3B_29V", +"Llibcore_2Fjava_2Flang_2Freflect_2FMethodTest_24InterfaceWithDefault_3B_7CdefaultMethod_7C_28_29Ljava_2Flang_2FString_3B", +"Llibcore_2Fjava_2Flang_2Freflect_2FMethodTest_247_3B_7CdefaultMethod_7C_28_29Ljava_2Flang_2FString_3B", +"Llibcore_2Fjava_2Flang_2Freflect_2FMethodTest_249_3B_7CdefaultMethod_7C_28_29Ljava_2Flang_2FString_3B", +"Llibcore_2Fjava_2Flang_2Freflect_2FMethodTest_2410_3B_7CdefaultMethod_7C_28_29Ljava_2Flang_2FString_3B", +"Llibcore_2Fjava_2Flang_2Freflect_2FMethodTest_24InterfaceWithRedefinedMethods_3B_7CdefaultMethod_7C_28_29Ljava_2Flang_2FString_3B", +"Llibcore_2Fjava_2Flang_2Freflect_2FMethodTest_2413_3B_7CdefaultMethod_7C_28_29Ljava_2Flang_2FString_3B", +"Llibcore_2Fjava_2Flang_2Freflect_2FMethodTest_241ImplementationSuperUser_3B_7CdefaultMethod_7C_28_29Ljava_2Flang_2FString_3B", +"Llibcore_2Fjava_2Flang_2Freflect_2FMethodTest_24InterfaceWithStatic_3B_7CstaticMethod_7C_28_29Ljava_2Flang_2FString_3B", +"Ljava_2Flang_2Freflect_2FMethod_3B_7Cinvoke_7C_28Ljava_2Flang_2FObject_3BALjava_2Flang_2FObject_3B_29Ljava_2Flang_2FObject_3B", +"Llibcore_2Fjava_2Flang_2Freflect_2FMethodTest_24OtherInterfaceWithDefault_3B_7CdefaultMethod_7C_28_29Ljava_2Flang_2FString_3B", +"LStackoverflow_3B_7CstackOverflow_7C_28_29V", +"Llibcore_2Fsun_2Fmisc_2FUnsafeTest_241_3B_7Crun_7C_28_29V", +"__sigsetjmp", +"_setjmp" diff --git a/src/mapleall/maple_be/include/cg/global.h b/src/mapleall/maple_be/include/cg/global.h new file mode 100644 index 0000000000000000000000000000000000000000..80ad3f08de55de6be98331b10f65024ac4e5b5f1 --- /dev/null +++ 
b/src/mapleall/maple_be/include/cg/global.h @@ -0,0 +1,41 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_CG_GLOBAL_H +#define MAPLEBE_INCLUDE_CG_GLOBAL_H + +#include "cg_phase.h" +#include "maple_phase.h" + +namespace maplebe { +class GlobalOpt { + public: + explicit GlobalOpt(CGFunc &func) : cgFunc(func) {} + virtual ~GlobalOpt() = default; + virtual void Run() {} + std::string PhaseName() const { + return "globalopt"; + } + + protected: + /* if the number of bbs is more than 500 or the number of insns is more than 9000, don't optimize. */ + static constexpr uint32 kMaxBBNum = 500; + static constexpr uint32 kMaxInsnNum = 9000; + CGFunc &cgFunc; +}; + +MAPLE_FUNC_PHASE_DECLARE(CgGlobalOpt, maplebe::CGFunc) +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_GLOBAL_H */ \ No newline at end of file diff --git a/src/mapleall/maple_be/include/cg/global_schedule.h b/src/mapleall/maple_be/include/cg/global_schedule.h new file mode 100644 index 0000000000000000000000000000000000000000..29a45c5f7ee5e721ae4ea1fe39613dc2e8e7c85c --- /dev/null +++ b/src/mapleall/maple_be/include/cg/global_schedule.h @@ -0,0 +1,44 @@ +/* +* Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. +* +* OpenArkCompiler is licensed under Mulan PSL v2. +* You can use this software according to the terms and conditions of the Mulan PSL v2. +* You may obtain a copy of Mulan PSL v2 at: +* +* http://license.coscl.org.cn/MulanPSL2 +* +* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +* FIT FOR A PARTICULAR PURPOSE. +* See the Mulan PSL v2 for more details. +*/ +#ifndef MAPLEBE_INCLUDE_CG_GLOBAL_SCHEDULE_H +#define MAPLEBE_INCLUDE_CG_GLOBAL_SCHEDULE_H + +#include "cgfunc.h" +#include "control_dep_analysis.h" +#include "data_dep_analysis.h" + +namespace maplebe { +class GlobalSchedule { + public: + GlobalSchedule(MemPool &mp, CGFunc &f, ControlDepAnalysis &cdAna, InterDataDepAnalysis &interDDA) + : gsMempool(mp), gsAlloc(&mp), cgFunc(f), cda(cdAna), idda(interDDA), + dataNodes(gsAlloc.Adapter()) {} + virtual ~GlobalSchedule() = default; + + void Run(); + + protected: + MemPool &gsMempool; + MapleAllocator gsAlloc; + CGFunc &cgFunc; + ControlDepAnalysis &cda; + InterDataDepAnalysis &idda; + MapleVector dataNodes; +}; + +MAPLE_FUNC_PHASE_DECLARE(CgGlobalSchedule, maplebe::CGFunc) +} /* namespace maplebe */ + +#endif // MAPLEBE_INCLUDE_CG_GLOBAL_SCHEDULE_H diff --git a/src/mapleall/maple_be/include/cg/ico.h b/src/mapleall/maple_be/include/cg/ico.h new file mode 100644 index 0000000000000000000000000000000000000000..9155fd3a9c2580e4e7e726d7752acdfc2b4a1860 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/ico.h @@ -0,0 +1,48 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. 
+ * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_CG_ICO_H +#define MAPLEBE_INCLUDE_CG_ICO_H +#include "optimize_common.h" +#include "live.h" + +namespace maplebe { +class IfConversionOptimizer : public Optimizer { + public: + IfConversionOptimizer(CGFunc &func, MemPool &memPool) : Optimizer(func, memPool) { + name = "ICO"; + } + + ~IfConversionOptimizer() override = default; +}; + +/* If-Then-Else pattern */ +class ICOPattern : public OptimizationPattern { + public: + explicit ICOPattern(CGFunc &func) : OptimizationPattern(func) { + dotColor = kIcoIte; + patternName = "If-Then-Else"; + } + ~ICOPattern() override = default; + static constexpr int kThreshold = 2; + + protected: + Insn *FindLastCmpInsn(BB &bb) const; + std::vector GetLabelOpnds(Insn &insn) const; +}; + +MAPLE_FUNC_PHASE_DECLARE(CgIco, maplebe::CGFunc) +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_ICO_H */ diff --git a/src/mapleall/maple_be/include/cg/immvalid.def b/src/mapleall/maple_be/include/cg/immvalid.def new file mode 100644 index 0000000000000000000000000000000000000000..5b38d448c22804f89c0ad8a6c161e5bb18cd8285 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/immvalid.def @@ -0,0 +1,176 @@ +static std::set ValidBitmaskImmSet = { +#include "valid_bitmask_imm.txt" +}; +constexpr uint32 kMaxBitTableSize = 5; +constexpr std::array kBitmaskImmMultTable = { + 0x0000000100000001UL, 0x0001000100010001UL, 0x0101010101010101UL, 0x1111111111111111UL, 0x5555555555555555UL, +}; + +bool IsBitSizeImmediate(uint64 val, uint32 bitLen, uint32 nLowerZeroBits) { + /* mask1 is a 64bits number that is all 1 shifts left size bits */ + const uint64 mask1 = 0xffffffffffffffffUL << bitLen; + /* mask2 is a 64 bits number that nlowerZeroBits are all 1, higher bits aro all 0 */ + uint64 mask2 = (1UL << static_cast(nLowerZeroBits)) - 1UL; + return (mask2 & val) == 0UL && (mask1 & ((static_cast(val)) >> nLowerZeroBits)) == 0UL; +}; + +bool IsBitmaskImmediate(uint64 val, uint32 bitLen) { + ASSERT(val != 0, "IsBitmaskImmediate() don's accept 0 or -1"); + ASSERT(static_cast(val) != -1, "IsBitmaskImmediate() don's accept 0 or -1"); + if ((bitLen == k32BitSize) && (static_cast(val) == -1)) { + return false; + } + uint64 val2 = val; + if (bitLen == k32BitSize) { + val2 = (val2 << k32BitSize) | (val2 & ((1ULL << k32BitSize) - 1)); + } + bool expectedOutcome = (ValidBitmaskImmSet.find(val2) != ValidBitmaskImmSet.end()); + + if ((val & 0x1) != 0) { + /* + * we want to work with + * 0000000000000000000000000000000000000000000001100000000000000000 + * instead of + * 1111111111111111111111111111111111111111111110011111111111111111 + */ + val = ~val; + } + + if (bitLen == k32BitSize) { + val = (val << k32BitSize) | (val & ((1ULL << k32BitSize) - 1)); + } + + /* get the least significant bit set and add it to 'val' */ + uint64 tmpVal = val + (val & static_cast(UINT64_MAX - val + 1)); + + /* now check if tmp is a power of 2 or tmpVal==0. 
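For example, with a 64-bit val = 0x60000 (one run of two adjacent ones), the lowest set bit is 0x20000, so tmpVal = 0x60000 + 0x20000 = 0x80000; then tmpVal & (tmpVal - 1) == 0 and the function returns true.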
*/ + tmpVal = tmpVal & (tmpVal - 1); + if (tmpVal == 0) { + if (!expectedOutcome) { + LogInfo::MapleLogger() << "0x" << std::hex << std::setw(static_cast(k16ByteSize)) << + std::setfill('0') << static_cast(val) << "\n"; + return false; + } + ASSERT(expectedOutcome, "incorrect implementation: not valid value but returning true"); + /* power of two or zero ; return true */ + return true; + } + + int32 p0 = __builtin_ctzll(val); + int32 p1 = __builtin_ctzll(tmpVal); + int64 diff = p1 - p0; + + /* check if diff is a power of two; return false if not. */ + if ((static_cast(diff) & (static_cast(diff) - 1)) != 0) { + ASSERT(!expectedOutcome, "incorrect implementation: valid value but returning false"); + return false; + } + + uint32 logDiff = static_cast(__builtin_ctzll(static_cast(diff))); + uint64 pattern = val & ((1ULL << static_cast(diff)) - 1); +#if DEBUG + bool ret = (val == pattern * kBitmaskImmMultTable[kMaxBitTableSize - logDiff]); + ASSERT(expectedOutcome == ret, "incorrect implementation: return value does not match expected outcome"); + return ret; +#else + return val == pattern * kBitmaskImmMultTable[kMaxBitTableSize - logDiff]; +#endif +} + +bool Imm12BitValid(int64 value) { + bool result = IsBitSizeImmediate(static_cast(value), kMaxImmVal12Bits, 0); + // for target linux-aarch64-gnu + result = result || IsBitSizeImmediate(static_cast(value), kMaxImmVal12Bits, kMaxImmVal12Bits); + return result; +} + +bool Imm12BitMaskValid(int64 value) { + if (value == 0 || static_cast(value) == -1) { + return false; + } + return IsBitmaskImmediate(static_cast(value), k32BitSize); +} + +bool Imm13BitValid(int64 value) { + bool result = IsBitSizeImmediate(static_cast(value), kMaxImmVal13Bits, 0); + // for target linux-aarch64-gnu + result = result || IsBitSizeImmediate(static_cast(value), kMaxImmVal13Bits, kMaxImmVal13Bits); + return result; +} + +bool Imm13BitMaskValid(int64 value) { + if (value == 0 || static_cast(value) == -1) { + return false; + } + return IsBitmaskImmediate(static_cast(value), k64BitSize); +} + +bool Imm16BitValid(int64 value) { + bool result = IsBitSizeImmediate(static_cast(value), kMaxImmVal16Bits, 0); + /* + * for target linux-aarch64-gnu + * aarch64 assembly takes up to 24-bits immediate, generating + * either cmp or cmp with shift 12 encoding + */ + result = result || IsBitSizeImmediate(static_cast(value), kMaxImmVal12Bits, kMaxImmVal12Bits); + return result; +} + +/* + * 8bit : 0 + * halfword : 1 + * 32bit - word : 2 + * 64bit - word : 3 + * 128bit- word : 4 + */ +bool StrLdrSignedOfstValid(int64 value, uint wordSize) { + if (value <= k256BitSize && value >= kNegative256BitSize) { + return true; + } else if ((value > k256BitSize) && (value <= kMaxPimm[wordSize])) { + uint64 mask = (1U << wordSize) - 1U; + return (static_cast(value) & mask) > 0 ? false : true; + } + return false; +} + + +bool StrLdr8ImmValid(int64 value) { + return StrLdrSignedOfstValid(value, 0); +} + +bool StrLdr16ImmValid(int64 value) { + return StrLdrSignedOfstValid(value, k1ByteSize); +} + +bool StrLdr32ImmValid(int64 value) { + return StrLdrSignedOfstValid(value, k2ByteSize); +} + +bool StrLdr32PairImmValid(int64 value) { + if ((value <= kMaxSimm32Pair) && (value >= kMinSimm32)) { + return (static_cast(value) & 3) > 0 ? false : true; + } + return false; +} + +bool StrLdr64ImmValid(int64 value) { + return StrLdrSignedOfstValid(value, k3ByteSize); +} + +bool StrLdr64PairImmValid(int64 value) { + if (value <= kMaxSimm64Pair && (value >= kMinSimm64)) { + return (static_cast(value) & 7) > 0 ? 
false : true; + } + return false; +} + +bool StrLdr128ImmValid(int64 value) { + return StrLdrSignedOfstValid(value, k4ByteSize); +} + +bool StrLdr128PairImmValid(int64 value) { + if (value < k1024BitSize && (value >= kNegative1024BitSize)) { + return (static_cast(value) & 0xf) > 0 ? false : true; + } + return false; +} diff --git a/src/mapleall/maple_be/include/cg/insn.h b/src/mapleall/maple_be/include/cg/insn.h new file mode 100644 index 0000000000000000000000000000000000000000..806446a713676be8e8be10149c0b0ff91c3d214a --- /dev/null +++ b/src/mapleall/maple_be/include/cg/insn.h @@ -0,0 +1,678 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_CG_INSN_H +#define MAPLEBE_INCLUDE_CG_INSN_H +/* C++ headers */ +#include /* for nullptr */ +#include +#include +#include +#include "operand.h" +#include "mpl_logging.h" +#include "isa.h" + +/* Maple IR header */ +#include "types_def.h" /* for uint32 */ +#include "common_utils.h" + +namespace maplebe { +/* forward declaration */ +class BB; +class CG; +class Emitter; +class DepNode; +class Insn { + public: + enum RetType : uint8 { + kRegNull, /* no return type */ + kRegFloat, /* return register is V0 */ + kRegInt /* return register is R0 */ + }; + /* MCC_DecRefResetPair clear 2 stack position, MCC_ClearLocalStackRef clear 1 stack position */ + static constexpr uint8 kMaxStackOffsetSize = 2; + + Insn(MemPool &memPool, MOperator opc) + : mOp(opc), + localAlloc(&memPool), + opnds(localAlloc.Adapter()), + registerBinding(localAlloc.Adapter()), + comment(&memPool) {} + Insn(MemPool &memPool, MOperator opc, Operand &opnd0) : Insn(memPool, opc) { opnds.emplace_back(&opnd0); } + Insn(MemPool &memPool, MOperator opc, Operand &opnd0, Operand &opnd1) : Insn(memPool, opc) { + opnds.emplace_back(&opnd0); + opnds.emplace_back(&opnd1); + } + Insn(MemPool &memPool, MOperator opc, Operand &opnd0, Operand &opnd1, Operand &opnd2) : Insn(memPool, opc) { + opnds.emplace_back(&opnd0); + opnds.emplace_back(&opnd1); + opnds.emplace_back(&opnd2); + } + Insn(MemPool &memPool, MOperator opc, Operand &opnd0, Operand &opnd1, Operand &opnd2, Operand &opnd3) + : Insn(memPool, opc) { + opnds.emplace_back(&opnd0); + opnds.emplace_back(&opnd1); + opnds.emplace_back(&opnd2); + opnds.emplace_back(&opnd3); + } + Insn(MemPool &memPool, MOperator opc, Operand &opnd0, Operand &opnd1, Operand &opnd2, Operand &opnd3, Operand &opnd4) + : Insn(memPool, opc) { + opnds.emplace_back(&opnd0); + opnds.emplace_back(&opnd1); + opnds.emplace_back(&opnd2); + opnds.emplace_back(&opnd3); + opnds.emplace_back(&opnd4); + } + virtual ~Insn() = default; + + MOperator GetMachineOpcode() const { + return mOp; + } + + void SetMOP(const InsnDesc &idesc); + + void AddOperand(Operand &opnd) { + opnds.emplace_back(&opnd); + } + + Insn &AddOpndChain(Operand &opnd) { + AddOperand(opnd); + return *this; + } + /* use carefully which might cause insn to illegal */ + void CommuteOperands(uint32 dIndex, uint32 sIndex); + void CleanAllOperand() { + opnds.clear(); + } + + void 
PopBackOperand() { + opnds.pop_back(); + } + + Operand &GetOperand(uint32 index) const { + ASSERT(index < opnds.size(), "index out of range"); + return *opnds[index]; + } + + void ResizeOpnds(uint32 newSize) { + opnds.resize(static_cast(newSize)); + } + + uint32 GetOperandSize() const { + return static_cast(opnds.size()); + } + + void SetOperand(uint32 index, Operand &opnd) { + ASSERT(index <= opnds.size(), "index out of range"); + opnds[index] = &opnd; + } + + void SetRetSize(uint32 size) { + ASSERT(IsCall(), "Insn should be a call."); + retSize = size; + } + + uint32 GetRetSize() const { + ASSERT(IsCall(), "Insn should be a call."); + return retSize; + } + + virtual bool IsMachineInstruction() const; + + bool OpndIsDef(uint32 id) const; + + bool OpndIsUse(uint32 id) const; + + virtual bool IsPCLoad() const { + return false; + } + + Operand *GetMemOpnd() const; + + void SetMemOpnd(MemOperand *memOpnd); + + bool IsCall() const; + bool IsTailCall() const; + bool IsAsmInsn() const; + bool IsClinit() const; + bool CanThrow() const; + bool MayThrow() const; + bool IsBranch() const; + bool IsCondBranch() const; + bool IsUnCondBranch() const; + bool IsMove() const; + bool IsBasicOp() const; + bool IsUnaryOp() const; + bool IsShift() const; + bool IsPhi() const; + bool IsLoad() const; + bool IsStore() const; + bool IsConversion() const; + bool IsAtomic() const; + + bool IsLoadPair() const; + bool IsStorePair() const; + bool IsLoadStorePair() const; + bool IsLoadLabel() const; + + virtual bool NoAlias() const { + return false; + } + + bool IsVolatile() const; + + bool IsMemAccessBar() const; + + bool IsMemAccess() const; + + virtual bool HasSideEffects() const { + return false; + } + + bool HasLoop() const; + + virtual bool IsSpecialIntrinsic() const; + + bool IsComment() const; + bool IsImmaterialInsn() const; + + virtual bool IsTargetInsn() const { + return true; + } + + virtual bool IsCfiInsn() const { + return false; + } + + virtual bool IsDbgInsn() const { + return false; + } + + bool IsDMBInsn() const; + + bool IsVectorOp() const; + + virtual Operand *GetCallTargetOperand() const; + + uint32 GetAtomicNum() const; + /* + * returns a ListOperand + * Note that we don't really need this for Emit + * Rather, we need it for register allocation, to + * correctly state the live ranges for operands + * use for passing call arguments + */ + virtual ListOperand *GetCallArgumentOperand(); + bool IsAtomicStore() const { + return IsStore() && IsAtomic(); + } + + void SetCondDef() { + flags |= kOpCondDef; + } + + bool IsCondDef() const { + return flags & kOpCondDef; + } + + bool AccessMem() const { + return IsLoad() || IsStore(); + } + + bool IsFrameDef() const { + return isFrameDef; + } + + void SetFrameDef(bool b) { + isFrameDef = b; + } + + bool IsStackDef() const { + return isStackDef; + } + + void SetStackDef(bool flag) { + isStackDef = flag; + } + + bool IsAsmDefCondCode() const { + return asmDefCondCode; + } + + void SetAsmDefCondCode() { + asmDefCondCode = true; + } + + bool IsAsmModMem() const { + return asmModMem; + } + + void SetAsmModMem() { + asmModMem = true; + } + + virtual uint32 GetUnitType() { + return 0; + } + + virtual void Dump() const; + +#if DEBUG + virtual void Check() const; +#endif + + void SetComment(const std::string &str) { + comment = str; + } + + void SetComment(const MapleString &str) { + comment = str; + } + + const MapleString &GetComment() const { + return comment; + } + + void AppendComment(const std::string &str) { + comment += str; + } + + void 
MarkAsSaveRetValToLocal() { + flags |= kOpDassignToSaveRetValToLocal; + } + + bool IsSaveRetValToLocal() const { + return ((flags & kOpDassignToSaveRetValToLocal) != 0); + } + + void MarkAsAccessRefField(bool cond) { + if (cond) { + flags |= kOpAccessRefField; + } + } + + bool IsAccessRefField() const { + return ((flags & kOpAccessRefField) != 0); + } + + bool IsIntRegisterMov() const { + if (md == nullptr || !md->IsPhysicalInsn() || !md->IsMove() || + md->opndMD.size() != kOperandNumBinary) { + return false; + } + auto firstMD = md->GetOpndDes(kFirstOpnd); + auto secondMD = md->GetOpndDes(kSecondOpnd); + if (!firstMD->IsRegister() || !secondMD->IsRegister() || + !firstMD->IsIntOperand() || !secondMD->IsIntOperand() || + firstMD->GetSize() != secondMD->GetSize()) { + return false; + } + return true; + } + + Insn *GetPreviousMachineInsn() const { + for (Insn *returnInsn = prev; returnInsn != nullptr; returnInsn = returnInsn->prev) { + ASSERT(returnInsn->bb == bb, "insn and it's prev insn must have same bb"); + if (returnInsn->IsMachineInstruction()) { + return returnInsn; + } + } + return nullptr; + } + + Insn *GetNextMachineInsn() const { + for (Insn *returnInsn = next; returnInsn != nullptr; returnInsn = returnInsn->next) { + CHECK_FATAL(returnInsn->bb == bb, "insn and it's next insn must have same bb"); + if (returnInsn->IsMachineInstruction()) { + return returnInsn; + } + } + return nullptr; + } + + uint32 GetLatencyType() const; + + void SetPrev(Insn *prevInsn) { + this->prev = prevInsn; + } + + Insn *GetPrev() { + return prev; + } + + const Insn *GetPrev() const { + return prev; + } + + void SetNext(Insn *nextInsn) { + this->next = nextInsn; + } + + Insn *GetNext() const { + return next; + } + + void SetBB(BB *newBB) { + this->bb = newBB; + } + + BB *GetBB() { + return bb; + } + + const BB *GetBB() const { + return bb; + } + + void SetId(uint32 idVal) { + this->id = idVal; + } + + uint32 GetId() const { + return id; + } + + void SetAddress(uint32 addr) { + address = addr; + } + + uint32 GetAddress() const { + return address; + } + + void SetNopNum(uint32 num) { + nopNum = num; + } + + uint32 GetNopNum() const { + return nopNum; + } + + void SetNeedSplit(bool flag) { + needSplit = flag; + } + + bool IsNeedSplit() const { + return needSplit; + } + + void SetIsThrow(bool isThrowVal) { + this->isThrow = isThrowVal; + } + + bool GetIsThrow() const { + return isThrow; + } + + void SetDoNotRemove(bool doNotRemoveVal) { + this->doNotRemove = doNotRemoveVal; + } + + bool GetDoNotRemove() const { + return doNotRemove; + } + + void SetIsSpill() { + this->isSpill = true; + } + + bool GetIsSpill() const { + return isSpill; + } + + void SetIsReload() { + this->isReload = true; + } + + bool GetIsReload() const { + return isReload; + } + + bool IsSpillInsn() const { + return (isSpill || isReload); + } + + void SetIsCallReturnUnsigned(bool unSigned) { + ASSERT(IsCall(), "Insn should be a call."); + this->isCallReturnUnsigned = unSigned; + } + + bool GetIsCallReturnUnsigned() const { + ASSERT(IsCall(), "Insn should be a call."); + return isCallReturnUnsigned; + } + + bool GetIsCallReturnSigned() const { + ASSERT(IsCall(), "Insn should be a call."); + return !isCallReturnUnsigned; + } + + void SetRetType(RetType retTy) { + this->retType = retTy; + } + + RetType GetRetType() const { + return retType; + } + + void SetClearStackOffset(short index, int64 offset) { + CHECK_FATAL(index < kMaxStackOffsetSize, "out of clearStackOffset's range"); + clearStackOffset[index] = offset; + } + + int64 
GetClearStackOffset(short index) const { + CHECK_FATAL(index < kMaxStackOffsetSize, "out of clearStackOffset's range"); + return clearStackOffset[index]; + } + + /* if function name is MCC_ClearLocalStackRef or MCC_DecRefResetPair, will clear designate stack slot */ + bool IsClearDesignateStackCall() const { + return clearStackOffset[0] != -1 || clearStackOffset[1] != -1; + } + + void SetDepNode(DepNode &depNodeVal) { + this->depNode = &depNodeVal; + } + + DepNode *GetDepNode() { + return depNode; + } + + const DepNode *GetDepNode() const { + return depNode; + } + + void SetIsPhiMovInsn(bool val) { + isPhiMovInsn = val; + } + + bool IsPhiMovInsn() const { + return isPhiMovInsn; + } + + Insn *Clone(MemPool &memPool) const; + + void SetInsnDescrption(const InsnDesc &newMD) { + md = &newMD; + } + + const InsnDesc *GetDesc() const { + return md; + } + + void AddRegBinding(uint32 regA, uint32 regB) { + (void)registerBinding.emplace(regA, regB); + } + + const MapleMap& GetRegBinding() const { + return registerBinding; + } + + void SetRefSkipIdx(int32 index) { + refSkipIdx = index; + } + + /* Get Size of memory write/read by insn */ + uint32 GetMemoryByteSize() const; + + /* return ture if register appears */ + virtual bool ScanReg(regno_t regNO) const; + + virtual bool IsRegDefined(regno_t regNO) const; + + virtual std::set GetDefRegs() const; + + virtual uint32 GetBothDefUseOpnd() const; + + RegOperand *GetSSAImpDefOpnd() { + return ssaImplicitDefOpnd; + } + + void SetSSAImpDefOpnd(RegOperand *ssaDef) { + ssaImplicitDefOpnd = ssaDef; + } + + void SetProcessRHS() { + processRHS = true; + } + + bool HasProcessedRHS() const { + return processRHS; + } + + protected: + MOperator mOp; + MapleAllocator localAlloc; + MapleVector opnds; + RegOperand *ssaImplicitDefOpnd = nullptr; /* for the opnd is both def and use is ssa */ + Insn *prev = nullptr; + Insn *next = nullptr; + BB *bb = nullptr; /* BB to which this insn belongs */ + uint32 flags = 0; + bool isPhiMovInsn = false; + + private: + MapleMap registerBinding; /* used for inline asm only */ + enum OpKind : uint32 { + kOpUnknown = 0, + kOpCondDef = 0x1, + kOpAccessRefField = (1ULL << 30), /* load-from/store-into a ref flag-fieldGetMachineOpcode() */ + kOpDassignToSaveRetValToLocal = (1ULL << 31) /* save return value to local flag */ + }; + + uint32 id = 0; + uint32 address = 0; + uint32 nopNum = 0; + RetType retType = kRegNull; /* if this insn is call, it represent the return register type R0/V0 */ + uint32 retSize = 0; /* Byte size of the return value if insn is a call. */ + /* record the stack cleared by MCC_ClearLocalStackRef or MCC_DecRefResetPair */ + int64 clearStackOffset[kMaxStackOffsetSize] = { -1, -1 }; + DepNode *depNode = nullptr; /* For dependence analysis, pointing to a dependence node. */ + MapleString comment; + bool isThrow = false; + bool doNotRemove = false; /* caller reg cross call */ + bool isCallReturnUnsigned = false; /* for call insn only. false: signed, true: unsigned */ + bool isSpill = false; /* used as hint for optimization */ + bool isReload = false; /* used as hint for optimization */ + bool isFrameDef = false; + bool isStackDef = false; + bool asmDefCondCode = false; + bool asmModMem = false; + bool needSplit = false; + + /* for dynamic language to mark reference counting */ + int32 refSkipIdx = -1; + + /* for multiple architecture */ + const InsnDesc *md = nullptr; + /* + * for redundant compute elimination phase, + * indicate whether the version has been processed. 
+ */ + bool processRHS = false; +}; + +struct VectorRegSpec { + VectorRegSpec() : vecLane(-1), vecLaneMax(0), vecElementSize(0), compositeOpnds(0) {} + + VectorRegSpec(PrimType type, int16 lane = -1, uint16 compositeOpnds = 0) : + vecLane(lane), + vecLaneMax(GetVecLanes(type)), + vecElementSize(GetVecEleSize(type)), + compositeOpnds(compositeOpnds) {} + + VectorRegSpec(uint16 laneNum, uint16 eleSize, int16 lane = -1, uint16 compositeOpnds = 0) : + vecLane(lane), + vecLaneMax(laneNum), + vecElementSize(eleSize), + compositeOpnds(compositeOpnds) {} + + int16 vecLane; /* -1 for whole reg, 0 to 15 to specify individual lane */ + uint16 vecLaneMax; /* Maximum number of lanes for this vregister */ + uint16 vecElementSize; /* element size in each Lane */ + uint16 compositeOpnds; /* Number of enclosed operands within this composite operand */ +}; + +class VectorInsn : public Insn { + public: + VectorInsn(MemPool &memPool, MOperator opc) + : Insn(memPool, opc), + regSpecList(localAlloc.Adapter()) { + regSpecList.clear(); + } + + ~VectorInsn() override = default; + + void ClearRegSpecList() { + regSpecList.clear(); + } + + VectorRegSpec *GetAndRemoveRegSpecFromList(); + + size_t GetNumOfRegSpec() const { + if (IsVectorOp() && !regSpecList.empty()) { + return regSpecList.size(); + } + return 0; + } + + MapleVector &GetRegSpecList() { + return regSpecList; + } + + void SetRegSpecList(const MapleVector &vec) { + regSpecList = vec; + } + + VectorInsn &PushRegSpecEntry(VectorRegSpec *v) { + (void)regSpecList.emplace(regSpecList.begin(), v); + return *this; + } + + private: + MapleVector regSpecList; +}; + +struct InsnIdCmp { + bool operator()(const Insn *lhs, const Insn *rhs) const { + CHECK_FATAL(lhs != nullptr, "lhs is nullptr in InsnIdCmp"); + CHECK_FATAL(rhs != nullptr, "rhs is nullptr in InsnIdCmp"); + return lhs->GetId() < rhs->GetId(); + } +}; +using InsnSet = std::set; +using InsnMapleSet = MapleSet; +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_INSN_H */ diff --git a/src/mapleall/maple_be/include/cg/instruction_selection.h b/src/mapleall/maple_be/include/cg/instruction_selection.h new file mode 100644 index 0000000000000000000000000000000000000000..00568de66be166b2a8b088aeb24758cb805abcf3 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/instruction_selection.h @@ -0,0 +1,41 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ + +#ifndef MAPLEBE_INCLUDE_INSTRUCTION_SELECTION_H +#define MAPLEBE_INCLUDE_INSTRUCTION_SELECTION_H + +#include "maple_phase_manager.h" + +namespace maplebe { + +class InsnSel { + public: + explicit InsnSel(CGFunc &tempCGFunc) : cgFunc(&tempCGFunc) {} + + virtual ~InsnSel() = default; + + virtual bool InsnSel() = 0; + + protected: + CGFunc *cgFunc; +}; + +MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgIsel, maplebe::CGFunc) +MAPLE_FUNC_PHASE_DECLARE_END + +} /* namespace maplebe */ + + +#endif /* MAPLEBE_INCLUDE_INSTRUCTION_SELECTION_H */ diff --git a/src/mapleall/maple_be/include/cg/isa.h b/src/mapleall/maple_be/include/cg/isa.h new file mode 100644 index 0000000000000000000000000000000000000000..5948612f8194033a3321b1c768d440052b3df923 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/isa.h @@ -0,0 +1,299 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_CG_ISA_H +#define MAPLEBE_INCLUDE_CG_ISA_H + +#include +#include "types_def.h" +#include "operand.h" + +namespace maplebe { +enum MopProperty : maple::uint8 { + kInsnIsAbstract, + kInsnIsMove, + kInsnIsLoad, + kInsnIsLoadPair, + kInsnIsStore, + kInsnIsStorePair, + kInsnIsLoadAddress, + kInsnIsAtomic, + kInsnIsCall, + kInsnIsTailCall, + kInsnIsConversion, + kInsnIsCondDef, + kInsnHasAcqure, + kInsnHasAcqureRCpc, + kInsnHasLOAcqure, + kInsnHasRelease, + kInsnHasLORelease, + kInsnCanThrow, + kInsnIsDMB, + kInsnIsUnCondBr, + kInsnIsCondBr, + kInsnHasLoop, + kInsnIsVectorOp, + kInsnIsBinaryOp, + kInsnIsPhi, + kInsnIsUnaryOp, + kInsnIsShift, + kInsnInlineAsm, + kInsnSpecialIntrisic, + kInsnIsNop, +}; +using regno_t = uint32_t; +#define ISABSTRACT 1ULL +#define ISMOVE (1ULL << kInsnIsMove) +#define ISLOAD (1ULL << kInsnIsLoad) +#define ISLOADPAIR (1ULL << kInsnIsLoadPair) +#define ISSTORE (1ULL << kInsnIsStore) +#define ISSTOREPAIR (1ULL << kInsnIsStorePair) +#define ISLOADADDR (1ULL << kInsnIsLoadAddress) +#define ISATOMIC (1ULL << kInsnIsAtomic) +#define ISCALL (1ULL << kInsnIsCall) +#define ISTAILCALL (1ULL << kInsnIsTailCall) +#define ISCONVERSION (1ULL << kInsnIsConversion) +#define ISCONDDEF (1ULL << kInsnIsCondDef) +#define HASACQUIRE (1ULL << kInsnHasAcqure) +#define HASACQUIRERCPC (1ULL << kInsnHasAcqureRCpc) +#define HASLOACQUIRE (1ULL << kInsnHasLOAcqure) +#define HASRELEASE (1ULL << kInsnHasRelease) +#define HASLORELEASE (1ULL << kInsnHasLORelease) +#define CANTHROW (1ULL << kInsnCanThrow) +#define ISDMB (1ULL << kInsnIsDMB) +#define ISUNCONDBRANCH (1ULL << kInsnIsUnCondBr) +#define ISCONDBRANCH (1ULL << kInsnIsCondBr) +#define HASLOOP (1ULL << kInsnHasLoop) +#define ISVECTOR (1ULL << kInsnIsVectorOp) +#define ISBASICOP (1ULL << kInsnIsBinaryOp) +#define ISPHI (1ULL << kInsnIsPhi) +#define ISUNARYOP (1ULL << kInsnIsUnaryOp) +#define ISSHIFT (1ULL << kInsnIsShift) +#define INLINEASM (1ULL << kInsnInlineAsm) +#define SPINTRINSIC (1ULL << kInsnSpecialIntrisic) +#define ISNOP (1ULL << kInsnIsNop) +constexpr maplebe::regno_t kInvalidRegNO = 0; + +/* + * ARM64 has 32 int registes and 32 FP 
registers. + * AMD64/X86_64 has 16 int registes, and 16 FP registers. + * In either case, the corresponding calling conventions use + * the smaller number of caller-saved registers. + * 64 bit is not large enough? + */ +using CsrBitset = uint64_t; + +template +class ConstraintFunction { + public: + using CfPointer = bool (*) (ParaType); + bool CheckConstraint(CfPointer ccfunc, ParaType a) const { + return (*ccfunc)(a); + } +}; + +/* + * abstract machine instruction + * a lower-level maple IR which is aimed to represent general machine instruction for extreme cpus + * 1. Support conversion between all types and registers + * 2. Support conversion between memory and registers + * 3. Support three address basic operations + * + */ +namespace abstract { +#define DEFINE_MOP(op, ...) op, +enum AbstractMOP_t : maple::uint32 { +#include "abstract_mmir.def" + kMopLast +}; +#undef DEFINE_MOP +} + +struct InsnDesc { + MOperator opc; + std::vector opndMD; + uint64 properties; + uint32 latencyType; + const std::string &name; + const std::string &format; + uint32 atomicNum; /* indicate how many asm instructions it will emit. */ + std::function validFunc = nullptr; /* If insn has immOperand, this function needs to be implemented. */ + + bool IsSame(const InsnDesc &left, + std::function cmp) const; + + bool IsCall() const { + return (properties & ISCALL) != 0; + } + bool IsTailCall() const { + return properties & ISTAILCALL; + } + bool IsPhi() const { + return (properties & ISPHI) != 0; + } + bool IsPhysicalInsn() const { + return (properties & ISABSTRACT) == 0; + } + bool IsStore() const { + return (properties & ISSTORE) != 0; + } + bool IsLoad() const { + return (properties & ISLOAD) != 0; + } + bool IsConversion() const { + return (properties & ISCONVERSION) != 0; + } + bool IsLoadPair() const { + return (properties & (ISLOADPAIR)) != 0; + } + bool IsStorePair() const { + return (properties & (ISSTOREPAIR)) != 0; + } + bool IsLoadStorePair() const { + return (properties & (ISLOADPAIR | ISSTOREPAIR)) != 0; + } + bool IsMove() const { + return (properties & ISMOVE) != 0; + } + bool IsDMB() const { + return (properties & (ISDMB)) != 0; + } + bool IsBasicOp() const { + return (properties & ISBASICOP) != 0; + } + bool IsCondBranch() const { + return (properties & (ISCONDBRANCH)) != 0; + } + bool IsUnCondBranch() const { + return (properties & (ISUNCONDBRANCH)) != 0; + } + bool IsLoadAddress() const { + return (properties & (ISLOADADDR)) != 0; + } + bool IsAtomic() const { + return (properties & ISATOMIC) != 0; + } + bool IsCondDef() const { + return (properties & ISCONDDEF) != 0; + } + bool IsVectorOp() const { + return (properties & ISVECTOR) != 0; + } + bool IsVolatile() const { + return ((properties & HASRELEASE) != 0) || ((properties & HASACQUIRE) != 0); + } + bool IsMemAccessBar() const { + return (properties & (HASRELEASE | HASACQUIRE | HASACQUIRERCPC | HASLOACQUIRE | HASLORELEASE)) != 0; + } + bool IsMemAccess() const { + return (properties & (ISLOAD | ISSTORE | ISLOADPAIR | ISSTOREPAIR)) != 0; + } + bool IsBranch() const { + return (properties & (ISCONDBRANCH | ISUNCONDBRANCH)) != 0; + } + bool HasLoop() const { + return (properties & HASLOOP) != 0; + } + bool CanThrow() const { + return (properties & CANTHROW) != 0; + } + bool IsInlineAsm() const { + return properties & INLINEASM; + } + bool IsSpecialIntrinsic() const { + return properties & SPINTRINSIC; + } + MOperator GetOpc() const { + return opc; + } + const OpndDesc *GetOpndDes(size_t index) const { + return opndMD[index]; + } + uint32 
GetOperandSize() const { + if (properties & (ISLOAD | ISSTORE)) { + /* use memory operand */ + return GetOpndDes(1)->GetSize(); + } + /* use dest operand */ + return GetOpndDes(0)->GetSize(); + } + bool Is64Bit() const { + return GetOperandSize() == k64BitSize; + } + bool IsValidImmOpnd(int64 val) const { + if (!validFunc) { + return true; + } + return validFunc(val); + } + uint32 GetLatencyType() const { + return latencyType; + } + bool IsUnaryOp() const { + return (properties & ISUNARYOP) != 0; + } + bool IsShift() const { + return (properties & ISSHIFT) != 0; + } + const std::string &GetName() const { + return name; + } + const std::string &GetFormat() const { + return format; + } + uint32 GetAtomicNum() const { + return atomicNum; + } + static const InsnDesc &GetAbstractId(MOperator mOperator) { + ASSERT(mOperator < abstract::kMopLast, "op must be lower than kMopLast"); + return abstractId[mOperator]; + } + static const InsnDesc abstractId[abstract::kMopLast]; +}; + +enum RegAddress : uint32 { + kRegHigh = 0x4, + kRegLow = 0x8 +}; +constexpr uint32 kMemLow12 = 0x10; +constexpr uint32 kLiteralLow12 = kMemLow12; +constexpr uint32 kPreInc = 0x20; +constexpr uint32 kPostInc = 0x40; +constexpr uint32 kLoadLiteral = 0x80; + +enum BitIndex : maple::uint8 { + k8BitIndex = 0, + k16BitIndex, + k32BitIndex, + k64BitIndex, + kBitIndexEnd, +}; + +static inline BitIndex GetBitIndex(uint32 bitSize) { + switch (bitSize) { + case k8BitSize: + return k8BitIndex; + case k16BitSize: + return k16BitIndex; + case k32BitSize: + return k32BitIndex; + case k64BitSize: + return k64BitIndex; + default: + CHECK_FATAL(false, "NIY, Not support size"); + } +} +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_ISA_H */ diff --git a/src/mapleall/maple_be/include/cg/isel.h b/src/mapleall/maple_be/include/cg/isel.h new file mode 100644 index 0000000000000000000000000000000000000000..fe28a9c9bb7d71e8ce7bdba7cc95f35c56fbd178 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/isel.h @@ -0,0 +1,151 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
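InsnDesc in isa.h above answers queries such as IsLoad() and IsMemAccess() by testing bits of a 64-bit properties word built from the ISLOAD/ISSTORE/... masks. A simplified, self-contained sketch of that encoding; the names mirror the header, but the enum values and descriptors here are illustrative only:

#include <cstdint>
#include <iostream>

enum MiniMopProperty : uint8_t { kIsMove, kIsLoad, kIsStore, kIsCall };
constexpr uint64_t ISMOVE  = 1ULL << kIsMove;
constexpr uint64_t ISLOAD  = 1ULL << kIsLoad;
constexpr uint64_t ISSTORE = 1ULL << kIsStore;
constexpr uint64_t ISCALL  = 1ULL << kIsCall;

struct MiniInsnDesc {                 // stand-in for InsnDesc
  uint64_t properties;
  bool IsLoad() const { return (properties & ISLOAD) != 0; }
  bool IsMemAccess() const { return (properties & (ISLOAD | ISSTORE)) != 0; }
};

int main() {
  MiniInsnDesc ldr{ISLOAD};           // a load-like instruction description
  MiniInsnDesc mov{ISMOVE};
  std::cout << ldr.IsMemAccess() << ' ' << mov.IsMemAccess() << '\n';  // 1 0
  return 0;
}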
+ */ + +#ifndef MAPLEBE_INCLUDE_CG_ISEL_H +#define MAPLEBE_INCLUDE_CG_ISEL_H + +#include "cgfunc.h" + +namespace maplebe { +struct MirTypeInfo { + PrimType primType; + int32 offset = 0; + uint32 size = 0; /* for aggType */ +}; +/* macro expansion instruction selection */ +class MPISel { + public: + MPISel(MemPool &mp, CGFunc &f) : isMp(&mp), cgFunc(&f) {} + + virtual ~MPISel() { + isMp = nullptr; + cgFunc = nullptr; + } + + void DoMPIS(); + + CGFunc *GetCurFunc() const { + return cgFunc; + } + + Operand *HandleExpr(const BaseNode &parent, BaseNode &expr); + + void SelectDassign(const DassignNode &stmt, Operand &opndRhs); + void SelectDassignoff(DassignoffNode &stmt, Operand &opnd0); + void SelectIassign(const IassignNode &stmt, Operand &opndAddr, Operand &opndRhs); + void SelectIassignoff(const IassignoffNode &stmt); + RegOperand *SelectRegread(RegreadNode &expr); + void SelectRegassign(RegassignNode &stmt, Operand &opnd0); + Operand* SelectDread(const BaseNode &parent, const AddrofNode &expr); + Operand* SelectBand(const BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent); + Operand* SelectAdd(const BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent); + Operand* SelectSub(const BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent); + Operand* SelectNeg(const UnaryNode &node, Operand &opnd0, const BaseNode &parent); + Operand* SelectCvt(const BaseNode &parent, const TypeCvtNode &node, Operand &opnd0); + Operand* SelectExtractbits(const BaseNode &parent, const ExtractbitsNode &node, Operand &opnd0); + Operand *SelectDepositBits(const DepositbitsNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent); + Operand *SelectAbs(UnaryNode &node, Operand &opnd0); + Operand *SelectAlloca(UnaryNode &node, Operand &opnd0); + Operand *SelectCGArrayElemAdd(BinaryNode &node, const BaseNode &parent); + ImmOperand *SelectIntConst(MIRIntConst &intConst, PrimType primType) const; + Operand *SelectLiteral(MIRDoubleConst &c, MIRFunction &func, uint32 labelIdx) const; + void SelectCallCommon(StmtNode &stmt, const MPISel &iSel); + void SelectAdd(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType); + void SelectSub(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType); + Operand *SelectShift(const BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent); + void SelectShift(Operand &resOpnd, Operand &opnd0, Operand &opnd1, Opcode shiftDirect, + PrimType opnd0Type, PrimType opnd1Type); + void SelectBand(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType); + virtual void SelectReturn(NaryStmtNode &retNode, Operand &opnd) = 0; + virtual void SelectReturn() = 0; + virtual void SelectIntAggCopyReturn(MemOperand &symbolMem, uint64 aggSize) = 0; + virtual void SelectAggIassign(IassignNode &stmt, Operand &AddrOpnd, Operand &opndRhs) = 0; + virtual void SelectAggCopy(MemOperand &lhs, MemOperand &rhs, uint32 copySize) = 0; + virtual void SelectGoto(GotoNode &stmt) = 0; + virtual void SelectRangeGoto(RangeGotoNode &rangeGotoNode, Operand &srcOpnd) = 0; + virtual void SelectIgoto(Operand &opnd0) = 0; + virtual void SelectCall(CallNode &callNode) = 0; + virtual void SelectIcall(IcallNode &icallNode, Operand &opnd0) = 0; + virtual void SelectIntrinCall(IntrinsiccallNode &intrinsiccallNode) = 0; + virtual Operand *SelectDoubleConst(MIRDoubleConst &intConst, PrimType primType) const = 0; + virtual Operand *SelectAddrof(AddrofNode &expr, const BaseNode &parent) = 0; + virtual Operand 
*SelectAddrofFunc(AddroffuncNode &expr, const BaseNode &parent) = 0; + virtual Operand *SelectAddrofLabel(AddroflabelNode &expr, const BaseNode &parent) = 0; + virtual Operand &ProcessReturnReg(PrimType primType, int32 sReg) = 0 ; + virtual void SelectCondGoto(CondGotoNode &stmt, BaseNode &condNode, Operand &opnd0) = 0; + Operand *SelectBior(const BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent); + Operand *SelectBxor(const BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent); + Operand *SelectIread(const BaseNode &parent, const IreadNode &expr, int extraOffset = 0); + Operand *SelectIreadoff(const BaseNode &parent, const IreadoffNode &ireadoff); + virtual Operand *SelectMpy(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) = 0; + virtual Operand *SelectDiv(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) = 0; + virtual Operand *SelectRem(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) = 0; + virtual Operand *SelectCmpOp(CompareNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) = 0; + virtual Operand *SelectSelect(TernaryNode &expr, Operand &cond, Operand &trueOpnd, Operand &falseOpnd, + const BaseNode &parent) = 0; + virtual Operand *SelectStrLiteral(ConststrNode &constStr) = 0; + virtual Operand *SelectBswap(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) = 0; + virtual void SelectAsm(AsmNode &node) = 0; + virtual void SelectAggDassign(MirTypeInfo &lhsInfo, MemOperand &symbolMem, Operand &opndRhs) = 0; + Operand *SelectBnot(const UnaryNode &node, Operand &opnd0, const BaseNode &parent); + Operand *SelectMin(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent); + Operand *SelectMax(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent); + Operand *SelectRetype(TypeCvtNode &node, Operand &opnd0); + protected: + MemPool *isMp; + CGFunc *cgFunc; + void SelectCopy(Operand &dest, Operand &src, PrimType toType, PrimType fromType); + void SelectCopy(Operand &dest, Operand &src, PrimType toType); + RegOperand &SelectCopy2Reg(Operand &src, PrimType toType, PrimType fromType); + RegOperand &SelectCopy2Reg(Operand &src, PrimType toType); + void SelectIntCvt(RegOperand &resOpnd, Operand &opnd0, PrimType toType, PrimType fromType); + void SelectCvtInt2Float(RegOperand &resOpnd, Operand &origOpnd0, PrimType toType, PrimType fromType); + void SelectFloatCvt(RegOperand &resOpnd, Operand &opnd0, PrimType toType, PrimType fromType); + void SelectCvtFloat2Int(RegOperand &resOpnd, Operand &origOpnd0, PrimType toType, PrimType fromType); + PrimType GetIntegerPrimTypeFromSize(bool isSigned, uint32 bitSize) const; + std::pair GetFieldIdAndMirTypeFromMirNode(const BaseNode &node); + MirTypeInfo GetMirTypeInfoFormFieldIdAndMirType(FieldID fieldId, MIRType *mirType); + MirTypeInfo GetMirTypeInfoFromMirNode(const BaseNode &node); + MemOperand *GetOrCreateMemOpndFromIreadNode(const IreadNode &expr, PrimType primType, int offset); + private: + StmtNode *HandleFuncEntry(); + void HandleFuncExit() const; + void SelectDassign(StIdx stIdx, FieldID fieldId, PrimType rhsPType, Operand &opndRhs); + void SelectDassignStruct(MIRSymbol &symbol, MemOperand &symbolMem, Operand &opndRhs); + virtual MemOperand &GetOrCreateMemOpndFromSymbol(const MIRSymbol &symbol, FieldID fieldId = 0) const = 0; + virtual MemOperand &GetOrCreateMemOpndFromSymbol(const MIRSymbol &symbol, uint32 opndSize, int64 offset) const = 0; + virtual Operand 
&GetTargetRetOperand(PrimType primType, int32 sReg) = 0; + void SelectBasicOp(Operand &resOpnd, Operand &opnd0, Operand &opnd1, MOperator mOp, PrimType primType); + /* + * Support conversion between all types and registers + * only Support conversion between registers and memory + * alltypes -> reg -> mem + */ + void SelectCopyInsn(Operand &dest, Operand &src, PrimType type); + void SelectNeg(Operand &resOpnd, Operand &opnd0, PrimType primType) const; + void SelectBnot(Operand &resOpnd, Operand &opnd0, PrimType primType); + void SelectBior(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType); + void SelectExtractbits(RegOperand &resOpnd, RegOperand &opnd0, uint8 bitOffset, uint8 bitSize, PrimType primType); + void SelectBxor(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType); + virtual RegOperand &GetTargetBasicPointer(PrimType primType) = 0; + virtual RegOperand &GetTargetStackPointer(PrimType primType) = 0; + void SelectMin(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType); + void SelectMax(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType); + virtual void SelectMinOrMax(bool isMin, Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) = 0; +}; +MAPLE_FUNC_PHASE_DECLARE_BEGIN(InstructionSelector, maplebe::CGFunc) +MAPLE_FUNC_PHASE_DECLARE_END +} +#endif /* MAPLEBE_INCLUDE_CG_ISEL_H */ diff --git a/src/mapleall/maple_be/include/cg/isolate_fastpath.h b/src/mapleall/maple_be/include/cg/isolate_fastpath.h new file mode 100644 index 0000000000000000000000000000000000000000..9bdb58cae8daed8a2f6f34a6aac6ad61c76aac17 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/isolate_fastpath.h @@ -0,0 +1,38 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_CG_ISOLATE_FASTPATH_H +#define MAPLEBE_INCLUDE_CG_ISOLATE_FASTPATH_H +#include "cgfunc.h" + +namespace maplebe { +class IsolateFastPath { + public: + explicit IsolateFastPath(CGFunc &func) + : cgFunc(func) {} + + virtual ~IsolateFastPath() = default; + + virtual void Run() {} + + std::string PhaseName() const { + return "isolate_fastpath"; + } + + protected: + CGFunc &cgFunc; +}; +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_ISOLATE_FASTPATH_H */ diff --git a/src/mapleall/maple_be/include/cg/label_creation.h b/src/mapleall/maple_be/include/cg/label_creation.h new file mode 100644 index 0000000000000000000000000000000000000000..765fb3dc347b52e765b8f2b1ba553fc09adf5aee --- /dev/null +++ b/src/mapleall/maple_be/include/cg/label_creation.h @@ -0,0 +1,44 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
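The comment on MPISel::SelectCopyInsn in isel.h above describes the lowering discipline: arbitrary values are first brought into a register, and only registers are copied to or from memory ("alltypes -> reg -> mem"). A schematic stand-alone sketch of that two-step copy, using hypothetical Imm/Reg/Mem value kinds and textual pseudo-instructions rather than the real operand classes:

#include <iostream>
#include <string>
#include <vector>

enum class Kind { kImm, kReg, kMem };       // hypothetical operand kinds
struct Val { Kind kind; std::string name; };

std::vector<std::string> emitted;           // emitted pseudo instructions

Val CopyToReg(const Val &src) {             // anything -> register
  if (src.kind == Kind::kReg) return src;
  Val tmp{Kind::kReg, "tmp"};
  emitted.push_back("mov " + tmp.name + ", " + src.name);
  return tmp;
}

void SelectCopy(const Val &dest, const Val &src) {
  if (dest.kind == Kind::kMem) {
    Val reg = CopyToReg(src);               // memory destinations only accept registers
    emitted.push_back("str " + reg.name + ", " + dest.name);
  } else {
    emitted.push_back("mov " + dest.name + ", " + src.name);
  }
}

int main() {
  SelectCopy({Kind::kMem, "[sp,#16]"}, {Kind::kImm, "#42"});  // imm -> reg -> mem
  for (const auto &line : emitted) std::cout << line << '\n';
  return 0;
}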
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_CG_LABEL_CREATION_H +#define MAPLEBE_INCLUDE_CG_LABEL_CREATION_H + +#include "cgfunc.h" +#include "cg_phase.h" +#include "mir_builder.h" + +namespace maplebe { +class LabelCreation { + public: + explicit LabelCreation(CGFunc &func) : cgFunc(&func) {} + + ~LabelCreation() = default; + + void Run(); + + std::string PhaseName() const { + return "createlabel"; + } + + private: + CGFunc *cgFunc; + void CreateStartEndLabel() const; +}; + +MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgCreateLabel, maplebe::CGFunc) +MAPLE_FUNC_PHASE_DECLARE_END +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_LABEL_CREATION_H */ \ No newline at end of file diff --git a/src/mapleall/maple_be/include/cg/live.h b/src/mapleall/maple_be/include/cg/live.h new file mode 100644 index 0000000000000000000000000000000000000000..8286afc657dda5210e5e0b881fba408d1fc78604 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/live.h @@ -0,0 +1,87 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MAPLEBE_INCLUDE_CG_LIVE_H +#define MAPLEBE_INCLUDE_CG_LIVE_H + +#include "cg_phase.h" +#include "insn.h" +#include "cgbb.h" +#include "sparse_datainfo.h" +#include "cgfunc.h" + +namespace maplebe { +class LiveAnalysis : public AnalysisResult { + public: + LiveAnalysis(CGFunc &func, MemPool &memPool) + : AnalysisResult(&memPool), cgFunc(&func), memPool(&memPool), alloc(&memPool), stackMp(func.GetStackMemPool()) {} + ~LiveAnalysis() override = default; + + void AnalysisLive(); + void Dump() const; + void DumpInfo(const SparseDataInfo &info) const; + void InitBB(BB &bb); + void InitAndGetDefUse(); + bool GenerateLiveOut(BB &bb) const; + bool GenerateLiveIn(BB &bb); + void BuildInOutforFunc(); + void DealWithInOutOfCleanupBB(); + void InsertInOutOfCleanupBB(); + void ResetLiveSet(); + void ClearInOutDataInfo(); + void EnlargeSpaceForLiveAnalysis(BB &currBB); + void GetBBDefUse(BB &bb); + void ProcessAsmListOpnd(BB &bb, Operand &opnd, uint32 idx) const; + void ProcessListOpnd(BB &bb, Operand &opnd, bool isDef) const; + void ProcessMemOpnd(BB &bb, Operand &opnd) const; + void ProcessCondOpnd(BB &bb) const; + void CollectLiveInfo(BB &bb, const Operand &opnd, bool isDef, bool isUse) const; + + SparseDataInfo *NewLiveIn(uint32 maxRegCount) { + return memPool->New(maxRegCount, alloc); + } + + SparseDataInfo *NewLiveOut(uint32 maxRegCount) { + return memPool->New(maxRegCount, alloc); + } + + SparseDataInfo *NewDef(uint32 maxRegCount) { + return memPool->New(maxRegCount, alloc); + } + + SparseDataInfo *NewUse(uint32 maxRegCount) { + return memPool->New(maxRegCount, alloc); + } + + virtual void GenerateReturnBBDefUse(BB &bb) const = 0; + virtual void ProcessCallInsnParam(BB &bb, const Insn &insn) const = 0; + virtual bool CleanupBBIgnoreReg(regno_t reg) = 0; + virtual void InitEhDefine(BB &bb) = 0; + + protected: + int iteration = 0; + CGFunc *cgFunc; + MemPool *memPool; + MapleAllocator alloc; + StackMemPool &stackMp; +}; + +MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgLiveAnalysis, maplebe::CGFunc) + LiveAnalysis *GetResult() { + return live; + } + LiveAnalysis *live = nullptr; +MAPLE_FUNC_PHASE_DECLARE_END +} /* namespace maplebe */ +#endif /* MAPLEBE_INCLUDE_CG_LIVE_H */ diff --git a/src/mapleall/maple_be/include/cg/local_opt.h b/src/mapleall/maple_be/include/cg/local_opt.h new file mode 100644 index 0000000000000000000000000000000000000000..9b3d1246481a9aea7d10c2efc0ce50e4b2071b88 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/local_opt.h @@ -0,0 +1,90 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
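LiveAnalysis above iterates GenerateLiveIn/GenerateLiveOut until the per-BB live sets stop changing. The underlying backward dataflow equations are liveOut(B) = union of liveIn(S) over successors S, and liveIn(B) = use(B) | (liveOut(B) & ~def(B)). A tiny fixed-point sketch over a hand-built two-block CFG, using std::bitset in place of SparseDataInfo:

#include <bitset>
#include <iostream>
#include <vector>

constexpr size_t kMaxReg = 8;
using RegSet = std::bitset<kMaxReg>;

struct MiniBB {                 // stand-in for BB
  RegSet use, def, liveIn, liveOut;
  std::vector<int> succs;       // indices of successor blocks
};

void AnalysisLive(std::vector<MiniBB> &bbs) {
  bool changed = true;
  while (changed) {             // iterate to a fixed point
    changed = false;
    for (auto it = bbs.rbegin(); it != bbs.rend(); ++it) {
      RegSet out;
      for (int s : it->succs) out |= bbs[s].liveIn;
      RegSet in = it->use | (out & ~it->def);
      if (in != it->liveIn || out != it->liveOut) {
        it->liveIn = in;
        it->liveOut = out;
        changed = true;
      }
    }
  }
}

int main() {
  std::vector<MiniBB> bbs(2);
  bbs[0].def.set(0);            // BB0 defines r0 and falls through to BB1
  bbs[0].succs = {1};
  bbs[1].use.set(0);            // BB1 uses r0
  AnalysisLive(bbs);
  std::cout << bbs[0].liveOut.test(0) << '\n';   // 1: r0 is live out of BB0
  return 0;
}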
+ */ +#ifndef MAPLEBE_INCLUDE_CG_LOCALO_H +#define MAPLEBE_INCLUDE_CG_LOCALO_H + +#include "cg_phase.h" +#include "cgbb.h" +#include "live.h" +#include "loop.h" +#include "cg.h" + +namespace maplebe { +class LocalOpt { + public: + LocalOpt(MemPool &memPool, CGFunc &func, ReachingDefinition &rd) + : localoMp(&memPool), + cgFunc(&func), + reachindDef(&rd) {} + + virtual ~LocalOpt() = default; + + void DoLocalCopyPropOptmize(); + + protected: + ReachingDefinition *GetRDInfo() { + return reachindDef; + } + MemPool *localoMp; + CGFunc *cgFunc; + ReachingDefinition *reachindDef; + + private: + virtual void DoLocalCopyProp() = 0; +}; + +class LocalOptimizeManager { + public: + LocalOptimizeManager(CGFunc &cgFunc, ReachingDefinition &rd) + : cgFunc(cgFunc), + reachingDef(&rd) {} + ~LocalOptimizeManager() = default; + template + void Optimize() { + LocalPropOptimizePattern optPattern(cgFunc, *reachingDef); + optPattern.Run(); + } + private: + CGFunc &cgFunc; + ReachingDefinition *reachingDef; +}; + +class LocalPropOptimizePattern { + public: + LocalPropOptimizePattern(CGFunc &cgFunc, ReachingDefinition &rd) + : cgFunc(cgFunc), + reachingDef(&rd) {} + virtual ~LocalPropOptimizePattern() = default; + virtual bool CheckCondition(Insn &insn) = 0; + virtual void Optimize(BB &bb, Insn &insn) = 0; + void Run(); + protected: + std::string PhaseName() const { + return "localopt"; + } + CGFunc &cgFunc; + ReachingDefinition *reachingDef; +}; + +class RedundantDefRemove : public LocalPropOptimizePattern { + public: + RedundantDefRemove(CGFunc &cgFunc, ReachingDefinition &rd) : LocalPropOptimizePattern(cgFunc, rd) {} + ~RedundantDefRemove() override = default; + bool CheckCondition(Insn &insn) final; +}; + +MAPLE_FUNC_PHASE_DECLARE(LocalCopyProp, maplebe::CGFunc) +} /* namespace maplebe */ +#endif /* MAPLEBE_INCLUDE_CG_LOCALO_H */ diff --git a/src/mapleall/maple_be/include/cg/loop.h b/src/mapleall/maple_be/include/cg/loop.h new file mode 100644 index 0000000000000000000000000000000000000000..379f11159e47b71ffbcdd846ead10dd594e6d7db --- /dev/null +++ b/src/mapleall/maple_be/include/cg/loop.h @@ -0,0 +1,306 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
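LocalPropOptimizePattern above follows a template-method shape: Run() walks the instructions and, wherever the subclass's CheckCondition() matches, calls its Optimize(); LocalOptimizeManager::Optimize<T>() simply instantiates a concrete pattern and runs it. A self-contained sketch of that shape, with a purely hypothetical pattern that neutralizes back-to-back duplicate "instructions" (strings stand in for Insn):

#include <iostream>
#include <string>
#include <vector>

using MiniInsn = std::string;                 // stand-in for Insn

class PropPattern {                           // mirrors LocalPropOptimizePattern
 public:
  explicit PropPattern(std::vector<MiniInsn> &body) : body(body) {}
  virtual ~PropPattern() = default;
  virtual bool CheckCondition(size_t i) = 0;
  virtual void Optimize(size_t i) = 0;
  void Run() {
    for (size_t i = 0; i < body.size(); ++i) {
      if (CheckCondition(i)) Optimize(i);
    }
  }
 protected:
  std::vector<MiniInsn> &body;
};

class RemoveDupPattern : public PropPattern { // hypothetical concrete pattern
 public:
  using PropPattern::PropPattern;
  bool CheckCondition(size_t i) override { return i > 0 && body[i] == body[i - 1]; }
  void Optimize(size_t i) override { body[i] = "nop"; }
};

template <typename Pattern>
void Optimize(std::vector<MiniInsn> &body) {  // mirrors LocalOptimizeManager::Optimize
  Pattern p(body);
  p.Run();
}

int main() {
  std::vector<MiniInsn> body{"mov x0, x1", "mov x0, x1", "add x0, x0, #1"};
  Optimize<RemoveDupPattern>(body);
  for (const auto &s : body) std::cout << s << '\n';
  return 0;
}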
+ */ +#ifndef MAPLEBE_INCLUDE_CG_LOOP_H +#define MAPLEBE_INCLUDE_CG_LOOP_H + +#include "cg_phase.h" +#include "cgbb.h" +#include "insn.h" +#include "maple_phase.h" + +namespace maplebe { +class LoopHierarchy { + public: + struct HeadIDCmp { + bool operator()(const LoopHierarchy *loopHierarchy1, const LoopHierarchy *loopHierarchy2) const { + CHECK_NULL_FATAL(loopHierarchy1); + CHECK_NULL_FATAL(loopHierarchy2); + return (loopHierarchy1->GetHeader()->GetId() < loopHierarchy2->GetHeader()->GetId()); + } + }; + + explicit LoopHierarchy(MemPool &memPool) + : loopMp(memPool), + loopAlloc(&memPool), + otherLoopEntries(loopAlloc.Adapter()), + loopMembers(loopAlloc.Adapter()), + backedge(loopAlloc.Adapter()), + backBBEdges(loopAlloc.Adapter()), + exits(loopAlloc.Adapter()), + innerLoops(loopAlloc.Adapter()) {} + + virtual ~LoopHierarchy() = default; + + BB *GetHeader() const { + return header; + } + const MapleSet &GetLoopMembers() const { + return loopMembers; + } + const MapleSet &GetBackedge() const { + return backedge; + } + const MapleUnorderedMap*> &GetBackBBEdges() const { + return backBBEdges; + } + MapleSet &GetBackedgeNonConst() { + return backedge; + } + const MapleSet &GetExits() const { + return exits; + } + const MapleSet &GetInnerLoops() const { + return innerLoops; + } + const LoopHierarchy *GetOuterLoop() const { + return outerLoop; + } + LoopHierarchy *GetPrev() { + return prev; + } + LoopHierarchy *GetNext() { + return next; + } + + MapleSet::iterator EraseLoopMembers(MapleSet::iterator it) { + return loopMembers.erase(it); + } + void InsertLoopMembers(BB &bb) { + (void)loopMembers.insert(&bb); + } + void InsertBackedge(BB &bb) { + (void)backedge.insert(&bb); + } + void InsertBackBBEdge(BB &backBB, BB &headerBB) { + auto backIt = backBBEdges.find(&backBB); + if (backIt == backBBEdges.end()) { + auto *headBBSPtr = loopMp.New>(loopAlloc.Adapter()); + headBBSPtr->insert(&headerBB); + backBBEdges.emplace(&backBB, headBBSPtr); + } else { + backIt->second->insert(&headerBB); + } + } + void InsertExit(BB &bb) { + (void)exits.insert(&bb); + } + void InsertInnerLoops(LoopHierarchy &loop) { + (void)innerLoops.insert(&loop); + } + void SetHeader(BB &bb) { + header = &bb; + } + void SetOuterLoop(LoopHierarchy &loop) { + outerLoop = &loop; + } + void SetPrev(LoopHierarchy *loop) { + prev = loop; + } + void SetNext(LoopHierarchy *loop) { + next = loop; + } + void PrintLoops(const std::string &name) const; + MapleSet GetOtherLoopEntries() const { + return otherLoopEntries; + } + + void InsertBBToOtherLoopEntries(BB * const &insertBB) { + (void)otherLoopEntries.insert(insertBB); + } + void EraseBBFromOtherLoopEntries(BB * const &eraseBB) { + (void)otherLoopEntries.erase(eraseBB); + } + + protected: + LoopHierarchy *prev = nullptr; + LoopHierarchy *next = nullptr; + + private: + MemPool &loopMp; + MapleAllocator loopAlloc; + BB *header = nullptr; + MapleSet otherLoopEntries; + MapleSet loopMembers; + MapleSet backedge; + MapleUnorderedMap*> backBBEdges; // + MapleSet exits; + MapleSet innerLoops; + LoopHierarchy *outerLoop = nullptr; +}; + +class LoopFinder : public AnalysisResult { + public: + LoopFinder(CGFunc &func, MemPool &mem) + : AnalysisResult(&mem), + cgFunc(&func), + memPool(&mem), + loopMemPool(memPool), + visitedBBs(loopMemPool.Adapter()), + sortedBBs(loopMemPool.Adapter()), + dfsBBs(loopMemPool.Adapter()), + onPathBBs(loopMemPool.Adapter()), + recurseVisited(loopMemPool.Adapter()) + {} + + ~LoopFinder() override = default; + + void FormLoop(BB* headBB, BB* backBB); + void 
SeekBackEdge(BB* bb, MapleList succs); + void SeekCycles(); + void MarkExtraEntryAndEncl(); + bool HasSameHeader(const LoopHierarchy *lp1, const LoopHierarchy *lp2) const; + void MergeLoops(); + void SortLoops(); + void UpdateOuterForInnerLoop(BB *bb, LoopHierarchy *outer); + void UpdateOuterLoop(const LoopHierarchy *loop); + void CreateInnerLoop(LoopHierarchy &inner, LoopHierarchy &outer); + void DetectInnerLoop(); + void UpdateCGFunc() const; + void FormLoopHierarchy(); + + private: + CGFunc *cgFunc; + MemPool *memPool; + MapleAllocator loopMemPool; + MapleVector visitedBBs; + MapleVector sortedBBs; + MapleStack dfsBBs; + MapleVector onPathBBs; + MapleVector recurseVisited; + LoopHierarchy *loops = nullptr; +}; + +class CGFuncLoops { + public: + explicit CGFuncLoops(MemPool &memPool) + : loopMemPool(&memPool), + multiEntries(loopMemPool.Adapter()), + loopMembers(loopMemPool.Adapter()), + backedge(loopMemPool.Adapter()), + backBBEdges(loopMemPool.Adapter()), + exits(loopMemPool.Adapter()), + innerLoops(loopMemPool.Adapter()) {} + + ~CGFuncLoops() = default; + + void CheckOverlappingInnerLoops(const MapleVector &iLoops, + const MapleVector &loopMem) const; + void CheckLoops() const; + void PrintLoops(const CGFuncLoops &loops) const; + bool IsBBLoopMember(const BB *bb) const; + bool IsBackEdge(const BB &fromBB, const BB &toBB) const { + auto backIt = backBBEdges.find(fromBB.GetId()); + if (backIt == backBBEdges.end()) { + return false; + } + for (auto headId : backIt->second) { + if (toBB.GetId() == headId) { + return true; + } + } + return false; + } + + const BB *GetHeader() const { + return header; + } + BB *GetHeader() { + return header; + } + const MapleVector &GetMultiEntries() const { + return multiEntries; + } + const MapleVector &GetLoopMembers() const { + return loopMembers; + } + const MapleVector &GetBackedge() const { + return backedge; + } + const MapleUnorderedMap> &GetBackBBEdges() const { + return backBBEdges; + } + const MapleVector &GetExits() const { + return exits; + } + const MapleVector &GetInnerLoops() const { + return innerLoops; + } + const CGFuncLoops *GetOuterLoop() const { + return outerLoop; + } + uint32 GetLoopLevel() const { + return loopLevel; + } + void AddMultiEntries(BB &bb) { + multiEntries.emplace_back(&bb); + } + void AddLoopMembers(BB &bb) { + loopMembers.emplace_back(&bb); + } + void AddBackedge(BB &bb) { + backedge.emplace_back(&bb); + } + void AddBackBBEdge(BB &backBB, BB &headerBB) { + auto backIt = backBBEdges.find(backBB.GetId()); + if (backIt == backBBEdges.end()) { + MapleSet toBBs(loopMemPool.Adapter()); + toBBs.insert(headerBB.GetId()); + backBBEdges.emplace(backBB.GetId(), toBBs); + } else { + backIt->second.insert(headerBB.GetId()); + } + } + void AddExit(BB &bb) { + exits.emplace_back(&bb); + } + void AddInnerLoops(CGFuncLoops &loop) { + innerLoops.emplace_back(&loop); + } + void SetHeader(BB &bb) { + header = &bb; + } + void SetOuterLoop(CGFuncLoops &loop) { + outerLoop = &loop; + } + void SetLoopLevel(uint32 val) { + loopLevel = val; + } + + private: + MapleAllocator loopMemPool; + BB *header = nullptr; + MapleVector multiEntries; + MapleVector loopMembers; + MapleVector backedge; + MapleUnorderedMap> backBBEdges; // () + MapleVector exits; + MapleVector innerLoops; + CGFuncLoops *outerLoop = nullptr; + uint32 loopLevel = 0; +}; + +struct CGFuncLoopCmp { + bool operator()(const CGFuncLoops *lhs, const CGFuncLoops *rhs) const { + CHECK_NULL_FATAL(lhs); + CHECK_NULL_FATAL(rhs); + return lhs->GetHeader()->GetId() < 
rhs->GetHeader()->GetId(); + } +}; + +MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgLoopAnalysis, maplebe::CGFunc); +MAPLE_FUNC_PHASE_DECLARE_END +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_LOOP_H */ diff --git a/src/mapleall/maple_be/include/cg/lsda.h b/src/mapleall/maple_be/include/cg/lsda.h new file mode 100644 index 0000000000000000000000000000000000000000..cd11e092936b4e8f33f8c638d285e230a8a04ea6 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/lsda.h @@ -0,0 +1,266 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_CG_LSDA_H +#define MAPLEBE_INCLUDE_CG_LSDA_H +#include "types_def.h" +#include "mir_nodes.h" +#include "cgbb.h" + +namespace maplebe { +using namespace maple; + +class LabelPair { + public: + LabelPair() = default; + LabelPair(LabelNode *startOffsetLbl, LabelNode *endOffsetLbl) { + startOffset = startOffsetLbl; + endOffset = endOffsetLbl; + } + ~LabelPair() = default; + + const LabelNode *GetStartOffset() const { + return startOffset; + } + + void SetStartOffset(LabelNode *lableNode) { + startOffset = lableNode; + } + + const LabelNode *GetEndOffset() const { + return endOffset; + } + + void SetEndOffsetLabelIdx(LabelIdx index) const { + endOffset->SetLabelIdx(index); + } + + void SetEndOffset(LabelNode *labelNode) { + endOffset = labelNode; + } + + private: + LabelNode *startOffset = nullptr; + LabelNode *endOffset = nullptr; +}; + +class LSDAHeader { + public: + const LabelNode *GetLSDALabel() const { + return lsdaLabel; + } + + void SetLSDALabel(LabelNode &labelNode) { + lsdaLabel = &labelNode; + } + + uint8 GetLPStartEncoding() const { + return lpStartEncoding; + } + + void SetLPStartEncoding(uint8 encoding) { + lpStartEncoding = encoding; + } + + uint8 GetTTypeEncoding() const { + return tTypeEncoding; + } + + void SetTTypeEncoding(uint8 encoding) { + tTypeEncoding = encoding; + } + + const LabelPair &GetTTypeOffset() const { + return tTypeOffset; + } + + void SetTTypeOffset(LabelNode *start, LabelNode *end) { + tTypeOffset.SetStartOffset(start); + tTypeOffset.SetEndOffset(end); + } + + uint8 GetCallSiteEncoding() const { + return callSiteEncoding; + } + + void SetCallSiteEncoding(uint8 encoding) { + callSiteEncoding = encoding; + } + + private: + LabelNode *lsdaLabel; + uint8 lpStartEncoding; + uint8 tTypeEncoding; + LabelPair tTypeOffset; + uint8 callSiteEncoding; +}; + +struct LSDACallSite { + LabelPair csStart; + LabelPair csLength; + LabelPair csLandingPad; + uint32 csAction; + + public: + void Init(const LabelPair &start, const LabelPair &length, const LabelPair &landingPad, uint32 action) { + csStart = start; + csLength = length; + csLandingPad = landingPad; + csAction = action; + } +}; + +class LSDAAction { + public: + LSDAAction(uint8 idx, uint8 filter) : actionIndex(idx), actionFilter(filter) {} + ~LSDAAction() = default; + + uint8 GetActionIndex() const { + return actionIndex; + } + + uint8 GetActionFilter() const { + return actionFilter; + } + + private: + uint8 actionIndex; + uint8 actionFilter; +}; + 
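The LSDACallSite and LSDAAction records above mirror the call-site and action tables of an Itanium-ABI-style exception table: each call-site entry records where a call region starts, how long it is, which landing pad handles it, and an index into the action table; LSDACallSiteTable below aggregates those entries. A schematic sketch of how such records might be filled and dumped, with label pairs reduced to plain offsets for illustration:

#include <cstdint>
#include <iostream>
#include <vector>

struct MiniCallSite {        // simplified LSDACallSite: offsets instead of LabelPairs
  uint32_t start;            // region start, relative to function begin
  uint32_t length;           // region length
  uint32_t landingPad;       // 0 means "no landing pad"
  uint32_t action;           // 1-based index into the action table, 0 = cleanup only
};

int main() {
  std::vector<MiniCallSite> callSiteTable = {
      {0x10, 0x08, 0x40, 1},  // call at 0x10 covered by a handler via action 1
      {0x20, 0x04, 0x00, 0},  // call at 0x20 with no landing pad
  };
  for (const MiniCallSite &cs : callSiteTable) {
    std::cout << "cs_start=" << cs.start << " cs_len=" << cs.length
              << " cs_lp=" << cs.landingPad << " cs_action=" << cs.action << '\n';
  }
  return 0;
}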
+class LSDACallSiteTable { + public: + explicit LSDACallSiteTable(MapleAllocator &alloc) : callSiteTable(alloc.Adapter()) { + csTable.SetStartOffset(nullptr); + csTable.SetEndOffset(nullptr); + } + ~LSDACallSiteTable() = default; + + const MapleVector &GetCallSiteTable() const { + return callSiteTable; + } + + void PushBack(LSDACallSite &lsdaCallSite) { + callSiteTable.emplace_back(&lsdaCallSite); + } + + const LabelPair &GetCSTable() const { + return csTable; + } + + void SetCSTable(LabelNode *start, LabelNode *end) { + csTable.SetStartOffset(start); + csTable.SetEndOffset(end); + } + + void UpdateCallSite(const BB &oldBB, const BB &newBB) { + for (auto *callSite : callSiteTable) { + if (callSite->csStart.GetEndOffset() != nullptr) { + if (callSite->csStart.GetEndOffset()->GetLabelIdx() == oldBB.GetLabIdx()) { + callSite->csStart.SetEndOffsetLabelIdx(newBB.GetLabIdx()); + } + } + + CHECK_NULL_FATAL(callSite->csLength.GetEndOffset()); + if (callSite->csLength.GetEndOffset()->GetLabelIdx() == oldBB.GetLabIdx()) { + callSite->csLength.SetEndOffsetLabelIdx(newBB.GetLabIdx()); + } + + if (callSite->csLandingPad.GetEndOffset() != nullptr) { + if (callSite->csLandingPad.GetEndOffset()->GetLabelIdx() == oldBB.GetLabIdx()) { + callSite->csLandingPad.SetEndOffsetLabelIdx(newBB.GetLabIdx()); + } + } + } + } + + void RemoveCallSite(const BB &bb) { + for (int32 i = static_cast(callSiteTable.size()) - 1; i > -1; --i) { + if (callSiteTable[i]->csStart.GetEndOffset() != nullptr) { + if (callSiteTable[i]->csStart.GetEndOffset()->GetLabelIdx() == bb.GetLabIdx()) { + (void)callSiteTable.erase(callSiteTable.cbegin() + i); + continue; + } + } + if (callSiteTable[i]->csLandingPad.GetEndOffset() != nullptr) { + if (callSiteTable[i]->csLandingPad.GetEndOffset()->GetLabelIdx() == bb.GetLabIdx()) { + (void)callSiteTable.erase(callSiteTable.cbegin() + i); + continue; + } + } + } + } + + /* return true if label is in callSiteTable */ + bool InCallSiteTable(LabelIdx label) const { + for (auto *callSite : callSiteTable) { + if (label == callSite->csStart.GetEndOffset()->GetLabelIdx() || + label == callSite->csStart.GetStartOffset()->GetLabelIdx()) { + return true; + } + if (label == callSite->csLength.GetEndOffset()->GetLabelIdx() || + label == callSite->csLength.GetStartOffset()->GetLabelIdx()) { + return true; + } + if (callSite->csLandingPad.GetStartOffset()) { + if (label == callSite->csLandingPad.GetEndOffset()->GetLabelIdx() || + label == callSite->csLandingPad.GetStartOffset()->GetLabelIdx()) { + return true; + } + } + } + return false; + } + + bool IsTryBlock(const BB &bb) const { + for (auto *callSite : callSiteTable) { + if (callSite->csLength.GetStartOffset()->GetLabelIdx() == bb.GetLabIdx()) { + return true; + } + } + return false; + } + + void SortCallSiteTable(std::function const &func) { + std::sort(callSiteTable.begin(), callSiteTable.end(), func); + } + + private: + MapleVector callSiteTable; + LabelPair csTable; +}; + +class LSDAActionTable { + public: + explicit LSDAActionTable(MapleAllocator &alloc) : actionTable(alloc.Adapter()) {} + virtual ~LSDAActionTable() = default; + + const MapleVector &GetActionTable() const{ + return actionTable; + } + + void PushBack(LSDAAction &lsdaAction) { + actionTable.emplace_back(&lsdaAction); + } + + size_t Size() const { + return actionTable.size(); + } + + private: + MapleVector actionTable; +}; +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_LSDA_H */ \ No newline at end of file diff --git a/src/mapleall/maple_be/include/cg/memlayout.h 
b/src/mapleall/maple_be/include/cg/memlayout.h new file mode 100644 index 0000000000000000000000000000000000000000..17980e0d2378854c5babdcbe74ad2d08f66a7acd --- /dev/null +++ b/src/mapleall/maple_be/include/cg/memlayout.h @@ -0,0 +1,278 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_CG_MEMLAYOUT_H +#define MAPLEBE_INCLUDE_CG_MEMLAYOUT_H + +/* C++ headers. */ +#include +#include "becommon.h" +#include "mir_function.h" +#include "mir_nodes.h" /* StmtNode */ + +namespace maplebe { +using regno_t = uint32; +enum MemSegmentKind : uint8 { + kMsUnknown, + /* + * Function arguments that are not passed through registers + * are passed to the callee through stack. + */ + kMsArgsStkPassed, + /* + * In between MS_args_stackpassed and kMsArgsRegpassed, + * we store call-saved registers if any. + */ + /* + * Args passed via registers according to the architecture-specific ABI + * may need be stored in stack. + * 1) In the unoptimized version, we implement a model (similar to GCC -O0) + * where all the values are initially stored in the memory and + * loaded into registers when needed, and stored back to the memory when + * their uses are done. + * 2) In an optimized version, some register-passed values may need to be + * spilled into memory. We allocate the space in this Memory segment. + * (or we may allocate them in caller-saved; may be this is better...) + */ + kMsArgsRegPassed, + /* + * GR/VR Save areas for unnamed arguments under vararg functions + */ + kMsGrSaveArea, + kMsVrSaveArea, + /* local (auto) variables */ + kMsRefLocals, + kMsLocals, + kMsSpillReg, + /* + * In between kMsLocals and MS_args_to_stackpass, we allocate + * a register-spill area and space for caller-saved registers + */ + /* + * When a function calls another which takes some arguments + * that cannot be passed through registers, it is the caller's + * responsibility to allocate space for those arguments in memory. + */ + kMsArgsToStkPass, + /* The red zone stack area will not be modified by the exception signal. 
*/ + kMsRedZone, +}; + +enum StackProtectKind : uint8 { + kNone = 0, + kAddrofStack = 0x1, + /* if a callee has return agg type which size over 16bytes */ + kRetureStackSlot = 0x2, +}; + +class CGFunc; + +/* keeps track of the allocation of a memory segment */ +class MemSegment { + public: + explicit MemSegment(MemSegmentKind memSegKind) : kind(memSegKind), size(0) {} + + ~MemSegment() = default; + + uint32 GetSize() const { + return size; + } + + void SetSize(uint32 memSize) { + size = memSize; + } + + MemSegmentKind GetMemSegmentKind() const { + return kind; + } + + private: + MemSegmentKind kind; + uint32 size; /* size is negative if allocated offsets are negative */ +}; /* class MemSegment */ + +/* describes where a symbol is allocated */ +class SymbolAlloc { + public: + SymbolAlloc() = default; + + ~SymbolAlloc() = default; + + const MemSegment *GetMemSegment() const { + return memSegment; + } + + void SetMemSegment(const MemSegment &memSeg) { + memSegment = &memSeg; + } + + int64 GetOffset() const { + return offset; + } + + void SetOffset(int64 off) { + offset = off; + } + + protected: + const MemSegment *memSegment = nullptr; + int64 offset = 0; +}; /* class SymbolAlloc */ + +class MemLayout { + public: + MemLayout(BECommon &beCommon, MIRFunction &mirFunc, MapleAllocator &mallocator, uint32 kStackPtrAlignment) + : be(beCommon), + mirFunction(&mirFunc), + segArgsStkPassed(kMsArgsStkPassed), + segArgsRegPassed(kMsArgsRegPassed), + segArgsToStkPass(kMsArgsToStkPass), + symAllocTable(mallocator.Adapter()), + spillLocTable(mallocator.Adapter()), + spillRegLocMap(mallocator.Adapter()), + localRefLocMap(std::less(), mallocator.Adapter()), + memAllocator(&mallocator), + stackPtrAlignment(kStackPtrAlignment) { + symAllocTable.resize(mirFunc.GetSymTab()->GetSymbolTableSize()); + } + + virtual ~MemLayout() = default; + + void SetCurrFunction(CGFunc &func) { + cgFunc = &func; + } + + /* + * Returns stack space required for a call + * which is used to pass arguments that cannot be + * passed through registers + */ + virtual uint32 ComputeStackSpaceRequirementForCall(StmtNode &stmtNode, int32 &aggCopySize, bool isIcall) = 0; + + /* + * Go over all outgoing calls in the function body and get the maximum space + * needed for storing the actuals based on the actual parameters and the ABI. + * These are usually those arguments that cannot be passed + * through registers because a call passes more than 8 arguments, or + * they cannot be fit in a pair of registers. 
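FindLargestActualArea, documented just above, scans every outgoing call and keeps the largest stack area any single call needs for arguments that cannot go in registers; the frame then reserves that maximum once, aligned to the stack pointer alignment. A rough stand-alone sketch of that computation, assuming (purely for illustration) 8 integer argument registers, 8-byte slots, and 16-byte stack alignment:

#include <algorithm>
#include <cstdint>
#include <iostream>
#include <vector>

constexpr uint32_t kNumArgRegs = 8;       // assumption: 8 GP argument registers
constexpr uint32_t kRegSize = 8;          // assumption: 8-byte argument slots
constexpr uint32_t kStkAlign = 16;        // assumption: 16-byte stack alignment

uint32_t RoundUp(uint32_t size, uint32_t align) {
  return (size + align - 1) & ~(align - 1);
}

// Each call is described by how many argument slots it needs in total.
uint32_t FindLargestActualArea(const std::vector<uint32_t> &callArgSlots) {
  uint32_t maxArea = 0;
  for (uint32_t slots : callArgSlots) {
    uint32_t stackSlots = slots > kNumArgRegs ? slots - kNumArgRegs : 0;
    maxArea = std::max(maxArea, stackSlots * kRegSize);
  }
  return RoundUp(maxArea, kStkAlign);
}

int main() {
  // Two calls: one with 10 arguments (2 overflow to the stack), one with 3.
  std::cout << FindLargestActualArea({10, 3}) << '\n';   // prints 16
  return 0;
}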
+ */ + uint32 FindLargestActualArea(int32 &aggCopySize); + + virtual void LayoutStackFrame(int32 &structCopySize, int32 &maxParmStackSize) = 0; + + /* + * "Pseudo-registers can be regarded as local variables of a + * primitive type whose addresses are never taken" + */ + virtual void AssignSpillLocationsToPseudoRegisters() = 0; + + virtual SymbolAlloc *AssignLocationToSpillReg(regno_t vrNum) = 0; + + virtual int32 GetCalleeSaveBaseLoc() const { + return 0; + } + + SymbolAlloc *GetSymAllocInfo(uint32 stIdx) { + ASSERT(stIdx < symAllocTable.size(), "out of symAllocTable's range"); + return symAllocTable[stIdx]; + } + + void SetSymAllocInfo(uint32 stIdx, SymbolAlloc &symAlloc) { + ASSERT(stIdx < symAllocTable.size(), "out of symAllocTable's range"); + symAllocTable[stIdx] = &symAlloc; + } + + const SymbolAlloc *GetSpillLocOfPseduoRegister(PregIdx index) const { + return spillLocTable.at(index); + } + + SymbolAlloc *GetLocOfSpillRegister(regno_t vrNum) { + SymbolAlloc *loc = nullptr; + auto pos = spillRegLocMap.find(vrNum); + if (pos == spillRegLocMap.end()) { + loc = AssignLocationToSpillReg(vrNum); + } else { + loc = pos->second; + } + return loc; + } + + uint32 SizeOfArgsToStackPass() const { + return segArgsToStkPass.GetSize(); + } + + uint32 SizeOfArgsRegisterPassed() const { + return segArgsRegPassed.GetSize(); + } + + BECommon &GetBECommon() { + return be; + } + + MIRFunction *GetMIRFunction() { + return mirFunction; + } + + const MemSegment &GetSegArgsStkPassed() const { + return segArgsStkPassed; + } + + const MemSegment &GetSegArgsRegPassed() const { + return segArgsRegPassed; + } + + const MemSegment &GetSegArgsToStkPass() const { + return segArgsToStkPass; + } + + const MapleVector &GetSymAllocTable() const { + return symAllocTable; + } + + void SetSpillRegLocInfo(regno_t regNum, SymbolAlloc &symAlloc) { + spillRegLocMap[regNum] = &symAlloc; + } + + const MapleMap &GetLocalRefLocMap() const { + return localRefLocMap; + } + + void SetLocalRegLocInfo(StIdx idx, SymbolAlloc &symAlloc) { + localRefLocMap[idx] = &symAlloc; + } + + bool IsLocalRefLoc(const MIRSymbol &symbol) const { + return localRefLocMap.find(symbol.GetStIdx()) != localRefLocMap.end(); + } + + uint32 GetStackPtrAlignment() const { + return stackPtrAlignment; + } + protected: + BECommon &be; + MIRFunction *mirFunction; + MemSegment segArgsStkPassed; + MemSegment segArgsRegPassed; + MemSegment segArgsToStkPass; + MapleVector symAllocTable; /* index is stindex from StIdx */ + MapleVector spillLocTable; /* index is preg idx */ + MapleUnorderedMap spillRegLocMap; + MapleMap localRefLocMap; /* localrefvar formals. real address passed in stack. */ + MapleAllocator *memAllocator; + CGFunc *cgFunc = nullptr; + const uint32 stackPtrAlignment; +}; +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_MEMLAYOUT_H */ diff --git a/src/mapleall/maple_be/include/cg/offset_adjust.h b/src/mapleall/maple_be/include/cg/offset_adjust.h new file mode 100644 index 0000000000000000000000000000000000000000..774e4fc6d8bec1950c2b8e1ee52f27837eed2d8c --- /dev/null +++ b/src/mapleall/maple_be/include/cg/offset_adjust.h @@ -0,0 +1,44 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
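MemLayout::GetLocOfSpillRegister above is a lookup-or-create accessor: a virtual register's spill slot is assigned only the first time it is requested and cached in spillRegLocMap afterwards. A minimal sketch of the same lazy-allocation idiom, with std::unordered_map and byte offsets standing in for the SymbolAlloc machinery (the 8-byte slot size is an assumption):

#include <cstdint>
#include <iostream>
#include <unordered_map>

class MiniMemLayout {                       // stand-in for MemLayout
 public:
  int64_t GetLocOfSpillRegister(uint32_t vreg) {
    auto pos = spillRegLocMap.find(vreg);
    if (pos != spillRegLocMap.end()) {
      return pos->second;                   // already assigned: reuse the slot
    }
    int64_t offset = nextSpillOffset;       // first request: allocate a new slot
    nextSpillOffset += kSlotSize;
    spillRegLocMap[vreg] = offset;
    return offset;
  }

 private:
  static constexpr int64_t kSlotSize = 8;   // assumption: 8-byte spill slots
  int64_t nextSpillOffset = 0;
  std::unordered_map<uint32_t, int64_t> spillRegLocMap;
};

int main() {
  MiniMemLayout layout;
  std::cout << layout.GetLocOfSpillRegister(101) << ' '    // 0 (new slot)
            << layout.GetLocOfSpillRegister(102) << ' '    // 8 (new slot)
            << layout.GetLocOfSpillRegister(101) << '\n';  // 0 (cached)
  return 0;
}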
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_CG_OFFSET_ADJUST_H +#define MAPLEBE_INCLUDE_CG_OFFSET_ADJUST_H + +#include "cgfunc.h" +#include "cg_phase.h" + +namespace maplebe { +class FrameFinalize { + public: + explicit FrameFinalize(CGFunc &func) : cgFunc(&func) {} + + virtual ~FrameFinalize() { + cgFunc = nullptr; + } + + virtual void Run() {} + + std::string PhaseName() const { + return "framefinalize"; + } + + protected: + CGFunc *cgFunc; +}; + +MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgFrameFinalize, maplebe::CGFunc) +MAPLE_FUNC_PHASE_DECLARE_END +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_OFFSET_ADJUST_H */ diff --git a/src/mapleall/maple_be/include/cg/operand.def b/src/mapleall/maple_be/include/cg/operand.def new file mode 100644 index 0000000000000000000000000000000000000000..249f048046382c302518f830f1250c9b6570fdc4 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/operand.def @@ -0,0 +1,95 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan PSL v1. + * You can use this software according to the terms and conditions of the Mulan PSL v1. + * You may obtain a copy of Mulan PSL v1 at: + * + * http://license.coscl.org.cn/MulanPSL + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v1 for more details. 
+ */ + +DEFINE_MOP(Mem8S, {Operand::kOpdMem, operand::kIsUse, 8}) +DEFINE_MOP(Mem8D, {Operand::kOpdMem, operand::kIsDef, 8}) +DEFINE_MOP(Mem16S, {Operand::kOpdMem, operand::kIsUse, 16}) +DEFINE_MOP(Mem16D, {Operand::kOpdMem, operand::kIsDef, 16}) +DEFINE_MOP(Mem32D, {Operand::kOpdMem, operand::kIsDef, 32}) +DEFINE_MOP(Mem32S, {Operand::kOpdMem, operand::kIsUse, 32}) +DEFINE_MOP(Mem64D, {Operand::kOpdMem, operand::kIsDef, 64}) +DEFINE_MOP(Mem64S, {Operand::kOpdMem, operand::kIsUse, 64}) +DEFINE_MOP(Mem128D, {Operand::kOpdMem, operand::kIsDef, 128}) +DEFINE_MOP(Mem128S, {Operand::kOpdMem, operand::kIsUse, 128}) + +DEFINE_MOP(Reg8IS, {Operand::kOpdRegister, operand::kIsUse | operand::kInt, 8}) +DEFINE_MOP(Reg8ID, {Operand::kOpdRegister, operand::kIsDef | operand::kInt, 8}) +DEFINE_MOP(Reg8IDS, {Operand::kOpdRegister, operand::kIsDef | operand::kIsUse | operand::kInt, 8}) +DEFINE_MOP(Reg16ID, {Operand::kOpdRegister, operand::kIsDef | operand::kInt, 16}) +DEFINE_MOP(Reg16IS, {Operand::kOpdRegister, operand::kIsUse | operand::kInt, 16}) +DEFINE_MOP(Reg16IDS, {Operand::kOpdRegister, operand::kIsDef | operand::kIsUse | operand::kInt, 16}) +DEFINE_MOP(Reg32ID, {Operand::kOpdRegister, operand::kIsDef | operand::kInt, 32}) +DEFINE_MOP(Reg32IS, {Operand::kOpdRegister, operand::kIsUse | operand::kInt, 32}) +DEFINE_MOP(Reg32IDS, {Operand::kOpdRegister, operand::kIsDef | operand::kIsUse | operand::kInt, 32}) +DEFINE_MOP(Reg64ID, {Operand::kOpdRegister, operand::kIsDef | operand::kInt, 64}) +DEFINE_MOP(Reg64IS, {Operand::kOpdRegister, operand::kIsUse | operand::kInt, 64}) +DEFINE_MOP(Reg64IDS, {Operand::kOpdRegister, operand::kIsDef | operand::kIsUse | operand::kInt, 64}) + +DEFINE_MOP(Reg8FS, {Operand::kOpdRegister, operand::kIsUse | operand::kFloat, 8}) +DEFINE_MOP(Reg8FD, {Operand::kOpdRegister, operand::kIsDef | operand::kFloat, 8}) +DEFINE_MOP(Reg16FD, {Operand::kOpdRegister, operand::kIsDef | operand::kFloat, 16}) +DEFINE_MOP(Reg16FS, {Operand::kOpdRegister, operand::kIsUse | operand::kFloat, 16}) +DEFINE_MOP(Reg16FDS, {Operand::kOpdRegister, operand::kIsDef | operand::kIsUse | operand::kFloat, 16}) +DEFINE_MOP(Reg32FD, {Operand::kOpdRegister, operand::kIsDef | operand::kFloat, 32}) +DEFINE_MOP(Reg32FS, {Operand::kOpdRegister, operand::kIsUse | operand::kFloat, 32}) +DEFINE_MOP(Reg32FDS, {Operand::kOpdRegister, operand::kIsDef | operand::kIsUse | operand::kFloat, 32}) +DEFINE_MOP(Reg64FD, {Operand::kOpdRegister, operand::kIsDef | operand::kFloat, 64}) +DEFINE_MOP(Reg64FS, {Operand::kOpdRegister, operand::kIsUse | operand::kFloat, 64}) +DEFINE_MOP(Reg64FDS, {Operand::kOpdRegister, operand::kIsDef | operand::kIsUse | operand::kFloat, 64}) +DEFINE_MOP(Reg128FD, {Operand::kOpdRegister, operand::kIsDef | operand::kFloat, 128}) +DEFINE_MOP(Reg128FS, {Operand::kOpdRegister, operand::kIsUse | operand::kFloat, 128}) +DEFINE_MOP(Reg128FDS, {Operand::kOpdRegister, operand::kIsDef | operand::kIsUse | operand::kFloat, 128}) + +DEFINE_MOP(Reg64VD, {Operand::kOpdRegister, operand::kIsDef | operand::kFloat | operand::kIsVector, 64}) +DEFINE_MOP(Reg64VS, {Operand::kOpdRegister, operand::kIsUse | operand::kFloat | operand::kIsVector, 64}) +DEFINE_MOP(Reg64VDS, {Operand::kOpdRegister, operand::kIsDef | operand::kIsUse | operand::kFloat | operand::kIsVector, 64}) +DEFINE_MOP(Reg128VD, {Operand::kOpdRegister, operand::kIsDef | operand::kFloat | operand::kIsVector, 128}) +DEFINE_MOP(Reg128VS, {Operand::kOpdRegister, operand::kIsUse | operand::kFloat | operand::kIsVector, 128}) +DEFINE_MOP(Reg128VDS, 
{Operand::kOpdRegister, operand::kIsDef | operand::kIsUse | operand::kFloat | operand::kIsVector, 128}) + +DEFINE_MOP(CCD, {Operand::kOpdRegister, operand::kRegTyCc | operand::kIsDef, 1}) +DEFINE_MOP(CCS, {Operand::kOpdRegister, operand::kRegTyCc | operand::kIsUse, 1}) +DEFINE_MOP(Cond, {Operand::kOpdCond, operand::kRegTyCc | operand::kIsUse, 4}) + +DEFINE_MOP(Imm4, {Operand::kOpdImmediate, operand::kIsUse, 4}) +DEFINE_MOP(Imm5, {Operand::kOpdImmediate, operand::kIsUse, 5}) +DEFINE_MOP(Imm6, {Operand::kOpdImmediate, operand::kIsUse, 6}) +DEFINE_MOP(Imm8, {Operand::kOpdImmediate, operand::kIsUse, 8}) +DEFINE_MOP(Imm12, {Operand::kOpdImmediate, operand::kIsUse, 12}) +DEFINE_MOP(Imm13, {Operand::kOpdImmediate, operand::kIsUse, 13}) +DEFINE_MOP(Imm16, {Operand::kOpdImmediate, operand::kIsUse, 16}) +DEFINE_MOP(Imm32, {Operand::kOpdImmediate, operand::kIsUse, 32}) +DEFINE_MOP(Imm64, {Operand::kOpdImmediate, operand::kIsUse, 64}) +DEFINE_MOP(StImm32, {Operand::kOpdStImmediate, operand::kIsUse, 32}) +DEFINE_MOP(StImm64, {Operand::kOpdStImmediate, operand::kIsUse, 64}) +DEFINE_MOP(FpImm8, {Operand::kOpdFPImmediate, operand::kIsUse, 8}) +DEFINE_MOP(LiteralSrc, {Operand::kOpdStImmediate, operand::kIsUse, 64}) +DEFINE_MOP(Literal12Src, {Operand::kOpdStImmediate, operand::kLiteralLow12, 12}) + +/* for movk */ +DEFINE_MOP(Lsl4, {Operand::kOpdShift, operand::kIsUse, 4}) +DEFINE_MOP(Lsl6, {Operand::kOpdShift, operand::kIsUse, 6}) +DEFINE_MOP(Lsl12, {Operand::kOpdShift, operand::kIsUse, 12}) +/* for shift */ +DEFINE_MOP(Bitshift32, {Operand::kOpdShift, operand::kIsUse, 5}) +DEFINE_MOP(Bitshift64, {Operand::kOpdShift, operand::kIsUse, 6}) +DEFINE_MOP(Extendshift64, {Operand::kOpdExtend, operand::kIsUse, 3}) + +DEFINE_MOP(ListSrc, {Operand::kOpdList, operand::kIsUse, 1}) +DEFINE_MOP(ListDest, {Operand::kOpdList, operand::kIsDef, 1}) +DEFINE_MOP(String0S, {Operand::kOpdString, operand::kIsUse, 0}) +DEFINE_MOP(AddressName, {Operand::kOpdBBAddress, operand::kIsUse, 64}) + +DEFINE_MOP(Lbl64, {Operand::kOpdBBAddress, operand::kIsUse, 64}) \ No newline at end of file diff --git a/src/mapleall/maple_be/include/cg/operand.h b/src/mapleall/maple_be/include/cg/operand.h new file mode 100644 index 0000000000000000000000000000000000000000..8833ae79c8bae3f57a5e892741d645592ca195fb --- /dev/null +++ b/src/mapleall/maple_be/include/cg/operand.h @@ -0,0 +1,1908 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
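The DEFINE_MOP entries in operand.def above, like abstract_mmir.def referenced from isa.h, are meant to be consumed through the X-macro technique: the including file defines DEFINE_MOP to extract whichever column it needs, includes the .def file, and then undefines the macro. A small self-contained illustration of that pattern, with an inline table macro standing in for the separate .def file:

#include <iostream>

// Normally this table lives in a .def file and is pulled in with #include.
#define OPERAND_TABLE(X) \
  X(Mem8S)               \
  X(Reg32ID)             \
  X(Imm12)

// First expansion: build an enum, one entry per DEFINE_MOP line.
#define DEFINE_MOP(op) op,
enum OperandDescId { OPERAND_TABLE(DEFINE_MOP) kOpdDescLast };
#undef DEFINE_MOP

// Second expansion: build a parallel name table from the same source.
#define DEFINE_MOP(op) #op,
const char *kOperandNames[] = { OPERAND_TABLE(DEFINE_MOP) };
#undef DEFINE_MOP

int main() {
  std::cout << kOperandNames[Reg32ID] << " has id " << Reg32ID << '\n';
  return 0;
}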
+ */ +#ifndef MAPLEBE_INCLUDE_CG_OPERAND_H +#define MAPLEBE_INCLUDE_CG_OPERAND_H + +#include "becommon.h" +#include "cg_option.h" +#include "visitor_common.h" + +/* maple_ir */ +#include "types_def.h" /* need uint8 etc */ +#include "prim_types.h" /* for PrimType */ +#include "mir_symbol.h" + +/* Mempool */ +#include "mempool_allocator.h" /* MapleList */ +#include "memlayout.h" + +namespace maplebe { +class OpndDesc; +class Emitter; + +bool IsBitSizeImmediate(uint64 val, uint32 bitLen, uint32 nLowerZeroBits); +bool IsBitmaskImmediate(uint64 val, uint32 bitLen); +bool IsMoveWidableImmediate(uint64 val, uint32 bitLen); +bool BetterUseMOVZ(uint64 val); + + +using MOperator = uint32; +enum RegType : maple::uint8 { + kRegTyUndef, + kRegTyInt, + kRegTyFloat, + kRegTyCc, + kRegTyX87, + kRegTyVary, + kRegTyFpsc, + kRegTyIndex, + kRegTyLast, +}; + +class Operand { + public: + enum OperandType : uint8 { + kOpdRegister, + kOpdImmediate, + kOpdMem, + kOpdCond, /* for condition code */ + kOpdPhi, /* for phi operand */ + kOpdFPImmediate, + kOpdStImmediate, /* use the symbol name as the offset */ + kOpdOffset, /* for the offset operand in MemOperand */ + kOpdBBAddress, + kOpdList, /* for list operand */ + kOpdShift, /* for imm shift operand */ + kOpdRegShift, /* for reg shift operand */ + kOpdExtend, /* for extend operand */ + kOpdString, /* for comments */ + kOpdUndef + }; + + Operand(OperandType type, uint32 size) : opndKind(type), size(size) {} + virtual ~Operand() = default; + + uint32 GetSize() const { + return size; + } + + void SetSize(uint32 sz) { + size = sz; + } + + OperandType GetKind() const { + return opndKind; + } + + bool IsIntImmediate() const { + return opndKind == kOpdImmediate || opndKind == kOpdOffset; + } + + bool IsConstImmediate() const { + return opndKind == kOpdImmediate || opndKind == kOpdOffset || opndKind == kOpdFPImmediate; + } + + bool IsOfstImmediate() const { + return opndKind == kOpdOffset; + } + + bool IsStImmediate() const { + return opndKind == kOpdStImmediate; + } + + bool IsImmediate() const { + return (kOpdFPImmediate <= opndKind && opndKind <= kOpdOffset) || opndKind == kOpdImmediate; + } + + bool IsRegister() const { + return opndKind == kOpdRegister; + } + + bool IsList() const { + return opndKind == kOpdList; + } + + bool IsPhi() const { + return opndKind == kOpdPhi; + } + + bool IsMemoryAccessOperand() const { + return opndKind == kOpdMem; + } + + bool IsLabel() const { + return opndKind == kOpdBBAddress; + } + + bool IsConditionCode() const { + return opndKind == kOpdCond; + } + + bool IsOpdShift() const { + return opndKind == kOpdShift; + } + + bool IsRegShift() const { + return opndKind == kOpdRegShift; + } + + bool IsOpdExtend() const { + return opndKind == kOpdExtend; + } + + virtual bool IsLabelOpnd() const { + return false; + } + + virtual bool IsFuncNameOpnd() const { + return false; + } + + virtual bool IsCommentOpnd() const { + return false; + } + + virtual bool IsLogicLSLOpnd() const { + return false; + } + + virtual Operand *Clone(MemPool &memPool) const = 0; + + /* + * A simple implementation here. + * Each subclass can elaborate on demand. 
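Operand::Equals above is deliberately conservative in the base class (identity plus BasicEquals); subclasses such as RegOperand refine it by also comparing their own state once kind and size match. A compact sketch of that layering, using hypothetical MiniOperand/MiniRegOperand types rather than the real hierarchy:

#include <cstdint>
#include <iostream>

class MiniOperand {                       // stand-in for Operand
 public:
  MiniOperand(uint8_t kind, uint32_t size) : kind(kind), size(size) {}
  virtual ~MiniOperand() = default;
  bool BasicEquals(const MiniOperand &o) const {
    return kind == o.kind && size == o.size;
  }
  virtual bool Equals(const MiniOperand &o) const {
    return this == &o;                    // base class: identity only
  }
 protected:
  uint8_t kind;                           // operand kind tag
  uint32_t size;                          // size in bits
};

class MiniRegOperand : public MiniOperand {
 public:
  MiniRegOperand(uint32_t regNO, uint32_t size)
      : MiniOperand(0, size), regNO(regNO) {}  // 0 marks "register" here
  bool Equals(const MiniOperand &o) const override {
    const auto *reg = dynamic_cast<const MiniRegOperand*>(&o);
    return reg != nullptr && BasicEquals(o) && regNO == reg->regNO;
  }
 private:
  uint32_t regNO;
};

int main() {
  MiniRegOperand r1(5, 64), r2(5, 64), r3(6, 64);
  std::cout << r1.Equals(r2) << ' ' << r1.Equals(r3) << '\n';   // 1 0
  return 0;
}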
+   */
+  virtual bool Equals(Operand &op) const {
+    return BasicEquals(op) && (&op == this);
+  }
+
+  bool BasicEquals(const Operand &op) const {
+    return opndKind == op.GetKind() && size == op.GetSize();
+  }
+
+  /*
+   * Operand hash content, ensuring uniqueness
+   */
+  virtual std::string GetHashContent() const {
+    return std::to_string(opndKind) + std::to_string(size);
+  }
+
+  virtual void Dump() const = 0;
+
+  virtual bool Less(const Operand &right) const = 0;
+
+  virtual void Accept(OperandVisitorBase &v) = 0;
+
+ protected:
+  OperandType opndKind;  /* operand type */
+  uint32 size;           /* size in bits */
+  uint64 flag = 0;       /* operand property */
+};
+
+/* RegOperand */
+enum RegOperandState : uint32 {
+  kRegOpndNone = 0,
+  kRegOpndSetLow32 = 0x1,
+  kRegOpndSetHigh32 = 0x2
+};
+
+template <typename VisitableTy>
+class OperandVisitable : public Operand {
+ public:
+  using Operand::Operand;
+  void Accept(OperandVisitorBase &v) override {
+    if (OperandVisitor<VisitableTy> *typeV = dynamic_cast<OperandVisitor<VisitableTy>*>(&v)) {
+      typeV->Visit(static_cast<VisitableTy*>(this));
+    } else {
+      /* the type which has no implements */
+    }
+  }
+};
+
+class RegOperand : public OperandVisitable<RegOperand> {
+ public:
+  RegOperand(regno_t regNum, uint32 size, RegType type, uint32 flg = 0)
+      : OperandVisitable(kOpdRegister, size),
+        regNO(regNum),
+        regType(type),
+        validBitsNum(size),
+        regFlag(flg) {}
+
+  ~RegOperand() override = default;
+  using OperandVisitable<RegOperand>::OperandVisitable;
+
+  Operand *Clone(MemPool &memPool) const override {
+    return memPool.Clone(*this);
+  }
+
+  void SetValidBitsNum(uint32 validNum) {
+    validBitsNum = validNum;
+  }
+
+  uint32 GetValidBitsNum() const {
+    return validBitsNum;
+  }
+
+  bool IsOfIntClass() const {
+    return regType == kRegTyInt;
+  }
+
+  bool IsOfFloatOrSIMDClass() const {
+    return regType == kRegTyFloat;
+  }
+
+  bool IsOfCC() const {
+    return regType == kRegTyCc;
+  }
+
+  bool IsOfVary() const {
+    return regType == kRegTyVary;
+  }
+
+  RegType GetRegisterType() const {
+    return regType;
+  }
+
+  void SetRegisterType(RegType newTy) {
+    regType = newTy;
+  }
+
+  virtual bool IsBBLocalReg() const {
+    return isBBLocal;
+  }
+
+  void SetRegNotBBLocal() {
+    isBBLocal = false;
+  }
+
+  regno_t GetRegisterNumber() const {
+    return regNO;
+  }
+
+  void SetRegisterNumber(regno_t regNum) {
+    regNO = regNum;
+  }
+
+  void Dump() const override {
+    LogInfo::MapleLogger() << "reg ";
+    LogInfo::MapleLogger() << "size : " << GetSize();
+    LogInfo::MapleLogger() << " NO_" << GetRegisterNumber();
+  };
+
+  bool Less(const Operand &right) const override {
+    if (&right == this) {
+      return false;
+    }
+
+    /* For different type. */
+    if (opndKind != right.GetKind()) {
+      return opndKind < right.GetKind();
+    }
+
+    auto *rightOpnd = static_cast<const RegOperand*>(&right);
+
+    /* The same type.
*/ + return regNO < rightOpnd->regNO; + } + + bool Less(const RegOperand &right) const { + return regNO < right.regNO; + } + + bool RegNumEqual(const RegOperand &right) const { + return regNO == right.GetRegisterNumber(); + } + + int32 RegCompare(const RegOperand &right) const { + return (regNO - right.GetRegisterNumber()); + } + + bool Equals(Operand &operand) const override { + if (!operand.IsRegister()) { + return false; + } + auto &op = static_cast(operand); + if (&op == this) { + return true; + } + return (BasicEquals(op) && regNO == op.GetRegisterNumber() && regType == op.GetRegisterType() && + IsBBLocalReg() == op.IsBBLocalReg()); + } + + std::string GetHashContent() const override { + return Operand::GetHashContent() + std::to_string(regNO) + std::to_string(regType); + } + + static bool IsSameRegNO(const Operand &firstOpnd, const Operand &secondOpnd) { + if (!firstOpnd.IsRegister() || !secondOpnd.IsRegister()) { + return false; + } + auto &firstReg = static_cast(firstOpnd); + auto &secondReg = static_cast(secondOpnd); + return firstReg.RegNumEqual(secondReg); + } + + static bool IsSameReg(const Operand &firstOpnd, const Operand &secondOpnd) { + if (firstOpnd.GetSize() != secondOpnd.GetSize()) { + return false; + } + return IsSameRegNO(firstOpnd, secondOpnd); + } + + void SetOpndSSAForm() { + isSSAForm = true; + } + + void SetOpndOutOfSSAForm() { + isSSAForm = false; + } + + bool IsSSAForm() const { + return isSSAForm; + } + + void SetRefField(bool newIsRefField) { + isRefField = newIsRefField; + } + + bool IsPhysicalRegister() const { + return GetRegisterNumber() < 100 && !IsOfCC(); + } + + bool IsVirtualRegister() const { + return !IsPhysicalRegister(); + } + + bool IsBBLocalVReg() const { + return IsVirtualRegister() && IsBBLocalReg(); + } + + void SetIF64Vec() { + if64Vec = true; + } + + bool GetIF64Vec() const { + return if64Vec; + } + + void SetVecLanePosition(int32 pos) { + vecLane = static_cast(pos); + } + + int32 GetVecLanePosition() const { + return vecLane; + } + + void SetVecLaneSize(uint32 size) { + vecLaneSize = static_cast(size); + } + + uint32 GetVecLaneSize() const { + return vecLaneSize; + } + + void SetVecElementSize(uint32 size) { + vecElementSize = size; + } + + uint64 GetVecElementSize() const { + return vecElementSize; + } + + void SetHigh8Bit() { + isHigh8Bit = true; + } + + bool IsHigh8Bit() { + return isHigh8Bit; + } + + bool operator==(const RegOperand &o) const; + + bool operator<(const RegOperand &o) const; + + protected: + regno_t regNO; + RegType regType; + + /* + * used for EBO(-O1), it can recognize the registers whose use and def are in different BB. It is + * true by default. Sometime it should be false such as when handle intrinsiccall for target + * aarch64(AArch64CGFunc::SelectIntrinCall). 
+   */
+  bool isBBLocal = true;
+  uint32 validBitsNum;
+  /* use for SSA analysis */
+  bool isSSAForm = false;
+  bool isRefField = false;
+  uint32 regFlag = 0;
+  int16 vecLane = -1;         /* -1 for whole reg, 0 to 15 to specify each lane one at a time */
+  uint16 vecLaneSize = 0;     /* Number of lanes */
+  uint64 vecElementSize = 0;  /* size of vector element in each lane */
+  bool if64Vec = false;       /* operand returning 64x1's int value in FP/Simd register */
+  bool isHigh8Bit = false;
+};  /* class RegOperand */
+
+enum VaryType : uint8 {
+  kNotVary = 0,
+  kUnAdjustVary,
+  kAdjustVary,
+};
+
+class ImmOperand : public OperandVisitable<ImmOperand> {
+ public:
+  ImmOperand(int64 val, uint32 size, bool isSigned, VaryType isVar = kNotVary, bool isFloat = false)
+      : OperandVisitable(kOpdImmediate, size), value(val), isSigned(isSigned), isVary(isVar), isFmov(isFloat) {}
+  ImmOperand(OperandType type, int64 val, uint32 size, bool isSigned, VaryType isVar = kNotVary, bool isFloat = false)
+      : OperandVisitable(type, size), value(val), isSigned(isSigned), isVary(isVar), isFmov(isFloat) {}
+  ImmOperand(const MIRSymbol &symbol, int64 val, int32 relocs, bool isSigned, VaryType isVar = kNotVary,
+      bool isFloat = false) : OperandVisitable(kOpdStImmediate, 0), value(val), isSigned(isSigned), isVary(isVar),
+      isFmov(isFloat), symbol(&symbol), relocs(relocs) {}
+  ~ImmOperand() override = default;
+  using OperandVisitable<ImmOperand>::OperandVisitable;
+
+  Operand *Clone(MemPool &memPool) const override {
+    return memPool.Clone(*this);
+  }
+
+  const MIRSymbol *GetSymbol() const {
+    return symbol;
+  }
+
+  const std::string &GetName() const {
+    return symbol->GetName();
+  }
+
+  int32 GetRelocs() const {
+    return relocs;
+  }
+
+  bool IsInBitSize(uint8 size, uint8 nLowerZeroBits) const {
+    return maplebe::IsBitSizeImmediate(static_cast<uint64>(value), size, nLowerZeroBits);
+  }
+
+  bool IsBitmaskImmediate() const {
+    ASSERT(!IsZero(), " 0 is reserved for bitmask immediate");
+    ASSERT(!IsAllOnes(), " -1 is reserved for bitmask immediate");
+    return maplebe::IsBitmaskImmediate(static_cast<uint64>(value), static_cast<uint32>(size));
+  }
+
+  bool IsBitmaskImmediate(uint32 destSize) const {
+    ASSERT(!IsZero(), " 0 is reserved for bitmask immediate");
+    ASSERT(!IsAllOnes(), " -1 is reserved for bitmask immediate");
+    return maplebe::IsBitmaskImmediate(static_cast<uint64>(value), static_cast<uint32>(destSize));
+  }
+
+  bool IsSingleInstructionMovable() const {
+    return (IsMoveWidableImmediate(static_cast<uint64>(value), static_cast<uint32>(size)) ||
+            IsMoveWidableImmediate(~static_cast<uint64>(value), static_cast<uint32>(size)) ||
+            IsBitmaskImmediate());
+  }
+
+  bool IsSingleInstructionMovable(uint32 destSize) const {
+    return (IsMoveWidableImmediate(static_cast<uint64>(value), static_cast<uint32>(destSize)) ||
+            IsMoveWidableImmediate(~static_cast<uint64>(value), static_cast<uint32>(destSize)) ||
+            IsBitmaskImmediate(destSize));
+  }
+
+  int64 GetValue() const {
+    return value;
+  }
+
+  void SetValue(int64 val) {
+    value = val;
+  }
+
+  void SetVary(VaryType flag) {
+    isVary = flag;
+  }
+
+  bool IsZero() const {
+    return value == 0;
+  }
+
+  VaryType GetVary() const {
+    return isVary;
+  }
+
+  bool IsOne() const {
+    return value == 1;
+  }
+
+  bool IsSignedValue() const {
+    return isSigned;
+  }
+
+  void SetSigned() {
+    isSigned = true;
+  }
+
+  void SetSigned(bool flag) {
+    isSigned = flag;
+  }
+
+  bool IsInBitSizeRot(uint8 size) const {
+    return IsInBitSizeRot(size, value);
+  }
+
+  static bool IsInBitSizeRot(uint8 size, int64 val) {
+    /* to tell if the val is in a rotate window of size */
+#if defined(__GNUC__) || defined(__clang__)
+    if (val == 0) {
+      return true;
+    }
+    int32
start = __builtin_ctzll(static_cast(val)); + int32 end = static_cast(sizeof(val) * kBitsPerByte - __builtin_clzll(static_cast(val)) - 1); + return (size >= end - start + 1); +#else + uint8 start = 0; + uint8 end = 0; + bool isFound = false; + CHECK_FATAL(val > 0, "do not perform bit operator operations on signed integers"); + for (uint8 i = 0; i < k64BitSize; ++i) { + /* check whether the ith bit of val is 1 or not */ + if (((static_cast(val) >> i) & 0x1) == 0x1) { + if (!isFound) { + start = i; + end = i; + isFound = true; + } else { + end = i; + } + } + } + return !isFound || (size >= (end - start) + 1); +#endif + } + + static bool IsInValueRange(int32 lowVal, int32 highVal, int32 val) { + return val >= lowVal && val <= highVal; + } + + bool IsNegative() const { + return isSigned && value < 0; + } + + void Add(int64 delta) { + value += delta; + } + + void Negate() { + value = -value; + } + + void BitwiseNegate() { + value = ~(static_cast(value)) & ((1ULL << size) - 1UL); + } + + void DivideByPow2(uint32 shift) { + value = (static_cast(value)) >> shift; + } + + void ModuloByPow2(uint32 shift) { + value = (static_cast(value)) & ((1ULL << shift) - 1UL); + } + + bool IsAllOnes() const { + return value == -1; + } + + bool IsAllOnes32bit() const { + return value == 0x0ffffffffLL; + } + + bool operator<(const ImmOperand &iOpnd) const { + return value < iOpnd.value || (value == iOpnd.value && iOpnd.isSigned) || + (value == iOpnd.value && isSigned == iOpnd.isSigned && size < iOpnd.GetSize()); + } + + bool operator==(const ImmOperand &iOpnd) const { + return (value == iOpnd.value && isSigned == iOpnd.isSigned && size == iOpnd.GetSize()); + } + + void Dump() const override; + + bool Less(const Operand &right) const override { + if (&right == this) { + return false; + } + + /* For different type. */ + if (opndKind != right.GetKind()) { + return opndKind < right.GetKind(); + } + + auto *rightOpnd = static_cast(&right); + + /* The same type. 
*/ + if (isSigned != rightOpnd->isSigned) { + return isSigned; + } + + if (isVary != rightOpnd->isVary) { + return isVary; + } + + return value < rightOpnd->value; + } + + bool Equals(Operand &operand) const override { + if (!operand.IsImmediate()) { + return false; + } + auto &op = static_cast(operand); + if (&op == this) { + return true; + } + return (BasicEquals(op) && value == op.GetValue() && isSigned == op.IsSignedValue()); + } + + std::string GetHashContent() const override { + return std::to_string(opndKind) + std::to_string(value) + std::to_string(static_cast(isSigned)) + + std::to_string(static_cast(isVary)) + std::to_string(static_cast(isFmov)); + } + + bool ValueEquals(const ImmOperand &op) const { + if (&op == this) { + return true; + } + return (value == op.GetValue() && isSigned == op.IsSignedValue()); + } + bool IsFmov() const { + return isFmov; + } + + protected: + int64 value; + bool isSigned; + VaryType isVary; + bool isFmov = false; + const MIRSymbol *symbol; /* for Immediate in symbol form */ + int32 relocs; +}; + +class OfstOperand : public ImmOperand { + public: + enum OfstType : uint8 { + kSymbolOffset, + kImmediateOffset, + kSymbolImmediateOffset, + }; + + /* only for symbol offset */ + OfstOperand(const MIRSymbol &mirSymbol, uint32 size, int32 relocs) + : ImmOperand(kOpdOffset, 0, size, true, kNotVary, false), + offsetType(kSymbolOffset), symbol(&mirSymbol), relocs(relocs) {} + /* only for Immediate offset */ + OfstOperand(int64 val, uint32 size, VaryType isVar = kNotVary) + : ImmOperand(kOpdOffset, static_cast(val), size, true, isVar, false), + offsetType(kImmediateOffset), symbol(nullptr), relocs(0) {} + /* for symbol and Immediate offset */ + OfstOperand(const MIRSymbol &mirSymbol, int64 val, uint32 size, int32 relocs, VaryType isVar = kNotVary) + : ImmOperand(kOpdOffset, val, size, true, isVar, false), + offsetType(kSymbolImmediateOffset), + symbol(&mirSymbol), + relocs(relocs) {} + + ~OfstOperand() override { + symbol = nullptr; + } + + Operand *Clone(MemPool &memPool) const override { + return memPool.Clone(*this); + } + + bool IsSymOffset() const { + return offsetType == kSymbolOffset; + } + bool IsImmOffset() const { + return offsetType == kImmediateOffset; + } + bool IsSymAndImmOffset() const { + return offsetType == kSymbolImmediateOffset; + } + + const MIRSymbol *GetSymbol() const { + return symbol; + } + + const std::string &GetSymbolName() const { + return symbol->GetName(); + } + + int64 GetOffsetValue() const { + return GetValue(); + } + + void SetOffsetValue(int32 offVal) { + SetValue(static_cast(offVal)); + } + + void AdjustOffset(int32 delta) { + Add(static_cast(delta)); + } + + bool operator==(const OfstOperand &opnd) const { + return (offsetType == opnd.offsetType && symbol == opnd.symbol && + ImmOperand::operator==(opnd) && relocs == opnd.relocs); + } + + bool operator<(const OfstOperand &opnd) const { + return (offsetType < opnd.offsetType || + (offsetType == opnd.offsetType && symbol < opnd.symbol) || + (offsetType == opnd.offsetType && symbol == opnd.symbol && GetValue() < opnd.GetValue())); + } + + void Dump() const override { + if (IsImmOffset()) { + LogInfo::MapleLogger() << "ofst:" << GetValue(); + } else { + LogInfo::MapleLogger() << GetSymbolName(); + LogInfo::MapleLogger() << "+offset:" << GetValue(); + } + } + + std::string GetHashContent() const override { + return ImmOperand::GetHashContent() + std::to_string(offsetType) + std::to_string(relocs); + } + + private: + OfstType offsetType; + const MIRSymbol *symbol; + int32 relocs; 
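/*
 * A minimal usage sketch of the three OfstOperand forms declared above, assuming a MIRSymbol
 * named sym and 64-bit operand sizes (both are illustrative assumptions, not taken from the
 * original header):
 *
 *   OfstOperand symOnly(sym, k64BitSize, 0);     // kSymbolOffset: offset given purely by a symbol
 *   OfstOperand immOnly(16, k64BitSize);         // kImmediateOffset: a plain #16
 *   OfstOperand both(sym, 16, k64BitSize, 0);    // kSymbolImmediateOffset: sym + #16
 *   immOnly.AdjustOffset(8);                     // GetOffsetValue() now returns 24
 */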
+};
+
+/*
+ * Table C1-6 A64 Load/Store addressing modes
+ *                           |             Offset
+ *   Addressing Mode         | Immediate     | Register             | Extended Register
+ *
+ *   Base register only      | [base{,#0}]   | -                    | -
+ *   (no offset)             | B_OI_NONE     |                      |
+ *                             imm=0
+ *
+ *   Base plus offset        | [base{,#imm}] | [base,Xm{,LSL #imm}] | [base,Wm,(S|U)XTW {#imm}]
+ *                             B_OI_NONE     | B_OR_X               | B_OR_X
+ *                             imm=0,1 (0,3) | imm=00,01,10,11 (0/2,s/u)
+ *
+ *   Pre-indexed              | [base, #imm]! | -                   | -
+ *
+ *   Post-indexed             | [base], #imm  | [base], Xm(a)       | -
+ *
+ *   Literal                  | label         | -                   | -
+ *   (PC-relative)
+ *
+ * a) The post-indexed by register offset mode can be used with the SIMD Load/Store
+ *    structure instructions described in Load/Store Vector on page C3-154. Otherwise
+ *    the post-indexed by register offset mode is not available.
+ */
+class MemOperand : public OperandVisitable<MemOperand> {
+ public:
+  enum AArch64AddressingMode : uint8 {
+    kAddrModeUndef,
+    /* AddrMode_BO, base, offset. EA = [base] + offset; */
+    kAddrModeBOi,  /* INTACT: EA = [base]+immediate */
+    /*
+     * PRE: base += immediate, EA = [base]
+     * POST: EA = [base], base += immediate
+     */
+    kAddrModeBOrX,  /* EA = [base]+Extend([offreg/idxreg]), OR=Wn/Xn */
+    kAddrModeLiteral,  /* AArch64 instruction LDR takes literal and */
+    /*
+     * "calculates an address from the PC value and an immediate offset,
+     * loads a word from memory, and writes it to a register."
+     */
+    kAddrModeLo12Li  // EA = [base] + #:lo12:Label+immediate. (Example: [x0, #:lo12:__Label300+456])
+  };
+  /*
+   * ARMv8-A A64 ISA Overview by Matteo Franchin @ ARM
+   * (presented at 64-bit Android on ARM. Sep. 2015) p.14
+   * o Address to load from/store to is a 64-bit base register + an optional offset
+   *     LDR X0, [X1]               ; Load from address held in X1
+   *     STR X0, [X1]               ; Store to address held in X1
+   *
+   * o Offset can be an immediate or a register
+   *     LDR X0, [X1, #8]           ; Load from address [X1 + 8 bytes]
+   *     LDR X0, [X1, #-8]          ; Load with negative offset
+   *     LDR X0, [X1, X2]           ; Load from address [X1 + X2]
+   *
+   * o A Wn register offset needs to be extended to 64 bits
+   *     LDR X0, [X1, W2, SXTW]     ; Sign-extend offset in W2
+   *     LDR X0, [X1, W2, UXTW]     ; Zero-extend offset in W2
+   *
+   * o Both Xn and Wn register offsets can include an optional left-shift
+   *     LDR X0, [X1, W2, UXTW #2]  ; Zero-extend offset in W2 & left-shift by 2
+   *     LDR X0, [X1, X2, LSL #2]   ; Left-shift offset in X2 by 2
+   *
+   * p.15
+   * Addressing Modes               Analogous C Code
+   *                                int *intptr = ...  // X1
+   *                                int out;           // W0
+   * o Simple: X1 is not changed
+   *     LDR W0, [X1]               out = *intptr;
+   * o Offset: X1 is not changed
+   *     LDR W0, [X1, #4]           out = intptr[1];
+   * o Pre-indexed: X1 changed before load
+   *     LDR W0, [X1, #4]!
=|ADD X1,X1,#4 out = *(++intptr); + * |LDR W0,[X1] + * o Post-indexed: X1 changed after load + * LDR W0, [X1], #4 =|LDR W0,[X1] out = *(intptr++); + * |ADD X1,X1,#4 + */ + enum ExtendInfo : uint8 { + kShiftZero = 0x1, + kShiftOne = 0x2, + kShiftTwo = 0x4, + kShiftThree = 0x8, + kUnsignedExtend = 0x10, + kSignExtend = 0x20 + }; + + enum IndexingOption : uint8 { + kIntact, /* base register stays the same */ + kPreIndex, /* base register gets changed before load */ + kPostIndex, /* base register gets changed after load */ + }; + + MemOperand(uint32 size) : + OperandVisitable(Operand::kOpdMem, size) {} + MemOperand(uint32 size, const MIRSymbol &mirSymbol) : OperandVisitable(Operand::kOpdMem, size), symbol(&mirSymbol) {} + + MemOperand(uint32 size, RegOperand *baseOp, RegOperand *indexOp, ImmOperand *ofstOp, const MIRSymbol *mirSymbol, + ImmOperand *scaleOp = nullptr) + : OperandVisitable(Operand::kOpdMem, size), + baseOpnd(baseOp), + indexOpnd(indexOp), + offsetOpnd(ofstOp), + scaleOpnd(scaleOp), + symbol(mirSymbol) {} + + MemOperand(RegOperand *base, OfstOperand *offset, uint32 size, IndexingOption idxOpt = kIntact) + : OperandVisitable(Operand::kOpdMem, size), + baseOpnd(base), + indexOpnd(nullptr), + offsetOpnd(offset), + symbol(nullptr), + addrMode(kAddrModeBOi), + extend(0), + idxOpt(idxOpt), + noExtend(false), + isStackMem(false) {} + + MemOperand(AArch64AddressingMode mode, uint32 size, RegOperand &base, RegOperand *index, + ImmOperand *offset, const MIRSymbol *sym) + : OperandVisitable(Operand::kOpdMem, size), + baseOpnd(&base), + indexOpnd(index), + offsetOpnd(offset), + symbol(sym), + addrMode(mode), + extend(0), + idxOpt(kIntact), + noExtend(false), + isStackMem(false) {} + + MemOperand(AArch64AddressingMode mode, uint32 size, RegOperand &base, RegOperand &index, + ImmOperand *offset, const MIRSymbol &sym, bool noExtend) + : OperandVisitable(Operand::kOpdMem, size), + baseOpnd(&base), + indexOpnd(&index), + offsetOpnd(offset), + symbol(&sym), + addrMode(mode), + extend(0), + idxOpt(kIntact), + noExtend(noExtend), + isStackMem(false) {} + + MemOperand(AArch64AddressingMode mode, uint32 dSize, RegOperand &baseOpnd, RegOperand &indexOpnd, + uint32 shift, bool isSigned = false) + : OperandVisitable(Operand::kOpdMem, dSize), + baseOpnd(&baseOpnd), + indexOpnd(&indexOpnd), + offsetOpnd(nullptr), + symbol(nullptr), + addrMode(mode), + extend((isSigned ? 
kSignExtend : kUnsignedExtend) | (1U << shift)), + idxOpt(kIntact), + noExtend(false), + isStackMem(false) {} + + MemOperand(AArch64AddressingMode mode, uint32 dSize, const MIRSymbol &sym) + : OperandVisitable(Operand::kOpdMem, dSize), + baseOpnd(nullptr), + indexOpnd(nullptr), + offsetOpnd(nullptr), + symbol(&sym), + addrMode(mode), + extend(0), + idxOpt(kIntact), + noExtend(false), + isStackMem(false) { + ASSERT(mode == kAddrModeLiteral, "This constructor version is supposed to be used with AddrMode_Literal only"); + } + + /* Copy constructor */ + explicit MemOperand(const MemOperand &memOpnd) + : OperandVisitable(Operand::kOpdMem, memOpnd.GetSize()), + baseOpnd(memOpnd.baseOpnd), + indexOpnd(memOpnd.indexOpnd), + offsetOpnd(memOpnd.offsetOpnd), + scaleOpnd(memOpnd.scaleOpnd), + symbol(memOpnd.symbol), + memoryOrder(memOpnd.memoryOrder), + addrMode(memOpnd.addrMode), + extend(memOpnd.extend), + idxOpt(memOpnd.idxOpt), + noExtend(memOpnd.noExtend), + isStackMem(memOpnd.isStackMem), + isStackArgMem(memOpnd.isStackArgMem) {} + + MemOperand &operator=(const MemOperand &memOpnd) = default; + + ~MemOperand() override = default; + using OperandVisitable::OperandVisitable; + + MemOperand *Clone(MemPool &memPool) const override { + return memPool.Clone(*this); + } + + void Dump() const override {}; + + RegOperand *GetBaseRegister() const { + return baseOpnd; + } + + void SetBaseRegister(RegOperand ®Opnd) { + baseOpnd = ®Opnd; + } + + RegOperand *GetIndexRegister() const { + return indexOpnd; + } + + void SetIndexRegister(RegOperand ®Opnd) { + indexOpnd = ®Opnd; + } + + ImmOperand *GetOffsetOperand() const { + return offsetOpnd; + } + + void SetOffsetOperand(ImmOperand &oftOpnd) { + offsetOpnd = &oftOpnd; + } + + const ImmOperand *GetScaleOperand() const { + return scaleOpnd; + } + + void SetScaleOperand(ImmOperand &scaOpnd) { + scaleOpnd = &scaOpnd; + } + + const MIRSymbol *GetSymbol() const { + return symbol; + } + + void SetMemoryOrdering(uint32 memOrder) { + memoryOrder |= memOrder; + } + + bool HasMemoryOrdering(uint32 memOrder) const { + return (memoryOrder & memOrder) != 0; + } + + void SetAccessSize(uint32 size) { + accessSize = size; + } + + uint8 GetAccessSize() const { + return accessSize; + } + + AArch64AddressingMode GetAddrMode() const { + return addrMode; + } + + const std::string &GetSymbolName() const { + return GetSymbol()->GetName(); + } + + bool IsStackMem() const { + return isStackMem; + } + + void SetStackMem(bool isStack) { + isStackMem = isStack; + } + + bool IsStackArgMem() const { + return isStackArgMem; + } + + void SetStackArgMem(bool isStackArg) { + isStackArgMem = isStackArg; + } + + Operand *GetOffset() const; + + OfstOperand *GetOffsetImmediate() const { + return static_cast(GetOffsetOperand()); + } + + /* Returns N where alignment == 2^N */ + static int32 GetImmediateOffsetAlignment(uint32 dSize) { + ASSERT(dSize >= k8BitSize, "error val:dSize"); + ASSERT(dSize <= k128BitSize, "error val:dSize"); + ASSERT((dSize & (dSize - 1)) == 0, "error val:dSize"); + /* dSize==8: 0, dSize==16 : 1, dSize==32: 2, dSize==64: 3 */ + return __builtin_ctz(dSize) - static_cast(kBaseOffsetAlignment); + } + + static int32 GetMaxPIMM(uint32 dSize) { + dSize = dSize > k64BitSize ? 
k64BitSize : dSize; + ASSERT(dSize >= k8BitSize, "error val:dSize"); + ASSERT(dSize <= k128BitSize, "error val:dSize"); + ASSERT((dSize & (dSize - 1)) == 0, "error val:dSize"); + int32 alignment = GetImmediateOffsetAlignment(dSize); + /* alignment is between kAlignmentOf8Bit and kAlignmentOf64Bit */ + ASSERT(alignment >= kOffsetAlignmentOf8Bit, "error val:alignment"); + ASSERT(alignment <= kOffsetAlignmentOf128Bit, "error val:alignment"); + return (kMaxPimm[alignment]); + } + + static int32 GetMaxPairPIMM(uint32 dSize) { + ASSERT(dSize >= k32BitSize, "error val:dSize"); + ASSERT(dSize <= k128BitSize, "error val:dSize"); + ASSERT((dSize & (dSize - 1)) == 0, "error val:dSize"); + int32 alignment = GetImmediateOffsetAlignment(dSize); + /* alignment is between kAlignmentOf8Bit and kAlignmentOf64Bit */ + ASSERT(alignment >= kOffsetAlignmentOf32Bit, "error val:alignment"); + ASSERT(alignment <= kOffsetAlignmentOf128Bit, "error val:alignment"); + return (kMaxPairPimm[static_cast(alignment) - k2BitSize]); + } + + bool IsOffsetMisaligned(uint32 dSize) const { + ASSERT(dSize >= k8BitSize, "error val:dSize"); + ASSERT(dSize <= k128BitSize, "error val:dSize"); + ASSERT((dSize & (dSize - 1)) == 0, "error val:dSize"); + if (dSize == k8BitSize) { + return false; + } + OfstOperand *ofstOpnd = GetOffsetImmediate(); + if (!ofstOpnd) { + return false; + } + int64 ofstVal = ofstOpnd->GetOffsetValue(); + if (addrMode == kAddrModeBOi) { + if (ofstVal >= kMinSimm32 && ofstVal <= kMaxSimm32) { + return false; + } + return ((static_cast(ofstOpnd->GetOffsetValue()) & + static_cast((1U << static_cast(GetImmediateOffsetAlignment(dSize))) - 1)) != 0); + } else if (addrMode == kAddrModeLo12Li) { + uint32 alignByte = (dSize / k8BitSize); + return ((ofstVal % alignByte) != k0BitSize); + } + return false; + } + + static bool IsSIMMOffsetOutOfRange(int64 offset, bool is64bit, bool isLDSTPair) { + if (!isLDSTPair) { + return (offset < kMinSimm32 || offset > kMaxSimm32); + } + if (is64bit) { + return (offset < kMinSimm64 || offset > kMaxSimm64Pair) || ((static_cast(offset) & k7BitSize) != 0); + } + return (offset < kMinSimm32 || offset > kMaxSimm32Pair) || ((static_cast(offset) & k3BitSize) != 0); + } + + static bool IsPIMMOffsetOutOfRange(int32 offset, uint32 dSize) { + ASSERT(dSize >= k8BitSize, "error val:dSize"); + ASSERT(dSize <= k128BitSize, "error val:dSize"); + ASSERT((dSize & (dSize - 1)) == 0, "error val:dSize"); + return (offset < 0 || offset > GetMaxPIMM(dSize)); + } + + bool operator<(const MemOperand &opnd) const { + return addrMode < opnd.addrMode || + (addrMode == opnd.addrMode && GetBaseRegister() < opnd.GetBaseRegister()) || + (addrMode == opnd.addrMode && GetBaseRegister() == opnd.GetBaseRegister() && + GetIndexRegister() < opnd.GetIndexRegister()) || + (addrMode == opnd.addrMode && GetBaseRegister() == opnd.GetBaseRegister() && + GetIndexRegister() == opnd.GetIndexRegister() && GetOffsetOperand() < opnd.GetOffsetOperand()) || + (addrMode == opnd.addrMode && GetBaseRegister() == opnd.GetBaseRegister() && + GetIndexRegister() == opnd.GetIndexRegister() && GetOffsetOperand() == opnd.GetOffsetOperand() && + GetSymbol() < opnd.GetSymbol()) || + (addrMode == opnd.addrMode && GetBaseRegister() == opnd.GetBaseRegister() && + GetIndexRegister() == opnd.GetIndexRegister() && GetOffsetOperand() == opnd.GetOffsetOperand() && + GetSymbol() == opnd.GetSymbol() && GetSize() < opnd.GetSize()) || + (addrMode == opnd.addrMode && GetBaseRegister() == opnd.GetBaseRegister() && + GetIndexRegister() == opnd.GetIndexRegister() 
&& GetOffsetOperand() == opnd.GetOffsetOperand() && + GetSymbol() == opnd.GetSymbol() && GetSize() == opnd.GetSize() && extend < opnd.extend); + } + + bool operator==(const MemOperand &opnd) const { + return (GetSize() == opnd.GetSize()) && (addrMode == opnd.addrMode) && (extend == opnd.extend) && + (GetBaseRegister() == opnd.GetBaseRegister()) && + (GetIndexRegister() == opnd.GetIndexRegister()) && + (GetSymbol() == opnd.GetSymbol()) && + (GetOffsetOperand() == opnd.GetOffsetOperand()) ; + } + + VaryType GetMemVaryType() const { + Operand *ofstOpnd = GetOffsetOperand(); + if (ofstOpnd != nullptr) { + auto *opnd = static_cast(ofstOpnd); + return opnd->GetVary(); + } + return kNotVary; + } + + void SetAddrMode(AArch64AddressingMode val) { + addrMode = val; + } + + bool IsExtendedRegisterMode() const { + return addrMode == kAddrModeBOrX; + } + + void UpdateExtend(ExtendInfo flag) { + extend = flag | (1U << ShiftAmount()); + } + + bool SignedExtend() const { + return IsExtendedRegisterMode() && ((extend & kSignExtend) != 0); + } + + bool UnsignedExtend() const { + return IsExtendedRegisterMode() && !SignedExtend(); + } + + uint32 ShiftAmount() const { + uint32 scale = extend & 0xF; + /* 8 is 1 << 3, 4 is 1 << 2, 2 is 1 << 1, 1 is 1 << 0; */ + return (scale == 8) ? 3 : ((scale == 4) ? 2 : ((scale == 2) ? 1 : 0)); + } + + bool ShouldEmitExtend() const { + return !noExtend && ((extend & 0x3F) != 0); + } + + IndexingOption GetIndexOpt() const { + return idxOpt; + } + + void SetIndexOpt(IndexingOption newidxOpt) { + idxOpt = newidxOpt; + } + + bool GetNoExtend() const { + return noExtend; + } + + void SetNoExtend(bool val) { + noExtend = val; + } + + uint32 GetExtend() const { + return extend; + } + + void SetExtend(uint32 val) { + extend = val; + } + + bool IsIntactIndexed() const { + return idxOpt == kIntact; + } + + bool IsPostIndexed() const { + return idxOpt == kPostIndex; + } + + bool IsPreIndexed() const { + return idxOpt == kPreIndex; + } + + std::string GetExtendAsString() const { + if (GetIndexRegister()->GetSize() == k64BitSize) { + return std::string("LSL"); + } + return ((extend & kSignExtend) != 0) ? std::string("SXTW") : std::string("UXTW"); + } + + /* Return true if given operand has the same base reg and offset with this. */ + bool Equals(Operand &op) const override; + bool Equals(const MemOperand &op) const; + bool Less(const Operand &right) const override; + + private: + RegOperand *baseOpnd = nullptr; /* base register */ + RegOperand *indexOpnd = nullptr; /* index register */ + ImmOperand *offsetOpnd = nullptr; /* offset immediate */ + ImmOperand *scaleOpnd = nullptr; + const MIRSymbol *symbol; /* AddrMode_Literal */ + uint32 memoryOrder = 0; + uint8 accessSize = 0; /* temp, must be set right before use everytime. 
*/ + AArch64AddressingMode addrMode = kAddrModeBOi; + uint32 extend = false; /* used with offset register ; AddrMode_B_OR_X */ + IndexingOption idxOpt = kIntact; /* used with offset immediate ; AddrMode_B_OI */ + bool noExtend = false; + bool isStackMem = false; + bool isStackArgMem = false; +}; + +class LabelOperand : public OperandVisitable { + public: + LabelOperand(const char *parent, LabelIdx labIdx, MemPool &mp) + : OperandVisitable(kOpdBBAddress, 0), labelIndex(labIdx), parentFunc(parent, &mp), orderID(-1u) {} + + ~LabelOperand() override = default; + using OperandVisitable::OperandVisitable; + + Operand *Clone(MemPool &memPool) const override { + return memPool.Clone(*this); + } + + bool IsLabelOpnd() const override { + return true; + } + + LabelIdx GetLabelIndex() const { + return labelIndex; + } + + const MapleString &GetParentFunc() const { + return parentFunc; + } + + LabelIDOrder GetLabelOrder() const { + return orderID; + } + + void SetLabelOrder(LabelIDOrder idx) { + orderID = idx; + } + + void Dump() const override; + + bool Less(const Operand &right) const override { + if (&right == this) { + return false; + } + + /* For different type. */ + if (opndKind != right.GetKind()) { + return opndKind < right.GetKind(); + } + + auto *rightOpnd = static_cast(&right); + + int32 nRes = strcmp(parentFunc.c_str(), rightOpnd->parentFunc.c_str()); + if (nRes == 0) { + return labelIndex < rightOpnd->labelIndex; + } else { + return nRes < 0; + } + } + + bool Equals(Operand &operand) const override { + if (!operand.IsLabel()) { + return false; + } + auto &op = static_cast(operand); + return ((&op == this) || (op.GetLabelIndex() == labelIndex)); + } + + protected: + LabelIdx labelIndex; + const MapleString parentFunc; + + private: + /* this index records the order this label is defined during code emit. */ + LabelIDOrder orderID = -1u; +}; + +class ListOperand : public OperandVisitable { + public: + explicit ListOperand(MapleAllocator &allocator) + : OperandVisitable(Operand::kOpdList, 0), + opndList(allocator.Adapter()) {} + + ~ListOperand() override = default; + + using OperandVisitable::OperandVisitable; + + Operand *Clone(MemPool &memPool) const override { + return memPool.Clone(*this); + } + + void PushOpnd(RegOperand &opnd) { + opndList.push_back(&opnd); + } + + MapleList &GetOperands() { + return opndList; + } + + const MapleList &GetOperands() const { + return opndList; + } + + void Dump() const override { + for (auto it = opndList.begin(); it != opndList.end();) { + (*it)->Dump(); + LogInfo::MapleLogger() << (++it == opndList.end() ? "" : " ,"); + } + } + + bool Less(const Operand &right) const override { + /* For different type. 
*/ + if (opndKind != right.GetKind()) { + return opndKind < right.GetKind(); + } + + ASSERT(false, "We don't need to compare list operand."); + return false; + } + + bool Equals(Operand &operand) const override { + if (!operand.IsList()) { + return false; + } + auto &op = static_cast(operand); + return (&op == this); + } + + protected: + MapleList opndList; +}; + +/* representing for global variables address */ +class StImmOperand : public OperandVisitable { + public: + StImmOperand(const MIRSymbol &symbol, int64 offset, int32 relocs) + : OperandVisitable(kOpdStImmediate, 0), symbol(&symbol), offset(offset), relocs(relocs) {} + + ~StImmOperand() override = default; + using OperandVisitable::OperandVisitable; + + Operand *Clone(MemPool &memPool) const override { + return memPool.Clone(*this); + } + + const MIRSymbol *GetSymbol() const { + return symbol; + } + + const std::string &GetName() const { + return symbol->GetName(); + } + + int64 GetOffset() const { + return offset; + } + + void SetOffset(int64 newOffset) { + offset = newOffset; + } + + int32 GetRelocs() const { + return relocs; + } + + bool operator==(const StImmOperand &opnd) const { + return (symbol == opnd.symbol && offset == opnd.offset && relocs == opnd.relocs); + } + + bool operator<(const StImmOperand &opnd) const { + return (symbol < opnd.symbol || (symbol == opnd.symbol && offset < opnd.offset) || + (symbol == opnd.symbol && offset == opnd.offset && relocs < opnd.relocs)); + } + + bool Less(const Operand &right) const override; + + void Dump() const override { + CHECK_FATAL(false, "dont run here"); + } + + private: + const MIRSymbol *symbol; + int64 offset; + int32 relocs; +}; + +class ExtendShiftOperand : public OperandVisitable { + public: + /* if and only if at least one register is WSP, ARM Recommends use of the LSL operator name rathe than UXTW */ + enum ExtendOp : uint8 { + kUndef, + kUXTB, + kUXTH, + kUXTW, /* equal to lsl in 32bits */ + kUXTX, /* equal to lsl in 64bits */ + kSXTB, + kSXTH, + kSXTW, + kSXTX, + }; + + ExtendShiftOperand(ExtendOp op, uint32 amt, int32 bitLen) + : OperandVisitable(Operand::kOpdExtend, bitLen), extendOp(op), shiftAmount(amt) {} + + ~ExtendShiftOperand() override = default; + using OperandVisitable::OperandVisitable; + + Operand *Clone(MemPool &memPool) const override { + return memPool.Clone(*this); + } + + uint32 GetShiftAmount() const { + return shiftAmount; + } + + ExtendOp GetExtendOp() const { + return extendOp; + } + + bool Less(const Operand &right) const override; + + void Dump() const override { + CHECK_FATAL(false, "dont run here"); + } + + bool Equals(Operand &operand) const override { + if (!operand.IsOpdExtend()) { + return false; + } + auto &op = static_cast(operand); + return ((&op == this) || (op.GetExtendOp() == extendOp && op.GetShiftAmount() == shiftAmount)); + } + + std::string GetHashContent() const override { + return std::to_string(opndKind) + std::to_string(extendOp) + std::to_string(shiftAmount); + } + + private: + ExtendOp extendOp; + uint32 shiftAmount; +}; + +class BitShiftOperand : public OperandVisitable { + public: + enum ShiftOp : uint8 { + kUndef, + kLSL, /* logical shift left */ + kLSR, /* logical shift right */ + kASR, /* arithmetic shift right */ + kROR, /* rotate shift right */ + }; + + /* bitlength is equal to 5 or 6 */ + BitShiftOperand(ShiftOp op, uint32 amt, int32 bitLen) + : OperandVisitable(Operand::kOpdShift, bitLen), shiftOp(op), shiftAmount(amt) {} + + ~BitShiftOperand() override = default; + using OperandVisitable::OperandVisitable; + + 
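/*
 * A minimal mapping sketch from assembler syntax to the two operand classes above, assuming the
 * bit lengths used by operand.def earlier in this patch (5/6 bits for shift operands, 3 bits for
 * extend operands); the variable names are illustrative only:
 *
 *   BitShiftOperand    lsl3(BitShiftOperand::kLSL, 3, 6);       // "LSL #3" on a 64-bit operand
 *   BitShiftOperand    lsr2(BitShiftOperand::kLSR, 2, 5);       // "LSR #2" on a 32-bit operand
 *   ExtendShiftOperand sxtw2(ExtendShiftOperand::kSXTW, 2, 3);  // "SXTW #2" extended-register offset
 */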
Operand *Clone(MemPool &memPool) const override { + return memPool.Clone(*this); + } + + bool Less(const Operand &right) const override { + if (&right == this) { + return false; + } + + /* For different type. */ + if (GetKind() != right.GetKind()) { + return GetKind() < right.GetKind(); + } + + const BitShiftOperand *rightOpnd = static_cast(&right); + + /* The same type. */ + if (shiftOp != rightOpnd->shiftOp) { + return shiftOp < rightOpnd->shiftOp; + } + return shiftAmount < rightOpnd->shiftAmount; + } + + uint32 GetShiftAmount() const { + return shiftAmount; + } + + ShiftOp GetShiftOp() const { + return shiftOp; + } + + void Dump() const override { + CHECK_FATAL(false, "dont run here"); + } + + bool Equals(Operand &operand) const override { + if (!operand.IsOpdShift()) { + return false; + } + auto &op = static_cast(operand); + return ((&op == this) || (op.GetShiftOp() == shiftOp && op.GetShiftAmount() == shiftAmount)); + } + + std::string GetHashContent() const override { + return std::to_string(opndKind) + std::to_string(shiftOp) + std::to_string(shiftAmount); + } + + private: + ShiftOp shiftOp; + uint32 shiftAmount; +}; + +class CommentOperand : public OperandVisitable { + public: + CommentOperand(const char *str, MemPool &memPool) + : OperandVisitable(Operand::kOpdString, 0), comment(str, &memPool) {} + + CommentOperand(const std::string &str, MemPool &memPool) + : OperandVisitable(Operand::kOpdString, 0), comment(str, &memPool) {} + + ~CommentOperand() override = default; + using OperandVisitable::OperandVisitable; + + const MapleString &GetComment() const { + return comment; + } + + Operand *Clone(MemPool &memPool) const override { + return memPool.Clone(*this); + } + + bool IsCommentOpnd() const override { + return true; + } + + bool Less(const Operand &right) const override { + /* For different type. */ + return GetKind() < right.GetKind(); + } + + void Dump() const override { + LogInfo::MapleLogger() << "# "; + if (!comment.empty()) { + LogInfo::MapleLogger() << comment; + } + } + + private: + const MapleString comment; +}; + +using StringOperand = CommentOperand; + +class ListConstraintOperand : public OperandVisitable { + public: + explicit ListConstraintOperand(MapleAllocator &allocator) + : OperandVisitable(Operand::kOpdString, 0), + stringList(allocator.Adapter()) {}; + + ~ListConstraintOperand() override = default; + using OperandVisitable::OperandVisitable; + + void Dump() const override { + for (auto *str : stringList) { + LogInfo::MapleLogger() << "(" << str->GetComment().c_str() << ")"; + } + } + + Operand *Clone(MemPool &memPool) const override { + return memPool.Clone(*this); + } + + bool Less(const Operand &right) const override { + /* For different type. 
*/ + if (opndKind != right.GetKind()) { + return opndKind < right.GetKind(); + } + + ASSERT(false, "We don't need to compare list operand."); + return false; + } + + MapleVector stringList; +}; + +/* for cg ssa analysis */ +class PhiOperand : public OperandVisitable { + public: + explicit PhiOperand(MapleAllocator &allocator) + : OperandVisitable(Operand::kOpdPhi, 0), + phiList(allocator.Adapter()) {} + + ~PhiOperand() override = default; + using OperandVisitable::OperandVisitable; + + Operand *Clone(MemPool &memPool) const override { + return memPool.Clone(*this); + } + + void Dump() const override { + CHECK_FATAL(false, "NIY"); + } + + void InsertOpnd(uint32 bbId, RegOperand &phiParam) { + ASSERT(!phiList.count(bbId), "cannot insert duplicate operand"); + (void)phiList.emplace(std::pair(bbId, &phiParam)); + } + + void UpdateOpnd(uint32 bbId, uint32 newId, RegOperand &phiParam) { + (void)phiList.emplace(std::pair(newId, &phiParam)); + phiList.erase(bbId); + } + + MapleMap &GetOperands() { + return phiList; + } + + const MapleMap &GetOperands() const { + return phiList; + } + + uint32 GetLeastCommonValidBit() const; + + bool IsRedundancy() const; + + bool Less(const Operand &right) const override { + /* For different type. */ + if (opndKind != right.GetKind()) { + return opndKind < right.GetKind(); + } + ASSERT(false, "We don't need to compare list operand."); + return false; + } + + bool Equals(Operand &operand) const override { + if (!operand.IsPhi()) { + return false; + } + auto &op = static_cast(operand); + return (&op == this); + } + + protected: + MapleMap phiList; /* ssa-operand && BBId */ +}; + +/* Use StImmOperand instead? */ +class FuncNameOperand : public OperandVisitable { + public: + explicit FuncNameOperand(const MIRSymbol &fsym) : OperandVisitable(kOpdBBAddress, 0), + symbol(&fsym) {} + + ~FuncNameOperand() override { + symbol = nullptr; + } + using OperandVisitable::OperandVisitable; + + const std::string &GetName() const { + return symbol->GetName(); + } + + bool IsFuncNameOpnd() const override { + return true; + } + + const MIRSymbol *GetFunctionSymbol() const { + return symbol; + } + + void SetFunctionSymbol(const MIRSymbol &fsym) { + symbol = &fsym; + } + + Operand *Clone(MemPool &memPool) const override { + return memPool.New(*this); + } + + bool Less(const Operand &right) const override { + if (&right == this) { + return false; + } + /* For different type. 
*/ + if (GetKind() != right.GetKind()) { + return GetKind() < right.GetKind(); + } + + auto *rightOpnd = static_cast(&right); + + return static_cast(symbol) < static_cast(rightOpnd->symbol); + } + + void Dump() const override { + LogInfo::MapleLogger() << GetName(); + } + + private: + const MIRSymbol *symbol; +}; + +namespace operand { +/* bit 0-7 for common */ +enum CommOpndDescProp : maple::uint64 { + kIsDef = 1ULL, + kIsUse = (1ULL << 1), + kIsVector = (1ULL << 2) + +}; + +/* bit 8-15 for reg */ +enum RegOpndDescProp : maple::uint64 { + kInt = (1ULL << 8), + kFloat = (1ULL << 9), + kRegTyCc = (1ULL << 10), + kRegTyVary = (1ULL << 11), +}; + +/* bit 16-23 for imm */ +enum ImmOpndDescProp : maple::uint64 { + +}; + +/* bit 24-31 for mem */ +enum MemOpndDescProp : maple::uint64 { + kMemLow12 = (1ULL << 24), + kLiteralLow12 = kMemLow12, + kIsLoadLiteral = (1ULL << 25) + +}; +} + +class OpndDesc { + public: + OpndDesc(Operand::OperandType t, maple::uint64 p, maple::uint32 s) : opndType(t), property(p), size(s) {} + virtual ~OpndDesc() = default; + + Operand::OperandType GetOperandType() const { + return opndType; + } + + maple::uint32 GetSize() const { + return size; + } + + bool IsImm() const { + return opndType == Operand::kOpdImmediate; + } + + bool IsRegister() const { + return opndType == Operand::kOpdRegister; + } + + bool IsMem() const { + return opndType == Operand::kOpdMem; + } + + bool IsRegDef() const { + return opndType == Operand::kOpdRegister && ((property & operand::kIsDef) != 0); + } + + bool IsRegUse() const { + return opndType == Operand::kOpdRegister && ((property & operand::kIsUse) != 0); + } + + bool IsDef() const { + return (property & operand::kIsDef) != 0; + } + + bool IsUse() const { + return (property & operand::kIsUse) != 0; + } + + bool IsMemLow12() const { + return IsMem() && ((property & operand::kMemLow12) != 0); + } + + bool IsLiteralLow12() const { + return opndType == Operand::kOpdStImmediate && ((property & operand::kLiteralLow12) != 0); + } + + bool IsLoadLiteral() const { + return (property & operand::kIsLoadLiteral) != 0; + } + + bool IsVectorOperand() const { + return (property & operand::kIsVector); + } + + bool IsIntOperand() const { + return (property & operand::kInt); + } + +#define DEFINE_MOP(op, ...) 
static const OpndDesc op; +#include "operand.def" +#undef DEFINE_MOP + + private: + Operand::OperandType opndType; + maple::uint64 property; + maple::uint32 size; +}; + +class CondOperand : public OperandVisitable { + public: + explicit CondOperand(maplebe::ConditionCode cc) : OperandVisitable(Operand::kOpdCond, k4ByteSize), cc(cc) {} + + ~CondOperand() override = default; + using OperandVisitable::OperandVisitable; + + Operand *Clone(MemPool &memPool) const override { + return memPool.New(cc); + } + + ConditionCode GetCode() const { + return cc; + } + + bool Less(const Operand &right) const override; + + void Dump() const override { + CHECK_FATAL(false, "dont run here"); + } + + static const char *ccStrs[kCcLast]; + + private: + ConditionCode cc; +}; + +class OpndDumpVisitor : public OperandVisitorBase, + public OperandVisitors { + public: + explicit OpndDumpVisitor(const OpndDesc &operandDesc) : opndDesc(&operandDesc) {} + ~OpndDumpVisitor() override { + opndDesc = nullptr; + } + + protected: + virtual void DumpOpndPrefix() { + LogInfo::MapleLogger() << " (opnd:"; + } + virtual void DumpOpndSuffix() { + LogInfo::MapleLogger() << " )"; + } + void DumpSize(const Operand &opnd) const { + LogInfo::MapleLogger() << " [size:" << opnd.GetSize() << "]"; + } + const OpndDesc *GetOpndDesc() const { + return opndDesc; + } + + private: + const OpndDesc *opndDesc; +}; +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_OPERAND_H */ diff --git a/src/mapleall/maple_be/include/cg/optimize_common.h b/src/mapleall/maple_be/include/cg/optimize_common.h new file mode 100644 index 0000000000000000000000000000000000000000..b65252979b563d7eca25843f2550788b3ca023c4 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/optimize_common.h @@ -0,0 +1,132 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MAPLEBE_INCLUDE_CG_OPTIMIZE_COMMON_H +#define MAPLEBE_INCLUDE_CG_OPTIMIZE_COMMON_H +#include "cgfunc.h" + +namespace maplebe { +inline const std::string kCfgoChaining = "red"; +inline const std::string kCfgoSj = "burlywood1"; +inline const std::string kCfgoFlipCond = "cadetblue1"; +inline const std::string kCfgoAlways = "green"; +inline const std::string kCfgoUnreach = "yellow"; +inline const std::string kCfgoDup = "orange"; +inline const std::string kCfgoEmpty = "purple"; +inline const std::string kIcoIte = "blue"; /* if conversion optimization, if-then-else */ +inline const std::string kIcoIt = "grey"; /* if conversion optimization, if-then-else */ + +class OptimizationPattern { + public: + explicit OptimizationPattern(CGFunc &func) + : patternName(func.GetMemoryPool()), + cgFunc(&func), + dotColor(func.GetMemoryPool()) {} + virtual ~OptimizationPattern() = default; + + bool IsKeepPosition() const { + return keepPosition; + } + + void SetKeepPosition(bool flag) { + keepPosition = flag; + } + + bool IsLabelInLSDAOrSwitchTable(LabelIdx label) const { + EHFunc *ehFunc = cgFunc->GetEHFunc(); + return (ehFunc != nullptr && cgFunc->GetTheCFG()->InLSDA(label, ehFunc)) || + cgFunc->GetTheCFG()->InSwitchTable(label, *cgFunc); + } + + void Search2Op(bool noOptimize); + virtual bool Optimize(BB &curBB) = 0; + + protected: + void Log(uint32 bbID); + + bool keepPosition = false; + MapleString patternName; + CGFunc *cgFunc; + MapleString dotColor; + bool checkOnly = false; +}; + +class Optimizer { + public: + Optimizer(CGFunc &func, MemPool &memPool) + : cgFunc(&func), + name(nullptr), + memPool(&memPool), + alloc(&memPool), + diffPassPatterns(alloc.Adapter()), + singlePassPatterns(alloc.Adapter()) { + func.GetTheCFG()->InitInsnVisitor(func); + } + + virtual ~Optimizer() = default; + void Run(const std::string &funcName, bool checkOnly = false); + virtual void InitOptimizePatterns() = 0; + + protected: + CGFunc *cgFunc; + const char *name; + MemPool *memPool; + MapleAllocator alloc; + /* patterns need to run in different passes of cgFunc */ + MapleVector diffPassPatterns; + /* patterns can run in a single pass of cgFunc */ + MapleVector singlePassPatterns; +}; + +class OptimizeLogger { + public: + static OptimizeLogger &GetLogger() { + static OptimizeLogger instance; + return instance; + } + + void Log(const std::string &patternName); + void ClearLocal(); + void Print(const std::string &funcName); + + private: + OptimizeLogger() = default; + + ~OptimizeLogger() = default; + + OptimizeLogger(const OptimizeLogger&); + OptimizeLogger &operator=(const OptimizeLogger&); + + std::map globalStat; + std::map localStat; +}; + +class DotGenerator { + public: + static void SetColor(uint32 bbID, const std::string &color); + static void GenerateDot(const std::string &preFix, const CGFunc &cgFunc, const MIRModule &mod, + bool includeEH = false, const std::string fname = "", regno_t vReg = 0); + private: + static std::map coloringMap; + static std::string GetFileName(const MIRModule &mirModule, const std::string &filePreFix); + static bool IsBackEdge(const CGFunc &cgFunction, const BB &from, const BB &to); + static void DumpEdge(const CGFunc &cgFunction, std::ofstream &cfgFileOfStream, bool isIncludeEH); + static void DumpBBInstructions(const CGFunc &cgFunction, regno_t vReg, std::ofstream &cfgFile); + static bool FoundListOpndRegNum(ListOperand &listOpnd, const Insn &insnObj, regno_t vReg); + static bool FoundMemAccessOpndRegNum(const MemOperand &memOperand, const Insn &insnObj, regno_t vReg); + 
static bool FoundNormalOpndRegNum(const RegOperand ®Opnd, const Insn &insnObj, regno_t vReg); +}; +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_OPTIMIZE_COMMON_H */ diff --git a/src/mapleall/maple_be/include/cg/peep.h b/src/mapleall/maple_be/include/cg/peep.h new file mode 100644 index 0000000000000000000000000000000000000000..549c0ea9751341cb18b600b81efcb976841e3bac --- /dev/null +++ b/src/mapleall/maple_be/include/cg/peep.h @@ -0,0 +1,236 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_CG_PEEP_H +#define MAPLEBE_INCLUDE_CG_PEEP_H + +#include "cg.h" +#include "optimize_common.h" + +namespace maplebe { +enum ReturnType : uint8 { + kResUseFirst, + kResDefFirst, + kResNotFind +}; + +class PeepOptimizeManager { + public: + /* normal constructor */ + PeepOptimizeManager(CGFunc &f, BB &bb, Insn &insn) + : cgFunc(&f), + currBB(&bb), + currInsn(&insn), + ssaInfo(nullptr) {} + /* constructor for ssa */ + PeepOptimizeManager(CGFunc &f, BB &bb, Insn &insn, CGSSAInfo &info) + : cgFunc(&f), + currBB(&bb), + currInsn(&insn), + ssaInfo(&info) {} + ~PeepOptimizeManager() = default; + template + void Optimize(bool patternEnable = false) { + if (!patternEnable) { + return; + } + OptimizePattern optPattern(*cgFunc, *currBB, *currInsn, *ssaInfo); + optPattern.Run(*currBB, *currInsn); + optSuccess |= optPattern.GetPatternRes(); + if (optSuccess && optPattern.GetCurrInsn() != nullptr) { + currInsn = optPattern.GetCurrInsn(); + } + } + template + void NormalPatternOpt(bool patternEnable = false) { + if (!patternEnable) { + return; + } + OptimizePattern optPattern(*cgFunc, *currBB, *currInsn); + optPattern.Run(*currBB, *currInsn); + } + void SetOptSuccess(bool optRes) { + optSuccess = optRes; + } + bool OptSuccess() const { + return optSuccess; + } + private: + CGFunc *cgFunc; + BB *currBB; + Insn *currInsn; + CGSSAInfo *ssaInfo; + bool optSuccess = false; +}; + +class CGPeepHole { + public: + /* normal constructor */ + CGPeepHole(CGFunc &f, MemPool *memPool) + : cgFunc(&f), + peepMemPool(memPool), + ssaInfo(nullptr) {} + /* constructor for ssa */ + CGPeepHole(CGFunc &f, MemPool *memPool, CGSSAInfo *cgssaInfo) + : cgFunc(&f), + peepMemPool(memPool), + ssaInfo(cgssaInfo) {} + ~CGPeepHole() = default; + + virtual void Run() = 0; + virtual bool DoSSAOptimize(BB &bb, Insn &insn) = 0; + virtual void DoNormalOptimize(BB &bb, Insn &insn) = 0; + + protected: + CGFunc *cgFunc; + MemPool *peepMemPool; + CGSSAInfo *ssaInfo; + PeepOptimizeManager *manager = nullptr; +}; + +class PeepPattern { + public: + explicit PeepPattern(CGFunc &oneCGFunc) : cgFunc(oneCGFunc) {} + virtual ~PeepPattern() = default; + virtual void Run(BB &bb, Insn &insn) = 0; + /* optimization support function */ + bool IfOperandIsLiveAfterInsn(const RegOperand ®Opnd, Insn &insn); + bool FindRegLiveOut(const RegOperand ®Opnd, const BB &bb); + bool CheckOpndLiveinSuccs(const RegOperand ®Opnd, const BB &bb) const; + bool CheckRegLiveinReturnBB(const RegOperand ®Opnd, const BB &bb) const; + 
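/*
 * Note on the liveness helpers above and below (behaviour inferred from the declarations and the
 * ReturnType enum at the top of this header): IsOpndLiveinBB reports whether regOpnd is read
 * before it is written in a block (kResUseFirst), written first (kResDefFirst), or not referenced
 * at all (kResNotFind); IfOperandIsLiveAfterInsn builds on that to decide whether a definition is
 * still needed, e.g. before a pattern deletes an apparently dead copy.
 */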
ReturnType IsOpndLiveinBB(const RegOperand ®Opnd, const BB &bb) const; + int LogValueAtBase2(int64 val) const; + bool IsMemOperandOptPattern(const Insn &insn, Insn &nextInsn); + + protected: + CGFunc &cgFunc; +}; + +class CGPeepPattern { + public: + /* normal constructor */ + CGPeepPattern(CGFunc &f, BB &bb, Insn &insn) + : cgFunc(&f), + currBB(&bb), + currInsn(&insn), + ssaInfo(nullptr) {} + /* constructor for ssa */ + CGPeepPattern(CGFunc &f, BB &bb, Insn &insn, CGSSAInfo &info) + : cgFunc(&f), + currBB(&bb), + currInsn(&insn), + ssaInfo(&info) {} + virtual ~CGPeepPattern() = default; + + std::string PhaseName() const { + return "cgpeephole"; + } + + virtual std::string GetPatternName() = 0; + void DumpAfterPattern(std::vector &prevInsns, const Insn *replacedInsn, const Insn *newInsn); + InsnSet GetAllUseInsn(const RegOperand &defReg) const; + int64 GetLogValueAtBase2(int64 val) const; + /* The CC reg is unique and cannot cross-version props. */ + bool IsCCRegCrossVersion(Insn &startInsn, Insn &endInsn, const RegOperand &ccReg) const; + /* optimization support function */ + bool IfOperandIsLiveAfterInsn(const RegOperand ®Opnd, Insn &insn); + bool FindRegLiveOut(const RegOperand ®Opnd, const BB &bb); + bool CheckOpndLiveinSuccs(const RegOperand ®Opnd, const BB &bb) const; + bool CheckRegLiveinReturnBB(const RegOperand ®Opnd, const BB &bb) const; + ReturnType IsOpndLiveinBB(const RegOperand ®Opnd, const BB &bb) const; + bool GetPatternRes() const { + return optSuccess; + } + Insn *GetCurrInsn() { + return currInsn; + } + void SetCurrInsn(Insn *updateInsn) { + currInsn = updateInsn; + } + virtual void Run(BB &bb, Insn &insn) = 0; + virtual bool CheckCondition(Insn &insn) = 0; + + protected: + CGFunc *cgFunc; + BB *currBB; + Insn *currInsn; + CGSSAInfo *ssaInfo; + bool optSuccess = false; +}; + +class PeepHoleOptimizer { + public: + explicit PeepHoleOptimizer(CGFunc *cf) : cgFunc(cf) { + cg = cgFunc->GetCG(); + } + ~PeepHoleOptimizer() = default; + void Peephole0(); + void PeepholeOpt(); + void PrePeepholeOpt(); + void PrePeepholeOpt1(); + + private: + CGFunc *cgFunc; + CG *cg = nullptr; +}; /* class PeepHoleOptimizer */ + +class PeepPatternMatch { + public: + PeepPatternMatch(CGFunc &oneCGFunc, MemPool *memPool) + : optOwnMemPool(memPool), + peepAllocator(memPool), + optimizations(peepAllocator.Adapter()), + cgFunc(oneCGFunc) {} + virtual ~PeepPatternMatch() = default; + virtual void Run(BB &bb, Insn &insn) = 0; + virtual void InitOpts() = 0; + protected: + MemPool *optOwnMemPool; + MapleAllocator peepAllocator; + MapleVector optimizations; + CGFunc &cgFunc; +}; + +class PeepOptimizer { + public: + PeepOptimizer(CGFunc &oneCGFunc, MemPool *memPool) + : cgFunc(oneCGFunc), + peepOptMemPool(memPool) { + index = 0; + } + ~PeepOptimizer() = default; + template + void Run(); + static int32 index; + + private: + CGFunc &cgFunc; + MemPool *peepOptMemPool; +}; + +MAPLE_FUNC_PHASE_DECLARE(CgPeepHole, maplebe::CGFunc) +MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgPrePeepHole, maplebe::CGFunc) +MAPLE_FUNC_PHASE_DECLARE_END +MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgPostPeepHole, maplebe::CGFunc) +MAPLE_FUNC_PHASE_DECLARE_END +MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgPrePeepHole0, maplebe::CGFunc) +MAPLE_FUNC_PHASE_DECLARE_END +MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgPrePeepHole1, maplebe::CGFunc) +MAPLE_FUNC_PHASE_DECLARE_END +MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgPeepHole0, maplebe::CGFunc) +MAPLE_FUNC_PHASE_DECLARE_END +MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgPeepHole1, maplebe::CGFunc) +MAPLE_FUNC_PHASE_DECLARE_END +} /* namespace maplebe */ 
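/*
 * A minimal sketch of how a target-specific pattern is expected to plug into the classes above.
 * The pattern name, its matching logic, and the Insn/BB calls (GetOperand, RemoveInsn) are
 * assumptions made only for illustration; only the CGPeepPattern/PeepOptimizeManager interfaces
 * come from this header:
 *
 *   class RemoveIdentityMovPattern : public CGPeepPattern {
 *    public:
 *     RemoveIdentityMovPattern(CGFunc &f, BB &bb, Insn &insn, CGSSAInfo &info)
 *         : CGPeepPattern(f, bb, insn, info) {}
 *     std::string GetPatternName() override {
 *       return "RemoveIdentityMovPattern";
 *     }
 *     bool CheckCondition(Insn &insn) override {
 *       return insn.GetOperand(0).Equals(insn.GetOperand(1));  // mov x, x (assumed Insn API)
 *     }
 *     void Run(BB &bb, Insn &insn) override {
 *       if (CheckCondition(insn)) {
 *         bb.RemoveInsn(insn);   // assumed BB API
 *         optSuccess = true;     // reported back through GetPatternRes()
 *       }
 *     }
 *   };
 *
 *   A target's DoSSAOptimize(BB &bb, Insn &insn) can then dispatch it through its manager:
 *     manager->Optimize<RemoveIdentityMovPattern>(true);
 */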
+#endif /* MAPLEBE_INCLUDE_CG_PEEP_H */ diff --git a/src/mapleall/maple_be/include/cg/pressure.h b/src/mapleall/maple_be/include/cg/pressure.h new file mode 100644 index 0000000000000000000000000000000000000000..c4b69df73f1424b27971c8c2da373080b6fc163a --- /dev/null +++ b/src/mapleall/maple_be/include/cg/pressure.h @@ -0,0 +1,186 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_CG_PRESSURE_H +#define MAPLEBE_INCLUDE_CG_PRESSURE_H + +#include "cgbb.h" +#include "cgfunc.h" + +namespace maplebe { +struct RegList { + Insn *insn; + RegList *next; +}; + +#define FOR_ALL_REGCLASS(i) \ + for (uint32 i = 0; i < static_cast(RegPressure::GetMaxRegClassNum()); ++i) + +class RegPressure { + public: + explicit RegPressure(MapleAllocator &alloc) + : regUses(alloc.Adapter()), regDefs(alloc.Adapter()), + pressure(alloc.Adapter()), deadDefNum(alloc.Adapter()) {} + + virtual ~RegPressure() = default; + + void DumpRegPressure() const; + + void SetRegUses(RegList *regList) { + regUses.emplace_back(regList); + } + + void SetRegDefs(size_t idx, RegList *regList) { + if (idx < regDefs.size()) { + regDefs[idx] = regList; + } else { + regDefs.emplace_back(regList); + } + } + + static void SetMaxRegClassNum(int32 maxClassNum) { + maxRegClassNum = maxClassNum; + } + + static int32 GetMaxRegClassNum() { + return maxRegClassNum; + } + + int32 GetPriority() const { + return priority; + } + + void SetPriority(int32 value) { + priority = value; + } + + int32 GetMaxDepth() const { + return maxDepth; + } + + void SetMaxDepth(int32 value) { + maxDepth = value; + } + + int32 GetNear() const { + return near; + } + + void SetNear(int32 value) { + near = value; + } + + int32 GetIncPressure() const { + return incPressure; + } + + void SetIncPressure(bool value) { + incPressure = value; + } + const MapleVector &GetPressure() const { + return pressure; + } + + void IncPressureByIndex(uint32 index) { + ASSERT(index < pressure.size(), "index out of range"); + ++pressure[index]; + } + + void DecPressureByIndex(uint32 index) { + ASSERT(index < pressure.size(), "index out of range"); + --pressure[index]; + } + + void InitPressure() { + pressure.resize(static_cast(maxRegClassNum), 0); + deadDefNum.resize(static_cast(maxRegClassNum), 0); + incPressure = false; + } + + const MapleVector &GetDeadDefNum() const { + return deadDefNum; + } + + void IncDeadDefByIndex(uint32 index) { + ASSERT(index < deadDefNum.size(), "index out of range"); + ++deadDefNum[index]; + } + + RegList *GetRegUses(size_t idx) const { + return idx < regUses.size() ? regUses[idx] : nullptr; + } + + void InitRegUsesSize(size_t size) { + regUses.reserve(size); + } + + RegList *GetRegDefs(size_t idx) const { + return idx < regDefs.size() ? 
regDefs[idx] : nullptr; + } + + void InitRegDefsSize(size_t size) { + regDefs.reserve(size); + } + + void SetHasPreg(bool value) { + hasPreg = value; + } + + bool GetHasPreg() const { + return hasPreg; + } + + void SetNumCall(int32 value) { + callNum = value; + } + + int32 GetNumCall() const { + return callNum; + } + + void SetHasNativeCallRegister(bool value) { + hasNativeCallRegister = value; + } + + bool GetHasNativeCallRegister() const { + return hasNativeCallRegister; + } + + private: + /* save reglist of every uses'register */ + MapleVector regUses; + /* save reglist of every defs'register */ + MapleVector regDefs; + + /* the number of the node needs registers */ + MapleVector pressure; + /* the count of dead define registers */ + MapleVector deadDefNum; + /* max number of reg's class */ + static int32 maxRegClassNum; + int32 priority = 0; + int32 maxDepth = 0; + int32 near = 0; + /* the number of successor call */ + int32 callNum = 0; + /* if a type register increase then set incPressure as true. */ + bool incPressure = false; + /* if define physical register, set hasPreg as true */ + bool hasPreg = false; + /* it is call native special register */ + bool hasNativeCallRegister = false; +}; +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_PRESSURE_H */ diff --git a/src/mapleall/maple_be/include/cg/proepilog.h b/src/mapleall/maple_be/include/cg/proepilog.h new file mode 100644 index 0000000000000000000000000000000000000000..9fffbbeff947d2be15b4d6e7317ec6d3b41bb497 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/proepilog.h @@ -0,0 +1,57 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_CG_PROEPILOG_H +#define MAPLEBE_INCLUDE_CG_PROEPILOG_H +#include "cg_phase.h" +#include "cgfunc.h" +#include "insn.h" + +namespace maplebe { +class GenProEpilog { + public: + explicit GenProEpilog(CGFunc &func) : cgFunc(func) {} + + virtual ~GenProEpilog() = default; + + virtual void Run() {} + + std::string PhaseName() const { + return "generateproepilog"; + } + + virtual bool NeedProEpilog() { + return true; + } + + /* CFI related routines */ + int64 GetOffsetFromCFA() const { + return offsetFromCfa; + } + + /* add increment (can be negative) and return the new value */ + int64 AddtoOffsetFromCFA(int64 delta) { + offsetFromCfa += delta; + return offsetFromCfa; + } + + protected: + + CGFunc &cgFunc; + int64 offsetFromCfa = 0; /* SP offset from Call Frame Address */ + bool stackProtect = false; +}; +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_PROEPILOG_H */ diff --git a/src/mapleall/maple_be/include/cg/ra_opt.h b/src/mapleall/maple_be/include/cg/ra_opt.h new file mode 100644 index 0000000000000000000000000000000000000000..93c4944e0fb15e89128767f4e02f85ca0dc2c4b5 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/ra_opt.h @@ -0,0 +1,55 @@ +/* + * Copyright (c) [2021] Futurewei Technologies Co., Ltd. All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan Permissive Software License v2. 
+ * You can use this software according to the terms and conditions of the MulanPSL - 2.0. + * You may obtain a copy of MulanPSL - 2.0 at: + * + * https://opensource.org/licenses/MulanPSL-2.0 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the MulanPSL - 2.0 for more details. + */ +#ifndef MAPLEBE_INCLUDE_CG_RAOPT_H +#define MAPLEBE_INCLUDE_CG_RAOPT_H + +#include "cgfunc.h" +#include "cg_phase.h" +#include "cg_dominance.h" + +namespace maplebe { +class RaOpt { + public: + RaOpt(CGFunc &func, MemPool &pool) : cgFunc(&func), memPool(&pool) {} + + virtual ~RaOpt() = default; + + virtual void Run() {} + + std::string PhaseName() const { + return "raopt"; + } + + const CGFunc *GetCGFunc() const { + return cgFunc; + } + const MemPool *GetMemPool() const { + return memPool; + } + + void SetDomInfo(DomAnalysis *curDom) { + domInfo = curDom; + } + + protected: + CGFunc *cgFunc; + MemPool *memPool; + DomAnalysis *domInfo = nullptr; +}; + +MAPLE_FUNC_PHASE_DECLARE(CgRaOpt, maplebe::CGFunc) +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_RAOPT_H */ diff --git a/src/mapleall/maple_be/include/cg/reaching.h b/src/mapleall/maple_be/include/cg/reaching.h new file mode 100644 index 0000000000000000000000000000000000000000..4a4692521a2058454d3356b4cdcf1ee0f8b583c5 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/reaching.h @@ -0,0 +1,190 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MAPLEBE_INCLUDE_CG_REACHING_H +#define MAPLEBE_INCLUDE_CG_REACHING_H + +#include "cg_phase.h" +#include "cgbb.h" +#include "datainfo.h" +#include "maple_phase.h" + +namespace maplebe { +enum VisitStatus : uint8 { + kNotVisited, + kNormalVisited, + kEHVisited +}; + +enum AnalysisType : uint8 { + kRDRegAnalysis = 1, + kRDMemAnalysis = 2, + kRDAllAnalysis = 3 +}; + +enum DumpType : uint32 { + kDumpAll = 0xFFF, + kDumpRegGen = 0x001, + kDumpRegUse = 0x002, + kDumpRegIn = 0x004, + kDumpRegOut = 0x008, + kDumpMemGen = 0x010, + kDumpMemIn = 0x020, + kDumpMemOut = 0x040, + kDumpMemUse = 0x080, + kDumpBBCGIR = 0x100 +}; + +class ReachingDefinition : public AnalysisResult { + public: + ReachingDefinition(CGFunc &func, MemPool &memPool); + ~ReachingDefinition() override = default; + void AnalysisStart(); + void Dump(uint32 flag) const; + void DumpInfo(const BB &bb, DumpType flag) const; + void DumpBBCGIR(const BB &bb) const; + void ClearDefUseInfo(); + void UpdateInOut(BB &changedBB); + void UpdateInOut(BB &changedBB, bool isReg); + void SetAnalysisMode(AnalysisType analysisMode) { + mode = analysisMode; + } + + bool OnlyAnalysisReg() const { + return mode == kRDRegAnalysis; + } + + uint32 GetMaxInsnNO() const { + return maxInsnNO; + } + + size_t GetRegSize(const BB &bb) const { + return regUse[bb.GetId()]->Size(); + } + + bool CheckRegGen(const BB &bb, uint32 regNO) const { + return regGen[bb.GetId()]->TestBit(regNO); + } + + void EnlargeRegCapacity(uint32 size); + bool IsFrameReg(const Operand &opnd) const; + InsnSet FindUseForRegOpnd(Insn &insn, uint32 indexOrRegNO, bool isRegNO) const; + bool RegIsUsedIncaller(uint32 regNO, Insn &startInsn, Insn &endInsn) const; + bool CheckRegLiveinReturnBB(uint32 regNO, const BB &bb) const; + bool RegIsLiveBetweenInsn(uint32 regNO, Insn &startInsn, Insn &endInsn, bool isBack = false, + bool isFirstNo = false) const; + bool RegIsUsedOrDefBetweenInsn(uint32 regNO, Insn &startInsn, Insn &endInsn) const; + bool IsLiveInAllPathBB(uint32 regNO, const BB &startBB, const BB &endBB, std::vector &visitedBB, + bool isFirstNo = false) const; + bool IsUseOrDefInAllPathBB(uint32 regNO, const BB &startBB, const BB &endBB, std::vector &visitedBB) const; + bool IsUseOrDefBetweenInsn(uint32 regNO, const BB &curBB, const Insn &startInsn, Insn &endInsn) const; + bool HasCallBetweenDefUse(const Insn &defInsn, const Insn &useInsn) const; + std::vector FindRegDefBetweenInsn ( + uint32 regNO, Insn *startInsn, Insn *endInsn, bool findAll = false, bool analysisDone = true) const; + virtual void InitGenUse(BB &bb, bool firstTime = true) = 0; + virtual InsnSet FindDefForMemOpnd(Insn &insn, uint32 indexOrOffset, bool isOffset = false) const = 0; + virtual InsnSet FindUseForMemOpnd(Insn &insn, uint8 index, bool secondMem = false) const = 0; + virtual std::vector FindMemDefBetweenInsn(uint32 offset, const Insn *startInsn, Insn *endInsn) const = 0; + virtual std::vector FindRegDefBetweenInsnGlobal(uint32 regNO, Insn *startInsn, Insn *endInsn) const = 0; + virtual bool FindRegUseBetweenInsn(uint32 regNO, Insn *startInsn, Insn *endInsn, InsnSet &useInsnSet) const = 0; + virtual bool FindRegUseBetweenInsnGlobal(uint32 regNO, Insn *startInsn, Insn *endInsn, BB* movBB) const = 0; + virtual bool FindMemUseBetweenInsn(uint32 offset, Insn *startInsn, const Insn *endInsn, + InsnSet &useInsnSet) const = 0; + virtual InsnSet FindDefForRegOpnd(Insn &insn, uint32 indexOrRegNO, bool isRegNO = false) const = 0; + + static constexpr int32 kWordByteNum = 4; + static constexpr int32 
kDoubleWordByteNum = 8; + /* to save storage space, the offset of stack memory is devided by 4 and then saved in DataInfo */ + static constexpr int32 kMemZoomSize = 4; + /* number the insn interval 3. make sure no repeated insn number when new insn inserted */ + static constexpr uint32 kInsnNoInterval = 3; + bool HasCallBetweenInsnInSameBB(const Insn &startInsn, const Insn &endInsn) const; + virtual bool KilledByCallBetweenInsnInSameBB(const Insn &startInsn, const Insn &endInsn, regno_t regNO) const = 0; + protected: + virtual void InitStartGen() = 0; + virtual void InitEhDefine(BB &bb) = 0; + virtual void GenAllAsmDefRegs(BB &bb, Insn &insn, uint32 index) = 0; + virtual void GenAllAsmUseRegs(BB &bb, Insn &insn, uint32 index) = 0; + virtual void GenAllCallerSavedRegs(BB &bb, Insn &insn) = 0; + virtual void AddRetPseudoInsn(BB &bb) = 0; + virtual void AddRetPseudoInsns() = 0; + virtual int32 GetStackSize() const = 0; + virtual bool IsCallerSavedReg(uint32 regNO) const = 0; + virtual void FindRegDefInBB(uint32 regNO, BB &bb, InsnSet &defInsnSet) const = 0; + virtual void FindMemDefInBB(uint32 offset, BB &bb, InsnSet &defInsnSet) const = 0; + virtual bool IsRegKilledByCallInsn(const Insn &insn, regno_t regNO) const = 0; + virtual void DFSFindDefForRegOpnd(const BB &startBB, uint32 regNO, std::vector &visitedBB, + InsnSet &defInsnSet) const = 0; + virtual void DFSFindDefForMemOpnd(const BB &startBB, uint32 offset, std::vector &visitedBB, + InsnSet &defInsnSet) const = 0; + void DFSFindUseForMemOpnd(const BB &startBB, uint32 offset, std::vector &visitedBB, + InsnSet &useInsnSet, bool onlyFindForEhSucc) const; + + CGFunc *cgFunc; + MapleAllocator rdAlloc; + StackMemPool &stackMp; + MapleVector pseudoInsns; + AnalysisType mode = kRDRegAnalysis; + BB *firstCleanUpBB = nullptr; + std::vector regGen; + std::vector regUse; + std::vector regIn; + std::vector regOut; + std::vector memGen; + std::vector memUse; + std::vector memIn; + std::vector memOut; + const uint32 kMaxBBNum; + uint32 stackSize = 0; + private: + void Initialize(); + void InitDataSize(); + void BuildInOutForFuncBody(); + void BuildInOutForCleanUpBB(); + void BuildInOutForCleanUpBB(bool isReg, const std::set &index); + void InitRegAndMemInfo(const BB &bb); + void InitOut(const BB &bb); + bool GenerateIn(const BB &bb); + bool GenerateIn(const BB &bb, const std::set &infoIndex, const bool isReg); + bool GenerateOut(const BB &bb); + bool GenerateOut(const BB &bb, const std::set &infoIndex, const bool isReg); + bool GenerateInForFirstCleanUpBB(); + bool GenerateInForFirstCleanUpBB(bool isReg, const std::set &infoIndex); + void DFSFindUseForRegOpnd(const BB &startBB, uint32 regNO, std::vector &visitedBB, + InsnSet &useInsnSet, bool onlyFindForEhSucc) const; + bool RegIsUsedInOtherBB(const BB &startBB, uint32 regNO, std::vector &visitedBB) const; + bool RegHasUsePoint(uint32 regNO, Insn ®DefInsn) const; + bool CanReachEndBBFromCurrentBB(const BB ¤tBB, const BB &endBB, std::vector &traversedBBSet) const; + + bool HasCallInPath(const BB &startBB, const BB &endBB, std::vector &visitedBB) const; + bool RegIsUsedInCleanUpBB(uint32 regNO) const; + void BuildInOutForFuncBodyBFS(); + void GenerateIn(const BB &bb, std::queue &worklist, std::vector &inQueued); + + MapleSet normalBBSet; + MapleSet cleanUpBBSet; + uint32 maxInsnNO = 0; +}; + +MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgReachingDefinition, maplebe::CGFunc) + ReachingDefinition *GetResult() { + return reachingDef; + } + ReachingDefinition *reachingDef = nullptr; +MAPLE_FUNC_PHASE_DECLARE_END 
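/*
 * Standalone sketch (not part of this patch) of the kind of fixed-point iteration that
 * BuildInOutForFuncBody()/GenerateIn()/GenerateOut() above compute over the per-BB bit
 * vectors (regGen/regUse/regIn/regOut and their mem* counterparts).  It uses the textbook
 * reaching-definitions equations
 *     in[B]  = union of out[P] over all predecessors P
 *     out[B] = gen[B] | (in[B] & ~kill[B])
 * with up to 64 definitions packed into a uint64_t purely for illustration; the actual
 * sets, worklist order (BuildInOutForFuncBodyBFS) and EH/cleanup-BB handling differ.
 */
#include <cstdint>
#include <vector>

struct SketchBB {
  std::vector<size_t> preds;  /* indices of predecessor blocks */
  uint64_t gen = 0;           /* definitions generated in this block */
  uint64_t kill = 0;          /* definitions overwritten in this block */
  uint64_t in = 0;
  uint64_t out = 0;
};

inline void SolveReachingDefinitions(std::vector<SketchBB> &blocks) {
  bool changed = true;
  while (changed) {  /* iterate until no out-set changes */
    changed = false;
    for (SketchBB &bb : blocks) {
      uint64_t newIn = 0;
      for (size_t p : bb.preds) {
        newIn |= blocks[p].out;
      }
      bb.in = newIn;
      const uint64_t newOut = bb.gen | (bb.in & ~bb.kill);
      if (newOut != bb.out) {
        bb.out = newOut;
        changed = true;
      }
    }
  }
}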
+MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgClearRDInfo, maplebe::CGFunc) +MAPLE_FUNC_PHASE_DECLARE_END +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_REACHING_H */ diff --git a/src/mapleall/maple_be/include/cg/reg_alloc.h b/src/mapleall/maple_be/include/cg/reg_alloc.h new file mode 100644 index 0000000000000000000000000000000000000000..312f266dd5e029093365dc1aba57f24ff97534f9 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/reg_alloc.h @@ -0,0 +1,54 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_CG_REG_ALLOC_H +#define MAPLEBE_INCLUDE_CG_REG_ALLOC_H + +#include "cgfunc.h" +#include "maple_phase_manager.h" + +namespace maplebe { +class RegAllocator { + public: + RegAllocator(CGFunc &tempCGFunc, MemPool &memPool) + : cgFunc(&tempCGFunc), + memPool(&memPool), + alloc(&memPool), + regInfo(tempCGFunc.GetTargetRegInfo()) { + regInfo->Init(); +} + + virtual ~RegAllocator() = default; + + virtual bool AllocateRegisters() = 0; + + bool IsYieldPointReg(regno_t regNO) const; + bool IsUntouchableReg(regno_t regNO) const; + + virtual std::string PhaseName() const { + return "regalloc"; + } + + protected: + CGFunc *cgFunc = nullptr; + MemPool *memPool = nullptr; + MapleAllocator alloc; + RegisterInfo *regInfo = nullptr; +}; + +MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgRegAlloc, maplebe::CGFunc) +MAPLE_FUNC_PHASE_DECLARE_END +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_REG_ALLOC_H */ diff --git a/src/mapleall/maple_be/include/cg/reg_alloc_basic.h b/src/mapleall/maple_be/include/cg/reg_alloc_basic.h new file mode 100644 index 0000000000000000000000000000000000000000..74c8e6d84c3b6a240e12372903b4c442db2c42d8 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/reg_alloc_basic.h @@ -0,0 +1,77 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MAPLEBE_INCLUDE_CG_REG_ALLOC_BASIC_H +#define MAPLEBE_INCLUDE_CG_REG_ALLOC_BASIC_H + +#include "reg_alloc.h" +#include "operand.h" +#include "cgfunc.h" + +namespace maplebe { +class DefaultO0RegAllocator : public RegAllocator { + public: + DefaultO0RegAllocator(CGFunc &cgFunc, MemPool &memPool) + : RegAllocator(cgFunc, memPool), + calleeSaveUsed(alloc.Adapter()), + availRegSet(alloc.Adapter()), + regMap(std::less(), alloc.Adapter()), + liveReg(std::less(), alloc.Adapter()), + allocatedSet(std::less(), alloc.Adapter()), + regLiveness(alloc.Adapter()), + rememberRegs(alloc.Adapter()) { + availRegSet.resize(regInfo->GetAllRegNum()); + } + + ~DefaultO0RegAllocator() override { + regInfo = nullptr; + } + + bool AllocateRegisters() override; + + void InitAvailReg(); + + bool AllocatePhysicalRegister(const RegOperand &opnd); + void ReleaseReg(regno_t reg); + void ReleaseReg(const RegOperand ®Opnd); + void AllocHandleDestList(Insn &insn, Operand &opnd, uint32 idx); + void AllocHandleDest(Insn &insn, Operand &opnd, uint32 idx); + void AllocHandleSrcList(Insn &insn, Operand &opnd, uint32 idx); + void AllocHandleSrc(Insn &insn, Operand &opnd, uint32 idx); + void SaveCalleeSavedReg(const RegOperand &opnd); + + protected: + Operand *HandleRegOpnd(Operand &opnd); + Operand *HandleMemOpnd(Operand &opnd); + Operand *AllocSrcOpnd(Operand &opnd); + Operand *AllocDestOpnd(Operand &opnd, const Insn &insn); + uint32 GetRegLivenessId(regno_t regNo); + bool CheckRangesOverlap(const std::pair &range1, + const MapleVector> &ranges2) const; + void SetupRegLiveness(BB *bb); + void SetupRegLiveness(MemOperand &opnd, uint32 insnId); + void SetupRegLiveness(ListOperand &opnd, uint32 insnId, bool isDef); + void SetupRegLiveness(RegOperand &opnd, uint32 insnId, bool isDef); + + MapleSet calleeSaveUsed; + MapleVector availRegSet; + MapleMap regMap; /* virtual-register-to-physical-register map */ + MapleSet liveReg; /* a set of currently live physical registers */ + MapleSet allocatedSet; /* already allocated */ + MapleMap>> regLiveness; + MapleVector rememberRegs; +}; +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_REG_ALLOC_BASIC_H */ diff --git a/src/mapleall/maple_be/include/cg/reg_alloc_lsra.h b/src/mapleall/maple_be/include/cg/reg_alloc_lsra.h new file mode 100644 index 0000000000000000000000000000000000000000..7759ac1b5ac76d984a404282054c04b155465f72 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/reg_alloc_lsra.h @@ -0,0 +1,552 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_CG_REG_ALLOC_LSRA_H +#define MAPLEBE_INCLUDE_CG_REG_ALLOC_LSRA_H +#include "reg_alloc.h" +#include "cgfunc.h" +#include "optimize_common.h" + +namespace maplebe { +class LSRALinearScanRegAllocator : public RegAllocator { + enum RegInCatch : uint8 { + /* + * RA do not want to allocate certain registers if a live interval is + * only inside of catch blocks. 
+ */ + kRegCatchNotInit = 0, /* unitialized state */ + kRegNOtInCatch = 1, /* interval is part or all outside of catch */ + kRegAllInCatch = 2, /* inteval is completely inside catch */ + }; + + enum RegInCleanup : uint8 { + /* Similar to reg_in_catch_t */ + kRegCleanupNotInit = 0, /* unitialized state */ + kRegAllInFirstbb = 1, /* interval is all in the first bb */ + kRegAllOutCleanup = 2, /* interval is all outside of cleanup, must in normal bb, may in first bb. */ + kRegInCleanupAndFirstbb = 3, /* inteval is in cleanup and first bb. */ + kRegInCleanupAndNormalbb = 4, /* inteval is in cleanup and non-first bb. */ + kRegAllInCleanup = 5 /* inteval is inside cleanup, except for bb 1 */ + }; + + class LiveInterval { + public: + explicit LiveInterval(MapleAllocator &mallocator) + : ranges(mallocator.Adapter()), + holes(mallocator.Adapter()), + usePositions(mallocator.Adapter()), + overlapPhyRegSet(mallocator.Adapter()) {} + + virtual ~LiveInterval() = default; + + void AddRange(uint32 from, uint32 to); + void AddUsePos(uint32 pos); + + const Insn *GetIsCall() const { + return isCall; + } + + void SetIsCall(Insn &newIsCall) { + isCall = &newIsCall; + } + + uint32 GetPhysUse() const { + return physUse; + } + + void SetPhysUse(uint32 newPhysUse) { + physUse = newPhysUse; + } + + uint32 GetLastUse() const { + return lastUse; + } + + void SetLastUse(uint32 newLastUse) { + lastUse = newLastUse; + } + + uint32 GetRegNO() const { + return regNO; + } + + void SetRegNO(uint32 newRegNO) { + regNO = newRegNO; + } + + uint32 GetAssignedReg() const { + return assignedReg; + } + + void SetAssignedReg(uint32 newAssignedReg) { + assignedReg = newAssignedReg; + } + + uint32 GetFirstDef() const { + return firstDef; + } + + void SetFirstDef(uint32 newFirstDef) { + firstDef = newFirstDef; + } + + uint32 GetStackSlot() const { + return stackSlot; + } + + void SetStackSlot(uint32 newStkSlot) { + stackSlot = newStkSlot; + } + + RegType GetRegType() const { + return regType; + } + + void SetRegType(RegType newRegType) { + regType = newRegType; + } + + uint32 GetFirstAcrossedCall() const { + return firstAcrossedCall; + } + + void SetFirstAcrossedCall(uint32 newFirstAcrossedCall) { + firstAcrossedCall = newFirstAcrossedCall; + } + + bool IsEndByCall() const { + return endByCall; + } + + bool IsUseBeforeDef() const { + return useBeforeDef; + } + + void SetUseBeforeDef(bool newUseBeforeDef) { + useBeforeDef = newUseBeforeDef; + } + + bool IsShouldSave() const { + return shouldSave; + } + + void SetShouldSave(bool newShouldSave) { + shouldSave = newShouldSave; + } + + bool IsMultiUseInBB() const { + return multiUseInBB; + } + + void SetMultiUseInBB(bool newMultiUseInBB) { + multiUseInBB = newMultiUseInBB; + } + + bool IsThrowVal() const { + return isThrowVal; + } + + bool IsCallerSpilled() const { + return isCallerSpilled; + } + + void SetIsCallerSpilled(bool newIsCallerSpilled) { + isCallerSpilled = newIsCallerSpilled; + } + + bool IsMustAllocate() const { + return mustAllocate; + } + + void SetMustAllocate(bool newMustAllocate) { + mustAllocate = newMustAllocate; + } + + uint32 GetRefCount() const{ + return refCount; + } + + void SetRefCount(uint32 newRefCount) { + refCount = newRefCount; + } + + float GetPriority() const { + return priority; + } + + void SetPriority(float newPriority) { + priority = newPriority; + } + + const MapleVector> &GetRanges() const { + return ranges; + } + + MapleVector> &GetRanges() { + return ranges; + } + + size_t GetRangesSize() const { + return ranges.size(); + } + + const MapleVector> 
&GetHoles() const { + return holes; + } + + void HolesPushBack(uint32 pair1, uint32 pair2) { + holes.push_back(std::pair(pair1, pair2)); + } + + void UsePositionsInsert(uint32 insertId) { + (void)usePositions.insert(insertId); + } + + const LiveInterval *GetLiParent() const { + return liveParent; + } + + void SetLiParent(LiveInterval *newLiParent) { + liveParent = newLiParent; + } + + void SetLiParentChild(LiveInterval *child) const { + liveParent->SetLiChild(child); + } + + const LiveInterval *GetLiChild() const { + return liveChild; + } + + void SetLiChild(LiveInterval *newLiChild) { + liveChild = newLiChild; + } + + void SetOverlapPhyRegSet(regno_t regNo) { + overlapPhyRegSet.insert(regNo); + } + bool IsOverlapPhyReg(regno_t regNo) { + return overlapPhyRegSet.find(regNo) != overlapPhyRegSet.end(); + } + + uint32 GetResultCount() const { + return resultCount; + } + + void SetResultCount(uint32 newResultCount) { + resultCount = newResultCount; + } + + void SetInCatchState() { + /* + * Once in REG_NOT_IN_CATCH, it is irreversible since once an interval + * is not in a catch, it is not completely in a catch. + */ + if (inCatchState == kRegNOtInCatch) { + return; + } + inCatchState = kRegAllInCatch; + } + + void SetNotInCatchState() { + inCatchState = kRegNOtInCatch; + } + + bool IsInCatch() const { + return (inCatchState == kRegAllInCatch); + } + + void SetInCleanupState() { + switch (inCleanUpState) { + case kRegCleanupNotInit: + inCleanUpState = kRegAllInCleanup; + break; + case kRegAllInFirstbb: + inCleanUpState = kRegInCleanupAndFirstbb; + break; + case kRegAllOutCleanup: + inCleanUpState = kRegInCleanupAndNormalbb; + break; + case kRegInCleanupAndFirstbb: + break; + case kRegInCleanupAndNormalbb: + break; + case kRegAllInCleanup: + break; + default: + ASSERT(false, "CG Internal error."); + break; + } + } + + void SetNotInCleanupState(bool isFirstBB) { + switch (inCleanUpState) { + case kRegCleanupNotInit: { + if (isFirstBB) { + inCleanUpState = kRegAllInFirstbb; + } else { + inCleanUpState = kRegAllOutCleanup; + } + break; + } + case kRegAllInFirstbb: { + if (!isFirstBB) { + inCleanUpState = kRegAllOutCleanup; + } + break; + } + case kRegAllOutCleanup: + break; + case kRegInCleanupAndFirstbb: { + if (!isFirstBB) { + inCleanUpState = kRegInCleanupAndNormalbb; + } + break; + } + case kRegInCleanupAndNormalbb: + break; + case kRegAllInCleanup: { + if (isFirstBB) { + inCleanUpState = kRegInCleanupAndFirstbb; + } else { + inCleanUpState = kRegInCleanupAndNormalbb; + } + break; + } + default: + ASSERT(false, "CG Internal error."); + break; + } + } + + bool IsAllInCleanupOrFirstBB() const { + return (inCleanUpState == kRegAllInCleanup) || (inCleanUpState == kRegInCleanupAndFirstbb); + } + + bool IsAllOutCleanup() const { + return (inCleanUpState == kRegAllInFirstbb) || (inCleanUpState == kRegAllOutCleanup); + } + + private: + Insn *isCall = nullptr; + uint32 firstDef = 0; + uint32 lastUse = 0; + uint32 physUse = 0; + uint32 regNO = 0; + /* physical register, using cg defined reg based on R0/V0. */ + uint32 assignedReg = 0; + uint32 stackSlot = -1; + RegType regType = kRegTyUndef; + uint32 firstAcrossedCall = 0; + bool endByCall = false; + bool useBeforeDef = false; + bool shouldSave = false; + bool multiUseInBB = false; /* vreg has more than 1 use in bb */ + bool isThrowVal = false; + bool isCallerSpilled = false; /* only for R0(R1?) 
which are used for explicit incoming value of throwval; */ + bool mustAllocate = false; /* The register cannot be spilled (clinit pair) */ + uint32 refCount = 0; + float priority = 0.0; + MapleVector> ranges; + MapleVector> holes; + MapleSet usePositions; + MapleSet overlapPhyRegSet; + LiveInterval *liveParent = nullptr; /* Current li is in aother li's hole. */ + LiveInterval *liveChild = nullptr; /* Another li is in current li's hole. */ + uint32 resultCount = 0; /* number of times this vreg has been written */ + uint8 inCatchState = kRegCatchNotInit; /* part or all of live interval is outside of catch blocks */ + uint8 inCleanUpState = kRegCleanupNotInit; /* part or all of live interval is outside of cleanup blocks */ + }; + + struct ActiveCmp { + bool operator()(const LiveInterval *lhs, const LiveInterval *rhs) const { + CHECK_NULL_FATAL(lhs); + CHECK_NULL_FATAL(rhs); + /* elements considered equal if return false */ + if (lhs == rhs) { + return false; + } + if (lhs->GetFirstDef() == rhs->GetFirstDef() && lhs->GetLastUse() == rhs->GetLastUse() && + lhs->GetRegNO() == rhs->GetRegNO() && lhs->GetRegType() == rhs->GetRegType() && + lhs->GetAssignedReg() == rhs->GetAssignedReg()) { + return false; + } + if (lhs->GetFirstDef() == rhs->GetFirstDef() && lhs->GetLastUse() == rhs->GetLastUse() && + lhs->GetPhysUse() == rhs->GetPhysUse() && lhs->GetRegType() == rhs->GetRegType()) { + return lhs->GetRegNO() < rhs->GetRegNO(); + } + if (lhs->GetPhysUse() != 0 && rhs->GetPhysUse() != 0) { + if (lhs->GetFirstDef() == rhs->GetFirstDef()) { + return lhs->GetPhysUse() < rhs->GetPhysUse(); + } else { + return lhs->GetFirstDef() < rhs->GetFirstDef(); + } + } + /* At this point, lhs != rhs */ + if (lhs->GetLastUse() == rhs->GetLastUse()) { + return lhs->GetFirstDef() <= rhs->GetFirstDef(); + } + return lhs->GetLastUse() < rhs->GetLastUse(); + } + }; + + public: + LSRALinearScanRegAllocator(CGFunc &cgFunc, MemPool &memPool) + : RegAllocator(cgFunc, memPool), + liveIntervalsArray(alloc.Adapter()), + lastIntParamLi(alloc.Adapter()), + lastFpParamLi(alloc.Adapter()), + initialQue(alloc.Adapter()), + intParamQueue(alloc.Adapter()), + fpParamQueue(alloc.Adapter()), + callList(alloc.Adapter()), + active(alloc.Adapter()), + intCallerRegSet(alloc.Adapter()), + intCalleeRegSet(alloc.Adapter()), + intParamRegSet(alloc.Adapter()), + intSpillRegSet(alloc.Adapter()), + fpCallerRegSet(alloc.Adapter()), + fpCalleeRegSet(alloc.Adapter()), + fpParamRegSet(alloc.Adapter()), + fpSpillRegSet(alloc.Adapter()), + calleeUseCnt(alloc.Adapter()) { + for (uint32 i = 0; i < regInfo->GetIntRegsParmsNum(); ++i) { + intParamQueue.push_back(initialQue); + } + for (int32 i = 0; i < regInfo->GetFloatRegsParmsNum(); ++i) { + fpParamQueue.push_back(initialQue); + } + firstIntReg = *regInfo->GetRegsFromType(kRegTyInt).begin(); + firstFpReg = *regInfo->GetRegsFromType(kRegTyFloat).begin(); + } + ~LSRALinearScanRegAllocator() override = default; + + bool AllocateRegisters() override; + void PreWork(); + bool CheckForReg(Operand &opnd, const Insn &insn, const LiveInterval &li, regno_t regNO, bool isDef) const; + void PrintRegSet(const MapleSet &set, const std::string &str) const; + void PrintLiveInterval(const LiveInterval &li, const std::string &str) const; + void PrintLiveRanges() const; + void PrintParamQueue(const std::string &str); + void PrintCallQueue(const std::string &str) const; + void PrintActiveList(const std::string &str, uint32 len = 0) const; + void PrintActiveListSimple() const; + void PrintLiveIntervals() const; + void 
DebugCheckActiveList() const; + void InitFreeRegPool(); + void RecordCall(Insn &insn); + void RecordPhysRegs(const RegOperand ®Opnd, uint32 insnNum, bool isDef); + void UpdateLiveIntervalState(const BB &bb, LiveInterval &li) const; + void SetupLiveInterval(Operand &opnd, Insn &insn, bool isDef, uint32 &nUses); + void UpdateLiveIntervalByLiveIn(const BB &bb, uint32 insnNum); + void UpdateParamLiveIntervalByLiveIn(const BB &bb, uint32 insnNum); + void ComputeLiveIn(BB &bb, uint32 insnNum); + void ComputeLiveOut(BB &bb, uint32 insnNum); + void ComputeLiveIntervalForEachOperand(Insn &insn); + void ComputeLiveInterval(); + void FindLowestPrioInActive(LiveInterval *&targetLi, RegType regType = kRegTyInt, bool startRa = false); + void LiveIntervalAnalysis(); + bool OpndNeedAllocation(const Insn &insn, Operand &opnd, bool isDef, uint32 insnNum); + void InsertParamToActive(Operand &opnd); + void InsertToActive(Operand &opnd, uint32 insnNum); + void ReturnPregToSet(const LiveInterval &li, uint32 preg); + void ReleasePregToSet(const LiveInterval &li, uint32 preg); + void UpdateActiveAtRetirement(uint32 insnID); + void RetireFromActive(const Insn &insn); + void AssignPhysRegsForInsn(Insn &insn); + RegOperand *GetReplaceOpnd(Insn &insn, Operand &opnd, uint32 &spillIdx, bool isDef); + RegOperand *GetReplaceUdOpnd(Insn &insn, Operand &opnd, uint32 &spillIdx); + + void SetAllocMode(); + void CheckSpillCallee(); + void LinearScanRegAllocator(); + void FinalizeRegisters(); + void SpillOperand(Insn &insn, Operand &opnd, bool isDef, uint32 spillIdx); + void SetOperandSpill(Operand &opnd); + RegOperand *HandleSpillForInsn(const Insn &insn, Operand &opnd); + MemOperand *GetSpillMem(uint32 vRegNO, bool isDest, Insn &insn, regno_t regNO, + bool &isOutOfRange) const; + void InsertCallerSave(Insn &insn, Operand &opnd, bool isDef); + uint32 GetRegFromSet(MapleSet &set, regno_t offset, LiveInterval &li, regno_t forcedReg = 0) const; + uint32 AssignSpecialPhysRegPattern(const Insn &insn, LiveInterval &li); + uint32 FindAvailablePhyReg(LiveInterval &li, const Insn &insn); + RegOperand *AssignPhysRegs(Operand &opnd, const Insn &insn); + void SetupIntervalRangesByOperand(Operand &opnd, const Insn &insn, uint32 blockFrom, bool isDef, bool isUse); + void BuildIntervalRangesForEachOperand(const Insn &insn, uint32 blockFrom); + void BuildIntervalRanges(); + uint32 FillInHole(LiveInterval &li); + + private: + uint32 FindAvailablePhyRegByFastAlloc(LiveInterval &li); + bool NeedSaveAcrossCall(LiveInterval &li); + uint32 FindAvailablePhyReg(LiveInterval &li, const Insn &insn, bool isIntReg); + + regno_t firstIntReg = 0; + regno_t firstFpReg = 0; + + /* Comparison function for LiveInterval */ + static constexpr uint32 kMaxSpillRegNum = 3; + static constexpr uint32 kMaxFpSpill = 2; + MapleVector liveIntervalsArray; + MapleVector lastIntParamLi; + MapleVector lastFpParamLi; + MapleQueue initialQue; + using SingleQue = MapleQueue; + MapleVector intParamQueue; + MapleVector fpParamQueue; + MapleList callList; + MapleSet active; + MapleSet::iterator itFinded; + + /* Change these into vectors so it can be added and deleted easily. 
*/ + MapleSet intCallerRegSet; /* integer caller saved */ + MapleSet intCalleeRegSet; /* callee */ + MapleSet intParamRegSet; /* parameters */ + MapleVector intSpillRegSet; /* integer regs put aside for spills */ + + /* and register */ + uint32 intCallerMask = 0; /* bit mask for all possible caller int */ + uint32 intCalleeMask = 0; /* callee */ + uint32 intParamMask = 0; /* (physical-register) parameter */ + MapleSet fpCallerRegSet; /* float caller saved */ + MapleSet fpCalleeRegSet; /* callee */ + MapleSet fpParamRegSet; /* parameter */ + MapleVector fpSpillRegSet; /* float regs put aside for spills */ + MapleVector calleeUseCnt; /* Number of time callee reg is seen */ + Bfs *bfs = nullptr; + uint32 fpCallerMask = 0; /* bit mask for all possible caller fp */ + uint32 fpCalleeMask = 0; /* callee */ + uint32 fpParamMask = 0; /* (physical-register) parameter */ + uint32 intBBDefMask = 0; /* locally which physical reg is defined */ + uint32 fpBBDefMask = 0; + uint32 debugSpillCnt = 0; + uint32 regUsedInBBSz = 0; + uint64 *regUsedInBB = nullptr; + uint32 maxInsnNum = 0; + regno_t minVregNum = 0; + regno_t maxVregNum = 0xFFFFFFFF; + bool fastAlloc = false; + bool spillAll = false; + bool needExtraSpillReg = false; + bool isSpillZero = false; + bool shouldOptIntCallee = false; + bool shouldOptFpCallee = false; + uint64 spillCount = 0; + uint64 reloadCount = 0; + uint64 callerSaveSpillCount = 0; + uint64 callerSaveReloadCount = 0; +}; +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_REG_ALLOC_LSRA_H */ diff --git a/src/mapleall/maple_be/include/cg/reg_coalesce.h b/src/mapleall/maple_be/include/cg/reg_coalesce.h new file mode 100644 index 0000000000000000000000000000000000000000..d905f08a7591e9a17967d975e650864389670fac --- /dev/null +++ b/src/mapleall/maple_be/include/cg/reg_coalesce.h @@ -0,0 +1,246 @@ +/* + * Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MAPLEBE_INCLUDE_CG_REGCOALESCE_H +#define MAPLEBE_INCLUDE_CG_REGCOALESCE_H +#include "live.h" + +namespace maplebe { + +using PosPair = std::pair; +class LiveInterval { + public: + explicit LiveInterval(MapleAllocator &allocator) + : ranges(allocator.Adapter()), + conflict(allocator.Adapter()), + defPoints(allocator.Adapter()), + usePoints(allocator.Adapter()), + alloc(allocator) {} + + void IncNumCall() { + ++numCall; + } + + MapleMap> &GetRanges() { + return ranges; + } + + const MapleMap> &GetRanges() const { + return ranges; + } + + void AddRange(uint32 bbid, uint32 end, bool alreadLive) { + auto it = ranges.find(bbid); + if (it == ranges.end()) { + MapleVector posVec(alloc.Adapter()); + posVec.emplace_back(std::pair(end, end)); + ranges.emplace(bbid, posVec); + } else { + MapleVector &posVec = it->second; + if (alreadLive) { + PosPair lastPos = posVec[posVec.size() - 1]; + posVec[posVec.size() - 1] = std::pair(end, lastPos.second); + } else { + posVec.emplace_back(std::pair(end, end)); + } + } + } + + void MergeRanges(LiveInterval &lr) { + const MapleMap> lrDestRanges = lr.GetRanges(); + for (auto &destRange : lrDestRanges) { + uint32 bbid = destRange.first; + auto &destPosVec = destRange.second; + auto it = ranges.find(bbid); + if (it == ranges.end()) { + /* directly add it */ + MapleVector posVec(alloc.Adapter()); + for (auto pos: destPosVec) { + posVec.emplace_back(std::pair(pos.first, pos.second)); + } + ranges.emplace(bbid, posVec); + } else { + /* merge it simply, so that subpos may overlap. */ + auto &srcPosVec = it->second; + for (auto pos1 : destPosVec) { + bool merged = false; + for (auto &pos2 : srcPosVec) { + if (!((pos1.first < pos2.first && pos1.second < pos2.first) || + (pos2.first < pos1.second && pos2.second < pos1.first))) { + uint32 bgn = pos1.first < pos2.first ? pos1.first : pos2.first; + uint32 end = pos1.second > pos2.second ? 
pos1.second : pos2.second; + pos2 = std::pair(bgn, end); + merged = true; + } + } + /* add it directly when no overlap */ + if (!merged) { + srcPosVec.emplace_back(std::pair(pos1.first, pos1.second)); + } + } + } + } + } + + void MergeConflict(LiveInterval &lr) { + for (auto reg : lr.conflict) { + conflict.insert(reg); + } + } + + void MergeRefPoints(LiveInterval &lr) { + if (lr.GetDefPoint().size() != k1ByteSize) { + for (auto insn : lr.defPoints) { + defPoints.insert(insn); + } + } + for (auto insn : lr.usePoints) { + usePoints.insert(insn); + } + } + + void AddConflict(regno_t val) { + conflict.insert(val); + } + + MapleSet GetConflict() { + return conflict; + } + + void AddRefPoint(Insn *val, bool isDef) { + if (isDef) { + defPoints.insert(val); + } else { + usePoints.insert(val); + } + } + + InsnMapleSet &GetDefPoint() { + return defPoints; + } + + InsnMapleSet &GetUsePoint() { + return usePoints; + } + + bool IsConflictWith(regno_t val) { + return conflict.find(val) != conflict.end(); + } + + RegType GetRegType() const { + return regType; + } + + void SetRegType(RegType val) { + this->regType = val; + } + + regno_t GetRegNO() const { + return regno; + } + + void SetRegNO(regno_t val) { + this->regno = val; + } + + void Dump() const { + std::cout << "R" << regno << ": "; + for (auto &range : GetRanges()) { + uint32 bbid = range.first; + std::cout << "BB" << bbid << ": < " ; + for (auto pos : range.second) { + std::cout << "[" << pos.first << ", " << pos.second << ") "; + } + std::cout << " > " ; + } + std::cout << "\n"; + } + void DumpDefs() { + std::cout << "R" << regno << ": "; + for (auto def : defPoints) { + def->Dump(); + } + std::cout << "\n"; + } + void DumpUses() { + std::cout << "R" << regno << ": "; + for (auto def : usePoints) { + def->Dump(); + } + std::cout << "\n"; + } + + private: + MapleMap> ranges; + MapleSet conflict; + InsnMapleSet defPoints; + InsnMapleSet usePoints; + uint32 numCall = 0; + RegType regType = kRegTyUndef; + regno_t regno = 0; + MapleAllocator &alloc; +}; + +class LiveIntervalAnalysis { + public: + LiveIntervalAnalysis(CGFunc &func, MemPool &memPool) + : cgFunc(&func), + memPool(&memPool), + alloc(&memPool), + vregIntervals(alloc.Adapter()) {} + + virtual ~LiveIntervalAnalysis() { + cgFunc = nullptr; + memPool = nullptr; + bfs = nullptr; + } + + virtual void ComputeLiveIntervals() = 0; + virtual void CoalesceRegisters() = 0; + void Run(); + void Analysis(); + void DoAnalysis(); + void ClearBFS(); + void Dump(); + void CoalesceLiveIntervals(LiveInterval &lrDest, LiveInterval &lrSrc); + LiveInterval *GetLiveInterval(regno_t regno) { + MapleMap::const_iterator it = vregIntervals.find(regno); + if (it == vregIntervals.cend()) { + return nullptr; + } else { + return it->second; + } + } + + protected: + CGFunc *cgFunc; + MemPool *memPool; + MapleAllocator alloc; + MapleMap vregIntervals; + Bfs *bfs = nullptr; + bool runAnalysis = false; +}; + + +MAPLE_FUNC_PHASE_DECLARE(CgRegCoalesce, maplebe::CGFunc) +MAPLE_FUNC_PHASE_DECLARE_BEGIN(CGliveIntervalAnalysis, maplebe::CGFunc) + LiveIntervalAnalysis *GetResult() { + return liveInterval; + } + LiveIntervalAnalysis *liveInterval = nullptr; + OVERRIDE_DEPENDENCE +MAPLE_FUNC_PHASE_DECLARE_END +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_REGCOALESCE_H */ diff --git a/src/mapleall/maple_be/include/cg/reg_info.h b/src/mapleall/maple_be/include/cg/reg_info.h new file mode 100644 index 0000000000000000000000000000000000000000..8a1100c12d0f2f2ea8b9a2b3432484f7f853ca16 --- /dev/null +++ 
b/src/mapleall/maple_be/include/cg/reg_info.h @@ -0,0 +1,113 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_CG_REG_INFO_H +#define MAPLEBE_INCLUDE_CG_REG_INFO_H + +#include "isa.h" +#include "insn.h" + +namespace maplebe { +constexpr size_t kSpillMemOpndNum = 4; + +class RegisterInfo { + public: + explicit RegisterInfo(MapleAllocator &mallocator) + : allIntRegs(mallocator.Adapter()), + allFpRegs(mallocator.Adapter()), + allregs(mallocator.Adapter()) {} + + virtual ~RegisterInfo() { + cgFunc = nullptr; + } + + virtual void Init() = 0; + virtual void Fini() = 0; + const MapleSet &GetAllRegs() const { + return allregs; + } + const MapleSet &GetRegsFromType(RegType regTy) const { + if (regTy == kRegTyInt) { + return allIntRegs; + } else if (regTy == kRegTyFloat) { + return allFpRegs; + } else { + CHECK_FATAL(false, "NIY, unsupported reg type"); + } + } + void AddToAllRegs(regno_t regNo) { + (void)allregs.insert(regNo); + } + void AddToIntRegs(regno_t regNo) { + (void)allIntRegs.insert(regNo); + } + void AddToFpRegs(regno_t regNo) { + (void)allFpRegs.insert(regNo); + } + void SetCurrFunction(CGFunc &func) { + cgFunc = &func; + } + CGFunc *GetCurrFunction() const { + return cgFunc; + } + virtual RegOperand *GetOrCreatePhyRegOperand(regno_t regNO, uint32 size, RegType kind, uint32 flag = 0) = 0; + virtual bool IsGPRegister(regno_t regNO) const = 0; + virtual bool IsPreAssignedReg(regno_t regNO) const = 0; + virtual uint32 GetIntParamRegIdx(regno_t regNO) const = 0; + virtual uint32 GetFpParamRegIdx(regno_t regNO) const = 0; + virtual bool IsSpecialReg(regno_t regno) const = 0; + virtual bool IsAvailableReg(regno_t regNO) const = 0; + virtual bool IsCalleeSavedReg(regno_t regno) const = 0; + virtual bool IsYieldPointReg(regno_t regNO) const = 0; + virtual bool IsUntouchableReg(uint32 regNO) const = 0; + virtual bool IsUnconcernedReg(regno_t regNO) const = 0; + virtual bool IsUnconcernedReg(const RegOperand ®Opnd) const = 0; + virtual bool IsVirtualRegister(const RegOperand ®Opnd) = 0; + virtual bool IsVirtualRegister(regno_t regno) = 0; + virtual bool IsFramePointReg(regno_t regNO) const = 0; + virtual bool IsReservedReg(regno_t regNO, bool doMultiPass) const = 0; + virtual void SaveCalleeSavedReg(MapleSet savedRegs) = 0; + virtual uint32 GetIntRegsParmsNum() = 0; + virtual uint32 GetIntRetRegsNum() = 0; + virtual uint32 GetFpRetRegsNum() = 0; + virtual uint32 GetFloatRegsParmsNum() = 0; + virtual regno_t GetLastParamsIntReg() = 0; + virtual regno_t GetLastParamsFpReg() = 0; + virtual regno_t GetIntRetReg(uint32 idx) = 0; + virtual regno_t GetFpRetReg(uint32 idx) = 0; + virtual regno_t GetReservedSpillReg() = 0; + virtual regno_t GetSecondReservedSpillReg() = 0; + virtual regno_t GetYieldPointReg() const = 0; + virtual regno_t GetStackPointReg() const = 0; + virtual uint32 GetAllRegNum() = 0; + virtual uint32 GetNormalUseOperandNum() = 0; + virtual regno_t GetInvalidReg() = 0; + virtual bool IsSpillRegInRA(regno_t regNO, bool has3RegOpnd) = 0; + 
virtual Insn *BuildStrInsn(uint32 regSize, PrimType stype, RegOperand &phyOpnd, MemOperand &memOpnd) = 0; + virtual Insn *BuildLdrInsn(uint32 regSize, PrimType stype, RegOperand &phyOpnd, MemOperand &memOpnd) = 0; + virtual MemOperand *AdjustMemOperandIfOffsetOutOfRange(MemOperand *memOpnd, regno_t vrNum, + bool isDest, Insn &insn, regno_t regNum, bool &isOutOfRange) = 0; + + // used in color ra + virtual regno_t GetIntSpillFillReg(size_t idx) const = 0; + virtual regno_t GetFpSpillFillReg(size_t idx) const = 0; + private: + MapleSet allIntRegs; + MapleSet allFpRegs; + MapleSet allregs; + CGFunc *cgFunc = nullptr; +}; +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_REG_INFO_H */ diff --git a/src/mapleall/maple_be/include/cg/regsaves.h b/src/mapleall/maple_be/include/cg/regsaves.h new file mode 100644 index 0000000000000000000000000000000000000000..5f20313a9ec66c01957725e113a60512bcf47f45 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/regsaves.h @@ -0,0 +1,64 @@ +/* + * Copyright (c) [2022] Futurewei Technologies Co., Ltd. All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan Permissive Software License v2. + * You can use this software according to the terms and conditions of the MulanPSL - 2.0. + * You may obtain a copy of MulanPSL - 2.0 at: + * + * https://opensource.org/licenses/MulanPSL-2.0 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the MulanPSL - 2.0 for more details. + */ +#ifndef MAPLEBE_INCLUDE_CG_REGSAVES_OPT_H +#define MAPLEBE_INCLUDE_CG_REGSAVES_OPT_H + +#include "cgfunc.h" +#include "cg_phase.h" + +namespace maplebe { +class RegSavesOpt { + public: + RegSavesOpt(CGFunc &func, MemPool &pool) + : cgFunc(&func), + memPool(&pool), + alloc(&pool) {} + + virtual ~RegSavesOpt() = default; + + virtual void Run() {} + + std::string PhaseName() const { + return "regsavesopt"; + } + + CGFunc *GetCGFunc() const { + return cgFunc; + } + + MemPool *GetMemPool() const { + return memPool; + } + + bool GetEnabledDebug() const { + return enabledDebug; + } + + void SetEnabledDebug(bool d) { + enabledDebug = d; + } + + protected: + CGFunc *cgFunc; + MemPool *memPool; + MapleAllocator alloc; + bool enabledDebug = false; +}; + +MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgRegSavesOpt, maplebe::CGFunc) +MAPLE_FUNC_PHASE_DECLARE_END +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_REGSAVES_OPT_H */ diff --git a/src/mapleall/maple_be/include/cg/rematerialize.h b/src/mapleall/maple_be/include/cg/rematerialize.h new file mode 100644 index 0000000000000000000000000000000000000000..80c677e85ce9fe830fbbce4bee99fc24c87b081b --- /dev/null +++ b/src/mapleall/maple_be/include/cg/rematerialize.h @@ -0,0 +1,91 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ + +#ifndef MAPLEBE_INCLUDE_CG_REMATERIALIZE_H +#define MAPLEBE_INCLUDE_CG_REMATERIALIZE_H + +#include "cgfunc.h" + +namespace maplebe { +enum RematLevel { + kRematOff = 0, + kRematConst = 1, + kRematAddr = 2, + kRematDreadLocal = 3, + kRematDreadGlobal = 4 +}; + +class LiveRange; + +class Rematerializer { + public: + Rematerializer() = default; + virtual ~Rematerializer() = default; + + void SetRematerializable(const MIRConst *c) { + op = OP_constval; + rematInfo.mirConst = c; + } + + void SetRematerializable(Opcode opcode, const MIRSymbol *symbol, FieldID fieldId, bool addrUp) { + op = opcode; + rematInfo.sym = symbol; + fieldID = fieldId; + addrUpper = addrUp; + } + + void SetRematLevel(RematLevel val) { + rematLevel = val; + } + RematLevel GetRematLevel() const { + return rematLevel; + } + + Opcode GetOp() const { + return op; + } + + bool IsRematerializable(CGFunc &cgFunc, RematLevel rematLev, const LiveRange &lr) const; + std::vector Rematerialize(CGFunc &cgFunc, RegOperand ®Op, const LiveRange &lr); + + protected: + RematLevel rematLevel = kRematOff; + Opcode op = OP_undef; /* OP_constval, OP_addrof or OP_dread if rematerializable */ + union RematInfo { + const MIRConst *mirConst; + const MIRSymbol *sym; + } rematInfo; /* info for rematerializing value */ + FieldID fieldID = 0; /* used only when op is OP_addrof or OP_dread */ + bool addrUpper = false; /* indicates the upper bits of an addrof */ + + private: + bool IsRematerializableForAddrof(CGFunc &cgFunc, const LiveRange &lr) const; + bool IsRematerializableForDread(CGFunc &cgFunc, RematLevel rematLev) const; + + virtual bool IsRematerializableForConstval(int64 val, uint32 bitLen) const = 0; + virtual bool IsRematerializableForDread(int32 offset) const = 0; + + virtual std::vector RematerializeForConstval(CGFunc &cgFunc, RegOperand ®Op, + const LiveRange &lr) = 0; + + virtual std::vector RematerializeForAddrof(CGFunc &cgFunc, RegOperand ®Op, + int32 offset) = 0; + + virtual std::vector RematerializeForDread(CGFunc &cgFunc, RegOperand ®Op, + int32 offset, PrimType type) = 0; +}; +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_REMATERIALIZE_H */ \ No newline at end of file diff --git a/src/mapleall/maple_be/include/cg/schedule.h b/src/mapleall/maple_be/include/cg/schedule.h new file mode 100644 index 0000000000000000000000000000000000000000..647b2501f12f67619e7e20cae27ddd815a08c8d0 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/schedule.h @@ -0,0 +1,191 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MAPLEBE_INCLUDE_CG_SCHEDULE_H +#define MAPLEBE_INCLUDE_CG_SCHEDULE_H + +#include "data_dep_base.h" +#include "data_dep_analysis.h" +#include "insn.h" +#include "live.h" +#include "mad.h" + +namespace maplebe { +#define LIST_SCHED_DUMP_NEWPM CG_DEBUG_FUNC(f) +#define LIST_SCHED_DUMP_REF CG_DEBUG_FUNC(cgFunc) + +class RegPressureSchedule { + public: + RegPressureSchedule(CGFunc &func, MapleAllocator &alloc) + : cgFunc(func), liveReg(alloc.Adapter()), scheduledNode(alloc.Adapter()), + originalNodeSeries(alloc.Adapter()), readyList(alloc.Adapter()), + partialList(alloc.Adapter()), partialSet(alloc.Adapter()), + partialScheduledNode(alloc.Adapter()), optimisticScheduledNodes(alloc.Adapter()), + splitterIndexes(alloc.Adapter()), liveInRegNO(alloc.Adapter()), liveOutRegNO(alloc.Adapter()) {} + virtual ~RegPressureSchedule() = default; + + void InitBBInfo(BB &b, MemPool &memPool, const MapleVector &nodes); + void BuildPhyRegInfo(const std::vector ®NumVec); + void InitPartialSplitters(const MapleVector &nodes); + void Init(const MapleVector &nodes); + void UpdateBBPressure(const DepNode &node); + void CalculatePressure(DepNode &node, regno_t reg, bool def) const; + void SortReadyList(); + static bool IsLastUse(const DepNode &node, regno_t regNO) ; + void ReCalculateDepNodePressure(DepNode &node) const; + void UpdateLiveReg(const DepNode &node, regno_t reg, bool def); + bool CanSchedule(const DepNode &node) const; + void UpdateReadyList(const DepNode &node); + void BruteUpdateReadyList(const DepNode &node, std::vector &changedToReady); + void RestoreReadyList(DepNode &node, std::vector &changedToReady); + void UpdatePriority(DepNode &node); + void CalculateMaxDepth(const MapleVector &nodes) const; + void CalculateNear(const DepNode &node); + static bool DepNodePriorityCmp(const DepNode *node1, const DepNode *node2); + DepNode *ChooseNode(); + void DoScheduling(MapleVector &nodes); + void HeuristicScheduling(MapleVector &nodes); + int CalculateRegisterPressure(MapleVector &nodes); + void PartialScheduling(MapleVector &nodes); + void BruteForceScheduling(); + void CalculatePredSize(DepNode &node); + void InitBruteForceScheduling(MapleVector &nodes); + void EmitSchedulingSeries(MapleVector &nodes); + private: + void DumpBBPressureInfo() const; + void DumpBBLiveInfo() const; + void DumpReadyList() const; + void DumpSelectInfo(const DepNode &node) const; + static void DumpDependencyInfo(const MapleVector &nodes); + void ReportScheduleError() const; + void ReportScheduleOutput() const; + RegType GetRegisterType(regno_t reg) const; + + CGFunc &cgFunc; + BB *bb = nullptr; + int32 *maxPressure = nullptr; + int32 *curPressure = nullptr; + MapleUnorderedSet liveReg; + /* save node that has been scheduled. */ + MapleVector scheduledNode; + MapleVector originalNodeSeries; + MapleVector readyList; + /* save partial nodes to be scheduled */ + MapleVector partialList; + MapleSet partialSet; + /* save partial nodes which have been scheduled. */ + MapleVector partialScheduledNode; + /* optimistic schedule series with minimum register pressure */ + MapleVector optimisticScheduledNodes; + /* save split points */ + MapleVector splitterIndexes; + /* save integer register pressure */ + std::vector integerRegisterPressureList; + /* save the amount of every type register. 
*/ + int32 *physicalRegNum = nullptr; + int32 maxPriority = 0; + int32 scheduleSeriesCount = 0; + /* live in register set */ + MapleSet liveInRegNO; + /* live out register set */ + MapleSet liveOutRegNO; + /* register pressure without pre-scheduling */ + int originalPressure = 0; + /* register pressure after pre-scheduling */ + int scheduledPressure = 0; + /* minimum pressure ever met */ + int minPressure = -1; +}; + +enum SimulateType : uint8 { + kListSchedule, + kBruteForce, + kSimulateOnly +}; + +class Schedule { + public: + Schedule(CGFunc &func, MemPool &memPool, LiveAnalysis &liveAnalysis, const std::string &phase) + : phaseName(phase), + cgFunc(func), + memPool(memPool), + alloc(&memPool), + live(liveAnalysis), + considerRegPressure(false), + nodes(alloc.Adapter()), + readyList(alloc.Adapter()), + liveInRegNo(alloc.Adapter()), + liveOutRegNo(alloc.Adapter()) {} + + virtual ~Schedule() = default; + virtual void MemoryAccessPairOpt() = 0; + virtual void ClinitPairOpt() = 0; + virtual uint32 DoSchedule() = 0; + virtual uint32 DoBruteForceSchedule() = 0; + virtual uint32 SimulateOnly() = 0; + virtual void UpdateBruteForceSchedCycle() = 0; + virtual void IterateBruteForce(DepNode &targetNode, MapleVector &readyList, uint32 currCycle, + MapleVector &scheduledNodes, uint32 &maxCycleCount, + MapleVector &optimizedScheduledNodes) = 0; + virtual void UpdateReadyList(DepNode &targetNode, MapleVector &readyList, bool updateEStart) = 0; + virtual void ListScheduling(bool beforeRA) = 0; + virtual void FinalizeScheduling(BB &bb, const DataDepBase &dataDepBase) = 0; + + protected: + virtual void Init() = 0; + virtual uint32 ComputeEstart(uint32 cycle) = 0; + virtual void ComputeLstart(uint32 maxEstart) = 0; + virtual void UpdateELStartsOnCycle(uint32 cycle) = 0; + virtual void RandomTest() = 0; + virtual void EraseNodeFromReadyList(const DepNode &target) = 0; + virtual void EraseNodeFromNodeList(const DepNode &target, MapleVector &nodeList) = 0; + virtual uint32 GetNextSepIndex() const = 0; + virtual void CountUnitKind(const DepNode &depNode, uint32 array[], const uint32 arraySize) const = 0; + virtual void FindAndCombineMemoryAccessPair(const std::vector &memList) = 0; + virtual void RegPressureScheduling(BB &bb, MapleVector &nodes) = 0; + virtual bool CanCombine(const Insn &insn) const = 0; + void SetConsiderRegPressure() { + considerRegPressure = true; + } + bool GetConsiderRegPressure() const { + return considerRegPressure; + } + void InitIDAndLoc(); + void RestoreFirstLoc(); + std::string PhaseName() const { + return phaseName; + } + + const std::string phaseName; + CGFunc &cgFunc; + MemPool &memPool; + MapleAllocator alloc; + LiveAnalysis &live; + DataDepBase *ddb = nullptr; + IntraDataDepAnalysis *intraDDA = nullptr; + MAD *mad = nullptr; + uint32 lastSeparatorIndex = 0; + uint32 nodeSize = 0; + bool considerRegPressure = false; + MapleVector nodes; /* Dependence graph */ + MapleVector readyList; /* Ready list. 
*/ + MapleSet liveInRegNo; + MapleSet liveOutRegNo; +}; + +MAPLE_FUNC_PHASE_DECLARE(CgPreScheduling, maplebe::CGFunc) +MAPLE_FUNC_PHASE_DECLARE(CgScheduling, maplebe::CGFunc) +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_SCHEDULE_H */ diff --git a/src/mapleall/maple_be/include/cg/sparse_datainfo.h b/src/mapleall/maple_be/include/cg/sparse_datainfo.h new file mode 100644 index 0000000000000000000000000000000000000000..ccac2aebb6926be68614434e1f3009ce83310856 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/sparse_datainfo.h @@ -0,0 +1,140 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_CG_SPARSE_DATAINFO_H +#define MAPLEBE_INCLUDE_CG_SPARSE_DATAINFO_H +#include "maple_string.h" +#include "common_utils.h" +#include "mempool.h" +#include "mempool_allocator.h" +#include "maple_sparse_bitvector.h" + +namespace maplebe { +class SparseDataInfo { + /* + * SparseDataInfo has the same imterface like DataInfo + * it can be used when the data member is small while the data + * range is big.like in live analysis, in some extreme case the + * vreg num range is 10k while in each bb, the max num of is 30+ + */ + public: + SparseDataInfo(uint32 bitNum, MapleAllocator &alloc) + : allocator(alloc), + info(allocator), + maxRegNum(bitNum) {} + + SparseDataInfo(const SparseDataInfo &other, MapleAllocator &alloc) + : allocator(alloc), + info(other.info, allocator), + maxRegNum(other.maxRegNum) {} + + SparseDataInfo(const SparseDataInfo &other) + : allocator(other.allocator), + info(other.info, allocator), + maxRegNum(other.maxRegNum) {} + + SparseDataInfo &Clone(MapleAllocator &alloc) const { + auto *dataInfo = alloc.New(*this, alloc); + return *dataInfo; + } + + ~SparseDataInfo() = default; + + SparseDataInfo &operator=(const SparseDataInfo &other) { + if (this == &other) { + return *this; + } + allocator = other.GetAllocator(); + info = other.GetInfo(); + maxRegNum = other.GetMaxRegNum(); + return *this; + } + + void SetBit(uint32 bitNO) { + info.Set(bitNO); + } + + void ResetBit(uint32 bitNO) { + info.Reset(bitNO); + } + + bool TestBit(uint32 bitNO) const { + return info.Test(bitNO); + } + + bool NoneBit() const { + return info.Empty(); + } + + size_t Size() const { + return maxRegNum; + } + + const MapleAllocator &GetAllocator() const { + return allocator; + } + + const MapleSparseBitVector<> &GetInfo() const { + return info; + } + + uint32 GetMaxRegNum() const { + return maxRegNum; + } + + bool IsEqual(const SparseDataInfo &secondInfo) const { + return info == secondInfo.GetInfo(); + } + + bool IsEqual(const MapleSparseBitVector<> &liveInfoBak) const { + return info == liveInfoBak; + } + + void AndBits(const SparseDataInfo &secondInfo) { + info &= secondInfo.info; + } + + void OrBits(const SparseDataInfo &secondInfo) { + info |= secondInfo.info; + } + + /* if bit in secondElem is 1, bit in current DataInfo is set 0 */ + void Difference(const SparseDataInfo &secondInfo) { + info.Diff(secondInfo.info); + } + + void ResetAllBit() { + 
info.Clear(); + } + + void EnlargeCapacityToAdaptSize(uint32 bitNO) { + /* add one more size for each enlarge action */ + } + + void ClearDataInfo() { + info.Clear(); + } + + template + void GetBitsOfInfo(T &wordRes) const { + info.ConvertToSet(wordRes); + } + + private: + MapleAllocator allocator; + MapleSparseBitVector<> info; + uint32 maxRegNum; +}; +} /* namespace maplebe */ +#endif /* MAPLEBE_INCLUDE_CG_SPARSE_DATAINFO_H */ diff --git a/src/mapleall/maple_be/include/cg/standardize.h b/src/mapleall/maple_be/include/cg/standardize.h new file mode 100644 index 0000000000000000000000000000000000000000..dac0e2870202861d6e81b987474de902d89a296e --- /dev/null +++ b/src/mapleall/maple_be/include/cg/standardize.h @@ -0,0 +1,67 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ + +#ifndef MAPLEBE_INCLUDE_STANDARDIZE_H +#define MAPLEBE_INCLUDE_STANDARDIZE_H + +#include "cgfunc.h" +namespace maplebe { +class Standardize { + public: + explicit Standardize(CGFunc &f) : cgFunc(&f) {} + + virtual ~Standardize() { + cgFunc = nullptr; + } + + /* + * for cpu instruction contains different operands + * maple provide a default implement from three address to two address + * convertion rule is: + * mop(dest, src1, src2) -> mov(dest, src1) + * mop(dest, src2) + * maple provide a default implement from two address to one address for unary op + * convertion rule is: + * mop(dest, src) -> mov(dest, src1) + * mop(dest) + */ + void AddressMapping(Insn &insn) const; + + void DoStandardize(); + + protected: + void SetAddressMapping(bool needMapping) { + needAddrMapping = needMapping; + } + bool NeedAddressMapping(const Insn &insn) { + /* Operand number for two addressing mode is 2 */ + /* and 3 for three addressing mode */ + needAddrMapping = (insn.GetOperandSize() > 2) || (insn.IsUnaryOp()); + return needAddrMapping; + } + private: + virtual void StdzMov(Insn &insn) = 0; + virtual void StdzStrLdr(Insn &insn) = 0; + virtual void StdzBasicOp(Insn &insn) = 0; + virtual void StdzUnaryOp(Insn &insn, CGFunc &cgFunc) = 0; + virtual void StdzCvtOp(Insn &insn, CGFunc &cgFunc) = 0; + virtual void StdzShiftOp(Insn &insn, CGFunc &cgFunc) = 0; + CGFunc *cgFunc; + bool needAddrMapping = false; +}; +MAPLE_FUNC_PHASE_DECLARE_BEGIN(InstructionStandardize, maplebe::CGFunc) +MAPLE_FUNC_PHASE_DECLARE_END +} +#endif /* MAPLEBE_INCLUDE_STANDARDIZE_H */ diff --git a/src/mapleall/maple_be/include/cg/strldr.h b/src/mapleall/maple_be/include/cg/strldr.h new file mode 100644 index 0000000000000000000000000000000000000000..c36e5772923e79c0c686674fe5c7ccf89be502ae --- /dev/null +++ b/src/mapleall/maple_be/include/cg/strldr.h @@ -0,0 +1,41 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_CG_STRLDR_H +#define MAPLEBE_INCLUDE_CG_STRLDR_H +#include "cg_phase.h" +#include "maple_phase.h" + +namespace maplebe { +class StoreLoadOpt { + public: + StoreLoadOpt(CGFunc &func, MemPool &memPool) : cgFunc(func), memPool(memPool) {} + virtual ~StoreLoadOpt() = default; + virtual void Run() = 0; + std::string PhaseName() const { + return "storeloadopt"; + } + + protected: + CGFunc &cgFunc; + MemPool &memPool; + /* if the number of bbs is more than 500 or the number of insns is more than 9000, don't optimize. */ + static constexpr uint32 kMaxBBNum = 500; + static constexpr uint32 kMaxInsnNum = 9000; +}; + +MAPLE_FUNC_PHASE_DECLARE(CgStoreLoadOpt, maplebe::CGFunc) +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_STRLDR_H */ \ No newline at end of file diff --git a/src/mapleall/maple_be/include/cg/tailcall.h b/src/mapleall/maple_be/include/cg/tailcall.h new file mode 100644 index 0000000000000000000000000000000000000000..74eee52b4048724a2128a1c5a6248ca00a432aec --- /dev/null +++ b/src/mapleall/maple_be/include/cg/tailcall.h @@ -0,0 +1,87 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co., Ltd. All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan Permissive Software License v2. + * You can use this software according to the terms and conditions of the MulanPSL - 2.0. + * You may obtain a copy of MulanPSL - 2.0 at: + * + * https://opensource.org/licenses/MulanPSL-2.0 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the MulanPSL - 2.0 for more details. 
+ */ +#ifndef MAPLEBE_INCLUDE_CG_TAILCALL_H +#define MAPLEBE_INCLUDE_CG_TAILCALL_H + +#include "cgfunc.h" +#include "cg_phase.h" +#include "cg_dominance.h" +#include "cg.h" +#include "cg_ssa.h" +#include "reg_coalesce.h" + +namespace maplebe { + +class TailCallOpt { + public: + TailCallOpt(MemPool &pool, CGFunc &func) + : cgFunc(func), + memPool(&pool), + tmpAlloc(&pool), + exitBB2CallSitesMap(tmpAlloc.Adapter()) { + exitBB2CallSitesMap.clear(); + } + + virtual ~TailCallOpt() = default; + + void Run(); + bool DoTailCallOpt(); + void TideExitBB(); + bool OptimizeTailBB(BB &bb, MapleSet &callInsns, const BB &exitBB) const; + void TailCallBBOpt(BB &bb, MapleSet &callInsns, BB &exitBB); + void ConvertToTailCalls(MapleSet &callInsnsMap); + MapleMap> &GetExitBB2CallSitesMap() { + return exitBB2CallSitesMap; + } + void SetCurTailcallExitBB(BB *bb) { + curTailcallExitBB = bb; + } + BB *GetCurTailcallExitBB() { + return curTailcallExitBB; + } + + const MemPool *GetMemPool() const { + return memPool; + } + + virtual bool IsFuncNeedFrame(Insn &callInsn) const = 0; + virtual bool InsnIsCallCand(Insn &insn) const = 0; + virtual bool InsnIsLoadPair(Insn &insn) const = 0; + virtual bool InsnIsMove(Insn &insn) const = 0; + virtual bool InsnIsIndirectCall(Insn &insn) const = 0; + virtual bool InsnIsCall(Insn &insn) const = 0; + virtual bool InsnIsUncondJump(Insn &insn) const = 0; + virtual bool InsnIsAddWithRsp(Insn &insn) const = 0; + virtual bool OpndIsStackRelatedReg(RegOperand &opnd) const = 0; + virtual bool OpndIsR0Reg(RegOperand &opnd) const = 0; + virtual bool OpndIsCalleeSaveReg(RegOperand &opnd) const = 0; + virtual bool IsAddOrSubOp(MOperator mOp) const = 0; + virtual void ReplaceInsnMopWithTailCall(Insn &insn) = 0; + bool IsStackAddrTaken(); + + protected: + CGFunc &cgFunc; + MemPool *memPool; + MapleAllocator tmpAlloc; + bool stackProtect = false; + MapleMap> exitBB2CallSitesMap; + BB *curTailcallExitBB = nullptr; +}; + +MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgTailCallOpt, maplebe::CGFunc) +MAPLE_FUNC_PHASE_DECLARE_END +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_TAILCALL_H */ diff --git a/src/mapleall/maple_be/include/cg/valid_bitmask_imm.txt b/src/mapleall/maple_be/include/cg/valid_bitmask_imm.txt new file mode 100644 index 0000000000000000000000000000000000000000..53a6135b6ebd4f570d728df66a9b2584a3a677ef --- /dev/null +++ b/src/mapleall/maple_be/include/cg/valid_bitmask_imm.txt @@ -0,0 +1,5372 @@ +0x5555555555555555, +0xaaaaaaaaaaaaaaaa, +0x1111111111111111, +0x2222222222222222, +0x4444444444444444, +0x8888888888888888, +0x3333333333333333, +0x6666666666666666, +0xcccccccccccccccc, +0x9999999999999999, +0x7777777777777777, +0xeeeeeeeeeeeeeeee, +0xdddddddddddddddd, +0xbbbbbbbbbbbbbbbb, +0x0101010101010101, +0x0202020202020202, +0x0404040404040404, +0x0808080808080808, +0x1010101010101010, +0x2020202020202020, +0x4040404040404040, +0x8080808080808080, +0x0303030303030303, +0x0606060606060606, +0x0c0c0c0c0c0c0c0c, +0x1818181818181818, +0x3030303030303030, +0x6060606060606060, +0xc0c0c0c0c0c0c0c0, +0x8181818181818181, +0x0707070707070707, +0x0e0e0e0e0e0e0e0e, +0x1c1c1c1c1c1c1c1c, +0x3838383838383838, +0x7070707070707070, +0xe0e0e0e0e0e0e0e0, +0xc1c1c1c1c1c1c1c1, +0x8383838383838383, +0x0f0f0f0f0f0f0f0f, +0x1e1e1e1e1e1e1e1e, +0x3c3c3c3c3c3c3c3c, +0x7878787878787878, +0xf0f0f0f0f0f0f0f0, +0xe1e1e1e1e1e1e1e1, +0xc3c3c3c3c3c3c3c3, +0x8787878787878787, +0x1f1f1f1f1f1f1f1f, +0x3e3e3e3e3e3e3e3e, +0x7c7c7c7c7c7c7c7c, +0xf8f8f8f8f8f8f8f8, +0xf1f1f1f1f1f1f1f1, +0xe3e3e3e3e3e3e3e3, 
+0xc7c7c7c7c7c7c7c7, +0x8f8f8f8f8f8f8f8f, +0x3f3f3f3f3f3f3f3f, +0x7e7e7e7e7e7e7e7e, +0xfcfcfcfcfcfcfcfc, +0xf9f9f9f9f9f9f9f9, +0xf3f3f3f3f3f3f3f3, +0xe7e7e7e7e7e7e7e7, +0xcfcfcfcfcfcfcfcf, +0x9f9f9f9f9f9f9f9f, +0x7f7f7f7f7f7f7f7f, +0xfefefefefefefefe, +0xfdfdfdfdfdfdfdfd, +0xfbfbfbfbfbfbfbfb, +0xf7f7f7f7f7f7f7f7, +0xefefefefefefefef, +0xdfdfdfdfdfdfdfdf, +0xbfbfbfbfbfbfbfbf, +0x0001000100010001, +0x0002000200020002, +0x0004000400040004, +0x0008000800080008, +0x0010001000100010, +0x0020002000200020, +0x0040004000400040, +0x0080008000800080, +0x0100010001000100, +0x0200020002000200, +0x0400040004000400, +0x0800080008000800, +0x1000100010001000, +0x2000200020002000, +0x4000400040004000, +0x8000800080008000, +0x0003000300030003, +0x0006000600060006, +0x000c000c000c000c, +0x0018001800180018, +0x0030003000300030, +0x0060006000600060, +0x00c000c000c000c0, +0x0180018001800180, +0x0300030003000300, +0x0600060006000600, +0x0c000c000c000c00, +0x1800180018001800, +0x3000300030003000, +0x6000600060006000, +0xc000c000c000c000, +0x8001800180018001, +0x0007000700070007, +0x000e000e000e000e, +0x001c001c001c001c, +0x0038003800380038, +0x0070007000700070, +0x00e000e000e000e0, +0x01c001c001c001c0, +0x0380038003800380, +0x0700070007000700, +0x0e000e000e000e00, +0x1c001c001c001c00, +0x3800380038003800, +0x7000700070007000, +0xe000e000e000e000, +0xc001c001c001c001, +0x8003800380038003, +0x000f000f000f000f, +0x001e001e001e001e, +0x003c003c003c003c, +0x0078007800780078, +0x00f000f000f000f0, +0x01e001e001e001e0, +0x03c003c003c003c0, +0x0780078007800780, +0x0f000f000f000f00, +0x1e001e001e001e00, +0x3c003c003c003c00, +0x7800780078007800, +0xf000f000f000f000, +0xe001e001e001e001, +0xc003c003c003c003, +0x8007800780078007, +0x001f001f001f001f, +0x003e003e003e003e, +0x007c007c007c007c, +0x00f800f800f800f8, +0x01f001f001f001f0, +0x03e003e003e003e0, +0x07c007c007c007c0, +0x0f800f800f800f80, +0x1f001f001f001f00, +0x3e003e003e003e00, +0x7c007c007c007c00, +0xf800f800f800f800, +0xf001f001f001f001, +0xe003e003e003e003, +0xc007c007c007c007, +0x800f800f800f800f, +0x003f003f003f003f, +0x007e007e007e007e, +0x00fc00fc00fc00fc, +0x01f801f801f801f8, +0x03f003f003f003f0, +0x07e007e007e007e0, +0x0fc00fc00fc00fc0, +0x1f801f801f801f80, +0x3f003f003f003f00, +0x7e007e007e007e00, +0xfc00fc00fc00fc00, +0xf801f801f801f801, +0xf003f003f003f003, +0xe007e007e007e007, +0xc00fc00fc00fc00f, +0x801f801f801f801f, +0x007f007f007f007f, +0x00fe00fe00fe00fe, +0x01fc01fc01fc01fc, +0x03f803f803f803f8, +0x07f007f007f007f0, +0x0fe00fe00fe00fe0, +0x1fc01fc01fc01fc0, +0x3f803f803f803f80, +0x7f007f007f007f00, +0xfe00fe00fe00fe00, +0xfc01fc01fc01fc01, +0xf803f803f803f803, +0xf007f007f007f007, +0xe00fe00fe00fe00f, +0xc01fc01fc01fc01f, +0x803f803f803f803f, +0x00ff00ff00ff00ff, +0x01fe01fe01fe01fe, +0x03fc03fc03fc03fc, +0x07f807f807f807f8, +0x0ff00ff00ff00ff0, +0x1fe01fe01fe01fe0, +0x3fc03fc03fc03fc0, +0x7f807f807f807f80, +0xff00ff00ff00ff00, +0xfe01fe01fe01fe01, +0xfc03fc03fc03fc03, +0xf807f807f807f807, +0xf00ff00ff00ff00f, +0xe01fe01fe01fe01f, +0xc03fc03fc03fc03f, +0x807f807f807f807f, +0x01ff01ff01ff01ff, +0x03fe03fe03fe03fe, +0x07fc07fc07fc07fc, +0x0ff80ff80ff80ff8, +0x1ff01ff01ff01ff0, +0x3fe03fe03fe03fe0, +0x7fc07fc07fc07fc0, +0xff80ff80ff80ff80, +0xff01ff01ff01ff01, +0xfe03fe03fe03fe03, +0xfc07fc07fc07fc07, +0xf80ff80ff80ff80f, +0xf01ff01ff01ff01f, +0xe03fe03fe03fe03f, +0xc07fc07fc07fc07f, +0x80ff80ff80ff80ff, +0x03ff03ff03ff03ff, +0x07fe07fe07fe07fe, +0x0ffc0ffc0ffc0ffc, +0x1ff81ff81ff81ff8, +0x3ff03ff03ff03ff0, +0x7fe07fe07fe07fe0, +0xffc0ffc0ffc0ffc0, 
+0xff81ff81ff81ff81, +0xff03ff03ff03ff03, +0xfe07fe07fe07fe07, +0xfc0ffc0ffc0ffc0f, +0xf81ff81ff81ff81f, +0xf03ff03ff03ff03f, +0xe07fe07fe07fe07f, +0xc0ffc0ffc0ffc0ff, +0x81ff81ff81ff81ff, +0x07ff07ff07ff07ff, +0x0ffe0ffe0ffe0ffe, +0x1ffc1ffc1ffc1ffc, +0x3ff83ff83ff83ff8, +0x7ff07ff07ff07ff0, +0xffe0ffe0ffe0ffe0, +0xffc1ffc1ffc1ffc1, +0xff83ff83ff83ff83, +0xff07ff07ff07ff07, +0xfe0ffe0ffe0ffe0f, +0xfc1ffc1ffc1ffc1f, +0xf83ff83ff83ff83f, +0xf07ff07ff07ff07f, +0xe0ffe0ffe0ffe0ff, +0xc1ffc1ffc1ffc1ff, +0x83ff83ff83ff83ff, +0x0fff0fff0fff0fff, +0x1ffe1ffe1ffe1ffe, +0x3ffc3ffc3ffc3ffc, +0x7ff87ff87ff87ff8, +0xfff0fff0fff0fff0, +0xffe1ffe1ffe1ffe1, +0xffc3ffc3ffc3ffc3, +0xff87ff87ff87ff87, +0xff0fff0fff0fff0f, +0xfe1ffe1ffe1ffe1f, +0xfc3ffc3ffc3ffc3f, +0xf87ff87ff87ff87f, +0xf0fff0fff0fff0ff, +0xe1ffe1ffe1ffe1ff, +0xc3ffc3ffc3ffc3ff, +0x87ff87ff87ff87ff, +0x1fff1fff1fff1fff, +0x3ffe3ffe3ffe3ffe, +0x7ffc7ffc7ffc7ffc, +0xfff8fff8fff8fff8, +0xfff1fff1fff1fff1, +0xffe3ffe3ffe3ffe3, +0xffc7ffc7ffc7ffc7, +0xff8fff8fff8fff8f, +0xff1fff1fff1fff1f, +0xfe3ffe3ffe3ffe3f, +0xfc7ffc7ffc7ffc7f, +0xf8fff8fff8fff8ff, +0xf1fff1fff1fff1ff, +0xe3ffe3ffe3ffe3ff, +0xc7ffc7ffc7ffc7ff, +0x8fff8fff8fff8fff, +0x3fff3fff3fff3fff, +0x7ffe7ffe7ffe7ffe, +0xfffcfffcfffcfffc, +0xfff9fff9fff9fff9, +0xfff3fff3fff3fff3, +0xffe7ffe7ffe7ffe7, +0xffcfffcfffcfffcf, +0xff9fff9fff9fff9f, +0xff3fff3fff3fff3f, +0xfe7ffe7ffe7ffe7f, +0xfcfffcfffcfffcff, +0xf9fff9fff9fff9ff, +0xf3fff3fff3fff3ff, +0xe7ffe7ffe7ffe7ff, +0xcfffcfffcfffcfff, +0x9fff9fff9fff9fff, +0x7fff7fff7fff7fff, +0xfffefffefffefffe, +0xfffdfffdfffdfffd, +0xfffbfffbfffbfffb, +0xfff7fff7fff7fff7, +0xffefffefffefffef, +0xffdfffdfffdfffdf, +0xffbfffbfffbfffbf, +0xff7fff7fff7fff7f, +0xfefffefffefffeff, +0xfdfffdfffdfffdff, +0xfbfffbfffbfffbff, +0xf7fff7fff7fff7ff, +0xefffefffefffefff, +0xdfffdfffdfffdfff, +0xbfffbfffbfffbfff, +0x0000000100000001, +0x0000000200000002, +0x0000000400000004, +0x0000000800000008, +0x0000001000000010, +0x0000002000000020, +0x0000004000000040, +0x0000008000000080, +0x0000010000000100, +0x0000020000000200, +0x0000040000000400, +0x0000080000000800, +0x0000100000001000, +0x0000200000002000, +0x0000400000004000, +0x0000800000008000, +0x0001000000010000, +0x0002000000020000, +0x0004000000040000, +0x0008000000080000, +0x0010000000100000, +0x0020000000200000, +0x0040000000400000, +0x0080000000800000, +0x0100000001000000, +0x0200000002000000, +0x0400000004000000, +0x0800000008000000, +0x1000000010000000, +0x2000000020000000, +0x4000000040000000, +0x8000000080000000, +0x0000000300000003, +0x0000000600000006, +0x0000000c0000000c, +0x0000001800000018, +0x0000003000000030, +0x0000006000000060, +0x000000c0000000c0, +0x0000018000000180, +0x0000030000000300, +0x0000060000000600, +0x00000c0000000c00, +0x0000180000001800, +0x0000300000003000, +0x0000600000006000, +0x0000c0000000c000, +0x0001800000018000, +0x0003000000030000, +0x0006000000060000, +0x000c0000000c0000, +0x0018000000180000, +0x0030000000300000, +0x0060000000600000, +0x00c0000000c00000, +0x0180000001800000, +0x0300000003000000, +0x0600000006000000, +0x0c0000000c000000, +0x1800000018000000, +0x3000000030000000, +0x6000000060000000, +0xc0000000c0000000, +0x8000000180000001, +0x0000000700000007, +0x0000000e0000000e, +0x0000001c0000001c, +0x0000003800000038, +0x0000007000000070, +0x000000e0000000e0, +0x000001c0000001c0, +0x0000038000000380, +0x0000070000000700, +0x00000e0000000e00, +0x00001c0000001c00, +0x0000380000003800, +0x0000700000007000, +0x0000e0000000e000, +0x0001c0000001c000, +0x0003800000038000, 
+0x0007000000070000, +0x000e0000000e0000, +0x001c0000001c0000, +0x0038000000380000, +0x0070000000700000, +0x00e0000000e00000, +0x01c0000001c00000, +0x0380000003800000, +0x0700000007000000, +0x0e0000000e000000, +0x1c0000001c000000, +0x3800000038000000, +0x7000000070000000, +0xe0000000e0000000, +0xc0000001c0000001, +0x8000000380000003, +0x0000000f0000000f, +0x0000001e0000001e, +0x0000003c0000003c, +0x0000007800000078, +0x000000f0000000f0, +0x000001e0000001e0, +0x000003c0000003c0, +0x0000078000000780, +0x00000f0000000f00, +0x00001e0000001e00, +0x00003c0000003c00, +0x0000780000007800, +0x0000f0000000f000, +0x0001e0000001e000, +0x0003c0000003c000, +0x0007800000078000, +0x000f0000000f0000, +0x001e0000001e0000, +0x003c0000003c0000, +0x0078000000780000, +0x00f0000000f00000, +0x01e0000001e00000, +0x03c0000003c00000, +0x0780000007800000, +0x0f0000000f000000, +0x1e0000001e000000, +0x3c0000003c000000, +0x7800000078000000, +0xf0000000f0000000, +0xe0000001e0000001, +0xc0000003c0000003, +0x8000000780000007, +0x0000001f0000001f, +0x0000003e0000003e, +0x0000007c0000007c, +0x000000f8000000f8, +0x000001f0000001f0, +0x000003e0000003e0, +0x000007c0000007c0, +0x00000f8000000f80, +0x00001f0000001f00, +0x00003e0000003e00, +0x00007c0000007c00, +0x0000f8000000f800, +0x0001f0000001f000, +0x0003e0000003e000, +0x0007c0000007c000, +0x000f8000000f8000, +0x001f0000001f0000, +0x003e0000003e0000, +0x007c0000007c0000, +0x00f8000000f80000, +0x01f0000001f00000, +0x03e0000003e00000, +0x07c0000007c00000, +0x0f8000000f800000, +0x1f0000001f000000, +0x3e0000003e000000, +0x7c0000007c000000, +0xf8000000f8000000, +0xf0000001f0000001, +0xe0000003e0000003, +0xc0000007c0000007, +0x8000000f8000000f, +0x0000003f0000003f, +0x0000007e0000007e, +0x000000fc000000fc, +0x000001f8000001f8, +0x000003f0000003f0, +0x000007e0000007e0, +0x00000fc000000fc0, +0x00001f8000001f80, +0x00003f0000003f00, +0x00007e0000007e00, +0x0000fc000000fc00, +0x0001f8000001f800, +0x0003f0000003f000, +0x0007e0000007e000, +0x000fc000000fc000, +0x001f8000001f8000, +0x003f0000003f0000, +0x007e0000007e0000, +0x00fc000000fc0000, +0x01f8000001f80000, +0x03f0000003f00000, +0x07e0000007e00000, +0x0fc000000fc00000, +0x1f8000001f800000, +0x3f0000003f000000, +0x7e0000007e000000, +0xfc000000fc000000, +0xf8000001f8000001, +0xf0000003f0000003, +0xe0000007e0000007, +0xc000000fc000000f, +0x8000001f8000001f, +0x0000007f0000007f, +0x000000fe000000fe, +0x000001fc000001fc, +0x000003f8000003f8, +0x000007f0000007f0, +0x00000fe000000fe0, +0x00001fc000001fc0, +0x00003f8000003f80, +0x00007f0000007f00, +0x0000fe000000fe00, +0x0001fc000001fc00, +0x0003f8000003f800, +0x0007f0000007f000, +0x000fe000000fe000, +0x001fc000001fc000, +0x003f8000003f8000, +0x007f0000007f0000, +0x00fe000000fe0000, +0x01fc000001fc0000, +0x03f8000003f80000, +0x07f0000007f00000, +0x0fe000000fe00000, +0x1fc000001fc00000, +0x3f8000003f800000, +0x7f0000007f000000, +0xfe000000fe000000, +0xfc000001fc000001, +0xf8000003f8000003, +0xf0000007f0000007, +0xe000000fe000000f, +0xc000001fc000001f, +0x8000003f8000003f, +0x000000ff000000ff, +0x000001fe000001fe, +0x000003fc000003fc, +0x000007f8000007f8, +0x00000ff000000ff0, +0x00001fe000001fe0, +0x00003fc000003fc0, +0x00007f8000007f80, +0x0000ff000000ff00, +0x0001fe000001fe00, +0x0003fc000003fc00, +0x0007f8000007f800, +0x000ff000000ff000, +0x001fe000001fe000, +0x003fc000003fc000, +0x007f8000007f8000, +0x00ff000000ff0000, +0x01fe000001fe0000, +0x03fc000003fc0000, +0x07f8000007f80000, +0x0ff000000ff00000, +0x1fe000001fe00000, +0x3fc000003fc00000, +0x7f8000007f800000, +0xff000000ff000000, 
+0xfe000001fe000001, +0xfc000003fc000003, +0xf8000007f8000007, +0xf000000ff000000f, +0xe000001fe000001f, +0xc000003fc000003f, +0x8000007f8000007f, +0x000001ff000001ff, +0x000003fe000003fe, +0x000007fc000007fc, +0x00000ff800000ff8, +0x00001ff000001ff0, +0x00003fe000003fe0, +0x00007fc000007fc0, +0x0000ff800000ff80, +0x0001ff000001ff00, +0x0003fe000003fe00, +0x0007fc000007fc00, +0x000ff800000ff800, +0x001ff000001ff000, +0x003fe000003fe000, +0x007fc000007fc000, +0x00ff800000ff8000, +0x01ff000001ff0000, +0x03fe000003fe0000, +0x07fc000007fc0000, +0x0ff800000ff80000, +0x1ff000001ff00000, +0x3fe000003fe00000, +0x7fc000007fc00000, +0xff800000ff800000, +0xff000001ff000001, +0xfe000003fe000003, +0xfc000007fc000007, +0xf800000ff800000f, +0xf000001ff000001f, +0xe000003fe000003f, +0xc000007fc000007f, +0x800000ff800000ff, +0x000003ff000003ff, +0x000007fe000007fe, +0x00000ffc00000ffc, +0x00001ff800001ff8, +0x00003ff000003ff0, +0x00007fe000007fe0, +0x0000ffc00000ffc0, +0x0001ff800001ff80, +0x0003ff000003ff00, +0x0007fe000007fe00, +0x000ffc00000ffc00, +0x001ff800001ff800, +0x003ff000003ff000, +0x007fe000007fe000, +0x00ffc00000ffc000, +0x01ff800001ff8000, +0x03ff000003ff0000, +0x07fe000007fe0000, +0x0ffc00000ffc0000, +0x1ff800001ff80000, +0x3ff000003ff00000, +0x7fe000007fe00000, +0xffc00000ffc00000, +0xff800001ff800001, +0xff000003ff000003, +0xfe000007fe000007, +0xfc00000ffc00000f, +0xf800001ff800001f, +0xf000003ff000003f, +0xe000007fe000007f, +0xc00000ffc00000ff, +0x800001ff800001ff, +0x000007ff000007ff, +0x00000ffe00000ffe, +0x00001ffc00001ffc, +0x00003ff800003ff8, +0x00007ff000007ff0, +0x0000ffe00000ffe0, +0x0001ffc00001ffc0, +0x0003ff800003ff80, +0x0007ff000007ff00, +0x000ffe00000ffe00, +0x001ffc00001ffc00, +0x003ff800003ff800, +0x007ff000007ff000, +0x00ffe00000ffe000, +0x01ffc00001ffc000, +0x03ff800003ff8000, +0x07ff000007ff0000, +0x0ffe00000ffe0000, +0x1ffc00001ffc0000, +0x3ff800003ff80000, +0x7ff000007ff00000, +0xffe00000ffe00000, +0xffc00001ffc00001, +0xff800003ff800003, +0xff000007ff000007, +0xfe00000ffe00000f, +0xfc00001ffc00001f, +0xf800003ff800003f, +0xf000007ff000007f, +0xe00000ffe00000ff, +0xc00001ffc00001ff, +0x800003ff800003ff, +0x00000fff00000fff, +0x00001ffe00001ffe, +0x00003ffc00003ffc, +0x00007ff800007ff8, +0x0000fff00000fff0, +0x0001ffe00001ffe0, +0x0003ffc00003ffc0, +0x0007ff800007ff80, +0x000fff00000fff00, +0x001ffe00001ffe00, +0x003ffc00003ffc00, +0x007ff800007ff800, +0x00fff00000fff000, +0x01ffe00001ffe000, +0x03ffc00003ffc000, +0x07ff800007ff8000, +0x0fff00000fff0000, +0x1ffe00001ffe0000, +0x3ffc00003ffc0000, +0x7ff800007ff80000, +0xfff00000fff00000, +0xffe00001ffe00001, +0xffc00003ffc00003, +0xff800007ff800007, +0xff00000fff00000f, +0xfe00001ffe00001f, +0xfc00003ffc00003f, +0xf800007ff800007f, +0xf00000fff00000ff, +0xe00001ffe00001ff, +0xc00003ffc00003ff, +0x800007ff800007ff, +0x00001fff00001fff, +0x00003ffe00003ffe, +0x00007ffc00007ffc, +0x0000fff80000fff8, +0x0001fff00001fff0, +0x0003ffe00003ffe0, +0x0007ffc00007ffc0, +0x000fff80000fff80, +0x001fff00001fff00, +0x003ffe00003ffe00, +0x007ffc00007ffc00, +0x00fff80000fff800, +0x01fff00001fff000, +0x03ffe00003ffe000, +0x07ffc00007ffc000, +0x0fff80000fff8000, +0x1fff00001fff0000, +0x3ffe00003ffe0000, +0x7ffc00007ffc0000, +0xfff80000fff80000, +0xfff00001fff00001, +0xffe00003ffe00003, +0xffc00007ffc00007, +0xff80000fff80000f, +0xff00001fff00001f, +0xfe00003ffe00003f, +0xfc00007ffc00007f, +0xf80000fff80000ff, +0xf00001fff00001ff, +0xe00003ffe00003ff, +0xc00007ffc00007ff, +0x80000fff80000fff, +0x00003fff00003fff, +0x00007ffe00007ffe, 
+0x0000fffc0000fffc, +0x0001fff80001fff8, +0x0003fff00003fff0, +0x0007ffe00007ffe0, +0x000fffc0000fffc0, +0x001fff80001fff80, +0x003fff00003fff00, +0x007ffe00007ffe00, +0x00fffc0000fffc00, +0x01fff80001fff800, +0x03fff00003fff000, +0x07ffe00007ffe000, +0x0fffc0000fffc000, +0x1fff80001fff8000, +0x3fff00003fff0000, +0x7ffe00007ffe0000, +0xfffc0000fffc0000, +0xfff80001fff80001, +0xfff00003fff00003, +0xffe00007ffe00007, +0xffc0000fffc0000f, +0xff80001fff80001f, +0xff00003fff00003f, +0xfe00007ffe00007f, +0xfc0000fffc0000ff, +0xf80001fff80001ff, +0xf00003fff00003ff, +0xe00007ffe00007ff, +0xc0000fffc0000fff, +0x80001fff80001fff, +0x00007fff00007fff, +0x0000fffe0000fffe, +0x0001fffc0001fffc, +0x0003fff80003fff8, +0x0007fff00007fff0, +0x000fffe0000fffe0, +0x001fffc0001fffc0, +0x003fff80003fff80, +0x007fff00007fff00, +0x00fffe0000fffe00, +0x01fffc0001fffc00, +0x03fff80003fff800, +0x07fff00007fff000, +0x0fffe0000fffe000, +0x1fffc0001fffc000, +0x3fff80003fff8000, +0x7fff00007fff0000, +0xfffe0000fffe0000, +0xfffc0001fffc0001, +0xfff80003fff80003, +0xfff00007fff00007, +0xffe0000fffe0000f, +0xffc0001fffc0001f, +0xff80003fff80003f, +0xff00007fff00007f, +0xfe0000fffe0000ff, +0xfc0001fffc0001ff, +0xf80003fff80003ff, +0xf00007fff00007ff, +0xe0000fffe0000fff, +0xc0001fffc0001fff, +0x80003fff80003fff, +0x0000ffff0000ffff, +0x0001fffe0001fffe, +0x0003fffc0003fffc, +0x0007fff80007fff8, +0x000ffff0000ffff0, +0x001fffe0001fffe0, +0x003fffc0003fffc0, +0x007fff80007fff80, +0x00ffff0000ffff00, +0x01fffe0001fffe00, +0x03fffc0003fffc00, +0x07fff80007fff800, +0x0ffff0000ffff000, +0x1fffe0001fffe000, +0x3fffc0003fffc000, +0x7fff80007fff8000, +0xffff0000ffff0000, +0xfffe0001fffe0001, +0xfffc0003fffc0003, +0xfff80007fff80007, +0xfff0000ffff0000f, +0xffe0001fffe0001f, +0xffc0003fffc0003f, +0xff80007fff80007f, +0xff0000ffff0000ff, +0xfe0001fffe0001ff, +0xfc0003fffc0003ff, +0xf80007fff80007ff, +0xf0000ffff0000fff, +0xe0001fffe0001fff, +0xc0003fffc0003fff, +0x80007fff80007fff, +0x0001ffff0001ffff, +0x0003fffe0003fffe, +0x0007fffc0007fffc, +0x000ffff8000ffff8, +0x001ffff0001ffff0, +0x003fffe0003fffe0, +0x007fffc0007fffc0, +0x00ffff8000ffff80, +0x01ffff0001ffff00, +0x03fffe0003fffe00, +0x07fffc0007fffc00, +0x0ffff8000ffff800, +0x1ffff0001ffff000, +0x3fffe0003fffe000, +0x7fffc0007fffc000, +0xffff8000ffff8000, +0xffff0001ffff0001, +0xfffe0003fffe0003, +0xfffc0007fffc0007, +0xfff8000ffff8000f, +0xfff0001ffff0001f, +0xffe0003fffe0003f, +0xffc0007fffc0007f, +0xff8000ffff8000ff, +0xff0001ffff0001ff, +0xfe0003fffe0003ff, +0xfc0007fffc0007ff, +0xf8000ffff8000fff, +0xf0001ffff0001fff, +0xe0003fffe0003fff, +0xc0007fffc0007fff, +0x8000ffff8000ffff, +0x0003ffff0003ffff, +0x0007fffe0007fffe, +0x000ffffc000ffffc, +0x001ffff8001ffff8, +0x003ffff0003ffff0, +0x007fffe0007fffe0, +0x00ffffc000ffffc0, +0x01ffff8001ffff80, +0x03ffff0003ffff00, +0x07fffe0007fffe00, +0x0ffffc000ffffc00, +0x1ffff8001ffff800, +0x3ffff0003ffff000, +0x7fffe0007fffe000, +0xffffc000ffffc000, +0xffff8001ffff8001, +0xffff0003ffff0003, +0xfffe0007fffe0007, +0xfffc000ffffc000f, +0xfff8001ffff8001f, +0xfff0003ffff0003f, +0xffe0007fffe0007f, +0xffc000ffffc000ff, +0xff8001ffff8001ff, +0xff0003ffff0003ff, +0xfe0007fffe0007ff, +0xfc000ffffc000fff, +0xf8001ffff8001fff, +0xf0003ffff0003fff, +0xe0007fffe0007fff, +0xc000ffffc000ffff, +0x8001ffff8001ffff, +0x0007ffff0007ffff, +0x000ffffe000ffffe, +0x001ffffc001ffffc, +0x003ffff8003ffff8, +0x007ffff0007ffff0, +0x00ffffe000ffffe0, +0x01ffffc001ffffc0, +0x03ffff8003ffff80, +0x07ffff0007ffff00, +0x0ffffe000ffffe00, +0x1ffffc001ffffc00, 
+0x3ffff8003ffff800, +0x7ffff0007ffff000, +0xffffe000ffffe000, +0xffffc001ffffc001, +0xffff8003ffff8003, +0xffff0007ffff0007, +0xfffe000ffffe000f, +0xfffc001ffffc001f, +0xfff8003ffff8003f, +0xfff0007ffff0007f, +0xffe000ffffe000ff, +0xffc001ffffc001ff, +0xff8003ffff8003ff, +0xff0007ffff0007ff, +0xfe000ffffe000fff, +0xfc001ffffc001fff, +0xf8003ffff8003fff, +0xf0007ffff0007fff, +0xe000ffffe000ffff, +0xc001ffffc001ffff, +0x8003ffff8003ffff, +0x000fffff000fffff, +0x001ffffe001ffffe, +0x003ffffc003ffffc, +0x007ffff8007ffff8, +0x00fffff000fffff0, +0x01ffffe001ffffe0, +0x03ffffc003ffffc0, +0x07ffff8007ffff80, +0x0fffff000fffff00, +0x1ffffe001ffffe00, +0x3ffffc003ffffc00, +0x7ffff8007ffff800, +0xfffff000fffff000, +0xffffe001ffffe001, +0xffffc003ffffc003, +0xffff8007ffff8007, +0xffff000fffff000f, +0xfffe001ffffe001f, +0xfffc003ffffc003f, +0xfff8007ffff8007f, +0xfff000fffff000ff, +0xffe001ffffe001ff, +0xffc003ffffc003ff, +0xff8007ffff8007ff, +0xff000fffff000fff, +0xfe001ffffe001fff, +0xfc003ffffc003fff, +0xf8007ffff8007fff, +0xf000fffff000ffff, +0xe001ffffe001ffff, +0xc003ffffc003ffff, +0x8007ffff8007ffff, +0x001fffff001fffff, +0x003ffffe003ffffe, +0x007ffffc007ffffc, +0x00fffff800fffff8, +0x01fffff001fffff0, +0x03ffffe003ffffe0, +0x07ffffc007ffffc0, +0x0fffff800fffff80, +0x1fffff001fffff00, +0x3ffffe003ffffe00, +0x7ffffc007ffffc00, +0xfffff800fffff800, +0xfffff001fffff001, +0xffffe003ffffe003, +0xffffc007ffffc007, +0xffff800fffff800f, +0xffff001fffff001f, +0xfffe003ffffe003f, +0xfffc007ffffc007f, +0xfff800fffff800ff, +0xfff001fffff001ff, +0xffe003ffffe003ff, +0xffc007ffffc007ff, +0xff800fffff800fff, +0xff001fffff001fff, +0xfe003ffffe003fff, +0xfc007ffffc007fff, +0xf800fffff800ffff, +0xf001fffff001ffff, +0xe003ffffe003ffff, +0xc007ffffc007ffff, +0x800fffff800fffff, +0x003fffff003fffff, +0x007ffffe007ffffe, +0x00fffffc00fffffc, +0x01fffff801fffff8, +0x03fffff003fffff0, +0x07ffffe007ffffe0, +0x0fffffc00fffffc0, +0x1fffff801fffff80, +0x3fffff003fffff00, +0x7ffffe007ffffe00, +0xfffffc00fffffc00, +0xfffff801fffff801, +0xfffff003fffff003, +0xffffe007ffffe007, +0xffffc00fffffc00f, +0xffff801fffff801f, +0xffff003fffff003f, +0xfffe007ffffe007f, +0xfffc00fffffc00ff, +0xfff801fffff801ff, +0xfff003fffff003ff, +0xffe007ffffe007ff, +0xffc00fffffc00fff, +0xff801fffff801fff, +0xff003fffff003fff, +0xfe007ffffe007fff, +0xfc00fffffc00ffff, +0xf801fffff801ffff, +0xf003fffff003ffff, +0xe007ffffe007ffff, +0xc00fffffc00fffff, +0x801fffff801fffff, +0x007fffff007fffff, +0x00fffffe00fffffe, +0x01fffffc01fffffc, +0x03fffff803fffff8, +0x07fffff007fffff0, +0x0fffffe00fffffe0, +0x1fffffc01fffffc0, +0x3fffff803fffff80, +0x7fffff007fffff00, +0xfffffe00fffffe00, +0xfffffc01fffffc01, +0xfffff803fffff803, +0xfffff007fffff007, +0xffffe00fffffe00f, +0xffffc01fffffc01f, +0xffff803fffff803f, +0xffff007fffff007f, +0xfffe00fffffe00ff, +0xfffc01fffffc01ff, +0xfff803fffff803ff, +0xfff007fffff007ff, +0xffe00fffffe00fff, +0xffc01fffffc01fff, +0xff803fffff803fff, +0xff007fffff007fff, +0xfe00fffffe00ffff, +0xfc01fffffc01ffff, +0xf803fffff803ffff, +0xf007fffff007ffff, +0xe00fffffe00fffff, +0xc01fffffc01fffff, +0x803fffff803fffff, +0x00ffffff00ffffff, +0x01fffffe01fffffe, +0x03fffffc03fffffc, +0x07fffff807fffff8, +0x0ffffff00ffffff0, +0x1fffffe01fffffe0, +0x3fffffc03fffffc0, +0x7fffff807fffff80, +0xffffff00ffffff00, +0xfffffe01fffffe01, +0xfffffc03fffffc03, +0xfffff807fffff807, +0xfffff00ffffff00f, +0xffffe01fffffe01f, +0xffffc03fffffc03f, +0xffff807fffff807f, +0xffff00ffffff00ff, +0xfffe01fffffe01ff, +0xfffc03fffffc03ff, +0xfff807fffff807ff, 
+0xfff00ffffff00fff, +0xffe01fffffe01fff, +0xffc03fffffc03fff, +0xff807fffff807fff, +0xff00ffffff00ffff, +0xfe01fffffe01ffff, +0xfc03fffffc03ffff, +0xf807fffff807ffff, +0xf00ffffff00fffff, +0xe01fffffe01fffff, +0xc03fffffc03fffff, +0x807fffff807fffff, +0x01ffffff01ffffff, +0x03fffffe03fffffe, +0x07fffffc07fffffc, +0x0ffffff80ffffff8, +0x1ffffff01ffffff0, +0x3fffffe03fffffe0, +0x7fffffc07fffffc0, +0xffffff80ffffff80, +0xffffff01ffffff01, +0xfffffe03fffffe03, +0xfffffc07fffffc07, +0xfffff80ffffff80f, +0xfffff01ffffff01f, +0xffffe03fffffe03f, +0xffffc07fffffc07f, +0xffff80ffffff80ff, +0xffff01ffffff01ff, +0xfffe03fffffe03ff, +0xfffc07fffffc07ff, +0xfff80ffffff80fff, +0xfff01ffffff01fff, +0xffe03fffffe03fff, +0xffc07fffffc07fff, +0xff80ffffff80ffff, +0xff01ffffff01ffff, +0xfe03fffffe03ffff, +0xfc07fffffc07ffff, +0xf80ffffff80fffff, +0xf01ffffff01fffff, +0xe03fffffe03fffff, +0xc07fffffc07fffff, +0x80ffffff80ffffff, +0x03ffffff03ffffff, +0x07fffffe07fffffe, +0x0ffffffc0ffffffc, +0x1ffffff81ffffff8, +0x3ffffff03ffffff0, +0x7fffffe07fffffe0, +0xffffffc0ffffffc0, +0xffffff81ffffff81, +0xffffff03ffffff03, +0xfffffe07fffffe07, +0xfffffc0ffffffc0f, +0xfffff81ffffff81f, +0xfffff03ffffff03f, +0xffffe07fffffe07f, +0xffffc0ffffffc0ff, +0xffff81ffffff81ff, +0xffff03ffffff03ff, +0xfffe07fffffe07ff, +0xfffc0ffffffc0fff, +0xfff81ffffff81fff, +0xfff03ffffff03fff, +0xffe07fffffe07fff, +0xffc0ffffffc0ffff, +0xff81ffffff81ffff, +0xff03ffffff03ffff, +0xfe07fffffe07ffff, +0xfc0ffffffc0fffff, +0xf81ffffff81fffff, +0xf03ffffff03fffff, +0xe07fffffe07fffff, +0xc0ffffffc0ffffff, +0x81ffffff81ffffff, +0x07ffffff07ffffff, +0x0ffffffe0ffffffe, +0x1ffffffc1ffffffc, +0x3ffffff83ffffff8, +0x7ffffff07ffffff0, +0xffffffe0ffffffe0, +0xffffffc1ffffffc1, +0xffffff83ffffff83, +0xffffff07ffffff07, +0xfffffe0ffffffe0f, +0xfffffc1ffffffc1f, +0xfffff83ffffff83f, +0xfffff07ffffff07f, +0xffffe0ffffffe0ff, +0xffffc1ffffffc1ff, +0xffff83ffffff83ff, +0xffff07ffffff07ff, +0xfffe0ffffffe0fff, +0xfffc1ffffffc1fff, +0xfff83ffffff83fff, +0xfff07ffffff07fff, +0xffe0ffffffe0ffff, +0xffc1ffffffc1ffff, +0xff83ffffff83ffff, +0xff07ffffff07ffff, +0xfe0ffffffe0fffff, +0xfc1ffffffc1fffff, +0xf83ffffff83fffff, +0xf07ffffff07fffff, +0xe0ffffffe0ffffff, +0xc1ffffffc1ffffff, +0x83ffffff83ffffff, +0x0fffffff0fffffff, +0x1ffffffe1ffffffe, +0x3ffffffc3ffffffc, +0x7ffffff87ffffff8, +0xfffffff0fffffff0, +0xffffffe1ffffffe1, +0xffffffc3ffffffc3, +0xffffff87ffffff87, +0xffffff0fffffff0f, +0xfffffe1ffffffe1f, +0xfffffc3ffffffc3f, +0xfffff87ffffff87f, +0xfffff0fffffff0ff, +0xffffe1ffffffe1ff, +0xffffc3ffffffc3ff, +0xffff87ffffff87ff, +0xffff0fffffff0fff, +0xfffe1ffffffe1fff, +0xfffc3ffffffc3fff, +0xfff87ffffff87fff, +0xfff0fffffff0ffff, +0xffe1ffffffe1ffff, +0xffc3ffffffc3ffff, +0xff87ffffff87ffff, +0xff0fffffff0fffff, +0xfe1ffffffe1fffff, +0xfc3ffffffc3fffff, +0xf87ffffff87fffff, +0xf0fffffff0ffffff, +0xe1ffffffe1ffffff, +0xc3ffffffc3ffffff, +0x87ffffff87ffffff, +0x1fffffff1fffffff, +0x3ffffffe3ffffffe, +0x7ffffffc7ffffffc, +0xfffffff8fffffff8, +0xfffffff1fffffff1, +0xffffffe3ffffffe3, +0xffffffc7ffffffc7, +0xffffff8fffffff8f, +0xffffff1fffffff1f, +0xfffffe3ffffffe3f, +0xfffffc7ffffffc7f, +0xfffff8fffffff8ff, +0xfffff1fffffff1ff, +0xffffe3ffffffe3ff, +0xffffc7ffffffc7ff, +0xffff8fffffff8fff, +0xffff1fffffff1fff, +0xfffe3ffffffe3fff, +0xfffc7ffffffc7fff, +0xfff8fffffff8ffff, +0xfff1fffffff1ffff, +0xffe3ffffffe3ffff, +0xffc7ffffffc7ffff, +0xff8fffffff8fffff, +0xff1fffffff1fffff, +0xfe3ffffffe3fffff, +0xfc7ffffffc7fffff, +0xf8fffffff8ffffff, +0xf1fffffff1ffffff, 
+0xe3ffffffe3ffffff, +0xc7ffffffc7ffffff, +0x8fffffff8fffffff, +0x3fffffff3fffffff, +0x7ffffffe7ffffffe, +0xfffffffcfffffffc, +0xfffffff9fffffff9, +0xfffffff3fffffff3, +0xffffffe7ffffffe7, +0xffffffcfffffffcf, +0xffffff9fffffff9f, +0xffffff3fffffff3f, +0xfffffe7ffffffe7f, +0xfffffcfffffffcff, +0xfffff9fffffff9ff, +0xfffff3fffffff3ff, +0xffffe7ffffffe7ff, +0xffffcfffffffcfff, +0xffff9fffffff9fff, +0xffff3fffffff3fff, +0xfffe7ffffffe7fff, +0xfffcfffffffcffff, +0xfff9fffffff9ffff, +0xfff3fffffff3ffff, +0xffe7ffffffe7ffff, +0xffcfffffffcfffff, +0xff9fffffff9fffff, +0xff3fffffff3fffff, +0xfe7ffffffe7fffff, +0xfcfffffffcffffff, +0xf9fffffff9ffffff, +0xf3fffffff3ffffff, +0xe7ffffffe7ffffff, +0xcfffffffcfffffff, +0x9fffffff9fffffff, +0x7fffffff7fffffff, +0xfffffffefffffffe, +0xfffffffdfffffffd, +0xfffffffbfffffffb, +0xfffffff7fffffff7, +0xffffffefffffffef, +0xffffffdfffffffdf, +0xffffffbfffffffbf, +0xffffff7fffffff7f, +0xfffffefffffffeff, +0xfffffdfffffffdff, +0xfffffbfffffffbff, +0xfffff7fffffff7ff, +0xffffefffffffefff, +0xffffdfffffffdfff, +0xffffbfffffffbfff, +0xffff7fffffff7fff, +0xfffefffffffeffff, +0xfffdfffffffdffff, +0xfffbfffffffbffff, +0xfff7fffffff7ffff, +0xffefffffffefffff, +0xffdfffffffdfffff, +0xffbfffffffbfffff, +0xff7fffffff7fffff, +0xfefffffffeffffff, +0xfdfffffffdffffff, +0xfbfffffffbffffff, +0xf7fffffff7ffffff, +0xefffffffefffffff, +0xdfffffffdfffffff, +0xbfffffffbfffffff, +0x0000000000000001, +0x0000000000000002, +0x0000000000000004, +0x0000000000000008, +0x0000000000000010, +0x0000000000000020, +0x0000000000000040, +0x0000000000000080, +0x0000000000000100, +0x0000000000000200, +0x0000000000000400, +0x0000000000000800, +0x0000000000001000, +0x0000000000002000, +0x0000000000004000, +0x0000000000008000, +0x0000000000010000, +0x0000000000020000, +0x0000000000040000, +0x0000000000080000, +0x0000000000100000, +0x0000000000200000, +0x0000000000400000, +0x0000000000800000, +0x0000000001000000, +0x0000000002000000, +0x0000000004000000, +0x0000000008000000, +0x0000000010000000, +0x0000000020000000, +0x0000000040000000, +0x0000000080000000, +0x0000000100000000, +0x0000000200000000, +0x0000000400000000, +0x0000000800000000, +0x0000001000000000, +0x0000002000000000, +0x0000004000000000, +0x0000008000000000, +0x0000010000000000, +0x0000020000000000, +0x0000040000000000, +0x0000080000000000, +0x0000100000000000, +0x0000200000000000, +0x0000400000000000, +0x0000800000000000, +0x0001000000000000, +0x0002000000000000, +0x0004000000000000, +0x0008000000000000, +0x0010000000000000, +0x0020000000000000, +0x0040000000000000, +0x0080000000000000, +0x0100000000000000, +0x0200000000000000, +0x0400000000000000, +0x0800000000000000, +0x1000000000000000, +0x2000000000000000, +0x4000000000000000, +0x8000000000000000, +0x0000000000000003, +0x0000000000000006, +0x000000000000000c, +0x0000000000000018, +0x0000000000000030, +0x0000000000000060, +0x00000000000000c0, +0x0000000000000180, +0x0000000000000300, +0x0000000000000600, +0x0000000000000c00, +0x0000000000001800, +0x0000000000003000, +0x0000000000006000, +0x000000000000c000, +0x0000000000018000, +0x0000000000030000, +0x0000000000060000, +0x00000000000c0000, +0x0000000000180000, +0x0000000000300000, +0x0000000000600000, +0x0000000000c00000, +0x0000000001800000, +0x0000000003000000, +0x0000000006000000, +0x000000000c000000, +0x0000000018000000, +0x0000000030000000, +0x0000000060000000, +0x00000000c0000000, +0x0000000180000000, +0x0000000300000000, +0x0000000600000000, +0x0000000c00000000, +0x0000001800000000, +0x0000003000000000, +0x0000006000000000, 
+0x000000c000000000, +0x0000018000000000, +0x0000030000000000, +0x0000060000000000, +0x00000c0000000000, +0x0000180000000000, +0x0000300000000000, +0x0000600000000000, +0x0000c00000000000, +0x0001800000000000, +0x0003000000000000, +0x0006000000000000, +0x000c000000000000, +0x0018000000000000, +0x0030000000000000, +0x0060000000000000, +0x00c0000000000000, +0x0180000000000000, +0x0300000000000000, +0x0600000000000000, +0x0c00000000000000, +0x1800000000000000, +0x3000000000000000, +0x6000000000000000, +0xc000000000000000, +0x8000000000000001, +0x0000000000000007, +0x000000000000000e, +0x000000000000001c, +0x0000000000000038, +0x0000000000000070, +0x00000000000000e0, +0x00000000000001c0, +0x0000000000000380, +0x0000000000000700, +0x0000000000000e00, +0x0000000000001c00, +0x0000000000003800, +0x0000000000007000, +0x000000000000e000, +0x000000000001c000, +0x0000000000038000, +0x0000000000070000, +0x00000000000e0000, +0x00000000001c0000, +0x0000000000380000, +0x0000000000700000, +0x0000000000e00000, +0x0000000001c00000, +0x0000000003800000, +0x0000000007000000, +0x000000000e000000, +0x000000001c000000, +0x0000000038000000, +0x0000000070000000, +0x00000000e0000000, +0x00000001c0000000, +0x0000000380000000, +0x0000000700000000, +0x0000000e00000000, +0x0000001c00000000, +0x0000003800000000, +0x0000007000000000, +0x000000e000000000, +0x000001c000000000, +0x0000038000000000, +0x0000070000000000, +0x00000e0000000000, +0x00001c0000000000, +0x0000380000000000, +0x0000700000000000, +0x0000e00000000000, +0x0001c00000000000, +0x0003800000000000, +0x0007000000000000, +0x000e000000000000, +0x001c000000000000, +0x0038000000000000, +0x0070000000000000, +0x00e0000000000000, +0x01c0000000000000, +0x0380000000000000, +0x0700000000000000, +0x0e00000000000000, +0x1c00000000000000, +0x3800000000000000, +0x7000000000000000, +0xe000000000000000, +0xc000000000000001, +0x8000000000000003, +0x000000000000000f, +0x000000000000001e, +0x000000000000003c, +0x0000000000000078, +0x00000000000000f0, +0x00000000000001e0, +0x00000000000003c0, +0x0000000000000780, +0x0000000000000f00, +0x0000000000001e00, +0x0000000000003c00, +0x0000000000007800, +0x000000000000f000, +0x000000000001e000, +0x000000000003c000, +0x0000000000078000, +0x00000000000f0000, +0x00000000001e0000, +0x00000000003c0000, +0x0000000000780000, +0x0000000000f00000, +0x0000000001e00000, +0x0000000003c00000, +0x0000000007800000, +0x000000000f000000, +0x000000001e000000, +0x000000003c000000, +0x0000000078000000, +0x00000000f0000000, +0x00000001e0000000, +0x00000003c0000000, +0x0000000780000000, +0x0000000f00000000, +0x0000001e00000000, +0x0000003c00000000, +0x0000007800000000, +0x000000f000000000, +0x000001e000000000, +0x000003c000000000, +0x0000078000000000, +0x00000f0000000000, +0x00001e0000000000, +0x00003c0000000000, +0x0000780000000000, +0x0000f00000000000, +0x0001e00000000000, +0x0003c00000000000, +0x0007800000000000, +0x000f000000000000, +0x001e000000000000, +0x003c000000000000, +0x0078000000000000, +0x00f0000000000000, +0x01e0000000000000, +0x03c0000000000000, +0x0780000000000000, +0x0f00000000000000, +0x1e00000000000000, +0x3c00000000000000, +0x7800000000000000, +0xf000000000000000, +0xe000000000000001, +0xc000000000000003, +0x8000000000000007, +0x000000000000001f, +0x000000000000003e, +0x000000000000007c, +0x00000000000000f8, +0x00000000000001f0, +0x00000000000003e0, +0x00000000000007c0, +0x0000000000000f80, +0x0000000000001f00, +0x0000000000003e00, +0x0000000000007c00, +0x000000000000f800, +0x000000000001f000, +0x000000000003e000, +0x000000000007c000, 
+0x00000000000f8000, +0x00000000001f0000, +0x00000000003e0000, +0x00000000007c0000, +0x0000000000f80000, +0x0000000001f00000, +0x0000000003e00000, +0x0000000007c00000, +0x000000000f800000, +0x000000001f000000, +0x000000003e000000, +0x000000007c000000, +0x00000000f8000000, +0x00000001f0000000, +0x00000003e0000000, +0x00000007c0000000, +0x0000000f80000000, +0x0000001f00000000, +0x0000003e00000000, +0x0000007c00000000, +0x000000f800000000, +0x000001f000000000, +0x000003e000000000, +0x000007c000000000, +0x00000f8000000000, +0x00001f0000000000, +0x00003e0000000000, +0x00007c0000000000, +0x0000f80000000000, +0x0001f00000000000, +0x0003e00000000000, +0x0007c00000000000, +0x000f800000000000, +0x001f000000000000, +0x003e000000000000, +0x007c000000000000, +0x00f8000000000000, +0x01f0000000000000, +0x03e0000000000000, +0x07c0000000000000, +0x0f80000000000000, +0x1f00000000000000, +0x3e00000000000000, +0x7c00000000000000, +0xf800000000000000, +0xf000000000000001, +0xe000000000000003, +0xc000000000000007, +0x800000000000000f, +0x000000000000003f, +0x000000000000007e, +0x00000000000000fc, +0x00000000000001f8, +0x00000000000003f0, +0x00000000000007e0, +0x0000000000000fc0, +0x0000000000001f80, +0x0000000000003f00, +0x0000000000007e00, +0x000000000000fc00, +0x000000000001f800, +0x000000000003f000, +0x000000000007e000, +0x00000000000fc000, +0x00000000001f8000, +0x00000000003f0000, +0x00000000007e0000, +0x0000000000fc0000, +0x0000000001f80000, +0x0000000003f00000, +0x0000000007e00000, +0x000000000fc00000, +0x000000001f800000, +0x000000003f000000, +0x000000007e000000, +0x00000000fc000000, +0x00000001f8000000, +0x00000003f0000000, +0x00000007e0000000, +0x0000000fc0000000, +0x0000001f80000000, +0x0000003f00000000, +0x0000007e00000000, +0x000000fc00000000, +0x000001f800000000, +0x000003f000000000, +0x000007e000000000, +0x00000fc000000000, +0x00001f8000000000, +0x00003f0000000000, +0x00007e0000000000, +0x0000fc0000000000, +0x0001f80000000000, +0x0003f00000000000, +0x0007e00000000000, +0x000fc00000000000, +0x001f800000000000, +0x003f000000000000, +0x007e000000000000, +0x00fc000000000000, +0x01f8000000000000, +0x03f0000000000000, +0x07e0000000000000, +0x0fc0000000000000, +0x1f80000000000000, +0x3f00000000000000, +0x7e00000000000000, +0xfc00000000000000, +0xf800000000000001, +0xf000000000000003, +0xe000000000000007, +0xc00000000000000f, +0x800000000000001f, +0x000000000000007f, +0x00000000000000fe, +0x00000000000001fc, +0x00000000000003f8, +0x00000000000007f0, +0x0000000000000fe0, +0x0000000000001fc0, +0x0000000000003f80, +0x0000000000007f00, +0x000000000000fe00, +0x000000000001fc00, +0x000000000003f800, +0x000000000007f000, +0x00000000000fe000, +0x00000000001fc000, +0x00000000003f8000, +0x00000000007f0000, +0x0000000000fe0000, +0x0000000001fc0000, +0x0000000003f80000, +0x0000000007f00000, +0x000000000fe00000, +0x000000001fc00000, +0x000000003f800000, +0x000000007f000000, +0x00000000fe000000, +0x00000001fc000000, +0x00000003f8000000, +0x00000007f0000000, +0x0000000fe0000000, +0x0000001fc0000000, +0x0000003f80000000, +0x0000007f00000000, +0x000000fe00000000, +0x000001fc00000000, +0x000003f800000000, +0x000007f000000000, +0x00000fe000000000, +0x00001fc000000000, +0x00003f8000000000, +0x00007f0000000000, +0x0000fe0000000000, +0x0001fc0000000000, +0x0003f80000000000, +0x0007f00000000000, +0x000fe00000000000, +0x001fc00000000000, +0x003f800000000000, +0x007f000000000000, +0x00fe000000000000, +0x01fc000000000000, +0x03f8000000000000, +0x07f0000000000000, +0x0fe0000000000000, +0x1fc0000000000000, +0x3f80000000000000, 
+0x7f00000000000000, +0xfe00000000000000, +0xfc00000000000001, +0xf800000000000003, +0xf000000000000007, +0xe00000000000000f, +0xc00000000000001f, +0x800000000000003f, +0x00000000000000ff, +0x00000000000001fe, +0x00000000000003fc, +0x00000000000007f8, +0x0000000000000ff0, +0x0000000000001fe0, +0x0000000000003fc0, +0x0000000000007f80, +0x000000000000ff00, +0x000000000001fe00, +0x000000000003fc00, +0x000000000007f800, +0x00000000000ff000, +0x00000000001fe000, +0x00000000003fc000, +0x00000000007f8000, +0x0000000000ff0000, +0x0000000001fe0000, +0x0000000003fc0000, +0x0000000007f80000, +0x000000000ff00000, +0x000000001fe00000, +0x000000003fc00000, +0x000000007f800000, +0x00000000ff000000, +0x00000001fe000000, +0x00000003fc000000, +0x00000007f8000000, +0x0000000ff0000000, +0x0000001fe0000000, +0x0000003fc0000000, +0x0000007f80000000, +0x000000ff00000000, +0x000001fe00000000, +0x000003fc00000000, +0x000007f800000000, +0x00000ff000000000, +0x00001fe000000000, +0x00003fc000000000, +0x00007f8000000000, +0x0000ff0000000000, +0x0001fe0000000000, +0x0003fc0000000000, +0x0007f80000000000, +0x000ff00000000000, +0x001fe00000000000, +0x003fc00000000000, +0x007f800000000000, +0x00ff000000000000, +0x01fe000000000000, +0x03fc000000000000, +0x07f8000000000000, +0x0ff0000000000000, +0x1fe0000000000000, +0x3fc0000000000000, +0x7f80000000000000, +0xff00000000000000, +0xfe00000000000001, +0xfc00000000000003, +0xf800000000000007, +0xf00000000000000f, +0xe00000000000001f, +0xc00000000000003f, +0x800000000000007f, +0x00000000000001ff, +0x00000000000003fe, +0x00000000000007fc, +0x0000000000000ff8, +0x0000000000001ff0, +0x0000000000003fe0, +0x0000000000007fc0, +0x000000000000ff80, +0x000000000001ff00, +0x000000000003fe00, +0x000000000007fc00, +0x00000000000ff800, +0x00000000001ff000, +0x00000000003fe000, +0x00000000007fc000, +0x0000000000ff8000, +0x0000000001ff0000, +0x0000000003fe0000, +0x0000000007fc0000, +0x000000000ff80000, +0x000000001ff00000, +0x000000003fe00000, +0x000000007fc00000, +0x00000000ff800000, +0x00000001ff000000, +0x00000003fe000000, +0x00000007fc000000, +0x0000000ff8000000, +0x0000001ff0000000, +0x0000003fe0000000, +0x0000007fc0000000, +0x000000ff80000000, +0x000001ff00000000, +0x000003fe00000000, +0x000007fc00000000, +0x00000ff800000000, +0x00001ff000000000, +0x00003fe000000000, +0x00007fc000000000, +0x0000ff8000000000, +0x0001ff0000000000, +0x0003fe0000000000, +0x0007fc0000000000, +0x000ff80000000000, +0x001ff00000000000, +0x003fe00000000000, +0x007fc00000000000, +0x00ff800000000000, +0x01ff000000000000, +0x03fe000000000000, +0x07fc000000000000, +0x0ff8000000000000, +0x1ff0000000000000, +0x3fe0000000000000, +0x7fc0000000000000, +0xff80000000000000, +0xff00000000000001, +0xfe00000000000003, +0xfc00000000000007, +0xf80000000000000f, +0xf00000000000001f, +0xe00000000000003f, +0xc00000000000007f, +0x80000000000000ff, +0x00000000000003ff, +0x00000000000007fe, +0x0000000000000ffc, +0x0000000000001ff8, +0x0000000000003ff0, +0x0000000000007fe0, +0x000000000000ffc0, +0x000000000001ff80, +0x000000000003ff00, +0x000000000007fe00, +0x00000000000ffc00, +0x00000000001ff800, +0x00000000003ff000, +0x00000000007fe000, +0x0000000000ffc000, +0x0000000001ff8000, +0x0000000003ff0000, +0x0000000007fe0000, +0x000000000ffc0000, +0x000000001ff80000, +0x000000003ff00000, +0x000000007fe00000, +0x00000000ffc00000, +0x00000001ff800000, +0x00000003ff000000, +0x00000007fe000000, +0x0000000ffc000000, +0x0000001ff8000000, +0x0000003ff0000000, +0x0000007fe0000000, +0x000000ffc0000000, +0x000001ff80000000, +0x000003ff00000000, 
+0x000007fe00000000, +0x00000ffc00000000, +0x00001ff800000000, +0x00003ff000000000, +0x00007fe000000000, +0x0000ffc000000000, +0x0001ff8000000000, +0x0003ff0000000000, +0x0007fe0000000000, +0x000ffc0000000000, +0x001ff80000000000, +0x003ff00000000000, +0x007fe00000000000, +0x00ffc00000000000, +0x01ff800000000000, +0x03ff000000000000, +0x07fe000000000000, +0x0ffc000000000000, +0x1ff8000000000000, +0x3ff0000000000000, +0x7fe0000000000000, +0xffc0000000000000, +0xff80000000000001, +0xff00000000000003, +0xfe00000000000007, +0xfc0000000000000f, +0xf80000000000001f, +0xf00000000000003f, +0xe00000000000007f, +0xc0000000000000ff, +0x80000000000001ff, +0x00000000000007ff, +0x0000000000000ffe, +0x0000000000001ffc, +0x0000000000003ff8, +0x0000000000007ff0, +0x000000000000ffe0, +0x000000000001ffc0, +0x000000000003ff80, +0x000000000007ff00, +0x00000000000ffe00, +0x00000000001ffc00, +0x00000000003ff800, +0x00000000007ff000, +0x0000000000ffe000, +0x0000000001ffc000, +0x0000000003ff8000, +0x0000000007ff0000, +0x000000000ffe0000, +0x000000001ffc0000, +0x000000003ff80000, +0x000000007ff00000, +0x00000000ffe00000, +0x00000001ffc00000, +0x00000003ff800000, +0x00000007ff000000, +0x0000000ffe000000, +0x0000001ffc000000, +0x0000003ff8000000, +0x0000007ff0000000, +0x000000ffe0000000, +0x000001ffc0000000, +0x000003ff80000000, +0x000007ff00000000, +0x00000ffe00000000, +0x00001ffc00000000, +0x00003ff800000000, +0x00007ff000000000, +0x0000ffe000000000, +0x0001ffc000000000, +0x0003ff8000000000, +0x0007ff0000000000, +0x000ffe0000000000, +0x001ffc0000000000, +0x003ff80000000000, +0x007ff00000000000, +0x00ffe00000000000, +0x01ffc00000000000, +0x03ff800000000000, +0x07ff000000000000, +0x0ffe000000000000, +0x1ffc000000000000, +0x3ff8000000000000, +0x7ff0000000000000, +0xffe0000000000000, +0xffc0000000000001, +0xff80000000000003, +0xff00000000000007, +0xfe0000000000000f, +0xfc0000000000001f, +0xf80000000000003f, +0xf00000000000007f, +0xe0000000000000ff, +0xc0000000000001ff, +0x80000000000003ff, +0x0000000000000fff, +0x0000000000001ffe, +0x0000000000003ffc, +0x0000000000007ff8, +0x000000000000fff0, +0x000000000001ffe0, +0x000000000003ffc0, +0x000000000007ff80, +0x00000000000fff00, +0x00000000001ffe00, +0x00000000003ffc00, +0x00000000007ff800, +0x0000000000fff000, +0x0000000001ffe000, +0x0000000003ffc000, +0x0000000007ff8000, +0x000000000fff0000, +0x000000001ffe0000, +0x000000003ffc0000, +0x000000007ff80000, +0x00000000fff00000, +0x00000001ffe00000, +0x00000003ffc00000, +0x00000007ff800000, +0x0000000fff000000, +0x0000001ffe000000, +0x0000003ffc000000, +0x0000007ff8000000, +0x000000fff0000000, +0x000001ffe0000000, +0x000003ffc0000000, +0x000007ff80000000, +0x00000fff00000000, +0x00001ffe00000000, +0x00003ffc00000000, +0x00007ff800000000, +0x0000fff000000000, +0x0001ffe000000000, +0x0003ffc000000000, +0x0007ff8000000000, +0x000fff0000000000, +0x001ffe0000000000, +0x003ffc0000000000, +0x007ff80000000000, +0x00fff00000000000, +0x01ffe00000000000, +0x03ffc00000000000, +0x07ff800000000000, +0x0fff000000000000, +0x1ffe000000000000, +0x3ffc000000000000, +0x7ff8000000000000, +0xfff0000000000000, +0xffe0000000000001, +0xffc0000000000003, +0xff80000000000007, +0xff0000000000000f, +0xfe0000000000001f, +0xfc0000000000003f, +0xf80000000000007f, +0xf0000000000000ff, +0xe0000000000001ff, +0xc0000000000003ff, +0x80000000000007ff, +0x0000000000001fff, +0x0000000000003ffe, +0x0000000000007ffc, +0x000000000000fff8, +0x000000000001fff0, +0x000000000003ffe0, +0x000000000007ffc0, +0x00000000000fff80, +0x00000000001fff00, +0x00000000003ffe00, 
+0x00000000007ffc00, +0x0000000000fff800, +0x0000000001fff000, +0x0000000003ffe000, +0x0000000007ffc000, +0x000000000fff8000, +0x000000001fff0000, +0x000000003ffe0000, +0x000000007ffc0000, +0x00000000fff80000, +0x00000001fff00000, +0x00000003ffe00000, +0x00000007ffc00000, +0x0000000fff800000, +0x0000001fff000000, +0x0000003ffe000000, +0x0000007ffc000000, +0x000000fff8000000, +0x000001fff0000000, +0x000003ffe0000000, +0x000007ffc0000000, +0x00000fff80000000, +0x00001fff00000000, +0x00003ffe00000000, +0x00007ffc00000000, +0x0000fff800000000, +0x0001fff000000000, +0x0003ffe000000000, +0x0007ffc000000000, +0x000fff8000000000, +0x001fff0000000000, +0x003ffe0000000000, +0x007ffc0000000000, +0x00fff80000000000, +0x01fff00000000000, +0x03ffe00000000000, +0x07ffc00000000000, +0x0fff800000000000, +0x1fff000000000000, +0x3ffe000000000000, +0x7ffc000000000000, +0xfff8000000000000, +0xfff0000000000001, +0xffe0000000000003, +0xffc0000000000007, +0xff8000000000000f, +0xff0000000000001f, +0xfe0000000000003f, +0xfc0000000000007f, +0xf8000000000000ff, +0xf0000000000001ff, +0xe0000000000003ff, +0xc0000000000007ff, +0x8000000000000fff, +0x0000000000003fff, +0x0000000000007ffe, +0x000000000000fffc, +0x000000000001fff8, +0x000000000003fff0, +0x000000000007ffe0, +0x00000000000fffc0, +0x00000000001fff80, +0x00000000003fff00, +0x00000000007ffe00, +0x0000000000fffc00, +0x0000000001fff800, +0x0000000003fff000, +0x0000000007ffe000, +0x000000000fffc000, +0x000000001fff8000, +0x000000003fff0000, +0x000000007ffe0000, +0x00000000fffc0000, +0x00000001fff80000, +0x00000003fff00000, +0x00000007ffe00000, +0x0000000fffc00000, +0x0000001fff800000, +0x0000003fff000000, +0x0000007ffe000000, +0x000000fffc000000, +0x000001fff8000000, +0x000003fff0000000, +0x000007ffe0000000, +0x00000fffc0000000, +0x00001fff80000000, +0x00003fff00000000, +0x00007ffe00000000, +0x0000fffc00000000, +0x0001fff800000000, +0x0003fff000000000, +0x0007ffe000000000, +0x000fffc000000000, +0x001fff8000000000, +0x003fff0000000000, +0x007ffe0000000000, +0x00fffc0000000000, +0x01fff80000000000, +0x03fff00000000000, +0x07ffe00000000000, +0x0fffc00000000000, +0x1fff800000000000, +0x3fff000000000000, +0x7ffe000000000000, +0xfffc000000000000, +0xfff8000000000001, +0xfff0000000000003, +0xffe0000000000007, +0xffc000000000000f, +0xff8000000000001f, +0xff0000000000003f, +0xfe0000000000007f, +0xfc000000000000ff, +0xf8000000000001ff, +0xf0000000000003ff, +0xe0000000000007ff, +0xc000000000000fff, +0x8000000000001fff, +0x0000000000007fff, +0x000000000000fffe, +0x000000000001fffc, +0x000000000003fff8, +0x000000000007fff0, +0x00000000000fffe0, +0x00000000001fffc0, +0x00000000003fff80, +0x00000000007fff00, +0x0000000000fffe00, +0x0000000001fffc00, +0x0000000003fff800, +0x0000000007fff000, +0x000000000fffe000, +0x000000001fffc000, +0x000000003fff8000, +0x000000007fff0000, +0x00000000fffe0000, +0x00000001fffc0000, +0x00000003fff80000, +0x00000007fff00000, +0x0000000fffe00000, +0x0000001fffc00000, +0x0000003fff800000, +0x0000007fff000000, +0x000000fffe000000, +0x000001fffc000000, +0x000003fff8000000, +0x000007fff0000000, +0x00000fffe0000000, +0x00001fffc0000000, +0x00003fff80000000, +0x00007fff00000000, +0x0000fffe00000000, +0x0001fffc00000000, +0x0003fff800000000, +0x0007fff000000000, +0x000fffe000000000, +0x001fffc000000000, +0x003fff8000000000, +0x007fff0000000000, +0x00fffe0000000000, +0x01fffc0000000000, +0x03fff80000000000, +0x07fff00000000000, +0x0fffe00000000000, +0x1fffc00000000000, +0x3fff800000000000, +0x7fff000000000000, +0xfffe000000000000, +0xfffc000000000001, 
+0xfff8000000000003, +0xfff0000000000007, +0xffe000000000000f, +0xffc000000000001f, +0xff8000000000003f, +0xff0000000000007f, +0xfe000000000000ff, +0xfc000000000001ff, +0xf8000000000003ff, +0xf0000000000007ff, +0xe000000000000fff, +0xc000000000001fff, +0x8000000000003fff, +0x000000000000ffff, +0x000000000001fffe, +0x000000000003fffc, +0x000000000007fff8, +0x00000000000ffff0, +0x00000000001fffe0, +0x00000000003fffc0, +0x00000000007fff80, +0x0000000000ffff00, +0x0000000001fffe00, +0x0000000003fffc00, +0x0000000007fff800, +0x000000000ffff000, +0x000000001fffe000, +0x000000003fffc000, +0x000000007fff8000, +0x00000000ffff0000, +0x00000001fffe0000, +0x00000003fffc0000, +0x00000007fff80000, +0x0000000ffff00000, +0x0000001fffe00000, +0x0000003fffc00000, +0x0000007fff800000, +0x000000ffff000000, +0x000001fffe000000, +0x000003fffc000000, +0x000007fff8000000, +0x00000ffff0000000, +0x00001fffe0000000, +0x00003fffc0000000, +0x00007fff80000000, +0x0000ffff00000000, +0x0001fffe00000000, +0x0003fffc00000000, +0x0007fff800000000, +0x000ffff000000000, +0x001fffe000000000, +0x003fffc000000000, +0x007fff8000000000, +0x00ffff0000000000, +0x01fffe0000000000, +0x03fffc0000000000, +0x07fff80000000000, +0x0ffff00000000000, +0x1fffe00000000000, +0x3fffc00000000000, +0x7fff800000000000, +0xffff000000000000, +0xfffe000000000001, +0xfffc000000000003, +0xfff8000000000007, +0xfff000000000000f, +0xffe000000000001f, +0xffc000000000003f, +0xff8000000000007f, +0xff000000000000ff, +0xfe000000000001ff, +0xfc000000000003ff, +0xf8000000000007ff, +0xf000000000000fff, +0xe000000000001fff, +0xc000000000003fff, +0x8000000000007fff, +0x000000000001ffff, +0x000000000003fffe, +0x000000000007fffc, +0x00000000000ffff8, +0x00000000001ffff0, +0x00000000003fffe0, +0x00000000007fffc0, +0x0000000000ffff80, +0x0000000001ffff00, +0x0000000003fffe00, +0x0000000007fffc00, +0x000000000ffff800, +0x000000001ffff000, +0x000000003fffe000, +0x000000007fffc000, +0x00000000ffff8000, +0x00000001ffff0000, +0x00000003fffe0000, +0x00000007fffc0000, +0x0000000ffff80000, +0x0000001ffff00000, +0x0000003fffe00000, +0x0000007fffc00000, +0x000000ffff800000, +0x000001ffff000000, +0x000003fffe000000, +0x000007fffc000000, +0x00000ffff8000000, +0x00001ffff0000000, +0x00003fffe0000000, +0x00007fffc0000000, +0x0000ffff80000000, +0x0001ffff00000000, +0x0003fffe00000000, +0x0007fffc00000000, +0x000ffff800000000, +0x001ffff000000000, +0x003fffe000000000, +0x007fffc000000000, +0x00ffff8000000000, +0x01ffff0000000000, +0x03fffe0000000000, +0x07fffc0000000000, +0x0ffff80000000000, +0x1ffff00000000000, +0x3fffe00000000000, +0x7fffc00000000000, +0xffff800000000000, +0xffff000000000001, +0xfffe000000000003, +0xfffc000000000007, +0xfff800000000000f, +0xfff000000000001f, +0xffe000000000003f, +0xffc000000000007f, +0xff800000000000ff, +0xff000000000001ff, +0xfe000000000003ff, +0xfc000000000007ff, +0xf800000000000fff, +0xf000000000001fff, +0xe000000000003fff, +0xc000000000007fff, +0x800000000000ffff, +0x000000000003ffff, +0x000000000007fffe, +0x00000000000ffffc, +0x00000000001ffff8, +0x00000000003ffff0, +0x00000000007fffe0, +0x0000000000ffffc0, +0x0000000001ffff80, +0x0000000003ffff00, +0x0000000007fffe00, +0x000000000ffffc00, +0x000000001ffff800, +0x000000003ffff000, +0x000000007fffe000, +0x00000000ffffc000, +0x00000001ffff8000, +0x00000003ffff0000, +0x00000007fffe0000, +0x0000000ffffc0000, +0x0000001ffff80000, +0x0000003ffff00000, +0x0000007fffe00000, +0x000000ffffc00000, +0x000001ffff800000, +0x000003ffff000000, +0x000007fffe000000, +0x00000ffffc000000, +0x00001ffff8000000, 
+0x00003ffff0000000, +0x00007fffe0000000, +0x0000ffffc0000000, +0x0001ffff80000000, +0x0003ffff00000000, +0x0007fffe00000000, +0x000ffffc00000000, +0x001ffff800000000, +0x003ffff000000000, +0x007fffe000000000, +0x00ffffc000000000, +0x01ffff8000000000, +0x03ffff0000000000, +0x07fffe0000000000, +0x0ffffc0000000000, +0x1ffff80000000000, +0x3ffff00000000000, +0x7fffe00000000000, +0xffffc00000000000, +0xffff800000000001, +0xffff000000000003, +0xfffe000000000007, +0xfffc00000000000f, +0xfff800000000001f, +0xfff000000000003f, +0xffe000000000007f, +0xffc00000000000ff, +0xff800000000001ff, +0xff000000000003ff, +0xfe000000000007ff, +0xfc00000000000fff, +0xf800000000001fff, +0xf000000000003fff, +0xe000000000007fff, +0xc00000000000ffff, +0x800000000001ffff, +0x000000000007ffff, +0x00000000000ffffe, +0x00000000001ffffc, +0x00000000003ffff8, +0x00000000007ffff0, +0x0000000000ffffe0, +0x0000000001ffffc0, +0x0000000003ffff80, +0x0000000007ffff00, +0x000000000ffffe00, +0x000000001ffffc00, +0x000000003ffff800, +0x000000007ffff000, +0x00000000ffffe000, +0x00000001ffffc000, +0x00000003ffff8000, +0x00000007ffff0000, +0x0000000ffffe0000, +0x0000001ffffc0000, +0x0000003ffff80000, +0x0000007ffff00000, +0x000000ffffe00000, +0x000001ffffc00000, +0x000003ffff800000, +0x000007ffff000000, +0x00000ffffe000000, +0x00001ffffc000000, +0x00003ffff8000000, +0x00007ffff0000000, +0x0000ffffe0000000, +0x0001ffffc0000000, +0x0003ffff80000000, +0x0007ffff00000000, +0x000ffffe00000000, +0x001ffffc00000000, +0x003ffff800000000, +0x007ffff000000000, +0x00ffffe000000000, +0x01ffffc000000000, +0x03ffff8000000000, +0x07ffff0000000000, +0x0ffffe0000000000, +0x1ffffc0000000000, +0x3ffff80000000000, +0x7ffff00000000000, +0xffffe00000000000, +0xffffc00000000001, +0xffff800000000003, +0xffff000000000007, +0xfffe00000000000f, +0xfffc00000000001f, +0xfff800000000003f, +0xfff000000000007f, +0xffe00000000000ff, +0xffc00000000001ff, +0xff800000000003ff, +0xff000000000007ff, +0xfe00000000000fff, +0xfc00000000001fff, +0xf800000000003fff, +0xf000000000007fff, +0xe00000000000ffff, +0xc00000000001ffff, +0x800000000003ffff, +0x00000000000fffff, +0x00000000001ffffe, +0x00000000003ffffc, +0x00000000007ffff8, +0x0000000000fffff0, +0x0000000001ffffe0, +0x0000000003ffffc0, +0x0000000007ffff80, +0x000000000fffff00, +0x000000001ffffe00, +0x000000003ffffc00, +0x000000007ffff800, +0x00000000fffff000, +0x00000001ffffe000, +0x00000003ffffc000, +0x00000007ffff8000, +0x0000000fffff0000, +0x0000001ffffe0000, +0x0000003ffffc0000, +0x0000007ffff80000, +0x000000fffff00000, +0x000001ffffe00000, +0x000003ffffc00000, +0x000007ffff800000, +0x00000fffff000000, +0x00001ffffe000000, +0x00003ffffc000000, +0x00007ffff8000000, +0x0000fffff0000000, +0x0001ffffe0000000, +0x0003ffffc0000000, +0x0007ffff80000000, +0x000fffff00000000, +0x001ffffe00000000, +0x003ffffc00000000, +0x007ffff800000000, +0x00fffff000000000, +0x01ffffe000000000, +0x03ffffc000000000, +0x07ffff8000000000, +0x0fffff0000000000, +0x1ffffe0000000000, +0x3ffffc0000000000, +0x7ffff80000000000, +0xfffff00000000000, +0xffffe00000000001, +0xffffc00000000003, +0xffff800000000007, +0xffff00000000000f, +0xfffe00000000001f, +0xfffc00000000003f, +0xfff800000000007f, +0xfff00000000000ff, +0xffe00000000001ff, +0xffc00000000003ff, +0xff800000000007ff, +0xff00000000000fff, +0xfe00000000001fff, +0xfc00000000003fff, +0xf800000000007fff, +0xf00000000000ffff, +0xe00000000001ffff, +0xc00000000003ffff, +0x800000000007ffff, +0x00000000001fffff, +0x00000000003ffffe, +0x00000000007ffffc, +0x0000000000fffff8, +0x0000000001fffff0, 
+0x0000000003ffffe0, +0x0000000007ffffc0, +0x000000000fffff80, +0x000000001fffff00, +0x000000003ffffe00, +0x000000007ffffc00, +0x00000000fffff800, +0x00000001fffff000, +0x00000003ffffe000, +0x00000007ffffc000, +0x0000000fffff8000, +0x0000001fffff0000, +0x0000003ffffe0000, +0x0000007ffffc0000, +0x000000fffff80000, +0x000001fffff00000, +0x000003ffffe00000, +0x000007ffffc00000, +0x00000fffff800000, +0x00001fffff000000, +0x00003ffffe000000, +0x00007ffffc000000, +0x0000fffff8000000, +0x0001fffff0000000, +0x0003ffffe0000000, +0x0007ffffc0000000, +0x000fffff80000000, +0x001fffff00000000, +0x003ffffe00000000, +0x007ffffc00000000, +0x00fffff800000000, +0x01fffff000000000, +0x03ffffe000000000, +0x07ffffc000000000, +0x0fffff8000000000, +0x1fffff0000000000, +0x3ffffe0000000000, +0x7ffffc0000000000, +0xfffff80000000000, +0xfffff00000000001, +0xffffe00000000003, +0xffffc00000000007, +0xffff80000000000f, +0xffff00000000001f, +0xfffe00000000003f, +0xfffc00000000007f, +0xfff80000000000ff, +0xfff00000000001ff, +0xffe00000000003ff, +0xffc00000000007ff, +0xff80000000000fff, +0xff00000000001fff, +0xfe00000000003fff, +0xfc00000000007fff, +0xf80000000000ffff, +0xf00000000001ffff, +0xe00000000003ffff, +0xc00000000007ffff, +0x80000000000fffff, +0x00000000003fffff, +0x00000000007ffffe, +0x0000000000fffffc, +0x0000000001fffff8, +0x0000000003fffff0, +0x0000000007ffffe0, +0x000000000fffffc0, +0x000000001fffff80, +0x000000003fffff00, +0x000000007ffffe00, +0x00000000fffffc00, +0x00000001fffff800, +0x00000003fffff000, +0x00000007ffffe000, +0x0000000fffffc000, +0x0000001fffff8000, +0x0000003fffff0000, +0x0000007ffffe0000, +0x000000fffffc0000, +0x000001fffff80000, +0x000003fffff00000, +0x000007ffffe00000, +0x00000fffffc00000, +0x00001fffff800000, +0x00003fffff000000, +0x00007ffffe000000, +0x0000fffffc000000, +0x0001fffff8000000, +0x0003fffff0000000, +0x0007ffffe0000000, +0x000fffffc0000000, +0x001fffff80000000, +0x003fffff00000000, +0x007ffffe00000000, +0x00fffffc00000000, +0x01fffff800000000, +0x03fffff000000000, +0x07ffffe000000000, +0x0fffffc000000000, +0x1fffff8000000000, +0x3fffff0000000000, +0x7ffffe0000000000, +0xfffffc0000000000, +0xfffff80000000001, +0xfffff00000000003, +0xffffe00000000007, +0xffffc0000000000f, +0xffff80000000001f, +0xffff00000000003f, +0xfffe00000000007f, +0xfffc0000000000ff, +0xfff80000000001ff, +0xfff00000000003ff, +0xffe00000000007ff, +0xffc0000000000fff, +0xff80000000001fff, +0xff00000000003fff, +0xfe00000000007fff, +0xfc0000000000ffff, +0xf80000000001ffff, +0xf00000000003ffff, +0xe00000000007ffff, +0xc0000000000fffff, +0x80000000001fffff, +0x00000000007fffff, +0x0000000000fffffe, +0x0000000001fffffc, +0x0000000003fffff8, +0x0000000007fffff0, +0x000000000fffffe0, +0x000000001fffffc0, +0x000000003fffff80, +0x000000007fffff00, +0x00000000fffffe00, +0x00000001fffffc00, +0x00000003fffff800, +0x00000007fffff000, +0x0000000fffffe000, +0x0000001fffffc000, +0x0000003fffff8000, +0x0000007fffff0000, +0x000000fffffe0000, +0x000001fffffc0000, +0x000003fffff80000, +0x000007fffff00000, +0x00000fffffe00000, +0x00001fffffc00000, +0x00003fffff800000, +0x00007fffff000000, +0x0000fffffe000000, +0x0001fffffc000000, +0x0003fffff8000000, +0x0007fffff0000000, +0x000fffffe0000000, +0x001fffffc0000000, +0x003fffff80000000, +0x007fffff00000000, +0x00fffffe00000000, +0x01fffffc00000000, +0x03fffff800000000, +0x07fffff000000000, +0x0fffffe000000000, +0x1fffffc000000000, +0x3fffff8000000000, +0x7fffff0000000000, +0xfffffe0000000000, +0xfffffc0000000001, +0xfffff80000000003, +0xfffff00000000007, +0xffffe0000000000f, 
+0xffffc0000000001f, +0xffff80000000003f, +0xffff00000000007f, +0xfffe0000000000ff, +0xfffc0000000001ff, +0xfff80000000003ff, +0xfff00000000007ff, +0xffe0000000000fff, +0xffc0000000001fff, +0xff80000000003fff, +0xff00000000007fff, +0xfe0000000000ffff, +0xfc0000000001ffff, +0xf80000000003ffff, +0xf00000000007ffff, +0xe0000000000fffff, +0xc0000000001fffff, +0x80000000003fffff, +0x0000000000ffffff, +0x0000000001fffffe, +0x0000000003fffffc, +0x0000000007fffff8, +0x000000000ffffff0, +0x000000001fffffe0, +0x000000003fffffc0, +0x000000007fffff80, +0x00000000ffffff00, +0x00000001fffffe00, +0x00000003fffffc00, +0x00000007fffff800, +0x0000000ffffff000, +0x0000001fffffe000, +0x0000003fffffc000, +0x0000007fffff8000, +0x000000ffffff0000, +0x000001fffffe0000, +0x000003fffffc0000, +0x000007fffff80000, +0x00000ffffff00000, +0x00001fffffe00000, +0x00003fffffc00000, +0x00007fffff800000, +0x0000ffffff000000, +0x0001fffffe000000, +0x0003fffffc000000, +0x0007fffff8000000, +0x000ffffff0000000, +0x001fffffe0000000, +0x003fffffc0000000, +0x007fffff80000000, +0x00ffffff00000000, +0x01fffffe00000000, +0x03fffffc00000000, +0x07fffff800000000, +0x0ffffff000000000, +0x1fffffe000000000, +0x3fffffc000000000, +0x7fffff8000000000, +0xffffff0000000000, +0xfffffe0000000001, +0xfffffc0000000003, +0xfffff80000000007, +0xfffff0000000000f, +0xffffe0000000001f, +0xffffc0000000003f, +0xffff80000000007f, +0xffff0000000000ff, +0xfffe0000000001ff, +0xfffc0000000003ff, +0xfff80000000007ff, +0xfff0000000000fff, +0xffe0000000001fff, +0xffc0000000003fff, +0xff80000000007fff, +0xff0000000000ffff, +0xfe0000000001ffff, +0xfc0000000003ffff, +0xf80000000007ffff, +0xf0000000000fffff, +0xe0000000001fffff, +0xc0000000003fffff, +0x80000000007fffff, +0x0000000001ffffff, +0x0000000003fffffe, +0x0000000007fffffc, +0x000000000ffffff8, +0x000000001ffffff0, +0x000000003fffffe0, +0x000000007fffffc0, +0x00000000ffffff80, +0x00000001ffffff00, +0x00000003fffffe00, +0x00000007fffffc00, +0x0000000ffffff800, +0x0000001ffffff000, +0x0000003fffffe000, +0x0000007fffffc000, +0x000000ffffff8000, +0x000001ffffff0000, +0x000003fffffe0000, +0x000007fffffc0000, +0x00000ffffff80000, +0x00001ffffff00000, +0x00003fffffe00000, +0x00007fffffc00000, +0x0000ffffff800000, +0x0001ffffff000000, +0x0003fffffe000000, +0x0007fffffc000000, +0x000ffffff8000000, +0x001ffffff0000000, +0x003fffffe0000000, +0x007fffffc0000000, +0x00ffffff80000000, +0x01ffffff00000000, +0x03fffffe00000000, +0x07fffffc00000000, +0x0ffffff800000000, +0x1ffffff000000000, +0x3fffffe000000000, +0x7fffffc000000000, +0xffffff8000000000, +0xffffff0000000001, +0xfffffe0000000003, +0xfffffc0000000007, +0xfffff8000000000f, +0xfffff0000000001f, +0xffffe0000000003f, +0xffffc0000000007f, +0xffff8000000000ff, +0xffff0000000001ff, +0xfffe0000000003ff, +0xfffc0000000007ff, +0xfff8000000000fff, +0xfff0000000001fff, +0xffe0000000003fff, +0xffc0000000007fff, +0xff8000000000ffff, +0xff0000000001ffff, +0xfe0000000003ffff, +0xfc0000000007ffff, +0xf8000000000fffff, +0xf0000000001fffff, +0xe0000000003fffff, +0xc0000000007fffff, +0x8000000000ffffff, +0x0000000003ffffff, +0x0000000007fffffe, +0x000000000ffffffc, +0x000000001ffffff8, +0x000000003ffffff0, +0x000000007fffffe0, +0x00000000ffffffc0, +0x00000001ffffff80, +0x00000003ffffff00, +0x00000007fffffe00, +0x0000000ffffffc00, +0x0000001ffffff800, +0x0000003ffffff000, +0x0000007fffffe000, +0x000000ffffffc000, +0x000001ffffff8000, +0x000003ffffff0000, +0x000007fffffe0000, +0x00000ffffffc0000, +0x00001ffffff80000, +0x00003ffffff00000, +0x00007fffffe00000, +0x0000ffffffc00000, 
+0x0001ffffff800000, +0x0003ffffff000000, +0x0007fffffe000000, +0x000ffffffc000000, +0x001ffffff8000000, +0x003ffffff0000000, +0x007fffffe0000000, +0x00ffffffc0000000, +0x01ffffff80000000, +0x03ffffff00000000, +0x07fffffe00000000, +0x0ffffffc00000000, +0x1ffffff800000000, +0x3ffffff000000000, +0x7fffffe000000000, +0xffffffc000000000, +0xffffff8000000001, +0xffffff0000000003, +0xfffffe0000000007, +0xfffffc000000000f, +0xfffff8000000001f, +0xfffff0000000003f, +0xffffe0000000007f, +0xffffc000000000ff, +0xffff8000000001ff, +0xffff0000000003ff, +0xfffe0000000007ff, +0xfffc000000000fff, +0xfff8000000001fff, +0xfff0000000003fff, +0xffe0000000007fff, +0xffc000000000ffff, +0xff8000000001ffff, +0xff0000000003ffff, +0xfe0000000007ffff, +0xfc000000000fffff, +0xf8000000001fffff, +0xf0000000003fffff, +0xe0000000007fffff, +0xc000000000ffffff, +0x8000000001ffffff, +0x0000000007ffffff, +0x000000000ffffffe, +0x000000001ffffffc, +0x000000003ffffff8, +0x000000007ffffff0, +0x00000000ffffffe0, +0x00000001ffffffc0, +0x00000003ffffff80, +0x00000007ffffff00, +0x0000000ffffffe00, +0x0000001ffffffc00, +0x0000003ffffff800, +0x0000007ffffff000, +0x000000ffffffe000, +0x000001ffffffc000, +0x000003ffffff8000, +0x000007ffffff0000, +0x00000ffffffe0000, +0x00001ffffffc0000, +0x00003ffffff80000, +0x00007ffffff00000, +0x0000ffffffe00000, +0x0001ffffffc00000, +0x0003ffffff800000, +0x0007ffffff000000, +0x000ffffffe000000, +0x001ffffffc000000, +0x003ffffff8000000, +0x007ffffff0000000, +0x00ffffffe0000000, +0x01ffffffc0000000, +0x03ffffff80000000, +0x07ffffff00000000, +0x0ffffffe00000000, +0x1ffffffc00000000, +0x3ffffff800000000, +0x7ffffff000000000, +0xffffffe000000000, +0xffffffc000000001, +0xffffff8000000003, +0xffffff0000000007, +0xfffffe000000000f, +0xfffffc000000001f, +0xfffff8000000003f, +0xfffff0000000007f, +0xffffe000000000ff, +0xffffc000000001ff, +0xffff8000000003ff, +0xffff0000000007ff, +0xfffe000000000fff, +0xfffc000000001fff, +0xfff8000000003fff, +0xfff0000000007fff, +0xffe000000000ffff, +0xffc000000001ffff, +0xff8000000003ffff, +0xff0000000007ffff, +0xfe000000000fffff, +0xfc000000001fffff, +0xf8000000003fffff, +0xf0000000007fffff, +0xe000000000ffffff, +0xc000000001ffffff, +0x8000000003ffffff, +0x000000000fffffff, +0x000000001ffffffe, +0x000000003ffffffc, +0x000000007ffffff8, +0x00000000fffffff0, +0x00000001ffffffe0, +0x00000003ffffffc0, +0x00000007ffffff80, +0x0000000fffffff00, +0x0000001ffffffe00, +0x0000003ffffffc00, +0x0000007ffffff800, +0x000000fffffff000, +0x000001ffffffe000, +0x000003ffffffc000, +0x000007ffffff8000, +0x00000fffffff0000, +0x00001ffffffe0000, +0x00003ffffffc0000, +0x00007ffffff80000, +0x0000fffffff00000, +0x0001ffffffe00000, +0x0003ffffffc00000, +0x0007ffffff800000, +0x000fffffff000000, +0x001ffffffe000000, +0x003ffffffc000000, +0x007ffffff8000000, +0x00fffffff0000000, +0x01ffffffe0000000, +0x03ffffffc0000000, +0x07ffffff80000000, +0x0fffffff00000000, +0x1ffffffe00000000, +0x3ffffffc00000000, +0x7ffffff800000000, +0xfffffff000000000, +0xffffffe000000001, +0xffffffc000000003, +0xffffff8000000007, +0xffffff000000000f, +0xfffffe000000001f, +0xfffffc000000003f, +0xfffff8000000007f, +0xfffff000000000ff, +0xffffe000000001ff, +0xffffc000000003ff, +0xffff8000000007ff, +0xffff000000000fff, +0xfffe000000001fff, +0xfffc000000003fff, +0xfff8000000007fff, +0xfff000000000ffff, +0xffe000000001ffff, +0xffc000000003ffff, +0xff8000000007ffff, +0xff000000000fffff, +0xfe000000001fffff, +0xfc000000003fffff, +0xf8000000007fffff, +0xf000000000ffffff, +0xe000000001ffffff, +0xc000000003ffffff, +0x8000000007ffffff, 
+0x000000001fffffff, +0x000000003ffffffe, +0x000000007ffffffc, +0x00000000fffffff8, +0x00000001fffffff0, +0x00000003ffffffe0, +0x00000007ffffffc0, +0x0000000fffffff80, +0x0000001fffffff00, +0x0000003ffffffe00, +0x0000007ffffffc00, +0x000000fffffff800, +0x000001fffffff000, +0x000003ffffffe000, +0x000007ffffffc000, +0x00000fffffff8000, +0x00001fffffff0000, +0x00003ffffffe0000, +0x00007ffffffc0000, +0x0000fffffff80000, +0x0001fffffff00000, +0x0003ffffffe00000, +0x0007ffffffc00000, +0x000fffffff800000, +0x001fffffff000000, +0x003ffffffe000000, +0x007ffffffc000000, +0x00fffffff8000000, +0x01fffffff0000000, +0x03ffffffe0000000, +0x07ffffffc0000000, +0x0fffffff80000000, +0x1fffffff00000000, +0x3ffffffe00000000, +0x7ffffffc00000000, +0xfffffff800000000, +0xfffffff000000001, +0xffffffe000000003, +0xffffffc000000007, +0xffffff800000000f, +0xffffff000000001f, +0xfffffe000000003f, +0xfffffc000000007f, +0xfffff800000000ff, +0xfffff000000001ff, +0xffffe000000003ff, +0xffffc000000007ff, +0xffff800000000fff, +0xffff000000001fff, +0xfffe000000003fff, +0xfffc000000007fff, +0xfff800000000ffff, +0xfff000000001ffff, +0xffe000000003ffff, +0xffc000000007ffff, +0xff800000000fffff, +0xff000000001fffff, +0xfe000000003fffff, +0xfc000000007fffff, +0xf800000000ffffff, +0xf000000001ffffff, +0xe000000003ffffff, +0xc000000007ffffff, +0x800000000fffffff, +0x000000003fffffff, +0x000000007ffffffe, +0x00000000fffffffc, +0x00000001fffffff8, +0x00000003fffffff0, +0x00000007ffffffe0, +0x0000000fffffffc0, +0x0000001fffffff80, +0x0000003fffffff00, +0x0000007ffffffe00, +0x000000fffffffc00, +0x000001fffffff800, +0x000003fffffff000, +0x000007ffffffe000, +0x00000fffffffc000, +0x00001fffffff8000, +0x00003fffffff0000, +0x00007ffffffe0000, +0x0000fffffffc0000, +0x0001fffffff80000, +0x0003fffffff00000, +0x0007ffffffe00000, +0x000fffffffc00000, +0x001fffffff800000, +0x003fffffff000000, +0x007ffffffe000000, +0x00fffffffc000000, +0x01fffffff8000000, +0x03fffffff0000000, +0x07ffffffe0000000, +0x0fffffffc0000000, +0x1fffffff80000000, +0x3fffffff00000000, +0x7ffffffe00000000, +0xfffffffc00000000, +0xfffffff800000001, +0xfffffff000000003, +0xffffffe000000007, +0xffffffc00000000f, +0xffffff800000001f, +0xffffff000000003f, +0xfffffe000000007f, +0xfffffc00000000ff, +0xfffff800000001ff, +0xfffff000000003ff, +0xffffe000000007ff, +0xffffc00000000fff, +0xffff800000001fff, +0xffff000000003fff, +0xfffe000000007fff, +0xfffc00000000ffff, +0xfff800000001ffff, +0xfff000000003ffff, +0xffe000000007ffff, +0xffc00000000fffff, +0xff800000001fffff, +0xff000000003fffff, +0xfe000000007fffff, +0xfc00000000ffffff, +0xf800000001ffffff, +0xf000000003ffffff, +0xe000000007ffffff, +0xc00000000fffffff, +0x800000001fffffff, +0x000000007fffffff, +0x00000000fffffffe, +0x00000001fffffffc, +0x00000003fffffff8, +0x00000007fffffff0, +0x0000000fffffffe0, +0x0000001fffffffc0, +0x0000003fffffff80, +0x0000007fffffff00, +0x000000fffffffe00, +0x000001fffffffc00, +0x000003fffffff800, +0x000007fffffff000, +0x00000fffffffe000, +0x00001fffffffc000, +0x00003fffffff8000, +0x00007fffffff0000, +0x0000fffffffe0000, +0x0001fffffffc0000, +0x0003fffffff80000, +0x0007fffffff00000, +0x000fffffffe00000, +0x001fffffffc00000, +0x003fffffff800000, +0x007fffffff000000, +0x00fffffffe000000, +0x01fffffffc000000, +0x03fffffff8000000, +0x07fffffff0000000, +0x0fffffffe0000000, +0x1fffffffc0000000, +0x3fffffff80000000, +0x7fffffff00000000, +0xfffffffe00000000, +0xfffffffc00000001, +0xfffffff800000003, +0xfffffff000000007, +0xffffffe00000000f, +0xffffffc00000001f, +0xffffff800000003f, +0xffffff000000007f, 
+0xfffffe00000000ff, +0xfffffc00000001ff, +0xfffff800000003ff, +0xfffff000000007ff, +0xffffe00000000fff, +0xffffc00000001fff, +0xffff800000003fff, +0xffff000000007fff, +0xfffe00000000ffff, +0xfffc00000001ffff, +0xfff800000003ffff, +0xfff000000007ffff, +0xffe00000000fffff, +0xffc00000001fffff, +0xff800000003fffff, +0xff000000007fffff, +0xfe00000000ffffff, +0xfc00000001ffffff, +0xf800000003ffffff, +0xf000000007ffffff, +0xe00000000fffffff, +0xc00000001fffffff, +0x800000003fffffff, +0x00000000ffffffff, +0x00000001fffffffe, +0x00000003fffffffc, +0x00000007fffffff8, +0x0000000ffffffff0, +0x0000001fffffffe0, +0x0000003fffffffc0, +0x0000007fffffff80, +0x000000ffffffff00, +0x000001fffffffe00, +0x000003fffffffc00, +0x000007fffffff800, +0x00000ffffffff000, +0x00001fffffffe000, +0x00003fffffffc000, +0x00007fffffff8000, +0x0000ffffffff0000, +0x0001fffffffe0000, +0x0003fffffffc0000, +0x0007fffffff80000, +0x000ffffffff00000, +0x001fffffffe00000, +0x003fffffffc00000, +0x007fffffff800000, +0x00ffffffff000000, +0x01fffffffe000000, +0x03fffffffc000000, +0x07fffffff8000000, +0x0ffffffff0000000, +0x1fffffffe0000000, +0x3fffffffc0000000, +0x7fffffff80000000, +0xffffffff00000000, +0xfffffffe00000001, +0xfffffffc00000003, +0xfffffff800000007, +0xfffffff00000000f, +0xffffffe00000001f, +0xffffffc00000003f, +0xffffff800000007f, +0xffffff00000000ff, +0xfffffe00000001ff, +0xfffffc00000003ff, +0xfffff800000007ff, +0xfffff00000000fff, +0xffffe00000001fff, +0xffffc00000003fff, +0xffff800000007fff, +0xffff00000000ffff, +0xfffe00000001ffff, +0xfffc00000003ffff, +0xfff800000007ffff, +0xfff00000000fffff, +0xffe00000001fffff, +0xffc00000003fffff, +0xff800000007fffff, +0xff00000000ffffff, +0xfe00000001ffffff, +0xfc00000003ffffff, +0xf800000007ffffff, +0xf00000000fffffff, +0xe00000001fffffff, +0xc00000003fffffff, +0x800000007fffffff, +0x00000001ffffffff, +0x00000003fffffffe, +0x00000007fffffffc, +0x0000000ffffffff8, +0x0000001ffffffff0, +0x0000003fffffffe0, +0x0000007fffffffc0, +0x000000ffffffff80, +0x000001ffffffff00, +0x000003fffffffe00, +0x000007fffffffc00, +0x00000ffffffff800, +0x00001ffffffff000, +0x00003fffffffe000, +0x00007fffffffc000, +0x0000ffffffff8000, +0x0001ffffffff0000, +0x0003fffffffe0000, +0x0007fffffffc0000, +0x000ffffffff80000, +0x001ffffffff00000, +0x003fffffffe00000, +0x007fffffffc00000, +0x00ffffffff800000, +0x01ffffffff000000, +0x03fffffffe000000, +0x07fffffffc000000, +0x0ffffffff8000000, +0x1ffffffff0000000, +0x3fffffffe0000000, +0x7fffffffc0000000, +0xffffffff80000000, +0xffffffff00000001, +0xfffffffe00000003, +0xfffffffc00000007, +0xfffffff80000000f, +0xfffffff00000001f, +0xffffffe00000003f, +0xffffffc00000007f, +0xffffff80000000ff, +0xffffff00000001ff, +0xfffffe00000003ff, +0xfffffc00000007ff, +0xfffff80000000fff, +0xfffff00000001fff, +0xffffe00000003fff, +0xffffc00000007fff, +0xffff80000000ffff, +0xffff00000001ffff, +0xfffe00000003ffff, +0xfffc00000007ffff, +0xfff80000000fffff, +0xfff00000001fffff, +0xffe00000003fffff, +0xffc00000007fffff, +0xff80000000ffffff, +0xff00000001ffffff, +0xfe00000003ffffff, +0xfc00000007ffffff, +0xf80000000fffffff, +0xf00000001fffffff, +0xe00000003fffffff, +0xc00000007fffffff, +0x80000000ffffffff, +0x00000003ffffffff, +0x00000007fffffffe, +0x0000000ffffffffc, +0x0000001ffffffff8, +0x0000003ffffffff0, +0x0000007fffffffe0, +0x000000ffffffffc0, +0x000001ffffffff80, +0x000003ffffffff00, +0x000007fffffffe00, +0x00000ffffffffc00, +0x00001ffffffff800, +0x00003ffffffff000, +0x00007fffffffe000, +0x0000ffffffffc000, +0x0001ffffffff8000, +0x0003ffffffff0000, +0x0007fffffffe0000, 
+0x000ffffffffc0000, +0x001ffffffff80000, +0x003ffffffff00000, +0x007fffffffe00000, +0x00ffffffffc00000, +0x01ffffffff800000, +0x03ffffffff000000, +0x07fffffffe000000, +0x0ffffffffc000000, +0x1ffffffff8000000, +0x3ffffffff0000000, +0x7fffffffe0000000, +0xffffffffc0000000, +0xffffffff80000001, +0xffffffff00000003, +0xfffffffe00000007, +0xfffffffc0000000f, +0xfffffff80000001f, +0xfffffff00000003f, +0xffffffe00000007f, +0xffffffc0000000ff, +0xffffff80000001ff, +0xffffff00000003ff, +0xfffffe00000007ff, +0xfffffc0000000fff, +0xfffff80000001fff, +0xfffff00000003fff, +0xffffe00000007fff, +0xffffc0000000ffff, +0xffff80000001ffff, +0xffff00000003ffff, +0xfffe00000007ffff, +0xfffc0000000fffff, +0xfff80000001fffff, +0xfff00000003fffff, +0xffe00000007fffff, +0xffc0000000ffffff, +0xff80000001ffffff, +0xff00000003ffffff, +0xfe00000007ffffff, +0xfc0000000fffffff, +0xf80000001fffffff, +0xf00000003fffffff, +0xe00000007fffffff, +0xc0000000ffffffff, +0x80000001ffffffff, +0x00000007ffffffff, +0x0000000ffffffffe, +0x0000001ffffffffc, +0x0000003ffffffff8, +0x0000007ffffffff0, +0x000000ffffffffe0, +0x000001ffffffffc0, +0x000003ffffffff80, +0x000007ffffffff00, +0x00000ffffffffe00, +0x00001ffffffffc00, +0x00003ffffffff800, +0x00007ffffffff000, +0x0000ffffffffe000, +0x0001ffffffffc000, +0x0003ffffffff8000, +0x0007ffffffff0000, +0x000ffffffffe0000, +0x001ffffffffc0000, +0x003ffffffff80000, +0x007ffffffff00000, +0x00ffffffffe00000, +0x01ffffffffc00000, +0x03ffffffff800000, +0x07ffffffff000000, +0x0ffffffffe000000, +0x1ffffffffc000000, +0x3ffffffff8000000, +0x7ffffffff0000000, +0xffffffffe0000000, +0xffffffffc0000001, +0xffffffff80000003, +0xffffffff00000007, +0xfffffffe0000000f, +0xfffffffc0000001f, +0xfffffff80000003f, +0xfffffff00000007f, +0xffffffe0000000ff, +0xffffffc0000001ff, +0xffffff80000003ff, +0xffffff00000007ff, +0xfffffe0000000fff, +0xfffffc0000001fff, +0xfffff80000003fff, +0xfffff00000007fff, +0xffffe0000000ffff, +0xffffc0000001ffff, +0xffff80000003ffff, +0xffff00000007ffff, +0xfffe0000000fffff, +0xfffc0000001fffff, +0xfff80000003fffff, +0xfff00000007fffff, +0xffe0000000ffffff, +0xffc0000001ffffff, +0xff80000003ffffff, +0xff00000007ffffff, +0xfe0000000fffffff, +0xfc0000001fffffff, +0xf80000003fffffff, +0xf00000007fffffff, +0xe0000000ffffffff, +0xc0000001ffffffff, +0x80000003ffffffff, +0x0000000fffffffff, +0x0000001ffffffffe, +0x0000003ffffffffc, +0x0000007ffffffff8, +0x000000fffffffff0, +0x000001ffffffffe0, +0x000003ffffffffc0, +0x000007ffffffff80, +0x00000fffffffff00, +0x00001ffffffffe00, +0x00003ffffffffc00, +0x00007ffffffff800, +0x0000fffffffff000, +0x0001ffffffffe000, +0x0003ffffffffc000, +0x0007ffffffff8000, +0x000fffffffff0000, +0x001ffffffffe0000, +0x003ffffffffc0000, +0x007ffffffff80000, +0x00fffffffff00000, +0x01ffffffffe00000, +0x03ffffffffc00000, +0x07ffffffff800000, +0x0fffffffff000000, +0x1ffffffffe000000, +0x3ffffffffc000000, +0x7ffffffff8000000, +0xfffffffff0000000, +0xffffffffe0000001, +0xffffffffc0000003, +0xffffffff80000007, +0xffffffff0000000f, +0xfffffffe0000001f, +0xfffffffc0000003f, +0xfffffff80000007f, +0xfffffff0000000ff, +0xffffffe0000001ff, +0xffffffc0000003ff, +0xffffff80000007ff, +0xffffff0000000fff, +0xfffffe0000001fff, +0xfffffc0000003fff, +0xfffff80000007fff, +0xfffff0000000ffff, +0xffffe0000001ffff, +0xffffc0000003ffff, +0xffff80000007ffff, +0xffff0000000fffff, +0xfffe0000001fffff, +0xfffc0000003fffff, +0xfff80000007fffff, +0xfff0000000ffffff, +0xffe0000001ffffff, +0xffc0000003ffffff, +0xff80000007ffffff, +0xff0000000fffffff, +0xfe0000001fffffff, +0xfc0000003fffffff, 
+0xf80000007fffffff, +0xf0000000ffffffff, +0xe0000001ffffffff, +0xc0000003ffffffff, +0x80000007ffffffff, +0x0000001fffffffff, +0x0000003ffffffffe, +0x0000007ffffffffc, +0x000000fffffffff8, +0x000001fffffffff0, +0x000003ffffffffe0, +0x000007ffffffffc0, +0x00000fffffffff80, +0x00001fffffffff00, +0x00003ffffffffe00, +0x00007ffffffffc00, +0x0000fffffffff800, +0x0001fffffffff000, +0x0003ffffffffe000, +0x0007ffffffffc000, +0x000fffffffff8000, +0x001fffffffff0000, +0x003ffffffffe0000, +0x007ffffffffc0000, +0x00fffffffff80000, +0x01fffffffff00000, +0x03ffffffffe00000, +0x07ffffffffc00000, +0x0fffffffff800000, +0x1fffffffff000000, +0x3ffffffffe000000, +0x7ffffffffc000000, +0xfffffffff8000000, +0xfffffffff0000001, +0xffffffffe0000003, +0xffffffffc0000007, +0xffffffff8000000f, +0xffffffff0000001f, +0xfffffffe0000003f, +0xfffffffc0000007f, +0xfffffff8000000ff, +0xfffffff0000001ff, +0xffffffe0000003ff, +0xffffffc0000007ff, +0xffffff8000000fff, +0xffffff0000001fff, +0xfffffe0000003fff, +0xfffffc0000007fff, +0xfffff8000000ffff, +0xfffff0000001ffff, +0xffffe0000003ffff, +0xffffc0000007ffff, +0xffff8000000fffff, +0xffff0000001fffff, +0xfffe0000003fffff, +0xfffc0000007fffff, +0xfff8000000ffffff, +0xfff0000001ffffff, +0xffe0000003ffffff, +0xffc0000007ffffff, +0xff8000000fffffff, +0xff0000001fffffff, +0xfe0000003fffffff, +0xfc0000007fffffff, +0xf8000000ffffffff, +0xf0000001ffffffff, +0xe0000003ffffffff, +0xc0000007ffffffff, +0x8000000fffffffff, +0x0000003fffffffff, +0x0000007ffffffffe, +0x000000fffffffffc, +0x000001fffffffff8, +0x000003fffffffff0, +0x000007ffffffffe0, +0x00000fffffffffc0, +0x00001fffffffff80, +0x00003fffffffff00, +0x00007ffffffffe00, +0x0000fffffffffc00, +0x0001fffffffff800, +0x0003fffffffff000, +0x0007ffffffffe000, +0x000fffffffffc000, +0x001fffffffff8000, +0x003fffffffff0000, +0x007ffffffffe0000, +0x00fffffffffc0000, +0x01fffffffff80000, +0x03fffffffff00000, +0x07ffffffffe00000, +0x0fffffffffc00000, +0x1fffffffff800000, +0x3fffffffff000000, +0x7ffffffffe000000, +0xfffffffffc000000, +0xfffffffff8000001, +0xfffffffff0000003, +0xffffffffe0000007, +0xffffffffc000000f, +0xffffffff8000001f, +0xffffffff0000003f, +0xfffffffe0000007f, +0xfffffffc000000ff, +0xfffffff8000001ff, +0xfffffff0000003ff, +0xffffffe0000007ff, +0xffffffc000000fff, +0xffffff8000001fff, +0xffffff0000003fff, +0xfffffe0000007fff, +0xfffffc000000ffff, +0xfffff8000001ffff, +0xfffff0000003ffff, +0xffffe0000007ffff, +0xffffc000000fffff, +0xffff8000001fffff, +0xffff0000003fffff, +0xfffe0000007fffff, +0xfffc000000ffffff, +0xfff8000001ffffff, +0xfff0000003ffffff, +0xffe0000007ffffff, +0xffc000000fffffff, +0xff8000001fffffff, +0xff0000003fffffff, +0xfe0000007fffffff, +0xfc000000ffffffff, +0xf8000001ffffffff, +0xf0000003ffffffff, +0xe0000007ffffffff, +0xc000000fffffffff, +0x8000001fffffffff, +0x0000007fffffffff, +0x000000fffffffffe, +0x000001fffffffffc, +0x000003fffffffff8, +0x000007fffffffff0, +0x00000fffffffffe0, +0x00001fffffffffc0, +0x00003fffffffff80, +0x00007fffffffff00, +0x0000fffffffffe00, +0x0001fffffffffc00, +0x0003fffffffff800, +0x0007fffffffff000, +0x000fffffffffe000, +0x001fffffffffc000, +0x003fffffffff8000, +0x007fffffffff0000, +0x00fffffffffe0000, +0x01fffffffffc0000, +0x03fffffffff80000, +0x07fffffffff00000, +0x0fffffffffe00000, +0x1fffffffffc00000, +0x3fffffffff800000, +0x7fffffffff000000, +0xfffffffffe000000, +0xfffffffffc000001, +0xfffffffff8000003, +0xfffffffff0000007, +0xffffffffe000000f, +0xffffffffc000001f, +0xffffffff8000003f, +0xffffffff0000007f, +0xfffffffe000000ff, +0xfffffffc000001ff, +0xfffffff8000003ff, 
+0xfffffff0000007ff, +0xffffffe000000fff, +0xffffffc000001fff, +0xffffff8000003fff, +0xffffff0000007fff, +0xfffffe000000ffff, +0xfffffc000001ffff, +0xfffff8000003ffff, +0xfffff0000007ffff, +0xffffe000000fffff, +0xffffc000001fffff, +0xffff8000003fffff, +0xffff0000007fffff, +0xfffe000000ffffff, +0xfffc000001ffffff, +0xfff8000003ffffff, +0xfff0000007ffffff, +0xffe000000fffffff, +0xffc000001fffffff, +0xff8000003fffffff, +0xff0000007fffffff, +0xfe000000ffffffff, +0xfc000001ffffffff, +0xf8000003ffffffff, +0xf0000007ffffffff, +0xe000000fffffffff, +0xc000001fffffffff, +0x8000003fffffffff, +0x000000ffffffffff, +0x000001fffffffffe, +0x000003fffffffffc, +0x000007fffffffff8, +0x00000ffffffffff0, +0x00001fffffffffe0, +0x00003fffffffffc0, +0x00007fffffffff80, +0x0000ffffffffff00, +0x0001fffffffffe00, +0x0003fffffffffc00, +0x0007fffffffff800, +0x000ffffffffff000, +0x001fffffffffe000, +0x003fffffffffc000, +0x007fffffffff8000, +0x00ffffffffff0000, +0x01fffffffffe0000, +0x03fffffffffc0000, +0x07fffffffff80000, +0x0ffffffffff00000, +0x1fffffffffe00000, +0x3fffffffffc00000, +0x7fffffffff800000, +0xffffffffff000000, +0xfffffffffe000001, +0xfffffffffc000003, +0xfffffffff8000007, +0xfffffffff000000f, +0xffffffffe000001f, +0xffffffffc000003f, +0xffffffff8000007f, +0xffffffff000000ff, +0xfffffffe000001ff, +0xfffffffc000003ff, +0xfffffff8000007ff, +0xfffffff000000fff, +0xffffffe000001fff, +0xffffffc000003fff, +0xffffff8000007fff, +0xffffff000000ffff, +0xfffffe000001ffff, +0xfffffc000003ffff, +0xfffff8000007ffff, +0xfffff000000fffff, +0xffffe000001fffff, +0xffffc000003fffff, +0xffff8000007fffff, +0xffff000000ffffff, +0xfffe000001ffffff, +0xfffc000003ffffff, +0xfff8000007ffffff, +0xfff000000fffffff, +0xffe000001fffffff, +0xffc000003fffffff, +0xff8000007fffffff, +0xff000000ffffffff, +0xfe000001ffffffff, +0xfc000003ffffffff, +0xf8000007ffffffff, +0xf000000fffffffff, +0xe000001fffffffff, +0xc000003fffffffff, +0x8000007fffffffff, +0x000001ffffffffff, +0x000003fffffffffe, +0x000007fffffffffc, +0x00000ffffffffff8, +0x00001ffffffffff0, +0x00003fffffffffe0, +0x00007fffffffffc0, +0x0000ffffffffff80, +0x0001ffffffffff00, +0x0003fffffffffe00, +0x0007fffffffffc00, +0x000ffffffffff800, +0x001ffffffffff000, +0x003fffffffffe000, +0x007fffffffffc000, +0x00ffffffffff8000, +0x01ffffffffff0000, +0x03fffffffffe0000, +0x07fffffffffc0000, +0x0ffffffffff80000, +0x1ffffffffff00000, +0x3fffffffffe00000, +0x7fffffffffc00000, +0xffffffffff800000, +0xffffffffff000001, +0xfffffffffe000003, +0xfffffffffc000007, +0xfffffffff800000f, +0xfffffffff000001f, +0xffffffffe000003f, +0xffffffffc000007f, +0xffffffff800000ff, +0xffffffff000001ff, +0xfffffffe000003ff, +0xfffffffc000007ff, +0xfffffff800000fff, +0xfffffff000001fff, +0xffffffe000003fff, +0xffffffc000007fff, +0xffffff800000ffff, +0xffffff000001ffff, +0xfffffe000003ffff, +0xfffffc000007ffff, +0xfffff800000fffff, +0xfffff000001fffff, +0xffffe000003fffff, +0xffffc000007fffff, +0xffff800000ffffff, +0xffff000001ffffff, +0xfffe000003ffffff, +0xfffc000007ffffff, +0xfff800000fffffff, +0xfff000001fffffff, +0xffe000003fffffff, +0xffc000007fffffff, +0xff800000ffffffff, +0xff000001ffffffff, +0xfe000003ffffffff, +0xfc000007ffffffff, +0xf800000fffffffff, +0xf000001fffffffff, +0xe000003fffffffff, +0xc000007fffffffff, +0x800000ffffffffff, +0x000003ffffffffff, +0x000007fffffffffe, +0x00000ffffffffffc, +0x00001ffffffffff8, +0x00003ffffffffff0, +0x00007fffffffffe0, +0x0000ffffffffffc0, +0x0001ffffffffff80, +0x0003ffffffffff00, +0x0007fffffffffe00, +0x000ffffffffffc00, +0x001ffffffffff800, +0x003ffffffffff000, 
+0x007fffffffffe000, +0x00ffffffffffc000, +0x01ffffffffff8000, +0x03ffffffffff0000, +0x07fffffffffe0000, +0x0ffffffffffc0000, +0x1ffffffffff80000, +0x3ffffffffff00000, +0x7fffffffffe00000, +0xffffffffffc00000, +0xffffffffff800001, +0xffffffffff000003, +0xfffffffffe000007, +0xfffffffffc00000f, +0xfffffffff800001f, +0xfffffffff000003f, +0xffffffffe000007f, +0xffffffffc00000ff, +0xffffffff800001ff, +0xffffffff000003ff, +0xfffffffe000007ff, +0xfffffffc00000fff, +0xfffffff800001fff, +0xfffffff000003fff, +0xffffffe000007fff, +0xffffffc00000ffff, +0xffffff800001ffff, +0xffffff000003ffff, +0xfffffe000007ffff, +0xfffffc00000fffff, +0xfffff800001fffff, +0xfffff000003fffff, +0xffffe000007fffff, +0xffffc00000ffffff, +0xffff800001ffffff, +0xffff000003ffffff, +0xfffe000007ffffff, +0xfffc00000fffffff, +0xfff800001fffffff, +0xfff000003fffffff, +0xffe000007fffffff, +0xffc00000ffffffff, +0xff800001ffffffff, +0xff000003ffffffff, +0xfe000007ffffffff, +0xfc00000fffffffff, +0xf800001fffffffff, +0xf000003fffffffff, +0xe000007fffffffff, +0xc00000ffffffffff, +0x800001ffffffffff, +0x000007ffffffffff, +0x00000ffffffffffe, +0x00001ffffffffffc, +0x00003ffffffffff8, +0x00007ffffffffff0, +0x0000ffffffffffe0, +0x0001ffffffffffc0, +0x0003ffffffffff80, +0x0007ffffffffff00, +0x000ffffffffffe00, +0x001ffffffffffc00, +0x003ffffffffff800, +0x007ffffffffff000, +0x00ffffffffffe000, +0x01ffffffffffc000, +0x03ffffffffff8000, +0x07ffffffffff0000, +0x0ffffffffffe0000, +0x1ffffffffffc0000, +0x3ffffffffff80000, +0x7ffffffffff00000, +0xffffffffffe00000, +0xffffffffffc00001, +0xffffffffff800003, +0xffffffffff000007, +0xfffffffffe00000f, +0xfffffffffc00001f, +0xfffffffff800003f, +0xfffffffff000007f, +0xffffffffe00000ff, +0xffffffffc00001ff, +0xffffffff800003ff, +0xffffffff000007ff, +0xfffffffe00000fff, +0xfffffffc00001fff, +0xfffffff800003fff, +0xfffffff000007fff, +0xffffffe00000ffff, +0xffffffc00001ffff, +0xffffff800003ffff, +0xffffff000007ffff, +0xfffffe00000fffff, +0xfffffc00001fffff, +0xfffff800003fffff, +0xfffff000007fffff, +0xffffe00000ffffff, +0xffffc00001ffffff, +0xffff800003ffffff, +0xffff000007ffffff, +0xfffe00000fffffff, +0xfffc00001fffffff, +0xfff800003fffffff, +0xfff000007fffffff, +0xffe00000ffffffff, +0xffc00001ffffffff, +0xff800003ffffffff, +0xff000007ffffffff, +0xfe00000fffffffff, +0xfc00001fffffffff, +0xf800003fffffffff, +0xf000007fffffffff, +0xe00000ffffffffff, +0xc00001ffffffffff, +0x800003ffffffffff, +0x00000fffffffffff, +0x00001ffffffffffe, +0x00003ffffffffffc, +0x00007ffffffffff8, +0x0000fffffffffff0, +0x0001ffffffffffe0, +0x0003ffffffffffc0, +0x0007ffffffffff80, +0x000fffffffffff00, +0x001ffffffffffe00, +0x003ffffffffffc00, +0x007ffffffffff800, +0x00fffffffffff000, +0x01ffffffffffe000, +0x03ffffffffffc000, +0x07ffffffffff8000, +0x0fffffffffff0000, +0x1ffffffffffe0000, +0x3ffffffffffc0000, +0x7ffffffffff80000, +0xfffffffffff00000, +0xffffffffffe00001, +0xffffffffffc00003, +0xffffffffff800007, +0xffffffffff00000f, +0xfffffffffe00001f, +0xfffffffffc00003f, +0xfffffffff800007f, +0xfffffffff00000ff, +0xffffffffe00001ff, +0xffffffffc00003ff, +0xffffffff800007ff, +0xffffffff00000fff, +0xfffffffe00001fff, +0xfffffffc00003fff, +0xfffffff800007fff, +0xfffffff00000ffff, +0xffffffe00001ffff, +0xffffffc00003ffff, +0xffffff800007ffff, +0xffffff00000fffff, +0xfffffe00001fffff, +0xfffffc00003fffff, +0xfffff800007fffff, +0xfffff00000ffffff, +0xffffe00001ffffff, +0xffffc00003ffffff, +0xffff800007ffffff, +0xffff00000fffffff, +0xfffe00001fffffff, +0xfffc00003fffffff, +0xfff800007fffffff, +0xfff00000ffffffff, +0xffe00001ffffffff, 
+0xffc00003ffffffff, +0xff800007ffffffff, +0xff00000fffffffff, +0xfe00001fffffffff, +0xfc00003fffffffff, +0xf800007fffffffff, +0xf00000ffffffffff, +0xe00001ffffffffff, +0xc00003ffffffffff, +0x800007ffffffffff, +0x00001fffffffffff, +0x00003ffffffffffe, +0x00007ffffffffffc, +0x0000fffffffffff8, +0x0001fffffffffff0, +0x0003ffffffffffe0, +0x0007ffffffffffc0, +0x000fffffffffff80, +0x001fffffffffff00, +0x003ffffffffffe00, +0x007ffffffffffc00, +0x00fffffffffff800, +0x01fffffffffff000, +0x03ffffffffffe000, +0x07ffffffffffc000, +0x0fffffffffff8000, +0x1fffffffffff0000, +0x3ffffffffffe0000, +0x7ffffffffffc0000, +0xfffffffffff80000, +0xfffffffffff00001, +0xffffffffffe00003, +0xffffffffffc00007, +0xffffffffff80000f, +0xffffffffff00001f, +0xfffffffffe00003f, +0xfffffffffc00007f, +0xfffffffff80000ff, +0xfffffffff00001ff, +0xffffffffe00003ff, +0xffffffffc00007ff, +0xffffffff80000fff, +0xffffffff00001fff, +0xfffffffe00003fff, +0xfffffffc00007fff, +0xfffffff80000ffff, +0xfffffff00001ffff, +0xffffffe00003ffff, +0xffffffc00007ffff, +0xffffff80000fffff, +0xffffff00001fffff, +0xfffffe00003fffff, +0xfffffc00007fffff, +0xfffff80000ffffff, +0xfffff00001ffffff, +0xffffe00003ffffff, +0xffffc00007ffffff, +0xffff80000fffffff, +0xffff00001fffffff, +0xfffe00003fffffff, +0xfffc00007fffffff, +0xfff80000ffffffff, +0xfff00001ffffffff, +0xffe00003ffffffff, +0xffc00007ffffffff, +0xff80000fffffffff, +0xff00001fffffffff, +0xfe00003fffffffff, +0xfc00007fffffffff, +0xf80000ffffffffff, +0xf00001ffffffffff, +0xe00003ffffffffff, +0xc00007ffffffffff, +0x80000fffffffffff, +0x00003fffffffffff, +0x00007ffffffffffe, +0x0000fffffffffffc, +0x0001fffffffffff8, +0x0003fffffffffff0, +0x0007ffffffffffe0, +0x000fffffffffffc0, +0x001fffffffffff80, +0x003fffffffffff00, +0x007ffffffffffe00, +0x00fffffffffffc00, +0x01fffffffffff800, +0x03fffffffffff000, +0x07ffffffffffe000, +0x0fffffffffffc000, +0x1fffffffffff8000, +0x3fffffffffff0000, +0x7ffffffffffe0000, +0xfffffffffffc0000, +0xfffffffffff80001, +0xfffffffffff00003, +0xffffffffffe00007, +0xffffffffffc0000f, +0xffffffffff80001f, +0xffffffffff00003f, +0xfffffffffe00007f, +0xfffffffffc0000ff, +0xfffffffff80001ff, +0xfffffffff00003ff, +0xffffffffe00007ff, +0xffffffffc0000fff, +0xffffffff80001fff, +0xffffffff00003fff, +0xfffffffe00007fff, +0xfffffffc0000ffff, +0xfffffff80001ffff, +0xfffffff00003ffff, +0xffffffe00007ffff, +0xffffffc0000fffff, +0xffffff80001fffff, +0xffffff00003fffff, +0xfffffe00007fffff, +0xfffffc0000ffffff, +0xfffff80001ffffff, +0xfffff00003ffffff, +0xffffe00007ffffff, +0xffffc0000fffffff, +0xffff80001fffffff, +0xffff00003fffffff, +0xfffe00007fffffff, +0xfffc0000ffffffff, +0xfff80001ffffffff, +0xfff00003ffffffff, +0xffe00007ffffffff, +0xffc0000fffffffff, +0xff80001fffffffff, +0xff00003fffffffff, +0xfe00007fffffffff, +0xfc0000ffffffffff, +0xf80001ffffffffff, +0xf00003ffffffffff, +0xe00007ffffffffff, +0xc0000fffffffffff, +0x80001fffffffffff, +0x00007fffffffffff, +0x0000fffffffffffe, +0x0001fffffffffffc, +0x0003fffffffffff8, +0x0007fffffffffff0, +0x000fffffffffffe0, +0x001fffffffffffc0, +0x003fffffffffff80, +0x007fffffffffff00, +0x00fffffffffffe00, +0x01fffffffffffc00, +0x03fffffffffff800, +0x07fffffffffff000, +0x0fffffffffffe000, +0x1fffffffffffc000, +0x3fffffffffff8000, +0x7fffffffffff0000, +0xfffffffffffe0000, +0xfffffffffffc0001, +0xfffffffffff80003, +0xfffffffffff00007, +0xffffffffffe0000f, +0xffffffffffc0001f, +0xffffffffff80003f, +0xffffffffff00007f, +0xfffffffffe0000ff, +0xfffffffffc0001ff, +0xfffffffff80003ff, +0xfffffffff00007ff, +0xffffffffe0000fff, +0xffffffffc0001fff, 
+0xffffffff80003fff, +0xffffffff00007fff, +0xfffffffe0000ffff, +0xfffffffc0001ffff, +0xfffffff80003ffff, +0xfffffff00007ffff, +0xffffffe0000fffff, +0xffffffc0001fffff, +0xffffff80003fffff, +0xffffff00007fffff, +0xfffffe0000ffffff, +0xfffffc0001ffffff, +0xfffff80003ffffff, +0xfffff00007ffffff, +0xffffe0000fffffff, +0xffffc0001fffffff, +0xffff80003fffffff, +0xffff00007fffffff, +0xfffe0000ffffffff, +0xfffc0001ffffffff, +0xfff80003ffffffff, +0xfff00007ffffffff, +0xffe0000fffffffff, +0xffc0001fffffffff, +0xff80003fffffffff, +0xff00007fffffffff, +0xfe0000ffffffffff, +0xfc0001ffffffffff, +0xf80003ffffffffff, +0xf00007ffffffffff, +0xe0000fffffffffff, +0xc0001fffffffffff, +0x80003fffffffffff, +0x0000ffffffffffff, +0x0001fffffffffffe, +0x0003fffffffffffc, +0x0007fffffffffff8, +0x000ffffffffffff0, +0x001fffffffffffe0, +0x003fffffffffffc0, +0x007fffffffffff80, +0x00ffffffffffff00, +0x01fffffffffffe00, +0x03fffffffffffc00, +0x07fffffffffff800, +0x0ffffffffffff000, +0x1fffffffffffe000, +0x3fffffffffffc000, +0x7fffffffffff8000, +0xffffffffffff0000, +0xfffffffffffe0001, +0xfffffffffffc0003, +0xfffffffffff80007, +0xfffffffffff0000f, +0xffffffffffe0001f, +0xffffffffffc0003f, +0xffffffffff80007f, +0xffffffffff0000ff, +0xfffffffffe0001ff, +0xfffffffffc0003ff, +0xfffffffff80007ff, +0xfffffffff0000fff, +0xffffffffe0001fff, +0xffffffffc0003fff, +0xffffffff80007fff, +0xffffffff0000ffff, +0xfffffffe0001ffff, +0xfffffffc0003ffff, +0xfffffff80007ffff, +0xfffffff0000fffff, +0xffffffe0001fffff, +0xffffffc0003fffff, +0xffffff80007fffff, +0xffffff0000ffffff, +0xfffffe0001ffffff, +0xfffffc0003ffffff, +0xfffff80007ffffff, +0xfffff0000fffffff, +0xffffe0001fffffff, +0xffffc0003fffffff, +0xffff80007fffffff, +0xffff0000ffffffff, +0xfffe0001ffffffff, +0xfffc0003ffffffff, +0xfff80007ffffffff, +0xfff0000fffffffff, +0xffe0001fffffffff, +0xffc0003fffffffff, +0xff80007fffffffff, +0xff0000ffffffffff, +0xfe0001ffffffffff, +0xfc0003ffffffffff, +0xf80007ffffffffff, +0xf0000fffffffffff, +0xe0001fffffffffff, +0xc0003fffffffffff, +0x80007fffffffffff, +0x0001ffffffffffff, +0x0003fffffffffffe, +0x0007fffffffffffc, +0x000ffffffffffff8, +0x001ffffffffffff0, +0x003fffffffffffe0, +0x007fffffffffffc0, +0x00ffffffffffff80, +0x01ffffffffffff00, +0x03fffffffffffe00, +0x07fffffffffffc00, +0x0ffffffffffff800, +0x1ffffffffffff000, +0x3fffffffffffe000, +0x7fffffffffffc000, +0xffffffffffff8000, +0xffffffffffff0001, +0xfffffffffffe0003, +0xfffffffffffc0007, +0xfffffffffff8000f, +0xfffffffffff0001f, +0xffffffffffe0003f, +0xffffffffffc0007f, +0xffffffffff8000ff, +0xffffffffff0001ff, +0xfffffffffe0003ff, +0xfffffffffc0007ff, +0xfffffffff8000fff, +0xfffffffff0001fff, +0xffffffffe0003fff, +0xffffffffc0007fff, +0xffffffff8000ffff, +0xffffffff0001ffff, +0xfffffffe0003ffff, +0xfffffffc0007ffff, +0xfffffff8000fffff, +0xfffffff0001fffff, +0xffffffe0003fffff, +0xffffffc0007fffff, +0xffffff8000ffffff, +0xffffff0001ffffff, +0xfffffe0003ffffff, +0xfffffc0007ffffff, +0xfffff8000fffffff, +0xfffff0001fffffff, +0xffffe0003fffffff, +0xffffc0007fffffff, +0xffff8000ffffffff, +0xffff0001ffffffff, +0xfffe0003ffffffff, +0xfffc0007ffffffff, +0xfff8000fffffffff, +0xfff0001fffffffff, +0xffe0003fffffffff, +0xffc0007fffffffff, +0xff8000ffffffffff, +0xff0001ffffffffff, +0xfe0003ffffffffff, +0xfc0007ffffffffff, +0xf8000fffffffffff, +0xf0001fffffffffff, +0xe0003fffffffffff, +0xc0007fffffffffff, +0x8000ffffffffffff, +0x0003ffffffffffff, +0x0007fffffffffffe, +0x000ffffffffffffc, +0x001ffffffffffff8, +0x003ffffffffffff0, +0x007fffffffffffe0, +0x00ffffffffffffc0, +0x01ffffffffffff80, 
+0x03ffffffffffff00, +0x07fffffffffffe00, +0x0ffffffffffffc00, +0x1ffffffffffff800, +0x3ffffffffffff000, +0x7fffffffffffe000, +0xffffffffffffc000, +0xffffffffffff8001, +0xffffffffffff0003, +0xfffffffffffe0007, +0xfffffffffffc000f, +0xfffffffffff8001f, +0xfffffffffff0003f, +0xffffffffffe0007f, +0xffffffffffc000ff, +0xffffffffff8001ff, +0xffffffffff0003ff, +0xfffffffffe0007ff, +0xfffffffffc000fff, +0xfffffffff8001fff, +0xfffffffff0003fff, +0xffffffffe0007fff, +0xffffffffc000ffff, +0xffffffff8001ffff, +0xffffffff0003ffff, +0xfffffffe0007ffff, +0xfffffffc000fffff, +0xfffffff8001fffff, +0xfffffff0003fffff, +0xffffffe0007fffff, +0xffffffc000ffffff, +0xffffff8001ffffff, +0xffffff0003ffffff, +0xfffffe0007ffffff, +0xfffffc000fffffff, +0xfffff8001fffffff, +0xfffff0003fffffff, +0xffffe0007fffffff, +0xffffc000ffffffff, +0xffff8001ffffffff, +0xffff0003ffffffff, +0xfffe0007ffffffff, +0xfffc000fffffffff, +0xfff8001fffffffff, +0xfff0003fffffffff, +0xffe0007fffffffff, +0xffc000ffffffffff, +0xff8001ffffffffff, +0xff0003ffffffffff, +0xfe0007ffffffffff, +0xfc000fffffffffff, +0xf8001fffffffffff, +0xf0003fffffffffff, +0xe0007fffffffffff, +0xc000ffffffffffff, +0x8001ffffffffffff, +0x0007ffffffffffff, +0x000ffffffffffffe, +0x001ffffffffffffc, +0x003ffffffffffff8, +0x007ffffffffffff0, +0x00ffffffffffffe0, +0x01ffffffffffffc0, +0x03ffffffffffff80, +0x07ffffffffffff00, +0x0ffffffffffffe00, +0x1ffffffffffffc00, +0x3ffffffffffff800, +0x7ffffffffffff000, +0xffffffffffffe000, +0xffffffffffffc001, +0xffffffffffff8003, +0xffffffffffff0007, +0xfffffffffffe000f, +0xfffffffffffc001f, +0xfffffffffff8003f, +0xfffffffffff0007f, +0xffffffffffe000ff, +0xffffffffffc001ff, +0xffffffffff8003ff, +0xffffffffff0007ff, +0xfffffffffe000fff, +0xfffffffffc001fff, +0xfffffffff8003fff, +0xfffffffff0007fff, +0xffffffffe000ffff, +0xffffffffc001ffff, +0xffffffff8003ffff, +0xffffffff0007ffff, +0xfffffffe000fffff, +0xfffffffc001fffff, +0xfffffff8003fffff, +0xfffffff0007fffff, +0xffffffe000ffffff, +0xffffffc001ffffff, +0xffffff8003ffffff, +0xffffff0007ffffff, +0xfffffe000fffffff, +0xfffffc001fffffff, +0xfffff8003fffffff, +0xfffff0007fffffff, +0xffffe000ffffffff, +0xffffc001ffffffff, +0xffff8003ffffffff, +0xffff0007ffffffff, +0xfffe000fffffffff, +0xfffc001fffffffff, +0xfff8003fffffffff, +0xfff0007fffffffff, +0xffe000ffffffffff, +0xffc001ffffffffff, +0xff8003ffffffffff, +0xff0007ffffffffff, +0xfe000fffffffffff, +0xfc001fffffffffff, +0xf8003fffffffffff, +0xf0007fffffffffff, +0xe000ffffffffffff, +0xc001ffffffffffff, +0x8003ffffffffffff, +0x000fffffffffffff, +0x001ffffffffffffe, +0x003ffffffffffffc, +0x007ffffffffffff8, +0x00fffffffffffff0, +0x01ffffffffffffe0, +0x03ffffffffffffc0, +0x07ffffffffffff80, +0x0fffffffffffff00, +0x1ffffffffffffe00, +0x3ffffffffffffc00, +0x7ffffffffffff800, +0xfffffffffffff000, +0xffffffffffffe001, +0xffffffffffffc003, +0xffffffffffff8007, +0xffffffffffff000f, +0xfffffffffffe001f, +0xfffffffffffc003f, +0xfffffffffff8007f, +0xfffffffffff000ff, +0xffffffffffe001ff, +0xffffffffffc003ff, +0xffffffffff8007ff, +0xffffffffff000fff, +0xfffffffffe001fff, +0xfffffffffc003fff, +0xfffffffff8007fff, +0xfffffffff000ffff, +0xffffffffe001ffff, +0xffffffffc003ffff, +0xffffffff8007ffff, +0xffffffff000fffff, +0xfffffffe001fffff, +0xfffffffc003fffff, +0xfffffff8007fffff, +0xfffffff000ffffff, +0xffffffe001ffffff, +0xffffffc003ffffff, +0xffffff8007ffffff, +0xffffff000fffffff, +0xfffffe001fffffff, +0xfffffc003fffffff, +0xfffff8007fffffff, +0xfffff000ffffffff, +0xffffe001ffffffff, +0xffffc003ffffffff, +0xffff8007ffffffff, +0xffff000fffffffff, 
+0xfffe001fffffffff, +0xfffc003fffffffff, +0xfff8007fffffffff, +0xfff000ffffffffff, +0xffe001ffffffffff, +0xffc003ffffffffff, +0xff8007ffffffffff, +0xff000fffffffffff, +0xfe001fffffffffff, +0xfc003fffffffffff, +0xf8007fffffffffff, +0xf000ffffffffffff, +0xe001ffffffffffff, +0xc003ffffffffffff, +0x8007ffffffffffff, +0x001fffffffffffff, +0x003ffffffffffffe, +0x007ffffffffffffc, +0x00fffffffffffff8, +0x01fffffffffffff0, +0x03ffffffffffffe0, +0x07ffffffffffffc0, +0x0fffffffffffff80, +0x1fffffffffffff00, +0x3ffffffffffffe00, +0x7ffffffffffffc00, +0xfffffffffffff800, +0xfffffffffffff001, +0xffffffffffffe003, +0xffffffffffffc007, +0xffffffffffff800f, +0xffffffffffff001f, +0xfffffffffffe003f, +0xfffffffffffc007f, +0xfffffffffff800ff, +0xfffffffffff001ff, +0xffffffffffe003ff, +0xffffffffffc007ff, +0xffffffffff800fff, +0xffffffffff001fff, +0xfffffffffe003fff, +0xfffffffffc007fff, +0xfffffffff800ffff, +0xfffffffff001ffff, +0xffffffffe003ffff, +0xffffffffc007ffff, +0xffffffff800fffff, +0xffffffff001fffff, +0xfffffffe003fffff, +0xfffffffc007fffff, +0xfffffff800ffffff, +0xfffffff001ffffff, +0xffffffe003ffffff, +0xffffffc007ffffff, +0xffffff800fffffff, +0xffffff001fffffff, +0xfffffe003fffffff, +0xfffffc007fffffff, +0xfffff800ffffffff, +0xfffff001ffffffff, +0xffffe003ffffffff, +0xffffc007ffffffff, +0xffff800fffffffff, +0xffff001fffffffff, +0xfffe003fffffffff, +0xfffc007fffffffff, +0xfff800ffffffffff, +0xfff001ffffffffff, +0xffe003ffffffffff, +0xffc007ffffffffff, +0xff800fffffffffff, +0xff001fffffffffff, +0xfe003fffffffffff, +0xfc007fffffffffff, +0xf800ffffffffffff, +0xf001ffffffffffff, +0xe003ffffffffffff, +0xc007ffffffffffff, +0x800fffffffffffff, +0x003fffffffffffff, +0x007ffffffffffffe, +0x00fffffffffffffc, +0x01fffffffffffff8, +0x03fffffffffffff0, +0x07ffffffffffffe0, +0x0fffffffffffffc0, +0x1fffffffffffff80, +0x3fffffffffffff00, +0x7ffffffffffffe00, +0xfffffffffffffc00, +0xfffffffffffff801, +0xfffffffffffff003, +0xffffffffffffe007, +0xffffffffffffc00f, +0xffffffffffff801f, +0xffffffffffff003f, +0xfffffffffffe007f, +0xfffffffffffc00ff, +0xfffffffffff801ff, +0xfffffffffff003ff, +0xffffffffffe007ff, +0xffffffffffc00fff, +0xffffffffff801fff, +0xffffffffff003fff, +0xfffffffffe007fff, +0xfffffffffc00ffff, +0xfffffffff801ffff, +0xfffffffff003ffff, +0xffffffffe007ffff, +0xffffffffc00fffff, +0xffffffff801fffff, +0xffffffff003fffff, +0xfffffffe007fffff, +0xfffffffc00ffffff, +0xfffffff801ffffff, +0xfffffff003ffffff, +0xffffffe007ffffff, +0xffffffc00fffffff, +0xffffff801fffffff, +0xffffff003fffffff, +0xfffffe007fffffff, +0xfffffc00ffffffff, +0xfffff801ffffffff, +0xfffff003ffffffff, +0xffffe007ffffffff, +0xffffc00fffffffff, +0xffff801fffffffff, +0xffff003fffffffff, +0xfffe007fffffffff, +0xfffc00ffffffffff, +0xfff801ffffffffff, +0xfff003ffffffffff, +0xffe007ffffffffff, +0xffc00fffffffffff, +0xff801fffffffffff, +0xff003fffffffffff, +0xfe007fffffffffff, +0xfc00ffffffffffff, +0xf801ffffffffffff, +0xf003ffffffffffff, +0xe007ffffffffffff, +0xc00fffffffffffff, +0x801fffffffffffff, +0x007fffffffffffff, +0x00fffffffffffffe, +0x01fffffffffffffc, +0x03fffffffffffff8, +0x07fffffffffffff0, +0x0fffffffffffffe0, +0x1fffffffffffffc0, +0x3fffffffffffff80, +0x7fffffffffffff00, +0xfffffffffffffe00, +0xfffffffffffffc01, +0xfffffffffffff803, +0xfffffffffffff007, +0xffffffffffffe00f, +0xffffffffffffc01f, +0xffffffffffff803f, +0xffffffffffff007f, +0xfffffffffffe00ff, +0xfffffffffffc01ff, +0xfffffffffff803ff, +0xfffffffffff007ff, +0xffffffffffe00fff, +0xffffffffffc01fff, +0xffffffffff803fff, +0xffffffffff007fff, +0xfffffffffe00ffff, 
+0xfffffffffc01ffff, +0xfffffffff803ffff, +0xfffffffff007ffff, +0xffffffffe00fffff, +0xffffffffc01fffff, +0xffffffff803fffff, +0xffffffff007fffff, +0xfffffffe00ffffff, +0xfffffffc01ffffff, +0xfffffff803ffffff, +0xfffffff007ffffff, +0xffffffe00fffffff, +0xffffffc01fffffff, +0xffffff803fffffff, +0xffffff007fffffff, +0xfffffe00ffffffff, +0xfffffc01ffffffff, +0xfffff803ffffffff, +0xfffff007ffffffff, +0xffffe00fffffffff, +0xffffc01fffffffff, +0xffff803fffffffff, +0xffff007fffffffff, +0xfffe00ffffffffff, +0xfffc01ffffffffff, +0xfff803ffffffffff, +0xfff007ffffffffff, +0xffe00fffffffffff, +0xffc01fffffffffff, +0xff803fffffffffff, +0xff007fffffffffff, +0xfe00ffffffffffff, +0xfc01ffffffffffff, +0xf803ffffffffffff, +0xf007ffffffffffff, +0xe00fffffffffffff, +0xc01fffffffffffff, +0x803fffffffffffff, +0x00ffffffffffffff, +0x01fffffffffffffe, +0x03fffffffffffffc, +0x07fffffffffffff8, +0x0ffffffffffffff0, +0x1fffffffffffffe0, +0x3fffffffffffffc0, +0x7fffffffffffff80, +0xffffffffffffff00, +0xfffffffffffffe01, +0xfffffffffffffc03, +0xfffffffffffff807, +0xfffffffffffff00f, +0xffffffffffffe01f, +0xffffffffffffc03f, +0xffffffffffff807f, +0xffffffffffff00ff, +0xfffffffffffe01ff, +0xfffffffffffc03ff, +0xfffffffffff807ff, +0xfffffffffff00fff, +0xffffffffffe01fff, +0xffffffffffc03fff, +0xffffffffff807fff, +0xffffffffff00ffff, +0xfffffffffe01ffff, +0xfffffffffc03ffff, +0xfffffffff807ffff, +0xfffffffff00fffff, +0xffffffffe01fffff, +0xffffffffc03fffff, +0xffffffff807fffff, +0xffffffff00ffffff, +0xfffffffe01ffffff, +0xfffffffc03ffffff, +0xfffffff807ffffff, +0xfffffff00fffffff, +0xffffffe01fffffff, +0xffffffc03fffffff, +0xffffff807fffffff, +0xffffff00ffffffff, +0xfffffe01ffffffff, +0xfffffc03ffffffff, +0xfffff807ffffffff, +0xfffff00fffffffff, +0xffffe01fffffffff, +0xffffc03fffffffff, +0xffff807fffffffff, +0xffff00ffffffffff, +0xfffe01ffffffffff, +0xfffc03ffffffffff, +0xfff807ffffffffff, +0xfff00fffffffffff, +0xffe01fffffffffff, +0xffc03fffffffffff, +0xff807fffffffffff, +0xff00ffffffffffff, +0xfe01ffffffffffff, +0xfc03ffffffffffff, +0xf807ffffffffffff, +0xf00fffffffffffff, +0xe01fffffffffffff, +0xc03fffffffffffff, +0x807fffffffffffff, +0x01ffffffffffffff, +0x03fffffffffffffe, +0x07fffffffffffffc, +0x0ffffffffffffff8, +0x1ffffffffffffff0, +0x3fffffffffffffe0, +0x7fffffffffffffc0, +0xffffffffffffff80, +0xffffffffffffff01, +0xfffffffffffffe03, +0xfffffffffffffc07, +0xfffffffffffff80f, +0xfffffffffffff01f, +0xffffffffffffe03f, +0xffffffffffffc07f, +0xffffffffffff80ff, +0xffffffffffff01ff, +0xfffffffffffe03ff, +0xfffffffffffc07ff, +0xfffffffffff80fff, +0xfffffffffff01fff, +0xffffffffffe03fff, +0xffffffffffc07fff, +0xffffffffff80ffff, +0xffffffffff01ffff, +0xfffffffffe03ffff, +0xfffffffffc07ffff, +0xfffffffff80fffff, +0xfffffffff01fffff, +0xffffffffe03fffff, +0xffffffffc07fffff, +0xffffffff80ffffff, +0xffffffff01ffffff, +0xfffffffe03ffffff, +0xfffffffc07ffffff, +0xfffffff80fffffff, +0xfffffff01fffffff, +0xffffffe03fffffff, +0xffffffc07fffffff, +0xffffff80ffffffff, +0xffffff01ffffffff, +0xfffffe03ffffffff, +0xfffffc07ffffffff, +0xfffff80fffffffff, +0xfffff01fffffffff, +0xffffe03fffffffff, +0xffffc07fffffffff, +0xffff80ffffffffff, +0xffff01ffffffffff, +0xfffe03ffffffffff, +0xfffc07ffffffffff, +0xfff80fffffffffff, +0xfff01fffffffffff, +0xffe03fffffffffff, +0xffc07fffffffffff, +0xff80ffffffffffff, +0xff01ffffffffffff, +0xfe03ffffffffffff, +0xfc07ffffffffffff, +0xf80fffffffffffff, +0xf01fffffffffffff, +0xe03fffffffffffff, +0xc07fffffffffffff, +0x80ffffffffffffff, +0x03ffffffffffffff, +0x07fffffffffffffe, +0x0ffffffffffffffc, 
+0x1ffffffffffffff8, +0x3ffffffffffffff0, +0x7fffffffffffffe0, +0xffffffffffffffc0, +0xffffffffffffff81, +0xffffffffffffff03, +0xfffffffffffffe07, +0xfffffffffffffc0f, +0xfffffffffffff81f, +0xfffffffffffff03f, +0xffffffffffffe07f, +0xffffffffffffc0ff, +0xffffffffffff81ff, +0xffffffffffff03ff, +0xfffffffffffe07ff, +0xfffffffffffc0fff, +0xfffffffffff81fff, +0xfffffffffff03fff, +0xffffffffffe07fff, +0xffffffffffc0ffff, +0xffffffffff81ffff, +0xffffffffff03ffff, +0xfffffffffe07ffff, +0xfffffffffc0fffff, +0xfffffffff81fffff, +0xfffffffff03fffff, +0xffffffffe07fffff, +0xffffffffc0ffffff, +0xffffffff81ffffff, +0xffffffff03ffffff, +0xfffffffe07ffffff, +0xfffffffc0fffffff, +0xfffffff81fffffff, +0xfffffff03fffffff, +0xffffffe07fffffff, +0xffffffc0ffffffff, +0xffffff81ffffffff, +0xffffff03ffffffff, +0xfffffe07ffffffff, +0xfffffc0fffffffff, +0xfffff81fffffffff, +0xfffff03fffffffff, +0xffffe07fffffffff, +0xffffc0ffffffffff, +0xffff81ffffffffff, +0xffff03ffffffffff, +0xfffe07ffffffffff, +0xfffc0fffffffffff, +0xfff81fffffffffff, +0xfff03fffffffffff, +0xffe07fffffffffff, +0xffc0ffffffffffff, +0xff81ffffffffffff, +0xff03ffffffffffff, +0xfe07ffffffffffff, +0xfc0fffffffffffff, +0xf81fffffffffffff, +0xf03fffffffffffff, +0xe07fffffffffffff, +0xc0ffffffffffffff, +0x81ffffffffffffff, +0x07ffffffffffffff, +0x0ffffffffffffffe, +0x1ffffffffffffffc, +0x3ffffffffffffff8, +0x7ffffffffffffff0, +0xffffffffffffffe0, +0xffffffffffffffc1, +0xffffffffffffff83, +0xffffffffffffff07, +0xfffffffffffffe0f, +0xfffffffffffffc1f, +0xfffffffffffff83f, +0xfffffffffffff07f, +0xffffffffffffe0ff, +0xffffffffffffc1ff, +0xffffffffffff83ff, +0xffffffffffff07ff, +0xfffffffffffe0fff, +0xfffffffffffc1fff, +0xfffffffffff83fff, +0xfffffffffff07fff, +0xffffffffffe0ffff, +0xffffffffffc1ffff, +0xffffffffff83ffff, +0xffffffffff07ffff, +0xfffffffffe0fffff, +0xfffffffffc1fffff, +0xfffffffff83fffff, +0xfffffffff07fffff, +0xffffffffe0ffffff, +0xffffffffc1ffffff, +0xffffffff83ffffff, +0xffffffff07ffffff, +0xfffffffe0fffffff, +0xfffffffc1fffffff, +0xfffffff83fffffff, +0xfffffff07fffffff, +0xffffffe0ffffffff, +0xffffffc1ffffffff, +0xffffff83ffffffff, +0xffffff07ffffffff, +0xfffffe0fffffffff, +0xfffffc1fffffffff, +0xfffff83fffffffff, +0xfffff07fffffffff, +0xffffe0ffffffffff, +0xffffc1ffffffffff, +0xffff83ffffffffff, +0xffff07ffffffffff, +0xfffe0fffffffffff, +0xfffc1fffffffffff, +0xfff83fffffffffff, +0xfff07fffffffffff, +0xffe0ffffffffffff, +0xffc1ffffffffffff, +0xff83ffffffffffff, +0xff07ffffffffffff, +0xfe0fffffffffffff, +0xfc1fffffffffffff, +0xf83fffffffffffff, +0xf07fffffffffffff, +0xe0ffffffffffffff, +0xc1ffffffffffffff, +0x83ffffffffffffff, +0x0fffffffffffffff, +0x1ffffffffffffffe, +0x3ffffffffffffffc, +0x7ffffffffffffff8, +0xfffffffffffffff0, +0xffffffffffffffe1, +0xffffffffffffffc3, +0xffffffffffffff87, +0xffffffffffffff0f, +0xfffffffffffffe1f, +0xfffffffffffffc3f, +0xfffffffffffff87f, +0xfffffffffffff0ff, +0xffffffffffffe1ff, +0xffffffffffffc3ff, +0xffffffffffff87ff, +0xffffffffffff0fff, +0xfffffffffffe1fff, +0xfffffffffffc3fff, +0xfffffffffff87fff, +0xfffffffffff0ffff, +0xffffffffffe1ffff, +0xffffffffffc3ffff, +0xffffffffff87ffff, +0xffffffffff0fffff, +0xfffffffffe1fffff, +0xfffffffffc3fffff, +0xfffffffff87fffff, +0xfffffffff0ffffff, +0xffffffffe1ffffff, +0xffffffffc3ffffff, +0xffffffff87ffffff, +0xffffffff0fffffff, +0xfffffffe1fffffff, +0xfffffffc3fffffff, +0xfffffff87fffffff, +0xfffffff0ffffffff, +0xffffffe1ffffffff, +0xffffffc3ffffffff, +0xffffff87ffffffff, +0xffffff0fffffffff, +0xfffffe1fffffffff, +0xfffffc3fffffffff, +0xfffff87fffffffff, 
+0xfffff0ffffffffff, +0xffffe1ffffffffff, +0xffffc3ffffffffff, +0xffff87ffffffffff, +0xffff0fffffffffff, +0xfffe1fffffffffff, +0xfffc3fffffffffff, +0xfff87fffffffffff, +0xfff0ffffffffffff, +0xffe1ffffffffffff, +0xffc3ffffffffffff, +0xff87ffffffffffff, +0xff0fffffffffffff, +0xfe1fffffffffffff, +0xfc3fffffffffffff, +0xf87fffffffffffff, +0xf0ffffffffffffff, +0xe1ffffffffffffff, +0xc3ffffffffffffff, +0x87ffffffffffffff, +0x1fffffffffffffff, +0x3ffffffffffffffe, +0x7ffffffffffffffc, +0xfffffffffffffff8, +0xfffffffffffffff1, +0xffffffffffffffe3, +0xffffffffffffffc7, +0xffffffffffffff8f, +0xffffffffffffff1f, +0xfffffffffffffe3f, +0xfffffffffffffc7f, +0xfffffffffffff8ff, +0xfffffffffffff1ff, +0xffffffffffffe3ff, +0xffffffffffffc7ff, +0xffffffffffff8fff, +0xffffffffffff1fff, +0xfffffffffffe3fff, +0xfffffffffffc7fff, +0xfffffffffff8ffff, +0xfffffffffff1ffff, +0xffffffffffe3ffff, +0xffffffffffc7ffff, +0xffffffffff8fffff, +0xffffffffff1fffff, +0xfffffffffe3fffff, +0xfffffffffc7fffff, +0xfffffffff8ffffff, +0xfffffffff1ffffff, +0xffffffffe3ffffff, +0xffffffffc7ffffff, +0xffffffff8fffffff, +0xffffffff1fffffff, +0xfffffffe3fffffff, +0xfffffffc7fffffff, +0xfffffff8ffffffff, +0xfffffff1ffffffff, +0xffffffe3ffffffff, +0xffffffc7ffffffff, +0xffffff8fffffffff, +0xffffff1fffffffff, +0xfffffe3fffffffff, +0xfffffc7fffffffff, +0xfffff8ffffffffff, +0xfffff1ffffffffff, +0xffffe3ffffffffff, +0xffffc7ffffffffff, +0xffff8fffffffffff, +0xffff1fffffffffff, +0xfffe3fffffffffff, +0xfffc7fffffffffff, +0xfff8ffffffffffff, +0xfff1ffffffffffff, +0xffe3ffffffffffff, +0xffc7ffffffffffff, +0xff8fffffffffffff, +0xff1fffffffffffff, +0xfe3fffffffffffff, +0xfc7fffffffffffff, +0xf8ffffffffffffff, +0xf1ffffffffffffff, +0xe3ffffffffffffff, +0xc7ffffffffffffff, +0x8fffffffffffffff, +0x3fffffffffffffff, +0x7ffffffffffffffe, +0xfffffffffffffffc, +0xfffffffffffffff9, +0xfffffffffffffff3, +0xffffffffffffffe7, +0xffffffffffffffcf, +0xffffffffffffff9f, +0xffffffffffffff3f, +0xfffffffffffffe7f, +0xfffffffffffffcff, +0xfffffffffffff9ff, +0xfffffffffffff3ff, +0xffffffffffffe7ff, +0xffffffffffffcfff, +0xffffffffffff9fff, +0xffffffffffff3fff, +0xfffffffffffe7fff, +0xfffffffffffcffff, +0xfffffffffff9ffff, +0xfffffffffff3ffff, +0xffffffffffe7ffff, +0xffffffffffcfffff, +0xffffffffff9fffff, +0xffffffffff3fffff, +0xfffffffffe7fffff, +0xfffffffffcffffff, +0xfffffffff9ffffff, +0xfffffffff3ffffff, +0xffffffffe7ffffff, +0xffffffffcfffffff, +0xffffffff9fffffff, +0xffffffff3fffffff, +0xfffffffe7fffffff, +0xfffffffcffffffff, +0xfffffff9ffffffff, +0xfffffff3ffffffff, +0xffffffe7ffffffff, +0xffffffcfffffffff, +0xffffff9fffffffff, +0xffffff3fffffffff, +0xfffffe7fffffffff, +0xfffffcffffffffff, +0xfffff9ffffffffff, +0xfffff3ffffffffff, +0xffffe7ffffffffff, +0xffffcfffffffffff, +0xffff9fffffffffff, +0xffff3fffffffffff, +0xfffe7fffffffffff, +0xfffcffffffffffff, +0xfff9ffffffffffff, +0xfff3ffffffffffff, +0xffe7ffffffffffff, +0xffcfffffffffffff, +0xff9fffffffffffff, +0xff3fffffffffffff, +0xfe7fffffffffffff, +0xfcffffffffffffff, +0xf9ffffffffffffff, +0xf3ffffffffffffff, +0xe7ffffffffffffff, +0xcfffffffffffffff, +0x9fffffffffffffff, +0x7fffffffffffffff, +0xfffffffffffffffe, +0xfffffffffffffffd, +0xfffffffffffffffb, +0xfffffffffffffff7, +0xffffffffffffffef, +0xffffffffffffffdf, +0xffffffffffffffbf, +0xffffffffffffff7f, +0xfffffffffffffeff, +0xfffffffffffffdff, +0xfffffffffffffbff, +0xfffffffffffff7ff, +0xffffffffffffefff, +0xffffffffffffdfff, +0xffffffffffffbfff, +0xffffffffffff7fff, +0xfffffffffffeffff, +0xfffffffffffdffff, +0xfffffffffffbffff, +0xfffffffffff7ffff, 
+0xffffffffffefffff, +0xffffffffffdfffff, +0xffffffffffbfffff, +0xffffffffff7fffff, +0xfffffffffeffffff, +0xfffffffffdffffff, +0xfffffffffbffffff, +0xfffffffff7ffffff, +0xffffffffefffffff, +0xffffffffdfffffff, +0xffffffffbfffffff, +0xffffffff7fffffff, +0xfffffffeffffffff, +0xfffffffdffffffff, +0xfffffffbffffffff, +0xfffffff7ffffffff, +0xffffffefffffffff, +0xffffffdfffffffff, +0xffffffbfffffffff, +0xffffff7fffffffff, +0xfffffeffffffffff, +0xfffffdffffffffff, +0xfffffbffffffffff, +0xfffff7ffffffffff, +0xffffefffffffffff, +0xffffdfffffffffff, +0xffffbfffffffffff, +0xffff7fffffffffff, +0xfffeffffffffffff, +0xfffdffffffffffff, +0xfffbffffffffffff, +0xfff7ffffffffffff, +0xffefffffffffffff, +0xffdfffffffffffff, +0xffbfffffffffffff, +0xff7fffffffffffff, +0xfeffffffffffffff, +0xfdffffffffffffff, +0xfbffffffffffffff, +0xf7ffffffffffffff, +0xefffffffffffffff, +0xdfffffffffffffff, +0xbfffffffffffffff, +/* +#include +#include + +// Dumps all legal bitmask immediates for ARM64 +// Total number of unique 64-bit patterns: +// 1*2 + 3*4 + 7*8 + 15*16 + 31*32 + 63*64 = 5334 + +const char *uint64_to_binary(uint64_t x) { + static char b[65]; + unsigned i; + for (i = 0; i < 64; i++, x <<= 1) + b[i] = (0x8000000000000000ULL & x)? '1' : '0'; + b[64] = '\0'; + return b; +} + +int main() { + uint64_t result; + unsigned size, length, rotation, e; + for (size = 2; size <= 64; size *= 2) + for (length = 1; length < size; ++length) { + result = 0xffffffffffffffffULL >> (64 - length); + for (e = size; e < 64; e *= 2) + result |= result << e; + for (rotation = 0; rotation < size; ++rotation) { +#if 0 + printf("0x%016llx %s (size=%u, length=%u, rotation=%u)\n", + (unsigned long long)result, uint64_to_binary(result), + size, length, rotation); +#endif + printf("0x%016llx\n", (unsigned long long)result ); + result = (result >> 63) | (result << 1); + } + } + return 0; +} +*/ diff --git a/src/mapleall/maple_be/include/cg/visitor_common.h b/src/mapleall/maple_be/include/cg/visitor_common.h new file mode 100644 index 0000000000000000000000000000000000000000..fd5be7080e282d6cea0553bab8cfc3cf9446db29 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/visitor_common.h @@ -0,0 +1,43 @@ +/* + * Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ + +#ifndef MAPLEBE_INCLUDE_CG_VISITOR_COMMON_H +#define MAPLEBE_INCLUDE_CG_VISITOR_COMMON_H +namespace maplebe { +class OperandVisitorBase { + public: + virtual ~OperandVisitorBase() = default; +}; + +template +class OperandVisitor { + public: + virtual ~OperandVisitor() = default; + virtual void Visit(Visitable *v) = 0; +}; + +template +class OperandVisitors { + public: + virtual ~OperandVisitors() = default; +}; + +template +class OperandVisitors : + public OperandVisitor, + public OperandVisitor, + public OperandVisitor ... 
{}; +} /* namespace maplebe */ +#endif /* MAPLEBE_INCLUDE_CG_VISITOR_COMMON_H */ diff --git a/src/mapleall/maple_be/include/cg/x86_64/x64_MPISel.h b/src/mapleall/maple_be/include/cg/x86_64/x64_MPISel.h new file mode 100644 index 0000000000000000000000000000000000000000..6a26100ccba27f2a3b5f5166a244a1ade310d8b3 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/x86_64/x64_MPISel.h @@ -0,0 +1,98 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ + +#ifndef MAPLEBE_INCLUDE_X64_MPISEL_H +#define MAPLEBE_INCLUDE_X64_MPISEL_H + +#include "isel.h" +#include "x64_call_conv.h" + +namespace maplebe { +class X64MPIsel : public MPISel { + public: + X64MPIsel(MemPool &mp, CGFunc &f) : MPISel(mp, f) {} + ~X64MPIsel() override = default; + void SelectReturn(NaryStmtNode &retNode, Operand &opnd) override; + void SelectReturn() override; + void SelectCall(CallNode &callNode) override; + void SelectIcall(IcallNode &icallNode, Operand &opnd0) override; + Operand &ProcessReturnReg(PrimType primType, int32 sReg) override; + Operand &GetTargetRetOperand(PrimType primType, int32 sReg) override; + Operand *SelectAddrof(AddrofNode &expr, const BaseNode &parent) override; + Operand *SelectAddrofFunc(AddroffuncNode &expr, const BaseNode &parent) override; + Operand *SelectAddrofLabel(AddroflabelNode &expr, const BaseNode &parent) override; + Operand *SelectDoubleConst(MIRDoubleConst &intConst, PrimType primType) const override; + void SelectGoto(GotoNode &stmt) override; + void SelectIntrinCall(IntrinsiccallNode &intrinsiccallNode) override; + void SelectAggIassign(IassignNode &stmt, Operand &AddrOpnd, Operand &opndRhs) override; + void SelectAggDassign(maplebe::MirTypeInfo &lhsInfo, MemOperand &symbolMem, Operand &opndRhs) override; + void SelectAggCopy(MemOperand &lhs, MemOperand &rhs, uint32 copySize) override; + void SelectRangeGoto(RangeGotoNode &rangeGotoNode, Operand &srcOpnd) override; + void SelectCondGoto(CondGotoNode &stmt, BaseNode &condNode, Operand &opnd0) override; + void SelectIgoto(Operand &opnd0) override; + Operand *SelectDiv(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) override; + Operand *SelectRem(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) override; + Operand *SelectMpy(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) override; + Operand *SelectCmpOp(CompareNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) override; + Operand *SelectSelect(TernaryNode &expr, Operand &cond, Operand &trueOpnd, Operand &falseOpnd, + const BaseNode &parent) override; + Operand *SelectStrLiteral(ConststrNode &constStr) override; + void SelectIntAggCopyReturn(MemOperand &symbolMem, uint64 aggSize) override; + /* Create the operand interface directly */ + MemOperand &CreateMemOpndOrNull(PrimType ptype, const BaseNode &parent, BaseNode &addrExpr, int64 offset = 0); + Operand *SelectBswap(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + void SelectAsm(AsmNode &node) 
override;
+ private:
+  MemOperand &GetOrCreateMemOpndFromSymbol(const MIRSymbol &symbol, FieldID fieldId = 0) const override;
+  MemOperand &GetOrCreateMemOpndFromSymbol(const MIRSymbol &symbol, uint32 opndSize, int64 offset) const override;
+  Insn &AppendCall(x64::X64MOP_t mOp, Operand &targetOpnd,
+                   ListOperand &paramOpnds, ListOperand &retOpnds);
+  void SelectCalleeReturn(MIRType *retType, ListOperand &retOpnds);
+
+  /* Inline function implementation of va_start */
+  void GenCVaStartIntrin(RegOperand &opnd, uint32 stkSize);
+
+  /* Subclass private instruction selector function */
+  void SelectCVaStart(const IntrinsiccallNode &intrnNode);
+  void SelectParmList(StmtNode &naryNode, ListOperand &srcOpnds, uint32 &fpNum);
+  void SelectMpy(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType);
+  void SelectCmp(Operand &opnd0, Operand &opnd1, PrimType primType);
+  void SelectCmpResult(RegOperand &resOpnd, Opcode opCode, PrimType primType, PrimType primOpndType);
+  Operand *SelectDivRem(RegOperand &opnd0, RegOperand &opnd1, PrimType primType, Opcode opcode);
+  void SelectSelect(Operand &resOpnd, Operand &trueOpnd, Operand &falseOpnd, PrimType primType,
+                    Opcode cmpOpcode, PrimType cmpPrimType);
+  RegOperand &GetTargetStackPointer(PrimType primType) override;
+  RegOperand &GetTargetBasicPointer(PrimType primType) override;
+  std::tuple GetMemOpndInfoFromAggregateNode(BaseNode &argExpr);
+  void SelectParmListForAggregate(BaseNode &argExpr, X64CallConvImpl &parmLocator, bool isArgUnused);
+  void CreateCallStructParamPassByReg(MemOperand &memOpnd, regno_t regNo, uint32 parmNum);
+  void CreateCallStructParamPassByStack(MemOperand &addrOpnd, int32 symSize, int32 baseOffset);
+  void SelectAggCopyReturn(const MIRSymbol &symbol, MIRType &symbolType, uint64 symbolSize);
+  uint32 GetAggCopySize(uint32 offset1, uint32 offset2, uint32 alignment) const;
+  bool IsParamStructCopy(const MIRSymbol &symbol);
+  void SelectMinOrMax(bool isMin, Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) override;
+  void SelectLibCallNoReturn(const std::string &funcName, std::vector &opndVec, PrimType primType);
+  void SelectLibCallNArg(const std::string &funcName, std::vector &opndVec,
+                         std::vector pt);
+  void SelectPseduoForReturn(std::vector &retRegs);
+  RegOperand *PrepareMemcpyParm(MemOperand &memOperand, MOperator mOp);
+  RegOperand *PrepareMemcpyParm(uint64 copySize);
+
+  /* save param pass by reg */
+  std::vector> paramPassByReg;
+};
+}
+
+#endif /* MAPLEBE_INCLUDE_X64_MPISEL_H */
diff --git a/src/mapleall/maple_be/include/cg/x86_64/x64_abi.h b/src/mapleall/maple_be/include/cg/x86_64/x64_abi.h
new file mode 100644
index 0000000000000000000000000000000000000000..252576216896bbabada6bd094b898619c07de156
--- /dev/null
+++ b/src/mapleall/maple_be/include/cg/x86_64/x64_abi.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#ifndef MAPLEBE_INCLUDE_CG_X64_X64_ABI_H
+#define MAPLEBE_INCLUDE_CG_X64_X64_ABI_H
+
+#include "x64_isa.h"
+#include "types_def.h"
+#include "becommon.h"
+
+namespace maplebe {
+using namespace maple;
+
+namespace x64 {
+constexpr int32 kNumIntParmRegs = 6;
+constexpr int32 kNumIntReturnRegs = 2;
+constexpr int32 kNumFloatParmRegs = 8;
+constexpr int32 kNumFloatReturnRegs = 2;
+
+constexpr uint32 kNormalUseOperandNum = 2;
+constexpr uint32 kMaxInstrForCondBr = 260000;
+
+constexpr X64reg kIntParmRegs[kNumIntParmRegs] = { R7, R6, R3, R2, R8, R9 };
+constexpr X64reg kIntReturnRegs[kNumIntReturnRegs] = { R0, R3 };
+constexpr X64reg kFloatParmRegs[kNumFloatParmRegs] = { V0, V1, V2, V3, V4, V5, V6, V7 };
+constexpr X64reg kFloatReturnRegs[kNumFloatReturnRegs] = { V0, V1 };
+
+/*
+ * Register usage follows the x86-64 (System V AMD64) ABI.
+ */
+bool IsAvailableReg(X64reg reg);
+bool IsCalleeSavedReg(X64reg reg);
+bool IsCallerSaveReg(X64reg reg);
+bool IsParamReg(X64reg reg);
+bool IsSpillReg(X64reg reg);
+bool IsExtraSpillReg(X64reg reg);
+bool IsSpillRegInRA(X64reg regNO, bool has3RegOpnd);
+PrimType IsVectorArrayType(MIRType *ty, uint32 &arraySize);
+} /* namespace x64 */
+
+/*
+ * Argument passing on x86-64 (System V AMD64 ABI).
+ * After the argument values have been computed, they are placed either in registers
+ * or pushed on the stack. Each argument is classified into one of the following classes:
+ * - INTEGER: integral types that fit into one of the general purpose registers.
+ * - SSE: types that fit into a vector register.
+ * - SSEUP: types that fit into a vector register and can be passed and returned in
+ *   the upper bytes of it.
+ * - X87, X87UP: types that are returned via the x87 FPU.
+ * - COMPLEX_X87: types that are returned via the x87 FPU.
+ * - NO_CLASS: used as an initializer in the classification algorithm, and for padding
+ *   and empty structures and unions.
+ * - MEMORY: types that are passed and returned in memory via the stack.
+ */
+} /* namespace maplebe */
+
+#endif /* MAPLEBE_INCLUDE_CG_X64_X64_ABI_H */
diff --git a/src/mapleall/maple_be/include/cg/x86_64/x64_abstract_mapping.def b/src/mapleall/maple_be/include/cg/x86_64/x64_abstract_mapping.def
new file mode 100644
index 0000000000000000000000000000000000000000..9aedec04365da76977bd80c9170dcd124b27667c
--- /dev/null
+++ b/src/mapleall/maple_be/include/cg/x86_64/x64_abstract_mapping.def
@@ -0,0 +1,126 @@
+/*
+ * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under the Mulan PSL v1.
+ * You can use this software according to the terms and conditions of the Mulan PSL v1.
+ * You may obtain a copy of Mulan PSL v1 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v1 for more details.
+ */ + +/* Mapping between abstract maple machine IR and machine operation code of X86_64*/ +/* {mmir, mop} */ +DEFINE_MAPPING(abstract::MOP_undef, x64::MOP_begin) + +/* Mov */ +DEFINE_MAPPING(abstract::MOP_copy_ri_8, x64::MOP_movb_i_r) +DEFINE_MAPPING(abstract::MOP_copy_ri_16, x64::MOP_movw_i_r) +DEFINE_MAPPING(abstract::MOP_copy_ri_32, x64::MOP_movl_i_r) +DEFINE_MAPPING(abstract::MOP_copy_ri_64, x64::MOP_movq_i_r) +DEFINE_MAPPING(abstract::MOP_copy_rr_8, x64::MOP_movb_r_r) +DEFINE_MAPPING(abstract::MOP_copy_rr_16, x64::MOP_movw_r_r) +DEFINE_MAPPING(abstract::MOP_copy_rr_32, x64::MOP_movl_r_r) +DEFINE_MAPPING(abstract::MOP_copy_rr_64, x64::MOP_movq_r_r) +DEFINE_MAPPING(abstract::MOP_copy_ff_32, x64::MOP_movl_r_r) +DEFINE_MAPPING(abstract::MOP_copy_ff_64, x64::MOP_movq_r_r) + +/* str/load */ +DEFINE_MAPPING(abstract::MOP_str_8, x64::MOP_movb_r_m) +DEFINE_MAPPING(abstract::MOP_str_16, x64::MOP_movw_r_m) +DEFINE_MAPPING(abstract::MOP_str_32, x64::MOP_movl_r_m) +DEFINE_MAPPING(abstract::MOP_str_64, x64::MOP_movq_r_m) +DEFINE_MAPPING(abstract::MOP_load_8, x64::MOP_movb_m_r) +DEFINE_MAPPING(abstract::MOP_load_16, x64::MOP_movw_m_r) +DEFINE_MAPPING(abstract::MOP_load_32, x64::MOP_movl_m_r) +DEFINE_MAPPING(abstract::MOP_load_64, x64::MOP_movq_m_r) + +/* str/load floating point */ +DEFINE_MAPPING(abstract::MOP_str_f_64, x64::MOP_movfd_r_m) +DEFINE_MAPPING(abstract::MOP_load_f_64, x64::MOP_movfd_m_r) +DEFINE_MAPPING(abstract::MOP_str_f_32, x64::MOP_movfs_r_m) +DEFINE_MAPPING(abstract::MOP_load_f_32, x64::MOP_movfs_m_r) + +/* shift -- shl/ashr/lshr */ +DEFINE_MAPPING(abstract::MOP_shl_8, x64::MOP_shlb_r_r) +DEFINE_MAPPING(abstract::MOP_shl_16, x64::MOP_shlw_r_r) +DEFINE_MAPPING(abstract::MOP_shl_32, x64::MOP_shll_r_r) +DEFINE_MAPPING(abstract::MOP_shl_64, x64::MOP_shlq_r_r) +DEFINE_MAPPING(abstract::MOP_ashr_8, x64::MOP_sarb_r_r) +DEFINE_MAPPING(abstract::MOP_ashr_16, x64::MOP_sarw_r_r) +DEFINE_MAPPING(abstract::MOP_ashr_32, x64::MOP_sarl_r_r) +DEFINE_MAPPING(abstract::MOP_ashr_64, x64::MOP_sarq_r_r) +DEFINE_MAPPING(abstract::MOP_lshr_8, x64::MOP_shrb_r_r) +DEFINE_MAPPING(abstract::MOP_lshr_16, x64::MOP_shrw_r_r) +DEFINE_MAPPING(abstract::MOP_lshr_32, x64::MOP_shrl_r_r) +DEFINE_MAPPING(abstract::MOP_lshr_64, x64::MOP_shrq_r_r) + +/* BasicOp */ +DEFINE_MAPPING(abstract::MOP_and_8, x64::MOP_andb_r_r) +DEFINE_MAPPING(abstract::MOP_and_16, x64::MOP_andw_r_r) +DEFINE_MAPPING(abstract::MOP_and_32, x64::MOP_andl_r_r) +DEFINE_MAPPING(abstract::MOP_and_64, x64::MOP_andq_r_r) +DEFINE_MAPPING(abstract::MOP_or_8, x64::MOP_orb_r_r) +DEFINE_MAPPING(abstract::MOP_or_16, x64::MOP_orw_r_r) +DEFINE_MAPPING(abstract::MOP_or_32, x64::MOP_orl_r_r) +DEFINE_MAPPING(abstract::MOP_or_64, x64::MOP_orq_r_r) +DEFINE_MAPPING(abstract::MOP_xor_8, x64::MOP_xorb_r_r) +DEFINE_MAPPING(abstract::MOP_xor_16, x64::MOP_xorw_r_r) +DEFINE_MAPPING(abstract::MOP_xor_32, x64::MOP_xorl_r_r) +DEFINE_MAPPING(abstract::MOP_xor_64, x64::MOP_xorq_r_r) +DEFINE_MAPPING(abstract::MOP_add_8, x64::MOP_addb_r_r) +DEFINE_MAPPING(abstract::MOP_add_16, x64::MOP_addw_r_r) +DEFINE_MAPPING(abstract::MOP_add_32, x64::MOP_addl_r_r) +DEFINE_MAPPING(abstract::MOP_add_64, x64::MOP_addq_r_r) +DEFINE_MAPPING(abstract::MOP_sub_8, x64::MOP_subb_r_r) +DEFINE_MAPPING(abstract::MOP_sub_16, x64::MOP_subw_r_r) +DEFINE_MAPPING(abstract::MOP_sub_32, x64::MOP_subl_r_r) +DEFINE_MAPPING(abstract::MOP_sub_64, x64::MOP_subq_r_r) +DEFINE_MAPPING(abstract::MOP_add_f_32, x64::MOP_adds_r_r) +DEFINE_MAPPING(abstract::MOP_add_f_64, x64::MOP_addd_r_r) 
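+/*
+ * Usage sketch (illustrative only): DEFINE_MAPPING is an X-macro hook, so a consumer
+ * defines it before including this .def file to materialize the mapping, e.g. as a
+ * lookup table from abstract MOPs to x64 MOPs. The names below are assumptions for
+ * illustration, not the actual consumer in maple_be:
+ *
+ *   struct MopMapping { maple::uint32 abstractMop; maple::uint32 x64Mop; };
+ *   static const MopMapping kAbstractToX64Map[] = {
+ *   #define DEFINE_MAPPING(ABSTRACT_MOP, X64_MOP) {ABSTRACT_MOP, X64_MOP},
+ *   #include "x64_abstract_mapping.def"
+ *   #undef DEFINE_MAPPING
+ *   };
+ */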
+DEFINE_MAPPING(abstract::MOP_sub_f_32, x64::MOP_subs_r_r)
+DEFINE_MAPPING(abstract::MOP_sub_f_64, x64::MOP_subd_r_r)
+
+/* UnaryOp */
+DEFINE_MAPPING(abstract::MOP_not_8, x64::MOP_notb_r)
+DEFINE_MAPPING(abstract::MOP_not_16, x64::MOP_notw_r)
+DEFINE_MAPPING(abstract::MOP_not_32, x64::MOP_notl_r)
+DEFINE_MAPPING(abstract::MOP_not_64, x64::MOP_notq_r)
+DEFINE_MAPPING(abstract::MOP_neg_8, x64::MOP_negb_r)
+DEFINE_MAPPING(abstract::MOP_neg_16, x64::MOP_negw_r)
+DEFINE_MAPPING(abstract::MOP_neg_32, x64::MOP_negl_r)
+DEFINE_MAPPING(abstract::MOP_neg_64, x64::MOP_negq_r)
+
+/* CvtOp */
+DEFINE_MAPPING(abstract::MOP_zext_rr_16_8, x64::MOP_movzbw_r_r)
+DEFINE_MAPPING(abstract::MOP_sext_rr_16_8, x64::MOP_movsbw_r_r)
+DEFINE_MAPPING(abstract::MOP_zext_rr_32_8, x64::MOP_movzbl_r_r)
+DEFINE_MAPPING(abstract::MOP_sext_rr_32_8, x64::MOP_movsbl_r_r)
+DEFINE_MAPPING(abstract::MOP_zext_rr_32_16, x64::MOP_movzwl_r_r)
+DEFINE_MAPPING(abstract::MOP_sext_rr_32_16, x64::MOP_movswl_r_r)
+DEFINE_MAPPING(abstract::MOP_zext_rr_64_8, x64::MOP_movzbl_r_r)
+DEFINE_MAPPING(abstract::MOP_sext_rr_64_8, x64::MOP_movsbq_r_r)
+DEFINE_MAPPING(abstract::MOP_zext_rr_64_16, x64::MOP_movzwl_r_r)
+DEFINE_MAPPING(abstract::MOP_sext_rr_64_16, x64::MOP_movswq_r_r)
+DEFINE_MAPPING(abstract::MOP_zext_rr_64_32, x64::MOP_movl_r_r)
+DEFINE_MAPPING(abstract::MOP_sext_rr_64_32, x64::MOP_movslq_r_r)
+
+/* Floating CvtOp int2float */
+DEFINE_MAPPING(abstract::MOP_cvt_fr_u64, x64::MOP_cvtsi2sdq_r)
+DEFINE_MAPPING(abstract::MOP_cvt_fr_u32, x64::MOP_cvtsi2ssq_r)
+DEFINE_MAPPING(abstract::MOP_cvt_fr_i32, x64::MOP_cvtsi2ssl_r)
+DEFINE_MAPPING(abstract::MOP_cvt_fr_i64, x64::MOP_cvtsi2sdq_r)
+
+/* Floating CvtOp float2int */
+DEFINE_MAPPING(abstract::MOP_cvt_rf_u32, x64::MOP_cvttss2siq_r)
+DEFINE_MAPPING(abstract::MOP_cvt_rf_u64, x64::MOP_cvttsd2siq_r)
+DEFINE_MAPPING(abstract::MOP_cvt_rf_i32, x64::MOP_cvttss2sil_r)
+DEFINE_MAPPING(abstract::MOP_cvt_rf_i64, x64::MOP_cvttsd2siq_r)
+
+/* Floating CvtOp float2float */
+DEFINE_MAPPING(abstract::MOP_cvt_ff_64_32, x64::MOP_cvtss2sd_r)
+DEFINE_MAPPING(abstract::MOP_cvt_ff_32_64, x64::MOP_cvtsd2ss_r)
diff --git a/src/mapleall/maple_be/include/cg/x86_64/x64_args.h b/src/mapleall/maple_be/include/cg/x86_64/x64_args.h
new file mode 100644
index 0000000000000000000000000000000000000000..773750d08e6100a1d00895508c32aa88aaf1535b
--- /dev/null
+++ b/src/mapleall/maple_be/include/cg/x86_64/x64_args.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */ +#ifndef MAPLEBE_INCLUDE_CG_X64_X64_ARGS_H +#define MAPLEBE_INCLUDE_CG_X64_X64_ARGS_H + +#include "args.h" +#include "x64_isa.h" +#include "x64_cgfunc.h" +#include "x64_call_conv.h" + +namespace maplebe { +using namespace maple; +using namespace x64; + +struct ArgInfo { + X64reg reg; + MIRType *mirTy; + uint32 symSize; + uint32 stkSize; + RegType regType; + MIRSymbol *sym; + const X64SymbolAlloc *symLoc; + uint8 memPairSecondRegSize; /* struct arg requiring two regs, size of 2nd reg */ + bool doMemPairOpt; + bool createTwoStores; + bool isTwoRegParm; +}; + +class X64MoveRegArgs : public MoveRegArgs { + public: + explicit X64MoveRegArgs(CGFunc &func) : MoveRegArgs(func) {} + ~X64MoveRegArgs() override = default; + void Run() override; + + private: + void CollectRegisterArgs(std::map &argsList, std::vector &indexList, + std::map &pairReg, std::vector &numFpRegs, + std::vector &fpSize) const; + ArgInfo GetArgInfo(std::map &argsList, uint32 argIndex, + std::vector &numFpRegs, std::vector &fpSize) const; + void GenerateMovInsn(ArgInfo &argInfo, X64reg reg2); + void MoveRegisterArgs(); + void MoveVRegisterArgs(); + void LoadStackArgsToVReg(MIRSymbol &mirSym); + void MoveArgsToVReg(const CCLocInfo &ploc, MIRSymbol &mirSym); +}; +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_X64_X64_ARGS_H */ diff --git a/src/mapleall/maple_be/include/cg/x86_64/x64_call_conv.h b/src/mapleall/maple_be/include/cg/x86_64/x64_call_conv.h new file mode 100644 index 0000000000000000000000000000000000000000..7f6c8049e461efd93f47e7aa806a7c0d0fc6a7e9 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/x86_64/x64_call_conv.h @@ -0,0 +1,87 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_CG_X64_X64_CALL_CONV_H +#define MAPLEBE_INCLUDE_CG_X64_X64_CALL_CONV_H + +#include "types_def.h" +#include "becommon.h" +#include "call_conv.h" +#include "abi.h" +#include "x64_abi.h" + +namespace maplebe { +using namespace maple; +using namespace x64; + +constexpr const uint32 kMaxStructParamByReg = 4; + +class X64CallConvImpl { + public: + explicit X64CallConvImpl(BECommon &be) : beCommon(be) {} + + ~X64CallConvImpl() = default; + + void InitCCLocInfo(CCLocInfo &pLoc) const; + + /* Passing value related */ + int32 LocateNextParm(MIRType &mirType, CCLocInfo &pLoc, bool isFirst = false, MIRFunction *func = nullptr); + + /* return value related */ + int32 LocateRetVal(MIRType &retType, CCLocInfo &ploc); + + private: + X64reg AllocateGPParmRegister() { + return (nextGeneralParmRegNO < kNumIntParmRegs) ? + kIntParmRegs[nextGeneralParmRegNO++] : kRinvalid; + } + + void AllocateTwoGPParmRegisters(CCLocInfo &pLoc) { + if ((nextGeneralParmRegNO + 1) < kNumIntParmRegs) { + pLoc.reg0 = kIntParmRegs[nextGeneralParmRegNO++]; + pLoc.reg1 = kIntParmRegs[nextGeneralParmRegNO++]; + } else { + pLoc.reg0 = kRinvalid; + } + } + + X64reg AllocateSIMDFPRegister() { + return (nextFloatRegNO < kNumFloatParmRegs) ? 
+ kFloatParmRegs[nextFloatRegNO++] : kRinvalid; + } + + X64reg AllocateGPReturnRegister() { + return (nextGeneralReturnRegNO < kNumIntReturnRegs) ? + kIntReturnRegs[nextGeneralReturnRegNO++] : kRinvalid; + } + + void AllocateTwoGPReturnRegisters(CCLocInfo &pLoc) { + if ((nextGeneralReturnRegNO + 1) < kNumIntReturnRegs) { + pLoc.reg0 = kIntReturnRegs[nextGeneralReturnRegNO++]; + pLoc.reg1 = kIntReturnRegs[nextGeneralReturnRegNO++]; + } else { + pLoc.reg0 = kRinvalid; + } + } + + BECommon &beCommon; + uint64 paramNum = 0; /* number of all types of parameters processed so far */ + int32 nextGeneralParmRegNO = 0; /* number of integer parameters processed so far */ + int32 nextGeneralReturnRegNO = 0; /* number of integer return processed so far */ + int32 nextStackArgAdress = 0; + uint32 nextFloatRegNO = 0; +}; +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_X64_X64_CALL_CONV_H */ diff --git a/src/mapleall/maple_be/include/cg/x86_64/x64_cfgo.h b/src/mapleall/maple_be/include/cg/x86_64/x64_cfgo.h new file mode 100644 index 0000000000000000000000000000000000000000..78aba629f8db858f2edb25944a475b82266a716e --- /dev/null +++ b/src/mapleall/maple_be/include/cg/x86_64/x64_cfgo.h @@ -0,0 +1,39 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_CG_X64_X64_CFGO_H +#define MAPLEBE_INCLUDE_CG_X64_X64_CFGO_H + +#include "cfgo.h" + +namespace maplebe { +class X64CFGOptimizer : public CFGOptimizer { + public: + X64CFGOptimizer(CGFunc &func, MemPool &memPool) : CFGOptimizer(func, memPool) {} + ~X64CFGOptimizer() = default; + void InitOptimizePatterns() override; +}; + +class X64FlipBRPattern : public FlipBRPattern { + public: + explicit X64FlipBRPattern(CGFunc &func) : FlipBRPattern(func) {} + ~X64FlipBRPattern() = default; + + private: + uint32 GetJumpTargetIdx(const Insn &insn) override; + MOperator FlipConditionOp(MOperator flippedOp) override; +}; +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_X64_X64_CFGO_H */ \ No newline at end of file diff --git a/src/mapleall/maple_be/include/cg/x86_64/x64_cg.h b/src/mapleall/maple_be/include/cg/x86_64/x64_cg.h new file mode 100644 index 0000000000000000000000000000000000000000..5e49c9a12db8504bf3497296e41b9a57c91aa95b --- /dev/null +++ b/src/mapleall/maple_be/include/cg/x86_64/x64_cg.h @@ -0,0 +1,162 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ + +/* sub Target info & implement */ +#ifndef MAPLEBE_INCLUDE_CG_X86_64_CG_H +#define MAPLEBE_INCLUDE_CG_X86_64_CG_H + +#include "cg.h" +#include "x64_reg_info.h" +#include "x64_live.h" +#include "x64_reaching.h" +#include "x64_MPISel.h" +#include "x64_standardize.h" +#include "x64_args.h" +#include "x64_local_opt.h" +#include "x64_cfgo.h" + +namespace maplebe { +constexpr int32 kIntRegTypeNum = 5; + +class X64CG : public CG { + public: + X64CG(MIRModule &mod, const CGOptions &opts) : CG(mod, opts) {} + + static const InsnDesc kMd[x64::kMopLast]; + void EnrollTargetPhases(MaplePhaseManager *pm) const override; + + MemLayout *CreateMemLayout(MemPool &mp, BECommon &b, + MIRFunction &f, MapleAllocator &mallocator) const override { + return mp.New(b, f, mallocator); + }; + + RegisterInfo *CreateRegisterInfo(MemPool &mp, MapleAllocator &mallocator) const override { + return mp.New(mallocator); + } + + LiveAnalysis *CreateLiveAnalysis(MemPool &mp, CGFunc &f) const override { + return mp.New(f, mp); + } + ReachingDefinition *CreateReachingDefinition(MemPool &mp, CGFunc &f) const override { + return mp.New(f, mp); + } + LocalOpt *CreateLocalOpt(MemPool &mp, CGFunc &f, ReachingDefinition& rd) const override { + return mp.New(mp, f, rd); + } + MoveRegArgs *CreateMoveRegArgs(MemPool &mp, CGFunc &f) const override { + return mp.New(f); + } + + MPISel *CreateMPIsel(MemPool &mp, CGFunc &f) const override { + return mp.New(mp, f); + } + + Standardize *CreateStandardize(MemPool &mp, CGFunc &f) const override { + return mp.New(f); + } + + CFGOptimizer *CreateCFGOptimizer(MemPool &mp, CGFunc &f) const override { + return mp.New(f, mp); + } + + /* Init SubTarget optimization */ + + Insn &BuildPhiInsn(RegOperand &defOpnd, Operand &listParam) override; + + PhiOperand &CreatePhiOperand(MemPool &mp, MapleAllocator &mAllocator) override; + + CGFunc *CreateCGFunc(MIRModule &mod, MIRFunction &mirFunc, BECommon &bec, MemPool &memPool, + StackMemPool &stackMp, MapleAllocator &mallocator, uint32 funcId) override; + + bool IsExclusiveFunc(MIRFunction &mirFunc) override; + + /* NOTE: Consider making be_common a field of CG. 
*/ + void GenerateObjectMaps(BECommon &beCommon) override; + + AlignAnalysis *CreateAlignAnalysis(MemPool &mp, CGFunc &f) const override { + (void)mp; + (void)f; + return nullptr; + } + /* Init SubTarget optimization */ + CGSSAInfo *CreateCGSSAInfo(MemPool &mp, CGFunc &f, DomAnalysis &da, MemPool &tmp) const override { + (void)mp; + (void)f; + (void)da; + (void)tmp; + return nullptr; + } + LiveIntervalAnalysis *CreateLLAnalysis(MemPool &mp, CGFunc &f) const override { + (void)mp; + (void)f; + return nullptr; + }; + PhiEliminate *CreatePhiElimintor(MemPool &mp, CGFunc &f, CGSSAInfo &ssaInfo) const override { + (void)mp; + (void)f; + (void)ssaInfo; + return nullptr; + }; + CGProp *CreateCGProp(MemPool &mp, CGFunc &f, CGSSAInfo &ssaInfo, LiveIntervalAnalysis &ll) const override { + (void)mp; + (void)f; + (void)ssaInfo; + (void)ll; + return nullptr; + } + CGDce *CreateCGDce(MemPool &mp, CGFunc &f, CGSSAInfo &ssaInfo) const override { + (void)mp; + (void)f; + (void)ssaInfo; + return nullptr; + } + ValidBitOpt *CreateValidBitOpt(MemPool &mp, CGFunc &f, CGSSAInfo &ssaInfo) const override { + (void)mp; + (void)f; + (void)ssaInfo; + return nullptr; + } + RedundantComputeElim *CreateRedundantCompElim(MemPool &mp, CGFunc &f, CGSSAInfo &ssaInfo) const override { + (void)mp; + (void)f; + (void)ssaInfo; + return nullptr; + } + TailCallOpt *CreateCGTailCallOpt(MemPool &mp, CGFunc &f) const override { + (void)mp; + (void)f; + return nullptr; + } + + /* Used for GCTIB pattern merging */ + std::string FindGCTIBPatternName(const std::string &name) const override; + static std::array, kIntRegTypeNum> intRegNames; + enum : uint8 { + kR8LowList, + kR8HighList, + kR16List, + kR32List, + kR64List + }; + bool IsEffectiveCopy(Insn &insn) const final; + bool IsTargetInsn(MOperator mOp) const final; + bool IsClinitInsn(MOperator mOp) const final; + void DumpTargetOperand(Operand &opnd, const OpndDesc &opndDesc) const final; + const InsnDesc &GetTargetMd(MOperator mOp) const final { + return kMd[mOp]; + } +}; +} // namespace maplebe +#endif /* MAPLEBE_INCLUDE_CG_X86_64_CG_H */ diff --git a/src/mapleall/maple_be/include/cg/x86_64/x64_cgfunc.h b/src/mapleall/maple_be/include/cg/x86_64/x64_cgfunc.h new file mode 100644 index 0000000000000000000000000000000000000000..7bc55508ab51a93baf1623cefd7b44db852201c9 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/x86_64/x64_cgfunc.h @@ -0,0 +1,299 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MAPLEBE_INCLUDE_CG_X86_64_CGFUNC_H +#define MAPLEBE_INCLUDE_CG_X86_64_CGFUNC_H + +#include "cgfunc.h" +#include "x64_memlayout.h" +#include "x64_isa.h" +#include "x64_reg_info.h" +#include "x64_optimize_common.h" + +namespace maplebe { +class X64CGFunc : public CGFunc { + public: + X64CGFunc(MIRModule &mod, CG &c, MIRFunction &f, BECommon &b, + MemPool &memPool, StackMemPool &stackMp, MapleAllocator &mallocator, uint32 funcId) + : CGFunc(mod, c, f, b, memPool, stackMp, mallocator, funcId), + calleeSavedRegs(mallocator.Adapter()) { } + /* null implementation yet */ + InsnVisitor *NewInsnModifier() override { + return memPool->New(*this); + } + void GenSaveMethodInfoCode(BB &bb) override; + void GenerateCleanupCode(BB &bb) override; + bool NeedCleanup() override; + void GenerateCleanupCodeForExtEpilog(BB &bb) override; + uint32 FloatParamRegRequired(MIRStructType *structType, uint32 &fpSize) override; + void AssignLmbcFormalParams() override; + void LmbcGenSaveSpForAlloca() override; + void MergeReturn() override; + void DetermineReturnTypeofCall() override; + void HandleRCCall(bool begin, const MIRSymbol *retRef = nullptr) override; + void HandleRetCleanup(NaryStmtNode &retNode) override; + void SelectDassign(DassignNode &stmt, Operand &opnd0) override; + void SelectDassignoff(DassignoffNode &stmt, Operand &opnd0) override; + void SelectRegassign(RegassignNode &stmt, Operand &opnd0) override; + void SelectAbort() override; + void SelectAssertNull(UnaryStmtNode &stmt) override; + void SelectAsm(AsmNode &node) override; + void SelectAggDassign(DassignNode &stmt) override; + void SelectIassign(IassignNode &stmt) override; + void SelectIassignoff(IassignoffNode &stmt) override; + void SelectIassignfpoff(IassignFPoffNode &stmt, Operand &opnd) override; + void SelectIassignspoff(PrimType pTy, int32 offset, Operand &opnd) override; + void SelectBlkassignoff(BlkassignoffNode &bNode, Operand *src) override; + void SelectAggIassign(IassignNode &stmt, Operand &lhsAddrOpnd) override; + void SelectReturnSendOfStructInRegs(BaseNode *x) override; + void SelectReturn(Operand *opnd) override; + void SelectIgoto(Operand *opnd0) override; + void SelectCondGoto(CondGotoNode &stmt, Operand &opnd0, Operand &opnd1) override; + void SelectCondSpecialCase1(CondGotoNode &stmt, BaseNode &opnd0) override; + void SelectCondSpecialCase2(const CondGotoNode &stmt, BaseNode &opnd0) override; + void SelectGoto(GotoNode &stmt) override; + void SelectCall(CallNode &callNode) override; + void SelectIcall(IcallNode &icallNode, Operand &fptrOpnd) override; + void SelectIntrinCall(IntrinsiccallNode &intrinsiccallNode) override; + Operand *SelectIntrinsicOpWithOneParam(IntrinsicopNode &intrinopNode, std::string name) override; + Operand *SelectCclz(IntrinsicopNode &intrinopNode) override; + Operand *SelectCctz(IntrinsicopNode &intrinopNode) override; + Operand *SelectCpopcount(IntrinsicopNode &intrinopNode) override; + Operand *SelectCparity(IntrinsicopNode &intrinopNode) override; + Operand *SelectCclrsb(IntrinsicopNode &intrinopNode) override; + Operand *SelectCisaligned(IntrinsicopNode &intrinopNode) override; + Operand *SelectCalignup(IntrinsicopNode &intrinopNode) override; + Operand *SelectCaligndown(IntrinsicopNode &intrinopNode) override; + Operand *SelectCSyncFetch(IntrinsicopNode &intrinsicopNode, Opcode op, bool fetchBefore) override; + Operand *SelectCSyncSynchronize(IntrinsicopNode &intrinsicopNode) override; + Operand *SelectCAtomicLoadN(IntrinsicopNode &intrinsicopNode) override; + Operand 
*SelectCAtomicExchangeN(const IntrinsiccallNode &intrinsiccallNode) override; + Operand *SelectCAtomicFetch(IntrinsicopNode &intrinsicopNode, Opcode op, bool fetchBefore) override; + Operand *SelectCSyncBoolCmpSwap(IntrinsicopNode &intrinopNode) override; + Operand *SelectCSyncValCmpSwap(IntrinsicopNode &intrinopNode) override; + Operand *SelectCSyncLockTestSet(IntrinsicopNode &intrinopNode, PrimType pty) override; + Operand *SelectCReturnAddress(IntrinsicopNode &intrinopNode) override; + void SelectCAtomicExchange(const IntrinsiccallNode &intrinsiccallNode) override; + void SelectMembar(StmtNode &membar) override; + void SelectComment(CommentNode &comment) override; + void HandleCatch() override; + Operand *SelectDread(const BaseNode &parent, AddrofNode &expr) override; + RegOperand *SelectRegread(RegreadNode &expr) override; + Operand *SelectAddrof(AddrofNode &expr, const BaseNode &parent, bool isAddrofoff) override; + Operand *SelectAddrofoff(AddrofoffNode &expr, const BaseNode &parent) override; + Operand &SelectAddrofFunc(AddroffuncNode &expr, const BaseNode &parent) override; + Operand &SelectAddrofLabel(AddroflabelNode &expr, const BaseNode &parent) override; + Operand *SelectIread(const BaseNode &parent, IreadNode &expr, int extraOffset = 0, + PrimType finalBitFieldDestType = kPtyInvalid) override; + Operand *SelectIreadoff(const BaseNode &parent, IreadoffNode &ireadoff) override; + Operand *SelectIreadfpoff(const BaseNode &parent, IreadFPoffNode &ireadoff) override; + Operand *SelectIntConst(MIRIntConst &intConst) override; + Operand *SelectFloatConst(MIRFloatConst &floatConst, const BaseNode &parent) override; + Operand *SelectDoubleConst(MIRDoubleConst &doubleConst, const BaseNode &parent) override; + Operand *SelectStrConst(MIRStrConst &strConst) override; + Operand *SelectStr16Const(MIRStr16Const &strConst) override; + void SelectAdd(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) override; + Operand *SelectAdd(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) override; + void SelectMadd(Operand &resOpnd, Operand &opndM0, Operand &opndM1, Operand &opnd1, PrimType primType) override; + Operand *SelectMadd(BinaryNode &node, Operand &opndM0, Operand &opndM1, Operand &opnd1, + const BaseNode &parent) override; + Operand *SelectRor(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) override; + Operand &SelectCGArrayElemAdd(BinaryNode &node, const BaseNode &parent) override; + Operand *SelectShift(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) override; + void SelectMpy(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) override; + Operand *SelectMpy(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) override; + Operand *SelectRem(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) override; + void SelectDiv(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) override; + Operand *SelectDiv(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) override; + Operand *SelectSub(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) override; + void SelectSub(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) override; + Operand *SelectBand(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) override; + void SelectBand(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) override; + Operand *SelectLand(BinaryNode &node, 
Operand &opnd0, Operand &opnd1, const BaseNode &parent) override; + Operand *SelectLor(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent, + bool parentIsBr = false) override; + void SelectMin(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) override; + Operand *SelectMin(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) override; + void SelectMax(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) override; + Operand *SelectMax(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) override; + Operand *SelectCmpOp(CompareNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) override; + Operand *SelectBior(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) override; + void SelectBior(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) override; + Operand *SelectBxor(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) override; + void SelectBxor(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) override; + Operand *SelectAbs(UnaryNode &node, Operand &opnd0) override; + Operand *SelectBnot(UnaryNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectExtractbits(ExtractbitsNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectDepositBits(DepositbitsNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) override; + Operand *SelectRegularBitFieldLoad(ExtractbitsNode &node, const BaseNode &parent) override; + Operand *SelectLnot(UnaryNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectNeg(UnaryNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectRecip(UnaryNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectSqrt(UnaryNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCeil(TypeCvtNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectFloor(TypeCvtNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectRetype(TypeCvtNode &node, Operand &opnd0) override; + Operand *SelectRound(TypeCvtNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectCvt(const BaseNode &parent, TypeCvtNode &node, Operand &opnd0) override; + Operand *SelectBswap(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectTrunc(TypeCvtNode &node, Operand &opnd0, const BaseNode &parent) override; + Operand *SelectSelect(TernaryNode &node, Operand &cond, Operand &opnd0, Operand &opnd1, + const BaseNode &parent, bool hasCompare = false) override; + Operand *SelectMalloc(UnaryNode &call, Operand &opnd0) override; + RegOperand &SelectCopy(Operand &src, PrimType srcType, PrimType dstType) override; + Operand *SelectAlloca(UnaryNode &call, Operand &opnd0) override; + Operand *SelectGCMalloc(GCMallocNode &call) override; + Operand *SelectJarrayMalloc(JarrayMallocNode &call, Operand &opnd0) override; + void SelectRangeGoto(RangeGotoNode &rangeGotoNode, Operand &opnd0) override; + Operand *SelectLazyLoad(Operand &opnd0, PrimType primType) override; + Operand *SelectLazyLoadStatic(MIRSymbol &st, int64 offset, PrimType primType) override; + Operand *SelectLoadArrayClassCache(MIRSymbol &st, int64 offset, PrimType primType) override; + void GenerateYieldpoint(BB &bb) override; + Operand &ProcessReturnReg(PrimType primType, int32 sReg) override; + Operand &GetOrCreateRflag() override; + 
const Operand *GetRflag() const override; + const Operand *GetFloatRflag() const override; + const LabelOperand *GetLabelOperand(LabelIdx labIdx) const override; + LabelOperand &GetOrCreateLabelOperand(LabelIdx labIdx) override; + LabelOperand &GetOrCreateLabelOperand(BB &bb) override; + RegOperand &CreateVirtualRegisterOperand(regno_t vRegNO) override; + RegOperand &GetOrCreateVirtualRegisterOperand(regno_t vRegNO) override; + RegOperand &GetOrCreateVirtualRegisterOperand(RegOperand ®Opnd) override; + RegOperand &GetOrCreateFramePointerRegOperand() override; + RegOperand &GetOrCreateStackBaseRegOperand() override; + RegOperand &GetZeroOpnd(uint32 size) override; + Operand &CreateCfiRegOperand(uint32 reg, uint32 size) override; + Operand &GetTargetRetOperand(PrimType primType, int32 sReg) override; + Operand &CreateImmOperand(PrimType primType, int64 val) override; + void ReplaceOpndInInsn(RegOperand ®Dest, RegOperand ®Src, Insn &insn, regno_t regno) override; + void CleanupDeadMov(bool dump = false) override; + void GetRealCallerSaveRegs(const Insn &insn, std::set &realCallerSave) override; + bool IsFrameReg(const RegOperand &opnd) const override; + RegOperand *SelectVectorAddLong(PrimType rTy, Operand *o1, Operand *o2, PrimType oty, bool isLow) override; + RegOperand *SelectVectorAddWiden(Operand *o1, PrimType oty1, Operand *o2, PrimType oty2, bool isLow) override; + RegOperand *SelectVectorAbs(PrimType rType, Operand *o1) override; + RegOperand *SelectVectorBinOp(PrimType rType, Operand *o1, PrimType oTyp1, Operand *o2, + PrimType oTyp2, Opcode opc) override; + RegOperand *SelectVectorBitwiseOp(PrimType rType, Operand *o1, PrimType oty1, Operand *o2, + PrimType oty2, Opcode opc) override;; + RegOperand *SelectVectorCompareZero(Operand *o1, PrimType oty1, Operand *o2, Opcode opc) override; + RegOperand *SelectVectorCompare(Operand *o1, PrimType oty1, Operand *o2, PrimType oty2, Opcode opc) override; + RegOperand *SelectVectorFromScalar(PrimType pType, Operand *opnd, PrimType sType) override; + RegOperand *SelectVectorDup(PrimType rType, Operand *src, bool getLow) override; + RegOperand *SelectVectorGetElement(PrimType rType, Operand *src, PrimType sType, int32 lane) override; + RegOperand *SelectVectorAbsSubL(PrimType rType, Operand *o1, Operand *o2, PrimType oTy, bool isLow) override; + RegOperand *SelectVectorMadd(Operand *o1, PrimType oTyp1, Operand *o2, PrimType oTyp2, Operand *o3, + PrimType oTyp3) override; + RegOperand *SelectVectorMerge(PrimType rTyp, Operand *o1, Operand *o2, int32 iNum) override; + RegOperand *SelectVectorMull(PrimType rType, Operand *o1, PrimType oTyp1, Operand *o2, PrimType oTyp2, + bool isLow) override; + RegOperand *SelectVectorNarrow(PrimType rType, Operand *o1, PrimType otyp) override; + RegOperand *SelectVectorNarrow2(PrimType rType, Operand *o1, PrimType oty1, Operand *o2, PrimType oty2) override; + RegOperand *SelectVectorNeg(PrimType rType, Operand *o1) override; + RegOperand *SelectVectorNot(PrimType rType, Operand *o1) override; + RegOperand *SelectVectorPairwiseAdalp(Operand *src1, PrimType sty1, Operand *src2, PrimType sty2) override; + RegOperand *SelectVectorPairwiseAdd(PrimType rType, Operand *src, PrimType sType) override; + RegOperand *SelectVectorReverse(PrimType rtype, Operand *src, PrimType stype, uint32 size) override; + RegOperand *SelectVectorSetElement(Operand *eOp, PrimType eTyp, Operand *vOpd, PrimType vTyp, + int32 lane) override; + RegOperand *SelectVectorShift(PrimType rType, Operand *o1, PrimType oty1, Operand *o2, PrimType oty2, 
+ Opcode opc) override; + RegOperand *SelectVectorShiftImm(PrimType rType, Operand *o1, Operand *imm, int32 sVal, Opcode opc) override; + RegOperand *SelectVectorShiftRNarrow(PrimType rType, Operand *o1, PrimType oType, Operand *o2, bool isLow) override; + RegOperand *SelectVectorSubWiden(PrimType resType, Operand *o1, PrimType otyp1, Operand *o2, PrimType otyp2, + bool isLow, bool isWide) override; + RegOperand *SelectVectorSum(PrimType rtype, Operand *o1, PrimType oType) override; + RegOperand *SelectVectorTableLookup(PrimType rType, Operand *o1, Operand *o2) override; + RegOperand *SelectVectorWiden(PrimType rType, Operand *o1, PrimType otyp, bool isLow) override; + RegOperand *SelectVectorMovNarrow(PrimType rType, Operand *opnd, PrimType oType) override; + Operand *SelectIntrinsicOpWithNParams(IntrinsicopNode &intrinopNode, PrimType retType, + const std::string &name) override; + void ProcessLazyBinding() override; + void DBGFixCallFrameLocationOffsets() override; + MemOperand *GetPseudoRegisterSpillMemoryOperand(PregIdx idx) override; + + RegOperand *GetBaseReg(const SymbolAlloc &symAlloc) override; + int32 GetBaseOffset(const SymbolAlloc &symbolAlloc) override; + + void AddtoCalleeSaved(regno_t reg) override { + const auto &[_, flag] = calleeSavedRegs.insert(static_cast(reg)); + ASSERT((IsGPRegister(static_cast(reg)) || + IsFPSIMDRegister(static_cast(reg))), "Int or FP registers are expected"); + if (flag) { + if (IsGPRegister(static_cast(reg))) { + ++numIntregToCalleeSave; + } else { + ++numFpregToCalleeSave; + } + } + } + + const MapleSet &GetCalleeSavedRegs() const { + return calleeSavedRegs; + } + + uint32 SizeOfCalleeSaved() const { + uint32 size = numIntregToCalleeSave * kIntregBytelen + numFpregToCalleeSave * kFpregBytelen; + return RoundUp(size, GetMemlayout()->GetStackPtrAlignment()); + } + + bool IsSPOrFP(const RegOperand &opnd) const override { + (void)opnd; + return false; + }; + bool IsReturnReg(const RegOperand &opnd) const override { + (void)opnd; + return false; + } + bool IsSaveReg(const RegOperand ®, MIRType &mirType, BECommon &cgBeCommon) const override { + (void)reg; + (void)mirType; + (void)cgBeCommon; + return false; + }; + + MemOperand *GetOrCreatSpillMem(regno_t vrNum) override; + void FreeSpillRegMem(regno_t vrNum) override; + int64 GetOrCreatSpillRegLocation(regno_t vrNum) { + auto symLoc = GetMemlayout()->GetLocOfSpillRegister(vrNum); + return static_cast(GetBaseOffset(*symLoc)); + } + private: + MapleSet calleeSavedRegs; + uint32 numIntregToCalleeSave = 0; + uint32 numFpregToCalleeSave = 0; +}; + +class X64OpndDumpVisitor : public OpndDumpVisitor { + public: + explicit X64OpndDumpVisitor(const OpndDesc &operandDesc) : OpndDumpVisitor(operandDesc) {}; + ~X64OpndDumpVisitor() override = default; + + private: + void Visit(RegOperand *v) final; + void Visit(ImmOperand *v) final; + void Visit(MemOperand *v) final; + void Visit(ListOperand *v) final; + void Visit(CondOperand *v) final; + void Visit(CommentOperand *v) final; + void Visit(StImmOperand *v) final; + void Visit(BitShiftOperand *v) final; + void Visit(ExtendShiftOperand *v) final; + void Visit(LabelOperand *v) final; + void Visit(FuncNameOperand *v) final; + void Visit(PhiOperand *v) final; + void DumpRegInfo(RegOperand &v); +}; +} /* namespace maplebe */ +#endif /* MAPLEBE_INCLUDE_CG_X86_64_CGFUNC_H */ diff --git a/src/mapleall/maple_be/include/cg/x86_64/x64_emitter.h b/src/mapleall/maple_be/include/cg/x86_64/x64_emitter.h new file mode 100644 index 
0000000000000000000000000000000000000000..4970611d8f8fa893847d0106ac2c6d0771319510 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/x86_64/x64_emitter.h @@ -0,0 +1,60 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_CG_X86_64_EMITTER_H +#define MAPLEBE_INCLUDE_CG_X86_64_EMITTER_H + +#include "asm_emit.h" +#include "visitor_common.h" +#include "operand.h" + +namespace maplebe { + +class X64Emitter : public AsmEmitter { + public: + X64Emitter(CG &cg, const std::string &asmFileName) : AsmEmitter(cg, asmFileName) {} + ~X64Emitter() = default; + + void EmitRefToMethodDesc(FuncEmitInfo &funcEmitInfo, Emitter &emitter) override; + void EmitRefToMethodInfo(FuncEmitInfo &funcEmitInfo, Emitter &emitter) override; + void EmitMethodDesc(FuncEmitInfo &funcEmitInfo, Emitter &emitter) override; + void EmitFastLSDA(FuncEmitInfo &funcEmitInfo) override; + void EmitFullLSDA(FuncEmitInfo &funcEmitInfo) override; + void EmitBBHeaderLabel(FuncEmitInfo &funcEmitInfo, const std::string &name, LabelIdx labIdx) override; + void EmitJavaInsnAddr(FuncEmitInfo &funcEmitInfo) override; + void Run(FuncEmitInfo &funcEmitInfo) override; +}; + +class X64OpndEmitVisitor : public OpndEmitVisitor { + public: + X64OpndEmitVisitor(Emitter &emitter) : OpndEmitVisitor(emitter) {} + ~X64OpndEmitVisitor() override = default; + + void Visit(RegOperand *v) final; + void Visit(ImmOperand *v) final; + void Visit(MemOperand *v) final; + void Visit(FuncNameOperand *v) final; + void Visit(LabelOperand *v) final; + void Visit(ListOperand *v) final; + void Visit(StImmOperand *v) final; + void Visit(CondOperand *v) final; + void Visit(BitShiftOperand *v) final; + void Visit(ExtendShiftOperand *v) final; + void Visit(CommentOperand *v) final; + void Visit(OfstOperand *v) final; +}; + +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_X86_64_EMITTER_H */ diff --git a/src/mapleall/maple_be/include/cg/x86_64/x64_fp_simd_regs.def b/src/mapleall/maple_be/include/cg/x86_64/x64_fp_simd_regs.def new file mode 100644 index 0000000000000000000000000000000000000000..7c4dafda4ac80fec5f9967d03eae1ab7256ebeec --- /dev/null +++ b/src/mapleall/maple_be/include/cg/x86_64/x64_fp_simd_regs.def @@ -0,0 +1,49 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan PSL v1. + * You can use this software according to the terms and conditions of the Mulan PSL v1. + * You may obtain a copy of Mulan PSL v1 at: + * + * http://license.coscl.org.cn/MulanPSL + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v1 for more details. 
+ */
+/*
+ * - %xmm0–%xmm1 used to pass and return floating point arguments
+ * - %xmm2–%xmm7 used to pass floating point arguments
+ */
+
+/*
+ * ID, prefixes: 8-bit, 16-bit, 32-bit, 64-bit, 128-bit, canBeAssigned, isCalleeSave, isParam, isSpill, isExtraSpill
+ */
+/*XMM0 ~ XMM15*/
+FP_SIMD_REG(0 , "B", "H", "S", "D", "Q", true, false, true, false, false)
+FP_SIMD_REG(1 , "B", "H", "S", "D", "Q", true, false, true, false, false)
+FP_SIMD_REG(2 , "B", "H", "S", "D", "Q", true, false, true, false, false)
+FP_SIMD_REG(3 , "B", "H", "S", "D", "Q", true, false, true, false, false)
+FP_SIMD_REG(4 , "B", "H", "S", "D", "Q", true, false, true, false, false)
+FP_SIMD_REG(5 , "B", "H", "S", "D", "Q", true, false, true, false, false)
+FP_SIMD_REG(6 , "B", "H", "S", "D", "Q", true, false, true, false, false)
+FP_SIMD_REG(7 , "B", "H", "S", "D", "Q", true, false, true, false, false)
+FP_SIMD_REG(8 , "B", "H", "S", "D", "Q", true, false, false, false, false)
+FP_SIMD_REG(9 , "B", "H", "S", "D", "Q", true, false, false, false, false)
+FP_SIMD_REG(10, "B", "H", "S", "D", "Q", true, false, false, false, false)
+FP_SIMD_REG(11, "B", "H", "S", "D", "Q", true, false, false, false, false)
+FP_SIMD_REG(12, "B", "H", "S", "D", "Q", true, false, false, false, false)
+FP_SIMD_REG(13, "B", "H", "S", "D", "Q", true, false, false, false, false)
+FP_SIMD_REG(14, "B", "H", "S", "D", "Q", true, false, false, false, false)
+FP_SIMD_REG(15, "B", "H", "S", "D", "Q", true, false, false, false, false)
+/*ST0 ~ ST7*/
+FP_SIMD_REG(16, "B", "H", "S", "D", "Q", true, false, false, false, false)
+FP_SIMD_REG(17, "B", "H", "S", "D", "Q", true, false, false, false, false)
+FP_SIMD_REG(18, "B", "H", "S", "D", "Q", true, false, false, false, false)
+FP_SIMD_REG(19, "B", "H", "S", "D", "Q", true, false, false, false, false)
+FP_SIMD_REG(20, "B", "H", "S", "D", "Q", true, false, false, false, false)
+FP_SIMD_REG(21, "B", "H", "S", "D", "Q", true, false, false, false, false)
+FP_SIMD_REG(22, "B", "H", "S", "D", "Q", true, false, false, false, false)
+FP_SIMD_REG(23, "B", "H", "S", "D", "Q", true, false, false, false, false)
\ No newline at end of file
diff --git a/src/mapleall/maple_be/include/cg/x86_64/x64_int_regs.def b/src/mapleall/maple_be/include/cg/x86_64/x64_int_regs.def
new file mode 100644
index 0000000000000000000000000000000000000000..10426073d7c4daf8338c6b0e21bb56f118fde866
--- /dev/null
+++ b/src/mapleall/maple_be/include/cg/x86_64/x64_int_regs.def
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under the Mulan PSL v1.
+ * You can use this software according to the terms and conditions of the Mulan PSL v1.
+ * You may obtain a copy of Mulan PSL v1 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v1 for more details.
+ */
+/*
+ * Registers in x86-64
+ *
+ * - caller-save registers: %rax, %rcx, %rdx, %rdi, %rsi, and %r8-%r11
+ * - callee-saved registers: %r12, %r13, %r14, %r15, %rbx, %rsp, %rbp.
+ * - In contrast to the Intel386 ABI, %rdi and %rsi in x86-64 belong to the called function, not
+ *   the caller, so they are caller-save registers.
+ * - User-level applications pass integer arguments in the sequence %rdi, %rsi, %rdx, %rcx,
+ * %r8 and %r9.
The kernel interface uses %rdi, %rsi, %rdx, %r10, %r8 and %r9. + * - the sequence %rax, %rdx is used to return INTEGER, + * - rdx is used to pass 3rd argument to functions; 2nd return register + * - %r11 is neither required to be preserved, nor is it used to pass arguments + */ +/* ID, 8-bit prefix, 8-16 bit prefix, 16-bit prefix, 32-bit prefix, 64-bit prefix, canBeAssigned, isCalleeSave, isParam, isSpill, isExtraSpill */ +INT_REG(0 , "BL", "BH", "W", "L", "Q", true, false, false, false, false) +INT_REG(1 , "BL", "BH", "W", "L", "Q", true, true, false, false, false) +INT_REG(2 , "BL", "BH", "W", "L", "Q", true, false, true, false, false) +INT_REG(3 , "BL", "BH", "W", "L", "Q", true, false, true, false, false) +INT_REG(4 , "B", "", "W", "L", "Q", false, false, false, false, false) +INT_REG(5 , "B", "", "W", "L", "Q", false, true, false, false, false) +INT_REG(6 , "B", "", "W", "L", "Q", true, false, true, false, false) +INT_REG(7 , "B", "", "W", "L", "Q", true, false, true, false, false) +INT_REG(8 , "B", "", "W", "L", "Q", true, false, true, false, false) +INT_REG(9 , "B", "", "W", "L", "Q", true, false, true, false, false) +INT_REG(10, "B", "", "W", "L", "Q", true, false, false, true, false) +INT_REG(11, "B", "", "W", "L", "Q", true, false, false, true, false) +INT_REG(12, "B", "", "W", "L", "Q", true, true, false, false, false) +INT_REG(13, "B", "", "W", "L", "Q", true, true, false, false, false) +INT_REG(14, "B", "", "W", "L", "Q", true, true, false, false, false) +INT_REG(15, "B", "", "W", "L", "Q", true, true, false, false, false) +/* instruction pointer */ +INT_REG(16, "B", "", "W", "L", "Q", false, false, false, false, false) + +/* Alias */ +INT_REG_ALIAS(AX, 0) +INT_REG_ALIAS(BX, 1) +INT_REG_ALIAS(CX, 2) +INT_REG_ALIAS(DX, 3) +INT_REG_ALIAS(SP, 4) +INT_REG_ALIAS(BP, 5) +INT_REG_ALIAS(SI, 6) +INT_REG_ALIAS(DI, 7) + +INT_REG_ALIAS(FP, 5) +INT_REG_ALIAS(YP, 12) +INT_REG_ALIAS(IP, 16) +INT_REG_ALIAS(LAST_GP_REG, 16) + diff --git a/src/mapleall/maple_be/include/cg/x86_64/x64_isa.h b/src/mapleall/maple_be/include/cg/x86_64/x64_isa.h new file mode 100644 index 0000000000000000000000000000000000000000..505a717837c5a837a750dd1da07b92446f4493e3 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/x86_64/x64_isa.h @@ -0,0 +1,123 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_CG_X64_X64_ISA_H +#define MAPLEBE_INCLUDE_CG_X64_X64_ISA_H + +#include "operand.h" +#include "mad.h" +#include "isa.h" + +namespace maplebe { +/* + * X64 Architecture Reference Manual + */ +constexpr int kX64StackPtrAlignment = 16; + +constexpr int32 kOffsetAlign = 8; +constexpr uint32 kIntregBytelen = 8; /* 64-bit */ +constexpr uint32 kFpregBytelen = 8; /* only lower 64 bits are used */ +constexpr int kSizeOfFplr = 16; + +class Insn; + +namespace x64 { +/* machine instruction description */ +#define DEFINE_MOP(op, ...) 
op, +enum X64MOP_t : maple::uint32 { +#include "abstract_mmir.def" +#include "x64_md.def" + kMopLast +}; +#undef DEFINE_MOP + +/* Registers in x64 state */ +enum X64reg : uint32 { + kRinvalid = kInvalidRegNO, +/* integer registers */ +#define INT_REG(ID, PREF8, PREF8_16, PREF16, PREF32, PREF64, canBeAssigned, isCalleeSave, \ + isParam, isSpill, isExtraSpill) R##ID, +#define INT_REG_ALIAS(ALIAS, ID) +#include "x64_int_regs.def" +#undef INT_REG +#undef INT_REG_ALIAS +/* fp-simd registers */ +#define FP_SIMD_REG(ID, P8, P16, P32, P64, P128, canBeAssigned, isCalleeSave, \ + isParam, isSpill, isExtraSpill) V##ID, +#include "x64_fp_simd_regs.def" +#undef FP_SIMD_REG + kMaxRegNum, + kRFLAG, + kAllRegNum, +/* integer registers alias */ +#define INT_REG(ID, PREF8, PREF8_16, PREF16, PREF32, PREF64, canBeAssigned, isCalleeSave, \ + isParam, isSpill, isExtraSpill) +#define INT_REG_ALIAS(ALIAS, ID) R##ALIAS = R##ID, +#include "x64_int_regs.def" +#undef INT_REG +#undef INT_REG_ALIAS +}; + +static inline bool IsGPRegister(X64reg r) { + return R0 <= r && r <= RLAST_GP_REG; +} + +static inline bool IsFPSIMDRegister(X64reg r) { + return V0 <= r && r <= V23; +} + +static inline bool IsFPRegister(X64reg r) { + return V0 <= r && r <= V7; +} + +static inline bool IsSIMDRegister(X64reg r) { + return V8 <= r && r <= V23; +} + +static inline bool IsPhysicalRegister(regno_t r) { + return r < kMaxRegNum; +} + +static inline RegType GetRegType(X64reg r) { + if (IsGPRegister(r)) { + return kRegTyInt; + } + if (IsFPSIMDRegister(r)) { + return kRegTyFloat; + } + ASSERT(false, "No suitable register type to return?"); + return kRegTyUndef; +} +/* + * Precondition: The given insn is a jump instruction. + * Get the jump target label operand index from the given instruction. + * Note: MOP_jmp_m, MOP_jmp_r is a jump instruction, but the target is unknown at compile time. + */ +uint32 GetJumpTargetIdx(const Insn &insn); + +MOperator FlipConditionOp(MOperator flippedOp); +} /* namespace x64 */ + +/* + * We save callee-saved registers from lower stack area to upper stack area. + * If possible, we store a pair of registers (int/int and fp/fp) in the stack. + * The Stack Pointer has to be aligned at 16-byte boundary. + * On X64, kIntregBytelen == 8 (see the above) + */ +inline void GetNextOffsetCalleeSaved(int &offset) { + offset += (kIntregBytelen << 1); +} +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_X64_X64_ISA_H */ diff --git a/src/mapleall/maple_be/include/cg/x86_64/x64_isa_tbl.h b/src/mapleall/maple_be/include/cg/x86_64/x64_isa_tbl.h new file mode 100644 index 0000000000000000000000000000000000000000..59708ac59fb3e7795e09a6e3426d33a19f6911cd --- /dev/null +++ b/src/mapleall/maple_be/include/cg/x86_64/x64_isa_tbl.h @@ -0,0 +1,125 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MAPLEBE_INCLUDE_CG_X64_X64_ISA_TBL_H +#define MAPLEBE_INCLUDE_CG_X64_X64_ISA_TBL_H + +#include "x64_isa.h" +#include "operand.h" + +namespace maplebe { + +namespace x64 { + /* register, imm , memory, cond */ +#define DEF_X64_CMP_MAPPING_INT(SIZE) \ +static const X64MOP_t cmpIselMap##SIZE[Operand::OperandType::kOpdPhi][Operand::OperandType::kOpdPhi] = {\ + {MOP_cmp##SIZE##_r_r, MOP_begin, MOP_cmp##SIZE##_r_m, MOP_begin}, \ + {MOP_cmp##SIZE##_i_r, MOP_begin, MOP_cmp##SIZE##_i_m, MOP_begin}, \ + {MOP_cmp##SIZE##_m_r, MOP_begin, MOP_begin, MOP_begin}, \ + {MOP_begin, MOP_begin, MOP_begin, MOP_begin}, \ +}; +DEF_X64_CMP_MAPPING_INT(b) +DEF_X64_CMP_MAPPING_INT(w) +DEF_X64_CMP_MAPPING_INT(l) +DEF_X64_CMP_MAPPING_INT(q) + +static inline X64MOP_t GetCmpMop(Operand::OperandType dTy, Operand::OperandType sTy, PrimType primType) { + X64MOP_t cmpOp = MOP_begin; + switch (GetPrimTypeBitSize(primType)) { + case k8BitSize: + cmpOp = cmpIselMapb[sTy][dTy]; + break; + case k16BitSize: + cmpOp = cmpIselMapw[sTy][dTy]; + break; + case k32BitSize: + cmpOp = cmpIselMapl[sTy][dTy]; + break; + case k64BitSize: + cmpOp = cmpIselMapq[sTy][dTy]; + break; + default: + cmpOp= MOP_begin; + break; + } + return cmpOp; +} + + /* {OPCODE, {register, imm , memory, cond}} */ +#define DEF_X64_SET_MAPPING_INT(OPCODE, TYPE) \ +{OPCODE, {x64::MOP_##TYPE##_r, x64::MOP_begin, x64::MOP_##TYPE##_m, x64::MOP_begin}} + +using SetIselMappingType = std::unordered_map>; +static const SetIselMappingType setUnsignedIselMapping = { + DEF_X64_SET_MAPPING_INT(OP_le, setbe), + DEF_X64_SET_MAPPING_INT(OP_ge, setae), + DEF_X64_SET_MAPPING_INT(OP_gt, seta), + DEF_X64_SET_MAPPING_INT(OP_lt, setb), + DEF_X64_SET_MAPPING_INT(OP_ne, setne), + DEF_X64_SET_MAPPING_INT(OP_eq, sete), +}; +static const SetIselMappingType setSignedIselMapping = { + DEF_X64_SET_MAPPING_INT(OP_le, setle), + DEF_X64_SET_MAPPING_INT(OP_ge, setge), + DEF_X64_SET_MAPPING_INT(OP_gt, setg), + DEF_X64_SET_MAPPING_INT(OP_lt, setl), + DEF_X64_SET_MAPPING_INT(OP_ne, setne), + DEF_X64_SET_MAPPING_INT(OP_eq, sete), +}; +#undef DEF_X64_SET_MAPPING_INT + +static inline X64MOP_t GetSetCCMop(maple::Opcode opcode, Operand::OperandType dTy, bool isSigned) { + ASSERT(dTy < Operand::OperandType::kOpdPhi, "illegal operand type"); + const SetIselMappingType& setIselMapping = isSigned ? setSignedIselMapping : + setUnsignedIselMapping; + auto iter = setIselMapping.find(opcode); + if (iter == setIselMapping.end()) { + return x64::MOP_begin; + } + return iter->second[dTy]; +} + +#define DEF_X64_CMOV_MAPPING_INT(OPCODE, TYPE) \ +{OPCODE, {x64::MOP_begin, x64::MOP_##TYPE##w_r_r, x64::MOP_##TYPE##l_r_r, x64::MOP_##TYPE##q_r_r}} +using CMovIselMappingType = std::unordered_map>; +static const CMovIselMappingType cmovUnsignedIselMapping = { + DEF_X64_CMOV_MAPPING_INT(OP_le, cmovbe), + DEF_X64_CMOV_MAPPING_INT(OP_ge, cmovae), + DEF_X64_CMOV_MAPPING_INT(OP_gt, cmova), + DEF_X64_CMOV_MAPPING_INT(OP_lt, cmovb), + DEF_X64_CMOV_MAPPING_INT(OP_ne, cmovne), + DEF_X64_CMOV_MAPPING_INT(OP_eq, cmove), +}; +static const CMovIselMappingType cmovSignedIselMapping = { + DEF_X64_CMOV_MAPPING_INT(OP_le, cmovle), + DEF_X64_CMOV_MAPPING_INT(OP_ge, cmovge), + DEF_X64_CMOV_MAPPING_INT(OP_gt, cmovg), + DEF_X64_CMOV_MAPPING_INT(OP_lt, cmovl), + DEF_X64_CMOV_MAPPING_INT(OP_ne, cmovne), + DEF_X64_CMOV_MAPPING_INT(OP_eq, cmove), +}; +#undef DEF_X64_CMOV_MAPPING_INT + +static inline X64MOP_t GetCMovCCMop(maple::Opcode opcode, int32 bitSize, bool isSigned) { + const auto &cmovIselMapping = isSigned ? 
cmovSignedIselMapping : cmovUnsignedIselMapping; + auto iter = cmovIselMapping.find(opcode); + if (iter == cmovIselMapping.end()) { + return x64::MOP_begin; + } + return iter->second[GetBitIndex(bitSize)]; +} +} +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_X64_X64_ISA_TBL_H */ diff --git a/src/mapleall/maple_be/include/cg/x86_64/x64_live.h b/src/mapleall/maple_be/include/cg/x86_64/x64_live.h new file mode 100644 index 0000000000000000000000000000000000000000..8ccdb9c1c89d9feb70a4f10ad471145b519ce33f --- /dev/null +++ b/src/mapleall/maple_be/include/cg/x86_64/x64_live.h @@ -0,0 +1,32 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_CG_X64_X64_LIVE_H +#define MAPLEBE_INCLUDE_CG_X64_X64_LIVE_H + +#include "live.h" + +namespace maplebe { +class X64LiveAnalysis : public LiveAnalysis { + public: + X64LiveAnalysis(CGFunc &func, MemPool &memPool) : LiveAnalysis(func, memPool) {} + ~X64LiveAnalysis() override = default; + bool CleanupBBIgnoreReg(regno_t reg) override; + void InitEhDefine(BB &bb) override {}; + void GenerateReturnBBDefUse(BB &bb) const override {}; + void ProcessCallInsnParam(BB &bb, const Insn &insn) const override {}; +}; +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_X64_X64_LIVE_H */ diff --git a/src/mapleall/maple_be/include/cg/x86_64/x64_local_opt.h b/src/mapleall/maple_be/include/cg/x86_64/x64_local_opt.h new file mode 100644 index 0000000000000000000000000000000000000000..af7df9c4c72d13f4295eada723f4ea470a8e534e --- /dev/null +++ b/src/mapleall/maple_be/include/cg/x86_64/x64_local_opt.h @@ -0,0 +1,49 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ + +#ifndef MAPLEBE_INCLUDE_X64_LOCALO_H +#define MAPLEBE_INCLUDE_X64_LOCALO_H + +#include "local_opt.h" +namespace maplebe{ +class X64LocalOpt : public LocalOpt { + public: + X64LocalOpt(MemPool &memPool, CGFunc &func, ReachingDefinition& rd) + : LocalOpt(memPool, func, rd){} + ~X64LocalOpt() = default; + private: + void DoLocalCopyProp() override; +}; + +class CopyRegProp : public LocalPropOptimizePattern { + public: + CopyRegProp(CGFunc &cgFunc, ReachingDefinition &rd) : LocalPropOptimizePattern(cgFunc, rd) {} + ~CopyRegProp() override = default; + bool CheckCondition(Insn &insn) final; + void Optimize(BB &bb, Insn &insn) final; + private: + bool propagateOperand(Insn &insn, RegOperand& oldOpnd, RegOperand& replaceOpnd); +}; + +class X64RedundantDefRemove : public RedundantDefRemove { + public: + X64RedundantDefRemove(CGFunc &cgFunc, ReachingDefinition &rd) : RedundantDefRemove(cgFunc, rd) {} + ~X64RedundantDefRemove() override = default; + void Optimize(BB &bb, Insn &insn) final; +}; + +} + +#endif /* MAPLEBE_INCLUDE_X64_LOCALO_H */ diff --git a/src/mapleall/maple_be/include/cg/x86_64/x64_md.def b/src/mapleall/maple_be/include/cg/x86_64/x64_md.def new file mode 100644 index 0000000000000000000000000000000000000000..1b778335947749974631f1f7d3af08ae8b7302f6 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/x86_64/x64_md.def @@ -0,0 +1,522 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan PSL v1. + * You can use this software according to the terms and conditions of the Mulan PSL v1. + * You may obtain a copy of Mulan PSL v1 at: + * + * http://license.coscl.org.cn/MulanPSL + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v1 for more details. + */ +/* {mop, opnds, prop, latency, name, format, length} */ +/* begin machine operation code of X86_64 instruction , */ +DEFINE_MOP(MOP_begin, {},0,0,"","",0) + +/* # Definitions + * use x64 style b/w/l/q for 8b/16b/32b/64b operation + * and using AT&T style assembly + */ + +/* X64 MOVES */ +// TODO: fix intruction opnds, prop, latency, format and length +// TODO: the encoding and enumeration seems too verbose +// TODO: understand how other system represent these MOPs (especially for x86-64) +// TODO: this is still an experiment +// TODO: should make sure the convention is consistent with (AT&T style?) +// TODO: how the general Machine instruction is designed? 
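+
+/*
+ * How these rows are consumed (illustrative sketch): x64_isa.h re-includes this
+ * file inside enum X64MOP_t after defining
+ *
+ *     #define DEFINE_MOP(op, ...) op,
+ *
+ * so each row contributes one enumerator, while the remaining fields populate the
+ * per-instruction description table. The consumer macro is variadic because the
+ * operand-descriptor field is a braced list whose inner commas would otherwise be
+ * split into separate macro arguments. Reading one row against the field layout
+ * above, DEFINE_MOP(MOP_movl_i_r, {&OpndDesc::Imm32,&OpndDesc::Reg32ID},
+ * ISMOVE,kLtAlu,"movl","0,1",1) describes a 32-bit immediate-to-register move:
+ * operand 0 is the immediate source, operand 1 the register destination, and the
+ * "0,1" format string is expected to produce AT&T-style source-first output such
+ * as movl $1, %eax.
+ */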
+ +/* mov */ +DEFINE_MOP(MOP_movb_r_r, {&OpndDesc::Reg8IS,&OpndDesc::Reg8ID},ISMOVE,kLtAlu,"movb","0,1",1) +DEFINE_MOP(MOP_movb_i_r, {&OpndDesc::Imm8,&OpndDesc::Reg8ID},ISMOVE,kLtAlu,"movb","0,1",1) +DEFINE_MOP(MOP_movb_m_r, {&OpndDesc::Mem8S,&OpndDesc::Reg8ID},ISLOAD,kLtAlu,"movb","0,1",1) +DEFINE_MOP(MOP_movb_r_m, {&OpndDesc::Reg8IS,&OpndDesc::Mem8D},ISSTORE,kLtAlu,"movb","0,1",1) +DEFINE_MOP(MOP_movb_i_m, {&OpndDesc::Imm8,&OpndDesc::Mem8D},ISSTORE,kLtAlu,"movb","0,1",1) + +DEFINE_MOP(MOP_movw_r_r, {&OpndDesc::Reg16IS,&OpndDesc::Reg16ID},ISMOVE,kLtAlu,"movw","0,1",1) +DEFINE_MOP(MOP_movw_i_r, {&OpndDesc::Imm16,&OpndDesc::Reg16ID},ISMOVE,kLtAlu,"movw","0,1",1) +DEFINE_MOP(MOP_movw_m_r, {&OpndDesc::Mem16S,&OpndDesc::Reg16ID},ISLOAD,kLtAlu,"movw","0,1",1) +DEFINE_MOP(MOP_movw_r_m, {&OpndDesc::Reg16IS,&OpndDesc::Mem16D},ISSTORE,kLtAlu,"movw","0,1",1) +DEFINE_MOP(MOP_movw_i_m, {&OpndDesc::Imm16,&OpndDesc::Mem16D},ISSTORE,kLtAlu,"movw","0,1",1) + +DEFINE_MOP(MOP_movl_r_r, {&OpndDesc::Reg32IS,&OpndDesc::Reg32ID},ISMOVE,kLtAlu,"movl","0,1",1) +DEFINE_MOP(MOP_movl_i_r, {&OpndDesc::Imm32,&OpndDesc::Reg32ID},ISMOVE,kLtAlu,"movl","0,1",1) +DEFINE_MOP(MOP_movl_m_r, {&OpndDesc::Mem32S,&OpndDesc::Reg32ID},ISLOAD,kLtAlu,"movl","0,1",1) +DEFINE_MOP(MOP_movl_r_m, {&OpndDesc::Reg32IS,&OpndDesc::Mem32D},ISSTORE,kLtAlu,"movl","0,1",1) +DEFINE_MOP(MOP_movl_i_m, {&OpndDesc::Imm32,&OpndDesc::Mem32D},ISSTORE,kLtAlu,"movl","0,1",1) + +DEFINE_MOP(MOP_movq_r_r, {&OpndDesc::Reg64IS,&OpndDesc::Reg64ID},ISMOVE,kLtAlu,"movq","0,1",1) +DEFINE_MOP(MOP_movq_i_r, {&OpndDesc::Imm64,&OpndDesc::Reg64ID},ISMOVE,kLtAlu,"movq","0,1",1) +DEFINE_MOP(MOP_movq_m_r, {&OpndDesc::Mem64S,&OpndDesc::Reg64ID},ISLOAD,kLtAlu,"movq","0,1",1) +DEFINE_MOP(MOP_movq_r_m, {&OpndDesc::Reg64IS,&OpndDesc::Mem64D},ISSTORE,kLtAlu,"movq","0,1",1) + +/* floating point mov */ +DEFINE_MOP(MOP_movd_fr_r, {&OpndDesc::Reg128FS,&OpndDesc::Reg32ID},ISSTORE,kLtAlu,"movd","0,1",1) +DEFINE_MOP(MOP_movq_fr_r, {&OpndDesc::Reg128FS,&OpndDesc::Reg64ID},ISSTORE,kLtAlu,"movq","0,1",1) +DEFINE_MOP(MOP_movq_r_fr, {&OpndDesc::Reg64IS,&OpndDesc::Reg128FD},ISSTORE,kLtAlu,"movq","0,1",1) +DEFINE_MOP(MOP_movfs_m_r, {&OpndDesc::Mem32S,&OpndDesc::Reg128FD},ISLOAD,kLtAlu,"movss","0,1",1) +DEFINE_MOP(MOP_movfs_r_m, {&OpndDesc::Reg32FS,&OpndDesc::Mem128D},ISSTORE,kLtAlu,"movss","0,1",1) +DEFINE_MOP(MOP_movfd_m_r, {&OpndDesc::Mem64S,&OpndDesc::Reg128FD},ISLOAD,kLtAlu,"movsd","0,1",1) +DEFINE_MOP(MOP_movfd_r_m, {&OpndDesc::Reg64IS,&OpndDesc::Mem128D},ISSTORE,kLtAlu,"movsd","0,1",1) + +/* movabs */ +//The movabs instruction to load arbitrary 64-bit constant into register and to load/store integer register from/to arbitrary constant 64-bit address is available +DEFINE_MOP(MOP_movabs_i_r, {&OpndDesc::Imm64,&OpndDesc::Reg64ID},ISMOVE,kLtAlu,"movabs","0,1",1) +DEFINE_MOP(MOP_movabs_s_r, {&OpndDesc::StImm64,&OpndDesc::Reg64ID},ISMOVE,kLtAlu,"movabs","0,1",1) +DEFINE_MOP(MOP_movabs_l_r, {&OpndDesc::Lbl64,&OpndDesc::Reg64ID},ISMOVE,kLtAlu,"movabs","0,1",1) + +/* push & pop & lea */ +DEFINE_MOP(MOP_pushq_r, {&OpndDesc::Reg64IS},0,kLtAlu,"pushq","0",1) +DEFINE_MOP(MOP_popq_r, {&OpndDesc::Reg32IS},0,kLtAlu,"popq","0",1) + +DEFINE_MOP(MOP_leaq_m_r, {&OpndDesc::Mem64S,&OpndDesc::Reg64ID},0,kLtAlu,"leaq","0,1",1) +DEFINE_MOP(MOP_leal_m_r, {&OpndDesc::Mem32S,&OpndDesc::Reg64ID},0,kLtAlu,"leaq","0,1",1) +DEFINE_MOP(MOP_leaw_m_r, {&OpndDesc::Mem16S,&OpndDesc::Reg64ID},0,kLtAlu,"leaq","0,1",1) + +/* Moving from a smaller data size to 32 bits */ +/* zero extension */ +DEFINE_MOP(MOP_movzbw_m_r, 
{&OpndDesc::Mem8S,&OpndDesc::Reg16ID},ISCONVERSION,kLtAlu,"movzbw","0,1",1) +DEFINE_MOP(MOP_movzbw_r_r, {&OpndDesc::Reg8IS,&OpndDesc::Reg16ID},ISCONVERSION,kLtAlu,"movzbw","0,1",1) +DEFINE_MOP(MOP_movzbl_m_r, {&OpndDesc::Mem8S,&OpndDesc::Reg32ID},ISCONVERSION,kLtAlu,"movzbl","0,1",1) +DEFINE_MOP(MOP_movzbl_r_r, {&OpndDesc::Reg8IS,&OpndDesc::Reg32ID},ISCONVERSION,kLtAlu,"movzbl","0,1",1) +DEFINE_MOP(MOP_movzwl_m_r, {&OpndDesc::Mem16S,&OpndDesc::Reg32ID},ISCONVERSION,kLtAlu,"movzwl","0,1",1) +DEFINE_MOP(MOP_movzwl_r_r, {&OpndDesc::Reg16IS,&OpndDesc::Reg32ID},ISCONVERSION,kLtAlu,"movzwl","0,1",1) +/* sign extension */ +DEFINE_MOP(MOP_movsbw_m_r, {&OpndDesc::Mem8S,&OpndDesc::Reg16ID},ISCONVERSION,kLtAlu,"movsbw","0,1",1) +DEFINE_MOP(MOP_movsbw_r_r, {&OpndDesc::Reg8IS,&OpndDesc::Reg16ID},ISCONVERSION,kLtAlu,"movsbw","0,1",1) +DEFINE_MOP(MOP_movsbl_m_r, {&OpndDesc::Mem8S,&OpndDesc::Reg32ID},ISCONVERSION,kLtAlu,"movsbl","0,1",1) +DEFINE_MOP(MOP_movsbl_r_r, {&OpndDesc::Reg8IS,&OpndDesc::Reg32ID},ISCONVERSION,kLtAlu,"movsbl","0,1",1) +DEFINE_MOP(MOP_movswl_m_r, {&OpndDesc::Mem16S,&OpndDesc::Reg32ID},ISCONVERSION,kLtAlu,"movswl","0,1",1) +DEFINE_MOP(MOP_movswl_r_r, {&OpndDesc::Reg16IS,&OpndDesc::Reg32ID},ISCONVERSION,kLtAlu,"movswl","0,1",1) + +/* Moving from a smaller data size to 64 bits */ +/* zero extension */ +/* + * Perhaps unexpectedly, instructions that move or generate 32-bit register values also set the upper 32 bits of the register to zero. + * Consequently, there is no need for an instruction movzlq. + */ +DEFINE_MOP(MOP_movzbq_m_r, {&OpndDesc::Mem8S,&OpndDesc::Reg64ID},ISCONVERSION,kLtAlu,"movzbq","0,1",1) +DEFINE_MOP(MOP_movzbq_r_r, {&OpndDesc::Reg8IS,&OpndDesc::Reg64ID},ISCONVERSION,kLtAlu,"movzbq","0,1",1) +DEFINE_MOP(MOP_movzwq_m_r, {&OpndDesc::Mem16S,&OpndDesc::Reg64ID},ISCONVERSION,kLtAlu,"movzwq","0,1",1) +DEFINE_MOP(MOP_movzwq_r_r, {&OpndDesc::Reg16IS,&OpndDesc::Reg64ID},ISCONVERSION,kLtAlu,"movzwq","0,1",1) +/* sign extension */ +DEFINE_MOP(MOP_movsbq_m_r, {&OpndDesc::Mem8S,&OpndDesc::Reg64ID},ISCONVERSION,kLtAlu,"movsbq","0,1",1) +DEFINE_MOP(MOP_movsbq_r_r, {&OpndDesc::Reg8IS,&OpndDesc::Reg64ID},ISCONVERSION,kLtAlu,"movsbq","0,1",1) +DEFINE_MOP(MOP_movswq_m_r, {&OpndDesc::Mem16S,&OpndDesc::Reg64ID},ISCONVERSION,kLtAlu,"movswq","0,1",1) +DEFINE_MOP(MOP_movswq_r_r, {&OpndDesc::Reg16IS,&OpndDesc::Reg64ID},ISCONVERSION,kLtAlu,"movswq","0,1",1) +DEFINE_MOP(MOP_movslq_r_r, {&OpndDesc::Reg32IS,&OpndDesc::Reg64ID},ISCONVERSION,kLtAlu,"movslq","0,1",1) +DEFINE_MOP(MOP_movslq_m_r, {&OpndDesc::Mem32S,&OpndDesc::Reg64ID},ISCONVERSION,kLtAlu,"movslq","0,1",1) + +/* BasicOp */ +/* add */ +DEFINE_MOP(MOP_addb_r_r, {&OpndDesc::Reg8IS,&OpndDesc::Reg8IDS},ISBASICOP,kLtAlu,"addb","0,1",1) +DEFINE_MOP(MOP_addw_r_r, {&OpndDesc::Reg16IS,&OpndDesc::Reg16IDS},ISBASICOP,kLtAlu,"addw","0,1",1) +DEFINE_MOP(MOP_addl_r_r, {&OpndDesc::Reg32IS,&OpndDesc::Reg32IDS},ISBASICOP,kLtAlu,"addl","0,1",1) +DEFINE_MOP(MOP_addq_r_r, {&OpndDesc::Reg64IS,&OpndDesc::Reg64IDS},ISBASICOP,kLtAlu,"addq","0,1",1) +DEFINE_MOP(MOP_addb_m_r, {&OpndDesc::Mem8S,&OpndDesc::Reg8IDS},ISBASICOP,kLtAlu,"addb","0,1",1) +DEFINE_MOP(MOP_addw_m_r, {&OpndDesc::Mem16S,&OpndDesc::Reg16IDS},ISBASICOP,kLtAlu,"addw","0,1",1) +DEFINE_MOP(MOP_addl_m_r, {&OpndDesc::Mem32S,&OpndDesc::Reg32IDS},ISBASICOP,kLtAlu,"addl","0,1",1) +DEFINE_MOP(MOP_addq_m_r, {&OpndDesc::Mem64S,&OpndDesc::Reg64IDS},ISBASICOP,kLtAlu,"addq","0,1",1) +DEFINE_MOP(MOP_addb_i_r, {&OpndDesc::Imm8,&OpndDesc::Reg8IDS},ISBASICOP,kLtAlu,"addb","0,1",1) +DEFINE_MOP(MOP_addw_i_r, 
{&OpndDesc::Imm16,&OpndDesc::Reg16IDS},ISBASICOP,kLtAlu,"addw","0,1",1) +DEFINE_MOP(MOP_addl_i_r, {&OpndDesc::Imm32,&OpndDesc::Reg32IDS},ISBASICOP,kLtAlu,"addl","0,1",1) +DEFINE_MOP(MOP_addq_i_r, {&OpndDesc::Imm32,&OpndDesc::Reg64IDS},ISBASICOP,kLtAlu,"addq","0,1",1) +DEFINE_MOP(MOP_addb_r_m, {&OpndDesc::Reg8IS,&OpndDesc::Mem8D},ISBASICOP,kLtAlu,"addb","0,1",1) +DEFINE_MOP(MOP_addw_r_m, {&OpndDesc::Reg16IS,&OpndDesc::Mem16D},ISBASICOP,kLtAlu,"addw","0,1",1) +DEFINE_MOP(MOP_addl_r_m, {&OpndDesc::Reg32IS,&OpndDesc::Mem32D},ISBASICOP,kLtAlu,"addl","0,1",1) +DEFINE_MOP(MOP_addq_r_m, {&OpndDesc::Reg64IS,&OpndDesc::Mem64D},ISBASICOP,kLtAlu,"addq","0,1",1) +DEFINE_MOP(MOP_addb_i_m, {&OpndDesc::Imm8,&OpndDesc::Mem8D},ISBASICOP,kLtAlu,"addb","0,1",1) +DEFINE_MOP(MOP_addw_i_m, {&OpndDesc::Imm16,&OpndDesc::Mem16D},ISBASICOP,kLtAlu,"addw","0,1",1) +DEFINE_MOP(MOP_addl_i_m, {&OpndDesc::Imm32,&OpndDesc::Mem32D},ISBASICOP,kLtAlu,"addl","0,1",1) +DEFINE_MOP(MOP_addq_i_m, {&OpndDesc::Imm32,&OpndDesc::Mem64D},ISBASICOP,kLtAlu,"addq","0,1",1) +/* add floating point */ +DEFINE_MOP(MOP_adds_r_r, {&OpndDesc::Reg128FS,&OpndDesc::Reg128FDS},ISBASICOP,kLtAlu,"addss","0,1",1) +DEFINE_MOP(MOP_adds_m_r, {&OpndDesc::Mem32S,&OpndDesc::Reg128FDS},ISBASICOP,kLtAlu,"addss","0,1",1) +DEFINE_MOP(MOP_addd_r_r, {&OpndDesc::Reg128FS,&OpndDesc::Reg128FDS},ISBASICOP,kLtAlu,"addsd","0,1",1) +DEFINE_MOP(MOP_addd_m_r, {&OpndDesc::Mem64S,&OpndDesc::Reg128FDS},ISBASICOP,kLtAlu,"addsd","0,1",1) +/* sub */ +DEFINE_MOP(MOP_subb_r_r, {&OpndDesc::Reg8IS,&OpndDesc::Reg8IDS},ISBASICOP,kLtAlu,"subb","0,1",1) +DEFINE_MOP(MOP_subw_r_r, {&OpndDesc::Reg16IS,&OpndDesc::Reg16IDS},ISBASICOP,kLtAlu,"subw","0,1",1) +DEFINE_MOP(MOP_subl_r_r, {&OpndDesc::Reg32IS,&OpndDesc::Reg32IDS},ISBASICOP,kLtAlu,"subl","0,1",1) +DEFINE_MOP(MOP_subq_r_r, {&OpndDesc::Reg64IS,&OpndDesc::Reg64IDS},ISBASICOP,kLtAlu,"subq","0,1",1) +DEFINE_MOP(MOP_subb_m_r, {&OpndDesc::Mem8S,&OpndDesc::Reg8IDS},ISBASICOP,kLtAlu,"subb","0,1",1) +DEFINE_MOP(MOP_subw_m_r, {&OpndDesc::Mem16S,&OpndDesc::Reg16IDS},ISBASICOP,kLtAlu,"subw","0,1",1) +DEFINE_MOP(MOP_subl_m_r, {&OpndDesc::Mem32S,&OpndDesc::Reg32IDS},ISBASICOP,kLtAlu,"subl","0,1",1) +DEFINE_MOP(MOP_subq_m_r, {&OpndDesc::Mem64S,&OpndDesc::Reg64IDS},ISBASICOP,kLtAlu,"subq","0,1",1) +DEFINE_MOP(MOP_subb_i_r, {&OpndDesc::Imm8,&OpndDesc::Reg8IDS},ISBASICOP,kLtAlu,"subb","0,1",1) +DEFINE_MOP(MOP_subw_i_r, {&OpndDesc::Imm16,&OpndDesc::Reg16IDS},ISBASICOP,kLtAlu,"subw","0,1",1) +DEFINE_MOP(MOP_subl_i_r, {&OpndDesc::Imm32,&OpndDesc::Reg32IDS},ISBASICOP,kLtAlu,"subl","0,1",1) +DEFINE_MOP(MOP_subq_i_r, {&OpndDesc::Imm32,&OpndDesc::Reg64IDS},ISBASICOP,kLtAlu,"subq","0,1",1) +DEFINE_MOP(MOP_subb_r_m, {&OpndDesc::Reg8IS,&OpndDesc::Mem8D},ISBASICOP,kLtAlu,"subb","0,1",1) +DEFINE_MOP(MOP_subw_r_m, {&OpndDesc::Reg16IS,&OpndDesc::Mem16D},ISBASICOP,kLtAlu,"subw","0,1",1) +DEFINE_MOP(MOP_subl_r_m, {&OpndDesc::Reg32IS,&OpndDesc::Mem32D},ISBASICOP,kLtAlu,"subl","0,1",1) +DEFINE_MOP(MOP_subq_r_m, {&OpndDesc::Reg64IS,&OpndDesc::Mem64D},ISBASICOP,kLtAlu,"subq","0,1",1) +DEFINE_MOP(MOP_subb_i_m, {&OpndDesc::Imm8,&OpndDesc::Mem8D},ISBASICOP,kLtAlu,"subb","0,1",1) +DEFINE_MOP(MOP_subw_i_m, {&OpndDesc::Imm16,&OpndDesc::Mem16D},ISBASICOP,kLtAlu,"subw","0,1",1) +DEFINE_MOP(MOP_subl_i_m, {&OpndDesc::Imm32,&OpndDesc::Mem32D},ISBASICOP,kLtAlu,"subl","0,1",1) +DEFINE_MOP(MOP_subq_i_m, {&OpndDesc::Imm32,&OpndDesc::Mem64D},ISBASICOP,kLtAlu,"subq","0,1",1) +/* sub floating point */ +DEFINE_MOP(MOP_subs_r_r, 
{&OpndDesc::Reg128FS,&OpndDesc::Reg128FDS},ISBASICOP,kLtAlu,"subss","0,1",1) +DEFINE_MOP(MOP_subs_m_r, {&OpndDesc::Mem32S,&OpndDesc::Reg128FDS},ISBASICOP,kLtAlu,"subss","0,1",1) +DEFINE_MOP(MOP_subd_r_r, {&OpndDesc::Reg128FS,&OpndDesc::Reg128FDS},ISBASICOP,kLtAlu,"subsd","0,1",1) +DEFINE_MOP(MOP_subd_m_r, {&OpndDesc::Mem64S,&OpndDesc::Reg128FDS},ISBASICOP,kLtAlu,"subsd","0,1",1) +/* and */ +DEFINE_MOP(MOP_andb_r_r, {&OpndDesc::Reg8IS,&OpndDesc::Reg8IDS},ISBASICOP,kLtAlu,"andb","0,1",1) +DEFINE_MOP(MOP_andw_r_r, {&OpndDesc::Reg16IS,&OpndDesc::Reg16IDS},ISBASICOP,kLtAlu,"andw","0,1",1) +DEFINE_MOP(MOP_andl_r_r, {&OpndDesc::Reg32IS,&OpndDesc::Reg32IDS},ISBASICOP,kLtAlu,"andl","0,1",1) +DEFINE_MOP(MOP_andq_r_r, {&OpndDesc::Reg64IS,&OpndDesc::Reg64IDS},ISBASICOP,kLtAlu,"andq","0,1",1) +DEFINE_MOP(MOP_andb_m_r, {&OpndDesc::Mem8S,&OpndDesc::Reg8IDS},ISBASICOP,kLtAlu,"andb","0,1",1) +DEFINE_MOP(MOP_andw_m_r, {&OpndDesc::Mem16S,&OpndDesc::Reg16IDS},ISBASICOP,kLtAlu,"andw","0,1",1) +DEFINE_MOP(MOP_andl_m_r, {&OpndDesc::Mem32S,&OpndDesc::Reg32IDS},ISBASICOP,kLtAlu,"andl","0,1",1) +DEFINE_MOP(MOP_andq_m_r, {&OpndDesc::Mem64S,&OpndDesc::Reg64IDS},ISBASICOP,kLtAlu,"andq","0,1",1) +DEFINE_MOP(MOP_andb_i_r, {&OpndDesc::Imm8,&OpndDesc::Reg8IDS},ISBASICOP,kLtAlu,"andb","0,1",1) +DEFINE_MOP(MOP_andw_i_r, {&OpndDesc::Imm16,&OpndDesc::Reg16IDS},ISBASICOP,kLtAlu,"andw","0,1",1) +DEFINE_MOP(MOP_andl_i_r, {&OpndDesc::Imm32,&OpndDesc::Reg32IDS},ISBASICOP,kLtAlu,"andl","0,1",1) +DEFINE_MOP(MOP_andq_i_r, {&OpndDesc::Imm32,&OpndDesc::Reg64IDS},ISBASICOP,kLtAlu,"andq","0,1",1) +DEFINE_MOP(MOP_andb_r_m, {&OpndDesc::Reg8IS,&OpndDesc::Mem8D},ISBASICOP,kLtAlu,"andb","0,1",1) +DEFINE_MOP(MOP_andw_r_m, {&OpndDesc::Reg16IS,&OpndDesc::Mem16D},ISBASICOP,kLtAlu,"andw","0,1",1) +DEFINE_MOP(MOP_andl_r_m, {&OpndDesc::Reg32IS,&OpndDesc::Mem32D},ISBASICOP,kLtAlu,"andl","0,1",1) +DEFINE_MOP(MOP_andq_r_m, {&OpndDesc::Reg64IS,&OpndDesc::Mem64D},ISBASICOP,kLtAlu,"andq","0,1",1) +DEFINE_MOP(MOP_andb_i_m, {&OpndDesc::Imm8,&OpndDesc::Mem8D},ISBASICOP,kLtAlu,"andb","0,1",1) +DEFINE_MOP(MOP_andw_i_m, {&OpndDesc::Imm16,&OpndDesc::Mem16D},ISBASICOP,kLtAlu,"andw","0,1",1) +DEFINE_MOP(MOP_andl_i_m, {&OpndDesc::Imm32,&OpndDesc::Mem32D},ISBASICOP,kLtAlu,"andl","0,1",1) +DEFINE_MOP(MOP_andq_i_m, {&OpndDesc::Imm32,&OpndDesc::Mem64D},ISBASICOP,kLtAlu,"andq","0,1",1) +/* or */ +DEFINE_MOP(MOP_orb_r_r, {&OpndDesc::Reg8IS,&OpndDesc::Reg8IDS},ISBASICOP,kLtAlu,"orb","0,1",1) +DEFINE_MOP(MOP_orw_r_r, {&OpndDesc::Reg16IS,&OpndDesc::Reg16IDS},ISBASICOP,kLtAlu,"orw","0,1",1) +DEFINE_MOP(MOP_orl_r_r, {&OpndDesc::Reg32IS,&OpndDesc::Reg32IDS},ISBASICOP,kLtAlu,"orl","0,1",1) +DEFINE_MOP(MOP_orq_r_r, {&OpndDesc::Reg64IS,&OpndDesc::Reg64IDS},ISBASICOP,kLtAlu,"orq","0,1",1) +DEFINE_MOP(MOP_orb_m_r, {&OpndDesc::Mem8S,&OpndDesc::Reg8IDS},ISBASICOP,kLtAlu,"orb","0,1",1) +DEFINE_MOP(MOP_orw_m_r, {&OpndDesc::Mem16S,&OpndDesc::Reg16IDS},ISBASICOP,kLtAlu,"orw","0,1",1) +DEFINE_MOP(MOP_orl_m_r, {&OpndDesc::Mem32S,&OpndDesc::Reg32IDS},ISBASICOP,kLtAlu,"orl","0,1",1) +DEFINE_MOP(MOP_orq_m_r, {&OpndDesc::Mem64S,&OpndDesc::Reg64IDS},ISBASICOP,kLtAlu,"orq","0,1",1) +DEFINE_MOP(MOP_orb_i_r, {&OpndDesc::Imm8,&OpndDesc::Reg8IDS},ISBASICOP,kLtAlu,"orb","0,1",1) +DEFINE_MOP(MOP_orw_i_r, {&OpndDesc::Imm16,&OpndDesc::Reg16IDS},ISBASICOP,kLtAlu,"orw","0,1",1) +DEFINE_MOP(MOP_orl_i_r, {&OpndDesc::Imm32,&OpndDesc::Reg32IDS},ISBASICOP,kLtAlu,"orl","0,1",1) +DEFINE_MOP(MOP_orq_i_r, {&OpndDesc::Imm32,&OpndDesc::Reg64IDS},ISBASICOP,kLtAlu,"orq","0,1",1) +DEFINE_MOP(MOP_orb_r_m, 
{&OpndDesc::Reg8IS,&OpndDesc::Mem8D},ISBASICOP,kLtAlu,"orb","0,1",1) +DEFINE_MOP(MOP_orw_r_m, {&OpndDesc::Reg16IS,&OpndDesc::Mem16D},ISBASICOP,kLtAlu,"orw","0,1",1) +DEFINE_MOP(MOP_orl_r_m, {&OpndDesc::Reg32IS,&OpndDesc::Mem32D},ISBASICOP,kLtAlu,"orl","0,1",1) +DEFINE_MOP(MOP_orq_r_m, {&OpndDesc::Reg64IS,&OpndDesc::Mem64D},ISBASICOP,kLtAlu,"orq","0,1",1) +DEFINE_MOP(MOP_orb_i_m, {&OpndDesc::Imm8,&OpndDesc::Mem8D},ISBASICOP,kLtAlu,"orb","0,1",1) +DEFINE_MOP(MOP_orw_i_m, {&OpndDesc::Imm16,&OpndDesc::Mem16D},ISBASICOP,kLtAlu,"orw","0,1",1) +DEFINE_MOP(MOP_orl_i_m, {&OpndDesc::Imm32,&OpndDesc::Mem32D},ISBASICOP,kLtAlu,"orl","0,1",1) +DEFINE_MOP(MOP_orq_i_m, {&OpndDesc::Imm32,&OpndDesc::Mem64D},ISBASICOP,kLtAlu,"orq","0,1",1) +/* xor */ +DEFINE_MOP(MOP_xorb_r_r, {&OpndDesc::Reg8IS,&OpndDesc::Reg8IDS},ISBASICOP,kLtAlu,"xorb","0,1",1) +DEFINE_MOP(MOP_xorw_r_r, {&OpndDesc::Reg16IS,&OpndDesc::Reg16IDS},ISBASICOP,kLtAlu,"xorw","0,1",1) +DEFINE_MOP(MOP_xorl_r_r, {&OpndDesc::Reg32IS,&OpndDesc::Reg32IDS},ISBASICOP,kLtAlu,"xorl","0,1",1) +DEFINE_MOP(MOP_xorq_r_r, {&OpndDesc::Reg64IS,&OpndDesc::Reg64IDS},ISBASICOP,kLtAlu,"xorq","0,1",1) +DEFINE_MOP(MOP_xorb_m_r, {&OpndDesc::Mem8S,&OpndDesc::Reg8IDS},ISBASICOP,kLtAlu,"xorb","0,1",1) +DEFINE_MOP(MOP_xorw_m_r, {&OpndDesc::Mem16S,&OpndDesc::Reg16IDS},ISBASICOP,kLtAlu,"xorw","0,1",1) +DEFINE_MOP(MOP_xorl_m_r, {&OpndDesc::Mem32S,&OpndDesc::Reg32IDS},ISBASICOP,kLtAlu,"xorl","0,1",1) +DEFINE_MOP(MOP_xorq_m_r, {&OpndDesc::Mem64S,&OpndDesc::Reg64IDS},ISBASICOP,kLtAlu,"xorq","0,1",1) +DEFINE_MOP(MOP_xorb_i_r, {&OpndDesc::Imm8,&OpndDesc::Reg8IDS},ISBASICOP,kLtAlu,"xorb","0,1",1) +DEFINE_MOP(MOP_xorw_i_r, {&OpndDesc::Imm16,&OpndDesc::Reg16IDS},ISBASICOP,kLtAlu,"xorw","0,1",1) +DEFINE_MOP(MOP_xorl_i_r, {&OpndDesc::Imm32,&OpndDesc::Reg32IDS},ISBASICOP,kLtAlu,"xorl","0,1",1) +DEFINE_MOP(MOP_xorq_i_r, {&OpndDesc::Imm32,&OpndDesc::Reg64IDS},ISBASICOP,kLtAlu,"xorq","0,1",1) +DEFINE_MOP(MOP_xorb_r_m, {&OpndDesc::Reg8IS,&OpndDesc::Mem8D},ISBASICOP,kLtAlu,"xorb","0,1",1) +DEFINE_MOP(MOP_xorw_r_m, {&OpndDesc::Reg16IS,&OpndDesc::Mem16D},ISBASICOP,kLtAlu,"xorw","0,1",1) +DEFINE_MOP(MOP_xorl_r_m, {&OpndDesc::Reg32IS,&OpndDesc::Mem32D},ISBASICOP,kLtAlu,"xorl","0,1",1) +DEFINE_MOP(MOP_xorq_r_m, {&OpndDesc::Reg64IS,&OpndDesc::Mem64D},ISBASICOP,kLtAlu,"xorq","0,1",1) +DEFINE_MOP(MOP_xorb_i_m, {&OpndDesc::Imm8,&OpndDesc::Mem8D},ISBASICOP,kLtAlu,"xorb","0,1",1) +DEFINE_MOP(MOP_xorw_i_m, {&OpndDesc::Imm16,&OpndDesc::Mem16D},ISBASICOP,kLtAlu,"xorw","0,1",1) +DEFINE_MOP(MOP_xorl_i_m, {&OpndDesc::Imm32,&OpndDesc::Mem32D},ISBASICOP,kLtAlu,"xorl","0,1",1) +DEFINE_MOP(MOP_xorq_i_m, {&OpndDesc::Imm32,&OpndDesc::Mem64D},ISBASICOP,kLtAlu,"xorq","0,1",1) + +/* UnaryOp */ +/* neg */ +DEFINE_MOP(MOP_negb_r, {&OpndDesc::Reg8IDS},ISUNARYOP,kLtAlu,"negb","0",1) +DEFINE_MOP(MOP_negw_r, {&OpndDesc::Reg16IDS},ISUNARYOP,kLtAlu,"negw","0",1) +DEFINE_MOP(MOP_negl_r, {&OpndDesc::Reg32IDS},ISUNARYOP,kLtAlu,"negl","0",1) +DEFINE_MOP(MOP_negq_r, {&OpndDesc::Reg64IDS},ISUNARYOP,kLtAlu,"negq","0",1) +DEFINE_MOP(MOP_negb_m, {&OpndDesc::Mem8S},ISUNARYOP,kLtAlu,"negb","0",1) +DEFINE_MOP(MOP_negw_m, {&OpndDesc::Mem16S},ISUNARYOP,kLtAlu,"negw","0",1) +DEFINE_MOP(MOP_negl_m, {&OpndDesc::Mem32S},ISUNARYOP,kLtAlu,"negl","0",1) +DEFINE_MOP(MOP_negq_m, {&OpndDesc::Mem64S},ISUNARYOP,kLtAlu,"negq","0",1) +/* not */ +DEFINE_MOP(MOP_notb_r, {&OpndDesc::Reg8IDS},ISUNARYOP,kLtAlu,"notb","0",1) +DEFINE_MOP(MOP_notw_r, {&OpndDesc::Reg16IDS},ISUNARYOP,kLtAlu,"notw","0",1) +DEFINE_MOP(MOP_notl_r, 
{&OpndDesc::Reg32IDS},ISUNARYOP,kLtAlu,"notl","0",1) +DEFINE_MOP(MOP_notq_r, {&OpndDesc::Reg64IDS},ISUNARYOP,kLtAlu,"notq","0",1) +DEFINE_MOP(MOP_notb_m, {&OpndDesc::Mem8S},ISUNARYOP,kLtAlu,"notb","0",1) +DEFINE_MOP(MOP_notw_m, {&OpndDesc::Mem16S},ISUNARYOP,kLtAlu,"notw","0",1) +DEFINE_MOP(MOP_notl_m, {&OpndDesc::Mem32S},ISUNARYOP,kLtAlu,"notl","0",1) +DEFINE_MOP(MOP_notq_m, {&OpndDesc::Mem64S},ISUNARYOP,kLtAlu,"notq","0",1) + +/* shift -- shl/sar/shr reg8, use cl */ +/* shl */ +DEFINE_MOP(MOP_shlb_r_r, {&OpndDesc::Reg8IS, &OpndDesc::Reg8IDS},ISSHIFT,kLtAlu,"shlb","0,1",1) +DEFINE_MOP(MOP_shlw_r_r, {&OpndDesc::Reg8IS, &OpndDesc::Reg16IDS},ISSHIFT,kLtAlu,"shlw","0,1",1) +DEFINE_MOP(MOP_shll_r_r, {&OpndDesc::Reg8IS, &OpndDesc::Reg32IDS},ISSHIFT,kLtAlu,"shll","0,1",1) +DEFINE_MOP(MOP_shlq_r_r, {&OpndDesc::Reg8IS, &OpndDesc::Reg64IDS},ISSHIFT,kLtAlu,"shlq","0,1",1) +DEFINE_MOP(MOP_shlb_i_r, {&OpndDesc::Imm8, &OpndDesc::Reg8IDS},ISSHIFT,kLtAlu,"shlb","0,1",1) +DEFINE_MOP(MOP_shlw_i_r, {&OpndDesc::Imm8, &OpndDesc::Reg16IDS},ISSHIFT,kLtAlu,"shlw","0,1",1) +DEFINE_MOP(MOP_shll_i_r, {&OpndDesc::Imm8, &OpndDesc::Reg32IDS},ISSHIFT,kLtAlu,"shll","0,1",1) +DEFINE_MOP(MOP_shlq_i_r, {&OpndDesc::Imm8, &OpndDesc::Reg64IDS},ISSHIFT,kLtAlu,"shlq","0,1",1) +DEFINE_MOP(MOP_shlb_r_m, {&OpndDesc::Reg8IS, &OpndDesc::Mem8D},ISSHIFT,kLtAlu,"shlb","0,1",1) +DEFINE_MOP(MOP_shlw_r_m, {&OpndDesc::Reg8IS, &OpndDesc::Mem16D},ISSHIFT,kLtAlu,"shlw","0,1",1) +DEFINE_MOP(MOP_shll_r_m, {&OpndDesc::Reg8IS, &OpndDesc::Mem32D},ISSHIFT,kLtAlu,"shll","0,1",1) +DEFINE_MOP(MOP_shlq_r_m, {&OpndDesc::Reg8IS, &OpndDesc::Mem64D},ISSHIFT,kLtAlu,"shlq","0,1",1) +DEFINE_MOP(MOP_shlb_i_m, {&OpndDesc::Imm8, &OpndDesc::Mem8D},ISSHIFT,kLtAlu,"shlb","0,1",1) +DEFINE_MOP(MOP_shlw_i_m, {&OpndDesc::Imm8, &OpndDesc::Mem16D},ISSHIFT,kLtAlu,"shlw","0,1",1) +DEFINE_MOP(MOP_shll_i_m, {&OpndDesc::Imm8, &OpndDesc::Mem32D},ISSHIFT,kLtAlu,"shll","0,1",1) +DEFINE_MOP(MOP_shlq_i_m, {&OpndDesc::Imm8, &OpndDesc::Mem64D},ISSHIFT,kLtAlu,"shlq","0,1",1) +/* sar */ +DEFINE_MOP(MOP_sarb_r_r, {&OpndDesc::Reg8IS, &OpndDesc::Reg8IDS},ISSHIFT,kLtAlu,"sarb","0,1",1) +DEFINE_MOP(MOP_sarw_r_r, {&OpndDesc::Reg8IS, &OpndDesc::Reg16IDS},ISSHIFT,kLtAlu,"sarw","0,1",1) +DEFINE_MOP(MOP_sarl_r_r, {&OpndDesc::Reg8IS, &OpndDesc::Reg32IDS},ISSHIFT,kLtAlu,"sarl","0,1",1) +DEFINE_MOP(MOP_sarq_r_r, {&OpndDesc::Reg8IS, &OpndDesc::Reg64IDS},ISSHIFT,kLtAlu,"sarq","0,1",1) +DEFINE_MOP(MOP_sarb_i_r, {&OpndDesc::Imm8, &OpndDesc::Reg8IDS},ISSHIFT,kLtAlu,"sarb","0,1",1) +DEFINE_MOP(MOP_sarw_i_r, {&OpndDesc::Imm8, &OpndDesc::Reg16IDS},ISSHIFT,kLtAlu,"sarw","0,1",1) +DEFINE_MOP(MOP_sarl_i_r, {&OpndDesc::Imm8, &OpndDesc::Reg32IDS},ISSHIFT,kLtAlu,"sarl","0,1",1) +DEFINE_MOP(MOP_sarq_i_r, {&OpndDesc::Imm8, &OpndDesc::Reg64IDS},ISSHIFT,kLtAlu,"sarq","0,1",1) +DEFINE_MOP(MOP_sarb_r_m, {&OpndDesc::Reg8IS, &OpndDesc::Mem8D},ISSHIFT,kLtAlu,"sarb","0,1",1) +DEFINE_MOP(MOP_sarw_r_m, {&OpndDesc::Reg8IS, &OpndDesc::Mem16D},ISSHIFT,kLtAlu,"sarw","0,1",1) +DEFINE_MOP(MOP_sarl_r_m, {&OpndDesc::Reg8IS, &OpndDesc::Mem32D},ISSHIFT,kLtAlu,"sarl","0,1",1) +DEFINE_MOP(MOP_sarq_r_m, {&OpndDesc::Reg8IS, &OpndDesc::Mem64D},ISSHIFT,kLtAlu,"sarq","0,1",1) +DEFINE_MOP(MOP_sarb_i_m, {&OpndDesc::Imm8, &OpndDesc::Mem8D},ISSHIFT,kLtAlu,"sarb","0,1",1) +DEFINE_MOP(MOP_sarw_i_m, {&OpndDesc::Imm8, &OpndDesc::Mem16D},ISSHIFT,kLtAlu,"sarw","0,1",1) +DEFINE_MOP(MOP_sarl_i_m, {&OpndDesc::Imm8, &OpndDesc::Mem32D},ISSHIFT,kLtAlu,"sarl","0,1",1) +DEFINE_MOP(MOP_sarq_i_m, {&OpndDesc::Imm8, 
&OpndDesc::Mem64D},ISSHIFT,kLtAlu,"sarq","0,1",1) +/* shr */ +DEFINE_MOP(MOP_shrb_r_r, {&OpndDesc::Reg8IS, &OpndDesc::Reg8IDS},ISSHIFT,kLtAlu,"shrb","0,1",1) +DEFINE_MOP(MOP_shrw_r_r, {&OpndDesc::Reg8IS, &OpndDesc::Reg16IDS},ISSHIFT,kLtAlu,"shrw","0,1",1) +DEFINE_MOP(MOP_shrl_r_r, {&OpndDesc::Reg8IS, &OpndDesc::Reg32IDS},ISSHIFT,kLtAlu,"shrl","0,1",1) +DEFINE_MOP(MOP_shrq_r_r, {&OpndDesc::Reg8IS, &OpndDesc::Reg64IDS},ISSHIFT,kLtAlu,"shrq","0,1",1) +DEFINE_MOP(MOP_shrb_i_r, {&OpndDesc::Imm8, &OpndDesc::Reg8IDS},ISSHIFT,kLtAlu,"shrb","0,1",1) +DEFINE_MOP(MOP_shrw_i_r, {&OpndDesc::Imm8, &OpndDesc::Reg16IDS},ISSHIFT,kLtAlu,"shrw","0,1",1) +DEFINE_MOP(MOP_shrl_i_r, {&OpndDesc::Imm8, &OpndDesc::Reg32IDS},ISSHIFT,kLtAlu,"shrl","0,1",1) +DEFINE_MOP(MOP_shrq_i_r, {&OpndDesc::Imm8, &OpndDesc::Reg64IDS},ISSHIFT,kLtAlu,"shrq","0,1",1) +DEFINE_MOP(MOP_shrb_r_m, {&OpndDesc::Reg8IS, &OpndDesc::Mem8D},ISSHIFT,kLtAlu,"shrb","0,1",1) +DEFINE_MOP(MOP_shrw_r_m, {&OpndDesc::Reg8IS, &OpndDesc::Mem16D},ISSHIFT,kLtAlu,"shrw","0,1",1) +DEFINE_MOP(MOP_shrl_r_m, {&OpndDesc::Reg8IS, &OpndDesc::Mem32D},ISSHIFT,kLtAlu,"shrl","0,1",1) +DEFINE_MOP(MOP_shrq_r_m, {&OpndDesc::Reg8IS, &OpndDesc::Mem64D},ISSHIFT,kLtAlu,"shrq","0,1",1) +DEFINE_MOP(MOP_shrb_i_m, {&OpndDesc::Imm8, &OpndDesc::Mem8D},ISSHIFT,kLtAlu,"shrb","0,1",1) +DEFINE_MOP(MOP_shrw_i_m, {&OpndDesc::Imm8, &OpndDesc::Mem16D},ISSHIFT,kLtAlu,"shrw","0,1",1) +DEFINE_MOP(MOP_shrl_i_m, {&OpndDesc::Imm8, &OpndDesc::Mem32D},ISSHIFT,kLtAlu,"shrl","0,1",1) +DEFINE_MOP(MOP_shrq_i_m, {&OpndDesc::Imm8, &OpndDesc::Mem64D},ISSHIFT,kLtAlu,"shrq","0,1",1) + +/* idiv, div -- opnd(use), rax(def,use), rdx(def,use) */ +DEFINE_MOP(MOP_idivw_r, {&OpndDesc::Reg16IS, &OpndDesc::Reg16IDS, &OpndDesc::Reg16IDS},0,kLtAlu,"idivw","0",1) +DEFINE_MOP(MOP_idivl_r, {&OpndDesc::Reg32IS, &OpndDesc::Reg32IDS, &OpndDesc::Reg32IDS},0,kLtAlu,"idivl","0",1) +DEFINE_MOP(MOP_idivq_r, {&OpndDesc::Reg64IS, &OpndDesc::Reg64IDS, &OpndDesc::Reg64IDS},0,kLtAlu,"idivq","0",1) +DEFINE_MOP(MOP_idivw_m, {&OpndDesc::Mem16S, &OpndDesc::Reg16IDS, &OpndDesc::Reg16IDS},0,kLtAlu,"idivw","0",1) +DEFINE_MOP(MOP_idivl_m, {&OpndDesc::Mem32S, &OpndDesc::Reg32IDS, &OpndDesc::Reg32IDS},0,kLtAlu,"idivl","0",1) +DEFINE_MOP(MOP_idivq_m, {&OpndDesc::Mem64S, &OpndDesc::Reg64IDS, &OpndDesc::Reg64IDS},0,kLtAlu,"idivq","0",1) +DEFINE_MOP(MOP_divw_r, {&OpndDesc::Reg16IS, &OpndDesc::Reg16IDS, &OpndDesc::Reg16IDS},0,kLtAlu,"divw","0",1) +DEFINE_MOP(MOP_divl_r, {&OpndDesc::Reg32IS, &OpndDesc::Reg32IDS, &OpndDesc::Reg32IDS},0,kLtAlu,"divl","0",1) +DEFINE_MOP(MOP_divq_r, {&OpndDesc::Reg64IS, &OpndDesc::Reg64IDS, &OpndDesc::Reg64IDS},0,kLtAlu,"divq","0",1) +DEFINE_MOP(MOP_divw_m, {&OpndDesc::Mem16S, &OpndDesc::Reg16IDS, &OpndDesc::Reg16IDS},0,kLtAlu,"divw","0",1) +DEFINE_MOP(MOP_divl_m, {&OpndDesc::Mem32S, &OpndDesc::Reg32IDS, &OpndDesc::Reg32IDS},0,kLtAlu,"divl","0",1) +DEFINE_MOP(MOP_divq_m, {&OpndDesc::Mem64S, &OpndDesc::Reg64IDS, &OpndDesc::Reg64IDS},0,kLtAlu,"divq","0",1) +/* cwd, cdq, cqo -- rax(def use), rdx(def) */ +DEFINE_MOP(MOP_cwd, {&OpndDesc::Reg16IDS, &OpndDesc::Reg16ID},0,kLtAlu,"cwd","",1) +DEFINE_MOP(MOP_cdq, {&OpndDesc::Reg32IDS, &OpndDesc::Reg32ID},0,kLtAlu,"cdq","",1) +DEFINE_MOP(MOP_cqo, {&OpndDesc::Reg64IDS, &OpndDesc::Reg64ID},0,kLtAlu,"cqo","",1) + +/* jmp, je, jne */ +DEFINE_MOP(MOP_jmpq_r, {&OpndDesc::Reg64IS},ISUNCONDBRANCH,kLtAlu,"jmp","0",1) +DEFINE_MOP(MOP_jmpq_m, {&OpndDesc::Mem64S},ISUNCONDBRANCH,kLtAlu,"jmp","0",1) +DEFINE_MOP(MOP_jmpq_l, {&OpndDesc::Lbl64},ISUNCONDBRANCH,kLtAlu,"jmp","0",1) // ip relative + 
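+
+/*
+ * Note on the division entries above (illustrative sketch): the extra %rax/%rdx
+ * operands never show up in the emitted text (their format string is just "0");
+ * they are presumably carried so that the implicit dividend/quotient/remainder
+ * registers are visible as defs/uses to liveness analysis and register
+ * allocation. A typical signed 64-bit lowering of q = a / b, r = a % b would
+ * therefore be (register assignments chosen only for illustration):
+ *
+ *     movq  %rdi, %rax      # dividend into %rax
+ *     cqo                   # sign-extend %rax into %rdx:%rax
+ *     idivq %rsi            # quotient -> %rax, remainder -> %rdx
+ */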
+DEFINE_MOP(MOP_je_l, {&OpndDesc::Lbl64},ISCONDBRANCH,kLtAlu,"je","0",1) +DEFINE_MOP(MOP_ja_l, {&OpndDesc::Lbl64},ISCONDBRANCH,kLtAlu,"ja","0",1) // unsigned > +DEFINE_MOP(MOP_jae_l, {&OpndDesc::Lbl64},ISCONDBRANCH,kLtAlu,"jae","0",1) // unsigned >= +DEFINE_MOP(MOP_jne_l, {&OpndDesc::Lbl64},ISCONDBRANCH,kLtAlu,"jne","0",1) +DEFINE_MOP(MOP_jb_l, {&OpndDesc::Lbl64},ISCONDBRANCH,kLtAlu,"jb","0",1) // unsigned < +DEFINE_MOP(MOP_jbe_l, {&OpndDesc::Lbl64},ISCONDBRANCH,kLtAlu,"jbe","0",1) // unsigned <= +DEFINE_MOP(MOP_jg_l, {&OpndDesc::Lbl64},ISCONDBRANCH,kLtAlu,"jg","0",1) // signed > +DEFINE_MOP(MOP_jge_l, {&OpndDesc::Lbl64},ISCONDBRANCH,kLtAlu,"jge","0",1) // signed >= +DEFINE_MOP(MOP_jl_l, {&OpndDesc::Lbl64},ISCONDBRANCH,kLtAlu,"jl","0",1) // signed < +DEFINE_MOP(MOP_jle_l, {&OpndDesc::Lbl64},ISCONDBRANCH,kLtAlu,"jle","0",1) // signed <= + +/* cmp */ +DEFINE_MOP(MOP_cmpb_r_r, {&OpndDesc::Reg8IS, &OpndDesc::Reg8IS},0,kLtAlu,"cmpb","0,1",1) +DEFINE_MOP(MOP_cmpb_m_r, {&OpndDesc::Mem8S, &OpndDesc::Reg8IS},0,kLtAlu,"cmpb","0,1",1) +DEFINE_MOP(MOP_cmpb_i_r, {&OpndDesc::Imm8, &OpndDesc::Reg8IS},0,kLtAlu,"cmpb","0,1",1) +DEFINE_MOP(MOP_cmpb_r_m, {&OpndDesc::Reg8IS, &OpndDesc::Mem8S},0,kLtAlu,"cmpb","0,1",1) +DEFINE_MOP(MOP_cmpb_i_m, {&OpndDesc::Imm8, &OpndDesc::Mem8S},0,kLtAlu,"cmpb","0,1",1) +DEFINE_MOP(MOP_cmpw_r_r, {&OpndDesc::Reg16IS, &OpndDesc::Reg16IS},0,kLtAlu,"cmpw","0,1",1) +DEFINE_MOP(MOP_cmpw_m_r, {&OpndDesc::Mem16S, &OpndDesc::Reg16IS},0,kLtAlu,"cmpw","0,1",1) +DEFINE_MOP(MOP_cmpw_i_r, {&OpndDesc::Imm16, &OpndDesc::Reg16IS},0,kLtAlu,"cmpw","0,1",1) +DEFINE_MOP(MOP_cmpw_r_m, {&OpndDesc::Reg16IS, &OpndDesc::Mem16S},0,kLtAlu,"cmpw","0,1",1) +DEFINE_MOP(MOP_cmpw_i_m, {&OpndDesc::Imm16, &OpndDesc::Mem16S},0,kLtAlu,"cmpw","0,1",1) +DEFINE_MOP(MOP_cmpl_r_r, {&OpndDesc::Reg32IS, &OpndDesc::Reg32IS},0,kLtAlu,"cmpl","0,1",1) +DEFINE_MOP(MOP_cmpl_m_r, {&OpndDesc::Mem32S, &OpndDesc::Reg32IS},0,kLtAlu,"cmpl","0,1",1) +DEFINE_MOP(MOP_cmpl_i_r, {&OpndDesc::Imm32, &OpndDesc::Reg32IS},0,kLtAlu,"cmpl","0,1",1) +DEFINE_MOP(MOP_cmpl_r_m, {&OpndDesc::Reg32IS, &OpndDesc::Mem32S},0,kLtAlu,"cmpl","0,1",1) +DEFINE_MOP(MOP_cmpl_i_m, {&OpndDesc::Imm32, &OpndDesc::Mem32S},0,kLtAlu,"cmpl","0,1",1) +DEFINE_MOP(MOP_cmpq_r_r, {&OpndDesc::Reg64IS, &OpndDesc::Reg64IS},0,kLtAlu,"cmpq","0,1",1) +DEFINE_MOP(MOP_cmpq_m_r, {&OpndDesc::Mem64S, &OpndDesc::Reg64IS},0,kLtAlu,"cmpq","0,1",1) +DEFINE_MOP(MOP_cmpq_i_r, {&OpndDesc::Imm32, &OpndDesc::Reg64IS},0,kLtAlu,"cmpq","0,1",1) +DEFINE_MOP(MOP_cmpq_r_m, {&OpndDesc::Reg64IS, &OpndDesc::Mem64S},0,kLtAlu,"cmpq","0,1",1) +DEFINE_MOP(MOP_cmpq_i_m, {&OpndDesc::Imm32, &OpndDesc::Mem64S},0,kLtAlu,"cmpq","0,1",1) + +/* test */ +DEFINE_MOP(MOP_testq_r_r, {&OpndDesc::Reg64IS, &OpndDesc::Reg64IS},0,kLtAlu,"testq","0,1",1) + +/* setcc -- use ccreg(CF/ZF/SF/OF) */ +DEFINE_MOP(MOP_setbe_r, {&OpndDesc::Reg8ID},0,kLtAlu,"setbe","0",1) +DEFINE_MOP(MOP_setle_r, {&OpndDesc::Reg8ID},0,kLtAlu,"setle","0",1) +DEFINE_MOP(MOP_setae_r, {&OpndDesc::Reg8ID},0,kLtAlu,"setae","0",1) +DEFINE_MOP(MOP_setge_r, {&OpndDesc::Reg8ID},0,kLtAlu,"setge","0",1) +DEFINE_MOP(MOP_setne_r, {&OpndDesc::Reg8ID},0,kLtAlu,"setne","0",1) +DEFINE_MOP(MOP_setb_r, {&OpndDesc::Reg8ID},0,kLtAlu,"setb","0",1) +DEFINE_MOP(MOP_setl_r, {&OpndDesc::Reg8ID},0,kLtAlu,"setl","0",1) +DEFINE_MOP(MOP_seta_r, {&OpndDesc::Reg8ID},0,kLtAlu,"seta","0",1) +DEFINE_MOP(MOP_setg_r, {&OpndDesc::Reg8ID},0,kLtAlu,"setg","0",1) +DEFINE_MOP(MOP_sete_r, {&OpndDesc::Reg8ID},0,kLtAlu,"sete","0",1) +DEFINE_MOP(MOP_setbe_m, 
{&OpndDesc::Mem8D},0,kLtAlu,"setbe","0",1) +DEFINE_MOP(MOP_setle_m, {&OpndDesc::Mem8D},0,kLtAlu,"setle","0",1) +DEFINE_MOP(MOP_setae_m, {&OpndDesc::Mem8D},0,kLtAlu,"setae","0",1) +DEFINE_MOP(MOP_setge_m, {&OpndDesc::Mem8D},0,kLtAlu,"setge","0",1) +DEFINE_MOP(MOP_setne_m, {&OpndDesc::Mem8D},0,kLtAlu,"setne","0",1) +DEFINE_MOP(MOP_setb_m, {&OpndDesc::Mem8D},0,kLtAlu,"setb","0",1) +DEFINE_MOP(MOP_setl_m, {&OpndDesc::Mem8D},0,kLtAlu,"setl","0",1) +DEFINE_MOP(MOP_seta_m, {&OpndDesc::Mem8D},0,kLtAlu,"seta","0",1) +DEFINE_MOP(MOP_setg_m, {&OpndDesc::Mem8D},0,kLtAlu,"setg","0",1) +DEFINE_MOP(MOP_sete_m, {&OpndDesc::Mem8D},0,kLtAlu,"sete","0",1) + +/* cmov */ +/* condition move if below or equal */ +DEFINE_MOP(MOP_cmovbew_r_r, {&OpndDesc::Reg16IS,&OpndDesc::Reg16IDS},0,kLtAlu,"cmovbew","0,1",1) +DEFINE_MOP(MOP_cmovbel_r_r, {&OpndDesc::Reg32IS,&OpndDesc::Reg32IDS},0,kLtAlu,"cmovbel","0,1",1) +DEFINE_MOP(MOP_cmovbeq_r_r, {&OpndDesc::Reg64IS,&OpndDesc::Reg64IDS},0,kLtAlu,"cmovbeq","0,1",1) +DEFINE_MOP(MOP_cmovbew_m_r, {&OpndDesc::Mem16S,&OpndDesc::Reg16IDS},0,kLtAlu,"cmovbew","0,1",1) +DEFINE_MOP(MOP_cmovbel_m_r, {&OpndDesc::Mem32S,&OpndDesc::Reg32IDS},0,kLtAlu,"cmovbel","0,1",1) +DEFINE_MOP(MOP_cmovbeq_m_r, {&OpndDesc::Mem64S,&OpndDesc::Reg64IDS},0,kLtAlu,"cmovbeq","0,1",1) +/* condition move if less or equal */ +DEFINE_MOP(MOP_cmovlew_r_r, {&OpndDesc::Reg16IS,&OpndDesc::Reg16IDS},0,kLtAlu,"cmovlew","0,1",1) +DEFINE_MOP(MOP_cmovlel_r_r, {&OpndDesc::Reg32IS,&OpndDesc::Reg32IDS},0,kLtAlu,"cmovlel","0,1",1) +DEFINE_MOP(MOP_cmovleq_r_r, {&OpndDesc::Reg64IS,&OpndDesc::Reg64IDS},0,kLtAlu,"cmovleq","0,1",1) +DEFINE_MOP(MOP_cmovlew_m_r, {&OpndDesc::Mem16S,&OpndDesc::Reg16IDS},0,kLtAlu,"cmovlew","0,1",1) +DEFINE_MOP(MOP_cmovlel_m_r, {&OpndDesc::Mem32S,&OpndDesc::Reg32IDS},0,kLtAlu,"cmovlel","0,1",1) +DEFINE_MOP(MOP_cmovleq_m_r, {&OpndDesc::Mem64S,&OpndDesc::Reg64IDS},0,kLtAlu,"cmovleq","0,1",1) +/* condition move if above or equal */ +DEFINE_MOP(MOP_cmovaew_r_r, {&OpndDesc::Reg16IS,&OpndDesc::Reg16IDS},0,kLtAlu,"cmovaew","0,1",1) +DEFINE_MOP(MOP_cmovael_r_r, {&OpndDesc::Reg32IS,&OpndDesc::Reg32IDS},0,kLtAlu,"cmovael","0,1",1) +DEFINE_MOP(MOP_cmovaeq_r_r, {&OpndDesc::Reg64IS,&OpndDesc::Reg64IDS},0,kLtAlu,"cmovaeq","0,1",1) +DEFINE_MOP(MOP_cmovaew_m_r, {&OpndDesc::Mem16S,&OpndDesc::Reg16IDS},0,kLtAlu,"cmovaew","0,1",1) +DEFINE_MOP(MOP_cmovael_m_r, {&OpndDesc::Mem32S,&OpndDesc::Reg32IDS},0,kLtAlu,"cmovael","0,1",1) +DEFINE_MOP(MOP_cmovaeq_m_r, {&OpndDesc::Mem64S,&OpndDesc::Reg64IDS},0,kLtAlu,"cmovaeq","0,1",1) +/* condition move if greater or equal */ +DEFINE_MOP(MOP_cmovgew_r_r, {&OpndDesc::Reg16IS,&OpndDesc::Reg16IDS},0,kLtAlu,"cmovgew","0,1",1) +DEFINE_MOP(MOP_cmovgel_r_r, {&OpndDesc::Reg32IS,&OpndDesc::Reg32IDS},0,kLtAlu,"cmovgel","0,1",1) +DEFINE_MOP(MOP_cmovgeq_r_r, {&OpndDesc::Reg64IS,&OpndDesc::Reg64IDS},0,kLtAlu,"cmovgeq","0,1",1) +DEFINE_MOP(MOP_cmovgew_m_r, {&OpndDesc::Mem16S,&OpndDesc::Reg16IDS},0,kLtAlu,"cmovgew","0,1",1) +DEFINE_MOP(MOP_cmovgel_m_r, {&OpndDesc::Mem32S,&OpndDesc::Reg32IDS},0,kLtAlu,"cmovgel","0,1",1) +DEFINE_MOP(MOP_cmovgeq_m_r, {&OpndDesc::Mem64S,&OpndDesc::Reg64IDS},0,kLtAlu,"cmovgeq","0,1",1) +/* condition move if not equal */ +DEFINE_MOP(MOP_cmovnew_r_r, {&OpndDesc::Reg16IS,&OpndDesc::Reg16IDS},0,kLtAlu,"cmovnew","0,1",1) +DEFINE_MOP(MOP_cmovnel_r_r, {&OpndDesc::Reg32IS,&OpndDesc::Reg32IDS},0,kLtAlu,"cmovnel","0,1",1) +DEFINE_MOP(MOP_cmovneq_r_r, {&OpndDesc::Reg64IS,&OpndDesc::Reg64IDS},0,kLtAlu,"cmovneq","0,1",1) +DEFINE_MOP(MOP_cmovnew_m_r, 
{&OpndDesc::Mem16S,&OpndDesc::Reg16IDS},0,kLtAlu,"cmovnew","0,1",1) +DEFINE_MOP(MOP_cmovnel_m_r, {&OpndDesc::Mem32S,&OpndDesc::Reg32IDS},0,kLtAlu,"cmovnel","0,1",1) +DEFINE_MOP(MOP_cmovneq_m_r, {&OpndDesc::Mem64S,&OpndDesc::Reg64IDS},0,kLtAlu,"cmovneq","0,1",1) +/* condition move if below */ +DEFINE_MOP(MOP_cmovbw_r_r, {&OpndDesc::Reg16IS,&OpndDesc::Reg16IDS},0,kLtAlu,"cmovbw","0,1",1) +DEFINE_MOP(MOP_cmovbl_r_r, {&OpndDesc::Reg32IS,&OpndDesc::Reg32IDS},0,kLtAlu,"cmovbl","0,1",1) +DEFINE_MOP(MOP_cmovbq_r_r, {&OpndDesc::Reg64IS,&OpndDesc::Reg64IDS},0,kLtAlu,"cmovbq","0,1",1) +DEFINE_MOP(MOP_cmovbw_m_r, {&OpndDesc::Mem16S,&OpndDesc::Reg16IDS},0,kLtAlu,"cmovbw","0,1",1) +DEFINE_MOP(MOP_cmovbl_m_r, {&OpndDesc::Mem32S,&OpndDesc::Reg32IDS},0,kLtAlu,"cmovbl","0,1",1) +DEFINE_MOP(MOP_cmovbq_m_r, {&OpndDesc::Mem64S,&OpndDesc::Reg64IDS},0,kLtAlu,"cmovbq","0,1",1) +/* condition move if less */ +DEFINE_MOP(MOP_cmovlw_r_r, {&OpndDesc::Reg16IS,&OpndDesc::Reg16IDS},0,kLtAlu,"cmovlw","0,1",1) +DEFINE_MOP(MOP_cmovll_r_r, {&OpndDesc::Reg32IS,&OpndDesc::Reg32IDS},0,kLtAlu,"cmovll","0,1",1) +DEFINE_MOP(MOP_cmovlq_r_r, {&OpndDesc::Reg64IS,&OpndDesc::Reg64IDS},0,kLtAlu,"cmovlq","0,1",1) +DEFINE_MOP(MOP_cmovlw_m_r, {&OpndDesc::Mem16S,&OpndDesc::Reg16IDS},0,kLtAlu,"cmovlw","0,1",1) +DEFINE_MOP(MOP_cmovll_m_r, {&OpndDesc::Mem32S,&OpndDesc::Reg32IDS},0,kLtAlu,"cmovll","0,1",1) +DEFINE_MOP(MOP_cmovlq_m_r, {&OpndDesc::Mem64S,&OpndDesc::Reg64IDS},0,kLtAlu,"cmovlq","0,1",1) +/* condition move if above */ +DEFINE_MOP(MOP_cmovaw_r_r, {&OpndDesc::Reg16IS,&OpndDesc::Reg16IDS},0,kLtAlu,"cmovaw","0,1",1) +DEFINE_MOP(MOP_cmoval_r_r, {&OpndDesc::Reg32IS,&OpndDesc::Reg32IDS},0,kLtAlu,"cmoval","0,1",1) +DEFINE_MOP(MOP_cmovaq_r_r, {&OpndDesc::Reg64IS,&OpndDesc::Reg64IDS},0,kLtAlu,"cmovaq","0,1",1) +DEFINE_MOP(MOP_cmovaw_m_r, {&OpndDesc::Mem16S,&OpndDesc::Reg16IDS},0,kLtAlu,"cmovaw","0,1",1) +DEFINE_MOP(MOP_cmoval_m_r, {&OpndDesc::Mem32S,&OpndDesc::Reg32IDS},0,kLtAlu,"cmoval","0,1",1) +DEFINE_MOP(MOP_cmovaq_m_r, {&OpndDesc::Mem64S,&OpndDesc::Reg64IDS},0,kLtAlu,"cmovaq","0,1",1) +/* condition move if greater */ +DEFINE_MOP(MOP_cmovgw_r_r, {&OpndDesc::Reg16IS,&OpndDesc::Reg16IDS},0,kLtAlu,"cmovgw","0,1",1) +DEFINE_MOP(MOP_cmovgl_r_r, {&OpndDesc::Reg32IS,&OpndDesc::Reg32IDS},0,kLtAlu,"cmovgl","0,1",1) +DEFINE_MOP(MOP_cmovgq_r_r, {&OpndDesc::Reg64IS,&OpndDesc::Reg64IDS},0,kLtAlu,"cmovgq","0,1",1) +DEFINE_MOP(MOP_cmovgw_m_r, {&OpndDesc::Mem16S,&OpndDesc::Reg16IDS},0,kLtAlu,"cmovgw","0,1",1) +DEFINE_MOP(MOP_cmovgl_m_r, {&OpndDesc::Mem32S,&OpndDesc::Reg32IDS},0,kLtAlu,"cmovgl","0,1",1) +DEFINE_MOP(MOP_cmovgq_m_r, {&OpndDesc::Mem64S,&OpndDesc::Reg64IDS},0,kLtAlu,"cmovgq","0,1",1) +/* condition move if equal */ +DEFINE_MOP(MOP_cmovew_r_r, {&OpndDesc::Reg16IS,&OpndDesc::Reg16IDS},0,kLtAlu,"cmovew","0,1",1) +DEFINE_MOP(MOP_cmovel_r_r, {&OpndDesc::Reg32IS,&OpndDesc::Reg32IDS},0,kLtAlu,"cmovel","0,1",1) +DEFINE_MOP(MOP_cmoveq_r_r, {&OpndDesc::Reg64IS,&OpndDesc::Reg64IDS},0,kLtAlu,"cmoveq","0,1",1) +DEFINE_MOP(MOP_cmovew_m_r, {&OpndDesc::Mem16S,&OpndDesc::Reg16IDS},0,kLtAlu,"cmovew","0,1",1) +DEFINE_MOP(MOP_cmovel_m_r, {&OpndDesc::Mem32S,&OpndDesc::Reg32IDS},0,kLtAlu,"cmovel","0,1",1) +DEFINE_MOP(MOP_cmoveq_m_r, {&OpndDesc::Mem64S,&OpndDesc::Reg64IDS},0,kLtAlu,"cmoveq","0,1",1) + +/* call, ret, leave */ +DEFINE_MOP(MOP_callq_l, {&OpndDesc::Lbl64,&OpndDesc::ListSrc,&OpndDesc::ListDest},ISCALL,kLtAlu,"callq","0",1) +DEFINE_MOP(MOP_callq_m, {&OpndDesc::Mem64S,&OpndDesc::ListSrc,&OpndDesc::ListDest},ISCALL,kLtAlu,"callq","0",1) 
+DEFINE_MOP(MOP_callq_r, {&OpndDesc::Reg64IS,&OpndDesc::ListSrc,&OpndDesc::ListDest},ISCALL,kLtAlu,"callq","0",1) + +DEFINE_MOP(MOP_retq, {},CANTHROW,kLtBranch,"ret","",1) + +DEFINE_MOP(MOP_leaveq, {},CANTHROW,kLtBranch,"leave","",1) + +/* imul */ +DEFINE_MOP(MOP_imulw_r_r, {&OpndDesc::Reg16IS,&OpndDesc::Reg16IDS},0,kLtAlu,"imulw","0,1",1) +DEFINE_MOP(MOP_imull_r_r, {&OpndDesc::Reg32IS,&OpndDesc::Reg32IDS},0,kLtAlu,"imull","0,1",1) +DEFINE_MOP(MOP_imulq_r_r, {&OpndDesc::Reg64IS,&OpndDesc::Reg64IDS},0,kLtAlu,"imulq","0,1",1) + +/* mul float */ +DEFINE_MOP(MOP_mulfs_r_r, {&OpndDesc::Reg128FS,&OpndDesc::Reg128FDS},0,kLtAlu,"mulss","0,1",1) +DEFINE_MOP(MOP_mulfd_r_r, {&OpndDesc::Reg128FS,&OpndDesc::Reg128FDS},0,kLtAlu,"mulsd","0,1",1) + +/* nop */ +// TODO: still not clear why we need so many forms of nop (except for patch) +DEFINE_MOP(MOP_nopb, {&OpndDesc::Mem8S},0,kLtAlu,"nopb","",1) +DEFINE_MOP(MOP_nopw, {&OpndDesc::Mem16S},0,kLtAlu,"nopw","",1) +DEFINE_MOP(MOP_nopl, {&OpndDesc::Mem32S},0,kLtAlu,"nopl","",1) +DEFINE_MOP(MOP_nop, {},0,0,"nop","",1) + +/* Byte Swap */ +DEFINE_MOP(MOP_bswapl_r, {&OpndDesc::Reg32IDS},0,kLtAlu,"bswapl","0",1) +DEFINE_MOP(MOP_bswapq_r, {&OpndDesc::Reg64IDS},0,kLtAlu,"bswapq","0",1) + +/* xchg */ +DEFINE_MOP(MOP_xchgb_r_r, {&OpndDesc::Reg8IDS,&OpndDesc::Reg8IDS},0,kLtAlu,"xchgb","0,1",1) + +/* end of X64 instructions */ + +/* invalid operation */ +DEFINE_MOP(MOP_movq_i_m, {&OpndDesc::Imm64,&OpndDesc::Mem64D},0,kLtAlu,"invalid","0,1",1) + + +/* floating point */ +DEFINE_MOP(MOP_movfq_m_r, {&OpndDesc::Mem64S,&OpndDesc::Reg64FD},ISLOAD,kLtAlu,"movsd","0,1",1) +DEFINE_MOP(MOP_movfq_r_m, {&OpndDesc::Reg64FS,&OpndDesc::Mem64D},ISSTORE,kLtAlu,"movsd","0,1",1) +DEFINE_MOP(MOP_movfq_r_r, {&OpndDesc::Reg64FS,&OpndDesc::Reg64FS},ISSTORE,kLtAlu,"movsd","0,1",1) + +/* floating div */ +DEFINE_MOP(MOP_divsd_r, {&OpndDesc::Reg64FS, &OpndDesc::Reg64FDS},0,kLtAlu,"divsd","0,1",1) +DEFINE_MOP(MOP_divsd_m, {&OpndDesc::Mem64S, &OpndDesc::Reg64FDS},0,kLtAlu,"divsd","0,1",1) + +/* convert int2float */ +DEFINE_MOP(MOP_cvtsi2ssq_r, {&OpndDesc::Reg64IS, &OpndDesc::Reg128FD},0,kLtAlu,"cvtsi2ssq","0,1",1) +DEFINE_MOP(MOP_cvtsi2ssl_r, {&OpndDesc::Reg32IS, &OpndDesc::Reg128FD},0,kLtAlu,"cvtsi2ssl","0,1",1) +DEFINE_MOP(MOP_cvtsi2sdq_r, {&OpndDesc::Reg64IS, &OpndDesc::Reg128FD},0,kLtAlu,"cvtsi2sdq","0,1",1) +DEFINE_MOP(MOP_cvtsi2sdl_r, {&OpndDesc::Reg32IS, &OpndDesc::Reg128FD},0,kLtAlu,"cvtsi2sdl","0,1",1) + +/*convert float2int */ +DEFINE_MOP(MOP_cvttsd2siq_r, {&OpndDesc::Reg128FS, &OpndDesc::Reg64ID},0,kLtAlu,"cvttsd2siq","0,1",1) +DEFINE_MOP(MOP_cvttsd2sil_r, {&OpndDesc::Reg128FS, &OpndDesc::Reg32ID},0,kLtAlu,"cvttsd2sil","0,1",1) +DEFINE_MOP(MOP_cvttss2siq_r, {&OpndDesc::Reg128FS, &OpndDesc::Reg64ID},0,kLtAlu,"cvttss2siq","0,1",1) +DEFINE_MOP(MOP_cvttss2sil_r, {&OpndDesc::Reg128FS, &OpndDesc::Reg32ID},0,kLtAlu,"cvttss2sil","0,1",1) + +/* convert float2float */ +DEFINE_MOP(MOP_cvtss2sd_r, {&OpndDesc::Reg128FS, &OpndDesc::Reg128FD},0,kLtAlu,"cvtss2sd","0,1",1) +DEFINE_MOP(MOP_cvtsd2ss_r, {&OpndDesc::Reg128FS, &OpndDesc::Reg128FD},0,kLtAlu,"cvtsd2ss","0,1",1) + +DEFINE_MOP(MOP_ucomisd_r_r, {&OpndDesc::Reg128FS,&OpndDesc::Reg128FS},0,kLtAlu,"ucomisd","0,1",1) + +/* pseudo operation */ +DEFINE_MOP(MOP_pseudo_ret_int, {&OpndDesc::Reg64IS},0,kLtUndef,"//MOP_pseudo_ret_int","", 0) diff --git a/src/mapleall/maple_be/include/cg/x86_64/x64_memlayout.h b/src/mapleall/maple_be/include/cg/x86_64/x64_memlayout.h new file mode 100644 index 
0000000000000000000000000000000000000000..a46a8ba5622246011e3e346755e6f78012a345b3 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/x86_64/x64_memlayout.h @@ -0,0 +1,155 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ + +#ifndef MAPLEBE_INCLUDE_CG_X86_64_MEMLAYOUT_H +#define MAPLEBE_INCLUDE_CG_X86_64_MEMLAYOUT_H + +#include "memlayout.h" +#include "x64_abi.h" + +namespace maplebe { +class X64SymbolAlloc : public SymbolAlloc { + public: + X64SymbolAlloc() = default; + + ~X64SymbolAlloc() = default; + + void SetRegisters(bool isR) { + isRegister = isR; + } + + inline bool IsRegister() const { + return isRegister; + } + + private: + bool isRegister = false; +}; +/* + * On X64, stack frames are structured as follows: + * + * The stack grows downward -- full descending (SP points + * to a filled slot). + * + * Any of the parts of a frame is optional, i.e., it is + * possible to write a caller-callee pair in such a way + * that the particular part is absent in the frame. + * + * Before a call is made, the frame looks like: + * | | + * ||----------------------------| + * | args passed on the stack | (we call them up-formals) + * ||----------------------------|<- Stack Pointer + * | | + * + * Right after a call is made + * | | + * ||----------------------------| + * | args passed on the stack | + * ||----------------------------|<- Stack Pointer + * | PREV_FP, PREV_LR | + * ||----------------------------|<- Frame Pointer + * + * After the prologue has run, + * | | + * ||----------------------------| + * | args passed on the stack | + * ||----------------------------| + * | PREV_FP, PREV_LR | + * ||----------------------------|<- Frame Pointer + * | GR Arg Save Area | + * ||----------------------------| + * | VR Arg Save Area | + * ||----------------------------| + * | callee-saved registers | + * ||----------------------------| + * | empty space. should have | + * | at least 16-byte alignment | + * ||----------------------------| + * | local variables | + * ||----------------------------|<- Stack Pointer + * | red zone | + * + * callee-saved registers include + * 1. rbx rbp r12 r14 r14 r15 + * 2. 
XMM0-XMM7 + */ + +class X64MemLayout : public MemLayout { + public: + X64MemLayout(BECommon &b, MIRFunction &f, MapleAllocator &mallocator) + : MemLayout(b, f, mallocator, kX64StackPtrAlignment) {} + + ~X64MemLayout() override = default; + + uint32 ComputeStackSpaceRequirementForCall(StmtNode &stmtNode, int32 &aggCopySize, bool isIcall) override; + void LayoutStackFrame(int32 &structCopySize, int32 &maxParmStackSize) override; + + uint64 StackFrameSize() const; + + const MemSegment &Locals() const { + return segLocals; + } + /* + * "Pseudo-registers can be regarded as local variables of a + * primitive type whose addresses are never taken" + */ + virtual void AssignSpillLocationsToPseudoRegisters() override; + + virtual SymbolAlloc *AssignLocationToSpillReg(regno_t vrNum) override; + + uint32 GetSizeOfSpillReg() const { + return segSpillReg.GetSize(); + } + + uint32 GetSizeOfLocals() const { + return segLocals.GetSize(); + } + + void SetSizeOfGRSaveArea(uint32 sz) { + segGrSaveArea.SetSize(sz); + } + + uint32 GetSizeOfGRSaveArea() const { + return segGrSaveArea.GetSize(); + } + + inline void SetSizeOfVRSaveArea(uint32 sz) { + segVrSaveArea.SetSize(sz); + } + + uint32 GetSizeOfVRSaveArea() const { + return segVrSaveArea.GetSize(); + } + + int32 GetGRSaveAreaBaseLoc(); + int32 GetVRSaveAreaBaseLoc(); + private: + /* Layout function */ + void LayoutFormalParams(); + void LayoutLocalVariables(); + void LayoutVarargParams(); + + /* util function */ + void SetSizeAlignForTypeIdx(uint32 typeIdx, uint32 &size, uint32 &align) const; + void LayoutReturnRef(int32 &structCopySize, int32 &maxParmStackSize); + + MemSegment segLocals = MemSegment(kMsLocals); /* these are accessed via Frame Pointer */ + MemSegment segGrSaveArea = MemSegment(kMsGrSaveArea); + MemSegment segVrSaveArea = MemSegment(kMsVrSaveArea); + MemSegment segSpillReg = MemSegment(kMsSpillReg); +}; +} +#endif // MAPLEBE_INCLUDE_CG_X86_64_MEMLAYOUT_H diff --git a/src/mapleall/maple_be/include/cg/x86_64/x64_optimize_common.h b/src/mapleall/maple_be/include/cg/x86_64/x64_optimize_common.h new file mode 100644 index 0000000000000000000000000000000000000000..055658b0a5bc152b0033b6b494b2abe427626655 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/x86_64/x64_optimize_common.h @@ -0,0 +1,46 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MAPLEBE_INCLUDE_CG_X64_X64_OPTIMIZE_COMMON_H +#define MAPLEBE_INCLUDE_CG_X64_X64_OPTIMIZE_COMMON_H + +#include "x64_isa.h" +#include "optimize_common.h" + +namespace maplebe { +using namespace maple; + +class X64InsnVisitor : public InsnVisitor { + public: + explicit X64InsnVisitor(CGFunc &func) : InsnVisitor(func) {} + + ~X64InsnVisitor() = default; + + void ModifyJumpTarget(LabelIdx targetLabel, BB &bb) override; + void ModifyJumpTarget(Operand &targetOperand, BB &bb) override; + void ModifyJumpTarget(BB &newTarget, BB &bb) override; + /* Check if it requires to add extra gotos when relocate bb */ + Insn *CloneInsn(Insn &originalInsn) override; + LabelIdx GetJumpLabel(const Insn &insn) const override; + bool IsCompareInsn(const Insn &insn) const override; + bool IsCompareAndBranchInsn(const Insn &insn) const override; + bool IsAddOrSubInsn(const Insn &insn) const override; + RegOperand *CreateVregFromReg(const RegOperand &pReg) override; + void ReTargetSuccBB(BB &bb, LabelIdx newTarget) const override; + void FlipIfBB(BB &bb, LabelIdx ftLabel) const override; + BB *CreateGotoBBAfterCondBB(BB &bb, BB &fallthru, bool isTargetFallthru) const override; +}; +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_X64_X64_OPTIMIZE_COMMON_H */ diff --git a/src/mapleall/maple_be/include/cg/x86_64/x64_peep.h b/src/mapleall/maple_be/include/cg/x86_64/x64_peep.h new file mode 100644 index 0000000000000000000000000000000000000000..488c82ad455d5816d2d563b96026aa7598c25e7c --- /dev/null +++ b/src/mapleall/maple_be/include/cg/x86_64/x64_peep.h @@ -0,0 +1,47 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MAPLEBE_INCLUDE_CG_X64_X64_PEEP_H +#define MAPLEBE_INCLUDE_CG_X64_X64_PEEP_H + +#include +#include "peep.h" + +namespace maplebe { +class X64CGPeepHole : CGPeepHole { + public: + /* normal constructor */ + X64CGPeepHole(CGFunc &f, MemPool *memPool) : CGPeepHole(f, memPool) {}; + /* constructor for ssa */ + X64CGPeepHole(CGFunc &f, MemPool *memPool, CGSSAInfo *cgssaInfo) : CGPeepHole(f, memPool, cgssaInfo) {}; + ~X64CGPeepHole() = default; + void Run() override; + bool DoSSAOptimize(BB &bb, Insn &insn) override; + void DoNormalOptimize(BB &bb, Insn &insn) override; +}; + +class RemoveMovingtoSameRegPattern : public CGPeepPattern { + public: + RemoveMovingtoSameRegPattern(CGFunc &cgFunc, BB &currBB, Insn &currInsn) + : CGPeepPattern(cgFunc, currBB, currInsn) {} + ~RemoveMovingtoSameRegPattern() override = default; + void Run(BB &bb, Insn &insn) override; + bool CheckCondition(Insn &insn) override; + std::string GetPatternName() override { + return "RemoveMovingtoSameRegPattern"; + } +}; + +} /* namespace maplebe */ +#endif /* MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_PEEP_H */ diff --git a/src/mapleall/maple_be/include/cg/x86_64/x64_phases.def b/src/mapleall/maple_be/include/cg/x86_64/x64_phases.def new file mode 100644 index 0000000000000000000000000000000000000000..0d7b66e6b7698a6e06a4364ae7744bf781800ac9 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/x86_64/x64_phases.def @@ -0,0 +1,30 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan PSL v1. + * You can use this software according to the terms and conditions of the Mulan PSL v1. + * You may obtain a copy of Mulan PSL v1 at: + * + * http://license.coscl.org.cn/MulanPSL + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v1 for more details. + */ + + ADDTARGETPHASE("layoutstackframe", true); + ADDTARGETPHASE("createstartendlabel", true); + ADDTARGETPHASE("buildehfunc", GetMIRModule()->GetSrcLang() != kSrcLangC); + ADDTARGETPHASE("instructionselector", true); + ADDTARGETPHASE("instructionstandardize", true); + ADDTARGETPHASE("handlecfg", true); + ADDTARGETPHASE("moveargs", true); + ADDTARGETPHASE("cfgo", true); + ADDTARGETPHASE("localcopyprop", true); + ADDTARGETPHASE("regalloc", true); + ADDTARGETPHASE("postcfgo", true); + ADDTARGETPHASE("cgpostpeephole", true); + ADDTARGETPHASE("generateproepilog", true); + /* ASM EMIT */ + ADDTARGETPHASE("cgemit", true); diff --git a/src/mapleall/maple_be/include/cg/x86_64/x64_proepilog.h b/src/mapleall/maple_be/include/cg/x86_64/x64_proepilog.h new file mode 100644 index 0000000000000000000000000000000000000000..8f0ba354f5feb408cfcc9bb973d5df56033d2320 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/x86_64/x64_proepilog.h @@ -0,0 +1,47 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
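RemoveMovingtoSameRegPattern above deletes moves whose source and destination are the same register. A minimal stand-alone sketch of that peephole over an invented ToyInsn list (the real pattern works on Insn objects inside a BB):

#include <iostream>
#include <list>
#include <string>

// Toy instruction: opcode plus destination/source register numbers.
struct ToyInsn {
  std::string op;
  int dst;
  int src;
};

// Drop every "mov r, r" whose source and destination registers match,
// which is the condition this kind of pattern checks for.
void RemoveSelfMoves(std::list<ToyInsn> &bb) {
  for (auto it = bb.begin(); it != bb.end();) {
    if (it->op == "mov" && it->dst == it->src) {
      it = bb.erase(it);
    } else {
      ++it;
    }
  }
}

int main() {
  std::list<ToyInsn> bb{{"mov", 1, 1}, {"add", 1, 2}, {"mov", 3, 3}};
  RemoveSelfMoves(bb);
  std::cout << bb.size() << " insns left\n";  // 1
  return 0;
}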
+ */ +#ifndef MAPLEBE_INCLUDE_CG_X64_X64_PROEPILOG_H +#define MAPLEBE_INCLUDE_CG_X64_X64_PROEPILOG_H + +#include "proepilog.h" +#include "x64_cgfunc.h" + +namespace maplebe { +using namespace maple; + +class X64GenProEpilog : public GenProEpilog { + public: + explicit X64GenProEpilog(CGFunc &func) : GenProEpilog(func) {} + ~X64GenProEpilog() override = default; + + bool NeedProEpilog() override; + void Run() override; + private: + void GenerateProlog(BB &bb); + void GenerateEpilog(BB &bb); + void GenerateCalleeSavedRegs(bool isPush); + void GeneratePushCalleeSavedRegs(RegOperand ®Opnd, MemOperand &memOpnd, uint32 regSize); + void GeneratePopCalleeSavedRegs(RegOperand ®Opnd, MemOperand &memOpnd, uint32 regSize); + void GeneratePushUnnamedVarargRegs(); + void GeneratePushRbpInsn(); + void GenerateMovRspToRbpInsn(); + void GenerateSubFrameSizeFromRspInsn(); + void GenerateAddFrameSizeToRspInsn(); + void GeneratePopInsn(); + void GenerateRetInsn(); +}; +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_X64_X64_PROEPILOG_H */ diff --git a/src/mapleall/maple_be/include/cg/x86_64/x64_reaching.h b/src/mapleall/maple_be/include/cg/x86_64/x64_reaching.h new file mode 100644 index 0000000000000000000000000000000000000000..f53023c23e64aa73aa29a813b3da60c7fae34dc1 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/x86_64/x64_reaching.h @@ -0,0 +1,62 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
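The Generate* helpers of X64GenProEpilog above are named after the canonical x86-64 frame sequence: push %rbp, copy %rsp into %rbp, adjust %rsp by the frame size, and the mirrored epilogue. The small sketch below only prints that sequence for an arbitrary frame size to show the shape of the emitted code; it is not the generator itself.

#include <cstdint>
#include <iostream>

void EmitProlog(uint32_t frameSize) {
  std::cout << "  pushq %rbp\n"
            << "  movq  %rsp, %rbp\n"
            << "  subq  $" << frameSize << ", %rsp\n";
}

void EmitEpilog(uint32_t frameSize) {
  std::cout << "  addq  $" << frameSize << ", %rsp\n"
            << "  popq  %rbp\n"
            << "  retq\n";
}

int main() {
  EmitProlog(32);  // 32 is an arbitrary example frame size
  EmitEpilog(32);
  return 0;
}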
+ */ +#ifndef MAPLEBE_INCLUDE_CG_X64_REACHING_H +#define MAPLEBE_INCLUDE_CG_X64_REACHING_H + +#include "reaching.h" + +namespace maplebe { +class X64ReachingDefinition : public ReachingDefinition { + public: + X64ReachingDefinition(CGFunc &func, MemPool &memPool) : ReachingDefinition(func, memPool) {} + ~X64ReachingDefinition() override = default; + bool FindRegUseBetweenInsn(uint32 regNO, Insn *startInsn, Insn *endInsn, InsnSet &useInsnSet) const final; + std::vector FindRegDefBetweenInsnGlobal(uint32 regNO, Insn *startInsn, Insn *endInsn) const final; + std::vector FindMemDefBetweenInsn(uint32 offset, const Insn *startInsn, Insn *endInsn) const final; + bool FindRegUseBetweenInsnGlobal(uint32 regNO, Insn *startInsn, Insn *endInsn, BB* movBB) const final; + bool FindMemUseBetweenInsn(uint32 offset, Insn *startInsn, const Insn *endInsn, + InsnSet &useInsnSet) const final; + bool HasRegDefBetweenInsnGlobal(uint32 regNO, Insn &startInsn, Insn &endInsn); + bool DFSFindRegDefBetweenBB(const BB &startBB, const BB &endBB, uint32 regNO, + std::vector &visitedBB) const; + InsnSet FindDefForRegOpnd(Insn &insn, uint32 indexOrRegNO, bool isRegNO = false) const final; + InsnSet FindDefForMemOpnd(Insn &insn, uint32 indexOrOffset, bool isOffset = false) const final; + InsnSet FindUseForMemOpnd(Insn &insn, uint8 index, bool secondMem = false) const final; + bool FindRegUsingBetweenInsn(uint32 regNO, Insn *startInsn, const Insn *endInsn) const; + protected: + void InitStartGen() final; + void InitEhDefine(BB &bb) final; + void InitGenUse(BB &bb, bool firstTime = true) final; + void GenAllAsmDefRegs(BB &bb, Insn &insn, uint32 index) final; + void GenAllAsmUseRegs(BB &bb, Insn &insn, uint32 index) final; + void GenAllCallerSavedRegs(BB &bb, Insn &insn) final; + bool IsRegKilledByCallInsn(const Insn &insn, regno_t regNO) const final; + bool KilledByCallBetweenInsnInSameBB(const Insn &startInsn, const Insn &endInsn, regno_t regNO) const final; + void AddRetPseudoInsn(BB &bb) final; + + void AddRetPseudoInsns() final; bool IsCallerSavedReg(uint32 regNO) const final; + void FindRegDefInBB(uint32 regNO, BB &bb, InsnSet &defInsnSet) const final; + void FindMemDefInBB(uint32 offset, BB &bb, InsnSet &defInsnSet) const final; + void DFSFindDefForRegOpnd(const BB &startBB, uint32 regNO, std::vector &visitedBB, + InsnSet &defInsnSet) const final; + void DFSFindDefForMemOpnd(const BB &startBB, uint32 offset, std::vector &visitedBB, + InsnSet &defInsnSet) const final; + int32 GetStackSize() const final; + private: + bool IsDiv(const Insn &insn) const; +}; +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_REACHING_H */ diff --git a/src/mapleall/maple_be/include/cg/x86_64/x64_reg_info.h b/src/mapleall/maple_be/include/cg/x86_64/x64_reg_info.h new file mode 100644 index 0000000000000000000000000000000000000000..7eba47a583907935c2f8e0425e70429bd70b11b7 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/x86_64/x64_reg_info.h @@ -0,0 +1,154 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. 
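X64ReachingDefinition above fills per-block gen/kill/in/out information and then answers def/use queries. For orientation, the classic bitset fixed-point solver is sketched below over invented ToyBlock data; the real pass tracks registers and stack offsets rather than abstract definition ids.

#include <bitset>
#include <iostream>
#include <vector>

constexpr size_t kMaxDefs = 64;
using DefSet = std::bitset<kMaxDefs>;

struct ToyBlock {
  DefSet gen, kill, in, out;
  std::vector<int> preds;
};

// Iterate out[b] = gen[b] | (in[b] & ~kill[b]), with in[b] the union of the predecessors'
// out sets, until nothing changes.
void Solve(std::vector<ToyBlock> &blocks) {
  bool changed = true;
  while (changed) {
    changed = false;
    for (ToyBlock &b : blocks) {
      DefSet in;
      for (int p : b.preds) {
        in |= blocks[static_cast<size_t>(p)].out;
      }
      DefSet out = b.gen | (in & ~b.kill);
      if (in != b.in || out != b.out) {
        b.in = in;
        b.out = out;
        changed = true;
      }
    }
  }
}

int main() {
  std::vector<ToyBlock> blocks(2);
  blocks[0].gen.set(0);        // definition d0 in block 0
  blocks[1].preds = {0};
  blocks[1].kill.set(0);       // block 1 redefines the same location as d1
  blocks[1].gen.set(1);
  Solve(blocks);
  std::cout << "d0 reaches entry of block 1: " << blocks[1].in.test(0) << "\n";  // 1
  return 0;
}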
+ * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_CG_X64_X64_REG_INFO_H +#define MAPLEBE_INCLUDE_CG_X64_X64_REG_INFO_H + +#include "reg_info.h" +#include "x64_isa.h" +#include "x64_abi.h" + +namespace maplebe { +static const std::map x64IntParamsRegIdx = + {{x64::RAX, 0}, {x64::RDI, 1}, {x64::RSI, 2}, {x64::RDX, 3}, {x64::RCX, 4}, {x64::R8, 5}, {x64::R9, 6}}; + +class X64RegInfo : public RegisterInfo { + public: + X64RegInfo(MapleAllocator &mallocator): RegisterInfo(mallocator) {} + + ~X64RegInfo() override = default; + + void Init() override; + void Fini() override; + void SaveCalleeSavedReg(MapleSet savedRegs) override; + bool IsSpecialReg(regno_t regno) const override; + bool IsCalleeSavedReg(regno_t regno) const override; + bool IsYieldPointReg(regno_t regNO) const override; + bool IsUnconcernedReg(regno_t regNO) const override; + bool IsUnconcernedReg(const RegOperand ®Opnd) const override; + bool IsFramePointReg(regno_t regNO) const override { + return (regNO == x64::RBP); + } + bool IsReservedReg(regno_t regNO, bool doMultiPass) const override { + return false; + } + RegOperand *GetOrCreatePhyRegOperand(regno_t regNO, uint32 size, RegType kind, uint32 flag) override; + Insn *BuildStrInsn(uint32 regSize, PrimType stype, RegOperand &phyOpnd, MemOperand &memOpnd) override; + Insn *BuildLdrInsn(uint32 regSize, PrimType stype, RegOperand &phyOpnd, MemOperand &memOpnd) override; + MemOperand *AdjustMemOperandIfOffsetOutOfRange(MemOperand *memOpnd, regno_t vrNum, + bool isDest, Insn &insn, regno_t regNum, bool &isOutOfRange) override; + bool IsGPRegister(regno_t regNO) const override { + return x64::IsGPRegister(static_cast(regNO)); + } + /* Those registers can not be overwrite. */ + bool IsUntouchableReg(regno_t regNO) const override{ + return false; + } + /* Refactor later: Integrate parameters and return Reg */ + uint32 GetIntRegsParmsNum() override { + /*Parms: rdi, rsi, rdx, rcx, r8, r9; Ret: rax, rdx */ + return x64::kNumIntParmRegs + 1; + } + uint32 GetIntRetRegsNum() override { + return x64::kNumIntReturnRegs; + } + uint32 GetFpRetRegsNum() override { + return x64::kNumFloatReturnRegs; + } + regno_t GetLastParamsIntReg() override { + return x64::R9; + } + uint32 GetNormalUseOperandNum() override { + return 0; + } + regno_t GetIntRetReg(uint32 idx) override { + CHECK_FATAL(idx < x64::kNumIntReturnRegs, "index out of range in IntRetReg"); + return static_cast(x64::kIntReturnRegs[idx]); + } + regno_t GetFpRetReg(uint32 idx) override { + CHECK_FATAL(idx < x64::kNumFloatReturnRegs, "index out of range in FloatRetReg"); + return static_cast(x64::kFloatReturnRegs[idx]); + } + /* phys reg which can be pre-Assignment: + * INT param regs -- rdi, rsi, rdx, rcx, r8, r9 + * INT return regs -- rdx, rax + * FP param regs -- xmm0 ~ xmm7 + * FP return regs -- xmm0 ~ xmm1 + */ + bool IsPreAssignedReg(regno_t regNO) const override { + return x64::IsParamReg(static_cast(regNO)) || + regNO == x64::RAX || regNO == x64::RDX; + } + uint32 GetIntParamRegIdx(regno_t regNO) const override { + const std::map::const_iterator iter = x64IntParamsRegIdx.find(regNO); + CHECK_FATAL(iter != x64IntParamsRegIdx.end(), "index out of range in IntParamsRegs"); + return iter->second; + } + uint32 GetFpParamRegIdx(regno_t regNO) const override { + return static_cast(regNO - x64::V0); + } + regno_t GetLastParamsFpReg() override { + return x64::kRinvalid; + } + uint32 GetFloatRegsParmsNum() override { + return x64::kNumFloatParmRegs; + } + uint32 GetFloatRegsRetsNum() { + return 
x64::kNumFloatReturnRegs; + } + uint32 GetAllRegNum() override { + return x64::kAllRegNum; + } + regno_t GetInvalidReg() override { + return x64::kRinvalid; + } + bool IsAvailableReg(regno_t regNO) const override { + return x64::IsAvailableReg(static_cast(regNO)); + } + bool IsVirtualRegister(const RegOperand ®Opnd) override { + return regOpnd.GetRegisterNumber() > x64::kAllRegNum; + } + bool IsVirtualRegister(regno_t regno) override { + return regno > x64::kAllRegNum; + } + regno_t GetReservedSpillReg() override { + return x64::kRinvalid; + } + regno_t GetSecondReservedSpillReg() override { + return x64::kRinvalid; + } + regno_t GetYieldPointReg() const override { + return x64::kRinvalid; + } + regno_t GetStackPointReg() const override { + return x64::RSP; + } + bool IsSpillRegInRA(regno_t regNO, bool has3RegOpnd) override { + return x64::IsSpillRegInRA(static_cast(regNO), has3RegOpnd); + } + + regno_t GetIntSpillFillReg(size_t idx) const override { + static regno_t intRegs[kSpillMemOpndNum] = { 0 }; + ASSERT(idx < kSpillMemOpndNum, "index out of range"); + return intRegs[idx]; + } + regno_t GetFpSpillFillReg(size_t idx) const override { + static regno_t fpRegs[kSpillMemOpndNum] = { 0 }; + ASSERT(idx < kSpillMemOpndNum, "index out of range"); + return fpRegs[idx]; + } +}; +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_X64_X64_REG_INFO_H */ diff --git a/src/mapleall/maple_be/include/cg/x86_64/x64_standardize.h b/src/mapleall/maple_be/include/cg/x86_64/x64_standardize.h new file mode 100644 index 0000000000000000000000000000000000000000..46353bc7e0885b5df86ecf156a23a7f3f78b1eaf --- /dev/null +++ b/src/mapleall/maple_be/include/cg/x86_64/x64_standardize.h @@ -0,0 +1,40 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ + +#ifndef MAPLEBE_INCLUDE_X64_STANDARDIZE_H +#define MAPLEBE_INCLUDE_X64_STANDARDIZE_H + +#include "standardize.h" + +namespace maplebe { +class X64Standardize : public Standardize { + public: + explicit X64Standardize(CGFunc &f) : Standardize(f) { + SetAddressMapping(true); + } + + ~X64Standardize() override = default; + + private: + void StdzMov(Insn &insn) override; + void StdzStrLdr(Insn &insn) override; + void StdzBasicOp(Insn &insn) override; + void StdzUnaryOp(Insn &insn, CGFunc &cgFunc) override; + void StdzCvtOp(Insn &insn, CGFunc &cgFunc) override; + void StdzShiftOp(Insn &insn, CGFunc &cgFunc) override; + void StdzFloatingNeg(Insn &insn, CGFunc &cgFunc); +}; +} +#endif /* MAPLEBE_INCLUDEX_64_STANDARDIZE_H */ diff --git a/src/mapleall/maple_be/include/cg/yieldpoint.h b/src/mapleall/maple_be/include/cg/yieldpoint.h new file mode 100644 index 0000000000000000000000000000000000000000..d66103c59e93c1f4d0192098ccf64d7550235917 --- /dev/null +++ b/src/mapleall/maple_be/include/cg/yieldpoint.h @@ -0,0 +1,42 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
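SetAddressMapping(true) in X64Standardize's constructor reflects that x86-64 arithmetic is two-address, so a three-operand MIR-style op has to become a copy plus an in-place op. A purely illustrative lowering with strings standing in for real operands (not the standardize pass itself):

#include <iostream>
#include <string>
#include <vector>

// "dst = src1 op src2" lowered to two-address form: mov dst, src1 ; op dst, src2.
std::vector<std::string> LowerToTwoAddress(const std::string &op, const std::string &dst,
                                           const std::string &src1, const std::string &src2) {
  std::vector<std::string> out;
  if (dst != src1) {
    out.push_back("mov " + dst + ", " + src1);
  }
  out.push_back(op + " " + dst + ", " + src2);
  return out;
}

int main() {
  for (const std::string &s : LowerToTwoAddress("add", "rax", "rcx", "rdx")) {
    std::cout << s << "\n";
  }
  return 0;
}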
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_INCLUDE_CG_YIELDPOINT_H +#define MAPLEBE_INCLUDE_CG_YIELDPOINT_H + +#include "cgfunc.h" +#include "cg_phase.h" + +namespace maplebe { +class YieldPointInsertion { + public: + explicit YieldPointInsertion(CGFunc &func) : cgFunc(&func) {} + + virtual ~YieldPointInsertion() = default; + + virtual void Run() {} + + std::string PhaseName() const { + return "yieldpoint"; + } + + protected: + CGFunc *cgFunc; +}; + +MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgYieldPointInsertion, maplebe::CGFunc) +MAPLE_FUNC_PHASE_DECLARE_END +} /* namespace maplebe */ + +#endif /* MAPLEBE_INCLUDE_CG_YIELDPOINT_H */ \ No newline at end of file diff --git a/src/mapleall/maple_be/mdgen/gendef.py b/src/mapleall/maple_be/mdgen/gendef.py new file mode 100755 index 0000000000000000000000000000000000000000..a49862a0b8de538f732651ca65edf3d8c8c8d7fb --- /dev/null +++ b/src/mapleall/maple_be/mdgen/gendef.py @@ -0,0 +1,91 @@ +#!/usr/bin/env python +# coding=utf-8 +# +# Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# +import os, sys, subprocess, shlex, re, argparse +def Gendef(execTool, mdFiles, outputDir, asanLib=None): + tdList = [] + for mdFile in mdFiles: + if mdFile.find('sched') >= 0: + schedInfo = mdFile + mdCmd = "%s --genSchdInfo %s -o %s" %(execTool, schedInfo , outputDir) + isMatch = re.search(r'[;\\|\\&\\$\\>\\<`]', mdCmd, re.M|re.I) + if (isMatch): + print("Command Injection !") + return + print("[*] %s" % (mdCmd)) + localEnv = os.environ + if asanLib is not None: + asanEnv = asanLib.split("=") + localEnv[asanEnv[0]] = asanEnv[1] + print("env :" + str(asanEnv)) + subprocess.check_call(shlex.split(mdCmd), shell = False, env = localEnv) + else: + tdList.append(i) + return + +def Process(execTool, mdFileDir, outputDir, asanLib=None): + if not (os.path.exists(execTool)): + print("maplegen is required before generating def files automatically") + return + if not (os.path.exists(mdFileDir)): + print("td/md files is required as input!!!") + print("Generate def files FAILED!!!") + return + + mdFiles = [] + for root,dirs,allfiles in os.walk(mdFileDir): + for mdFile in allfiles: + mdFiles.append("%s/%s"%(mdFileDir, mdFile)) + + if not (os.path.exists(outputDir)): + print("Create the " + outputDir) + os.makedirs(outputDir) + Gendef(execTool, mdFiles, outputDir, asanLib) + + defFile = "%s/mplad_arch_define.def" % (outputDir) + if not (os.path.exists(defFile)): + Gendef(execTool, mdFiles, outputDir, asanLib) + for mdfile in mdFiles: + if (os.stat(mdfile).st_mtime > os.stat(defFile).st_mtime): + Gendef(execTool, mdFiles, outputDir, asanLib) + if (os.stat(execTool).st_mtime > os.stat(defFile).st_mtime): + Gendef(execTool, mdFiles, outputDir, asanLib) + +def get_arg_parser(): + parser = argparse.ArgumentParser( + description="maplegen") + parser.add_argument('-e', '--exe', + help='maplegen_exe_directory') + parser.add_argument('-m', '--md', + help='mdfiles_directory') + parser.add_argument('-o', '--out', + help='output_defiless_directory') + parser.add_argument('-a', '--asan', + help='enabled asan and followed env LD_PRELOAD=xxxx') + return parser + +def main(): + parser = get_arg_parser() + args = parser.parse_args() + if (args.exe is None or args.md is None or args.out is None): + print(str(args)) + parser.print_help() + exit(-1) + + Process(args.exe, args.md, args.out, args.asan) + +if __name__ == "__main__": + main() diff --git a/src/mapleall/maple_be/mdgen/include/mdgenerator.h b/src/mapleall/maple_be/mdgen/include/mdgenerator.h new file mode 100644 index 0000000000000000000000000000000000000000..714b19fc45235dbc0c6019fa99eff9bbfb26bfb3 --- /dev/null +++ b/src/mapleall/maple_be/mdgen/include/mdgenerator.h @@ -0,0 +1,74 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
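gendef.py above regenerates the .def files whenever an input .md file or the maplegen binary is newer than the generated mplad_arch_define.def. The same staleness test is sketched here in C++17 with std::filesystem purely for illustration (the script itself is Python; paths here are placeholders and the input is assumed to exist):

#include <filesystem>
#include <iostream>

namespace fs = std::filesystem;

// Regenerate only when the output is missing or older than the input.
bool NeedsRegen(const fs::path &input, const fs::path &output) {
  if (!fs::exists(output)) {
    return true;
  }
  return fs::last_write_time(input) > fs::last_write_time(output);
}

int main(int argc, char **argv) {
  if (argc < 3) {
    std::cout << "usage: stalecheck <input> <output>\n";
    return 1;
  }
  std::cout << (NeedsRegen(argv[1], argv[2]) ? "regenerate\n" : "up to date\n");
  return 0;
}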
+ */ +#ifndef MAPLEBE_MDGEN_INCLUDE_MDGENERATOR_H +#define MAPLEBE_MDGEN_INCLUDE_MDGENERATOR_H +#include +#include "mdrecord.h" +#include "mpl_logging.h" + +namespace MDGen { +class MDCodeGen { + public: + MDCodeGen(const MDClassRange &inputRange, const std::string &oFileDirArg) + : curKeeper (inputRange), + outputFileDir(oFileDirArg) {} + virtual ~MDCodeGen() = default; + + const std::string &GetOFileDir() const { + return outputFileDir; + } + void SetTargetArchName(const std::string &archName) const { + targetArchName = archName; + } + + void EmitCheckPtr(std::ofstream &outputFile, const std::string &emitName, const std::string &name, + const std::string &ptrType) const; + void EmitFileHead(std::ofstream &outputFile, const std::string &headInfo) const; + MDClass GetSpecificClass(const std::string &className); + + protected: + MDClassRange curKeeper; + + private: + static std::string targetArchName; + std::string outputFileDir; +}; + +class SchedInfoGen : public MDCodeGen { + public: + SchedInfoGen(const MDClassRange &inputRange, const std::string &oFileDirArg) + : MDCodeGen(inputRange, oFileDirArg) {} + ~SchedInfoGen() override { + if (outFile.is_open()) { + outFile.close(); + } + } + + void EmitArchDef(); + const std::string &GetArchName(); + void EmitUnitIdDef(); + void EmitUnitDef(); + void EmitUnitNameDef(); + void EmitLatencyDef(); + void EmitResvDef(); + void EmitBypassDef(); + void Run(); + + private: + std::ofstream outFile; +}; +} /* namespace MDGen */ + +#endif /* MAPLEBE_MDGEN_INCLUDE_MDGENERATOR_H */ \ No newline at end of file diff --git a/src/mapleall/maple_be/mdgen/include/mdlexer.h b/src/mapleall/maple_be/mdgen/include/mdlexer.h new file mode 100644 index 0000000000000000000000000000000000000000..020ef877b7af58fef29fbf83a38e05edce4ebc17 --- /dev/null +++ b/src/mapleall/maple_be/mdgen/include/mdlexer.h @@ -0,0 +1,119 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MAPLEBE_MDGEN_INCLUDE_MDLEXER_H +#define MAPLEBE_MDGEN_INCLUDE_MDLEXER_H + +#include +#include +#include +#include +#include +#include "mdtokens.h" +#include "mpl_logging.h" +#include "types_def.h" + +namespace MDGen { +using namespace maple; +class MDLexer { + public: + MDLexer() { + keywords.clear(); + /* input can be improved */ + (void)keywords.insert(std::make_pair("Def", kMDDef)); + (void)keywords.insert(std::make_pair("Class", kMDClass)); + (void)keywords.insert(std::make_pair("DefType", kMDDefType)); + }; + ~MDLexer() { + if (mdFileInternal.is_open()) { + mdFileInternal.close(); + } + }; + + MDTokenKind ReturnError() const; + MDTokenKind NextToken(); + MDTokenKind LexToken(); + MDTokenKind GetTokenIdentifier(); + MDTokenKind GetTokenConstVal(); + int ReadOneLine(); + bool SkipCComment(); + void SkipALineComment(); + + void PrepareFile(const std::string &mdfileName); + const std::string &GetStrToken() const { + return strToken; + } + int64_t GetIntVal() const { + return intVal; + } + const std::string &GetStrLine() const { + return strLine; + } + size_t GetStrLineSize() const { + return strLine.size(); + } + void RemoveInValidAtBack() { + if (strLine.length() == 0) { + return; + } + if (strLine.back() == '\n') { + strLine.pop_back(); + } + if (strLine.back() == '\r') { + strLine.pop_back(); + } + } + MDTokenKind GetCurKind() const { + return curKind; + } + char GetCurChar() { + return curPos < GetStrLineSize() ? strLine[curPos] : 0; + } + char GetNextChar() { + ++curPos; + return curPos < GetStrLineSize() ? strLine[curPos] : 0; + } + char ViewNextChar() const { + return curPos < GetStrLineSize() ? strLine[curPos] : 0; + } + char GetCharAt(uint32 pos) { + if (pos >= GetStrLineSize()) { + return 0; + } + return strLine[pos]; + } + int GetLineNumber() const { + return lineNumber; + } + + MDTokenKind GetHexConst(uint32 digitStartPos, bool isNegative); + MDTokenKind GetIntConst(uint32 digitStartPos, bool isNegative); + MDTokenKind GetFloatConst(); + + private: + static constexpr int maxNumLength = 10; + std::ifstream *mdFile = nullptr; + std::ifstream mdFileInternal; + uint32 lineNumber = 0; /* current Processing Line */ + uint32 curPos = 0; /* Position in a line */ + std::string strLine = ""; /* current token line */ + std::string strToken = ""; /* store ID,keywords ... */ + int32 intVal = 0; /* store integer when token */ + float floatVal = 0; /* store float value when token */ + MDTokenKind curKind = kMDInvalid; /* current token kind */ + std::unordered_map keywords; /* store keywords defined for md files */ +}; +} /* namespace MDGen */ + +#endif /* MAPLEBE_MDGEN_INCLUDE_MDLEXER_H */ diff --git a/src/mapleall/maple_be/mdgen/include/mdparser.h b/src/mapleall/maple_be/mdgen/include/mdparser.h new file mode 100644 index 0000000000000000000000000000000000000000..e4e449fcaeb0f4ca00b1a30ad7b83a0784c4afa9 --- /dev/null +++ b/src/mapleall/maple_be/mdgen/include/mdparser.h @@ -0,0 +1,52 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
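MDLexer above scans one line at a time with GetCurChar/GetNextChar, skipping blanks and classifying identifier, number and punctuation runs. A stripped-down version of that scanning loop with an invented token enum, just to show the control flow (not the MDLexer token set):

#include <cctype>
#include <iostream>
#include <string>

enum ToyTokenKind { kTokEOF, kTokIdent, kTokInt, kTokPunct };

ToyTokenKind NextToken(const std::string &line, size_t &pos, std::string &text) {
  while (pos < line.size() && (line[pos] == ' ' || line[pos] == '\t')) {
    ++pos;  // skip blanks
  }
  if (pos >= line.size()) {
    return kTokEOF;
  }
  size_t start = pos;
  if (std::isalpha(static_cast<unsigned char>(line[pos])) || line[pos] == '_') {
    while (pos < line.size() &&
           (std::isalnum(static_cast<unsigned char>(line[pos])) || line[pos] == '_')) {
      ++pos;
    }
    text = line.substr(start, pos - start);
    return kTokIdent;
  }
  if (std::isdigit(static_cast<unsigned char>(line[pos]))) {
    while (pos < line.size() && std::isdigit(static_cast<unsigned char>(line[pos]))) {
      ++pos;
    }
    text = line.substr(start, pos - start);
    return kTokInt;
  }
  text = line.substr(pos++, 1);  // single punctuation character
  return kTokPunct;
}

int main() {
  std::string line = "Def Unit : kUnitIdSlot0 { 2 };";
  size_t pos = 0;
  std::string text;
  while (NextToken(line, pos, text) != kTokEOF) {
    std::cout << text << "\n";
  }
  return 0;
}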
+ */ +#ifndef MAPLEBE_MDGEN_INCLUDE_MDPARSER_H +#define MAPLEBE_MDGEN_INCLUDE_MDPARSER_H + +#include "mdlexer.h" +#include "mdrecord.h" +#include "mempool.h" + +namespace MDGen { +class MDParser { + public: + MDParser(MDClassRange &newKeeper, maple::MemPool *memPool) : dataKeeper(newKeeper), mdMemPool(memPool) {} + ~MDParser() = default; + + bool ParseFile(const std::string &inputFile); + bool ParseObjectStart(); + bool ParseObject(); + bool IsObjectStart(MDTokenKind k) const; + bool ParseDefType(); + bool ParseMDClass(); + bool ParseMDClassBody(MDClass &oneClass); + bool ParseMDObject(); + bool ParseMDObjBody(MDObject &curObj); + bool ParseIntElement(MDObject &curObj, bool isVec); + bool ParseStrElement(MDObject &curObj, bool isVec); + bool ParseDefTyElement(MDObject &curObj, bool isVec, const std::set &childSet); + bool ParseDefObjElement(MDObject &curObj, bool isVec, const MDClass &pClass); + + /* error process */ + bool EmitError(const std::string &errMsg); + + private: + MDLexer lexer; + MDClassRange &dataKeeper; + maple::MemPool *mdMemPool; +}; +} /* namespace MDGen */ + +#endif /* MAPLEBE_MDGEN_INCLUDE_MDPARSER_H */ \ No newline at end of file diff --git a/src/mapleall/maple_be/mdgen/include/mdrecord.h b/src/mapleall/maple_be/mdgen/include/mdrecord.h new file mode 100644 index 0000000000000000000000000000000000000000..3f9755ed31963f90118bd0f92408d81cd7cf66d6 --- /dev/null +++ b/src/mapleall/maple_be/mdgen/include/mdrecord.h @@ -0,0 +1,292 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
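The Parse* methods of MDParser above follow an expect/consume style: each production checks the next token and reports a readable message through EmitError instead of asserting. The toy parser below mirrors that control flow for a DefType-like line; the whitespace tokenization and the grammar are simplified stand-ins, not the MDParser grammar.

#include <iostream>
#include <sstream>
#include <string>
#include <vector>

struct ToyParser {
  std::vector<std::string> toks;
  size_t pos = 0;

  // Consume the expected token or report an error, like the EmitError pattern above.
  bool Expect(const std::string &want, const std::string &errMsg) {
    if (pos < toks.size() && toks[pos] == want) {
      ++pos;
      return true;
    }
    std::cerr << "parse error: " << errMsg << "\n";
    return false;
  }

  // defType := "DefType" ident "=" ident ";"   (simplified single-member form)
  bool ParseDefType() {
    if (!Expect("DefType", "expected keyword DefType")) return false;
    ++pos;  // type name
    if (!Expect("=", "expected '=' after the type name")) return false;
    ++pos;  // member name
    return Expect(";", "expected ';' at the end of the definition");
  }
};

int main() {
  std::istringstream in("DefType unitType = Primary ;");
  ToyParser p;
  std::string w;
  while (in >> w) p.toks.push_back(w);
  std::cout << (p.ParseDefType() ? "ok\n" : "failed\n");
  return 0;
}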
+ */ +#ifndef MAPLEBE_MDGEN_INCLUDE_MDREORD_H +#define MAPLEBE_MDGEN_INCLUDE_MDREORD_H + +#include +#include +#include +#include +#include +#include "mempool_allocator.h" +#include "mempool.h" +#include "mpl_logging.h" +#include "types_def.h" + +/* Define base data structure which is used to store information in .md files */ +namespace MDGen { +class MDClass; /* circular dependency */ + +enum RecordType : maple::uint32 { + kClassName, + kAnonClassName, + kObjectName, + kElementName, + kIntType, + kStringType, + kTypeName, + kTypeMemberName, + kUndefinedStr +}; + +struct StrInfo { + unsigned int idx; + RecordType sType; + StrInfo(unsigned int curIdx, RecordType curTy) : idx(curIdx), sType(curTy) {} +}; + +class MDElement { + public: + MDElement() = default; + virtual ~MDElement() = default; + enum ElementTy : maple::uint32 { + kEleIntTy, + kEleStrTy, + kEleDefTyTy, + kEleDefObjTy, + kEleVecTy, + kEleDefaultTy, + kEleInValidTy + }; + + unsigned int GetContent() const { + return DoGetContent(); + } + + ElementTy GetRecDataTy() const { + return eleType; + } + + protected: + ElementTy eleType = kEleInValidTy; + + private: + virtual unsigned int DoGetContent() const = 0; +}; + +class DefaultElement : public MDElement { + public: + DefaultElement() { + eleType = kEleDefaultTy; + } + + ~DefaultElement() override = default; + + private: + unsigned int DoGetContent() const override { + CHECK_FATAL(false, "Cannnot load default element's content"); + return UINT_MAX; + } +}; + +class IntElement : public MDElement { + public: + explicit IntElement(unsigned int curVal) : intEleVal(curVal) { + eleType = kEleIntTy; + } + + ~IntElement() override = default; + + private: + unsigned int intEleVal; + unsigned int DoGetContent() const override { + return intEleVal; + } +}; + +class StringElement : public MDElement { + public: + explicit StringElement(unsigned int curIdx) : strElemntIdx(curIdx) { + eleType = kEleStrTy; + } + + ~StringElement() override = default; + + private: + unsigned int strElemntIdx; + unsigned int DoGetContent() const override { + return strElemntIdx; + } +}; + +class DefTyElement : public MDElement { + public: + DefTyElement() { + eleType = kEleDefTyTy; + } + + ~DefTyElement() override = default; + + bool SetContent(const StrInfo curInfo, const std::set &childTySet); + + private: + unsigned int elementIdx = UINT_MAX; + unsigned int DoGetContent() const override { + return elementIdx; + } +}; + +class DefObjElement : public MDElement { + public: + DefObjElement() { + eleType = kEleDefObjTy; + } + + ~DefObjElement() override = default; + + bool SetContent(const StrInfo curInfo, const MDClass &parentClass); + + private: + unsigned int elementIdx = UINT_MAX; + unsigned int DoGetContent() const override { + return elementIdx; + } +}; + +class VecElement : public MDElement { + public: + explicit VecElement(maple::MemPool &mem) : alloc(&mem), vecData(alloc.Adapter()) { + eleType = kEleVecTy; + } + + ~VecElement() override = default; + + void appendElement(MDElement *curElement) { + vecData.emplace_back(curElement); + } + + const maple::MapleVector GetVecData() const { + return vecData; + } + + size_t GetVecDataSize() const { + return vecData.size(); + } + + private: + maple::MapleAllocator alloc; + maple::MapleVector vecData; + + unsigned int DoGetContent() const override { + CHECK_FATAL(false, "Vector element does not have a single content"); + return UINT_MAX; + } +}; + +class MDObject { + public: + MDObject(unsigned int curIdx, MDClass &pClass, maple::MemPool &memPool) + : objectIdx(curIdx), 
parentClass(&pClass), alloc(&memPool), mdElements(alloc.Adapter()) {} + + ~MDObject() = default; + + const MDElement *GetOneMDElement(size_t index) const; + + void AddMDElements(MDElement* curElement) { + mdElements.emplace_back(curElement); + } + + unsigned int GetIdx() const { + return objectIdx; + } + + const MDClass *GetParentClass() const { + return parentClass; + } + + private: + unsigned int objectIdx; + MDClass *parentClass; + maple::MapleAllocator alloc; + maple::MapleVector mdElements; +}; + +class MDClass { + public: + MDClass(unsigned int classIdx, bool isAnonymous) { + this->classIdx = classIdx; + this->isAnonymous = isAnonymous; + } + ~MDClass() = default; + + const MDObject &GetOneMDObject(size_t index) const; + void AddClassMember(MDObject inputObj); + bool IsClassMember(unsigned int curIdx) const; + bool IsValidStructEle(RecordType curTy) const; + unsigned int GetClassIdx() const { + return classIdx; + } + bool IsAnonymousClass() const { + return isAnonymous; + } + const std::vector> GetFormalTypes() const { + return formalTypes; + } + const std::set GetchildObjNames() const { + return childObjNames; + } + size_t GetFormalTypeSize() const { + return formalTypes.size(); + } + size_t GetMDObjectSize() const { + return mdObjects.size(); + } + void BuildFormalTypes(unsigned int memberIdx, bool isVec); + + private: + unsigned int classIdx = 0; + bool isAnonymous = false; + std::vector mdObjects; + std::vector> formalTypes; + std::set childObjNames; +}; + +class MDClassRange { + public: + explicit MDClassRange(std::string module) : moduleName(module) { + stringTable.clear(); + stringHashTable.clear(); + /* init common types such as unsigned int ,string , float */ + std::set initTypes; + AddDefinedType(CreateStrInTable("int", kIntType), initTypes); + AddDefinedType(CreateStrInTable("string", kStringType), initTypes); + } + ~MDClassRange() = default; + + StrInfo GetStrInTable(const std::string &inStr); + RecordType GetStrTyByIdx(size_t curIdx); + const std::string &GetStrByIdx(size_t curIdx); + void AddMDClass(MDClass curClass); + MDClass GetOneMDClass(unsigned int givenIdx); + std::set GetOneSpcType(unsigned int givenTyIdx); + size_t GetStringTableSize() const { + return stringTable.size(); + } + unsigned int CreateStrInTable(const std::string &inStr, RecordType curTy); + void ModifyStrTyInTable(const std::string &inStr, RecordType newTy); + void AddDefinedType(unsigned int typesName, std::set typesSet); + void FillMDClass(unsigned int givenIdx, const MDObject &insertObj); + + private: + std::string moduleName; + std::unordered_map stringHashTable; + std::vector stringTable; + unsigned int totalStr = 0; + std::unordered_map> definedTypes; + std::unordered_map allClasses; +}; +} /* namespace MDGen */ + +#endif /* MAPLEBE_MDGEN_INCLUDE_MDREORD_H */ diff --git a/src/mapleall/maple_be/mdgen/include/mdtokens.h b/src/mapleall/maple_be/mdgen/include/mdtokens.h new file mode 100644 index 0000000000000000000000000000000000000000..41a703feead3d2add9f6ee522d429112860e3dfd --- /dev/null +++ b/src/mapleall/maple_be/mdgen/include/mdtokens.h @@ -0,0 +1,52 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
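MDClassRange above keeps a string table (stringTable plus stringHashTable) so every identifier read from the .md files maps to a stable index. A minimal interning table in the same spirit, with invented names, to show the idea behind CreateStrInTable/GetStrByIdx:

#include <iostream>
#include <string>
#include <unordered_map>
#include <vector>

class ToyStrTable {
 public:
  // Return the existing index for s, or append it and return the new index.
  unsigned Intern(const std::string &s) {
    auto it = indexOf.find(s);
    if (it != indexOf.end()) {
      return it->second;
    }
    unsigned idx = static_cast<unsigned>(strings.size());
    indexOf.emplace(s, idx);
    strings.push_back(s);
    return idx;
  }
  const std::string &Lookup(unsigned idx) const {
    return strings.at(idx);
  }

 private:
  std::unordered_map<std::string, unsigned> indexOf;
  std::vector<std::string> strings;
};

int main() {
  ToyStrTable table;
  unsigned a = table.Intern("int");
  unsigned b = table.Intern("string");
  unsigned c = table.Intern("int");  // second lookup reuses the first index
  std::cout << a << " " << b << " " << c << " " << table.Lookup(b) << "\n";  // 0 1 0 string
  return 0;
}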
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEBE_MDGEN_INCLUDE_MDTOKENS_H +#define MAPLEBE_MDGEN_INCLUDE_MDTOKENS_H +enum MDTokenKind { + /* special symbols */ + kMDError, + kMDEOF, + kMDInvalid, + + /* normal symbols */ + kMDOpenParen, /* ( */ + kMDCloseParen, /* ) */ + kMDOpenBrace, /* { */ + kMDCloseBrace, /* } */ + kMDOpenSquare, /* [ */ + kMDCloseSquare, /* ] */ + kMDEqual, /* = */ + kMDSemi, /* ; */ + kMDComma, /* , */ + kMDColon, /* : */ + kMDLess, /* < */ + kMDGreater, /* > */ + kMDLgAnd, /* & */ + kMDLgOr, /* | */ + + kMDIdentifier, + /* const values */ + kMDIntVal, + kMDFloatVal, + kMDDoubleVal, + + /* keywords */ + kMDDef, + kMDClass, + kMDAnonClass, + kMDDefType, +}; + +#endif /* MAPLEBE_MDGEN_INCLUDE_MDTOKENS_H */ \ No newline at end of file diff --git a/src/mapleall/maple_be/mdgen/src/mdgenerator.cpp b/src/mapleall/maple_be/mdgen/src/mdgenerator.cpp new file mode 100644 index 0000000000000000000000000000000000000000..649072f934643556d71bc0ec2a6be0eebe1d5c11 --- /dev/null +++ b/src/mapleall/maple_be/mdgen/src/mdgenerator.cpp @@ -0,0 +1,237 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include +#include +#include +#include +#include "mdgenerator.h" + +namespace MDGen { +std::string MDCodeGen::targetArchName = ""; + +void MDCodeGen::EmitCheckPtr(std::ofstream &outputFile, const std::string &emitName, const std::string &name, + const std::string &ptrType) const { + outputFile << "if(" << emitName << " == nullptr) {\n" << + " maple::LogInfo::MapleLogger(maple::kLlErr) << \"" << ptrType << " allocation for " << name << + " failed.\" << std::endl;\n" << "}\n" << + "ASSERT(" << emitName << ", \"" << ptrType << " allocation for " << name << + " failed.\");\n" << "\n"; +} + +void MDCodeGen::EmitFileHead(std::ofstream &outputFile, const std::string &headInfo) const { + outputFile << "/* " << targetArchName << " " << headInfo << " definition : */\n"; +} + +MDClass MDCodeGen::GetSpecificClass(const std::string &className) { + unsigned int classIdx = curKeeper.GetStrInTable(className).idx; + CHECK_FATAL(classIdx != UINT_MAX, "Load Class Failed!"); + return curKeeper.GetOneMDClass(classIdx); +} + +const std::string &SchedInfoGen::GetArchName() { + MDClass archClass = GetSpecificClass("ArchitectureName"); + const MDObject &archObj = archClass.GetOneMDObject(0); + auto *archStrEle = static_cast(archObj.GetOneMDElement(0)); + return curKeeper.GetStrByIdx(archStrEle->GetContent()); +} + +void SchedInfoGen::EmitArchDef() { + MDClass parallelClass = GetSpecificClass("Parallelism"); + CHECK_FATAL(parallelClass.GetMDObjectSize() > 0, "specific class failed, maybe illegal input"); + const MDObject ¶lleObj = parallelClass.GetOneMDObject(0); + auto *parallelEle = static_cast(paralleObj.GetOneMDElement(0)); + outFile.open(GetOFileDir() + "/mplad_arch_define.def", std::ios::out); + EmitFileHead(outFile, "Architecture"); + outFile << "SetMaxParallelism(" << parallelEle->GetContent() << ");\n"; + outFile.close(); +} + +void SchedInfoGen::EmitUnitIdDef() { + MDClass unitClass = GetSpecificClass("Unit"); + outFile.open(GetOFileDir() + "/mplad_unit_id.def", std::ios::out); + CHECK_FATAL(outFile.is_open(), "Failed to open output file: %s/mplad_unit_id.def", GetOFileDir().c_str()); + EmitFileHead(outFile, "function unit ID"); + for (auto unitIdx : unitClass.GetchildObjNames()) { + outFile << " " << curKeeper.GetStrByIdx(unitIdx) << ",\n"; + } + outFile.close(); +} + +void SchedInfoGen::EmitUnitNameDef() { + MDClass unitClass = GetSpecificClass("Unit"); + outFile.open(GetOFileDir() + "/mplad_unit_name.def", std::ios::out); + CHECK_FATAL(outFile.is_open(), "Failed to open output file: %s/mplad_unit_name.def", GetOFileDir().c_str()); + EmitFileHead(outFile, "function unit name"); + for (auto unitIdx : unitClass.GetchildObjNames()) { + std::string unitPureName = curKeeper.GetStrByIdx(unitIdx); + std::string unitPrefix = "kUnitId"; + if (unitPrefix.length() < unitPureName.length()) { + unitPureName = unitPureName.substr(unitPrefix.length()); + outFile << "\"" << unitPureName << "\",\n"; + } + } + outFile.close(); +} + +void SchedInfoGen::EmitUnitDef() { + MDClass unitClass = GetSpecificClass("Unit"); + outFile.open(GetOFileDir() + "/mplad_unit_define.def", std::ios::out); + CHECK_FATAL(outFile.is_open(), "Failed to open output file: %s/mplad_unit_define.def", GetOFileDir().c_str()); + EmitFileHead(outFile, "function units "); + bool isUnitNumDef = false; + for (size_t i = 0; i < unitClass.GetMDObjectSize(); ++i) { + const MDObject &singleUnit = unitClass.GetOneMDObject(i); + if (singleUnit.GetOneMDElement(0)->GetRecDataTy() == MDElement::kEleDefaultTy) { + continue; + } + auto *curUnitTy = 
static_cast(singleUnit.GetOneMDElement(0)); + std::string curUnitName = curKeeper.GetStrByIdx(singleUnit.GetIdx()); + std::string emitUnitName = "instance" + curUnitName; + std::string unitPrefix = "Unit *" + emitUnitName + " = new Unit("; + if (!isUnitNumDef) { + outFile << "\n"; + outFile << "const unsigned int kunitNum = 2;\n"; + isUnitNumDef = true; + } + outFile << unitPrefix; + if (curUnitTy->GetContent() == curKeeper.GetStrInTable("Primary").idx) { + outFile << curUnitName << ");\n"; + } else { + std::string unitTypeStr = ""; + if (curUnitTy->GetContent() == curKeeper.GetStrInTable("And").idx) { + unitTypeStr = "kUnitTypeAnd"; + } else if (curUnitTy->GetContent() == curKeeper.GetStrInTable("Or").idx) { + unitTypeStr = "kUnitTypeOr"; + } + CHECK_FATAL(unitTypeStr.size() != 0, "Haven't support this kind of Unit yet"); + outFile << unitTypeStr << ", " << curUnitName << ", kunitNum,\n"; + outFile << std::setiosflags(std::ios::right) << std::setw(unitPrefix.length()) << std::setfill(' ') << " "; + unsigned int dependUnitsIndex = 1; + auto *dependUnitEle = static_cast(singleUnit.GetOneMDElement(dependUnitsIndex)); + for (size_t k = 0; k < dependUnitEle->GetVecDataSize(); ++k) { + auto *dependUnit = static_cast(dependUnitEle->GetVecData()[k]); + outFile << "instance" << curKeeper.GetStrByIdx(dependUnit->GetContent()); + if (k != dependUnitEle->GetVecDataSize() - 1) { + outFile << ", "; + } + } + outFile << ");\n"; + } + EmitCheckPtr(outFile, emitUnitName, curUnitName, "Unit"); + } + outFile.close(); +} + +void SchedInfoGen::EmitLatencyDef() { + MDClass resvClass = GetSpecificClass("Reservation"); + outFile.open(GetOFileDir() + "/mplad_latency_type.def", std::ios::out); + CHECK_FATAL(outFile.is_open(), "Failed to open output file: %s/mplad_latency_type.def", GetOFileDir().c_str()); + EmitFileHead(outFile, " latency type definition "); + for (auto resvIdx : resvClass.GetchildObjNames()) { + outFile << " " << curKeeper.GetStrByIdx(resvIdx) << ",\n"; + } + outFile.close(); +} + +void SchedInfoGen::EmitResvDef() { + MDClass resvClass = GetSpecificClass("Reservation"); + outFile.open(GetOFileDir() + "/mplad_reservation_define.def", std::ios::out); + CHECK_FATAL(outFile.is_open(), "Failed to open output file: %s/mplad_reservation_define.def", + GetOFileDir().c_str()); + EmitFileHead(outFile, "reservations"); + for (size_t i = 0; i < resvClass.GetMDObjectSize(); ++i) { + const MDObject &singleResv = resvClass.GetOneMDObject(i); + if (singleResv.GetOneMDElement(0)->GetRecDataTy() == MDElement::kEleDefaultTy) { + continue; + } + auto *curResvLatency = static_cast(singleResv.GetOneMDElement(0)); + std::string curResvName = curKeeper.GetStrByIdx(singleResv.GetIdx()); + std::string emitResvName = "resvInst" + curResvName; + std::string resvPrefix = "Reservation *" + emitResvName + " = new Reservation("; + outFile << resvPrefix << curResvName << ", " << curResvLatency->GetContent() << ", "; + if (singleResv.GetOneMDElement(1)->GetRecDataTy() == MDElement::kEleDefaultTy) { + outFile << "0);\n"; + } else { + size_t dependUnitsIndex = 1; + auto *dependUnitEle = static_cast(singleResv.GetOneMDElement(dependUnitsIndex)); + outFile << dependUnitEle->GetVecDataSize() << ",\n"; + for (size_t k = 0; k < dependUnitEle->GetVecDataSize(); ++k) { + auto *dependUnit = static_cast(dependUnitEle->GetVecData()[k]); + if (curKeeper.GetStrByIdx(dependUnit->GetContent()) != "nothing") { + outFile << std::setiosflags(std::ios::right) << std::setw(resvPrefix.length()) << std::setfill(' ') + << "GetUnitByUnitId(" << 
curKeeper.GetStrByIdx(dependUnit->GetContent()) << ")"; + } else { + outFile << std::setiosflags(std::ios::right) << std::setw(resvPrefix.length()) << std::setfill(' ') + << "nullptr"; + } + if (k < dependUnitEle->GetVecDataSize() - 1) { + outFile << ",\n"; + } + } + outFile << ");\n"; + } + EmitCheckPtr(outFile, emitResvName, curResvName, "Reservation"); + } + outFile.close(); +} + +void SchedInfoGen::EmitBypassDef() { + MDClass bypassClass = GetSpecificClass("Bypass"); + outFile.open(GetOFileDir() + "/mplad_bypass_define.def", std::ios::out); + for (size_t i = 0; i < bypassClass.GetMDObjectSize(); ++i) { + const MDObject &singleBypass = bypassClass.GetOneMDObject(i); + if (singleBypass.GetOneMDElement(0)->GetRecDataTy() == MDElement::kEleDefaultTy) { + continue; + } + constexpr size_t fromVecIndex = 1; + constexpr size_t toVecIndex = 2; + constexpr size_t curBpTyIndex = 3; + auto *bpTyEle = singleBypass.GetOneMDElement(curBpTyIndex); + std::string curBypassTy = (bpTyEle->GetRecDataTy() == MDElement::kEleDefaultTy) ? + "" : curKeeper.GetStrByIdx(bpTyEle->GetContent()); + transform(curBypassTy.begin(), curBypassTy.end(), curBypassTy.begin(), ::toupper); + + CHECK_FATAL(singleBypass.GetOneMDElement(0)->GetRecDataTy() == MDElement::ElementTy::kEleIntTy, "Bypass illegal"); + CHECK_FATAL(singleBypass.GetOneMDElement(fromVecIndex)->GetRecDataTy() == MDElement::ElementTy::kEleVecTy, + "Bypass illegal"); + CHECK_FATAL(singleBypass.GetOneMDElement(toVecIndex)->GetRecDataTy() == MDElement::ElementTy::kEleVecTy, + "Bypass illegal"); + + unsigned int bypassNum = static_cast(singleBypass.GetOneMDElement(0))->GetContent(); + auto *fromVec = static_cast(singleBypass.GetOneMDElement(fromVecIndex)); + auto *toVec = static_cast(singleBypass.GetOneMDElement(toVecIndex)); + for (auto itTo : toVec->GetVecData()) { + for (auto itFrom : fromVec->GetVecData()) { + auto *fromResv = static_cast(itFrom); + auto *toResv = static_cast(itTo); + outFile << "ADD" << curBypassTy << "BYPASS(" << curKeeper.GetStrByIdx(fromResv->GetContent()) << ", " + << curKeeper.GetStrByIdx(toResv->GetContent()) << ", " << bypassNum <<");\n"; + } + } + } + outFile.close(); +} + +void SchedInfoGen::Run() { + SetTargetArchName(GetArchName()); + EmitArchDef(); + EmitResvDef(); + EmitBypassDef(); + EmitUnitDef(); + EmitUnitNameDef(); + EmitLatencyDef(); + EmitUnitIdDef(); +} +} /* namespace MDGen */ diff --git a/src/mapleall/maple_be/mdgen/src/mdlexer.cpp b/src/mapleall/maple_be/mdgen/src/mdlexer.cpp new file mode 100644 index 0000000000000000000000000000000000000000..1b1db44aa0fe407fe91993cde89526a88c63a48e --- /dev/null +++ b/src/mapleall/maple_be/mdgen/src/mdlexer.cpp @@ -0,0 +1,290 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include +#include "mdlexer.h" + +namespace MDGen { +void MDLexer::PrepareFile(const std::string &mdfileName) { + mdFileInternal.open(mdfileName); + if (!mdFileInternal.is_open()) { + CHECK_FATAL(false, "Open target file failed"); + } + mdFile = &mdFileInternal; +} + +MDTokenKind MDLexer::ReturnError() const { + maple::LogInfo::MapleLogger() << "Unexpect character at Line" << lineNumber << "\n"; + return kMDError; +} + +int MDLexer::ReadOneLine() { + if (mdFile == nullptr) { + strLine = ""; + return -1; + } + curPos = 0; + if (!std::getline(*mdFile, strLine)) { /* EOF */ + strLine = ""; + mdFile = nullptr; + return -1; + } + RemoveInValidAtBack(); + return GetStrLineSize(); +} + +MDTokenKind MDLexer::NextToken() { + curKind = LexToken(); + return curKind; +} + +MDTokenKind MDLexer::LexToken() { + char c = GetCurChar(); + while (c == ' ' || c == '\t') { /* skip space && tab */ + c = GetNextChar(); + } + while (c == 0) { + if (ReadOneLine() < 0) { + return kMDEOF; + } + lineNumber++; + c = GetCurChar(); + while (c == ' ' || c == '\t') { + c = GetNextChar(); + } + } + curPos++; + switch (c) { + case '(': + return kMDOpenParen; + case ')': + return kMDCloseParen; + case '{': + return kMDOpenBrace; + case '}': + return kMDCloseBrace; + case '[': + return kMDOpenSquare; + case ']': + return kMDCloseSquare; + case '<': + return kMDLess; + case '>': + return kMDGreater; + case ';': + return kMDSemi; + case ',': + return kMDComma; + case ':': + return kMDColon; + case '=': + return kMDEqual; + case '&': + return kMDLgAnd; + case '|': + return kMDLgOr; + case '0': /* start handling number */ + case '1': + case '2': + case '3': + case '4': + case '5': + case '6': + case '7': + case '8': + case '9': + case '-': + curPos--; /* support HEX AND INTERGER at present */ + return GetTokenConstVal(); + case '/': { /* handle comment; */ + char cn = GetCurChar(); + if (cn == '/') { + SkipALineComment(); + } else if (cn == '*') { + if (!SkipCComment()) { + return kMDError; + } + } else { + return ReturnError(); + } + return LexToken(); + } + default: + if (isalpha(c) || c == '_') { + return GetTokenIdentifier(); /* identifier need to be modify */ + } + return ReturnError(); + } +} + +MDTokenKind MDLexer::GetTokenConstVal() { + bool negative = false; + char curC = GetCurChar(); + if (curC == '-') { + curC = GetNextChar(); + /* have Special Float const? */ + negative = true; + } + const uint32 hexPrefixLength = 2; + if (strLine.compare(curPos, hexPrefixLength, "0x") == 0) { + curPos += hexPrefixLength; + return GetHexConst(curPos, negative); + } + uint32 digitStartPos = curPos; + char digitStartC = GetCurChar(); + while (isdigit(curC)) { + curC = GetNextChar(); + } + if (!isdigit(digitStartC) && curC != '.') { + return kMDInvalid; + } + if (curC != '.' 
&& curC != 'e' && curC != 'E') { + return GetIntConst(digitStartPos, negative); + } + return GetFloatConst(); +} + +MDTokenKind MDLexer::GetHexConst(uint32 digitStartPos, bool isNegative) { + if (digitStartPos >= strLine.length()) { + return ReturnError(); + } + char c = GetCurChar(); + if (!isxdigit(c)) { + return kMDInvalid; + } + int loopDepth = 0; + while (isxdigit(c)) { + c = GetNextChar(); + ++loopDepth; + if (loopDepth > maxNumLength) { + return ReturnError(); + } + } + std::string hexStr = strLine.substr(digitStartPos, curPos - digitStartPos); + const char *hexStrPtr = hexStr.c_str(); + errno = 0; + constexpr int hexInDec = 16; + intVal = static_cast(std::strtoll(hexStrPtr, nullptr, hexInDec)); + if (errno == EINVAL) { /* Invalid hexadecimal number */ + return ReturnError(); + } + if (errno == ERANGE) { + errno = 0; + intVal = static_cast(std::strtoll(hexStrPtr, nullptr, hexInDec)); + if (errno == EINVAL) { /* Invalid hexadecimal number */ + return ReturnError(); + } + if (errno == ERANGE) { /* input number is out of range */ + return ReturnError(); + } + } + if (isNegative) { + intVal = -intVal; + } + return kMDIntVal; +} + +MDTokenKind MDLexer::GetIntConst(uint32 digitStartPos, bool isNegative) { + char c = GetCharAt(digitStartPos); + /* no ULL LL suffix at present */ + int loopDepth = 0; + while (isdigit(c)) { + c = GetNextChar(); + ++loopDepth; + if (loopDepth > maxNumLength) { + return ReturnError(); + } + } + curPos--; + if (digitStartPos >= strLine.length() || digitStartPos > curPos) { + return ReturnError(); + } + std::string intStr = strLine.substr(digitStartPos, curPos - digitStartPos); + const char *intStrPtr = intStr.c_str(); + errno = 0; + constexpr int decInDec = 10; + intVal = static_cast(std::strtoll(intStrPtr, nullptr, decInDec)); + if (errno == ERANGE) { + return ReturnError(); + } + if (isNegative) { + intVal = -intVal; + } + return kMDIntVal; +} + +MDTokenKind MDLexer::GetFloatConst() { + floatVal = 0; + return kMDInvalid; +} + +MDTokenKind MDLexer::GetTokenIdentifier() { + --curPos; + uint32 startPos = curPos; + char curC = GetCurChar(); + + while (isalnum(curC) || curC == '_' || curC == '-' || curC < 0) { + curC = GetNextChar(); + } + if (startPos >= strLine.length()) { + return ReturnError(); + } + strToken = strLine.substr(startPos, curPos - startPos); + auto it = keywords.find(strToken); + if (it != keywords.end()) { + return it->second; + } + return kMDIdentifier; +} + +void MDLexer::SkipALineComment() { + while (curPos < GetStrLineSize()) { + curPos++; + } + /* if comment is required to be stored. it can be done here */ +} + +bool MDLexer::SkipCComment() { + bool startAnewLine = false; + char commentNext; + while (true) { + if (!startAnewLine) { + commentNext = GetNextChar(); + } else { + commentNext = GetCurChar(); + startAnewLine = false; + } + switch (commentNext) { + case 0: + if (ReadOneLine() < 0) { + ASSERT(false, "Untermianted comment"); + return false; + } + ++lineNumber; + startAnewLine = true; + break; + case '*': + commentNext = GetNextChar(); + if (commentNext == '/') { + ++curPos; + return true; + } + break; + default: + break; + } + } + return false; +} +} diff --git a/src/mapleall/maple_be/mdgen/src/mdmain.cpp b/src/mapleall/maple_be/mdgen/src/mdmain.cpp new file mode 100644 index 0000000000000000000000000000000000000000..36e27efbf271bc62dbbdf8a9a8c0eedf38f8d715 --- /dev/null +++ b/src/mapleall/maple_be/mdgen/src/mdmain.cpp @@ -0,0 +1,87 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. 
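GetHexConst and GetIntConst above convert the scanned digits with std::strtoll and treat ERANGE as a lexing error. The core of that errno protocol, isolated into a small helper purely for illustration:

#include <cerrno>
#include <cstdlib>
#include <iostream>
#include <string>

// Reset errno, convert, and reject out-of-range values and empty conversions.
bool ParseInt(const std::string &text, int base, long long &out) {
  errno = 0;
  char *end = nullptr;
  out = std::strtoll(text.c_str(), &end, base);
  if (errno == ERANGE || end == text.c_str()) {
    return false;
  }
  return true;
}

int main() {
  long long v = 0;
  std::cout << ParseInt("0x1f", 16, v) << " " << v << "\n";        // 1 31
  std::cout << ParseInt("99999999999999999999", 10, v) << "\n";    // 0 (out of range)
  return 0;
}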
+ * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include + +#include "mdgenerator.h" +#include "mdparser.h" +#include "mpl_sighandler.h" + +using namespace MDGen; +namespace { + bool isGenSched = false; + std::string schedSrcPath = ""; + std::string oFileDir = ""; +} + +static int PrintHelpAndExit() { + maple::LogInfo::MapleLogger() << "Maplegen is usd to process md files and " << + "generate architecture specific information in def files\n" << + "usage: ./mplgen xxx.md outputdirectroy\n"; + return 1; +} + +void ParseCommandLine(int argc, char **argv) { + int opt; + int gOptionIndex = 0; + std::string optStr = "s:o:"; + static struct option longOptions[] = { + {"genSchdInfo", required_argument, nullptr, 's'}, + {"outDirectory", required_argument, nullptr, 'o'}, + {nullptr, 0, nullptr, 0} + }; + while ((opt = getopt_long(argc, argv, optStr.c_str(), longOptions, &gOptionIndex)) != -1) { + switch (opt) { + case 's': + isGenSched = true; + schedSrcPath = optarg; + break; + case 'o': + oFileDir = optarg; + break; + default: + break; + } + } +} + +bool GenSchedFiles(const std::string &fileName, const std::string &fileDir) { + maple::MemPool *schedInfoMemPool = memPoolCtrler.NewMemPool("schedInfoMp", false /* isLcalPool */); + MDClassRange moduleData("Schedule"); + MDParser parser(moduleData, schedInfoMemPool); + if (!parser.ParseFile(fileName)) { + delete schedInfoMemPool; + return false; + } + SchedInfoGen schedEmiiter(moduleData, fileDir); + schedEmiiter.Run(); + delete schedInfoMemPool; + return true; +} + +int main(int argc, char **argv) { + SigHandler::EnableAll(); + + constexpr int minimumArgNum = 2; + if (argc <= minimumArgNum) { + return PrintHelpAndExit(); + } + ParseCommandLine(argc, argv); + if (isGenSched) { + if (!GenSchedFiles(schedSrcPath, oFileDir)) { + return 1; + } + } + return 0; +} diff --git a/src/mapleall/maple_be/mdgen/src/mdparser.cpp b/src/mapleall/maple_be/mdgen/src/mdparser.cpp new file mode 100644 index 0000000000000000000000000000000000000000..c1bd058c6d93388c2d0b49ef1f6253f65c8ea364 --- /dev/null +++ b/src/mapleall/maple_be/mdgen/src/mdparser.cpp @@ -0,0 +1,380 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
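mdmain.cpp above drives everything through getopt_long, where --genSchdInfo supplies the .md source and --outDirectory the output directory. A minimal stand-alone example of the same option handling (the option table mirrors the one above; the rest is illustrative):

#include <getopt.h>
#include <iostream>
#include <string>

int main(int argc, char **argv) {
  std::string mdPath, outDir;
  static struct option longOptions[] = {
      {"genSchdInfo", required_argument, nullptr, 's'},
      {"outDirectory", required_argument, nullptr, 'o'},
      {nullptr, 0, nullptr, 0}};
  int opt;
  while ((opt = getopt_long(argc, argv, "s:o:", longOptions, nullptr)) != -1) {
    if (opt == 's') {
      mdPath = optarg;   // path of the .md schedule description
    } else if (opt == 'o') {
      outDir = optarg;   // directory for the generated .def files
    }
  }
  std::cout << "md: " << mdPath << ", out: " << outDir << "\n";
  return 0;
}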
+ */ +#include "mdparser.h" + +namespace MDGen { +bool MDParser::ParseFile(const std::string &inputFile) { + lexer.PrepareFile(inputFile); + if (!ParseObjectStart()) { + return false; + } + if (lexer.GetCurKind() == kMDEOF) { + return true; + } + return EmitError("Unexpected input at begin"); +} + +bool MDParser::IsObjectStart(MDTokenKind k) const { + return (k == kMDDef || k == kMDClass || k == kMDDefType); +} + +bool MDParser::ParseObjectStart() { + while (IsObjectStart(lexer.NextToken())) { + if (!ParseObject()) { + return false; + } + } + return true; +} + +bool MDParser::ParseObject() { + switch (lexer.GetCurKind()) { + case kMDDefType: + return ParseDefType(); + case kMDClass: + return ParseMDClass(); + case kMDDef: + return ParseMDObject(); + default: + return EmitError("Unexpected key word at start"); + } +} + +bool MDParser::ParseDefType() { + if (lexer.NextToken() != kMDIdentifier) { + return EmitError("Expect a name after a specific type defined"); + } + unsigned int defTypeIdx = dataKeeper.CreateStrInTable(lexer.GetStrToken(), kTypeName); + if (defTypeIdx == UINT_MAX) { + return EmitError("InValid defType is defined"); + } + if (lexer.NextToken() != kMDEqual) { + return EmitError("Expect a equal when a specific type is going to be instantiated"); + } + std::set defTypeMembers; + while (lexer.NextToken() != kMDSemi) { + switch (lexer.GetCurKind()) { + case kMDIdentifier: { + unsigned int defTypeMemberIdx = dataKeeper.CreateStrInTable(lexer.GetStrToken(), kTypeMemberName); + if (defTypeMemberIdx == UINT_MAX || !defTypeMembers.insert(defTypeMemberIdx).second) { + return EmitError("InValid defType member is defined"); + } + break; + } + case kMDComma: + break; + default: + return EmitError("Unexpected token kind"); + } + } + dataKeeper.AddDefinedType(defTypeIdx, defTypeMembers); + return (lexer.GetCurKind() == kMDSemi) ? true : EmitError("Expected an ending with a semicolon"); +} + +bool MDParser::ParseMDClass() { + if (lexer.NextToken() != kMDIdentifier) { + return EmitError("Expect a name after a specific class defined"); + } + unsigned int classIdx = dataKeeper.CreateStrInTable(lexer.GetStrToken(), kClassName); + if (classIdx == UINT_MAX) { + return EmitError("InValid class name. Please change a class name"); + } + bool isAnon = true; + if (lexer.NextToken() == kMDColon) { + isAnon = false; + if (lexer.NextToken() != kMDIdentifier) { + return EmitError("Expect a name after a specific class defined"); + } + if (lexer.GetStrToken() != "string") { + return EmitError("Only Support string as a class name type at current stage"); + } + static_cast(lexer.NextToken()); + } + if (isAnon) { + dataKeeper.ModifyStrTyInTable(lexer.GetStrToken(), kAnonClassName); + } + MDClass oneMDclass(classIdx, isAnon); + if (lexer.GetCurKind() != kMDLess) { + return EmitError("Expect a 'less' before class structure being defined"); + } + + while (lexer.NextToken() != kMDGreater) { + if (!ParseMDClassBody(oneMDclass)) { + return false; + } + } + dataKeeper.AddMDClass(oneMDclass); + return (lexer.NextToken() == kMDSemi) ? 
true : EmitError("Expected an ending with a semicolon"); +} + +bool MDParser::ParseMDClassBody(MDClass &oneClass) { + switch (lexer.GetCurKind()) { + case kMDIdentifier: { + StrInfo defTypeInfo = dataKeeper.GetStrInTable(lexer.GetStrToken()); + if (defTypeInfo.idx == UINT_MAX || !oneClass.IsValidStructEle(defTypeInfo.sType)) { + return EmitError("Expect a defined Type to be a memeber of a class"); + } + bool isVec = false; + if (lexer.ViewNextChar() == '[') { + if (lexer.NextToken() != kMDOpenSquare || lexer.NextToken() != kMDCloseSquare) { + return EmitError("Expect a \"[]\" to represent a list element"); + } + isVec = true; + } + oneClass.BuildFormalTypes(defTypeInfo.idx, isVec); + break; + } + case kMDComma: + break; + default: + return EmitError("Unexpected token kind"); + } + return true; +} + +bool MDParser::ParseMDObject() { + if (lexer.NextToken() != kMDIdentifier) { + return EmitError("Expect a name after a specific object defined"); + } + StrInfo parentInfo = dataKeeper.GetStrInTable(lexer.GetStrToken()); + if (parentInfo.idx == UINT_MAX || (parentInfo.sType != kClassName && parentInfo.sType != kAnonClassName)) { + return EmitError("A new object should be belong to a defined class"); + } + MDClass parentClass = dataKeeper.GetOneMDClass(parentInfo.idx); + unsigned int objectIdx = UINT_MAX; + if (!parentClass.IsAnonymousClass()) { + if (lexer.NextToken() != kMDColon) { + return EmitError("Expect a colon when a object name is going to be defined"); + } + if (lexer.NextToken() != kMDIdentifier) { + return EmitError("Expect a name for a specific object"); + } + objectIdx = dataKeeper.CreateStrInTable(lexer.GetStrToken(), kObjectName); + if (objectIdx == UINT_MAX) { + return EmitError("InValid ObjectName!"); + } + } + MDObject *curObj = mdMemPool->New(objectIdx, parentClass, *mdMemPool); + if (lexer.NextToken() != kMDOpenBrace) { + return EmitError("Expect a OpenBrace before a object body is defined"); + } + if (!ParseMDObjBody(*curObj)) { + return false; + } + dataKeeper.FillMDClass(parentInfo.idx, *curObj); + return (lexer.NextToken() == kMDSemi) ? 
true : EmitError("Expected an ending with a semicolon"); +} + +bool MDParser::ParseMDObjBody(MDObject &curObj) { + bool hasDefault = false; + for (size_t i = 0; i < curObj.GetParentClass()->GetFormalTypeSize(); ++i) { + if (hasDefault) { + DefaultElement *defaultEle = mdMemPool->New(); + curObj.AddMDElements(defaultEle); + continue; + } + MDTokenKind curKind = lexer.NextToken(); + if (i != 0 && (curKind != kMDComma && curKind != kMDCloseBrace)) { + return EmitError("Unexpected Gramma when define a object"); + } + if (curKind == kMDComma) { + curKind = lexer.NextToken(); + } + if (curKind == kMDCloseBrace) { + hasDefault = true; + DefaultElement *defaultEle = mdMemPool->New(); + curObj.AddMDElements(defaultEle); + continue; + } + unsigned int typeIdx = curObj.GetParentClass()->GetFormalTypes().at(i).first; + bool isVec = curObj.GetParentClass()->GetFormalTypes().at(i).second; + if (dataKeeper.GetStrTyByIdx(typeIdx) == kIntType) { + if (!ParseIntElement(curObj, isVec)) { + return false; + } + } else if (dataKeeper.GetStrTyByIdx(typeIdx) == kStringType) { + if (!ParseStrElement(curObj, isVec)) { + return false; + } + } else if (dataKeeper.GetStrTyByIdx(typeIdx) == kTypeName) { + std::set childSet = dataKeeper.GetOneSpcType(typeIdx); + if (!ParseDefTyElement(curObj, isVec, childSet)) { + return false; + } + } else if (dataKeeper.GetStrTyByIdx(typeIdx) == kClassName) { + MDClass pClass = dataKeeper.GetOneMDClass(typeIdx); + if (!ParseDefObjElement(curObj, isVec, pClass)) { + return false; + } + } + } + if (lexer.GetCurKind() == kMDCloseBrace) { + return true; + } + return (lexer.NextToken() != kMDCloseBrace) ? EmitError("Expect a CloseBrace as end of object definition") : true; +} + +bool MDParser::ParseIntElement(MDObject &curObj, bool isVec) { + if (isVec) { + if (lexer.GetCurKind() != kMDOpenSquare) { + return EmitError("Expect a OpenSquare before a list element defined"); + } + + VecElement *curEle = mdMemPool->New(*mdMemPool); + while (lexer.NextToken() != kMDCloseSquare) { + switch (lexer.GetCurKind()) { + case kMDIntVal: { + IntElement *singleEle = mdMemPool->New(lexer.GetIntVal()); + curEle->appendElement(singleEle); + break; + } + case kMDComma: + break; + default: + return EmitError("Unexpected token kind"); + } + } + curObj.AddMDElements(curEle); + } else { + if (lexer.GetCurKind() != kMDIntVal) { + return EmitError("Expect a integer elemet as defined"); + } + IntElement *curEle = mdMemPool->New(lexer.GetIntVal()); + curObj.AddMDElements(curEle); + } + return true; +} + +bool MDParser::ParseStrElement(MDObject &curObj, bool isVec) { + if (isVec) { + if (lexer.GetCurKind() != kMDOpenSquare) { + return EmitError("Expect a OpenSquare before a list element defined"); + } + VecElement *curEle = mdMemPool->New(*mdMemPool); + while (lexer.NextToken() != kMDCloseSquare) { + switch (lexer.GetCurKind()) { + case kMDIdentifier: { + unsigned int elementIdx = dataKeeper.CreateStrInTable(lexer.GetStrToken(), kElementName); + if (elementIdx == UINT_MAX) { + return EmitError("Duplicate string name has already been defined"); + } + StringElement *singleEle = mdMemPool->New(elementIdx); + curEle->appendElement(singleEle); + break; + } + case kMDComma: + break; + default: + return EmitError("Unexpected token kind"); + } + } + curObj.AddMDElements(curEle); + } else { + if (lexer.GetCurKind() != kMDIdentifier) { + return EmitError("Expect a string elemet as defined"); + } + unsigned int elementIdx = dataKeeper.CreateStrInTable(lexer.GetStrToken(), kElementName); + if (elementIdx == UINT_MAX) { + return 
EmitError("Duplicate string name has already been defined"); + } + StringElement *curEle = mdMemPool->New(elementIdx); + curObj.AddMDElements(curEle); + } + return true; +} + +bool MDParser::ParseDefTyElement(MDObject &curObj, bool isVec, const std::set &childSet) { + if (isVec) { + if (lexer.GetCurKind() != kMDOpenSquare) { + return EmitError("Expect a OpenSquare before a list element defined"); + } + VecElement *curEle = mdMemPool->New(*mdMemPool); + while (lexer.NextToken() != kMDCloseSquare) { + switch (lexer.GetCurKind()) { + case kMDIdentifier: { + StrInfo defTypeInfo = dataKeeper.GetStrInTable(lexer.GetStrToken()); + DefTyElement *singleEle = mdMemPool->New(); + if (!singleEle->SetContent(defTypeInfo, childSet)) { + return EmitError("Expect a input element which has been defined as a type"); + } + curEle->appendElement(singleEle); + break; + } + case kMDComma: + break; + default: + return EmitError("Unexpected token kind"); + } + } + curObj.AddMDElements(curEle); + } else { + if (lexer.GetCurKind() != kMDIdentifier) { + return EmitError("Expect a string elemet as defined"); + } + StrInfo defTypeInfo = dataKeeper.GetStrInTable(lexer.GetStrToken()); + DefTyElement *curEle = mdMemPool->New(); + if (!curEle->SetContent(defTypeInfo, childSet)) { + return EmitError("Expect a input element which has been defined as a type"); + } + curObj.AddMDElements(curEle); + } + return true; +} + +bool MDParser::ParseDefObjElement(MDObject &curObj, bool isVec, const MDClass &pClass) { + if (isVec) { + if (lexer.GetCurKind() != kMDOpenSquare) { + return EmitError("Expect a OpenSquare before a list element defined"); + } + VecElement *curEle = mdMemPool->New(*mdMemPool); + while (lexer.NextToken() != kMDCloseSquare) { + switch (lexer.GetCurKind()) { + case kMDIdentifier: { + StrInfo defObjInfo = dataKeeper.GetStrInTable(lexer.GetStrToken()); + DefObjElement *singleEle = mdMemPool->New(); + if (!singleEle->SetContent(defObjInfo, pClass)) { + return EmitError("Expect a input element which has been defined as a object"); + } + curEle->appendElement(singleEle); + break; + } + case kMDComma: + break; + default: + return EmitError("Unexpected token kind"); + } + } + curObj.AddMDElements(curEle); + } else { + if (lexer.GetCurKind() != kMDIdentifier) { + return EmitError("Expect a integer elemet as defined"); + } + StrInfo defObjInfo = dataKeeper.GetStrInTable(lexer.GetStrToken()); + DefObjElement *curEle = mdMemPool->New(); + if (!curEle->SetContent(defObjInfo, pClass)) { + return EmitError("Expect a input element which has been defined as a object"); + } + curObj.AddMDElements(curEle); + } + return true; +} + +bool MDParser::EmitError(const std::string &errMsg) { + maple::LogInfo::MapleLogger() << errMsg << "\n"; + maple::LogInfo::MapleLogger() << "A Error Appear At Line " << lexer.GetLineNumber() << "\n"; + maple::LogInfo::MapleLogger() << "Source code : " << lexer.GetStrLine() << "\n"; + return false; +} +} /* namespace MDGen */ diff --git a/src/mapleall/maple_be/mdgen/src/mdrecord.cpp b/src/mapleall/maple_be/mdgen/src/mdrecord.cpp new file mode 100644 index 0000000000000000000000000000000000000000..0c89a504c9e8d21ddb61b735a723b0174ecc5d59 --- /dev/null +++ b/src/mapleall/maple_be/mdgen/src/mdrecord.cpp @@ -0,0 +1,123 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "mdrecord.h" + +namespace MDGen { +constexpr unsigned int kInValidStrIdx = UINT_MAX; + +bool DefTyElement::SetContent(const StrInfo curInfo, const std::set &childTySet) { + if (childTySet.count(curInfo.idx) == 0) { + return false; + } + elementIdx = curInfo.idx; + return true; +} + +bool DefObjElement::SetContent(const StrInfo curInfo, const MDClass &parentClass) { + if (!parentClass.IsClassMember(curInfo.idx)) { + return false; + } + elementIdx = curInfo.idx; + return true; +} + +const MDElement *MDObject::GetOneMDElement(size_t index) const { + CHECK_FATAL(index < mdElements.size(), "Array boundary check failed"); + return mdElements[index]; +} + +const MDObject &MDClass::GetOneMDObject(size_t index) const { + CHECK_FATAL(index < mdObjects.size(), "Array boundary check failed"); + return mdObjects[index]; +} + +void MDClass::AddClassMember(MDObject inputObj) { + mdObjects.emplace_back(inputObj); + (void)childObjNames.insert(inputObj.GetIdx()); +} + +bool MDClass::IsClassMember(unsigned int curIdx) const { + return childObjNames.count(curIdx); +} + +void MDClass::BuildFormalTypes(unsigned int memberIdx, bool isVec) { + formalTypes.emplace_back(std::make_pair(memberIdx, isVec)); +} + +bool MDClass::IsValidStructEle(RecordType curTy) const { + return (curTy == kTypeName || curTy == kClassName || curTy == kIntType || curTy == kStringType); +} + +unsigned int MDClassRange::CreateStrInTable(const std::string &inStr, RecordType curTy) { + unsigned int result = kInValidStrIdx; + StrInfo curInfo (totalStr, curTy); + auto ret = stringHashTable.insert(std::make_pair(inStr, curInfo)); + if (ret.second) { + unsigned int temp = totalStr; + stringTable.emplace_back(inStr); + ++totalStr; + return temp; + } + return result; +} + +StrInfo MDClassRange::GetStrInTable(const std::string &inStr) { + auto ret = stringHashTable.find(inStr); + StrInfo inValidInfo (UINT_MAX, kUndefinedStr); + return (ret != stringHashTable.end()) ? 
ret->second : inValidInfo; +} + +RecordType MDClassRange::GetStrTyByIdx(size_t curIdx) { + CHECK_FATAL(curIdx < stringTable.size(), "Array boundary check failed"); + return GetStrInTable(stringTable[curIdx]).sType; +} + +const std::string &MDClassRange::GetStrByIdx(size_t curIdx) { + CHECK_FATAL(curIdx < stringTable.size(), "Array boundary check failed"); + return stringTable[curIdx]; +} + +void MDClassRange::ModifyStrTyInTable(const std::string &inStr, RecordType newTy) { + auto ret = stringHashTable.find(inStr); + CHECK_FATAL(ret != stringHashTable.end(), "find string failed!"); + ret->second.sType = newTy; +} + +void MDClassRange::AddDefinedType(unsigned int typesName, std::set typesSet) { + (void)definedTypes.insert(std::make_pair(typesName, typesSet)); +} + +void MDClassRange::AddMDClass(MDClass curClass) { + (void)allClasses.insert(std::make_pair(curClass.GetClassIdx(), curClass)); +} + +void MDClassRange::FillMDClass(unsigned int givenIdx, const MDObject &insertObj) { + auto ret = allClasses.find(givenIdx); + CHECK_FATAL(ret != allClasses.end(), "Cannot achieve target MD Class"); + ret->second.AddClassMember(insertObj); +} + +MDClass MDClassRange::GetOneMDClass(unsigned int givenIdx) { + auto ret = allClasses.find(givenIdx); + CHECK_FATAL(ret != allClasses.end(), "Cannot achieve target MD Class"); + return ret->second; +} + +std::set MDClassRange::GetOneSpcType(unsigned int givenTyIdx) { + auto ret = definedTypes.find(givenTyIdx); + CHECK_FATAL(ret != definedTypes.end(), "Cannot achieve a defined type"); + return ret->second; +} +} /* namespace MDGen */ diff --git a/src/mapleall/maple_be/src/ad/mad.cpp b/src/mapleall/maple_be/src/ad/mad.cpp new file mode 100644 index 0000000000000000000000000000000000000000..367c25f67831a461f309867c63d49c921d38d7aa --- /dev/null +++ b/src/mapleall/maple_be/src/ad/mad.cpp @@ -0,0 +1,389 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "mad.h" +#include +#if TARGAARCH64 +#include "aarch64_operand.h" +#elif TARGRISCV64 +#include "riscv64_operand.h" +#endif +#include "schedule.h" +#include "insn.h" + +namespace maplebe { +const std::string kUnitName[] = { +#include "mplad_unit_name.def" + "None", +}; +/* Unit */ +Unit::Unit(enum UnitId theUnitId) + : unitId(theUnitId), unitType(kUnitTypePrimart), occupancyTable(0), compositeUnits() { + MAD::AddUnit(*this); +} + +Unit::Unit(enum UnitType theUnitType, enum UnitId theUnitId, int numOfUnits, ...) + : unitId(theUnitId), unitType(theUnitType), occupancyTable(0) { + ASSERT(numOfUnits > 1, "CG internal error, composite unit with less than 2 unit elements."); + va_list ap; + va_start(ap, numOfUnits); + + for (int i = 0; i < numOfUnits; ++i) { + compositeUnits.emplace_back(static_cast(va_arg(ap, Unit*))); + } + va_end(ap); + + MAD::AddUnit(*this); +} + +/* return name of unit */ +std::string Unit::GetName() const { + ASSERT(GetUnitId() <= kUnitIdLast, "Unexpected UnitID"); + return kUnitName[GetUnitId()]; +} + +/* Check if unit is free at next "cycle" cycle. 
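+ * For a primary unit, bit "cycle" of occupancyTable (counted from the current
+ * cycle) records an existing reservation. A kUnitTypeOr composite unit is free
+ * when any of its child units is free; a kUnitTypeAnd composite unit is free
+ * only when every child unit is free.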
*/ +bool Unit::IsFree(uint32 cycle) const { + if (GetUnitType() == kUnitTypeOr) { + for (auto unit : compositeUnits) { + if (unit->IsFree(cycle)) { + return true; + } + } + return false; + } else if (GetUnitType() == kUnitTypeAnd) { + for (auto unit : compositeUnits) { + if (!unit->IsFree(cycle)) { + return false; + } + } + return true; + } + if ((occupancyTable & (1u << cycle)) != 0) { + return false; + } + return true; +} + +/* Occupy unit at next "cycle" cycle. */ +void Unit::Occupy(const Insn &insn, uint32 cycle) { + if (GetUnitType() == kUnitTypeOr) { + for (auto unit : GetCompositeUnits()) { + if (unit->IsFree(cycle)) { + unit->Occupy(insn, cycle); + return; + } + } + + ASSERT(false, "CG internal error, should not be reach here."); + return; + } else if (GetUnitType() == kUnitTypeAnd) { + for (auto unit : GetCompositeUnits()) { + unit->Occupy(insn, cycle); + } + return; + } + occupancyTable |= (1u << cycle); +} + +/* Advance all units one cycle */ +void Unit::AdvanceCycle() { + if (GetUnitType() != kUnitTypePrimart) { + return; + } + occupancyTable = (occupancyTable >> 1); +} + +/* Release all units. */ +void Unit::Release() { + if (GetUnitType() != kUnitTypePrimart) { + return; + } + occupancyTable = 0; +} + +const std::vector &Unit::GetCompositeUnits() const { + return compositeUnits; +} + +void Unit::PrintIndent(int indent) const { + for (int i = 0; i < indent; ++i) { + LogInfo::MapleLogger() << " "; + } +} + +void Unit::Dump(int indent) const { + PrintIndent(indent); + LogInfo::MapleLogger() << "Unit " << GetName() << " (ID " << GetUnitId() << "): "; + LogInfo::MapleLogger() << "occupancyTable = " << occupancyTable << '\n'; +} + +uint32 Unit::GetOccupancyTable() const { + return occupancyTable; +} + +/* MAD */ +int MAD::parallelism; +std::vector MAD::allUnits; +std::vector MAD::allReservations; +std::array, kLtLast> MAD::bypassArrays; + +MAD::~MAD() { + for (auto unit : allUnits) { + delete unit; + } + for (auto rev : allReservations) { + delete rev; + } + for (auto &bypassArray : bypassArrays) { + for (auto &bypassVector : bypassArray) { + for (auto *bypass : bypassVector) { + delete bypass; + } + } + } + allUnits.clear(); + allReservations.clear(); +} + +void MAD::InitUnits() const { +#include "mplad_unit_define.def" +} + +void MAD::InitReservation() const { +#include "mplad_reservation_define.def" +} + +void MAD::InitParallelism() const { +#include "mplad_arch_define.def" +} + +/* according insn's insnType to get a reservation */ +Reservation *MAD::FindReservation(const Insn &insn) const { + uint32 insnType = insn.GetLatencyType(); + for (auto reservation : allReservations) { + if (reservation->IsEqual(insnType)) { + return reservation; + } + } + return nullptr; +} + +/* Get latency that is def insn to use insn */ +int MAD::GetLatency(const Insn &def, const Insn &use) const { + int latency = BypassLatency(def, use); + if (latency < 0) { + latency = DefaultLatency(def); + } + return latency; +} + +/* Get bypass latency that is def insn to use insn */ +int MAD::BypassLatency(const Insn &def, const Insn &use) const { + int latency = -1; + ASSERT(def.GetLatencyType() < kLtLast, "out of range"); + ASSERT(use.GetLatencyType() < kLtLast, "out of range"); + BypassVector &bypassVec = bypassArrays[def.GetLatencyType()][use.GetLatencyType()]; + for (auto bypass : bypassVec) { + if (bypass->CanBypass(def, use)) { + latency = bypass->GetLatency(); + break; + } + } + return latency; +} + +/* Get insn's default latency */ +int MAD::DefaultLatency(const Insn &insn) const { + Reservation *res 
= insn.GetDepNode()->GetReservation(); + return res != nullptr ? res->GetLatency() : 0; +} + +void MAD::AdvanceCycle() const { + for (auto unit : allUnits) { + unit->AdvanceCycle(); + } +} + +void MAD::ReleaseAllUnits() const { + for (auto unit : allUnits) { + unit->Release(); + } +} + +void MAD::SaveStates(std::vector &occupyTable, int size) const { + int i = 0; + for (auto unit : allUnits) { + CHECK_FATAL(i < size, "unit number error"); + occupyTable[i] = unit->GetOccupancyTable(); + ++i; + } +} + +#define ADDBYPASS(DEFLTTY, USELTTY, LT) AddBypass(*(new Bypass(DEFLTTY, USELTTY, LT))) +#define ADDALUSHIFTBYPASS(DEFLTTY, USELTTY, LT) AddBypass(*(new AluShiftBypass(DEFLTTY, USELTTY, LT))) +#define ADDACCUMULATORBYPASS(DEFLTTY, USELTTY, LT) AddBypass(*(new AccumulatorBypass(DEFLTTY, USELTTY, LT))) +#define ADDSTOREBYPASS(DEFLTTY, USELTTY, LT) AddBypass(*(new StoreBypass(DEFLTTY, USELTTY, LT))) + +void MAD::InitBypass() const { +#include "mplad_bypass_define.def" +} + +bool MAD::IsSlot0Free() const { + if (GetUnitByUnitId(kUnitIdSlot0)->IsFree(0)) { + return false; + } + return true; +} + +bool MAD::IsFullIssued() const { + if (GetUnitByUnitId(kUnitIdSlot0)->IsFree(0) || GetUnitByUnitId(kUnitIdSlot1)->IsFree(0)) { + return false; + } + return true; +} + +void MAD::RestoreStates(std::vector &occupyTable, int size) const { + int i = 0; + for (auto unit : allUnits) { + CHECK_FATAL(i < size, "unit number error"); + unit->SetOccupancyTable(occupyTable[i]); + ++i; + } +} + +bool Bypass::CanBypass(const Insn &defInsn, const Insn &useInsn) const { + (void)defInsn; + (void)useInsn; + return true; +} + +bool AluShiftBypass::CanBypass(const Insn &defInsn, const Insn &useInsn) const { + /* + * hook condition + * true: r1=r2+x1 -> r3=r2<<0x2+r1 + * false:r1=r2+x1 -> r3=r1<<0x2+r2 + */ + return &(defInsn.GetOperand(kInsnFirstOpnd)) != &(useInsn.GetOperand(kInsnSecondOpnd)); +} + +bool AccumulatorBypass::CanBypass(const Insn &defInsn, const Insn &useInsn) const { + /* + * hook condition + * true: r98=x0*x1 -> x0=x2*x3+r98 + * false:r98=x0*x1 -> x0=x2*r98+x3 + */ + return (&(defInsn.GetOperand(kInsnFirstOpnd)) != &(useInsn.GetOperand(kInsnSecondOpnd)) && + &(defInsn.GetOperand(kInsnFirstOpnd)) != &(useInsn.GetOperand(kInsnThirdOpnd))); +} + +bool StoreBypass::CanBypass(const Insn &defInsn, const Insn &useInsn) const { + /* + * hook condition + * true: r96=r92+x2 -> str r96, [r92] + * false:r96=r92+x2 -> str r92, [r96] + * false:r96=r92+x2 -> str r92, [r94, r96] + */ +#if TARGAARCH64 + switch (useInsn.GetMachineOpcode()) { + case MOP_wstrb: + case MOP_wstrh: + case MOP_wstr: + case MOP_xstr: + case MOP_sstr: + case MOP_dstr: { + auto &useMemOpnd = static_cast(useInsn.GetOperand(kInsnSecondOpnd)); + return (&(defInsn.GetOperand(kInsnFirstOpnd)) != useMemOpnd.GetOffset() && + &(defInsn.GetOperand(kInsnFirstOpnd)) != useMemOpnd.GetBaseRegister()); + } + case MOP_wstp: + case MOP_xstp: { + auto &useMemOpnd = static_cast(useInsn.GetOperand(kInsnThirdOpnd)); + return (&(defInsn.GetOperand(kInsnFirstOpnd)) != useMemOpnd.GetOffset() && + &(defInsn.GetOperand(kInsnFirstOpnd)) != useMemOpnd.GetBaseRegister()); + } + + default: + return false; + } +#endif + return false; +} + +/* Reservation */ +Reservation::Reservation(LatencyType t, int l, int n, ...) 
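+    /* The n trailing variadic arguments are expected to be the Unit* entries of this
+     * reservation; they are copied into units[] below. */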
: type(t), latency(l), unitNum(n) { + ASSERT(l >= 0, "CG internal error, latency and unitNum should not be less than 0."); + ASSERT(n >= 0, "CG internal error, latency and unitNum should not be less than 0."); + + errno_t ret = memset_s(units, sizeof(Unit*) * kMaxUnit, 0, sizeof(Unit*) * kMaxUnit); + CHECK_FATAL(ret == EOK, "call memset_s failed in Reservation"); + + va_list ap; + va_start(ap, n); + for (uint32 i = 0; i < unitNum; ++i) { + units[i] = static_cast(va_arg(ap, Unit*)); + } + va_end(ap); + + MAD::AddReservation(*this); + /* init slot */ + if (n > 0) { + /* if there are units, init slot by units[0] */ + slot = GetSlotType(units[0]->GetUnitId()); + } else { + slot = kSlotNone; + } +} + +const std::string kSlotName[] = { + "SlotNone", + "Slot0", + "Slot1", + "SlotAny", + "Slots", +}; + +const std::string &Reservation::GetSlotName() const { + ASSERT(GetSlot() <= kSlots, "Unexpected slot"); + return kUnitName[GetSlot()]; +} + +/* Get slot type by unit id */ +SlotType Reservation::GetSlotType(UnitId unitID) const { + switch (unitID) { + case kUnitIdSlot0: + case kUnitIdSlot0LdAgu: + case kUnitIdSlot0StAgu: + return kSlot0; + + case kUnitIdSlot1: + return kSlot1; + + case kUnitIdSlotS: + case kUnitIdSlotSHazard: + case kUnitIdSlotSMul: + case kUnitIdSlotSBranch: + case kUnitIdSlotSAgen: + return kSlotAny; + + case kUnitIdSlotD: + case kUnitIdSlotDAgen: + return kSlots; + + default: + ASSERT(false, "unknown slot type!"); + return kSlotNone; + } +} +} /* namespace maplebe */ diff --git a/src/mapleall/maple_be/src/be/bbt.cpp b/src/mapleall/maple_be/src/be/bbt.cpp new file mode 100644 index 0000000000000000000000000000000000000000..bc00a8b515ecc5d0059a4de275bb7ce1eefdb897 --- /dev/null +++ b/src/mapleall/maple_be/src/be/bbt.cpp @@ -0,0 +1,76 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "bbt.h" +namespace maplebe { +#if DEBUG +void BBT::Dump(const MIRModule &mod) const { + if (IsTry()) { + LogInfo::MapleLogger() << "Try" << '\n'; + } else if (IsEndTry()) { + LogInfo::MapleLogger() << "EndTry" << '\n'; + } else if (IsCatch()) { + LogInfo::MapleLogger() << "Catch" << '\n'; + } else { + LogInfo::MapleLogger() << "Plain" << '\n'; + } + if (firstStmt != nullptr) { + firstStmt->Dump(0); + LogInfo::MapleLogger() << '\n'; + if (keyStmt != nullptr) { + keyStmt->Dump(0); + LogInfo::MapleLogger() << '\n'; + } else { + LogInfo::MapleLogger() << "<>" << '\n'; + } + if (lastStmt != nullptr) { + lastStmt->Dump(0); + } + LogInfo::MapleLogger() << '\n'; + } else { + LogInfo::MapleLogger() << "<>" << '\n'; + } +} + +void BBT::ValidateStmtList(StmtNode *head, StmtNode *detached) { + static int nStmts = 0; + int n = 0; + int m = 0; + if (head == nullptr && detached == nullptr) { + nStmts = 0; + return; + } + for (StmtNode *s = head; s != nullptr; s = s->GetNext()) { + if (s->GetNext() != nullptr) { + CHECK_FATAL(s->GetNext()->GetPrev() == s, "make sure the prev node of s' next is s"); + } + if (s->GetPrev() != nullptr) { + CHECK_FATAL(s->GetPrev()->GetNext() == s, "make sure the next node of s' prev is s"); + } + ++n; + } + for (StmtNode *s = detached; s != nullptr; s = s->GetNext()) { + if (s->GetNext() != nullptr) { + CHECK_FATAL(s->GetNext()->GetPrev() == s, "make sure the prev node of s' next is s"); + } + if (s->GetPrev() != nullptr) { + CHECK_FATAL(s->GetPrev()->GetNext() == s, "make sure the next node of s' prev is s"); + } + ++m; + } + CHECK_FATAL(nStmts <= n + m, "make sure nStmts <= n + m"); + nStmts = n + m; +} +#endif +} /* namespace maplebe */ diff --git a/src/mapleall/maple_be/src/be/becommon.cpp b/src/mapleall/maple_be/src/be/becommon.cpp new file mode 100644 index 0000000000000000000000000000000000000000..2d593f5aa3aec12fc62124955112d1bf1b8681e3 --- /dev/null +++ b/src/mapleall/maple_be/src/be/becommon.cpp @@ -0,0 +1,846 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include +#include +#include "rt.h" +#include "cg_option.h" +#include "mir_builder.h" +#include "mpl_logging.h" +#include "becommon.h" + +namespace maplebe { +using namespace maple; + +BECommon::BECommon(MIRModule &mod) + : mirModule(mod), + typeSizeTable(GlobalTables::GetTypeTable().GetTypeTable().size(), 0, mirModule.GetMPAllocator().Adapter()), + typeAlignTable(GlobalTables::GetTypeTable().GetTypeTable().size(), static_cast(mirModule.IsCModule()), + mirModule.GetMPAllocator().Adapter()), + typeHasFlexibleArray(GlobalTables::GetTypeTable().GetTypeTable().size(), 0, mirModule.GetMPAllocator().Adapter()), + structFieldCountTable(GlobalTables::GetTypeTable().GetTypeTable().size(), + 0, mirModule.GetMPAllocator().Adapter()), + jClassLayoutTable(mirModule.GetMPAllocator().Adapter()), + funcReturnType(mirModule.GetMPAllocator().Adapter()) { + for (uint32 i = 1; i < GlobalTables::GetTypeTable().GetTypeTable().size(); ++i) { + MIRType *ty = GlobalTables::GetTypeTable().GetTypeTable()[i]; + ComputeTypeSizesAligns(*ty); + LowerTypeAttribute(*ty); + } + + if (mirModule.IsJavaModule()) { + for (uint32 i = 0; i < GlobalTables::GetGsymTable().GetSymbolTableSize(); ++i) { + MIRSymbol *sym = GlobalTables::GetGsymTable().GetSymbol(i); + if (sym == nullptr) { + continue; + } + LowerJavaVolatileForSymbol(*sym); + } + } +} + +/* + * try to find an available padding slot, and allocate the given field in it. + * return the offset of the allocated memory. 0 if not available + * Note: this will update lists in paddingSlots + * Note: padding slots is a list of un-occupied (small size) slots + * available to allocate new fields. so far, just for 1, 2, 4 bytes + * types (map to array index 0, 1, 2) + */ +static uint32 TryAllocInPaddingSlots(std::list paddingSlots[], + uint32 fieldSize, + uint32 fieldAlign, + size_t paddingSlotsLength) { + CHECK_FATAL(paddingSlotsLength > 0, "expect paddingSlotsLength > 0"); + if (fieldSize > 4) { + return 0; /* padding slots are for size 1/2/4 bytes */ + } + + uint32 fieldOffset = 0; + /* here is a greedy search */ + for (size_t freeSlot = static_cast(fieldSize >> 1); freeSlot < paddingSlotsLength; ++freeSlot) { + if (!paddingSlots[freeSlot].empty()) { + uint32 paddingOffset = paddingSlots[freeSlot].front(); + if (IsAlignedTo(paddingOffset, fieldAlign)) { + /* reuse one padding slot */ + paddingSlots[freeSlot].pop_front(); + fieldOffset = paddingOffset; + /* check whether there're still space left in this slot */ + uint32 leftSize = (1u << freeSlot) - fieldSize; + if (leftSize != 0) { + uint32 leftOffset = paddingOffset + fieldSize; + if ((leftSize & 0x1) > 0) { /* check whether the last bit is 1 */ + paddingSlots[0].push_front(leftOffset); + leftOffset += 1; + } + if ((leftSize & 0x2) > 0) { /* check whether the penultimate bit is 1 */ + paddingSlots[1].push_front(leftOffset); + } + } + break; + } + } + } + return fieldOffset; +} + +static void AddPaddingSlot(std::list paddingSlots[], uint32 offset, uint32 size, size_t paddingSlotsLength) { + CHECK_FATAL(paddingSlotsLength > 0, "expect paddingSlotsLength > 0"); + /* + * decompose the padding into 1/2/4 bytes slots. + * to satisfy alignment constraints. 
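+   * For example, a 7-byte gap starting at offset 9 is recorded as a 1-byte slot at
+   * offset 9, a 2-byte slot at offset 10 and a 4-byte slot at offset 12, following
+   * the set bits of "size" from the lowest bit upwards.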
+ */ + for (size_t i = 0; i < paddingSlotsLength; ++i) { + if ((size & (1u << i)) > 0) { + paddingSlots[i].push_front(offset); + offset += (1u << i); + } + } +} + +void BECommon::AddNewTypeAfterBecommon(uint32 oldTypeTableSize, uint32 newTypeTableSize) { + for (auto i = oldTypeTableSize; i < newTypeTableSize; ++i) { + MIRType *ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(i); + CHECK_NULL_FATAL(ty); + typeSizeTable.emplace_back(0); + typeAlignTable.emplace_back(static_cast(mirModule.IsCModule())); + typeHasFlexibleArray.emplace_back(0); + structFieldCountTable.emplace_back(0); + ComputeTypeSizesAligns(*ty); + LowerTypeAttribute(*ty); + } +} + +void BECommon::ComputeStructTypeSizesAligns(MIRType &ty, const TyIdx &tyIdx) { + auto &structType = static_cast(ty); + const FieldVector &fields = structType.GetFields(); + uint64 allocedSize = 0; + uint64 allocedSizeInBits = 0; + SetStructFieldCount(structType.GetTypeIndex(), fields.size()); + if (fields.size() == 0) { + if (structType.IsCPlusPlus()) { + SetTypeSize(tyIdx.GetIdx(), 1); /* empty struct in C++ has size 1 */ + SetTypeAlign(tyIdx.GetIdx(), 1); + } else { + SetTypeSize(tyIdx.GetIdx(), 0); + SetTypeAlign(tyIdx.GetIdx(), 1); + } + return; + } + auto structAttr = structType.GetTypeAttrs(); + auto structPack = static_cast(structAttr.GetPack()); + for (uint32 j = 0; j < fields.size(); ++j) { + TyIdx fieldTyIdx = fields[j].second.first; + auto fieldAttr = fields[j].second.second; + MIRType *fieldType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(fieldTyIdx); + uint32 fieldTypeSize = GetTypeSize(fieldTyIdx); + if (fieldTypeSize == 0) { + ComputeTypeSizesAligns(*fieldType); + fieldTypeSize = GetTypeSize(fieldTyIdx); + } + uint64 fieldSizeBits = fieldTypeSize * kBitsPerByte; + auto attrAlign = static_cast(fieldAttr.GetAlign()); + auto originAlign = std::max(attrAlign, GetTypeAlign(fieldTyIdx)); + uint8 fieldAlign = fieldAttr.IsPacked() ? static_cast(1U) : std::min(originAlign, structPack); + uint64 fieldAlignBits = fieldAlign * kBitsPerByte; + CHECK_FATAL(fieldAlign != 0, "expect fieldAlign not equal 0"); + MIRStructType *subStructType = fieldType->EmbeddedStructType(); + if (subStructType != nullptr) { + AppendStructFieldCount(structType.GetTypeIndex(), GetStructFieldCount(subStructType->GetTypeIndex())); + } + if (structType.GetKind() != kTypeUnion) { + if (fieldType->GetKind() == kTypeBitField) { + uint32 fieldSize = static_cast(fieldType)->GetFieldSize(); + /* is this field is crossing the align boundary of its base type? */ + if ((!structAttr.IsPacked() && + ((allocedSizeInBits / fieldSizeBits) != ((allocedSizeInBits + fieldSize - 1u) / fieldSizeBits))) || + fieldSize == 0) { + allocedSizeInBits = RoundUp(allocedSizeInBits, fieldSizeBits); + } + /* allocate the bitfield */ + allocedSizeInBits += fieldSize; + allocedSize = std::max(allocedSize, RoundUp(allocedSizeInBits, fieldAlignBits) / kBitsPerByte); + } else { + bool leftoverbits = false; + + if (allocedSizeInBits == allocedSize * kBitsPerByte) { + allocedSize = RoundUp(allocedSize, fieldAlign); + } else { + /* still some leftover bits on allocated words, we calculate things based on bits then. 
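+           * This branch is taken when the bit cursor (allocedSizeInBits) lags behind
+           * allocedSize * kBitsPerByte, i.e. the struct so far ends in bitfield members
+           * that did not completely fill their container.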
*/ + if (allocedSizeInBits / fieldAlignBits != (allocedSizeInBits + fieldSizeBits - 1) / fieldAlignBits) { + /* the field is crossing the align boundary of its base type */ + allocedSizeInBits = RoundUp(allocedSizeInBits, fieldAlignBits); + } + leftoverbits = true; + } + if (leftoverbits) { + allocedSizeInBits += fieldSizeBits; + allocedSize = std::max(allocedSize, RoundUp(allocedSizeInBits, fieldAlignBits) / kBitsPerByte); + } else { + /* pad alloced_size according to the field alignment */ + allocedSize = RoundUp(allocedSize, fieldAlign); + allocedSize += fieldTypeSize; + allocedSizeInBits = allocedSize * kBitsPerByte; + } + } + } else { /* for unions, bitfields are treated as non-bitfields */ + allocedSize = std::max(allocedSize, static_cast(fieldTypeSize)); + } + SetTypeAlign(tyIdx, std::max(GetTypeAlign(tyIdx), fieldAlign)); + /* C99 + * Last struct element of a struct with more than one member + * is a flexible array if it is an array of size 0. + */ + if ((j != 0) && ((j + 1) == fields.size()) && + (fieldType->GetKind() == kTypeArray) && + (GetTypeSize(fieldTyIdx.GetIdx()) == 0)) { + SetHasFlexibleArray(tyIdx.GetIdx(), true); + } + } + SetTypeSize(tyIdx, RoundUp(allocedSize, GetTypeAlign(tyIdx.GetIdx()))); +} + +void BECommon::ComputeClassTypeSizesAligns(MIRType &ty, const TyIdx &tyIdx, uint8 align) { + uint64 allocedSize = 0; + const FieldVector &fields = static_cast(ty).GetFields(); + + auto &classType = static_cast(ty); + TyIdx prntTyIdx = classType.GetParentTyIdx(); + /* process parent class */ + if (prntTyIdx != 0u) { + MIRClassType *parentType = + static_cast(GlobalTables::GetTypeTable().GetTypeFromTyIdx(prntTyIdx)); + uint32 prntSize = GetTypeSize(prntTyIdx); + if (prntSize == 0) { + ComputeTypeSizesAligns(*parentType); + prntSize = GetTypeSize(prntTyIdx); + } + uint8 prntAlign = GetTypeAlign(prntTyIdx); + AppendStructFieldCount(tyIdx, GetStructFieldCount(prntTyIdx) + 1); + /* pad alloced_size according to the field alignment */ + allocedSize = RoundUp(allocedSize, prntAlign); + + JClassLayout *layout = mirModule.GetMemPool()->New(mirModule.GetMPAllocator().Adapter()); + /* add parent's record to the front */ + layout->emplace_back(JClassFieldInfo(false, false, false, allocedSize)); + /* copy parent's layout plan into my plan */ + if (HasJClassLayout(*parentType)) { /* parent may have incomplete type definition. 
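+     * If the layout is missing, the else branch below only reports a warning and the
+     * parent's fields are not merged into this class's layout plan.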
*/ + const JClassLayout &parentLayout = GetJClassLayout(*parentType); + (void)layout->insert(layout->cend(), parentLayout.cbegin(), parentLayout.cend()); + allocedSize += prntSize; + SetTypeAlign(tyIdx, std::max(GetTypeAlign(tyIdx), prntAlign)); + } else { + LogInfo::MapleLogger() << "Warning:try to layout class with incomplete type:" << parentType->GetName() << "\n"; + } + jClassLayoutTable[&classType] = layout; + } else { + /* This is the root class, say, The Object */ + jClassLayoutTable[&classType] = mirModule.GetMemPool()->New(mirModule.GetMPAllocator().Adapter()); + } + + /* + * a list of un-occupied (small size) slots available for insertion + * so far, just for 1, 2, 4 bytes types (map to array index 0, 1, 2) + */ + std::list paddingSlots[3]; + /* process fields */ + AppendStructFieldCount(tyIdx, fields.size()); + if (fields.size() == 0 && mirModule.IsCModule()) { + SetTypeAlign(tyIdx.GetIdx(), 1); + SetTypeSize(tyIdx.GetIdx(), 1); + return; + } + for (uint32 j = 0; j < fields.size(); ++j) { + TyIdx fieldTyIdx = fields[j].second.first; + MIRType *fieldType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(fieldTyIdx); + FieldAttrs fieldAttr = fields[j].second.second; + uint32 fieldSize = GetTypeSize(fieldTyIdx); + if (fieldSize == 0) { + ComputeTypeSizesAligns(*fieldType); + fieldSize = GetTypeSize(fieldTyIdx); + } + uint8 fieldAlign = GetTypeAlign(fieldTyIdx); + + if ((fieldType->GetKind() == kTypePointer) && (fieldType->GetPrimType() == PTY_a64)) { + /* handle class reference field */ + fieldSize = static_cast(RTSupport::GetRTSupportInstance().GetFieldSize()); + fieldAlign = static_cast(RTSupport::GetRTSupportInstance().GetFieldAlign()); + } + + /* try to alloc the field in one of previously created padding slots */ + uint32 currentFieldOffset = TryAllocInPaddingSlots(paddingSlots, fieldSize, fieldAlign, + sizeof(paddingSlots) / sizeof(paddingSlots[0])); + /* cannot reuse one padding slot. 
layout to current end */ + if (currentFieldOffset == 0) { + /* pad alloced_size according to the field alignment */ + currentFieldOffset = RoundUp(allocedSize, fieldAlign); + if (currentFieldOffset != allocedSize) { + /* rounded up, create one padding-slot */ + uint32 paddingSize = currentFieldOffset - allocedSize; + AddPaddingSlot(paddingSlots, allocedSize, paddingSize, + sizeof(paddingSlots) / sizeof(paddingSlots[0])); + allocedSize = currentFieldOffset; + } + /* need new memory for this field */ + allocedSize += fieldSize; + } + AddElementToJClassLayout(classType, JClassFieldInfo(fieldType->GetKind() == kTypePointer, + fieldAttr.GetAttr(FLDATTR_rcunowned), + fieldAttr.GetAttr(FLDATTR_rcweak), + currentFieldOffset)); + SetTypeAlign(tyIdx, std::max(GetTypeAlign(tyIdx), fieldAlign)); + } + SetTypeSize(tyIdx, RoundUp(allocedSize, align)); +} + +void BECommon::ComputeArrayTypeSizesAligns(MIRType &ty, const TyIdx &tyIdx) { + MIRArrayType &arrayType = static_cast(ty); + MIRType *elemType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(arrayType.GetElemTyIdx()); + uint32 elemSize = GetTypeSize(elemType->GetTypeIndex()); + if (elemSize == 0) { + ComputeTypeSizesAligns(*elemType); + elemSize = GetTypeSize(elemType->GetTypeIndex()); + } + if (!mirModule.IsCModule()) { + CHECK_FATAL(elemSize != 0, "elemSize should not equal 0"); + CHECK_FATAL(elemType->GetTypeIndex() != 0u, "elemType's idx should not equal 0"); + } + uint32 arrayAlign = arrayType.GetTypeAttrs().GetAlign(); + elemSize = std::max(elemSize, static_cast(GetTypeAlign(elemType->GetTypeIndex()))); + elemSize = std::max(elemSize, arrayAlign); + /* compute total number of elements from the multipel dimensions */ + uint64 numElems = 1; + for (int d = 0; d < arrayType.GetDim(); ++d) { + numElems *= arrayType.GetSizeArrayItem(d); + } + auto typeSize = elemSize * numElems; + SetTypeSize(tyIdx, typeSize); + if (typeSize == 0) { + SetTypeAlign(tyIdx, static_cast(arrayAlign)); + } else { + auto maxAlign = std::max(static_cast(GetTypeAlign(elemType->GetTypeIndex())), arrayAlign); + SetTypeAlign(tyIdx, static_cast(maxAlign)); + } +} + +void BECommon::ComputeFArrayOrJArrayTypeSizesAligns(MIRType &ty, const TyIdx &tyIdx) { + MIRFarrayType &arrayType = static_cast(ty); + MIRType *elemType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(arrayType.GetElemTyIdx()); + uint32 elemSize = GetTypeSize(elemType->GetTypeIndex()); + if (elemSize == 0) { + ComputeTypeSizesAligns(*elemType); + elemSize = GetTypeSize(elemType->GetTypeIndex()); + } + CHECK_FATAL(elemSize != 0, "elemSize should not equal 0"); + CHECK_FATAL(GetTypeAlign(elemType->GetTypeIndex()) != 0u, "GetTypeAlign return 0 is not expected"); + elemSize = std::max(elemSize, static_cast(GetTypeAlign(elemType->GetTypeIndex()))); + SetTypeSize(tyIdx, 0); + SetTypeAlign(tyIdx, GetTypeAlign(elemType->GetTypeIndex())); +} + +/* Note: also do java class layout */ +void BECommon::ComputeTypeSizesAligns(MIRType &ty, uint8 align) { + TyIdx tyIdx = ty.GetTypeIndex(); + if ((structFieldCountTable.size() > tyIdx) && (GetStructFieldCount(tyIdx) != 0)) { + return; /* processed before */ + } + + if ((ty.GetPrimType() == PTY_ptr) || (ty.GetPrimType() == PTY_ref)) { + ty.SetPrimType(GetLoweredPtrType()); + } + + switch (ty.GetKind()) { + case kTypeScalar: + case kTypePointer: + case kTypeBitField: + case kTypeFunction: + SetTypeSize(tyIdx, GetPrimTypeSize(ty.GetPrimType())); + SetTypeAlign(tyIdx, GetTypeSize(tyIdx)); + break; + case kTypeArray: { + ComputeArrayTypeSizesAligns(ty, tyIdx); + break; + } + case 
kTypeFArray: + case kTypeJArray: { + ComputeFArrayOrJArrayTypeSizesAligns(ty, tyIdx); + break; + } + case kTypeUnion: + case kTypeStruct: { + ComputeStructTypeSizesAligns(ty, tyIdx); + break; + } + case kTypeInterface: { /* interface shouldn't have instance fields */ + SetTypeAlign(tyIdx, 0); + SetTypeSize(tyIdx, 0); + SetStructFieldCount(tyIdx, 0); + break; + } + case kTypeClass: { /* cannot have union or bitfields */ + ComputeClassTypeSizesAligns(ty, tyIdx, align); + break; + } + case kTypeByName: + case kTypeVoid: + default: + SetTypeSize(tyIdx, 0); + break; + } + /* there may be passed-in align attribute declared with the symbol */ + SetTypeAlign(tyIdx, std::max(GetTypeAlign(tyIdx), align)); +} + +void BECommon::LowerTypeAttribute(MIRType &ty) { + if (mirModule.IsJavaModule()) { + LowerJavaTypeAttribute(ty); + } +} + +void BECommon::LowerJavaTypeAttribute(MIRType &ty) { + /* we process volatile only for now */ + switch (ty.GetKind()) { + case kTypeClass: /* cannot have union or bitfields */ + LowerJavaVolatileInClassType(static_cast(ty)); + break; + + default: + break; + } +} + +void BECommon::LowerJavaVolatileInClassType(MIRClassType &ty) { + for (auto &field : ty.GetFields()) { + if (field.second.second.GetAttr(FLDATTR_volatile)) { + field.second.second.SetAttr(FLDATTR_memory_order_acquire); + field.second.second.SetAttr(FLDATTR_memory_order_release); + } else { + MIRType *fieldType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(field.second.first); + if (fieldType->GetKind() == kTypeClass) { + LowerJavaVolatileInClassType(static_cast(*fieldType)); + } + } + } +} + +bool BECommon::IsRefField(MIRStructType &structType, FieldID fieldID) const { + if (structType.GetKind() == kTypeClass) { + CHECK_FATAL(HasJClassLayout(static_cast(structType)), "Cannot found java class layout information"); + const JClassLayout &layout = GetJClassLayout(static_cast(structType)); + if (layout.empty()) { + ERR(kLncErr, "layout is null in BECommon::IsRefField"); + return false; + } + return layout[fieldID - 1].IsRef(); + } + return false; +} + +void BECommon::LowerJavaVolatileForSymbol(MIRSymbol &sym) const { + /* type attr is associated with symbol */ + if (sym.GetAttr(ATTR_volatile)) { + sym.SetAttr(ATTR_memory_order_acquire); + sym.SetAttr(ATTR_memory_order_release); + } +} + +void BECommon::GenFieldOffsetMap(const std::string &className) { + MIRType *type = GlobalTables::GetTypeTable().GetOrCreateClassType(className, mirModule); + CHECK_FATAL(type != nullptr, "unknown class, type should not be nullptr"); + MIRClassType *classType = static_cast(type); + for (FieldID i = 1; i <= GetStructFieldCount(classType->GetTypeIndex()); ++i) { + FieldID fieldID = i; + FieldPair fp = classType->TraverseToFieldRef(fieldID); + GStrIdx strIdx = fp.first; + if (strIdx == 0u) { + continue; + } + + const std::string &fieldName = GlobalTables::GetStrTable().GetStringFromStrIdx(strIdx); + + TyIdx fieldTyIdx = fp.second.first; + uint64 fieldSize = GetTypeSize(fieldTyIdx); + MIRType *fieldType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(fieldTyIdx); + + if ((fieldType->GetKind() == kTypePointer) && (fieldType->GetPrimType() == PTY_a64)) { + /* handle class reference field */ + fieldSize = RTSupport::GetRTSupportInstance().GetFieldSize(); + } + + std::pair p = GetFieldOffset(*classType, i); + CHECK_FATAL(p.second == 0, "expect p.second equals 0"); + LogInfo::MapleLogger() << "CLASS_FIELD_OFFSET_MAP(" << className.c_str() << "," << fieldName.c_str() << "," + << p.first << "," << fieldSize << ")\n"; + } +} + +void 
BECommon::GenFieldOffsetMap(MIRClassType &classType, FILE &outFile) { + const std::string &className = classType.GetName(); + + /* + * We only enumerate fields defined in the current class. There are cases + * where a parent classes may define private fields that have the same name as + * a field in the current class.This table is generated for the convenience of + * C programmers. If the C programmer wants to access parent class fields, + * the programmer should access them as `Parent.field`. + */ + FieldID myEnd = structFieldCountTable.at(classType.GetTypeIndex()); + FieldID myBegin = (myEnd - static_cast(classType.GetFieldsSize())) + 1; + + for (FieldID i = myBegin; i <= myEnd; ++i) { + FieldID fieldID = i; + FieldPair fp = classType.TraverseToFieldRef(fieldID); + GStrIdx strIdx = fp.first; + if (strIdx == 0u) { + continue; + } + FieldAttrs attrs = fp.second.second; + if (attrs.GetAttr(FLDATTR_static)) { + continue; + } + + const std::string &fieldName = GlobalTables::GetStrTable().GetStringFromStrIdx(strIdx); + + TyIdx fieldTyIdx = fp.second.first; + uint64 fieldSize = GetTypeSize(fieldTyIdx); + MIRType *fieldType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(fieldTyIdx); + + if ((fieldType->GetKind() == kTypePointer) && (fieldType->GetPrimType() == PTY_a64)) { + /* handle class reference field */ + fieldSize = RTSupport::GetRTSupportInstance().GetFieldSize(); + } + + std::pair p = GetFieldOffset(classType, i); + CHECK_FATAL(p.second == 0, "expect p.second equals 0"); + (void)fprintf(&outFile, "__MRT_CLASS_FIELD(%s, %s, %d, %lu)\n", className.c_str(), fieldName.c_str(), + p.first, fieldSize); + } +} + +void BECommon::GenObjSize(const MIRClassType &classType, FILE &outFile) const { + const std::string &className = classType.GetName(); + uint64_t objSize = GetTypeSize(classType.GetTypeIndex()); + if (objSize == 0) { + return; + } + + TyIdx parentTypeIdx = classType.GetParentTyIdx(); + MIRType *parentType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(parentTypeIdx); + const char *parentName = nullptr; + if (parentType != nullptr) { + MIRClassType *parentClass = static_cast(parentType); + parentName = parentClass->GetName().c_str(); + } else { + parentName = "THIS_IS_ROOT"; + } + fprintf(&outFile, "__MRT_CLASS(%s, %" PRIu64 ", %s)\n", className.c_str(), objSize, parentName); +} + +/* + * compute the offset of the field given by fieldID within the structure type + * structy; it returns the answer in the pair (byteoffset, bitoffset) such that + * if it is a bitfield, byteoffset gives the offset of the container for + * extracting the bitfield and bitoffset is with respect to the container + */ +std::pair BECommon::GetFieldOffset(MIRStructType &structType, FieldID fieldID) { + CHECK_FATAL(fieldID <= GetStructFieldCount(structType.GetTypeIndex()), "GetFieldOFfset: fieldID too large"); + uint64 allocedSize = 0; + uint64 allocedSizeInBits = 0; + FieldID curFieldID = 1; + if (fieldID == 0) { + return std::pair(0, 0); + } + + if (structType.GetKind() == kTypeClass) { + CHECK_FATAL(HasJClassLayout(static_cast(structType)), "Cannot found java class layout information"); + const JClassLayout &layout = GetJClassLayout(static_cast(structType)); + CHECK_FATAL(static_cast(fieldID) - 1 < layout.size(), "subscript out of range"); + return std::pair(static_cast(layout[fieldID - 1].GetOffset()), 0); + } + + /* process the struct fields */ + FieldVector fields = structType.GetFields(); + auto structPack = static_cast(structType.GetTypeAttrs().GetPack()); + for (uint32 j = 0; j < fields.size(); ++j) { + 
TyIdx fieldTyIdx = fields[j].second.first; + auto fieldAttr = fields[j].second.second; + MIRType *fieldType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(fieldTyIdx); + uint32 fieldTypeSize = GetTypeSize(fieldTyIdx); + uint64 fieldSizeBits = fieldTypeSize * kBitsPerByte; + auto originAlign = GetTypeAlign(fieldTyIdx); + uint64 fieldAlign = static_cast( + static_cast(fieldAttr.IsPacked() ? 1 : std::min(originAlign, structPack))); + uint64 fieldAlignBits = static_cast(static_cast(fieldAlign * kBitsPerByte)); + CHECK_FATAL(fieldAlign != 0, "fieldAlign should not equal 0"); + if (structType.GetKind() != kTypeUnion) { + if (fieldType->GetKind() == kTypeBitField) { + uint32 fieldSize = static_cast(fieldType)->GetFieldSize(); + /* + * Is this field is crossing the align boundary of its base type? Or, + * is field a zero-with bit field? + * Refer to C99 standard (§6.7.2.1) : + * > As a special case, a bit-field structure member with a width of 0 indicates that no further + * > bit-field is to be packed into the unit in which the previous bit-field, if any, was placed. + * + * We know that A zero-width bit field can cause the next field to be aligned on the next container + * boundary where the container is the same size as the underlying type of the bit field. + */ + if ((!structType.GetTypeAttrs().IsPacked() && + ((allocedSizeInBits / fieldSizeBits) != ((allocedSizeInBits + fieldSize - 1u) / fieldSizeBits))) || + fieldSize == 0) { + /* + * the field is crossing the align boundary of its base type; + * align alloced_size_in_bits to fieldAlign + */ + allocedSizeInBits = RoundUp(allocedSizeInBits, fieldSizeBits); + } + /* allocate the bitfield */ + if (curFieldID == fieldID) { + return std::pair( + static_cast(static_cast((allocedSizeInBits / fieldAlignBits) * fieldAlign)), + static_cast(static_cast(allocedSizeInBits % fieldAlignBits))); + } else { + ++curFieldID; + } + allocedSizeInBits += fieldSize; + allocedSize = std::max(allocedSize, RoundUp(allocedSizeInBits, fieldAlignBits) / kBitsPerByte); + } else { + bool leftOverBits = false; + uint64 offset = 0; + + if (allocedSizeInBits == allocedSize * k8BitSize) { + allocedSize = RoundUp(allocedSize, fieldAlign); + offset = allocedSize; + } else { + /* still some leftover bits on allocated words, we calculate things based on bits then. 
*/ + if (allocedSizeInBits / fieldAlignBits != (allocedSizeInBits + fieldSizeBits - k1BitSize) / fieldAlignBits) { + /* the field is crossing the align boundary of its base type */ + allocedSizeInBits = RoundUp(allocedSizeInBits, fieldAlignBits); + } + allocedSize = RoundUp(allocedSize, fieldAlign); + offset = (allocedSizeInBits / fieldAlignBits) * fieldAlign; + leftOverBits = true; + } + + if (curFieldID == fieldID) { + return std::pair(static_cast(static_cast(offset)), 0); + } else { + MIRStructType *subStructType = fieldType->EmbeddedStructType(); + if (subStructType == nullptr) { + ++curFieldID; + } else { + if ((curFieldID + GetStructFieldCount(subStructType->GetTypeIndex())) < fieldID) { + curFieldID += GetStructFieldCount(subStructType->GetTypeIndex()) + 1; + } else { + std::pair result = GetFieldOffset(*subStructType, fieldID - curFieldID); + return std::pair(result.first + allocedSize, result.second); + } + } + } + + if (leftOverBits) { + allocedSizeInBits += fieldSizeBits; + allocedSize = std::max(allocedSize, RoundUp(allocedSizeInBits, fieldAlignBits) / kBitsPerByte); + } else { + allocedSize += fieldTypeSize; + allocedSizeInBits = allocedSize * kBitsPerByte; + } + } + } else { /* for unions, bitfields are treated as non-bitfields */ + if (curFieldID == fieldID) { + return std::pair(0, 0); + } else { + MIRStructType *subStructType = fieldType->EmbeddedStructType(); + if (subStructType == nullptr) { + curFieldID++; + } else { + if ((curFieldID + GetStructFieldCount(subStructType->GetTypeIndex())) < fieldID) { + curFieldID += GetStructFieldCount(subStructType->GetTypeIndex()) + 1; + } else { + return GetFieldOffset(*subStructType, fieldID - curFieldID); + } + } + } + } + } + CHECK_FATAL(false, "GetFieldOffset() fails to find field"); + return std::pair(0, 0); +} + +bool BECommon::TyIsInSizeAlignTable(const MIRType &ty) const { + if (typeSizeTable.size() != typeAlignTable.size()) { + return false; + } + return ty.GetTypeIndex() < typeSizeTable.size(); +} + +void BECommon::AddAndComputeSizeAlign(MIRType &ty) { + FinalizeTypeTable(ty); + typeAlignTable.emplace_back(mirModule.IsCModule()); + typeSizeTable.emplace_back(0); + ComputeTypeSizesAligns(ty); +} + +void BECommon::AddElementToJClassLayout(MIRClassType &klass, JClassFieldInfo info) { + JClassLayout &layout = *(jClassLayoutTable.at(&klass)); + layout.emplace_back(info); +} + +void BECommon::AddElementToFuncReturnType(MIRFunction &func, const TyIdx tyIdx) { + funcReturnType[&func] = tyIdx; +} + +MIRType *BECommon::BeGetOrCreatePointerType(const MIRType &pointedType) { + MIRType *newType = GlobalTables::GetTypeTable().GetOrCreatePointerType(pointedType, GetLoweredPtrType()); + if (TyIsInSizeAlignTable(*newType)) { + return newType; + } + AddAndComputeSizeAlign(*newType); + return newType; +} + +MIRType *BECommon::BeGetOrCreateFunctionType(TyIdx tyIdx, const std::vector &vecTy, + const std::vector &vecAt) { + MIRType *newType = GlobalTables::GetTypeTable().GetOrCreateFunctionType(tyIdx, vecTy, vecAt); + if (TyIsInSizeAlignTable(*newType)) { + return newType; + } + AddAndComputeSizeAlign(*newType); + return newType; +} + +void BECommon::FinalizeTypeTable(const MIRType &ty) { + if (ty.GetTypeIndex() > GetSizeOfTypeSizeTable()) { + if (mirModule.GetSrcLang() == kSrcLangC) { + for (uint32 i = GetSizeOfTypeSizeTable(); i < ty.GetTypeIndex(); ++i) { + MIRType *tyTmp = GlobalTables::GetTypeTable().GetTypeFromTyIdx(i); + AddAndComputeSizeAlign(*tyTmp); + } + } else { + CHECK_FATAL(ty.GetTypeIndex() == typeSizeTable.size(), "make sure the 
ty idx is exactly the table size"); + } + } +} + +BaseNode *BECommon::GetAddressOfNode(const BaseNode &node) { + switch (node.GetOpCode()) { + case OP_dread: { + const DreadNode &dNode = static_cast(node); + const StIdx &index = dNode.GetStIdx(); + return mirModule.GetMIRBuilder()->CreateAddrof(*mirModule.CurFunction()->GetLocalOrGlobalSymbol(index)); + } + case OP_iread: { + const IreadNode &iNode = static_cast(node); + if (iNode.GetFieldID() == 0) { + return iNode.Opnd(0); + } + + uint32 index = static_cast(GlobalTables::GetTypeTable().GetTypeTable().at( + iNode.GetTyIdx()))->GetPointedTyIdx(); + MIRType *pointedType = GlobalTables::GetTypeTable().GetTypeTable().at(index); + std::pair byteBitOffset = + GetFieldOffset(static_cast(*pointedType), iNode.GetFieldID()); +#if TARGAARCH64 || TARGRISCV64 + ASSERT(GetAddressPrimType() == GetLoweredPtrType(), "incorrect address type, expect a GetLoweredPtrType()"); +#endif + return mirModule.GetMIRBuilder()->CreateExprBinary( + OP_add, *GlobalTables::GetTypeTable().GetPrimType(GetAddressPrimType()), + static_cast(iNode.Opnd(0)), + mirModule.GetMIRBuilder()->CreateIntConst(static_cast(static_cast(byteBitOffset.first)), + PTY_u32)); + } + default: + return nullptr; + } +} + +bool BECommon::CallIsOfAttr(FuncAttrKind attr, const StmtNode *narynode) const { + (void) attr; + (void) narynode; + return false; + + /* For now, all 64x1_t types object are not propagated to become pregs by mplme, so the following + is not needed for now. We need to revisit this later when types are enhanced with attributes */ +#if TO_BE_RESURRECTED + bool attrFunc = false; + if (narynode->GetOpCode() == OP_call) { + CallNode *callNode = static_cast(narynode); + MIRFunction *func = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(callNode->GetPUIdx()); + attrFunc = (mirModule.GetSrcLang() == kSrcLangC && func->GetAttr(attr)) ? true : false; + } else if (narynode->GetOpCode() == OP_icall) { + IcallNode *icallNode = static_cast(narynode); + BaseNode *fNode = icallNode->Opnd(0); + MIRFuncType *fType = nullptr; + MIRPtrType *pType = nullptr; + if (fNode->GetOpCode() == OP_dread) { + DreadNode *dNode = static_cast(fNode); + MIRSymbol *symbol = mirModule.CurFunction()->GetLocalOrGlobalSymbol(dNode->GetStIdx()); + pType = static_cast(symbol->GetType()); + MIRType *ty = pType; + if (dNode->GetFieldID() != 0) { + ASSERT(ty->GetKind() == kTypeStruct || ty->GetKind() == kTypeClass, ""); + FieldPair thepair; + if (ty->GetKind() == kTypeStruct) { + thepair = static_cast(ty)->TraverseToField(dNode->GetFieldID()); + } else { + thepair = static_cast(ty)->TraverseToField(dNode->GetFieldID()); + } + pType = static_cast(GlobalTables::GetTypeTable().GetTypeFromTyIdx(thepair.second.first)); + } + fType = static_cast(pType->GetPointedType()); + } else if (fNode->GetOpCode() == OP_iread) { + IreadNode *iNode = static_cast(fNode); + MIRPtrType *pointerty = static_cast(GlobalTables:: + GetTypeTable().GetTypeFromTyIdx(iNode->GetTyIdx())); + MIRType *pointedType = pointerty->GetPointedType(); + if (iNode->GetFieldID() != 0) { + pointedType = static_cast(pointedType)->GetFieldType(iNode->GetFieldID()); + } + if (pointedType->GetKind() == kTypeFunction) { + fType = static_cast(pointedType); + } else if (pointedType->GetKind() == kTypePointer) { + return false; /* assert? 
*/ + } + } else if (fNode->GetOpCode() == OP_select) { + TernaryNode *sNode = static_cast(fNode); + BaseNode *expr = sNode->Opnd(1); + // both function ptrs under select should have the same signature, chk op1 only + AddroffuncNode *afNode = static_cast(expr); + MIRFunction *func = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(afNode->GetPUIdx()); + attrFunc = mirModule.GetSrcLang() == kSrcLangC && func->GetAttr(attr); + } else if (fNode->GetOpCode() == OP_regread) { + RegreadNode *rNode = static_cast(fNode); + PregIdx pregidx = rNode->GetRegIdx(); + MIRPreg *preg = mirModule.CurFunction()->GetPregTab()->PregFromPregIdx(pregidx); + MIRType *type = preg->GetMIRType(); + if (type == nullptr) { + return false; + } + MIRPtrType *pType = static_cast(type); + type = pType->GetPointedType(); + if (type == nullptr) { + return false; + } + } else if (fNode->GetOpCode() == OP_retype) { + RetypeNode *rNode = static_cast(fNode); + pType = static_cast(GlobalTables::GetTypeTable().GetTypeFromTyIdx(rNode->GetTyIdx())); + fType = static_cast(pType->GetPointedType()); + } else { + return false; /* assert? */ + } + } + return attrFunc; +#endif +} +} /* namespace maplebe */ diff --git a/src/mapleall/maple_be/src/be/lower.cpp b/src/mapleall/maple_be/src/be/lower.cpp new file mode 100644 index 0000000000000000000000000000000000000000..83850176ef5d8399db35a8762dee00c422135020 --- /dev/null +++ b/src/mapleall/maple_be/src/be/lower.cpp @@ -0,0 +1,4164 @@ +/* + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "lower.h" +#include +#include +#include +#include "mir_symbol.h" +#include "mir_function.h" +#include "cg_option.h" +#include "switch_lowerer.h" +#include "try_catch.h" +#include "intrinsic_op.h" +#include "mir_builder.h" +#include "opcode_info.h" +#include "rt.h" +#include "securec.h" +#include "string_utils.h" +#include "cast_opt.h" +#include "simplify.h" +#include "me_safety_warning.h" + +namespace maplebe { +namespace arrayNameForLower { +const std::set kArrayKlassName{ +#include "array_klass_name.def" +}; + +const std::set kArrayBaseName{ +#include "array_base_name.def" +}; +} + +using namespace maple; + +#define JAVALANG (mirModule.IsJavaModule()) +#define TARGARM32 0 + +enum ExtFuncT : uint8 { kFmodDouble, kFmodFloat }; + +struct ExtFuncDescrT { + ExtFuncT fid; + const char *name; + PrimType retType; + PrimType argTypes[kMaxModFuncArgSize]; +}; + +namespace { +std::pair cgBuiltins[] = { + { INTRN_JAVA_ARRAY_LENGTH, "MCC_DexArrayLength" }, + { INTRN_JAVA_ARRAY_FILL, "MCC_DexArrayFill" }, + { INTRN_JAVA_CHECK_CAST, "MCC_DexCheckCast" }, + { INTRN_JAVA_INSTANCE_OF, "MCC_DexInstanceOf" }, + { INTRN_JAVA_INTERFACE_CALL, "MCC_DexInterfaceCall" }, + { INTRN_JAVA_POLYMORPHIC_CALL, "MCC_DexPolymorphicCall" }, + { INTRN_MCC_DeferredFillNewArray, "MCC_DeferredFillNewArray" }, + { INTRN_MCC_DeferredInvoke, "MCC_DeferredInvoke" }, + { INTRN_JAVA_CONST_CLASS, "MCC_GetReferenceToClass" }, + { INTRN_JAVA_GET_CLASS, "MCC_GetClass" }, + { INTRN_MPL_SET_CLASS, "MCC_SetJavaClass" }, + { INTRN_MPL_MEMSET_LOCALVAR, "memset_s" }, +}; + +ExtFuncDescrT extFnDescrs[] = { + { kFmodDouble, "fmod", PTY_f64, { PTY_f64, PTY_f64, kPtyInvalid } }, + { kFmodFloat, "fmodf", PTY_f32, { PTY_f32, PTY_f32, kPtyInvalid } }, +}; + +std::vector> extFuncs; +const std::string kOpAssertge = "OP_assertge"; +const std::string kOpAssertlt = "OP_assertlt"; +const std::string kOpCallAssertle = "OP_callassertle"; +const std::string kOpReturnAssertle = "OP_returnassertle"; +const std::string kOpAssignAssertle = "OP_assignassertle"; +const std::string kFileSymbolNamePrefix = "symname"; +} + +const std::string CGLowerer::kIntrnRetValPrefix = "__iret"; +const std::string CGLowerer::kUserRetValPrefix = "__uret"; + +static bool CasePairKeyLessThan(const CasePair &left, const CasePair &right) { + return left.first < right.first; +} + +std::string CGLowerer::GetFileNameSymbolName(const std::string &fileName) const { + return kFileSymbolNamePrefix + std::regex_replace(fileName, std::regex("-"), "_"); +} + +MIRSymbol *CGLowerer::CreateNewRetVar(const MIRType &ty, const std::string &prefix) { + const uint32 bufSize = 257; + char buf[bufSize] = {'\0'}; + MIRFunction *func = GetCurrentFunc(); + MIRSymbol *var = func->GetSymTab()->CreateSymbol(kScopeLocal); + int eNum = sprintf_s(buf, bufSize - 1, "%s%" PRId64, prefix.c_str(), ++seed); + if (eNum == -1) { + FATAL(kLncFatal, "sprintf_s failed"); + } + std::string strBuf(buf); + var->SetNameStrIdx(mirModule.GetMIRBuilder()->GetOrCreateStringIndex(strBuf)); + var->SetTyIdx(ty.GetTypeIndex()); + var->SetStorageClass(kScAuto); + var->SetSKind(kStVar); + func->GetSymTab()->AddToStringSymbolMap(*var); + return var; +} + +void CGLowerer::RegisterExternalLibraryFunctions() { + for (uint32 i = 0; i < sizeof(extFnDescrs) / sizeof(extFnDescrs[0]); ++i) { + ExtFuncT id = extFnDescrs[i].fid; + CHECK_FATAL(id == i, "make sure id equal i"); + + MIRFunction *func = mirModule.GetMIRBuilder()->GetOrCreateFunction(extFnDescrs[i].name, + TyIdx(extFnDescrs[i].retType)); + 
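+    /*
+     * Each ExtFuncDescrT row above describes one external helper: an id, a C
+     * name, a return PrimType and a kPtyInvalid-terminated argument list.
+     * The kFmodDouble entry corresponds (roughly) to the C declaration
+     *
+     *   extern double fmod(double p0, double p1);
+     *
+     * where the formal names p0/p1 are generated below, not taken from any
+     * header.  The surrounding loop materialises that declaration as an
+     * extern (kScExtern) MIRFunction so later lowering (see LowerRem below)
+     * can emit calls to it.
+     */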
beCommon.UpdateTypeTable(*func->GetMIRFuncType()); + func->AllocSymTab(); + MIRSymbol *funcSym = func->GetFuncSymbol(); + funcSym->SetStorageClass(kScExtern); + funcSym->SetAppearsInCode(true); + /* return type */ + MIRType *retTy = GlobalTables::GetTypeTable().GetPrimType(extFnDescrs[i].retType); + + /* use void* for PTY_dynany */ + if (retTy->GetPrimType() == PTY_dynany) { + retTy = GlobalTables::GetTypeTable().GetPtr(); + } + + std::vector formals; + for (uint32 j = 0; extFnDescrs[i].argTypes[j] != kPtyInvalid; ++j) { + PrimType primTy = extFnDescrs[i].argTypes[j]; + MIRType *argTy = GlobalTables::GetTypeTable().GetPrimType(primTy); + /* use void* for PTY_dynany */ + if (argTy->GetPrimType() == PTY_dynany) { + argTy = GlobalTables::GetTypeTable().GetPtr(); + } + MIRSymbol *argSt = func->GetSymTab()->CreateSymbol(kScopeLocal); + const uint32 bufSize = 18; + char buf[bufSize] = {'\0'}; + int eNum = sprintf_s(buf, bufSize - 1, "p%u", j); + if (eNum == -1) { + FATAL(kLncFatal, "sprintf_s failed"); + } + std::string strBuf(buf); + argSt->SetNameStrIdx(mirModule.GetMIRBuilder()->GetOrCreateStringIndex(strBuf)); + argSt->SetTyIdx(argTy->GetTypeIndex()); + argSt->SetStorageClass(kScFormal); + argSt->SetSKind(kStVar); + func->GetSymTab()->AddToStringSymbolMap(*argSt); + formals.emplace_back(argSt); + } + func->UpdateFuncTypeAndFormalsAndReturnType(formals, retTy->GetTypeIndex(), false); + auto *funcType = func->GetMIRFuncType(); + ASSERT(funcType != nullptr, "null ptr check"); + beCommon.AddTypeSizeAndAlign(funcType->GetTypeIndex(), GetPrimTypeSize(funcType->GetPrimType())); + extFuncs.emplace_back(std::pair(id, func->GetPuidx())); + } +} + +BaseNode *CGLowerer::NodeConvert(PrimType mType, BaseNode &expr) { + PrimType srcType = expr.GetPrimType(); + if (GetPrimTypeSize(mType) == GetPrimTypeSize(srcType)) { + return &expr; + } + TypeCvtNode *cvtNode = mirModule.CurFuncCodeMemPool()->New(OP_cvt); + cvtNode->SetFromType(srcType); + cvtNode->SetPrimType(mType); + cvtNode->SetOpnd(&expr, 0); + return cvtNode; +} + +BaseNode *CGLowerer::LowerIaddrof(const IreadNode &iaddrof) { + if (iaddrof.GetFieldID() == 0) { + return iaddrof.Opnd(0); + } + MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(iaddrof.GetTyIdx()); + MIRPtrType *pointerTy = static_cast(type); + CHECK_FATAL(pointerTy != nullptr, "LowerIaddrof: expect a pointer type at iaddrof node"); + MIRStructType *structTy = static_cast( + GlobalTables::GetTypeTable().GetTypeFromTyIdx(pointerTy->GetPointedTyIdx())); + CHECK_FATAL(structTy != nullptr, "LowerIaddrof: non-zero fieldID for non-structure"); + int32 offset = beCommon.GetFieldOffset(*structTy, iaddrof.GetFieldID()).first; + if (offset == 0) { + return iaddrof.Opnd(0); + } + uint32 loweredPtrType = static_cast(GetLoweredPtrType()); + MIRIntConst *offsetConst = GlobalTables::GetIntConstTable().GetOrCreateIntConst( + static_cast(static_cast(offset)), *GlobalTables::GetTypeTable().GetTypeTable().at(loweredPtrType)); + BaseNode *offsetNode = mirModule.CurFuncCodeMemPool()->New(offsetConst); + offsetNode->SetPrimType(GetLoweredPtrType()); + + BinaryNode *addNode = mirModule.CurFuncCodeMemPool()->New(OP_add); + addNode->SetPrimType(GetLoweredPtrType()); + addNode->SetBOpnd(iaddrof.Opnd(0), 0); + addNode->SetBOpnd(offsetNode, 1); + return addNode; +} + +BaseNode *CGLowerer::SplitBinaryNodeOpnd1(BinaryNode &bNode, BlockNode &blkNode) { + if (Globals::GetInstance()->GetOptimLevel() >= CGOptions::kLevel1) { + return &bNode; + } + MIRBuilder *mirbuilder = mirModule.GetMIRBuilder(); + static 
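+  /*
+   * At -O0 the second operand of a binary op is spilled into a named local
+   * (bnaryTmp<N>, numbered by the static counter declared here) via a
+   * dassign, and the operand is replaced by a dread of that local.  Roughly:
+   *
+   *   add i32 (dread i32 %x, <complex expr>)
+   * becomes
+   *   dassign %bnaryTmp0 0 (<complex expr>)
+   *   add i32 (dread i32 %x, dread i32 %bnaryTmp0)
+   *
+   * The IR spelling is approximate and only for illustration.
+   */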
uint32 val = 0; + std::string name("bnaryTmp"); + name.append(std::to_string(val++)); + + BaseNode *opnd1 = bNode.Opnd(1); + MIRType *ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(opnd1->GetPrimType())); + MIRSymbol *dnodeSt = mirbuilder->GetOrCreateLocalDecl(const_cast(name), *ty); + DassignNode *dnode = mirbuilder->CreateStmtDassign(const_cast(*dnodeSt), 0, opnd1); + blkNode.InsertAfter(blkNode.GetLast(), dnode); + + BaseNode *dreadNode = mirbuilder->CreateExprDread(*dnodeSt); + bNode.SetOpnd(dreadNode, 1); + + return &bNode; +} + +BaseNode *CGLowerer::SplitTernaryNodeResult(TernaryNode &tNode, BaseNode &parent, BlockNode &blkNode) { + if (Globals::GetInstance()->GetOptimLevel() >= CGOptions::kLevel1) { + return &tNode; + } + MIRBuilder *mirbuilder = mirModule.GetMIRBuilder(); + static uint32 val = 0; + std::string name("tnaryTmp"); + name.append(std::to_string(val++)); + + MIRType *ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(tNode.GetPrimType())); + MIRSymbol *dassignNodeSym = mirbuilder->GetOrCreateLocalDecl(const_cast(name), *ty); + DassignNode *dassignNode = mirbuilder->CreateStmtDassign(const_cast(*dassignNodeSym), 0, &tNode); + blkNode.InsertAfter(blkNode.GetLast(), dassignNode); + + BaseNode *dreadNode = mirbuilder->CreateExprDread(*dassignNodeSym); + for (size_t i = 0; i < parent.NumOpnds(); i++) { + if (parent.Opnd(i) == &tNode) { + parent.SetOpnd(dreadNode, i); + break; + } + } + + return dreadNode; +} + +/* Check if the operand of the select node is complex enough for either + * functionality or performance reason so we need to lower it to if-then-else. + */ +bool CGLowerer::IsComplexSelect(const TernaryNode &tNode) const { + if (tNode.GetPrimType() == PTY_agg) { + return true; + } + /* Iread may have side effect which may cause correctness issue. */ + if (HasIreadExpr(tNode.Opnd(1)) || HasIreadExpr(tNode.Opnd(2))) { + return true; + } + // it will be generated many insn for complex expr, leading to + // worse performance than punishment of branch prediction error + constexpr size_t maxDepth = 3; + if (MaxDepth(tNode.Opnd(1)) > maxDepth || MaxDepth(tNode.Opnd(1)) > maxDepth) { + return true; + } + return false; +} + +int32 CGLowerer::FindTheCurrentStmtFreq(const StmtNode *stmt) const { + while (stmt != nullptr) { + int32 freq = mirModule.CurFunction()->GetFreqFromLastStmt(stmt->GetStmtID()); + if (freq != -1) { + return freq; + } + stmt = stmt->GetPrev(); + } + return -1; +} + +/* Lower agg select node back to if-then-else stmt. 
*/ +/* + 0(brfalse) + | \ + 1 2 + \ | + \ | + 3 +*/ +BaseNode *CGLowerer::LowerComplexSelect(const TernaryNode &tNode, BaseNode &parent, BlockNode &blkNode) { + MIRBuilder *mirbuilder = mirModule.GetMIRBuilder(); + + MIRType *resultTy = nullptr; + MIRFunction *func = mirModule.CurFunction(); + if (tNode.GetPrimType() == PTY_agg) { + if (tNode.Opnd(1)->op == OP_dread) { + DreadNode *trueNode = static_cast(tNode.Opnd(1)); + resultTy = mirModule.CurFunction()->GetLocalOrGlobalSymbol(trueNode->GetStIdx())->GetType(); + } else if (tNode.Opnd(1)->op == OP_iread) { + IreadNode *trueNode = static_cast(tNode.Opnd(1)); + MIRPtrType *ptrty = + static_cast(GlobalTables::GetTypeTable().GetTypeFromTyIdx(trueNode->GetTyIdx())); + resultTy = static_cast(GlobalTables::GetTypeTable().GetTypeFromTyIdx(ptrty->GetPointedTyIdx())); + if (trueNode->GetFieldID() != 0) { + MIRStructType *structty = static_cast(resultTy); + resultTy = GlobalTables::GetTypeTable().GetTypeFromTyIdx(structty->GetFieldTyIdx(trueNode->GetFieldID())); + } + } else { + CHECK_FATAL(false, "NYI: LowerComplexSelect"); + } + } else { + resultTy = GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(tNode.GetPrimType())); + } + + CondGotoNode *brTargetStmt = mirModule.CurFuncCodeMemPool()->New(OP_brfalse); + brTargetStmt->SetOpnd(tNode.Opnd(0), 0); + LabelIdx targetIdx = mirModule.CurFunction()->GetLabelTab()->CreateLabel(); + mirModule.CurFunction()->GetLabelTab()->AddToStringLabelMap(targetIdx); + brTargetStmt->SetOffset(targetIdx); + // Update the current stmt frequence + int32 currentStmtFreq = 0; + if (kOpcodeInfo.IsStmt(parent.GetOpCode())) { + currentStmtFreq = FindTheCurrentStmtFreq(static_cast(&parent)); + } + currentStmtFreq = currentStmtFreq == -1 ? 0 : currentStmtFreq; + func->SetLastFreqMap(brTargetStmt->GetStmtID(), static_cast(currentStmtFreq)); + blkNode.InsertAfter(blkNode.GetLast(), brTargetStmt); + union { + MIRSymbol *resSym; + PregIdx resPreg; + } cplxSelRes; // complex select result + uint32 fallthruStmtFreq = static_cast((currentStmtFreq + 1) / 2); + if (tNode.GetPrimType() == PTY_agg) { + static uint32 val = 0; + std::string name("ComplexSelectTmp"); + name.append(std::to_string(val++)); + cplxSelRes.resSym = mirbuilder->GetOrCreateLocalDecl(const_cast(name), *resultTy); + DassignNode *dassignTrue = mirbuilder->CreateStmtDassign(*cplxSelRes.resSym, 0, tNode.Opnd(1)); + // Fallthru: update the frequence 1 + func->SetFirstFreqMap(dassignTrue->GetStmtID(), fallthruStmtFreq); + blkNode.InsertAfter(blkNode.GetLast(), dassignTrue); + } else { + cplxSelRes.resPreg = mirbuilder->GetCurrentFunction()->GetPregTab()->CreatePreg(tNode.GetPrimType()); + RegassignNode *regassignTrue = + mirbuilder->CreateStmtRegassign(tNode.GetPrimType(), cplxSelRes.resPreg, tNode.Opnd(1)); + // Update the frequence first opnd + func->SetFirstFreqMap(regassignTrue->GetStmtID(), fallthruStmtFreq); + blkNode.InsertAfter(blkNode.GetLast(), regassignTrue); + } + + GotoNode *gotoStmt = mirModule.CurFuncCodeMemPool()->New(OP_goto); + LabelIdx endIdx = mirModule.CurFunction()->GetLabelTab()->CreateLabel(); + mirModule.CurFunction()->GetLabelTab()->AddToStringLabelMap(endIdx); + gotoStmt->SetOffset(endIdx); + // Update the frequence first opnd + func->SetLastFreqMap(gotoStmt->GetStmtID(), fallthruStmtFreq); + blkNode.InsertAfter(blkNode.GetLast(), gotoStmt); + + uint32 targetStmtFreq = static_cast(currentStmtFreq / 2); + LabelNode *lableStmt = mirModule.CurFuncCodeMemPool()->New(); + lableStmt->SetLabelIdx(targetIdx); + 
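+  /*
+   * The select is being rewritten into the explicit diamond sketched above.
+   * For a non-agg select the full emitted sequence is roughly:
+   *
+   *   brfalse @Lfalse (<cond>)
+   *   regassign %res (<true expr>)
+   *   goto @Lend
+   * @Lfalse:
+   *   regassign %res (<false expr>)
+   * @Lend:
+   *   ... the parent expression then reads %res instead of the select ...
+   *
+   * Each half of the diamond gets roughly half of the original statement
+   * frequency.  Label and preg names are illustrative; the agg case uses a
+   * generated local symbol and dassign instead of a preg.
+   */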
func->SetFirstFreqMap(lableStmt->GetStmtID(), targetStmtFreq); + blkNode.InsertAfter(blkNode.GetLast(), lableStmt); + + if (tNode.GetPrimType() == PTY_agg) { + DassignNode *dassignFalse = mirbuilder->CreateStmtDassign(*cplxSelRes.resSym, 0, tNode.Opnd(2)); + // Update the frequence second opnd + func->SetLastFreqMap(dassignFalse->GetStmtID(), targetStmtFreq); + blkNode.InsertAfter(blkNode.GetLast(), dassignFalse); + } else { + RegassignNode *regassignFalse = + mirbuilder->CreateStmtRegassign(tNode.GetPrimType(), cplxSelRes.resPreg, tNode.Opnd(2)); + // Update the frequence 2 + func->SetLastFreqMap(regassignFalse->GetStmtID(), targetStmtFreq); + blkNode.InsertAfter(blkNode.GetLast(), regassignFalse); + } + + lableStmt = mirModule.CurFuncCodeMemPool()->New(); + lableStmt->SetLabelIdx(endIdx); + // Update the frequence third opnd + func->SetFirstFreqMap(lableStmt->GetStmtID(), static_cast(currentStmtFreq)); + blkNode.InsertAfter(blkNode.GetLast(), lableStmt); + + BaseNode *exprNode = (tNode.GetPrimType() == PTY_agg) ? + static_cast(mirbuilder->CreateExprDread(*cplxSelRes.resSym)) : + static_cast(mirbuilder->CreateExprRegread(tNode.GetPrimType(), cplxSelRes.resPreg)); + for (size_t i = 0; i < parent.NumOpnds(); i++) { + if (parent.Opnd(i) == &tNode) { + parent.SetOpnd(exprNode, i); + break; + } + } + + return exprNode; +} + +BaseNode *CGLowerer::LowerFarray(ArrayNode &array) { + auto *farrayType = static_cast(array.GetArrayType(GlobalTables::GetTypeTable())); + size_t eSize = GlobalTables::GetTypeTable().GetTypeFromTyIdx(farrayType->GetElemTyIdx())->GetSize(); + if (farrayType->GetKind() == kTypeJArray) { + if (farrayType->GetElemType()->GetKind() != kTypeScalar) { + /* not the last dimension of primitive array */ + eSize = RTSupport::GetRTSupportInstance().GetObjectAlignment(); + } + } + + MIRType &arrayType = *GlobalTables::GetTypeTable().GetTypeFromTyIdx(TyIdx(array.GetPrimType())); + /* how about multi-dimension array? 
*/ + if (array.GetIndex(0)->GetOpCode() == OP_constval) { + const ConstvalNode *constvalNode = static_cast(array.GetIndex(0)); + if (constvalNode->GetConstVal()->GetKind() == kConstInt) { + const MIRIntConst *pIntConst = static_cast(constvalNode->GetConstVal()); + CHECK_FATAL(JAVALANG || !pIntConst->IsNegative(), "Array index should >= 0."); + uint64 eleOffset = static_cast(pIntConst->GetExtValue()) * eSize; + + if (farrayType->GetKind() == kTypeJArray) { + eleOffset += static_cast(RTSupport::GetRTSupportInstance().GetArrayContentOffset()); + } + + BaseNode *baseNode = NodeConvert(array.GetPrimType(), *array.GetBase()); + if (eleOffset == 0) { + return baseNode; + } + + MIRIntConst *eleConst = + GlobalTables::GetIntConstTable().GetOrCreateIntConst(eleOffset, arrayType); + BaseNode *offsetNode = mirModule.CurFuncCodeMemPool()->New(eleConst); + offsetNode->SetPrimType(array.GetPrimType()); + + BaseNode *rAdd = mirModule.CurFuncCodeMemPool()->New(OP_add); + rAdd->SetPrimType(array.GetPrimType()); + rAdd->SetOpnd(baseNode, 0); + rAdd->SetOpnd(offsetNode, 1); + return rAdd; + } + } + + BaseNode *resNode = NodeConvert(array.GetPrimType(), *array.GetIndex(0)); + BaseNode *rMul = nullptr; + + if ((farrayType->GetKind() == kTypeJArray) && (resNode->GetOpCode() == OP_constval)) { + ConstvalNode *idxNode = static_cast(resNode); + uint64 idx = static_cast(safe_cast(idxNode->GetConstVal())->GetExtValue()); + MIRIntConst *eConst = GlobalTables::GetIntConstTable().GetOrCreateIntConst(idx * eSize, arrayType); + rMul = mirModule.CurFuncCodeMemPool()->New(eConst); + rMul->SetPrimType(array.GetPrimType()); + } else { + MIRIntConst *eConst = GlobalTables::GetIntConstTable().GetOrCreateIntConst(eSize, arrayType); + BaseNode *eSizeNode = mirModule.CurFuncCodeMemPool()->New(eConst); + eSizeNode->SetPrimType(array.GetPrimType()); + rMul = mirModule.CurFuncCodeMemPool()->New(OP_mul); + rMul->SetPrimType(array.GetPrimType()); + rMul->SetOpnd(resNode, 0); + rMul->SetOpnd(eSizeNode, 1); + } + + BaseNode *baseNode = NodeConvert(array.GetPrimType(), *array.GetBase()); + + if (farrayType->GetKind() == kTypeJArray) { + BaseNode *jarrayBaseNode = mirModule.CurFuncCodeMemPool()->New(OP_add); + MIRIntConst *arrayHeaderNode = GlobalTables::GetIntConstTable().GetOrCreateIntConst( + static_cast(RTSupport::GetRTSupportInstance().GetArrayContentOffset()), arrayType); + BaseNode *arrayHeaderCstNode = mirModule.CurFuncCodeMemPool()->New(arrayHeaderNode); + arrayHeaderCstNode->SetPrimType(array.GetPrimType()); + jarrayBaseNode->SetPrimType(array.GetPrimType()); + jarrayBaseNode->SetOpnd(baseNode, 0); + jarrayBaseNode->SetOpnd(arrayHeaderCstNode, 1); + baseNode = jarrayBaseNode; + } + + BaseNode *rAdd = mirModule.CurFuncCodeMemPool()->New(OP_add); + rAdd->SetPrimType(array.GetPrimType()); + rAdd->SetOpnd(baseNode, 0); + rAdd->SetOpnd(rMul, 1); + return rAdd; +} + +BaseNode *CGLowerer::LowerArrayDim(ArrayNode &array, int32 dim) { + BaseNode *resNode = NodeConvert(array.GetPrimType(), *array.GetIndex(dim - 1)); + /* process left dimension index, resNode express the last dim, so dim need sub 2 */ + CHECK_FATAL(dim > (std::numeric_limits::min)() + 1, "out of range"); + int leftDim = dim - 2; + MIRType *aType = array.GetArrayType(GlobalTables::GetTypeTable()); + MIRArrayType *arrayType = static_cast(aType); + for (int i = leftDim; i >= 0; --i) { + BaseNode *mpyNode = mirModule.CurFuncCodeMemPool()->New(OP_mul); + BaseNode *item = NodeConvert(array.GetPrimType(), *array.GetDim(mirModule, GlobalTables::GetTypeTable(), dim - 1)); + if 
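+      /*
+       * Both branches below compute the same thing: the contribution of
+       * index i to the flattened (row-major) element index,
+       *
+       *   linear = sum_i ( idx_i * prod_{j > i} dim_j )
+       *
+       * For C modules the dimension sizes come from the MIRArrayType
+       * (GetSizeArrayItem), so the per-index multiplier folds to a constant;
+       * for Java the sizes are read via array.GetDim().  The element-size
+       * scaling happens in the caller (LowerArray), not here.
+       */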
(mirModule.IsCModule()) { + item = NodeConvert(array.GetPrimType(), *array.GetIndex(static_cast(static_cast(i)))); + int64 offsetSize = 1; + for (int32 j = i + 1; j < dim; ++j) { + offsetSize *= static_cast(arrayType->GetSizeArrayItem(static_cast(j))); + } + MIRIntConst *offsetCst = mirModule.CurFuncCodeMemPool()->New( + offsetSize, *GlobalTables::GetTypeTable().GetTypeFromTyIdx(array.GetPrimType())); + BaseNode *eleOffset = mirModule.CurFuncCodeMemPool()->New(offsetCst); + eleOffset->SetPrimType(array.GetPrimType()); + mpyNode->SetPrimType(array.GetPrimType()); + mpyNode->SetOpnd(eleOffset, 0); + mpyNode->SetOpnd(item, 1); + } else { + for (int j = leftDim; j > i; --j) { + BaseNode *mpyNodes = mirModule.CurFuncCodeMemPool()->New(OP_mul); + mpyNodes->SetPrimType(array.GetPrimType()); + mpyNodes->SetOpnd(item, 0); + mpyNodes->SetOpnd( + NodeConvert(array.GetPrimType(), *array.GetDim(mirModule, GlobalTables::GetTypeTable(), j)), 1); + item = mpyNodes; + } + mpyNode->SetPrimType(array.GetPrimType()); + mpyNode->SetOpnd(NodeConvert(array.GetPrimType(), *array.GetIndex(i)), 0); + mpyNode->SetOpnd(item, 1); + } + + BaseNode *newResNode = mirModule.CurFuncCodeMemPool()->New(OP_add); + newResNode->SetPrimType(array.GetPrimType()); + newResNode->SetOpnd(resNode, 0); + newResNode->SetOpnd(mpyNode, 1); + resNode = newResNode; + } + return resNode; +} + +BaseNode *CGLowerer::LowerArrayForLazyBiding(BaseNode &baseNode, BaseNode &offsetNode, const BaseNode &parent) { + if (parent.GetOpCode() == OP_iread && (baseNode.GetOpCode() == maple::OP_addrof)) { + const MIRSymbol *st = + mirModule.CurFunction()->GetLocalOrGlobalSymbol(static_cast(baseNode).GetStIdx()); + if (StringUtils::StartsWith(st->GetName(), namemangler::kDecoupleStaticValueStr) || + ((StringUtils::StartsWith(st->GetName(), namemangler::kMuidFuncUndefTabPrefixStr) || + StringUtils::StartsWith(st->GetName(), namemangler::kMuidFuncDefTabPrefixStr) || + StringUtils::StartsWith(st->GetName(), namemangler::kMuidDataDefTabPrefixStr) || + StringUtils::StartsWith(st->GetName(), namemangler::kMuidDataUndefTabPrefixStr)) && + CGOptions::IsLazyBinding())) { + /* for decouple static or lazybinding def/undef tables, replace it with intrinsic */ + MapleVector args(mirBuilder->GetCurrentFuncCodeMpAllocator()->Adapter()); + args.emplace_back(&baseNode); + args.emplace_back(&offsetNode); + return mirBuilder->CreateExprIntrinsicop(INTRN_MPL_READ_STATIC_OFFSET_TAB, OP_intrinsicop, + *GlobalTables::GetTypeTable().GetPrimType(parent.GetPrimType()), args); + } + } + return nullptr; +} + +BaseNode *CGLowerer::LowerArray(ArrayNode &array, const BaseNode &parent) { + MIRType *aType = array.GetArrayType(GlobalTables::GetTypeTable()); + if (aType->GetKind() == kTypeFArray || aType->GetKind() == kTypeJArray) { + return LowerFarray(array); + } + MIRArrayType *arrayType = static_cast(aType); + int32 dim = arrayType->GetDim(); + BaseNode *resNode = LowerArrayDim(array, dim); + BaseNode *rMul = nullptr; + size_t eSize = beCommon.GetTypeSize(arrayType->GetElemTyIdx().GetIdx()); + Opcode opAdd = OP_add; + MIRType &arrayTypes = *GlobalTables::GetTypeTable().GetTypeFromTyIdx(TyIdx(array.GetPrimType())); + if (resNode->GetOpCode() == OP_constval) { + /* index is a constant, we can calculate the offset now */ + ConstvalNode *idxNode = static_cast(resNode); + uint64 idx = static_cast(safe_cast(idxNode->GetConstVal())->GetExtValue()); + MIRIntConst *eConst = GlobalTables::GetIntConstTable().GetOrCreateIntConst(idx * eSize, arrayTypes); + rMul = 
mirModule.CurFuncCodeMemPool()->New(eConst); + rMul->SetPrimType(array.GetPrimType()); + if (dim == 1) { + opAdd = OP_CG_array_elem_add; + } + } else { + MIRIntConst *eConst = GlobalTables::GetIntConstTable().GetOrCreateIntConst(eSize, arrayTypes); + BaseNode *tmpNode = mirModule.CurFuncCodeMemPool()->New(eConst); + tmpNode->SetPrimType(array.GetPrimType()); + rMul = mirModule.CurFuncCodeMemPool()->New(OP_mul); + rMul->SetPrimType(array.GetPrimType()); + rMul->SetOpnd(resNode, 0); + rMul->SetOpnd(tmpNode, 1); + } + BaseNode *baseNode = NodeConvert(array.GetPrimType(), *array.GetBase()); + if (rMul->GetOpCode() == OP_constval) { + BaseNode *intrnNode = LowerArrayForLazyBiding(*baseNode, *rMul, parent); + if (intrnNode != nullptr) { + return intrnNode; + } + } + BaseNode *rAdd = mirModule.CurFuncCodeMemPool()->New(opAdd); + rAdd->SetPrimType(array.GetPrimType()); + rAdd->SetOpnd(baseNode, 0); + rAdd->SetOpnd(rMul, 1); + return rAdd; +} + +BaseNode *CGLowerer::LowerCArray(ArrayNode &array) { + MIRType *aType = array.GetArrayType(GlobalTables::GetTypeTable()); + if (aType->GetKind() == kTypeFArray || aType->GetKind() == kTypeJArray) { + return LowerFarray(array); + } + + MIRArrayType *arrayType = static_cast(aType); + /* There are two cases where dimension > 1. + * 1) arrayType->dim > 1. Process the current arrayType. (nestedArray = false) + * 2) arrayType->dim == 1, but arraytype->eTyIdx is another array. (nestedArray = true) + * Assume at this time 1) and 2) cannot mix. + * Along with the array dimension, there is the array indexing. + * It is allowed to index arrays less than the dimension. + * This is dictated by the number of indexes. + */ + bool nestedArray = false; + int dim = arrayType->GetDim(); + MIRType *innerType = nullptr; + MIRArrayType *innerArrayType = nullptr; + uint64 elemSize = 0; + if (dim == 1) { + innerType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(arrayType->GetElemTyIdx()); + if (innerType->GetKind() == kTypeArray) { + nestedArray = true; + do { + innerArrayType = static_cast(innerType); + elemSize = RoundUp(beCommon.GetTypeSize(innerArrayType->GetElemTyIdx().GetIdx()), + beCommon.GetTypeAlign(arrayType->GetElemTyIdx().GetIdx())); + dim++; + innerType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(innerArrayType->GetElemTyIdx()); + } while (innerType->GetKind() == kTypeArray); + } + } + + int32 numIndex = static_cast(array.NumOpnds()) - 1; + MIRArrayType *curArrayType = arrayType; + BaseNode *resNode = NodeConvert(array.GetPrimType(), *array.GetIndex(0)); + if (dim > 1) { + BaseNode *prevNode = nullptr; + for (int i = 0; (i < dim) && (i < numIndex); i++) { + uint32 mpyDim = 1; + if (nestedArray) { + CHECK_FATAL(arrayType->GetSizeArrayItem(0) > 0, "Zero size array dimension"); + innerType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(curArrayType->GetElemTyIdx()); + curArrayType = static_cast(innerType); + while (innerType->GetKind() == kTypeArray) { + innerArrayType = static_cast(innerType); + mpyDim *= innerArrayType->GetSizeArrayItem(0); + innerType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(innerArrayType->GetElemTyIdx()); + } + } else { + CHECK_FATAL(arrayType->GetSizeArrayItem(static_cast(i)) > 0, "Zero size array dimension"); + for (int j = i + 1; j < dim; j++) { + mpyDim *= arrayType->GetSizeArrayItem(static_cast(j)); + } + } + + BaseNode *index = static_cast(array.GetIndex(static_cast(i))); + bool isConst = false; + uint64 indexVal = 0; + if (index->op == OP_constval) { + ConstvalNode *constNode = static_cast(index); + indexVal = 
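+        /*
+         * A constant index is folded on the spot: its contribution becomes a
+         * single constval of idx_i * mpyDim, where mpyDim is the product of
+         * the remaining dimension sizes.  For example, for "int a[3][4][5]"
+         * the access a[1][2][3] yields the terms 1*20, 2*5 and 3*1, i.e.
+         * element index 33, and the byte offset 33 * sizeof(int) is formed
+         * once the element size is applied after this loop.  The type and
+         * numbers are only illustrative.
+         */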
static_cast((static_cast(constNode->GetConstVal()))->GetExtValue()); + isConst = true; + MIRIntConst *newConstNode = mirModule.GetMemPool()->New( + indexVal * mpyDim, *GlobalTables::GetTypeTable().GetTypeFromTyIdx(TyIdx(array.GetPrimType()))); + BaseNode *newValNode = mirModule.CurFuncCodeMemPool()->New(newConstNode); + newValNode->SetPrimType(array.GetPrimType()); + if (i == 0) { + prevNode = newValNode; + continue; + } else { + resNode = newValNode; + } + } + if (i > 0 && !isConst) { + resNode = NodeConvert(array.GetPrimType(), *array.GetIndex(static_cast(i))); + } + + BaseNode *mpyNode; + if (isConst) { + MIRIntConst *mulConst = mirModule.GetMemPool()->New( + mpyDim * indexVal, *GlobalTables::GetTypeTable().GetTypeFromTyIdx(TyIdx(array.GetPrimType()))); + BaseNode *mulSize = mirModule.CurFuncCodeMemPool()->New(mulConst); + mulSize->SetPrimType(array.GetPrimType()); + mpyNode = mulSize; + } else if (mpyDim == 1 && prevNode) { + mpyNode = prevNode; + prevNode = resNode; + } else { + mpyNode = mirModule.CurFuncCodeMemPool()->New(OP_mul); + mpyNode->SetPrimType(array.GetPrimType()); + MIRIntConst *mulConst = mirModule.GetMemPool()->New( + mpyDim, *GlobalTables::GetTypeTable().GetTypeFromTyIdx(TyIdx(array.GetPrimType()))); + BaseNode *mulSize = mirModule.CurFuncCodeMemPool()->New(mulConst); + mulSize->SetPrimType(array.GetPrimType()); + mpyNode->SetOpnd(NodeConvert(array.GetPrimType(), *mulSize), 0); + mpyNode->SetOpnd(resNode, 1); + } + if (i == 0) { + prevNode = mpyNode; + continue; + } + BaseNode *newResNode = mirModule.CurFuncCodeMemPool()->New(OP_add); + newResNode->SetPrimType(array.GetPrimType()); + newResNode->SetOpnd(mpyNode, 0); + newResNode->SetOpnd(prevNode, 1); + prevNode = newResNode; + } + resNode = prevNode; + } + + BaseNode *rMul = nullptr; + // esize is the size of the array element (eg. 
int = 4 long = 8) + uint64 esize; + if (nestedArray) { + esize = elemSize; + } else { + esize = beCommon.GetTypeSize(arrayType->GetElemTyIdx().GetIdx()); + } + Opcode opadd = OP_add; + if (resNode->op == OP_constval) { + // index is a constant, we can calculate the offset now + ConstvalNode *idxNode = static_cast(resNode); + uint64 idx = static_cast(static_cast(idxNode->GetConstVal())->GetExtValue()); + MIRIntConst *econst = mirModule.GetMemPool()->New( + idx * esize, *GlobalTables::GetTypeTable().GetTypeFromTyIdx(TyIdx(array.GetPrimType()))); + rMul = mirModule.CurFuncCodeMemPool()->New(econst); + rMul->SetPrimType(array.GetPrimType()); + if (dim == 1 && array.GetBase()->op == OP_addrof && static_cast(array.GetBase())->GetFieldID() == 0) { + opadd = OP_CG_array_elem_add; + } + } else { + MIRIntConst *econst = mirModule.GetMemPool()->New(esize, + *GlobalTables::GetTypeTable().GetTypeFromTyIdx(TyIdx(array.GetPrimType()))); + BaseNode *eSize = mirModule.CurFuncCodeMemPool()->New(econst); + eSize->SetPrimType(array.GetPrimType()); + rMul = mirModule.CurFuncCodeMemPool()->New(OP_mul); + rMul->SetPrimType(array.GetPrimType()); + rMul->SetOpnd(resNode, 0); + rMul->SetOpnd(eSize, 1); + } + BaseNode *baseNode = NodeConvert(array.GetPrimType(), *array.GetBase()); + BaseNode *rAdd = mirModule.CurFuncCodeMemPool()->New(opadd); + rAdd->SetPrimType(array.GetPrimType()); + rAdd->SetOpnd(baseNode, 0); + rAdd->SetOpnd(rMul, 1); + return rAdd; +} + +StmtNode *CGLowerer::WriteBitField(const std::pair &byteBitOffsets, const MIRBitFieldType *fieldType, + BaseNode *baseAddr, BaseNode *rhs, BlockNode *block) { + auto bitSize = fieldType->GetFieldSize(); + auto primType = fieldType->GetPrimType(); + auto byteOffset = byteBitOffsets.first; + auto bitOffset = byteBitOffsets.second; + auto *builder = mirModule.GetMIRBuilder(); + auto *bitField = builder->CreateExprIreadoff(primType, byteOffset, baseAddr); + auto primTypeBitSize = GetPrimTypeBitSize(primType); + if ((static_cast(bitOffset) + bitSize) <= primTypeBitSize) { + if (CGOptions::IsBigEndian()) { + bitOffset = static_cast(static_cast(beCommon.GetTypeSize(fieldType->GetTypeIndex()) * + kBitsPerByte) - bitOffset) - bitSize; + } + auto depositBits = builder->CreateExprDepositbits(OP_depositbits, primType, static_cast(bitOffset), + bitSize, bitField, rhs); + return builder->CreateStmtIassignoff(primType, byteOffset, baseAddr, depositBits); + } + // if space not enough in the unit with size of primType, we would make an extra assignment from next bound + auto bitsRemained = (static_cast(bitOffset) + bitSize) - primTypeBitSize; + auto bitsExtracted = primTypeBitSize - static_cast(bitOffset); + if (CGOptions::IsBigEndian()) { + bitOffset = 0; + } + auto *depositedLowerBits = builder->CreateExprDepositbits(OP_depositbits, primType, + static_cast(bitOffset), bitsExtracted, bitField, rhs); + auto *assignedLowerBits = builder->CreateStmtIassignoff(primType, byteOffset, baseAddr, depositedLowerBits); + block->AddStatement(assignedLowerBits); + auto *extractedHigherBits = + builder->CreateExprExtractbits(OP_extractbits, primType, bitsExtracted, bitsRemained, rhs); + auto *bitFieldRemained = builder->CreateExprIreadoff(primType, + byteOffset + static_cast(GetPrimTypeSize(primType)), baseAddr); + auto *depositedHigherBits = + builder->CreateExprDepositbits(OP_depositbits, primType, 0, bitsRemained, bitFieldRemained, extractedHigherBits); + auto *assignedHigherBits = builder->CreateStmtIassignoff(primType, + byteOffset + static_cast(GetPrimTypeSize(primType)), baseAddr, 
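+      /*
+       * This second store handles a bit-field that straddles its container.
+       * Worked example (little-endian path), container primType u32,
+       * byteOffset 0, bitOffset 28, bitSize 8:
+       *   bitsExtracted = 32 - 28 = 4      -> rhs bits [0..3] are deposited
+       *                                       at bit 28 of the word at 0;
+       *   bitsRemained  = 28 + 8 - 32 = 4  -> rhs bits [4..7] are deposited
+       *                                       at bit 0 of the word at 4.
+       * ReadBitField below recombines the two pieces with extractbits and
+       * depositbits in the same way.  The numbers are only illustrative.
+       */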
depositedHigherBits); + return assignedHigherBits; +} + +BaseNode *CGLowerer::ReadBitField(const std::pair &byteBitOffsets, const MIRBitFieldType *fieldType, + BaseNode *baseAddr) { + auto bitSize = fieldType->GetFieldSize(); + auto primType = fieldType->GetPrimType(); + auto byteOffset = byteBitOffsets.first; + auto bitOffset = byteBitOffsets.second; + auto *builder = mirModule.GetMIRBuilder(); + auto *bitField = builder->CreateExprIreadoff(primType, byteOffset, baseAddr); + auto primTypeBitSize = GetPrimTypeBitSize(primType); + if ((static_cast(bitOffset) + bitSize) <= primTypeBitSize) { + if (CGOptions::IsBigEndian()) { + bitOffset = static_cast(static_cast(beCommon.GetTypeSize(fieldType->GetTypeIndex()) * + kBitsPerByte) - bitOffset) - bitSize; + } + return builder->CreateExprExtractbits(OP_extractbits, primType, static_cast(bitOffset), bitSize, bitField); + } + // if space not enough in the unit with size of primType, the result would be binding of two exprs of load + auto bitsRemained = (static_cast(bitOffset) + bitSize) - primTypeBitSize; + if (CGOptions::IsBigEndian()) { + bitOffset = 0; + } + auto *extractedLowerBits = builder->CreateExprExtractbits(OP_extractbits, primType, + static_cast(bitOffset), bitSize - bitsRemained, bitField); + auto *bitFieldRemained = builder->CreateExprIreadoff(primType, + byteOffset + static_cast(GetPrimTypeSize(primType)), baseAddr); + auto *result = builder->CreateExprDepositbits(OP_depositbits, primType, bitSize - bitsRemained, bitsRemained, + extractedLowerBits, bitFieldRemained); + return result; +} + +BaseNode *CGLowerer::LowerDreadBitfield(DreadNode &dread) { + auto *symbol = mirModule.CurFunction()->GetLocalOrGlobalSymbol(dread.GetStIdx()); + auto *structTy = static_cast(symbol->GetType()); + auto fTyIdx = structTy->GetFieldTyIdx(dread.GetFieldID()); + auto *fType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(TyIdx(fTyIdx)); + if (fType->GetKind() != kTypeBitField) { + return &dread; + } + auto *builder = mirModule.GetMIRBuilder(); + auto *baseAddr = builder->CreateExprAddrof(0, dread.GetStIdx()); + auto byteBitOffsets = beCommon.GetFieldOffset(*structTy, dread.GetFieldID()); + return ReadBitField(byteBitOffsets, static_cast(fType), baseAddr); +} + +BaseNode *CGLowerer::LowerIreadBitfield(IreadNode &iread) { + uint32 index = iread.GetTyIdx(); + MIRPtrType *pointerTy = static_cast(GlobalTables::GetTypeTable().GetTypeFromTyIdx(index)); + MIRType *pointedTy = GlobalTables::GetTypeTable().GetTypeFromTyIdx(pointerTy->GetPointedTyIdx()); + /* Here pointed type can be Struct or JArray */ + MIRStructType *structTy = nullptr; + if (pointedTy->GetKind() != kTypeJArray) { + structTy = static_cast(pointedTy); + } else { + /* it's a Jarray type. using it's parent's field info: java.lang.Object */ + structTy = static_cast(pointedTy)->GetParentType(); + } + TyIdx fTyIdx = structTy->GetFieldTyIdx(iread.GetFieldID()); + MIRType *fType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(TyIdx(fTyIdx)); + if (fType->GetKind() != kTypeBitField) { + return &iread; + } + auto byteBitOffsets = beCommon.GetFieldOffset(*structTy, iread.GetFieldID()); + return ReadBitField(byteBitOffsets, static_cast(fType), iread.Opnd(0)); +} + +// input node must be cvt, retype, zext or sext +BaseNode *CGLowerer::LowerCastExpr(BaseNode &expr) const { + if (CGOptions::GetInstance().GetOptimizeLevel() >= CGOptions::kLevel2) { + BaseNode *simplified = MapleCastOpt::SimplifyCast(*mirBuilder, &expr); + return simplified != nullptr ? 
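+    /*
+     * At -O2 and above, chains of cvt/retype/zext/sext are first run through
+     * MapleCastOpt::SimplifyCast; when the cast is redundant the simplified
+     * node is used, otherwise the expression is returned unchanged.  For
+     * instance a widening followed by a narrowing back to the same type,
+     *
+     *   cvt i32 i64 (cvt i64 i32 (dread i32 %x))
+     *
+     * may be folded down to the original operand.  The example is
+     * illustrative; the exact folding rules live in MapleCastOpt.
+     */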
simplified : &expr; + } + return &expr; +} + +void CGLowerer::LowerTypePtr(BaseNode &node) const { + if ((node.GetPrimType() == PTY_ptr) || (node.GetPrimType() == PTY_ref)) { + node.SetPrimType(GetLoweredPtrType()); + } + + if (kOpcodeInfo.IsTypeCvt(node.GetOpCode())) { + auto &cvt = static_cast(node); + if ((cvt.FromType() == PTY_ptr) || (cvt.FromType() == PTY_ref)) { + cvt.SetFromType(GetLoweredPtrType()); + } + } else if (kOpcodeInfo.IsCompare(node.GetOpCode())) { + auto &cmp = static_cast(node); + if ((cmp.GetOpndType() == PTY_ptr) || (cmp.GetOpndType() == PTY_ref)) { + cmp.SetOpndType(GetLoweredPtrType()); + } + } +} + + +#if TARGARM32 || TARGAARCH64 || TARGRISCV64 || TARGX86_64 +BlockNode *CGLowerer::LowerReturnStructUsingFakeParm(NaryStmtNode &retNode) { + BlockNode *blk = mirModule.CurFuncCodeMemPool()->New(); + for (size_t i = 0; i < retNode.GetNopndSize(); ++i) { + retNode.SetOpnd(LowerExpr(retNode, *retNode.GetNopndAt(i), *blk), i); + } + BaseNode *opnd0 = retNode.Opnd(0); + if (!(opnd0 && opnd0->GetPrimType() == PTY_agg)) { + /* It is possible function never returns and have a dummy return const instead of a struct. */ + maple::LogInfo::MapleLogger(kLlWarn) << "return struct should have a kid" << std::endl; + } + + MIRFunction *curFunc = GetCurrentFunc(); + MIRSymbol *retSt = curFunc->GetFormal(0); + MIRPtrType *retTy = static_cast(retSt->GetType()); + IassignNode *iassign = mirModule.CurFuncCodeMemPool()->New(); + iassign->SetTyIdx(retTy->GetTypeIndex()); + ASSERT(opnd0 != nullptr, "opnd0 should not be nullptr"); + if ((beCommon.GetTypeSize(retTy->GetPointedTyIdx().GetIdx()) <= k16ByteSize) && (opnd0->GetPrimType() == PTY_agg)) { + /* struct goes into register. */ + curFunc->SetStructReturnedInRegs(); + } + iassign->SetFieldID(0); + iassign->SetRHS(opnd0); + if (retSt->IsPreg()) { + RegreadNode *regNode = mirModule.GetMIRBuilder()->CreateExprRegread( + GetLoweredPtrType(), + curFunc->GetPregTab()->GetPregIdxFromPregno(static_cast(retSt->GetPreg()->GetPregNo()))); + iassign->SetOpnd(regNode, 0); + } else { + AddrofNode *dreadNode = mirModule.CurFuncCodeMemPool()->New(OP_dread); + dreadNode->SetPrimType(GetLoweredPtrType()); + dreadNode->SetStIdx(retSt->GetStIdx()); + iassign->SetOpnd(dreadNode, 0); + } + blk->AddStatement(iassign); + retNode.GetNopnd().clear(); + retNode.SetNumOpnds(0); + blk->AddStatement(&retNode); + return blk; +} + +#endif /* TARGARM32 || TARGAARCH64 || TARGX86_64 */ + +BlockNode *CGLowerer::LowerReturn(NaryStmtNode &retNode) { + BlockNode *blk = mirModule.CurFuncCodeMemPool()->New(); + if (retNode.NumOpnds() != 0) { + BaseNode *expr = retNode.Opnd(0); + Opcode opr = expr->GetOpCode(); + if (opr == OP_dread) { + AddrofNode *retExpr = static_cast(expr); + MIRFunction *mirFunc = mirModule.CurFunction(); + MIRSymbol *sym = mirFunc->GetLocalOrGlobalSymbol(retExpr->GetStIdx()); + if (sym->GetAttr(ATTR_localrefvar)) { + mirFunc->InsertMIRSymbol(sym); + } + } + } + for (size_t i = 0; i < retNode.GetNopndSize(); ++i) { + retNode.SetOpnd(LowerExpr(retNode, *retNode.GetNopndAt(i), *blk), i); + } + blk->AddStatement(&retNode); + return blk; +} + +StmtNode *CGLowerer::LowerDassignBitfield(DassignNode &dassign, BlockNode &newBlk) { + dassign.SetRHS(LowerExpr(dassign, *dassign.GetRHS(), newBlk)); + MIRSymbol *symbol = mirModule.CurFunction()->GetLocalOrGlobalSymbol(dassign.GetStIdx()); + MIRStructType *structTy = static_cast(symbol->GetType()); + CHECK_FATAL(structTy != nullptr, "LowerDassignBitfield: non-zero fieldID for non-structure"); + TyIdx fTyIdx = 
structTy->GetFieldTyIdx(dassign.GetFieldID()); + CHECK_FATAL(fTyIdx != 0u, "LowerDassignBitField: field id out of range for the structure"); + MIRType *fType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(TyIdx(fTyIdx)); + if (fType->GetKind() != kTypeBitField) { + return &dassign; + } + auto *builder = mirModule.GetMIRBuilder(); + auto *baseAddr = builder->CreateExprAddrof(0, dassign.GetStIdx()); + auto byteBitOffsets = beCommon.GetFieldOffset(*structTy, dassign.GetFieldID()); + return WriteBitField(byteBitOffsets, static_cast(fType), baseAddr, dassign.GetRHS(), &newBlk); +} + +StmtNode *CGLowerer::LowerIassignBitfield(IassignNode &iassign, BlockNode &newBlk) { + ASSERT(iassign.Opnd(0) != nullptr, "iassign.Opnd(0) should not be nullptr"); + iassign.SetOpnd(LowerExpr(iassign, *iassign.Opnd(0), newBlk), 0); + iassign.SetRHS(LowerExpr(iassign, *iassign.GetRHS(), newBlk)); + + CHECK_FATAL(iassign.GetTyIdx() < GlobalTables::GetTypeTable().GetTypeTable().size(), + "LowerIassignBitField: subscript out of range"); + uint32 index = iassign.GetTyIdx(); + MIRPtrType *pointerTy = static_cast(GlobalTables::GetTypeTable().GetTypeFromTyIdx(index)); + CHECK_FATAL(pointerTy != nullptr, "LowerIassignBitField: type in iassign should be pointer type"); + MIRType *pointedTy = GlobalTables::GetTypeTable().GetTypeFromTyIdx(pointerTy->GetPointedTyIdx()); + /* + * Here pointed type can be Struct or JArray + * We should seriously consider make JArray also a Struct type + */ + MIRStructType *structTy = nullptr; + if (pointedTy->GetKind() != kTypeJArray) { + structTy = static_cast(pointedTy); + } else { + /* it's a Jarray type. using it's parent's field info: java.lang.Object */ + structTy = static_cast(pointedTy)->GetParentType(); + } + + TyIdx fTyIdx = structTy->GetFieldTyIdx(iassign.GetFieldID()); + MIRType *fType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(TyIdx(fTyIdx)); + if (fType->GetKind() != kTypeBitField) { + return &iassign; + } + auto byteBitOffsets = beCommon.GetFieldOffset(*structTy, iassign.GetFieldID()); + auto *bitFieldType = static_cast(fType); + return WriteBitField(byteBitOffsets, bitFieldType, iassign.Opnd(0), iassign.GetRHS(), &newBlk); +} + +void CGLowerer::LowerIassign(IassignNode &iassign, BlockNode &newBlk) { + StmtNode *newStmt = nullptr; + if (iassign.GetFieldID() != 0) { + newStmt = LowerIassignBitfield(iassign, newBlk); + } else { + CHECK_FATAL(iassign.GetPrimType() != PTY_ptr, "should have been lowered already"); + CHECK_FATAL(iassign.GetPrimType() != PTY_ref, "should have been lowered already"); + LowerStmt(iassign, newBlk); + newStmt = &iassign; + } + newBlk.AddStatement(newStmt); +} + +static GStrIdx NewAsmTempStrIdx() { + static uint32 strIdxCount = 0; // to create unique temporary symbol names + std::string asmTempStr("asm_tempvar"); + (void)asmTempStr.append(std::to_string(++strIdxCount)); + return GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(asmTempStr); +} + +void CGLowerer::LowerAsmStmt(AsmNode *asmNode, BlockNode *newBlk) { + for (size_t i = 0; i < asmNode->NumOpnds(); i++) { + BaseNode *opnd = LowerExpr(*asmNode, *asmNode->Opnd(i), *newBlk); + if (opnd->NumOpnds() == 0) { + asmNode->SetOpnd(opnd, i); + continue; + } + // introduce a temporary to store the expression tree operand + TyIdx tyIdxUsed = static_cast(opnd->GetPrimType()); + if (opnd->op == OP_iread) { + IreadNode *ireadNode = static_cast(opnd); + tyIdxUsed = ireadNode->GetType()->GetTypeIndex(); + } + StmtNode *assignNode = nullptr; + BaseNode *readOpnd = nullptr; + PrimType type = 
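+      /*
+       * Non-leaf asm operands are hoisted into a temporary so the backend
+       * only ever sees a simple dread/regread as an asm input.  Sketch:
+       *
+       *   asm { ... } (add i32 (dread i32 %a, dread i32 %b))
+       * becomes, at -O2 for non-aggregate operands,
+       *   regassign i32 %1 (add i32 (dread i32 %a, dread i32 %b))
+       *   asm { ... } (regread i32 %1)
+       *
+       * and otherwise uses a generated local (asm_tempvar<N>) plus a
+       * dassign/dread pair.  The IR spelling is approximate.
+       */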
GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdxUsed)->GetPrimType(); + if ((type != PTY_agg) && CGOptions::GetInstance().GetOptimizeLevel() >= CGOptions::kLevel2) { + PregIdx pregIdx = mirModule.CurFunction()->GetPregTab()->CreatePreg(type); + assignNode = mirBuilder->CreateStmtRegassign(type, pregIdx, opnd); + readOpnd = mirBuilder->CreateExprRegread(type, pregIdx); + } else { + MIRSymbol *st = mirModule.GetMIRBuilder()->CreateSymbol(tyIdxUsed, NewAsmTempStrIdx(), + kStVar, kScAuto, mirModule.CurFunction(), kScopeLocal); + assignNode = mirModule.GetMIRBuilder()->CreateStmtDassign(*st, 0, opnd); + readOpnd = mirBuilder->CreateExprDread(*st); + } + newBlk->AddStatement(assignNode); + asmNode->SetOpnd(readOpnd, i); + } + newBlk->AddStatement(asmNode); +} + +DassignNode *CGLowerer::SaveReturnValueInLocal(StIdx stIdx, uint16 fieldID) { + MIRSymbol *var; + if (stIdx.IsGlobal()) { + var = GlobalTables::GetGsymTable().GetSymbolFromStidx(stIdx.Idx()); + } else { + var = GetCurrentFunc()->GetSymbolTabItem(stIdx.Idx()); + } + CHECK_FATAL(var != nullptr, "var should not be nullptr"); + PrimType pType; + if (var->GetAttr(ATTR_oneelem_simd)) { + pType = PTY_f64; + } else { + pType = GlobalTables::GetTypeTable().GetTypeTable().at(var->GetTyIdx())->GetPrimType(); + } + RegreadNode *regRead = mirModule.GetMIRBuilder()->CreateExprRegread(pType, -kSregRetval0); + return mirModule.GetMIRBuilder()->CreateStmtDassign(*var, fieldID, regRead); +} + +BaseNode *CGLowerer::LowerExtractBits(ExtractbitsNode &extr) const { + PrimType nodeType = extr.GetPrimType(); + PrimType opndType = extr.Opnd(0)->GetPrimType(); + if (!IsPrimitiveInteger(nodeType) || !IsPrimitiveInteger(opndType) || + GetPrimTypeSize(GetRegPrimType(nodeType)) == GetPrimTypeSize(GetRegPrimType(opndType))) { + return &extr; + } + // instructions hope the reg-sizes of src and dst are same + if (GetPrimTypeSize(nodeType) > GetPrimTypeSize(opndType)) { + auto *newOpnd = mirBuilder->CreateExprTypeCvt(OP_cvt, GetRegPrimType(nodeType), opndType, *extr.Opnd(0)); + extr.SetOpnd(newOpnd, 0); + return &extr; + } else { + PrimType newType = IsSignedInteger(nodeType) ? GetSignedPrimType(GetRegPrimType(opndType)) + : GetUnsignedPrimType(GetRegPrimType(opndType)); + extr.SetPrimType(newType); + return mirBuilder->CreateExprTypeCvt(OP_cvt, nodeType, newType, extr); + } +} + +BaseNode *CGLowerer::LowerRem(BaseNode &expr, BlockNode &blk) { + auto &remExpr = static_cast(expr); + if (!IsPrimitiveFloat(remExpr.GetPrimType())) { + return &expr; + } + ExtFuncT fmodFunc = remExpr.GetPrimType() == PTY_f32 ? 
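+  /*
+   * Floating-point rem is lowered to a call to the fmodf/fmod helpers
+   * registered in RegisterExternalLibraryFunctions:
+   *
+   *   rem f32 (x, y)
+   * becomes roughly
+   *   callassigned &fmodf (x, y) { dassign %__iret<N> }
+   *   dread f32 %__iret<N>
+   *
+   * with fmod used for f64.  The temporary follows kIntrnRetValPrefix; the
+   * IR spelling is approximate.
+   */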
kFmodFloat : kFmodDouble; + uint32 i = 0; + for (; i < extFuncs.size(); ++i) { + if (extFuncs[i].first == fmodFunc) { + break; + } + } + CHECK_FATAL(i < extFuncs.size(), "rem expression primtype is not PTY_f32 nor PTY_f64."); + MIRSymbol *ret = CreateNewRetVar(*GlobalTables::GetTypeTable().GetPrimType(remExpr.GetPrimType()), + kIntrnRetValPrefix); + MapleVector args(mirModule.GetMIRBuilder()->GetCurrentFuncCodeMpAllocator()->Adapter()); + args.emplace_back(remExpr.Opnd(0)); + args.emplace_back(remExpr.Opnd(1)); + CallNode *callStmt = mirModule.GetMIRBuilder()->CreateStmtCallAssigned(extFuncs[i].second, args, ret); + blk.AppendStatementsFromBlock(*LowerCallAssignedStmt(*callStmt)); + MIRType *type = GlobalTables::GetTypeTable().GetPrimType(extFnDescrs[fmodFunc].retType); + return mirModule.GetMIRBuilder()->CreateExprDread(*type, 0, *ret); +} + +/* to lower call (including icall) and intrinsicall statements */ +void CGLowerer::LowerCallStmt(StmtNode &stmt, StmtNode *&nextStmt, BlockNode &newBlk, MIRType *retty, bool uselvar, + bool isIntrinAssign) { + StmtNode *newStmt = nullptr; + if (stmt.GetOpCode() == OP_intrinsiccall) { + auto &intrnNode = static_cast(stmt); + newStmt = LowerIntrinsiccall(intrnNode, newBlk); + } else { + /* We note the function has a user-defined (i.e., not an intrinsic) call. */ + GetCurrentFunc()->SetHasCall(); + newStmt = &stmt; + } + + if (newStmt == nullptr) { + return; + } + + if (newStmt->GetOpCode() == OP_call || newStmt->GetOpCode() == OP_icall || newStmt->GetOpCode() == OP_icallproto) { + newStmt = LowerCall(static_cast(*newStmt), nextStmt, newBlk, retty, uselvar); + } + newStmt->SetSrcPos(stmt.GetSrcPos()); + newBlk.AddStatement(newStmt); + if (CGOptions::GetInstance().GetOptimizeLevel() >= CGOptions::kLevel2 && stmt.GetOpCode() == OP_intrinsiccall) { + /* Try to expand memset and memcpy call lowered from intrinsiccall */ + /* Skip expansion if call returns a value that is used later. */ + BlockNode *blkLowered = isIntrinAssign ? 
nullptr : LowerMemop(*newStmt); + if (blkLowered != nullptr) { + newBlk.RemoveStmt(newStmt); + newBlk.AppendStatementsFromBlock(*blkLowered); + } + } +} + +StmtNode *CGLowerer::GenCallNode(const StmtNode &stmt, PUIdx &funcCalled, CallNode& origCall) { + StmtNode *newCall = nullptr; + if (stmt.GetOpCode() == OP_callassigned) { + newCall = mirModule.GetMIRBuilder()->CreateStmtCall(origCall.GetPUIdx(), origCall.GetNopnd()); + } else if (stmt.GetOpCode() == OP_virtualcallassigned) { + newCall = mirModule.GetMIRBuilder()->CreateStmtVirtualCall(origCall.GetPUIdx(), origCall.GetNopnd()); + } else if (stmt.GetOpCode() == OP_superclasscallassigned) { + newCall = mirModule.GetMIRBuilder()->CreateStmtSuperclassCall(origCall.GetPUIdx(), origCall.GetNopnd()); + } else if (stmt.GetOpCode() == OP_interfacecallassigned) { + newCall = mirModule.GetMIRBuilder()->CreateStmtInterfaceCall(origCall.GetPUIdx(), origCall.GetNopnd()); + } + CHECK_FATAL(newCall != nullptr, "nullptr is not expected"); + newCall->SetSrcPos(stmt.GetSrcPos()); + funcCalled = origCall.GetPUIdx(); + CHECK_FATAL((newCall->GetOpCode() == OP_call || newCall->GetOpCode() == OP_interfacecall), + "virtual call or super class call are not expected"); + if (newCall->GetOpCode() == OP_interfacecall) { + std::cerr << "interfacecall found\n"; + } + return newCall; +} + +StmtNode *CGLowerer::GenIntrinsiccallNode(const StmtNode &stmt, PUIdx &funcCalled, bool &handledAtLowerLevel, + IntrinsiccallNode &origCall) { + StmtNode *newCall = nullptr; + handledAtLowerLevel = IsIntrinsicCallHandledAtLowerLevel(origCall.GetIntrinsic()); + if (handledAtLowerLevel) { + /* If the lower level can handle the intrinsic, just let it pass through. */ + newCall = &origCall; + } else { + PUIdx bFunc = GetBuiltinToUse(origCall.GetIntrinsic()); + if (bFunc != kFuncNotFound) { + newCall = mirModule.GetMIRBuilder()->CreateStmtCall(bFunc, origCall.GetNopnd()); + CHECK_FATAL(newCall->GetOpCode() == OP_call, "intrinsicnode except intrinsiccall is not expected"); + } else { + if (stmt.GetOpCode() == OP_intrinsiccallassigned) { + newCall = mirModule.GetMIRBuilder()->CreateStmtIntrinsicCall(origCall.GetIntrinsic(), origCall.GetNopnd()); + CHECK_FATAL(newCall->GetOpCode() == OP_intrinsiccall, "intrinsicnode except intrinsiccall is not expected"); + } else if (stmt.GetOpCode() == OP_xintrinsiccallassigned) { + newCall = mirModule.GetMIRBuilder()->CreateStmtXintrinsicCall(origCall.GetIntrinsic(), origCall.GetNopnd()); + CHECK_FATAL(newCall->GetOpCode() == OP_intrinsiccall, "intrinsicnode except intrinsiccall is not expected"); + } else { + newCall = mirModule.GetMIRBuilder()->CreateStmtIntrinsicCall(origCall.GetIntrinsic(), origCall.GetNopnd(), + origCall.GetTyIdx()); + CHECK_FATAL(newCall->GetOpCode() == OP_intrinsiccallwithtype, + "intrinsicnode except OP_intrinsiccallwithtype is not expected"); + } + } + newCall->SetSrcPos(stmt.GetSrcPos()); + funcCalled = bFunc; + } + return newCall; +} + +StmtNode *CGLowerer::GenIcallNode(PUIdx &funcCalled, IcallNode &origCall) { + StmtNode *newCall = nullptr; + if (origCall.GetOpCode() == OP_icallassigned) { + newCall = mirModule.GetMIRBuilder()->CreateStmtIcall(origCall.GetNopnd()); + } else { + newCall = mirModule.GetMIRBuilder()->CreateStmtIcallproto(origCall.GetNopnd(), origCall.GetRetTyIdx()); + } + newCall->SetSrcPos(origCall.GetSrcPos()); + CHECK_FATAL(newCall != nullptr, "nullptr is not expected"); + funcCalled = kFuncNotFound; + return newCall; +} + +BlockNode *CGLowerer::GenBlockNode(StmtNode &newCall, const CallReturnVector &p2nRets, 
const Opcode &opcode, + const PUIdx &funcCalled, bool handledAtLowerLevel, bool uselvar) { + BlockNode *blk = mirModule.CurFuncCodeMemPool()->New(); + blk->AddStatement(&newCall); + if (!handledAtLowerLevel) { + CHECK_FATAL(p2nRets.size() <= 1, "make sure p2nRets size <= 1"); + /* Create DassignStmt to save kSregRetval0. */ + StmtNode *dStmt = nullptr; + MIRType *retType = nullptr; + if (p2nRets.size() == 1) { + MIRSymbol *sym = nullptr; + StIdx stIdx = p2nRets[0].first; + if (stIdx.IsGlobal()) { + sym = GlobalTables::GetGsymTable().GetSymbolFromStidx(stIdx.Idx()); + } else { + sym = GetCurrentFunc()->GetSymbolTabItem(stIdx.Idx()); + } + bool sizeIs0 = false; + if (sym != nullptr) { + retType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(sym->GetTyIdx()); + if (beCommon.GetTypeSize(retType->GetTypeIndex().GetIdx()) == 0) { + sizeIs0 = true; + } + } + if (!sizeIs0) { + RegFieldPair regFieldPair = p2nRets[0].second; + if (!regFieldPair.IsReg()) { + uint16 fieldID = static_cast(regFieldPair.GetFieldID()); + DassignNode *dn = SaveReturnValueInLocal(stIdx, fieldID); + CHECK_FATAL(dn->GetFieldID() == 0, "make sure dn's fieldID return 0"); + LowerDassign(*dn, *blk); + CHECK_FATAL(&newCall == blk->GetLast() || newCall.GetNext() == blk->GetLast(), ""); + dStmt = (&newCall == blk->GetLast()) ? nullptr : blk->GetLast(); + CHECK_FATAL(newCall.GetNext() == dStmt, "make sure newCall's next equal dStmt"); + } else { + PregIdx pregIdx = static_cast(regFieldPair.GetPregIdx()); + MIRPreg *mirPreg = GetCurrentFunc()->GetPregTab()->PregFromPregIdx(pregIdx); + bool is64x1vec = beCommon.CallIsOfAttr(FUNCATTR_oneelem_simd, &newCall); + PrimType pType = is64x1vec ? PTY_f64 : mirPreg->GetPrimType(); + RegreadNode *regNode = mirModule.GetMIRBuilder()->CreateExprRegread(pType, -kSregRetval0); + RegassignNode *regAssign; + if (is64x1vec && IsPrimitiveInteger(mirPreg->GetPrimType())) { // not f64 + MIRType *to; + if (IsUnsignedInteger(mirPreg->GetPrimType())) { + to = GlobalTables::GetTypeTable().GetUInt64(); + } else { + to = GlobalTables::GetTypeTable().GetInt64(); + } + MIRType *from = GlobalTables::GetTypeTable().GetDouble(); + BaseNode *rNode = mirModule.GetMIRBuilder()->CreateExprRetype(*to, *from, regNode); + regAssign = mirModule.GetMIRBuilder()->CreateStmtRegassign(mirPreg->GetPrimType(), + regFieldPair.GetPregIdx(), rNode); + } else { + regAssign = mirModule.GetMIRBuilder()->CreateStmtRegassign(mirPreg->GetPrimType(), + regFieldPair.GetPregIdx(), regNode); + } + blk->AddStatement(regAssign); + dStmt = regAssign; + } + } + } + blk->ResetBlock(); + /* if VerboseCG, insert a comment */ + if (ShouldAddAdditionalComment()) { + CommentNode *cmnt = mirModule.CurFuncCodeMemPool()->New(mirModule); + cmnt->SetComment(kOpcodeInfo.GetName(opcode).c_str()); + if (funcCalled == kFuncNotFound) { + cmnt->Append(" : unknown"); + } else { + cmnt->Append(" : "); + cmnt->Append(GlobalTables::GetFunctionTable().GetFunctionFromPuidx(funcCalled)->GetName()); + } + blk->AddStatement(cmnt); + } + CHECK_FATAL(dStmt == nullptr || dStmt->GetNext() == nullptr, "make sure dStmt or dStmt's next is nullptr"); + LowerCallStmt(newCall, dStmt, *blk, retType, uselvar ? 
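+    /*
+     * Net effect for a plain callassigned: the assigned form is split into an
+     * ordinary call followed by an explicit copy out of the return
+     * pseudo-register, roughly
+     *
+     *   callassigned &foo (args) { dassign %x 0 }
+     * becomes
+     *   call &foo (args)
+     *   dassign %x 0 (regread %%retval0)
+     *
+     * (see SaveReturnValueInLocal above; results returned into pregs use
+     * regassign instead).  The IR spelling is approximate.
+     */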
true : false, opcode == OP_intrinsiccallassigned); + if (!uselvar && dStmt != nullptr) { + dStmt->SetSrcPos(newCall.GetSrcPos()); + blk->AddStatement(dStmt); + } + } + return blk; +} + +// try to expand memset and memcpy +BlockNode *CGLowerer::LowerMemop(StmtNode &stmt) { + auto memOpKind = SimplifyOp::ComputeOpKind(stmt); + if (memOpKind == MEM_OP_unknown) { + return nullptr; + } + auto *prev = stmt.GetPrev(); + auto *next = stmt.GetNext(); + auto *blk = mirModule.CurFuncCodeMemPool()->New(); + blk->AddStatement(&stmt); + uint32 oldTypeTableSize = GlobalTables::GetTypeTable().GetTypeTableSize(); + bool success = simplifyOp.AutoSimplify(stmt, *blk, true); + uint32 newTypeTableSize = GlobalTables::GetTypeTable().GetTypeTableSize(); + if (newTypeTableSize != oldTypeTableSize) { + beCommon.AddNewTypeAfterBecommon(oldTypeTableSize, newTypeTableSize); + } + stmt.SetPrev(prev); + stmt.SetNext(next); // recover callStmt's position + if (!success) { + return nullptr; + } + // lower new generated stmts + auto *currStmt = blk->GetFirst(); + while (currStmt != nullptr) { + auto *nextStmt = currStmt->GetNext(); + for (uint32 i = 0; i < currStmt->NumOpnds(); ++i) { + currStmt->SetOpnd(LowerExpr(*currStmt, *currStmt->Opnd(i), *blk), i); + } + currStmt = nextStmt; + } + return blk; +} + +BlockNode *CGLowerer::LowerIntrinsiccallAassignedToAssignStmt(IntrinsiccallNode &intrinsicCall) { + auto *builder = mirModule.GetMIRBuilder(); + auto *block = mirModule.CurFuncCodeMemPool()->New(); + auto intrinsicID = intrinsicCall.GetIntrinsic(); + auto &opndVector = intrinsicCall.GetNopnd(); + auto returnPair = intrinsicCall.GetReturnVec().begin(); + auto regFieldPair = returnPair->second; + if (regFieldPair.IsReg()) { + auto regIdx = regFieldPair.GetPregIdx(); + auto primType = mirModule.CurFunction()->GetPregItem(static_cast(regIdx))->GetPrimType(); + auto intrinsicOp = builder->CreateExprIntrinsicop(intrinsicID, OP_intrinsicop, primType, TyIdx(0), opndVector); + auto regAssign = builder->CreateStmtRegassign(primType, regIdx, intrinsicOp); + block->AddStatement(regAssign); + } else { + auto fieldID = regFieldPair.GetFieldID(); + auto stIdx = returnPair->first; + auto *type = mirModule.CurFunction()->GetLocalOrGlobalSymbol(stIdx)->GetType(); + auto intrinsicOp = builder->CreateExprIntrinsicop(intrinsicID, OP_intrinsicop, *type, opndVector); + auto dAssign = builder->CreateStmtDassign(stIdx, fieldID, intrinsicOp); + block->AddStatement(dAssign); + } + return LowerBlock(*block); +} + +BlockNode *CGLowerer::LowerCallAssignedStmt(StmtNode &stmt, bool uselvar) { + StmtNode *newCall = nullptr; + CallReturnVector *p2nRets = nullptr; + PUIdx funcCalled = kFuncNotFound; + bool handledAtLowerLevel = false; + switch (stmt.GetOpCode()) { + case OP_callassigned: + case OP_virtualcallassigned: + case OP_superclasscallassigned: + case OP_interfacecallassigned: { + if (CGOptions::GetInstance().GetOptimizeLevel() >= CGOptions::kLevel2) { + BlockNode *blkLowered = LowerMemop(stmt); + if (blkLowered != nullptr) { + return blkLowered; + } + } + auto &origCall = static_cast(stmt); + newCall = GenCallNode(stmt, funcCalled, origCall); + p2nRets = &origCall.GetReturnVec(); + static_cast(newCall)->SetReturnVec(*p2nRets); + MIRFunction *curFunc = mirModule.CurFunction(); + curFunc->SetLastFreqMap(newCall->GetStmtID(), + static_cast(curFunc->GetFreqFromLastStmt(stmt.GetStmtID()))); + break; + } + case OP_intrinsiccallassigned: + case OP_xintrinsiccallassigned: { + IntrinsiccallNode &intrinCall = static_cast(stmt); + auto intrinsicID = 
intrinCall.GetIntrinsic(); + if (IntrinDesc::intrinTable[intrinsicID].IsAtomic()) { + return LowerIntrinsiccallAassignedToAssignStmt(intrinCall); + } + if (intrinsicID == INTRN_JAVA_POLYMORPHIC_CALL) { + BaseNode *contextClassArg = GetBaseNodeFromCurFunc(*mirModule.CurFunction(), false); + constexpr int kContextIdx = 4; /* stable index in MCC_DexPolymorphicCall, never out of range */ + intrinCall.InsertOpnd(contextClassArg, kContextIdx); + + BaseNode *firstArg = intrinCall.GetNopndAt(0); + BaseNode *baseVal = mirBuilder->CreateExprBinary(OP_add, *GlobalTables::GetTypeTable().GetPtr(), firstArg, + mirBuilder->CreateIntConst(1, PTY_ref)); + intrinCall.SetNOpndAt(0, baseVal); + } + newCall = GenIntrinsiccallNode(stmt, funcCalled, handledAtLowerLevel, intrinCall); + p2nRets = &intrinCall.GetReturnVec(); + static_cast(newCall)->SetReturnVec(*p2nRets); + break; + } + case OP_intrinsiccallwithtypeassigned: { + auto &origCall = static_cast(stmt); + newCall = GenIntrinsiccallNode(stmt, funcCalled, handledAtLowerLevel, origCall); + p2nRets = &origCall.GetReturnVec(); + static_cast(newCall)->SetReturnVec(*p2nRets); + break; + } + case OP_icallprotoassigned: + case OP_icallassigned: { + auto &origCall = static_cast(stmt); + newCall = GenIcallNode(funcCalled, origCall); + p2nRets = &origCall.GetReturnVec(); + static_cast(newCall)->SetReturnVec(*p2nRets); + break; + } + default: + CHECK_FATAL(false, "NIY"); + return nullptr; + } + + /* transfer srcPosition location info */ + newCall->SetSrcPos(stmt.GetSrcPos()); + return GenBlockNode(*newCall, *p2nRets, stmt.GetOpCode(), funcCalled, handledAtLowerLevel, uselvar); +} + +#if TARGAARCH64 +static PrimType IsStructElementSame(MIRType *ty) { + if (ty->GetKind() == kTypeArray) { + MIRArrayType *arrtype = static_cast(ty); + MIRType *pty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(arrtype->GetElemTyIdx()); + if (pty->GetKind() == kTypeArray || pty->GetKind() == kTypeStruct) { + return IsStructElementSame(pty); + } + return pty->GetPrimType(); + } else if (ty->GetKind() == kTypeStruct) { + MIRStructType *sttype = static_cast(ty); + FieldVector fields = sttype->GetFields(); + PrimType oldtype = PTY_void; + for (uint32 fcnt = 0; fcnt < fields.size(); ++fcnt) { + TyIdx fieldtyidx = fields[fcnt].second.first; + MIRType *fieldty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(fieldtyidx); + PrimType ptype = IsStructElementSame(fieldty); + if (oldtype != PTY_void && oldtype != ptype) { + return PTY_void; + } else { + oldtype = ptype; + } + } + return oldtype; + } else { + return ty->GetPrimType(); + } +} +#endif + +// return true if successfully lowered; nextStmt is in/out, and is made to point +// to its following statement if lowering of the struct return is successful +bool CGLowerer::LowerStructReturn(BlockNode &newBlk, StmtNode *stmt, + StmtNode *&nextStmt, bool &lvar, BlockNode *oldBlk) { + if (!nextStmt) { + return false; + } + CallReturnVector *p2nrets = stmt->GetCallReturnVector(); + if (p2nrets->size() == 0) { + return false; + } + CallReturnPair retPair = (*p2nrets)[0]; + if (retPair.second.IsReg()) { + return false; + } + MIRSymbol *retSym = mirModule.CurFunction()->GetLocalOrGlobalSymbol(retPair.first); + if (retSym->GetType()->GetPrimType() != PTY_agg) { + return false; + } + if (nextStmt->op != OP_dassign) { + // introduce a temporary and insert a dassign whose rhs is this temporary + // and whose lhs is retSym + MIRSymbol *temp = CreateNewRetVar(*retSym->GetType(), kUserRetValPrefix); + BaseNode *rhs = 
mirModule.GetMIRBuilder()->CreateExprDread(*temp->GetType(), 0, *temp); + DassignNode *dass = mirModule.GetMIRBuilder()->CreateStmtDassign( + retPair.first, retPair.second.GetFieldID(), rhs); + oldBlk->InsertBefore(nextStmt, dass); + nextStmt = dass; + // update CallReturnVector to the new temporary + (*p2nrets)[0].first = temp->GetStIdx(); + (*p2nrets)[0].second.SetFieldID(0); + } + // now, it is certain that nextStmt is a dassign + BaseNode *bnode = static_cast(nextStmt)->GetRHS(); + if (bnode->GetOpCode() != OP_dread) { + return false; + } + DreadNode *dnode = static_cast(bnode); + MIRType *dtype = mirModule.CurFunction()->GetLocalOrGlobalSymbol(dnode->GetStIdx())->GetType(); +#if TARGAARCH64 + PrimType ty = IsStructElementSame(dtype); + if (ty == PTY_f32 || ty == PTY_f64 || IsPrimitiveVector(ty)) { + return false; + } +#endif + if (dnode->GetPrimType() != PTY_agg) { + return false; + } + CallReturnPair pair = (*p2nrets)[0]; + if (pair.first != dnode->GetStIdx() || pair.second.GetFieldID() != dnode->GetFieldID()) { + return false; + } + auto *dnodeStmt = static_cast(nextStmt); + if (dnodeStmt->GetFieldID() != 0) { + return false; + } + if (dtype->GetSize() > k16ByteSize) { + (*p2nrets)[0].first = dnodeStmt->GetStIdx(); + (*p2nrets)[0].second.SetFieldID(dnodeStmt->GetFieldID()); + lvar = true; + // set ATTR_firstarg_return for callee + if (stmt->GetOpCode() == OP_callassigned) { + CallNode *callNode = static_cast(stmt); + MIRFunction *f = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(callNode->GetPUIdx()); + f->SetFirstArgReturn(); + f->GetMIRFuncType()->SetFirstArgReturn(); + } else { + // for icall, front-end already set ATTR_firstarg_return + } + } else { /* struct <= 16 passed in regs lowered into + call &foo + regassign u64 %1 (regread u64 %%retval0) + regassign ptr %2 (addrof ptr $s) + iassign <* u64> 0 (regread ptr %2, regread u64 %1) */ + MIRSymbol *symbol = mirModule.CurFunction()->GetLocalOrGlobalSymbol(dnodeStmt->GetStIdx()); + auto *structType = static_cast(symbol->GetType()); + auto size = static_cast(structType->GetSize()); + if (stmt->GetOpCode() == OP_callassigned) { + auto *callNode = static_cast(stmt); + for (size_t i = 0; i < callNode->GetNopndSize(); ++i) { + BaseNode *newOpnd = LowerExpr(*callNode, *callNode->GetNopndAt(i), newBlk); + callNode->SetOpnd(newOpnd, i); + } + CallNode *callStmt = mirModule.GetMIRBuilder()->CreateStmtCall(callNode->GetPUIdx(), callNode->GetNopnd()); + callStmt->SetSrcPos(callNode->GetSrcPos()); + newBlk.AddStatement(callStmt); + } else if (stmt->GetOpCode() == OP_icallassigned || stmt->GetOpCode() == OP_icallprotoassigned) { + auto *icallNode = static_cast(stmt); + for (size_t i = 0; i < icallNode->GetNopndSize(); ++i) { + BaseNode *newOpnd = LowerExpr(*icallNode, *icallNode->GetNopndAt(i), newBlk); + icallNode->SetOpnd(newOpnd, i); + } + IcallNode *icallStmt = nullptr; + if (stmt->GetOpCode() == OP_icallassigned) { + icallStmt = mirModule.GetMIRBuilder()->CreateStmtIcall(icallNode->GetNopnd()); + } else { + icallStmt = mirModule.GetMIRBuilder()->CreateStmtIcallproto(icallNode->GetNopnd(), icallNode->GetRetTyIdx()); + } + icallStmt->SetSrcPos(icallNode->GetSrcPos()); + newBlk.AddStatement(icallStmt); + } else { + return false; + } + + uint32 origSize = size; + PregIdx pIdxR, pIdx1R, pIdx2R; + StmtNode *aStmt = nullptr; + RegreadNode *reg = nullptr; + + /* save x0 */ + reg = mirBuilder->CreateExprRegread(PTY_u64, -kSregRetval0); + pIdx1R = GetCurrentFunc()->GetPregTab()->CreatePreg(PTY_u64); + aStmt = 
mirBuilder->CreateStmtRegassign(PTY_u64, pIdx1R, reg); + newBlk.AddStatement(aStmt); + + /* save x1 */ + if (origSize > k8ByteSize) { + reg = mirBuilder->CreateExprRegread(PTY_u64, -kSregRetval1); + pIdx2R = GetCurrentFunc()->GetPregTab()->CreatePreg(PTY_u64); + aStmt = mirBuilder->CreateStmtRegassign(PTY_u64, pIdx2R, reg); + newBlk.AddStatement(aStmt); + } + + /* save &s */ + BaseNode *regAddr = mirBuilder->CreateExprAddrof(0, *symbol); + LowerTypePtr(*regAddr); + PregIdx pIdxL = GetCurrentFunc()->GetPregTab()->CreatePreg(GetLoweredPtrType()); + aStmt = mirBuilder->CreateStmtRegassign(PTY_a64, pIdxL, regAddr); + newBlk.AddStatement(aStmt); + + uint32 curSize = 0; + PregIdx pIdxS; + while (size > 0) { + pIdxR = pIdx1R; + if (curSize >= k8ByteSize) { + pIdxR = pIdx2R; + } + BaseNode *addr; + BaseNode *shift; + BaseNode *regreadExp; + if (origSize != size) { + MIRType *addrType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(GetLoweredPtrType()); + addr = mirBuilder->CreateExprBinary(OP_add, *addrType, + mirBuilder->CreateExprRegread(GetLoweredPtrType(), pIdxL), + mirBuilder->CreateIntConst(origSize - size, PTY_i32)); + } else { + addr = mirBuilder->CreateExprRegread(GetLoweredPtrType(), pIdxL); + } + if (size >= k8ByteSize) { + aStmt = mirBuilder->CreateStmtIassign( + *beCommon.BeGetOrCreatePointerType(*GlobalTables::GetTypeTable().GetUInt64()), + 0, addr, mirBuilder->CreateExprRegread(PTY_u64, pIdxR)); + size -= k8ByteSize; + curSize += k8ByteSize; + } else if (size >= k4ByteSize) { + MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(PTY_u64); + + if (CGOptions::IsBigEndian()) { + regreadExp = mirBuilder->CreateExprBinary( + OP_lshr, *type, mirBuilder->CreateExprRegread(PTY_u64, pIdxR), + mirBuilder->CreateIntConst(k64BitSize - k32BitSize, PTY_i32)); + } else { + regreadExp = mirBuilder->CreateExprRegread(PTY_u32, pIdxR); + } + + aStmt = mirBuilder->CreateStmtIassign( + *beCommon.BeGetOrCreatePointerType(*GlobalTables::GetTypeTable().GetUInt32()), + 0, addr, regreadExp); + + if (CGOptions::IsBigEndian()) { + shift = mirBuilder->CreateExprBinary(OP_shl, *type, mirBuilder->CreateExprRegread(PTY_u64, pIdxR), + mirBuilder->CreateIntConst(k32BitSize, PTY_i32)); + } else { + shift = mirBuilder->CreateExprBinary(OP_lshr, *type, mirBuilder->CreateExprRegread(PTY_u64, pIdxR), + mirBuilder->CreateIntConst(k32BitSize, PTY_i32)); + } + + pIdxS = GetCurrentFunc()->GetPregTab()->CreatePreg(PTY_u64); + StmtNode *sStmp = mirBuilder->CreateStmtRegassign(PTY_u64, pIdxS, shift); + + pIdx1R = pIdx2R = pIdxS; + newBlk.AddStatement(sStmp); + size -= k4ByteSize; + curSize += k4ByteSize; + } else if (size >= k2ByteSize) { + MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(PTY_u64); + + if (CGOptions::IsBigEndian()) { + regreadExp = mirBuilder->CreateExprBinary(OP_lshr, *type, mirBuilder->CreateExprRegread(PTY_u64, pIdxR), + mirBuilder->CreateIntConst(k64BitSize - k16BitSize, PTY_i32)); + } else { + regreadExp = mirBuilder->CreateExprRegread(PTY_u16, pIdxR); + } + + aStmt = mirBuilder->CreateStmtIassign( + *beCommon.BeGetOrCreatePointerType(*GlobalTables::GetTypeTable().GetUInt16()), + 0, addr, regreadExp); + + if (CGOptions::IsBigEndian()) { + shift = mirBuilder->CreateExprBinary(OP_shl, *type, + mirBuilder->CreateExprRegread(PTY_u64, pIdxR), + mirBuilder->CreateIntConst(k64BitSize - k16BitSize, PTY_i32)); + } else { + shift = mirBuilder->CreateExprBinary(OP_lshr, *type, + mirBuilder->CreateExprRegread(PTY_u64, pIdxR), + mirBuilder->CreateIntConst(k16BitSize, PTY_i32)); + } + + pIdxS = 
GetCurrentFunc()->GetPregTab()->CreatePreg(PTY_u64); + StmtNode *sStmp = mirBuilder->CreateStmtRegassign(PTY_u64, pIdxS, shift); + + pIdx1R = pIdx2R = pIdxS; + newBlk.AddStatement(sStmp); + size -= k2ByteSize; + curSize += k2ByteSize; + } else { + MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(PTY_u64); + + if (CGOptions::IsBigEndian()) { + regreadExp = mirBuilder->CreateExprBinary(OP_lshr, *type, mirBuilder->CreateExprRegread(PTY_u64, pIdxR), + mirBuilder->CreateIntConst(k64BitSize - k8BitSize, PTY_i32)); + } else { + regreadExp = mirBuilder->CreateExprRegread(PTY_u8, pIdxR); + } + + aStmt = mirBuilder->CreateStmtIassign( + *beCommon.BeGetOrCreatePointerType(*GlobalTables::GetTypeTable().GetUInt8()), + 0, addr, regreadExp); + + if (CGOptions::IsBigEndian()) { + shift = mirBuilder->CreateExprBinary(OP_shl, *type, mirBuilder->CreateExprRegread(PTY_u64, pIdxR), + mirBuilder->CreateIntConst(k64BitSize - k8BitSize, PTY_i32)); + } else { + shift = mirBuilder->CreateExprBinary(OP_lshr, *type, mirBuilder->CreateExprRegread(PTY_u64, pIdxR), + mirBuilder->CreateIntConst(k8BitSize, PTY_i32)); + } + + pIdxS = GetCurrentFunc()->GetPregTab()->CreatePreg(PTY_u64); + StmtNode *sStmp = mirBuilder->CreateStmtRegassign(PTY_u64, pIdxS, shift); + + pIdx1R = pIdx2R = pIdxS; + newBlk.AddStatement(sStmp); + size -= k1ByteSize; + curSize += k1ByteSize; + } + newBlk.AddStatement(aStmt); + } + } + nextStmt = nextStmt->GetNext(); // skip the dassign + return true; +} + +void CGLowerer::LowerStmt(StmtNode &stmt, BlockNode &newBlk) { + CHECK_FATAL(stmt.GetPrimType() != PTY_ptr, "should have been lowered already"); + CHECK_FATAL(stmt.GetPrimType() != PTY_ref, "should have been lowered already"); + for (size_t i = 0; i < stmt.NumOpnds(); ++i) { + stmt.SetOpnd(LowerExpr(stmt, *stmt.Opnd(i), newBlk), i); + } +} + +void CGLowerer::LowerSwitchOpnd(StmtNode &stmt, BlockNode &newBlk) { + BaseNode *opnd = LowerExpr(stmt, *stmt.Opnd(0), newBlk); + if (CGOptions::GetInstance().GetOptimizeLevel() >= CGOptions::kLevel2 && opnd->GetOpCode() != OP_regread) { + PrimType ptyp = stmt.Opnd(0)->GetPrimType(); + PregIdx pIdx = GetCurrentFunc()->GetPregTab()->CreatePreg(ptyp); + RegassignNode *regAss = mirBuilder->CreateStmtRegassign(ptyp, pIdx, opnd); + newBlk.AddStatement(regAss); + GetCurrentFunc()->SetLastFreqMap(regAss->GetStmtID(), + static_cast(GetCurrentFunc()->GetFreqFromLastStmt(stmt.GetStmtID()))); + stmt.SetOpnd(mirBuilder->CreateExprRegread(ptyp, pIdx), 0); + } else { + stmt.SetOpnd(LowerExpr(stmt, *stmt.Opnd(0), newBlk), 0); + } +} + +void CGLowerer::AddElemToPrintf(MapleVector &argsPrintf, int num, ...) 
const { + va_list argPtr; + va_start(argPtr, num); + for (int i = 0; i < num; ++i) { + argsPrintf.push_back(va_arg(argPtr, BaseNode*)); + } + va_end(argPtr); +} + +bool CGLowerer::CheckSwitchTableContinuous(SwitchNode &stmt) const { + if (!stmt.GetSwitchTable().empty()) { + stmt.SortCasePair(CasePairKeyLessThan); + if (static_cast(((stmt.GetSwitchTable().end() - 1)->first - stmt.GetSwitchTable().begin()->first) + 1) > + stmt.GetSwitchTable().size()) { + return false; + } + } + return true; +} + +bool CGLowerer::IsSwitchToRangeGoto(const BlockNode &blk) const { + for (const StmtNode *stmt = blk.GetFirst(); stmt != nullptr; stmt = stmt->GetNext()) { + if (stmt->GetOpCode() == OP_rangegoto) { + return true; + } + } + return false; +} + +void CGLowerer::SwitchAssertBoundary(StmtNode &stmt, MapleVector &argsPrintf) { + MIRSymbol *errMsg; + MIRSymbol *fileNameSym; + ConstvalNode *lineNum; + const int callElemNum = 5; + const int returnElemNum = 4; + const int otherElemNum = 3; + fileNameSym = mirBuilder->CreateConstStringSymbol(GetFileNameSymbolName(AssertBoundaryGetFileName(stmt)), + AssertBoundaryGetFileName(stmt)); + lineNum = mirBuilder->CreateIntConst(stmt.GetSrcPos().LineNum(), PTY_u32); + if (kOpcodeInfo.IsAssertLowerBoundary(stmt.GetOpCode())) { + errMsg = mirBuilder->CreateConstStringSymbol(kOpAssertge, + "%s:%d error: the pointer < the lower bounds when accessing the memory!\n"); + AddElemToPrintf(argsPrintf, otherElemNum, mirBuilder->CreateAddrof(*errMsg, PTY_a64), + mirBuilder->CreateAddrof(*fileNameSym, PTY_a64), lineNum); + } else { + if (kOpcodeInfo.IsAssertLeBoundary(stmt.GetOpCode())) { + if (stmt.GetOpCode() == OP_callassertle) { + auto &callStmt = static_cast(stmt); + std::string param; + MIRSymbol *funcName; + MIRSymbol *paramNum; + param = maple::GetNthStr(callStmt.GetParamIndex()); + errMsg = mirBuilder->CreateConstStringSymbol(kOpCallAssertle, + "%s:%d error: the pointer's bounds does not match the function %s declaration for the %s argument!\n"); + funcName = mirBuilder->CreateConstStringSymbol(callStmt.GetFuncName() + kOpCallAssertle, + callStmt.GetFuncName()); + paramNum = mirBuilder->CreateConstStringSymbol(kOpCallAssertle + param, param); + AddElemToPrintf(argsPrintf, callElemNum, mirBuilder->CreateAddrof(*errMsg, PTY_a64), + mirBuilder->CreateAddrof(*fileNameSym, PTY_a64), lineNum, + mirBuilder->CreateAddrof(*funcName, PTY_a64), + mirBuilder->CreateAddrof(*paramNum, PTY_a64)); + } else if (stmt.GetOpCode() == OP_returnassertle) { + auto &callStmt = static_cast(stmt); + MIRSymbol *funcName; + errMsg = mirBuilder->CreateConstStringSymbol(kOpReturnAssertle, + "%s:%d error: return value's bounds does not match the function declaration for %s\n"); + funcName = mirBuilder->CreateConstStringSymbol(callStmt.GetFuncName() + kOpReturnAssertle, + callStmt.GetFuncName()); + AddElemToPrintf(argsPrintf, returnElemNum, mirBuilder->CreateAddrof(*errMsg, PTY_a64), + mirBuilder->CreateAddrof(*fileNameSym, PTY_a64), lineNum, + mirBuilder->CreateAddrof(*funcName, PTY_a64)); + } else { + errMsg = mirBuilder->CreateConstStringSymbol(kOpAssignAssertle, + "%s:%d error: l-value boundary should not be larger than r-value boundary!\n"); + AddElemToPrintf(argsPrintf, otherElemNum, mirBuilder->CreateAddrof(*errMsg, PTY_a64), + mirBuilder->CreateAddrof(*fileNameSym, PTY_a64), lineNum); + } + } else { + errMsg = mirBuilder->CreateConstStringSymbol(kOpAssertlt, + "%s:%d error: the pointer >= the upper bounds when accessing the memory!\n"); + AddElemToPrintf(argsPrintf, otherElemNum, 
mirBuilder->CreateAddrof(*errMsg, PTY_a64), + mirBuilder->CreateAddrof(*fileNameSym, PTY_a64), lineNum); + } + } +} + +void CGLowerer::LowerAssertBoundary(StmtNode &stmt, BlockNode &block, BlockNode &newBlk, + std::vector &abortNode) { + MIRFunction *curFunc = mirModule.CurFunction(); + BaseNode *op0 = LowerExpr(stmt, *stmt.Opnd(0), block); + BaseNode *op1 = LowerExpr(stmt, *stmt.Opnd(1), block); + LabelIdx labIdx = GetLabelIdx(*curFunc); + LabelNode *labelBC = mirBuilder->CreateStmtLabel(labIdx); + Opcode op = OP_ge; + if (kOpcodeInfo.IsAssertUpperBoundary(stmt.GetOpCode())) { + op = (kOpcodeInfo.IsAssertLeBoundary(stmt.GetOpCode())) ? OP_le : OP_lt; + } + BaseNode *cond = mirBuilder->CreateExprCompare(op, *GlobalTables::GetTypeTable().GetUInt1(), + *GlobalTables::GetTypeTable().GetPrimType(op0->GetPrimType()), + op0, op1); + CondGotoNode *brFalseNode = mirBuilder->CreateStmtCondGoto(cond, OP_brfalse, labIdx); + + MIRFunction *printf = mirBuilder->GetOrCreateFunction("printf", TyIdx(PTY_i32)); + printf->GetFuncSymbol()->SetAppearsInCode(true); + beCommon.UpdateTypeTable(*printf->GetMIRFuncType()); + MapleVector argsPrintf(mirBuilder->GetCurrentFuncCodeMpAllocator()->Adapter()); + uint32 oldTypeTableSize = GlobalTables::GetTypeTable().GetTypeTableSize(); + SwitchAssertBoundary(stmt, argsPrintf); + uint32 newTypeTableSize = GlobalTables::GetTypeTable().GetTypeTableSize(); + if (newTypeTableSize != oldTypeTableSize) { + beCommon.AddNewTypeAfterBecommon(oldTypeTableSize, newTypeTableSize); + } + StmtNode *callPrintf = mirBuilder->CreateStmtCall(printf->GetPuidx(), argsPrintf); + UnaryStmtNode *abortModeNode = mirBuilder->CreateStmtUnary(OP_abort, nullptr); + + brFalseNode->SetSrcPos(stmt.GetSrcPos()); + labelBC->SetSrcPos(stmt.GetSrcPos()); + callPrintf->SetSrcPos(stmt.GetSrcPos()); + abortModeNode->SetSrcPos(stmt.GetSrcPos()); + + newBlk.AddStatement(brFalseNode); + abortNode.emplace_back(labelBC); + abortNode.emplace_back(callPrintf); + abortNode.emplace_back(abortModeNode); +} + +BlockNode *CGLowerer::LowerBlock(BlockNode &block) { + BlockNode *newBlk = mirModule.CurFuncCodeMemPool()->New(); + BlockNode *tmpBlockNode = nullptr; + std::vector abortNode; + if (block.GetFirst() == nullptr) { + return newBlk; + } + + StmtNode *nextStmt = block.GetFirst(); + do { + StmtNode *stmt = nextStmt; + nextStmt = stmt->GetNext(); + stmt->SetNext(nullptr); + currentBlock = newBlk; + auto lastStmt = newBlk->GetStmtNodes().rbegin(); + + LowerTypePtr(*stmt); + + switch (stmt->GetOpCode()) { + case OP_switch: { + LowerSwitchOpnd(*stmt, *newBlk); + auto switchMp = std::make_unique(memPoolCtrler, "switchlowere"); + MapleAllocator switchAllocator(switchMp.get()); + LabelNode *defaultLabel = nullptr; + LabelIdx newLabelIdx = 0; + if (!CheckSwitchTableContinuous(static_cast(*stmt)) && + static_cast(stmt)->GetDefaultLabel() == 0) { + newLabelIdx = GetLabelIdx(*mirModule.CurFunction()); + defaultLabel = mirBuilder->CreateStmtLabel(newLabelIdx); + } + SwitchLowerer switchLowerer(mirModule, static_cast(*stmt), switchAllocator); + BlockNode *blk = switchLowerer.LowerSwitch(newLabelIdx); + if (blk->GetFirst() != nullptr && defaultLabel != nullptr && IsSwitchToRangeGoto(*blk)) { + blk->AddStatement(defaultLabel); + } + if (blk->GetFirst() != nullptr) { + newBlk->AppendStatementsFromBlock(*blk); + } + needBranchCleanup = true; + break; + } + case OP_block: + tmpBlockNode = LowerBlock(static_cast(*stmt)); + CHECK_FATAL(tmpBlockNode != nullptr, "nullptr is not expected"); + newBlk->AppendStatementsFromBlock(*tmpBlockNode); 
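+        // Illustrative note, not part of the original change: the CASE_OP_ASSERT_BOUNDARY
+        // case below relies on LowerAssertBoundary (defined above), which splits each
+        // boundary assert into a fall-through check plus an out-of-line failure sequence,
+        // roughly of the shape
+        //   brfalse @bc_label (ge/le/lt <addr>, <bound>)   // in-bounds path falls through
+        //   ...                                            // remaining lowered statements
+        //   @bc_label:
+        //   call &printf (<errMsg>, <fileName>, <lineNum>, ...)
+        //   abort
+        // (label and operand names are placeholders). The label/printf/abort triple is kept
+        // in abortNode and appended only after the whole block has been lowered, so the
+        // in-bounds path stays a single conditional branch.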
+ break; + case OP_dassign: { + LowerDassign(static_cast(*stmt), *newBlk); + break; + } + case OP_regassign: { + LowerRegassign(static_cast(*stmt), *newBlk); + break; + } + CASE_OP_ASSERT_BOUNDARY { + LowerAssertBoundary(*stmt, block, *newBlk, abortNode); + break; + } + case OP_iassign: { + LowerIassign(static_cast(*stmt), *newBlk); + break; + } + case OP_callassigned: + case OP_icallassigned: + case OP_icallprotoassigned: { + // pass the addr of lvar if this is a struct call assignment + bool lvar = false; + // nextStmt could be changed by the call to LowerStructReturn + if (!LowerStructReturn(*newBlk, stmt, nextStmt, lvar, &block)) { + newBlk->AppendStatementsFromBlock(*LowerCallAssignedStmt(*stmt, lvar)); + } + break; + } + case OP_virtualcallassigned: + case OP_superclasscallassigned: + case OP_interfacecallassigned: + case OP_intrinsiccallassigned: + case OP_xintrinsiccallassigned: + case OP_intrinsiccallwithtypeassigned: + newBlk->AppendStatementsFromBlock(*LowerCallAssignedStmt(*stmt)); + break; + case OP_intrinsiccall: + case OP_call: + case OP_icall: + case OP_icallproto: +#if TARGARM32 || TARGAARCH64 || TARGRISCV64 || TARGX86_64 + // nextStmt could be changed by the call to LowerStructReturn + LowerCallStmt(*stmt, nextStmt, *newBlk); +#else + LowerStmt(*stmt, *newBlk); +#endif + break; + case OP_return: { +#if TARGARM32 || TARGAARCH64 || TARGRISCV64 || TARGX86_64 + if (GetCurrentFunc()->IsFirstArgReturn() && stmt->NumOpnds() > 0) { + newBlk->AppendStatementsFromBlock(*LowerReturnStructUsingFakeParm(static_cast(*stmt))); + } else { +#endif + NaryStmtNode *retNode = static_cast(stmt); + if (retNode->GetNopndSize() == 0) { + newBlk->AddStatement(stmt); + } else { + tmpBlockNode = LowerReturn(*retNode); + CHECK_FATAL(tmpBlockNode != nullptr, "nullptr is not expected"); + newBlk->AppendStatementsFromBlock(*tmpBlockNode); + } +#if TARGARM32 || TARGAARCH64 || TARGRISCV64 || TARGX86_64 + } +#endif + break; + } + case OP_comment: + newBlk->AddStatement(stmt); + break; + case OP_try: + LowerStmt(*stmt, *newBlk); + newBlk->AddStatement(stmt); + hasTry = true; + break; + case OP_endtry: + LowerStmt(*stmt, *newBlk); + newBlk->AddStatement(stmt); + break; + case OP_catch: + LowerStmt(*stmt, *newBlk); + newBlk->AddStatement(stmt); + break; + case OP_throw: + if (mirModule.IsJavaModule()) { + if (GenerateExceptionHandlingCode()) { + LowerStmt(*stmt, *newBlk); + newBlk->AddStatement(stmt); + } + } else { + LowerStmt(*stmt, *newBlk); + newBlk->AddStatement(stmt); + } + break; + case OP_syncenter: + case OP_syncexit: { + LowerStmt(*stmt, *newBlk); + StmtNode *tmp = LowerSyncEnterSyncExit(*stmt); + CHECK_FATAL(tmp != nullptr, "nullptr is not expected"); + newBlk->AddStatement(tmp); + break; + } + case OP_decrefreset: { + /* + * only gconly can reach here + * lower stmt (decrefreset (addrof ptr %RegX_RXXXX)) to (dassign %RegX_RXXXX 0 (constval ref 0)) + */ + CHECK_FATAL(CGOptions::IsGCOnly(), "OP_decrefreset is expected only in gconly."); + LowerResetStmt(*stmt, *newBlk); + break; + } + case OP_asm: { + LowerAsmStmt(static_cast(stmt), newBlk); + break; + } + default: + LowerStmt(*stmt, *newBlk); + newBlk->AddStatement(stmt); + break; + } + CHECK_FATAL(beCommon.GetSizeOfTypeSizeTable() == GlobalTables::GetTypeTable().GetTypeTableSize(), "Error!"); + + for (auto itStmt = newBlk->GetStmtNodes().rbegin(); itStmt != lastStmt; ++itStmt) { + if (stmt->GetSrcPos().IsValid()) { + itStmt->SetSrcPos(stmt->GetSrcPos()); + } + } + lastStmt = newBlk->GetStmtNodes().rbegin(); + } while (nextStmt != nullptr); + for 
(auto node : abortNode) { + newBlk->AddStatement(node); + } + return newBlk; +} + +void CGLowerer::SimplifyBlock(BlockNode &block) const { + if (block.GetFirst() == nullptr) { + return; + } + StmtNode *nextStmt = block.GetFirst(); + do { + StmtNode *stmt = nextStmt; + nextStmt = stmt->GetNext(); + Opcode op = stmt->GetOpCode(); + switch (op) { + case OP_call: { + auto *callStmt = static_cast(stmt); + if (CGOptions::IsDuplicateAsmFileEmpty()) { + break; + } + auto *oldFunc = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(callStmt->GetPUIdx()); + if (asmMap.find(oldFunc->GetName()) == asmMap.end()) { + break; + } + auto *newFunc = theMIRModule->GetMIRBuilder()->GetOrCreateFunction(asmMap.at(oldFunc->GetName()), + callStmt->GetTyIdx()); + MIRSymbol *funcSym = newFunc->GetFuncSymbol(); + funcSym->SetStorageClass(kScExtern); + funcSym->SetAppearsInCode(true); + callStmt->SetPUIdx(newFunc->GetPuidx()); + break; + } + default: { + break; + } + } + } while (nextStmt != nullptr); + return; +} + +MIRType *CGLowerer::GetArrayNodeType(BaseNode &baseNode) { + MIRType *baseType = nullptr; + auto curFunc = mirModule.CurFunction(); + if (baseNode.GetOpCode() == OP_regread) { + RegreadNode *rrNode = static_cast(&baseNode); + MIRPreg *pReg = curFunc->GetPregTab()->PregFromPregIdx(rrNode->GetRegIdx()); + if (pReg->IsRef()) { + baseType = pReg->GetMIRType(); + } + } + if (baseNode.GetOpCode() == OP_dread) { + DreadNode *dreadNode = static_cast(&baseNode); + MIRSymbol *symbol = curFunc->GetLocalOrGlobalSymbol(dreadNode->GetStIdx()); + baseType = symbol->GetType(); + } + MIRType *arrayElemType = nullptr; + if (baseType != nullptr) { + MIRType *stType = GlobalTables::GetTypeTable().GetTypeFromTyIdx( + static_cast(baseType)->GetPointedTyIdx()); + while (stType->GetKind() == kTypeJArray) { + MIRJarrayType *baseType1 = static_cast(stType); + MIRType *elemType = baseType1->GetElemType(); + if (elemType->GetKind() == kTypePointer) { + const TyIdx &index = static_cast(elemType)->GetPointedTyIdx(); + stType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(index); + } else { + stType = elemType; + } + } + + arrayElemType = stType; + } + return arrayElemType; +} + +void CGLowerer::SplitCallArg(CallNode &callNode, BaseNode *newOpnd, size_t i, BlockNode &newBlk) { + if (newOpnd->GetOpCode() != OP_regread && newOpnd->GetOpCode() != OP_constval && + newOpnd->GetOpCode() != OP_dread && newOpnd->GetOpCode() != OP_addrof && + newOpnd->GetOpCode() != OP_iaddrof && newOpnd->GetOpCode() != OP_constval && + newOpnd->GetOpCode() != OP_conststr && newOpnd->GetOpCode() != OP_conststr16) { + if (CGOptions::GetInstance().GetOptimizeLevel() == CGOptions::kLevel0) { + MIRType *type = GlobalTables::GetTypeTable().GetPrimType(newOpnd->GetPrimType()); + MIRSymbol *ret = CreateNewRetVar(*type, kIntrnRetValPrefix); + DassignNode *dassignNode = mirBuilder->CreateStmtDassign(*ret, 0, newOpnd); + newBlk.AddStatement(dassignNode); + callNode.SetOpnd(mirBuilder->CreateExprDread(*type, 0, *ret), i); + } else { + PregIdx pregIdx = mirModule.CurFunction()->GetPregTab()->CreatePreg(newOpnd->GetPrimType()); + RegassignNode *temp = mirBuilder->CreateStmtRegassign(newOpnd->GetPrimType(), pregIdx, newOpnd); + newBlk.AddStatement(temp); + callNode.SetOpnd(mirBuilder->CreateExprRegread(newOpnd->GetPrimType(), pregIdx), i); + } + } else { + callNode.SetOpnd(newOpnd, i); + } +} + +StmtNode *CGLowerer::LowerCall( + CallNode &callNode, StmtNode *&nextStmt, BlockNode &newBlk, MIRType *retTy, bool uselvar) { + /* + * nextStmt in-out + * call $foo(constval 
u32 128) + * dassign %jlt (dread agg %%retval) + */ + bool isArrayStore = false; + + if (callNode.GetOpCode() == OP_call) { + MIRFunction *calleeFunc = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(callNode.GetPUIdx()); + if ((calleeFunc->GetName() == "MCC_WriteRefField") && (callNode.Opnd(1)->GetOpCode() == OP_iaddrof)) { + IreadNode *addrExpr = static_cast(callNode.Opnd(1)); + if (addrExpr->Opnd(0)->GetOpCode() == OP_array) { + isArrayStore = true; + } + } + } + + for (size_t i = 0; i < callNode.GetNopndSize(); ++i) { + BaseNode *newOpnd = LowerExpr(callNode, *callNode.GetNopndAt(i), newBlk); +#if TARGAARCH64 || TARGRISCV64 || TARGX86_64 + callNode.SetOpnd(newOpnd, i); +#else + SplitCallArg(callNode, newOpnd, i, newBlk); +#endif + } + + if (isArrayStore && checkLoadStore) { + bool needCheckStore = true; + MIRType *arrayElemType = GetArrayNodeType(*callNode.Opnd(0)); + MIRType *valueRealType = GetArrayNodeType(*callNode.Opnd(kNodeThirdOpnd)); + if ((arrayElemType != nullptr) && (valueRealType != nullptr) && (arrayElemType->GetKind() == kTypeClass) && + static_cast(arrayElemType)->IsFinal() && (valueRealType->GetKind() == kTypeClass) && + static_cast(valueRealType)->IsFinal() && + valueRealType->GetTypeIndex() == arrayElemType->GetTypeIndex()) { + needCheckStore = false; + } + + if (needCheckStore) { + MIRFunction *fn = mirModule.GetMIRBuilder()->GetOrCreateFunction("MCC_Reflect_Check_Arraystore", TyIdx(PTY_void)); + fn->GetFuncSymbol()->SetAppearsInCode(true); + beCommon.UpdateTypeTable(*fn->GetMIRFuncType()); + fn->AllocSymTab(); + MapleVector args(mirModule.GetMIRBuilder()->GetCurrentFuncCodeMpAllocator()->Adapter()); + args.emplace_back(callNode.Opnd(0)); + args.emplace_back(callNode.Opnd(kNodeThirdOpnd)); + StmtNode *checkStoreStmt = mirModule.GetMIRBuilder()->CreateStmtCall(fn->GetPuidx(), args); + newBlk.AddStatement(checkStoreStmt); + } + } + + DassignNode *dassignNode = nullptr; + if ((nextStmt != nullptr) && (nextStmt->GetOpCode() == OP_dassign)) { + dassignNode = static_cast(nextStmt); + } + + /* if nextStmt is not a dassign stmt, return */ + if (dassignNode == nullptr) { + return &callNode; + } + + if (!uselvar && retTy && beCommon.GetTypeSize(retTy->GetTypeIndex().GetIdx()) <= k16ByteSize) { + /* return structure fitting in one or two regs. 
*/ + return &callNode; + } + + MIRType *retType = nullptr; + if (callNode.op == OP_icall || callNode.op == OP_icallproto) { + if (retTy == nullptr) { + return &callNode; + } else { + retType = retTy; + } + } + + if (retType == nullptr) { + MIRFunction *calleeFunc = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(callNode.GetPUIdx()); + retType = calleeFunc->GetReturnType(); + if (calleeFunc->IsReturnStruct() && (retType->GetPrimType() == PTY_void)) { + MIRPtrType *pretType = static_cast((calleeFunc->GetNthParamType(0))); + CHECK_FATAL(pretType != nullptr, "nullptr is not expected"); + retType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(pretType->GetPointedTyIdx()); + CHECK_FATAL((retType->GetKind() == kTypeStruct) || (retType->GetKind() == kTypeUnion), + "make sure retType is a struct type"); + } + } + + /* if return type is not of a struct, return */ + if ((retType->GetKind() != kTypeStruct) && (retType->GetKind() != kTypeUnion)) { + return &callNode; + } + + MIRSymbol *dsgnSt = mirModule.CurFunction()->GetLocalOrGlobalSymbol(dassignNode->GetStIdx()); + CHECK_FATAL(dsgnSt->GetType()->IsStructType(), "expects a struct type"); + MIRStructType *structTy = static_cast(dsgnSt->GetType()); + if (structTy == nullptr) { + return &callNode; + } + + RegreadNode *regReadNode = nullptr; + if (dassignNode->Opnd(0)->GetOpCode() == OP_regread) { + regReadNode = static_cast(dassignNode->Opnd(0)); + } + if (regReadNode == nullptr || (regReadNode->GetRegIdx() != -kSregRetval0)) { + return &callNode; + } + + MapleVector newNopnd(mirModule.CurFuncCodeMemPoolAllocator()->Adapter()); + AddrofNode *addrofNode = mirModule.CurFuncCodeMemPool()->New(OP_addrof); + addrofNode->SetPrimType(GetLoweredPtrType()); + addrofNode->SetStIdx(dsgnSt->GetStIdx()); + addrofNode->SetFieldID(0); + + if (callNode.op == OP_icall || callNode.op == OP_icallproto) { + auto ond = callNode.GetNopnd().begin(); + newNopnd.emplace_back(*ond); + newNopnd.emplace_back(addrofNode); + for (++ond; ond != callNode.GetNopnd().end(); ++ond) { + newNopnd.emplace_back(*ond); + } + } else { + newNopnd.emplace_back(addrofNode); + for (auto *opnd : callNode.GetNopnd()) { + newNopnd.emplace_back(opnd); + } + } + + callNode.SetNOpnd(newNopnd); + callNode.SetNumOpnds(static_cast(newNopnd.size())); + CHECK_FATAL(nextStmt != nullptr, "nullptr is not expected"); + nextStmt = nextStmt->GetNext(); + return &callNode; +} + +void CGLowerer::LowerEntry(MIRFunction &func) { + // determine if needed to insert fake parameter to return struct for current function + if (func.IsReturnStruct()) { + MIRType *retType = func.GetReturnType(); +#if TARGAARCH64 + PrimType pty = IsStructElementSame(retType); + if (pty == PTY_f32 || pty == PTY_f64 || IsPrimitiveVector(pty)) { + func.SetStructReturnedInRegs(); + return; + } +#endif + if (retType->GetPrimType() != PTY_agg) { + return; + } + if (retType->GetSize() > k16ByteSize) { + func.SetFirstArgReturn(); + func.GetMIRFuncType()->SetFirstArgReturn(); + } else { + func.SetStructReturnedInRegs(); + } + } + if (func.IsFirstArgReturn() && func.GetReturnType()->GetPrimType() != PTY_void) { + MIRSymbol *retSt = func.GetSymTab()->CreateSymbol(kScopeLocal); + retSt->SetStorageClass(kScFormal); + retSt->SetSKind(kStVar); + std::string retName(".return."); + MIRSymbol *funcSt = GlobalTables::GetGsymTable().GetSymbolFromStidx(func.GetStIdx().Idx()); + retName.append(funcSt->GetName()); + retSt->SetNameStrIdx(retName); + MIRType *pointType = beCommon.BeGetOrCreatePointerType(*func.GetReturnType()); + + 
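+    // Illustrative note, not part of the original change: for a C-level signature such as
+    //   struct Big foo(int x);               // hypothetical example, sizeof(Big) > 16 bytes
+    // this entry lowering rewrites the function to the equivalent of
+    //   void foo(struct Big *retArg, int x); // callee stores its result through retArg
+    // The hidden formal created here (retSt, named ".return.<funcName>") is typed below as a
+    // pointer to the original return type and placed first in `formals`, while the MIR return
+    // type is switched to void via UpdateFuncTypeAndFormalsAndReturnType.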
retSt->SetTyIdx(pointType->GetTypeIndex()); + std::vector formals; + formals.emplace_back(retSt); + for (uint32 i = 0; i < func.GetFormalCount(); ++i) { + auto formal = func.GetFormal(i); + formals.emplace_back(formal); + } + func.SetFirstArgReturn(); + + beCommon.AddElementToFuncReturnType(func, func.GetReturnTyIdx()); + + func.UpdateFuncTypeAndFormalsAndReturnType(formals, TyIdx(PTY_void), true); + auto *funcType = func.GetMIRFuncType(); + ASSERT(funcType != nullptr, "null ptr check"); + funcType->SetFirstArgReturn(); + beCommon.AddTypeSizeAndAlign(funcType->GetTypeIndex(), GetPrimTypeSize(funcType->GetPrimType())); + } +} + +void CGLowerer::LowerPseudoRegs(const MIRFunction &func) const { + for (uint32 i = 1; i < func.GetPregTab()->Size(); ++i) { + MIRPreg *ipr = func.GetPregTab()->PregFromPregIdx(i); + PrimType primType = ipr->GetPrimType(); + if ((primType == PTY_ptr) || (primType == PTY_ref)) { + ipr->SetPrimType(GetLoweredPtrType()); + } else if (primType == PTY_u1) { + ipr->SetPrimType(PTY_u32); + } + } +} + +void CGLowerer::CleanupBranches(MIRFunction &func) const { + BlockNode *block = func.GetBody(); + StmtNode *prev = nullptr; + StmtNode *next = nullptr; + for (StmtNode *curr = block->GetFirst(); curr != nullptr; curr = next) { + next = curr->GetNext(); + if (next != nullptr) { + CHECK_FATAL(curr == next->GetPrev(), "unexpected node"); + } + if ((next != nullptr) && (prev != nullptr) && (curr->GetOpCode() == OP_goto)) { + /* + * Skip until find a label. + * Note that the CURRent 'goto' statement may be the last statement + * when discounting comment statements. + * Make sure we don't lose any comments. + */ + StmtNode *cmtB = nullptr; + StmtNode *cmtE = nullptr; + bool isCleanable = true; + while ((next != nullptr) && (next->GetOpCode() != OP_label)) { + if ((next->GetOpCode() == OP_try) || (next->GetOpCode() == OP_endtry) || (next->GetOpCode() == OP_catch)) { + isCleanable = false; + break; + } + next = next->GetNext(); + } + if ((next != nullptr) && (!isCleanable)) { + prev = next->GetPrev(); + continue; + } + + next = curr->GetNext(); + + while ((next != nullptr) && (next->GetOpCode() != OP_label)) { + if (next->GetOpCode() == OP_comment) { + if (cmtB == nullptr) { + cmtB = next; + cmtE = next; + } else { + CHECK_FATAL(cmtE != nullptr, "cmt_e is null in CGLowerer::CleanupBranches"); + cmtE->SetNext(next); + next->SetPrev(cmtE); + cmtE = next; + } + } + next = next->GetNext(); + } + + curr->SetNext(next); + + if (next != nullptr) { + next->SetPrev(curr); + } + + StmtNode *insertAfter = nullptr; + + if ((next != nullptr) && + ((static_cast(curr))->GetOffset() == (static_cast(next))->GetLabelIdx())) { + insertAfter = prev; + prev->SetNext(next); /* skip goto statement (which is pointed by curr) */ + next->SetPrev(prev); + curr = next; /* make curr point to the label statement */ + next = next->GetNext(); /* advance next to the next statement of the label statement */ + } else { + insertAfter = curr; + } + + /* insert comments before 'curr' */ + if (cmtB != nullptr) { + CHECK_FATAL(cmtE != nullptr, "nullptr is not expected"); + StmtNode *iaNext = insertAfter->GetNext(); + if (iaNext != nullptr) { + iaNext->SetPrev(cmtE); + } + cmtE->SetNext(iaNext); + + insertAfter->SetNext(cmtB); + cmtB->SetPrev(insertAfter); + + if (insertAfter == curr) { + curr = cmtE; + } + } + if (next == nullptr) { + func.GetBody()->SetLast(curr); + } + } + prev = curr; + } + CHECK_FATAL(func.GetBody()->GetLast() == prev, "make sure the return value of GetLast equal prev"); +} + +/* + * We want to place 
catch blocks so that they don't come before any of java trys that refer to them. + * In order to do that, we take advantage of the fact that the mpl. source we get is already flattened and + * no java-try-end-try block is enclosed in any other java-try-end-try block. they appear in the mpl file. + * We process each bb in bbList from the front to the end, and while doing so, we maintain a list of catch blocks + * we have seen. When we get to an end-try block, we examine each catch block label it has (offsets), + * and if we find any catch block in the "seen" list, we move the block after the end-try block. + * Note that we need to find a basic block which does not have 'fallthruBranch' control path. + * (Appending the catch block to any basic block that has the 'fallthruBranch' control path + * will alter the program semantics) + */ +void CGLowerer::LowerTryCatchBlocks(BlockNode &body) { + if (!hasTry) { + return; + } + +#if DEBUG + BBT::ValidateStmtList(nullptr, nullptr); +#endif + auto memPool = std::make_unique(memPoolCtrler, "CreateNewBB mempool"); + TryCatchBlocksLower tryCatchLower(*memPool, body, mirModule); + tryCatchLower.RecoverBasicBlock(); + bool generateEHCode = GenerateExceptionHandlingCode(); + tryCatchLower.SetGenerateEHCode(generateEHCode); + tryCatchLower.TraverseBBList(); +#if DEBUG + tryCatchLower.CheckTryCatchPattern(); +#endif +} + +inline bool IsAccessingTheSameMemoryLocation(const DassignNode &dassign, + const RegreadNode &rRead, const CGLowerer &cgLowerer) { + StIdx stIdx = cgLowerer.GetSymbolReferredToByPseudoRegister(rRead.GetRegIdx()); + return ((dassign.GetStIdx() == stIdx) && (dassign.GetFieldID() == 0)); +} + +inline bool IsAccessingTheSameMemoryLocation(const DassignNode &dassign, const DreadNode &dread) { + return ((dassign.GetStIdx() == dread.GetStIdx()) && (dassign.GetFieldID() == dread.GetFieldID())); +} + +inline bool IsDassignNOP(const DassignNode &dassign) { + if (dassign.GetRHS()->GetOpCode() == OP_dread) { + return IsAccessingTheSameMemoryLocation(dassign, static_cast(*dassign.GetRHS())); + } + return false; +} + +inline bool IsConstvalZero(const BaseNode &n) { + return ((n.GetOpCode() == OP_constval) && static_cast(n).GetConstVal()->IsZero()); +} + +#define NEXT_ID(x) ((x) + 1) +#define INTRN_FIRST_SYNC_ENTER NEXT_ID(INTRN_LAST) +#define INTRN_SECOND_SYNC_ENTER NEXT_ID(INTRN_FIRST_SYNC_ENTER) +#define INTRN_THIRD_SYNC_ENTER NEXT_ID(INTRN_SECOND_SYNC_ENTER) +#define INTRN_FOURTH_SYNC_ENTER NEXT_ID(INTRN_THIRD_SYNC_ENTER) +#define INTRN_YNC_EXIT NEXT_ID(INTRN_FOURTH_SYNC_ENTER) + +std::vector> CGLowerer::builtinFuncIDs; +std::unordered_map CGLowerer::intrinFuncIDs; +std::unordered_map CGLowerer::arrayClassCacheIndex; + +MIRFunction *CGLowerer::RegisterFunctionVoidStarToVoid(BuiltinFunctionID id, const std::string &name, + const std::string ¶mName) { + MIRFunction *func = mirBuilder->GetOrCreateFunction(name, GlobalTables::GetTypeTable().GetVoid()->GetTypeIndex()); + beCommon.UpdateTypeTable(*func->GetMIRFuncType()); + func->AllocSymTab(); + MIRSymbol *funcSym = func->GetFuncSymbol(); + funcSym->SetStorageClass(kScExtern); + funcSym->SetAppearsInCode(true); + MIRType *argTy = GlobalTables::GetTypeTable().GetPtr(); + MIRSymbol *argSt = func->GetSymTab()->CreateSymbol(kScopeLocal); + argSt->SetNameStrIdx(mirBuilder->GetOrCreateStringIndex(paramName)); + argSt->SetTyIdx(argTy->GetTypeIndex()); + argSt->SetStorageClass(kScFormal); + argSt->SetSKind(kStVar); + func->GetSymTab()->AddToStringSymbolMap(*argSt); + std::vector formals; + 
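+  // Illustrative note, not part of the original change: at the C level the runtime helpers
+  // registered through this routine behave roughly like
+  //   extern void MCC_SyncEnterFast0(void *obj, void *monitor_slot);
+  //   extern void MCC_SyncExitFast(void *obj, void *monitor_slot);
+  // i.e. extern, void-returning functions whose first formal is the pointer parameter created
+  // above (argSt, named per paramName); the MCC_SyncEnterFast*/MCC_SyncExitFast entries
+  // additionally receive the "monitor_slot" formal added in the branch just below.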
formals.emplace_back(argSt); + if ((name == "MCC_SyncEnterFast0") || (name == "MCC_SyncEnterFast1") || + (name == "MCC_SyncEnterFast2") || (name == "MCC_SyncEnterFast3") || + (name == "MCC_SyncExitFast")) { + MIRSymbol *argStMatch = func->GetSymTab()->CreateSymbol(kScopeLocal); + argStMatch->SetNameStrIdx(mirBuilder->GetOrCreateStringIndex("monitor_slot")); + argStMatch->SetTyIdx(argTy->GetTypeIndex()); + argStMatch->SetStorageClass(kScFormal); + argStMatch->SetSKind(kStVar); + func->GetSymTab()->AddToStringSymbolMap(*argStMatch); + formals.emplace_back(argStMatch); + } + func->UpdateFuncTypeAndFormalsAndReturnType(formals, GlobalTables::GetTypeTable().GetVoid()->GetTypeIndex(), + false); + auto *funcType = func->GetMIRFuncType(); + ASSERT(funcType != nullptr, "null ptr check"); + beCommon.AddTypeSizeAndAlign(funcType->GetTypeIndex(), GetPrimTypeSize(funcType->GetPrimType())); + + builtinFuncIDs.emplace_back(std::pair(id, func->GetPuidx())); + return func; +} + +void CGLowerer::RegisterBuiltIns() { + for (uint32 i = 0; i < sizeof(cgBuiltins) / sizeof(cgBuiltins[0]); ++i) { + BuiltinFunctionID id = cgBuiltins[i].first; + IntrinDesc &desc = IntrinDesc::intrinTable[id]; + + MIRFunction *func = mirBuilder->GetOrCreateFunction(cgBuiltins[i].second, + GlobalTables::GetTypeTable().GetVoid()->GetTypeIndex()); + beCommon.UpdateTypeTable(*func->GetMIRFuncType()); + func->AllocSymTab(); + MIRSymbol *funcSym = func->GetFuncSymbol(); + funcSym->SetStorageClass(kScExtern); + funcSym->SetAppearsInCode(true); + /* return type */ + MIRType *retTy = desc.GetReturnType(); + CHECK_FATAL(retTy != nullptr, "retTy should not be nullptr"); + /* use void* for PTY_dynany */ + if (retTy->GetPrimType() == PTY_dynany) { + retTy = GlobalTables::GetTypeTable().GetPtr(); + } + + std::vector formals; + const std::string params[IntrinDesc::kMaxArgsNum] = { "p0", "p1", "p2", "p3", "p4", "p5" }; + for (uint32 j = 0; j < IntrinDesc::kMaxArgsNum; ++j) { + MIRType *argTy = desc.GetArgType(j); + if (argTy == nullptr) { + break; + } + /* use void* for PTY_dynany */ + if (argTy->GetPrimType() == PTY_dynany) { + argTy = GlobalTables::GetTypeTable().GetPtr(); + } + MIRSymbol *argSt = func->GetSymTab()->CreateSymbol(kScopeLocal); + argSt->SetNameStrIdx(mirBuilder->GetOrCreateStringIndex(params[j])); + argSt->SetTyIdx(argTy->GetTypeIndex()); + argSt->SetStorageClass(kScFormal); + argSt->SetSKind(kStVar); + func->GetSymTab()->AddToStringSymbolMap(*argSt); + formals.emplace_back(argSt); + } + func->UpdateFuncTypeAndFormalsAndReturnType(formals, retTy->GetTypeIndex(), false); + auto *funcType = func->GetMIRFuncType(); + ASSERT(funcType != nullptr, "null ptr check"); + beCommon.AddTypeSizeAndAlign(funcType->GetTypeIndex(), GetPrimTypeSize(funcType->GetPrimType())); + + builtinFuncIDs.emplace_back(std::pair(id, func->GetPuidx())); + } + + /* register __builtin_sync_enter */ + static_cast(RegisterFunctionVoidStarToVoid(INTRN_FIRST_SYNC_ENTER, "MCC_SyncEnterFast0", "obj")); + static_cast(RegisterFunctionVoidStarToVoid(INTRN_SECOND_SYNC_ENTER, "MCC_SyncEnterFast1", "obj")); + static_cast(RegisterFunctionVoidStarToVoid(INTRN_THIRD_SYNC_ENTER, "MCC_SyncEnterFast2", "obj")); + static_cast(RegisterFunctionVoidStarToVoid(INTRN_FOURTH_SYNC_ENTER, "MCC_SyncEnterFast3", "obj")); + /* register __builtin_sync_exit */ + static_cast(RegisterFunctionVoidStarToVoid(INTRN_YNC_EXIT, "MCC_SyncExitFast", "obj")); +} + +/* + * From Maple IR Document as of Apr 14, 2017 + * Type Conversion Expression Opcodes + * Conversions between integer types of different 
sizes require the cvt opcode. + * Conversion between signed and unsigned integers of the same size does not + * require any operation, not even retype. + * cvt : + * Convert the operand's value from to . + * If the sizes of the two types are the same, the conversion must involve + * altering the bits. + * retype: + * is converted to which has derived type without + * changing any bits. The size of and must be the same. + * may be of aggregate type. + */ +BaseNode *CGLowerer::MergeToCvtType(PrimType dType, PrimType sType, BaseNode &src) const { + CHECK_FATAL(IsPrimitiveInteger(dType) || IsPrimitiveFloat(dType), + "dtype should be primitiveInteger or primitiveFloat"); + CHECK_FATAL(IsPrimitiveInteger(sType) || IsPrimitiveFloat(sType), + "sType should be primitiveInteger or primitiveFloat"); + /* src i32, dest f32; src i64, dest f64 */ + CHECK_FATAL( + (IsPrimitiveInteger(sType) && IsPrimitiveFloat(dType) && + (GetPrimTypeBitSize(sType) == GetPrimTypeBitSize(dType))) || + (IsPrimitiveInteger(sType) && IsPrimitiveInteger(dType)), + "when sType is primitiveInteger and dType is primitiveFloat, sType's primTypeBitSize must equal dType's," + " or both sType and dType should primitiveInteger"); + + /* src & dest are both of float type */ + MIRType *toType = GlobalTables::GetTypeTable().GetPrimType(dType); + MIRType *fromType = GlobalTables::GetTypeTable().GetPrimType(sType); + if (IsPrimitiveInteger(sType) && IsPrimitiveFloat(dType) && + (GetPrimTypeBitSize(sType) == GetPrimTypeBitSize(dType))) { + return mirBuilder->CreateExprRetype(*toType, *fromType, &src); + } else if (IsPrimitiveInteger(sType) && IsPrimitiveInteger(dType)) { + if (GetPrimTypeBitSize(sType) >= GetPrimTypeBitSize(dType)) { + if (dType == PTY_u1) { /* e.g., type _Bool */ + toType = GlobalTables::GetTypeTable().GetPrimType(PTY_u8); + return mirBuilder->CreateExprCompare(OP_ne, *toType, *fromType, &src, mirBuilder->CreateIntConst(0, sType)); + } else if (GetPrimTypeBitSize(sType) > GetPrimTypeBitSize(dType)) { + return mirBuilder->CreateExprTypeCvt(OP_cvt, *toType, *fromType, &src); + } else if (IsSignedInteger(sType) != IsSignedInteger(dType)) { + return mirBuilder->CreateExprTypeCvt(OP_cvt, *toType, *fromType, &src); + } + src.SetPrimType(dType); + return &src; + /* + * Force type cvt here because we currently do not run constant folding + * or contanst propagation before CG. We may revisit this decision later. 
+ */ + } else if (GetPrimTypeBitSize(sType) < GetPrimTypeBitSize(dType)) { + return mirBuilder->CreateExprTypeCvt(OP_cvt, *toType, *fromType, &src); + } else if (IsConstvalZero(src)) { + return mirBuilder->CreateIntConst(0, dType); + } + CHECK_FATAL(false, "should not run here"); + } + CHECK_FATAL(false, "should not run here"); +} + +IreadNode &CGLowerer::GetLenNode(BaseNode &opnd0) { + MIRIntConst *arrayHeaderNode = GlobalTables::GetIntConstTable().GetOrCreateIntConst( + static_cast(RTSupport::GetRTSupportInstance().GetArrayLengthOffset()), + *GlobalTables::GetTypeTable().GetTypeFromTyIdx(opnd0.GetPrimType())); + BaseNode *arrayHeaderCstNode = mirModule.CurFuncCodeMemPool()->New(arrayHeaderNode); + arrayHeaderCstNode->SetPrimType(opnd0.GetPrimType()); + MIRType *addrType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(opnd0.GetPrimType()); + BaseNode *refLenAddr = mirBuilder->CreateExprBinary(OP_add, *addrType, &opnd0, arrayHeaderCstNode); + MIRType *infoLenType = GlobalTables::GetTypeTable().GetInt32(); + MIRType *ptrType = beCommon.BeGetOrCreatePointerType(*infoLenType); + IreadNode *lenNode = mirBuilder->CreateExprIread(*infoLenType, *ptrType, 0, refLenAddr); + return (*lenNode); +} + +LabelIdx CGLowerer::GetLabelIdx(MIRFunction &curFunc) const { + std::string suffix = std::to_string(curFunc.GetLabelTab()->GetLabelTableSize()); + GStrIdx labelStrIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName("__label_BC_" + suffix); + LabelIdx labIdx = curFunc.GetLabelTab()->AddLabel(labelStrIdx); + return labIdx; +} + +void CGLowerer::ProcessArrayExpr(BaseNode &expr, BlockNode &blkNode) { + bool needProcessArrayExpr = !ShouldOptarray() && mirModule.IsJavaModule(); + if (!needProcessArrayExpr) { + return; + } + /* Array boundary check */ + MIRFunction *curFunc = mirModule.CurFunction(); + auto &arrayNode = static_cast(expr); + StmtNode *boundaryCheckStmt = nullptr; + if (arrayNode.GetBoundsCheck()) { + CHECK_FATAL(arrayNode.GetNopndSize() == kOperandNumBinary, "unexpected nOpnd size"); + BaseNode *opnd0 = arrayNode.GetNopndAt(0); + if (opnd0->GetOpCode() == OP_iread) { + PregIdx pregIdx = curFunc->GetPregTab()->CreatePreg(opnd0->GetPrimType()); + RegassignNode *temp = mirBuilder->CreateStmtRegassign(opnd0->GetPrimType(), pregIdx, opnd0); + blkNode.InsertAfter(blkNode.GetLast(), temp); + arrayNode.SetNOpndAt(0, mirBuilder->CreateExprRegread(opnd0->GetPrimType(), pregIdx)); + } + IreadNode &lenNode = GetLenNode(*opnd0); + PregIdx lenPregIdx = curFunc->GetPregTab()->CreatePreg(lenNode.GetPrimType()); + RegassignNode *lenRegassignNode = mirBuilder->CreateStmtRegassign(lenNode.GetPrimType(), lenPregIdx, &lenNode); + BaseNode *lenRegreadNode = mirBuilder->CreateExprRegread(PTY_u32, lenPregIdx); + + LabelIdx labIdx = GetLabelIdx(*curFunc); + LabelNode *labelBC = mirBuilder->CreateStmtLabel(labIdx); + BaseNode *cond = mirBuilder->CreateExprCompare(OP_ge, *GlobalTables::GetTypeTable().GetUInt1(), + *GlobalTables::GetTypeTable().GetUInt32(), + arrayNode.GetNopndAt(1), lenRegreadNode); + CondGotoNode *brFalseNode = mirBuilder->CreateStmtCondGoto(cond, OP_brfalse, labIdx); + MIRFunction *fn = mirBuilder->GetOrCreateFunction("MCC_Array_Boundary_Check", TyIdx(PTY_void)); + fn->GetFuncSymbol()->SetAppearsInCode(true); + beCommon.UpdateTypeTable(*fn->GetMIRFuncType()); + fn->AllocSymTab(); + MapleVector args(mirBuilder->GetCurrentFuncCodeMpAllocator()->Adapter()); + args.emplace_back(arrayNode.GetNopndAt(0)); + args.emplace_back(arrayNode.GetNopndAt(1)); + boundaryCheckStmt = 
mirBuilder->CreateStmtCall(fn->GetPuidx(), args); + blkNode.InsertAfter(blkNode.GetLast(), lenRegassignNode); + blkNode.InsertAfter(blkNode.GetLast(), brFalseNode); + blkNode.InsertAfter(blkNode.GetLast(), boundaryCheckStmt); + blkNode.InsertAfter(blkNode.GetLast(), labelBC); + } +} + +BaseNode *CGLowerer::LowerExpr(BaseNode &parent, BaseNode &expr, BlockNode &blkNode) { + bool isCvtU1Expr = (expr.GetOpCode() == OP_cvt && expr.GetPrimType() == PTY_u1 && + static_cast(expr).FromType() != PTY_u1); + if (expr.GetPrimType() == PTY_u1) { +#if TARGAARCH64 + expr.SetPrimType(PTY_i32); +#elif TARGX86_64 + expr.SetPrimType(PTY_u8); +#else + CHECK_FATAL(false, "target not supported"); +#endif + } + if (expr.GetOpCode() == OP_intrinsicopwithtype) { + return LowerIntrinsicopwithtype(parent, static_cast(expr), blkNode); + } + + LowerTypePtr(expr); + + if (expr.GetOpCode() == OP_iread && expr.Opnd(0)->GetOpCode() == OP_array) { + /* iread ptr <* <$MUIDDataDefTabEntry>> 1 ( + * array 0 ptr <* <[5] <$MUIDDataDefTabEntry>>> (addrof ... + * ==> + * intrinsicop a64 MPL_READ_STATIC_OFFSET_TAB (addrof .. + */ + BaseNode *node = LowerExpr(expr, *expr.Opnd(0), blkNode); + if (node->GetOpCode() == OP_intrinsicop) { + auto *binNode = static_cast(node); + CHECK_FATAL(binNode->GetIntrinsic() == INTRN_MPL_READ_STATIC_OFFSET_TAB, "Something wrong here"); + return binNode; + } else { + expr.SetOpnd(node, 0); + } + } else { + for (size_t i = 0; i < expr.NumOpnds(); ++i) { + expr.SetOpnd(LowerExpr(expr, *expr.Opnd(i), blkNode), i); + } + } + // Convert `cvt u1 xx ` to `ne u8 xx (, constval xx 0)` + // No need to convert `cvt u1 u1 ` + if (isCvtU1Expr) { + auto &cvtExpr = static_cast(expr); + PrimType fromType = cvtExpr.FromType(); + auto *fromMIRType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(TyIdx(fromType)); + // We use u8 instead of u1 because codegen can't recognize u1 + auto *toMIRType = GlobalTables::GetTypeTable().GetUInt8(); + auto *zero = GlobalTables::GetIntConstTable().GetOrCreateIntConst(0, *fromMIRType); + auto *converted = mirBuilder->CreateExprCompare(OP_ne, *toMIRType, *fromMIRType, cvtExpr.Opnd(0), + mirBuilder->CreateConstval(zero)); + return converted; + } + switch (expr.GetOpCode()) { + case OP_array: { + ProcessArrayExpr(expr, blkNode); + if (!mirModule.IsCModule()) { + return LowerArray(static_cast(expr), parent); + } else { + return LowerCArray(static_cast(expr)); + } + } + + case OP_dread: + return LowerDread(static_cast(expr)); + + case OP_addrof: + return LowerAddrof(static_cast(expr)); + + case OP_iread: + return LowerIread(static_cast(expr)); + + case OP_iaddrof: + return LowerIaddrof(static_cast(expr)); + + case OP_select: + if (IsComplexSelect(static_cast(expr))) { + return LowerComplexSelect(static_cast(expr), parent, blkNode); + } else if (mirModule.GetFlavor() != kFlavorLmbc) { + return SplitTernaryNodeResult(static_cast(expr), parent, blkNode); + } else { + return &expr; + } + + case OP_sizeoftype: { + CHECK(static_cast(expr).GetTyIdx() < beCommon.GetSizeOfTypeSizeTable(), + "index out of range in CGLowerer::LowerExpr"); + int64 typeSize = beCommon.GetTypeSize(static_cast(expr).GetTyIdx()); + return mirModule.GetMIRBuilder()->CreateIntConst(static_cast(typeSize), PTY_u32); + } + + case OP_fieldsdist: { + auto &fdNode = static_cast(expr); + CHECK(fdNode.GetTyIdx() < beCommon.GetSizeOfTypeSizeTable(), + "index out of range in CGLowerer::LowerExpr"); + MIRType *ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(fdNode.GetTyIdx()); + CHECK(ty->GetKind() == kTypeClass, "wrong type for 
FieldsDistNode"); + MIRClassType *classType = static_cast(ty); + const JClassLayout &layout = beCommon.GetJClassLayout(*classType); + ASSERT(!layout.empty(), "container should not be empty"); + int32 i1 = fdNode.GetFiledID1() > 0 ? fdNode.GetFiledID1() - 1 : 0; + int32 i2 = fdNode.GetFiledID2() > 0 ? fdNode.GetFiledID2() - 1 : 0; + int64 offset = layout[i2].GetOffset() - layout[i1].GetOffset(); + return mirModule.GetMIRBuilder()->CreateIntConst(static_cast(offset), PTY_u32); + } + + case OP_intrinsicop: + if (IsIntrinsicOpHandledAtLowerLevel(static_cast(expr).GetIntrinsic())) { + return &expr; + } + return LowerIntrinsicop(parent, static_cast(expr), blkNode); + + case OP_alloca: { + GetCurrentFunc()->SetVlaOrAlloca(true); + return &expr; + } + case OP_rem: + return LowerRem(expr, blkNode); + + case OP_cand: + expr.SetOpCode(OP_land); + return SplitBinaryNodeOpnd1(static_cast(expr), blkNode); + case OP_cior: + expr.SetOpCode(OP_lior); + return SplitBinaryNodeOpnd1(static_cast(expr), blkNode); + case OP_cvt: + case OP_retype: + case OP_zext: + case OP_sext: + return LowerCastExpr(expr); + case OP_extractbits: + return LowerExtractBits(static_cast(expr)); + default: + return &expr; + } +} + +BaseNode *CGLowerer::LowerDread(DreadNode &dread) { + /* use PTY_u8 for boolean type in dread/iread */ + if (dread.GetPrimType() == PTY_u1) { + dread.SetPrimType(PTY_u8); + } + auto *result = LowerDreadToThreadLocal(dread); + if (result->GetOpCode() == OP_iread) { + return LowerIread(static_cast(*result)); + } else if (dread.GetFieldID() != 0) { + return LowerDreadBitfield(static_cast(*result)); + } + return result; +} + +void CGLowerer::LowerRegassign(RegassignNode ®Node, BlockNode &newBlk) { + CHECK_FATAL(regNode.GetPrimType() != PTY_ptr, "should have been lowered already"); + CHECK_FATAL(regNode.GetPrimType() != PTY_ref, "should have been lowered already"); + BaseNode *rhsOpnd = regNode.Opnd(0); + Opcode op = rhsOpnd->GetOpCode(); + if ((op == OP_gcmalloc) || (op == OP_gcpermalloc)) { + LowerGCMalloc(regNode, static_cast(*rhsOpnd), newBlk, op == OP_gcpermalloc); + return; + } else if ((op == OP_gcmallocjarray) || (op == OP_gcpermallocjarray)) { + LowerJarrayMalloc(regNode, static_cast(*rhsOpnd), newBlk, op == OP_gcpermallocjarray); + return; + } else { + regNode.SetOpnd(LowerExpr(regNode, *rhsOpnd, newBlk), 0); + newBlk.AddStatement(®Node); + } +} + +BaseNode *CGLowerer::ExtractSymbolAddress(const StIdx &stIdx) { + auto builder = mirModule.GetMIRBuilder(); + return builder->CreateExprAddrof(0, stIdx); +} + +BaseNode *CGLowerer::LowerDreadToThreadLocal(BaseNode &expr) { + auto *result = &expr; + if (expr.GetOpCode() != maple::OP_dread) { + return result; + } + uint32 oldTypeTableSize = GlobalTables::GetTypeTable().GetTypeTableSize(); + auto dread = static_cast(expr); + StIdx stIdx = dread.GetStIdx(); + MIRSymbol *symbol = stIdx.IsGlobal() ? + GlobalTables::GetGsymTable().GetSymbolFromStidx(stIdx.Idx()) : + GetCurrentFunc()->GetSymbolTabItem(stIdx.Idx()); + ASSERT(symbol != nullptr, "symbol should not be nullptr"); + if (symbol->IsThreadLocal()) { + // iread <* u32> 0 (regread u64 %addr) + auto addr = ExtractSymbolAddress(stIdx); + auto fieldId = dread.GetFieldID(); + auto symbolType = symbol->GetType(); + auto readType = fieldId == 0 ? 
symbolType : static_cast(symbolType)->GetFieldType(fieldId); + auto ptrType = GlobalTables::GetTypeTable().GetOrCreatePointerType(*symbolType); + auto iread = mirModule.GetMIRBuilder()->CreateExprIread(*readType, *ptrType, fieldId, addr); + result = iread; + } + uint32 newTypeTableSize = GlobalTables::GetTypeTable().GetTypeTableSize(); + if (newTypeTableSize != oldTypeTableSize) { + beCommon.AddNewTypeAfterBecommon(oldTypeTableSize, newTypeTableSize); + } + return result; +} + +StmtNode *CGLowerer::LowerDassignToThreadLocal(StmtNode &stmt) { + StmtNode *result = &stmt; + if (stmt.GetOpCode() != maple::OP_dassign) { + return result; + } + uint32 oldTypeTableSize = GlobalTables::GetTypeTable().GetTypeTableSize(); + auto dAssign = static_cast(stmt); + StIdx stIdx = dAssign.GetStIdx(); + if (!stIdx.IsGlobal()) { + return result; + } + MIRSymbol *symbol = GlobalTables::GetGsymTable().GetSymbolFromStidx(stIdx.Idx()); + ASSERT(symbol != nullptr, "symbol should not be nullptr"); + if (symbol->IsThreadLocal()) { + // iassign <* u32> 0 (regread u64 %addr, dread u32 $x) + auto addr = ExtractSymbolAddress(stIdx); + auto ptrType = GlobalTables::GetTypeTable().GetOrCreatePointerType(*symbol->GetType()); + auto iassign = mirModule.GetMIRBuilder()->CreateStmtIassign(*ptrType, dAssign.GetFieldID(), addr, dAssign.GetRHS()); + result = iassign; + } + uint32 newTypeTableSize = GlobalTables::GetTypeTable().GetTypeTableSize(); + if (newTypeTableSize != oldTypeTableSize) { + beCommon.AddNewTypeAfterBecommon(oldTypeTableSize, newTypeTableSize); + } + return result; +} + +void CGLowerer::LowerDassign(DassignNode &dsNode, BlockNode &newBlk) { + StmtNode *newStmt = nullptr; + BaseNode *rhs = nullptr; + Opcode op = dsNode.GetRHS()->GetOpCode(); + if (dsNode.GetFieldID() != 0) { + newStmt = LowerDassignBitfield(dsNode, newBlk); + } else if (op == OP_intrinsicop) { + IntrinsicopNode *intrinNode = static_cast(dsNode.GetRHS()); + MIRType *retType = IntrinDesc::intrinTable[intrinNode->GetIntrinsic()].GetReturnType(); + CHECK_FATAL(retType != nullptr, "retType should not be nullptr"); + if (retType->GetKind() == kTypeStruct) { + newStmt = LowerIntrinsicopDassign(dsNode, *intrinNode, newBlk); + } else { + rhs = LowerExpr(dsNode, *intrinNode, newBlk); + dsNode.SetRHS(rhs); + CHECK_FATAL(dsNode.GetRHS() != nullptr, "dsNode->rhs is null in CGLowerer::LowerDassign"); + if (!IsDassignNOP(dsNode)) { + newStmt = &dsNode; + } + } + } else if ((op == OP_gcmalloc) || (op == OP_gcpermalloc)) { + LowerGCMalloc(dsNode, static_cast(*dsNode.GetRHS()), newBlk, op == OP_gcpermalloc); + return; + } else if ((op == OP_gcmallocjarray) || (op == OP_gcpermallocjarray)) { + LowerJarrayMalloc(dsNode, static_cast(*dsNode.GetRHS()), newBlk, op == OP_gcpermallocjarray); + return; + } else { + rhs = LowerExpr(dsNode, *dsNode.GetRHS(), newBlk); + dsNode.SetRHS(rhs); + newStmt = &dsNode; + } + + if (newStmt != nullptr) { + newBlk.AddStatement(LowerDassignToThreadLocal(*newStmt)); + } +} + +// Lower stmt Form +// Initial form: decrefreset (addrof ptr %RegX_RXXXX) +// Convert to form: dassign %RegX_RXXXX 0 (constval ref 0) +// Final form: str xzr, [x29,#XX] +void CGLowerer::LowerResetStmt(StmtNode &stmt, BlockNode &block) { + UnaryStmtNode &unaryStmtNode = static_cast(stmt); + AddrofNode *addrofNode = static_cast(unaryStmtNode.GetRHS()); + MIRType &type = *GlobalTables::GetTypeTable().GetPrimType(PTY_ref); + MIRConst *constVal = GlobalTables::GetIntConstTable().GetOrCreateIntConst(0, type); + ConstvalNode *exprConst = 
mirModule.CurFuncCodeMemPool()->New(); + exprConst->SetPrimType(type.GetPrimType()); + exprConst->SetConstVal(constVal); + DassignNode *dassignNode = mirModule.CurFuncCodeMemPool()->New(); + dassignNode->SetStIdx(addrofNode->GetStIdx()); + dassignNode->SetRHS(exprConst); + dassignNode->SetFieldID(addrofNode->GetFieldID()); + block.AddStatement(dassignNode); +} + +StmtNode *CGLowerer::LowerIntrinsicopDassign(const DassignNode &dsNode, + IntrinsicopNode &intrinNode, BlockNode &newBlk) { + for (size_t i = 0; i < intrinNode.GetNumOpnds(); ++i) { + ASSERT(intrinNode.Opnd(i) != nullptr, "intrinNode.Opnd(i) should not be nullptr"); + intrinNode.SetOpnd(LowerExpr(intrinNode, *intrinNode.Opnd(i), newBlk), i); + } + MIRIntrinsicID intrnID = intrinNode.GetIntrinsic(); + IntrinDesc *intrinDesc = &IntrinDesc::intrinTable[intrnID]; + MIRSymbol *st = GlobalTables::GetGsymTable().CreateSymbol(kScopeGlobal); + const std::string name = intrinDesc->name; + CHECK_FATAL(intrinDesc->name != nullptr, "intrinDesc's name should not be nullptr"); + st->SetNameStrIdx(name); + st->SetStorageClass(kScText); + st->SetSKind(kStFunc); + MIRFunction *fn = mirModule.GetMemPool()->New(&mirModule, st->GetStIdx()); + MapleVector &nOpnds = intrinNode.GetNopnd(); + st->SetFunction(fn); + std::vector fnTyVec; + std::vector fnTaVec; + CHECK_FATAL(intrinDesc->IsJsOp(), "intrinDesc should be JsOp"); + /* setup parameters */ + for (uint32 i = 0; i < nOpnds.size(); ++i) { + fnTyVec.emplace_back(GlobalTables::GetTypeTable().GetTypeFromTyIdx(PTY_a32)->GetTypeIndex()); + fnTaVec.emplace_back(TypeAttrs()); + BaseNode *addrNode = beCommon.GetAddressOfNode(*nOpnds[i]); + CHECK_FATAL(addrNode != nullptr, "addrNode should not be nullptr"); + nOpnds[i] = addrNode; + } + MIRSymbol *dst = mirModule.CurFunction()->GetLocalOrGlobalSymbol(dsNode.GetStIdx()); + MIRType *ty = dst->GetType(); + MIRType *fnType = beCommon.BeGetOrCreateFunctionType(ty->GetTypeIndex(), fnTyVec, fnTaVec); + st->SetTyIdx(fnType->GetTypeIndex()); + fn->SetMIRFuncType(static_cast(fnType)); + fn->SetReturnTyIdx(ty->GetTypeIndex()); + CHECK_FATAL(ty->GetKind() == kTypeStruct, "ty's kind should be struct type"); + CHECK_FATAL(dsNode.GetFieldID() == 0, "dsNode's filedId should equal"); + AddrofNode *addrofNode = mirBuilder->CreateAddrof(*dst, PTY_a32); + MapleVector newOpnd(mirModule.CurFuncCodeMemPoolAllocator()->Adapter()); + newOpnd.emplace_back(addrofNode); + (void)newOpnd.insert(newOpnd.cend(), nOpnds.cbegin(), nOpnds.cend()); + CallNode *callStmt = mirModule.CurFuncCodeMemPool()->New(mirModule, OP_call); + callStmt->SetPUIdx(st->GetFunction()->GetPuidx()); + callStmt->SetNOpnd(newOpnd); + return callStmt; +} + +/* From maple_ir/include/dex2mpl/dexintrinsic.def + * JAVA_ARRAY_LENGTH + * JAVA_ARRAY_FILL + * JAVA_FILL_NEW_ARRAY + * JAVA_CHECK_CAST + * JAVA_CONST_CLASS + * JAVA_INSTANCE_OF + * JAVA_MERGE + * JAVA_RANDOM + * #if DEXHACK + * JAVA_PRINTLN + * #endif + * INTRN_<> + * intrinsic + */ +BaseNode *CGLowerer::LowerJavascriptIntrinsicop(IntrinsicopNode &intrinNode, const IntrinDesc &desc) { + MIRSymbol *st = GlobalTables::GetGsymTable().CreateSymbol(kScopeGlobal); + CHECK_FATAL(desc.name != nullptr, "desc's name should not be nullptr"); + const std::string name = desc.name; + st->SetNameStrIdx(name); + st->SetStorageClass(kScText); + st->SetSKind(kStFunc); + MIRFunction *fn = mirModule.GetMemPool()->New(&mirModule, st->GetStIdx()); + MapleVector &nOpnds = intrinNode.GetNopnd(); + st->SetFunction(fn); + std::vector fnTyVec; + std::vector fnTaVec; + 
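+  /*
+   * Note: JS intrinsics are lowered to plain calls with a by-address
+   * convention: each operand below is replaced by its address and passed as
+   * an a32 pointer, and a struct return value is realized by prepending the
+   * address of a temporary local as the first call argument and dreading
+   * that temporary after the call.
+   */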
CHECK_FATAL(desc.IsJsOp(), "desc should be jsOp"); + /* setup parameters */ + for (uint32 i = 0; i < nOpnds.size(); ++i) { + fnTyVec.emplace_back(GlobalTables::GetTypeTable().GetTypeFromTyIdx(PTY_a32)->GetTypeIndex()); + fnTaVec.emplace_back(TypeAttrs()); + BaseNode *addrNode = beCommon.GetAddressOfNode(*nOpnds[i]); + CHECK_FATAL(addrNode != nullptr, "can not get address"); + nOpnds[i] = addrNode; + } + + MIRType *retType = desc.GetReturnType(); + CHECK_FATAL(retType != nullptr, "retType should not be nullptr"); + if (retType->GetKind() == kTypeStruct) { + /* create a local symbol and dread it; */ + std::string tmpstr("__ret_struct_tmp_st"); + static uint32 tmpIdx = 0; + tmpstr.append(std::to_string(tmpIdx++)); + MIRSymbol *tmpSt = mirBuilder->GetOrCreateDeclInFunc(tmpstr, *retType, *mirModule.CurFunction()); + MIRType *fnType = beCommon.BeGetOrCreateFunctionType(retType->GetTypeIndex(), fnTyVec, fnTaVec); + st->SetTyIdx(fnType->GetTypeIndex()); + fn->SetMIRFuncType(static_cast(fnType)); + AddrofNode *addrofNode = mirBuilder->CreateAddrof(*tmpSt, PTY_a32); + MapleVector newOpnd(mirModule.CurFuncCodeMemPoolAllocator()->Adapter()); + newOpnd.emplace_back(addrofNode); + (void)newOpnd.insert(newOpnd.cend(), nOpnds.cbegin(), nOpnds.cend()); + CallNode *callStmt = mirModule.CurFuncCodeMemPool()->New(mirModule, OP_call); + callStmt->SetPUIdx(st->GetFunction()->GetPuidx()); + callStmt->SetNOpnd(newOpnd); + currentBlock->AddStatement(callStmt); + /* return the dread */ + AddrofNode *drRetSt = mirBuilder->CreateDread(*tmpSt, PTY_agg); + return drRetSt; + } + CHECK_FATAL(st->GetStIdx().FullIdx() != 0, "the fullIdx of st's stIdx should not equal 0"); + CallNode *callStmt = static_cast(mirBuilder->CreateStmtCall(st->GetStIdx().FullIdx(), nOpnds)); + currentBlock->AddStatement(callStmt); + PrimType promotedPrimType = intrinNode.GetPrimType() == PTY_u1 ? 
PTY_u32 : intrinNode.GetPrimType(); + BaseNode *drRetSt = mirBuilder->CreateExprRegread(promotedPrimType, -kSregRetval0); + /* + * for safty dassign the return value to a register and return the dread to that register + * to avoid such code: + * call $__js_int32 (addrof ptr %temp_var_8 0) + * call $__jsop_getelem (addrof a32 %temp_var_9 0, addrof a32 $arr 0, dread i32 %%retval 0) + * for many target, the first actual parameter and return value would use R0, which would cause the above + * case fail + */ + PregIdx tmpRegIdx = GetCurrentFunc()->GetPregTab()->CreatePreg(promotedPrimType); + RegassignNode *dstoReg = mirBuilder->CreateStmtRegassign(promotedPrimType, tmpRegIdx, drRetSt); + currentBlock->AddStatement(dstoReg); + RegreadNode *outDsNode = mirBuilder->CreateExprRegread(promotedPrimType, tmpRegIdx); + return outDsNode; +} + +StmtNode *CGLowerer::CreateStmtCallWithReturnValue(const IntrinsicopNode &intrinNode, const MIRSymbol &ret, + PUIdx bFunc, BaseNode *extraInfo) const { + MapleVector args(mirBuilder->GetCurrentFuncCodeMpAllocator()->Adapter()); + for (size_t i = 0; i < intrinNode.NumOpnds(); ++i) { + args.emplace_back(intrinNode.Opnd(i)); + } + if (extraInfo != nullptr) { + args.emplace_back(extraInfo); + } + return mirBuilder->CreateStmtCallAssigned(bFunc, args, &ret, OP_callassigned); +} + +StmtNode *CGLowerer::CreateStmtCallWithReturnValue(const IntrinsicopNode &intrinNode, PregIdx retPregIdx, PUIdx bFunc, + BaseNode *extraInfo) const { + MapleVector args(mirBuilder->GetCurrentFuncCodeMpAllocator()->Adapter()); + for (size_t i = 0; i < intrinNode.NumOpnds(); ++i) { + args.emplace_back(intrinNode.Opnd(i)); + } + if (extraInfo != nullptr) { + args.emplace_back(extraInfo); + } + return mirBuilder->CreateStmtCallRegassigned(bFunc, args, retPregIdx, OP_callassigned); +} + +BaseNode *CGLowerer::LowerIntrinJavaMerge(const BaseNode &parent, IntrinsicopNode &intrinNode) const { + BaseNode *resNode = &intrinNode; + CHECK_FATAL(intrinNode.GetNumOpnds() > 0, "invalid JAVA_MERGE intrinsic node"); + BaseNode *candidate = intrinNode.Opnd(0); + ASSERT(candidate != nullptr, "candidate should not be nullptr"); + resNode = candidate; + if (parent.GetOpCode() == OP_regassign) { + PrimType sTyp = resNode->GetPrimType(); + auto ®Assign = static_cast(parent); + PrimType pType = GetCurrentFunc()->GetPregTab()->PregFromPregIdx(regAssign.GetRegIdx())->GetPrimType(); + if (sTyp != pType) { + resNode = MergeToCvtType(pType, sTyp, *resNode); + } + return resNode; + } + if (parent.GetOpCode() == OP_dassign) { + auto &dassign = static_cast(parent); + if (candidate->GetOpCode() == OP_constval) { + MIRSymbol *dest = GetCurrentFunc()->GetLocalOrGlobalSymbol(dassign.GetStIdx()); + MIRType *toType = dest->GetType(); + PrimType dTyp = toType->GetPrimType(); + PrimType sTyp = resNode->GetPrimType(); + if (dTyp != sTyp) { + resNode = MergeToCvtType(dTyp, sTyp, *resNode); + } + return resNode; + } + CHECK_FATAL((candidate->GetOpCode() == OP_dread) || (candidate->GetOpCode() == OP_regread), + "candidate's opcode should be OP_dread or OP_regread"); + bool differentLocation = + (candidate->GetOpCode() == OP_dread) + ? !IsAccessingTheSameMemoryLocation(dassign, static_cast(*candidate)) + : !IsAccessingTheSameMemoryLocation(dassign, static_cast(*candidate), *this); + if (differentLocation) { + bool simpleMove = false; + /* res_node already contains the 0-th operand. 
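+       * The loop below scans the remaining operands for one that refers to
+       * the same memory location as the dassign destination; if one is found
+       * the merge degenerates to a simple move of that operand, otherwise a
+       * type conversion may still have to be inserted.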
*/ + for (size_t i = 1; i < intrinNode.GetNumOpnds(); ++i) { + candidate = intrinNode.Opnd(i); + ASSERT(candidate != nullptr, "candidate should not be nullptr"); + bool sameLocation = + (candidate->GetOpCode() == OP_dread) + ? IsAccessingTheSameMemoryLocation(dassign, static_cast(*candidate)) + : IsAccessingTheSameMemoryLocation(dassign, static_cast(*candidate), *this); + if (sameLocation) { + simpleMove = true; + resNode = candidate; + break; + } + } + if (!simpleMove) { + /* if source and destination types don't match, insert 'retype' */ + MIRSymbol *dest = GetCurrentFunc()->GetLocalOrGlobalSymbol(dassign.GetStIdx()); + MIRType *toType = dest->GetType(); + PrimType dTyp = toType->GetPrimType(); + CHECK_FATAL((dTyp != PTY_agg) && (dassign.GetFieldID() <= 0), + "dType should not be PTY_agg and dassign's filedId <= 0"); + PrimType sType = resNode->GetPrimType(); + if (dTyp != sType) { + resNode = MergeToCvtType(dTyp, sType, *resNode); + } + } + } + return resNode; + } + CHECK_FATAL(false, "should not run here"); + return resNode; +} + +BaseNode *CGLowerer::LowerIntrinJavaArrayLength(const BaseNode &parent, IntrinsicopNode &intrinNode) { + BaseNode *resNode = &intrinNode; + PUIdx bFunc = GetBuiltinToUse(intrinNode.GetIntrinsic()); + CHECK_FATAL(bFunc != kFuncNotFound, "bFunc should not be kFuncNotFound"); + MIRFunction *biFunc = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(bFunc); + + BaseNode *arrAddr = intrinNode.Opnd(0); + ASSERT(arrAddr != nullptr, "arrAddr should not be nullptr"); + if (((arrAddr->GetPrimType() == PTY_a64) || (arrAddr->GetPrimType() == PTY_ref)) && + ((parent.GetOpCode() == OP_regassign) || (parent.GetOpCode() == OP_dassign) || (parent.GetOpCode() == OP_ge))) { + MIRType *addrType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(TyIdx(arrAddr->GetPrimType())); + MIRIntConst *arrayHeaderNode = GlobalTables::GetIntConstTable().GetOrCreateIntConst( + static_cast(RTSupport::GetRTSupportInstance().GetArrayLengthOffset()), *addrType); + BaseNode *arrayHeaderCstNode = mirModule.CurFuncCodeMemPool()->New(arrayHeaderNode); + arrayHeaderCstNode->SetPrimType(arrAddr->GetPrimType()); + + BaseNode *refLenAddr = mirBuilder->CreateExprBinary(OP_add, *addrType, arrAddr, arrayHeaderCstNode); + MIRType *infoLenType = GlobalTables::GetTypeTable().GetInt32(); + MIRType *ptrType = beCommon.BeGetOrCreatePointerType(*infoLenType); + resNode = mirBuilder->CreateExprIread(*infoLenType, *ptrType, 0, refLenAddr); + auto curFunc = mirModule.CurFunction(); + std::string suffix = std::to_string(curFunc->GetLabelTab()->GetLabelTableSize()); + GStrIdx labelStrIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName("__label_nonnull_" + suffix); + LabelIdx labIdx = curFunc->GetLabelTab()->AddLabel(labelStrIdx); + LabelNode *labelNonNull = mirBuilder->CreateStmtLabel(labIdx); + + BaseNode *cond = mirBuilder->CreateExprCompare(OP_ne, + *GlobalTables::GetTypeTable().GetUInt1(), + *GlobalTables::GetTypeTable().GetRef(), arrAddr, + mirBuilder->CreateIntConst(0, PTY_ref)); + CondGotoNode *brtureNode = mirBuilder->CreateStmtCondGoto(cond, OP_brtrue, labIdx); + + MIRFunction *newFunc = + mirBuilder->GetOrCreateFunction("MCC_ThrowNullArrayNullPointerException", + GlobalTables::GetTypeTable().GetVoid()->GetTypeIndex()); + newFunc->GetFuncSymbol()->SetAppearsInCode(true); + beCommon.UpdateTypeTable(*newFunc->GetMIRFuncType()); + newFunc->AllocSymTab(); + MapleVector args(mirBuilder->GetCurrentFuncCodeMpAllocator()->Adapter()); + StmtNode *call = mirBuilder->CreateStmtCallAssigned(newFunc->GetPuidx(), 
args, nullptr, OP_callassigned); + + currentBlock->AddStatement(brtureNode); + currentBlock->AppendStatementsFromBlock(*LowerCallAssignedStmt(*call)); + currentBlock->AddStatement(labelNonNull); + return resNode; + } + + if (parent.GetOpCode() == OP_regassign) { + auto ®Assign = static_cast(parent); + StmtNode *biCall = CreateStmtCallWithReturnValue(intrinNode, regAssign.GetRegIdx(), bFunc); + currentBlock->AppendStatementsFromBlock(*LowerCallAssignedStmt(*biCall)); + PrimType pType = GetCurrentFunc()->GetPregTab()->PregFromPregIdx(regAssign.GetRegIdx())->GetPrimType(); + resNode = mirBuilder->CreateExprRegread(pType, regAssign.GetRegIdx()); + return resNode; + } + + if (parent.GetOpCode() == OP_dassign) { + auto &dassign = static_cast(parent); + MIRSymbol *ret = GetCurrentFunc()->GetLocalOrGlobalSymbol(dassign.GetStIdx()); + StmtNode *biCall = CreateStmtCallWithReturnValue(intrinNode, *ret, bFunc); + currentBlock->AppendStatementsFromBlock(*LowerCallAssignedStmt(*biCall)); + resNode = mirBuilder->CreateExprDread(*biFunc->GetReturnType(), 0, *ret); + return resNode; + } + CHECK_FATAL(false, "should not run here"); + return resNode; +} + +BaseNode *CGLowerer::LowerIntrinsicop(const BaseNode &parent, IntrinsicopNode &intrinNode) { + BaseNode *resNode = &intrinNode; + if (intrinNode.GetIntrinsic() == INTRN_JAVA_MERGE) { + resNode = LowerIntrinJavaMerge(parent, intrinNode); + } else if (intrinNode.GetIntrinsic() == INTRN_JAVA_ARRAY_LENGTH) { + resNode = LowerIntrinJavaArrayLength(parent, intrinNode); + } + + return resNode; +} + +void CGLowerer::ProcessClassInfo(MIRType &classType, bool &classInfoFromRt, std::string &classInfo) const { + MIRPtrType &ptrType = static_cast(classType); + MIRType *pType = ptrType.GetPointedType(); + CHECK_FATAL(pType != nullptr, "Class type not found for INTRN_JAVA_CONST_CLASS"); + MIRType *typeScalar = nullptr; + + if (pType->GetKind() == kTypeScalar) { + typeScalar = pType; + } else if (classType.GetKind() == kTypeScalar) { + typeScalar = &classType; + } + if (typeScalar != nullptr) { + std::string eName(GetPrimTypeJavaName(typeScalar->GetPrimType())); + classInfo = PRIMITIVECLASSINFO_PREFIX_STR + eName; + } + if ((pType->GetKind() == kTypeByName) || (pType->GetKind() == kTypeClass) || (pType->GetKind() == kTypeInterface)) { + MIRStructType *classTypeSecond = static_cast(pType); + classInfo = CLASSINFO_PREFIX_STR + classTypeSecond->GetName(); + } else if ((pType->GetKind() == kTypeArray) || (pType->GetKind() == kTypeJArray)) { + MIRJarrayType *jarrayType = static_cast(pType); + CHECK_FATAL(jarrayType != nullptr, "jarrayType is null in CGLowerer::LowerIntrinsicopWithType"); + std::string baseName = jarrayType->GetJavaName(); + if (jarrayType->IsPrimitiveArray() && (jarrayType->GetDim() <= kThreeDimArray)) { + classInfo = PRIMITIVECLASSINFO_PREFIX_STR + baseName; + } else if (arrayNameForLower::kArrayBaseName.find(baseName) != arrayNameForLower::kArrayBaseName.end()) { + classInfo = CLASSINFO_PREFIX_STR + baseName; + } else { + classInfoFromRt = true; + classInfo = baseName; + } + } +} + +BaseNode *CGLowerer::GetBaseNodeFromCurFunc(MIRFunction &curFunc, bool isFromJarray) { + BaseNode *baseNode = nullptr; + if (curFunc.IsStatic()) { + /* + * it's a static function. 
+ * pass caller functions's classinfo directly + */ + std::string callerName = CLASSINFO_PREFIX_STR; + callerName += mirModule.CurFunction()->GetBaseClassName(); + GStrIdx strIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(callerName); + MIRSymbol *callerClassInfoSym = GlobalTables::GetGsymTable().GetSymbolFromStrIdx(strIdx); + if (callerClassInfoSym == nullptr) { + if (isFromJarray) { + MIRType *mType = GlobalTables::GetTypeTable().GetVoidPtr(); + CHECK_FATAL(mType != nullptr, "type is null in CGLowerer::LowerJarrayMalloc"); + callerClassInfoSym = mirBuilder->CreateGlobalDecl(callerName.c_str(), *mType); + callerClassInfoSym->SetStorageClass(kScExtern); + } else { + callerClassInfoSym = GlobalTables::GetGsymTable().CreateSymbol(kScopeGlobal); + callerClassInfoSym->SetNameStrIdx(strIdx); + callerClassInfoSym->SetStorageClass(kScGlobal); + callerClassInfoSym->SetSKind(kStVar); + /* it must be a local symbol */ + GlobalTables::GetGsymTable().AddToStringSymbolMap(*callerClassInfoSym); + callerClassInfoSym->SetTyIdx(static_cast(PTY_ptr)); + } + } + + baseNode = mirBuilder->CreateExprAddrof(0, *callerClassInfoSym); + } else { + /* + * it's an instance function. + * pass caller function's this pointer + */ + CHECK_FATAL(curFunc.GetFormalCount() != 0, "index out of range in CGLowerer::GetBaseNodeFromCurFunc"); + MIRSymbol *formalSt = curFunc.GetFormal(0); + if (formalSt->IsPreg()) { + if (isFromJarray) { + baseNode = mirBuilder->CreateExprRegread(formalSt->GetType()->GetPrimType(), + curFunc.GetPregTab()->GetPregIdxFromPregno(formalSt->GetPreg()->GetPregNo())); + } else { + CHECK_FATAL(curFunc.GetParamSize() != 0, "index out of range in CGLowerer::GetBaseNodeFromCurFunc"); + baseNode = mirBuilder->CreateExprRegread((curFunc.GetNthParamType(0))->GetPrimType(), + curFunc.GetPregTab()->GetPregIdxFromPregno(formalSt->GetPreg()->GetPregNo())); + } + } else { + baseNode = mirBuilder->CreateExprDread(*formalSt); + } + } + return baseNode; +} + +BaseNode *CGLowerer::GetClassInfoExprFromRuntime(const std::string &classInfo) { + /* + * generate runtime call to get class information + * jclass __mrt_getclass(jobject caller, const char *name) + * if the calling function is an instance function, it's the calling obj + * if the calling function is a static function, it's the calling class + */ + BaseNode *classInfoExpr = nullptr; + PUIdx getClassFunc = GetBuiltinToUse(INTRN_JAVA_GET_CLASS); + CHECK_FATAL(getClassFunc != kFuncNotFound, "classfunc is not found"); + /* return jclass */ + MIRType *voidPtrType = GlobalTables::GetTypeTable().GetPtr(); + MIRSymbol *ret0 = CreateNewRetVar(*voidPtrType, kIntrnRetValPrefix); + + BaseNode *arg0 = GetBaseNodeFromCurFunc(*mirModule.CurFunction(), false); + BaseNode *arg1 = nullptr; + /* classname */ + std::string klassJavaDescriptor; + namemangler::DecodeMapleNameToJavaDescriptor(classInfo, klassJavaDescriptor); + UStrIdx classNameStrIdx = GlobalTables::GetUStrTable().GetOrCreateStrIdxFromName(klassJavaDescriptor); + arg1 = mirModule.GetMemPool()->New(classNameStrIdx); + arg1->SetPrimType(PTY_ptr); + + MapleVector args(mirBuilder->GetCurrentFuncCodeMpAllocator()->Adapter()); + args.emplace_back(arg0); + args.emplace_back(arg1); + StmtNode *getClassCall = mirBuilder->CreateStmtCallAssigned(getClassFunc, args, ret0, OP_callassigned); + currentBlock->AppendStatementsFromBlock(*LowerCallAssignedStmt(*getClassCall)); + classInfoExpr = mirBuilder->CreateExprDread(*voidPtrType, 0, *ret0); + return classInfoExpr; +} + +BaseNode 
*CGLowerer::GetClassInfoExprFromArrayClassCache(const std::string &classInfo) { + std::string klassJavaDescriptor; + namemangler::DecodeMapleNameToJavaDescriptor(classInfo, klassJavaDescriptor); + if (arrayClassCacheIndex.find(klassJavaDescriptor) == arrayClassCacheIndex.end()) { + return nullptr; + } + GStrIdx strIdx = GlobalTables::GetStrTable().GetStrIdxFromName( + namemangler::kArrayClassCacheTable + mirModule.GetFileNameAsPostfix()); + MIRSymbol *arrayClassSt = GlobalTables::GetGsymTable().GetSymbolFromStrIdx(strIdx); + if (arrayClassSt == nullptr) { + return nullptr; + } + auto index = arrayClassCacheIndex[klassJavaDescriptor]; +#ifdef USE_32BIT_REF + const int32 width = 4; +#else + const int32 width = 8; +#endif /* USE_32BIT_REF */ + int64 offset = static_cast(index) * width; + ConstvalNode *offsetExpr = mirBuilder->CreateIntConst(static_cast(offset), PTY_u32); + AddrofNode *baseExpr = mirBuilder->CreateExprAddrof(0, *arrayClassSt, mirModule.GetMemPool()); + MapleVector args(mirBuilder->GetCurrentFuncCodeMpAllocator()->Adapter()); + args.emplace_back(baseExpr); + args.emplace_back(offsetExpr); + return mirBuilder->CreateExprIntrinsicop(INTRN_MPL_READ_ARRAYCLASS_CACHE_ENTRY, OP_intrinsicop, + *GlobalTables::GetTypeTable().GetPrimType(PTY_ref), args); +} + +BaseNode *CGLowerer::GetClassInfoExpr(const std::string &classInfo) const { + BaseNode *classInfoExpr = nullptr; + GStrIdx strIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(classInfo); + MIRSymbol *classInfoSym = GlobalTables::GetGsymTable().GetSymbolFromStrIdx(strIdx); + if (classInfoSym != nullptr) { + classInfoExpr = mirBuilder->CreateExprAddrof(0, *classInfoSym); + } else { + classInfoSym = GlobalTables::GetGsymTable().CreateSymbol(kScopeGlobal); + classInfoSym->SetNameStrIdx(strIdx); + classInfoSym->SetStorageClass(kScGlobal); + classInfoSym->SetSKind(kStVar); + if (CGOptions::IsPIC()) { + classInfoSym->SetStorageClass(kScExtern); + } else { + classInfoSym->SetAttr(ATTR_weak); + } + GlobalTables::GetGsymTable().AddToStringSymbolMap(*classInfoSym); + classInfoSym->SetTyIdx(static_cast(PTY_ptr)); + + classInfoExpr = mirBuilder->CreateExprAddrof(0, *classInfoSym); + } + return classInfoExpr; +} + +BaseNode *CGLowerer::LowerIntrinsicopWithType(const BaseNode &parent, IntrinsicopNode &intrinNode) { + BaseNode *resNode = &intrinNode; + if ((intrinNode.GetIntrinsic() == INTRN_JAVA_CONST_CLASS) || (intrinNode.GetIntrinsic() == INTRN_JAVA_INSTANCE_OF)) { + PUIdx bFunc = GetBuiltinToUse(intrinNode.GetIntrinsic()); + CHECK_FATAL(bFunc != kFuncNotFound, "bFunc not founded"); + MIRFunction *biFunc = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(bFunc); + MIRType *classType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(intrinNode.GetTyIdx()); + std::string classInfo; + BaseNode *classInfoExpr = nullptr; + bool classInfoFromRt = false; /* whether the classinfo is generated by RT */ + ProcessClassInfo(*classType, classInfoFromRt, classInfo); + if (classInfoFromRt) { + classInfoExpr = GetClassInfoExprFromArrayClassCache(classInfo); + if (classInfoExpr == nullptr) { + classInfoExpr = GetClassInfoExprFromRuntime(classInfo); + } + } else { + classInfoExpr = GetClassInfoExpr(classInfo); + LowerTypePtr(*classInfoExpr); + } + + if (intrinNode.GetIntrinsic() == INTRN_JAVA_CONST_CLASS) { + CHECK_FATAL(classInfoExpr != nullptr, "classInfoExpr should not be nullptr"); + if ((classInfoExpr->GetPrimType() == PTY_ptr) || (classInfoExpr->GetPrimType() == PTY_ref)) { + classInfoExpr->SetPrimType(GetLoweredPtrType()); + } + resNode = 
classInfoExpr; + return resNode; + } + + if (parent.GetOpCode() == OP_regassign) { + auto ®Assign = static_cast(parent); + StmtNode *biCall = CreateStmtCallWithReturnValue(intrinNode, regAssign.GetRegIdx(), bFunc, classInfoExpr); + currentBlock->AppendStatementsFromBlock(*LowerCallAssignedStmt(*biCall)); + PrimType pTyp = GetCurrentFunc()->GetPregTab()->PregFromPregIdx(regAssign.GetRegIdx())->GetPrimType(); + resNode = mirBuilder->CreateExprRegread(pTyp, regAssign.GetRegIdx()); + return resNode; + } + + if (parent.GetOpCode() == OP_dassign) { + auto &dassign = static_cast(parent); + MIRSymbol *ret = GetCurrentFunc()->GetLocalOrGlobalSymbol(dassign.GetStIdx()); + StmtNode *biCall = CreateStmtCallWithReturnValue(intrinNode, *ret, bFunc, classInfoExpr); + currentBlock->AppendStatementsFromBlock(*LowerCallAssignedStmt(*biCall)); + resNode = mirBuilder->CreateExprDread(*biFunc->GetReturnType(), 0, *ret); + return resNode; + } + CHECK_FATAL(false, "should not run here"); + } + CHECK_FATAL(false, "should not run here"); + return resNode; +} + +BaseNode *CGLowerer::LowerIntrinsicop(const BaseNode &parent, IntrinsicopNode &intrinNode, BlockNode &newBlk) { + for (size_t i = 0; i < intrinNode.GetNumOpnds(); ++i) { + intrinNode.SetOpnd(LowerExpr(intrinNode, *intrinNode.Opnd(i), newBlk), i); + } + + MIRIntrinsicID intrnID = intrinNode.GetIntrinsic(); + IntrinDesc &intrinDesc = IntrinDesc::intrinTable[intrnID]; + if (intrinDesc.IsJS()) { + return LowerJavascriptIntrinsicop(intrinNode, intrinDesc); + } + if (intrinDesc.IsJava()) { + return LowerIntrinsicop(parent, intrinNode); + } + if (intrinNode.GetIntrinsic() == INTRN_MPL_READ_OVTABLE_ENTRY_LAZY) { + return &intrinNode; + } + if (intrinNode.GetIntrinsic() == INTRN_MPL_READ_ARRAYCLASS_CACHE_ENTRY) { + return &intrinNode; + } + if (intrnID == INTRN_C_constant_p) { + BaseNode *opnd = intrinNode.Opnd(0); + int64 val = (opnd->op == OP_constval || opnd->op == OP_sizeoftype || + opnd->op == OP_conststr || opnd->op == OP_conststr16) ? 
1 : 0; + return mirModule.GetMIRBuilder()->CreateIntConst(static_cast(val), PTY_i32); + } + if (intrnID == INTRN_C___builtin_expect) { + return intrinNode.Opnd(0); + } + if (intrinDesc.IsVectorOp() || intrinDesc.IsAtomic()) { + return &intrinNode; + } + CHECK_FATAL(false, "unexpected intrinsic type in CGLowerer::LowerIntrinsicop"); + return &intrinNode; +} + +BaseNode *CGLowerer::LowerIntrinsicopwithtype(const BaseNode &parent, IntrinsicopNode &intrinNode, BlockNode &blk) { + for (size_t i = 0; i < intrinNode.GetNumOpnds(); ++i) { + intrinNode.SetOpnd(LowerExpr(intrinNode, *intrinNode.Opnd(i), blk), i); + } + MIRIntrinsicID intrnID = intrinNode.GetIntrinsic(); + IntrinDesc *intrinDesc = &IntrinDesc::intrinTable[intrnID]; + CHECK_FATAL(!intrinDesc->IsJS(), "intrinDesc should not be js"); + if (intrinDesc->IsJava()) { + return LowerIntrinsicopWithType(parent, intrinNode); + } + CHECK_FATAL(false, "should not run here"); + return &intrinNode; +} + +StmtNode *CGLowerer::LowerIntrinsicMplClearStack(const IntrinsiccallNode &intrinCall, BlockNode &newBlk) { + StmtNode *newStmt = mirBuilder->CreateStmtIassign( + *beCommon.BeGetOrCreatePointerType(*GlobalTables::GetTypeTable().GetUInt8()), 0, + intrinCall.Opnd(0), mirBuilder->GetConstUInt8(0)); + newBlk.AddStatement(newStmt); + + BaseNode *length = intrinCall.Opnd(1); + PrimType pType = PTY_i64; + PregIdx pIdx = GetCurrentFunc()->GetPregTab()->CreatePreg(pType); + newStmt = mirBuilder->CreateStmtRegassign(pType, pIdx, mirBuilder->CreateIntConst(1, pType)); + newBlk.AddStatement(newStmt); + MIRFunction *func = GetCurrentFunc(); + + const std::string &name = func->GetName() + std::string("_Lalloca_"); + LabelIdx label1 = GetCurrentFunc()->GetOrCreateLableIdxFromName(name + std::to_string(labelIdx++)); + LabelIdx label2 = GetCurrentFunc()->GetOrCreateLableIdxFromName(name + std::to_string(labelIdx++)); + + newStmt = mirBuilder->CreateStmtGoto(OP_goto, label2); + newBlk.AddStatement(newStmt); + LabelNode *ln = mirBuilder->CreateStmtLabel(label1); + newBlk.AddStatement(ln); + + RegreadNode *regLen = mirBuilder->CreateExprRegread(pType, pIdx); + + BinaryNode *addr = mirBuilder->CreateExprBinary(OP_add, + *GlobalTables::GetTypeTable().GetAddr64(), + intrinCall.Opnd(0), regLen); + + newStmt = mirBuilder->CreateStmtIassign(*beCommon.BeGetOrCreatePointerType(*GlobalTables::GetTypeTable().GetUInt8()), + 0, addr, mirBuilder->GetConstUInt8(0)); + newBlk.AddStatement(newStmt); + + BinaryNode *subLen = mirBuilder->CreateExprBinary( + OP_add, *GlobalTables::GetTypeTable().GetPrimType(pType), regLen, mirBuilder->CreateIntConst(1, pType)); + newStmt = mirBuilder->CreateStmtRegassign(pType, pIdx, subLen); + newBlk.AddStatement(newStmt); + + ln = mirBuilder->CreateStmtLabel(label2); + newBlk.AddStatement(ln); + + CompareNode *cmpExp = + mirBuilder->CreateExprCompare(OP_lt, *GlobalTables::GetTypeTable().GetUInt32(), + *GlobalTables::GetTypeTable().GetPrimType(pType), regLen, length); + newStmt = mirBuilder->CreateStmtCondGoto(cmpExp, OP_brtrue, label1); + + return newStmt; +} + +StmtNode *CGLowerer::LowerIntrinsicRCCall(const IntrinsiccallNode &intrinCall) { + /* If GCONLY enabled, lowering RC intrinsics in another way. */ + MIRIntrinsicID intrnID = intrinCall.GetIntrinsic(); + IntrinDesc *intrinDesc = &IntrinDesc::intrinTable[intrnID]; + + /* convert intrinsic call into function call. 
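+   * The callee is created lazily: the first time a given RC intrinsic is
+   * encountered, a MIRFunction named after intrinDesc->name is created and
+   * its PUIdx is cached in intrinFuncIDs, so later occurrences of the same
+   * intrinsic reuse that function instead of creating a new symbol.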
*/ + if (intrinFuncIDs.find(intrinDesc) == intrinFuncIDs.end()) { + /* add funcid into map */ + MIRFunction *fn = mirBuilder->GetOrCreateFunction(intrinDesc->name, TyIdx(PTY_void)); + fn->GetFuncSymbol()->SetAppearsInCode(true); + beCommon.UpdateTypeTable(*fn->GetMIRFuncType()); + fn->AllocSymTab(); + intrinFuncIDs[intrinDesc] = fn->GetPuidx(); + } + CallNode *callStmt = mirModule.CurFuncCodeMemPool()->New(mirModule, OP_call); + callStmt->SetPUIdx(intrinFuncIDs.at(intrinDesc)); + for (size_t i = 0; i < intrinCall.GetNopndSize(); ++i) { + (void)callStmt->GetNopnd().emplace_back(intrinCall.GetNopndAt(i)); + callStmt->SetNumOpnds(callStmt->GetNumOpnds() + 1); + } + return callStmt; +} + +void CGLowerer::LowerArrayStore(const IntrinsiccallNode &intrinCall, BlockNode &newBlk) { + bool needCheckStore = true; + BaseNode *arrayNode = intrinCall.Opnd(0); + MIRType *arrayElemType = GetArrayNodeType(*arrayNode); + BaseNode *valueNode = intrinCall.Opnd(kNodeThirdOpnd); + MIRType *valueRealType = GetArrayNodeType(*valueNode); + if ((arrayElemType != nullptr) && (valueRealType != nullptr) && (arrayElemType->GetKind() == kTypeClass) && + static_cast(arrayElemType)->IsFinal() && (valueRealType->GetKind() == kTypeClass) && + static_cast(valueRealType)->IsFinal() && + (valueRealType->GetTypeIndex() == arrayElemType->GetTypeIndex())) { + needCheckStore = false; + } + + if (needCheckStore) { + MIRFunction *fn = mirBuilder->GetOrCreateFunction("MCC_Reflect_Check_Arraystore", TyIdx(PTY_void)); + fn->GetFuncSymbol()->SetAppearsInCode(true); + beCommon.UpdateTypeTable(*fn->GetMIRFuncType()); + fn->AllocSymTab(); + MapleVector args(mirBuilder->GetCurrentFuncCodeMpAllocator()->Adapter()); + (void)args.emplace_back(intrinCall.Opnd(0)); + (void)args.emplace_back(intrinCall.Opnd(kNodeThirdOpnd)); + StmtNode *checkStoreStmt = mirBuilder->CreateStmtCall(fn->GetPuidx(), args); + newBlk.AddStatement(checkStoreStmt); + } +} + +StmtNode *CGLowerer::LowerDefaultIntrinsicCall(IntrinsiccallNode &intrinCall, MIRSymbol &st, MIRFunction &fn) { + MIRIntrinsicID intrnID = intrinCall.GetIntrinsic(); + IntrinDesc *intrinDesc = &IntrinDesc::intrinTable[intrnID]; + std::vector funcTyVec; + std::vector fnTaVec; + MapleVector &nOpnds = intrinCall.GetNopnd(); + MIRType *retTy = intrinDesc->GetReturnType(); + CHECK_FATAL(retTy != nullptr, "retTy should not be nullptr"); + if (retTy->GetKind() == kTypeStruct) { + funcTyVec.emplace_back(beCommon.BeGetOrCreatePointerType(*retTy)->GetTypeIndex()); + fnTaVec.emplace_back(TypeAttrs()); + fn.SetReturnStruct(); + } + for (uint32 i = 0; i < nOpnds.size(); ++i) { + MIRType *argTy = intrinDesc->GetArgType(i); + CHECK_FATAL(argTy != nullptr, "argTy should not be nullptr"); + if (argTy->GetKind() == kTypeStruct) { + funcTyVec.emplace_back(GlobalTables::GetTypeTable().GetTypeFromTyIdx(PTY_a32)->GetTypeIndex()); + fnTaVec.emplace_back(TypeAttrs()); + BaseNode *addrNode = beCommon.GetAddressOfNode(*nOpnds[i]); + CHECK_FATAL(addrNode != nullptr, "can not get address"); + nOpnds[i] = addrNode; + } else { + funcTyVec.emplace_back(argTy->GetTypeIndex()); + fnTaVec.emplace_back(TypeAttrs()); + } + } + MIRType *funcType = beCommon.BeGetOrCreateFunctionType(retTy->GetTypeIndex(), funcTyVec, fnTaVec); + st.SetTyIdx(funcType->GetTypeIndex()); + fn.SetMIRFuncType(static_cast(funcType)); + if (retTy->GetKind() == kTypeStruct) { + fn.SetReturnTyIdx(static_cast(PTY_void)); + } else { + fn.SetReturnTyIdx(retTy->GetTypeIndex()); + } + return static_cast(mirBuilder->CreateStmtCall(fn.GetPuidx(), nOpnds)); +} + +StmtNode 
*CGLowerer::LowerIntrinsicMplCleanupLocalRefVarsSkip(IntrinsiccallNode &intrinCall) { + MIRFunction *mirFunc = mirModule.CurFunction(); + BaseNode *skipExpr = intrinCall.Opnd(intrinCall.NumOpnds() - 1); + + CHECK_FATAL(skipExpr != nullptr, "should be dread"); + CHECK_FATAL(skipExpr->GetOpCode() == OP_dread, "should be dread"); + DreadNode *refNode = static_cast(skipExpr); + MIRSymbol *skipSym = mirFunc->GetLocalOrGlobalSymbol(refNode->GetStIdx()); + if (skipSym->GetAttr(ATTR_localrefvar)) { + mirFunc->InsertMIRSymbol(skipSym); + } + return &intrinCall; +} + +StmtNode *CGLowerer::LowerIntrinsiccall(IntrinsiccallNode &intrinCall, BlockNode &newBlk) { + MIRIntrinsicID intrnID = intrinCall.GetIntrinsic(); + for (size_t i = 0; i < intrinCall.GetNumOpnds(); ++i) { + intrinCall.SetOpnd(LowerExpr(intrinCall, *intrinCall.Opnd(i), newBlk), i); + } + if (intrnID == INTRN_MPL_CLEAR_STACK) { + return LowerIntrinsicMplClearStack(intrinCall, newBlk); + } + if (intrnID == INTRN_C_va_start) { + return &intrinCall; + } + IntrinDesc *intrinDesc = &IntrinDesc::intrinTable[intrnID]; + if (intrinDesc->IsSpecial() || intrinDesc->IsAtomic()) { + /* For special intrinsics we leave them to CGFunc::SelectintrinCall() */ + return &intrinCall; + } + /* default lowers intrinsic call to real function call. */ + MIRSymbol *st = GlobalTables::GetGsymTable().CreateSymbol(kScopeGlobal); + CHECK_FATAL(intrinDesc->name != nullptr, "intrinsic's name should not be nullptr"); + const std::string name = intrinDesc->name; + st->SetNameStrIdx(name); + st->SetStorageClass(kScText); + st->SetSKind(kStFunc); + MIRFunction *fn = mirBuilder->GetOrCreateFunction(intrinDesc->name, TyIdx(0)); + beCommon.UpdateTypeTable(*fn->GetMIRFuncType()); + fn->AllocSymTab(); + st->SetFunction(fn); + st->SetAppearsInCode(true); + return LowerDefaultIntrinsicCall(intrinCall, *st, *fn); +} + +StmtNode *CGLowerer::LowerSyncEnterSyncExit(StmtNode &stmt) { + CHECK_FATAL(stmt.GetOpCode() == OP_syncenter || stmt.GetOpCode() == OP_syncexit, + "stmt's opcode should be OP_syncenter or OP_syncexit"); + + auto &nStmt = static_cast(stmt); + BuiltinFunctionID id; + if (nStmt.GetOpCode() == OP_syncenter) { + if (nStmt.NumOpnds() == 1) { + /* Just as ParseNaryStmt do for syncenter */ + MIRType &intType = *GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_i32)); + /* default 2 for __sync_enter_fast() */ + MIRIntConst *intConst = GlobalTables::GetIntConstTable().GetOrCreateIntConst(2, intType); + ConstvalNode *exprConst = mirModule.GetMemPool()->New(); + exprConst->SetPrimType(PTY_i32); + exprConst->SetConstVal(intConst); + nStmt.GetNopnd().emplace_back(exprConst); + nStmt.SetNumOpnds(nStmt.GetNopndSize()); + } + CHECK_FATAL(nStmt.NumOpnds() == kOperandNumBinary, "wrong args for syncenter"); + CHECK_FATAL(nStmt.Opnd(1)->GetOpCode() == OP_constval, "wrong 2nd arg type for syncenter"); + ConstvalNode *cst = static_cast(nStmt.GetNopndAt(1)); + MIRIntConst *intConst = safe_cast(cst->GetConstVal()); + switch (intConst->GetExtValue()) { + case kMCCSyncEnterFast0: + id = INTRN_FIRST_SYNC_ENTER; + break; + case kMCCSyncEnterFast1: + id = INTRN_SECOND_SYNC_ENTER; + break; + case kMCCSyncEnterFast2: + id = INTRN_THIRD_SYNC_ENTER; + break; + case kMCCSyncEnterFast3: + id = INTRN_FOURTH_SYNC_ENTER; + break; + default: + CHECK_FATAL(false, "wrong kind for syncenter"); + break; + } + } else { + CHECK_FATAL(nStmt.NumOpnds() == 1, "wrong args for syncexit"); + id = INTRN_YNC_EXIT; + } + PUIdx bFunc = GetBuiltinToUse(id); + CHECK_FATAL(bFunc != kFuncNotFound, "bFunc should 
be found"); + + MapleVector args(mirBuilder->GetCurrentFuncCodeMpAllocator()->Adapter()); + args.emplace_back(nStmt.Opnd(0)); + return mirBuilder->CreateStmtCall(bFunc, args); +} + +PUIdx CGLowerer::GetBuiltinToUse(BuiltinFunctionID id) const { + /* + * use std::vector & linear search as the number of entries is small. + * we may revisit it if the number of entries gets larger. + */ + for (const auto &funcID : builtinFuncIDs) { + if (funcID.first == id) { + return funcID.second; + } + } + return kFuncNotFound; +} + +void CGLowerer::LowerGCMalloc(const BaseNode &node, const GCMallocNode &gcmalloc, BlockNode &blkNode, bool perm) { + MIRFunction *func = mirBuilder->GetOrCreateFunction((perm ? "MCC_NewPermanentObject" : "MCC_NewObj_fixed_class"), + static_cast(GetLoweredPtrType())); + func->GetFuncSymbol()->SetAppearsInCode(true); + beCommon.UpdateTypeTable(*func->GetMIRFuncType()); + func->AllocSymTab(); + /* Get the classinfo */ + MIRStructType *classType = static_cast( + GlobalTables::GetTypeTable().GetTypeFromTyIdx(gcmalloc.GetTyIdx())); + std::string classInfoName = CLASSINFO_PREFIX_STR + classType->GetName(); + MIRSymbol *classSym = GlobalTables::GetGsymTable().GetSymbolFromStrIdx( + GlobalTables::GetStrTable().GetStrIdxFromName(classInfoName)); + if (classSym == nullptr) { + MIRType *pointerType = beCommon.BeGetOrCreatePointerType(*GlobalTables::GetTypeTable().GetVoid()); + classSym = mirBuilder->CreateGlobalDecl(classInfoName, *pointerType); + classSym->SetStorageClass(kScExtern); + } + CallNode *callAssign = nullptr; + auto *curFunc = mirModule.CurFunction(); + if (classSym->GetAttr(ATTR_abstract) || classSym->GetAttr(ATTR_interface)) { + MIRFunction *funcSecond = mirBuilder->GetOrCreateFunction("MCC_Reflect_ThrowInstantiationError", + static_cast(GetLoweredPtrType())); + funcSecond->GetFuncSymbol()->SetAppearsInCode(true); + beCommon.UpdateTypeTable(*funcSecond->GetMIRFuncType()); + funcSecond->AllocSymTab(); + BaseNode *arg = mirBuilder->CreateExprAddrof(0, *classSym); + if (node.GetOpCode() == OP_dassign) { + auto &dsNode = static_cast(node); + MIRSymbol *ret = curFunc->GetLocalOrGlobalSymbol(dsNode.GetStIdx()); + MapleVector args(mirBuilder->GetCurrentFuncCodeMpAllocator()->Adapter()); + args.emplace_back(arg); + callAssign = mirBuilder->CreateStmtCallAssigned(funcSecond->GetPuidx(), args, ret, OP_callassigned); + } else { + CHECK_FATAL(node.GetOpCode() == OP_regassign, "regassign expected"); + callAssign = mirBuilder->CreateStmtCallRegassigned( + funcSecond->GetPuidx(), static_cast(node).GetRegIdx(), OP_callassigned, arg); + } + blkNode.AppendStatementsFromBlock(*LowerCallAssignedStmt(*callAssign)); + return; + } + BaseNode *arg = mirBuilder->CreateExprAddrof(0, *classSym); + + if (node.GetOpCode() == OP_dassign) { + MIRSymbol *ret = curFunc->GetLocalOrGlobalSymbol(static_cast(node).GetStIdx()); + MapleVector args(mirBuilder->GetCurrentFuncCodeMpAllocator()->Adapter()); + args.emplace_back(arg); + callAssign = mirBuilder->CreateStmtCallAssigned(func->GetPuidx(), args, ret, OP_callassigned); + } else { + CHECK_FATAL(node.GetOpCode() == OP_regassign, "regassign expected"); + callAssign = mirBuilder->CreateStmtCallRegassigned( + func->GetPuidx(), static_cast(node).GetRegIdx(), OP_callassigned, arg); + } + blkNode.AppendStatementsFromBlock(*LowerCallAssignedStmt(*callAssign)); +} + +std::string CGLowerer::GetNewArrayFuncName(const uint32 elemSize, const bool perm) const { + if (elemSize == 1) { + return perm ? 
"MCC_NewPermArray8" : "MCC_NewArray8"; + } + if (elemSize == 2) { + return perm ? "MCC_NewPermArray16" : "MCC_NewArray16"; + } + if (elemSize == 4) { + return perm ? "MCC_NewPermArray32" : "MCC_NewArray32"; + } + CHECK_FATAL((elemSize == 8), "Invalid elemSize."); + return perm ? "MCC_NewPermArray64" : "MCC_NewArray64"; +} + +void CGLowerer::LowerJarrayMalloc(const StmtNode &stmt, const JarrayMallocNode &node, BlockNode &blkNode, bool perm) { + /* Extract jarray type */ + TyIdx tyIdx = node.GetTyIdx(); + MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx); + CHECK_FATAL(type->GetKind() == kTypeJArray, "Type param of gcmallocjarray is not a MIRJarrayType"); + auto jaryType = static_cast(type); + CHECK_FATAL(jaryType != nullptr, "Type param of gcmallocjarray is not a MIRJarrayType"); + + /* Inspect element type */ + MIRType *elemType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(jaryType->GetElemTyIdx()); + PrimType elemPrimType = elemType->GetPrimType(); + uint32 elemSize = GetPrimTypeSize(elemPrimType); + if (elemType->GetKind() != kTypeScalar) { /* element is reference */ + elemSize = static_cast(RTSupport::GetRTSupportInstance().GetFieldSize()); + } + + std::string klassName = jaryType->GetJavaName(); + std::string arrayClassInfoName; + bool isPredefinedArrayClass = false; + BaseNode *arrayCacheNode = nullptr; + if (jaryType->IsPrimitiveArray() && (jaryType->GetDim() <= kThreeDimArray)) { + arrayClassInfoName = PRIMITIVECLASSINFO_PREFIX_STR + klassName; + isPredefinedArrayClass = true; + } else if (arrayNameForLower::kArrayKlassName.find(klassName) != arrayNameForLower::kArrayKlassName.end()) { + arrayClassInfoName = CLASSINFO_PREFIX_STR + klassName; + isPredefinedArrayClass = true; + } else { + arrayCacheNode = GetClassInfoExprFromArrayClassCache(klassName); + } + + std::string funcName; + MapleVector args(mirModule.GetMPAllocator().Adapter()); + auto *curFunc = mirModule.CurFunction(); + if (isPredefinedArrayClass || (arrayCacheNode != nullptr)) { + funcName = GetNewArrayFuncName(elemSize, perm); + args.emplace_back(node.Opnd(0)); /* n_elems */ + if (isPredefinedArrayClass) { + GStrIdx strIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(arrayClassInfoName); + MIRSymbol *arrayClassSym = GlobalTables::GetGsymTable().GetSymbolFromStrIdx( + GlobalTables::GetStrTable().GetStrIdxFromName(arrayClassInfoName)); + if (arrayClassSym == nullptr) { + arrayClassSym = GlobalTables::GetGsymTable().CreateSymbol(kScopeGlobal); + arrayClassSym->SetNameStrIdx(strIdx); + arrayClassSym->SetStorageClass(kScGlobal); + arrayClassSym->SetSKind(kStVar); + if (CGOptions::IsPIC()) { + arrayClassSym->SetStorageClass(kScExtern); + } else { + arrayClassSym->SetAttr(ATTR_weak); + } + GlobalTables::GetGsymTable().AddToStringSymbolMap(*arrayClassSym); + arrayClassSym->SetTyIdx(static_cast(PTY_ptr)); + } + args.emplace_back(mirBuilder->CreateExprAddrof(0, *arrayClassSym)); + } else { + args.emplace_back(arrayCacheNode); + } + } else { + funcName = perm ? 
"MCC_NewPermanentArray" : "MCC_NewObj_flexible_cname"; + args.emplace_back(mirBuilder->CreateIntConst(elemSize, PTY_u32)); /* elem_size */ + args.emplace_back(node.Opnd(0)); /* n_elems */ + std::string klassJavaDescriptor; + namemangler::DecodeMapleNameToJavaDescriptor(klassName, klassJavaDescriptor); + UStrIdx classNameStrIdx = GlobalTables::GetUStrTable().GetOrCreateStrIdxFromName(klassJavaDescriptor); + ConststrNode *classNameExpr = mirModule.GetMemPool()->New(classNameStrIdx); + classNameExpr->SetPrimType(PTY_ptr); + args.emplace_back(classNameExpr); /* class_name */ + args.emplace_back(GetBaseNodeFromCurFunc(*curFunc, true)); + /* set class flag 0 */ + args.emplace_back(mirBuilder->CreateIntConst(0, PTY_u32)); + } + MIRFunction *func = mirBuilder->GetOrCreateFunction(funcName, static_cast(GetLoweredPtrType())); + func->GetFuncSymbol()->SetAppearsInCode(true); + beCommon.UpdateTypeTable(*func->GetMIRFuncType()); + func->AllocSymTab(); + CallNode *callAssign = nullptr; + if (stmt.GetOpCode() == OP_dassign) { + auto &dsNode = static_cast(stmt); + MIRSymbol *ret = curFunc->GetLocalOrGlobalSymbol(dsNode.GetStIdx()); + + callAssign = mirBuilder->CreateStmtCallAssigned(func->GetPuidx(), args, ret, OP_callassigned); + } else { + auto ®Node = static_cast(stmt); + callAssign = mirBuilder->CreateStmtCallRegassigned(func->GetPuidx(), args, regNode.GetRegIdx(), OP_callassigned); + } + blkNode.AppendStatementsFromBlock(*LowerCallAssignedStmt(*callAssign)); +} + +bool CGLowerer::IsIntrinsicCallHandledAtLowerLevel(MIRIntrinsicID intrinsic) const { + /* only INTRN_MPL_ATOMIC_EXCHANGE_PTR now. */ + return intrinsic == INTRN_MPL_ATOMIC_EXCHANGE_PTR; +} + +bool CGLowerer::IsIntrinsicOpHandledAtLowerLevel(MIRIntrinsicID intrinsic) const { + switch (intrinsic) { +#if TARGAARCH64 || TARGX86_64 + case INTRN_C_bswap64: + case INTRN_C_bswap32: + case INTRN_C_bswap16: + case INTRN_C_cos: + case INTRN_C_cosf: + case INTRN_C_cosh: + case INTRN_C_coshf: + case INTRN_C_acos: + case INTRN_C_acosf: + case INTRN_C_sin: + case INTRN_C_sinf: + case INTRN_C_sinh: + case INTRN_C_sinhf: + case INTRN_C_asin: + case INTRN_C_asinf: + case INTRN_C_atan: + case INTRN_C_atanf: + case INTRN_C_exp: + case INTRN_C_expf: + case INTRN_C_ffs: + case INTRN_C_log: + case INTRN_C_logf: + case INTRN_C_log10: + case INTRN_C_log10f: + case INTRN_C_clz32: + case INTRN_C_clz64: + case INTRN_C_ctz32: + case INTRN_C_ctz64: + case INTRN_C_popcount32: + case INTRN_C_popcount64: + case INTRN_C_parity32: + case INTRN_C_parity64: + case INTRN_C_clrsb32: + case INTRN_C_clrsb64: + case INTRN_C_isaligned: + case INTRN_C_alignup: + case INTRN_C_aligndown: + case INTRN_C___sync_add_and_fetch_1: + case INTRN_C___sync_add_and_fetch_2: + case INTRN_C___sync_add_and_fetch_4: + case INTRN_C___sync_add_and_fetch_8: + case INTRN_C___sync_sub_and_fetch_1: + case INTRN_C___sync_sub_and_fetch_2: + case INTRN_C___sync_sub_and_fetch_4: + case INTRN_C___sync_sub_and_fetch_8: + case INTRN_C___sync_fetch_and_add_1: + case INTRN_C___sync_fetch_and_add_2: + case INTRN_C___sync_fetch_and_add_4: + case INTRN_C___sync_fetch_and_add_8: + case INTRN_C___sync_fetch_and_sub_1: + case INTRN_C___sync_fetch_and_sub_2: + case INTRN_C___sync_fetch_and_sub_4: + case INTRN_C___sync_fetch_and_sub_8: + case INTRN_C___sync_bool_compare_and_swap_1: + case INTRN_C___sync_bool_compare_and_swap_2: + case INTRN_C___sync_bool_compare_and_swap_4: + case INTRN_C___sync_bool_compare_and_swap_8: + case INTRN_C___sync_val_compare_and_swap_1: + case INTRN_C___sync_val_compare_and_swap_2: + case 
INTRN_C___sync_val_compare_and_swap_4: + case INTRN_C___sync_val_compare_and_swap_8: + case INTRN_C___sync_lock_test_and_set_1: + case INTRN_C___sync_lock_test_and_set_2: + case INTRN_C___sync_lock_test_and_set_4: + case INTRN_C___sync_lock_test_and_set_8: + case INTRN_C___sync_lock_release_8: + case INTRN_C___sync_lock_release_4: + case INTRN_C___sync_lock_release_2: + case INTRN_C___sync_lock_release_1: + case INTRN_C___sync_fetch_and_and_1: + case INTRN_C___sync_fetch_and_and_2: + case INTRN_C___sync_fetch_and_and_4: + case INTRN_C___sync_fetch_and_and_8: + case INTRN_C___sync_fetch_and_or_1: + case INTRN_C___sync_fetch_and_or_2: + case INTRN_C___sync_fetch_and_or_4: + case INTRN_C___sync_fetch_and_or_8: + case INTRN_C___sync_fetch_and_xor_1: + case INTRN_C___sync_fetch_and_xor_2: + case INTRN_C___sync_fetch_and_xor_4: + case INTRN_C___sync_fetch_and_xor_8: + case INTRN_C___sync_fetch_and_nand_1: + case INTRN_C___sync_fetch_and_nand_2: + case INTRN_C___sync_fetch_and_nand_4: + case INTRN_C___sync_fetch_and_nand_8: + case INTRN_C___sync_and_and_fetch_1: + case INTRN_C___sync_and_and_fetch_2: + case INTRN_C___sync_and_and_fetch_4: + case INTRN_C___sync_and_and_fetch_8: + case INTRN_C___sync_or_and_fetch_1: + case INTRN_C___sync_or_and_fetch_2: + case INTRN_C___sync_or_and_fetch_4: + case INTRN_C___sync_or_and_fetch_8: + case INTRN_C___sync_xor_and_fetch_1: + case INTRN_C___sync_xor_and_fetch_2: + case INTRN_C___sync_xor_and_fetch_4: + case INTRN_C___sync_xor_and_fetch_8: + case INTRN_C___sync_nand_and_fetch_1: + case INTRN_C___sync_nand_and_fetch_2: + case INTRN_C___sync_nand_and_fetch_4: + case INTRN_C___sync_nand_and_fetch_8: + case INTRN_C___sync_synchronize: + case INTRN_C__builtin_return_address: + case INTRN_C__builtin_extract_return_addr: + case INTRN_C_memcmp: + case INTRN_C_strlen: + case INTRN_C_strcmp: + case INTRN_C_strncmp: + case INTRN_C_strchr: + case INTRN_C_strrchr: + case INTRN_C_rev16_2: + case INTRN_C_rev_4: + case INTRN_C_rev_8: + case INTRN_C_stack_save: + case INTRN_C_stack_restore: + return true; +#endif + default: + return false; + } +} + +void CGLowerer::InitArrayClassCacheTableIndex() { + MIRSymbol *reflectStrtabSym = GlobalTables::GetGsymTable().GetSymbolFromStrIdx( + GlobalTables::GetStrTable().GetOrCreateStrIdxFromName( + namemangler::kReflectionStrtabPrefixStr + mirModule.GetFileNameAsPostfix())); + MIRSymbol *reflectStartHotStrtabSym = GlobalTables::GetGsymTable().GetSymbolFromStrIdx( + GlobalTables::GetStrTable().GetOrCreateStrIdxFromName( + namemangler::kReflectionStartHotStrtabPrefixStr + mirModule.GetFileNameAsPostfix())); + MIRSymbol *reflectBothHotStrtabSym = GlobalTables::GetGsymTable().GetSymbolFromStrIdx( + GlobalTables::GetStrTable().GetOrCreateStrIdxFromName( + namemangler::kReflectionBothHotStrTabPrefixStr + mirModule.GetFileNameAsPostfix())); + MIRSymbol *reflectRunHotStrtabSym = GlobalTables::GetGsymTable().GetSymbolFromStrIdx( + GlobalTables::GetStrTable().GetOrCreateStrIdxFromName( + namemangler::kReflectionRunHotStrtabPrefixStr + mirModule.GetFileNameAsPostfix())); + MIRSymbol *arrayCacheNameTableSym = GlobalTables::GetGsymTable().GetSymbolFromStrIdx( + GlobalTables::GetStrTable().GetOrCreateStrIdxFromName( + namemangler::kArrayClassCacheNameTable + mirModule.GetFileNameAsPostfix())); + if (arrayCacheNameTableSym == nullptr) { + return; + } + MIRAggConst &aggConst = static_cast(*(arrayCacheNameTableSym->GetKonst())); + MIRSymbol *strTab = nullptr; + for (size_t i = 0; i < aggConst.GetConstVec().size(); ++i) { + MIRConst *elemConst = 
aggConst.GetConstVecItem(i); + uint32 intValue = static_cast( + static_cast((safe_cast(elemConst))->GetExtValue()) & 0xFFFFFFFF); + bool isHotReflectStr = (intValue & 0x00000003) != 0; /* use the last two bits of intValue in this expression */ + if (isHotReflectStr) { + uint32 tag = (intValue & 0x00000003) - kCStringShift; /* use the last two bits of intValue in this expression */ + if (tag == kLayoutBootHot) { + strTab = reflectStartHotStrtabSym; + } else if (tag == kLayoutBothHot) { + strTab = reflectBothHotStrtabSym; + } else { + strTab = reflectRunHotStrtabSym; + } + } else { + strTab = reflectStrtabSym; + } + ASSERT(strTab != nullptr, "strTab is nullptr"); + std::string arrayClassName; + MIRAggConst *strAgg = static_cast(strTab->GetKonst()); + for (auto start = (intValue >> 2); start < strAgg->GetConstVec().size(); ++start) { /* the last two bits is flag */ + MIRIntConst *oneChar = static_cast(strAgg->GetConstVecItem(start)); + if ((oneChar != nullptr) && !oneChar->IsZero()) { + arrayClassName += static_cast(oneChar->GetExtValue()); + } else { + break; + } + } + arrayClassCacheIndex[arrayClassName] = i; + } +} + +void CGLowerer::BuildLabel2FreqMap() { + StmtNodes stmtNodes = GetCurrentFunc()->GetBody()->GetStmtNodes(); + FuncProfInfo* funcProfile = mirModule.CurFunction()->GetFuncProfData(); + if (Options::profileUse && (funcProfile != nullptr)) { + for (StmtNode& stmt : stmtNodes) { + if (stmt.GetOpCode() == OP_label) { + l2fMap[static_cast(stmt).GetLabelIdx()] = funcProfile->GetStmtFreq(stmt.GetStmtID()); + } + } + } +} + +void CGLowerer::LowerFunc(MIRFunction &func) { + labelIdx = 0; + SetCurrentFunc(&func); + hasTry = false; + LowerEntry(func); + LowerPseudoRegs(func); + BuildLabel2FreqMap(); + BlockNode *origBody = func.GetBody(); + CHECK_FATAL(origBody != nullptr, "origBody should not be nullptr"); + + BlockNode *newBody = LowerBlock(*origBody); + func.SetBody(newBody); + if (needBranchCleanup) { + CleanupBranches(func); + } + + if (mirModule.IsJavaModule() && func.GetBody()->GetFirst() && GenerateExceptionHandlingCode()) { + LowerTryCatchBlocks(*func.GetBody()); + } + uint32 oldTypeTableSize = GlobalTables::GetTypeTable().GetTypeTableSize(); + // We do the simplify work here because now all the intrinsic calls and potential expansion work of memcpy or other + // functions are handled well. So we can concentrate to do the replacement work. + SimplifyBlock(*newBody); + uint32 newTypeTableSize = GlobalTables::GetTypeTable().GetTypeTableSize(); + if (newTypeTableSize != oldTypeTableSize) { + beCommon.AddNewTypeAfterBecommon(oldTypeTableSize, newTypeTableSize); + } +} +} /* namespace maplebe */ diff --git a/src/mapleall/maple_be/src/be/rt.cpp b/src/mapleall/maple_be/src/be/rt.cpp new file mode 100644 index 0000000000000000000000000000000000000000..5a14d9cf52f0f503d90320fb332e62b9bd62bc8a --- /dev/null +++ b/src/mapleall/maple_be/src/be/rt.cpp @@ -0,0 +1,21 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "rt.h" + +namespace maplebe { +const std::string RTSupport::kObjectMapSectionName = ".maple.objectmap"; +const std::string RTSupport::kGctibLabelArrayOfObject = "MCC_GCTIB___ArrayOfObject"; +const std::string RTSupport::kGctibLabelJavaObject = "MCC_GCTIB__Ljava_2Flang_2FObject_3B"; +} /* namespace maplebe */ diff --git a/src/mapleall/maple_be/src/be/switch_lowerer.cpp b/src/mapleall/maple_be/src/be/switch_lowerer.cpp new file mode 100644 index 0000000000000000000000000000000000000000..d361660d445046a1715626d0b0096906183d57a1 --- /dev/null +++ b/src/mapleall/maple_be/src/be/switch_lowerer.cpp @@ -0,0 +1,404 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +/* + * This module analyzes the tag distribution in a switch statement and decides + * the best strategy in terms of runtime performance to generate code for it. + * The generated code makes use of 3 code generation techniques: + * + * 1. cascade of if-then-else based on equality test + * 2. rangegoto + * 3. binary search + * + * 1 is applied only if the number of possibilities is <= 6. + * 2 corresponds to indexed jump, but it requires allocating an array + * initialized with the jump targets. Since it causes memory usage overhead, + * rangegoto is used only if the density is higher than 0.7. + * If neither 1 nor 2 is applicable, 3 is applied in the form of a decision + * tree. In this case, each test would split the tags into 2 halves. For + * each half, the above algorithm is then applied recursively until the + * algorithm terminates. + * + * But we don't want to apply 3 right from the beginning if both 1 and 2 do not + * apply, because there may be regions that have density > 0.7. Thus, the + * switch lowerer begins by finding clusters. A cluster is defined to be a + * maximal range of tags whose density is > 0.7. + * + * In finding clusters, the original switch table is sorted and then each dense + * region is condensed into 1 switch item; in the switch_items table, each item // either corresponds to an original + * entry in the original switch table (pair's // second is 0), or to a dense region (pair's second gives the upper limit + * of the dense range). The output code is generated based on the switch_items. See BuildCodeForSwitchItems() which is + * recursive. 
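 *
 * A worked illustration with hypothetical tag sets (values invented for clarity, and
 * assuming runs of this length satisfy the cluster-length cutoff used by
 * FindClusters()): for the sorted tags {0, 1, 2, 3, 4, 7, 8, 9, 10} the range spans
 * 11 values and the density is (9 - 1) / (10 - 0) = 0.8 > 0.7, so the whole table is
 * condensed into a single cluster and lowered with one rangegoto; the missing tags 5
 * and 6 are filled in with the default label. For {0..7, 100..107} the overall
 * density is only about 0.14, but the two dense regions {0..7} and {100..107} are
 * each detected as a cluster; each becomes one switch item lowered by rangegoto, and
 * choosing between the two clusters is left to the binary-search decision tree.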
+*/ +#include "switch_lowerer.h" +#include "mir_nodes.h" +#include "mir_builder.h" +#include "mir_lower.h" /* "../../../maple_ir/include/mir_lower.h" */ + +namespace maplebe { +static bool CasePairKeyLessThan(const CasePair &left, const CasePair &right) { + return left.first < right.first; +} + +void SwitchLowerer::FindClusters(MapleVector &clusters) const { + int32 length = static_cast(stmt->GetSwitchTable().size()); + int32 i = 0; + while (i < length - kClusterSwitchCutoff) { + for (int32 j = length - 1; j > i; --j) { + float tmp1 = static_cast(j - i); + float tmp2 = static_cast(stmt->GetCasePair(static_cast(static_cast(j))).first) - + static_cast(stmt->GetCasePair(static_cast(static_cast(i))).first); + float currDensity = tmp1 / tmp2; + if (((j - i) >= kClusterSwitchCutoff) && + ((currDensity >= kClusterSwitchDensityHigh) || + ((currDensity >= kClusterSwitchDensityLow) && (tmp2 < kMaxRangeGotoTableSize)))) { + clusters.emplace_back(Cluster(i, j)); + i = j; + break; + } + } + ++i; + } +} + +void SwitchLowerer::InitSwitchItems(MapleVector &clusters) { + if (clusters.empty()) { + for (int32 i = 0; i < static_cast(stmt->GetSwitchTable().size()); ++i) { + switchItems.emplace_back(SwitchItem(i, 0)); + } + } else { + int32 j = 0; + Cluster front = clusters[j]; + for (int32 i = 0; i < static_cast(stmt->GetSwitchTable().size()); ++i) { + if (i == front.first) { + switchItems.emplace_back(SwitchItem(i, front.second)); + i = front.second; + ++j; + if (static_cast(clusters.size()) > j) { + front = clusters[j]; + } + } else { + switchItems.emplace_back(SwitchItem(i, 0)); + } + } + } +} + +RangeGotoNode *SwitchLowerer::BuildRangeGotoNode(int32 startIdx, int32 endIdx, LabelIdx newLabelIdx) { + RangeGotoNode *node = mirModule.CurFuncCodeMemPool()->New(mirModule); + node->SetOpnd(stmt->GetSwitchOpnd(), 0); + + node->SetRangeGotoTable(SmallCaseVector(mirModule.CurFuncCodeMemPoolAllocator()->Adapter())); + node->SetTagOffset(static_cast(stmt->GetCasePair(static_cast(startIdx)).first)); + uint32 curTag = 0; + node->AddRangeGoto(curTag, stmt->GetCasePair(startIdx).second); + int64 lastCaseTag = stmt->GetSwitchTable().at(startIdx).first; + for (int32 i = startIdx + 1; i <= endIdx; ++i) { + /* + * The second condition is to solve the problem that compilation falls into a dead loop, + * because in some cases the two will fall into a dead loop if they are equal. 
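 *
 * For illustration with hypothetical tags: lowering {5, 6, 9} gives tagOffset = 5 and
 * table entries 0 -> label(5) and 1 -> label(6); the gap tags 7 and 8 become entries
 * 2 and 3 pointing at the default label (or at newLabelIdx when the switch has no
 * default), and finally entry 4 -> label(9). Without the second condition, a tag that
 * repeats the previous one would keep the first condition true forever while
 * lastCaseTag is incremented past it, hence the endless loop mentioned above.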
+ */ + while ((stmt->GetCasePair(i).first != (lastCaseTag + 1)) && (stmt->GetCasePair(i).first != lastCaseTag)) { + /* fill in a gap in the case tags */ + curTag = static_cast(static_cast(++lastCaseTag) - node->GetTagOffset()); + if (stmt->GetDefaultLabel() != 0) { + node->AddRangeGoto(curTag, stmt->GetDefaultLabel()); + } else if (newLabelIdx != 0) { + node->AddRangeGoto(curTag, newLabelIdx); + } + } + curTag = static_cast(stmt->GetCasePair(static_cast(i)).first - node->GetTagOffset()); + node->AddRangeGoto(curTag, stmt->GetCasePair(i).second); + lastCaseTag = stmt->GetCasePair(i).first; + } + /* If the density is high enough, the range is allowed to be large */ + // ASSERT(static_cast(node->GetRangeGotoTable().size()) <= kMaxRangeGotoTableSize, + // "rangegoto table exceeds allowed number of entries"); + ASSERT(node->GetNumOpnds() == 1, "RangeGotoNode is a UnaryOpnd and numOpnds must be 1"); + return node; +} + +CompareNode *SwitchLowerer::BuildCmpNode(Opcode opCode, uint32 idx) { + CompareNode *binaryExpr = mirModule.CurFuncCodeMemPool()->New(opCode); + binaryExpr->SetPrimType(PTY_u32); + binaryExpr->SetOpndType(stmt->GetSwitchOpnd()->GetPrimType()); + + MIRType &type = *GlobalTables::GetTypeTable().GetTypeFromTyIdx(TyIdx(stmt->GetSwitchOpnd()->GetPrimType())); + MIRConst *constVal = + GlobalTables::GetIntConstTable().GetOrCreateIntConst(static_cast(stmt->GetCasePair(idx).first), type); + ConstvalNode *exprConst = mirModule.CurFuncCodeMemPool()->New(); + exprConst->SetPrimType(stmt->GetSwitchOpnd()->GetPrimType()); + exprConst->SetConstVal(constVal); + + binaryExpr->SetBOpnd(stmt->GetSwitchOpnd(), 0); + binaryExpr->SetBOpnd(exprConst, 1); + return binaryExpr; +} + +GotoNode *SwitchLowerer::BuildGotoNode(int32 idx) { + if (idx == -1 && stmt->GetDefaultLabel() == 0) { + return nullptr; + } + GotoNode *gotoStmt = mirModule.CurFuncCodeMemPool()->New(OP_goto); + if (idx == -1) { + gotoStmt->SetOffset(stmt->GetDefaultLabel()); + } else { + gotoStmt->SetOffset(stmt->GetCasePair(idx).second); + } + return gotoStmt; +} + +CondGotoNode *SwitchLowerer::BuildCondGotoNode(int32 idx, Opcode opCode, BaseNode &cond) { + if (idx == -1 && stmt->GetDefaultLabel() == 0) { + return nullptr; + } + CondGotoNode *cGotoStmt = mirModule.CurFuncCodeMemPool()->New(opCode); + cGotoStmt->SetOpnd(&cond, 0); + if (idx == -1) { + cGotoStmt->SetOffset(stmt->GetDefaultLabel()); + } else { + cGotoStmt->SetOffset(stmt->GetCasePair(idx).second); + } + return cGotoStmt; +} + +/* start and end is with respect to switchItems */ +BlockNode *SwitchLowerer::BuildCodeForSwitchItems(int32 start, int32 end, bool lowBlockNodeChecked, + bool highBlockNodeChecked, LabelIdx newLabelIdx) { + ASSERT(start >= 0, "invalid args start"); + ASSERT(end >= 0, "invalid args end"); + BlockNode *localBlk = mirModule.CurFuncCodeMemPool()->New(); + if (start > end) { + return localBlk; + } + CondGotoNode *cGoto = nullptr; + RangeGotoNode *rangeGoto = nullptr; + IfStmtNode *ifStmt = nullptr; + CompareNode *cmpNode = nullptr; + MIRLower mirLowerer(mirModule, mirModule.CurFunction()); + mirLowerer.Init(); + /* if low side starts with a dense item, handle it first */ + while ((start <= end) && (switchItems[start].second != 0)) { + if (!lowBlockNodeChecked) { + lowBlockNodeChecked = true; + if (!(IsUnsignedInteger(stmt->GetSwitchOpnd()->GetPrimType()) && + (stmt->GetCasePair(static_cast(switchItems[static_cast(start)].first)).first == 0))) { + cGoto = BuildCondGotoNode(-1, OP_brtrue, *BuildCmpNode(OP_lt, switchItems[start].first)); + if (cGoto != nullptr) 
{ + localBlk->AddStatement(cGoto); + } + } + } + rangeGoto = BuildRangeGotoNode(switchItems[start].first, switchItems[start].second, newLabelIdx); + if (stmt->GetDefaultLabel() == 0) { + localBlk->AddStatement(rangeGoto); + } else { + cmpNode = BuildCmpNode(OP_le, switchItems[start].second); + ifStmt = static_cast(mirModule.GetMIRBuilder()->CreateStmtIf(cmpNode)); + ifStmt->GetThenPart()->AddStatement(rangeGoto); + localBlk->AppendStatementsFromBlock(*mirLowerer.LowerIfStmt(*ifStmt, false)); + } + if (start < end) { + lowBlockNodeChecked = (stmt->GetCasePair(switchItems[start].second).first + 1 == + stmt->GetCasePair(switchItems[start + 1].first).first); + } + ++start; + } + /* if high side starts with a dense item, handle it also */ + while ((start <= end) && (switchItems[end].second != 0)) { + if (!highBlockNodeChecked) { + cGoto = BuildCondGotoNode(-1, OP_brtrue, *BuildCmpNode(OP_gt, switchItems[end].second)); + if (cGoto != nullptr) { + localBlk->AddStatement(cGoto); + } + highBlockNodeChecked = true; + } + rangeGoto = BuildRangeGotoNode(switchItems[end].first, switchItems[end].second, newLabelIdx); + if (stmt->GetDefaultLabel() == 0) { + localBlk->AddStatement(rangeGoto); + } else { + cmpNode = BuildCmpNode(OP_ge, switchItems[end].first); + ifStmt = static_cast(mirModule.GetMIRBuilder()->CreateStmtIf(cmpNode)); + ifStmt->GetThenPart()->AddStatement(rangeGoto); + localBlk->AppendStatementsFromBlock(*mirLowerer.LowerIfStmt(*ifStmt, false)); + } + if (start < end) { + highBlockNodeChecked = + (stmt->GetCasePair(switchItems[end].first).first - 1 == + stmt->GetCasePair(switchItems[end - 1].first).first) || + (stmt->GetCasePair(switchItems[end].first).first - 1 == + stmt->GetCasePair(switchItems[end - 1].second).first); + } + --end; + } + if (start > end) { + if (!lowBlockNodeChecked || !highBlockNodeChecked) { + GotoNode *gotoDft = BuildGotoNode(-1); + if (gotoDft != nullptr) { + localBlk->AddStatement(gotoDft); + jumpToDefaultBlockGenerated = true; + } + } + return localBlk; + } + if ((start == end) && lowBlockNodeChecked && highBlockNodeChecked) { + /* only 1 case with 1 tag remains */ + auto *gotoStmt = BuildGotoNode(switchItems[static_cast(start)].first); + if (gotoStmt != nullptr) { + localBlk->AddStatement(gotoStmt); + } + return localBlk; + } + if (end < (start + kClusterSwitchCutoff)) { + /* generate equality checks for what remains */ + std::vector > freq2case; + int32 lastIdx = -1; + bool freqPriority = false; + // The setting of kClusterSwitchDensityLow to such a lower value (0.2) makes other strategies less useful + if (Options::profileUse && cgLowerer->GetLabel2Freq().size()) { + for (int32 idx = start; idx <= end; idx++) { + if (switchItems[static_cast(idx)].second == 0) { + freq2case.push_back(std::make_pair(cgLowerer->GetLabel2Freq()[stmt->GetCasePair(static_cast( + switchItems[static_cast(idx)].first)).second], switchItems[static_cast(idx)].first)); + lastIdx = idx; + } else { + break; + } + } + + std::sort(freq2case.rbegin(), freq2case.rend()); + if (freq2case.size() > 0 && freq2case[0].first != freq2case[freq2case.size() - 1].first) { + freqPriority = true; + } + } + + if (Options::profileUse && freqPriority) { + for (std::pair f2c : freq2case) { + uint32 idx = static_cast(f2c.second); + cGoto = BuildCondGotoNode(switchItems[idx].first, OP_brtrue, *BuildCmpNode(OP_eq, switchItems[idx].first)); + if (cGoto != nullptr) { + localBlk->AddStatement(cGoto); + } + } + + if (lastIdx != -1) { + if (lowBlockNodeChecked && (lastIdx < end)) { + lowBlockNodeChecked = ( + 
stmt->GetCasePair(static_cast(switchItems[static_cast(lastIdx)].first)).first + 1 == + stmt->GetCasePair(static_cast(switchItems[static_cast(lastIdx + 1)].first)).first); + } + start = lastIdx + 1; + } + } else { + while ((start <= end) && (switchItems[static_cast(start)].second == 0)) { + if ((start == end) && lowBlockNodeChecked && highBlockNodeChecked) { + /* can omit the condition */ + cGoto = reinterpret_cast(BuildGotoNode(switchItems[static_cast(start)].first)); + } else { + cGoto = BuildCondGotoNode(switchItems[static_cast(start)].first, OP_brtrue, + *BuildCmpNode(OP_eq, switchItems[static_cast(start)].first)); + } + if (cGoto != nullptr) { + localBlk->AddStatement(cGoto); + } + if (lowBlockNodeChecked && (start < end)) { + lowBlockNodeChecked = (stmt->GetCasePair(switchItems[static_cast(start)].first).first + 1 == + stmt->GetCasePair(switchItems[static_cast(start + 1)].first).first); + } + ++start; + } + } + if (start <= end) { /* recursive call */ + BlockNode *tmp = BuildCodeForSwitchItems(start, end, lowBlockNodeChecked, highBlockNodeChecked); + CHECK_FATAL(tmp != nullptr, "tmp should not be nullptr"); + localBlk->AppendStatementsFromBlock(*tmp); + } else if (!lowBlockNodeChecked || !highBlockNodeChecked) { + GotoNode *gotoDft = BuildGotoNode(-1); + if (gotoDft != nullptr) { + localBlk->AddStatement(gotoDft); + jumpToDefaultBlockGenerated = true; + } + } + return localBlk; + } + + int64 lowestTag = stmt->GetCasePair(switchItems[static_cast(start)].first).first; + int64 highestTag = stmt->GetCasePair(switchItems[static_cast(end)].first).first; + + /* + * if lowestTag and higesttag have the same sign, use difference + * if lowestTag and higesttag have the diefferent sign, use sum + * 1LL << 63 judge lowestTag ^ highestTag operate result highest + * bit is 1 or not, the result highest bit is 1 express lowestTag + * and highestTag have same sign , otherwise diefferent sign.highestTag + * add or subtract lowestTag divide 2 to get middle tag. + */ + int64 middleTag = ((((static_cast(lowestTag)) ^ (static_cast(highestTag))) & (1ULL << 63)) == 0) + ? 
(highestTag - lowestTag) / 2 + lowestTag + : (highestTag + lowestTag) / 2; + /* find the mid-point in switch_items between start and end */ + int32 mid = start; + while (stmt->GetCasePair(switchItems[mid].first).first < middleTag) { + ++mid; + } + ASSERT(mid >= start, "switch lowering logic mid should greater than or equal start"); + ASSERT(mid <= end, "switch lowering logic mid should less than or equal end"); + /* generate test for binary search */ + if (stmt->GetDefaultLabel() != 0) { + cmpNode = BuildCmpNode(OP_lt, static_cast(switchItems[static_cast(mid)].first)); + ifStmt = static_cast(mirModule.GetMIRBuilder()->CreateStmtIf(cmpNode)); + bool leftHighBNdChecked = (stmt->GetCasePair(switchItems.at(mid - 1).first).first + 1 == + stmt->GetCasePair(switchItems.at(mid).first).first) || + (stmt->GetCasePair(switchItems.at(mid - 1).second).first + 1 == + stmt->GetCasePair(switchItems.at(mid).first).first); + ifStmt->SetThenPart(BuildCodeForSwitchItems(start, mid - 1, lowBlockNodeChecked, leftHighBNdChecked)); + ifStmt->SetElsePart(BuildCodeForSwitchItems(mid, end, true, highBlockNodeChecked)); + if (ifStmt->GetElsePart()) { + ifStmt->SetNumOpnds(kOperandNumTernary); + } + localBlk->AppendStatementsFromBlock(*mirLowerer.LowerIfStmt(*ifStmt, false)); + } + return localBlk; +} + +BlockNode *SwitchLowerer::LowerSwitch(LabelIdx newLabelIdx) { + if (stmt->GetSwitchTable().empty()) { /* change to goto */ + BlockNode *localBlk = mirModule.CurFuncCodeMemPool()->New(); + GotoNode *gotoDft = BuildGotoNode(-1); + if (gotoDft != nullptr) { + localBlk->AddStatement(gotoDft); + } + return localBlk; + } + + // add case labels to label table's caseLabelSet + MIRLabelTable *labelTab = mirModule.CurFunction()->GetLabelTab(); + for (CasePair &casePair : stmt->GetSwitchTable()) { + labelTab->caseLabelSet.insert(casePair.second); + } + + MapleVector clusters(ownAllocator->Adapter()); + stmt->SortCasePair(CasePairKeyLessThan); + FindClusters(clusters); + InitSwitchItems(clusters); + BlockNode *blkNode = BuildCodeForSwitchItems(0, static_cast(switchItems.size()) - 1, false, false, newLabelIdx); + if (!jumpToDefaultBlockGenerated) { + GotoNode *gotoDft = BuildGotoNode(-1); + if (gotoDft != nullptr) { + blkNode->AddStatement(gotoDft); + } + } + return blkNode; +} +} /* namespace maplebe */ diff --git a/src/mapleall/maple_be/src/be/trycatchblockslower.cpp b/src/mapleall/maple_be/src/be/trycatchblockslower.cpp new file mode 100644 index 0000000000000000000000000000000000000000..8dc8a37f072fe0e98a1888eb1dec12db50d919c2 --- /dev/null +++ b/src/mapleall/maple_be/src/be/trycatchblockslower.cpp @@ -0,0 +1,897 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "try_catch.h" +namespace maplebe { +BBT *TryCatchBlocksLower::CreateNewBB(StmtNode *first, StmtNode *last) { + BBT *newBB = memPool.New(first, last, &memPool); + bbList.emplace_back(newBB); + return newBB; +} + +BBT *TryCatchBlocksLower::FindTargetBBlock(LabelIdx idx, const std::vector &bbs) { + for (auto &target : bbs) { + if (target->GetLabelIdx() == idx) { + return target; + } + } + return nullptr; +} + +/* returns the first statement that is moved in into the try block. If none is moved in, nullptr is returned */ +StmtNode *TryCatchBlocksLower::MoveCondGotoIntoTry(BBT &jtBB, BBT &condbrBB, const MapleVector &labeledBBsInTry) { + StmtNode *firstStmtMovedIn = nullptr; + const MapleVector &bbs = labeledBBsInTry; + StmtNode *jtStmt = jtBB.GetKeyStmt(); +#if DEBUG + StmtNode *js = jtBB.GetFirstStmt(); + while (js->GetOpCode() != OP_try) { + js = js->GetNext(); + } + CHECK_FATAL(js == jtStmt, "make sure js equal jtStmt"); +#endif + StmtNode *ts = jtBB.GetFirstStmt()->GetPrev(); + while ((ts != nullptr) && (ts->GetOpCode() == OP_comment)) { + ts = ts->GetPrev(); + } + + if (ts != nullptr && ts->IsCondBr()) { + CHECK_FATAL(ts->GetNext() == jtBB.GetFirstStmt(), "make sure ts's next equal jtBB's firstStmt"); + StmtNode *firstStmtNode = jtBB.GetFirstStmt(); + /* [ jtbb_b..jtstmt ]; either jtbb_b is a comment or jtbb_b == jtstmt */ + LabelIdx id = static_cast(ts)->GetOffset(); + for (auto &lbb : bbs) { + if (lbb->GetLabelIdx() == id) { + /* + * this cond goto jumps into the try block; let the try block enclose it + * first find the preceding comment statements if any + */ + StmtNode *brS = ts; + while ((ts->GetPrev() != nullptr) && (ts->GetPrev()->GetOpCode() == OP_comment)) { + ts = ts->GetPrev(); + } + StmtNode *secondStmtNode = ts; /* beginning statement of branch block */ + /* [ brbb_b..br_s ]; either brbb_b is a comment or brbb_b == br_s */ + firstStmtNode->SetPrev(secondStmtNode->GetPrev()); + if (secondStmtNode->GetPrev()) { + secondStmtNode->GetPrev()->SetNext(firstStmtNode); + } + jtStmt->GetNext()->SetPrev(brS); + brS->SetNext(jtStmt->GetNext()); + secondStmtNode->SetPrev(jtStmt); + jtStmt->SetNext(secondStmtNode); + condbrBB.SetLastStmt(*firstStmtNode->GetPrev()); + CHECK_FATAL(condbrBB.GetFallthruBranch() == &jtBB, "make sure condbrBB's fallthruBranch equal &jtBB"); + condbrBB.SetFallthruBranch(&jtBB); + condbrBB.SetCondJumpBranch(nullptr); + firstStmtMovedIn = secondStmtNode; + break; + } + } + } + return firstStmtMovedIn; +} + +void TryCatchBlocksLower::RecoverBasicBlock() { + std::vector condbrBBs; + std::vector switchBBs; + std::vector labeledBBs; + using BBTPair = std::pair; + std::vector tryBBs; + std::vector catchBBs; + + CHECK_FATAL(body.GetFirst() != nullptr, "body should not be NULL"); + bodyFirst = body.GetFirst(); + StmtNode *next = bodyFirst; + /* + * comment block [ begin, end ], We treat comment statements as if they are parts + * of the immediately following non-comment statement + */ + StmtNode *commentB = nullptr; + StmtNode *commentE = nullptr; + + BBT *curBB = nullptr; + BBT *lastBB = nullptr; + BBT *openTry = nullptr; + + /* recover basic blocks */ + for (StmtNode *stmt = next; stmt != nullptr; stmt = next) { + next = stmt->GetNext(); + + if (stmt->GetOpCode() == OP_comment) { + if (commentB == nullptr) { + commentB = stmt; + commentE = stmt; + } else { + CHECK_FATAL(commentE != nullptr, "nullptr is not expected"); + CHECK_FATAL(commentE->GetNext() == stmt, "make sure commentE's next is stmt"); + commentE = stmt; + } + continue; + } + + 
CHECK_FATAL(stmt->GetOpCode() != OP_comment, "make sure stmt's opcde not equal OP_comment"); + CHECK_FATAL(commentB == nullptr || (commentE != nullptr && commentE->GetNext() == stmt), + "make sure commentB is nullptr or commentE's next is stmt"); + + if (curBB != nullptr) { + if (stmt->GetOpCode() != OP_label && stmt->GetOpCode() != OP_try && stmt->GetOpCode() != OP_endtry) { + curBB->Extend(commentB, stmt); + } else { + /* java catch blockes always start with a label (i.e., OP_catch) */ + CHECK_FATAL(curBB->GetCondJumpBranch() == nullptr, "expect curBB's condJumpBranch is nullptr"); + CHECK_FATAL(curBB->GetFallthruBranch() == nullptr, "expect curBB's fallthruBranch is nullptr"); + /* a 'label' statement starts a new basic block */ + BBT *newBB = CreateNewBB(commentB, stmt); + /* + * if the immediately preceding statement (discounting comments) was throw, goto or return, + * curBB is to be reset to nullptr, so the control won't come here. + */ + curBB->SetFallthruBranch(newBB); + curBB = newBB; + } + } else { + /* start a new basic block with 'comment_b -- stmt' */ + curBB = CreateNewBB(commentB, stmt); + if (lastBB != nullptr) { + Opcode lastBBLastStmtOp = lastBB->GetLastStmt()->GetOpCode(); + if (lastBB->GetLastStmt()->IsCondBr() || lastBBLastStmtOp == OP_endtry) { + lastBB->SetFallthruBranch(curBB); + } + /* else don't connect curBB to last_bb */ + } + } + commentB = nullptr; + commentE = nullptr; + + switch (stmt->GetOpCode()) { + case OP_throw: + case OP_return: + case OP_goto: + /* start a new bb at the next stmt */ + lastBB = curBB; + curBB = nullptr; + break; + case OP_label: { + LabelNode *labelStmt = static_cast(stmt); + labeledBBs.emplace_back(curBB); + curBB->SetLabelIdx(static_cast(labelStmt->GetLabelIdx())); + } break; + case OP_brtrue: + case OP_brfalse: + condbrBBs.emplace_back(curBB); + lastBB = curBB; + curBB = nullptr; + break; + case OP_switch: + switchBBs.emplace_back(curBB); + lastBB = curBB; + curBB = nullptr; + break; + /* + * We deal try and endtry slightly differently. + * 1. try begins a basic block which includes the try statement and the subsequent statements up to one that + * results in non-sequential control transfer such as unconditional/conditional branches. + * 2. endtry will create its own basic block which contains the endtry statement and nothing else. 
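 *
 * A small illustration with a hypothetical statement sequence:
 *     label L0; try; stmt1; brtrue L2; stmt2; endtry; label L2; ...
 * is split into the blocks [label L0], [try stmt1 brtrue L2] (typed kBBTry and ended
 * by the conditional branch), [stmt2], [endtry] (a kBBEndTry block containing nothing
 * else) and a new block starting at label L2; fallthrough edges connect them in this
 * order, and the block ending in brtrue additionally records L2's block as its
 * conditional jump target once all labeled blocks have been collected.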
+ */ + case OP_try: + case OP_endtry: { + /* because a label statement is inserted at the function entry */ + CHECK_FATAL(curBB != nullptr, "expect curBB is not nullptr"); + CHECK_FATAL(curBB->GetCondJumpBranch() == nullptr, "expect curBB's condJumpBranch is nullptr"); + CHECK_FATAL(curBB->GetFallthruBranch() == nullptr, "expect curBB's fallthruBranch is nullptr"); + CHECK_FATAL(curBB->GetLastStmt()->GetOpCode() == stmt->GetOpCode(), + "the opcode of curBB's lastStmt should equal stmt's opcocde"); + if (stmt->GetOpCode() == OP_try) { + CHECK_FATAL(openTry == nullptr, "trys are not expected to be nested"); + curBB->SetType(BBT::kBBTry, *stmt); + openTry = curBB; + prevBBOfTry[openTry] = lastBB; + } else { + tryBBs.emplace_back(BBTPair(openTry, curBB)); + openTry = nullptr; + curBB->SetType(BBT::kBBEndTry, *stmt); + lastBB = curBB; + curBB = nullptr; + } + break; + } + case OP_catch: { +#if DEBUG + StmtNode *ss = stmt->GetPrev(); + while ((ss != nullptr) && (ss->GetOpCode() == OP_comment)) { + ss = ss->GetPrev(); + } + CHECK_FATAL(ss != nullptr, "expect ss is not nullptr"); + CHECK_FATAL(ss->GetOpCode() == OP_label, "expect op equal OP_label"); + for (auto &tb : catchBBs) { + CHECK_FATAL(tb != curBB, "tb should not equal curBB"); + } +#endif + catchBBs.emplace_back(curBB); + curBB->SetType(BBT::kBBCatch, *stmt); + break; + } + case OP_block: + CHECK_FATAL(false, "should not run here"); + default: + break; + } + } + + for (auto &cbBB : condbrBBs) { + CHECK_FATAL(cbBB->GetLastStmt()->IsCondBr(), "cbBB's lastStmt is not condBr"); + CondGotoNode *cbBBLastStmt = static_cast(cbBB->GetLastStmt()); + cbBB->SetCondJumpBranch(FindTargetBBlock(static_cast(cbBBLastStmt->GetOffset()), labeledBBs)); + } + + for (auto &swBB : switchBBs) { + CHECK_FATAL(swBB->GetLastStmt()->GetOpCode() == OP_switch, "the opcode of sw's lastStmt should equal OP_switch"); + SwitchNode *ss = static_cast(swBB->GetLastStmt()); + + swBB->AddSuccs(FindTargetBBlock(ss->GetDefaultLabel(), labeledBBs)); + for (auto &cp : ss->GetSwitchTable()) { + swBB->AddSuccs(FindTargetBBlock(cp.second, labeledBBs)); + } + } + + for (auto &bb : bbList) { + firstStmtToBBMap[bb->GetFirstStmt()] = bb; + } + CHECK_FATAL(openTry == nullptr, "trys are not expected to be nested"); +} + +/* if catchBB is in try-endtry block and catch is own to current try-endtry, process it and return true */ +bool TryCatchBlocksLower::CheckAndProcessCatchNodeInCurrTryBlock(BBT &origLowerBB, LabelIdx ebbLabel, + uint32 index) { + MapleVector &enclosedBBs = tryEndTryBlock.GetEnclosedBBs(); + MapleVector &bbsToRelocate = tryEndTryBlock.GetBBsToRelocate(); + BBT *endTryBB = tryEndTryBlock.GetEndTryBB(); + StmtNode *tryStmt = tryEndTryBlock.GetTryStmtNode(); + bool found = false; + for (size_t tempIndex = 0; tempIndex < static_cast(tryStmt)->GetOffsetsCount(); ++tempIndex) { + auto id = static_cast(tryStmt)->GetOffset(tempIndex); + /* + * if this labeled bb is a catch block, + * remove it from the list of blocks enclosed in this try-block' + */ + if (ebbLabel == id) { + found = true; + enclosedBBs[index] = nullptr; + std::vector currBBThread; + BBT *lowerBB = &origLowerBB; + /* append it to the list of blocks placed after the end try block */ + currBBThread.emplace_back(lowerBB); + while (lowerBB->GetFallthruBranch() != nullptr) { + lowerBB = lowerBB->GetFallthruBranch(); + CHECK_FATAL(!lowerBB->IsTry(), "ebb must not be tryBB"); + if (lowerBB->IsEndTry()) { + CHECK_FATAL(lowerBB == endTryBB, "lowerBB should equal endTryBB"); + break; + } + for (uint32 j = 0; j < 
enclosedBBs.size(); ++j) { + if (enclosedBBs[j] == lowerBB) { + enclosedBBs[j] = nullptr; + break; + } + } + currBBThread.emplace_back(lowerBB); + } + + if (!lowerBB->IsEndTry()) { + for (auto &e : currBBThread) { + bbsToRelocate.emplace_back(e); + } + } else { + /* + * We have the following case. + * bb_head -> bb_1 -> .. bb_n -> endtry_bb -> succ + * For this particular case, we swap endtry bb and curr_bb_thread because the bblock that + * contains the endtry statement does not contain any other statements!! + */ + CHECK_FATAL(endTryBB->GetFirstStmt()->GetOpCode() == OP_comment || + endTryBB->GetFirstStmt()->GetOpCode() == OP_endtry, + "the opcode of endTryBB's firstStmt should be OP_comment or OP_endtry"); + CHECK_FATAL(endTryBB->GetLastStmt()->GetOpCode() == OP_endtry, + "the opcode of endTryBB's lastStmt should be OP_endtry"); + + /* we move endtry_bb before thread_head */ + BBT *threadHead = currBBThread.front(); + CHECK_FATAL(threadHead->GetFirstStmt()->GetPrev() != nullptr, + "the prev node of threadHead's firstStmt should be not nullptr"); + CHECK_FATAL(threadHead->GetFirstStmt()->GetOpCode() == OP_comment || + threadHead->GetFirstStmt()->GetOpCode() == OP_label, + "the opcode of threadHead's firstStmt should be OP_comment or OP_label"); + CHECK_FATAL(threadHead->GetFirstStmt()->GetPrev()->GetNext() == threadHead->GetFirstStmt(), + "the next of the prev of threadHead's firstStmt should equal threadHead's firstStmt"); + threadHead->GetFirstStmt()->GetPrev()->SetNext(endTryBB->GetFirstStmt()); + endTryBB->GetFirstStmt()->SetPrev(threadHead->GetFirstStmt()->GetPrev()); + BBT *threadTail = currBBThread.back(); + threadTail->GetLastStmt()->SetNext(endTryBB->GetLastStmt()->GetNext()); + if (endTryBB->GetLastStmt()->GetNext() != nullptr) { + endTryBB->GetLastStmt()->GetNext()->SetPrev(threadTail->GetLastStmt()); + } + endTryBB->GetLastStmt()->SetNext(threadHead->GetFirstStmt()); + + CHECK_FATAL(endTryBB->GetCondJumpBranch() == nullptr, "endTryBB's condJumpBranch must be nullptr"); + if (threadTail->GetFallthruBranch() != nullptr) { + threadTail->SetFallthruBranch(firstStmtToBBMap[threadTail->GetLastStmt()->GetNext()]); + } + endTryBB->SetFallthruBranch(nullptr); + if (bodyEndWithEndTry) { + body.SetLast(threadTail->GetLastStmt()); + } + } + break; + } + } + return found; +} + +/* collect catchbb->fallthru(0-n) into currBBThread, when encounter a new catch, return it, else return nullptr */ +BBT *TryCatchBlocksLower::CollectCatchAndFallthruUntilNextCatchBB(BBT *&lowerBB, uint32 &nextEnclosedIdx, + std::vector &currBBThread) { + MapleVector &enclosedBBs = tryEndTryBlock.GetEnclosedBBs(); + BBT *endTryBB = tryEndTryBlock.GetEndTryBB(); + + BBT *nextBBThreadHead = nullptr; + while (lowerBB->GetFallthruBranch() != nullptr) { + lowerBB = lowerBB->GetFallthruBranch(); + ++nextEnclosedIdx; + if (lowerBB->IsEndTry()) { + CHECK_FATAL(lowerBB == endTryBB, "lowerBB should equal endTryBB"); + break; + } + + for (uint32 j = 0; j < enclosedBBs.size(); ++j) { + if (enclosedBBs[j] == lowerBB) { + enclosedBBs[j] = nullptr; + break; + } + } + if (lowerBB->IsCatch()) { + nextBBThreadHead = lowerBB; + break; + } + currBBThread.emplace_back(lowerBB); + } + + if (nextBBThreadHead == nullptr && lowerBB->GetFallthruBranch() == nullptr && lowerBB != endTryBB && + nextEnclosedIdx < enclosedBBs.size() && enclosedBBs[nextEnclosedIdx]) { + /* + * Using a loop to find the next_bb_thread_head when it's a catch_BB or a normal_BB which + * is after a catch_BB. Other condition, push_back into the curr_bb_thread. 
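 *
 * For illustration with a hypothetical layout: if the blocks enclosed between try and
 * endtry are [catch1, bb_a, bb_b, catch2, bb_c] and bb_a/bb_b follow catch1 through
 * fallthrough edges, a thread started at catch1 collects bb_a and bb_b, stops when it
 * reaches catch2, and returns catch2 as the head of the next thread; the caller then
 * repeats the same collection starting from catch2.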
+ */ + do { + lowerBB = enclosedBBs[nextEnclosedIdx]; + enclosedBBs[nextEnclosedIdx++] = nullptr; + BBT *head = currBBThread.front(); + if (head->IsCatch() || lowerBB->IsCatch()) { + nextBBThreadHead = lowerBB; + break; + } + currBBThread.emplace_back(lowerBB); + } while (nextEnclosedIdx < enclosedBBs.size()); + } + + return nextBBThreadHead; +} + +void TryCatchBlocksLower::ProcessThreadTail(BBT &threadTail, BBT * const &nextBBThreadHead, bool hasMoveEndTry) { + BBT *endTryBB = tryEndTryBlock.GetEndTryBB(); + StmtNode *newEndTry = endTryBB->GetKeyStmt()->CloneTree(mirModule.GetCurFuncCodeMPAllocator()); + newEndTry->SetPrev(threadTail.GetLastStmt()); + newEndTry->SetNext(threadTail.GetLastStmt()->GetNext()); + if (bodyEndWithEndTry && hasMoveEndTry) { + if (threadTail.GetLastStmt()->GetNext()) { + threadTail.GetLastStmt()->GetNext()->SetPrev(newEndTry); + } + } else { + CHECK_FATAL(threadTail.GetLastStmt()->GetNext() != nullptr, + "the next of threadTail's lastStmt should not be nullptr"); + threadTail.GetLastStmt()->GetNext()->SetPrev(newEndTry); + } + threadTail.GetLastStmt()->SetNext(newEndTry); + + threadTail.SetLastStmt(*newEndTry); + if (hasMoveEndTry && nextBBThreadHead == nullptr) { + body.SetLast(threadTail.GetLastStmt()); + } +} + +/* Wrap this catch block with try-endtry block */ +void TryCatchBlocksLower::WrapCatchWithTryEndTryBlock(std::vector &currBBThread, BBT *&nextBBThreadHead, + uint32 &nextEnclosedIdx, bool hasMoveEndTry) { + BBT *endTryBB = tryEndTryBlock.GetEndTryBB(); + StmtNode *tryStmt = tryEndTryBlock.GetTryStmtNode(); + MapleVector &enclosedBBs = tryEndTryBlock.GetEnclosedBBs(); + for (auto &e : currBBThread) { + CHECK_FATAL(!e->IsTry(), "expect e is not try"); + } + BBT *threadHead = currBBThread.front(); + if (threadHead->IsCatch()) { + StmtNode *jcStmt = threadHead->GetKeyStmt(); + CHECK_FATAL(jcStmt->GetNext() != nullptr, "jcStmt's next should not be nullptr"); + TryNode *jtCopy = static_cast(tryStmt)->CloneTree(mirModule.GetCurFuncCodeMPAllocator()); + jtCopy->SetNext(jcStmt->GetNext()); + jtCopy->SetPrev(jcStmt); + jcStmt->GetNext()->SetPrev(jtCopy); + jcStmt->SetNext(jtCopy); + + BBT *threadTail = currBBThread.back(); + + /* for this endtry stmt, we don't need to create a basic block */ + ProcessThreadTail(*threadTail, static_cast(nextBBThreadHead), hasMoveEndTry); + } else { + /* For cases try->catch->normal_bb->normal_bb->endtry, Combine normal bb first. */ + while (nextEnclosedIdx < enclosedBBs.size()) { + if (nextBBThreadHead != nullptr) { + if (nextBBThreadHead->IsCatch()) { + break; + } + } + BBT *ebbSecond = enclosedBBs[nextEnclosedIdx]; + enclosedBBs[nextEnclosedIdx++] = nullptr; + CHECK_FATAL(ebbSecond != endTryBB, "ebbSecond should not equal endTryBB"); + if (ebbSecond->IsCatch()) { + nextBBThreadHead = ebbSecond; + break; + } + currBBThread.emplace_back(ebbSecond); + } + /* normal bb. */ + StmtNode *stmt = threadHead->GetFirstStmt(); + + TryNode *jtCopy = static_cast(tryStmt)->CloneTree(mirModule.GetCurFuncCodeMPAllocator()); + jtCopy->SetNext(stmt); + jtCopy->SetPrev(stmt->GetPrev()); + stmt->GetPrev()->SetNext(jtCopy); + stmt->SetPrev(jtCopy); + threadHead->SetFirstStmt(*jtCopy); + + BBT *threadTail = currBBThread.back(); + + /* for this endtry stmt, we don't need to create a basic block */ + ProcessThreadTail(*threadTail, static_cast(nextBBThreadHead), hasMoveEndTry); + } +} + +/* + * We have the following case. + * bb_head -> bb_1 -> .. 
bb_n -> endtry_bb -> succ + * For this particular case, we swap EndTry bb and curr_bb_thread, because the bblock that contains the endtry + * statement does not contain any other statements!! + */ +void TryCatchBlocksLower::SwapEndTryBBAndCurrBBThread(const std::vector &currBBThread, bool &hasMoveEndTry, + const BBT *nextBBThreadHead) { + BBT *endTryBB = tryEndTryBlock.GetEndTryBB(); + CHECK_FATAL(endTryBB->GetFirstStmt()->GetOpCode() == OP_comment || + endTryBB->GetFirstStmt()->GetOpCode() == OP_endtry, + "the opcode of endTryBB's firstStmt should be OP_comment or OP_endtry"); + CHECK_FATAL(endTryBB->GetLastStmt()->GetOpCode() == OP_endtry, + "the opcode of endTryBB's lastStmt should be OP_endtry"); + + /* we move endtry_bb before bb_head */ + BBT *threadHead = currBBThread.front(); + CHECK_FATAL(threadHead->GetFirstStmt()->GetPrev() != nullptr, + "the prev of threadHead's firstStmt should not nullptr"); + CHECK_FATAL(threadHead->GetFirstStmt()->GetOpCode() == OP_comment || + threadHead->GetFirstStmt()->GetOpCode() == OP_label, + "the opcode of threadHead's firstStmt should be OP_comment or OP_label"); + CHECK_FATAL(threadHead->GetFirstStmt()->GetPrev()->GetNext() == threadHead->GetFirstStmt(), + "the next of the prev of threadHead's firstStmt should equal threadHead's firstStmt"); + + endTryBB->GetFirstStmt()->GetPrev()->SetNext(endTryBB->GetLastStmt()->GetNext()); + if (endTryBB->GetLastStmt()->GetNext() != nullptr) { + endTryBB->GetLastStmt()->GetNext()->SetPrev(endTryBB->GetFirstStmt()->GetPrev()); + } + + threadHead->GetFirstStmt()->GetPrev()->SetNext(endTryBB->GetFirstStmt()); + endTryBB->GetFirstStmt()->SetPrev(threadHead->GetFirstStmt()->GetPrev()); + + endTryBB->GetLastStmt()->SetNext(threadHead->GetFirstStmt()); + threadHead->GetFirstStmt()->SetPrev(endTryBB->GetLastStmt()); + + CHECK_FATAL(endTryBB->GetCondJumpBranch() == nullptr, "endTryBB's condJumpBranch must be nullptr"); + endTryBB->SetFallthruBranch(nullptr); + if (bodyEndWithEndTry) { + hasMoveEndTry = true; + if (nextBBThreadHead == nullptr) { + body.SetLast(currBBThread.back()->GetLastStmt()); + } + } +} + +void TryCatchBlocksLower::ProcessEnclosedBBBetweenTryEndTry() { + MapleVector &enclosedBBs = tryEndTryBlock.GetEnclosedBBs(); + MapleVector &labeledBBsInTry = tryEndTryBlock.GetLabeledBBsInTry(); + + for (uint32 i = 0; i < enclosedBBs.size(); ++i) { + BBT *lowerBB = enclosedBBs[i]; + uint32 nextEnclosedIdx = i + 1; + if (lowerBB == nullptr) { + continue; /* we may have removed the element */ + } + if (!lowerBB->IsLabeled()) { + continue; + } + labeledBBsInTry.emplace_back(lowerBB); + + /* + * It seems the way a finally is associated with its try is to put the catch block inside + * the java-try-end-try block. So, keep the 'catch(void*)' in it. 
+ */ + LabelIdx ebbLabel = lowerBB->GetLabelIdx(); + bool found = CheckAndProcessCatchNodeInCurrTryBlock(*lowerBB, ebbLabel, i); + /* fill cur_bb_thread until meet the next catch */ + if (!found && lowerBB->IsCatch()) { + enclosedBBs[i] = nullptr; + std::vector currBBThread; + BBT *nextBBThreadHead = nullptr; + bool isFirstTime = true; + bool hasMoveEndTry = false; + do { + if (nextBBThreadHead != nullptr) { + isFirstTime = false; + } + nextBBThreadHead = nullptr; + currBBThread.clear(); + currBBThread.emplace_back(lowerBB); + nextBBThreadHead = CollectCatchAndFallthruUntilNextCatchBB(lowerBB, nextEnclosedIdx, currBBThread); + WrapCatchWithTryEndTryBlock(currBBThread, nextBBThreadHead, nextEnclosedIdx, hasMoveEndTry); + if (isFirstTime) { + SwapEndTryBBAndCurrBBThread(currBBThread, hasMoveEndTry, nextBBThreadHead); + } + } while (nextBBThreadHead != nullptr); + } + } +} + +void TryCatchBlocksLower::ConnectRemainBB() { + MapleVector &enclosedBBs = tryEndTryBlock.GetEnclosedBBs(); + BBT *startTryBB = tryEndTryBlock.GetStartTryBB(); + BBT *endTryBB = tryEndTryBlock.GetEndTryBB(); + size_t nEnclosedBBs = enclosedBBs.size(); + size_t k = 0; + while ((k < nEnclosedBBs) && (enclosedBBs[k] == nullptr)) { + ++k; + } + + if (k < nEnclosedBBs) { + BBT *prevBB = enclosedBBs[k]; + + startTryBB->GetLastStmt()->SetNext(prevBB->GetFirstStmt()); + prevBB->GetFirstStmt()->SetPrev(startTryBB->GetLastStmt()); + + for (++k; k < nEnclosedBBs; ++k) { + BBT *lowerBB = enclosedBBs[k]; + if (lowerBB == nullptr) { + continue; + } + prevBB->GetLastStmt()->SetNext(lowerBB->GetFirstStmt()); + lowerBB->GetFirstStmt()->SetPrev(prevBB->GetLastStmt()); + prevBB = lowerBB; + } + + prevBB->GetLastStmt()->SetNext(endTryBB->GetFirstStmt()); + endTryBB->GetFirstStmt()->SetPrev(prevBB->GetLastStmt()); + } else { + startTryBB->GetLastStmt()->SetNext(endTryBB->GetFirstStmt()); + endTryBB->GetFirstStmt()->SetPrev(startTryBB->GetLastStmt()); + } +} + +BBT *TryCatchBlocksLower::FindInsertAfterBB() { + BBT *insertAfter = tryEndTryBlock.GetEndTryBB(); + CHECK_FATAL(tryEndTryBlock.GetEndTryBB()->GetLastStmt()->GetOpCode() == OP_endtry, "LowerBB type check"); + BBT *iaOpenTry = nullptr; + while (insertAfter->GetFallthruBranch() != nullptr || iaOpenTry != nullptr) { + if (insertAfter->GetFallthruBranch() != nullptr) { + insertAfter = insertAfter->GetFallthruBranch(); + } else { + CHECK_FATAL(iaOpenTry != nullptr, "iaOpenTry should not be nullptr"); + insertAfter = firstStmtToBBMap[insertAfter->GetLastStmt()->GetNext()]; + CHECK_FATAL(!insertAfter->IsTry(), "insertAfter should not be try"); + } + + if (insertAfter->IsTry()) { + iaOpenTry = insertAfter; + } else if (insertAfter->IsEndTry()) { + iaOpenTry = nullptr; + } + } + return insertAfter; +} + +void TryCatchBlocksLower::PlaceRelocatedBB(BBT &insertAfter) { + StmtNode *iaLast = insertAfter.GetLastStmt(); + CHECK_FATAL(iaLast != nullptr, "iaLast should not nullptr"); + + StmtNode *iaNext = iaLast->GetNext(); + if (iaNext == nullptr) { + CHECK_FATAL(body.GetLast() == iaLast, "body's last should equal iaLast"); + } + BBT *prevBB = &insertAfter; + MapleVector &bbsToRelocate = tryEndTryBlock.GetBBsToRelocate(); + for (auto &rbb : bbsToRelocate) { + prevBB->GetLastStmt()->SetNext(rbb->GetFirstStmt()); + rbb->GetFirstStmt()->SetPrev(prevBB->GetLastStmt()); + prevBB = rbb; + } + prevBB->GetLastStmt()->SetNext(iaNext); + if (iaNext != nullptr) { + iaNext->SetPrev(prevBB->GetLastStmt()); + } else { + /* !ia_next means we started with insert_after that was the last bblock Refer to the above 
CHECK_FATAL. */ + body.SetLast(prevBB->GetLastStmt()); + body.GetLast()->SetNext(nullptr); + } +} + +void TryCatchBlocksLower::PalceCatchSeenSofar(BBT &insertAfter) { + TryNode *tryNode = static_cast(tryEndTryBlock.GetTryStmtNode()); + ASSERT(tryNode != nullptr, "tryNode should not be nullptr"); + MapleVector &bbsToRelocate = tryEndTryBlock.GetBBsToRelocate(); + + for (size_t offsetIndex = 0; offsetIndex < tryNode->GetOffsetsCount(); ++offsetIndex) { + auto id = tryNode->GetOffset(offsetIndex); + bool myCatchBlock = false; + for (auto &jcb : bbsToRelocate) { + if (!jcb->IsLabeled()) { + continue; + } + myCatchBlock = (id == jcb->GetLabelIdx()); + if (myCatchBlock) { + break; + } + } + /* + * If the catch block is the one enclosed in this try-endtry block, + * we just relocated it above, so we don't need to consider it again + */ + if (myCatchBlock) { + continue; + } + + CHECK_FATAL(body.GetLast()->GetNext() == nullptr, "the next of body's last should be nullptr"); + for (auto &jcb : catchesSeenSoFar) { + CHECK_FATAL(jcb->IsLabeled(), "jcb should be labeled"); + if (id == jcb->GetLabelIdx()) { + /* + * Remove jcb and all of the blocks that are reachable by following fallthruBranch. + * If we hit a try block, cut there, append an unconditional jump to it to the preceding bblock, + * and relocate them. We may need to insert a label in the try block + */ + BBT *lastBB = jcb; + while (lastBB->GetFallthruBranch() != nullptr && !lastBB->GetFallthruBranch()->IsTry()) { + lastBB = lastBB->GetFallthruBranch(); + } + +#if DEBUG + BBT::ValidateStmtList(bodyFirst); +#endif + if (lastBB->GetFallthruBranch() != nullptr) { + BBT *jtBB = lastBB->GetFallthruBranch(); + CHECK_FATAL(jtBB->IsTry(), "jtBB should be try"); + if (!jtBB->IsLabeled()) { + LabelIdx jtLabIdx = mirModule.GetMIRBuilder()->CreateLabIdx(*mirModule.CurFunction()); + jtBB->SetLabelIdx(jtLabIdx); + StmtNode *labelStmt = mirModule.GetMIRBuilder()->CreateStmtLabel(jtLabIdx); + bool adjustBBFirstStmt = (jtBB->GetKeyStmt() == jtBB->GetFirstStmt()); + labelStmt->SetNext(jtBB->GetKeyStmt()); + labelStmt->SetPrev(jtBB->GetKeyStmt()->GetPrev()); + CHECK_FATAL(jtBB->GetKeyStmt()->GetPrev() != nullptr, "the prev of jtBB's ketStmt shpould not be nullptr"); + jtBB->GetKeyStmt()->GetPrev()->SetNext(labelStmt); + CHECK_FATAL(jtBB->GetKeyStmt()->GetNext() != nullptr, "the next of jtBB's ketStmt shpould not be nullptr"); + jtBB->GetKeyStmt()->SetPrev(labelStmt); + if (adjustBBFirstStmt) { + firstStmtToBBMap.erase(jtBB->GetFirstStmt()); + jtBB->SetFirstStmt(*labelStmt); + firstStmtToBBMap[jtBB->GetFirstStmt()] = jtBB; + } + } + CHECK_FATAL(jtBB->IsLabeled(), "jtBB should be labeled"); + CHECK_FATAL(lastBB->GetLastStmt()->GetOpCode() != OP_goto, + "the opcode of lastBB's lastStmt should not be OP_goto"); + StmtNode *gotoStmt = mirModule.GetMIRBuilder()->CreateStmtGoto(OP_goto, jtBB->GetLabelIdx()); + + StmtNode *lastBBLastStmt = lastBB->GetLastStmt(); + gotoStmt->SetNext(lastBBLastStmt->GetNext()); + gotoStmt->SetPrev(lastBBLastStmt); + if (lastBBLastStmt->GetNext()) { + lastBBLastStmt->GetNext()->SetPrev(gotoStmt); + } + lastBBLastStmt->SetNext(gotoStmt); + + lastBB->SetLastStmt(*gotoStmt); + lastBB->SetFallthruBranch(nullptr); + +#if DEBUG + CHECK_FATAL(body.GetLast()->GetNext() == nullptr, "the next of body's last should be nullptr"); + BBT::ValidateStmtList(bodyFirst); +#endif + } + + /* we want to remove [jcb .. last_bb], inclusively. 
*/ + if (jcb->GetFirstStmt() == body.GetFirst()) { + body.SetFirst(lastBB->GetLastStmt()->GetNext()); + body.GetFirst()->SetPrev(nullptr); + lastBB->GetLastStmt()->GetNext()->SetPrev(nullptr); + bodyFirst = body.GetFirst(); + } else { + CHECK_FATAL(jcb->GetFirstStmt()->GetPrev() != nullptr, "the prev of jcb's firstStmt should not be nullptr"); + CHECK_FATAL(jcb->GetFirstStmt()->GetPrev()->GetNext() == jcb->GetFirstStmt(), + "the next of the prev of jcb's firstStmt should equal jcb's firstStmt"); + if (lastBB->GetLastStmt()->GetNext() != nullptr) { + jcb->GetFirstStmt()->GetPrev()->SetNext(lastBB->GetLastStmt()->GetNext()); + lastBB->GetLastStmt()->GetNext()->SetPrev(jcb->GetFirstStmt()->GetPrev()); + } else { + CHECK_FATAL(lastBB->GetLastStmt() == body.GetLast(), "lastBB's lastStmt should equal body's last"); + body.SetLast(jcb->GetFirstStmt()->GetPrev()); + body.GetLast()->SetNext(nullptr); + jcb->GetFirstStmt()->GetPrev()->SetNext(nullptr); + } + } + jcb->GetFirstStmt()->SetPrev(nullptr); + lastBB->GetLastStmt()->SetNext(nullptr); + +#if DEBUG + CHECK_FATAL(body.GetLast()->GetNext() == nullptr, "the next of body's last should be nullptr"); + BBT::ValidateStmtList(body.GetFirst(), jcb->GetFirstStmt()); +#endif + + /* append it (i.e., [jcb->firstStmt .. last_bb->lastStmt]) after insert_after */ + CHECK_FATAL(insertAfter.GetFallthruBranch() == nullptr, "insertAfter's fallthruBranch should be nullptr"); + if (insertAfter.GetLastStmt() == body.GetLast()) { + CHECK_FATAL(insertAfter.GetLastStmt()->GetNext() == nullptr, + "the next of insertAfter's lastStmt should not be nullptr"); + } + + jcb->GetFirstStmt()->SetPrev(insertAfter.GetLastStmt()); + lastBB->GetLastStmt()->SetNext(insertAfter.GetLastStmt()->GetNext()); + + CHECK_FATAL(body.GetLast()->GetNext() == nullptr, "the next of body's last should be nullptr"); + + if (insertAfter.GetLastStmt()->GetNext() != nullptr) { + insertAfter.GetLastStmt()->GetNext()->SetPrev(lastBB->GetLastStmt()); + CHECK_FATAL(body.GetLast()->GetNext() == nullptr, "the next of body's last should be nullptr"); + } else { + /* + * note that we have a single BlockNode that contains all the instructions of a method. + * What that means is each instruction's next is not nullptr except for the very last instruction. + * insert_after->lastStmt->next == nullptr, means insert_after->lastStmt is indeed the last instruction, + * and we are moving instructions of 'last_bb' after it. Thus, we need to fix the BlockNode's last field. + */ + body.SetLast(lastBB->GetLastStmt()); + CHECK_FATAL(body.GetLast()->GetNext() == nullptr, "the next of body's last should be nullptr"); + } + insertAfter.GetLastStmt()->SetNext(jcb->GetFirstStmt()); + if (jcb->GetFirstStmt()->GetPrev() != nullptr) { + CHECK_FATAL(jcb->GetFirstStmt()->GetPrev()->GetNext() == jcb->GetFirstStmt(), + "the next of the prev of jcb's firstStmt should equal jcb's firstStmt"); + } + if (lastBB->GetLastStmt()->GetNext() != nullptr) { + CHECK_FATAL(lastBB->GetLastStmt()->GetNext()->GetPrev() == lastBB->GetLastStmt(), + "thr prev of the next of lastBB's lastStmt should equal lastBB's lastStmt"); + } + + CHECK_FATAL(body.GetLast()->GetNext() == nullptr, "the next of body's last should be nullptr"); + } + } + } +} + +void TryCatchBlocksLower::TraverseBBList() { + tryEndTryBlock.Init(); + for (auto &bb : bbList) { + if (bb->IsCatch() && tryEndTryBlock.GetStartTryBB() == nullptr) { + /* Add to the list of catch blocks seen so far. 
*/ + catchesSeenSoFar.emplace_back(bb); + } + bodyEndWithEndTry = false; + + if (tryEndTryBlock.GetStartTryBB() == nullptr) { + if (bb->IsTry()) { + StmtNode *firstNonCommentStmt = bb->GetFirstStmt(); + while (firstNonCommentStmt != nullptr && firstNonCommentStmt->GetOpCode() == OP_comment) { + firstNonCommentStmt = firstNonCommentStmt->GetNext(); + } + CHECK_FATAL(bb->GetLastStmt()->GetOpCode() != OP_try || bb->GetLastStmt() == firstNonCommentStmt || + !generateEHCode, "make sure the opcode of bb's lastStmt is not OP_try" + "or the opcode of bb's lastStmt is OP_try but bb's lastStmt equals firstNonCommentStmt" + "or not generate EHCode"); + /* prepare for processing a java try block */ + tryEndTryBlock.Reset(*bb); + } + continue; + } + + /* We should have not a try block enclosed in another java try block!! */ + CHECK_FATAL(!bb->IsTry(), "bb should not be try"); + if (!bb->IsEndTry()) { + tryEndTryBlock.PushToEnclosedBBs(*bb); + } else { + tryEndTryBlock.SetEndTryBB(bb); + if (tryEndTryBlock.GetEndTryBB()->GetLastStmt() == body.GetLast()) { + bodyEndWithEndTry = true; + } +#if DEBUG + for (size_t i = 0; i < tryEndTryBlock.GetEnclosedBBsSize(); ++i) { + CHECK_FATAL(tryEndTryBlock.GetEnclosedBBsElem(i), "there should not be nullptr in enclosedBBs"); + } +#endif + ProcessEnclosedBBBetweenTryEndTry(); + /* Now, connect the remaining ones again n_enclosed_bbs includes 'nullptr's (i.e., deleted entries) */ + ConnectRemainBB(); + BBT *insertAfter = FindInsertAfterBB(); + PlaceRelocatedBB(*insertAfter); + +#if DEBUG + CHECK_FATAL(body.GetLast()->GetNext() == nullptr, "the next of body's last should be nullptr"); + BBT::ValidateStmtList(bodyFirst); +#endif + if (prevBBOfTry[tryEndTryBlock.GetStartTryBB()]) { + StmtNode *firstStmtMovedIn = MoveCondGotoIntoTry(*tryEndTryBlock.GetStartTryBB(), + *prevBBOfTry[tryEndTryBlock.GetStartTryBB()], + tryEndTryBlock.GetLabeledBBsInTry()); + if (firstStmtMovedIn == bodyFirst) { + bodyFirst = tryEndTryBlock.GetStartTryBB()->GetFirstStmt(); + prevBBOfTry[tryEndTryBlock.GetStartTryBB()] = nullptr; + } + } + /* + * Now, examine each offset attached to this try and move any catch block + * that is not in 'bbs_to_relocate' but in 'catches_seen_so_far' + */ + PalceCatchSeenSofar(*insertAfter); + + /* close the try that is open */ + tryEndTryBlock.SetStartTryBB(nullptr); + } +#if DEBUG + CHECK_FATAL(body.GetLast()->GetNext() == nullptr, "the next of body's last should be nullptr"); + BBT::ValidateStmtList(bodyFirst); +#endif + } + + body.SetFirst(bodyFirst); +} + +void TryCatchBlocksLower::CheckTryCatchPattern() const { + StmtNode *openJt = nullptr; + for (StmtNode *stmt = body.GetFirst(); stmt; stmt = stmt->GetNext()) { + switch (stmt->GetOpCode()) { + case OP_try: + openJt = stmt; + break; + case OP_endtry: + openJt = nullptr; + break; + case OP_catch: + if (openJt != nullptr) { + CatchNode *jcn = static_cast(stmt); + for (uint32 i = 0; i < jcn->Size(); ++i) { + MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(jcn->GetExceptionTyIdxVecElement(i)); + MIRPtrType *ptr = static_cast(type); + type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(ptr->GetPointedTyIdx()); + CHECK_FATAL(type->GetPrimType() == PTY_void, "type's primType should be PTY_void"); + } + } + break; + default: + break; + } + } +} +} /* namespace maplebe */ \ No newline at end of file diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_MPISel.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_MPISel.cpp new file mode 100644 index 
0000000000000000000000000000000000000000..72ba534f1b21bbd023375a8f6b493d9338cbcc20 --- /dev/null +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_MPISel.cpp @@ -0,0 +1,14 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ \ No newline at end of file diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_abi.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_abi.cpp new file mode 100644 index 0000000000000000000000000000000000000000..2cc831cb4d682446dd5fc3d1ce1c88277312ace0 --- /dev/null +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_abi.cpp @@ -0,0 +1,170 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "aarch64_cgfunc.h" +#include "becommon.h" + +namespace maplebe { +using namespace maple; + +namespace AArch64Abi { +bool IsAvailableReg(AArch64reg reg) { + switch (reg) { +/* integer registers */ +#define INT_REG(ID, PREF32, PREF64, canBeAssigned, isCalleeSave, isParam, isSpill, isExtraSpill) \ + case R##ID: \ + return canBeAssigned; +#define INT_REG_ALIAS(ALIAS, ID, PREF32, PREF64) +#include "aarch64_int_regs.def" +#undef INT_REG +#undef INT_REG_ALIAS +/* fp-simd registers */ +#define FP_SIMD_REG(ID, PV, P8, P16, P32, P64, P128, canBeAssigned, isCalleeSave, isParam, isSpill, isExtraSpill) \ + case V##ID: \ + return canBeAssigned; +#define FP_SIMD_REG_ALIAS(ID) +#include "aarch64_fp_simd_regs.def" +#undef FP_SIMD_REG +#undef FP_SIMD_REG_ALIAS + default: + return false; + } +} + +bool IsCallerSaveReg(AArch64reg regNO) { + return (R0 <= regNO && regNO <= R18) || (V0 <= regNO && regNO <= V7) || + (V16 <= regNO && regNO <= V31) || (regNO == kRFLAG); +} + +bool IsCalleeSavedReg(AArch64reg reg) { + switch (reg) { +/* integer registers */ +#define INT_REG(ID, PREF32, PREF64, canBeAssigned, isCalleeSave, isParam, isSpill, isExtraSpill) \ + case R##ID: \ + return isCalleeSave; +#define INT_REG_ALIAS(ALIAS, ID, PREF32, PREF64) +#include "aarch64_int_regs.def" +#undef INT_REG +#undef INT_REG_ALIAS +/* fp-simd registers */ +#define FP_SIMD_REG(ID, PV, P8, P16, P32, P64, P128, canBeAssigned, isCalleeSave, isParam, isSpill, isExtraSpill) \ + case V##ID: \ + return isCalleeSave; +#define FP_SIMD_REG_ALIAS(ID) +#include "aarch64_fp_simd_regs.def" +#undef FP_SIMD_REG +#undef FP_SIMD_REG_ALIAS + default: + return false; + } +} + +bool IsParamReg(AArch64reg reg) { + switch (reg) { +/* integer registers */ +#define INT_REG(ID, PREF32, PREF64, canBeAssigned, isCalleeSave, isParam, isSpill, isExtraSpill) \ + case R##ID: \ + return isParam; 
+#define INT_REG_ALIAS(ALIAS, ID, PREF32, PREF64) +#include "aarch64_int_regs.def" +#undef INT_REG +#undef INT_REG_ALIAS +/* fp-simd registers */ +#define FP_SIMD_REG(ID, PV, P8, P16, P32, P64, P128, canBeAssigned, isCalleeSave, isParam, isSpill, isExtraSpill) \ + case V##ID: \ + return isParam; +#define FP_SIMD_REG_ALIAS(ID) +#include "aarch64_fp_simd_regs.def" +#undef FP_SIMD_REG +#undef FP_SIMD_REG_ALIAS + default: + return false; + } +} + +bool IsSpillReg(AArch64reg reg) { + switch (reg) { +/* integer registers */ +#define INT_REG(ID, PREF32, PREF64, canBeAssigned, isCalleeSave, isParam, isSpill, isExtraSpill) \ + case R##ID: \ + return isSpill; +#define INT_REG_ALIAS(ALIAS, ID, PREF32, PREF64) +#include "aarch64_int_regs.def" +#undef INT_REG +#undef INT_REG_ALIAS +/* fp-simd registers */ +#define FP_SIMD_REG(ID, PV, P8, P16, P32, P64, P128, canBeAssigned, isCalleeSave, isParam, isSpill, isExtraSpill) \ + case V##ID: \ + return isSpill; +#define FP_SIMD_REG_ALIAS(ID) +#include "aarch64_fp_simd_regs.def" +#undef FP_SIMD_REG +#undef FP_SIMD_REG_ALIAS + default: + return false; + } +} + +bool IsExtraSpillReg(AArch64reg reg) { + switch (reg) { +/* integer registers */ +#define INT_REG(ID, PREF32, PREF64, canBeAssigned, isCalleeSave, isParam, isSpill, isExtraSpill) \ + case R##ID: \ + return isExtraSpill; +#define INT_REG_ALIAS(ALIAS, ID, PREF32, PREF64) +#include "aarch64_int_regs.def" +#undef INT_REG +#undef INT_REG_ALIAS +/* fp-simd registers */ +#define FP_SIMD_REG(ID, PV, P8, P16, P32, P64, P128, canBeAssigned, isCalleeSave, isParam, isSpill, isExtraSpill) \ + case V##ID: \ + return isExtraSpill; +#define FP_SIMD_REG_ALIAS(ID) +#include "aarch64_fp_simd_regs.def" +#undef FP_SIMD_REG +#undef FP_SIMD_REG_ALIAS + default: + return false; + } +} + +bool IsSpillRegInRA(AArch64reg regNO, bool has3RegOpnd) { + /* if has 3 RegOpnd, previous reg used to spill. */ + if (has3RegOpnd) { + return AArch64Abi::IsSpillReg(regNO) || AArch64Abi::IsExtraSpillReg(regNO); + } + return AArch64Abi::IsSpillReg(regNO); +} + +PrimType IsVectorArrayType(MIRType *ty, uint32 &arraySize) { + if (ty->GetKind() == kTypeStruct) { + MIRStructType *structTy = static_cast(ty); + if (structTy->GetFields().size() == 1) { + auto fieldPair = structTy->GetFields()[0]; + MIRType *fieldTy = GlobalTables::GetTypeTable().GetTypeFromTyIdx(fieldPair.second.first); + if (fieldTy->GetKind() == kTypeArray) { + MIRArrayType *arrayTy = static_cast(fieldTy); + MIRType *arrayElemTy = arrayTy->GetElemType(); + arraySize = arrayTy->GetSizeArrayItem(0); + if (arrayTy->GetDim() == k1BitSize && arraySize <= k4BitSize && + IsPrimitiveVector(arrayElemTy->GetPrimType())) { + return arrayElemTy->GetPrimType(); + } + } + } + } + return PTY_void; +} +} /* namespace AArch64Abi */ +} /* namespace maplebe */ diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_alignment.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_alignment.cpp new file mode 100644 index 0000000000000000000000000000000000000000..213ea3f33944364467ce17887f3299a7bd23195a --- /dev/null +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_alignment.cpp @@ -0,0 +1,360 @@ +/* + * Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include +#include "insn.h" +#include "loop.h" +#include "aarch64_cg.h" +#include "cg_option.h" +#include "aarch64_alignment.h" + +namespace maplebe { +void AArch64AlignAnalysis::FindLoopHeader() { + MapleVector loops = aarFunc->GetLoops(); + if (loops.empty()) { + return; + } + for (const auto *loop : loops) { + const BB *header = loop->GetHeader(); + if (header != nullptr) { + InsertLoopHeaderBBs(const_cast(*header)); + } + } +} + +void AArch64AlignAnalysis::FindJumpTarget() { + MapleUnorderedMap label2BBMap = aarFunc->GetLab2BBMap(); + if (label2BBMap.empty()) { + return; + } + for (auto &iter : label2BBMap) { + BB *jumpBB = iter.second; + if (jumpBB != nullptr) { + InsertJumpTargetBBs(*jumpBB); + } + } +} + +bool AArch64AlignAnalysis::IsIncludeCall(BB &bb) { + return bb.HasCall(); +} + +bool AArch64AlignAnalysis::IsInSizeRange(BB &bb) { + uint64 size = 0; + FOR_BB_INSNS_CONST(insn, &bb) { + if (!insn->IsMachineInstruction() || insn->GetMachineOpcode() == MOP_pseudo_ret_int || + insn->GetMachineOpcode() == MOP_pseudo_ret_float) { + continue; + } + size += kAlignInsnLength; + } + BB *curBB = &bb; + while (curBB->GetNext() != nullptr && curBB->GetNext()->GetLabIdx() == 0) { + FOR_BB_INSNS_CONST(insn, curBB->GetNext()) { + if (!insn->IsMachineInstruction() || insn->GetMachineOpcode() == MOP_pseudo_ret_int || + insn->GetMachineOpcode() == MOP_pseudo_ret_float) { + continue; + } + size += kAlignInsnLength; + } + curBB = curBB->GetNext(); + } + AArch64AlignInfo targetInfo; + if (CGOptions::GetAlignMinBBSize() == 0 || CGOptions::GetAlignMaxBBSize() == 0) { + return false; + } + targetInfo.alignMinBBSize = (CGOptions::OptimizeForSize()) ? 16 : CGOptions::GetAlignMinBBSize(); + targetInfo.alignMaxBBSize = (CGOptions::OptimizeForSize()) ? 44 : CGOptions::GetAlignMaxBBSize(); + if (size <= targetInfo.alignMinBBSize || size >= targetInfo.alignMaxBBSize) { + return false; + } + return true; +} + +bool AArch64AlignAnalysis::HasFallthruEdge(BB &bb) { + for (auto *iter : bb.GetPreds()) { + if (iter == bb.GetPrev()) { + return true; + } + } + return false; +} + +void AArch64AlignAnalysis::ComputeLoopAlign() { + if (loopHeaderBBs.empty()) { + return; + } + for (BB *bb : loopHeaderBBs) { + if (bb == cgFunc->GetFirstBB() || IsIncludeCall(*bb) || !IsInSizeRange(*bb)) { + continue; + } + bb->SetNeedAlign(true); + if (CGOptions::GetLoopAlignPow() == 0) { + return; + } + AArch64AlignInfo targetInfo; + targetInfo.loopAlign = CGOptions::GetLoopAlignPow(); + if (alignInfos.find(bb) == alignInfos.end()) { + alignInfos[bb] = targetInfo.loopAlign; + } else { + uint32 curPower = alignInfos[bb]; + alignInfos[bb] = (targetInfo.loopAlign < curPower) ? targetInfo.loopAlign : curPower; + } + bb->SetAlignPower(alignInfos[bb]); + } +} + +void AArch64AlignAnalysis::ComputeJumpAlign() { + if (jumpTargetBBs.empty()) { + return; + } + for (BB *bb : jumpTargetBBs) { + if (bb == cgFunc->GetFirstBB() || !IsInSizeRange(*bb) || HasFallthruEdge(*bb)) { + continue; + } + bb->SetNeedAlign(true); + if (CGOptions::GetJumpAlignPow() == 0) { + return; + } + AArch64AlignInfo targetInfo; + targetInfo.jumpAlign = (CGOptions::OptimizeForSize()) ? 
3 : CGOptions::GetJumpAlignPow(); + if (alignInfos.find(bb) == alignInfos.end()) { + alignInfos[bb] = targetInfo.jumpAlign; + } else { + uint32 curPower = alignInfos[bb]; + alignInfos[bb] = (targetInfo.jumpAlign < curPower) ? targetInfo.jumpAlign : curPower; + } + bb->SetAlignPower(alignInfos[bb]); + } +} + +uint32 AArch64AlignAnalysis::GetAlignRange(uint32 alignedVal, uint32 addr) const { + if (addr == 0) { + return addr; + } + uint32 range = (alignedVal - (((addr - 1) * kInsnSize) & (alignedVal - 1))) / kInsnSize - 1; + return range; +} + +bool AArch64AlignAnalysis::IsInSameAlignedRegion(uint32 addr1, uint32 addr2, uint32 alignedRegionSize) const { + return (((addr1 - 1) * kInsnSize) / alignedRegionSize) == (((addr2 - 1) * kInsnSize) / alignedRegionSize); +} + +bool AArch64AlignAnalysis::MarkCondBranchAlign() { + sameTargetBranches.clear(); + uint32 addr = 0; + bool change = false; + FOR_ALL_BB(bb, aarFunc) { + if (bb != nullptr && bb->IsBBNeedAlign()) { + uint32 alignedVal = (1U << bb->GetAlignPower()); + uint32 alignNopNum = GetAlignRange(alignedVal, addr); + addr += alignNopNum; + bb->SetAlignNopNum(alignNopNum); + } + FOR_BB_INSNS(insn, bb) { + if (!insn->IsMachineInstruction()) { + continue; + } + addr += insn->GetAtomicNum(); + MOperator mOp = insn->GetMachineOpcode(); + if ((mOp == MOP_wtbz || mOp == MOP_wtbnz || mOp == MOP_xtbz || mOp == MOP_xtbnz) && insn->IsNeedSplit()) { + ++addr; + } + if (!insn->IsCondBranch() || insn->GetOperandSize() == 0) { + insn->SetAddress(addr); + continue; + } + Operand &opnd = insn->GetOperand(insn->GetOperandSize() - 1); + if (!opnd.IsLabelOpnd()) { + insn->SetAddress(addr); + continue; + } + LabelIdx targetIdx = static_cast(opnd).GetLabelIndex(); + if (sameTargetBranches.find(targetIdx) == sameTargetBranches.end()) { + sameTargetBranches[targetIdx] = addr; + insn->SetAddress(addr); + continue; + } + uint32 sameTargetAddr = sameTargetBranches[targetIdx]; + uint32 alignedRegionSize = 1 << kAlignRegionPower; + /** + * if two branches jump to the same target and their addresses are within an 16byte aligned region, + * add a certain number of [nop] to move them out of the region. + */ + if (IsInSameAlignedRegion(sameTargetAddr, addr, alignedRegionSize)) { + uint32 nopNum = GetAlignRange(alignedRegionSize, addr) + 1; + nopNum = nopNum > kAlignMaxNopNum ? 0 : nopNum; + if (nopNum == 0) { + break; + } + change = true; + insn->SetNopNum(nopNum); + for (uint32 i = 0; i < nopNum; i++) { + addr += insn->GetAtomicNum(); + } + } else { + insn->SetNopNum(0); + } + sameTargetBranches[targetIdx] = addr; + insn->SetAddress(addr); + } + } + return change; +} + +void AArch64AlignAnalysis::UpdateInsnId() { + uint32 id = 0; + FOR_ALL_BB(bb, aarFunc) { + if (bb != nullptr && bb->IsBBNeedAlign()) { + uint32 alignedVal = 1U << (bb->GetAlignPower()); + uint32 range = GetAlignRange(alignedVal, id); + id = id + (range > kAlignPseudoSize ? 
range : kAlignPseudoSize); + } + FOR_BB_INSNS(insn, bb) { + if (!insn->IsMachineInstruction()) { + continue; + } + id += insn->GetAtomicNum(); + if (insn->IsCondBranch() && insn->GetNopNum() != 0) { + id += insn->GetNopNum(); + } + MOperator mOp = insn->GetMachineOpcode(); + if ((mOp == MOP_wtbz || mOp == MOP_wtbnz || mOp == MOP_xtbz || mOp == MOP_xtbnz) && insn->IsNeedSplit()) { + ++id; + } + insn->SetId(id); + if (insn->GetMachineOpcode() == MOP_adrp_ldr && CGOptions::IsLazyBinding() && !aarFunc->GetCG()->IsLibcore()) { + ++id; + } + } + } +} + +bool AArch64AlignAnalysis::MarkShortBranchSplit() { + bool change = false; + bool split; + do { + split = false; + UpdateInsnId(); + for (auto *bb = aarFunc->GetFirstBB(); bb != nullptr && !split; bb = bb->GetNext()) { + for (auto *insn = bb->GetLastInsn(); insn != nullptr && !split; insn = insn->GetPrev()) { + if (!insn->IsMachineInstruction()) { + continue; + } + MOperator mOp = insn->GetMachineOpcode(); + if (mOp != MOP_wtbz && mOp != MOP_wtbnz && mOp != MOP_xtbz && mOp != MOP_xtbnz) { + continue; + } + if (insn->IsNeedSplit()) { + continue; + } + auto &labelOpnd = static_cast(insn->GetOperand(kInsnThirdOpnd)); + if (aarFunc->DistanceCheck(*bb, labelOpnd.GetLabelIndex(), insn->GetId())) { + continue; + } + split = true; + change = true; + insn->SetNeedSplit(split); + } + } + } while (split); + return change; +} + +void AArch64AlignAnalysis::AddNopAfterMark() { + FOR_ALL_BB(bb, aarFunc) { + FOR_BB_INSNS(insn, bb) { + if (!insn->IsMachineInstruction() || !insn->IsCondBranch() || insn->GetNopNum() == 0) { + continue; + } + /** + * To minimize the performance loss of nop, we decided to place nop on an island before the current addr. + * The island here is after [b, ret, br, blr]. + * To ensure correct insertion of the nop, the nop is inserted in the original position in the following cases: + * 1. A branch with the same target exists before it. + * 2. A branch whose nopNum value is not 0 exists before it. + * 3. no BBs need to be aligned between the original location and the island. 
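+ * Illustrative walk-through (hypothetical shape, not taken from a particular input):
+ * if a conditional branch was marked with nopNum = 2 and the backward scan reaches a
+ * "b", "ret" or "br" before hitting any of the three cases above, findIsland becomes
+ * true and the two nops are emitted right after that instruction; otherwise they are
+ * emitted directly before the branch as the conservative fallback.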
+ */ + std::unordered_map targetCondBrs; + bool findIsland = false; + Insn *detect = insn->GetPrev(); + BB *region = bb; + while (detect != nullptr || region != aarFunc->GetFirstBB()) { + while (detect == nullptr) { + ASSERT(region->GetPrev() != nullptr, "get region prev failed"); + region = region->GetPrev(); + detect = region->GetLastInsn(); + } + if (detect->GetMachineOpcode() == MOP_xuncond || detect->GetMachineOpcode() == MOP_xret || + detect->GetMachineOpcode() == MOP_xbr) { + findIsland = true; + break; + } + if (region->IsBBNeedAlign()) { + break; + } + if (!detect->IsMachineInstruction() || !detect->IsCondBranch() || detect->GetOperandSize() == 0) { + detect = detect->GetPrev(); + continue; + } + if (detect->GetNopNum() != 0) { + break; + } + Operand &opnd = detect->GetOperand(detect->GetOperandSize() - 1); + if (!opnd.IsLabelOpnd()) { + detect = detect->GetPrev(); + continue; + } + LabelIdx targetIdx = static_cast(opnd).GetLabelIndex(); + if (targetCondBrs.find(targetIdx) != targetCondBrs.end()) { + break; + } + targetCondBrs[targetIdx] = detect; + detect = detect->GetPrev(); + } + uint32 nopNum = insn->GetNopNum(); + if (findIsland) { + for (uint32 i = 0; i < nopNum; i++) { + (void)bb->InsertInsnAfter(*detect, aarFunc->GetInsnBuilder()->BuildInsn(MOP_nop)); + } + } else { + for (uint32 i = 0; i < nopNum; i++) { + (void)bb->InsertInsnBefore(*insn, aarFunc->GetInsnBuilder()->BuildInsn(MOP_nop)); + } + } + } + } +} + +/** + * The insertion of nop affects the judgement of the addressing range of short branches, + * and the splitting of short branches affects the calculation of the location and number of nop insertions. + * In the iteration process of both, we only make some marks, wait for the fixed points, and fill in nop finally. + */ +void AArch64AlignAnalysis::ComputeCondBranchAlign() { + bool condBrChange = false; + bool shortBrChange = false; + while (true) { + condBrChange = MarkCondBranchAlign(); + if (!condBrChange) { + break; + } + shortBrChange = MarkShortBranchSplit(); + if (!shortBrChange) { + break; + } + } + AddNopAfterMark(); +} +} /* namespace maplebe */ diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_args.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_args.cpp new file mode 100644 index 0000000000000000000000000000000000000000..e9ace62f7001060a5ca8379d9704ae584192a9ca --- /dev/null +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_args.cpp @@ -0,0 +1,484 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "aarch64_args.h" +#include +#include "aarch64_cgfunc.h" +#include "aarch64_cg.h" + +namespace maplebe { +using namespace maple; + +void AArch64MoveRegArgs::Run() { + MoveVRegisterArgs(); + MoveRegisterArgs(); +} + +void AArch64MoveRegArgs::CollectRegisterArgs(std::map &argsList, + std::vector &indexList, + std::map &pairReg, + std::vector &numFpRegs, + std::vector &fpSize) const { + AArch64CGFunc *aarchCGFunc = static_cast(cgFunc); + uint32 numFormal = static_cast(aarchCGFunc->GetFunction().GetFormalCount()); + numFpRegs.resize(numFormal); + fpSize.resize(numFormal); + AArch64CallConvImpl parmlocator(aarchCGFunc->GetBecommon()); + CCLocInfo ploc; + uint32 start = 0; + if (numFormal > 0) { + MIRFunction *func = const_cast(aarchCGFunc->GetBecommon().GetMIRModule().CurFunction()); + if (func->IsReturnStruct() && func->IsFirstArgReturn()) { + TyIdx tyIdx = func->GetFuncRetStructTyIdx(); + if (aarchCGFunc->GetBecommon().GetTypeSize(tyIdx) <= k16ByteSize) { + start = 1; + } + } + } + for (uint32 i = start; i < numFormal; ++i) { + MIRType *ty = aarchCGFunc->GetFunction().GetNthParamType(i); + parmlocator.LocateNextParm(*ty, ploc, i == 0, &aarchCGFunc->GetFunction()); + if (ploc.reg0 == kRinvalid) { + continue; + } + AArch64reg reg0 = static_cast(ploc.reg0); + MIRSymbol *sym = aarchCGFunc->GetFunction().GetFormal(i); + if (sym->IsPreg()) { + continue; + } + argsList[i] = reg0; + indexList.emplace_back(i); + if (ploc.reg1 == kRinvalid) { + continue; + } + if (ploc.numFpPureRegs > 0) { + uint32 index = i; + numFpRegs[index] = ploc.numFpPureRegs; + fpSize[index] = ploc.fpSize; + continue; + } + pairReg[i] = static_cast(ploc.reg1); + } +} + +ArgInfo AArch64MoveRegArgs::GetArgInfo(std::map &argsList, std::vector &numFpRegs, + std::vector &fpSize, uint32 argIndex) const { + AArch64CGFunc *aarchCGFunc = static_cast(cgFunc); + ArgInfo argInfo; + argInfo.reg = argsList[argIndex]; + argInfo.mirTy = aarchCGFunc->GetFunction().GetNthParamType(argIndex); + argInfo.symSize = aarchCGFunc->GetBecommon().GetTypeSize(argInfo.mirTy->GetTypeIndex()); + argInfo.memPairSecondRegSize = 0; + argInfo.doMemPairOpt = false; + argInfo.createTwoStores = false; + argInfo.isTwoRegParm = false; + + if (GetVecLanes(argInfo.mirTy->GetPrimType()) > 0) { + /* vector type */ + argInfo.stkSize = argInfo.symSize; + } else if ((argInfo.symSize > k8ByteSize) && (argInfo.symSize <= k16ByteSize)) { + argInfo.isTwoRegParm = true; + if (numFpRegs[argIndex] > kOneRegister) { + argInfo.symSize = argInfo.stkSize = fpSize[argIndex]; + } else { + if (argInfo.symSize > k12ByteSize) { + argInfo.memPairSecondRegSize = k8ByteSize; + } else { + /* Round to 4 the stack space required for storing the struct */ + argInfo.memPairSecondRegSize = k4ByteSize; + } + argInfo.doMemPairOpt = true; + if (CGOptions::IsArm64ilp32()) { + argInfo.symSize = argInfo.stkSize = k8ByteSize; + } else { + argInfo.symSize = argInfo.stkSize = GetPointerSize(); + } + } + } else if (argInfo.symSize > k16ByteSize) { + /* For large struct passing, a pointer to the copy is used. */ + if (CGOptions::IsArm64ilp32()) { + argInfo.symSize = argInfo.stkSize = k8ByteSize; + } else { + argInfo.symSize = argInfo.stkSize = GetPointerSize(); + } + } else if ((argInfo.mirTy->GetPrimType() == PTY_agg) && (argInfo.symSize < k8ByteSize)) { + /* + * For small aggregate parameter, set to minimum of 8 bytes. + * B.5:If the argument type is a Composite Type then the size of the argument is rounded up to the + * nearest multiple of 8 bytes. 
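+ * Worked example (illustrative only): a 5-byte struct passed by value is widened to a
+ * single 8-byte slot here, i.e. symSize and stkSize both become k8ByteSize below.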
+ */ + argInfo.symSize = argInfo.stkSize = k8ByteSize; + } else if (numFpRegs[argIndex] > kOneRegister) { + argInfo.isTwoRegParm = true; + argInfo.symSize = argInfo.stkSize = fpSize[argIndex]; + } else { + argInfo.stkSize = (argInfo.symSize < k4ByteSize) ? k4ByteSize : argInfo.symSize; + if (argInfo.symSize > k4ByteSize) { + argInfo.symSize = k8ByteSize; + } + } + argInfo.regType = (argInfo.reg < V0) ? kRegTyInt : kRegTyFloat; + argInfo.sym = aarchCGFunc->GetFunction().GetFormal(argIndex); + CHECK_NULL_FATAL(argInfo.sym); + argInfo.symLoc = + static_cast(aarchCGFunc->GetMemlayout()->GetSymAllocInfo(argInfo.sym->GetStIndex())); + CHECK_NULL_FATAL(argInfo.symLoc); + if (argInfo.doMemPairOpt && (static_cast(aarchCGFunc->GetBaseOffset(*(argInfo.symLoc))) & 0x7)) { + /* Do not optimize for struct reg pair for unaligned access. + * However, this symbol requires two parameter registers, separate stores must be generated. + */ + argInfo.symSize = GetPointerSize(); + argInfo.doMemPairOpt = false; + argInfo.createTwoStores = true; + } + return argInfo; +} + +bool AArch64MoveRegArgs::IsInSameSegment(const ArgInfo &firstArgInfo, const ArgInfo &secondArgInfo) const { + if (firstArgInfo.symLoc->GetMemSegment() != secondArgInfo.symLoc->GetMemSegment()) { + return false; + } + if (firstArgInfo.symSize != secondArgInfo.symSize) { + return false; + } + if (firstArgInfo.symSize != k4ByteSize && firstArgInfo.symSize != k8ByteSize) { + return false; + } + if (firstArgInfo.regType != secondArgInfo.regType) { + return false; + } + return firstArgInfo.symLoc->GetOffset() + firstArgInfo.stkSize == secondArgInfo.symLoc->GetOffset(); +} + +void AArch64MoveRegArgs::GenerateStpInsn(const ArgInfo &firstArgInfo, const ArgInfo &secondArgInfo) { + AArch64CGFunc *aarchCGFunc = static_cast(cgFunc); + RegOperand *baseOpnd = static_cast(aarchCGFunc->GetBaseReg(*firstArgInfo.symLoc)); + RegOperand ®Opnd = aarchCGFunc->GetOrCreatePhysicalRegisterOperand(firstArgInfo.reg, + firstArgInfo.stkSize * kBitsPerByte, + firstArgInfo.regType); + MOperator mOp = firstArgInfo.regType == kRegTyInt ? ((firstArgInfo.stkSize > k4ByteSize) ? MOP_xstp : MOP_wstp) + : ((firstArgInfo.stkSize > k4ByteSize) ? MOP_dstp : MOP_sstp); + RegOperand *regOpnd2 = &aarchCGFunc->GetOrCreatePhysicalRegisterOperand(secondArgInfo.reg, + firstArgInfo.stkSize * kBitsPerByte, + firstArgInfo.regType); + if (firstArgInfo.doMemPairOpt && firstArgInfo.isTwoRegParm) { + AArch64reg regFp2 = static_cast(firstArgInfo.reg + kOneRegister); + regOpnd2 = &aarchCGFunc->GetOrCreatePhysicalRegisterOperand(regFp2, + firstArgInfo.stkSize * kBitsPerByte, + firstArgInfo.regType); + } + + int32 limit = (secondArgInfo.stkSize > k4ByteSize) ? 
kStpLdpImm64UpperBound : kStpLdpImm32UpperBound; + int32 stOffset = aarchCGFunc->GetBaseOffset(*firstArgInfo.symLoc); + MemOperand *memOpnd = nullptr; + if (stOffset > limit || baseReg != nullptr) { + if (baseReg == nullptr || lastSegment != firstArgInfo.symLoc->GetMemSegment()) { + ImmOperand &immOpnd = + aarchCGFunc->CreateImmOperand(stOffset - firstArgInfo.symLoc->GetOffset(), k64BitSize, false); + baseReg = &aarchCGFunc->CreateRegisterOperandOfType(kRegTyInt, k8ByteSize); + lastSegment = firstArgInfo.symLoc->GetMemSegment(); + aarchCGFunc->SelectAdd(*baseReg, *baseOpnd, immOpnd, GetLoweredPtrType()); + } + OfstOperand &offsetOpnd = aarchCGFunc->CreateOfstOpnd(static_cast(firstArgInfo.symLoc->GetOffset()), + k32BitSize); + if (firstArgInfo.symLoc->GetMemSegment()->GetMemSegmentKind() == kMsArgsStkPassed) { + offsetOpnd.SetVary(kUnAdjustVary); + } + memOpnd = aarchCGFunc->CreateMemOperand(MemOperand::kAddrModeBOi, + firstArgInfo.stkSize * kBitsPerByte, + *baseReg, nullptr, &offsetOpnd, firstArgInfo.sym); + } else { + OfstOperand &offsetOpnd = aarchCGFunc->CreateOfstOpnd(static_cast(static_cast(stOffset)), + k32BitSize); + if (firstArgInfo.symLoc->GetMemSegment()->GetMemSegmentKind() == kMsArgsStkPassed) { + offsetOpnd.SetVary(kUnAdjustVary); + } + memOpnd = aarchCGFunc->CreateMemOperand(MemOperand::kAddrModeBOi, + firstArgInfo.stkSize * kBitsPerByte, + *baseOpnd, nullptr, &offsetOpnd, firstArgInfo.sym); + } + Insn &pushInsn = aarchCGFunc->GetInsnBuilder()->BuildInsn(mOp, regOpnd, *regOpnd2, *memOpnd); + if (aarchCGFunc->GetCG()->GenerateVerboseCG()) { + std::string argName = firstArgInfo.sym->GetName() + " " + secondArgInfo.sym->GetName(); + pushInsn.SetComment(std::string("store param: ").append(argName)); + } + aarchCGFunc->GetCurBB()->AppendInsn(pushInsn); +} + +void AArch64MoveRegArgs::GenOneInsn(const ArgInfo &argInfo, RegOperand &baseOpnd, uint32 stBitSize, AArch64reg dest, + int32 offset) const { + AArch64CGFunc *aarchCGFunc = static_cast(cgFunc); + MOperator mOp = aarchCGFunc->PickStInsn(stBitSize, argInfo.mirTy->GetPrimType()); + RegOperand ®Opnd = aarchCGFunc->GetOrCreatePhysicalRegisterOperand(dest, stBitSize, argInfo.regType); + + OfstOperand &offsetOpnd = aarchCGFunc->CreateOfstOpnd(static_cast(static_cast(offset)), k32BitSize); + if (argInfo.symLoc->GetMemSegment()->GetMemSegmentKind() == kMsArgsStkPassed) { + offsetOpnd.SetVary(kUnAdjustVary); + } + MemOperand *memOpnd = aarchCGFunc->CreateMemOperand(MemOperand::kAddrModeBOi, + stBitSize, baseOpnd, nullptr, &offsetOpnd, argInfo.sym); + Insn &insn = aarchCGFunc->GetInsnBuilder()->BuildInsn(mOp, regOpnd, *memOpnd); + if (aarchCGFunc->GetCG()->GenerateVerboseCG()) { + insn.SetComment(std::string("store param: ").append(argInfo.sym->GetName())); + } + aarchCGFunc->GetCurBB()->AppendInsn(insn); +} + +void AArch64MoveRegArgs::GenerateStrInsn(const ArgInfo &argInfo, AArch64reg reg2, uint32 numFpRegs, uint32 fpSize) { + AArch64CGFunc *aarchCGFunc = static_cast(cgFunc); + int32 stOffset = aarchCGFunc->GetBaseOffset(*argInfo.symLoc); + RegOperand *baseOpnd = static_cast(aarchCGFunc->GetBaseReg(*argInfo.symLoc)); + RegOperand ®Opnd = + aarchCGFunc->GetOrCreatePhysicalRegisterOperand(argInfo.reg, argInfo.stkSize * kBitsPerByte, argInfo.regType); + MemOperand *memOpnd = nullptr; + if (MemOperand::IsPIMMOffsetOutOfRange(stOffset, argInfo.symSize * kBitsPerByte) || + (baseReg != nullptr && (lastSegment == argInfo.symLoc->GetMemSegment()))) { + if (baseReg == nullptr || lastSegment != argInfo.symLoc->GetMemSegment()) { + ImmOperand 
&immOpnd = aarchCGFunc->CreateImmOperand(stOffset - argInfo.symLoc->GetOffset(), k64BitSize, + false); + baseReg = &aarchCGFunc->CreateRegisterOperandOfType(kRegTyInt, k8ByteSize); + lastSegment = argInfo.symLoc->GetMemSegment(); + aarchCGFunc->SelectAdd(*baseReg, *baseOpnd, immOpnd, PTY_a64); + } + OfstOperand &offsetOpnd = aarchCGFunc->CreateOfstOpnd(static_cast(argInfo.symLoc->GetOffset()), k32BitSize); + if (argInfo.symLoc->GetMemSegment()->GetMemSegmentKind() == kMsArgsStkPassed) { + offsetOpnd.SetVary(kUnAdjustVary); + } + memOpnd = aarchCGFunc->CreateMemOperand(MemOperand::kAddrModeBOi, + argInfo.symSize * kBitsPerByte, *baseReg, + nullptr, &offsetOpnd, argInfo.sym); + } else { + OfstOperand &offsetOpnd = aarchCGFunc->CreateOfstOpnd(static_cast(static_cast(stOffset)), + k32BitSize); + if (argInfo.symLoc->GetMemSegment()->GetMemSegmentKind() == kMsArgsStkPassed) { + offsetOpnd.SetVary(kUnAdjustVary); + } + memOpnd = aarchCGFunc->CreateMemOperand(MemOperand::kAddrModeBOi, + argInfo.symSize * kBitsPerByte, *baseOpnd, + nullptr, &offsetOpnd, argInfo.sym); + } + + MOperator mOp = aarchCGFunc->PickStInsn(argInfo.symSize * kBitsPerByte, argInfo.mirTy->GetPrimType()); + Insn &insn = aarchCGFunc->GetInsnBuilder()->BuildInsn(mOp, regOpnd, *memOpnd); + if (aarchCGFunc->GetCG()->GenerateVerboseCG()) { + insn.SetComment(std::string("store param: ").append(argInfo.sym->GetName())); + } + aarchCGFunc->GetCurBB()->AppendInsn(insn); + + if (argInfo.createTwoStores || argInfo.doMemPairOpt) { + /* second half of the struct passing by registers. */ + uint32 part2BitSize = argInfo.memPairSecondRegSize * kBitsPerByte; + GenOneInsn(argInfo, *baseOpnd, part2BitSize, reg2, (stOffset + GetPointerSize())); + } else if (numFpRegs > kOneRegister) { + uint32 fpSizeBits = fpSize * kBitsPerByte; + AArch64reg regFp2 = static_cast(argInfo.reg + kOneRegister); + GenOneInsn(argInfo, *baseOpnd, fpSizeBits, regFp2, (stOffset + static_cast(fpSize))); + if (numFpRegs > kTwoRegister) { + AArch64reg regFp3 = static_cast(argInfo.reg + kTwoRegister); + GenOneInsn(argInfo, *baseOpnd, fpSizeBits, regFp3, (stOffset + static_cast(fpSize * k4BitShift))); + } + if (numFpRegs > kThreeRegister) { + AArch64reg regFp3 = static_cast(argInfo.reg + kThreeRegister); + GenOneInsn(argInfo, *baseOpnd, fpSizeBits, regFp3, (stOffset + static_cast(fpSize * k8BitShift))); + } + } +} + +void AArch64MoveRegArgs::MoveRegisterArgs() { + AArch64CGFunc *aarchCGFunc = static_cast(cgFunc); + BB *formerCurBB = aarchCGFunc->GetCurBB(); + aarchCGFunc->GetDummyBB()->ClearInsns(); + aarchCGFunc->SetCurBB(*aarchCGFunc->GetDummyBB()); + + std::map movePara; + std::vector moveParaIndex; + std::map pairReg; + std::vector numFpRegs; + std::vector fpSize; + CollectRegisterArgs(movePara, moveParaIndex, pairReg, numFpRegs, fpSize); + + std::vector::iterator it; + std::vector::iterator next; + for (it = moveParaIndex.begin(); it != moveParaIndex.end(); ++it) { + uint32 firstIndex = *it; + ArgInfo firstArgInfo = GetArgInfo(movePara, numFpRegs, fpSize, firstIndex); + next = it; + ++next; + if ((next != moveParaIndex.end()) || (firstArgInfo.doMemPairOpt)) { + uint32 secondIndex = (firstArgInfo.doMemPairOpt) ? firstIndex : *next; + ArgInfo secondArgInfo = GetArgInfo(movePara, numFpRegs, fpSize, secondIndex); + secondArgInfo.reg = (firstArgInfo.doMemPairOpt) ? pairReg[firstIndex] : movePara[secondIndex]; + secondArgInfo.symSize = (firstArgInfo.doMemPairOpt) ? firstArgInfo.memPairSecondRegSize : secondArgInfo.symSize; + secondArgInfo.symLoc = (firstArgInfo.doMemPairOpt) ? 
secondArgInfo.symLoc : + static_cast(aarchCGFunc->GetMemlayout()->GetSymAllocInfo( + secondArgInfo.sym->GetStIndex())); + /* Make sure they are in same segment if want to use stp */ + if (((firstArgInfo.isTwoRegParm && secondArgInfo.isTwoRegParm) || + (!firstArgInfo.isTwoRegParm && !secondArgInfo.isTwoRegParm)) && + (firstArgInfo.doMemPairOpt || IsInSameSegment(firstArgInfo, secondArgInfo))) { + GenerateStpInsn(firstArgInfo, secondArgInfo); + if (!firstArgInfo.doMemPairOpt) { + it = next; + } + continue; + } + } + GenerateStrInsn(firstArgInfo, pairReg[firstIndex], numFpRegs[firstIndex], fpSize[firstIndex]); + } + + if (cgFunc->GetCG()->IsLmbc() && cgFunc->GetSpSaveReg()) { + /* lmbc uses vreg act as SP when alloca is present due to usage of FP for - offset */ + aarchCGFunc->GetFirstBB()->InsertAtEnd(*aarchCGFunc->GetDummyBB()); + } else { + /* Java requires insertion at begining as it has fast unwind and other features */ + aarchCGFunc->GetFirstBB()->InsertAtBeginning(*aarchCGFunc->GetDummyBB()); + } + aarchCGFunc->SetCurBB(*formerCurBB); +} + +void AArch64MoveRegArgs::MoveLocalRefVarToRefLocals(MIRSymbol &mirSym) const { + AArch64CGFunc *aarchCGFunc = static_cast(cgFunc); + PrimType stype = mirSym.GetType()->GetPrimType(); + uint32 byteSize = GetPrimTypeSize(stype); + uint32 bitSize = byteSize * kBitsPerByte; + MemOperand &memOpnd = aarchCGFunc->GetOrCreateMemOpnd(mirSym, 0, bitSize, true); + RegOperand *regOpnd = nullptr; + if (mirSym.IsPreg()) { + PregIdx pregIdx = aarchCGFunc->GetFunction().GetPregTab()->GetPregIdxFromPregno(mirSym.GetPreg()->GetPregNo()); + regOpnd = &aarchCGFunc->GetOrCreateVirtualRegisterOperand(aarchCGFunc->GetVirtualRegNOFromPseudoRegIdx(pregIdx)); + } else { + regOpnd = &aarchCGFunc->GetOrCreateVirtualRegisterOperand(aarchCGFunc->NewVReg(kRegTyInt, k8ByteSize)); + } + Insn &insn = aarchCGFunc->GetInsnBuilder()->BuildInsn( + aarchCGFunc->PickLdInsn(GetPrimTypeBitSize(stype), stype), *regOpnd, memOpnd); + MemOperand &memOpnd1 = aarchCGFunc->GetOrCreateMemOpnd(mirSym, 0, bitSize, false); + Insn &insn1 = aarchCGFunc->GetInsnBuilder()->BuildInsn( + aarchCGFunc->PickStInsn(GetPrimTypeBitSize(stype), stype), *regOpnd, memOpnd1); + aarchCGFunc->GetCurBB()->InsertInsnBegin(insn1); + aarchCGFunc->GetCurBB()->InsertInsnBegin(insn); +} + +void AArch64MoveRegArgs::LoadStackArgsToVReg(MIRSymbol &mirSym) const { + AArch64CGFunc *aarchCGFunc = static_cast(cgFunc); + PrimType stype = mirSym.GetType()->GetPrimType(); + uint32 byteSize = GetPrimTypeSize(stype); + uint32 bitSize = byteSize * kBitsPerByte; + MemOperand &memOpnd = aarchCGFunc->GetOrCreateMemOpnd(mirSym, 0, bitSize); + PregIdx pregIdx = aarchCGFunc->GetFunction().GetPregTab()->GetPregIdxFromPregno(mirSym.GetPreg()->GetPregNo()); + RegOperand &dstRegOpnd = aarchCGFunc->GetOrCreateVirtualRegisterOperand( + aarchCGFunc->GetVirtualRegNOFromPseudoRegIdx(pregIdx)); + Insn &insn = aarchCGFunc->GetInsnBuilder()->BuildInsn( + aarchCGFunc->PickLdInsn(GetPrimTypeBitSize(stype), stype), dstRegOpnd, memOpnd); + + if (aarchCGFunc->GetCG()->GenerateVerboseCG()) { + std::string str = "param: %%"; + str += std::to_string(mirSym.GetPreg()->GetPregNo()); + ASSERT(mirSym.GetStorageClass() == kScFormal, "vreg parameters should be kScFormal type."); + insn.SetComment(str); + } + + aarchCGFunc->GetCurBB()->InsertInsnBegin(insn); +} + +void AArch64MoveRegArgs::MoveArgsToVReg(const CCLocInfo &ploc, MIRSymbol &mirSym) const { + auto *aarchCGFunc = static_cast(cgFunc); + RegType regType = (ploc.reg0 < V0) ? 
kRegTyInt : kRegTyFloat; + PrimType stype = mirSym.GetType()->GetPrimType(); + uint32 byteSize = GetPrimTypeSize(stype); + uint32 srcBitSize = ((byteSize < k4ByteSize) ? k4ByteSize : byteSize) * kBitsPerByte; + PregIdx pregIdx = aarchCGFunc->GetFunction().GetPregTab()->GetPregIdxFromPregno(mirSym.GetPreg()->GetPregNo()); + RegOperand &dstRegOpnd = + aarchCGFunc->GetOrCreateVirtualRegisterOperand(aarchCGFunc->GetVirtualRegNOFromPseudoRegIdx(pregIdx)); + dstRegOpnd.SetSize(srcBitSize); + RegOperand &srcRegOpnd = aarchCGFunc->GetOrCreatePhysicalRegisterOperand( + static_cast(ploc.reg0), srcBitSize, regType); + ASSERT(mirSym.GetStorageClass() == kScFormal, "should be args"); + MOperator mOp = aarchCGFunc->PickMovBetweenRegs(stype, stype); + if (mOp == MOP_vmovvv || mOp == MOP_vmovuu) { + VectorInsn &vInsn = aarchCGFunc->GetInsnBuilder()->BuildVectorInsn(mOp, AArch64CG::kMd[mOp]); + (void)vInsn.AddOpndChain(dstRegOpnd).AddOpndChain(srcRegOpnd); + auto *vecSpec1 = aarchCGFunc->GetMemoryPool()->New(srcBitSize >> k3ByteSize, k8BitSize); + auto *vecSpec2 = aarchCGFunc->GetMemoryPool()->New(srcBitSize >> k3ByteSize, k8BitSize); + (void)vInsn.PushRegSpecEntry(vecSpec1).PushRegSpecEntry(vecSpec2); + aarchCGFunc->GetCurBB()->InsertInsnBegin(vInsn); + return; + } + if (CGOptions::DoCGSSA() && (mOp == MOP_wmovrr || mOp == MOP_xmovrr)) { + uint32 validBit = byteSize * kBitsPerByte; + if (validBit == k8BitSize) { + mOp = MOP_wuxtb_vb; + } else if (validBit == k16BitSize) { + mOp = MOP_wuxth_vb; + } + } + Insn &insn = aarchCGFunc->GetInsnBuilder()->BuildInsn(mOp, dstRegOpnd, srcRegOpnd); + if (aarchCGFunc->GetCG()->GenerateVerboseCG()) { + std::string str = "param: %%"; + str += std::to_string(mirSym.GetPreg()->GetPregNo()); + insn.SetComment(str); + } + aarchCGFunc->GetCurBB()->InsertInsnBegin(insn); +} + +void AArch64MoveRegArgs::MoveVRegisterArgs() const { + AArch64CGFunc *aarchCGFunc = static_cast(cgFunc); + BB *formerCurBB = aarchCGFunc->GetCurBB(); + aarchCGFunc->GetDummyBB()->ClearInsns(); + aarchCGFunc->SetCurBB(*aarchCGFunc->GetDummyBB()); + AArch64CallConvImpl parmlocator(aarchCGFunc->GetBecommon()); + CCLocInfo ploc; + + uint32 formalCount = static_cast(aarchCGFunc->GetFunction().GetFormalCount()); + uint32 start = 0; + if (formalCount > 0) { + MIRFunction *func = const_cast(aarchCGFunc->GetBecommon().GetMIRModule().CurFunction()); + if (func->IsReturnStruct() && func->IsFirstArgReturn()) { + TyIdx tyIdx = func->GetFuncRetStructTyIdx(); + if (aarchCGFunc->GetBecommon().GetTypeSize(tyIdx) <= k16BitSize) { + start = 1; + } + } + } + for (uint32 i = start; i < formalCount; ++i) { + MIRType *ty = aarchCGFunc->GetFunction().GetNthParamType(i); + parmlocator.LocateNextParm(*ty, ploc, i == 0, &aarchCGFunc->GetFunction()); + MIRSymbol *sym = aarchCGFunc->GetFunction().GetFormal(i); + + /* load locarefvar formals to store in the reflocals. */ + if (aarchCGFunc->GetFunction().GetNthParamAttr(i).GetAttr(ATTR_localrefvar) && ploc.reg0 == kRinvalid) { + MoveLocalRefVarToRefLocals(*sym); + } + + if (!sym->IsPreg()) { + continue; + } + + if (ploc.reg0 == kRinvalid) { + /* load stack parameters to the vreg. 
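+ * (i.e. formals for which the calling convention assigned no register, ploc.reg0 ==
+ * kRinvalid, and which therefore arrive in the stack-passed argument area)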
*/ + LoadStackArgsToVReg(*sym); + } else { + MoveArgsToVReg(ploc, *sym); + } + } + + if (cgFunc->GetCG()->IsLmbc() && cgFunc->GetSpSaveReg()) { + /* lmbc uses vreg act as SP when alloca is present due to usage of FP for - offset */ + aarchCGFunc->GetFirstBB()->InsertAtEnd(*aarchCGFunc->GetDummyBB()); + } else { + /* Java requires insertion at begining as it has fast unwind and other features */ + aarchCGFunc->GetFirstBB()->InsertAtBeginning(*aarchCGFunc->GetDummyBB()); + } + aarchCGFunc->SetCurBB(*formerCurBB); +} +} /* namespace maplebe */ diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_call_conv.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_call_conv.cpp new file mode 100644 index 0000000000000000000000000000000000000000..596af4c9bc179a0a60f5b0a1fd53330f3eedb86b --- /dev/null +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_call_conv.cpp @@ -0,0 +1,744 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "aarch64_cgfunc.h" +#include "becommon.h" +#include "aarch64_call_conv.h" + +namespace maplebe { +using namespace maple; + +namespace { +constexpr int kMaxRegCount = 4; + +/* + * Refer to ARM IHI 0055C_beta: Procedure Call Standard for + * ARM 64-bit Architecture. Table 1. + */ +enum AArch64ArgumentClass : uint8 { + kAArch64NoClass, + kAArch64IntegerClass, + kAArch64FloatClass, + kAArch64MemoryClass +}; + +int32 ProcessNonStructAndNonArrayWhenClassifyAggregate(const MIRType &mirType, + AArch64ArgumentClass classes[kMaxRegCount], + size_t classesLength) { + CHECK_FATAL(classesLength > 0, "classLength must > 0"); + /* scalar type */ + switch (mirType.GetPrimType()) { + case PTY_u1: + case PTY_u8: + case PTY_i8: + case PTY_u16: + case PTY_i16: + case PTY_a32: + case PTY_u32: + case PTY_i32: + case PTY_a64: + case PTY_ptr: + case PTY_ref: + case PTY_u64: + case PTY_i64: + classes[0] = kAArch64IntegerClass; + return 1; + case PTY_f32: + case PTY_f64: + case PTY_c64: + case PTY_c128: + classes[0] = kAArch64FloatClass; + return 1; + default: + CHECK_FATAL(false, "NYI"); + } + + /* should not reach to this point */ + return 0; +} + +PrimType TraverseStructFieldsForFp(MIRType *ty, uint32 &numRegs) { + if (ty->GetKind() == kTypeArray) { + MIRArrayType *arrtype = static_cast(ty); + MIRType *pty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(arrtype->GetElemTyIdx()); + if (pty->GetKind() == kTypeArray || pty->GetKind() == kTypeStruct) { + return TraverseStructFieldsForFp(pty, numRegs); + } + for (uint32 i = 0; i < arrtype->GetDim(); ++i) { + numRegs += arrtype->GetSizeArrayItem(i); + } + return pty->GetPrimType(); + } else if (ty->GetKind() == kTypeStruct) { + MIRStructType *sttype = static_cast(ty); + FieldVector fields = sttype->GetFields(); + PrimType oldtype = PTY_void; + for (uint32 fcnt = 0; fcnt < fields.size(); ++fcnt) { + TyIdx fieldtyidx = fields[fcnt].second.first; + MIRType *fieldty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(fieldtyidx); + PrimType ptype = TraverseStructFieldsForFp(fieldty, numRegs); + if (oldtype != PTY_void && 
oldtype != ptype) { + return PTY_void; + } else { + oldtype = ptype; + } + } + return oldtype; + } else { + numRegs++; + return ty->GetPrimType(); + } +} + +int32 ClassifyAggregate(const BECommon &be, MIRType &mirType, AArch64ArgumentClass classes[kMaxRegCount], + size_t classesLength, uint32 &fpSize); + +uint32 ProcessStructAndUnionWhenClassifyAggregate(const BECommon &be, MIRStructType &structType, + AArch64ArgumentClass classes[kMaxRegCount], + size_t classesLength, uint32 &fpSize) { + CHECK_FATAL(classesLength > 0, "classLength must > 0"); + uint32 sizeOfTyInDwords = static_cast( + RoundUp(be.GetTypeSize(structType.GetTypeIndex()), k8ByteSize) >> k8BitShift); + bool isF32 = false; + bool isF64 = false; + uint32 numRegs = 0; + for (uint32 f = 0; f < structType.GetFieldsSize(); ++f) { + TyIdx fieldTyIdx = structType.GetFieldsElemt(f).second.first; + MIRType *fieldType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(fieldTyIdx); + PrimType pType = TraverseStructFieldsForFp(fieldType, numRegs); + if (pType == PTY_f32) { + if (isF64) { + isF64 = false; + break; + } + isF32 = true; + } else if (pType == PTY_f64) { + if (isF32) { + isF32 = false; + break; + } + isF64 = true; + } else if (IsPrimitiveVector(pType)) { + isF64 = true; + break; + } else { + isF32 = isF64 = false; + break; + } + } + if (isF32 || isF64) { + CHECK_FATAL(numRegs <= classesLength, "ClassifyAggregate: num regs exceed limit"); + for (uint32 i = 0; i < numRegs; ++i) { + classes[i] = kAArch64FloatClass; + } + + fpSize = isF32 ? k4ByteSize : k8ByteSize; + if (structType.GetKind() == kTypeUnion) { + /* For Union, numRegs is calculated for the maximum size element in this Union */ + return sizeOfTyInDwords; + } + return numRegs; + } + + classes[0] = kAArch64IntegerClass; + if (sizeOfTyInDwords == kDwordSizeTwo) { + classes[1] = kAArch64IntegerClass; + } + ASSERT(sizeOfTyInDwords <= classesLength, "sizeOfTyInDwords exceed limit"); + return sizeOfTyInDwords; +} + +/* + * Analyze the given aggregate using the rules given by the ARM 64-bit ABI and + * return the number of doublewords to be passed in registers; the classes of + * the doublewords are returned in parameter "classes"; if 0 is returned, it + * means the whole aggregate is passed in memory. + */ +int32 ClassifyAggregate(const BECommon &be, MIRType &mirType, AArch64ArgumentClass classes[kMaxRegCount], + size_t classesLength, uint32 &fpSize) { + CHECK_FATAL(classesLength > 0, "invalid index"); + uint64 sizeOfTy = be.GetTypeSize(mirType.GetTypeIndex()); + /* Rule B.3. + * If the argument type is a Composite Type that is larger than 16 bytes + * then the argument is copied to memory allocated by the caller and + * the argument is replaced by a pointer to the copy. + */ + if ((sizeOfTy > k16ByteSize) || (sizeOfTy == 0)) { + return 0; + } + + /* + * An argument of any Integer class takes up an integer register + * which is a single double-word. + * Rule B.4. The size of an argument of composite type is rounded up to the nearest + * multiple of 8 bytes. 
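+ * Worked example (illustrative only): a 12-byte struct rounds up to 16 bytes, so
+ * sizeOfTyInDwords == 2; unless it is a homogeneous FP aggregate, both doublewords are
+ * classified as kAArch64IntegerClass and the value travels in a GP register pair.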
+ */ + int64 sizeOfTyInDwords = static_cast(RoundUp(sizeOfTy, k8ByteSize) >> k8BitShift); + ASSERT(sizeOfTyInDwords > 0, "sizeOfTyInDwords should be sizeOfTyInDwords > 0"); + ASSERT(sizeOfTyInDwords <= kTwoRegister, "sizeOfTyInDwords should be <= 2"); + int64 i; + for (i = 0; i < sizeOfTyInDwords; ++i) { + classes[i] = kAArch64NoClass; + } + if ((mirType.GetKind() != kTypeStruct) && (mirType.GetKind() != kTypeArray) && (mirType.GetKind() != kTypeUnion)) { + return ProcessNonStructAndNonArrayWhenClassifyAggregate(mirType, classes, classesLength); + } + if (mirType.GetKind() == kTypeStruct || mirType.GetKind() == kTypeUnion) { + MIRStructType &structType = static_cast(mirType); + return static_cast(ProcessStructAndUnionWhenClassifyAggregate(be, structType, classes, + classesLength, fpSize)); + } + /* post merger clean-up */ + for (i = 0; i < sizeOfTyInDwords; ++i) { + if (classes[i] == kAArch64MemoryClass) { + return 0; + } + } + return static_cast(sizeOfTyInDwords); +} +} + +/* external interface to look for pure float struct */ +uint32 AArch64CallConvImpl::FloatParamRegRequired(MIRStructType &structType, uint32 &fpSize) { + if (structType.GetSize() > k32ByteSize) { + return 0; + } + AArch64ArgumentClass classes[kMaxRegCount]; + uint32 numRegs = ProcessStructAndUnionWhenClassifyAggregate(beCommon, structType, classes, kMaxRegCount, fpSize); + if (numRegs == 0) { + return 0; + } + + bool isPure = true; + for (uint i = 0; i < numRegs; ++i) { + ASSERT(i < kMaxRegCount, "i should be lower than kMaxRegCount"); + if (classes[i] != kAArch64FloatClass) { + isPure = false; + break; + } + } + if (isPure) { + return numRegs; + } + return 0; +} + +void AArch64CallConvImpl::InitCCLocInfo(CCLocInfo &pLoc) const { + pLoc.reg0 = kRinvalid; + pLoc.reg1 = kRinvalid; + pLoc.reg2 = kRinvalid; + pLoc.reg3 = kRinvalid; + pLoc.memOffset = nextStackArgAdress; + pLoc.fpSize = 0; + pLoc.numFpPureRegs = 0; +} + +int32 AArch64CallConvImpl::LocateRetVal(MIRType &retType, CCLocInfo &pLoc) { + InitCCLocInfo(pLoc); + uint32 retSize = beCommon.GetTypeSize(retType.GetTypeIndex().GetIdx()); + if (retSize == 0) { + return 0; /* size 0 ret val */ + } + if (retSize <= k16ByteSize) { + /* For return struct size less or equal to 16 bytes, the values */ + /* are returned in register pairs. */ + AArch64ArgumentClass classes[kMaxRegCount] = { kAArch64NoClass }; /* Max of four floats. */ + uint32 fpSize; + uint32 numRegs = static_cast(ClassifyAggregate(beCommon, retType, classes, sizeof(classes), fpSize)); + if (classes[0] == kAArch64FloatClass) { + CHECK_FATAL(numRegs <= kMaxRegCount, "LocateNextParm: illegal number of regs"); + AllocateNSIMDFPRegisters(pLoc, numRegs); + pLoc.numFpPureRegs = numRegs; + pLoc.fpSize = fpSize; + return 0; + } else { + CHECK_FATAL(numRegs <= kTwoRegister, "LocateNextParm: illegal number of regs"); + if (numRegs == kOneRegister) { + pLoc.reg0 = AllocateGPRegister(); + } else { + AllocateTwoGPRegisters(pLoc); + } + return 0; + } + } else { + /* For return struct size > 16 bytes the pointer returns in x8. */ + pLoc.reg0 = R8; + return GetPointerSize(); + } +} + +/* + * Refer to ARM IHI 0055C_beta: Procedure Call Standard for + * the ARM 64-bit Architecture. $5.4.2 + * + * For internal only functions, we may want to implement + * our own rules as Apple IOS has done. Maybe we want to + * generate two versions for each of externally visible functions, + * one conforming to the ARM standard ABI, and the other for + * internal only use. 
+ * + * LocateNextParm should be called with each parameter in the parameter list + * starting from the beginning, one call per parameter in sequence; it returns + * the information on how each parameter is passed in pLoc + * + * *** CAUTION OF USE: *** + * If LocateNextParm is called for function formals, third argument isFirst is true. + * LocateNextParm is then checked against a function parameter list. All other calls + * of LocateNextParm are against caller's argument list must not have isFirst set, + * or it will be checking the caller's enclosing function. + */ +int32 AArch64CallConvImpl::LocateNextParm(MIRType &mirType, CCLocInfo &pLoc, bool isFirst, MIRFunction *tFunc) { + InitCCLocInfo(pLoc); + + bool is64x1vec = false; + if (tFunc != nullptr && tFunc->GetParamSize() > 0) { + is64x1vec = tFunc->GetNthParamAttr(paramNum).GetAttr(ATTR_oneelem_simd) != 0; + } + + if (isFirst) { + MIRFunction *func = tFunc != nullptr ? tFunc : const_cast(beCommon.GetMIRModule().CurFunction()); + if (func->IsFirstArgReturn()) { + TyIdx tyIdx = func->GetFuncRetStructTyIdx(); + size_t size = beCommon.GetTypeSize(tyIdx); + if (size == 0) { + /* For return struct size 0 there is no return value. */ + return 0; + } + /* For return struct size > 16 bytes the pointer returns in x8. */ + pLoc.reg0 = R8; + return GetPointerSize(); + } + } + uint64 typeSize = beCommon.GetTypeSize(mirType.GetTypeIndex()); + if (typeSize == 0) { + return 0; + } + int32 typeAlign = beCommon.GetTypeAlign(mirType.GetTypeIndex()); + /* + * Rule C.12 states that we do round nextStackArgAdress up before we use its value + * according to the alignment requirement of the argument being processed. + * We do the rounding up at the end of LocateNextParm(), + * so we want to make sure our rounding up is correct. + */ + ASSERT((nextStackArgAdress & (std::max(typeAlign, static_cast(k8ByteSize)) - 1)) == 0, + "C.12 alignment requirement is violated"); + pLoc.memSize = static_cast(typeSize); + ++paramNum; + + int32 aggCopySize = 0; + switch (mirType.GetPrimType()) { + case PTY_u1: + case PTY_u8: + case PTY_i8: + case PTY_u16: + case PTY_i16: + case PTY_a32: + case PTY_u32: + case PTY_i32: + case PTY_ptr: + case PTY_ref: + case PTY_a64: + case PTY_u64: + case PTY_i64: + case PTY_i128: + case PTY_u128: + /* Rule C.7 */ + typeSize = k8ByteSize; + pLoc.reg0 = is64x1vec ? AllocateSIMDFPRegister() : AllocateGPRegister(); + ASSERT(nextGeneralRegNO <= AArch64Abi::kNumIntParmRegs, "RegNo should be pramRegNO"); + break; + /* + * for c64 complex numbers, we assume + * - callers marshall the two f32 numbers into one f64 register + * - callees de-marshall one f64 value into the real and the imaginery part + */ + case PTY_f32: + case PTY_f64: + case PTY_c64: + case PTY_v2i32: + case PTY_v4i16: + case PTY_v8i8: + case PTY_v2u32: + case PTY_v4u16: + case PTY_v8u8: + case PTY_v2f32: + /* Rule C.1 */ + ASSERT(GetPrimTypeSize(PTY_f64) == k8ByteSize, "unexpected type size"); + typeSize = k8ByteSize; + pLoc.reg0 = AllocateSIMDFPRegister(); + break; + /* + * for c128 complex numbers, we assume + * - callers marshall the two f64 numbers into one f128 register + * - callees de-marshall one f128 value into the real and the imaginery part + */ + case PTY_c128: + case PTY_v2i64: + case PTY_v4i32: + case PTY_v8i16: + case PTY_v16i8: + case PTY_v2u64: + case PTY_v4u32: + case PTY_v8u16: + case PTY_v16u8: + case PTY_v2f64: + case PTY_v4f32: + /* SIMD-FP registers have 128-bits. 
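+ * e.g. PTY_v4f32 (4 x f32 = 128 bits) or PTY_c128 fills one whole 128-bit SIMD/FP
+ * register, so a single register is allocated and no splitting is needed
+ * (illustrative note).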
*/ + pLoc.reg0 = AllocateSIMDFPRegister(); + ASSERT(nextFloatRegNO <= AArch64Abi::kNumFloatParmRegs, "regNO should not be greater than kNumFloatParmRegs"); + ASSERT(typeSize == k16ByteSize, "unexpected type size"); + break; + /* + * case of quad-word integer: + * we don't support java yet. + * if (has-16-byte-alignment-requirement) + * nextGeneralRegNO = (nextGeneralRegNO+1) & ~1; // C.8 round it up to the next even number + * try allocate two consecutive registers at once. + */ + /* case PTY_agg */ + case PTY_agg: { + aggCopySize = ProcessPtyAggWhenLocateNextParm(mirType, pLoc, typeSize, typeAlign); + break; + } + default: + CHECK_FATAL(false, "NYI"); + } + + /* Rule C.12 */ + if (pLoc.reg0 == kRinvalid) { + /* being passed in memory */ + nextStackArgAdress = pLoc.memOffset + static_cast(static_cast(typeSize)); + } + return aggCopySize; +} + +int32 AArch64CallConvImpl::ProcessPtyAggWhenLocateNextParm(MIRType &mirType, CCLocInfo &pLoc, uint64 &typeSize, + int32 typeAlign) { + /* + * In AArch64, integer-float or float-integer + * argument passing is not allowed. All should go through + * integer-integer. + * In the case where a struct is homogeneous composed of one of the fp types, + * either all single fp or all double fp, then it can be passed by float-float. + */ + AArch64ArgumentClass classes[kMaxRegCount] = { kAArch64NoClass }; + typeSize = beCommon.GetTypeSize(mirType.GetTypeIndex().GetIdx()); + int32 aggCopySize = 0; + if (typeSize > k16ByteSize) { + aggCopySize = static_cast(RoundUp(typeSize, GetPointerSize())); + } + /* + * alignment requirement + * Note. This is one of a few things iOS diverges from + * the ARM 64-bit standard. They don't observe the round-up requirement. + */ + if (typeAlign == k16ByteSizeInt) { + RoundNGRNUpToNextEven(); + } + + uint32 fpSize; + uint32 numRegs = static_cast( + ClassifyAggregate(beCommon, mirType, classes, sizeof(classes) / sizeof(AArch64ArgumentClass), fpSize)); + if (classes[0] == kAArch64FloatClass) { + CHECK_FATAL(numRegs <= kMaxRegCount, "LocateNextParm: illegal number of regs"); + typeSize = k8ByteSize; + AllocateNSIMDFPRegisters(pLoc, numRegs); + pLoc.numFpPureRegs = numRegs; + pLoc.fpSize = fpSize; + } else if (numRegs == 1) { + /* passing in registers */ + typeSize = k8ByteSize; + if (classes[0] == kAArch64FloatClass) { + CHECK_FATAL(false, "param passing in FP reg not allowed here"); + } else { + pLoc.reg0 = AllocateGPRegister(); + /* Rule C.11 */ + ASSERT((pLoc.reg0 != kRinvalid) || (nextGeneralRegNO == AArch64Abi::kNumIntParmRegs), + "reg0 should not be kRinvalid or nextGeneralRegNO should equal kNumIntParmRegs"); + } + } else if (numRegs == kTwoRegister) { + /* Other aggregates with 8 < size <= 16 bytes can be allocated in reg pair */ + ASSERT(classes[0] == kAArch64IntegerClass || classes[0] == kAArch64NoClass, + "classes[0] must be either integer class or no class"); + ASSERT(classes[1] == kAArch64IntegerClass || classes[1] == kAArch64NoClass, + "classes[1] must be either integer class or no class"); + AllocateTwoGPRegisters(pLoc); + /* Rule C.11 */ + if (pLoc.reg0 == kRinvalid) { + nextGeneralRegNO = AArch64Abi::kNumIntParmRegs; + } + } else { + /* + * 0 returned from ClassifyAggregate(). This means the whole data + * is passed thru memory. + * Rule B.3. + * If the argument type is a Composite Type that is larger than 16 + * bytes then the argument is copied to memory allocated by the + * caller and the argument is replaced by a pointer to the copy. 
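+ * Worked example (illustrative only): for a 24-byte struct argument the caller makes a
+ * copy (aggCopySize is its size rounded up to pointer granularity) and only the 8-byte
+ * pointer to that copy competes for a GP register here.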
+ * + * Try to allocate an integer register + */ + typeSize = k8ByteSize; + pLoc.reg0 = AllocateGPRegister(); + pLoc.memSize = k8ByteSizeInt; /* byte size of a pointer in AArch64 */ + if (pLoc.reg0 != kRinvalid) { + numRegs = 1; + } + } + /* compute rightpad */ + if ((numRegs == 0) || (pLoc.reg0 == kRinvalid)) { + /* passed in memory */ + typeSize = RoundUp(static_cast(static_cast(pLoc.memSize)), k8ByteSize); + } + return aggCopySize; +} + +/* + * instantiated with the type of the function return value, it describes how + * the return value is to be passed back to the caller + * + * Refer to ARM IHI 0055C_beta: Procedure Call Standard for + * the ARM 64-bit Architecture. $5.5 + * "If the type, T, of the result of a function is such that + * void func(T arg) + * would require that 'arg' be passed as a value in a register + * (or set of registers) according to the rules in $5.4 Parameter + * Passing, then the result is returned in the same registers + * as would be used for such an argument. + */ +void AArch64CallConvImpl::InitReturnInfo(MIRType &retTy, CCLocInfo &ccLocInfo) { + PrimType pType = retTy.GetPrimType(); + switch (pType) { + case PTY_void: + break; + case PTY_u1: + case PTY_u8: + case PTY_i8: + case PTY_u16: + case PTY_i16: + case PTY_a32: + case PTY_u32: + case PTY_i32: + ccLocInfo.regCount = 1; + ccLocInfo.reg0 = AArch64Abi::kIntReturnRegs[0]; + ccLocInfo.primTypeOfReg0 = IsSignedInteger(pType) ? PTY_i32 : PTY_u32; /* promote the type */ + return; + + case PTY_ptr: + case PTY_ref: + CHECK_FATAL(false, "PTY_ptr should have been lowered"); + return; + + case PTY_a64: + case PTY_u64: + case PTY_i64: + case PTY_i128: + case PTY_u128: + ccLocInfo.regCount = 1; + ccLocInfo.reg0 = AArch64Abi::kIntReturnRegs[0]; + ccLocInfo.primTypeOfReg0 = IsSignedInteger(pType) ? PTY_i64 : PTY_u64; /* promote the type */ + return; + + /* + * for c64 complex numbers, we assume + * - callers marshall the two f32 numbers into one f64 register + * - callees de-marshall one f64 value into the real and the imaginery part + */ + case PTY_f32: + case PTY_f64: + case PTY_c64: + case PTY_v2i32: + case PTY_v4i16: + case PTY_v8i8: + case PTY_v2u32: + case PTY_v4u16: + case PTY_v8u8: + case PTY_v2f32: + + /* + * for c128 complex numbers, we assume + * - callers marshall the two f64 numbers into one f128 register + * - callees de-marshall one f128 value into the real and the imaginery part + */ + case PTY_c128: + case PTY_v2i64: + case PTY_v4i32: + case PTY_v8i16: + case PTY_v16i8: + case PTY_v2u64: + case PTY_v4u32: + case PTY_v8u16: + case PTY_v16u8: + case PTY_v2f64: + case PTY_v4f32: + ccLocInfo.regCount = 1; + ccLocInfo.reg0 = AArch64Abi::kFloatReturnRegs[0]; + ccLocInfo.primTypeOfReg0 = pType; + return; + + /* + * Refer to ARM IHI 0055C_beta: Procedure Call Standard for + * the ARM 64-bit Architecture. $5.5 + * "Otherwise, the caller shall reserve a block of memory of + * sufficient size and alignment to hold the result. The + * address of the memory block shall be passed as an additional + * argument to the function in x8. The callee may modify the + * result memory block at any point during the execution of the + * subroutine (there is no requirement for the callee to preserve + * the value stored in x8)." + */ + case PTY_agg: { + uint64 size = beCommon.GetTypeSize(retTy.GetTypeIndex()); + if ((size > k16ByteSize) || (size == 0)) { + /* + * The return value is returned via memory. + * The address is in X8 and passed by the caller. 
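+ * Worked example (illustrative only): a 24-byte struct result does not fit the 16-byte
+ * register-return limit, so the caller reserves a block for it and passes the block's
+ * address in X8; SetupToReturnThroughMemory below sets up this indirect return.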
+ */ + SetupToReturnThroughMemory(ccLocInfo); + return; + } + uint32 fpSize; + AArch64ArgumentClass classes[kMaxRegCount] = { kAArch64NoClass }; + ccLocInfo.regCount = static_cast(ClassifyAggregate(beCommon, retTy, classes, + sizeof(classes) / sizeof(AArch64ArgumentClass), fpSize)); + if (classes[0] == kAArch64FloatClass) { + switch (ccLocInfo.regCount) { + case kFourRegister: + ccLocInfo.reg3 = AArch64Abi::kFloatReturnRegs[3]; + break; + case kThreeRegister: + ccLocInfo.reg2 = AArch64Abi::kFloatReturnRegs[2]; + break; + case kTwoRegister: + ccLocInfo.reg1 = AArch64Abi::kFloatReturnRegs[1]; + break; + case kOneRegister: + ccLocInfo.reg0 = AArch64Abi::kFloatReturnRegs[0]; + break; + default: + CHECK_FATAL(0, "AArch64CallConvImpl: unsupported"); + } + if (fpSize == k4ByteSize) { + ccLocInfo.primTypeOfReg0 = ccLocInfo.primTypeOfReg1 = PTY_f32; + } else { + ccLocInfo.primTypeOfReg0 = ccLocInfo.primTypeOfReg1 = PTY_f64; + } + return; + } else if (ccLocInfo.regCount == 0) { + SetupToReturnThroughMemory(ccLocInfo); + return; + } else { + if (ccLocInfo.regCount == 1) { + /* passing in registers */ + if (classes[0] == kAArch64FloatClass) { + ccLocInfo.reg0 = AArch64Abi::kFloatReturnRegs[0]; + ccLocInfo.primTypeOfReg0 = PTY_f64; + } else { + ccLocInfo.reg0 = AArch64Abi::kIntReturnRegs[0]; + ccLocInfo.primTypeOfReg0 = PTY_i64; + } + } else { + ASSERT(ccLocInfo.regCount <= k2ByteSize, "reg count from ClassifyAggregate() should be 0, 1, or 2"); + ASSERT(classes[0] == kAArch64IntegerClass, "error val :classes[0]"); + ASSERT(classes[1] == kAArch64IntegerClass, "error val :classes[1]"); + ccLocInfo.reg0 = AArch64Abi::kIntReturnRegs[0]; + ccLocInfo.primTypeOfReg0 = PTY_i64; + ccLocInfo.reg1 = AArch64Abi::kIntReturnRegs[1]; + ccLocInfo.primTypeOfReg1 = PTY_i64; + } + return; + } + } + default: + CHECK_FATAL(false, "NYI"); + } +} + +void AArch64CallConvImpl::SetupSecondRetReg(const MIRType &retTy2, CCLocInfo &pLoc) const { + ASSERT(pLoc.reg1 == kRinvalid, "make sure reg1 equal kRinvalid"); + PrimType pType = retTy2.GetPrimType(); + switch (pType) { + case PTY_void: + break; + case PTY_u1: + case PTY_u8: + case PTY_i8: + case PTY_u16: + case PTY_i16: + case PTY_a32: + case PTY_u32: + case PTY_i32: + case PTY_ptr: + case PTY_ref: + case PTY_a64: + case PTY_u64: + case PTY_i64: + pLoc.reg1 = AArch64Abi::kIntReturnRegs[1]; + pLoc.primTypeOfReg1 = IsSignedInteger(pType) ? PTY_i64 : PTY_u64; /* promote the type */ + break; + default: + CHECK_FATAL(false, "NYI"); + } +} + +/* + * From "ARM Procedure Call Standard for ARM 64-bit Architecture" + * ARM IHI 0055C_beta, 6th November 2013 + * $ 5.1 machine Registers + * $ 5.1.1 General-Purpose Registers + * Note + * SP Stack Pointer + * R30/LR Link register Stores the return address. + * We push it into stack along with FP on function + * entry using STP and restore it on function exit + * using LDP even if the function is a leaf (i.e., + * it does not call any other function) because it + * is free (we have to store FP anyway). So, if a + * function is a leaf, we may use it as a temporary + * register. + * R29/FP Frame Pointer + * R19-R28 Callee-saved + * registers + * R18 Platform reg Can we use it as a temporary register? + * R16,R17 IP0,IP1 Maybe used as temporary registers. Should be + * given lower priorities. (i.e., we push them + * into the free register stack before the others) + * R9-R15 Temporary registers, caller-saved + * Note: + * R16 and R17 may be used by a linker as a scratch register between + * a routine and any subroutine it calls. 
They can also be used within a + * routine to hold intermediate values between subroutine calls. + * + * The role of R18 is platform specific. If a platform ABI has need of + * a dedicated general purpose register to carry inter-procedural state + * (for example, the thread context) then it should use this register for + * that purpose. If the platform ABI has no such requirements, then it should + * use R18 as an additional temporary register. The platform ABI specification + * must document the usage for this register. + * + * A subroutine invocation must preserve the contents of the registers R19-R29 + * and SP. All 64 bits of each value stored in R19-R29 must be preserved, even + * when using the ILP32 data model. + * + * $ 5.1.2 SIMD and Floating-Point Registers + * + * The first eight registers, V0-V7, are used to pass argument values into + * a subroutine and to return result values from a function. They may also + * be used to hold intermediate values within a routine. + * + * V8-V15 must be preserved by a callee across subroutine calls; the + * remaining registers do not need to be preserved( or caller-saved). + * Additionally, only the bottom 64 bits of each value stored in V8- + * V15 need to be preserved. + */ +} /* namespace maplebe */ diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_cfgo.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_cfgo.cpp new file mode 100644 index 0000000000000000000000000000000000000000..1048dc54911647dc82221e97aa52cedd69400183 --- /dev/null +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_cfgo.cpp @@ -0,0 +1,40 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ + +#include "aarch64_cfgo.h" +#include "aarch64_isa.h" + +namespace maplebe { +/* Initialize cfg optimization patterns */ +void AArch64CFGOptimizer::InitOptimizePatterns() { + (void)diffPassPatterns.emplace_back(memPool->New(*cgFunc)); + (void)diffPassPatterns.emplace_back(memPool->New(*cgFunc)); + AArch64FlipBRPattern *brOpt = memPool->New(*cgFunc); + if (GetPhase() == kCfgoPostRegAlloc) { + brOpt->SetPhase(kCfgoPostRegAlloc); + } + (void)diffPassPatterns.emplace_back(brOpt); + (void)diffPassPatterns.emplace_back(memPool->New(*cgFunc)); + (void)diffPassPatterns.emplace_back(memPool->New(*cgFunc)); + (void)diffPassPatterns.emplace_back(memPool->New(*cgFunc)); +} + +uint32 AArch64FlipBRPattern::GetJumpTargetIdx(const Insn &insn) { + return AArch64isa::GetJumpTargetIdx(insn); +} +MOperator AArch64FlipBRPattern::FlipConditionOp(MOperator flippedOp) { + return AArch64isa::FlipConditionOp(flippedOp); +} +} diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_cfi_generator.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_cfi_generator.cpp new file mode 100644 index 0000000000000000000000000000000000000000..052d274c1566ef3af1cfa490c356293019ca03f3 --- /dev/null +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_cfi_generator.cpp @@ -0,0 +1,105 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. 
+ * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "aarch64_cfi_generator.h" +#include "aarch64_memlayout.h" +#include "aarch64_cgfunc.h" +namespace maplebe { +void AArch64GenCfi::GenerateRegisterSaveDirective(BB &bb) { + int32 stackFrameSize = static_cast( + static_cast(cgFunc.GetMemlayout())->RealStackFrameSize()); + int32 argsToStkPassSize = static_cast(cgFunc.GetMemlayout()->SizeOfArgsToStackPass()); + int32 cfiOffset = stackFrameSize; + Insn &stackDefNextInsn = FindStackDefNextInsn(bb); + InsertCFIDefCfaOffset(bb, stackDefNextInsn, cfiOffset); + cfiOffset = GetOffsetFromCFA() - argsToStkPassSize; + AArch64CGFunc &aarchCGFunc = static_cast(cgFunc); + + if (useFP) { + (void)bb.InsertInsnBefore(stackDefNextInsn, aarchCGFunc.CreateCfiOffsetInsn(stackBaseReg, -cfiOffset, k64BitSize)); + } + (void)bb.InsertInsnBefore(stackDefNextInsn, aarchCGFunc.CreateCfiOffsetInsn(RLR, -cfiOffset + kOffset8MemPos, k64BitSize)); + + /* change CFA register and offset */ + if (useFP) { + bool isLmbc = cgFunc.GetMirModule().GetFlavor() == MIRFlavor::kFlavorLmbc; + if ((argsToStkPassSize > 0) || isLmbc) { + (void)bb.InsertInsnBefore(stackDefNextInsn, aarchCGFunc.CreateCfiDefCfaInsn(stackBaseReg, + static_cast(cgFunc.GetMemlayout())->RealStackFrameSize() - argsToStkPassSize, + k64BitSize)); + } else { + (void)bb.InsertInsnBefore( + stackDefNextInsn, cgFunc.GetInsnBuilder()->BuildCfiInsn(cfi::OP_CFI_def_cfa_register).AddOpndChain( + cgFunc.CreateCfiRegOperand(stackBaseReg, k64BitSize))); + } + } + + if (CGOptions::IsNoCalleeCFI()) { + return; + } + + /* callee save register cfi offset */ + auto ®sToSave = (aarchCGFunc.GetProEpilogSavedRegs().empty()) ? + aarchCGFunc.GetCalleeSavedRegs() : aarchCGFunc.GetProEpilogSavedRegs(); + if (regsToSave.empty()) { + return; + } + + auto it = regsToSave.begin(); + /* skip the first two registers */ + CHECK_FATAL(*it == RFP, "The first callee saved reg is expected to be RFP"); + ++it; + CHECK_FATAL(*it == RLR, "The second callee saved reg is expected to be RLR"); + ++it; + int32 offset = cgFunc.GetMemlayout()->GetCalleeSaveBaseLoc(); + for (; it != regsToSave.end(); ++it) { + AArch64reg reg = *it; + stackFrameSize -= static_cast(cgFunc.GetMemlayout()->SizeOfArgsToStackPass()); + cfiOffset = stackFrameSize - offset; + (void)bb.InsertInsnBefore(stackDefNextInsn, aarchCGFunc.CreateCfiOffsetInsn(reg, -cfiOffset, k64BitSize)); + /* On AArch64, kIntregBytelen == 8 */ + offset += kIntregBytelen; + } +} + +void AArch64GenCfi::GenerateRegisterRestoreDirective(BB &bb) { + auto &aarchCGFunc = static_cast(cgFunc); + auto ®sToSave = (aarchCGFunc.GetProEpilogSavedRegs().empty()) ? 
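+  /*
+   * Illustrative sketch only (register numbers and offsets are placeholders, not
+   * captured output): for a frame that saves just FP/LR with useFP set, the save
+   * path above emits roughly
+   *     .cfi_def_cfa_offset <frame size>
+   *     .cfi_offset 29, -<FP slot offset from CFA>
+   *     .cfi_offset 30, -<FP slot offset from CFA> + 8
+   *     .cfi_def_cfa_register 29
+   * and this restore path mirrors it with .cfi_restore for each saved register
+   * followed by .cfi_def_cfa 31 (sp), 0 before the return.
+   */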
+ aarchCGFunc.GetCalleeSavedRegs() : aarchCGFunc.GetProEpilogSavedRegs(); + + Insn *returnInsn = bb.GetLastMachineInsn(); + CHECK_NULL_FATAL(returnInsn); + if (!regsToSave.empty()) { + auto it = regsToSave.begin(); + CHECK_FATAL(*it == RFP, "The first callee saved reg is expected to be RFP"); + ++it; + CHECK_FATAL(*it == RLR, "The second callee saved reg is expected to be RLR"); + ++it; + + if (!CGOptions::IsNoCalleeCFI()) { + for (; it != regsToSave.end(); ++it) { + AArch64reg reg = *it; + (void)bb.InsertInsnBefore(*returnInsn, aarchCGFunc.CreateCfiRestoreInsn(reg, k64BitSize)); + } + } + + if (useFP) { + (void)bb.InsertInsnBefore(*returnInsn, aarchCGFunc.CreateCfiRestoreInsn(stackBaseReg, k64BitSize)); + } + (void)bb.InsertInsnBefore(*returnInsn, aarchCGFunc.CreateCfiRestoreInsn(RLR, k64BitSize)); + } + /* in aarch64 R31 is sp */ + (void)bb.InsertInsnBefore(*returnInsn, aarchCGFunc.CreateCfiDefCfaInsn(R31, 0, k64BitSize)); +} +} /* maplebe */ diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_cg.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_cg.cpp new file mode 100644 index 0000000000000000000000000000000000000000..aead8709591e217329050cb97d1af280b9524a75 --- /dev/null +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_cg.cpp @@ -0,0 +1,370 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "aarch64_cg.h" +#include "mir_builder.h" +#include "becommon.h" +#include "label_creation.h" +#include "alignment.h" + +namespace maplebe { +#include "immvalid.def" +#define DEFINE_MOP(...) 
{__VA_ARGS__}, +const InsnDesc AArch64CG::kMd[kMopLast] = { +#include "abstract_mmir.def" +#include "aarch64_md.def" +#include "aarch64_mem_md.def" +}; +#undef DEFINE_MOP + +std::array, kIntRegTypeNum> AArch64CG::intRegNames = { + std::array { + "err", "err0", "err1", "err2", "err3", "err4", "err5", "err6", "err7", "err8", "err9", "err10", + "err11", "err12", "err13", "err14", "err15", "err16", "err17", "err18", "err19", "err20", "err21", "err22", + "err23", "err24", "err25", "err26", "err27", "err28", "err", "err", "err", "errsp", "errzr", /* x29 is fp */ + "b0", "b1", "b2", "b3", "b4", "b5", "b6", "b7", "b8", "b9", "b10", "b11", + "b12", "b13", "b14", "b15", "b16", "b17", "b18", "b19", "b20", "b21", "b22", "b23", + "b24", "b25", "b26", "b27", "b28", "b29", "b30", "b31", "errMaxRegNum", "rflag" }, + std::array { + "err", "err0", "err1", "err2", "err3", "err4", "err5", "err6", "err7", "err8", "err9", "err10", + "err11", "err12", "err13", "err14", "err15", "err16", "err17", "err18", "err19", "err20", "err21", "err22", + "err23", "err24", "err25", "err26", "err27", "err28", "err29", "err30", "err31", "errsp", "errzr", /* x29 is fp */ + "h0", "h1", "h2", "h3", "h4", "h5", "h6", "h7", "h8", "h9", "h10", "h11", + "h12", "h13", "h14", "h15", "h16", "h17", "h18", "h19", "h20", "h21", "h22", "h23", + "h24", "h25", "h26", "h27", "h28", "h29", "h30", "h31", "errMaxRegNum", "rflag" }, + std::array { + "err", "w0", "w1", "w2", "w3", "w4", "w5", "w6", "w7", "w8", "w9", "w10", "w11", "w12", "w13", "w14", + "w15", "w16", "w17", "w18", "w19", "w20", "w21", "w22", "w23", "w24", "w25", "w26", "w27", "w28", + "w29", "err", "err", "wsp", "wzr", /* x29 is fp */ + "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7", "s8", "s9", "s10", "s11", "s12", "s13", "s14", "s15", + "s16", "s17", "s18", "s19", "s20", "s21", "s22", "s23", "s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31", + "errMaxRegNum", "rflag" }, + std::array { + "err", "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", + "x15", "x16", "x17", "x18", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "x29", "x30", + "x29" /* use X40 when debug */, "sp", "xzr", /* x29 is fp */ + "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15", + "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23", "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31", + "errMaxRegNum", "rflag" }, + std::array { + "err", "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", + "x15", "x16", "x17", "x18", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "x29", "x30", + "x29" /* use X40 when debug */, "sp", "xzr", /* x29 is fp */ + "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15", + "q16", "q17", "q18", "q19", "q20", "q21", "q22", "q23", "q24", "q25", "q26", "q27", "q28", "q29", "q30", "q31", + "errMaxRegNum", "rflag" } +}; + +std::array AArch64CG::vectorRegNames = { + "err", "err0", "err1", "err2", "err3", "err4", "err5", "err6", "err7", "err8", "err9", "err10", + "err11", "err12", "err13", "err14", "err15", "err16", "err17", "err18", "err19", "err20", "err21", "err22", + /* x29 is fp, err40 is fp before RA */ + "err23", "err24", "err25", "err26", "err27", "err28", "err29", "err30", "errsp", "errzr", "err40", + "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", + "v16", "v17", "v18", "v19", "v20", "v21", "v22", 
"v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", + "errMaxRegNum", "rflag" }; + +bool AArch64CG::IsExclusiveFunc(MIRFunction &mirFunc) { + const std::string &funcName = mirFunc.GetName(); + for (const auto &it : ehExclusiveNameVec) { + if (it.compare(funcName) == 0) { + return true; + } + } + return false; +} +namespace wordsMap { + /* + * Generate object maps. + * + * 1. each class record its GCTIB in method meta (not read only meta) + * 2. GCTIB include: header protoType; n bitmap word; bitmap word + * 3. each reference word(4 or 8 bytes) is represented by 2 bits + * 00: not ref + * 01: normal ref + * 10: weak ref + * 11: unowned ref + * + * For example, if a scalar object has five ptr fields at offsets 24, 40(weak), + * 64(unowned), the generated code will be like: + * + * MCC_GCTIB__xxx: + * .long 0x40 // object has child reference + * .long 1 // one word in the bitmap + * .quad 0b110000100001000000 + * ... + */ + const uint32 kRefWordsPerMapWord = 32; /* contains bitmap for 32 ref words in 64 bits */ + const uint32 kLogRefWordsPerMapWord = 5; +#ifdef USE_32BIT_REF + const uint32 kReferenceWordSize = 4; + const uint32 kLog2ReferenceWordSize = 2; +#else + const uint32 kReferenceWordSize = 8; + const uint32 kLog2ReferenceWordSize = 3; +#endif + const uint32 kInMapWordOffsetMask = ((kReferenceWordSize * kRefWordsPerMapWord) - 1); + const uint32 kInMapWordIndexShift = (kLog2ReferenceWordSize - 1); + const uint32 kMapWordIndexShift = (kLog2ReferenceWordSize + kLogRefWordsPerMapWord); + + const uint64 kRefBits = 1; + const uint64 kWeakRefBits = 2; + const uint64 kUnownedRefBits = 3; + + /* + * Give a structrue type, calculate its bitmap_vector + */ + static void GetGCTIBBitMapWords(const BECommon &beCommon, MIRStructType &stType, std::vector &bitmapWords) { + bitmapWords.clear(); + if (stType.GetKind() == kTypeClass) { + uint64 curBitmap = 0; + uint32 curBitmapIndex = 0; + uint32 prevOffset = 0; + for (const auto &fieldInfo : beCommon.GetJClassLayout(static_cast(stType))) { + if (fieldInfo.IsRef()) { + uint32 curOffset = fieldInfo.GetOffset(); + /* skip meta field */ + if (curOffset == 0) { + continue; + } + CHECK_FATAL((curOffset > prevOffset) || (prevOffset == 0), "not ascending offset"); + uint32 wordIndex = curOffset >> kMapWordIndexShift; + if (wordIndex > curBitmapIndex) { + bitmapWords.emplace_back(curBitmap); + for (uint32 i = curBitmapIndex + 1; i < wordIndex; i++) { + bitmapWords.emplace_back(0); + } + curBitmap = 0; + curBitmapIndex = wordIndex; + } + uint32 bitOffset = (curOffset & kInMapWordOffsetMask) >> kInMapWordIndexShift; + if (CGOptions::IsGCOnly()) { + /* ignore unowned/weak when GCONLY is enabled. 
*/ + curBitmap |= (kRefBits << bitOffset); + } else if (fieldInfo.IsUnowned()) { + curBitmap |= (kUnownedRefBits << bitOffset); + } else if (fieldInfo.IsWeak()) { + curBitmap |= (kWeakRefBits << bitOffset); + } else { + /* ref */ + curBitmap |= (kRefBits << bitOffset); + } + prevOffset = curOffset; + } + } + if (curBitmap != 0) { + bitmapWords.emplace_back(curBitmap); + } + } else if (stType.GetKind() != kTypeInterface) { + /* interface doesn't have reference fields */ + CHECK_FATAL(false, "GetGCTIBBitMapWords unexpected type"); + } + } +} + +bool AArch64CG::IsTargetInsn(MOperator mOp) const { + return (mOp > MOP_undef && mOp <= MOP_nop); +} +bool AArch64CG::IsClinitInsn(MOperator mOp) const { + return (mOp == MOP_clinit || mOp == MOP_clinit_tail || mOp == MOP_adrp_ldr); +} + +bool AArch64CG::IsEffectiveCopy(Insn &insn) const { + MOperator mOp = insn.GetMachineOpcode(); + if (mOp >= MOP_xmovrr && mOp <= MOP_xvmovrv) { + return true; + } + if (mOp == MOP_vmovuu || mOp == MOP_vmovvv) { + return true; + } + if ((mOp >= MOP_xaddrrr && mOp <= MOP_ssub) || (mOp >= MOP_xlslrri6 && mOp <= MOP_wlsrrrr)) { + Operand &opnd2 = insn.GetOperand(kInsnThirdOpnd); + if (opnd2.IsIntImmediate()) { + auto &immOpnd = static_cast(opnd2); + if (immOpnd.IsZero()) { + return true; + } + } + } + if (mOp > MOP_xmulrrr && mOp <= MOP_xvmuld) { + Operand &opnd2 = insn.GetOperand(kInsnThirdOpnd); + if (opnd2.IsIntImmediate()) { + auto &immOpnd = static_cast(opnd2); + if (immOpnd.GetValue() == 1) { + return true; + } + } + } + return false; +} + +void AArch64CG::DumpTargetOperand(Operand &opnd, const OpndDesc &opndDesc) const { + A64OpndDumpVisitor visitor(opndDesc); + opnd.Accept(visitor); +} + +/* + * Find if there exist same GCTIB (both rcheader and bitmap are same) + * for different class. If ture reuse, if not emit and record new GCTIB. + */ +void AArch64CG::FindOrCreateRepresentiveSym(std::vector &bitmapWords, uint32 rcHeader, + const std::string &name) { + GCTIBKey *key = memPool->New(allocator, rcHeader, bitmapWords); + const std::string &gcTIBName = GCTIB_PREFIX_STR + name; + MapleUnorderedMap::const_iterator iter = keyPatternMap.find(key); + if (iter == keyPatternMap.end() || gcTIBName.compare("MCC_GCTIB__Ljava_2Flang_2FObject_3B") == 0) { + /* Emit the GCTIB label for the class */ + GCTIBPattern *ptn = memPool->New(*key, *memPool); + + if (gcTIBName.compare("MCC_GCTIB__Ljava_2Flang_2FObject_3B") == 0) { + ptn->SetName("MCC_GCTIB__Ljava_2Flang_2FObject_3B"); + } + (void)keyPatternMap.insert(std::make_pair(key, ptn)); + (void)symbolPatternMap.insert(std::make_pair(gcTIBName, ptn)); + + /* Emit GCTIB pattern */ + std::string ptnString = "\t.type " + ptn->GetName() + ", %object\n" + "\t.data\n" + "\t.align 3\n"; + + MIRSymbol *gcTIBSymbol = GlobalTables::GetGsymTable().GetSymbolFromStrIdx( + GlobalTables::GetStrTable().GetStrIdxFromName(namemangler::GetInternalNameLiteral(gcTIBName))); + if (gcTIBSymbol != nullptr && gcTIBSymbol->GetStorageClass() == kScFstatic) { + ptnString += "\t.local "; + } else { + ptnString += "\t.global "; + } + + Emitter *emitter = GetEmitter(); + emitter->Emit(ptnString); + emitter->Emit(ptn->GetName()); + emitter->Emit("\n"); + + /* Emit the GCTIB pattern label for the class */ + emitter->Emit(ptn->GetName()); + emitter->Emit(":\n"); + + emitter->Emit("\t.long "); + emitter->EmitHexUnsigned(rcHeader); + emitter->Emit("\n"); + + /* generate n_bitmap word */ + emitter->Emit("\t.long "); /* AArch64-specific. Generate a 64-bit value. 
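+                                 * Note: this .long emits only the 32-bit count of bitmap
+                                 * words; the 64-bit bitmap values themselves are emitted
+                                 * with .quad in the loop below.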
*/ + emitter->EmitDecUnsigned(bitmapWords.size()); + emitter->Emit("\n"); + + /* Emit each bitmap word */ + for (const auto &bitmapWord : bitmapWords) { + if (!CGOptions::IsQuiet()) { + LogInfo::MapleLogger() << " bitmap_word: 0x"<< bitmapWord << " " << PRIx64 << "\n"; + } + emitter->Emit("\t.quad "); /* AArch64-specific. Generate a 64-bit value. */ + emitter->EmitHexUnsigned(bitmapWord); + emitter->Emit("\n"); + } + if (gcTIBSymbol != nullptr && gcTIBSymbol->GetStorageClass() != kScFstatic) { + /* add local symbol REF_XXX to every global GCTIB symbol */ + CreateRefSymForGlobalPtn(*ptn); + keyPatternMap[key] = ptn; + } + } else { + (void)symbolPatternMap.insert(make_pair(gcTIBName, iter->second)); + } +} + +/* + * Add local symbol REF_XXX to global GCTIB symbol, + * and replace the global GCTIBPattern in keyPatternMap. + */ +void AArch64CG::CreateRefSymForGlobalPtn(GCTIBPattern &ptn) const { + const std::string &refPtnString = REF_PREFIX_STR + ptn.GetName(); + const std::string &ptnString = "\t.type " + refPtnString + ", %object\n" + + "\t.data\n" + + "\t.align 3\n" + + "\t.local " + refPtnString + "\n" + + refPtnString + ":\n" + + "\t.quad " + ptn.GetName() + "\n"; + Emitter *emitter = GetEmitter(); + emitter->Emit(ptnString); + ptn.SetName(refPtnString); +} + +std::string AArch64CG::FindGCTIBPatternName(const std::string &name) const { + auto iter = symbolPatternMap.find(name); + if (iter == symbolPatternMap.end()) { + CHECK_FATAL(false, "No GCTIB pattern found for symbol: %s", name.c_str()); + } + return iter->second->GetName(); +} + +void AArch64CG::GenerateObjectMaps(BECommon &beCommon) { + if (!CGOptions::IsQuiet()) { + LogInfo::MapleLogger() << "DEBUG: Generating object maps...\n"; + } + + for (auto &tyId : GetMIRModule()->GetClassList()) { + if (!CGOptions::IsQuiet()) { + LogInfo::MapleLogger() << "Class tyIdx: " << tyId << "\n"; + } + TyIdx tyIdx(tyId); + MIRType *ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx); + ASSERT(ty != nullptr, "ty nullptr check"); + /* Only emit GCTIB for classes owned by this module */ + ASSERT(ty->IsStructType(), "ty isn't MIRStructType* in AArch64CG::GenerateObjectMaps"); + MIRStructType *strTy = static_cast(ty); + if (!strTy->IsLocal()) { + continue; + } + + GStrIdx nameIdx = ty->GetNameStrIdx(); + + const std::string &name = GlobalTables::GetStrTable().GetStringFromStrIdx(nameIdx); + + /* Emit for a class */ + if (!CGOptions::IsQuiet()) { + LogInfo::MapleLogger() << " name: " << name << "\n"; + } + + std::vector bitmapWords; + wordsMap::GetGCTIBBitMapWords(beCommon, *strTy, bitmapWords); + /* fill specific header according to the size of bitmapWords */ + uint32 rcHeader = (!bitmapWords.empty()) ? 0x40 : 0; + FindOrCreateRepresentiveSym(bitmapWords, rcHeader, name); + } +} + +void AArch64CG::EnrollTargetPhases(MaplePhaseManager *pm) const { + if (!GetMIRModule()->IsCModule()) { + CGOptions::DisableCGSSA(); + } +#include "aarch64_phases.def" +} + +Insn &AArch64CG::BuildPhiInsn(RegOperand &defOpnd, Operand &listParam) { + ASSERT(defOpnd.IsRegister(), "build SSA on register operand"); + CHECK_FATAL(defOpnd.IsOfIntClass() || defOpnd.IsOfFloatOrSIMDClass(), " unknown operand type "); + bool is64bit = defOpnd.GetSize() == k64BitSize; + MOperator mop = MOP_nop; + if (defOpnd.GetSize() == k128BitSize) { + ASSERT(defOpnd.IsOfFloatOrSIMDClass(), "unexpect 128bit int operand in aarch64"); + mop = MOP_xvphivd; + } else { + mop = defOpnd.IsOfIntClass() ? is64bit ? MOP_xphirr : MOP_wphirr : is64bit ? 
MOP_xvphid : MOP_xvphis; + } + ASSERT(mop != MOP_nop, "unexpect 128bit int operand in aarch64"); + return GetCurCGFuncNoConst()->GetInsnBuilder()->BuildInsn(mop, defOpnd, listParam); +} + +PhiOperand &AArch64CG::CreatePhiOperand(MemPool &mp, MapleAllocator &mAllocator) { + return *mp.New(mAllocator); +} +} /* namespace maplebe */ diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_cgfunc.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_cgfunc.cpp new file mode 100644 index 0000000000000000000000000000000000000000..1bb68ac8257e89596b3f5d7da8d0901cc2592c19 --- /dev/null +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_cgfunc.cpp @@ -0,0 +1,12420 @@ +/* + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include +#include +#include +#include +#include "aarch64_cg.h" +#include "cfi.h" +#include "mpl_logging.h" +#include "rt.h" +#include "opcode_info.h" +#include "mir_builder.h" +#include "mir_symbol_builder.h" +#include "mpl_atomic.h" +#include "metadata_layout.h" +#include "emit.h" +#include "simplify.h" +#include "aarch64_cgfunc.h" + +namespace maplebe { +using namespace maple; +CondOperand AArch64CGFunc::ccOperands[kCcLast] = { + CondOperand(CC_EQ), + CondOperand(CC_NE), + CondOperand(CC_CS), + CondOperand(CC_HS), + CondOperand(CC_CC), + CondOperand(CC_LO), + CondOperand(CC_MI), + CondOperand(CC_PL), + CondOperand(CC_VS), + CondOperand(CC_VC), + CondOperand(CC_HI), + CondOperand(CC_LS), + CondOperand(CC_GE), + CondOperand(CC_LT), + CondOperand(CC_GT), + CondOperand(CC_LE), + CondOperand(CC_AL), +}; + +namespace { +constexpr int32 kSignedDimension = 2; /* signed and unsigned */ +constexpr int32 kIntByteSizeDimension = 4; /* 1 byte, 2 byte, 4 bytes, 8 bytes */ +constexpr int32 kFloatByteSizeDimension = 3; /* 4 bytes, 8 bytes, 16 bytes(vector) */ +constexpr int32 kShiftAmount12 = 12; /* for instruction that can use shift, shift amount must be 0 or 12 */ + +MOperator ldIs[kSignedDimension][kIntByteSizeDimension] = { + /* unsigned == 0 */ + { MOP_wldrb, MOP_wldrh, MOP_wldr, MOP_xldr }, + /* signed == 1 */ + { MOP_wldrsb, MOP_wldrsh, MOP_wldr, MOP_xldr } +}; + +MOperator stIs[kSignedDimension][kIntByteSizeDimension] = { + /* unsigned == 0 */ + { MOP_wstrb, MOP_wstrh, MOP_wstr, MOP_xstr }, + /* signed == 1 */ + { MOP_wstrb, MOP_wstrh, MOP_wstr, MOP_xstr } +}; + +MOperator ldIsAcq[kSignedDimension][kIntByteSizeDimension] = { + /* unsigned == 0 */ + { MOP_wldarb, MOP_wldarh, MOP_wldar, MOP_xldar }, + /* signed == 1 */ + { MOP_wldarb, MOP_wldarh, MOP_wldar, MOP_xldar } +}; + +MOperator stIsRel[kSignedDimension][kIntByteSizeDimension] = { + /* unsigned == 0 */ + { MOP_wstlrb, MOP_wstlrh, MOP_wstlr, MOP_xstlr }, + /* signed == 1 */ + { MOP_wstlrb, MOP_wstlrh, MOP_wstlr, MOP_xstlr } +}; + +MOperator ldFs[kFloatByteSizeDimension] = { MOP_sldr, MOP_dldr, MOP_qldr }; +MOperator stFs[kFloatByteSizeDimension] = { MOP_sstr, MOP_dstr, MOP_qstr }; + +MOperator ldFsAcq[kFloatByteSizeDimension] = { MOP_undef, MOP_undef, MOP_undef }; +MOperator stFsRel[kFloatByteSizeDimension] = { 
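+/*
+ * The FP/SIMD acquire/release tables above and below stay MOP_undef on purpose:
+ * SelectLoadAcquire and SelectStoreRelease later in this file fall back to an
+ * integer ldar/stlr paired with an fmov when they look up MOP_undef here.
+ */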
MOP_undef, MOP_undef, MOP_undef }; + +/* extended to unsigned ints */ +MOperator uextIs[kIntByteSizeDimension][kIntByteSizeDimension] = { + /* u8 u16 u32 u64 */ + { MOP_undef, MOP_xuxtb32, MOP_xuxtb32, MOP_xuxtb32}, /* u8/i8 */ + { MOP_undef, MOP_undef, MOP_xuxth32, MOP_xuxth32}, /* u16/i16 */ + { MOP_undef, MOP_undef, MOP_xuxtw64, MOP_xuxtw64}, /* u32/i32 */ + { MOP_undef, MOP_undef, MOP_undef, MOP_undef} /* u64/u64 */ +}; + +/* extended to signed ints */ +MOperator extIs[kIntByteSizeDimension][kIntByteSizeDimension] = { + /* i8 i16 i32 i64 */ + { MOP_undef, MOP_xsxtb32, MOP_xsxtb32, MOP_xsxtb64}, /* u8/i8 */ + { MOP_undef, MOP_undef, MOP_xsxth32, MOP_xsxth64}, /* u16/i16 */ + { MOP_undef, MOP_undef, MOP_undef, MOP_xsxtw64}, /* u32/i32 */ + { MOP_undef, MOP_undef, MOP_undef, MOP_undef} /* u64/u64 */ +}; + +MOperator PickLdStInsn(bool isLoad, uint32 bitSize, PrimType primType, AArch64isa::MemoryOrdering memOrd) { + ASSERT(__builtin_popcount(static_cast(memOrd)) <= 1, "must be kMoNone or kMoAcquire"); + ASSERT(primType != PTY_ptr, "should have been lowered"); + ASSERT(primType != PTY_ref, "should have been lowered"); + ASSERT(bitSize >= k8BitSize, "PTY_u1 should have been lowered?"); + ASSERT(__builtin_popcount(bitSize) == 1, "PTY_u1 should have been lowered?"); + if (isLoad) { + ASSERT((memOrd == AArch64isa::kMoNone) || (memOrd == AArch64isa::kMoAcquire) || + (memOrd == AArch64isa::kMoAcquireRcpc) || (memOrd == AArch64isa::kMoLoacquire), "unknown Memory Order"); + } else { + ASSERT((memOrd == AArch64isa::kMoNone) || (memOrd == AArch64isa::kMoRelease) || + (memOrd == AArch64isa::kMoLorelease), "unknown Memory Order"); + } + + /* __builtin_ffs(x) returns: 0 -> 0, 1 -> 1, 2 -> 2, 4 -> 3, 8 -> 4 */ + if ((IsPrimitiveInteger(primType) || primType == PTY_agg) && !IsPrimitiveVector(primType)) { + MOperator(*table)[kIntByteSizeDimension]; + if (isLoad) { + table = (memOrd == AArch64isa::kMoAcquire) ? ldIsAcq : ldIs; + } else { + table = (memOrd == AArch64isa::kMoRelease) ? stIsRel : stIs; + } + + int32 signedUnsigned = IsUnsignedInteger(primType) ? 0 : 1; + if (primType == PTY_agg) { + CHECK_FATAL(bitSize >= k8BitSize, " unexpect agg size"); + bitSize = static_cast(RoundUp(bitSize, k8BitSize)); + ASSERT((bitSize & (bitSize - 1)) == 0, "bitlen error"); + } + + /* __builtin_ffs(x) returns: 8 -> 4, 16 -> 5, 32 -> 6, 64 -> 7 */ + if (primType == PTY_i128 || primType == PTY_u128) { + bitSize = k64BitSize; + } + uint32 size = static_cast(__builtin_ffs(static_cast(bitSize))) - 4; + ASSERT(size <= 3, "wrong bitSize"); + return table[signedUnsigned][size]; + } else { + MOperator *table = nullptr; + if (isLoad) { + table = (memOrd == AArch64isa::kMoAcquire) ? ldFsAcq : ldFs; + } else { + table = (memOrd == AArch64isa::kMoRelease) ? 
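+    /*
+     * Illustrative walk-through: a 64-bit FP access gives __builtin_ffs(64) == 7,
+     * so size == 1 below and the d-register forms ldFs[1]/stFs[1]
+     * (MOP_dldr / MOP_dstr) are chosen.
+     */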
stFsRel : stFs; + } + + /* __builtin_ffs(x) returns: 32 -> 6, 64 -> 7, 128 -> 8 */ + uint32 size = static_cast(__builtin_ffs(static_cast(bitSize))) - 6; + ASSERT(size <= 2, "size must be 0 to 2"); + return table[size]; + } +} +} + +bool IsBlkassignForPush(const BlkassignoffNode &bNode) { + BaseNode *dest = bNode.Opnd(0); + bool spBased = false; + if (dest->GetOpCode() == OP_regread) { + RegreadNode &node = static_cast(*dest); + if (-node.GetRegIdx() == kSregSp) { + spBased = true; + } + } + return spBased; +} + +MIRStructType *AArch64CGFunc::GetLmbcStructArgType(BaseNode &stmt, size_t argNo) { + MIRType *ty = nullptr; + if (stmt.GetOpCode() == OP_call) { + CallNode &callNode = static_cast(stmt); + MIRFunction *callFunc = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(callNode.GetPUIdx()); + if (callFunc->GetFormalCount() < (argNo + 1UL)) { + return nullptr; /* formals less than actuals */ + } + ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(callFunc->GetFormalDefVec()[argNo].formalTyIdx); + } else if (stmt.GetOpCode() == OP_icallproto) { + argNo--; /* 1st opnd of icallproto is funcname, skip it relative to param list */ + IcallNode &icallproto = static_cast(stmt); + MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(icallproto.GetRetTyIdx()); + MIRFuncType *fType = nullptr; + if (type->IsMIRPtrType()) { + fType = static_cast(type)->GetPointedFuncType(); + } else { + fType = static_cast(type); + } + if (fType->GetParamTypeList().size() < (argNo + 1UL)) { + return nullptr; + } + ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(fType->GetNthParamType(argNo)); + } + CHECK_FATAL(ty && ty->IsStructType(), "lmbc agg arg error"); + return static_cast(ty); +} + +RegOperand &AArch64CGFunc::GetOrCreateResOperand(const BaseNode &parent, PrimType primType) { + RegOperand *resOpnd = nullptr; + if (parent.GetOpCode() == OP_regassign) { + auto ®AssignNode = static_cast(parent); + PregIdx pregIdx = regAssignNode.GetRegIdx(); + if (IsSpecialPseudoRegister(pregIdx)) { + /* if it is one of special registers */ + resOpnd = &GetOrCreateSpecialRegisterOperand(-pregIdx, primType); + } else { + resOpnd = &GetOrCreateVirtualRegisterOperand(GetVirtualRegNOFromPseudoRegIdx(pregIdx)); + } + } else { + resOpnd = &CreateRegisterOperandOfType(primType); + } + return *resOpnd; +} + +MOperator AArch64CGFunc::PickLdInsn(uint32 bitSize, PrimType primType, + AArch64isa::MemoryOrdering memOrd) const { + return PickLdStInsn(true, bitSize, primType, memOrd); +} + +MOperator AArch64CGFunc::PickStInsn(uint32 bitSize, PrimType primType, + AArch64isa::MemoryOrdering memOrd) const { + return PickLdStInsn(false, bitSize, primType, memOrd); +} + +MOperator AArch64CGFunc::PickExtInsn(PrimType dtype, PrimType stype) const { + int32 sBitSize = static_cast(GetPrimTypeBitSize(stype)); + int32 dBitSize = static_cast(GetPrimTypeBitSize(dtype)); + /* __builtin_ffs(x) returns: 0 -> 0, 1 -> 1, 2 -> 2, 4 -> 3, 8 -> 4 */ + if (IsPrimitiveInteger(stype) && IsPrimitiveInteger(dtype)) { + MOperator(*table)[kIntByteSizeDimension]; + table = IsUnsignedInteger(stype) ? 
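+    /*
+     * Illustrative walk-through: extending PTY_i8 to PTY_i64 selects extIs with
+     * row == __builtin_ffs(8) - 4 == 0 and col == __builtin_ffs(64) - 4 == 3,
+     * i.e. extIs[0][3] == MOP_xsxtb64.
+     */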
uextIs : extIs; + if (stype == PTY_i128 || stype == PTY_u128) { + sBitSize = static_cast(k64BitSize); + } + /* __builtin_ffs(x) returns: 8 -> 4, 16 -> 5, 32 -> 6, 64 -> 7 */ + uint32 row = static_cast(__builtin_ffs(sBitSize)) - k4BitSize; + ASSERT(row <= 3, "wrong bitSize"); + if (dtype == PTY_i128 || dtype == PTY_u128) { + dBitSize = static_cast(k64BitSize); + } + uint32 col = static_cast(__builtin_ffs(dBitSize)) - k4BitSize; + ASSERT(col <= 3, "wrong bitSize"); + return table[row][col]; + } + CHECK_FATAL(false, "extend not primitive integer"); + return MOP_undef; +} + +MOperator AArch64CGFunc::PickMovBetweenRegs(PrimType destType, PrimType srcType) const { + if (IsPrimitiveVector(destType) && IsPrimitiveVector(srcType)) { + return GetPrimTypeSize(srcType) == k8ByteSize ? MOP_vmovuu : MOP_vmovvv; + } + if (IsPrimitiveInteger(destType) && IsPrimitiveInteger(srcType)) { + return GetPrimTypeSize(srcType) <= k4ByteSize ? MOP_wmovrr : MOP_xmovrr; + } + if (IsPrimitiveFloat(destType) && IsPrimitiveFloat(srcType)) { + return GetPrimTypeSize(srcType) <= k4ByteSize ? MOP_xvmovs : MOP_xvmovd; + } + if (IsPrimitiveInteger(destType) && IsPrimitiveFloat(srcType)) { + return GetPrimTypeSize(srcType) <= k4ByteSize ? MOP_xvmovrs : MOP_xvmovrd; + } + if (IsPrimitiveFloat(destType) && IsPrimitiveInteger(srcType)) { + return GetPrimTypeSize(srcType) <= k4ByteSize ? MOP_xvmovsr : MOP_xvmovdr; + } + if (IsPrimitiveInteger(destType) && IsPrimitiveVector(srcType)) { + return GetPrimTypeSize(srcType) == k8ByteSize ? MOP_vwmovru : + GetPrimTypeSize(destType) <= k4ByteSize ? MOP_vwmovrv : MOP_vxmovrv; + } + CHECK_FATAL(false, "unexpected operand primtype for mov"); + return MOP_undef; +} + +MOperator AArch64CGFunc::PickMovInsn(const RegOperand &lhs, const RegOperand &rhs) const { + CHECK_FATAL(lhs.GetRegisterType() == rhs.GetRegisterType(), "PickMovInsn: unequal kind NYI"); + CHECK_FATAL(lhs.GetSize() == rhs.GetSize(), "PickMovInsn: unequal size NYI"); + ASSERT(((lhs.GetSize() < k64BitSize) || (lhs.GetRegisterType() == kRegTyFloat)), + "should split the 64 bits or more mov"); + if (lhs.GetRegisterType() == kRegTyInt) { + return MOP_wmovrr; + } + if (lhs.GetRegisterType() == kRegTyFloat) { + return (lhs.GetSize() <= k32BitSize) ? MOP_xvmovs : MOP_xvmovd; + } + ASSERT(false, "PickMovInsn: kind NYI"); + return MOP_undef; +} + +void AArch64CGFunc::SelectLoadAcquire(Operand &dest, PrimType dtype, Operand &src, PrimType stype, + AArch64isa::MemoryOrdering memOrd, bool isDirect) { + ASSERT(src.GetKind() == Operand::kOpdMem, "Just checking"); + ASSERT(memOrd != AArch64isa::kMoNone, "Just checking"); + + uint32 ssize = isDirect ? src.GetSize() : GetPrimTypeBitSize(dtype); + uint32 dsize = GetPrimTypeBitSize(dtype); + MOperator mOp = PickLdInsn(ssize, stype, memOrd); + + Operand *newSrc = &src; + auto &memOpnd = static_cast(src); + OfstOperand *immOpnd = memOpnd.GetOffsetImmediate(); + int32 offset = static_cast(immOpnd->GetOffsetValue()); + RegOperand *origBaseReg = memOpnd.GetBaseRegister(); + if (offset != 0) { + RegOperand &resOpnd = CreateRegisterOperandOfType(PTY_i64); + ASSERT(origBaseReg != nullptr, "nullptr check"); + SelectAdd(resOpnd, *origBaseReg, *immOpnd, PTY_i64); + newSrc = &CreateReplacementMemOperand(ssize, resOpnd, 0); + } + + std::string key; + if (isDirect && GetCG()->GenerateVerboseCG()) { + key = GenerateMemOpndVerbose(src); + } + + /* Check if the right load-acquire instruction is available. 
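+   * When it is not (MOP_undef), the two fallbacks below are used; roughly, with
+   * made-up temporary register names rather than generated code:
+   *   f64 acquire load:  ldar  x16, [base]   followed by   fmov d0, x16
+   *   i8  acquire load:  ldarb w16, [base]   followed by   sxtb w0, w16 (or x0 for a 64-bit dest)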
*/ + if (mOp != MOP_undef) { + Insn &insn = GetInsnBuilder()->BuildInsn(mOp, dest, *newSrc); + if (isDirect && GetCG()->GenerateVerboseCG()) { + insn.SetComment(key); + } + GetCurBB()->AppendInsn(insn); + } else { + if (IsPrimitiveFloat(stype)) { + /* Uses signed integer version ldar followed by a floating-point move(fmov). */ + ASSERT(stype == dtype, "Just checking"); + PrimType itype = (stype == PTY_f32) ? PTY_i32 : PTY_i64; + RegOperand ®Opnd = CreateRegisterOperandOfType(itype); + Insn &insn = GetInsnBuilder()->BuildInsn(PickLdInsn(ssize, itype, memOrd), regOpnd, *newSrc); + if (isDirect && GetCG()->GenerateVerboseCG()) { + insn.SetComment(key); + } + GetCurBB()->AppendInsn(insn); + mOp = (stype == PTY_f32) ? MOP_xvmovsr : MOP_xvmovdr; + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, dest, regOpnd)); + } else { + /* Use unsigned version ldarb/ldarh followed by a sign-extension instruction(sxtb/sxth). */ + ASSERT((ssize == k8BitSize) || (ssize == k16BitSize), "Just checking"); + PrimType utype = (ssize == k8BitSize) ? PTY_u8 : PTY_u16; + Insn &insn = GetInsnBuilder()->BuildInsn(PickLdInsn(ssize, utype, memOrd), dest, *newSrc); + if (isDirect && GetCG()->GenerateVerboseCG()) { + insn.SetComment(key); + } + GetCurBB()->AppendInsn(insn); + mOp = ((dsize == k32BitSize) ? ((ssize == k8BitSize) ? MOP_xsxtb32 : MOP_xsxth32) + : ((ssize == k8BitSize) ? MOP_xsxtb64 : MOP_xsxth64)); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, dest, dest)); + } + } +} + +void AArch64CGFunc::SelectStoreRelease(Operand &dest, PrimType dtype, Operand &src, PrimType stype, + AArch64isa::MemoryOrdering memOrd, bool isDirect) { + ASSERT(dest.GetKind() == Operand::kOpdMem, "Just checking"); + + uint32 dsize = isDirect ? dest.GetSize() : GetPrimTypeBitSize(stype); + MOperator mOp = PickStInsn(dsize, stype, memOrd); + + Operand *newDest = &dest; + MemOperand *memOpnd = static_cast(&dest); + OfstOperand *immOpnd = memOpnd->GetOffsetImmediate(); + int32 offset = static_cast(immOpnd->GetOffsetValue()); + RegOperand *origBaseReg = memOpnd->GetBaseRegister(); + if (offset != 0) { + RegOperand &resOpnd = CreateRegisterOperandOfType(PTY_i64); + ASSERT(origBaseReg != nullptr, "nullptr check"); + SelectAdd(resOpnd, *origBaseReg, *immOpnd, PTY_i64); + newDest = &CreateReplacementMemOperand(dsize, resOpnd, 0); + } + + std::string key; + if (isDirect && GetCG()->GenerateVerboseCG()) { + key = GenerateMemOpndVerbose(dest); + } + + /* Check if the right store-release instruction is available. */ + if (mOp != MOP_undef) { + Insn &insn = GetInsnBuilder()->BuildInsn(mOp, src, *newDest); + if (isDirect && GetCG()->GenerateVerboseCG()) { + insn.SetComment(key); + } + GetCurBB()->AppendInsn(insn); + } else { + /* Use a floating-point move(fmov) followed by a stlr. */ + ASSERT(IsPrimitiveFloat(stype), "must be float type"); + CHECK_FATAL(stype == dtype, "Just checking"); + PrimType itype = (stype == PTY_f32) ? PTY_i32 : PTY_i64; + RegOperand ®Opnd = CreateRegisterOperandOfType(itype); + mOp = (stype == PTY_f32) ? 
MOP_xvmovrs : MOP_xvmovrd; + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, regOpnd, src)); + Insn &insn = GetInsnBuilder()->BuildInsn(PickStInsn(dsize, itype, memOrd), regOpnd, *newDest); + if (isDirect && GetCG()->GenerateVerboseCG()) { + insn.SetComment(key); + } + GetCurBB()->AppendInsn(insn); + } +} + +void AArch64CGFunc::SelectCopyImm(Operand &dest, PrimType dType, ImmOperand &src, PrimType sType) { + if (IsPrimitiveInteger(dType) != IsPrimitiveInteger(sType)) { + RegOperand &tempReg = CreateRegisterOperandOfType(sType); + SelectCopyImm(tempReg, src, sType); + SelectCopy(dest, dType, tempReg, sType); + } else { + SelectCopyImm(dest, src, sType); + } +} + +void AArch64CGFunc::SelectCopyImm(Operand &dest, ImmOperand &src, PrimType dtype) { + uint32 dsize = GetPrimTypeBitSize(dtype); + ASSERT(IsPrimitiveInteger(dtype), "The type of destination operand must be Integer"); + ASSERT(((dsize == k8BitSize) || (dsize == k16BitSize) || (dsize == k32BitSize) || (dsize == k64BitSize)), + "The destination operand must be >= 8-bit"); + if (src.GetSize() == k32BitSize && dsize == k64BitSize && src.IsSingleInstructionMovable()) { + auto tempReg = CreateVirtualRegisterOperand(NewVReg(kRegTyInt, dsize), dsize, kRegTyInt); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_wmovri32, *tempReg, src)); + SelectCopy(dest, dtype, *tempReg, PTY_u32); + return; + } + if (src.IsSingleInstructionMovable()) { + MOperator mOp = (dsize == k32BitSize) ? MOP_wmovri32 : MOP_xmovri64; + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, dest, src)); + return; + } + uint64 srcVal = static_cast(src.GetValue()); + /* using mov/movk to load the immediate value */ + if (dsize == k8BitSize) { + /* compute lower 8 bits value */ + if (dtype == PTY_u8) { + /* zero extend */ + srcVal = (srcVal << 56) >> 56; + dtype = PTY_u16; + } else { + /* sign extend */ + srcVal = ((static_cast(srcVal)) << 56) >> 56; + dtype = PTY_i16; + } + dsize = k16BitSize; + } + if (dsize == k16BitSize) { + if (dtype == PTY_u16) { + /* check lower 16 bits and higher 16 bits respectively */ + ASSERT((srcVal & 0x0000FFFFULL) != 0, "unexpected value"); + ASSERT(((srcVal >> k16BitSize) & 0x0000FFFFULL) == 0, "unexpected value"); + ASSERT((srcVal & 0x0000FFFFULL) != 0xFFFFULL, "unexpected value"); + /* create an imm opereand which represents lower 16 bits of the immediate */ + ImmOperand &srcLower = CreateImmOperand(static_cast(srcVal & 0x0000FFFFULL), k16BitSize, false); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_wmovri32, dest, srcLower)); + return; + } else { + /* sign extend and let `dsize == 32` case take care of it */ + srcVal = ((static_cast(srcVal)) << 48) >> 48; + dsize = k32BitSize; + } + } + if (dsize == k32BitSize) { + /* check lower 16 bits and higher 16 bits respectively */ + ASSERT((srcVal & 0x0000FFFFULL) != 0, "unexpected val"); + ASSERT(((srcVal >> k16BitSize) & 0x0000FFFFULL) != 0, "unexpected val"); + ASSERT((srcVal & 0x0000FFFFULL) != 0xFFFFULL, "unexpected val"); + ASSERT(((srcVal >> k16BitSize) & 0x0000FFFFULL) != 0xFFFFULL, "unexpected val"); + /* create an imm opereand which represents lower 16 bits of the immediate */ + ImmOperand &srcLower = CreateImmOperand(static_cast(srcVal & 0x0000FFFFULL), k16BitSize, false); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_wmovri32, dest, srcLower)); + /* create an imm opereand which represents upper 16 bits of the immediate */ + ImmOperand &srcUpper = CreateImmOperand(static_cast((srcVal >> k16BitSize) & 0x0000FFFFULL), + k16BitSize, false); + 
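+    /*
+     * Illustrative example with a made-up value: copying 0x00034567 into a 32-bit
+     * destination emits  mov w0, #0x4567  followed by  movk w0, #0x3, lsl #16.
+     */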
BitShiftOperand *lslOpnd = GetLogicalShiftLeftOperand(k16BitSize, false); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_wmovkri16, dest, srcUpper, *lslOpnd)); + } else { + /* + * partition it into 4 16-bit chunks + * if more 0's than 0xFFFF's, use movz as the initial instruction. + * otherwise, movn. + */ + bool useMovz = BetterUseMOVZ(srcVal); + bool useMovk = false; + /* get lower 32 bits of the immediate */ + uint64 chunkLval = srcVal & 0xFFFFFFFFULL; + /* get upper 32 bits of the immediate */ + uint64 chunkHval = (srcVal >> k32BitSize) & 0xFFFFFFFFULL; + int32 maxLoopTime = 4; + + if (chunkLval == chunkHval) { + /* compute lower 32 bits, and then copy to higher 32 bits, so only 2 chunks need be processed */ + maxLoopTime = 2; + } + + uint64 sa = 0; + + for (int64 i = 0; i < maxLoopTime; ++i, sa += k16BitSize) { + /* create an imm opereand which represents the i-th 16-bit chunk of the immediate */ + uint64 chunkVal = (srcVal >> (static_cast(sa))) & 0x0000FFFFULL; + if (useMovz ? (chunkVal == 0) : (chunkVal == 0x0000FFFFULL)) { + continue; + } + ImmOperand &src16 = CreateImmOperand(static_cast(chunkVal), k16BitSize, false); + BitShiftOperand *lslOpnd = GetLogicalShiftLeftOperand(sa, true); + if (!useMovk) { + /* use movz or movn */ + if (!useMovz) { + src16.BitwiseNegate(); + } + GetCurBB()->AppendInsn( + GetInsnBuilder()->BuildInsn(useMovz ? MOP_xmovzri16 : MOP_xmovnri16, dest, src16, *lslOpnd)); + useMovk = true; + } else { + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xmovkri16, dest, src16, *lslOpnd)); + } + } + + if (maxLoopTime == 2) { + /* copy lower 32 bits to higher 32 bits */ + ImmOperand &immOpnd = CreateImmOperand(k32BitSize, k8BitSize, false); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xbfirri6i6, dest, dest, immOpnd, immOpnd)); + } + } +} + +std::string AArch64CGFunc::GenerateMemOpndVerbose(const Operand &src) const { + ASSERT(src.GetKind() == Operand::kOpdMem, "Just checking"); + const MIRSymbol *symSecond = static_cast(&src)->GetSymbol(); + if (symSecond != nullptr) { + std::string str; + MIRStorageClass sc = symSecond->GetStorageClass(); + if (sc == kScFormal) { + str = "param: "; + } else if (sc == kScAuto) { + str = "local var: "; + } else { + str = "global: "; + } + return str.append(symSecond->GetName()); + } + return ""; +} + +void AArch64CGFunc::SelectCopyMemOpnd(Operand &dest, PrimType dtype, uint32 dsize, + Operand &src, PrimType stype) { + AArch64isa::MemoryOrdering memOrd = AArch64isa::kMoNone; + const MIRSymbol *sym = static_cast(&src)->GetSymbol(); + if ((sym != nullptr) && (sym->GetStorageClass() == kScGlobal) && sym->GetAttr(ATTR_memory_order_acquire)) { + memOrd = AArch64isa::kMoAcquire; + } + + if (memOrd != AArch64isa::kMoNone) { + AArch64CGFunc::SelectLoadAcquire(dest, dtype, src, stype, memOrd, true); + return; + } + Insn *insn = nullptr; + uint32 ssize = src.GetSize(); + PrimType regTy = PTY_void; + RegOperand *loadReg = nullptr; + MOperator mop = MOP_undef; + if (IsPrimitiveFloat(stype) || IsPrimitiveVector(stype)) { + CHECK_FATAL(dsize == ssize, "dsize %u expect equals ssize %u", dtype, ssize); + insn = &GetInsnBuilder()->BuildInsn(PickLdInsn(ssize, stype), dest, src); + } else { + if (stype == PTY_agg && dtype == PTY_agg) { + mop = MOP_undef; + } else { + mop = PickExtInsn(dtype, stype); + } + if (ssize == (GetPrimTypeSize(dtype) * kBitsPerByte) || mop == MOP_undef) { + insn = &GetInsnBuilder()->BuildInsn(PickLdInsn(ssize, stype), dest, src); + } else { + regTy = dsize == k64BitSize ? 
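+      /*
+       * The source is narrower than the destination here: load into a temporary
+       * register first; the extension mop chosen by PickExtInsn is appended right
+       * after the load below.
+       */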
dtype : PTY_i32; + loadReg = &CreateRegisterOperandOfType(regTy); + insn = &GetInsnBuilder()->BuildInsn(PickLdInsn(ssize, stype), *loadReg, src); + } + } + + if (GetCG()->GenerateVerboseCG()) { + insn->SetComment(GenerateMemOpndVerbose(src)); + } + + GetCurBB()->AppendInsn(*insn); + if (regTy != PTY_void && mop != MOP_undef) { + ASSERT(loadReg != nullptr, "loadReg should not be nullptr"); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mop, dest, *loadReg)); + } +} + +bool AArch64CGFunc::IsImmediateValueInRange(MOperator mOp, int64 immVal, bool is64Bits, + bool isIntactIndexed, bool isPostIndexed, bool isPreIndexed) const { + bool isInRange = false; + switch (mOp) { + case MOP_xstr: + case MOP_wstr: + isInRange = + (isIntactIndexed && + ((!is64Bits && (immVal >= kStrAllLdrAllImmLowerBound) && (immVal <= kStrLdrImm32UpperBound)) || + (is64Bits && (immVal >= kStrAllLdrAllImmLowerBound) && (immVal <= kStrLdrImm64UpperBound)))) || + ((isPostIndexed || isPreIndexed) && (immVal >= kStrLdrPerPostLowerBound) && + (immVal <= kStrLdrPerPostUpperBound)); + break; + case MOP_wstrb: + isInRange = + (isIntactIndexed && (immVal >= kStrAllLdrAllImmLowerBound) && (immVal <= kStrbLdrbImmUpperBound)) || + ((isPostIndexed || isPreIndexed) && (immVal >= kStrLdrPerPostLowerBound) && + (immVal <= kStrLdrPerPostUpperBound)); + break; + case MOP_wstrh: + isInRange = + (isIntactIndexed && (immVal >= kStrAllLdrAllImmLowerBound) && (immVal <= kStrhLdrhImmUpperBound)) || + ((isPostIndexed || isPreIndexed) && (immVal >= kStrLdrPerPostLowerBound) && + (immVal <= kStrLdrPerPostUpperBound)); + break; + default: + break; + } + return isInRange; +} + +bool AArch64CGFunc::IsStoreMop(MOperator mOp) const { + switch (mOp) { + case MOP_sstr: + case MOP_dstr: + case MOP_qstr: + case MOP_xstr: + case MOP_wstr: + case MOP_wstrb: + case MOP_wstrh: + return true; + default: + return false; + } +} + +void AArch64CGFunc::SplitMovImmOpndInstruction(int64 immVal, RegOperand &destReg, Insn *curInsn) { + bool useMovz = BetterUseMOVZ(immVal); + bool useMovk = false; + /* get lower 32 bits of the immediate */ + uint64 chunkLval = static_cast(immVal) & 0xFFFFFFFFULL; + /* get upper 32 bits of the immediate */ + uint64 chunkHval = (static_cast(immVal) >> k32BitSize) & 0xFFFFFFFFULL; + int32 maxLoopTime = 4; + + if (chunkLval == chunkHval) { + /* compute lower 32 bits, and then copy to higher 32 bits, so only 2 chunks need be processed */ + maxLoopTime = 2; + } + + uint64 sa = 0; + auto *bb = (curInsn != nullptr) ? curInsn->GetBB() : GetCurBB(); + for (int64 i = 0 ; i < maxLoopTime; ++i, sa += k16BitSize) { + /* create an imm opereand which represents the i-th 16-bit chunk of the immediate */ + uint64 chunkVal = (static_cast(immVal) >> sa) & 0x0000FFFFULL; + if (useMovz ? (chunkVal == 0) : (chunkVal == 0x0000FFFFULL)) { + continue; + } + ImmOperand &src16 = CreateImmOperand(static_cast(chunkVal), k16BitSize, false); + BitShiftOperand *lslOpnd = GetLogicalShiftLeftOperand(sa, true); + Insn *newInsn = nullptr; + if (!useMovk) { + /* use movz or movn */ + if (!useMovz) { + src16.BitwiseNegate(); + } + MOperator mOpCode = useMovz ? 
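+      /*
+       * Illustrative example: 0xFFFFFFFFFFFF1234 has three 0xFFFF chunks, so useMovz
+       * is false and a single  movn x0, #0xedcb  materializes the whole constant
+       * (the 0xFFFF chunks are skipped by this loop).
+       */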
MOP_xmovzri16 : MOP_xmovnri16; + newInsn = &GetInsnBuilder()->BuildInsn(mOpCode, destReg, src16, *lslOpnd); + useMovk = true; + } else { + newInsn = &GetInsnBuilder()->BuildInsn(MOP_xmovkri16, destReg, src16, *lslOpnd); + } + if (curInsn != nullptr) { + bb->InsertInsnBefore(*curInsn, *newInsn); + } else { + bb->AppendInsn(*newInsn); + } + } + + if (maxLoopTime == 2) { + /* copy lower 32 bits to higher 32 bits */ + ImmOperand &immOpnd = CreateImmOperand(k32BitSize, k8BitSize, false); + Insn &insn = GetInsnBuilder()->BuildInsn(MOP_xbfirri6i6, destReg, destReg, immOpnd, immOpnd); + if (curInsn != nullptr) { + bb->InsertInsnBefore(*curInsn, insn); + } else { + bb->AppendInsn(insn); + } + } +} + +void AArch64CGFunc::SelectCopyRegOpnd(Operand &dest, PrimType dtype, Operand::OperandType opndType, + uint32 dsize, Operand &src, PrimType stype) { + if (opndType != Operand::kOpdMem) { + if (!CGOptions::IsArm64ilp32()) { + ASSERT(stype != PTY_a32, ""); + } + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(PickMovBetweenRegs(dtype, stype), dest, src)); + return; + } + AArch64isa::MemoryOrdering memOrd = AArch64isa::kMoNone; + const MIRSymbol *sym = static_cast(&dest)->GetSymbol(); + if ((sym != nullptr) && (sym->GetStorageClass() == kScGlobal) && sym->GetAttr(ATTR_memory_order_release)) { + memOrd = AArch64isa::kMoRelease; + } + + if (memOrd != AArch64isa::kMoNone) { + AArch64CGFunc::SelectStoreRelease(dest, dtype, src, stype, memOrd, true); + return; + } + + bool is64Bits = (dest.GetSize() == k64BitSize) ? true : false; + MOperator strMop = PickStInsn(dsize, stype); + if (!dest.IsMemoryAccessOperand()) { + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(strMop, src, dest)); + return; + } + + MemOperand *memOpnd = static_cast(&dest); + ASSERT(memOpnd != nullptr, "memOpnd should not be nullptr"); + if (memOpnd->GetAddrMode() == MemOperand::kAddrModeLo12Li) { + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(strMop, src, dest)); + return; + } + if (memOpnd->GetOffsetOperand() == nullptr) { + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(strMop, src, dest)); + return; + } + ImmOperand *immOpnd = static_cast(memOpnd->GetOffsetOperand()); + ASSERT(immOpnd != nullptr, "immOpnd should not be nullptr"); + int64 immVal = immOpnd->GetValue(); + bool isIntactIndexed = memOpnd->IsIntactIndexed(); + bool isPostIndexed = memOpnd->IsPostIndexed(); + bool isPreIndexed = memOpnd->IsPreIndexed(); + ASSERT(!isPostIndexed, "memOpnd should not be post-index type"); + ASSERT(!isPreIndexed, "memOpnd should not be pre-index type"); + bool isInRange = false; + if (!GetMirModule().IsCModule()) { + isInRange = IsImmediateValueInRange(strMop, immVal, is64Bits, isIntactIndexed, isPostIndexed, isPreIndexed); + } else { + isInRange = IsOperandImmValid(strMop, memOpnd, kInsnSecondOpnd); + } + bool isMopStr = IsStoreMop(strMop); + if (isInRange || !isMopStr) { + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(strMop, src, dest)); + return; + } + ASSERT(memOpnd->GetBaseRegister() != nullptr, "nullptr check"); + if (isIntactIndexed) { + memOpnd = &SplitOffsetWithAddInstruction(*memOpnd, dsize); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(strMop, src, *memOpnd)); + } else if (isPostIndexed || isPreIndexed) { + RegOperand ® = CreateRegisterOperandOfType(PTY_i64); + MOperator mopMov = MOP_xmovri64; + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mopMov, reg, *immOpnd)); + MOperator mopAdd = MOP_xaddrrr; + MemOperand &newDest = + GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, GetPrimTypeBitSize(dtype), 
memOpnd->GetBaseRegister(), + nullptr, &GetOrCreateOfstOpnd(0, k32BitSize), nullptr); + Insn &insn1 = GetInsnBuilder()->BuildInsn(strMop, src, newDest); + Insn &insn2 = GetInsnBuilder()->BuildInsn(mopAdd, *newDest.GetBaseRegister(), *newDest.GetBaseRegister(), reg); + if (isPostIndexed) { + GetCurBB()->AppendInsn(insn1); + GetCurBB()->AppendInsn(insn2); + } else { + /* isPreIndexed */ + GetCurBB()->AppendInsn(insn2); + GetCurBB()->AppendInsn(insn1); + } + } +} + +void AArch64CGFunc::SelectCopy(Operand &dest, PrimType dtype, Operand &src, PrimType stype) { + ASSERT(dest.IsRegister() || dest.IsMemoryAccessOperand(), ""); + uint32 dsize = GetPrimTypeBitSize(dtype); + if (dest.IsRegister()) { + dsize = dest.GetSize(); + } + Operand::OperandType opnd0Type = dest.GetKind(); + Operand::OperandType opnd1Type = src.GetKind(); + ASSERT(((dsize >= src.GetSize()) || (opnd0Type == Operand::kOpdRegister) || (opnd0Type == Operand::kOpdMem)), "NYI"); + ASSERT(((opnd0Type == Operand::kOpdRegister) || (src.GetKind() == Operand::kOpdRegister)), + "either src or dest should be register"); + + switch (opnd1Type) { + case Operand::kOpdMem: + SelectCopyMemOpnd(dest, dtype, dsize, src, stype); + break; + case Operand::kOpdOffset: + case Operand::kOpdImmediate: + SelectCopyImm(dest, dtype, static_cast(src), stype); + break; + case Operand::kOpdFPImmediate: + CHECK_FATAL(static_cast(src).GetValue() == 0, "NIY"); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn((dsize == k32BitSize) ? MOP_xvmovsr : MOP_xvmovdr, + dest, GetZeroOpnd(dsize))); + break; + case Operand::kOpdRegister: { + if (opnd0Type == Operand::kOpdRegister && IsPrimitiveVector(stype)) { + /* check vector reg to vector reg move */ + CHECK_FATAL(IsPrimitiveVector(dtype), "invalid vectreg to vectreg move"); + MOperator mop = (dsize <= k64BitSize) ? MOP_vmovuu : MOP_vmovvv; + VectorInsn &vInsn = GetInsnBuilder()->BuildVectorInsn(mop, AArch64CG::kMd[mop]); + (void)vInsn.AddOpndChain(dest).AddOpndChain(src); + auto *vecSpecSrc = GetMemoryPool()->New(dsize >> k3ByteSize, k8BitSize); + auto *vecSpecDest = GetMemoryPool()->New(dsize >> k3ByteSize, k8BitSize); + (void)vInsn.PushRegSpecEntry(vecSpecDest).PushRegSpecEntry(vecSpecSrc); + GetCurBB()->AppendInsn(vInsn); + break; + } + RegOperand &desReg = static_cast(dest); + RegOperand &srcReg = static_cast(src); + if (desReg.GetRegisterNumber() == srcReg.GetRegisterNumber()) { + break; + } + SelectCopyRegOpnd(dest, dtype, opnd0Type, dsize, src, stype); + break; + } + default: + CHECK_FATAL(false, "NYI"); + } +} + +/* This function copies src to a register, the src can be an imm, mem or a label */ +RegOperand &AArch64CGFunc::SelectCopy(Operand &src, PrimType stype, PrimType dtype) { + RegOperand &dest = CreateRegisterOperandOfType(dtype); + SelectCopy(dest, dtype, src, stype); + return dest; +} + +/* + * We need to adjust the offset of a stack allocated local variable + * if we store FP/SP before any other local variables to save an instruction. + * See AArch64CGFunc::OffsetAdjustmentForFPLR() in aarch64_cgfunc.cpp + * + * That is when we !UsedStpSubPairForCallFrameAllocation(). + * + * Because we need to use the STP/SUB instruction pair to store FP/SP 'after' + * local variables when the call frame size is greater that the max offset + * value allowed for the STP instruction (we cannot use STP w/ prefix, LDP w/ + * postfix), if UsedStpSubPairForCallFrameAllocation(), we don't need to + * adjust the offsets. 
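+ * This is also the reason IsImmediateOffsetOutOfRange() below adds 2 * kIntregBytelen
+ * (the stored FP/LR pair) to the raw offset before doing the range check.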
+ */ +bool AArch64CGFunc::IsImmediateOffsetOutOfRange(const MemOperand &memOpnd, uint32 bitLen) { + ASSERT(bitLen >= k8BitSize, "bitlen error"); + ASSERT(bitLen <= k128BitSize, "bitlen error"); + + if (bitLen >= k8BitSize) { + bitLen = static_cast(RoundUp(bitLen, k8BitSize)); + } + ASSERT((bitLen & (bitLen - 1)) == 0, "bitlen error"); + + MemOperand::AArch64AddressingMode mode = memOpnd.GetAddrMode(); + if ((mode == MemOperand::kAddrModeBOi) && memOpnd.IsIntactIndexed()) { + OfstOperand *ofstOpnd = memOpnd.GetOffsetImmediate(); + int32 offsetValue = ofstOpnd ? static_cast(ofstOpnd->GetOffsetValue()) : 0; + if (ofstOpnd && ofstOpnd->GetVary() == kUnAdjustVary) { + offsetValue += static_cast(static_cast(GetMemlayout())->RealStackFrameSize() + 0xff); + } + offsetValue += 2 * kIntregBytelen; /* Refer to the above comment */ + return MemOperand::IsPIMMOffsetOutOfRange(offsetValue, bitLen); + } else { + return false; + } +} + +bool AArch64CGFunc::IsOperandImmValid(MOperator mOp, Operand *o, uint32 opndIdx) { + const InsnDesc *md = &AArch64CG::kMd[mOp]; + auto *opndProp = md->opndMD[opndIdx]; + + Operand::OperandType opndTy = opndProp->GetOperandType(); + if (opndTy == Operand::kOpdMem) { + auto *memOpnd = static_cast(o); + if (memOpnd->GetAddrMode() == MemOperand::kAddrModeBOrX) { + return true; + } + OfstOperand *ofStOpnd = memOpnd->GetOffsetImmediate(); + int64 offsetValue = ofStOpnd ? ofStOpnd->GetOffsetValue() : 0LL; + if (md->IsLoadStorePair() || + (memOpnd->GetAddrMode() == MemOperand::kAddrModeBOi && memOpnd->IsIntactIndexed())) { + if (ofStOpnd && ofStOpnd->GetVary() == kUnAdjustVary) { + offsetValue += static_cast(GetMemlayout())->RealStackFrameSize() + 0xffL; + } + return md->IsValidImmOpnd(offsetValue); + } else if (memOpnd->GetAddrMode() == MemOperand::kAddrModeLo12Li) { + return offsetValue == 0LL; + } else { + CHECK_FATAL(!memOpnd->IsIntactIndexed(), "CHECK WHAT?"); + return (offsetValue <= static_cast(k256BitSizeInt) && offsetValue >= kNegative256BitSize); + } + } else if (opndTy == Operand::kOpdImmediate) { + return md->IsValidImmOpnd(static_cast(o)->GetValue()); + } + return true; +} + +MemOperand &AArch64CGFunc::CreateReplacementMemOperand(uint32 bitLen, + RegOperand &baseReg, int64 offset) { + return CreateMemOpnd(baseReg, offset, bitLen); +} + +bool AArch64CGFunc::CheckIfSplitOffsetWithAdd(const MemOperand &memOpnd, uint32 bitLen) const { + if (memOpnd.GetAddrMode() != MemOperand::kAddrModeBOi || !memOpnd.IsIntactIndexed()) { + return false; + } + OfstOperand *ofstOpnd = memOpnd.GetOffsetImmediate(); + int32 opndVal = static_cast(ofstOpnd->GetOffsetValue()); + int32 maxPimm = memOpnd.GetMaxPIMM(bitLen); + int32 q0 = opndVal / maxPimm; + int32 addend = q0 * maxPimm; + int32 r0 = opndVal - addend; + int32 alignment = memOpnd.GetImmediateOffsetAlignment(bitLen); + int32 r1 = static_cast(r0) & ((1u << static_cast(alignment)) - 1); + addend = addend + r1; + return (addend > 0); +} + +RegOperand *AArch64CGFunc::GetBaseRegForSplit(uint32 baseRegNum) { + RegOperand *resOpnd = nullptr; + if (baseRegNum == AArch64reg::kRinvalid) { + resOpnd = &CreateRegisterOperandOfType(PTY_i64); + } else if (AArch64isa::IsPhysicalRegister(baseRegNum)) { + resOpnd = &GetOrCreatePhysicalRegisterOperand(static_cast(baseRegNum), + GetPointerSize() * kBitsPerByte, kRegTyInt); + } else { + resOpnd = &GetOrCreateVirtualRegisterOperand(baseRegNum); + } + return resOpnd; +} + +/* + * When immediate of str/ldr is over 256bits, it should be aligned according to the reg byte size. 
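+ * For instance (illustrative numbers): for a 64-bit access at offset 1300 the base is
+ * advanced by 3 * 512 = 1536 and the access keeps -236 as its offset, which is back
+ * inside the +/-256 window.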
+ * Here we split the offset into (512 * n) and +/-(new Offset) when misaligned, to make sure that + * the new offet is always under 256 bits. + */ +MemOperand &AArch64CGFunc::ConstraintOffsetToSafeRegion(uint32 bitLen, const MemOperand &memOpnd, + const MIRSymbol *symbol) { + auto it = hashMemOpndTable.find(memOpnd); + if (it != hashMemOpndTable.end()) { + hashMemOpndTable.erase(memOpnd); + } + MemOperand::AArch64AddressingMode addrMode = memOpnd.GetAddrMode(); + int32 offsetValue = static_cast(memOpnd.GetOffsetImmediate()->GetOffsetValue()); + RegOperand *baseReg = memOpnd.GetBaseRegister(); + RegOperand *resOpnd = GetBaseRegForSplit(kRinvalid); + MemOperand *newMemOpnd = nullptr; + if (addrMode == MemOperand::kAddrModeBOi) { + int32 val256 = k256BitSizeInt; /* const val is unsigned */ + int32 val512 = k512BitSizeInt; + int32 multiplier = (offsetValue / val512) + static_cast(offsetValue % val512 > val256); + int32 addMount = multiplier * val512; + int32 newOffset = offsetValue - addMount; + ImmOperand &immAddMount = CreateImmOperand(addMount, k64BitSize, true); + if (memOpnd.GetOffsetImmediate()->GetVary() == kUnAdjustVary) { + immAddMount.SetVary(kUnAdjustVary); + } + SelectAdd(*resOpnd, *baseReg, immAddMount, PTY_i64); + newMemOpnd = &CreateReplacementMemOperand(bitLen, *resOpnd, newOffset); + } else if (addrMode == MemOperand::kAddrModeLo12Li) { + CHECK_FATAL(symbol != nullptr, "must have symbol"); + StImmOperand &stImmOpnd = CreateStImmOperand(*symbol, offsetValue, 0); + SelectAdd(*resOpnd, *baseReg, stImmOpnd, PTY_i64); + newMemOpnd = &CreateReplacementMemOperand(bitLen, *resOpnd, 0); + } + CHECK_FATAL(newMemOpnd != nullptr, "create memOpnd failed"); + newMemOpnd->SetStackMem(memOpnd.IsStackMem()); + return *newMemOpnd; +} + +ImmOperand &AArch64CGFunc::SplitAndGetRemained(const MemOperand &memOpnd, uint32 bitLen, int64 ofstVal, bool forPair) { + auto it = hashMemOpndTable.find(memOpnd); + if (it != hashMemOpndTable.end()) { + (void)hashMemOpndTable.erase(memOpnd); + } + /* + * opndVal == Q0 * 32760(16380) + R0 + * R0 == Q1 * 8(4) + R1 + * ADDEND == Q0 * 32760(16380) + R1 + * NEW_OFFSET = Q1 * 8(4) + * we want to generate two instructions: + * ADD TEMP_REG, X29, ADDEND + * LDR/STR TEMP_REG, [ TEMP_REG, #NEW_OFFSET ] + */ + int32 maxPimm = 0; + if (!forPair) { + maxPimm = MemOperand::GetMaxPIMM(bitLen); + } else { + maxPimm = MemOperand::GetMaxPairPIMM(bitLen); + } + ASSERT(maxPimm != 0, "get max pimm failed"); + + int64 q0 = ofstVal / maxPimm + (ofstVal < 0 ? 
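+  /*
+   * Worked example (illustrative, 64-bit access so maxPimm == 32760): ofstVal == 40000
+   * gives addend == 32760 and remained == 7240; the suffix-clearing step below then
+   * trims addend to 28672 (0x7 << 12, encodable in one ADD) and the access keeps
+   * #11328 as its in-range offset, since 28672 + 11328 == 40000.
+   */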
-1 : 0); + int64 addend = q0 * maxPimm; + uint64 r0 = static_cast(ofstVal - addend); + uint64 alignment = static_cast(static_cast(MemOperand::GetImmediateOffsetAlignment(bitLen))); + auto q1 = r0 >> alignment; + auto r1 = static_cast(r0 & ((1u << alignment) - 1)); + auto remained = static_cast(q1 << alignment); + addend = addend + r1; + if (addend > 0) { + uint64 suffixClear = 0xfff; + if (forPair) { + suffixClear = 0xff; + } + int64 remainedTmp = remained + static_cast(static_cast(addend) & suffixClear); + if (!MemOperand::IsPIMMOffsetOutOfRange(static_cast(remainedTmp), bitLen) && + ((static_cast(remainedTmp) & ((1u << alignment) - 1)) == 0)) { + addend = static_cast(static_cast(addend) & ~suffixClear); + } + } + ImmOperand &immAddend = CreateImmOperand(addend, k64BitSize, true); + if (memOpnd.GetOffsetImmediate()->GetVary() == kUnAdjustVary) { + immAddend.SetVary(kUnAdjustVary); + } + return immAddend; +} + +MemOperand &AArch64CGFunc::SplitOffsetWithAddInstruction(const MemOperand &memOpnd, uint32 bitLen, + uint32 baseRegNum, bool isDest, + Insn *insn, bool forPair) { + ASSERT((memOpnd.GetAddrMode() == MemOperand::kAddrModeBOi), "expect kAddrModeBOi memOpnd"); + ASSERT(memOpnd.IsIntactIndexed(), "expect intactIndexed memOpnd"); + OfstOperand *ofstOpnd = memOpnd.GetOffsetImmediate(); + int64 ofstVal = ofstOpnd->GetOffsetValue(); + RegOperand *resOpnd = GetBaseRegForSplit(baseRegNum); + ImmOperand &immAddend = SplitAndGetRemained(memOpnd, bitLen, ofstVal, forPair); + int64 remained = (ofstVal - immAddend.GetValue()); + RegOperand *origBaseReg = memOpnd.GetBaseRegister(); + ASSERT(origBaseReg != nullptr, "nullptr check"); + if (insn == nullptr) { + SelectAdd(*resOpnd, *origBaseReg, immAddend, PTY_i64); + } else { + SelectAddAfterInsn(*resOpnd, *origBaseReg, immAddend, PTY_i64, isDest, *insn); + } + MemOperand &newMemOpnd = CreateReplacementMemOperand(bitLen, *resOpnd, remained); + newMemOpnd.SetStackMem(memOpnd.IsStackMem()); + return newMemOpnd; +} + +void AArch64CGFunc::SelectDassign(DassignNode &stmt, Operand &opnd0) { + SelectDassign(stmt.GetStIdx(), stmt.GetFieldID(), stmt.GetRHS()->GetPrimType(), opnd0); +} + +/* + * Used for SelectDassign when do optimization for volatile store, because the stlr instruction only allow + * store to the memory addrress with the register base offset 0. + * STLR , [{,#0}], 32-bit variant (size = 10) + * STLR , [{,#0}], 64-bit variant (size = 11) + * So the function do the prehandle of the memory operand to satisify the Store-Release.. + */ +RegOperand *AArch64CGFunc::ExtractNewMemBase(const MemOperand &memOpnd) { + const MIRSymbol *sym = memOpnd.GetSymbol(); + MemOperand::AArch64AddressingMode mode = memOpnd.GetAddrMode(); + if (mode == MemOperand::kAddrModeLiteral) { + return nullptr; + } + RegOperand *baseOpnd = memOpnd.GetBaseRegister(); + ASSERT(baseOpnd != nullptr, "nullptr check"); + RegOperand &resultOpnd = CreateRegisterOperandOfType(baseOpnd->GetRegisterType(), baseOpnd->GetSize() / kBitsPerByte); + bool is64Bits = (baseOpnd->GetSize() == k64BitSize); + if (mode == MemOperand::kAddrModeLo12Li) { + StImmOperand &stImm = CreateStImmOperand(*sym, 0, 0); + Insn &addInsn = GetInsnBuilder()->BuildInsn(MOP_xadrpl12, resultOpnd, *baseOpnd, stImm); + addInsn.SetComment("new add insn"); + GetCurBB()->AppendInsn(addInsn); + } else if (mode == MemOperand::kAddrModeBOi) { + OfstOperand *offsetOpnd = memOpnd.GetOffsetImmediate(); + if (offsetOpnd->GetOffsetValue() != 0) { + MOperator mOp = is64Bits ? 
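/* Illustrative sketch (operands assumed): for a volatile store to [x0, #16], stlr cannot encode
 * the #16 offset, so ExtractNewMemBase folds the offset into a fresh base first:
 *   add  x1, x0, #16
 *   stlr w2, [x1]
 * The 32- or 64-bit add opcode below is chosen from the size of the original base register. */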
MOP_xaddrri12 : MOP_waddrri12; + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, resultOpnd, *baseOpnd, *offsetOpnd)); + } else { + return baseOpnd; + } + } else { + CHECK_FATAL(mode == MemOperand::kAddrModeBOrX, "unexpect addressing mode."); + RegOperand *regOpnd = static_cast(&memOpnd)->GetIndexRegister(); + MOperator mOp = is64Bits ? MOP_xaddrrr : MOP_waddrrr; + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, resultOpnd, *baseOpnd, *regOpnd)); + } + return &resultOpnd; +} + +/* + * NOTE: I divided SelectDassign so that we can create "virtual" assignments + * when selecting other complex Maple IR instructions. For example, the atomic + * exchange and other intrinsics will need to assign its results to local + * variables. Such Maple IR instructions are pltform-specific (e.g. + * atomic_exchange can be implemented as one single machine intruction on x86_64 + * and ARMv8.1, but ARMv8.0 needs an LL/SC loop), therefore they cannot (in + * principle) be lowered at BELowerer or CGLowerer. + */ +void AArch64CGFunc::SelectDassign(StIdx stIdx, FieldID fieldId, PrimType rhsPType, Operand &opnd0) { + MIRSymbol *symbol = GetFunction().GetLocalOrGlobalSymbol(stIdx); + int32 offset = 0; + bool parmCopy = false; + if (fieldId != 0) { + MIRStructType *structType = static_cast(symbol->GetType()); + ASSERT(structType != nullptr, "SelectDassign: non-zero fieldID for non-structure"); + offset = GetBecommon().GetFieldOffset(*structType, fieldId).first; + parmCopy = IsParamStructCopy(*symbol); + } + uint32 regSize = GetPrimTypeBitSize(rhsPType); + MIRType *type = symbol->GetType(); + Operand &stOpnd = LoadIntoRegister(opnd0, IsPrimitiveInteger(rhsPType) || + IsPrimitiveVectorInteger(rhsPType), regSize, + IsSignedInteger(type->GetPrimType())); + MOperator mOp = MOP_undef; + if ((type->GetKind() == kTypeStruct) || (type->GetKind() == kTypeUnion)) { + MIRStructType *structType = static_cast(type); + type = structType->GetFieldType(fieldId); + } else if (type->GetKind() == kTypeClass) { + MIRClassType *classType = static_cast(type); + type = classType->GetFieldType(fieldId); + } + + uint32 dataSize = GetPrimTypeBitSize(type->GetPrimType()); + if (type->GetPrimType() == PTY_agg) { + dataSize = GetPrimTypeBitSize(PTY_a64); + } + MemOperand *memOpnd = nullptr; + if (parmCopy) { + memOpnd = &LoadStructCopyBase(*symbol, offset, static_cast(dataSize)); + } else { + memOpnd = &GetOrCreateMemOpnd(*symbol, offset, dataSize); + } + if ((memOpnd->GetMemVaryType() == kNotVary) && IsImmediateOffsetOutOfRange(*memOpnd, dataSize)) { + memOpnd = &SplitOffsetWithAddInstruction(*memOpnd, dataSize); + } + + /* In bpl mode, a func symbol's type is represented as a MIRFuncType instead of a MIRPtrType (pointing to + * MIRFuncType), so we allow `kTypeFunction` to appear here */ + ASSERT(((type->GetKind() == kTypeScalar) || (type->GetKind() == kTypePointer) || (type->GetKind() == kTypeFunction) || + (type->GetKind() == kTypeStruct) || (type->GetKind() == kTypeUnion)|| (type->GetKind() == kTypeArray)), + "NYI dassign type"); + PrimType ptyp = type->GetPrimType(); + if (ptyp == PTY_agg) { + ptyp = PTY_a64; + } + + AArch64isa::MemoryOrdering memOrd = AArch64isa::kMoNone; + if (isVolStore) { + RegOperand *baseOpnd = ExtractNewMemBase(*memOpnd); + if (baseOpnd != nullptr) { + memOpnd = &CreateMemOpnd(*baseOpnd, 0, dataSize); + memOrd = AArch64isa::kMoRelease; + isVolStore = false; + } + } + + memOpnd = memOpnd->IsOffsetMisaligned(dataSize) ? 
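/* Worked example for the misaligned path (offset assumed): ConstraintOffsetToSafeRegion rewrites
 * a kAddrModeBOi operand with offset 1300 as multiplier = (1300 / 512) + (1300 % 512 > 256) = 3,
 * addMount = 1536, newOffset = 1300 - 1536 = -236, i.e. conceptually:
 *   add x<t>, base, #1536
 *   str w<v>, [x<t>, #-236]
 * so the rewritten offset always lands in the +/-256 unscaled range. */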
+ &ConstraintOffsetToSafeRegion(dataSize, *memOpnd, symbol) : memOpnd; + if (symbol->GetAsmAttr() != UStrIdx(0) && + symbol->GetStorageClass() != kScPstatic && symbol->GetStorageClass() != kScFstatic) { + std::string regDesp = GlobalTables::GetUStrTable().GetStringFromStrIdx(symbol->GetAsmAttr()); + RegOperand &specifiedOpnd = GetOrCreatePhysicalRegisterOperand(regDesp); + SelectCopy(specifiedOpnd, type->GetPrimType(), opnd0, rhsPType); + } else if (memOrd == AArch64isa::kMoNone) { + mOp = PickStInsn(GetPrimTypeBitSize(ptyp), ptyp); + Insn &insn = GetInsnBuilder()->BuildInsn(mOp, stOpnd, *memOpnd); + if (GetCG()->GenerateVerboseCG()) { + insn.SetComment(GenerateMemOpndVerbose(*memOpnd)); + } + GetCurBB()->AppendInsn(insn); + } else { + AArch64CGFunc::SelectStoreRelease(*memOpnd, ptyp, stOpnd, ptyp, memOrd, true); + } +} + +void AArch64CGFunc::SelectDassignoff(DassignoffNode &stmt, Operand &opnd0) { + MIRSymbol *symbol = GetFunction().GetLocalOrGlobalSymbol(stmt.stIdx); + int64 offset = stmt.offset; + uint32 size = GetPrimTypeSize(stmt.GetPrimType()) * k8ByteSize; + MOperator mOp = (size == k16BitSize) ? MOP_wstrh : + ((size == k32BitSize) ? MOP_wstr : + ((size == k64BitSize) ? MOP_xstr : MOP_undef)); + CHECK_FATAL(mOp != MOP_undef, "illegal size for dassignoff"); + MemOperand *memOpnd = &GetOrCreateMemOpnd(*symbol, offset, size); + if ((memOpnd->GetMemVaryType() == kNotVary) && + (IsImmediateOffsetOutOfRange(*memOpnd, size) || (offset % 8 != 0))) { + memOpnd = &SplitOffsetWithAddInstruction(*memOpnd, size); + } + Operand &stOpnd = LoadIntoRegister(opnd0, true, size, false); + memOpnd = memOpnd->IsOffsetMisaligned(size) ? + &ConstraintOffsetToSafeRegion(size, *memOpnd, symbol) : memOpnd; + Insn &insn = GetInsnBuilder()->BuildInsn(mOp, stOpnd, *memOpnd); + GetCurBB()->AppendInsn(insn); +} + +void AArch64CGFunc::SelectAssertNull(UnaryStmtNode &stmt) { + Operand *opnd0 = HandleExpr(stmt, *stmt.Opnd(0)); + RegOperand &baseReg = LoadIntoRegister(*opnd0, PTY_a64); + auto &zwr = GetZeroOpnd(k32BitSize); + auto &mem = CreateMemOpnd(baseReg, 0, k32BitSize); + Insn &loadRef = GetInsnBuilder()->BuildInsn(MOP_wldr, zwr, mem); + loadRef.SetDoNotRemove(true); + if (GetCG()->GenerateVerboseCG()) { + loadRef.SetComment("null pointer check"); + } + GetCurBB()->AppendInsn(loadRef); +} + +void AArch64CGFunc::SelectAbort() { + RegOperand &inOpnd = GetOrCreatePhysicalRegisterOperand(R16, k64BitSize, kRegTyInt); + auto &mem = CreateMemOpnd(inOpnd, 0, k64BitSize); + Insn &movXzr = GetInsnBuilder()->BuildInsn(MOP_xmovri64, inOpnd, CreateImmOperand(0, k64BitSize, false)); + Insn &loadRef = GetInsnBuilder()->BuildInsn(MOP_wldr, GetZeroOpnd(k64BitSize), mem); + loadRef.SetDoNotRemove(true); + movXzr.SetDoNotRemove(true); + GetCurBB()->AppendInsn(movXzr); + GetCurBB()->AppendInsn(loadRef); + SetCurBBKind(BB::kBBGoto); + LabelOperand &targetOpnd = GetOrCreateLabelOperand(GetReturnLabel()->GetLabelIdx()); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xuncond, targetOpnd)); +} + +static std::string GetRegPrefixFromPrimType(PrimType pType, uint32 size, const std::string &constraint) { + std::string regPrefix = ""; + /* memory access check */ + if (constraint.find("m") != std::string::npos || constraint.find("Q") != std::string::npos) { + regPrefix += "["; + } + if (IsPrimitiveVector(pType)) { + regPrefix += "v"; + } else if (IsPrimitiveInteger(pType)) { + if (size == k32BitSize) { + regPrefix += "w"; + } else { + regPrefix += "x"; + } + } else { + if (size == k32BitSize) { + regPrefix += "s"; + } else { + 
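/* Mapping summary (illustrative): vector operands get the "v" prefix, 32-bit integers "w",
 * 64-bit integers "x", 32-bit floats "s" and 64-bit floats "d"; a leading "[" is prepended when
 * the constraint contains "m" or "Q" so the operand prints as a memory reference. For example
 * (assumed inputs), pType = PTY_f64 with constraint "m" yields "[d". */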
regPrefix += "d"; + } + } + return regPrefix; +} + +void AArch64CGFunc::SelectAsm(AsmNode &node) { + SetHasAsm(); + if (Globals::GetInstance()->GetOptimLevel() > CGOptions::kLevel0) { + if (GetCG()->GetCGOptions().DoLinearScanRegisterAllocation()) { + LogInfo::MapleLogger() << "Using coloring RA\n"; + const_cast(GetCG()->GetCGOptions()).SetOption(CGOptions::kDoColorRegAlloc); + const_cast(GetCG()->GetCGOptions()).ClearOption(CGOptions::kDoLinearScanRegAlloc); + } + } + Operand *asmString = &CreateStringOperand(node.asmString); + ListOperand *listInputOpnd = CreateListOpnd(*GetFuncScopeAllocator()); + ListOperand *listOutputOpnd = CreateListOpnd(*GetFuncScopeAllocator()); + ListOperand *listClobber = CreateListOpnd(*GetFuncScopeAllocator()); + ListConstraintOperand *listInConstraint = memPool->New(*GetFuncScopeAllocator()); + ListConstraintOperand *listOutConstraint = memPool->New(*GetFuncScopeAllocator()); + ListConstraintOperand *listInRegPrefix = memPool->New(*GetFuncScopeAllocator()); + ListConstraintOperand *listOutRegPrefix = memPool->New(*GetFuncScopeAllocator()); + std::list> rPlusOpnd; + bool noReplacement = false; + if (node.asmString.find('$') == std::string::npos) { + /* no replacements */ + noReplacement = true; + } + /* input constraints should be processed before OP_asm instruction */ + for (size_t i = 0; i < node.numOpnds; ++i) { + /* process input constraint */ + std::string str = GlobalTables::GetUStrTable().GetStringFromStrIdx(node.inputConstraints[i]); + bool isOutputTempNode = false; + if (str[0] == '+') { + isOutputTempNode = true; + } + listInConstraint->stringList.push_back(static_cast(&CreateStringOperand(str))); + /* process input operands */ + switch (node.Opnd(i)->op) { + case OP_dread: { + DreadNode &dread = static_cast(*node.Opnd(i)); + Operand *inOpnd = SelectDread(node, dread); + PrimType pType = dread.GetPrimType(); + listInputOpnd->PushOpnd(static_cast(*inOpnd)); + listInRegPrefix->stringList.push_back( + static_cast(&CreateStringOperand(GetRegPrefixFromPrimType(pType, inOpnd->GetSize(), str)))); + if (isOutputTempNode) { + rPlusOpnd.emplace_back(std::make_pair(inOpnd, pType)); + } + break; + } + case OP_addrof: { + auto &addrofNode = static_cast(*node.Opnd(i)); + Operand *inOpnd = SelectAddrof(addrofNode, node, false); + listInputOpnd->PushOpnd(static_cast(*inOpnd)); + PrimType pType = addrofNode.GetPrimType(); + listInRegPrefix->stringList.push_back( + static_cast(&CreateStringOperand(GetRegPrefixFromPrimType(pType, inOpnd->GetSize(), str)))); + if (isOutputTempNode) { + rPlusOpnd.emplace_back(std::make_pair(inOpnd, pType)); + } + break; + } + case OP_addrofoff: { + auto &addrofoffNode = static_cast(*node.Opnd(i)); + Operand *inOpnd = SelectAddrofoff(addrofoffNode, node); + listInputOpnd->PushOpnd(static_cast(*inOpnd)); + PrimType pType = addrofoffNode.GetPrimType(); + listInRegPrefix->stringList.push_back( + static_cast(&CreateStringOperand(GetRegPrefixFromPrimType(pType, inOpnd->GetSize(), str)))); + if (isOutputTempNode) { + rPlusOpnd.emplace_back(std::make_pair(inOpnd, pType)); + } + break; + } + case OP_ireadoff: { + IreadoffNode *ireadoff = static_cast(node.Opnd(i)); + Operand *inOpnd = SelectIreadoff(node, *ireadoff); + listInputOpnd->PushOpnd(static_cast(*inOpnd)); + PrimType pType = ireadoff->GetPrimType(); + listInRegPrefix->stringList.push_back( + static_cast(&CreateStringOperand(GetRegPrefixFromPrimType(pType, inOpnd->GetSize(), str)))); + if (isOutputTempNode) { + rPlusOpnd.emplace_back(std::make_pair(inOpnd, pType)); + } + break; + } + case 
OP_ireadfpoff: { + IreadFPoffNode *ireadfpoff = static_cast(node.Opnd(i)); + Operand *inOpnd = SelectIreadfpoff(node, *ireadfpoff); + listInputOpnd->PushOpnd(static_cast(*inOpnd)); + PrimType pType = ireadfpoff->GetPrimType(); + listInRegPrefix->stringList.push_back( + static_cast(&CreateStringOperand(GetRegPrefixFromPrimType(pType, inOpnd->GetSize(), str)))); + if (isOutputTempNode) { + rPlusOpnd.emplace_back(std::make_pair(inOpnd, pType)); + } + break; + } + case OP_iread: { + IreadNode *iread = static_cast(node.Opnd(i)); + Operand *inOpnd = SelectIread(node, *iread); + listInputOpnd->PushOpnd(static_cast(*inOpnd)); + PrimType pType = iread->GetPrimType(); + listInRegPrefix->stringList.push_back( + static_cast(&CreateStringOperand(GetRegPrefixFromPrimType(pType, inOpnd->GetSize(), str)))); + if (isOutputTempNode) { + rPlusOpnd.emplace_back(std::make_pair(inOpnd, pType)); + } + break; + } + case OP_add: { + BinaryNode *addNode = static_cast(node.Opnd(i)); + Operand *inOpnd = SelectAdd(*addNode, *HandleExpr(*addNode, *addNode->Opnd(0)), + *HandleExpr(*addNode, *addNode->Opnd(1)), node); + listInputOpnd->PushOpnd(static_cast(*inOpnd)); + PrimType pType = addNode->GetPrimType(); + listInRegPrefix->stringList.push_back(static_cast(&CreateStringOperand( + GetRegPrefixFromPrimType(pType, inOpnd->GetSize(), str)))); + if (isOutputTempNode) { + rPlusOpnd.emplace_back(std::make_pair(inOpnd, pType)); + } + break; + } + case OP_constval: { + CHECK_FATAL(!isOutputTempNode, "Unexpect"); + auto &constNode = static_cast(*node.Opnd(i)); + CHECK_FATAL(constNode.GetConstVal()->GetKind() == kConstInt, "expect MIRIntConst does not support float yet"); + MIRIntConst *mirIntConst = safe_cast(constNode.GetConstVal()); + CHECK_FATAL(mirIntConst != nullptr, "just checking"); + int64 scale = mirIntConst->GetExtValue(); + if (str.find("r") != std::string::npos) { + bool isSigned = scale < 0; + ImmOperand &immOpnd = CreateImmOperand(scale, k64BitSize, isSigned); + /* set default type as a 64 bit reg */ + PrimType pty = isSigned ? PTY_i64 : PTY_u64; + auto &tempReg = static_cast(CreateRegisterOperandOfType(pty)); + SelectCopy(tempReg, pty, immOpnd, isSigned ? 
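/* Illustrative note (assumed operand): for an input like "r"(42) the constant cannot stay an
 * immediate, so it is materialized here, conceptually:
 *   mov x<n>, #42
 * and x<n> becomes the operand named by the asm template. With an "i"-style constraint the
 * else-branch below records the literal prefix "i42" instead and no copy is generated. */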
PTY_i64 : PTY_u64); + listInputOpnd->PushOpnd(static_cast(tempReg)); + listInRegPrefix->stringList.push_back( + static_cast(&CreateStringOperand(GetRegPrefixFromPrimType(pty, tempReg.GetSize(), str)))); + } else { + RegOperand &inOpnd = GetOrCreatePhysicalRegisterOperand(RZR, k64BitSize, kRegTyInt); + listInputOpnd->PushOpnd(static_cast(inOpnd)); + + listInRegPrefix->stringList.push_back( + static_cast(&CreateStringOperand("i" + std::to_string(scale)))); + } + break; + } + case OP_regread: { + auto ®readNode = static_cast(*node.Opnd(i)); + PregIdx pregIdx = regreadNode.GetRegIdx(); + MIRPreg *preg = GetFunction().GetPregTab()->PregFromPregIdx(pregIdx); + PrimType pType = preg->GetPrimType(); + RegOperand *inOpnd; + if (IsSpecialPseudoRegister(pregIdx)) { + inOpnd = &GetOrCreateSpecialRegisterOperand(-pregIdx, pType); + } else { + inOpnd = &GetOrCreateVirtualRegisterOperand(GetVirtualRegNOFromPseudoRegIdx(pregIdx)); + } + listInputOpnd->PushOpnd(static_cast(*inOpnd)); + listInRegPrefix->stringList.push_back(static_cast(&CreateStringOperand( + GetRegPrefixFromPrimType(pType, inOpnd->GetSize(), str)))); + if (isOutputTempNode) { + (void)rPlusOpnd.emplace_back(std::make_pair(&static_cast(*inOpnd), pType)); + } + break; + } + default: + CHECK_FATAL(false, "Inline asm input expression not handled"); + } + } + std::vector intrnOpnds; + intrnOpnds.emplace_back(asmString); + intrnOpnds.emplace_back(listOutputOpnd); + intrnOpnds.emplace_back(listClobber); + intrnOpnds.emplace_back(listInputOpnd); + intrnOpnds.emplace_back(listOutConstraint); + intrnOpnds.emplace_back(listInConstraint); + intrnOpnds.emplace_back(listOutRegPrefix); + intrnOpnds.emplace_back(listInRegPrefix); + Insn *asmInsn = &GetInsnBuilder()->BuildInsn(MOP_asm, intrnOpnds); + GetCurBB()->AppendInsn(*asmInsn); + + /* process listOutputOpnd */ + for (size_t i = 0; i < node.asmOutputs.size(); ++i) { + bool isOutputTempNode = false; + RegOperand *rPOpnd = nullptr; + /* process output constraint */ + std::string str = GlobalTables::GetUStrTable().GetStringFromStrIdx(node.outputConstraints[i]); + + listOutConstraint->stringList.push_back(static_cast(&CreateStringOperand(str))); + if (str[0] == '+') { + CHECK_FATAL(!rPlusOpnd.empty(), "Need r+ operand"); + rPOpnd = static_cast((rPlusOpnd.begin()->first)); + listOutputOpnd->PushOpnd(*rPOpnd); + listOutRegPrefix->stringList.push_back(static_cast( + &CreateStringOperand(GetRegPrefixFromPrimType(rPlusOpnd.begin()->second, rPOpnd->GetSize(), str)))); + if (!rPlusOpnd.empty()) { + rPlusOpnd.pop_front(); + } + isOutputTempNode = true; + } + if (str.find("Q") != std::string::npos || str.find("m") != std::string::npos) { + continue; + } + /* process output operands */ + StIdx stIdx = node.asmOutputs[i].first; + RegFieldPair regFieldPair = node.asmOutputs[i].second; + if (regFieldPair.IsReg()) { + PregIdx pregIdx = static_cast(regFieldPair.GetPregIdx()); + MIRPreg *mirPreg = mirModule.CurFunction()->GetPregTab()->PregFromPregIdx(pregIdx); + RegOperand *outOpnd = + isOutputTempNode ? rPOpnd : &GetOrCreateVirtualRegisterOperand(GetVirtualRegNOFromPseudoRegIdx(pregIdx)); + PrimType srcType = mirPreg->GetPrimType(); + PrimType destType = srcType; + if (GetPrimTypeBitSize(destType) < k32BitSize) { + destType = IsSignedInteger(destType) ? PTY_i32 : PTY_u32; + } + RegType rtype = GetRegTyFromPrimTy(srcType); + RegOperand &opnd0 = isOutputTempNode ? 
+ GetOrCreateVirtualRegisterOperand(GetVirtualRegNOFromPseudoRegIdx(pregIdx)) : + CreateVirtualRegisterOperand(NewVReg(rtype, GetPrimTypeSize(srcType))); + SelectCopy(opnd0, destType, *outOpnd, srcType); + if (!isOutputTempNode) { + listOutputOpnd->PushOpnd(static_cast(*outOpnd)); + listOutRegPrefix->stringList.push_back(static_cast( + &CreateStringOperand(GetRegPrefixFromPrimType(srcType, outOpnd->GetSize(), str)))); + } + } else { + MIRSymbol *var; + if (stIdx.IsGlobal()) { + var = GlobalTables::GetGsymTable().GetSymbolFromStidx(stIdx.Idx()); + } else { + var = mirModule.CurFunction()->GetSymbolTabItem(stIdx.Idx()); + } + CHECK_FATAL(var != nullptr, "var should not be nullptr"); + if (!noReplacement || var->GetAsmAttr() != UStrIdx(0)) { + RegOperand *outOpnd = nullptr; + PrimType pty = GlobalTables::GetTypeTable().GetTypeTable().at(var->GetTyIdx())->GetPrimType(); + if (var->GetAsmAttr() != UStrIdx(0)) { + std::string regDesp = GlobalTables::GetUStrTable().GetStringFromStrIdx(var->GetAsmAttr()); + outOpnd = &GetOrCreatePhysicalRegisterOperand(regDesp); + } else { + RegType rtype = GetRegTyFromPrimTy(pty); + outOpnd = isOutputTempNode ? rPOpnd : &CreateVirtualRegisterOperand(NewVReg(rtype, GetPrimTypeSize(pty))); + } + SaveReturnValueInLocal(node.asmOutputs, i, PTY_a64, *outOpnd, node); + if (!isOutputTempNode) { + listOutputOpnd->PushOpnd(static_cast(*outOpnd)); + listOutRegPrefix->stringList.push_back( + static_cast(&CreateStringOperand( + GetRegPrefixFromPrimType(pty, outOpnd->GetSize(), str)))); + } + } + } + } + if (noReplacement) { + return; + } + + /* process listClobber */ + for (size_t i = 0; i < node.clobberList.size(); ++i) { + std::string str = GlobalTables::GetUStrTable().GetStringFromStrIdx(node.clobberList[i]); + auto regno = static_cast(static_cast(str[1]) - kZeroAsciiNum); + if (str[2] >= '0' && str[2] <= '9') { + regno = regno * kDecimalMax + static_cast((static_cast(str[2]) - kZeroAsciiNum)); + } + RegOperand *reg; + switch (str[0]) { + case 'w': { + reg = &GetOrCreatePhysicalRegisterOperand(static_cast(regno + R0), k32BitSize, kRegTyInt); + listClobber->PushOpnd(*reg); + break; + } + case 'x': { + reg = &GetOrCreatePhysicalRegisterOperand(static_cast(regno + R0), k64BitSize, kRegTyInt); + listClobber->PushOpnd(*reg); + break; + } + case 's': { + reg = &GetOrCreatePhysicalRegisterOperand(static_cast(regno + V0), k32BitSize, kRegTyFloat); + listClobber->PushOpnd(*reg); + break; + } + case 'd': { + reg = &GetOrCreatePhysicalRegisterOperand(static_cast(regno + V0), k64BitSize, kRegTyFloat); + listClobber->PushOpnd(*reg); + break; + } + case 'v': { + reg = &GetOrCreatePhysicalRegisterOperand(static_cast(regno + V0), k64BitSize, kRegTyFloat); + listClobber->PushOpnd(*reg); + break; + } + case 'c': { + asmInsn->SetAsmDefCondCode(); + break; + } + case 'm': { + asmInsn->SetAsmModMem(); + break; + } + default: + CHECK_FATAL(false, "Inline asm clobber list not handled"); + } + } +} + +void AArch64CGFunc::SelectRegassign(RegassignNode &stmt, Operand &opnd0) { + if (GetCG()->IsLmbc()) { + PrimType lhsSize = stmt.GetPrimType(); + PrimType rhsSize = stmt.Opnd(0)->GetPrimType(); + if (lhsSize != rhsSize && stmt.Opnd(0)->GetOpCode() == OP_ireadoff) { + Insn *prev = GetCurBB()->GetLastInsn(); + if (prev->GetMachineOpcode() == MOP_wldrsb || prev->GetMachineOpcode() == MOP_wldrsh) { + opnd0.SetSize(GetPrimTypeBitSize(stmt.GetPrimType())); + prev->SetMOP(AArch64CG::kMd[prev->GetMachineOpcode() == MOP_wldrsb ? 
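/* Illustrative sketch (assumed lmbc input): when the preceding instruction loaded a narrow value,
 *   ldrsb w1, [x0]
 * but the regassign destination is wider (e.g. PTY_i64), the load itself is widened in place to
 *   ldrsb x1, [x0]
 * instead of inserting a separate extension; likewise a 32-bit ldr becomes ldrsw when the
 * destination type is PTY_i64. */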
MOP_xldrsb : MOP_xldrsh]); + } else if (prev->GetMachineOpcode() == MOP_wldr && stmt.GetPrimType() == PTY_i64) { + opnd0.SetSize(GetPrimTypeBitSize(stmt.GetPrimType())); + prev->SetMOP(AArch64CG::kMd[MOP_xldrsw]); + } + } + } + RegOperand *regOpnd = nullptr; + PregIdx pregIdx = stmt.GetRegIdx(); + if (IsSpecialPseudoRegister(pregIdx)) { + if (GetCG()->IsLmbc() && stmt.GetPrimType() == PTY_agg) { + if (static_cast(opnd0).IsOfIntClass()) { + regOpnd = &GetOrCreateSpecialRegisterOperand(-pregIdx, PTY_i64); + } else if (opnd0.GetSize() <= k4ByteSize) { + regOpnd = &GetOrCreateSpecialRegisterOperand(-pregIdx, PTY_f32); + } else { + regOpnd = &GetOrCreateSpecialRegisterOperand(-pregIdx, PTY_f64); + } + } else { + regOpnd = &GetOrCreateSpecialRegisterOperand(-pregIdx, stmt.GetPrimType()); + } + } else { + regOpnd = &GetOrCreateVirtualRegisterOperand(GetVirtualRegNOFromPseudoRegIdx(pregIdx)); + } + /* look at rhs */ + PrimType rhsType = stmt.Opnd(0)->GetPrimType(); + if (GetCG()->IsLmbc() && rhsType == PTY_agg) { + /* This occurs when a call returns a small struct */ + /* The subtree should already taken care of the agg type that is in excess of 8 bytes */ + rhsType = PTY_i64; + } + PrimType dtype = rhsType; + if (GetPrimTypeBitSize(dtype) < k32BitSize) { + ASSERT(IsPrimitiveInteger(dtype), ""); + dtype = IsSignedInteger(dtype) ? PTY_i32 : PTY_u32; + } + ASSERT(regOpnd != nullptr, "null ptr check!"); + SelectCopy(*regOpnd, dtype, opnd0, rhsType); + if (GetCG()->GenerateVerboseCG()) { + if (GetCurBB()->GetLastInsn()) { + GetCurBB()->GetLastInsn()->AppendComment(" regassign %" + std::to_string(pregIdx) + "; "); + } else if (GetCurBB()->GetPrev()->GetLastInsn()) { + GetCurBB()->GetPrev()->GetLastInsn()->AppendComment(" regassign %" + std::to_string(pregIdx) + "; "); + } + } + + if ((Globals::GetInstance()->GetOptimLevel() == CGOptions::kLevel0) && (pregIdx >= 0)) { + MemOperand *dest = GetPseudoRegisterSpillMemoryOperand(pregIdx); + PrimType stype = GetTypeFromPseudoRegIdx(pregIdx); + MIRPreg *preg = GetFunction().GetPregTab()->PregFromPregIdx(pregIdx); + uint32 srcBitLength = GetPrimTypeSize(preg->GetPrimType()) * kBitsPerByte; + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(PickStInsn(srcBitLength, stype), *regOpnd, *dest)); + } else if (regOpnd->GetRegisterNumber() == R0 || regOpnd->GetRegisterNumber() == R1) { + Insn &pseudo = GetInsnBuilder()->BuildInsn(MOP_pseudo_ret_int, *regOpnd); + GetCurBB()->AppendInsn(pseudo); + } else if (regOpnd->GetRegisterNumber() >= V0 && regOpnd->GetRegisterNumber() <= V3) { + Insn &pseudo = GetInsnBuilder()->BuildInsn(MOP_pseudo_ret_float, *regOpnd); + GetCurBB()->AppendInsn(pseudo); + } +} + +MemOperand *AArch64CGFunc::FixLargeMemOpnd(MemOperand &memOpnd, uint32 align) { + MemOperand *lhsMemOpnd = &memOpnd; + if ((lhsMemOpnd->GetMemVaryType() == kNotVary) && + IsImmediateOffsetOutOfRange(*lhsMemOpnd, align * kBitsPerByte)) { + RegOperand *addReg = &CreateRegisterOperandOfType(PTY_i64); + lhsMemOpnd = &SplitOffsetWithAddInstruction(*lhsMemOpnd, align * k8BitSize, addReg->GetRegisterNumber()); + } + return lhsMemOpnd; +} + +MemOperand *AArch64CGFunc::FixLargeMemOpnd(MOperator mOp, MemOperand &memOpnd, uint32 dSize, uint32 opndIdx) { + auto *a64MemOpnd = &memOpnd; + if ((a64MemOpnd->GetMemVaryType() == kNotVary) && !IsOperandImmValid(mOp, &memOpnd, opndIdx)) { + if (opndIdx == kInsnSecondOpnd) { + a64MemOpnd = &SplitOffsetWithAddInstruction(*a64MemOpnd, dSize); + } else if (opndIdx == kInsnThirdOpnd) { + a64MemOpnd = &SplitOffsetWithAddInstruction( + 
*a64MemOpnd, dSize, AArch64reg::kRinvalid, false, nullptr, true); + } else { + CHECK_FATAL(false, "NYI"); + } + } + return a64MemOpnd; +} + +MemOperand *AArch64CGFunc::GenLargeAggFormalMemOpnd(const MIRSymbol &sym, uint32 align, int64 offset, + bool needLow12) { + MemOperand *memOpnd; + if (sym.GetStorageClass() == kScFormal && GetBecommon().GetTypeSize(sym.GetTyIdx()) > k16ByteSize) { + /* formal of size of greater than 16 is copied by the caller and the pointer to it is passed. */ + /* otherwise it is passed in register and is accessed directly. */ + memOpnd = &GetOrCreateMemOpnd(sym, 0, align * kBitsPerByte); + RegOperand *vreg = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize)); + Insn &ldInsn = GetInsnBuilder()->BuildInsn(PickLdInsn(k64BitSize, PTY_i64), *vreg, *memOpnd); + GetCurBB()->AppendInsn(ldInsn); + memOpnd = &GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, k64BitSize, vreg, nullptr, + &GetOrCreateOfstOpnd(static_cast(offset), k32BitSize), nullptr); + } else { + memOpnd = &GetOrCreateMemOpnd(sym, offset, align * kBitsPerByte, false, needLow12); + } + return FixLargeMemOpnd(*memOpnd, align); +} + +RegOperand *AArch64CGFunc::PrepareMemcpyParamOpnd(bool isLo12, const MIRSymbol &symbol, int64 offsetVal, + RegOperand &baseReg) { + RegOperand *tgtAddr = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize)); + if (isLo12) { + StImmOperand &stImm = CreateStImmOperand(symbol, 0, 0); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xadrpl12, *tgtAddr, baseReg, stImm)); + } else { + ImmOperand &imm = CreateImmOperand(offsetVal, k64BitSize, false); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xaddrri12, *tgtAddr, baseReg, imm)); + } + return tgtAddr; +} + +RegOperand *AArch64CGFunc::PrepareMemcpyParamOpnd(int64 offset, Operand &exprOpnd) { + RegOperand *tgtAddr = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize)); + OfstOperand *ofstOpnd = &GetOrCreateOfstOpnd(static_cast(offset), k32BitSize); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xaddrri12, *tgtAddr, exprOpnd, *ofstOpnd)); + return tgtAddr; +} + +RegOperand *AArch64CGFunc::PrepareMemcpyParamOpnd(uint64 copySize) { + RegOperand *vregMemcpySize = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize)); + ImmOperand *sizeOpnd = &CreateImmOperand(static_cast(copySize), k64BitSize, false); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_wmovri32, *vregMemcpySize, *sizeOpnd)); + return vregMemcpySize; +} + +Insn *AArch64CGFunc::AggtStrLdrInsert(bool bothUnion, Insn *lastStrLdr, Insn &newStrLdr) { + if (bothUnion) { + if (lastStrLdr == nullptr) { + GetCurBB()->AppendInsn(newStrLdr); + } else { + GetCurBB()->InsertInsnAfter(*lastStrLdr, newStrLdr); + } + } else { + GetCurBB()->AppendInsn(newStrLdr); + } + return &newStrLdr; +} + +void AArch64CGFunc::SelectAggDassign(DassignNode &stmt) { + MIRSymbol *lhsSymbol = GetFunction().GetLocalOrGlobalSymbol(stmt.GetStIdx()); + uint32 lhsOffset = 0; + MIRType *lhsType = lhsSymbol->GetType(); + bool bothUnion = false; + if (stmt.GetFieldID() != 0) { + MIRStructType *structType = static_cast(lhsSymbol->GetType()); + ASSERT(structType != nullptr, "SelectAggDassign: non-zero fieldID for non-structure"); + lhsType = structType->GetFieldType(stmt.GetFieldID()); + lhsOffset = static_cast(GetBecommon().GetFieldOffset(*structType, stmt.GetFieldID()).first); + bothUnion = bothUnion || (structType->GetKind() == kTypeUnion); + } + uint32 lhsAlign = GetBecommon().GetTypeAlign(lhsType->GetTypeIndex()); + uint64 lhsSize = 
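/* Illustrative note (assumed layout): a struct formal larger than 16 bytes is not copied into the
 * callee frame; the caller passes the address of its own copy. GenLargeAggFormalMemOpnd above
 * therefore first loads that pointer, conceptually
 *   ldr x<t>, [x29, #formal_slot]
 * and then addresses the field as [x<t>, #offset] instead of using the formal's stack slot
 * directly. */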
GetBecommon().GetTypeSize(lhsType->GetTypeIndex()); + + uint32 rhsAlign; + uint32 alignUsed; + uint32 rhsOffset = 0; + if (stmt.GetRHS()->GetOpCode() == OP_dread) { + AddrofNode *rhsDread = static_cast(stmt.GetRHS()); + MIRSymbol *rhsSymbol = GetFunction().GetLocalOrGlobalSymbol(rhsDread->GetStIdx()); + MIRType *rhsType = rhsSymbol->GetType(); + if (rhsDread->GetFieldID() != 0) { + MIRStructType *structType = static_cast(rhsSymbol->GetType()); + ASSERT(structType != nullptr, "SelectAggDassign: non-zero fieldID for non-structure"); + rhsType = structType->GetFieldType(rhsDread->GetFieldID()); + rhsOffset = static_cast(GetBecommon().GetFieldOffset(*structType, rhsDread->GetFieldID()).first); + bothUnion &= (structType->GetKind() == kTypeUnion); + } + bothUnion &= (rhsSymbol == lhsSymbol); + rhsAlign = GetBecommon().GetTypeAlign(rhsType->GetTypeIndex()); + alignUsed = std::min(lhsAlign, rhsAlign); + ASSERT(alignUsed != 0, "expect non-zero"); + uint32 copySize = GetAggCopySize(lhsOffset, rhsOffset, alignUsed); + MemOperand *rhsBaseMemOpnd; + if (IsParamStructCopy(*rhsSymbol)) { + rhsBaseMemOpnd = &LoadStructCopyBase(*rhsSymbol, + rhsOffset, static_cast(copySize * k8BitSize)); + } else { + rhsBaseMemOpnd = &GetOrCreateMemOpnd(*rhsSymbol, + rhsOffset, copySize * k8BitSize, false, true); + rhsBaseMemOpnd = FixLargeMemOpnd(*rhsBaseMemOpnd, copySize); + } + RegOperand *rhsBaseReg = rhsBaseMemOpnd->GetBaseRegister(); + int64 rhsOffsetVal = rhsBaseMemOpnd->GetOffsetOperand()->GetValue(); + MemOperand *lhsBaseMemOpnd = GenLargeAggFormalMemOpnd(*lhsSymbol, copySize, lhsOffset, true); + RegOperand *lhsBaseReg = lhsBaseMemOpnd->GetBaseRegister(); + int64 lhsOffsetVal = lhsBaseMemOpnd->GetOffsetOperand()->GetValue(); + bool rhsIsLo12 = (rhsBaseMemOpnd->GetAddrMode() == MemOperand::kAddrModeLo12Li); + bool lhsIsLo12 = (lhsBaseMemOpnd->GetAddrMode() == MemOperand::kAddrModeLo12Li); + if (lhsSize > kParmMemcpySize) { + std::vector opndVec; + RegOperand *regResult = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize)); + opndVec.push_back(regResult); /* result */ + + opndVec.push_back(PrepareMemcpyParamOpnd(lhsIsLo12, *lhsSymbol, lhsOffsetVal, *lhsBaseReg)); /* param 0 */ + + opndVec.push_back(PrepareMemcpyParamOpnd(rhsIsLo12, *rhsSymbol, rhsOffsetVal, *rhsBaseReg)); /* param 1 */ + + opndVec.push_back(PrepareMemcpyParamOpnd(lhsSize)); /* param 2 */ + + SelectLibCall("memcpy", opndVec, PTY_a64, PTY_a64); + + return; + } + Insn *lastLdr = nullptr; + Insn *lastStr = nullptr; + for (uint32 i = 0; i < (lhsSize / copySize); i++) { + uint64 rhsBaseOffset = i * copySize + static_cast(rhsOffsetVal); + uint64 lhsBaseOffset = i * copySize + static_cast(lhsOffsetVal); + MemOperand::AArch64AddressingMode addrMode = + rhsIsLo12 ? MemOperand::kAddrModeLo12Li : MemOperand::kAddrModeBOi; + MIRSymbol *sym = rhsIsLo12 ? rhsSymbol : nullptr; + OfstOperand &rhsOfstOpnd = GetOrCreateOfstOpnd(rhsBaseOffset, k32BitSize); + /* generate the load */ + MemOperand *rhsMemOpnd = + &GetOrCreateMemOpnd(addrMode, copySize * k8BitSize, rhsBaseReg, nullptr, &rhsOfstOpnd, sym); + /* generate the load */ + RegOperand &result = CreateVirtualRegisterOperand(NewVReg(kRegTyInt, std::max(4u, copySize))); + bool doPair = (!rhsIsLo12 && !lhsIsLo12 && (copySize >= k4BitSize) && ((i + 1) < (lhsSize / copySize))); + RegOperand *result1 = nullptr; + Insn *newLoadInsn = nullptr; + if (doPair) { + MOperator mOpLDP = (copySize == k4BitSize) ? 
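/* Worked example (sizes assumed): copying a 32-byte struct with copySize == 8 logically runs four
 * iterations, but the pairing path below collapses them into two ldp/stp rounds:
 *   ldp x<a>, x<b>, [rhsBase, #0]   then   stp x<a>, x<b>, [lhsBase, #0]
 *   ldp x<a>, x<b>, [rhsBase, #16]  then   stp x<a>, x<b>, [lhsBase, #16]
 * (the extra i++ after the paired store skips the slot already covered by the second register).
 * Aggregates above kParmMemcpySize bytes take the memcpy libcall path above instead. */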
MOP_wldp : MOP_xldp; + result1 = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, std::max(4u, copySize))); + rhsMemOpnd = FixLargeMemOpnd(mOpLDP, *rhsMemOpnd, copySize * k8BitSize, kInsnThirdOpnd); + newLoadInsn = &GetInsnBuilder()->BuildInsn(mOpLDP, result, *result1, *rhsMemOpnd); + } else { + MOperator mOp = PickLdInsn(copySize * k8BitSize, PTY_u32); + rhsMemOpnd = FixLargeMemOpnd(mOp, *rhsMemOpnd, copySize * k8BitSize, kInsnSecondOpnd); + newLoadInsn = &GetInsnBuilder()->BuildInsn(mOp, result, *rhsMemOpnd); + } + ASSERT(newLoadInsn != nullptr, "build load instruction failed in SelectAggDassign"); + lastLdr = AggtStrLdrInsert(bothUnion, lastLdr, *newLoadInsn); + /* generate the store */ + OfstOperand &lhsOfstOpnd = GetOrCreateOfstOpnd(lhsBaseOffset, k32BitSize); + addrMode = lhsIsLo12 ? MemOperand::kAddrModeLo12Li : MemOperand::kAddrModeBOi; + sym = lhsIsLo12 ? lhsSymbol : nullptr; + Insn *newStoreInsn = nullptr; + MemOperand *lhsMemOpnd = + &GetOrCreateMemOpnd(addrMode, copySize * k8BitSize, lhsBaseReg, nullptr, &lhsOfstOpnd, sym); + if (doPair) { + MOperator mOpSTP = (copySize == k4BitSize) ? MOP_wstp : MOP_xstp; + lhsMemOpnd = FixLargeMemOpnd(mOpSTP, *lhsMemOpnd, copySize * k8BitSize, kInsnThirdOpnd); + ASSERT(result1 != nullptr, "result1 should not be nullptr"); + newStoreInsn = &GetInsnBuilder()->BuildInsn(mOpSTP, result, *result1, *lhsMemOpnd); + i++; + } else { + MOperator mOp = PickStInsn(copySize * k8BitSize, PTY_u32); + lhsMemOpnd = FixLargeMemOpnd(mOp, *lhsMemOpnd, copySize * k8BitSize, kInsnSecondOpnd); + newStoreInsn = &GetInsnBuilder()->BuildInsn(mOp, result, *lhsMemOpnd); + } + ASSERT(newStoreInsn != nullptr, "build store instruction failed in SelectAggDassign"); + lastStr = AggtStrLdrInsert(bothUnion, lastStr, *newStoreInsn); + } + /* take care of extra content at the end less than the unit */ + uint64 lhsSizeCovered = (lhsSize / copySize) * copySize; + uint32 newAlignUsed = copySize; + while (lhsSizeCovered < lhsSize) { + newAlignUsed = newAlignUsed >> 1; + CHECK_FATAL(newAlignUsed != 0, "expect non-zero"); + if ((lhsSizeCovered + newAlignUsed) > lhsSize) { + continue; + } + /* generate the load */ + MemOperand *rhsMemOpnd; + MemOperand::AArch64AddressingMode addrMode = + rhsIsLo12 ? MemOperand::kAddrModeLo12Li : MemOperand::kAddrModeBOi; + MIRSymbol *sym = rhsIsLo12 ? rhsSymbol : nullptr; + OfstOperand &rhsOfstOpnd = GetOrCreateOfstOpnd(lhsSizeCovered + static_cast(rhsOffsetVal), k32BitSize); + rhsMemOpnd = &GetOrCreateMemOpnd(addrMode, newAlignUsed * k8BitSize, rhsBaseReg, nullptr, &rhsOfstOpnd, sym); + rhsMemOpnd = FixLargeMemOpnd(*rhsMemOpnd, newAlignUsed); + regno_t vRegNO = NewVReg(kRegTyInt, std::max(4u, newAlignUsed)); + RegOperand &result = CreateVirtualRegisterOperand(vRegNO); + MOperator mOp = PickLdInsn(newAlignUsed * k8BitSize, PTY_u32); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, result, *rhsMemOpnd)); + /* generate the store */ + addrMode = lhsIsLo12 ? MemOperand::kAddrModeLo12Li : MemOperand::kAddrModeBOi; + sym = lhsIsLo12 ? 
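/* Worked example for the residue loop (sizes assumed): with lhsSize == 15 and copySize == 8 the
 * main loop covers the first 8 bytes, then newAlignUsed is halved until each chunk fits:
 *   ldr  w<t>, [rhs, #8]    str  w<t>, [lhs, #8]
 *   ldrh w<t>, [rhs, #12]   strh w<t>, [lhs, #12]
 *   ldrb w<t>, [rhs, #14]   strb w<t>, [lhs, #14]
 * leaving no byte of the aggregate uncopied. */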
lhsSymbol : nullptr; + OfstOperand &lhsOfstOpnd = GetOrCreateOfstOpnd(lhsSizeCovered + static_cast(lhsOffsetVal), k32BitSize); + MemOperand *lhsMemOpnd = &GetOrCreateMemOpnd(addrMode, newAlignUsed * k8BitSize, + lhsBaseReg, nullptr, &lhsOfstOpnd, sym); + lhsMemOpnd = FixLargeMemOpnd(*lhsMemOpnd, newAlignUsed); + mOp = PickStInsn(newAlignUsed * k8BitSize, PTY_u32); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, result, *lhsMemOpnd)); + lhsSizeCovered += newAlignUsed; + } + } else if (stmt.GetRHS()->GetOpCode() == OP_iread) { + IreadNode *rhsIread = static_cast(stmt.GetRHS()); + RegOperand *addrOpnd = static_cast(HandleExpr(*rhsIread, *rhsIread->Opnd(0))); + addrOpnd = &LoadIntoRegister(*addrOpnd, rhsIread->Opnd(0)->GetPrimType()); + MIRPtrType *rhsPointerType = static_cast( + GlobalTables::GetTypeTable().GetTypeFromTyIdx(rhsIread->GetTyIdx())); + MIRType *rhsType = static_cast( + GlobalTables::GetTypeTable().GetTypeFromTyIdx(rhsPointerType->GetPointedTyIdx())); + bool isRefField = false; + if (rhsIread->GetFieldID() != 0) { + MIRStructType *rhsStructType = static_cast(rhsType); + ASSERT(rhsStructType != nullptr, "SelectAggDassign: non-zero fieldID for non-structure"); + rhsType = rhsStructType->GetFieldType(rhsIread->GetFieldID()); + rhsOffset = static_cast(GetBecommon().GetFieldOffset(*rhsStructType, rhsIread->GetFieldID()).first); + isRefField = GetBecommon().IsRefField(*rhsStructType, rhsIread->GetFieldID()); + } + rhsAlign = GetBecommon().GetTypeAlign(rhsType->GetTypeIndex()); + alignUsed = std::min(lhsAlign, rhsAlign); + ASSERT(alignUsed != 0, "expect non-zero"); + uint32 copySize = GetAggCopySize(rhsOffset, lhsOffset, alignUsed); + MemOperand *lhsBaseMemOpnd = GenLargeAggFormalMemOpnd(*lhsSymbol, copySize, lhsOffset, true); + RegOperand *lhsBaseReg = lhsBaseMemOpnd->GetBaseRegister(); + int64 lhsOffsetVal = lhsBaseMemOpnd->GetOffsetOperand()->GetValue(); + bool lhsIsLo12 = (lhsBaseMemOpnd->GetAddrMode() == MemOperand::kAddrModeLo12Li); + if (lhsSize > kParmMemcpySize) { + std::vector opndVec; + RegOperand *regResult = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize)); + opndVec.push_back(regResult); /* result */ + + opndVec.push_back(PrepareMemcpyParamOpnd(lhsIsLo12, *lhsSymbol, lhsOffsetVal, *lhsBaseReg)); /* param 0 */ + + opndVec.push_back(PrepareMemcpyParamOpnd(rhsOffset, *addrOpnd)); /* param 1 */ + + opndVec.push_back(PrepareMemcpyParamOpnd(lhsSize)); /* param 2 */ + + SelectLibCall("memcpy", opndVec, PTY_a64, PTY_a64); + + return; + } + for (uint32 i = 0; i < (lhsSize / copySize); i++) { + uint64 rhsBaseOffset = rhsOffset + i * copySize; + uint64 lhsBaseOffset = static_cast(lhsOffsetVal) + i * copySize; + /* generate the load */ + OfstOperand &ofstOpnd = GetOrCreateOfstOpnd(rhsBaseOffset, k32BitSize); + MemOperand *rhsMemOpnd = &GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, copySize * k8BitSize, + addrOpnd, nullptr, &ofstOpnd, nullptr); + regno_t vRegNO = NewVReg(kRegTyInt, std::max(4u, copySize)); + RegOperand &result = CreateVirtualRegisterOperand(vRegNO); + bool doPair = (!lhsIsLo12 && copySize >= k4BitSize) && ((i + 1) < (lhsSize / copySize)); + Insn *insn = nullptr; + RegOperand *result1 = nullptr; + if (doPair) { + MOperator mOpLDP = (copySize == k4BitSize) ? 
MOP_wldp : MOP_xldp; + regno_t vRegNO1 = NewVReg(kRegTyInt, std::max(4u, copySize)); + result1 = &CreateVirtualRegisterOperand(vRegNO1); + rhsMemOpnd = FixLargeMemOpnd(mOpLDP, *rhsMemOpnd, copySize * k8BitSize, kInsnThirdOpnd); + insn = &GetInsnBuilder()->BuildInsn(mOpLDP, result, *result1, *rhsMemOpnd); + } else { + MOperator mOp = PickLdInsn(copySize * k8BitSize, PTY_u32); + rhsMemOpnd = FixLargeMemOpnd(mOp, *rhsMemOpnd, copySize * k8BitSize, kInsnSecondOpnd); + insn = &GetInsnBuilder()->BuildInsn(mOp, result, *rhsMemOpnd); + } + insn->MarkAsAccessRefField(isRefField); + GetCurBB()->AppendInsn(*insn); + /* generate the store */ + MemOperand::AArch64AddressingMode addrMode = + lhsIsLo12 ? MemOperand::kAddrModeLo12Li : MemOperand::kAddrModeBOi; + MIRSymbol *sym = lhsIsLo12 ? lhsSymbol : nullptr; + OfstOperand &lhsOfstOpnd = GetOrCreateOfstOpnd(lhsBaseOffset, k32BitSize); + MemOperand *lhsMemOpnd = + &GetOrCreateMemOpnd(addrMode, copySize * k8BitSize, lhsBaseReg, nullptr, &lhsOfstOpnd, sym); + if (doPair) { + MOperator mOpSTP = (copySize == k4BitSize) ? MOP_wstp : MOP_xstp; + lhsMemOpnd = FixLargeMemOpnd(mOpSTP, *lhsMemOpnd, copySize * k8BitSize, kInsnThirdOpnd); + ASSERT(result1 != nullptr, "result1 should not be nullptr"); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOpSTP, result, *result1, *lhsMemOpnd)); + i++; + } else { + MOperator mOp = PickStInsn(copySize * k8BitSize, PTY_u32); + lhsMemOpnd = FixLargeMemOpnd(mOp, *lhsMemOpnd, copySize * k8BitSize, kInsnSecondOpnd); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, result, *lhsMemOpnd)); + } + } + /* take care of extra content at the end less than the unit of alignUsed */ + uint64 lhsSizeCovered = (lhsSize / copySize) * copySize; + uint32 newAlignUsed = copySize; + while (lhsSizeCovered < lhsSize) { + newAlignUsed = newAlignUsed >> 1; + CHECK_FATAL(newAlignUsed != 0, "expect non-zero"); + if ((lhsSizeCovered + newAlignUsed) > lhsSize) { + continue; + } + /* generate the load */ + OfstOperand &ofstOpnd = GetOrCreateOfstOpnd(rhsOffset + lhsSizeCovered, k32BitSize); + MemOperand *rhsMemOpnd = &GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, newAlignUsed * k8BitSize, + addrOpnd, nullptr, &ofstOpnd, nullptr); + rhsMemOpnd = FixLargeMemOpnd(*rhsMemOpnd, newAlignUsed); + RegOperand &result = CreateVirtualRegisterOperand(NewVReg(kRegTyInt, std::max(4u, newAlignUsed))); + MOperator mOp = PickLdInsn(newAlignUsed * k8BitSize, PTY_u32); + Insn &insn = GetInsnBuilder()->BuildInsn(mOp, result, *rhsMemOpnd); + insn.MarkAsAccessRefField(isRefField); + GetCurBB()->AppendInsn(insn); + /* generate the store */ + MemOperand::AArch64AddressingMode addrMode = + lhsIsLo12 ? MemOperand::kAddrModeLo12Li : MemOperand::kAddrModeBOi; + MIRSymbol *sym = lhsIsLo12 ? 
lhsSymbol : nullptr; + OfstOperand &lhsOfstOpnd = GetOrCreateOfstOpnd(lhsSizeCovered + static_cast(lhsOffsetVal), k32BitSize); + MemOperand *lhsMemOpnd = + &GetOrCreateMemOpnd(addrMode, newAlignUsed * k8BitSize, lhsBaseReg, nullptr, &lhsOfstOpnd, sym); + lhsMemOpnd = FixLargeMemOpnd(*lhsMemOpnd, newAlignUsed); + mOp = PickStInsn(newAlignUsed * k8BitSize, PTY_u32); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, result, *lhsMemOpnd)); + lhsSizeCovered += newAlignUsed; + } + } else { + ASSERT(stmt.GetRHS()->op == OP_regread, "SelectAggDassign: NYI"); + bool isRet = false; + if (lhsType->GetKind() == kTypeStruct || lhsType->GetKind() == kTypeUnion) { + RegreadNode *rhsregread = static_cast(stmt.GetRHS()); + PregIdx pregIdx = rhsregread->GetRegIdx(); + if (IsSpecialPseudoRegister(pregIdx)) { + if ((-pregIdx) == kSregRetval0) { + AArch64CallConvImpl parmlocator(GetBecommon()); + CCLocInfo pLoc; + PrimType retPtype; + RegType regType; + uint32 memSize; + uint32 regSize; + parmlocator.LocateRetVal(*lhsType, pLoc); + AArch64reg r[kFourRegister]; + r[0] = static_cast(pLoc.reg0); + r[1] = static_cast(pLoc.reg1); + r[2] = static_cast(pLoc.reg2); + r[3] = static_cast(pLoc.reg3); + if (pLoc.numFpPureRegs > 0) { + regSize = (pLoc.fpSize == k4ByteSize) ? k32BitSize : k64BitSize; + memSize = pLoc.fpSize; + retPtype = (pLoc.fpSize == k4ByteSize) ? PTY_f32 : PTY_f64; + regType = kRegTyFloat; + } else { + regSize = k64BitSize; + memSize = k8BitSize; + retPtype = PTY_u64; + regType = kRegTyInt; + } + for (uint32 i = 0; i < kFourRegister; ++i) { + if (r[i] == kRinvalid) { + break; + } + RegOperand &parm = GetOrCreatePhysicalRegisterOperand(r[i], regSize, regType); + Operand &mOpnd = GetOrCreateMemOpnd(*lhsSymbol, memSize * i, regSize); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(PickStInsn(regSize, retPtype), parm, mOpnd)); + } + isRet = true; + } + } + } + CHECK_FATAL(isRet, "SelectAggDassign: NYI"); + } +} + +static MIRType *GetPointedToType(const MIRPtrType &pointerType) { + MIRType *aType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(pointerType.GetPointedTyIdx()); + if (aType->GetKind() == kTypeArray) { + MIRArrayType *arrayType = static_cast(aType); + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(arrayType->GetElemTyIdx()); + } + if (aType->GetKind() == kTypeFArray || aType->GetKind() == kTypeJArray) { + MIRFarrayType *farrayType = static_cast(aType); + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(farrayType->GetElemTyIdx()); + } + return aType; +} + +void AArch64CGFunc::SelectIassign(IassignNode &stmt) { + int32 offset = 0; + MIRPtrType *pointerType = static_cast(GlobalTables::GetTypeTable().GetTypeFromTyIdx(stmt.GetTyIdx())); + ASSERT(pointerType != nullptr, "expect a pointer type at iassign node"); + MIRType *pointedType = nullptr; + bool isRefField = false; + AArch64isa::MemoryOrdering memOrd = AArch64isa::kMoNone; + + if (stmt.GetFieldID() != 0) { + MIRType *pointedTy = GlobalTables::GetTypeTable().GetTypeFromTyIdx(pointerType->GetPointedTyIdx()); + MIRStructType *structType = nullptr; + if (pointedTy->GetKind() != kTypeJArray) { + structType = static_cast(pointedTy); + } else { + /* it's a Jarray type. 
using it's parent's field info: java.lang.Object */ + structType = static_cast(pointedTy)->GetParentType(); + } + ASSERT(structType != nullptr, "SelectIassign: non-zero fieldID for non-structure"); + pointedType = structType->GetFieldType(stmt.GetFieldID()); + offset = GetBecommon().GetFieldOffset(*structType, stmt.GetFieldID()).first; + isRefField = GetBecommon().IsRefField(*structType, stmt.GetFieldID()); + } else { + pointedType = GetPointedToType(*pointerType); + if (GetFunction().IsJava() && (pointedType->GetKind() == kTypePointer)) { + MIRType *nextPointedType = + GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(pointedType)->GetPointedTyIdx()); + if (nextPointedType->GetKind() != kTypeScalar) { + isRefField = true; /* write into an object array or a high-dimensional array */ + } + } + } + + PrimType styp = stmt.GetRHS()->GetPrimType(); + Operand *valOpnd = HandleExpr(stmt, *stmt.GetRHS()); + Operand &srcOpnd = + LoadIntoRegister(*valOpnd, + (IsPrimitiveInteger(styp) || IsPrimitiveVectorInteger(styp)), GetPrimTypeBitSize(styp)); + + PrimType destType = pointedType->GetPrimType(); + if (destType == PTY_agg) { + destType = PTY_a64; + } + if (IsPrimitiveVector(styp)) { /* a vector type */ + destType = styp; + } + ASSERT(stmt.Opnd(0) != nullptr, "null ptr check"); + MemOperand &memOpnd = CreateMemOpnd(destType, stmt, *stmt.Opnd(0), offset); + auto dataSize = GetPrimTypeBitSize(destType); + memOpnd = memOpnd.IsOffsetMisaligned(dataSize) ? + ConstraintOffsetToSafeRegion(dataSize, memOpnd, nullptr) : memOpnd; + if (isVolStore && memOpnd.GetAddrMode() == MemOperand::kAddrModeBOi) { + memOrd = AArch64isa::kMoRelease; + isVolStore = false; + } + + if (memOrd == AArch64isa::kMoNone) { + SelectCopy(memOpnd, destType, srcOpnd, destType); + } else { + AArch64CGFunc::SelectStoreRelease(memOpnd, destType, srcOpnd, destType, memOrd, false); + } + GetCurBB()->GetLastInsn()->MarkAsAccessRefField(isRefField); +} + +void AArch64CGFunc::SelectIassignoff(IassignoffNode &stmt) { + int32 offset = stmt.GetOffset(); + PrimType destType = stmt.GetPrimType(); + + MemOperand &memOpnd = CreateMemOpnd(destType, stmt, *stmt.GetBOpnd(0), offset); + auto dataSize = GetPrimTypeBitSize(destType); + memOpnd = memOpnd.IsOffsetMisaligned(dataSize) ? 
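/* Illustrative note: in SelectIassign above, a volatile store through a kAddrModeBOi operand is
 * lowered as a store-release rather than a plain store, conceptually
 *   stlr w<v>, [x<base>]
 * (x-form for 64-bit values), which is why isVolStore is cleared once the release ordering has
 * been attached to the access. */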
+ ConstraintOffsetToSafeRegion(dataSize, memOpnd, nullptr) : memOpnd; + Operand *valOpnd = HandleExpr(stmt, *stmt.GetBOpnd(1)); + Operand &srcOpnd = LoadIntoRegister(*valOpnd, true, GetPrimTypeBitSize(destType)); + SelectCopy(memOpnd, destType, srcOpnd, destType); +} + +MemOperand *AArch64CGFunc::GenLmbcFpMemOperand(int32 offset, uint32 byteSize, AArch64reg baseRegno) { + MemOperand *memOpnd; + RegOperand *rfp = &GetOrCreatePhysicalRegisterOperand(baseRegno, k64BitSize, kRegTyInt); + uint32 bitlen = byteSize * kBitsPerByte; + if (offset < kMinSimm32) { + RegOperand *baseOpnd = &CreateRegisterOperandOfType(PTY_a64); + ImmOperand &immOpnd = CreateImmOperand(offset, k32BitSize, true); + if (immOpnd.IsSingleInstructionMovable()) { + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xaddrri12, *baseOpnd, *rfp, immOpnd)); + } else { + SelectCopyImm(*baseOpnd, immOpnd, PTY_i64); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xaddrrr, *baseOpnd, *rfp, *baseOpnd)); + } + OfstOperand *offsetOpnd = &CreateOfstOpnd(0, k32BitSize); + memOpnd = &GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, bitlen, baseOpnd, nullptr, offsetOpnd, nullptr); + } else { + OfstOperand *offsetOpnd = &CreateOfstOpnd(static_cast(static_cast(offset)), k32BitSize); + memOpnd = &GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, bitlen, rfp, nullptr, offsetOpnd, nullptr); + } + memOpnd->SetStackMem(true); + return memOpnd; +} + +bool AArch64CGFunc::GetNumReturnRegsForIassignfpoff(MIRType *rType, PrimType &primType, uint32 &numRegs) { + bool isPureFp = false; + uint32 rSize = static_cast(rType->GetSize()); + CHECK_FATAL(rSize <= k16ByteSize, "SelectIassignfpoff invalid agg size"); + uint32 fpSize; + numRegs = FloatParamRegRequired(static_cast(rType), fpSize); + if (numRegs > 0) { + primType = (fpSize == k4ByteSize) ? 
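/* Illustrative sketch (offsets assumed): GenLmbcFpMemOperand above keeps lmbc frame accesses
 * encodable. An offset below the signed-immediate range is first folded into a scratch base,
 * either with a single add or, when the constant is not single-instruction movable, via
 *   mov x<t>, #offset
 *   add x<t>, x29, x<t>
 * and the access then uses [x<t>, #0]; in-range offsets are used directly as [x29, #offset]. */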
PTY_f32 : PTY_f64; + isPureFp = true; + } else if (rSize > k8ByteSize) { + primType = PTY_u64; + numRegs = kTwoRegister; + } else { + primType = PTY_u64; + numRegs = kOneRegister; + } + return isPureFp; +} + +void AArch64CGFunc::GenIassignfpoffStore(Operand &srcOpnd, int32 offset, uint32 byteSize, PrimType primType) { + MemOperand *memOpnd = GenLmbcFpMemOperand(offset, byteSize); + MOperator mOp = PickStInsn(byteSize * kBitsPerByte, primType); + Insn &store = GetInsnBuilder()->BuildInsn(mOp, srcOpnd, *memOpnd); + GetCurBB()->AppendInsn(store); +} + +void AArch64CGFunc::SelectIassignfpoff(IassignFPoffNode &stmt, Operand &opnd) { + int32 offset = stmt.GetOffset(); + PrimType primType = stmt.GetPrimType(); + MIRType *rType = GetLmbcCallReturnType(); + bool isPureFpStruct = false; + uint32 numRegs = 0; + if (rType && rType->GetPrimType() == PTY_agg && opnd.IsRegister() && + static_cast(opnd).IsPhysicalRegister()) { + isPureFpStruct = GetNumReturnRegsForIassignfpoff(rType, primType, numRegs); + } + uint32 byteSize = GetPrimTypeSize(primType); + if (isPureFpStruct) { + for (uint32 i = 0 ; i < numRegs; ++i) { + RegOperand &srcOpnd = GetOrCreatePhysicalRegisterOperand(AArch64reg(V0 + i), + byteSize * kBitsPerByte, kRegTyFloat); + GenIassignfpoffStore(srcOpnd, offset + static_cast(i * byteSize), byteSize, primType); + } + } else if (numRegs) { + for (uint32 i = 0 ; i < numRegs; ++i) { + RegOperand &srcOpnd = GetOrCreatePhysicalRegisterOperand(AArch64reg(R0 + i), byteSize * kBitsPerByte, kRegTyInt); + GenIassignfpoffStore(srcOpnd, offset + static_cast(i * byteSize), byteSize, primType); + } + } else { + Operand &srcOpnd = LoadIntoRegister(opnd, primType); + GenIassignfpoffStore(srcOpnd, offset, byteSize, primType); + } +} + +/* Load and assign to a new register. 
To be moved to the correct call register OR stack + location in LmbcSelectParmList */ +void AArch64CGFunc::SelectIassignspoff(PrimType pTy, int32 offset, Operand &opnd) { + if (GetLmbcArgInfo() == nullptr) { + LmbcArgInfo *p = memPool->New(*GetFuncScopeAllocator()); + SetLmbcArgInfo(p); + } + uint32 byteLen = GetPrimTypeSize(pTy); + uint32 bitLen = byteLen * kBitsPerByte; + RegType regTy = GetRegTyFromPrimTy(pTy); + int32 curRegArgs = GetLmbcArgsInRegs(regTy); + if (curRegArgs < static_cast(k8ByteSize)) { + RegOperand *res = &CreateVirtualRegisterOperand(NewVReg(regTy, byteLen)); + SelectCopy(*res, pTy, opnd, pTy); + SetLmbcArgInfo(res, pTy, offset, 1); + } + else { + /* Move into allocated space */ + Operand &memOpd = CreateMemOpnd(RSP, offset, byteLen); + Operand ® = LoadIntoRegister(opnd, pTy); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(PickStInsn(bitLen, pTy), reg, memOpd)); + } + IncLmbcArgsInRegs(regTy); /* num of args in registers */ + IncLmbcTotalArgs(); /* num of args */ +} + +/* Search for CALL/ICALL/ICALLPROTO node, must be called from a blkassignoff node */ +MIRType *AArch64CGFunc::LmbcGetAggTyFromCallSite(StmtNode *stmt, std::vector **parmList) const { + for (; stmt != nullptr; stmt = stmt->GetNext()) { + if (stmt->GetOpCode() == OP_call || stmt->GetOpCode() == OP_icallproto) { + break; + } + } + CHECK_FATAL(stmt && (stmt->GetOpCode() == OP_call || stmt->GetOpCode() == OP_icallproto), + "blkassign sp not followed by call"); + uint32 nargs = GetLmbcTotalArgs(); + MIRType *ty = nullptr; + if (stmt->GetOpCode() == OP_call) { + CallNode *callNode = static_cast(stmt); + MIRFunction *fn = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(callNode->GetPUIdx()); + if (fn->GetFormalCount() > 0) { + ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(fn->GetFormalDefVec()[nargs].formalTyIdx); + } + *parmList = &fn->GetParamTypes(); + // would return null if the actual parameter is bogus + } else if (stmt->GetOpCode() == OP_icallproto) { + IcallNode *icallproto = static_cast(stmt); + MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(icallproto->GetRetTyIdx()); + MIRFuncType *fType = static_cast(type); + ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(fType->GetNthParamType(nargs)); + *parmList = &fType->GetParamTypeList(); + } else { + CHECK_FATAL(stmt->GetOpCode() == OP_icallproto, + "LmbcGetAggTyFromCallSite:: unexpected call operator"); + } + return ty; +} + +/* return true if blkassignoff for return, false otherwise */ +bool AArch64CGFunc::LmbcSmallAggForRet(const BaseNode &bNode, Operand *src, int32 offset, bool skip1) { + PrimType pTy; + uint32 size = 0; + AArch64reg regno = static_cast(static_cast(src)->GetRegisterNumber()); + MIRFunction *func = &GetFunction(); + + if (!func->IsReturnStruct()) { + return false; + } + /* This blkassignoff is for struct return? */ + uint32 loadSize; + uint32 numRegs = 0; + if (static_cast(bNode).GetNext()->GetOpCode() == OP_return) { + MIRStructType *ty = static_cast(func->GetReturnType()); + uint32 tySize = GetBecommon().GetTypeSize(ty->GetTypeIndex()); + uint32 fpregs = FloatParamRegRequired(ty, size); + if (fpregs > 0) { + /* pure floating point in agg */ + numRegs = fpregs; + pTy = (size == k4ByteSize) ? PTY_f32 : PTY_f64; + loadSize = GetPrimTypeSize(pTy) * kBitsPerByte; + for (uint32 i = 0; i < fpregs; i++) { + int32 s = (i == 0) ? 
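/* Illustrative note (assumed struct): a returned struct { float x, y; } is a pure-FP aggregate,
 * so fpregs == 2 and each 4-byte field is loaded straight into a return register:
 *   ldr s0, [src, #offset]
 *   ldr s1, [src, #offset + 4]
 * Mixed or integer aggregates instead take the branch below and are returned in x0 (and x1 when
 * the size exceeds 8 bytes). */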
0 : static_cast(i * size); + int64 newOffset = static_cast(s) + static_cast(offset); + MemOperand &mem = CreateMemOpnd(regno, newOffset, size * kBitsPerByte); + AArch64reg reg = static_cast(V0 + i); + RegOperand *res = &GetOrCreatePhysicalRegisterOperand(reg, loadSize, kRegTyFloat); + SelectCopy(*res, pTy, mem, pTy); + } + } else { + /* int/float mixed */ + numRegs = 2; + pTy = PTY_i64; + size = k4ByteSize; + switch (tySize) { + case 1: + pTy = PTY_i8; + break; + case 2: + pTy = PTY_i16; + break; + case 4: + pTy = PTY_i32; + break; + default: + size = k8ByteSize; /* pTy remains i64 */ + break; + } + loadSize = GetPrimTypeSize(pTy) * kBitsPerByte; + if (!skip1) { + MemOperand &mem = CreateMemOpnd(regno, offset, size * kBitsPerByte); + RegOperand &res1 = GetOrCreatePhysicalRegisterOperand(R0, loadSize, kRegTyInt); + SelectCopy(res1, pTy, mem, pTy); + } + if (tySize > static_cast(k8ByteSize)) { + int32 newOffset = offset + static_cast(k8ByteSize); + MemOperand &newMem = CreateMemOpnd(regno, newOffset, size * kBitsPerByte); + RegOperand &res2 = GetOrCreatePhysicalRegisterOperand(R1, loadSize, kRegTyInt); + SelectCopy(res2, pTy, newMem, pTy); + } + } + bool intReg = fpregs == 0; + for (uint32 i = 0; i < numRegs; i++) { + AArch64reg preg = static_cast((intReg ? R0 : V0) + i); + MOperator mop = intReg ? MOP_pseudo_ret_int : MOP_pseudo_ret_float; + RegOperand &dest = GetOrCreatePhysicalRegisterOperand(preg, loadSize, intReg ? kRegTyInt : kRegTyFloat); + Insn &pseudo = GetInsnBuilder()->BuildInsn(mop, dest); + GetCurBB()->AppendInsn(pseudo); + } + return true; + } + return false; +} + +/* return true if blkassignoff for return, false otherwise */ +bool AArch64CGFunc::LmbcSmallAggForCall(BlkassignoffNode &bNode, const Operand *src, std::vector **parmList) { + AArch64reg regno = static_cast(static_cast(src)->GetRegisterNumber()); + if (IsBlkassignForPush(bNode)) { + PrimType pTy = PTY_i64; + MIRStructType *ty = static_cast(LmbcGetAggTyFromCallSite(&bNode, parmList)); + uint32 size = 0; + uint32 fpregs = ty ? FloatParamRegRequired(ty, size) : 0; /* fp size determined */ + if (fpregs > 0) { + /* pure floating point in agg */ + pTy = (size == k4ByteSize) ? PTY_f32 : PTY_f64; + for (uint32 i = 0; i < fpregs; i++) { + int32 s = (i == 0) ? 0 : static_cast(i * size); + MemOperand &mem = CreateMemOpnd(regno, s, size * kBitsPerByte); + RegOperand *res = &CreateVirtualRegisterOperand(NewVReg(kRegTyFloat, size)); + SelectCopy(*res, pTy, mem, pTy); + SetLmbcArgInfo(res, pTy, 0, static_cast(fpregs)); + IncLmbcArgsInRegs(kRegTyFloat); + } + IncLmbcTotalArgs(); + return true; + } else if (bNode.blockSize <= static_cast(k16ByteSize)) { + /* integer/mixed types in register/s */ + size = k4ByteSize; + switch (bNode.blockSize) { + case 1: + pTy = PTY_i8; + break; + case 2: + pTy = PTY_i16; + break; + case 4: + pTy = PTY_i32; + break; + default: + size = k8ByteSize; /* pTy remains i64 */ + break; + } + MemOperand &mem = CreateMemOpnd(regno, 0, size * kBitsPerByte); + RegOperand *res = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, size)); + SelectCopy(*res, pTy, mem, pTy); + SetLmbcArgInfo(res, pTy, bNode.offset, bNode.blockSize > static_cast(k8ByteSize) ? 
2 : 1);
+      IncLmbcArgsInRegs(kRegTyInt);
+      if (bNode.blockSize > static_cast<int32>(k8ByteSize)) {
+        MemOperand &newMem = CreateMemOpnd(regno, k8ByteSize, size * kBitsPerByte);
+        RegOperand *newRes = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, size));
+        SelectCopy(*newRes, pTy, newMem, pTy);
+        SetLmbcArgInfo(newRes, pTy, bNode.offset + k8ByteSizeInt, 2);
+        IncLmbcArgsInRegs(kRegTyInt);
+      }
+      IncLmbcTotalArgs();
+      return true;
+    }
+  }
+  return false;
+}
+
+/* This function is incomplete and may be removed once the Lmbc IR is changed
+   so that the lowerer figures out where the address of the large agg resides */
+uint32 AArch64CGFunc::LmbcFindTotalStkUsed(std::vector<TyIdx> *paramList) {
+  AArch64CallConvImpl parmlocator(GetBecommon());
+  CCLocInfo pLoc;
+  for (TyIdx tyIdx : *paramList) {
+    MIRType *ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx);
+    (void)parmlocator.LocateNextParm(*ty, pLoc);
+  }
+  return 0;
+}
+
+/* All arguments passed as registers */
+uint32 AArch64CGFunc::LmbcTotalRegsUsed() {
+  if (GetLmbcArgInfo() == nullptr) {
+    return 0;  /* no arg */
+  }
+  MapleVector<int32> &regs = GetLmbcCallArgNumOfRegs();
+  MapleVector<PrimType> &types = GetLmbcCallArgTypes();
+  uint32 iCnt = 0;
+  uint32 fCnt = 0;
+  for (uint32 i = 0; i < regs.size(); i++) {
+    if (IsPrimitiveInteger(types[i])) {
+      if ((iCnt + static_cast<uint32>(regs[i])) <= k8ByteSize) {
+        iCnt += static_cast<uint32>(regs[i]);
+      }
+    } else {
+      if ((fCnt + static_cast<uint32>(regs[i])) <= k8ByteSize) {
+        fCnt += static_cast<uint32>(regs[i]);
+      }
+    }
+  }
+  return iCnt + fCnt;
+}
+
+/* If the blkassignoff is for an argument, this function loads the agg argument into
+   virtual registers, regardless of whether enough physical call registers remain.
+   Arguments larger than 16 bytes are copied to preset space and the pointer
+   result is loaded into a virtual register.
+   If the blkassignoff is not for an argument, this function simply emits a memcpy. */
+void AArch64CGFunc::SelectBlkassignoff(BlkassignoffNode &bNode, Operand *src) {
+  CHECK_FATAL(src->GetKind() == Operand::kOpdRegister, "blkassign src type not in register");
+  std::vector<TyIdx> *parmList;
+  if (GetLmbcArgInfo() == nullptr) {
+    LmbcArgInfo *p = memPool->New<LmbcArgInfo>(*GetFuncScopeAllocator());
+    SetLmbcArgInfo(p);
+  }
+  if (LmbcSmallAggForRet(bNode, src)) {
+    return;
+  } else if (LmbcSmallAggForCall(bNode, src, &parmList)) {
+    return;
+  }
+  Operand *dest = HandleExpr(bNode, *bNode.Opnd(0));
+  RegOperand *regResult = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize));
+  /* memcpy for agg assign OR large agg for arg/ret */
+  int32 offset = bNode.offset;
+  if (IsBlkassignForPush(bNode)) {
+    /* large agg for call, addr to be pushed in SelectCall */
+    offset = GetLmbcTotalStkUsed();
+    if (offset < 0) {
+      /* length of ALL stack based args for this call; this location is where the
+         next large agg resides, and its addr will then be passed */
+      offset = static_cast<int32>(LmbcFindTotalStkUsed(parmList) + LmbcTotalRegsUsed());
+    }
+    SetLmbcTotalStkUsed(offset + bNode.blockSize);  /* next use */
+    SetLmbcArgInfo(regResult, PTY_i64, 0, 1);       /* 1 reg for ptr */
+    IncLmbcArgsInRegs(kRegTyInt);
+    IncLmbcTotalArgs();
+    /* copy large agg arg to offset below */
+  }
+  RegOperand *param0 = PrepareMemcpyParamOpnd(offset, *dest);
+  RegOperand *param1 = static_cast<RegOperand*>(src);
+  RegOperand *param2 = PrepareMemcpyParamOpnd(static_cast<uint64>(static_cast<uint32>(bNode.blockSize)));
+  if (bNode.blockSize > static_cast<int32>(kParmMemcpySize)) {
+    std::vector<Operand*> opndVec;
+    opndVec.push_back(regResult);  /* result */
+    opndVec.push_back(param0);     /* param 0 */
+    opndVec.push_back(src);        /* param 1 */
+    opndVec.push_back(param2);     /* param 2 */
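+    /* Illustrative note (added commentary, not part of the original patch):
+     * blocks above kParmMemcpySize are handed to the memcpy libcall built from
+     * opndVec here, while smaller blocks fall into the inline loop below, which
+     * peels off the widest access that still fits on each iteration.  For a
+     * hypothetical 14-byte block that loop would emit
+     *   ldr/str      8 bytes at offset 0
+     *   wldr/wstr    4 bytes at offset 8
+     *   wldrh/wstrh  2 bytes at offset 12
+     * and a 32-byte block would be covered by two 16-byte ldp/stp pairs. */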
SelectLibCall("memcpy", opndVec, PTY_a64, PTY_a64); + } else { + int32 copyOffset = 0; + uint32 inc = 0; + bool isPair; + for (uint32 sz = static_cast(bNode.blockSize); sz > 0;) { + isPair = false; + MOperator ldOp, stOp; + if (sz >= k16ByteSize) { + sz -= k16ByteSize; + inc = k16ByteSize; + ldOp = MOP_xldp; + stOp = MOP_xstp; + isPair = true; + } else if (sz >= k8ByteSize) { + sz -= k8ByteSize; + inc = k8ByteSize; + ldOp = MOP_xldr; + stOp = MOP_xstr; + } else if (sz >= k4ByteSize) { + sz -= k4ByteSize; + inc = k4ByteSize; + ldOp = MOP_wldr; + stOp = MOP_wstr; + } else if (sz >= k2ByteSize) { + sz -= k2ByteSize; + inc = k2ByteSize; + ldOp = MOP_wldrh; + stOp = MOP_wstrh; + } else { + sz -= k1ByteSize; + inc = k1ByteSize; + ldOp = MOP_wldrb; + stOp = MOP_wstrb; + } + AArch64reg ldBaseReg = static_cast(param1->GetRegisterNumber()); + MemOperand &ldMem = CreateMemOpnd(ldBaseReg, copyOffset, k8ByteSize); + + AArch64reg stBaseReg = static_cast(param0->GetRegisterNumber()); + MemOperand &stMem = CreateMemOpnd(stBaseReg, copyOffset, k8ByteSize); + if (isPair) { + RegOperand &ldResult = CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize)); + RegOperand &ldResult2 = CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize)); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(ldOp, ldResult, ldResult2, ldMem)); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(stOp, ldResult, ldResult2, stMem)); + } else { + RegOperand &ldResult = CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize)); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(ldOp, ldResult, ldMem)); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(stOp, ldResult, stMem)); + } + copyOffset += static_cast(inc); + } + } +} + +void AArch64CGFunc::SelectAggIassign(IassignNode &stmt, Operand &addrOpnd) { + ASSERT(stmt.Opnd(0) != nullptr, "null ptr check"); + Operand &lhsAddrOpnd = LoadIntoRegister(addrOpnd, stmt.Opnd(0)->GetPrimType()); + uint32 lhsOffset = 0; + MIRType *stmtType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(stmt.GetTyIdx()); + MIRPtrType *lhsPointerType = static_cast(stmtType); + MIRType *lhsType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(lhsPointerType->GetPointedTyIdx()); + if (stmt.GetFieldID() != 0) { + MIRStructType *structType = static_cast(lhsType); + ASSERT(structType != nullptr, "SelectAggIassign: non-zero fieldID for non-structure"); + lhsType = structType->GetFieldType(stmt.GetFieldID()); + lhsOffset = static_cast(GetBecommon().GetFieldOffset(*structType, stmt.GetFieldID()).first); + } else if (lhsType->GetKind() == kTypeArray) { +#if DEBUG + MIRArrayType *arrayLhsType = static_cast(lhsType); + /* access an array element */ + MIRType *lhsType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(arrayLhsType->GetElemTyIdx()); + MIRTypeKind typeKind = lhsType->GetKind(); + ASSERT(((typeKind == kTypeScalar) || (typeKind == kTypeStruct) || (typeKind == kTypeClass) || + (typeKind == kTypePointer)), + "unexpected array element type in iassign"); +#endif + } else if (lhsType->GetKind() == kTypeFArray) { +#if DEBUG + MIRFarrayType *farrayLhsType = static_cast(lhsType); + /* access an array element */ + MIRType *lhsElemType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(farrayLhsType->GetElemTyIdx()); + MIRTypeKind typeKind = lhsElemType->GetKind(); + ASSERT(((typeKind == kTypeScalar) || (typeKind == kTypeStruct) || (typeKind == kTypeClass) || + (typeKind == kTypePointer)), + "unexpected array element type in iassign"); +#endif + } + uint32 lhsAlign = 
GetBecommon().GetTypeAlign(lhsType->GetTypeIndex()); + uint64 lhsSize = GetBecommon().GetTypeSize(lhsType->GetTypeIndex()); + + uint32 rhsAlign; + uint32 alignUsed; + uint32 rhsOffset = 0; + if (stmt.GetRHS()->GetOpCode() == OP_dread) { + AddrofNode *rhsDread = static_cast(stmt.GetRHS()); + MIRSymbol *rhsSymbol = GetFunction().GetLocalOrGlobalSymbol(rhsDread->GetStIdx()); + MIRType *rhsType = rhsSymbol->GetType(); + if (rhsDread->GetFieldID() != 0) { + MIRStructType *structType = static_cast(rhsSymbol->GetType()); + ASSERT(structType != nullptr, "SelectAggIassign: non-zero fieldID for non-structure"); + rhsType = structType->GetFieldType(rhsDread->GetFieldID()); + rhsOffset = static_cast(GetBecommon().GetFieldOffset(*structType, rhsDread->GetFieldID()).first); + } + rhsAlign = GetBecommon().GetTypeAlign(rhsType->GetTypeIndex()); + alignUsed = std::min(lhsAlign, rhsAlign); + ASSERT(alignUsed != 0, "expect non-zero"); + uint32 copySize = GetAggCopySize(rhsOffset, lhsOffset, alignUsed); + MemOperand *rhsBaseMemOpnd; + if (IsParamStructCopy(*rhsSymbol)) { + rhsBaseMemOpnd = &LoadStructCopyBase(*rhsSymbol, rhsOffset, + static_cast(copySize * k8BitSize)); + } else { + rhsBaseMemOpnd = GenLargeAggFormalMemOpnd(*rhsSymbol, copySize, rhsOffset, true); + } + RegOperand *rhsBaseReg = rhsBaseMemOpnd->GetBaseRegister(); + int64 rhsOffsetVal = rhsBaseMemOpnd->GetOffsetOperand()->GetValue(); + bool rhsIsLo12 = (rhsBaseMemOpnd->GetAddrMode() == MemOperand::kAddrModeLo12Li); + if (lhsSize > kParmMemcpySize) { + std::vector opndVec; + RegOperand *regResult = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize)); + opndVec.push_back(regResult); /* result */ + + opndVec.push_back(PrepareMemcpyParamOpnd(static_cast(lhsOffset), lhsAddrOpnd)); /* param 0 */ + + opndVec.push_back(PrepareMemcpyParamOpnd(rhsOffsetVal, *rhsBaseReg)); /* param 1 */ + + opndVec.push_back(PrepareMemcpyParamOpnd(lhsSize)); /* param 2 */ + + SelectLibCall("memcpy", opndVec, PTY_a64, PTY_a64); + + return; + } + for (uint32 i = 0; i < (lhsSize / copySize); ++i) { + uint32 rhsBaseOffset = static_cast(rhsOffsetVal + i * copySize); + uint32 lhsBaseOffset = lhsOffset + i * copySize; + MemOperand::AArch64AddressingMode addrMode = + rhsIsLo12 ? MemOperand::kAddrModeLo12Li : MemOperand::kAddrModeBOi; + MIRSymbol *sym = rhsIsLo12 ? rhsSymbol : nullptr; + OfstOperand &rhsOfstOpnd = GetOrCreateOfstOpnd(rhsBaseOffset, k32BitSize); + MemOperand *rhsMemOpnd = + &GetOrCreateMemOpnd(addrMode, copySize * k8BitSize, rhsBaseReg, nullptr, &rhsOfstOpnd, sym); + rhsMemOpnd = FixLargeMemOpnd(*rhsMemOpnd, copySize); + /* generate the load */ + RegOperand &result = CreateVirtualRegisterOperand(NewVReg(kRegTyInt, std::max(4u, copySize))); + MOperator mOpLDP = (copySize == k4BitSize) ? 
MOP_wldp : MOP_xldp; + bool doPair = (!rhsIsLo12 && (copySize >= k4BitSize) && ((i + 1) < (lhsSize / copySize))); + RegOperand *result1 = nullptr; + if (doPair) { + regno_t vRegNO1 = NewVReg(kRegTyInt, std::max(4u, copySize)); + result1 = &CreateVirtualRegisterOperand(vRegNO1); + rhsMemOpnd = FixLargeMemOpnd(mOpLDP, *static_cast(rhsMemOpnd), result.GetSize(), kInsnThirdOpnd); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOpLDP, result, *result1, *rhsMemOpnd)); + } else { + MOperator mOp = PickLdInsn(copySize * k8BitSize, PTY_u32); + rhsMemOpnd = FixLargeMemOpnd(mOp, *static_cast(rhsMemOpnd), result.GetSize(), kInsnSecondOpnd); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, result, *rhsMemOpnd)); + } + /* generate the store */ + OfstOperand &ofstOpnd = GetOrCreateOfstOpnd(lhsBaseOffset, k32BitSize); + MemOperand *lhsMemOpnd = &GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, copySize * k8BitSize, + static_cast(&lhsAddrOpnd), nullptr, &ofstOpnd, nullptr); + if (doPair) { + MOperator mOpSTP = (copySize == k4BitSize) ? MOP_wstp : MOP_xstp; + lhsMemOpnd = FixLargeMemOpnd(mOpSTP, *lhsMemOpnd, result.GetSize(), kInsnThirdOpnd); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOpSTP, result, *result1, *lhsMemOpnd)); + i++; + } else { + MOperator mOp = PickStInsn(copySize * k8BitSize, PTY_u32); + lhsMemOpnd = FixLargeMemOpnd(mOp, *lhsMemOpnd, copySize * k8BitSize, kInsnSecondOpnd); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, result, *lhsMemOpnd)); + } + } + /* take care of extra content at the end less than the unit of alignUsed */ + uint64 lhsSizeCovered = (lhsSize / copySize) * copySize; + uint32 newAlignUsed = copySize; + while (lhsSizeCovered < lhsSize) { + newAlignUsed = newAlignUsed >> 1; + CHECK_FATAL(newAlignUsed != 0, "expect non-zero"); + if ((lhsSizeCovered + newAlignUsed) > lhsSize) { + continue; + } + MemOperand::AArch64AddressingMode addrMode = + rhsIsLo12 ? MemOperand::kAddrModeLo12Li : MemOperand::kAddrModeBOi; + MIRSymbol *sym = rhsIsLo12 ? 
rhsSymbol : nullptr; + OfstOperand &rhsOfstOpnd = GetOrCreateOfstOpnd(lhsSizeCovered + static_cast(rhsOffsetVal), k32BitSize); + MemOperand *rhsMemOpnd = + &GetOrCreateMemOpnd(addrMode, newAlignUsed * k8BitSize, rhsBaseReg, nullptr, &rhsOfstOpnd, sym); + /* generate the load */ + Operand &result = CreateVirtualRegisterOperand(NewVReg(kRegTyInt, std::max(4u, newAlignUsed))); + MOperator mOp = PickLdInsn(newAlignUsed * k8BitSize, PTY_u32); + rhsMemOpnd = FixLargeMemOpnd(mOp, *rhsMemOpnd, newAlignUsed * k8BitSize, kInsnSecondOpnd); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, result, *rhsMemOpnd)); + /* generate the store */ + OfstOperand &ofstOpnd = GetOrCreateOfstOpnd(lhsOffset + lhsSizeCovered, k32BitSize); + MemOperand &lhsMemOpnd = GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, newAlignUsed * k8BitSize, + static_cast(&lhsAddrOpnd), nullptr, &ofstOpnd, static_cast(nullptr)); + mOp = PickStInsn(newAlignUsed * k8BitSize, PTY_u32); + lhsMemOpnd = *FixLargeMemOpnd(mOp, lhsMemOpnd, newAlignUsed * k8BitSize, kInsnSecondOpnd); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, result, lhsMemOpnd)); + lhsSizeCovered += newAlignUsed; + } + } else { /* rhs is iread */ + ASSERT(stmt.GetRHS()->GetOpCode() == OP_iread, "SelectAggDassign: NYI"); + IreadNode *rhsIread = static_cast(stmt.GetRHS()); + RegOperand *rhsAddrOpnd = static_cast(HandleExpr(*rhsIread, *rhsIread->Opnd(0))); + rhsAddrOpnd = &LoadIntoRegister(*rhsAddrOpnd, rhsIread->Opnd(0)->GetPrimType()); + MIRPtrType *rhsPointerType = + static_cast(GlobalTables::GetTypeTable().GetTypeFromTyIdx(rhsIread->GetTyIdx())); + MIRType *rhsType = static_cast( + GlobalTables::GetTypeTable().GetTypeFromTyIdx(rhsPointerType->GetPointedTyIdx())); + bool isRefField = false; + if (rhsIread->GetFieldID() != 0) { + MIRStructType *rhsStructType = static_cast(rhsType); + ASSERT(rhsStructType, "SelectAggDassign: non-zero fieldID for non-structure"); + rhsType = rhsStructType->GetFieldType(rhsIread->GetFieldID()); + rhsOffset = static_cast(GetBecommon().GetFieldOffset(*rhsStructType, rhsIread->GetFieldID()).first); + isRefField = GetBecommon().IsRefField(*rhsStructType, rhsIread->GetFieldID()); + } + rhsAlign = GetBecommon().GetTypeAlign(rhsType->GetTypeIndex()); + alignUsed = std::min(lhsAlign, rhsAlign); + ASSERT(alignUsed != 0, "expect non-zero"); + uint32 copySize = GetAggCopySize(rhsOffset, lhsOffset, alignUsed); + if (lhsSize > kParmMemcpySize) { + std::vector opndVec; + RegOperand *regResult = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize)); + opndVec.push_back(regResult); /* result */ + + opndVec.push_back(PrepareMemcpyParamOpnd(static_cast(lhsOffset), lhsAddrOpnd)); /* param 0 */ + + opndVec.push_back(PrepareMemcpyParamOpnd(static_cast(rhsOffset), *rhsAddrOpnd)); /* param 1 */ + + opndVec.push_back(PrepareMemcpyParamOpnd(lhsSize)); /* param 2 */ + + SelectLibCall("memcpy", opndVec, PTY_a64, PTY_a64); + + return; + } + ASSERT(copySize != 0, "expect non-zero"); + for (uint32 i = 0; i < (lhsSize / copySize); i++) { + /* generate the load */ + uint32 operandSize = copySize * k8BitSize; + OfstOperand &rhsOfstOpnd = GetOrCreateOfstOpnd(rhsOffset + i * copySize, k32BitSize); + MemOperand *rhsMemOpnd = &GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, operandSize, + static_cast(rhsAddrOpnd), nullptr, &rhsOfstOpnd, nullptr); + RegOperand &result = CreateVirtualRegisterOperand(NewVReg(kRegTyInt, std::max(4u, copySize))); + bool doPair = ((copySize >= k4BitSize) && ((i + 1) < (lhsSize / copySize))); + Insn *insn = nullptr; + RegOperand 
*result1 = nullptr; + if (doPair) { + MOperator mOpLDP = (copySize == k4BitSize) ? MOP_wldp : MOP_xldp; + rhsMemOpnd = FixLargeMemOpnd(mOpLDP, *static_cast(rhsMemOpnd), operandSize, kInsnThirdOpnd); + result1 = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, std::max(4u, copySize))); + insn = &GetInsnBuilder()->BuildInsn(mOpLDP, result, *result1, *rhsMemOpnd); + } else { + MOperator mOp = PickLdInsn(operandSize, PTY_u32); + rhsMemOpnd = FixLargeMemOpnd(mOp, *static_cast(rhsMemOpnd), operandSize, kInsnSecondOpnd); + insn = &GetInsnBuilder()->BuildInsn(mOp, result, *rhsMemOpnd); + } + insn->MarkAsAccessRefField(isRefField); + GetCurBB()->AppendInsn(*insn); + /* generate the store */ + OfstOperand &lhsOfstOpnd = GetOrCreateOfstOpnd(lhsOffset + i * copySize, k32BitSize); + MemOperand *lhsMemOpnd = &GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, operandSize, + static_cast(&lhsAddrOpnd), nullptr, &lhsOfstOpnd, nullptr); + if (doPair) { + MOperator mOpSTP = (copySize == k4BitSize) ? MOP_wstp : MOP_xstp; + lhsMemOpnd = FixLargeMemOpnd(mOpSTP, *static_cast(lhsMemOpnd), operandSize, kInsnThirdOpnd); + ASSERT(result1 != nullptr, "result1 should not be nullptr"); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOpSTP, result, *result1, *lhsMemOpnd)); + i++; + } else { + MOperator mOp = PickStInsn(operandSize, PTY_u32); + lhsMemOpnd = FixLargeMemOpnd(mOp, *static_cast(lhsMemOpnd), operandSize, kInsnSecondOpnd); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, result, *lhsMemOpnd)); + } + } + /* take care of extra content at the end less than the unit */ + uint64 lhsSizeCovered = (lhsSize / copySize) * copySize; + uint32 newAlignUsed = copySize; + while (lhsSizeCovered < lhsSize) { + newAlignUsed = newAlignUsed >> 1; + CHECK_FATAL(newAlignUsed != 0, "expect non-zero"); + if ((lhsSizeCovered + newAlignUsed) > lhsSize) { + continue; + } + /* generate the load */ + OfstOperand &rhsOfstOpnd = GetOrCreateOfstOpnd(rhsOffset + lhsSizeCovered, k32BitSize); + uint32 memOpndSize = newAlignUsed * k8BitSize; + MemOperand *rhsMemOpnd = &GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, memOpndSize, + static_cast(rhsAddrOpnd), nullptr, &rhsOfstOpnd, nullptr); + regno_t vRegNO = NewVReg(kRegTyInt, std::max(4u, newAlignUsed)); + RegOperand &result = CreateVirtualRegisterOperand(vRegNO); + MOperator mOpLD = PickLdInsn(memOpndSize, PTY_u32); + rhsMemOpnd = FixLargeMemOpnd(mOpLD, *rhsMemOpnd, memOpndSize, static_cast(kInsnSecondOpnd)); + Insn &insn = GetInsnBuilder()->BuildInsn(mOpLD, result, *rhsMemOpnd); + insn.MarkAsAccessRefField(isRefField); + GetCurBB()->AppendInsn(insn); + /* generate the store */ + OfstOperand &lhsOfstOpnd = GetOrCreateOfstOpnd(lhsOffset + lhsSizeCovered, k32BitSize); + MemOperand *lhsMemOpnd = &GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, memOpndSize, + static_cast(&lhsAddrOpnd), nullptr, &lhsOfstOpnd, nullptr); + MOperator mOpST = PickStInsn(memOpndSize, PTY_u32); + lhsMemOpnd = FixLargeMemOpnd(mOpST, *lhsMemOpnd, memOpndSize, static_cast(kInsnSecondOpnd)); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOpST, result, *lhsMemOpnd)); + lhsSizeCovered += newAlignUsed; + } + } +} + +void AArch64CGFunc::SelectReturnSendOfStructInRegs(BaseNode *x) { + uint32 offset = 0; + if (x->GetOpCode() == OP_dread) { + DreadNode *dread = static_cast(x); + MIRSymbol *sym = GetFunction().GetLocalOrGlobalSymbol(dread->GetStIdx()); + MIRType *mirType = sym->GetType(); + if (dread->GetFieldID() != 0) { + MIRStructType *structType = static_cast(mirType); + mirType = 
structType->GetFieldType(dread->GetFieldID()); + offset = static_cast(GetBecommon().GetFieldOffset(*structType, dread->GetFieldID()).first); + } + uint32 typeSize = GetBecommon().GetTypeSize(mirType->GetTypeIndex()); + /* generate move to regs for agg return */ + AArch64CallConvImpl parmlocator(GetBecommon()); + CCLocInfo pLoc; + (void)parmlocator.LocateNextParm(*mirType, pLoc, true, GetBecommon().GetMIRModule().CurFunction()); + /* aggregates are 8 byte aligned. */ + Operand *rhsmemopnd = nullptr; + RegOperand *result[kFourRegister]; /* up to 2 int or 4 fp */ + uint32 loadSize; + uint32 numRegs; + RegType regType; + PrimType retPty; + bool fpParm = false; + if (pLoc.numFpPureRegs > 0) { + loadSize = pLoc.fpSize; + numRegs = pLoc.numFpPureRegs; + fpParm = true; + regType = kRegTyFloat; + retPty = (pLoc.fpSize == k4ByteSize) ? PTY_f32 : PTY_f64; + } else { + if (CGOptions::IsBigEndian()) { + loadSize = k8ByteSize; + numRegs = (typeSize <= k8ByteSize) ? kOneRegister : kTwoRegister; + regType = kRegTyInt; + retPty = PTY_u64; + } else { + loadSize = (typeSize <= k4ByteSize) ? k4ByteSize : k8ByteSize; + numRegs = (typeSize <= k8ByteSize) ? kOneRegister : kTwoRegister; + regType = kRegTyInt; + retPty = PTY_u32; + } + } + bool parmCopy = IsParamStructCopy(*sym); + for (uint32 i = 0; i < numRegs; i++) { + if (parmCopy) { + rhsmemopnd = &LoadStructCopyBase(*sym, + (offset + static_cast(i * (fpParm ? loadSize : k8ByteSize))), + static_cast(loadSize * kBitsPerByte)); + } else { + rhsmemopnd = &GetOrCreateMemOpnd(*sym, + (offset + static_cast(i * (fpParm ? loadSize : k8ByteSize))), + (loadSize * kBitsPerByte)); + } + result[i] = &CreateVirtualRegisterOperand(NewVReg(regType, loadSize)); + MOperator mop1 = PickLdInsn(loadSize * kBitsPerByte, retPty); + Insn &ld = GetInsnBuilder()->BuildInsn(mop1, *(result[i]), *rhsmemopnd); + GetCurBB()->AppendInsn(ld); + } + AArch64reg regs[kFourRegister]; + regs[0] = static_cast(pLoc.reg0); + regs[1] = static_cast(pLoc.reg1); + regs[2] = static_cast(pLoc.reg2); + regs[3] = static_cast(pLoc.reg3); + RegOperand *dest; + for (uint32 i = 0; i < numRegs; i++) { + AArch64reg preg; + MOperator mop2; + if (fpParm) { + preg = regs[i]; + mop2 = (loadSize == k4ByteSize) ? MOP_xvmovs : MOP_xvmovd; + } else { + preg = (i == 0 ? R0 : R1); + mop2 = (loadSize == k4ByteSize) ? MOP_wmovrr : MOP_xmovrr; + } + dest = &GetOrCreatePhysicalRegisterOperand(preg, (loadSize * kBitsPerByte), regType); + Insn &mov = GetInsnBuilder()->BuildInsn(mop2, *dest, *(result[i])); + GetCurBB()->AppendInsn(mov); + } + /* Create artificial dependency to extend the live range */ + for (uint32 i = 0; i < numRegs; i++) { + AArch64reg preg; + MOperator mop3; + if (fpParm) { + preg = regs[i]; + mop3 = MOP_pseudo_ret_float; + } else { + preg = (i == 0 ? 
R0 : R1); + mop3 = MOP_pseudo_ret_int; + } + dest = &GetOrCreatePhysicalRegisterOperand(preg, loadSize * kBitsPerByte, regType); + Insn &pseudo = GetInsnBuilder()->BuildInsn(mop3, *dest); + GetCurBB()->AppendInsn(pseudo); + } + return; + } else if (x->GetOpCode() == OP_iread) { + IreadNode *iread = static_cast(x); + RegOperand *rhsAddrOpnd = static_cast(HandleExpr(*iread, *iread->Opnd(0))); + rhsAddrOpnd = &LoadIntoRegister(*rhsAddrOpnd, iread->Opnd(0)->GetPrimType()); + MIRPtrType *ptrType = static_cast(GlobalTables::GetTypeTable().GetTypeFromTyIdx(iread->GetTyIdx())); + MIRType *mirType = static_cast(ptrType->GetPointedType()); + bool isRefField = false; + if (iread->GetFieldID() != 0) { + MIRStructType *structType = static_cast(mirType); + mirType = structType->GetFieldType(iread->GetFieldID()); + offset = static_cast(GetBecommon().GetFieldOffset(*structType, iread->GetFieldID()).first); + isRefField = GetBecommon().IsRefField(*structType, iread->GetFieldID()); + } + uint32 typeSize = static_cast(GetBecommon().GetTypeSize(mirType->GetTypeIndex())); + /* generate move to regs. */ + RegOperand *result[kTwoRegister]; /* maximum 16 bytes, 2 registers */ + uint32 loadSize; + if (CGOptions::IsBigEndian()) { + loadSize = k8ByteSize; + } else { + loadSize = (typeSize <= k4ByteSize) ? k4ByteSize : k8ByteSize; + } + uint32 numRegs = (typeSize <= k8ByteSize) ? kOneRegister : kTwoRegister; + for (uint32 i = 0; i < numRegs; i++) { + OfstOperand *rhsOffOpnd = &GetOrCreateOfstOpnd(offset + i * loadSize, loadSize * kBitsPerByte); + Operand &rhsmemopnd = GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, loadSize * kBitsPerByte, + rhsAddrOpnd, nullptr, rhsOffOpnd, nullptr); + result[i] = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, loadSize)); + MOperator mop1 = PickLdInsn(loadSize * kBitsPerByte, PTY_u32); + Insn &ld = GetInsnBuilder()->BuildInsn(mop1, *(result[i]), rhsmemopnd); + ld.MarkAsAccessRefField(isRefField); + GetCurBB()->AppendInsn(ld); + } + RegOperand *dest; + for (uint32 i = 0; i < numRegs; i++) { + AArch64reg preg = (i == 0 ? R0 : R1); + dest = &GetOrCreatePhysicalRegisterOperand(preg, loadSize * kBitsPerByte, kRegTyInt); + Insn &mov = GetInsnBuilder()->BuildInsn(MOP_xmovrr, *dest, *(result[i])); + GetCurBB()->AppendInsn(mov); + } + /* Create artificial dependency to extend the live range */ + for (uint32 i = 0; i < numRegs; i++) { + AArch64reg preg = (i == 0 ? R0 : R1); + dest = &GetOrCreatePhysicalRegisterOperand(preg, loadSize * kBitsPerByte, kRegTyInt); + Insn &pseudo = GetInsnBuilder()->BuildInsn(MOP_pseudo_ret_int, *dest); + GetCurBB()->AppendInsn(pseudo); + } + return; + } else { // dummy return of 0 inserted by front-end at absence of return + ASSERT(x->GetOpCode() == OP_constval, "SelectReturnSendOfStructInRegs: unexpected return operand"); + uint32 typeSize = GetPrimTypeSize(x->GetPrimType()); + RegOperand &dest = GetOrCreatePhysicalRegisterOperand(R0, typeSize * kBitsPerByte, kRegTyInt); + ImmOperand &src = CreateImmOperand(0, k16BitSize, false); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_wmovri32, dest, src)); + return; + } +} + +Operand *AArch64CGFunc::SelectDread(const BaseNode &parent, DreadNode &expr) { + MIRSymbol *symbol = GetFunction().GetLocalOrGlobalSymbol(expr.GetStIdx()); + if (symbol->IsEhIndex()) { + MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_i32)); + /* use the second register return by __builtin_eh_return(). 
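+       (illustrative gloss, not part of the original patch: InitReturnInfo and
+       SetupSecondRetReg below fill in retMech, and GetReg1() then names the
+       second integer return register, presumably R1 on AArch64)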
*/ + AArch64CallConvImpl retLocator(GetBecommon()); + CCLocInfo retMech; + retLocator.InitReturnInfo(*type, retMech); + retLocator.SetupSecondRetReg(*type, retMech); + return &GetOrCreatePhysicalRegisterOperand(static_cast(retMech.GetReg1()), k64BitSize, kRegTyInt); + } + + PrimType symType = symbol->GetType()->GetPrimType(); + uint32 offset = 0; + bool parmCopy = false; + if (expr.GetFieldID() != 0) { + MIRStructType *structType = static_cast(symbol->GetType()); + ASSERT(structType != nullptr, "SelectDread: non-zero fieldID for non-structure"); + symType = structType->GetFieldType(expr.GetFieldID())->GetPrimType(); + offset = static_cast(GetBecommon().GetFieldOffset(*structType, expr.GetFieldID()).first); + parmCopy = IsParamStructCopy(*symbol); + } + + uint32 dataSize = GetPrimTypeBitSize(symType); + uint32 aggSize = 0; + PrimType resultType = expr.GetPrimType(); + if (symType == PTY_agg) { + if (expr.GetPrimType() == PTY_agg) { + aggSize = static_cast(GetBecommon().GetTypeSize(symbol->GetType()->GetTypeIndex().GetIdx())); + dataSize = ((expr.GetFieldID() == 0) ? GetPointerSize() : aggSize) << 3; + resultType = PTY_u64; + symType = resultType; + } else { + dataSize = GetPrimTypeBitSize(expr.GetPrimType()); + } + } + MemOperand *memOpnd = nullptr; + if (aggSize > k8ByteSize) { + if (parent.op == OP_eval) { + if (symbol->GetAttr(ATTR_volatile)) { + /* Need to generate loads for the upper parts of the struct. */ + Operand &dest = GetZeroOpnd(k64BitSize); + uint32 numLoads = static_cast(RoundUp(aggSize, k64BitSize) / k64BitSize); + for (uint32 o = 0; o < numLoads; ++o) { + if (parmCopy) { + memOpnd = &LoadStructCopyBase(*symbol, offset + o * GetPointerSize(), GetPointerSize()); + } else { + memOpnd = &GetOrCreateMemOpnd(*symbol, offset + o * GetPointerSize(), GetPointerSize()); + } + if (IsImmediateOffsetOutOfRange(*memOpnd, GetPointerSize())) { + memOpnd = &SplitOffsetWithAddInstruction(*memOpnd, GetPointerSize()); + } + SelectCopy(dest, PTY_u64, *memOpnd, PTY_u64); + } + } else { + /* No side-effects. No need to generate anything for eval. */ + } + } else { + if (expr.GetFieldID() != 0) { + CHECK_FATAL(false, "SelectDread: Illegal agg size"); + } + } + } + if (parmCopy) { + memOpnd = &LoadStructCopyBase(*symbol, offset, static_cast(dataSize)); + } else { + memOpnd = &GetOrCreateMemOpnd(*symbol, offset, dataSize); + } + if ((memOpnd->GetMemVaryType() == kNotVary) && + IsImmediateOffsetOutOfRange(*memOpnd, dataSize)) { + memOpnd = &SplitOffsetWithAddInstruction(*memOpnd, dataSize); + } + + RegOperand &resOpnd = GetOrCreateResOperand(parent, symType); + /* a local register variable defined with a specified register */ + if (symbol->GetAsmAttr() != UStrIdx(0) && + symbol->GetStorageClass() != kScPstatic && symbol->GetStorageClass() != kScFstatic) { + std::string regDesp = GlobalTables::GetUStrTable().GetStringFromStrIdx(symbol->GetAsmAttr()); + RegOperand &specifiedOpnd = GetOrCreatePhysicalRegisterOperand(regDesp); + return &specifiedOpnd; + } + memOpnd = memOpnd->IsOffsetMisaligned(dataSize) ? 
+ &ConstraintOffsetToSafeRegion(dataSize, *memOpnd, symbol) : memOpnd; + SelectCopy(resOpnd, resultType, *memOpnd, symType); + return &resOpnd; +} + +RegOperand *AArch64CGFunc::SelectRegread(RegreadNode &expr) { + PregIdx pregIdx = expr.GetRegIdx(); + if (IsSpecialPseudoRegister(pregIdx)) { + /* if it is one of special registers */ + return &GetOrCreateSpecialRegisterOperand(-pregIdx, expr.GetPrimType()); + } + RegOperand ® = GetOrCreateVirtualRegisterOperand(GetVirtualRegNOFromPseudoRegIdx(pregIdx)); + if (Globals::GetInstance()->GetOptimLevel() == CGOptions::kLevel0) { + MemOperand *src = GetPseudoRegisterSpillMemoryOperand(pregIdx); + MIRPreg *preg = GetFunction().GetPregTab()->PregFromPregIdx(pregIdx); + PrimType stype = preg->GetPrimType(); + uint32 srcBitLength = GetPrimTypeSize(stype) * kBitsPerByte; + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(PickLdInsn(srcBitLength, stype), reg, *src)); + } + return ® +} + +void AArch64CGFunc::SelectAddrof(Operand &result, StImmOperand &stImm, FieldID field) { + const MIRSymbol *symbol = stImm.GetSymbol(); + if (!GetFunction().IsMayWriteToAddrofStackChecked() && symbol->GetStorageClass() == kScAuto) { + SetStackProtectInfo(kAddrofStack); + } + if ((symbol->GetStorageClass() == kScAuto) || (symbol->GetStorageClass() == kScFormal)) { + if (!CGOptions::IsQuiet()) { + maple::LogInfo::MapleLogger(kLlErr) << + "Warning: we expect AddrOf with StImmOperand is not used for local variables"; + } + AArch64SymbolAlloc *symLoc = + static_cast(GetMemlayout()->GetSymAllocInfo(symbol->GetStIndex())); + ImmOperand *offset = nullptr; + if (symLoc->GetMemSegment()->GetMemSegmentKind() == kMsArgsStkPassed) { + offset = &CreateImmOperand(GetBaseOffset(*symLoc) + stImm.GetOffset(), k64BitSize, false, kUnAdjustVary); + } else if (symLoc->GetMemSegment()->GetMemSegmentKind() == kMsRefLocals) { + auto it = immOpndsRequiringOffsetAdjustmentForRefloc.find(symLoc); + if (it != immOpndsRequiringOffsetAdjustmentForRefloc.end()) { + offset = (*it).second; + } else { + offset = &CreateImmOperand(GetBaseOffset(*symLoc) + stImm.GetOffset(), k64BitSize, false); + immOpndsRequiringOffsetAdjustmentForRefloc[symLoc] = offset; + } + } else if (mirModule.IsJavaModule()) { + auto it = immOpndsRequiringOffsetAdjustment.find(symLoc); + if ((it != immOpndsRequiringOffsetAdjustment.end()) && (symbol->GetType()->GetPrimType() != PTY_agg)) { + offset = (*it).second; + } else { + offset = &CreateImmOperand(GetBaseOffset(*symLoc) + stImm.GetOffset(), k64BitSize, false); + if (symbol->GetType()->GetKind() != kTypeClass) { + immOpndsRequiringOffsetAdjustment[symLoc] = offset; + } + } + } else { + /* Do not cache modified symbol location */ + offset = &CreateImmOperand(GetBaseOffset(*symLoc) + stImm.GetOffset(), k64BitSize, false); + } + + SelectAdd(result, *GetBaseReg(*symLoc), *offset, PTY_u64); + if (GetCG()->GenerateVerboseCG()) { + /* Add a comment */ + Insn *insn = GetCurBB()->GetLastInsn(); + std::string comm = "local/formal var: "; + comm.append(symbol->GetName()); + insn->SetComment(comm); + } + } else if (symbol->IsThreadLocal()) { + SelectAddrofThreadLocal(result, stImm); + return; + } else { + Operand *srcOpnd = &result; + if (!IsAfterRegAlloc()) { + // Create a new vreg/preg for the upper bits of the address + PregIdx pregIdx = GetFunction().GetPregTab()->CreatePreg(PTY_a64); + MIRPreg *tmpPreg = GetFunction().GetPregTab()->PregFromPregIdx(pregIdx); + regno_t vRegNO = NewVReg(kRegTyInt, GetPrimTypeSize(PTY_a64)); + RegOperand &tmpreg = 
GetOrCreateVirtualRegisterOperand(vRegNO); + + // Register this vreg mapping + RegisterVregMapping(vRegNO, pregIdx); + + // Store rematerialization info in the preg + tmpPreg->SetOp(OP_addrof); + tmpPreg->rematInfo.sym = symbol; + tmpPreg->fieldID = field; + tmpPreg->addrUpper = true; + + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xadrp, tmpreg, stImm)); + srcOpnd = &tmpreg; + } else { + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xadrp, result, stImm)); + } + if (CGOptions::IsPIC() && symbol->NeedPIC()) { + /* ldr x0, [x0, #:got_lo12:Ljava_2Flang_2FSystem_3B_7Cout] */ + OfstOperand &offset = CreateOfstOpnd(*stImm.GetSymbol(), stImm.GetOffset(), stImm.GetRelocs()); + + auto size = GetPointerSize() * kBitsPerByte; + MemOperand &memOpnd = GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, size, static_cast(srcOpnd), + nullptr, &offset, nullptr); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(size == k64BitSize ? MOP_xldr : MOP_wldr, + result, memOpnd)); + + if (stImm.GetOffset() > 0) { + ImmOperand &immOpnd = CreateImmOperand(stImm.GetOffset(), result.GetSize(), false); + SelectAdd(result, result, immOpnd, PTY_u64); + } + } else { + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xadrpl12, result, *srcOpnd, stImm)); + } + } +} + +void AArch64CGFunc::SelectAddrof(Operand &result, MemOperand &memOpnd, FieldID field) { + const MIRSymbol *symbol = memOpnd.GetSymbol(); + if (symbol->GetStorageClass() == kScAuto) { + auto *offsetOpnd = static_cast(memOpnd.GetOffsetImmediate()); + Operand &immOpnd = CreateImmOperand(offsetOpnd->GetOffsetValue(), PTY_u32, false); + ASSERT(memOpnd.GetBaseRegister() != nullptr, "nullptr check"); + SelectAdd(result, *memOpnd.GetBaseRegister(), immOpnd, PTY_u32); + if (!GetFunction().IsMayWriteToAddrofStackChecked()) { + SetStackProtectInfo(kAddrofStack); + } + } else if (!IsAfterRegAlloc()) { + // Create a new vreg/preg for the upper bits of the address + PregIdx pregIdx = GetFunction().GetPregTab()->CreatePreg(PTY_a64); + MIRPreg *tmpPreg = GetFunction().GetPregTab()->PregFromPregIdx(pregIdx); + regno_t vRegNO = NewVReg(kRegTyInt, GetPrimTypeSize(PTY_a64)); + RegOperand &tmpreg = GetOrCreateVirtualRegisterOperand(vRegNO); + + // Register this vreg mapping + RegisterVregMapping(vRegNO, pregIdx); + + // Store rematerialization info in the preg + tmpPreg->SetOp(OP_addrof); + tmpPreg->rematInfo.sym = symbol; + tmpPreg->fieldID = field; + tmpPreg->addrUpper = true; + + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xadrp, tmpreg, memOpnd)); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xadrpl12, result, tmpreg, memOpnd)); + } else { + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xadrp, result, memOpnd)); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xadrpl12, result, result, memOpnd)); + } +} + +Operand *AArch64CGFunc::SelectAddrof(AddrofNode &expr, const BaseNode &parent, bool isAddrofoff) { + MIRSymbol *symbol = GetFunction().GetLocalOrGlobalSymbol(expr.GetStIdx()); + int32 offset = 0; + AddrofoffNode &addrofoffExpr = static_cast(static_cast(expr)); + if (isAddrofoff) { + offset = addrofoffExpr.offset; + } else { + if (expr.GetFieldID() != 0) { + MIRStructType *structType = static_cast(symbol->GetType()); + /* with array of structs, it is possible to have nullptr */ + if (structType != nullptr) { + offset = GetBecommon().GetFieldOffset(*structType, expr.GetFieldID()).first; + } + } + } + if ((symbol->GetStorageClass() == kScFormal) && (symbol->GetSKind() == kStVar) && + ((!isAddrofoff && 
expr.GetFieldID() != 0) || + (GetBecommon().GetTypeSize(symbol->GetType()->GetTypeIndex().GetIdx()) > k16ByteSize))) { + /* + * Struct param is copied on the stack by caller if struct size > 16. + * Else if size < 16 then struct param is copied into one or two registers. + */ + RegOperand *stackAddr = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize)); + /* load the base address of the struct copy from stack. */ + SelectAddrof(*stackAddr, CreateStImmOperand(*symbol, 0, 0)); + Operand *structAddr; + if (GetBecommon().GetTypeSize(symbol->GetType()->GetTypeIndex().GetIdx()) <= k16ByteSize) { + isAggParamInReg = true; + structAddr = stackAddr; + } else { + OfstOperand *offopnd = &CreateOfstOpnd(0, k32BitSize); + MemOperand *mo = &GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, GetPointerSize() * kBitsPerByte, + stackAddr, nullptr, offopnd, nullptr); + structAddr = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize)); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xldr, *structAddr, *mo)); + } + if (offset == 0) { + return structAddr; + } else { + /* add the struct offset to the base address */ + Operand *result = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize)); + ImmOperand *imm = &CreateImmOperand(PTY_a64, offset); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xaddrri12, *result, *structAddr, *imm)); + return result; + } + } + PrimType ptype = expr.GetPrimType(); + Operand &result = GetOrCreateResOperand(parent, ptype); + if (symbol->IsReflectionClassInfo() && !symbol->IsReflectionArrayClassInfo() && !GetCG()->IsLibcore()) { + /* + * Turn addrof __cinf_X into a load of _PTR__cinf_X + * adrp x1, _PTR__cinf_Ljava_2Flang_2FSystem_3B + * ldr x1, [x1, #:lo12:_PTR__cinf_Ljava_2Flang_2FSystem_3B] + */ + std::string ptrName = namemangler::kPtrPrefixStr + symbol->GetName(); + MIRType *ptrType = GlobalTables::GetTypeTable().GetPtr(); + symbol = GetMirModule().GetMIRBuilder()->GetOrCreateGlobalDecl(ptrName, *ptrType); + symbol->SetStorageClass(kScFstatic); + + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_adrp_ldr, result, CreateStImmOperand(*symbol, 0, 0))); + /* make it un rematerializable. */ + MIRPreg *preg = GetPseudoRegFromVirtualRegNO(static_cast(result).GetRegisterNumber()); + if (preg) { + preg->SetOp(OP_undef); + } + return &result; + } + + SelectAddrof(result, CreateStImmOperand(*symbol, offset, 0), isAddrofoff ? 0 : expr.GetFieldID()); + return &result; +} + +Operand *AArch64CGFunc::SelectAddrofoff(AddrofoffNode &expr, const BaseNode &parent) { + return SelectAddrof(static_cast(static_cast(expr)), parent, true); +} + +Operand &AArch64CGFunc::SelectAddrofFunc(AddroffuncNode &expr, const BaseNode &parent) { + uint32 instrSize = static_cast(expr.SizeOfInstr()); + PrimType primType = (instrSize == k8ByteSize) ? PTY_u64 : + (instrSize == k4ByteSize) ? PTY_u32 : + (instrSize == k2ByteSize) ? PTY_u16 : PTY_u8; + Operand &operand = GetOrCreateResOperand(parent, primType); + MIRFunction *mirFunction = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(expr.GetPUIdx()); + SelectAddrof(operand, CreateStImmOperand(*mirFunction->GetFuncSymbol(), 0, 0)); + return operand; +} + +/* For an entire aggregate that can fit inside a single 8 byte register. 
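+   Illustrative gloss (added commentary, not part of the original patch): only the
+   exact sizes 8, 16, 32 and 64 bits are mapped, e.g. a 4-byte aggregate is read
+   back as PTY_u32, and any other size trips the CHECK_FATAL in the switch below.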
*/ +PrimType AArch64CGFunc::GetDestTypeFromAggSize(uint32 bitSize) const { + PrimType primType; + switch (bitSize) { + case k8BitSize: { + primType = PTY_u8; + break; + } + case k16BitSize: { + primType = PTY_u16; + break; + } + case k32BitSize: { + primType = PTY_u32; + break; + } + case k64BitSize: { + primType = PTY_u64; + break; + } + default: + CHECK_FATAL(false, "aggregate of unhandled size"); + } + return primType; +} + +Operand &AArch64CGFunc::SelectAddrofLabel(AddroflabelNode &expr, const BaseNode &parent) { + /* adrp reg, label-id */ + uint32 instrSize = static_cast(expr.SizeOfInstr()); + PrimType primType = (instrSize == k8ByteSize) ? PTY_u64 : + (instrSize == k4ByteSize) ? PTY_u32 : + (instrSize == k2ByteSize) ? PTY_u16 : PTY_u8; + Operand &dst = GetOrCreateResOperand(parent, primType); + Operand &immOpnd = CreateImmOperand(expr.GetOffset(), k64BitSize, false); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_adrp_label, dst, immOpnd)); + return dst; +} + +Operand *AArch64CGFunc::SelectIreadoff(const BaseNode &parent, IreadoffNode &ireadoff) { + auto offset = ireadoff.GetOffset(); + auto primType = ireadoff.GetPrimType(); + auto bitSize = GetPrimTypeBitSize(primType); + auto *baseAddr = ireadoff.Opnd(0); + auto *result = &CreateRegisterOperandOfType(primType); + auto *addrOpnd = HandleExpr(ireadoff, *baseAddr); + if (primType == PTY_agg && parent.GetOpCode() == OP_regassign) { + auto &memOpnd = CreateMemOpnd(LoadIntoRegister(*addrOpnd, PTY_a64), offset, bitSize); + auto mop = PickLdInsn(64, PTY_a64); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mop, *result, memOpnd)); + auto ®AssignNode = static_cast(parent); + PregIdx pIdx = regAssignNode.GetRegIdx(); + CHECK_FATAL(IsSpecialPseudoRegister(pIdx), "SelectIreadfpoff of agg"); + (void)LmbcSmallAggForRet(parent, addrOpnd, offset, true); + // result not used + } else { + auto &memOpnd = CreateMemOpnd(LoadIntoRegister(*addrOpnd, PTY_a64), offset, bitSize); + auto mop = PickLdInsn(bitSize, primType); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mop, *result, memOpnd)); + } + return result; +} + +RegOperand *AArch64CGFunc::GenLmbcParamLoad(int32 offset, uint32 byteSize, RegType regType, PrimType primType, + AArch64reg baseRegno) { + MemOperand *memOpnd = GenLmbcFpMemOperand(offset, byteSize, baseRegno); + RegOperand *result = &GetOrCreateVirtualRegisterOperand(NewVReg(regType, byteSize)); + MOperator mOp = PickLdInsn(byteSize * kBitsPerByte, primType); + Insn &load = GetInsnBuilder()->BuildInsn(mOp, *result, *memOpnd); + GetCurBB()->AppendInsn(load); + return result; +} + +RegOperand *AArch64CGFunc::LmbcStructReturnLoad(int32 offset) { + RegOperand *result = nullptr; + MIRFunction &func = GetFunction(); + CHECK_FATAL(func.IsReturnStruct(), "LmbcStructReturnLoad: not struct return"); + MIRType *ty = func.GetReturnType(); + uint32 sz = static_cast(GetBecommon().GetTypeSize(ty->GetTypeIndex())); + uint32 fpSize; + uint32 numFpRegs = FloatParamRegRequired(static_cast(ty), fpSize); + if (numFpRegs > 0) { + PrimType pType = (fpSize <= k4ByteSize) ? 
PTY_f32 : PTY_f64; + for (int32 i = static_cast(numFpRegs - kOneRegister); i > 0; --i) { + result = GenLmbcParamLoad(offset + (i * static_cast(fpSize)), fpSize, kRegTyFloat, pType); + AArch64reg regNo = static_cast(V0 + static_cast(i)); + RegOperand *reg = &GetOrCreatePhysicalRegisterOperand(regNo, fpSize * kBitsPerByte, kRegTyFloat); + SelectCopy(*reg, pType, *result, pType); + Insn &pseudo = GetInsnBuilder()->BuildInsn(MOP_pseudo_ret_float, *reg); + GetCurBB()->AppendInsn(pseudo); + } + result = GenLmbcParamLoad(offset, fpSize, kRegTyFloat, pType); + } else if (sz <= k4ByteSize) { + result = GenLmbcParamLoad(offset, k4ByteSize, kRegTyInt, PTY_u32); + } else if (sz <= k8ByteSize) { + result = GenLmbcParamLoad(offset, k8ByteSize, kRegTyInt, PTY_i64); + } else if (sz <= k16ByteSize) { + result = GenLmbcParamLoad(offset + k8ByteSizeInt, k8ByteSize, kRegTyInt, PTY_i64); + RegOperand *r1 = &GetOrCreatePhysicalRegisterOperand(R1, k8ByteSize * kBitsPerByte, kRegTyInt); + SelectCopy(*r1, PTY_i64, *result, PTY_i64); + Insn &pseudo = GetInsnBuilder()->BuildInsn(MOP_pseudo_ret_int, *r1); + GetCurBB()->AppendInsn(pseudo); + result = GenLmbcParamLoad(offset, k8ByteSize, kRegTyInt, PTY_i64); + } + return result; +} + +Operand *AArch64CGFunc::SelectIreadfpoff(const BaseNode &parent, IreadFPoffNode &ireadoff) { + int32 offset = ireadoff.GetOffset(); + PrimType primType = ireadoff.GetPrimType(); + uint32 bytelen = GetPrimTypeSize(primType); + RegType regty = GetRegTyFromPrimTy(primType); + RegOperand *result = nullptr; + if (offset > 0) { + CHECK_FATAL(false, "Invalid ireadfpoff offset"); + } else { + if (primType == PTY_agg) { + /* agg return */ + CHECK_FATAL(parent.GetOpCode() == OP_regassign, "SelectIreadfpoff of agg"); + result = LmbcStructReturnLoad(offset); + } else { + result = GenLmbcParamLoad(offset, bytelen, regty, primType); + } + } + return result; +} + +Operand *AArch64CGFunc::SelectIread(const BaseNode &parent, IreadNode &expr, + int extraOffset, PrimType finalBitFieldDestType) { + int32 offset = 0; + MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(expr.GetTyIdx()); + MIRPtrType *pointerType = static_cast(type); + ASSERT(pointerType != nullptr, "expect a pointer type at iread node"); + MIRType *pointedType = nullptr; + bool isRefField = false; + AArch64isa::MemoryOrdering memOrd = AArch64isa::kMoNone; + + if (expr.GetFieldID() != 0) { + MIRType *pointedTy = GlobalTables::GetTypeTable().GetTypeFromTyIdx(pointerType->GetPointedTyIdx()); + MIRStructType *structType = nullptr; + if (pointedTy->GetKind() != kTypeJArray) { + structType = static_cast(pointedTy); + } else { + /* it's a Jarray type. 
using it's parent's field info: java.lang.Object */ + structType = static_cast(pointedTy)->GetParentType(); + } + + ASSERT(structType != nullptr, "SelectIread: non-zero fieldID for non-structure"); + pointedType = structType->GetFieldType(expr.GetFieldID()); + offset = GetBecommon().GetFieldOffset(*structType, expr.GetFieldID()).first; + isRefField = GetBecommon().IsRefField(*structType, expr.GetFieldID()); + } else { + pointedType = GetPointedToType(*pointerType); + if (GetFunction().IsJava() && (pointedType->GetKind() == kTypePointer)) { + MIRType *nextPointedType = + GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(pointedType)->GetPointedTyIdx()); + if (nextPointedType->GetKind() != kTypeScalar) { + isRefField = true; /* read from an object array, or an high-dimentional array */ + } + } + } + + RegType regType = GetRegTyFromPrimTy(expr.GetPrimType()); + uint32 regSize = GetPrimTypeSize(expr.GetPrimType()); + if (expr.GetFieldID() == 0 && pointedType->GetPrimType() == PTY_agg) { + /* Maple IR can passing small struct to be loaded into a single register. */ + if (regType == kRegTyFloat) { + /* regsize is correct */ + } else { + uint32 sz = GetBecommon().GetTypeSize(pointedType->GetTypeIndex().GetIdx()); + regSize = (sz <= k4ByteSize) ? k4ByteSize : k8ByteSize; + } + } else if (regSize < k4ByteSize) { + regSize = k4ByteSize; /* 32-bit */ + } + Operand *result = nullptr; + if (parent.GetOpCode() == OP_eval) { + /* regSize << 3, that is regSize * 8, change bytes to bits */ + result = &GetZeroOpnd(regSize << 3); + } else { + result = &GetOrCreateResOperand(parent, expr.GetPrimType()); + } + + PrimType destType = pointedType->GetPrimType(); + + uint32 bitSize = 0; + if ((pointedType->GetKind() == kTypeStructIncomplete) || (pointedType->GetKind() == kTypeClassIncomplete) || + (pointedType->GetKind() == kTypeInterfaceIncomplete)) { + bitSize = GetPrimTypeBitSize(expr.GetPrimType()); + maple::LogInfo::MapleLogger(kLlErr) << "Warning: objsize is zero! \n"; + } else { + if (pointedType->IsStructType()) { + MIRStructType *structType = static_cast(pointedType); + /* size << 3, that is size * 8, change bytes to bits */ + bitSize = static_cast(std::min(structType->GetSize(), static_cast(GetPointerSize())) << 3); + } else { + bitSize = GetPrimTypeBitSize(destType); + } + if (regType == kRegTyFloat) { + destType = expr.GetPrimType(); + bitSize = GetPrimTypeBitSize(destType); + } else if (destType == PTY_agg) { + switch (bitSize) { + case k8BitSize: + destType = PTY_u8; + break; + case k16BitSize: + destType = PTY_u16; + break; + case k32BitSize: + destType = PTY_u32; + break; + case k64BitSize: + destType = PTY_u64; + break; + default: + destType = PTY_u64; // when eval agg . a way to round up + ASSERT(bitSize == 0, " round up empty agg "); + bitSize = k64BitSize; + break; + } + } + } + + MemOperand *memOpnd = + CreateMemOpndOrNull(destType, expr, *expr.Opnd(0), static_cast(offset) + extraOffset, memOrd); + if (aggParamReg != nullptr) { + isAggParamInReg = false; + return aggParamReg; + } + ASSERT(memOpnd != nullptr, "memOpnd should not be nullptr"); + if (isVolLoad && (memOpnd->GetAddrMode() == MemOperand::kAddrModeBOi)) { + memOrd = AArch64isa::kMoAcquire; + isVolLoad = false; + } + + memOpnd = memOpnd->IsOffsetMisaligned(bitSize) ? 
+ &ConstraintOffsetToSafeRegion(bitSize, *memOpnd, nullptr) : memOpnd; + if (memOrd == AArch64isa::kMoNone) { + MOperator mOp = 0; + if (finalBitFieldDestType == kPtyInvalid) { + mOp = PickLdInsn(bitSize, destType); + } else { + mOp = PickLdInsn(GetPrimTypeBitSize(finalBitFieldDestType), finalBitFieldDestType); + } + if ((memOpnd->GetMemVaryType() == kNotVary) && !IsOperandImmValid(mOp, memOpnd, 1)) { + memOpnd = &SplitOffsetWithAddInstruction(*memOpnd, bitSize); + } + Insn &insn = GetInsnBuilder()->BuildInsn(mOp, *result, *memOpnd); + if (parent.GetOpCode() == OP_eval && result->IsRegister() && + static_cast(result)->GetRegisterNumber() == RZR) { + insn.SetComment("null-check"); + } + GetCurBB()->AppendInsn(insn); + + if (parent.op != OP_eval) { + const InsnDesc *md = &AArch64CG::kMd[insn.GetMachineOpcode()]; + auto *prop = md->GetOpndDes(0); + if ((prop->GetSize()) < insn.GetOperand(0).GetSize()) { + switch (destType) { + case PTY_i8: + mOp = MOP_xsxtb64; + break; + case PTY_i16: + mOp = MOP_xsxth64; + break; + case PTY_i32: + mOp = MOP_xsxtw64; + break; + case PTY_u8: + mOp = MOP_xuxtb32; + break; + case PTY_u16: + mOp = MOP_xuxth32; + break; + case PTY_u32: + mOp = MOP_xuxtw64; + break; + default: + break; + } + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn( + mOp, insn.GetOperand(0), insn.GetOperand(0))); + } + } + } else { + if ((memOpnd->GetMemVaryType() == kNotVary) && IsImmediateOffsetOutOfRange(*memOpnd, bitSize)) { + memOpnd = &SplitOffsetWithAddInstruction(*memOpnd, bitSize); + } + AArch64CGFunc::SelectLoadAcquire(*result, destType, *memOpnd, destType, memOrd, false); + } + GetCurBB()->GetLastInsn()->MarkAsAccessRefField(isRefField); + return result; +} + +Operand *AArch64CGFunc::SelectIntConst(MIRIntConst &intConst) { + return &CreateImmOperand(intConst.GetExtValue(), GetPrimTypeSize(intConst.GetType().GetPrimType()) * kBitsPerByte, + false); +} + +template +Operand *SelectLiteral(T *c, MIRFunction *func, uint32 labelIdx, AArch64CGFunc *cgFunc) { + MIRSymbol *st = func->GetSymTab()->CreateSymbol(kScopeLocal); + std::string lblStr(".LB_"); + MIRSymbol *funcSt = GlobalTables::GetGsymTable().GetSymbolFromStidx(func->GetStIdx().Idx()); + std::string funcName = funcSt->GetName(); + lblStr.append(funcName).append(std::to_string(labelIdx)); + st->SetNameStrIdx(lblStr); + st->SetStorageClass(kScPstatic); + st->SetSKind(kStConst); + st->SetKonst(c); + PrimType primType = c->GetType().GetPrimType(); + st->SetTyIdx(TyIdx(primType)); + uint32 typeBitSize = GetPrimTypeBitSize(primType); + + if (cgFunc->GetMirModule().IsCModule() && (T::GetPrimType() == PTY_f32 || T::GetPrimType() == PTY_f64)) { + return static_cast(&cgFunc->GetOrCreateMemOpnd(*st, 0, typeBitSize)); + } + if (T::GetPrimType() == PTY_f32) { + return (fabs(c->GetValue()) < std::numeric_limits::denorm_min()) ? + static_cast(&cgFunc->CreateImmOperand( + Operand::kOpdFPImmediate, 0, static_cast(typeBitSize), false)) : + static_cast(&cgFunc->GetOrCreateMemOpnd(*st, 0, typeBitSize)); + } else if (T::GetPrimType() == PTY_f64) { + return (fabs(c->GetValue()) < std::numeric_limits::denorm_min()) ? 
+ static_cast(&cgFunc->CreateImmOperand( + Operand::kOpdFPImmediate, 0, static_cast(typeBitSize), false)) : + static_cast(&cgFunc->GetOrCreateMemOpnd(*st, 0, typeBitSize)); + } else { + CHECK_FATAL(false, "Unsupported const type"); + } + return nullptr; +} + +Operand *AArch64CGFunc::HandleFmovImm(PrimType stype, int64 val, MIRConst &mirConst, const BaseNode &parent) { + Operand *result; + bool is64Bits = (GetPrimTypeBitSize(stype) == k64BitSize); + uint64 canRepreset = is64Bits ? (val & 0xffffffffffff) : (val & 0x7ffff); + uint32 val1 = is64Bits ? (val >> 61) & 0x3 : (val >> 29) & 0x3; + uint32 val2 = is64Bits ? (val >> 54) & 0xff : (val >> 25) & 0x1f; + bool isSame = is64Bits ? ((val2 == 0) || (val2 == 0xff)) : ((val2 == 0) || (val2 == 0x1f)); + canRepreset = (canRepreset == 0) && ((val1 & 0x1) ^ ((val1 & 0x2) >> 1)) && isSame; + if (canRepreset > 0) { + uint64 temp1 = is64Bits ? (val >> 63) << 7 : (val >> 31) << 7; + uint64 temp2 = is64Bits ? val >> 48 : val >> 19; + int64 imm8 = (temp2 & 0x7f) | temp1; + Operand *newOpnd0 = &CreateImmOperand(imm8, k8BitSize, true, kNotVary, true); + result = &GetOrCreateResOperand(parent, stype); + MOperator mopFmov = (is64Bits ? MOP_xdfmovri : MOP_wsfmovri); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mopFmov, *result, *newOpnd0)); + } else { + if (is64Bits) { // For DoubleConst, use ldr .literal + uint32 labelIdxTmp = GetLabelIdx(); + result = SelectLiteral(static_cast(&mirConst), &GetFunction(), labelIdxTmp++, this); + SetLabelIdx(labelIdxTmp); + return result; + } + Operand *newOpnd0 = &CreateImmOperand(val, GetPrimTypeSize(stype) * kBitsPerByte, false); + PrimType itype = (stype == PTY_f32) ? PTY_i32 : PTY_i64; + RegOperand ®Opnd = LoadIntoRegister(*newOpnd0, itype); + + result = &GetOrCreateResOperand(parent, stype); + MOperator mopFmov = (is64Bits ? 
MOP_xvmovdr : MOP_xvmovsr); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mopFmov, *result, regOpnd)); + } + return result; +} + +Operand *AArch64CGFunc::SelectFloatConst(MIRFloatConst &floatConst, const BaseNode &parent) { + /* according to aarch64 encoding format, convert int to float expression */ + return HandleFmovImm(floatConst.GetType().GetPrimType(), floatConst.GetIntValue(), floatConst, parent); +} + +Operand *AArch64CGFunc::SelectDoubleConst(MIRDoubleConst &doubleConst, const BaseNode &parent) { + /* according to aarch64 encoding format, convert int to float expression */ + return HandleFmovImm(doubleConst.GetType().GetPrimType(), doubleConst.GetIntValue(), doubleConst, parent); +} + +template +Operand *SelectStrLiteral(T &c, AArch64CGFunc &cgFunc) { + std::string labelStr; + if (c.GetKind() == kConstStrConst) { + labelStr.append(".LUstr_"); + } else if (c.GetKind() == kConstStr16Const) { + labelStr.append(".LUstr16_"); + } else { + CHECK_FATAL(false, "Unsupported literal type"); + } + labelStr.append(std::to_string(c.GetValue())); + + MIRSymbol *labelSym = GlobalTables::GetGsymTable().GetSymbolFromStrIdx( + GlobalTables::GetStrTable().GetStrIdxFromName(labelStr)); + if (labelSym == nullptr) { + labelSym = cgFunc.GetMirModule().GetMIRBuilder()->CreateGlobalDecl(labelStr, c.GetType()); + labelSym->SetStorageClass(kScFstatic); + labelSym->SetSKind(kStConst); + /* c may be local, we need a global node here */ + labelSym->SetKonst(cgFunc.NewMirConst(c)); + } + + if (c.GetPrimType() == PTY_ptr) { + StImmOperand &stOpnd = cgFunc.CreateStImmOperand(*labelSym, 0, 0); + RegOperand &addrOpnd = cgFunc.CreateRegisterOperandOfType(PTY_a64); + cgFunc.SelectAddrof(addrOpnd, stOpnd); + return &addrOpnd; + } + CHECK_FATAL(false, "Unsupported const string type"); + return nullptr; +} + +Operand *AArch64CGFunc::SelectStrConst(MIRStrConst &strConst) { + return SelectStrLiteral(strConst, *this); +} + +Operand *AArch64CGFunc::SelectStr16Const(MIRStr16Const &str16Const) { + return SelectStrLiteral(str16Const, *this); +} + +static inline void AppendInstructionTo(Insn &i, CGFunc &f) { + f.GetCurBB()->AppendInsn(i); +} + +/* + * Returns the number of leading 0-bits in x, starting at the most significant bit position. + * If x is 0, the result is -1. + */ +static int32 GetHead0BitNum(int64 val) { + uint32 bitNum = 0; + for (; bitNum < k64BitSize; bitNum++) { + if ((0x8000000000000000ULL >> static_cast(bitNum)) & static_cast(val)) { + break; + } + } + if (bitNum == k64BitSize) { + return -1; + } + return bitNum; +} + +/* + * Returns the number of trailing 0-bits in x, starting at the least significant bit position. + * If x is 0, the result is -1. + */ +static int32 GetTail0BitNum(int64 val) { + uint32 bitNum = 0; + for (; bitNum < k64BitSize; bitNum++) { + if ((static_cast(1) << static_cast(bitNum)) & static_cast(val)) { + break; + } + } + if (bitNum == k64BitSize) { + return -1; + } + return bitNum; +} + +/* + * If the input integer is power of 2, return log2(input) + * else return -1 + */ +static inline int32 GetLog2(uint64 val) { + if (__builtin_popcountll(val) == 1) { + return __builtin_ffsll(static_cast(val)) - 1; + } + return -1; +} + +MOperator AArch64CGFunc::PickJmpInsn(Opcode brOp, Opcode cmpOp, bool isFloat, bool isSigned) const { + switch (cmpOp) { + case OP_ne: + return (brOp == OP_brtrue) ? MOP_bne : MOP_beq; + case OP_eq: + return (brOp == OP_brtrue) ? MOP_beq : MOP_bne; + case OP_lt: + return (brOp == OP_brtrue) ? (isSigned ? MOP_blt : MOP_blo) + : (isFloat ? MOP_bpl : (isSigned ? 
MOP_bge : MOP_bhs)); + case OP_le: + return (brOp == OP_brtrue) ? (isSigned ? MOP_ble : MOP_bls) + : (isFloat ? MOP_bhi : (isSigned ? MOP_bgt : MOP_bhi)); + case OP_gt: + return (brOp == OP_brtrue) ? (isFloat ? MOP_bgt : (isSigned ? MOP_bgt : MOP_bhi)) + : (isSigned ? MOP_ble : MOP_bls); + case OP_ge: + return (brOp == OP_brtrue) ? (isFloat ? MOP_bpl : (isSigned ? MOP_bge : MOP_bhs)) + : (isSigned ? MOP_blt : MOP_blo); + default: + CHECK_FATAL(false, "PickJmpInsn error"); + } +} + +bool AArch64CGFunc::GenerateCompareWithZeroInstruction(Opcode jmpOp, Opcode cmpOp, bool is64Bits, + PrimType primType, + LabelOperand &targetOpnd, Operand &opnd0) { + bool finish = true; + MOperator mOpCode = MOP_undef; + switch (cmpOp) { + case OP_ne: { + if (jmpOp == OP_brtrue) { + mOpCode = is64Bits ? MOP_xcbnz : MOP_wcbnz; + } else { + mOpCode = is64Bits ? MOP_xcbz : MOP_wcbz; + } + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOpCode, opnd0, targetOpnd)); + break; + } + case OP_eq: { + if (jmpOp == OP_brtrue) { + mOpCode = is64Bits ? MOP_xcbz : MOP_wcbz; + } else { + mOpCode = is64Bits ? MOP_xcbnz : MOP_wcbnz; + } + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOpCode, opnd0, targetOpnd)); + break; + } + /* + * TBZ/TBNZ instruction have a range of +/-32KB, need to check if the jump target is reachable in a later + * phase. If the branch target is not reachable, then we change tbz/tbnz into combination of ubfx and + * cbz/cbnz, which will clobber one extra register. With LSRA under O2, we can use of the reserved registers + * for that purpose. + */ + case OP_lt: { + if (primType == PTY_u64 || primType == PTY_u32) { + return false; + } + ImmOperand &signBit = CreateImmOperand(is64Bits ? kHighestBitOf64Bits : kHighestBitOf32Bits, k8BitSize, false); + if (jmpOp == OP_brtrue) { + mOpCode = is64Bits ? MOP_xtbnz : MOP_wtbnz; + } else { + mOpCode = is64Bits ? MOP_xtbz : MOP_wtbz; + } + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOpCode, opnd0, signBit, targetOpnd)); + break; + } + case OP_ge: { + if (primType == PTY_u64 || primType == PTY_u32) { + return false; + } + ImmOperand &signBit = CreateImmOperand(is64Bits ? kHighestBitOf64Bits : kHighestBitOf32Bits, k8BitSize, false); + if (jmpOp == OP_brtrue) { + mOpCode = is64Bits ? MOP_xtbz : MOP_wtbz; + } else { + mOpCode = is64Bits ? MOP_xtbnz : MOP_wtbnz; + } + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOpCode, opnd0, signBit, targetOpnd)); + break; + } + default: + finish = false; + break; + } + return finish; +} + +void AArch64CGFunc::SelectIgoto(Operand *opnd0) { + Operand *srcOpnd = opnd0; + if (opnd0->GetKind() == Operand::kOpdMem) { + Operand *dst = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize)); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xldr, *dst, *opnd0)); + srcOpnd = dst; + } + GetCurBB()->SetKind(BB::kBBIgoto); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xbr, *srcOpnd)); +} + +void AArch64CGFunc::SelectCondGoto(LabelOperand &targetOpnd, Opcode jmpOp, Opcode cmpOp, Operand &origOpnd0, + Operand &origOpnd1, PrimType primType, bool signedCond) { + Operand *opnd0 = &origOpnd0; + Operand *opnd1 = &origOpnd1; + opnd0 = &LoadIntoRegister(origOpnd0, primType); + + bool is64Bits = GetPrimTypeBitSize(primType) == k64BitSize; + bool isFloat = IsPrimitiveFloat(primType); + Operand &rflag = GetOrCreateRflag(); + if (isFloat) { + opnd1 = &LoadIntoRegister(origOpnd1, primType); + MOperator mOp = is64Bits ? MOP_dcmperr : ((GetPrimTypeBitSize(primType) == k32BitSize) ? 
MOP_scmperr : MOP_hcmperr); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, rflag, *opnd0, *opnd1)); + } else { + bool isImm = ((origOpnd1.GetKind() == Operand::kOpdImmediate) || (origOpnd1.GetKind() == Operand::kOpdOffset)); + if ((origOpnd1.GetKind() != Operand::kOpdRegister) && !isImm) { + opnd1 = &SelectCopy(origOpnd1, primType, primType); + } + MOperator mOp = is64Bits ? MOP_xcmprr : MOP_wcmprr; + + if (isImm) { + /* Special cases, i.e., comparing with zero + * Do not perform optimization for C, unlike Java which has no unsigned int. + */ + if (static_cast(opnd1)->IsZero() && + (Globals::GetInstance()->GetOptimLevel() > CGOptions::kLevel0)) { + bool finish = GenerateCompareWithZeroInstruction(jmpOp, cmpOp, is64Bits, primType, targetOpnd, *opnd0); + if (finish) { + return; + } + } + + /* + * aarch64 assembly takes up to 24-bits immediate, generating + * either cmp or cmp with shift 12 encoding + */ + ImmOperand *immOpnd = static_cast(opnd1); + if (immOpnd->IsInBitSize(kMaxImmVal12Bits, 0) || + immOpnd->IsInBitSize(kMaxImmVal12Bits, kMaxImmVal12Bits)) { + mOp = is64Bits ? MOP_xcmpri : MOP_wcmpri; + } else { + opnd1 = &SelectCopy(*opnd1, primType, primType); + } + } + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, rflag, *opnd0, *opnd1)); + } + + bool isSigned = IsPrimitiveInteger(primType) ? IsSignedInteger(primType) : (signedCond ? true : false); + MOperator jmpOperator = PickJmpInsn(jmpOp, cmpOp, isFloat, isSigned); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(jmpOperator, rflag, targetOpnd)); +} + +/* + * brtrue @label0 (ge u8 i32 ( + * cmp i32 i64 (dread i64 %Reg2_J, dread i64 %Reg4_J), + * constval i32 0)) + * ===> + * cmp r1, r2 + * bge Cond, label0 + */ +void AArch64CGFunc::SelectCondSpecialCase1(CondGotoNode &stmt, BaseNode &expr) { + ASSERT(expr.GetOpCode() == OP_cmp, "unexpect opcode"); + Operand *opnd0 = HandleExpr(expr, *expr.Opnd(0)); + Operand *opnd1 = HandleExpr(expr, *expr.Opnd(1)); + CompareNode *node = static_cast(&expr); + bool isFloat = IsPrimitiveFloat(node->GetOpndType()); + opnd0 = &LoadIntoRegister(*opnd0, node->GetOpndType()); + /* + * most of FP constants are passed as MemOperand + * except 0.0 which is passed as kOpdFPImmediate + */ + Operand::OperandType opnd1Type = opnd1->GetKind(); + if ((opnd1Type != Operand::kOpdImmediate) && (opnd1Type != Operand::kOpdFPImmediate) && + (opnd1Type != Operand::kOpdOffset)) { + opnd1 = &LoadIntoRegister(*opnd1, node->GetOpndType()); + } + SelectAArch64Cmp(*opnd0, *opnd1, !isFloat, GetPrimTypeBitSize(node->GetOpndType())); + /* handle condgoto now. */ + LabelIdx labelIdx = stmt.GetOffset(); + BaseNode *condNode = stmt.Opnd(0); + LabelOperand &targetOpnd = GetOrCreateLabelOperand(labelIdx); + Opcode cmpOp = condNode->GetOpCode(); + PrimType pType = static_cast(condNode)->GetOpndType(); + isFloat = IsPrimitiveFloat(pType); + Operand &rflag = GetOrCreateRflag(); + bool isSigned = IsPrimitiveInteger(pType) ? IsSignedInteger(pType) : + (IsSignedInteger(condNode->GetPrimType()) ? 
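+ /* the compared operands are floats here, so the branch signedness falls back to the
+  * compare node's own result type */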
true : false); + MOperator jmpOp = PickJmpInsn(stmt.GetOpCode(), cmpOp, isFloat, isSigned); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(jmpOp, rflag, targetOpnd)); +} + +/* + * Special case: + * brfalse(ge (cmpg (op0, op1), 0) ==> + * fcmp op1, op2 + * blo + */ +void AArch64CGFunc::SelectCondSpecialCase2(const CondGotoNode &stmt, BaseNode &expr) { + auto &cmpNode = static_cast(expr); + Operand *opnd0 = HandleExpr(cmpNode, *cmpNode.Opnd(0)); + Operand *opnd1 = HandleExpr(cmpNode, *cmpNode.Opnd(1)); + PrimType operandType = cmpNode.GetOpndType(); + opnd0 = opnd0->IsRegister() ? static_cast(opnd0) + : &SelectCopy(*opnd0, operandType, operandType); + Operand::OperandType opnd1Type = opnd1->GetKind(); + if ((opnd1Type != Operand::kOpdImmediate) && (opnd1Type != Operand::kOpdFPImmediate) && + (opnd1Type != Operand::kOpdOffset)) { + opnd1 = opnd1->IsRegister() ? static_cast(opnd1) + : &SelectCopy(*opnd1, operandType, operandType); + } +#ifdef DEBUG + bool isFloat = IsPrimitiveFloat(operandType); + if (!isFloat) { + ASSERT(false, "incorrect operand types"); + } +#endif + SelectTargetFPCmpQuiet(*opnd0, *opnd1, GetPrimTypeBitSize(operandType)); + Operand &rFlag = GetOrCreateRflag(); + LabelIdx tempLabelIdx = stmt.GetOffset(); + LabelOperand &targetOpnd = GetOrCreateLabelOperand(tempLabelIdx); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_blo, rFlag, targetOpnd)); +} + +void AArch64CGFunc::SelectCondGoto(CondGotoNode &stmt, Operand &opnd0, Operand &opnd1) { + /* + * handle brfalse/brtrue op, opnd0 can be a compare node or non-compare node + * such as a dread for example + */ + LabelIdx labelIdx = stmt.GetOffset(); + BaseNode *condNode = stmt.Opnd(0); + LabelOperand &targetOpnd = GetOrCreateLabelOperand(labelIdx); + Opcode cmpOp; + + if (opnd0.IsRegister() && (static_cast(&opnd0)->GetValidBitsNum() == 1) && + (condNode->GetOpCode() == OP_lior)) { + ImmOperand &condBit = CreateImmOperand(0, k8BitSize, false); + if (stmt.GetOpCode() == OP_brtrue) { + GetCurBB()->AppendInsn( + GetInsnBuilder()->BuildInsn(MOP_wtbnz, static_cast(opnd0), condBit, targetOpnd)); + } else { + GetCurBB()->AppendInsn( + GetInsnBuilder()->BuildInsn(MOP_wtbz, static_cast(opnd0), condBit, targetOpnd)); + } + return; + } + + PrimType pType; + if (kOpcodeInfo.IsCompare(condNode->GetOpCode())) { + cmpOp = condNode->GetOpCode(); + pType = static_cast(condNode)->GetOpndType(); + } else { + /* not a compare node; dread for example, take its pType */ + cmpOp = OP_ne; + pType = condNode->GetPrimType(); + } + bool signedCond = IsSignedInteger(pType) || IsPrimitiveFloat(pType); + SelectCondGoto(targetOpnd, stmt.GetOpCode(), cmpOp, opnd0, opnd1, pType, signedCond); +} + +void AArch64CGFunc::SelectGoto(GotoNode &stmt) { + Operand &targetOpnd = GetOrCreateLabelOperand(stmt.GetOffset()); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xuncond, targetOpnd)); + GetCurBB()->SetKind(BB::kBBGoto); +} + +Operand *AArch64CGFunc::SelectAdd(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) { + PrimType dtype = node.GetPrimType(); + bool isSigned = IsSignedInteger(dtype); + uint32 dsize = GetPrimTypeBitSize(dtype); + bool is64Bits = (dsize == k64BitSize); + bool isFloat = IsPrimitiveFloat(dtype); + RegOperand *resOpnd = nullptr; + if (!IsPrimitiveVector(dtype)) { + /* promoted type */ + PrimType primType = + isFloat ? dtype : ((is64Bits ? (isSigned ? PTY_i64 : PTY_u64) : (isSigned ? 
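+ /* integer adds run in the promoted width: i8/i16 operands are widened to i32/u32
+  * (or i64/u64); floating-point keeps dtype unchanged */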
PTY_i32 : PTY_u32))); + if (parent.GetOpCode() == OP_regassign) { + auto ®AssignNode = static_cast(parent); + PregIdx pregIdx = regAssignNode.GetRegIdx(); + if (IsSpecialPseudoRegister(pregIdx)) { + resOpnd = &GetOrCreateSpecialRegisterOperand(-pregIdx, dtype); + } else { + resOpnd = &GetOrCreateVirtualRegisterOperand(GetVirtualRegNOFromPseudoRegIdx(pregIdx)); + } + } else { + resOpnd = &CreateRegisterOperandOfType(primType); + } + SelectAdd(*resOpnd, opnd0, opnd1, primType); + } else { + /* vector operands */ + resOpnd = SelectVectorBinOp(dtype, &opnd0, node.Opnd(0)->GetPrimType(), &opnd1, + node.Opnd(1)->GetPrimType(), OP_add); + } + return resOpnd; +} + +void AArch64CGFunc::SelectAdd(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) { + Operand::OperandType opnd0Type = opnd0.GetKind(); + Operand::OperandType opnd1Type = opnd1.GetKind(); + uint32 dsize = GetPrimTypeBitSize(primType); + bool is64Bits = (dsize == k64BitSize); + if (opnd0Type != Operand::kOpdRegister) { + /* add #imm, #imm */ + if (opnd1Type != Operand::kOpdRegister) { + SelectAdd(resOpnd, SelectCopy(opnd0, primType, primType), opnd1, primType); + return; + } + /* add #imm, reg */ + SelectAdd(resOpnd, opnd1, opnd0, primType); /* commutative */ + return; + } + /* add reg, reg */ + if (opnd1Type == Operand::kOpdRegister) { + ASSERT(IsPrimitiveFloat(primType) || IsPrimitiveInteger(primType), "NYI add"); + MOperator mOp = IsPrimitiveFloat(primType) ? + (is64Bits ? MOP_dadd : MOP_sadd) : (is64Bits ? MOP_xaddrrr : MOP_waddrrr); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, resOpnd, opnd0, opnd1)); + return; + } else if (opnd1Type == Operand::kOpdStImmediate) { + CHECK_FATAL(is64Bits, "baseReg of mem in aarch64 must be 64bit size"); + /* add reg, reg, #:lo12:sym+offset */ + StImmOperand &stImmOpnd = static_cast(opnd1); + Insn &newInsn = GetInsnBuilder()->BuildInsn(MOP_xadrpl12, resOpnd, opnd0, stImmOpnd); + GetCurBB()->AppendInsn(newInsn); + return; + } else if (!((opnd1Type == Operand::kOpdImmediate) || (opnd1Type == Operand::kOpdOffset))) { + /* add reg, otheregType */ + SelectAdd(resOpnd, opnd0, SelectCopy(opnd1, primType, primType), primType); + return; + } else { + /* add reg, #imm */ + ImmOperand *immOpnd = static_cast(&opnd1); + if (immOpnd->IsNegative()) { + immOpnd->Negate(); + SelectSub(resOpnd, opnd0, *immOpnd, primType); + return; + } + if (immOpnd->IsInBitSize(kMaxImmVal24Bits, 0)) { + /* + * ADD Wd|WSP, Wn|WSP, #imm{, shift} ; 32-bit general registers + * ADD Xd|SP, Xn|SP, #imm{, shift} ; 64-bit general registers + * imm : 0 ~ 4095, shift: none, LSL #0, or LSL #12 + * aarch64 assembly takes up to 24-bits, if the lower 12 bits is all 0 + */ + MOperator mOpCode = MOP_undef; + Operand *newOpnd0 = &opnd0; + if (!(immOpnd->IsInBitSize(kMaxImmVal12Bits, 0) || + immOpnd->IsInBitSize(kMaxImmVal12Bits, kMaxImmVal12Bits))) { + /* process higher 12 bits */ + ImmOperand &immOpnd2 = + CreateImmOperand(static_cast(static_cast(immOpnd->GetValue()) >> kMaxImmVal12Bits), + immOpnd->GetSize(), immOpnd->IsSignedValue()); + mOpCode = is64Bits ? MOP_xaddrri24 : MOP_waddrri24; + Operand *tmpRes = IsAfterRegAlloc() ? 
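+ /* after register allocation no fresh virtual register is available, so the destination
+  * doubles as the scratch register. E.g. adding #0x12345 becomes:
+  *   add  tmp, x0, #0x12, LSL #12
+  *   add  res, tmp, #0x345 */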
&resOpnd : &CreateRegisterOperandOfType(primType); + BitShiftOperand &shiftopnd = CreateBitShiftOperand(BitShiftOperand::kLSL, kShiftAmount12, k64BitSize); + Insn &newInsn = GetInsnBuilder()->BuildInsn(mOpCode, *tmpRes, opnd0, immOpnd2, shiftopnd); + GetCurBB()->AppendInsn(newInsn); + immOpnd->ModuloByPow2(kMaxImmVal12Bits); + newOpnd0 = tmpRes; + } + /* process lower 12 bits */ + mOpCode = is64Bits ? MOP_xaddrri12 : MOP_waddrri12; + Insn &newInsn = GetInsnBuilder()->BuildInsn(mOpCode, resOpnd, *newOpnd0, *immOpnd); + GetCurBB()->AppendInsn(newInsn); + return; + } + /* load into register */ + int64 immVal = immOpnd->GetValue(); + int32 tail0bitNum = GetTail0BitNum(immVal); + int32 head0bitNum = GetHead0BitNum(immVal); + const int32 bitNum = (k64BitSizeInt - head0bitNum) - tail0bitNum; + RegOperand ®Opnd = CreateRegisterOperandOfType(primType); + if (isAfterRegAlloc) { + RegType regty = GetRegTyFromPrimTy(primType); + uint32 bytelen = GetPrimTypeSize(primType); + regOpnd = GetOrCreatePhysicalRegisterOperand(static_cast(R16), bytelen, regty); + } + regno_t regNO0 = static_cast(opnd0).GetRegisterNumber(); + /* addrrrs do not support sp */ + if (bitNum <= k16ValidBit && regNO0 != RSP) { + int64 newImm = (static_cast(immVal) >> static_cast(tail0bitNum)) & 0xFFFF; + ImmOperand &immOpnd1 = CreateImmOperand(newImm, k16BitSize, false); + SelectCopyImm(regOpnd, immOpnd1, primType); + uint32 mopBadd = is64Bits ? MOP_xaddrrrs : MOP_waddrrrs; + int32 bitLen = is64Bits ? kBitLenOfShift64Bits : kBitLenOfShift32Bits; + BitShiftOperand &bitShiftOpnd = + CreateBitShiftOperand(BitShiftOperand::kLSL, static_cast(tail0bitNum), bitLen); + Insn &newInsn = GetInsnBuilder()->BuildInsn(mopBadd, resOpnd, opnd0, regOpnd, bitShiftOpnd); + GetCurBB()->AppendInsn(newInsn); + return; + } + + SelectCopyImm(regOpnd, *immOpnd, primType); + MOperator mOpCode = is64Bits ? MOP_xaddrrr : MOP_waddrrr; + Insn &newInsn = GetInsnBuilder()->BuildInsn(mOpCode, resOpnd, opnd0, regOpnd); + GetCurBB()->AppendInsn(newInsn); + } +} + +Operand *AArch64CGFunc::SelectMadd(BinaryNode &node, Operand &opndM0, Operand &opndM1, Operand &opnd1, + const BaseNode &parent) { + PrimType dtype = node.GetPrimType(); + bool isSigned = IsSignedInteger(dtype); + uint32 dsize = GetPrimTypeBitSize(dtype); + bool is64Bits = (dsize == k64BitSize); + /* promoted type */ + PrimType primType = is64Bits ? (isSigned ? PTY_i64 : PTY_u64) : (isSigned ? PTY_i32 : PTY_u32); + RegOperand &resOpnd = GetOrCreateResOperand(parent, primType); + SelectMadd(resOpnd, opndM0, opndM1, opnd1, primType); + return &resOpnd; +} + +void AArch64CGFunc::SelectMadd(Operand &resOpnd, Operand &opndM0, Operand &opndM1, Operand &opnd1, PrimType primType) { + Operand::OperandType opndM0Type = opndM0.GetKind(); + Operand::OperandType opndM1Type = opndM1.GetKind(); + Operand::OperandType opnd1Type = opnd1.GetKind(); + uint32 dsize = GetPrimTypeBitSize(primType); + bool is64Bits = (dsize == k64BitSize); + + if (opndM0Type != Operand::kOpdRegister) { + SelectMadd(resOpnd, SelectCopy(opndM0, primType, primType), opndM1, opnd1, primType); + return; + } else if (opndM1Type != Operand::kOpdRegister) { + SelectMadd(resOpnd, opndM0, SelectCopy(opndM1, primType, primType), opnd1, primType); + return; + } else if (opnd1Type != Operand::kOpdRegister) { + SelectMadd(resOpnd, opndM0, opndM1, SelectCopy(opnd1, primType, primType), primType); + return; + } + + ASSERT(IsPrimitiveInteger(primType), "NYI MAdd"); + MOperator mOp = is64Bits ? 
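+ /* madd res, m0, m1, a computes a + m0 * m1 in a single instruction;
+  * all three inputs have been forced into registers above */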
MOP_xmaddrrrr : MOP_wmaddrrrr; + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, resOpnd, opndM0, opndM1, opnd1)); +} + +Operand &AArch64CGFunc::SelectCGArrayElemAdd(BinaryNode &node, const BaseNode &parent) { + BaseNode *opnd0 = node.Opnd(0); + BaseNode *opnd1 = node.Opnd(1); + ASSERT(opnd1->GetOpCode() == OP_constval, "Internal error, opnd1->op should be OP_constval."); + + switch (opnd0->op) { + case OP_regread: { + RegreadNode *regreadNode = static_cast(opnd0); + return *SelectRegread(*regreadNode); + } + case OP_addrof: { + AddrofNode *addrofNode = static_cast(opnd0); + MIRSymbol &symbol = *mirModule.CurFunction()->GetLocalOrGlobalSymbol(addrofNode->GetStIdx()); + ASSERT(addrofNode->GetFieldID() == 0, "For debug SelectCGArrayElemAdd."); + + Operand &result = GetOrCreateResOperand(parent, PTY_a64); + + /* OP_constval */ + ConstvalNode *constvalNode = static_cast(opnd1); + MIRConst *mirConst = constvalNode->GetConstVal(); + MIRIntConst *mirIntConst = static_cast(mirConst); + SelectAddrof(result, CreateStImmOperand(symbol, mirIntConst->GetExtValue(), 0)); + + return result; + } + default: + CHECK_FATAL(false, "Internal error, cannot handle opnd0."); + } +} + +void AArch64CGFunc::SelectSub(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) { + Operand::OperandType opnd1Type = opnd1.GetKind(); + uint32 dsize = GetPrimTypeBitSize(primType); + bool is64Bits = (dsize == k64BitSize); + bool isFloat = IsPrimitiveFloat(primType); + Operand *opnd0Bak = &LoadIntoRegister(opnd0, primType); + if (opnd1Type == Operand::kOpdRegister) { + MOperator mOp = isFloat ? (is64Bits ? MOP_dsub : MOP_ssub) : (is64Bits ? MOP_xsubrrr : MOP_wsubrrr); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, resOpnd, *opnd0Bak, opnd1)); + return; + } + + if ((opnd1Type != Operand::kOpdImmediate) && (opnd1Type != Operand::kOpdOffset)) { + SelectSub(resOpnd, *opnd0Bak, SelectCopy(opnd1, primType, primType), primType); + return; + } + + ImmOperand *immOpnd = static_cast(&opnd1); + if (immOpnd->IsNegative()) { + immOpnd->Negate(); + SelectAdd(resOpnd, *opnd0Bak, *immOpnd, primType); + return; + } + + int64 higher12BitVal = static_cast(static_cast(immOpnd->GetValue()) >> kMaxImmVal12Bits); + if (immOpnd->IsInBitSize(kMaxImmVal24Bits, 0) && higher12BitVal + 1 <= kMaxPimm8) { + /* + * SUB Wd|WSP, Wn|WSP, #imm{, shift} ; 32-bit general registers + * SUB Xd|SP, Xn|SP, #imm{, shift} ; 64-bit general registers + * imm : 0 ~ 4095, shift: none, LSL #0, or LSL #12 + * aarch64 assembly takes up to 24-bits, if the lower 12 bits is all 0 + * large offset is treated as sub (higher 12 bits + 4096) + add + * it gives opportunities for combining add + ldr due to the characteristics of aarch64's load/store + */ + MOperator mOpCode = MOP_undef; + bool isSplitSub = false; + if (!(immOpnd->IsInBitSize(kMaxImmVal12Bits, 0) || + immOpnd->IsInBitSize(kMaxImmVal12Bits, kMaxImmVal12Bits))) { + isSplitSub = true; + /* process higher 12 bits */ + ImmOperand &immOpnd2 = + CreateImmOperand(higher12BitVal + 1, immOpnd->GetSize(), immOpnd->IsSignedValue()); + + mOpCode = is64Bits ? 
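+ /* e.g. subtracting #0x1234:
+  *   sub  res, x0, #0x2, LSL #12   // 0x2000, i.e. higher 12 bits + 1
+  *   add  res, res, #0xdcc         // 0x2000 - 0xdcc == 0x1234 */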
MOP_xsubrri24 : MOP_wsubrri24; + BitShiftOperand &shiftopnd = CreateBitShiftOperand(BitShiftOperand::kLSL, kShiftAmount12, k64BitSize); + Insn &newInsn = GetInsnBuilder()->BuildInsn(mOpCode, resOpnd, *opnd0Bak, immOpnd2, shiftopnd); + GetCurBB()->AppendInsn(newInsn); + immOpnd->ModuloByPow2(kMaxImmVal12Bits); + immOpnd->SetValue(static_cast(kMax12UnsignedImm) - immOpnd->GetValue()); + opnd0Bak = &resOpnd; + } + /* process lower 12 bits */ + mOpCode = isSplitSub ? (is64Bits ? MOP_xaddrri12 : MOP_waddrri12) : (is64Bits ? MOP_xsubrri12 : MOP_wsubrri12); + Insn &newInsn = GetInsnBuilder()->BuildInsn(mOpCode, resOpnd, *opnd0Bak, *immOpnd); + GetCurBB()->AppendInsn(newInsn); + return; + } + + /* load into register */ + int64 immVal = immOpnd->GetValue(); + int32 tail0bitNum = GetTail0BitNum(immVal); + int32 head0bitNum = GetHead0BitNum(immVal); + const int32 bitNum = (k64BitSizeInt - head0bitNum) - tail0bitNum; + RegOperand ®Opnd = CreateRegisterOperandOfType(primType); + if (isAfterRegAlloc) { + RegType regty = GetRegTyFromPrimTy(primType); + uint32 bytelen = GetPrimTypeSize(primType); + regOpnd = GetOrCreatePhysicalRegisterOperand(static_cast(R16), bytelen, regty); + } + + if (bitNum <= k16ValidBit) { + int64 newImm = (static_cast(immVal) >> static_cast(tail0bitNum)) & 0xFFFF; + ImmOperand &immOpnd1 = CreateImmOperand(newImm, k16BitSize, false); + SelectCopyImm(regOpnd, immOpnd1, primType); + uint32 mopBsub = is64Bits ? MOP_xsubrrrs : MOP_wsubrrrs; + int32 bitLen = is64Bits ? kBitLenOfShift64Bits : kBitLenOfShift32Bits; + BitShiftOperand &bitShiftOpnd = + CreateBitShiftOperand(BitShiftOperand::kLSL, static_cast(tail0bitNum), bitLen); + GetCurBB()->AppendInsn( + GetInsnBuilder()->BuildInsn(mopBsub, resOpnd, *opnd0Bak, regOpnd, bitShiftOpnd)); + return; + } + + SelectCopyImm(regOpnd, *immOpnd, primType); + MOperator mOpCode = is64Bits ? MOP_xsubrrr : MOP_wsubrrr; + Insn &newInsn = GetInsnBuilder()->BuildInsn(mOpCode, resOpnd, *opnd0Bak, regOpnd); + GetCurBB()->AppendInsn(newInsn); +} + +Operand *AArch64CGFunc::SelectSub(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) { + PrimType dtype = node.GetPrimType(); + bool isSigned = IsSignedInteger(dtype); + uint32 dsize = GetPrimTypeBitSize(dtype); + bool is64Bits = (dsize == k64BitSize); + bool isFloat = IsPrimitiveFloat(dtype); + RegOperand *resOpnd = nullptr; + if (!IsPrimitiveVector(dtype)) { + /* promoted type */ + PrimType primType = + isFloat ? dtype : ((is64Bits ? (isSigned ? PTY_i64 : PTY_u64) : (isSigned ? PTY_i32 : PTY_u32))); + resOpnd = &GetOrCreateResOperand(parent, primType); + SelectSub(*resOpnd, opnd0, opnd1, primType); + } else { + /* vector operands */ + resOpnd = SelectVectorBinOp(dtype, &opnd0, node.Opnd(0)->GetPrimType(), &opnd1, + node.Opnd(1)->GetPrimType(), OP_sub); + } + return resOpnd; +} + +Operand *AArch64CGFunc::SelectMpy(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) { + PrimType dtype = node.GetPrimType(); + bool isSigned = IsSignedInteger(dtype); + uint32 dsize = GetPrimTypeBitSize(dtype); + bool is64Bits = (dsize == k64BitSize); + bool isFloat = IsPrimitiveFloat(dtype); + RegOperand *resOpnd = nullptr; + if (!IsPrimitiveVector(dtype)) { + /* promoted type */ + PrimType primType = + isFloat ? dtype : ((is64Bits ? (isSigned ? PTY_i64 : PTY_u64) : (isSigned ? 
PTY_i32 : PTY_u32))); + resOpnd = &GetOrCreateResOperand(parent, primType); + SelectMpy(*resOpnd, opnd0, opnd1, primType); + } else { + resOpnd = SelectVectorBinOp(dtype, &opnd0, node.Opnd(0)->GetPrimType(), &opnd1, + node.Opnd(1)->GetPrimType(), OP_mul); + } + return resOpnd; +} + +void AArch64CGFunc::SelectMpy(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) { + Operand::OperandType opnd0Type = opnd0.GetKind(); + Operand::OperandType opnd1Type = opnd1.GetKind(); + uint32 dsize = GetPrimTypeBitSize(primType); + bool is64Bits = (dsize == k64BitSize); + + if (((opnd0Type == Operand::kOpdImmediate) || (opnd0Type == Operand::kOpdOffset) || + (opnd1Type == Operand::kOpdImmediate) || (opnd1Type == Operand::kOpdOffset)) && + IsPrimitiveInteger(primType)) { + ImmOperand *imm = + ((opnd0Type == Operand::kOpdImmediate) || (opnd0Type == Operand::kOpdOffset)) ? static_cast(&opnd0) + : static_cast(&opnd1); + Operand *otherOp = ((opnd0Type == Operand::kOpdImmediate) || (opnd0Type == Operand::kOpdOffset)) ? &opnd1 : &opnd0; + int64 immValue = llabs(imm->GetValue()); + if (immValue != 0 && (static_cast(immValue) & (static_cast(immValue) - 1)) == 0) { + /* immValue is 1 << n */ + if (otherOp->GetKind() != Operand::kOpdRegister) { + otherOp = &SelectCopy(*otherOp, primType, primType); + } + int64 shiftVal = __builtin_ffsll(immValue); + ImmOperand &shiftNum = CreateImmOperand(shiftVal - 1, dsize, false); + SelectShift(resOpnd, *otherOp, shiftNum, kShiftLeft, primType); + bool reachSignBit = (is64Bits && (shiftVal == k64BitSize)) || (!is64Bits && (shiftVal == k32BitSize)); + if (imm->GetValue() < 0 && !reachSignBit) { + SelectNeg(resOpnd, resOpnd, primType); + } + + return; + } else if (immValue > 2) { + uint32 zeroNum = __builtin_ffsll(immValue) - 1; + int64 headVal = static_cast(immValue) >> zeroNum; + /* + * if (headVal - 1) & (headVal - 2) == 0, that is (immVal >> zeroNum) - 1 == 1 << n + * otherOp * immVal = (otherOp * (immVal >> zeroNum) * (1 << zeroNum) + * = (otherOp * ((immVal >> zeroNum) - 1) + otherOp) * (1 << zeroNum) + */ + if (((static_cast(headVal) - 1) & (static_cast(headVal) - 2)) == 0) { + if (otherOp->GetKind() != Operand::kOpdRegister) { + otherOp = &SelectCopy(*otherOp, primType, primType); + } + ImmOperand &shiftNum1 = CreateImmOperand(__builtin_ffsll(headVal - 1) - 1, dsize, false); + RegOperand &tmpOpnd = CreateRegisterOperandOfType(primType); + SelectShift(tmpOpnd, *otherOp, shiftNum1, kShiftLeft, primType); + SelectAdd(resOpnd, *otherOp, tmpOpnd, primType); + ImmOperand &shiftNum2 = CreateImmOperand(zeroNum, dsize, false); + SelectShift(resOpnd, resOpnd, shiftNum2, kShiftLeft, primType); + if (imm->GetValue() < 0) { + SelectNeg(resOpnd, resOpnd, primType); + } + + return; + } + } + } + + if ((opnd0Type != Operand::kOpdRegister) && (opnd1Type != Operand::kOpdRegister)) { + SelectMpy(resOpnd, SelectCopy(opnd0, primType, primType), opnd1, primType); + } else if ((opnd0Type == Operand::kOpdRegister) && (opnd1Type != Operand::kOpdRegister)) { + SelectMpy(resOpnd, opnd0, SelectCopy(opnd1, primType, primType), primType); + } else if ((opnd0Type != Operand::kOpdRegister) && (opnd1Type == Operand::kOpdRegister)) { + SelectMpy(resOpnd, opnd1, opnd0, primType); + } else { + ASSERT(IsPrimitiveFloat(primType) || IsPrimitiveInteger(primType), "NYI Mpy"); + MOperator mOp = IsPrimitiveFloat(primType) ? (is64Bits ? MOP_xvmuld : MOP_xvmuls) + : (is64Bits ? 
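+ /* plain mul/fmul; constant multipliers of the form 2^n or (2^m + 1) << z were
+  * strength-reduced above, e.g. x * 10 becomes lsl t, x, #2; add r, x, t; lsl r, r, #1 */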
MOP_xmulrrr : MOP_wmulrrr); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, resOpnd, opnd0, opnd1)); + } +} + +void AArch64CGFunc::SelectDiv(Operand &resOpnd, Operand &origOpnd0, Operand &opnd1, PrimType primType) { + Operand &opnd0 = LoadIntoRegister(origOpnd0, primType); + Operand::OperandType opnd0Type = opnd0.GetKind(); + Operand::OperandType opnd1Type = opnd1.GetKind(); + uint32 dsize = GetPrimTypeBitSize(primType); + bool is64Bits = (dsize == k64BitSize); + + if (Globals::GetInstance()->GetOptimLevel() > CGOptions::kLevel0) { + if (((opnd1Type == Operand::kOpdImmediate) || (opnd1Type == Operand::kOpdOffset)) && IsSignedInteger(primType)) { + ImmOperand *imm = static_cast(&opnd1); + int64 immValue = llabs(imm->GetValue()); + if ((immValue != 0) && (static_cast(immValue) & (static_cast(immValue) - 1)) == 0) { + if (immValue == 1) { + if (imm->GetValue() > 0) { + uint32 mOp = is64Bits ? MOP_xmovrr : MOP_wmovrr; + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, resOpnd, opnd0)); + } else { + SelectNeg(resOpnd, opnd0, primType); + } + + return; + } + int32 shiftNumber = __builtin_ffsll(immValue) - 1; + ImmOperand &shiftNum = CreateImmOperand(shiftNumber, dsize, false); + Operand &tmpOpnd = CreateRegisterOperandOfType(primType); + SelectShift(tmpOpnd, opnd0, CreateImmOperand(dsize - 1, dsize, false), kShiftAright, primType); + uint32 mopBadd = is64Bits ? MOP_xaddrrrs : MOP_waddrrrs; + int32 bitLen = is64Bits ? kBitLenOfShift64Bits : kBitLenOfShift32Bits; + BitShiftOperand &shiftOpnd = CreateBitShiftOperand(BitShiftOperand::kLSR, + dsize - static_cast(shiftNumber), bitLen); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mopBadd, tmpOpnd, opnd0, tmpOpnd, shiftOpnd)); + SelectShift(resOpnd, tmpOpnd, shiftNum, kShiftAright, primType); + if (imm->GetValue() < 0) { + SelectNeg(resOpnd, resOpnd, primType); + } + + return; + } + } else if (((opnd1Type == Operand::kOpdImmediate) || (opnd1Type == Operand::kOpdOffset)) && + IsUnsignedInteger(primType)) { + ImmOperand *imm = static_cast(&opnd1); + if (imm->GetValue() != 0) { + if ((imm->GetValue() > 0) && + ((static_cast(imm->GetValue()) & (static_cast(imm->GetValue()) - 1)) == 0)) { + ImmOperand &shiftNum = CreateImmOperand(__builtin_ffsll(imm->GetValue()) - 1, dsize, false); + SelectShift(resOpnd, opnd0, shiftNum, kShiftLright, primType); + + return; + } else if (imm->GetValue() < 0) { + SelectAArch64Cmp(opnd0, *imm, true, dsize); + SelectAArch64CSet(resOpnd, GetCondOperand(CC_CS), is64Bits); + + return; + } + } + } + } + + if (opnd0Type != Operand::kOpdRegister) { + SelectDiv(resOpnd, SelectCopy(opnd0, primType, primType), opnd1, primType); + } else if (opnd1Type != Operand::kOpdRegister) { + SelectDiv(resOpnd, opnd0, SelectCopy(opnd1, primType, primType), primType); + } else { + ASSERT(IsPrimitiveFloat(primType) || IsPrimitiveInteger(primType), "NYI Div"); + MOperator mOp = IsPrimitiveFloat(primType) ? (is64Bits ? MOP_ddivrrr : MOP_sdivrrr) + : (IsSignedInteger(primType) ? (is64Bits ? MOP_xsdivrrr : MOP_wsdivrrr) + : (is64Bits ? 
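+ /* generic sdiv/udiv/fdiv; power-of-two divisors were lowered above, e.g. signed x / 4 (32-bit):
+  *   asr t, w0, #31; add t, w0, t, LSR #30; asr res, t, #2
+  * while unsigned x / 4 is a single lsr res, w0, #2 */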
MOP_xudivrrr : MOP_wudivrrr)); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, resOpnd, opnd0, opnd1)); + } +} + +Operand *AArch64CGFunc::SelectDiv(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) { + PrimType dtype = node.GetPrimType(); + bool isSigned = IsSignedInteger(dtype); + uint32 dsize = GetPrimTypeBitSize(dtype); + bool is64Bits = (dsize == k64BitSize); + bool isFloat = IsPrimitiveFloat(dtype); + CHECK_FATAL(!IsPrimitiveVector(dtype), "NYI DIV vector operands"); + /* promoted type */ + PrimType primType = + isFloat ? dtype : ((is64Bits ? (isSigned ? PTY_i64 : PTY_u64) : (isSigned ? PTY_i32 : PTY_u32))); + RegOperand &resOpnd = GetOrCreateResOperand(parent, primType); + SelectDiv(resOpnd, opnd0, opnd1, primType); + return &resOpnd; +} + +void AArch64CGFunc::SelectRem(Operand &resOpnd, Operand &lhsOpnd, Operand &rhsOpnd, PrimType primType, bool isSigned, + bool is64Bits) { + Operand &opnd0 = LoadIntoRegister(lhsOpnd, primType); + Operand &opnd1 = LoadIntoRegister(rhsOpnd, primType); + + ASSERT(IsPrimitiveInteger(primType), "Wrong type for REM"); + /* + * printf("%d \n", 29 % 7 ); + * -> 1 + * printf("%u %d \n", (unsigned)-7, (unsigned)(-7) % 7 ); + * -> 4294967289 4 + * printf("%d \n", (-7) % 7 ); + * -> 0 + * printf("%d \n", 237 % -7 ); + * 6-> + * printf("implicit i->u conversion %d \n", ((unsigned)237) % -7 ); + * implicit conversion 237 + + * http://stackoverflow.com/questions/35351470/obtaining-remainder-using-single-aarch64-instruction + * input: x0=dividend, x1=divisor + * udiv|sdiv x2, x0, x1 + * msub x3, x2, x1, x0 -- multply-sub : x3 <- x0 - x2*x1 + * result: x2=quotient, x3=remainder + * + * allocate temporary register + */ + RegOperand &temp = CreateRegisterOperandOfType(primType); + /* + * mov w1, #2 + * sdiv wTemp, w0, w1 + * msub wRespond, wTemp, w1, w0 + * ========> + * asr wTemp, w0, #31 + * lsr wTemp, wTemp, #31 (#30 for 4, #29 for 8, ...) + * add wRespond, w0, wTemp + * and wRespond, wRespond, #1 (#3 for 4, #7 for 8, ...) 
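+  *      e.g. x = -5, divisor 4: wTemp = 3, the add gives -2, the and #3 gives 2,
+  *      and the final sub yields -1, matching C's -5 % 4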
+ * sub wRespond, wRespond, w2 + * + * if divde by 2 + * ========> + * lsr wTemp, w0, #31 + * add wRespond, w0, wTemp + * and wRespond, wRespond, #1 + * sub wRespond, wRespond, w2 + * + * for unsigned rem op, just use and + */ + if ((Globals::GetInstance()->GetOptimLevel() >= CGOptions::kLevel2)) { + ImmOperand *imm = nullptr; + Insn *movImmInsn = GetCurBB()->GetLastInsn(); + if (movImmInsn && + ((movImmInsn->GetMachineOpcode() == MOP_wmovri32) || (movImmInsn->GetMachineOpcode() == MOP_xmovri64)) && + movImmInsn->GetOperand(0).Equals(opnd1)) { + /* + * mov w1, #2 + * rem res, w0, w1 + */ + imm = static_cast(&movImmInsn->GetOperand(kInsnSecondOpnd)); + } else if (opnd1.IsImmediate()) { + /* + * rem res, w0, #2 + */ + imm = static_cast(&opnd1); + } + /* positive or negative do not have effect on the result */ + int64 dividor = 0; + if (imm && (imm->GetValue() != LONG_MIN)) { + dividor = abs(imm->GetValue()); + } + const int64 log2OfDividor = GetLog2(static_cast(dividor)); + if ((dividor != 0) && (log2OfDividor > 0)) { + if (is64Bits) { + CHECK_FATAL(log2OfDividor < k64BitSize, "imm out of bound"); + if (isSigned) { + ImmOperand &rightShiftValue = CreateImmOperand(k64BitSize - log2OfDividor, k64BitSize, isSigned); + if (log2OfDividor != 1) { + /* 63->shift ALL , 32 ->32bit register */ + ImmOperand &rightShiftAll = CreateImmOperand(63, k64BitSize, isSigned); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xasrrri6, temp, opnd0, rightShiftAll)); + + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xlsrrri6, temp, temp, rightShiftValue)); + } else { + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xlsrrri6, temp, opnd0, rightShiftValue)); + } + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xaddrrr, resOpnd, opnd0, temp)); + ImmOperand &remBits = CreateImmOperand(dividor - 1, k64BitSize, isSigned); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xandrri13, resOpnd, resOpnd, remBits)); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xsubrrr, resOpnd, resOpnd, temp)); + return; + } else if (imm && imm->GetValue() > 0) { + ImmOperand &remBits = CreateImmOperand(dividor - 1, k64BitSize, isSigned); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xandrri13, resOpnd, opnd0, remBits)); + return; + } + } else { + CHECK_FATAL(log2OfDividor < k32BitSize, "imm out of bound"); + if (isSigned) { + ImmOperand &rightShiftValue = CreateImmOperand(k32BitSize - log2OfDividor, k32BitSize, isSigned); + if (log2OfDividor != 1) { + /* 31->shift ALL , 32 ->32bit register */ + ImmOperand &rightShiftAll = CreateImmOperand(31, k32BitSize, isSigned); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_wasrrri5, temp, opnd0, rightShiftAll)); + + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_wlsrrri5, temp, temp, rightShiftValue)); + } else { + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_wlsrrri5, temp, opnd0, rightShiftValue)); + } + + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_waddrrr, resOpnd, opnd0, temp)); + ImmOperand &remBits = CreateImmOperand(dividor - 1, k32BitSize, isSigned); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_wandrri12, resOpnd, resOpnd, remBits)); + + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_wsubrrr, resOpnd, resOpnd, temp)); + return; + } else if (imm && imm->GetValue() > 0) { + ImmOperand &remBits = CreateImmOperand(dividor - 1, k32BitSize, isSigned); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_wandrri12, resOpnd, opnd0, remBits)); + return; + } + } + } + } + + uint32 
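+ /* fallback for divisors that are not a power of two: compute the quotient with sdiv/udiv,
+  * then msub recovers the remainder as dividend - quotient * divisor */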
mopDiv = is64Bits ? (isSigned ? MOP_xsdivrrr : MOP_xudivrrr) : (isSigned ? MOP_wsdivrrr : MOP_wudivrrr); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mopDiv, temp, opnd0, opnd1)); + + uint32 mopSub = is64Bits ? MOP_xmsubrrrr : MOP_wmsubrrrr; + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mopSub, resOpnd, temp, opnd1, opnd0)); +} + +Operand *AArch64CGFunc::SelectRem(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) { + PrimType dtype = node.GetPrimType(); + ASSERT(IsPrimitiveInteger(dtype), "wrong type for rem"); + bool isSigned = IsSignedInteger(dtype); + uint32 dsize = GetPrimTypeBitSize(dtype); + bool is64Bits = (dsize == k64BitSize); + CHECK_FATAL(!IsPrimitiveVector(dtype), "NYI DIV vector operands"); + + /* promoted type */ + PrimType primType = ((is64Bits ? (isSigned ? PTY_i64 : PTY_u64) : (isSigned ? PTY_i32 : PTY_u32))); + RegOperand &resOpnd = GetOrCreateResOperand(parent, primType); + SelectRem(resOpnd, opnd0, opnd1, primType, isSigned, is64Bits); + return &resOpnd; +} + +Operand *AArch64CGFunc::SelectLand(BinaryNode &node, Operand &lhsOpnd, Operand &rhsOpnd, const BaseNode &parent) { + PrimType primType = node.GetPrimType(); + ASSERT(IsPrimitiveInteger(primType), "Land should be integer type"); + bool is64Bits = (GetPrimTypeBitSize(primType) == k64BitSize); + RegOperand &resOpnd = GetOrCreateResOperand(parent, is64Bits ? PTY_u64 : PTY_u32); + /* + * OP0 band Op1 + * cmp OP0, 0 # compare X0 with 0, sets Z bit + * ccmp OP1, 0, 4 //==0100b, ne # if(OP0!=0) cmp Op1 and 0, else NZCV <- 0100 makes OP0==0 + * cset RES, ne # if Z==1(i.e., OP0==0||OP1==0) RES<-0, RES<-1 + */ + Operand &opnd0 = LoadIntoRegister(lhsOpnd, primType); + SelectAArch64Cmp(opnd0, CreateImmOperand(0, primType, false), true, GetPrimTypeBitSize(primType)); + Operand &opnd1 = LoadIntoRegister(rhsOpnd, primType); + SelectAArch64CCmp(opnd1, CreateImmOperand(0, primType, false), CreateImmOperand(4, PTY_u8, false), + GetCondOperand(CC_NE), is64Bits); + SelectAArch64CSet(resOpnd, GetCondOperand(CC_NE), is64Bits); + return &resOpnd; +} + +Operand *AArch64CGFunc::SelectLor(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent, + bool parentIsBr) { + PrimType primType = node.GetPrimType(); + ASSERT(IsPrimitiveInteger(primType), "Lior should be integer type"); + bool is64Bits = (GetPrimTypeBitSize(primType) == k64BitSize); + RegOperand &resOpnd = GetOrCreateResOperand(parent, is64Bits ? 
PTY_u64 : PTY_u32); + /* + * OP0 band Op1 + * cmp OP0, 0 # compare X0 with 0, sets Z bit + * ccmp OP1, 0, 0 //==0100b, eq # if(OP0==0,eq) cmp Op1 and 0, else NZCV <- 0000 makes OP0!=0 + * cset RES, ne # if Z==1(i.e., OP0==0&&OP1==0) RES<-0, RES<-1 + */ + if (parentIsBr && !is64Bits && opnd0.IsRegister() && (static_cast(&opnd0)->GetValidBitsNum() == 1) && + opnd1.IsRegister() && (static_cast(&opnd1)->GetValidBitsNum() == 1)) { + uint32 mOp = MOP_wiorrrr; + static_cast(resOpnd).SetValidBitsNum(1); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, resOpnd, opnd0, opnd1)); + } else { + SelectBior(resOpnd, opnd0, opnd1, primType); + SelectAArch64Cmp(resOpnd, CreateImmOperand(0, primType, false), true, GetPrimTypeBitSize(primType)); + SelectAArch64CSet(resOpnd, GetCondOperand(CC_NE), is64Bits); + } + return &resOpnd; +} + +void AArch64CGFunc::SelectCmpOp(Operand &resOpnd, Operand &lhsOpnd, Operand &rhsOpnd, + Opcode opcode, PrimType primType, const BaseNode &parent) { + uint32 dsize = resOpnd.GetSize(); + bool isFloat = IsPrimitiveFloat(primType); + Operand &opnd0 = LoadIntoRegister(lhsOpnd, primType); + + /* + * most of FP constants are passed as MemOperand + * except 0.0 which is passed as kOpdFPImmediate + */ + Operand::OperandType opnd1Type = rhsOpnd.GetKind(); + Operand *opnd1 = &rhsOpnd; + if ((opnd1Type != Operand::kOpdImmediate) && (opnd1Type != Operand::kOpdFPImmediate) && + (opnd1Type != Operand::kOpdOffset)) { + opnd1 = &LoadIntoRegister(rhsOpnd, primType); + } + + bool unsignedIntegerComparison = !isFloat && !IsSignedInteger(primType); + /* + * OP_cmp, OP_cmpl, OP_cmpg + * OP0, OP1 ; fcmp for OP_cmpl/OP_cmpg, cmp/fcmpe for OP_cmp + * CSINV RES, WZR, WZR, GE + * CSINC RES, RES, WZR, LE + * if OP_cmpl, CSINV RES, RES, WZR, VC (no overflow) + * if OP_cmpg, CSINC RES, RES, WZR, VC (no overflow) + */ + RegOperand &xzr = GetZeroOpnd(dsize); + if ((opcode == OP_cmpl) || (opcode == OP_cmpg)) { + ASSERT(isFloat, "incorrect operand types"); + SelectTargetFPCmpQuiet(opnd0, *opnd1, GetPrimTypeBitSize(primType)); + SelectAArch64CSINV(resOpnd, xzr, xzr, GetCondOperand(CC_GE), (dsize == k64BitSize)); + SelectAArch64CSINC(resOpnd, resOpnd, xzr, GetCondOperand(CC_LE), (dsize == k64BitSize)); + if (opcode == OP_cmpl) { + SelectAArch64CSINV(resOpnd, resOpnd, xzr, GetCondOperand(CC_VC), (dsize == k64BitSize)); + } else { + SelectAArch64CSINC(resOpnd, resOpnd, xzr, GetCondOperand(CC_VC), (dsize == k64BitSize)); + } + return; + } + + if (opcode == OP_cmp) { + SelectAArch64Cmp(opnd0, *opnd1, !isFloat, GetPrimTypeBitSize(primType)); + if (unsignedIntegerComparison) { + SelectAArch64CSINV(resOpnd, xzr, xzr, GetCondOperand(CC_HS), (dsize == k64BitSize)); + SelectAArch64CSINC(resOpnd, resOpnd, xzr, GetCondOperand(CC_LS), (dsize == k64BitSize)); + } else { + SelectAArch64CSINV(resOpnd, xzr, xzr, GetCondOperand(CC_GE), (dsize == k64BitSize)); + SelectAArch64CSINC(resOpnd, resOpnd, xzr, GetCondOperand(CC_LE), (dsize == k64BitSize)); + } + return; + } + + // lt u8 i32 ( xxx, 0 ) => get sign bit + if ((opcode == OP_lt) && opnd0.IsRegister() && opnd1->IsImmediate() && + (static_cast(opnd1)->GetValue() == 0) && parent.GetOpCode() != OP_select) { + bool is64Bits = (opnd0.GetSize() == k64BitSize); + if (!unsignedIntegerComparison) { + int32 bitLen = is64Bits ? kBitLenOfShift64Bits : kBitLenOfShift32Bits; + ImmOperand &shiftNum = + CreateImmOperand(is64Bits ? kHighestBitOf64Bits : kHighestBitOf32Bits, static_cast(bitLen), false); + MOperator mOpCode = is64Bits ? 
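+ /* for signed x < 0 the 0/1 result is exactly the sign bit, so a single lsr by 63 (or 31)
+  * suffices; the unsigned case below is constant false and just moves #0 */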
MOP_xlsrrri6 : MOP_wlsrrri5; + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOpCode, resOpnd, opnd0, shiftNum)); + return; + } + ImmOperand &constNum = CreateImmOperand(0, is64Bits ? k64BitSize : k32BitSize, false); + GetCurBB()->AppendInsn( + GetInsnBuilder()->BuildInsn(is64Bits ? MOP_xmovri64 : MOP_wmovri32, resOpnd, constNum)); + return; + } + SelectAArch64Cmp(opnd0, *opnd1, !isFloat, GetPrimTypeBitSize(primType)); + + ConditionCode cc = CC_EQ; + switch (opcode) { + case OP_eq: + cc = CC_EQ; + break; + case OP_ne: + cc = CC_NE; + break; + case OP_le: + cc = unsignedIntegerComparison ? CC_LS : CC_LE; + break; + case OP_ge: + cc = unsignedIntegerComparison ? CC_HS : CC_GE; + break; + case OP_gt: + cc = unsignedIntegerComparison ? CC_HI : CC_GT; + break; + case OP_lt: + cc = unsignedIntegerComparison ? CC_LO : CC_LT; + break; + default: + CHECK_FATAL(false, "illegal logical operator"); + } + SelectAArch64CSet(resOpnd, GetCondOperand(cc), (dsize == k64BitSize)); +} + +Operand *AArch64CGFunc::SelectCmpOp(CompareNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) { + RegOperand *resOpnd = nullptr; + if (!IsPrimitiveVector(node.GetPrimType())) { + resOpnd = &GetOrCreateResOperand(parent, node.GetPrimType()); + SelectCmpOp(*resOpnd, opnd0, opnd1, node.GetOpCode(), node.GetOpndType(), parent); + } else { + resOpnd = SelectVectorCompare(&opnd0, node.Opnd(0)->GetPrimType(), &opnd1, + node.Opnd(1)->GetPrimType(), node.GetOpCode()); + } + return resOpnd; +} + +void AArch64CGFunc::SelectTargetFPCmpQuiet(Operand &o0, Operand &o1, uint32 dsize) { + MOperator mOpCode = 0; + if (o1.GetKind() == Operand::kOpdFPImmediate) { + CHECK_FATAL(static_cast(o0).GetValue() == 0, "NIY"); + mOpCode = (dsize == k64BitSize) ? MOP_dcmpqri : (dsize == k32BitSize) ? MOP_scmpqri : MOP_hcmpqri; + } else if (o1.GetKind() == Operand::kOpdRegister) { + mOpCode = (dsize == k64BitSize) ? MOP_dcmpqrr : (dsize == k32BitSize) ? MOP_scmpqrr : MOP_hcmpqrr; + } else { + CHECK_FATAL(false, "unsupported operand type"); + } + Operand &rflag = GetOrCreateRflag(); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOpCode, rflag, o0, o1)); +} + +void AArch64CGFunc::SelectAArch64Cmp(Operand &o0, Operand &o1, bool isIntType, uint32 dsize) { + MOperator mOpCode = 0; + Operand *newO1 = &o1; + if (isIntType) { + if ((o1.GetKind() == Operand::kOpdImmediate) || (o1.GetKind() == Operand::kOpdOffset)) { + ImmOperand *immOpnd = static_cast(&o1); + /* + * imm : 0 ~ 4095, shift: none, LSL #0, or LSL #12 + * aarch64 assembly takes up to 24-bits, if the lower 12 bits is all 0 + */ + if (immOpnd->IsInBitSize(kMaxImmVal12Bits, 0) || immOpnd->IsInBitSize(kMaxImmVal12Bits, kMaxImmVal12Bits)) { + mOpCode = (dsize == k64BitSize) ? MOP_xcmpri : MOP_wcmpri; + } else { + /* load into register */ + PrimType ptype = (dsize == k64BitSize) ? PTY_i64 : PTY_i32; + newO1 = &SelectCopy(o1, ptype, ptype); + mOpCode = (dsize == k64BitSize) ? MOP_xcmprr : MOP_wcmprr; + } + } else if (o1.GetKind() == Operand::kOpdRegister) { + mOpCode = (dsize == k64BitSize) ? MOP_xcmprr : MOP_wcmprr; + } else { + CHECK_FATAL(false, "unsupported operand type"); + } + } else { /* float */ + if (o1.GetKind() == Operand::kOpdFPImmediate) { + CHECK_FATAL(static_cast(o1).GetValue() == 0, "NIY"); + mOpCode = (dsize == k64BitSize) ? MOP_dcmperi : ((dsize == k32BitSize) ? MOP_scmperi : MOP_hcmperi); + } else if (o1.GetKind() == Operand::kOpdRegister) { + mOpCode = (dsize == k64BitSize) ? MOP_dcmperr : ((dsize == k32BitSize) ? 
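+ /* fcmp accepts only a register or the literal #0.0 (handled in the branch above);
+  * any other FP constant is expected to already be in a register */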
MOP_scmperr : MOP_hcmperr); + } else { + CHECK_FATAL(false, "unsupported operand type"); + } + } + ASSERT(mOpCode != 0, "mOpCode undefined"); + Operand &rflag = GetOrCreateRflag(); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOpCode, rflag, o0, *newO1)); +} + +void AArch64CGFunc::SelectAArch64CCmp(Operand &o, Operand &i, Operand &nzcv, CondOperand &cond, bool is64Bits) { + uint32 mOpCode = is64Bits ? MOP_xccmpriic : MOP_wccmpriic; + Operand &rflag = GetOrCreateRflag(); + std::vector opndVec; + opndVec.push_back(&rflag); + opndVec.push_back(&o); + opndVec.push_back(&i); + opndVec.push_back(&nzcv); + opndVec.push_back(&cond); + opndVec.push_back(&rflag); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOpCode, opndVec)); +} + +void AArch64CGFunc::SelectAArch64CSet(Operand &res, CondOperand &cond, bool is64Bits) { + MOperator mOpCode = is64Bits ? MOP_xcsetrc : MOP_wcsetrc; + Operand &rflag = GetOrCreateRflag(); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOpCode, res, cond, rflag)); +} + +void AArch64CGFunc::SelectAArch64CSINV(Operand &res, Operand &o0, Operand &o1, CondOperand &cond, bool is64Bits) { + MOperator mOpCode = is64Bits ? MOP_xcsinvrrrc : MOP_wcsinvrrrc; + Operand &rflag = GetOrCreateRflag(); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOpCode, res, o0, o1, cond, rflag)); +} + +void AArch64CGFunc::SelectAArch64CSINC(Operand &res, Operand &o0, Operand &o1, CondOperand &cond, bool is64Bits) { + MOperator mOpCode = is64Bits ? MOP_xcsincrrrc : MOP_wcsincrrrc; + Operand &rflag = GetOrCreateRflag(); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOpCode, res, o0, o1, cond, rflag)); +} + +Operand *AArch64CGFunc::SelectBand(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) { + return SelectRelationOperator(kAND, node, opnd0, opnd1, parent); +} + +void AArch64CGFunc::SelectBand(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) { + SelectRelationOperator(kAND, resOpnd, opnd0, opnd1, primType); +} + +Operand *AArch64CGFunc::SelectRelationOperator(RelationOperator operatorCode, const BinaryNode &node, Operand &opnd0, + Operand &opnd1, const BaseNode &parent) { + PrimType dtype = node.GetPrimType(); + bool isSigned = IsSignedInteger(dtype); + uint32 dsize = GetPrimTypeBitSize(dtype); + bool is64Bits = (dsize == k64BitSize); + RegOperand *resOpnd = nullptr; + if (!IsPrimitiveVector(dtype)) { + PrimType primType = is64Bits ? (isSigned ? PTY_i64 : PTY_u64) : (isSigned ? PTY_i32 : PTY_u32); /* promoted type */ + resOpnd = &GetOrCreateResOperand(parent, primType); + SelectRelationOperator(operatorCode, *resOpnd, opnd0, opnd1, primType); + } else { + /* vector operations */ + resOpnd = SelectVectorBitwiseOp(dtype, &opnd0, node.Opnd(0)->GetPrimType(), &opnd1, node.Opnd(1)->GetPrimType(), + (operatorCode == kAND) ? OP_band : (operatorCode == kIOR ? OP_bior : OP_bxor)); + } + return resOpnd; +} + +MOperator AArch64CGFunc::SelectRelationMop(RelationOperator operatorCode, + RelationOperatorOpndPattern opndPattern, bool is64Bits, + bool isBitmaskImmediate, bool isBitNumLessThan16) const { + MOperator mOp = MOP_undef; + if (opndPattern == kRegReg) { + switch (operatorCode) { + case kAND: + mOp = is64Bits ? MOP_xandrrr : MOP_wandrrr; + break; + case kIOR: + mOp = is64Bits ? MOP_xiorrrr : MOP_wiorrrr; + break; + case kEOR: + mOp = is64Bits ? 
MOP_xeorrrr : MOP_weorrrr; + break; + default: + break; + } + return mOp; + } + /* opndPattern == KRegImm */ + if (isBitmaskImmediate) { + switch (operatorCode) { + case kAND: + mOp = is64Bits ? MOP_xandrri13 : MOP_wandrri12; + break; + case kIOR: + mOp = is64Bits ? MOP_xiorrri13 : MOP_wiorrri12; + break; + case kEOR: + mOp = is64Bits ? MOP_xeorrri13 : MOP_weorrri12; + break; + default: + break; + } + return mOp; + } + /* normal imm value */ + if (isBitNumLessThan16) { + switch (operatorCode) { + case kAND: + mOp = is64Bits ? MOP_xandrrrs : MOP_wandrrrs; + break; + case kIOR: + mOp = is64Bits ? MOP_xiorrrrs : MOP_wiorrrrs; + break; + case kEOR: + mOp = is64Bits ? MOP_xeorrrrs : MOP_weorrrrs; + break; + default: + break; + } + return mOp; + } + return mOp; +} + +void AArch64CGFunc::SelectRelationOperator(RelationOperator operatorCode, Operand &resOpnd, Operand &opnd0, + Operand &opnd1, PrimType primType) { + Operand::OperandType opnd0Type = opnd0.GetKind(); + Operand::OperandType opnd1Type = opnd1.GetKind(); + uint32 dsize = GetPrimTypeBitSize(primType); + bool is64Bits = (dsize == k64BitSize); + /* op #imm. #imm */ + if ((opnd0Type != Operand::kOpdRegister) && (opnd1Type != Operand::kOpdRegister)) { + SelectRelationOperator(operatorCode, resOpnd, SelectCopy(opnd0, primType, primType), opnd1, primType); + return; + } + /* op #imm, reg -> op reg, #imm */ + if ((opnd0Type != Operand::kOpdRegister) && (opnd1Type == Operand::kOpdRegister)) { + SelectRelationOperator(operatorCode, resOpnd, opnd1, opnd0, primType); + return; + } + /* op reg, reg */ + if ((opnd0Type == Operand::kOpdRegister) && (opnd1Type == Operand::kOpdRegister)) { + ASSERT(IsPrimitiveInteger(primType), "NYI band"); + MOperator mOp = SelectRelationMop(operatorCode, kRegReg, is64Bits, false, false); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, resOpnd, opnd0, opnd1)); + return; + } + /* op reg, #imm */ + if ((opnd0Type == Operand::kOpdRegister) && (opnd1Type != Operand::kOpdRegister)) { + if (!((opnd1Type == Operand::kOpdImmediate) || (opnd1Type == Operand::kOpdOffset))) { + SelectRelationOperator(operatorCode, resOpnd, opnd0, SelectCopy(opnd1, primType, primType), primType); + return; + } + + ImmOperand *immOpnd = static_cast(&opnd1); + if (immOpnd->IsZero()) { + if (operatorCode == kAND) { + uint32 mopMv = is64Bits ? MOP_xmovrr : MOP_wmovrr; + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mopMv, resOpnd, + GetZeroOpnd(dsize))); + } else if ((operatorCode == kIOR) || (operatorCode == kEOR)) { + SelectCopy(resOpnd, primType, opnd0, primType); + } + } else if ((immOpnd->IsAllOnes()) || (!is64Bits && immOpnd->IsAllOnes32bit())) { + if (operatorCode == kAND) { + SelectCopy(resOpnd, primType, opnd0, primType); + } else if (operatorCode == kIOR) { + uint32 mopMovn = is64Bits ? 
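+ /* orr with an all-ones mask always yields all ones, so materialize ~0 directly with movn res, #0 */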
MOP_xmovnri16 : MOP_wmovnri16; + ImmOperand &src16 = CreateImmOperand(0, k16BitSize, false); + BitShiftOperand *lslOpnd = GetLogicalShiftLeftOperand(0, is64Bits); + GetCurBB()->AppendInsn( + GetInsnBuilder()->BuildInsn(mopMovn, resOpnd, src16, *lslOpnd)); + } else if (operatorCode == kEOR) { + SelectMvn(resOpnd, opnd0, primType); + } + } else if (immOpnd->IsBitmaskImmediate()) { + MOperator mOp = SelectRelationMop(operatorCode, kRegImm, is64Bits, true, false); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, resOpnd, opnd0, opnd1)); + } else { + int64 immVal = immOpnd->GetValue(); + int32 tail0BitNum = GetTail0BitNum(immVal); + int32 head0BitNum = GetHead0BitNum(immVal); + const int32 bitNum = (k64BitSizeInt - head0BitNum) - tail0BitNum; + RegOperand ®Opnd = CreateRegisterOperandOfType(primType); + + if (bitNum <= k16ValidBit && tail0BitNum != 0) { + int64 newImm = (static_cast(immVal) >> static_cast(tail0BitNum)) & 0xFFFF; + ImmOperand &immOpnd1 = CreateImmOperand(newImm, k32BitSize, false); + SelectCopyImm(regOpnd, immOpnd1, primType); + MOperator mOp = SelectRelationMop(operatorCode, kRegImm, is64Bits, false, true); + int32 bitLen = is64Bits ? kBitLenOfShift64Bits : kBitLenOfShift32Bits; + BitShiftOperand &shiftOpnd = + CreateBitShiftOperand(BitShiftOperand::kLSL, static_cast(tail0BitNum), bitLen); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, resOpnd, opnd0, regOpnd, shiftOpnd)); + } else { + SelectCopyImm(regOpnd, *immOpnd, primType); + MOperator mOp = SelectRelationMop(operatorCode, kRegReg, is64Bits, false, false); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, resOpnd, opnd0, regOpnd)); + } + } + } +} + +Operand *AArch64CGFunc::SelectBior(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) { + return SelectRelationOperator(kIOR, node, opnd0, opnd1, parent); +} + +void AArch64CGFunc::SelectBior(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) { + SelectRelationOperator(kIOR, resOpnd, opnd0, opnd1, primType); +} + +Operand *AArch64CGFunc::SelectMinOrMax(bool isMin, const BinaryNode &node, Operand &opnd0, Operand &opnd1, + const BaseNode &parent) { + PrimType dtype = node.GetPrimType(); + bool isSigned = IsSignedInteger(dtype); + uint32 dsize = GetPrimTypeBitSize(dtype); + bool is64Bits = (dsize == k64BitSize); + bool isFloat = IsPrimitiveFloat(dtype); + /* promoted type */ + PrimType primType = isFloat ? dtype : (is64Bits ? (isSigned ? PTY_i64 : PTY_u64) : (isSigned ? PTY_i32 : PTY_u32)); + RegOperand &resOpnd = GetOrCreateResOperand(parent, primType); + SelectMinOrMax(isMin, resOpnd, opnd0, opnd1, primType); + return &resOpnd; +} + +void AArch64CGFunc::SelectMinOrMax(bool isMin, Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) { + uint32 dsize = GetPrimTypeBitSize(primType); + bool is64Bits = (dsize == k64BitSize); + if (IsPrimitiveInteger(primType)) { + RegOperand ®Opnd0 = LoadIntoRegister(opnd0, primType); + Operand ®Opnd1 = LoadIntoRegister(opnd1, primType); + SelectAArch64Cmp(regOpnd0, regOpnd1, true, dsize); + Operand &newResOpnd = LoadIntoRegister(resOpnd, primType); + if (isMin) { + CondOperand &cc = IsSignedInteger(primType) ? GetCondOperand(CC_LT) : GetCondOperand(CC_LO); + SelectAArch64Select(newResOpnd, regOpnd0, regOpnd1, cc, true, dsize); + } else { + CondOperand &cc = IsSignedInteger(primType) ? 
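+ /* max keeps opnd0 when it compares greater: GT for signed, HI for unsigned (the min branch
+  * above used LT/LO); the select below picks between the two registers accordingly */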
GetCondOperand(CC_GT) : GetCondOperand(CC_HI); + SelectAArch64Select(newResOpnd, regOpnd0, regOpnd1, cc, true, dsize); + } + } else if (IsPrimitiveFloat(primType)) { + RegOperand ®Opnd0 = LoadIntoRegister(opnd0, primType); + RegOperand ®Opnd1 = LoadIntoRegister(opnd1, primType); + SelectFMinFMax(resOpnd, regOpnd0, regOpnd1, is64Bits, isMin); + } else { + CHECK_FATAL(false, "NIY type max or min"); + } +} + +Operand *AArch64CGFunc::SelectMin(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) { + return SelectMinOrMax(true, node, opnd0, opnd1, parent); +} + +void AArch64CGFunc::SelectMin(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) { + SelectMinOrMax(true, resOpnd, opnd0, opnd1, primType); +} + +Operand *AArch64CGFunc::SelectMax(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) { + return SelectMinOrMax(false, node, opnd0, opnd1, parent); +} + +void AArch64CGFunc::SelectMax(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) { + SelectMinOrMax(false, resOpnd, opnd0, opnd1, primType); +} + +void AArch64CGFunc::SelectFMinFMax(Operand &resOpnd, Operand &opnd0, Operand &opnd1, bool is64Bits, bool isMin) { + uint32 mOpCode = isMin ? (is64Bits ? MOP_xfminrrr : MOP_wfminrrr) : (is64Bits ? MOP_xfmaxrrr : MOP_wfmaxrrr); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOpCode, resOpnd, opnd0, opnd1)); +} + +Operand *AArch64CGFunc::SelectBxor(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) { + return SelectRelationOperator(kEOR, node, opnd0, opnd1, parent); +} + +void AArch64CGFunc::SelectBxor(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) { + SelectRelationOperator(kEOR, resOpnd, opnd0, opnd1, primType); +} + +Operand *AArch64CGFunc::SelectShift(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) { + PrimType dtype = node.GetPrimType(); + bool isSigned = IsSignedInteger(dtype); + uint32 dsize = GetPrimTypeBitSize(dtype); + bool is64Bits = (dsize == k64BitSize); + bool isFloat = IsPrimitiveFloat(dtype); + RegOperand *resOpnd = nullptr; + Opcode opcode = node.GetOpCode(); + + bool isOneElemVector = false; + BaseNode *expr = node.Opnd(0); + if (expr->GetOpCode() == OP_dread) { + MIRSymbol *symbol = GetFunction().GetLocalOrGlobalSymbol(static_cast(expr)->GetStIdx()); + isOneElemVector = symbol->GetAttr(ATTR_oneelem_simd); + } + + Operand *opd0 = &opnd0; + PrimType otyp0 = expr->GetPrimType(); + if (IsPrimitiveVector(dtype) && opnd0.IsConstImmediate()) { + opd0 = SelectVectorFromScalar(dtype, opd0, node.Opnd(0)->GetPrimType()); + otyp0 = dtype; + } + + if (IsPrimitiveVector(dtype) && opnd1.IsConstImmediate()) { + int64 sConst = static_cast(opnd1).GetValue(); + resOpnd = SelectVectorShiftImm(dtype, opd0, &opnd1, static_cast(sConst), opcode); + } else if ((IsPrimitiveVector(dtype) || isOneElemVector) && !opnd1.IsConstImmediate()) { + resOpnd = SelectVectorShift(dtype, opd0, otyp0, &opnd1, node.Opnd(1)->GetPrimType(), opcode); + } else { + PrimType primType = isFloat ? dtype : (is64Bits ? (isSigned ? PTY_i64 : PTY_u64) : (isSigned ? PTY_i32 : PTY_u32)); + resOpnd = &GetOrCreateResOperand(parent, primType); + ShiftDirection direct = (opcode == OP_lshr) ? kShiftLright : ((opcode == OP_ashr) ? kShiftAright : kShiftLeft); + SelectShift(*resOpnd, opnd0, opnd1, direct, primType); + } + + if (dtype == PTY_i16) { + MOperator exOp = is64Bits ? 
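+ /* the shift ran in the promoted 32-/64-bit width, so a narrow signed result is sign-extended
+  * back here (sxth for i16, sxtb for i8 below) */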
MOP_xsxth64 : MOP_xsxth32; + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(exOp, *resOpnd, *resOpnd)); + } else if (dtype == PTY_i8) { + MOperator exOp = is64Bits ? MOP_xsxtb64 : MOP_xsxtb32; + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(exOp, *resOpnd, *resOpnd)); + } + return resOpnd; +} + +Operand *AArch64CGFunc::SelectRor(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) { + PrimType dtype = node.GetPrimType(); + uint32 dsize = GetPrimTypeBitSize(dtype); + PrimType primType = (dsize == k64BitSize) ? PTY_u64 : PTY_u32; + RegOperand *resOpnd = &GetOrCreateResOperand(parent, primType); + Operand *firstOpnd = &LoadIntoRegister(opnd0, primType); + MOperator mopRor = (dsize == k64BitSize) ? MOP_xrorrrr : MOP_wrorrrr; + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mopRor, *resOpnd, *firstOpnd, opnd1)); + return resOpnd; +} + +void AArch64CGFunc::SelectBxorShift(Operand &resOpnd, Operand *opnd0, Operand *opnd1, Operand &opnd2, + PrimType primType) { + opnd0 = &LoadIntoRegister(*opnd0, primType); + opnd1 = &LoadIntoRegister(*opnd1, primType); + uint32 dsize = GetPrimTypeBitSize(primType); + bool is64Bits = (dsize == k64BitSize); + MOperator mopBxor = is64Bits ? MOP_xeorrrrs : MOP_weorrrrs; + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mopBxor, resOpnd, *opnd0, *opnd1, opnd2)); +} + +void AArch64CGFunc::SelectShift(Operand &resOpnd, Operand &opnd0, Operand &opnd1, ShiftDirection direct, + PrimType primType) { + Operand::OperandType opnd1Type = opnd1.GetKind(); + uint32 dsize = GetPrimTypeBitSize(primType); + bool is64Bits = (dsize == k64BitSize); + Operand *firstOpnd = &LoadIntoRegister(opnd0, primType); + + MOperator mopShift; + if ((opnd1Type == Operand::kOpdImmediate) || (opnd1Type == Operand::kOpdOffset)) { + ImmOperand *immOpnd1 = static_cast(&opnd1); + const int64 kVal = immOpnd1->GetValue(); + const uint32 kShiftamt = is64Bits ? kHighestBitOf64Bits : kHighestBitOf32Bits; + if (kVal == 0) { + SelectCopy(resOpnd, primType, *firstOpnd, primType); + return; + } + /* e.g. a >> -1 */ + if ((kVal < 0) || (kVal > kShiftamt)) { + SelectShift(resOpnd, *firstOpnd, SelectCopy(opnd1, primType, primType), direct, primType); + return; + } + switch (direct) { + case kShiftLeft: + mopShift = is64Bits ? MOP_xlslrri6 : MOP_wlslrri5; + break; + case kShiftAright: + mopShift = is64Bits ? MOP_xasrrri6 : MOP_wasrrri5; + break; + case kShiftLright: + mopShift = is64Bits ? MOP_xlsrrri6 : MOP_wlsrrri5; + break; + } + } else if (opnd1Type != Operand::kOpdRegister) { + SelectShift(resOpnd, *firstOpnd, SelectCopy(opnd1, primType, primType), direct, primType); + return; + } else { + switch (direct) { + case kShiftLeft: + mopShift = is64Bits ? MOP_xlslrrr : MOP_wlslrrr; + break; + case kShiftAright: + mopShift = is64Bits ? MOP_xasrrrr : MOP_wasrrrr; + break; + case kShiftLright: + mopShift = is64Bits ? MOP_xlsrrrr : MOP_wlsrrrr; + break; + } + } + + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mopShift, resOpnd, *firstOpnd, opnd1)); +} + +Operand *AArch64CGFunc::SelectAbsSub(Insn &lastInsn, const UnaryNode &node, Operand &newOpnd0) { + PrimType dtyp = node.GetPrimType(); + bool is64Bits = (GetPrimTypeBitSize(dtyp) == k64BitSize); + /* promoted type */ + PrimType primType = is64Bits ? (PTY_i64) : (PTY_i32); + RegOperand &resOpnd = CreateRegisterOperandOfType(primType); + uint32 mopCsneg = is64Bits ? 
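+ /* abs of a subtraction: the preceding sub is rewritten into its flag-setting form below, and
+  * the conditional negate on MI flips the sign only when the difference came out negative */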
MOP_xcnegrrrc : MOP_wcnegrrrc; + /* ABS requires the operand be interpreted as a signed integer */ + CondOperand &condOpnd = GetCondOperand(CC_MI); + MOperator newMop = lastInsn.GetMachineOpcode() + 1; + Operand &rflag = GetOrCreateRflag(); + std::vector opndVec; + opndVec.push_back(&rflag); + for (uint32 i = 0; i < lastInsn.GetOperandSize(); i++) { + opndVec.push_back(&lastInsn.GetOperand(i)); + } + Insn *subsInsn = &GetInsnBuilder()->BuildInsn(newMop, opndVec); + GetCurBB()->ReplaceInsn(lastInsn, *subsInsn); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mopCsneg, resOpnd, newOpnd0, condOpnd, rflag)); + return &resOpnd; +} + +Operand *AArch64CGFunc::SelectAbs(UnaryNode &node, Operand &opnd0) { + PrimType dtyp = node.GetPrimType(); + if (IsPrimitiveVector(dtyp)) { + return SelectVectorAbs(dtyp, &opnd0); + } else if (IsPrimitiveFloat(dtyp)) { + CHECK_FATAL(GetPrimTypeBitSize(dtyp) >= k32BitSize, "We don't support hanf-word FP operands yet"); + bool is64Bits = (GetPrimTypeBitSize(dtyp) == k64BitSize); + Operand &newOpnd0 = LoadIntoRegister(opnd0, dtyp); + RegOperand &resOpnd = CreateRegisterOperandOfType(dtyp); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(is64Bits ? MOP_dabsrr : MOP_sabsrr, + resOpnd, newOpnd0)); + return &resOpnd; + } else { + bool is64Bits = (GetPrimTypeBitSize(dtyp) == k64BitSize); + /* promoted type */ + PrimType primType = is64Bits ? (PTY_i64) : (PTY_i32); + Operand &newOpnd0 = LoadIntoRegister(opnd0, primType); + Insn *lastInsn = GetCurBB()->GetLastInsn(); + if (lastInsn != nullptr && lastInsn->GetMachineOpcode() >= MOP_xsubrrr && + lastInsn->GetMachineOpcode() <= MOP_wsubrri12) { + return SelectAbsSub(*lastInsn, node, newOpnd0); + } + RegOperand &resOpnd = CreateRegisterOperandOfType(primType); + SelectAArch64Cmp(newOpnd0, CreateImmOperand(0, is64Bits ? PTY_u64 : PTY_u32, false), + true, GetPrimTypeBitSize(dtyp)); + uint32 mopCsneg = is64Bits ? MOP_xcsnegrrrc : MOP_wcsnegrrrc; + /* ABS requires the operand be interpreted as a signed integer */ + CondOperand &condOpnd = GetCondOperand(CC_GE); + Operand &rflag = GetOrCreateRflag(); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mopCsneg, resOpnd, newOpnd0, newOpnd0, + condOpnd, rflag)); + return &resOpnd; + } +} + +Operand *AArch64CGFunc::SelectBnot(UnaryNode &node, Operand &opnd0, const BaseNode &parent) { + PrimType dtype = node.GetPrimType(); + ASSERT(IsPrimitiveInteger(dtype) || IsPrimitiveVectorInteger(dtype), "bnot expect integer or NYI"); + bool is64Bits = (GetPrimTypeBitSize(dtype) == k64BitSize); + bool isSigned = IsSignedInteger(dtype); + RegOperand *resOpnd = nullptr; + if (!IsPrimitiveVector(dtype)) { + /* promoted type */ + PrimType primType = is64Bits ? (isSigned ? PTY_i64 : PTY_u64) : (isSigned ? PTY_i32 : PTY_u32); + resOpnd = &GetOrCreateResOperand(parent, primType); + + Operand &newOpnd0 = LoadIntoRegister(opnd0, primType); + + uint32 mopBnot = is64Bits ? MOP_xnotrr : MOP_wnotrr; + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mopBnot, *resOpnd, newOpnd0)); + } else { + /* vector operand */ + resOpnd = SelectVectorNot(dtype, &opnd0); + } + return resOpnd; +} + +Operand *AArch64CGFunc::SelectBswap(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) { + PrimType dtype = node.GetPrimType(); + auto bitWidth = (GetPrimTypeBitSize(dtype)); + RegOperand *resOpnd = nullptr; + resOpnd = &GetOrCreateResOperand(parent, dtype); + Operand &newOpnd0 = LoadIntoRegister(opnd0, dtype); + uint32 mopBswap = bitWidth == 64 ? MOP_xrevrr : (bitWidth == 32 ? 
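+ /* illustrative: 64-bit -> "rev x0, x0", 32-bit -> "rev w0, w0", 16-bit -> "rev16 w0, w0" */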
MOP_wrevrr : MOP_wrevrr16); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mopBswap, *resOpnd, newOpnd0)); + return resOpnd; +} + +Operand *AArch64CGFunc::SelectRegularBitFieldLoad(ExtractbitsNode &node, const BaseNode &parent) { + PrimType dtype = node.GetPrimType(); + bool isSigned = IsSignedInteger(dtype); + uint8 bitOffset = node.GetBitsOffset(); + uint8 bitSize = node.GetBitsSize(); + bool is64Bits = (GetPrimTypeBitSize(dtype) == k64BitSize); + CHECK_FATAL(!is64Bits, "dest opnd should not be 64bit"); + PrimType destType = GetIntegerPrimTypeBySizeAndSign(bitSize, isSigned); + Operand *result = SelectIread(parent, *static_cast(node.Opnd(0)), + static_cast(bitOffset / k8BitSize), destType); + return result; +} + +Operand *AArch64CGFunc::SelectExtractbits(ExtractbitsNode &node, Operand &srcOpnd, const BaseNode &parent) { + uint8 bitOffset = node.GetBitsOffset(); + uint8 bitSize = node.GetBitsSize(); + RegOperand *srcVecRegOperand = static_cast(&srcOpnd); + if (srcVecRegOperand && srcVecRegOperand->IsRegister() && (srcVecRegOperand->GetSize() == k128BitSize)) { + if ((bitSize == k8BitSize || bitSize == k16BitSize || bitSize == k32BitSize || bitSize == k64BitSize) && + (bitOffset % bitSize) == k0BitSize) { + uint32 lane = bitOffset / bitSize; + PrimType srcVecPtype; + if (bitSize == k64BitSize) { + srcVecPtype = PTY_v2u64; + } else if (bitSize == k32BitSize) { + srcVecPtype = PTY_v4u32; + } else if (bitSize == k16BitSize) { + srcVecPtype = PTY_v8u16; + } else { + srcVecPtype = PTY_v16u8; + } + RegOperand *resRegOperand = SelectVectorGetElement(node.GetPrimType(), + &srcOpnd, srcVecPtype, static_cast(lane)); + return resRegOperand; + } else { + CHECK_FATAL(false, "NYI"); + } + } + PrimType dtype = node.GetPrimType(); + RegOperand &resOpnd = GetOrCreateResOperand(parent, dtype); + bool isSigned = (node.GetOpCode() == OP_sext) ? true : (node.GetOpCode() == OP_zext) ? false : IsSignedInteger(dtype); + bool is64Bits = (GetPrimTypeBitSize(dtype) == k64BitSize); + uint32 immWidth = is64Bits ? kMaxImmVal13Bits : kMaxImmVal12Bits; + Operand &opnd0 = LoadIntoRegister(srcOpnd, dtype); + if (bitOffset == 0) { + if (!isSigned && (bitSize < immWidth)) { + SelectBand(resOpnd, opnd0, CreateImmOperand(static_cast((static_cast(1) << bitSize) - 1), + immWidth, false), dtype); + return &resOpnd; + } else { + MOperator mOp = MOP_undef; + if (bitSize == k8BitSize) { + mOp = is64Bits ? (isSigned ? MOP_xsxtb64 : MOP_undef) : + (isSigned ? MOP_xsxtb32 : (opnd0.GetSize() == k32BitSize ? MOP_xuxtb32 : MOP_undef)); + } else if (bitSize == k16BitSize) { + mOp = is64Bits ? (isSigned ? MOP_xsxth64 : MOP_undef) : + (isSigned ? MOP_xsxth32 : (opnd0.GetSize() == k32BitSize ? MOP_xuxth32 : MOP_undef)); + } else if (bitSize == k32BitSize) { + mOp = is64Bits ? (isSigned ? MOP_xsxtw64 : MOP_xuxtw64) : MOP_wmovrr; + } + if (mOp != MOP_undef) { + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, resOpnd, opnd0)); + return &resOpnd; + } + } + } + uint32 mopBfx = + is64Bits ? (isSigned ? MOP_xsbfxrri6i6 : MOP_xubfxrri6i6) : (isSigned ? 
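+ /*
+  * Illustrative only: the generic path extracts the field with a bitfield
+  * extract, e.g. bits [8, 11] of w1 via "ubfx w0, w1, #8, #4"; sbfx is chosen
+  * instead when the extracted field must be sign-extended.
+  */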
MOP_wsbfxrri5i5 : MOP_wubfxrri5i5); + ImmOperand &immOpnd1 = CreateImmOperand(bitOffset, k8BitSize, false); + ImmOperand &immOpnd2 = CreateImmOperand(bitSize, k8BitSize, false); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mopBfx, resOpnd, opnd0, immOpnd1, immOpnd2)); + return &resOpnd; +} + +/* + * operand fits in MOVK if + * is64Bits && boffst == 0, 16, 32, 48 && bSize == 16, so boffset / 16 == 0, 1, 2, 3; (boffset / 16 ) & (~3) == 0 + * or is32Bits && boffset == 0, 16 && bSize == 16, so boffset / 16 == 0, 1; (boffset / 16) & (~1) == 0 + * imm range of aarch64-movk [0 - 65536] imm16 + */ +inline bool IsMoveWideKeepable(int64 offsetVal, uint32 bitOffset, uint32 bitSize, bool is64Bits) { + ASSERT(is64Bits || (bitOffset < k32BitSize), ""); + bool isOutOfRange = offsetVal < 0; + if (!isOutOfRange) { + isOutOfRange = (static_cast(offsetVal) >> k16BitSize) > 0; + } + return (!isOutOfRange) && + bitSize == k16BitSize && + ((bitOffset >> k16BitShift) & ~static_cast(is64Bits ? 0x3 : 0x1)) == 0; +} + +/* we use the fact that A ^ B ^ A == B, A ^ 0 = A */ +Operand *AArch64CGFunc::SelectDepositBits(DepositbitsNode &node, Operand &opnd0, Operand &opnd1, + const BaseNode &parent) { + uint32 bitOffset = node.GetBitsOffset(); + uint32 bitSize = node.GetBitsSize(); + PrimType regType = node.GetPrimType(); + bool is64Bits = GetPrimTypeBitSize(regType) == k64BitSize; + // deposit does not define opnd0 but bfi does, so we need an extra copy to keep opnd0 wont de defined + Operand *result = &GetOrCreateResOperand(parent, regType); + SelectCopy(*result, regType, opnd0, regType); + /* + * if operand 1 is immediate and fits in MOVK, use it + * MOVK Wd, #imm{, LSL #shift} ; 32-bit general registers + * MOVK Xd, #imm{, LSL #shift} ; 64-bit general registers + */ + if (opnd1.IsIntImmediate() && + IsMoveWideKeepable(static_cast(opnd1).GetValue(), bitOffset, bitSize, is64Bits)) { + RegOperand &resOpnd = GetOrCreateResOperand(parent, regType); + SelectCopy(resOpnd, regType, *result, regType); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn((is64Bits ? MOP_xmovkri16 : MOP_wmovkri16), + resOpnd, opnd1, + *GetLogicalShiftLeftOperand(bitOffset, is64Bits))); + return &resOpnd; + } else { + Operand &movOpnd = LoadIntoRegister(opnd1, regType); + uint32 mopBfi = is64Bits ? MOP_xbfirri6i6 : MOP_wbfirri5i5; + ImmOperand &immOpnd1 = CreateImmOperand(bitOffset, k8BitSize, false); + ImmOperand &immOpnd2 = CreateImmOperand(bitSize, k8BitSize, false); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mopBfi, *result, movOpnd, immOpnd1, immOpnd2)); + return result; + } +} + +Operand *AArch64CGFunc::SelectLnot(UnaryNode &node, Operand &srcOpnd, const BaseNode &parent) { + PrimType dtype = node.GetPrimType(); + RegOperand &resOpnd = GetOrCreateResOperand(parent, dtype); + bool is64Bits = (GetPrimTypeBitSize(dtype) == k64BitSize); + Operand &opnd0 = LoadIntoRegister(srcOpnd, dtype); + SelectAArch64Cmp(opnd0, CreateImmOperand(0, is64Bits ? PTY_u64 : PTY_u32, false), true, GetPrimTypeBitSize(dtype)); + SelectAArch64CSet(resOpnd, GetCondOperand(CC_EQ), is64Bits); + return &resOpnd; +} + +Operand *AArch64CGFunc::SelectNeg(UnaryNode &node, Operand &opnd0, const BaseNode &parent) { + PrimType dtype = node.GetPrimType(); + bool is64Bits = (GetPrimTypeBitSize(dtype) == k64BitSize); + RegOperand *resOpnd = nullptr; + if (!IsPrimitiveVector(dtype)) { + PrimType primType; + if (IsPrimitiveFloat(dtype)) { + primType = dtype; + } else { + primType = is64Bits ? 
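+ /* illustrative: the scalar lowering below is a single "neg w0, w1" (integer) or "fneg d0, d1" (float) */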
(PTY_i64) : (PTY_i32); /* promoted type */ + } + resOpnd = &GetOrCreateResOperand(parent, primType); + SelectNeg(*resOpnd, opnd0, primType); + } else { + /* vector operand */ + resOpnd = SelectVectorNeg(dtype, &opnd0); + } + return resOpnd; +} + +void AArch64CGFunc::SelectNeg(Operand &dest, Operand &srcOpnd, PrimType primType) { + Operand &opnd0 = LoadIntoRegister(srcOpnd, primType); + bool is64Bits = (GetPrimTypeBitSize(primType) == k64BitSize); + MOperator mOp; + if (IsPrimitiveFloat(primType)) { + mOp = is64Bits ? MOP_xfnegrr : MOP_wfnegrr; + } else { + mOp = is64Bits ? MOP_xinegrr : MOP_winegrr; + } + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, dest, opnd0)); +} + +void AArch64CGFunc::SelectMvn(Operand &dest, Operand &src, PrimType primType) { + Operand &opnd0 = LoadIntoRegister(src, primType); + bool is64Bits = (GetPrimTypeBitSize(primType) == k64BitSize); + MOperator mOp; + ASSERT(!IsPrimitiveFloat(primType), "Instruction 'mvn' do not have float version."); + mOp = is64Bits ? MOP_xnotrr : MOP_wnotrr; + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, dest, opnd0)); +} + +Operand *AArch64CGFunc::SelectRecip(UnaryNode &node, Operand &src, const BaseNode &parent) { + /* + * fconsts s15, #112 + * fdivs s0, s15, s0 + */ + PrimType dtype = node.GetPrimType(); + if (!IsPrimitiveFloat(dtype)) { + ASSERT(false, "should be float type"); + return nullptr; + } + Operand &opnd0 = LoadIntoRegister(src, dtype); + RegOperand &resOpnd = GetOrCreateResOperand(parent, dtype); + Operand *one = nullptr; + if (GetPrimTypeBitSize(dtype) == k64BitSize) { + MIRDoubleConst *c = memPool->New(1.0, *GlobalTables::GetTypeTable().GetTypeTable().at(PTY_f64)); + one = SelectDoubleConst(*c, node); + } else if (GetPrimTypeBitSize(dtype) == k32BitSize) { + MIRFloatConst *c = memPool->New(1.0f, *GlobalTables::GetTypeTable().GetTypeTable().at(PTY_f32)); + one = SelectFloatConst(*c, node); + } else { + CHECK_FATAL(false, "we don't support half-precision fp operations yet"); + } + SelectDiv(resOpnd, *one, opnd0, dtype); + return &resOpnd; +} + +Operand *AArch64CGFunc::SelectSqrt(UnaryNode &node, Operand &src, const BaseNode &parent) { + /* + * gcc generates code like below for better accurate + * fsqrts s15, s0 + * fcmps s15, s15 + * fmstat + * beq .L4 + * push {r3, lr} + * bl sqrtf + * pop {r3, pc} + * .L4: + * fcpys s0, s15 + * bx lr + */ + PrimType dtype = node.GetPrimType(); + if (!IsPrimitiveFloat(dtype)) { + ASSERT(false, "should be float type"); + return nullptr; + } + bool is64Bits = (GetPrimTypeBitSize(dtype) == k64BitSize); + Operand &opnd0 = LoadIntoRegister(src, dtype); + RegOperand &resOpnd = GetOrCreateResOperand(parent, dtype); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(is64Bits ? MOP_vsqrtd : MOP_vsqrts, resOpnd, opnd0)); + return &resOpnd; +} + +void AArch64CGFunc::SelectCvtFloat2Int(Operand &resOpnd, Operand &srcOpnd, PrimType itype, PrimType ftype) { + bool is64BitsFloat = (ftype == PTY_f64); + MOperator mOp = 0; + + ASSERT(((ftype == PTY_f64) || (ftype == PTY_f32)), "wrong from type"); + Operand &opnd0 = LoadIntoRegister(srcOpnd, ftype); + switch (itype) { + case PTY_i32: + mOp = !is64BitsFloat ? MOP_vcvtrf : MOP_vcvtrd; + break; + case PTY_u32: + case PTY_a32: + mOp = !is64BitsFloat ? MOP_vcvturf : MOP_vcvturd; + break; + case PTY_i64: + mOp = !is64BitsFloat ? MOP_xvcvtrf : MOP_xvcvtrd; + break; + case PTY_u64: + case PTY_a64: + mOp = !is64BitsFloat ? 
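+ /*
+  * Illustrative only: these are the round-toward-zero converts, e.g. this
+  * unsigned 64-bit case is roughly "fcvtzu x0, s0" (from f32) or
+  * "fcvtzu x0, d0" (from f64); the signed cases above use fcvtzs.
+  */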
MOP_xvcvturf : MOP_xvcvturd; + break; + default: + CHECK_FATAL(false, "unexpected type"); + } + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, resOpnd, opnd0)); +} + +void AArch64CGFunc::SelectCvtInt2Float(Operand &resOpnd, Operand &origOpnd0, PrimType toType, PrimType fromType) { + ASSERT((toType == PTY_f32) || (toType == PTY_f64), "unexpected type"); + bool is64BitsFloat = (toType == PTY_f64); + MOperator mOp = 0; + uint32 fsize = GetPrimTypeBitSize(fromType); + + PrimType itype = (GetPrimTypeBitSize(fromType) == k64BitSize) ? (IsSignedInteger(fromType) ? PTY_i64 : PTY_u64) + : (IsSignedInteger(fromType) ? PTY_i32 : PTY_u32); + + Operand *opnd0 = &LoadIntoRegister(origOpnd0, itype); + + /* need extension before cvt */ + ASSERT(opnd0->IsRegister(), "opnd should be a register operand"); + Operand *srcOpnd = opnd0; + if (IsSignedInteger(fromType) && (fsize < k32BitSize)) { + srcOpnd = &CreateRegisterOperandOfType(itype); + mOp = (fsize == k8BitSize) ? MOP_xsxtb32 : MOP_xsxth32; + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, *srcOpnd, *opnd0)); + } + + switch (itype) { + case PTY_i32: + mOp = !is64BitsFloat ? MOP_vcvtfr : MOP_vcvtdr; + break; + case PTY_u32: + mOp = !is64BitsFloat ? MOP_vcvtufr : MOP_vcvtudr; + break; + case PTY_i64: + mOp = !is64BitsFloat ? MOP_xvcvtfr : MOP_xvcvtdr; + break; + case PTY_u64: + mOp = !is64BitsFloat ? MOP_xvcvtufr : MOP_xvcvtudr; + break; + default: + CHECK_FATAL(false, "unexpected type"); + } + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, resOpnd, *srcOpnd)); +} + +Operand *AArch64CGFunc::SelectIntrinsicOpWithOneParam(IntrinsicopNode &intrnNode, std::string name) { + BaseNode *argexpr = intrnNode.Opnd(0); + PrimType ptype = argexpr->GetPrimType(); + Operand *opnd = HandleExpr(intrnNode, *argexpr); + if (intrnNode.GetIntrinsic() == INTRN_C_ffs) { + ASSERT(intrnNode.GetPrimType() == PTY_i32, "Unexpect Size"); + return SelectAArch64ffs(*opnd, ptype); + } + if (opnd->IsMemoryAccessOperand()) { + RegOperand &ldDest = CreateRegisterOperandOfType(ptype); + Insn &insn = GetInsnBuilder()->BuildInsn(PickLdInsn(GetPrimTypeBitSize(ptype), ptype), ldDest, *opnd); + GetCurBB()->AppendInsn(insn); + opnd = &ldDest; + } + std::vector opndVec; + RegOperand *dst = &CreateRegisterOperandOfType(ptype); + opndVec.push_back(dst); /* result */ + opndVec.push_back(opnd); /* param 0 */ + SelectLibCall(name, opndVec, ptype, ptype); + + return dst; +} + +Operand *AArch64CGFunc::SelectIntrinsicOpWithNParams(IntrinsicopNode &intrnNode, PrimType retType, + const std::string &name) { + MapleVector argNodes = intrnNode.GetNopnd(); + std::vector opndVec; + std::vector opndTypes; + RegOperand *retOpnd = &CreateRegisterOperandOfType(retType); + opndVec.push_back(retOpnd); + opndTypes.push_back(retType); + + for (BaseNode *argexpr : argNodes) { + PrimType ptype = argexpr->GetPrimType(); + Operand *opnd = HandleExpr(intrnNode, *argexpr); + if (opnd->IsMemoryAccessOperand()) { + RegOperand &ldDest = CreateRegisterOperandOfType(ptype); + Insn &insn = GetInsnBuilder()->BuildInsn(PickLdInsn(GetPrimTypeBitSize(ptype), ptype), ldDest, *opnd); + GetCurBB()->AppendInsn(insn); + opnd = &ldDest; + } + opndVec.push_back(opnd); + opndTypes.push_back(ptype); + } + SelectLibCallNArg(name, opndVec, opndTypes, retType, false); + + return retOpnd; +} + +/* According to gcc.target/aarch64/ffs.c */ +Operand *AArch64CGFunc::SelectAArch64ffs(Operand &argOpnd, PrimType argType) { + RegOperand &destOpnd = LoadIntoRegister(argOpnd, argType); + uint32 argSize = GetPrimTypeBitSize(argType); 
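+ /*
+  * Rough sketch of the sequence built below (register names illustrative;
+  * the selector allocates virtual ones); ffs(x) is 1 + ctz(x), and 0 for x == 0:
+  *   cmp   w0, #0
+  *   rbit  w1, w0
+  *   clz   w1, w1            // == ctz(x)
+  *   csinc w1, wzr, w1, eq   // 0 if x == 0, else ctz(x) + 1
+  * e.g. x = 8 (0b1000) gives ctz = 3, so ffs = 4.
+  */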
+ ASSERT((argSize == k64BitSize || argSize == k32BitSize), "Unexpect arg type"); + /* cmp */ + ImmOperand &zeroOpnd = CreateImmOperand(0, argSize, false); + Operand &rflag = GetOrCreateRflag(); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn( + argSize == k64BitSize ? MOP_xcmpri : MOP_wcmpri, rflag, destOpnd, zeroOpnd)); + /* rbit */ + RegOperand *tempResReg = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, GetPrimTypeSize(argType))); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn( + argSize == k64BitSize ? MOP_xrbit : MOP_wrbit, *tempResReg, destOpnd)); + /* clz */ + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn( + argSize == k64BitSize ? MOP_xclz : MOP_wclz, *tempResReg, *tempResReg)); + /* csincc */ + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn( + argSize == k64BitSize ? MOP_xcsincrrrc : MOP_wcsincrrrc, + *tempResReg, GetZeroOpnd(k32BitSize), *tempResReg, GetCondOperand(CC_EQ), rflag)); + return tempResReg; +} + +Operand *AArch64CGFunc::SelectRoundLibCall(RoundType roundType, const TypeCvtNode &node, Operand &opnd0) { + PrimType ftype = node.FromType(); + PrimType rtype = node.GetPrimType(); + bool is64Bits = (ftype == PTY_f64); + std::vector opndVec; + RegOperand *resOpnd; + if (is64Bits) { + resOpnd = &GetOrCreatePhysicalRegisterOperand(D0, k64BitSize, kRegTyFloat); + } else { + resOpnd = &GetOrCreatePhysicalRegisterOperand(S0, k32BitSize, kRegTyFloat); + } + opndVec.push_back(resOpnd); + RegOperand ®Opnd0 = LoadIntoRegister(opnd0, ftype); + opndVec.push_back(®Opnd0); + std::string libName; + if (roundType == kCeil) { + libName.assign(is64Bits ? "ceil" : "ceilf"); + } else if (roundType == kFloor) { + libName.assign(is64Bits ? "floor" : "floorf"); + } else { + libName.assign(is64Bits ? "round" : "roundf"); + } + SelectLibCall(libName, opndVec, ftype, rtype); + + return resOpnd; +} + +Operand *AArch64CGFunc::SelectRoundOperator(RoundType roundType, const TypeCvtNode &node, Operand &opnd0, + const BaseNode &parent) { + PrimType itype = node.GetPrimType(); + if ((mirModule.GetSrcLang() == kSrcLangC) && ((itype == PTY_f64) || (itype == PTY_f32))) { + SelectRoundLibCall(roundType, node, opnd0); + } + PrimType ftype = node.FromType(); + ASSERT(((ftype == PTY_f64) || (ftype == PTY_f32)), "wrong float type"); + bool is64Bits = (ftype == PTY_f64); + RegOperand &resOpnd = GetOrCreateResOperand(parent, itype); + RegOperand ®Opnd0 = LoadIntoRegister(opnd0, ftype); + MOperator mop = MOP_undef; + if (roundType == kCeil) { + mop = is64Bits ? MOP_xvcvtps : MOP_vcvtps; + } else if (roundType == kFloor) { + mop = is64Bits ? MOP_xvcvtms : MOP_vcvtms; + } else { + mop = is64Bits ? 
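+ /*
+  * Illustrative mapping of the float-to-int rounding converts: this branch
+  * (round) is roughly "fcvtas w0, s0" (to nearest, ties away from zero);
+  * ceil and floor above use fcvtps (toward +inf) and fcvtms (toward -inf).
+  */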
MOP_xvcvtas : MOP_vcvtas; + } + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mop, resOpnd, regOpnd0)); + return &resOpnd; +} + +Operand *AArch64CGFunc::SelectCeil(TypeCvtNode &node, Operand &opnd0, const BaseNode &parent) { + return SelectRoundOperator(kCeil, node, opnd0, parent); +} + +/* float to int floor */ +Operand *AArch64CGFunc::SelectFloor(TypeCvtNode &node, Operand &opnd0, const BaseNode &parent) { + return SelectRoundOperator(kFloor, node, opnd0, parent); +} + +Operand *AArch64CGFunc::SelectRound(TypeCvtNode &node, Operand &opnd0, const BaseNode &parent) { + return SelectRoundOperator(kRound, node, opnd0, parent); +} + +static bool LIsPrimitivePointer(PrimType ptype) { + return ((ptype >= PTY_ptr) && (ptype <= PTY_a64)); +} + +Operand *AArch64CGFunc::SelectRetype(TypeCvtNode &node, Operand &opnd0) { + PrimType fromType = node.Opnd(0)->GetPrimType(); + PrimType toType = node.GetPrimType(); + ASSERT(GetPrimTypeSize(fromType) == GetPrimTypeSize(toType), "retype bit widith doesn' match"); + if (LIsPrimitivePointer(fromType) && LIsPrimitivePointer(toType)) { + return &LoadIntoRegister(opnd0, toType); + } + if (IsPrimitiveVector(fromType) || IsPrimitiveVector(toType)) { + return &LoadIntoRegister(opnd0, toType); + } + Operand::OperandType opnd0Type = opnd0.GetKind(); + RegOperand *resOpnd = &CreateRegisterOperandOfType(toType); + if (IsPrimitiveInteger(fromType) || IsPrimitiveFloat(fromType)) { + bool isFromInt = IsPrimitiveInteger(fromType); + bool is64Bits = GetPrimTypeBitSize(fromType) == k64BitSize; + PrimType itype = + isFromInt ? ((GetPrimTypeBitSize(fromType) == k64BitSize) ? (IsSignedInteger(fromType) ? PTY_i64 : PTY_u64) + : (IsSignedInteger(fromType) ? PTY_i32 : PTY_u32)) + : (is64Bits ? PTY_f64 : PTY_f32); + + /* + * if source operand is in memory, + * simply read it as a value of 'toType 'into the dest operand + * and return + */ + if (opnd0Type == Operand::kOpdMem) { + resOpnd = &SelectCopy(opnd0, toType, toType); + return resOpnd; + } + /* according to aarch64 encoding format, convert int to float expression */ + bool isImm = false; + ImmOperand *imm = static_cast(&opnd0); + uint64 val = static_cast(imm->GetValue()); + uint64 canRepreset = is64Bits ? (val & 0xffffffffffff) : (val & 0x7ffff); + uint32 val1 = is64Bits ? (val >> 61) & 0x3 : (val >> 29) & 0x3; + uint32 val2 = is64Bits ? (val >> 54) & 0xff : (val >> 25) & 0x1f; + bool isSame = is64Bits ? ((val2 == 0) || (val2 == 0xff)) : ((val2 == 0) || (val2 == 0x1f)); + canRepreset = (canRepreset == 0) && ((val1 & 0x1) ^ ((val1 & 0x2) >> 1)) && isSame; + Operand *newOpnd0 = &opnd0; + if (IsPrimitiveInteger(fromType) && IsPrimitiveFloat(toType) && canRepreset) { + uint64 temp1 = is64Bits ? (val >> 63) << 7 : (val >> 31) << 7; + uint64 temp2 = is64Bits ? val >> 48 : val >> 19; + int64 imm8 = (temp2 & 0x7f) | temp1; + newOpnd0 = &CreateImmOperand(imm8, k8BitSize, false, kNotVary, true); + isImm = true; + } else { + newOpnd0 = &LoadIntoRegister(opnd0, itype); + } + if ((IsPrimitiveFloat(fromType) && IsPrimitiveInteger(toType)) || + (IsPrimitiveFloat(toType) && IsPrimitiveInteger(fromType))) { + MOperator mopFmov = (isImm ? static_cast(is64Bits ? MOP_xdfmovri : MOP_wsfmovri) : isFromInt) ? + (is64Bits ? MOP_xvmovdr : MOP_xvmovsr) : (is64Bits ? 
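+ /*
+  * Illustrative only: a retype between float and int just moves the raw bit
+  * pattern, e.g. "fmov x0, d0" in this float-to-int branch, "fmov d0, x0" for
+  * the opposite direction above, or "fmov d0, #imm" when the constant is
+  * encodable as an 8-bit float immediate.
+  */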
MOP_xvmovrd : MOP_xvmovrs); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mopFmov, *resOpnd, *newOpnd0)); + return resOpnd; + } else { + return newOpnd0; + } + } else { + CHECK_FATAL(false, "NYI retype"); + } + return nullptr; +} + +void AArch64CGFunc::SelectCvtFloat2Float(Operand &resOpnd, Operand &srcOpnd, PrimType fromType, PrimType toType) { + Operand &opnd0 = LoadIntoRegister(srcOpnd, fromType); + MOperator mOp = 0; + switch (toType) { + case PTY_f32: { + CHECK_FATAL(fromType == PTY_f64, "unexpected cvt from type"); + mOp = MOP_xvcvtfd; + break; + } + case PTY_f64: { + CHECK_FATAL(fromType == PTY_f32, "unexpected cvt from type"); + mOp = MOP_xvcvtdf; + break; + } + default: + CHECK_FATAL(false, "unexpected cvt to type"); + } + + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, resOpnd, opnd0)); +} + +/* + * This should be regarded only as a reference. + * + * C11 specification. + * 6.3.1.3 Signed and unsigned integers + * 1 When a value with integer type is converted to another integer + * type other than _Bool, if the value can be represented by the + * new type, it is unchanged. + * 2 Otherwise, if the new type is unsigned, the value is converted + * by repeatedly adding or subtracting one more than the maximum + * value that can be represented in the new type until the value + * is in the range of the new type.60) + * 3 Otherwise, the new type is signed and the value cannot be + * represented in it; either the result is implementation-defined + * or an implementation-defined signal is raised. + */ +void AArch64CGFunc::SelectCvtInt2Int(const BaseNode *parent, Operand *&resOpnd, Operand *opnd0, PrimType fromType, + PrimType toType) { + uint32 fsize = GetPrimTypeBitSize(fromType); + if (fromType == PTY_i128 || fromType == PTY_u128) { + fsize = k64BitSize; + } + uint32 tsize = GetPrimTypeBitSize(toType); + if (toType == PTY_i128 || toType == PTY_u128) { + tsize = k64BitSize; + } + bool isExpand = tsize > fsize; + bool is64Bit = (tsize == k64BitSize); + if ((parent != nullptr) && opnd0->IsIntImmediate() && + ((parent->GetOpCode() == OP_band) || (parent->GetOpCode() == OP_bior) || (parent->GetOpCode() == OP_bxor) || + (parent->GetOpCode() == OP_ashr) || (parent->GetOpCode() == OP_lshr) || (parent->GetOpCode() == OP_shl))) { + ImmOperand *simm = static_cast(opnd0); + ASSERT(simm != nullptr, "simm is nullptr in AArch64CGFunc::SelectCvtInt2Int"); + bool isSign = false; + int64 origValue = simm->GetValue(); + int64 newValue = origValue; + int64 signValue = 0; + if (!isExpand) { + /* 64--->32 */ + if (fsize > tsize) { + if (IsSignedInteger(toType)) { + if (origValue < 0) { + signValue = static_cast(0xFFFFFFFFFFFFFFFFLL & (1ULL << static_cast(tsize))); + } + newValue = static_cast((static_cast(origValue) & ((1ULL << static_cast(tsize)) - 1u)) | + static_cast(signValue)); + } else { + newValue = static_cast(origValue) & ((1ULL << static_cast(tsize)) - 1u); + } + } + } + if (IsSignedInteger(toType)) { + isSign = true; + } + resOpnd = &static_cast(CreateImmOperand(newValue, GetPrimTypeSize(toType) * kBitsPerByte, isSign)); + return; + } + if (isExpand) { /* Expansion */ + /* if cvt expr's parent is add,and,xor and some other,we can use the imm version */ + PrimType primType = + ((fsize == k64BitSize) ? (IsSignedInteger(fromType) ? PTY_i64 : PTY_u64) : (IsSignedInteger(fromType) ? 
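+ /*
+  * Illustrative widening patterns selected below: signed sources use
+  * sxtb/sxth/sxtw, unsigned ones are masked ("and x0, x1, #0xff" / "#0xffff")
+  * or zero-extended with uxtb/uxth/uxtw.
+  */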
+ PTY_i32 : PTY_u32)); + opnd0 = &LoadIntoRegister(*opnd0, primType); + + if (IsSignedInteger(fromType)) { + ASSERT((is64Bit || (fsize == k8BitSize || fsize == k16BitSize)), "incorrect from size"); + + MOperator mOp = + (is64Bit ? ((fsize == k8BitSize) ? MOP_xsxtb64 : ((fsize == k16BitSize) ? MOP_xsxth64 : MOP_xsxtw64)) + : ((fsize == k8BitSize) ? MOP_xsxtb32 : MOP_xsxth32)); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, *resOpnd, *opnd0)); + } else { + /* Unsigned */ + if (is64Bit) { + if (fsize == k8BitSize) { + ImmOperand &immOpnd = CreateImmOperand(0xff, k64BitSize, false); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xandrri13, *resOpnd, *opnd0, immOpnd)); + } else if (fsize == k16BitSize) { + ImmOperand &immOpnd = CreateImmOperand(0xffff, k64BitSize, false); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xandrri13, *resOpnd, *opnd0, immOpnd)); + } else { + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xuxtw64, *resOpnd, *opnd0)); + } + } else { + ASSERT(((fsize == k8BitSize) || (fsize == k16BitSize)), "incorrect from size"); + if (fsize == k8BitSize) { + static_cast(opnd0)->SetValidBitsNum(k8BitSize); + static_cast(resOpnd)->SetValidBitsNum(k8BitSize); + } + if (fromType == PTY_u1) { + static_cast(opnd0)->SetValidBitsNum(1); + static_cast(resOpnd)->SetValidBitsNum(1); + } + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn( + (fsize == k8BitSize) ? MOP_xuxtb32 : MOP_xuxth32, *resOpnd, *opnd0)); + } + } + } else { /* Same size or truncate */ +#ifdef CNV_OPTIMIZE + /* + * No code needed for aarch64 with same reg. + * Just update regno. + */ + RegOperand *reg = static_cast(resOpnd); + reg->regNo = static_cast(opnd0)->regNo; +#else + /* + * This is not really needed if opnd0 is result from a load. + * Hopefully the FE will get rid of the redundant conversions for loads. + */ + PrimType primType = ((fsize == k64BitSize) ? (IsSignedInteger(fromType) ? PTY_i64 : PTY_u64) + : (IsSignedInteger(fromType) ? PTY_i32 : PTY_u32)); + opnd0 = &LoadIntoRegister(*opnd0, primType); + + if (fsize > tsize) { + if (tsize == k8BitSize) { + MOperator mOp = IsSignedInteger(toType) ? MOP_xsxtb32 : MOP_xuxtb32; + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, *resOpnd, *opnd0)); + } else if (tsize == k16BitSize) { + MOperator mOp = IsSignedInteger(toType) ? MOP_xsxth32 : MOP_xuxth32; + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, *resOpnd, *opnd0)); + } else { + MOperator mOp = IsSignedInteger(toType) ? MOP_xsbfxrri6i6 : MOP_xubfxrri6i6; + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, *resOpnd, *opnd0, + CreateImmOperand(0, k8BitSize, false), + CreateImmOperand(tsize, k8BitSize, false))); + } + } else { + /* same size, so resOpnd can be set */ + if ((mirModule.IsJavaModule()) || (IsSignedInteger(fromType) == IsSignedInteger(toType)) || + (GetPrimTypeSize(toType) >= k4BitSize)) { + resOpnd = opnd0; + } else if (IsUnsignedInteger(toType)) { + MOperator mop; + switch (toType) { + case PTY_u8: + mop = MOP_xuxtb32; + break; + case PTY_u16: + mop = MOP_xuxth32; + break; + default: + CHECK_FATAL(false, "Unhandled unsigned convert"); + } + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mop, *resOpnd, *opnd0)); + } else { + /* signed target */ + uint32 size = GetPrimTypeSize(toType); + MOperator mop; + switch (toType) { + case PTY_i8: + mop = (size > k4BitSize) ? MOP_xsxtb64 : MOP_xsxtb32; + break; + case PTY_i16: + mop = (size > k4BitSize) ? 
MOP_xsxth64 : MOP_xsxth32; + break; + default: + CHECK_FATAL(0, "Unhandled unsigned convert"); + } + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mop, *resOpnd, *opnd0)); + } + } +#endif + } +} + +Operand *AArch64CGFunc::SelectCvt(const BaseNode &parent, TypeCvtNode &node, Operand &opnd0) { + PrimType fromType = node.FromType(); + PrimType toType = node.GetPrimType(); + if (fromType == toType) { + return &opnd0; /* noop */ + } + Operand *resOpnd = &GetOrCreateResOperand(parent, toType); + if (IsPrimitiveFloat(toType) && IsPrimitiveInteger(fromType)) { + SelectCvtInt2Float(*resOpnd, opnd0, toType, fromType); + } else if (IsPrimitiveFloat(fromType) && IsPrimitiveInteger(toType)) { + SelectCvtFloat2Int(*resOpnd, opnd0, toType, fromType); + } else if (IsPrimitiveInteger(fromType) && IsPrimitiveInteger(toType)) { + SelectCvtInt2Int(&parent, resOpnd, &opnd0, fromType, toType); + } else if (IsPrimitiveVector(toType) || IsPrimitiveVector(fromType)) { + CHECK_FATAL(IsPrimitiveVector(toType) && IsPrimitiveVector(fromType), "Invalid vector cvt operands"); + SelectVectorCvt(resOpnd, toType, &opnd0, fromType); + } else { /* both are float type */ + SelectCvtFloat2Float(*resOpnd, opnd0, fromType, toType); + } + return resOpnd; +} + +Operand *AArch64CGFunc::SelectTrunc(TypeCvtNode &node, Operand &opnd0, const BaseNode &parent) { + PrimType ftype = node.FromType(); + bool is64Bits = (GetPrimTypeBitSize(node.GetPrimType()) == k64BitSize); + PrimType itype = (is64Bits) ? (IsSignedInteger(node.GetPrimType()) ? PTY_i64 : PTY_u64) + : (IsSignedInteger(node.GetPrimType()) ? PTY_i32 : PTY_u32); /* promoted type */ + RegOperand &resOpnd = GetOrCreateResOperand(parent, itype); + SelectCvtFloat2Int(resOpnd, opnd0, itype, ftype); + return &resOpnd; +} + +void AArch64CGFunc::SelectSelect(Operand &resOpnd, Operand &condOpnd, Operand &trueOpnd, Operand &falseOpnd, + PrimType dtype, PrimType ctype, bool hasCompare, ConditionCode cc) { + ASSERT(&resOpnd != &condOpnd, "resOpnd cannot be the same as condOpnd"); + bool isIntType = IsPrimitiveInteger(dtype); + ASSERT((IsPrimitiveInteger(dtype) || IsPrimitiveFloat(dtype)), "unknown type for select"); + // making condOpnd and cmpInsn closer will provide more opportunity for opt + Operand &newTrueOpnd = LoadIntoRegister(trueOpnd, dtype); + Operand &newFalseOpnd = LoadIntoRegister(falseOpnd, dtype); + Operand &newCondOpnd = LoadIntoRegister(condOpnd, ctype); + if (hasCompare) { + SelectAArch64Cmp(newCondOpnd, CreateImmOperand(0, ctype, false), true, GetPrimTypeBitSize(ctype)); + cc = CC_NE; + } + Operand &newResOpnd = LoadIntoRegister(resOpnd, dtype); + SelectAArch64Select(newResOpnd, newTrueOpnd, newFalseOpnd, + GetCondOperand(cc), isIntType, GetPrimTypeBitSize(dtype)); +} + +Operand *AArch64CGFunc::SelectSelect(TernaryNode &node, Operand &opnd0, Operand &trueOpnd, Operand &falseOpnd, + const BaseNode &parent, bool hasCompare) { + PrimType dtype = node.GetPrimType(); + PrimType ctype = node.Opnd(0)->GetPrimType(); + + ConditionCode cc = CC_NE; + Opcode opcode = node.Opnd(0)->GetOpCode(); + PrimType cmpType = static_cast(node.Opnd(0))->GetOpndType(); + bool isFloat = false; + bool unsignedIntegerComparison = false; + if (!IsPrimitiveVector(cmpType)) { + isFloat = IsPrimitiveFloat(cmpType); + unsignedIntegerComparison = !isFloat && !IsSignedInteger(cmpType); + } else { + isFloat = IsPrimitiveVectorFloat(cmpType); + unsignedIntegerComparison = !isFloat && IsPrimitiveUnSignedVector(cmpType); + } + switch (opcode) { + case OP_eq: + cc = CC_EQ; + break; + case OP_ne: + cc = 
CC_NE; + break; + case OP_le: + cc = unsignedIntegerComparison ? CC_LS : CC_LE; + break; + case OP_ge: + cc = unsignedIntegerComparison ? CC_HS : CC_GE; + break; + case OP_gt: + cc = unsignedIntegerComparison ? CC_HI : CC_GT; + break; + case OP_lt: + cc = unsignedIntegerComparison ? CC_LO : CC_LT; + break; + default: + hasCompare = true; + break; + } + if (!IsPrimitiveVector(dtype)) { + RegOperand &resOpnd = GetOrCreateResOperand(parent, dtype); + SelectSelect(resOpnd, opnd0, trueOpnd, falseOpnd, dtype, ctype, hasCompare, cc); + return &resOpnd; + } else { + return SelectVectorSelect(opnd0, dtype, trueOpnd, falseOpnd); + } +} + +/* + * syntax: select (, , ) + * must be of integer type. + * and must be of the type given by . + * If is not 0, return . Otherwise, return . + */ +void AArch64CGFunc::SelectAArch64Select(Operand &dest, Operand &opnd0, Operand &opnd1, CondOperand &cond, + bool isIntType, uint32 is64bits) { + uint32 mOpCode = isIntType ? ((is64bits == k64BitSize) ? MOP_xcselrrrc : MOP_wcselrrrc) + : ((is64bits == k64BitSize) ? MOP_dcselrrrc + : ((is64bits == k32BitSize) ? MOP_scselrrrc : MOP_hcselrrrc)); + Operand &rflag = GetOrCreateRflag(); + if (opnd1.IsImmediate()) { + uint32 movOp = (is64bits == k64BitSize ? MOP_xmovri64 : MOP_wmovri32); + RegOperand &movDest = CreateVirtualRegisterOperand( + NewVReg(kRegTyInt, (is64bits == k64BitSize) ? k8ByteSize : k4ByteSize)); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(movOp, movDest, opnd1)); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOpCode, dest, opnd0, movDest, cond, rflag)); + return; + } + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOpCode, dest, opnd0, opnd1, cond, rflag)); +} + +void AArch64CGFunc::SelectRangeGoto(RangeGotoNode &rangeGotoNode, Operand &srcOpnd) { + const SmallCaseVector &switchTable = rangeGotoNode.GetRangeGotoTable(); + MIRType *etype = GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_a64)); + /* + * we store 8-byte displacement ( jump_label - offset_table_address ) + * in the table. 
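+ * A rough picture of the pattern built below (labels and registers are
+ * illustrative; the selector uses virtual registers):
+ *   sub  x9, x9, #minIdx           // bias the selector by the smallest case
+ *   adrp x10, .LB_func3
+ *   add  x10, x10, :lo12:.LB_func3
+ *   ldr  x11, [x10, x9, lsl #3]    // 8-byte displacement entry
+ *   add  x11, x10, x11             // table address + displacement = target
+ *   br   x11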
Refer to AArch64Emit::Emit() in aarch64emit.cpp + */ + std::vector sizeArray; + sizeArray.emplace_back(switchTable.size()); + MIRArrayType *arrayType = memPool->New(etype->GetTypeIndex(), sizeArray); + MIRAggConst *arrayConst = memPool->New(mirModule, *arrayType); + for (const auto &itPair : switchTable) { + LabelIdx labelIdx = itPair.second; + GetCurBB()->PushBackRangeGotoLabel(labelIdx); + MIRConst *mirConst = memPool->New(labelIdx, GetFunction().GetPuidx(), *etype); + arrayConst->AddItem(mirConst, 0); + } + + MIRSymbol *lblSt = GetFunction().GetSymTab()->CreateSymbol(kScopeLocal); + lblSt->SetStorageClass(kScFstatic); + lblSt->SetSKind(kStConst); + lblSt->SetTyIdx(arrayType->GetTypeIndex()); + lblSt->SetKonst(arrayConst); + std::string lblStr(".LB_"); + MIRSymbol *funcSt = GlobalTables::GetGsymTable().GetSymbolFromStidx(GetFunction().GetStIdx().Idx()); + uint32 labelIdxTmp = GetLabelIdx(); + lblStr.append(funcSt->GetName()).append(std::to_string(labelIdxTmp++)); + SetLabelIdx(labelIdxTmp); + lblSt->SetNameStrIdx(lblStr); + AddEmitSt(GetCurBB()->GetId(), *lblSt); + + PrimType itype = rangeGotoNode.Opnd(0)->GetPrimType(); + Operand &opnd0 = LoadIntoRegister(srcOpnd, itype); + + regno_t vRegNO = NewVReg(kRegTyInt, 8u); + RegOperand *addOpnd = &CreateVirtualRegisterOperand(vRegNO); + + int32 minIdx = switchTable[0].first; + SelectAdd(*addOpnd, opnd0, + CreateImmOperand(-static_cast(minIdx) - static_cast(rangeGotoNode.GetTagOffset()), + GetPrimTypeBitSize(itype), true), + itype); + + /* contains the index */ + if (addOpnd->GetSize() != GetPrimTypeBitSize(PTY_u64)) { + addOpnd = static_cast(&SelectCopy(*addOpnd, PTY_u64, PTY_u64)); + } + + RegOperand &baseOpnd = CreateRegisterOperandOfType(PTY_u64); + StImmOperand &stOpnd = CreateStImmOperand(*lblSt, 0, 0); + + /* load the address of the switch table */ + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xadrp, baseOpnd, stOpnd)); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xadrpl12, baseOpnd, baseOpnd, stOpnd)); + + /* load the displacement into a register by accessing memory at base + index*8 */ + Operand *disp = + CreateMemOperand(MemOperand::kAddrModeBOrX, k64BitSize, baseOpnd, *addOpnd, k8BitShift); + RegOperand &tgt = CreateRegisterOperandOfType(PTY_a64); + SelectAdd(tgt, baseOpnd, *disp, PTY_u64); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xbr, tgt)); +} + +Operand *AArch64CGFunc::SelectLazyLoad(Operand &opnd0, PrimType primType) { + ASSERT(opnd0.IsRegister(), "wrong type."); + RegOperand &resOpnd = CreateRegisterOperandOfType(primType); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_lazy_ldr, resOpnd, opnd0)); + return &resOpnd; +} + +Operand *AArch64CGFunc::SelectLazyLoadStatic(MIRSymbol &st, int64 offset, PrimType primType) { + StImmOperand &srcOpnd = CreateStImmOperand(st, offset, 0); + RegOperand &resOpnd = CreateRegisterOperandOfType(primType); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_lazy_ldr_static, resOpnd, srcOpnd)); + return &resOpnd; +} + +Operand *AArch64CGFunc::SelectLoadArrayClassCache(MIRSymbol &st, int64 offset, PrimType primType) { + StImmOperand &srcOpnd = CreateStImmOperand(st, offset, 0); + RegOperand &resOpnd = CreateRegisterOperandOfType(primType); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_arrayclass_cache_ldr, resOpnd, srcOpnd)); + return &resOpnd; +} + +Operand *AArch64CGFunc::SelectAlloca(UnaryNode &node, Operand &opnd0) { + if (!CGOptions::IsArm64ilp32()) { + ASSERT((node.GetPrimType() == PTY_a64), "wrong type"); + } + if 
(GetCG()->IsLmbc()) { + SetHasVLAOrAlloca(true); + } + PrimType stype = node.Opnd(0)->GetPrimType(); + Operand *resOpnd = &opnd0; + if (GetPrimTypeBitSize(stype) < GetPrimTypeBitSize(PTY_u64)) { + resOpnd = &CreateRegisterOperandOfType(PTY_u64); + SelectCvtInt2Int(nullptr, resOpnd, &opnd0, stype, PTY_u64); + } + + RegOperand &aliOp = CreateRegisterOperandOfType(PTY_u64); + + SelectAdd(aliOp, *resOpnd, CreateImmOperand(kAarch64StackPtrAlignment - 1, k64BitSize, true), PTY_u64); + Operand &shifOpnd = CreateImmOperand(__builtin_ctz(kAarch64StackPtrAlignment), k64BitSize, true); + SelectShift(aliOp, aliOp, shifOpnd, kShiftLright, PTY_u64); + SelectShift(aliOp, aliOp, shifOpnd, kShiftLeft, PTY_u64); + Operand &spOpnd = GetOrCreatePhysicalRegisterOperand(RSP, k64BitSize, kRegTyInt); + SelectSub(spOpnd, spOpnd, aliOp, PTY_u64); + int64 allocaOffset = GetMemlayout()->SizeOfArgsToStackPass(); + if (GetCG()->IsLmbc()) { + allocaOffset -= kDivide2 * k8ByteSize; + } + if (allocaOffset > 0) { + RegOperand &resallo = CreateRegisterOperandOfType(PTY_u64); + SelectAdd(resallo, spOpnd, CreateImmOperand(allocaOffset, k64BitSize, true), PTY_u64); + return &resallo; + } else { + return &SelectCopy(spOpnd, PTY_u64, PTY_u64); + } +} + +Operand *AArch64CGFunc::SelectMalloc(UnaryNode &node, Operand &opnd0) { + PrimType retType = node.GetPrimType(); + ASSERT((retType == PTY_a64), "wrong type"); + + std::vector opndVec; + RegOperand &resOpnd = CreateRegisterOperandOfType(retType); + opndVec.emplace_back(&resOpnd); + opndVec.emplace_back(&opnd0); + /* Use calloc to make sure allocated memory is zero-initialized */ + const std::string &funcName = "calloc"; + PrimType srcPty = PTY_u64; + if (opnd0.GetSize() <= k32BitSize) { + srcPty = PTY_u32; + } + Operand &opnd1 = CreateImmOperand(1, srcPty, false); + opndVec.emplace_back(&opnd1); + SelectLibCall(funcName, opndVec, srcPty, retType); + return &resOpnd; +} + +Operand *AArch64CGFunc::SelectGCMalloc(GCMallocNode &node) { + PrimType retType = node.GetPrimType(); + ASSERT((retType == PTY_a64), "wrong type"); + + /* Get the size and alignment of the type. 
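+ * (Roughly: both become immediate arguments of the MCC_NewObj(size, align)
+ * libcall selected below, whose result is the new object reference.)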
*/ + TyIdx tyIdx = node.GetTyIdx(); + uint64 size = GetBecommon().GetTypeSize(tyIdx); + int64 align = static_cast(RTSupport::GetRTSupportInstance().GetObjectAlignment()); + + /* Generate the call to MCC_NewObj */ + Operand &opndSize = CreateImmOperand(static_cast(size), k64BitSize, false); + Operand &opndAlign = CreateImmOperand(align, k64BitSize, false); + + RegOperand &resOpnd = CreateRegisterOperandOfType(retType); + + std::vector opndVec{ &resOpnd, &opndSize, &opndAlign }; + + const std::string &funcName = "MCC_NewObj"; + SelectLibCall(funcName, opndVec, PTY_u64, retType); + + return &resOpnd; +} + +Operand *AArch64CGFunc::SelectJarrayMalloc(JarrayMallocNode &node, Operand &opnd0) { + PrimType retType = node.GetPrimType(); + ASSERT((retType == PTY_a64), "wrong type"); + + /* Extract jarray type */ + TyIdx tyIdx = node.GetTyIdx(); + MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx); + ASSERT(type != nullptr, "nullptr check"); + CHECK_FATAL(type->GetKind() == kTypeJArray, "expect MIRJarrayType"); + auto jaryType = static_cast(type); + uint64 fixedSize = static_cast(RTSupport::GetRTSupportInstance().GetArrayContentOffset()); + + MIRType *elemType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(jaryType->GetElemTyIdx()); + PrimType elemPrimType = elemType->GetPrimType(); + uint64 elemSize = GetPrimTypeSize(elemPrimType); + + /* Generate the cal to MCC_NewObj_flexible */ + Operand &opndFixedSize = CreateImmOperand(PTY_u64, static_cast(fixedSize)); + Operand &opndElemSize = CreateImmOperand(PTY_u64, static_cast(elemSize)); + + Operand *opndNElems = &opnd0; + + Operand *opndNElems64 = &static_cast(CreateRegisterOperandOfType(PTY_u64)); + SelectCvtInt2Int(nullptr, opndNElems64, opndNElems, PTY_u32, PTY_u64); + + Operand &opndAlign = CreateImmOperand(PTY_u64, + static_cast(RTSupport::GetRTSupportInstance().GetObjectAlignment())); + + RegOperand &resOpnd = CreateRegisterOperandOfType(retType); + + std::vector opndVec{ &resOpnd, &opndFixedSize, &opndElemSize, opndNElems64, &opndAlign }; + + const std::string &funcName = "MCC_NewObj_flexible"; + SelectLibCall(funcName, opndVec, PTY_u64, retType); + + /* Generate the store of the object length field */ + MemOperand &opndArrayLengthField = CreateMemOpnd(resOpnd, + static_cast(RTSupport::GetRTSupportInstance().GetArrayLengthOffset()), k4BitSize); + RegOperand *regOpndNElems = &SelectCopy(*opndNElems, PTY_u32, PTY_u32); + ASSERT(regOpndNElems != nullptr, "null ptr check!"); + SelectCopy(opndArrayLengthField, PTY_u32, *regOpndNElems, PTY_u32); + + return &resOpnd; +} + +bool AArch64CGFunc::IsRegRematCand(const RegOperand ®) const { + MIRPreg *preg = GetPseudoRegFromVirtualRegNO(reg.GetRegisterNumber(), CGOptions::DoCGSSA()); + if (preg != nullptr && preg->GetOp() != OP_undef) { + if (preg->GetOp() == OP_constval && cg->GetRematLevel() >= 1) { + return true; + } else if (preg->GetOp() == OP_addrof && cg->GetRematLevel() >= 2) { + return true; + } else if (preg->GetOp() == OP_iread && cg->GetRematLevel() >= 4) { + return true; + } else { + return false; + } + } else { + return false; + } +} + +void AArch64CGFunc::ClearRegRematInfo(const RegOperand ®) const { + MIRPreg *preg = GetPseudoRegFromVirtualRegNO(reg.GetRegisterNumber(), CGOptions::DoCGSSA()); + if (preg != nullptr && preg->GetOp() != OP_undef) { + preg->SetOp(OP_undef); + } +} + +bool AArch64CGFunc::IsRegSameRematInfo(const RegOperand ®Dest, const RegOperand ®Src) const { + MIRPreg *pregDest = GetPseudoRegFromVirtualRegNO(regDest.GetRegisterNumber(), CGOptions::DoCGSSA()); + 
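+ /*
+  * Both operands must resolve to the same pseudo-register, and its defining op
+  * must be remat-eligible at the current level (constval needs level >= 1,
+  * addrof >= 2, iread >= 4), matching IsRegRematCand above.
+  */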
MIRPreg *pregSrc = GetPseudoRegFromVirtualRegNO(regSrc.GetRegisterNumber(), CGOptions::DoCGSSA()); + if (pregDest != nullptr && pregDest == pregSrc) { + if (pregDest->GetOp() == OP_constval && cg->GetRematLevel() >= 1) { + return true; + } else if (pregDest->GetOp() == OP_addrof && cg->GetRematLevel() >= 2) { + return true; + } else if (pregDest->GetOp() == OP_iread && cg->GetRematLevel() >= 4) { + return true; + } else { + return false; + } + } else { + return false; + } +} + +void AArch64CGFunc::ReplaceOpndInInsn(RegOperand ®Dest, RegOperand ®Src, Insn &insn, regno_t destNO) { + auto opndNum = static_cast(insn.GetOperandSize()); + for (int i = opndNum - 1; i >= 0; --i) { + Operand &opnd = insn.GetOperand(static_cast(i)); + if (opnd.IsList()) { + std::list tempRegStore; + auto& opndList = static_cast(opnd).GetOperands(); + bool needReplace = false; + for (auto it = opndList.cbegin(), end = opndList.cend(); it != end; ++it) { + auto *regOpnd = *it; + if (regOpnd->GetRegisterNumber() == destNO) { + needReplace = true; + if (regDest.GetSize() != regSrc.GetSize()) { + regDest.SetRegisterNumber(regSrc.GetRegisterNumber()); + tempRegStore.push_back(®Dest); + } else { + tempRegStore.push_back(®Src); + } + } else { + tempRegStore.push_back(regOpnd); + } + } + if (needReplace) { + opndList.clear(); + for (auto &newOpnd : std::as_const(tempRegStore)) { + static_cast(opnd).PushOpnd(*newOpnd); + } + } + } else if (opnd.IsMemoryAccessOperand()) { + auto &memOpnd = static_cast(opnd); + RegOperand *baseRegOpnd = memOpnd.GetBaseRegister(); + RegOperand *indexRegOpnd = memOpnd.GetIndexRegister(); + MemOperand *newMem = static_cast(memOpnd.Clone(*GetMemoryPool())); + if ((baseRegOpnd != nullptr && baseRegOpnd->GetRegisterNumber() == destNO) || + (indexRegOpnd != nullptr && indexRegOpnd->GetRegisterNumber() == destNO)) { + if (baseRegOpnd != nullptr && baseRegOpnd->GetRegisterNumber() == destNO) { + if (regDest.GetSize() != regSrc.GetSize()) { + regDest.SetRegisterNumber(regSrc.GetRegisterNumber()); + newMem->SetBaseRegister(regDest); + } else { + newMem->SetBaseRegister(regSrc); + } + } + if (indexRegOpnd != nullptr && indexRegOpnd->GetRegisterNumber() == destNO) { + if (regDest.GetSize() != regSrc.GetSize()) { + regDest.SetRegisterNumber(regSrc.GetRegisterNumber()); + newMem->SetIndexRegister(regDest); + } else { + newMem->SetIndexRegister(regSrc); + } + } + insn.SetMemOpnd(&GetOrCreateMemOpnd(*newMem)); + } + } else if (opnd.IsRegister()) { + auto ®Opnd = static_cast(opnd); + if (regOpnd.GetRegisterNumber() == destNO) { + ASSERT(regOpnd.GetRegisterNumber() != kRFLAG, "both condi and reg"); + if (regDest.GetSize() != regSrc.GetSize()) { + regOpnd.SetRegisterNumber(regSrc.GetRegisterNumber()); + } else { + insn.SetOperand(static_cast(i), regSrc); + } + } + } + } +} + +void AArch64CGFunc::CleanupDeadMov(bool dumpInfo) { + /* clean dead mov. 
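+ * i.e. remove self-copies such as "mov w10, w10" (or their fmov equivalents)
+ * where source and destination are the same virtual register, typically left
+ * behind by phi elimination; copies involving physical registers are skipped,
+ * and unremovable phi moves are only reported when dumpInfo is set.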
*/ + FOR_ALL_BB(bb, this) { + FOR_BB_INSNS_SAFE(insn, bb, ninsn) { + if (!insn->IsMachineInstruction()) { + continue; + } + if (insn->GetMachineOpcode() == MOP_xmovrr || insn->GetMachineOpcode() == MOP_wmovrr || + insn->GetMachineOpcode() == MOP_xvmovs || insn->GetMachineOpcode() == MOP_xvmovd) { + RegOperand ®Dest = static_cast(insn->GetOperand(kInsnFirstOpnd)); + RegOperand ®Src = static_cast(insn->GetOperand(kInsnSecondOpnd)); + if (!regSrc.IsVirtualRegister() || !regDest.IsVirtualRegister()) { + continue; + } + + if (regSrc.GetRegisterNumber() == regDest.GetRegisterNumber()) { + bb->RemoveInsn(*insn); + } else if (insn->IsPhiMovInsn() && dumpInfo) { + LogInfo::MapleLogger() << "fail to remove mov: " << regDest.GetRegisterNumber() << " <- " + << regSrc.GetRegisterNumber() << std::endl; + } + } + } + } +} + +void AArch64CGFunc::GetRealCallerSaveRegs(const Insn &insn, std::set &realSaveRegs) { + auto *targetOpnd = insn.GetCallTargetOperand(); + CHECK_FATAL(targetOpnd != nullptr, "target is null in AArch64Insn::IsCallToFunctionThatNeverReturns"); + if (CGOptions::DoIPARA() && targetOpnd->IsFuncNameOpnd()) { + FuncNameOperand *target = static_cast(targetOpnd); + const MIRSymbol *funcSt = target->GetFunctionSymbol(); + ASSERT(funcSt->GetSKind() == kStFunc, "funcst must be a function name symbol"); + MIRFunction *func = funcSt->GetFunction(); + if (func != nullptr && func->IsReferedRegsValid()) { + for (auto preg : func->GetReferedRegs()) { + if (AArch64Abi::IsCallerSaveReg(static_cast(preg))) { + realSaveRegs.insert(preg); + } + } + return; + } + } + for (uint32 i = R0; i <= kMaxRegNum; ++i) { + if (AArch64Abi::IsCallerSaveReg(static_cast(i))) { + realSaveRegs.insert(i); + } + } +} + +RegOperand &AArch64CGFunc::GetZeroOpnd(uint32 bitLen) { + /* + * It is possible to have a bitLen < 32, eg stb. + * Set it to 32 if it is less than 32. + */ + if (bitLen < k32BitSize) { + bitLen = k32BitSize; + } + ASSERT((bitLen == k32BitSize || bitLen == k64BitSize), "illegal bit length = %d", bitLen); + return (bitLen == k32BitSize) ? GetOrCreatePhysicalRegisterOperand(RZR, k32BitSize, kRegTyInt) : + GetOrCreatePhysicalRegisterOperand(RZR, k64BitSize, kRegTyInt); +} + +bool AArch64CGFunc::IsFrameReg(const RegOperand &opnd) const { + if (opnd.GetRegisterNumber() == RFP) { + return true; + } else { + return false; + } +} + +bool AArch64CGFunc::IsSaveReg(const RegOperand ®, MIRType &mirType, BECommon &cgBeCommon) const { + AArch64CallConvImpl retLocator(cgBeCommon); + CCLocInfo retMechanism; + retLocator.InitReturnInfo(mirType, retMechanism); + if (retMechanism.GetRegCount() > 0) { + return reg.GetRegisterNumber() == retMechanism.GetReg0() || reg.GetRegisterNumber() == retMechanism.GetReg1() || + reg.GetRegisterNumber() == retMechanism.GetReg2() || reg.GetRegisterNumber() == retMechanism.GetReg3(); + } + return false; +} + +bool AArch64CGFunc::IsSPOrFP(const RegOperand &opnd) const { + const RegOperand ®Opnd = static_cast(opnd); + regno_t regNO = opnd.GetRegisterNumber(); + return (regOpnd.IsPhysicalRegister() && + (regNO == RSP || regNO == RFP || (regNO == R29 && CGOptions::UseFramePointer()))); +} + +bool AArch64CGFunc::IsReturnReg(const RegOperand &opnd) const { + regno_t regNO = opnd.GetRegisterNumber(); + return (regNO == R0) || (regNO == V0); +} + +/* + * This function returns true to indicate that the clean up code needs to be generated, + * otherwise it does not need. In GCOnly mode, it always returns false. 
+ */ +bool AArch64CGFunc::NeedCleanup() { + if (CGOptions::IsGCOnly()) { + return false; + } + AArch64MemLayout *layout = static_cast(GetMemlayout()); + if (layout->GetSizeOfRefLocals() > 0) { + return true; + } + for (uint32 i = 0; i < GetFunction().GetFormalCount(); i++) { + TypeAttrs ta = GetFunction().GetNthParamAttr(i); + if (ta.GetAttr(ATTR_localrefvar)) { + return true; + } + } + + return false; +} + +/* + * bb must be the cleanup bb. + * this function must be invoked before register allocation. + * extended epilogue is specific for fast exception handling and is made up of + * clean up code and epilogue. + * clean up code is generated here while epilogue is generated in GeneratePrologEpilog() + */ +void AArch64CGFunc::GenerateCleanupCodeForExtEpilog(BB &bb) { + ASSERT(GetLastBB()->GetPrev()->GetFirstStmt() == GetCleanupLabel(), "must be"); + + if (!NeedCleanup()) { + return; + } + /* this is necessary for code insertion. */ + SetCurBB(bb); + + RegOperand ®Opnd0 = + GetOrCreatePhysicalRegisterOperand(R0, GetPointerSize() * kBitsPerByte, GetRegTyFromPrimTy(PTY_a64)); + RegOperand ®Opnd1 = + GetOrCreatePhysicalRegisterOperand(R1, GetPointerSize() * kBitsPerByte, GetRegTyFromPrimTy(PTY_a64)); + /* allocate 16 bytes to store reg0 and reg1 (each reg has 8 bytes) */ + MemOperand &frameAlloc = CreateCallFrameOperand(-16, GetPointerSize() * kBitsPerByte); + Insn &allocInsn = GetInsnBuilder()->BuildInsn(MOP_xstp, regOpnd0, regOpnd1, frameAlloc); + allocInsn.SetDoNotRemove(true); + AppendInstructionTo(allocInsn, *this); + + /* invoke MCC_CleanupLocalStackRef(). */ + HandleRCCall(false); + /* deallocate 16 bytes which used to store reg0 and reg1 */ + MemOperand &frameDealloc = CreateCallFrameOperand(16, GetPointerSize() * kBitsPerByte); + GenRetCleanup(cleanEANode, true); + Insn &deallocInsn = GetInsnBuilder()->BuildInsn(MOP_xldp, regOpnd0, regOpnd1, frameDealloc); + deallocInsn.SetDoNotRemove(true); + AppendInstructionTo(deallocInsn, *this); + + CHECK_FATAL(GetCurBB() == &bb, "cleanup BB can't be splitted, it is only one cleanup BB"); +} + +/* + * bb must be the cleanup bb. + * this function must be invoked before register allocation. + */ +void AArch64CGFunc::GenerateCleanupCode(BB &bb) { + ASSERT(GetLastBB()->GetPrev()->GetFirstStmt() == GetCleanupLabel(), "must be"); + if (!NeedCleanup()) { + return; + } + + /* this is necessary for code insertion. */ + SetCurBB(bb); + + /* R0 is lived-in for clean-up code, save R0 before invocation */ + RegOperand &livein = GetOrCreatePhysicalRegisterOperand(R0, k64BitSize, GetRegTyFromPrimTy(PTY_a64)); + + if (!GetCG()->GenLocalRC()) { + /* by pass local RC operations. */ + } else if (Globals::GetInstance()->GetOptimLevel() > CGOptions::kLevel0) { + regno_t vreg = NewVReg(GetRegTyFromPrimTy(PTY_a64), GetPrimTypeSize(PTY_a64)); + RegOperand &backupRegOp = CreateVirtualRegisterOperand(vreg); + backupRegOp.SetRegNotBBLocal(); + SelectCopy(backupRegOp, PTY_a64, livein, PTY_a64); + + /* invoke MCC_CleanupLocalStackRef(). */ + HandleRCCall(false); + SelectCopy(livein, PTY_a64, backupRegOp, PTY_a64); + } else { + /* + * Register Allocation for O0 can not handle this case, so use a callee saved register directly. + * If yieldpoint is enabled, we use R20 instead R19. + */ + AArch64reg backupRegNO = GetCG()->GenYieldPoint() ? R20 : R19; + RegOperand &backupRegOp = GetOrCreatePhysicalRegisterOperand(backupRegNO, k64BitSize, GetRegTyFromPrimTy(PTY_a64)); + SelectCopy(backupRegOp, PTY_a64, livein, PTY_a64); + /* invoke MCC_CleanupLocalStackRef(). 
*/ + HandleRCCall(false); + SelectCopy(livein, PTY_a64, backupRegOp, PTY_a64); + } + + /* invoke _Unwind_Resume */ + std::string funcName("_Unwind_Resume"); + MIRSymbol *sym = GlobalTables::GetGsymTable().CreateSymbol(kScopeGlobal); + sym->SetNameStrIdx(funcName); + sym->SetStorageClass(kScText); + sym->SetSKind(kStFunc); + ListOperand *srcOpnds = CreateListOpnd(*GetFuncScopeAllocator()); + srcOpnds->PushOpnd(livein); + AppendCall(*sym, *srcOpnds); + /* + * this instruction is unreachable, but we need it as the return address of previous + * "bl _Unwind_Resume" for stack unwinding. + */ + Insn &nop = GetInsnBuilder()->BuildInsn(MOP_xblr, livein, *srcOpnds); + GetCurBB()->AppendInsn(nop); + GetCurBB()->SetHasCall(); + + CHECK_FATAL(GetCurBB() == &bb, "cleanup BB can't be splitted, it is only one cleanup BB"); +} + +uint32 AArch64CGFunc::FloatParamRegRequired(MIRStructType *structType, uint32 &fpSize) { + AArch64CallConvImpl parmlocator(GetBecommon()); + return parmlocator.FloatParamRegRequired(*structType, fpSize); +} + +/* + * Map param registers to formals. For small structs passed in param registers, + * create a move to vreg since lmbc IR does not create a regassign for them. + */ +void AArch64CGFunc::AssignLmbcFormalParams() { + PrimType primType; + uint32 offset; + regno_t intReg = R0; + regno_t fpReg = V0; + for (auto param : GetLmbcParamVec()) { + primType = param->GetPrimType(); + offset = param->GetOffset(); + if (param->IsReturn()) { + param->SetRegNO(R8); + } else if (IsPrimitiveInteger(primType)) { + if (intReg > R7) { + param->SetRegNO(0); + } else { + param->SetRegNO(intReg); + if (!param->HasRegassign()) { + uint32 bytelen = GetPrimTypeSize(primType); + uint32 bitlen = bytelen * kBitsPerByte; + MemOperand *mOpnd = GenLmbcFpMemOperand(static_cast(offset), bytelen); + RegOperand &src = GetOrCreatePhysicalRegisterOperand(AArch64reg(intReg), bitlen, kRegTyInt); + MOperator mOp = PickStInsn(bitlen, primType); + Insn &store = GetInsnBuilder()->BuildInsn(mOp, src, *mOpnd); + GetCurBB()->AppendInsn(store); + } + intReg++; + } + } else if (IsPrimitiveFloat(primType)) { + if (fpReg > V7) { + param->SetRegNO(0); + } else { + param->SetRegNO(fpReg); + if (!param->HasRegassign()) { + uint32 bytelen = GetPrimTypeSize(primType); + uint32 bitlen = bytelen * kBitsPerByte; + MemOperand *mOpnd = GenLmbcFpMemOperand(static_cast(offset), bytelen); + RegOperand &src = GetOrCreatePhysicalRegisterOperand(AArch64reg(fpReg), bitlen, kRegTyFloat); + MOperator mOp = PickStInsn(bitlen, primType); + Insn &store = GetInsnBuilder()->BuildInsn(mOp, src, *mOpnd); + GetCurBB()->AppendInsn(store); + } + fpReg++; + } + } else if (primType == PTY_agg) { + if (param->IsPureFloat()) { + uint32 numFpRegs = param->GetNumRegs(); + if ((fpReg + numFpRegs - kOneRegister) > V7) { + param->SetRegNO(0); + } else { + param->SetRegNO(fpReg); + param->SetNumRegs(numFpRegs); + fpReg += numFpRegs; + } + } else if (param->GetSize() > k16ByteSize) { + if (intReg > R7) { + param->SetRegNO(0); + } else { + param->SetRegNO(intReg); + param->SetIsOnStack(); + param->SetOnStackOffset(((intReg - R0 + fpReg) - V0) * k8ByteSize); + uint32 bytelen = GetPrimTypeSize(PTY_a64); + uint32 bitlen = bytelen * kBitsPerByte; + MemOperand *mOpnd = GenLmbcFpMemOperand(static_cast(param->GetOnStackOffset()), bytelen); + RegOperand &src = GetOrCreatePhysicalRegisterOperand(AArch64reg(intReg), bitlen, kRegTyInt); + MOperator mOp = PickStInsn(bitlen, PTY_a64); + Insn &store = GetInsnBuilder()->BuildInsn(mOp, src, *mOpnd); + 
GetCurBB()->AppendInsn(store); + intReg++; + } + } else if (param->GetSize() <= k8ByteSize) { + if (intReg > R7) { + param->SetRegNO(0); + } else { + param->SetRegNO(intReg); + param->SetNumRegs(kOneRegister); + intReg++; + } + } else { + /* size > 8 && size <= 16 */ + if ((intReg + kOneRegister) > R7) { + param->SetRegNO(0); + } else { + param->SetRegNO(intReg); + param->SetNumRegs(kTwoRegister); + intReg += kTwoRegister; + } + } + if (param->GetRegNO() != 0) { + for (uint32 i = 0; i < param->GetNumRegs(); ++i) { + PrimType pType = PTY_i64; + RegType rType = kRegTyInt; + uint32 rSize = k8ByteSize; + if (param->IsPureFloat()) { + rType = kRegTyFloat; + if (param->GetFpSize() <= k4ByteSize) { + pType = PTY_f32; + rSize = k4ByteSize; + } else { + pType = PTY_f64; + } + } + regno_t vreg = NewVReg(rType, rSize); + RegOperand &dest = GetOrCreateVirtualRegisterOperand(vreg); + RegOperand &src = GetOrCreatePhysicalRegisterOperand(static_cast(param->GetRegNO() + i), + rSize * kBitsPerByte, rType); + SelectCopy(dest, pType, src, pType); + if (param->GetVregNO() == 0) { + param->SetVregNO(vreg); + } + Operand *memOpd = &CreateMemOpnd(RFP, offset + (i * rSize), rSize); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn( + PickStInsn(rSize * kBitsPerByte, pType), dest, *memOpd)); + } + } + } else { + CHECK_FATAL(false, "lmbc formal primtype not handled"); + } + } +} + +void AArch64CGFunc::LmbcGenSaveSpForAlloca() { + if (GetMirModule().GetFlavor() != MIRFlavor::kFlavorLmbc || !HasVLAOrAlloca()) { + return; + } + Operand &spOpnd = GetOrCreatePhysicalRegisterOperand(RSP, k64BitSize, kRegTyInt); + regno_t regno = NewVReg(kRegTyInt, GetPointerSize()); + RegOperand &spSaveOpnd = CreateVirtualRegisterOperand(regno); + SetSpSaveReg(regno); + Insn &save = GetInsnBuilder()->BuildInsn(MOP_xmovrr, spSaveOpnd, spOpnd); + GetFirstBB()->AppendInsn(save); + for (auto *retBB : GetExitBBsVec()) { + Insn &restore = GetInsnBuilder()->BuildInsn(MOP_xmovrr, spOpnd, spSaveOpnd); + retBB->AppendInsn(restore); + restore.SetFrameDef(true); + } +} + +/* if offset < 0, allocation; otherwise, deallocation */ +MemOperand &AArch64CGFunc::CreateCallFrameOperand(int32 offset, uint32 size) const { + MemOperand *memOpnd = CreateStackMemOpnd(RSP, offset, size); + memOpnd->SetIndexOpt((offset < 0) ? MemOperand::kPreIndex : MemOperand::kPostIndex); + return *memOpnd; +} + +BitShiftOperand *AArch64CGFunc::GetLogicalShiftLeftOperand(uint32 shiftAmount, bool is64bits) const { + /* num(0, 16, 32, 48) >> 4 is num1(0, 1, 2, 3), num1 & (~3) == 0 */ + ASSERT((!shiftAmount || ((shiftAmount >> 4) & ~static_cast(3)) == 0), + "shift amount should be one of 0, 16, 32, 48"); + /* movkLslOperands[4]~movkLslOperands[7] is for 64 bits */ + return &movkLslOperands[(shiftAmount >> 4) + (is64bits ? 
4 : 0)]; +} + +AArch64CGFunc::MovkLslOperandArray AArch64CGFunc::movkLslOperands = { + BitShiftOperand(BitShiftOperand::kLSL, 0, 4), BitShiftOperand(BitShiftOperand::kLSL, 16, 4), + BitShiftOperand(BitShiftOperand::kLSL, static_cast(-1), 0), /* invalid entry */ + BitShiftOperand(BitShiftOperand::kLSL, static_cast(-1), 0), /* invalid entry */ + BitShiftOperand(BitShiftOperand::kLSL, 0, 6), BitShiftOperand(BitShiftOperand::kLSL, 16, 6), + BitShiftOperand(BitShiftOperand::kLSL, 32, 6), BitShiftOperand(BitShiftOperand::kLSL, 48, 6), +}; + +MemOperand &AArch64CGFunc::CreateStkTopOpnd(uint32 offset, uint32 size) { + AArch64reg reg; + if (GetMirModule().GetFlavor() == MIRFlavor::kFlavorLmbc) { + reg = RSP; + } else { + reg = RFP; + } + MemOperand *memOp = CreateStackMemOpnd(reg, static_cast(offset), size); + return *memOp; +} + +MemOperand *AArch64CGFunc::CreateStackMemOpnd(regno_t preg, int32 offset, uint32 size) const { + auto *memOp = memPool->New( + memPool->New(preg, k64BitSize, kRegTyInt), + &CreateOfstOpnd(static_cast(static_cast(offset)), k32BitSize), + size); + if (preg == RFP || preg == RSP) { + memOp->SetStackMem(true); + } + return memOp; +} + +MemOperand *AArch64CGFunc::CreateMemOperand(MemOperand::AArch64AddressingMode mode, uint32 size, + RegOperand &base, RegOperand *index, + ImmOperand *offset, const MIRSymbol *symbol) const { + auto *memOp = memPool->New( + mode, size, base, index, offset, symbol); + if (base.GetRegisterNumber() == RFP || base.GetRegisterNumber() == RSP) { + memOp->SetStackMem(true); + } + return memOp; +} + +MemOperand *AArch64CGFunc::CreateMemOperand(MemOperand::AArch64AddressingMode mode, uint32 size, + RegOperand &base, RegOperand &index, + ImmOperand *offset, const MIRSymbol &symbol, bool noExtend) const { + auto *memOp = memPool->New( + mode, size, base, index, offset, symbol, noExtend); + if (base.GetRegisterNumber() == RFP || base.GetRegisterNumber() == RSP) { + memOp->SetStackMem(true); + } + return memOp; +} + +MemOperand *AArch64CGFunc::CreateMemOperand(MemOperand::AArch64AddressingMode mode, uint32 dSize, + RegOperand &base, RegOperand &indexOpnd, + uint32 shift, bool isSigned) const { + auto *memOp = memPool->New( + mode, dSize, base, indexOpnd, shift, isSigned); + if (base.GetRegisterNumber() == RFP || base.GetRegisterNumber() == RSP) { + memOp->SetStackMem(true); + } + return memOp; +} + +MemOperand *AArch64CGFunc::CreateMemOperand(MemOperand::AArch64AddressingMode mode, uint32 dSize, + const MIRSymbol &sym) const { + auto *memOp = memPool->New(mode, dSize, sym); + return memOp; +} + +void AArch64CGFunc::GenSaveMethodInfoCode(BB &bb) { + if (GetCG()->UseFastUnwind()) { + BB *formerCurBB = GetCurBB(); + GetDummyBB()->ClearInsns(); + SetCurBB(*GetDummyBB()); + /* + * FUNCATTR_bridge for function: Ljava_2Flang_2FString_3B_7CcompareTo_7C_28Ljava_2Flang_2FObject_3B_29I, to + * exclude this funciton this function is a bridge function generated for Java Genetic + */ + if ((GetFunction().GetAttr(FUNCATTR_native) || GetFunction().GetAttr(FUNCATTR_fast_native)) && + !GetFunction().GetAttr(FUNCATTR_critical_native) && !GetFunction().GetAttr(FUNCATTR_bridge)) { + RegOperand &fpReg = GetOrCreatePhysicalRegisterOperand(RFP, GetPointerSize() * kBitsPerByte, kRegTyInt); + + ListOperand *srcOpnds = CreateListOpnd(*GetFuncScopeAllocator()); + RegOperand &parmRegOpnd1 = GetOrCreatePhysicalRegisterOperand(R0, k64BitSize, kRegTyInt); + srcOpnds->PushOpnd(parmRegOpnd1); + Operand &immOpnd = CreateImmOperand(0, k64BitSize, false); + 
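+        /* arguments for MCC_SetRiskyUnwindContext: X0 = 0, X1 = current frame pointer */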
GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xadri64, parmRegOpnd1, immOpnd)); + RegOperand &parmRegOpnd2 = GetOrCreatePhysicalRegisterOperand(R1, k64BitSize, kRegTyInt); + srcOpnds->PushOpnd(parmRegOpnd2); + SelectCopy(parmRegOpnd2, PTY_a64, fpReg, PTY_a64); + + MIRSymbol *sym = GlobalTables::GetGsymTable().CreateSymbol(kScopeGlobal); + std::string funcName("MCC_SetRiskyUnwindContext"); + sym->SetNameStrIdx(funcName); + + sym->SetStorageClass(kScText); + sym->SetSKind(kStFunc); + AppendCall(*sym, *srcOpnds); + bb.SetHasCall(); + } + + bb.InsertAtBeginning(*GetDummyBB()); + SetCurBB(*formerCurBB); + } +} + +bool AArch64CGFunc::OpndHasStackLoadStore(Insn &insn, Operand &opnd) { + if (!opnd.IsMemoryAccessOperand()) { + return false; + } + auto &memOpnd = static_cast(opnd); + Operand *base = memOpnd.GetBaseRegister(); + if ((base != nullptr) && base->IsRegister()) { + RegOperand *regOpnd = static_cast(base); + RegType regType = regOpnd->GetRegisterType(); + uint32 regNO = regOpnd->GetRegisterNumber(); + if (((regType != kRegTyCc) && ((regNO == RFP) || (regNO == RSP))) || (regType == kRegTyVary)) { + return true; + } + if (GetMirModule().GetFlavor() != MIRFlavor::kFlavorLmbc) { + return false; + } + /* lmbc uses ireadoff, not dread. Need to check for previous insn */ + Insn *prevInsn = insn.GetPrev(); + if (prevInsn && prevInsn->GetMachineOpcode() == MOP_xaddrri12) { + RegOperand *pOpnd = static_cast(&prevInsn->GetOperand(1)); + if (pOpnd->GetRegisterType() == kRegTyVary) { + return true; + } + } + } + return false; +} + +bool AArch64CGFunc::HasStackLoadStore() { + FOR_ALL_BB(bb, this) { + FOR_BB_INSNS(insn, bb) { + uint32 opndNum = insn->GetOperandSize(); + for (uint32 i = 0; i < opndNum; ++i) { + Operand &opnd = insn->GetOperand(i); + if (OpndHasStackLoadStore(*insn, opnd)) { + return true; + } + } + } + } + return false; +} + +void AArch64CGFunc::GenerateYieldpoint(BB &bb) { + /* ldr wzr, [RYP] # RYP hold address of the polling page. */ + auto &wzr = GetZeroOpnd(k32BitSize); + auto &pollingPage = CreateMemOpnd(RYP, 0, k32BitSize); + auto &yieldPoint = GetInsnBuilder()->BuildInsn(MOP_wldr, wzr, pollingPage); + if (GetCG()->GenerateVerboseCG()) { + yieldPoint.SetComment("yieldpoint"); + } + bb.AppendInsn(yieldPoint); +} + +Operand &AArch64CGFunc::ProcessReturnReg(PrimType primType, int32 sReg) { + return GetTargetRetOperand(primType, sReg); +} + +Operand &AArch64CGFunc::GetTargetRetOperand(PrimType primType, int32 sReg) { + uint32 bitSize = GetPrimTypeBitSize(primType) < k32BitSize ? k32BitSize : GetPrimTypeBitSize(primType); + AArch64reg pReg; + if (sReg < 0) { + return GetOrCreatePhysicalRegisterOperand( + IsPrimitiveFloat(primType) || (IsPrimitiveVector(primType)) ? S0 : R0, + bitSize, GetRegTyFromPrimTy(primType)); + } else { + switch (sReg) { + case kSregRetval0: + pReg = IsPrimitiveFloat(primType) || (IsPrimitiveVector(primType)) ? S0 : R0; + break; + case kSregRetval1: + pReg = R1; + break; + default: + pReg = RLAST_INT_REG; + ASSERT(0, "GetTargetRetOperand: NYI"); + } + return GetOrCreatePhysicalRegisterOperand(pReg, bitSize, GetRegTyFromPrimTy(primType)); + } +} + +RegOperand &AArch64CGFunc::CreateRegisterOperandOfType(PrimType primType) { + RegType regType = GetRegTyFromPrimTy(primType); + uint32 byteLength = GetPrimTypeSize(primType); + return CreateRegisterOperandOfType(regType, byteLength); +} + +RegOperand &AArch64CGFunc::CreateRegisterOperandOfType(RegType regType, uint32 byteLen) { + /* BUG: if half-precision floating point operations are supported? 
*/ + /* AArch64 has 32-bit and 64-bit registers only */ + if (byteLen < k4ByteSize) { + byteLen = k4ByteSize; + } + regno_t vRegNO = NewVReg(regType, byteLen); + return CreateVirtualRegisterOperand(vRegNO); +} + +RegOperand &AArch64CGFunc::CreateRflagOperand() { + /* AArch64 has Status register that is 32-bit wide. */ + regno_t vRegNO = NewVRflag(); + return CreateVirtualRegisterOperand(vRegNO); +} + +void AArch64CGFunc::MergeReturn() { + uint32 exitBBSize = GetExitBBsVec().size(); + CHECK_FATAL(exitBBSize == 1, "allow one returnBB only"); +} + +void AArch64CGFunc::HandleRetCleanup(NaryStmtNode &retNode) { + if (!GetCG()->GenLocalRC()) { + /* handle local rc is disabled. */ + return; + } + + Opcode ops[11] = { OP_label, OP_goto, OP_brfalse, OP_brtrue, OP_return, OP_call, + OP_icall, OP_rangegoto, OP_catch, OP_try, OP_endtry }; + std::set branchOp(ops, ops + 11); + + /* get cleanup intrinsic */ + bool found = false; + StmtNode *cleanupNode = retNode.GetPrev(); + cleanEANode = nullptr; + while (cleanupNode != nullptr) { + if (branchOp.find(cleanupNode->GetOpCode()) != branchOp.end()) { + if (cleanupNode->GetOpCode() == OP_call) { + CallNode *callNode = static_cast(cleanupNode); + MIRFunction *fn = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(callNode->GetPUIdx()); + MIRSymbol *fsym = GetFunction().GetLocalOrGlobalSymbol(fn->GetStIdx(), false); + if ((fsym->GetName() == "MCC_DecRef_NaiveRCFast") || (fsym->GetName() == "MCC_IncRef_NaiveRCFast") || + (fsym->GetName() == "MCC_IncDecRef_NaiveRCFast") || (fsym->GetName() == "MCC_LoadRefStatic") || + (fsym->GetName() == "MCC_LoadRefField") || (fsym->GetName() == "MCC_LoadReferentField") || + (fsym->GetName() == "MCC_LoadRefField_NaiveRCFast") || (fsym->GetName() == "MCC_LoadVolatileField") || + (fsym->GetName() == "MCC_LoadVolatileStaticField") || (fsym->GetName() == "MCC_LoadWeakField") || + (fsym->GetName() == "MCC_CheckObjMem")) { + cleanupNode = cleanupNode->GetPrev(); + continue; + } else { + break; + } + } else { + break; + } + } + + if (cleanupNode->GetOpCode() == OP_intrinsiccall) { + IntrinsiccallNode *tempNode = static_cast(cleanupNode); + if ((tempNode->GetIntrinsic() == INTRN_MPL_CLEANUP_LOCALREFVARS) || + (tempNode->GetIntrinsic() == INTRN_MPL_CLEANUP_LOCALREFVARS_SKIP)) { + GenRetCleanup(tempNode); + if (cleanEANode != nullptr) { + GenRetCleanup(cleanEANode, true); + } + found = true; + break; + } + if (tempNode->GetIntrinsic() == INTRN_MPL_CLEANUP_NORETESCOBJS) { + cleanEANode = tempNode; + } + } + cleanupNode = cleanupNode->GetPrev(); + } + + if (!found) { + MIRSymbol *retRef = nullptr; + if (retNode.NumOpnds() != 0) { + retRef = GetRetRefSymbol(*static_cast(retNode).Opnd(0)); + } + HandleRCCall(false, retRef); + } +} + +bool AArch64CGFunc::GenRetCleanup(const IntrinsiccallNode *cleanupNode, bool forEA) { +#undef CC_DEBUG_INFO + +#ifdef CC_DEBUG_INFO + LogInfo::MapleLogger() << "==============" << GetFunction().GetName() << "==============" << '\n'; +#endif + + if (cleanupNode == nullptr) { + return false; + } + + int32 minByteOffset = INT_MAX; + int32 maxByteOffset = 0; + + int32 skipIndex = -1; + MIRSymbol *skipSym = nullptr; + size_t refSymNum = 0; + if (cleanupNode->GetIntrinsic() == INTRN_MPL_CLEANUP_LOCALREFVARS) { + refSymNum = cleanupNode->GetNopndSize(); + if (refSymNum < 1) { + return true; + } + } else if (cleanupNode->GetIntrinsic() == INTRN_MPL_CLEANUP_LOCALREFVARS_SKIP) { + refSymNum = cleanupNode->GetNopndSize(); + /* refSymNum == 0, no local refvars; refSymNum == 1 and cleanup skip, so nothing to do */ + if 
(refSymNum < 2) { + return true; + } + BaseNode *skipExpr = cleanupNode->Opnd(refSymNum - 1); + + CHECK_FATAL(skipExpr->GetOpCode() == OP_dread, "should be dread"); + DreadNode *refNode = static_cast(skipExpr); + skipSym = GetFunction().GetLocalOrGlobalSymbol(refNode->GetStIdx()); + + refSymNum -= 1; + } else if (cleanupNode->GetIntrinsic() == INTRN_MPL_CLEANUP_NORETESCOBJS) { + refSymNum = cleanupNode->GetNopndSize(); + /* the number of operands of intrinsic call INTRN_MPL_CLEANUP_NORETESCOBJS must be more than 1 */ + if (refSymNum < 2) { + return true; + } + BaseNode *skipexpr = cleanupNode->Opnd(0); + CHECK_FATAL(skipexpr->GetOpCode() == OP_dread, "should be dread"); + DreadNode *refnode = static_cast(skipexpr); + skipSym = GetFunction().GetLocalOrGlobalSymbol(refnode->GetStIdx()); + } + + /* now compute the offset range */ + std::vector offsets; + AArch64MemLayout *memLayout = static_cast(this->GetMemlayout()); + for (size_t i = 0; i < refSymNum; ++i) { + BaseNode *argExpr = cleanupNode->Opnd(i); + CHECK_FATAL(argExpr->GetOpCode() == OP_dread, "should be dread"); + DreadNode *refNode = static_cast(argExpr); + MIRSymbol *refSymbol = GetFunction().GetLocalOrGlobalSymbol(refNode->GetStIdx()); + if (memLayout->GetSymAllocTable().size() <= refSymbol->GetStIndex()) { + ERR(kLncErr, "access memLayout->GetSymAllocTable() failed"); + return false; + } + AArch64SymbolAlloc *symLoc = + static_cast(memLayout->GetSymAllocInfo(refSymbol->GetStIndex())); + int32 tempOffset = GetBaseOffset(*symLoc); + offsets.emplace_back(tempOffset); +#ifdef CC_DEBUG_INFO + LogInfo::MapleLogger() << "refsym " << refSymbol->GetName() << " offset " << tempOffset << '\n'; +#endif + minByteOffset = (minByteOffset > tempOffset) ? tempOffset : minByteOffset; + maxByteOffset = (maxByteOffset < tempOffset) ? tempOffset : maxByteOffset; + } + + /* get the skip offset */ + int32 skipOffset = -1; + if (skipSym != nullptr) { + AArch64SymbolAlloc *symLoc = static_cast(memLayout->GetSymAllocInfo(skipSym->GetStIndex())); + CHECK_FATAL(GetBaseOffset(*symLoc) < std::numeric_limits::max(), "out of range"); + skipOffset = GetBaseOffset(*symLoc); + offsets.emplace_back(skipOffset); + +#ifdef CC_DEBUG_INFO + LogInfo::MapleLogger() << "skip " << skipSym->GetName() << " offset " << skipOffset << '\n'; +#endif + + skipIndex = symLoc->GetOffset() / kOffsetAlign; + } + + /* call runtime cleanup */ + if (minByteOffset < INT_MAX) { + int32 refLocBase = memLayout->GetRefLocBaseLoc(); + uint32 refNum = memLayout->GetSizeOfRefLocals() / kOffsetAlign; + CHECK_FATAL((refLocBase + (refNum - 1) * kIntregBytelen) < std::numeric_limits::max(), "out of range"); + int32 refLocEnd = refLocBase + static_cast((refNum - 1) * kIntregBytelen); + int32 realMin = minByteOffset < refLocBase ? refLocBase : minByteOffset; + int32 realMax = maxByteOffset > refLocEnd ? refLocEnd : maxByteOffset; + if (forEA) { + std::sort(offsets.begin(), offsets.end()); + int32 prev = offsets[0]; + for (size_t i = 1; i < offsets.size(); i++) { + CHECK_FATAL((offsets[i] == prev) || ((offsets[i] - prev) == kIntregBytelen), "must be"); + prev = offsets[i]; + } + CHECK_FATAL((refLocBase - prev) == kIntregBytelen, "must be"); + realMin = minByteOffset; + realMax = maxByteOffset; + } +#ifdef CC_DEBUG_INFO + LogInfo::MapleLogger() << " realMin " << realMin << " realMax " << realMax << '\n'; +#endif + if (realMax < realMin) { + /* maybe there is a cleanup intrinsic bug, use CHECK_FATAL instead? 
*/ + CHECK_FATAL(false, "must be"); + } + + /* optimization for little slot cleanup */ + if (realMax == realMin && !forEA) { + RegOperand &phyOpnd = GetOrCreatePhysicalRegisterOperand(R0, k64BitSize, GetRegTyFromPrimTy(PTY_a64)); + Operand &stackLoc = CreateStkTopOpnd(static_cast(realMin), GetPointerSize() * kBitsPerByte); + Insn &ldrInsn = GetInsnBuilder()->BuildInsn(PickLdInsn(k64BitSize, PTY_a64), phyOpnd, stackLoc); + GetCurBB()->AppendInsn(ldrInsn); + + ListOperand *srcOpnds = CreateListOpnd(*GetFuncScopeAllocator()); + srcOpnds->PushOpnd(phyOpnd); + MIRSymbol *callSym = GlobalTables::GetGsymTable().CreateSymbol(kScopeGlobal); + std::string funcName("MCC_DecRef_NaiveRCFast"); + callSym->SetNameStrIdx(funcName); + callSym->SetStorageClass(kScText); + callSym->SetSKind(kStFunc); + Insn &callInsn = AppendCall(*callSym, *srcOpnds); + callInsn.SetRefSkipIdx(skipIndex); + GetCurBB()->SetHasCall(); + /* because of return stmt is often the last stmt */ + GetCurBB()->SetFrequency(frequency); + + return true; + } + ListOperand *srcOpnds = CreateListOpnd(*GetFuncScopeAllocator()); + + ImmOperand &beginOpnd = CreateImmOperand(realMin, k64BitSize, true); + regno_t vRegNO0 = NewVReg(GetRegTyFromPrimTy(PTY_a64), GetPrimTypeSize(PTY_a64)); + RegOperand &vReg0 = CreateVirtualRegisterOperand(vRegNO0); + RegOperand &fpOpnd = GetOrCreateStackBaseRegOperand(); + SelectAdd(vReg0, fpOpnd, beginOpnd, PTY_i64); + + RegOperand &parmRegOpnd1 = GetOrCreatePhysicalRegisterOperand(R0, k64BitSize, GetRegTyFromPrimTy(PTY_a64)); + srcOpnds->PushOpnd(parmRegOpnd1); + SelectCopy(parmRegOpnd1, PTY_a64, vReg0, PTY_a64); + + uint32 realRefNum = static_cast((realMax - realMin) / kOffsetAlign + 1); + + ImmOperand &countOpnd = CreateImmOperand(realRefNum, k64BitSize, true); + + RegOperand &parmRegOpnd2 = GetOrCreatePhysicalRegisterOperand(R1, k64BitSize, GetRegTyFromPrimTy(PTY_a64)); + srcOpnds->PushOpnd(parmRegOpnd2); + SelectCopyImm(parmRegOpnd2, countOpnd, PTY_i64); + + MIRSymbol *funcSym = GlobalTables::GetGsymTable().CreateSymbol(kScopeGlobal); + if ((skipSym != nullptr) && (skipOffset >= realMin) && (skipOffset <= realMax)) { + /* call cleanupskip */ + uint32 stOffset = (skipOffset - realMin) / kOffsetAlign; + ImmOperand &retLoc = CreateImmOperand(stOffset, k64BitSize, true); + + RegOperand &parmRegOpnd3 = GetOrCreatePhysicalRegisterOperand(R2, k64BitSize, GetRegTyFromPrimTy(PTY_a64)); + srcOpnds->PushOpnd(parmRegOpnd3); + SelectCopyImm(parmRegOpnd3, retLoc, PTY_i64); + + std::string funcName; + if (forEA) { + funcName = "MCC_CleanupNonRetEscObj"; + } else { + funcName = "MCC_CleanupLocalStackRefSkip_NaiveRCFast"; + } + funcSym->SetNameStrIdx(funcName); +#ifdef CC_DEBUG_INFO + LogInfo::MapleLogger() << "num " << real_ref_num << " skip loc " << stOffset << '\n'; +#endif + } else { + /* call cleanup */ + CHECK_FATAL(!forEA, "must be"); + std::string funcName("MCC_CleanupLocalStackRef_NaiveRCFast"); + funcSym->SetNameStrIdx(funcName); +#ifdef CC_DEBUG_INFO + LogInfo::MapleLogger() << "num " << real_ref_num << '\n'; +#endif + } + + funcSym->SetStorageClass(kScText); + funcSym->SetSKind(kStFunc); + Insn &callInsn = AppendCall(*funcSym, *srcOpnds); + callInsn.SetRefSkipIdx(skipIndex); + GetCurBB()->SetHasCall(); + GetCurBB()->SetFrequency(frequency); + } + return true; +} + +RegOperand *AArch64CGFunc::CreateVirtualRegisterOperand(regno_t vRegNO, uint32 size, RegType kind, uint32 flg) const { + RegOperand *res = memPool->New(vRegNO, size, kind, flg); + return res; +} + +RegOperand 
&AArch64CGFunc::CreateVirtualRegisterOperand(regno_t vRegNO) { + ASSERT((vRegOperandTable.find(vRegNO) == vRegOperandTable.end()), "already exist"); + ASSERT(vRegNO < vRegTable.size(), "index out of range"); + uint8 bitSize = static_cast((static_cast(vRegTable[vRegNO].GetSize())) * kBitsPerByte); + RegOperand *res = CreateVirtualRegisterOperand(vRegNO, bitSize, vRegTable.at(vRegNO).GetType()); + vRegOperandTable[vRegNO] = res; + return *res; +} + +RegOperand &AArch64CGFunc::GetOrCreateVirtualRegisterOperand(regno_t vRegNO) { + auto it = vRegOperandTable.find(vRegNO); + return (it != vRegOperandTable.end()) ? *(it->second) : CreateVirtualRegisterOperand(vRegNO); +} + +RegOperand &AArch64CGFunc::GetOrCreateVirtualRegisterOperand(RegOperand ®Opnd) { + regno_t regNO = regOpnd.GetRegisterNumber(); + auto it = vRegOperandTable.find(regNO); + if (it != vRegOperandTable.end()) { + it->second->SetSize(regOpnd.GetSize()); + it->second->SetRegisterNumber(regNO); + it->second->SetRegisterType(regOpnd.GetRegisterType()); + it->second->SetValidBitsNum(regOpnd.GetValidBitsNum()); + return *it->second; + } else { + auto *newRegOpnd = static_cast(regOpnd.Clone(*memPool)); + regno_t newRegNO = newRegOpnd->GetRegisterNumber(); + if (newRegNO >= maxRegCount) { + maxRegCount = newRegNO + kRegIncrStepLen; + vRegTable.resize(maxRegCount); + } + vRegOperandTable[newRegNO] = newRegOpnd; + VirtualRegNode *vregNode = memPool->New(newRegOpnd->GetRegisterType(), newRegOpnd->GetSize()); + vRegTable[newRegNO] = *vregNode; + vRegCount = maxRegCount; + return *newRegOpnd; + } +} + +/* + * Traverse all call insn to determine return type of it + * If the following insn is mov/str/blr and use R0/V0, it means the call insn have reture value + */ +void AArch64CGFunc::DetermineReturnTypeofCall() { + FOR_ALL_BB(bb, this) { + if (bb->IsUnreachable() || !bb->HasCall()) { + continue; + } + FOR_BB_INSNS(insn, bb) { + if (!insn->IsTargetInsn()) { + continue; + } + if (!insn->IsCall() || insn->GetMachineOpcode() == MOP_asm) { + continue; + } + Insn *nextInsn = insn->GetNextMachineInsn(); + if (nextInsn == nullptr) { + continue; + } + if ((nextInsn->GetMachineOpcode() != MOP_asm) && + ((nextInsn->IsMove() && nextInsn->GetOperand(kInsnSecondOpnd).IsRegister()) || + nextInsn->IsStore() || + (nextInsn->IsCall() && nextInsn->GetOperand(kInsnFirstOpnd).IsRegister()))) { + auto *srcOpnd = static_cast(&nextInsn->GetOperand(kInsnFirstOpnd)); + CHECK_FATAL(srcOpnd != nullptr, "nullptr"); + if (!srcOpnd->IsPhysicalRegister()) { + continue; + } + if (srcOpnd->GetRegisterNumber() == R0) { + insn->SetRetType(Insn::kRegInt); + continue; + } + if (srcOpnd->GetRegisterNumber() == V0) { + insn->SetRetType(Insn::kRegFloat); + } + } + } + } +} + +void AArch64CGFunc::HandleRCCall(bool begin, const MIRSymbol *retRef) { + if (!GetCG()->GenLocalRC() && !begin) { + /* handle local rc is disabled. 
*/ + return; + } + + AArch64MemLayout *memLayout = static_cast(this->GetMemlayout()); + int32 refNum = static_cast(memLayout->GetSizeOfRefLocals() / kOffsetAlign); + if (!refNum) { + if (begin) { + GenerateYieldpoint(*GetCurBB()); + yieldPointInsn = GetCurBB()->GetLastInsn(); + } + return; + } + + /* no MCC_CleanupLocalStackRefSkip when ret_ref is the only ref symbol */ + if ((refNum == 1) && (retRef != nullptr)) { + if (begin) { + GenerateYieldpoint(*GetCurBB()); + yieldPointInsn = GetCurBB()->GetLastInsn(); + } + return; + } + CHECK_FATAL(refNum < 0xFFFF, "not enough room for size."); + int32 refLocBase = memLayout->GetRefLocBaseLoc(); + CHECK_FATAL((refLocBase >= 0) && (refLocBase < 0xFFFF), "not enough room for offset."); + int32 formalRef = 0; + /* avoid store zero to formal localrefvars. */ + if (begin) { + for (uint32 i = 0; i < GetFunction().GetFormalCount(); ++i) { + if (GetFunction().GetNthParamAttr(i).GetAttr(ATTR_localrefvar)) { + refNum--; + formalRef++; + } + } + } + /* + * if the number of local refvar is less than 12, use stp or str to init local refvar + * else call function MCC_InitializeLocalStackRef to init. + */ + if (begin && (refNum <= kRefNum12) && ((refLocBase + kIntregBytelen * (refNum - 1)) < kStpLdpImm64UpperBound)) { + int32 pairNum = refNum / kDivide2; + int32 singleNum = refNum % kDivide2; + const int32 pairRefBytes = 16; /* the size of each pair of ref is 16 bytes */ + int32 ind = 0; + while (ind < pairNum) { + int32 offset = memLayout->GetRefLocBaseLoc() + kIntregBytelen * formalRef + pairRefBytes * ind; + Operand &zeroOp = GetZeroOpnd(k64BitSize); + Operand &stackLoc = CreateStkTopOpnd(static_cast(offset), GetPointerSize() * kBitsPerByte); + Insn &setInc = GetInsnBuilder()->BuildInsn(MOP_xstp, zeroOp, zeroOp, stackLoc); + GetCurBB()->AppendInsn(setInc); + ind++; + } + if (singleNum > 0) { + int32 offset = memLayout->GetRefLocBaseLoc() + kIntregBytelen * formalRef + kIntregBytelen * (refNum - 1); + Operand &zeroOp = GetZeroOpnd(k64BitSize); + Operand &stackLoc = CreateStkTopOpnd(static_cast(offset), GetPointerSize() * kBitsPerByte); + Insn &setInc = GetInsnBuilder()->BuildInsn(MOP_xstr, zeroOp, stackLoc); + GetCurBB()->AppendInsn(setInc); + } + /* Insert Yield Point just after localrefvar are initialized. */ + GenerateYieldpoint(*GetCurBB()); + yieldPointInsn = GetCurBB()->GetLastInsn(); + return; + } + + /* refNum is 1 and refvar is not returned, this refvar need to call MCC_DecRef_NaiveRCFast. */ + if ((refNum == 1) && !begin && (retRef == nullptr)) { + RegOperand &phyOpnd = GetOrCreatePhysicalRegisterOperand(R0, k64BitSize, GetRegTyFromPrimTy(PTY_a64)); + Operand &stackLoc = CreateStkTopOpnd(static_cast(memLayout->GetRefLocBaseLoc()), + GetPointerSize() * kBitsPerByte); + Insn &ldrInsn = GetInsnBuilder()->BuildInsn(PickLdInsn(k64BitSize, PTY_a64), phyOpnd, stackLoc); + GetCurBB()->AppendInsn(ldrInsn); + + ListOperand *srcOpnds = CreateListOpnd(*GetFuncScopeAllocator()); + srcOpnds->PushOpnd(phyOpnd); + MIRSymbol *callSym = GlobalTables::GetGsymTable().CreateSymbol(kScopeGlobal); + std::string funcName("MCC_DecRef_NaiveRCFast"); + callSym->SetNameStrIdx(funcName); + callSym->SetStorageClass(kScText); + callSym->SetSKind(kStFunc); + + AppendCall(*callSym, *srcOpnds); + GetCurBB()->SetHasCall(); + if (frequency != 0) { + GetCurBB()->SetFrequency(frequency); + } + return; + } + + /* refNum is 2 and one of refvar is returned, only another one is needed to call MCC_DecRef_NaiveRCFast. 
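+   * The slots are adjacent, so the one that is not returned is either the first or the second slot;
+   * load it into X0 and call MCC_DecRef_NaiveRCFast on it directly.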
*/ + if ((refNum == 2) && !begin && retRef != nullptr) { + AArch64SymbolAlloc *symLoc = + static_cast(memLayout->GetSymAllocInfo(retRef->GetStIndex())); + int32 stOffset = symLoc->GetOffset() / kOffsetAlign; + RegOperand &phyOpnd = GetOrCreatePhysicalRegisterOperand(R0, k64BitSize, GetRegTyFromPrimTy(PTY_a64)); + Operand *stackLoc = nullptr; + if (stOffset == 0) { + /* just have to Dec the next one. */ + stackLoc = &CreateStkTopOpnd(static_cast(memLayout->GetRefLocBaseLoc()) + kIntregBytelen, + GetPointerSize() * kBitsPerByte); + } else { + /* just have to Dec the current one. */ + stackLoc = &CreateStkTopOpnd(static_cast(memLayout->GetRefLocBaseLoc()), GetPointerSize() * kBitsPerByte); + } + Insn &ldrInsn = GetInsnBuilder()->BuildInsn(PickLdInsn(k64BitSize, PTY_a64), phyOpnd, *stackLoc); + GetCurBB()->AppendInsn(ldrInsn); + + ListOperand *srcOpnds = CreateListOpnd(*GetFuncScopeAllocator()); + srcOpnds->PushOpnd(phyOpnd); + MIRSymbol *callSym = GlobalTables::GetGsymTable().CreateSymbol(kScopeGlobal); + std::string funcName("MCC_DecRef_NaiveRCFast"); + callSym->SetNameStrIdx(funcName); + callSym->SetStorageClass(kScText); + callSym->SetSKind(kStFunc); + Insn &callInsn = AppendCall(*callSym, *srcOpnds); + callInsn.SetRefSkipIdx(stOffset); + GetCurBB()->SetHasCall(); + if (frequency != 0) { + GetCurBB()->SetFrequency(frequency); + } + return; + } + + bool needSkip = false; + ListOperand *srcOpnds = CreateListOpnd(*GetFuncScopeAllocator()); + + ImmOperand *beginOpnd = + &CreateImmOperand(memLayout->GetRefLocBaseLoc() + kIntregBytelen * formalRef, k64BitSize, true); + ImmOperand *countOpnd = &CreateImmOperand(refNum, k64BitSize, true); + int32 refSkipIndex = -1; + if (!begin && retRef != nullptr) { + AArch64SymbolAlloc *symLoc = + static_cast(memLayout->GetSymAllocInfo(retRef->GetStIndex())); + int32 stOffset = symLoc->GetOffset() / kOffsetAlign; + refSkipIndex = stOffset; + if (stOffset == 0) { + /* ret_ref at begin. */ + beginOpnd = &CreateImmOperand(memLayout->GetRefLocBaseLoc() + kIntregBytelen, k64BitSize, true); + countOpnd = &CreateImmOperand(refNum - 1, k64BitSize, true); + } else if (stOffset == (refNum - 1)) { + /* ret_ref at end. 
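+         * Only refNum - 1 slots need cleanup, so shrink the count and leave the base unchanged.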
*/ + countOpnd = &CreateImmOperand(refNum - 1, k64BitSize, true); + } else { + needSkip = true; + } + } + + regno_t vRegNO0 = NewVReg(GetRegTyFromPrimTy(PTY_a64), GetPrimTypeSize(PTY_a64)); + RegOperand &vReg0 = CreateVirtualRegisterOperand(vRegNO0); + RegOperand &fpOpnd = GetOrCreateStackBaseRegOperand(); + SelectAdd(vReg0, fpOpnd, *beginOpnd, PTY_i64); + + RegOperand &parmRegOpnd1 = GetOrCreatePhysicalRegisterOperand(R0, k64BitSize, GetRegTyFromPrimTy(PTY_a64)); + srcOpnds->PushOpnd(parmRegOpnd1); + SelectCopy(parmRegOpnd1, PTY_a64, vReg0, PTY_a64); + + regno_t vRegNO1 = NewVReg(GetRegTyFromPrimTy(PTY_a64), GetPrimTypeSize(PTY_a64)); + RegOperand &vReg1 = CreateVirtualRegisterOperand(vRegNO1); + SelectCopyImm(vReg1, *countOpnd, PTY_i64); + + RegOperand &parmRegOpnd2 = GetOrCreatePhysicalRegisterOperand(R1, k64BitSize, GetRegTyFromPrimTy(PTY_a64)); + srcOpnds->PushOpnd(parmRegOpnd2); + SelectCopy(parmRegOpnd2, PTY_a64, vReg1, PTY_a64); + + MIRSymbol *sym = GlobalTables::GetGsymTable().CreateSymbol(kScopeGlobal); + if (begin) { + std::string funcName("MCC_InitializeLocalStackRef"); + sym->SetNameStrIdx(funcName); + CHECK_FATAL(countOpnd->GetValue() > 0, "refCount should be greater than 0."); + refCount = static_cast(countOpnd->GetValue()); + beginOffset = beginOpnd->GetValue(); + } else if (!needSkip) { + std::string funcName("MCC_CleanupLocalStackRef_NaiveRCFast"); + sym->SetNameStrIdx(funcName); + } else { + CHECK_NULL_FATAL(retRef); + if (retRef->GetStIndex() >= memLayout->GetSymAllocTable().size()) { + CHECK_FATAL(false, "index out of range in AArch64CGFunc::HandleRCCall"); + } + AArch64SymbolAlloc *symLoc = static_cast(memLayout->GetSymAllocInfo(retRef->GetStIndex())); + int32 stOffset = symLoc->GetOffset() / kOffsetAlign; + ImmOperand &retLoc = CreateImmOperand(stOffset, k64BitSize, true); + + regno_t vRegNO2 = NewVReg(GetRegTyFromPrimTy(PTY_a64), GetPrimTypeSize(PTY_a64)); + RegOperand &vReg2 = CreateVirtualRegisterOperand(vRegNO2); + SelectCopyImm(vReg2, retLoc, PTY_i64); + + RegOperand &parmRegOpnd3 = GetOrCreatePhysicalRegisterOperand(R2, k64BitSize, GetRegTyFromPrimTy(PTY_a64)); + srcOpnds->PushOpnd(parmRegOpnd3); + SelectCopy(parmRegOpnd3, PTY_a64, vReg2, PTY_a64); + + std::string funcName("MCC_CleanupLocalStackRefSkip_NaiveRCFast"); + sym->SetNameStrIdx(funcName); + } + sym->SetStorageClass(kScText); + sym->SetSKind(kStFunc); + + Insn &callInsn = AppendCall(*sym, *srcOpnds); + callInsn.SetRefSkipIdx(refSkipIndex); + if (frequency != 0) { + GetCurBB()->SetFrequency(frequency); + } + GetCurBB()->SetHasCall(); + if (begin) { + /* Insert Yield Point just after localrefvar are initialized. */ + GenerateYieldpoint(*GetCurBB()); + yieldPointInsn = GetCurBB()->GetLastInsn(); + } +} + +void AArch64CGFunc::SelectParmListDreadSmallAggregate(const MIRSymbol &sym, MIRType &structType, + ListOperand &srcOpnds, + int32 offset, AArch64CallConvImpl &parmLocator, FieldID fieldID) { + /* + * in two param regs if possible + * If struct is <= 8 bytes, then it fits into one param reg. + * If struct is <= 16 bytes, then it fits into two param regs. + * Otherwise, it goes onto the stack. + * If the number of available param reg is less than what is + * needed to fit the entire struct into them, then the param + * reg is skipped and the struct goes onto the stack. + * Example 1. + * struct size == 8 bytes. + * param regs x0 to x6 are used. + * struct is passed in x7. + * Example 2. + * struct is 16 bytes. + * param regs x0 to x5 are used. + * struct is passed in x6 and x7. + * Example 3. 
+ * struct is 16 bytes. + * param regs x0 to x6 are used. x7 alone is not enough to pass the struct. + * struct is passed on the stack. + * x7 is not used, as the following param will go onto the stack also. + */ + int32 symSize = GetBecommon().GetTypeSize(structType.GetTypeIndex().GetIdx()); + CCLocInfo ploc; + parmLocator.LocateNextParm(structType, ploc); + if (ploc.reg0 == 0) { + /* No param regs available, pass on stack. */ + /* If symSize is <= 8 bytes then use 1 reg, else 2 */ + CreateCallStructParamPassByStack(symSize, &sym, nullptr, ploc.memOffset); + } else { + /* pass by param regs. */ + RegOperand *parmOpnd0 = SelectParmListDreadAccessField(sym, fieldID, ploc, offset, 0); + srcOpnds.PushOpnd(*parmOpnd0); + if (ploc.reg1 > 0) { + RegOperand *parmOpnd1 = SelectParmListDreadAccessField(sym, fieldID, ploc, offset, 1); + srcOpnds.PushOpnd(*parmOpnd1); + } + if (ploc.reg2 > 0) { + RegOperand *parmOpnd2 = SelectParmListDreadAccessField(sym, fieldID, ploc, offset, 2); + srcOpnds.PushOpnd(*parmOpnd2); + } + if (ploc.reg3 > 0) { + RegOperand *parmOpnd3 = SelectParmListDreadAccessField(sym, fieldID, ploc, offset, 3); + srcOpnds.PushOpnd(*parmOpnd3); + } + } +} + +RegOperand *AArch64CGFunc::LoadIreadAddrForSamllAgg(BaseNode &iread) { + RegOperand *addrOpnd1 = nullptr; + if (iread.GetOpCode() == OP_iread) { + RegOperand *addrOpnd0 = static_cast(HandleExpr(iread, *(iread.Opnd(0)))); + addrOpnd1 = &LoadIntoRegister(*addrOpnd0, iread.Opnd(0)->GetPrimType()); + } else if (iread.GetOpCode() == OP_ireadfpoff) { + IreadFPoffNode &ireadoff = static_cast(iread); + RegOperand *rfp = &GetOrCreatePhysicalRegisterOperand(RFP, k64BitSize, kRegTyInt); + RegOperand *addrOpnd0 = &CreateRegisterOperandOfType(PTY_a64); + ImmOperand &immOpnd = CreateImmOperand(ireadoff.GetOffset(), k32BitSize, true); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xaddrri12, *addrOpnd0, *rfp, immOpnd)); + addrOpnd1 = &LoadIntoRegister(*addrOpnd0, PTY_i64); + } else if (iread.GetOpCode() == OP_ireadoff) { + IreadoffNode &ireadoff = static_cast(iread); + RegOperand *addrOpnd0 = static_cast(HandleExpr(ireadoff, *(ireadoff.Opnd(0)))); + addrOpnd1 = &LoadIntoRegister(*addrOpnd0, PTY_i64); + } + CHECK_FATAL(addrOpnd1 != nullptr, "addrOpnd for iread cannot be null"); + return addrOpnd1; +} + +void AArch64CGFunc::SelectParmListIreadSmallAggregate(BaseNode &iread, MIRType &structType, + ListOperand &srcOpnds, int32 offset, + AArch64CallConvImpl &parmLocator) { + int32 symSize = GetBecommon().GetTypeSize(structType.GetTypeIndex().GetIdx()); + RegOperand *addrOpnd1 = LoadIreadAddrForSamllAgg(iread); + CCLocInfo ploc; + parmLocator.LocateNextParm(structType, ploc); + if (ploc.reg0 == 0) { + /* No param regs available, pass on stack. */ + CreateCallStructParamPassByStack(symSize, nullptr, addrOpnd1, ploc.memOffset); + } else { + /* pass by param regs. 
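+     * ploc.fpSize distinguishes a pure floating-point aggregate (members loaded as f32 or f64 into
+     * SIMD/FP registers) from an integer aggregate (loaded as 64-bit chunks into GPRs); up to four
+     * registers (reg0..reg3) may be assigned.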
*/ + FpParamState state = kStateUnknown; + uint32 memSize = 0; + switch (ploc.fpSize) { + case k0BitSize: + state = kNotFp; + memSize = k64BitSize; + break; + case k4BitSize: + state = kFp32Bit; + memSize = k32BitSize; + break; + case k8BitSize: + state = kFp64Bit; + memSize = k64BitSize; + break; + default: + break; + } + OfstOperand *offOpnd0 = &GetOrCreateOfstOpnd(static_cast(static_cast(offset)), k32BitSize); + MemOperand *mopnd = &GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, memSize, addrOpnd1, nullptr, offOpnd0, nullptr); + CreateCallStructParamPassByReg(ploc.reg0, *mopnd, srcOpnds, state); + if (ploc.reg1 > 0) { + OfstOperand *offOpnd1 = &GetOrCreateOfstOpnd((((ploc.fpSize > 0) ? ploc.fpSize : GetPointerSize()) + + static_cast(offset)), k32BitSize); + mopnd = &GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, memSize, addrOpnd1, nullptr, offOpnd1, nullptr); + CreateCallStructParamPassByReg(ploc.reg1, *mopnd, srcOpnds, state); + } + if (ploc.reg2 > 0) { + OfstOperand *offOpnd2 = + &GetOrCreateOfstOpnd(((ploc.fpSize ? (ploc.fpSize * k4BitShift) : GetPointerSize()) + static_cast(offset)), + k32BitSize); + mopnd = &GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, memSize, addrOpnd1, nullptr, offOpnd2, nullptr); + CreateCallStructParamPassByReg(ploc.reg2, *mopnd, srcOpnds, state); + } + if (ploc.reg3 > 0) { + OfstOperand *offOpnd3 = &GetOrCreateOfstOpnd((((ploc.fpSize > 0) ? (ploc.fpSize * k8BitShift) : GetPointerSize()) + + static_cast(offset)), k32BitSize); + mopnd = &GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, memSize, addrOpnd1, nullptr, offOpnd3, nullptr); + CreateCallStructParamPassByReg(ploc.reg3, *mopnd, srcOpnds, state); + } + } +} + +void AArch64CGFunc::SelectParmListDreadLargeAggregate(const MIRSymbol &sym, MIRType &structType, + ListOperand &srcOpnds, + AArch64CallConvImpl &parmLocator, int32 &structCopyOffset, + int32 fromOffset) { + /* + * Pass larger sized struct on stack. + * Need to copy the entire structure onto the stack. + * The pointer to the starting address of the copied struct is then + * used as the parameter for the struct. + * This pointer is passed as the next parameter. + * Example 1: + * struct is 23 bytes. + * param regs x0 to x5 are used. + * First around up 23 to 24, so 3 of 8-byte slots. + * Copy struct to a created space on the stack. + * Pointer of copied struct is passed in x6. + * Example 2: + * struct is 25 bytes. + * param regs x0 to x7 are used. + * First around up 25 to 32, so 4 of 8-byte slots. + * Copy struct to a created space on the stack. + * Pointer of copied struct is passed on stack as the 9th parameter. + */ + uint64 symSize = GetBecommon().GetTypeSize(structType.GetTypeIndex().GetIdx()); + CCLocInfo ploc; + parmLocator.LocateNextParm(structType, ploc); + uint32 numMemOp = static_cast(RoundUp(symSize, GetPointerSize()) / GetPointerSize()); /* round up */ + /* Create the struct copies. 
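+   * CreateCallStructParamCopyToStack copies numMemOp pointer-sized words into the outgoing-argument
+   * area and returns the register holding the address of that copy, or nullptr when the address has
+   * to be passed through the caller's stack slot instead.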
*/ + RegOperand *parmOpnd = CreateCallStructParamCopyToStack(numMemOp, &sym, nullptr, structCopyOffset, + fromOffset, ploc); + if (parmOpnd) { + srcOpnds.PushOpnd(*parmOpnd); + } + structCopyOffset += static_cast(numMemOp * GetPointerSize()); +} + +void AArch64CGFunc::SelectParmListIreadLargeAggregate(const IreadNode &iread, MIRType &structType, + ListOperand &srcOpnds, AArch64CallConvImpl &parmLocator, + int32 &structCopyOffset, int32 fromOffset) { + uint64 symSize = GetBecommon().GetTypeSize(structType.GetTypeIndex().GetIdx()); + RegOperand *addrOpnd0 = static_cast(HandleExpr(iread, *(iread.Opnd(0)))); + RegOperand *addrOpnd1 = &LoadIntoRegister(*addrOpnd0, iread.Opnd(0)->GetPrimType()); + CCLocInfo ploc; + parmLocator.LocateNextParm(structType, ploc); + uint32 numMemOp = static_cast(RoundUp(symSize, GetPointerSize()) / GetPointerSize()); /* round up */ + RegOperand *parmOpnd = + CreateCallStructParamCopyToStack(numMemOp, nullptr, addrOpnd1, structCopyOffset, fromOffset, ploc); + structCopyOffset += static_cast(numMemOp * GetPointerSize()); + if (parmOpnd) { + srcOpnds.PushOpnd(*parmOpnd); + } +} + +void AArch64CGFunc::CreateCallStructParamPassByStack(int32 symSize, const MIRSymbol *sym, + RegOperand *addrOpnd, int32 baseOffset) { + if (symSize == 0) { + return; + } + MemOperand *ldMopnd = nullptr; + MemOperand *stMopnd = nullptr; + uint32 numRegNeeded = (static_cast(symSize) <= k8ByteSize) ? kOneRegister : kTwoRegister; + for (int j = 0; j < static_cast(numRegNeeded); j++) { + if (sym) { + if (CGOptions::IsArm64ilp32()) { + ldMopnd = &GetOrCreateMemOpnd(*sym, (j * static_cast(k8ByteSize)), k64BitSize); + } else { + ldMopnd = &GetOrCreateMemOpnd(*sym, (j * static_cast(GetPointerSize())), k64BitSize); + } + } else { + if (CGOptions::IsArm64ilp32()) { + ldMopnd = &GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, k64BitSize, addrOpnd, nullptr, + &GetOrCreateOfstOpnd(static_cast(j) * k8ByteSize, k32BitSize), nullptr); + } else { + ldMopnd = &GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, k64BitSize, addrOpnd, nullptr, + &GetOrCreateOfstOpnd(static_cast(j) * GetPointerSize(), k32BitSize), nullptr); + } + } + RegOperand *vreg = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize)); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(PickLdInsn(k64BitSize, PTY_i64), *vreg, *ldMopnd)); + if (CGOptions::IsArm64ilp32()) { + stMopnd = &CreateMemOpnd(RSP, (static_cast(baseOffset) + (j * static_cast(k8ByteSize))), k64BitSize); + } else { + stMopnd = &CreateMemOpnd(RSP, (static_cast(baseOffset) + (j * GetPointerSize())), k64BitSize); + } + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(PickStInsn(k64BitSize, PTY_i64), *vreg, *stMopnd)); + } +} + +RegOperand *AArch64CGFunc::SelectParmListDreadAccessField(const MIRSymbol &sym, FieldID fieldID, + const CCLocInfo &ploc, int32 offset, uint32 parmNum) { + uint32 memSize; + PrimType primType; + RegOperand *parmOpnd; + uint32 dataSizeBits; + AArch64reg reg; + switch (parmNum) { + case 0: + reg = static_cast(ploc.reg0); + break; + case 1: + reg = static_cast(ploc.reg1); + break; + case 2: + reg = static_cast(ploc.reg2); + break; + case 3: + reg = static_cast(ploc.reg3); + break; + default: + CHECK_FATAL(false, "Exceeded maximum allowed fp parameter registers for struct passing"); + } + if (ploc.fpSize == 0) { + memSize = k64BitSize; + primType = PTY_i64; + dataSizeBits = GetPrimTypeSize(PTY_i64) * kBitsPerByte; + parmOpnd = &GetOrCreatePhysicalRegisterOperand(reg, k64BitSize, kRegTyInt); + } else if (ploc.fpSize == k4ByteSize) { + memSize = k32BitSize; 
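+      /* 4-byte members of a pure floating-point aggregate are loaded as f32 */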
+ primType = PTY_f32; + dataSizeBits = GetPrimTypeSize(PTY_f32) * kBitsPerByte; + parmOpnd = &GetOrCreatePhysicalRegisterOperand(reg, k32BitSize, kRegTyFloat); + } else if (ploc.fpSize == k8ByteSize) { + memSize = k64BitSize; + primType = PTY_f64; + dataSizeBits = GetPrimTypeSize(PTY_i64) * kBitsPerByte; + parmOpnd = &GetOrCreatePhysicalRegisterOperand(reg, k64BitSize, kRegTyFloat); + } else { + CHECK_FATAL(false, "Unknown call parameter state"); + } + MemOperand *memOpnd; + if (sym.GetStorageClass() == kScFormal && fieldID > 0) { + MemOperand &baseOpnd = GetOrCreateMemOpnd(sym, 0, memSize); + RegOperand &base = CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize)); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(PickLdInsn(k64BitSize, PTY_i64), base, baseOpnd)); + memOpnd = &CreateMemOpnd(base, (static_cast(offset) + parmNum * GetPointerSize()), memSize); + } else if (ploc.fpSize > 0) { + memOpnd = &GetOrCreateMemOpnd(sym, (ploc.fpSize * parmNum + static_cast(offset)), memSize); + } else { + if (CGOptions::IsArm64ilp32()) { + memOpnd = &GetOrCreateMemOpnd(sym, (k8ByteSize * parmNum + static_cast(offset)), memSize); + } else { + memOpnd = &GetOrCreateMemOpnd(sym, (GetPointerSize() * parmNum + static_cast(offset)), memSize); + } + } + MOperator selectedMop = PickLdInsn(dataSizeBits, primType); + if ((memOpnd->GetAddrMode() == MemOperand::kAddrModeBOi) && + !IsOperandImmValid(selectedMop, memOpnd, kInsnSecondOpnd)) { + memOpnd = &SplitOffsetWithAddInstruction(*memOpnd, dataSizeBits); + } + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(selectedMop, *parmOpnd, *memOpnd)); + + return parmOpnd; +} + +void AArch64CGFunc::CreateCallStructParamPassByReg(regno_t regno, MemOperand &memOpnd, ListOperand &srcOpnds, + FpParamState state) { + RegOperand *parmOpnd; + uint32 dataSizeBits = 0; + PrimType pType = PTY_void; + parmOpnd = nullptr; + AArch64reg reg = static_cast(regno); + if (state == kNotFp) { + parmOpnd = &GetOrCreatePhysicalRegisterOperand(reg, k64BitSize, kRegTyInt); + dataSizeBits = GetPrimTypeSize(PTY_i64) * kBitsPerByte; + pType = PTY_i64; + } else if (state == kFp32Bit) { + parmOpnd = &GetOrCreatePhysicalRegisterOperand(reg, k32BitSize, kRegTyFloat); + dataSizeBits = GetPrimTypeSize(PTY_f32) * kBitsPerByte; + pType = PTY_f32; + } else if (state == kFp64Bit) { + parmOpnd = &GetOrCreatePhysicalRegisterOperand(reg, k64BitSize, kRegTyFloat); + dataSizeBits = GetPrimTypeSize(PTY_f64) * kBitsPerByte; + pType = PTY_f64; + } else { + ASSERT(0, "CreateCallStructParamPassByReg: Unknown state"); + } + + MOperator selectedMop = PickLdInsn(dataSizeBits, pType); + if (!IsOperandImmValid(selectedMop, &memOpnd, kInsnSecondOpnd)) { + memOpnd = SplitOffsetWithAddInstruction(memOpnd, dataSizeBits); + } + ASSERT(parmOpnd != nullptr, "parmOpnd should not be nullptr"); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(selectedMop, *parmOpnd, memOpnd)); + srcOpnds.PushOpnd(*parmOpnd); +} + +void AArch64CGFunc::CreateCallStructParamMemcpy(const MIRSymbol &sym, uint32 structSize, + int32 copyOffset, int32 fromOffset) { + std::vector opndVec; + + RegOperand *vreg1 = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8BitSize)); + opndVec.push_back(vreg1); /* result */ + + RegOperand *parmOpnd = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8BitSize)); + RegOperand *spReg = &GetOrCreatePhysicalRegisterOperand(RSP, k64BitSize, kRegTyInt); + ImmOperand *offsetOpnd0 = &CreateImmOperand(copyOffset, k64BitSize, false); + SelectAdd(*parmOpnd, *spReg, *offsetOpnd0, PTY_a64); + 
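+  /* memcpy destination: SP + copyOffset, the slot reserved for this struct in the outgoing-argument area */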
opndVec.push_back(parmOpnd); /* param 0 */ + + if (sym.GetStorageClass() == kScGlobal || sym.GetStorageClass() == kScExtern) { + StImmOperand &stopnd = CreateStImmOperand(sym, fromOffset, 0); + RegOperand &staddropnd = static_cast(CreateRegisterOperandOfType(PTY_u64)); + SelectAddrof(staddropnd, stopnd); + opndVec.push_back(&staddropnd); /* param 1 */ + } else if (sym.GetStorageClass() == kScAuto || sym.GetStorageClass() == kScFormal) { + RegOperand *parm1Reg = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize)); + AArch64SymbolAlloc *symloc = static_cast(GetMemlayout()->GetSymAllocInfo(sym.GetStIndex())); + RegOperand *baseOpnd = static_cast(GetBaseReg(*symloc)); + int32 stoffset = GetBaseOffset(*symloc); + ImmOperand *offsetOpnd1 = &CreateImmOperand(static_cast(stoffset), k64BitSize, false); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xaddrri12, *parm1Reg, *baseOpnd, *offsetOpnd1)); + if (sym.GetStorageClass() == kScFormal) { + MemOperand *ldmopnd = + &GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, k64BitSize, parm1Reg, nullptr, + &GetOrCreateOfstOpnd(0, k32BitSize), static_cast(nullptr)); + RegOperand *tmpreg = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize)); + RegOperand *vreg2 = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize)); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(PickLdInsn(k64BitSize, PTY_a64), + *tmpreg, *ldmopnd)); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xaddrri12, *vreg2, *tmpreg, + CreateImmOperand(fromOffset, k64BitSize, false))); + parm1Reg = vreg2; + } + opndVec.push_back(parm1Reg); /* param 1 */ + } else if (sym.GetStorageClass() == kScPstatic || sym.GetStorageClass() == kScFstatic) { + CHECK_FATAL(sym.GetSKind() != kStConst, "Unsupported sym const for struct param"); + StImmOperand *stopnd = &CreateStImmOperand(sym, 0, 0); + RegOperand &staddropnd = static_cast(CreateRegisterOperandOfType(PTY_u64)); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xadrp, staddropnd, *stopnd)); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xadrpl12, staddropnd, staddropnd, *stopnd)); + opndVec.push_back(&staddropnd); /* param 1 */ + } else { + CHECK_FATAL(false, "Unsupported sym for struct param"); + } + + RegOperand &vreg3 = CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8BitSize)); + ImmOperand &sizeOpnd = CreateImmOperand(structSize, k64BitSize, false); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_wmovri32, vreg3, sizeOpnd)); + opndVec.push_back(&vreg3); /* param 2 */ + + SelectLibCall("memcpy", opndVec, PTY_a64, PTY_a64); +} + +void AArch64CGFunc::CreateCallStructParamMemcpy(RegOperand &addrOpnd, uint32 structSize, + int32 copyOffset, int32 fromOffset) { + std::vector opndVec; + + RegOperand *vreg1 = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8BitSize)); + opndVec.push_back(vreg1); /* result */ + + RegOperand *parmOpnd = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8BitSize)); + RegOperand *spReg = &GetOrCreatePhysicalRegisterOperand(RSP, k64BitSize, kRegTyInt); + ImmOperand *offsetOpnd0 = &CreateImmOperand(copyOffset, k64BitSize, false); + SelectAdd(*parmOpnd, *spReg, *offsetOpnd0, PTY_a64); + opndVec.push_back(parmOpnd); /* param 0 */ + + if (fromOffset) { + RegOperand &p1vreg = CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize)); + ImmOperand &fromImm = CreateImmOperand(fromOffset, k64BitSize, true); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xaddrri12, p1vreg, addrOpnd, fromImm)); + opndVec.push_back(&p1vreg); /* param 1 */ + } else { + 
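+    /* fromOffset == 0: the source address register can be passed to memcpy unchanged */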
opndVec.push_back(&addrOpnd); /* param 1 */ + } + + RegOperand &vreg3 = CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8BitSize)); + ImmOperand &sizeOpnd = CreateImmOperand(structSize, k64BitSize, false); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_wmovri32, vreg3, sizeOpnd)); + opndVec.push_back(&vreg3); /* param 2 */ + + SelectLibCall("memcpy", opndVec, PTY_a64, PTY_a64); +} + +RegOperand *AArch64CGFunc::CreateCallStructParamCopyToStack(uint32 numMemOp, const MIRSymbol *sym, + RegOperand *addrOpd, int32 copyOffset, + int32 fromOffset, const CCLocInfo &ploc) { + /* Create the struct copies. */ + MemOperand *ldMopnd = nullptr; + MemOperand *stMopnd = nullptr; + for (uint32 j = 0; j < numMemOp; j++) { + if (sym != nullptr) { + if (sym->GetStorageClass() == kScFormal) { + MemOperand &base = GetOrCreateMemOpnd(*sym, 0, k64BitSize); + RegOperand &vreg = CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize)); + Insn &ldInsn = GetInsnBuilder()->BuildInsn(PickLdInsn(k64BitSize, PTY_i64), vreg, base); + GetCurBB()->AppendInsn(ldInsn); + ldMopnd = &GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, k64BitSize, &vreg, nullptr, + &GetOrCreateOfstOpnd((j * GetPointerSize() + static_cast(fromOffset)), k32BitSize), nullptr); + } else { + if (CGOptions::IsArm64ilp32()) { + ldMopnd = &GetOrCreateMemOpnd(*sym, (j * GetPointerSize() + static_cast(fromOffset)), k32BitSize); + } else { + ldMopnd = &GetOrCreateMemOpnd(*sym, (j * GetPointerSize() + static_cast(fromOffset)), k64BitSize); + } + } + } else { + ldMopnd = &GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, k64BitSize, addrOpd, nullptr, + &GetOrCreateOfstOpnd((j * GetPointerSize() + static_cast(fromOffset)), k32BitSize), nullptr); + } + if (CGOptions::IsArm64ilp32()) { + RegOperand *vreg = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k4ByteSize)); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(PickLdInsn(k32BitSize, PTY_i32), *vreg, *ldMopnd)); + + stMopnd = &CreateMemOpnd(RSP, (static_cast(copyOffset) + (j * GetPointerSize())), k32BitSize); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(PickStInsn(k32BitSize, PTY_i32), *vreg, *stMopnd)); + } else { + RegOperand *vreg = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize)); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(PickLdInsn(k64BitSize, PTY_i64), *vreg, *ldMopnd)); + + stMopnd = &CreateMemOpnd(RSP, (static_cast(copyOffset) + (j * GetPointerSize())), k64BitSize); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(PickStInsn(k64BitSize, PTY_i64), *vreg, *stMopnd)); + } + } + /* Create the copy address parameter for the struct */ + RegOperand *fpopnd = &GetOrCreatePhysicalRegisterOperand(RSP, k64BitSize, kRegTyInt); + ImmOperand *offset = &CreateImmOperand(copyOffset, k64BitSize, false); + if (ploc.reg0 == kRinvalid) { + RegOperand &res = CreateRegisterOperandOfType(PTY_u64); + SelectAdd(res, *fpopnd, *offset, PTY_u64); + MemOperand &stMopnd2 = CreateMemOpnd(RSP, ploc.memOffset, k64BitSize); + GetCurBB()->AppendInsn( + GetInsnBuilder()->BuildInsn(PickStInsn(k64BitSize, PTY_i64), res, stMopnd2)); + return nullptr; + } else { + RegOperand *parmOpnd = &GetOrCreatePhysicalRegisterOperand(static_cast(ploc.reg0), + k64BitSize, kRegTyInt); + SelectAdd(*parmOpnd, *fpopnd, *offset, PTY_a64); + return parmOpnd; + } +} + +void AArch64CGFunc::CreateCallStructMemcpyToParamReg(MIRType &structType, int32 structCopyOffset, + AArch64CallConvImpl &parmLocator, ListOperand &srcOpnds) { + RegOperand &spReg = GetOrCreatePhysicalRegisterOperand(RSP, k64BitSize, kRegTyInt); + 
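+  /* the struct has already been copied to SP + structCopyOffset; pass that address in the next integer
+     parameter register, or through the caller's stack slot when no register is left */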
ImmOperand &offsetOpnd = CreateImmOperand(structCopyOffset, k64BitSize, false); + + CCLocInfo ploc; + parmLocator.LocateNextParm(structType, ploc); + if (ploc.reg0 != 0) { + RegOperand &res = GetOrCreatePhysicalRegisterOperand(static_cast(ploc.reg0), k64BitSize, kRegTyInt); + SelectAdd(res, spReg, offsetOpnd, PTY_a64); + srcOpnds.PushOpnd(res); + } else { + RegOperand &parmOpnd = CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize)); + SelectAdd(parmOpnd, spReg, offsetOpnd, PTY_a64); + MemOperand &stmopnd = CreateMemOpnd(RSP, ploc.memOffset, k64BitSize); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(PickStInsn(k64BitSize, PTY_i64), parmOpnd, stmopnd)); + } +} + +void AArch64CGFunc::GenAggParmForDread(const BaseNode &parent, ListOperand &srcOpnds, + AArch64CallConvImpl &parmLocator, int32 &structCopyOffset, size_t argNo) { + int32 rhsOffset = 0; + BaseNode &argExpr = *parent.Opnd(argNo); + DreadNode &dread = static_cast(argExpr); + MIRSymbol *sym = GetBecommon().GetMIRModule().CurFunction()->GetLocalOrGlobalSymbol(dread.GetStIdx()); + MIRType *ty = sym->GetType(); + if (dread.GetFieldID() != 0) { + MIRStructType *structty = static_cast(ty); + ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(structty->GetFieldTyIdx(dread.GetFieldID())); + rhsOffset = GetBecommon().GetFieldOffset(*structty, dread.GetFieldID()).first; + } + uint64 symSize = GetBecommon().GetTypeSize(ty->GetTypeIndex().GetIdx()); + if (symSize <= k16ByteSize) { + SelectParmListDreadSmallAggregate(*sym, *ty, srcOpnds, rhsOffset, parmLocator, dread.GetFieldID()); + } else if (symSize > kParmMemcpySize) { + CreateCallStructMemcpyToParamReg(*ty, structCopyOffset, parmLocator, srcOpnds); + structCopyOffset += static_cast(RoundUp(symSize, GetPointerSize())); + } else { + SelectParmListDreadLargeAggregate(*sym, *ty, srcOpnds, parmLocator, structCopyOffset, rhsOffset); + } +} + +void AArch64CGFunc::GenAggParmForIread(const BaseNode &parent, ListOperand &srcOpnds, + AArch64CallConvImpl &parmLocator, int32 &structCopyOffset, size_t argNo) { + int32 rhsOffset = 0; + BaseNode &argExpr = *parent.Opnd(argNo); + IreadNode &iread = static_cast(argExpr); + MIRPtrType *pointerty = static_cast(GlobalTables::GetTypeTable().GetTypeFromTyIdx(iread.GetTyIdx())); + MIRType *ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(pointerty->GetPointedTyIdx()); + if (iread.GetFieldID() != 0) { + MIRStructType *structty = static_cast(ty); + ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(structty->GetFieldTyIdx(iread.GetFieldID())); + rhsOffset = GetBecommon().GetFieldOffset(*structty, iread.GetFieldID()).first; + } + uint64 symSize = GetBecommon().GetTypeSize(ty->GetTypeIndex().GetIdx()); + if (symSize <= k16ByteSize) { + SelectParmListIreadSmallAggregate(iread, *ty, srcOpnds, rhsOffset, parmLocator); + } else if (symSize > kParmMemcpySize) { + RegOperand *ireadOpnd = static_cast(HandleExpr(iread, *(iread.Opnd(0)))); + if (rhsOffset > 0) { + RegOperand *addrOpnd = &LoadIntoRegister(*ireadOpnd, iread.Opnd(0)->GetPrimType()); + regno_t vRegNO = NewVReg(kRegTyInt, k8ByteSize); + RegOperand *result = &CreateVirtualRegisterOperand(vRegNO); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xaddrri12, *result, *addrOpnd, + CreateImmOperand(rhsOffset, k64BitSize, false))); + } + CreateCallStructMemcpyToParamReg(*ty, structCopyOffset, parmLocator, srcOpnds); + structCopyOffset += static_cast(RoundUp(symSize, GetPointerSize())); + } else { + SelectParmListIreadLargeAggregate(iread, *ty, srcOpnds, parmLocator, structCopyOffset, rhsOffset); + } 
+} + +void AArch64CGFunc::GenAggParmForIreadoff(BaseNode &parent, ListOperand &srcOpnds, + AArch64CallConvImpl &parmLocator, int32 &structCopyOffset, size_t argNo) { + int32 rhsOffset = 0; + BaseNode &argExpr = *parent.Opnd(argNo); + IreadoffNode &iread = static_cast(argExpr); + MIRStructType *ty = GetLmbcStructArgType(parent, argNo); + if (ty == nullptr) { + return; + } + uint64 symSize = GetBecommon().GetTypeSize(ty->GetTypeIndex().GetIdx()); + if (symSize <= k16ByteSize) { + rhsOffset = iread.GetOffset(); + SelectParmListIreadSmallAggregate(iread, *ty, srcOpnds, rhsOffset, parmLocator); + } else { + CreateCallStructMemcpyToParamReg(*ty, structCopyOffset, parmLocator, srcOpnds); + structCopyOffset += static_cast(RoundUp(symSize, GetPointerSize())); + } +} + +void AArch64CGFunc::GenAggParmForIreadfpoff(BaseNode &parent, ListOperand &srcOpnds, + AArch64CallConvImpl &parmLocator, int32 &structCopyOffset, size_t argNo) { + int32 rhsOffset = 0; + BaseNode &argExpr = *parent.Opnd(argNo); + IreadFPoffNode &iread = static_cast(argExpr); + MIRStructType *ty = GetLmbcStructArgType(parent, argNo); + if (ty == nullptr) { /* param < arg */ + return; + } + uint64 symSize = GetBecommon().GetTypeSize(ty->GetTypeIndex().GetIdx()); + if (symSize <= k16ByteSize) { + SelectParmListIreadSmallAggregate(iread, *ty, srcOpnds, rhsOffset, parmLocator); + } else { + CreateCallStructMemcpyToParamReg(*ty, structCopyOffset, parmLocator, srcOpnds); + structCopyOffset += static_cast(RoundUp(symSize, GetPointerSize())); + } +} + +void AArch64CGFunc::SelectParmListForAggregate(BaseNode &parent, ListOperand &srcOpnds, + AArch64CallConvImpl &parmLocator, int32 &structCopyOffset, + size_t argNo) { + BaseNode &argExpr = *parent.Opnd(argNo); + if (argExpr.GetOpCode() == OP_dread) { + GenAggParmForDread(parent, srcOpnds, parmLocator, structCopyOffset, argNo); + } else if (argExpr.GetOpCode() == OP_iread) { + GenAggParmForIread(parent, srcOpnds, parmLocator, structCopyOffset, argNo); + } else if (argExpr.GetOpCode() == OP_ireadfpoff) { + GenAggParmForIreadfpoff(parent, srcOpnds, parmLocator, structCopyOffset, argNo); + } else if (argExpr.GetOpCode() == OP_ireadoff) { + GenAggParmForIreadoff(parent, srcOpnds, parmLocator, structCopyOffset, argNo); + } else { + CHECK_FATAL(false, "NYI"); + } +} + +size_t AArch64CGFunc::SelectParmListGetStructReturnSize(StmtNode &naryNode) { + if (naryNode.GetOpCode() == OP_call) { + CallNode &callNode = static_cast(naryNode); + MIRFunction *callFunc = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(callNode.GetPUIdx()); + TyIdx retIdx = callFunc->GetReturnTyIdx(); + if (callFunc->IsFirstArgReturn()) { + MIRType *ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(callFunc->GetFormalDefVec()[0].formalTyIdx); + return GetBecommon().GetTypeSize(static_cast(ty)->GetPointedTyIdx()); + } + size_t retSize = GetBecommon().GetTypeSize(retIdx.GetIdx()); + if ((retSize == 0) && callFunc->IsReturnStruct()) { + TyIdx tyIdx = callFunc->GetFuncRetStructTyIdx(); + return GetBecommon().GetTypeSize(tyIdx); + } + return retSize; + } else if (naryNode.GetOpCode() == OP_icall) { + IcallNode &icallNode = static_cast(naryNode); + CallReturnVector *p2nrets = &icallNode.GetReturnVec(); + if (p2nrets->size() == k1ByteSize) { + StIdx stIdx = (*p2nrets)[0].first; + MIRSymbol *sym = GetBecommon().GetMIRModule().CurFunction()->GetSymTab()->GetSymbolFromStIdx(stIdx.Idx()); + if (sym != nullptr) { + return GetBecommon().GetTypeSize(sym->GetTyIdx().GetIdx()); + } + } + } else if (naryNode.GetOpCode() == OP_icallproto) { + 
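+    /* for icallproto the prototype attached to the node carries the return information; when
+       FirstArgReturn is set, the struct is returned through a pointer passed as the first argument */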
IcallNode &icallProto = static_cast(naryNode); + MIRFuncType *funcTy = static_cast( + GlobalTables::GetTypeTable().GetTypeFromTyIdx(icallProto.GetRetTyIdx())); + if (funcTy->FirstArgReturn()) { + MIRType *ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(funcTy->GetNthParamType(0)); + return GetBecommon().GetTypeSize(static_cast(ty)->GetPointedTyIdx()); + } + return GetBecommon().GetTypeSize(funcTy->GetRetTyIdx()); + } + return 0; +} + +void AArch64CGFunc::GenLargeStructCopyForDread(BaseNode &argExpr, int32 &structCopyOffset) { + int32 rhsOffset = 0; + DreadNode &dread = static_cast(argExpr); + MIRSymbol *sym = GetBecommon().GetMIRModule().CurFunction()->GetLocalOrGlobalSymbol(dread.GetStIdx()); + MIRType *ty = sym->GetType(); + if (dread.GetFieldID() != 0) { + MIRStructType *structty = static_cast(ty); + ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(structty->GetFieldTyIdx(dread.GetFieldID())); + rhsOffset = GetBecommon().GetFieldOffset(*structty, dread.GetFieldID()).first; + } + uint64 symSize = GetBecommon().GetTypeSize(ty->GetTypeIndex().GetIdx()); + if (symSize > kParmMemcpySize) { + CreateCallStructParamMemcpy(*sym, static_cast(symSize), structCopyOffset, rhsOffset); + structCopyOffset += static_cast(RoundUp(symSize, GetPointerSize())); + } else if (symSize > k16ByteSize) { + uint32 numMemOp = static_cast(RoundUp(symSize, GetPointerSize()) / GetPointerSize()); + structCopyOffset += static_cast(numMemOp * GetPointerSize()); + } +} + +void AArch64CGFunc::GenLargeStructCopyForIread(BaseNode &argExpr, int32 &structCopyOffset) { + int32 rhsOffset = 0; + IreadNode &iread = static_cast(argExpr); + MIRPtrType *pointerty = static_cast(GlobalTables::GetTypeTable().GetTypeFromTyIdx(iread.GetTyIdx())); + MIRType *ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(pointerty->GetPointedTyIdx()); + if (iread.GetFieldID() != 0) { + MIRStructType *structty = static_cast(ty); + ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(structty->GetFieldTyIdx(iread.GetFieldID())); + rhsOffset = GetBecommon().GetFieldOffset(*structty, iread.GetFieldID()).first; + } + uint64 symSize = GetBecommon().GetTypeSize(ty->GetTypeIndex().GetIdx()); + if (symSize > kParmMemcpySize) { + RegOperand *ireadOpnd = static_cast(HandleExpr(iread, *(iread.Opnd(0)))); + RegOperand &addrOpnd = LoadIntoRegister(*ireadOpnd, iread.Opnd(0)->GetPrimType()); + CreateCallStructParamMemcpy(addrOpnd, static_cast(symSize), structCopyOffset, rhsOffset); + structCopyOffset += static_cast(RoundUp(symSize, GetPointerSize())); + } else if (symSize > k16ByteSize) { + uint32 numMemOp = static_cast(RoundUp(symSize, GetPointerSize()) / GetPointerSize()); + structCopyOffset += static_cast(numMemOp * GetPointerSize()); + } +} + +void AArch64CGFunc::GenLargeStructCopyForIreadfpoff(BaseNode &parent, BaseNode &argExpr, + int32 &structCopyOffset, size_t argNo) { + IreadFPoffNode &ireadoff = static_cast(argExpr); + MIRStructType *ty = GetLmbcStructArgType(parent, argNo); + uint64 symSize = GetBecommon().GetTypeSize(ty->GetTypeIndex()); + if (symSize > k16ByteSize) { /* kParmMemcpySize */ + RegOperand *rfp = &GetOrCreatePhysicalRegisterOperand(RFP, k64BitSize, kRegTyInt); + RegOperand &addrOpnd = CreateRegisterOperandOfType(PTY_a64); + ImmOperand &immOpnd = CreateImmOperand(ireadoff.GetOffset(), k32BitSize, true); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xaddrri12, addrOpnd, *rfp, immOpnd)); + CreateCallStructParamMemcpy(addrOpnd, static_cast(symSize), structCopyOffset, 0); + structCopyOffset += static_cast(RoundUp(symSize, 
GetPointerSize())); + } +} + +void AArch64CGFunc::GenLargeStructCopyForIreadoff(BaseNode &parent, BaseNode &argExpr, + int32 &structCopyOffset, size_t argNo) { + IreadoffNode &ireadoff = static_cast(argExpr); + MIRStructType *ty = GetLmbcStructArgType(parent, argNo); + uint64 symSize = GetBecommon().GetTypeSize(ty->GetTypeIndex()); + if (symSize > k16ByteSize) { /* kParmMemcpySize */ + RegOperand *addrOpnd = static_cast( + HandleExpr(ireadoff, *(ireadoff.Opnd(0)))); + int32 fromOffset = ireadoff.GetOffset(); + CreateCallStructParamMemcpy(*addrOpnd, static_cast(symSize), structCopyOffset, fromOffset); + structCopyOffset += static_cast(RoundUp(symSize, GetPointerSize())); + } +} + +void AArch64CGFunc::SelectParmListPreprocessLargeStruct(BaseNode &parent, BaseNode &argExpr, + int32 &structCopyOffset, size_t argNo) { + if (argExpr.GetOpCode() == OP_dread) { + GenLargeStructCopyForDread(argExpr, structCopyOffset); + } else if (argExpr.GetOpCode() == OP_iread) { + GenLargeStructCopyForIread(argExpr, structCopyOffset); + } else if (argExpr.GetOpCode() == OP_ireadfpoff) { + GenLargeStructCopyForIreadfpoff(parent, argExpr, structCopyOffset, argNo); + } else if (argExpr.GetOpCode() == OP_ireadoff) { + GenLargeStructCopyForIreadoff(parent, argExpr, structCopyOffset, argNo); + } +} + +/* preprocess call in parmlist */ +bool AArch64CGFunc::MarkParmListCall(BaseNode &expr) { + if (!CGOptions::IsPIC()) { + return false; + } + switch (expr.GetOpCode()) { + case OP_addrof: { + auto &addrNode = static_cast(expr); + MIRSymbol *symbol = GetFunction().GetLocalOrGlobalSymbol(addrNode.GetStIdx()); + if (symbol->IsThreadLocal()) { + return true; + } + break; + } + default: { + for (size_t i = 0; i < expr.GetNumOpnds(); i++) { + if (expr.Opnd(i)) { + if (MarkParmListCall(*expr.Opnd(i))) { + return true; + } + } + } + break; + } + } + return false; +} + +void AArch64CGFunc::SelectParmListPreprocess(StmtNode &naryNode, size_t start, std::set &specialArgs) { + size_t i = start; + int32 structCopyOffset = GetMaxParamStackSize() - GetStructCopySize(); + for (; i < naryNode.NumOpnds(); ++i) { + BaseNode *argExpr = naryNode.Opnd(i); + PrimType primType = argExpr->GetPrimType(); + if (MarkParmListCall(*argExpr)) { + (void)specialArgs.emplace(i); + } + ASSERT(primType != PTY_void, "primType should not be void"); + if (primType != PTY_agg) { + continue; + } + SelectParmListPreprocessLargeStruct(naryNode, *argExpr, structCopyOffset, i); + } +} + +/* + SelectParmList generates an instrunction for each of the parameters + to load the parameter value into the corresponding register. + We return a list of registers to the call instruction because + they may be needed in the register allocation phase. 
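+  As an illustration only (the actual assignment is decided by AArch64CallConvImpl following
+  the AAPCS64 rules): for a call such as foo(int a, double d, struct S s) with a 16-byte
+  integer struct S, the locator would typically place a in w0, d in d0 and s in x1/x2,
+  while any argument that no longer fits in r0-r7/v0-v7 is stored to its stack slot
+  relative to SP.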
+ */ +void AArch64CGFunc::SelectParmList(StmtNode &naryNode, ListOperand &srcOpnds, bool isCallNative) { + size_t i = 0; + if (naryNode.GetOpCode() == OP_icall || naryNode.GetOpCode() == OP_icallproto || isCallNative) { + i++; + } + std::set specialArgs; + SelectParmListPreprocess(naryNode, i, specialArgs); + bool specialArg = false; + bool firstArgReturn = false; + MIRFunction *callee = nullptr; + if (dynamic_cast(&naryNode) != nullptr) { + auto calleePuIdx = static_cast(naryNode).GetPUIdx(); + callee = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(calleePuIdx); + firstArgReturn = callee->IsFirstArgReturn(); + } else if (naryNode.GetOpCode() == OP_icallproto) { + IcallNode *icallnode = &static_cast(naryNode); + MIRType *protoType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(icallnode->GetRetTyIdx()); + MIRFuncType *funcType = nullptr; + if (protoType->IsMIRPtrType()) { + funcType = static_cast(protoType)->GetPointedFuncType(); + } else if (protoType->IsMIRFuncType()) { + funcType = static_cast(protoType); + } + CHECK_FATAL(funcType != nullptr, "cannot find prototype for icall"); + firstArgReturn = funcType->FirstArgReturn(); + } + BB *curBBrecord = GetCurBB(); + BB *tmpBB = nullptr; + if (!specialArgs.empty()) { + tmpBB = CreateNewBB(); + specialArg = true; + } + AArch64CallConvImpl parmLocator(GetBecommon()); + CCLocInfo ploc; + int32 structCopyOffset = GetMaxParamStackSize() - GetStructCopySize(); + std::vector insnForStackArgs; + uint32 stackArgsCount = 0; + for (uint32 pnum = 0; i < naryNode.NumOpnds(); ++i, ++pnum) { + if (specialArg) { + ASSERT(tmpBB, "need temp bb for lower priority args"); + SetCurBB((specialArgs.count(i) > 0) ? *curBBrecord : *tmpBB); + } + bool is64x1vec = false; + MIRType *ty = nullptr; + BaseNode *argExpr = naryNode.Opnd(i); + PrimType primType = argExpr->GetPrimType(); + ASSERT(primType != PTY_void, "primType should not be void"); + if (callee != nullptr && pnum < callee->GetFormalCount() && callee->GetFormal(pnum) != nullptr) { + is64x1vec = callee->GetFormal(pnum)->GetAttr(ATTR_oneelem_simd); + } + switch (argExpr->op) { + case OP_dread: { + DreadNode *dNode = static_cast(argExpr); + MIRSymbol *symbol = GetFunction().GetLocalOrGlobalSymbol(dNode->GetStIdx()); + if (dNode->GetFieldID() != 0) { + MIRStructType *structType = static_cast(symbol->GetType()); + ASSERT(structType != nullptr, "SelectParmList: non-zero fieldID for non-structure"); + FieldAttrs fa = structType->GetFieldAttrs(dNode->GetFieldID()); + is64x1vec = fa.GetAttr(FLDATTR_oneelem_simd); + } else { + is64x1vec = symbol->GetAttr(ATTR_oneelem_simd); + } + break; + } + case OP_iread: { + IreadNode *iNode = static_cast(argExpr); + MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(iNode->GetTyIdx()); + MIRPtrType *ptrTyp = static_cast(type); + ASSERT(ptrTyp != nullptr, "expect a pointer type at iread node"); + MIRType *pointedTy = GlobalTables::GetTypeTable().GetTypeFromTyIdx(ptrTyp->GetPointedTyIdx()); + if (iNode->GetFieldID() != 0) { + MIRStructType *structType = static_cast(pointedTy); + FieldAttrs fa = structType->GetFieldAttrs(iNode->GetFieldID()); + is64x1vec = fa.GetAttr(FLDATTR_oneelem_simd); + } else { + TypeAttrs ta = static_cast(ptrTyp)->GetTypeAttrs(); + is64x1vec = ta.GetAttr(ATTR_oneelem_simd); + } + break; + } + case OP_constval: { + CallNode *call = safe_cast(&naryNode); + if (call == nullptr) { + break; + } + MIRFunction *fn = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(call->GetPUIdx()); + if (fn == nullptr || fn->GetFormalCount() == 0 || 
fn->GetFormalCount() <= pnum) { + break; + } + is64x1vec = fn->GetFormalDefAt(pnum).formalAttrs.GetAttr(ATTR_oneelem_simd); + break; + } + default: + break; + } + /* use alloca */ + if (primType == PTY_agg) { + SelectParmListForAggregate(naryNode, srcOpnds, parmLocator, structCopyOffset, i); + continue; + } + ty = GlobalTables::GetTypeTable().GetTypeTable()[static_cast(primType)]; + RegOperand *expRegOpnd = nullptr; + Operand *opnd = HandleExpr(naryNode, *argExpr); + if (opnd->GetKind() == Operand::kOpdRegister && static_cast(opnd)->GetIF64Vec()) { + is64x1vec = true; + } + if (!opnd->IsRegister()) { + opnd = &LoadIntoRegister(*opnd, primType); + } + expRegOpnd = static_cast(opnd); + + if ((pnum == 0) && firstArgReturn) { + parmLocator.InitCCLocInfo(ploc); + ploc.reg0 = R8; + } else { + parmLocator.LocateNextParm(*ty, ploc); + } + /* is64x1vec should be an int64 value in an FP/simd reg for ABI compliance, + convert R-reg to equivalent V-reg */ + PrimType destPrimType = primType; + if (is64x1vec && ploc.reg0 != kRinvalid && ploc.reg0 < R7) { + ploc.reg0 = AArch64Abi::kFloatParmRegs[static_cast(ploc.reg0) - 1]; + destPrimType = PTY_f64; + } + + /* skip unused args */ + if (callee != nullptr && callee->GetFuncDesc().IsArgUnused(pnum)) { + continue; + } + + if (ploc.reg0 != kRinvalid) { /* load to the register. */ + CHECK_FATAL(expRegOpnd != nullptr, "null ptr check"); + RegOperand &parmRegOpnd = GetOrCreatePhysicalRegisterOperand( + static_cast(ploc.reg0), expRegOpnd->GetSize(), GetRegTyFromPrimTy(destPrimType)); + SelectCopy(parmRegOpnd, destPrimType, *expRegOpnd, primType); + srcOpnds.PushOpnd(parmRegOpnd); + } else { /* store to the memory segment for stack-passsed arguments. */ + if (CGOptions::IsBigEndian()) { + if (GetPrimTypeBitSize(primType) < k64BitSize) { + ploc.memOffset = ploc.memOffset + static_cast(k4BitSize); + } + } + MemOperand &actMemOpnd = CreateMemOpnd(RSP, ploc.memOffset, GetPrimTypeBitSize(primType)); + Insn &strInsn = GetInsnBuilder()->BuildInsn(PickStInsn(GetPrimTypeBitSize(primType), primType), *expRegOpnd, + actMemOpnd); + actMemOpnd.SetStackArgMem(true); + if (Globals::GetInstance()->GetOptimLevel() == CGOptions::kLevel2 && stackArgsCount < kShiftAmount12) { + (void)insnForStackArgs.emplace_back(&strInsn); + stackArgsCount++; + } else { + GetCurBB()->AppendInsn(strInsn); + } + } + ASSERT(ploc.reg1 == 0, "SelectCall NYI"); + } + if (specialArg) { + ASSERT(tmpBB, "need temp bb for lower priority args"); + curBBrecord->InsertAtEnd(*tmpBB); + SetCurBB(*curBBrecord); + } + for (auto &strInsn : insnForStackArgs) { + GetCurBB()->AppendInsn(*strInsn); + } +} + +/* + * for MCC_DecRefResetPair(addrof ptr %Reg17_R5592, addrof ptr %Reg16_R6202) or + * MCC_ClearLocalStackRef(addrof ptr %Reg17_R5592), the parameter (addrof ptr xxx) is converted to asm as follow: + * add vreg, x29, #imm + * mov R0/R1, vreg + * this function is used to prepare parameters, the generated vreg is returned, and #imm is saved in offsetValue. 
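+ * A concrete illustration (the offset value is arbitrary, and x10 stands for the returned
+ * vreg after register allocation), for a ref-local placed at #24 from the frame pointer:
+ *   add x10, x29, #24    // 24 is recorded in offsetValue
+ *   mov x0, x10          // emitted later when the parameter is moved into R0/R1
+ * The saved offsetValue lets SelectCall tag the call instruction with the cleared stack slot.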
+ */ +Operand *AArch64CGFunc::SelectClearStackCallParam(const AddrofNode &expr, int64 &offsetValue) { + MIRSymbol *symbol = GetMirModule().CurFunction()->GetLocalOrGlobalSymbol(expr.GetStIdx()); + PrimType ptype = expr.GetPrimType(); + regno_t vRegNO = NewVReg(kRegTyInt, GetPrimTypeSize(ptype)); + Operand &result = CreateVirtualRegisterOperand(vRegNO); + CHECK_FATAL(expr.GetFieldID() == 0, "the fieldID of parameter in clear stack reference call must be 0"); + if (!CGOptions::IsQuiet()) { + maple::LogInfo::MapleLogger(kLlErr) << + "Warning: we expect AddrOf with StImmOperand is not used for local variables"; + } + auto *symLoc = static_cast(GetMemlayout()->GetSymAllocInfo(symbol->GetStIndex())); + ImmOperand *offset = nullptr; + if (symLoc->GetMemSegment()->GetMemSegmentKind() == kMsArgsStkPassed) { + offset = &CreateImmOperand(GetBaseOffset(*symLoc), k64BitSize, false, kUnAdjustVary); + } else if (symLoc->GetMemSegment()->GetMemSegmentKind() == kMsRefLocals) { + auto it = immOpndsRequiringOffsetAdjustmentForRefloc.find(symLoc); + if (it != immOpndsRequiringOffsetAdjustmentForRefloc.end()) { + offset = (*it).second; + } else { + offset = &CreateImmOperand(GetBaseOffset(*symLoc), k64BitSize, false); + immOpndsRequiringOffsetAdjustmentForRefloc[symLoc] = offset; + } + } else { + CHECK_FATAL(false, "the symLoc of parameter in clear stack reference call is unreasonable"); + } + ASSERT(offset != nullptr, "offset should not be nullptr"); + offsetValue = offset->GetValue(); + SelectAdd(result, *GetBaseReg(*symLoc), *offset, PTY_u64); + if (GetCG()->GenerateVerboseCG()) { + /* Add a comment */ + Insn *insn = GetCurBB()->GetLastInsn(); + std::string comm = "local/formal var: "; + comm.append(symbol->GetName()); + insn->SetComment(comm); + } + return &result; +} + +/* select paramters for MCC_DecRefResetPair and MCC_ClearLocalStackRef function */ +void AArch64CGFunc::SelectClearStackCallParmList(const StmtNode &naryNode, ListOperand &srcOpnds, + std::vector &stackPostion) { + AArch64CallConvImpl parmLocator(GetBecommon()); + CCLocInfo ploc; + for (size_t i = 0; i < naryNode.NumOpnds(); ++i) { + MIRType *ty = nullptr; + BaseNode *argExpr = naryNode.Opnd(i); + PrimType primType = argExpr->GetPrimType(); + ASSERT(primType != PTY_void, "primType check"); + /* use alloc */ + CHECK_FATAL(primType != PTY_agg, "the type of argument is unreasonable"); + ty = GlobalTables::GetTypeTable().GetTypeTable()[static_cast(primType)]; + CHECK_FATAL(argExpr->GetOpCode() == OP_addrof, "the argument of clear stack call is unreasonable"); + auto *expr = static_cast(argExpr); + int64 offsetValue = 0; + Operand *opnd = SelectClearStackCallParam(*expr, offsetValue); + stackPostion.emplace_back(offsetValue); + auto *expRegOpnd = static_cast(opnd); + parmLocator.LocateNextParm(*ty, ploc); + CHECK_FATAL(ploc.reg0 != 0, "the parameter of ClearStackCall must be passed by register"); + CHECK_FATAL(expRegOpnd != nullptr, "null ptr check"); + RegOperand &parmRegOpnd = GetOrCreatePhysicalRegisterOperand( + static_cast(ploc.reg0), expRegOpnd->GetSize(), GetRegTyFromPrimTy(primType)); + SelectCopy(parmRegOpnd, primType, *expRegOpnd, primType); + srcOpnds.PushOpnd(parmRegOpnd); + ASSERT(ploc.reg1 == 0, "SelectCall NYI"); + } +} + +/* + * intrinsify Unsafe.getAndAddInt and Unsafe.getAndAddLong + * generate an intrinsic instruction instead of a function call + * intrinsic_get_add_int w0, xt, ws, ws, x1, x2, w3, label + */ +void AArch64CGFunc::IntrinsifyGetAndAddInt(ListOperand &srcOpnds, PrimType pty) { + MapleList &opnds = 
srcOpnds.GetOperands(); + /* Unsafe.getAndAddInt has more than 4 parameters */ + ASSERT(opnds.size() >= 4, "ensure the operands number"); + auto iter = opnds.cbegin(); + RegOperand *objOpnd = *(++iter); + RegOperand *offOpnd = *(++iter); + RegOperand *deltaOpnd = *(++iter); + auto &retVal = static_cast(GetTargetRetOperand(pty, -1)); + LabelIdx labIdx = CreateLabel(); + LabelOperand &targetOpnd = GetOrCreateLabelOperand(labIdx); + RegOperand &tempOpnd0 = CreateRegisterOperandOfType(PTY_i64); + RegOperand &tempOpnd1 = CreateRegisterOperandOfType(pty); + RegOperand &tempOpnd2 = CreateRegisterOperandOfType(PTY_i32); + MOperator mOp = (pty == PTY_i64) ? MOP_get_and_addL : MOP_get_and_addI; + std::vector intrnOpnds; + intrnOpnds.emplace_back(&retVal); + intrnOpnds.emplace_back(&tempOpnd0); + intrnOpnds.emplace_back(&tempOpnd1); + intrnOpnds.emplace_back(&tempOpnd2); + intrnOpnds.emplace_back(objOpnd); + intrnOpnds.emplace_back(offOpnd); + intrnOpnds.emplace_back(deltaOpnd); + intrnOpnds.emplace_back(&targetOpnd); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, intrnOpnds)); +} + +/* + * intrinsify Unsafe.getAndSetInt and Unsafe.getAndSetLong + * generate an intrinsic instruction instead of a function call + */ +void AArch64CGFunc::IntrinsifyGetAndSetInt(ListOperand &srcOpnds, PrimType pty) { + MapleList &opnds = srcOpnds.GetOperands(); + /* Unsafe.getAndSetInt has 4 parameters */ + ASSERT(opnds.size() == 4, "ensure the operands number"); + auto iter = opnds.cbegin(); + RegOperand *objOpnd = *(++iter); + RegOperand *offOpnd = *(++iter); + RegOperand *newValueOpnd = *(++iter); + auto &retVal = static_cast(GetTargetRetOperand(pty, -1)); + LabelIdx labIdx = CreateLabel(); + LabelOperand &targetOpnd = GetOrCreateLabelOperand(labIdx); + RegOperand &tempOpnd0 = CreateRegisterOperandOfType(PTY_i64); + RegOperand &tempOpnd1 = CreateRegisterOperandOfType(PTY_i32); + + MOperator mOp = (pty == PTY_i64) ? MOP_get_and_setL : MOP_get_and_setI; + std::vector intrnOpnds; + intrnOpnds.emplace_back(&retVal); + intrnOpnds.emplace_back(&tempOpnd0); + intrnOpnds.emplace_back(&tempOpnd1); + intrnOpnds.emplace_back(objOpnd); + intrnOpnds.emplace_back(offOpnd); + intrnOpnds.emplace_back(newValueOpnd); + intrnOpnds.emplace_back(&targetOpnd); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, intrnOpnds)); +} + +/* + * intrinsify Unsafe.compareAndSwapInt and Unsafe.compareAndSwapLong + * generate an intrinsic instruction instead of a function call + */ +void AArch64CGFunc::IntrinsifyCompareAndSwapInt(ListOperand &srcOpnds, PrimType pty) { + MapleList &opnds = srcOpnds.GetOperands(); + /* Unsafe.compareAndSwapInt has more than 5 parameters */ + ASSERT(opnds.size() >= 5, "ensure the operands number"); + auto iter = opnds.cbegin(); + RegOperand *objOpnd = *(++iter); + RegOperand *offOpnd = *(++iter); + RegOperand *expectedValueOpnd = *(++iter); + RegOperand *newValueOpnd = *(++iter); + auto &retVal = static_cast(GetTargetRetOperand(PTY_i64, -1)); + RegOperand &tempOpnd0 = CreateRegisterOperandOfType(PTY_i64); + RegOperand &tempOpnd1 = CreateRegisterOperandOfType(pty); + LabelIdx labIdx1 = CreateLabel(); + LabelOperand &label1Opnd = GetOrCreateLabelOperand(labIdx1); + LabelIdx labIdx2 = CreateLabel(); + LabelOperand &label2Opnd = GetOrCreateLabelOperand(labIdx2); + MOperator mOp = (pty == PTY_i32) ? 
MOP_compare_and_swapI : MOP_compare_and_swapL; + std::vector intrnOpnds; + intrnOpnds.emplace_back(&retVal); + intrnOpnds.emplace_back(&tempOpnd0); + intrnOpnds.emplace_back(&tempOpnd1); + intrnOpnds.emplace_back(objOpnd); + intrnOpnds.emplace_back(offOpnd); + intrnOpnds.emplace_back(expectedValueOpnd); + intrnOpnds.emplace_back(newValueOpnd); + intrnOpnds.emplace_back(&label1Opnd); + intrnOpnds.emplace_back(&label2Opnd); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, intrnOpnds)); +} + +/* + * the lowest bit of count field is used to indicate whether or not the string is compressed + * if the string is not compressed, jump to jumpLabIdx + */ +RegOperand *AArch64CGFunc::CheckStringIsCompressed(BB &bb, RegOperand &str, int32 countOffset, PrimType countPty, + LabelIdx jumpLabIdx) { + MemOperand &memOpnd = CreateMemOpnd(str, countOffset, str.GetSize()); + uint32 bitSize = GetPrimTypeBitSize(countPty); + MOperator loadOp = PickLdInsn(bitSize, countPty); + RegOperand &countOpnd = CreateRegisterOperandOfType(countPty); + bb.AppendInsn(GetInsnBuilder()->BuildInsn(loadOp, countOpnd, memOpnd)); + ImmOperand &immValueOne = CreateImmOperand(countPty, 1); + RegOperand &countLowestBitOpnd = CreateRegisterOperandOfType(countPty); + MOperator andOp = bitSize == k64BitSize ? MOP_xandrri13 : MOP_wandrri12; + bb.AppendInsn(GetInsnBuilder()->BuildInsn(andOp, countLowestBitOpnd, countOpnd, immValueOne)); + RegOperand &wzr = GetZeroOpnd(bitSize); + MOperator cmpOp = (bitSize == k64BitSize) ? MOP_xcmprr : MOP_wcmprr; + Operand &rflag = GetOrCreateRflag(); + bb.AppendInsn(GetInsnBuilder()->BuildInsn(cmpOp, rflag, wzr, countLowestBitOpnd)); + bb.AppendInsn(GetInsnBuilder()->BuildInsn(MOP_beq, rflag, GetOrCreateLabelOperand(jumpLabIdx))); + bb.SetKind(BB::kBBIf); + return &countOpnd; +} + +/* + * count field stores the length shifted one bit to the left + * if the length is less than eight, jump to jumpLabIdx + */ +RegOperand *AArch64CGFunc::CheckStringLengthLessThanEight(BB &bb, RegOperand &countOpnd, PrimType countPty, + LabelIdx jumpLabIdx) { + RegOperand &lengthOpnd = CreateRegisterOperandOfType(countPty); + uint32 bitSize = GetPrimTypeBitSize(countPty); + MOperator lsrOp = (bitSize == k64BitSize) ? MOP_xlsrrri6 : MOP_wlsrrri5; + ImmOperand &immValueOne = CreateImmOperand(countPty, 1); + bb.AppendInsn(GetInsnBuilder()->BuildInsn(lsrOp, lengthOpnd, countOpnd, immValueOne)); + constexpr int kConstIntEight = 8; + ImmOperand &immValueEight = CreateImmOperand(countPty, kConstIntEight); + MOperator cmpImmOp = (bitSize == k64BitSize) ? MOP_xcmpri : MOP_wcmpri; + Operand &rflag = GetOrCreateRflag(); + bb.AppendInsn(GetInsnBuilder()->BuildInsn(cmpImmOp, rflag, lengthOpnd, immValueEight)); + bb.AppendInsn(GetInsnBuilder()->BuildInsn(MOP_blt, rflag, GetOrCreateLabelOperand(jumpLabIdx))); + bb.SetKind(BB::kBBIf); + return &lengthOpnd; +} + +void AArch64CGFunc::GenerateIntrnInsnForStrIndexOf(BB &bb, RegOperand &srcString, RegOperand &patternString, + RegOperand &srcCountOpnd, RegOperand &patternLengthOpnd, + PrimType countPty, LabelIdx jumpLabIdx) { + RegOperand &srcLengthOpnd = CreateRegisterOperandOfType(countPty); + ImmOperand &immValueOne = CreateImmOperand(countPty, 1); + uint32 bitSize = GetPrimTypeBitSize(countPty); + MOperator lsrOp = (bitSize == k64BitSize) ? 
MOP_xlsrrri6 : MOP_wlsrrri5; + bb.AppendInsn(GetInsnBuilder()->BuildInsn(lsrOp, srcLengthOpnd, srcCountOpnd, immValueOne)); +#ifdef USE_32BIT_REF + const int64 stringBaseObjSize = 16; /* shadow(4)+monitor(4)+count(4)+hash(4) */ +#else + const int64 stringBaseObjSize = 20; /* shadow(8)+monitor(4)+count(4)+hash(4) */ +#endif /* USE_32BIT_REF */ + PrimType pty = (srcString.GetSize() == k64BitSize) ? PTY_i64 : PTY_i32; + ImmOperand &immStringBaseOffset = CreateImmOperand(pty, stringBaseObjSize); + MOperator addOp = (pty == PTY_i64) ? MOP_xaddrri12 : MOP_waddrri12; + RegOperand &srcStringBaseOpnd = CreateRegisterOperandOfType(pty); + bb.AppendInsn(GetInsnBuilder()->BuildInsn(addOp, srcStringBaseOpnd, srcString, immStringBaseOffset)); + RegOperand &patternStringBaseOpnd = CreateRegisterOperandOfType(pty); + bb.AppendInsn(GetInsnBuilder()->BuildInsn(addOp, patternStringBaseOpnd, patternString, immStringBaseOffset)); + auto &retVal = static_cast(GetTargetRetOperand(PTY_i32, -1)); + std::vector intrnOpnds; + intrnOpnds.emplace_back(&retVal); + intrnOpnds.emplace_back(&srcStringBaseOpnd); + intrnOpnds.emplace_back(&srcLengthOpnd); + intrnOpnds.emplace_back(&patternStringBaseOpnd); + intrnOpnds.emplace_back(&patternLengthOpnd); + const uint32 tmpRegOperandNum = 6; + for (uint32 i = 0; i < tmpRegOperandNum - 1; ++i) { + RegOperand &tmpOpnd = CreateRegisterOperandOfType(PTY_i64); + intrnOpnds.emplace_back(&tmpOpnd); + } + intrnOpnds.emplace_back(&CreateRegisterOperandOfType(PTY_i32)); + const uint32 labelNum = 7; + for (uint32 i = 0; i < labelNum; ++i) { + LabelIdx labIdx = CreateLabel(); + LabelOperand &labelOpnd = GetOrCreateLabelOperand(labIdx); + intrnOpnds.emplace_back(&labelOpnd); + } + bb.AppendInsn(GetInsnBuilder()->BuildInsn(MOP_string_indexof, intrnOpnds)); + bb.AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xuncond, GetOrCreateLabelOperand(jumpLabIdx))); + bb.SetKind(BB::kBBGoto); +} + +/* + * intrinsify String.indexOf + * generate an intrinsic instruction instead of a function call if both the source string and the specified substring + * are compressed and the length of the substring is not less than 8, i.e. 
+ * bl String.indexOf, srcString, patternString ===>> + * + * ldr srcCountOpnd, [srcString, offset] + * and srcCountLowestBitOpnd, srcCountOpnd, #1 + * cmp wzr, srcCountLowestBitOpnd + * beq Label.call + * ldr patternCountOpnd, [patternString, offset] + * and patternCountLowestBitOpnd, patternCountOpnd, #1 + * cmp wzr, patternCountLowestBitOpnd + * beq Label.call + * lsr patternLengthOpnd, patternCountOpnd, #1 + * cmp patternLengthOpnd, #8 + * blt Label.call + * lsr srcLengthOpnd, srcCountOpnd, #1 + * add srcStringBaseOpnd, srcString, immStringBaseOffset + * add patternStringBaseOpnd, patternString, immStringBaseOffset + * intrinsic_string_indexof retVal, srcStringBaseOpnd, srcLengthOpnd, patternStringBaseOpnd, patternLengthOpnd, + * tmpOpnd1, tmpOpnd2, tmpOpnd3, tmpOpnd4, tmpOpnd5, tmpOpnd6, + * label1, label2, label3, lable3, label4, label5, label6, label7 + * b Label.joint + * Label.call: + * bl String.indexOf, srcString, patternString + * Label.joint: + */ +void AArch64CGFunc::IntrinsifyStringIndexOf(ListOperand &srcOpnds, const MIRSymbol &funcSym) { + MapleList &opnds = srcOpnds.GetOperands(); + /* String.indexOf opnd size must be more than 2 */ + ASSERT(opnds.size() >= 2, "ensure the operands number"); + auto iter = opnds.cbegin(); + RegOperand *srcString = *iter; + RegOperand *patternString = *(++iter); + GStrIdx gStrIdx = GlobalTables::GetStrTable().GetStrIdxFromName(namemangler::kJavaLangStringStr); + MIRType *type = + GlobalTables::GetTypeTable().GetTypeFromTyIdx(GlobalTables::GetTypeNameTable().GetTyIdxFromGStrIdx(gStrIdx)); + auto stringType = static_cast(type); + CHECK_FATAL(stringType != nullptr, "Ljava_2Flang_2FString_3B type can not be null"); + FieldID fieldID = GetMirModule().GetMIRBuilder()->GetStructFieldIDFromFieldNameParentFirst(stringType, "count"); + MIRType *fieldType = stringType->GetFieldType(fieldID); + PrimType countPty = fieldType->GetPrimType(); + int32 offset = GetBecommon().GetFieldOffset(*stringType, fieldID).first; + LabelIdx callBBLabIdx = CreateLabel(); + RegOperand *srcCountOpnd = CheckStringIsCompressed(*GetCurBB(), *srcString, offset, countPty, callBBLabIdx); + + BB *srcCompressedBB = CreateNewBB(); + GetCurBB()->AppendBB(*srcCompressedBB); + RegOperand *patternCountOpnd = CheckStringIsCompressed(*srcCompressedBB, *patternString, offset, countPty, + callBBLabIdx); + + BB *patternCompressedBB = CreateNewBB(); + RegOperand *patternLengthOpnd = CheckStringLengthLessThanEight(*patternCompressedBB, *patternCountOpnd, countPty, + callBBLabIdx); + + BB *intrinsicBB = CreateNewBB(); + LabelIdx jointLabIdx = CreateLabel(); + GenerateIntrnInsnForStrIndexOf(*intrinsicBB, *srcString, *patternString, *srcCountOpnd, *patternLengthOpnd, + countPty, jointLabIdx); + + BB *callBB = CreateNewBB(); + callBB->AddLabel(callBBLabIdx); + SetLab2BBMap(callBBLabIdx, *callBB); + SetCurBB(*callBB); + Insn &callInsn = AppendCall(funcSym, srcOpnds); + MIRType *retType = funcSym.GetFunction()->GetReturnType(); + if (retType != nullptr) { + callInsn.SetRetSize(static_cast(retType->GetSize())); + } + GetFunction().SetHasCall(); + + BB *jointBB = CreateNewBB(); + jointBB->AddLabel(jointLabIdx); + SetLab2BBMap(jointLabIdx, *jointBB); + srcCompressedBB->AppendBB(*patternCompressedBB); + patternCompressedBB->AppendBB(*intrinsicBB); + intrinsicBB->AppendBB(*callBB); + callBB->AppendBB(*jointBB); + SetCurBB(*jointBB); +} + +void AArch64CGFunc::SelectCall(CallNode &callNode) { + MIRFunction *fn = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(callNode.GetPUIdx()); + MIRSymbol *fsym 
= GetFunction().GetLocalOrGlobalSymbol(fn->GetStIdx(), false); + MIRType *retType = fn->GetReturnType(); + + if (GetCG()->GenerateVerboseCG()) { + auto &comment = GetOpndBuilder()->CreateComment(fsym->GetName()); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildCommentInsn(comment)); + } + + ListOperand *srcOpnds = CreateListOpnd(*GetFuncScopeAllocator()); + if (GetMirModule().GetFlavor() == MIRFlavor::kFlavorLmbc) { + SetLmbcCallReturnType(nullptr); + if (fn->IsFirstArgReturn()) { + MIRPtrType *ptrTy = static_cast(GlobalTables::GetTypeTable().GetTypeFromTyIdx( + fn->GetFormalDefVec()[0].formalTyIdx)); + MIRType *sTy = GlobalTables::GetTypeTable().GetTypeFromTyIdx(ptrTy->GetPointedTyIdx()); + SetLmbcCallReturnType(sTy); + } else { + MIRType *ty = fn->GetReturnType(); + SetLmbcCallReturnType(ty); + } + } + bool callNative = false; + if ((fsym->GetName() == "MCC_CallFastNative") || (fsym->GetName() == "MCC_CallFastNativeExt") || + (fsym->GetName() == "MCC_CallSlowNative0") || (fsym->GetName() == "MCC_CallSlowNative1") || + (fsym->GetName() == "MCC_CallSlowNative2") || (fsym->GetName() == "MCC_CallSlowNative3") || + (fsym->GetName() == "MCC_CallSlowNative4") || (fsym->GetName() == "MCC_CallSlowNative5") || + (fsym->GetName() == "MCC_CallSlowNative6") || (fsym->GetName() == "MCC_CallSlowNative7") || + (fsym->GetName() == "MCC_CallSlowNative8") || (fsym->GetName() == "MCC_CallSlowNativeExt")) { + callNative = true; + } + + std::vector stackPosition; + if ((fsym->GetName() == "MCC_DecRefResetPair") || (fsym->GetName() == "MCC_ClearLocalStackRef")) { + SelectClearStackCallParmList(callNode, *srcOpnds, stackPosition); + } else { + SelectParmList(callNode, *srcOpnds, callNative); + } + if (callNative) { + auto &comment = GetOpndBuilder()->CreateComment("call native func"); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildCommentInsn(comment)); + + BaseNode *funcArgExpr = callNode.Opnd(0); + PrimType ptype = funcArgExpr->GetPrimType(); + Operand *funcOpnd = HandleExpr(callNode, *funcArgExpr); + RegOperand &livein = GetOrCreatePhysicalRegisterOperand(R9, GetPointerSize() * kBitsPerByte, + GetRegTyFromPrimTy(PTY_a64)); + SelectCopy(livein, ptype, *funcOpnd, ptype); + + RegOperand &extraOpnd = GetOrCreatePhysicalRegisterOperand(R9, GetPointerSize() * kBitsPerByte, kRegTyInt); + srcOpnds->PushOpnd(extraOpnd); + } + const std::string &funcName = fsym->GetName(); + if (Globals::GetInstance()->GetOptimLevel() >= CGOptions::kLevel2 && + funcName == "Ljava_2Flang_2FString_3B_7CindexOf_7C_28Ljava_2Flang_2FString_3B_29I") { + GStrIdx strIdx = GlobalTables::GetStrTable().GetStrIdxFromName(funcName); + MIRSymbol *st = GlobalTables::GetGsymTable().GetSymbolFromStrIdx(strIdx, true); + IntrinsifyStringIndexOf(*srcOpnds, *st); + return; + } + Insn &callInsn = AppendCall(*fsym, *srcOpnds); + GetCurBB()->SetHasCall(); + if (retType != nullptr) { + callInsn.SetRetSize(static_cast(retType->GetSize())); + callInsn.SetIsCallReturnUnsigned(IsUnsignedInteger(retType->GetPrimType())); + } + + /* check if this call use stack slot to return */ + if (fn->IsFirstArgReturn()) { + SetStackProtectInfo(kRetureStackSlot); + } + + GetFunction().SetHasCall(); + if (GetMirModule().IsCModule()) { /* do not mark abort BB in C at present */ + if (fsym->GetName() == "__builtin_unreachable") { + GetCurBB()->ClearInsns(); + GetCurBB()->SetUnreachable(true); + } + if (fn->GetAttr(FUNCATTR_noreturn)) { + GetCurBB()->SetKind(BB::kBBNoReturn); + PushBackNoReturnCallBBsVec(*GetCurBB()); + } + return; + } + if ((fsym->GetName() == 
"MCC_ThrowException") || (fsym->GetName() == "MCC_RethrowException") || + (fsym->GetName() == "MCC_ThrowArithmeticException") || + (fsym->GetName() == "MCC_ThrowArrayIndexOutOfBoundsException") || + (fsym->GetName() == "MCC_ThrowNullPointerException") || + (fsym->GetName() == "MCC_ThrowStringIndexOutOfBoundsException") || (fsym->GetName() == "abort") || + (fsym->GetName() == "exit") || (fsym->GetName() == "MCC_Array_Boundary_Check")) { + callInsn.SetIsThrow(true); + GetCurBB()->SetKind(BB::kBBThrow); + } else if ((fsym->GetName() == "MCC_DecRefResetPair") || (fsym->GetName() == "MCC_ClearLocalStackRef")) { + for (size_t i = 0; i < stackPosition.size(); ++i) { + callInsn.SetClearStackOffset(i, stackPosition[i]); + } + } +} + +void AArch64CGFunc::SelectIcall(IcallNode &icallNode, Operand &srcOpnd) { + ListOperand *srcOpnds = CreateListOpnd(*GetFuncScopeAllocator()); + SelectParmList(icallNode, *srcOpnds); + + Operand *fptrOpnd = &srcOpnd; + if (fptrOpnd->GetKind() != Operand::kOpdRegister) { + PrimType ty = icallNode.Opnd(0)->GetPrimType(); + fptrOpnd = &SelectCopy(srcOpnd, ty, ty); + } + ASSERT(fptrOpnd->IsRegister(), "SelectIcall: function pointer not RegOperand"); + RegOperand *regOpnd = static_cast(fptrOpnd); + Insn &callInsn = GetInsnBuilder()->BuildInsn(MOP_xblr, *regOpnd, *srcOpnds); + + MIRType *retType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(icallNode.GetRetTyIdx()); + if (retType != nullptr) { + callInsn.SetRetSize(static_cast(retType->GetSize())); + callInsn.SetIsCallReturnUnsigned(IsUnsignedInteger(retType->GetPrimType())); + } + + /* check if this icall use stack slot to return */ + CallReturnVector *p2nrets = &icallNode.GetReturnVec(); + if (p2nrets->size() == k1ByteSize) { + StIdx stIdx = (*p2nrets)[0].first; + MIRSymbol *sym = GetBecommon().GetMIRModule().CurFunction()->GetSymTab()->GetSymbolFromStIdx(stIdx.Idx()); + if (sym != nullptr && (GetBecommon().GetTypeSize(sym->GetTyIdx().GetIdx()) > k16ByteSize)) { + SetStackProtectInfo(kRetureStackSlot); + } + } + + GetCurBB()->AppendInsn(callInsn); + GetCurBB()->SetHasCall(); + ASSERT(GetCurBB()->GetLastInsn()->IsCall(), "lastInsn should be a call"); + GetFunction().SetHasCall(); +} + +void AArch64CGFunc::HandleCatch() { + if (Globals::GetInstance()->GetOptimLevel() >= CGOptions::kLevel1) { + regno_t regNO = uCatch.regNOCatch; + RegOperand &vregOpnd = GetOrCreateVirtualRegisterOperand(regNO); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xmovrr, vregOpnd, + GetOrCreatePhysicalRegisterOperand(R0, k64BitSize, kRegTyInt))); + } else { + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(PickStInsn(uCatch.opndCatch->GetSize(), PTY_a64), + GetOrCreatePhysicalRegisterOperand(R0, k64BitSize, kRegTyInt), *uCatch.opndCatch)); + } +} + +void AArch64CGFunc::SelectMembar(StmtNode &membar) { + switch (membar.GetOpCode()) { + case OP_membaracquire: + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_dmb_ishld, AArch64CG::kMd[MOP_dmb_ishld])); + break; + case OP_membarrelease: + case OP_membarstoreload: + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_dmb_ish, AArch64CG::kMd[MOP_dmb_ish])); + break; + case OP_membarstorestore: + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_dmb_ishst, AArch64CG::kMd[MOP_dmb_ishst])); + break; + default: + ASSERT(false, "NYI"); + break; + } +} + +void AArch64CGFunc::SelectComment(CommentNode &comment) { + auto &commentOpnd = GetOpndBuilder()->CreateComment(comment.GetComment()); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildCommentInsn(commentOpnd)); +} + +void 
AArch64CGFunc::SelectReturn(Operand *opnd0) { + bool is64x1vec = GetFunction().GetAttr(FUNCATTR_oneelem_simd) ? true : false; + MIRType *floatType = GlobalTables::GetTypeTable().GetDouble(); + MIRType *retTyp = is64x1vec ? floatType : GetFunction().GetReturnType(); + AArch64CallConvImpl retLocator(GetBecommon()); + CCLocInfo retMech; + retLocator.InitReturnInfo(*retTyp, retMech); + if ((retMech.GetRegCount() > 0) && (opnd0 != nullptr)) { + RegType regTyp = is64x1vec ? kRegTyFloat : GetRegTyFromPrimTy(retMech.GetPrimTypeOfReg0()); + PrimType oriPrimType = is64x1vec ? GetFunction().GetReturnType()->GetPrimType() : retMech.GetPrimTypeOfReg0(); + AArch64reg retReg = static_cast(retMech.GetReg0()); + if (opnd0->IsRegister()) { + RegOperand *regOpnd = static_cast(opnd0); + if (regOpnd->GetRegisterNumber() != retMech.GetReg0()) { + RegOperand &retOpnd = + GetOrCreatePhysicalRegisterOperand(retReg, regOpnd->GetSize(), regTyp); + SelectCopy(retOpnd, retMech.GetPrimTypeOfReg0(), *regOpnd, oriPrimType); + } + } else if (opnd0->IsMemoryAccessOperand()) { + auto *memopnd = static_cast(opnd0); + RegOperand &retOpnd = GetOrCreatePhysicalRegisterOperand(retReg, + GetPrimTypeBitSize(retMech.GetPrimTypeOfReg0()), regTyp); + MOperator mOp = PickLdInsn(memopnd->GetSize(), retMech.GetPrimTypeOfReg0()); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, retOpnd, *memopnd)); + } else if (opnd0->IsConstImmediate()) { + ImmOperand *immOpnd = static_cast(opnd0); + if (!is64x1vec) { + RegOperand &retOpnd = GetOrCreatePhysicalRegisterOperand(retReg, + GetPrimTypeBitSize(retMech.GetPrimTypeOfReg0()), GetRegTyFromPrimTy(retMech.GetPrimTypeOfReg0())); + SelectCopy(retOpnd, retMech.GetPrimTypeOfReg0(), *immOpnd, retMech.GetPrimTypeOfReg0()); + } else { + PrimType rType = GetFunction().GetReturnType()->GetPrimType(); + RegOperand *reg = &CreateRegisterOperandOfType(rType); + SelectCopy(*reg, rType, *immOpnd, rType); + RegOperand &retOpnd = GetOrCreatePhysicalRegisterOperand(retReg, + GetPrimTypeBitSize(PTY_f64), GetRegTyFromPrimTy(PTY_f64)); + Insn &insn = GetInsnBuilder()->BuildInsn(MOP_xvmovdr, retOpnd, *reg); + GetCurBB()->AppendInsn(insn); + } + } else { + CHECK_FATAL(false, "nyi"); + } + } + + LabelOperand &targetOpnd = GetOrCreateLabelOperand(GetReturnLabel()->GetLabelIdx()); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xuncond, targetOpnd)); +} + +RegOperand &AArch64CGFunc::GetOrCreateSpecialRegisterOperand(PregIdx sregIdx, PrimType primType) { + AArch64reg reg = R0; + switch (sregIdx) { + case kSregSp: + reg = RSP; + break; + case kSregFp: + reg = RFP; + break; + case kSregGp: { + MIRSymbol *sym = GetCG()->GetGP(); + if (sym == nullptr) { + sym = GetFunction().GetSymTab()->CreateSymbol(kScopeLocal); + std::string strBuf("__file__local__GP"); + sym->SetNameStrIdx(GetMirModule().GetMIRBuilder()->GetOrCreateStringIndex(strBuf)); + GetCG()->SetGP(sym); + } + RegOperand &result = GetOrCreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize)); + SelectAddrof(result, CreateStImmOperand(*sym, 0, 0)); + return result; + } + case kSregThrownval: { /* uses x0 == R0 */ + ASSERT(uCatch.regNOCatch > 0, "regNOCatch should greater than 0."); + if (Globals::GetInstance()->GetOptimLevel() == CGOptions::kLevel0) { + RegOperand ®Opnd = GetOrCreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8BitSize)); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn( + PickLdInsn(uCatch.opndCatch->GetSize(), PTY_a64), regOpnd, *uCatch.opndCatch)); + return regOpnd; + } else { + return 
GetOrCreateVirtualRegisterOperand(uCatch.regNOCatch); + } + } + case kSregRetval0: + if (!IsPrimitiveInteger(primType) || IsPrimitiveVectorFloat(primType)) { + reg = V0; + } + break; + case kSregMethodhdl: + if (methodHandleVreg == regno_t(-1)) { + methodHandleVreg = NewVReg(kRegTyInt, k8BitSize); + } + return GetOrCreateVirtualRegisterOperand(methodHandleVreg); + default: + ASSERT(false, "Special pseudo registers NYI"); + break; + } + return GetOrCreatePhysicalRegisterOperand(reg, k64BitSize, kRegTyInt); +} + +RegOperand &AArch64CGFunc::GetOrCreatePhysicalRegisterOperand(std::string &asmAttr) { + ASSERT(!asmAttr.empty(), "Get inline asm string failed in GetOrCreatePhysicalRegisterOperand"); + RegType rKind = kRegTyUndef; + uint32 rSize = 0; + /* Get Register Type and Size */ + switch (asmAttr[0]) { + case 'x': { + rKind = kRegTyInt; + rSize = k64BitSize; + break; + } + case 'w': { + rKind = kRegTyInt; + rSize = k32BitSize; + break; + } + default: { + LogInfo::MapleLogger() << "Unsupport asm string : " << asmAttr << "\n"; + CHECK_FATAL(false, "Have not support this kind of register "); + } + } + AArch64reg rNO = kRinvalid; + /* Get Register Number */ + uint32 regNumPos = 1; + char numberChar = asmAttr[regNumPos++]; + if (numberChar >= '0' && numberChar <= '9') { + uint32 val = static_cast(static_cast(numberChar) - kZeroAsciiNum); + if (regNumPos < asmAttr.length()) { + char numberCharSecond = asmAttr[regNumPos++]; + ASSERT(regNumPos == asmAttr.length(), "Invalid asm attribute"); + if (numberCharSecond >= '0' && numberCharSecond <= '9') { + val = val * kDecimalMax + static_cast((static_cast(numberCharSecond) - kZeroAsciiNum)); + } + } + rNO = static_cast(static_cast(R0) + val); + if (val > (kAsmInputRegPrefixOpnd + 1)) { + LogInfo::MapleLogger() << "Unsupport asm string : " << asmAttr << "\n"; + CHECK_FATAL(false, "have not support this kind of register "); + } + } else if (numberChar == 0) { + return CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize)); + } else { + CHECK_FATAL(false, "Unexpect input in GetOrCreatePhysicalRegisterOperand"); + } + return GetOrCreatePhysicalRegisterOperand(rNO, rSize, rKind); +} + +RegOperand &AArch64CGFunc::GetOrCreatePhysicalRegisterOperand(AArch64reg regNO, uint32 size, + RegType kind, uint32 flag) { + uint64 aarch64PhyRegIdx = regNO; + ASSERT(flag == 0, "Do not expect flag here"); + if (size <= k32BitSize) { + size = k32BitSize; + aarch64PhyRegIdx = aarch64PhyRegIdx << 1; + } else if (size <= k64BitSize) { + size = k64BitSize; + aarch64PhyRegIdx = (aarch64PhyRegIdx << 1) + 1; + } else { + size = (size == k128BitSize) ? 
k128BitSize : k64BitSize; + aarch64PhyRegIdx = aarch64PhyRegIdx << 2; + } + RegOperand *phyRegOpnd = nullptr; + auto phyRegIt = phyRegOperandTable.find(aarch64PhyRegIdx); + if (phyRegIt != phyRegOperandTable.end()) { + phyRegOpnd = phyRegOperandTable[aarch64PhyRegIdx]; + } else { + phyRegOpnd = memPool->New(regNO, size, kind, flag); + phyRegOperandTable.emplace(aarch64PhyRegIdx, phyRegOpnd); + } + return *phyRegOpnd; +} + +const LabelOperand *AArch64CGFunc::GetLabelOperand(LabelIdx labIdx) const { + const MapleUnorderedMap::const_iterator it = hashLabelOpndTable.find(labIdx); + if (it != hashLabelOpndTable.end()) { + return it->second; + } + return nullptr; +} + +LabelOperand &AArch64CGFunc::GetOrCreateLabelOperand(LabelIdx labIdx) { + MapleUnorderedMap::iterator it = hashLabelOpndTable.find(labIdx); + if (it != hashLabelOpndTable.end()) { + return *(it->second); + } + LabelOperand *res = memPool->New(GetShortFuncName().c_str(), labIdx, *memPool); + hashLabelOpndTable[labIdx] = res; + return *res; +} + +LabelOperand &AArch64CGFunc::GetOrCreateLabelOperand(BB &bb) { + LabelIdx labelIdx = bb.GetLabIdx(); + if (labelIdx == MIRLabelTable::GetDummyLabel()) { + labelIdx = CreateLabel(); + bb.AddLabel(labelIdx); + } + return GetOrCreateLabelOperand(labelIdx); +} + +uint32 AArch64CGFunc::GetAggCopySize(uint32 offset1, uint32 offset2, uint32 alignment) const { + /* Generating a larger sized mem op than alignment if allowed by aggregate starting address */ + uint32 offsetAlign1 = (offset1 == 0) ? k8ByteSize : offset1; + uint32 offsetAlign2 = (offset2 == 0) ? k8ByteSize : offset2; + uint32 alignOffset = 1U << (std::min(__builtin_ffs(static_cast(offsetAlign1)), + __builtin_ffs(static_cast(offsetAlign2))) - 1); + if (alignOffset == k8ByteSize || alignOffset == k4ByteSize || alignOffset == k2ByteSize) { + return alignOffset; + } else if (alignOffset > k8ByteSize) { + return k8ByteSize; + } else { + return alignment; + } +} + +OfstOperand &AArch64CGFunc::GetOrCreateOfstOpnd(uint64 offset, uint32 size) { + uint64 aarch64OfstRegIdx = offset; + aarch64OfstRegIdx = (aarch64OfstRegIdx << 1); + if (size == k64BitSize) { + ++aarch64OfstRegIdx; + } + ASSERT(size == k32BitSize || size == k64BitSize, "ofStOpnd size check"); + auto it = hashOfstOpndTable.find(aarch64OfstRegIdx); + if (it != hashOfstOpndTable.end()) { + return *it->second; + } + OfstOperand *res = &CreateOfstOpnd(offset, size); + hashOfstOpndTable[aarch64OfstRegIdx] = res; + return *res; +} + +void AArch64CGFunc::SelectAddrofAfterRa(Operand &result, StImmOperand &stImm, std::vector& rematInsns) { + const MIRSymbol *symbol = stImm.GetSymbol(); + ASSERT ((symbol->GetStorageClass() != kScAuto) || (symbol->GetStorageClass() != kScFormal), ""); + Operand *srcOpnd = &result; + (void)rematInsns.emplace_back(&GetInsnBuilder()->BuildInsn(MOP_xadrp, result, stImm)); + if (CGOptions::IsPIC() && symbol->NeedPIC()) { + /* ldr x0, [x0, #:got_lo12:Ljava_2Flang_2FSystem_3B_7Cout] */ + OfstOperand &offset = CreateOfstOpnd(*stImm.GetSymbol(), stImm.GetOffset(), stImm.GetRelocs()); + MemOperand &memOpnd = GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, GetPointerSize() * kBitsPerByte, + static_cast(srcOpnd), nullptr, &offset, nullptr); + (void)rematInsns.emplace_back(&GetInsnBuilder()->BuildInsn( + memOpnd.GetSize() == k64BitSize ? 
MOP_xldr : MOP_wldr, result, memOpnd)); + + if (stImm.GetOffset() > 0) { + ImmOperand &immOpnd = CreateImmOperand(stImm.GetOffset(), result.GetSize(), false); + (void)rematInsns.emplace_back(&GetInsnBuilder()->BuildInsn(MOP_xaddrri12, result, result, immOpnd)); + return; + } + } else { + (void)rematInsns.emplace_back(&GetInsnBuilder()->BuildInsn(MOP_xadrpl12, result, *srcOpnd, stImm)); + } +} + +MemOperand &AArch64CGFunc::GetOrCreateMemOpndAfterRa(const MIRSymbol &symbol, int32 offset, uint32 size, + bool needLow12, RegOperand *regOp, + std::vector& rematInsns) { + MIRStorageClass storageClass = symbol.GetStorageClass(); + if ((storageClass == kScGlobal) || (storageClass == kScExtern)) { + StImmOperand &stOpnd = CreateStImmOperand(symbol, offset, 0); + RegOperand &stAddrOpnd = *regOp; + SelectAddrofAfterRa(stAddrOpnd, stOpnd, rematInsns); + /* MemOperand::AddrMode_B_OI */ + return *CreateMemOperand(MemOperand::kAddrModeBOi, size, stAddrOpnd, + nullptr, &GetOrCreateOfstOpnd(0, k32BitSize), &symbol); + } else if ((storageClass == kScPstatic) || (storageClass == kScFstatic)) { + if (symbol.GetSKind() == kStConst) { + ASSERT(offset == 0, "offset should be 0 for constant literals"); + return *CreateMemOperand(MemOperand::kAddrModeLiteral, size, symbol); + } else { + if (needLow12) { + StImmOperand &stOpnd = CreateStImmOperand(symbol, offset, 0); + RegOperand &stAddrOpnd = *regOp; + SelectAddrofAfterRa(stAddrOpnd, stOpnd, rematInsns); + return *CreateMemOperand(MemOperand::kAddrModeBOi, size, stAddrOpnd, + nullptr, &GetOrCreateOfstOpnd(0, k32BitSize), &symbol); + } else { + StImmOperand &stOpnd = CreateStImmOperand(symbol, offset, 0); + RegOperand &stAddrOpnd = *regOp; + /* adrp x1, _PTR__cinf_Ljava_2Flang_2FSystem_3B */ + Insn &insn = GetInsnBuilder()->BuildInsn(MOP_xadrp, stAddrOpnd, stOpnd); + rematInsns.emplace_back(&insn); + /* ldr x1, [x1, #:lo12:_PTR__cinf_Ljava_2Flang_2FSystem_3B] */ + return *CreateMemOperand(MemOperand::kAddrModeLo12Li, size, stAddrOpnd, nullptr, + &GetOrCreateOfstOpnd(static_cast(offset), k32BitSize), &symbol); + } + } + } else { + CHECK_FATAL(false, "NYI"); + } +} + +MemOperand &AArch64CGFunc::GetOrCreateMemOpnd(const MIRSymbol &symbol, int64 offset, uint32 size, bool forLocalRef, + bool needLow12, RegOperand *regOp) { + MIRStorageClass storageClass = symbol.GetStorageClass(); + if ((storageClass == kScAuto) || (storageClass == kScFormal)) { + AArch64SymbolAlloc *symLoc = + static_cast(GetMemlayout()->GetSymAllocInfo(symbol.GetStIndex())); + if (forLocalRef) { + auto p = GetMemlayout()->GetLocalRefLocMap().find(symbol.GetStIdx()); + CHECK_FATAL(p != GetMemlayout()->GetLocalRefLocMap().end(), "sym loc should have been defined"); + symLoc = static_cast(p->second); + } + ASSERT(symLoc != nullptr, "sym loc should have been defined"); + /* At this point, we don't know which registers the callee needs to save. 
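+       The FP/SP-relative offsets of locals therefore stay provisional until register
+       allocation has fixed the callee-saved area, which is why the operands built here are
+       remembered in memOpndsRequiringOffsetAdjustment and revisited once the final frame
+       layout is known.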
*/ + ASSERT((IsFPLRAddedToCalleeSavedList() || (SizeOfCalleeSaved() == 0)), + "CalleeSaved won't be known until after Register Allocation"); + StIdx idx = symbol.GetStIdx(); + auto it = memOpndsRequiringOffsetAdjustment.find(idx); + ASSERT((!IsFPLRAddedToCalleeSavedList() || + ((it != memOpndsRequiringOffsetAdjustment.end()) || (storageClass == kScFormal))), + "Memory operand of this symbol should have been added to the hash table"); + int32 stOffset = GetBaseOffset(*symLoc); + if (it != memOpndsRequiringOffsetAdjustment.end()) { + if (GetMemlayout()->IsLocalRefLoc(symbol)) { + if (!forLocalRef) { + return *(it->second); + } + } else if (mirModule.IsJavaModule()) { + return *(it->second); + } else { + Operand* offOpnd = (it->second)->GetOffset(); + ASSERT(offOpnd != nullptr, "offOpnd should not be nullptr"); + if (((static_cast(offOpnd))->GetOffsetValue() == (stOffset + offset)) && + (it->second->GetSize() == size)) { + return *(it->second); + } + } + } + it = memOpndsForStkPassedArguments.find(idx); + if (it != memOpndsForStkPassedArguments.end()) { + if (GetMemlayout()->IsLocalRefLoc(symbol)) { + if (!forLocalRef) { + return *(it->second); + } + } else { + return *(it->second); + } + } + + RegOperand *baseOpnd = static_cast(GetBaseReg(*symLoc)); + int32 totalOffset = stOffset + static_cast(offset); + /* needs a fresh copy of ImmOperand as we may adjust its offset at a later stage. */ + OfstOperand *offsetOpnd = nullptr; + if (CGOptions::IsBigEndian()) { + if (symLoc->GetMemSegment()->GetMemSegmentKind() == kMsArgsStkPassed && size < k64BitSize) { + offsetOpnd = &CreateOfstOpnd(k4BitSize + static_cast(totalOffset), k64BitSize); + } else { + offsetOpnd = &CreateOfstOpnd(static_cast(static_cast(totalOffset)), k64BitSize); + } + } else { + offsetOpnd = &CreateOfstOpnd(static_cast(static_cast(totalOffset)), k64BitSize); + } + if (symLoc->GetMemSegment()->GetMemSegmentKind() == kMsArgsStkPassed && + MemOperand::IsPIMMOffsetOutOfRange(totalOffset, size)) { + ImmOperand *offsetOprand = &CreateImmOperand(totalOffset, k64BitSize, true, kUnAdjustVary); + Operand *resImmOpnd = &SelectCopy(*offsetOprand, PTY_i64, PTY_i64); + return *CreateMemOperand(MemOperand::kAddrModeBOrX, size, *baseOpnd, + static_cast(*resImmOpnd), nullptr, symbol, true); + } else { + if (symLoc->GetMemSegment()->GetMemSegmentKind() == kMsArgsStkPassed) { + offsetOpnd->SetVary(kUnAdjustVary); + } + MemOperand *res = CreateMemOperand(MemOperand::kAddrModeBOi, size, *baseOpnd, + nullptr, offsetOpnd, &symbol); + if ((symbol.GetType()->GetKind() != kTypeClass) && !forLocalRef) { + memOpndsRequiringOffsetAdjustment[idx] = res; + } + return *res; + } + } else if ((storageClass == kScGlobal) || (storageClass == kScExtern)) { + StImmOperand &stOpnd = CreateStImmOperand(symbol, offset, 0); + if (!regOp) { + regOp = static_cast(&CreateRegisterOperandOfType(PTY_u64)); + } + RegOperand &stAddrOpnd = *regOp; + SelectAddrof(stAddrOpnd, stOpnd); + /* MemOperand::AddrMode_B_OI */ + return *CreateMemOperand(MemOperand::kAddrModeBOi, size, stAddrOpnd, + nullptr, &GetOrCreateOfstOpnd(0, k32BitSize), &symbol); + } else if ((storageClass == kScPstatic) || (storageClass == kScFstatic)) { + return CreateMemOpndForStatic(symbol, offset, size, needLow12, regOp); + } else { + CHECK_FATAL(false, "NYI"); + } +} + +MemOperand &AArch64CGFunc::CreateMemOpndForStatic(const MIRSymbol &symbol, int64 offset, uint32 size, + bool needLow12, RegOperand *regOp) { + if (symbol.GetSKind() == kStConst) { + ASSERT(offset == 0, "offset should be 0 for constant literals"); 
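+    /* A constant literal is referenced PC-relatively, e.g. a literal-pool style load such as
+     *   ldr d0, .L_const_pi          (label shown only as an illustration)
+     * so the kAddrModeLiteral operand needs neither a base register nor an offset. */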
+ return *CreateMemOperand(MemOperand::kAddrModeLiteral, size, symbol); + } else { + /* not guaranteed align for uninitialized symbol */ + if (needLow12 || (!symbol.IsConst() && CGOptions::IsPIC())) { + StImmOperand &stOpnd = CreateStImmOperand(symbol, offset, 0); + if (!regOp) { + regOp = static_cast(&CreateRegisterOperandOfType(PTY_u64)); + } + RegOperand &stAddrOpnd = *regOp; + SelectAddrof(stAddrOpnd, stOpnd); + return *CreateMemOperand(MemOperand::kAddrModeBOi, size, stAddrOpnd, + nullptr, &GetOrCreateOfstOpnd(0, k32BitSize), &symbol); + } else { + StImmOperand &stOpnd = CreateStImmOperand(symbol, offset, 0); + if (!regOp) { + regOp = static_cast(&CreateRegisterOperandOfType(PTY_u64)); + } + RegOperand &stAddrOpnd = *regOp; + /* adrp x1, _PTR__cinf_Ljava_2Flang_2FSystem_3B */ + Insn &insn = GetInsnBuilder()->BuildInsn(MOP_xadrp, stAddrOpnd, stOpnd); + GetCurBB()->AppendInsn(insn); + if (GetCG()->GetOptimizeLevel() == CGOptions::kLevel0) { + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xadrpl12, stAddrOpnd, stAddrOpnd, stOpnd)); + return *CreateMemOperand(MemOperand::kAddrModeBOi, size, stAddrOpnd, nullptr, + &GetOrCreateOfstOpnd(static_cast(0), k32BitSize), nullptr); + } + /* ldr x1, [x1, #:lo12:_PTR__cinf_Ljava_2Flang_2FSystem_3B] */ + return *CreateMemOperand(MemOperand::kAddrModeLo12Li, size, stAddrOpnd, nullptr, + &GetOrCreateOfstOpnd(static_cast(offset), k32BitSize), &symbol); + } + } +} + +MemOperand &AArch64CGFunc::HashMemOpnd(MemOperand &tMemOpnd) { + auto it = hashMemOpndTable.find(tMemOpnd); + if (it != hashMemOpndTable.end()) { + return *(it->second); + } + auto *res = memPool->New(tMemOpnd); + hashMemOpndTable[tMemOpnd] = res; + return *res; +} + +MemOperand &AArch64CGFunc::GetOrCreateMemOpnd(MemOperand::AArch64AddressingMode mode, uint32 size, + RegOperand *base, RegOperand *index, ImmOperand *offset, + const MIRSymbol *st) { + ASSERT(base != nullptr, "nullptr check"); + MemOperand tMemOpnd(mode, size, *base, index, offset, st); + if (base->GetRegisterNumber() == RFP || base->GetRegisterNumber() == RSP) { + tMemOpnd.SetStackMem(true); + } + return HashMemOpnd(tMemOpnd); +} + +MemOperand &AArch64CGFunc::GetOrCreateMemOpnd(MemOperand::AArch64AddressingMode mode, uint32 size, + RegOperand *base, RegOperand *index, int32 shift, + bool isSigned) { + ASSERT(base != nullptr, "nullptr check"); + MemOperand tMemOpnd(mode, size, *base, *index, shift, isSigned); + if (base->GetRegisterNumber() == RFP || base->GetRegisterNumber() == RSP) { + tMemOpnd.SetStackMem(true); + } + return HashMemOpnd(tMemOpnd); +} + +MemOperand &AArch64CGFunc::GetOrCreateMemOpnd(MemOperand &oldMem) { + return HashMemOpnd(oldMem); +} + +/* offset: base offset from FP or SP */ +MemOperand &AArch64CGFunc::CreateMemOpnd(RegOperand &baseOpnd, int64 offset, uint32 size) { + OfstOperand &offsetOpnd = CreateOfstOpnd(static_cast(offset), k32BitSize); + /* do not need to check bit size rotate of sign immediate */ + bool checkSimm = (offset > kMinSimm64 && offset < kMaxSimm64Pair); + if (!checkSimm && !ImmOperand::IsInBitSizeRot(kMaxImmVal12Bits, offset)) { + Operand *resImmOpnd = &SelectCopy(CreateImmOperand(offset, k32BitSize, true), PTY_i32, PTY_i32); + return *CreateMemOperand(MemOperand::kAddrModeBOrX, size, baseOpnd, + static_cast(resImmOpnd), nullptr, nullptr); + } else { + return *CreateMemOperand(MemOperand::kAddrModeBOi, size, baseOpnd, + nullptr, &offsetOpnd, nullptr); + } +} + +/* offset: base offset + #:lo12:Label+immediate */ +MemOperand &AArch64CGFunc::CreateMemOpnd(RegOperand &baseOpnd, int64 
offset, uint32 size, const MIRSymbol &sym) const { + OfstOperand &offsetOpnd = CreateOfstOpnd(static_cast(offset), k32BitSize); + ASSERT(ImmOperand::IsInBitSizeRot(kMaxImmVal12Bits, offset), ""); + return *CreateMemOperand(MemOperand::kAddrModeBOi, size, baseOpnd, nullptr, &offsetOpnd, &sym); +} + +RegOperand &AArch64CGFunc::GenStructParamIndex(RegOperand &base, const BaseNode &indexExpr, int shift, + PrimType baseType) { + RegOperand *index = &LoadIntoRegister(*HandleExpr(indexExpr, *(indexExpr.Opnd(0))), PTY_a64); + RegOperand *srcOpnd = &CreateRegisterOperandOfType(PTY_a64); + ImmOperand *imm = &CreateImmOperand(PTY_a64, shift); + SelectShift(*srcOpnd, *index, *imm, kShiftLeft, PTY_a64); + RegOperand *result = &CreateRegisterOperandOfType(PTY_a64); + SelectAdd(*result, base, *srcOpnd, PTY_a64); + + OfstOperand *offopnd = &CreateOfstOpnd(0, k32BitSize); + MemOperand &mo = + GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, k64BitSize, result, nullptr, offopnd, nullptr); + RegOperand &structAddr = CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize)); + GetCurBB()->AppendInsn( + GetInsnBuilder()->BuildInsn(PickLdInsn(GetPrimTypeBitSize(baseType), baseType), structAddr, mo)); + return structAddr; +} + +/* + * case 1: iread a64 <* <* void>> 0 (add a64 ( + * addrof a64 $__reg_jni_func_tab$$libcore_all_dex, + * mul a64 ( + * cvt a64 i32 (constval i32 21), + * constval a64 8))) + * + * case 2 : iread u32 <* u8> 0 (add a64 (regread a64 %61, constval a64 3)) + * case 3 : iread u32 <* u8> 0 (add a64 (regread a64 %61, regread a64 %65)) + * case 4 : iread u32 <* u8> 0 (add a64 (cvt a64 i32(regread %n))) + */ +MemOperand *AArch64CGFunc::CheckAndCreateExtendMemOpnd(PrimType ptype, const BaseNode &addrExpr, int64 offset, + AArch64isa::MemoryOrdering memOrd) { + aggParamReg = nullptr; + if (memOrd != AArch64isa::kMoNone || addrExpr.GetOpCode() != OP_add || offset != 0) { + return nullptr; + } + BaseNode *baseExpr = addrExpr.Opnd(0); + BaseNode *addendExpr = addrExpr.Opnd(1); + + if (baseExpr->GetOpCode() == OP_regread) { + /* case 2 */ + if (addendExpr->GetOpCode() == OP_constval) { + ASSERT(addrExpr.GetNumOpnds() == 2, "Unepect expr operand in CheckAndCreateExtendMemOpnd"); + ConstvalNode *constOfstNode = static_cast(addendExpr); + ASSERT(constOfstNode->GetConstVal()->GetKind() == kConstInt, "expect MIRIntConst"); + MIRIntConst *intOfst = safe_cast(constOfstNode->GetConstVal()); + CHECK_FATAL(intOfst != nullptr, "just checking"); + /* discard large offset and negative offset */ + if (intOfst->GetExtValue() > INT32_MAX || intOfst->IsNegative()) { + return nullptr; + } + uint32 scale = static_cast(intOfst->GetExtValue()); + OfstOperand &ofstOpnd = GetOrCreateOfstOpnd(scale, k32BitSize); + uint32 dsize = GetPrimTypeBitSize(ptype); + MemOperand *memOpnd = &GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, GetPrimTypeBitSize(ptype), + SelectRegread(*static_cast(baseExpr)), nullptr, &ofstOpnd, nullptr); + return IsOperandImmValid(PickLdInsn(dsize, ptype), memOpnd, kInsnSecondOpnd) ? 
memOpnd : nullptr; + /* case 3 */ + } else if (addendExpr->GetOpCode() == OP_regread) { + CHECK_FATAL(addrExpr.GetNumOpnds() == 2, "Unepect expr operand in CheckAndCreateExtendMemOpnd"); + if (GetPrimTypeSize(baseExpr->GetPrimType()) != GetPrimTypeSize(addendExpr->GetPrimType())) { + return nullptr; + } + + auto *baseReg = SelectRegread(*static_cast(baseExpr)); + auto *indexReg = SelectRegread(*static_cast(addendExpr)); + MemOperand *memOpnd = &GetOrCreateMemOpnd(MemOperand::kAddrModeBOrX, GetPrimTypeBitSize(ptype), baseReg, indexReg, + nullptr, nullptr); + if (CGOptions::IsArm64ilp32() && IsSignedInteger(addendExpr->GetPrimType())) { + memOpnd->SetExtend(memOpnd->GetExtend() | MemOperand::ExtendInfo::kSignExtend); + } + return memOpnd; + /* case 4 */ + } else if (addendExpr->GetOpCode() == OP_cvt && addendExpr->GetNumOpnds() == 1) { + int shiftAmount = 0; + BaseNode *cvtRegreadNode = addendExpr->Opnd(kInsnFirstOpnd); + if (cvtRegreadNode->GetOpCode() == OP_regread && cvtRegreadNode->IsLeaf()) { + uint32 fromSize = GetPrimTypeBitSize(cvtRegreadNode->GetPrimType()); + uint32 toSize = GetPrimTypeBitSize(addendExpr->GetPrimType()); + + if (toSize < fromSize) { + return nullptr; + } + + MemOperand *memOpnd = &GetOrCreateMemOpnd( + MemOperand::kAddrModeBOrX, GetPrimTypeBitSize(ptype), + SelectRegread(*static_cast(baseExpr)), + SelectRegread(*static_cast(cvtRegreadNode)), shiftAmount, toSize != fromSize); + return memOpnd; + } + } + } + if (addendExpr->GetOpCode() != OP_mul || !IsPrimitiveInteger(ptype)) { + return nullptr; + } + BaseNode *indexExpr, *scaleExpr; + indexExpr = addendExpr->Opnd(0); + scaleExpr = addendExpr->Opnd(1); + if (scaleExpr->GetOpCode() != OP_constval) { + return nullptr; + } + ConstvalNode *constValNode = static_cast(scaleExpr); + CHECK_FATAL(constValNode->GetConstVal()->GetKind() == kConstInt, "expect MIRIntConst"); + MIRIntConst *mirIntConst = safe_cast(constValNode->GetConstVal()); + CHECK_FATAL(mirIntConst != nullptr, "just checking"); + int32 scale = static_cast(mirIntConst->GetExtValue()); + if (scale < 0) { + return nullptr; + } + uint32 unsignedScale = static_cast(scale); + if (unsignedScale != GetPrimTypeSize(ptype) || indexExpr->GetOpCode() != OP_cvt) { + return nullptr; + } + /* 8 is 1 << 3; 4 is 1 << 2; 2 is 1 << 1; 1 is 1 << 0 */ + int32 shift = (unsignedScale == 8) ? 3 : ((unsignedScale == 4) ? 2 : ((unsignedScale == 2) ? 
1 : 0)); + RegOperand &base = static_cast(LoadIntoRegister(*HandleExpr(addrExpr, *baseExpr), PTY_a64)); + TypeCvtNode *typeCvtNode = static_cast(indexExpr); + PrimType fromType = typeCvtNode->FromType(); + PrimType toType = typeCvtNode->GetPrimType(); + if (isAggParamInReg) { + aggParamReg = &GenStructParamIndex(base, *indexExpr, shift, ptype); + return nullptr; + } + MemOperand *memOpnd = nullptr; + if ((fromType == PTY_i32) && (toType == PTY_a64)) { + RegOperand &index = + static_cast(LoadIntoRegister(*HandleExpr(*indexExpr, *indexExpr->Opnd(0)), PTY_i32)); + memOpnd = &GetOrCreateMemOpnd(MemOperand::kAddrModeBOrX, GetPrimTypeBitSize(ptype), &base, &index, + shift, true); + } else if ((fromType == PTY_u32) && (toType == PTY_a64)) { + RegOperand &index = + static_cast(LoadIntoRegister(*HandleExpr(*indexExpr, *indexExpr->Opnd(0)), PTY_u32)); + memOpnd = &GetOrCreateMemOpnd(MemOperand::kAddrModeBOrX, GetPrimTypeBitSize(ptype), &base, &index, + shift, false); + } + return memOpnd; +} + +MemOperand &AArch64CGFunc::CreateNonExtendMemOpnd(PrimType ptype, const BaseNode &parent, BaseNode &addrExpr, + int64 offset) { + Operand *addrOpnd = nullptr; + if ((addrExpr.GetOpCode() == OP_add || addrExpr.GetOpCode() == OP_sub) && + addrExpr.Opnd(1)->GetOpCode() == OP_constval) { + addrOpnd = HandleExpr(addrExpr, *addrExpr.Opnd(0)); + ConstvalNode *constOfstNode = static_cast(addrExpr.Opnd(1)); + ASSERT(constOfstNode->GetConstVal()->GetKind() == kConstInt, "expect MIRIntConst"); + MIRIntConst *intOfst = safe_cast(constOfstNode->GetConstVal()); + CHECK_FATAL(intOfst != nullptr, "just checking"); + offset = (addrExpr.GetOpCode() == OP_add) ? offset + intOfst->GetSXTValue() : offset - intOfst->GetSXTValue(); + } else { + addrOpnd = HandleExpr(parent, addrExpr); + } + addrOpnd = static_cast(&LoadIntoRegister(*addrOpnd, PTY_a64)); + Insn *lastInsn = GetCurBB() == nullptr ? nullptr : GetCurBB()->GetLastInsn(); + if ((addrExpr.GetOpCode() == OP_CG_array_elem_add) && (offset == 0) && lastInsn && + (lastInsn->GetMachineOpcode() == MOP_xadrpl12) && + (&lastInsn->GetOperand(kInsnFirstOpnd) == &lastInsn->GetOperand(kInsnSecondOpnd))) { + Operand &opnd = lastInsn->GetOperand(kInsnThirdOpnd); + StImmOperand &stOpnd = static_cast(opnd); + + OfstOperand &ofstOpnd = GetOrCreateOfstOpnd(static_cast(stOpnd.GetOffset()), k32BitSize); + MemOperand &tmpMemOpnd = GetOrCreateMemOpnd(MemOperand::kAddrModeLo12Li, GetPrimTypeBitSize(ptype), + static_cast(addrOpnd), nullptr, &ofstOpnd, stOpnd.GetSymbol()); + GetCurBB()->RemoveInsn(*GetCurBB()->GetLastInsn()); + return tmpMemOpnd; + } else { + OfstOperand &ofstOpnd = GetOrCreateOfstOpnd(static_cast(offset), k64BitSize); + return GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, GetPrimTypeBitSize(ptype), + static_cast(addrOpnd), nullptr, &ofstOpnd, nullptr); + } +} + +/* + * Create a memory operand with specified data type and memory ordering, making + * use of aarch64 extend register addressing mode when possible. 
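+ * Illustrative note (not from the original source): the kAddrModeBOrX operands produced by
+ * CheckAndCreateExtendMemOpnd correspond to extended-register addressing such as
+ *   ldr w0, [x1, w2, SXTW #2]
+ * where a 32-bit index register is sign-extended and scaled by the access size.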
+ */ +MemOperand &AArch64CGFunc::CreateMemOpnd(PrimType ptype, const BaseNode &parent, BaseNode &addrExpr, int64 offset, + AArch64isa::MemoryOrdering memOrd) { + MemOperand *memOpnd = CheckAndCreateExtendMemOpnd(ptype, addrExpr, offset, memOrd); + if (memOpnd != nullptr) { + return *memOpnd; + } + return CreateNonExtendMemOpnd(ptype, parent, addrExpr, offset); +} + +MemOperand *AArch64CGFunc::CreateMemOpndOrNull(PrimType ptype, const BaseNode &parent, BaseNode &addrExpr, int64 offset, + AArch64isa::MemoryOrdering memOrd) { + MemOperand *memOpnd = CheckAndCreateExtendMemOpnd(ptype, addrExpr, offset, memOrd); + if (memOpnd != nullptr) { + return memOpnd; + } else if (aggParamReg != nullptr) { + return nullptr; + } + return &CreateNonExtendMemOpnd(ptype, parent, addrExpr, offset); +} + +Operand &AArch64CGFunc::GetOrCreateFuncNameOpnd(const MIRSymbol &symbol) const { + return *memPool->New(symbol); +} + +Operand &AArch64CGFunc::GetOrCreateRflag() { + if (rcc == nullptr) { + rcc = &CreateRflagOperand(); + } + return *rcc; +} + +const Operand *AArch64CGFunc::GetRflag() const { + return rcc; +} + +RegOperand &AArch64CGFunc::GetOrCreatevaryreg() { + if (vary == nullptr) { + regno_t vRegNO = NewVReg(kRegTyVary, k8ByteSize); + vary = &CreateVirtualRegisterOperand(vRegNO); + } + return *vary; +} + +/* the first operand in opndvec is return opnd */ +void AArch64CGFunc::SelectLibCall(const std::string &funcName, std::vector &opndVec, PrimType primType, + PrimType retPrimType, bool is2ndRet) { + std::vector pt; + pt.push_back(retPrimType); + for (size_t i = 0; i < opndVec.size(); ++i) { + pt.push_back(primType); + } + SelectLibCallNArg(funcName, opndVec, pt, retPrimType, is2ndRet); + return; +} + +void AArch64CGFunc::SelectLibCallNArg(const std::string &funcName, std::vector &opndVec, + std::vector pt, PrimType retPrimType, bool is2ndRet) { + std::string newName = funcName; + // Check whether we have a maple version of libcall and we want to use it instead. 
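+  // (Illustrative note, not from the original source: with a non-empty duplicate-asm list,
+  // calls to routines such as memcpy or strlen are presumably redirected to their
+  // Maple-provided assembly versions; see IsDuplicateAsmList later in this file for the
+  // affected names.)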
+ if (!CGOptions::IsDuplicateAsmFileEmpty() && asmMap.find(funcName) != asmMap.end()) { + newName = asmMap.at(funcName); + } + MIRSymbol *st = GlobalTables::GetGsymTable().CreateSymbol(kScopeGlobal); + st->SetNameStrIdx(newName); + st->SetStorageClass(kScExtern); + st->SetSKind(kStFunc); + /* setup the type of the callee function */ + std::vector vec; + std::vector vecAt; + for (size_t i = 1; i < opndVec.size(); ++i) { + (void)vec.emplace_back(GlobalTables::GetTypeTable().GetTypeTable()[static_cast(pt[i])]->GetTypeIndex()); + vecAt.emplace_back(TypeAttrs()); + } + + MIRType *retType = GlobalTables::GetTypeTable().GetTypeTable().at(static_cast(retPrimType)); + st->SetTyIdx(GetBecommon().BeGetOrCreateFunctionType(retType->GetTypeIndex(), vec, vecAt)->GetTypeIndex()); + + if (GetCG()->GenerateVerboseCG()) { + auto &comment = GetOpndBuilder()->CreateComment("lib call : " + newName); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildCommentInsn(comment)); + } + + AArch64CallConvImpl parmLocator(GetBecommon()); + CCLocInfo ploc; + /* setup actual parameters */ + ListOperand *srcOpnds = CreateListOpnd(*GetFuncScopeAllocator()); + for (size_t i = 1; i < opndVec.size(); ++i) { + ASSERT(pt[i] != PTY_void, "primType check"); + MIRType *ty = GlobalTables::GetTypeTable().GetTypeTable()[static_cast(pt[i])]; + Operand *stOpnd = opndVec[i]; + if (stOpnd->GetKind() != Operand::kOpdRegister) { + stOpnd = &SelectCopy(*stOpnd, pt[i], pt[i]); + } + RegOperand *expRegOpnd = static_cast(stOpnd); + parmLocator.LocateNextParm(*ty, ploc); + if (ploc.reg0 != 0) { /* load to the register */ + RegOperand &parmRegOpnd = GetOrCreatePhysicalRegisterOperand( + static_cast(ploc.reg0), expRegOpnd->GetSize(), GetRegTyFromPrimTy(pt[i])); + SelectCopy(parmRegOpnd, pt[i], *expRegOpnd, pt[i]); + srcOpnds->PushOpnd(parmRegOpnd); + } + ASSERT(ploc.reg1 == 0, "SelectCall NYI"); + } + + MIRSymbol *sym = GetFunction().GetLocalOrGlobalSymbol(st->GetStIdx(), false); + Insn &callInsn = AppendCall(*sym, *srcOpnds); + MIRType *callRetType = GlobalTables::GetTypeTable().GetTypeTable().at(static_cast(retPrimType)); + if (callRetType != nullptr) { + callInsn.SetRetSize(static_cast(callRetType->GetSize())); + callInsn.SetIsCallReturnUnsigned(IsUnsignedInteger(callRetType->GetPrimType())); + } + GetFunction().SetHasCall(); + /* get return value */ + Operand *opnd0 = opndVec[0]; + CCLocInfo retMech; + parmLocator.InitReturnInfo(*(GlobalTables::GetTypeTable().GetTypeTable().at(retPrimType)), retMech); + if (retMech.GetRegCount() <= 0) { + CHECK_FATAL(false, "should return from register"); + } + if (!opnd0->IsRegister()) { + CHECK_FATAL(false, "nyi"); + } + RegOperand *regOpnd = static_cast(opnd0); + AArch64reg regNum = static_cast(is2ndRet ? 
retMech.GetReg1() : retMech.GetReg0()); + if (regOpnd->GetRegisterNumber() != regNum) { + RegOperand &retOpnd = GetOrCreatePhysicalRegisterOperand(regNum, regOpnd->GetSize(), + GetRegTyFromPrimTy(retPrimType)); + SelectCopy(*opnd0, retPrimType, retOpnd, retPrimType); + } +} + +RegOperand *AArch64CGFunc::GetBaseReg(const SymbolAlloc &symAlloc) { + MemSegmentKind sgKind = symAlloc.GetMemSegment()->GetMemSegmentKind(); + ASSERT(((sgKind == kMsArgsRegPassed) || (sgKind == kMsLocals) || (sgKind == kMsRefLocals) || + (sgKind == kMsArgsToStkPass) || (sgKind == kMsArgsStkPassed)), "NYI"); + + if (sgKind == kMsArgsStkPassed) { + return &GetOrCreatevaryreg(); + } + + if (fsp == nullptr) { + if (GetMirModule().GetFlavor() == MIRFlavor::kFlavorLmbc) { + fsp = &GetOrCreatePhysicalRegisterOperand(RSP, GetPointerSize() * kBitsPerByte, kRegTyInt); + } else { + fsp = &GetOrCreatePhysicalRegisterOperand(RFP, GetPointerSize() * kBitsPerByte, kRegTyInt); + } + } + return fsp; +} + +int32 AArch64CGFunc::GetBaseOffset(const SymbolAlloc &symbolAlloc) { + const AArch64SymbolAlloc *symAlloc = static_cast(&symbolAlloc); + /* Call Frame layout of AArch64 + * Refer to V2 in aarch64_memlayout.h. + * Do Not change this unless you know what you do + */ + const int32 sizeofFplr = static_cast(2 * kIntregBytelen); + MemSegmentKind sgKind = symAlloc->GetMemSegment()->GetMemSegmentKind(); + AArch64MemLayout *memLayout = static_cast(this->GetMemlayout()); + if (sgKind == kMsArgsStkPassed) { /* for callees */ + int32 offset = static_cast(symAlloc->GetOffset()); + return offset; + } else if (sgKind == kMsArgsRegPassed) { + int32 baseOffset; + if (GetCG()->IsLmbc()) { + baseOffset = static_cast(symAlloc->GetOffset() + memLayout->GetSizeOfRefLocals() + + memLayout->SizeOfArgsToStackPass()); /* SP relative */ + } else { + baseOffset = static_cast(memLayout->GetSizeOfLocals() + symAlloc->GetOffset() + + memLayout->GetSizeOfRefLocals()); + } + return baseOffset + sizeofFplr; + } else if (sgKind == kMsRefLocals) { + int32 baseOffset = static_cast(symAlloc->GetOffset()) + static_cast(memLayout->GetSizeOfLocals()); + return baseOffset + sizeofFplr; + } else if (sgKind == kMsLocals) { + if (GetCG()->IsLmbc()) { + CHECK_FATAL(false, "invalid lmbc's locals"); + } + int32 baseOffset = symAlloc->GetOffset(); + return baseOffset + sizeofFplr; + } else if (sgKind == kMsSpillReg) { + int32 baseOffset; + if (GetCG()->IsLmbc()) { + baseOffset = static_cast(symAlloc->GetOffset() + + memLayout->SizeOfArgsRegisterPassed() + memLayout->GetSizeOfRefLocals() + + memLayout->SizeOfArgsToStackPass()); + } else { + baseOffset = static_cast(symAlloc->GetOffset() + + memLayout->SizeOfArgsRegisterPassed() + memLayout->GetSizeOfLocals() + + memLayout->GetSizeOfRefLocals()); + } + return baseOffset + sizeofFplr; + } else if (sgKind == kMsArgsToStkPass) { /* this is for callers */ + return static_cast(symAlloc->GetOffset()); + } else { + CHECK_FATAL(false, "sgKind check"); + } + return 0; +} + +void AArch64CGFunc::AppendCall(const MIRSymbol &funcSymbol) { + ListOperand *srcOpnds = CreateListOpnd(*GetFuncScopeAllocator()); + AppendCall(funcSymbol, *srcOpnds); +} + +void AArch64CGFunc::DBGFixCallFrameLocationOffsets() { + unsigned idx = 0; + for (DBGExprLoc *el : GetDbgCallFrameLocations(true)) { + if (el && el->GetSimpLoc() && el->GetSimpLoc()->GetDwOp() == DW_OP_fbreg) { + SymbolAlloc *symloc = static_cast(el->GetSymLoc()); + int32 offset = GetBaseOffset(*symloc) - ((idx < AArch64Abi::kNumIntParmRegs) ? 
GetDbgCallFrameOffset() : 0); + el->SetFboffset(offset); + } + idx++; + } + for (DBGExprLoc *el : GetDbgCallFrameLocations(false)) { + if (el->GetSimpLoc()->GetDwOp() == DW_OP_fbreg) { + SymbolAlloc *symloc = static_cast(el->GetSymLoc()); + int32 offset = GetBaseOffset(*symloc) - GetDbgCallFrameOffset(); + el->SetFboffset(offset); + } + } +} + +void AArch64CGFunc::SelectAddAfterInsn(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType, + bool isDest, Insn &insn) { + uint32 dsize = GetPrimTypeBitSize(primType); + bool is64Bits = (dsize == k64BitSize); + ASSERT(opnd0.GetKind() == Operand::kOpdRegister, "Spill memory operand should based on register"); + ASSERT((opnd1.GetKind() == Operand::kOpdImmediate || opnd1.GetKind() == Operand::kOpdOffset), + "Spill memory operand should be with a immediate offset."); + + ImmOperand *immOpnd = static_cast(&opnd1); + + MOperator mOpCode = MOP_undef; + Insn *curInsn = &insn; + /* lower 24 bits has 1, higher bits are all 0 */ + if (immOpnd->IsInBitSize(kMaxImmVal24Bits, 0)) { + /* lower 12 bits and higher 12 bits both has 1 */ + Operand *newOpnd0 = &opnd0; + if (!(immOpnd->IsInBitSize(kMaxImmVal12Bits, 0) || + immOpnd->IsInBitSize(kMaxImmVal12Bits, kMaxImmVal12Bits))) { + /* process higher 12 bits */ + ImmOperand &immOpnd2 = + CreateImmOperand(static_cast(static_cast(immOpnd->GetValue()) >> kMaxImmVal12Bits), + immOpnd->GetSize(), immOpnd->IsSignedValue()); + mOpCode = is64Bits ? MOP_xaddrri24 : MOP_waddrri24; + BitShiftOperand &shiftopnd = CreateBitShiftOperand(BitShiftOperand::kLSL, kShiftAmount12, k64BitSize); + Insn &newInsn = GetInsnBuilder()->BuildInsn(mOpCode, resOpnd, opnd0, immOpnd2, shiftopnd); + ASSERT(IsOperandImmValid(mOpCode, &immOpnd2, kInsnThirdOpnd), "immOpnd2 appears invalid"); + if (isDest) { + insn.GetBB()->InsertInsnAfter(insn, newInsn); + } else { + insn.GetBB()->InsertInsnBefore(insn, newInsn); + } + /* get lower 12 bits value */ + immOpnd->ModuloByPow2(kMaxImmVal12Bits); + newOpnd0 = &resOpnd; + curInsn = &newInsn; + } + /* process lower 12 bits value */ + mOpCode = is64Bits ? MOP_xaddrri12 : MOP_waddrri12; + Insn &newInsn = GetInsnBuilder()->BuildInsn(mOpCode, resOpnd, *newOpnd0, *immOpnd); + ASSERT(IsOperandImmValid(mOpCode, immOpnd, kInsnThirdOpnd), "immOpnd appears invalid"); + if (isDest) { + insn.GetBB()->InsertInsnAfter(*curInsn, newInsn); + } else { + insn.GetBB()->InsertInsnBefore(insn, newInsn); + } + } else { + /* load into register */ + RegOperand &movOpnd = GetOrCreatePhysicalRegisterOperand(R16, dsize, kRegTyInt); + mOpCode = is64Bits ? MOP_xmovri64 : MOP_wmovri32; + Insn &movInsn = GetInsnBuilder()->BuildInsn(mOpCode, movOpnd, *immOpnd); + mOpCode = is64Bits ? 
MOP_xaddrrr : MOP_waddrrr;
+    Insn &newInsn = GetInsnBuilder()->BuildInsn(mOpCode, resOpnd, opnd0, movOpnd);
+    if (isDest) {
+      (void)insn.GetBB()->InsertInsnAfter(insn, newInsn);
+      (void)insn.GetBB()->InsertInsnAfter(insn, movInsn);
+    } else {
+      (void)insn.GetBB()->InsertInsnBefore(insn, movInsn);
+      (void)insn.GetBB()->InsertInsnBefore(insn, newInsn);
+    }
+  }
+}
+
+MemOperand *AArch64CGFunc::AdjustMemOperandIfOffsetOutOfRange(
+    MemOperand *memOpnd, regno_t vrNum, bool isDest, Insn &insn, AArch64reg regNum, bool &isOutOfRange) {
+  if (vrNum >= vRegTable.size()) {
+    CHECK_FATAL(false, "index out of range in AArch64CGFunc::AdjustMemOperandIfOffsetOutOfRange");
+  }
+  uint32 dataSize = GetOrCreateVirtualRegisterOperand(vrNum).GetSize();
+  if (IsImmediateOffsetOutOfRange(*memOpnd, dataSize)) {
+    if (CheckIfSplitOffsetWithAdd(*memOpnd, dataSize)) {
+      isOutOfRange = true;
+    }
+    memOpnd = &SplitOffsetWithAddInstruction(*memOpnd, dataSize, regNum, isDest, &insn);
+  } else {
+    isOutOfRange = false;
+  }
+  return memOpnd;
+}
+
+void AArch64CGFunc::FreeSpillRegMem(regno_t vrNum) {
+  MemOperand *memOpnd = nullptr;
+
+  auto p = spillRegMemOperands.find(vrNum);
+  if (p != spillRegMemOperands.end()) {
+    memOpnd = p->second;
+  }
+
+  if ((memOpnd == nullptr) && IsVRegNOForPseudoRegister(vrNum)) {
+    auto pSecond = pRegSpillMemOperands.find(GetPseudoRegIdxFromVirtualRegNO(vrNum));
+    if (pSecond != pRegSpillMemOperands.end()) {
+      memOpnd = pSecond->second;
+    }
+  }
+
+  if (memOpnd == nullptr) {
+    ASSERT(false, "free spillreg has no mem");
+    return;
+  }
+
+  uint32 size = memOpnd->GetSize();
+  MapleUnorderedMap::iterator iter;
+  if ((iter = reuseSpillLocMem.find(size)) != reuseSpillLocMem.end()) {
+    iter->second->Add(*memOpnd);
+  } else {
+    reuseSpillLocMem[size] = memPool->New(*GetFuncScopeAllocator());
+    reuseSpillLocMem[size]->Add(*memOpnd);
+  }
+}
+
+MemOperand *AArch64CGFunc::GetOrCreatSpillMem(regno_t vrNum) {
+  /* NOTE: must be used in RA only, not in other places.
*/ + if (IsVRegNOForPseudoRegister(vrNum)) { + auto p = pRegSpillMemOperands.find(GetPseudoRegIdxFromVirtualRegNO(vrNum)); + if (p != pRegSpillMemOperands.end()) { + return p->second; + } + } + + auto p = spillRegMemOperands.find(vrNum); + if (p == spillRegMemOperands.end()) { + if (vrNum >= vRegTable.size()) { + CHECK_FATAL(false, "index out of range in AArch64CGFunc::FreeSpillRegMem"); + } + uint32 memBitSize = k64BitSize; + auto it = reuseSpillLocMem.find(memBitSize); + if (it != reuseSpillLocMem.end()) { + MemOperand *memOpnd = it->second->GetOne(); + if (memOpnd != nullptr) { + (void)spillRegMemOperands.emplace(std::pair(vrNum, memOpnd)); + return memOpnd; + } + } + + RegOperand &baseOpnd = GetOrCreateStackBaseRegOperand(); + int64 offset = GetOrCreatSpillRegLocation(vrNum); + OfstOperand *offsetOpnd = &CreateOfstOpnd(static_cast(offset), k64BitSize); + MemOperand *memOpnd = CreateMemOperand(MemOperand::kAddrModeBOi, memBitSize, baseOpnd, + nullptr, offsetOpnd, nullptr); + (void)spillRegMemOperands.emplace(std::pair(vrNum, memOpnd)); + return memOpnd; + } else { + return p->second; + } +} + +MemOperand *AArch64CGFunc::GetPseudoRegisterSpillMemoryOperand(PregIdx i) { + MapleUnorderedMap::iterator p; + if (GetCG()->GetOptimizeLevel() == CGOptions::kLevel0) { + p = pRegSpillMemOperands.end(); + } else { + p = pRegSpillMemOperands.find(i); + } + if (p != pRegSpillMemOperands.end()) { + return p->second; + } + int64 offset = GetPseudoRegisterSpillLocation(i); + MIRPreg *preg = GetFunction().GetPregTab()->PregFromPregIdx(i); + uint32 bitLen = GetPrimTypeSize(preg->GetPrimType()) * kBitsPerByte; + RegOperand &base = GetOrCreateFramePointerRegOperand(); + + OfstOperand &ofstOpnd = GetOrCreateOfstOpnd(static_cast(offset), k32BitSize); + MemOperand &memOpnd = GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, bitLen, &base, nullptr, &ofstOpnd, nullptr); + if (IsImmediateOffsetOutOfRange(memOpnd, bitLen)) { + MemOperand &newMemOpnd = SplitOffsetWithAddInstruction(memOpnd, bitLen); + (void)pRegSpillMemOperands.emplace(std::pair(i, &newMemOpnd)); + return &newMemOpnd; + } + (void)pRegSpillMemOperands.emplace(std::pair(i, &memOpnd)); + return &memOpnd; +} + +/* Get the number of return register of current function. */ +AArch64reg AArch64CGFunc::GetReturnRegisterNumber() { + AArch64CallConvImpl retLocator(GetBecommon()); + CCLocInfo retMech; + retLocator.InitReturnInfo(*(GetFunction().GetReturnType()), retMech); + if (retMech.GetRegCount() > 0) { + return static_cast(retMech.GetReg0()); + } + return kRinvalid; +} + +bool AArch64CGFunc::CanLazyBinding(const Insn &ldrInsn) const { + Operand &memOpnd = ldrInsn.GetOperand(1); + auto &aarchMemOpnd = static_cast(memOpnd); + if (aarchMemOpnd.GetAddrMode() != MemOperand::kAddrModeLo12Li) { + return false; + } + + const MIRSymbol *sym = aarchMemOpnd.GetSymbol(); + CHECK_FATAL(sym != nullptr, "sym can't be nullptr"); + if (sym->IsMuidFuncDefTab() || sym->IsMuidFuncUndefTab() || + sym->IsMuidDataDefTab() || sym->IsMuidDataUndefTab() || + (sym->IsReflectionClassInfo() && !sym->IsReflectionArrayClassInfo())) { + return true; + } + + return false; +} + +/* + * add reg, reg, __PTR_C_STR_... + * ldr reg1, [reg] + * => + * ldr reg1, [reg, #:lo12:__Ptr_C_STR_...] 
+ */ +void AArch64CGFunc::ConvertAdrpl12LdrToLdr() { + FOR_ALL_BB(bb, this) { + FOR_BB_INSNS_SAFE(insn, bb, nextInsn) { + nextInsn = insn->GetNextMachineInsn(); + if (nextInsn == nullptr) { + break; + } + if (!insn->IsMachineInstruction()) { + continue; + } + /* check first insn */ + MOperator thisMop = insn->GetMachineOpcode(); + if (thisMop != MOP_xadrpl12) { + continue; + } + /* check second insn */ + MOperator nextMop = nextInsn->GetMachineOpcode(); + if (!(((nextMop >= MOP_wldrsb) && (nextMop <= MOP_dldp)) || ((nextMop >= MOP_wstrb) && (nextMop <= MOP_dstp)))) { + continue; + } + + /* Check if base register of nextInsn and the dest operand of insn are identical. */ + MemOperand *memOpnd = static_cast(nextInsn->GetMemOpnd()); + CHECK_FATAL(memOpnd != nullptr, "memOpnd can't be nullptr"); + + /* Only for AddrMode_B_OI addressing mode. */ + if (memOpnd->GetAddrMode() != MemOperand::kAddrModeBOi) { + continue; + } + + /* Only for intact memory addressing. */ + if (!memOpnd->IsIntactIndexed()) { + continue; + } + + auto ®Opnd = static_cast(insn->GetOperand(0)); + + /* Check if dest operand of insn is idential with base register of nextInsn. */ + RegOperand *baseReg = memOpnd->GetBaseRegister(); + CHECK_FATAL(baseReg != nullptr, "baseReg can't be nullptr"); + if (baseReg->GetRegisterNumber() != regOpnd.GetRegisterNumber()) { + continue; + } + + StImmOperand &stImmOpnd = static_cast(insn->GetOperand(kInsnThirdOpnd)); + OfstOperand &ofstOpnd = GetOrCreateOfstOpnd( + static_cast(stImmOpnd.GetOffset() + memOpnd->GetOffsetImmediate()->GetOffsetValue()), k32BitSize); + RegOperand &newBaseOpnd = static_cast(insn->GetOperand(kInsnSecondOpnd)); + MemOperand &newMemOpnd = GetOrCreateMemOpnd(MemOperand::kAddrModeLo12Li, memOpnd->GetSize(), + &newBaseOpnd, nullptr, &ofstOpnd, stImmOpnd.GetSymbol()); + nextInsn->SetOperand(1, newMemOpnd); + bb->RemoveInsn(*insn); + } + } +} + +/* + * adrp reg1, __muid_func_undef_tab.. + * ldr reg2, [reg1, #:lo12:__muid_func_undef_tab..] + * => + * intrinsic_adrp_ldr reg2, __muid_func_undef_tab... 
+ */ +void AArch64CGFunc::ConvertAdrpLdrToIntrisic() { + FOR_ALL_BB(bb, this) { + FOR_BB_INSNS_SAFE(insn, bb, nextInsn) { + nextInsn = insn->GetNextMachineInsn(); + if (nextInsn == nullptr) { + break; + } + if (!insn->IsMachineInstruction()) { + continue; + } + + MOperator firstMop = insn->GetMachineOpcode(); + MOperator secondMop = nextInsn->GetMachineOpcode(); + if (!((firstMop == MOP_xadrp) && ((secondMop == MOP_wldr) || (secondMop == MOP_xldr)))) { + continue; + } + + if (CanLazyBinding(*nextInsn)) { + bb->ReplaceInsn(*insn, GetInsnBuilder()->BuildInsn(MOP_adrp_ldr, nextInsn->GetOperand(0), insn->GetOperand(1))); + bb->RemoveInsn(*nextInsn); + } + } + } +} + +void AArch64CGFunc::ProcessLazyBinding() { + ConvertAdrpl12LdrToLdr(); + ConvertAdrpLdrToIntrisic(); +} + +/* + * Generate global long call + * adrp VRx, symbol + * ldr VRx, [VRx, #:lo12:symbol] + * blr VRx + * + * Input: + * insn : insert new instruction after the 'insn' + * func : the symbol of the function need to be called + * srcOpnds : list operand of the function need to be called + * isCleanCall: when generate clean call insn, set isCleanCall as true + * Return: the 'blr' instruction + */ +Insn &AArch64CGFunc::GenerateGlobalLongCallAfterInsn(const MIRSymbol &func, ListOperand &srcOpnds) { + MIRSymbol *symbol = GetFunction().GetLocalOrGlobalSymbol(func.GetStIdx()); + symbol->SetStorageClass(kScGlobal); + RegOperand &tmpReg = CreateRegisterOperandOfType(PTY_u64); + StImmOperand &stOpnd = CreateStImmOperand(*symbol, 0, 0); + OfstOperand &offsetOpnd = CreateOfstOpnd(*symbol, 0); + Insn &adrpInsn = GetInsnBuilder()->BuildInsn(MOP_xadrp, tmpReg, stOpnd); + GetCurBB()->AppendInsn(adrpInsn); + MemOperand &memOrd = GetOrCreateMemOpnd(MemOperand::kAddrModeLo12Li, GetPointerSize() * kBitsPerByte, + static_cast(&tmpReg), + nullptr, &offsetOpnd, symbol); + Insn &ldrInsn = GetInsnBuilder()->BuildInsn(memOrd.GetSize() == k64BitSize ? 
MOP_xldr : MOP_wldr, tmpReg, memOrd); + GetCurBB()->AppendInsn(ldrInsn); + + Insn &callInsn = GetInsnBuilder()->BuildInsn(MOP_xblr, tmpReg, srcOpnds); + GetCurBB()->AppendInsn(callInsn); + GetCurBB()->SetHasCall(); + return callInsn; +} + +/* + * Generate local long call + * adrp VRx, symbol + * add VRx, VRx, #:lo12:symbol + * blr VRx + * + * Input: + * insn : insert new instruction after the 'insn' + * func : the symbol of the function need to be called + * srcOpnds : list operand of the function need to be called + * isCleanCall: when generate clean call insn, set isCleanCall as true + * Return: the 'blr' instruction + */ +Insn &AArch64CGFunc::GenerateLocalLongCallAfterInsn(const MIRSymbol &func, ListOperand &srcOpnds) { + RegOperand &tmpReg = CreateRegisterOperandOfType(PTY_u64); + StImmOperand &stOpnd = CreateStImmOperand(func, 0, 0); + Insn &adrpInsn = GetInsnBuilder()->BuildInsn(MOP_xadrp, tmpReg, stOpnd); + GetCurBB()->AppendInsn(adrpInsn); + Insn &addInsn = GetInsnBuilder()->BuildInsn(MOP_xadrpl12, tmpReg, tmpReg, stOpnd); + GetCurBB()->AppendInsn(addInsn); + Insn *callInsn = &GetInsnBuilder()->BuildInsn(MOP_xblr, tmpReg, srcOpnds); + GetCurBB()->AppendInsn(*callInsn); + GetCurBB()->SetHasCall(); + return *callInsn; +} + +Insn &AArch64CGFunc::AppendCall(const MIRSymbol &sym, ListOperand &srcOpnds) { + Insn *callInsn = nullptr; + if (CGOptions::IsLongCalls()) { + MIRFunction *mirFunc = sym.GetFunction(); + if (IsDuplicateAsmList(sym) || (mirFunc && mirFunc->GetAttr(FUNCATTR_local))) { + callInsn = &GenerateLocalLongCallAfterInsn(sym, srcOpnds); + } else { + callInsn = &GenerateGlobalLongCallAfterInsn(sym, srcOpnds); + } + } else { + Operand &targetOpnd = GetOrCreateFuncNameOpnd(sym); + callInsn = &GetInsnBuilder()->BuildInsn(MOP_xbl, targetOpnd, srcOpnds); + GetCurBB()->AppendInsn(*callInsn); + GetCurBB()->SetHasCall(); + } + return *callInsn; +} + +bool AArch64CGFunc::IsDuplicateAsmList(const MIRSymbol &sym) const { + if (CGOptions::IsDuplicateAsmFileEmpty()) { + return false; + } + + const std::string &name = sym.GetName(); + if ((name == "strlen") || + (name == "strncmp") || + (name == "memcpy") || + (name == "memmove") || + (name == "strcmp") || + (name == "memcmp") || + (name == "memcmpMpl")) { + return true; + } + return false; +} + +void AArch64CGFunc::SelectMPLProfCounterInc(const IntrinsiccallNode &intrnNode) { + if (Options::profileGen) { + ASSERT(intrnNode.NumOpnds() == 1, "must be 1 operand"); + BaseNode *arg1 = intrnNode.Opnd(0); + ASSERT(arg1 != nullptr, "nullptr check"); + regno_t vRegNO1 = NewVReg(GetRegTyFromPrimTy(PTY_a64), GetPrimTypeSize(PTY_a64)); + RegOperand &vReg1 = CreateVirtualRegisterOperand(vRegNO1); + vReg1.SetRegNotBBLocal(); + static const MIRSymbol *bbProfileTab = nullptr; + + // Ref: MeProfGen::InstrumentFunc on ctrTbl namiLogicalShiftLeftOperandng + std::string ctrTblName = namemangler::kprefixProfCtrTbl + + GetMirModule().GetFileName() + "_" + GetName(); + std::replace(ctrTblName.begin(), ctrTblName.end(), '.', '_'); + std::replace(ctrTblName.begin(), ctrTblName.end(), '-', '_'); + std::replace(ctrTblName.begin(), ctrTblName.end(), '/', '_'); + + if (!bbProfileTab || bbProfileTab->GetName() != ctrTblName) { + bbProfileTab = GetMirModule().GetMIRBuilder()->GetGlobalDecl(ctrTblName); + CHECK_FATAL(bbProfileTab != nullptr, "expect counter table"); + } + + ConstvalNode *constvalNode = static_cast(arg1); + MIRConst *mirConst = constvalNode->GetConstVal(); + ASSERT(mirConst != nullptr, "nullptr check"); + CHECK_FATAL(mirConst->GetKind() == kConstInt, 
"expect MIRIntConst type"); + MIRIntConst *mirIntConst = safe_cast(mirConst); + int64 offset = GetPrimTypeSize(PTY_u64) * mirIntConst->GetExtValue(); + + if (!CGOptions::IsQuiet()) { + maple::LogInfo::MapleLogger(kLlInfo) << "At counter table offset: " << offset << std::endl; + } + MemOperand *memOpnd = &GetOrCreateMemOpnd(*bbProfileTab, offset, k64BitSize); + if (IsImmediateOffsetOutOfRange(*memOpnd, k64BitSize)) { + memOpnd = &SplitOffsetWithAddInstruction(*memOpnd, k64BitSize); + } + Operand *reg = &SelectCopy(*memOpnd, PTY_u64, PTY_u64); + ImmOperand &one = CreateImmOperand(1, k64BitSize, false); + SelectAdd(*reg, *reg, one, PTY_u64); + SelectCopy(*memOpnd, PTY_u64, *reg, PTY_u64); + return; + } + + ASSERT(intrnNode.NumOpnds() == 1, "must be 1 operand"); + BaseNode *arg1 = intrnNode.Opnd(0); + ASSERT(arg1 != nullptr, "nullptr check"); + regno_t vRegNO1 = NewVReg(GetRegTyFromPrimTy(PTY_a64), GetPrimTypeSize(PTY_a64)); + RegOperand &vReg1 = CreateVirtualRegisterOperand(vRegNO1); + vReg1.SetRegNotBBLocal(); + static const MIRSymbol *bbProfileTab = nullptr; + if (!bbProfileTab) { + std::string bbProfileName = namemangler::kBBProfileTabPrefixStr + GetMirModule().GetFileNameAsPostfix(); + bbProfileTab = GetMirModule().GetMIRBuilder()->GetGlobalDecl(bbProfileName); + CHECK_FATAL(bbProfileTab != nullptr, "expect bb profile tab"); + } + ConstvalNode *constvalNode = static_cast(arg1); + MIRConst *mirConst = constvalNode->GetConstVal(); + ASSERT(mirConst != nullptr, "nullptr check"); + CHECK_FATAL(mirConst->GetKind() == kConstInt, "expect MIRIntConst type"); + MIRIntConst *mirIntConst = safe_cast(mirConst); + int64 idx = GetPrimTypeSize(PTY_u32) * mirIntConst->GetExtValue(); + if (!CGOptions::IsQuiet()) { + maple::LogInfo::MapleLogger(kLlErr) << "Id index " << idx << std::endl; + } + StImmOperand &stOpnd = CreateStImmOperand(*bbProfileTab, idx, 0); + Insn &newInsn = GetInsnBuilder()->BuildInsn(MOP_counter, vReg1, stOpnd); + newInsn.SetDoNotRemove(true); + GetCurBB()->AppendInsn(newInsn); +} + +void AArch64CGFunc::SelectMPLClinitCheck(const IntrinsiccallNode &intrnNode) { + ASSERT(intrnNode.NumOpnds() == 1, "must be 1 operand"); + BaseNode *arg = intrnNode.Opnd(0); + Operand *stOpnd = nullptr; + bool bClinitSeperate = false; + ASSERT(CGOptions::IsPIC(), "must be doPIC"); + if (arg->GetOpCode() == OP_addrof) { + AddrofNode *addrof = static_cast(arg); + MIRSymbol *symbol = GetFunction().GetLocalOrGlobalSymbol(addrof->GetStIdx()); + ASSERT(symbol->GetName().find(CLASSINFO_PREFIX_STR) == 0, "must be a symbol with __classinfo__"); + + if (!symbol->IsMuidDataUndefTab()) { + std::string ptrName = namemangler::kPtrPrefixStr + symbol->GetName(); + MIRType *ptrType = GlobalTables::GetTypeTable().GetPtr(); + symbol = GetMirModule().GetMIRBuilder()->GetOrCreateGlobalDecl(ptrName, *ptrType); + bClinitSeperate = true; + symbol->SetStorageClass(kScFstatic); + } + stOpnd = &CreateStImmOperand(*symbol, 0, 0); + } else { + arg = arg->Opnd(0); + BaseNode *arg0 = arg->Opnd(0); + BaseNode *arg1 = arg->Opnd(1); + ASSERT(arg0 != nullptr, "nullptr check"); + ASSERT(arg1 != nullptr, "nullptr check"); + ASSERT(arg0->GetOpCode() == OP_addrof, "expect the operand to be addrof"); + AddrofNode *addrof = static_cast(arg0); + MIRSymbol *symbol = GetFunction().GetLocalOrGlobalSymbol(addrof->GetStIdx()); + ASSERT(addrof->GetFieldID() == 0, "For debug SelectMPLClinitCheck."); + ConstvalNode *constvalNode = static_cast(arg1); + MIRConst *mirConst = constvalNode->GetConstVal(); + ASSERT(mirConst != nullptr, "nullptr check"); + 
CHECK_FATAL(mirConst->GetKind() == kConstInt, "expect MIRIntConst type"); + MIRIntConst *mirIntConst = safe_cast(mirConst); + stOpnd = &CreateStImmOperand(*symbol, mirIntConst->GetExtValue(), 0); + } + + regno_t vRegNO2 = NewVReg(GetRegTyFromPrimTy(PTY_a64), GetPrimTypeSize(PTY_a64)); + RegOperand &vReg2 = CreateVirtualRegisterOperand(vRegNO2); + vReg2.SetRegNotBBLocal(); + if (bClinitSeperate) { + /* Seperate MOP_clinit to MOP_adrp_ldr + MOP_clinit_tail. */ + Insn &newInsn = GetInsnBuilder()->BuildInsn(MOP_adrp_ldr, vReg2, *stOpnd); + GetCurBB()->AppendInsn(newInsn); + newInsn.SetDoNotRemove(true); + Insn &insn = GetInsnBuilder()->BuildInsn(MOP_clinit_tail, vReg2); + insn.SetDoNotRemove(true); + GetCurBB()->AppendInsn(insn); + } else { + Insn &newInsn = GetInsnBuilder()->BuildInsn(MOP_clinit, vReg2, *stOpnd); + GetCurBB()->AppendInsn(newInsn); + } +} +void AArch64CGFunc::GenCVaStartIntrin(RegOperand &opnd, uint32 stkSize) { + /* FPLR only pushed in regalloc() after intrin function */ + Operand &stkOpnd = GetOrCreatePhysicalRegisterOperand(RFP, k64BitSize, kRegTyInt); + + /* __stack */ + ImmOperand *offsOpnd; + if (GetMirModule().GetFlavor() != MIRFlavor::kFlavorLmbc) { + offsOpnd = &CreateImmOperand(0, k64BitSize, true, kUnAdjustVary); /* isvary reset StackFrameSize */ + } else { + offsOpnd = &CreateImmOperand(0, k64BitSize, true); + } + ImmOperand *offsOpnd2 = &CreateImmOperand(stkSize, k64BitSize, false); + RegOperand &vReg = CreateVirtualRegisterOperand(NewVReg(kRegTyInt, GetPrimTypeSize(GetLoweredPtrType()))); + if (stkSize > 0) { + SelectAdd(vReg, *offsOpnd, *offsOpnd2, GetLoweredPtrType()); + SelectAdd(vReg, stkOpnd, vReg, GetLoweredPtrType()); + } else { + SelectAdd(vReg, stkOpnd, *offsOpnd, GetLoweredPtrType()); /* stack pointer */ + } + OfstOperand *offOpnd = &GetOrCreateOfstOpnd(0, k64BitSize); /* va_list ptr */ + /* mem operand in va_list struct (lhs) */ + MemOperand *strOpnd = &GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, k64BitSize, &opnd, nullptr, + offOpnd, static_cast(nullptr)); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn( + vReg.GetSize() == k64BitSize ? MOP_xstr : MOP_wstr, vReg, *strOpnd)); + + /* __gr_top ; it's the same as __stack before the 1st va_arg */ + if (CGOptions::IsArm64ilp32()) { + offOpnd = &GetOrCreateOfstOpnd(GetPointerSize(), k64BitSize); + } else { + offOpnd = &GetOrCreateOfstOpnd(k8BitSize, k64BitSize); + } + strOpnd = &GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, k64BitSize, &opnd, nullptr, + offOpnd, static_cast(nullptr)); + SelectAdd(vReg, stkOpnd, *offsOpnd, GetLoweredPtrType()); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn( + vReg.GetSize() == k64BitSize ? MOP_xstr : MOP_wstr, vReg, *strOpnd)); + + /* __vr_top */ + int32 grAreaSize = static_cast(static_cast(GetMemlayout())->GetSizeOfGRSaveArea()); + if (CGOptions::IsArm64ilp32()) { + offsOpnd2 = &CreateImmOperand(static_cast(RoundUp(static_cast(grAreaSize), k8ByteSize * 2)), + k64BitSize, false); + } else { + offsOpnd2 = &CreateImmOperand(static_cast(RoundUp(static_cast(grAreaSize), GetPointerSize() * 2)), + k64BitSize, false); + } + SelectSub(vReg, *offsOpnd, *offsOpnd2, GetLoweredPtrType()); /* if 1st opnd is register => sub */ + SelectAdd(vReg, stkOpnd, vReg, GetLoweredPtrType()); + offOpnd = &GetOrCreateOfstOpnd(GetPointerSize() * 2, k64BitSize); + strOpnd = &GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, k64BitSize, &opnd, nullptr, + offOpnd, static_cast(nullptr)); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn( + vReg.GetSize() == k64BitSize ? 
MOP_xstr : MOP_wstr, vReg, *strOpnd)); + + /* __gr_offs */ + int32 offs = 0 - grAreaSize; + offsOpnd = &CreateImmOperand(offs, k32BitSize, false); + RegOperand *tmpReg = &CreateRegisterOperandOfType(PTY_i32); /* offs value to be assigned (rhs) */ + SelectCopyImm(*tmpReg, *offsOpnd, PTY_i32); + offOpnd = &GetOrCreateOfstOpnd(GetPointerSize() * 3, k32BitSize); + strOpnd = &GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, k32BitSize, &opnd, nullptr, + offOpnd, static_cast(nullptr)); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_wstr, *tmpReg, *strOpnd)); + + /* __vr_offs */ + offs = static_cast(UINT32_MAX - (static_cast(GetMemlayout())->GetSizeOfVRSaveArea() - 1UL)); + offsOpnd = &CreateImmOperand(offs, k32BitSize, false); + tmpReg = &CreateRegisterOperandOfType(PTY_i32); + SelectCopyImm(*tmpReg, *offsOpnd, PTY_i32); + offOpnd = &GetOrCreateOfstOpnd((GetPointerSize() * 3 + sizeof(int32)), k32BitSize); + strOpnd = &GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, k32BitSize, &opnd, nullptr, + offOpnd, static_cast(nullptr)); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_wstr, *tmpReg, *strOpnd)); +} + +void AArch64CGFunc::SelectCVaStart(const IntrinsiccallNode &intrnNode) { + ASSERT(intrnNode.NumOpnds() == 2, "must be 2 operands"); + /* 2 operands, but only 1 needed. Don't need to emit code for second operand + * + * va_list is a passed struct with an address, load its address + */ + isIntrnCallForC = true; + BaseNode *argExpr = intrnNode.Opnd(0); + Operand *opnd = HandleExpr(intrnNode, *argExpr); + RegOperand &opnd0 = LoadIntoRegister(*opnd, GetLoweredPtrType()); /* first argument of intrinsic */ + + /* Find beginning of unnamed arg on stack. + * Ex. void foo(int i1, int i2, ... int i8, struct S r, struct S s, ...) + * where struct S has size 32, address of r and s are on stack but they are named. 
+   */
+  AArch64CallConvImpl parmLocator(GetBecommon());
+  CCLocInfo pLoc;
+  uint32 stkSize = 0;
+  for (uint32 i = 0; i < GetFunction().GetFormalCount(); i++) {
+    MIRType *ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(GetFunction().GetNthParamTyIdx(i));
+    parmLocator.LocateNextParm(*ty, pLoc);
+    if (pLoc.reg0 == kRinvalid) { /* on stack */
+      stkSize = static_cast(pLoc.memOffset + pLoc.memSize);
+    }
+  }
+  if (CGOptions::IsArm64ilp32()) {
+    stkSize = static_cast(RoundUp(stkSize, k8ByteSize));
+  } else {
+    stkSize = static_cast(RoundUp(stkSize, GetPointerSize()));
+  }
+
+  GenCVaStartIntrin(opnd0, stkSize);
+
+  return;
+}
+
+/*
+ * intrinsiccall C___Atomic_store_N(ptr, val, memorder)
+ *   ====> *ptr = val
+ * let ptr -> x0
+ * let val -> x1
+ * lowered to asm: str/stlr x1, [x0]
+ * a store-release would replace str if memorder is not 0
+ */
+void AArch64CGFunc::SelectCAtomicStoreN(const IntrinsiccallNode &intrinsiccallNode) {
+  auto primType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(intrinsiccallNode.GetTyIdx())->GetPrimType();
+  auto *addr = HandleExpr(intrinsiccallNode, *intrinsiccallNode.Opnd(0));
+  auto *value = HandleExpr(intrinsiccallNode, *intrinsiccallNode.Opnd(1));
+  auto *memOrderOpnd = intrinsiccallNode.Opnd(kInsnThirdOpnd);
+  std::memory_order memOrder = std::memory_order_seq_cst;
+  if (memOrderOpnd->IsConstval()) {
+    auto *memOrderConst = static_cast(static_cast(memOrderOpnd)->GetConstVal());
+    memOrder = static_cast(memOrderConst->GetExtValue());
+  }
+  SelectAtomicStore(*value, *addr, primType, PickMemOrder(memOrder, false));
+}
+
+/*
+ * intrinsiccall C___atomic_store(ptr, val, memorder)
+ *   ====> *ptr = *val
+ * let ptr -> x0
+ * let val -> x1
+ * lowered to asm:
+ *   ldr/ldar xn, [x1]
+ *   str/stlr xn, [x0]
+ * a load-acquire would replace ldr if acquire is needed
+ * a store-release would replace str if release is needed
+ */
+void AArch64CGFunc::SelectCAtomicStore(const IntrinsiccallNode &intrinsiccallNode) {
+  auto primType = GlobalTables::GetTypeTable().
+ GetTypeFromTyIdx(intrinsiccallNode.GetTyIdx())->GetPrimType(); + auto *addrOpnd = HandleExpr(intrinsiccallNode, *intrinsiccallNode.Opnd(kInsnFirstOpnd)); + auto *valueOpnd = HandleExpr(intrinsiccallNode, *intrinsiccallNode.Opnd(kInsnSecondOpnd)); + auto *memOrderOpnd = intrinsiccallNode.Opnd(kInsnThirdOpnd); + std::memory_order memOrder = std::memory_order_seq_cst; + if (memOrderOpnd->IsConstval()) { + auto *memOrderConst = static_cast( + static_cast(memOrderOpnd)->GetConstVal()); + memOrder = static_cast(memOrderConst->GetExtValue()); + } + auto *value = SelectAtomicLoad(*valueOpnd, primType, PickMemOrder(memOrder, true)); + SelectAtomicStore(*value, *addrOpnd, primType, PickMemOrder(memOrder, false)); +} + +void AArch64CGFunc::SelectAtomicStore( + Operand &srcOpnd, Operand &addrOpnd, PrimType primType, AArch64isa::MemoryOrdering memOrder) { + auto &memOpnd = CreateMemOpnd(LoadIntoRegister(addrOpnd, PTY_a64), 0, k64BitSize); + auto mOp = PickStInsn(GetPrimTypeBitSize(primType), primType, memOrder); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, LoadIntoRegister(srcOpnd, primType), memOpnd)); +} + +void AArch64CGFunc::SelectAddrofThreadLocal(Operand &result, StImmOperand &stImm) { + if (CGOptions::IsPIC()) { + SelectCTlsGlobalDesc(result, stImm); + } else { + SelectCTlsLocalDesc(result, stImm); + } + if (stImm.GetOffset() > 0) { + auto &immOpnd = CreateImmOperand(stImm.GetOffset(), result.GetSize(), false); + SelectAdd(result, result, immOpnd, PTY_u64); + } +} + +void AArch64CGFunc::SelectCTlsLocalDesc(Operand &result, StImmOperand &stImm) { + auto tpidr = &CreateCommentOperand("tpidr_el0"); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_mrs, result, *tpidr)); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_tls_desc_rel, result, result, stImm)); +} + +void AArch64CGFunc::SelectCTlsGlobalDesc(Operand &result, StImmOperand &stImm) { + /* according to AArch64 Machine Directives */ + auto &r0opnd = GetOrCreatePhysicalRegisterOperand (R0, k64BitSize, GetRegTyFromPrimTy(PTY_u64)); + RegOperand *tlsAddr = &CreateRegisterOperandOfType(PTY_u64); + RegOperand *specialFunc = &CreateRegisterOperandOfType(PTY_u64); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_tls_desc_call, r0opnd, *tlsAddr, stImm)); + // mrs xn, tpidr_el0 + // add x0, x0, xn + auto tpidr = &CreateCommentOperand("tpidr_el0"); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_mrs, *specialFunc, *tpidr)); + SelectAdd(result, r0opnd, *specialFunc, PTY_u64); +} + +void AArch64CGFunc::SelectIntrinCall(IntrinsiccallNode &intrinsicCallNode) { + MIRIntrinsicID intrinsic = intrinsicCallNode.GetIntrinsic(); + + if (GetCG()->GenerateVerboseCG()) { + auto &comment = GetOpndBuilder()->CreateComment(GetIntrinsicName(intrinsic)); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildCommentInsn(comment)); + } + + /* + * At this moment, we eagerly evaluates all argument expressions. In theory, + * there could be intrinsics that extract meta-information of variables, such as + * their locations, rather than computing their values. Applications + * include building stack maps that help runtime libraries to find the values + * of local variables (See @stackmap in LLVM), in which case knowing their + * locations will suffice. 
+ */ + if (intrinsic == INTRN_MPL_CLINIT_CHECK) { /* special case */ + SelectMPLClinitCheck(intrinsicCallNode); + return; + } + if (intrinsic == INTRN_MPL_PROF_COUNTER_INC) { /* special case */ + SelectMPLProfCounterInc(intrinsicCallNode); + return; + } + if ((intrinsic == INTRN_MPL_CLEANUP_LOCALREFVARS) || (intrinsic == INTRN_MPL_CLEANUP_LOCALREFVARS_SKIP) || + (intrinsic == INTRN_MPL_CLEANUP_NORETESCOBJS)) { + return; + } + switch (intrinsic) { + case INTRN_C_va_start: + SelectCVaStart(intrinsicCallNode); + return; + case INTRN_C___sync_lock_release_1: + SelectCSyncLockRelease(intrinsicCallNode, PTY_u8); + return; + case INTRN_C___sync_lock_release_2: + SelectCSyncLockRelease(intrinsicCallNode, PTY_u16); + return; + case INTRN_C___sync_lock_release_4: + SelectCSyncLockRelease(intrinsicCallNode, PTY_u32); + return; + case INTRN_C___sync_lock_release_8: + SelectCSyncLockRelease(intrinsicCallNode, PTY_u64); + return; + case INTRN_C___atomic_store_n: + SelectCAtomicStoreN(intrinsicCallNode); + return; + case INTRN_C___atomic_store: + SelectCAtomicStore(intrinsicCallNode); + return; + case INTRN_C___atomic_load: + SelectCAtomicLoad(intrinsicCallNode); + return; + case INTRN_vector_zip_v8u8: case INTRN_vector_zip_v8i8: + case INTRN_vector_zip_v4u16: case INTRN_vector_zip_v4i16: + case INTRN_vector_zip_v2u32: case INTRN_vector_zip_v2i32: + SelectVectorZip(intrinsicCallNode.Opnd(0)->GetPrimType(), + HandleExpr(intrinsicCallNode, *intrinsicCallNode.Opnd(0)), + HandleExpr(intrinsicCallNode, *intrinsicCallNode.Opnd(1))); + return; + case INTRN_C_stack_save: + SelectStackSave(); + return; + case INTRN_C_stack_restore: + SelectStackRestore(intrinsicCallNode); + return; + default: + break; + } + std::vector operands; /* Temporary. Deallocated on return. */ + ListOperand *srcOpnds = CreateListOpnd(*GetFuncScopeAllocator()); + for (size_t i = 0; i < intrinsicCallNode.NumOpnds(); i++) { + BaseNode *argExpr = intrinsicCallNode.Opnd(i); + Operand *opnd = HandleExpr(intrinsicCallNode, *argExpr); + operands.emplace_back(opnd); + if (!opnd->IsRegister()) { + opnd = &LoadIntoRegister(*opnd, argExpr->GetPrimType()); + } + RegOperand *expRegOpnd = static_cast(opnd); + srcOpnds->PushOpnd(*expRegOpnd); + } + CallReturnVector *retVals = &intrinsicCallNode.GetReturnVec(); + + switch (intrinsic) { + case INTRN_MPL_ATOMIC_EXCHANGE_PTR: { + BB *origFtBB = GetCurBB()->GetNext(); + Operand *loc = operands[kInsnFirstOpnd]; + Operand *newVal = operands[kInsnSecondOpnd]; + Operand *memOrd = operands[kInsnThirdOpnd]; + + MemOrd ord = OperandToMemOrd(*memOrd); + bool isAcquire = MemOrdIsAcquire(ord); + bool isRelease = MemOrdIsRelease(ord); + + const PrimType kValPrimType = PTY_a64; + + RegOperand &locReg = LoadIntoRegister(*loc, PTY_a64); + /* Because there is no live analysis when -O1 */ + if (Globals::GetInstance()->GetOptimLevel() == CGOptions::kLevel0) { + locReg.SetRegNotBBLocal(); + } + MemOperand &locMem = GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, + k64BitSize, &locReg, nullptr, &GetOrCreateOfstOpnd(0, k32BitSize), nullptr); + RegOperand &newValReg = LoadIntoRegister(*newVal, PTY_a64); + if (Globals::GetInstance()->GetOptimLevel() == CGOptions::kLevel0) { + newValReg.SetRegNotBBLocal(); + } + GetCurBB()->SetKind(BB::kBBFallthru); + + LabelIdx retryLabIdx = CreateLabeledBB(intrinsicCallNode); + + RegOperand *oldVal = SelectLoadExcl(kValPrimType, locMem, isAcquire); + if (Globals::GetInstance()->GetOptimLevel() == CGOptions::kLevel0) { + oldVal->SetRegNotBBLocal(); + } + RegOperand *succ = 
SelectStoreExcl(kValPrimType, locMem, newValReg, isRelease); + if (Globals::GetInstance()->GetOptimLevel() == CGOptions::kLevel0) { + succ->SetRegNotBBLocal(); + } + + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_wcbnz, *succ, GetOrCreateLabelOperand(retryLabIdx))); + GetCurBB()->SetKind(BB::kBBIntrinsic); + GetCurBB()->SetNext(origFtBB); + + SaveReturnValueInLocal(*retVals, 0, kValPrimType, *oldVal, intrinsicCallNode); + break; + } + case INTRN_GET_AND_ADDI: { + IntrinsifyGetAndAddInt(*srcOpnds, PTY_i32); + break; + } + case INTRN_GET_AND_ADDL: { + IntrinsifyGetAndAddInt(*srcOpnds, PTY_i64); + break; + } + case INTRN_GET_AND_SETI: { + IntrinsifyGetAndSetInt(*srcOpnds, PTY_i32); + break; + } + case INTRN_GET_AND_SETL: { + IntrinsifyGetAndSetInt(*srcOpnds, PTY_i64); + break; + } + case INTRN_COMP_AND_SWAPI: { + IntrinsifyCompareAndSwapInt(*srcOpnds, PTY_i32); + break; + } + case INTRN_COMP_AND_SWAPL: { + IntrinsifyCompareAndSwapInt(*srcOpnds, PTY_i64); + break; + } + case INTRN_C___atomic_exchange_n: { + Operand *oldVal = SelectCAtomicExchangeN(intrinsicCallNode); + auto primType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(intrinsicCallNode.GetTyIdx())->GetPrimType(); + uint32 regSize = GetPrimTypeBitSize(primType); + SelectCopy(GetOrCreatePhysicalRegisterOperand(R0, regSize, kRegTyInt), primType, *oldVal, primType); + break; + } + case INTRN_C___atomic_exchange : { + SelectCAtomicExchange(intrinsicCallNode); + break; + } + case INTRN_C___sync_synchronize: { + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_dmb_ish)); + break; + } + default: { + CHECK_FATAL(false, "Intrinsic %d: %s not implemented by the AArch64 CG.", intrinsic, GetIntrinsicName(intrinsic)); + break; + } + } +} + +Operand *AArch64CGFunc::SelectCclz(IntrinsicopNode &intrnNode) { + BaseNode *argexpr = intrnNode.Opnd(0); + PrimType ptype = argexpr->GetPrimType(); + Operand *opnd = HandleExpr(intrnNode, *argexpr); + MOperator mop; + + RegOperand &ldDest = CreateRegisterOperandOfType(ptype); + if (opnd->IsMemoryAccessOperand()) { + Insn &insn = GetInsnBuilder()->BuildInsn(PickLdInsn(GetPrimTypeBitSize(ptype), ptype), ldDest, *opnd); + GetCurBB()->AppendInsn(insn); + opnd = &ldDest; + } else if (opnd->IsImmediate()) { + SelectCopyImm(ldDest, *static_cast(opnd), ptype); + opnd = &ldDest; + } + + if (GetPrimTypeSize(ptype) == k4ByteSize) { + mop = MOP_wclz; + } else { + mop = MOP_xclz; + } + RegOperand &dst = CreateRegisterOperandOfType(ptype); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mop, dst, *opnd)); + return &dst; +} + +Operand *AArch64CGFunc::SelectCctz(IntrinsicopNode &intrnNode) { + BaseNode *argexpr = intrnNode.Opnd(0); + PrimType ptype = argexpr->GetPrimType(); + Operand *opnd = HandleExpr(intrnNode, *argexpr); + + RegOperand &ldDest = CreateRegisterOperandOfType(ptype); + if (opnd->IsMemoryAccessOperand()) { + Insn &insn = GetInsnBuilder()->BuildInsn(PickLdInsn(GetPrimTypeBitSize(ptype), ptype), ldDest, *opnd); + GetCurBB()->AppendInsn(insn); + opnd = &ldDest; + } else if (opnd->IsImmediate()) { + SelectCopyImm(ldDest, *static_cast(opnd), ptype); + opnd = &ldDest; + } + + MOperator clzmop; + MOperator rbitmop; + if (GetPrimTypeSize(ptype) == k4ByteSize) { + clzmop = MOP_wclz; + rbitmop = MOP_wrbit; + } else { + clzmop = MOP_xclz; + rbitmop = MOP_xrbit; + } + RegOperand &dst1 = CreateRegisterOperandOfType(ptype); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(rbitmop, dst1, *opnd)); + RegOperand &dst2 = CreateRegisterOperandOfType(ptype); + 
GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(clzmop, dst2, dst1)); + return &dst2; +} + +Operand *AArch64CGFunc::SelectCpopcount(IntrinsicopNode &intrnNode) { + CHECK_FATAL(false, "%s NIY", intrnNode.GetIntrinDesc().name); + return nullptr; +} + +Operand *AArch64CGFunc::SelectCparity(IntrinsicopNode &intrnNode) { + CHECK_FATAL(false, "%s NIY", intrnNode.GetIntrinDesc().name); + return nullptr; +} + +Operand *AArch64CGFunc::SelectCclrsb(IntrinsicopNode &intrnNode) { + BaseNode *argexpr = intrnNode.Opnd(0); + PrimType ptype = argexpr->GetPrimType(); + Operand *opnd = HandleExpr(intrnNode, *argexpr); + + RegOperand &ldDest = CreateRegisterOperandOfType(ptype); + if (opnd->IsMemoryAccessOperand()) { + Insn &insn = GetInsnBuilder()->BuildInsn(PickLdInsn(GetPrimTypeBitSize(ptype), ptype), ldDest, *opnd); + GetCurBB()->AppendInsn(insn); + opnd = &ldDest; + } else if (opnd->IsImmediate()) { + SelectCopyImm(ldDest, *static_cast(opnd), ptype); + opnd = &ldDest; + } + + bool is32Bit = (GetPrimTypeSize(ptype) == k4ByteSize); + RegOperand &res = CreateRegisterOperandOfType(ptype); + SelectMvn(res, *opnd, ptype); + SelectAArch64Cmp(*opnd, GetZeroOpnd(is32Bit ? k32BitSize : k64BitSize), true, is32Bit ? k32BitSize : k64BitSize); + SelectAArch64Select(*opnd, res, *opnd, GetCondOperand(CC_LT), true, is32Bit ? k32BitSize : k64BitSize); + MOperator clzmop = (is32Bit ? MOP_wclz : MOP_xclz); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(clzmop, *opnd, *opnd)); + SelectSub(*opnd, *opnd, CreateImmOperand(1, is32Bit ? k32BitSize : k64BitSize, true), ptype); + return opnd; +} + +Operand *AArch64CGFunc::SelectCisaligned(IntrinsicopNode &intrnNode) { + BaseNode *argexpr0 = intrnNode.Opnd(0); + PrimType ptype0 = argexpr0->GetPrimType(); + Operand *opnd0 = HandleExpr(intrnNode, *argexpr0); + + RegOperand &ldDest0 = CreateRegisterOperandOfType(ptype0); + if (opnd0->IsMemoryAccessOperand()) { + GetCurBB()->AppendInsn( + GetInsnBuilder()->BuildInsn(PickLdInsn(GetPrimTypeBitSize(ptype0), ptype0), ldDest0, *opnd0)); + opnd0 = &ldDest0; + } else if (opnd0->IsImmediate()) { + SelectCopyImm(ldDest0, *static_cast(opnd0), ptype0); + opnd0 = &ldDest0; + } + + BaseNode *argexpr1 = intrnNode.Opnd(1); + PrimType ptype1 = argexpr1->GetPrimType(); + Operand *opnd1 = HandleExpr(intrnNode, *argexpr1); + + RegOperand &ldDest1 = CreateRegisterOperandOfType(ptype1); + if (opnd1->IsMemoryAccessOperand()) { + GetCurBB()->AppendInsn( + GetInsnBuilder()->BuildInsn(PickLdInsn(GetPrimTypeBitSize(ptype1), ptype1), ldDest1, *opnd1)); + opnd1 = &ldDest1; + } else if (opnd1->IsImmediate()) { + SelectCopyImm(ldDest1, *static_cast(opnd1), ptype1); + opnd1 = &ldDest1; + } + // mov w4, #1 + RegOperand ®0 = CreateRegisterOperandOfType(PTY_i32); + SelectCopyImm(reg0, CreateImmOperand(1, k32BitSize, true), PTY_i32); + // sxtw x4, w4 + MOperator mOp = MOP_xsxtw64; + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, reg0, reg0)); + // sub x3, x3, x4 + SelectSub(*opnd1, *opnd1, reg0, ptype1); + // and x2, x2, x3 + SelectBand(*opnd0, *opnd0, *opnd1, ptype1); + // mov w3, #0 + // sxtw x3, w3 + // cmp x2, x3 + SelectAArch64Cmp(*opnd0, GetZeroOpnd(k64BitSize), true, k64BitSize); + // cset w2, EQ + SelectAArch64CSet(*opnd0, GetCondOperand(CC_EQ), false); + return opnd0; +} + +void AArch64CGFunc::SelectArithmeticAndLogical(Operand &resOpnd, Operand &opnd0, Operand &opnd1, + PrimType primType, Opcode op) { + switch (op) { + case OP_add: + SelectAdd(resOpnd, opnd0, opnd1, primType); + break; + case OP_sub: + SelectSub(resOpnd, opnd0, opnd1, 
primType);
+      break;
+    case OP_band:
+      SelectBand(resOpnd, opnd0, opnd1, primType);
+      break;
+    case OP_bior:
+      SelectBior(resOpnd, opnd0, opnd1, primType);
+      break;
+    case OP_bxor:
+      SelectBxor(resOpnd, opnd0, opnd1, primType);
+      break;
+    default:
+      CHECK_FATAL(false, "unexpected opcode for arithmetic and logical insns");
+      break;
+  }
+}
+
+Operand *AArch64CGFunc::SelectAArch64CAtomicFetch(const IntrinsicopNode &intrinopNode, Opcode op, bool fetchBefore) {
+  auto primType = intrinopNode.GetPrimType();
+  /* Create BB which includes atomic built_in function */
+  LabelIdx atomicBBLabIdx = CreateLabel();
+  BB *atomicBB = CreateNewBB();
+  atomicBB->SetKind(BB::kBBIf);
+  atomicBB->SetAtomicBuiltIn();
+  atomicBB->AddLabel(atomicBBLabIdx);
+  SetLab2BBMap(static_cast(atomicBBLabIdx), *atomicBB);
+  GetCurBB()->AppendBB(*atomicBB);
+  /* keep variables inside same BB */
+  if (GetCG()->GetOptimizeLevel() == CGOptions::kLevel0) {
+    SetCurBB(*atomicBB);
+  }
+  /* handle built_in args */
+  Operand *addrOpnd = HandleExpr(intrinopNode, *intrinopNode.GetNopndAt(kInsnFirstOpnd));
+  Operand *valueOpnd = HandleExpr(intrinopNode, *intrinopNode.GetNopndAt(kInsnSecondOpnd));
+  addrOpnd = &LoadIntoRegister(*addrOpnd, intrinopNode.GetNopndAt(kInsnFirstOpnd)->GetPrimType());
+  valueOpnd = &LoadIntoRegister(*valueOpnd, intrinopNode.GetNopndAt(kInsnSecondOpnd)->GetPrimType());
+  if (GetCG()->GetOptimizeLevel() != CGOptions::kLevel0) {
+    SetCurBB(*atomicBB);
+  }
+  /* load from pointed address */
+  auto primTypeP2Size = GetPrimTypeP2Size(primType);
+  auto *regLoaded = &CreateRegisterOperandOfType(primType);
+  auto &memOpnd = CreateMemOpnd(*static_cast(addrOpnd), 0, GetPrimTypeBitSize(primType));
+  auto mOpLoad = PickLoadStoreExclInsn(primTypeP2Size, false, false);
+  atomicBB->AppendInsn(GetInsnBuilder()->BuildInsn(mOpLoad, *regLoaded, memOpnd));
+  /* update loaded value */
+  auto *regOperated = &CreateRegisterOperandOfType(primType);
+  SelectArithmeticAndLogical(*regOperated, *regLoaded, *valueOpnd, primType, op);
+  /* store to pointed address */
+  auto *accessStatus = &CreateRegisterOperandOfType(PTY_u32);
+  auto mOpStore = PickLoadStoreExclInsn(primTypeP2Size, true, true);
+  atomicBB->AppendInsn(GetInsnBuilder()->BuildInsn(mOpStore, *accessStatus, *regOperated, memOpnd));
+  /* check the exclusive access status */
+  auto &atomicBBOpnd = GetOrCreateLabelOperand(*atomicBB);
+  atomicBB->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_wcbnz, *accessStatus, atomicBBOpnd));
+
+  BB *nextBB = CreateNewBB();
+  GetCurBB()->AppendBB(*nextBB);
+  SetCurBB(*nextBB);
+  return fetchBefore ?
regLoaded : regOperated; +} + +Operand *AArch64CGFunc::SelectAArch64CSyncFetch(const IntrinsicopNode &intrinopNode, Opcode op, bool fetchBefore) { + auto *result = SelectAArch64CAtomicFetch(intrinopNode, op, fetchBefore); + /* Data Memory Barrier */ + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_dmb_ish)); + return result; +} + +Operand *AArch64CGFunc::SelectCSyncCmpSwap(const IntrinsicopNode &intrinopNode, bool retBool) { + PrimType primType = intrinopNode.GetNopndAt(kInsnSecondOpnd)->GetPrimType(); + ASSERT(primType == intrinopNode.GetNopndAt(kInsnThirdOpnd)->GetPrimType(), "gcc built_in rule"); + LabelIdx atomicBBLabIdx = CreateLabel(); + BB *atomicBB = CreateNewBB(); + atomicBB->SetKind(BB::kBBIf); + atomicBB->SetAtomicBuiltIn(); + atomicBB->AddLabel(atomicBBLabIdx); + SetLab2BBMap(static_cast(atomicBBLabIdx), *atomicBB); + GetCurBB()->AppendBB(*atomicBB); + if (GetCG()->GetOptimizeLevel() == CGOptions::kLevel0) { + SetCurBB(*atomicBB); + } + /* handle built_in args */ + Operand *addrOpnd = HandleExpr(intrinopNode, *intrinopNode.GetNopndAt(kInsnFirstOpnd)); + Operand *oldVal = HandleExpr(intrinopNode, *intrinopNode.GetNopndAt(kInsnSecondOpnd)); + Operand *newVal = HandleExpr(intrinopNode, *intrinopNode.GetNopndAt(kInsnThirdOpnd)); + if (GetCG()->GetOptimizeLevel() != CGOptions::kLevel0) { + SetCurBB(*atomicBB); + } + + uint32 primTypeP2Size = GetPrimTypeP2Size(primType); + /* ldxr */ + auto *regLoaded = &CreateRegisterOperandOfType(primType); + auto &memOpnd = CreateMemOpnd(LoadIntoRegister(*addrOpnd, primType), 0, GetPrimTypeBitSize(primType)); + auto mOpLoad = PickLoadStoreExclInsn(primTypeP2Size, false, false); + atomicBB->AppendInsn(GetInsnBuilder()->BuildInsn(mOpLoad, *regLoaded, memOpnd)); + Operand *regExtend = &CreateRegisterOperandOfType(primType); + PrimType targetType = (oldVal->GetSize() <= k32BitSize) ? + (IsSignedInteger(primType) ? PTY_i32 : PTY_u32) : (IsSignedInteger(primType) ? 
PTY_i64 : PTY_u64); + SelectCvtInt2Int(nullptr, regExtend, regLoaded, primType, targetType); + /* cmp */ + SelectAArch64Cmp(*regExtend, *oldVal, true, oldVal->GetSize()); + /* bne */ + Operand &rflag = GetOrCreateRflag(); + LabelIdx nextBBLableIdx = CreateLabel(); + LabelOperand &targetOpnd = GetOrCreateLabelOperand(nextBBLableIdx); + atomicBB->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_bne, rflag, targetOpnd)); + /* stlxr */ + BB *stlxrBB = CreateNewBB(); + stlxrBB->SetKind(BB::kBBIf); + atomicBB->AppendBB(*stlxrBB); + SetCurBB(*stlxrBB); + auto *accessStatus = &CreateRegisterOperandOfType(PTY_u32); + auto &newRegVal = LoadIntoRegister(*newVal, primType); + auto mOpStore = PickLoadStoreExclInsn(primTypeP2Size, true, true); + stlxrBB->AppendInsn(GetInsnBuilder()->BuildInsn(mOpStore, *accessStatus, newRegVal, memOpnd)); + /* cbnz ==> check the exclusive accsess status */ + auto &atomicBBOpnd = GetOrCreateLabelOperand(*atomicBB); + stlxrBB->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_wcbnz, *accessStatus, atomicBBOpnd)); + /* Data Memory Barrier */ + BB *nextBB = CreateNewBB(); + nextBB->AddLabel(nextBBLableIdx); + nextBB->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_dmb_ish, AArch64CG::kMd[MOP_dmb_ish])); + SetLab2BBMap(static_cast(nextBBLableIdx), *nextBB); + stlxrBB->AppendBB(*nextBB); + SetCurBB(*nextBB); + /* bool version return true if the comparison is successful and newval is written */ + if (retBool) { + auto *retOpnd = &CreateRegisterOperandOfType(PTY_u32); + SelectAArch64CSet(*retOpnd, GetCondOperand(CC_EQ), false); + return retOpnd; + } + /* type version return the contents of *addrOpnd before the operation */ + return regLoaded; +} + +Operand *AArch64CGFunc::SelectCAtomicFetch(IntrinsicopNode &intrinopNode, Opcode op, bool fetchBefore) { + return SelectAArch64CAtomicFetch(intrinopNode, op, fetchBefore); +} + +Operand *AArch64CGFunc::SelectCSyncFetch(IntrinsicopNode &intrinopNode, Opcode op, bool fetchBefore) { + return SelectAArch64CSyncFetch(intrinopNode, op, fetchBefore); +} + +Operand *AArch64CGFunc::SelectCSyncBoolCmpSwap(IntrinsicopNode &intrinopNode) { + return SelectCSyncCmpSwap(intrinopNode, true); +} + +Operand *AArch64CGFunc::SelectCSyncValCmpSwap(IntrinsicopNode &intrinopNode) { + return SelectCSyncCmpSwap(intrinopNode); +} + +Operand *AArch64CGFunc::SelectCSyncLockTestSet(IntrinsicopNode &intrinopNode, PrimType pty) { + auto primType = intrinopNode.GetPrimType(); + Operand *addrOpnd = HandleExpr(intrinopNode, *intrinopNode.GetNopndAt(kInsnFirstOpnd)); + Operand *valueOpnd = HandleExpr(intrinopNode, *intrinopNode.GetNopndAt(kInsnSecondOpnd)); + addrOpnd = &LoadIntoRegister(*addrOpnd, intrinopNode.GetNopndAt(kInsnFirstOpnd)->GetPrimType()); + valueOpnd = &LoadIntoRegister(*valueOpnd, intrinopNode.GetNopndAt(kInsnSecondOpnd)->GetPrimType()); + + /* Create BB which includes atomic built_in function */ + LabelIdx atomicBBLabIdx = CreateLabel(); + BB *atomicBB = CreateNewBB(); + atomicBB->SetKind(BB::kBBIf); + atomicBB->SetAtomicBuiltIn(); + atomicBB->AddLabel(atomicBBLabIdx); + SetLab2BBMap(static_cast(atomicBBLabIdx), *atomicBB); + GetCurBB()->AppendBB(*atomicBB); + SetCurBB(*atomicBB); + /* load from pointed address */ + auto primTypeP2Size = GetPrimTypeP2Size(primType); + auto *regLoaded = &CreateRegisterOperandOfType(primType); + auto &memOpnd = CreateMemOpnd(*static_cast(addrOpnd), 0, GetPrimTypeBitSize(primType)); + auto mOpLoad = PickLoadStoreExclInsn(primTypeP2Size, false, false); + atomicBB->AppendInsn(GetInsnBuilder()->BuildInsn(mOpLoad, *regLoaded, memOpnd)); + /* 
store to pointed address */
+  auto *accessStatus = &CreateRegisterOperandOfType(PTY_u32);
+  auto mOpStore = PickLoadStoreExclInsn(primTypeP2Size, true, false);
+  atomicBB->AppendInsn(GetInsnBuilder()->BuildInsn(mOpStore, *accessStatus, *valueOpnd, memOpnd));
+  /* check the exclusive access status */
+  auto &atomicBBOpnd = GetOrCreateLabelOperand(*atomicBB);
+  atomicBB->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_wcbnz, *accessStatus, atomicBBOpnd));
+
+  /* Data Memory Barrier */
+  BB *nextBB = CreateNewBB();
+  atomicBB->AppendBB(*nextBB);
+  SetCurBB(*nextBB);
+  nextBB->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_dmb_ish, AArch64CG::kMd[MOP_dmb_ish]));
+  return regLoaded;
+}
+
+void AArch64CGFunc::SelectCSyncLockRelease(const IntrinsiccallNode &intrinsiccall, PrimType primType) {
+  auto *addrOpnd = HandleExpr(intrinsiccall, *intrinsiccall.GetNopndAt(kInsnFirstOpnd));
+  auto primTypeBitSize = GetPrimTypeBitSize(primType);
+  auto mOp = PickStInsn(primTypeBitSize, primType, AArch64isa::kMoRelease);
+  auto &zero = GetZeroOpnd(primTypeBitSize);
+  auto &memOpnd = CreateMemOpnd(LoadIntoRegister(*addrOpnd, primType), 0, primTypeBitSize);
+  GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, zero, memOpnd));
+}
+
+Operand *AArch64CGFunc::SelectCSyncSynchronize(IntrinsicopNode &intrinopNode) {
+  (void)intrinopNode;
+  CHECK_FATAL(false, "SelectCSyncSynchronize is not implemented yet");
+  return nullptr;
+}
+
+AArch64isa::MemoryOrdering AArch64CGFunc::PickMemOrder(std::memory_order memOrder, bool isLdr) const {
+  switch (memOrder) {
+    case std::memory_order_relaxed:
+      return AArch64isa::kMoNone;
+    case std::memory_order_consume:
+    case std::memory_order_acquire:
+      return isLdr ? AArch64isa::kMoAcquire : AArch64isa::kMoNone;
+    case std::memory_order_release:
+      return isLdr ? AArch64isa::kMoNone : AArch64isa::kMoRelease;
+    case std::memory_order_acq_rel:
+    case std::memory_order_seq_cst:
+      return isLdr ? AArch64isa::kMoAcquire : AArch64isa::kMoRelease;
+    default:
+      CHECK_FATAL(false, "unexpected memory order");
+      return AArch64isa::kMoNone;
+  }
+}
+
+/*
+ * regassign %1 (intrinsicop C___Atomic_Load_N(ptr, memorder))
+ * ====> %1 = *ptr
+ * let %1 -> x0
+ * let ptr -> x1
+ * implemented as asm: ldr/ldar x0, [x1]
+ * a load-acquire (ldar) would replace ldr if the memorder requires acquire semantics
+ */
+Operand *AArch64CGFunc::SelectCAtomicLoadN(IntrinsicopNode &intrinsicopNode) {
+  auto *addrOpnd = HandleExpr(intrinsicopNode, *intrinsicopNode.Opnd(0));
+  auto *memOrderOpnd = intrinsicopNode.Opnd(1);
+  auto primType = intrinsicopNode.GetPrimType();
+  std::memory_order memOrder = std::memory_order_seq_cst;
+  if (memOrderOpnd->IsConstval()) {
+    auto *memOrderConst = static_cast<MIRIntConst*>(static_cast<ConstvalNode*>(memOrderOpnd)->GetConstVal());
+    memOrder = static_cast<std::memory_order>(memOrderConst->GetExtValue());
+  }
+  return SelectAtomicLoad(*addrOpnd, primType, PickMemOrder(memOrder, true));
+}
+
+/*
+ * intrinsiccall C___atomic_load (ptr, ret, memorder)
+ * ====> *ret = *ptr
+ * let ret -> x0
+ * let ptr -> x1
+ * implemented as asm:
+ *   ldr/ldar xn, [x1]
+ *   str/stlr xn, [x0]
+ * a load-acquire (ldar) would replace ldr if acquire is needed
+ * a store-release (stlr) would replace str if release is needed
+ */
+void AArch64CGFunc::SelectCAtomicLoad(const IntrinsiccallNode &intrinsiccallNode) {
+  auto primType = GlobalTables::GetTypeTable().
+ GetTypeFromTyIdx(intrinsiccallNode.GetTyIdx())->GetPrimType(); + auto *addrOpnd = HandleExpr(intrinsiccallNode, *intrinsiccallNode.Opnd(kInsnFirstOpnd)); + auto *retOpnd = HandleExpr(intrinsiccallNode, *intrinsiccallNode.Opnd(kInsnSecondOpnd)); + auto *memOrderOpnd = intrinsiccallNode.Opnd(kInsnThirdOpnd); + std::memory_order memOrder = std::memory_order_seq_cst; + if (memOrderOpnd->IsConstval()) { + auto *memOrderConst = static_cast( + static_cast(memOrderOpnd)->GetConstVal()); + memOrder = static_cast(memOrderConst->GetExtValue()); + } + auto *value = SelectAtomicLoad(*addrOpnd, primType, PickMemOrder(memOrder, true)); + SelectAtomicStore(*value, *retOpnd, primType, PickMemOrder(memOrder, false)); +} + +/* + * regassign %1 (intrinsicop C___Atomic_exchange_n(ptr, val, memorder)) + * ====> %1 = *ptr; *ptr = val; + * let %1 -> x0 + * let ptr -> x1 + * let val -> x2 + * implement to asm: + * ldr/ldar x0, [x1] + * str/stlr x2, [x1] + * a load-acquire would replace ldr if acquire needed + * a store-relase would replace str if release needed + */ +Operand *AArch64CGFunc::SelectCAtomicExchangeN(const IntrinsiccallNode &intrinsiccallNode) { + auto primType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(intrinsiccallNode.GetTyIdx())->GetPrimType(); + auto *addrOpnd = HandleExpr(intrinsiccallNode, *intrinsiccallNode.Opnd(0)); + auto *valueOpnd = HandleExpr(intrinsiccallNode, *intrinsiccallNode.Opnd(1)); + auto *memOrderOpnd = intrinsiccallNode.Opnd(kInsnThirdOpnd); + std::memory_order memOrder = std::memory_order_seq_cst; + if (memOrderOpnd->IsConstval()) { + auto *memOrderConst = static_cast(static_cast(memOrderOpnd)->GetConstVal()); + memOrder = static_cast(memOrderConst->GetExtValue()); + } + + auto *result = SelectAtomicLoad(*addrOpnd, primType, PickMemOrder(memOrder, true)); + SelectAtomicStore(*valueOpnd, *addrOpnd, primType, PickMemOrder(memOrder, false)); + return result; +} + +/* + * intrinsiccall C___atomic_exchange (ptr, val, ret, memorder) + * ====> *ret = *ptr; *ptr = *val + * let ptr -> x0 + * let val -> x1 + * let ret -> x2 + * implement to asm: + * ldr/ldar xn1, [x0] + * ldr/ldar xn2, [x1] + * str/stlr xn2, [x0] + * str/stlr xn1, [x2] + * a load-acquire would replace ldr if acquire needed + * a store-relase would replace str if release needed + */ +void AArch64CGFunc::SelectCAtomicExchange(const IntrinsiccallNode &intrinsiccallNode) { + auto primType = GlobalTables::GetTypeTable(). 
+ GetTypeFromTyIdx(intrinsiccallNode.GetTyIdx())->GetPrimType(); + auto *addrOpnd = HandleExpr(intrinsiccallNode, *intrinsiccallNode.Opnd(kInsnFirstOpnd)); + auto *valueOpnd = HandleExpr(intrinsiccallNode, *intrinsiccallNode.Opnd(kInsnSecondOpnd)); + auto *retOpnd = HandleExpr(intrinsiccallNode, *intrinsiccallNode.Opnd(kInsnThirdOpnd)); + auto *memOrderOpnd = intrinsiccallNode.Opnd(kInsnFourthOpnd); + std::memory_order memOrder = std::memory_order_seq_cst; + if (memOrderOpnd->IsConstval()) { + auto *memOrderConst = static_cast( + static_cast(memOrderOpnd)->GetConstVal()); + memOrder = static_cast(memOrderConst->GetExtValue()); + } + + auto *result = SelectAtomicLoad(*addrOpnd, primType, PickMemOrder(memOrder, true)); + auto *val = SelectAtomicLoad(*valueOpnd, primType, PickMemOrder(memOrder, true)); + SelectAtomicStore(*val, *addrOpnd, primType, PickMemOrder(memOrder, false)); + SelectAtomicStore(*result, *retOpnd, primType, PickMemOrder(memOrder, false)); +} + +Operand *AArch64CGFunc::SelectAtomicLoad(Operand &addrOpnd, PrimType primType, AArch64isa::MemoryOrdering memOrder) { + auto mOp = PickLdInsn(GetPrimTypeBitSize(primType), primType, memOrder); + auto &memOpnd = CreateMemOpnd(LoadIntoRegister(addrOpnd, PTY_a64), 0, k64BitSize); + auto *resultOpnd = &CreateRegisterOperandOfType(primType); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, *resultOpnd, memOpnd)); + return resultOpnd; +} + +Operand *AArch64CGFunc::SelectCReturnAddress(IntrinsicopNode &intrinopNode) { + if (intrinopNode.GetIntrinsic() == INTRN_C__builtin_extract_return_addr) { + ASSERT(intrinopNode.GetNumOpnds() == 1, "expect one parameter"); + Operand *addrOpnd = HandleExpr(intrinopNode, *intrinopNode.GetNopndAt(kInsnFirstOpnd)); + return &LoadIntoRegister(*addrOpnd, PTY_a64); + } else if (intrinopNode.GetIntrinsic() == INTRN_C__builtin_return_address) { + BaseNode *argexpr0 = intrinopNode.Opnd(0); + while (!argexpr0->IsLeaf()) { + argexpr0 = argexpr0->Opnd(0); + } + CHECK_FATAL(argexpr0->IsConstval(), "Invalid argument of __builtin_return_address"); + auto &constNode = static_cast(*argexpr0); + ASSERT(constNode.GetConstVal()->GetKind() == kConstInt, "expect MIRIntConst does not support float yet"); + MIRIntConst *mirIntConst = safe_cast(constNode.GetConstVal()); + ASSERT(mirIntConst != nullptr, "nullptr checking"); + int64 scale = mirIntConst->GetExtValue(); + /* + * Do not support getting return address with a nonzero argument + * inline / tail call opt will destory this behavior + */ + CHECK_FATAL(scale == 0, "Do not support recursion"); + Operand *resReg = &static_cast(CreateRegisterOperandOfType(PTY_i64)); + SelectCopy(*resReg, PTY_i64, GetOrCreatePhysicalRegisterOperand(RLR, k64BitSize, kRegTyInt), PTY_i64); + return resReg; + } + return nullptr; +} + +Operand *AArch64CGFunc::SelectCalignup(IntrinsicopNode &intrnNode) { + return SelectAArch64align(intrnNode, true); +} + +Operand *AArch64CGFunc::SelectCaligndown(IntrinsicopNode &intrnNode) { + return SelectAArch64align(intrnNode, false); +} + +Operand *AArch64CGFunc::SelectAArch64align(const IntrinsicopNode &intrnNode, bool isUp) { + /* Handle Two args */ + BaseNode *argexpr0 = intrnNode.Opnd(0); + PrimType ptype0 = argexpr0->GetPrimType(); + Operand *opnd0 = HandleExpr(intrnNode, *argexpr0); + PrimType resultPtype = intrnNode.GetPrimType(); + RegOperand &ldDest0 = LoadIntoRegister(*opnd0, ptype0); + + BaseNode *argexpr1 = intrnNode.Opnd(1); + PrimType ptype1 = argexpr1->GetPrimType(); + Operand *opnd1 = HandleExpr(intrnNode, *argexpr1); + RegOperand &arg1 
= LoadIntoRegister(*opnd1, ptype1); + ASSERT(IsPrimitiveInteger(ptype0) && IsPrimitiveInteger(ptype1), "align integer type only"); + Operand *ldDest1 = &static_cast(CreateRegisterOperandOfType(ptype0)); + SelectCvtInt2Int(nullptr, ldDest1, &arg1, ptype1, ptype0); + + Operand *resultReg = &static_cast(CreateRegisterOperandOfType(ptype0)); + Operand &immReg = CreateImmOperand(1, GetPrimTypeBitSize(ptype0), true); + /* Do alignment x0 -- value to be aligned x1 -- alignment */ + if (isUp) { + /* add res, x0, x1 */ + SelectAdd(*resultReg, ldDest0, *ldDest1, ptype0); + /* sub res, res, 1 */ + SelectSub(*resultReg, *resultReg, immReg, ptype0); + } + Operand *tempReg = &static_cast(CreateRegisterOperandOfType(ptype0)); + /* sub temp, x1, 1 */ + SelectSub(*tempReg, *ldDest1, immReg, ptype0); + /* mvn temp, temp */ + SelectMvn(*tempReg, *tempReg, ptype0); + /* and res, res, temp */ + if (isUp) { + SelectBand(*resultReg, *resultReg, *tempReg, ptype0); + } else { + SelectBand(*resultReg, ldDest0, *tempReg, ptype0); + } + if (resultPtype != ptype0) { + SelectCvtInt2Int(&intrnNode, resultReg, resultReg, ptype0, resultPtype); + } + return resultReg; +} + +void AArch64CGFunc::SelectStackSave() { + Operand &spOpnd = GetOrCreatePhysicalRegisterOperand(RSP, k64BitSize, kRegTyInt); + Operand &r0Opnd = GetOrCreatePhysicalRegisterOperand(R0, k64BitSize, kRegTyInt); + Insn &saveInsn = GetInsnBuilder()->BuildInsn(MOP_xmovrr, r0Opnd, spOpnd); + GetCurBB()->AppendInsn(saveInsn); +} + +void AArch64CGFunc::SelectStackRestore(const IntrinsiccallNode &intrnNode) { + BaseNode *argexpr0 = intrnNode.Opnd(0); + Operand *opnd0 = HandleExpr(intrnNode, *argexpr0); + Operand &spOpnd = GetOrCreatePhysicalRegisterOperand(RSP, k64BitSize, kRegTyInt); + Insn &restoreInsn = GetInsnBuilder()->BuildInsn(MOP_xmovrr, spOpnd, *opnd0); + GetCurBB()->AppendInsn(restoreInsn); +} + +/* + * NOTE: consider moving the following things into aarch64_cg.cpp They may + * serve not only inrinsics, but other MapleIR instructions as well. + * Do it as if we are adding a label in straight-line assembly code. 
+ */
+LabelIdx AArch64CGFunc::CreateLabeledBB(StmtNode &stmt) {
+  LabelIdx labIdx = CreateLabel();
+  BB *newBB = StartNewBBImpl(false, stmt);
+  newBB->AddLabel(labIdx);
+  SetLab2BBMap(labIdx, *newBB);
+  SetCurBB(*newBB);
+  return labIdx;
+}
+
+/* Save value into the local variable for the index-th return value. */
+void AArch64CGFunc::SaveReturnValueInLocal(CallReturnVector &retVals, size_t index, PrimType primType, Operand &value,
+                                           StmtNode &parentStmt) {
+  CallReturnPair &pair = retVals.at(index);
+  BB tempBB(static_cast<uint32>(-1), *GetFuncScopeAllocator());
+  BB *realCurBB = GetCurBB();
+  CHECK_FATAL(!pair.second.IsReg(), "NYI");
+  Operand *destOpnd = &value;
+  /* for O0, cross-BB variables are not supported, so do an extra store/load (but why a new BB?) */
+  if (GetCG()->GetOptimizeLevel() == CGOptions::kLevel0) {
+    MIRSymbol *symbol = GetFunction().GetLocalOrGlobalSymbol(pair.first);
+    MIRType *sPty = symbol->GetType();
+    PrimType ty = symbol->GetType()->GetPrimType();
+    if (sPty->GetKind() == kTypeStruct || sPty->GetKind() == kTypeUnion) {
+      MIRStructType *structType = static_cast<MIRStructType*>(sPty);
+      ty = structType->GetFieldType(pair.second.GetFieldID())->GetPrimType();
+    } else if (sPty->GetKind() == kTypeClass) {
+      CHECK_FATAL(false, "unsupported type for inline asm / intrinsic");
+    }
+    RegOperand &tempReg = CreateVirtualRegisterOperand(NewVReg(GetRegTyFromPrimTy(ty), GetPrimTypeSize(ty)));
+    SelectCopy(tempReg, ty, value, ty);
+    destOpnd = &tempReg;
+  }
+  SetCurBB(tempBB);
+  SelectDassign(pair.first, pair.second.GetFieldID(), primType, *destOpnd);
+
+  CHECK_FATAL(realCurBB->GetNext() == nullptr, "current BB must not have a next BB");
+  realCurBB->SetLastStmt(parentStmt);
+  realCurBB->SetNext(StartNewBBImpl(true, parentStmt));
+  realCurBB->GetNext()->SetKind(BB::kBBFallthru);
+  realCurBB->GetNext()->SetPrev(realCurBB);
+
+  realCurBB->GetNext()->InsertAtBeginning(*GetCurBB());
+  /* restore it */
+  SetCurBB(*realCurBB->GetNext());
+}
+
+/* The following are translations of LL/SC and atomic RMW operations */
+MemOrd AArch64CGFunc::OperandToMemOrd(Operand &opnd) const {
+  CHECK_FATAL(opnd.IsImmediate(), "Memory order must be an int constant.");
+  auto immOpnd = static_cast<ImmOperand*>(&opnd);
+  int32 val = immOpnd->GetValue();
+  CHECK_FATAL(val >= 0, "val must be non-negative");
+  return MemOrdFromU32(static_cast<uint32>(val));
+}
+
+/*
+ * Pick a load-exclusive (ldxr/ldaxr) or store-exclusive (stxr/stlxr) instruction.
+ * byte_p2x: power-of-2 size of the operand in bytes (0: 1B, 1: 2B, 2: 4B, 3: 8B).
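+ * Illustrative examples, read off the operator table in the function body below:
+ *   byteP2Size = 2, store = false, acqRel = false  -> ldxr  (32-bit form)
+ *   byteP2Size = 2, store = true,  acqRel = true   -> stlxr (32-bit form)
+ *   byteP2Size = 3, store = false, acqRel = true   -> ldaxr (64-bit form)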
+ */ +MOperator AArch64CGFunc::PickLoadStoreExclInsn(uint32 byteP2Size, bool store, bool acqRel) const { + CHECK_FATAL(byteP2Size < kIntByteSizeDimension, "Illegal argument p2size: %d", byteP2Size); + + static MOperator operators[4][2][2] = { { { MOP_wldxrb, MOP_wldaxrb }, { MOP_wstxrb, MOP_wstlxrb } }, + { { MOP_wldxrh, MOP_wldaxrh }, { MOP_wstxrh, MOP_wstlxrh } }, + { { MOP_wldxr, MOP_wldaxr }, { MOP_wstxr, MOP_wstlxr } }, + { { MOP_xldxr, MOP_xldaxr }, { MOP_xstxr, MOP_xstlxr } } }; + + MOperator optr = operators[byteP2Size][static_cast(store)][static_cast(acqRel)]; + CHECK_FATAL(optr != MOP_undef, "Unsupported type p2size: %d", byteP2Size); + + return optr; +} + +RegOperand *AArch64CGFunc::SelectLoadExcl(PrimType valPrimType, MemOperand &loc, bool acquire) { + uint32 p2size = GetPrimTypeP2Size(valPrimType); + + RegOperand &result = CreateRegisterOperandOfType(valPrimType); + MOperator mOp = PickLoadStoreExclInsn(p2size, false, acquire); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, result, loc)); + + return &result; +} + +RegOperand *AArch64CGFunc::SelectStoreExcl(PrimType valPty, MemOperand &loc, RegOperand &newVal, bool release) { + uint32 p2size = GetPrimTypeP2Size(valPty); + + /* the result (success/fail) is to be stored in a 32-bit register */ + RegOperand &result = CreateRegisterOperandOfType(PTY_u32); + + MOperator mOp = PickLoadStoreExclInsn(p2size, true, release); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(mOp, result, newVal, loc)); + + return &result; +} + +RegType AArch64CGFunc::GetRegisterType(regno_t reg) const { + if (AArch64isa::IsPhysicalRegister(reg)) { + return AArch64isa::GetRegType(static_cast(reg)); + } else if (reg == kRFLAG) { + return kRegTyCc; + } else { + return CGFunc::GetRegisterType(reg); + } +} + +MemOperand &AArch64CGFunc::LoadStructCopyBase(const MIRSymbol &symbol, int64 offset, int dataSize) { + /* For struct formals > 16 bytes, this is the pointer to the struct copy. */ + /* Load the base pointer first. */ + RegOperand *vreg = &CreateVirtualRegisterOperand(NewVReg(kRegTyInt, k8ByteSize)); + MemOperand *baseMemOpnd = &GetOrCreateMemOpnd(symbol, 0, k64BitSize); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(PickLdInsn(k64BitSize, PTY_i64), *vreg, *baseMemOpnd)); + /* Create the indirect load mem opnd from the base pointer. */ + return CreateMemOpnd(*vreg, offset, static_cast(dataSize)); +} + + /* For long branch, insert an unconditional branch. 
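+ * The conditional branch is flipped to target the fallthrough label, and a new
+ * goto BB (the jump pad) branches unconditionally to the original, out-of-range
+ * target, as sketched below: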
+ * From To + * cond_br targe_label reverse_cond_br fallthru_label + * fallthruBB unconditional br target_label + * fallthru_label: + * fallthruBB + */ +void AArch64CGFunc::InsertJumpPad(Insn *insn) { + BB *bb = insn->GetBB(); + ASSERT(bb, "instruction has no bb"); + ASSERT(bb->GetKind() == BB::kBBIf || bb->GetKind() == BB::kBBGoto, + "instruction is in neither if bb nor goto bb"); + if (bb->GetKind() == BB::kBBGoto) { + return; + } + ASSERT(bb->NumSuccs() == k2ByteSize, "if bb should have 2 successors"); + + BB *longBrBB = CreateNewBB(); + + BB *fallthruBB = bb->GetNext(); + LabelIdx fallthruLBL = fallthruBB->GetLabIdx(); + if (fallthruLBL == 0) { + fallthruLBL = CreateLabel(); + SetLab2BBMap(static_cast(fallthruLBL), *fallthruBB); + fallthruBB->AddLabel(fallthruLBL); + } + + BB *targetBB; + if (bb->GetSuccs().front() == fallthruBB) { + targetBB = bb->GetSuccs().back(); + } else { + targetBB = bb->GetSuccs().front(); + } + LabelIdx targetLBL = targetBB->GetLabIdx(); + if (targetLBL == 0) { + targetLBL = CreateLabel(); + SetLab2BBMap(static_cast(targetLBL), *targetBB); + targetBB->AddLabel(targetLBL); + } + + // Adjustment on br and CFG + bb->RemoveSuccs(*targetBB); + bb->PushBackSuccs(*longBrBB); + bb->SetNext(longBrBB); + // reverse cond br targeting fallthruBB + uint32 targetIdx = AArch64isa::GetJumpTargetIdx(*insn); + MOperator mOp = AArch64isa::FlipConditionOp(insn->GetMachineOpcode()); + insn->SetMOP( AArch64CG::kMd[mOp]); + LabelOperand &fallthruBBLBLOpnd = GetOrCreateLabelOperand(fallthruLBL); + insn->SetOperand(targetIdx, fallthruBBLBLOpnd); + + longBrBB->PushBackPreds(*bb); + longBrBB->PushBackSuccs(*targetBB); + LabelOperand &targetLBLOpnd = GetOrCreateLabelOperand(targetLBL); + longBrBB->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xuncond, targetLBLOpnd)); + longBrBB->SetPrev(bb); + longBrBB->SetNext(fallthruBB); + longBrBB->SetKind(BB::kBBGoto); + + fallthruBB->SetPrev(longBrBB); + + targetBB->RemovePreds(*bb); + targetBB->PushBackPreds(*longBrBB); +} + +RegOperand *AArch64CGFunc::AdjustOneElementVectorOperand(PrimType oType, RegOperand *opnd) { + RegOperand *resCvt = &CreateRegisterOperandOfType(oType); + Insn *insnCvt = &GetInsnBuilder()->BuildInsn(MOP_xvmovrd, *resCvt, *opnd); + GetCurBB()->AppendInsn(*insnCvt); + return resCvt; +} + +RegOperand *AArch64CGFunc::SelectOneElementVectorCopy(Operand *src, PrimType sType) { + RegOperand *res = &CreateRegisterOperandOfType(PTY_f64); + SelectCopy(*res, PTY_f64, *src, sType); + static_cast(res)->SetIF64Vec(); + return res; +} + +RegOperand *AArch64CGFunc::SelectVectorAbs(PrimType rType, Operand *o1) { + RegOperand *res = &CreateRegisterOperandOfType(rType); /* result operand */ + VectorRegSpec *vecSpecDest = GetMemoryPool()->New(rType); + VectorRegSpec *vecSpec1 = GetMemoryPool()->New(rType); /* vector operand 1 */ + + MOperator mOp = GetPrimTypeSize(rType) > k8ByteSize ? 
MOP_vabsvv : MOP_vabsuu; + VectorInsn &vInsn = GetInsnBuilder()->BuildVectorInsn(mOp, AArch64CG::kMd[mOp]); + (void)vInsn.AddOpndChain(*res).AddOpndChain(*o1); + (void)vInsn.PushRegSpecEntry(vecSpecDest).PushRegSpecEntry(vecSpec1); + GetCurBB()->AppendInsn(vInsn); + return res; +} + +RegOperand *AArch64CGFunc::SelectVectorAddLong(PrimType rType, Operand *o1, Operand *o2, + PrimType otyp, bool isLow) { + RegOperand *res = &CreateRegisterOperandOfType(rType); /* result type */ + VectorRegSpec *vecSpecDest = GetMemoryPool()->New(rType); + VectorRegSpec *vecSpec1 = GetMemoryPool()->New(otyp); /* vector operand 1 */ + VectorRegSpec *vecSpec2 = GetMemoryPool()->New(otyp); /* vector operand 2 */ + MOperator mOp; + if (isLow) { + mOp = IsUnsignedInteger(rType) ? MOP_vuaddlvuu : MOP_vsaddlvuu; + } else { + mOp = IsUnsignedInteger(rType) ? MOP_vuaddl2vvv : MOP_vsaddl2vvv; + } + VectorInsn &vInsn = GetInsnBuilder()->BuildVectorInsn(mOp, AArch64CG::kMd[mOp]); + (void)vInsn.AddOpndChain(*res).AddOpndChain(*o1).AddOpndChain(*o2); + (void)vInsn.PushRegSpecEntry(vecSpecDest).PushRegSpecEntry(vecSpec1).PushRegSpecEntry(vecSpec2); + GetCurBB()->AppendInsn(vInsn); + return res; +} + +RegOperand *AArch64CGFunc::SelectVectorAddWiden(Operand *o1, PrimType otyp1, Operand *o2, PrimType otyp2, bool isLow) { + RegOperand *res = &CreateRegisterOperandOfType(otyp1); /* restype is same as o1 */ + VectorRegSpec *vecSpecDest = GetMemoryPool()->New(otyp1); + VectorRegSpec *vecSpec1 = GetMemoryPool()->New(otyp1); /* vector operand 1 */ + VectorRegSpec *vecSpec2 = GetMemoryPool()->New(otyp2); /* vector operand 2 */ + + MOperator mOp; + if (isLow) { + mOp = IsUnsignedInteger(otyp1) ? MOP_vuaddwvvu : MOP_vsaddwvvu; + } else { + mOp = IsUnsignedInteger(otyp1) ? MOP_vuaddw2vvv : MOP_vsaddw2vvv; + } + VectorInsn &vInsn = GetInsnBuilder()->BuildVectorInsn(mOp, AArch64CG::kMd[mOp]); + (void)vInsn.AddOpndChain(*res).AddOpndChain(*o1).AddOpndChain(*o2); + (void)vInsn.PushRegSpecEntry(vecSpecDest).PushRegSpecEntry(vecSpec1).PushRegSpecEntry(vecSpec2); + GetCurBB()->AppendInsn(vInsn); + return res; +} + +RegOperand *AArch64CGFunc::SelectVectorImmMov(PrimType rType, Operand *src, PrimType sType) { + RegOperand *res = &CreateRegisterOperandOfType(rType); /* result operand */ + VectorRegSpec *vecSpec = GetMemoryPool()->New(rType); + int64 val = static_cast(src)->GetValue(); + /* copy the src imm operand to a reg if out of range */ + if ((GetVecEleSize(rType) >= k64BitSize) || + (GetPrimTypeSize(sType) > k4ByteSize && val != 0) || + (val < kMinImmVal || val > kMaxImmVal)) { + Operand *reg = &CreateRegisterOperandOfType(sType); + SelectCopy(*reg, sType, *src, sType); + return SelectVectorRegMov(rType, reg, sType); + } + + MOperator mOp = GetPrimTypeSize(rType) > k8ByteSize ? MOP_vmovvi : MOP_vmovui; + if (GetVecEleSize(rType) == k8BitSize && val < 0) { + src = &CreateImmOperand(static_cast(val), k8BitSize, true); + } else if (val < 0) { + src = &CreateImmOperand(-(val + 1), k8BitSize, true); + mOp = GetPrimTypeSize(rType) > k8ByteSize ? 
MOP_vnotvi : MOP_vnotui; + } + + VectorInsn &vInsn = GetInsnBuilder()->BuildVectorInsn(mOp, AArch64CG::kMd[mOp]); + (void)vInsn.AddOpndChain(*res).AddOpndChain(*src); + (void)vInsn.PushRegSpecEntry(vecSpec); + GetCurBB()->AppendInsn(vInsn); + return res; +} + +RegOperand *AArch64CGFunc::SelectVectorRegMov(PrimType rType, Operand *src, PrimType sType) { + RegOperand *res = &CreateRegisterOperandOfType(rType); /* result operand */ + VectorRegSpec *vecSpec = GetMemoryPool()->New(rType); + + MOperator mOp; + if (GetPrimTypeSize(sType) > k4ByteSize) { + mOp = GetPrimTypeSize(rType) > k8ByteSize ? MOP_vxdupvr : MOP_vxdupur; + } else { + mOp = GetPrimTypeSize(rType) > k8ByteSize ? MOP_vwdupvr : MOP_vwdupur; + } + + VectorInsn &vInsn = GetInsnBuilder()->BuildVectorInsn(mOp, AArch64CG::kMd[mOp]); + (void)vInsn.AddOpndChain(*res).AddOpndChain(*src); + (void)vInsn.PushRegSpecEntry(vecSpec); + GetCurBB()->AppendInsn(vInsn); + return res; +} + +RegOperand *AArch64CGFunc::SelectVectorFromScalar(PrimType rType, Operand *src, PrimType sType) { + if (!IsPrimitiveVector(rType)) { + return SelectOneElementVectorCopy(src, sType); + } else if (src->IsConstImmediate()) { + return SelectVectorImmMov(rType, src, sType); + } else { + return SelectVectorRegMov(rType, src, sType); + } +} + +RegOperand *AArch64CGFunc::SelectVectorDup(PrimType rType, Operand *src, bool getLow) { + PrimType oType = rType; + rType = FilterOneElementVectorType(oType); + RegOperand *res = &CreateRegisterOperandOfType(rType); + VectorRegSpec *vecSpecSrc = GetMemoryPool()->New(k2ByteSize, k64BitSize, getLow ? 0 : 1); + + VectorInsn &vInsn = GetInsnBuilder()->BuildVectorInsn(MOP_vduprv, AArch64CG::kMd[MOP_vduprv]); + (void)vInsn.AddOpndChain(*res).AddOpndChain(*src); + (void)vInsn.PushRegSpecEntry(vecSpecSrc); + GetCurBB()->AppendInsn(vInsn); + if (oType != rType) { + res = AdjustOneElementVectorOperand(oType, res); + static_cast(res)->SetIF64Vec(); + } + return res; +} + +RegOperand *AArch64CGFunc::SelectVectorGetElement(PrimType rType, Operand *src, PrimType sType, int32 lane) { + RegOperand *res = &CreateRegisterOperandOfType(rType); /* result operand */ + VectorRegSpec *vecSpecSrc = GetMemoryPool()->New(sType, lane); /* vector operand */ + + MOperator mop; + if (!IsPrimitiveVector(sType)) { + mop = MOP_xmovrr; + } else if (GetPrimTypeBitSize(rType) >= k64BitSize) { + mop = MOP_vxmovrv; + } else { + mop = (GetPrimTypeBitSize(sType) > k64BitSize) ? MOP_vwmovrv : MOP_vwmovru; + } + + VectorInsn &vInsn = GetInsnBuilder()->BuildVectorInsn(mop, AArch64CG::kMd[mop]); + (void)vInsn.AddOpndChain(*res).AddOpndChain(*src); + (void)vInsn.PushRegSpecEntry(vecSpecSrc); + GetCurBB()->AppendInsn(vInsn); + return res; +} + +/* adalp o1, o2 instruction accumulates into o1, overwriting the original operand. + Hence we perform c = vadalp(a,b) as + T tmp = a; + return tmp+b; + The return value of vadalp is then assigned to c, leaving value of a intact. 
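+   A sketch of the selected sequence (assuming unsigned 8-bit lanes; vTmp denotes
+   an illustrative scratch vector register):
+     mov    vTmp, a       // copy first, so a is left intact
+     uadalp vTmp, b       // pairwise add the lanes of b and accumulate into vTmp
+     c = vTmp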
+ */ +RegOperand *AArch64CGFunc::SelectVectorPairwiseAdalp(Operand *src1, PrimType sty1, + Operand *src2, PrimType sty2) { + VectorRegSpec *vecSpecDest; + RegOperand *res; + + if (!IsPrimitiveVector(sty1)) { + RegOperand *resF = SelectOneElementVectorCopy(src1, sty1); + res = &CreateRegisterOperandOfType(PTY_f64); + SelectCopy(*res, PTY_f64, *resF, PTY_f64); + vecSpecDest = GetMemoryPool()->New(k1ByteSize, k64BitSize); + } else { + res = &CreateRegisterOperandOfType(sty1); /* result type same as sty1 */ + SelectCopy(*res, sty1, *src1, sty1); + vecSpecDest = GetMemoryPool()->New(sty1); + } + VectorRegSpec *vecSpecSrc = GetMemoryPool()->New(sty2); + + MOperator mop; + if (IsUnsignedInteger(sty1)) { + mop = GetPrimTypeSize(sty1) > k8ByteSize ? MOP_vupadalvv : MOP_vupadaluu; + } else { + mop = GetPrimTypeSize(sty1) > k8ByteSize ? MOP_vspadalvv : MOP_vspadaluu; + } + + VectorInsn &vInsn = GetInsnBuilder()->BuildVectorInsn(mop, AArch64CG::kMd[mop]); + (void)vInsn.AddOpndChain(*res).AddOpndChain(*src2); + (void)vInsn.PushRegSpecEntry(vecSpecDest).PushRegSpecEntry(vecSpecSrc); + GetCurBB()->AppendInsn(vInsn); + if (!IsPrimitiveVector(sty1)) { + res = AdjustOneElementVectorOperand(sty1, res); + } + return res; +} + +RegOperand *AArch64CGFunc::SelectVectorPairwiseAdd(PrimType rType, Operand *src, PrimType sType) { + PrimType oType = rType; + rType = FilterOneElementVectorType(oType); + RegOperand *res = &CreateRegisterOperandOfType(rType); /* result operand */ + VectorRegSpec *vecSpecDest = GetMemoryPool()->New(rType); + VectorRegSpec *vecSpecSrc = GetMemoryPool()->New(sType); /* source operand */ + + if (rType == PTY_f64) { + vecSpecDest->vecLaneMax = 1; + } + + MOperator mop; + if (IsUnsignedInteger(sType)) { + mop = GetPrimTypeSize(sType) > k8ByteSize ? MOP_vupaddvv : MOP_vupadduu; + } else { + mop = GetPrimTypeSize(sType) > k8ByteSize ? MOP_vspaddvv : MOP_vspadduu; + } + + VectorInsn &vInsn = GetInsnBuilder()->BuildVectorInsn(mop, AArch64CG::kMd[mop]); + (void)vInsn.AddOpndChain(*res).AddOpndChain(*src); + /* dest pushed first, popped first */ + (void)vInsn.PushRegSpecEntry(vecSpecDest).PushRegSpecEntry(vecSpecSrc); + GetCurBB()->AppendInsn(vInsn); + if (oType != rType) { + res = AdjustOneElementVectorOperand(oType, res); + } + return res; +} + +RegOperand *AArch64CGFunc::SelectVectorSetElement(Operand *eOpnd, PrimType eType, Operand *vOpnd, + PrimType vType, int32 lane) { + if (!IsPrimitiveVector(vType)) { + return SelectOneElementVectorCopy(eOpnd, eType); + } + RegOperand *reg = &CreateRegisterOperandOfType(eType); /* vector element type */ + SelectCopy(*reg, eType, *eOpnd, eType); + VectorRegSpec *vecSpecSrc = GetMemoryPool()->New(vType, lane); /* vector operand == result */ + + MOperator mOp; + if (GetPrimTypeSize(eType) > k4ByteSize) { + mOp = GetPrimTypeSize(vType) > k8ByteSize ? MOP_vxinsvr : MOP_vxinsur; + } else { + mOp = GetPrimTypeSize(vType) > k8ByteSize ? 
MOP_vwinsvr : MOP_vwinsur; + } + + VectorInsn &vInsn = GetInsnBuilder()->BuildVectorInsn(mOp, AArch64CG::kMd[mOp]); + (void)vInsn.AddOpndChain(*vOpnd).AddOpndChain(*reg); + (void)vInsn.PushRegSpecEntry(vecSpecSrc); + GetCurBB()->AppendInsn(vInsn); + return static_cast(vOpnd); +} + +RegOperand *AArch64CGFunc::SelectVectorAbsSubL(PrimType rType, Operand *o1, Operand *o2, + PrimType oTy, bool isLow) { + RegOperand *res = &CreateRegisterOperandOfType(rType); + VectorRegSpec *vecSpecDest = GetMemoryPool()->New(rType); + VectorRegSpec *vecSpecOpd1 = GetMemoryPool()->New(oTy); + VectorRegSpec *vecSpecOpd2 = GetMemoryPool()->New(oTy); /* same opnd types */ + + MOperator mop; + if (isLow) { + mop = IsPrimitiveUnSignedVector(rType) ? MOP_vuabdlvuu : MOP_vsabdlvuu; + } else { + mop = IsPrimitiveUnSignedVector(rType) ? MOP_vuabdl2vvv : MOP_vsabdl2vvv; + } + VectorInsn &vInsn = GetInsnBuilder()->BuildVectorInsn(mop, AArch64CG::kMd[mop]); + (void)vInsn.AddOpndChain(*res).AddOpndChain(*o1).AddOpndChain(*o2); + (void)vInsn.PushRegSpecEntry(vecSpecDest).PushRegSpecEntry(vecSpecOpd1).PushRegSpecEntry(vecSpecOpd2); + GetCurBB()->AppendInsn(vInsn); + return res; +} + +RegOperand *AArch64CGFunc::SelectVectorMerge(PrimType rType, Operand *o1, Operand *o2, int32 index) { + if (!IsPrimitiveVector(rType)) { + static_cast(o1)->SetIF64Vec(); + return static_cast(o1); /* 64x1_t, index equals 0 */ + } + RegOperand *res = &CreateRegisterOperandOfType(rType); + VectorRegSpec *vecSpecDest = GetMemoryPool()->New(rType); + VectorRegSpec *vecSpecOpd1 = GetMemoryPool()->New(rType); + VectorRegSpec *vecSpecOpd2 = GetMemoryPool()->New(rType); + + ImmOperand *imm = &CreateImmOperand(index, k8BitSize, true); + + MOperator mOp = (GetPrimTypeSize(rType) > k8ByteSize) ? MOP_vextvvvi : MOP_vextuuui; + VectorInsn &vInsn = GetInsnBuilder()->BuildVectorInsn(mOp, AArch64CG::kMd[mOp]); + (void)vInsn.AddOpndChain(*res).AddOpndChain(*o1).AddOpndChain(*o2).AddOpndChain(*imm); + (void)vInsn.PushRegSpecEntry(vecSpecDest).PushRegSpecEntry(vecSpecOpd1).PushRegSpecEntry(vecSpecOpd2); + GetCurBB()->AppendInsn(vInsn); + return res; +} + +RegOperand *AArch64CGFunc::SelectVectorReverse(PrimType rType, Operand *src, PrimType sType, uint32 size) { + RegOperand *res = &CreateRegisterOperandOfType(rType); /* result operand */ + VectorRegSpec *vecSpecDest = GetMemoryPool()->New(rType); + VectorRegSpec *vecSpecSrc = GetMemoryPool()->New(sType); /* vector operand */ + + MOperator mOp; + if (GetPrimTypeBitSize(rType) == k128BitSize) { + mOp = size >= k64BitSize ? MOP_vrev64qq : (size >= k32BitSize ? MOP_vrev32qq : MOP_vrev16qq); + } else if (GetPrimTypeBitSize(rType) == k64BitSize) { + mOp = size >= k64BitSize ? MOP_vrev64dd : (size >= k32BitSize ? 
MOP_vrev32dd : MOP_vrev16dd); + } else { + CHECK_FATAL(false, "should not be here"); + } + VectorInsn &vInsn = GetInsnBuilder()->BuildVectorInsn(mOp, AArch64CG::kMd[mOp]); + (void)vInsn.AddOpndChain(*res).AddOpndChain(*src); + (void)vInsn.PushRegSpecEntry(vecSpecDest).PushRegSpecEntry(vecSpecSrc); + GetCurBB()->AppendInsn(vInsn); + return res; +} + +RegOperand *AArch64CGFunc::SelectVectorSum(PrimType rType, Operand *o1, PrimType oType) { + RegOperand *res = &CreateRegisterOperandOfType(rType); /* uint32_t result */ + VectorRegSpec *vecSpec1 = GetMemoryPool()->New(oType); + RegOperand *iOpnd = &CreateRegisterOperandOfType(oType); /* float intermediate result */ + uint32 eSize = GetVecEleSize(oType); /* vector opd in bits */ + bool is16ByteVec = GetPrimTypeSize(oType) >= k16ByteSize; + MOperator mOp; + if (is16ByteVec) { + mOp = eSize <= k8BitSize ? MOP_vbaddvrv : (eSize <= k16BitSize ? MOP_vhaddvrv : + (eSize <= k32BitSize ? MOP_vsaddvrv : MOP_vdaddvrv)); + } else { + mOp = eSize <= k8BitSize ? MOP_vbaddvru : (eSize <= k16BitSize ? MOP_vhaddvru : MOP_vsaddvru); + } + VectorInsn &vInsn = GetInsnBuilder()->BuildVectorInsn(mOp, AArch64CG::kMd[mOp]); + (void)vInsn.AddOpndChain(*iOpnd).AddOpndChain(*o1); + (void)vInsn.PushRegSpecEntry(vecSpec1); + GetCurBB()->AppendInsn(vInsn); + + mOp = eSize > k32BitSize ? MOP_vxmovrv : MOP_vwmovrv; + VectorInsn &vInsn2 = GetInsnBuilder()->BuildVectorInsn(mOp, AArch64CG::kMd[mOp]); + auto *vecSpec2 = GetMemoryPool()->New(oType); + (void)vInsn2.AddOpndChain(*res).AddOpndChain(*iOpnd); + vecSpec2->vecLane = 0; + (void)vInsn2.PushRegSpecEntry(vecSpec2); + GetCurBB()->AppendInsn(vInsn2); + return res; +} + +void AArch64CGFunc::PrepareVectorOperands(Operand **o1, PrimType &oty1, Operand **o2, PrimType &oty2) { + /* Only 1 operand can be non vector, otherwise it's a scalar operation, wouldn't come here */ + if (IsPrimitiveVector(oty1) == IsPrimitiveVector(oty2)) { + return; + } + PrimType origTyp = !IsPrimitiveVector(oty2) ? oty2 : oty1; + Operand *opd = !IsPrimitiveVector(oty2) ? *o2 : *o1; + PrimType rType = !IsPrimitiveVector(oty2) ? oty1 : oty2; /* Type to dup into */ + RegOperand *res = &CreateRegisterOperandOfType(rType); + VectorRegSpec *vecSpec = GetMemoryPool()->New(rType); + + bool immOpnd = false; + if (opd->IsConstImmediate()) { + int64 val = static_cast(opd)->GetValue(); + if (val >= kMinImmVal && val <= kMaxImmVal && GetVecEleSize(rType) < k64BitSize) { + immOpnd = true; + } else { + RegOperand *regOpd = &CreateRegisterOperandOfType(origTyp); + SelectCopyImm(*regOpd, origTyp, static_cast(*opd), origTyp); + opd = static_cast(regOpd); + } + } + + /* need dup to vector operand */ + MOperator mOp; + if (immOpnd) { + mOp = GetPrimTypeSize(rType) > k8ByteSize ? MOP_vmovvi : MOP_vmovui; /* a const */ + } else { + if (GetPrimTypeSize(origTyp) > k4ByteSize) { + mOp = GetPrimTypeSize(rType) > k8ByteSize ? MOP_vxdupvr : MOP_vxdupur; + } else { + mOp = GetPrimTypeSize(rType) > k8ByteSize ? 
MOP_vwdupvr : MOP_vwdupur; /* a scalar var */ + } + } + VectorInsn &vInsn = GetInsnBuilder()->BuildVectorInsn(mOp, AArch64CG::kMd[mOp]); + (void)vInsn.AddOpndChain(*res).AddOpndChain(*opd); + (void)vInsn.PushRegSpecEntry(vecSpec); + GetCurBB()->AppendInsn(vInsn); + if (!IsPrimitiveVector(oty2)) { + *o2 = static_cast(res); + oty2 = rType; + } else { + *o1 = static_cast(res); + oty1 = rType; + } +} + +void AArch64CGFunc::SelectVectorCvt(Operand *res, PrimType rType, Operand *o1, PrimType oType) { + VectorRegSpec *vecSpecDest = GetMemoryPool()->New(rType); + VectorRegSpec *vecSpec1 = GetMemoryPool()->New(oType); /* vector operand 1 */ + + MOperator mOp; + VectorInsn *insn; + if (GetPrimTypeSize(rType) > GetPrimTypeSize(oType)) { + /* expand, similar to vmov_XX() intrinsics */ + mOp = IsUnsignedInteger(rType) ? MOP_vushllvvi : MOP_vshllvvi; + ImmOperand *imm = &CreateImmOperand(0, k8BitSize, true); + insn = &GetInsnBuilder()->BuildVectorInsn(mOp, AArch64CG::kMd[mOp]); + (void)insn->AddOpndChain(*res).AddOpndChain(*o1).AddOpndChain(*imm); + } else if (GetPrimTypeSize(rType) < GetPrimTypeSize(oType)) { + /* extract, similar to vqmovn_XX() intrinsics */ + insn = &GetInsnBuilder()->BuildVectorInsn(MOP_vxtnuv, AArch64CG::kMd[MOP_vxtnuv]); + (void)insn->AddOpndChain(*res).AddOpndChain(*o1); + } else { + CHECK_FATAL(false, "Invalid cvt between 2 operands of the same size"); + } + (void)insn->PushRegSpecEntry(vecSpecDest).PushRegSpecEntry(vecSpec1); + GetCurBB()->AppendInsn(*insn); +} + +RegOperand *AArch64CGFunc::SelectVectorCompareZero(Operand *o1, PrimType oty1, Operand *o2, Opcode opc) { + if (IsUnsignedInteger(oty1) && (opc != OP_eq && opc != OP_ne)) { + return nullptr; /* no unsigned instr for zero */ + } + RegOperand *res = &CreateRegisterOperandOfType(oty1); /* result operand */ + VectorRegSpec *vecSpecDest = GetMemoryPool()->New(oty1); + VectorRegSpec *vecSpec1 = GetMemoryPool()->New(oty1); /* vector operand 1 */ + + MOperator mOp; + switch (opc) { + case OP_eq: + case OP_ne: + mOp = GetPrimTypeSize(oty1) > k8ByteSize ? MOP_vzcmeqvv : MOP_vzcmequu; + break; + case OP_gt: + mOp = GetPrimTypeSize(oty1) > k8ByteSize ? MOP_vzcmgtvv : MOP_vzcmgtuu; + break; + case OP_ge: + mOp = GetPrimTypeSize(oty1) > k8ByteSize ? MOP_vzcmgevv : MOP_vzcmgeuu; + break; + case OP_lt: + mOp = GetPrimTypeSize(oty1) > k8ByteSize ? MOP_vzcmltvv : MOP_vzcmltuu; + break; + case OP_le: + mOp = GetPrimTypeSize(oty1) > k8ByteSize ? MOP_vzcmlevv : MOP_vzcmleuu; + break; + default: + CHECK_FATAL(false, "Invalid cc in vector compare"); + } + VectorInsn &vInsn = GetInsnBuilder()->BuildVectorInsn(mOp, AArch64CG::kMd[mOp]); + (void)vInsn.AddOpndChain(*res).AddOpndChain(*o1).AddOpndChain(*o2); + (void)vInsn.PushRegSpecEntry(vecSpecDest).PushRegSpecEntry(vecSpec1); + GetCurBB()->AppendInsn(vInsn); + if (opc == OP_ne) { + res = SelectVectorNot(oty1, res); + } + return res; +} + +/* Neon compare intrinsics always return unsigned vector, MapleIR for comparison always return + signed. 
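+   Note (from the selection logic below): lt/le are emitted by swapping the two
+   operands and using the gt/ge compare (hi/hs for unsigned), and ne is emitted
+   as eq followed by a vector not.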
Using type of 1st operand for operation here */ +RegOperand *AArch64CGFunc::SelectVectorCompare(Operand *o1, PrimType oty1, Operand *o2, PrimType oty2, Opcode opc) { + if (o2->IsConstImmediate() && static_cast(o2)->GetValue() == 0) { + RegOperand *zeroCmp = SelectVectorCompareZero(o1, oty1, o2, opc); + if (zeroCmp != nullptr) { + return zeroCmp; + } + } + PrepareVectorOperands(&o1, oty1, &o2, oty2); + ASSERT(oty1 == oty2, "vector operand type mismatch"); + + RegOperand *res = &CreateRegisterOperandOfType(oty1); /* result operand */ + VectorRegSpec *vecSpecDest = GetMemoryPool()->New(oty1); + VectorRegSpec *vecSpec1 = GetMemoryPool()->New(oty1); /* vector operand 1 */ + VectorRegSpec *vecSpec2 = GetMemoryPool()->New(oty2); /* vector operand 2 */ + + MOperator mOp; + switch (opc) { + case OP_eq: + case OP_ne: + mOp = GetPrimTypeSize(oty1) > k8ByteSize ? MOP_vcmeqvvv : MOP_vcmequuu; + break; + case OP_lt: + case OP_gt: + if (IsUnsignedInteger(oty1)) { + mOp = GetPrimTypeSize(oty1) > k8ByteSize ? MOP_vcmhivvv : MOP_vcmhiuuu; + } else { + mOp = GetPrimTypeSize(oty1) > k8ByteSize ? MOP_vcmgtvvv : MOP_vcmgtuuu; + } + break; + case OP_le: + case OP_ge: + if (IsUnsignedInteger(oty1)) { + mOp = GetPrimTypeSize(oty1) > k8ByteSize ? MOP_vcmhsvvv : MOP_vcmhsuuu; + } else { + mOp = GetPrimTypeSize(oty1) > k8ByteSize ? MOP_vcmgevvv : MOP_vcmgeuuu; + } + break; + default: + CHECK_FATAL(false, "Invalid cc in vector compare"); + } + VectorInsn &vInsn = GetInsnBuilder()->BuildVectorInsn(mOp, AArch64CG::kMd[mOp]); + if (opc == OP_lt || opc == OP_le) { + (void)vInsn.AddOpndChain(*res).AddOpndChain(*o2).AddOpndChain(*o1); + } else { + (void)vInsn.AddOpndChain(*res).AddOpndChain(*o1).AddOpndChain(*o2); + } + (void)vInsn.PushRegSpecEntry(vecSpecDest).PushRegSpecEntry(vecSpec1).PushRegSpecEntry(vecSpec2); + GetCurBB()->AppendInsn(vInsn); + if (opc == OP_ne) { + res = SelectVectorNot(oty1, res); + } + return res; +} + +RegOperand *AArch64CGFunc::SelectVectorShift(PrimType rType, Operand *o1, PrimType oty1, + Operand *o2, PrimType oty2, Opcode opc) { + PrepareVectorOperands(&o1, oty1, &o2, oty2); + PrimType resultType = rType; + VectorRegSpec *vecSpecDest = GetMemoryPool()->New(rType); + VectorRegSpec *vecSpec1 = GetMemoryPool()->New(rType); /* vector operand 1 */ + VectorRegSpec *vecSpec2 = GetMemoryPool()->New(rType); /* vector operand 2 */ + + if (!IsPrimitiveVector(rType)) { + o1 = &SelectCopy(*o1, rType, PTY_f64); + o2 = &SelectCopy(*o2, rType, PTY_f64); + resultType = PTY_f64; + } + RegOperand *res = &CreateRegisterOperandOfType(resultType); /* result operand */ + + /* signed and unsigned shl(v,v) both use sshl or ushl, they are the same */ + MOperator mOp; + if (IsPrimitiveUnsigned(rType)) { + mOp = GetPrimTypeSize(rType) > k8ByteSize ? MOP_vushlvvv : MOP_vushluuu; + } else { + mOp = GetPrimTypeSize(rType) > k8ByteSize ? 
MOP_vshlvvv : MOP_vshluuu; + } + + if (opc != OP_shl) { + o2 = SelectVectorNeg(rType, o2); + } + VectorInsn &vInsn = GetInsnBuilder()->BuildVectorInsn(mOp, AArch64CG::kMd[mOp]); + (void)vInsn.AddOpndChain(*res).AddOpndChain(*o1).AddOpndChain(*o2); + (void)vInsn.PushRegSpecEntry(vecSpecDest).PushRegSpecEntry(vecSpec1).PushRegSpecEntry(vecSpec2); + GetCurBB()->AppendInsn(vInsn); + return res; +} + +uint32 ValidShiftConst(PrimType rType) { + switch (rType) { + case PTY_v8u8: + case PTY_v8i8: + case PTY_v16u8: + case PTY_v16i8: + return k8BitSize; + case PTY_v4u16: + case PTY_v4i16: + case PTY_v8u16: + case PTY_v8i16: + return k16BitSize; + case PTY_v2u32: + case PTY_v2i32: + case PTY_v4u32: + case PTY_v4i32: + return k32BitSize; + case PTY_v2u64: + case PTY_v2i64: + return k64BitSize; + default: + CHECK_FATAL(false, "Invalid Shift operand type"); + } + return 0; +} + +RegOperand *AArch64CGFunc::SelectVectorShiftImm(PrimType rType, Operand *o1, Operand *imm, int32 sVal, Opcode opc) { + RegOperand *res = &CreateRegisterOperandOfType(rType); /* result operand */ + VectorRegSpec *vecSpecDest = GetMemoryPool()->New(rType); + VectorRegSpec *vecSpec1 = GetMemoryPool()->New(rType); /* vector operand 1 */ + + if (!imm->IsConstImmediate()) { + CHECK_FATAL(false, "VectorUShiftImm has invalid shift const"); + } + uint32 shift = static_cast(ValidShiftConst(rType)); + bool needDup = false; + if (opc == OP_shl) { + if ((shift == k8BitSize && (sVal < 0 || static_cast(sVal) >= shift)) || + (shift == k16BitSize && (sVal < 0 || static_cast(sVal) >= shift)) || + (shift == k32BitSize && (sVal < 0 || static_cast(sVal) >= shift)) || + (shift == k64BitSize && (sVal < 0 || static_cast(sVal) >= shift))) { + needDup = true; + } + } else { + if ((shift == k8BitSize && (sVal < 1 || static_cast(sVal) > shift)) || + (shift == k16BitSize && (sVal < 1 || static_cast(sVal) > shift)) || + (shift == k32BitSize && (sVal < 1 || static_cast(sVal) > shift)) || + (shift == k64BitSize && (sVal < 1 || static_cast(sVal) > shift))) { + needDup = true; + } + } + if (needDup) { + /* Dup constant to vector reg */ + MOperator mOp = GetPrimTypeSize(rType) > k8ByteSize ? MOP_vmovvi : MOP_vmovui; + VectorInsn &vInsn = GetInsnBuilder()->BuildVectorInsn(mOp, AArch64CG::kMd[mOp]); + (void)vInsn.AddOpndChain(*res).AddOpndChain(*imm); + (void)vInsn.PushRegSpecEntry(vecSpecDest); + GetCurBB()->AppendInsn(vInsn); + res = SelectVectorShift(rType, o1, rType, res, rType, opc); + return res; + } + MOperator mOp; + if (GetPrimTypeSize(rType) > k8ByteSize) { + if (IsUnsignedInteger(rType)) { + mOp = opc == OP_shl ? MOP_vushlvvi : MOP_vushrvvi; + } else { + mOp = opc == OP_shl ? MOP_vushlvvi : MOP_vshrvvi; + } + } else { + if (IsUnsignedInteger(rType)) { + mOp = opc == OP_shl ? MOP_vushluui : MOP_vushruui; + } else { + mOp = opc == OP_shl ? 
MOP_vushluui : MOP_vshruui; + } + } + VectorInsn &vInsn = GetInsnBuilder()->BuildVectorInsn(mOp, AArch64CG::kMd[mOp]); + (void)vInsn.AddOpndChain(*res).AddOpndChain(*o1).AddOpndChain(*imm); + (void)vInsn.PushRegSpecEntry(vecSpecDest).PushRegSpecEntry(vecSpec1); + GetCurBB()->AppendInsn(vInsn); + return res; +} + +RegOperand *AArch64CGFunc::SelectVectorTableLookup(PrimType rType, Operand *o1, Operand *o2) { + RegOperand *res = &CreateRegisterOperandOfType(rType); /* result operand */ + VectorRegSpec *vecSpecDest = GetMemoryPool()->New(rType); /* 8B or 16B */ + VectorRegSpec *vecSpec1 = GetMemoryPool()->New(rType); /* vector operand 1 */ + VectorRegSpec *vecSpec2 = GetMemoryPool()->New(rType); /* vector operand 2 */ + vecSpec1->compositeOpnds = 1; /* composite operand */ + + VectorInsn &vInsn = GetInsnBuilder()->BuildVectorInsn(MOP_vtbl1vvv, AArch64CG::kMd[MOP_vtbl1vvv]); + (void)vInsn.AddOpndChain(*res).AddOpndChain(*o1).AddOpndChain(*o2); + (void)vInsn.PushRegSpecEntry(vecSpecDest).PushRegSpecEntry(vecSpec1).PushRegSpecEntry(vecSpec2); + GetCurBB()->AppendInsn(vInsn); + return res; +} + +RegOperand *AArch64CGFunc::SelectVectorMadd(Operand *o1, PrimType oTyp1, Operand *o2, + PrimType oTyp2, Operand *o3, PrimType oTyp3) { + VectorRegSpec *vecSpec1 = GetMemoryPool()->New(oTyp1); /* operand 1 and result */ + VectorRegSpec *vecSpec2 = GetMemoryPool()->New(oTyp2); /* vector operand 2 */ + VectorRegSpec *vecSpec3 = GetMemoryPool()->New(oTyp3); /* vector operand 2 */ + + MOperator mop = IsPrimitiveUnSignedVector(oTyp1) ? MOP_vumaddvvv : MOP_vsmaddvvv; + VectorInsn &vInsn = GetInsnBuilder()->BuildVectorInsn(mop, AArch64CG::kMd[mop]); + (void)vInsn.AddOpndChain(*o1).AddOpndChain(*o2).AddOpndChain(*o3); + (void)vInsn.PushRegSpecEntry(vecSpec1).PushRegSpecEntry(vecSpec2).PushRegSpecEntry(vecSpec3); + GetCurBB()->AppendInsn(vInsn); + return static_cast(o1); +} + +RegOperand *AArch64CGFunc::SelectVectorMull(PrimType rType, Operand *o1, PrimType oTyp1, + Operand *o2, PrimType oTyp2, bool isLow) { + RegOperand *res = &CreateRegisterOperandOfType(rType); /* result operand */ + VectorRegSpec *vecSpecDest = GetMemoryPool()->New(rType); + VectorRegSpec *vecSpec1 = GetMemoryPool()->New(oTyp1); /* vector operand 1 */ + VectorRegSpec *vecSpec2 = GetMemoryPool()->New(oTyp2); /* vector operand 1 */ + + MOperator mop; + if (isLow) { + mop = IsPrimitiveUnSignedVector(rType) ? MOP_vumullvvv : MOP_vsmullvvv; + } else { + mop = IsPrimitiveUnSignedVector(rType) ? MOP_vumull2vvv : MOP_vsmull2vvv; + } + VectorInsn &vInsn = GetInsnBuilder()->BuildVectorInsn(mop, AArch64CG::kMd[mop]); + (void)vInsn.AddOpndChain(*res).AddOpndChain(*o1).AddOpndChain(*o2); + (void)vInsn.PushRegSpecEntry(vecSpecDest).PushRegSpecEntry(vecSpec1).PushRegSpecEntry(vecSpec2); + GetCurBB()->AppendInsn(vInsn); + return res; +} + +RegOperand *AArch64CGFunc::SelectVectorBinOp(PrimType rType, Operand *o1, PrimType oty1, Operand *o2, + PrimType oty2, Opcode opc) { + PrepareVectorOperands(&o1, oty1, &o2, oty2); + ASSERT(oty1 == oty2, "vector operand type mismatch"); + + RegOperand *res = &CreateRegisterOperandOfType(rType); /* result operand */ + VectorRegSpec *vecSpecDest = GetMemoryPool()->New(rType); + VectorRegSpec *vecSpec1 = GetMemoryPool()->New(oty1); /* source operand 1 */ + VectorRegSpec *vecSpec2 = GetMemoryPool()->New(oty2); /* source operand 2 */ + + MOperator mOp; + if (opc == OP_add) { + mOp = GetPrimTypeSize(rType) > k8ByteSize ? MOP_vaddvvv : MOP_vadduuu; + } else if (opc == OP_sub) { + mOp = GetPrimTypeSize(rType) > k8ByteSize ? 
MOP_vsubvvv : MOP_vsubuuu; + } else if (opc == OP_mul) { + mOp = GetPrimTypeSize(rType) > k8ByteSize ? MOP_vmulvvv : MOP_vmuluuu; + } else { + CHECK_FATAL(false, "Invalid opcode for SelectVectorBinOp"); + } + VectorInsn &vInsn = GetInsnBuilder()->BuildVectorInsn(mOp, AArch64CG::kMd[mOp]); + (void)vInsn.AddOpndChain(*res).AddOpndChain(*o1).AddOpndChain(*o2); + /* dest pushed first, popped first */ + (void)vInsn.PushRegSpecEntry(vecSpecDest).PushRegSpecEntry(vecSpec1).PushRegSpecEntry(vecSpec2); + GetCurBB()->AppendInsn(vInsn); + return res; +} + +RegOperand *AArch64CGFunc::SelectVectorBitwiseOp(PrimType rType, Operand *o1, PrimType oty1, Operand *o2, + PrimType oty2, Opcode opc) { + PrepareVectorOperands(&o1, oty1, &o2, oty2); + ASSERT(oty1 == oty2, "vector operand type mismatch"); + + RegOperand *res = &CreateRegisterOperandOfType(rType); /* result operand */ + VectorRegSpec *vecSpecDest = GetMemoryPool()->New(rType); + VectorRegSpec *vecSpec1 = GetMemoryPool()->New(rType); /* vector operand 1 */ + VectorRegSpec *vecSpec2 = GetMemoryPool()->New(rType); /* vector operand 1 */ + + MOperator mOp; + if (opc == OP_band) { + mOp = GetPrimTypeSize(rType) > k8ByteSize ? MOP_vandvvv : MOP_vanduuu; + } else if (opc == OP_bior) { + mOp = GetPrimTypeSize(rType) > k8ByteSize ? MOP_vorvvv : MOP_voruuu; + } else if (opc == OP_bxor) { + mOp = GetPrimTypeSize(rType) > k8ByteSize ? MOP_vxorvvv : MOP_vxoruuu; + } else { + CHECK_FATAL(false, "Invalid opcode for SelectVectorBitwiseOp"); + } + VectorInsn &vInsn = GetInsnBuilder()->BuildVectorInsn(mOp, AArch64CG::kMd[mOp]); + (void)vInsn.AddOpndChain(*res).AddOpndChain(*o1).AddOpndChain(*o2); + (void)vInsn.PushRegSpecEntry(vecSpecDest).PushRegSpecEntry(vecSpec1).PushRegSpecEntry(vecSpec2); + GetCurBB()->AppendInsn(vInsn); + return res; +} + +RegOperand *AArch64CGFunc::SelectVectorNarrow(PrimType rType, Operand *o1, PrimType otyp) { + RegOperand *res = &CreateRegisterOperandOfType(rType); /* result operand */ + VectorRegSpec *vecSpecDest = GetMemoryPool()->New(rType); + VectorRegSpec *vecSpec1 = GetMemoryPool()->New(otyp); /* vector operand */ + + VectorInsn &vInsn = GetInsnBuilder()->BuildVectorInsn(MOP_vxtnuv, AArch64CG::kMd[MOP_vxtnuv]); + (void)vInsn.AddOpndChain(*res).AddOpndChain(*o1); + (void)vInsn.PushRegSpecEntry(vecSpecDest).PushRegSpecEntry(vecSpec1); + GetCurBB()->AppendInsn(vInsn); + return res; +} + +RegOperand *AArch64CGFunc::SelectVectorNarrow2(PrimType rType, Operand *o1, PrimType oty1, Operand *o2, PrimType oty2) { + (void)oty1; /* 1st opnd was loaded already, type no longer needed */ + RegOperand *res = static_cast(o1); /* o1 is also the result */ + VectorRegSpec *vecSpecDest = GetMemoryPool()->New(rType); + VectorRegSpec *vecSpec2 = GetMemoryPool()->New(oty2); /* vector opnd2 */ + + VectorInsn &vInsn = GetInsnBuilder()->BuildVectorInsn(MOP_vxtn2uv, AArch64CG::kMd[MOP_vxtn2uv]); + (void)vInsn.AddOpndChain(*res).AddOpndChain(*o2); + (void)vInsn.PushRegSpecEntry(vecSpecDest).PushRegSpecEntry(vecSpec2); + GetCurBB()->AppendInsn(vInsn); + return res; +} + +RegOperand *AArch64CGFunc::SelectVectorNot(PrimType rType, Operand *o1) { + RegOperand *res = &CreateRegisterOperandOfType(rType); /* result operand */ + VectorRegSpec *vecSpecDest = GetMemoryPool()->New(rType); + VectorRegSpec *vecSpec1 = GetMemoryPool()->New(rType); /* vector operand 1 */ + + MOperator mOp = GetPrimTypeSize(rType) > k8ByteSize ? 
MOP_vnotvv : MOP_vnotuu; + VectorInsn &vInsn = GetInsnBuilder()->BuildVectorInsn(mOp, AArch64CG::kMd[mOp]); + (void)vInsn.AddOpndChain(*res).AddOpndChain(*o1); + (void)vInsn.PushRegSpecEntry(vecSpecDest).PushRegSpecEntry(vecSpec1); + GetCurBB()->AppendInsn(vInsn); + return res; +} + +RegOperand *AArch64CGFunc::SelectVectorNeg(PrimType rType, Operand *o1) { + RegOperand *res = &CreateRegisterOperandOfType(rType); /* result operand */ + VectorRegSpec *vecSpecDest = GetMemoryPool()->New(rType); + VectorRegSpec *vecSpec1 = GetMemoryPool()->New(rType); /* vector operand 1 */ + + MOperator mOp = GetPrimTypeSize(rType) > k8ByteSize ? MOP_vnegvv : MOP_vneguu; + VectorInsn &vInsn = GetInsnBuilder()->BuildVectorInsn(mOp, AArch64CG::kMd[mOp]); + (void)vInsn.AddOpndChain(*res).AddOpndChain(*o1); + (void)vInsn.PushRegSpecEntry(vecSpecDest); + (void)vInsn.PushRegSpecEntry(vecSpec1); + GetCurBB()->AppendInsn(vInsn); + return res; +} + +/* + * Called internally for auto-vec, no intrinsics for now + */ +RegOperand *AArch64CGFunc::SelectVectorSelect(Operand &cond, PrimType rType, Operand &o0, Operand &o1) { + rType = GetPrimTypeSize(rType) > k8ByteSize ? PTY_v16u8 : PTY_v8u8; + RegOperand *res = &CreateRegisterOperandOfType(rType); + SelectCopy(*res, rType, cond, rType); + VectorRegSpec *vecSpecDest = GetMemoryPool()->New(rType); + VectorRegSpec *vecSpec1 = GetMemoryPool()->New(rType); + VectorRegSpec *vecSpec2 = GetMemoryPool()->New(rType); + + uint32 mOp = GetPrimTypeBitSize(rType) > k64BitSize ? MOP_vbslvvv : MOP_vbsluuu; + VectorInsn &vInsn = GetInsnBuilder()->BuildVectorInsn(mOp, AArch64CG::kMd[mOp]); + (void)vInsn.AddOpndChain(*res).AddOpndChain(o0).AddOpndChain(o1); + (void)vInsn.PushRegSpecEntry(vecSpecDest); + (void)vInsn.PushRegSpecEntry(vecSpec1); + (void)vInsn.PushRegSpecEntry(vecSpec2); + GetCurBB()->AppendInsn(vInsn); + return res; +} + +RegOperand *AArch64CGFunc::SelectVectorShiftRNarrow(PrimType rType, Operand *o1, PrimType oType, + Operand *o2, bool isLow) { + RegOperand *res = &CreateRegisterOperandOfType(rType); /* result operand */ + VectorRegSpec *vecSpecDest = GetMemoryPool()->New(rType); + VectorRegSpec *vecSpec1 = GetMemoryPool()->New(oType); /* vector operand 1 */ + + ImmOperand *imm = static_cast(o2); + MOperator mOp; + if (isLow) { + mOp = MOP_vshrnuvi; + } else { + CHECK_FATAL(false, "NYI: vshrn_high_"); + } + VectorInsn &vInsn = GetInsnBuilder()->BuildVectorInsn(mOp, AArch64CG::kMd[mOp]); + (void)vInsn.AddOpndChain(*res).AddOpndChain(*o1).AddOpndChain(*imm); + (void)vInsn.PushRegSpecEntry(vecSpecDest); + (void)vInsn.PushRegSpecEntry(vecSpec1); + GetCurBB()->AppendInsn(vInsn); + return res; +} + +RegOperand *AArch64CGFunc::SelectVectorSubWiden(PrimType resType, Operand *o1, PrimType otyp1, + Operand *o2, PrimType otyp2, bool isLow, bool isWide) { + RegOperand *res = &CreateRegisterOperandOfType(resType); /* result reg */ + VectorRegSpec *vecSpecDest = GetMemoryPool()->New(resType); + VectorRegSpec *vecSpec1 = GetMemoryPool()->New(otyp1); /* vector operand 1 */ + VectorRegSpec *vecSpec2 = GetMemoryPool()->New(otyp2); /* vector operand 2 */ + + MOperator mOp; + if (!isWide) { + if (isLow) { + mOp = IsUnsignedInteger(otyp1) ? MOP_vusublvuu : MOP_vssublvuu; + } else { + mOp = IsUnsignedInteger(otyp1) ? MOP_vusubl2vvv : MOP_vssubl2vvv; + } + } else { + if (isLow) { + mOp = IsUnsignedInteger(otyp1) ? MOP_vusubwvvu : MOP_vssubwvvu; + } else { + mOp = IsUnsignedInteger(otyp1) ? 
MOP_vusubw2vvv : MOP_vssubw2vvv; + } + } + VectorInsn &vInsn = GetInsnBuilder()->BuildVectorInsn(mOp, AArch64CG::kMd[mOp]); + (void)vInsn.AddOpndChain(*res).AddOpndChain(*o1).AddOpndChain(*o2); + (void)vInsn.PushRegSpecEntry(vecSpecDest); + (void)vInsn.PushRegSpecEntry(vecSpec1); + (void)vInsn.PushRegSpecEntry(vecSpec2); + GetCurBB()->AppendInsn(vInsn); + return res; +} + +void AArch64CGFunc::SelectVectorZip(PrimType rType, Operand *o1, Operand *o2) { + RegOperand *res1 = &CreateRegisterOperandOfType(rType); /* result operand 1 */ + RegOperand *res2 = &CreateRegisterOperandOfType(rType); /* result operand 2 */ + VectorRegSpec *vecSpecDest = GetMemoryPool()->New(rType); + VectorRegSpec *vecSpec1 = GetMemoryPool()->New(rType); /* vector operand 1 */ + VectorRegSpec *vecSpec2 = GetMemoryPool()->New(rType); /* vector operand 2 */ + + VectorInsn &vInsn1 = GetInsnBuilder()->BuildVectorInsn(MOP_vzip1vvv, AArch64CG::kMd[MOP_vzip1vvv]); + (void)vInsn1.AddOpndChain(*res1).AddOpndChain(*o1).AddOpndChain(*o2); + (void)vInsn1.PushRegSpecEntry(vecSpecDest); + (void)vInsn1.PushRegSpecEntry(vecSpec1); + (void)vInsn1.PushRegSpecEntry(vecSpec2); + GetCurBB()->AppendInsn(vInsn1); + + VectorInsn &vInsn2 = GetInsnBuilder()->BuildVectorInsn(MOP_vzip2vvv, AArch64CG::kMd[MOP_vzip2vvv]); + (void)vInsn2.AddOpndChain(*res2).AddOpndChain(*o1).AddOpndChain(*o2); + (void)vInsn2.PushRegSpecEntry(vecSpecDest); + (void)vInsn2.PushRegSpecEntry(vecSpec1); + (void)vInsn2.PushRegSpecEntry(vecSpec2); + GetCurBB()->AppendInsn(vInsn2); + + if (GetPrimTypeSize(rType) <= k16ByteSize) { + Operand *preg1 = &GetOrCreatePhysicalRegisterOperand(V0, k64BitSize, kRegTyFloat); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xvmovd, *preg1, *res1)); + Operand *preg2 = &GetOrCreatePhysicalRegisterOperand(V1, k64BitSize, kRegTyFloat); + GetCurBB()->AppendInsn(GetInsnBuilder()->BuildInsn(MOP_xvmovd, *preg2, *res2)); + } +} + +RegOperand *AArch64CGFunc::SelectVectorWiden(PrimType rType, Operand *o1, PrimType otyp, bool isLow) { + RegOperand *res = &CreateRegisterOperandOfType(rType); /* result operand */ + VectorRegSpec *vecSpecDest = GetMemoryPool()->New(rType); + VectorRegSpec *vecSpec1 = GetMemoryPool()->New(otyp); /* vector operand */ + + MOperator mOp; + if (isLow) { + mOp = IsPrimitiveUnSignedVector(rType) ? MOP_vuxtlvu : MOP_vsxtlvu; + } else { + mOp = IsPrimitiveUnSignedVector(rType) ? MOP_vuxtl2vv : MOP_vsxtl2vv; + } + VectorInsn &vInsn = GetInsnBuilder()->BuildVectorInsn(mOp, AArch64CG::kMd[mOp]); + (void)vInsn.AddOpndChain(*res).AddOpndChain(*o1); + (void)vInsn.PushRegSpecEntry(vecSpecDest); + (void)vInsn.PushRegSpecEntry(vecSpec1); + GetCurBB()->AppendInsn(vInsn); + return res; +} + +RegOperand *AArch64CGFunc::SelectVectorMovNarrow(PrimType rType, Operand *opnd, PrimType oType) { + RegOperand *res = &CreateRegisterOperandOfType(rType); /* result operand */ + VectorRegSpec *vecSpecDest = GetMemoryPool()->New(rType); + VectorRegSpec *vecSpec1 = GetMemoryPool()->New(oType); /* vector operand */ + + MOperator mOp = IsPrimitiveUnSignedVector(rType) ? MOP_vuqxtnuv : MOP_vsqxtnuv; + VectorInsn &insn = GetInsnBuilder()->BuildVectorInsn(mOp, AArch64CG::kMd[mOp]); + (void)insn.AddOpndChain(*res).AddOpndChain(*opnd); + (void)insn.PushRegSpecEntry(vecSpecDest); + (void)insn.PushRegSpecEntry(vecSpec1); + GetCurBB()->AppendInsn(insn); + return res; +} + +/* + * Check the distance between the first insn of BB with the lable(targ_labidx) + * and the insn with targ_id. If the distance greater than kShortBRDistance + * return false. 
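+ * The distance is taken as the absolute difference of the two insn ids, which
+ * serves as an approximation of the number of instructions between them.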
+ */ +bool AArch64CGFunc::DistanceCheck(const BB &bb, LabelIdx targLabIdx, uint32 targId) const { + for (auto *tBB : bb.GetSuccs()) { + if (tBB->GetLabIdx() != targLabIdx) { + continue; + } + Insn *tInsn = tBB->GetFirstInsn(); + while (tInsn == nullptr || !tInsn->IsMachineInstruction()) { + if (tInsn == nullptr) { + tBB = tBB->GetNext(); + if (tBB == nullptr) { /* tailcallopt may make the target block empty */ + return true; + } + tInsn = tBB->GetFirstInsn(); + } else { + tInsn = tInsn->GetNext(); + } + } + uint32 tmp = (tInsn->GetId() > targId) ? (tInsn->GetId() - targId) : (targId - tInsn->GetId()); + return (tmp < kShortBRDistance); + } + CHECK_FATAL(false, "CFG error"); +} +} /* namespace maplebe */ diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_color_ra.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_color_ra.cpp new file mode 100644 index 0000000000000000000000000000000000000000..c1ef39d578128be369a2668d66b9570438be6dfc --- /dev/null +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_color_ra.cpp @@ -0,0 +1,4888 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "aarch64_color_ra.h" +#include "cg.h" +#include "mir_lower.h" +#include "securec.h" +/* + * Based on concepts from Chow and Hennessey. + * Phases are as follows: + * Prepass to collect local BB information. + * Compute local register allocation demands for global RA. + * Compute live ranges. + * Live ranges LR represented by a vector of size #BBs. + * for each cross bb vreg, a bit is set in the vector. + * Build interference graph with basic block as granularity. + * When intersection of two LRs is not null, they interfere. + * Separate unconstrained and constrained LRs. + * unconstrained - LR with connect edges less than available colors. + * These LR can always be colored. + * constrained - not uncontrained. + * Split LR based on priority cost + * Repetitive adding BB from original LR to new LR until constrained. + * Update all LR the new LR interferes with. + * Color the new LR + * Each LR has a forbidden list, the registers cannot be assigned + * Coalesce move using preferred color first. + * Mark the remaining uncolorable LR after split as spill. + * Local register allocate. + * Emit and insert spills. + */ +namespace maplebe { +#define JAVALANG (cgFunc->GetMirModule().IsJavaModule()) +#define CLANG (cgFunc->GetMirModule().IsCModule()) + +constexpr uint32 kLoopWeight = 20; +constexpr uint32 kAdjustWeight = 2; +constexpr uint32 kInsnStep = 2; +constexpr uint32 kMaxSplitCount = 3; +constexpr uint32 kRematWeight = 3; +constexpr uint32 kPriorityDefThreashold = 1; +constexpr uint32 kPriorityUseThreashold = 5; +constexpr uint32 kPriorityBBThreashold = 1000; +constexpr float kPriorityRatioThreashold = 0.9; + +#define GCRA_DUMP CG_DEBUG_FUNC(*cgFunc) + +void LiveUnit::PrintLiveUnit() const { + LogInfo::MapleLogger() << "[" << begin << "," << end << "]" + << ""; + if (!hasCall) { + /* Too many calls, so only print when there is no call. 
*/ + LogInfo::MapleLogger() << " nc"; + } + if (needReload) { + LogInfo::MapleLogger() << " rlod"; + } + if (needRestore) { + LogInfo::MapleLogger() << " rstr"; + } +} + +template +void GraphColorRegAllocator::ForEachBBArrElem(const uint64 *vec, Func functor) const { + for (uint32 iBBArrElem = 0; iBBArrElem < bbBuckets; ++iBBArrElem) { + for (uint32 bBBArrElem = 0; bBBArrElem < kU64; ++bBBArrElem) { + if ((vec[iBBArrElem] & (1ULL << bBBArrElem)) != 0) { + functor(iBBArrElem * kU64 + bBBArrElem); + } + } + } +} + +template +void GraphColorRegAllocator::ForEachBBArrElemWithInterrupt(const uint64 *vec, Func functor) const { + for (uint32 iBBArrElem = 0; iBBArrElem < bbBuckets; ++iBBArrElem) { + for (uint32 bBBArrElem = 0; bBBArrElem < kU64; ++bBBArrElem) { + if ((vec[iBBArrElem] & (1ULL << bBBArrElem)) != 0) { + if (functor(iBBArrElem * kU64 + bBBArrElem)) { + return; + } + } + } + } +} + +template +void GraphColorRegAllocator::ForEachRegArrElem(const uint64 *vec, Func functor) const { + for (uint32 iBBArrElem = 0; iBBArrElem < regBuckets; ++iBBArrElem) { + for (uint32 bBBArrElem = 0; bBBArrElem < kU64; ++bBBArrElem) { + if ((vec[iBBArrElem] & (1ULL << bBBArrElem)) != 0) { + functor(iBBArrElem * kU64 + bBBArrElem); + } + } + } +} + +void GraphColorRegAllocator::PrintLiveUnitMap(const LiveRange &lr) const { + LogInfo::MapleLogger() << "\n\tlu:"; + for (uint32 i = 0; i < cgFunc->NumBBs(); ++i) { + if (!IsBitArrElemSet(lr.GetBBMember(), i)) { + continue; + } + auto lu = lr.GetLuMap().find(i); + if (lu != lr.GetLuMap().end() && ((lu->second->GetDefNum() > 0) || (lu->second->GetUseNum() > 0))) { + LogInfo::MapleLogger() << "(" << i << " "; + lu->second->PrintLiveUnit(); + LogInfo::MapleLogger() << ")"; + } + } + LogInfo::MapleLogger() << "\n"; +} + +void GraphColorRegAllocator::PrintLiveRangeConflicts(const LiveRange &lr) const { + LogInfo::MapleLogger() << "\n\tinterfere(" << lr.GetNumBBConflicts() << "): "; + for (uint32 i = 0; i < regBuckets; ++i) { + uint64 chunk = lr.GetBBConflictElem(i); + for (uint64 bit = 0; bit < kU64; ++bit) { + if ((chunk & (1ULL << bit)) > 0) { + regno_t newNO = i * kU64 + bit; + LogInfo::MapleLogger() << newNO << ","; + } + } + } + LogInfo::MapleLogger() << "\n"; +} + +void GraphColorRegAllocator::PrintLiveBBBit(const LiveRange &lr) const { + LogInfo::MapleLogger() << "live_bb(" << lr.GetNumBBMembers() << "): "; + for (uint32 i = 0; i < cgFunc->NumBBs(); ++i) { + if (IsBitArrElemSet(lr.GetBBMember(), i)) { + LogInfo::MapleLogger() << i << " "; + } + } + LogInfo::MapleLogger() << "\n"; +} + +void GraphColorRegAllocator::PrintLiveRange(const LiveRange &lr, const std::string &str) const { + LogInfo::MapleLogger() << str << "\n"; + + LogInfo::MapleLogger() << "R" << lr.GetRegNO(); + if (lr.GetRegType() == kRegTyInt) { + LogInfo::MapleLogger() << "(I)"; + } else if (lr.GetRegType() == kRegTyFloat) { + LogInfo::MapleLogger() << "(F)"; + } else { + LogInfo::MapleLogger() << "(U)"; + } + if (lr.GetSpillSize() == k32BitSize) { + LogInfo::MapleLogger() << "S32"; + } else if (lr.GetSpillSize() == k64BitSize) { + LogInfo::MapleLogger() << "S64"; + } else { + LogInfo::MapleLogger() << "S0(nodef)"; + } + LogInfo::MapleLogger() << "\tnumCall " << lr.GetNumCall(); + if (lr.GetCrossCall()) { + LogInfo::MapleLogger() << "\tcrossCall "; + } + LogInfo::MapleLogger() << "\tpriority " << lr.GetPriority(); + LogInfo::MapleLogger() << "\tforbidden: "; + for (regno_t preg = regInfo->GetInvalidReg(); preg < regInfo->GetAllRegNum(); preg++) { + if (lr.GetForbidden(preg)) { + 
LogInfo::MapleLogger() << preg << ","; + } + } + LogInfo::MapleLogger() << "\tcalldef: "; + for (regno_t preg = regInfo->GetInvalidReg(); preg < regInfo->GetAllRegNum(); preg++) { + if (lr.GetCallDef(preg)) { + LogInfo::MapleLogger() << preg << ","; + } + } + LogInfo::MapleLogger() << "\tpregveto: "; + for (regno_t preg = regInfo->GetInvalidReg(); preg < regInfo->GetAllRegNum(); preg++) { + if (lr.GetPregveto(preg)) { + LogInfo::MapleLogger() << preg << ","; + } + } + if (lr.IsSpilled()) { + LogInfo::MapleLogger() << " spilled"; + } + if (lr.GetSplitLr()) { + LogInfo::MapleLogger() << " split"; + } + LogInfo::MapleLogger() << "\top: " << kOpcodeInfo.GetName(lr.GetOp()); + LogInfo::MapleLogger() << "\n"; + PrintLiveBBBit(lr); + PrintLiveRangeConflicts(lr); + PrintLiveUnitMap(lr); + if (lr.GetSplitLr()) { + PrintLiveRange(*lr.GetSplitLr(), "===>Split LR"); + } +} + +void GraphColorRegAllocator::PrintLiveRanges() const { + LogInfo::MapleLogger() << "PrintLiveRanges: size = " << lrMap.size() << "\n"; + for (auto it : lrMap) { + PrintLiveRange(*it.second, ""); + } + LogInfo::MapleLogger() << "\n"; +} + +void GraphColorRegAllocator::PrintLocalRAInfo(const std::string &str) const { + LogInfo::MapleLogger() << str << "\n"; + for (uint32 id = 0; id < cgFunc->NumBBs(); ++id) { + LocalRaInfo *lraInfo = localRegVec[id]; + if (lraInfo == nullptr) { + continue; + } + LogInfo::MapleLogger() << "bb " << id << " def "; + for (const auto &defCntPair : lraInfo->GetDefCnt()) { + LogInfo::MapleLogger() << "[" << defCntPair.first << ":" << defCntPair.second << "],"; + } + LogInfo::MapleLogger() << "\n"; + LogInfo::MapleLogger() << "use "; + for (const auto &useCntPair : lraInfo->GetUseCnt()) { + LogInfo::MapleLogger() << "[" << useCntPair.first << ":" << useCntPair.second << "],"; + } + LogInfo::MapleLogger() << "\n"; + } +} + +void GraphColorRegAllocator::PrintBBAssignInfo() const { + for (size_t id = 0; id < bfs->sortedBBs.size(); ++id) { + uint32 bbID = bfs->sortedBBs[id]->GetId(); + BBAssignInfo *bbInfo = bbRegInfo[bbID]; + if (bbInfo == nullptr) { + continue; + } + LogInfo::MapleLogger() << "BBinfo(" << id << ")"; + LogInfo::MapleLogger() << " lra-needed " << bbInfo->GetLocalRegsNeeded(); + LogInfo::MapleLogger() << " greg-used "; + for (regno_t regNO = regInfo->GetInvalidReg(); regNO < regInfo->GetAllRegNum(); ++regNO) { + if (bbInfo->GetGlobalsAssigned(regNO)) { + LogInfo::MapleLogger() << regNO << ","; + } + } + LogInfo::MapleLogger() << "\n"; + } +} + +void GraphColorRegAllocator::CalculatePriority(LiveRange &lr) const { +#ifdef RANDOM_PRIORITY + unsigned long seed = 0; + size_t size = sizeof(seed); + std::ifstream randomNum("/dev/random", std::ios::in | std::ios::binary); + if (randomNum) { + randomNum.read(reinterpret_cast(&seed), size); + if (randomNum) { + lr.SetPriority(1 / (seed + 1)); + } + randomNum.close(); + } else { + std::cerr << "Failed to open /dev/urandom" << '\n'; + } + return; +#endif /* RANDOM_PRIORITY */ + float pri = 0.0; + uint32 bbNum = 0; + uint32 numDefs = 0; + uint32 numUses = 0; + CG *cg = cgFunc->GetCG(); + bool isSpSave = false; + + if (cgFunc->GetCG()->IsLmbc()) { + lr.SetRematLevel(kRematOff); + regno_t spSaveReg = cgFunc->GetSpSaveReg(); + if (spSaveReg && lr.GetRegNO() == spSaveReg) { + /* For lmbc, %fp and %sp are frame pointer and stack pointer respectively, unlike + * non-lmbc where %fp and %sp can be of the same. + * With alloca() potentially changing %sp, lmbc creates another register to act + * as %sp before alloca(). 
This register cannot be spilled as it is used to + * generate spill/fill instructions. + */ + isSpSave = true; + } + } else { + if (cg->GetRematLevel() >= kRematConst && lr.IsRematerializable(*cgFunc, kRematConst)) { + lr.SetRematLevel(kRematConst); + } else if (cg->GetRematLevel() >= kRematAddr && lr.IsRematerializable(*cgFunc, kRematAddr)) { + lr.SetRematLevel(kRematAddr); + } else if (cg->GetRematLevel() >= kRematDreadLocal && + lr.IsRematerializable(*cgFunc, kRematDreadLocal)) { + lr.SetRematLevel(kRematDreadLocal); + } else if (cg->GetRematLevel() >= kRematDreadGlobal && + lr.IsRematerializable(*cgFunc, kRematDreadGlobal)) { + lr.SetRematLevel(kRematDreadGlobal); + } + } + + auto calculatePriorityFunc = [&lr, &bbNum, &numDefs, &numUses, &pri, this] (uint32 bbID) { + auto lu = lr.FindInLuMap(bbID); + ASSERT(lu != lr.EndOfLuMap(), "can not find live unit"); + BB *bb = bbVec[bbID]; + if (bb->GetFirstInsn() != nullptr && !bb->IsSoloGoto()) { + ++bbNum; + numDefs += lu->second->GetDefNum(); + numUses += lu->second->GetUseNum(); + uint32 useCnt = lu->second->GetDefNum() + lu->second->GetUseNum(); + uint32 mult; +#ifdef USE_BB_FREQUENCY + mult = bb->GetFrequency(); +#else /* USE_BB_FREQUENCY */ + if (bb->GetLoop() != nullptr) { + uint32 loopFactor; + if (lr.GetNumCall() > 0 && lr.GetRematLevel() == kRematOff) { + loopFactor = bb->GetLoop()->GetLoopLevel() * kAdjustWeight; + } else { + loopFactor = bb->GetLoop()->GetLoopLevel() / kAdjustWeight; + } + mult = static_cast(pow(kLoopWeight, loopFactor)); + } else { + mult = 1; + } +#endif /* USE_BB_FREQUENCY */ + pri += useCnt * mult; + } + }; + ForEachBBArrElem(lr.GetBBMember(), calculatePriorityFunc); + + if (lr.GetRematLevel() == kRematAddr || lr.GetRematLevel() == kRematConst) { + if (numDefs <= 1 && numUses <= 1) { + pri = -0xFFFF; + } else { + pri /= kRematWeight; + } + } else if (lr.GetRematLevel() == kRematDreadLocal) { + pri /= 4; + } else if (lr.GetRematLevel() == kRematDreadGlobal) { + pri /= 2; + } + + lr.SetPriority(pri); + lr.SetNumDefs(numDefs); + lr.SetNumUses(numUses); + if (isSpSave) { + lr.SetPriority(MAXFLOAT); + lr.SetIsSpSave(); + return; + } + if (lr.GetPriority() > 0 && numDefs <= kPriorityDefThreashold && numUses <= kPriorityUseThreashold && + cgFunc->NumBBs() > kPriorityBBThreashold && + (static_cast(lr.GetNumBBMembers()) / cgFunc->NumBBs()) > kPriorityRatioThreashold) { + /* for large functions, delay allocating long LR with few defs and uses */ + lr.SetPriority(0.0); + } +} + +void GraphColorRegAllocator::PrintBBs() const { + for (auto *bb : bfs->sortedBBs) { + LogInfo::MapleLogger() << "\n< === > "; + LogInfo::MapleLogger() << bb->GetId(); + LogInfo::MapleLogger() << " succs:"; + for (auto *succBB : bb->GetSuccs()) { + LogInfo::MapleLogger() << " " << succBB->GetId(); + } + LogInfo::MapleLogger() << " eh_succs:"; + for (auto *succBB : bb->GetEhSuccs()) { + LogInfo::MapleLogger() << " " << succBB->GetId(); + } + } + LogInfo::MapleLogger() << "\n"; +} + +void GraphColorRegAllocator::InitFreeRegPool() { + /* + * ==== int regs ==== + * FP 29, LR 30, SP 31, 0 to 7 parameters + + * MapleCG defines 32 as ZR (zero register) + * use 8 if callee does not return large struct ? 
No + * 16 and 17 are intra-procedure call temp, can be caller saved + * 18 is platform reg, still use it + */ + uint32 intNum = 0; + uint32 fpNum = 0; + for (regno_t regNO : regInfo->GetAllRegs()) { + if (!regInfo->IsAvailableReg(regNO)) { + continue; + } + + /* + * Because of the try-catch scenario in JAVALANG, + * we should use specialized spill register to prevent register changes when exceptions occur. + */ + if (JAVALANG && regInfo->IsSpillRegInRA(regNO, needExtraSpillReg)) { + if (regInfo->IsGPRegister(regNO)) { + /* Preset int spill registers */ + (void)intSpillRegSet.insert(regNO); + } else { + /* Preset float spill registers */ + (void)fpSpillRegSet.insert(regNO); + } + continue; + } + +#ifdef RESERVED_REGS + if (regInfo->IsReservedReg(regNO, doMultiPass)) { + continue; + } +#endif /* RESERVED_REGS */ + + if (regInfo->IsGPRegister(regNO)) { + /* when yieldpoint is enabled, x19 is reserved. */ + if (regInfo->IsYieldPointReg(regNO)) { + continue; + } + if (regInfo->IsCalleeSavedReg(regNO)) { + (void)intCalleeRegSet.insert(regNO); + } else { + (void)intCallerRegSet.insert(regNO); + } + ++intNum; + } else { + if (regInfo->IsCalleeSavedReg(regNO)) { + (void)fpCalleeRegSet.insert(regNO); + } else { + (void)fpCallerRegSet.insert(regNO); + } + ++fpNum; + } + } + intRegNum = intNum; + fpRegNum = fpNum; +} + +/* + * Based on live analysis, the live-in and live-out set determines + * the bit to be set in the LR vector, which is of size #BBs. + * If a vreg is in the live-in and live-out set, it is live in the BB. + * + * Also keep track if a LR crosses a call. If a LR crosses a call, it + * interferes with all caller saved registers. Add all caller registers + * to the LR's forbidden list. + * + * Return created LiveRange object + * + * maybe need extra info: + * Add info for setjmp. + * Add info for defBB, useBB, index in BB for def and use + * Add info for startingBB and endingBB + */ +LiveRange *GraphColorRegAllocator::NewLiveRange() { + LiveRange *lr = memPool->New(regInfo->GetAllRegNum(), alloc); + + if (bbBuckets == 0) { + bbBuckets = (cgFunc->NumBBs() / kU64) + 1; + } + lr->SetBBBuckets(bbBuckets); + lr->InitBBMember(*memPool, bbBuckets); + if (regBuckets == 0) { + regBuckets = (cgFunc->GetMaxRegNum() / kU64) + 1; + } + lr->SetRegBuckets(regBuckets); + lr->InitBBConflict(*memPool, regBuckets); + lr->InitPregveto(); + lr->InitForbidden(); + lr->SetRematerializer(cgFunc->GetCG()->CreateRematerializer(*memPool)); + return lr; +} + +/* Create local info for LR. return true if reg is not local. */ +bool GraphColorRegAllocator::CreateLiveRangeHandleLocal(regno_t regNO, const BB &bb, bool isDef) { + if (FindIn(bb.GetLiveInRegNO(), regNO) || FindIn(bb.GetLiveOutRegNO(), regNO)) { + return true; + } + /* + * register not in globals for the bb, so it is local. + * Compute local RA info. + */ + LocalRaInfo *lraInfo = localRegVec[bb.GetId()]; + if (lraInfo == nullptr) { + lraInfo = memPool->New(alloc); + localRegVec[bb.GetId()] = lraInfo; + } + if (isDef) { + /* movk is handled by different id for use/def in the same insn. 
*/ + lraInfo->SetDefCntElem(regNO, lraInfo->GetDefCntElem(regNO) + 1); + } else { + lraInfo->SetUseCntElem(regNO, lraInfo->GetUseCntElem(regNO) + 1); + } + /* lr info is useful for lra, so continue lr info */ + return false; +} + +LiveRange *GraphColorRegAllocator::CreateLiveRangeAllocateAndUpdate(regno_t regNO, const BB &bb, bool isDef, + uint32 currId) { + LiveRange *lr = GetLiveRange(regNO); + if (lr == nullptr) { + lr = NewLiveRange(); + lr->SetID(currId); + + LiveUnit *lu = memPool->New(); + lr->SetElemToLuMap(bb.GetId(), *lu); + lu->SetBegin(currId); + lu->SetEnd(currId); + if (isDef) { + /* means no use after def for reg, chances for ebo opt */ + for (const auto &pregNO : pregLive) { + lr->InsertElemToPregveto(pregNO); + } + } + } else { + LiveUnit *lu = lr->GetLiveUnitFromLuMap(bb.GetId()); + if (lu == nullptr) { + lu = memPool->New(); + lr->SetElemToLuMap(bb.GetId(), *lu); + lu->SetBegin(currId); + lu->SetEnd(currId); + } + if (lu->GetBegin() > currId) { + lu->SetBegin(currId); + } + } + + if (CLANG) { + MIRPreg *preg = cgFunc->GetPseudoRegFromVirtualRegNO(regNO, CGOptions::DoCGSSA()); + if (preg) { + switch (preg->GetOp()) { + case OP_constval: + lr->SetRematerializable(preg->rematInfo.mirConst); + break; + case OP_addrof: + case OP_dread: + lr->SetRematerializable(preg->GetOp(), preg->rematInfo.sym, + preg->fieldID, preg->addrUpper); + break; + case OP_undef: + break; + default: + ASSERT(false, "Unexpected op in Preg"); + } + } + } + + return lr; +} + +void GraphColorRegAllocator::CreateLiveRange(regno_t regNO, const BB &bb, bool isDef, uint32 currId, bool updateCount) { + bool isNonLocal = CreateLiveRangeHandleLocal(regNO, bb, isDef); + + if (!isDef) { + --currId; + } + + LiveRange *lr = CreateLiveRangeAllocateAndUpdate(regNO, bb, isDef, currId); + lr->SetRegNO(regNO); + lr->SetIsNonLocal(isNonLocal); + if (isDef) { + (void)vregLive.erase(regNO); +#ifdef OPTIMIZE_FOR_PROLOG + if (doOptProlog && updateCount) { + if (lr->GetNumDefs() == 0) { + lr->SetFrequency(lr->GetFrequency() + bb.GetFrequency()); + } + lr->IncNumDefs(); + } +#endif /* OPTIMIZE_FOR_PROLOG */ + } else { + (void)vregLive.insert(regNO); +#ifdef OPTIMIZE_FOR_PROLOG + if (doOptProlog && updateCount) { + if (lr->GetNumUses() == 0) { + lr->SetFrequency(lr->GetFrequency() + bb.GetFrequency()); + } + lr->IncNumUses(); + } +#endif /* OPTIMIZE_FOR_PROLOG */ + } + for (const auto &pregNO : pregLive) { + lr->InsertElemToPregveto(pregNO); + } + + /* only handle it in live_in and def point? 
*/ + uint32 bbID = bb.GetId(); + lr->SetMemberBitArrElem(bbID); + + lrMap[regNO] = lr; +} + +bool GraphColorRegAllocator::SetupLiveRangeByOpHandlePhysicalReg(const RegOperand ®Opnd, Insn &insn, + regno_t regNO, bool isDef) { + if (!regOpnd.IsPhysicalRegister()) { + return false; + } + LocalRaInfo *lraInfo = localRegVec[insn.GetBB()->GetId()]; + if (lraInfo == nullptr) { + lraInfo = memPool->New(alloc); + localRegVec[insn.GetBB()->GetId()] = lraInfo; + } + + if (isDef) { + if (FindNotIn(pregLive, regNO)) { + for (const auto &vRegNO : vregLive) { + if (regInfo->IsUnconcernedReg(vRegNO)) { + continue; + } + lrMap[vRegNO]->InsertElemToPregveto(regNO); + } + } + pregLive.erase(regNO); + if (lraInfo != nullptr) { + lraInfo->SetDefCntElem(regNO, lraInfo->GetDefCntElem(regNO) + 1); + } + } else { + (void)pregLive.insert(regNO); + for (const auto &vregNO : vregLive) { + if (regInfo->IsUnconcernedReg(vregNO)) { + continue; + } + LiveRange *lr = lrMap[vregNO]; + lr->InsertElemToPregveto(regNO); + } + + if (lraInfo != nullptr) { + lraInfo->SetUseCntElem(regNO, lraInfo->GetUseCntElem(regNO) + 1); + } + } + return true; +} + +/* + * add pregs to forbidden list of lr. If preg is in + * the live list, then it is forbidden for other vreg on the list. + */ +void GraphColorRegAllocator::SetupLiveRangeByOp(Operand &op, Insn &insn, bool isDef, uint32 &numUses) { + if (!op.IsRegister()) { + return; + } + auto ®Opnd = static_cast(op); + uint32 regNO = regOpnd.GetRegisterNumber(); + if (regInfo->IsUnconcernedReg(regOpnd)) { + if (GetLiveRange(regNO) != nullptr) { + ASSERT(false, "Unconcerned reg"); + lrMap.erase(regNO); + } + return; + } + if (SetupLiveRangeByOpHandlePhysicalReg(regOpnd, insn, regNO, isDef)) { + return; + } + + CreateLiveRange(regNO, *insn.GetBB(), isDef, insn.GetId(), true); + + LiveRange *lr = GetLiveRange(regNO); + ASSERT(lr != nullptr, "lr should not be nullptr"); + if (isDef) { + lr->SetSpillSize((regOpnd.GetSize() <= k32BitSize) ? k32BitSize : k64BitSize); + } + if (lr->GetRegType() == kRegTyUndef) { + lr->SetRegType(regOpnd.GetRegisterType()); + } + if (isDef) { + lr->GetLiveUnitFromLuMap(insn.GetBB()->GetId())->IncDefNum(); + lr->AddRef(insn.GetBB()->GetId(), insn.GetId(), kIsDef); + } else { + lr->GetLiveUnitFromLuMap(insn.GetBB()->GetId())->IncUseNum(); + lr->AddRef(insn.GetBB()->GetId(), insn.GetId(), kIsUse); + ++numUses; + } +#ifdef MOVE_COALESCE + if (insn.IsIntRegisterMov()) { + RegOperand &opnd1 = static_cast(insn.GetOperand(1)); + if (!regInfo->IsVirtualRegister(opnd1.GetRegisterNumber()) && + !regInfo->IsUnconcernedReg(opnd1)) { + lr->InsertElemToPrefs(opnd1.GetRegisterNumber()); + } + RegOperand &opnd0 = static_cast(insn.GetOperand(0)); + if (!regInfo->IsVirtualRegister(opnd0.GetRegisterNumber())) { + lr->InsertElemToPrefs(opnd0.GetRegisterNumber()); + } + } +#endif /* MOVE_COALESCE */ + if (!insn.IsSpecialIntrinsic() && insn.GetBothDefUseOpnd() != kInsnMaxOpnd) { + lr->SetDefUse(); + } +} + +/* handle live range for bb->live_out */ +void GraphColorRegAllocator::SetupLiveRangeByRegNO(regno_t liveOut, BB &bb, uint32 currPoint) { + if (regInfo->IsUnconcernedReg(liveOut)) { + return; + } + if (regInfo->IsVirtualRegister(liveOut)) { + (void)vregLive.insert(liveOut); + CreateLiveRange(liveOut, bb, false, currPoint, false); + return; + } + + (void)pregLive.insert(liveOut); + for (const auto &vregNO : vregLive) { + LiveRange *lr = lrMap[vregNO]; + lr->InsertElemToPregveto(liveOut); + } + + /* See if phys reg is livein also. Then assume it span the entire bb. 
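+ * In that case the preg's local use count is saturated to kMaxUint16 below, so the local allocator
+ * will never hand this register out to a local vreg inside the bb.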
*/ + if (!FindIn(bb.GetLiveInRegNO(), liveOut)) { + return; + } + LocalRaInfo *lraInfo = localRegVec[bb.GetId()]; + if (lraInfo == nullptr) { + lraInfo = memPool->New(alloc); + localRegVec[bb.GetId()] = lraInfo; + } + /* Make it a large enough so no locals can be allocated. */ + lraInfo->SetUseCntElem(liveOut, kMaxUint16); +} + +void GraphColorRegAllocator::ClassifyOperand(std::unordered_set &pregs, std::unordered_set &vregs, + const Operand &opnd) const { + if (!opnd.IsRegister()) { + return; + } + auto ®Opnd = static_cast(opnd); + regno_t regNO = regOpnd.GetRegisterNumber(); + if (regInfo->IsUnconcernedReg(regNO)) { + return; + } + if (regOpnd.IsPhysicalRegister()) { + (void)pregs.insert(regNO); + } else { + (void)vregs.insert(regNO); + } +} + +void GraphColorRegAllocator::SetOpndConflict(const Insn &insn, bool onlyDef) { + uint32 opndNum = insn.GetOperandSize(); + if (opndNum <= 1) { + return; + } + const InsnDesc *md = insn.GetDesc(); + std::unordered_set pregs; + std::unordered_set vregs; + + for (uint32 i = 0; i < opndNum; ++i) { + Operand &opnd = insn.GetOperand(i); + if (!onlyDef) { + if (opnd.IsList()) { + auto &listOpnd = static_cast(opnd); + for (auto &op : listOpnd.GetOperands()) { + ClassifyOperand(pregs, vregs, *op); + } + } else if (opnd.IsMemoryAccessOperand()) { + auto &memOpnd = static_cast(opnd); + Operand *base = memOpnd.GetBaseRegister(); + Operand *offset = memOpnd.GetIndexRegister(); + if (base != nullptr) { + ClassifyOperand(pregs, vregs, *base); + } + if (offset != nullptr) { + ClassifyOperand(pregs, vregs, *offset); + } + } else if (opnd.IsRegister()) { + ClassifyOperand(pregs, vregs, opnd); + } + } else { + if (md->GetOpndDes(i)->IsRegDef()) { + ClassifyOperand(pregs, vregs, opnd); + } + if (opnd.IsMemoryAccessOperand()) { + auto &memOpnd = static_cast(opnd); + Operand *base = memOpnd.GetBaseRegister(); + if (base != nullptr && !memOpnd.IsIntactIndexed()) { + ClassifyOperand(pregs, vregs, *base); + } + } + } + } + + if (vregs.empty()) { + return; + } + /* Set BBConflict and Pregveto */ + for (regno_t vregNO : vregs) { + for (regno_t conflictVregNO : vregs) { + if (conflictVregNO != vregNO) { + lrMap[vregNO]->SetConflictBitArrElem(conflictVregNO); + } + } + for (regno_t conflictPregNO : pregs) { + lrMap[vregNO]->InsertElemToPregveto(conflictPregNO); + } + } +} + +void GraphColorRegAllocator::UpdateOpndConflict(const Insn &insn, bool multiDef) { + /* if IsSpecialIntrinsic or IsAtomicStore, set conflicts for all opnds */ + if (insn.IsAtomicStore() || insn.IsSpecialIntrinsic()) { + SetOpndConflict(insn, false); + return; + } + if (multiDef) { + SetOpndConflict(insn, true); + } +} + +void GraphColorRegAllocator::ComputeLiveRangesForEachDefOperand(Insn &insn, bool &multiDef) { + uint32 numDefs = 0; + uint32 numUses = 0; + const InsnDesc *md = insn.GetDesc(); + uint32 opndNum = insn.GetOperandSize(); + for (uint32 i = 0; i < opndNum; ++i) { + if (insn.IsAsmInsn() && (i == kAsmOutputListOpnd || i == kAsmClobberListOpnd)) { + for (auto &opnd : static_cast(insn.GetOperand(i)).GetOperands()) { + SetupLiveRangeByOp(*opnd, insn, true, numUses); + ++numDefs; + } + continue; + } + Operand &opnd = insn.GetOperand(i); + if (opnd.IsMemoryAccessOperand()) { + auto &memOpnd = static_cast(opnd); + if (!memOpnd.IsIntactIndexed()) { + SetupLiveRangeByOp(opnd, insn, true, numUses); + ++numDefs; + } + } + if (!md->GetOpndDes(i)->IsRegDef()) { + continue; + } + SetupLiveRangeByOp(opnd, insn, true, numUses); + ++numDefs; + } + ASSERT(numUses == 0, "should only be def opnd"); + if (numDefs 
> 1) { + multiDef = true; + needExtraSpillReg = true; + } +} + +void GraphColorRegAllocator::ComputeLiveRangesForEachUseOperand(Insn &insn) { + uint32 numUses = 0; + const InsnDesc *md = insn.GetDesc(); + uint32 opndNum = insn.GetOperandSize(); + for (uint32 i = 0; i < opndNum; ++i) { + if (insn.IsAsmInsn() && i == kAsmInputListOpnd) { + for (auto &opnd : static_cast(insn.GetOperand(i)).GetOperands()) { + SetupLiveRangeByOp(*opnd, insn, false, numUses); + } + continue; + } + if (md->GetOpndDes(i)->IsRegDef() && !md->GetOpndDes(i)->IsRegUse()) { + continue; + } + Operand &opnd = insn.GetOperand(i); + if (opnd.IsList()) { + auto &listOpnd = static_cast(opnd); + for (auto &op : listOpnd.GetOperands()) { + SetupLiveRangeByOp(*op, insn, false, numUses); + } + } else if (opnd.IsMemoryAccessOperand()) { + auto &memOpnd = static_cast(opnd); + Operand *base = memOpnd.GetBaseRegister(); + Operand *offset = memOpnd.GetIndexRegister(); + if (base != nullptr) { + SetupLiveRangeByOp(*base, insn, false, numUses); + } + if (offset != nullptr) { + SetupLiveRangeByOp(*offset, insn, false, numUses); + } + } else { + SetupLiveRangeByOp(opnd, insn, false, numUses); + } + } + if (numUses >= regInfo->GetNormalUseOperandNum()) { + needExtraSpillReg = true; + } +} + +void GraphColorRegAllocator::ComputeLiveRangesUpdateIfInsnIsCall(const Insn &insn) { + if (!insn.IsCall()) { + return; + } + /* def the return value */ + for (uint32 i = 0; i < regInfo->GetIntRetRegsNum(); ++i) { + pregLive.erase(regInfo->GetIntRetReg(i)); + } + for (uint32 i = 0; i < regInfo->GetFloatRegsParmsNum(); ++i) { + pregLive.erase(regInfo->GetFpRetReg(i)); + } + + /* active the parametes */ + Operand &opnd1 = insn.GetOperand(1); + if (opnd1.IsList()) { + auto &srcOpnds = static_cast(opnd1); + for (auto ®Opnd : srcOpnds.GetOperands()) { + ASSERT(!regOpnd->IsVirtualRegister(), "not be a virtual register"); + auto physicalReg = regOpnd->GetRegisterNumber(); + (void)pregLive.insert(physicalReg); + } + } +} + +void GraphColorRegAllocator::ComputeLiveRangesUpdateLiveUnitInsnRange(BB &bb, uint32 currPoint) { + for (auto lin : bb.GetLiveInRegNO()) { + if (!regInfo->IsVirtualRegister(lin)) { + continue; + } + LiveRange *lr = GetLiveRange(lin); + if (lr == nullptr) { + continue; + } + auto lu = lr->FindInLuMap(bb.GetId()); + ASSERT(lu != lr->EndOfLuMap(), "container empty check"); + if (bb.GetFirstInsn()) { + lu->second->SetBegin(bb.GetFirstInsn()->GetId()); + } else { + /* since bb is empty, then use pointer as is */ + lu->second->SetBegin(currPoint); + } + lu->second->SetBegin(lu->second->GetBegin() - 1); + } +} + +bool GraphColorRegAllocator::UpdateInsnCntAndSkipUseless(Insn &insn, uint32 &currPoint) const { + insn.SetId(currPoint); + if (insn.IsImmaterialInsn() || !insn.IsMachineInstruction()) { + --currPoint; + return true; + } + return false; +} + +void GraphColorRegAllocator::UpdateCallInfo(uint32 bbId, uint32 currPoint, const Insn &insn) { + auto *targetOpnd = insn.GetCallTargetOperand(); + CHECK_FATAL(targetOpnd != nullptr, "target is null in Insn::IsCallToFunctionThatNeverReturns"); + if (CGOptions::DoIPARA() && targetOpnd->IsFuncNameOpnd()) { + FuncNameOperand *target = static_cast(targetOpnd); + const MIRSymbol *funcSt = target->GetFunctionSymbol(); + ASSERT(funcSt->GetSKind() == kStFunc, "funcst must be a function name symbol"); + MIRFunction *func = funcSt->GetFunction(); + if (func != nullptr && func->IsReferedRegsValid()) { + for (auto preg : func->GetReferedRegs()) { + if (!regInfo->IsCalleeSavedReg(preg)) { + for (auto vregNO : 
vregLive) { + LiveRange *lr = lrMap[vregNO]; + lr->InsertElemToCallDef(preg); + } + } + } + } else { + for (auto vregNO : vregLive) { + LiveRange *lr = lrMap[vregNO]; + lr->SetCrossCall(); + } + } + } else { + for (auto vregNO : vregLive) { + LiveRange *lr = lrMap[vregNO]; + lr->SetCrossCall(); + } + } + for (auto vregNO : vregLive) { + LiveRange *lr = lrMap[vregNO]; + lr->IncNumCall(); + lr->AddRef(bbId, currPoint, kIsCall); + + MapleMap::const_iterator lu = lr->FindInLuMap(bbId); + if (lu != lr->EndOfLuMap()) { + lu->second->SetHasCall(true); + } + } +} + +void GraphColorRegAllocator::SetLrMustAssign(const RegOperand *regOpnd) { + regno_t regNO = regOpnd->GetRegisterNumber(); + LiveRange *lr = GetLiveRange(regNO); + if (lr != nullptr) { + lr->SetMustAssigned(); + lr->SetIsNonLocal(true); + } +} + +void GraphColorRegAllocator::SetupMustAssignedLiveRanges(const Insn &insn) { + if (!insn.IsSpecialIntrinsic()) { + return; + } + if (insn.IsAsmInsn()) { + for (auto ®Opnd : static_cast(insn.GetOperand(kAsmOutputListOpnd)).GetOperands()) { + SetLrMustAssign(regOpnd); + } + for (auto ®Opnd : static_cast(insn.GetOperand(kAsmInputListOpnd)).GetOperands()) { + SetLrMustAssign(regOpnd); + } + return; + } + uint32 opndNum = insn.GetOperandSize(); + for (uint32 i = 0; i < opndNum; ++i) { + Operand *opnd = &insn.GetOperand(i); + if (!opnd->IsRegister()) { + continue; + } + auto regOpnd = static_cast(opnd); + SetLrMustAssign(regOpnd); + } +} + +/* + * For each succ bb->GetSuccs(), if bb->liveout - succ->livein is not empty, the vreg(s) is + * dead on this path (but alive on the other path as there is some use of it on the + * other path). This might be useful for optimization of reload placement later for + * splits (lr split into lr1 & lr2 and lr2 will need to reload.) + * Not for now though. 
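+ *
+ * Illustrative sketch only (not implemented in this pass, and it assumes the live-in/live-out sets are
+ * kept sorted so std::set_difference applies): the per-edge dead set described above could be computed as
+ *   std::set<regno_t> deadOnEdge;
+ *   for (auto *succ : bb->GetSuccs()) {
+ *     deadOnEdge.clear();
+ *     std::set_difference(bb->GetLiveOutRegNO().begin(), bb->GetLiveOutRegNO().end(),
+ *                         succ->GetLiveInRegNO().begin(), succ->GetLiveInRegNO().end(),
+ *                         std::inserter(deadOnEdge, deadOnEdge.begin()));
+ *     if (!deadOnEdge.empty()) { ... vreg(s) in deadOnEdge are dead along the edge bb to succ ... }
+ *   }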
+ */ +void GraphColorRegAllocator::ComputeLiveRanges() { + bbVec.clear(); + bbVec.resize(cgFunc->NumBBs()); + + auto currPoint = + static_cast(cgFunc->GetTotalNumberOfInstructions() + bfs->sortedBBs.size()); + /* distinguish use/def */ + CHECK_FATAL(currPoint < (INT_MAX >> 2), "integer overflow check"); + currPoint = currPoint << 2; + for (size_t bbIdx = bfs->sortedBBs.size(); bbIdx > 0; --bbIdx) { + BB *bb = bfs->sortedBBs[bbIdx - 1]; + bbVec[bb->GetId()] = bb; + bb->SetLevel(bbIdx - 1); + + pregLive.clear(); + vregLive.clear(); + for (auto liveOut : bb->GetLiveOutRegNO()) { + SetupLiveRangeByRegNO(liveOut, *bb, currPoint); + } + --currPoint; + + if (bb->GetLastInsn() != nullptr && bb->GetLastInsn()->IsMachineInstruction() && bb->GetLastInsn()->IsCall()) { + UpdateCallInfo(bb->GetId(), currPoint, *bb->GetLastInsn()); + } + + FOR_BB_INSNS_REV_SAFE(insn, bb, ninsn) { +#ifdef MOVE_COALESCE + if (insn->IsIntRegisterMov() && + (regInfo->IsVirtualRegister(static_cast( + insn->GetOperand(0)).GetRegisterNumber())) && + (static_cast(insn->GetOperand(0)).GetRegisterNumber() == + static_cast(insn->GetOperand(1)).GetRegisterNumber())) { + bb->RemoveInsn(*insn); + continue; + } +#endif + if (UpdateInsnCntAndSkipUseless(*insn, currPoint)) { + if (ninsn && ninsn->IsMachineInstruction() && ninsn->IsCall()) { + UpdateCallInfo(bb->GetId(), currPoint, *ninsn); + } + continue; + } + + bool multiDef = false; + ComputeLiveRangesForEachDefOperand(*insn, multiDef); + ComputeLiveRangesForEachUseOperand(*insn); + + UpdateOpndConflict(*insn, multiDef); + SetupMustAssignedLiveRanges(*insn); + + if (ninsn && ninsn->IsMachineInstruction() && ninsn->IsCall()) { + UpdateCallInfo(bb->GetId(), currPoint - kInsnStep, *ninsn); + } + + ComputeLiveRangesUpdateIfInsnIsCall(*insn); + /* distinguish use/def */ + currPoint -= 2; + } + ComputeLiveRangesUpdateLiveUnitInsnRange(*bb, currPoint); + /* move one more step for each BB */ + --currPoint; + } + + if (GCRA_DUMP) { + LogInfo::MapleLogger() << "After ComputeLiveRanges\n"; + PrintLiveRanges(); +#ifdef USE_LRA + if (doLRA) { + PrintLocalRAInfo("After ComputeLiveRanges"); + } +#endif /* USE_LRA */ + } +} + +/* Create a common stack space for spilling with need_spill */ +MemOperand *GraphColorRegAllocator::CreateSpillMem(uint32 spillIdx, SpillMemCheck check) { + if (spillIdx >= spillMemOpnds.size()) { + return nullptr; + } + + if (operandSpilled[spillIdx]) { + /* For this insn, spill slot already used, need to find next available slot. 
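+ * The search below walks slots spillIdx + 1 .. kSpillMemOpndNum - 1 and takes the first unused one;
+ * if every slot is already in use it aborts via CHECK_FATAL.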
*/ + uint32 i; + for (i = spillIdx + 1; i < kSpillMemOpndNum; ++i) { + if (!operandSpilled[i]) { + break; + } + } + CHECK_FATAL(i < kSpillMemOpndNum, "no more available spill mem slot"); + spillIdx = i; + } + if (check == kSpillMemPost) { + operandSpilled[spillIdx] = true; + } + + if (spillMemOpnds[spillIdx] == nullptr) { + regno_t reg = cgFunc->NewVReg(kRegTyInt, sizeof(int64)); + spillMemOpnds[spillIdx] = cgFunc->GetOrCreatSpillMem(reg); + } + return spillMemOpnds[spillIdx]; +} + +bool GraphColorRegAllocator::IsLocalReg(regno_t regNO) const { + LiveRange *lr = GetLiveRange(regNO); + if (lr == nullptr) { + LogInfo::MapleLogger() << "unexpected regNO" << regNO; + return true; + } + return IsLocalReg(*lr); +} + +bool GraphColorRegAllocator::IsLocalReg(const LiveRange &lr) const { + return !lr.GetSplitLr() && (lr.GetNumBBMembers() == 1) && !lr.IsNonLocal(); +} + +bool GraphColorRegAllocator::CheckOverlap(uint64 val, uint32 i, LiveRange &lr1, LiveRange &lr2) const { + regno_t lr1RegNO = lr1.GetRegNO(); + regno_t lr2RegNO = lr2.GetRegNO(); + for (uint32 x = 0; x < kU64; ++x) { + if ((val & (1ULL << x)) != 0) { + uint32 lastBitSet = i * kU64 + x; + /* + * begin and end should be in the bb info (LU) + * Need to rethink this if. + * Under some circumstance, lr->begin can occur after lr->end. + */ + auto lu1 = lr1.FindInLuMap(lastBitSet); + auto lu2 = lr2.FindInLuMap(lastBitSet); + if (lu1 != lr1.EndOfLuMap() && lu2 != lr2.EndOfLuMap() && + !((lu1->second->GetBegin() < lu2->second->GetBegin() && lu1->second->GetEnd() < lu2->second->GetBegin()) || + (lu2->second->GetBegin() < lu1->second->GetEnd() && lu2->second->GetEnd() < lu1->second->GetBegin()))) { + lr1.SetConflictBitArrElem(lr2RegNO); + lr2.SetConflictBitArrElem(lr1RegNO); + return true; + } + } + } + return false; +} + +void GraphColorRegAllocator::CheckInterference(LiveRange &lr1, LiveRange &lr2) const { + uint64 bitArr[bbBuckets]; + for (uint32 i = 0; i < bbBuckets; ++i) { + bitArr[i] = lr1.GetBBMember()[i] & lr2.GetBBMember()[i]; + } + + for (uint32 i = 0; i < bbBuckets; ++i) { + uint64 val = bitArr[i]; + if (val == 0) { + continue; + } + if (CheckOverlap(val, i, lr1, lr2)) { + break; + } + } +} + +void GraphColorRegAllocator::BuildInterferenceGraphSeparateIntFp(std::vector &intLrVec, + std::vector &fpLrVec) { + for (auto &it : std::as_const(lrMap)) { + LiveRange *lr = it.second; + if (lr->GetRegNO() == 0) { + continue; + } +#ifdef USE_LRA + if (doLRA && IsLocalReg(*lr)) { + continue; + } +#endif /* USE_LRA */ + if (lr->GetRegType() == kRegTyInt) { + intLrVec.emplace_back(lr); + } else if (lr->GetRegType() == kRegTyFloat) { + fpLrVec.emplace_back(lr); + } else { + ASSERT(false, "Illegal regType in BuildInterferenceGraph"); + LogInfo::MapleLogger() << "error: Illegal regType in BuildInterferenceGraph\n"; + } + } +} + +/* + * Based on intersection of LRs. When two LRs interfere, add to each other's + * interference list. + */ +void GraphColorRegAllocator::BuildInterferenceGraph() { + std::vector intLrVec; + std::vector fpLrVec; + BuildInterferenceGraphSeparateIntFp(intLrVec, fpLrVec); + + /* + * Once number of BB becomes larger for big functions, the checking for interferences + * takes significant long time. 
Taking advantage of unique bucket is one of strategies + * to avoid unnecessary computation + */ + auto lrSize = intLrVec.size(); + std::vector uniqueBucketIdx(lrSize); + for (uint32 i = 0; i < lrSize; i++) { + uint32 count = 0; + uint32 uniqueIdx; + LiveRange *lr = intLrVec[i]; + for (uint32 j = 0; j < bbBuckets; ++j) { + if (lr->GetBBMember()[j] > 0) { + count++; + uniqueIdx = j; + } + } + if (count == 1) { + uniqueBucketIdx[i] = static_cast(uniqueIdx); + } else { + /* LR spans multiple buckets */ + ASSERT(count >= 1, "A live range can not be empty"); + uniqueBucketIdx[i] = -1; + } + } + + for (auto it1 = intLrVec.begin(); it1 != intLrVec.end(); ++it1) { + LiveRange *lr1 = *it1; + CalculatePriority(*lr1); + int32 lr1UniqueBucketIdx = uniqueBucketIdx[static_cast(std::distance(intLrVec.begin(), it1))]; + for (auto it2 = it1 + 1; it2 != intLrVec.end(); ++it2) { + LiveRange *lr2 = *it2; + if (lr1->GetRegNO() < lr2->GetRegNO()) { + int32 lr2UniqueBucketIdx = uniqueBucketIdx[static_cast(std::distance(intLrVec.begin(), it2))]; + if (lr1UniqueBucketIdx == -1 && lr2UniqueBucketIdx == -1) { + CheckInterference(*lr1, *lr2); + } else if (((lr1UniqueBucketIdx >= 0) && ((lr1->GetBBMember()[lr1UniqueBucketIdx] & + lr2->GetBBMember()[lr1UniqueBucketIdx])) > 0) || ((lr2UniqueBucketIdx >= 0) && + ((lr1->GetBBMember()[lr2UniqueBucketIdx] & lr2->GetBBMember()[lr2UniqueBucketIdx]) > 0))) { + CheckInterference(*lr1, *lr2); + } + } + } + } + + // Might need to do same as to intLrVec + for (auto it1 = fpLrVec.begin(); it1 != fpLrVec.end(); ++it1) { + LiveRange *lr1 = *it1; + CalculatePriority(*lr1); + for (auto it2 = it1 + 1; it2 != fpLrVec.end(); ++it2) { + LiveRange *lr2 = *it2; + if (lr1->GetRegNO() < lr2->GetRegNO()) { + CheckInterference(*lr1, *lr2); + } + } + } + + if (GCRA_DUMP) { + LogInfo::MapleLogger() << "After BuildInterferenceGraph\n"; + PrintLiveRanges(); + } +} + +void GraphColorRegAllocator::SetBBInfoGlobalAssigned(uint32 bbID, regno_t regNO) { + ASSERT(bbID < bbRegInfo.size(), "index out of range in GraphColorRegAllocator::SetBBInfoGlobalAssigned"); + BBAssignInfo *bbInfo = bbRegInfo[bbID]; + if (bbInfo == nullptr) { + bbInfo = memPool->New(regInfo->GetAllRegNum(), alloc); + bbRegInfo[bbID] = bbInfo; + bbInfo->InitGlobalAssigned(); + } + bbInfo->InsertElemToGlobalsAssigned(regNO); +} + +bool GraphColorRegAllocator::HaveAvailableColor(const LiveRange &lr, uint32 num) const { + return ((lr.GetRegType() == kRegTyInt && num < intRegNum) || (lr.GetRegType() == kRegTyFloat && num < fpRegNum)); +} + +/* + * If the members on the interference list is less than #colors, then + * it can be trivially assigned a register. Otherwise it is constrained. + * Separate the LR based on if it is contrained or not. + * + * The unconstrained LRs are colored last. + * + * Compute a sorted list of constrained LRs based on priority cost. 
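+ *
+ * Concretely (mirroring the checks below): rematerializable LRs are always treated as unconstrained;
+ * otherwise a LR is unconstrained when HaveAvailableColor() holds for its current degree, i.e. for an
+ * int LR roughly GetNumBBConflicts() + GetForbiddenSize() + GetPregvetoSize() < intRegNum.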
+ */ +void GraphColorRegAllocator::Separate() { + for (auto &it : std::as_const(lrMap)) { + LiveRange *lr = it.second; +#ifdef USE_LRA + if (doLRA && IsLocalReg(*lr)) { + continue; + } +#endif /* USE_LRA */ +#ifdef OPTIMIZE_FOR_PROLOG + if (doOptProlog && ((lr->GetNumDefs() <= 1) && (lr->GetNumUses() <= 1) && (lr->GetNumCall() > 0)) && + (lr->GetFrequency() <= (cgFunc->GetFirstBB()->GetFrequency() << 1))) { + if (lr->GetRegType() == kRegTyInt) { + intDelayed.emplace_back(lr); + } else { + fpDelayed.emplace_back(lr); + } + continue; + } +#endif /* OPTIMIZE_FOR_PROLOG */ + if (lr->GetRematLevel() != kRematOff) { + unconstrained.emplace_back(lr); + } else if (HaveAvailableColor(*lr, lr->GetNumBBConflicts() + static_cast(lr->GetPregvetoSize()) + + static_cast(lr->GetForbiddenSize()))) { + if (lr->GetPrefs().size() > 0) { + unconstrainedPref.emplace_back(lr); + } else { + unconstrained.emplace_back(lr); + } + } else if (lr->IsMustAssigned()) { + mustAssigned.emplace_back(lr); + } else { + if ((lr->GetPrefs().size() > 0) && lr->GetNumCall() == 0) { + unconstrainedPref.emplace_back(lr); + } else { + constrained.emplace_back(lr); + } + } + } + if (GCRA_DUMP) { + LogInfo::MapleLogger() << "Unconstrained : "; + for (auto lr : unconstrainedPref) { + LogInfo::MapleLogger() << lr->GetRegNO() << " "; + } + for (auto lr : unconstrained) { + LogInfo::MapleLogger() << lr->GetRegNO() << " "; + } + LogInfo::MapleLogger() << "\n"; + LogInfo::MapleLogger() << "Constrained : "; + for (auto lr : constrained) { + LogInfo::MapleLogger() << lr->GetRegNO() << " "; + } + LogInfo::MapleLogger() << "\n"; + LogInfo::MapleLogger() << "mustAssigned : "; + for (auto lr : mustAssigned) { + LogInfo::MapleLogger() << lr->GetRegNO() << " "; + } + LogInfo::MapleLogger() << "\n"; + } +} + +MapleVector::iterator GraphColorRegAllocator::GetHighPriorityLr(MapleVector &lrSet) const { + auto it = lrSet.begin(); + auto highestIt = it; + LiveRange *startLr = *it; + float maxPrio = startLr->GetPriority(); + ++it; + for (; it != lrSet.end(); ++it) { + LiveRange *lr = *it; + if (lr->GetPriority() > maxPrio) { + maxPrio = lr->GetPriority(); + highestIt = it; + } + } + return highestIt; +} + +void GraphColorRegAllocator::UpdateForbiddenForNeighbors(const LiveRange &lr) const { + auto updateForbidden = [&lr, this] (regno_t regNO) { + LiveRange *newLr = GetLiveRange(regNO); + ASSERT(newLr != nullptr, "newLr should not be nullptr"); + if (!newLr->GetPregveto(lr.GetAssignedRegNO())) { + newLr->InsertElemToForbidden(lr.GetAssignedRegNO()); + } + }; + ForEachRegArrElem(lr.GetBBConflict(), updateForbidden); +} + +void GraphColorRegAllocator::UpdatePregvetoForNeighbors(const LiveRange &lr) const { + auto updatePregveto = [&lr, this] (regno_t regNO) { + LiveRange *newLr = GetLiveRange(regNO); + ASSERT(newLr != nullptr, "newLr should not be nullptr"); + newLr->InsertElemToPregveto(lr.GetAssignedRegNO()); + newLr->EraseElemFromForbidden(lr.GetAssignedRegNO()); + }; + ForEachRegArrElem(lr.GetBBConflict(), updatePregveto); +} + +/* + * For cases with only one def/use and crosses a call. + * It might be more beneficial to spill vs save/restore in prolog/epilog. + * But if the callee register is already used, then it is ok to reuse it again. + * Or in certain cases, just use the callee. 
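+ *
+ * Concretely, ShouldUseCallee() below keeps the tentative callee-saved assignment when that register is
+ * already in calleeUsed, when it is callee saved and calleeUsed currently has an odd size (presumably so
+ * the prolog/epilog can pair the saves), or when no callee-saved register is in use yet but more than one
+ * delayed LR could share the save/restore cost; otherwise the assignment is cleared and false is returned.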
+ */ +bool GraphColorRegAllocator::ShouldUseCallee(LiveRange &lr, const MapleSet &calleeUsed, + const MapleVector &delayed) const { + if (FindIn(calleeUsed, lr.GetAssignedRegNO())) { + return true; + } + if (regInfo->IsCalleeSavedReg(lr.GetAssignedRegNO()) && + (calleeUsed.size() % kDivide2) != 0) { + return true; + } + if (delayed.size() > 1 && calleeUsed.empty()) { + /* If there are more than 1 vreg that can benefit from callee, use callee */ + return true; + } + lr.SetAssignedRegNO(0); + return false; +} + +void GraphColorRegAllocator::AddCalleeUsed(regno_t regNO, RegType regType) { + ASSERT(!regInfo->IsVirtualRegister(regNO), "regNO should be physical register"); + bool isCalleeReg = regInfo->IsCalleeSavedReg(regNO); + if (isCalleeReg) { + if (regType == kRegTyInt) { + (void)intCalleeUsed.insert(regNO); + } else { + (void)fpCalleeUsed.insert(regNO); + } + } +} + +regno_t GraphColorRegAllocator::FindColorForLr(const LiveRange &lr) const { + RegType regType = lr.GetRegType(); + const MapleSet *currRegSet = nullptr; + const MapleSet *nextRegSet = nullptr; + if (regType == kRegTyInt) { + if (lr.GetNumCall() != 0) { + currRegSet = &intCalleeRegSet; + nextRegSet = &intCallerRegSet; + } else { + currRegSet = &intCallerRegSet; + nextRegSet = &intCalleeRegSet; + } + } else { + if (lr.GetNumCall() != 0) { + currRegSet = &fpCalleeRegSet; + nextRegSet = &fpCallerRegSet; + } else { + currRegSet = &fpCallerRegSet; + nextRegSet = &fpCalleeRegSet; + } + } + +#ifdef MOVE_COALESCE + if (lr.GetNumCall() == 0 || (lr.GetNumDefs() + lr.GetNumUses() <= 2)) { + for (const auto reg : lr.GetPrefs()) { + if ((FindIn(*currRegSet, reg) || FindIn(*nextRegSet, reg)) && !lr.GetForbidden(reg) && !lr.GetPregveto(reg)) { + return reg; + } + } + } +#endif /* MOVE_COALESCE */ + for (const auto reg : *currRegSet) { + if (!lr.GetForbidden(reg) && !lr.GetPregveto(reg)) { + return reg; + } + } + /* Failed to allocate in first choice. Try 2nd choice. */ + for (const auto reg : *nextRegSet) { + if (!lr.GetForbidden(reg) && !lr.GetPregveto(reg)) { + return reg; + } + } + ASSERT(false, "Failed to find a register"); + return 0; +} + +regno_t GraphColorRegAllocator::TryToAssignCallerSave(const LiveRange &lr) const { + RegType regType = lr.GetRegType(); + const MapleSet *currRegSet = nullptr; + if (regType == kRegTyInt) { + currRegSet = &intCallerRegSet; + } else { + currRegSet = &fpCallerRegSet; + } + +#ifdef MOVE_COALESCE + if (lr.GetNumCall() == 0 || (lr.GetNumDefs() + lr.GetNumUses() <= 2)) { + for (const auto reg : lr.GetPrefs()) { + if ((FindIn(*currRegSet, reg)) && !lr.GetForbidden(reg) && !lr.GetPregveto(reg) && !lr.GetCallDef(reg)) { + return reg; + } + } + } +#endif /* MOVE_COALESCE */ + for (const auto reg : *currRegSet) { + if (!lr.GetForbidden(reg) && !lr.GetPregveto(reg) && !lr.GetCallDef(reg)) { + return reg; + } + } + return 0; +} + +/* + * If forbidden list has more registers than max of all BB's local reg + * requirement, then LR can be colored. + * Update LR's color if success, return true, else return false. + */ +bool GraphColorRegAllocator::AssignColorToLr(LiveRange &lr, bool isDelayed) { + if (lr.GetAssignedRegNO() > 0) { + /* Already assigned. 
*/ + return true; + } + if (!HaveAvailableColor(lr, lr.GetForbiddenSize() + lr.GetPregvetoSize())) { + if (GCRA_DUMP) { + LogInfo::MapleLogger() << "assigned fail to R" << lr.GetRegNO() << "\n"; + } + return false; + } + regno_t callerSaveReg = 0; + regno_t reg = FindColorForLr(lr); + if (lr.GetNumCall() != 0 && !lr.GetCrossCall()) { + callerSaveReg = TryToAssignCallerSave(lr); + bool prefCaller = regInfo->IsCalleeSavedReg(reg) && + intCalleeUsed.find(reg) == intCalleeUsed.end() && + fpCalleeUsed.find(reg) == fpCalleeUsed.end(); + if (callerSaveReg != 0 && (prefCaller || !regInfo->IsCalleeSavedReg(reg))) { + reg = callerSaveReg; + lr.SetNumCall(0); + } + } + lr.SetAssignedRegNO(reg); + if (GCRA_DUMP) { + LogInfo::MapleLogger() << "assigned " << lr.GetAssignedRegNO() << " to R" << lr.GetRegNO() << "\n"; + } + if (lr.GetAssignedRegNO() == 0) { + return false; + } +#ifdef OPTIMIZE_FOR_PROLOG + if (doOptProlog && isDelayed) { + if ((lr.GetRegType() == kRegTyInt && !ShouldUseCallee(lr, intCalleeUsed, intDelayed)) || + (lr.GetRegType() == kRegTyFloat && !ShouldUseCallee(lr, fpCalleeUsed, fpDelayed))) { + return false; + } + } +#endif /* OPTIMIZE_FOR_PROLOG */ + + AddCalleeUsed(lr.GetAssignedRegNO(), lr.GetRegType()); + + UpdateForbiddenForNeighbors(lr); + ForEachBBArrElem(lr.GetBBMember(), + [&lr, this](uint32 bbID) { SetBBInfoGlobalAssigned(bbID, lr.GetAssignedRegNO()); }); + return true; +} + +void GraphColorRegAllocator::PruneLrForSplit(LiveRange &lr, BB &bb, bool remove, + std::set &candidateInLoop, + std::set &defInLoop) { + if (bb.GetInternalFlag1() != 0) { + /* already visited */ + return; + } + + bb.SetInternalFlag1(true); + MapleMap::const_iterator lu = lr.FindInLuMap(bb.GetId()); + uint32 defNum = 0; + uint32 useNum = 0; + if (lu != lr.EndOfLuMap()) { + defNum = lu->second->GetDefNum(); + useNum = lu->second->GetUseNum(); + } + + if (remove) { + /* In removal mode, has not encountered a ref yet. */ + if (defNum == 0 && useNum == 0) { + if (bb.GetLoop() != nullptr && FindIn(candidateInLoop, bb.GetLoop())) { + /* + * Upward search has found a loop. Regardless of def/use + * The loop members must be included in the new LR. + */ + remove = false; + } else { + /* No ref in this bb. mark as potential remove. */ + bb.SetInternalFlag2(true); + return; + } + } else { + /* found a ref, no more removal of bb and preds. */ + remove = false; + } + } + + if (bb.GetLoop() != nullptr) { + /* With a def in loop, cannot prune that loop */ + if (defNum > 0) { + (void)defInLoop.insert(bb.GetLoop()); + } + /* bb in loop, need to make sure of loop carried dependency */ + (void)candidateInLoop.insert(bb.GetLoop()); + } + for (auto pred : bb.GetPreds()) { + if (FindNotIn(bb.GetLoopPreds(), pred)) { + PruneLrForSplit(lr, *pred, remove, candidateInLoop, defInLoop); + } + } + for (auto pred : bb.GetEhPreds()) { + if (FindNotIn(bb.GetLoopPreds(), pred)) { + PruneLrForSplit(lr, *pred, remove, candidateInLoop, defInLoop); + } + } +} + +void GraphColorRegAllocator::FindBBSharedInSplit(LiveRange &lr, + const std::set &candidateInLoop, + std::set &defInLoop) { + /* A loop might be split into two. Need to see over the entire LR if there is a def in the loop. 
*/ + auto findBBSharedFunc = [&lr, &candidateInLoop, &defInLoop, this](uint32 bbID) { + BB *bb = bbVec[bbID]; + if (bb->GetLoop() != nullptr && FindIn(candidateInLoop, bb->GetLoop())) { + auto lu = lr.FindInLuMap(bb->GetId()); + if (lu != lr.EndOfLuMap() && lu->second->GetDefNum() > 0) { + (void)defInLoop.insert(bb->GetLoop()); + } + } + }; + ForEachBBArrElem(lr.GetBBMember(), findBBSharedFunc); +} + +/* + * Backward traversal of the top part of the split LR. + * Prune the part of the LR that has no downward exposing references. + * Take into account of loops and loop carried dependencies. + * The candidate bb to be removed, if in a loop, store that info. + * If a LR crosses a loop, even if the loop has no def/use, it must + * be included in the new LR. + */ +void GraphColorRegAllocator::ComputeBBForNewSplit(LiveRange &newLr, LiveRange &origLr) { + /* + * The candidate bb to be removed, if in a loop, store that info. + * If a LR crosses a loop, even if the loop has no def/use, it must + * be included in the new LR. + */ + std::set candidateInLoop; + /* If a bb has a def and is in a loop, store that info. */ + std::set defInLoop; + std::set smember; + ForEachBBArrElem(newLr.GetBBMember(), [this, &smember](uint32 bbID) { (void)smember.insert(bbVec[bbID]); }); + for (auto bbIt = smember.crbegin(); bbIt != smember.crend(); ++bbIt) { + BB *bb = *bbIt; + if (bb->GetInternalFlag1() != 0) { + continue; + } + PruneLrForSplit(newLr, *bb, true, candidateInLoop, defInLoop); + } + FindBBSharedInSplit(origLr, candidateInLoop, defInLoop); + auto pruneTopLr = [this, &newLr, &candidateInLoop, &defInLoop] (uint32 bbID) { + BB *bb = bbVec[bbID]; + if (bb->GetInternalFlag2() != 0) { + if (bb->GetLoop() != nullptr && FindIn(candidateInLoop, bb->GetLoop())) { + return; + } + if (bb->GetLoop() != nullptr || FindNotIn(defInLoop, bb->GetLoop())) { + /* defInLoop should be a subset of candidateInLoop. remove. */ + newLr.UnsetMemberBitArrElem(bbID); + } + } + }; + ForEachBBArrElem(newLr.GetBBMember(), pruneTopLr); /* prune the top LR. 
*/ +} + +bool GraphColorRegAllocator::UseIsUncovered(const BB &bb, const BB &startBB, std::vector &visitedBB) { + CHECK_FATAL(bb.GetId() < visitedBB.size(), "index out of range"); + visitedBB[bb.GetId()] = true; + for (auto pred : bb.GetPreds()) { + if (visitedBB[pred->GetId()]) { + continue; + } + if (pred->GetLevel() <= startBB.GetLevel()) { + return true; + } + if (UseIsUncovered(*pred, startBB, visitedBB)) { + return true; + } + } + for (auto pred : bb.GetEhPreds()) { + if (visitedBB[pred->GetId()]) { + continue; + } + if (pred->GetLevel() <= startBB.GetLevel()) { + return true; + } + if (UseIsUncovered(*pred, startBB, visitedBB)) { + return true; + } + } + return false; +} + +void GraphColorRegAllocator::FindUseForSplit(LiveRange &lr, SplitBBInfo &bbInfo, bool &remove, + std::set &candidateInLoop, + std::set &defInLoop) { + BB *bb = bbInfo.GetCandidateBB(); + const BB *startBB = bbInfo.GetStartBB(); + if (bb->GetInternalFlag1() != 0) { + /* already visited */ + return; + } + for (auto pred : bb->GetPreds()) { + if (pred->GetInternalFlag1() == 0) { + return; + } + } + for (auto pred : bb->GetEhPreds()) { + if (pred->GetInternalFlag1() == 0) { + return; + } + } + + bb->SetInternalFlag1(true); + MapleMap::const_iterator lu = lr.FindInLuMap(bb->GetId()); + uint32 defNum = 0; + uint32 useNum = 0; + if (lu != lr.EndOfLuMap()) { + defNum = lu->second->GetDefNum(); + useNum = lu->second->GetUseNum(); + } + + std::vector visitedBB(cgFunc->GetAllBBs().size(), false); + if (remove) { + /* In removal mode, has not encountered a ref yet. */ + if (defNum == 0 && useNum == 0) { + /* No ref in this bb. mark as potential remove. */ + bb->SetInternalFlag2(true); + if (bb->GetLoop() != nullptr) { + /* bb in loop, need to make sure of loop carried dependency */ + (void)candidateInLoop.insert(bb->GetLoop()); + } + } else { + /* found a ref, no more removal of bb and preds. */ + remove = false; + /* A potential point for a upward exposing use. (might be a def). */ + lu->second->SetNeedReload(true); + } + } else if ((defNum > 0 || useNum > 0) && UseIsUncovered(*bb, *startBB, visitedBB)) { + lu->second->SetNeedReload(true); + } + + /* With a def in loop, cannot prune that loop */ + if (bb->GetLoop() != nullptr && defNum > 0) { + (void)defInLoop.insert(bb->GetLoop()); + } + + for (auto succ : bb->GetSuccs()) { + if (FindNotIn(bb->GetLoopSuccs(), succ)) { + bbInfo.SetCandidateBB(*succ); + FindUseForSplit(lr, bbInfo, remove, candidateInLoop, defInLoop); + } + } + for (auto succ : bb->GetEhSuccs()) { + if (FindNotIn(bb->GetLoopSuccs(), succ)) { + bbInfo.SetCandidateBB(*succ); + FindUseForSplit(lr, bbInfo, remove, candidateInLoop, defInLoop); + } + } +} + +void GraphColorRegAllocator::ClearLrBBFlags(const std::set &member) const { + for (auto bb : member) { + bb->SetInternalFlag1(0); + bb->SetInternalFlag2(0); + for (auto pred : bb->GetPreds()) { + pred->SetInternalFlag1(0); + pred->SetInternalFlag2(0); + } + for (auto pred : bb->GetEhPreds()) { + pred->SetInternalFlag1(0); + pred->SetInternalFlag2(0); + } + } +} + +/* + * Downward traversal of the bottom part of the split LR. + * Prune the part of the LR that has no upward exposing references. + * Take into account of loops and loop carried dependencies. + */ +void GraphColorRegAllocator::ComputeBBForOldSplit(LiveRange &newLr, LiveRange &origLr) { + /* The candidate bb to be removed, if in a loop, store that info. */ + std::set candidateInLoop; + /* If a bb has a def and is in a loop, store that info. 
*/ + std::set defInLoop; + SplitBBInfo bbInfo; + bool remove = true; + + std::set smember; + ForEachBBArrElem(origLr.GetBBMember(), [this, &smember](uint32 bbID) { (void)smember.insert(bbVec[bbID]); }); + ClearLrBBFlags(smember); + for (auto bb : smember) { + if (bb->GetInternalFlag1() != 0) { + continue; + } + for (auto pred : bb->GetPreds()) { + pred->SetInternalFlag1(true); + } + for (auto pred : bb->GetEhPreds()) { + pred->SetInternalFlag1(true); + } + bbInfo.SetCandidateBB(*bb); + bbInfo.SetStartBB(*bb); + FindUseForSplit(origLr, bbInfo, remove, candidateInLoop, defInLoop); + } + FindBBSharedInSplit(newLr, candidateInLoop, defInLoop); + auto pruneLrFunc = [&origLr, &defInLoop, this](uint32 bbID) { + BB *bb = bbVec[bbID]; + if (bb->GetInternalFlag2() != 0) { + if (bb->GetLoop() != nullptr && FindNotIn(defInLoop, bb->GetLoop())) { + origLr.UnsetMemberBitArrElem(bbID); + } + } + }; + ForEachBBArrElem(origLr.GetBBMember(), pruneLrFunc); +} + +/* + * There is at least one available color for this BB from the neighbors + * minus the ones reserved for local allocation. + * bbAdded : The new BB to be added into the split LR if color is available. + * conflictRegs : Reprent the LR before adding the bbAdded. These are the + * forbidden regs before adding the new BBs. + * Side effect : Adding the new forbidden regs from bbAdded into + * conflictRegs if the LR can still be colored. + */ +bool GraphColorRegAllocator::LrCanBeColored(const LiveRange &lr, const BB &bbAdded, + std::unordered_set &conflictRegs) { + RegType type = lr.GetRegType(); + + std::unordered_set newConflict; + auto updateConflictFunc = [&bbAdded, &conflictRegs, &newConflict, &lr, this](regno_t regNO) { + /* check the real conflict in current bb */ + LiveRange *conflictLr = lrMap[regNO]; + /* + * If the bb to be added to the new LR has an actual + * conflict with another LR, and if that LR has already + * assigned a color that is not in the conflictRegs, + * then add it as a newConflict. + */ + if (IsBitArrElemSet(conflictLr->GetBBMember(), bbAdded.GetId())) { + regno_t confReg = conflictLr->GetAssignedRegNO(); + if ((confReg > 0) && FindNotIn(conflictRegs, confReg) && !lr.GetPregveto(confReg)) { + (void)newConflict.insert(confReg); + } + } else if (conflictLr->GetSplitLr() != nullptr && + IsBitArrElemSet(conflictLr->GetSplitLr()->GetBBMember(), bbAdded.GetId())) { + /* + * The after split LR is split into pieces, and this ensures + * the after split color is taken into consideration. + */ + regno_t confReg = conflictLr->GetSplitLr()->GetAssignedRegNO(); + if ((confReg > 0) && FindNotIn(conflictRegs, confReg) && !lr.GetPregveto(confReg)) { + (void)newConflict.insert(confReg); + } + } + }; + ForEachRegArrElem(lr.GetBBConflict(), updateConflictFunc); + + size_t numRegs = newConflict.size() + lr.GetPregvetoSize() + conflictRegs.size(); + + bool canColor = false; + if (type == kRegTyInt) { + if (numRegs < intRegNum) { + canColor = true; + } + } else if (numRegs < fpRegNum) { + canColor = true; + } + + if (canColor) { + for (auto regNO : newConflict) { + (void)conflictRegs.insert(regNO); + } + } + + /* Update all the registers conflicting when adding thew new bb. */ + return canColor; +} + +/* Support function for LR split. Move one BB from LR1 to LR2. 
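+ * Besides moving the membership bit, the bb's LiveUnit is transferred from the old LR's luMap to the
+ * new LR, and the bb's internal traversal flags are reset for the pruning passes that follow.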
*/ +void GraphColorRegAllocator::MoveLrBBInfo(LiveRange &oldLr, LiveRange &newLr, BB &bb) const { + /* initialize backward traversal flag for the bb pruning phase */ + bb.SetInternalFlag1(false); + /* initialize bb removal marker */ + bb.SetInternalFlag2(false); + /* Insert BB into new LR */ + uint32 bbID = bb.GetId(); + newLr.SetMemberBitArrElem(bbID); + + /* Move LU from old LR to new LR */ + MapleMap::const_iterator luIt = oldLr.FindInLuMap(bb.GetId()); + if (luIt != oldLr.EndOfLuMap()) { + newLr.SetElemToLuMap(luIt->first, *(luIt->second)); + oldLr.EraseLuMap(luIt); + } + + /* Remove BB from old LR */ + oldLr.UnsetMemberBitArrElem(bbID); +} + +/* Is the set of loops inside the loop? */ +bool GraphColorRegAllocator::ContainsLoop(const CGFuncLoops &loop, + const std::set &loops) const { + for (const CGFuncLoops *lp : loops) { + while (lp != nullptr) { + if (lp == &loop) { + return true; + } + lp = lp->GetOuterLoop(); + } + } + return false; +} + +void GraphColorRegAllocator::GetAllLrMemberLoops(LiveRange &lr, std::set &loops) { + auto getLrMemberFunc = [&loops, this](uint32 bbID) { + BB *bb = bbVec[bbID]; + CGFuncLoops *loop = bb->GetLoop(); + if (loop != nullptr) { + (void)loops.insert(loop); + } + }; + ForEachBBArrElem(lr.GetBBMember(), getLrMemberFunc); +} + +bool GraphColorRegAllocator::SplitLrShouldSplit(LiveRange &lr) { + if (lr.GetSplitLr() != nullptr || lr.GetNumBBMembers() == 1) { + return false; + } + /* Need to split within the same hierarchy */ + uint32 loopID = 0xFFFFFFFF; /* loopID is initialized the maximum value,and then be assigned in function */ + bool needSplit = true; + auto setNeedSplit = [&needSplit, &loopID, this](uint32 bbID) -> bool { + BB *bb = bbVec[bbID]; + if (loopID == 0xFFFFFFFF) { + if (bb->GetLoop() != nullptr) { + loopID = bb->GetLoop()->GetHeader()->GetId(); + } else { + loopID = 0; + } + } else if ((bb->GetLoop() != nullptr && bb->GetLoop()->GetHeader()->GetId() != loopID) || + (bb->GetLoop() == nullptr && loopID != 0)) { + needSplit = false; + return true; + } + return false; + }; + ForEachBBArrElemWithInterrupt(lr.GetBBMember(), setNeedSplit); + return needSplit; +} + +/* + * When a BB in the LR has no def or use in it, then potentially + * there is no conflict within these BB for the new LR, since + * the new LR will need to spill the defs which terminates the + * new LR unless there is a use later which extends the new LR. + * There is no need to compute conflicting register set unless + * there is a def or use. + * It is assumed that the new LR is extended to the def or use. + * Initially newLr is empty, then add bb if can be colored. + * Return true if there is a split. + */ +bool GraphColorRegAllocator::SplitLrFindCandidateLr(LiveRange &lr, LiveRange &newLr, + std::unordered_set &conflictRegs) { + if (GCRA_DUMP) { + LogInfo::MapleLogger() << "start split lr for vreg " << lr.GetRegNO() << "\n"; + } + std::set smember; + ForEachBBArrElem(lr.GetBBMember(), [&smember, this](uint32 bbID) { (void)smember.insert(bbVec[bbID]); }); + for (auto bb : smember) { + if (!LrCanBeColored(lr, *bb, conflictRegs)) { + break; + } + MoveLrBBInfo(lr, newLr, *bb); + } + + /* return ture if split is successful */ + return newLr.GetNumBBMembers() != 0; +} + +void GraphColorRegAllocator::SplitLrHandleLoops(LiveRange &lr, LiveRange &newLr, + const std::set &origLoops, + const std::set &newLoops) { + /* + * bb in loops might need a reload due to loop carried dependency. + * Compute this before pruning the LRs. + * if there is no re-definition, then reload is not necessary. 
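+ *
+ * Reduced sketch of that marking (LiveUnitLite is an illustrative stand-in for the
+ * real LiveUnit; only the fields the decision needs are kept):
+ *
+ *   struct LiveUnitLite { unsigned useNum = 0; bool needReload = false; };
+ *   // A bb of the split LR inside a loop that the original LR also covers may read a
+ *   // value produced in an earlier iteration, so any use there is conservatively
+ *   // marked for reload.
+ *   void MarkLoopCarriedReload(LiveUnitLite &lu, bool loopSharedWithOrigLr) {
+ *     if (loopSharedWithOrigLr && lu.useNum != 0) {
+ *       lu.needReload = true;
+ *     }
+ *   }
+ *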
+ * Part of the new LR region after the last reference is + * no longer in the LR. Remove those bb. + */ + ComputeBBForNewSplit(newLr, lr); + + /* With new LR, recompute conflict. */ + auto recomputeConflict = [&lr, &newLr, this](uint32 bbID) { + auto lrFunc = [&newLr, &bbID, this](regno_t regNO) { + LiveRange *confLrVec = lrMap[regNO]; + if (IsBitArrElemSet(confLrVec->GetBBMember(), bbID) || + (confLrVec->GetSplitLr() != nullptr && IsBitArrElemSet(confLrVec->GetSplitLr()->GetBBMember(), bbID))) { + /* + * New LR getting the interference does not mean the + * old LR can remove the interference. + * Old LR's interference will be handled at the end of split. + */ + newLr.SetConflictBitArrElem(regNO); + } + }; + ForEachRegArrElem(lr.GetBBConflict(), lrFunc); + }; + ForEachBBArrElem(newLr.GetBBMember(), recomputeConflict); + + /* update bb/loop same as for new LR. */ + ComputeBBForOldSplit(newLr, lr); + /* Update the conflict interference for the original LR later. */ + for (auto loop : newLoops) { + if (!ContainsLoop(*loop, origLoops)) { + continue; + } + for (auto bb : loop->GetLoopMembers()) { + if (!IsBitArrElemSet(newLr.GetBBMember(), bb->GetId())) { + continue; + } + LiveUnit *lu = newLr.GetLiveUnitFromLuMap(bb->GetId()); + if (lu->GetUseNum() != 0) { + lu->SetNeedReload(true); + } + } + } +} + +void GraphColorRegAllocator::SplitLrFixNewLrCallsAndRlod(LiveRange &newLr, + const std::set &origLoops) { + /* If a 2nd split loop is before the bb in 1st split bb. */ + newLr.SetNumCall(0); + auto fixCallsAndRlod = [&newLr, &origLoops, this](uint32 bbID) { + BB *bb = bbVec[bbID]; + for (auto loop : origLoops) { + if (loop->GetHeader()->GetLevel() >= bb->GetLevel()) { + continue; + } + LiveUnit *lu = newLr.GetLiveUnitFromLuMap(bbID); + if (lu->GetUseNum() != 0) { + lu->SetNeedReload(true); + } + } + LiveUnit *lu = newLr.GetLiveUnitFromLuMap(bbID); + if (lu->HasCall()) { + newLr.IncNumCall(); + } + }; + ForEachBBArrElem(newLr.GetBBMember(), fixCallsAndRlod); +} + +void GraphColorRegAllocator::SplitLrFixOrigLrCalls(LiveRange &lr) const { + lr.SetNumCall(0); + auto fixOrigCalls = [&lr](uint32 bbID) { + LiveUnit *lu = lr.GetLiveUnitFromLuMap(bbID); + if (lu->HasCall()) { + lr.IncNumCall(); + } + }; + ForEachBBArrElem(lr.GetBBMember(), fixOrigCalls); +} + +void GraphColorRegAllocator::SplitLrUpdateInterference(LiveRange &lr) { + /* + * newLr is now a separate LR from the original lr. + * Update the interference info. 
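+ *
+ * The overlap test that drives this recomputation, as a standalone sketch over raw
+ * bit-array buckets (uint64_t buckets mirror the BB-member representation used here):
+ *
+ *   #include <cstdint>
+ *   // Two live ranges still interfere after the split iff their BB-member bit arrays
+ *   // share at least one set bit; edges whose arrays no longer overlap are dropped and
+ *   // the surviving neighbours' assigned colors go back into the forbidden set.
+ *   bool StillInterferes(const uint64_t *bbMemberA, const uint64_t *bbMemberB, unsigned buckets) {
+ *     for (unsigned i = 0; i < buckets; ++i) {
+ *       if ((bbMemberA[i] & bbMemberB[i]) != 0) {
+ *         return true;
+ *       }
+ *     }
+ *     return false;
+ *   }
+ *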
+ * Also recompute the forbidden info + */ + lr.ClearForbidden(); + auto updateInterfrence = [&lr, this](regno_t regNO) { + LiveRange *confLrVec = lrMap[regNO]; + if (IsBBsetOverlap(lr.GetBBMember(), confLrVec->GetBBMember(), bbBuckets)) { + /* interfere */ + if ((confLrVec->GetAssignedRegNO() > 0) && !lr.GetPregveto(confLrVec->GetAssignedRegNO())) { + lr.InsertElemToForbidden(confLrVec->GetAssignedRegNO()); + } + } else { + /* no interference */ + lr.UnsetConflictBitArrElem(regNO); + } + }; + ForEachRegArrElem(lr.GetBBConflict(), updateInterfrence); +} + +void GraphColorRegAllocator::SplitLrUpdateRegInfo(const LiveRange &origLr, LiveRange &newLr, + std::unordered_set &conflictRegs) const { + for (regno_t regNO = regInfo->GetInvalidReg(); regNO < regInfo->GetAllRegNum(); ++regNO) { + if (origLr.GetPregveto(regNO)) { + newLr.InsertElemToPregveto(regNO); + } + } + for (auto regNO : conflictRegs) { + if (!newLr.GetPregveto(regNO)) { + newLr.InsertElemToForbidden(regNO); + } + } +} + +void GraphColorRegAllocator::SplitLrErrorCheckAndDebug(const LiveRange &origLr) const { + if (origLr.GetNumBBMembers() == 0) { + ASSERT(origLr.GetNumBBConflicts() == 0, "Error: member and conflict not match"); + } +} + +/* + * Pick a starting BB, then expand to maximize the new LR. + * Return the new LR. + */ +void GraphColorRegAllocator::SplitLr(LiveRange &lr) { + if (!SplitLrShouldSplit(lr)) { + return; + } + LiveRange *newLr = NewLiveRange(); + /* + * For the new LR, whenever a BB with either a def or + * use is added, then add the registers that the neighbor + * is using to the conflict register set indicating that these + * registers cannot be used for the new LR's color. + */ + std::unordered_set conflictRegs; + if (!SplitLrFindCandidateLr(lr, *newLr, conflictRegs)) { + return; + } +#ifdef REUSE_SPILLMEM + /* Copy the original conflict vector for spill reuse optimization */ + lr.SetOldConflict(memPool->NewArray(regBuckets)); + for (uint32 i = 0; i < regBuckets; ++i) { + lr.SetBBConflictElem(static_cast(i), lr.GetBBConflictElem(static_cast(i))); + } +#endif /* REUSE_SPILLMEM */ + + std::set newLoops; + std::set origLoops; + GetAllLrMemberLoops(*newLr, newLoops); + GetAllLrMemberLoops(lr, origLoops); + SplitLrHandleLoops(lr, *newLr, origLoops, newLoops); + SplitLrFixNewLrCallsAndRlod(*newLr, origLoops); + SplitLrFixOrigLrCalls(lr); + + SplitLrUpdateRegInfo(lr, *newLr, conflictRegs); + + CalculatePriority(lr); + /* At this point, newLr should be unconstrained. */ + lr.SetSplitLr(*newLr); + + newLr->SetRegNO(lr.GetRegNO()); + newLr->SetRegType(lr.GetRegType()); + newLr->SetID(lr.GetID()); + newLr->CopyRematerialization(lr); + CalculatePriority(*newLr); + SplitLrUpdateInterference(lr); + newLr->SetAssignedRegNO(FindColorForLr(*newLr)); + + AddCalleeUsed(newLr->GetAssignedRegNO(), newLr->GetRegType()); + + /* For the new LR, update assignment for local RA */ + ForEachBBArrElem(newLr->GetBBMember(), + [&newLr, this](uint32 bbID) { SetBBInfoGlobalAssigned(bbID, newLr->GetAssignedRegNO()); }); + + UpdatePregvetoForNeighbors(*newLr); + + SplitLrErrorCheckAndDebug(lr); +} + +void GraphColorRegAllocator::ColorForOptPrologEpilog() { +#ifdef OPTIMIZE_FOR_PROLOG + if (!doOptProlog) { + return; + } + for (auto lr : intDelayed) { + if (!AssignColorToLr(*lr, true)) { + lr->SetSpilled(true); + } + } + for (auto lr : fpDelayed) { + if (!AssignColorToLr(*lr, true)) { + lr->SetSpilled(true); + } + } +#endif +} + +/* + * From the sorted list of constrained LRs, pick the most profitable LR. 
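+ *
+ * Shape of the driver, compressed into a standalone sketch (Lr and the two callbacks
+ * are illustrative stand-ins for LiveRange, AssignColorToLr and SplitLr):
+ *
+ *   #include <vector>
+ *   #include <algorithm>
+ *   #include <functional>
+ *   struct Lr { double priority = 0.0; bool spilled = false; };
+ *   void AssignSplitOrSpill(std::vector<Lr*> &worklist,
+ *                           const std::function<bool(Lr&)> &tryColor,
+ *                           const std::function<void(Lr&)> &split) {
+ *     while (!worklist.empty()) {
+ *       auto it = std::max_element(worklist.begin(), worklist.end(),
+ *           [](const Lr *a, const Lr *b) { return a->priority < b->priority; });
+ *       Lr *lr = *it;
+ *       worklist.erase(it);
+ *       if (tryColor(*lr)) { continue; }            // colored directly
+ *       split(*lr);                                 // peel off a colorable piece first
+ *       if (!tryColor(*lr)) { lr->spilled = true; } // remainder goes to memory
+ *     }
+ *   }
+ *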
+ * Split the LR into LRnew1 LRnew2 where LRnew1 has the maximum number of + * BB and is colorable. + * The starting BB for traversal must have a color available. + * + * Assign a color, update neighbor's forbidden list. + * + * Update the conflict graph by change the interference list. + * In the case of both LRnew1 and LRnew2 conflicts with a BB, this BB's + * #neightbors increased. If this BB was unconstrained, must check if + * it is still unconstrained. Move to constrained if necessary. + * + * Color the unconstrained LRs. + */ +void GraphColorRegAllocator::SplitAndColorForEachLr(MapleVector &targetLrVec) { + while (!targetLrVec.empty()) { + auto highestIt = GetHighPriorityLr(targetLrVec); + LiveRange *lr = *highestIt; + /* check those lrs in lr->sconflict which is in unconstrained whether it turns to constrined */ + if (highestIt != targetLrVec.end()) { + targetLrVec.erase(highestIt); + } else { + ASSERT(false, "Error: not in targetLrVec"); + } + if (AssignColorToLr(*lr)) { + continue; + } +#ifdef USE_SPLIT + SplitLr(*lr); +#endif /* USE_SPLIT */ + /* + * When LR is spilled, it potentially has no conflicts as + * each def/use is spilled/reloaded. + */ +#ifdef COLOR_SPLIT + if (!AssignColorToLr(*lr)) { +#endif /* COLOR_SPLIT */ + lr->SetSpilled(true); + hasSpill = true; +#ifdef COLOR_SPLIT + } +#endif /* COLOR_SPLIT */ + } +} + +void GraphColorRegAllocator::SplitAndColor() { + /* handle mustAssigned */ + if (GCRA_DUMP) { + LogInfo::MapleLogger() << " starting mustAssigned : \n"; + } + SplitAndColorForEachLr(mustAssigned); + + if (GCRA_DUMP) { + LogInfo::MapleLogger() << " starting unconstrainedPref : \n"; + } + /* assign color for unconstained */ + SplitAndColorForEachLr(unconstrainedPref); + + if (GCRA_DUMP) { + LogInfo::MapleLogger() << " starting constrained : \n"; + } + /* handle constrained */ + SplitAndColorForEachLr(constrained); + + if (GCRA_DUMP) { + LogInfo::MapleLogger() << " starting unconstrained : \n"; + } + /* assign color for unconstained */ + SplitAndColorForEachLr(unconstrained); + +#ifdef OPTIMIZE_FOR_PROLOG + if (doOptProlog) { + ColorForOptPrologEpilog(); + } +#endif /* OPTIMIZE_FOR_PROLOG */ +} + +void GraphColorRegAllocator::HandleLocalRegAssignment(regno_t regNO, LocalRegAllocator &localRa, bool isInt) { + /* vreg, get a reg for it if not assigned already. */ + if (!localRa.IsInRegAssigned(regNO) && !localRa.IsInRegSpilled(regNO)) { + /* find an available phys reg */ + bool founded = false; + LiveRange *lr = lrMap[regNO]; + auto ®sSet = regInfo->GetRegsFromType(isInt ? 
kRegTyInt : kRegTyFloat); + regno_t startReg = *regsSet.begin(); + regno_t endReg = *regsSet.rbegin(); + for (uint32 preg = startReg; preg <= endReg; ++preg) { + if (!localRa.IsPregAvailable(preg)) { + continue; + } + if (lr->GetNumCall() != 0 && !regInfo->IsCalleeSavedReg(preg)) { + continue; + } + if (lr->GetPregveto(preg)) { + continue; + } + regno_t assignedReg = preg; + localRa.ClearPregs(assignedReg); + localRa.SetPregUsed(assignedReg); + localRa.SetRegAssigned(regNO); + localRa.SetRegAssignmentMap(regNO, assignedReg); + lr->SetAssignedRegNO(assignedReg); + founded = true; + break; + } + if (!founded) { + localRa.SetRegSpilled(regNO); + lr->SetSpilled(true); + } + } +} + +void GraphColorRegAllocator::UpdateLocalRegDefUseCount(regno_t regNO, LocalRegAllocator &localRa, + bool isDef) const { + auto usedIt = localRa.GetUseInfo().find(regNO); + if (usedIt != localRa.GetUseInfo().end() && !isDef) { + /* reg use, decrement count */ + ASSERT(usedIt->second > 0, "Incorrect local ra info"); + localRa.SetUseInfoElem(regNO, usedIt->second - 1); + if (regInfo->IsVirtualRegister(regNO) && localRa.IsInRegAssigned(regNO)) { + localRa.IncUseInfoElem(localRa.GetRegAssignmentItem(regNO)); + } + if (GCRA_DUMP) { + LogInfo::MapleLogger() << "\t\treg " << regNO << " update #use to " << localRa.GetUseInfoElem(regNO) << "\n"; + } + } + + auto defIt = localRa.GetDefInfo().find(regNO); + if (defIt != localRa.GetDefInfo().end() && isDef) { + /* reg def, decrement count */ + ASSERT(defIt->second > 0, "Incorrect local ra info"); + localRa.SetDefInfoElem(regNO, defIt->second - 1); + if (regInfo->IsVirtualRegister(regNO) && localRa.IsInRegAssigned(regNO)) { + localRa.IncDefInfoElem(localRa.GetRegAssignmentItem(regNO)); + } + if (GCRA_DUMP) { + LogInfo::MapleLogger() << "\t\treg " << regNO << " update #def to " << localRa.GetDefInfoElem(regNO) << "\n"; + } + } +} + +void GraphColorRegAllocator::UpdateLocalRegConflict(regno_t regNO, + const LocalRegAllocator &localRa) { + LiveRange *lr = lrMap[regNO]; + if (lr->GetNumBBConflicts() == 0) { + return; + } + if (!localRa.IsInRegAssigned(regNO)) { + return; + } + regno_t preg = localRa.GetRegAssignmentItem(regNO); + ForEachRegArrElem(lr->GetBBConflict(), + [&preg, this](regno_t regNO) { lrMap[regNO]->InsertElemToPregveto(preg); }); +} + +void GraphColorRegAllocator::HandleLocalRaDebug(regno_t regNO, const LocalRegAllocator &localRa, bool isInt) const { + LogInfo::MapleLogger() << "HandleLocalReg " << regNO << "\n"; + LogInfo::MapleLogger() << "\tregUsed:"; + const auto ®Used = localRa.GetPregUsed(); + + auto ®sSet = regInfo->GetRegsFromType(isInt ? kRegTyInt : kRegTyFloat); + regno_t regStart = *regsSet.begin(); + regno_t regEnd = *regsSet.rbegin(); + + for (uint32 i = regStart; i <= regEnd; ++i) { + if (regUsed[i]) { + LogInfo::MapleLogger() << " " << i; + } + } + LogInfo::MapleLogger() << "\n"; + LogInfo::MapleLogger() << "\tregs:"; + const auto ®s = localRa.GetPregs(); + for (uint32 regnoInLoop = regStart; regnoInLoop <= regEnd; ++regnoInLoop) { + if (regs[regnoInLoop]) { + LogInfo::MapleLogger() << " " << regnoInLoop; + } + } + LogInfo::MapleLogger() << "\n"; +} + +void GraphColorRegAllocator::HandleLocalReg(Operand &op, LocalRegAllocator &localRa, const BBAssignInfo *bbInfo, + bool isDef, bool isInt) { + if (!op.IsRegister()) { + return; + } + auto ®Opnd = static_cast(op); + regno_t regNO = regOpnd.GetRegisterNumber(); + + if (regInfo->IsUnconcernedReg(regOpnd)) { + return; + } + + /* is this a local register ? 
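+ *
+ * The vreg assignment above (HandleLocalRegAssignment) is a first-fit scan; standalone
+ * sketch with std::set standing in for the allocator's preg sets:
+ *
+ *   #include <set>
+ *   using regno_t = unsigned;
+ *   // Returns 0 when nothing fits, in which case the vreg is spilled locally.
+ *   regno_t FirstFitLocalReg(const std::set<regno_t> &freePregs, bool crossesCall,
+ *                            const std::set<regno_t> &calleeSaved,
+ *                            const std::set<regno_t> &veto) {
+ *     for (regno_t preg : freePregs) {
+ *       if (crossesCall && calleeSaved.count(preg) == 0) { continue; }  // clobbered by calls
+ *       if (veto.count(preg) != 0) { continue; }                        // forbidden for this LR
+ *       return preg;
+ *     }
+ *     return 0;
+ *   }
+ *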
*/ + if (regInfo->IsVirtualRegister(regNO) && !IsLocalReg(regNO)) { + return; + } + + if (GCRA_DUMP) { + HandleLocalRaDebug(regNO, localRa, isInt); + } + + if (regOpnd.IsPhysicalRegister()) { + /* conflict with preg is record in lr->pregveto and BBAssignInfo->globalsAssigned */ + UpdateLocalRegDefUseCount(regNO, localRa, isDef); + /* See if it is needed by global RA */ + if (localRa.GetUseInfoElem(regNO) == 0 && localRa.GetDefInfoElem(regNO) == 0) { + if (bbInfo && !bbInfo->GetGlobalsAssigned(regNO)) { + /* This phys reg is now available for assignment for a vreg */ + localRa.SetPregs(regNO); + if (GCRA_DUMP) { + LogInfo::MapleLogger() << "\t\tlast ref, phys-reg " << regNO << " now available\n"; + } + } + } + } else { + HandleLocalRegAssignment(regNO, localRa, isInt); + UpdateLocalRegDefUseCount(regNO, localRa, isDef); + UpdateLocalRegConflict(regNO, localRa); + if (localRa.GetUseInfoElem(regNO) == 0 && localRa.GetDefInfoElem(regNO) == 0 && + localRa.IsInRegAssigned(regNO)) { + /* last ref of vreg, release assignment */ + localRa.SetPregs(localRa.GetRegAssignmentItem(regNO)); + if (GCRA_DUMP) { + LogInfo::MapleLogger() << "\t\tlast ref, release reg " << + localRa.GetRegAssignmentItem(regNO) << " for " << regNO << "\n"; + } + } + } +} + +void GraphColorRegAllocator::LocalRaRegSetEraseReg(LocalRegAllocator &localRa, regno_t regNO) const { + CHECK_FATAL(regInfo->IsAvailableReg(regNO), "regNO should be available"); + if (localRa.IsPregAvailable(regNO)) { + localRa.ClearPregs(regNO); + } +} + +bool GraphColorRegAllocator::LocalRaInitRegSet(LocalRegAllocator &localRa, uint32 bbId) { + bool needLocalRa = false; + localRa.InitPregs(cgFunc->GetCG()->GenYieldPoint(), intSpillRegSet, fpSpillRegSet); + + localRa.ClearUseInfo(); + localRa.ClearDefInfo(); + LocalRaInfo *lraInfo = localRegVec[bbId]; + ASSERT(lraInfo != nullptr, "lraInfo not be nullptr"); + for (const auto &useCntPair : lraInfo->GetUseCnt()) { + regno_t regNO = useCntPair.first; + if (regInfo->IsVirtualRegister(regNO)) { + needLocalRa = true; + } + localRa.SetUseInfoElem(useCntPair.first, useCntPair.second); + } + for (const auto &defCntPair : lraInfo->GetDefCnt()) { + regno_t regNO = defCntPair.first; + if (regInfo->IsVirtualRegister(regNO)) { + needLocalRa = true; + } + localRa.SetDefInfoElem(defCntPair.first, defCntPair.second); + } + return needLocalRa; +} + +void GraphColorRegAllocator::LocalRaInitAllocatableRegs(LocalRegAllocator &localRa, uint32 bbId) { + BBAssignInfo *bbInfo = bbRegInfo[bbId]; + if (bbInfo != nullptr) { + for (regno_t regNO = regInfo->GetInvalidReg(); regNO < regInfo->GetAllRegNum(); ++regNO) { + if (bbInfo->GetGlobalsAssigned(regNO)) { + LocalRaRegSetEraseReg(localRa, regNO); + } + } + } +} + +void GraphColorRegAllocator::LocalRaForEachDefOperand(const Insn &insn, LocalRegAllocator &localRa, + const BBAssignInfo *bbInfo) { + const InsnDesc *md = insn.GetDesc(); + uint32 opndNum = insn.GetOperandSize(); + for (uint32 i = 0; i < opndNum; ++i) { + Operand &opnd = insn.GetOperand(i); + /* handle def opnd */ + if (!md->GetOpndDes(i)->IsRegDef()) { + continue; + } + auto ®Opnd = static_cast(opnd); + bool isInt = (regOpnd.GetRegisterType() == kRegTyInt); + HandleLocalReg(opnd, localRa, bbInfo, true, isInt); + } +} + +void GraphColorRegAllocator::LocalRaForEachUseOperand(const Insn &insn, LocalRegAllocator &localRa, + const BBAssignInfo *bbInfo) { + const InsnDesc *md = insn.GetDesc(); + uint32 opndNum = insn.GetOperandSize(); + for (uint32 i = 0; i < opndNum; ++i) { + Operand &opnd = insn.GetOperand(i); + if 
(opnd.IsList()) { + continue; + } else if (opnd.IsMemoryAccessOperand()) { + auto &memOpnd = static_cast(opnd); + Operand *base = memOpnd.GetBaseRegister(); + Operand *offset = memOpnd.GetIndexRegister(); + if (base != nullptr) { + HandleLocalReg(*base, localRa, bbInfo, false, true); + } + if (!memOpnd.IsIntactIndexed()) { + HandleLocalReg(*base, localRa, bbInfo, true, true); + } + if (offset != nullptr) { + HandleLocalReg(*offset, localRa, bbInfo, false, true); + } + } else if (md->GetOpndDes(i)->IsRegUse()) { + auto ®Opnd = static_cast(opnd); + bool isInt = (regOpnd.GetRegisterType() == kRegTyInt); + HandleLocalReg(opnd, localRa, bbInfo, false, isInt); + } + } +} + +void GraphColorRegAllocator::LocalRaPrepareBB(BB &bb, LocalRegAllocator &localRa) { + BBAssignInfo *bbInfo = bbRegInfo[bb.GetId()]; + FOR_BB_INSNS(insn, &bb) { + if (!insn->IsMachineInstruction()) { + continue; + } + + /* + * Use reverse operand order, assuming use first then def for allocation. + * need to free the use resource so it can be reused for def. + */ + LocalRaForEachUseOperand(*insn, localRa, bbInfo); + LocalRaForEachDefOperand(*insn, localRa, bbInfo); + } +} + +void GraphColorRegAllocator::LocalRaFinalAssignment(const LocalRegAllocator &localRa, + BBAssignInfo &bbInfo) { + for (const auto ®AssignmentMapPair : localRa.GetRegAssignmentMap()) { + regno_t regNO = regAssignmentMapPair.second; + if (GCRA_DUMP) { + LogInfo::MapleLogger() << "[" << regAssignmentMapPair.first << "," << regNO << "],"; + } + /* Might need to get rid of this copy. */ + bbInfo.SetRegMapElem(regAssignmentMapPair.first, regNO); + AddCalleeUsed(regNO, regInfo->IsGPRegister(regNO) ? kRegTyInt : kRegTyFloat); + } +} + +void GraphColorRegAllocator::LocalRaDebug(const BB &bb, const LocalRegAllocator &localRa) const { + LogInfo::MapleLogger() << "bb " << bb.GetId() << " local ra need " << + localRa.GetNumPregUsed() << " regs\n"; + LogInfo::MapleLogger() << "\tpotential assignments:"; + for (auto it : localRa.GetRegAssignmentMap()) { + LogInfo::MapleLogger() << "[" << it.first << "," << it.second << "],"; + } + LogInfo::MapleLogger() << "\n"; +} + +/* + * When do_allocate is false, it is prepass: + * Traverse each BB, keep track of the number of registers required + * for local registers in the BB. Communicate this to global RA. + * + * When do_allocate is true: + * Allocate local registers for each BB based on unused registers + * from global RA. Spill if no register available. + */ +void GraphColorRegAllocator::LocalRegisterAllocator(bool doAllocate) { + if (GCRA_DUMP) { + if (doAllocate) { + LogInfo::MapleLogger() << "LRA allocation start\n"; + PrintBBAssignInfo(); + } else { + LogInfo::MapleLogger() << "LRA preprocessing start\n"; + } + } + LocalRegAllocator *localRa = memPool->New(*cgFunc, alloc); + for (auto *bb : bfs->sortedBBs) { + uint32 bbID = bb->GetId(); + + LocalRaInfo *lraInfo = localRegVec[bb->GetId()]; + if (lraInfo == nullptr) { + /* No locals to allocate */ + continue; + } + + localRa->ClearLocalRaInfo(); + bool needLocalRa = LocalRaInitRegSet(*localRa, bbID); + if (!needLocalRa) { + /* Only physical regs in bb, no local ra needed. 
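+ *
+ * When local RA does run, LocalRaPrepareBB above walks uses before defs so that a
+ * register dying at its last use can be handed straight to the def of the same
+ * instruction; a simplified sketch of that release step (std containers replace the
+ * allocator's bookkeeping, and only the use counter is modelled):
+ *
+ *   #include <map>
+ *   #include <set>
+ *   using regno_t = unsigned;
+ *   void ReleaseAtLastUse(regno_t vreg, std::map<regno_t, unsigned> &remainingUses,
+ *                         const std::map<regno_t, regno_t> &assignment,
+ *                         std::set<regno_t> &freePregs) {
+ *     auto useIt = remainingUses.find(vreg);
+ *     if (useIt == remainingUses.end() || useIt->second == 0) { return; }
+ *     if (--useIt->second == 0) {
+ *       auto regIt = assignment.find(vreg);
+ *       if (regIt != assignment.end()) {
+ *         freePregs.insert(regIt->second);  // preg becomes available for the defs
+ *       }
+ *     }
+ *   }
+ *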
*/ + continue; + } + + if (doAllocate) { + LocalRaInitAllocatableRegs(*localRa, bbID); + } + + LocalRaPrepareBB(*bb, *localRa); + + BBAssignInfo *bbInfo = bbRegInfo[bb->GetId()]; + if (bbInfo == nullptr) { + bbInfo = memPool->New(regInfo->GetAllRegNum(), alloc); + bbRegInfo[bbID] = bbInfo; + bbInfo->InitGlobalAssigned(); + } + bbInfo->SetLocalRegsNeeded(localRa->GetNumPregUsed()); + + if (doAllocate) { + if (GCRA_DUMP) { + LogInfo::MapleLogger() << "\tbb(" << bb->GetId() << ")final local ra assignments:"; + } + LocalRaFinalAssignment(*localRa, *bbInfo); + if (GCRA_DUMP) { + LogInfo::MapleLogger() << "\n"; + } + } else if (GCRA_DUMP) { + LocalRaDebug(*bb, *localRa); + } + } +} + +MemOperand *GraphColorRegAllocator::GetConsistentReuseMem(const uint64 *conflict, + const std::set &usedMemOpnd, + uint32 size, RegType regType) { + std::set sconflict; + regno_t regNO; + for (uint32 i = 0; i < regBuckets; ++i) { + for (uint32 b = 0; b < kU64; ++b) { + if ((conflict[i] & (1ULL << b)) != 0) { + continue; + } + regNO = i * kU64 + b; + if (regNO >= numVregs) { + break; + } + if (GetLiveRange(regNO) != nullptr) { + (void)sconflict.insert(lrMap[regNO]); + } + } + } + + for (auto *noConflictLr : sconflict) { + if (noConflictLr == nullptr || noConflictLr->GetRegType() != regType || noConflictLr->GetSpillSize() != size) { + continue; + } + if (usedMemOpnd.find(noConflictLr->GetSpillMem()) == usedMemOpnd.end()) { + return noConflictLr->GetSpillMem(); + } + } + return nullptr; +} + +MemOperand *GraphColorRegAllocator::GetCommonReuseMem(const uint64 *conflict, const std::set &usedMemOpnd, + uint32 size, RegType regType) { + regno_t regNO; + for (uint32 i = 0; i < regBuckets; ++i) { + for (uint32 b = 0; b < kU64; ++b) { + if ((conflict[i] & (1ULL << b)) != 0) { + continue; + } + regNO = i * kU64 + b; + if (regNO >= numVregs) { + break; + } + LiveRange *noConflictLr = GetLiveRange(regNO); + if (noConflictLr == nullptr || noConflictLr->GetRegType() != regType || noConflictLr->GetSpillSize() != size) { + continue; + } + if (usedMemOpnd.find(noConflictLr->GetSpillMem()) == usedMemOpnd.end()) { + return noConflictLr->GetSpillMem(); + } + } + } + return nullptr; +} + +/* See if any of the non-conflict LR is spilled and use its memOpnd. */ +MemOperand *GraphColorRegAllocator::GetReuseMem(uint32 vregNO, uint32 size, RegType regType) { + if (cgFunc->GetMirModule().GetSrcLang() != kSrcLangC) { + return nullptr; + } + if (IsLocalReg(vregNO)) { + return nullptr; + } + + LiveRange *lr = lrMap[vregNO]; + const uint64 *conflict; + if (lr->GetSplitLr() != nullptr) { + /* + * For split LR, the vreg liveness is optimized, but for spill location + * the stack location needs to be maintained for the entire LR. + */ + return nullptr; + } else { + conflict = lr->GetBBConflict(); + } + + std::set usedMemOpnd; + auto updateMemOpnd = [&usedMemOpnd, this](regno_t regNO) { + if (regNO >= numVregs) { + return; + } + LiveRange *lrInner = GetLiveRange(regNO); + if (lrInner && lrInner->GetSpillMem() != nullptr) { + (void)usedMemOpnd.insert(lrInner->GetSpillMem()); + } + }; + ForEachRegArrElem(conflict, updateMemOpnd); + uint32 regSize = (size <= k32BitSize) ? k32BitSize : k64BitSize; + /* + * This is to order the search so memOpnd given out is consistent. + * When vreg#s do not change going through VtableImpl.mpl file + * then this can be simplified. 
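+ *
+ * Core of the slot reuse performed by GetConsistentReuseMem/GetCommonReuseMem above,
+ * as a standalone sketch (SpillSlot/Candidate are stand-ins for MemOperand/LiveRange):
+ *
+ *   #include <set>
+ *   #include <vector>
+ *   struct SpillSlot;
+ *   struct Candidate { int regType; unsigned spillSize; SpillSlot *slot; };
+ *   // Hand out the first slot of a non-conflicting spilled LR that matches in register
+ *   // class and size and is not already referenced around this instruction.
+ *   SpillSlot *PickReusableSlot(const std::vector<Candidate> &nonConflicting,
+ *                               const std::set<SpillSlot*> &alreadyUsed,
+ *                               int wantedType, unsigned wantedSize) {
+ *     for (const Candidate &c : nonConflicting) {
+ *       if (c.slot == nullptr || c.regType != wantedType || c.spillSize != wantedSize) { continue; }
+ *       if (alreadyUsed.count(c.slot) == 0) { return c.slot; }
+ *     }
+ *     return nullptr;  // caller falls back to a fresh spill location
+ *   }
+ *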
+ */ +#ifdef CONSISTENT_MEMOPND + return GetConsistentReuseMem(conflict, usedMemOpnd, regSize, regType); +#else /* CONSISTENT_MEMOPND */ + return GetCommonReuseMem(conflict, usedMemOpnd, regSize, regType); +#endif /* CONSISTENT_MEMOPNDi */ +} + +MemOperand *GraphColorRegAllocator::GetSpillMem(uint32 vregNO, bool isDest, Insn &insn, + regno_t regNO, bool &isOutOfRange) { + MemOperand *memOpnd = cgFunc->GetOrCreatSpillMem(vregNO); + if (cgFunc->GetCG()->IsLmbc() && cgFunc->GetSpSaveReg()) { + LiveRange *lr = lrMap[cgFunc->GetSpSaveReg()]; + RegOperand *baseReg = nullptr; + if (lr == nullptr) { + BB *firstBB = cgFunc->GetFirstBB(); + FOR_BB_INSNS(bbInsn, firstBB) { + if (bbInsn->IsIntRegisterMov() && bbInsn->GetOperand(kInsnSecondOpnd).IsRegister() && + static_cast(bbInsn->GetOperand(kInsnSecondOpnd)).GetRegisterNumber() == + regInfo->GetStackPointReg()) { + baseReg = static_cast(&bbInsn->GetOperand(kInsnFirstOpnd)); + CHECK_FATAL(baseReg->IsPhysicalRegister(), "Incorrect dest register for SP move"); + break; + } + } + CHECK_FATAL(baseReg, "Cannot find dest register for SP move"); + } else { + baseReg = &cgFunc->GetOpndBuilder()->CreatePReg(lr->GetAssignedRegNO(), + k64BitSize, kRegTyInt); + } + MemOperand *newmemOpnd = (static_cast(memOpnd)->Clone(*cgFunc->GetMemoryPool())); + newmemOpnd->SetBaseRegister(*baseReg); + return regInfo->AdjustMemOperandIfOffsetOutOfRange(newmemOpnd, vregNO, isDest, insn, regNO, + isOutOfRange); + } + return regInfo->AdjustMemOperandIfOffsetOutOfRange(memOpnd, vregNO, isDest, insn, regNO, + isOutOfRange); +} + +void GraphColorRegAllocator::SpillOperandForSpillPre(Insn &insn, const Operand &opnd, RegOperand &phyOpnd, + uint32 spillIdx, bool needSpill) { + if (!needSpill) { + return; + } + auto ®Opnd = static_cast(opnd); + uint32 regNO = regOpnd.GetRegisterNumber(); + LiveRange *lr = lrMap[regNO]; + + MemOperand *spillMem = CreateSpillMem(spillIdx, kSpillMemPre); + ASSERT(spillMem != nullptr, "spillMem nullptr check"); + + uint32 regSize = regOpnd.GetSize(); + PrimType stype; + RegType regType = regOpnd.GetRegisterType(); + if (regType == kRegTyInt) { + stype = (regSize <= k32BitSize) ? PTY_i32 : PTY_i64; + } else { + stype = (regSize <= k32BitSize) ? 
PTY_f32 : PTY_f64; + } + bool isOutOfRange = false; + spillMem = regInfo->AdjustMemOperandIfOffsetOutOfRange(spillMem, regOpnd.GetRegisterNumber(), + false, insn, regInfo->GetReservedSpillReg(), isOutOfRange); + Insn &stInsn = *regInfo->BuildStrInsn(spillMem->GetSize(), stype, phyOpnd, *spillMem); + std::string comment = " SPILL for spill vreg: " + std::to_string(regNO) + " op:" + + kOpcodeInfo.GetName(lr->GetOp()); + stInsn.SetComment(comment); + insn.GetBB()->InsertInsnBefore(insn, stInsn); +} + +void GraphColorRegAllocator::SpillOperandForSpillPost(Insn &insn, const Operand &opnd, RegOperand &phyOpnd, + uint32 spillIdx, bool needSpill) { + if (!needSpill) { + return; + } + + auto ®Opnd = static_cast(opnd); + uint32 regNO = regOpnd.GetRegisterNumber(); + LiveRange *lr = lrMap[regNO]; + bool isLastInsn = false; + if (insn.GetBB()->GetKind() == BB::kBBIf && insn.GetBB()->IsLastInsn(&insn)) { + isLastInsn = true; + } + + if (lr->GetRematLevel() != kRematOff) { + std::string comment = " REMATERIALIZE for spill vreg: " + + std::to_string(regNO); + if (isLastInsn) { + for (auto tgtBB : insn.GetBB()->GetSuccs()) { + std::vector rematInsns = lr->Rematerialize(*cgFunc, phyOpnd); + for (auto &&remat : rematInsns) { + remat->SetComment(comment); + tgtBB->InsertInsnBegin(*remat); + } + } + } else { + std::vector rematInsns = lr->Rematerialize(*cgFunc, phyOpnd); + for (auto &&remat : rematInsns) { + remat->SetComment(comment); + insn.GetBB()->InsertInsnAfter(insn, *remat); + } + } + return; + } + + MemOperand *spillMem = CreateSpillMem(spillIdx, kSpillMemPost); + ASSERT(spillMem != nullptr, "spillMem nullptr check"); + + uint32 regSize = regOpnd.GetSize(); + PrimType stype; + RegType regType = regOpnd.GetRegisterType(); + if (regType == kRegTyInt) { + stype = (regSize <= k32BitSize) ? PTY_i32 : PTY_i64; + } else { + stype = (regSize <= k32BitSize) ? PTY_f32 : PTY_f64; + } + + bool isOutOfRange = false; + Insn *nextInsn = insn.GetNextMachineInsn(); + spillMem = regInfo->AdjustMemOperandIfOffsetOutOfRange(spillMem, regOpnd.GetRegisterNumber(), + true, insn, regInfo->GetReservedSpillReg(), isOutOfRange); + std::string comment = " RELOAD for spill vreg: " + std::to_string(regNO) + + " op:" + kOpcodeInfo.GetName(lr->GetOp()); + if (isLastInsn) { + for (auto tgtBB : insn.GetBB()->GetSuccs()) { + Insn *newLd = regInfo->BuildLdrInsn(spillMem->GetSize(), stype, phyOpnd, *spillMem); + newLd->SetComment(comment); + tgtBB->InsertInsnBegin(*newLd); + } + } else { + Insn *ldrInsn = regInfo->BuildLdrInsn(spillMem->GetSize(), stype, phyOpnd, *spillMem); + ldrInsn->SetComment(comment); + if (isOutOfRange) { + if (nextInsn == nullptr) { + insn.GetBB()->AppendInsn(*ldrInsn); + } else { + insn.GetBB()->InsertInsnBefore(*nextInsn, *ldrInsn); + } + } else { + insn.GetBB()->InsertInsnAfter(insn, *ldrInsn); + } + } +} + +MemOperand *GraphColorRegAllocator::GetSpillOrReuseMem(LiveRange &lr, uint32 regSize, bool &isOutOfRange, Insn &insn, + bool isDef) { + MemOperand *memOpnd = nullptr; + if (lr.GetSpillMem() != nullptr) { + /* the saved memOpnd cannot be out-of-range */ + memOpnd = lr.GetSpillMem(); + } else { +#ifdef REUSE_SPILLMEM + memOpnd = GetReuseMem(lr.GetRegNO(), regSize, lr.GetRegType()); + if (memOpnd != nullptr) { + lr.SetSpillMem(*memOpnd); + lr.SetSpillSize((regSize <= k32BitSize) ? 
k32BitSize : k64BitSize); + } else { +#endif /* REUSE_SPILLMEM */ + regno_t baseRegNO = 0; + if (!isDef) { + /* src will use its' spill reg as baseRegister when offset out-of-range + * add x16, x29, #max-offset //out-of-range + * ldr x16, [x16, #offset] //reload + * mov xd, x16 + */ + baseRegNO = lr.GetSpillReg(); + if (baseRegNO > *regInfo->GetRegsFromType(kRegTyInt).rbegin()) { + baseRegNO = regInfo->GetReservedSpillReg(); + } + } else { + /* dest will use R16 as baseRegister when offset out-of-range + * mov x16, xs + * add x17, x29, #max-offset //out-of-range + * str x16, [x17, #offset] //spill + */ + baseRegNO = regInfo->GetReservedSpillReg(); + } + ASSERT(baseRegNO != 0, "invalid base register number"); + memOpnd = GetSpillMem(lr.GetRegNO(), isDef, insn, baseRegNO, isOutOfRange); + /* dest's spill reg can only be R15 and R16 () */ + if (isOutOfRange && isDef) { + ASSERT(lr.GetSpillReg() != regInfo->GetReservedSpillReg(), + "can not find valid memopnd's base register"); + } +#ifdef REUSE_SPILLMEM + if (!isOutOfRange) { + lr.SetSpillMem(*memOpnd); + lr.SetSpillSize((regSize <= k32BitSize) ? k32BitSize : k64BitSize); + } + } +#endif /* REUSE_SPILLMEM */ + } + return memOpnd; +} + +/* + * Create spill insn for the operand. + * When need_spill is true, need to spill the spill operand register first + * then use it for the current spill, then reload it again. + */ +Insn *GraphColorRegAllocator::SpillOperand(Insn &insn, const Operand &opnd, bool isDef, + RegOperand &phyOpnd, bool forCall) { + auto ®Opnd = static_cast(opnd); + uint32 regNO = regOpnd.GetRegisterNumber(); + uint32 pregNO = phyOpnd.GetRegisterNumber(); + bool isCalleeReg = regInfo->IsCalleeSavedReg(pregNO); + if (GCRA_DUMP) { + LogInfo::MapleLogger() << "SpillOperand " << regNO << "\n"; + } + LiveRange *lr = lrMap[regNO]; + bool isForCallerSave = lr->GetSplitLr() == nullptr && (lr->GetNumCall() > 0) && !isCalleeReg; + uint32 regSize = regOpnd.GetSize(); + bool isOutOfRange = false; + PrimType stype; + RegType regType = regOpnd.GetRegisterType(); + if (regType == kRegTyInt) { + stype = (regSize <= k32BitSize) ? PTY_i32 : PTY_i64; + } else { + stype = (regSize <= k32BitSize) ? PTY_f32 : PTY_f64; + } + + if (isDef) { + Insn *spillDefInsn = nullptr; + if (lr->GetRematLevel() == kRematOff) { + lr->SetSpillReg(pregNO); + Insn *nextInsn = insn.GetNextMachineInsn(); + MemOperand *memOpnd = GetSpillOrReuseMem(*lr, regSize, isOutOfRange, insn, forCall ? 
false : true); + spillDefInsn = regInfo->BuildStrInsn(regSize, stype, phyOpnd, *memOpnd); + spillDefInsn->SetIsSpill(); + std::string comment = " SPILL vreg: " + std::to_string(regNO) + " op:" + + kOpcodeInfo.GetName(lr->GetOp()); + if (isForCallerSave) { + comment += " for caller save in BB " + std::to_string(insn.GetBB()->GetId()); + } + spillDefInsn->SetComment(comment); + if (forCall) { + insn.GetBB()->InsertInsnBefore(insn, *spillDefInsn); + } else if (isOutOfRange) { + if (nextInsn == nullptr) { + insn.GetBB()->AppendInsn(*spillDefInsn); + } else { + insn.GetBB()->InsertInsnBefore(*nextInsn, *spillDefInsn); + } + } else { + insn.GetBB()->InsertInsnAfter(insn, *spillDefInsn); + } + } + return spillDefInsn; + } + Insn *nextInsn = insn.GetNextMachineInsn(); + lr->SetSpillReg(pregNO); + + std::vector spillUseInsns; + std::string comment; + if (lr->GetRematLevel() != kRematOff) { + spillUseInsns = lr->Rematerialize(*cgFunc, phyOpnd); + comment = " REMATERIALIZE vreg: " + std::to_string(regNO); + } else { + MemOperand *memOpnd = GetSpillOrReuseMem(*lr, regSize, isOutOfRange, insn, + forCall ? true : false); + Insn &spillUseInsn = *regInfo->BuildLdrInsn(regSize, stype, phyOpnd, *memOpnd); + spillUseInsn.SetIsReload(); + spillUseInsns.push_back(&spillUseInsn); + comment = " RELOAD vreg: " + std::to_string(regNO) + " op:" + + kOpcodeInfo.GetName(lr->GetOp()); + } + if (isForCallerSave) { + comment += " for caller save in BB " + std::to_string(insn.GetBB()->GetId()); + } + for (auto &&spillUseInsn : spillUseInsns) { + spillUseInsn->SetComment(comment); + if (forCall) { + if (nextInsn == nullptr) { + insn.GetBB()->AppendInsn(*spillUseInsn); + } else { + insn.GetBB()->InsertInsnBefore(*nextInsn, *spillUseInsn); + } + } else { + insn.GetBB()->InsertInsnBefore(insn, *spillUseInsn); + } + } + return &insn; +} + +/* Try to find available reg for spill. */ +bool GraphColorRegAllocator::SetAvailableSpillReg(std::unordered_set &cannotUseReg, + LiveRange &lr, MapleBitVector &usedRegMask) { + bool isInt = (lr.GetRegType() == kRegTyInt); + MapleSet &callerRegSet = isInt ? intCallerRegSet : fpCallerRegSet; + MapleSet &calleeRegSet = isInt ? intCalleeRegSet : fpCalleeRegSet; + + for (const auto spillReg : callerRegSet) { + if (cannotUseReg.find(spillReg) == cannotUseReg.end() && (!usedRegMask[spillReg])) { + lr.SetAssignedRegNO(spillReg); + usedRegMask[spillReg] = true; + return true; + } + } + for (const auto spillReg : calleeRegSet) { + if (cannotUseReg.find(spillReg) == cannotUseReg.end() && (!usedRegMask[spillReg])) { + lr.SetAssignedRegNO(spillReg); + usedRegMask[spillReg] = true; + return true; + } + } + return false; +} + +void GraphColorRegAllocator::CollectCannotUseReg(std::unordered_set &cannotUseReg, const LiveRange &lr, + Insn &insn) { + /* Find the bb in the conflict LR that actually conflicts with the current bb. 
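+ *
+ * SetAvailableSpillReg above is a two-tier scan, caller-saved registers first; a
+ * standalone sketch with std containers in place of the Maple sets and bit vector:
+ *
+ *   #include <set>
+ *   #include <vector>
+ *   using regno_t = unsigned;
+ *   regno_t PickSpillReg(const std::set<regno_t> &callerSaved,
+ *                        const std::set<regno_t> &calleeSaved,
+ *                        const std::set<regno_t> &cannotUse, std::vector<bool> &usedMask) {
+ *     for (const auto *tier : {&callerSaved, &calleeSaved}) {
+ *       for (regno_t r : *tier) {
+ *         if (cannotUse.count(r) == 0 && !usedMask[r]) {
+ *           usedMask[r] = true;  // reserve it for this spill sequence
+ *           return r;
+ *         }
+ *       }
+ *     }
+ *     return 0;  // nothing free; the caller evicts a register instead
+ *   }
+ *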
*/ + for (regno_t regNO = regInfo->GetInvalidReg(); regNO < regInfo->GetAllRegNum(); ++regNO) { + if (lr.GetPregveto(regNO)) { + (void)cannotUseReg.insert(regNO); + } + } + auto updateCannotUse = [&insn, &cannotUseReg, this](regno_t regNO) { + LiveRange *conflictLr = lrMap[regNO]; + /* + * conflictLr->GetAssignedRegNO() might be zero + * caller save will be inserted so the assigned reg can be released actually + */ + if ((conflictLr->GetAssignedRegNO() > 0) && IsBitArrElemSet(conflictLr->GetBBMember(), insn.GetBB()->GetId())) { + if (!regInfo->IsCalleeSavedReg(conflictLr->GetAssignedRegNO()) && + (conflictLr->GetNumCall() > 0) && !conflictLr->GetProcessed()) { + return; + } + (void)cannotUseReg.insert(conflictLr->GetAssignedRegNO()); + } + }; + ForEachRegArrElem(lr.GetBBConflict(), updateCannotUse); +#ifdef USE_LRA + if (!doLRA) { + return; + } + BBAssignInfo *bbInfo = bbRegInfo[insn.GetBB()->GetId()]; + if (bbInfo != nullptr) { + for (const auto ®MapPair : bbInfo->GetRegMap()) { + (void)cannotUseReg.insert(regMapPair.second); + } + } +#endif /* USE_LRA */ +} + +regno_t GraphColorRegAllocator::PickRegForSpill(MapleBitVector &usedRegMask, RegType regType, + uint32 spillIdx, bool &needSpillLr) { + bool isIntReg = (regType == kRegTyInt); + if (JAVALANG) { + /* Use predetermined spill register */ + MapleSet &spillRegSet = isIntReg ? intSpillRegSet : fpSpillRegSet; + ASSERT(spillIdx < spillRegSet.size(), "spillIdx large than spillRegSet.size()"); + auto spillRegIt = spillRegSet.begin(); + for (; spillIdx > 0; --spillIdx) { + ++spillRegIt; + } + return *spillRegIt; + } + + /* Temporary find a unused reg to spill */ + auto &phyRegSet = regInfo->GetRegsFromType(regType); + for (auto iter = phyRegSet.rbegin(); iter != phyRegSet.rend(); ++iter) { + auto spillReg = *iter; + if (!usedRegMask[spillReg]) { + usedRegMask[spillReg] = true; + needSpillLr = true; + return spillReg; + } + } + + ASSERT(false, "can not find spillReg"); + return 0; +} + +/* return true if need extra spill */ +bool GraphColorRegAllocator::SetRegForSpill(LiveRange &lr, Insn &insn, uint32 spillIdx, + MapleBitVector &usedRegMask, bool isDef) { + std::unordered_set cannotUseReg; + /* SPILL COALESCE */ + if (!isDef && insn.IsIntRegisterMov()) { + auto &ropnd = static_cast(insn.GetOperand(0)); + if (ropnd.IsPhysicalRegister()) { + lr.SetAssignedRegNO(ropnd.GetRegisterNumber()); + return false; + } + } + + CollectCannotUseReg(cannotUseReg, lr, insn); + + if (SetAvailableSpillReg(cannotUseReg, lr, usedRegMask)) { + return false; + } + + bool needSpillLr = false; + if (lr.GetAssignedRegNO() == 0) { + /* + * All regs are assigned and none are free. + * Pick a reg to spill and reuse for this spill. + * Need to make sure the reg picked is not assigned to this insn, + * else there will be conflict. 
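+ *
+ * Eviction sketch for exactly that situation (classRegsHighToLow and the save/restore
+ * flag are illustrative; the real code walks the register class in reverse and sets
+ * needSpillLr so the access is wrapped in a save/restore pair):
+ *
+ *   #include <vector>
+ *   using regno_t = unsigned;
+ *   regno_t BorrowRegForSpill(const std::vector<regno_t> &classRegsHighToLow,
+ *                             std::vector<bool> &usedMask, bool &needSaveRestore) {
+ *     for (regno_t r : classRegsHighToLow) {
+ *       if (!usedMask[r]) {
+ *         usedMask[r] = true;
+ *         needSaveRestore = true;  // its live value must be preserved around this insn
+ *         return r;
+ *       }
+ *     }
+ *     return 0;  // an instruction cannot reference every register of the class
+ *   }
+ *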
+ */ + RegType regType = lr.GetRegType(); + regno_t spillReg = PickRegForSpill(usedRegMask, regType, spillIdx, needSpillLr); + lr.SetAssignedRegNO(spillReg); + } + return needSpillLr; +} + +RegOperand *GraphColorRegAllocator::GetReplaceOpndForLRA(Insn &insn, const Operand &opnd, + uint32 &spillIdx, MapleBitVector &usedRegMask, bool isDef) { + auto ®Opnd = static_cast(opnd); + uint32 vregNO = regOpnd.GetRegisterNumber(); + RegType regType = regOpnd.GetRegisterType(); + BBAssignInfo *bbInfo = bbRegInfo[insn.GetBB()->GetId()]; + if (bbInfo == nullptr) { + return nullptr; + } + auto regIt = bbInfo->GetRegMap().find(vregNO); + if (regIt != bbInfo->GetRegMap().end()) { + RegOperand &phyOpnd = cgFunc->GetOpndBuilder()->CreatePReg(regIt->second, + regOpnd.GetSize(), regType); + return &phyOpnd; + } + if (GCRA_DUMP) { + LogInfo::MapleLogger() << "spill vreg " << vregNO << "\n"; + } + regno_t spillReg; + bool needSpillLr = false; + if (insn.IsBranch() || insn.IsCall()) { + spillReg = regInfo->GetReservedSpillReg(); + } else { + /* + * use the reg that exclude livein/liveout/bbInfo->regMap + * Need to make sure the reg picked is not assigned to this insn, + * else there will be conflict. + */ + spillReg = PickRegForSpill(usedRegMask, regType, spillIdx, needSpillLr); + AddCalleeUsed(spillReg, regType); + if (GCRA_DUMP) { + LogInfo::MapleLogger() << "\tassigning lra spill reg " << spillReg << "\n"; + } + } + RegOperand &phyOpnd = cgFunc->GetOpndBuilder()->CreatePReg(spillReg, regOpnd.GetSize(), regType); + SpillOperandForSpillPre(insn, regOpnd, phyOpnd, spillIdx, needSpillLr); + Insn *spill = SpillOperand(insn, regOpnd, isDef, phyOpnd); + if (spill != nullptr) { + SpillOperandForSpillPost(*spill, regOpnd, phyOpnd, spillIdx, needSpillLr); + } + ++spillIdx; + return &phyOpnd; +} + +RegOperand *GraphColorRegAllocator::GetReplaceUseDefOpndForLRA(Insn &insn, const Operand &opnd, + uint32 &spillIdx, MapleBitVector &usedRegMask) { + auto ®Opnd = static_cast(opnd); + uint32 vregNO = regOpnd.GetRegisterNumber(); + RegType regType = regOpnd.GetRegisterType(); + BBAssignInfo *bbInfo = bbRegInfo[insn.GetBB()->GetId()]; + if (bbInfo == nullptr) { + return nullptr; + } + auto regIt = bbInfo->GetRegMap().find(vregNO); + if (regIt != bbInfo->GetRegMap().end()) { + RegOperand &phyOpnd = cgFunc->GetOpndBuilder()->CreatePReg(regIt->second, + regOpnd.GetSize(), regType); + return &phyOpnd; + } + if (GCRA_DUMP) { + LogInfo::MapleLogger() << "spill vreg " << vregNO << "\n"; + } + regno_t spillReg; + bool needSpillLr = false; + if (insn.IsBranch() || insn.IsCall()) { + spillReg = regInfo->GetReservedSpillReg(); + } else { + /* + * use the reg that exclude livein/liveout/bbInfo->regMap + * Need to make sure the reg picked is not assigned to this insn, + * else there will be conflict. 
+ */ + spillReg = PickRegForSpill(usedRegMask, regType, spillIdx, needSpillLr); + AddCalleeUsed(spillReg, regType); + if (GCRA_DUMP) { + LogInfo::MapleLogger() << "\tassigning lra spill reg " << spillReg << "\n"; + } + } + RegOperand &phyOpnd = cgFunc->GetOpndBuilder()->CreatePReg(spillReg, regOpnd.GetSize(), regType); + SpillOperandForSpillPre(insn, regOpnd, phyOpnd, spillIdx, needSpillLr); + Insn *defSpill = SpillOperand(insn, regOpnd, true, phyOpnd); + if (defSpill != nullptr) { + SpillOperandForSpillPost(*defSpill, regOpnd, phyOpnd, spillIdx, needSpillLr); + } + Insn *useSpill = SpillOperand(insn, regOpnd, false, phyOpnd); + ASSERT(useSpill != nullptr, "null ptr check!"); + SpillOperandForSpillPost(*useSpill, regOpnd, phyOpnd, spillIdx, needSpillLr); + ++spillIdx; + return &phyOpnd; +} + +/* get spill reg and check if need extra spill */ +bool GraphColorRegAllocator::GetSpillReg(Insn &insn, LiveRange &lr, const uint32 &spillIdx, + MapleBitVector &usedRegMask, bool isDef) { + bool needSpillLr = false; + /* + * Find a spill reg for the BB among interfereing LR. + * Without LRA, this info is very inaccurate. It will falsely interfere + * with all locals which the spill might not be interfering. + * For now, every instance of the spill requires a brand new reg assignment. + */ + if (GCRA_DUMP) { + LogInfo::MapleLogger() << "LR-regNO " << lr.GetRegNO() << " spilled, finding a spill reg\n"; + } + if (insn.IsBranch() || insn.IsCall()) { + /* + * When a cond branch reg is spilled, it cannot + * restore the value after the branch since it can be the target from other br. + * Todo it properly, it will require creating a intermediate bb for the reload. + * Use x16, it is taken out from available since it is used as a global in the system. + */ + lr.SetAssignedRegNO(regInfo->GetReservedSpillReg()); + } else { + lr.SetAssignedRegNO(0); + needSpillLr = SetRegForSpill(lr, insn, spillIdx, usedRegMask, isDef); + AddCalleeUsed(lr.GetAssignedRegNO(), lr.GetRegType()); + } + return needSpillLr; +} + +// find prev use/def after prev call +bool GraphColorRegAllocator::EncountPrevRef(const BB &pred, LiveRange &lr, bool isDef, std::vector& visitedMap) { + if (!visitedMap[pred.GetId()] && lr.FindInLuMap(pred.GetId()) != lr.EndOfLuMap()) { + LiveUnit *lu = lr.GetLiveUnitFromLuMap(pred.GetId()); + if ((lu->GetDefNum() > 0) || (lu->GetUseNum() > 0) || lu->HasCall()) { + MapleMap refs = lr.GetRefs(pred.GetId()); + auto it = refs.rbegin(); + bool findPrevRef = (it->second & kIsCall) == 0; + return findPrevRef; + } + if (lu->HasCall()) { + return false; + } + } + visitedMap[pred.GetId()] = true; + bool found = true; + for (auto predBB: pred.GetPreds()) { + if (!visitedMap[predBB->GetId()]) { + found = EncountPrevRef(*predBB, lr, isDef, visitedMap) && found; + } + } + return found; +} + +bool GraphColorRegAllocator::FoundPrevBeforeCall(Insn &insn, LiveRange &lr, bool isDef) { + bool hasFind = true; + std::vector visitedMap(bbVec.size() + 1, false); + for (auto pred: insn.GetBB()->GetPreds()) { + hasFind = EncountPrevRef(*pred, lr, isDef, visitedMap) && hasFind; + if (!hasFind) { + return false; + } + } + return insn.GetBB()->GetPreds().size() == 0 ? false : true; +} + +// find next def before next call ? 
and no next use +bool GraphColorRegAllocator::EncountNextRef(const BB &succ, LiveRange &lr, bool isDef, std::vector& visitedMap) { + if (lr.FindInLuMap(succ.GetId()) != lr.EndOfLuMap()) { + LiveUnit *lu = lr.GetLiveUnitFromLuMap(succ.GetId()); + bool findNextDef = false; + if ((lu->GetDefNum() > 0) || lu->HasCall()) { + MapleMap refs = lr.GetRefs(succ.GetId()); + for (auto it = refs.begin(); it != refs.end(); ++it) { + if ((it->second & kIsDef) != 0) { + findNextDef = true; + break; + } + if ((it->second & kIsCall) != 0) { + break; + } + if ((it->second & kIsUse) != 0) { + continue; + } + } + return findNextDef; + } + if (lu->HasCall()) { + return false; + } + } + visitedMap[succ.GetId()] = true; + bool found = true; + for (auto succBB: succ.GetSuccs()) { + if (!visitedMap[succBB->GetId()]) { + found = EncountNextRef(*succBB, lr, isDef, visitedMap) && found; + if (!found) { + return false; + } + } + } + return found; +} + +bool GraphColorRegAllocator::FoundNextBeforeCall(Insn &insn, LiveRange &lr, bool isDef) { + bool haveFind = true; + std::vector visitedMap(bbVec.size() + 1, false); + for (auto succ: insn.GetBB()->GetSuccs()) { + haveFind = EncountNextRef(*succ, lr, isDef, visitedMap) && haveFind; + if (!haveFind) { + return false; + } + } + return insn.GetBB()->GetSuccs().size() > 0; +} + +bool GraphColorRegAllocator::HavePrevRefInCurBB(Insn &insn, LiveRange &lr, bool &contSearch) const { + LiveUnit *lu = lr.GetLiveUnitFromLuMap(insn.GetBB()->GetId()); + bool findPrevRef = false; + if ((lu->GetDefNum() > 0) || (lu->GetUseNum() > 0) || lu->HasCall()) { + MapleMap refs = lr.GetRefs(insn.GetBB()->GetId()); + for (auto it = refs.rbegin(); it != refs.rend(); ++it) { + if (it->first >= insn.GetId()) { + continue; + } + if ((it->second & kIsCall) != 0) { + contSearch = false; + break; + } + if (((it->second & kIsUse) != 0) || ((it->second & kIsDef) != 0)) { + findPrevRef = true; + contSearch = false; + break; + } + } + } + return findPrevRef; +} + +bool GraphColorRegAllocator::HaveNextDefInCurBB(Insn &insn, LiveRange &lr, bool &contSearch) const { + LiveUnit *lu = lr.GetLiveUnitFromLuMap(insn.GetBB()->GetId()); + bool findNextDef = false; + if ((lu->GetDefNum() > 0) || (lu->GetUseNum() > 0) || lu->HasCall()) { + MapleMap refs = lr.GetRefs(insn.GetBB()->GetId()); + for (auto it = refs.begin(); it != refs.end(); ++it) { + if (it->first <= insn.GetId()) { + continue; + } + if ((it->second & kIsCall) != 0) { + contSearch = false; + break; + } + if ((it->second & kIsDef) != 0) { + findNextDef = true; + contSearch = false; + } + } + } + return findNextDef; +} + +bool GraphColorRegAllocator::NeedCallerSave(Insn &insn, LiveRange &lr, bool isDef) { + if (doLRA) { + return true; + } + if (lr.HasDefUse()) { + return true; + } + + bool contSearch = true; + bool needed = true; + if (isDef) { + needed = !HaveNextDefInCurBB(insn, lr, contSearch); + } else { + needed = !HavePrevRefInCurBB(insn, lr, contSearch); + } + if (!contSearch) { + return needed; + } + + if (isDef) { + needed = true; + } else { + needed = !FoundPrevBeforeCall(insn, lr, isDef); + } + return needed; +} + +RegOperand *GraphColorRegAllocator::GetReplaceOpnd(Insn &insn, const Operand &opnd, uint32 &spillIdx, + MapleBitVector &usedRegMask, bool isDef) { + if (!opnd.IsRegister()) { + return nullptr; + } + auto ®Opnd = static_cast(opnd); + + uint32 vregNO = regOpnd.GetRegisterNumber(); + if (regInfo->IsFramePointReg(vregNO)) { + cgFunc->SetSeenFP(true); + } + RegType regType = regOpnd.GetRegisterType(); + if (!regInfo->IsVirtualRegister(vregNO) 
|| regInfo->IsUnconcernedReg(regOpnd)) { + return nullptr; + } + +#ifdef USE_LRA + if (doLRA && IsLocalReg(vregNO)) { + return GetReplaceOpndForLRA(insn, opnd, spillIdx, usedRegMask, isDef); + } +#endif /* USE_LRA */ + + ASSERT(vregNO < numVregs, "index out of range in GraphColorRegAllocator::GetReplaceOpnd"); + LiveRange *lr = lrMap[vregNO]; + + bool isSplitPart = false; + bool needSpillLr = false; + if (lr->GetSplitLr() && IsBitArrElemSet(lr->GetSplitLr()->GetBBMember(), insn.GetBB()->GetId())) { + isSplitPart = true; + } + + if (lr->IsSpilled() && !isSplitPart) { + needSpillLr = GetSpillReg(insn, *lr, spillIdx, usedRegMask, isDef); + } + + regno_t regNO; + if (isSplitPart) { + regNO = lr->GetSplitLr()->GetAssignedRegNO(); + } else { + regNO = lr->GetAssignedRegNO(); + } + bool isCalleeReg = regInfo->IsCalleeSavedReg(regNO); + RegOperand &phyOpnd = cgFunc->GetOpndBuilder()->CreatePReg(regNO, opnd.GetSize(), regType); + if (GCRA_DUMP) { + std::string regStr = (regType == kRegTyInt) ? "R" : "V"; + regStr += std::to_string(regNO - *regInfo->GetRegsFromType(kRegTyInt).begin()); + LogInfo::MapleLogger() << "replace R" << vregNO << " with " << regStr << "\n"; + } + + insn.AppendComment(" [R" + std::to_string(vregNO) + "] "); + + if (isSplitPart && (isCalleeReg || lr->GetSplitLr()->GetNumCall() == 0)) { + if (isDef) { + SpillOperand(insn, opnd, isDef, phyOpnd); + ++spillIdx; + } else { + if (lr->GetSplitLr()->GetLiveUnitFromLuMap(insn.GetBB()->GetId())->NeedReload()) { + SpillOperand(insn, opnd, isDef, phyOpnd); + ++spillIdx; + } + } + return &phyOpnd; + } + + bool needCallerSave = false; + if ((lr->GetNumCall() > 0) && !isCalleeReg) { + if (isDef) { + needCallerSave = NeedCallerSave(insn, *lr, isDef) && lr->GetRematLevel() == kRematOff; + } else { + needCallerSave = !lr->GetProcessed(); + } + } + + if (lr->IsSpilled() || (isSplitPart && (lr->GetSplitLr()->GetNumCall() != 0)) || needCallerSave || + (!isSplitPart && !(lr->IsSpilled()) && lr->GetLiveUnitFromLuMap(insn.GetBB()->GetId())->NeedReload())) { + SpillOperandForSpillPre(insn, regOpnd, phyOpnd, spillIdx, needSpillLr); + Insn *spill = SpillOperand(insn, opnd, isDef, phyOpnd); + if (spill != nullptr) { + SpillOperandForSpillPost(*spill, regOpnd, phyOpnd, spillIdx, needSpillLr); + } + ++spillIdx; + } + + return &phyOpnd; +} + +RegOperand *GraphColorRegAllocator::GetReplaceUseDefOpnd(Insn &insn, const Operand &opnd, + uint32 &spillIdx, MapleBitVector &usedRegMask) { + if (!opnd.IsRegister()) { + return nullptr; + } + auto ®Opnd = static_cast(opnd); + + uint32 vregNO = regOpnd.GetRegisterNumber(); + if (regInfo->IsFramePointReg(vregNO)) { + cgFunc->SetSeenFP(true); + } + RegType regType = regOpnd.GetRegisterType(); + if (!regInfo->IsVirtualRegister(vregNO) || regInfo->IsUnconcernedReg(regOpnd)) { + return nullptr; + } + +#ifdef USE_LRA + if (doLRA && IsLocalReg(vregNO)) { + return GetReplaceUseDefOpndForLRA(insn, opnd, spillIdx, usedRegMask); + } +#endif /* USE_LRA */ + + ASSERT(vregNO < numVregs, "index out of range in GraphColorRegAllocator::GetReplaceUseDefOpnd"); + LiveRange *lr = lrMap[vregNO]; + + bool isSplitPart = false; + bool needSpillLr = false; + if (lr->GetSplitLr() && IsBitArrElemSet(lr->GetSplitLr()->GetBBMember(), insn.GetBB()->GetId())) { + isSplitPart = true; + } + + if (lr->IsSpilled() && !isSplitPart) { + needSpillLr = GetSpillReg(insn, *lr, spillIdx, usedRegMask, true); + } + + regno_t regNO; + if (isSplitPart) { + regNO = lr->GetSplitLr()->GetAssignedRegNO(); + } else { + regNO = lr->GetAssignedRegNO(); + } + bool 
isCalleeReg = regInfo->IsCalleeSavedReg(regNO); + RegOperand &phyOpnd = cgFunc->GetOpndBuilder()->CreatePReg(regNO, opnd.GetSize(), regType); + if (GCRA_DUMP) { + std::string regStr = (regType == kRegTyInt) ? "R" : "V"; + regStr += std::to_string(regNO - *regInfo->GetRegsFromType(kRegTyInt).begin()); + LogInfo::MapleLogger() << "replace R" << vregNO << " with " << regStr << "\n"; + } + + insn.AppendComment(" [R" + std::to_string(vregNO) + "] "); + + if (isSplitPart && (isCalleeReg || lr->GetSplitLr()->GetNumCall() == 0)) { + (void)SpillOperand(insn, opnd, true, phyOpnd); + if (lr->GetSplitLr()->GetLiveUnitFromLuMap(insn.GetBB()->GetId())->NeedReload()) { + (void)SpillOperand(insn, opnd, false, phyOpnd); + } + ++spillIdx; + return &phyOpnd; + } + + bool needCallerSave = false; + if ((lr->GetNumCall() > 0) && !isCalleeReg) { + needCallerSave = NeedCallerSave(insn, *lr, true) && lr->GetRematLevel() == kRematOff; + } + + if (lr->IsSpilled() || (isSplitPart && (lr->GetSplitLr()->GetNumCall() != 0)) || needCallerSave || + (!isSplitPart && !(lr->IsSpilled()) && + lr->GetLiveUnitFromLuMap(insn.GetBB()->GetId())->NeedReload())) { + SpillOperandForSpillPre(insn, regOpnd, phyOpnd, spillIdx, needSpillLr); + Insn *defSpill = SpillOperand(insn, opnd, true, phyOpnd); + if (defSpill != nullptr) { + SpillOperandForSpillPost(*defSpill, regOpnd, phyOpnd, spillIdx, needSpillLr); + } + Insn *useSpill = SpillOperand(insn, opnd, false, phyOpnd); + ASSERT(useSpill != nullptr, "null ptr check!"); + SpillOperandForSpillPost(*useSpill, regOpnd, phyOpnd, spillIdx, needSpillLr); + ++spillIdx; + } + + return &phyOpnd; +} + +void GraphColorRegAllocator::MarkUsedRegs(Operand &opnd, MapleBitVector &usedRegMask) { + auto ®Opnd = static_cast(opnd); + uint32 vregNO = regOpnd.GetRegisterNumber(); + LiveRange *lr = GetLiveRange(vregNO); + if (lr != nullptr) { + if (lr->IsSpilled()) { + lr->SetAssignedRegNO(0); + } + if (lr->GetAssignedRegNO() != 0) { + usedRegMask[lr->GetAssignedRegNO()] = true; + } + if ((lr->GetSplitLr() != nullptr) && (lr->GetSplitLr()->GetAssignedRegNO() > 0)) { + usedRegMask[lr->GetSplitLr()->GetAssignedRegNO()] = true; + } + } +} + +bool GraphColorRegAllocator::FinalizeRegisterPreprocess(FinalizeRegisterInfo &fInfo, + const Insn &insn, MapleBitVector &usedRegMask) { + const InsnDesc *md = insn.GetDesc(); + uint32 opndNum = insn.GetOperandSize(); + bool hasVirtual = false; + for (uint32 i = 0; i < opndNum; ++i) { + Operand &opnd = insn.GetOperand(i); + ASSERT(md->GetOpndDes(i) != nullptr, "pointer is null in GraphColorRegAllocator::FinalizeRegisters"); + + if (opnd.IsList()) { + if (!insn.IsAsmInsn()) { + continue; + } + hasVirtual = true; + if (i == kAsmOutputListOpnd) { + fInfo.SetDefOperand(opnd, i); + } + if (i == kAsmInputListOpnd) { + fInfo.SetUseOperand(opnd, i); + } + } else if (opnd.IsMemoryAccessOperand()) { + auto &memOpnd = static_cast(opnd); + Operand *base = memOpnd.GetBaseRegister(); + if (base != nullptr) { + fInfo.SetBaseOperand(opnd, i); + MarkUsedRegs(*base, usedRegMask); + hasVirtual = static_cast(base)->IsVirtualRegister() || hasVirtual; + } + Operand *offset = memOpnd.GetIndexRegister(); + if (offset != nullptr) { + fInfo.SetOffsetOperand(opnd); + MarkUsedRegs(*offset, usedRegMask); + hasVirtual = static_cast(offset)->IsVirtualRegister() || hasVirtual; + } + } else { + bool isDef = md->GetOpndDes(i)->IsDef(); + bool isUse = md->GetOpndDes(i)->IsUse(); + if (isDef && isUse) { + fInfo.SetUseDefOperand(opnd, i); + } else if (isDef) { + fInfo.SetDefOperand(opnd, i); + } else { + 
fInfo.SetUseOperand(opnd, i); + } + if (opnd.IsRegister()) { + hasVirtual |= static_cast(opnd).IsVirtualRegister(); + MarkUsedRegs(opnd, usedRegMask); + } + } + } /* operand */ + return hasVirtual; +} + +void GraphColorRegAllocator::GenerateSpillFillRegs(const Insn &insn) { + uint32 opndNum = insn.GetOperandSize(); + std::set defPregs; + std::set usePregs; + std::vector defLrs; + std::vector useLrs; + if (insn.IsIntRegisterMov()) { + RegOperand &opnd1 = static_cast(insn.GetOperand(1)); + RegOperand &opnd0 = static_cast(insn.GetOperand(0)); + if (regInfo->IsGPRegister(opnd1.GetRegisterNumber()) && + !regInfo->IsUnconcernedReg(opnd1) && + !regInfo->IsCalleeSavedReg(opnd1.GetRegisterNumber()) && + regInfo->IsVirtualRegister(opnd0.GetRegisterNumber())) { + LiveRange *lr = lrMap[opnd0.GetRegisterNumber()]; + if (lr->IsSpilled()) { + lr->SetSpillReg(opnd1.GetRegisterNumber()); + ASSERT(lr->GetSpillReg() != 0, "no spill reg in GenerateSpillFillRegs"); + return; + } + } + if (regInfo->IsGPRegister(opnd0.GetRegisterNumber()) && + !regInfo->IsUnconcernedReg(opnd0) && + !regInfo->IsCalleeSavedReg(opnd0.GetRegisterNumber()) && + regInfo->IsVirtualRegister(opnd1.GetRegisterNumber())) { + LiveRange *lr = lrMap[opnd1.GetRegisterNumber()]; + if (lr->IsSpilled()) { + lr->SetSpillReg(opnd0.GetRegisterNumber()); + ASSERT(lr->GetSpillReg() != 0, "no spill reg in GenerateSpillFillRegs"); + return; + } + } + } + const InsnDesc *md = insn.GetDesc(); + bool isIndexedMemOp = false; + for (uint32 opndIdx = 0; opndIdx < opndNum; ++opndIdx) { + Operand *opnd = &insn.GetOperand(opndIdx); + if (opnd == nullptr) { + continue; + } + if (opnd->IsList()) { + // call parameters + } else if (opnd->IsMemoryAccessOperand()) { + auto *memopnd = static_cast(opnd); + if (memopnd->GetIndexOpt() == MemOperand::kPreIndex || + memopnd->GetIndexOpt() == MemOperand::kPostIndex) { + isIndexedMemOp = true; + } + auto *base = static_cast(memopnd->GetBaseRegister()); + if (base != nullptr && !regInfo->IsUnconcernedReg(*base)) { + if (!memopnd->IsIntactIndexed()) { + if (base->IsPhysicalRegister()) { + defPregs.insert(base->GetRegisterNumber()); + } else { + LiveRange *lr = lrMap[base->GetRegisterNumber()]; + if (lr->IsSpilled()) { + defLrs.emplace_back(lr); + } + } + } + if (base->IsPhysicalRegister()) { + usePregs.insert(base->GetRegisterNumber()); + } else { + LiveRange *lr = lrMap[base->GetRegisterNumber()]; + if (lr->IsSpilled()) { + useLrs.emplace_back(lr); + } + } + } + RegOperand *offset = static_cast(memopnd->GetIndexRegister()); + if (offset != nullptr) { + if (offset->IsPhysicalRegister()) { + usePregs.insert(offset->GetRegisterNumber()); + } else { + LiveRange *lr = lrMap[offset->GetRegisterNumber()]; + if (lr->IsSpilled()) { + useLrs.emplace_back(lr); + } + } + } + } else if (opnd->IsRegister()) { + bool isDef = md->GetOpndDes(static_cast(opndIdx))->IsRegDef(); + bool isUse = md->GetOpndDes(static_cast(opndIdx))->IsRegUse(); + RegOperand *ropnd = static_cast(opnd); + if (regInfo->IsUnconcernedReg(*ropnd)) { + continue; + } + if (ropnd != nullptr) { + if (isUse) { + if (ropnd->IsPhysicalRegister()) { + usePregs.insert(ropnd->GetRegisterNumber()); + } else { + LiveRange *lr = lrMap[ropnd->GetRegisterNumber()]; + if (lr->IsSpilled()) { + useLrs.emplace_back(lr); + } + } + } + if (isDef) { + if (ropnd->IsPhysicalRegister()) { + defPregs.insert(ropnd->GetRegisterNumber()); + } else { + LiveRange *lr = lrMap[ropnd->GetRegisterNumber()]; + if (lr->IsSpilled()) { + defLrs.emplace_back(lr); + } + } + } + } + } + } + auto comparator = 
[=](const LiveRange *lr1, const LiveRange *lr2) -> bool { + return lr1->GetID() > lr2->GetID(); + }; + std::sort(useLrs.begin(), useLrs.end(), comparator); + for (auto lr: useLrs) { + lr->SetID(insn.GetId()); + RegType rtype = lr->GetRegType(); + regno_t firstSpillReg = (rtype == kRegTyInt) ? regInfo->GetIntSpillFillReg(0) : + regInfo->GetFpSpillFillReg(0); + if (lr->GetSpillReg() != 0 && lr->GetSpillReg() < firstSpillReg && lr->GetPregveto(lr->GetSpillReg())) { + lr->SetSpillReg(0); + } + if (lr->GetSpillReg() != 0 && lr->GetSpillReg() >= firstSpillReg && + usePregs.find(lr->GetSpillReg()) == usePregs.end()) { + usePregs.insert(lr->GetSpillReg()); + continue; + } else { + lr->SetSpillReg(0); + } + for (uint32 i = 0; i < kSpillMemOpndNum; i++) { + regno_t preg = (rtype == kRegTyInt) ? regInfo->GetIntSpillFillReg(i) : + regInfo->GetFpSpillFillReg(i); + if (usePregs.find(preg) == usePregs.end()) { + lr->SetSpillReg(preg); + usePregs.insert(preg); + break; + } + } + ASSERT(lr->GetSpillReg() != 0, "no reg"); + } + size_t spillRegIdx = 0; + if (isIndexedMemOp) { + spillRegIdx = useLrs.size(); + } + for (auto lr: defLrs) { + lr->SetID(insn.GetId()); + RegType rtype = lr->GetRegType(); + regno_t firstSpillReg = (rtype == kRegTyInt) ? regInfo->GetIntSpillFillReg(0) : + regInfo->GetFpSpillFillReg(0); + if (lr->GetSpillReg() != 0) { + if (lr->GetSpillReg() < firstSpillReg && lr->GetPregveto(lr->GetSpillReg())) { + lr->SetSpillReg(0); + } + if (lr->GetSpillReg() >= firstSpillReg && defPregs.find(lr->GetSpillReg()) != defPregs.end()) { + lr->SetSpillReg(0); + } + } + if (lr->GetSpillReg() != 0) { + continue; + } + for (; spillRegIdx < kSpillMemOpndNum; spillRegIdx++) { + regno_t preg = (rtype == kRegTyInt) ? regInfo->GetIntSpillFillReg(spillRegIdx) : + regInfo->GetFpSpillFillReg(spillRegIdx); + if (defPregs.find(preg) == defPregs.end()) { + lr->SetSpillReg(preg); + defPregs.insert(preg); + break; + } + } + ASSERT(lr->GetSpillReg() != 0, "no reg"); + } +} + +RegOperand *GraphColorRegAllocator::CreateSpillFillCode(const RegOperand &opnd, Insn &insn, + uint32 spillCnt, bool isdef) { + regno_t vregno = opnd.GetRegisterNumber(); + LiveRange *lr = GetLiveRange(vregno); + if (lr != nullptr && lr->IsSpilled()) { + uint32 bits = opnd.GetSize(); + if (bits < k32BitSize) { + bits = k32BitSize; + } + if (cgFunc->IsExtendReg(vregno)) { + bits = k64BitSize; + } + regno_t spreg = 0; + RegType rtype = lr->GetRegType(); + spreg = lr->GetSpillReg(); + ASSERT(lr->GetSpillReg() != 0, "no reg in CreateSpillFillCode"); + RegOperand *regopnd = &cgFunc->GetOpndBuilder()->CreatePReg(spreg, opnd.GetSize(), rtype); + + if (lr->GetRematLevel() != kRematOff) { + if (isdef) { + return nullptr; + } else { + std::vector rematInsns = lr->Rematerialize(*cgFunc, *regopnd); + for (auto &&remat : rematInsns) { + std::string comment = " REMATERIALIZE color vreg: " + std::to_string(vregno); + remat->SetComment(comment); + insn.GetBB()->InsertInsnBefore(insn, *remat); + } + return regopnd; + } + } + + bool isOutOfRange = false; + Insn *nextInsn = insn.GetNextMachineInsn(); + MemOperand *loadmem = GetSpillOrReuseMem(*lr, opnd.GetSize(), isOutOfRange, + insn, isdef); + PrimType pty = + (lr->GetRegType() == kRegTyInt) ? ((bits > k32BitSize) ? PTY_i64 : PTY_i32) + : ((bits > k32BitSize) ? 
PTY_f64 : PTY_f32); + CHECK_FATAL(spillCnt < kSpillMemOpndNum, "spill count exceeded"); + Insn *memInsn; + if (isdef) { + memInsn = regInfo->BuildStrInsn(bits, pty, *regopnd, *loadmem); + memInsn->SetIsSpill(); + std::string comment = " SPILLcolor vreg: " + std::to_string(vregno) + + " op:" + kOpcodeInfo.GetName(lr->GetOp()); + memInsn->SetComment(comment); + if (nextInsn == nullptr) { + insn.GetBB()->AppendInsn(*memInsn); + } else { + insn.GetBB()->InsertInsnBefore(*nextInsn, *memInsn); + } + } else { + memInsn = regInfo->BuildLdrInsn(bits, pty, *regopnd, *loadmem); + memInsn->SetIsReload(); + std::string comment = " RELOADcolor vreg: " + std::to_string(vregno) + + " op:" + kOpcodeInfo.GetName(lr->GetOp()); + memInsn->SetComment(comment); + insn.GetBB()->InsertInsnBefore(insn, *memInsn); + } + return regopnd; + } + return nullptr; +} + +bool GraphColorRegAllocator::SpillLiveRangeForSpills() { + bool done = false; + for (uint32_t bbIdx = 0; bbIdx < bfs->sortedBBs.size(); bbIdx++) { + BB *bb = bfs->sortedBBs[bbIdx]; + FOR_BB_INSNS(insn, bb) { + uint32 spillCnt; + if (insn->IsImmaterialInsn() || !insn->IsMachineInstruction() || insn->GetId() == 0) { + continue; + } + spillCnt = 0; + const InsnDesc *md = insn->GetDesc(); + uint32 opndNum = insn->GetOperandSize(); + GenerateSpillFillRegs(*insn); + for (uint32 i = 0; i < opndNum; ++i) { + Operand *opnd = &insn->GetOperand(i); + if (opnd == nullptr) { + continue; + } + if (opnd->IsList()) { + // call parameters + } else if (opnd->IsMemoryAccessOperand()) { + MemOperand *newmemopnd = nullptr; + auto *memopnd = static_cast(opnd); + auto *base = static_cast(memopnd->GetBaseRegister()); + if (base != nullptr && base->IsVirtualRegister()) { + RegOperand *replace = CreateSpillFillCode(*base, *insn, spillCnt); + if (!memopnd->IsIntactIndexed()) { + (void)CreateSpillFillCode(*base, *insn, spillCnt, true); + } + if (replace != nullptr) { + spillCnt++; + newmemopnd = (static_cast(opnd)->Clone(*cgFunc->GetMemoryPool())); + newmemopnd->SetBaseRegister(*replace); + insn->SetOperand(i, *newmemopnd); + done = true; + } + } + RegOperand *offset = static_cast(memopnd->GetIndexRegister()); + if (offset != nullptr && offset->IsVirtualRegister()) { + RegOperand *replace = CreateSpillFillCode(*offset, *insn, spillCnt); + if (replace != nullptr) { + spillCnt++; + if (newmemopnd == nullptr) { + newmemopnd = (static_cast(opnd)->Clone(*cgFunc->GetMemoryPool())); + } + newmemopnd->SetIndexRegister(*replace); + insn->SetOperand(i, *newmemopnd); + done = true; + } + } + } else if (opnd->IsRegister()) { + bool isdef = md->opndMD[i]->IsRegDef(); + bool isuse = md->opndMD[i]->IsRegUse(); + RegOperand *replace = CreateSpillFillCode(*static_cast(opnd), *insn, spillCnt, isdef); + if (isuse && isdef) { + (void)CreateSpillFillCode(*static_cast(opnd), *insn, spillCnt, false); + } + if (replace != nullptr) { + if (!isdef) { + spillCnt++; + } + insn->SetOperand(i, *replace); + done = true; + } + } + } + } + } + return done; +} + +void GraphColorRegAllocator::FinalizeSpSaveReg() { + if (!cgFunc->GetCG()->IsLmbc() || cgFunc->GetSpSaveReg() == 0) { + return; + } + LiveRange *lr = lrMap[cgFunc->GetSpSaveReg()]; + if (lr == nullptr) { + return; + } + RegOperand &preg = cgFunc->GetOpndBuilder()->CreatePReg(lr->GetAssignedRegNO(), + k64BitSize, kRegTyInt); + BB *firstBB = cgFunc->GetFirstBB(); + FOR_BB_INSNS(insn, firstBB) { + if (insn->IsIntRegisterMov() && + static_cast(insn->GetOperand(kInsnSecondOpnd)).GetRegisterNumber() == + regInfo->GetStackPointReg()) { + if 
(!static_cast(insn->GetOperand(kInsnFirstOpnd)).IsVirtualRegister()) { + break; + } + insn->SetOperand(kInsnFirstOpnd, preg); + break; + } + } + for (auto *retBB : cgFunc->GetExitBBsVec()) { + FOR_BB_INSNS(insn, retBB) { + if (insn->IsIntRegisterMov() && + static_cast(insn->GetOperand(kInsnFirstOpnd)).GetRegisterNumber() == + regInfo->GetStackPointReg()) { + if (!static_cast(insn->GetOperand(kInsnSecondOpnd)).IsVirtualRegister()) { + break; + } + insn->SetOperand(kInsnSecondOpnd, preg); + break; + } + } + } +} + +static bool ReloadAtCallee(CgOccur *occ) { + auto *defOcc = occ->GetDef(); + if (defOcc == nullptr || defOcc->GetOccType() != kOccStore) { + return false; + } + return static_cast(defOcc)->Reload(); +} + +void CallerSavePre::DumpWorkCandAndOcc() { + if (workCand->GetTheOperand()->IsRegister()) { + LogInfo::MapleLogger() << "Cand R"; + LogInfo::MapleLogger() << static_cast(workCand->GetTheOperand())->GetRegisterNumber() << '\n'; + } else { + LogInfo::MapleLogger() << "Cand Index" << workCand->GetIndex() << '\n'; + } + for (CgOccur *occ : allOccs) { + occ->Dump(); + LogInfo::MapleLogger() << '\n'; + } +} + +void CallerSavePre::CodeMotion() { + constexpr uint32 limitNum = UINT32_MAX; + uint32 cnt = 0; + for (auto *occ : allOccs) { + if (occ->GetOccType() == kOccUse) { + ++cnt; + beyondLimit = (cnt == limitNum) || beyondLimit; + if (!beyondLimit && dump) { + LogInfo::MapleLogger() << "opt use occur: "; + occ->Dump(); + } + } + if (occ->GetOccType() == kOccUse && + (beyondLimit || (static_cast(occ)->Reload() && !ReloadAtCallee(occ)))) { + RegOperand &phyOpnd = func->GetOpndBuilder()->CreatePReg(workLr->GetAssignedRegNO(), + occ->GetOperand()->GetSize(), + static_cast(occ->GetOperand())->GetRegisterType()); + (void)regAllocator->SpillOperand(*occ->GetInsn(), *occ->GetOperand(), false, phyOpnd); + continue; + } + if (occ->GetOccType() == kOccPhiopnd && static_cast(occ)->Reload() && !ReloadAtCallee(occ)) { + RegOperand &phyOpnd = func->GetOpndBuilder()->CreatePReg(workLr->GetAssignedRegNO(), + occ->GetOperand()->GetSize(), + static_cast(occ->GetOperand())->GetRegisterType()); + Insn *insn = occ->GetBB()->GetLastInsn(); + if (insn == nullptr) { + auto &comment = func->GetOpndBuilder()->CreateComment("reload caller save register"); + insn = &func->GetInsnBuilder()->BuildCommentInsn(comment); + occ->GetBB()->AppendInsn(*insn); + } + auto defOcc = occ->GetDef(); + bool forCall = (defOcc != nullptr && insn == defOcc->GetInsn()); + (void)regAllocator->SpillOperand(*insn, *occ->GetOperand(), false, phyOpnd, forCall); + continue; + } + if (occ->GetOccType() == kOccStore && static_cast(occ)->Reload()) { + RegOperand &phyOpnd = func->GetOpndBuilder()->CreatePReg(workLr->GetAssignedRegNO(), + occ->GetOperand()->GetSize(), + static_cast(occ->GetOperand())->GetRegisterType()); + (void)regAllocator->SpillOperand(*occ->GetInsn(), *occ->GetOperand(), false, phyOpnd, true); + continue; + } + } + if (dump) { + PreWorkCand *curCand = workCand; + LogInfo::MapleLogger() << "========ssapre candidate " << curCand->GetIndex() << " after codemotion ===========\n"; + DumpWorkCandAndOcc(); + func->DumpCFGToDot("raCodeMotion-"); + } +} + +void CallerSavePre::UpdateLoadSite(CgOccur *occ) { + if (occ == nullptr) { + return; + } + auto *defOcc = occ->GetDef(); + if (occ->GetOccType() == kOccUse) { + defOcc = static_cast(occ)->GetPrevVersionOccur(); + } + if (defOcc == nullptr) { + return; + } + switch (defOcc->GetOccType()) { + case kOccDef: + break; + case kOccUse: + UpdateLoadSite(defOcc); + return; + case kOccStore: 
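+      /*
+       * A kOccStore occurrence stands for a call site that clobbers the caller-saved register
+       * (see BuildWorkList, which creates one for every kIsCall reference). If that store was
+       * already marked to reload after the call, the current occurrence is covered; otherwise
+       * the use or phi operand itself must be marked for reload.
+       */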
{ + auto *storeOcc = static_cast(defOcc); + if (storeOcc->Reload()) { + break; + } + switch (occ->GetOccType()) { + case kOccUse: { + static_cast(occ)->SetReload(true); + break; + } + case kOccPhiopnd: { + static_cast(occ)->SetReload(true); + break; + } + default: { + CHECK_FATAL(false, "must not be here"); + } + } + return; + } + case kOccPhiocc: { + auto *phiOcc = static_cast(defOcc); + if (phiOcc->IsFullyAvailable()) { + break; + } + if (!phiOcc->IsDownSafe() || phiOcc->IsNotAvailable()) { + switch (occ->GetOccType()) { + case kOccUse: { + static_cast(occ)->SetReload(true); + break; + } + case kOccPhiopnd: { + static_cast(occ)->SetReload(true); + break; + } + default: { + CHECK_FATAL(false, "must not be here"); + } + } + return; + } + + if (defOcc->Processed()) { + return; + } + defOcc->SetProcessed(true); + for (auto *opndOcc : phiOcc->GetPhiOpnds()) { + UpdateLoadSite(opndOcc); + } + return; + } + default: { + CHECK_FATAL(false, "NIY"); + break; + } + } +} + +void CallerSavePre::CalLoadSites() { + for (auto *occ : allOccs) { + if (occ->GetOccType() == kOccUse) { + UpdateLoadSite(occ); + } + } + std::vector availableDef(classCount, nullptr); + for (auto *occ : allOccs) { + auto classID = static_cast(occ->GetClassID()); + switch (occ->GetOccType()) { + case kOccDef: + availableDef[classID] = occ; + break; + case kOccStore: { + if (static_cast(occ)->Reload()) { + availableDef[classID] = occ; + } else { + availableDef[classID] = nullptr; + } + break; + } + case kOccPhiocc: { + auto *phiOcc = static_cast(occ); + if (!phiOcc->IsNotAvailable() && phiOcc->IsDownSafe()) { + availableDef[classID] = occ; + } else { + availableDef[classID] = nullptr; + } + break; + } + case kOccUse: { + auto *useOcc = static_cast(occ); + if (useOcc->Reload()) { + auto *availDef = availableDef[classID]; + if (availDef != nullptr && dom->Dominate(*availDef->GetBB(), *useOcc->GetBB())) { + useOcc->SetReload(false); + } else { + availableDef[classID] = useOcc; + } + } + break; + } + case kOccPhiopnd: { + auto *phiOpnd = static_cast(occ); + if (phiOpnd->Reload()) { + auto *availDef = availableDef[classID]; + if (availDef != nullptr && dom->Dominate(*availDef->GetBB(), *phiOpnd->GetBB())) { + phiOpnd->SetReload(false); + } else { + availableDef[classID] = phiOpnd; + } + } + break; + } + case kOccExit: + break; + default: + CHECK_FATAL(false, "not supported occur type"); + } + } + if (dump) { + PreWorkCand *curCand = workCand; + LogInfo::MapleLogger() << "========ssapre candidate " << curCand->GetIndex() + << " after CalLoadSite===================\n"; + DumpWorkCandAndOcc(); + LogInfo::MapleLogger() << "\n"; + } +} + +void CallerSavePre::ComputeAvail() { + bool changed = true; + while (changed) { + changed = false; + for (auto *phiOcc : phiOccs) { + if (phiOcc->IsNotAvailable()) { + continue; + } + size_t killedCnt = 0; + for (auto *opndOcc : phiOcc->GetPhiOpnds()) { + auto defOcc = opndOcc->GetDef(); + if (defOcc == nullptr) { + continue; + } + // for not move load too far from use site, set not-fully-available-phi killing availibity of phiOpnd + if ((defOcc->GetOccType() == kOccPhiocc && !static_cast(defOcc)->IsFullyAvailable()) || + defOcc->GetOccType() == kOccStore) { + ++killedCnt; + opndOcc->SetHasRealUse(false); + // opnd at back-edge is killed, set phi not avail + if (dom->Dominate(*phiOcc->GetBB(), *opndOcc->GetBB())) { + killedCnt = phiOcc->GetPhiOpnds().size(); + break; + } + if (opndOcc->GetBB()->IsSoloGoto() && opndOcc->GetBB()->GetLoop() != nullptr) { + killedCnt = phiOcc->GetPhiOpnds().size(); + break; + 
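+            /*
+             * Both kill rules above force the phi to not-available: if the killed operand
+             * reaches the phi along a back edge (the phi block dominates the operand block)
+             * or through a solo-goto block inside a loop, treating the phi as partially
+             * available would let CodeMotion sink the reload into the loop, moving the load
+             * too far from its use site.
+             */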
} + continue; + } + } + if (killedCnt == phiOcc->GetPhiOpnds().size()) { + changed = !phiOcc->IsNotAvailable() || changed; + phiOcc->SetAvailability(kNotAvailable); + } else if (killedCnt > 0) { + changed = !phiOcc->IsPartialAvailable() || changed; + phiOcc->SetAvailability(kPartialAvailable); + } else {} // fully available is default state + } + } +} + +void CallerSavePre::Rename1() { + std::stack occStack; + classCount = 1; + // iterate the occurrence according to its preorder dominator tree + for (CgOccur *occ : allOccs) { + while (!occStack.empty() && !occStack.top()->IsDominate(*dom, *occ)) { + occStack.pop(); + } + switch (occ->GetOccType()) { + case kOccUse: { + if (occStack.empty()) { + // assign new class + occ->SetClassID(static_cast(classCount++)); + occStack.push(occ); + break; + } + CgOccur *topOccur = occStack.top(); + if (topOccur->GetOccType() == kOccStore || topOccur->GetOccType() == kOccDef || + topOccur->GetOccType() == kOccPhiocc) { + // assign new class + occ->SetClassID(topOccur->GetClassID()); + occ->SetPrevVersionOccur(topOccur); + occStack.push(occ); + break; + } else if (topOccur->GetOccType() == kOccUse) { + occ->SetClassID(topOccur->GetClassID()); + if (topOccur->GetDef() != nullptr) { + occ->SetDef(topOccur->GetDef()); + } else { + occ->SetDef(topOccur); + } + break; + } + CHECK_FATAL(false, "unsupported occur type"); + break; + } + case kOccPhiocc: { + // assign new class + occ->SetClassID(static_cast(classCount++)); + occStack.push(occ); + break; + } + case kOccPhiopnd: { + if (!occStack.empty()) { + CgOccur *topOccur = occStack.top(); + auto *phiOpndOcc = static_cast(occ); + phiOpndOcc->SetDef(topOccur); + phiOpndOcc->SetClassID(topOccur->GetClassID()); + if (topOccur->GetOccType() == kOccUse) { + phiOpndOcc->SetHasRealUse(true); + } + } + break; + } + case kOccDef: { + if (!occStack.empty()) { + CgOccur *topOccur = occStack.top(); + if (topOccur->GetOccType() == kOccPhiocc) { + auto *phiTopOccur = static_cast(topOccur); + phiTopOccur->SetIsDownSafe(false); + } + } + + // assign new class + occ->SetClassID(static_cast(classCount++)); + occStack.push(occ); + break; + } + case kOccStore: { + if (!occStack.empty()) { + CgOccur *topOccur = occStack.top(); + auto prevVersionOcc = topOccur->GetDef() ? 
topOccur->GetDef() : topOccur; + static_cast(occ)->SetPrevVersionOccur(prevVersionOcc); + if (topOccur->GetOccType() == kOccPhiocc) { + auto *phiTopOccur = static_cast(topOccur); + phiTopOccur->SetIsDownSafe(false); + } + } + + // assign new class + occ->SetClassID(static_cast(classCount++)); + occStack.push(occ); + break; + } + case kOccExit: { + if (occStack.empty()) { + break; + } + CgOccur *topOccur = occStack.top(); + if (topOccur->GetOccType() == kOccPhiocc) { + auto *phiTopOccur = static_cast(topOccur); + phiTopOccur->SetIsDownSafe(false); + } + break; + } + default: + ASSERT(false, "should not be here"); + break; + } + } + if (dump) { + PreWorkCand *curCand = workCand; + LogInfo::MapleLogger() << "========ssapre candidate " << curCand->GetIndex() << " after rename1============\n"; + DumpWorkCandAndOcc(); + } +} + +void CallerSavePre::ComputeVarAndDfPhis() { + dfPhiDfns.clear(); + PreWorkCand *workCand = GetWorkCand(); + for (auto *realOcc : workCand->GetRealOccs()) { + BB *defBB = realOcc->GetBB(); + GetIterDomFrontier(defBB, &dfPhiDfns); + } +} + +void CallerSavePre::BuildWorkList() { + size_t numBBs = dom->GetDtPreOrderSize(); + std::vector callSaveLrs; + for (auto it: regAllocator->GetLrMap()) { + LiveRange *lr = it.second; + if (lr == nullptr || lr->IsSpilled()) { + continue; + } + bool isCalleeReg = func->GetTargetRegInfo()->IsCalleeSavedReg(lr->GetAssignedRegNO()); + if (lr->GetSplitLr() == nullptr && (lr->GetNumCall() > 0) && !isCalleeReg) { + callSaveLrs.emplace_back(lr); + } + } + const MapleVector &preOrderDt = dom->GetDtPreOrder(); + for (size_t i = 0; i < numBBs; ++i) { + BB *bb = func->GetBBFromID(preOrderDt[i]); + std::map insnMap; + FOR_BB_INSNS_SAFE(insn, bb, ninsn) { + insnMap.insert(std::make_pair(insn->GetId(), insn)); + } + for (auto lr: callSaveLrs) { + LiveUnit *lu = lr->GetLiveUnitFromLuMap(bb->GetId()); + RegOperand &opnd = func->GetOrCreateVirtualRegisterOperand(lr->GetRegNO()); + if (lu != nullptr && ((lu->GetDefNum() > 0) || (lu->GetUseNum() > 0) || lu->HasCall())) { + const MapleMap &refs = lr->GetRefs(bb->GetId()); + for (auto it = refs.begin(); it != refs.end(); ++it) { + if ((it->second & kIsUse) > 0) { + (void)CreateRealOcc(*insnMap[it->first], opnd, kOccUse); + } + if ((it->second & kIsDef) > 0) { + (void)CreateRealOcc(*insnMap[it->first], opnd, kOccDef); + } + if ((it->second & kIsCall) > 0) { + Insn *callInsn = insnMap[it->first]; + auto *targetOpnd = callInsn->GetCallTargetOperand(); + if (CGOptions::DoIPARA() && targetOpnd->IsFuncNameOpnd()) { + FuncNameOperand *target = static_cast(targetOpnd); + const MIRSymbol *funcSt = target->GetFunctionSymbol(); + ASSERT(funcSt->GetSKind() == kStFunc, "funcst must be a function name symbol"); + MIRFunction *mirFunc = funcSt->GetFunction(); + if (mirFunc != nullptr && mirFunc->IsReferedRegsValid()) { + auto regSet = mirFunc->GetReferedRegs(); + if (regSet.find(lr->GetAssignedRegNO()) == regSet.end()) { + continue; + } + } + } + (void) CreateRealOcc(*callInsn, opnd, kOccStore); + } + } + } + } + if (bb->GetKind() == BB::kBBReturn) { + CreateExitOcc(*bb); + } + } +} + +void CallerSavePre::ApplySSAPRE() { + // #0 build worklist + BuildWorkList(); + uint32 cnt = 0; + constexpr uint32 preLimit = UINT32_MAX; + while (!workList.empty()) { + ++cnt; + if (cnt == preLimit) { + beyondLimit = true; + } + workCand = workList.front(); + workCand->SetIndex(static_cast(cnt)); + workLr = regAllocator->GetLiveRange(static_cast(workCand->GetTheOperand())->GetRegisterNumber()); + ASSERT(workLr != nullptr, "exepected non null 
lr"); + workList.pop_front(); + if (workCand->GetRealOccs().empty()) { + continue; + } + + allOccs.clear(); + phiOccs.clear(); + // #1 Insert PHI; results in allOccs and phiOccs + ComputeVarAndDfPhis(); + CreateSortedOccs(); + if (workCand->GetRealOccs().empty()) { + continue; + } + // #2 Rename + Rename1(); + ComputeDS(); + ComputeAvail(); + CalLoadSites(); + // #6 CodeMotion and recompute worklist based on newly occurrence + CodeMotion(); + ASSERT(workLr->GetProcessed() == false, "exepected unprocessed"); + workLr->SetProcessed(); + } +} + +void GraphColorRegAllocator::OptCallerSave() { + CallerSavePre callerSavePre(this, *cgFunc, domInfo, *memPool, *memPool, kLoadPre, UINT32_MAX); + callerSavePre.SetDump(GCRA_DUMP); + callerSavePre.ApplySSAPRE(); +} + +void GraphColorRegAllocator::SplitVregAroundLoop(const CGFuncLoops &loop, const std::vector &lrs, + BB &headerPred, BB &exitSucc, const std::set &cands) { + size_t maxSplitCount = lrs.size() - intCalleeRegSet.size(); + maxSplitCount = maxSplitCount > kMaxSplitCount ? kMaxSplitCount : maxSplitCount; + uint32 splitCount = 0; + auto it = cands.begin(); + size_t candsSize = cands.size(); + maxSplitCount = maxSplitCount > candsSize ? candsSize : maxSplitCount; + for (auto &lr: lrs) { + if (lr->IsSpilled()) { + continue; + } + if (!regInfo->IsCalleeSavedReg(lr->GetAssignedRegNO())) { + continue; + } + if (cgFunc->GetCG()->IsLmbc() && lr->GetIsSpSave()) { + continue; + } + bool hasRef = false; + for (auto *bb : loop.GetLoopMembers()) { + LiveUnit *lu = lr->GetLiveUnitFromLuMap(bb->GetId()); + if (lu != nullptr && (lu->GetDefNum() != 0 || lu->GetUseNum() != 0)) { + hasRef = true; + break; + } + } + if (!hasRef) { + splitCount++; + RegOperand *ropnd = &cgFunc->GetOrCreateVirtualRegisterOperand(lr->GetRegNO()); + RegOperand &phyOpnd = cgFunc->GetOpndBuilder()->CreatePReg(lr->GetAssignedRegNO(), + ropnd->GetSize(), lr->GetRegType()); + + auto &headerCom = cgFunc->GetOpndBuilder()->CreateComment("split around loop begin"); + headerPred.AppendInsn(cgFunc->GetInsnBuilder()->BuildCommentInsn(headerCom)); + Insn *last = headerPred.GetLastInsn(); + (void)SpillOperand(*last, *ropnd, true, static_cast(phyOpnd)); + + auto &exitCom = cgFunc->GetOpndBuilder()->CreateComment("split around loop end"); + exitSucc.InsertInsnBegin(cgFunc->GetInsnBuilder()->BuildCommentInsn(exitCom)); + Insn *first = exitSucc.GetFirstInsn(); + (void)SpillOperand(*first, *ropnd, false, static_cast(phyOpnd)); + + LiveRange *replacedLr = lrMap[*it]; + replacedLr->SetAssignedRegNO(lr->GetAssignedRegNO()); + replacedLr->SetSpilled(false); + ++it; + } + if (splitCount >= maxSplitCount) { + break; + } + } +} + +bool GraphColorRegAllocator::LrGetBadReg(const LiveRange &lr) const { + if (lr.IsSpilled()) { + return true; + } + if (lr.GetNumCall() != 0 && !regInfo->IsCalleeSavedReg(lr.GetAssignedRegNO())) { + return true; + } + return false; +} + +bool GraphColorRegAllocator::LoopNeedSplit(const CGFuncLoops &loop, std::set &cands) { + std::set regPressure; + const BB *header = loop.GetHeader(); + const MapleSet &liveIn = header->GetLiveInRegNO(); + std::set loopBBs; + for (auto *bb : loop.GetLoopMembers()) { + loopBBs.insert(bb); + FOR_BB_INSNS(insn, bb) { + if (!insn->IsMachineInstruction()) { + continue; + } + if (insn->GetId() == 0) { + continue; + } + uint32 opndNum = insn->GetOperandSize(); + for (uint32 i = 0; i < opndNum; ++i) { + Operand &opnd = insn->GetOperand(i); + if (opnd.IsList()) { + continue; + } else if (opnd.IsMemoryAccessOperand()) { + auto &memOpnd = static_cast(opnd); + 
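+          /*
+           * Candidate collection: a base or index register contributes to the loop pressure
+           * set only if its live range got a bad color (spilled, or assigned a caller-saved
+           * register while crossing calls, see LrGetBadReg) and it is not live-in to the loop
+           * header; ranges that live beyond the loop are filtered out afterwards.
+           */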
Operand *base = memOpnd.GetBaseRegister(); + Operand *offset = memOpnd.GetIndexRegister(); + if (base != nullptr && base->IsRegister()) { + RegOperand *regOpnd = static_cast(base); + regno_t regNO = regOpnd->GetRegisterNumber(); + LiveRange *lr = GetLiveRange(regNO); + if (lr != nullptr && lr->GetRegType() == kRegTyInt && LrGetBadReg(*lr) && + liveIn.find(regNO) == liveIn.end()) { + regPressure.insert(regOpnd->GetRegisterNumber()); + } + } + if (offset != nullptr && offset->IsRegister()) { + RegOperand *regOpnd = static_cast(offset); + regno_t regNO = regOpnd->GetRegisterNumber(); + LiveRange *lr = GetLiveRange(regNO); + if (lr != nullptr && lr->GetRegType() == kRegTyInt && LrGetBadReg(*lr) && + liveIn.find(regNO) == liveIn.end()) { + regPressure.insert(regOpnd->GetRegisterNumber()); + } + } + } else if (opnd.IsRegister()) { + auto ®Opnd = static_cast(opnd); + regno_t regNO = regOpnd.GetRegisterNumber(); + LiveRange *lr = GetLiveRange(regNO); + if (lr != nullptr && lr->GetRegType() == kRegTyInt && LrGetBadReg(*lr) && + liveIn.find(regNO) == liveIn.end()) { + regPressure.insert(regOpnd.GetRegisterNumber()); + } + } + } + } + } + if (regPressure.size() != 0) { + for (auto reg: regPressure) { + LiveRange *lr = lrMap[reg]; + std::vector smember; + ForEachBBArrElem(lr->GetBBMember(), [this, &smember](uint32 bbID) { (void)smember.emplace_back(bbVec[bbID]); }); + bool liveBeyondLoop = false; + for (auto bb: smember) { + if (loopBBs.find(bb) == loopBBs.end()) { + liveBeyondLoop = true; + break; + } + } + if (liveBeyondLoop) { + continue; + } + cands.insert(reg); + } + if (cands.empty()) { + return false; + } + return true; + } + return false; +} + +void GraphColorRegAllocator::AnalysisLoop(const CGFuncLoops &loop) { + const BB *header = loop.GetHeader(); + const MapleSet &liveIn = header->GetLiveInRegNO(); + std::vector lrs; + size_t intCalleeNum = intCalleeRegSet.size(); + if (loop.GetMultiEntries().size() != 0) { + return; + } + for (auto regno: liveIn) { + LiveRange *lr = GetLiveRange(regno); + if (lr != nullptr && lr->GetRegType() == kRegTyInt && lr->GetNumCall() != 0) { + lrs.emplace_back(lr); + } + } + if (lrs.size() < intCalleeNum) { + return; + } + bool hasCall = false; + std::set loopBBs; + for (auto *bb : loop.GetLoopMembers()) { + if (bb->HasCall()) { + hasCall = true; + } + loopBBs.insert(bb); + } + if (!hasCall) { + return; + } + auto comparator = [=](const LiveRange *lr1, const LiveRange *lr2) -> bool { + return lr1->GetPriority() < lr2->GetPriority(); + }; + std::sort(lrs.begin(), lrs.end(), comparator); + const MapleVector &exits = loop.GetExits(); + std::set loopExits; + for (auto &bb: exits) { + for (auto &succ: bb->GetSuccs()) { + if (loopBBs.find(succ) != loopBBs.end()) { + continue; + } + if (succ->IsSoloGoto() || succ->IsEmpty()) { + BB *realSucc = CGCFG::GetTargetSuc(*succ); + if (realSucc != nullptr) { + loopExits.insert(realSucc); + } + } else { + loopExits.insert(succ); + } + } + } + std::set loopEntra; + for (auto &pred: header->GetPreds()) { + if (loopBBs.find(pred) != loopBBs.end()) { + continue; + } + loopEntra.insert(pred); + } + if (loopEntra.size() != 1 || loopExits.size() != 1) { + return; + } + BB *headerPred = *loopEntra.begin(); + BB *exitSucc = *loopExits.begin(); + if (headerPred->GetKind() != BB::kBBFallthru) { + return; + } + if (exitSucc->GetPreds().size() != loop.GetExits().size()) { + return; + } + std::set cands; + if (!LoopNeedSplit(loop, cands)) { + return; + } + SplitVregAroundLoop(loop, lrs, *headerPred, *exitSucc, cands); +} +void 
GraphColorRegAllocator::AnalysisLoopPressureAndSplit(const CGFuncLoops &loop) { + if (loop.GetInnerLoops().empty()) { + // only handle inner-most loop + AnalysisLoop(loop); + return; + } + for (const auto *lp : loop.GetInnerLoops()) { + AnalysisLoopPressureAndSplit(*lp); + } +} + +/* Iterate through all instructions and change the vreg to preg. */ +void GraphColorRegAllocator::FinalizeRegisters() { + if (doMultiPass && hasSpill) { + if (GCRA_DUMP) { + LogInfo::MapleLogger() << "In this round, spill vregs : \n"; + for (auto &it: std::as_const(lrMap)) { + LiveRange *lr = it.second; + if (lr->IsSpilled()) { + LogInfo::MapleLogger() << "R" << lr->GetRegNO() << " "; + } + } + LogInfo::MapleLogger() << "\n"; + } + bool done = SpillLiveRangeForSpills(); + if (done) { + FinalizeSpSaveReg(); + return; + } + } + if (CLANG) { + if (!cgFunc->GetLoops().empty()) { + cgFunc->GetTheCFG()->InitInsnVisitor(*cgFunc); + for (const auto *lp : cgFunc->GetLoops()) { + AnalysisLoopPressureAndSplit(*lp); + } + } + OptCallerSave(); + } + for (auto *bb : bfs->sortedBBs) { + FOR_BB_INSNS_SAFE(insn, bb, nextInsn) { + if (insn->IsImmaterialInsn()) { + continue; + } + if (!insn->IsMachineInstruction()) { + continue; + } + if (insn->GetId() == 0) { + continue; + } + + for (uint32 i = 0; i < kSpillMemOpndNum; ++i) { + operandSpilled[i] = false; + } + + FinalizeRegisterInfo *fInfo = memPool->New(alloc); + MapleBitVector usedRegMask(regInfo->GetAllRegNum(), false, alloc.Adapter()); + bool needProcces = FinalizeRegisterPreprocess(*fInfo, *insn, usedRegMask); + if (!needProcces) { + continue; + } + uint32 defSpillIdx = 0; + uint32 useSpillIdx = 0; + MemOperand *memOpnd = nullptr; + if (fInfo->GetBaseOperand()) { + memOpnd = static_cast(fInfo->GetBaseOperand())->Clone(*cgFunc->GetMemoryPool()); + insn->SetOperand(fInfo->GetMemOperandIdx(), *memOpnd); + Operand *base = memOpnd->GetBaseRegister(); + ASSERT(base != nullptr, "nullptr check"); + /* if base register is both defReg and useReg, defSpillIdx should also be increased. 
But it doesn't exist yet */ + RegOperand *phyOpnd = GetReplaceOpnd(*insn, *base, useSpillIdx, usedRegMask, false); + if (phyOpnd != nullptr) { + memOpnd->SetBaseRegister(*phyOpnd); + } + if (!memOpnd->IsIntactIndexed()) { + (void)GetReplaceOpnd(*insn, *base, useSpillIdx, usedRegMask, true); + } + } + if (fInfo->GetOffsetOperand()) { + ASSERT(memOpnd != nullptr, "memOpnd should not be nullptr"); + Operand *offset = memOpnd->GetIndexRegister(); + RegOperand *phyOpnd = GetReplaceOpnd(*insn, *offset, useSpillIdx, usedRegMask, false); + if (phyOpnd != nullptr) { + memOpnd->SetIndexRegister(*phyOpnd); + } + } + for (const auto [idx, defOpnd] : fInfo->GetDefOperands()) { + if (insn->IsAsmInsn()) { + if (defOpnd->IsList()) { + auto *outList = static_cast(defOpnd); + auto *srcOpndsNew = &cgFunc->GetOpndBuilder()->CreateList( + cgFunc->GetFuncScopeAllocator()->GetMemPool()); + RegOperand *phyOpnd; + for (auto &opnd : outList->GetOperands()) { + if (opnd->IsPhysicalRegister()) { + phyOpnd = opnd; + } else { + phyOpnd = GetReplaceOpnd(*insn, *opnd, useSpillIdx, usedRegMask, true); + } + srcOpndsNew->PushOpnd(*phyOpnd); + } + insn->SetOperand(kAsmOutputListOpnd, *srcOpndsNew); + continue; + } + } + RegOperand *phyOpnd = nullptr; + if (insn->IsSpecialIntrinsic()) { + phyOpnd = GetReplaceOpnd(*insn, *defOpnd, useSpillIdx, usedRegMask, true); + } else { + phyOpnd = GetReplaceOpnd(*insn, *defOpnd, defSpillIdx, usedRegMask, true); + } + if (phyOpnd != nullptr) { + insn->SetOperand(idx, *phyOpnd); + } + } + for (const auto [idx, useOpnd] : fInfo->GetUseOperands()) { + if (insn->IsAsmInsn()) { + if (useOpnd->IsList()) { + auto *inList = static_cast(useOpnd); + auto *srcOpndsNew = &cgFunc->GetOpndBuilder()->CreateList( + cgFunc->GetFuncScopeAllocator()->GetMemPool()); + for (auto &opnd : inList->GetOperands()) { + if (!regInfo->IsVirtualRegister(opnd->GetRegisterNumber())) { + srcOpndsNew->PushOpnd(*opnd); + } else { + RegOperand *phyOpnd = GetReplaceOpnd(*insn, *opnd, useSpillIdx, usedRegMask, false); + srcOpndsNew->PushOpnd(*phyOpnd); + } + } + insn->SetOperand(kAsmInputListOpnd, *srcOpndsNew); + continue; + } + } + RegOperand *phyOpnd = GetReplaceOpnd(*insn, *useOpnd, useSpillIdx, usedRegMask, false); + if (phyOpnd != nullptr) { + insn->SetOperand(idx, *phyOpnd); + } + } + for (const auto [idx, useDefOpnd] : fInfo->GetUseDefOperands()) { + RegOperand *phyOpnd = nullptr; + if (insn->IsSpecialIntrinsic()) { + phyOpnd = GetReplaceUseDefOpnd(*insn, *useDefOpnd, useSpillIdx, usedRegMask); + } else { + phyOpnd = GetReplaceUseDefOpnd(*insn, *useDefOpnd, defSpillIdx, usedRegMask); + } + if (phyOpnd != nullptr) { + insn->SetOperand(idx, *phyOpnd); + } + } + if (insn->IsIntRegisterMov()) { + auto ®1 = static_cast(insn->GetOperand(kInsnFirstOpnd)); + auto ®2 = static_cast(insn->GetOperand(kInsnSecondOpnd)); + /* remove mov x0,x0 when it cast i32 to i64 */ + if ((reg1.GetRegisterNumber() == reg2.GetRegisterNumber()) && (reg1.GetSize() >= reg2.GetSize())) { + bb->RemoveInsn(*insn); + } + } + } /* insn */ + } /* BB */ +} + +void GraphColorRegAllocator::MarkCalleeSaveRegs() { + for (auto regNO : intCalleeUsed) { + cgFunc->AddtoCalleeSaved(regNO); + } + for (auto regNO : fpCalleeUsed) { + cgFunc->AddtoCalleeSaved(regNO); + } +} + +bool GraphColorRegAllocator::AllocateRegisters() { +#ifdef RANDOM_PRIORITY + /* Change this seed for different random numbers */ + srand(0); +#endif /* RANDOM_PRIORITY */ + + if (GCRA_DUMP && doMultiPass) { + LogInfo::MapleLogger() << "\n round start: \n"; + cgFunc->DumpCGIR(); + } + /* + * we 
store both FP/LR if using FP or if not using FP, but func has a call + * Using FP, record it for saving + */ + regInfo->Fini(); + +#if DEBUG + int32 cnt = 0; + FOR_ALL_BB(bb, cgFunc) { + FOR_BB_INSNS(insn, bb) { + ++cnt; + } + } + ASSERT(cnt <= cgFunc->GetTotalNumberOfInstructions(), "Incorrect insn count"); +#endif + cgFunc->SetIsAfterRegAlloc(); + /* EBO propgation extent the live range and might need to be turned off. */ + Bfs localBfs(*cgFunc, *memPool); + bfs = &localBfs; + bfs->ComputeBlockOrder(); + + ComputeLiveRanges(); + + InitFreeRegPool(); + + BuildInterferenceGraph(); + + Separate(); + + SplitAndColor(); + +#ifdef USE_LRA + if (doLRA) { + LocalRegisterAllocator(true); + } +#endif /* USE_LRA */ + + FinalizeRegisters(); + + MarkCalleeSaveRegs(); + + if (GCRA_DUMP) { + cgFunc->DumpCGIR(); + } + + bfs = nullptr; /* bfs is not utilized outside the function. */ + + if (doMultiPass && hasSpill) { + return false; + } else { + return true; + } +} +} /* namespace maplebe */ diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_data_dep_base.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_data_dep_base.cpp new file mode 100644 index 0000000000000000000000000000000000000000..313aec1268e1b5d3ae7a9a954540c6580ae60cba --- /dev/null +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_data_dep_base.cpp @@ -0,0 +1,754 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "aarch64_cg.h" +#include "aarch64_operand.h" +#include "pressure.h" +#include "aarch64_data_dep_base.h" + +/* For building dependence graph, The entry is AArch64DataDepBase::Run. */ +namespace maplebe { +void AArch64DataDepBase::ReplaceDepNodeWithNewInsn(DepNode &firstNode, DepNode &secondNode, Insn& newInsn, + bool isFromClinit) const { + if (isFromClinit) { + firstNode.AddClinitInsn(*firstNode.GetInsn()); + firstNode.AddClinitInsn(*secondNode.GetInsn()); + firstNode.SetCfiInsns(secondNode.GetCfiInsns()); + } else { + for (Insn *insn : secondNode.GetCfiInsns()) { + firstNode.AddCfiInsn(*insn); + } + for (Insn *insn : secondNode.GetComments()) { + firstNode.AddComments(*insn); + } + secondNode.ClearComments(); + } + firstNode.SetInsn(newInsn); + Reservation *rev = mad.FindReservation(newInsn); + CHECK_FATAL(rev != nullptr, "reservation is nullptr."); + firstNode.SetReservation(*rev); + firstNode.SetUnits(rev->GetUnit()); + firstNode.SetUnitNum(rev->GetUnitNum()); + newInsn.SetDepNode(firstNode); +} + +void AArch64DataDepBase::ClearDepNodeInfo(DepNode &depNode) const { + Insn &insn = cgFunc.GetInsnBuilder()->BuildInsn(MOP_pseudo_none); + insn.SetDepNode(depNode); + Reservation *seRev = mad.FindReservation(insn); + depNode.SetInsn(insn); + depNode.SetType(kNodeTypeEmpty); + depNode.SetReservation(*seRev); + depNode.SetUnitNum(0); + depNode.ClearCfiInsns(); + depNode.SetUnits(nullptr); +} + +/* Combine (adrpldr & clinit_tail) to clinit. 
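+ * Schematic example (hypothetical operands): a preceding MOP_adrp_ldr that loads the
+ * classinfo address and the MOP_clinit_tail that performs the init check are fused into a
+ * single MOP_clinit built from the first insn's two operands; the second DepNode is reset
+ * to an empty pseudo node and its dependences are folded into the first node.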
*/ +void AArch64DataDepBase::CombineClinit(DepNode &firstNode, DepNode &secondNode, bool isAcrossSeparator) { + ASSERT(firstNode.GetInsn()->GetMachineOpcode() == MOP_adrp_ldr, "first insn should be adrpldr"); + ASSERT(secondNode.GetInsn()->GetMachineOpcode() == MOP_clinit_tail, "second insn should be clinit_tail"); + ASSERT(firstNode.GetCfiInsns().empty(), "There should not be any comment/cfi instructions between clinit."); + ASSERT(secondNode.GetComments().empty(), "There should not be any comment/cfi instructions between clinit."); + Insn &newInsn = cgFunc.GetInsnBuilder()->BuildInsn( + MOP_clinit, firstNode.GetInsn()->GetOperand(0), firstNode.GetInsn()->GetOperand(1)); + newInsn.SetId(firstNode.GetInsn()->GetId()); + /* Replace first node with new insn. */ + ReplaceDepNodeWithNewInsn(firstNode, secondNode, newInsn, true); + /* Clear second node information. */ + ClearDepNodeInfo(secondNode); + CombineDependence(firstNode, secondNode, isAcrossSeparator); +} + +/* + * Combine memory access pair: + * 1.ldr to ldp. + * 2.str to stp. + */ +void AArch64DataDepBase::CombineMemoryAccessPair(DepNode &firstNode, DepNode &secondNode, bool useFirstOffset) { + ASSERT(firstNode.GetInsn(), "the insn of first Node should not be nullptr"); + ASSERT(secondNode.GetInsn(), "the insn of second Node should not be nullptr"); + MOperator thisMop = firstNode.GetInsn()->GetMachineOpcode(); + MOperator mopPair = GetMopPair(thisMop); + ASSERT(mopPair != 0, "mopPair should not be zero"); + Operand *opnd0 = nullptr; + Operand *opnd1 = nullptr; + Operand *opnd2 = nullptr; + if (useFirstOffset) { + opnd0 = &(firstNode.GetInsn()->GetOperand(0)); + opnd1 = &(secondNode.GetInsn()->GetOperand(0)); + opnd2 = &(firstNode.GetInsn()->GetOperand(1)); + } else { + opnd0 = &(secondNode.GetInsn()->GetOperand(0)); + opnd1 = &(firstNode.GetInsn()->GetOperand(0)); + opnd2 = &(secondNode.GetInsn()->GetOperand(1)); + } + Insn &newInsn = cgFunc.GetInsnBuilder()->BuildInsn(mopPair, *opnd0, *opnd1, *opnd2); + newInsn.SetId(firstNode.GetInsn()->GetId()); + std::string newComment; + const MapleString &comment = firstNode.GetInsn()->GetComment(); + if (comment.c_str() != nullptr) { + newComment += comment.c_str(); + } + const MapleString &secondComment = secondNode.GetInsn()->GetComment(); + if (secondComment.c_str() != nullptr) { + newComment += " "; + newComment += secondComment.c_str(); + } + if ((newComment.c_str() != nullptr) && (strlen(newComment.c_str()) > 0)) { + newInsn.SetComment(newComment); + } + /* Replace first node with new insn. */ + ReplaceDepNodeWithNewInsn(firstNode, secondNode, newInsn, false); + /* Clear second node information. */ + ClearDepNodeInfo(secondNode); + CombineDependence(firstNode, secondNode, false, true); +} + +bool AArch64DataDepBase::IsFrameReg(const RegOperand &opnd) const { + return (opnd.GetRegisterNumber() == RFP) || (opnd.GetRegisterNumber() == RSP); +} + +MemOperand *AArch64DataDepBase::BuildNextMemOperandByByteSize(const MemOperand &aarchMemOpnd, uint32 byteSize) const { + MemOperand *nextMemOpnd = aarchMemOpnd.Clone(memPool); + Operand *nextOfstOpnd = nextMemOpnd->GetOffsetImmediate()->Clone(memPool); + auto *aarchNextOfstOpnd = static_cast(nextOfstOpnd); + CHECK_NULL_FATAL(aarchNextOfstOpnd); + auto offsetVal = static_cast(aarchNextOfstOpnd->GetOffsetValue()); + aarchNextOfstOpnd->SetOffsetValue(offsetVal + static_cast(byteSize)); + nextMemOpnd->SetOffsetOperand(*aarchNextOfstOpnd); + return nextMemOpnd; +} + +/* Get the second memory access operand of stp/ldp instructions. 
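+ * A load/store pair touches two adjacent slots, so the second access is modelled explicitly:
+ * for 32-bit pairs (wldp/sldp/xldpsw/wstp/sstp) it sits at the base offset plus 4 bytes, for
+ * 64-bit pairs (xldp/dldp/xstp/dstp) at the base offset plus 8 bytes.
+ * Schematic example (hypothetical operands): stp w0, w1, [sp, #16] is treated as accessing
+ * both [sp, #16] and [sp, #20].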
*/ +MemOperand *AArch64DataDepBase::GetNextMemOperand(const Insn &insn, const MemOperand &aarchMemOpnd) const { + MemOperand *nextMemOpnd = nullptr; + switch (insn.GetMachineOpcode()) { + case MOP_wldp: + case MOP_sldp: + case MOP_xldpsw: + case MOP_wstp: + case MOP_sstp: { + nextMemOpnd = BuildNextMemOperandByByteSize(aarchMemOpnd, k4ByteSize); + break; + } + case MOP_xldp: + case MOP_dldp: + case MOP_xstp: + case MOP_dstp: { + nextMemOpnd = BuildNextMemOperandByByteSize(aarchMemOpnd, k8ByteSize); + break; + } + default: + break; + } + + return nextMemOpnd; +} + +/* + * Build data dependence of symbol memory access. + * Memory access with symbol must be a heap memory access. + */ +void AArch64DataDepBase::BuildDepsAccessStImmMem(Insn &insn, bool isDest) { + if (isDest) { + AddDependence4InsnInVectorByType(curCDGNode->GetHeapUseInsns(), insn, kDependenceTypeAnti); + /* Build output dependence */ + AddDependence4InsnInVectorByType(curCDGNode->GetHeapDefInsns(), insn, kDependenceTypeOutput); + curCDGNode->AddHeapDefInsn(&insn); + } else { + AddDependence4InsnInVectorByType(curCDGNode->GetHeapDefInsns(), insn, kDependenceTypeTrue); + curCDGNode->AddHeapUseInsn(&insn); + } + Insn *membarInsn = curCDGNode->GetMembarInsn(); + if (membarInsn != nullptr) { + AddDependence(*membarInsn->GetDepNode(), *insn.GetDepNode(), kDependenceTypeMembar); + } +} + +/* Build data dependence of memory bars instructions */ +void AArch64DataDepBase::BuildDepsMemBar(Insn &insn) { + if (IsIntraBlockAnalysis()) { + AddDependence4InsnInVectorByTypeAndCmp(curCDGNode->GetStackUseInsns(), insn, kDependenceTypeMembar); + AddDependence4InsnInVectorByTypeAndCmp(curCDGNode->GetHeapUseInsns(), insn, kDependenceTypeMembar); + AddDependence4InsnInVectorByTypeAndCmp(curCDGNode->GetStackDefInsns(), insn, kDependenceTypeMembar); + AddDependence4InsnInVectorByTypeAndCmp(curCDGNode->GetHeapDefInsns(), insn, kDependenceTypeMembar); + } else { + BuildInterBlockSpecialDataInfoDependency(*insn.GetDepNode(), true, kDependenceTypeMembar, kStackUses); + BuildInterBlockSpecialDataInfoDependency(*insn.GetDepNode(), true, kDependenceTypeMembar, kHeapUses); + BuildInterBlockSpecialDataInfoDependency(*insn.GetDepNode(), true, kDependenceTypeMembar, kStackDefs); + BuildInterBlockSpecialDataInfoDependency(*insn.GetDepNode(), true, kDependenceTypeMembar, kHeapDefs); + } + curCDGNode->SetMembarInsn(&insn); +} + +/* Build data dependence of stack memory and heap memory uses */ +void AArch64DataDepBase::BuildDepsUseMem(Insn &insn, MemOperand &aarchMemOpnd) { + aarchMemOpnd.SetAccessSize(insn.GetMemoryByteSize()); + RegOperand *baseRegister = aarchMemOpnd.GetBaseRegister(); + MemOperand *nextMemOpnd = GetNextMemOperand(insn, aarchMemOpnd); + if (IsIntraBlockAnalysis()) { + /* Stack memory address */ + MapleVector stackDefs = curCDGNode->GetStackDefInsns(); + for (auto defInsn : stackDefs) { + if (defInsn->IsCall() || NeedBuildDepsMem(aarchMemOpnd, nextMemOpnd, *defInsn)) { + AddDependence(*defInsn->GetDepNode(), *insn.GetDepNode(), kDependenceTypeTrue); + } + } + /* Heap memory address */ + MapleVector heapDefs = curCDGNode->GetHeapDefInsns(); + AddDependence4InsnInVectorByType(heapDefs, insn, kDependenceTypeTrue); + } else { + BuildInterBlockMemDefUseDependency(*insn.GetDepNode(), aarchMemOpnd, nextMemOpnd, false); + } + if (((baseRegister != nullptr) && IsFrameReg(*baseRegister)) || aarchMemOpnd.IsStackMem()) { + curCDGNode->AddStackUseInsn(&insn); + } else { + curCDGNode->AddHeapUseInsn(&insn); + } + Insn *membarInsn = curCDGNode->GetMembarInsn(); + 
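+  /*
+   * A memory barrier recorded on the CDG node orders every later memory access after it, so
+   * this load also takes a membar dependence on the most recent barrier; in inter-block mode
+   * the barrier is searched along predecessor paths instead.
+   */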
if (membarInsn != nullptr) { + AddDependence(*membarInsn->GetDepNode(), *insn.GetDepNode(), kDependenceTypeMembar); + } else if (!IsIntraBlockAnalysis()) { + BuildInterBlockSpecialDataInfoDependency(*insn.GetDepNode(), false, + kDependenceTypeMembar, kMembar); + } +} + +/* Build data dependence of stack memory and heap memory definitions */ +void AArch64DataDepBase::BuildDepsDefMem(Insn &insn, MemOperand &aarchMemOpnd) { + RegOperand *baseRegister = aarchMemOpnd.GetBaseRegister(); + ASSERT(baseRegister != nullptr, "baseRegister shouldn't be null here"); + MemOperand *nextMemOpnd = GetNextMemOperand(insn, aarchMemOpnd); + aarchMemOpnd.SetAccessSize(insn.GetMemoryByteSize()); + + if (IsIntraBlockAnalysis()) { + /* Build anti dependence */ + MapleVector stackUses = curCDGNode->GetStackUseInsns(); + for (auto *stackUse : stackUses) { + if (NeedBuildDepsMem(aarchMemOpnd, nextMemOpnd, *stackUse)) { + AddDependence(*stackUse->GetDepNode(), *insn.GetDepNode(), kDependenceTypeAnti); + } + } + /* Build output dependence */ + MapleVector stackDefs = curCDGNode->GetStackDefInsns(); + for (auto stackDef : stackDefs) { + if (stackDef->IsCall() || NeedBuildDepsMem(aarchMemOpnd, nextMemOpnd, *stackDef)) { + AddDependence(*stackDef->GetDepNode(), *insn.GetDepNode(), kDependenceTypeOutput); + } + } + /* Heap memory + * Build anti dependence + */ + MapleVector heapUses = curCDGNode->GetHeapUseInsns(); + AddDependence4InsnInVectorByType(heapUses, insn, kDependenceTypeAnti); + /* Build output dependence */ + MapleVector heapDefs = curCDGNode->GetHeapDefInsns(); + AddDependence4InsnInVectorByType(heapDefs, insn, kDependenceTypeOutput); + + /* Memory definition can not across may-throw insns */ + MapleVector mayThrows = curCDGNode->GetMayThrowInsns(); + AddDependence4InsnInVectorByType(mayThrows, insn, kDependenceTypeThrow); + } else { + BuildInterBlockMemDefUseDependency(*insn.GetDepNode(), aarchMemOpnd, nextMemOpnd, true); + BuildInterBlockSpecialDataInfoDependency(*insn.GetDepNode(), false, kDependenceTypeThrow, kMayThrows); + } + + if (baseRegister->GetRegisterNumber() == RSP) { + Insn *lastCallInsn = curCDGNode->GetLastCallInsn(); + if (lastCallInsn != nullptr) { + /* Build a dependence between stack passed arguments and call */ + AddDependence(*lastCallInsn->GetDepNode(), *insn.GetDepNode(), kDependenceTypeControl); + } else if (!IsIntraBlockAnalysis()) { + BuildInterBlockSpecialDataInfoDependency(*insn.GetDepNode(), false, kDependenceTypeControl, kLastCall); + } + } + + Insn *membarInsn = curCDGNode->GetMembarInsn(); + if (membarInsn != nullptr) { + AddDependence(*membarInsn->GetDepNode(), *insn.GetDepNode(), kDependenceTypeMembar); + } else if (!IsIntraBlockAnalysis()) { + BuildInterBlockSpecialDataInfoDependency(*insn.GetDepNode(), false, kDependenceTypeMembar, kMembar); + } + + /* Update cur cdgNode info */ + if (IsFrameReg(*baseRegister) || aarchMemOpnd.IsStackMem()) { + curCDGNode->AddStackDefInsn(&insn); + } else { + curCDGNode->AddHeapDefInsn(&insn); + } +} + +static bool NoAlias(const MemOperand &leftOpnd, const MemOperand &rightOpnd) { + if (leftOpnd.GetAddrMode() == MemOperand::kAddrModeBOi && + rightOpnd.GetAddrMode() == MemOperand::kAddrModeBOi && leftOpnd.GetIndexOpt() == MemOperand::kIntact && + rightOpnd.GetIndexOpt() == MemOperand::kIntact) { + if (leftOpnd.GetBaseRegister()->GetRegisterNumber() == RFP || + rightOpnd.GetBaseRegister()->GetRegisterNumber() == RFP) { + Operand *ofstOpnd = leftOpnd.GetOffsetOperand(); + Operand *rofstOpnd = rightOpnd.GetOffsetOperand(); + ASSERT(ofstOpnd != 
nullptr, "offset operand should not be null."); + ASSERT(rofstOpnd != nullptr, "offset operand should not be null."); + auto *ofst = static_cast(ofstOpnd); + auto *rofst = static_cast(rofstOpnd); + ASSERT(ofst != nullptr, "CG internal error, invalid type."); + ASSERT(rofst != nullptr, "CG internal error, invalid type."); + return (!ofst->ValueEquals(*rofst)); + } + } + return false; +} + +static bool NoOverlap(const MemOperand &leftOpnd, const MemOperand &rightOpnd) { + if (leftOpnd.GetAddrMode() != MemOperand::kAddrModeBOi || + rightOpnd.GetAddrMode() != MemOperand::kAddrModeBOi || + leftOpnd.GetIndexOpt() != MemOperand::kIntact || + rightOpnd.GetIndexOpt() != MemOperand::kIntact) { + return false; + } + if (leftOpnd.GetBaseRegister()->GetRegisterNumber() != RFP || + rightOpnd.GetBaseRegister()->GetRegisterNumber() != RFP) { + return false; + } + int64 ofset1 = leftOpnd.GetOffsetOperand()->GetValue(); + int64 ofset2 = rightOpnd.GetOffsetOperand()->GetValue(); + if (ofset1 < ofset2) { + return ((ofset1 + static_cast(leftOpnd.GetAccessSize())) <= ofset2); + } else { + return ((ofset2 + static_cast(rightOpnd.GetAccessSize())) <= ofset1); + } +} + +/* Return true if memInsn's memOpnd no alias with memOpnd and nextMemOpnd */ +bool AArch64DataDepBase::NeedBuildDepsMem(const MemOperand &memOpnd, + const MemOperand *nextMemOpnd, + const Insn &memInsn) const { + auto *memOpndOfmemInsn = static_cast(memInsn.GetMemOpnd()); + if (!NoAlias(memOpnd, *memOpndOfmemInsn) || + ((nextMemOpnd != nullptr) && !NoAlias(*nextMemOpnd, *memOpndOfmemInsn))) { + return true; + } + if (cgFunc.GetMirModule().GetSrcLang() == kSrcLangC && !memInsn.IsCall()) { + static_cast(memInsn.GetMemOpnd())->SetAccessSize(memInsn.GetMemoryByteSize()); + return (!NoOverlap(memOpnd, *memOpndOfmemInsn)); + } + MemOperand *nextMemOpndOfmemInsn = GetNextMemOperand(memInsn, *memOpndOfmemInsn); + if (nextMemOpndOfmemInsn != nullptr) { + if (!NoAlias(memOpnd, *nextMemOpndOfmemInsn) || + ((nextMemOpnd != nullptr) && !NoAlias(*nextMemOpnd, *nextMemOpndOfmemInsn))) { + return true; + } + } + return false; +} + +/* + * Build dependence of call instructions. + * caller-saved physical registers will be defined by a call instruction. + * also a conditional register may be modified by a call. + */ +void AArch64DataDepBase::BuildCallerSavedDeps(Insn &insn) { + /* Build anti dependence and output dependence. */ + for (uint32 i = R0; i <= R7; ++i) { + BuildDepsDefReg(insn, i); + } + for (uint32 i = V0; i <= V7; ++i) { + BuildDepsDefReg(insn, i); + } + if (!beforeRA) { + for (uint32 i = R8; i <= R18; ++i) { + BuildDepsDefReg(insn, i); + } + for (uint32 i = RLR; i <= RSP; ++i) { + BuildDepsUseReg(insn, i); + } + for (uint32 i = V16; i <= V31; ++i) { + BuildDepsDefReg(insn, i); + } + } + /* For condition operand, such as NE, EQ, and so on. */ + if (cgFunc.GetRflag() != nullptr) { + BuildDepsDefReg(insn, kRFLAG); + } +} + +/* + * Build dependence between stack-define-instruction that deal with call-insn's args and a call-instruction. 
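+ * Outgoing arguments that do not fit in the parameter registers are stored through SP before
+ * the call, and those stores must not be scheduled past the call that consumes them, so each
+ * SP-based stack definition gets a control dependence to the call.
+ * Schematic example (hypothetical operands):
+ *     str x8, [sp]        stack-passed argument
+ *     bl  callee          control-depends on the str above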
+ * insn : a call instruction (call/tail-call) + */ +void AArch64DataDepBase::BuildStackPassArgsDeps(Insn &insn) { + MapleVector stackDefs = curCDGNode->GetStackDefInsns(); + for (auto stackDefInsn : stackDefs) { + if (stackDefInsn->IsCall()) { + continue; + } + Operand *opnd = stackDefInsn->GetMemOpnd(); + ASSERT(opnd->IsMemoryAccessOperand(), "make sure opnd is memOpnd"); + auto *memOpnd = static_cast(opnd); + RegOperand *baseReg = memOpnd->GetBaseRegister(); + if ((baseReg != nullptr) && (baseReg->GetRegisterNumber() == RSP)) { + AddDependence(*stackDefInsn->GetDepNode(), *insn.GetDepNode(), kDependenceTypeControl); + } + } +} + +/* Some insns may dirty all stack memory, such as "bl MCC_InitializeLocalStackRef" */ +void AArch64DataDepBase::BuildDepsDirtyStack(Insn &insn) { + /* Build anti dependence */ + MapleVector stackUses = curCDGNode->GetStackUseInsns(); + AddDependence4InsnInVectorByType(stackUses, insn, kDependenceTypeAnti); + /* Build output dependence */ + MapleVector stackDefs = curCDGNode->GetStackDefInsns(); + AddDependence4InsnInVectorByType(stackDefs, insn, kDependenceTypeOutput); + curCDGNode->AddStackDefInsn(&insn); +} + +/* Some call insns may use all stack memory, such as "bl MCC_CleanupLocalStackRef_NaiveRCFast" */ +void AArch64DataDepBase::BuildDepsUseStack(Insn &insn) { + /* Build true dependence */ + MapleVector stackDefs = curCDGNode->GetStackDefInsns(); + AddDependence4InsnInVectorByType(stackDefs, insn, kDependenceTypeTrue); +} + +/* Some insns may dirty all heap memory, such as a call insn */ +void AArch64DataDepBase::BuildDepsDirtyHeap(Insn &insn) { + /* Build anti dependence */ + MapleVector heapUses = curCDGNode->GetHeapUseInsns(); + AddDependence4InsnInVectorByType(heapUses, insn, kDependenceTypeAnti); + /* Build output dependence */ + MapleVector heapDefs = curCDGNode->GetHeapDefInsns(); + AddDependence4InsnInVectorByType(heapDefs, insn, kDependenceTypeOutput); + Insn *membarInsn = curCDGNode->GetMembarInsn(); + if (membarInsn != nullptr) { + AddDependence(*membarInsn->GetDepNode(), *insn.GetDepNode(), kDependenceTypeMembar); + } + curCDGNode->AddHeapDefInsn(&insn); +} + +/* Analysis live-in registers in catch bb and cleanup bb */ +void AArch64DataDepBase::AnalysisAmbiInsns(BB &bb) { + curCDGNode->SetHasAmbiRegs(false); + if (bb.GetEhSuccs().empty()) { + return; + } + MapleSet &ehInRegs = curCDGNode->GetEhInRegs(); + + /* Union all catch bb */ + for (auto succBB : bb.GetEhSuccs()) { + const MapleSet &liveInRegSet = succBB->GetLiveInRegNO(); + (void)set_union(liveInRegSet.begin(), liveInRegSet.end(), ehInRegs.begin(), ehInRegs.end(), + inserter(ehInRegs, ehInRegs.begin())); + } + + /* Union cleanup entry bb */ + const MapleSet ®NOSet = cgFunc.GetCleanupBB()->GetLiveInRegNO(); + (void)std::set_union(regNOSet.begin(), regNOSet.end(), ehInRegs.begin(), ehInRegs.end(), + inserter(ehInRegs, ehInRegs.begin())); + + /* Subtract R0 and R1, that is defined by eh runtime */ + (void)ehInRegs.erase(R0); + (void)ehInRegs.erase(R1); + if (ehInRegs.empty()) { + return; + } + curCDGNode->SetHasAmbiRegs(true); +} + +/* + * It is a yieldpoint if loading from a dedicated + * register holding polling page address: + * ldr wzr, [RYP] + */ +static bool IsYieldPoint(const Insn &insn) { + if (insn.IsLoad() && !insn.IsLoadLabel()) { + auto mem = static_cast(insn.GetMemOpnd()); + return (mem != nullptr && mem->GetBaseRegister() != nullptr && mem->GetBaseRegister()->GetRegisterNumber() == RYP); + } + return false; +} + +/* + * Build data dependence of memory operand. 
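+ * Edges built here: the base and index registers are register uses; for base-offset
+ * addressing with pre/post-index writeback the base register is also a def. A use-type
+ * memory operand builds load dependences, a def-type operand builds store dependences plus
+ * ambiguous-insn dependences used for exception handling, and a yieldpoint load from RYP is
+ * additionally treated like a memory barrier.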
+ * insn : an instruction with the memory access operand. + * opnd : the memory access operand. + * regProp : operand property of the memory access operand. + */ +void AArch64DataDepBase::BuildMemOpndDependency(Insn &insn, Operand &opnd, const OpndDesc ®Prop) { + ASSERT(opnd.IsMemoryAccessOperand(), "opnd must be memory Operand"); + auto *memOpnd = static_cast(&opnd); + RegOperand *baseRegister = memOpnd->GetBaseRegister(); + if (baseRegister != nullptr) { + regno_t regNO = baseRegister->GetRegisterNumber(); + BuildDepsUseReg(insn, regNO); + if ((memOpnd->GetAddrMode() == MemOperand::kAddrModeBOi) && + (memOpnd->IsPostIndexed() || memOpnd->IsPreIndexed())) { + /* Base operand has changed. */ + BuildDepsDefReg(insn, regNO); + } + } + RegOperand *indexRegister = memOpnd->GetIndexRegister(); + if (indexRegister != nullptr) { + regno_t regNO = indexRegister->GetRegisterNumber(); + BuildDepsUseReg(insn, regNO); + } + if (regProp.IsUse()) { + BuildDepsUseMem(insn, *memOpnd); + } else { + BuildDepsDefMem(insn, *memOpnd); + BuildDepsAmbiInsn(insn); + } + if (IsYieldPoint(insn)) { + BuildDepsMemBar(insn); + BuildDepsDefReg(insn, kRFLAG); + } +} + +/* Build Dependency for each operand of insn */ +void AArch64DataDepBase::BuildOpndDependency(Insn &insn) { + const InsnDesc* md = insn.GetDesc(); + MOperator mOp = insn.GetMachineOpcode(); + uint32 opndNum = insn.GetOperandSize(); + for (uint32 i = 0; i < opndNum; ++i) { + Operand &opnd = insn.GetOperand(i); + const OpndDesc *regProp = md->opndMD[i]; + if (opnd.IsMemoryAccessOperand()) { + BuildMemOpndDependency(insn, opnd, *regProp); + } else if (opnd.IsStImmediate()) { + if (mOp != MOP_xadrpl12) { + BuildDepsAccessStImmMem(insn, false); + } + } else if (opnd.IsRegister()) { + auto ®Opnd = static_cast(opnd); + regno_t regNO = regOpnd.GetRegisterNumber(); + if (regProp->IsUse()) { + BuildDepsUseReg(insn, regNO); + } + if (regProp->IsDef()) { + BuildDepsDefReg(insn, regNO); + } + } else if (opnd.IsConditionCode()) { + /* For condition operand, such as NE, EQ, and so on. */ + if (regProp->IsUse()) { + BuildDepsUseReg(insn, kRFLAG); + BuildDepsBetweenControlRegAndCall(insn, false); + } + if (regProp->IsDef()) { + BuildDepsDefReg(insn, kRFLAG); + BuildDepsBetweenControlRegAndCall(insn, true); + } + } else if (opnd.IsList()) { + auto &listOpnd = static_cast(opnd); + /* Build true dependences */ + for (auto &lst : listOpnd.GetOperands()) { + regno_t regNO = lst->GetRegisterNumber(); + BuildDepsUseReg(insn, regNO); + } + } + } +} + +static bool IsLazyLoad(MOperator op) { + return (op == MOP_lazy_ldr) || (op == MOP_lazy_ldr_static) || (op == MOP_lazy_tail); +} + +/* + * Build dependences in some special issue (stack/heap/throw/clinit/lazy binding/control flow). + * insn : an instruction. + * depNode : insn's depNode. + * nodes : the dependence nodes include insn's depNode. 
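+ * Dispatch overview: calls and tail calls clobber the caller-saved registers, depend on the
+ * stack-passed arguments, dirty the heap (and the stack for C code or the MCC_* stack
+ * helpers), and update the ambiguous/last-call state; clinit, lazy-binding and array-class
+ * cache loads dirty the heap and define the flags and, except for MOP_adrp_ldr, the R16/R17
+ * scratch registers; returns and branches take control dependences over the node list;
+ * memory barriers go through BuildDepsMemBar; special intrinsics conservatively dirty the heap.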
+ */ +void AArch64DataDepBase::BuildSpecialInsnDependency(Insn &insn, const MapleVector &nodes) { + const InsnDesc *md = insn.GetDesc(); + MOperator mOp = insn.GetMachineOpcode(); + if (insn.IsCall() || insn.IsTailCall()) { + /* Caller saved registers */ + BuildCallerSavedDeps(insn); + BuildStackPassArgsDeps(insn); + + if (mOp == MOP_xbl) { + auto &target = static_cast(insn.GetOperand(0)); + if ((target.GetName() == "MCC_InitializeLocalStackRef") || + (target.GetName() == "MCC_ClearLocalStackRef") || + (target.GetName() == "MCC_DecRefResetPair")) { + /* Write stack memory */ + BuildDepsDirtyStack(insn); + } else if ((target.GetName() == "MCC_CleanupLocalStackRef_NaiveRCFast") || + (target.GetName() == "MCC_CleanupLocalStackRefSkip_NaiveRCFast") || + (target.GetName() == "MCC_CleanupLocalStackRefSkip")) { + /* Use Stack Memory */ + BuildDepsUseStack(insn); + } else if (cgFunc.GetMirModule().GetSrcLang() == kSrcLangC) { + /* potential C aliasing */ + BuildDepsDirtyStack(insn); + } + } + BuildDepsDirtyHeap(insn); + BuildDepsAmbiInsn(insn); + BuildDepsLastCallInsn(insn); + } else if (insn.IsClinit() || IsLazyLoad(insn.GetMachineOpcode()) || + insn.GetMachineOpcode() == MOP_arrayclass_cache_ldr) { + BuildDepsDirtyHeap(insn); + BuildDepsDefReg(insn, kRFLAG); + if (insn.GetMachineOpcode() != MOP_adrp_ldr) { + BuildDepsDefReg(insn, R16); + BuildDepsDefReg(insn, R17); + } + } else if ((mOp == MOP_xret) || md->IsBranch()) { + BuildDepsControlAll(insn, nodes); + } else if (insn.IsMemAccessBar()) { + BuildDepsMemBar(insn); + } else if (insn.IsSpecialIntrinsic()) { + BuildDepsDirtyHeap(insn); + } +} + +void AArch64DataDepBase::UpdateRegUseAndDef(Insn &insn, const DepNode &depNode, MapleVector &nodes) { + /* Update reg use */ + const auto &useRegnos = depNode.GetUseRegnos(); + if (beforeRA) { + depNode.InitRegUsesSize(useRegnos.size()); + } + for (auto regNO : useRegnos) { + // Update reg use for cur depInfo + curCDGNode->AppendUseInsnChain(regNO, &insn, memPool, beforeRA); + if (beforeRA) { + CHECK_FATAL(curCDGNode->GetUseInsnChain(regNO)->insn != nullptr, "get useInsn failed"); + depNode.SetRegUses(*curCDGNode->GetUseInsnChain(regNO)); + if (curCDGNode->GetLatestDefInsn(regNO) == nullptr) { + curCDGNode->SetLatestDefInsn(regNO, nodes[separatorIndex]->GetInsn()); + nodes[separatorIndex]->AddDefReg(regNO); + nodes[separatorIndex]->SetRegDefs(nodes[separatorIndex]->GetDefRegnos().size(), + curCDGNode->GetUseInsnChain(regNO)); + } + } + } + + /* Update reg def */ + const auto &defRegnos = depNode.GetDefRegnos(); + size_t i = 0; + if (beforeRA) { + depNode.InitRegDefsSize(defRegnos.size()); + } + for (const auto regNO : defRegnos) { + // Update reg def for cur depInfo + curCDGNode->SetLatestDefInsn(regNO, &insn); + curCDGNode->ClearUseInsnChain(regNO); + if (beforeRA) { + depNode.SetRegDefs(i, nullptr); + if (regNO >= R0 && regNO <= R3) { + depNode.SetHasPreg(true); + } else if (regNO == R8) { + depNode.SetHasNativeCallRegister(true); + } + } + ++i; + } +} + +/* Build a pseudo node to separate data dependence graph */ +DepNode *AArch64DataDepBase::BuildSeparatorNode() { + Insn &pseudoSepInsn = cgFunc.GetInsnBuilder()->BuildInsn(MOP_pseudo_dependence_seperator); + auto *separatorNode = memPool.New(pseudoSepInsn, alloc); + separatorNode->SetType(kNodeTypeSeparator); + pseudoSepInsn.SetDepNode(*separatorNode); + if (beforeRA) { + auto *regPressure = memPool.New(alloc); + separatorNode->SetRegPressure(*regPressure); + separatorNode->InitPressure(); + } + return separatorNode; +} + +void 
AArch64DataDepBase::BuildInterBlockMemDefUseDependency(DepNode &depNode, MemOperand &memOpnd, + MemOperand *nextMemOpnd, bool isMemDef) { + CHECK_FATAL(!IsIntraBlockAnalysis(), "must be inter block data dependence analysis"); + BB *curBB = curCDGNode->GetBB(); + CHECK_FATAL(curBB != nullptr, "get bb from cdgNode failed"); + CDGRegion *curRegion = curCDGNode->GetRegion(); + CHECK_FATAL(curRegion != nullptr, "get region from cdgNode failed"); + std::vector visited(curRegion->GetMaxBBIdInRegion(), false); + if (isMemDef) { + BuildPredPathMemDefDependencyDFS(*curBB, visited, depNode, memOpnd, nextMemOpnd); + } else { + BuildPredPathMemUseDependencyDFS(*curBB, visited, depNode, memOpnd, nextMemOpnd); + } +} + +void AArch64DataDepBase::BuildPredPathMemDefDependencyDFS(BB &curBB, std::vector &visited, DepNode &depNode, + MemOperand &memOpnd, MemOperand *nextMemOpnd) { + if (visited[curBB.GetId()]) { + return; + } + CDGNode *cdgNode = curBB.GetCDGNode(); + CHECK_FATAL(cdgNode != nullptr, "get cdgNode from bb failed"); + CDGRegion *curRegion = cdgNode->GetRegion(); + CHECK_FATAL(curRegion != nullptr, "get region from cdgNode failed"); + if (curRegion->GetRegionId() != curCDGNode->GetRegion()->GetRegionId()) { + return; + } + visited[curBB.GetId()] = true; + MapleVector stackUses = cdgNode->GetStackUseInsns(); + for (auto *stackUse : stackUses) { + if (NeedBuildDepsMem(memOpnd, nextMemOpnd, *stackUse)) { + AddDependence(*stackUse->GetDepNode(), depNode, kDependenceTypeAnti); + } + } + /* Build output dependence */ + MapleVector stackDefs = cdgNode->GetStackDefInsns(); + for (auto stackDef : stackDefs) { + if (stackDef->IsCall() || NeedBuildDepsMem(memOpnd, nextMemOpnd, *stackDef)) { + AddDependence(*stackDef->GetDepNode(), depNode, kDependenceTypeOutput); + } + } + /* Heap memory + * Build anti dependence + */ + MapleVector heapUses = curCDGNode->GetHeapUseInsns(); + AddDependence4InsnInVectorByType(heapUses, *depNode.GetInsn(), kDependenceTypeAnti); + /* Build output dependence */ + MapleVector heapDefs = curCDGNode->GetHeapDefInsns(); + AddDependence4InsnInVectorByType(heapDefs, *depNode.GetInsn(), kDependenceTypeOutput); + for (auto predIt = curBB.GetPredsBegin(); predIt != curBB.GetPredsEnd(); ++predIt) { + BuildPredPathMemDefDependencyDFS(**predIt, visited, depNode, memOpnd, nextMemOpnd); + } +} + +void AArch64DataDepBase::BuildPredPathMemUseDependencyDFS(BB &curBB, std::vector &visited, DepNode &depNode, + MemOperand &memOpnd, MemOperand *nextMemOpnd) { + if (visited[curBB.GetId()]) { + return; + } + CDGNode *cdgNode = curBB.GetCDGNode(); + CHECK_FATAL(cdgNode != nullptr, "get cdgNode from bb failed"); + CDGRegion *curRegion = cdgNode->GetRegion(); + CHECK_FATAL(curRegion != nullptr, "get region from cdgNode failed"); + if (curRegion->GetRegionId() != curCDGNode->GetRegion()->GetRegionId()) { + return; + } + visited[curBB.GetId()] = true; + /* Stack memory address */ + MapleVector stackDefs = cdgNode->GetStackDefInsns(); + for (auto stackDef : stackDefs) { + if (stackDef->IsCall() || NeedBuildDepsMem(memOpnd, nextMemOpnd, *stackDef)) { + AddDependence(*stackDef->GetDepNode(), depNode, kDependenceTypeTrue); + } + } + /* Heap memory address */ + MapleVector heapDefs = cdgNode->GetHeapDefInsns(); + AddDependence4InsnInVectorByType(heapDefs, *depNode.GetInsn(), kDependenceTypeTrue); + for (auto predIt = curBB.GetPredsBegin(); predIt != curBB.GetPredsEnd(); ++predIt) { + BuildPredPathMemUseDependencyDFS(**predIt, visited, depNode, memOpnd, nextMemOpnd); + } +} +} /* namespace maplebe */ diff --git 
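/*
 * Editorial sketch, not part of the patch above: the shape of the walk performed by
 * BuildPredPathMem{Def,Use}DependencyDFS -- a depth-first search over CFG predecessors that
 * stays inside one CDG region and visits each block at most once before applying an action
 * (adding anti/output or true edges). Block and WalkPredPaths are simplified stand-ins, not
 * the Maple CDG classes.
 */
#include <cstddef>
#include <cstdio>
#include <functional>
#include <vector>

struct Block {
  size_t id;
  int regionId;
  std::vector<Block *> preds;
};

// Apply `action` to curBB and every predecessor reachable inside `regionId`, once each.
static void WalkPredPaths(Block &curBB, int regionId, std::vector<bool> &visited,
                          const std::function<void(Block &)> &action) {
  if (visited[curBB.id] || curBB.regionId != regionId) {
    return;
  }
  visited[curBB.id] = true;
  action(curBB);  // e.g. scan the block's stack/heap def or use lists and add edges
  for (Block *pred : curBB.preds) {
    WalkPredPaths(*pred, regionId, visited, action);
  }
}

int main() {
  Block entry{0, 1, {}};
  Block middle{1, 1, {&entry}};
  Block cur{2, 1, {&middle, &entry}};
  std::vector<bool> visited(3, false);
  WalkPredPaths(cur, 1, visited, [](Block &bb) { std::printf("visit BB %zu\n", bb.id); });
  return 0;
}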
a/src/mapleall/maple_be/src/cg/aarch64/aarch64_dce.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_dce.cpp new file mode 100644 index 0000000000000000000000000000000000000000..7983fa68a38ada106fbb6077950d744ac298a65d --- /dev/null +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_dce.cpp @@ -0,0 +1,102 @@ +/* +* Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +* +* OpenArkCompiler is licensed under Mulan PSL v2. +* You can use this software according to the terms and conditions of the Mulan PSL v2. +* You may obtain a copy of Mulan PSL v2 at: +* +* http://license.coscl.org.cn/MulanPSL2 +* +* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +* FIT FOR A PARTICULAR PURPOSE. +* See the Mulan PSL v2 for more details. +*/ +#include "aarch64_dce.h" +#include "aarch64_operand.h" +namespace maplebe { +bool AArch64Dce::RemoveUnuseDef(VRegVersion &defVersion) { + /* delete defs which have no uses */ + if (defVersion.GetAllUseInsns().empty()) { + DUInsnInfo *defInsnInfo = defVersion.GetDefInsnInfo(); + if (defInsnInfo == nullptr) { + return false; + } + CHECK_FATAL(defInsnInfo->GetInsn() != nullptr, "Get def insn failed"); + Insn *defInsn = defInsnInfo->GetInsn(); + /* have not support asm/neon opt yet */ + if (defInsn->GetMachineOpcode() == MOP_asm || defInsn->IsVectorOp() || defInsn->IsAtomic()) { + return false; + } + std::set defRegs = defInsn->GetDefRegs(); + std::vector defVersions; + defVersions.push_back(&defVersion); + for (auto defRegNo : defRegs) { + if (defRegNo != defVersion.GetSSAvRegOpnd()->GetRegisterNumber()) { + VRegVersion *otherVersion = ssaInfo->FindSSAVersion(defRegNo); + if (!otherVersion) { // cannot find def use for physical register, return + return false; + } + if (!otherVersion->GetAllUseInsns().empty()) { + return false; + } + defVersions.push_back(otherVersion); + } + } + uint32 bothDUIdx = defInsn->GetBothDefUseOpnd(); + if (bothDUIdx == kInsnMaxOpnd || + (defInsnInfo->GetOperands().count(bothDUIdx) > 0 && defInsnInfo->GetOperands().at(bothDUIdx) == 1)) { + defInsn->GetBB()->RemoveInsn(*defInsn); + if (defInsn->IsPhi()) { + for (auto dv : defVersions) { + defInsn->GetBB()->RemovePhiInsn(dv->GetOriginalRegNO()); + } + } + for (auto dv : defVersions) { + dv->MarkDeleted(); + } + uint32 opndNum = defInsn->GetOperandSize(); + for (uint32 i = opndNum; i > 0; --i) { + Operand &opnd = defInsn->GetOperand(i - 1); + A64DeleteRegUseVisitor deleteUseRegVisitor(*GetSSAInfo(), defInsn->GetId()); + opnd.Accept(deleteUseRegVisitor); + } + return true; + } + } + return false; +} + +void A64DeleteRegUseVisitor::Visit(RegOperand *v) { + if (v->IsSSAForm()) { + VRegVersion *regVersion = GetSSAInfo()->FindSSAVersion(v->GetRegisterNumber()); + ASSERT(regVersion != nullptr, "regVersion should not be nullptr"); + MapleUnorderedMap &useInfos = regVersion->GetAllUseInsns(); + auto it = useInfos.find(deleteInsnId); + if (it != useInfos.end()) { + useInfos.erase(it); + } + } +} +void A64DeleteRegUseVisitor::Visit(ListOperand *v) { + for (auto *regOpnd : std::as_const(v->GetOperands())) { + Visit(regOpnd); + } +} +void A64DeleteRegUseVisitor::Visit(MemOperand *v) { + RegOperand *baseRegOpnd = v->GetBaseRegister(); + RegOperand *indexRegOpnd = v->GetIndexRegister(); + if (baseRegOpnd != nullptr && baseRegOpnd->IsSSAForm()) { + Visit(baseRegOpnd); + } + if (indexRegOpnd != nullptr && indexRegOpnd->IsSSAForm()) { + Visit(indexRegOpnd); + } +} + +void 
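/*
 * Editorial sketch, not part of the patch above: the core condition in RemoveUnuseDef -- an
 * instruction is dead only if every SSA version it defines has an empty use list, and deleting
 * it must also retract the uses contributed by its own operands (the job of
 * A64DeleteRegUseVisitor). MiniDef and the counters are simplified stand-ins, not the Maple
 * SSA classes.
 */
#include <cstdint>
#include <unordered_map>
#include <unordered_set>
#include <vector>

struct MiniDef {
  uint32_t insnId;                // defining instruction
  std::vector<uint32_t> defVers;  // SSA versions written by it
  std::vector<uint32_t> useVers;  // SSA versions read by it
};

// useCount maps an SSA version to the number of instructions still reading it.
static bool TryRemoveDeadDef(const MiniDef &def, std::unordered_map<uint32_t, int> &useCount,
                             std::unordered_set<uint32_t> &deletedVers) {
  for (uint32_t v : def.defVers) {
    if (useCount[v] != 0) {
      return false;               // some result is still read: the def must stay
    }
  }
  for (uint32_t v : def.defVers) {
    deletedVers.insert(v);        // all written versions are dead now
  }
  for (uint32_t v : def.useVers) {
    --useCount[v];                // the removed def no longer reads its operands
  }
  return true;
}

int main() {
  std::unordered_map<uint32_t, int> useCount{{10, 0}, {11, 2}};
  std::unordered_set<uint32_t> deletedVers;
  MiniDef def{1, {10}, {11}};     // writes v10 (never read), reads v11
  (void)TryRemoveDeadDef(def, useCount, deletedVers);  // v10 deleted, v11 use count drops to 1
  return 0;
}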
A64DeleteRegUseVisitor::Visit(PhiOperand *v) { + for (auto &phiOpndIt : std::as_const(v->GetOperands())) { + Visit(phiOpndIt.second); + } +} +} diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_dependence.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_dependence.cpp new file mode 100644 index 0000000000000000000000000000000000000000..751cb5543934f1374da8e290c20c0257e5598449 --- /dev/null +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_dependence.cpp @@ -0,0 +1,1144 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "aarch64_dependence.h" + +#include "aarch64_cg.h" +#include "aarch64_operand.h" +#include "pressure.h" + +/* For building dependence graph, The entry is AArch64DepAnalysis::Run. */ +namespace maplebe { +/* constructor */ +AArch64DepAnalysis::AArch64DepAnalysis(CGFunc &func, MemPool &mp, MAD &mad, bool beforeRA) + : DepAnalysis(func, mp, mad, beforeRA), + stackUses(alloc.Adapter()), + stackDefs(alloc.Adapter()), + heapUses(alloc.Adapter()), + heapDefs(alloc.Adapter()), + mayThrows(alloc.Adapter()), + ambiInsns(alloc.Adapter()), + ehInRegs(alloc.Adapter()) { + uint32 maxRegNum; + if (beforeRA) { + maxRegNum = cgFunc.GetMaxVReg(); + } else { + maxRegNum = kAllRegNum; + } + regDefs = memPool.NewArray(maxRegNum); + regUses = memPool.NewArray(maxRegNum); +} + +/* print dep node information */ +void AArch64DepAnalysis::DumpDepNode(DepNode &node) const { + node.GetInsn()->Dump(); + uint32 num = node.GetUnitNum(); + LogInfo::MapleLogger() << "unit num : " << num << ", "; + for (uint32 i = 0; i < num; ++i) { + const Unit *unit = node.GetUnitByIndex(i); + if (unit != nullptr) { + PRINT_VAL(unit->GetName()); + } else { + PRINT_VAL("none"); + } + } + LogInfo::MapleLogger() << '\n'; + node.DumpSchedInfo(); + if (beforeRA) { + node.DumpRegPressure(); + } +} + +/* print dep link information */ +void AArch64DepAnalysis::DumpDepLink(DepLink &link, const DepNode *node) const { + PRINT_VAL(GetDepTypeName(link.GetDepType())); + PRINT_STR_VAL("Latency: ", link.GetLatency()); + if (node != nullptr) { + node->GetInsn()->Dump(); + return; + } + LogInfo::MapleLogger() << "from : "; + link.GetFrom().GetInsn()->Dump(); + LogInfo::MapleLogger() << "to : "; + link.GetTo().GetInsn()->Dump(); +} + +/* Append use register to the list. */ +void AArch64DepAnalysis::AppendRegUseList(Insn &insn, regno_t regNO) { + RegList *regList = memPool.New(); + regList->insn = &insn; + regList->next = nullptr; + if (regUses[regNO] == nullptr) { + regUses[regNO] = regList; + if (beforeRA) { + Insn *defInsn = regDefs[regNO]; + if (defInsn == nullptr) { + return; + } + DepNode *defNode = defInsn->GetDepNode(); + defNode->SetRegDefs(regNO, regList); + } + return; + } + RegList *lastRegList = regUses[regNO]; + while (lastRegList->next != nullptr) { + lastRegList = lastRegList->next; + } + lastRegList->next = regList; +} + +/* + * Add dependence edge. + * Two dependence node has a unique edge. + * True dependence overwirtes other dependences. 
+ */ +void AArch64DepAnalysis::AddDependence(DepNode &fromNode, DepNode &toNode, DepType depType) { + /* Can not build a self loop dependence. */ + if (&fromNode == &toNode) { + return; + } + /* Check if exist edge. */ + if (!fromNode.GetSuccs().empty()) { + DepLink *depLink = fromNode.GetSuccs().back(); + if (&(depLink->GetTo()) == &toNode) { + if (depLink->GetDepType() != kDependenceTypeTrue) { + if (depType == kDependenceTypeTrue) { + /* Has exist edge, replace it. */ + depLink->SetDepType(kDependenceTypeTrue); + depLink->SetLatency(mad.GetLatency(*fromNode.GetInsn(), *toNode.GetInsn())); + } + } + return; + } + } + DepLink *depLink = memPool.New(fromNode, toNode, depType); + if (depType == kDependenceTypeTrue) { + depLink->SetLatency(mad.GetLatency(*fromNode.GetInsn(), *toNode.GetInsn())); + } + fromNode.AddSucc(*depLink); + toNode.AddPred(*depLink); +} + +void AArch64DepAnalysis::AddDependence4InsnInVectorByType(MapleVector &insns, Insn &insn, const DepType &type) { + for (auto anyInsn : insns) { + AddDependence(*anyInsn->GetDepNode(), *insn.GetDepNode(), type); + } +} + +void AArch64DepAnalysis::AddDependence4InsnInVectorByTypeAndCmp(MapleVector &insns, Insn &insn, + const DepType &type) { + for (auto anyInsn : insns) { + if (anyInsn != &insn) { + AddDependence(*anyInsn->GetDepNode(), *insn.GetDepNode(), type); + } + } +} + +/* Remove self dependence (self loop) in dependence graph. */ +void AArch64DepAnalysis::RemoveSelfDeps(Insn &insn) { + DepNode *node = insn.GetDepNode(); + ASSERT(node->GetSuccs().back()->GetTo().GetInsn() == &insn, "Is not a self dependence."); + ASSERT(node->GetPreds().back()->GetFrom().GetInsn() == &insn, "Is not a self dependence."); + node->RemoveSucc(); + node->RemovePred(); +} + +/* Build dependences of source register operand. */ +void AArch64DepAnalysis::BuildDepsUseReg(Insn &insn, regno_t regNO) { + DepNode *node = insn.GetDepNode(); + node->AddUseReg(regNO); + if (regDefs[regNO] != nullptr) { + /* Build true dependences. */ + AddDependence(*regDefs[regNO]->GetDepNode(), *insn.GetDepNode(), kDependenceTypeTrue); + } +} + +/* Build dependences of destination register operand. */ +void AArch64DepAnalysis::BuildDepsDefReg(Insn &insn, regno_t regNO) { + DepNode *node = insn.GetDepNode(); + node->AddDefReg(regNO); + /* Build anti dependences. */ + RegList *regList = regUses[regNO]; + while (regList != nullptr) { + CHECK_NULL_FATAL(regList->insn); + AddDependence(*regList->insn->GetDepNode(), *node, kDependenceTypeAnti); + regList = regList->next; + } + /* Build output depnedence. 
*/ + if (regDefs[regNO] != nullptr) { + AddDependence(*regDefs[regNO]->GetDepNode(), *node, kDependenceTypeOutput); + } +} + +void AArch64DepAnalysis::ReplaceDepNodeWithNewInsn(DepNode &firstNode, DepNode &secondNode, Insn &newInsn, + bool isFromClinit) const { + if (isFromClinit) { + firstNode.AddClinitInsn(*firstNode.GetInsn()); + firstNode.AddClinitInsn(*secondNode.GetInsn()); + firstNode.SetCfiInsns(secondNode.GetCfiInsns()); + } else { + for (Insn *insn : secondNode.GetCfiInsns()) { + firstNode.AddCfiInsn(*insn); + } + for (Insn *insn : secondNode.GetComments()) { + firstNode.AddComments(*insn); + } + secondNode.ClearComments(); + } + firstNode.SetInsn(newInsn); + Reservation *rev = mad.FindReservation(newInsn); + CHECK_FATAL(rev != nullptr, "reservation is nullptr."); + firstNode.SetReservation(*rev); + firstNode.SetUnits(rev->GetUnit()); + firstNode.SetUnitNum(rev->GetUnitNum()); + newInsn.SetDepNode(firstNode); +} + +void AArch64DepAnalysis::ClearDepNodeInfo(DepNode &depNode) const { + Insn &insn = cgFunc.GetInsnBuilder()->BuildInsn(MOP_pseudo_none); + insn.SetDepNode(depNode); + Reservation *seRev = mad.FindReservation(insn); + depNode.SetInsn(insn); + depNode.SetType(kNodeTypeEmpty); + depNode.SetReservation(*seRev); + depNode.SetUnitNum(0); + depNode.ClearCfiInsns(); + depNode.SetUnits(nullptr); +} + +/* Combine adrpldr&clinit_tail to clinit. */ +void AArch64DepAnalysis::CombineClinit(DepNode &firstNode, DepNode &secondNode, bool isAcrossSeparator) { + ASSERT(firstNode.GetInsn()->GetMachineOpcode() == MOP_adrp_ldr, "first insn should be adrpldr"); + ASSERT(secondNode.GetInsn()->GetMachineOpcode() == MOP_clinit_tail, "second insn should be clinit_tail"); + ASSERT(firstNode.GetCfiInsns().empty(), "There should not be any comment/cfi instructions between clinit."); + ASSERT(secondNode.GetComments().empty(), "There should not be any comment/cfi instructions between clinit."); + Insn &newInsn = cgFunc.GetInsnBuilder()->BuildInsn(MOP_clinit, firstNode.GetInsn()->GetOperand(0), + firstNode.GetInsn()->GetOperand(1)); + newInsn.SetId(firstNode.GetInsn()->GetId()); + /* Replace first node with new insn. */ + ReplaceDepNodeWithNewInsn(firstNode, secondNode, newInsn, true); + /* Clear second node information. */ + ClearDepNodeInfo(secondNode); + CombineDependence(firstNode, secondNode, isAcrossSeparator); +} + +/* + * Combine memory access pair: + * 1.ldr to ldp. + * 2.str to stp. 
+ */ +void AArch64DepAnalysis::CombineMemoryAccessPair(DepNode &firstNode, DepNode &secondNode, bool useFirstOffset) { + ASSERT(firstNode.GetInsn(), "the insn of first Node should not be nullptr"); + ASSERT(secondNode.GetInsn(), "the insn of second Node should not be nullptr"); + MOperator thisMop = firstNode.GetInsn()->GetMachineOpcode(); + MOperator mopPair = GetMopPair(thisMop); + ASSERT(mopPair != 0, "mopPair should not be zero"); + Operand *opnd0 = nullptr; + Operand *opnd1 = nullptr; + Operand *opnd2 = nullptr; + if (useFirstOffset) { + opnd0 = &(firstNode.GetInsn()->GetOperand(0)); + opnd1 = &(secondNode.GetInsn()->GetOperand(0)); + opnd2 = &(firstNode.GetInsn()->GetOperand(1)); + } else { + opnd0 = &(secondNode.GetInsn()->GetOperand(0)); + opnd1 = &(firstNode.GetInsn()->GetOperand(0)); + opnd2 = &(secondNode.GetInsn()->GetOperand(1)); + } + Insn &newInsn = cgFunc.GetInsnBuilder()->BuildInsn(mopPair, *opnd0, *opnd1, *opnd2); + newInsn.SetId(firstNode.GetInsn()->GetId()); + std::string newComment; + const MapleString &comment = firstNode.GetInsn()->GetComment(); + if (comment.c_str() != nullptr) { + newComment += comment.c_str(); + } + const MapleString &secondComment = secondNode.GetInsn()->GetComment(); + if (secondComment.c_str() != nullptr) { + newComment += " "; + newComment += secondComment.c_str(); + } + if ((newComment.c_str() != nullptr) && (strlen(newComment.c_str()) > 0)) { + newInsn.SetComment(newComment); + } + /* Replace first node with new insn. */ + ReplaceDepNodeWithNewInsn(firstNode, secondNode, newInsn, false); + /* Clear second node information. */ + ClearDepNodeInfo(secondNode); + CombineDependence(firstNode, secondNode, false, true); +} + +/* Combine two dependence nodes to one */ +void AArch64DepAnalysis::CombineDependence(DepNode &firstNode, DepNode &secondNode, bool isAcrossSeparator, + bool isMemCombine) { + if (isAcrossSeparator) { + /* Clear all latency of the second node. */ + for (auto predLink : secondNode.GetPreds()) { + predLink->SetLatency(0); + } + for (auto succLink : secondNode.GetSuccs()) { + succLink->SetLatency(0); + } + return; + } + std::set uniqueNodes; + + for (auto predLink : firstNode.GetPreds()) { + if (predLink->GetDepType() == kDependenceTypeTrue) { + predLink->SetLatency(mad.GetLatency(*predLink->GetFrom().GetInsn(), *firstNode.GetInsn())); + } + (void)uniqueNodes.insert(&predLink->GetFrom()); + } + for (auto predLink : secondNode.GetPreds()) { + if (&predLink->GetFrom() != &firstNode) { + if (uniqueNodes.insert(&(predLink->GetFrom())).second) { + AddDependence(predLink->GetFrom(), firstNode, predLink->GetDepType()); + } + } + predLink->SetLatency(0); + } + uniqueNodes.clear(); + for (auto succLink : firstNode.GetSuccs()) { + if (succLink->GetDepType() == kDependenceTypeTrue) { + succLink->SetLatency(mad.GetLatency(*succLink->GetFrom().GetInsn(), *firstNode.GetInsn())); + } + (void)uniqueNodes.insert(&(succLink->GetTo())); + } + for (auto succLink : secondNode.GetSuccs()) { + if (uniqueNodes.insert(&(succLink->GetTo())).second) { + AddDependence(firstNode, succLink->GetTo(), succLink->GetDepType()); + if (isMemCombine) { + succLink->GetTo().IncreaseValidPredsSize(); + } + } + succLink->SetLatency(0); + } +} + +/* + * Build dependences of ambiguous instruction. + * ambiguous instruction : instructions that can not across may throw instructions. 
+ */ +void AArch64DepAnalysis::BuildDepsAmbiInsn(Insn &insn) { + AddDependence4InsnInVectorByType(mayThrows, insn, kDependenceTypeThrow); + ambiInsns.emplace_back(&insn); +} + +/* Build dependences of may throw instructions. */ +void AArch64DepAnalysis::BuildDepsMayThrowInsn(Insn &insn) { + AddDependence4InsnInVectorByType(ambiInsns, insn, kDependenceTypeThrow); +} + +bool AArch64DepAnalysis::IsFrameReg(const RegOperand &opnd) const { + return (opnd.GetRegisterNumber() == RFP) || (opnd.GetRegisterNumber() == RSP); +} + +MemOperand *AArch64DepAnalysis::BuildNextMemOperandByByteSize(const MemOperand &aarchMemOpnd, uint32 byteSize) const { + MemOperand *nextMemOpnd = aarchMemOpnd.Clone(memPool); + Operand *nextOfstOpnd = nextMemOpnd->GetOffsetImmediate()->Clone(memPool); + OfstOperand *aarchNextOfstOpnd = static_cast(nextOfstOpnd); + CHECK_NULL_FATAL(aarchNextOfstOpnd); + int32 offsetVal = static_cast(aarchNextOfstOpnd->GetOffsetValue()); + aarchNextOfstOpnd->SetOffsetValue(offsetVal + byteSize); + nextMemOpnd->SetOffsetOperand(*aarchNextOfstOpnd); + return nextMemOpnd; +} + +/* Get the second memory access operand of stp/ldp instructions. */ +MemOperand *AArch64DepAnalysis::GetNextMemOperand(const Insn &insn, const MemOperand &aarchMemOpnd) const { + MemOperand *nextMemOpnd = nullptr; + switch (insn.GetMachineOpcode()) { + case MOP_wldp: + case MOP_sldp: + case MOP_xldpsw: + case MOP_wstp: + case MOP_sstp: { + nextMemOpnd = BuildNextMemOperandByByteSize(aarchMemOpnd, k4ByteSize); + break; + } + case MOP_xldp: + case MOP_dldp: + case MOP_xstp: + case MOP_dstp: { + nextMemOpnd = BuildNextMemOperandByByteSize(aarchMemOpnd, k8ByteSize); + break; + } + default: + break; + } + + return nextMemOpnd; +} + +/* + * Build dependences of symbol memory access. + * Memory access with symbol must be a heap memory access. + */ +void AArch64DepAnalysis::BuildDepsAccessStImmMem(Insn &insn, bool isDest) { + if (isDest) { + /* + * Heap memory + * Build anti dependences. + */ + AddDependence4InsnInVectorByType(heapUses, insn, kDependenceTypeAnti); + /* Build output depnedence. */ + AddDependence4InsnInVectorByType(heapDefs, insn, kDependenceTypeOutput); + heapDefs.emplace_back(&insn); + } else { + /* Heap memory */ + AddDependence4InsnInVectorByType(heapDefs, insn, kDependenceTypeTrue); + heapUses.emplace_back(&insn); + } + if (memBarInsn != nullptr) { + AddDependence(*memBarInsn->GetDepNode(), *insn.GetDepNode(), kDependenceTypeMembar); + } +} + +/* Build dependences of stack memory and heap memory uses. 
*/
+void AArch64DepAnalysis::BuildDepsUseMem(Insn &insn, MemOperand &aarchMemOpnd) {
+  RegOperand *baseRegister = aarchMemOpnd.GetBaseRegister();
+  MemOperand *nextMemOpnd = GetNextMemOperand(insn, aarchMemOpnd);
+
+  aarchMemOpnd.SetAccessSize(insn.GetMemoryByteSize());
+  /* Stack memory address */
+  for (auto defInsn : stackDefs) {
+    if (defInsn->IsCall() || NeedBuildDepsMem(aarchMemOpnd, nextMemOpnd, *defInsn)) {
+      AddDependence(*defInsn->GetDepNode(), *insn.GetDepNode(), kDependenceTypeTrue);
+      continue;
+    }
+  }
+  /* Heap memory */
+  AddDependence4InsnInVectorByType(heapDefs, insn, kDependenceTypeTrue);
+  if (((baseRegister != nullptr) && IsFrameReg(*baseRegister)) || aarchMemOpnd.IsStackMem()) {
+    stackUses.emplace_back(&insn);
+  } else {
+    heapUses.emplace_back(&insn);
+  }
+  if (memBarInsn != nullptr) {
+    AddDependence(*memBarInsn->GetDepNode(), *insn.GetDepNode(), kDependenceTypeMembar);
+  }
+}
+
+static bool NoAlias(const MemOperand &leftOpnd, const MemOperand &rightOpnd) {
+  if (leftOpnd.GetAddrMode() == MemOperand::kAddrModeBOi && rightOpnd.GetAddrMode() == MemOperand::kAddrModeBOi &&
+      leftOpnd.GetIndexOpt() == MemOperand::kIntact && rightOpnd.GetIndexOpt() == MemOperand::kIntact) {
+    if (leftOpnd.GetBaseRegister()->GetRegisterNumber() == RFP ||
+        rightOpnd.GetBaseRegister()->GetRegisterNumber() == RFP) {
+      Operand *ofstOpnd = leftOpnd.GetOffsetOperand();
+      Operand *rofstOpnd = rightOpnd.GetOffsetOperand();
+      ASSERT(ofstOpnd != nullptr, "offset operand should not be null.");
+      ASSERT(rofstOpnd != nullptr, "offset operand should not be null.");
+      ImmOperand *ofst = static_cast<ImmOperand*>(ofstOpnd);
+      ImmOperand *rofst = static_cast<ImmOperand*>(rofstOpnd);
+      ASSERT(ofst != nullptr, "CG internal error, invalid type.");
+      ASSERT(rofst != nullptr, "CG internal error, invalid type.");
+      return (!ofst->ValueEquals(*rofst));
+    }
+  }
+  return false;
+}
+
+static bool NoOverlap(const MemOperand &leftOpnd, const MemOperand &rightOpnd) {
+  if (leftOpnd.GetAddrMode() != MemOperand::kAddrModeBOi || rightOpnd.GetAddrMode() != MemOperand::kAddrModeBOi ||
+      leftOpnd.GetIndexOpt() != MemOperand::kIntact || rightOpnd.GetIndexOpt() != MemOperand::kIntact) {
+    return false;
+  }
+  if (leftOpnd.GetBaseRegister()->GetRegisterNumber() != RFP ||
+      rightOpnd.GetBaseRegister()->GetRegisterNumber() != RFP) {
+    return false;
+  }
+  int64 ofset1 = leftOpnd.GetOffsetOperand()->GetValue();
+  int64 ofset2 = rightOpnd.GetOffsetOperand()->GetValue();
+  if (ofset1 < ofset2) {
+    return ((ofset1 + static_cast<int64>(leftOpnd.GetAccessSize())) <= ofset2);
+  } else {
+    return ((ofset2 + static_cast<int64>(rightOpnd.GetAccessSize())) <= ofset1);
+  }
+}
+
+/* Return true if memInsn's memOpnd may alias memOpnd or nextMemOpnd, so a dependence edge is needed */
+bool AArch64DepAnalysis::NeedBuildDepsMem(const MemOperand &memOpnd, const MemOperand *nextMemOpnd,
+                                          const Insn &memInsn) const {
+  auto *memOpndOfmemInsn = static_cast<MemOperand*>(memInsn.GetMemOpnd());
+  if (!NoAlias(memOpnd, *memOpndOfmemInsn) || ((nextMemOpnd != nullptr) && !NoAlias(*nextMemOpnd, *memOpndOfmemInsn))) {
+    return true;
+  }
+  if (cgFunc.GetMirModule().GetSrcLang() == kSrcLangC && !memInsn.IsCall()) {
+    static_cast<MemOperand*>(memInsn.GetMemOpnd())->SetAccessSize(memInsn.GetMemoryByteSize());
+    return (!NoOverlap(memOpnd, *memOpndOfmemInsn));
+  }
+  MemOperand *nextMemOpndOfmemInsn = GetNextMemOperand(memInsn, *memOpndOfmemInsn);
+  if (nextMemOpndOfmemInsn != nullptr) {
+    if (!NoAlias(memOpnd, *nextMemOpndOfmemInsn) ||
+        ((nextMemOpnd != nullptr) && !NoAlias(*nextMemOpnd, *nextMemOpndOfmemInsn))) {
+      return true;
+    }
+  }
+  return false;
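/*
 * Editorial sketch, not part of the patch above: the interval arithmetic behind NoOverlap --
 * two frame-pointer based stack accesses are independent only when their byte ranges
 * [offset, offset + accessSize) are disjoint; every other case is treated as a possible alias
 * and a dependence edge is built conservatively. StackAccess is an illustrative stand-in.
 */
#include <cassert>
#include <cstdint>

struct StackAccess {
  int64_t offset;     // immediate offset from the frame pointer
  uint32_t byteSize;  // access width in bytes
};

static bool RangesDisjoint(const StackAccess &a, const StackAccess &b) {
  if (a.offset < b.offset) {
    return a.offset + static_cast<int64_t>(a.byteSize) <= b.offset;
  }
  return b.offset + static_cast<int64_t>(b.byteSize) <= a.offset;
}

int main() {
  assert(RangesDisjoint({16, 8}, {24, 4}));   // [16,24) vs [24,28): no overlap, no edge needed
  assert(!RangesDisjoint({16, 8}, {20, 4}));  // [16,24) vs [20,24): overlap, build the edge
  return 0;
}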
+} + +/* + * Build anti dependences between insn and other insn that use stack memroy. + * insn : the instruction that defines stack memory. + * memOpnd : insn's memOpnd + * nextMemOpnd : some memory pair operator instruction (like ldp/stp) defines two memory. + */ +void AArch64DepAnalysis::BuildAntiDepsDefStackMem(Insn &insn, MemOperand &memOpnd, const MemOperand *nextMemOpnd) { + memOpnd.SetAccessSize(insn.GetMemoryByteSize()); + for (auto *useInsn : stackUses) { + if (NeedBuildDepsMem(memOpnd, nextMemOpnd, *useInsn)) { + AddDependence(*useInsn->GetDepNode(), *insn.GetDepNode(), kDependenceTypeAnti); + } + } +} + +/* + * Build output dependences between insn with other insn that define stack memroy. + * insn : the instruction that defines stack memory. + * memOpnd : insn's memOpnd + * nextMemOpnd : some memory pair operator instruction (like ldp/stp) defines two memory. + */ +void AArch64DepAnalysis::BuildOutputDepsDefStackMem(Insn &insn, MemOperand &memOpnd, const MemOperand *nextMemOpnd) { + memOpnd.SetAccessSize(insn.GetMemoryByteSize()); + for (auto defInsn : stackDefs) { + if (defInsn->IsCall() || NeedBuildDepsMem(memOpnd, nextMemOpnd, *defInsn)) { + AddDependence(*defInsn->GetDepNode(), *insn.GetDepNode(), kDependenceTypeOutput); + } + } +} + +/* Build dependences of stack memory and heap memory definitions. */ +void AArch64DepAnalysis::BuildDepsDefMem(Insn &insn, MemOperand &aarchMemOpnd) { + RegOperand *baseRegister = aarchMemOpnd.GetBaseRegister(); + MemOperand *nextMemOpnd = GetNextMemOperand(insn, aarchMemOpnd); + + /* Build anti dependences. */ + BuildAntiDepsDefStackMem(insn, aarchMemOpnd, nextMemOpnd); + /* Build output depnedence. */ + BuildOutputDepsDefStackMem(insn, aarchMemOpnd, nextMemOpnd); + if (lastCallInsn != nullptr) { + /* Build a dependence between stack passed arguments and call. */ + ASSERT(baseRegister != nullptr, "baseRegister shouldn't be null here"); + if (baseRegister->GetRegisterNumber() == RSP) { + AddDependence(*lastCallInsn->GetDepNode(), *insn.GetDepNode(), kDependenceTypeControl); + } + } + + /* Heap memory + * Build anti dependences. + */ + AddDependence4InsnInVectorByType(heapUses, insn, kDependenceTypeAnti); + /* Build output depnedence. */ + AddDependence4InsnInVectorByType(heapDefs, insn, kDependenceTypeOutput); + + if (((baseRegister != nullptr) && IsFrameReg(*baseRegister)) || aarchMemOpnd.IsStackMem()) { + stackDefs.emplace_back(&insn); + } else { + heapDefs.emplace_back(&insn); + } + if (memBarInsn != nullptr) { + AddDependence(*memBarInsn->GetDepNode(), *insn.GetDepNode(), kDependenceTypeMembar); + } + /* Memory definition can not across may-throw insns. */ + AddDependence4InsnInVectorByType(mayThrows, insn, kDependenceTypeThrow); +} + +/* Build dependences of memory barrior instructions. */ +void AArch64DepAnalysis::BuildDepsMemBar(Insn &insn) { + AddDependence4InsnInVectorByTypeAndCmp(stackUses, insn, kDependenceTypeMembar); + AddDependence4InsnInVectorByTypeAndCmp(heapUses, insn, kDependenceTypeMembar); + AddDependence4InsnInVectorByTypeAndCmp(stackDefs, insn, kDependenceTypeMembar); + AddDependence4InsnInVectorByTypeAndCmp(heapDefs, insn, kDependenceTypeMembar); + memBarInsn = &insn; +} + +/* A pseudo separator node depends all the other nodes. */ +void AArch64DepAnalysis::BuildDepsSeparator(DepNode &newSepNode, MapleVector &nodes) { + uint32 nextSepIndex = (separatorIndex + kMaxDependenceNum) < nodes.size() ? 
(separatorIndex + kMaxDependenceNum) + : static_cast(nodes.size() - 1); + newSepNode.ReservePreds(nextSepIndex - separatorIndex); + newSepNode.ReserveSuccs(nextSepIndex - separatorIndex); + for (uint32 i = separatorIndex; i < nextSepIndex; ++i) { + AddDependence(*nodes[i], newSepNode, kDependenceTypeSeparator); + } +} + +/* Build control dependence for branch/ret instructions. */ +void AArch64DepAnalysis::BuildDepsControlAll(DepNode &depNode, const MapleVector &nodes) { + for (uint32 i = separatorIndex; i < depNode.GetIndex(); ++i) { + AddDependence(*nodes[i], depNode, kDependenceTypeControl); + } +} + +/* + * Build dependences of call instructions. + * Caller-saved physical registers will defined by a call instruction. + * Also a conditional register may modified by a call. + */ +void AArch64DepAnalysis::BuildCallerSavedDeps(Insn &insn) { + /* Build anti dependence and output dependence. */ + for (uint32 i = R0; i <= R7; ++i) { + BuildDepsDefReg(insn, i); + } + for (uint32 i = V0; i <= V7; ++i) { + BuildDepsDefReg(insn, i); + } + if (!beforeRA) { + for (uint32 i = R8; i <= R18; ++i) { + BuildDepsDefReg(insn, i); + } + for (uint32 i = RLR; i <= RSP; ++i) { + BuildDepsUseReg(insn, i); + } + for (uint32 i = V16; i <= V31; ++i) { + BuildDepsDefReg(insn, i); + } + } + /* For condition operand, such as NE, EQ, and so on. */ + if (cgFunc.GetRflag() != nullptr) { + BuildDepsDefReg(insn, kRFLAG); + } +} + +/* + * Build dependence between control register and last call instruction. + * insn : instruction that with control register operand. + * isDest : if the control register operand is a destination operand. + */ +void AArch64DepAnalysis::BuildDepsBetweenControlRegAndCall(Insn &insn, bool isDest) { + if (lastCallInsn == nullptr) { + return; + } + if (isDest) { + AddDependence(*lastCallInsn->GetDepNode(), *insn.GetDepNode(), kDependenceTypeOutput); + return; + } + AddDependence(*lastCallInsn->GetDepNode(), *insn.GetDepNode(), kDependenceTypeAnti); +} + +/* + * Build dependence between stack-define-instruction that deal with call-insn's args and a call-instruction. + * insn : a call instruction (call/tail-call) + */ +void AArch64DepAnalysis::BuildStackPassArgsDeps(Insn &insn) { + for (auto stackDefInsn : stackDefs) { + if (stackDefInsn->IsCall()) { + continue; + } + Operand *opnd = stackDefInsn->GetMemOpnd(); + ASSERT(opnd->IsMemoryAccessOperand(), "make sure opnd is memOpnd"); + MemOperand *memOpnd = static_cast(opnd); + RegOperand *baseReg = memOpnd->GetBaseRegister(); + if ((baseReg != nullptr) && (baseReg->GetRegisterNumber() == RSP)) { + AddDependence(*stackDefInsn->GetDepNode(), *insn.GetDepNode(), kDependenceTypeControl); + } + } +} + +/* Some insns may dirty all stack memory, such as "bl MCC_InitializeLocalStackRef". */ +void AArch64DepAnalysis::BuildDepsDirtyStack(Insn &insn) { + /* Build anti dependences. */ + AddDependence4InsnInVectorByType(stackUses, insn, kDependenceTypeAnti); + /* Build output depnedence. */ + AddDependence4InsnInVectorByType(stackDefs, insn, kDependenceTypeOutput); + stackDefs.emplace_back(&insn); +} + +/* Some call insns may use all stack memory, such as "bl MCC_CleanupLocalStackRef_NaiveRCFast". */ +void AArch64DepAnalysis::BuildDepsUseStack(Insn &insn) { + /* Build true dependences. */ + AddDependence4InsnInVectorByType(stackDefs, insn, kDependenceTypeTrue); +} + +/* Some insns may dirty all heap memory, such as a call insn. */ +void AArch64DepAnalysis::BuildDepsDirtyHeap(Insn &insn) { + /* Build anti dependences. 
*/ + AddDependence4InsnInVectorByType(heapUses, insn, kDependenceTypeAnti); + /* Build output depnedence. */ + AddDependence4InsnInVectorByType(heapDefs, insn, kDependenceTypeOutput); + if (memBarInsn != nullptr) { + AddDependence(*memBarInsn->GetDepNode(), *insn.GetDepNode(), kDependenceTypeMembar); + } + heapDefs.emplace_back(&insn); +} + +/* Build a pseudo node to seperate dependence graph. */ +DepNode *AArch64DepAnalysis::BuildSeparatorNode() { + Insn &pseudoSepInsn = cgFunc.GetInsnBuilder()->BuildInsn(MOP_pseudo_dependence_seperator); + DepNode *separatorNode = memPool.New(pseudoSepInsn, alloc); + separatorNode->SetType(kNodeTypeSeparator); + pseudoSepInsn.SetDepNode(*separatorNode); + if (beforeRA) { + RegPressure *regPressure = memPool.New(alloc); + separatorNode->SetRegPressure(*regPressure); + separatorNode->InitPressure(); + } + return separatorNode; +} + +/* Init depAnalysis data struction */ +void AArch64DepAnalysis::Init(BB &bb, MapleVector &nodes) { + curBB = &bb; + ClearAllDepData(); + lastComments.clear(); + /* Analysis live-in registers in catch BB. */ + AnalysisAmbiInsns(bb); + /* Clear all dependence nodes and push the first separator node. */ + nodes.clear(); + DepNode *pseudoSepNode = BuildSeparatorNode(); + nodes.emplace_back(pseudoSepNode); + separatorIndex = 0; + + if (beforeRA) { + /* assump first pseudo_dependence_seperator insn of current bb define live-in's registers */ + Insn *pseudoSepInsn = pseudoSepNode->GetInsn(); + for (auto ®NO : bb.GetLiveInRegNO()) { + regDefs[regNO] = pseudoSepInsn; + pseudoSepNode->AddDefReg(regNO); + pseudoSepNode->SetRegDefs(pseudoSepNode->GetDefRegnos().size(), nullptr); + } + } +} + +/* When a separator build, it is the same as a new basic block. */ +void AArch64DepAnalysis::ClearAllDepData() { + uint32 maxRegNum; + if (beforeRA) { + maxRegNum = cgFunc.GetMaxVReg(); + } else { + maxRegNum = kAllRegNum; + } + errno_t ret = memset_s(regDefs, sizeof(Insn *) * maxRegNum, 0, sizeof(Insn *) * maxRegNum); + CHECK_FATAL(ret == EOK, "call memset_s failed in Unit"); + ret = memset_s(regUses, sizeof(RegList *) * maxRegNum, 0, sizeof(RegList *) * maxRegNum); + CHECK_FATAL(ret == EOK, "call memset_s failed in Unit"); + memBarInsn = nullptr; + lastCallInsn = nullptr; + lastFrameDef = nullptr; + + stackUses.clear(); + stackDefs.clear(); + heapUses.clear(); + heapDefs.clear(); + mayThrows.clear(); + ambiInsns.clear(); +} + +/* Analysis live-in registers in catch bb and cleanup bb. */ +void AArch64DepAnalysis::AnalysisAmbiInsns(BB &bb) { + hasAmbiRegs = false; + if (bb.GetEhSuccs().empty()) { + return; + } + + /* Union all catch bb */ + for (auto succBB : bb.GetEhSuccs()) { + const MapleSet &liveInRegSet = succBB->GetLiveInRegNO(); + set_union(liveInRegSet.begin(), liveInRegSet.end(), ehInRegs.begin(), ehInRegs.end(), + inserter(ehInRegs, ehInRegs.begin())); + } + + /* Union cleanup entry bb. */ + const MapleSet ®NOSet = cgFunc.GetCleanupBB()->GetLiveInRegNO(); + std::set_union(regNOSet.begin(), regNOSet.end(), ehInRegs.begin(), ehInRegs.end(), + inserter(ehInRegs, ehInRegs.begin())); + + /* Subtract R0 and R1, that is defined by eh runtime. */ + ehInRegs.erase(R0); + ehInRegs.erase(R1); + if (ehInRegs.empty()) { + return; + } + hasAmbiRegs = true; +} + +/* Check if regNO is in ehInRegs. 
*/ +bool AArch64DepAnalysis::IfInAmbiRegs(regno_t regNO) const { + if (!hasAmbiRegs) { + return false; + } + if (ehInRegs.find(regNO) != ehInRegs.end()) { + return true; + } + return false; +} + +static bool IsYieldPoint(Insn &insn) { + /* + * It is a yieldpoint if loading from a dedicated + * register holding polling page address: + * ldr wzr, [RYP] + */ + if (insn.IsLoad() && !insn.IsLoadLabel()) { + auto mem = static_cast(insn.GetMemOpnd()); + return (mem != nullptr && mem->GetBaseRegister() != nullptr && mem->GetBaseRegister()->GetRegisterNumber() == RYP); + } + return false; +} + +/* + * Build dependences of memory operand. + * insn : a instruction with the memory access operand. + * opnd : the memory access operand. + * regProp : operand property of the memory access operandess operand. + */ +void AArch64DepAnalysis::BuildMemOpndDependency(Insn &insn, Operand &opnd, const OpndDesc ®Prop) { + ASSERT(opnd.IsMemoryAccessOperand(), "opnd must be memory Operand"); + MemOperand *memOpnd = static_cast(&opnd); + RegOperand *baseRegister = memOpnd->GetBaseRegister(); + if (baseRegister != nullptr) { + regno_t regNO = baseRegister->GetRegisterNumber(); + BuildDepsUseReg(insn, regNO); + if ((memOpnd->GetAddrMode() == MemOperand::kAddrModeBOi) && (memOpnd->IsPostIndexed() || memOpnd->IsPreIndexed())) { + /* Base operand has changed. */ + BuildDepsDefReg(insn, regNO); + } + } + RegOperand *indexRegister = memOpnd->GetIndexRegister(); + if (indexRegister != nullptr) { + regno_t regNO = indexRegister->GetRegisterNumber(); + BuildDepsUseReg(insn, regNO); + } + if (regProp.IsUse()) { + BuildDepsUseMem(insn, *memOpnd); + } else { + BuildDepsDefMem(insn, *memOpnd); + BuildDepsAmbiInsn(insn); + } + if (IsYieldPoint(insn)) { + BuildDepsMemBar(insn); + BuildDepsDefReg(insn, kRFLAG); + } +} + +/* Build Dependency for each Operand of insn */ +void AArch64DepAnalysis::BuildOpndDependency(Insn &insn) { + const InsnDesc *md = insn.GetDesc(); + MOperator mOp = insn.GetMachineOpcode(); + uint32 opndNum = insn.GetOperandSize(); + for (uint32 i = 0; i < opndNum; ++i) { + Operand &opnd = insn.GetOperand(i); + const OpndDesc *regProp = md->opndMD[i]; + if (opnd.IsMemoryAccessOperand()) { + BuildMemOpndDependency(insn, opnd, *regProp); + } else if (opnd.IsStImmediate()) { + if (mOp != MOP_xadrpl12) { + BuildDepsAccessStImmMem(insn, false); + } + } else if (opnd.IsRegister()) { + RegOperand ®Opnd = static_cast(opnd); + regno_t regNO = regOpnd.GetRegisterNumber(); + + if (regProp->IsUse()) { + BuildDepsUseReg(insn, regNO); + } + + if (regProp->IsDef()) { + BuildDepsDefReg(insn, regNO); + } + } else if (opnd.IsConditionCode()) { + /* For condition operand, such as NE, EQ, and so on. */ + if (regProp->IsUse()) { + BuildDepsUseReg(insn, kRFLAG); + BuildDepsBetweenControlRegAndCall(insn, false); + } + + if (regProp->IsDef()) { + BuildDepsDefReg(insn, kRFLAG); + BuildDepsBetweenControlRegAndCall(insn, true); + } + } else if (opnd.IsList()) { + auto &listOpnd = static_cast(opnd); + /* Build true dependences */ + for (auto &lst : listOpnd.GetOperands()) { + regno_t regNO = lst->GetRegisterNumber(); + BuildDepsUseReg(insn, regNO); + } + } + } +} + +static bool IsLazyLoad(MOperator op) { + return (op == MOP_lazy_ldr) || (op == MOP_lazy_ldr_static) || (op == MOP_lazy_tail); +} + +/* + * Build dependences in some special issue (stack/heap/throw/clinit/lazy binding/control flow). + * insn : a instruction. + * depNode : insn's depNode. + * nodes : the dependence nodes inclue insn's depNode. 
+ */ +void AArch64DepAnalysis::BuildSpecialInsnDependency(Insn &insn, DepNode &depNode, const MapleVector &nodes) { + const InsnDesc *md = insn.GetDesc(); + MOperator mOp = insn.GetMachineOpcode(); + if (insn.IsCall() || insn.IsTailCall()) { + /* Caller saved registers. */ + BuildCallerSavedDeps(insn); + BuildStackPassArgsDeps(insn); + + if (mOp == MOP_xbl) { + FuncNameOperand &target = static_cast(insn.GetOperand(0)); + if ((target.GetName() == "MCC_InitializeLocalStackRef") || (target.GetName() == "MCC_ClearLocalStackRef") || + (target.GetName() == "MCC_DecRefResetPair")) { + /* Write stack memory. */ + BuildDepsDirtyStack(insn); + } else if ((target.GetName() == "MCC_CleanupLocalStackRef_NaiveRCFast") || + (target.GetName() == "MCC_CleanupLocalStackRefSkip_NaiveRCFast") || + (target.GetName() == "MCC_CleanupLocalStackRefSkip")) { + /* UseStackMemory. */ + BuildDepsUseStack(insn); + } else if (cgFunc.GetMirModule().GetSrcLang() == kSrcLangC) { + /* potential C aliasing. */ + BuildDepsDirtyStack(insn); + } + } + BuildDepsDirtyHeap(insn); + BuildDepsAmbiInsn(insn); + if (lastCallInsn != nullptr) { + AddDependence(*lastCallInsn->GetDepNode(), *insn.GetDepNode(), kDependenceTypeControl); + } + lastCallInsn = &insn; + } else if (insn.IsClinit() || IsLazyLoad(insn.GetMachineOpcode()) || + insn.GetMachineOpcode() == MOP_arrayclass_cache_ldr) { + BuildDepsDirtyHeap(insn); + BuildDepsDefReg(insn, kRFLAG); + if (insn.GetMachineOpcode() != MOP_adrp_ldr) { + BuildDepsDefReg(insn, R16); + BuildDepsDefReg(insn, R17); + } + } else if ((mOp == MOP_xret) || md->IsBranch()) { + BuildDepsControlAll(depNode, nodes); + } else if (insn.IsMemAccessBar()) { + BuildDepsMemBar(insn); + } else if (insn.IsSpecialIntrinsic()) { + BuildDepsDirtyHeap(insn); + } +} + +/* + * If the instruction's number of current basic block more than kMaxDependenceNum, + * then insert some pseudo separator node to split baic block. + */ +void AArch64DepAnalysis::SeperateDependenceGraph(MapleVector &nodes, uint32 &nodeSum) { + if ((nodeSum > 0) && ((nodeSum % kMaxDependenceNum) == 0)) { + ASSERT(nodeSum == nodes.size(), "CG internal error, nodeSum should equal to nodes.size."); + /* Add a pseudo node to seperate dependence graph. */ + DepNode *separatorNode = BuildSeparatorNode(); + separatorNode->SetIndex(nodeSum); + nodes.emplace_back(separatorNode); + BuildDepsSeparator(*separatorNode, nodes); + + if (beforeRA) { + /* for all live-out register of current bb */ + for (auto ®NO : curBB->GetLiveOutRegNO()) { + if (regDefs[regNO] != nullptr) { + AppendRegUseList(*(separatorNode->GetInsn()), regNO); + separatorNode->AddUseReg(regNO); + separatorNode->SetRegUses(*regUses[regNO]); + } + } + } + ClearAllDepData(); + separatorIndex = nodeSum++; + } +} + +/* + * Generate a depNode, + * insn : create depNode for the instruction. + * nodes : a vector to store depNode. + * nodeSum : the new depNode's index. + * comments : those comment insn between last no-comment's insn and insn. 
+ */ +DepNode *AArch64DepAnalysis::GenerateDepNode(Insn &insn, MapleVector &nodes, int32 nodeSum, + const MapleVector &comments) { + DepNode *depNode = nullptr; + Reservation *rev = mad.FindReservation(insn); + ASSERT(rev != nullptr, "rev is nullptr"); + depNode = memPool.New(insn, alloc, rev->GetUnit(), rev->GetUnitNum(), *rev); + if (beforeRA) { + RegPressure *regPressure = memPool.New(alloc); + depNode->SetRegPressure(*regPressure); + depNode->InitPressure(); + } + depNode->SetIndex(nodeSum); + nodes.emplace_back(depNode); + insn.SetDepNode(*depNode); + + constexpr size_t vectorSize = 5; + depNode->ReservePreds(vectorSize); + depNode->ReserveSuccs(vectorSize); + + if (!comments.empty()) { + depNode->SetComments(comments); + } + return depNode; +} + +void AArch64DepAnalysis::BuildAmbiInsnDependency(Insn &insn) { + const auto &defRegnos = insn.GetDepNode()->GetDefRegnos(); + for (const auto ®NO : defRegnos) { + if (IfInAmbiRegs(regNO)) { + BuildDepsAmbiInsn(insn); + break; + } + } +} + +void AArch64DepAnalysis::BuildMayThrowInsnDependency(Insn &insn) { + /* build dependency for maythrow insn; */ + if (insn.MayThrow()) { + BuildDepsMayThrowInsn(insn); + if (lastFrameDef != nullptr) { + AddDependence(*lastFrameDef->GetDepNode(), *insn.GetDepNode(), kDependenceTypeThrow); + } + } +} + +void AArch64DepAnalysis::UpdateRegUseAndDef(Insn &insn, const DepNode &depNode, MapleVector &nodes) { + const auto &useRegnos = depNode.GetUseRegnos(); + if (beforeRA) { + depNode.InitRegUsesSize(useRegnos.size()); + } + for (auto regNO : useRegnos) { + AppendRegUseList(insn, regNO); + if (beforeRA) { + depNode.SetRegUses(*regUses[regNO]); + if (regDefs[regNO] == nullptr) { + regDefs[regNO] = nodes[separatorIndex]->GetInsn(); + nodes[separatorIndex]->AddDefReg(regNO); + nodes[separatorIndex]->SetRegDefs(nodes[separatorIndex]->GetDefRegnos().size(), regUses[regNO]); + } + } + } + + const auto &defRegnos = depNode.GetDefRegnos(); + size_t i = 0; + if (beforeRA) { + depNode.InitRegDefsSize(defRegnos.size()); + } + for (const auto regNO : defRegnos) { + regDefs[regNO] = &insn; + regUses[regNO] = nullptr; + if (beforeRA) { + depNode.SetRegDefs(i, nullptr); + if (regNO >= R0 && regNO <= R3) { + depNode.SetHasPreg(true); + } else if (regNO == R8) { + depNode.SetHasNativeCallRegister(true); + } + } + ++i; + } +} + +/* Update stack and heap dependency */ +void AArch64DepAnalysis::UpdateStackAndHeapDependency(DepNode &depNode, Insn &insn, const Insn &locInsn) { + if (!insn.MayThrow()) { + return; + } + depNode.SetLocInsn(locInsn); + mayThrows.emplace_back(&insn); + AddDependence4InsnInVectorByType(stackDefs, insn, kDependenceTypeThrow); + AddDependence4InsnInVectorByType(heapDefs, insn, kDependenceTypeThrow); +} + +/* Add a separatorNode to the end of a nodes + * * before RA: add all live-out registers to this separatorNode'Uses + * */ +void AArch64DepAnalysis::AddEndSeparatorNode(MapleVector &nodes) { + DepNode *separatorNode = BuildSeparatorNode(); + nodes.emplace_back(separatorNode); + BuildDepsSeparator(*separatorNode, nodes); + + if (beforeRA) { + /* for all live-out register of current bb */ + for (auto ®NO : curBB->GetLiveOutRegNO()) { + if (regDefs[regNO] != nullptr) { + AppendRegUseList(*(separatorNode->GetInsn()), regNO); + separatorNode->AddUseReg(regNO); + separatorNode->SetRegUses(*regUses[regNO]); + } + } + } +} + +/* + * Build dependence graph. + * 1: Build dependence nodes. + * 2: Build edges between dependence nodes. 
Edges are: + * 2.1) True dependences + * 2.2) Anti dependences + * 2.3) Output dependences + * 2.4) Barrier dependences + */ +void AArch64DepAnalysis::Run(BB &bb, MapleVector &nodes) { + /* Initial internal datas. */ + Init(bb, nodes); + uint32 nodeSum = 1; + MapleVector comments(alloc.Adapter()); + const Insn *locInsn = bb.GetFirstLoc(); + FOR_BB_INSNS(insn, (&bb)) { + if (!insn->IsMachineInstruction()) { + if (insn->IsImmaterialInsn()) { + if (!insn->IsComment()) { + locInsn = insn; + } else { + comments.emplace_back(insn); + } + } else if (insn->IsCfiInsn()) { + if (!nodes.empty()) { + nodes.back()->AddCfiInsn(*insn); + } + } + continue; + } + /* Add a pseudo node to seperate dependence graph when appropriate */ + SeperateDependenceGraph(nodes, nodeSum); + /* generate a DepNode */ + DepNode *depNode = GenerateDepNode(*insn, nodes, nodeSum, comments); + ++nodeSum; + comments.clear(); + /* Build Dependency for maythrow insn; */ + BuildMayThrowInsnDependency(*insn); + /* Build Dependency for each Operand of insn */ + BuildOpndDependency(*insn); + /* Build Dependency for special insn */ + BuildSpecialInsnDependency(*insn, *depNode, nodes); + /* Build Dependency for AmbiInsn if needed */ + BuildAmbiInsnDependency(*insn); + /* Update stack and heap dependency */ + UpdateStackAndHeapDependency(*depNode, *insn, *locInsn); + if (insn->IsFrameDef()) { + lastFrameDef = insn; + } + /* Seperator exists. */ + AddDependence(*nodes[separatorIndex], *insn->GetDepNode(), kDependenceTypeSeparator); + /* Update register use and register def */ + UpdateRegUseAndDef(*insn, *depNode, nodes); + } + + AddEndSeparatorNode(nodes); + + if (!comments.empty()) { + lastComments = comments; + } + comments.clear(); +} + +/* return dependence type name */ +const std::string &AArch64DepAnalysis::GetDepTypeName(DepType depType) const { + ASSERT(depType <= kDependenceTypeNone, "array boundary check failed"); + return kDepTypeName[depType]; +} +} /* namespace maplebe */ diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_ebo.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_ebo.cpp new file mode 100644 index 0000000000000000000000000000000000000000..3f1c5c7ca27799c2f8a2ff45ac662ffd9b4aeee9 --- /dev/null +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_ebo.cpp @@ -0,0 +1,1537 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "aarch64_ebo.h" +#include "aarch64_cg.h" +#include "mpl_logging.h" +#include "aarch64_utils.h" + +namespace maplebe { +using namespace maple; +#define EBO_DUMP CG_DEBUG_FUNC(*cgFunc) + +enum AArch64Ebo::ExtOpTable : uint8 { + AND, + SXTB, + SXTH, + SXTW, + ZXTB, + ZXTH, + ZXTW, + ExtTableSize +}; + +namespace { + +using PairMOperator = MOperator[2]; + +constexpr uint8 kInsPairsNum = 5; + +PairMOperator extInsnPairTable[ExtTableSize][kInsPairsNum] = { + /* {origMop, newMop} */ + {{MOP_wldrb, MOP_wldrb}, {MOP_wldrsh, MOP_wldrb}, {MOP_wldrh, MOP_wldrb}, {MOP_xldrsw, MOP_wldrb}, + {MOP_wldr, MOP_wldrb}}, /* AND */ + {{MOP_wldrb, MOP_wldrsb}, {MOP_wldr, MOP_wldrsb}, {MOP_undef, MOP_undef}, {MOP_undef, MOP_undef}, + {MOP_undef, MOP_undef}}, /* SXTB */ + {{MOP_wldrh, MOP_wldrsh}, {MOP_wldrb, MOP_wldrb}, {MOP_wldrsb, MOP_wldrsb}, {MOP_wldrsh, MOP_wldrsh}, + {MOP_undef, MOP_undef}}, /* SXTH */ + {{MOP_wldrh, MOP_wldrh}, {MOP_wldrsh, MOP_wldrsh}, {MOP_wldrb, MOP_wldrb}, {MOP_wldrsb, MOP_wldrsb}, + {MOP_wldr, MOP_xldrsw}}, /* SXTW */ + {{MOP_wldrb, MOP_wldrb}, {MOP_wldrsb, MOP_wldrb}, {MOP_undef, MOP_undef}, {MOP_undef, MOP_undef}, + {MOP_undef, MOP_undef}}, /* ZXTB */ + {{MOP_wldrh, MOP_wldrh}, {MOP_wldrb, MOP_wldrb}, {MOP_wldr, MOP_wldrh}, {MOP_undef, MOP_undef}, + {MOP_undef, MOP_undef}}, /* ZXTH */ + {{MOP_wldr, MOP_wldr}, {MOP_wldrh, MOP_wldrh}, {MOP_wldrb, MOP_wldrb}, {MOP_undef, MOP_undef}, + {MOP_undef, MOP_undef}} /* ZXTW */ +}; + +} // anonymous namespace + +MOperator AArch64Ebo::ExtLoadSwitchBitSize(MOperator lowMop) const { + switch (lowMop) { + case MOP_wldrsb : + return MOP_xldrsb; + case MOP_wldrsh : + return MOP_xldrsh; + default: + break; + } + return lowMop; +} + +bool AArch64Ebo::IsFmov(const Insn &insn) const { + return ((insn.GetMachineOpcode() >= MOP_xvmovsr) && (insn.GetMachineOpcode() <= MOP_xvmovrd)); +} + +bool AArch64Ebo::IsAdd(const Insn &insn) const { + return ((insn.GetMachineOpcode() >= MOP_xaddrrr) && (insn.GetMachineOpcode() <= MOP_ssub)); +} + +bool AArch64Ebo::IsInvalidReg(const RegOperand &opnd) const { + return (opnd.GetRegisterNumber() == AArch64reg::kRinvalid); +} + +bool AArch64Ebo::IsZeroRegister(const Operand &opnd) const { + if (!opnd.IsRegister()) { + return false; + } + const RegOperand *regOpnd = static_cast(&opnd); + return regOpnd->GetRegisterNumber() == RZR; +} + +bool AArch64Ebo::IsConstantImmOrReg(const Operand &opnd) const { + if (opnd.IsConstImmediate()) { + return true; + } + return IsZeroRegister(opnd); +} + +bool AArch64Ebo::IsClinitCheck(const Insn &insn) const { + MOperator mOp = insn.GetMachineOpcode(); + return ((mOp == MOP_clinit) || (mOp == MOP_clinit_tail)); +} + +bool AArch64Ebo::IsDecoupleStaticOp(Insn &insn) const { + if (insn.GetMachineOpcode() == MOP_lazy_ldr_static) { + Operand *opnd1 = &insn.GetOperand(kInsnSecondOpnd); + CHECK_FATAL(opnd1 != nullptr, "opnd1 is null!"); + auto *stImmOpnd = static_cast(opnd1); + return StringUtils::StartsWith(stImmOpnd->GetName(), namemangler::kDecoupleStaticValueStr); + } + return false; +} + +static bool IsYieldPoint(Insn &insn) { + /* + * It is a yieldpoint if loading from a dedicated + * register holding polling page address: + * ldr wzr, [RYP] + */ + if (insn.IsLoad() && !insn.IsLoadLabel()) { + auto mem = static_cast(insn.GetMemOpnd()); + return (mem != nullptr && mem->GetBaseRegister() != nullptr && mem->GetBaseRegister()->GetRegisterNumber() == RYP); + } + return false; +} + +/* retrun true if insn is globalneeded */ +bool AArch64Ebo::IsGlobalNeeded(Insn &insn) const { + /* 
Calls may have side effects. */ + if (insn.IsCall()) { + return true; + } + + /* Intrinsic call should not be removed. */ + if (insn.IsSpecialIntrinsic()) { + return true; + } + + /* Clinit should not be removed. */ + if (IsClinitCheck(insn)) { + return true; + } + + /* Yieldpoints should not be removed by optimizer. */ + if (cgFunc->GetCG()->GenYieldPoint() && IsYieldPoint(insn)) { + return true; + } + + std::set defRegs = insn.GetDefRegs(); + for (auto defRegNo : defRegs) { + if (defRegNo == RZR || defRegNo == RSP || ((defRegNo == RFP || defRegNo == R29) && CGOptions::UseFramePointer())) { + return true; + } + } + return false; +} + +/* in aarch64,resOp will not be def and use in the same time */ +bool AArch64Ebo::ResIsNotDefAndUse(Insn &insn) const { + (void)insn; + return true; +} + +/* Return true if opnd live out of bb. */ +bool AArch64Ebo::LiveOutOfBB(const Operand &opnd, const BB &bb) const { + CHECK_FATAL(opnd.IsRegister(), "expect register here."); + /* when optimize_level < 2, there is need to anlyze live range. */ + if (live == nullptr) { + return false; + } + bool isLiveOut = false; + if (bb.GetLiveOut()->TestBit(static_cast(&opnd)->GetRegisterNumber())) { + isLiveOut = true; + } + return isLiveOut; +} + +bool AArch64Ebo::IsLastAndBranch(BB &bb, Insn &insn) const { + return (bb.GetLastInsn() == &insn) && insn.IsBranch(); +} + +bool AArch64Ebo::IsSameRedefine(BB &bb, Insn &insn, OpndInfo &opndInfo) const { + MOperator mOp = insn.GetMachineOpcode(); + if (!(mOp == MOP_wmovri32 || mOp == MOP_xmovri64 || mOp == MOP_wsfmovri || mOp == MOP_xdfmovri)) { + return false; + } + OpndInfo *sameInfo = opndInfo.same; + if (sameInfo == nullptr || sameInfo->insn == nullptr || sameInfo->bb != &bb || + sameInfo->insn->GetMachineOpcode() != mOp) { + return false; + } + Insn *prevInsn = sameInfo->insn; + if (!prevInsn->GetOperand(kInsnSecondOpnd).IsImmediate()) { + return false; + } + auto &sameOpnd = static_cast(prevInsn->GetOperand(kInsnSecondOpnd)); + auto &opnd = static_cast(insn.GetOperand(kInsnSecondOpnd)); + if (sameOpnd.GetValue() == opnd.GetValue()) { + sameInfo->refCount += opndInfo.refCount; + return true; + } + return false; +} + +const RegOperand &AArch64Ebo::GetRegOperand(const Operand &opnd) const { + CHECK_FATAL(opnd.IsRegister(), "aarch64 shoud not have regShiftOp! opnd is not register!"); + const auto &res = static_cast(opnd); + return res; +} + +/* Create infomation for local_opnd from its def insn current_insn. */ +OpndInfo *AArch64Ebo::OperandInfoDef(BB ¤tBB, Insn ¤tInsn, Operand &localOpnd) { + int32 hashVal = localOpnd.IsRegister() ? 
-1 : ComputeOpndHash(localOpnd); + OpndInfo *opndInfoPrev = GetOpndInfo(localOpnd, hashVal); + OpndInfo *opndInfo = GetNewOpndInfo(currentBB, ¤tInsn, localOpnd, hashVal); + if (localOpnd.IsMemoryAccessOperand()) { + MemOpndInfo *memInfo = static_cast(opndInfo); + MemOperand *mem = static_cast(&localOpnd); + Operand *base = mem->GetBaseRegister(); + Operand *offset = mem->GetOffset(); + if (base != nullptr && base->IsRegister()) { + memInfo->SetBaseInfo(*OperandInfoUse(currentBB, *base)); + } + if (offset != nullptr && offset->IsRegister()) { + memInfo->SetOffsetInfo(*OperandInfoUse(currentBB, *offset)); + } + } + opndInfo->same = opndInfoPrev; + if ((opndInfoPrev != nullptr)) { + opndInfoPrev->redefined = true; + if (opndInfoPrev->bb == ¤tBB) { + opndInfoPrev->redefinedInBB = true; + opndInfoPrev->redefinedInsn = ¤tInsn; + } + UpdateOpndInfo(localOpnd, *opndInfoPrev, opndInfo, hashVal); + } else { + SetOpndInfo(localOpnd, opndInfo, hashVal); + } + return opndInfo; +} + +void AArch64Ebo::DefineClinitSpecialRegisters(InsnInfo &insnInfo) { + Insn *insn = insnInfo.insn; + CHECK_FATAL(insn != nullptr, "nullptr of currInsnInfo"); + RegOperand &phyOpnd1 = a64CGFunc->GetOrCreatePhysicalRegisterOperand(R16, k64BitSize, kRegTyInt); + OpndInfo *opndInfo = OperandInfoDef(*insn->GetBB(), *insn, phyOpnd1); + opndInfo->insnInfo = &insnInfo; + + RegOperand &phyOpnd2 = a64CGFunc->GetOrCreatePhysicalRegisterOperand(R17, k64BitSize, kRegTyInt); + opndInfo = OperandInfoDef(*insn->GetBB(), *insn, phyOpnd2); + opndInfo->insnInfo = &insnInfo; +} + +void AArch64Ebo::BuildCallerSaveRegisters() { + callerSaveRegTable.clear(); + RegOperand &phyOpndR0 = a64CGFunc->GetOrCreatePhysicalRegisterOperand(R0, k64BitSize, kRegTyInt); + RegOperand &phyOpndV0 = a64CGFunc->GetOrCreatePhysicalRegisterOperand(V0, k64BitSize, kRegTyFloat); + callerSaveRegTable.emplace_back(&phyOpndR0); + callerSaveRegTable.emplace_back(&phyOpndV0); + for (uint32 i = R1; i <= R18; i++) { + RegOperand &phyOpnd = + a64CGFunc->GetOrCreatePhysicalRegisterOperand(static_cast(i), k64BitSize, kRegTyInt); + callerSaveRegTable.emplace_back(&phyOpnd); + } + for (uint32 i = V1; i <= V7; i++) { + RegOperand &phyOpnd = + a64CGFunc->GetOrCreatePhysicalRegisterOperand(static_cast(i), k64BitSize, kRegTyFloat); + callerSaveRegTable.emplace_back(&phyOpnd); + } + for (uint32 i = V16; i <= V31; i++) { + RegOperand &phyOpnd = + a64CGFunc->GetOrCreatePhysicalRegisterOperand(static_cast(i), k64BitSize, kRegTyFloat); + callerSaveRegTable.emplace_back(&phyOpnd); + } + CHECK_FATAL(callerSaveRegTable.size() < kMaxCallerSaveReg, + "number of elements in callerSaveRegTable must less then 45!"); +} + +void AArch64Ebo::DefineAsmRegisters(InsnInfo &insnInfo) { + Insn *insn = insnInfo.insn; + ASSERT(insn->GetMachineOpcode() == MOP_asm, "insn should be a call insn."); + auto &outList = static_cast(insn->GetOperand(kAsmOutputListOpnd)); + for (auto &opnd : outList.GetOperands()) { + OpndInfo *opndInfo = OperandInfoDef(*insn->GetBB(), *insn, *opnd); + opndInfo->insnInfo = &insnInfo; + } + auto &clobberList = static_cast(insn->GetOperand(kAsmClobberListOpnd)); + for (auto &opnd : clobberList.GetOperands()) { + OpndInfo *opndInfo = OperandInfoDef(*insn->GetBB(), *insn, *opnd); + opndInfo->insnInfo = &insnInfo; + } + auto &inList = static_cast(insn->GetOperand(kAsmInputListOpnd)); + for (auto &opnd : inList.GetOperands()) { + OperandInfoUse(*(insn->GetBB()), *opnd); + } +} + +void AArch64Ebo::DefineCallerSaveRegisters(InsnInfo &insnInfo) { + Insn *insn = insnInfo.insn; + if 
(insn->IsAsmInsn()) { + DefineAsmRegisters(insnInfo); + return; + } + ASSERT(insn->IsCall() || insn->IsTailCall(), "insn should be a call insn."); + if (CGOptions::DoIPARA()) { + auto *targetOpnd = insn->GetCallTargetOperand(); + CHECK_FATAL(targetOpnd != nullptr, "target is null in Insn::IsCallToFunctionThatNeverReturns"); + if (targetOpnd->IsFuncNameOpnd()) { + FuncNameOperand *target = static_cast(targetOpnd); + const MIRSymbol *funcSt = target->GetFunctionSymbol(); + ASSERT(funcSt->GetSKind() == kStFunc, "funcst must be a function name symbol"); + MIRFunction *func = funcSt->GetFunction(); + if (func != nullptr && func->IsReferedRegsValid()) { + for (auto preg : func->GetReferedRegs()) { + if (AArch64Abi::IsCalleeSavedReg(static_cast(preg))) { + continue; + } + RegOperand *opnd = &a64CGFunc->GetOrCreatePhysicalRegisterOperand(static_cast(preg), k64BitSize, + AArch64isa::IsFPSIMDRegister(static_cast(preg)) ? kRegTyFloat : kRegTyInt); + OpndInfo *opndInfo = OperandInfoDef(*insn->GetBB(), *insn, *opnd); + opndInfo->insnInfo = &insnInfo; + } + return; + } + } + } + for (auto opnd : callerSaveRegTable) { + OpndInfo *opndInfo = OperandInfoDef(*insn->GetBB(), *insn, *opnd); + opndInfo->insnInfo = &insnInfo; + } +} + +void AArch64Ebo::DefineReturnUseRegister(Insn &insn) { + if (insn.GetMachineOpcode() != MOP_xret) { + return; + } + /* Define scalar callee save register and FP, LR. */ + for (uint32 i = R19; i <= R30; i++) { + RegOperand &phyOpnd = + a64CGFunc->GetOrCreatePhysicalRegisterOperand(static_cast(i), k64BitSize, kRegTyInt); + OperandInfoUse(*insn.GetBB(), phyOpnd); + } + + /* Define SP */ + RegOperand &phyOpndSP = + a64CGFunc->GetOrCreatePhysicalRegisterOperand(static_cast(RSP), k64BitSize, kRegTyInt); + OperandInfoUse(*insn.GetBB(), phyOpndSP); + + /* Define FP callee save registers. */ + for (uint32 i = V8; i <= V15; i++) { + RegOperand &phyOpnd = + a64CGFunc->GetOrCreatePhysicalRegisterOperand(static_cast(i), k64BitSize, kRegTyFloat); + OperandInfoUse(*insn.GetBB(), phyOpnd); + } +} + +void AArch64Ebo::DefineCallUseSpecialRegister(Insn &insn) { + if (insn.GetMachineOpcode() == MOP_asm) { + return; + } + AArch64reg fpRegNO = RFP; + if (!beforeRegAlloc && cgFunc->UseFP()) { + fpRegNO = R29; + } + /* Define FP, LR. 
*/ + RegOperand &phyOpndFP = + a64CGFunc->GetOrCreatePhysicalRegisterOperand(fpRegNO, k64BitSize, kRegTyInt); + OperandInfoUse(*insn.GetBB(), phyOpndFP); + RegOperand &phyOpndLR = + a64CGFunc->GetOrCreatePhysicalRegisterOperand(static_cast(RLR), k64BitSize, kRegTyInt); + OperandInfoUse(*insn.GetBB(), phyOpndLR); + + /* Define SP */ + RegOperand &phyOpndSP = + a64CGFunc->GetOrCreatePhysicalRegisterOperand(static_cast(RSP), k64BitSize, kRegTyInt); + OperandInfoUse(*insn.GetBB(), phyOpndSP); +} + +/* return true if op1 == op2 */ +bool AArch64Ebo::OperandEqSpecial(const Operand &op1, const Operand &op2) const { + switch (op1.GetKind()) { + case Operand::kOpdRegister: { + const RegOperand ®1 = static_cast(op1); + const RegOperand ®2 = static_cast(op2); + return reg1 == reg2; + } + case Operand::kOpdImmediate: { + const ImmOperand &imm1 = static_cast(op1); + const ImmOperand &imm2 = static_cast(op2); + return imm1 == imm2; + } + case Operand::kOpdOffset: { + const OfstOperand &ofst1 = static_cast(op1); + const OfstOperand &ofst2 = static_cast(op2); + return ofst1 == ofst2; + } + case Operand::kOpdStImmediate: { + const StImmOperand &stImm1 = static_cast(op1); + const StImmOperand &stImm2 = static_cast(op2); + return stImm1 == stImm2; + } + case Operand::kOpdMem: { + const MemOperand &mem1 = static_cast(op1); + const MemOperand &mem2 = static_cast(op2); + if (mem1.GetAddrMode() == mem2.GetAddrMode()) { + ASSERT(mem1.GetBaseRegister() != nullptr, "nullptr check"); + ASSERT(mem2.GetBaseRegister() != nullptr, "nullptr check"); + } + return ((mem1.GetAddrMode() == mem2.GetAddrMode()) && + OperandEqual(*(mem1.GetBaseRegister()), *(mem2.GetBaseRegister())) && + OperandEqual(*(mem1.GetIndexRegister()), *(mem2.GetIndexRegister())) && + OperandEqual(*(mem1.GetOffsetOperand()), *(mem2.GetOffsetOperand())) && + (mem1.GetSymbol() == mem2.GetSymbol()) && (mem1.GetSize() == mem2.GetSize())); + } + default: { + return false; + } + } +} + +int32 AArch64Ebo::GetOffsetVal(const MemOperand &memOpnd) const { + OfstOperand *offset = memOpnd.GetOffsetImmediate(); + int32 val = 0; + if (offset != nullptr) { + val += static_cast(offset->GetOffsetValue()); + + if (offset->IsSymOffset() || offset->IsSymAndImmOffset()) { + val += static_cast(offset->GetSymbol()->GetStIdx().Idx()); + } + } + return val; +} + +/* + * move vreg1, #1 + * move vreg2, vreg1 + * ===> + * move vreg1, #1 + * move vreg2, #1 + * return true if do simplify successfully. 
+ */ +bool AArch64Ebo::DoConstProp(Insn &insn, uint32 idx, Operand &opnd) { + ImmOperand *src = static_cast(&opnd); + const InsnDesc *md = &AArch64CG::kMd[(insn.GetMachineOpcode())]; + /* avoid the invalid case "cmp wzr, #0"/"add w1, wzr, #100" */ + Operand &destOpnd = insn.GetOperand(idx); + if (src->IsZero() && destOpnd.IsRegister() && + (static_cast(destOpnd).GetRegisterType() == kRegTyInt) && + (insn.IsStore() || insn.IsMove() || md->IsCondDef())) { + insn.SetOperand(idx, *GetZeroOpnd(src->GetSize())); + return true; + } + MOperator mopCode = insn.GetMachineOpcode(); + switch (mopCode) { + case MOP_xmovrr: + case MOP_wmovrr: { + ASSERT(idx == kInsnSecondOpnd, "src const for move must be the second operand."); + uint32 targetSize = insn.GetOperand(idx).GetSize(); + if (src->GetSize() != targetSize) { + src = static_cast(src->Clone(*cgFunc->GetMemoryPool())); + CHECK_FATAL(src != nullptr, "pointer result is null"); + src->SetSize(targetSize); + } + if (src->IsSingleInstructionMovable() && (insn.GetOperand(kInsnFirstOpnd).GetSize() == targetSize)) { + if (EBO_DUMP) { + LogInfo::MapleLogger() << " Do constprop:Prop constval " << src->GetValue() << "into insn:\n"; + insn.Dump(); + } + insn.SetOperand(kInsnSecondOpnd, *src); + MOperator mOp = (mopCode == MOP_wmovrr) ? MOP_wmovri32 : MOP_xmovri64; + insn.SetMOP(AArch64CG::kMd[mOp]); + if (EBO_DUMP) { + LogInfo::MapleLogger() << " after constprop the insn is:\n"; + insn.Dump(); + } + return true; + } + break; + } + case MOP_xaddrrr: + case MOP_waddrrr: + case MOP_xsubrrr: + case MOP_wsubrrr: { + if ((idx != kInsnThirdOpnd) || !src->IsInBitSize(kMaxImmVal24Bits, 0) || + !(src->IsInBitSize(kMaxImmVal12Bits, 0) || src->IsInBitSize(kMaxImmVal12Bits, kMaxImmVal12Bits))) { + return false; + } + Operand &result = insn.GetOperand(0); + bool is64Bits = (result.GetSize() == k64BitSize); + if (EBO_DUMP) { + LogInfo::MapleLogger() << " Do constprop:Prop constval " << src->GetValue() << "into insn:\n"; + insn.Dump(); + } + if (src->IsZero()) { + MOperator mOp = is64Bits ? MOP_xmovrr : MOP_wmovrr; + insn.SetMOP(AArch64CG::kMd[mOp]); + insn.PopBackOperand(); + if (EBO_DUMP) { + LogInfo::MapleLogger() << " after constprop the insn is:\n"; + insn.Dump(); + } + return true; + } + insn.SetOperand(kInsnThirdOpnd, *src); + if ((mopCode == MOP_xaddrrr) || (mopCode == MOP_waddrrr)) { + is64Bits ? insn.SetMOP(AArch64CG::kMd[MOP_xaddrri12]) : insn.SetMOP(AArch64CG::kMd[MOP_waddrri12]); + } else if ((mopCode == MOP_xsubrrr) || (mopCode == MOP_wsubrrr)) { + is64Bits ? 
insn.SetMOP(AArch64CG::kMd[MOP_xsubrri12]) : insn.SetMOP(AArch64CG::kMd[MOP_wsubrri12]); + } + if (EBO_DUMP) { + LogInfo::MapleLogger() << " after constprop the insn is:\n"; + insn.Dump(); + } + return true; + } + default: + break; + } + return false; +} + +/* optimize csel to cset */ +bool AArch64Ebo::Csel2Cset(Insn &insn, const MapleVector &opnds) { + MOperator opCode = insn.GetMachineOpcode(); + /* csel ->cset */ + if ((opCode == MOP_wcselrrrc) || (opCode == MOP_xcselrrrc)) { + Operand *res = &insn.GetOperand(kInsnFirstOpnd); + ASSERT(res != nullptr, "expect a register"); + ASSERT(res->IsRegister(), "expect a register"); + /* only do integers */ + RegOperand *reg = static_cast(res); + if ((res == nullptr) || (!reg->IsOfIntClass())) { + return false; + } + Operand *op0 = opnds.at(kInsnSecondOpnd); + Operand *op1 = opnds.at(kInsnThirdOpnd); + ImmOperand *imm0 = nullptr; + ImmOperand *imm1 = nullptr; + if (op0->IsImmediate()) { + imm0 = static_cast(op0); + } + if (op1->IsImmediate()) { + imm1 = static_cast(op1); + } + + bool reverse = (imm1 != nullptr) && imm1->IsOne() && + (((imm0 != nullptr) && imm0->IsZero()) || IsZeroRegister(*op0)); + if (((imm0 != nullptr) && imm0->IsOne() && (((imm1 != nullptr) && imm1->IsZero()) || IsZeroRegister(*op1))) || + reverse) { + if (EBO_DUMP) { + LogInfo::MapleLogger() << "change csel insn :\n"; + insn.Dump(); + } + AArch64CGFunc *aarFunc = static_cast(cgFunc); + Operand &condOperand = insn.GetOperand(kInsnFourthOpnd); + Operand &rflag = aarFunc->GetOrCreateRflag(); + if (!reverse) { + Insn &newInsn = cgFunc->GetInsnBuilder()->BuildInsn( + (opCode == MOP_xcselrrrc) ? MOP_xcsetrc : MOP_wcsetrc, *res, condOperand, rflag); + insn.GetBB()->ReplaceInsn(insn, newInsn); + if (EBO_DUMP) { + LogInfo::MapleLogger() << "to cset insn ====>\n"; + newInsn.Dump(); + } + } else { + auto &cond = static_cast(condOperand); + if (!CheckCondCode(cond)) { + return false; + } + CondOperand &reverseCond = a64CGFunc->GetCondOperand(GetReverseCond(cond)); + Insn &newInsn = cgFunc->GetInsnBuilder()->BuildInsn( + (opCode == MOP_xcselrrrc) ? MOP_xcsetrc : MOP_wcsetrc, *res, reverseCond, rflag); + insn.GetBB()->ReplaceInsn(insn, newInsn); + if (EBO_DUMP) { + LogInfo::MapleLogger() << "to cset insn ====>\n"; + newInsn.Dump(); + } + } + return true; + } + } + return false; +} + +/* Look at an expression that has a constant operand and attempt to simplify the computations. */ +bool AArch64Ebo::SimplifyConstOperand(Insn &insn, const MapleVector &opnds, + const MapleVector &opndInfo) { + BB *bb = insn.GetBB(); + bool result = false; + if (insn.GetOperandSize() <= 1) { + return false; + } + ASSERT(opnds.size() > 1, "opnds size must greater than 1"); + Operand *op0 = opnds[kInsnSecondOpnd]; + Operand *op1 = opnds[kInsnThirdOpnd]; + Operand *res = &insn.GetOperand(kInsnFirstOpnd); + CHECK_FATAL(res != nullptr, "null ptr check"); + uint32 opndSize = insn.GetDesc()->GetOperandSize(); + bool op0IsConstant = IsConstantImmOrReg(*op0) && !IsConstantImmOrReg(*op1); + bool op1IsConstant = !IsConstantImmOrReg(*op0) && IsConstantImmOrReg(*op1); + bool bothConstant = IsConstantImmOrReg(*op0) && IsConstantImmOrReg(*op1); + ImmOperand *immOpnd = nullptr; + Operand *op = nullptr; + int32 idx0 = kInsnSecondOpnd; + if (op0IsConstant) { + // cannot convert zero reg (r30) to a immOperand + immOpnd = IsZeroRegister(*op0) ? 
&a64CGFunc->CreateImmOperand(0, op0->GetSize(), false) + : static_cast(op0); + op = op1; + if (op->IsMemoryAccessOperand()) { + op = &(insn.GetOperand(kInsnThirdOpnd)); + } + idx0 = kInsnThirdOpnd; + } else if (op1IsConstant) { + // cannot convert zero reg (r30) to a immOperand + immOpnd = IsZeroRegister(*op1) ? &a64CGFunc->CreateImmOperand(0, op1->GetSize(), false) + : static_cast(op1); + op = op0; + if (op->IsMemoryAccessOperand()) { + op = &(insn.GetOperand(kInsnSecondOpnd)); + } + } else if (bothConstant) { + ImmOperand *immOpnd0 = IsZeroRegister(*op0) ? &a64CGFunc->CreateImmOperand(0, op0->GetSize(), false) + : static_cast(op0); + ImmOperand *immOpnd1 = IsZeroRegister(*op1) ? &a64CGFunc->CreateImmOperand(0, op1->GetSize(), false) + : static_cast(op1); + return SimplifyBothConst(*insn.GetBB(), insn, *immOpnd0, *immOpnd1, opndSize); + } + CHECK_FATAL(immOpnd != nullptr, "constant operand required!"); + CHECK_FATAL(op != nullptr, "constant operand required!"); + /* For orr insn and one of the opnd is zero + * orr resOp, imm1, #0 | orr resOp, #0, imm1 + * =======> + * mov resOp, imm1 */ + if (((insn.GetMachineOpcode() == MOP_wiorrri12) || (insn.GetMachineOpcode() == MOP_xiorrri13)) && immOpnd->IsZero()) { + MOperator mOp = opndSize == k64BitSize ? MOP_xmovrr : MOP_wmovrr; + Insn &newInsn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, *res, *op); + bb->ReplaceInsn(insn, newInsn); + return true; + } + /* For the imm is 0. Then replace the insn by a move insn. */ + if (((insn.GetMachineOpcode() >= MOP_xaddrrr) && (insn.GetMachineOpcode() <= MOP_sadd) && immOpnd->IsZero()) || + (op1IsConstant && (insn.GetMachineOpcode() >= MOP_xsubrrr) && (insn.GetMachineOpcode() <= MOP_ssub) && + immOpnd->IsZero())) { + Insn &newInsn = cgFunc->GetInsnBuilder()->BuildInsn(opndSize == k64BitSize ? MOP_xmovrr : MOP_wmovrr, *res, *op); + bb->ReplaceInsn(insn, newInsn); + return true; + } + + if ((insn.GetMachineOpcode() == MOP_xaddrrr) || (insn.GetMachineOpcode() == MOP_waddrrr)) { + if (immOpnd->IsInBitSize(kMaxImmVal24Bits, 0)) { + /* + * ADD Wd|WSP, Wn|WSP, #imm{, shift} ; 32-bit general registers + * ADD Xd|SP, Xn|SP, #imm{, shift} ; 64-bit general registers + * imm : 0 ~ 4095, shift: none, LSL #0, or LSL #12 + * aarch64 assembly takes up to 24-bits, if the lower 12 bits is all 0 + */ + if (immOpnd->IsInBitSize(kMaxImmVal12Bits, 0) || immOpnd->IsInBitSize(kMaxImmVal12Bits, kMaxImmVal12Bits)) { + MOperator mOp = opndSize == k64BitSize ? MOP_xaddrri12 : MOP_waddrri12; + Insn &newInsn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, *res, *op, *immOpnd); + bb->ReplaceInsn(insn, newInsn); + result = true; + } + } + } + /* Look for the sequence which can be simpified. */ + if (result || (insn.GetMachineOpcode() == MOP_xaddrri12) || (insn.GetMachineOpcode() == MOP_waddrri12)) { + Insn *prev = opndInfo[idx0]->insn; + if ((prev != nullptr) && ((prev->GetMachineOpcode() == MOP_xaddrri12) || + (prev->GetMachineOpcode() == MOP_waddrri12))) { + OpndInfo *prevInfo0 = opndInfo[idx0]->insnInfo->origOpnd[kInsnSecondOpnd]; + /* if prevop0 has been redefined. skip this optimiztation. 
*/ + if (prevInfo0->redefined) { + return result; + } + /* Implicit conversion */ + if (insn.GetOperand(kInsnFirstOpnd).GetSize() != insn.GetOperand(kInsnSecondOpnd).GetSize()) { + return result; + } + Operand &prevOpnd0 = prev->GetOperand(kInsnSecondOpnd); + ImmOperand &imm0 = static_cast(prev->GetOperand(kInsnThirdOpnd)); + int64_t val = imm0.GetValue() + immOpnd->GetValue(); + ImmOperand &imm1 = a64CGFunc->CreateImmOperand(val, opndSize, imm0.IsSignedValue()); + if (imm1.IsInBitSize(kMaxImmVal24Bits, 0) && (imm1.IsInBitSize(kMaxImmVal12Bits, 0) || + imm1.IsInBitSize(kMaxImmVal12Bits, kMaxImmVal12Bits))) { + MOperator mOp = (opndSize == k64BitSize ? MOP_xaddrri12 : MOP_waddrri12); + bb->ReplaceInsn(insn, cgFunc->GetInsnBuilder()->BuildInsn(mOp, *res, prevOpnd0, imm1)); + result = true; + } + } + } + return result; +} + +ConditionCode AArch64Ebo::GetReverseCond(const CondOperand &cond) const { + switch (cond.GetCode()) { + case CC_NE: + return CC_EQ; + case CC_EQ: + return CC_NE; + case CC_LT: + return CC_GE; + case CC_GE: + return CC_LT; + case CC_GT: + return CC_LE; + case CC_LE: + return CC_GT; + default: + CHECK_FATAL(false, "Not support yet."); + } + return kCcLast; +} + +/* return true if cond == CC_LE */ +bool AArch64Ebo::CheckCondCode(const CondOperand &cond) const { + switch (cond.GetCode()) { + case CC_NE: + case CC_EQ: + case CC_LT: + case CC_GE: + case CC_GT: + case CC_LE: + return true; + default: + return false; + } +} + +bool AArch64Ebo::SimplifyBothConst(BB &bb, Insn &insn, const ImmOperand &immOperand0, + const ImmOperand &immOperand1, uint32 opndSize) const { + MOperator mOp = insn.GetMachineOpcode(); + int64 val = 0; + /* do not support negative const simplify yet */ + if (immOperand0.GetValue() < 0 || immOperand1.GetValue() < 0) { + return false; + } + uint64 opndValue0 = static_cast(immOperand0.GetValue()); + uint64 opndValue1 = static_cast(immOperand1.GetValue()); + switch (mOp) { + case MOP_weorrri12: + case MOP_weorrrr: + case MOP_xeorrri13: + case MOP_xeorrrr: + val = static_cast(opndValue0 ^ opndValue1); + break; + case MOP_wandrri12: + case MOP_waddrri24: + case MOP_wandrrr: + case MOP_xandrri13: + case MOP_xandrrr: + val = static_cast(opndValue0 & opndValue1); + break; + case MOP_wiorrri12: + case MOP_wiorrrr: + case MOP_xiorrri13: + case MOP_xiorrrr: + val = static_cast(opndValue0 | opndValue1); + break; + default: + return false; + } + Operand *res = &insn.GetOperand(kInsnFirstOpnd); + ImmOperand *immOperand = &a64CGFunc->CreateImmOperand(val, opndSize, false); + if (!immOperand->IsSingleInstructionMovable()) { + ASSERT(res->IsRegister(), " expect a register operand"); + static_cast(cgFunc)->SplitMovImmOpndInstruction(val, *(static_cast(res)), &insn); + bb.RemoveInsn(insn); + } else { + MOperator newmOp = opndSize == k64BitSize ? 
MOP_xmovri64 : MOP_wmovri32; + Insn &newInsn = cgFunc->GetInsnBuilder()->BuildInsn(newmOp, *res, *immOperand); + bb.ReplaceInsn(insn, newInsn); + } + return true; +} + +bool AArch64Ebo::OperandLiveAfterInsn(const RegOperand ®Opnd, Insn &insn) const { + for (Insn *nextInsn = insn.GetNext(); nextInsn != nullptr; nextInsn = nextInsn->GetNext()) { + if (!nextInsn->IsMachineInstruction()) { + continue; + } + int32 lastOpndId = static_cast(nextInsn->GetOperandSize() - 1); + for (int32 i = lastOpndId; i >= 0; --i) { + Operand &opnd = nextInsn->GetOperand(static_cast(i)); + if (opnd.IsMemoryAccessOperand()) { + auto &mem = static_cast(opnd); + Operand *base = mem.GetBaseRegister(); + Operand *offset = mem.GetOffset(); + + if (base != nullptr && base->IsRegister()) { + auto *tmpRegOpnd = static_cast(base); + if (tmpRegOpnd->GetRegisterNumber() == regOpnd.GetRegisterNumber()) { + return true; + } + } + if (offset != nullptr && offset->IsRegister()) { + auto *tmpRegOpnd = static_cast(offset); + if (tmpRegOpnd->GetRegisterNumber() == regOpnd.GetRegisterNumber()) { + return true; + } + } + } + + if (!opnd.IsRegister()) { + continue; + } + auto &tmpRegOpnd = static_cast(opnd); + if (tmpRegOpnd.GetRegisterNumber() != regOpnd.GetRegisterNumber()) { + continue; + } + auto *regProp = nextInsn->GetDesc()->opndMD[static_cast(i)]; + bool isUse = regProp->IsUse(); + /* if noUse Redefined, no need to check live-out. */ + return isUse; + } + } + return LiveOutOfBB(regOpnd, *insn.GetBB()); +} + +bool AArch64Ebo::ValidPatternForCombineExtAndLoad(OpndInfo *prevOpndInfo, Insn *insn, MOperator newMop, + MOperator oldMop, const RegOperand& opnd) const { + if (newMop == oldMop) { + return true; + } + if (prevOpndInfo == nullptr || prevOpndInfo->refCount > 1) { + return false; + } + if (OperandLiveAfterInsn(opnd, *insn)) { + return false; + } + Insn *prevInsn = prevOpndInfo->insn; + MemOperand *memOpnd = static_cast(prevInsn->GetMemOpnd()); + ASSERT(!prevInsn->IsStorePair(), "do not do this opt for str pair"); + ASSERT(!prevInsn->IsLoadPair(), "do not do this opt for ldr pair"); + if (memOpnd->GetAddrMode() == MemOperand::kAddrModeBOi && + !a64CGFunc->IsOperandImmValid(newMop, prevInsn->GetMemOpnd(), kInsnSecondOpnd)) { + return false; + } + uint32 shiftAmount = memOpnd->ShiftAmount(); + if (shiftAmount == 0) { + return true; + } + const InsnDesc *md = &AArch64CG::kMd[newMop]; + uint32 memSize = md->GetOperandSize() / k8BitSize; + uint32 validShiftAmount = memSize == 8 ? 3 : memSize == 4 ? 2 : memSize == 2 ? 
1 : 0; + if (shiftAmount != validShiftAmount) { + return false; + } + return true; +} + +bool AArch64Ebo::CombineExtensionAndLoad(Insn *insn, const MapleVector &origInfos, + ExtOpTable idx, bool is64bits) const { + if (!beforeRegAlloc) { + return false; + } + OpndInfo *opndInfo = origInfos[kInsnSecondOpnd]; + if (opndInfo == nullptr) { + return false; + } + Insn *prevInsn = opndInfo->insn; + if (prevInsn == nullptr) { + return false; + } + + MOperator prevMop = prevInsn->GetMachineOpcode(); + ASSERT(prevMop != MOP_undef, "Invalid opcode of instruction!"); + PairMOperator *begin = &extInsnPairTable[idx][0]; + PairMOperator *end = &extInsnPairTable[idx][kInsPairsNum]; + auto pairIt = std::find_if(begin, end, [prevMop](const PairMOperator insPair) { + return prevMop == insPair[0]; + }); + if (pairIt == end) { + return false; + } + + auto &res = static_cast(prevInsn->GetOperand(kInsnFirstOpnd)); + OpndInfo *prevOpndInfo = GetOpndInfo(res, -1); + MOperator newPreMop = (*pairIt)[1]; + ASSERT(newPreMop != MOP_undef, "Invalid opcode of instruction!"); + if (!ValidPatternForCombineExtAndLoad(prevOpndInfo, insn, newPreMop, prevMop, + res)) { + return false; + } + auto *newMemOp = + GetOrCreateMemOperandForNewMOP(*cgFunc, *prevInsn, newPreMop); + if (newMemOp == nullptr) { + return false; + } + prevInsn->SetMemOpnd(newMemOp); + if (is64bits && idx <= SXTW && idx >= SXTB) { + newPreMop = ExtLoadSwitchBitSize(newPreMop); + auto &prevDstOpnd = static_cast(prevInsn->GetOperand(kInsnFirstOpnd)); + prevDstOpnd.SetSize(k64BitSize); + prevDstOpnd.SetValidBitsNum(k64BitSize); + } + prevInsn->SetMOP(AArch64CG::kMd[newPreMop]); + MOperator movOp = is64bits ? MOP_xmovrr : MOP_wmovrr; + if (insn->GetMachineOpcode() == MOP_wandrri12 || + insn->GetMachineOpcode() == MOP_xandrri13) { + Insn &newInsn = cgFunc->GetInsnBuilder()->BuildInsn( + movOp, insn->GetOperand(kInsnFirstOpnd), + insn->GetOperand(kInsnSecondOpnd)); + insn->GetBB()->ReplaceInsn(*insn, newInsn); + } else { + insn->SetMOP(AArch64CG::kMd[movOp]); + } + return true; +} + +bool AArch64Ebo::CombineMultiplyAdd(Insn *insn, const Insn *prevInsn, InsnInfo *insnInfo, Operand *addOpnd, + bool is64bits, bool isFp) const { + /* don't use register if it was redefined. */ + OpndInfo *opndInfo1 = insnInfo->origOpnd[kInsnSecondOpnd]; + OpndInfo *opndInfo2 = insnInfo->origOpnd[kInsnThirdOpnd]; + if (((opndInfo1 != nullptr) && opndInfo1->redefined) || ((opndInfo2 != nullptr) && opndInfo2->redefined)) { + return false; + } + Operand &res = insn->GetOperand(kInsnFirstOpnd); + Operand &opnd1 = prevInsn->GetOperand(kInsnSecondOpnd); + Operand &opnd2 = prevInsn->GetOperand(kInsnThirdOpnd); + /* may overflow */ + if ((prevInsn->GetOperand(kInsnFirstOpnd).GetSize() == k32BitSize) && is64bits) { + return false; + } + MOperator mOp = isFp ? (is64bits ? MOP_dmadd : MOP_smadd) : (is64bits ? 
MOP_xmaddrrrr : MOP_wmaddrrrr); + insn->GetBB()->ReplaceInsn(*insn, cgFunc->GetInsnBuilder()->BuildInsn(mOp, res, opnd1, opnd2, *addOpnd)); + return true; +} + +bool AArch64Ebo::CheckCanDoMadd(Insn *insn, OpndInfo *opndInfo, int32 pos, bool is64bits, bool isFp) const { + if ((opndInfo == nullptr) || (opndInfo->insn == nullptr)) { + return false; + } + if (!cgFunc->GetMirModule().IsCModule()) { + return false; + } + Insn *insn1 = opndInfo->insn; + InsnInfo *insnInfo = opndInfo->insnInfo; + if (insnInfo == nullptr) { + return false; + } + Operand &addOpnd = insn->GetOperand(static_cast(pos)); + MOperator opc1 = insn1->GetMachineOpcode(); + if ((isFp && ((opc1 == MOP_xvmuld) || (opc1 == MOP_xvmuls))) || + (!isFp && ((opc1 == MOP_xmulrrr) || (opc1 == MOP_wmulrrr)))) { + return CombineMultiplyAdd(insn, insn1, insnInfo, &addOpnd, is64bits, isFp); + } + return false; +} + +bool AArch64Ebo::CombineMultiplySub(Insn *insn, OpndInfo *opndInfo, bool is64bits, bool isFp) const { + if ((opndInfo == nullptr) || (opndInfo->insn == nullptr)) { + return false; + } + if (!cgFunc->GetMirModule().IsCModule()) { + return false; + } + Insn *insn1 = opndInfo->insn; + InsnInfo *insnInfo = opndInfo->insnInfo; + if (insnInfo == nullptr) { + return false; + } + Operand &subOpnd = insn->GetOperand(kInsnSecondOpnd); + MOperator opc1 = insn1->GetMachineOpcode(); + if ((isFp && ((opc1 == MOP_xvmuld) || (opc1 == MOP_xvmuls))) || + (!isFp && ((opc1 == MOP_xmulrrr) || (opc1 == MOP_wmulrrr)))) { + /* don't use register if it was redefined. */ + OpndInfo *opndInfo1 = insnInfo->origOpnd[kInsnSecondOpnd]; + OpndInfo *opndInfo2 = insnInfo->origOpnd[kInsnThirdOpnd]; + if (((opndInfo1 != nullptr) && opndInfo1->redefined) || ((opndInfo2 != nullptr) && opndInfo2->redefined)) { + return false; + } + Operand &res = insn->GetOperand(kInsnFirstOpnd); + Operand &opnd1 = insn1->GetOperand(kInsnSecondOpnd); + Operand &opnd2 = insn1->GetOperand(kInsnThirdOpnd); + /* may overflow */ + if ((insn1->GetOperand(kInsnFirstOpnd).GetSize() == k32BitSize) && is64bits) { + return false; + } + MOperator mOp = isFp ? (is64bits ? MOP_dmsub : MOP_smsub) : (is64bits ? MOP_xmsubrrrr : MOP_wmsubrrrr); + insn->GetBB()->ReplaceInsn(*insn, cgFunc->GetInsnBuilder()->BuildInsn(mOp, res, opnd1, opnd2, subOpnd)); + return true; + } + return false; +} + +bool CheckInsnRefField(const Insn &insn, size_t opndIndex) { + if (insn.IsAccessRefField() && insn.AccessMem()) { + Operand &opnd0 = insn.GetOperand(opndIndex); + if (opnd0.IsRegister()) { + return true; + } + } + return false; +} + +bool AArch64Ebo::CombineMultiplyNeg(Insn *insn, OpndInfo *opndInfo, bool is64bits, bool isFp) const { + if ((opndInfo == nullptr) || (opndInfo->insn == nullptr)) { + return false; + } + if (!cgFunc->GetMirModule().IsCModule()) { + return false; + } + Operand &res = insn->GetOperand(kInsnFirstOpnd); + Operand &src = insn->GetOperand(kInsnSecondOpnd); + if (res.GetSize() != src.GetSize()) { + return false; + } + Insn *insn1 = opndInfo->insn; + InsnInfo *insnInfo = opndInfo->insnInfo; + CHECK_NULL_FATAL(insnInfo); + MOperator opc1 = insn1->GetMachineOpcode(); + if ((isFp && ((opc1 == MOP_xvmuld) || (opc1 == MOP_xvmuls))) || + (!isFp && ((opc1 == MOP_xmulrrr) || (opc1 == MOP_wmulrrr)))) { + /* don't use register if it was redefined. 
*/ + OpndInfo *opndInfo1 = insnInfo->origOpnd[kInsnSecondOpnd]; + OpndInfo *opndInfo2 = insnInfo->origOpnd[kInsnThirdOpnd]; + if (((opndInfo1 != nullptr) && opndInfo1->redefined) || ((opndInfo2 != nullptr) && opndInfo2->redefined)) { + return false; + } + Operand &opnd1 = insn1->GetOperand(kInsnSecondOpnd); + Operand &opnd2 = insn1->GetOperand(kInsnThirdOpnd); + MOperator mOp = isFp ? (is64bits ? MOP_dnmul : MOP_snmul) : (is64bits ? MOP_xmnegrrr : MOP_wmnegrrr); + insn->GetBB()->ReplaceInsn(*insn, cgFunc->GetInsnBuilder()->BuildInsn(mOp, res, opnd1, opnd2)); + return true; + } + return false; +} + +bool AArch64Ebo::CombineLsrAnd(Insn &insn, const OpndInfo &opndInfo, bool is64bits, bool isFp) const { + if (opndInfo.insn == nullptr) { + return false; + } + if (!cgFunc->GetMirModule().IsCModule()) { + return false; + } + AArch64CGFunc *aarchFunc = static_cast(cgFunc); + Insn *prevInsn = opndInfo.insn; + InsnInfo *insnInfo = opndInfo.insnInfo; + if (insnInfo == nullptr) { + return false; + } + CHECK_NULL_FATAL(insnInfo); + MOperator opc1 = prevInsn->GetMachineOpcode(); + if (!isFp && ((opc1 == MOP_xlsrrri6) || (opc1 == MOP_wlsrrri5))) { + /* don't use register if it was redefined. */ + OpndInfo *opndInfo1 = insnInfo->origOpnd[kInsnSecondOpnd]; + if ((opndInfo1 != nullptr) && opndInfo1->redefined) { + return false; + } + Operand &res = insn.GetOperand(kInsnFirstOpnd); + Operand &opnd1 = prevInsn->GetOperand(kInsnSecondOpnd); + int64 immVal1 = static_cast(prevInsn->GetOperand(kInsnThirdOpnd)).GetValue(); + Operand &immOpnd1 = is64bits ? aarchFunc->CreateImmOperand(immVal1, kMaxImmVal6Bits, false) + : aarchFunc->CreateImmOperand(immVal1, kMaxImmVal5Bits, false); + int64 immVal2 = static_cast(insn.GetOperand(kInsnThirdOpnd)).GetValue(); + int64 immV2 = __builtin_ffsll(immVal2 + 1) - 1; + if (immVal1 + immV2 < k1BitSize || (is64bits && immVal1 + immV2 > k64BitSize) || + (!is64bits && immVal1 + immV2 > k32BitSize)) { + return false; + } + Operand &immOpnd2 = is64bits ? aarchFunc->CreateImmOperand(immV2, kMaxImmVal6Bits, false) + : aarchFunc->CreateImmOperand(immV2, kMaxImmVal5Bits, false); + MOperator mOp = (is64bits ? MOP_xubfxrri6i6 : MOP_wubfxrri5i5); + insn.GetBB()->ReplaceInsn(insn, cgFunc->GetInsnBuilder()->BuildInsn(mOp, res, opnd1, immOpnd1, immOpnd2)); + return true; + } + return false; +} + +/* + * extension dest, src0 + * and dest, dest, 0xff or 0xffff + * ===> if extension is >= imm then can eliminate extension + * + * and dst1, src0, imm1 + * and dst2, dst1, imm2 where imm2 is 0xff or 0xffff + * ===> and dst2, src0, imm1 if imm1 <= imm2 + */ +bool AArch64Ebo::CombineExtAnd(Insn &insn, const OpndInfo &opndInfo, bool isFp, int64 immVal) const { + if (isFp || opndInfo.insn == nullptr || !cgFunc->GetMirModule().IsCModule()) { + return false; + } + Insn *prevInsn = opndInfo.insn; + InsnInfo *insnInfo = opndInfo.insnInfo; + if (insnInfo == nullptr) { + return false; + } + CHECK_NULL_FATAL(insnInfo); + MOperator opc1 = prevInsn->GetMachineOpcode(); + if (((immVal == 0xff) && + (opc1 >= MOP_xsxtb32) && (opc1 <= MOP_xuxtw64)) || + ((immVal == 0xffff) && + (opc1 == MOP_xsxth32 || opc1 == MOP_xsxth64 || opc1 == MOP_xsxtw64 || + opc1 == MOP_xsxth32 || opc1 == MOP_xsxtw64))) { + /* don't use register if it was redefined. 
*/ + OpndInfo *opndInfo1 = insnInfo->origOpnd[kInsnSecondOpnd]; + if ((opndInfo1 != nullptr) && opndInfo1->redefined) { + return false; + } + Operand &opnd1 = prevInsn->GetOperand(kInsnSecondOpnd); + insn.SetOperand(kInsnSecondOpnd, opnd1); + return true; + } + if ((immVal == 0xff || immVal == 0xffff) && (opc1 == MOP_wandrri12 || opc1 == MOP_xandrri13)) { + OpndInfo *opndInfo1 = insnInfo->origOpnd[kInsnSecondOpnd]; + if ((opndInfo1 != nullptr) && opndInfo1->redefined) { + return false; + } + int64 prevImmVal = static_cast(prevInsn->GetOperand(kInsnThirdOpnd)).GetValue(); + if (static_cast(prevImmVal) > static_cast(immVal)) { + return false; + } + Operand &opnd1 = prevInsn->GetOperand(kInsnSecondOpnd); + insn.SetOperand(kInsnSecondOpnd, opnd1); + Operand &opnd2 = prevInsn->GetOperand(kInsnThirdOpnd); + insn.SetOperand(kInsnThirdOpnd, opnd2); + } + return false; +} + +/* Do some special pattern */ +bool AArch64Ebo::SpecialSequence(Insn &insn, const MapleVector &origInfos) { + MOperator opCode = insn.GetMachineOpcode(); + AArch64CGFunc *aarchFunc = static_cast(cgFunc); + switch (opCode) { + /* + * mov R503, R0 + * mov R0, R503 + * ==> mov R0, R0 + */ + case MOP_wmovrr: + case MOP_xmovrr: { + OpndInfo *opndInfo = origInfos[kInsnSecondOpnd]; + if (opndInfo == nullptr) { + return false; + } + Insn *prevInsn = opndInfo->insn; + if ((prevInsn != nullptr) && (prevInsn->GetMachineOpcode() == opCode) && + (prevInsn == insn.GetPreviousMachineInsn()) && + !RegistersIdentical(prevInsn->GetOperand(kInsnFirstOpnd), prevInsn->GetOperand(kInsnSecondOpnd)) && + !RegistersIdentical(insn.GetOperand(kInsnFirstOpnd), insn.GetOperand(kInsnSecondOpnd))) { + Operand ®1 = insn.GetOperand(kInsnFirstOpnd); + Operand ®2 = prevInsn->GetOperand(kInsnSecondOpnd); + Insn &newInsn = cgFunc->GetInsnBuilder()->BuildInsn(insn.GetMachineOpcode(), reg1, reg2); + insn.GetBB()->ReplaceInsn(insn, newInsn); + return true; + } + break; + } + /* + * Extension elimination. Look for load extension pair. There are two cases. 
+ * 1) extension size == load size -> change the load type or eliminate the extension + * 2) extension size > load size -> possibly eliminating the extension + * + * Example of 1) + * ldrb x1, [] or ldrb x1, [] or ldrsb x1, [] or ldrsb x1, [] + * sxtb x1, x1 zxtb x1, x1 sxtb x1, x1 zxtb x1, x1 + * ===> ldrsb x1, [] ===> ldrb x1, [] ===> ldrsb x1, [] ===> ldrb x1, [] + * mov x1, x1 mov x1, x1 mov x1, x1 mov x1, x1 + * + * Example of 2) + * ldrb x1, [] or ldrb x1, [] or ldrsb x1, [] or ldrsb x1, [] + * sxth x1, x1 zxth x1, x1 sxth x1, x1 zxth x1, x1 + * ===> ldrb x1, [] ===> ldrb x1, [] ===> ldrsb x1, [] ===> no change + * mov x1, x1 mov x1, x1 mov x1, x1 + */ + case MOP_wandrri12: + case MOP_xandrri13: { + bool is64Bits = (opCode == MOP_xandrri13); + bool doAndOpt = false; + if (static_cast(insn.GetOperand(kInsnThirdOpnd)).GetValue() == 0xff) { + doAndOpt = CombineExtensionAndLoad(&insn, origInfos, AND, is64Bits); + } + if (doAndOpt) { + return doAndOpt; + } + /* + * lsr d0, d1, #6 + * and d0, d0, #1 + * ===> ubfx d0, d1, #6, #1 + */ + int64 immValue = static_cast(insn.GetOperand(kInsnThirdOpnd)).GetValue(); + if (!beforeRegAlloc && immValue != 0 && + (static_cast(immValue) & (static_cast(immValue) + 1)) == 0) { + /* immValue is (1 << n - 1) */ + OpndInfo *opndInfo = origInfos.at(kInsnSecondOpnd); + return CombineLsrAnd(insn, *opndInfo, is64Bits, false); + } + if (beforeRegAlloc && immValue != 0 && + (static_cast(immValue) & (static_cast(immValue) + 1)) == 0) { + OpndInfo *opndInfo = origInfos.at(kInsnSecondOpnd); + return CombineExtAnd(insn, *opndInfo, false, immValue); + } + break; + } + case MOP_xsxtb32: + return CombineExtensionAndLoad(&insn, origInfos, SXTB, false); + case MOP_xsxtb64: + return CombineExtensionAndLoad(&insn, origInfos, SXTB, true); + case MOP_xsxth32: + return CombineExtensionAndLoad(&insn, origInfos, SXTH, false); + case MOP_xsxth64: + return CombineExtensionAndLoad(&insn, origInfos, SXTH, true); + case MOP_xsxtw64: + return CombineExtensionAndLoad(&insn, origInfos, SXTW, true); + case MOP_xuxtb32: + return CombineExtensionAndLoad(&insn, origInfos, ZXTB, false); + case MOP_xuxth32: + return CombineExtensionAndLoad(&insn, origInfos, ZXTH, false); + case MOP_xuxtw64: + return CombineExtensionAndLoad(&insn, origInfos, ZXTW, true); + /* + * lsl x1, x1, #3 + * add x0, x0, x1 + * ===> add x0, x0, x1, 3 + * + * mul x1, x1, x2 + * add x0, x0, x1 or add x0, x1, x0 + * ===> madd x0, x1, x2, x0 + */ + case MOP_xaddrrr: + case MOP_waddrrr: { + bool is64bits = (insn.GetOperand(kInsnFirstOpnd).GetSize() == k64BitSize); + OpndInfo *opndInfo = origInfos.at(kInsnThirdOpnd); + if ((opndInfo != nullptr) && (opndInfo->insn != nullptr)) { + Insn *insn1 = opndInfo->insn; + InsnInfo *insnInfo1 = opndInfo->insnInfo; + if (insnInfo1 == nullptr) { + return false; + } + Operand &op0 = insn.GetOperand(kInsnSecondOpnd); + MOperator opc1 = insn1->GetMachineOpcode(); + if ((opc1 == MOP_xlslrri6) || (opc1 == MOP_wlslrri5)) { + /* don't use register if it was redefined. 
*/ + if (cgFunc->GetMirModule().IsCModule()) { + /* global opt will do this pattern when is CMoudle */ + return false; + } + OpndInfo *opndInfo1 = insnInfo1->origOpnd[kInsnSecondOpnd]; + if ((opndInfo1 != nullptr) && opndInfo1->redefined) { + return false; + } + Operand &res = insn.GetOperand(kInsnFirstOpnd); + Operand &opnd1 = insn1->GetOperand(kInsnSecondOpnd); + auto &immOpnd = static_cast(insn1->GetOperand(kInsnThirdOpnd)); + uint32 xLslrriBitLen = 6; + uint32 wLslrriBitLen = 5; + Operand &shiftOpnd = aarchFunc->CreateBitShiftOperand(BitShiftOperand::kLSL, + static_cast(immOpnd.GetValue()), static_cast(( + opCode == MOP_xlslrri6) ? xLslrriBitLen : wLslrriBitLen)); + MOperator mOp = (is64bits ? MOP_xaddrrrs : MOP_waddrrrs); + insn.GetBB()->ReplaceInsn(insn, cgFunc->GetInsnBuilder()->BuildInsn(mOp, res, op0, opnd1, shiftOpnd)); + return true; + } else if ((opc1 == MOP_xmulrrr) || (opc1 == MOP_wmulrrr)) { + return CombineMultiplyAdd(&insn, insn1, insnInfo1, &op0, is64bits, false); + } + } + opndInfo = origInfos.at(kInsnSecondOpnd); + return CheckCanDoMadd(&insn, opndInfo, kInsnThirdOpnd, is64bits, false); + } + /* + * fmul d1, d1, d2 + * fadd d0, d0, d1 or add d0, d1, d0 + * ===> fmadd d0, d1, d2, d0 + */ + case MOP_dadd: + case MOP_sadd: { + if (!CGOptions::IsFastMath()) { + return false; + } + bool is64bits = (insn.GetOperand(kInsnFirstOpnd).GetSize() == k64BitSize); + OpndInfo *opndInfo = origInfos.at(kInsnSecondOpnd); + if (CheckCanDoMadd(&insn, opndInfo, kInsnThirdOpnd, is64bits, true)) { + return true; + } + opndInfo = origInfos.at(kInsnThirdOpnd); + if (CheckCanDoMadd(&insn, opndInfo, kInsnSecondOpnd, is64bits, true)) { + return true; + } + break; + } + /* + * mul x1, x1, x2 + * sub x0, x0, x1 + * ===> msub x0, x1, x2, x0 + */ + case MOP_xsubrrr: + case MOP_wsubrrr: { + bool is64bits = (insn.GetOperand(kInsnFirstOpnd).GetSize() == k64BitSize); + OpndInfo *opndInfo = origInfos.at(kInsnThirdOpnd); + if (CombineMultiplySub(&insn, opndInfo, is64bits, false)) { + return true; + } + break; + } + /* + * fmul d1, d1, d2 + * fsub d0, d0, d1 + * ===> fmsub d0, d1, d2, d0 + */ + case MOP_dsub: + case MOP_ssub: { + if (!CGOptions::IsFastMath()) { + return false; + } + bool is64bits = (insn.GetOperand(kInsnFirstOpnd).GetSize() == k64BitSize); + OpndInfo *opndInfo = origInfos.at(kInsnThirdOpnd); + if (CombineMultiplySub(&insn, opndInfo, is64bits, true)) { + return true; + } + break; + } + /* + * mul x1, x1, x2 + * neg x0, x1 + * ===> mneg x0, x1, x2 + */ + case MOP_xinegrr: + case MOP_winegrr: { + bool is64bits = (insn.GetOperand(kInsnFirstOpnd).GetSize() == k64BitSize); + OpndInfo *opndInfo = origInfos.at(kInsnSecondOpnd); + if (CombineMultiplyNeg(&insn, opndInfo, is64bits, false)) { + return true; + } + break; + } + /* + * fmul d1, d1, d2 + * fneg d0, d1 + * ===> fnmul d0, d1, d2 + */ + case MOP_wfnegrr: + case MOP_xfnegrr: { + if (!CGOptions::IsFastMath()) { + return false; + } + bool is64bits = (insn.GetOperand(kInsnFirstOpnd).GetSize() == k64BitSize); + OpndInfo *opndInfo = origInfos.at(kInsnSecondOpnd); + if (CombineMultiplyNeg(&insn, opndInfo, is64bits, true)) { + return true; + } + break; + } + case MOP_xcsetrc: + case MOP_wcsetrc: { + /* i. cmp x0, x1 + * cset w0, EQ ===> cmp x0, x1 + * cmp w0, #0 cset w0, EQ + * cset w0, NE + * + * ii. 
cmp x0, x1 + * cset w0, EQ ===> cmp x0, x1 + * cmp w0, #0 cset w0, NE + * cset w0, EQ + * + * a.< -1 : 0x20ff25e0 > < 0 > cmp(226) (opnd0: vreg:C105 class: [CC]) (opnd1: vreg:R104 class: [I]) (opnd2: + * vreg:R106 class: [I]) + * b.< -1 : 0x20ff60a0 > < 0 > cset(72) (opnd0: vreg:R101 class: [I]) (opnd1: CC: EQ) + * c.< -1* : 0x20ff3870 > < 0 > cmp(223) (opnd0: vreg:C105 class: [CC]) (opnd1: vreg:R101 class: [I]) (opnd2: + * imm:0) + * d.< * -1 : 0x20ff3908 > < 0 > cset(72) (opnd0: vreg:R107 class: [I]) (opnd1: CC: NE) + * d1.< -1 : 0x20ff3908 > < 0 > * cset(72) (opnd0: vreg:R107 class: [I]) (opnd1: CC: EQ) i, d + * ===> mov R107 R101 ii, a,b,c,d1 ===> a,b,cset Rxx + * NE, c, mov R107 Rxx + */ + auto &cond = static_cast(insn.GetOperand(kInsnSecondOpnd)); + if ((cond.GetCode() != CC_NE) && (cond.GetCode() != CC_EQ)) { + return false; + } + bool reverse = (cond.GetCode() == CC_EQ); + OpndInfo *condInfo = origInfos[kInsnSecondOpnd]; + if ((condInfo != nullptr) && condInfo->insn) { + Insn *cmp1 = condInfo->insn; + if ((cmp1->GetMachineOpcode() == MOP_xcmpri) || (cmp1->GetMachineOpcode() == MOP_wcmpri)) { + InsnInfo *cmpInfo1 = condInfo->insnInfo; + CHECK_FATAL(cmpInfo1 != nullptr, "pointor cmpInfo1 is null"); + OpndInfo *info0 = cmpInfo1->origOpnd[kInsnSecondOpnd]; + /* if R101 was not redefined. */ + if ((info0 != nullptr) && (info0->insnInfo != nullptr) && (info0->insn != nullptr) && + (reverse || !info0->redefined) && cmp1->GetOperand(kInsnThirdOpnd).IsImmediate()) { + Insn *csetInsn = info0->insn; + MOperator opc1 = csetInsn->GetMachineOpcode(); + if (((opc1 == MOP_xcsetrc) || (opc1 == MOP_wcsetrc)) && + static_cast(cmp1->GetOperand(kInsnThirdOpnd)).IsZero()) { + CondOperand &cond1 = static_cast(csetInsn->GetOperand(kInsnSecondOpnd)); + if (!CheckCondCode(cond1)) { + return false; + } + if (EBO_DUMP) { + LogInfo::MapleLogger() << "< === do specical condition optimization, replace insn ===> \n"; + insn.Dump(); + } + Operand *result = &insn.GetOperand(kInsnFirstOpnd); + CHECK_FATAL(result != nullptr, "pointor result is null"); + uint32 size = result->GetSize(); + if (reverse) { + /* After regalloction, we can't create a new register. */ + if (!beforeRegAlloc) { + return false; + } + AArch64CGFunc *aarFunc = static_cast(cgFunc); + Operand &r = aarFunc->CreateRegisterOperandOfType(static_cast(result)->GetRegisterType(), + size / kBitsPerByte); + /* after generate a new vreg, check if the size of DataInfo is big enough */ + EnlargeSpaceForLA(*csetInsn); + CondOperand &cond2 = aarFunc->GetCondOperand(GetReverseCond(cond1)); + Operand &rflag = aarFunc->GetOrCreateRflag(); + Insn &newCset = cgFunc->GetInsnBuilder()->BuildInsn( + result->GetSize() == k64BitSize ? MOP_xcsetrc : MOP_wcsetrc, r, cond2, rflag); + /* new_cset use the same cond as cset_insn. */ + IncRef(*info0->insnInfo->origOpnd[kInsnSecondOpnd]); + csetInsn->GetBB()->InsertInsnAfter(*csetInsn, newCset); + MOperator mOp = (result->GetSize() == k64BitSize ? MOP_xmovrr : MOP_wmovrr); + Insn &newInsn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, *result, r); + insn.GetBB()->ReplaceInsn(insn, newInsn); + if (EBO_DUMP) { + LogInfo::MapleLogger() << "< === with new insn ===> \n"; + newInsn.Dump(); + } + } else { + Operand *result1 = &csetInsn->GetOperand(kInsnFirstOpnd); + MOperator mOp = ((result->GetSize() == k64BitSize) ? 
MOP_xmovrr : MOP_wmovrr); + Insn &newInsn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, *result, *result1); + insn.GetBB()->ReplaceInsn(insn, newInsn); + if (EBO_DUMP) { + LogInfo::MapleLogger() << "< === with new insn ===> \n"; + newInsn.Dump(); + } + } + return true; + } + } + } + } + } /* end case MOP_wcsetrc */ + [[clang::fallthrough]]; + default: + break; + } + return false; +} + +/* + * *iii. mov w16, v10.s[1] // FMOV from simd 105 ---> replace_insn + * mov w1, w16 ----->insn + * ==> + * mov w1, v10.s[1] + */ +bool AArch64Ebo::IsMovToSIMDVmov(Insn &insn, const Insn &replaceInsn) const { + if (insn.GetMachineOpcode() == MOP_wmovrr && replaceInsn.GetMachineOpcode() == MOP_xvmovrv) { + insn.SetMOP(AArch64CG::kMd[replaceInsn.GetMachineOpcode()]); + return true; + } + return false; +} + +bool AArch64Ebo::IsPseudoRet(Insn &insn) const { + MOperator mop = insn.GetMachineOpcode(); + if (mop == MOP_pseudo_ret_int || mop == MOP_pseudo_ret_float) { + return true; + } + return false; +} + +bool AArch64Ebo::ChangeLdrMop(Insn &insn, const Operand &opnd) const { + ASSERT(insn.IsLoad(), "expect insn is load in ChangeLdrMop"); + ASSERT(opnd.IsRegister(), "expect opnd is a register in ChangeLdrMop"); + + const RegOperand *regOpnd = static_cast(&opnd); + if (static_cast(insn.GetOperand(kInsnFirstOpnd)).GetRegisterType() != regOpnd->GetRegisterType()) { + return false; + } + + if (static_cast(insn.GetOperand(kInsnSecondOpnd)).GetIndexRegister()) { + return false; + } + + bool bRet = true; + if (regOpnd->GetRegisterType() == kRegTyFloat) { + switch (insn.GetMachineOpcode()) { + case MOP_wldrb: + insn.SetMOP(AArch64CG::kMd[MOP_bldr]); + break; + case MOP_wldrh: + insn.SetMOP(AArch64CG::kMd[MOP_hldr]); + break; + case MOP_wldr: + insn.SetMOP(AArch64CG::kMd[MOP_sldr]); + break; + case MOP_xldr: + insn.SetMOP(AArch64CG::kMd[MOP_dldr]); + break; + case MOP_wldli: + insn.SetMOP(AArch64CG::kMd[MOP_sldli]); + break; + case MOP_xldli: + insn.SetMOP(AArch64CG::kMd[MOP_dldli]); + break; + case MOP_wldrsb: + case MOP_wldrsh: + default: + bRet = false; + break; + } + } else if (regOpnd->GetRegisterType() == kRegTyInt) { + switch (insn.GetMachineOpcode()) { + case MOP_bldr: + insn.SetMOP(AArch64CG::kMd[MOP_wldrb]); + break; + case MOP_hldr: + insn.SetMOP(AArch64CG::kMd[MOP_wldrh]); + break; + case MOP_sldr: + insn.SetMOP(AArch64CG::kMd[MOP_wldr]); + break; + case MOP_dldr: + insn.SetMOP(AArch64CG::kMd[MOP_xldr]); + break; + case MOP_sldli: + insn.SetMOP(AArch64CG::kMd[MOP_wldli]); + break; + case MOP_dldli: + insn.SetMOP(AArch64CG::kMd[MOP_xldli]); + break; + default: + bRet = false; + break; + } + } else { + ASSERT(false, "Internal error."); + } + return bRet; +} +} /* namespace maplebe */ diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_emitter.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_emitter.cpp new file mode 100644 index 0000000000000000000000000000000000000000..0008e9443a8f7cc9aed0e397bd0e8675f6e918b6 --- /dev/null +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_emitter.cpp @@ -0,0 +1,2163 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "aarch64_emitter.h" +#include +#include "aarch64_cgfunc.h" +#include "aarch64_cg.h" +#include "metadata_layout.h" +#include "cfi.h" +#include "dbg.h" + +namespace { +using namespace maple; +const std::unordered_set kJniNativeFuncList = { + "Landroid_2Fos_2FParcel_3B_7CnativeWriteString_7C_28JLjava_2Flang_2FString_3B_29V_native", + "Landroid_2Fos_2FParcel_3B_7CnativeReadString_7C_28J_29Ljava_2Flang_2FString_3B_native", + "Landroid_2Fos_2FParcel_3B_7CnativeWriteInt_7C_28JI_29V_native", + "Landroid_2Fos_2FParcel_3B_7CnativeReadInt_7C_28J_29I_native", + "Landroid_2Fos_2FParcel_3B_7CnativeWriteInterfaceToken_7C_28JLjava_2Flang_2FString_3B_29V_native", + "Landroid_2Fos_2FParcel_3B_7CnativeEnforceInterface_7C_28JLjava_2Flang_2FString_3B_29V_native" +}; +constexpr uint32 kBinSearchInsnCount = 56; +// map func name to pair +using Func2CodeInsnMap = std::unordered_map>; +Func2CodeInsnMap func2CodeInsnMap { + { "Ljava_2Flang_2FString_3B_7ChashCode_7C_28_29I", + { "maple/mrt/codetricks/arch/arm64/hashCode.s", 29 } }, + { "Ljava_2Flang_2FString_3B_7Cequals_7C_28Ljava_2Flang_2FObject_3B_29Z", + { "maple/mrt/codetricks/arch/arm64/stringEquals.s", 50 } } +}; +constexpr uint32 kQuadInsnCount = 2; + +void GetMethodLabel(const std::string &methodName, std::string &methodLabel) { + methodLabel = ".Lmethod_desc." + methodName; +} +} + +namespace maplebe { +using namespace maple; + +void AArch64AsmEmitter::EmitRefToMethodDesc(FuncEmitInfo &funcEmitInfo, Emitter &emitter) { + CGFunc &cgFunc = funcEmitInfo.GetCGFunc(); + if (!cgFunc.GetFunction().IsJava()) { + return; + } + std::string methodDescLabel; + GetMethodLabel(cgFunc.GetFunction().GetName(), methodDescLabel); + (void)emitter.Emit("\t.word " + methodDescLabel + "-.\n"); + emitter.IncreaseJavaInsnCount(); +} + +void AArch64AsmEmitter::EmitRefToMethodInfo(FuncEmitInfo &funcEmitInfo, Emitter &emitter) { + CGFunc &cgFunc = funcEmitInfo.GetCGFunc(); + if (cgFunc.GetFunction().GetModule()->IsJavaModule()) { + std::string labelName = ".Label.name." + cgFunc.GetFunction().GetName(); + (void)emitter.Emit("\t.word " + labelName + " - .\n"); + } +} + +/* + * emit java method description which contains address and size of local reference area + * as well as method metadata. 
+ */ +void AArch64AsmEmitter::EmitMethodDesc(FuncEmitInfo &funcEmitInfo, Emitter &emitter) { + CGFunc &cgFunc = funcEmitInfo.GetCGFunc(); + if (!cgFunc.GetFunction().IsJava()) { + return; + } + (void)emitter.Emit("\t.section\t.rodata\n"); + (void)emitter.Emit("\t.align\t2\n"); + std::string methodInfoLabel; + GetMethodLabel(cgFunc.GetFunction().GetName(), methodInfoLabel); + (void)emitter.Emit(methodInfoLabel + ":\n"); + EmitRefToMethodInfo(funcEmitInfo, emitter); + /* local reference area */ + AArch64MemLayout *memLayout = static_cast(cgFunc.GetMemlayout()); + int32 refOffset = memLayout->GetRefLocBaseLoc(); + uint32 refNum = memLayout->GetSizeOfRefLocals() / kOffsetAlign; + /* for ea usage */ + AArch64CGFunc &aarchCGFunc = static_cast(cgFunc); + IntrinsiccallNode *cleanEANode = aarchCGFunc.GetCleanEANode(); + if (cleanEANode != nullptr) { + refNum += static_cast(cleanEANode->NumOpnds()); + refOffset -= static_cast(cleanEANode->NumOpnds() * kIntregBytelen); + } + (void)emitter.Emit("\t.short ").Emit(refOffset).Emit("\n"); + (void)emitter.Emit("\t.short ").Emit(refNum).Emit("\n"); +} + +/* the fast_exception_handling lsda */ +void AArch64AsmEmitter::EmitFastLSDA(FuncEmitInfo &funcEmitInfo) { + CGFunc &cgFunc = funcEmitInfo.GetCGFunc(); + AArch64CGFunc &aarchCGFunc = static_cast(cgFunc); + CG *currCG = cgFunc.GetCG(); + + Emitter *emitter = currCG->GetEmitter(); + PUIdx pIdx = currCG->GetMIRModule()->CurFunction()->GetPuidx(); + const std::string &idx = strdup(std::to_string(pIdx).c_str()); + /* + * .word 0xFFFFFFFF + * .word .Label.LTest_3B_7C_3Cinit_3E_7C_28_29V3-func_start_label + */ + (void)emitter->Emit("\t.word 0xFFFFFFFF\n"); + (void)emitter->Emit("\t.word .L." + idx + "__"); + if (aarchCGFunc.NeedCleanup()) { + emitter->Emit(cgFunc.GetCleanupLabel()->GetLabelIdx()); + } else { + ASSERT(!cgFunc.GetExitBBsVec().empty(), "exitbbsvec is empty in AArch64AsmEmitter::EmitFastLSDA"); + emitter->Emit(cgFunc.GetExitBB(0)->GetLabIdx()); + } + emitter->Emit("-.L." 
+ idx + "__") + .Emit(cgFunc.GetStartLabel()->GetLabelIdx()) + .Emit("\n"); + emitter->IncreaseJavaInsnCount(); +} + +/* the normal gcc_except_table */ +void AArch64AsmEmitter::EmitFullLSDA(FuncEmitInfo &funcEmitInfo) { + CGFunc &cgFunc = funcEmitInfo.GetCGFunc(); + AArch64CGFunc &aarchCGFunc = static_cast(cgFunc); + CG *currCG = cgFunc.GetCG(); + EHFunc *ehFunc = cgFunc.GetEHFunc(); + Emitter *emitter = currCG->GetEmitter(); + /* emit header */ + emitter->Emit("\t.align 3\n"); + emitter->Emit("\t.section .gcc_except_table,\"a\",@progbits\n"); + emitter->Emit("\t.align 3\n"); + /* emit LSDA header */ + LSDAHeader *lsdaHeader = ehFunc->GetLSDAHeader(); + emitter->EmitStmtLabel(lsdaHeader->GetLSDALabel()->GetLabelIdx()); + emitter->Emit("\t.byte ").Emit(lsdaHeader->GetLPStartEncoding()).Emit("\n"); + emitter->Emit("\t.byte ").Emit(lsdaHeader->GetTTypeEncoding()).Emit("\n"); + emitter->Emit("\t.uleb128 "); + emitter->EmitLabelPair(lsdaHeader->GetTTypeOffset()); + emitter->EmitStmtLabel(lsdaHeader->GetTTypeOffset().GetStartOffset()->GetLabelIdx()); + /* emit call site table */ + emitter->Emit("\t.byte ").Emit(lsdaHeader->GetCallSiteEncoding()).Emit("\n"); + /* callsite table size */ + emitter->Emit("\t.uleb128 "); + emitter->EmitLabelPair(ehFunc->GetLSDACallSiteTable()->GetCSTable()); + /* callsite start */ + emitter->EmitStmtLabel(ehFunc->GetLSDACallSiteTable()->GetCSTable().GetStartOffset()->GetLabelIdx()); + ehFunc->GetLSDACallSiteTable()->SortCallSiteTable([&aarchCGFunc](const LSDACallSite *a, const LSDACallSite *b) { + CHECK_FATAL(a != nullptr, "nullptr check"); + CHECK_FATAL(b != nullptr, "nullptr check"); + LabelIDOrder id1 = aarchCGFunc.GetLabelOperand(a->csStart.GetEndOffset()->GetLabelIdx())->GetLabelOrder(); + LabelIDOrder id2 = aarchCGFunc.GetLabelOperand(b->csStart.GetEndOffset()->GetLabelIdx())->GetLabelOrder(); + /* id1 and id2 should not be default value -1u */ + CHECK_FATAL(id1 != 0xFFFFFFFF, "illegal label order assigned"); + CHECK_FATAL(id2 != 0xFFFFFFFF, "illegal label order assigned"); + return id1 < id2; + }); + const MapleVector &callSiteTable = ehFunc->GetLSDACallSiteTable()->GetCallSiteTable(); + for (size_t i = 0; i < callSiteTable.size(); ++i) { + LSDACallSite *lsdaCallSite = callSiteTable[i]; + emitter->Emit("\t.uleb128 "); + emitter->EmitLabelPair(lsdaCallSite->csStart); + + emitter->Emit("\t.uleb128 "); + emitter->EmitLabelPair(lsdaCallSite->csLength); + + if (lsdaCallSite->csLandingPad.GetStartOffset()) { + emitter->Emit("\t.uleb128 "); + emitter->EmitLabelPair(lsdaCallSite->csLandingPad); + } else { + ASSERT(lsdaCallSite->csAction == 0, "csAction error!"); + emitter->Emit("\t.uleb128 "); + if (aarchCGFunc.NeedCleanup()) { + /* if landing pad is 0, we emit this call site as cleanup code */ + LabelPair cleaupCode; + cleaupCode.SetStartOffset(cgFunc.GetStartLabel()); + cleaupCode.SetEndOffset(cgFunc.GetCleanupLabel()); + emitter->EmitLabelPair(cleaupCode); + } else if (cgFunc.GetFunction().IsJava()) { + ASSERT(!cgFunc.GetExitBBsVec().empty(), "exitbbsvec is empty in AArch64Emitter::EmitFullLSDA"); + PUIdx pIdx = cgFunc.GetMirModule().CurFunction()->GetPuidx(); + const std::string &idx = strdup(std::to_string(pIdx).c_str()); + (void)emitter->Emit(".L." + idx).Emit("__").Emit(cgFunc.GetExitBB(0)->GetLabIdx()); + (void)emitter->Emit(" - .L." 
+ idx).Emit("__").Emit(cgFunc.GetStartLabel()->GetLabelIdx()).Emit("\n"); + } else { + emitter->Emit("0\n"); + } + } + emitter->Emit("\t.uleb128 ").Emit(lsdaCallSite->csAction).Emit("\n"); + } + + /* + * quick hack: insert a call site entry for the whole function body. + * this will hand in any pending (uncaught) exception to its caller. Note that + * __gxx_personality_v0 in libstdc++ is coded so that if exception table exists, + * the call site table must have an entry for any possibly raised exception, + * otherwise __cxa_call_terminate will be invoked immediately, thus the caller + * does not get the chance to take charge. + */ + if (aarchCGFunc.NeedCleanup() || cgFunc.GetFunction().IsJava()) { + /* call site for clean-up */ + LabelPair funcStart; + funcStart.SetStartOffset(cgFunc.GetStartLabel()); + funcStart.SetEndOffset(cgFunc.GetStartLabel()); + emitter->Emit("\t.uleb128 "); + emitter->EmitLabelPair(funcStart); + LabelPair funcLength; + funcLength.SetStartOffset(cgFunc.GetStartLabel()); + funcLength.SetEndOffset(cgFunc.GetCleanupLabel()); + emitter->Emit("\t.uleb128 "); + emitter->EmitLabelPair(funcLength); + LabelPair cleaupCode; + cleaupCode.SetStartOffset(cgFunc.GetStartLabel()); + cleaupCode.SetEndOffset(cgFunc.GetCleanupLabel()); + emitter->Emit("\t.uleb128 "); + if (aarchCGFunc.NeedCleanup()) { + emitter->EmitLabelPair(cleaupCode); + } else { + ASSERT(!cgFunc.GetExitBBsVec().empty(), "exitbbsvec is empty in AArch64AsmEmitter::EmitFullLSDA"); + PUIdx pIdx = cgFunc.GetMirModule().CurFunction()->GetPuidx(); + const std::string &idx = strdup(std::to_string(pIdx).c_str()); + (void)emitter->Emit(".L." + idx).Emit("__").Emit(cgFunc.GetExitBB(0)->GetLabIdx()); + (void)emitter->Emit(" - .L." + idx).Emit("__").Emit(cgFunc.GetStartLabel()->GetLabelIdx()).Emit("\n"); + } + emitter->Emit("\t.uleb128 0\n"); + if (!cgFunc.GetFunction().IsJava()) { + /* call site for stack unwind */ + LabelPair unwindStart; + unwindStart.SetStartOffset(cgFunc.GetStartLabel()); + unwindStart.SetEndOffset(cgFunc.GetCleanupLabel()); + emitter->Emit("\t.uleb128 "); + emitter->EmitLabelPair(unwindStart); + LabelPair unwindLength; + unwindLength.SetStartOffset(cgFunc.GetCleanupLabel()); + unwindLength.SetEndOffset(cgFunc.GetEndLabel()); + emitter->Emit("\t.uleb128 "); + emitter->EmitLabelPair(unwindLength); + emitter->Emit("\t.uleb128 0\n"); + emitter->Emit("\t.uleb128 0\n"); + } + } + /* callsite end label */ + emitter->EmitStmtLabel(ehFunc->GetLSDACallSiteTable()->GetCSTable().GetEndOffset()->GetLabelIdx()); + /* tt */ + const LSDAActionTable *lsdaActionTable = ehFunc->GetLSDAActionTable(); + for (size_t i = 0; i < lsdaActionTable->Size(); ++i) { + LSDAAction *lsdaAction = lsdaActionTable->GetActionTable().at(i); + emitter->Emit("\t.byte ").Emit(lsdaAction->GetActionIndex()).Emit("\n"); + emitter->Emit("\t.byte ").Emit(lsdaAction->GetActionFilter()).Emit("\n"); + } + emitter->Emit("\t.align 3\n"); + for (size_t i = ehFunc->GetEHTyTableSize(); i > 0; i--) { + MIRType *mirType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(ehFunc->GetEHTyTableMember(i - 1)); + MIRTypeKind typeKind = mirType->GetKind(); + if (((typeKind == kTypeScalar) && (mirType->GetPrimType() == PTY_void)) || (typeKind == kTypeStructIncomplete) || + (typeKind == kTypeInterfaceIncomplete)) { + continue; + } + CHECK_FATAL((typeKind == kTypeClass) || (typeKind == kTypeClassIncomplete), "NYI"); + const std::string &tyName = GlobalTables::GetStrTable().GetStringFromStrIdx(mirType->GetNameStrIdx()); + std::string dwRefString(".LDW.ref."); + 
dwRefString += CLASSINFO_PREFIX_STR; + dwRefString += tyName; + dwRefString += " - ."; + emitter->Emit("\t.4byte " + dwRefString + "\n"); + } + /* end of lsda */ + emitter->EmitStmtLabel(lsdaHeader->GetTTypeOffset().GetEndOffset()->GetLabelIdx()); +} + +void AArch64AsmEmitter::EmitBBHeaderLabel(FuncEmitInfo &funcEmitInfo, const std::string &name, LabelIdx labIdx) { + (void)name; + CGFunc &cgFunc = funcEmitInfo.GetCGFunc(); + AArch64CGFunc &aarchCGFunc = static_cast(cgFunc); + CG *currCG = cgFunc.GetCG(); + Emitter &emitter = *(currCG->GetEmitter()); + LabelOperand &label = aarchCGFunc.GetOrCreateLabelOperand(labIdx); + /* if label order is default value -1, set new order */ + if (label.GetLabelOrder() == 0xFFFFFFFF) { + label.SetLabelOrder(currCG->GetLabelOrderCnt()); + currCG->IncreaseLabelOrderCnt(); + } + PUIdx pIdx = currCG->GetMIRModule()->CurFunction()->GetPuidx(); + char *puIdx = strdup(std::to_string(pIdx).c_str()); + const std::string &labelName = cgFunc.GetFunction().GetLabelTab()->GetName(labIdx); + if (currCG->GenerateVerboseCG()) { + (void)emitter.Emit(".L.").Emit(puIdx).Emit("__").Emit(labIdx).Emit(":\t//label order ").Emit(label.GetLabelOrder()); + if (!labelName.empty() && labelName.at(0) != '@') { + /* If label name has @ as its first char, it is not from MIR */ + (void)emitter.Emit(", MIR: @").Emit(labelName).Emit("\n"); + } else { + (void)emitter.Emit("\n"); + } + } else { + (void)emitter.Emit(".L.").Emit(puIdx).Emit("__").Emit(labIdx).Emit(":\n"); + } + free(puIdx); + puIdx = nullptr; +} + +void AArch64AsmEmitter::EmitJavaInsnAddr(FuncEmitInfo &funcEmitInfo) { + CGFunc &cgFunc = funcEmitInfo.GetCGFunc(); + if (cgFunc.GetFunction().IsJava()) { + Emitter *emitter = cgFunc.GetCG()->GetEmitter(); + /* emit a comment of current address from the begining of java text section */ + std::stringstream ss; + ss << "\n\t// addr: 0x" << std::hex << (emitter->GetJavaInsnCount() * kInsnSize) << "\n"; + cgFunc.GetCG()->GetEmitter()->Emit(ss.str()); + } +} + +void AArch64AsmEmitter::RecordRegInfo(FuncEmitInfo &funcEmitInfo) const { + if (!CGOptions::DoIPARA() || funcEmitInfo.GetCGFunc().GetFunction().IsJava()) { + return; + } + CGFunc &cgFunc = funcEmitInfo.GetCGFunc(); + AArch64CGFunc &aarchCGFunc = static_cast(cgFunc); + + std::set referedRegs; + MIRFunction &mirFunc = cgFunc.GetFunction(); + FOR_ALL_BB_REV(bb, &aarchCGFunc) { + FOR_BB_INSNS_REV(insn, bb) { + if (!insn->IsMachineInstruction()) { + continue; + } + if (insn->IsCall() || insn->IsTailCall()) { + auto *targetOpnd = insn->GetCallTargetOperand(); + bool safeCheck = false; + CHECK_FATAL(targetOpnd != nullptr, "target is null in AArch64Emitter::IsCallToFunctionThatNeverReturns"); + if (targetOpnd->IsFuncNameOpnd()) { + FuncNameOperand *target = static_cast(targetOpnd); + const MIRSymbol *funcSt = target->GetFunctionSymbol(); + ASSERT(funcSt->GetSKind() == kStFunc, "funcst must be a function name symbol"); + MIRFunction *func = funcSt->GetFunction(); + if (func != nullptr && func->IsReferedRegsValid()) { + safeCheck = true; + for (auto preg : func->GetReferedRegs()) { + referedRegs.insert(preg); + } + } + } + if (!safeCheck) { + mirFunc.SetReferedRegsValid(false); + return; + } + } + if (referedRegs.size() == kMaxRegNum) { + break; + } + uint32 opndNum = insn->GetOperandSize(); + const InsnDesc *md = &AArch64CG::kMd[insn->GetMachineOpcode()]; + for (uint32 i = 0; i < opndNum; ++i) { + if (insn->GetMachineOpcode() == MOP_asm) { + if (i == kAsmOutputListOpnd || i == kAsmClobberListOpnd) { + for (auto &opnd : 
static_cast(insn->GetOperand(i)).GetOperands()) { + if (opnd->IsRegister()) { + referedRegs.insert(static_cast(opnd)->GetRegisterNumber()); + } + } + } + continue; + } + Operand &opnd = insn->GetOperand(i); + if (opnd.IsList()) { + /* all use, skip it */ + } else if (opnd.IsMemoryAccessOperand()) { + auto &memOpnd = static_cast(opnd); + RegOperand *base = memOpnd.GetBaseRegister(); + if (!memOpnd.IsIntactIndexed()) { + referedRegs.insert(base->GetRegisterNumber()); + } + } else if (opnd.IsRegister()) { + RegType regType = static_cast(opnd).GetRegisterType(); + if (regType == kRegTyCc || regType == kRegTyVary) { + continue; + } + bool isDef = md->GetOpndDes(i)->IsRegDef(); + if (isDef) { + referedRegs.insert(static_cast(opnd).GetRegisterNumber()); + } + } + } + } + } + (void)referedRegs.insert(R16); + (void)referedRegs.insert(R17); + (void)referedRegs.insert(R18); + mirFunc.SetReferedRegsValid(true); +#ifdef DEBUG + for (auto reg : referedRegs) { + if (reg > kMaxRegNum) { + ASSERT(0, "unexpected preg"); + } + } +#endif + mirFunc.CopyReferedRegs(referedRegs); +} + +/* if the last insn is call, then insert nop */ +static void InsertNopAfterLastCall(AArch64CGFunc &cgFunc) { + bool found = false; + FOR_ALL_BB_REV(bb, &cgFunc) { + FOR_BB_INSNS_REV(insn, bb) { + if (insn->IsMachineInstruction()) { + if (insn->IsCall()) { + Insn &newInsn = cgFunc.GetInsnBuilder()->BuildInsn(MOP_nop); + bb->InsertInsnAfter(*insn, newInsn); + } + found = true; + break; + } + } + if (found) { + break; + } + } +} + +void AArch64AsmEmitter::Run(FuncEmitInfo &funcEmitInfo) { + CGFunc &cgFunc = funcEmitInfo.GetCGFunc(); + AArch64CGFunc &aarchCGFunc = static_cast(cgFunc); + CG *currCG = cgFunc.GetCG(); + /* emit header of this function */ + Emitter &emitter = *currCG->GetEmitter(); + // insert for __cxx_global_var_init + if (cgFunc.GetName() == "__cxx_global_var_init") { + (void)emitter.Emit("\t.section\t.init_array,\"aw\"\n"); + (void)emitter.Emit("\t.quad\t").Emit(cgFunc.GetName()).Emit("\n"); + } + if (cgFunc.GetFunction().GetAttr(FUNCATTR_initialization)) { + (void)emitter.Emit("\t.section\t.init_array,\"aw\"\n"); + (void)emitter.Emit("\t.quad\t").Emit(cgFunc.GetName()).Emit("\n"); + } + if (cgFunc.GetFunction().GetAttr(FUNCATTR_termination)) { + (void)emitter.Emit("\t.section\t.fini_array,\"aw\"\n"); + (void)emitter.Emit("\t.quad\t").Emit(cgFunc.GetName()).Emit("\n"); + } + (void)emitter.Emit("\n"); + EmitMethodDesc(funcEmitInfo, emitter); + /* emit java code to the java section. */ + if (cgFunc.GetFunction().IsJava()) { + std::string sectionName = namemangler::kMuidJavatextPrefixStr; + (void)emitter.Emit("\t.section ." 
+ sectionName + ",\"ax\"\n"); + } else if (cgFunc.GetFunction().GetAttr(FUNCATTR_section)) { + const std::string §ionName = cgFunc.GetFunction().GetAttrs().GetPrefixSectionName(); + (void)emitter.Emit("\t.section " + sectionName).Emit(",\"ax\",@progbits\n"); + } else if (CGOptions::IsFunctionSections()) { + (void)emitter.Emit("\t.section .text.").Emit(cgFunc.GetName()).Emit(",\"ax\",@progbits\n"); + } else if (cgFunc.GetFunction().GetAttr(FUNCATTR_constructor_priority)) { + (void)emitter.Emit("\t.section\t.text.startup").Emit(",\"ax\",@progbits\n"); + } else { + (void)emitter.Emit("\t.text\n"); + } + if (CGOptions::GetFuncAlignPow() != 0) { + (void)emitter.Emit("\t.align ").Emit(CGOptions::GetFuncAlignPow()).Emit("\n"); + } + MIRSymbol *funcSt = GlobalTables::GetGsymTable().GetSymbolFromStidx(cgFunc.GetFunction().GetStIdx().Idx()); + const std::string &funcName = std::string(cgFunc.GetShortFuncName().c_str()); + + // manually replace function with optimized assembly language + if (CGOptions::IsReplaceASM()) { + auto it = func2CodeInsnMap.find(funcSt->GetName()); + if (it != func2CodeInsnMap.end()) { + std::string optFile = it->second.first; + struct stat buffer; + if (stat(optFile.c_str(), &buffer) == 0) { + std::ifstream codetricksFd(optFile); + if (!codetricksFd.is_open()) { + ERR(kLncErr, " %s open failed!", optFile.c_str()); + LogInfo::MapleLogger() << "wrong" << '\n'; + } else { + std::string contend; + while (getline(codetricksFd, contend)) { + (void)emitter.Emit(contend + "\n"); + } + } + } + emitter.IncreaseJavaInsnCount(it->second.second); +#ifdef EMIT_INSN_COUNT + EmitJavaInsnAddr(funcEmitInfo); +#endif /* ~EMIT_INSN_COUNT */ + return; + } + } + std::string funcStName = funcSt->GetName(); + if (funcSt->GetFunction()->GetAttr(FUNCATTR_weak)) { + (void)emitter.Emit("\t.weak\t" + funcStName + "\n"); + if (currCG->GetMIRModule()->IsJavaModule()) { + (void)emitter.Emit("\t.hidden\t" + funcStName + "\n"); + } + } else if (funcSt->GetFunction()->GetAttr(FUNCATTR_local)) { + (void)emitter.Emit("\t.local\t" + funcStName + "\n"); + } else if (funcSt->GetFunction() && (!funcSt->GetFunction()->IsJava()) && funcSt->GetFunction()->IsStatic()) { + // nothing + } else { + /* should refer to function attribute */ + (void)emitter.Emit("\t.globl\t").Emit(funcSt->GetName()).Emit("\n"); + if (!currCG->GetMIRModule()->IsCModule()) { + (void)emitter.Emit("\t.hidden\t").Emit(funcSt->GetName()).Emit("\n"); + } + } + (void)emitter.Emit("\t.type\t" + funcStName + ", %function\n"); + /* add these messege , solve the simpleperf tool error */ + EmitRefToMethodDesc(funcEmitInfo, emitter); + (void)emitter.Emit(funcStName + ":\n"); + + if (cgFunc.GetFunction().IsJava()) { + InsertNopAfterLastCall(aarchCGFunc); + } + + RecordRegInfo(funcEmitInfo); + + /* emit instructions */ + FOR_ALL_BB(bb, &aarchCGFunc) { + if (bb->IsUnreachable()) { + continue; + } + if (bb == aarchCGFunc.GetFirstBB() && bb->IsEmpty()) { + continue; + } + if (currCG->GenerateVerboseCG()) { + (void)emitter.Emit("# freq:").Emit(bb->GetFrequency()).Emit("\n"); + } + /* emit bb headers */ + if (bb->GetLabIdx() != MIRLabelTable::GetDummyLabel()) { + if (aarchCGFunc.GetMirModule().IsCModule() && bb->IsBBNeedAlign() && bb->GetAlignNopNum() != kAlignMovedFlag) { + uint32 power = bb->GetAlignPower(); + (void)emitter.Emit("\t.p2align ").Emit(power).Emit("\n"); + } + EmitBBHeaderLabel(funcEmitInfo, funcName, bb->GetLabIdx()); + } + + FOR_BB_INSNS(insn, bb) { + if (insn->IsCfiInsn()) { + EmitAArch64CfiInsn(emitter, *insn); + } else if (insn->IsDbgInsn()) 
{ + EmitAArch64DbgInsn(funcEmitInfo, emitter, *insn); + } else { + EmitAArch64Insn(emitter, *insn); + } + } + } + if (CGOptions::IsMapleLinker()) { + /* Emit a label for calculating method size */ + (void)emitter.Emit(".Label.end." + funcStName + ":\n"); + } + (void)emitter.Emit("\t.size\t" + funcStName + ", .-").Emit(funcStName + "\n"); + + auto constructorAttr = funcSt->GetFunction()->GetAttrs().GetConstructorPriority(); + if (constructorAttr != -1) { + (void)emitter.Emit("\t.section\t.init_array." + std::to_string(constructorAttr) + ",\"aw\"\n"); + (void)emitter.Emit("\t.align 3\n"); + (void)emitter.Emit("\t.xword\t" + funcStName + "\n"); + } + + EHFunc *ehFunc = cgFunc.GetEHFunc(); + /* emit LSDA */ + if (cgFunc.GetFunction().IsJava() && (ehFunc != nullptr)) { + if (!cgFunc.GetHasProEpilogue()) { + (void)emitter.Emit("\t.word 0x55555555\n"); + emitter.IncreaseJavaInsnCount(); + } else if (ehFunc->NeedFullLSDA()) { + LSDAHeader *lsdaHeader = ehFunc->GetLSDAHeader(); + PUIdx pIdx = emitter.GetCG()->GetMIRModule()->CurFunction()->GetPuidx(); + const std::string &idx = strdup(std::to_string(pIdx).c_str()); + /* .word .Label.lsda_label-func_start_label */ + (void)emitter.Emit("\t.word .L." + idx).Emit("__").Emit(lsdaHeader->GetLSDALabel()->GetLabelIdx()); + (void)emitter.Emit("-.L." + idx).Emit("__").Emit(cgFunc.GetStartLabel()->GetLabelIdx()).Emit("\n"); + emitter.IncreaseJavaInsnCount(); + } else if (ehFunc->NeedFastLSDA()) { + EmitFastLSDA(funcEmitInfo); + } + } + + for (auto &it : cgFunc.GetEmitStVec()) { + /* emit switch table only here */ + MIRSymbol *st = it.second; + ASSERT(st->IsReadOnly(), "NYI"); + (void)emitter.Emit("\n"); + (void)emitter.Emit("\t.align 3\n"); + emitter.IncreaseJavaInsnCount(0, true); /* just aligned */ + (void)emitter.Emit(st->GetName() + ":\n"); + MIRAggConst *arrayConst = safe_cast(st->GetKonst()); + CHECK_FATAL(arrayConst != nullptr, "null ptr check"); + PUIdx pIdx = cgFunc.GetMirModule().CurFunction()->GetPuidx(); + char *idx = strdup(std::to_string(pIdx).c_str()); + for (size_t i = 0; i < arrayConst->GetConstVec().size(); i++) { + MIRLblConst *lblConst = safe_cast(arrayConst->GetConstVecItem(i)); + CHECK_FATAL(lblConst != nullptr, "null ptr check"); + (void)emitter.Emit("\t.quad\t.L.").Emit(idx).Emit("__").Emit(lblConst->GetValue()); + (void)emitter.Emit(" - " + st->GetName() + "\n"); + emitter.IncreaseJavaInsnCount(kQuadInsnCount); + } + free(idx); + idx = nullptr; + } + /* insert manually optimized assembly language */ + if (funcSt->GetName() == "Landroid_2Futil_2FContainerHelpers_3B_7C_3Cinit_3E_7C_28_29V") { + std::string optFile = "maple/mrt/codetricks/arch/arm64/ContainerHelpers_binarySearch.s"; + struct stat buffer; + if (stat(optFile.c_str(), &buffer) == 0) { + std::ifstream binarySearchFileFD(optFile); + if (!binarySearchFileFD.is_open()) { + ERR(kLncErr, " %s open failed!", optFile.c_str()); + } else { + std::string contend; + while (getline(binarySearchFileFD, contend)) { + (void)emitter.Emit(contend + "\n"); + } + } + } + emitter.IncreaseJavaInsnCount(kBinSearchInsnCount); + } + + for (const auto &mpPair : cgFunc.GetLabelAndValueMap()) { + LabelOperand &labelOpnd = aarchCGFunc.GetOrCreateLabelOperand(mpPair.first); + A64OpndEmitVisitor visitor(emitter, nullptr); + labelOpnd.Accept(visitor); + (void)emitter.Emit(":\n"); + (void)emitter.Emit("\t.quad ").Emit(static_cast(mpPair.second)).Emit("\n"); + emitter.IncreaseJavaInsnCount(kQuadInsnCount); + } + + if (ehFunc != nullptr && ehFunc->NeedFullLSDA()) { + EmitFullLSDA(funcEmitInfo); + } +#ifdef 
EMIT_INSN_COUNT + if (cgFunc.GetFunction().IsJava()) { + EmitJavaInsnAddr(funcEmitInfo); + } +#endif /* ~EMIT_INSN_COUNT */ +} + +void AArch64AsmEmitter::EmitAArch64Insn(maplebe::Emitter &emitter, Insn &insn) const { + MOperator mOp = insn.GetMachineOpcode(); + emitter.SetCurrentMOP(mOp); + const InsnDesc *md = insn.GetDesc(); + + if (!GetCG()->GenerateVerboseAsm() && !GetCG()->GenerateVerboseCG() && insn.IsComment()) { + return; + } + + switch (mOp) { + case MOP_clinit: { + EmitClinit(emitter, insn); + emitter.IncreaseJavaInsnCount(md->GetAtomicNum()); + return; + } + case MOP_adrp_ldr: { + uint32 adrpldrInsnCount = md->GetAtomicNum(); + emitter.IncreaseJavaInsnCount(adrpldrInsnCount); + EmitAdrpLdr(emitter, insn); + if (CGOptions::IsLazyBinding() && !GetCG()->IsLibcore()) { + EmitLazyBindingRoutine(emitter, insn); + emitter.IncreaseJavaInsnCount(adrpldrInsnCount + 1); + } + return; + } + case MOP_counter: { + EmitCounter(emitter, insn); + return; + } + case MOP_c_counter: { + EmitCCounter(emitter, insn); + return; + } + case MOP_asm: { + EmitInlineAsm(emitter, insn); + return; + } + case MOP_clinit_tail: { + EmitClinitTail(emitter, insn); + emitter.IncreaseJavaInsnCount(md->GetAtomicNum()); + return; + } + case MOP_lazy_ldr: { + EmitLazyLoad(emitter, insn); + emitter.IncreaseJavaInsnCount(md->GetAtomicNum()); + return; + } + case MOP_adrp_label: { + EmitAdrpLabel(emitter, insn); + return; + } + case MOP_lazy_tail: { + /* No need to emit this pseudo instruction. */ + return; + } + case MOP_lazy_ldr_static: { + EmitLazyLoadStatic(emitter, insn); + emitter.IncreaseJavaInsnCount(md->GetAtomicNum()); + return; + } + case MOP_arrayclass_cache_ldr: { + EmitArrayClassCacheLoad(emitter, insn); + emitter.IncreaseJavaInsnCount(md->GetAtomicNum()); + return; + } + case MOP_get_and_addI: + case MOP_get_and_addL: { + EmitGetAndAddInt(emitter, insn); + return; + } + case MOP_get_and_setI: + case MOP_get_and_setL: { + EmitGetAndSetInt(emitter, insn); + return; + } + case MOP_compare_and_swapI: + case MOP_compare_and_swapL: { + EmitCompareAndSwapInt(emitter, insn); + return; + } + case MOP_string_indexof: { + EmitStringIndexOf(emitter, insn); + return; + } + case MOP_pseudo_none: { + return; + } + case MOP_tls_desc_call: { + EmitCTlsDescCall(emitter, insn); + return; + } + case MOP_tls_desc_rel: { + EmitCTlsDescRel(emitter, insn); + return; + } + case MOP_sync_lock_test_setI: + case MOP_sync_lock_test_setL: { + EmitSyncLockTestSet(emitter, insn); + return; + } + default: + break; + } + + if (CGOptions::IsNativeOpt() && mOp == MOP_xbl) { + auto *nameOpnd = static_cast(&insn.GetOperand(kInsnFirstOpnd)); + if (nameOpnd->GetName() == "MCC_CheckThrowPendingException") { + EmitCheckThrowPendingException(emitter); + emitter.IncreaseJavaInsnCount(md->GetAtomicNum()); + return; + } + } + + std::string format(md->format); + (void)emitter.Emit("\t").Emit(md->name).Emit("\t"); + size_t opndSize = insn.GetOperandSize(); + std::vector seq(opndSize, -1); + std::vector prefix(opndSize); /* used for print prefix like "*" in icall *rax */ + uint32 index = 0; + uint32 commaNum = 0; + for (uint32 i = 0; i < format.length(); ++i) { + char c = format[i]; + if (c >= '0' && c <= '5') { + seq[index++] = static_cast(c) - kZeroAsciiNum; + ++commaNum; + } else if (c != ',') { + prefix[index].push_back(c); + } + } + + bool isRefField = (opndSize == 0) ? 
false : CheckInsnRefField(insn, static_cast(static_cast(seq[0]))); + if (insn.IsComment()) { + emitter.IncreaseJavaInsnCount(); + } + uint32 compositeOpnds = 0; + for (uint32 i = 0; i < commaNum; ++i) { + if (seq[i] == -1) { + continue; + } + if (prefix[i].length() > 0) { + (void)emitter.Emit(prefix[i]); + } + if (emitter.NeedToDealWithHugeSo() && (mOp == MOP_xbl || mOp == MOP_tail_call_opt_xbl)) { + auto *nameOpnd = static_cast(&insn.GetOperand(kInsnFirstOpnd)); + /* Suport huge so here + * As the PLT section is just before java_text section, when java_text section is larger + * then 128M, instrunction of "b" and "bl" would fault to branch to PLT stub functions. Here, to save + * instuctions space, we change the branch target to a local target within 120M address, and add non-plt + * call to the target function. + */ + emitter.InsertHugeSoTarget(nameOpnd->GetName()); + (void)emitter.Emit(nameOpnd->GetName() + emitter.HugeSoPostFix()); + break; + } + auto *opnd = &insn.GetOperand(static_cast(seq[i])); + if (opnd && opnd->IsRegister()) { + auto *regOpnd = static_cast(opnd); + if ((md->opndMD[static_cast(seq[i])])->IsVectorOperand()) { + regOpnd->SetVecLanePosition(-1); + regOpnd->SetVecLaneSize(0); + regOpnd->SetVecElementSize(0); + if (insn.IsVectorOp()) { + PrepareVectorOperand(regOpnd, compositeOpnds, insn); + if (compositeOpnds != 0) { + (void)emitter.Emit("{"); + } + } + } + } + A64OpndEmitVisitor visitor(emitter, md->opndMD[static_cast(seq[i])]); + + insn.GetOperand(static_cast(seq[i])).Accept(visitor); + if (compositeOpnds == 1) { + (void)emitter.Emit("}"); + } + if (compositeOpnds > 0) { + --compositeOpnds; + } + /* reset opnd0 ref-field flag, so following instruction has correct register */ + if (isRefField && (i == 0)) { + static_cast(&insn.GetOperand(static_cast(seq[0])))->SetRefField(false); + } + /* Temporary comment the label:.Label.debug.callee */ + if (i != (commaNum - 1)) { + (void)emitter.Emit(", "); + } + const uint32 commaNumForEmitLazy = 2; + if (!CGOptions::IsLazyBinding() || GetCG()->IsLibcore() || (mOp != MOP_wldr && mOp != MOP_xldr) || + commaNum != commaNumForEmitLazy || i != 1 || + !insn.GetOperand(static_cast(seq[1])).IsMemoryAccessOperand()) { + continue; + } + /* + * Only check the last operand of ldr in lo12 mode. 
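+   * When that memory operand refers to one of the MUID func/data def/undef tables, a
+   * lazy-binding probe ("ldr xzr, [xd]", see EmitLazyBindingRoutine below) is emitted
+   * right after the load.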
+ * Check the second operand, if it's [AArch64MemOperand::kAddrModeLo12Li] + */ + auto *memOpnd = static_cast(&insn.GetOperand(static_cast(seq[1]))); + if (memOpnd == nullptr || memOpnd->GetAddrMode() != MemOperand::kAddrModeLo12Li) { + continue; + } + const MIRSymbol *sym = memOpnd->GetSymbol(); + if (sym->IsMuidFuncDefTab() || sym->IsMuidFuncUndefTab() || + sym->IsMuidDataDefTab() || sym->IsMuidDataUndefTab()) { + (void)emitter.Emit("\n"); + EmitLazyBindingRoutine(emitter, insn); + emitter.IncreaseJavaInsnCount(1); + } + } + if (GetCG()->GenerateVerboseCG() || (GetCG()->GenerateVerboseAsm() && insn.IsComment())) { + const char *comment = insn.GetComment().c_str(); + if (comment != nullptr && strlen(comment) > 0) { + (void)emitter.Emit("\t\t// ").Emit(comment); + } + } + + (void)emitter.Emit("\n"); +} + +void AArch64AsmEmitter::EmitClinit(Emitter &emitter, const Insn &insn) const { + /* + * adrp x3, __muid_data_undef_tab$$GetBoolean_dex+144 + * ldr x3, [x3, #:lo12:__muid_data_undef_tab$$GetBoolean_dex+144] + * or, + * adrp x3, _PTR__cinf_Ljava_2Futil_2Fconcurrent_2Fatomic_2FAtomicInteger_3B + * ldr x3, [x3, #:lo12:_PTR__cinf_Ljava_2Futil_2Fconcurrent_2Fatomic_2FAtomicInteger_3B] + * + * ldr x3, [x3,#112] + * ldr wzr, [x3] + */ + const InsnDesc *md = &AArch64CG::kMd[MOP_clinit]; + + Operand *opnd0 = &insn.GetOperand(kInsnFirstOpnd); + Operand *opnd1 = &insn.GetOperand(kInsnSecondOpnd); + const OpndDesc *prop0 = md->opndMD[0]; + A64OpndEmitVisitor visitor(emitter, prop0); + auto *stImmOpnd = static_cast(opnd1); + CHECK_FATAL(stImmOpnd != nullptr, "stImmOpnd is null in AArch64Emitter::EmitClinit"); + /* emit nop for breakpoint */ + if (GetCG()->GetCGOptions().WithDwarf()) { + (void)emitter.Emit("\t").Emit("nop").Emit("\n"); + } + + if (stImmOpnd->GetSymbol()->IsMuidDataUndefTab()) { + /* emit adrp */ + (void)emitter.Emit("\t").Emit("adrp").Emit("\t"); + opnd0->Accept(visitor); + (void)emitter.Emit(","); + (void)emitter.Emit(stImmOpnd->GetName()); + (void)emitter.Emit("+").Emit(stImmOpnd->GetOffset()); + (void)emitter.Emit("\n"); + /* emit ldr */ + (void)emitter.Emit("\t").Emit("ldr").Emit("\t"); + opnd0->Accept(visitor); + (void)emitter.Emit(","); + (void)emitter.Emit("["); + opnd0->Accept(visitor); + (void)emitter.Emit(","); + (void)emitter.Emit("#"); + (void)emitter.Emit(":lo12:").Emit(stImmOpnd->GetName()); + (void)emitter.Emit("+").Emit(stImmOpnd->GetOffset()); + (void)emitter.Emit("]"); + (void)emitter.Emit("\n"); + } else { + /* adrp x3, _PTR__cinf_Ljava_2Futil_2Fconcurrent_2Fatomic_2FAtomicInteger_3B */ + (void)emitter.Emit("\tadrp\t"); + opnd0->Accept(visitor); + (void)emitter.Emit(","); + (void)emitter.Emit(namemangler::kPtrPrefixStr + stImmOpnd->GetName()); + (void)emitter.Emit("\n"); + + /* ldr x3, [x3, #:lo12:_PTR__cinf_Ljava_2Futil_2Fconcurrent_2Fatomic_2FAtomicInteger_3B] */ + (void)emitter.Emit("\tldr\t"); + opnd0->Accept(visitor); + (void)emitter.Emit(", ["); + opnd0->Accept(visitor); + (void)emitter.Emit(", #:lo12:"); + (void)emitter.Emit(namemangler::kPtrPrefixStr + stImmOpnd->GetName()); + (void)emitter.Emit("]\n"); + } + /* emit "ldr x0,[x0,#48]" */ + (void)emitter.Emit("\t").Emit("ldr").Emit("\t"); + opnd0->Accept(visitor); + (void)emitter.Emit(","); + (void)emitter.Emit("["); + opnd0->Accept(visitor); + (void)emitter.Emit(",#"); + (void)emitter.Emit(static_cast(ClassMetadata::OffsetOfInitState())); + (void)emitter.Emit("]"); + (void)emitter.Emit("\n"); + + /* emit "ldr xzr, [x0]" */ + (void)emitter.Emit("\t").Emit("ldr\txzr, ["); + opnd0->Accept(visitor); + 
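+   /*
+    * Note (assumption): this dummy load through xzr acts as the class-initialization probe:
+    * for an uninitialized class the init-state slot points at a protected page, so the load
+    * faults and the runtime runs <clinit> before execution resumes.
+    */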
(void)emitter.Emit("]\n"); +} + +static void AsmStringOutputRegNum( + bool isInt, uint32 regno, uint32 intBase, uint32 fpBase, std::string &strToEmit) { + regno_t newRegno; + if (isInt) { + newRegno = regno - intBase; + } else { + newRegno = regno - fpBase; + } + if (newRegno > (kDecimalMax - 1)) { + uint32 tenth = newRegno / kDecimalMax; + strToEmit += static_cast(kZeroAsciiNum + tenth); + newRegno -= (kDecimalMax * tenth); + } + strToEmit += static_cast(static_cast(newRegno) + kZeroAsciiNum); +} + +void AArch64AsmEmitter::EmitInlineAsm(Emitter &emitter, const Insn &insn) const { + (void)emitter.Emit("\t//Inline asm begin\n\t"); + auto &list1 = static_cast(insn.GetOperand(kAsmOutputListOpnd)); + std::vector outOpnds; + for (auto *regOpnd : list1.GetOperands()) { + outOpnds.push_back(regOpnd); + } + auto &list2 = static_cast(insn.GetOperand(kAsmInputListOpnd)); + std::vector inOpnds; + for (auto *regOpnd : list2.GetOperands()) { + inOpnds.push_back(regOpnd); + } + auto &list6 = static_cast(insn.GetOperand(kAsmOutputRegPrefixOpnd)); + auto &list7 = static_cast(insn.GetOperand(kAsmInputRegPrefixOpnd)); + MapleString asmStr = static_cast(insn.GetOperand(kAsmStringOpnd)).GetComment(); + std::string stringToEmit; + size_t sidx = 0; + auto isMemAccess = [](char c)->bool { + return c == '['; + }; + auto emitRegister = [&](const char *p, bool isInt, uint32 regNO, bool unDefRegSize)->void { + if (isMemAccess(p[0])) { + stringToEmit += "[x"; + AsmStringOutputRegNum(isInt, regNO, R0, V0, stringToEmit); + stringToEmit += "]"; + } else { + ASSERT((p[0] == 'w' || p[0] == 'x' || p[0] == 's' || p[0] == 'd' || p[0] == 'v'), "Asm invalid register type"); + if ((p[0] == 'w' || p[0] == 'x') && unDefRegSize) { + stringToEmit += 'x'; + } else { + stringToEmit += p[0]; + } + if (!unDefRegSize) { + isInt = (p[0] == 'w' || p[0] == 'x'); + } + AsmStringOutputRegNum(isInt, regNO, R0, V0, stringToEmit); + } + }; + for (size_t i = 0; i < asmStr.length(); ++i) { + switch (asmStr[i]) { + case '$': { + char c = asmStr[++i]; + if ((c >= '0') && (c <= '9')) { + auto val = static_cast(static_cast(c) - kZeroAsciiNum); + if (asmStr[i + 1] >= '0' && asmStr[i + 1] <= '9') { + val = val * kDecimalMax + static_cast(static_cast(asmStr[++i]) - kZeroAsciiNum); + } + if (val < outOpnds.size()) { + const char *prefix = list6.stringList[val]->GetComment().c_str(); + RegOperand *opnd = outOpnds[val]; + emitRegister(prefix, opnd->IsOfIntClass(), opnd->GetRegisterNumber(), true); + } else { + val -= static_cast(outOpnds.size()); + CHECK_FATAL(val < inOpnds.size(), "Inline asm : invalid register constraint number"); + RegOperand *opnd = inOpnds[val]; + /* input is a immediate */ + const char *prefix = list7.stringList[val]->GetComment().c_str(); + if (prefix[0] == 'i') { + stringToEmit += '#'; + for (size_t k = 1; k < list7.stringList[val]->GetComment().length(); ++k) { + stringToEmit += prefix[k]; + } + } else { + emitRegister(prefix, opnd->IsOfIntClass(), opnd->GetRegisterNumber(), true); + } + } + } else if (c == '{') { + c = asmStr[++i]; + CHECK_FATAL(((c >= '0') && (c <= '9')), "Inline asm : invalid register constraint number"); + auto val = static_cast(c - '0'); + if (asmStr[i + 1] >= '0' && asmStr[i + 1] <= '9') { + val = val * kDecimalMax + static_cast(asmStr[++i] - '0'); + } + regno_t regno; + bool isAddr = false; + if (val < outOpnds.size()) { + RegOperand *opnd = outOpnds[val]; + regno = opnd->GetRegisterNumber(); + isAddr = isMemAccess(list6.stringList[val]->GetComment().c_str()[0]); + } else { + val -= 
static_cast(outOpnds.size()); + CHECK_FATAL(val < inOpnds.size(), "Inline asm : invalid register constraint number"); + RegOperand *opnd = inOpnds[val]; + regno = opnd->GetRegisterNumber(); + isAddr = isMemAccess(list7.stringList[val]->GetComment().c_str()[0]); + } + c = asmStr[++i]; + CHECK_FATAL(c == ':', "Parsing error in inline asm string during emit"); + c = asmStr[++i]; + std::string prefix(1, c); + if (c == 'a' || isAddr) { + prefix = "[x"; + } + emitRegister(prefix.c_str(), true, regno, false); + c = asmStr[++i]; + CHECK_FATAL(c == '}', "Parsing error in inline asm string during emit"); + } + break; + } + case '\n': { + stringToEmit += "\n\t"; + break; + } + default: + stringToEmit += asmStr[i]; + sidx++; + } + } + (void)emitter.Emit(stringToEmit); + (void)emitter.Emit("\n\t//Inline asm end\n"); +} + +void AArch64AsmEmitter::EmitClinitTail(Emitter &emitter, const Insn &insn) const { + /* + * ldr x17, [xs, #112] + * ldr wzr, [x17] + */ + const InsnDesc *md = &AArch64CG::kMd[MOP_clinit_tail]; + + Operand *opnd0 = &insn.GetOperand(kInsnFirstOpnd); + + const OpndDesc *prop0 = md->opndMD[0]; + A64OpndEmitVisitor visitor(emitter, prop0); + + /* emit "ldr x17,[xs,#112]" */ + (void)emitter.Emit("\t").Emit("ldr").Emit("\tx17, ["); + opnd0->Accept(visitor); + (void)emitter.Emit(", #"); + (void)emitter.Emit(static_cast(ClassMetadata::OffsetOfInitState())); + (void)emitter.Emit("]"); + (void)emitter.Emit("\n"); + + /* emit "ldr xzr, [x17]" */ + (void)emitter.Emit("\t").Emit("ldr\txzr, [x17]\n"); +} + +void AArch64AsmEmitter::EmitLazyLoad(Emitter &emitter, const Insn &insn) const { + /* + * ldr wd, [xs] # xd and xs should be differenct register + * ldr wd, [xd] + */ + const InsnDesc *md = &AArch64CG::kMd[MOP_lazy_ldr]; + + Operand *opnd0 = &insn.GetOperand(kInsnFirstOpnd); + Operand *opnd1 = &insn.GetOperand(kInsnSecondOpnd); + const OpndDesc *prop0 = md->opndMD[0]; + const OpndDesc *prop1 = md->opndMD[1]; + A64OpndEmitVisitor visitor(emitter, prop0); + A64OpndEmitVisitor visitor1(emitter, prop1); + + /* emit "ldr wd, [xs]" */ + (void)emitter.Emit("\t").Emit("ldr\t"); +#ifdef USE_32BIT_REF + opnd0->Accept(visitor); +#else + opnd0->Accept(visitor1); +#endif + (void)emitter.Emit(", ["); + opnd1->Accept(visitor1); + (void)emitter.Emit("]\t// lazy load.\n"); + + /* emit "ldr wd, [xd]" */ + (void)emitter.Emit("\t").Emit("ldr\t"); + opnd0->Accept(visitor); + (void)emitter.Emit(", ["); + opnd1->Accept(visitor1); + (void)emitter.Emit("]\t// lazy load.\n"); +} + +void AArch64AsmEmitter::EmitCounter(Emitter &emitter, const Insn &insn) const { + /* + * adrp x1, __profile_bb_table$$GetBoolean_dex+4 + * ldr w17, [x1, #:lo12:__profile_bb_table$$GetBoolean_dex+4] + * add w17, w17, #1 + * str w17, [x1, #:lo12:__profile_bb_table$$GetBoolean_dex+4] + */ + const InsnDesc *md = &AArch64CG::kMd[MOP_counter]; + + Operand *opnd0 = &insn.GetOperand(kInsnFirstOpnd); + Operand *opnd1 = &insn.GetOperand(kInsnSecondOpnd); + const OpndDesc *prop0 = md->opndMD[kInsnFirstOpnd]; + A64OpndEmitVisitor visitor(emitter, prop0); + StImmOperand *stImmOpnd = static_cast(opnd1); + CHECK_FATAL(stImmOpnd != nullptr, "stImmOpnd is null in AArch64Emitter::EmitCounter"); + /* emit nop for breakpoint */ + if (GetCG()->GetCGOptions().WithDwarf()) { + (void)emitter.Emit("\t").Emit("nop").Emit("\n"); + } + + /* emit adrp */ + (void)emitter.Emit("\t").Emit("adrp").Emit("\t"); + opnd0->Accept(visitor); + (void)emitter.Emit(","); + (void)emitter.Emit(stImmOpnd->GetName()); + (void)emitter.Emit("+").Emit(stImmOpnd->GetOffset()); + 
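+   /*
+    * Note (assumption): w17 (IP1) serves as the scratch register for the counter update
+    * below, so the instrumentation avoids disturbing any allocatable register at this point.
+    */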
(void)emitter.Emit("\n"); + /* emit ldr */ + (void)emitter.Emit("\t").Emit("ldr").Emit("\tw17, ["); + opnd0->Accept(visitor); + (void)emitter.Emit(","); + (void)emitter.Emit("#"); + (void)emitter.Emit(":lo12:").Emit(stImmOpnd->GetName()); + (void)emitter.Emit("+").Emit(stImmOpnd->GetOffset()); + (void)emitter.Emit("]"); + (void)emitter.Emit("\n"); + /* emit add */ + (void)emitter.Emit("\t").Emit("add").Emit("\tw17, w17, #1"); + (void)emitter.Emit("\n"); + /* emit str */ + (void)emitter.Emit("\t").Emit("str").Emit("\tw17, ["); + opnd0->Accept(visitor); + (void)emitter.Emit(","); + (void)emitter.Emit("#"); + (void)emitter.Emit(":lo12:").Emit(stImmOpnd->GetName()); + (void)emitter.Emit("+").Emit(stImmOpnd->GetOffset()); + (void)emitter.Emit("]"); + (void)emitter.Emit("\n"); +} + +void AArch64AsmEmitter::EmitCCounter(Emitter &emitter, const Insn &insn) const { + const InsnDesc *md = &AArch64CG::kMd[MOP_c_counter]; + + Operand *opnd0 = &insn.GetOperand(kInsnFirstOpnd); + Operand *opnd1 = &insn.GetOperand(kInsnSecondOpnd); + const OpndDesc *prop1 = md->opndMD[kInsnSecondOpnd]; + auto *stImmOpnd = static_cast(opnd0); + CHECK_FATAL(stImmOpnd != nullptr, "stImmOpnd is null in AArch64Insn::EmitCounter"); + /* spill linker register */ + (void)emitter.Emit("\tstr\tx30, [sp, #-16]!\n"); + /* get counter symbol address */ + (void)emitter.Emit("\tadrp\tx16, "); + + if (CGOptions::IsPIC()) { + (void)emitter.Emit(":got:"); + } + (void)emitter.Emit(stImmOpnd->GetName()).Emit("\n"); + + if (CGOptions::IsPIC()) { + (void)emitter.Emit("\tldr\tx16, [x16, :got_lo12:"); + (void)emitter.Emit(stImmOpnd->GetName()); + (void)emitter.Emit("]\n"); + } else { + (void)emitter.Emit("\tadd\tx16, x16, :lo12:\n"); + (void)emitter.Emit(stImmOpnd->GetName()); + (void)emitter.Emit("\n"); + } + + A64OpndEmitVisitor visitor(emitter, prop1); + /* load current count */ + (void)emitter.Emit("\tldr\tx30, [x16, "); + opnd1->Accept(visitor); + (void)emitter.Emit("]\n"); + /* increment */ + (void)emitter.Emit("\tadd\tx30, x30, #1\n"); + /* str new count */ + (void)emitter.Emit("\tstr\tx30, [x16, "); + opnd1->Accept(visitor); + (void)emitter.Emit("]\n"); + + /* reload linker register */ + (void)emitter.Emit("\tldr\tx30, [sp], #16\n"); +} + +void AArch64AsmEmitter::EmitAdrpLabel(Emitter &emitter, const Insn &insn) const { + /* adrp xd, label + * add xd, xd, #lo12:label + */ + const InsnDesc *md = &AArch64CG::kMd[MOP_adrp_label]; + + Operand *opnd0 = &insn.GetOperand(kInsnFirstOpnd); + Operand *opnd1 = &insn.GetOperand(kInsnSecondOpnd); + const OpndDesc *prop0 = md->opndMD[0]; + A64OpndEmitVisitor visitor(emitter, prop0); + auto lidx = static_cast(opnd1)->GetValue(); + + /* adrp xd, label */ + (void)emitter.Emit("\t").Emit("adrp").Emit("\t"); + opnd0->Accept(visitor); + (void)emitter.Emit(", "); + char *idx = strdup( + std::to_string(Globals::GetInstance()->GetBECommon()->GetMIRModule().CurFunction()->GetPuidx()).c_str()); + (void)emitter.Emit(".L.").Emit(idx).Emit("__").Emit(lidx).Emit("\n"); + + /* add xd, xd, #lo12:label */ + (void)emitter.Emit("\tadd\t"); + opnd0->Accept(visitor); + (void)emitter.Emit(", "); + opnd0->Accept(visitor); + (void)emitter.Emit(", "); + (void)emitter.Emit(":lo12:").Emit(".L.").Emit(idx).Emit("__").Emit(lidx).Emit("\n"); + (void)emitter.Emit("\n"); + free(idx); + idx = nullptr; +} + +void AArch64AsmEmitter::EmitAdrpLdr(Emitter &emitter, const Insn &insn) const { + /* + * adrp xd, _PTR__cinf_Ljava_2Futil_2Fconcurrent_2Fatomic_2FAtomicInteger_3B + * ldr xd, [xd, 
#:lo12:_PTR__cinf_Ljava_2Futil_2Fconcurrent_2Fatomic_2FAtomicInteger_3B] + */ + const InsnDesc *md = &AArch64CG::kMd[MOP_adrp_ldr]; + Operand *opnd0 = &insn.GetOperand(kInsnFirstOpnd); + Operand *opnd1 = &insn.GetOperand(kInsnSecondOpnd); + const OpndDesc *prop0 = md->opndMD[0]; + A64OpndEmitVisitor visitor(emitter, prop0); + auto *stImmOpnd = static_cast(opnd1); + CHECK_FATAL(stImmOpnd != nullptr, "stImmOpnd is null in AArch64Emitter::EmitAdrpLdr"); + /* emit nop for breakpoint */ + if (GetCG()->GetCGOptions().WithDwarf()) { + (void)emitter.Emit("\t").Emit("nop").Emit("\n"); + } + + /* adrp xd, _PTR__cinf_Ljava_2Futil_2Fconcurrent_2Fatomic_2FAtomicInteger_3B */ + (void)emitter.Emit("\t").Emit("adrp").Emit("\t"); + opnd0->Accept(visitor); + (void)emitter.Emit(", "); + (void)emitter.Emit(stImmOpnd->GetName()); + if (stImmOpnd->GetOffset() != 0) { + (void)emitter.Emit("+").Emit(stImmOpnd->GetOffset()); + } + (void)emitter.Emit("\n"); + + /* ldr xd, [xd, #:lo12:_PTR__cinf_Ljava_2Futil_2Fconcurrent_2Fatomic_2FAtomicInteger_3B] */ + (void)emitter.Emit("\tldr\t"); + static_cast(opnd0)->SetRefField(true); + opnd0->Accept(visitor); + static_cast(opnd0)->SetRefField(false); + (void)emitter.Emit(", "); + (void)emitter.Emit("["); + opnd0->Accept(visitor); + (void)emitter.Emit(","); + (void)emitter.Emit("#"); + (void)emitter.Emit(":lo12:").Emit(stImmOpnd->GetName()); + if (stImmOpnd->GetOffset() != 0) { + (void)emitter.Emit("+").Emit(stImmOpnd->GetOffset()); + } + (void)emitter.Emit("]\n"); +} + +void AArch64AsmEmitter::EmitLazyLoadStatic(Emitter &emitter, const Insn &insn) const { + /* adrp xd, :got:__staticDecoupleValueOffset$$xxx+offset + * ldr wd, [xd, #:got_lo12:__staticDecoupleValueOffset$$xxx+offset] + * ldr wzr, [xd] + */ + const InsnDesc *md = &AArch64CG::kMd[MOP_lazy_ldr_static]; + + Operand *opnd0 = &insn.GetOperand(kInsnFirstOpnd); + Operand *opnd1 = &insn.GetOperand(kInsnSecondOpnd); + const OpndDesc *prop0 = md->GetOpndDes(0); + A64OpndEmitVisitor visitor(emitter, prop0); + auto *stImmOpnd = static_cast(opnd1); + CHECK_FATAL(stImmOpnd != nullptr, "stImmOpnd is null in AArch64Emitter::EmitLazyLoadStatic"); + + /* emit "adrp xd, :got:__staticDecoupleValueOffset$$xxx+offset" */ + (void)emitter.Emit("\t").Emit("adrp").Emit("\t"); + opnd0->Accept(visitor); + (void)emitter.Emit(", "); + (void)emitter.Emit(stImmOpnd->GetName()); + if (stImmOpnd->GetOffset() != 0) { + (void)emitter.Emit("+").Emit(stImmOpnd->GetOffset()); + } + (void)emitter.Emit("\t// lazy load static.\n"); + + /* emit "ldr wd, [xd, #:got_lo12:__staticDecoupleValueOffset$$xxx+offset]" */ + (void)emitter.Emit("\tldr\t"); + static_cast(opnd0)->SetRefField(true); +#ifdef USE_32BIT_REF + const OpndDesc prop2(prop0->GetOperandType(), prop0->GetRegProp(), prop0->GetSize() / 2); + opnd0->Emit(emitter, &prop2); /* ldr wd, ... for emui */ +#else + opnd0->Accept(visitor); /* ldr xd, ... 
for qemu */ +#endif /* USE_32BIT_REF */ + static_cast(opnd0)->SetRefField(false); + (void)emitter.Emit(", "); + (void)emitter.Emit("["); + opnd0->Accept(visitor); + (void)emitter.Emit(","); + (void)emitter.Emit("#"); + (void)emitter.Emit(":lo12:").Emit(stImmOpnd->GetName()); + if (stImmOpnd->GetOffset() != 0) { + (void)emitter.Emit("+").Emit(stImmOpnd->GetOffset()); + } + (void)emitter.Emit("]\t// lazy load static.\n"); + + /* emit "ldr wzr, [xd]" */ + (void)emitter.Emit("\t").Emit("ldr\twzr, ["); + opnd0->Accept(visitor); + (void)emitter.Emit("]\t// lazy load static.\n"); +} + +void AArch64AsmEmitter::EmitArrayClassCacheLoad(Emitter &emitter, const Insn &insn) const { + /* adrp xd, :got:__arrayClassCacheTable$$xxx+offset + * ldr wd, [xd, #:got_lo12:__arrayClassCacheTable$$xxx+offset] + * ldr wzr, [xd] + */ + const InsnDesc *md = &AArch64CG::kMd[MOP_arrayclass_cache_ldr]; + Operand *opnd0 = &insn.GetOperand(kInsnFirstOpnd); + Operand *opnd1 = &insn.GetOperand(kInsnSecondOpnd); + const OpndDesc *prop0 = md->GetOpndDes(kInsnFirstOpnd); + A64OpndEmitVisitor visitor(emitter, prop0); + auto *stImmOpnd = static_cast(opnd1); + CHECK_FATAL(stImmOpnd != nullptr, "stImmOpnd is null in AArch64Emitter::EmitLazyLoadStatic"); + + /* emit "adrp xd, :got:__arrayClassCacheTable$$xxx+offset" */ + (void)emitter.Emit("\t").Emit("adrp").Emit("\t"); + opnd0->Accept(visitor); + (void)emitter.Emit(", "); + (void)emitter.Emit(stImmOpnd->GetName()); + if (stImmOpnd->GetOffset() != 0) { + (void)emitter.Emit("+").Emit(stImmOpnd->GetOffset()); + } + (void)emitter.Emit("\t// load array class.\n"); + + /* emit "ldr wd, [xd, #:got_lo12:__arrayClassCacheTable$$xxx+offset]" */ + (void)emitter.Emit("\tldr\t"); + static_cast(opnd0)->SetRefField(true); +#ifdef USE_32BIT_REF + const OpndDesc prop2(prop0->GetOperandType(), prop0->GetRegProp(), prop0->GetSize() / 2); + A64OpndEmitVisitor visitor2(emitter, prop2); + opnd0->Accept(visitor2); /* ldr wd, ... for emui */ +#else + opnd0->Accept(visitor); /* ldr xd, ... for qemu */ +#endif /* USE_32BIT_REF */ + static_cast(opnd0)->SetRefField(false); + (void)emitter.Emit(", "); + (void)emitter.Emit("["); + opnd0->Accept(visitor); + (void)emitter.Emit(","); + (void)emitter.Emit("#"); + (void)emitter.Emit(":lo12:").Emit(stImmOpnd->GetName()); + if (stImmOpnd->GetOffset() != 0) { + (void)emitter.Emit("+").Emit(stImmOpnd->GetOffset()); + } + (void)emitter.Emit("]\t// load array class.\n"); + + /* emit "ldr wzr, [xd]" */ + (void)emitter.Emit("\t").Emit("ldr\twzr, ["); + opnd0->Accept(visitor); + (void)emitter.Emit("]\t// check resolve array class.\n"); +} + +/* + * intrinsic_get_add_int w0, xt, wt, ws, x1, x2, w3, label + * add xt, x1, x2 + * label: + * ldaxr w0, [xt] + * add wt, w0, w3 + * stlxr ws, wt, [xt] + * cbnz ws, label + */ +void AArch64AsmEmitter::EmitGetAndAddInt(Emitter &emitter, const Insn &insn) const { + ASSERT(insn.GetOperandSize() > kInsnEighthOpnd, "ensure the oprands number"); + (void)emitter.Emit("\t//\tstart of Unsafe.getAndAddInt.\n"); + Operand *tempOpnd0 = &insn.GetOperand(kInsnSecondOpnd); + Operand *tempOpnd1 = &insn.GetOperand(kInsnThirdOpnd); + Operand *tempOpnd2 = &insn.GetOperand(kInsnFourthOpnd); + Operand *objOpnd = &insn.GetOperand(kInsnFifthOpnd); + Operand *offsetOpnd = &insn.GetOperand(kInsnSixthOpnd); + Operand *deltaOpnd = &insn.GetOperand(kInsnSeventhOpnd); + Operand *labelOpnd = &insn.GetOperand(kInsnEighthOpnd); + A64OpndEmitVisitor visitor(emitter, nullptr); + /* emit add. 
*/ + (void)emitter.Emit("\t").Emit("add").Emit("\t"); + tempOpnd0->Accept(visitor); + (void)emitter.Emit(", "); + objOpnd->Accept(visitor); + (void)emitter.Emit(", "); + offsetOpnd->Accept(visitor); + (void)emitter.Emit("\n"); + /* emit label. */ + labelOpnd->Accept(visitor); + (void)emitter.Emit(":\n"); + Operand *retVal = &insn.GetOperand(kInsnFirstOpnd); + const MOperator mOp = insn.GetMachineOpcode(); + const InsnDesc *md = &AArch64CG::kMd[mOp]; + const OpndDesc *retProp = md->opndMD[kInsnFirstOpnd]; + A64OpndEmitVisitor retVisitor(emitter, retProp); + /* emit ldaxr */ + (void)emitter.Emit("\t").Emit("ldaxr").Emit("\t"); + retVal->Accept(retVisitor); + (void)emitter.Emit(", ["); + tempOpnd0->Accept(visitor); + (void)emitter.Emit("]\n"); + /* emit add. */ + (void)emitter.Emit("\t").Emit("add").Emit("\t"); + tempOpnd1->Accept(retVisitor); + (void)emitter.Emit(", "); + retVal->Accept(retVisitor); + (void)emitter.Emit(", "); + deltaOpnd->Accept(retVisitor); + (void)emitter.Emit("\n"); + /* emit stlxr. */ + (void)emitter.Emit("\t").Emit("stlxr").Emit("\t"); + tempOpnd2->Accept(visitor); + (void)emitter.Emit(", "); + tempOpnd1->Accept(retVisitor); + (void)emitter.Emit(", ["); + tempOpnd0->Accept(visitor); + (void)emitter.Emit("]\n"); + /* emit cbnz. */ + (void)emitter.Emit("\t").Emit("cbnz").Emit("\t"); + tempOpnd2->Accept(visitor); + (void)emitter.Emit(", "); + labelOpnd->Accept(visitor); + (void)emitter.Emit("\n"); + (void)emitter.Emit("\t//\tend of Unsafe.getAndAddInt.\n"); +} + +/* + * intrinsic_get_set_int w0, xt, ws, x1, x2, w3, label + * add xt, x1, x2 + * label: + * ldaxr w0, [xt] + * stlxr ws, w3, [xt] + * cbnz ws, label + */ +void AArch64AsmEmitter::EmitGetAndSetInt(Emitter &emitter, const Insn &insn) const { + /* MOP_get_and_setI and MOP_get_and_setL have 7 operands */ + ASSERT(insn.GetOperandSize() > kInsnSeventhOpnd, "ensure the operands number"); + Operand *tempOpnd0 = &insn.GetOperand(kInsnSecondOpnd); + Operand *tempOpnd1 = &insn.GetOperand(kInsnThirdOpnd); + Operand *objOpnd = &insn.GetOperand(kInsnFourthOpnd); + Operand *offsetOpnd = &insn.GetOperand(kInsnFifthOpnd); + A64OpndEmitVisitor visitor(emitter, nullptr); + /* add x1, x1, x2 */ + (void)emitter.Emit("\tadd\t"); + tempOpnd0->Accept(visitor); + (void)emitter.Emit(", "); + objOpnd->Accept(visitor); + (void)emitter.Emit(", "); + offsetOpnd->Accept(visitor); + (void)emitter.Emit("\n"); + Operand *labelOpnd = &insn.GetOperand(kInsnSeventhOpnd); + /* label: */ + labelOpnd->Accept(visitor); + (void)emitter.Emit(":\n"); + Operand *retVal = &insn.GetOperand(kInsnFirstOpnd); + /* ldaxr w0, [xt] */ + (void)emitter.Emit("\tldaxr\t"); + retVal->Accept(visitor); + (void)emitter.Emit(", ["); + tempOpnd0->Accept(visitor); + (void)emitter.Emit("]\n"); + Operand *newValueOpnd = &insn.GetOperand(kInsnSixthOpnd); + /* stlxr ws, w3, [xt] */ + (void)emitter.Emit("\tstlxr\t"); + tempOpnd1->Accept(visitor); + (void)emitter.Emit(", "); + newValueOpnd->Accept(visitor); + (void)emitter.Emit(", ["); + tempOpnd0->Accept(visitor); + (void)emitter.Emit("]\n"); + /* cbnz w2, label */ + (void)emitter.Emit("\tcbnz\t"); + tempOpnd1->Accept(visitor); + (void)emitter.Emit(", "); + labelOpnd->Accept(visitor); + (void)emitter.Emit("\n"); +} + +/* + * intrinsic_string_indexof w0, x1, w2, x3, w4, x5, x6, x7, x8, x9, w10, + * Label.FIRST_LOOP, Label.STR2_NEXT, Label.STR1_LOOP, + * Label.STR1_NEXT, Label.LAST_WORD, Label.NOMATCH, Label.RET + * cmp w4, w2 + * b.gt .Label.NOMATCH + * sub w2, w2, w4 + * sub w4, w4, #8 + * mov w10, w2 + * uxtw x4, w4 + * uxtw x2, 
w2 + * add x3, x3, x4 + * add x1, x1, x2 + * neg x4, x4 + * neg x2, x2 + * ldr x5, [x3,x4] + * .Label.FIRST_LOOP: + * ldr x7, [x1,x2] + * cmp x5, x7 + * b.eq .Label.STR1_LOOP + * .Label.STR2_NEXT: + * adds x2, x2, #1 + * b.le .Label.FIRST_LOOP + * b .Label.NOMATCH + * .Label.STR1_LOOP: + * adds x8, x4, #8 + * add x9, x2, #8 + * b.ge .Label.LAST_WORD + * .Label.STR1_NEXT: + * ldr x6, [x3,x8] + * ldr x7, [x1,x9] + * cmp x6, x7 + * b.ne .Label.STR2_NEXT + * adds x8, x8, #8 + * add x9, x9, #8 + * b.lt .Label.STR1_NEXT + * .Label.LAST_WORD: + * ldr x6, [x3] + * sub x9, x1, x4 + * ldr x7, [x9,x2] + * cmp x6, x7 + * b.ne .Label.STR2_NEXT + * add w0, w10, w2 + * b .Label.RET + * .Label.NOMATCH: + * mov w0, #-1 + * .Label.RET: + */ +void AArch64AsmEmitter::EmitStringIndexOf(Emitter &emitter, const Insn &insn) const { + /* MOP_string_indexof has 18 operands */ + ASSERT(insn.GetOperandSize() == 18, "ensure the operands number"); + Operand *patternLengthOpnd = &insn.GetOperand(kInsnFifthOpnd); + Operand *srcLengthOpnd = &insn.GetOperand(kInsnThirdOpnd); + const std::string patternLengthReg = + AArch64CG::intRegNames[AArch64CG::kR64List][static_cast(patternLengthOpnd)->GetRegisterNumber()]; + const std::string srcLengthReg = + AArch64CG::intRegNames[AArch64CG::kR64List][static_cast(srcLengthOpnd)->GetRegisterNumber()]; + A64OpndEmitVisitor visitor(emitter, nullptr); + /* cmp w4, w2 */ + (void)emitter.Emit("\tcmp\t"); + patternLengthOpnd->Accept(visitor); + (void)emitter.Emit(", "); + srcLengthOpnd->Accept(visitor); + (void)emitter.Emit("\n"); + /* the 16th operand of MOP_string_indexof is Label.NOMATCH */ + Operand *labelNoMatch = &insn.GetOperand(16); + /* b.gt Label.NOMATCH */ + (void)emitter.Emit("\tb.gt\t"); + labelNoMatch->Accept(visitor); + (void)emitter.Emit("\n"); + /* sub w2, w2, w4 */ + (void)emitter.Emit("\tsub\t"); + srcLengthOpnd->Accept(visitor); + (void)emitter.Emit(", "); + srcLengthOpnd->Accept(visitor); + (void)emitter.Emit(", "); + patternLengthOpnd->Accept(visitor); + (void)emitter.Emit("\n"); + /* sub w4, w4, #8 */ + (void)emitter.Emit("\tsub\t"); + patternLengthOpnd->Accept(visitor); + (void)emitter.Emit(", "); + patternLengthOpnd->Accept(visitor); + (void)emitter.Emit(", #8\n"); + /* the 10th operand of MOP_string_indexof is w10 */ + Operand *resultTmp = &insn.GetOperand(10); + /* mov w10, w2 */ + (void)emitter.Emit("\tmov\t"); + resultTmp->Accept(visitor); + (void)emitter.Emit(", "); + srcLengthOpnd->Accept(visitor); + (void)emitter.Emit("\n"); + /* uxtw x4, w4 */ + (void)emitter.Emit("\tuxtw\t").Emit(patternLengthReg); + (void)emitter.Emit(", "); + patternLengthOpnd->Accept(visitor); + (void)emitter.Emit("\n"); + /* uxtw x2, w2 */ + (void)emitter.Emit("\tuxtw\t").Emit(srcLengthReg); + (void)emitter.Emit(", "); + srcLengthOpnd->Accept(visitor); + (void)emitter.Emit("\n"); + Operand *patternStringBaseOpnd = &insn.GetOperand(kInsnFourthOpnd); + /* add x3, x3, x4 */ + (void)emitter.Emit("\tadd\t"); + patternStringBaseOpnd->Accept(visitor); + (void)emitter.Emit(", "); + patternStringBaseOpnd->Accept(visitor); + (void)emitter.Emit(", ").Emit(patternLengthReg); + (void)emitter.Emit("\n"); + Operand *srcStringBaseOpnd = &insn.GetOperand(kInsnSecondOpnd); + /* add x1, x1, x2 */ + (void)emitter.Emit("\tadd\t"); + srcStringBaseOpnd->Accept(visitor); + (void)emitter.Emit(", "); + srcStringBaseOpnd->Accept(visitor); + (void)emitter.Emit(", ").Emit(srcLengthReg); + (void)emitter.Emit("\n"); + /* neg x4, x4 */ + (void)emitter.Emit("\tneg\t").Emit(patternLengthReg); + (void)emitter.Emit(", 
").Emit(patternLengthReg); + (void)emitter.Emit("\n"); + /* neg x2, x2 */ + (void)emitter.Emit("\tneg\t").Emit(srcLengthReg); + (void)emitter.Emit(", ").Emit(srcLengthReg); + (void)emitter.Emit("\n"); + Operand *first = &insn.GetOperand(kInsnSixthOpnd); + /* ldr x5, [x3,x4] */ + (void)emitter.Emit("\tldr\t"); + first->Accept(visitor); + (void)emitter.Emit(", ["); + patternStringBaseOpnd->Accept(visitor); + (void)emitter.Emit(",").Emit(patternLengthReg); + (void)emitter.Emit("]\n"); + /* the 11th operand of MOP_string_indexof is Label.FIRST_LOOP */ + Operand *labelFirstLoop = &insn.GetOperand(11); + /* .Label.FIRST_LOOP: */ + labelFirstLoop->Accept(visitor); + (void)emitter.Emit(":\n"); + /* the 7th operand of MOP_string_indexof is x7 */ + Operand *ch2 = &insn.GetOperand(7); + /* ldr x7, [x1,x2] */ + (void)emitter.Emit("\tldr\t"); + ch2->Accept(visitor); + (void)emitter.Emit(", ["); + srcStringBaseOpnd->Accept(visitor); + (void)emitter.Emit(",").Emit(srcLengthReg); + (void)emitter.Emit("]\n"); + /* cmp x5, x7 */ + (void)emitter.Emit("\tcmp\t"); + first->Accept(visitor); + (void)emitter.Emit(", "); + ch2->Accept(visitor); + (void)emitter.Emit("\n"); + /* the 13th operand of MOP_string_indexof is Label.STR1_LOOP */ + Operand *labelStr1Loop = &insn.GetOperand(13); + /* b.eq .Label.STR1_LOOP */ + (void)emitter.Emit("\tb.eq\t"); + labelStr1Loop->Accept(visitor); + (void)emitter.Emit("\n"); + /* the 12th operand of MOP_string_indexof is Label.STR2_NEXT */ + Operand *labelStr2Next = &insn.GetOperand(12); + /* .Label.STR2_NEXT: */ + labelStr2Next->Accept(visitor); + (void)emitter.Emit(":\n"); + /* adds x2, x2, #1 */ + (void)emitter.Emit("\tadds\t").Emit(srcLengthReg); + (void)emitter.Emit(", ").Emit(srcLengthReg); + (void)emitter.Emit(", #1\n"); + /* b.le .Label.FIRST_LOOP */ + (void)emitter.Emit("\tb.le\t"); + labelFirstLoop->Accept(visitor); + (void)emitter.Emit("\n"); + /* b .Label.NOMATCH */ + (void)emitter.Emit("\tb\t"); + labelNoMatch->Accept(visitor); + (void)emitter.Emit("\n"); + /* .Label.STR1_LOOP: */ + labelStr1Loop->Accept(visitor); + (void)emitter.Emit(":\n"); + /* the 8th operand of MOP_string_indexof is x8 */ + Operand *tmp1 = &insn.GetOperand(kInsnEighthOpnd); + /* adds x8, x4, #8 */ + (void)emitter.Emit("\tadds\t"); + tmp1->Accept(visitor); + (void)emitter.Emit(", ").Emit(patternLengthReg); + (void)emitter.Emit(", #8\n"); + /* the 9th operand of MOP_string_indexof is x9 */ + Operand *tmp2 = &insn.GetOperand(9); + /* add x9, x2, #8 */ + (void)emitter.Emit("\tadd\t"); + tmp2->Accept(visitor); + (void)emitter.Emit(", ").Emit(srcLengthReg); + (void)emitter.Emit(", #8\n"); + /* the 15th operand of MOP_string_indexof is Label.LAST_WORD */ + Operand *labelLastWord = &insn.GetOperand(15); + /* b.ge .Label.LAST_WORD */ + (void)emitter.Emit("\tb.ge\t"); + labelLastWord->Accept(visitor); + (void)emitter.Emit("\n"); + /* the 14th operand of MOP_string_indexof is Label.STR1_NEXT */ + Operand *labelStr1Next = &insn.GetOperand(14); + /* .Label.STR1_NEXT: */ + labelStr1Next->Accept(visitor); + (void)emitter.Emit(":\n"); + /* the 6th operand of MOP_string_indexof is x6 */ + Operand *ch1 = &insn.GetOperand(6); + /* ldr x6, [x3,x8] */ + (void)emitter.Emit("\tldr\t"); + ch1->Accept(visitor); + (void)emitter.Emit(", ["); + patternStringBaseOpnd->Accept(visitor); + (void)emitter.Emit(","); + tmp1->Accept(visitor); + (void)emitter.Emit("]\n"); + /* ldr x7, [x1,x9] */ + (void)emitter.Emit("\tldr\t"); + ch2->Accept(visitor); + (void)emitter.Emit(", ["); + srcStringBaseOpnd->Accept(visitor); + 
(void)emitter.Emit(","); + tmp2->Accept(visitor); + (void)emitter.Emit("]\n"); + /* cmp x6, x7 */ + (void)emitter.Emit("\tcmp\t"); + ch1->Accept(visitor); + (void)emitter.Emit(", "); + ch2->Accept(visitor); + (void)emitter.Emit("\n"); + /* b.ne .Label.STR2_NEXT */ + (void)emitter.Emit("\tb.ne\t"); + labelStr2Next->Accept(visitor); + (void)emitter.Emit("\n"); + /* adds x8, x8, #8 */ + (void)emitter.Emit("\tadds\t"); + tmp1->Accept(visitor); + (void)emitter.Emit(", "); + tmp1->Accept(visitor); + (void)emitter.Emit(", #8\n"); + /* add x9, x9, #8 */ + (void)emitter.Emit("\tadd\t"); + tmp2->Accept(visitor); + (void)emitter.Emit(", "); + tmp2->Accept(visitor); + (void)emitter.Emit(", #8\n"); + /* b.lt .Label.STR1_NEXT */ + (void)emitter.Emit("\tb.lt\t"); + labelStr1Next->Accept(visitor); + (void)emitter.Emit("\n"); + /* .Label.LAST_WORD: */ + labelLastWord->Accept(visitor); + (void)emitter.Emit(":\n"); + /* ldr x6, [x3] */ + (void)emitter.Emit("\tldr\t"); + ch1->Accept(visitor); + (void)emitter.Emit(", ["); + patternStringBaseOpnd->Accept(visitor); + (void)emitter.Emit("]\n"); + /* sub x9, x1, x4 */ + (void)emitter.Emit("\tsub\t"); + tmp2->Accept(visitor); + (void)emitter.Emit(", "); + srcStringBaseOpnd->Accept(visitor); + (void)emitter.Emit(", ").Emit(patternLengthReg); + (void)emitter.Emit("\n"); + /* ldr x7, [x9,x2] */ + (void)emitter.Emit("\tldr\t"); + ch2->Accept(visitor); + (void)emitter.Emit(", ["); + tmp2->Accept(visitor); + (void)emitter.Emit(", ").Emit(srcLengthReg); + (void)emitter.Emit("]\n"); + /* cmp x6, x7 */ + (void)emitter.Emit("\tcmp\t"); + ch1->Accept(visitor); + (void)emitter.Emit(", "); + ch2->Accept(visitor); + (void)emitter.Emit("\n"); + /* b.ne .Label.STR2_NEXT */ + (void)emitter.Emit("\tb.ne\t"); + labelStr2Next->Accept(visitor); + (void)emitter.Emit("\n"); + Operand *retVal = &insn.GetOperand(kInsnFirstOpnd); + /* add w0, w10, w2 */ + (void)emitter.Emit("\tadd\t"); + retVal->Accept(visitor); + (void)emitter.Emit(", "); + resultTmp->Accept(visitor); + (void)emitter.Emit(", "); + srcLengthOpnd->Accept(visitor); + (void)emitter.Emit("\n"); + /* the 17th operand of MOP_string_indexof Label.ret */ + Operand *labelRet = &insn.GetOperand(17); + /* b .Label.ret */ + (void)emitter.Emit("\tb\t"); + labelRet->Accept(visitor); + (void)emitter.Emit("\n"); + /* .Label.NOMATCH: */ + labelNoMatch->Accept(visitor); + (void)emitter.Emit(":\n"); + /* mov w0, #-1 */ + (void)emitter.Emit("\tmov\t"); + retVal->Accept(visitor); + (void)emitter.Emit(", #-1\n"); + /* .Label.ret: */ + labelRet->Accept(visitor); + (void)emitter.Emit(":\n"); +} + +/* + * intrinsic_compare_swap_int x0, xt, xs, x1, x2, w3, w4, lable1, label2 + * add xt, x1, x2 + * label1: + * ldaxr ws, [xt] + * cmp ws, w3 + * b.ne label2 + * stlxr ws, w4, [xt] + * cbnz ws, label1 + * label2: + * cset x0, eq + */ +void AArch64AsmEmitter::EmitCompareAndSwapInt(Emitter &emitter, const Insn &insn) const { + /* MOP_compare_and_swapI and MOP_compare_and_swapL have 8 operands */ + ASSERT(insn.GetOperandSize() > kInsnEighthOpnd, "ensure the operands number"); + const MOperator mOp = insn.GetMachineOpcode(); + const InsnDesc *md = &AArch64CG::kMd[mOp]; + Operand *temp0 = &insn.GetOperand(kInsnSecondOpnd); + Operand *temp1 = &insn.GetOperand(kInsnThirdOpnd); + Operand *obj = &insn.GetOperand(kInsnFourthOpnd); + Operand *offset = &insn.GetOperand(kInsnFifthOpnd); + A64OpndEmitVisitor visitor(emitter, nullptr); + /* add xt, x1, x2 */ + (void)emitter.Emit("\tadd\t"); + temp0->Accept(visitor); + (void)emitter.Emit(", "); + obj->Accept(visitor); + 
(void)emitter.Emit(", "); + offset->Accept(visitor); + (void)emitter.Emit("\n"); + Operand *label1 = &insn.GetOperand(kInsnEighthOpnd); + /* label1: */ + label1->Accept(visitor); + (void)emitter.Emit(":\n"); + /* ldaxr ws, [xt] */ + (void)emitter.Emit("\tldaxr\t"); + temp1->Accept(visitor); + (void)emitter.Emit(", ["); + temp0->Accept(visitor); + (void)emitter.Emit("]\n"); + Operand *expectedValue = &insn.GetOperand(kInsnSixthOpnd); + const OpndDesc *expectedValueProp = md->opndMD[kInsnSixthOpnd]; + /* cmp ws, w3 */ + (void)emitter.Emit("\tcmp\t"); + temp1->Accept(visitor); + (void)emitter.Emit(", "); + A64OpndEmitVisitor visitorExpect(emitter, expectedValueProp); + expectedValue->Accept(visitorExpect); + (void)emitter.Emit("\n"); + constexpr uint32 kInsnNinethOpnd = 8; + Operand *label2 = &insn.GetOperand(kInsnNinethOpnd); + /* b.ne label2 */ + (void)emitter.Emit("\tbne\t"); + label2->Accept(visitor); + (void)emitter.Emit("\n"); + Operand *newValue = &insn.GetOperand(kInsnSeventhOpnd); + /* stlxr ws, w4, [xt] */ + (void)emitter.Emit("\tstlxr\t"); + (void)emitter.Emit(AArch64CG::intRegNames[AArch64CG::kR32List][static_cast(temp1)->GetRegisterNumber()]); + (void)emitter.Emit(", "); + newValue->Accept(visitor); + (void)emitter.Emit(", ["); + temp0->Accept(visitor); + (void)emitter.Emit("]\n"); + /* cbnz ws, label1 */ + (void)emitter.Emit("\tcbnz\t"); + (void)emitter.Emit(AArch64CG::intRegNames[AArch64CG::kR32List][static_cast(temp1)->GetRegisterNumber()]); + (void)emitter.Emit(", "); + label1->Accept(visitor); + (void)emitter.Emit("\n"); + /* label2: */ + label2->Accept(visitor); + (void)emitter.Emit(":\n"); + Operand *retVal = &insn.GetOperand(kInsnFirstOpnd); + /* cset x0, eq */ + (void)emitter.Emit("\tcset\t"); + retVal->Accept(visitor); + (void)emitter.Emit(", EQ\n"); +} + +void AArch64AsmEmitter::EmitCTlsDescRel(Emitter &emitter, const Insn &insn) const { + const InsnDesc *md = &AArch64CG::kMd[MOP_tls_desc_rel]; + Operand *result = &insn.GetOperand(kInsnFirstOpnd); + Operand *src = &insn.GetOperand(kInsnSecondOpnd); + Operand *symbol = &insn.GetOperand(kInsnThirdOpnd); + auto stImmOpnd = static_cast(symbol); + std::string symName = stImmOpnd->GetName(); + symName += stImmOpnd->GetSymbol()->GetStorageClass() == kScPstatic ? + std::to_string(emitter.GetCG()->GetMIRModule()->CurFunction()->GetPuidx()) : ""; + A64OpndEmitVisitor resultVisitor(emitter, md->opndMD[0]); + A64OpndEmitVisitor srcVisitor(emitter, md->opndMD[1]); + (void)emitter.Emit("\t").Emit("add").Emit("\t"); + result->Accept(resultVisitor); + (void)emitter.Emit(", "); + src->Accept(srcVisitor); + (void)emitter.Emit(", #:tprel_hi12:").Emit(symName).Emit(", lsl #12\n"); + (void)emitter.Emit("\t").Emit("add").Emit("\t"); + result->Accept(resultVisitor); + (void)emitter.Emit(", "); + result->Accept(resultVisitor); + (void)emitter.Emit(", #:tprel_lo12_nc:").Emit(symName).Emit("\n"); +} + +void AArch64AsmEmitter::EmitCTlsDescCall(Emitter &emitter, const Insn &insn) const { + const InsnDesc *md = &AArch64CG::kMd[MOP_tls_desc_call]; + Operand *func = &insn.GetOperand(kInsnSecondOpnd); + Operand *symbol = &insn.GetOperand(kInsnThirdOpnd); + const OpndDesc *prop = md->opndMD[kInsnSecondOpnd]; + auto *stImmOpnd = static_cast(symbol); + std::string symName = stImmOpnd->GetName(); + symName += stImmOpnd->GetSymbol()->GetStorageClass() == kScPstatic ? 
+ std::to_string(emitter.GetCG()->GetMIRModule()->CurFunction()->GetPuidx()) : ""; + A64OpndEmitVisitor funcVisitor(emitter, prop); + /* adrp x0, :tlsdesc:symbol */ + (void)emitter.Emit("\t").Emit("adrp\tx0, :tlsdesc:").Emit(symName).Emit("\n"); + /* ldr x1, [x0, #tlsdesc_lo12:symbol] */ + (void)emitter.Emit("\t").Emit("ldr").Emit("\t"); + func->Accept(funcVisitor); + (void)emitter.Emit(", [x0, #:tlsdesc_lo12:").Emit(symName).Emit("]\n"); + /* add x0 ,#tlsdesc_lo12:symbol */ + (void)emitter.Emit("\t").Emit("add\tx0, x0, :tlsdesc_lo12:").Emit(symName).Emit("\n"); + /* .tlsdesccall */ + (void)emitter.Emit("\t").Emit(".tlsdesccall").Emit("\t").Emit(symName).Emit("\n"); + /* blr xd */ + (void)emitter.Emit("\t").Emit("blr").Emit("\t"); + func->Accept(funcVisitor); + (void)emitter.Emit("\n"); +} + +void AArch64AsmEmitter::EmitSyncLockTestSet(Emitter &emitter, const Insn &insn) const { + const InsnDesc *md = &AArch64CG::kMd[insn.GetMachineOpcode()]; + auto *result = &insn.GetOperand(kInsnFirstOpnd); + auto *temp = &insn.GetOperand(kInsnSecondOpnd); + auto *addr = &insn.GetOperand(kInsnThirdOpnd); + auto *value = &insn.GetOperand(kInsnFourthOpnd); + auto *label = &insn.GetOperand(kInsnFifthOpnd); + A64OpndEmitVisitor resultVisitor(emitter, md->opndMD[kInsnFirstOpnd]); + A64OpndEmitVisitor tempVisitor(emitter, md->opndMD[kInsnSecondOpnd]); + A64OpndEmitVisitor addrVisitor(emitter, md->opndMD[kInsnThirdOpnd]); + A64OpndEmitVisitor valueVisitor(emitter, md->opndMD[kInsnFourthOpnd]); + A64OpndEmitVisitor labelVisitor(emitter, md->opndMD[kInsnFifthOpnd]); + /* label: */ + label->Accept(labelVisitor); + (void)emitter.Emit(":\n"); + /* ldxr x0, [x2] */ + (void)emitter.Emit("\t").Emit("ldxr").Emit("\t"); + result->Accept(resultVisitor); + (void)emitter.Emit(", ["); + addr->Accept(addrVisitor); + (void)emitter.Emit("]\n"); + /* stxr w1, x3, [x2] */ + (void)emitter.Emit("\t").Emit("stxr").Emit("\t"); + temp->Accept(tempVisitor); + (void)emitter.Emit(", "); + value->Accept(valueVisitor); + (void)emitter.Emit(", ["); + addr->Accept(addrVisitor); + (void)emitter.Emit("]\n"); + /* cbnz w1, label */ + (void)emitter.Emit("\t").Emit("cbnz").Emit("\t"); + temp->Accept(tempVisitor); + (void)emitter.Emit(", "); + label->Accept(labelVisitor); + (void)emitter.Emit("\n"); + /* dmb ish */ + (void)emitter.Emit("\t").Emit("dmb").Emit("\t").Emit("ish").Emit("\n"); +} + +void AArch64AsmEmitter::EmitCheckThrowPendingException(Emitter &emitter) const { + /* + * mrs x16, TPIDR_EL0 + * ldr x16, [x16, #64] + * ldr x16, [x16, #8] + * cbz x16, .lnoexception + * bl MCC_ThrowPendingException + * .lnoexception: + */ + (void)emitter.Emit("\t").Emit("mrs").Emit("\tx16, TPIDR_EL0"); + (void)emitter.Emit("\n"); + (void)emitter.Emit("\t").Emit("ldr").Emit("\tx16, [x16, #64]"); + (void)emitter.Emit("\n"); + (void)emitter.Emit("\t").Emit("ldr").Emit("\tx16, [x16, #8]"); + (void)emitter.Emit("\n"); + (void)emitter.Emit("\t").Emit("cbz").Emit("\tx16, .lnoeh.").Emit(maplebe::CG::GetCurCGFunc()->GetName()); + (void)emitter.Emit("\n"); + (void)emitter.Emit("\t").Emit("bl").Emit("\tMCC_ThrowPendingException"); + (void)emitter.Emit("\n"); + (void)emitter.Emit(".lnoeh.").Emit(maplebe::CG::GetCurCGFunc()->GetName()).Emit(":"); + (void)emitter.Emit("\n"); +} + +void AArch64AsmEmitter::EmitLazyBindingRoutine(Emitter &emitter, const Insn &insn) const { + /* ldr xzr, [xs] */ + const InsnDesc *md = &AArch64CG::kMd[MOP_adrp_ldr]; + + Operand *opnd0 = &insn.GetOperand(kInsnFirstOpnd); + const OpndDesc *prop0 = md->opndMD[0]; + A64OpndEmitVisitor 
visitor(emitter, prop0); + + /* emit "ldr xzr,[xs]" */ +#ifdef USE_32BIT_REF + (void)emitter.Emit("\t").Emit("ldr").Emit("\twzr, ["); +#else + (void)emitter.Emit("\t").Emit("ldr").Emit("\txzr, ["); +#endif /* USE_32BIT_REF */ + opnd0->Accept(visitor); + (void)emitter.Emit("]"); + (void)emitter.Emit("\t// Lazy binding\n"); +} + +void AArch64AsmEmitter::PrepareVectorOperand(RegOperand *regOpnd, uint32 &compositeOpnds, Insn &insn) const { + VectorRegSpec* vecSpec = static_cast(insn).GetAndRemoveRegSpecFromList(); + compositeOpnds = (vecSpec->compositeOpnds > 0) ? vecSpec->compositeOpnds : compositeOpnds; + regOpnd->SetVecLanePosition(vecSpec->vecLane); + switch (insn.GetMachineOpcode()) { + case MOP_vanduuu: + case MOP_vxoruuu: + case MOP_voruuu: + case MOP_vnotuu: + case MOP_vextuuui: { + regOpnd->SetVecLaneSize(k8ByteSize); + regOpnd->SetVecElementSize(k8BitSize); + break; + } + case MOP_vandvvv: + case MOP_vxorvvv: + case MOP_vorvvv: + case MOP_vnotvv: + case MOP_vextvvvi: { + regOpnd->SetVecLaneSize(k16ByteSize); + regOpnd->SetVecElementSize(k8BitSize); + break; + } + default: { + regOpnd->SetVecLaneSize(vecSpec->vecLaneMax); + regOpnd->SetVecElementSize(vecSpec->vecElementSize); + break; + } + } +} + +struct CfiDescr { + const std::string name; + uint32 opndCount; + /* create 3 OperandType array to store cfi instruction's operand type */ + std::array opndTypes; +}; + +static CfiDescr cfiDescrTable[cfi::kOpCfiLast + 1] = { +#define CFI_DEFINE(k, sub, n, o0, o1, o2) \ + { ".cfi_" #k, n, { Operand::kOpd##o0, Operand::kOpd##o1, Operand::kOpd##o2 } }, +#define ARM_DIRECTIVES_DEFINE(k, sub, n, o0, o1, o2) \ + { "." #k, n, { Operand::kOpd##o0, Operand::kOpd##o1, Operand::kOpd##o2 } }, +#include "cfi.def" +#undef CFI_DEFINE +#undef ARM_DIRECTIVES_DEFINE + { ".cfi_undef", 0, { Operand::kOpdUndef, Operand::kOpdUndef, Operand::kOpdUndef } } +}; + +void AArch64AsmEmitter::EmitAArch64CfiInsn(Emitter &emitter, const Insn &insn) const { + MOperator mOp = insn.GetMachineOpcode(); + CfiDescr &cfiDescr = cfiDescrTable[mOp]; + (void)emitter.Emit("\t").Emit(cfiDescr.name); + for (uint32 i = 0; i < cfiDescr.opndCount; ++i) { + (void)emitter.Emit(" "); + Operand &curOperand = insn.GetOperand(i); + cfi::CFIOpndEmitVisitor cfiOpndEmitVisitor(emitter); + curOperand.Accept(cfiOpndEmitVisitor); + if (i < (cfiDescr.opndCount - 1)) { + (void)emitter.Emit(","); + } + } + (void)emitter.Emit("\n"); +} + +struct DbgDescr { + const std::string name; + uint32 opndCount; + /* create 3 OperandType array to store dbg instruction's operand type */ + std::array opndTypes; +}; + +static DbgDescr dbgDescrTable[mpldbg::kOpDbgLast + 1] = { +#define DBG_DEFINE(k, sub, n, o0, o1, o2) \ + { #k, n, { Operand::kOpd##o0, Operand::kOpd##o1, Operand::kOpd##o2 } }, +#include "dbg.def" +#undef DBG_DEFINE + { "undef", 0, { Operand::kOpdUndef, Operand::kOpdUndef, Operand::kOpdUndef } } +}; + +void AArch64AsmEmitter::EmitAArch64DbgInsn(FuncEmitInfo &funcEmitInfo, Emitter &emitter, const Insn &insn) const { + MOperator mOp = insn.GetMachineOpcode(); + DbgDescr &dbgDescr = dbgDescrTable[mOp]; + switch (mOp) { + case mpldbg::OP_DBG_scope: { + unsigned scopeId = static_cast(static_cast(insn.GetOperand(0)).GetValue()); + (void)emitter.Emit(".LScp." + std::to_string(scopeId)); + unsigned val = static_cast(static_cast(insn.GetOperand(1)).GetValue()); + (void)emitter.Emit((val == 0) ? "B:" : "E:"); + + CGFunc &cgFunc = funcEmitInfo.GetCGFunc(); + MIRFunction &mirFunc = cgFunc.GetFunction(); + EmitStatus status = (val == 0) ? 
kBeginEmited : kEndEmited; + cgFunc.GetCG()->GetMIRModule()->GetDbgInfo()->SetFuncScopeIdStatus(&mirFunc, scopeId, status); + break; + } + default: { + (void)emitter.Emit("\t.").Emit(dbgDescr.name); + for (uint32 i = 0; i < dbgDescr.opndCount; ++i) { + (void)emitter.Emit(" "); + Operand &curOperand = insn.GetOperand(i); + mpldbg::DBGOpndEmitVisitor dbgOpndEmitVisitor(emitter); + curOperand.Accept(dbgOpndEmitVisitor); + } + break; + } + } + (void)emitter.Emit("\n"); +} + +bool AArch64AsmEmitter::CheckInsnRefField(const Insn &insn, size_t opndIndex) const { + if (insn.IsAccessRefField() && insn.AccessMem()) { + Operand &opnd0 = insn.GetOperand(opndIndex); + if (opnd0.IsRegister()) { + static_cast(opnd0).SetRefField(true); + return true; + } + } + return false; +} + +/* new phase manager */ +bool CgEmission::PhaseRun(maplebe::CGFunc &f) { + Emitter *emitter = f.GetCG()->GetEmitter(); + CHECK_NULL_FATAL(emitter); + AsmFuncEmitInfo funcEmitInfo(f); + emitter->EmitLocalVariable(f); + static_cast(emitter)->Run(funcEmitInfo); + emitter->EmitHugeSoRoutines(); + return false; +} +MAPLE_TRANSFORM_PHASE_REGISTER(CgEmission, cgemit) +} /* namespace maplebe */ diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_fixshortbranch.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_fixshortbranch.cpp new file mode 100644 index 0000000000000000000000000000000000000000..5264d2801a999620ccb93e7651b306a0f5e24476 --- /dev/null +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_fixshortbranch.cpp @@ -0,0 +1,138 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "aarch64_fixshortbranch.h" +#include "cg.h" +#include "mpl_logging.h" +#include "common_utils.h" + +namespace maplebe { +uint32 AArch64FixShortBranch::CalculateAlignRange(const BB &bb, uint32 addr) const { + if (addr == 0) { + return addr; + } + uint32 alignPower = bb.GetAlignPower(); + /* + * The algorithm can avoid the problem that alignment causes conditional branch out of range in two stages. + * 1. asm: .mpl -> .s + * The pseudo-instruction [.p2align 5] is 12B. + * kAlignPseudoSize = 12 / 4 = 3 + * 2. link: .s -> .o + * The pseudo-instruction will be expanded to nop. + * eg. .p2align 5 + * alignPower = 5, alignValue = 2^5 = 32 + * range = (32 - ((addr - 1) * 4) % 32) / 4 - 1 + * + * =======> max[range, kAlignPseudoSize] + */ + uint32 range = ((1U << alignPower) - (((addr - 1) * kInsnSize) & ((1U << alignPower) - 1))) / kInsnSize - 1; + return range > kAlignPseudoSize ? 
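/* Worked example for CalculateAlignRange (a sketch only; it assumes kInsnSize == 4 and
 * kAlignPseudoSize == 3, as described in the comment above):
 *   alignPower = 5 (.p2align 5, i.e. 32-byte alignment), addr = 7
 *   range = (32 - ((7 - 1) * 4) % 32) / 4 - 1 = (32 - 24) / 4 - 1 = 1
 *   max(range, kAlignPseudoSize) = max(1, 3) = 3
 * so three instruction slots are reserved for the alignment pseudo-instruction here. */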
range : kAlignPseudoSize; +} + +void AArch64FixShortBranch::SetInsnId() const { + uint32 i = 0; + AArch64CGFunc *aarch64CGFunc = static_cast(cgFunc); + FOR_ALL_BB(bb, aarch64CGFunc) { + if (aarch64CGFunc->GetMirModule().IsCModule() && bb->IsBBNeedAlign() && bb->GetAlignNopNum() != 0) { + i = i + CalculateAlignRange(*bb, i); + } + FOR_BB_INSNS(insn, bb) { + if (!insn->IsMachineInstruction()) { + continue; + } + i += insn->GetAtomicNum(); + insn->SetId(i); + if (insn->GetMachineOpcode() == MOP_adrp_ldr && CGOptions::IsLazyBinding() && !cgFunc->GetCG()->IsLibcore()) { + /* For 1 additional EmitLazyBindingRoutine in lazybinding + * see function AArch64Insn::Emit in file aarch64_insn.cpp + */ + ++i; + } + } + } +} + +/* + * TBZ/TBNZ instruction is generated under -O2, these branch instructions only have a range of +/-32KB. + * If the branch target is not reachable, we split tbz/tbnz into combination of ubfx and cbz/cbnz, which + * will clobber one extra register. With LSRA under -O2, we can use one of the reserved registers R16 for + * that purpose. To save compile time, we do this change when there are more than 32KB / 4 instructions + * in the function. + */ +void AArch64FixShortBranch::FixShortBranches() const { + AArch64CGFunc *aarch64CGFunc = static_cast(cgFunc); + bool change = false; + do { + change = false; + SetInsnId(); + for (auto *bb = aarch64CGFunc->GetFirstBB(); bb != nullptr && !change; bb = bb->GetNext()) { + /* Do a backward scan searching for short branches */ + for (auto *insn = bb->GetLastInsn(); insn != nullptr && !change; insn = insn->GetPrev()) { + if (!insn->IsMachineInstruction()) { + continue; + } + MOperator thisMop = insn->GetMachineOpcode(); + if (thisMop != MOP_wtbz && thisMop != MOP_wtbnz && thisMop != MOP_xtbz && thisMop != MOP_xtbnz) { + continue; + } + LabelOperand &label = static_cast(insn->GetOperand(kInsnThirdOpnd)); + /* should not be commented out after bug fix */ + if (aarch64CGFunc->DistanceCheck(*bb, label.GetLabelIndex(), insn->GetId())) { + continue; + } + auto ® = static_cast(insn->GetOperand(kInsnFirstOpnd)); + ImmOperand &bitSize = aarch64CGFunc->CreateImmOperand(1, k8BitSize, false); + auto &bitPos = static_cast(insn->GetOperand(kInsnSecondOpnd)); + MOperator ubfxOp = MOP_undef; + MOperator cbOp = MOP_undef; + switch (thisMop) { + case MOP_wtbz: + ubfxOp = MOP_wubfxrri5i5; + cbOp = MOP_wcbz; + break; + case MOP_wtbnz: + ubfxOp = MOP_wubfxrri5i5; + cbOp = MOP_wcbnz; + break; + case MOP_xtbz: + ubfxOp = MOP_xubfxrri6i6; + cbOp = MOP_xcbz; + break; + case MOP_xtbnz: + ubfxOp = MOP_xubfxrri6i6; + cbOp = MOP_xcbnz; + break; + default: + break; + } + RegOperand &tmp = aarch64CGFunc->GetOrCreatePhysicalRegisterOperand( + R16, (ubfxOp == MOP_wubfxrri5i5) ? 
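/* A before/after sketch of the rewrite built below (label name and source register are
 * examples; R16 is the reserved scratch register mentioned in the comment above):
 *   tbz  w1, #3, .Ltarget        // .Ltarget farther away than +/-32KB
 *     --->
 *   ubfx w16, w1, #3, #1         // extract the tested bit into w16
 *   cbz  w16, .Ltarget           // cbz/cbnz reaches +/-1MB
 */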
k32BitSize : k64BitSize, kRegTyInt); + (void)bb->InsertInsnAfter(*insn, cgFunc->GetInsnBuilder()->BuildInsn(cbOp, tmp, label)); + (void)bb->InsertInsnAfter(*insn, cgFunc->GetInsnBuilder()->BuildInsn(ubfxOp, tmp, reg, bitPos, bitSize)); + bb->RemoveInsn(*insn); + change = true; + } + } + } while (change); +} + +bool CgFixShortBranch::PhaseRun(maplebe::CGFunc &func) { + auto *fixShortBranch = GetPhaseAllocator()->New(&func); + CHECK_FATAL(fixShortBranch != nullptr, "AArch64FixShortBranch instance create failure"); + fixShortBranch->FixShortBranches(); + return false; +} +MAPLE_TRANSFORM_PHASE_REGISTER(CgFixShortBranch, fixshortbranch) +} /* namespace maplebe */ + diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_global.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_global.cpp new file mode 100644 index 0000000000000000000000000000000000000000..d4d2cd5bb5a474f9a47e362878b7071885ee5d3d --- /dev/null +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_global.cpp @@ -0,0 +1,2434 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "aarch64_global.h" +#include "aarch64_reaching.h" +#include "aarch64_cg.h" +#include "aarch64_live.h" + +namespace maplebe { +using namespace maple; +#define GLOBAL_DUMP CG_DEBUG_FUNC(cgFunc) + +constexpr uint32 kExMOpTypeSize = 9; +constexpr uint32 kLsMOpTypeSize = 15; + +MOperator exMOpTable[kExMOpTypeSize] = { + MOP_undef, MOP_xxwaddrrre, MOP_wwwaddrrre, MOP_xxwsubrrre, MOP_wwwsubrrre, + MOP_xwcmnrre, MOP_wwcmnrre, MOP_xwcmprre, MOP_wwcmprre +}; +MOperator lsMOpTable[kLsMOpTypeSize] = { + MOP_undef, MOP_xaddrrrs, MOP_waddrrrs, MOP_xsubrrrs, MOP_wsubrrrs, + MOP_xcmnrrs, MOP_wcmnrrs, MOP_xcmprrs, MOP_wcmprrs, MOP_xeorrrrs, + MOP_weorrrrs, MOP_xinegrrs, MOP_winegrrs, MOP_xiorrrrs, MOP_wiorrrrs +}; + +/* Optimize ExtendShiftOptPattern: + * ========================================================== + * nosuffix LSL LSR ASR extrn (def) + * nosuffix | F | LSL | LSR | ASR | extrn | + * LSL | F | LSL | F | F | extrn | + * LSR | F | F | LSR | F | F | + * ASR | F | F | F | ASR | F | + * exten | F | F | F | F |exten(self)| + * (use) + * =========================================================== + */ + +static bool IsZeroRegister(const Operand &opnd) { + if (!opnd.IsRegister()) { + return false; + } + const RegOperand *regOpnd = static_cast(&opnd); + return regOpnd->GetRegisterNumber() == RZR; +} + +void AArch64GlobalOpt::Run() { + OptimizeManager optManager(cgFunc); + bool hasSpillBarrier = (cgFunc.NumBBs() > kMaxBBNum) || (cgFunc.GetRD()->GetMaxInsnNO() > kMaxInsnNum); + if (cgFunc.IsAfterRegAlloc()) { + optManager.Optimize(); + optManager.Optimize(); + optManager.Optimize(); + return; + } + if (!hasSpillBarrier) { + optManager.Optimize(); + optManager.Optimize(); + optManager.Optimize(); + optManager.Optimize(); + optManager.Optimize(); + optManager.Optimize(); + optManager.Optimize(); + optManager.Optimize(); + } + optManager.Optimize(); + optManager.Optimize(); + optManager.Optimize(); +} + +/* if used Operand in insn is defined by 
zero in all define insn, return true */ +bool OptimizePattern::OpndDefByZero(Insn &insn, int32 useIdx) const { + ASSERT(insn.GetOperand(useIdx).IsRegister(), "the used Operand must be Register"); + /* Zero Register don't need be defined */ + if (IsZeroRegister(insn.GetOperand(static_cast(useIdx)))) { + return true; + } + + InsnSet defInsns = cgFunc.GetRD()->FindDefForRegOpnd(insn, useIdx); + if (defInsns.empty()) { + return false; + } + for (auto &defInsn : defInsns) { + if (!InsnDefZero(*defInsn)) { + return false; + } + } + return true; +} + +/* if used Operand in insn is defined by one in all define insn, return true */ +bool OptimizePattern::OpndDefByOne(Insn &insn, int32 useIdx) const { + ASSERT(insn.GetOperand(useIdx).IsRegister(), "the used Operand must be Register"); + /* Zero Register don't need be defined */ + if (IsZeroRegister(insn.GetOperand(static_cast(useIdx)))) { + return false; + } + InsnSet defInsns = cgFunc.GetRD()->FindDefForRegOpnd(insn, useIdx); + if (defInsns.empty()) { + return false; + } + for (auto &defInsn : defInsns) { + if (!InsnDefOne(*defInsn)) { + return false; + } + } + return true; +} + + /* if used Operand in insn is defined by one valid bit in all define insn, return true */ +bool OptimizePattern::OpndDefByOneOrZero(Insn &insn, int32 useIdx) const { + if (IsZeroRegister(insn.GetOperand(static_cast(useIdx)))) { + return true; + } + + InsnSet defInsnSet = cgFunc.GetRD()->FindDefForRegOpnd(insn, useIdx); + if (defInsnSet.empty()) { + return false; + } + + for (auto &defInsn : defInsnSet) { + if (!InsnDefOneOrZero(*defInsn)) { + return false; + } + } + return true; +} + +/* if defined operand(must be first insn currently) in insn is const one, return true */ +bool OptimizePattern::InsnDefOne(const Insn &insn) { + MOperator defMop = insn.GetMachineOpcode(); + switch (defMop) { + case MOP_wmovri32: + case MOP_xmovri64: { + Operand &srcOpnd = insn.GetOperand(1); + ASSERT(srcOpnd.IsIntImmediate(), "expects ImmOperand"); + ImmOperand &srcConst = static_cast(srcOpnd); + int64 srcConstValue = srcConst.GetValue(); + if (srcConstValue == 1) { + return true; + } + return false; + } + default: + return false; + } +} + +/* if defined operand(must be first insn currently) in insn is const zero, return true */ +bool OptimizePattern::InsnDefZero(const Insn &insn) { + MOperator defMop = insn.GetMachineOpcode(); + switch (defMop) { + case MOP_wmovri32: + case MOP_xmovri64: { + Operand &srcOpnd = insn.GetOperand(kInsnSecondOpnd); + ASSERT(srcOpnd.IsIntImmediate(), "expects ImmOperand"); + ImmOperand &srcConst = static_cast(srcOpnd); + int64 srcConstValue = srcConst.GetValue(); + if (srcConstValue == 0) { + return true; + } + return false; + } + case MOP_xmovrr: + case MOP_wmovrr: + return IsZeroRegister(insn.GetOperand(kInsnSecondOpnd)); + default: + return false; + } +} + +/* if defined operand(must be first insn currently) in insn has only one valid bit, return true */ +bool OptimizePattern::InsnDefOneOrZero(const Insn &insn) { + MOperator defMop = insn.GetMachineOpcode(); + switch (defMop) { + case MOP_wcsetrc: + case MOP_xcsetrc: + return true; + case MOP_wmovri32: + case MOP_xmovri64: { + Operand &defOpnd = insn.GetOperand(kInsnSecondOpnd); + ASSERT(defOpnd.IsIntImmediate(), "expects ImmOperand"); + auto &defConst = static_cast(defOpnd); + int64 defConstValue = defConst.GetValue(); + if (defConstValue != 0 && defConstValue != 1) { + return false; + } else { + return true; + } + } + case MOP_xmovrr: + case MOP_wmovrr: { + return 
IsZeroRegister(insn.GetOperand(kInsnSecondOpnd)); + } + case MOP_wlsrrri5: + case MOP_xlsrrri6: { + Operand &opnd2 = insn.GetOperand(kInsnThirdOpnd); + ASSERT(opnd2.IsIntImmediate(), "expects ImmOperand"); + ImmOperand &opndImm = static_cast(opnd2); + int64 shiftBits = opndImm.GetValue(); + if (((defMop == MOP_wlsrrri5) && (shiftBits == k32BitSize - 1)) || + ((defMop == MOP_xlsrrri6) && (shiftBits == k64BitSize - 1))) { + return true; + } else { + return false; + } + } + default: + return false; + } +} + +void ReplaceAsmListReg(const Insn *insn, uint32 index, uint32 regNO, Operand *newOpnd) { + MapleList *list = &static_cast(insn->GetOperand(index)).GetOperands(); + int32 size = static_cast(list->size()); + for (int i = 0; i < size; ++i) { + RegOperand *opnd = static_cast(*(list->begin())); + list->pop_front(); + if (opnd->GetRegisterNumber() == regNO) { + list->push_back(static_cast(newOpnd)); + } else { + list->push_back(opnd); + } + } +} + +void OptimizePattern::ReplaceAllUsedOpndWithNewOpnd(const InsnSet &useInsnSet, uint32 regNO, + Operand &newOpnd, bool updateInfo) const { + for (auto useInsn : useInsnSet) { + if (useInsn->GetMachineOpcode() == MOP_asm) { + ReplaceAsmListReg(useInsn, kAsmInputListOpnd, regNO, &newOpnd); + } + const InsnDesc *md = useInsn->GetDesc(); + uint32 opndNum = useInsn->GetOperandSize(); + for (uint32 i = 0; i < opndNum; ++i) { + Operand &opnd = useInsn->GetOperand(i); + auto *regProp = md->opndMD[i]; + if (!regProp->IsRegUse() && !opnd.IsMemoryAccessOperand()) { + continue; + } + + if (opnd.IsRegister() && (static_cast(opnd).GetRegisterNumber() == regNO)) { + useInsn->SetOperand(i, newOpnd); + if (updateInfo) { + cgFunc.GetRD()->InitGenUse(*useInsn->GetBB(), false); + } + } else if (opnd.IsMemoryAccessOperand()) { + MemOperand &memOpnd = static_cast(opnd); + RegOperand *base = memOpnd.GetBaseRegister(); + RegOperand *index = memOpnd.GetIndexRegister(); + MemOperand *newMem = nullptr; + if (base != nullptr && (base->GetRegisterNumber() == regNO)) { + newMem = static_cast(opnd.Clone(*cgFunc.GetMemoryPool())); + CHECK_FATAL(newMem != nullptr, "null ptr check"); + newMem->SetBaseRegister(*static_cast(&newOpnd)); + useInsn->SetOperand(i, *newMem); + if (updateInfo) { + cgFunc.GetRD()->InitGenUse(*useInsn->GetBB(), false); + } + } + if (index != nullptr && (index->GetRegisterNumber() == regNO)) { + newMem = static_cast(opnd.Clone(*cgFunc.GetMemoryPool())); + CHECK_FATAL(newMem != nullptr, "null ptr check"); + newMem->SetIndexRegister(*static_cast(&newOpnd)); + if (static_cast(newOpnd).GetValidBitsNum() != index->GetValidBitsNum()) { + newMem->UpdateExtend(MemOperand::kSignExtend); + } + useInsn->SetOperand(i, *newMem); + if (updateInfo) { + cgFunc.GetRD()->InitGenUse(*useInsn->GetBB(), false); + } + } + } + } + } +} + +bool ForwardPropPattern::IsUseInsnSetValid(Insn &insn, regno_t firstRegNO, regno_t secondRegNO) { + bool toDoOpt = true; + for (auto useInsn : firstRegUseInsnSet) { + if (!cgFunc.GetRD()->RegIsLiveBetweenInsn(secondRegNO, insn, *useInsn)) { + toDoOpt = false; + break; + } + /* part defined */ + if ((useInsn->GetMachineOpcode() == MOP_xmovkri16) || (useInsn->GetMachineOpcode() == MOP_wmovkri16) || + (useInsn->GetMachineOpcode() == MOP_wbfirri5i5) || (useInsn->GetMachineOpcode() == MOP_xbfirri6i6)) { + toDoOpt = false; + break; + } + if (useInsn->GetMachineOpcode() == MOP_asm) { + toDoOpt = false; + break; + } + InsnSet defInsnSet = cgFunc.GetRD()->FindDefForRegOpnd(*useInsn, firstRegNO, true); + if (defInsnSet.size() > 1) { + toDoOpt = false; + break; 
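/* A minimal sketch of the copy propagation this check guards (virtual register numbers
 * are examples): when every use of the copy's destination is reached only by the copy,
 *   mov V101, V102
 *   add V103, V101, #4
 *     --->
 *   mov V102, V102               // becomes a self-move, removable later
 *   add V103, V102, #4
 */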
+ } else if (defInsnSet.size() == 1 && *defInsnSet.begin() != &insn) { + toDoOpt = false; + break; + } + } + return toDoOpt; +} + +bool ForwardPropPattern::CheckCondition(Insn &insn) { + if (!insn.IsMachineInstruction()) { + return false; + } + if ((insn.GetMachineOpcode() != MOP_xmovrr) && (insn.GetMachineOpcode() != MOP_wmovrr) && + (insn.GetMachineOpcode() != MOP_xmovrr_uxtw)) { + return false; + } + Operand &firstOpnd = insn.GetOperand(kInsnFirstOpnd); + Operand &secondOpnd = insn.GetOperand(kInsnSecondOpnd); + if (firstOpnd.GetSize() != secondOpnd.GetSize() && insn.GetMachineOpcode() != MOP_xmovrr_uxtw) { + return false; + } + auto &firstRegOpnd = static_cast(firstOpnd); + auto &secondRegOpnd = static_cast(secondOpnd); + uint32 firstRegNO = firstRegOpnd.GetRegisterNumber(); + uint32 secondRegNO = secondRegOpnd.GetRegisterNumber(); + if (IsZeroRegister(firstRegOpnd) || !firstRegOpnd.IsVirtualRegister() || !secondRegOpnd.IsVirtualRegister()) { + return false; + } + firstRegUseInsnSet = cgFunc.GetRD()->FindUseForRegOpnd(insn, firstRegNO, true); + if (firstRegUseInsnSet.empty()) { + return false; + } + InsnSet secondRegDefInsnSet = cgFunc.GetRD()->FindDefForRegOpnd(insn, secondRegNO, true); + if (secondRegDefInsnSet.size() != 1 || RegOperand::IsSameReg(firstOpnd, secondOpnd)) { + return false; + } + return IsUseInsnSetValid(insn, firstRegNO, secondRegNO); +} + +void ForwardPropPattern::Optimize(Insn &insn) { + Operand &firstOpnd = insn.GetOperand(kInsnFirstOpnd); + Operand &secondOpnd = insn.GetOperand(kInsnSecondOpnd); + RegOperand &firstRegOpnd = static_cast(firstOpnd); + uint32 firstRegNO = firstRegOpnd.GetRegisterNumber(); + for (auto *useInsn : firstRegUseInsnSet) { + if (useInsn->GetMachineOpcode() == MOP_asm) { + ReplaceAsmListReg(useInsn, kAsmInputListOpnd, firstRegNO, &secondOpnd); + cgFunc.GetRD()->InitGenUse(*useInsn->GetBB(), false); + continue; + } + const InsnDesc *md = useInsn->GetDesc(); + uint32 opndNum = useInsn->GetOperandSize(); + for (uint32 i = 0; i < opndNum; ++i) { + Operand &opnd = useInsn->GetOperand(i); + const OpndDesc *regProp = md->GetOpndDes(i); + if (!regProp->IsRegUse() && !opnd.IsMemoryAccessOperand()) { + continue; + } + + if (opnd.IsRegister() && (static_cast(opnd).GetRegisterNumber() == firstRegNO)) { + useInsn->SetOperand(i, secondOpnd); + if (((useInsn->GetMachineOpcode() == MOP_xmovrr) || (useInsn->GetMachineOpcode() == MOP_wmovrr)) && + (static_cast(useInsn->GetOperand(kInsnSecondOpnd)).IsVirtualRegister()) && + (static_cast(useInsn->GetOperand(kInsnFirstOpnd)).IsVirtualRegister())) { + (void)modifiedBB.insert(useInsn->GetBB()); + } + cgFunc.GetRD()->InitGenUse(*useInsn->GetBB(), false); + } else if (opnd.IsMemoryAccessOperand()) { + MemOperand &memOpnd = static_cast(opnd); + RegOperand *base = memOpnd.GetBaseRegister(); + RegOperand *index = memOpnd.GetIndexRegister(); + MemOperand *newMem = nullptr; + if (base != nullptr && (base->GetRegisterNumber() == firstRegNO)) { + newMem = static_cast(opnd.Clone(*cgFunc.GetMemoryPool())); + CHECK_FATAL(newMem != nullptr, "null ptr check"); + newMem->SetBaseRegister(static_cast(secondOpnd)); + useInsn->SetOperand(i, *newMem); + cgFunc.GetRD()->InitGenUse(*useInsn->GetBB(), false); + } + if ((index != nullptr) && (index->GetRegisterNumber() == firstRegNO)) { + newMem = static_cast(opnd.Clone(*cgFunc.GetMemoryPool())); + CHECK_FATAL(newMem != nullptr, "null ptr check"); + newMem->SetIndexRegister(static_cast(secondOpnd)); + if (static_cast(secondOpnd).GetValidBitsNum() != index->GetValidBitsNum()) { + 
newMem->UpdateExtend(MemOperand::kSignExtend); + } + useInsn->SetOperand(i, *newMem); + cgFunc.GetRD()->InitGenUse(*useInsn->GetBB(), false); + } + } + } + } + insn.SetOperand(0, secondOpnd); + cgFunc.GetRD()->UpdateInOut(*insn.GetBB(), true); +} + +void ForwardPropPattern::RemoveMopUxtwToMov(Insn &insn) { + if (CGOptions::DoCGSSA()) { + CHECK_FATAL(false, "check case in ssa"); + } + auto &secondOpnd = static_cast(insn.GetOperand(kInsnSecondOpnd)); + auto &destOpnd = static_cast(insn.GetOperand(kInsnFirstOpnd)); + uint32 destRegNo = destOpnd.GetRegisterNumber(); + destOpnd.SetRegisterNumber(secondOpnd.GetRegisterNumber()); + auto *newOpnd = static_cast(destOpnd.Clone(*cgFunc.GetMemoryPool())); + cgFunc.InsertExtendSet(secondOpnd.GetRegisterNumber()); + InsnSet regUseInsnSet = cgFunc.GetRD()->FindUseForRegOpnd(insn, destRegNo, true); + if (regUseInsnSet.size() >= 1) { + for (auto useInsn : regUseInsnSet) { + uint32 optSize = useInsn->GetOperandSize(); + for (uint32 i = 0; i < optSize; i++) { + ASSERT(useInsn->GetOperand(i).IsRegister(), "only design for register"); + if (destRegNo == static_cast(useInsn->GetOperand(i)).GetRegisterNumber()) { + useInsn->SetOperand(i, *newOpnd); + } + } + cgFunc.GetRD()->InitGenUse(*useInsn->GetBB(), false); + } + } + insn.GetBB()->RemoveInsn(insn); +} + +void ForwardPropPattern::Init() { + firstRegUseInsnSet.clear(); +} + +void ForwardPropPattern::Run() { + bool secondTime = false; + do { + FOR_ALL_BB(bb, &cgFunc) { + if (bb->IsUnreachable() || (secondTime && modifiedBB.find(bb) == modifiedBB.end())) { + continue; + } + + if (secondTime) { + modifiedBB.erase(bb); + } + + FOR_BB_INSNS(insn, bb) { + Init(); + if (!CheckCondition(*insn)) { + if (insn->GetMachineOpcode() == MOP_xmovrr_uxtw) { + insn->SetMOP(AArch64CG::kMd[MOP_xuxtw64]); + } + continue; + } + if (insn->GetMachineOpcode() == MOP_xmovrr_uxtw) { + RemoveMopUxtwToMov(*insn); + continue; + } + Optimize(*insn); + } + } + secondTime = true; + } while (!modifiedBB.empty()); +} + +bool BackPropPattern::CheckAndGetOpnd(const Insn &insn) { + if (!insn.IsMachineInstruction()) { + return false; + } + if (!cgFunc.IsAfterRegAlloc() && (insn.GetMachineOpcode() != MOP_xmovrr) && (insn.GetMachineOpcode() != MOP_wmovrr)) { + return false; + } + if (cgFunc.IsAfterRegAlloc() && + (insn.GetMachineOpcode() != MOP_xmovrr) && (insn.GetMachineOpcode() != MOP_wmovrr) && + (insn.GetMachineOpcode() != MOP_xvmovs) && (insn.GetMachineOpcode() != MOP_xvmovd)) { + return false; + } + Operand &firstOpnd = insn.GetOperand(kInsnFirstOpnd); + Operand &secondOpnd = insn.GetOperand(kInsnSecondOpnd); + if (RegOperand::IsSameReg(firstOpnd, secondOpnd)) { + return false; + } + if (firstOpnd.GetSize() != secondOpnd.GetSize()) { + return false; + } + firstRegOpnd = &static_cast(firstOpnd); + secondRegOpnd = &static_cast(secondOpnd); + if (IsZeroRegister(*firstRegOpnd)) { + return false; + } + if (!cgFunc.IsAfterRegAlloc() && (!secondRegOpnd->IsVirtualRegister() || !firstRegOpnd->IsVirtualRegister())) { + return false; + } + firstRegNO = firstRegOpnd->GetRegisterNumber(); + secondRegNO = secondRegOpnd->GetRegisterNumber(); + return true; +} + +bool BackPropPattern::DestOpndHasUseInsns(Insn &insn) { + BB &bb = *insn.GetBB(); + InsnSet useInsnSetOfFirstOpnd; + bool findRes = cgFunc.GetRD()->FindRegUseBetweenInsn(firstRegNO, insn.GetNext(), + bb.GetLastInsn(), useInsnSetOfFirstOpnd); + if ((findRes && useInsnSetOfFirstOpnd.empty()) || + (!findRes && useInsnSetOfFirstOpnd.empty() && !bb.GetLiveOut()->TestBit(firstRegNO))) { + return false; + } 
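/* A minimal sketch of the backward propagation this pattern performs (virtual register
 * numbers are examples): the definition of the copy's source is rewritten to define the
 * copy's destination directly, and the copy itself is removed:
 *   add V102, V103, #8
 *   mov V101, V102
 *     --->
 *   add V101, V103, #8
 */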
+ return true; +} + +bool BackPropPattern::DestOpndLiveOutToEHSuccs(Insn &insn) const { + BB &bb = *insn.GetBB(); + for (auto ehSucc : bb.GetEhSuccs()) { + if (ehSucc->GetLiveIn()->TestBit(firstRegNO)) { + return true; + } + } + return false; +} + +bool BackPropPattern::CheckSrcOpndDefAndUseInsns(Insn &insn) { + BB &bb = *insn.GetBB(); + /* secondOpnd is defined in other BB */ + std::vector defInsnVec = cgFunc.GetRD()->FindRegDefBetweenInsn(secondRegNO, bb.GetFirstInsn(), insn.GetPrev()); + if (defInsnVec.size() != 1) { + return false; + } + defInsnForSecondOpnd = defInsnVec.back(); + /* don't prop sp to load/store */ + if ((defInsnForSecondOpnd->IsLoad() || defInsnForSecondOpnd->IsStore() || defInsnForSecondOpnd->IsLoadStorePair()) && + firstRegNO == RSP && cgFunc.IsAfterRegAlloc()) { + return false; + } + /* part defined */ + if ((defInsnForSecondOpnd->GetMachineOpcode() == MOP_xmovkri16) || + (defInsnForSecondOpnd->GetMachineOpcode() == MOP_wmovkri16) || + (defInsnForSecondOpnd->GetBothDefUseOpnd() != kInsnMaxOpnd) || + (defInsnForSecondOpnd->GetMachineOpcode() == MOP_asm)) { + return false; + } + if (AArch64isa::IsPseudoInstruction(defInsnForSecondOpnd->GetMachineOpcode()) || defInsnForSecondOpnd->IsCall() || + defInsnForSecondOpnd->IsTailCall()) { + return false; + } + /* unconcerned regs. */ + if ((secondRegNO >= RLR && secondRegNO <= RZR) || secondRegNO == RFP) { + return false; + } + if (defInsnForSecondOpnd->IsStore() || defInsnForSecondOpnd->IsLoad()) { + auto *memOpnd = static_cast(defInsnForSecondOpnd->GetMemOpnd()); + if (memOpnd != nullptr && !memOpnd->IsIntactIndexed()) { + return false; + } + } + + bool findFinish = cgFunc.GetRD()->FindRegUseBetweenInsn(secondRegNO, defInsnForSecondOpnd->GetNext(), + bb.GetLastInsn(), srcOpndUseInsnSet); + if (!findFinish && bb.GetLiveOut()->TestBit(secondRegNO)) { + return false; + } + if (cgFunc.IsAfterRegAlloc() && findFinish && srcOpndUseInsnSet.size() > 1) { + /* use later before killed. 
*/ + return false; + } + if (cgFunc.IsAfterRegAlloc()) { + for (auto *usePoint : srcOpndUseInsnSet) { + if (usePoint->IsCall() || usePoint->IsTailCall()) { + return false; + } + } + } + return true; +} + +bool BackPropPattern::CheckSrcOpndDefAndUseInsnsGlobal(Insn &insn) { + /* secondOpnd is defined in other BB */ + InsnSet defInsnVec = cgFunc.GetRD()->FindDefForRegOpnd(insn, secondRegNO, true); + if (defInsnVec.size() != 1) { + return false; + } + defInsnForSecondOpnd = *(defInsnVec.begin()); + /* don't prop sp to load/store */ + if ((defInsnForSecondOpnd->IsLoad() || defInsnForSecondOpnd->IsStore() || defInsnForSecondOpnd->IsLoadStorePair()) && + firstRegNO == RSP && cgFunc.IsAfterRegAlloc()) { + return false; + } + + /* ensure that there is no fisrt RegNO def/use between insn and defInsnForSecondOpnd */ + std::vector defInsnVecFirst; + + if (insn.GetBB() != defInsnForSecondOpnd->GetBB()) { + defInsnVecFirst = cgFunc.GetRD()->FindRegDefBetweenInsnGlobal(firstRegNO, defInsnForSecondOpnd, &insn); + } else { + defInsnVecFirst = cgFunc.GetRD()->FindRegDefBetweenInsn(firstRegNO, defInsnForSecondOpnd, insn.GetPrev()); + } + if (!defInsnVecFirst.empty()) { + return false; + } + /* part defined */ + if ((defInsnForSecondOpnd->GetMachineOpcode() == MOP_xmovkri16) || + (defInsnForSecondOpnd->GetMachineOpcode() == MOP_wmovkri16) || + (defInsnForSecondOpnd->GetMachineOpcode() == MOP_asm)) { + return false; + } + + if (defInsnForSecondOpnd->IsStore() || defInsnForSecondOpnd->IsLoad()) { + auto *memOpnd = static_cast(defInsnForSecondOpnd->GetMemOpnd()); + if (memOpnd != nullptr && !memOpnd->IsIntactIndexed()) { + return false; + } + } + + srcOpndUseInsnSet = cgFunc.GetRD()->FindUseForRegOpnd(*defInsnForSecondOpnd, secondRegNO, true); + /* + * useInsn is not expected to have multiple definition + * replaced opnd is not expected to have definition already + */ + return CheckReplacedUseInsn(insn); +} + +bool BackPropPattern::CheckPredefineInsn(Insn &insn) { + if (insn.GetPrev() == defInsnForSecondOpnd) { + return true; + } + std::vector preDefInsnForFirstOpndVec; + /* there is no predefine insn in current bb */ + if (!cgFunc.GetRD()->RegIsUsedOrDefBetweenInsn(firstRegNO, *defInsnForSecondOpnd, insn)) { + return false; + } + return true; +} + +bool BackPropPattern::CheckReplacedUseInsn(Insn &insn) { + for (auto *useInsn : srcOpndUseInsnSet) { + if (useInsn->GetMemOpnd() != nullptr) { + auto *a64MemOpnd = static_cast(useInsn->GetMemOpnd()); + if (!a64MemOpnd->IsIntactIndexed()) { + if (a64MemOpnd->GetBaseRegister() != nullptr && + a64MemOpnd->GetBaseRegister()->GetRegisterNumber() == secondRegNO) { + return false; + } + } + } + /* insn has been checked def */ + if (useInsn == &insn) { + if (defInsnForSecondOpnd != useInsn->GetPrev() && + cgFunc.GetRD()->FindRegUseBetweenInsnGlobal(firstRegNO, defInsnForSecondOpnd, useInsn, insn.GetBB())) { + return false; + } + continue; + } + auto checkOneDefOnly = [](const InsnSet &defSet, const Insn &oneDef, bool checkHasDef = false)->bool { + if (defSet.size() > 1) { + return false; + } else if (defSet.size() == 1) { + if (&oneDef != *(defSet.begin())) { + return false; + } + } else { + if (checkHasDef) { + CHECK_FATAL(false, "find def insn failed"); + } + } + return true; + }; + /* ensure that the use insns to be replaced is defined by defInsnForSecondOpnd only */ + if (useInsn->IsMemAccess() && static_cast( + useInsn->GetMemOpnd())->GetIndexOpt() != MemOperand::kIntact) { + return false; + } + InsnSet defInsnVecOfSrcOpnd = cgFunc.GetRD()->FindDefForRegOpnd(*useInsn, 
secondRegNO, true); + if (!checkOneDefOnly(defInsnVecOfSrcOpnd, *defInsnForSecondOpnd, true)) { + return false; + } + + InsnSet defInsnVecOfFirstReg = cgFunc.GetRD()->FindDefForRegOpnd(*useInsn, firstRegNO, true); + if (!checkOneDefOnly(defInsnVecOfFirstReg, insn)) { + return false; + } + + if (defInsnForSecondOpnd != useInsn->GetPrev() && + cgFunc.GetRD()->FindRegUseBetweenInsnGlobal(firstRegNO, defInsnForSecondOpnd, useInsn, insn.GetBB())) { + return false; + } + } + return true; +} + +bool BackPropPattern::CheckRedefineInsn(Insn &insn) { + for (auto useInsn : srcOpndUseInsnSet) { + Insn *startInsn = &insn; + Insn *endInsn = useInsn; + if (endInsn == startInsn) { + if (cgFunc.GetRD()->RegIsUsedIncaller(firstRegNO, insn, *useInsn)) { + return false; + } else { + continue; + } + } + + if (useInsn->GetBB() == insn.GetBB()) { + if (useInsn->GetId() < insn.GetId()) { + startInsn = useInsn; + endInsn = &insn; + } + } + if (!cgFunc.GetRD()->RegIsLiveBetweenInsn(firstRegNO, *startInsn, *endInsn, true, true)) { + return false; + } + if (!cgFunc.GetRD()->RegIsLiveBetweenInsn(secondRegNO, *startInsn, *endInsn, true)) { + return false; + } + } + return true; +} + +bool BackPropPattern::CheckCondition(Insn &insn) { + if (!CheckAndGetOpnd(insn)) { + return false; + } + /* Unless there is a reason that dest can not live out the current BB */ + if (cgFunc.HasAsm() && !DestOpndHasUseInsns(insn)) { + return false; + } + /* first register must not be live out to eh_succs */ + if (DestOpndLiveOutToEHSuccs(insn)) { + return false; + } + if (globalProp) { + if (!CheckSrcOpndDefAndUseInsnsGlobal(insn)) { + return false; + } + } else { + if (!CheckSrcOpndDefAndUseInsns(insn)) { + return false; + } + if (!CheckPredefineInsn(insn)) { + return false; + } + if (!CheckRedefineInsn(insn)) { + return false; + } + } + return true; +} + +void BackPropPattern::Optimize(Insn &insn) { + Operand &firstOpnd = insn.GetOperand(kInsnFirstOpnd); + ReplaceAllUsedOpndWithNewOpnd(srcOpndUseInsnSet, secondRegNO, firstOpnd, true); + /* replace define insn */ + const InsnDesc *md = defInsnForSecondOpnd->GetDesc(); + uint32 opndNum = defInsnForSecondOpnd->GetOperandSize(); + for (uint32 i = 0; i < opndNum; ++i) { + Operand &opnd = defInsnForSecondOpnd->GetOperand(i); + if (!md->opndMD[i]->IsRegDef() && !opnd.IsMemoryAccessOperand()) { + continue; + } + + if (opnd.IsRegister() && (static_cast(opnd).GetRegisterNumber() == secondRegNO)) { + /* remove remat info */ + Operand &defOp = defInsnForSecondOpnd->GetOperand(i); + CHECK_FATAL(defOp.IsRegister(), "unexpect def opnd type"); + auto &defRegOp = static_cast(defOp); + MIRPreg *preg = static_cast(cgFunc).GetPseudoRegFromVirtualRegNO( + defRegOp.GetRegisterNumber(), CGOptions::DoCGSSA()); + if (preg != nullptr) { + preg->SetOp(OP_undef); + } + defInsnForSecondOpnd->SetOperand(i, firstOpnd); + cgFunc.GetRD()->UpdateInOut(*defInsnForSecondOpnd->GetBB()); + } else if (opnd.IsMemoryAccessOperand()) { + MemOperand &memOpnd = static_cast(opnd); + RegOperand *base = memOpnd.GetBaseRegister(); + if (base != nullptr && memOpnd.GetAddrMode() == MemOperand::kAddrModeBOi && + (memOpnd.IsPostIndexed() || memOpnd.IsPreIndexed()) && base->GetRegisterNumber() == secondRegNO) { + MemOperand *newMem = static_cast(opnd.Clone(*cgFunc.GetMemoryPool())); + CHECK_FATAL(newMem != nullptr, "null ptr check"); + newMem->SetBaseRegister(static_cast(firstOpnd)); + defInsnForSecondOpnd->SetOperand(i, *newMem); + cgFunc.GetRD()->UpdateInOut(*defInsnForSecondOpnd->GetBB()); + } + } + } + /* There is special implication 
when backward propagation is allowed for physical register R0. + * This is a case that the calling func foo directly returns the result from the callee bar as follows: + * + * foo: + * bl bl // bar() + * mov vreg, X0 //res = bar() naive bkprop + * .... //X0 is not redefined ====> .... //X0 may be reused as RA sees "X0 has not been used" after bl + * mov X0, vreg //In fact, X0 is implicitly used by foo. We need to tell RA that X0 is live + * ret ret + * + * To make RA simple, we tell RA to not use X0 by keeping "mov X0, X0". That is + * foo: + * bl //bar() + * .... // Perform backward prop X0 and force X0 cant be reused + * mov X0, X0 // This can be easily remved later in peephole phase + * ret + */ + if (cgFunc.HasCall() && + !(cgFunc.GetFunction().IsReturnVoid()) && + (firstRegNO == R0) && + (static_cast(insn.GetOperand(kInsnSecondOpnd)).GetRegisterNumber() == R0)) { + /* Keep this instruction: mov R0, R0 */ + cgFunc.GetRD()->UpdateInOut(*insn.GetBB(), true); + return; + } else { + insn.GetBB()->RemoveInsn(insn); + cgFunc.GetRD()->UpdateInOut(*insn.GetBB(), true); + } +} + +void BackPropPattern::Init() { + firstRegOpnd = nullptr; + secondRegOpnd = nullptr; + firstRegNO = 0; + secondRegNO = 0; + srcOpndUseInsnSet.clear(); + defInsnForSecondOpnd = nullptr; +} + +void BackPropPattern::Run() { + bool secondTime = false; + std::set modifiedBB; + do { + FOR_ALL_BB(bb, &cgFunc) { + if (bb->IsUnreachable() || (secondTime && modifiedBB.find(bb) == modifiedBB.end())) { + continue; + } + + if (secondTime) { + modifiedBB.erase(bb); + } + + FOR_BB_INSNS_REV(insn, bb) { + Init(); + if (!CheckCondition(*insn)) { + continue; + } + (void)modifiedBB.insert(bb); + Optimize(*insn); + } + } + secondTime = true; + } while (!modifiedBB.empty()); +} + +bool CmpCsetPattern::CheckCondition(Insn &insn) { + nextInsn = insn.GetNextMachineInsn(); + if (nextInsn == nullptr || !insn.IsMachineInstruction()) { + return false; + } + + MOperator firstMop = insn.GetMachineOpcode(); + MOperator secondMop = nextInsn->GetMachineOpcode(); + if (!(((firstMop == MOP_wcmpri) || (firstMop == MOP_xcmpri)) && + ((secondMop == MOP_wcsetrc) || (secondMop == MOP_xcsetrc)))) { + return false; + } + + /* get cmp_first operand */ + cmpFirstOpnd = &(insn.GetOperand(kInsnSecondOpnd)); + /* get cmp second Operand, ImmOperand must be 0 or 1 */ + cmpSecondOpnd = &(insn.GetOperand(kInsnThirdOpnd)); + ASSERT(cmpSecondOpnd->IsIntImmediate(), "expects ImmOperand"); + ImmOperand *cmpConstOpnd = static_cast(cmpSecondOpnd); + cmpConstVal = cmpConstOpnd->GetValue(); + /* get cset first Operand */ + csetFirstOpnd = &(nextInsn->GetOperand(kInsnFirstOpnd)); + if (((cmpConstVal != 0) && (cmpConstVal != 1)) || (cmpFirstOpnd->GetSize() != csetFirstOpnd->GetSize()) || + !OpndDefByOneOrZero(insn, 1)) { + return false; + } + + InsnSet useInsnSet = cgFunc.GetRD()->FindUseForRegOpnd(insn, 0, false); + if (useInsnSet.size() > 1) { + return false; + } + return true; +} + +void CmpCsetPattern::Optimize(Insn &insn) { + Insn *csetInsn = nextInsn; + BB &bb = *insn.GetBB(); + nextInsn = nextInsn->GetNextMachineInsn(); + /* get condition Operand */ + CondOperand &cond = static_cast(csetInsn->GetOperand(kInsnSecondOpnd)); + if (((cmpConstVal == 0) && (cond.GetCode() == CC_NE)) || ((cmpConstVal == 1) && (cond.GetCode() == CC_EQ))) { + if (RegOperand::IsSameReg(*cmpFirstOpnd, *csetFirstOpnd)) { + bb.RemoveInsn(insn); + bb.RemoveInsn(*csetInsn); + } else { + MOperator mopCode = (cmpFirstOpnd->GetSize() == k64BitSize) ? 
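/* Before/after sketches of the two folds below, assuming w1 is known to hold only 0 or 1
 * (register numbers are examples):
 *   cmp w1, #0 ; cset w2, NE   --->   mov w2, w1
 *   cmp w1, #1 ; cset w2, NE   --->   eor w2, w1, #1
 */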
MOP_xmovrr : MOP_wmovrr; + Insn &newInsn = cgFunc.GetInsnBuilder()->BuildInsn(mopCode, *csetFirstOpnd, *cmpFirstOpnd); + newInsn.SetId(insn.GetId()); + bb.ReplaceInsn(insn, newInsn); + bb.RemoveInsn(*csetInsn); + } + } else if (((cmpConstVal == 1) && (cond.GetCode() == CC_NE)) || + ((cmpConstVal == 0) && (cond.GetCode() == CC_EQ))) { + MOperator mopCode = (cmpFirstOpnd->GetSize() == k64BitSize) ? MOP_xeorrri13 : MOP_weorrri12; + constexpr int64 eorImm = 1; + auto &aarch64CGFunc = static_cast(cgFunc); + ImmOperand &one = aarch64CGFunc.CreateImmOperand(eorImm, k8BitSize, false); + Insn &newInsn = cgFunc.GetInsnBuilder()->BuildInsn(mopCode, *csetFirstOpnd, *cmpFirstOpnd, one); + newInsn.SetId(insn.GetId()); + bb.ReplaceInsn(insn, newInsn); + bb.RemoveInsn(*csetInsn); + } + cgFunc.GetRD()->UpdateInOut(bb, true); +} + +void CmpCsetPattern::Init() { + cmpConstVal = 0; + cmpFirstOpnd = nullptr; + cmpSecondOpnd = nullptr; + csetFirstOpnd = nullptr; +} + +void CmpCsetPattern::Run() { + FOR_ALL_BB(bb, &cgFunc) { + FOR_BB_INSNS(insn, bb) { + Init(); + if (!CheckCondition(*insn)) { + continue; + } + Optimize(*insn); + } + } +} + +bool CselPattern::CheckCondition(Insn &insn) { + MOperator mopCode = insn.GetMachineOpcode(); + if ((mopCode != MOP_xcselrrrc) && (mopCode != MOP_wcselrrrc)) { + return false; + } + return true; +} + +void CselPattern::Optimize(Insn &insn) { + BB &bb = *insn.GetBB(); + Operand &opnd0 = insn.GetOperand(kInsnFirstOpnd); + Operand &cond = insn.GetOperand(kInsnFourthOpnd); + MOperator newMop = ((opnd0.GetSize()) == k64BitSize ? MOP_xcsetrc : MOP_wcsetrc); + Operand &rflag = cgFunc.GetOrCreateRflag(); + if (OpndDefByOne(insn, kInsnSecondOpnd) && OpndDefByZero(insn, kInsnThirdOpnd)) { + Insn &newInsn = cgFunc.GetInsnBuilder()->BuildInsn(newMop, opnd0, cond, rflag); + newInsn.SetId(insn.GetId()); + bb.ReplaceInsn(insn, newInsn); + cgFunc.GetRD()->InitGenUse(bb, false); + } else if (OpndDefByZero(insn, kInsnSecondOpnd) && OpndDefByOne(insn, kInsnThirdOpnd)) { + auto &originCond = static_cast(cond); + ConditionCode inverseCondCode = GetReverseCC(originCond.GetCode()); + if (inverseCondCode == kCcLast) { + return; + } + auto &aarchCGFunc = static_cast(cgFunc); + CondOperand &inverseCond = aarchCGFunc.GetCondOperand(inverseCondCode); + Insn &newInsn = cgFunc.GetInsnBuilder()->BuildInsn(newMop, opnd0, inverseCond, rflag); + newInsn.SetId(insn.GetId()); + bb.ReplaceInsn(insn, newInsn); + cgFunc.GetRD()->InitGenUse(bb, false); + } +} + +void CselPattern::Run() { + FOR_ALL_BB(bb, &cgFunc) { + FOR_BB_INSNS_SAFE(insn, bb, nextInsn) { + if (!CheckCondition(*insn)) { + continue; + } + Optimize(*insn); + } + } +} + +uint32 RedundantUxtPattern::GetInsnValidBit(const Insn &insn) { + MOperator mOp = insn.GetMachineOpcode(); + uint32 nRet; + switch (mOp) { + case MOP_wcsetrc: + case MOP_xcsetrc: + nRet = 1; + break; + case MOP_wldrb: + case MOP_wldarb: + case MOP_wldxrb: + case MOP_wldaxrb: + nRet = k8BitSize; + break; + case MOP_wldrh: + case MOP_wldarh: + case MOP_wldxrh: + case MOP_wldaxrh: + nRet = k16BitSize; + break; + case MOP_wmovrr: + case MOP_wmovri32: + case MOP_wldrsb: + case MOP_wldrsh: + case MOP_wldli: + case MOP_wldr: + case MOP_wldp: + case MOP_wldar: + case MOP_wmovkri16: + case MOP_wmovzri16: + case MOP_wmovnri16: + case MOP_wldxr: + case MOP_wldaxr: + case MOP_wldaxp: + case MOP_wcsincrrrc: + case MOP_wcselrrrc: + case MOP_wcsinvrrrc: + nRet = k32BitSize; + break; + default: + nRet = k64BitSize; + break; + } + return nRet; +} + +uint32 RedundantUxtPattern::GetMaximumValidBit(Insn 
&insn, uint8 index, InsnSet &visitedInsn) const { + InsnSet defInsnSet = cgFunc.GetRD()->FindDefForRegOpnd(insn, index); + if (defInsnSet.empty()) { + /* disable opt when there is no def point. */ + return k64BitSize; + } + + uint32 validBit = 0; + uint32 nMaxValidBit = 0; + for (auto &defInsn : defInsnSet) { + if (visitedInsn.find(defInsn) != visitedInsn.end()) { + continue; + } + + (void)visitedInsn.insert(defInsn); + MOperator mOp = defInsn->GetMachineOpcode(); + if ((mOp == MOP_wmovrr) || (mOp == MOP_xmovrr)) { + validBit = GetMaximumValidBit(*defInsn, 1, visitedInsn); + } else { + validBit = GetInsnValidBit(*defInsn); + } + + nMaxValidBit = nMaxValidBit < validBit ? validBit : nMaxValidBit; + } + return nMaxValidBit; +} + +bool RedundantUxtPattern::CheckCondition(Insn &insn) { + BB &bb = *insn.GetBB(); + InsnSet visitedInsn1; + InsnSet visitedInsn2; + if (!((insn.GetMachineOpcode() == MOP_xuxth32 && + GetMaximumValidBit(insn, kInsnSecondOpnd, visitedInsn1) <= k16BitSize) || + (insn.GetMachineOpcode() == MOP_xuxtb32 && + GetMaximumValidBit(insn, kInsnSecondOpnd, visitedInsn2) <= k8BitSize))) { + return false; + } + + Operand &firstOpnd = insn.GetOperand(kInsnFirstOpnd); + secondOpnd = &(insn.GetOperand(kInsnSecondOpnd)); + if (RegOperand::IsSameReg(firstOpnd, *secondOpnd)) { + bb.RemoveInsn(insn); + /* update in/out */ + cgFunc.GetRD()->UpdateInOut(bb, true); + return false; + } + useInsnSet = cgFunc.GetRD()->FindUseForRegOpnd(insn, 0, false); + RegOperand &firstRegOpnd = static_cast(firstOpnd); + firstRegNO = firstRegOpnd.GetRegisterNumber(); + /* for uxth R1, V501, R1 is parameter register, this can't be optimized. */ + if (firstRegOpnd.IsPhysicalRegister()) { + return false; + } + + if (useInsnSet.empty()) { + bb.RemoveInsn(insn); + /* update in/out */ + cgFunc.GetRD()->UpdateInOut(bb, true); + return false; + } + + RegOperand *secondRegOpnd = static_cast(secondOpnd); + ASSERT(secondRegOpnd != nullptr, "secondRegOpnd should not be nullptr"); + uint32 secondRegNO = secondRegOpnd->GetRegisterNumber(); + for (auto useInsn : useInsnSet) { + InsnSet defInsnSet = cgFunc.GetRD()->FindDefForRegOpnd(*useInsn, firstRegNO, true); + if ((defInsnSet.size() > 1) || !(cgFunc.GetRD()->RegIsLiveBetweenInsn(secondRegNO, insn, *useInsn))) { + return false; + } + } + return true; +} + +void RedundantUxtPattern::Optimize(Insn &insn) { + BB &bb = *insn.GetBB(); + ReplaceAllUsedOpndWithNewOpnd(useInsnSet, firstRegNO, *secondOpnd, true); + bb.RemoveInsn(insn); + cgFunc.GetRD()->UpdateInOut(bb, true); +} + +void RedundantUxtPattern::Init() { + useInsnSet.clear(); + secondOpnd = nullptr; +} + +void RedundantUxtPattern::Run() { + FOR_ALL_BB(bb, &cgFunc) { + if (bb->IsUnreachable()) { + continue; + } + FOR_BB_INSNS_SAFE(insn, bb, nextInsn) { + Init(); + if (!CheckCondition(*insn)) { + continue; + } + Optimize(*insn); + } + } +} + +bool LocalVarSaveInsnPattern::CheckFirstInsn(const Insn &firstInsn) { + MOperator mOp = firstInsn.GetMachineOpcode(); + if (mOp != MOP_xmovrr && mOp != MOP_wmovrr) { + return false; + } + firstInsnSrcOpnd = &(firstInsn.GetOperand(kInsnSecondOpnd)); + RegOperand *firstInsnSrcReg = static_cast(firstInsnSrcOpnd); + if (firstInsnSrcReg->GetRegisterNumber() != R0) { + return false; + } + firstInsnDestOpnd = &(firstInsn.GetOperand(kInsnFirstOpnd)); + RegOperand *firstInsnDestReg = static_cast(firstInsnDestOpnd); + if (firstInsnDestReg->IsPhysicalRegister()) { + return false; + } + return true; +} + +bool LocalVarSaveInsnPattern::CheckSecondInsn() { + MOperator mOp = 
secondInsn->GetMachineOpcode(); + if (mOp != MOP_wstr && mOp != MOP_xstr) { + return false; + } + secondInsnSrcOpnd = &(secondInsn->GetOperand(kInsnFirstOpnd)); + if (!RegOperand::IsSameReg(*firstInsnDestOpnd, *secondInsnSrcOpnd)) { + return false; + } + /* check memOperand is stack memOperand, and x0 is stored in localref var region */ + secondInsnDestOpnd = &(secondInsn->GetOperand(kInsnSecondOpnd)); + MemOperand *secondInsnDestMem = static_cast(secondInsnDestOpnd); + RegOperand *baseReg = secondInsnDestMem->GetBaseRegister(); + RegOperand *indexReg = secondInsnDestMem->GetIndexRegister(); + if ((baseReg == nullptr) || !(cgFunc.IsFrameReg(*baseReg)) || (indexReg != nullptr)) { + return false; + } + return true; +} + +bool LocalVarSaveInsnPattern::CheckAndGetUseInsn(Insn &firstInsn) { + InsnSet useInsnSet = cgFunc.GetRD()->FindUseForRegOpnd(firstInsn, kInsnFirstOpnd, false); + if (useInsnSet.size() != 2) { /* secondInsn and another useInsn */ + return false; + } + + /* useInsnSet includes secondInsn and another useInsn */ + for (auto tmpUseInsn : useInsnSet) { + if (tmpUseInsn->GetId() != secondInsn->GetId()) { + useInsn = tmpUseInsn; + return true; + } + } + return false; +} + +bool LocalVarSaveInsnPattern::CheckLiveRange(const Insn &firstInsn) { + uint32 maxInsnNO = cgFunc.GetRD()->GetMaxInsnNO(); + uint32 useInsnID = useInsn->GetId(); + uint32 defInsnID = firstInsn.GetId(); + uint32 distance = useInsnID > defInsnID ? useInsnID - defInsnID : defInsnID - useInsnID; + float liveRangeProportion = static_cast(distance) / maxInsnNO; + /* 0.3 is a balance for real optimization effect */ + if (liveRangeProportion < 0.3) { + return false; + } + return true; +} + +bool LocalVarSaveInsnPattern::CheckCondition(Insn &firstInsn) { + secondInsn = firstInsn.GetNext(); + if (secondInsn == nullptr) { + return false; + } + /* check firstInsn is : mov vreg, R0; */ + if (!CheckFirstInsn(firstInsn)) { + return false; + } + /* check the secondInsn is : str vreg, stackMem */ + if (!CheckSecondInsn()) { + return false; + } + /* find the uses of the vreg */ + if (!CheckAndGetUseInsn(firstInsn)) { + return false; + } + /* simulate live range using insn distance */ + if (!CheckLiveRange(firstInsn)) { + return false; + } + RegOperand *firstInsnDestReg = static_cast(firstInsnDestOpnd); + regno_t firstInsnDestRegNO = firstInsnDestReg->GetRegisterNumber(); + InsnSet defInsnSet = cgFunc.GetRD()->FindDefForRegOpnd(*useInsn, firstInsnDestRegNO, true); + if (defInsnSet.size() != 1) { + return false; + } + ASSERT((*(defInsnSet.begin()))->GetId() == firstInsn.GetId(), "useInsn has only one define Insn : firstInsn"); + /* check whether the stack mem is changed or not */ + MemOperand *secondInsnDestMem = static_cast(secondInsnDestOpnd); + int64 memOffset = secondInsnDestMem->GetOffsetImmediate()->GetOffsetValue(); + InsnSet memDefInsnSet = cgFunc.GetRD()->FindDefForMemOpnd(*useInsn, memOffset, true); + if (memDefInsnSet.size() != 1) { + return false; + } + if ((*(memDefInsnSet.begin()))->GetId() != secondInsn->GetId()) { + return false; + } + /* check whether has call between use and def */ + if (!cgFunc.GetRD()->HasCallBetweenDefUse(firstInsn, *useInsn)) { + return false; + } + return true; +} + +void LocalVarSaveInsnPattern::Optimize(Insn &insn) { + /* insert ldr insn before useInsn */ + MOperator ldrOpCode = secondInsnSrcOpnd->GetSize() == k64BitSize ? 
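/* A sketch of the transformation finished below (virtual register number and stack offset
 * are examples):
 *   bl   foo
 *   mov  V101, x0
 *   str  V101, [x29, #16]
 *   ...                          // long live range, single remaining use
 *   add  V102, V101, #1
 *     --->
 *   bl   foo
 *   str  x0, [x29, #16]          // store the return value directly
 *   ...
 *   ldr  V101, [x29, #16]        // reload just before the use
 *   add  V102, V101, #1
 */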
MOP_xldr : MOP_wldr; + Insn &ldrInsn = cgFunc.GetInsnBuilder()->BuildInsn(ldrOpCode, *secondInsnSrcOpnd, *secondInsnDestOpnd); + ldrInsn.SetId(useInsn->GetId() - 1); + useInsn->GetBB()->InsertInsnBefore(*useInsn, ldrInsn); + cgFunc.GetRD()->UpdateInOut(*useInsn->GetBB(), true); + secondInsn->SetOperand(kInsnFirstOpnd, *firstInsnSrcOpnd); + BB *saveInsnBB = insn.GetBB(); + saveInsnBB->RemoveInsn(insn); + cgFunc.GetRD()->UpdateInOut(*saveInsnBB, true); +} + +void LocalVarSaveInsnPattern::Init() { + firstInsnSrcOpnd = nullptr; + firstInsnDestOpnd = nullptr; + secondInsnSrcOpnd = nullptr; + secondInsnDestOpnd = nullptr; + useInsn = nullptr; + secondInsn = nullptr; +} + +void LocalVarSaveInsnPattern::Run() { + FOR_ALL_BB(bb, &cgFunc) { + if (bb->IsCleanup()) { + continue; + } + FOR_BB_INSNS(insn, bb) { + if (!insn->IsMachineInstruction()) { + continue; + } + if (!insn->IsCall()) { + continue; + } + Insn *firstInsn = insn->GetNextMachineInsn(); + if (firstInsn == nullptr) { + continue; + } + Init(); + if (!CheckCondition(*firstInsn)) { + continue; + } + Optimize(*firstInsn); + } + } +} + +void ExtendShiftOptPattern::SetExMOpType(const Insn &use) { + MOperator op = use.GetMachineOpcode(); + switch (op) { + case MOP_xaddrrr: + case MOP_xxwaddrrre: + case MOP_xaddrrrs: { + exMOpType = kExAdd; + break; + } + case MOP_waddrrr: + case MOP_wwwaddrrre: + case MOP_waddrrrs: { + exMOpType = kEwAdd; + break; + } + case MOP_xsubrrr: + case MOP_xxwsubrrre: + case MOP_xsubrrrs: { + exMOpType = kExSub; + break; + } + case MOP_wsubrrr: + case MOP_wwwsubrrre: + case MOP_wsubrrrs: { + exMOpType = kEwSub; + break; + } + case MOP_xcmnrr: + case MOP_xwcmnrre: + case MOP_xcmnrrs: { + exMOpType = kExCmn; + break; + } + case MOP_wcmnrr: + case MOP_wwcmnrre: + case MOP_wcmnrrs: { + exMOpType = kEwCmn; + break; + } + case MOP_xcmprr: + case MOP_xwcmprre: + case MOP_xcmprrs: { + exMOpType = kExCmp; + break; + } + case MOP_wcmprr: + case MOP_wwcmprre: + case MOP_wcmprrs: { + exMOpType = kEwCmp; + break; + } + default: { + exMOpType = kExUndef; + } + } +} + +void ExtendShiftOptPattern::SetLsMOpType(const Insn &use) { + MOperator op = use.GetMachineOpcode(); + switch (op) { + case MOP_xaddrrr: + case MOP_xaddrrrs: { + lsMOpType = kLxAdd; + break; + } + case MOP_waddrrr: + case MOP_waddrrrs: { + lsMOpType = kLwAdd; + break; + } + case MOP_xsubrrr: + case MOP_xsubrrrs: { + lsMOpType = kLxSub; + break; + } + case MOP_wsubrrr: + case MOP_wsubrrrs: { + lsMOpType = kLwSub; + break; + } + case MOP_xcmnrr: + case MOP_xcmnrrs: { + lsMOpType = kLxCmn; + break; + } + case MOP_wcmnrr: + case MOP_wcmnrrs: { + lsMOpType = kLwCmn; + break; + } + case MOP_xcmprr: + case MOP_xcmprrs: { + lsMOpType = kLxCmp; + break; + } + case MOP_wcmprr: + case MOP_wcmprrs: { + lsMOpType = kLwCmp; + break; + } + case MOP_xeorrrr: + case MOP_xeorrrrs: { + lsMOpType = kLxEor; + break; + } + case MOP_weorrrr: + case MOP_weorrrrs: { + lsMOpType = kLwEor; + break; + } + case MOP_xinegrr: + case MOP_xinegrrs: { + lsMOpType = kLxNeg; + replaceIdx = kInsnSecondOpnd; + break; + } + case MOP_winegrr: + case MOP_winegrrs: { + lsMOpType = kLwNeg; + replaceIdx = kInsnSecondOpnd; + break; + } + case MOP_xiorrrr: + case MOP_xiorrrrs: { + lsMOpType = kLxIor; + break; + } + case MOP_wiorrrr: + case MOP_wiorrrrs: { + lsMOpType = kLwIor; + break; + } + default: { + lsMOpType = kLsUndef; + } + } +} + +void ExtendShiftOptPattern::SelectExtendOrShift(const Insn &def) { + MOperator op = def.GetMachineOpcode(); + switch (op) { + case MOP_xsxtb32: + case MOP_xsxtb64: extendOp = 
ExtendShiftOperand::kSXTB; + break; + case MOP_xsxth32: + case MOP_xsxth64: extendOp = ExtendShiftOperand::kSXTH; + break; + case MOP_xsxtw64: extendOp = ExtendShiftOperand::kSXTW; + break; + case MOP_xuxtb32: extendOp = ExtendShiftOperand::kUXTB; + break; + case MOP_xuxth32: extendOp = ExtendShiftOperand::kUXTH; + break; + case MOP_xuxtw64: extendOp = ExtendShiftOperand::kUXTW; + break; + case MOP_wlslrri5: + case MOP_xlslrri6: shiftOp = BitShiftOperand::kLSL; + break; + case MOP_xlsrrri6: + case MOP_wlsrrri5: shiftOp = BitShiftOperand::kLSR; + break; + case MOP_xasrrri6: + case MOP_wasrrri5: shiftOp = BitShiftOperand::kASR; + break; + default: { + extendOp = ExtendShiftOperand::kUndef; + shiftOp = BitShiftOperand::kUndef; + } + } +} + +/* first use must match SelectExtendOrShift */ +bool ExtendShiftOptPattern::CheckDefUseInfo(Insn &use, uint32 size) { + auto ®Operand = static_cast(defInsn->GetOperand(kInsnFirstOpnd)); + Operand &defSrcOpnd = defInsn->GetOperand(kInsnSecondOpnd); + CHECK_FATAL(defSrcOpnd.IsRegister(), "defSrcOpnd must be register!"); + auto ®DefSrc = static_cast(defSrcOpnd); + if (regDefSrc.IsPhysicalRegister()) { + return false; + } + /* + * has Implict cvt + * + * avoid cases as following: + * lsr x2, x2, #8 + * ubfx w2, x2, #0, #32 lsr x2, x2, #8 + * eor w0, w0, w2 ===> eor w0, w0, x2 ==\=> eor w0, w0, w2, LSR #8 + * + * the truncation causes the wrong value by shift right + * shift left does not matter + */ + auto &useDefOpnd = static_cast(use.GetOperand(kInsnFirstOpnd)); + if ((shiftOp != BitShiftOperand::kUndef || extendOp != ExtendShiftOperand::kUndef) && + (regDefSrc.GetSize() > regOperand.GetSize() || useDefOpnd.GetSize() != size)) { + return false; + } + if ((shiftOp == BitShiftOperand::kLSR || shiftOp == BitShiftOperand::kASR) && + (defSrcOpnd.GetSize() > size)) { + return false; + } + regno_t defSrcRegNo = regDefSrc.GetRegisterNumber(); + /* check regDefSrc */ + InsnSet defSrcSet = cgFunc.GetRD()->FindDefForRegOpnd(use, defSrcRegNo, true); + /* The first defSrcInsn must be closest to useInsn */ + if (defSrcSet.empty()) { + return false; + } + Insn *defSrcInsn = *defSrcSet.begin(); + const InsnDesc *md = defSrcInsn->GetDesc(); + if ((size != regOperand.GetSize()) && md->IsMove()) { + return false; + } + if (defInsn->GetBB() == use.GetBB()) { + /* check replace reg def between defInsn and currInsn */ + Insn *tmpInsn = defInsn->GetNext(); + while (tmpInsn != &use) { + if (tmpInsn == defSrcInsn || tmpInsn == nullptr) { + return false; + } + tmpInsn = tmpInsn->GetNext(); + } + } else { /* def use not in same BB */ + if (defSrcInsn->GetBB() != defInsn->GetBB()) { + return false; + } + if (defSrcInsn->GetId() > defInsn->GetId()) { + return false; + } + } + /* case: + * lsl w0, w0, #5 + * eor w0, w2, w0 + * ---> + * eor w0, w2, w0, lsl 5 + */ + if (defSrcInsn == defInsn) { + InsnSet replaceRegUseSet = cgFunc.GetRD()->FindUseForRegOpnd(*defInsn, defSrcRegNo, true); + if (replaceRegUseSet.size() != k1BitSize) { + return false; + } + removeDefInsn = true; + } + return true; +} + +/* Check whether ExtendShiftOptPattern optimization can be performed. */ +SuffixType ExtendShiftOptPattern::CheckOpType(const Operand &lastOpnd) const { + /* Assign values to useType and defType. 
*/ + uint32 useType = kNoSuffix; + uint32 defType = shiftOp; + if (extendOp != ExtendShiftOperand::kUndef) { + defType = kExten; + } + if (lastOpnd.IsOpdShift()) { + BitShiftOperand lastShiftOpnd = static_cast(lastOpnd); + useType = lastShiftOpnd.GetShiftOp(); + } else if (lastOpnd.IsOpdExtend()) { + ExtendShiftOperand lastExtendOpnd = static_cast(lastOpnd); + useType = kExten; + /* two insn is exten and exten ,value is exten(oneself) */ + if (useType == defType && extendOp != lastExtendOpnd.GetExtendOp()) { + return kNoSuffix; + } + } + return kDoOptimizeTable[useType][defType]; +} + +/* new Insn extenType: + * ===================== + * (useMop) (defMop) (newmop) + * | nosuffix | all | all| + * | exten | ex | ex | + * | ls | ex | ls | + * | asr | !asr | F | + * | !asr | asr | F | + * (useMop) (defMop) + * ===================== + */ +void ExtendShiftOptPattern::ReplaceUseInsn(Insn &use, const Insn &def, uint32 amount) { + AArch64CGFunc &a64CGFunc = static_cast(cgFunc); + uint32 lastIdx = use.GetOperandSize() - k1BitSize; + Operand &lastOpnd = use.GetOperand(lastIdx); + SuffixType optType = CheckOpType(lastOpnd); + Operand *shiftOpnd = nullptr; + if (optType == kNoSuffix) { + return; + }else if (optType == kExten) { + replaceOp = exMOpTable[exMOpType]; + if (amount > k4BitSize) { + return; + } + shiftOpnd = &a64CGFunc.CreateExtendShiftOperand(extendOp, amount, static_cast(k64BitSize)); + } else { + replaceOp = lsMOpTable[lsMOpType]; + if (amount >= k32BitSize) { + return; + } + shiftOpnd = &a64CGFunc.CreateBitShiftOperand(shiftOp, amount, static_cast(k64BitSize)); + } + if (replaceOp == MOP_undef) { + return; + } + + Insn *replaceUseInsn = nullptr; + Operand &firstOpnd = use.GetOperand(kInsnFirstOpnd); + Operand *secondOpnd = &use.GetOperand(kInsnSecondOpnd); + if (replaceIdx == kInsnSecondOpnd) { /* replace neg insn */ + secondOpnd = &def.GetOperand(kInsnSecondOpnd); + replaceUseInsn = &cgFunc.GetInsnBuilder()->BuildInsn(replaceOp, firstOpnd, *secondOpnd, *shiftOpnd); + } else { + Operand &thirdOpnd = def.GetOperand(kInsnSecondOpnd); + replaceUseInsn = &cgFunc.GetInsnBuilder()->BuildInsn(replaceOp, firstOpnd, *secondOpnd, thirdOpnd, *shiftOpnd); + } + use.GetBB()->ReplaceInsn(use, *replaceUseInsn); + if (GLOBAL_DUMP) { + LogInfo::MapleLogger() << ">>>>>>> In ExtendShiftOptPattern : <<<<<<<\n"; + LogInfo::MapleLogger() << "=======ReplaceInsn :\n"; + use.Dump(); + LogInfo::MapleLogger() << "=======NewInsn :\n"; + replaceUseInsn->Dump(); + } + if (removeDefInsn) { + if (GLOBAL_DUMP) { + LogInfo::MapleLogger() << ">>>>>>> In ExtendShiftOptPattern : <<<<<<<\n"; + LogInfo::MapleLogger() << "=======RemoveDefInsn :\n"; + defInsn->Dump(); + } + defInsn->GetBB()->RemoveInsn(*defInsn); + } + cgFunc.GetRD()->InitGenUse(*defInsn->GetBB(), false); + cgFunc.GetRD()->UpdateInOut(*use.GetBB(), true); + newInsn = replaceUseInsn; + optSuccess = true; +} + +/* + * pattern1: + * UXTB/UXTW X0, W1 <---- def x0 + * .... 
<---- (X0 not used) + * AND/SUB/EOR X0, X1, X0 <---- use x0 + * ======> + * AND/SUB/EOR X0, X1, W1 UXTB/UXTW + * + * pattern2: + * LSL/LSR X0, X1, #8 + * ....(X0 not used) + * AND/SUB/EOR X0, X1, X0 + * ======> + * AND/SUB/EOR X0, X1, X1 LSL/LSR #8 + */ +void ExtendShiftOptPattern::Optimize(Insn &insn) { + uint32 amount = 0; + uint32 offset = 0; + uint32 lastIdx = insn.GetOperandSize() - k1BitSize; + Operand &lastOpnd = insn.GetOperand(lastIdx); + if (lastOpnd.IsOpdShift()) { + BitShiftOperand &lastShiftOpnd = static_cast(lastOpnd); + amount = lastShiftOpnd.GetShiftAmount(); + } else if (lastOpnd.IsOpdExtend()) { + ExtendShiftOperand &lastExtendOpnd = static_cast(lastOpnd); + amount = lastExtendOpnd.GetShiftAmount(); + } + if (shiftOp != BitShiftOperand::kUndef) { + ImmOperand &immOpnd = static_cast(defInsn->GetOperand(kInsnThirdOpnd)); + offset = static_cast(immOpnd.GetValue()); + } + amount += offset; + + ReplaceUseInsn(insn, *defInsn, amount); +} + +void ExtendShiftOptPattern::DoExtendShiftOpt(Insn &insn) { + Init(); + if (!CheckCondition(insn)) { + return; + } + Optimize(insn); + if (optSuccess) { + DoExtendShiftOpt(*newInsn); + } +} + +/* check and set: + * exMOpType, lsMOpType, extendOp, shiftOp, defInsn + */ +bool ExtendShiftOptPattern::CheckCondition(Insn &insn) { + SetLsMOpType(insn); + SetExMOpType(insn); + MOperator mOp = insn.GetMachineOpcode(); + if ((exMOpType == kExUndef) && (lsMOpType == kLsUndef)) { + return false; + } + RegOperand ®Operand = static_cast(insn.GetOperand(replaceIdx)); + if (regOperand.IsPhysicalRegister()) { + return false; + } + regno_t regNo = regOperand.GetRegisterNumber(); + InsnSet regDefInsnSet = cgFunc.GetRD()->FindDefForRegOpnd(insn, regNo, true); + Operand &firstOpnd = insn.GetOperand(kInsnFirstOpnd); + Operand &secondOpnd = insn.GetOperand(kInsnSecondOpnd); + if (firstOpnd.GetSize() != secondOpnd.GetSize() && (mOp == MOP_xcmprr || mOp == MOP_xcmnrr)) { + return false; + } + if (regDefInsnSet.size() != k1BitSize) { + return false; + } + defInsn = *regDefInsnSet.begin(); + CHECK_FATAL((defInsn != nullptr), "defInsn is null!"); + + SelectExtendOrShift(*defInsn); + /* defInsn must be shift or extend */ + if ((extendOp == ExtendShiftOperand::kUndef) && (shiftOp == BitShiftOperand::kUndef)) { + return false; + } + return CheckDefUseInfo(insn, regOperand.GetSize()); +} + +void ExtendShiftOptPattern::Init() { + replaceOp = MOP_undef; + extendOp = ExtendShiftOperand::kUndef; + shiftOp = BitShiftOperand::kUndef; + defInsn = nullptr; + replaceIdx = kInsnThirdOpnd; + newInsn = nullptr; + optSuccess = false; + removeDefInsn = false; + exMOpType = kExUndef; + lsMOpType = kLsUndef; +} + +void ExtendShiftOptPattern::Run() { + if (!cgFunc.GetMirModule().IsCModule()) { + return; + } + FOR_ALL_BB_REV(bb, &cgFunc) { + FOR_BB_INSNS_REV(insn, bb) { + if (!insn->IsMachineInstruction()) { + continue; + } + DoExtendShiftOpt(*insn); + } + } +} + +void ExtenToMovPattern::Run() { + if (!cgFunc.GetMirModule().IsCModule()) { + return; + } + FOR_ALL_BB(bb, &cgFunc) { + FOR_BB_INSNS(insn, bb) { + if (!insn->IsMachineInstruction()) { + continue; + } + if (!CheckCondition(*insn)) { + continue; + } + Optimize(*insn); + } + } +} + +/* Check for Implicit uxtw */ +bool ExtenToMovPattern::CheckHideUxtw(const Insn &insn, regno_t regno) const { + const InsnDesc *md = &AArch64CG::kMd[insn.GetMachineOpcode()]; + if (md->IsMove()) { + return false; + } + uint32 optSize = insn.GetOperandSize(); + for (uint32 i = 0; i < optSize; ++i) { + if (regno == 
static_cast(insn.GetOperand(i)).GetRegisterNumber()) { + auto *curOpndDescription = md->GetOpndDes(i); + if (curOpndDescription->IsDef() && curOpndDescription->GetSize() == k32BitSize) { + return true; + } + break; + } + } + return false; +} + +bool ExtenToMovPattern::CheckUxtw(Insn &insn) { + if (insn.GetOperand(kInsnFirstOpnd).GetSize() == k64BitSize && + insn.GetOperand(kInsnSecondOpnd).GetSize() == k32BitSize) { + ASSERT(insn.GetOperand(kInsnSecondOpnd).IsRegister(), "is not Register"); + regno_t regno = static_cast(insn.GetOperand(kInsnSecondOpnd)).GetRegisterNumber(); + InsnSet preDef = cgFunc.GetRD()->FindDefForRegOpnd(insn, kInsnSecondOpnd, false); + if (preDef.empty()) { + return false; + } + for (auto defInsn : preDef) { + if (!CheckHideUxtw(*defInsn, regno)) { + return false; + } + } + replaceMop = MOP_xmovrr_uxtw; + return true; + } + return false; +} + +bool ExtenToMovPattern::CheckSrcReg(Insn &insn, regno_t srcRegNo, uint32 validNum, std::vector &checkedInsns) { + InsnSet srcDefSet = cgFunc.GetRD()->FindDefForRegOpnd(insn, srcRegNo, true); + for (auto defInsn : srcDefSet) { + /* Skip checked instructions */ + if (std::find(checkedInsns.begin(), checkedInsns.end(), defInsn) != checkedInsns.end()) { + continue; + } + checkedInsns.push_back(defInsn); + + CHECK_FATAL((defInsn != nullptr), "defInsn is null!"); + /* for loop, def insn might be the same insn */ + if (defInsn == &insn) { + continue; + } + MOperator mOp = defInsn->GetMachineOpcode(); + switch (mOp) { + case MOP_wiorrri12: + case MOP_weorrri12: { + /* check immVal if mop is OR */ + ImmOperand &imm = static_cast(defInsn->GetOperand(kInsnThirdOpnd)); + auto bitNum = static_cast(imm.GetValue()); + if ((bitNum >> validNum) != 0) { + return false; + } + } + case MOP_wandrri12: { + /* check defSrcReg */ + RegOperand &defSrcRegOpnd = static_cast(defInsn->GetOperand(kInsnSecondOpnd)); + regno_t defSrcRegNo = defSrcRegOpnd.GetRegisterNumber(); + if (!CheckSrcReg(*defInsn, defSrcRegNo, validNum, checkedInsns)) { + return false; + } + break; + } + case MOP_wandrrr: { + /* check defSrcReg */ + RegOperand &defSrcRegOpnd1 = static_cast(defInsn->GetOperand(kInsnSecondOpnd)); + RegOperand &defSrcRegOpnd2 = static_cast(defInsn->GetOperand(kInsnThirdOpnd)); + regno_t defSrcRegNo1 = defSrcRegOpnd1.GetRegisterNumber(); + regno_t defSrcRegNo2 = defSrcRegOpnd2.GetRegisterNumber(); + if (!CheckSrcReg(*defInsn, defSrcRegNo1, validNum, checkedInsns) && + !CheckSrcReg(*defInsn, defSrcRegNo2, validNum, checkedInsns)) { + return false; + } + break; + } + case MOP_wiorrrr: + case MOP_weorrrr: { + /* check defSrcReg */ + RegOperand &defSrcRegOpnd1 = static_cast(defInsn->GetOperand(kInsnSecondOpnd)); + RegOperand &defSrcRegOpnd2 = static_cast(defInsn->GetOperand(kInsnThirdOpnd)); + regno_t defSrcRegNo1 = defSrcRegOpnd1.GetRegisterNumber(); + regno_t defSrcRegNo2 = defSrcRegOpnd2.GetRegisterNumber(); + if (!CheckSrcReg(*defInsn, defSrcRegNo1, validNum, checkedInsns) || + !CheckSrcReg(*defInsn, defSrcRegNo2, validNum, checkedInsns)) { + return false; + } + break; + } + case MOP_wldrb: { + if (validNum != k8BitSize) { + return false; + } + break; + } + case MOP_wldrh: { + if (validNum != k16BitSize) { + return false; + } + break; + } + default: + return false; + } + } + return true; +} + +bool ExtenToMovPattern::BitNotAffected(Insn &insn, uint32 validNum) { + RegOperand &firstOpnd = static_cast(insn.GetOperand(kInsnFirstOpnd)); + RegOperand &secondOpnd = static_cast(insn.GetOperand(kInsnSecondOpnd)); + regno_t desRegNo = firstOpnd.GetRegisterNumber(); 
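+ /* the extension is redundant when desReg is not redefined and every def of srcReg only produces
+ * the low validNum bits, e.g. (roughly) "ldrb w1, [...]" feeding "uxtb w0, w1" can then be
+ * relaxed to "mov w0, w1" (replaceMop = MOP_wmovrr). */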
+ regno_t srcRegNo = secondOpnd.GetRegisterNumber(); + InsnSet desDefSet = cgFunc.GetRD()->FindDefForRegOpnd(insn, desRegNo, true); + /* desReg is not redefined */ + if (!desDefSet.empty()) { + return false; + } + std::vector checkedInsns; + if (!CheckSrcReg(insn, srcRegNo, validNum, checkedInsns)) { + return false; + } + replaceMop = MOP_wmovrr; + return true; +} + +bool ExtenToMovPattern::CheckCondition(Insn &insn) { + MOperator mOp = insn.GetMachineOpcode(); + switch (mOp) { + case MOP_xuxtw64: return CheckUxtw(insn); + case MOP_xuxtb32: return BitNotAffected(insn, k8BitSize); + case MOP_xuxth32: return BitNotAffected(insn, k16BitSize); + default: return false; + } +} + +/* No initialization required */ +void ExtenToMovPattern::Init() { + replaceMop = MOP_undef; +} + +void ExtenToMovPattern::Optimize(Insn &insn) { + insn.SetMOP(AArch64CG::kMd[replaceMop]); +} + +void SameDefPattern::Run() { + FOR_ALL_BB_REV(bb, &cgFunc) { + FOR_BB_INSNS_REV(insn, bb) { + if (!CheckCondition(*insn) || !bb->GetEhPreds().empty()) { + continue; + } + Optimize(*insn); + } + } +} + +void SameDefPattern::Init() { + currInsn = nullptr; + sameInsn = nullptr; +} + +bool SameDefPattern::CheckCondition(Insn &insn) { + MOperator mOp = insn.GetMachineOpcode(); + if (insn.GetBB()->GetPreds().size() > k1BitSize) { + return false; + } + if (insn.GetBB()->HasCall()) { + return false; + } + return (mOp == MOP_wcmprr) || (mOp == MOP_xcmprr) || (mOp == MOP_xwcmprre) || (mOp == MOP_xcmprrs); +} + +void SameDefPattern::Optimize(Insn &insn) { + InsnSet sameDefSet = cgFunc.GetRD()->FindDefForRegOpnd(insn, 0, false); + if (sameDefSet.size() != k1BitSize) { + return; + } + Insn *sameDefInsn = *sameDefSet.begin(); + if (sameDefInsn == nullptr) { + return; + } + currInsn = &insn; + sameInsn = sameDefInsn; + if (!IsSameDef()) { + return; + } + if (GLOBAL_DUMP) { + LogInfo::MapleLogger() << ">>>>>>> In SameDefPattern : <<<<<<<\n"; + LogInfo::MapleLogger() << "=======remove insn: \n"; + insn.Dump(); + LogInfo::MapleLogger() << "=======sameDef insn: \n"; + sameDefInsn->Dump(); + } + insn.GetBB()->RemoveInsn(insn); +} + +bool SameDefPattern::IsSameDef() { + if (!CheckCondition(*sameInsn)) { + return false; + } + if (currInsn == sameInsn) { + return false; + } + if (currInsn->GetMachineOpcode() != sameInsn->GetMachineOpcode()) { + return false; + } + for (uint32 i = k1BitSize; i < currInsn->GetOperandSize(); ++i) { + Operand &opnd0 = currInsn->GetOperand(i); + Operand &opnd1 = sameInsn->GetOperand(i); + if (!IsSameOperand(opnd0, opnd1)) { + return false; + } + } + return true; +} + +bool SameDefPattern::IsSameOperand(Operand &opnd0, Operand &opnd1) { + if (opnd0.IsRegister()) { + CHECK_FATAL(opnd1.IsRegister(), "must be RegOperand!"); + RegOperand ®Opnd0 = static_cast(opnd0); + RegOperand ®Opnd1 = static_cast(opnd1); + if (!RegOperand::IsSameReg(regOpnd0, regOpnd1)) { + return false; + } + regno_t regNo = regOpnd0.GetRegisterNumber(); + /* src reg not redefined between sameInsn and currInsn */ + if (SrcRegIsRedefined(regNo)) { + return false; + } + } else if (opnd0.IsOpdShift()) { + CHECK_FATAL(opnd1.IsOpdShift(), "must be ShiftOperand!"); + BitShiftOperand &shiftOpnd0 = static_cast(opnd0); + BitShiftOperand &shiftOpnd1 = static_cast(opnd1); + if (shiftOpnd0.GetShiftAmount() != shiftOpnd1.GetShiftAmount()) { + return false; + } + } else if (opnd0.IsOpdExtend()) { + CHECK_FATAL(opnd1.IsOpdExtend(), "must be ExtendOperand!"); + ExtendShiftOperand &extendOpnd0 = static_cast(opnd0); + ExtendShiftOperand &extendOpnd1 = static_cast(opnd1); + 
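+ /* only the shift amount of the two extend operands is compared here; a different amount means
+ * the two flag-setting instructions are not equivalent */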
if (extendOpnd0.GetShiftAmount() != extendOpnd1.GetShiftAmount()) { + return false; + } + } else { + return false; + } + return true; +} + +bool SameDefPattern::SrcRegIsRedefined(regno_t regNo) { + AArch64ReachingDefinition *a64RD = static_cast(cgFunc.GetRD()); + if (currInsn->GetBB() == sameInsn->GetBB()) { + FOR_BB_INSNS(insn, currInsn->GetBB()) { + if (insn->GetMachineOpcode() == MOP_xbl) { + return true; + } + } + if (!a64RD->FindRegDefBetweenInsn(regNo, sameInsn, currInsn).empty()) { + return true; + } + } else if (a64RD->HasRegDefBetweenInsnGlobal(regNo, *sameInsn, *currInsn)) { + return true; + } + return false; +} + +void AndCbzPattern::Init() { + prevInsn = nullptr; +} + +bool AndCbzPattern::IsAdjacentArea(Insn &prev, Insn &curr) const { + if (prev.GetBB() == curr.GetBB()) { + return true; + } + for (auto *succ : prev.GetBB()->GetSuccs()) { + if (succ == curr.GetBB()) { + return true; + } + } + return false; +} + +bool AndCbzPattern::CheckCondition(Insn &insn) { + auto *aar64RD = static_cast(cgFunc.GetRD()); + MOperator mOp = insn.GetMachineOpcode(); + if ((mOp != MOP_wcbz) && (mOp != MOP_xcbz) && (mOp != MOP_wcbnz) && (mOp != MOP_xcbnz)) { + return false; + } + regno_t regNo = static_cast(insn.GetOperand(kInsnFirstOpnd)).GetRegisterNumber(); + InsnSet defSet = cgFunc.GetRD()->FindDefForRegOpnd(insn, regNo, true); + if (defSet.size() != k1BitSize) { + return false; + } + prevInsn = *defSet.begin(); + if (prevInsn->GetMachineOpcode() != MOP_wandrri12 && prevInsn->GetMachineOpcode() != MOP_xandrri13) { + return false; + } + if (!IsAdjacentArea(*prevInsn, insn)) { + return false; + } + regno_t propRegNo = static_cast(prevInsn->GetOperand(kInsnSecondOpnd)).GetRegisterNumber(); + if (prevInsn->GetBB() == insn.GetBB() && !(aar64RD->FindRegDefBetweenInsn(propRegNo, prevInsn, &insn).empty())) { + return false; + } + if (prevInsn->GetBB() != insn.GetBB() && aar64RD->HasRegDefBetweenInsnGlobal(propRegNo, *prevInsn, insn)) { + return false; + } + if (!(cgFunc.GetRD()->FindUseForRegOpnd(insn, regNo, true).empty())) { + return false; + } + return true; +} + +int64 AndCbzPattern::CalculateLogValue(int64 val) const { + return (__builtin_popcountll(static_cast(val)) == 1) ? 
(__builtin_ffsll(val) - 1) : -1; +} + +void AndCbzPattern::Optimize(Insn &insn) { + BB *bb = insn.GetBB(); + auto &aarchFunc = static_cast(cgFunc); + auto &andImm = static_cast(prevInsn->GetOperand(kInsnThirdOpnd)); + int64 tbzVal = CalculateLogValue(andImm.GetValue()); + if (tbzVal < 0) { + return; + } + MOperator mOp = insn.GetMachineOpcode(); + MOperator newMop = MOP_undef; + switch (mOp) { + case MOP_wcbz: + newMop = MOP_wtbz; + break; + case MOP_wcbnz: + newMop = MOP_wtbnz; + break; + case MOP_xcbz: + newMop = MOP_xtbz; + break; + case MOP_xcbnz: + newMop = MOP_xtbnz; + break; + default: + CHECK_FATAL(false, "must be cbz/cbnz"); + break; + } + auto &label = static_cast(insn.GetOperand(kInsnSecondOpnd)); + ImmOperand &tbzImm = aarchFunc.CreateImmOperand(tbzVal, k8BitSize, false); + Insn &newInsn = cgFunc.GetInsnBuilder()->BuildInsn(newMop, prevInsn->GetOperand(kInsnSecondOpnd), tbzImm, label); + newInsn.SetId(insn.GetId()); + bb->ReplaceInsn(insn, newInsn); + if (GLOBAL_DUMP) { + LogInfo::MapleLogger() << ">>>>>>> In AndCbzPattern : <<<<<<<\n"; + LogInfo::MapleLogger() << "=======PrevInsn :\n"; + LogInfo::MapleLogger() << "=======ReplaceInsn :\n"; + insn.Dump(); + LogInfo::MapleLogger() << "=======NewInsn :\n"; + newInsn.Dump(); + } + cgFunc.GetRD()->UpdateInOut(*bb, true); +} + +void AndCbzPattern::Run() { + Init(); + FOR_ALL_BB_REV(bb, &cgFunc) { + FOR_BB_INSNS_REV(insn, bb) { + if (!insn->IsMachineInstruction() || !CheckCondition(*insn)) { + continue; + } + Optimize(*insn); + } + } +} + +void SameRHSPropPattern::Init() { + prevInsn = nullptr; + candidates = {MOP_waddrri12, MOP_xaddrri12, MOP_wsubrri12, MOP_xsubrri12, + MOP_wmovri32, MOP_xmovri64, MOP_wmovrr, MOP_xmovrr}; +} + +bool SameRHSPropPattern::IsSameOperand(Operand *opnd1, Operand *opnd2) const { + if (opnd1 == nullptr && opnd2 == nullptr) { + return true; + } else if (opnd1 == nullptr || opnd2 == nullptr) { + return false; + } + if (opnd1->IsRegister() && opnd2->IsRegister()) { + return RegOperand::IsSameReg(*opnd1, *opnd2); + } else if (opnd1->IsImmediate() && opnd2->IsImmediate()) { + auto *immOpnd1 = static_cast(opnd1); + auto *immOpnd2 = static_cast(opnd2); + return (immOpnd1->GetSize() == immOpnd2->GetSize()) && (immOpnd1->GetValue() == immOpnd2->GetValue()); + } + return false; +} + +bool SameRHSPropPattern::FindSameRHSInsnInBB(Insn &insn) { + uint32 opndNum = insn.GetOperandSize(); + Operand *curRegOpnd = nullptr; + Operand *curImmOpnd = nullptr; + for (uint32 i = 0; i < opndNum; ++i) { + if (insn.OpndIsDef(i)) { + continue; + } + Operand &opnd = insn.GetOperand(i); + if (opnd.IsRegister()) { + curRegOpnd = &opnd; + } else if (opnd.IsImmediate()) { + auto &immOpnd = static_cast(opnd); + if (immOpnd.GetVary() == kUnAdjustVary) { + return false; + } + curImmOpnd = &opnd; + } + } + if (curRegOpnd == nullptr && curImmOpnd != nullptr && static_cast(curImmOpnd)->IsZero()) { + return false; + } + BB *bb = insn.GetBB(); + for (auto *cursor = insn.GetPrev(); cursor != nullptr && cursor != bb->GetFirstInsn(); cursor = cursor->GetPrev()) { + if (!cursor->IsMachineInstruction()) { + continue; + } + if (cursor->IsCall() && !cgFunc.IsAfterRegAlloc()) { + return false; + } + if (cursor->GetMachineOpcode() != insn.GetMachineOpcode()) { + continue; + } + uint32 candOpndNum = cursor->GetOperandSize(); + Operand *candRegOpnd = nullptr; + Operand *candImmOpnd = nullptr; + for (uint32 i = 0; i < candOpndNum; ++i) { + Operand &opnd = cursor->GetOperand(i); + if (cursor->OpndIsDef(i)) { + continue; + } + if (opnd.IsRegister()) { + 
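+ /* remember the candidate's register source; it is compared with curRegOpnd below */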
candRegOpnd = &opnd; + } else if (opnd.IsImmediate()) { + auto &immOpnd = static_cast(opnd); + if (immOpnd.GetVary() == kUnAdjustVary) { + return false; + } + candImmOpnd = &opnd; + } + } + if (IsSameOperand(curRegOpnd, candRegOpnd) && IsSameOperand(curImmOpnd, candImmOpnd)) { + prevInsn = cursor; + return true; + } + } + return false; +} + +bool SameRHSPropPattern::CheckCondition(Insn &insn) { + if (!insn.IsMachineInstruction()) { + return false; + } + MOperator mOp = insn.GetMachineOpcode(); + if (std::find(candidates.begin(), candidates.end(), mOp) == candidates.end()) { + return false; + } + if (!FindSameRHSInsnInBB(insn)) { + return false; + } + CHECK_FATAL(prevInsn->GetOperand(kInsnFirstOpnd).IsRegister(), "prevInsn first operand must be register"); + if (static_cast(prevInsn->GetOperand(kInsnFirstOpnd)).GetRegisterNumber() == R16) { + return false; + } + if (prevInsn->GetOperand(kInsnSecondOpnd).IsRegister() && + RegOperand::IsSameReg(prevInsn->GetOperand(kInsnFirstOpnd), prevInsn->GetOperand(kInsnSecondOpnd))) { + return false; + } + uint32 opndNum = prevInsn->GetOperandSize(); + for (uint32 i = 0; i < opndNum; ++i) { + Operand &opnd = prevInsn->GetOperand(i); + if (!opnd.IsRegister()) { + continue; + } + regno_t regNO = static_cast(opnd).GetRegisterNumber(); + if (!(cgFunc.GetRD()->FindRegDefBetweenInsn(regNO, prevInsn->GetNext(), insn.GetPrev()).empty())) { + return false; + } + } + return true; +} + +void SameRHSPropPattern::Optimize(Insn &insn) { + BB *bb = insn.GetBB(); + Operand &destOpnd = insn.GetOperand(kInsnFirstOpnd); + uint32 bitSize = static_cast(destOpnd).GetSize(); + MOperator mOp = (bitSize == k64BitSize ? MOP_xmovrr : MOP_wmovrr); + Insn &newInsn = cgFunc.GetInsnBuilder()->BuildInsn(mOp, destOpnd, prevInsn->GetOperand(kInsnFirstOpnd)); + newInsn.SetId(insn.GetId()); + bb->ReplaceInsn(insn, newInsn); + if (GLOBAL_DUMP) { + LogInfo::MapleLogger() << ">>>>>>> In SameRHSPropPattern : <<<<<<<\n"; + LogInfo::MapleLogger() << "=======PrevInsn :\n"; + LogInfo::MapleLogger() << "======= ReplaceInsn :\n"; + insn.Dump(); + LogInfo::MapleLogger() << "======= NewInsn :\n"; + newInsn.Dump(); + } + cgFunc.GetRD()->UpdateInOut(*bb, true); +} + +void SameRHSPropPattern::Run() { + Init(); + FOR_ALL_BB_REV(bb, &cgFunc) { + FOR_BB_INSNS_REV(insn, bb) { + if (!CheckCondition(*insn)) { + continue; + } + Optimize(*insn); + } + } +} + +bool ContinuousLdrPattern::IsMopMatch(const Insn &insn) { + auto mop = insn.GetMachineOpcode(); + return mop == MOP_wldrh || mop == MOP_wldr || mop == MOP_xldr; +} + +bool ContinuousLdrPattern::IsUsedBySameCall(Insn &insn1, Insn &insn2, Insn &insn3) const { + auto ®1 = static_cast(insn1.GetOperand(kFirstOpnd)); + auto &&usesite1 = cgFunc.GetRD()->FindUseForRegOpnd(insn1, reg1.GetRegisterNumber(), true); + if (usesite1.size() != 1 || !((*usesite1.begin())->IsCall() || (*usesite1.begin())->IsTailCall())) { + return false; + } + auto ®2 = static_cast(insn2.GetOperand(kFirstOpnd)); + auto &&usesite2 = cgFunc.GetRD()->FindUseForRegOpnd(insn2, reg2.GetRegisterNumber(), true); + if (usesite2.size() != 1 || !((*usesite2.begin())->IsCall() || (*usesite1.begin())->IsTailCall())) { + return false; + } + auto ®3 = static_cast(insn3.GetOperand(kFirstOpnd)); + auto &&usesite3 = cgFunc.GetRD()->FindUseForRegOpnd(insn3, reg3.GetRegisterNumber(), true); + if (usesite3.size() != 1 || !((*usesite3.begin())->IsCall() || (*usesite3.begin())->IsTailCall())) { + return false; + } + return (*usesite1.begin())->GetId() == (*usesite2.begin())->GetId() && 
(*usesite3.begin())->GetId(); +} + +bool ContinuousLdrPattern::IsMemValid(const MemOperand &memopnd) { + return memopnd.GetAddrMode() == MemOperand::kAddrModeBOi && memopnd.GetIndexOpt() == MemOperand::kIntact; +} + +bool ContinuousLdrPattern::IsImmValid(MOperator mop, const ImmOperand &imm) { + return AArch64CG::kMd[mop].IsValidImmOpnd(imm.GetValue()); +} + +int64 ContinuousLdrPattern::GetMemOffsetValue(const Insn &insn) { + return static_cast(insn.GetOperand(kSecondOpnd)).GetOffsetOperand()->GetValue(); +} + +bool ContinuousLdrPattern::CheckCondition(Insn &insn) { + if (!IsMopMatch(insn)) { + return false; + } + // merge the three adjacent ldr insns + insnList.clear(); + insnList.push_back(&insn); + auto prevInsn = insn.GetPrev(); + if (!prevInsn || !IsMopMatch(*prevInsn)) { + return false; + } + insnList.push_back(prevInsn); + auto prevPrevInsn = prevInsn->GetPrev(); + if (!prevPrevInsn || !IsMopMatch(*prevPrevInsn)) { + return false; + } + insnList.push_back(prevPrevInsn); + if (!IsUsedBySameCall(insn, *prevInsn, *prevPrevInsn)) { + return false; + } + + auto &currMem = static_cast(insn.GetOperand(kSecondOpnd)); + auto &prevMem = static_cast(prevInsn->GetOperand(kSecondOpnd)); + auto &prevPrevMem = static_cast(prevPrevInsn->GetOperand(kSecondOpnd)); + if (!IsMemValid(currMem) || !IsMemValid(prevMem) || !IsMemValid(prevPrevMem)) { + return false; + } + auto baseRegNumOfCurr = currMem.GetBaseRegister()->GetRegisterNumber(); + auto baseRegNumOfPrev = prevMem.GetBaseRegister()->GetRegisterNumber(); + auto baseRegNumOfPrevPrev = prevPrevMem.GetBaseRegister()->GetRegisterNumber(); + if (baseRegNumOfCurr != baseRegNumOfPrev || baseRegNumOfCurr != baseRegNumOfPrevPrev) { + return false; + } + + return true; +} + +void ContinuousLdrPattern::Optimize(Insn &insn) { + auto bb = insn.GetBB(); + auto &aarch64CGFunc = static_cast(cgFunc); + + // sort ldr insns by offset value of insn's memopnd + std::sort(insnList.begin(), insnList.end(), + [](const Insn *insn1, const Insn *insn2) { return GetMemOffsetValue(*insn1) < GetMemOffsetValue(*insn2); }); + + auto currMop = insnList[1]->GetMachineOpcode(); + Insn *currInsn = nullptr; + Insn *prevInsn = nullptr; + Insn *extraInsn = nullptr; + + // the two adjacent insns's mop should be same (all equals to ldrh) + // assign the second ldrh to currInsn, another to prevInsn + // and extraInsn will be ldr + if (insnList[0]->GetMachineOpcode() == insnList[1]->GetMachineOpcode()) { + currInsn = insnList[1]; + prevInsn = insnList[0]; + extraInsn = insnList[2]; + } else if (insnList[1]->GetMachineOpcode() == insnList[2]->GetMachineOpcode()) { + currInsn = insnList[2]; + prevInsn = insnList[1]; + extraInsn = insnList[0]; + } else { + return; + } + + auto offset = GetMemOffsetValue(*currInsn) - GetMemOffsetValue(*prevInsn); + auto extraOffset = GetMemOffsetValue(*extraInsn) - GetMemOffsetValue(*prevInsn); + MOperator mop = MOP_undef; + MOperator ubfx = MOP_undef; + ImmOperand *imm; + if (currMop == MOP_wldrh && offset == k2ByteSize) { + if (abs(extraOffset) == k4ByteSize) { + imm = &aarch64CGFunc.CreateImmOperand(k16BitSize, k5BitSize, false); + mop = MOP_wldp; + ubfx = MOP_wubfxrri5i5; + } else if (abs(extraOffset) == k8ByteSize) { + imm = &aarch64CGFunc.CreateImmOperand(k16BitSize, k6BitSize, false); + mop = MOP_xldp; + ubfx = MOP_xubfxrri6i6; + } else { + return; + } + } else { + return; + } + + // ldpRt1's first opnd will be the first opnd of ldp, and second opnd will be ldp's MemOperannd + // ldpRt2's first opnd will be ldp's second opnd + Insn *ldpRt1 = prevInsn; 
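+ // ldpRt1 should be the load at the lower offset; the swap below handles the case where the
+ // extra ldr reads the lower address (extraOffset < 0)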
+ Insn *ldpRt2 = extraInsn; + if (extraOffset < 0) { + std::swap(ldpRt1, ldpRt2); + } + + if (IsImmValid(mop, *static_cast(ldpRt1->GetOperand(kSecondOpnd)).GetOffsetOperand())) { + auto &newInsn = cgFunc.GetInsnBuilder()->BuildInsn(mop, ldpRt1->GetOperand(kFirstOpnd), + ldpRt2->GetOperand(kFirstOpnd), ldpRt1->GetOperand(kSecondOpnd)); + auto &ubfxInsn = cgFunc.GetInsnBuilder()->BuildInsn(ubfx, currInsn->GetOperand(kFirstOpnd), + prevInsn->GetOperand(kFirstOpnd), *imm, *imm); + newInsn.SetId(insn.GetPrev()->GetId()); + bb->ReplaceInsn(*insn.GetPrev(), newInsn); + ubfxInsn.SetId(insn.GetId()); + bb->ReplaceInsn(insn, ubfxInsn); + bb->RemoveInsn(*insn.GetPrev()->GetPrev()); + cgFunc.GetRD()->UpdateInOut(*bb, true); + return; + } + + cgFunc.GetRD()->UpdateInOut(*bb, true); +} + +void ContinuousLdrPattern::Run() { + if (!cgFunc.GetMirModule().IsCModule()) { + return; + } + FOR_ALL_BB(bb, &cgFunc) { + FOR_BB_INSNS(insn, bb) { + if (!insn->IsMachineInstruction()) { + continue; + } + if (!CheckCondition(*insn)) { + continue; + } + Optimize(*insn); + } + } +} + +} /* namespace maplebe */ diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_ico.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_ico.cpp new file mode 100755 index 0000000000000000000000000000000000000000..ff286bc10b42539fdb9aeec4e4d38d3e87377444 --- /dev/null +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_ico.cpp @@ -0,0 +1,1229 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "aarch64_ico.h" +#include "ico.h" +#include "cg.h" +#include "cg_option.h" +#include "aarch64_isa.h" +#include "aarch64_insn.h" +#include "aarch64_cgfunc.h" + +/* + * This phase implements if-conversion optimization, + * which tries to convert conditional branches into cset/csel instructions + */ +namespace maplebe { +void AArch64IfConversionOptimizer::InitOptimizePatterns() { + singlePassPatterns.emplace_back(memPool->New(*cgFunc)); + singlePassPatterns.emplace_back(memPool->New(*cgFunc)); + singlePassPatterns.emplace_back(memPool->New(*cgFunc)); +} + +/* build ccmp Insn */ +Insn *AArch64ICOPattern::BuildCcmpInsn(ConditionCode ccCode, const Insn *cmpInsn) const { + Operand &opnd0 = cmpInsn->GetOperand(kInsnFirstOpnd); + Operand &opnd1 = cmpInsn->GetOperand(kInsnSecondOpnd); + Operand &opnd2 = cmpInsn->GetOperand(kInsnThirdOpnd); + /* ccmp has only int opnd */ + if (!static_cast(opnd1).IsOfIntClass()) { + return nullptr; + } + AArch64CGFunc *func = static_cast(cgFunc); + uint32 nzcv = GetNZCV(ccCode, false); + if (nzcv == k16BitSize) { + return nullptr; + } + ImmOperand &opnd3 = func->CreateImmOperand(PTY_u8, nzcv); + CondOperand &cond = static_cast(cgFunc)->GetCondOperand(ccCode); + uint32 dSize = opnd1.GetSize(); + bool isIntTy = opnd2.IsIntImmediate(); + MOperator mOpCode = isIntTy ? (dSize == k64BitSize ? MOP_xccmpriic : MOP_wccmpriic) + : (dSize == k64BitSize ? 
MOP_xccmprric : MOP_wccmprric); + /* cmp opnd2 in the range 0-4095, ccmp opnd2 in the range 0-31 */ + if (isIntTy && static_cast(opnd2).GetRegisterNumber() >= k32BitSize) { + return nullptr; + } + return &cgFunc->GetInsnBuilder()->BuildInsn(mOpCode, opnd0, opnd1, opnd2, opnd3, cond); +} + +/* Rooted ccCode resource NZCV */ +uint32 AArch64ICOPattern::GetNZCV(ConditionCode ccCode, bool inverse) { + switch (ccCode) { + case CC_EQ: + return inverse ? k4BitSize : k0BitSize; + case CC_HS: + return inverse ? k2BitSize : k0BitSize; + case CC_MI: + return inverse ? k8BitSize : k0BitSize; + case CC_VS: + return inverse ? k1BitSize : k0BitSize; + case CC_VC: + return inverse ? k0BitSize : k1BitSize; + case CC_LS: + return inverse ? k4BitSize : k2BitSize; + case CC_LO: + return inverse ? k0BitSize : k2BitSize; + case CC_NE: + return inverse ? k0BitSize : k4BitSize; + case CC_HI: + return inverse ? k2BitSize : k4BitSize; + case CC_PL: + return inverse ? k0BitSize : k8BitSize; + default: + return k16BitSize; + } +} + +Insn *AArch64ICOPattern::BuildCmpInsn(const Insn &condBr) const { + AArch64CGFunc *func = static_cast(cgFunc); + RegOperand ® = static_cast(condBr.GetOperand(0)); + PrimType ptyp = (reg.GetSize() == k64BitSize) ? PTY_u64 : PTY_u32; + ImmOperand &numZero = func->CreateImmOperand(ptyp, 0); + Operand &rflag = func->GetOrCreateRflag(); + MOperator mopCode = (reg.GetSize() == k64BitSize) ? MOP_xcmpri : MOP_wcmpri; + Insn &cmpInsn = func->GetInsnBuilder()->BuildInsn(mopCode, rflag, reg, numZero); + return &cmpInsn; +} + +bool AArch64ICOIfThenElsePattern::IsExpansionMOperator(const Insn &insn) const { + MOperator mOpCode = insn.GetMachineOpcode(); + return mOpCode >= MOP_xuxtb32 && mOpCode <= MOP_xuxtw64; +} + +bool AArch64ICOIfThenElsePattern::IsMovMOperator(const Insn &insn) const { + MOperator mOpCode = insn.GetMachineOpcode(); + return mOpCode >= MOP_xmovrr && mOpCode <= MOP_xvmovd; +} + +bool AArch64ICOIfThenElsePattern::IsShiftMOperator(const Insn &insn) const { + MOperator mOpCode = insn.GetMachineOpcode(); + return mOpCode >= MOP_xlslrri6 && mOpCode <= MOP_wrorrrr; +} + +bool AArch64ICOIfThenElsePattern::IsEorMOperator(const Insn &insn) const { + MOperator mOpCode = insn.GetMachineOpcode(); + return (mOpCode >= MOP_xeorrrr && mOpCode <= MOP_weorrri12); +} + +bool AArch64ICOIfThenElsePattern::Has2SrcOpndSetInsn(const Insn &insn) const { + return IsEorMOperator(insn) || IsShiftMOperator(insn) || cgFunc->GetTheCFG()->IsAddOrSubInsn(insn); +} + +bool AArch64ICOIfThenElsePattern::IsSetInsnMOperator(const Insn &insn) const { + return IsExpansionMOperator(insn) || IsMovMOperator(insn) || Has2SrcOpndSetInsn(insn); +} + +bool AArch64ICOIfThenElsePattern::IsSetInsn(const Insn &insn, Operand **dest, std::vector &src) const { + if (IsSetInsnMOperator(insn)) { + *dest = &(insn.GetOperand(0)); + for (uint32 i = 1; i < insn.GetOperandSize(); ++i) { + (void)src.emplace_back(&(insn.GetOperand(i))); + } + return true; + } + *dest = nullptr; + src.clear(); + return false; +} + +ConditionCode AArch64ICOPattern::Encode(MOperator mOp, bool inverse) const { + switch (mOp) { + case MOP_bmi: + return inverse ? CC_PL : CC_MI; + case MOP_bvc: + return inverse ? CC_VS : CC_VC; + case MOP_bls: + return inverse ? CC_HI : CC_LS; + case MOP_blt: + return inverse ? CC_GE : CC_LT; + case MOP_ble: + return inverse ? CC_GT : CC_LE; + case MOP_bcs: + return inverse ? CC_CC : CC_CS; + case MOP_bcc: + return inverse ? CC_CS : CC_CC; + case MOP_beq: + return inverse ? CC_NE : CC_EQ; + case MOP_bne: + return inverse ? 
CC_EQ : CC_NE; + case MOP_blo: + return inverse ? CC_HS : CC_LO; + case MOP_bpl: + return inverse ? CC_MI : CC_PL; + case MOP_bhs: + return inverse ? CC_LO : CC_HS; + case MOP_bvs: + return inverse ? CC_VC : CC_VS; + case MOP_bhi: + return inverse ? CC_LS : CC_HI; + case MOP_bgt: + return inverse ? CC_LE : CC_GT; + case MOP_bge: + return inverse ? CC_LT : CC_GE; + case MOP_wcbnz: + return inverse ? CC_EQ : CC_NE; + case MOP_xcbnz: + return inverse ? CC_EQ : CC_NE; + case MOP_wcbz: + return inverse ? CC_NE : CC_EQ; + case MOP_xcbz: + return inverse ? CC_NE : CC_EQ; + default: + return kCcLast; + } +} + +Insn *AArch64ICOPattern::BuildCondSet(const Insn &branch, RegOperand ®, bool inverse) const { + ConditionCode ccCode = Encode(branch.GetMachineOpcode(), inverse); + ASSERT(ccCode != kCcLast, "unknown cond, ccCode can't be kCcLast"); + AArch64CGFunc *func = static_cast(cgFunc); + CondOperand &cond = func->GetCondOperand(ccCode); + Operand &rflag = func->GetOrCreateRflag(); + MOperator mopCode = (reg.GetSize() == k64BitSize) ? MOP_xcsetrc : MOP_wcsetrc; + return &func->GetInsnBuilder()->BuildInsn(mopCode, reg, cond, rflag); +} + +Insn *AArch64ICOPattern::BuildCondSel(const Insn &branch, MOperator mOp, RegOperand &dst, RegOperand &src1, + RegOperand &src2) const { + ConditionCode ccCode = Encode(branch.GetMachineOpcode(), false); + ASSERT(ccCode != kCcLast, "unknown cond, ccCode can't be kCcLast"); + CondOperand &cond = static_cast(cgFunc)->GetCondOperand(ccCode); + Operand &rflag = static_cast(cgFunc)->GetOrCreateRflag(); + return &cgFunc->GetInsnBuilder()->BuildInsn(mOp, dst, src1, src2, cond, rflag); +} + +void AArch64ICOIfThenElsePattern::GenerateInsnForImm(const Insn &branchInsn, Operand &ifDest, Operand &elseDest, + RegOperand &destReg, std::vector &generateInsn) const { + ImmOperand &imm1 = static_cast(ifDest); + ImmOperand &imm2 = static_cast(elseDest); + bool inverse = imm1.IsZero() && imm2.IsOne(); + if (inverse || (imm2.IsZero() && imm1.IsOne())) { + Insn *csetInsn = BuildCondSet(branchInsn, destReg, inverse); + ASSERT(csetInsn != nullptr, "build a insn failed"); + generateInsn.emplace_back(csetInsn); + } else if (imm1.GetValue() == imm2.GetValue()) { + bool destIsIntTy = destReg.IsOfIntClass(); + MOperator mOp = destIsIntTy ? ((destReg.GetSize() == k64BitSize ? MOP_xmovri64 : MOP_wmovri32)) : + ((destReg.GetSize() == k64BitSize ? MOP_xdfmovri : MOP_wsfmovri)); + Insn &tempInsn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, destReg, imm1); + generateInsn.emplace_back(&tempInsn); + } else { + bool destIsIntTy = destReg.IsOfIntClass(); + uint32 dSize = destReg.GetSize(); + bool isD64 = dSize == k64BitSize; + MOperator mOp = destIsIntTy ? ((destReg.GetSize() == k64BitSize ? MOP_xmovri64 : MOP_wmovri32)) : + ((destReg.GetSize() == k64BitSize ? MOP_xdfmovri : MOP_wsfmovri)); + RegOperand *tempTarIf = nullptr; + if (imm1.IsZero()) { + tempTarIf = &cgFunc->GetZeroOpnd(dSize); + } else { + tempTarIf = cgFunc->GetTheCFG()->CreateVregFromReg(destReg); + Insn &tempInsnIf = cgFunc->GetInsnBuilder()->BuildInsn(mOp, *tempTarIf, imm1); + generateInsn.emplace_back(&tempInsnIf); + } + + RegOperand *tempTarElse = nullptr; + if (imm2.IsZero()) { + tempTarElse = &cgFunc->GetZeroOpnd(dSize); + } else { + tempTarElse = cgFunc->GetTheCFG()->CreateVregFromReg(destReg); + Insn &tempInsnElse = cgFunc->GetInsnBuilder()->BuildInsn(mOp, *tempTarElse, imm2); + generateInsn.emplace_back(&tempInsnElse); + } + + bool isIntTy = destReg.IsOfIntClass(); + MOperator mOpCode = isIntTy ? (isD64 ? 
MOP_xcselrrrc : MOP_wcselrrrc) + : (isD64 ? MOP_dcselrrrc : (dSize == k32BitSize ? MOP_scselrrrc : MOP_hcselrrrc)); + Insn *cselInsn = BuildCondSel(branchInsn, mOpCode, destReg, *tempTarIf, *tempTarElse); + CHECK_FATAL(cselInsn != nullptr, "build a csel insn failed"); + generateInsn.emplace_back(cselInsn); + } +} + +RegOperand *AArch64ICOIfThenElsePattern::GenerateRegAndTempInsn(Operand &dest, const RegOperand &destReg, + std::vector &generateInsn) const { + RegOperand *reg = nullptr; + if (!dest.IsRegister()) { + bool destIsIntTy = destReg.IsOfIntClass(); + bool isDest64 = destReg.GetSize() == k64BitSize; + MOperator mOp = destIsIntTy ? (isDest64 ? MOP_xmovri64 : MOP_wmovri32) : (isDest64 ? MOP_xdfmovri : MOP_wsfmovri); + reg = cgFunc->GetTheCFG()->CreateVregFromReg(destReg); + ImmOperand &tempSrcElse = static_cast(dest); + if (tempSrcElse.IsZero()) { + return &cgFunc->GetZeroOpnd(destReg.GetSize()); + } + Insn &tempInsn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, *reg, tempSrcElse); + generateInsn.emplace_back(&tempInsn); + return reg; + } else { + return (static_cast(&dest)); + } +} + +void AArch64ICOIfThenElsePattern::GenerateInsnForReg(const Insn &branchInsn, Operand &ifDest, Operand &elseDest, + RegOperand &destReg, std::vector &generateInsn) const { + RegOperand *tReg = static_cast(&ifDest); + RegOperand *eReg = static_cast(&elseDest); + + /* mov w0, w1 mov w0, w1 --> mov w0, w1 */ + if (eReg->GetRegisterNumber() == tReg->GetRegisterNumber()) { + uint32 dSize = destReg.GetSize(); + bool srcIsIntTy = tReg->IsOfIntClass(); + bool destIsIntTy = destReg.IsOfIntClass(); + MOperator mOp; + if (dSize == k64BitSize) { + mOp = srcIsIntTy ? (destIsIntTy ? MOP_xmovrr : MOP_xvmovdr) : (destIsIntTy ? MOP_xvmovrd : MOP_xvmovd); + } else { + mOp = srcIsIntTy ? (destIsIntTy ? MOP_wmovrr : MOP_xvmovsr) : (destIsIntTy ? MOP_xvmovrs : MOP_xvmovs); + } + Insn &tempInsnIf = cgFunc->GetInsnBuilder()->BuildInsn(mOp, destReg, *tReg); + generateInsn.emplace_back(&tempInsnIf); + } else { + uint32 dSize = destReg.GetSize(); + bool isIntTy = destReg.IsOfIntClass(); + MOperator mOpCode = isIntTy ? (dSize == k64BitSize ? MOP_xcselrrrc : MOP_wcselrrrc) + : (dSize == k64BitSize ? MOP_dcselrrrc : (dSize == k32BitSize ? 
+ MOP_scselrrrc : MOP_hcselrrrc)); + Insn *cselInsn = BuildCondSel(branchInsn, mOpCode, destReg, *tReg, *eReg); + CHECK_FATAL(cselInsn != nullptr, "build a csel insn failed"); + generateInsn.emplace_back(cselInsn); + } +} + +Operand *AArch64ICOIfThenElsePattern::GetDestReg(const std::map> &destSrcMap, + const RegOperand &destReg) const { + Operand *dest = nullptr; + for (const auto &destSrcPair : destSrcMap) { + ASSERT(destSrcPair.first->IsRegister(), "opnd must be register"); + RegOperand *destRegInMap = static_cast(destSrcPair.first); + ASSERT(destRegInMap != nullptr, "nullptr check"); + if (destRegInMap->GetRegisterNumber() == destReg.GetRegisterNumber()) { + if (destSrcPair.second.size() > 1) { + dest = destSrcPair.first; + } else { + dest = destSrcPair.second[0]; + } + break; + } + } + return dest; +} + +bool AArch64ICOIfThenElsePattern::BuildCondMovInsn(const BB &bb, + const std::map> &ifDestSrcMap, + const std::map> &elseDestSrcMap, + bool elseBBIsProcessed, + std::vector &generateInsn) { + Insn *branchInsn = cgFunc->GetTheCFG()->FindLastCondBrInsn(*cmpBB); + FOR_BB_INSNS_CONST(insn, (&bb)) { + if (!insn->IsMachineInstruction() || insn->IsBranch()) { + continue; + } + Operand *dest = nullptr; + std::vector src; + + if (!IsSetInsn(*insn, &dest, src)) { + ASSERT(false, "insn check"); + } + ASSERT(dest->IsRegister(), "register check"); + RegOperand *destReg = static_cast(dest); + + Operand *elseDest = GetDestReg(elseDestSrcMap, *destReg); + Operand *ifDest = GetDestReg(ifDestSrcMap, *destReg); + + if (elseBBIsProcessed) { + if (elseDest != nullptr) { + continue; + } + elseDest = dest; + ASSERT(ifDest != nullptr, "null ptr check"); + if (!bb.GetLiveOut()->TestBit(destReg->GetRegisterNumber())) { + continue; + } + } else { + ASSERT(elseDest != nullptr, "null ptr check"); + if (ifDest == nullptr) { + if (!bb.GetLiveOut()->TestBit(destReg->GetRegisterNumber())) { + continue; + } + ifDest = dest; + } + } + + /* generate cset or csel instruction */ + ASSERT(ifDest != nullptr, "null ptr check"); + if (ifDest->IsIntImmediate() && elseDest->IsIntImmediate()) { + GenerateInsnForImm(*branchInsn, *ifDest, *elseDest, *destReg, generateInsn); + } else { + RegOperand *tReg = GenerateRegAndTempInsn(*ifDest, *destReg, generateInsn); + RegOperand *eReg = GenerateRegAndTempInsn(*elseDest, *destReg, generateInsn); + if ((tReg->GetRegisterType() != eReg->GetRegisterType()) || + (tReg->GetRegisterType() != destReg->GetRegisterType())) { + return false; + } + GenerateInsnForReg(*branchInsn, *tReg, *eReg, *destReg, generateInsn); + } + } + + return true; +} + +bool AArch64ICOIfThenElsePattern::CheckHasSameDest(std::vector &lInsn, std::vector &rInsn) const { + for (size_t i = 0; i < lInsn.size(); ++i) { + if (Has2SrcOpndSetInsn(*lInsn[i])) { + bool hasSameDest = false; + for (size_t j = 0; j < rInsn.size(); ++j) { + RegOperand *rDestReg = static_cast(&rInsn[j]->GetOperand(0)); + RegOperand *lDestReg = static_cast(&lInsn[i]->GetOperand(0)); + if (lDestReg->GetRegisterNumber() == rDestReg->GetRegisterNumber()) { + hasSameDest = true; + break; + } + } + if (!hasSameDest) { + return false; + } + } + } + return true; +} + +bool AArch64ICOIfThenElsePattern::CheckModifiedRegister(Insn &insn, std::map> &destSrcMap, std::vector &src, + std::map &dest2InsnMap, Insn **toBeRremovedOutOfCurrBB) const { + /* src was modified in this blcok earlier */ + for (auto srcOpnd : src) { + if (srcOpnd->IsRegister()) { + auto &srcReg = static_cast(*srcOpnd); + for (auto &destSrcPair : destSrcMap) { + 
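+ /* a source already defined earlier in this BB normally blocks the transformation, unless that
+ * single def can later be hoisted into cmpBB (recorded in toBeRremovedOutOfCurrBB) */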
ASSERT(destSrcPair.first->IsRegister(), "opnd must be register"); + RegOperand *mapSrcReg = static_cast(destSrcPair.first); + if (mapSrcReg->GetRegisterNumber() == srcReg.GetRegisterNumber()) { + if (*toBeRremovedOutOfCurrBB == nullptr && mapSrcReg->IsVirtualRegister() && + !insn.GetBB()->GetLiveOut()->TestBit(srcReg.GetRegisterNumber())) { + ASSERT(dest2InsnMap.find(mapSrcReg) != dest2InsnMap.end(), "must find"); + *toBeRremovedOutOfCurrBB = dest2InsnMap[mapSrcReg]; + continue; + } + return false; + } + } + } + } + auto &dest = insn.GetOperand(0); + /* dest register was modified earlier in this block */ + ASSERT(dest.IsRegister(), "opnd must be register"); + auto &destReg = static_cast(dest); + for (auto &destSrcPair : destSrcMap) { + ASSERT(destSrcPair.first->IsRegister(), "opnd must be register"); + RegOperand *mapSrcReg = static_cast(destSrcPair.first); + if (mapSrcReg->GetRegisterNumber() == destReg.GetRegisterNumber()) { + return false; + } + } + + /* src register is modified later in this block, will not be processed */ + for (auto srcOpnd : src) { + if (srcOpnd->IsRegister()) { + RegOperand &srcReg = static_cast(*srcOpnd); + if (destReg.IsOfFloatOrSIMDClass() && srcReg.GetRegisterNumber() == RZR) { + return false; + } + for (Insn *tmpInsn = &insn; tmpInsn != nullptr; tmpInsn = tmpInsn->GetNext()) { + Operand *tmpDest = nullptr; + std::vector tmpSrc; + if (IsSetInsn(*tmpInsn, &tmpDest, tmpSrc) && tmpDest->Equals(*srcOpnd)) { + ASSERT(tmpDest->IsRegister(), "opnd must be register"); + RegOperand *tmpDestReg = static_cast(tmpDest); + if (srcReg.GetRegisterNumber() == tmpDestReg->GetRegisterNumber()) { + return false; + } + } + } + } + } + + /* add/sub insn's dest register does not exist in cmp insn. */ + return CheckModifiedInCmpInsn(insn); +} + +bool AArch64ICOIfThenElsePattern::CheckCondMoveBB(BB *bb, std::map> &destSrcMap, + std::vector &destRegs, std::vector &setInsn, Insn **toBeRremovedOutOfCurrBB) const { + std::map dest2InsnMap; // CheckModifiedRegister will ensure that dest is defined only once. + if (bb == nullptr) { + return false; + } + FOR_BB_INSNS(insn, bb) { + if (!insn->IsMachineInstruction() || insn->IsBranch()) { + continue; + } + Operand *dest = nullptr; + std::vector src; + + if (!IsSetInsn(*insn, &dest, src)) { + return false; + } + ASSERT(dest != nullptr, "null ptr check"); + ASSERT(src.size() != 0, "null ptr check"); + + if (!dest->IsRegister()) { + return false; + } + + for (auto srcOpnd : src) { + if (!(srcOpnd->IsConstImmediate()) && !srcOpnd->IsRegister()) { + return false; + } + } + + if (flagOpnd != nullptr) { + RegOperand *flagReg = static_cast(flagOpnd); + regno_t flagRegNO = flagReg->GetRegisterNumber(); + if (bb->GetLiveOut()->TestBit(flagRegNO)) { + return false; + } + } + + if (!CheckModifiedRegister(*insn, destSrcMap, src, dest2InsnMap, toBeRremovedOutOfCurrBB)) { + return false; + } + + if (insn != *toBeRremovedOutOfCurrBB) { + if (IsExpansionMOperator(*insn)) { + if (*toBeRremovedOutOfCurrBB == nullptr && static_cast(dest)->IsVirtualRegister()) { + *toBeRremovedOutOfCurrBB = insn; + } else { + return false; + } + } + } + + (void)destSrcMap.insert(std::make_pair(dest, src)); + destRegs.emplace_back(dest); + (void)setInsn.emplace_back(insn); + dest2InsnMap[dest] = insn; + } + return true; +} + +bool AArch64ICOIfThenElsePattern::CheckModifiedInCmpInsn(const Insn &insn) const { + /* add/sub insn's dest register does not exist in cmp insn. 
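+ * (such insns are later hoisted above cmpInsn in cmpBB, so writing a register that feeds the
+ * compare would change the compare result)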
*/ + if (Has2SrcOpndSetInsn(insn)) { + RegOperand &insnDestReg = static_cast(insn.GetOperand(0)); + if (flagOpnd) { + RegOperand &cmpReg = static_cast(cmpInsn->GetOperand(0)); + if (insnDestReg.GetRegisterNumber() == cmpReg.GetRegisterNumber()) { + return false; + } + } else { + RegOperand &cmpReg1 = static_cast(cmpInsn->GetOperand(1)); + if (cmpInsn->GetOperand(2).IsRegister()) { + RegOperand &cmpReg2 = static_cast(cmpInsn->GetOperand(2)); + if (insnDestReg.GetRegisterNumber() == cmpReg1.GetRegisterNumber() || + insnDestReg.GetRegisterNumber() == cmpReg2.GetRegisterNumber()) { + return false; + } + } else { + if (insnDestReg.GetRegisterNumber() == cmpReg1.GetRegisterNumber()) { + return false; + } + } + } + } + return true; +} + +// If the first insn of if/else bb has the same machine opcode and src opnds, put the insn in cmpBB, do opt like: +// cmp BB: cmp BB: +// lsr w1, w0, #1 (change) +// cmp w5, #1 cmp w5, #1 +// beq beq +// if bb: if bb: +// lsr w1, w0, #1 (change) => +// mov w3, w1 mov w3, w1 +// else bb: else bb: +// lsr w2, w0, #1 (change) +// mov w4, w2 mov w4, w1 (change) +bool AArch64ICOIfThenElsePattern::DoHostBeforeDoCselOpt(BB &ifBB, BB &elseBB) { + auto firstInsnOfIfBB = ifBB.GetFirstMachineInsn(); + auto firstInsnOfElseBB = elseBB.GetFirstMachineInsn(); + if (firstInsnOfIfBB == nullptr || firstInsnOfElseBB == nullptr) { + return false; + } + if (!IsSetInsnMOperator(*firstInsnOfIfBB) || + firstInsnOfIfBB->GetMachineOpcode() != firstInsnOfElseBB->GetMachineOpcode() || + firstInsnOfIfBB->GetOperandSize() != firstInsnOfElseBB->GetOperandSize()) { + return false; + } + if (!firstInsnOfIfBB->GetOperand(0).IsRegister() || !firstInsnOfElseBB->GetOperand(0).IsRegister()) { + return false; + } + auto *destOpndOfInsnInIfBB = static_cast(&firstInsnOfIfBB->GetOperand(0)); + auto *destOpndOfInsnInElseBB = static_cast(&firstInsnOfElseBB->GetOperand(0)); + if (!destOpndOfInsnInIfBB->IsVirtualRegister() || !destOpndOfInsnInElseBB->IsVirtualRegister()) { + return false; + } + for (uint32 i = 1; i < firstInsnOfIfBB->GetOperandSize(); ++i) { + auto *opndOfInsnInIfBB = &firstInsnOfIfBB->GetOperand(i); + auto *opndOfInsnInElseBB = &firstInsnOfElseBB->GetOperand(i); + if (opndOfInsnInIfBB->IsRegister() && static_cast(opndOfInsnInIfBB)->Equals(*opndOfInsnInElseBB)) { + continue; + } + if (opndOfInsnInIfBB->GetKind() == Operand::kOpdImmediate && + static_cast(opndOfInsnInIfBB)->Equals(*opndOfInsnInElseBB)) { + continue; + } + return false; + } + if (ifBB.GetLiveOut()->TestBit(destOpndOfInsnInIfBB->GetRegisterNumber()) || + ifBB.GetLiveOut()->TestBit(destOpndOfInsnInElseBB->GetRegisterNumber())) { + return false; + } + FOR_BB_INSNS(insn, &elseBB) { + if (!insn->IsMachineInstruction()) { + continue; + } + for (uint32 i = 0; i < insn->GetOperandSize(); ++i) { + auto *opnd = &insn->GetOperand(i); + switch (opnd->GetKind()) { + case Operand::kOpdRegister: { + if (destOpndOfInsnInElseBB->Equals(*opnd)) { + insn->SetOperand(i, *destOpndOfInsnInIfBB); + } + break; + } + case Operand::kOpdMem: { + auto *memOpnd = static_cast(opnd); + RegOperand *base = memOpnd->GetBaseRegister(); + RegOperand *index = memOpnd->GetIndexRegister(); + if (base != nullptr && destOpndOfInsnInElseBB->Equals(*base)) { + memOpnd->SetBaseRegister(*destOpndOfInsnInIfBB); + } + if (index != nullptr && destOpndOfInsnInElseBB->Equals(*index)) { + memOpnd->SetIndexRegister(*destOpndOfInsnInIfBB); + } + break; + } + case Operand::kOpdList: { + auto *listOpnd = static_cast(opnd); + auto& opndList = listOpnd->GetOperands(); + std::list 
tempList; + while (!opndList.empty()) { + auto* op = opndList.front(); + opndList.pop_front(); + if (destOpndOfInsnInElseBB->Equals(*op)) { + tempList.push_back(destOpndOfInsnInElseBB); + } else { + tempList.push_back(op); + } + } + listOpnd->GetOperands().assign(tempList.begin(), tempList.end()); + break; + } + default: + break; + } + } + } + ifBB.RemoveInsn(*firstInsnOfIfBB); + elseBB.RemoveInsn(*firstInsnOfElseBB); + (void)cmpBB->InsertInsnBefore(*cmpInsn, *firstInsnOfIfBB); + return true; +} + +void AArch64ICOIfThenElsePattern::UpdateTemps(std::vector &destRegs, std::vector &setInsn, + std::map> &destSrcMap, const Insn &oldInsn, Insn *newInsn) { + for (auto it = setInsn.begin(); it != setInsn.end(); ++it) { + if (*it == &oldInsn) { + (void)setInsn.erase(it); + break; + } + } + if (newInsn != nullptr) { + setInsn.push_back(newInsn); + return; + } + auto &opnd = oldInsn.GetOperand(0); + if (!opnd.IsRegister()) { + return; + } + for (auto it = destRegs.begin(); it != destRegs.end(); ++it) { + if (opnd.Equals(**it)) { + (void)destRegs.erase(it); + break; + } + } + + for (auto it = destSrcMap.begin(); it != destSrcMap.end(); ++it) { + if (opnd.Equals(*(it->first))) { + (void)destSrcMap.erase(it); + break; + } + } +} + +void AArch64ICOIfThenElsePattern::RevertMoveInsns(BB *bb, Insn *prevInsnInBB, Insn *newInsnOfBB, + Insn *insnInBBToBeRremovedOutOfCurrBB) { + if (bb == nullptr || insnInBBToBeRremovedOutOfCurrBB == nullptr) { + return; + } + if (newInsnOfBB != nullptr) { + bb->RemoveInsn(*newInsnOfBB); + } + cmpBB->RemoveInsn(*insnInBBToBeRremovedOutOfCurrBB); + if (prevInsnInBB != nullptr) { + (void)bb->InsertInsnAfter(*prevInsnInBB, *insnInBBToBeRremovedOutOfCurrBB); + } else { + bb->InsertInsnBegin(*insnInBBToBeRremovedOutOfCurrBB); + } +} + +Insn *AArch64ICOIfThenElsePattern::MoveSetInsn2CmpBB(Insn &toBeRremoved2CmpBB, BB &currBB, + std::vector &anotherBranchDestRegs, std::map> &destSrcMap) { + Insn *newInsn = nullptr; + bool findInAnotherBB = false; + for (auto *tempReg: anotherBranchDestRegs) { + if (static_cast(tempReg)->Equals(static_cast(toBeRremoved2CmpBB.GetOperand(0)))) { + findInAnotherBB = true; + } + } + if (findInAnotherBB) { + // If the target register w0 is both the target register in the if and else branches, do opt like: + // cmpBB: cmpBB: + // uxth w2, w1 (change) + // cmp w5, #1 cmp w5, #1 + // beq beq + // + // if bb: if bb: + // eor w0, w3, w4 => eor w0, w3, w4 + // + // else bb: else bb: + // uxth w0, w1 mov w0, w2 (change) + auto &oldDestReg = static_cast(static_cast(toBeRremoved2CmpBB.GetOperand(0))); + ASSERT(oldDestReg.IsVirtualRegister(), "must be vreg"); + auto &newDestReg = cgFunc->CreateVirtualRegisterOperand( + cgFunc->NewVReg(oldDestReg.GetRegisterType(), oldDestReg.GetSize())); + toBeRremoved2CmpBB.SetOperand(0, newDestReg); + uint32 mOp = (oldDestReg.GetSize() == 64) ? 
MOP_xmovrr : MOP_wmovrr; + newInsn = &(cgFunc->GetInsnBuilder()->BuildInsn(mOp, oldDestReg, newDestReg)); + (void)currBB.InsertInsnBefore(toBeRremoved2CmpBB, *newInsn); + currBB.RemoveInsn(toBeRremoved2CmpBB); + (void)cmpBB->InsertInsnBefore(*cmpInsn, toBeRremoved2CmpBB); + destSrcMap[&oldDestReg].clear(); + destSrcMap[&oldDestReg].push_back(&newDestReg); + } else { + // If the target register w0 is in if or else branch, do opt like: + // cmpBB: cmpBB: + // mov w4, #40961 (change) + // cmp w5, #1 cmp w5, #1 + // beq beq + // + // if bb: if bb: + // mov w4, #40961 (change) + // eor w1, w3, w4 => eor w1, w3, w4 + // + // else bb: else bb: + // uxth w0, w1 uxth w0, w1 + toBeRremoved2CmpBB.GetBB()->RemoveInsn(toBeRremoved2CmpBB); + (void)cmpBB->InsertInsnBefore(*cmpInsn, toBeRremoved2CmpBB); + } + return newInsn; +} + +/* Convert conditional branches into cset/csel instructions */ +bool AArch64ICOIfThenElsePattern::DoOpt(BB *ifBB, BB *elseBB, BB &joinBB) { + Insn *condBr = cgFunc->GetTheCFG()->FindLastCondBrInsn(*cmpBB); + ASSERT(condBr != nullptr, "nullptr check"); + cmpInsn = FindLastCmpInsn(*cmpBB); + flagOpnd = nullptr; + /* for cbnz and cbz institution */ + if (cgFunc->GetTheCFG()->IsCompareAndBranchInsn(*condBr)) { + Operand &opnd0 = condBr->GetOperand(0); + if (opnd0.IsRegister() && static_cast(opnd0).GetRegisterNumber() == RZR) { + return false; + } + cmpInsn = condBr; + flagOpnd = &(opnd0); + } + + /* tbz will not be optimized */ + MOperator mOperator = condBr->GetMachineOpcode(); + if (mOperator == MOP_xtbz || mOperator == MOP_wtbz || mOperator == MOP_xtbnz || mOperator == MOP_wtbnz) { + return false; + } + if (cmpInsn == nullptr) { + return false; + } + if (ifBB != nullptr && elseBB != nullptr) { + while (DoHostBeforeDoCselOpt(*ifBB, *elseBB)) {} + } + + std::vector ifDestRegs; + std::vector ifSetInsn; + std::vector elseDestRegs; + std::vector elseSetInsn; + + std::map> ifDestSrcMap; + std::map> elseDestSrcMap; + + Insn *insnInElseBBToBeRremovedOutOfCurrBB = nullptr; + Insn *insnInIfBBToBeRremovedOutOfCurrBB = nullptr; + + if (!CheckCondMoveBB(elseBB, elseDestSrcMap, elseDestRegs, elseSetInsn, &insnInElseBBToBeRremovedOutOfCurrBB) || + (ifBB != nullptr && !CheckCondMoveBB(ifBB, ifDestSrcMap, ifDestRegs, ifSetInsn, + &insnInIfBBToBeRremovedOutOfCurrBB))) { + return false; + } + + if (!CheckHasSameDest(ifSetInsn, elseSetInsn) || !CheckHasSameDest(elseSetInsn, ifSetInsn)) { + return false; + } + + size_t count = elseDestRegs.size(); + + for (size_t i = 0; i < ifDestRegs.size(); ++i) { + bool foundInElse = false; + for (size_t j = 0; j < elseDestRegs.size(); ++j) { + RegOperand *elseDestReg = static_cast(elseDestRegs[j]); + RegOperand *ifDestReg = static_cast(ifDestRegs[i]); + if (ifDestReg->GetRegisterNumber() == elseDestReg->GetRegisterNumber()) { + if (Has2SrcOpndSetInsn(*ifSetInsn[i]) && Has2SrcOpndSetInsn(*elseSetInsn[j])) { + return false; + } + foundInElse = true; + break; + } + } + if (foundInElse) { + continue; + } else { + ++count; + } + } + if (count > kThreshold) { + return false; + } + Insn *newInsnOfIfBB = nullptr; + Insn *newInsnOfElseBB = nullptr; + Insn *prevInsnInIfBB = nullptr; + Insn *prevInsnInElseBB = nullptr; + if (insnInElseBBToBeRremovedOutOfCurrBB != nullptr) { + prevInsnInElseBB = insnInElseBBToBeRremovedOutOfCurrBB->GetPrev(); + ASSERT_NOT_NULL(elseBB); + newInsnOfElseBB = MoveSetInsn2CmpBB( + *insnInElseBBToBeRremovedOutOfCurrBB, *elseBB, ifDestRegs, elseDestSrcMap); + UpdateTemps(elseDestRegs, elseSetInsn, elseDestSrcMap, 
*insnInElseBBToBeRremovedOutOfCurrBB, newInsnOfElseBB); + } + if (insnInIfBBToBeRremovedOutOfCurrBB != nullptr) { + prevInsnInIfBB = insnInIfBBToBeRremovedOutOfCurrBB->GetPrev(); + ASSERT_NOT_NULL(ifBB); + newInsnOfIfBB = MoveSetInsn2CmpBB( + *insnInIfBBToBeRremovedOutOfCurrBB, *ifBB, elseDestRegs, ifDestSrcMap); + UpdateTemps(ifDestRegs, ifSetInsn, ifDestSrcMap, *insnInIfBBToBeRremovedOutOfCurrBB, newInsnOfIfBB); + } + + /* generate insns */ + std::vector elseGenerateInsn; + std::vector ifGenerateInsn; + bool elseBBProcessResult = false; + if (elseBB != nullptr) { + elseBBProcessResult = BuildCondMovInsn(*elseBB, ifDestSrcMap, elseDestSrcMap, false, elseGenerateInsn); + } + bool ifBBProcessResult = false; + if (ifBB != nullptr) { + ifBBProcessResult = BuildCondMovInsn(*ifBB, ifDestSrcMap, elseDestSrcMap, true, ifGenerateInsn); + } + if (!elseBBProcessResult || (ifBB != nullptr && !ifBBProcessResult)) { + RevertMoveInsns(elseBB, prevInsnInElseBB, newInsnOfElseBB, insnInElseBBToBeRremovedOutOfCurrBB); + RevertMoveInsns(ifBB, prevInsnInIfBB, newInsnOfIfBB, insnInIfBBToBeRremovedOutOfCurrBB); + return false; + } + + /* insert insn */ + if (cgFunc->GetTheCFG()->IsCompareAndBranchInsn(*condBr)) { + Insn *innerCmpInsn = BuildCmpInsn(*condBr); + cmpBB->InsertInsnBefore(*cmpInsn, *innerCmpInsn); + cmpInsn = innerCmpInsn; + } + + if (elseBB != nullptr) { + cmpBB->SetKind(elseBB->GetKind()); + } else { + ASSERT(ifBB != nullptr, "ifBB should not be nullptr"); + cmpBB->SetKind(ifBB->GetKind()); + } + + for (auto setInsn : ifSetInsn) { + if (Has2SrcOpndSetInsn(*setInsn)) { + (void)cmpBB->InsertInsnBefore(*cmpInsn, *setInsn); + } + } + + for (auto setInsn : elseSetInsn) { + if (Has2SrcOpndSetInsn(*setInsn)) { + (void)cmpBB->InsertInsnBefore(*cmpInsn, *setInsn); + } + } + + /* delete condBr */ + cmpBB->RemoveInsn(*condBr); + /* Insert goto insn after csel insn. */ + if (cmpBB->GetKind() == BB::kBBGoto || cmpBB->GetKind() == BB::kBBIf) { + if (elseBB != nullptr) { + (void)cmpBB->InsertInsnAfter(*cmpBB->GetLastInsn(), *elseBB->GetLastInsn()); + } else { + ASSERT(ifBB != nullptr, "ifBB should not be nullptr"); + (void)cmpBB->InsertInsnAfter(*cmpBB->GetLastInsn(), *ifBB->GetLastInsn()); + } + } + + /* Insert instructions in branches after cmpInsn */ + for (auto itr = elseGenerateInsn.crbegin(); itr != elseGenerateInsn.crend(); ++itr) { + (void)cmpBB->InsertInsnAfter(*cmpInsn, **itr); + } + for (auto itr = ifGenerateInsn.crbegin(); itr != ifGenerateInsn.crend(); ++itr) { + (void)cmpBB->InsertInsnAfter(*cmpInsn, **itr); + } + + /* Remove branches and merge join */ + if (ifBB != nullptr) { + BB *prevLast = ifBB->GetPrev(); + cgFunc->GetTheCFG()->RemoveBB(*ifBB); + if (ifBB->GetId() == cgFunc->GetLastBB()->GetId()) { + cgFunc->SetLastBB(*prevLast); + } + } + if (elseBB != nullptr) { + BB *prevLast = elseBB->GetPrev(); + cgFunc->GetTheCFG()->RemoveBB(*elseBB); + if (elseBB->GetId() == cgFunc->GetLastBB()->GetId()) { + cgFunc->SetLastBB(*prevLast); + } + } + /* maintain won't exit bb info. 
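+ * (if a removed branch BB was marked wont-exit, cmpBB is added as a predecessor of the common exit BB)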
*/ + if ((ifBB != nullptr && ifBB->IsWontExit()) || (elseBB != nullptr && elseBB->IsWontExit())) { + cgFunc->GetCommonExitBB()->PushBackPreds(*cmpBB); + } + + if (cmpBB->GetKind() != BB::kBBIf && cmpBB->GetNext() == &joinBB && + !maplebe::CGCFG::InLSDA(joinBB.GetLabIdx(), cgFunc->GetEHFunc()) && + cgFunc->GetTheCFG()->CanMerge(*cmpBB, joinBB)) { + maplebe::CGCFG::MergeBB(*cmpBB, joinBB, *cgFunc); + keepPosition = true; + } + return true; +} + +/* + * Find IF-THEN-ELSE or IF-THEN basic block pattern, + * and then invoke DoOpt(...) to finish optimize. + */ +bool AArch64ICOIfThenElsePattern::Optimize(BB &curBB) { + if (curBB.GetKind() != BB::kBBIf) { + return false; + } + BB *ifBB = nullptr; + BB *elseBB = nullptr; + BB *joinBB = nullptr; + + BB *thenDest = CGCFG::GetTargetSuc(curBB); + BB *elseDest = curBB.GetNext(); + CHECK_FATAL(thenDest != nullptr, "then_dest is null in ITEPattern::Optimize"); + CHECK_FATAL(elseDest != nullptr, "else_dest is null in ITEPattern::Optimize"); + /* IF-THEN-ELSE */ + if (thenDest->NumPreds() == 1 && thenDest->NumSuccs() == 1 && elseDest->NumSuccs() == 1 && + elseDest->NumPreds() == 1 && thenDest->GetSuccs().front() == elseDest->GetSuccs().front()) { + ifBB = thenDest; + elseBB = elseDest; + joinBB = thenDest->GetSuccs().front(); + } else if (elseDest->NumPreds() == 1 && elseDest->NumSuccs() == 1 && elseDest->GetSuccs().front() == thenDest) { + /* IF-THEN */ + ifBB = nullptr; + elseBB = elseDest; + joinBB = thenDest; + } else { + /* not a form we can handle */ + return false; + } + ASSERT(elseBB != nullptr, "elseBB should not be nullptr"); + if (CGCFG::InLSDA(elseBB->GetLabIdx(), cgFunc->GetEHFunc()) || + CGCFG::InSwitchTable(elseBB->GetLabIdx(), *cgFunc)) { + return false; + } + + if (ifBB != nullptr && + (CGCFG::InLSDA(ifBB->GetLabIdx(), cgFunc->GetEHFunc()) || + CGCFG::InSwitchTable(ifBB->GetLabIdx(), *cgFunc))) { + return false; + } + cmpBB = &curBB; + return DoOpt(ifBB, elseBB, *joinBB); +} + +/* If( cmp || cmp ) then + * or + * If( cmp && cmp ) then */ +bool AArch64ICOSameCondPattern::Optimize(BB &secondIfBB) { + if (secondIfBB.GetKind() != BB::kBBIf || secondIfBB.NumPreds() != 1) { + return false; + } + BB *firstIfBB = secondIfBB.GetPrev(); + BB *nextBB = firstIfBB->GetNext(); + CHECK_FATAL(nextBB != nullptr, "nextBB is null in AArch64ICOSameCondPattern::Optimize"); + /* firstIfBB's nextBB is secondIfBB */ + if (firstIfBB == nullptr || firstIfBB->GetKind() != BB::kBBIf || nextBB->GetId() != secondIfBB.GetId()) { + return false; + } + return DoOpt(firstIfBB, secondIfBB); +} + +bool AArch64ICOPattern::CheckMop(MOperator mOperator) const { + switch (mOperator) { + case MOP_beq: + case MOP_bne: + case MOP_blt: + case MOP_ble: + case MOP_bgt: + case MOP_bge: + case MOP_blo: + case MOP_bls: + case MOP_bhs: + case MOP_bhi: + case MOP_bpl: + case MOP_bmi: + case MOP_bvc: + case MOP_bvs: + return true; + default: + return false; + } +} + +/* branchInsn1 is firstIfBB's LastCondBrInsn + * branchInsn2 is secondIfBB's LastCondBrInsn + * + * Limitations: branchInsn1 is the same as branchInsn2 + * */ +bool AArch64ICOSameCondPattern::DoOpt(BB *firstIfBB, BB &secondIfBB) const { + Insn *branchInsn1 = cgFunc->GetTheCFG()->FindLastCondBrInsn(*firstIfBB); + ASSERT(branchInsn1 != nullptr, "nullptr check"); + Insn *cmpInsn1 = FindLastCmpInsn(*firstIfBB); + MOperator mOperator1 = branchInsn1->GetMachineOpcode(); + Insn *branchInsn2 = cgFunc->GetTheCFG()->FindLastCondBrInsn(secondIfBB); + ASSERT(branchInsn2 != nullptr, "nullptr check"); + Insn *cmpInsn2 = 
FindLastCmpInsn(secondIfBB); + MOperator mOperator2 = branchInsn2->GetMachineOpcode(); + if (cmpInsn1 == nullptr || cmpInsn2 == nullptr) { + return false; + } + + /* tbz and cbz will not be optimized */ + if (mOperator1 != mOperator2 || !CheckMop(mOperator1)) { + return false; + } + + /* two BB has same branch */ + std::vector labelOpnd1 = GetLabelOpnds(*branchInsn1); + std::vector labelOpnd2 = GetLabelOpnds(*branchInsn2); + if (labelOpnd1.size() != 1 || labelOpnd1.size() != 1 || + labelOpnd1[0]->GetLabelIndex() != labelOpnd2[0]->GetLabelIndex()) { + return false; + } + + /* secondifBB only has branchInsn and cmpInsn */ + FOR_BB_INSNS_REV(insn, &secondIfBB) { + if (!insn->IsMachineInstruction()) { + continue; + } + if (insn != branchInsn2 && insn != cmpInsn2) { + return false; + } + } + + /* build ccmp Insn */ + ConditionCode ccCode = Encode(branchInsn1->GetMachineOpcode(), true); + ASSERT(ccCode != kCcLast, "unknown cond, ccCode can't be kCcLast"); + Insn *ccmpInsn = BuildCcmpInsn(ccCode, cmpInsn2); + if (ccmpInsn == nullptr) { + return false; + } + + /* insert ccmp Insn */ + firstIfBB->InsertInsnBefore(*branchInsn1, *ccmpInsn); + + /* Remove secondIfBB */ + BB *nextBB = secondIfBB.GetNext(); + cgFunc->GetTheCFG()->RemoveBB(secondIfBB); + firstIfBB->PushFrontSuccs(*nextBB); + nextBB->PushFrontPreds(*firstIfBB); + return true; +} +/* + * find the preds all is ifBB + */ +bool AArch64ICOMorePredsPattern::Optimize(BB &curBB) { + if (curBB.GetKind() != BB::kBBGoto) { + return false; + } + for (BB *preBB : curBB.GetPreds()) { + if (preBB->GetKind() != BB::kBBIf) { + return false; + } + } + for (BB *succsBB : curBB.GetSuccs()) { + if (succsBB->GetKind() != BB::kBBFallthru) { + return false; + } + if (succsBB->NumPreds() > 2) { + return false; + } + } + Insn *gotoBr = curBB.GetLastMachineInsn(); + ASSERT(gotoBr != nullptr, "gotoBr should not be nullptr"); + auto &gotoLabel = static_cast(gotoBr->GetOperand(gotoBr->GetOperandSize() - 1)); + for (BB *preBB : curBB.GetPreds()) { + Insn *condBr = cgFunc->GetTheCFG()->FindLastCondBrInsn(*preBB); + ASSERT(condBr != nullptr, "nullptr check"); + Operand &condBrLastOpnd = condBr->GetOperand(condBr->GetOperandSize() - 1); + ASSERT(condBrLastOpnd.IsLabelOpnd(), "label Operand must be exist in branch insn"); + auto &labelOpnd = static_cast(condBrLastOpnd); + if (labelOpnd.GetLabelIndex() != curBB.GetLabIdx()) { + return false; + } + if (gotoLabel.GetLabelIndex() != preBB->GetNext()->GetLabIdx()) { + /* do not if convert if 'else' clause present */ + return false; + } + } + return DoOpt(curBB); +} + +/* this BBGoto only has mov Insn and Branch */ +bool AArch64ICOMorePredsPattern::CheckGotoBB(BB &gotoBB, std::vector &movInsn) const { + FOR_BB_INSNS(insn, &gotoBB) { + if (!insn->IsMachineInstruction()) { + continue; + } + if (insn->IsMove()) { + movInsn.push_back(insn); + continue; + } + if (insn->GetId() != gotoBB.GetLastInsn()->GetId()) { + return false; + } else if (!insn->IsBranch()) { /* last Insn is Branch */ + return false; + } + } + return true; +} + +/* this BBGoto only has mov Insn */ +bool AArch64ICOMorePredsPattern::MovToCsel(std::vector &movInsn, std::vector &cselInsn, + const Insn &branchInsn) const { + Operand &branchOpnd0 = branchInsn.GetOperand(kInsnFirstOpnd); + regno_t branchRegNo; + if (branchOpnd0.IsRegister()) { + branchRegNo = static_cast(branchOpnd0).GetRegisterNumber(); + } + for (Insn *insn:movInsn) { + /* use mov build csel */ + Operand &opnd0 = insn->GetOperand(kInsnFirstOpnd); + Operand &opnd1 = insn->GetOperand(kInsnSecondOpnd); + 
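+ /*
+  * Illustrative effect of the rewrite built below (register names are hypothetical):
+  *   mov  w0, w1   ==>   csel  w0, w1, w0, <cond>
+  * where <cond> is derived from the predecessor's conditional branch, so the
+  * destination is only overwritten on the path that would have reached this
+  * goto BB; otherwise the csel preserves its old value.
+  */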
ConditionCode ccCode = AArch64ICOPattern::Encode(branchInsn.GetMachineOpcode(), false); + ASSERT(ccCode != kCcLast, "unknown cond, ccCode can't be kCcLast"); + CondOperand &cond = static_cast(cgFunc)->GetCondOperand(ccCode); + Operand &rflag = static_cast(cgFunc)->GetOrCreateRflag(); + RegOperand ®Opnd0 = static_cast(opnd0); + RegOperand ®Opnd1 = static_cast(opnd1); + /* movInsn's opnd1 is Immediate */ + if (opnd1.IsImmediate()) { + return false; + } + /* opnd0 and opnd1 hsa same type and size */ + if (regOpnd0.GetSize() != regOpnd1.GetSize() || (regOpnd0.IsOfIntClass() != regOpnd1.IsOfIntClass())) { + return false; + } + /* The branchOpnd0 cannot be modified for csel. */ + regno_t movRegNo0 = static_cast(opnd0).GetRegisterNumber(); + if (branchOpnd0.IsRegister() && branchRegNo == movRegNo0) { + return false; + } + uint32 dSize = regOpnd0.GetSize(); + bool isIntTy = regOpnd0.IsOfIntClass(); + MOperator mOpCode = isIntTy ? (dSize == k64BitSize ? MOP_xcselrrrc : MOP_wcselrrrc) + : (dSize == k64BitSize ? MOP_dcselrrrc : (dSize == k32BitSize ? + MOP_scselrrrc : MOP_hcselrrrc)); + (void)cselInsn.emplace_back(&cgFunc->GetInsnBuilder()->BuildInsn(mOpCode, opnd0, opnd1, opnd0, cond, rflag)); + } + if (cselInsn.size() < 1) { + return false; + } + return true; +} + +bool AArch64ICOMorePredsPattern::DoOpt(BB &gotoBB) const { + std::vector movInsn; + std::vector> presCselInsn; + std::vector presBB; + Insn *branchInsn = gotoBB.GetLastMachineInsn(); + if (branchInsn == nullptr || !branchInsn->IsUnCondBranch()) { + return false; + } + /* get preds's new label */ + std::vector labelOpnd = GetLabelOpnds(*branchInsn); + if (labelOpnd.size() != 1) { + return false; + } + if (!CheckGotoBB(gotoBB, movInsn)) { + return false; + } + /* Check all preBB, Exclude gotoBBs that cannot be optimized. */ + for (BB *preBB : gotoBB.GetPreds()) { + Insn *condBr = cgFunc->GetTheCFG()->FindLastCondBrInsn(*preBB); + ASSERT(condBr != nullptr, "nullptr check"); + + /* tbz/cbz will not be optimized */ + MOperator mOperator = condBr->GetMachineOpcode(); + if (!CheckMop(mOperator)) { + return false; + } + std::vector cselInsn; + if (!MovToCsel(movInsn, cselInsn, *condBr)) { + return false; + } + if (cselInsn.size() < 1) { + return false; + } + presCselInsn.emplace_back(cselInsn); + presBB.emplace_back(preBB); + } + /* modifies presBB */ + for (size_t i = 0; i < presCselInsn.size(); ++i) { + BB *preBB = presBB[i]; + Insn *condBr = cgFunc->GetTheCFG()->FindLastCondBrInsn(*preBB); + std::vector cselInsn = presCselInsn[i]; + /* insert csel insn */ + for (Insn *csel : cselInsn) { + preBB->InsertInsnBefore(*condBr, *csel); + } + /* new condBr */ + condBr->SetOperand(condBr->GetOperandSize() - 1, *labelOpnd[0]); + } + /* Remove branches and merge gotoBB */ + cgFunc->GetTheCFG()->RemoveBB(gotoBB); + return true; +} + +} /* namespace maplebe */ diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_insn.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_insn.cpp new file mode 100644 index 0000000000000000000000000000000000000000..390de992738e9db28ef86e0c4d2b027a56f37858 --- /dev/null +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_insn.cpp @@ -0,0 +1,603 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "aarch64_insn.h" +#include "aarch64_cg.h" +#include "common_utils.h" +#include "insn.h" +#include "metadata_layout.h" +#include + +namespace maplebe { + +void A64OpndEmitVisitor::EmitIntReg(const RegOperand &v, int32 opndSz) { + CHECK_FATAL(v.GetRegisterType() == kRegTyInt, "wrong Type"); + int32 opndSize = (opndSz == kMaxSimm32) ? static_cast(v.GetSize()) : opndSz; + ASSERT((opndSize == k32BitSizeInt || opndSize == k64BitSizeInt), "illegal register size"); +#ifdef USE_32BIT_REF + bool r32 = (opndSize == k32BitSizeInt) || isRefField; +#else + bool r32 = (opndSize == k32BitSizeInt); +#endif /* USE_32BIT_REF */ + (void)emitter.Emit(AArch64CG::intRegNames[(r32 ? AArch64CG::kR32List : AArch64CG::kR64List)][v.GetRegisterNumber()]); +} + +void A64OpndEmitVisitor::Visit(maplebe::RegOperand *v) { + ASSERT(opndProp == nullptr || opndProp->IsRegister(), + "operand type doesn't match"); + uint32 size = v->GetSize(); + regno_t regNO = v->GetRegisterNumber(); + uint32 opndSize = (opndProp != nullptr) ? opndProp->GetSize() : size; + switch (v->GetRegisterType()) { + case kRegTyInt: { + EmitIntReg(*v, static_cast(opndSize)); + break; + } + case kRegTyFloat: { + ASSERT((opndSize == k8BitSize || opndSize == k16BitSize || opndSize == k32BitSize || + opndSize == k64BitSize || opndSize == k128BitSize), "illegal register size"); + if (opndProp->IsVectorOperand() && v->GetVecLaneSize() != 0) { + EmitVectorOperand(*v); + } else { + /* FP reg cannot be reffield. 8~0, 16~1, 32~2, 64~3. 8 is 1000b, has 3 zero. */ + int32 regSet = __builtin_ctz(static_cast(opndSize)) - 3; + (void)emitter.Emit(AArch64CG::intRegNames[static_cast(regSet)][regNO]); + } + break; + } + default: + ASSERT(false, "NYI"); + break; + } +} + +void A64OpndEmitVisitor::Visit(maplebe::ImmOperand *v) { + if (v->IsOfstImmediate()) { + Visit(static_cast(v)); + return; + } + + if (v->IsStImmediate()) { + Visit(*v->GetSymbol(), v->GetValue()); + return; + } + + int64 value = v->GetValue(); + bool isNegative = (value < 0); + if (!v->IsFmov()) { + value = (v->GetSize() == k64BitSize ? value : (isNegative ? + static_cast(static_cast(value)) : static_cast(static_cast(value)))); + (void)emitter.Emit((opndProp != nullptr && opndProp->IsLoadLiteral()) ? "=" : "#").Emit(value); + return; + } + if (v->GetKind() == Operand::kOpdFPImmediate) { + CHECK_FATAL(value == 0, "NIY"); + (void)emitter.Emit("#0.0"); + } + /* + * compute float value + * use top 4 bits expect MSB of value . 
then calculate its fourth power + */ + int32 exp = static_cast((((static_cast(value) & 0x70) >> 4) ^ 0x4) - 3); + /* use the lower four bits of value in this expression */ + const float mantissa = 1.0 + (static_cast(static_cast(value) & 0xf) / 16.0); + float result = static_cast(std::pow(2, exp)) * mantissa; + + std::stringstream ss; + ss << std::setprecision(10) << result; + std::string res; + ss >> res; + size_t dot = res.find('.'); + if (dot == std::string::npos) { + res += ".0"; + dot = res.find('.'); + CHECK_FATAL(dot != std::string::npos, "cannot find in string"); + } + (void)res.erase(dot, 1); + std::string integer(res, 0, 1); + std::string fraction(res, 1); + while (fraction.size() != 1 && fraction[fraction.size() - 1] == '0') { + fraction.pop_back(); + } + /* fetch the sign bit of this value */ + std::string sign = ((static_cast(value) & 0x80) > 0) ? "-" : ""; + (void)emitter.Emit(sign + integer + "." + fraction + "e+").Emit(static_cast(dot) - 1); +} + +void A64OpndEmitVisitor::Visit(maplebe::MemOperand *v) { + auto a64v = static_cast(v); + MemOperand::AArch64AddressingMode addressMode = a64v->GetAddrMode(); +#if DEBUG + const InsnDesc *md = &AArch64CG::kMd[emitter.GetCurrentMOP()]; + bool isLDSTpair = md->IsLoadStorePair(); + ASSERT(md->Is64Bit() || md->GetOperandSize() <= k32BitSize || md->GetOperandSize() == k128BitSize, + "unexpected opnd size"); +#endif + if (addressMode == MemOperand::kAddrModeBOi) { + (void)emitter.Emit("["); + auto *baseReg = v->GetBaseRegister(); + ASSERT(baseReg != nullptr, "expect an RegOperand here"); + uint32 baseSize = baseReg->GetSize(); + if (baseSize != k64BitSize) { + baseReg->SetSize(k64BitSize); + } + EmitIntReg(*baseReg); + baseReg->SetSize(baseSize); + OfstOperand *offset = a64v->GetOffsetImmediate(); + if (offset != nullptr) { +#ifndef USE_32BIT_REF /* can be load a ref here */ + /* + * Cortex-A57 Software Optimization Guide: + * The ARMv8-A architecture allows many types of load and store accesses to be arbitrarily aligned. + * The Cortex- A57 processor handles most unaligned accesses without performance penalties. + */ +#if DEBUG + if (a64v->IsOffsetMisaligned(md->GetOperandSize())) { + INFO(kLncInfo, "The Memory operand's offset is misaligned:", ""); + + } +#endif +#endif /* USE_32BIT_REF */ + if (a64v->IsPostIndexed()) { + ASSERT(!a64v->IsSIMMOffsetOutOfRange(offset->GetOffsetValue(), md->Is64Bit(), isLDSTpair), + "should not be SIMMOffsetOutOfRange"); + (void)emitter.Emit("]"); + if (!offset->IsZero()) { + (void)emitter.Emit(", "); + Visit(offset); + } + } else if (a64v->IsPreIndexed()) { + ASSERT(!a64v->IsSIMMOffsetOutOfRange(offset->GetOffsetValue(), md->Is64Bit(), isLDSTpair), + "should not be SIMMOffsetOutOfRange"); + if (!offset->IsZero()) { + (void)emitter.Emit(","); + Visit(offset); + } + (void)emitter.Emit("]!"); + } else { + if (CGOptions::IsPIC() && (offset->IsSymOffset() || offset->IsSymAndImmOffset()) && + (offset->GetSymbol()->NeedPIC() || offset->GetSymbol()->IsThreadLocal())) { + std::string gotEntry = offset->GetSymbol()->IsThreadLocal() ? ", #:tlsdesc_lo12:" : ", #:got_lo12:"; + std::string symbolName = offset->GetSymbolName(); + symbolName += offset->GetSymbol()->GetStorageClass() == kScPstatic && !offset->GetSymbol()->IsConst() ? 
+ std::to_string(emitter.GetCG()->GetMIRModule()->CurFunction()->GetPuidx()) : ""; + (void)emitter.Emit(gotEntry + symbolName); + } else { + if (!offset->IsZero()) { + (void)emitter.Emit(","); + Visit(offset); + } + } + (void)emitter.Emit("]"); + } + } else { + (void)emitter.Emit("]"); + } + } else if (addressMode == MemOperand::kAddrModeBOrX) { + /* + * Base plus offset | [base{, #imm}] [base, Xm{, LSL #imm}] [base, Wm, (S|U)XTW {#imm}] + * offset_opnds=nullptr + * offset_opnds=64 offset_opnds=32 + * imm=0 or 3 imm=0 or 2, s/u + */ + (void)emitter.Emit("["); + auto *baseReg = v->GetBaseRegister(); + // After ssa version support different size, the value is changed back + baseReg->SetSize(k64BitSize); + + EmitIntReg(*baseReg); + (void)emitter.Emit(","); + EmitIntReg(*a64v->GetIndexRegister()); + if (a64v->ShouldEmitExtend() || v->GetBaseRegister()->GetSize() > a64v->GetIndexRegister()->GetSize()) { + (void)emitter.Emit(","); + /* extend, #0, of #3/#2 */ + (void)emitter.Emit(a64v->GetExtendAsString()); + if (a64v->GetExtendAsString() == "LSL" || a64v->ShiftAmount() != 0) { + (void)emitter.Emit(" #"); + (void)emitter.Emit(a64v->ShiftAmount()); + } + } + (void)emitter.Emit("]"); + } else if (addressMode == MemOperand::kAddrModeLiteral) { + CHECK_FATAL(opndProp != nullptr, "prop is nullptr in MemOperand::Emit"); + if (opndProp->IsMemLow12()) { + (void)emitter.Emit("#:lo12:"); + } + PUIdx pIdx = emitter.GetCG()->GetMIRModule()->CurFunction()->GetPuidx(); + (void)emitter.Emit(v->GetSymbol()->GetName() + std::to_string(pIdx)); + } else if (addressMode == MemOperand::kAddrModeLo12Li) { + (void)emitter.Emit("["); + EmitIntReg(*v->GetBaseRegister()); + + OfstOperand *offset = a64v->GetOffsetImmediate(); + ASSERT(offset != nullptr, "nullptr check"); + + (void)emitter.Emit(", #:lo12:"); + if (v->GetSymbol()->GetAsmAttr() != UStrIdx(0) && + (v->GetSymbol()->GetStorageClass() == kScPstatic || v->GetSymbol()->GetStorageClass() == kScPstatic)) { + std::string asmSection = GlobalTables::GetUStrTable().GetStringFromStrIdx(v->GetSymbol()->GetAsmAttr()); + (void)emitter.Emit(asmSection); + } else { + if (v->GetSymbol()->GetStorageClass() == kScPstatic && v->GetSymbol()->IsLocal()) { + PUIdx pIdx = emitter.GetCG()->GetMIRModule()->CurFunction()->GetPuidx(); + (void)emitter.Emit(a64v->GetSymbolName() + std::to_string(pIdx)); + } else { + (void)emitter.Emit(a64v->GetSymbolName()); + } + } + if (!offset->IsZero()) { + (void)emitter.Emit("+"); + (void)emitter.Emit(std::to_string(offset->GetOffsetValue())); + } + (void)emitter.Emit("]"); + } else { + ASSERT(false, "nyi"); + } +} + +void A64OpndEmitVisitor::Visit(LabelOperand *v) { + emitter.EmitLabelRef(v->GetLabelIndex()); +} + +void A64OpndEmitVisitor::Visit(CondOperand *v) { + (void)emitter.Emit(CondOperand::ccStrs[v->GetCode()]); +} + +void A64OpndEmitVisitor::Visit(ExtendShiftOperand *v) { + ASSERT(v->GetShiftAmount() <= k4BitSize && v->GetShiftAmount() >= 0, + "shift amount out of range in ExtendShiftOperand"); + auto emitExtendShift = [this, v](const std::string &extendKind)->void { + (void)emitter.Emit(extendKind); + if (v->GetShiftAmount() != 0) { + (void)emitter.Emit(" #").Emit(v->GetShiftAmount()); + } + }; + switch (v->GetExtendOp()) { + case ExtendShiftOperand::kUXTB: + emitExtendShift("UXTB"); + break; + case ExtendShiftOperand::kUXTH: + emitExtendShift("UXTH"); + break; + case ExtendShiftOperand::kUXTW: + emitExtendShift("UXTW"); + break; + case ExtendShiftOperand::kUXTX: + emitExtendShift("UXTX"); + break; + case ExtendShiftOperand::kSXTB: + 
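+ // SXTB/SXTH/SXTW/SXTX below are the sign-extending counterparts of the
+ // unsigned UXT* cases handled above.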
emitExtendShift("SXTB"); + break; + case ExtendShiftOperand::kSXTH: + emitExtendShift("SXTH"); + break; + case ExtendShiftOperand::kSXTW: + emitExtendShift("SXTW"); + break; + case ExtendShiftOperand::kSXTX: + emitExtendShift("SXTX"); + break; + default: + ASSERT(false, "should not be here"); + break; + } +} + +void A64OpndEmitVisitor::Visit(BitShiftOperand *v) { + std::string shiftOp; + switch (v->GetShiftOp()) { + case BitShiftOperand::kLSL: + shiftOp = "LSL #"; + break; + case BitShiftOperand::kLSR: + shiftOp = "LSR #"; + break; + case BitShiftOperand::kASR: + shiftOp = "ASR #"; + break; + case BitShiftOperand::kROR: + shiftOp = "ROR #"; + break; + default: + CHECK_FATAL(false, "check shiftOp"); + } + (void)emitter.Emit(shiftOp).Emit(v->GetShiftAmount()); +} + +void A64OpndEmitVisitor::Visit(StImmOperand *v) { + Visit(*v->GetSymbol(), v->GetOffset()); +} + +void A64OpndEmitVisitor::Visit(const MIRSymbol &symbol, int64 offset) { + CHECK_FATAL(opndProp != nullptr, "opndProp is nullptr in StImmOperand::Emit"); + const bool isThreadLocal = symbol.IsThreadLocal(); + const bool isLiteralLow12 = opndProp->IsLiteralLow12(); + const bool hasGotEntry = CGOptions::IsPIC() && symbol.NeedPIC(); + bool hasPrefix = false; + if (isThreadLocal) { + (void)emitter.Emit(":tlsdesc"); + hasPrefix = true; + } + if (!hasPrefix && hasGotEntry) { + (void)emitter.Emit(":got"); + hasPrefix = true; + } + if (isLiteralLow12) { + std::string lo12String = hasPrefix ? "_lo12" : ":lo12"; + (void)emitter.Emit(lo12String); + hasPrefix = true; + } + if (hasPrefix) { + (void)emitter.Emit(":"); + } + if (symbol.GetAsmAttr() != UStrIdx(0) && + (symbol.GetStorageClass() == kScPstatic || symbol.GetStorageClass() == kScPstatic)) { + std::string asmSection = GlobalTables::GetUStrTable().GetStringFromStrIdx(symbol.GetAsmAttr()); + (void)emitter.Emit(asmSection); + } else { + if (symbol.GetStorageClass() == kScPstatic && symbol.GetSKind() != kStConst) { + (void)emitter.Emit(symbol.GetName() + + std::to_string(emitter.GetCG()->GetMIRModule()->CurFunction()->GetPuidx())); + } else { + (void)emitter.Emit(symbol.GetName()); + } + } + if (!hasGotEntry && offset != 0) { + (void)emitter.Emit("+" + std::to_string(offset)); + } +} + +void A64OpndEmitVisitor::Visit(FuncNameOperand *v) { + (void)emitter.Emit(v->GetName()); +} + +void A64OpndEmitVisitor::Visit(CommentOperand *v) { + (void)emitter.Emit(v->GetComment()); +} + +void A64OpndEmitVisitor::Visit(ListOperand *v) { + (void)opndProp; + size_t nLeft = v->GetOperands().size(); + if (nLeft == 0) { + return; + } + + for (auto it = v->GetOperands().cbegin(); it != v->GetOperands().cend(); ++it) { + Visit(*it); + if (--nLeft >= 1) { + (void)emitter.Emit(", "); + } + } +} + +void A64OpndEmitVisitor::Visit(OfstOperand *v) { + int64 value = v->GetValue(); + if (v->IsImmOffset()) { + (void)emitter.Emit((opndProp != nullptr && opndProp->IsLoadLiteral()) ? "=" : "#") + .Emit((v->GetSize() == k64BitSize) ? 
value : static_cast(static_cast(value))); + return; + } + const MIRSymbol *symbol = v->GetSymbol(); + if (CGOptions::IsPIC() && symbol->NeedPIC()) { + (void)emitter.Emit(":got:" + symbol->GetName()); + } else if (symbol->GetStorageClass() == kScPstatic && symbol->GetSKind() != kStConst && symbol->IsLocal()) { + (void)emitter.Emit(symbol->GetName() + + std::to_string(emitter.GetCG()->GetMIRModule()->CurFunction()->GetPuidx())); + } else { + (void)emitter.Emit(symbol->GetName()); + } + if (value != 0) { + (void)emitter.Emit("+" + std::to_string(value)); + } +} + +void A64OpndEmitVisitor::EmitVectorOperand(const RegOperand &v) { + std::string width; + switch (v.GetVecElementSize()) { + case k8BitSize: + width = "b"; + break; + case k16BitSize: + width = "h"; + break; + case k32BitSize: + width = "s"; + break; + case k64BitSize: + width = "d"; + break; + default: + CHECK_FATAL(false, "unexpected value size for vector element"); + break; + } + (void)emitter.Emit(AArch64CG::vectorRegNames[v.GetRegisterNumber()]); + int32 lanePos = v.GetVecLanePosition(); + if (lanePos == -1) { + (void)emitter.Emit("." + std::to_string(v.GetVecLaneSize()) + width); + } else { + (void)emitter.Emit("." + width + "[" + std::to_string(lanePos) + "]"); + } +} + +void A64OpndDumpVisitor::Visit(RegOperand *v) { + std::array prims = { "U", "R", "V", "C", "X", "Vra" }; + std::array classes = { "[U]", "[I]", "[F]", "[CC]", "[X87]", "[Vra]" }; + uint32 regType = v->GetRegisterType(); + ASSERT(regType < kRegTyLast, "unexpected regType"); + + regno_t reg = v->GetRegisterNumber(); + reg = v->IsVirtualRegister() ? reg : (reg - 1); + LogInfo::MapleLogger() << (v->IsVirtualRegister() ? "vreg:" : " reg:") << prims[regType]; + if (reg + 1 == RSP && v->IsPhysicalRegister()) { + LogInfo::MapleLogger() << "SP"; + } else if (reg + 1 == RZR && v->IsPhysicalRegister()) { + LogInfo::MapleLogger() << "ZR"; + } else { + LogInfo::MapleLogger() << reg; + } + LogInfo::MapleLogger() << " " << classes[regType]; + uint32 vb = v->GetValidBitsNum(); + if (vb != v->GetSize()) { + LogInfo::MapleLogger() << " Vb: [" << vb << "]"; + } + LogInfo::MapleLogger() << " Sz: [" << v->GetSize() << "]" ; +} + +void A64OpndDumpVisitor::Visit(ImmOperand *v) { + if (v->IsStImmediate()) { + LogInfo::MapleLogger() << v->GetName(); + LogInfo::MapleLogger() << "+offset:" << v->GetValue(); + } else { + LogInfo::MapleLogger() << "imm:" << v->GetValue(); + } +} + +void A64OpndDumpVisitor::Visit(MemOperand *a64v) { + LogInfo::MapleLogger() << "Mem:"; + LogInfo::MapleLogger() << " size:" << a64v->GetSize() << " "; + LogInfo::MapleLogger() << " isStack:" << a64v->IsStackMem() << "-" << a64v->IsStackArgMem() << " "; + switch (a64v->GetAddrMode()) { + case MemOperand::kAddrModeBOi: { + LogInfo::MapleLogger() << "base:"; + ASSERT(a64v->GetBaseRegister(), " lack of base register"); + Visit(a64v->GetBaseRegister()); + LogInfo::MapleLogger() << "offset:"; + if (a64v->GetOffsetOperand()) { + Visit(a64v->GetOffsetOperand()); + } + switch (a64v->GetIndexOpt()) { + case MemOperand::kIntact: + LogInfo::MapleLogger() << " intact"; + break; + case MemOperand::kPreIndex: + LogInfo::MapleLogger() << " pre-index"; + break; + case MemOperand::kPostIndex: + LogInfo::MapleLogger() << " post-index"; + break; + default: + break; + } + break; + } + case MemOperand::kAddrModeBOrX: { + LogInfo::MapleLogger() << "base:"; + Visit(a64v->GetBaseRegister()); + LogInfo::MapleLogger() << "offset:"; + Visit(a64v->GetIndexRegister()); + LogInfo::MapleLogger() << " " << a64v->GetExtendAsString(); + 
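+ // Illustrative dump for a base+index operand such as [x1, w2, UXTW #2]:
+ // roughly "Mem: size:64 ... base: reg:R1 [I] ... offset: reg:R2 [I] ... UXTW shift: 2 extend: UXTW".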
LogInfo::MapleLogger() << " shift: " << a64v->ShiftAmount(); + LogInfo::MapleLogger() << " extend: " << a64v->GetExtendAsString(); + break; + } + case MemOperand::kAddrModeLiteral: + LogInfo::MapleLogger() << "literal: " << a64v->GetSymbolName(); + break; + case MemOperand::kAddrModeLo12Li: { + LogInfo::MapleLogger() << "base:"; + Visit(a64v->GetBaseRegister()); + LogInfo::MapleLogger() << "offset:"; + OfstOperand *offOpnd = a64v->GetOffsetImmediate(); + LogInfo::MapleLogger() << "#:lo12:"; + if (a64v->GetSymbol()->GetStorageClass() == kScPstatic && a64v->GetSymbol()->IsLocal()) { + PUIdx pIdx = CG::GetCurCGFunc()->GetMirModule().CurFunction()->GetPuidx(); + LogInfo::MapleLogger() << a64v->GetSymbolName() << std::to_string(pIdx); + } else { + LogInfo::MapleLogger() << a64v->GetSymbolName(); + } + LogInfo::MapleLogger() << "+" << std::to_string(offOpnd->GetOffsetValue()); + break; + } + default: + ASSERT(false, "error memoperand dump"); + break; + } +} + +void A64OpndDumpVisitor::Visit(CondOperand *v) { + LogInfo::MapleLogger() << "CC: " << CondOperand::ccStrs[v->GetCode()]; +} +void A64OpndDumpVisitor::Visit(StImmOperand *v) { + LogInfo::MapleLogger() << v->GetName(); + LogInfo::MapleLogger() << "+offset:" << v->GetOffset(); +} +void A64OpndDumpVisitor::Visit(BitShiftOperand *v) { + BitShiftOperand::ShiftOp shiftOp = v->GetShiftOp(); + uint32 shiftAmount = v->GetShiftAmount(); + LogInfo::MapleLogger() << ((shiftOp == BitShiftOperand::kLSL) ? "LSL: " : + ((shiftOp == BitShiftOperand::kLSR) ? "LSR: " : "ASR: ")); + LogInfo::MapleLogger() << shiftAmount; +} +void A64OpndDumpVisitor::Visit(ExtendShiftOperand *v) { + auto dumpExtendShift = [v](const std::string &extendKind)->void { + LogInfo::MapleLogger() << extendKind; + if (v->GetShiftAmount() != 0) { + LogInfo::MapleLogger() << " : " << v->GetShiftAmount(); + } + }; + switch (v->GetExtendOp()) { + case ExtendShiftOperand::kUXTB: + dumpExtendShift("UXTB"); + break; + case ExtendShiftOperand::kUXTH: + dumpExtendShift("UXTH"); + break; + case ExtendShiftOperand::kUXTW: + dumpExtendShift("UXTW"); + break; + case ExtendShiftOperand::kUXTX: + dumpExtendShift("UXTX"); + break; + case ExtendShiftOperand::kSXTB: + dumpExtendShift("SXTB"); + break; + case ExtendShiftOperand::kSXTH: + dumpExtendShift("SXTH"); + break; + case ExtendShiftOperand::kSXTW: + dumpExtendShift("SXTW"); + break; + case ExtendShiftOperand::kSXTX: + dumpExtendShift("SXTX"); + break; + default: + ASSERT(false, "should not be here"); + break; + } +} +void A64OpndDumpVisitor::Visit(LabelOperand *v) { + LogInfo::MapleLogger() << "label:" << v->GetLabelIndex(); +} +void A64OpndDumpVisitor::Visit(FuncNameOperand *v) { + LogInfo::MapleLogger() << "func :" << v->GetName(); +} +void A64OpndDumpVisitor::Visit(CommentOperand *v) { + LogInfo::MapleLogger() << " #" << v->GetComment(); +} +void A64OpndDumpVisitor::Visit(PhiOperand *v) { + auto &phiList = v->GetOperands(); + for (auto it = phiList.cbegin(); it != phiList.cend();) { + Visit(it->second); + LogInfo::MapleLogger() << " fBB<" << it->first << ">"; + LogInfo::MapleLogger() << (++it == phiList.end() ? "" : " ,"); + } +} +void A64OpndDumpVisitor::Visit(ListOperand *v) { + auto &opndList = v->GetOperands(); + for (auto it = opndList.cbegin(); it != opndList.cend();) { + Visit(*it); + LogInfo::MapleLogger() << (++it == opndList.end() ? 
"" : " ,"); + } +} +} /* namespace maplebe */ diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_isa.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_isa.cpp new file mode 100644 index 0000000000000000000000000000000000000000..517d7184ed2dcb8ac0ccd299a1ff84de5f798945 --- /dev/null +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_isa.cpp @@ -0,0 +1,144 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ + +#include "aarch64_isa.h" +#include "insn.h" + +namespace maplebe { +/* + * Get the ldp/stp corresponding to ldr/str + * mop : a ldr or str machine operator + */ +MOperator GetMopPair(MOperator mop) { + switch (mop) { + case MOP_xldr: + return MOP_xldp; + case MOP_wldr: + return MOP_wldp; + case MOP_xstr: + return MOP_xstp; + case MOP_wstr: + return MOP_wstp; + case MOP_dldr: + return MOP_dldp; + case MOP_qldr: + return MOP_qldp; + case MOP_sldr: + return MOP_sldp; + case MOP_dstr: + return MOP_dstp; + case MOP_sstr: + return MOP_sstp; + case MOP_qstr: + return MOP_qstp; + default: + ASSERT(false, "should not run here"); + return MOP_undef; + } +} +namespace AArch64isa { +MOperator FlipConditionOp(MOperator flippedOp) { + switch (flippedOp) { + case AArch64MOP_t::MOP_beq: + return AArch64MOP_t::MOP_bne; + case AArch64MOP_t::MOP_bge: + return AArch64MOP_t::MOP_blt; + case AArch64MOP_t::MOP_bgt: + return AArch64MOP_t::MOP_ble; + case AArch64MOP_t::MOP_bhi: + return AArch64MOP_t::MOP_bls; + case AArch64MOP_t::MOP_bhs: + return AArch64MOP_t::MOP_blo; + case AArch64MOP_t::MOP_ble: + return AArch64MOP_t::MOP_bgt; + case AArch64MOP_t::MOP_blo: + return AArch64MOP_t::MOP_bhs; + case AArch64MOP_t::MOP_bls: + return AArch64MOP_t::MOP_bhi; + case AArch64MOP_t::MOP_blt: + return AArch64MOP_t::MOP_bge; + case AArch64MOP_t::MOP_bne: + return AArch64MOP_t::MOP_beq; + case AArch64MOP_t::MOP_bpl: + return AArch64MOP_t::MOP_bmi; + case AArch64MOP_t::MOP_xcbnz: + return AArch64MOP_t::MOP_xcbz; + case AArch64MOP_t::MOP_wcbnz: + return AArch64MOP_t::MOP_wcbz; + case AArch64MOP_t::MOP_xcbz: + return AArch64MOP_t::MOP_xcbnz; + case AArch64MOP_t::MOP_wcbz: + return AArch64MOP_t::MOP_wcbnz; + case AArch64MOP_t::MOP_wtbnz: + return AArch64MOP_t::MOP_wtbz; + case AArch64MOP_t::MOP_wtbz: + return AArch64MOP_t::MOP_wtbnz; + case AArch64MOP_t::MOP_xtbnz: + return AArch64MOP_t::MOP_xtbz; + case AArch64MOP_t::MOP_xtbz: + return AArch64MOP_t::MOP_xtbnz; + default: + break; + } + return AArch64MOP_t::MOP_undef; +} + +uint32 GetJumpTargetIdx(const Insn &insn) { + MOperator curMop = insn.GetMachineOpcode(); + switch (curMop) { + /* unconditional jump */ + case MOP_xuncond: { + return kInsnFirstOpnd; + } + case MOP_xbr: { + ASSERT(insn.GetOperandSize() == 2, "ERR"); + return kInsnSecondOpnd; + } + /* conditional jump */ + case MOP_bcc: + case MOP_bcs: + case MOP_bmi: + case MOP_bvc: + case MOP_bls: + case MOP_blt: + case MOP_ble: + case MOP_blo: + case MOP_beq: + case MOP_bpl: + case MOP_bhs: + case MOP_bvs: + case MOP_bhi: + case MOP_bgt: + case MOP_bge: + case MOP_bne: + case MOP_wcbz: + 
case MOP_xcbz: + case MOP_wcbnz: + case MOP_xcbnz: { + return kInsnSecondOpnd; + } + case MOP_wtbz: + case MOP_xtbz: + case MOP_wtbnz: + case MOP_xtbnz: { + return kInsnThirdOpnd; + } + default: + CHECK_FATAL(false, "Not a jump insn"); + } + return kInsnFirstOpnd; +} +} /* namespace AArch64isa */ +} /* namespace maplebe */ diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_isolate_fastpath.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_isolate_fastpath.cpp new file mode 100644 index 0000000000000000000000000000000000000000..cdfca0954444a561d125dcd1bf873cdab86ad3ac --- /dev/null +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_isolate_fastpath.cpp @@ -0,0 +1,432 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "aarch64_isolate_fastpath.h" +#include "aarch64_cg.h" +#include "cgfunc.h" + +namespace maplebe { +using namespace maple; + +bool AArch64IsolateFastPath::FindRegs(Operand &op, std::set &vecRegs) const { + Operand *opnd = &op; + if (opnd == nullptr || vecRegs.empty()) { + return false; + } + if (opnd->IsList()) { + MapleList pregList = static_cast(opnd)->GetOperands(); + for (auto *preg : as_const(pregList)) { + if (preg->GetRegisterNumber() == R29 || + vecRegs.find(preg->GetRegisterNumber()) != vecRegs.end()) { + return true; /* the opReg will overwrite or reread the vecRegs */ + } + } + } + if (opnd->IsMemoryAccessOperand()) { /* the registers of kOpdMem are complex to be detected */ + RegOperand *baseOpnd = static_cast(opnd)->GetBaseRegister(); + RegOperand *indexOpnd = static_cast(opnd)->GetIndexRegister(); + if ((baseOpnd != nullptr && baseOpnd->GetRegisterNumber() == R29) || + (indexOpnd != nullptr && indexOpnd->GetRegisterNumber() == R29)) { + return true; /* Avoid modifying data on the stack */ + } + if ((baseOpnd != nullptr && vecRegs.find(baseOpnd->GetRegisterNumber()) != vecRegs.end()) || + (indexOpnd != nullptr && vecRegs.find(indexOpnd->GetRegisterNumber()) != vecRegs.end())) { + return true; + } + } + if (opnd->IsRegister()) { + RegOperand *regOpnd = static_cast(opnd); + if (regOpnd->GetRegisterNumber() == R29 || + vecRegs.find(regOpnd->GetRegisterNumber()) != vecRegs.end()) { + return true; /* dst is a target register, result_dst is a target register */ + } + } + return false; +} + +bool AArch64IsolateFastPath::InsertOpndRegs(Operand &op, std::set &vecRegs) const { + Operand *opnd = &op; + if (opnd->IsList()) { + MapleList pregList = static_cast(opnd)->GetOperands(); + for (auto *preg : as_const(pregList)) { + if (preg != nullptr) { + (void)vecRegs.insert(preg->GetRegisterNumber()); + } + } + } + if (opnd->IsMemoryAccessOperand()) { /* the registers of kOpdMem are complex to be detected */ + RegOperand *baseOpnd = static_cast(opnd)->GetBaseRegister(); + if (baseOpnd != nullptr) { + (void)vecRegs.insert(baseOpnd->GetRegisterNumber()); + } + RegOperand *indexOpnd = static_cast(opnd)->GetIndexRegister(); + if (indexOpnd != nullptr) { + (void)vecRegs.insert(indexOpnd->GetRegisterNumber()); + } + } + if (opnd->IsRegister()) { + 
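+ // Plain register operand: record its register number so FindRegs can later
+ // detect reads or writes of the registers collected here.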
RegOperand *preg = static_cast(opnd); + if (preg != nullptr) { + (void)vecRegs.insert(preg->GetRegisterNumber()); + } + } + return true; +} + +bool AArch64IsolateFastPath::InsertInsnRegs(Insn &insn, bool insertSource, std::set &vecSourceRegs, + bool insertTarget, std::set &vecTargetRegs) const { + Insn *curInsn = &insn; + for (uint32 o = 0; o < curInsn->GetOperandSize(); ++o) { + Operand &opnd = curInsn->GetOperand(o); + if (insertSource && curInsn->OpndIsUse(o)) { + (void)InsertOpndRegs(opnd, vecSourceRegs); + } + if (insertTarget && curInsn->OpndIsDef(o)) { + (void)InsertOpndRegs(opnd, vecTargetRegs); + } + } + return true; +} + +bool AArch64IsolateFastPath::BackwardFindDependency(BB &ifbb, std::set &vecReturnSourceRegs, + std::list &existingInsns, + std::list &moveInsns) const { + /* + * Pattern match,(*) instruction are moved down below branch. + * ******************** + * curInsn: + * in predBB + * in ifBB + * in returnBB + * ********************* + * list: the insns can be moved into the coldBB + * (1) the instruction is neither a branch nor a call, except for the ifbb.GetLastInsn() + * As long as a branch insn exists, + * the fast path finding fails and the return value is false, + * but the code sinking can be continued. + * (2) the predBB is not a ifBB, + * As long as a ifBB in preds exists, + * the code sinking fails, + * but fast path finding can be continued. + * (3) the targetRegs of insns in existingInsns can neither be reread or overwrite + * (4) the sourceRegs of insns in existingInsns can not be overwrite + * (5) the sourceRegs of insns in returnBB can neither be reread or overwrite + * (6) the targetRegs and sourceRegs cannot be R29 R30, to protect the stack + * (7) modified the reg when: + * -------------- + * curInsn: move R2,R1 + * : s s s + * s s s + * -> s s s + * ------------ + * (a) all targets cannot be R1, all sources cannot be R1 + * all targets cannot be R2, all return sources cannot be R2 + * (b) the targetRegs and sourceRegs cannot be list or MemoryAccess + * (c) no ifBB in preds, no branch insns + * (d) the bits of source-R2 must be equal to the R2 + * (e) replace the R2 with R1 + */ + BB *pred = &ifbb; + std::set vecTargetRegs; /* the targrtRegs of existingInsns */ + std::set vecSourceRegs; /* the soureRegs of existingInsns */ + bool ifPred = false; /* Indicates whether a ifBB in pred exists */ + bool bl = false; /* Indicates whether a branch insn exists */ + do { + FOR_BB_INSNS_REV(insn, pred) { + /* code sinking */ + if (insn->IsImmaterialInsn()) { + moveInsns.push_back(insn); + continue; + } + /* code sinking */ + if (!insn->IsMachineInstruction()) { + moveInsns.push_back(insn); + continue; + } + /* code sinking fails, the insns must be retained in the ifBB */ + if (ifPred || insn == ifbb.GetLastInsn() || insn->IsBranch() || insn->IsCall() || + insn->IsStore() || insn->IsStorePair()) { + /* fast path finding fails */ + if (insn != ifbb.GetLastInsn() && (insn->IsBranch() || insn->IsCall() || + insn->IsStore() || insn->IsStorePair())) { + bl = true; + } + (void)InsertInsnRegs(*insn, true, vecSourceRegs, true, vecTargetRegs); + existingInsns.push_back(insn); + continue; + } + bool allow = true; /* whether allow this insn move into the codeBB */ + for (uint32 o = 0; allow && o < insn->GetOperandSize(); ++o) { + Operand &opnd = insn->GetOperand(o); + if (insn->OpndIsDef(o)) { + allow = allow && !FindRegs(opnd, vecTargetRegs); + allow = allow && !FindRegs(opnd, vecSourceRegs); + allow = allow && !FindRegs(opnd, vecReturnSourceRegs); + } + if 
(insn->OpndIsUse(o)) { + allow = allow && !FindRegs(opnd, vecTargetRegs); + } + } + /* if a result_dst not allowed, this insn can be allowed on the condition of mov Rx,R0/R1, + * and tje existing insns cannot be blr + * RLR 31, RFP 32, RSP 33, RZR 34 */ + if (!ifPred && !bl && !allow && (insn->GetMachineOpcode() == MOP_xmovrr || + insn->GetMachineOpcode() == MOP_wmovrr)) { + Operand *resultOpnd = &(insn->GetOperand(0)); + Operand *srcOpnd = &(insn->GetOperand(1)); + regno_t resultNO = static_cast(resultOpnd)->GetRegisterNumber(); + regno_t srcNO = static_cast(srcOpnd)->GetRegisterNumber(); + if (!FindRegs(*resultOpnd, vecTargetRegs) && !FindRegs(*srcOpnd, vecTargetRegs) && + !FindRegs(*srcOpnd, vecSourceRegs) && !FindRegs(*srcOpnd, vecReturnSourceRegs) && + (srcNO < RLR || srcNO > RZR)) { + allow = true; /* allow on the conditional mov Rx,Rxx */ + for (auto *exit : as_const(existingInsns)) { + /* the registers of kOpdMem are complex to be detected */ + for (uint32 o = 0; o < exit->GetOperandSize(); ++o) { + if (!exit->OpndIsUse(o)) { + continue; + } + Operand *opd = &(exit->GetOperand(o)); + if (opd->IsList() || opd->IsMemoryAccessOperand()) { + allow = false; + break; + } + /* Distinguish between 32-bit regs and 64-bit regs */ + if (opd->IsRegister() && + static_cast(opd)->GetRegisterNumber() == resultNO && + opd != resultOpnd) { + allow = false; + break; + } + } + } + } + /* replace the R2 with R1 */ + if (allow) { + for (auto *exit : existingInsns) { + for (uint32 o = 0; o < exit->GetOperandSize(); ++o) { + if (!exit->OpndIsUse(o)) { + continue; + } + Operand *opd = &(exit->GetOperand(o)); + if (opd->IsRegister() && (opd == resultOpnd)) { + exit->SetOperand(o, *srcOpnd); + } + } + } + } + } + if (!allow) { /* all result_dsts are not target register */ + /* code sinking fails */ + (void)InsertInsnRegs(*insn, true, vecSourceRegs, true, vecTargetRegs); + existingInsns.push_back(insn); + } else { + moveInsns.push_back(insn); + } + } + if (pred->GetPreds().empty()) { + break; + } + if (!ifPred) { + for (auto *tmPred : pred->GetPreds()) { + pred = tmPred; + /* try to find the BB without branch */ + if (tmPred->GetKind() == BB::kBBGoto || tmPred->GetKind() == BB::kBBFallthru) { + ifPred = false; + break; + } else { + ifPred = true; + } + } + } + } while (pred != nullptr); + for (std::set::iterator it = vecTargetRegs.begin(); it != vecTargetRegs.end(); ++it) { + if (AArch64Abi::IsCalleeSavedReg(static_cast(*it))) { /* flag register */ + return false; + } + } + return !bl; +} + +void AArch64IsolateFastPath::IsolateFastPathOpt() { + /* + * Detect "if (cond) return" fast path, and move extra instructions + * to the slow path. + * Must match the following block structure. BB1 can be a series of + * single-pred/single-succ blocks. + * BB1 ops1 cmp-br to BB3 BB1 cmp-br to BB3 + * BB2 ops2 br to retBB ==> BB2 ret + * BB3 slow path BB3 ops1 ops2 + * if the detect is successful, BB3 will be used to generate prolog stuff. 
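+ * Illustrative effect (hypothetical C source): for
+ *   long f(long *p) { if (p == nullptr) { return 0; } ... heavy body ... }
+ * the null check and the "return 0" path run before any callee-save spills;
+ * the prolog is emitted only at the head of the slow-path block BB3 (coldBB).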
+ */ + BB &bb = *cgFunc.GetFirstBB(); + if (bb.GetPrev() != nullptr) { + return; + } + BB *ifBB = nullptr; + BB *returnBB = nullptr; + BB *coldBB = nullptr; + { + BB *curBB = &bb; + /* Look for straight line code */ + while (1) { + if (!curBB->GetEhSuccs().empty()) { + return; + } + if (curBB->GetSuccs().size() == 1) { + if (curBB->HasCall()) { + return; + } + BB *succ = curBB->GetSuccs().front(); + if (succ->GetPreds().size() != 1 || !succ->GetEhPreds().empty()) { + return; + } + curBB = succ; + } else if (curBB->GetKind() == BB::kBBIf) { + ifBB = curBB; + break; + } else { + return; + } + } + } + /* targets of if bb can only be reached by if bb */ + { + CHECK_FATAL(!ifBB->GetSuccs().empty(), "null succs check!"); + BB *first = ifBB->GetSuccs().front(); + BB *second = ifBB->GetSuccs().back(); + if (first->GetPreds().size() != 1 || !first->GetEhPreds().empty()) { + return; + } + if (second->GetPreds().size() != 1 || !second->GetEhPreds().empty()) { + return; + } + /* One target of the if bb jumps to a return bb */ + if (first->GetKind() != BB::kBBGoto && first->GetKind() != BB::kBBFallthru) { + return; + } + if (first->GetSuccs().size() != 1) { + return; + } + if (first->GetSuccs().front()->GetKind() != BB::kBBReturn) { + return; + } + if (first->GetSuccs().front()->GetPreds().size() != 1) { + return; + } + constexpr int32 maxNumInsn = 2; + if (first->GetSuccs().front()->NumInsn() > maxNumInsn) { /* avoid a insn is used to debug */ + return; + } + if (second->GetSuccs().empty()) { + return; + } + returnBB = first; + coldBB = second; + } + /* Search backward looking for dependencies for the cond branch */ + std::list existingInsns; /* the insns must be retained in the ifBB (and the return BB) */ + std::list moveInsns; /* instructions to be moved to coldbb */ + /* + * The control flow matches at this point. + * Make sure the SourceRegs of the insns in returnBB (vecReturnSourceReg) cannot be overwrite. + * the regs in insns have three forms: list, MemoryAccess, or Register. + */ + CHECK_FATAL(returnBB != nullptr, "null ptr check"); + std::set vecReturnSourceRegs; + FOR_BB_INSNS_REV(insn, returnBB) { + if (!insn->IsMachineInstruction()) { + continue; + } + if (insn->IsBranch() || insn->IsCall() || insn->IsStore() || insn->IsStorePair()) { + return; + } + (void)InsertInsnRegs(*insn, true, vecReturnSourceRegs, false, vecReturnSourceRegs); + existingInsns.push_back(insn); + } + FOR_BB_INSNS_REV(insn, returnBB->GetSuccs().front()) { + if (!insn->IsMachineInstruction()) { + continue; + } + if (insn->IsBranch() || insn->IsCall() || insn->IsStore() || insn->IsStorePair()) { + return; + } + (void)InsertInsnRegs(*insn, true, vecReturnSourceRegs, false, vecReturnSourceRegs); + existingInsns.push_back(insn); + } + /* + * The mv is the 1st move using the parameter register leading to the branch + * The ld is the load using the parameter register indirectly for the branch + * The depMv is the move which preserves the result of the load but might + * destroy a parameter register which will be moved below the branch. + */ + bool fast = BackwardFindDependency(*ifBB, vecReturnSourceRegs, existingInsns, moveInsns); + /* move extra instructions to the slow path */ + if (!fast) { + return; + } + for (auto &in : as_const(moveInsns)) { + in->GetBB()->RemoveInsn(*in); + CHECK_FATAL(coldBB != nullptr, "null ptr check"); + coldBB->InsertInsnBegin(*in); + } + /* All instructions are in the right place, replace branch to ret bb to just ret. 
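+ * Concretely: returnBB drops its trailing goto, the instructions of the shared
+ * return block (tgtBB) plus an explicit MOP_xret are appended to it, returnBB is
+ * re-marked as a kBBReturn block, and the exit-BB/common-exit bookkeeping is
+ * transferred from tgtBB to returnBB.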
*/ + /* Remove the lastInsn of gotoBB */ + if (returnBB->GetKind() == BB::kBBGoto) { + returnBB->RemoveInsn(*returnBB->GetLastInsn()); + } + BB *tgtBB = returnBB->GetSuccs().front(); + CHECK_FATAL(tgtBB->GetKind() == BB::kBBReturn, "check return bb of isolatefastpatch"); + CHECK_FATAL(tgtBB != nullptr, "null ptr check"); + FOR_BB_INSNS(insn, tgtBB) { + returnBB->AppendInsn(*insn); /* add the insns such as MOP_xret */ + } + returnBB->AppendInsn(cgFunc.GetInsnBuilder()->BuildInsn(MOP_xret)); + /* bb is now a retbb and has no succ. */ + returnBB->SetKind(BB::kBBReturn); + + // add to exitvec and common exitpreds + BB* commonExit = cgFunc.GetCommonExitBB(); + auto cEpredtgt = std::find(commonExit->GetPredsBegin(), commonExit->GetPredsEnd(), tgtBB); + auto cEpredreturn = std::find(commonExit->GetPredsBegin(), commonExit->GetPredsEnd(), returnBB); + if (cEpredtgt == commonExit->GetPredsEnd() || cEpredreturn != commonExit->GetPredsEnd()) { + CHECK_FATAL(false, "check case in isolatefast"); + } + commonExit->RemovePreds(*tgtBB); + commonExit->PushBackPreds(*returnBB); + + auto commonExitVecTgt = std::find(cgFunc.GetExitBBsVec().begin(), cgFunc.GetExitBBsVec().end(), tgtBB); + auto commonExitVecRet = std::find(cgFunc.GetExitBBsVec().begin(), cgFunc.GetExitBBsVec().end(), returnBB); + if (commonExitVecTgt == cgFunc.GetExitBBsVec().end() || commonExitVecRet != cgFunc.GetExitBBsVec().end()) { + CHECK_FATAL(false, "check case in isolatefast"); + } + (void)cgFunc.GetExitBBsVec().erase(commonExitVecTgt); + cgFunc.GetExitBBsVec().push_back(returnBB); + + MapleList::const_iterator predIt = std::find(tgtBB->GetPredsBegin(), tgtBB->GetPredsEnd(), returnBB); + tgtBB->ErasePreds(predIt); + tgtBB->ClearInsns(); + returnBB->ClearSuccs(); + if (tgtBB->GetPrev() != nullptr && tgtBB->GetNext() != nullptr) { + tgtBB->GetPrev()->SetNext(tgtBB->GetNext()); + tgtBB->GetNext()->SetPrev(tgtBB->GetPrev()); + } + SetFastPathReturnBB(returnBB); + cgFunc.SetPrologureBB(*coldBB); +} + +void AArch64IsolateFastPath::Run() { + IsolateFastPathOpt(); +} +} /* namespace maplebe */ diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_live.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_live.cpp new file mode 100644 index 0000000000000000000000000000000000000000..8e44853b46ec3667a5b2795a3ee1628ea52f275c --- /dev/null +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_live.cpp @@ -0,0 +1,86 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "aarch64_live.h" +#include "aarch64_cg.h" + +namespace maplebe { +void AArch64LiveAnalysis::GenerateReturnBBDefUse(BB &bb) const { + PrimType returnType = cgFunc->GetFunction().GetReturnType()->GetPrimType(); + auto *aarchCGFunc = static_cast(cgFunc); + if (IsPrimitiveFloat(returnType)) { + Operand &phyOpnd = + aarchCGFunc->GetOrCreatePhysicalRegisterOperand(static_cast(V0), k64BitSize, kRegTyFloat); + CollectLiveInfo(bb, phyOpnd, false, true); + } else if (IsPrimitiveInteger(returnType)) { + Operand &phyOpnd = + aarchCGFunc->GetOrCreatePhysicalRegisterOperand(static_cast(R0), k64BitSize, kRegTyInt); + CollectLiveInfo(bb, phyOpnd, false, true); + } +} + +void AArch64LiveAnalysis::InitEhDefine(BB &bb) { + AArch64CGFunc *aarchCGFunc = static_cast(cgFunc); + + /* Insert MOP_pseudo_eh_def_x R1. */ + RegOperand ®R1 = aarchCGFunc->GetOrCreatePhysicalRegisterOperand(R1, k64BitSize, kRegTyInt); + Insn &pseudoInsn1 = cgFunc->GetInsnBuilder()->BuildInsn(MOP_pseudo_eh_def_x, regR1); + bb.InsertInsnBegin(pseudoInsn1); + + /* Insert MOP_pseudo_eh_def_x R0. */ + RegOperand ®R0 = aarchCGFunc->GetOrCreatePhysicalRegisterOperand(R0, k64BitSize, kRegTyInt); + Insn &pseudoInsn2 = cgFunc->GetInsnBuilder()->BuildInsn(MOP_pseudo_eh_def_x, regR0); + bb.InsertInsnBegin(pseudoInsn2); +} + +bool AArch64LiveAnalysis::CleanupBBIgnoreReg(regno_t reg) { + regno_t regNO = reg + R0; + if (regNO < R8 || (RLR <= regNO && regNO <= RZR)) { + return true; + } + return false; +} + +void AArch64LiveAnalysis::ProcessCallInsnParam(BB &bb, const Insn &insn) const { + /* R0 ~ R7(R0 + 0 ~ R0 + 7) and V0 ~ V7 (V0 + 0 ~ V0 + 7) is parameter register */ + AArch64CGFunc *aarchCGFunc = static_cast(cgFunc); + auto *targetOpnd = insn.GetCallTargetOperand(); + CHECK_FATAL(targetOpnd != nullptr, "target is null in Insn::IsCallToFunctionThatNeverReturns"); + if (CGOptions::DoIPARA() && targetOpnd->IsFuncNameOpnd()) { + FuncNameOperand *target = static_cast(targetOpnd); + const MIRSymbol *funcSt = target->GetFunctionSymbol(); + ASSERT(funcSt->GetSKind() == kStFunc, "funcst must be a function name symbol"); + MIRFunction *func = funcSt->GetFunction(); + if (func != nullptr && func->IsReferedRegsValid()) { + for (auto preg : func->GetReferedRegs()) { + if (AArch64Abi::IsCalleeSavedReg(static_cast(preg))) { + continue; + } + RegOperand *opnd = &aarchCGFunc->GetOrCreatePhysicalRegisterOperand(static_cast(preg), k64BitSize, + AArch64isa::IsFPSIMDRegister(static_cast(preg)) ? kRegTyFloat : kRegTyInt); + CollectLiveInfo(bb, *opnd, true, false); + } + return; + } + } + for (uint32 i = 0; i < 8; ++i) { + Operand &phyOpndR = + aarchCGFunc->GetOrCreatePhysicalRegisterOperand(static_cast(R0 + i), k64BitSize, kRegTyInt); + CollectLiveInfo(bb, phyOpndR, true, false); + Operand &phyOpndV = + aarchCGFunc->GetOrCreatePhysicalRegisterOperand(static_cast(V0 + i), k64BitSize, kRegTyFloat); + CollectLiveInfo(bb, phyOpndV, true, false); + } +} +} /* namespace maplebe */ diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_memlayout.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_memlayout.cpp new file mode 100644 index 0000000000000000000000000000000000000000..19bfb4a15452a83809ba16d5c0be162fe6e8f397 --- /dev/null +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_memlayout.cpp @@ -0,0 +1,586 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "aarch64_memlayout.h" +#include "aarch64_cgfunc.h" +#include "becommon.h" +#include "mir_nodes.h" + +namespace maplebe { +using namespace maple; + +/* + * Returns stack space required for a call + * which is used to pass arguments that cannot be + * passed through registers + */ +uint32 AArch64MemLayout::ComputeStackSpaceRequirementForCall(StmtNode &stmt, int32 &aggCopySize, bool isIcall) { + /* instantiate a parm locator */ + AArch64CallConvImpl parmLocator(be); + uint32 sizeOfArgsToStkPass = 0; + uint32 i = 0; + /* An indirect call's first operand is the invocation target */ + if (isIcall) { + ++i; + } + + if (std::strcmp(stmt.GetOpName(), "call") == 0) { + CallNode *callNode = static_cast(&stmt); + MIRFunction *fn = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(callNode->GetPUIdx()); + CHECK_FATAL(fn != nullptr, "get MIRFunction failed"); + MIRSymbol *symbol = be.GetMIRModule().CurFunction()->GetLocalOrGlobalSymbol(fn->GetStIdx(), false); + if (symbol->GetName() == "MCC_CallFastNative" || symbol->GetName() == "MCC_CallFastNativeExt" || + symbol->GetName() == "MCC_CallSlowNative0" || symbol->GetName() == "MCC_CallSlowNative1" || + symbol->GetName() == "MCC_CallSlowNative2" || symbol->GetName() == "MCC_CallSlowNative3" || + symbol->GetName() == "MCC_CallSlowNative4" || symbol->GetName() == "MCC_CallSlowNative5" || + symbol->GetName() == "MCC_CallSlowNative6" || symbol->GetName() == "MCC_CallSlowNative7" || + symbol->GetName() == "MCC_CallSlowNative8" || symbol->GetName() == "MCC_CallSlowNativeExt") { + ++i; + } + } + + aggCopySize = 0; + for (uint32 anum = 0; i < stmt.NumOpnds(); ++i, ++anum) { + BaseNode *opnd = stmt.Opnd(i); + MIRType *ty = nullptr; + if (opnd->GetPrimType() != PTY_agg) { + ty = GlobalTables::GetTypeTable().GetTypeTable()[static_cast(opnd->GetPrimType())]; + } else { + Opcode opndOpcode = opnd->GetOpCode(); + if (be.GetMIRModule().GetFlavor() != kFlavorLmbc) { + ASSERT(opndOpcode == OP_dread || opndOpcode == OP_iread, "opndOpcode should be OP_dread or OP_iread"); + } + if (opndOpcode == OP_dread) { + DreadNode *dread = static_cast(opnd); + MIRSymbol *sym = be.GetMIRModule().CurFunction()->GetLocalOrGlobalSymbol(dread->GetStIdx()); + ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(sym->GetTyIdx()); + if (dread->GetFieldID() != 0) { + ASSERT(ty->GetKind() == kTypeStruct || ty->GetKind() == kTypeClass || + ty->GetKind() == kTypeUnion, "expect struct or class"); + if (ty->GetKind() == kTypeStruct || ty->GetKind() == kTypeUnion) { + ty = static_cast(ty)->GetFieldType(dread->GetFieldID()); + } else { + ty = static_cast(ty)->GetFieldType(dread->GetFieldID()); + } + } + } else if (opndOpcode == OP_iread) { + IreadNode *iread = static_cast(opnd); + ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(iread->GetTyIdx()); + ASSERT(ty->GetKind() == kTypePointer, "expect pointer"); + ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(ty)->GetPointedTyIdx()); + if (iread->GetFieldID() != 0) { + ASSERT(ty->GetKind() == kTypeStruct || ty->GetKind() == kTypeClass || + ty->GetKind() == kTypeUnion, "expect struct or class"); + if (ty->GetKind() == kTypeStruct || ty->GetKind() == kTypeUnion) { + ty = 
static_cast(ty)->GetFieldType(iread->GetFieldID()); + } else { + ty = static_cast(ty)->GetFieldType(iread->GetFieldID()); + } + } + } else if ((opndOpcode == OP_ireadfpoff || opndOpcode == OP_ireadoff || + opndOpcode == OP_dreadoff) && opnd->GetPrimType() == PTY_agg) { + ty = static_cast(cgFunc)->GetLmbcStructArgType(stmt, i); + } + if (ty == nullptr) { /* type mismatch */ + continue; + } + } + CCLocInfo ploc; + aggCopySize += parmLocator.LocateNextParm(*ty, ploc); + if (ploc.reg0 != 0) { + continue; /* passed in register, so no effect on actual area */ + } + sizeOfArgsToStkPass = RoundUp(ploc.memOffset + ploc.memSize, GetPointerSize()); + } + return sizeOfArgsToStkPass; +} + +void AArch64MemLayout::SetSizeAlignForTypeIdx(uint32 typeIdx, uint32 &size, uint32 &align) const { + if (be.GetTypeSize(typeIdx) > k16ByteSize) { + /* size > 16 is passed on stack, the formal is just a pointer to the copy on stack. */ + if (CGOptions::IsArm64ilp32()) { + align = k8ByteSize; + size = k8ByteSize; + } else { + align = GetPointerSize(); + size = GetPointerSize(); + } + } else { + align = be.GetTypeAlign(typeIdx); + size = static_cast(be.GetTypeSize(typeIdx)); + } +} + +void AArch64MemLayout::SetSegmentSize(AArch64SymbolAlloc &symbolAlloc, MemSegment &segment, uint32 typeIdx) const { + uint32 size; + uint32 align; + SetSizeAlignForTypeIdx(typeIdx, size, align); + segment.SetSize(static_cast(RoundUp(static_cast(segment.GetSize()), align))); + symbolAlloc.SetOffset(segment.GetSize()); + segment.SetSize(segment.GetSize() + size); + segment.SetSize(static_cast(RoundUp(static_cast(segment.GetSize()), GetPointerSize()))); +} + +void AArch64MemLayout::LayoutVarargParams() { + uint32 nIntRegs = 0; + uint32 nFpRegs = 0; + AArch64CallConvImpl parmlocator(be); + CCLocInfo ploc; + MIRFunction *func = mirFunction; + if (be.GetMIRModule().IsCModule() && func->GetAttr(FUNCATTR_varargs)) { + for (uint32 i = 0; i < func->GetFormalCount(); i++) { + if (i == 0) { + if (func->IsFirstArgReturn() && func->GetReturnType()->GetPrimType() != PTY_void) { + TyIdx tyIdx = func->GetFuncRetStructTyIdx(); + if (be.GetTypeSize(tyIdx.GetIdx()) <= k16ByteSize) { + continue; + } + } + } + MIRType *ty = func->GetNthParamType(i); + parmlocator.LocateNextParm(*ty, ploc, i == 0, func); + if (ploc.reg0 != kRinvalid) { + if (ploc.reg0 >= R0 && ploc.reg0 <= R7) { + nIntRegs++; + } else if (ploc.reg0 >= V0 && ploc.reg0 <= V7) { + nFpRegs++; + } + } + if (ploc.reg1 != kRinvalid) { + if (ploc.reg1 >= R0 && ploc.reg1 <= R7) { + nIntRegs++; + } else if (ploc.reg1 >= V0 && ploc.reg1 <= V7) { + nFpRegs++; + } + } + if (ploc.reg2 != kRinvalid) { + if (ploc.reg2 >= R0 && ploc.reg2 <= R7) { + nIntRegs++; + } else if (ploc.reg2 >= V0 && ploc.reg2 <= V7) { + nFpRegs++; + } + } + if (ploc.reg3 != kRinvalid) { + if (ploc.reg3 >= R0 && ploc.reg3 <= R7) { + nIntRegs++; + } else if (ploc.reg2 >= V0 && ploc.reg2 <= V7) { + nFpRegs++; + } + } + } + if (CGOptions::IsArm64ilp32()) { + SetSizeOfGRSaveArea((k8BitSize - nIntRegs) * k8ByteSize); + } else { + SetSizeOfGRSaveArea((k8BitSize - nIntRegs) * GetPointerSize()); + } + if (CGOptions::UseGeneralRegOnly()) { + SetSizeOfVRSaveArea(0); + } else { + if (CGOptions::IsArm64ilp32()) { + SetSizeOfVRSaveArea((k8BitSize - nFpRegs) * k8ByteSize * k2ByteSize); + } else { + SetSizeOfVRSaveArea((k8BitSize - nFpRegs) * GetPointerSize() * k2ByteSize); + } + } + } +} + +void AArch64MemLayout::LayoutFormalParams() { + AArch64CallConvImpl parmLocator(be); + CCLocInfo ploc; + for (size_t i = 0; i < mirFunction->GetFormalCount(); ++i) 
{ + MIRSymbol *sym = mirFunction->GetFormal(i); + uint32 stIndex = sym->GetStIndex(); + AArch64SymbolAlloc *symLoc = memAllocator->GetMemPool()->New(); + SetSymAllocInfo(stIndex, *symLoc); + if (i == 0) { + if (mirFunction->IsReturnStruct() && mirFunction->IsFirstArgReturn()) { + symLoc->SetMemSegment(GetSegArgsRegPassed()); + symLoc->SetOffset(GetSegArgsRegPassed().GetSize()); + TyIdx tyIdx = mirFunction->GetFuncRetStructTyIdx(); + if (be.GetTypeSize(tyIdx.GetIdx()) > k16ByteSize) { + if (CGOptions::IsArm64ilp32()) { + segArgsRegPassed.SetSize(segArgsRegPassed.GetSize() + k8ByteSize); + } else { + segArgsRegPassed.SetSize(segArgsRegPassed.GetSize() + GetPointerSize()); + } + } + continue; + } + } + MIRType *ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(mirFunction->GetFormalDefVec()[i].formalTyIdx); + uint32 ptyIdx = ty->GetTypeIndex(); + parmLocator.LocateNextParm(*ty, ploc, i == 0, mirFunction); + if (ploc.reg0 != kRinvalid) { /* register */ + symLoc->SetRegisters(static_cast(ploc.reg0), static_cast(ploc.reg1), + static_cast(ploc.reg2), static_cast(ploc.reg3)); + if (!cgFunc->GetMirModule().IsCModule() && mirFunction->GetNthParamAttr(i).GetAttr(ATTR_localrefvar)) { + symLoc->SetMemSegment(segRefLocals); + SetSegmentSize(*symLoc, segRefLocals, ptyIdx); + } else if (!sym->IsPreg()) { + uint32 size; + uint32 align; + SetSizeAlignForTypeIdx(ptyIdx, size, align); + symLoc->SetMemSegment(GetSegArgsRegPassed()); + /* the type's alignment requirement may be smaller than a registser's byte size */ + if (ty->GetPrimType() == PTY_agg) { + /* struct param aligned on 8 byte boundary unless it is small enough */ + if (CGOptions::IsArm64ilp32()) { + align = k8ByteSize; + } else { + align = GetPointerSize(); + } + } + uint32 tSize = 0; + if ((IsPrimitiveVector(ty->GetPrimType()) && GetPrimTypeSize(ty->GetPrimType()) > k8ByteSize) || + AArch64Abi::IsVectorArrayType(ty, tSize) != PTY_void) { + align = k16ByteSize; + } + segArgsRegPassed.SetSize(static_cast(RoundUp(segArgsRegPassed.GetSize(), align))); + symLoc->SetOffset(segArgsRegPassed.GetSize()); + segArgsRegPassed.SetSize(segArgsRegPassed.GetSize() + size); + } + } else { /* stack */ + uint32 size; + uint32 align; + SetSizeAlignForTypeIdx(ptyIdx, size, align); + symLoc->SetMemSegment(GetSegArgsStkPassed()); + segArgsStkPassed.SetSize(static_cast(RoundUp(segArgsStkPassed.GetSize(), align))); + symLoc->SetOffset(segArgsStkPassed.GetSize()); + segArgsStkPassed.SetSize(segArgsStkPassed.GetSize() + size); + /* We need it as dictated by the AArch64 ABI $5.4.2 C12 */ + if (CGOptions::IsArm64ilp32()) { + segArgsStkPassed.SetSize(static_cast(RoundUp(segArgsStkPassed.GetSize(), k8ByteSize))); + } else { + segArgsStkPassed.SetSize(static_cast(RoundUp(segArgsStkPassed.GetSize(), GetPointerSize()))); + } + if (!cgFunc->GetMirModule().IsCModule() && mirFunction->GetNthParamAttr(i).GetAttr(ATTR_localrefvar)) { + SetLocalRegLocInfo(sym->GetStIdx(), *symLoc); + AArch64SymbolAlloc *symLoc1 = memAllocator->GetMemPool()->New(); + symLoc1->SetMemSegment(segRefLocals); + SetSegmentSize(*symLoc1, segRefLocals, ptyIdx); + SetSymAllocInfo(stIndex, *symLoc1); + } + } + if (cgFunc->GetCG()->GetCGOptions().WithDwarf() && (symLoc->GetMemSegment() != nullptr)) { + cgFunc->AddDIESymbolLocation(sym, symLoc, true); + } + } +} + +void AArch64MemLayout::LayoutLocalVariables(std::vector &tempVar, std::vector &returnDelays) { + if (be.GetMIRModule().GetFlavor() == kFlavorLmbc) { + segLocals.SetSize(mirFunction->GetFrameSize() - mirFunction->GetOutParmSize()); + return; + } + + 
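/* assign a frame location to every auto (kScAuto) symbol in the function's symbol table */ +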
uint32 symTabSize = mirFunction->GetSymTab()->GetSymbolTableSize(); + for (uint32 i = 0; i < symTabSize; ++i) { + MIRSymbol *sym = mirFunction->GetSymTab()->GetSymbolFromStIdx(i); + if (sym == nullptr || sym->GetStorageClass() != kScAuto || sym->IsDeleted()) { + continue; + } + uint32 stIndex = sym->GetStIndex(); + TyIdx tyIdx = sym->GetTyIdx(); + AArch64SymbolAlloc *symLoc = memAllocator->GetMemPool()->New(); + SetSymAllocInfo(stIndex, *symLoc); + CHECK_FATAL(!symLoc->IsRegister(), "expect not register"); + + if (sym->IsRefType()) { + if (mirFunction->GetRetRefSym().find(sym) != mirFunction->GetRetRefSym().end()) { + /* try to put ret_ref at the end of segRefLocals */ + returnDelays.emplace_back(sym); + continue; + } + symLoc->SetMemSegment(segRefLocals); + segRefLocals.SetSize(RoundUp(segRefLocals.GetSize(), be.GetTypeAlign(tyIdx))); + symLoc->SetOffset(segRefLocals.GetSize()); + segRefLocals.SetSize(segRefLocals.GetSize() + be.GetTypeSize(tyIdx)); + } else { + if (sym->GetName() == "__EARetTemp__" || + sym->GetName().substr(0, kEARetTempNameSize) == "__EATemp__") { + tempVar.emplace_back(sym); + continue; + } + symLoc->SetMemSegment(segLocals); + MIRType *ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx); + uint32 align = be.GetTypeAlign(tyIdx); + uint32 tSize = 0; + if ((IsPrimitiveVector(ty->GetPrimType()) && GetPrimTypeSize(ty->GetPrimType()) > k8ByteSize) || + AArch64Abi::IsVectorArrayType(ty, tSize) != PTY_void) { + align = k16ByteSize; + } + if (ty->GetPrimType() == PTY_agg && align < k8BitSize) { + segLocals.SetSize(static_cast(RoundUp(segLocals.GetSize(), k8BitSize))); + } else { + segLocals.SetSize(static_cast(RoundUp(segLocals.GetSize(), align))); + } + symLoc->SetOffset(segLocals.GetSize()); + segLocals.SetSize(segLocals.GetSize() + be.GetTypeSize(tyIdx)); + } + if (cgFunc->GetCG()->GetCGOptions().WithDwarf()) { + cgFunc->AddDIESymbolLocation(sym, symLoc, false); + } + } +} + +void AArch64MemLayout::LayoutEAVariales(std::vector &tempVar) { + for (auto sym : tempVar) { + uint32 stIndex = sym->GetStIndex(); + TyIdx tyIdx = sym->GetTyIdx(); + AArch64SymbolAlloc *symLoc = memAllocator->GetMemPool()->New(); + SetSymAllocInfo(stIndex, *symLoc); + ASSERT(!symLoc->IsRegister(), "expect not register"); + symLoc->SetMemSegment(segRefLocals); + segRefLocals.SetSize(RoundUp(segRefLocals.GetSize(), be.GetTypeAlign(tyIdx))); + symLoc->SetOffset(segRefLocals.GetSize()); + segRefLocals.SetSize(segRefLocals.GetSize() + be.GetTypeSize(tyIdx)); + } +} + +void AArch64MemLayout::LayoutReturnRef(std::vector &returnDelays, + int32 &structCopySize, int32 &maxParmStackSize) { + for (auto sym : returnDelays) { + uint32 stIndex = sym->GetStIndex(); + TyIdx tyIdx = sym->GetTyIdx(); + AArch64SymbolAlloc *symLoc = memAllocator->GetMemPool()->New(); + SetSymAllocInfo(stIndex, *symLoc); + ASSERT(!symLoc->IsRegister(), "expect not register"); + + ASSERT(sym->IsRefType(), "expect reftype "); + symLoc->SetMemSegment(segRefLocals); + segRefLocals.SetSize(RoundUp(segRefLocals.GetSize(), be.GetTypeAlign(tyIdx))); + symLoc->SetOffset(segRefLocals.GetSize()); + segRefLocals.SetSize(segRefLocals.GetSize() + be.GetTypeSize(tyIdx)); + } + segArgsToStkPass.SetSize(FindLargestActualArea(structCopySize)); + maxParmStackSize = static_cast(segArgsToStkPass.GetSize()); + if (Globals::GetInstance()->GetOptimLevel() == CGOptions::kLevel0) { + AssignSpillLocationsToPseudoRegisters(); + } else { + AArch64CGFunc *aarchCGFunc = static_cast(cgFunc); + /* 8-VirtualRegNode occupy byte number */ + 
aarchCGFunc->SetCatchRegno(cgFunc->NewVReg(kRegTyInt, 8));
+ }
+ segRefLocals.SetSize(static_cast<uint32>(RoundUp(segRefLocals.GetSize(), GetPointerSize())));
+ if (CGOptions::IsArm64ilp32()) {
+ segLocals.SetSize(static_cast<uint32>(RoundUp(segLocals.GetSize(), k8ByteSize)));
+ } else {
+ segLocals.SetSize(static_cast<uint32>(RoundUp(segLocals.GetSize(), GetPointerSize())));
+ }
+}
+
+void AArch64MemLayout::LayoutActualParams() {
+ for (size_t i = 0; i < mirFunction->GetFormalCount(); ++i) {
+ if (i == 0) {
+ if (mirFunction->IsReturnStruct() && mirFunction->IsFirstArgReturn()) {
+ continue;
+ }
+ }
+ MIRSymbol *sym = mirFunction->GetFormal(i);
+ if (sym->IsPreg()) {
+ continue;
+ }
+ uint32 stIndex = sym->GetStIndex();
+ AArch64SymbolAlloc *symLoc = static_cast<AArch64SymbolAlloc*>(GetSymAllocInfo(stIndex));
+ if (symLoc->GetMemSegment() == &GetSegArgsRegPassed()) { /* register */
+ /*
+ * In O0, we store parameters passed via registers into memory.
+ * So, each of such parameter needs to get assigned storage in stack.
+ * If a function parameter is never accessed in the function body,
+ * and if we don't create its memory operand here, its offset gets
+ * computed when the instruction to store its value into stack
+ * is generated in the prologue when its memory operand is created.
+ * But, the parameter would see a different StackFrameSize than
+ * the parameters that are accessed in the body, because
+ * the size of the storage for FP/LR is added to the stack frame
+ * size in between.
+ * To make offset assignment easier, we create a memory operand
+ * for each of function parameters in advance.
+ * This has to be done after all of formal parameters and local
+ * variables get assigned their respective storage, i.e.
+ * CallFrameSize (discounting callee-saved and FP/LR) is known.
+ */
+ MIRType *ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(mirFunction->GetFormalDefVec()[i].formalTyIdx);
+ uint32 ptyIdx = ty->GetTypeIndex();
+ static_cast<AArch64CGFunc*>(cgFunc)->GetOrCreateMemOpnd(*sym, 0, be.GetTypeAlign(ptyIdx) * kBitsPerByte);
+ }
+ }
+}
+
+void AArch64MemLayout::LayoutStackFrame(int32 &structCopySize, int32 &maxParmStackSize) {
+ LayoutVarargParams();
+ LayoutFormalParams();
+ /*
+ * We do need this as LDR/STR with immediate
+ * requires imm to be aligned at an 8/4-byte boundary,
+ * and local variables may need 8-byte alignment.
+ */
+ if (CGOptions::IsArm64ilp32()) {
+ segArgsRegPassed.SetSize(RoundUp(segArgsRegPassed.GetSize(), k8ByteSize));
+ /* we do need this as SP has to be aligned at a 16-byte boundary */
+ segArgsStkPassed.SetSize(RoundUp(segArgsStkPassed.GetSize(), k8ByteSize + k8ByteSize));
+ } else {
+ segArgsRegPassed.SetSize(RoundUp(segArgsRegPassed.GetSize(), GetPointerSize()));
+ segArgsStkPassed.SetSize(RoundUp(segArgsStkPassed.GetSize(), GetPointerSize() + GetPointerSize()));
+ }
+ /* allocate the local variables in the stack */
+ std::vector<MIRSymbol*> eaTempVar;
+ std::vector<MIRSymbol*> retDelays;
+ LayoutLocalVariables(eaTempVar, retDelays);
+ LayoutEAVariales(eaTempVar);
+
+ /* handle ret_ref sym now */
+ LayoutReturnRef(retDelays, structCopySize, maxParmStackSize);
+
+ /*
+ * for the actual arguments that cannot be passed through registers
+ * need to allocate space for caller-save registers
+ */
+ LayoutActualParams();
+
+ fixStackSize = static_cast(RealStackFrameSize());
+ cgFunc->SetUseFP(cgFunc->UseFP() || fixStackSize > kMaxPimm32);
+}
+
+void AArch64MemLayout::AssignSpillLocationsToPseudoRegisters() {
+ MIRPregTable *pregTab = cgFunc->GetFunction().GetPregTab();
+
+ /* BUG: n_regs include index 0 which is not a valid preg index.
*/ + size_t nRegs = pregTab->Size(); + spillLocTable.resize(nRegs); + for (size_t i = 1; i < nRegs; ++i) { + PrimType pType = pregTab->PregFromPregIdx(i)->GetPrimType(); + AArch64SymbolAlloc *symLoc = memAllocator->GetMemPool()->New(); + symLoc->SetMemSegment(segLocals); + segLocals.SetSize(RoundUp(segLocals.GetSize(), GetPrimTypeSize(pType))); + symLoc->SetOffset(segLocals.GetSize()); + MIRType *mirTy = GlobalTables::GetTypeTable().GetTypeTable()[pType]; + segLocals.SetSize(segLocals.GetSize() + be.GetTypeSize(mirTy->GetTypeIndex())); + spillLocTable[i] = symLoc; + } + + if (!cgFunc->GetMirModule().IsJavaModule()) { + return; + } + + /* + * Allocate additional stack space for "thrownval". + * segLocals need 8 bit align + */ + if (CGOptions::IsArm64ilp32()) { + segLocals.SetSize(RoundUp(segLocals.GetSize(), k8ByteSize)); + } else { + segLocals.SetSize(RoundUp(segLocals.GetSize(), GetPointerSize())); + } + AArch64CGFunc *aarchCGFunc = static_cast(cgFunc); + RegOperand &baseOpnd = aarchCGFunc->GetOrCreateStackBaseRegOperand(); + uint32 offset = segLocals.GetSize(); + + OfstOperand *offsetOpnd = + &aarchCGFunc->CreateOfstOpnd(offset + k16BitSize, k64BitSize); + MemOperand *throwMem = aarchCGFunc->CreateMemOperand( + MemOperand::kAddrModeBOi, k64BitSize, baseOpnd, static_cast(nullptr), offsetOpnd, + nullptr); + aarchCGFunc->SetCatchOpnd(*throwMem); + if (CGOptions::IsArm64ilp32()) { + segLocals.SetSize(segLocals.GetSize() + k8ByteSize); + } else { + segLocals.SetSize(segLocals.GetSize() + GetPointerSize()); + } +} + +SymbolAlloc *AArch64MemLayout::AssignLocationToSpillReg(regno_t vrNum) { + AArch64SymbolAlloc *symLoc = memAllocator->GetMemPool()->New(); + symLoc->SetMemSegment(segSpillReg); + uint32 regSize = cgFunc->IsExtendReg(vrNum) ? k8ByteSize : cgFunc->GetVRegSize(vrNum); + segSpillReg.SetSize(RoundUp(segSpillReg.GetSize(), regSize)); + symLoc->SetOffset(segSpillReg.GetSize()); + segSpillReg.SetSize(segSpillReg.GetSize() + regSize); + SetSpillRegLocInfo(vrNum, *symLoc); + return symLoc; +} + +uint64 AArch64MemLayout::StackFrameSize() const { + uint64 total = segArgsRegPassed.GetSize() + static_cast(cgFunc)->SizeOfCalleeSaved() + + GetSizeOfRefLocals() + Locals().GetSize() + GetSizeOfSpillReg(); + + if (cgFunc->GetMirModule().GetFlavor() != MIRFlavor::kFlavorLmbc) { + if (GetSizeOfGRSaveArea() > 0) { + total += RoundUp(GetSizeOfGRSaveArea(), kAarch64StackPtrAlignment); + } + if (GetSizeOfVRSaveArea() > 0) { + total += RoundUp(GetSizeOfVRSaveArea(), kAarch64StackPtrAlignment); + } + } + + /* + * if the function does not have VLA nor alloca, + * we allocate space for arguments to stack-pass + * in the call frame; otherwise, it has to be allocated for each call and reclaimed afterward. 
+ */ + total += segArgsToStkPass.GetSize(); + return RoundUp(total, kAarch64StackPtrAlignment); +} + +uint32 AArch64MemLayout::RealStackFrameSize() const { + auto size = StackFrameSize(); + if (cgFunc->GetCG()->IsStackProtectorStrong() || cgFunc->GetCG()->IsStackProtectorAll()) { + size += static_cast(kAarch64StackPtrAlignment); + } + return static_cast(size); +} + +int32 AArch64MemLayout::GetRefLocBaseLoc() const { + AArch64CGFunc *aarchCGFunc = static_cast(cgFunc); + auto beforeSize = GetSizeOfLocals(); + if (aarchCGFunc->UsedStpSubPairForCallFrameAllocation()) { + return static_cast(beforeSize); + } + return static_cast(beforeSize + kSizeOfFplr); +} + +int32 AArch64MemLayout::GetGRSaveAreaBaseLoc() const { + int32 total = static_cast(RealStackFrameSize() - + RoundUp(GetSizeOfGRSaveArea(), kAarch64StackPtrAlignment)); + total -= static_cast(SizeOfArgsToStackPass()); + return total; +} + +int32 AArch64MemLayout::GetVRSaveAreaBaseLoc() const { + int32 total = static_cast((RealStackFrameSize() - + RoundUp(GetSizeOfGRSaveArea(), kAarch64StackPtrAlignment)) - + RoundUp(GetSizeOfVRSaveArea(), kAarch64StackPtrAlignment)); + total -= static_cast(SizeOfArgsToStackPass()); + return total; +} + +int32 AArch64MemLayout::GetCalleeSaveBaseLoc() const { + auto offset = StackFrameSize() - static_cast(cgFunc)->SizeOfCalleeSaved(); + if (cgFunc->GetMirModule().GetFlavor() != MIRFlavor::kFlavorLmbc) { + offset -= GetSizeOfLocals(); + } else { + offset = (offset - SizeOfArgsToStackPass()) + kSizeOfFplr; + } + + if (cgFunc->GetMirModule().IsCModule() && cgFunc->GetFunction().GetAttr(FUNCATTR_varargs)) { + /* GR/VR save areas are above the callee save area */ + auto saveareasize = RoundUp(GetSizeOfGRSaveArea(), GetPointerSize() * k2BitSize) + + RoundUp(GetSizeOfVRSaveArea(), GetPointerSize() * k2BitSize); + offset -= saveareasize; + } + + return static_cast(offset); +} +} /* namespace maplebe */ diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_offset_adjust.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_offset_adjust.cpp new file mode 100644 index 0000000000000000000000000000000000000000..01be7f0a07a03e9caf1264c2800531ffc0ad2ad0 --- /dev/null +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_offset_adjust.cpp @@ -0,0 +1,247 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "aarch64_offset_adjust.h" +#include "aarch64_cgfunc.h" +#include "aarch64_cg.h" + +namespace maplebe { +void AArch64FPLROffsetAdjustment::Run() { + AdjustmentOffsetForFPLR(); +} + +void AArch64FPLROffsetAdjustment::AdjustmentOffsetForOpnd(Insn &insn, AArch64CGFunc &aarchCGFunc) const { + bool isLmbc = (aarchCGFunc.GetMirModule().GetFlavor() == MIRFlavor::kFlavorLmbc); + uint32 opndNum = insn.GetOperandSize(); + MemLayout *memLayout = aarchCGFunc.GetMemlayout(); + bool stackBaseOpnd = false; + AArch64reg stackBaseReg = isLmbc ? R29 : (aarchCGFunc.UseFP() ? 
R29 : RSP); + for (uint32 i = 0; i < opndNum; ++i) { + Operand &opnd = insn.GetOperand(i); + if (opnd.IsRegister()) { + auto ®Opnd = static_cast(opnd); + if (regOpnd.IsOfVary()) { + insn.SetOperand(i, aarchCGFunc.GetOrCreateStackBaseRegOperand()); + } + if (regOpnd.GetRegisterNumber() == RFP) { + insn.SetOperand(i, aarchCGFunc.GetOrCreatePhysicalRegisterOperand(stackBaseReg, k64BitSize, kRegTyInt)); + stackBaseOpnd = true; + } + } else if (opnd.IsMemoryAccessOperand()) { + auto &memOpnd = static_cast(opnd); + if (((memOpnd.GetAddrMode() == MemOperand::kAddrModeBOi) || + (memOpnd.GetAddrMode() == MemOperand::kAddrModeBOrX)) && + memOpnd.GetBaseRegister() != nullptr) { + if (memOpnd.GetBaseRegister()->IsOfVary()) { + memOpnd.SetBaseRegister(static_cast(aarchCGFunc.GetOrCreateStackBaseRegOperand())); + } + RegOperand *memBaseReg = memOpnd.GetBaseRegister(); + if (memBaseReg->GetRegisterNumber() == RFP) { + RegOperand &newBaseOpnd = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(stackBaseReg, k64BitSize, kRegTyInt); + MemOperand &newMemOpnd = aarchCGFunc.GetOrCreateMemOpnd( + memOpnd.GetAddrMode(), memOpnd.GetSize(), &newBaseOpnd, memOpnd.GetIndexRegister(), + memOpnd.GetOffsetImmediate(), memOpnd.GetSymbol()); + insn.SetOperand(i, newMemOpnd); + stackBaseOpnd = true; + } + } + if ((memOpnd.GetAddrMode() != MemOperand::kAddrModeBOi) || !memOpnd.IsIntactIndexed()) { + continue; + } + OfstOperand *ofstOpnd = memOpnd.GetOffsetImmediate(); + if (ofstOpnd == nullptr) { + continue; + } + if (ofstOpnd->GetVary() == kUnAdjustVary) { + ofstOpnd->AdjustOffset(static_cast(static_cast(memLayout)->RealStackFrameSize() - + (isLmbc ? 0 : memLayout->SizeOfArgsToStackPass()))); + ofstOpnd->SetVary(kAdjustVary); + } + if (ofstOpnd->GetVary() == kAdjustVary || ofstOpnd->GetVary() == kNotVary) { + bool condition = aarchCGFunc.IsOperandImmValid(insn.GetMachineOpcode(), &memOpnd, i); + if (!condition) { + MemOperand &newMemOpnd = aarchCGFunc.SplitOffsetWithAddInstruction( + memOpnd, memOpnd.GetSize(), static_cast(R16), false, &insn, insn.IsLoadStorePair()); + insn.SetOperand(i, newMemOpnd); + } + } + } else if (opnd.IsIntImmediate()) { + AdjustmentOffsetForImmOpnd(insn, i, aarchCGFunc); + } + } + if (stackBaseOpnd && !aarchCGFunc.UseFP()) { + AdjustmentStackPointer(insn, aarchCGFunc); + } +} + +void AArch64FPLROffsetAdjustment::AdjustmentOffsetForImmOpnd(Insn &insn, uint32 index, + AArch64CGFunc &aarchCGFunc) const { + auto &immOpnd = static_cast(insn.GetOperand(index)); + MemLayout *memLayout = aarchCGFunc.GetMemlayout(); + if (immOpnd.GetVary() == kUnAdjustVary) { + int64 ofst = static_cast(memLayout)->RealStackFrameSize() - memLayout->SizeOfArgsToStackPass(); + if (insn.GetMachineOpcode() == MOP_xsubrri12 || insn.GetMachineOpcode() == MOP_wsubrri12) { + immOpnd.SetValue(immOpnd.GetValue() - ofst); + if (immOpnd.GetValue() < 0) { + immOpnd.Negate(); + } + insn.SetMOP(AArch64CG::kMd[A64ConstProp::GetReversalMOP(insn.GetMachineOpcode())]); + } else { + immOpnd.Add(ofst); + } + } + if (!aarchCGFunc.IsOperandImmValid(insn.GetMachineOpcode(), &immOpnd, index)) { + if (insn.GetMachineOpcode() >= MOP_xaddrri24 && insn.GetMachineOpcode() <= MOP_waddrri12) { + PrimType destTy = + static_cast(insn.GetOperand(kInsnFirstOpnd)).GetSize() == k64BitSize ? 
PTY_i64 : PTY_i32; + RegOperand *resOpnd = &static_cast(insn.GetOperand(kInsnFirstOpnd)); + ImmOperand ©ImmOpnd = aarchCGFunc.CreateImmOperand( + immOpnd.GetValue(), immOpnd.GetSize(), immOpnd.IsSignedValue()); + aarchCGFunc.SelectAddAfterInsn(*resOpnd, insn.GetOperand(kInsnSecondOpnd), copyImmOpnd, destTy, false, insn); + insn.GetBB()->RemoveInsn(insn); + } else if (insn.GetMachineOpcode() == MOP_xsubrri12 || insn.GetMachineOpcode() == MOP_wsubrri12) { + if (immOpnd.IsSingleInstructionMovable()) { + RegOperand &tempReg = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(R16, k64BitSize, kRegTyInt); + bool is64bit = insn.GetOperand(kInsnFirstOpnd).GetSize() == k64BitSize; + MOperator tempMovOp = is64bit ? MOP_xmovri64 : MOP_wmovri32; + Insn &tempMov = cgFunc->GetInsnBuilder()->BuildInsn(tempMovOp, tempReg, immOpnd); + insn.SetOperand(index, tempReg); + insn.SetMOP(is64bit ? AArch64CG::kMd[MOP_xsubrrr] : AArch64CG::kMd[MOP_wsubrrr]); + (void)insn.GetBB()->InsertInsnBefore(insn, tempMov); + } + } else { + CHECK_FATAL(false, "NIY"); + } + } + immOpnd.SetVary(kAdjustVary); +} + +void AArch64FPLROffsetAdjustment::AdjustmentStackPointer(Insn &insn, AArch64CGFunc &aarchCGFunc) const { + AArch64MemLayout *aarch64memlayout = static_cast(aarchCGFunc.GetMemlayout()); + int32 offset = static_cast(aarch64memlayout->SizeOfArgsToStackPass()); + if (offset == 0) { + return; + } + if (insn.IsLoad() || insn.IsStore()) { + uint32 opndNum = insn.GetOperandSize(); + for (uint32 i = 0; i < opndNum; ++i) { + Operand &opnd = insn.GetOperand(i); + if (opnd.IsMemoryAccessOperand()) { + auto &memOpnd = static_cast(opnd); + ASSERT(memOpnd.GetBaseRegister() != nullptr, "Unexpect, need check"); + CHECK_FATAL(memOpnd.IsIntactIndexed(), "unsupport yet"); + if (memOpnd.GetAddrMode() == MemOperand::kAddrModeBOi) { + ImmOperand *ofstOpnd = memOpnd.GetOffsetOperand(); + ImmOperand *newOfstOpnd = &aarchCGFunc.GetOrCreateOfstOpnd( + static_cast(ofstOpnd->GetValue() + offset), ofstOpnd->GetSize()); + MemOperand &newOfstMemOpnd = aarchCGFunc.GetOrCreateMemOpnd( + MemOperand::kAddrModeBOi, memOpnd.GetSize(), memOpnd.GetBaseRegister(), memOpnd.GetIndexRegister(), + newOfstOpnd, memOpnd.GetSymbol()); + insn.SetOperand(i, newOfstMemOpnd); + if (!aarchCGFunc.IsOperandImmValid(insn.GetMachineOpcode(), &newOfstMemOpnd, i)) { + bool isPair = (i == kInsnThirdOpnd); + MemOperand &newMemOpnd = aarchCGFunc.SplitOffsetWithAddInstruction( + newOfstMemOpnd, newOfstMemOpnd.GetSize(), static_cast(R16), false, &insn, isPair); + insn.SetOperand(i, newMemOpnd); + } + continue; + } else if (memOpnd.GetAddrMode() == MemOperand::kAddrModeBOrX) { + CHECK_FATAL(false, "Unexpect adjust insn"); + } else { + insn.Dump(); + CHECK_FATAL(false, "Unexpect adjust insn"); + } + } + } + } else { + switch (insn.GetMachineOpcode()) { + case MOP_xaddrri12: { + ASSERT(static_cast(insn.GetOperand(kInsnSecondOpnd)).GetRegisterNumber() == RSP, + "regNumber should be changed in AdjustmentOffsetForOpnd"); + auto *newAddImmOpnd = static_cast( + static_cast(insn.GetOperand(kInsnThirdOpnd)).Clone(*cgFunc->GetMemoryPool())); + newAddImmOpnd->SetValue(newAddImmOpnd->GetValue() + offset); + insn.SetOperand(kInsnThirdOpnd, *newAddImmOpnd); + AdjustmentOffsetForImmOpnd(insn, kInsnThirdOpnd, aarchCGFunc); /* legalize imm opnd */ + break; + } + case MOP_xaddrri24: { + ASSERT(static_cast(insn.GetOperand(kInsnSecondOpnd)).GetRegisterNumber() == RSP, + "regNumber should be changed in AdjustmentOffsetForOpnd"); + RegOperand &tempReg = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(R16, 
k64BitSize, kRegTyInt); + ImmOperand &offsetReg = aarchCGFunc.CreateImmOperand(offset, k64BitSize, false); + aarchCGFunc.SelectAddAfterInsn(tempReg, insn.GetOperand(kInsnSecondOpnd), offsetReg, PTY_i64, false, insn); + insn.SetOperand(kInsnSecondOpnd, tempReg); + break; + } + case MOP_xsubrri12: { + ASSERT(static_cast(insn.GetOperand(kInsnSecondOpnd)).GetRegisterNumber() == RSP, + "regNumber should be changed in AdjustmentOffsetForOpnd"); + ImmOperand &subend = static_cast(insn.GetOperand(kInsnThirdOpnd)); + subend.SetValue(subend.GetValue() - offset); + break; + } + case MOP_xsubrri24: { + ASSERT(static_cast(insn.GetOperand(kInsnSecondOpnd)).GetRegisterNumber() == RSP, + "regNumber should be changed in AdjustmentOffsetForOpnd"); + RegOperand &tempReg = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(R16, k64BitSize, kRegTyInt); + ImmOperand &offsetReg = aarchCGFunc.CreateImmOperand(offset, k64BitSize, false); + aarchCGFunc.SelectAddAfterInsn(tempReg, insn.GetOperand(kInsnSecondOpnd), offsetReg, PTY_i64, false, insn); + insn.SetOperand(kInsnSecondOpnd, tempReg); + break; + } + case MOP_waddrri12: { + if (!CGOptions::IsArm64ilp32()) { + insn.Dump(); + CHECK_FATAL(false, "Unexpect offset adjustment insn"); + } else { + ASSERT(static_cast(insn.GetOperand(kInsnSecondOpnd)).GetRegisterNumber() == RSP, + "regNumber should be changed in AdjustmentOffsetForOpnd"); + ImmOperand &addend = static_cast(insn.GetOperand(kInsnThirdOpnd)); + addend.SetValue(addend.GetValue() + offset); + AdjustmentOffsetForImmOpnd(insn, kInsnThirdOpnd, aarchCGFunc); /* legalize imm opnd */ + } + break; + } + default: + insn.Dump(); + CHECK_FATAL(false, "Unexpect offset adjustment insn"); + } + } +} + +void AArch64FPLROffsetAdjustment::AdjustmentOffsetForFPLR() { + AArch64CGFunc *aarchCGFunc = static_cast(cgFunc); + FOR_ALL_BB(bb, aarchCGFunc) { + FOR_BB_INSNS(insn, bb) { + if (!insn->IsMachineInstruction()) { + continue; + } + AdjustmentOffsetForOpnd(*insn, *aarchCGFunc); + } + } + +#undef STKLAY_DBUG +#ifdef STKLAY_DBUG + AArch64MemLayout *aarch64memlayout = static_cast(cgFunc->GetMemlayout()); + LogInfo::MapleLogger() << "stkpass: " << aarch64memlayout->GetSegArgsStkpass().size << "\n"; + LogInfo::MapleLogger() << "local: " << aarch64memlayout->GetSizeOfLocals() << "\n"; + LogInfo::MapleLogger() << "ref local: " << aarch64memlayout->GetSizeOfRefLocals() << "\n"; + LogInfo::MapleLogger() << "regpass: " << aarch64memlayout->GetSegArgsRegPassed().size << "\n"; + LogInfo::MapleLogger() << "regspill: " << aarch64memlayout->GetSizeOfSpillReg() << "\n"; + LogInfo::MapleLogger() << "calleesave: " << SizeOfCalleeSaved() << "\n"; + +#endif +} +} /* namespace maplebe */ diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_operand.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_operand.cpp new file mode 100644 index 0000000000000000000000000000000000000000..eda313759eba456fd254d15d179fa47f9d970236 --- /dev/null +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_operand.cpp @@ -0,0 +1,60 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. 
+ * See the Mulan PSL v2 for more details.
+ */
+#include "aarch64_operand.h"
+#include
+#include
+#include "aarch64_abi.h"
+#include "aarch64_cgfunc.h"
+#include "aarch64_cg.h"
+
+namespace maplebe {
+bool StImmOperand::Less(const Operand &right) const {
+ if (&right == this) {
+ return false;
+ }
+
+ /* For different type. */
+ if (GetKind() != right.GetKind()) {
+ return GetKind() < right.GetKind();
+ }
+
+ const StImmOperand *rightOpnd = static_cast<const StImmOperand*>(&right);
+ if (symbol != rightOpnd->symbol) {
+ return symbol < rightOpnd->symbol;
+ }
+ if (offset != rightOpnd->offset) {
+ return offset < rightOpnd->offset;
+ }
+ return relocs < rightOpnd->relocs;
+}
+
+bool ExtendShiftOperand::Less(const Operand &right) const {
+ if (&right == this) {
+ return false;
+ }
+ /* For different type. */
+ if (GetKind() != right.GetKind()) {
+ return GetKind() < right.GetKind();
+ }
+
+ const ExtendShiftOperand *rightOpnd = static_cast<const ExtendShiftOperand*>(&right);
+
+ /* The same type. */
+ if (extendOp != rightOpnd->extendOp) {
+ return extendOp < rightOpnd->extendOp;
+ }
+ return shiftAmount < rightOpnd->shiftAmount;
+}
+} /* namespace maplebe */
diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_optimize_common.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_optimize_common.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..d962ed7b966d6181d4e2f98a9c085151d4c8b182
--- /dev/null
+++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_optimize_common.cpp
@@ -0,0 +1,203 @@
+/*
+ * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */ +#include "aarch64_optimize_common.h" +#include "aarch64_cg.h" +#include "aarch64_cgfunc.h" +#include "cgbb.h" + +namespace maplebe { +void AArch64InsnVisitor::ModifyJumpTarget(Operand &targetOperand, BB &bb) { + if (bb.GetKind() == BB::kBBIgoto) { + bool modified = false; + for (Insn *insn = bb.GetLastInsn(); insn != nullptr; insn = insn->GetPrev()) { + if (insn->GetMachineOpcode() == MOP_adrp_label) { + LabelIdx labIdx = static_cast(targetOperand).GetLabelIndex(); + ImmOperand &immOpnd = static_cast(GetCGFunc())->CreateImmOperand(labIdx, k8BitSize, false); + insn->SetOperand(1, immOpnd); + modified = true; + } + } + CHECK_FATAL(modified, "ModifyJumpTarget: Could not change jump target"); + return; + } else if (bb.GetKind() == BB::kBBGoto) { + for (Insn *insn = bb.GetLastInsn(); insn != nullptr; insn = insn->GetPrev()) { + if (insn->GetMachineOpcode() == MOP_adrp_label) { + maple::LabelIdx labidx = static_cast(targetOperand).GetLabelIndex(); + LabelOperand &label = static_cast(GetCGFunc())->GetOrCreateLabelOperand(labidx); + insn->SetOperand(1, label); + break; + } + } + // fallthru below to patch the branch insn + } + bb.GetLastInsn()->SetOperand(AArch64isa::GetJumpTargetIdx(*bb.GetLastInsn()), targetOperand); +} + +void AArch64InsnVisitor::ModifyJumpTarget(maple::LabelIdx targetLabel, BB &bb) { + ModifyJumpTarget(static_cast(GetCGFunc())->GetOrCreateLabelOperand(targetLabel), bb); +} + +void AArch64InsnVisitor::ModifyJumpTarget(BB &newTarget, BB &bb) { + ModifyJumpTarget(newTarget.GetLastInsn()->GetOperand( + AArch64isa::GetJumpTargetIdx(*newTarget.GetLastInsn())), bb); +} + +Insn *AArch64InsnVisitor::CloneInsn(Insn &originalInsn) { + MemPool *memPool = const_cast(CG::GetCurCGFunc()->GetMemoryPool()); + if (originalInsn.IsTargetInsn()) { + if (!originalInsn.IsVectorOp()) { + return memPool->Clone(originalInsn); + } else { + auto *insn = memPool->Clone(*static_cast(&originalInsn)); + insn->SetRegSpecList(static_cast(originalInsn).GetRegSpecList()); + return insn; + } + } else if (originalInsn.IsCfiInsn()) { + return memPool->Clone(*static_cast(&originalInsn)); + } else if (originalInsn.IsDbgInsn()) { + return memPool->Clone(*static_cast(&originalInsn)); + } + if (originalInsn.IsComment()) { + return memPool->Clone(originalInsn); + } + CHECK_FATAL(false, "Cannot clone"); + return nullptr; +} + +/* + * Precondition: The given insn is a jump instruction. + * Get the jump target label from the given instruction. + * Note: MOP_xbr is a branching instruction, but the target is unknown at compile time, + * because a register instead of label. So we don't take it as a branching instruction. 
+ */ +LabelIdx AArch64InsnVisitor::GetJumpLabel(const Insn &insn) const { + uint32 operandIdx = AArch64isa::GetJumpTargetIdx(insn); + if (insn.GetOperand(operandIdx).IsLabelOpnd()) { + return static_cast(insn.GetOperand(operandIdx)).GetLabelIndex(); + } + ASSERT(false, "Operand is not label"); + return 0; +} + +bool AArch64InsnVisitor::IsCompareInsn(const Insn &insn) const { + switch (insn.GetMachineOpcode()) { + case MOP_wcmpri: + case MOP_wcmprr: + case MOP_xcmpri: + case MOP_xcmprr: + case MOP_hcmperi: + case MOP_hcmperr: + case MOP_scmperi: + case MOP_scmperr: + case MOP_dcmperi: + case MOP_dcmperr: + case MOP_hcmpqri: + case MOP_hcmpqrr: + case MOP_scmpqri: + case MOP_scmpqrr: + case MOP_dcmpqri: + case MOP_dcmpqrr: + case MOP_wcmnri: + case MOP_wcmnrr: + case MOP_xcmnri: + case MOP_xcmnrr: + return true; + default: + return false; + } +} + +bool AArch64InsnVisitor::IsCompareAndBranchInsn(const Insn &insn) const { + switch (insn.GetMachineOpcode()) { + case MOP_wcbnz: + case MOP_xcbnz: + case MOP_wcbz: + case MOP_xcbz: + return true; + default: + return false; + } +} + +bool AArch64InsnVisitor::IsAddOrSubInsn(const Insn &insn) const { + switch (insn.GetMachineOpcode()) { + case MOP_xaddrrr: + case MOP_xaddrri12: + case MOP_waddrrr: + case MOP_waddrri12: + case MOP_xsubrrr: + case MOP_xsubrri12: + case MOP_wsubrrr: + case MOP_wsubrri12: + return true; + default: + return false; + } +} + +RegOperand *AArch64InsnVisitor::CreateVregFromReg(const RegOperand &pReg) { + return &static_cast(GetCGFunc())->CreateRegisterOperandOfType( + pReg.GetRegisterType(), pReg.GetSize() / k8BitSize); +} + +void AArch64InsnVisitor::ReTargetSuccBB(BB &bb, LabelIdx newTarget) const { + Insn *lastInsn = bb.GetLastMachineInsn(); + if (lastInsn && (lastInsn->IsBranch() || lastInsn->IsCondBranch() || lastInsn->IsUnCondBranch())) { + CHECK_FATAL(false, "check last insn of a ft BB"); + } + LabelOperand &targetOpnd = GetCGFunc()->GetOrCreateLabelOperand(newTarget); + Insn &newInsn = GetCGFunc()->GetInsnBuilder()->BuildInsn(MOP_xuncond, targetOpnd); + bb.AppendInsn(newInsn); +} + +void AArch64InsnVisitor::FlipIfBB(BB &bb, LabelIdx ftLabel) const { + Insn *lastInsn = bb.GetLastMachineInsn(); + CHECK_FATAL(lastInsn && lastInsn->IsCondBranch(), "must be ? 
of a if BB"); + uint32 targetIdx = AArch64isa::GetJumpTargetIdx(*lastInsn); + MOperator mOp = AArch64isa::FlipConditionOp(lastInsn->GetMachineOpcode()); + if (mOp == 0 || mOp > MOP_nop) { + CHECK_FATAL(false, "get flip op failed"); + } + lastInsn->SetMOP(AArch64CG::kMd[mOp]); + LabelOperand &targetOpnd = GetCGFunc()->GetOrCreateLabelOperand(ftLabel); + lastInsn->SetOperand(targetIdx, targetOpnd); +} + +BB *AArch64InsnVisitor::CreateGotoBBAfterCondBB(BB &bb, BB &fallthru, bool isTargetFallthru) const { + BB *newBB = GetCGFunc()->CreateNewBB(); + newBB->SetKind(BB::kBBGoto); + LabelIdx fallthruLabel = fallthru.GetLabIdx(); + if (fallthruLabel == MIRLabelTable::GetDummyLabel()) { + fallthruLabel = GetCGFunc()->CreateLabel(); + fallthru.SetLabIdx(fallthruLabel); + } + LabelOperand &targetOpnd = GetCGFunc()->GetOrCreateLabelOperand(fallthruLabel); + Insn &gotoInsn = GetCGFunc()->GetInsnBuilder()->BuildInsn(MOP_xuncond, targetOpnd); + newBB->AppendInsn(gotoInsn); + + /* maintain pred and succ */ + if (!isTargetFallthru) { + fallthru.RemovePreds(bb); + } + fallthru.PushBackPreds(*newBB); + if (!isTargetFallthru) { + bb.RemoveSuccs(fallthru); + } + bb.PushBackSuccs(*newBB); + newBB->PushBackSuccs(fallthru); + newBB->PushBackPreds(bb); + return newBB; +} +} /* namespace maplebe */ diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_peep.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_peep.cpp new file mode 100644 index 0000000000000000000000000000000000000000..ba3911004f0f8926e54e05a2a848f18deb1c8a1e --- /dev/null +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_peep.cpp @@ -0,0 +1,4796 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "aarch64_peep.h" +#include "cg.h" +#include "mpl_logging.h" +#include "common_utils.h" +#include "cg_option.h" +#include "aarch64_utils.h" + +namespace maplebe { +#define JAVALANG (cgFunc->GetMirModule().IsJavaModule()) +#define CG_PEEP_DUMP CG_DEBUG_FUNC(*cgFunc) +namespace { +const std::string kMccLoadRef = "MCC_LoadRefField"; +const std::string kMccLoadRefV = "MCC_LoadVolatileField"; +const std::string kMccLoadRefS = "MCC_LoadRefStatic"; +const std::string kMccLoadRefVS = "MCC_LoadVolatileStaticField"; +const std::string kMccDummy = "MCC_Dummy"; + +const std::string GetReadBarrierName(const Insn &insn) { + constexpr int32 totalBarrierNamesNum = 5; + std::array barrierNames = { + kMccLoadRef, kMccLoadRefV, kMccLoadRefS, kMccLoadRefVS, kMccDummy + }; + if (insn.GetMachineOpcode() == MOP_xbl || + insn.GetMachineOpcode() == MOP_tail_call_opt_xbl) { + auto &op = static_cast(insn.GetOperand(kInsnFirstOpnd)); + const std::string &funcName = op.GetName(); + for (const std::string &singleBarrierName : barrierNames) { + if (funcName == singleBarrierName) { + return singleBarrierName; + } + } + } + return ""; +} + +MOperator GetLoadOperator(uint32 refSize, bool isVolatile) { + if (refSize == k32BitSize) { + return isVolatile ? MOP_wldar : MOP_wldr; + } + return isVolatile ? 
MOP_xldar : MOP_xldr; +} +} + +static bool IsZeroRegister(const Operand &opnd) { + if (!opnd.IsRegister()) { + return false; + } + const RegOperand *regOpnd = static_cast(&opnd); + return regOpnd->GetRegisterNumber() == RZR; +} + +void AArch64CGPeepHole::Run() { + bool optSuccess = false; + FOR_ALL_BB(bb, cgFunc) { + FOR_BB_INSNS_SAFE(insn, bb, nextInsn) { + if (!insn->IsMachineInstruction()) { + continue; + } + if (ssaInfo != nullptr) { + optSuccess |= DoSSAOptimize(*bb, *insn); + } else { + DoNormalOptimize(*bb, *insn); + } + } + } + if (optSuccess) { + Run(); + } +} + +bool AArch64CGPeepHole::DoSSAOptimize(BB &bb, Insn &insn) { + MOperator thisMop = insn.GetMachineOpcode(); + manager = peepMemPool->New(*cgFunc, bb, insn, *ssaInfo); + manager->SetOptSuccess(false); + switch (thisMop) { + case MOP_xandrrr: + case MOP_wandrrr: { + manager->Optimize(true); + break; + } + case MOP_wiorrri12: + case MOP_xiorrri13: { + manager->Optimize(true); + break; + } + case MOP_wcbz: + case MOP_xcbz: + case MOP_wcbnz: + case MOP_xcbnz: { + manager->Optimize(true); + manager->Optimize(true); + manager->Optimize(true); + break; + } + case MOP_beq: + case MOP_bne: { + manager->Optimize(true); + break; + } + case MOP_wcsetrc: + case MOP_xcsetrc: { + manager->Optimize(true); + break; + } + case MOP_waddrrr: + case MOP_xaddrrr: { + manager->Optimize(true); + manager->Optimize(true); + break; + } + case MOP_dadd: + case MOP_sadd: + case MOP_wsubrrr: + case MOP_xsubrrr: + case MOP_dsub: + case MOP_ssub: + case MOP_xinegrr: + case MOP_winegrr: + case MOP_wfnegrr: + case MOP_xfnegrr: { + manager->Optimize(true); + break; + } + case MOP_wandrri12: + case MOP_xandrri13: { + manager->Optimize(true); + manager->Optimize(true); + break; + } + case MOP_wcselrrrc: + case MOP_xcselrrrc: { + manager->Optimize(true); + break; + } + case MOP_wiorrrr: + case MOP_xiorrrr: + case MOP_wiorrrrs: + case MOP_xiorrrrs: { + manager->Optimize(true); + break; + } + case MOP_bge: + case MOP_ble: + case MOP_blt: + case MOP_bgt: { + manager->Optimize(true); + break; + } + case MOP_wcmprr: + case MOP_xcmprr: { + manager->Optimize(true); + break; + } + case MOP_xlslrri6: { + manager->Optimize(); + manager->Optimize(true); + manager->Optimize(true); + break; + } + case MOP_xsxtb32: + case MOP_xsxtb64: + case MOP_xsxth32: + case MOP_xsxth64: + case MOP_xsxtw64: + case MOP_xuxtb32: + case MOP_xuxth32: + case MOP_xuxtw64: { + manager->Optimize(true); + break; + } + case MOP_wlsrrri5: + case MOP_xlsrrri6: + case MOP_wasrrri5: + case MOP_xasrrri6: + case MOP_waddrri12: + case MOP_xaddrri12: + case MOP_wsubrri12: + case MOP_xsubrri12: { + manager->Optimize(true); + break; + } + case MOP_wlslrri5: { + manager->Optimize(true); + manager->Optimize(true); + break; + } + case MOP_wubfxrri5i5: + case MOP_xubfxrri6i6: { + manager->Optimize(true); + break; + } + default: + break; + } + return manager->OptSuccess(); +} + +bool ContinuousCmpCsetPattern::CheckCondCode(const CondOperand &condOpnd) const { + switch (condOpnd.GetCode()) { + case CC_NE: + case CC_EQ: + case CC_LT: + case CC_GE: + case CC_GT: + case CC_LE: + return true; + default: + return false; + } +} + +bool ContinuousCmpCsetPattern::CheckCondition(Insn &insn) { + MOperator curMop = insn.GetMachineOpcode(); + if (curMop != MOP_wcsetrc && curMop != MOP_xcsetrc) { + return false; + } + auto &condOpnd = static_cast(insn.GetOperand(kInsnSecondOpnd)); + if (condOpnd.GetCode() != CC_NE && condOpnd.GetCode() != CC_EQ) { + return false; + } + reverse = (condOpnd.GetCode() == CC_EQ); + auto &ccReg = 
static_cast(insn.GetOperand(kInsnThirdOpnd)); + prevCmpInsn = ssaInfo->GetDefInsn(ccReg); + if (prevCmpInsn == nullptr) { + return false; + } + MOperator prevCmpMop = prevCmpInsn->GetMachineOpcode(); + if (prevCmpMop != MOP_wcmpri && prevCmpMop != MOP_xcmpri) { + return false; + } + if (!static_cast(prevCmpInsn->GetOperand(kInsnThirdOpnd)).IsZero()) { + return false; + } + auto &cmpCCReg = static_cast(prevCmpInsn->GetOperand(kInsnFirstOpnd)); + InsnSet useSet = GetAllUseInsn(cmpCCReg); + if (useSet.size() > 1) { + return false; + } + auto &cmpUseReg = static_cast(prevCmpInsn->GetOperand(kInsnSecondOpnd)); + prevCsetInsn1 = ssaInfo->GetDefInsn(cmpUseReg); + if (prevCsetInsn1 == nullptr) { + return false; + } + MOperator prevCsetMop1 = prevCsetInsn1->GetMachineOpcode(); + if (prevCsetMop1 != MOP_wcsetrc && prevCsetMop1 != MOP_xcsetrc) { + return false; + } + auto &condOpnd1 = static_cast(prevCsetInsn1->GetOperand(kInsnSecondOpnd)); + if (!CheckCondCode(condOpnd1)) { + return false; + } + auto &ccReg1 = static_cast(prevCsetInsn1->GetOperand(kInsnThirdOpnd)); + prevCmpInsn1 = ssaInfo->GetDefInsn(ccReg1); + if (prevCmpInsn1 == nullptr) { + return false; + } + if (IsCCRegCrossVersion(*prevCsetInsn1, *prevCmpInsn, ccReg1)) { + return false; + } + return true; +} + +void ContinuousCmpCsetPattern::Run(BB &bb, Insn &insn) { + if (!CheckCondition(insn)) { + return; + } + auto *aarFunc = static_cast(cgFunc); + MOperator curMop = insn.GetMachineOpcode(); + Operand &resOpnd = insn.GetOperand(kInsnFirstOpnd); + Insn *newCsetInsn = nullptr; + if (reverse) { + MOperator prevCsetMop = prevCsetInsn1->GetMachineOpcode(); + auto &prevCsetCondOpnd = static_cast(prevCsetInsn1->GetOperand(kInsnSecondOpnd)); + CondOperand &newCondOpnd = aarFunc->GetCondOperand(GetReverseCC(prevCsetCondOpnd.GetCode())); + regno_t tmpRegNO = 0; + auto *tmpDefOpnd = aarFunc->CreateVirtualRegisterOperand(tmpRegNO, + resOpnd.GetSize(), static_cast(resOpnd).GetRegisterType()); + tmpDefOpnd->SetValidBitsNum(k1BitSize); + newCsetInsn = &cgFunc->GetInsnBuilder()->BuildInsn( + prevCsetMop, *tmpDefOpnd, newCondOpnd, prevCsetInsn1->GetOperand(kInsnThirdOpnd)); + BB *prevCsetBB = prevCsetInsn1->GetBB(); + (void)prevCsetBB->InsertInsnAfter(*prevCsetInsn1, *newCsetInsn); + /* update ssa info */ + auto *a64SSAInfo = static_cast(ssaInfo); + a64SSAInfo->CreateNewInsnSSAInfo(*newCsetInsn); + /* dump pattern info */ + if (CG_PEEP_DUMP) { + std::vector prevs; + prevs.emplace_back(prevCmpInsn1); + prevs.emplace_back(&insn); + DumpAfterPattern(prevs, prevCmpInsn, newCsetInsn); + } + } + MOperator newMop = (curMop == MOP_wcsetrc) ? 
MOP_wmovrr : MOP_xmovrr; + Insn *newInsn = nullptr; + if (newCsetInsn == nullptr) { + newInsn = &cgFunc->GetInsnBuilder()->BuildInsn( + newMop, insn.GetOperand(kInsnFirstOpnd), prevCsetInsn1->GetOperand(kInsnFirstOpnd)); + } else { + newInsn = &cgFunc->GetInsnBuilder()->BuildInsn( + newMop, insn.GetOperand(kInsnFirstOpnd), newCsetInsn->GetOperand(kInsnFirstOpnd)); + } + bb.ReplaceInsn(insn, *newInsn); + /* update ssa info */ + ssaInfo->ReplaceInsn(insn, *newInsn); + optSuccess = true; + SetCurrInsn(newInsn); + /* dump pattern info */ + if (CG_PEEP_DUMP) { + std::vector prevs; + prevs.emplace_back(prevCmpInsn1); + prevs.emplace_back(prevCsetInsn1); + if (newCsetInsn == nullptr) { + (void)prevs.emplace_back(prevCmpInsn); + } else { + (void)prevs.emplace_back(newCsetInsn); + } + DumpAfterPattern(prevs, &insn, newInsn); + } +} + +bool NegCmpToCmnPattern::CheckCondition(Insn &insn) { + MOperator curMop = insn.GetMachineOpcode(); + if (curMop != MOP_wcmprr && curMop != MOP_xcmprr) { + return false; + } + auto &useReg = static_cast(insn.GetOperand(kInsnThirdOpnd)); + prevInsn = ssaInfo->GetDefInsn(useReg); + if (prevInsn == nullptr) { + return false; + } + MOperator prevMop = prevInsn->GetMachineOpcode(); + if (prevMop != MOP_winegrr && prevMop != MOP_xinegrr && + prevMop != MOP_winegrrs && prevMop != MOP_xinegrrs) { + return false; + } + // Determine whether implicit conversion is existed. + if ((prevMop == MOP_winegrr && curMop == MOP_xcmprr) || (prevMop == MOP_winegrrs && curMop == MOP_xcmprr) || + (prevMop == MOP_xinegrr && curMop == MOP_wcmprr) || (prevMop == MOP_winegrrs && curMop == MOP_xcmprr)) { + return false; + } + auto &ccReg = static_cast(insn.GetOperand(kInsnFirstOpnd)); + InsnSet useInsns = GetAllUseInsn(ccReg); + for (auto *useInsn : useInsns) { + if (useInsn == nullptr) { + continue; + } + MOperator useMop = useInsn->GetMachineOpcode(); + if (useMop == MOP_bhi || useMop == MOP_bls) { + return false; + } + bool findUnsignedCond = false; + for (uint32 i = 0; i < useInsn->GetOperandSize(); ++i) { + if (useInsn->GetOperand(i).GetKind() == Operand::kOpdCond) { + ConditionCode cond = static_cast(useInsn->GetOperand(i)).GetCode(); + /* in case of ignoring v flag + * adds xt, x0, x1 (0x8000000000000000) -> not set v + * ==> + * neg x1 x1 (0x8000000000000000) which is same for negative 0 + * subs xt, x0, x1 () -> set v + */ + if (cond == CC_HI || cond == CC_LS || cond == CC_GE || cond == CC_GT || + cond == CC_LE || cond == CC_LT) { + findUnsignedCond = true; + break; + } + } + } + if (findUnsignedCond) { + return false; + } + } + return true; +} + +void NegCmpToCmnPattern::Run(BB &bb, Insn &insn) { + if (!CheckCondition(insn)) { + return; + } + Operand &opnd1 = insn.GetOperand(kInsnSecondOpnd); + Operand &opnd2 = prevInsn->GetOperand(kInsnSecondOpnd); + auto &ccReg = static_cast(insn.GetOperand(kInsnFirstOpnd)); + MOperator prevMop = prevInsn->GetMachineOpcode(); + MOperator currMop = insn.GetMachineOpcode(); + Insn *newInsn = nullptr; + if (prevMop == MOP_winegrr || prevMop == MOP_xinegrr) { + MOperator newMop = (currMop == MOP_wcmprr) ? MOP_wcmnrr : MOP_xcmnrr; + newInsn = &(cgFunc->GetInsnBuilder()->BuildInsn(newMop, ccReg, opnd1, opnd2)); + } else { + /* prevMop == MOP_winegrrs || prevMop == MOP_xinegrrs */ + MOperator newMop = (currMop == MOP_wcmprr) ? 
MOP_wcmnrrs : MOP_xcmnrrs; + Operand &shiftOpnd = prevInsn->GetOperand(kInsnThirdOpnd); + newInsn = &(cgFunc->GetInsnBuilder()->BuildInsn(newMop, ccReg, opnd1, opnd2, shiftOpnd)); + } + CHECK_FATAL(newInsn != nullptr, "must create newInsn"); + bb.ReplaceInsn(insn, *newInsn); + /* update ssa info */ + ssaInfo->ReplaceInsn(insn, *newInsn); + optSuccess = true; + SetCurrInsn(newInsn); + /* dump pattern info */ + if (CG_PEEP_DUMP) { + std::vector prevs; + prevs.emplace_back(prevInsn); + DumpAfterPattern(prevs, &insn, newInsn); + } +} + +void LdrCmpPattern::Run(BB &bb, Insn &insn) { + if (!CheckCondition(insn)) { + return; + } + bb.RemoveInsn(*ldr2); + bb.RemoveInsn(*ldr1); + bb.RemoveInsn(insn); + bb.RemoveInsn(*bne1); + prevLdr1->SetMOP(AArch64CG::kMd[MOP_xldr]); + prevLdr2->SetMOP(AArch64CG::kMd[MOP_xldr]); + prevCmp->SetMOP(AArch64CG::kMd[MOP_xcmprr]); +} + +bool LdrCmpPattern::CheckCondition(Insn &insn) { + /* a pattern which breaks cfg + * it is more suitable for peephole after pgo using */ + if (CGOptions::DoLiteProfGen() || CGOptions::DoLiteProfUse()) { + return false; + } + if (currInsn != &insn) { + return false; + } + if (!SetInsns()) { + return false; + } + if (!CheckInsns()) { + return false; + } + auto ®0 = static_cast(currInsn->GetOperand(kInsnSecondOpnd)); + auto ®1 = static_cast(currInsn->GetOperand(kInsnThirdOpnd)); + return !(IfOperandIsLiveAfterInsn(reg0, insn) || IfOperandIsLiveAfterInsn(reg1, insn)); +} + +/* + * mopSeq: + * ldr,ldr,cmp,bne + */ +bool LdrCmpPattern::SetInsns() { + if (!IsLdr(currInsn->GetPreviousMachineInsn())) { + return false; + } + ldr2 = currInsn->GetPreviousMachineInsn(); + if (!IsLdr(ldr2->GetPreviousMachineInsn())) { + return false; + } + ldr1 = ldr2->GetPreviousMachineInsn(); + /* ldr1 must be firstInsn in currBB */ + if (currInsn->GetBB()->GetFirstInsn() != ldr1) { + return false; + } + if (!IsBne(currInsn->GetNextMachineInsn())) { + return false; + } + bne1 = currInsn->GetNextMachineInsn(); + BB *prevBB = currInsn->GetBB()->GetPrev(); + /* single prev, single pred */ + const MapleList &predBBs = currInsn->GetBB()->GetPreds(); + if ((prevBB == nullptr) || (predBBs.size() != 1) || (prevBB != *predBBs.begin())) { + return false; + } + if (!IsBne(prevBB->GetLastInsn())) { + return false; + } + bne2 = prevBB->GetLastInsn(); + if (!IsCmp(bne2->GetPreviousMachineInsn())) { + return false; + } + prevCmp = bne2->GetPreviousMachineInsn(); + if (!IsLdr(prevCmp->GetPreviousMachineInsn())) { + return false; + } + prevLdr2 = prevCmp->GetPreviousMachineInsn(); + if (!IsLdr(prevLdr2->GetPreviousMachineInsn())) { + return false; + } + prevLdr1 = prevLdr2->GetPreviousMachineInsn(); + return true; +} + +bool LdrCmpPattern::CheckInsns() const { + auto &label1 = static_cast(bne1->GetOperand(kInsnSecondOpnd)); + auto &label2 = static_cast(bne2->GetOperand(kInsnSecondOpnd)); + if (label1.GetLabelIndex() != label2.GetLabelIndex()) { + return false; + } + auto ®0 = static_cast(currInsn->GetOperand(kInsnSecondOpnd)); + auto ®1 = static_cast(currInsn->GetOperand(kInsnThirdOpnd)); + regno_t regno0 = reg0.GetRegisterNumber(); + regno_t regno1 = reg1.GetRegisterNumber(); + if (regno0 == regno1) { + return false; + } + auto &mem1 = static_cast(ldr1->GetOperand(kInsnSecondOpnd)); + auto &preMem1 = static_cast(prevLdr1->GetOperand(kInsnSecondOpnd)); + auto &mem2 = static_cast(ldr2->GetOperand(kInsnSecondOpnd)); + auto &preMem2 = static_cast(prevLdr2->GetOperand(kInsnSecondOpnd)); + regno_t regnoBase0 = mem1.GetBaseRegister()->GetRegisterNumber(); + regno_t regnoBase1 = 
mem2.GetBaseRegister()->GetRegisterNumber(); + if (regnoBase0 == regnoBase1) { + return false; + } + if ((regno0 == regnoBase0) || (regno0 == regnoBase1) || (regno1 == regnoBase0) || (regno1 == regnoBase1)) { + return false; + } + if ((reg0 == static_cast(ldr2->GetOperand(kInsnFirstOpnd))) && + (reg0 == static_cast(prevLdr2->GetOperand(kInsnFirstOpnd))) && + (reg1 == static_cast(ldr1->GetOperand(kInsnFirstOpnd))) && + (reg1 == static_cast(prevLdr1->GetOperand(kInsnFirstOpnd)))) { + if (MemOffet4Bit(preMem2, mem2) && MemOffet4Bit(preMem1, mem1)) { + return true; + } + } + if ((reg0 == static_cast(ldr1->GetOperand(kInsnFirstOpnd))) && + (reg0 == static_cast(prevLdr1->GetOperand(kInsnFirstOpnd))) && + (reg1 == static_cast(ldr2->GetOperand(kInsnFirstOpnd))) && + (reg1 == static_cast(prevLdr2->GetOperand(kInsnFirstOpnd)))) { + if (MemOffet4Bit(preMem2, mem2) && MemOffet4Bit(preMem1, mem1)) { + return true; + } + } + return false; +} + +bool LdrCmpPattern::MemOffet4Bit(const MemOperand &m1, const MemOperand &m2) const { + if (m1.GetAddrMode() != m2.GetAddrMode()) { + return false; + } + if (m1.GetAddrMode() != MemOperand::kAddrModeBOi) { + return false; + } + if (m1.GetBaseRegister()->GetRegisterNumber() != m2.GetBaseRegister()->GetRegisterNumber()) { + return false; + } + int64 offset = m2.GetOffsetOperand()->GetValue() - m1.GetOffsetOperand()->GetValue(); + return offset == k4BitSizeInt; +} + +bool CsetCbzToBeqPattern::CheckCondition(Insn &insn) { + MOperator curMop = insn.GetMachineOpcode(); + if (curMop != MOP_wcbz && curMop != MOP_xcbz && curMop != MOP_wcbnz && curMop != MOP_xcbnz) { + return false; + } + auto &useReg = static_cast(insn.GetOperand(kInsnFirstOpnd)); + prevInsn = ssaInfo->GetDefInsn(useReg); + if (prevInsn == nullptr) { + return false; + } + MOperator prevMop = prevInsn->GetMachineOpcode(); + if (prevMop != MOP_wcsetrc && prevMop != MOP_xcsetrc) { + return false; + } + auto &ccReg = static_cast(prevInsn->GetOperand(kInsnThirdOpnd)); + if (IsCCRegCrossVersion(*prevInsn, insn, ccReg)) { + return false; + } + return true; +} + +MOperator CsetCbzToBeqPattern::SelectNewMop(ConditionCode condCode, bool inverse) const { + switch (condCode) { + case CC_NE: + return inverse ? MOP_beq : MOP_bne; + case CC_EQ: + return inverse ? MOP_bne : MOP_beq; + case CC_MI: + return inverse ? MOP_bpl : MOP_bmi; + case CC_PL: + return inverse ? MOP_bmi : MOP_bpl; + case CC_VS: + return inverse ? MOP_bvc : MOP_bvs; + case CC_VC: + return inverse ? MOP_bvs : MOP_bvc; + case CC_HI: + return inverse ? MOP_bls : MOP_bhi; + case CC_LS: + return inverse ? MOP_bhi : MOP_bls; + case CC_GE: + return inverse ? MOP_blt : MOP_bge; + case CC_LT: + return inverse ? MOP_bge : MOP_blt; + case CC_HS: + return inverse ? MOP_blo : MOP_bhs; + case CC_LO: + return inverse ? MOP_bhs : MOP_blo; + case CC_LE: + return inverse ? MOP_bgt : MOP_ble; + case CC_GT: + return inverse ? MOP_ble : MOP_bgt; + case CC_CS: + return inverse ? 
MOP_bcc : MOP_bcs; + default: + return MOP_undef; + } +} + +void CsetCbzToBeqPattern::Run(BB &bb, Insn &insn) { + if (!CheckCondition(insn)) { + return; + } + MOperator curMop = insn.GetMachineOpcode(); + bool reverse = (curMop == MOP_wcbz || curMop == MOP_xcbz); + auto &labelOpnd = static_cast(insn.GetOperand(kInsnSecondOpnd)); + auto &condOpnd = static_cast(prevInsn->GetOperand(kInsnSecondOpnd)); + MOperator newMop = SelectNewMop(condOpnd.GetCode(), reverse); + ASSERT(newMop != MOP_undef, "unknown condition code"); + Insn &newInsn = cgFunc->GetInsnBuilder()->BuildInsn(newMop, prevInsn->GetOperand(kInsnThirdOpnd), labelOpnd); + bb.ReplaceInsn(insn, newInsn); + /* update ssa info */ + ssaInfo->ReplaceInsn(insn, newInsn); + optSuccess = true; + SetCurrInsn(&newInsn); + /* dump pattern info */ + if (CG_PEEP_DUMP) { + std::vector prevs; + prevs.emplace_back(prevInsn); + DumpAfterPattern(prevs, &insn, &newInsn); + } +} + +bool ExtLslToBitFieldInsertPattern::CheckCondition(Insn &insn) { + auto &useReg = static_cast(insn.GetOperand(kInsnSecondOpnd)); + prevInsn = ssaInfo->GetDefInsn(useReg); + if (prevInsn == nullptr) { + return false; + } + MOperator prevMop = prevInsn->GetMachineOpcode(); + if (prevMop != MOP_xsxtw64 && prevMop != MOP_xuxtw64) { + return false; + } + auto &immOpnd = static_cast(insn.GetOperand(kInsnThirdOpnd)); + if (immOpnd.GetValue() > k32BitSize) { + return false; + } + return true; +} + +void ExtLslToBitFieldInsertPattern::Run(BB &bb, Insn &insn) { + if (!CheckCondition(insn)) { + return; + } + auto &prevSrcReg = static_cast(prevInsn->GetOperand(kInsnSecondOpnd)); + cgFunc->InsertExtendSet(prevSrcReg.GetRegisterNumber()); + MOperator newMop = (prevInsn->GetMachineOpcode() == MOP_xsxtw64) ? MOP_xsbfizrri6i6 : MOP_xubfizrri6i6; + auto *aarFunc = static_cast(cgFunc); + auto &newImmOpnd1 = static_cast(insn.GetOperand(kInsnThirdOpnd)); + ImmOperand &newImmOpnd2 = aarFunc->CreateImmOperand(k32BitSize, k6BitSize, false); + Insn &newInsn = cgFunc->GetInsnBuilder()->BuildInsn( + newMop, insn.GetOperand(kInsnFirstOpnd), prevSrcReg, newImmOpnd1, newImmOpnd2); + bb.ReplaceInsn(insn, newInsn); + /* update ssa info */ + ssaInfo->ReplaceInsn(insn, newInsn); + optSuccess = true; + /* dump pattern info */ + if (CG_PEEP_DUMP) { + std::vector prevs; + prevs.emplace_back(prevInsn); + DumpAfterPattern(prevs, &insn, &newInsn); + } +} + +bool CselToCsetPattern::IsOpndDefByZero(const Insn &insn) const { + MOperator movMop = insn.GetMachineOpcode(); + switch (movMop) { + case MOP_xmovrr: + case MOP_wmovrr: { + return IsZeroRegister(insn.GetOperand(kInsnSecondOpnd)); + } + case MOP_wmovri32: + case MOP_xmovri64: { + auto &immOpnd = static_cast(insn.GetOperand(kInsnSecondOpnd)); + return immOpnd.GetValue() == 0; + } + default: + return false; + } +} + +bool CselToCsetPattern::IsOpndDefByOne(const Insn &insn) const { + MOperator movMop = insn.GetMachineOpcode(); + if ((movMop != MOP_wmovri32) && (movMop != MOP_xmovri64)) { + return false; + } + auto &immOpnd = static_cast(insn.GetOperand(kInsnSecondOpnd)); + return immOpnd.GetValue() == 1; +} + +bool CselToCsetPattern::CheckCondition(Insn &insn) { + MOperator curMop = insn.GetMachineOpcode(); + if (curMop != MOP_wcselrrrc && curMop != MOP_xcselrrrc) { + return false; + } + auto &useOpnd1 = static_cast(insn.GetOperand(kInsnSecondOpnd)); + prevMovInsn1 = ssaInfo->GetDefInsn(useOpnd1); + if (prevMovInsn1 == nullptr) { + return false; + } + MOperator prevMop1 = prevMovInsn1->GetMachineOpcode(); + if (prevMop1 != MOP_wmovri32 && prevMop1 != MOP_xmovri64 
&& + prevMop1 != MOP_wmovrr && prevMop1 != MOP_xmovrr) { + return false; + } + auto &useOpnd2 = static_cast(insn.GetOperand(kInsnThirdOpnd)); + prevMovInsn2 = ssaInfo->GetDefInsn(useOpnd2); + if (prevMovInsn2 == nullptr) { + return false; + } + MOperator prevMop2 = prevMovInsn2->GetMachineOpcode(); + if (prevMop2 != MOP_wmovri32 && prevMop2 != MOP_xmovri64 && + prevMop2 != MOP_wmovrr && prevMop2 != MOP_xmovrr) { + return false; + } + return true; +} + +void CselToCsetPattern::Run(BB &bb, Insn &insn) { + if (!CheckCondition(insn)) { + return; + } + Operand &dstOpnd = insn.GetOperand(kInsnFirstOpnd); + MOperator newMop = (dstOpnd.GetSize() == k64BitSize ? MOP_xcsetrc : MOP_wcsetrc); + Operand &condOpnd = insn.GetOperand(kInsnFourthOpnd); + Operand &rflag = insn.GetOperand(kInsnFifthOpnd); + Insn *newInsn = nullptr; + if (IsOpndDefByOne(*prevMovInsn1) && IsOpndDefByZero(*prevMovInsn2)) { + newInsn = &(cgFunc->GetInsnBuilder()->BuildInsn(newMop, dstOpnd, condOpnd, rflag)); + } else if (IsOpndDefByZero(*prevMovInsn1) && IsOpndDefByOne(*prevMovInsn2)) { + auto &origCondOpnd = static_cast(condOpnd); + ConditionCode inverseCondCode = GetReverseCC(origCondOpnd.GetCode()); + if (inverseCondCode == kCcLast) { + return; + } + auto *aarFunc = static_cast(cgFunc); + CondOperand &inverseCondOpnd = aarFunc->GetCondOperand(inverseCondCode); + newInsn = &(cgFunc->GetInsnBuilder()->BuildInsn(newMop, dstOpnd, inverseCondOpnd, rflag)); + } + if (newInsn == nullptr) { + return; + } + bb.ReplaceInsn(insn, *newInsn); + /* update ssa info */ + ssaInfo->ReplaceInsn(insn, *newInsn); + optSuccess = true; + SetCurrInsn(newInsn); + /* dump pattern info */ + if (CG_PEEP_DUMP) { + std::vector prevs; + prevs.emplace_back(prevMovInsn1); + prevs.emplace_back(prevMovInsn2); + DumpAfterPattern(prevs, &insn, newInsn); + } +} + +bool CsetToCincPattern::CheckDefInsn(const RegOperand &opnd, Insn &insn) { + Insn *tempDefInsn = ssaInfo->GetDefInsn(opnd); + if (tempDefInsn != nullptr && tempDefInsn->GetBB()->GetId() == insn.GetBB()->GetId()) { + InsnSet useInsns = GetAllUseInsn(opnd); + if (useInsns.size() != 1) { + return false; + } + MOperator mop = tempDefInsn->GetMachineOpcode(); + if (mop == MOP_wcsetrc || mop == MOP_xcsetrc) { + /* DefInsn and tempDefInsn are in the same BB. Select a close to useInsn(add) */ + if (!CheckRegTyCc(*tempDefInsn, insn)) { + return false; + } + defInsn = tempDefInsn; + return true; + } + } + return false; +} + +/* If a new ConditionCode is generated after csetInsn, this optimization is not performed. 
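+ * Illustrative guard case (sketch, not taken from the source):
+ *   cset w1, NE
+ *   cmp  w2, #0       <-- the condition flags are redefined between cset and add
+ *   add  w0, w3, w1   <-- cset/add is therefore not folded into cinc here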
*/ +bool CsetToCincPattern::CheckRegTyCc(const Insn &tempDefInsn, Insn &insn) { + bool betweenUseAndDef = false; + FOR_BB_INSNS_REV(bbInsn, insn.GetBB()) { + if (!bbInsn->IsMachineInstruction()) { + continue; + } + if (bbInsn->GetId() == insn.GetId()) { + betweenUseAndDef = true; + } + if (betweenUseAndDef) { + /* Select a close to useInsn(add) */ + if (defInsn != nullptr && bbInsn->GetId() == defInsn->GetId()) { + return false; + } else if (bbInsn->GetId() == tempDefInsn.GetId()) { + return true; + } else if (static_cast(bbInsn->GetOperand(kInsnFirstOpnd)).IsOfCC()) { + return false; + } + } + } + return false; +} + +bool CsetToCincPattern::CheckCondition(Insn &insn) { + RegOperand &opnd2 = static_cast(insn.GetOperand(kInsnSecondOpnd)); + RegOperand &opnd3 = static_cast(insn.GetOperand(kInsnThirdOpnd)); + bool opnd2Cset = CheckDefInsn(opnd2, insn); + bool opnd3Cset = CheckDefInsn(opnd3, insn); + if (opnd3Cset) { + csetOpnd1 = kInsnThirdOpnd; + return true; + } else if (opnd2Cset) { + csetOpnd1 = kInsnSecondOpnd; + return true; + } + return false; +} + +void CsetToCincPattern::Run(BB &bb, Insn &insn) { + RegOperand &opnd1 = static_cast(insn.GetOperand(kInsnFirstOpnd)); + /* Exclude other patterns that have been optimized. */ + Insn *newAddInsn = ssaInfo->GetDefInsn(opnd1); + if (newAddInsn == nullptr) { + return; + } + MOperator mop = newAddInsn->GetMachineOpcode(); + if (mop != MOP_waddrrr && mop != MOP_xaddrrr) { + return; + } + if (!CheckCondition(insn) || defInsn == nullptr || csetOpnd1 == 0) { + return; + } + MOperator newMop = (insn.GetMachineOpcode() == MOP_waddrrr) ? MOP_wcincrc : MOP_xcincrc; + int32 cincOpnd2 = (csetOpnd1 == kInsnSecondOpnd) ? kInsnThirdOpnd : kInsnSecondOpnd; + RegOperand &opnd2 = static_cast(insn.GetOperand(static_cast(cincOpnd2))); + Operand &condOpnd = defInsn->GetOperand(kInsnSecondOpnd); + Operand &rflag = defInsn->GetOperand(kInsnThirdOpnd); + Insn *newInsn = &(cgFunc->GetInsnBuilder()->BuildInsn(newMop, opnd1, opnd2, condOpnd, rflag)); + bb.ReplaceInsn(insn, *newInsn); + /* update ssa info */ + ssaInfo->ReplaceInsn(insn, *newInsn); + optSuccess = true; + SetCurrInsn(newInsn); + /* dump pattern info */ + if (CG_PEEP_DUMP) { + std::vector prevs; + (void)prevs.emplace_back(defInsn); + DumpAfterPattern(prevs, &insn, newInsn); + } +} + +bool AndCmpBranchesToTbzPattern::CheckAndSelectPattern(const Insn &currInsn) { + MOperator curMop = currInsn.GetMachineOpcode(); + MOperator prevAndMop = prevAndInsn->GetMachineOpcode(); + auto &andImmOpnd = static_cast(prevAndInsn->GetOperand(kInsnThirdOpnd)); + auto &cmpImmOpnd = static_cast(prevCmpInsn->GetOperand(kInsnThirdOpnd)); + if (cmpImmOpnd.GetValue() == 0) { + tbzImmVal = GetLogValueAtBase2(andImmOpnd.GetValue()); + if (tbzImmVal < 0) { + return false; + } + switch (curMop) { + case MOP_beq: + newMop = (prevAndMop == MOP_wandrri12) ? MOP_wtbz : MOP_xtbz; + break; + case MOP_bne: + newMop = (prevAndMop == MOP_wandrri12) ? MOP_wtbnz : MOP_xtbnz; + break; + default: + return false; + } + } else { + tbzImmVal = GetLogValueAtBase2(andImmOpnd.GetValue()); + int64 tmpVal = GetLogValueAtBase2(cmpImmOpnd.GetValue()); + if (tbzImmVal < 0 || tmpVal < 0 || tbzImmVal != tmpVal) { + return false; + } + switch (curMop) { + case MOP_beq: + newMop = (prevAndMop == MOP_wandrri12) ? MOP_wtbnz : MOP_xtbnz; + break; + case MOP_bne: + newMop = (prevAndMop == MOP_wandrri12) ? 
MOP_wtbz : MOP_xtbz; + break; + default: + return false; + } + } + return true; +} + +bool AndCmpBranchesToTbzPattern::CheckCondition(Insn &insn) { + MOperator curMop = insn.GetMachineOpcode(); + if (curMop != MOP_beq && curMop != MOP_bne) { + return false; + } + auto &ccReg = static_cast(insn.GetOperand(kInsnFirstOpnd)); + prevCmpInsn = ssaInfo->GetDefInsn(ccReg); + if (prevCmpInsn == nullptr) { + return false; + } + MOperator prevCmpMop = prevCmpInsn->GetMachineOpcode(); + if (prevCmpMop != MOP_wcmpri && prevCmpMop != MOP_xcmpri) { + return false; + } + auto &cmpUseReg = static_cast(prevCmpInsn->GetOperand(kInsnSecondOpnd)); + prevAndInsn = ssaInfo->GetDefInsn(cmpUseReg); + if (prevAndInsn == nullptr) { + return false; + } + MOperator prevAndMop = prevAndInsn->GetMachineOpcode(); + if (prevAndMop != MOP_wandrri12 && prevAndMop != MOP_xandrri13) { + return false; + } + CHECK_FATAL(prevAndInsn->GetOperand(kInsnFirstOpnd).GetSize() == + prevCmpInsn->GetOperand(kInsnSecondOpnd).GetSize(), "def-use reg size must be same based-on ssa"); + if (!CheckAndSelectPattern(insn)) { + return false; + } + return true; +} + +void AndCmpBranchesToTbzPattern::Run(BB &bb, Insn &insn) { + if (!CheckCondition(insn)) { + return; + } + auto *aarFunc = static_cast(cgFunc); + auto &labelOpnd = static_cast(insn.GetOperand(kInsnSecondOpnd)); + ImmOperand &tbzImmOpnd = aarFunc->CreateImmOperand(tbzImmVal, k8BitSize, false); + Insn &newInsn = cgFunc->GetInsnBuilder()->BuildInsn( + newMop, prevAndInsn->GetOperand(kInsnSecondOpnd), tbzImmOpnd, labelOpnd); + bb.ReplaceInsn(insn, newInsn); + /* update ssa info */ + ssaInfo->ReplaceInsn(insn, newInsn); + optSuccess = true; + SetCurrInsn(&newInsn); + /* dump pattern info */ + if (CG_PEEP_DUMP) { + std::vector prevs; + prevs.emplace_back(prevAndInsn); + prevs.emplace_back(prevCmpInsn); + DumpAfterPattern(prevs, &insn, &newInsn); + } +} + +bool ZeroCmpBranchesToTbzPattern::CheckAndSelectPattern(const Insn &currInsn) { + MOperator currMop = currInsn.GetMachineOpcode(); + MOperator prevMop = prevInsn->GetMachineOpcode(); + switch (prevMop) { + case MOP_wcmpri: + case MOP_xcmpri: { + regOpnd = &static_cast(prevInsn->GetOperand(kInsnSecondOpnd)); + auto &immOpnd = static_cast(prevInsn->GetOperand(kInsnThirdOpnd)); + if (immOpnd.GetValue() != 0) { + return false; + } + switch (currMop) { + case MOP_bge: + newMop = (regOpnd->GetSize() <= k32BitSize) ? MOP_wtbz : MOP_xtbz; + break; + case MOP_blt: + newMop = (regOpnd->GetSize() <= k32BitSize) ? MOP_wtbnz : MOP_xtbnz; + break; + default: + return false; + } + break; + } + case MOP_wcmprr: + case MOP_xcmprr: { + auto ®Opnd0 = static_cast(prevInsn->GetOperand(kInsnSecondOpnd)); + auto ®Opnd1 = static_cast(prevInsn->GetOperand(kInsnThirdOpnd)); + if (!IsZeroRegister(regOpnd0) && !IsZeroRegister(regOpnd1)) { + return false; + } + switch (currMop) { + case MOP_bge: + if (IsZeroRegister(regOpnd1)) { + regOpnd = &static_cast(prevInsn->GetOperand(kInsnSecondOpnd)); + newMop = (regOpnd->GetSize() <= k32BitSize) ? MOP_wtbz : MOP_xtbz; + } else { + return false; + } + break; + case MOP_ble: + if (IsZeroRegister(regOpnd0)) { + regOpnd = &static_cast(prevInsn->GetOperand(kInsnThirdOpnd)); + newMop = (regOpnd->GetSize() <= k32BitSize) ? MOP_wtbz : MOP_xtbz; + } else { + return false; + } + break; + case MOP_blt: + if (IsZeroRegister(regOpnd1)) { + regOpnd = &static_cast(prevInsn->GetOperand(kInsnSecondOpnd)); + newMop = (regOpnd->GetSize() <= k32BitSize) ? 
MOP_wtbnz : MOP_xtbnz; + } else { + return false; + } + break; + case MOP_bgt: + if (IsZeroRegister(regOpnd0)) { + regOpnd = &static_cast(prevInsn->GetOperand(kInsnThirdOpnd)); + newMop = (regOpnd->GetSize() <= k32BitSize) ? MOP_wtbnz : MOP_xtbnz; + } else { + return false; + } + break; + default: + return false; + } + break; + } + default: + return false; + } + return true; +} + +bool ZeroCmpBranchesToTbzPattern::CheckCondition(Insn &insn) { + MOperator curMop = insn.GetMachineOpcode(); + if (curMop != MOP_bge && curMop != MOP_ble && curMop != MOP_blt && curMop != MOP_bgt) { + return false; + } + CHECK_FATAL(insn.GetOperand(kInsnSecondOpnd).IsLabel(), "must be labelOpnd"); + auto &ccReg = static_cast(insn.GetOperand(kInsnFirstOpnd)); + prevInsn = ssaInfo->GetDefInsn(ccReg); + if (prevInsn == nullptr) { + return false; + } + MOperator prevMop = prevInsn->GetMachineOpcode(); + if (prevMop != MOP_wcmpri && prevMop != MOP_xcmpri && prevMop != MOP_wcmprr && prevMop != MOP_xcmprr) { + return false; + } + if (!CheckAndSelectPattern(insn)) { + return false; + } + return true; +} + +void ZeroCmpBranchesToTbzPattern::Run(BB &bb, Insn &insn) { + if (!CheckCondition(insn)) { + return; + } + CHECK_FATAL(regOpnd != nullptr, "must have regOpnd"); + auto *aarFunc = static_cast(cgFunc); + ImmOperand &bitOpnd = aarFunc->CreateImmOperand( + (regOpnd->GetSize() <= k32BitSize) ? (k32BitSize - 1) : (k64BitSize - 1), k8BitSize, false); + auto &labelOpnd = static_cast(insn.GetOperand(kInsnSecondOpnd)); + Insn &newInsn = cgFunc->GetInsnBuilder()->BuildInsn(newMop, *static_cast(regOpnd), bitOpnd, labelOpnd); + bb.ReplaceInsn(insn, newInsn); + /* update ssa info */ + ssaInfo->ReplaceInsn(insn, newInsn); + optSuccess = true; + SetCurrInsn(&newInsn); + /* dump pattern info */ + if (CG_PEEP_DUMP) { + std::vector prevs; + prevs.emplace_back(prevInsn); + DumpAfterPattern(prevs, &insn, &newInsn); + } +} + +bool LsrAndToUbfxPattern::CheckCondition(Insn &insn) { + MOperator curMop = insn.GetMachineOpcode(); + if (curMop != MOP_wandrri12 && curMop != MOP_xandrri13) { + return false; + } + int64 immValue = static_cast(insn.GetOperand(kInsnThirdOpnd)).GetValue(); + /* and_imm value must be (1 << n - 1) */ + if (immValue <= 0 || + (((static_cast(immValue)) & (static_cast(immValue) + 1)) != 0)) { + return false; + } + auto &useReg = static_cast(insn.GetOperand(kInsnSecondOpnd)); + prevInsn = ssaInfo->GetDefInsn(useReg); + if (prevInsn == nullptr) { + return false; + } + MOperator prevMop = prevInsn->GetMachineOpcode(); + if (prevMop != MOP_wlsrrri5 && prevMop != MOP_xlsrrri6) { + return false; + } + auto &prevDstOpnd = static_cast(prevInsn->GetOperand(kInsnFirstOpnd)); + auto &currUseOpnd = static_cast(insn.GetOperand(kInsnSecondOpnd)); + /* check def-use reg size found by ssa */ + CHECK_FATAL(prevDstOpnd.GetSize() == currUseOpnd.GetSize(), "def-use reg size must be same"); + auto &andDstReg = static_cast(insn.GetOperand(kInsnFirstOpnd)); + VRegVersion *andDstVersion = ssaInfo->FindSSAVersion(andDstReg.GetRegisterNumber()); + ASSERT(andDstVersion != nullptr, "find destReg Version failed"); + for (auto useDUInfoIt : andDstVersion->GetAllUseInsns()) { + if (useDUInfoIt.second == nullptr) { + continue; + } + Insn *useInsn = (useDUInfoIt.second)->GetInsn(); + if (useInsn == nullptr) { + continue; + } + MOperator useMop = useInsn->GetMachineOpcode(); + /* combine [and & cbz --> tbz] first, to eliminate more insns becase of incompleted copy prop */ + if (useMop == MOP_wcbz || useMop == MOP_xcbz || useMop == MOP_wcbnz || useMop == 
MOP_xcbnz) { + return false; + } + } + return true; +} + +void LsrAndToUbfxPattern::Run(BB &bb, Insn &insn) { + if (!CheckCondition(insn)) { + return; + } + auto *aarFunc = static_cast(cgFunc); + bool is64Bits = (static_cast(insn.GetOperand(kInsnFirstOpnd)).GetSize() == k64BitSize); + Operand &resOpnd = insn.GetOperand(kInsnFirstOpnd); + Operand &srcOpnd = prevInsn->GetOperand(kInsnSecondOpnd); + int64 immVal1 = static_cast(prevInsn->GetOperand(kInsnThirdOpnd)).GetValue(); + Operand &immOpnd1 = is64Bits ? aarFunc->CreateImmOperand(immVal1, kMaxImmVal6Bits, false) : + aarFunc->CreateImmOperand(immVal1, kMaxImmVal5Bits, false); + int64 tmpVal = static_cast(insn.GetOperand(kInsnThirdOpnd)).GetValue(); + int64 immVal2 = __builtin_ffsll(tmpVal + 1) - 1; + if ((immVal2 < k1BitSize) || (is64Bits && (immVal1 + immVal2) > k64BitSize) || + (!is64Bits && (immVal1 + immVal2) > k32BitSize)) { + return; + } + Operand &immOpnd2 = is64Bits ? aarFunc->CreateImmOperand(immVal2, kMaxImmVal6Bits, false) : + aarFunc->CreateImmOperand(immVal2, kMaxImmVal5Bits, false); + MOperator newMop = (is64Bits ? MOP_xubfxrri6i6 : MOP_wubfxrri5i5); + Insn &newInsn = cgFunc->GetInsnBuilder()->BuildInsn(newMop, resOpnd, srcOpnd, immOpnd1, immOpnd2); + bb.ReplaceInsn(insn, newInsn); + /* update ssa info */ + ssaInfo->ReplaceInsn(insn, newInsn); + optSuccess = true; + SetCurrInsn(&newInsn); + /* dump pattern info */ + if (CG_PEEP_DUMP) { + std::vector prevs; + prevs.emplace_back(prevInsn); + DumpAfterPattern(prevs, &insn, &newInsn); + } +} + +bool LslAndToUbfizPattern::CheckCondition(Insn &insn) { + MOperator mop = insn.GetMachineOpcode(); + RegOperand &opnd2 = static_cast(insn.GetOperand(kInsnSecondOpnd)); + defInsn = ssaInfo->GetDefInsn(opnd2); + InsnSet useInsns = GetAllUseInsn(opnd2); + if (useInsns.size() != 1 || defInsn == nullptr) { + return false; + } + MOperator defMop = defInsn->GetMachineOpcode(); + if ((mop == MOP_wandrri12 || mop == MOP_xandrri13) && (defMop == MOP_wlslrri5 || defMop == MOP_xlslrri6)) { + return true; + } else if ((defMop == MOP_wandrri12 || defMop == MOP_xandrri13) && (mop == MOP_wlslrri5 || mop == MOP_xlslrri6)) { + /* lsl w1, w2, #n. 
insn and w1's useInsn can do prop, skipping this pattern */ + for (auto *useInsn : GetAllUseInsn(static_cast(insn.GetOperand(kInsnFirstOpnd)))) { + if (useInsn == nullptr) { + continue; + } + if (!CheckUseInsnMop(*useInsn)) { + return false; + } + } + return true; + } + return false; +} + +bool LslAndToUbfizPattern::CheckUseInsnMop(const Insn &useInsn) { + if (useInsn.IsLoad() || useInsn.IsStore()) { + return false; + } + MOperator useMop = useInsn.GetMachineOpcode(); + switch (useMop) { + case MOP_xeorrrr: + case MOP_xeorrrrs: + case MOP_weorrrr: + case MOP_weorrrrs: + case MOP_xiorrrr: + case MOP_xiorrrrs: + case MOP_wiorrrr: + case MOP_wiorrrrs: + case MOP_xaddrrr: + case MOP_xxwaddrrre: + case MOP_xaddrrrs: + case MOP_waddrrr: + case MOP_wwwaddrrre: + case MOP_waddrrrs: + case MOP_waddrri12: + case MOP_xaddrri12: + case MOP_xsubrrr: + case MOP_xxwsubrrre: + case MOP_xsubrrrs: + case MOP_wsubrrr: + case MOP_wwwsubrrre: + case MOP_wsubrrrs: + case MOP_xinegrr: + case MOP_winegrr: + case MOP_xsxtb32: + case MOP_xsxtb64: + case MOP_xsxth32: + case MOP_xsxth64: + case MOP_xuxtb32: + case MOP_xuxth32: + case MOP_xuxtw64: + case MOP_xsxtw64: + case MOP_xubfxrri6i6: + case MOP_xcmprr: + case MOP_xwcmprre: + case MOP_xcmprrs: + case MOP_wcmprr: + case MOP_wwcmprre: + case MOP_wcmprrs: + return false; + default: + break; + } + return true; +} + +void LslAndToUbfizPattern::Run(BB &bb, Insn &insn) { + if (!CheckCondition(insn)) { + return; + } + MOperator mop = insn.GetMachineOpcode(); + Insn *newInsn = nullptr; + if (mop == MOP_wandrri12 || mop == MOP_xandrri13) { + newInsn = BuildNewInsn(insn, *defInsn, insn); + } + if (mop == MOP_wlslrri5 || mop == MOP_xlslrri6) { + newInsn = BuildNewInsn(*defInsn, insn, insn); + } + if (newInsn == nullptr) { + return; + } + bb.ReplaceInsn(insn, *newInsn); + /* update ssa info */ + ssaInfo->ReplaceInsn(insn, *newInsn); + optSuccess = true; + SetCurrInsn(newInsn); + /* dump pattern info */ + if (CG_PEEP_DUMP) { + std::vector prevs; + (void)prevs.emplace_back(defInsn); + DumpAfterPattern(prevs, &insn, newInsn); + } +} + +/* Build ubfiz insn or mov insn */ +Insn *LslAndToUbfizPattern::BuildNewInsn(const Insn &andInsn, const Insn &lslInsn, const Insn &useInsn) { + uint64 andImmValue = static_cast(static_cast(andInsn.GetOperand(kInsnThirdOpnd)).GetValue()); + /* Check whether the value of immValue is 2^n-1. */ + uint64 judgment = andImmValue & (andImmValue + 1); + if (judgment != 0) { + return nullptr; + } + MOperator mop = andInsn.GetMachineOpcode(); + MOperator useMop = useInsn.GetMachineOpcode(); + RegOperand &ubfizOpnd1 = static_cast(useInsn.GetOperand(kInsnFirstOpnd)); + uint32 opnd1Size = ubfizOpnd1.GetSize(); + RegOperand &ubfizOpnd2 = static_cast(defInsn->GetOperand(kInsnSecondOpnd)); + uint32 opnd2Size = ubfizOpnd2.GetSize(); + ImmOperand &ubfizOpnd3 = static_cast(lslInsn.GetOperand(kInsnThirdOpnd)); + uint32 mValue = static_cast(ubfizOpnd3.GetValue()); + uint32 nValue = static_cast(__builtin_popcountll(andImmValue)); + auto *aarFunc = static_cast(cgFunc); + if (opnd1Size != opnd2Size) { + return nullptr; + } + if (nValue > mValue || useMop == MOP_wlslrri5 || useMop == MOP_xlslrri6) { + MOperator newMop = (mop == MOP_wandrri12) ? MOP_wubfizrri5i5 : MOP_xubfizrri6i6; + uint32 size = (mop == MOP_wandrri12) ? kMaxImmVal5Bits : kMaxImmVal6Bits; + int64 val = 0; + if (useMop == MOP_wlslrri5 || useMop == MOP_xlslrri6) { + val = opnd1Size > (nValue + mValue) ? 
nValue : opnd1Size - mValue; + } else { + val = nValue - mValue; + } + ImmOperand &ubfizOpnd4 = aarFunc->CreateImmOperand(val, size, false); + Insn &newInsn = cgFunc->GetInsnBuilder()->BuildInsn(newMop, ubfizOpnd1, ubfizOpnd2, ubfizOpnd3, ubfizOpnd4); + return &newInsn; + } + return nullptr; +} + +bool MvnAndToBicPattern::CheckCondition(Insn &insn) { + MOperator curMop = insn.GetMachineOpcode(); + if (curMop != MOP_wandrrr && curMop != MOP_xandrrr) { + return false; + } + auto &useReg1 = static_cast(insn.GetOperand(kInsnSecondOpnd)); + auto &useReg2 = static_cast(insn.GetOperand(kInsnThirdOpnd)); + prevInsn1 = ssaInfo->GetDefInsn(useReg1); + prevInsn2 = ssaInfo->GetDefInsn(useReg2); + MOperator mop = insn.GetMachineOpcode(); + MOperator desMop = mop == MOP_xandrrr ? MOP_xnotrr : MOP_wnotrr; + op1IsMvnDef = prevInsn1 != nullptr && prevInsn1->GetMachineOpcode() == desMop; + op2IsMvnDef = prevInsn2 != nullptr && prevInsn2->GetMachineOpcode() == desMop; + if (op1IsMvnDef || op2IsMvnDef) { + return true; + } + return false; +} + +void MvnAndToBicPattern::Run(BB &bb, Insn &insn) { + if (!CheckCondition(insn)) { + return; + } + MOperator newMop = insn.GetMachineOpcode() == MOP_xandrrr ? MOP_xbicrrr : MOP_wbicrrr; + Insn *prevInsn = op1IsMvnDef ? prevInsn1 : prevInsn2; + auto &prevOpnd1 = static_cast(prevInsn->GetOperand(kInsnSecondOpnd)); + auto &opnd0 = static_cast(insn.GetOperand(kInsnFirstOpnd)); + auto &opnd1 = static_cast(insn.GetOperand(kInsnSecondOpnd)); + auto &opnd2 = static_cast(insn.GetOperand(kInsnThirdOpnd)); + Insn &newInsn = cgFunc->GetInsnBuilder()->BuildInsn( + newMop, opnd0, op1IsMvnDef ? opnd2 : opnd1, prevOpnd1); + /* update ssa info */ + ssaInfo->ReplaceInsn(insn, newInsn); + bb.ReplaceInsn(insn, newInsn); + optSuccess = true; + SetCurrInsn(&newInsn); + /* dump pattern info */ + if (CG_PEEP_DUMP) { + std::vector prevs; + prevs.emplace_back(prevInsn); + DumpAfterPattern(prevs, &insn, &newInsn); + } +} + +bool AndCbzToTbzPattern::CheckCondition(Insn &insn) { + MOperator curMop = insn.GetMachineOpcode(); + if (curMop != MOP_wcbz && curMop != MOP_xcbz && curMop != MOP_wcbnz && curMop != MOP_xcbnz) { + return false; + } + auto &useReg = static_cast(insn.GetOperand(kInsnFirstOpnd)); + prevInsn = ssaInfo ? 
ssaInfo->GetDefInsn(useReg) : insn.GetPreviousMachineInsn(); + if (prevInsn == nullptr) { + return false; + } + MOperator prevMop = prevInsn->GetMachineOpcode(); + if (prevMop != MOP_wandrri12 && prevMop != MOP_xandrri13) { + return false; + } + if (!ssaInfo && (&(prevInsn->GetOperand(kInsnFirstOpnd)) != &(insn.GetOperand(kInsnFirstOpnd)))) { + return false; + } + return true; +} + +void AndCbzToTbzPattern::Run(BB &bb, Insn &insn) { + auto *aarchFunc = static_cast(cgFunc); + if (!CheckCondition(insn)) { + return; + } + auto &andImm = static_cast(prevInsn->GetOperand(kInsnThirdOpnd)); + int64 tbzVal = GetLogValueAtBase2(andImm.GetValue()); + if (tbzVal == -1) { + return; + } + MOperator mOp = insn.GetMachineOpcode(); + MOperator newMop = MOP_undef; + switch (mOp) { + case MOP_wcbz: + newMop = MOP_wtbz; + break; + case MOP_wcbnz: + newMop = MOP_wtbnz; + break; + case MOP_xcbz: + newMop = MOP_xtbz; + break; + case MOP_xcbnz: + newMop = MOP_xtbnz; + break; + default: + CHECK_FATAL(false, "must be cbz/cbnz"); + break; + } + auto &labelOpnd = static_cast(insn.GetOperand(kInsnSecondOpnd)); + ImmOperand &tbzImm = aarchFunc->CreateImmOperand(tbzVal, k8BitSize, false); + Insn &newInsn = cgFunc->GetInsnBuilder()->BuildInsn(newMop, prevInsn->GetOperand(kInsnSecondOpnd), + tbzImm, labelOpnd); + bb.ReplaceInsn(insn, newInsn); + if (ssaInfo) { + /* update ssa info */ + ssaInfo->ReplaceInsn(insn, newInsn); + } + optSuccess = true; + SetCurrInsn(&newInsn); + /* dump pattern info */ + if (CG_PEEP_DUMP) { + std::vector prevs; + prevs.emplace_back(prevInsn); + DumpAfterPattern(prevs, &insn, &newInsn); + } +} + +bool CombineSameArithmeticPattern::CheckCondition(Insn &insn) { + MOperator curMop = insn.GetMachineOpcode(); + if (std::find(validMops.begin(), validMops.end(), curMop) == validMops.end()) { + return false; + } + Operand &useOpnd = insn.GetOperand(kInsnSecondOpnd); + CHECK_FATAL(useOpnd.IsRegister(), "expect regOpnd"); + prevInsn = ssaInfo->GetDefInsn(static_cast(useOpnd)); + if (prevInsn == nullptr) { + return false; + } + if (prevInsn->GetMachineOpcode() != curMop) { + return false; + } + auto &prevDefOpnd = prevInsn->GetOperand(kInsnFirstOpnd); + CHECK_FATAL(prevDefOpnd.IsRegister(), "expect regOpnd"); + InsnSet useInsns = GetAllUseInsn(static_cast(prevDefOpnd)); + if (useInsns.size() > 1) { + return false; + } + auto *aarFunc = static_cast(cgFunc); + CHECK_FATAL(prevInsn->GetOperand(kInsnThirdOpnd).IsIntImmediate(), "expect immOpnd"); + CHECK_FATAL(insn.GetOperand(kInsnThirdOpnd).IsIntImmediate(), "expect immOpnd"); + auto &prevImmOpnd = static_cast(prevInsn->GetOperand(kInsnThirdOpnd)); + auto &curImmOpnd = static_cast(insn.GetOperand(kInsnThirdOpnd)); + int64 prevImm = prevImmOpnd.GetValue(); + int64 curImm = curImmOpnd.GetValue(); + newImmOpnd = &aarFunc->CreateImmOperand(prevImmOpnd.GetValue() + curImmOpnd.GetValue(), + curImmOpnd.GetSize(), curImmOpnd.IsSignedValue()); + switch (curMop) { + case MOP_wlsrrri5: + case MOP_wasrrri5: + case MOP_wlslrri5: { + if ((prevImm + curImm) < k0BitSizeInt || (prevImm + curImm) >= k32BitSizeInt) { + return false; + } + break; + } + case MOP_xlsrrri6: + case MOP_xasrrri6: + case MOP_xlslrri6: { + if ((prevImm + curImm) < k0BitSizeInt || (prevImm + curImm) >= k64BitSizeInt) { + return false; + } + break; + } + case MOP_waddrri12: + case MOP_xaddrri12: + case MOP_wsubrri12: + case MOP_xsubrri12: { + if (!newImmOpnd->IsSingleInstructionMovable()) { + return false; + } + break; + } + default: + return false; + } + return true; +} + +void 
CombineSameArithmeticPattern::Run(BB &bb, Insn &insn) { + if (!CheckCondition(insn)) { + return; + } + Insn &newInsn = cgFunc->GetInsnBuilder()->BuildInsn(insn.GetMachineOpcode(), + insn.GetOperand(kInsnFirstOpnd), + prevInsn->GetOperand(kInsnSecondOpnd), + *newImmOpnd); + bb.ReplaceInsn(insn, newInsn); + /* update ssa info */ + ssaInfo->ReplaceInsn(insn, newInsn); + optSuccess = true; + SetCurrInsn(&newInsn); + /* dump pattern info */ + if (CG_PEEP_DUMP) { + std::vector prevs; + (void)prevs.emplace_back(prevInsn); + DumpAfterPattern(prevs, &insn, &newInsn); + } +} + +bool LogicShiftAndOrrToExtrPattern::CheckCondition(Insn &insn) { + MOperator curMop = insn.GetMachineOpcode(); + if (curMop != MOP_wiorrrr && curMop != MOP_xiorrrr && curMop != MOP_wiorrrrs && curMop != MOP_xiorrrrs) { + return false; + } + Operand &curDstOpnd = insn.GetOperand(kInsnFirstOpnd); + is64Bits = (curDstOpnd.GetSize() == k64BitSize); + if (curMop == MOP_wiorrrr || curMop == MOP_xiorrrr) { + auto &useReg1 = static_cast(insn.GetOperand(kInsnSecondOpnd)); + Insn *prevInsn1 = ssaInfo->GetDefInsn(useReg1); + auto &useReg2 = static_cast(insn.GetOperand(kInsnThirdOpnd)); + Insn *prevInsn2 = ssaInfo->GetDefInsn(useReg2); + if (prevInsn1 == nullptr || prevInsn2 == nullptr) { + return false; + } + MOperator prevMop1 = prevInsn1->GetMachineOpcode(); + MOperator prevMop2 = prevInsn2->GetMachineOpcode(); + if ((prevMop1 == MOP_wlsrrri5 || prevMop1 == MOP_xlsrrri6) && + (prevMop2 == MOP_wlslrri5 || prevMop2 == MOP_xlslrri6)) { + prevLsrInsn = prevInsn1; + prevLslInsn = prevInsn2; + } else if ((prevMop2 == MOP_wlsrrri5 || prevMop2 == MOP_xlsrrri6) && + (prevMop1 == MOP_wlslrri5 || prevMop1 == MOP_xlslrri6)) { + prevLsrInsn = prevInsn2; + prevLslInsn = prevInsn1; + } else { + return false; + } + int64 prevLsrImmValue = static_cast(prevLsrInsn->GetOperand(kInsnThirdOpnd)).GetValue(); + int64 prevLslImmValue = static_cast(prevLslInsn->GetOperand(kInsnThirdOpnd)).GetValue(); + if ((prevLsrImmValue + prevLslImmValue) < 0) { + return false; + } + if ((is64Bits && (prevLsrImmValue + prevLslImmValue) != k64BitSize) || + (!is64Bits && (prevLsrImmValue + prevLslImmValue) != k32BitSize)) { + return false; + } + shiftValue = prevLsrImmValue; + } else if (curMop == MOP_wiorrrrs || curMop == MOP_xiorrrrs) { + auto &useReg = static_cast(insn.GetOperand(kInsnSecondOpnd)); + Insn *prevInsn = ssaInfo->GetDefInsn(useReg); + if (prevInsn == nullptr) { + return false; + } + MOperator prevMop = prevInsn->GetMachineOpcode(); + if (prevMop != MOP_wlsrrri5 && prevMop != MOP_xlsrrri6 && prevMop != MOP_wlslrri5 && prevMop != MOP_xlslrri6) { + return false; + } + int64 prevImm = static_cast(prevInsn->GetOperand(kInsnThirdOpnd)).GetValue(); + auto &shiftOpnd = static_cast(insn.GetOperand(kInsnFourthOpnd)); + uint32 shiftAmount = shiftOpnd.GetShiftAmount(); + if (shiftOpnd.GetShiftOp() == BitShiftOperand::kLSL && (prevMop == MOP_wlsrrri5 || prevMop == MOP_xlsrrri6)) { + prevLsrInsn = prevInsn; + shiftValue = prevImm; + } else if (shiftOpnd.GetShiftOp() == BitShiftOperand::kLSR && + (prevMop == MOP_wlslrri5 || prevMop == MOP_xlslrri6)) { + prevLslInsn = prevInsn; + shiftValue = shiftAmount; + } else { + return false; + } + if (prevImm + static_cast(shiftAmount) < 0) { + return false; + } + if ((is64Bits && (prevImm + static_cast(shiftAmount)) != k64BitSize) || + (!is64Bits && (prevImm + static_cast(shiftAmount)) != k32BitSize)) { + return false; + } + } else { + CHECK_FATAL(false, "must be above mop"); + return false; + } + return true; +} + +void 
LogicShiftAndOrrToExtrPattern::Run(BB &bb, Insn &insn) { + if (!CheckCondition(insn)) { + return; + } + auto *aarFunc = static_cast(cgFunc); + Operand &opnd1 = (prevLslInsn == nullptr ? insn.GetOperand(kInsnThirdOpnd) : + prevLslInsn->GetOperand(kInsnSecondOpnd)); + Operand &opnd2 = (prevLsrInsn == nullptr ? insn.GetOperand(kInsnThirdOpnd) : + prevLsrInsn->GetOperand(kInsnSecondOpnd)); + ImmOperand &immOpnd = is64Bits ? aarFunc->CreateImmOperand(shiftValue, kMaxImmVal6Bits, false) : + aarFunc->CreateImmOperand(shiftValue, kMaxImmVal5Bits, false); + MOperator newMop = is64Bits ? MOP_xextrrrri6 : MOP_wextrrrri5; + Insn &newInsn = cgFunc->GetInsnBuilder()->BuildInsn(newMop, insn.GetOperand(kInsnFirstOpnd), opnd1, opnd2, immOpnd); + bb.ReplaceInsn(insn, newInsn); + /* update ssa info */ + ssaInfo->ReplaceInsn(insn, newInsn); + optSuccess = true; + SetCurrInsn(&newInsn); + /* dump pattern info */ + if (CG_PEEP_DUMP) { + std::vector prevs; + prevs.emplace_back(prevLsrInsn); + prevs.emplace_back(prevLslInsn); + DumpAfterPattern(prevs, &insn, &newInsn); + } +} + +void SimplifyMulArithmeticPattern::SetArithType(const Insn &currInsn) { + MOperator mOp = currInsn.GetMachineOpcode(); + switch (mOp) { + case MOP_waddrrr: + case MOP_xaddrrr: { + arithType = kAdd; + isFloat = false; + break; + } + case MOP_dadd: + case MOP_sadd: { + arithType = kFAdd; + isFloat = true; + break; + } + case MOP_wsubrrr: + case MOP_xsubrrr: { + arithType = kSub; + isFloat = false; + validOpndIdx = kInsnThirdOpnd; + break; + } + case MOP_dsub: + case MOP_ssub: { + arithType = kFSub; + isFloat = true; + validOpndIdx = kInsnThirdOpnd; + break; + } + case MOP_xinegrr: + case MOP_winegrr: { + arithType = kNeg; + isFloat = false; + validOpndIdx = kInsnSecondOpnd; + break; + } + case MOP_wfnegrr: + case MOP_xfnegrr: { + arithType = kFNeg; + isFloat = true; + validOpndIdx = kInsnSecondOpnd; + break; + } + default: { + CHECK_FATAL(false, "must be above mop"); + break; + } + } +} + +bool SimplifyMulArithmeticPattern::CheckCondition(Insn &insn) { + if (arithType == kUndef || validOpndIdx < 0) { + return false; + } + auto &useReg = static_cast(insn.GetOperand(static_cast(validOpndIdx))); + prevInsn = ssaInfo->GetDefInsn(useReg); + if (prevInsn == nullptr) { + return false; + } + regno_t useRegNO = useReg.GetRegisterNumber(); + VRegVersion *useVersion = ssaInfo->FindSSAVersion(useRegNO); + ASSERT(useVersion != nullptr, "useVersion should not be nullptr"); + if (useVersion->GetAllUseInsns().size() > 1) { + return false; + } + MOperator currMop = insn.GetMachineOpcode(); + if (currMop == MOP_dadd || currMop == MOP_sadd || currMop == MOP_dsub || currMop == MOP_ssub || + currMop == MOP_wfnegrr || currMop == MOP_xfnegrr) { + isFloat = true; + } + MOperator prevMop = prevInsn->GetMachineOpcode(); + if (prevMop != MOP_wmulrrr && prevMop != MOP_xmulrrr && prevMop != MOP_xvmuld && prevMop != MOP_xvmuls) { + return false; + } + if (isFloat && (prevMop == MOP_wmulrrr || prevMop == MOP_xmulrrr)) { + return false; + } + if (!isFloat && (prevMop == MOP_xvmuld || prevMop == MOP_xvmuls)) { + return false; + } + if ((currMop == MOP_xaddrrr) || (currMop == MOP_waddrrr)) { + return true; + } + return CGOptions::IsFastMath(); +} + +void SimplifyMulArithmeticPattern::DoOptimize(BB &currBB, Insn &currInsn) { + Operand &resOpnd = currInsn.GetOperand(kInsnFirstOpnd); + Operand &opndMulOpnd1 = prevInsn->GetOperand(kInsnSecondOpnd); + Operand &opndMulOpnd2 = prevInsn->GetOperand(kInsnThirdOpnd); + bool is64Bits = (static_cast(resOpnd).GetSize() == k64BitSize); + 
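+ /* Illustrative integer fold (sketch): mul w1, w2, w3 ; add w0, w1, w4 ===> madd w0, w2, w3, w4 */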
/* may overflow */ + if ((prevInsn->GetOperand(kInsnFirstOpnd).GetSize() == k32BitSize) && is64Bits) { + return; + } + MOperator newMop = is64Bits ? curMop2NewMopTable[arithType][1] : curMop2NewMopTable[arithType][0]; + Insn *newInsn = nullptr; + if (arithType == kNeg || arithType == kFNeg) { + newInsn = &(cgFunc->GetInsnBuilder()->BuildInsn(newMop, resOpnd, opndMulOpnd1, opndMulOpnd2)); + } else { + Operand &opnd3 = (validOpndIdx == kInsnSecondOpnd) ? currInsn.GetOperand(kInsnThirdOpnd) : + currInsn.GetOperand(kInsnSecondOpnd); + newInsn = &(cgFunc->GetInsnBuilder()->BuildInsn(newMop, resOpnd, opndMulOpnd1, opndMulOpnd2, opnd3)); + } + CHECK_FATAL(newInsn != nullptr, "must create newInsn"); + currBB.ReplaceInsn(currInsn, *newInsn); + /* update ssa info */ + ssaInfo->ReplaceInsn(currInsn, *newInsn); + optSuccess = true; + /* dump pattern info */ + if (CG_PEEP_DUMP) { + std::vector prevs; + prevs.emplace_back(prevInsn); + DumpAfterPattern(prevs, &currInsn, newInsn); + } +} + +void SimplifyMulArithmeticPattern::Run(BB &bb, Insn &insn) { + SetArithType(insn); + if (arithType == kAdd || arithType == kFAdd) { + validOpndIdx = kInsnSecondOpnd; + if (CheckCondition(insn)) { + DoOptimize(bb, insn); + return; + } else { + validOpndIdx = kInsnThirdOpnd; + } + } + if (!CheckCondition(insn)) { + return; + } + DoOptimize(bb, insn); +} + +void ElimSpecificExtensionPattern::SetSpecificExtType(const Insn &currInsn) { + MOperator mOp = currInsn.GetMachineOpcode(); + switch (mOp) { + case MOP_xsxtb32: { + is64Bits = false; + extTypeIdx = SXTB; + break; + } + case MOP_xsxtb64: { + is64Bits = true; + extTypeIdx = SXTB; + break; + } + case MOP_xsxth32: { + is64Bits = false; + extTypeIdx = SXTH; + break; + } + case MOP_xsxth64: { + is64Bits = true; + extTypeIdx = SXTH; + break; + } + case MOP_xsxtw64: { + is64Bits = true; + extTypeIdx = SXTW; + break; + } + case MOP_xuxtb32: { + is64Bits = false; + extTypeIdx = UXTB; + break; + } + case MOP_xuxth32: { + is64Bits = false; + extTypeIdx = UXTH; + break; + } + case MOP_xuxtw64: { + is64Bits = true; + extTypeIdx = UXTW; + break; + } + default: { + extTypeIdx = EXTUNDEF; + } + } +} + +void ElimSpecificExtensionPattern::SetOptSceneType() { + if (prevInsn->IsCall()) { + sceneType = kSceneMov; + return; + } + MOperator preMop = prevInsn->GetMachineOpcode(); + switch (preMop) { + case MOP_wldr: + case MOP_wldrb: + case MOP_wldrsb: + case MOP_wldrh: + case MOP_wldrsh: + case MOP_xldrsw: { + sceneType = kSceneLoad; + break; + } + case MOP_wmovri32: + case MOP_xmovri64: { + sceneType = kSceneMov; + break; + } + case MOP_xsxtb32: + case MOP_xsxtb64: + case MOP_xsxth32: + case MOP_xsxth64: + case MOP_xsxtw64: + case MOP_xuxtb32: + case MOP_xuxth32: + case MOP_xuxtw64: { + sceneType = kSceneSameExt; + break; + } + default: { + sceneType = kSceneUndef; + } + } +} + +void ElimSpecificExtensionPattern::ReplaceExtWithMov(Insn &currInsn) { + auto &prevDstOpnd = static_cast(prevInsn->GetOperand(kInsnFirstOpnd)); + auto &currDstOpnd = static_cast(currInsn.GetOperand(kInsnFirstOpnd)); + MOperator newMop = is64Bits ? 
MOP_xmovrr : MOP_wmovrr; + Insn &newInsn = cgFunc->GetInsnBuilder()->BuildInsn(newMop, currDstOpnd, prevDstOpnd); + currBB->ReplaceInsn(currInsn, newInsn); + /* update ssa info */ + ssaInfo->ReplaceInsn(currInsn, newInsn); + optSuccess = true; + /* dump pattern info */ + if (CG_PEEP_DUMP) { + std::vector prevs; + prevs.emplace_back(prevInsn); + DumpAfterPattern(prevs, &currInsn, &newInsn); + } +} + +void ElimSpecificExtensionPattern::ElimExtensionAfterMov(Insn &insn) { + if (&insn == currBB->GetFirstInsn()) { + return; + } + auto &prevDstOpnd = static_cast(prevInsn->GetOperand(kInsnFirstOpnd)); + auto &currDstOpnd = static_cast(insn.GetOperand(kInsnFirstOpnd)); + auto &currSrcOpnd = static_cast(insn.GetOperand(kInsnSecondOpnd)); + if (prevDstOpnd.GetSize() != currDstOpnd.GetSize()) { + return; + } + MOperator currMop = insn.GetMachineOpcode(); + /* example 2) [mov w0, R0] is return value of call and return size is not of range */ + if (prevInsn->IsCall() && (currSrcOpnd.GetRegisterNumber() == R0 || currSrcOpnd.GetRegisterNumber() == V0) && + currDstOpnd.GetRegisterNumber() == currSrcOpnd.GetRegisterNumber()) { + uint32 retSize = prevInsn->GetRetSize(); + if (retSize > 0 && + ((currMop == MOP_xuxtb32 && retSize <= k1ByteSize) || + (currMop == MOP_xuxth32 && retSize <= k2ByteSize) || + (currMop == MOP_xuxtw64 && retSize <= k4ByteSize))) { + ReplaceExtWithMov(insn); + } + return; + } + if (prevInsn->IsCall() && prevInsn->GetIsCallReturnSigned()) { + return; + } + auto &immMovOpnd = static_cast(prevInsn->GetOperand(kInsnSecondOpnd)); + int64 value = immMovOpnd.GetValue(); + uint64 minRange = extValueRangeTable[extTypeIdx][0]; + uint64 maxRange = extValueRangeTable[extTypeIdx][1]; + if (currMop == MOP_xsxtb32 || currMop == MOP_xsxth32) { + /* value should be in valid range */ + if (static_cast(value) >= minRange && static_cast(value) <= maxRange && + immMovOpnd.IsSingleInstructionMovable(currDstOpnd.GetSize())) { + ReplaceExtWithMov(insn); + } + } else if (currMop == MOP_xuxtb32 || currMop == MOP_xuxth32) { + if ((static_cast(value) & minRange) == 0) { + ReplaceExtWithMov(insn); + } + } else if (currMop == MOP_xuxtw64) { + ReplaceExtWithMov(insn); + } else { + /* MOP_xsxtb64 & MOP_xsxth64 & MOP_xsxtw64 */ + if ((static_cast(value) & minRange) == 0 && immMovOpnd.IsSingleInstructionMovable(currDstOpnd.GetSize())) { + ReplaceExtWithMov(insn); + } + } +} + +bool ElimSpecificExtensionPattern::IsValidLoadExtPattern(MOperator oldMop, MOperator newMop) const { + if (oldMop == newMop) { + return true; + } + auto *aarFunc = static_cast(cgFunc); + auto *memOpnd = static_cast(prevInsn->GetMemOpnd()); + ASSERT(!prevInsn->IsStorePair(), "do not do ElimSpecificExtensionPattern for str pair"); + ASSERT(!prevInsn->IsLoadPair(), "do not do ElimSpecificExtensionPattern for ldr pair"); + if (memOpnd->GetAddrMode() == MemOperand::kAddrModeBOi && + !aarFunc->IsOperandImmValid(newMop, memOpnd, kInsnSecondOpnd)) { + return false; + } + uint32 shiftAmount = memOpnd->ShiftAmount(); + if (shiftAmount == 0) { + return true; + } + const InsnDesc *md = &AArch64CG::kMd[newMop]; + uint32 memSize = md->GetOperandSize() / k8BitSize; + uint32 validShiftAmount = ((memSize == k8BitSize) ? k3BitSize : ((memSize == k4BitSize) ? k2BitSize : + ((memSize == k2BitSize) ? 
k1BitSize : k0BitSize))); + if (shiftAmount != validShiftAmount) { + return false; + } + return true; +} + +MOperator ElimSpecificExtensionPattern::SelectNewLoadMopByBitSize(MOperator lowBitMop) const { + auto &prevDstOpnd = static_cast(prevInsn->GetOperand(kInsnFirstOpnd)); + switch (lowBitMop) { + case MOP_wldrsb: { + prevDstOpnd.SetSize(k64BitSize); + return MOP_xldrsb; + } + case MOP_wldrsh: { + prevDstOpnd.SetSize(k64BitSize); + return MOP_xldrsh; + } + default: + break; + } + return lowBitMop; +} + +void ElimSpecificExtensionPattern::ElimExtensionAfterLoad(Insn &insn) { + if (extTypeIdx == EXTUNDEF) { + return; + } + MOperator prevOrigMop = prevInsn->GetMachineOpcode(); + for (uint8 i = 0; i < kPrevLoadPatternNum; i++) { + ASSERT(extTypeIdx < SETS, "extTypeIdx must be lower than SETS"); + if (prevOrigMop != loadMappingTable[extTypeIdx][i][0]) { + continue; + } + MOperator prevNewMop = loadMappingTable[extTypeIdx][i][1]; + if (!IsValidLoadExtPattern(prevOrigMop, prevNewMop)) { + return; + } + if (is64Bits && extTypeIdx >= SXTB && extTypeIdx <= SXTW) { + prevNewMop = SelectNewLoadMopByBitSize(prevNewMop); + } + auto &prevDstOpnd = static_cast(prevInsn->GetOperand(kInsnFirstOpnd)); + auto &currDstOpnd = static_cast(insn.GetOperand(kInsnFirstOpnd)); + /* to avoid {mov [64], [32]} in the case of big endian */ + if (prevDstOpnd.GetSize() != currDstOpnd.GetSize()) { + return; + } + + auto *newMemOp = + GetOrCreateMemOperandForNewMOP(*cgFunc, *prevInsn, prevNewMop); + + if (newMemOp == nullptr) { + return; + } + + auto *aarCGSSAInfo = static_cast(ssaInfo); + if (CG_PEEP_DUMP) { + LogInfo::MapleLogger() << ">>>>>>> In " << GetPatternName() << " : <<<<<<<\n"; + if (prevOrigMop != prevNewMop) { + LogInfo::MapleLogger() << "======= OrigPrevInsn : \n"; + prevInsn->Dump(); + aarCGSSAInfo->DumpInsnInSSAForm(*prevInsn); + } + } + + prevInsn->SetMemOpnd(newMemOp); + prevInsn->SetMOP(AArch64CG::kMd[prevNewMop]); + + if ((prevOrigMop != prevNewMop) && CG_PEEP_DUMP) { + LogInfo::MapleLogger() << "======= NewPrevInsn : \n"; + prevInsn->Dump(); + aarCGSSAInfo->DumpInsnInSSAForm(*prevInsn); + } + + MOperator movMop = is64Bits ? 
MOP_xmovrr : MOP_wmovrr; + Insn &newMovInsn = cgFunc->GetInsnBuilder()->BuildInsn(movMop, insn.GetOperand(kInsnFirstOpnd), + prevInsn->GetOperand(kInsnFirstOpnd)); + currBB->ReplaceInsn(insn, newMovInsn); + /* update ssa info */ + ssaInfo->ReplaceInsn(insn, newMovInsn); + optSuccess = true; + /* dump pattern info */ + if (CG_PEEP_DUMP) { + LogInfo::MapleLogger() << "======= ReplacedInsn :\n"; + insn.Dump(); + aarCGSSAInfo->DumpInsnInSSAForm(insn); + LogInfo::MapleLogger() << "======= NewInsn :\n"; + newMovInsn.Dump(); + aarCGSSAInfo->DumpInsnInSSAForm(newMovInsn); + } + } +} + +void ElimSpecificExtensionPattern::ElimExtensionAfterSameExt(Insn &insn) { + if (extTypeIdx == EXTUNDEF) { + return; + } + auto &prevDstOpnd = static_cast(prevInsn->GetOperand(kInsnFirstOpnd)); + auto &currDstOpnd = static_cast(insn.GetOperand(kInsnFirstOpnd)); + if (prevDstOpnd.GetSize() != currDstOpnd.GetSize()) { + return; + } + MOperator prevMop = prevInsn->GetMachineOpcode(); + MOperator currMop = insn.GetMachineOpcode(); + for (uint8 i = 0; i < kSameExtPatternNum; i++) { + ASSERT(extTypeIdx < SETS, "extTypeIdx must be lower than SETS"); + if (sameExtMappingTable[extTypeIdx][i][0] == MOP_undef || sameExtMappingTable[extTypeIdx][i][1] == MOP_undef) { + continue; + } + if (prevMop == sameExtMappingTable[extTypeIdx][i][0] && currMop == sameExtMappingTable[extTypeIdx][i][1]) { + ReplaceExtWithMov(insn); + } + } +} + +bool ElimSpecificExtensionPattern::CheckCondition(Insn &insn) { + auto &useReg = static_cast(insn.GetOperand(kInsnSecondOpnd)); + prevInsn = ssaInfo->GetDefInsn(useReg); + InsnSet useInsns = GetAllUseInsn(useReg); + if ((prevInsn == nullptr) || (useInsns.size() != 1)) { + return false; + } + SetOptSceneType(); + SetSpecificExtType(insn); + if (sceneType == kSceneUndef) { + return false; + } + return true; +} + +void ElimSpecificExtensionPattern::Run(BB &bb, Insn &insn) { + if (!CheckCondition(insn)) { + return; + } + if (sceneType == kSceneMov) { + ElimExtensionAfterMov(insn); + } else if (sceneType == kSceneLoad) { + ElimExtensionAfterLoad(insn); + } else if (sceneType == kSceneSameExt) { + ElimExtensionAfterSameExt(insn); + } +} + +void OneHoleBranchPattern::FindNewMop(const BB &bb, const Insn &insn) { + if (&insn != bb.GetLastInsn()) { + return; + } + MOperator thisMop = insn.GetMachineOpcode(); + switch (thisMop) { + case MOP_wcbz: + newOp = MOP_wtbnz; + break; + case MOP_wcbnz: + newOp = MOP_wtbz; + break; + case MOP_xcbz: + newOp = MOP_xtbnz; + break; + case MOP_xcbnz: + newOp = MOP_xtbz; + break; + default: + break; + } +} + +/* + * pattern1: + * uxtb w0, w1 <-----(ValidBitsNum <= 8) + * cbz w0, .label + * ===> + * cbz w1, .label + * + * pattern2: + * uxtb w2, w1 <-----(ValidBitsNum == 1) + * eor w3, w2, #1 + * cbz w3, .label + * ===> + * tbnz w1, #0, .label + */ +void OneHoleBranchPattern::Run(BB &bb, Insn &insn) { + if (!CheckCondition(insn)) { + return; + } + LabelOperand &label = static_cast(insn.GetOperand(kInsnSecondOpnd)); + bool pattern1 = (prevInsn->GetMachineOpcode() == MOP_xuxtb32) && + (static_cast(prevInsn->GetOperand(kInsnSecondOpnd)).GetValidBitsNum() <= k8BitSize || + static_cast(prevInsn->GetOperand(kInsnFirstOpnd)).GetValidBitsNum() <= k8BitSize); + if (pattern1) { + Insn &newCbzInsn = cgFunc->GetInsnBuilder()->BuildInsn( + insn.GetMachineOpcode(), prevInsn->GetOperand(kInsnSecondOpnd), label); + bb.ReplaceInsn(insn, newCbzInsn); + ssaInfo->ReplaceInsn(insn, newCbzInsn); + optSuccess = true; + SetCurrInsn(&newCbzInsn); + if (CG_PEEP_DUMP) { + std::vector prevs; + 
prevs.emplace_back(prevInsn); + DumpAfterPattern(prevs, &newCbzInsn, nullptr); + } + return; + } + bool pattern2 = (prevInsn->GetMachineOpcode() == MOP_xeorrri13 || prevInsn->GetMachineOpcode() == MOP_weorrri12) && + (static_cast(prevInsn->GetOperand(kInsnThirdOpnd)).GetValue() == 1); + if (pattern2) { + if (!CheckPrePrevInsn()) { + return; + } + AArch64CGFunc *aarch64CGFunc = static_cast(cgFunc); + ImmOperand &oneHoleOpnd = aarch64CGFunc->CreateImmOperand(0, k8BitSize, false); + auto ®Operand = static_cast(prePrevInsn->GetOperand(kInsnSecondOpnd)); + Insn &newTbzInsn = cgFunc->GetInsnBuilder()->BuildInsn(newOp, regOperand, oneHoleOpnd, label); + bb.ReplaceInsn(insn, newTbzInsn); + ssaInfo->ReplaceInsn(insn, newTbzInsn); + optSuccess = true; + if (CG_PEEP_DUMP) { + std::vector prevs; + prevs.emplace_back(prevInsn); + prevs.emplace_back(prePrevInsn); + DumpAfterPattern(prevs, &newTbzInsn, nullptr); + } + } +} + +bool OneHoleBranchPattern::CheckCondition(Insn &insn) { + MOperator curMop = insn.GetMachineOpcode(); + if (curMop != MOP_wcbz && curMop != MOP_xcbz && curMop != MOP_wcbnz && curMop != MOP_xcbnz) { + return false; + } + FindNewMop(*insn.GetBB(), insn); + if (newOp == MOP_undef) { + return false; + } + auto &useReg = static_cast(insn.GetOperand(kInsnFirstOpnd)); + prevInsn = ssaInfo->GetDefInsn(useReg); + if (prevInsn == nullptr) { + return false; + } + if (&(prevInsn->GetOperand(kInsnFirstOpnd)) != &(insn.GetOperand(kInsnFirstOpnd))) { + return false; + } + return true; +} + +bool OneHoleBranchPattern::CheckPrePrevInsn() { + auto &useReg = static_cast(prevInsn->GetOperand(kInsnSecondOpnd)); + prePrevInsn = ssaInfo->GetDefInsn(useReg); + if (prePrevInsn == nullptr) { + return false; + } + if (prePrevInsn->GetMachineOpcode() != MOP_xuxtb32 || + static_cast(prePrevInsn->GetOperand(kInsnSecondOpnd)).GetValidBitsNum() != 1) { + return false; + } + if (&(prePrevInsn->GetOperand(kInsnFirstOpnd)) != &(prevInsn->GetOperand(kInsnSecondOpnd))) { + return false; + } + return true; +} + +void OrrToMovPattern::Run(BB &bb, Insn &insn) { + if (!CheckCondition(insn)) { + return; + } + RegOperand *reg1 = &static_cast(insn.GetOperand(kInsnFirstOpnd)); + Insn &newInsn = cgFunc->GetInsnBuilder()->BuildInsn(newMop, *reg1, *reg2); + bb.ReplaceInsn(insn, newInsn); + ssaInfo->ReplaceInsn(insn, newInsn); + optSuccess = true; + SetCurrInsn(&newInsn); + if (CG_PEEP_DUMP) { + std::vector prevs; + prevs.emplace_back(&insn); + DumpAfterPattern(prevs, &newInsn, nullptr); + } +} + +bool OrrToMovPattern::CheckCondition(Insn &insn) { + MOperator curMop = insn.GetMachineOpcode(); + if (curMop != MOP_wiorrri12 && curMop != MOP_xiorrri13) { + return false; + } + MOperator thisMop = insn.GetMachineOpcode(); + Operand *opndOfOrr = nullptr; + switch (thisMop) { + case MOP_wiorrri12: { /* opnd1 is reg32 and opnd3 is immediate. */ + opndOfOrr = &(insn.GetOperand(kInsnThirdOpnd)); + reg2 = &static_cast(insn.GetOperand(kInsnSecondOpnd)); + newMop = MOP_wmovrr; + break; + } + case MOP_xiorrri13: { /* opnd1 is reg64 and opnd3 is immediate. 
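+ * e.g. (illustrative) orr x0, x1, #0 ===> mov x0, x1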
*/ + opndOfOrr = &(insn.GetOperand(kInsnThirdOpnd)); + reg2 = &static_cast(insn.GetOperand(kInsnSecondOpnd)); + newMop = MOP_xmovrr; + break; + } + default: + return false; + } + CHECK_FATAL(opndOfOrr->IsIntImmediate(), "expects immediate operand"); + ImmOperand *immOpnd = static_cast(opndOfOrr); + if (immOpnd->GetValue() != 0) { + return false; + } + return true; +} + +void AArch64CGPeepHole::DoNormalOptimize(BB &bb, Insn &insn) { + MOperator thisMop = insn.GetMachineOpcode(); + manager = peepMemPool->New(*cgFunc, bb, insn); + switch (thisMop) { + /* + * e.g. + * execute before & after RA: manager->NormalPatternOpt<>(true) + * execute before RA: manager->NormalPatternOpt<>(!cgFunc->IsAfterRegAlloc()) + * execute after RA: manager->NormalPatternOpt<>(cgFunc->IsAfterRegAlloc()) + */ + case MOP_xubfxrri6i6: { + manager->NormalPatternOpt(!cgFunc->IsAfterRegAlloc()); + break; + } + case MOP_xmovzri16: { + manager->NormalPatternOpt(!cgFunc->IsAfterRegAlloc()); + break; + } + case MOP_wcmpri: { + manager->NormalPatternOpt(!cgFunc->IsAfterRegAlloc()); + break; + } + case MOP_wcmprr: { + manager->NormalPatternOpt(cgFunc->IsAfterRegAlloc()); + break; + } + case MOP_wmovrr: + case MOP_xmovrr: + case MOP_xvmovs: + case MOP_xvmovd: + case MOP_vmovuu: + case MOP_vmovvv: { + manager->NormalPatternOpt(cgFunc->IsAfterRegAlloc()); + break; + } + case MOP_wstrb: + case MOP_wldrb: + case MOP_wstrh: + case MOP_wldrh: + case MOP_xldr: + case MOP_xstr: + case MOP_wldr: + case MOP_wstr: + case MOP_dldr: + case MOP_dstr: + case MOP_sldr: + case MOP_sstr: + case MOP_qldr: + case MOP_qstr: { + manager->NormalPatternOpt(cgFunc->IsAfterRegAlloc()); + manager->NormalPatternOpt(cgFunc->IsAfterRegAlloc()); + manager->NormalPatternOpt(cgFunc->IsAfterRegAlloc()); + break; + } + case MOP_xvmovrv: + case MOP_xvmovrd: { + manager->NormalPatternOpt(cgFunc->IsAfterRegAlloc()); + break; + } + case MOP_xsbfxrri6i6: { + manager->NormalPatternOpt(cgFunc->IsAfterRegAlloc()); + break; + } + case MOP_wcbz: + case MOP_xcbz: + case MOP_wcbnz: + case MOP_xcbnz: { + manager->NormalPatternOpt(!cgFunc->IsAfterRegAlloc()); + manager->NormalPatternOpt(cgFunc->IsAfterRegAlloc()); + break; + } + case MOP_xsxtb32: + case MOP_xsxth32: + case MOP_xsxtb64: + case MOP_xsxth64: + case MOP_xsxtw64: { + manager->NormalPatternOpt(cgFunc->IsAfterRegAlloc()); + break; + } + case MOP_wsdivrrr: { + manager->NormalPatternOpt(cgFunc->IsAfterRegAlloc()); + break; + } + case MOP_xbl: { + if (JAVALANG) { + manager->NormalPatternOpt(!cgFunc->IsAfterRegAlloc()); + manager->NormalPatternOpt(!cgFunc->IsAfterRegAlloc()); + manager->NormalPatternOpt(!cgFunc->IsAfterRegAlloc()); + manager->NormalPatternOpt(cgFunc->IsAfterRegAlloc()); + } + if (CGOptions::IsGCOnly() && CGOptions::DoWriteRefFieldOpt()) { + manager->NormalPatternOpt(!cgFunc->IsAfterRegAlloc()); + } + break; + } + default: + break; + } + /* skip if it is not a read barrier call. 
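+ * i.e. the read-barrier pattern below is attempted only when GetReadBarrierName(insn) returns a non-empty name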
*/ + if (GetReadBarrierName(insn) != "") { + manager->NormalPatternOpt(!cgFunc->IsAfterRegAlloc()); + } +} +/* ======== CGPeepPattern End ======== */ + +void AArch64PeepHole::InitOpts() { + optimizations.resize(kPeepholeOptsNum); + optimizations[kEliminateSpecifcUXTOpt] = optOwnMemPool->New(cgFunc); + optimizations[kAndCmpBranchesToCsetOpt] = optOwnMemPool->New(cgFunc); + optimizations[kAndCbzBranchesToTstOpt] = optOwnMemPool->New(cgFunc); +} + +void AArch64PeepHole::Run(BB &bb, Insn &insn) { + MOperator thisMop = insn.GetMachineOpcode(); + switch (thisMop) { + case MOP_xuxtb32: + case MOP_xuxth32: + case MOP_xuxtw64: { + (static_cast(optimizations[kEliminateSpecifcUXTOpt]))->Run(bb, insn); + break; + } + case MOP_wcsetrc: + case MOP_xcsetrc: { + (static_cast(optimizations[kAndCmpBranchesToCsetOpt]))->Run(bb, insn); + break; + } + case MOP_xandrrr: + case MOP_wandrrr: + case MOP_wandrri12: + case MOP_xandrri13: { + (static_cast(optimizations[kAndCbzBranchesToTstOpt]))->Run(bb, insn); + break; + } + default: + break; + } +} + +void AArch64PeepHole0::InitOpts() { + optimizations.resize(kPeepholeOptsNum); + optimizations[kDeleteMovAfterCbzOrCbnzOpt] = optOwnMemPool->New(cgFunc); + optimizations[kRemoveMovingtoSameRegOpt] = optOwnMemPool->New(cgFunc); +} + +void AArch64PeepHole0::Run(BB &bb, Insn &insn) { + MOperator thisMop = insn.GetMachineOpcode(); + switch (thisMop) { + case MOP_wcbz: + case MOP_xcbz: + case MOP_wcbnz: + case MOP_xcbnz: { + (static_cast(optimizations[kDeleteMovAfterCbzOrCbnzOpt]))->Run(bb, insn); + break; + } + case MOP_wmovrr: + case MOP_xmovrr: + case MOP_xvmovs: + case MOP_xvmovd: + case MOP_vmovuu: + case MOP_vmovvv: { + (static_cast(optimizations[kRemoveMovingtoSameRegOpt]))->Run(bb, insn); + break; + } + default: + break; + } +} + +void AArch64PrePeepHole::InitOpts() { + optimizations.resize(kPeepholeOptsNum); + optimizations[kReplaceCmpToCmnOpt] = optOwnMemPool->New(cgFunc); + optimizations[kComplexMemOperandOpt] = optOwnMemPool->New(cgFunc); + optimizations[kComplexMemOperandPreOptAdd] = optOwnMemPool->New(cgFunc); + optimizations[kEnhanceStrLdrAArch64Opt] = optOwnMemPool->New(cgFunc); +} + +void AArch64PrePeepHole::Run(BB &bb, Insn &insn) { + MOperator thisMop = insn.GetMachineOpcode(); + switch (thisMop) { + case MOP_wmovri32: + case MOP_xmovri64: { + (static_cast(optimizations[kReplaceCmpToCmnOpt]))->Run(bb, insn); + break; + } + case MOP_xadrpl12: { + (static_cast(optimizations[kComplexMemOperandOpt]))->Run(bb, insn); + break; + } + case MOP_xaddrrr: { + (static_cast(optimizations[kComplexMemOperandPreOptAdd]))->Run(bb, insn); + break; + } + case MOP_xldr: + case MOP_xstr: + case MOP_wldr: + case MOP_wstr: + case MOP_dldr: + case MOP_dstr: + case MOP_sldr: + case MOP_sstr: { + (static_cast(optimizations[kEnhanceStrLdrAArch64Opt]))->Run(bb, insn); + break; + } + default: + break; + } +} + +void AArch64PrePeepHole1::InitOpts() { + optimizations.resize(kPeepholeOptsNum); + optimizations[kComplexExtendWordLslOpt] = optOwnMemPool->New(cgFunc); +} + +void AArch64PrePeepHole1::Run(BB &bb, Insn &insn) { + MOperator thisMop = insn.GetMachineOpcode(); + switch (thisMop) { + case MOP_xsxtw64: + case MOP_xuxtw64: { + (static_cast(optimizations[kComplexExtendWordLslOpt]))->Run(bb, insn); + break; + } + default: + break; + } +} + +bool RemoveIdenticalLoadAndStorePattern::CheckCondition(Insn &insn) { + nextInsn = insn.GetNextMachineInsn(); + if (nextInsn == nullptr) { + return false; + } + return true; +} + +void RemoveIdenticalLoadAndStorePattern::Run(BB &bb, Insn &insn) { + 
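+ /* Illustrative cases (sketch):
+  *   str w1, [x2, #4] ; str w1, [x2, #4]  ===> the first (dead) store is removed
+  *   str w1, [x2, #4] ; ldr w1, [x2, #4]  ===> the redundant reload is removed
+  */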
if (!CheckCondition(insn)) { + return; + } + MOperator mop1 = insn.GetMachineOpcode(); + MOperator mop2 = nextInsn->GetMachineOpcode(); + if ((mop1 == MOP_wstr && mop2 == MOP_wstr) || (mop1 == MOP_xstr && mop2 == MOP_xstr)) { + if (IsMemOperandsIdentical(insn, *nextInsn)) { + bb.RemoveInsn(insn); + } + } else if ((mop1 == MOP_wstr && mop2 == MOP_wldr) || (mop1 == MOP_xstr && mop2 == MOP_xldr)) { + if (IsMemOperandsIdentical(insn, *nextInsn)) { + bb.RemoveInsn(*nextInsn); + } + } +} + +bool RemoveIdenticalLoadAndStorePattern::IsMemOperandsIdentical(const Insn &insn1, const Insn &insn2) const { + regno_t regNO1 = static_cast(insn1.GetOperand(kInsnFirstOpnd)).GetRegisterNumber(); + regno_t regNO2 = static_cast(insn2.GetOperand(kInsnFirstOpnd)).GetRegisterNumber(); + if (regNO1 != regNO2) { + return false; + } + /* Match only [base + offset] */ + auto &memOpnd1 = static_cast(insn1.GetOperand(kInsnSecondOpnd)); + if (memOpnd1.GetAddrMode() != MemOperand::kAddrModeBOi || !memOpnd1.IsIntactIndexed()) { + return false; + } + auto &memOpnd2 = static_cast(insn2.GetOperand(kInsnSecondOpnd)); + if (memOpnd2.GetAddrMode() != MemOperand::kAddrModeBOi || !memOpnd1.IsIntactIndexed()) { + return false; + } + Operand *base1 = memOpnd1.GetBaseRegister(); + Operand *base2 = memOpnd2.GetBaseRegister(); + if (!((base1 != nullptr) && base1->IsRegister()) || !((base2 != nullptr) && base2->IsRegister())) { + return false; + } + + regno_t baseRegNO1 = static_cast(base1)->GetRegisterNumber(); + /* First insn re-write base addr reg1 <- [ reg1 + offset ] */ + if (baseRegNO1 == regNO1) { + return false; + } + + regno_t baseRegNO2 = static_cast(base2)->GetRegisterNumber(); + if (baseRegNO1 != baseRegNO2) { + return false; + } + + return memOpnd1.GetOffsetImmediate()->GetOffsetValue() == memOpnd2.GetOffsetImmediate()->GetOffsetValue(); +} + +bool RemoveMovingtoSameRegPattern::CheckCondition(Insn &insn) { + ASSERT(insn.GetOperand(kInsnFirstOpnd).IsRegister(), "expects registers"); + ASSERT(insn.GetOperand(kInsnSecondOpnd).IsRegister(), "expects registers"); + auto ®1 = static_cast(insn.GetOperand(kInsnFirstOpnd)); + auto ®2 = static_cast(insn.GetOperand(kInsnSecondOpnd)); + /* remove mov x0,x0 when it cast i32 to i64 */ + if ((reg1.GetRegisterNumber() == reg2.GetRegisterNumber()) && (reg1.GetSize() >= reg2.GetSize())) { + return true; + } + return false; +} + +void RemoveMovingtoSameRegPattern::Run(BB &bb, Insn &insn) { + /* remove mov x0,x0 when it cast i32 to i64 */ + if (CheckCondition(insn)) { + bb.RemoveInsn(insn); + } +} + +void RemoveMovingtoSameRegAArch64::Run(BB &bb, Insn &insn) { + ASSERT(insn.GetOperand(kInsnFirstOpnd).IsRegister(), "expects registers"); + ASSERT(insn.GetOperand(kInsnSecondOpnd).IsRegister(), "expects registers"); + auto ®1 = static_cast(insn.GetOperand(kInsnFirstOpnd)); + auto ®2 = static_cast(insn.GetOperand(kInsnSecondOpnd)); + /* remove mov x0,x0 when it cast i32 to i64 */ + if ((reg1.GetRegisterNumber() == reg2.GetRegisterNumber()) && (reg1.GetSize() >= reg2.GetSize())) { + bb.RemoveInsn(insn); + } +} + +void EnhanceStrLdrAArch64::Run(BB &bb, Insn &insn) { + Insn *prevInsn = insn.GetPrev(); + if (!cgFunc.GetMirModule().IsCModule()) { + return; + } + + if (prevInsn == nullptr) { + return; + } + Operand &memOpnd = insn.GetOperand(kInsnSecondOpnd); + CHECK_FATAL(memOpnd.GetKind() == Operand::kOpdMem, "Unexpected operand in EnhanceStrLdrAArch64"); + auto &a64MemOpnd = static_cast(memOpnd); + RegOperand *baseOpnd = a64MemOpnd.GetBaseRegister(); + MOperator prevMop = 
prevInsn->GetMachineOpcode(); + if (IsEnhanceAddImm(prevMop) && a64MemOpnd.GetAddrMode() == MemOperand::kAddrModeBOi && + a64MemOpnd.GetOffsetImmediate()->GetValue() == 0) { + auto &addDestOpnd = static_cast(prevInsn->GetOperand(kInsnFirstOpnd)); + if (baseOpnd == &addDestOpnd && !IfOperandIsLiveAfterInsn(addDestOpnd, insn)) { + auto &concreteMemOpnd = static_cast(memOpnd); + auto *origBaseReg = concreteMemOpnd.GetBaseRegister(); + concreteMemOpnd.SetBaseRegister( + static_cast(prevInsn->GetOperand(kInsnSecondOpnd))); + auto &ofstOpnd = static_cast(prevInsn->GetOperand(kInsnThirdOpnd)); + OfstOperand &offOpnd = static_cast(cgFunc).CreateOfstOpnd( + static_cast(ofstOpnd.GetValue()), k32BitSize); + offOpnd.SetVary(ofstOpnd.GetVary()); + auto *origOffOpnd = concreteMemOpnd.GetOffsetImmediate(); + concreteMemOpnd.SetOffsetOperand(offOpnd); + if (!static_cast(cgFunc).IsOperandImmValid(insn.GetMachineOpcode(), &memOpnd, kInsnSecondOpnd)) { + // If new offset is invalid, undo it + concreteMemOpnd.SetBaseRegister(*static_cast(origBaseReg)); + concreteMemOpnd.SetOffsetOperand(*origOffOpnd); + return; + } + bb.RemoveInsn(*prevInsn); + } + } +} + +bool EnhanceStrLdrAArch64::IsEnhanceAddImm(MOperator prevMop) const { + return prevMop == MOP_xaddrri12 || prevMop == MOP_waddrri12; +} + +bool IsSameRegisterOperation(const RegOperand &desMovOpnd, + const RegOperand &uxtDestOpnd, + const RegOperand &uxtFromOpnd) { + return ((desMovOpnd.GetRegisterNumber() == uxtDestOpnd.GetRegisterNumber()) && + (uxtDestOpnd.GetRegisterNumber() == uxtFromOpnd.GetRegisterNumber())); +} + +bool CombineContiLoadAndStorePattern::IsRegNotSameMemUseInInsn(const Insn &insn, regno_t regNO, bool isStore, + int64 baseOfst) const { + uint32 opndNum = insn.GetOperandSize(); + bool sameMemAccess = false; /* both store or load */ + if (insn.IsStore() == isStore) { + sameMemAccess = true; + } + for (uint32 i = 0; i < opndNum; ++i) { + Operand &opnd = insn.GetOperand(i); + if (opnd.IsList()) { + auto &listOpnd = static_cast(opnd); + for (auto &listElem : listOpnd.GetOperands()) { + RegOperand *regOpnd = static_cast(listElem); + ASSERT(regOpnd != nullptr, "parameter operand must be RegOperand"); + if (regNO == regOpnd->GetRegisterNumber()) { + return true; + } + } + } else if (opnd.IsMemoryAccessOperand()) { + auto &memOperand = static_cast(opnd); + RegOperand *base = memOperand.GetBaseRegister(); + /* need check offset as well */ + regno_t stackBaseRegNO = cgFunc->UseFP() ? R29 : RSP; + if (!sameMemAccess && base != nullptr) { + regno_t curBaseRegNO = base->GetRegisterNumber(); + int64 memBarrierRange = static_cast(insn.IsLoadStorePair() ? 
k16BitSize : k8BitSize); + if (!(curBaseRegNO == regNO && memOperand.GetAddrMode() == MemOperand::kAddrModeBOi && + memOperand.GetOffsetImmediate() != nullptr && + (memOperand.GetOffsetImmediate()->GetOffsetValue() <= (baseOfst - memBarrierRange) || + memOperand.GetOffsetImmediate()->GetOffsetValue() >= (baseOfst + memBarrierRange)))) { + return true; + } + } + /* do not trust the following situation : + * str x1, [x9] + * str x6, [x2] + * str x3, [x9, #8] + */ + if (isStore && regNO != stackBaseRegNO && base != nullptr && + base->GetRegisterNumber() != stackBaseRegNO && base->GetRegisterNumber() != regNO) { + return true; + } + if (isStore && base != nullptr && base->GetRegisterNumber() == regNO) { + if (memOperand.GetAddrMode() == MemOperand::kAddrModeBOi && memOperand.GetOffsetImmediate() != nullptr) { + int64 curOffset = memOperand.GetOffsetImmediate()->GetOffsetValue(); + if (memOperand.GetSize() == k64BitSize) { + uint32 memBarrierRange = insn.IsLoadStorePair() ? k16BitSize : k8BitSize; + if (curOffset < baseOfst + memBarrierRange && curOffset > baseOfst - static_cast(memBarrierRange)) { + return true; + } + } else if (memOperand.GetSize() == k32BitSize) { + uint32 memBarrierRange = insn.IsLoadStorePair() ? k8BitSize : k4BitSize; + if (curOffset < baseOfst + memBarrierRange && curOffset > baseOfst - memBarrierRange) { + return true; + } + } + } + } + } else if (opnd.IsConditionCode()) { + Operand &rflagOpnd = cgFunc->GetOrCreateRflag(); + RegOperand &rflagReg = static_cast(rflagOpnd); + if (rflagReg.GetRegisterNumber() == regNO) { + return true; + } + } else if (opnd.IsRegister()) { + if (!isStore && static_cast(opnd).GetRegisterNumber() == regNO) { + return true; + } + } + } + return false; +} + +std::vector CombineContiLoadAndStorePattern::FindPrevStrLdr(Insn &insn, regno_t destRegNO, + regno_t memBaseRegNO, int64 baseOfst) const { + std::vector prevContiInsns; + bool isStr = insn.IsStore(); + for (Insn *curInsn = insn.GetPrev(); curInsn != nullptr; curInsn = curInsn->GetPrev()) { + if (!curInsn->IsMachineInstruction()) { + continue; + } + if (curInsn->IsRegDefined(memBaseRegNO)) { + return prevContiInsns; + } + if (IsRegNotSameMemUseInInsn(*curInsn, memBaseRegNO, insn.IsStore(), static_cast(baseOfst))) { + return prevContiInsns; + } + /* return continuous STD/LDR insn */ + if (((isStr && curInsn->IsStore()) || (!isStr && curInsn->IsLoad())) && !curInsn->IsLoadStorePair()) { + auto *memOperand = static_cast(curInsn->GetMemOpnd()); + /* do not combine ldr r0, label */ + if (memOperand != nullptr) { + auto *baseRegOpnd = static_cast(memOperand->GetBaseRegister()); + ASSERT(baseRegOpnd == nullptr || !baseRegOpnd->IsVirtualRegister(), + "physical register has not been allocated?"); + if (memOperand->GetAddrMode() == MemOperand::kAddrModeBOi && + baseRegOpnd->GetRegisterNumber() == memBaseRegNO) { + prevContiInsns.emplace_back(curInsn); + } + } + } + /* check insn that changes the data flow */ + regno_t stackBaseRegNO = cgFunc->UseFP() ? 
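+    /*
+     * Illustrative goal of this backward scan (example registers assumed): collect earlier
+     * single str/ldr instructions on the same base so that, e.g.
+     *   str x0, [x2, #8]
+     *   str x1, [x2, #16]   ==>   stp x0, x1, [x2, #8]
+     * The scan stops at anything that may redefine the base or alias the slots, and (below)
+     * at calls and inline asm, which invalidate the memory assumptions.
+     */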
R29 : RSP; + /* ldr x8, [x21, #8] + * call foo() + * ldr x9, [x21, #16] + * although x21 is a calleeSave register, there is no guarantee data in memory [x21] is not changed + */ + if (curInsn->IsCall() && (!AArch64Abi::IsCalleeSavedReg(static_cast(destRegNO)) || + memBaseRegNO != stackBaseRegNO)) { + return prevContiInsns; + } + /* store opt should not cross call due to stack args */ + if (curInsn->IsCall() && isStr) { + return prevContiInsns; + } + if (curInsn->GetMachineOpcode() == MOP_asm) { + return prevContiInsns; + } + if (curInsn->ScanReg(destRegNO)) { + return prevContiInsns; + } + } + return prevContiInsns; +} + +Insn *CombineContiLoadAndStorePattern::FindValidSplitAddInsn(Insn &combineInsn, const RegOperand &baseOpnd) const { + Insn *splitAdd = nullptr; + for (Insn *cursor = combineInsn.GetPrev(); cursor != nullptr; cursor = cursor->GetPrev()) { + if (!cursor->IsMachineInstruction()) { + continue; + } + if (cursor->IsCall()) { + break; + } + if (cursor->IsRegDefined(baseOpnd.GetRegisterNumber())) { + break; + } + MOperator mOp = cursor->GetMachineOpcode(); + if (mOp != MOP_xaddrri12 && mOp != MOP_waddrri12) { + continue; + } + auto &destOpnd = static_cast(cursor->GetOperand(kInsnFirstOpnd)); + if (destOpnd.GetRegisterNumber() != R16 || destOpnd.GetSize() != baseOpnd.GetSize()) { + continue; + } + auto &useOpnd = static_cast(cursor->GetOperand(kInsnSecondOpnd)); + /* + * split add as following: + * add R16, R0, #2, LSL #12 + * add R16, R16, #1536 + */ + if (useOpnd.GetRegisterNumber() != baseOpnd.GetRegisterNumber()) { + if (useOpnd.GetRegisterNumber() == R16) { + Insn *defInsn = cursor->GetPrev(); + CHECK_FATAL(defInsn != nullptr, "invalid defInsn"); + CHECK_FATAL(defInsn->GetMachineOpcode() == MOP_xaddrri24 || defInsn->GetMachineOpcode() == MOP_waddrri24, + "split with wrong add"); + auto &opnd = static_cast(defInsn->GetOperand(kInsnSecondOpnd)); + if (opnd.GetRegisterNumber() == baseOpnd.GetRegisterNumber()) { + splitAdd = cursor; + } + } + break; + } else { + splitAdd = cursor; + break; + } + } + return splitAdd; +} + +bool CombineContiLoadAndStorePattern::FindTmpRegOnlyUseAfterCombineInsn(const Insn &curInsn) const { + /* + * avoid the case as following: + * add R16, R20, #28672 + * stp R1, R2, [R16, #408] + * ldr R1, [R0, #14032] add R16, R0, #3, LSL #12 + * ====> add R16, R16, #1536 (this r16 will clobber use of ldp R2, R3) + * ldp R1, R0, [R16, #208] + * ldp R2, R3, [R16, #424] + * ldr R0, [R0, #14036] + */ + for (Insn *cursor = curInsn.GetNext(); cursor != nullptr; cursor = cursor->GetNext()) { + if (!cursor->IsMachineInstruction()) { + continue; + } + MOperator mOp = cursor->GetMachineOpcode(); + if (mOp == MOP_xaddrri12 || mOp == MOP_waddrri12 || mOp == MOP_xaddrri24 || mOp == MOP_waddrri24) { + auto &destOpnd = static_cast(cursor->GetOperand(kInsnFirstOpnd)); + if (destOpnd.GetRegisterNumber() == R16) { + return false; + } + } + if (!cursor->IsLoad() && !cursor->IsStore() && !cursor->IsLoadStorePair()) { + continue; + } + const InsnDesc *md = &AArch64CG::kMd[cursor->GetMachineOpcode()]; + if (cursor->IsLoadLabel() || md->IsLoadAddress()) { + continue; + } + uint32 memIdx = (cursor->IsLoadStorePair() ? 
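+    /*
+     * Hedged note: R16 serves here as the scratch base for split offsets, so a later memory
+     * access that is still based on R16 (checked just below) means re-defining R16 for this
+     * combine would clobber a live value, and the split has to be abandoned.
+     */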
kInsnThirdOpnd : kInsnSecondOpnd); + auto &curMemOpnd = static_cast(cursor->GetOperand(memIdx)); + RegOperand *baseOpnd = curMemOpnd.GetBaseRegister(); + if (baseOpnd != nullptr && baseOpnd->GetRegisterNumber() == R16) { + return true; + } + } + return false; +} + +bool CombineContiLoadAndStorePattern::PlaceSplitAddInsn(const Insn &curInsn, Insn &combineInsn, + const MemOperand &memOperand, + RegOperand &baseOpnd, uint32 bitLen) const { + Insn *cursor = nullptr; + MemOperand *maxOfstMem = nullptr; + int64 maxOfstVal = 0; + MOperator mop = curInsn.GetMachineOpcode(); + OfstOperand *ofstOpnd = memOperand.GetOffsetImmediate(); + int64 ofstVal = ofstOpnd->GetOffsetValue(); + auto &aarFunc = static_cast(*cgFunc); + for (cursor = curInsn.GetNext(); cursor != nullptr; cursor = cursor->GetNext()) { + if (!cursor->IsMachineInstruction()) { + continue; + } + if (cursor->GetMachineOpcode() == mop && (cursor->IsLoad() || cursor->IsStore())) { + auto &curMemOpnd = static_cast(cursor->GetOperand(kInsnSecondOpnd)); + RegOperand *curBaseOpnd = curMemOpnd.GetBaseRegister(); + if (curMemOpnd.GetAddrMode() == MemOperand::kAddrModeBOi && RegOperand::IsSameReg(baseOpnd, *curBaseOpnd)) { + OfstOperand *curOfstOpnd = curMemOpnd.GetOffsetImmediate(); + CHECK_FATAL(curOfstOpnd != nullptr, "invalid OfstOperand"); + if (curOfstOpnd->GetOffsetValue() > ofstVal && + (curOfstOpnd->GetOffsetValue() - ofstVal) < MemOperand::GetMaxPairPIMM(bitLen) && + !aarFunc.IsOperandImmValid(combineInsn.GetMachineOpcode(), &curMemOpnd, kInsnThirdOpnd)) { + maxOfstMem = &curMemOpnd; + maxOfstVal = curOfstOpnd->GetOffsetValue(); + } + } + } + if (cursor->IsRegDefined(baseOpnd.GetRegisterNumber())) { + break; + } + if (cursor->IsRegDefined(R16)) { + break; + } + } + MemOperand *newMemOpnd = nullptr; + if (maxOfstMem == nullptr) { + newMemOpnd = &aarFunc.SplitOffsetWithAddInstruction(memOperand, bitLen, static_cast(R16), + false, &combineInsn, true); + } else { + RegOperand *addResOpnd = aarFunc.GetBaseRegForSplit(R16); + ImmOperand &immAddend = aarFunc.SplitAndGetRemained(*maxOfstMem, bitLen, maxOfstVal, true); + newMemOpnd = &aarFunc.CreateReplacementMemOperand(bitLen, *addResOpnd, ofstVal - immAddend.GetValue()); + if (!(aarFunc.IsOperandImmValid(combineInsn.GetMachineOpcode(), newMemOpnd, kInsnThirdOpnd))) { + newMemOpnd = &aarFunc.SplitOffsetWithAddInstruction(memOperand, bitLen, static_cast(R16), + false, &combineInsn, true); + } else { + aarFunc.SelectAddAfterInsn(*addResOpnd, baseOpnd, immAddend, PTY_i64, false, combineInsn); + } + } + if (!(aarFunc.IsOperandImmValid(combineInsn.GetMachineOpcode(), newMemOpnd, kInsnThirdOpnd))) { + return false; + } + combineInsn.SetOperand(kInsnThirdOpnd, *newMemOpnd); + return true; +} + +bool CombineContiLoadAndStorePattern::SplitOfstWithAddToCombine(const Insn &curInsn, Insn &combineInsn, + const MemOperand &memOperand) const { + auto *baseRegOpnd = static_cast(memOperand.GetBaseRegister()); + auto *ofstOpnd = static_cast(memOperand.GetOffsetImmediate()); + ASSERT(baseRegOpnd && ofstOpnd, "get baseOpnd and ofstOpnd failed"); + CHECK_FATAL(combineInsn.GetOperand(kInsnFirstOpnd).GetSize() == combineInsn.GetOperand(kInsnSecondOpnd).GetSize(), + "the size must equal"); + if (baseRegOpnd->GetRegisterNumber() == R16) { + return false; + } + Insn *splitAdd = FindValidSplitAddInsn(combineInsn, *baseRegOpnd); + const InsnDesc *md = &AArch64CG::kMd[combineInsn.GetMachineOpcode()]; + auto *opndProp = md->opndMD[kInsnFirstOpnd]; + auto &aarFunc = static_cast(*cgFunc); + if (splitAdd == nullptr) { + if 
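+  /*
+   * Illustrative sketch of the split (register numbers assumed): when the pair offset does
+   * not fit the stp/ldp immediate range, the offset is materialised through R16 first, e.g.
+   *   add x16, x0, #28672
+   *   stp x1, x2, [x16, #408]
+   * An existing "add R16, base, #imm" found above may be reused instead of emitting a new one.
+   */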
(combineInsn.IsLoadStorePair() && ofstOpnd->GetOffsetValue() < 0) { + return false; /* do not split */ + } + if (FindTmpRegOnlyUseAfterCombineInsn(combineInsn)) { + return false; + } + /* create and place addInsn */ + return PlaceSplitAddInsn(curInsn, combineInsn, memOperand, *baseRegOpnd, opndProp->GetSize()); + } else { + auto &newBaseReg = static_cast(splitAdd->GetOperand(kInsnFirstOpnd)); + auto &addImmOpnd = static_cast(splitAdd->GetOperand(kInsnThirdOpnd)); + int64 addVal = 0; + if (static_cast(splitAdd->GetOperand(kInsnSecondOpnd)).GetRegisterNumber() == R16) { + Insn *defInsn = splitAdd->GetPrev(); + CHECK_FATAL(defInsn->GetMachineOpcode() == MOP_xaddrri24 || defInsn->GetMachineOpcode() == MOP_waddrri24, + "split with wrong add"); + auto &immOpnd = static_cast(defInsn->GetOperand(kInsnThirdOpnd)); + auto &shiftOpnd = static_cast(defInsn->GetOperand(kInsnFourthOpnd)); + addVal = static_cast((static_cast(immOpnd.GetValue()) << shiftOpnd.GetShiftAmount())) + + addImmOpnd.GetValue(); + } else { + addVal = addImmOpnd.GetValue(); + } + auto *newOfstOpnd = &aarFunc.CreateOfstOpnd(static_cast(ofstOpnd->GetOffsetValue() - addVal), + ofstOpnd->GetSize()); + auto *newMemOpnd = aarFunc.CreateMemOperand(MemOperand::kAddrModeBOi, opndProp->GetSize(), + newBaseReg, nullptr, newOfstOpnd, memOperand.GetSymbol()); + if (!(static_cast(*cgFunc).IsOperandImmValid(combineInsn.GetMachineOpcode(), newMemOpnd, + kInsnThirdOpnd))) { + return PlaceSplitAddInsn(curInsn, combineInsn, memOperand, *baseRegOpnd, opndProp->GetSize()); + } + combineInsn.SetOperand(kInsnThirdOpnd, *newMemOpnd); + return true; + } +} + +bool CombineContiLoadAndStorePattern::CheckCondition(Insn &insn) { + memOpnd = static_cast(insn.GetMemOpnd()); + ASSERT(memOpnd != nullptr, "get mem operand failed"); + if (memOpnd->GetAddrMode() != MemOperand::kAddrModeBOi) { + return false; + } + if (!doAggressiveCombine) { + return false; + } + return true; +} + +/* Combining 2 STRs into 1 stp or 2 LDRs into 1 ldp */ +void CombineContiLoadAndStorePattern::Run(BB &bb, Insn &insn) { + if (!CheckCondition(insn)) { + return; + } + MOperator thisMop = insn.GetMachineOpcode(); + ASSERT(insn.GetOperand(kInsnFirstOpnd).IsRegister(), "unexpect operand"); + auto &destOpnd = static_cast(insn.GetOperand(kInsnFirstOpnd)); + auto *baseRegOpnd = static_cast(memOpnd->GetBaseRegister()); + OfstOperand *offsetOpnd = memOpnd->GetOffsetImmediate(); + CHECK_FATAL(offsetOpnd != nullptr, "offset opnd lost"); + ASSERT(baseRegOpnd == nullptr || !baseRegOpnd->IsVirtualRegister(), "physical register has not been allocated?"); + std::vector prevContiInsnVec = FindPrevStrLdr( + insn, destOpnd.GetRegisterNumber(), baseRegOpnd->GetRegisterNumber(), offsetOpnd->GetOffsetValue()); + for (auto prevContiInsn : prevContiInsnVec) { + ASSERT(prevContiInsn != nullptr, "get previous consecutive instructions failed"); + auto *prevMemOpnd = static_cast(prevContiInsn->GetMemOpnd()); + if (memOpnd->GetIndexOpt() != prevMemOpnd->GetIndexOpt()) { + continue; + } + OfstOperand *prevOffsetOpnd = prevMemOpnd->GetOffsetImmediate(); + CHECK_FATAL(offsetOpnd != nullptr && prevOffsetOpnd != nullptr, "both conti str/ldr have no offset"); + auto &prevDestOpnd = static_cast(prevContiInsn->GetOperand(kInsnFirstOpnd)); + uint32 memSize = insn.GetMemoryByteSize(); + uint32 prevMemSize = prevContiInsn->GetMemoryByteSize(); + if (prevDestOpnd.GetRegisterType() != destOpnd.GetRegisterType()) { + continue; + } + int64 offsetVal = offsetOpnd->GetOffsetValue(); + int64 prevOffsetVal = 
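+    /*
+     * Hedged summary of the special case below: two 32-bit or 64-bit stores of outgoing
+     * stack arguments that sit 8 bytes apart, and whose valid bits fill the stored width,
+     * are widened to a single 64-bit stp of the corresponding full-width registers.
+     */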
prevOffsetOpnd->GetOffsetValue(); + auto diffVal = std::abs(offsetVal - prevOffsetVal); + regno_t destRegNO = destOpnd.GetRegisterNumber(); + regno_t prevDestRegNO = prevDestOpnd.GetRegisterNumber(); + if (insn.IsStore() && memOpnd->IsStackArgMem() && prevMemOpnd->IsStackArgMem() && + (memSize == k4ByteSize || memSize == k8ByteSize) && diffVal == k8BitSize && + (prevMemSize == k4ByteSize || prevMemSize == k8ByteSize) && + (destOpnd.GetValidBitsNum() == memSize * k8BitSize) && + (prevDestOpnd.GetValidBitsNum() == prevMemSize * k8BitSize)) { + RegOperand &newDest = static_cast(cgFunc)->GetOrCreatePhysicalRegisterOperand( + static_cast(destRegNO), k64BitSize, destOpnd.GetRegisterType()); + RegOperand &newPrevDest = static_cast(cgFunc)->GetOrCreatePhysicalRegisterOperand( + static_cast(prevDestRegNO), k64BitSize, prevDestOpnd.GetRegisterType()); + MemOperand *combineMemOpnd = (offsetVal < prevOffsetVal) ? memOpnd : prevMemOpnd; + MOperator mopPair = (destOpnd.GetRegisterType() == kRegTyInt) ? MOP_xstp : MOP_dstp; + if ((static_cast(*cgFunc).IsOperandImmValid(mopPair, combineMemOpnd, kInsnThirdOpnd))) { + Insn &combineInsn = (offsetVal < prevOffsetVal) ? + cgFunc->GetInsnBuilder()->BuildInsn(mopPair, newDest, newPrevDest, *combineMemOpnd): + cgFunc->GetInsnBuilder()->BuildInsn(mopPair, newPrevDest, newDest, *combineMemOpnd); + bb.InsertInsnAfter(*prevContiInsn, combineInsn); + RemoveInsnAndKeepComment(bb, insn, *prevContiInsn); + return; + } + } + if (memSize != prevMemSize || + thisMop != prevContiInsn->GetMachineOpcode() || prevDestOpnd.GetSize() != destOpnd.GetSize()) { + continue; + } + /* do combination str/ldr -> stp/ldp */ + if ((insn.IsStore() || destRegNO != prevDestRegNO) || (destRegNO == RZR && prevDestRegNO == RZR)) { + if ((memSize == k8ByteSize && diffVal == k8BitSize) || + (memSize == k4ByteSize && diffVal == k4BitSize) || + (memSize == k16ByteSize && diffVal == k16BitSize)) { + MOperator mopPair = GetMopPair(thisMop); + MemOperand *combineMemOpnd = (offsetVal < prevOffsetVal) ? memOpnd : prevMemOpnd; + Insn &combineInsn = (offsetVal < prevOffsetVal) ? 
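+          /*
+           * Illustrative result (registers assumed): adjacent same-width accesses become a
+           * pair, with the operand belonging to the lower address placed first, e.g.
+           *   str w0, [sp, #4]
+           *   str w1, [sp, #8]    ==>   stp w0, w1, [sp, #4]
+           * If the pair immediate turns out invalid, the split-offset path is tried before
+           * giving up on the combine.
+           */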
+ cgFunc->GetInsnBuilder()->BuildInsn(mopPair, destOpnd, prevDestOpnd, *combineMemOpnd) : + cgFunc->GetInsnBuilder()->BuildInsn(mopPair, prevDestOpnd, destOpnd, *combineMemOpnd); + bb.InsertInsnAfter(*prevContiInsn, combineInsn); + if (!(static_cast(*cgFunc).IsOperandImmValid(mopPair, combineMemOpnd, kInsnThirdOpnd)) && + !SplitOfstWithAddToCombine(insn, combineInsn, *combineMemOpnd)) { + bb.RemoveInsn(combineInsn); + return; + } + RemoveInsnAndKeepComment(bb, insn, *prevContiInsn); + return; + } + } + /* do combination strb/ldrb -> strh/ldrh -> str/ldr */ + if (destRegNO == prevDestRegNO && destRegNO == RZR && prevDestRegNO == RZR) { + if ((memSize == k1ByteSize && diffVal == k1ByteSize) || (memSize == k2ByteSize && diffVal == k2ByteSize)) { + MOperator mopPair = GetMopHigherByte(thisMop); + if (offsetVal < prevOffsetVal) { + if (static_cast(*cgFunc).IsOperandImmValid(mopPair, memOpnd, kInsnSecondOpnd)) { + Insn &combineInsn = cgFunc->GetInsnBuilder()->BuildInsn(mopPair, destOpnd, *memOpnd); + bb.InsertInsnAfter(*prevContiInsn, combineInsn); + RemoveInsnAndKeepComment(bb, insn, *prevContiInsn); + return; + } + } else { + if (static_cast(*cgFunc).IsOperandImmValid(mopPair, prevMemOpnd, kInsnSecondOpnd)) { + Insn &combineInsn = cgFunc->GetInsnBuilder()->BuildInsn(mopPair, prevDestOpnd, *prevMemOpnd); + bb.InsertInsnAfter(*prevContiInsn, combineInsn); + RemoveInsnAndKeepComment(bb, insn, *prevContiInsn); + return; + } + } + } + } + } +} + +MOperator CombineContiLoadAndStorePattern::GetMopHigherByte(MOperator mop) const { + switch (mop) { + case MOP_wldrb: + return MOP_wldrh; + case MOP_wstrb: + return MOP_wstrh; + case MOP_wldrh: + return MOP_wldr; + case MOP_wstrh: + return MOP_wstr; + default: + ASSERT(false, "should not run here"); + return MOP_undef; + } +} + +void CombineContiLoadAndStorePattern::RemoveInsnAndKeepComment(BB &bb, Insn &insn, Insn &prevInsn) const { + /* keep the comment */ + Insn *nn = prevInsn.GetNextMachineInsn(); + std::string newComment = ""; + MapleString comment = insn.GetComment(); + if (comment.c_str() != nullptr && strlen(comment.c_str()) > 0) { + newComment += comment.c_str(); + } + comment = prevInsn.GetComment(); + if (comment.c_str() != nullptr && strlen(comment.c_str()) > 0) { + newComment = newComment + " " + comment.c_str(); + } + if (newComment.c_str() != nullptr && strlen(newComment.c_str()) > 0) { + ASSERT(nn != nullptr, "nn should not be nullptr"); + nn->SetComment(newComment); + } + bb.RemoveInsn(insn); + bb.RemoveInsn(prevInsn); +} + +bool EliminateSpecifcSXTPattern::CheckCondition(Insn &insn) { + BB *bb = insn.GetBB(); + if (bb->GetFirstMachineInsn() == &insn) { + BB *prevBB = bb->GetPrev(); + if (prevBB != nullptr && (bb->GetPreds().size() == 1) && (*(bb->GetPreds().cbegin()) == prevBB)) { + prevInsn = prevBB->GetLastMachineInsn(); + } + } else { + prevInsn = insn.GetPreviousMachineInsn(); + } + if (prevInsn == nullptr) { + return false; + } + return true; +} + +void EliminateSpecifcSXTPattern::Run(BB &bb, Insn &insn) { + if (!CheckCondition(insn)) { + return; + } + MOperator thisMop = insn.GetMachineOpcode(); + auto ®Opnd0 = static_cast(insn.GetOperand(kInsnFirstOpnd)); + auto ®Opnd1 = static_cast(insn.GetOperand(kInsnSecondOpnd)); + if (&insn != bb.GetFirstInsn() && regOpnd0.GetRegisterNumber() == regOpnd1.GetRegisterNumber() && + prevInsn->IsMachineInstruction()) { + if (prevInsn->GetMachineOpcode() == MOP_wmovri32 || prevInsn->GetMachineOpcode() == MOP_xmovri64) { + auto &dstMovOpnd = static_cast(prevInsn->GetOperand(kInsnFirstOpnd)); + if 
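+      /*
+       * Hedged example of the redundant sign-extension removed here (constants assumed):
+       *   mov  w0, #5
+       *   sxtb w0, w0        // removable: #5 already fits the signed byte range
+       * For the 64-bit forms the preceding mov is instead widened to a 64-bit move when the
+       * immediate has no bits above the extended width.
+       */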
(dstMovOpnd.GetRegisterNumber() != regOpnd1.GetRegisterNumber()) { + return; + } + Operand &opnd = prevInsn->GetOperand(kInsnSecondOpnd); + if (opnd.IsIntImmediate()) { + auto &immOpnd = static_cast(opnd); + int64 value = immOpnd.GetValue(); + if (thisMop == MOP_xsxtb32) { + /* value should in range between -127 and 127 */ + if (value >= static_cast(0xFFFFFFFFFFFFFF80) && value <= 0x7F && + immOpnd.IsSingleInstructionMovable(regOpnd0.GetSize())) { + bb.RemoveInsn(insn); + } + } else if (thisMop == MOP_xsxth32) { + /* value should in range between -32678 and 32678 */ + if (value >= static_cast(0xFFFFFFFFFFFF8000) && value <= 0x7FFF && + immOpnd.IsSingleInstructionMovable(regOpnd0.GetSize())) { + bb.RemoveInsn(insn); + } + } else { + uint64 flag = 0xFFFFFFFFFFFFFF80; /* initialize the flag with fifty-nine 1s at top */ + if (thisMop == MOP_xsxth64) { + flag = 0xFFFFFFFFFFFF8000; /* specify the flag with forty-nine 1s at top in this case */ + } else if (thisMop == MOP_xsxtw64) { + flag = 0xFFFFFFFF80000000; /* specify the flag with thirty-three 1s at top in this case */ + } + if ((static_cast(value) & flag) == 0 && immOpnd.IsSingleInstructionMovable(regOpnd0.GetSize())) { + auto *aarch64CGFunc = static_cast(cgFunc); + RegOperand &dstOpnd = aarch64CGFunc->GetOrCreatePhysicalRegisterOperand( + static_cast(dstMovOpnd.GetRegisterNumber()), k64BitSize, dstMovOpnd.GetRegisterType()); + prevInsn->SetOperand(kInsnFirstOpnd, dstOpnd); + prevInsn->SetMOP(AArch64CG::kMd[MOP_xmovri64]); + bb.RemoveInsn(insn); + } + } + } + } + } +} + +void EliminateSpecifcUXTAArch64::Run(BB &bb, Insn &insn) { + MOperator thisMop = insn.GetMachineOpcode(); + Insn *prevInsn = insn.GetPreviousMachineInsn(); + if (prevInsn == nullptr) { + return; + } + auto ®Opnd0 = static_cast(insn.GetOperand(kInsnFirstOpnd)); + auto ®Opnd1 = static_cast(insn.GetOperand(kInsnSecondOpnd)); + if (prevInsn->IsCall() && + prevInsn->GetIsCallReturnUnsigned() && + regOpnd0.GetRegisterNumber() == regOpnd1.GetRegisterNumber() && + (regOpnd1.GetRegisterNumber() == R0 || regOpnd1.GetRegisterNumber() == V0)) { + uint32 retSize = prevInsn->GetRetSize(); + if (retSize > 0 && + ((thisMop == MOP_xuxtb32 && retSize <= k1ByteSize) || + (thisMop == MOP_xuxth32 && retSize <= k2ByteSize) || + (thisMop == MOP_xuxtw64 && retSize <= k4ByteSize))) { + bb.RemoveInsn(insn); + } + return; + } + if (&insn == bb.GetFirstInsn() || regOpnd0.GetRegisterNumber() != regOpnd1.GetRegisterNumber() || + !prevInsn->IsMachineInstruction()) { + return; + } + if (cgFunc.GetMirModule().GetSrcLang() == kSrcLangC && prevInsn->IsCall() && prevInsn->GetIsCallReturnSigned()) { + return; + } + if (thisMop == MOP_xuxtb32) { + if (prevInsn->GetMachineOpcode() == MOP_wmovri32 || prevInsn->GetMachineOpcode() == MOP_xmovri64) { + auto &dstMovOpnd = static_cast(prevInsn->GetOperand(kInsnFirstOpnd)); + if (!IsSameRegisterOperation(dstMovOpnd, regOpnd1, regOpnd0)) { + return; + } + Operand &opnd = prevInsn->GetOperand(kInsnSecondOpnd); + if (opnd.IsIntImmediate()) { + auto &immOpnd = static_cast(opnd); + int64 value = immOpnd.GetValue(); + /* check the top 56 bits of value */ + if ((static_cast(value) & 0xFFFFFFFFFFFFFF00) == 0) { + bb.RemoveInsn(insn); + } + } + } else if (prevInsn->GetMachineOpcode() == MOP_wldrb) { + auto &dstOpnd = static_cast(prevInsn->GetOperand(kInsnFirstOpnd)); + if (dstOpnd.GetRegisterNumber() != regOpnd1.GetRegisterNumber()) { + return; + } + bb.RemoveInsn(insn); + } + } else if (thisMop == MOP_xuxth32) { + if (prevInsn->GetMachineOpcode() == MOP_wmovri32 || 
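+    /*
+     * Hedged example for the zero-extension cases (registers assumed):
+     *   ldrh w0, [x1]
+     *   uxth w0, w0        // removable: ldrh already zero-extends to 32 bits
+     * The uxtw case relies on 32-bit loads and moves implicitly clearing the upper 32 bits.
+     */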
prevInsn->GetMachineOpcode() == MOP_xmovri64) { + auto &dstMovOpnd = static_cast(prevInsn->GetOperand(kInsnFirstOpnd)); + if (!IsSameRegisterOperation(dstMovOpnd, regOpnd1, regOpnd0)) { + return; + } + Operand &opnd = prevInsn->GetOperand(kInsnSecondOpnd); + if (opnd.IsIntImmediate()) { + auto &immOpnd = static_cast(opnd); + int64 value = immOpnd.GetValue(); + if ((static_cast(value) & 0xFFFFFFFFFFFF0000) == 0) { + bb.RemoveInsn(insn); + } + } + } else if (prevInsn->GetMachineOpcode() == MOP_wldrh) { + auto &dstOpnd = static_cast(prevInsn->GetOperand(kInsnFirstOpnd)); + if (dstOpnd.GetRegisterNumber() != regOpnd1.GetRegisterNumber()) { + return; + } + bb.RemoveInsn(insn); + } + } else { + /* this_mop == MOP_xuxtw64 */ + if (prevInsn->GetMachineOpcode() == MOP_wmovri32 || prevInsn->GetMachineOpcode() == MOP_wldrsb || + prevInsn->GetMachineOpcode() == MOP_wldrb || prevInsn->GetMachineOpcode() == MOP_wldrsh || + prevInsn->GetMachineOpcode() == MOP_wldrh || prevInsn->GetMachineOpcode() == MOP_wldr) { + auto &dstOpnd = static_cast(prevInsn->GetOperand(kInsnFirstOpnd)); + if (!IsSameRegisterOperation(dstOpnd, regOpnd1, regOpnd0)) { + return; + } + /* 32-bit ldr does zero-extension by default, so this conversion can be skipped */ + bb.RemoveInsn(insn); + } + } +} + +bool FmovRegPattern::CheckCondition(Insn &insn) { + nextInsn = insn.GetNextMachineInsn(); + if (nextInsn == nullptr) { + return false; + } + if (&insn == insn.GetBB()->GetFirstInsn()) { + return false; + } + prevInsn = insn.GetPrev(); + auto &curSrcRegOpnd = static_cast(insn.GetOperand(kInsnSecondOpnd)); + auto &prevSrcRegOpnd = static_cast(prevInsn->GetOperand(kInsnSecondOpnd)); + /* same src freg */ + if (curSrcRegOpnd.GetRegisterNumber() != prevSrcRegOpnd.GetRegisterNumber()) { + return false; + } + return true; +} + +void FmovRegPattern::Run(BB &bb, Insn &insn) { + if (!CheckCondition(insn)) { + return; + } + MOperator thisMop = insn.GetMachineOpcode(); + MOperator prevMop = prevInsn->GetMachineOpcode(); + MOperator newMop; + uint32 doOpt = 0; + if (prevMop == MOP_xvmovrv && thisMop == MOP_xvmovrv) { + doOpt = k32BitSize; + newMop = MOP_wmovrr; + } else if (prevMop == MOP_xvmovrd && thisMop == MOP_xvmovrd) { + doOpt = k64BitSize; + newMop = MOP_xmovrr; + } + if (doOpt == 0) { + return; + } + auto &curDstRegOpnd = static_cast(insn.GetOperand(kInsnFirstOpnd)); + regno_t curDstReg = curDstRegOpnd.GetRegisterNumber(); + /* optimize case 1 */ + auto &prevDstRegOpnd = static_cast(prevInsn->GetOperand(kInsnFirstOpnd)); + regno_t prevDstReg = prevDstRegOpnd.GetRegisterNumber(); + auto *aarch64CGFunc = static_cast(cgFunc); + RegOperand &dst = + aarch64CGFunc->GetOrCreatePhysicalRegisterOperand(static_cast(curDstReg), doOpt, kRegTyInt); + RegOperand &src = + aarch64CGFunc->GetOrCreatePhysicalRegisterOperand(static_cast(prevDstReg), doOpt, kRegTyInt); + Insn &newInsn = cgFunc->GetInsnBuilder()->BuildInsn(newMop, dst, src); + bb.InsertInsnBefore(insn, newInsn); + bb.RemoveInsn(insn); + RegOperand &newOpnd = + aarch64CGFunc->GetOrCreatePhysicalRegisterOperand(static_cast(prevDstReg), doOpt, kRegTyInt); + uint32 opndNum = nextInsn->GetOperandSize(); + for (uint32 opndIdx = 0; opndIdx < opndNum; ++opndIdx) { + Operand &opnd = nextInsn->GetOperand(opndIdx); + if (opnd.IsMemoryAccessOperand()) { + auto &memOpnd = static_cast(opnd); + Operand *base = memOpnd.GetBaseRegister(); + if (base != nullptr) { + if (base->IsRegister()) { + auto *reg = static_cast(base); + if (reg->GetRegisterNumber() == curDstReg) { + memOpnd.SetBaseRegister(newOpnd); + } + 
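+        /*
+         * Illustrative effect of this pattern (registers assumed): two fmov transfers from
+         * the same FP source become one transfer plus an integer copy, e.g.
+         *   fmov w1, s0
+         *   fmov w2, s0     ==>   fmov w1, s0
+         *                         mov  w2, w1
+         * and this loop rewrites uses of the second destination in the following insn.
+         */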
} + } + Operand *offset = memOpnd.GetIndexRegister(); + if (offset != nullptr) { + if (offset->IsRegister()) { + auto *reg = static_cast(offset); + if (reg->GetRegisterNumber() == curDstReg) { + memOpnd.SetIndexRegister(newOpnd); + } + } + } + } else if (opnd.IsRegister()) { + /* Check if it is a source operand. */ + auto *regProp = nextInsn->GetDesc()->opndMD[opndIdx]; + if (regProp->IsUse()) { + auto ® = static_cast(opnd); + if (reg.GetRegisterNumber() == curDstReg) { + nextInsn->SetOperand(opndIdx, newOpnd); + } + } + } + } +} + +bool SbfxOptPattern::CheckCondition(Insn &insn) { + nextInsn = insn.GetNextMachineInsn(); + if (nextInsn == nullptr) { + return false; + } + auto &curDstRegOpnd = static_cast(insn.GetOperand(kInsnFirstOpnd)); + uint32 opndNum = nextInsn->GetOperandSize(); + const InsnDesc *md = nextInsn->GetDesc(); + for (uint32 opndIdx = 0; opndIdx < opndNum; ++opndIdx) { + Operand &opnd = nextInsn->GetOperand(opndIdx); + /* Check if it is a source operand. */ + if (opnd.IsMemoryAccessOperand() || opnd.IsList()) { + return false; + } else if (opnd.IsRegister()) { + auto ® = static_cast(opnd); + auto *regProp = md->opndMD[opndIdx]; + if (reg.GetRegisterNumber() == curDstRegOpnd.GetRegisterNumber()) { + if (reg.GetSize() != k32BitSize) { + return false; + } + if (regProp->IsDef()) { + toRemove = true; + } else { + (void)cands.emplace_back(opndIdx); + } + } + } + } + return cands.size() != 0; +} + +void SbfxOptPattern::Run(BB &bb, Insn &insn) { + if (!CheckCondition(insn)) { + return; + } + auto &srcRegOpnd = static_cast(insn.GetOperand(kInsnSecondOpnd)); + RegOperand &newReg = static_cast(cgFunc)->GetOrCreatePhysicalRegisterOperand( + static_cast(srcRegOpnd.GetRegisterNumber()), k32BitSize, srcRegOpnd.GetRegisterType()); + // replace use point of opnd in nextInsn + for (auto i: cands) { + nextInsn->SetOperand(i, newReg); + } + if (toRemove) { + bb.RemoveInsn(insn); + } +} + +bool CbnzToCbzPattern::CheckCondition(Insn &insn) { + MOperator curMop = insn.GetMachineOpcode(); + if (curMop != MOP_wcbnz && curMop != MOP_xcbnz) { + return false; + } + /* reg has to be R0, since return value is in R0 */ + auto ®Opnd0 = static_cast(insn.GetOperand(kInsnFirstOpnd)); + if (regOpnd0.GetRegisterNumber() != R0) { + return false; + } + nextBB = insn.GetBB()->GetNext(); + /* Make sure nextBB can only be reached by bb */ + if (nextBB->GetPreds().size() > 1 || nextBB->GetEhPreds().empty()) { + return false; + } + /* Next insn should be a mov R0 = 0 */ + movInsn = nextBB->GetFirstMachineInsn(); + if (movInsn == nullptr) { + return false; + } + MOperator movInsnMop = movInsn->GetMachineOpcode(); + if (movInsnMop != MOP_wmovri32 && movInsnMop != MOP_xmovri64) { + return false; + } + auto &movDest = static_cast(movInsn->GetOperand(kInsnFirstOpnd)); + if (movDest.GetRegisterNumber() != R0) { + return false; + } + auto &movImm = static_cast(movInsn->GetOperand(kInsnSecondOpnd)); + if (movImm.GetValue() != 0) { + return false; + } + Insn *nextBrInsn = movInsn->GetNextMachineInsn(); + if (nextBrInsn == nullptr) { + return false; + } + if (nextBrInsn->GetMachineOpcode() != MOP_xuncond) { + return false; + } + /* Is nextBB branch to the return-bb? 
*/ + if (nextBB->GetSuccs().size() != 1) { + return false; + } + return true; +} + +void CbnzToCbzPattern::Run(BB &bb, Insn &insn) { + if (!CheckCondition(insn)) { + return; + } + MOperator thisMop = insn.GetMachineOpcode(); + BB *targetBB = nullptr; + auto it = bb.GetSuccsBegin(); + if (*it == nextBB) { + ++it; + } + targetBB = *it; + /* Make sure when nextBB is empty, targetBB is fallthru of bb. */ + if (targetBB != nextBB->GetNext()) { + return; + } + BB *nextBBTarget = *(nextBB->GetSuccsBegin()); + if (nextBBTarget->GetKind() != BB::kBBReturn) { + return; + } + /* Control flow looks nice, instruction looks nice */ + Operand &brTarget = brInsn->GetOperand(kInsnFirstOpnd); + insn.SetOperand(kInsnSecondOpnd, brTarget); + if (thisMop == MOP_wcbnz) { + insn.SetMOP(AArch64CG::kMd[MOP_wcbz]); + } else { + insn.SetMOP(AArch64CG::kMd[MOP_xcbz]); + } + nextBB->RemoveInsn(*movInsn); + nextBB->RemoveInsn(*brInsn); + /* nextBB is now a fallthru bb, not a goto bb */ + nextBB->SetKind(BB::kBBFallthru); + /* + * fix control flow, we have bb, nextBB, targetBB, nextBB_target + * connect bb -> nextBB_target erase targetBB + */ + it = bb.GetSuccsBegin(); + CHECK_FATAL(it != bb.GetSuccsEnd(), "succs is empty."); + if (*it == targetBB) { + bb.EraseSuccs(it); + bb.PushFrontSuccs(*nextBBTarget); + } else { + ++it; + bb.EraseSuccs(it); + bb.PushBackSuccs(*nextBBTarget); + } + for (auto targetBBIt = targetBB->GetPredsBegin(); targetBBIt != targetBB->GetPredsEnd(); ++targetBBIt) { + if (*targetBBIt == &bb) { + targetBB->ErasePreds(targetBBIt); + break; + } + } + for (auto nextIt = nextBBTarget->GetPredsBegin(); nextIt != nextBBTarget->GetPredsEnd(); ++nextIt) { + if (*nextIt == nextBB) { + nextBBTarget->ErasePreds(nextIt); + break; + } + } + nextBBTarget->PushBackPreds(bb); + + /* nextBB has no target, originally just branch target */ + nextBB->EraseSuccs(nextBB->GetSuccsBegin()); + ASSERT(nextBB->GetSuccs().empty(), "peep: branch target incorrect"); + /* Now make nextBB fallthru to targetBB */ + nextBB->PushFrontSuccs(*targetBB); + targetBB->PushBackPreds(*nextBB); +} + +bool ContiLDRorSTRToSameMEMPattern::CheckCondition(Insn &insn) { + prevInsn = insn.GetPrev(); + while (prevInsn != nullptr && prevInsn->GetMachineOpcode() == 0 && prevInsn != insn.GetBB()->GetFirstInsn()) { + prevInsn = prevInsn->GetPrev(); + } + if (!insn.IsMachineInstruction() || prevInsn == nullptr) { + return false; + } + MOperator thisMop = insn.GetMachineOpcode(); + MOperator prevMop = prevInsn->GetMachineOpcode(); + /* + * store regB, RegC, offset + * load regA, RegC, offset + */ + if ((thisMop == MOP_xldr && prevMop == MOP_xstr) || (thisMop == MOP_wldr && prevMop == MOP_wstr) || + (thisMop == MOP_dldr && prevMop == MOP_dstr) || (thisMop == MOP_sldr && prevMop == MOP_sstr)) { + loadAfterStore = true; + } + /* + * load regA, RegC, offset + * load regB, RegC, offset + */ + if ((thisMop == MOP_xldr || thisMop == MOP_wldr || thisMop == MOP_dldr || thisMop == MOP_sldr) && + prevMop == thisMop) { + loadAfterLoad = true; + } + if (!loadAfterStore && !loadAfterLoad) { + return false; + } + ASSERT(insn.GetOperand(kInsnSecondOpnd).IsMemoryAccessOperand(), "expects mem operands"); + ASSERT(prevInsn->GetOperand(kInsnSecondOpnd).IsMemoryAccessOperand(), "expects mem operands"); + return true; +} + +void ContiLDRorSTRToSameMEMPattern::Run(BB &bb, Insn &insn) { + if (!CheckCondition(insn)) { + return; + } + MOperator thisMop = insn.GetMachineOpcode(); + auto &memOpnd1 = static_cast(insn.GetOperand(kInsnSecondOpnd)); + MemOperand::AArch64AddressingMode 
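+  /*
+   * Hedged sketch of the two shapes handled here (registers assumed):
+   *   str x1, [x2, #8]
+   *   ldr x0, [x2, #8]      ==>   mov x0, x1   (load of the value just stored)
+   * and a repeated  ldr xN, [x2, #8]  into the same register, which is simply removed.
+   */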
addrMode1 = memOpnd1.GetAddrMode(); + if (addrMode1 != MemOperand::kAddrModeBOi || (!memOpnd1.IsIntactIndexed())) { + return; + } + + auto *base1 = static_cast(memOpnd1.GetBaseRegister()); + ASSERT(base1 == nullptr || !base1->IsVirtualRegister(), "physical register has not been allocated?"); + OfstOperand *offset1 = memOpnd1.GetOffsetImmediate(); + + auto &memOpnd2 = static_cast(prevInsn->GetOperand(kInsnSecondOpnd)); + MemOperand::AArch64AddressingMode addrMode2 = memOpnd2.GetAddrMode(); + if (addrMode2 != MemOperand::kAddrModeBOi || (!memOpnd2.IsIntactIndexed())) { + return; + } + + auto *base2 = static_cast(memOpnd2.GetBaseRegister()); + ASSERT(base2 == nullptr || !base2->IsVirtualRegister(), "physical register has not been allocated?"); + OfstOperand *offset2 = memOpnd2.GetOffsetImmediate(); + + if (base1 == nullptr || base2 == nullptr || offset1 == nullptr || offset2 == nullptr) { + return; + } + + auto ®1 = static_cast(insn.GetOperand(kInsnFirstOpnd)); + auto ®2 = static_cast(prevInsn->GetOperand(kInsnFirstOpnd)); + int64 offsetVal1 = offset1->GetOffsetValue(); + int64 offsetVal2 = offset2->GetOffsetValue(); + if (base1->GetRegisterNumber() != base2->GetRegisterNumber() || + reg1.GetRegisterType() != reg2.GetRegisterType() || reg1.GetSize() != reg2.GetSize() || + offsetVal1 != offsetVal2) { + return; + } + if (loadAfterStore && reg1.GetRegisterNumber() != reg2.GetRegisterNumber()) { + /* replace it with mov */ + MOperator newOp = MOP_wmovrr; + if (reg1.GetRegisterType() == kRegTyInt) { + newOp = (reg1.GetSize() <= k32BitSize) ? MOP_wmovrr : MOP_xmovrr; + } else if (reg1.GetRegisterType() == kRegTyFloat) { + newOp = (reg1.GetSize() <= k32BitSize) ? MOP_xvmovs : MOP_xvmovd; + } + Insn *nextInsn = insn.GetNext(); + while (nextInsn != nullptr && nextInsn->GetMachineOpcode() == 0 && nextInsn != bb.GetLastInsn()) { + nextInsn = nextInsn->GetNext(); + } + bool moveSameReg = false; + if (nextInsn && nextInsn->GetIsSpill() && !IfOperandIsLiveAfterInsn(reg1, *nextInsn)) { + MOperator nextMop = nextInsn->GetMachineOpcode(); + if ((thisMop == MOP_xldr && nextMop == MOP_xstr) || (thisMop == MOP_wldr && nextMop == MOP_wstr) || + (thisMop == MOP_dldr && nextMop == MOP_dstr) || (thisMop == MOP_sldr && nextMop == MOP_sstr)) { + nextInsn->Insn::SetOperand(kInsnFirstOpnd, reg2); + moveSameReg = true; + } + } + if (!moveSameReg) { + (void)bb.InsertInsnAfter(*prevInsn, cgFunc->GetInsnBuilder()->BuildInsn(newOp, reg1, reg2)); + } + bb.RemoveInsn(insn); + } else if (reg1.GetRegisterNumber() == reg2.GetRegisterNumber() && + base1->GetRegisterNumber() != reg2.GetRegisterNumber()) { + bb.RemoveInsn(insn); + } +} + +bool RemoveIncDecRefPattern::CheckCondition(Insn &insn) { + if (insn.GetMachineOpcode() != MOP_xbl) { + return false; + } + prevInsn = insn.GetPreviousMachineInsn(); + if (prevInsn == nullptr) { + return false; + } + MOperator prevMop = prevInsn->GetMachineOpcode(); + if (prevMop != MOP_xmovrr) { + return false; + } + auto &target = static_cast(insn.GetOperand(kInsnFirstOpnd)); + if (target.GetName() != "MCC_IncDecRef_NaiveRCFast") { + return false; + } + if (static_cast(prevInsn->GetOperand(kInsnFirstOpnd)).GetRegisterNumber() != R1 || + static_cast(prevInsn->GetOperand(kInsnSecondOpnd)).GetRegisterNumber() != R0) { + return false; + } + return true; +} + +void RemoveIncDecRefPattern::Run(BB &bb, Insn &insn) { + if (!CheckCondition(insn)) { + return; + } + bb.RemoveInsn(*prevInsn); + bb.RemoveInsn(insn); +} + +#ifdef USE_32BIT_REF +constexpr uint32 kRefSize = 32; +#else +constexpr uint32 kRefSize 
= 64; +#endif + +bool InlineReadBarriersPattern::CheckCondition(Insn &insn) { + /* Inline read barriers only enabled for GCONLY. */ + if (!CGOptions::IsGCOnly()) { + return false; + } + return true; +} + +void InlineReadBarriersPattern::Run(BB &bb, Insn &insn) { + if (!CheckCondition(insn)) { + return; + } + const std::string &barrierName = GetReadBarrierName(insn); + if (barrierName == kMccDummy) { + /* remove dummy call. */ + bb.RemoveInsn(insn); + } else { + /* replace barrier function call with load instruction. */ + bool isVolatile = (barrierName == kMccLoadRefV || barrierName == kMccLoadRefVS); + bool isStatic = (barrierName == kMccLoadRefS || barrierName == kMccLoadRefVS); + /* refSize is 32 if USE_32BIT_REF defined, otherwise 64. */ + const uint32 refSize = kRefSize; + auto *aarch64CGFunc = static_cast(cgFunc); + MOperator loadOp = GetLoadOperator(refSize, isVolatile); + RegOperand ®Op = aarch64CGFunc->GetOrCreatePhysicalRegisterOperand(R0, refSize, kRegTyInt); + AArch64reg addrReg = isStatic ? R0 : R1; + MemOperand &addr = aarch64CGFunc->CreateMemOpnd(addrReg, 0, refSize); + Insn &loadInsn = cgFunc->GetInsnBuilder()->BuildInsn(loadOp, regOp, addr); + bb.ReplaceInsn(insn, loadInsn); + } + bool isTailCall = (insn.GetMachineOpcode() == MOP_tail_call_opt_xbl); + if (isTailCall) { + /* add 'ret' instruction for tail call optimized load barrier. */ + Insn &retInsn = cgFunc->GetInsnBuilder()->BuildInsn(MOP_xret); + bb.AppendInsn(retInsn); + bb.SetKind(BB::kBBReturn); + } +} + +bool ReplaceDivToMultiPattern::CheckCondition(Insn &insn) { + prevInsn = insn.GetPreviousMachineInsn(); + if (prevInsn == nullptr) { + return false; + } + prePrevInsn = prevInsn->GetPreviousMachineInsn(); + auto &sdivOpnd1 = static_cast(insn.GetOperand(kInsnSecondOpnd)); + auto &sdivOpnd2 = static_cast(insn.GetOperand(kInsnThirdOpnd)); + if (sdivOpnd1.GetRegisterNumber() == sdivOpnd2.GetRegisterNumber() || sdivOpnd1.GetRegisterNumber() == R16 || + sdivOpnd2.GetRegisterNumber() == R16 || prePrevInsn == nullptr) { + return false; + } + MOperator prevMop = prevInsn->GetMachineOpcode(); + MOperator prePrevMop = prePrevInsn->GetMachineOpcode(); + if ((prevMop > 0) && (prevMop == MOP_wmovkri16) && (prePrevMop > 0) && (prePrevMop == MOP_wmovri32)) { + return true; + } + return false; +} + +void ReplaceDivToMultiPattern::Run(BB &bb, Insn &insn) { + if (CheckCondition(insn)) { + auto &sdivOpnd1 = static_cast(insn.GetOperand(kInsnSecondOpnd)); + auto &sdivOpnd2 = static_cast(insn.GetOperand(kInsnThirdOpnd)); + /* Check if dest operand of insn is idential with register of prevInsn and prePrevInsn. 
*/ + auto &prevReg = prevInsn->GetOperand(kInsnFirstOpnd); + auto &prePrevReg = prePrevInsn->GetOperand(kInsnFirstOpnd); + if (!prevReg.IsRegister() || + !prePrevReg.IsRegister() || + static_cast(prevReg).GetRegisterNumber() != sdivOpnd2.GetRegisterNumber() || + static_cast(prePrevReg).GetRegisterNumber() != sdivOpnd2.GetRegisterNumber()) { + return; + } + auto &prevLsl = static_cast(prevInsn->GetOperand(kInsnThirdOpnd)); + if (prevLsl.GetShiftAmount() != k16BitSize) { + return; + } + auto &prevImmOpnd = static_cast(prevInsn->GetOperand(kInsnSecondOpnd)); + auto &prePrevImmOpnd = static_cast(prePrevInsn->GetOperand(kInsnSecondOpnd)); + /* + * expect the immediate value of first mov is 0x086A0 which matches 0x186A0 + * because 0x10000 is ignored in 32 bits register + */ + if ((prevImmOpnd.GetValue() != 1) || (prePrevImmOpnd.GetValue() != 34464)) { + return; + } + auto *aarch64CGFunc = static_cast(cgFunc); + /* mov w16, #0x588f */ + RegOperand &tempOpnd = aarch64CGFunc->GetOrCreatePhysicalRegisterOperand(static_cast(R16), + k64BitSize, kRegTyInt); + /* create a immedate operand with this specific value */ + ImmOperand &multiplierLow = aarch64CGFunc->CreateImmOperand(0x588f, k32BitSize, false); + Insn &multiplierLowInsn = cgFunc->GetInsnBuilder()->BuildInsn(MOP_wmovri32, tempOpnd, multiplierLow); + bb.InsertInsnBefore(*prePrevInsn, multiplierLowInsn); + + /* + * movk w16, #0x4f8b, LSL #16 + * create a immedate operand with this specific value + */ + ImmOperand &multiplierHigh = aarch64CGFunc->CreateImmOperand(0x4f8b, k32BitSize, false); + BitShiftOperand *multiplierHighLsl = aarch64CGFunc->GetLogicalShiftLeftOperand(k16BitSize, true); + Insn &multiplierHighInsn = + cgFunc->GetInsnBuilder()->BuildInsn(MOP_wmovkri16, tempOpnd, multiplierHigh, *multiplierHighLsl); + bb.InsertInsnBefore(*prePrevInsn, multiplierHighInsn); + + /* smull x16, w0, w16 */ + Insn &newSmullInsn = + cgFunc->GetInsnBuilder()->BuildInsn(MOP_xsmullrrr, tempOpnd, sdivOpnd1, tempOpnd); + bb.InsertInsnBefore(*prePrevInsn, newSmullInsn); + + /* asr x16, x16, #32 */ + ImmOperand &dstLsrImmHigh = aarch64CGFunc->CreateImmOperand(k32BitSize, k32BitSize, false); + Insn &dstLsrInsnHigh = + cgFunc->GetInsnBuilder()->BuildInsn(MOP_xasrrri6, tempOpnd, tempOpnd, dstLsrImmHigh); + bb.InsertInsnBefore(*prePrevInsn, dstLsrInsnHigh); + + /* add x16, x16, w0, SXTW */ + Operand &sxtw = aarch64CGFunc->CreateExtendShiftOperand(ExtendShiftOperand::kSXTW, 0, 3); + Insn &addInsn = + cgFunc->GetInsnBuilder()->BuildInsn(MOP_xxwaddrrre, tempOpnd, tempOpnd, sdivOpnd1, sxtw); + bb.InsertInsnBefore(*prePrevInsn, addInsn); + + /* asr x16, x16, #17 */ + ImmOperand &dstLsrImmChange = aarch64CGFunc->CreateImmOperand(17, k32BitSize, false); + Insn &dstLsrInsnChange = + cgFunc->GetInsnBuilder()->BuildInsn(MOP_xasrrri6, tempOpnd, tempOpnd, dstLsrImmChange); + bb.InsertInsnBefore(*prePrevInsn, dstLsrInsnChange); + + /* add x2, x16, x0, LSR #31 */ + auto &sdivOpnd0 = static_cast(insn.GetOperand(kInsnFirstOpnd)); + regno_t sdivOpnd0RegNO = sdivOpnd0.GetRegisterNumber(); + RegOperand &extSdivO0 = + aarch64CGFunc->GetOrCreatePhysicalRegisterOperand(static_cast(sdivOpnd0RegNO), + k64BitSize, kRegTyInt); + + regno_t sdivOpnd1RegNum = sdivOpnd1.GetRegisterNumber(); + RegOperand &extSdivO1 = + aarch64CGFunc->GetOrCreatePhysicalRegisterOperand(static_cast(sdivOpnd1RegNum), + k64BitSize, kRegTyInt); + /* shift bit amount is thirty-one at this insn */ + BitShiftOperand &addLsrOpnd = aarch64CGFunc->CreateBitShiftOperand(BitShiftOperand::kLSR, 31, 6); + Insn &addLsrInsn = 
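+    /*
+     * Hedged summary: the sequence built above is the usual "magic number" strength
+     * reduction for signed division by a known constant: a 32x32->64 smull with the
+     * reciprocal constant, arithmetic shifts, and a final "add ..., LSR #31" to fold in
+     * the sign correction, so the original sdiv can be deleted below.
+     */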
cgFunc->GetInsnBuilder()->BuildInsn(MOP_xaddrrrs, extSdivO0, tempOpnd, extSdivO1, addLsrOpnd); + bb.InsertInsnBefore(*prePrevInsn, addLsrInsn); + + /* + * remove insns + * Check if x1 is used after sdiv insn, and if it is in live-out. + */ + if (sdivOpnd2.GetRegisterNumber() != sdivOpnd0.GetRegisterNumber()) { + if (IfOperandIsLiveAfterInsn(sdivOpnd2, insn)) { + /* Only remove div instruction. */ + bb.RemoveInsn(insn); + return; + } + } + + bb.RemoveInsn(*prePrevInsn); + bb.RemoveInsn(*prevInsn); + bb.RemoveInsn(insn); + } +} + +Insn *AndCmpBranchesToCsetAArch64::FindPreviousCmp(Insn &insn) const { + regno_t defRegNO = static_cast(insn.GetOperand(kInsnFirstOpnd)).GetRegisterNumber(); + for (Insn *curInsn = insn.GetPrev(); curInsn != nullptr; curInsn = curInsn->GetPrev()) { + if (!curInsn->IsMachineInstruction()) { + continue; + } + if (curInsn->GetMachineOpcode() == MOP_wcmpri || curInsn->GetMachineOpcode() == MOP_xcmpri) { + return curInsn; + } + /* + * if any def/use of CC or insn defReg between insn and curInsn, stop searching and return nullptr. + */ + if (curInsn->ScanReg(defRegNO) || + curInsn->ScanReg(kRFLAG)) { + return nullptr; + } + } + return nullptr; +} + +void AndCmpBranchesToCsetAArch64::Run(BB &bb, Insn &insn) { + /* prevInsn must be "cmp" insn */ + Insn *prevInsn = FindPreviousCmp(insn); + if (prevInsn == nullptr) { + return; + } + /* prevPrevInsn must be "and" insn */ + Insn *prevPrevInsn = prevInsn->GetPreviousMachineInsn(); + if (prevPrevInsn == nullptr || + (prevPrevInsn->GetMachineOpcode() != MOP_wandrri12 && prevPrevInsn->GetMachineOpcode() != MOP_xandrri13)) { + return; + } + + auto &csetCond = static_cast(insn.GetOperand(kInsnSecondOpnd)); + auto &cmpImm = static_cast(prevInsn->GetOperand(kInsnThirdOpnd)); + int64 cmpImmVal = cmpImm.GetValue(); + auto &andImm = static_cast(prevPrevInsn->GetOperand(kInsnThirdOpnd)); + int64 andImmVal = andImm.GetValue(); + if ((csetCond.GetCode() == CC_EQ && cmpImmVal == andImmVal) || + (csetCond.GetCode() == CC_NE && cmpImmVal == 0)) { + /* if flag_reg of "cmp" is live later, we can't remove cmp insn. */ + auto &flagReg = static_cast(prevInsn->GetOperand(kInsnFirstOpnd)); + if (IfOperandIsLiveAfterInsn(flagReg, insn)) { + return; + } + + auto &csetReg = static_cast(insn.GetOperand(kInsnFirstOpnd)); + auto &prevInsnSecondReg = prevInsn->GetOperand(kInsnSecondOpnd); + bool isRegDiff = !RegOperand::IsSameRegNO(csetReg, prevInsnSecondReg); + if (isRegDiff && IfOperandIsLiveAfterInsn(static_cast(prevInsnSecondReg), insn)) { + return; + } + if (andImmVal == 1) { + if (!RegOperand::IsSameRegNO(prevInsnSecondReg, prevPrevInsn->GetOperand(kInsnFirstOpnd))) { + return; + } + /* save the "and" insn only. */ + bb.RemoveInsn(insn); + bb.RemoveInsn(*prevInsn); + if (isRegDiff) { + prevPrevInsn->Insn::SetOperand(kInsnFirstOpnd, csetReg); + } + } else { + if (!RegOperand::IsSameReg(prevInsnSecondReg, prevPrevInsn->GetOperand(kInsnFirstOpnd)) || + !RegOperand::IsSameReg(prevInsnSecondReg, prevPrevInsn->GetOperand(kInsnSecondOpnd))) { + return; + } + + /* andImmVal is n power of 2 */ + int n = LogValueAtBase2(andImmVal); + if (n < 0) { + return; + } + + /* create ubfx insn */ + MOperator ubfxOp = (csetReg.GetSize() <= k32BitSize) ? 
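+      /*
+       * Illustrative rewrite for a single-bit mask (registers assumed):
+       *   and  w0, w1, #4
+       *   cmp  w0, #4
+       *   cset w0, eq        ==>   ubfx w0, w1, #2, #1
+       * i.e. testing one power-of-two bit and materialising 0/1 collapses to a bit extract.
+       */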
MOP_wubfxrri5i5 : MOP_xubfxrri6i6; + if (ubfxOp == MOP_wubfxrri5i5 && static_cast(n) >= k32BitSize) { + return; + } + auto &dstReg = static_cast(csetReg); + auto &srcReg = static_cast(prevInsnSecondReg); + auto *aarch64CGFunc = static_cast(&cgFunc); + ImmOperand &bitPos = aarch64CGFunc->CreateImmOperand(n, k8BitSize, false); + ImmOperand &bitSize = aarch64CGFunc->CreateImmOperand(1, k8BitSize, false); + Insn &ubfxInsn = cgFunc.GetInsnBuilder()->BuildInsn(ubfxOp, dstReg, srcReg, bitPos, bitSize); + bb.InsertInsnBefore(*prevPrevInsn, ubfxInsn); + bb.RemoveInsn(insn); + bb.RemoveInsn(*prevInsn); + bb.RemoveInsn(*prevPrevInsn); + } + } +} + +void AndCbzBranchesToTstAArch64::Run(BB &bb, Insn &insn) { + /* nextInsn must be "cbz" or "cbnz" insn */ + Insn *nextInsn = insn.GetNextMachineInsn(); + if (nextInsn == nullptr || + (nextInsn->GetMachineOpcode() != MOP_wcbz && nextInsn->GetMachineOpcode() != MOP_xcbz)) { + return; + } + auto &andRegOp = static_cast(insn.GetOperand(kInsnFirstOpnd)); + regno_t andRegNO1 = andRegOp.GetRegisterNumber(); + auto &cbzRegOp2 = static_cast(nextInsn->GetOperand(kInsnFirstOpnd)); + regno_t cbzRegNO2 = cbzRegOp2.GetRegisterNumber(); + if (andRegNO1 != cbzRegNO2) { + return; + } + /* If the reg will be used later, we shouldn't optimize the and insn here */ + if (IfOperandIsLiveAfterInsn(andRegOp, *nextInsn)) { + return; + } + /* build tst insn */ + Operand &andOpnd3 = insn.GetOperand(kInsnThirdOpnd); + auto &andRegOp2 = static_cast(insn.GetOperand(kInsnSecondOpnd)); + auto &andRegOp3 = static_cast(insn.GetOperand(kInsnThirdOpnd)); + MOperator newTstOp = MOP_undef; + if (andOpnd3.IsRegister()) { + newTstOp = (andRegOp2.GetSize() <= k32BitSize && andRegOp3.GetSize() <= k32BitSize) ? MOP_wtstrr : MOP_xtstrr; + } else { + newTstOp = (andRegOp2.GetSize() <= k32BitSize && andRegOp3.GetSize() <= k32BitSize) ? MOP_wtstri32 : MOP_xtstri64; + } + Operand &rflag = static_cast(&cgFunc)->GetOrCreateRflag(); + Insn &newInsnTst = cgFunc.GetInsnBuilder()->BuildInsn(newTstOp, rflag, andRegOp2, andOpnd3); + if (andOpnd3.IsImmediate()) { + if (!static_cast(andOpnd3).IsBitmaskImmediate(andRegOp2.GetSize())) { + return; + } + } + /* build beq insn */ + MOperator opCode = nextInsn->GetMachineOpcode(); + bool reverse = (opCode == MOP_xcbz || opCode == MOP_wcbz); + auto &label = static_cast(nextInsn->GetOperand(kInsnSecondOpnd)); + MOperator jmpOperator = reverse ? 
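+  /*
+   * Illustrative rewrite (label and registers assumed): when the and result is only used
+   * by the cbz, the flag-setting test form avoids the intermediate register, e.g.
+   *   and w0, w1, #15
+   *   cbz w0, .L1          ==>   tst w1, #15
+   *                              beq .L1
+   */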
MOP_beq : MOP_bne; + Insn &newInsnJmp = cgFunc.GetInsnBuilder()->BuildInsn(jmpOperator, rflag, label); + bb.ReplaceInsn(insn, newInsnTst); + bb.ReplaceInsn(*nextInsn, newInsnJmp); +} + +/* + * help function for DeleteMovAfterCbzOrCbnz + * input: + * bb: the bb to be checked out + * checkCbz: to check out BB end with cbz or cbnz, if cbz, input true + * opnd: for MOV reg, #0, opnd indicate reg + * return: + * according to cbz, return true if insn is cbz or cbnz and the first operand of cbz(cbnz) is same as input + * operand + */ +bool DeleteMovAfterCbzOrCbnzAArch64::PredBBCheck(BB &bb, bool checkCbz, const Operand &opnd) const { + if (bb.GetKind() != BB::kBBIf) { + return false; + } + + Insn *condBr = cgcfg->FindLastCondBrInsn(bb); + ASSERT(condBr != nullptr, "condBr must be found"); + if (!cgcfg->IsCompareAndBranchInsn(*condBr)) { + return false; + } + MOperator mOp = condBr->GetMachineOpcode(); + if (checkCbz && mOp != MOP_wcbz && mOp != MOP_xcbz) { + return false; + } + if (!checkCbz && mOp != MOP_xcbnz && mOp != MOP_wcbnz) { + return false; + } + return RegOperand::IsSameRegNO(condBr->GetOperand(kInsnFirstOpnd), opnd); +} + +bool DeleteMovAfterCbzOrCbnzAArch64::OpndDefByMovZero(const Insn &insn) const { + MOperator defMop = insn.GetMachineOpcode(); + switch (defMop) { + case MOP_wmovri32: + case MOP_xmovri64: { + Operand &defOpnd = insn.GetOperand(kInsnSecondOpnd); + ASSERT(defOpnd.IsIntImmediate(), "expects ImmOperand"); + auto &defConst = static_cast(defOpnd); + int64 defConstValue = defConst.GetValue(); + if (defConstValue == 0) { + return true; + } + return false; + } + case MOP_xmovrr: + case MOP_wmovrr: { + Operand &secondOpnd = insn.GetOperand(kInsnSecondOpnd); + ASSERT(secondOpnd.IsRegister(), "expects RegOperand here"); + auto ®Opnd = static_cast(secondOpnd); + return IsZeroRegister(regOpnd); + } + default: + return false; + } +} + +/* check whether predefine insn of first operand of test_insn is exist in current BB */ +bool DeleteMovAfterCbzOrCbnzAArch64::NoPreDefine(Insn &testInsn) const { + Insn *nextInsn = nullptr; + for (Insn *insn = testInsn.GetBB()->GetFirstInsn(); insn != nullptr && insn != &testInsn; insn = nextInsn) { + nextInsn = insn->GetNextMachineInsn(); + if (!insn->IsMachineInstruction()) { + continue; + } + ASSERT(!insn->IsCall(), "CG internal error, call insn should not be at the middle of the BB."); + const InsnDesc *md = insn->GetDesc(); + uint32 opndNum = insn->GetOperandSize(); + for (uint32 i = 0; i < opndNum; ++i) { + Operand &opnd = insn->GetOperand(i); + if (!md->opndMD[i]->IsDef()) { + continue; + } + if (opnd.IsMemoryAccessOperand()) { + auto &memOpnd = static_cast(opnd); + RegOperand *base = memOpnd.GetBaseRegister(); + ASSERT(base != nullptr, "nullptr check"); + ASSERT(base->IsRegister(), "expects RegOperand"); + if (RegOperand::IsSameRegNO(*base, testInsn.GetOperand(kInsnFirstOpnd)) && + memOpnd.GetAddrMode() == MemOperand::kAddrModeBOi && + (memOpnd.IsPostIndexed() || memOpnd.IsPreIndexed())) { + return false; + } + } else if (opnd.IsList()) { + for (auto &operand : static_cast(opnd).GetOperands()) { + if (RegOperand::IsSameRegNO(testInsn.GetOperand(kInsnFirstOpnd), *operand)) { + return false; + } + } + } else if (opnd.IsRegister()) { + if (RegOperand::IsSameRegNO(testInsn.GetOperand(kInsnFirstOpnd), opnd)) { + return false; + } + } + } + } + return true; +} +void DeleteMovAfterCbzOrCbnzAArch64::ProcessBBHandle(BB *processBB, const BB &bb, const Insn &insn) const { + ASSERT(processBB != nullptr, "process_bb is null in ProcessBBHandle"); + 
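+  /*
+   * Hedged intent: in the successor that is only reached when the compared register is
+   * known to be zero (every other predecessor must end in a cbz on the same register),
+   * a "mov reg, #0" re-materialising that zero is redundant and is removed here.
+   */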
FOR_BB_INSNS_SAFE(processInsn, processBB, nextProcessInsn) { + nextProcessInsn = processInsn->GetNextMachineInsn(); + if (!processInsn->IsMachineInstruction()) { + continue; + } + /* register may be a caller save register */ + if (processInsn->IsCall()) { + break; + } + if (!OpndDefByMovZero(*processInsn) || !NoPreDefine(*processInsn) || + !RegOperand::IsSameRegNO(processInsn->GetOperand(kInsnFirstOpnd), insn.GetOperand(kInsnFirstOpnd))) { + continue; + } + bool toDoOpt = true; + MOperator condBrMop = insn.GetMachineOpcode(); + /* process elseBB, other preds must be cbz */ + if (condBrMop == MOP_wcbnz || condBrMop == MOP_xcbnz) { + /* check out all preds of process_bb */ + for (auto *processBBPred : processBB->GetPreds()) { + if (processBBPred == &bb) { + continue; + } + if (!PredBBCheck(*processBBPred, true, processInsn->GetOperand(kInsnFirstOpnd))) { + toDoOpt = false; + break; + } + } + } else { + /* process ifBB, other preds can be cbz or cbnz(one at most) */ + for (auto processBBPred : processBB->GetPreds()) { + if (processBBPred == &bb) { + continue; + } + /* for cbnz pred, there is one at most */ + if (!PredBBCheck(*processBBPred, processBBPred != processBB->GetPrev(), + processInsn->GetOperand(kInsnFirstOpnd))) { + toDoOpt = false; + break; + } + } + } + if (!toDoOpt) { + continue; + } + processBB->RemoveInsn(*processInsn); + } +} + +void DeleteMovAfterCbzOrCbnzAArch64::Run(BB &bb, Insn &insn) { + if (bb.GetKind() != BB::kBBIf) { + return; + } + if (&insn != cgcfg->FindLastCondBrInsn(bb)) { + return; + } + if (!cgcfg->IsCompareAndBranchInsn(insn)) { + return; + } + BB *processBB = nullptr; + if (bb.GetNext() == maplebe::CGCFG::GetTargetSuc(bb)) { + return; + } + + MOperator condBrMop = insn.GetMachineOpcode(); + if (condBrMop == MOP_wcbnz || condBrMop == MOP_xcbnz) { + processBB = bb.GetNext(); + } else { + processBB = maplebe::CGCFG::GetTargetSuc(bb); + } + + ProcessBBHandle(processBB, bb, insn); +} + +bool LoadFloatPointPattern::FindLoadFloatPoint(Insn &insn) { + MOperator mOp = insn.GetMachineOpcode(); + optInsn.clear(); + if (mOp != MOP_xmovzri16) { + return false; + } + optInsn.emplace_back(&insn); + + Insn *insnMov2 = insn.GetNextMachineInsn(); + if (insnMov2 == nullptr) { + return false; + } + if (insnMov2->GetMachineOpcode() != MOP_xmovkri16) { + return false; + } + optInsn.emplace_back(insnMov2); + + Insn *insnMov3 = insnMov2->GetNextMachineInsn(); + if (insnMov3 == nullptr) { + return false; + } + if (insnMov3->GetMachineOpcode() != MOP_xmovkri16) { + return false; + } + optInsn.emplace_back(insnMov3); + + Insn *insnMov4 = insnMov3->GetNextMachineInsn(); + if (insnMov4 == nullptr) { + return false; + } + if (insnMov4->GetMachineOpcode() != MOP_xmovkri16) { + return false; + } + optInsn.emplace_back(insnMov4); + return true; +} + +bool LoadFloatPointPattern::IsPatternMatch() { + int insnNum = 0; + Insn *insn1 = optInsn[insnNum]; + Insn *insn2 = optInsn[++insnNum]; + Insn *insn3 = optInsn[++insnNum]; + Insn *insn4 = optInsn[++insnNum]; + if ((static_cast(insn1->GetOperand(kInsnFirstOpnd)).GetRegisterNumber() != + static_cast(insn2->GetOperand(kInsnFirstOpnd)).GetRegisterNumber()) || + (static_cast(insn2->GetOperand(kInsnFirstOpnd)).GetRegisterNumber() != + static_cast(insn3->GetOperand(kInsnFirstOpnd)).GetRegisterNumber()) || + (static_cast(insn3->GetOperand(kInsnFirstOpnd)).GetRegisterNumber() != + static_cast(insn4->GetOperand(kInsnFirstOpnd)).GetRegisterNumber())) { + return false; + } + if ((static_cast(insn1->GetOperand(kInsnThirdOpnd)).GetShiftAmount() != 0) || + 
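+      /*
+       * Hedged example of the matched sequence (constants assumed): four movz/movk chunks
+       * at shifts 0/16/32/48 that build one 64-bit bit pattern, e.g.
+       *   movz x0, #0x1234
+       *   movk x0, #0x5678, LSL #16  ...
+       * are folded by Run below into a single literal-pool load (ldr from a label).
+       */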
(static_cast(insn2->GetOperand(kInsnThirdOpnd)).GetShiftAmount() != k16BitSize) || + (static_cast(insn3->GetOperand(kInsnThirdOpnd)).GetShiftAmount() != k32BitSize) || + (static_cast(insn4->GetOperand(kInsnThirdOpnd)).GetShiftAmount() != + (k16BitSize + k32BitSize))) { + return false; + } + return true; +} + +bool LoadFloatPointPattern::CheckCondition(Insn &insn) { + if (FindLoadFloatPoint(insn) && IsPatternMatch()) { + return true; + } + return false; +} + +void LoadFloatPointPattern::Run(BB &bb, Insn &insn) { + /* logical shift left values in three optimized pattern */ + if (CheckCondition(insn)) { + int insnNum = 0; + Insn *insn1 = optInsn[insnNum]; + Insn *insn2 = optInsn[++insnNum]; + Insn *insn3 = optInsn[++insnNum]; + Insn *insn4 = optInsn[++insnNum]; + auto &movConst1 = static_cast(insn1->GetOperand(kInsnSecondOpnd)); + auto &movConst2 = static_cast(insn2->GetOperand(kInsnSecondOpnd)); + auto &movConst3 = static_cast(insn3->GetOperand(kInsnSecondOpnd)); + auto &movConst4 = static_cast(insn4->GetOperand(kInsnSecondOpnd)); + /* movk/movz's immOpnd is 16-bit unsigned immediate */ + uint64 value = static_cast(movConst1.GetValue()) + + (static_cast(movConst2.GetValue()) << k16BitSize) + + (static_cast(movConst3.GetValue()) << k32BitSize) + + (static_cast(movConst4.GetValue()) << (k16BitSize + k32BitSize)); + + LabelIdx lableIdx = cgFunc->CreateLabel(); + AArch64CGFunc *aarch64CGFunc = static_cast(cgFunc); + LabelOperand &target = aarch64CGFunc->GetOrCreateLabelOperand(lableIdx); + cgFunc->InsertLabelMap(lableIdx, value); + Insn &newInsn = cgFunc->GetInsnBuilder()->BuildInsn(MOP_xldli, insn4->GetOperand(kInsnFirstOpnd), + target); + bb.InsertInsnAfter(*insn4, newInsn); + bb.RemoveInsn(*insn1); + bb.RemoveInsn(*insn2); + bb.RemoveInsn(*insn3); + bb.RemoveInsn(*insn4); + } +} + +void ReplaceCmpToCmnAArch64::Run(BB &bb, Insn &insn) { + AArch64CGFunc *aarch64CGFunc = static_cast(&cgFunc); + MOperator thisMop = insn.GetMachineOpcode(); + MOperator nextMop = MOP_undef; + MOperator newMop = MOP_undef; + uint64 negOne = UINT64_MAX; + switch (thisMop) { + case MOP_wmovri32: { + nextMop = MOP_wcmprr; + newMop = MOP_wcmnri; + negOne = UINT32_MAX; + break; + } + case MOP_xmovri64: { + nextMop = MOP_xcmprr; + newMop = MOP_xcmnri; + break; + } + default: + break; + } + Operand *opnd1OfMov = &(insn.GetOperand(kInsnFirstOpnd)); + Operand *opnd2OfMov = &(insn.GetOperand(kInsnSecondOpnd)); + if (opnd2OfMov->IsIntImmediate()) { + ImmOperand *immOpnd = static_cast(opnd2OfMov); + int64 iVal = immOpnd->GetValue(); + if ((kNegativeImmLowerLimit <= iVal && iVal < 0) || static_cast(iVal) == negOne) { + Insn *nextInsn = insn.GetNextMachineInsn(); /* get the next insn to judge if it is a cmp instruction. */ + if (nextInsn != nullptr) { + if (nextInsn->GetMachineOpcode() == nextMop) { + Operand *opndCmp2 = &(nextInsn->GetOperand(kInsnSecondOpnd)); + Operand *opndCmp3 = &(nextInsn->GetOperand(kInsnThirdOpnd)); /* get the third operand of cmp */ + /* if the first operand of mov equals the third operand of cmp, match the pattern. 
*/ + if (opnd1OfMov == opndCmp3) { + if (iVal == static_cast<int64>(negOne)) { + iVal = -1; + } + ImmOperand &newOpnd = aarch64CGFunc->CreateImmOperand(iVal * (-1), immOpnd->GetSize(), false); + Operand &regFlag = nextInsn->GetOperand(kInsnFirstOpnd); + bb.ReplaceInsn(*nextInsn, cgFunc.GetInsnBuilder()->BuildInsn(newMop, regFlag, *opndCmp2, newOpnd)); + } + } + } + } +} + +bool RemoveIncRefPattern::CheckCondition(Insn &insn) { + MOperator mOp = insn.GetMachineOpcode(); + if (mOp != MOP_xbl) { + return false; + } + auto &target = static_cast<FuncNameOperand&>(insn.GetOperand(kInsnFirstOpnd)); + if (target.GetName() != "MCC_IncDecRef_NaiveRCFast") { + return false; + } + insnMov2 = insn.GetPreviousMachineInsn(); + if (insnMov2 == nullptr) { + return false; + } + MOperator mopMov2 = insnMov2->GetMachineOpcode(); + if (mopMov2 != MOP_xmovrr) { + return false; + } + insnMov1 = insnMov2->GetPreviousMachineInsn(); + if (insnMov1 == nullptr) { + return false; + } + MOperator mopMov1 = insnMov1->GetMachineOpcode(); + if (mopMov1 != MOP_xmovrr) { + return false; + } + if (static_cast<RegOperand&>(insnMov1->GetOperand(kInsnSecondOpnd)).GetRegisterNumber() != + static_cast<RegOperand&>(insnMov2->GetOperand(kInsnSecondOpnd)).GetRegisterNumber()) { + return false; + } + auto &mov2Dest = static_cast<RegOperand&>(insnMov2->GetOperand(kInsnFirstOpnd)); + auto &mov1Dest = static_cast<RegOperand&>(insnMov1->GetOperand(kInsnFirstOpnd)); + if (mov1Dest.IsVirtualRegister() || mov2Dest.IsVirtualRegister() || mov1Dest.GetRegisterNumber() != R0 || + mov2Dest.GetRegisterNumber() != R1) { + return false; + } + return true; +} + +void RemoveIncRefPattern::Run(BB &bb, Insn &insn) { + if (!CheckCondition(insn)) { + return; + } + bb.RemoveInsn(insn); + bb.RemoveInsn(*insnMov2); + bb.RemoveInsn(*insnMov1); +} + +bool LongIntCompareWithZPattern::FindLondIntCmpWithZ(Insn &insn) { + MOperator thisMop = insn.GetMachineOpcode(); + optInsn.clear(); + /* fourth */ + if (thisMop != MOP_wcmpri) { + return false; + } + (void)optInsn.emplace_back(&insn); + + /* third */ + Insn *preInsn1 = insn.GetPreviousMachineInsn(); + if (preInsn1 == nullptr) { + return false; + } + MOperator preMop1 = preInsn1->GetMachineOpcode(); + if (preMop1 != MOP_wcsincrrrc) { + return false; + } + (void)optInsn.emplace_back(preInsn1); + + /* second */ + Insn *preInsn2 = preInsn1->GetPreviousMachineInsn(); + if (preInsn2 == nullptr) { + return false; + } + MOperator preMop2 = preInsn2->GetMachineOpcode(); + if (preMop2 != MOP_wcsinvrrrc) { + return false; + } + (void)optInsn.emplace_back(preInsn2); + + /* first */ + Insn *preInsn3 = preInsn2->GetPreviousMachineInsn(); + if (preInsn3 == nullptr) { + return false; + } + MOperator preMop3 = preInsn3->GetMachineOpcode(); + if (preMop3 != MOP_xcmpri) { + return false; + } + (void)optInsn.emplace_back(preInsn3); + return true; +} + +bool LongIntCompareWithZPattern::IsPatternMatch() { + constexpr int insnLen = 4; + if (optInsn.size() != insnLen) { + return false; + } + int insnNum = 0; + Insn *insn1 = optInsn[insnNum]; + Insn *insn2 = optInsn[++insnNum]; + Insn *insn3 = optInsn[++insnNum]; + Insn *insn4 = optInsn[++insnNum]; + ASSERT(insnNum == 3, " this specific case has three insns"); + if (IsZeroRegister(insn3->GetOperand(kInsnSecondOpnd)) && IsZeroRegister(insn3->GetOperand(kInsnThirdOpnd)) && + IsZeroRegister(insn2->GetOperand(kInsnThirdOpnd)) && + &(insn2->GetOperand(kInsnFirstOpnd)) == &(insn2->GetOperand(kInsnSecondOpnd)) && + static_cast<CondOperand&>(insn3->GetOperand(kInsnFourthOpnd)).GetCode() == CC_GE && + static_cast<CondOperand&>(insn2->GetOperand(kInsnFourthOpnd)).GetCode() == CC_LE && +
static_cast<ImmOperand&>(insn1->GetOperand(kInsnThirdOpnd)).GetValue() == 0 && + static_cast<ImmOperand&>(insn4->GetOperand(kInsnThirdOpnd)).GetValue() == 0) { + return true; + } + return false; +} + +bool LongIntCompareWithZPattern::CheckCondition(Insn &insn) { + if (FindLondIntCmpWithZ(insn) && IsPatternMatch()) { + return true; + } + return false; +} + +void LongIntCompareWithZPattern::Run(BB &bb, Insn &insn) { + /* found pattern */ + if (CheckCondition(insn)) { + Insn &newInsn = cgFunc->GetInsnBuilder()->BuildInsn(optInsn[3]->GetMachineOpcode(), + optInsn[3]->GetOperand(kInsnFirstOpnd), + optInsn[3]->GetOperand(kInsnSecondOpnd), + optInsn[3]->GetOperand(kInsnThirdOpnd)); + /* use newInsn to replace the third optInsn */ + bb.ReplaceInsn(*optInsn[0], newInsn); + optInsn.clear(); + } +} + +void ComplexMemOperandAArch64::Run(BB &bb, Insn &insn) { + AArch64CGFunc *aarch64CGFunc = static_cast<AArch64CGFunc*>(&cgFunc); + Insn *nextInsn = insn.GetNextMachineInsn(); + if (nextInsn == nullptr) { + return; + } + MOperator thisMop = insn.GetMachineOpcode(); + if (thisMop != MOP_xadrpl12) { + return; + } + + MOperator nextMop = nextInsn->GetMachineOpcode(); + if (nextMop > 0 && + ((nextMop >= MOP_wldrsb && nextMop <= MOP_dldp) || (nextMop >= MOP_wstrb && nextMop <= MOP_dstp))) { + /* Check if base register of nextInsn and the dest operand of insn are identical. */ + MemOperand *memOpnd = static_cast<MemOperand*>(nextInsn->GetMemOpnd()); + ASSERT(memOpnd != nullptr, "memOpnd is null in AArch64Peep::ComplexMemOperandAArch64"); + + /* Only for AddrMode_B_OI addressing mode. */ + if (memOpnd->GetAddrMode() != MemOperand::kAddrModeBOi) { + return; + } + + /* Only for intact memory addressing. */ + if (!memOpnd->IsIntactIndexed()) { + return; + } + + auto &regOpnd = static_cast<RegOperand&>(insn.GetOperand(kInsnFirstOpnd)); + + /* Avoid linking issues when object is not 16-byte aligned */ + if (memOpnd->GetSize() == k128BitSize) { + return; + } + + /* Check if dest operand of insn is identical with base register of nextInsn. */ + if (memOpnd->GetBaseRegister() != &regOpnd) { + return; + } + + /* Check if x0 is used after ldr insn, and if it is in live-out.
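* Explanatory note: if the adrp+add destination is still needed after the memory access, removing the add below would drop a definition that is still live, so the fold is skipped.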
*/ + if (IfOperandIsLiveAfterInsn(regOpnd, *nextInsn)) { + return; + } + + /* load store pairs cannot have relocation */ + if (nextInsn->IsLoadStorePair() && insn.GetOperand(kInsnThirdOpnd).IsStImmediate()) { + return; + } + + auto &stImmOpnd = static_cast<StImmOperand&>(insn.GetOperand(kInsnThirdOpnd)); + OfstOperand &offOpnd = aarch64CGFunc->GetOrCreateOfstOpnd( + stImmOpnd.GetOffset() + memOpnd->GetOffsetImmediate()->GetOffsetValue(), k32BitSize); + + /* do not guarantee rodata alignment at Os */ + if (CGOptions::OptimizeForSize() && stImmOpnd.GetSymbol()->IsReadOnly()) { + return; + } + + /* avoid relocation */ + if ((offOpnd.GetValue() % static_cast<int64>(kBitsPerByte)) != 0) { + return; + } + + if (cgFunc.GetMirModule().IsCModule()) { + Insn *prevInsn = insn.GetPrev(); + MOperator prevMop = prevInsn->GetMachineOpcode(); + if (prevMop != MOP_xadrp) { + return; + } else { + auto &prevStImmOpnd = static_cast<StImmOperand&>(prevInsn->GetOperand(kInsnSecondOpnd)); + prevStImmOpnd.SetOffset(offOpnd.GetValue()); + } + } + auto &newBaseOpnd = static_cast<RegOperand&>(insn.GetOperand(kInsnSecondOpnd)); + MemOperand &newMemOpnd = + aarch64CGFunc->GetOrCreateMemOpnd(MemOperand::kAddrModeLo12Li, memOpnd->GetSize(), + &newBaseOpnd, nullptr, &offOpnd, stImmOpnd.GetSymbol()); + + nextInsn->SetMemOpnd(&newMemOpnd); + bb.RemoveInsn(insn); + CHECK_FATAL(!CGOptions::IsLazyBinding() || cgFunc.GetCG()->IsLibcore(), + "this pattern can't be found in this phase"); + } +} + +void ComplexMemOperandPreAddAArch64::Run(BB &bb, Insn &insn) { + AArch64CGFunc *aarch64CGFunc = static_cast<AArch64CGFunc*>(&cgFunc); + Insn *nextInsn = insn.GetNextMachineInsn(); + if (nextInsn == nullptr) { + return; + } + MOperator thisMop = insn.GetMachineOpcode(); + if (thisMop != MOP_xaddrrr && thisMop != MOP_waddrrr) { + return; + } + MOperator nextMop = nextInsn->GetMachineOpcode(); + if (nextMop > 0 && + ((nextMop >= MOP_wldrsb && nextMop <= MOP_dldr) || (nextMop >= MOP_wstrb && nextMop <= MOP_dstr))) { + if (!IsMemOperandOptPattern(insn, *nextInsn)) { + return; + } + MemOperand *memOpnd = static_cast<MemOperand*>(nextInsn->GetMemOpnd()); + auto &newBaseOpnd = static_cast<RegOperand&>(insn.GetOperand(kInsnSecondOpnd)); + if (newBaseOpnd.GetSize() != k64BitSize) { + return; + } + auto &newIndexOpnd = static_cast<RegOperand&>(insn.GetOperand(kInsnThirdOpnd)); + if (newIndexOpnd.GetSize() <= k32BitSize) { + MemOperand &newMemOpnd = + aarch64CGFunc->GetOrCreateMemOpnd(MemOperand::kAddrModeBOrX, memOpnd->GetSize(), &newBaseOpnd, + &newIndexOpnd, 0, false); + nextInsn->SetOperand(kInsnSecondOpnd, newMemOpnd); + } else { + auto *newOfstOpnd = &aarch64CGFunc->GetOrCreateOfstOpnd(0, k32BitSize); + MemOperand &newMemOpnd = + aarch64CGFunc->GetOrCreateMemOpnd(MemOperand::kAddrModeBOrX, memOpnd->GetSize(), &newBaseOpnd, + &newIndexOpnd, newOfstOpnd, nullptr); + nextInsn->SetOperand(kInsnSecondOpnd, newMemOpnd); + } + bb.RemoveInsn(insn); + } +} + +static bool MayThrowBetweenInsn(const Insn &prevCallInsn, const Insn &currCallInsn) { + for (Insn *insn = prevCallInsn.GetNext(); insn != nullptr && insn != &currCallInsn; insn = insn->GetNext()) { + if (insn->MayThrow()) { + return true; + } + } + return false; +} + +/* + * mov R0, vreg1 / R0 -> objDesignateInsn + * add vreg2, vreg1, #imm -> fieldDesignateInsn + * mov R1, vreg2 -> fieldParamDefInsn + * mov R2, vreg3 -> fieldValueDefInsn + */ +bool WriteFieldCallPattern::WriteFieldCallOptPatternMatch(const Insn &writeFieldCallInsn, WriteRefFieldParam &param) { + Insn *fieldValueDefInsn = writeFieldCallInsn.GetPreviousMachineInsn(); + if (fieldValueDefInsn == nullptr ||
fieldValueDefInsn->GetMachineOpcode() != MOP_xmovrr) { + return false; + } + Operand &fieldValueDefInsnDestOpnd = fieldValueDefInsn->GetOperand(kInsnFirstOpnd); + auto &fieldValueDefInsnDestReg = static_cast(fieldValueDefInsnDestOpnd); + if (fieldValueDefInsnDestReg.GetRegisterNumber() != R2) { + return false; + } + paramDefInsns.emplace_back(fieldValueDefInsn); + param.fieldValue = &(fieldValueDefInsn->GetOperand(kInsnSecondOpnd)); + Insn *fieldParamDefInsn = fieldValueDefInsn->GetPreviousMachineInsn(); + if (fieldParamDefInsn == nullptr || fieldParamDefInsn->GetMachineOpcode() != MOP_xmovrr) { + return false; + } + Operand &fieldParamDestOpnd = fieldParamDefInsn->GetOperand(kInsnFirstOpnd); + auto &fieldParamDestReg = static_cast(fieldParamDestOpnd); + if (fieldParamDestReg.GetRegisterNumber() != R1) { + return false; + } + paramDefInsns.emplace_back(fieldParamDefInsn); + Insn *fieldDesignateInsn = fieldParamDefInsn->GetPreviousMachineInsn(); + if (fieldDesignateInsn == nullptr || fieldDesignateInsn->GetMachineOpcode() != MOP_xaddrri12) { + return false; + } + Operand &fieldParamDefSrcOpnd = fieldParamDefInsn->GetOperand(kInsnSecondOpnd); + Operand &fieldDesignateDestOpnd = fieldDesignateInsn->GetOperand(kInsnFirstOpnd); + if (!RegOperand::IsSameReg(fieldParamDefSrcOpnd, fieldDesignateDestOpnd)) { + return false; + } + Operand &fieldDesignateBaseOpnd = fieldDesignateInsn->GetOperand(kInsnSecondOpnd); + param.fieldBaseOpnd = &(static_cast(fieldDesignateBaseOpnd)); + auto &immOpnd = static_cast(fieldDesignateInsn->GetOperand(kInsnThirdOpnd)); + param.fieldOffset = immOpnd.GetValue(); + paramDefInsns.emplace_back(fieldDesignateInsn); + Insn *objDesignateInsn = fieldDesignateInsn->GetPreviousMachineInsn(); + if (objDesignateInsn == nullptr || objDesignateInsn->GetMachineOpcode() != MOP_xmovrr) { + return false; + } + Operand &objDesignateDestOpnd = objDesignateInsn->GetOperand(kInsnFirstOpnd); + auto &objDesignateDestReg = static_cast(objDesignateDestOpnd); + if (objDesignateDestReg.GetRegisterNumber() != R0) { + return false; + } + Operand &objDesignateSrcOpnd = objDesignateInsn->GetOperand(kInsnSecondOpnd); + if (RegOperand::IsSameReg(objDesignateDestOpnd, objDesignateSrcOpnd) || + !RegOperand::IsSameReg(objDesignateSrcOpnd, fieldDesignateBaseOpnd)) { + return false; + } + param.objOpnd = &(objDesignateInsn->GetOperand(kInsnSecondOpnd)); + paramDefInsns.emplace_back(objDesignateInsn); + return true; +} + +bool WriteFieldCallPattern::IsWriteRefFieldCallInsn(const Insn &insn) const { + if (!insn.IsCall() || insn.GetMachineOpcode() == MOP_xblr) { + return false; + } + Operand *targetOpnd = insn.GetCallTargetOperand(); + ASSERT(targetOpnd != nullptr, "targetOpnd must not be nullptr"); + if (!targetOpnd->IsFuncNameOpnd()) { + return false; + } + auto *target = static_cast(targetOpnd); + const MIRSymbol *funcSt = target->GetFunctionSymbol(); + ASSERT(funcSt->GetSKind() == kStFunc, "the kind of funcSt is unreasonable"); + const std::string &funcName = funcSt->GetName(); + return funcName == "MCC_WriteRefField" || funcName == "MCC_WriteVolatileField"; +} + +bool WriteFieldCallPattern::CheckCondition(Insn &insn) { + nextInsn = insn.GetNextMachineInsn(); + if (nextInsn == nullptr) { + return false; + } + if (!IsWriteRefFieldCallInsn(insn)) { + return false; + } + if (!hasWriteFieldCall) { + if (!WriteFieldCallOptPatternMatch(insn, firstCallParam)) { + return false; + } + prevCallInsn = &insn; + hasWriteFieldCall = true; + return false; + } + if (!WriteFieldCallOptPatternMatch(insn, 
currentCallParam)) { + return false; + } + if (prevCallInsn == nullptr || MayThrowBetweenInsn(*prevCallInsn, insn)) { + return false; + } + if (firstCallParam.objOpnd == nullptr || currentCallParam.objOpnd == nullptr || + currentCallParam.fieldBaseOpnd == nullptr) { + return false; + } + if (!RegOperand::IsSameReg(*firstCallParam.objOpnd, *currentCallParam.objOpnd)) { + return false; + } + return true; +} + +void WriteFieldCallPattern::Run(BB &bb, Insn &insn) { + paramDefInsns.clear(); + if (!CheckCondition(insn)) { + return; + } + auto *aarCGFunc = static_cast(cgFunc); + MemOperand &addr = + aarCGFunc->CreateMemOpnd(*currentCallParam.fieldBaseOpnd, currentCallParam.fieldOffset, k64BitSize); + Insn &strInsn = cgFunc->GetInsnBuilder()->BuildInsn(MOP_xstr, *currentCallParam.fieldValue, addr); + strInsn.AppendComment("store reference field"); + strInsn.MarkAsAccessRefField(true); + bb.InsertInsnAfter(insn, strInsn); + for (Insn *paramDefInsn : paramDefInsns) { + bb.RemoveInsn(*paramDefInsn); + } + bb.RemoveInsn(insn); + prevCallInsn = &strInsn; + nextInsn = strInsn.GetNextMachineInsn(); +} + +bool RemoveDecRefPattern::CheckCondition(Insn &insn) { + if (insn.GetMachineOpcode() != MOP_xbl) { + return false; + } + auto &target = static_cast(insn.GetOperand(kInsnFirstOpnd)); + if (target.GetName() != "MCC_DecRef_NaiveRCFast") { + return false; + } + prevInsn = insn.GetPreviousMachineInsn(); + if (prevInsn == nullptr) { + return false; + } + MOperator mopMov = prevInsn->GetMachineOpcode(); + if ((mopMov != MOP_xmovrr && mopMov != MOP_xmovri64) || + static_cast(prevInsn->GetOperand(kInsnFirstOpnd)).GetRegisterNumber() != R0) { + return false; + } + Operand &srcOpndOfMov = prevInsn->GetOperand(kInsnSecondOpnd); + if (!IsZeroRegister(srcOpndOfMov) && + !(srcOpndOfMov.IsImmediate() && static_cast(srcOpndOfMov).GetValue() == 0)) { + return false; + } + return true; +} + +void RemoveDecRefPattern::Run(BB &bb, Insn &insn) { + if (!CheckCondition(insn)) { + return; + } + bb.RemoveInsn(*prevInsn); + bb.RemoveInsn(insn); +} + +bool ReplaceIncDecWithIncPattern::CheckCondition(Insn &insn) { + if (insn.GetMachineOpcode() != MOP_xbl) { + return false; + } + target = &static_cast(insn.GetOperand(kInsnFirstOpnd)); + if (target->GetName() != "MCC_IncDecRef_NaiveRCFast") { + return false; + } + prevInsn = insn.GetPreviousMachineInsn(); + if (prevInsn == nullptr) { + return false; + } + MOperator mopMov = prevInsn->GetMachineOpcode(); + if (mopMov != MOP_xmovrr) { + return false; + } + if (static_cast(prevInsn->GetOperand(kInsnFirstOpnd)).GetRegisterNumber() != R1 || + !IsZeroRegister(prevInsn->GetOperand(kInsnSecondOpnd))) { + return false; + } + return true; +} + +void ReplaceIncDecWithIncPattern::Run(BB &bb, Insn &insn) { + if (!CheckCondition(insn)) { + return; + } + std::string funcName = "MCC_IncRef_NaiveRCFast"; + GStrIdx strIdx = GlobalTables::GetStrTable().GetStrIdxFromName(funcName); + MIRSymbol *st = GlobalTables::GetGsymTable().GetSymbolFromStrIdx(strIdx, true); + if (st == nullptr) { + LogInfo::MapleLogger() << "WARNING: Replace IncDec With Inc fail due to no MCC_IncRef_NaiveRCFast func\n"; + return; + } + bb.RemoveInsn(*prevInsn); + target->SetFunctionSymbol(*st); +} + +void UbfxToUxtwPattern::Run(BB &bb, Insn &insn) { + if (!CheckCondition(insn)) { + return; + } + Insn *newInsn = &cgFunc->GetInsnBuilder()->BuildInsn( + MOP_xuxtw64, insn.GetOperand(kInsnFirstOpnd), insn.GetOperand(kInsnSecondOpnd)); + bb.ReplaceInsn(insn, *newInsn); + if (CG_PEEP_DUMP) { + std::vector prevs; + 
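/* note: record the original insn so DumpAfterPattern can show the before/after pair */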
prevs.emplace_back(&insn); + DumpAfterPattern(prevs, newInsn, nullptr); + } +} + +bool UbfxToUxtwPattern::CheckCondition(Insn &insn) { + ImmOperand &imm0 = static_cast(insn.GetOperand(kInsnThirdOpnd)); + ImmOperand &imm1 = static_cast(insn.GetOperand(kInsnFourthOpnd)); + if ((imm0.GetValue() != 0) || (imm1.GetValue() != k32BitSize)) { + return false; + } + return true; +} + +void UbfxAndCbzToTbzPattern::Run(BB &bb, Insn &insn) { + Operand &opnd2 = static_cast(insn.GetOperand(kInsnSecondOpnd)); + ImmOperand &imm3 = static_cast(insn.GetOperand(kInsnThirdOpnd)); + if (!CheckCondition(insn)) { + return; + } + auto &label = static_cast(useInsn->GetOperand(kInsnSecondOpnd)); + MOperator nextMop = useInsn->GetMachineOpcode(); + switch (nextMop) { + case MOP_wcbz: + case MOP_xcbz: + newMop = opnd2.GetSize() == k64BitSize ? MOP_xtbz : MOP_wtbz; + break; + case MOP_wcbnz: + case MOP_xcbnz: + newMop = opnd2.GetSize() == k64BitSize ? MOP_xtbnz : MOP_wtbnz; + break; + default: + return; + } + if (newMop == MOP_undef) { + return; + } + Insn *newInsn = &cgFunc->GetInsnBuilder()->BuildInsn(newMop, opnd2, imm3, label); + BB *useInsnBB = useInsn->GetBB(); + useInsnBB->ReplaceInsn(*useInsn, *newInsn); + /* update ssa info */ + ssaInfo->ReplaceInsn(*useInsn, *newInsn); + optSuccess = true; + if (CG_PEEP_DUMP) { + std::vector prevs; + (void)prevs.emplace_back(useInsn); + DumpAfterPattern(prevs, newInsn, nullptr); + } +} + +bool UbfxAndCbzToTbzPattern::CheckCondition(Insn &insn) { + ImmOperand &imm4 = static_cast(insn.GetOperand(kInsnFourthOpnd)); + RegOperand &opnd1 = static_cast(insn.GetOperand(kInsnFirstOpnd)); + InsnSet useInsns = GetAllUseInsn(opnd1); + if (useInsns.size() != 1) { + return false; + } + useInsn = *useInsns.begin(); + if (useInsn == nullptr) { + return false; + } + if (imm4.GetValue() == 1) { + switch (useInsn->GetMachineOpcode()) { + case MOP_wcbz: + case MOP_xcbz: + case MOP_wcbnz: + case MOP_xcbnz: + return true; + default: + break; + } + } + return false; +} + +bool ComplexExtendWordLslAArch64::IsExtendWordLslPattern(const Insn &insn) const { + Insn *nextInsn = insn.GetNext(); + if (nextInsn == nullptr) { + return false; + } + MOperator nextMop = nextInsn->GetMachineOpcode(); + if (nextMop != MOP_xlslrri6) { + return false; + } + return true; +} + +void ComplexExtendWordLslAArch64::Run(BB &bb, Insn &insn) { + if (!IsExtendWordLslPattern(insn)) { + return; + } + MOperator mop = insn.GetMachineOpcode(); + Insn *nextInsn = insn.GetNext(); + auto &nextOpnd2 = static_cast(nextInsn->GetOperand(kInsnThirdOpnd)); + if (nextOpnd2.GetValue() > k32BitSize) { + return; + } + auto &opnd0 = static_cast(insn.GetOperand(kInsnFirstOpnd)); + auto &nextOpnd1 = static_cast(nextInsn->GetOperand(kInsnSecondOpnd)); + regno_t regNO0 = opnd0.GetRegisterNumber(); + regno_t nextRegNO1 = nextOpnd1.GetRegisterNumber(); + if (regNO0 != nextRegNO1 || IfOperandIsLiveAfterInsn(opnd0, *nextInsn)) { + return; + } + auto &opnd1 = static_cast(insn.GetOperand(kInsnSecondOpnd)); + auto &nextOpnd0 = static_cast(nextInsn->GetOperand(kInsnFirstOpnd)); + regno_t regNO1 = opnd1.GetRegisterNumber(); + cgFunc.InsertExtendSet(regNO1); + MOperator mopNew = mop == MOP_xsxtw64 ? 
MOP_xsbfizrri6i6 : MOP_xubfizrri6i6; + auto *aarch64CGFunc = static_cast<AArch64CGFunc*>(&cgFunc); + RegOperand &reg1 = aarch64CGFunc->GetOrCreateVirtualRegisterOperand(regNO1); + ImmOperand &newImm = aarch64CGFunc->CreateImmOperand(k32BitSize, k6BitSize, false); + Insn &newInsnSbfiz = cgFunc.GetInsnBuilder()->BuildInsn(mopNew, + nextOpnd0, reg1, nextOpnd2, newImm); + bb.RemoveInsn(*nextInsn); + bb.ReplaceInsn(insn, newInsnSbfiz); +} +} /* namespace maplebe */ diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_pgo_gen.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_pgo_gen.cpp new file mode 100644 index 0000000000000000000000000000000000000000..68196db3bd7a135784da82cf0e8bd8cee1ce13ea --- /dev/null +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_pgo_gen.cpp @@ -0,0 +1,126 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "aarch64_pgo_gen.h" +#include "aarch64_cg.h" +namespace maplebe { + +void AArch64ProfGen::CreateClearIcall(BB &bb, const std::string &symName) { + auto *mirBuilder = f->GetFunction().GetModule()->GetMIRBuilder(); + auto *voidPtrType = GlobalTables::GetTypeTable().GetVoidPtr(); + auto *funcPtrSym = mirBuilder->GetOrCreateGlobalDecl(symName, *voidPtrType); + funcPtrSym->SetAttr(ATTR_weak); // weak symbol + CHECK_FATAL(!bb.IsEmpty() || bb.IsUnreachable(), "empty first BB?"); + + auto *a64Func = static_cast<AArch64CGFunc*>(f); + RegOperand &tempReg = a64Func->GetOrCreatePhysicalRegisterOperand(R16, k64BitSize, kRegTyInt); + StImmOperand &funcPtrSymOpnd = a64Func->CreateStImmOperand(*funcPtrSym, 0, 0); + auto &adrpInsn = f->GetInsnBuilder()->BuildInsn(MOP_xadrp, tempReg, funcPtrSymOpnd); + + Insn *firstI = bb.GetFirstMachineInsn(); + + while (firstI && firstI->GetNextMachineInsn() && firstI->GetNextMachineInsn()->IsStore()) { + firstI = firstI->GetPreviousMachineInsn(); + } + + if (firstI) { + (void)bb.InsertInsnAfter(*firstI, adrpInsn); + } else { + bb.InsertInsnBegin(adrpInsn); + } + + /* load func ptr */ + OfstOperand &funcPtrSymOfst = a64Func->CreateOfstOpnd(*funcPtrSym, 0, 0); + MemOperand *ldrOpnd = a64Func->CreateMemOperand(MemOperand::kAddrModeBOi, k64BitSize, tempReg, + nullptr, &funcPtrSymOfst, funcPtrSym); + auto &ldrSymInsn = f->GetInsnBuilder()->BuildInsn(MOP_xldr, tempReg, *ldrOpnd); + (void)bb.InsertInsnAfter(adrpInsn, ldrSymInsn); + + Insn *getAddressInsn = &ldrSymInsn; + if (CGOptions::IsPIC()) { + MemOperand *ldrOpndPic = a64Func->CreateMemOperand(MemOperand::kAddrModeBOi, k64BitSize, tempReg, + nullptr, nullptr, nullptr); + auto &ldrSymPicInsn = f->GetInsnBuilder()->BuildInsn(MOP_xldr, tempReg, *ldrOpndPic); + (void)bb.InsertInsnAfter(ldrSymInsn, ldrSymPicInsn); + getAddressInsn = &ldrSymPicInsn; + } + /* call weak symbol */ + auto &bInsn = f->GetInsnBuilder()->BuildInsn(MOP_xblr, tempReg); + (void)bb.InsertInsnAfter(*getAddressInsn, bInsn); +} + +void AArch64ProfGen::CreateIcallForWeakSymbol(BB &bb, const std::string &symName) { + auto *mirBuilder = f->GetFunction().GetModule()->GetMIRBuilder(); + auto *voidPtrType =
GlobalTables::GetTypeTable().GetVoidPtr(); + auto *funcPtrSym = mirBuilder->GetOrCreateGlobalDecl(symName, *voidPtrType); + funcPtrSym->SetAttr(ATTR_weak); // weak symbol + + CHECK_FATAL(!bb.IsEmpty() || bb.IsUnreachable(), "empty exit BB?"); + + Insn *lastI = bb.GetLastMachineInsn(); + if (lastI) { + CHECK_FATAL(lastI->GetMachineOpcode() == MOP_xret || lastI->GetMachineOpcode() == MOP_tail_call_opt_xbl, + "check this return bb"); + } + + while (lastI && lastI->GetPreviousMachineInsn() && + (lastI->GetPreviousMachineInsn()->IsLoad() || + lastI->GetMachineOpcode() == MOP_pseudo_ret_int || lastI->GetMachineOpcode() == MOP_pseudo_ret_float)) { + lastI = lastI->GetPreviousMachineInsn(); + } + + auto *a64Func = static_cast(f); + /* load func ptr page */ + RegOperand &tempReg = a64Func->GetOrCreatePhysicalRegisterOperand(R16, k64BitSize, kRegTyInt); + StImmOperand &funcPtrSymOpnd = a64Func->CreateStImmOperand(*funcPtrSym, 0, 0); + auto &adrpInsn = f->GetInsnBuilder()->BuildInsn(MOP_xadrp, tempReg, funcPtrSymOpnd); + + if (lastI) { + (void)bb.InsertInsnBefore(*lastI, adrpInsn); + } else { + bb.InsertInsnBegin(adrpInsn); + } + + /* load func ptr */ + OfstOperand &funcPtrSymOfst = a64Func->CreateOfstOpnd(*funcPtrSym, 0, 0); + MemOperand *ldrOpnd = a64Func->CreateMemOperand(MemOperand::kAddrModeBOi, k64BitSize, tempReg, + nullptr, &funcPtrSymOfst, funcPtrSym); + Insn &ldrSymInsn = f->GetInsnBuilder()->BuildInsn(MOP_xldr, tempReg, *ldrOpnd); + (void)bb.InsertInsnAfter(adrpInsn, ldrSymInsn); + + Insn *getAddressInsn = &ldrSymInsn; + if (CGOptions::IsPIC()) { + MemOperand *ldrOpndPic = a64Func->CreateMemOperand(MemOperand::kAddrModeBOi, k64BitSize, tempReg, + nullptr, nullptr, nullptr); + CHECK_FATAL(ldrOpndPic != nullptr, "create mem failed"); + Insn &ldrSymPicInsn = f->GetInsnBuilder()->BuildInsn(MOP_xldr, tempReg, *ldrOpndPic); + (void)bb.InsertInsnAfter(ldrSymInsn, ldrSymPicInsn); + getAddressInsn = &ldrSymPicInsn; + } + /* call weak symbol */ + Insn &bInsn = f->GetInsnBuilder()->BuildInsn(MOP_xblr, tempReg); + (void)bb.InsertInsnAfter(*getAddressInsn, bInsn); +} + +void AArch64ProfGen::InstrumentBB(BB &bb, MIRSymbol &countTab, uint32 offset) { + auto *a64Func = static_cast(f); + StImmOperand &funcPtrSymOpnd = a64Func->CreateStImmOperand(countTab, 0, 0); + CHECK_FATAL(offset <= kMaxPimm8, "profile BB number out of range!!!"); + /* skip size */ + uint32 ofStByteSize = (offset + 1U) << 3U; + ImmOperand &countOfst = a64Func->CreateImmOperand(ofStByteSize, k64BitSize, false); + auto &counterInsn = f->GetInsnBuilder()->BuildInsn(MOP_c_counter, funcPtrSymOpnd, countOfst); + bb.InsertInsnBegin(counterInsn); +} +} diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_phi_elimination.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_phi_elimination.cpp new file mode 100644 index 0000000000000000000000000000000000000000..d2ca7adab7cdca45f4060899a15833323e6952c1 --- /dev/null +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_phi_elimination.cpp @@ -0,0 +1,213 @@ +/* + * Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. 
+ * See the Mulan PSL v2 for more details. + */ +#include "aarch64_phi_elimination.h" +#include "aarch64_cg.h" + +namespace maplebe { +RegOperand &AArch64PhiEliminate::CreateTempRegForCSSA(RegOperand &oriOpnd) { + return *phiEliAlloc.New(GetAndIncreaseTempRegNO(), oriOpnd.GetSize(), oriOpnd.GetRegisterType()); +} + +Insn &AArch64PhiEliminate::CreateMov(RegOperand &destOpnd, RegOperand &fromOpnd) { + ASSERT(destOpnd.GetRegisterType() == fromOpnd.GetRegisterType(), "do not support this move in aarch64"); + bool is64bit = destOpnd.GetSize() == k64BitSize; + bool isFloat = destOpnd.IsOfFloatOrSIMDClass(); + Insn *insn = nullptr; + if (destOpnd.GetSize() == k128BitSize) { + ASSERT(isFloat, "unexpect 128bit int operand in aarch64"); + insn = &cgFunc->GetInsnBuilder()->BuildVectorInsn(MOP_vmovvv, AArch64CG::kMd[MOP_vmovvv]); + (void)insn->AddOpndChain(destOpnd).AddOpndChain(fromOpnd); + auto *vecSpecSrc = cgFunc->GetMemoryPool()->New(k128BitSize >> k3ByteSize, k8BitSize); + auto *vecSpecDest = cgFunc->GetMemoryPool()->New(k128BitSize >> k3ByteSize, k8BitSize); + (void)static_cast(insn)->PushRegSpecEntry(vecSpecDest).PushRegSpecEntry(vecSpecSrc); + } else { + insn = &cgFunc->GetInsnBuilder()->BuildInsn( + is64bit ? isFloat ? MOP_xvmovd : MOP_xmovrr : isFloat ? MOP_xvmovs : MOP_wmovrr, destOpnd, fromOpnd); + } + /* copy remat info */ + MaintainRematInfo(destOpnd, fromOpnd, true); + ASSERT(insn != nullptr, "create move insn failed"); + insn->SetIsPhiMovInsn(true); + return *insn; +} + +RegOperand &AArch64PhiEliminate::GetCGVirtualOpearnd(RegOperand &ssaOpnd, const Insn &curInsn) { + VRegVersion *ssaVersion = GetSSAInfo()->FindSSAVersion(ssaOpnd.GetRegisterNumber()); + ASSERT(ssaVersion != nullptr, "find ssaVersion failed"); + ASSERT(!ssaVersion->IsDeleted(), "ssaVersion has been deleted"); + RegOperand *regForRecreate = &ssaOpnd; + if (curInsn.GetMachineOpcode() != MOP_asm && + !curInsn.IsVectorOp() && + !curInsn.IsSpecialIntrinsic() && + ssaVersion->GetAllUseInsns().empty() && + !curInsn.IsAtomic()) { + CHECK_FATAL(false, "plz delete dead version"); + } + if (GetSSAInfo()->IsNoDefVReg(ssaOpnd.GetRegisterNumber())) { + regForRecreate = MakeRoomForNoDefVreg(ssaOpnd); + } else { + ASSERT(regForRecreate->IsSSAForm(), "Opnd is not in ssa form"); + } + RegOperand &newReg = cgFunc->GetOrCreateVirtualRegisterOperand(*regForRecreate); + + DUInsnInfo *defInfo = ssaVersion->GetDefInsnInfo(); + Insn *defInsn = defInfo != nullptr ? 
defInfo->GetInsn() : nullptr; + /* + * case1 : both def/use + * case2 : inline-asm (do not do aggressive optimization) "0" + * case3 : cc flag operand + */ + if (defInsn != nullptr) { + /* case 1 */ + uint32 defUseIdx = defInsn->GetBothDefUseOpnd(); + if (defUseIdx != kInsnMaxOpnd) { + if (defInfo->GetOperands().count(defUseIdx) > 0) { + CHECK_FATAL(defInfo->GetOperands()[defUseIdx] == 1, "multiple definiation"); + Operand &preOpnd = defInsn->GetOperand(defUseIdx); + ASSERT(preOpnd.IsRegister(), "unexpect operand type"); + newReg.SetRegisterNumber(RecursiveBothDU(static_cast(preOpnd))); + } + } + /* case 2 */ + if (defInsn->GetMachineOpcode() == MOP_asm) { + auto &inputList = static_cast(defInsn->GetOperand(kAsmInputListOpnd)); + VRegVersion *lastVersion = nullptr; + for (auto &inputReg : inputList.GetOperands()) { + lastVersion = GetSSAInfo()->FindSSAVersion(inputReg->GetRegisterNumber()); + if (lastVersion != nullptr && lastVersion->GetOriginalRegNO() == ssaVersion->GetOriginalRegNO()) { + break; + } + lastVersion = nullptr; + } + if (lastVersion != nullptr) { + newReg.SetRegisterNumber(lastVersion->GetSSAvRegOpnd()->GetRegisterNumber()); + } else { + const MapleMap& bindingMap = defInsn->GetRegBinding(); + auto pairIt = bindingMap.find(ssaVersion->GetOriginalRegNO()); + if (pairIt != bindingMap.end()) { + newReg.SetRegisterNumber(pairIt->second); + } + } + } + /* case 3 */ + if (ssaVersion->GetOriginalRegNO() == kRFLAG) { + newReg.SetRegisterNumber(kRFLAG); + } + } else { + newReg.SetRegisterNumber(ssaVersion->GetOriginalRegNO()); + } + MaintainRematInfo(newReg, ssaOpnd, true); + newReg.SetOpndOutOfSSAForm(); + return newReg; +} + +void AArch64PhiEliminate::AppendMovAfterLastVregDef(BB &bb, Insn &movInsn) const { + Insn *posInsn = nullptr; + bool isPosPhi = false; + FOR_BB_INSNS_REV(insn, &bb) { + if (insn->IsPhi()) { + posInsn = insn; + isPosPhi = true; + break; + } + if (!insn->IsMachineInstruction()) { + continue; + } + if (insn->IsBranch()) { + posInsn = insn; + continue; + } + break; + } + CHECK_FATAL(posInsn != nullptr, "insert mov for phi failed"); + if (isPosPhi) { + bb.InsertInsnAfter(*posInsn, movInsn); + } else { + bb.InsertInsnBefore(*posInsn, movInsn); + } +} + +/* copy remat info */ +void AArch64PhiEliminate::MaintainRematInfo(RegOperand &destOpnd, RegOperand &fromOpnd, bool isCopy) { + if (CGOptions::GetRematLevel() > 0 && isCopy) { + if (fromOpnd.IsSSAForm()) { + VRegVersion *fromSSAVersion = GetSSAInfo()->FindSSAVersion(fromOpnd.GetRegisterNumber()); + ASSERT(fromSSAVersion != nullptr, "fromSSAVersion should not be nullptr"); + regno_t rematRegNO = fromSSAVersion->GetOriginalRegNO(); + MIRPreg *fPreg = static_cast(cgFunc)->GetPseudoRegFromVirtualRegNO(rematRegNO); + if (fPreg != nullptr) { + PregIdx fPregIdx = cgFunc->GetFunction().GetPregTab()->GetPregIdxFromPregno( + static_cast(fPreg->GetPregNo())); + RecordRematInfo(destOpnd.GetRegisterNumber(), fPregIdx); + } + } else { + regno_t rematRegNO = fromOpnd.GetRegisterNumber(); + PregIdx fPreg = FindRematInfo(rematRegNO); + if (fPreg > 0) { + RecordRematInfo(destOpnd.GetRegisterNumber(), fPreg); + } + } + } +} + +void AArch64PhiEliminate::ReCreateRegOperand(Insn &insn) { + auto opndNum = static_cast(insn.GetOperandSize()); + for (int i = opndNum - 1; i >= 0; --i) { + Operand &opnd = insn.GetOperand(static_cast(i)); + A64OperandPhiElmVisitor a64OpndPhiElmVisitor(this, insn, i); + opnd.Accept(a64OpndPhiElmVisitor); + } +} + +void A64OperandPhiElmVisitor::Visit(RegOperand *v) { + if (v->IsSSAForm()) { + 
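/* rewrite the SSA-versioned register back to the underlying virtual register chosen by GetCGVirtualOpearnd */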
ASSERT(v->GetRegisterNumber() != kRFLAG, "both condi and reg"); + insn->SetOperand(idx, a64PhiEliminator->GetCGVirtualOpearnd(*v, *insn)); + } +} + +void A64OperandPhiElmVisitor::Visit(ListOperand *v) { + std::list tempRegStore; + auto& opndList = v->GetOperands(); + + while (!opndList.empty()) { + auto *regOpnd = opndList.front(); + opndList.pop_front(); + + if (regOpnd->IsSSAForm()) { + tempRegStore.push_back(&a64PhiEliminator->GetCGVirtualOpearnd(*regOpnd, *insn)); + } else { + tempRegStore.push_back(regOpnd); + } + } + + ASSERT(v->GetOperands().empty(), "need to clean list"); + v->GetOperands().assign(tempRegStore.begin(), tempRegStore.end()); +} + +void A64OperandPhiElmVisitor::Visit(MemOperand *a64MemOpnd) { + RegOperand *baseRegOpnd = a64MemOpnd->GetBaseRegister(); + RegOperand *indexRegOpnd = a64MemOpnd->GetIndexRegister(); + if ((baseRegOpnd != nullptr && baseRegOpnd->IsSSAForm()) || + (indexRegOpnd != nullptr && indexRegOpnd->IsSSAForm())) { + if (baseRegOpnd != nullptr && baseRegOpnd->IsSSAForm()) { + a64MemOpnd->SetBaseRegister(a64PhiEliminator->GetCGVirtualOpearnd(*baseRegOpnd, *insn)); + } + if (indexRegOpnd != nullptr && indexRegOpnd->IsSSAForm()) { + a64MemOpnd->SetIndexRegister(a64PhiEliminator->GetCGVirtualOpearnd(*indexRegOpnd, *insn)); + } + } +} +} diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_proepilog.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_proepilog.cpp new file mode 100644 index 0000000000000000000000000000000000000000..5918ed14881e9bb0e086ce04f3ce50d2c396b0ab --- /dev/null +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_proepilog.cpp @@ -0,0 +1,1192 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "aarch64_proepilog.h" +#include "aarch64_cg.h" +#include "cg_option.h" +#include "cgfunc.h" + +namespace maplebe { +using namespace maple; + +namespace { + +constexpr int32 kSoeChckOffset = 8192; + +enum RegsPushPop : uint8 { + kRegsPushOp, + kRegsPopOp +}; + +enum PushPopType : uint8 { + kPushPopSingle = 0, + kPushPopPair = 1 +}; + +MOperator pushPopOps[kRegsPopOp + 1][kRegTyFloat + 1][kPushPopPair + 1] = { + { /* push */ + { 0 }, /* undef */ + { /* kRegTyInt */ + MOP_xstr, /* single */ + MOP_xstp, /* pair */ + }, + { /* kRegTyFloat */ + MOP_dstr, /* single */ + MOP_dstp, /* pair */ + }, + }, + { /* pop */ + { 0 }, /* undef */ + { /* kRegTyInt */ + MOP_xldr, /* single */ + MOP_xldp, /* pair */ + }, + { /* kRegTyFloat */ + MOP_dldr, /* single */ + MOP_dldp, /* pair */ + }, + } +}; + +inline void AppendInstructionTo(Insn &insn, CGFunc &func) { + func.GetCurBB()->AppendInsn(insn); +} +} + +bool AArch64GenProEpilog::NeedProEpilog() { + if (cgFunc.GetMirModule().GetSrcLang() != kSrcLangC) { + return true; + } else if (cgFunc.GetFunction().GetAttr(FUNCATTR_varargs) || cgFunc.HasVLAOrAlloca()) { + return true; + } + bool funcHasCalls = false; + /* note that tailcall insn is not a call */ + FOR_ALL_BB(bb, &cgFunc) { + FOR_BB_INSNS_REV(insn, bb) { + if (insn->IsMachineInstruction() && insn->IsCall()) { + funcHasCalls = true; + } + } + } + auto &aarchCGFunc = static_cast(cgFunc); + const MapleVector ®sToRestore = (aarchCGFunc.GetProEpilogSavedRegs().empty()) ? + aarchCGFunc.GetCalleeSavedRegs() : aarchCGFunc.GetProEpilogSavedRegs(); + size_t calleeSavedRegSize = kTwoRegister; + CHECK_FATAL(regsToRestore.size() >= calleeSavedRegSize, "Forgot FP and LR ?"); + if (funcHasCalls || regsToRestore.size() > calleeSavedRegSize || aarchCGFunc.HasStackLoadStore() || + static_cast(cgFunc.GetMemlayout())->GetSizeOfLocals() > 0 || + cgFunc.GetFunction().GetAttr(FUNCATTR_callersensitive)) { + return true; + } + return false; +} +MemOperand *AArch64GenProEpilog::GetDownStack() { + auto &aarchCGFunc = static_cast(cgFunc); + uint64 vArea = 0; + if (cgFunc.GetMirModule().IsCModule() && cgFunc.GetFunction().GetAttr(FUNCATTR_varargs)) { + AArch64MemLayout *ml = static_cast(cgFunc.GetMemlayout()); + if (ml->GetSizeOfGRSaveArea() > 0) { + vArea += RoundUp(ml->GetSizeOfGRSaveArea(), kAarch64StackPtrAlignment); + } + if (ml->GetSizeOfVRSaveArea() > 0) { + vArea += RoundUp(ml->GetSizeOfVRSaveArea(), kAarch64StackPtrAlignment); + } + } + + int32 stkSize = static_cast(static_cast(cgFunc.GetMemlayout())->RealStackFrameSize()); + if (useFP) { + stkSize -= static_cast(static_cast(cgFunc.GetMemlayout())->SizeOfArgsToStackPass()); + } + int32 memSize = (stkSize - kOffset8MemPos) - static_cast(vArea); + MemOperand *downStk = aarchCGFunc.CreateStackMemOpnd(stackBaseReg, memSize, GetPointerSize() * kBitsPerByte); + if (downStk->GetMemVaryType() == kNotVary && + aarchCGFunc.IsImmediateOffsetOutOfRange(*downStk, k64BitSize)) { + downStk = &aarchCGFunc.SplitOffsetWithAddInstruction(*downStk, k64BitSize, R10); + } + return downStk; +} + +void AArch64GenProEpilog::GenStackGuard() { + auto &aarchCGFunc = static_cast(cgFunc); + aarchCGFunc.GetDummyBB()->ClearInsns(); + cgFunc.SetCurBB(*aarchCGFunc.GetDummyBB()); + + MIRSymbol *stkGuardSym = GlobalTables::GetGsymTable().GetSymbolFromStrIdx( + GlobalTables::GetStrTable().GetStrIdxFromName(std::string("__stack_chk_guard"))); + StImmOperand &stOpnd = aarchCGFunc.CreateStImmOperand(*stkGuardSym, 0, 0); + RegOperand &stAddrOpnd = + aarchCGFunc.GetOrCreatePhysicalRegisterOperand(R9, 
GetPointerSize() * kBitsPerByte, kRegTyInt); + aarchCGFunc.SelectAddrof(stAddrOpnd, stOpnd); + + MemOperand *guardMemOp = + aarchCGFunc.CreateMemOperand(MemOperand::kAddrModeBOi, GetPointerSize() * kBitsPerByte, + stAddrOpnd, nullptr, &aarchCGFunc.GetOrCreateOfstOpnd(0, k32BitSize), stkGuardSym); + MOperator mOp = aarchCGFunc.PickLdInsn(k64BitSize, PTY_u64); + Insn &insn = cgFunc.GetInsnBuilder()->BuildInsn(mOp, stAddrOpnd, *guardMemOp); + insn.SetDoNotRemove(true); + cgFunc.GetCurBB()->AppendInsn(insn); +} + +void AArch64GenProEpilog::AddStackGuard(BB &bb) { + if (!stackProtect) { + return; + } + BB *formerCurBB = cgFunc.GetCurBB(); + auto &aarchCGFunc = static_cast(cgFunc); + aarchCGFunc.GetDummyBB()->SetIsProEpilog(true); + GenStackGuard(); + RegOperand &stAddrOpnd = + aarchCGFunc.GetOrCreatePhysicalRegisterOperand(R9, GetPointerSize() * kBitsPerByte, kRegTyInt); + auto mOp = aarchCGFunc.PickStInsn(GetPointerSize() * kBitsPerByte, PTY_u64); + Insn &tmpInsn = cgFunc.GetInsnBuilder()->BuildInsn(mOp, stAddrOpnd, *GetDownStack()); + tmpInsn.SetDoNotRemove(true); + cgFunc.GetCurBB()->AppendInsn(tmpInsn); + + bb.InsertAtBeginning(*aarchCGFunc.GetDummyBB()); + aarchCGFunc.GetDummyBB()->SetIsProEpilog(false); + cgFunc.SetCurBB(*formerCurBB); +} + +BB &AArch64GenProEpilog::GenStackGuardCheckInsn(BB &bb) { + if (!stackProtect) { + return bb; + } + + BB *formerCurBB = cgFunc.GetCurBB(); + auto &aarchCGFunc = static_cast(cgFunc); + GenStackGuard(); + RegOperand &stAddrOpnd = + aarchCGFunc.GetOrCreatePhysicalRegisterOperand(R9, GetPointerSize() * kBitsPerByte, kRegTyInt); + RegOperand &checkOp = + aarchCGFunc.GetOrCreatePhysicalRegisterOperand(R10, GetPointerSize() * kBitsPerByte, kRegTyInt); + auto mOp = aarchCGFunc.PickLdInsn(GetPointerSize() * kBitsPerByte, PTY_u64); + Insn &newInsn = cgFunc.GetInsnBuilder()->BuildInsn(mOp, checkOp, *GetDownStack()); + newInsn.SetDoNotRemove(true); + cgFunc.GetCurBB()->AppendInsn(newInsn); + + cgFunc.SelectBxor(stAddrOpnd, stAddrOpnd, checkOp, PTY_u64); + LabelIdx failLable = aarchCGFunc.CreateLabel(); + aarchCGFunc.SelectCondGoto(aarchCGFunc.GetOrCreateLabelOperand(failLable), OP_brtrue, OP_ne, + stAddrOpnd, aarchCGFunc.CreateImmOperand(0, k64BitSize, false), PTY_u64, false); + + bb.AppendBBInsns(*(cgFunc.GetCurBB())); + + LabelIdx nextBBLableIdx = aarchCGFunc.CreateLabel(); + BB *nextBB = cgFunc.CreateNewBB(nextBBLableIdx, bb.IsUnreachable(), BB::kBBFallthru, bb.GetFrequency()); + bb.AppendBB(*nextBB); + bb.PushBackSuccs(*nextBB); + nextBB->PushBackPreds(bb); + if (cgFunc.GetLastBB() == &bb) { + cgFunc.SetLastBB(*nextBB); + } + + BB *newBB = aarchCGFunc.CreateNewBB(failLable, bb.IsUnreachable(), bb.GetKind(), bb.GetFrequency()); + cgFunc.SetCurBB(*newBB); + MIRSymbol *failFunc = GlobalTables::GetGsymTable().GetSymbolFromStrIdx( + GlobalTables::GetStrTable().GetStrIdxFromName(std::string("__stack_chk_fail"))); + ListOperand *srcOpnds = aarchCGFunc.CreateListOpnd(*cgFunc.GetFuncScopeAllocator()); + Insn &callInsn = aarchCGFunc.AppendCall(*failFunc, *srcOpnds); + callInsn.SetDoNotRemove(true); + nextBB->AppendBB(*newBB); + bb.PushBackSuccs(*newBB); + nextBB->PushBackSuccs(*newBB); + newBB->PushBackPreds(*nextBB); + newBB->PushBackPreds(bb); + + bb.SetKind(BB::kBBIf); + cgFunc.SetCurBB(*formerCurBB); + return *nextBB; +} + +MemOperand *AArch64GenProEpilog::SplitStpLdpOffsetForCalleeSavedWithAddInstruction(CGFunc &cgFunc, + const MemOperand &mo, uint32 bitLen, AArch64reg baseRegNum) { + auto &aarchCGFunc = static_cast(cgFunc); + CHECK_FATAL(mo.GetAddrMode() == 
MemOperand::kAddrModeBOi, "mode should be kAddrModeBOi"); + OfstOperand *ofstOp = mo.GetOffsetImmediate(); + int32 offsetVal = static_cast(ofstOp->GetOffsetValue()); + CHECK_FATAL(offsetVal > 0, "offsetVal should be greater than 0"); + CHECK_FATAL((static_cast(offsetVal) & 0x7) == 0, "(offsetVal & 0x7) should be equal to 0"); + /* + * Offset adjustment due to FP/SP has already been done + * in AArch64GenProEpilog::GeneratePushRegs() and AArch64GenProEpilog::GeneratePopRegs() + */ + RegOperand &br = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(baseRegNum, bitLen, kRegTyInt); + if (aarchCGFunc.GetSplitBaseOffset() == 0) { + aarchCGFunc.SetSplitBaseOffset(offsetVal); /* remember the offset; don't forget to clear it */ + ImmOperand &immAddEnd = aarchCGFunc.CreateImmOperand(offsetVal, k64BitSize, true); + RegOperand *origBaseReg = mo.GetBaseRegister(); + aarchCGFunc.SelectAdd(br, *origBaseReg, immAddEnd, PTY_i64); + } + offsetVal = offsetVal - aarchCGFunc.GetSplitBaseOffset(); + return &aarchCGFunc.CreateReplacementMemOperand(bitLen, br, offsetVal); +} + +void AArch64GenProEpilog::AppendInstructionPushPair(CGFunc &cgFunc, + AArch64reg reg0, AArch64reg reg1, RegType rty, int32 offset) { + auto &aarchCGFunc = static_cast(cgFunc); + MOperator mOp = pushPopOps[kRegsPushOp][rty][kPushPopPair]; + Operand &o0 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg0, GetPointerSize() * kBitsPerByte, rty); + Operand &o1 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg1, GetPointerSize() * kBitsPerByte, rty); + Operand *o2 = &aarchCGFunc.CreateStkTopOpnd(static_cast(offset), GetPointerSize() * kBitsPerByte); + + uint32 dataSize = GetPointerSize() * kBitsPerByte; + CHECK_FATAL(offset >= 0, "offset must >= 0"); + if (offset > kStpLdpImm64UpperBound) { + o2 = SplitStpLdpOffsetForCalleeSavedWithAddInstruction(cgFunc, *static_cast(o2), dataSize, R16); + } + Insn &pushInsn = cgFunc.GetInsnBuilder()->BuildInsn(mOp, o0, o1, *o2); + std::string comment = "SAVE CALLEE REGISTER PAIR"; + pushInsn.SetComment(comment); + AppendInstructionTo(pushInsn, cgFunc); +} + +void AArch64GenProEpilog::AppendInstructionPushSingle(CGFunc &cgFunc, + AArch64reg reg, RegType rty, int32 offset) { + auto &aarchCGFunc = static_cast(cgFunc); + MOperator mOp = pushPopOps[kRegsPushOp][rty][kPushPopSingle]; + Operand &o0 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg, GetPointerSize() * kBitsPerByte, rty); + Operand *o1 = &aarchCGFunc.CreateStkTopOpnd(static_cast(offset), GetPointerSize() * kBitsPerByte); + + MemOperand *aarchMemO1 = static_cast(o1); + uint32 dataSize = GetPointerSize() * kBitsPerByte; + if (aarchMemO1->GetMemVaryType() == kNotVary && + aarchCGFunc.IsImmediateOffsetOutOfRange(*aarchMemO1, dataSize)) { + o1 = &aarchCGFunc.SplitOffsetWithAddInstruction(*aarchMemO1, dataSize, R16); + } + + Insn &pushInsn = cgFunc.GetInsnBuilder()->BuildInsn(mOp, o0, *o1); + std::string comment = "SAVE CALLEE REGISTER"; + pushInsn.SetComment(comment); + AppendInstructionTo(pushInsn, cgFunc); +} + +Insn &AArch64GenProEpilog::AppendInstructionForAllocateOrDeallocateCallFrame(int64 argsToStkPassSize, + AArch64reg reg0, AArch64reg reg1, + RegType rty, bool isAllocate) { + auto &aarchCGFunc = static_cast(cgFunc); + MOperator mOp = isAllocate ? pushPopOps[kRegsPushOp][rty][kPushPopPair] : pushPopOps[kRegsPopOp][rty][kPushPopPair]; + uint8 size; + if (CGOptions::IsArm64ilp32()) { + size = k8ByteSize; + } else { + size = GetPointerSize(); + } + if (argsToStkPassSize <= kStrLdrImm64UpperBound - kOffset8MemPos) { + mOp = isAllocate ? 
pushPopOps[kRegsPushOp][rty][kPushPopSingle] : pushPopOps[kRegsPopOp][rty][kPushPopSingle]; + RegOperand &o0 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg0, size * kBitsPerByte, rty); + MemOperand *o2 = aarchCGFunc.CreateStackMemOpnd(RSP, static_cast(argsToStkPassSize), size * kBitsPerByte); + Insn &insn1 = cgFunc.GetInsnBuilder()->BuildInsn(mOp, o0, *o2); + AppendInstructionTo(insn1, cgFunc); + RegOperand &o1 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg1, size * kBitsPerByte, rty); + o2 = aarchCGFunc.CreateStackMemOpnd(RSP, static_cast(argsToStkPassSize + size), + size * kBitsPerByte); + Insn &insn2 = cgFunc.GetInsnBuilder()->BuildInsn(mOp, o1, *o2); + AppendInstructionTo(insn2, cgFunc); + return insn2; + } else { + RegOperand &oo = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(R9, size * kBitsPerByte, kRegTyInt); + ImmOperand &io1 = aarchCGFunc.CreateImmOperand(argsToStkPassSize, k64BitSize, true); + aarchCGFunc.SelectCopyImm(oo, io1, PTY_i64); + RegOperand &o0 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg0, size * kBitsPerByte, rty); + RegOperand &rsp = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(RSP, size * kBitsPerByte, kRegTyInt); + MemOperand *mo = aarchCGFunc.CreateMemOperand( + MemOperand::kAddrModeBOrX, size * kBitsPerByte, rsp, oo, 0); + Insn &insn1 = cgFunc.GetInsnBuilder()->BuildInsn(isAllocate ? MOP_xstr : MOP_xldr, o0, *mo); + AppendInstructionTo(insn1, cgFunc); + ImmOperand &io2 = aarchCGFunc.CreateImmOperand(size, k64BitSize, true); + aarchCGFunc.SelectAdd(oo, oo, io2, PTY_i64); + RegOperand &o1 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg1, size * kBitsPerByte, rty); + mo = aarchCGFunc.CreateMemOperand(MemOperand::kAddrModeBOrX, + size * kBitsPerByte, rsp, oo, 0); + Insn &insn2 = cgFunc.GetInsnBuilder()->BuildInsn(isAllocate ? MOP_xstr : MOP_xldr, o1, *mo); + AppendInstructionTo(insn2, cgFunc); + return insn2; + } +} + +Insn &AArch64GenProEpilog::CreateAndAppendInstructionForAllocateCallFrame(int64 argsToStkPassSize, + AArch64reg reg0, AArch64reg reg1, + RegType rty) { + auto &aarchCGFunc = static_cast(cgFunc); + CG *currCG = cgFunc.GetCG(); + MOperator mOp = pushPopOps[kRegsPushOp][rty][kPushPopPair]; + Insn *allocInsn = nullptr; + if (argsToStkPassSize > kStpLdpImm64UpperBound) { + allocInsn = &AppendInstructionForAllocateOrDeallocateCallFrame(argsToStkPassSize, reg0, reg1, rty, true); + } else { + Operand &o0 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg0, GetPointerSize() * kBitsPerByte, rty); + Operand &o1 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg1, GetPointerSize() * kBitsPerByte, rty); + Operand *o2 = aarchCGFunc.CreateStackMemOpnd(RSP, static_cast(argsToStkPassSize), + GetPointerSize() * kBitsPerByte); + allocInsn = &cgFunc.GetInsnBuilder()->BuildInsn(mOp, o0, o1, *o2); + AppendInstructionTo(*allocInsn, cgFunc); + } + if (currCG->InstrumentWithDebugTraceCall()) { + aarchCGFunc.AppendCall(*currCG->GetDebugTraceEnterFunction()); + } + return *allocInsn; +} + +void AArch64GenProEpilog::AppendInstructionAllocateCallFrame(AArch64reg reg0, AArch64reg reg1, RegType rty) { + auto &aarchCGFunc = static_cast(cgFunc); + CG *currCG = cgFunc.GetCG(); + if (currCG->GenerateVerboseCG()) { + auto &comment = cgFunc.GetOpndBuilder()->CreateComment("allocate activation frame"); + cgFunc.GetCurBB()->AppendInsn(cgFunc.GetInsnBuilder()->BuildCommentInsn(comment)); + } + + Insn *ipoint = nullptr; + /* + * stackFrameSize includes the size of args to stack-pass + * if a function has neither VLA nor alloca. 
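* (illustrative) e.g. with outgoing stack arguments the prologue below first subtracts the whole frame size from SP and then stores FP/LR at [sp, #argsToStkPassSize], see CreateAndAppendInstructionForAllocateCallFrame.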
+ */ + int32 stackFrameSize = static_cast( + static_cast(cgFunc.GetMemlayout())->RealStackFrameSize()); + int64 argsToStkPassSize = cgFunc.GetMemlayout()->SizeOfArgsToStackPass(); + /* + * ldp/stp's imm should be within -512 and 504; + * if stp's imm > 512, we fall back to the stp-sub version + */ + bool useStpSub = false; + int64 offset = 0; + if (!cgFunc.HasVLAOrAlloca() && argsToStkPassSize > 0) { + /* + * stack_frame_size == size of formal parameters + callee-saved (including FP/RL) + * + size of local vars + * + size of actuals + * (when passing more than 8 args, its caller's responsibility to + * allocate space for it. size of actuals represent largest such size in the function. + */ + Operand &spOpnd = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(RSP, k64BitSize, kRegTyInt); + Operand &immOpnd = aarchCGFunc.CreateImmOperand(stackFrameSize, k32BitSize, true); + aarchCGFunc.SelectSub(spOpnd, spOpnd, immOpnd, PTY_u64); + ipoint = cgFunc.GetCurBB()->GetLastInsn(); + } else { + if (stackFrameSize > kStpLdpImm64UpperBound) { + useStpSub = true; + offset = kOffset16MemPos; + stackFrameSize -= offset; + } else { + offset = stackFrameSize; + } + MOperator mOp = pushPopOps[kRegsPushOp][rty][kPushPopPair]; + RegOperand &o0 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg0, GetPointerSize() * kBitsPerByte, rty); + RegOperand &o1 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg1, GetPointerSize() * kBitsPerByte, rty); + MemOperand &o2 = aarchCGFunc.CreateCallFrameOperand(static_cast(-offset), GetPointerSize() * kBitsPerByte); + ipoint = &cgFunc.GetInsnBuilder()->BuildInsn(mOp, o0, o1, o2); + AppendInstructionTo(*ipoint, cgFunc); + if (currCG->InstrumentWithDebugTraceCall()) { + aarchCGFunc.AppendCall(*currCG->GetDebugTraceEnterFunction()); + } + } + ipoint->SetStackDef(true); + + if (!cgFunc.HasVLAOrAlloca() && argsToStkPassSize > 0) { + CHECK_FATAL(!useStpSub, "Invalid assumption"); + ipoint = &CreateAndAppendInstructionForAllocateCallFrame(argsToStkPassSize, reg0, reg1, rty); + } + + CHECK_FATAL(ipoint != nullptr, "ipoint should not be nullptr at this point"); + if (useStpSub) { + Operand &spOpnd = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(RSP, k64BitSize, kRegTyInt); + Operand &immOpnd = aarchCGFunc.CreateImmOperand(stackFrameSize, k32BitSize, true); + aarchCGFunc.SelectSub(spOpnd, spOpnd, immOpnd, PTY_u64); + ipoint = cgFunc.GetCurBB()->GetLastInsn(); + aarchCGFunc.SetUsedStpSubPairForCallFrameAllocation(true); + ipoint->SetStackDef(true); + } +} + +void AArch64GenProEpilog::AppendInstructionAllocateCallFrameDebug(AArch64reg reg0, AArch64reg reg1, RegType rty) { + auto &aarchCGFunc = static_cast(cgFunc); + CG *currCG = cgFunc.GetCG(); + if (currCG->GenerateVerboseCG()) { + auto &comment = cgFunc.GetOpndBuilder()->CreateComment("allocate activation frame for debugging"); + cgFunc.GetCurBB()->AppendInsn(cgFunc.GetInsnBuilder()->BuildCommentInsn(comment)); + } + + int32 stackFrameSize = static_cast( + static_cast(cgFunc.GetMemlayout())->RealStackFrameSize()); + int64 argsToStkPassSize = cgFunc.GetMemlayout()->SizeOfArgsToStackPass(); + + Insn *ipoint = nullptr; + + if (argsToStkPassSize > 0) { + Operand &spOpnd = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(RSP, k64BitSize, kRegTyInt); + Operand &immOpnd = aarchCGFunc.CreateImmOperand(stackFrameSize, k32BitSize, true); + aarchCGFunc.SelectSub(spOpnd, spOpnd, immOpnd, PTY_u64); + ipoint = cgFunc.GetCurBB()->GetLastInsn(); + ipoint->SetStackDef(true); + ipoint = &CreateAndAppendInstructionForAllocateCallFrame(argsToStkPassSize, 
reg0, reg1, rty); + CHECK_FATAL(ipoint != nullptr, "ipoint should not be nullptr at this point"); + } else { + bool useStpSub = false; + + if (stackFrameSize > kStpLdpImm64UpperBound) { + useStpSub = true; + RegOperand &spOpnd = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(RSP, k64BitSize, kRegTyInt); + ImmOperand &immOpnd = aarchCGFunc.CreateImmOperand(stackFrameSize, k32BitSize, true); + aarchCGFunc.SelectSub(spOpnd, spOpnd, immOpnd, PTY_u64); + ipoint = cgFunc.GetCurBB()->GetLastInsn(); + ipoint->SetStackDef(true); + } else { + MOperator mOp = pushPopOps[kRegsPushOp][rty][kPushPopPair]; + RegOperand &o0 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg0, GetPointerSize() * kBitsPerByte, rty); + RegOperand &o1 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg1, GetPointerSize() * kBitsPerByte, rty); + MemOperand &o2 = aarchCGFunc.CreateCallFrameOperand(-stackFrameSize, GetPointerSize() * kBitsPerByte); + ipoint = &cgFunc.GetInsnBuilder()->BuildInsn(mOp, o0, o1, o2); + AppendInstructionTo(*ipoint, cgFunc); + ipoint->SetStackDef(true); + } + + if (useStpSub) { + MOperator mOp = pushPopOps[kRegsPushOp][rty][kPushPopPair]; + RegOperand &o0 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg0, GetPointerSize() * kBitsPerByte, rty); + RegOperand &o1 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg1, GetPointerSize() * kBitsPerByte, rty); + MemOperand *o2 = aarchCGFunc.CreateStackMemOpnd(RSP, 0, GetPointerSize() * kBitsPerByte); + ipoint = &cgFunc.GetInsnBuilder()->BuildInsn(mOp, o0, o1, *o2); + AppendInstructionTo(*ipoint, cgFunc); + } + if (currCG->InstrumentWithDebugTraceCall()) { + aarchCGFunc.AppendCall(*currCG->GetDebugTraceEnterFunction()); + } + } +} + +/* + * From AArch64 Reference Manual + * C1.3.3 Load/Store Addressing Mode + * ... + * When stack alignment checking is enabled by system software and + * the base register is the SP, the current stack pointer must be + * initially quadword aligned, that is aligned to 16 bytes. Misalignment + * generates a Stack Alignment fault. The offset does not have to + * be a multiple of 16 bytes unless the specific Load/Store instruction + * requires this. SP cannot be used as a register offset. + */ +void AArch64GenProEpilog::GeneratePushRegs() { + auto &aarchCGFunc = static_cast<AArch64CGFunc&>(cgFunc); + CG *currCG = cgFunc.GetCG(); + const MapleVector<AArch64reg> &regsToSave = (aarchCGFunc.GetProEpilogSavedRegs().empty()) ?
+ aarchCGFunc.GetCalleeSavedRegs() : aarchCGFunc.GetProEpilogSavedRegs(); + + CHECK_FATAL(!regsToSave.empty(), "FP/LR not added to callee-saved list?"); + + AArch64reg intRegFirstHalf = kRinvalid; + AArch64reg fpRegFirstHalf = kRinvalid; + + if (currCG->GenerateVerboseCG()) { + auto &comment = cgFunc.GetOpndBuilder()->CreateComment("save callee-saved registers"); + cgFunc.GetCurBB()->AppendInsn(cgFunc.GetInsnBuilder()->BuildCommentInsn(comment)); + } + + /* + * Even if we don't use RFP, since we push a pair of registers in one instruction + * and the stack needs be aligned on a 16-byte boundary, push RFP as well if function has a call + * Make sure this is reflected when computing callee_saved_regs.size() + */ + if (!currCG->GenerateDebugFriendlyCode()) { + AppendInstructionAllocateCallFrame(R29, RLR, kRegTyInt); + } else { + AppendInstructionAllocateCallFrameDebug(R29, RLR, kRegTyInt); + } + + if (useFP) { + if (currCG->GenerateVerboseCG()) { + auto &comment = cgFunc.GetOpndBuilder()->CreateComment("copy SP to FP"); + cgFunc.GetCurBB()->AppendInsn(cgFunc.GetInsnBuilder()->BuildCommentInsn(comment)); + } + Operand &spOpnd = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(RSP, k64BitSize, kRegTyInt); + Operand &fpOpnd = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(stackBaseReg, k64BitSize, kRegTyInt); + int64 argsToStkPassSize = cgFunc.GetMemlayout()->SizeOfArgsToStackPass(); + bool isLmbc = cgFunc.GetMirModule().GetFlavor() == MIRFlavor::kFlavorLmbc; + if ((argsToStkPassSize > 0) || isLmbc) { + Operand *immOpnd; + if (isLmbc) { + int32 size = static_cast(static_cast(cgFunc.GetMemlayout())->RealStackFrameSize()); + immOpnd = &aarchCGFunc.CreateImmOperand(size, k32BitSize, true); + } else { + immOpnd = &aarchCGFunc.CreateImmOperand(argsToStkPassSize, k32BitSize, true); + } + if (!isLmbc || cgFunc.SeenFP() || cgFunc.GetFunction().GetAttr(FUNCATTR_varargs)) { + aarchCGFunc.SelectAdd(fpOpnd, spOpnd, *immOpnd, PTY_u64); + } + cgFunc.GetCurBB()->GetLastInsn()->SetFrameDef(true); + } else { + aarchCGFunc.SelectCopy(fpOpnd, PTY_u64, spOpnd, PTY_u64); + cgFunc.GetCurBB()->GetLastInsn()->SetFrameDef(true); + } + } + + MapleVector::const_iterator it = regsToSave.begin(); + /* skip the first two registers */ + CHECK_FATAL(*it == RFP, "The first callee saved reg is expected to be RFP"); + ++it; + CHECK_FATAL(*it == RLR, "The second callee saved reg is expected to be RLR"); + ++it; + + AArch64MemLayout *memLayout = static_cast(cgFunc.GetMemlayout()); + int32 offset; + int32 tmp; + if (cgFunc.GetMirModule().GetFlavor() == MIRFlavor::kFlavorLmbc) { + tmp = static_cast(memLayout->RealStackFrameSize() - + /* FP/LR */ + (aarchCGFunc.SizeOfCalleeSaved() - (kDivide2 * kIntregBytelen))); + offset = tmp - static_cast(memLayout->GetSizeOfLocals()); + /* SizeOfArgsToStackPass not deducted since + AdjustmentStackPointer() is not called for lmbc */ + } else { + tmp = static_cast(memLayout->RealStackFrameSize() - + /* FP/LR */ + (aarchCGFunc.SizeOfCalleeSaved() - (kDivide2 * kIntregBytelen))); + offset = tmp - static_cast(memLayout->SizeOfArgsToStackPass()); + } + + if (cgFunc.GetCG()->IsStackProtectorStrong() || cgFunc.GetCG()->IsStackProtectorAll()) { + offset -= kAarch64StackPtrAlignmentInt; + } + + if (cgFunc.GetMirModule().IsCModule() && + cgFunc.GetFunction().GetAttr(FUNCATTR_varargs) && + cgFunc.GetMirModule().GetFlavor() != MIRFlavor::kFlavorLmbc) { + /* GR/VR save areas are above the callee save area */ + AArch64MemLayout *ml = static_cast(cgFunc.GetMemlayout()); + auto saveareasize = 
static_cast(RoundUp(ml->GetSizeOfGRSaveArea(), GetPointerSize() * k2BitSize) + + RoundUp(ml->GetSizeOfVRSaveArea(), GetPointerSize() * k2BitSize)); + offset -= saveareasize; + } + + for (; it != regsToSave.end(); ++it) { + AArch64reg reg = *it; + CHECK_FATAL(reg != RFP, "stray RFP in callee_saved_list?"); + CHECK_FATAL(reg != RLR, "stray RLR in callee_saved_list?"); + RegType regType = AArch64isa::IsGPRegister(reg) ? kRegTyInt : kRegTyFloat; + AArch64reg &firstHalf = AArch64isa::IsGPRegister(reg) ? intRegFirstHalf : fpRegFirstHalf; + if (firstHalf == kRinvalid) { + /* remember it */ + firstHalf = reg; + } else { + AppendInstructionPushPair(cgFunc, firstHalf, reg, regType, offset); + GetNextOffsetCalleeSaved(offset); + firstHalf = kRinvalid; + } + } + + if (intRegFirstHalf != kRinvalid) { + AppendInstructionPushSingle(cgFunc, intRegFirstHalf, kRegTyInt, offset); + GetNextOffsetCalleeSaved(offset); + } + + if (fpRegFirstHalf != kRinvalid) { + AppendInstructionPushSingle(cgFunc, fpRegFirstHalf, kRegTyFloat, offset); + GetNextOffsetCalleeSaved(offset); + } + + /* + * in case we split stp/ldp instructions, + * so that we generate a load-into-base-register instruction + * for pop pairs as well. + */ + aarchCGFunc.SetSplitBaseOffset(0); +} + +void AArch64GenProEpilog::GeneratePushUnnamedVarargRegs() { + auto &aarchCGFunc = static_cast(cgFunc); + uint32 offset; + if (cgFunc.GetMirModule().IsCModule() && cgFunc.GetFunction().GetAttr(FUNCATTR_varargs)) { + AArch64MemLayout *memlayout = static_cast(cgFunc.GetMemlayout()); + uint8 size; + if (CGOptions::IsArm64ilp32()) { + size = k8ByteSize; + } else { + size = GetPointerSize(); + } + uint32 dataSizeBits = size * kBitsPerByte; + if (cgFunc.GetMirModule().GetFlavor() != MIRFlavor::kFlavorLmbc) { + offset = static_cast(memlayout->GetGRSaveAreaBaseLoc()); /* SP reference */ + } else { + offset = static_cast(memlayout->GetGRSaveAreaBaseLoc()) + + memlayout->SizeOfArgsToStackPass(); + } + if (memlayout->GetSizeOfGRSaveArea() % kAarch64StackPtrAlignment) { + offset += size; /* End of area should be aligned. 
Hole between VR and GR area */ + } + uint32 startRegno = k8BitSize - (memlayout->GetSizeOfGRSaveArea() / size); + ASSERT(startRegno <= k8BitSize, "Incorrect starting GR regno for GR Save Area"); + for (uint32 i = startRegno + static_cast(R0); i < static_cast(R8); i++) { + uint32 tmpOffset = 0; + if (CGOptions::IsBigEndian()) { + if ((dataSizeBits >> 3) < 8) { + tmpOffset += 8U - (dataSizeBits >> 3); + } + } + Operand *stackLoc = &aarchCGFunc.CreateStkTopOpnd(offset + tmpOffset, dataSizeBits); + RegOperand ® = + aarchCGFunc.GetOrCreatePhysicalRegisterOperand(static_cast(i), k64BitSize, kRegTyInt); + Insn &inst = cgFunc.GetInsnBuilder()->BuildInsn(aarchCGFunc.PickStInsn(dataSizeBits, PTY_i64), reg, *stackLoc); + cgFunc.GetCurBB()->AppendInsn(inst); + offset += size; + } + if (!CGOptions::UseGeneralRegOnly()) { + if (cgFunc.GetMirModule().GetFlavor() != MIRFlavor::kFlavorLmbc) { + offset = static_cast(memlayout->GetVRSaveAreaBaseLoc()); /* SP reference */ + } else { + offset = static_cast(memlayout->GetVRSaveAreaBaseLoc()) + + memlayout->SizeOfArgsToStackPass(); + } + startRegno = k8BitSize - (memlayout->GetSizeOfVRSaveArea() / (size * k2BitSize)); + ASSERT(startRegno <= k8BitSize, "Incorrect starting GR regno for VR Save Area"); + for (uint32 i = startRegno + static_cast(V0); i < static_cast(V8); i++) { + uint32 tmpOffset = 0; + if (CGOptions::IsBigEndian()) { + if ((dataSizeBits >> 3) < 16) { + tmpOffset += 16U - (dataSizeBits >> 3); + } + } + Operand *stackLoc = &aarchCGFunc.CreateStkTopOpnd(offset + tmpOffset, dataSizeBits); + RegOperand ® = + aarchCGFunc.GetOrCreatePhysicalRegisterOperand(static_cast(i), k64BitSize, kRegTyFloat); + Insn &inst = cgFunc.GetInsnBuilder()->BuildInsn(aarchCGFunc.PickStInsn(dataSizeBits, PTY_f64), reg, *stackLoc); + cgFunc.GetCurBB()->AppendInsn(inst); + offset += (size * k2BitSize); + } + } + } +} + +void AArch64GenProEpilog::AppendInstructionStackCheck(AArch64reg reg, RegType rty, int32 offset) { + auto &aarchCGFunc = static_cast(cgFunc); + CG *currCG = cgFunc.GetCG(); + /* sub x16, sp, #0x2000 */ + auto &x16Opnd = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg, k64BitSize, rty); + auto &spOpnd = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(RSP, k64BitSize, rty); + auto &imm1 = aarchCGFunc.CreateImmOperand(offset, k64BitSize, true); + aarchCGFunc.SelectSub(x16Opnd, spOpnd, imm1, PTY_u64); + + /* ldr wzr, [x16] */ + auto &wzr = cgFunc.GetZeroOpnd(k32BitSize); + auto &refX16 = aarchCGFunc.CreateMemOpnd(reg, 0, k64BitSize); + auto &soeInstr = cgFunc.GetInsnBuilder()->BuildInsn(MOP_wldr, wzr, refX16); + if (currCG->GenerateVerboseCG()) { + soeInstr.SetComment("soerror"); + } + soeInstr.SetDoNotRemove(true); + AppendInstructionTo(soeInstr, cgFunc); +} + +void AArch64GenProEpilog::GenerateProlog(BB &bb) { + auto &aarchCGFunc = static_cast(cgFunc); + CG *currCG = cgFunc.GetCG(); + BB *formerCurBB = cgFunc.GetCurBB(); + aarchCGFunc.GetDummyBB()->ClearInsns(); + aarchCGFunc.GetDummyBB()->SetIsProEpilog(true); + cgFunc.SetCurBB(*aarchCGFunc.GetDummyBB()); + + bool hasProEpilogue = cgFunc.GetHasProEpilogue(); + if (!hasProEpilogue) { + return; + } + + // insert .loc for function + if (currCG->GetCGOptions().WithLoc() && (!currCG->GetMIRModule()->IsCModule())) { + MIRFunction *func = &cgFunc.GetFunction(); + MIRSymbol *fSym = GlobalTables::GetGsymTable().GetSymbolFromStidx(func->GetStIdx().Idx()); + if (currCG->GetCGOptions().WithSrc()) { + uint32 tempmaxsize = static_cast(currCG->GetMIRModule()->GetSrcFileInfo().size()); + uint32 endfilenum = 
currCG->GetMIRModule()->GetSrcFileInfo()[tempmaxsize - 1].second; + if (fSym->GetSrcPosition().FileNum() != 0 && fSym->GetSrcPosition().FileNum() <= endfilenum) { + Operand *o0 = cgFunc.CreateDbgImmOperand(fSym->GetSrcPosition().FileNum()); + int64_t lineNum = fSym->GetSrcPosition().LineNum(); + if (lineNum == 0) { + if (cgFunc.GetFunction().GetAttr(FUNCATTR_native)) { + lineNum = 0xffffe; + } else { + lineNum = 0xffffd; + } + } + Operand *o1 = cgFunc.CreateDbgImmOperand(lineNum); + Operand *o2 = cgFunc.CreateDbgImmOperand(fSym->GetSrcPosition().Column()); + Insn &loc = cgFunc.GetInsnBuilder()->BuildDbgInsn( + mpldbg::OP_DBG_loc).AddOpndChain(*o0).AddOpndChain(*o1).AddOpndChain(*o2); + cgFunc.GetCurBB()->AppendInsn(loc); + } + } else { + Operand *o0 = cgFunc.CreateDbgImmOperand(1); + Operand *o1 = cgFunc.CreateDbgImmOperand(fSym->GetSrcPosition().MplLineNum()); + Operand *o2 = cgFunc.CreateDbgImmOperand(0); + Insn &loc = cgFunc.GetInsnBuilder()->BuildDbgInsn( + mpldbg::OP_DBG_loc).AddOpndChain(*o0).AddOpndChain(*o1).AddOpndChain(*o2); + cgFunc.GetCurBB()->AppendInsn(loc); + } + } + + const MapleVector ®sToSave = (aarchCGFunc.GetProEpilogSavedRegs().empty()) ? + aarchCGFunc.GetCalleeSavedRegs() : aarchCGFunc.GetProEpilogSavedRegs(); + if (!regsToSave.empty()) { + /* + * Among other things, push the FP & LR pair. + * FP/LR are added to the callee-saved list in AllocateRegisters() + * We add them to the callee-saved list regardless of UseFP() being true/false. + * Activation Frame is allocated as part of pushing FP/LR pair + */ + GeneratePushRegs(); + } else { + Operand &spOpnd = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(RSP, k64BitSize, kRegTyInt); + int32 stackFrameSize = static_cast( + static_cast(cgFunc.GetMemlayout())->RealStackFrameSize()); + if (stackFrameSize > 0) { + if (currCG->GenerateVerboseCG()) { + auto &comment = cgFunc.GetOpndBuilder()->CreateComment("allocate activation frame"); + cgFunc.GetCurBB()->AppendInsn(cgFunc.GetInsnBuilder()->BuildCommentInsn(comment)); + } + Operand &immOpnd = aarchCGFunc.CreateImmOperand(stackFrameSize, k32BitSize, true); + aarchCGFunc.SelectSub(spOpnd, spOpnd, immOpnd, PTY_u64); + cgFunc.GetCurBB()->GetLastInsn()->SetStackDef(true); + } + if (currCG->GenerateVerboseCG()) { + auto &comment = cgFunc.GetOpndBuilder()->CreateComment("copy SP to FP"); + cgFunc.GetCurBB()->AppendInsn(cgFunc.GetInsnBuilder()->BuildCommentInsn(comment)); + } + if (useFP) { + Operand &fpOpnd = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(stackBaseReg, k64BitSize, kRegTyInt); + bool isLmbc = cgFunc.GetMirModule().GetFlavor() == MIRFlavor::kFlavorLmbc; + int64 argsToStkPassSize = cgFunc.GetMemlayout()->SizeOfArgsToStackPass(); + if ((argsToStkPassSize > 0) || isLmbc) { + Operand *immOpnd; + if (isLmbc) { + int32 size = static_cast(static_cast(cgFunc.GetMemlayout())->RealStackFrameSize()); + immOpnd = &aarchCGFunc.CreateImmOperand(size, k32BitSize, true); + } else { + immOpnd = &aarchCGFunc.CreateImmOperand(argsToStkPassSize, k32BitSize, true); + } + aarchCGFunc.SelectAdd(fpOpnd, spOpnd, *immOpnd, PTY_u64); + cgFunc.GetCurBB()->GetLastInsn()->SetFrameDef(true); + } else { + aarchCGFunc.SelectCopy(fpOpnd, PTY_u64, spOpnd, PTY_u64); + cgFunc.GetCurBB()->GetLastInsn()->SetFrameDef(true); + } + } + } + GeneratePushUnnamedVarargRegs(); + if (currCG->DoCheckSOE()) { + AppendInstructionStackCheck(R16, kRegTyInt, kSoeChckOffset); + } + bb.InsertAtBeginning(*aarchCGFunc.GetDummyBB()); + cgFunc.SetCurBB(*formerCurBB); + aarchCGFunc.GetDummyBB()->SetIsProEpilog(false); +} + +void 
AArch64GenProEpilog::GenerateRet(BB &bb) { + bb.AppendInsn(cgFunc.GetInsnBuilder()->BuildInsn(MOP_xret)); +} + +/* + * If exitBB made the TailcallOpt(replace blr/bl with br/b), return true, we don't create ret insn. + * Otherwise, return false, create the ret insn. + */ +bool AArch64GenProEpilog::TestPredsOfRetBB(const BB &exitBB) { + AArch64MemLayout *ml = static_cast(cgFunc.GetMemlayout()); + if (cgFunc.GetMirModule().IsCModule() && + (cgFunc.GetFunction().GetAttr(FUNCATTR_varargs) || + ml->GetSizeOfLocals() > 0 || cgFunc.HasVLAOrAlloca())) { + return false; + } + const Insn *lastInsn = exitBB.GetLastInsn(); + while (lastInsn != nullptr && (!lastInsn->IsMachineInstruction() || + AArch64isa::IsPseudoInstruction(lastInsn->GetMachineOpcode()))) { + lastInsn = lastInsn->GetPrev(); + } + bool isTailCall = lastInsn == nullptr ? false : lastInsn->IsTailCall(); + return isTailCall; +} + +void AArch64GenProEpilog::AppendInstructionPopSingle(CGFunc &cgFunc, AArch64reg reg, RegType rty, int32 offset) { + auto &aarchCGFunc = static_cast(cgFunc); + MOperator mOp = pushPopOps[kRegsPopOp][rty][kPushPopSingle]; + Operand &o0 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg, GetPointerSize() * kBitsPerByte, rty); + Operand *o1 = &aarchCGFunc.CreateStkTopOpnd(static_cast(offset), GetPointerSize() * kBitsPerByte); + MemOperand *aarchMemO1 = static_cast(o1); + uint32 dataSize = GetPointerSize() * kBitsPerByte; + if (aarchMemO1->GetMemVaryType() == kNotVary && aarchCGFunc.IsImmediateOffsetOutOfRange(*aarchMemO1, dataSize)) { + o1 = &aarchCGFunc.SplitOffsetWithAddInstruction(*aarchMemO1, dataSize, R16); + } + + Insn &popInsn = cgFunc.GetInsnBuilder()->BuildInsn(mOp, o0, *o1); + popInsn.SetComment("RESTORE"); + cgFunc.GetCurBB()->AppendInsn(popInsn); +} + +void AArch64GenProEpilog::AppendInstructionPopPair(CGFunc &cgFunc, + AArch64reg reg0, AArch64reg reg1, RegType rty, int32 offset) { + auto &aarchCGFunc = static_cast(cgFunc); + MOperator mOp = pushPopOps[kRegsPopOp][rty][kPushPopPair]; + Operand &o0 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg0, GetPointerSize() * kBitsPerByte, rty); + Operand &o1 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg1, GetPointerSize() * kBitsPerByte, rty); + Operand *o2 = &aarchCGFunc.CreateStkTopOpnd(static_cast(offset), GetPointerSize() * kBitsPerByte); + + uint32 dataSize = GetPointerSize() * kBitsPerByte; + CHECK_FATAL(offset >= 0, "offset must >= 0"); + if (offset > kStpLdpImm64UpperBound) { + o2 = SplitStpLdpOffsetForCalleeSavedWithAddInstruction(cgFunc, + static_cast(*o2), dataSize, R16); + } + Insn &popInsn = cgFunc.GetInsnBuilder()->BuildInsn(mOp, o0, o1, *o2); + popInsn.SetComment("RESTORE RESTORE"); + cgFunc.GetCurBB()->AppendInsn(popInsn); +} + + +void AArch64GenProEpilog::AppendInstructionDeallocateCallFrame(AArch64reg reg0, AArch64reg reg1, RegType rty) { + auto &aarchCGFunc = static_cast(cgFunc); + MOperator mOp = pushPopOps[kRegsPopOp][rty][kPushPopPair]; + Operand &o0 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg0, GetPointerSize() * kBitsPerByte, rty); + Operand &o1 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg1, GetPointerSize() * kBitsPerByte, rty); + int32 stackFrameSize = static_cast( + static_cast(cgFunc.GetMemlayout())->RealStackFrameSize()); + int64 argsToStkPassSize = cgFunc.GetMemlayout()->SizeOfArgsToStackPass(); + /* + * ldp/stp's imm should be within -512 and 504; + * if ldp's imm > 504, we fall back to the ldp-add version + */ + bool useLdpAdd = false; + int32 offset = 0; + + Operand *o2 = nullptr; + if 
(!cgFunc.HasVLAOrAlloca() && argsToStkPassSize > 0) { + o2 = aarchCGFunc.CreateStackMemOpnd(RSP, static_cast(argsToStkPassSize), GetPointerSize() * kBitsPerByte); + } else { + if (stackFrameSize > kStpLdpImm64UpperBound) { + useLdpAdd = true; + offset = kOffset16MemPos; + stackFrameSize -= offset; + } else { + offset = stackFrameSize; + } + o2 = &aarchCGFunc.CreateCallFrameOperand(offset, GetPointerSize() * kBitsPerByte); + } + + if (useLdpAdd) { + Operand &spOpnd = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(RSP, k64BitSize, kRegTyInt); + Operand &immOpnd = aarchCGFunc.CreateImmOperand(stackFrameSize, k32BitSize, true); + aarchCGFunc.SelectAdd(spOpnd, spOpnd, immOpnd, PTY_u64); + } + + if (!cgFunc.HasVLAOrAlloca() && argsToStkPassSize > 0) { + CHECK_FATAL(!useLdpAdd, "Invalid assumption"); + if (argsToStkPassSize > kStpLdpImm64UpperBound) { + (void)AppendInstructionForAllocateOrDeallocateCallFrame(argsToStkPassSize, reg0, reg1, rty, false); + } else { + Insn &deallocInsn = cgFunc.GetInsnBuilder()->BuildInsn(mOp, o0, o1, *o2); + cgFunc.GetCurBB()->AppendInsn(deallocInsn); + } + Operand &spOpnd = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(RSP, k64BitSize, kRegTyInt); + Operand &immOpnd = aarchCGFunc.CreateImmOperand(stackFrameSize, k32BitSize, true); + aarchCGFunc.SelectAdd(spOpnd, spOpnd, immOpnd, PTY_u64); + } else { + Insn &deallocInsn = cgFunc.GetInsnBuilder()->BuildInsn(mOp, o0, o1, *o2); + cgFunc.GetCurBB()->AppendInsn(deallocInsn); + } +} + +void AArch64GenProEpilog::AppendInstructionDeallocateCallFrameDebug(AArch64reg reg0, AArch64reg reg1, RegType rty) { + auto &aarchCGFunc = static_cast(cgFunc); + MOperator mOp = pushPopOps[kRegsPopOp][rty][kPushPopPair]; + Operand &o0 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg0, GetPointerSize() * kBitsPerByte, rty); + Operand &o1 = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(reg1, GetPointerSize() * kBitsPerByte, rty); + int32 stackFrameSize = static_cast( + static_cast(cgFunc.GetMemlayout())->RealStackFrameSize()); + int32 argsToStkPassSize = static_cast(cgFunc.GetMemlayout()->SizeOfArgsToStackPass()); + /* + * ldp/stp's imm should be within -512 and 504; + * if ldp's imm > 504, we fall back to the ldp-add version + */ + bool isLmbc = cgFunc.GetMirModule().GetFlavor() == MIRFlavor::kFlavorLmbc; + if (cgFunc.HasVLAOrAlloca() || argsToStkPassSize == 0 || isLmbc) { + int32 lmbcOffset = 0; + if (!isLmbc) { + stackFrameSize -= argsToStkPassSize; + } else { + lmbcOffset = argsToStkPassSize; + } + if (stackFrameSize > kStpLdpImm64UpperBound || isLmbc) { + Operand *o2 = aarchCGFunc.CreateStackMemOpnd(RSP, (isLmbc ? 
lmbcOffset : 0), GetPointerSize() * kBitsPerByte); + Insn &deallocInsn = cgFunc.GetInsnBuilder()->BuildInsn(mOp, o0, o1, *o2); + cgFunc.GetCurBB()->AppendInsn(deallocInsn); + Operand &spOpnd = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(RSP, k64BitSize, kRegTyInt); + Operand &immOpnd = aarchCGFunc.CreateImmOperand(stackFrameSize, k32BitSize, true); + aarchCGFunc.SelectAdd(spOpnd, spOpnd, immOpnd, PTY_u64); + } else { + MemOperand &o2 = aarchCGFunc.CreateCallFrameOperand(stackFrameSize, GetPointerSize() * kBitsPerByte); + Insn &deallocInsn = cgFunc.GetInsnBuilder()->BuildInsn(mOp, o0, o1, o2); + cgFunc.GetCurBB()->AppendInsn(deallocInsn); + } + } else { + Operand *o2 = aarchCGFunc.CreateStackMemOpnd(RSP, static_cast(argsToStkPassSize), GetPointerSize() * kBitsPerByte); + if (argsToStkPassSize > kStpLdpImm64UpperBound) { + (void)AppendInstructionForAllocateOrDeallocateCallFrame(argsToStkPassSize, reg0, reg1, rty, false); + } else { + Insn &deallocInsn = cgFunc.GetInsnBuilder()->BuildInsn(mOp, o0, o1, *o2); + cgFunc.GetCurBB()->AppendInsn(deallocInsn); + } + + Operand &spOpnd = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(RSP, k64BitSize, kRegTyInt); + Operand &immOpnd = aarchCGFunc.CreateImmOperand(stackFrameSize, k32BitSize, true); + aarchCGFunc.SelectAdd(spOpnd, spOpnd, immOpnd, PTY_u64); + } +} + +void AArch64GenProEpilog::GeneratePopRegs() { + auto &aarchCGFunc = static_cast(cgFunc); + CG *currCG = cgFunc.GetCG(); + + const MapleVector ®sToRestore = (aarchCGFunc.GetProEpilogSavedRegs().empty()) ? + aarchCGFunc.GetCalleeSavedRegs() : aarchCGFunc.GetProEpilogSavedRegs(); + + CHECK_FATAL(!regsToRestore.empty(), "FP/LR not added to callee-saved list?"); + + AArch64reg intRegFirstHalf = kRinvalid; + AArch64reg fpRegFirstHalf = kRinvalid; + + if (currCG->GenerateVerboseCG()) { + auto &comment = cgFunc.GetOpndBuilder()->CreateComment("restore callee-saved registers"); + cgFunc.GetCurBB()->AppendInsn(cgFunc.GetInsnBuilder()->BuildCommentInsn(comment)); + } + + MapleVector::const_iterator it = regsToRestore.begin(); + /* + * Even if we don't use FP, since we push a pair of registers + * in a single instruction (i.e., stp) and the stack needs be aligned + * on a 16-byte boundary, push FP as well if the function has a call. 
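+   * For illustration only (exact operands depend on the frame layout), the paired
+   * save/restore is typically emitted as
+   *   stp x29, x30, [sp, #-<framesize>]!   /   ldp x29, x30, [sp], #<framesize>
+   * so that SP stays quadword (16-byte) aligned as the architecture requires.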
+ * Make sure this is reflected when computing calleeSavedRegs.size() + * skip the first two registers + */ + CHECK_FATAL(*it == RFP, "The first callee saved reg is expected to be RFP"); + ++it; + CHECK_FATAL(*it == RLR, "The second callee saved reg is expected to be RLR"); + ++it; + + AArch64MemLayout *memLayout = static_cast(cgFunc.GetMemlayout()); + int32 offset; + int32 tmp; + if (cgFunc.GetMirModule().GetFlavor() == MIRFlavor::kFlavorLmbc) { + tmp = static_cast(memLayout->RealStackFrameSize() - + /* FP/LR */ + (aarchCGFunc.SizeOfCalleeSaved() - (kDivide2 * kIntregBytelen))); + offset = tmp - static_cast(memLayout->GetSizeOfLocals()); + /* SizeOfArgsToStackPass not deducted since + AdjustmentStackPointer() is not called for lmbc */ + } else { + tmp = static_cast(static_cast(cgFunc.GetMemlayout())->RealStackFrameSize() - + /* for FP/LR */ + (aarchCGFunc.SizeOfCalleeSaved() - (kDivide2 * kIntregBytelen))); + offset = tmp - static_cast(memLayout->SizeOfArgsToStackPass()); + } + + if (cgFunc.GetCG()->IsStackProtectorStrong() || cgFunc.GetCG()->IsStackProtectorAll()) { + offset -= kAarch64StackPtrAlignmentInt; + } + + if (cgFunc.GetMirModule().IsCModule() && cgFunc.GetFunction().GetAttr(FUNCATTR_varargs) && + cgFunc.GetMirModule().GetFlavor() != MIRFlavor::kFlavorLmbc) { + /* GR/VR save areas are above the callee save area */ + AArch64MemLayout *ml = static_cast(cgFunc.GetMemlayout()); + auto saveareasize = static_cast(RoundUp(ml->GetSizeOfGRSaveArea(), GetPointerSize() * k2BitSize) + + RoundUp(ml->GetSizeOfVRSaveArea(), GetPointerSize() * k2BitSize)); + offset -= saveareasize; + } + + /* + * We are using a cleared dummy block; so insertPoint cannot be ret; + * see GenerateEpilog() + */ + for (; it != regsToRestore.end(); ++it) { + AArch64reg reg = *it; + CHECK_FATAL(reg != RFP, "stray RFP in callee_saved_list?"); + CHECK_FATAL(reg != RLR, "stray RLR in callee_saved_list?"); + + RegType regType = AArch64isa::IsGPRegister(reg) ? kRegTyInt : kRegTyFloat; + AArch64reg &firstHalf = AArch64isa::IsGPRegister(reg) ? intRegFirstHalf : fpRegFirstHalf; + if (firstHalf == kRinvalid) { + /* remember it */ + firstHalf = reg; + } else { + /* flush the pair */ + AppendInstructionPopPair(cgFunc, firstHalf, reg, regType, offset); + GetNextOffsetCalleeSaved(offset); + firstHalf = kRinvalid; + } + } + + if (intRegFirstHalf != kRinvalid) { + AppendInstructionPopSingle(cgFunc, intRegFirstHalf, kRegTyInt, offset); + GetNextOffsetCalleeSaved(offset); + } + + if (fpRegFirstHalf != kRinvalid) { + AppendInstructionPopSingle(cgFunc, fpRegFirstHalf, kRegTyFloat, offset); + GetNextOffsetCalleeSaved(offset); + } + + if (!currCG->GenerateDebugFriendlyCode()) { + AppendInstructionDeallocateCallFrame(R29, RLR, kRegTyInt); + } else { + AppendInstructionDeallocateCallFrameDebug(R29, RLR, kRegTyInt); + } + + /* + * in case we split stp/ldp instructions, + * so that we generate a load-into-base-register instruction + * for the next function, maybe? (seems not necessary, but...) 
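+   * Here "split" covers the case where the stp/ldp immediate would fall outside
+   * the encodable [-512, 504] range: the excess offset is first formed with an
+   * add into the scratch register (R16), and the cached split-base offset is
+   * reset so it cannot be reused by later code.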
+ */ + aarchCGFunc.SetSplitBaseOffset(0); +} + +void AArch64GenProEpilog::AppendJump(const MIRSymbol &funcSymbol) { + auto &aarchCGFunc = static_cast(cgFunc); + Operand &targetOpnd = aarchCGFunc.GetOrCreateFuncNameOpnd(funcSymbol); + cgFunc.GetCurBB()->AppendInsn(cgFunc.GetInsnBuilder()->BuildInsn(MOP_xuncond, targetOpnd)); +} + +void AArch64GenProEpilog::AppendBBtoEpilog(BB &epilogBB, BB &newBB) { + if (epilogBB.GetPreds().empty() && cgFunc.GetMirModule().IsCModule() && cgFunc.GetCG()->DoTailCall()) { + epilogBB.SetNeedRestoreCfi(false); + Insn &junk = cgFunc.GetInsnBuilder()->BuildInsn(MOP_pseudo_none); + epilogBB.AppendInsn(junk); + return; + } + FOR_BB_INSNS(insn, &newBB) { + insn->SetDoNotRemove(true); + } + Insn *lastInsn = epilogBB.GetLastInsn(); + while (lastInsn != nullptr && (!lastInsn->IsMachineInstruction() || + AArch64isa::IsPseudoInstruction(lastInsn->GetMachineOpcode()))) { + lastInsn = lastInsn->GetPrev(); + } + bool isTailCall = lastInsn == nullptr ? false : lastInsn->IsTailCall(); + if (isTailCall) { + Insn *retInsn = newBB.GetLastInsn(); + if (retInsn != nullptr && retInsn->GetMachineOpcode() == MOP_xret) { + newBB.RemoveInsn(*retInsn); + } + epilogBB.RemoveInsn(*lastInsn); + epilogBB.AppendBBInsns(newBB); + epilogBB.AppendInsn(*lastInsn); + } else { + epilogBB.AppendBBInsns(newBB); + } + epilogBB.SetNeedRestoreCfi(true);; +} + +void AArch64GenProEpilog::GenerateEpilog(BB &bb) { + if (!cgFunc.GetHasProEpilogue()) { + if (!bb.GetPreds().empty() && !TestPredsOfRetBB(bb)) { + GenerateRet(bb); + } + return; + } + /* generate stack protected instruction */ + BB &epilogBB = GenStackGuardCheckInsn(bb); + + auto &aarchCGFunc = static_cast(cgFunc); + CG *currCG = cgFunc.GetCG(); + BB *formerCurBB = cgFunc.GetCurBB(); + aarchCGFunc.GetDummyBB()->ClearInsns(); + aarchCGFunc.GetDummyBB()->SetIsProEpilog(true); + cgFunc.SetCurBB(*aarchCGFunc.GetDummyBB()); + + Operand &spOpnd = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(RSP, k64BitSize, kRegTyInt); + Operand &fpOpnd = aarchCGFunc.GetOrCreatePhysicalRegisterOperand(stackBaseReg, k64BitSize, kRegTyInt); + + if (cgFunc.HasVLAOrAlloca() && cgFunc.GetMirModule().GetFlavor() != MIRFlavor::kFlavorLmbc) { + aarchCGFunc.SelectCopy(spOpnd, PTY_u64, fpOpnd, PTY_u64); + } + + const MapleVector ®sToSave = (aarchCGFunc.GetProEpilogSavedRegs().empty()) ? + aarchCGFunc.GetCalleeSavedRegs() : aarchCGFunc.GetProEpilogSavedRegs(); + if (!regsToSave.empty()) { + GeneratePopRegs(); + } else { + auto stackFrameSize = static_cast(cgFunc.GetMemlayout())->RealStackFrameSize(); + if (stackFrameSize > 0) { + if (currCG->GenerateVerboseCG()) { + auto &comment = cgFunc.GetOpndBuilder()->CreateComment("pop up activation frame"); + cgFunc.GetCurBB()->AppendInsn(cgFunc.GetInsnBuilder()->BuildCommentInsn(comment)); + } + + if (cgFunc.HasVLAOrAlloca()) { + auto size = static_cast(cgFunc.GetMemlayout())->GetSegArgsToStkPass().GetSize(); + stackFrameSize = stackFrameSize < size ? 
0 : stackFrameSize - size; + } + + if (stackFrameSize > 0) { + Operand &immOpnd = aarchCGFunc.CreateImmOperand(stackFrameSize, k32BitSize, true); + aarchCGFunc.SelectAdd(spOpnd, spOpnd, immOpnd, PTY_u64); + } + } + } + + if (currCG->InstrumentWithDebugTraceCall()) { + AppendJump(*(currCG->GetDebugTraceExitFunction())); + } + + GenerateRet(*(cgFunc.GetCurBB())); + AppendBBtoEpilog(epilogBB, *cgFunc.GetCurBB()); + if (cgFunc.GetCurBB()->GetHasCfi()) { + epilogBB.SetHasCfi(); + } + + cgFunc.SetCurBB(*formerCurBB); + aarchCGFunc.GetDummyBB()->SetIsProEpilog(false); +} + +void AArch64GenProEpilog::GenerateEpilogForCleanup(BB &bb) { + auto &aarchCGFunc = static_cast(cgFunc); + CHECK_FATAL(!cgFunc.GetExitBBsVec().empty(), "exit bb size is zero!"); + if (cgFunc.GetExitBB(0)->IsUnreachable()) { + /* if exitbb is unreachable then exitbb can not be generated */ + GenerateEpilog(bb); + } else if (aarchCGFunc.NeedCleanup()) { /* bl to the exit epilogue */ + LabelOperand &targetOpnd = aarchCGFunc.GetOrCreateLabelOperand(cgFunc.GetExitBB(0)->GetLabIdx()); + bb.AppendInsn(cgFunc.GetInsnBuilder()->BuildInsn(MOP_xuncond, targetOpnd)); + } +} + +void AArch64GenProEpilog::Run() { + CHECK_FATAL(cgFunc.GetFunction().GetBody()->GetFirst()->GetOpCode() == OP_label, + "The first statement should be a label"); + stackProtect = cgFunc.GetNeedStackProtect(); + cgFunc.SetHasProEpilogue(NeedProEpilog()); + if (cgFunc.GetHasProEpilogue()) { + AddStackGuard(*(cgFunc.GetFirstBB())); + } + + if (cgFunc.IsExitBBsVecEmpty()) { + if (cgFunc.GetCleanupBB() != nullptr && cgFunc.GetCleanupBB()->GetPrev() != nullptr) { + cgFunc.PushBackExitBBsVec(*cgFunc.GetCleanupBB()->GetPrev()); + } else { + cgFunc.PushBackExitBBsVec(*cgFunc.GetLastBB()->GetPrev()); + } + } + + GenerateProlog(*(cgFunc.GetPrologureBB())); + + for (auto *exitBB : cgFunc.GetExitBBsVec()) { + GenerateEpilog(*exitBB); + } + + if (cgFunc.GetFunction().IsJava()) { + GenerateEpilogForCleanup(*(cgFunc.GetCleanupBB())); + } +} +} /* namespace maplebe */ diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_prop.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_prop.cpp new file mode 100644 index 0000000000000000000000000000000000000000..c009e7d9a62223e9391e4356ece4c709fa846e1b --- /dev/null +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_prop.cpp @@ -0,0 +1,2912 @@ +/* + * Copyright (c) [2021-2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include +#include "aarch64_isa.h" +#include "aarch64_cg.h" +#include "aarch64_reg_coalesce.h" +#include "aarch64_prop.h" + +namespace maplebe { + +#define PROP_DUMP CG_DEBUG_FUNC(cgFunc) + +bool MayOverflow(const ImmOperand &value1, const ImmOperand &value2, bool is64Bit, bool isAdd, bool isSigned) { + if (value1.GetVary() > 0 || value2.GetVary() > 0) { + return false; + } + int64 cstA = value1.GetValue(); + int64 cstB = value2.GetValue(); + if (isAdd) { + int64 res = static_cast(static_cast(cstA) + static_cast(cstB)); + if (!isSigned) { + return static_cast(res) < static_cast(cstA); + } + uint32 rightShiftNumToGetSignFlag = (is64Bit ? k64BitSize : k32BitSize) - 1; + return (static_cast(res) >> rightShiftNumToGetSignFlag != + static_cast(cstA) >> rightShiftNumToGetSignFlag) && + (static_cast(res) >> rightShiftNumToGetSignFlag != + static_cast(cstB) >> rightShiftNumToGetSignFlag); + } else { + /* sub */ + if (!isSigned) { + return cstA < cstB; + } + int64 res = static_cast(static_cast(cstA) - static_cast(cstB)); + uint32 rightShiftNumToGetSignFlag = (is64Bit ? k64BitSize : k32BitSize) - 1; + return (static_cast(cstA) >> rightShiftNumToGetSignFlag != + static_cast(cstB) >> rightShiftNumToGetSignFlag) && + (static_cast(res) >> rightShiftNumToGetSignFlag != + static_cast(cstA) >> rightShiftNumToGetSignFlag); + } +} + +bool AArch64Prop::IsInLimitCopyRange(VRegVersion *toBeReplaced) { + uint32 baseID = toBeReplaced->GetDefInsnInfo()->GetInsn()->GetId(); + MapleUnorderedMap &useList = toBeReplaced->GetAllUseInsns(); + for (auto it : useList) { + if (it.second->GetInsn()->GetId() - baseID > k16BitSize) { + return false; + } + } + return true; +} + +void AArch64Prop::CopyProp() { + PropOptimizeManager optManager; + optManager.Optimize(*cgFunc, GetSSAInfo(), GetRegll()); + optManager.Optimize(*cgFunc, GetSSAInfo()); + optManager.Optimize(*cgFunc, GetSSAInfo()); +} + +void AArch64Prop::TargetProp(Insn &insn) { + A64ConstProp a64ConstProp(*memPool, *cgFunc, *GetSSAInfo(), insn); + a64ConstProp.DoOpt(); + A64StrLdrProp a64StrLdrProp(*memPool, *cgFunc, *GetSSAInfo(), insn, *GetDce()); + a64StrLdrProp.DoOpt(); +} + +void A64ConstProp::DoOpt() { + if (curInsn->GetMachineOpcode() == MOP_wmovri32 || curInsn->GetMachineOpcode() == MOP_xmovri64) { + Operand &destOpnd = curInsn->GetOperand(kInsnFirstOpnd); + CHECK_FATAL(destOpnd.IsRegister(), "must be reg operand"); + auto &destReg = static_cast(destOpnd); + if (destReg.IsSSAForm()) { + VRegVersion *destVersion = ssaInfo->FindSSAVersion(destReg.GetRegisterNumber()); + ASSERT(destVersion != nullptr, "find Version failed"); + Operand &constOpnd = curInsn->GetOperand(kInsnSecondOpnd); + CHECK_FATAL(constOpnd.IsImmediate(), "must be imm operand"); + auto &immOperand = static_cast(constOpnd); + bool isZero = immOperand.IsZero(); + for (auto useDUInfoIt : destVersion->GetAllUseInsns()) { + if (isZero) { + ZeroRegProp(*useDUInfoIt.second, *destVersion->GetSSAvRegOpnd()); + destVersion->CheckDeadUse(*useDUInfoIt.second->GetInsn()); + } + (void)ConstProp(*useDUInfoIt.second, immOperand); + } + } + } +} + +void A64ConstProp::ZeroRegProp(DUInsnInfo &useDUInfo, RegOperand &toReplaceReg) const { + auto *useInsn = useDUInfo.GetInsn(); + const InsnDesc *md = &AArch64CG::kMd[(useInsn->GetMachineOpcode())]; + /* special case */ + bool isSpecficCase = useInsn->GetMachineOpcode() == MOP_wbfirri5i5 || useInsn->GetMachineOpcode() == MOP_xbfirri6i6; + isSpecficCase = (useDUInfo.GetOperands().size() == 1) && + (useDUInfo.GetOperands().begin()->first == kInsnSecondOpnd) && 
isSpecficCase; + if (useInsn->IsStore() || md->IsCondDef() || isSpecficCase) { + RegOperand &zeroOpnd = cgFunc->GetZeroOpnd(toReplaceReg.GetSize()); + for (auto &opndIt : as_const(useDUInfo.GetOperands())) { + if (useInsn->IsStore() && opndIt.first != 0) { + return; + } + Operand &opnd = useInsn->GetOperand(opndIt.first); + A64ReplaceRegOpndVisitor replaceRegOpndVisitor(*cgFunc, *useInsn, + opndIt.first, toReplaceReg, zeroOpnd); + opnd.Accept(replaceRegOpndVisitor); + useDUInfo.ClearDU(opndIt.first); + } + } +} + +MOperator A64ConstProp::GetReversalMOP(MOperator arithMop) { + switch (arithMop) { + case MOP_waddrri12: + return MOP_wsubrri12; + case MOP_xaddrri12: + return MOP_xsubrri12; + case MOP_xsubrri12: + return MOP_xaddrri12; + case MOP_wsubrri12: + return MOP_waddrri12; + default: + CHECK_FATAL(false, "NYI"); + break; + } + return MOP_undef; +} + +MOperator A64ConstProp::GetRegImmMOP(MOperator regregMop, bool withLeftShift) { + switch (regregMop) { + case MOP_xaddrrrs: + case MOP_xaddrrr: { + return withLeftShift ? MOP_xaddrri24 : MOP_xaddrri12; + } + case MOP_waddrrrs: + case MOP_waddrrr: { + return withLeftShift ? MOP_waddrri24 : MOP_waddrri12; + } + case MOP_xsubrrrs: + case MOP_xsubrrr: { + return withLeftShift ? MOP_xsubrri24 : MOP_xsubrri12; + } + case MOP_wsubrrrs: + case MOP_wsubrrr: { + return withLeftShift ? MOP_wsubrri24 : MOP_wsubrri12; + } + case MOP_xandrrr: + case MOP_xandrrrs: + return MOP_xandrri13; + case MOP_wandrrr: + case MOP_wandrrrs: + return MOP_wandrri12; + case MOP_xeorrrr: + case MOP_xeorrrrs: + return MOP_xeorrri13; + case MOP_weorrrr: + case MOP_weorrrrs: + return MOP_weorrri12; + case MOP_xiorrrr: + case MOP_xiorrrrs: + case MOP_xbfirri6i6: + return MOP_xiorrri13; + case MOP_wiorrrr: + case MOP_wiorrrrs: + case MOP_wbfirri5i5: + return MOP_wiorrri12; + case MOP_xmovrr: { + return MOP_xmovri64; + } + case MOP_wmovrr: { + return MOP_wmovri32; + } + default: + CHECK_FATAL(false, "NYI"); + break; + } + return MOP_undef; +} + +MOperator A64ConstProp::GetFoldMopAndVal(int64 &newVal, int64 constVal, const Insn &arithInsn) { + MOperator arithMop = arithInsn.GetMachineOpcode(); + MOperator newMop = MOP_undef; + switch (arithMop) { + case MOP_waddrrr: + case MOP_xaddrrr: { + newVal = constVal + constVal; + newMop = (arithMop == MOP_waddrrr) ? MOP_wmovri32 : MOP_xmovri64; + break; + } + case MOP_waddrrrs: + case MOP_xaddrrrs: { + auto &shiftOpnd = static_cast(arithInsn.GetOperand(kInsnFourthOpnd)); + uint32 amount = shiftOpnd.GetShiftAmount(); + BitShiftOperand::ShiftOp sOp = shiftOpnd.GetShiftOp(); + switch (sOp) { + case BitShiftOperand::kLSL: { + newVal = constVal + static_cast((static_cast(constVal) << amount)); + break; + } + case BitShiftOperand::kLSR: { + newVal = constVal + (static_cast(constVal) >> amount); + break; + } + case BitShiftOperand::kASR: { + newVal = constVal + (constVal >> amount); + break; + } + default: + CHECK_FATAL(false, "NYI"); + break; + } + newMop = (arithMop == MOP_waddrrrs) ? MOP_wmovri32 : MOP_xmovri64; + break; + } + case MOP_wsubrrr: + case MOP_xsubrrr: { + newVal = 0; + newMop = (arithMop == MOP_wsubrrr) ? 
MOP_wmovri32 : MOP_xmovri64; + break; + } + case MOP_wsubrrrs: + case MOP_xsubrrrs: { + auto &shiftOpnd = static_cast(arithInsn.GetOperand(kInsnFourthOpnd)); + uint32 amount = shiftOpnd.GetShiftAmount(); + BitShiftOperand::ShiftOp sOp = shiftOpnd.GetShiftOp(); + switch (sOp) { + case BitShiftOperand::kLSL: { + newVal = constVal - static_cast((static_cast(constVal) << amount)); + break; + } + case BitShiftOperand::kLSR: { + newVal = constVal - (static_cast(constVal) >> amount); + break; + } + case BitShiftOperand::kASR: { + newVal = constVal - (constVal >> amount); + break; + } + default: + CHECK_FATAL(false, "NYI"); + break; + } + newMop = (arithMop == MOP_wsubrrrs) ? MOP_wmovri32 : MOP_xmovri64; + break; + } + default: + ASSERT(false, "this case is not supported currently"); + break; + } + return newMop; +} + +void A64ConstProp::ReplaceInsnAndUpdateSSA(Insn &oriInsn, Insn &newInsn) const { + ssaInfo->ReplaceInsn(oriInsn, newInsn); + oriInsn.GetBB()->ReplaceInsn(oriInsn, newInsn); + /* dump insn replacement here */ +} + +bool A64ConstProp::MovConstReplace(DUInsnInfo &useDUInfo, ImmOperand &constOpnd) const { + Insn *useInsn = useDUInfo.GetInsn(); + MOperator curMop = useInsn->GetMachineOpcode(); + if (useDUInfo.GetOperands().size() == 1) { + MOperator newMop = GetRegImmMOP(curMop, false); + Operand &destOpnd = useInsn->GetOperand(kInsnFirstOpnd); + if (constOpnd.IsSingleInstructionMovable(destOpnd.GetSize())) { + auto useOpndInfoIt = useDUInfo.GetOperands().cbegin(); + uint32 useOpndIdx = useOpndInfoIt->first; + ASSERT(useOpndIdx == kInsnSecondOpnd, "invalid instruction in ssa form"); + if (useOpndIdx == kInsnSecondOpnd) { + Insn &newInsn = cgFunc->GetInsnBuilder()->BuildInsn(newMop, destOpnd, constOpnd); + ReplaceInsnAndUpdateSSA(*useInsn, newInsn); + return true; + } + } + } else { + ASSERT(false, "invalid instruction in ssa form"); + } + return false; +} + +bool A64ConstProp::ArithConstReplaceForOneOpnd(Insn &useInsn, DUInsnInfo &useDUInfo, + ImmOperand &constOpnd, ArithmeticType aT) { + MOperator curMop = useInsn.GetMachineOpcode(); + MOperator newMop = GetRegImmMOP(curMop, false); + auto useOpndInfoIt = useDUInfo.GetOperands().cbegin(); + uint32 useOpndIdx = useOpndInfoIt->first; + CHECK_FATAL(useOpndIdx == kInsnSecondOpnd || useOpndIdx == kInsnThirdOpnd, "check this insn"); + Insn *newInsn = nullptr; + if (static_cast(cgFunc)->IsOperandImmValid(newMop, &constOpnd, kInsnThirdOpnd)) { + if (useOpndIdx == kInsnThirdOpnd) { + newInsn = &cgFunc->GetInsnBuilder()->BuildInsn( + newMop, useInsn.GetOperand(kInsnFirstOpnd), useInsn.GetOperand(kInsnSecondOpnd), constOpnd); + } else if (useOpndIdx == kInsnSecondOpnd && aT == kAArch64Add) { /* swap operand due to legality in aarch */ + newInsn = &cgFunc->GetInsnBuilder()->BuildInsn( + newMop, useInsn.GetOperand(kInsnFirstOpnd), useInsn.GetOperand(kInsnThirdOpnd), constOpnd); + } + } + /* try aggressive opt in aarch64 add and sub */ + if (newInsn == nullptr && (aT == kAArch64Add || aT == kAArch64Sub)) { + auto *tempImm = static_cast(constOpnd.Clone(*constPropMp)); + /* try aarch64 imm shift mode */ + tempImm->SetValue(tempImm->GetValue() >> 12); + if (static_cast(cgFunc)->IsOperandImmValid(newMop, tempImm, kInsnThirdOpnd) && + CGOptions::GetInstance().GetOptimizeLevel() < CGOptions::kLevel0) { + ASSERT(false, "NIY"); + } + auto *zeroImm = &(static_cast(cgFunc)-> + CreateImmOperand(0, constOpnd.GetSize(), true)); + /* value in immOpnd is signed */ + if (MayOverflow(*zeroImm, constOpnd, constOpnd.GetSize() == 64, false, true)) { + return false; + } 
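+    /* Sketch of the reversal applied below (register names illustrative):
+     *   add w0, w1, #-100    // negative value is not encodable as an ADD immediate
+     *   ==> sub w0, w1, #100 // negate the constant and flip the opcode instead
+     */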
+ /* (constA - var) can not reversal to (var + (-constA)) */ + if (useOpndIdx == kInsnSecondOpnd && aT == kAArch64Sub) { + return false; + } + /* Addition and subtraction reversal */ + tempImm->SetValue(-constOpnd.GetValue()); + newMop = GetReversalMOP(newMop); + if (static_cast(cgFunc)->IsOperandImmValid(newMop, tempImm, kInsnThirdOpnd)) { + auto *cgImm = static_cast(tempImm->Clone(*cgFunc->GetMemoryPool())); + newInsn = &cgFunc->GetInsnBuilder()->BuildInsn( + newMop, useInsn.GetOperand(kInsnFirstOpnd), useInsn.GetOperand(kInsnSecondOpnd), *cgImm); + if (useOpndIdx == kInsnSecondOpnd) { /* swap operand due to legality in aarch */ + newInsn->SetOperand(kInsnSecondOpnd, useInsn.GetOperand(kInsnThirdOpnd)); + } + } + } + if (newInsn == nullptr) { + return false; + } + ReplaceInsnAndUpdateSSA(useInsn, *newInsn); + return true; +} + +bool A64ConstProp::ArithmeticConstReplace(DUInsnInfo &useDUInfo, ImmOperand &constOpnd, ArithmeticType aT) { + Insn *useInsn = useDUInfo.GetInsn(); + CHECK_FATAL(useInsn != nullptr, "get useInsn failed"); + if (useDUInfo.GetOperands().size() == 1) { + return ArithConstReplaceForOneOpnd(*useInsn, useDUInfo, constOpnd, aT); + } else if (useDUInfo.GetOperands().size() == 2) { + /* only support add & sub now */ + int64 newValue = 0; + MOperator newMop = GetFoldMopAndVal(newValue, constOpnd.GetValue(), *useInsn); + bool isSigned = (newValue < 0); + auto *tempImm = static_cast(constOpnd.Clone(*constPropMp)); + tempImm->SetValue(newValue); + tempImm->SetSigned(isSigned); + if (tempImm->IsSingleInstructionMovable()) { + auto *newImmOpnd = static_cast(tempImm->Clone(*cgFunc->GetMemoryPool())); + auto &newInsn = cgFunc->GetInsnBuilder()->BuildInsn( + newMop, useInsn->GetOperand(kInsnFirstOpnd), *newImmOpnd); + ReplaceInsnAndUpdateSSA(*useInsn, newInsn); + return true; + } else { + CHECK_FATAL(false, "invalid immediate"); + } + } else { + ASSERT(false, "invalid instruction in ssa form"); + } + return false; +} + +bool A64ConstProp::ArithmeticConstFold(DUInsnInfo &useDUInfo, const ImmOperand &constOpnd, + ArithmeticType aT) const { + Insn *useInsn = useDUInfo.GetInsn(); + if (useDUInfo.GetOperands().size() == 1) { + Operand &existedImm = useInsn->GetOperand(kInsnThirdOpnd); + ASSERT(existedImm.IsImmediate(), "must be"); + Operand &destOpnd = useInsn->GetOperand(kInsnFirstOpnd); + bool is64Bit = destOpnd.GetSize() == k64BitSize; + ImmOperand *foldConst = CanDoConstFold(constOpnd, static_cast(existedImm), aT, is64Bit); + if (foldConst != nullptr) { + MOperator newMop = is64Bit ? 
MOP_xmovri64 : MOP_wmovri32; + Insn &newInsn = cgFunc->GetInsnBuilder()->BuildInsn(newMop, destOpnd, *foldConst); + ReplaceInsnAndUpdateSSA(*useInsn, newInsn); + return true; + } + } + return false; +} + +bool A64ConstProp::ShiftConstReplace(DUInsnInfo &useDUInfo, const ImmOperand &constOpnd) { + Insn *useInsn = useDUInfo.GetInsn(); + MOperator curMop = useInsn->GetMachineOpcode(); + if (useDUInfo.GetOperands().size() == 1) { + auto useOpndInfoIt = useDUInfo.GetOperands().cbegin(); + uint32 useOpndIdx = useOpndInfoIt->first; + if (useOpndIdx == kInsnThirdOpnd) { + auto &shiftBit = static_cast(useInsn->GetOperand(kInsnFourthOpnd)); + int64 val = constOpnd.GetValue(); + if (shiftBit.GetShiftOp() == BitShiftOperand::kLSL) { + val = val << shiftBit.GetShiftAmount(); + } else if (shiftBit.GetShiftOp() == BitShiftOperand::kLSR) { + val = val >> shiftBit.GetShiftAmount(); + } else if (shiftBit.GetShiftOp() == BitShiftOperand::kASR) { + val = static_cast((static_cast(val)) >> shiftBit.GetShiftAmount()); + } else { + CHECK_FATAL(false, "shift type is not defined"); + } + auto *newImm = static_cast(constOpnd.Clone(*constPropMp)); + newImm->SetValue(val); + MOperator newMop = GetRegImmMOP(curMop, false); + if (static_cast(cgFunc)->IsOperandImmValid(newMop, newImm, kInsnThirdOpnd)) { + auto *cgNewImm = static_cast(constOpnd.Clone(*cgFunc->GetMemoryPool())); + Insn &newInsn = cgFunc->GetInsnBuilder()->BuildInsn( + newMop, useInsn->GetOperand(kInsnFirstOpnd), useInsn->GetOperand(kInsnSecondOpnd), *cgNewImm); + ReplaceInsnAndUpdateSSA(*useInsn, newInsn); + return true; + } + } + } + return false; +} + +bool A64ConstProp::ConstProp(DUInsnInfo &useDUInfo, ImmOperand &constOpnd) { + MOperator curMop = useDUInfo.GetInsn()->GetMachineOpcode(); + switch (curMop) { + case MOP_xmovrr: + case MOP_wmovrr: { + return MovConstReplace(useDUInfo, constOpnd); + } + case MOP_xsubrrr: + case MOP_wsubrrr: { + return ArithmeticConstReplace(useDUInfo, constOpnd, kAArch64Sub); + } + case MOP_xaddrrr: + case MOP_waddrrr: { + return ArithmeticConstReplace(useDUInfo, constOpnd, kAArch64Add); + } + case MOP_xaddrri12: + case MOP_waddrri12: { + return ArithmeticConstFold(useDUInfo, constOpnd, kAArch64Add); + } + case MOP_xsubrri12: + case MOP_wsubrri12: { + return ArithmeticConstFold(useDUInfo, constOpnd, kAArch64Sub); + } + case MOP_xandrrr: + case MOP_wandrrr: + case MOP_xeorrrr: + case MOP_weorrrr: + case MOP_xiorrrr: + case MOP_wiorrrr: { + return ArithmeticConstReplace(useDUInfo, constOpnd, kAArch64Logic); + } + case MOP_xiorrrrs: + case MOP_wiorrrrs: + case MOP_xeorrrrs: + case MOP_weorrrrs: + case MOP_xandrrrs: + case MOP_wandrrrs: + case MOP_xaddrrrs: + case MOP_waddrrrs: + case MOP_wsubrrrs: + case MOP_xsubrrrs: { + return ShiftConstReplace(useDUInfo, constOpnd); + } + case MOP_wbfirri5i5: + case MOP_xbfirri6i6: { + return BitInsertReplace(useDUInfo, constOpnd); + } + default: + break; + } + return false; +} + +bool A64ConstProp::BitInsertReplace(DUInsnInfo &useDUInfo, const ImmOperand &constOpnd) const { + Insn *useInsn = useDUInfo.GetInsn(); + MOperator curMop = useInsn->GetMachineOpcode(); + if (useDUInfo.GetOperands().size() == 1) { + auto useOpndInfoIt = useDUInfo.GetOperands().cbegin(); + uint32 useOpndIdx = useOpndInfoIt->first; + if (useOpndIdx == kInsnSecondOpnd) { + auto &lsbOpnd = static_cast(useInsn->GetOperand(kInsnThirdOpnd)); + auto &widthOpnd = static_cast(useInsn->GetOperand(kInsnFourthOpnd)); + auto val = static_cast(constOpnd.GetValue()); + /* bfi width in the range [1 -64] */ + auto width = 
static_cast(widthOpnd.GetValue()); + /* bit number of the lsb of the destination bitfield */ + auto lsb = static_cast(lsbOpnd.GetValue()); + val = val & ((1U << width) - 1U); + if (__builtin_popcountl(val) == static_cast(width)) { + val = val << lsb; + MOperator newMop = GetRegImmMOP(curMop, false); + Operand &newOpnd = cgFunc->CreateImmOperand(PTY_i64, static_cast(val)); + if (static_cast(cgFunc)->IsOperandImmValid(newMop, &newOpnd, kInsnThirdOpnd)) { + RegOperand *defOpnd = useInsn->GetSSAImpDefOpnd(); + CHECK_FATAL(defOpnd, "check ssaInfo of the defOpnd"); + Insn &newInsn = cgFunc->GetInsnBuilder()->BuildInsn( + newMop, *defOpnd, useInsn->GetOperand(kInsnFirstOpnd), newOpnd); + ReplaceInsnAndUpdateSSA(*useInsn, newInsn); + return true; + } + } + } + } + return false; +} + +ImmOperand *A64ConstProp::CanDoConstFold( + const ImmOperand &value1, const ImmOperand &value2, ArithmeticType aT, bool is64Bit) const { + auto *tempImm = static_cast(value1.Clone(*constPropMp)); + int64 newVal = 0; + bool isSigned = value1.IsSignedValue(); + if (value1.IsSignedValue() != value2.IsSignedValue()) { + isSigned = false; + } + if (MayOverflow(value1, value2, is64Bit, aT == kAArch64Add, isSigned)) { + return nullptr; + } + switch (aT) { + case kAArch64Add : { + newVal = value1.GetValue() + value2.GetValue(); + break; + } + case kAArch64Sub : { + newVal = value1.GetValue() - value2.GetValue(); + break; + } + default: + return nullptr; + } + if (!is64Bit && isSigned && (newVal > INT_MAX || newVal < INT_MIN)) { + return nullptr; + } + if (!is64Bit && !isSigned && (newVal > UINT_MAX || newVal < 0)) { + return nullptr; + } + if (newVal < 0) { + tempImm->SetSigned(); + } + tempImm->SetValue(newVal); + if (value2.GetVary() == kUnAdjustVary) { + tempImm->SetVary(kUnAdjustVary); + } + bool canBeMove = tempImm->IsSingleInstructionMovable(k64BitSize); + return canBeMove ? 
static_cast(tempImm->Clone(*cgFunc->GetMemoryPool())) : nullptr; +} + +void A64StrLdrProp::DoOpt() { + ASSERT(curInsn != nullptr, "not input insn"); + bool tryOptAgain = false; + do { + tryOptAgain = false; + Init(); + MemOperand *currMemOpnd = StrLdrPropPreCheck(*curInsn); + if (currMemOpnd != nullptr && memPropMode != kUndef) { + /* can be changed to recursive propagation */ + if (ReplaceMemOpnd(*currMemOpnd)) { + tryOptAgain = true; + } + replaceVersions.clear(); + } + } while (tryOptAgain); +} + +bool A64StrLdrProp::IsSameOpndsOfInsn(const Insn &insn1, const Insn &insn2, uint32 opndIdx) { + Operand &opnd = insn2.GetOperand(opndIdx); + Operand::OperandType opndType = opnd.GetKind(); + switch (opndType) { + case Operand::kOpdRegister: { + if (!static_cast(opnd).Equals(static_cast(insn1.GetOperand(opndIdx)))) { + return false; + } + break; + } + case Operand::kOpdImmediate: { + if (!static_cast(opnd).Equals(static_cast(insn1.GetOperand(opndIdx)))) { + return false; + } + break; + } + case Operand::kOpdExtend: { + if (!static_cast(opnd).Equals( + static_cast(insn1.GetOperand(opndIdx)))) { + return false; + } + break; + } + case Operand::kOpdShift: { + if (!static_cast(opnd).Equals( + static_cast(insn1.GetOperand(opndIdx)))) { + return false; + } + break; + } + default: + return false; + } + return true; +} + +bool A64StrLdrProp::IsPhiInsnValid(const Insn &phiInsn) { + std::vector validDefInsns; + auto &phiOpnd = static_cast(phiInsn.GetOperand(kInsnSecondOpnd)); + for (auto useIt : phiOpnd.GetOperands()) { + ASSERT(useIt.second != nullptr, "get phiUseOpnd failed"); + Insn *defPhiInsn = ssaInfo->GetDefInsn(*useIt.second); + /* check only one layer of phi */ + if (defPhiInsn == nullptr || defPhiInsn->IsPhi()) { + return false; + } + (void)validDefInsns.emplace_back(defPhiInsn); + } + if (validDefInsns.empty()) { + return false; + } + MOperator mOp = validDefInsns[0]->GetMachineOpcode(); + uint32 opndNum = validDefInsns[0]->GetOperandSize(); + for (uint32 insnIdx = 1; insnIdx < validDefInsns.size(); ++insnIdx) { + Insn *insn = validDefInsns[insnIdx]; + if (insn->GetMachineOpcode() != mOp) { + return false; + } + if (insn->GetOperandSize() != opndNum) { + return false; + } + for (uint i = 0 ; i < opndNum; ++i) { + if (insn->OpndIsDef(i)) { + continue; + } + if (!IsSameOpndsOfInsn(*validDefInsns[0], *insn, i)) { + return false; + } + } + } + defInsn = validDefInsns[0]; + return true; +} + +Insn *A64StrLdrProp::GetDefInsn(const RegOperand ®Opnd, std::vector &allUseInsns) { + Insn *insn = nullptr; + if (regOpnd.IsSSAForm()) { + VRegVersion *replacedV = ssaInfo->FindSSAVersion(regOpnd.GetRegisterNumber()); + if (replacedV->GetDefInsnInfo() != nullptr) { + for (auto it : replacedV->GetAllUseInsns()) { + (void)allUseInsns.emplace_back(it.second->GetInsn()); + } + insn = replacedV->GetDefInsnInfo()->GetInsn(); + } + } + return insn; +} + +bool A64StrLdrProp::ReplaceMemOpnd(const MemOperand &currMemOpnd) { + RegOperand *replacedReg = nullptr; + if (memPropMode == kPropBase) { + replacedReg = currMemOpnd.GetBaseRegister(); + } else { + Operand *offset = currMemOpnd.GetOffset(); + ASSERT(offset->IsRegister(), "must be"); + replacedReg = static_cast(offset); + } + CHECK_FATAL(replacedReg != nullptr, "check this insn"); + std::vector allUseInsns; + std::vector newMemOpnds; + defInsn = GetDefInsn(*replacedReg, allUseInsns); + if (defInsn == nullptr) { + return false; + } + for (auto useInsn : allUseInsns) { + MemOperand *oldMemOpnd = StrLdrPropPreCheck(*useInsn, memPropMode); + if 
(CheckSameReplace(*replacedReg, oldMemOpnd)) { + if (defInsn->IsPhi() && !IsPhiInsnValid(*defInsn)) { + return false; + } + MemOperand *newMemOpnd = SelectReplaceMem(*oldMemOpnd); + if (newMemOpnd != nullptr) { + uint32 opndIdx = GetMemOpndIdx(oldMemOpnd, *useInsn); + if (CheckNewMemOffset(*useInsn, newMemOpnd, opndIdx)) { + newMemOpnds.emplace_back(newMemOpnd); + continue; + } + } + } + return false; + } + /* due to register pressure, do not do partial prop */ + for (size_t i = 0; i < newMemOpnds.size(); ++i) { + DoMemReplace(*replacedReg, *newMemOpnds[i], *allUseInsns[i]); + } + return true; +} + +bool A64StrLdrProp::CheckSameReplace(const RegOperand &replacedReg, const MemOperand *memOpnd) const { + if (memOpnd != nullptr && memPropMode != kUndef) { + if (memPropMode == kPropBase) { + return replacedReg.GetRegisterNumber() == memOpnd->GetBaseRegister()->GetRegisterNumber(); + } else { + Operand *offset = memOpnd->GetOffset(); + ASSERT(offset != nullptr, "offset should not be nullptr"); + ASSERT(offset->IsRegister(), "must be"); + return replacedReg.GetRegisterNumber() == static_cast(offset)->GetRegisterNumber(); + } + } + return false; +} + +uint32 A64StrLdrProp::GetMemOpndIdx(MemOperand *newMemOpnd, const Insn &insn) const { + uint32 opndIdx = kInsnMaxOpnd; + if (insn.IsLoadPair() || insn.IsStorePair()) { + ASSERT(newMemOpnd->GetOffsetImmediate() != nullptr, "unexpect insn"); + opndIdx = kInsnThirdOpnd; + } else { + opndIdx = kInsnSecondOpnd; + } + return opndIdx; +} + +void A64StrLdrProp::DoMemReplace(const RegOperand &replacedReg, MemOperand &newMem, Insn &useInsn) { + VRegVersion *replacedV = ssaInfo->FindSSAVersion(replacedReg.GetRegisterNumber()); + ASSERT(replacedV != nullptr, "must in ssa form"); + uint32 opndIdx = GetMemOpndIdx(&newMem, useInsn); + replacedV->RemoveUseInsn(useInsn, opndIdx); + if (replacedV->GetAllUseInsns().empty()) { + (void)cgDce->RemoveUnuseDef(*replacedV); + } + for (auto &replaceit : as_const(replaceVersions)) { + replaceit.second->AddUseInsn(*ssaInfo, useInsn, opndIdx); + } + useInsn.SetOperand(opndIdx, newMem); +} + +MemOperand *A64StrLdrProp::StrLdrPropPreCheck(const Insn &insn, MemPropMode prevMod) { + memPropMode = kUndef; + if (insn.IsLoad() || insn.IsStore()) { + if (insn.IsAtomic()) { + return nullptr; + } + auto *currMemOpnd = static_cast(insn.GetMemOpnd()); + if (currMemOpnd != nullptr) { + memPropMode = SelectStrLdrPropMode(*currMemOpnd); + if (prevMod != kUndef) { + if (prevMod != memPropMode) { + memPropMode = prevMod; + return nullptr; + } + } + return currMemOpnd; + } + } + return nullptr; +} + +MemPropMode A64StrLdrProp::SelectStrLdrPropMode(const MemOperand &currMemOpnd) { + MemOperand::AArch64AddressingMode currAddrMode = currMemOpnd.GetAddrMode(); + MemPropMode innerMemPropMode = kUndef; + switch (currAddrMode) { + case MemOperand::kAddrModeBOi: { + if (!currMemOpnd.IsPreIndexed() && !currMemOpnd.IsPostIndexed()) { + innerMemPropMode = kPropBase; + } + break; + } + case MemOperand::kAddrModeBOrX: { + innerMemPropMode = kPropOffset; + auto amount = currMemOpnd.ShiftAmount(); + if (currMemOpnd.GetExtendAsString() == "LSL") { + if (amount != 0) { + innerMemPropMode = kPropShift; + } + break; + } else if (currMemOpnd.SignedExtend()) { + innerMemPropMode = kPropSignedExtend; + } else if (currMemOpnd.UnsignedExtend()) { + innerMemPropMode = kPropUnsignedExtend; + } + break; + } + default: + innerMemPropMode = kUndef; + } + return innerMemPropMode; +} + +MemOperand *A64StrLdrProp::SelectReplaceMem(const MemOperand &currMemOpnd) { + MemOperand 
*newMemOpnd = nullptr; + Operand *offset = currMemOpnd.GetOffset(); + RegOperand *base = currMemOpnd.GetBaseRegister(); + MOperator opCode = defInsn->GetMachineOpcode(); + switch (opCode) { + case MOP_xsubrri12: + case MOP_wsubrri12: { + RegOperand *replace = GetReplaceReg(static_cast(defInsn->GetOperand(kInsnSecondOpnd))); + if (replace != nullptr) { + auto &immOpnd = static_cast(defInsn->GetOperand(kInsnThirdOpnd)); + int64 defVal = -(immOpnd.GetValue()); + newMemOpnd = HandleArithImmDef(*replace, offset, defVal, currMemOpnd.GetSize()); + } + break; + } + case MOP_xaddrri12: + case MOP_waddrri12: { + RegOperand *replace = GetReplaceReg(static_cast(defInsn->GetOperand(kInsnSecondOpnd))); + if (replace != nullptr) { + auto &immOpnd = static_cast(defInsn->GetOperand(kInsnThirdOpnd)); + int64 defVal = immOpnd.GetValue(); + newMemOpnd = HandleArithImmDef(*replace, offset, defVal, currMemOpnd.GetSize()); + } + break; + } + case MOP_xaddrri24: + case MOP_waddrri24: { + RegOperand *replace = GetReplaceReg(static_cast(defInsn->GetOperand(kInsnSecondOpnd))); + if (replace != nullptr) { + auto &immOpnd = static_cast(defInsn->GetOperand(kInsnThirdOpnd)); + auto &shiftOpnd = static_cast(defInsn->GetOperand(kInsnFourthOpnd)); + CHECK_FATAL(shiftOpnd.GetShiftAmount() == 12, "invalid shiftAmount"); + auto defVal = static_cast(static_cast(immOpnd.GetValue()) << shiftOpnd.GetShiftAmount()); + newMemOpnd = HandleArithImmDef(*replace, offset, defVal, currMemOpnd.GetSize()); + } + break; + } + case MOP_xsubrri24: + case MOP_wsubrri24: { + RegOperand *replace = GetReplaceReg(static_cast(defInsn->GetOperand(kInsnSecondOpnd))); + if (replace != nullptr) { + auto &immOpnd = static_cast(defInsn->GetOperand(kInsnThirdOpnd)); + auto &shiftOpnd = static_cast(defInsn->GetOperand(kInsnFourthOpnd)); + CHECK_FATAL(shiftOpnd.GetShiftAmount() == 12, "invalid shiftAmount"); + int64 defVal = -static_cast(static_cast(immOpnd.GetValue()) << shiftOpnd.GetShiftAmount()); + newMemOpnd = HandleArithImmDef(*replace, offset, defVal, currMemOpnd.GetSize()); + } + break; + } + case MOP_xaddrrr: + case MOP_waddrrr: + case MOP_dadd: + case MOP_sadd: { + if (memPropMode == kPropBase) { + ASSERT(offset != nullptr, "offset should not be nullptr"); + auto *ofstOpnd = static_cast(offset); + if (!ofstOpnd->IsZero()) { + break; + } + + RegOperand *replace = GetReplaceReg(static_cast(defInsn->GetOperand(kInsnSecondOpnd))); + RegOperand *newOfst = GetReplaceReg(static_cast(defInsn->GetOperand(kInsnThirdOpnd))); + if (replace != nullptr && newOfst != nullptr) { + newMemOpnd = static_cast(cgFunc)->CreateMemOperand( + MemOperand::kAddrModeBOrX, currMemOpnd.GetSize(), *replace, newOfst, nullptr, nullptr); + } + } + break; + } + case MOP_xaddrrrs: + case MOP_waddrrrs: { + if (memPropMode == kPropBase) { + auto *ofstOpnd = static_cast(offset); + if (!ofstOpnd->IsZero()) { + break; + } + RegOperand *newBaseOpnd = GetReplaceReg( + static_cast(defInsn->GetOperand(kInsnSecondOpnd))); + RegOperand *newIndexOpnd = GetReplaceReg( + static_cast(defInsn->GetOperand(kInsnThirdOpnd))); + auto &shift = static_cast(defInsn->GetOperand(kInsnFourthOpnd)); + if (shift.GetShiftOp() != BitShiftOperand::kLSL) { + break; + } + if (newBaseOpnd != nullptr && newIndexOpnd != nullptr) { + newMemOpnd = static_cast(cgFunc)->CreateMemOperand( + MemOperand::kAddrModeBOrX, currMemOpnd.GetSize(), *newBaseOpnd, *newIndexOpnd, + shift.GetShiftAmount(), false); + } + } + break; + } + case MOP_xadrpl12: { + if (memPropMode == kPropBase) { + if (currMemOpnd.GetSize() >= 128) { + // We 
can not be sure that the page offset is 16-byte aligned + break; + } + auto *ofstOpnd = static_cast(offset); + CHECK_FATAL(ofstOpnd != nullptr, "oldOffset is null!"); + int64 val = ofstOpnd->GetValue(); + auto *offset1 = static_cast(&defInsn->GetOperand(kInsnThirdOpnd)); + CHECK_FATAL(offset1 != nullptr, "offset1 is null!"); + val += offset1->GetOffset(); + OfstOperand *newOfsetOpnd = &static_cast(cgFunc)->CreateOfstOpnd(static_cast(val), + k32BitSize); + CHECK_FATAL(newOfsetOpnd != nullptr, "newOfsetOpnd is null!"); + const MIRSymbol *addr = offset1->GetSymbol(); + /* do not guarantee rodata alignment at Os */ + if (CGOptions::OptimizeForSize() && addr->IsReadOnly()) { + break; + } + RegOperand *replace = GetReplaceReg( + static_cast(defInsn->GetOperand(kInsnSecondOpnd))); + if (replace != nullptr) { + newMemOpnd = static_cast(cgFunc)->CreateMemOperand( + MemOperand::kAddrModeLo12Li, currMemOpnd.GetSize(), *replace, nullptr, newOfsetOpnd, addr); + } + } + break; + } + /* do this in const prop ? */ + case MOP_wmovri32: + case MOP_xmovri64: { + if (memPropMode == kPropOffset) { + auto *imm = static_cast(&defInsn->GetOperand(kInsnSecondOpnd)); + OfstOperand *newOffset = &static_cast(cgFunc)->CreateOfstOpnd( + static_cast(imm->GetValue()), k32BitSize); + CHECK_FATAL(newOffset != nullptr, "newOffset is null!"); + newMemOpnd = static_cast(cgFunc)->CreateMemOperand( + MemOperand::kAddrModeBOi, currMemOpnd.GetSize(), *base, nullptr, newOffset, nullptr); + } + break; + } + case MOP_xlslrri6: + case MOP_wlslrri5: { + auto *imm = static_cast(&defInsn->GetOperand(kInsnThirdOpnd)); + RegOperand *newOfst = GetReplaceReg(static_cast(defInsn->GetOperand(kInsnSecondOpnd))); + if (newOfst != nullptr) { + auto shift = static_cast(static_cast(imm->GetValue())); + if (memPropMode == kPropOffset) { + if (shift < k4ByteSize) { + newMemOpnd = static_cast(cgFunc)->CreateMemOperand( + MemOperand::kAddrModeBOrX, currMemOpnd.GetSize(), *base, *newOfst, shift); + } + } else if (memPropMode == kPropShift) { + shift += currMemOpnd.ShiftAmount(); + if (shift < k4ByteSize) { + newMemOpnd = static_cast(cgFunc)->CreateMemOperand( + MemOperand::kAddrModeBOrX, currMemOpnd.GetSize(), *base, *newOfst, shift); + } + } + } + break; + } + case MOP_xsxtw64: { + newMemOpnd = SelectReplaceExt(*base, static_cast(currMemOpnd.ShiftAmount()), + true, currMemOpnd.GetSize()); + break; + } + case MOP_xuxtw64: { + newMemOpnd = SelectReplaceExt(*base, static_cast(currMemOpnd.ShiftAmount()), + false, currMemOpnd.GetSize()); + break; + } + default: + break; + } + return newMemOpnd; +} + +RegOperand *A64StrLdrProp::GetReplaceReg(RegOperand &a64Reg) { + if (a64Reg.IsSSAForm()) { + regno_t ssaIndex = a64Reg.GetRegisterNumber(); + replaceVersions[ssaIndex] = ssaInfo->FindSSAVersion(ssaIndex); + ASSERT(replaceVersions.size() <= 2, "CHECK THIS CASE IN A64PROP"); + return &a64Reg; + } + return nullptr; +} + +MemOperand *A64StrLdrProp::HandleArithImmDef(RegOperand &replace, Operand *oldOffset, + int64 defVal, uint32 memSize) const { + if (memPropMode != kPropBase) { + return nullptr; + } + OfstOperand *newOfstImm = nullptr; + if (oldOffset == nullptr) { + newOfstImm = &static_cast(cgFunc)->CreateOfstOpnd(static_cast(defVal), k32BitSize); + } else { + auto *ofstOpnd = static_cast(oldOffset); + CHECK_FATAL(ofstOpnd != nullptr, "oldOffsetOpnd is null"); + newOfstImm = &static_cast(cgFunc)->CreateOfstOpnd( + static_cast(defVal + ofstOpnd->GetValue()), k32BitSize); + } + CHECK_FATAL(newOfstImm != nullptr, "newOffset is null!"); + return 
static_cast(cgFunc)->CreateMemOperand(MemOperand::kAddrModeBOi, memSize, + replace, nullptr, newOfstImm, nullptr); +} + +MemOperand *A64StrLdrProp::SelectReplaceExt(RegOperand &base, uint32 amount, bool isSigned, uint32 memSize) { + MemOperand *newMemOpnd = nullptr; + RegOperand *newOfst = GetReplaceReg(static_cast(defInsn->GetOperand(kInsnSecondOpnd))); + if (newOfst == nullptr) { + return nullptr; + } + /* defInsn is extend, currMemOpnd is same extend or shift */ + bool propExtend = (memPropMode == kPropShift) || ((memPropMode == kPropSignedExtend) && isSigned) || + ((memPropMode == kPropUnsignedExtend) && !isSigned); + if (memPropMode == kPropOffset) { + newMemOpnd = static_cast(cgFunc)->CreateMemOperand( + MemOperand::kAddrModeBOrX, memSize, base, *newOfst, 0, isSigned); + } else if (propExtend) { + newMemOpnd = static_cast(cgFunc)->CreateMemOperand( + MemOperand::kAddrModeBOrX, memSize, base, *newOfst, amount, isSigned); + } else { + return nullptr; + } + return newMemOpnd; +} + +bool A64StrLdrProp::CheckNewMemOffset(const Insn &insn, MemOperand *newMemOpnd, uint32 opndIdx) const { + auto *a64CgFunc = static_cast(cgFunc); + if ((newMemOpnd->GetOffsetImmediate() != nullptr) && + !a64CgFunc->IsOperandImmValid(insn.GetMachineOpcode(), newMemOpnd, opndIdx)) { + return false; + } + auto newAmount = static_cast(newMemOpnd->ShiftAmount()); + if (!AArch64StoreLoadOpt::CheckNewAmount(insn, newAmount)) { + return false; + } + /* is ldp or stp, addrMode must be BOI */ + if ((opndIdx == kInsnThirdOpnd) && (newMemOpnd->GetAddrMode() != MemOperand::kAddrModeBOi)) { + return false; + } + return true; +} + +void AArch64Prop::PropPatternOpt() { + PropOptimizeManager optManager; + optManager.Optimize(*cgFunc, GetSSAInfo()); + optManager.Optimize(*cgFunc, GetSSAInfo()); + optManager.Optimize(*cgFunc, GetSSAInfo()); + optManager.Optimize(*cgFunc, GetSSAInfo()); + optManager.Optimize(*cgFunc, GetSSAInfo()); +} + +bool ExtendShiftPattern::IsSwapInsn(const Insn &insn) const { + MOperator op = insn.GetMachineOpcode(); + switch (op) { + case MOP_xaddrrr: + case MOP_waddrrr: + case MOP_xiorrrr: + case MOP_wiorrrr: + return true; + default: + return false; + } +} + +void ExtendShiftPattern::SetExMOpType(const Insn &use) { + MOperator op = use.GetMachineOpcode(); + switch (op) { + case MOP_xaddrrr: + case MOP_xxwaddrrre: + case MOP_xaddrrrs: { + exMOpType = kExAdd; + is64BitSize = true; + break; + } + case MOP_waddrrr: + case MOP_wwwaddrrre: + case MOP_waddrrrs: { + exMOpType = kEwAdd; + break; + } + case MOP_xsubrrr: + case MOP_xxwsubrrre: + case MOP_xsubrrrs: { + exMOpType = kExSub; + is64BitSize = true; + break; + } + case MOP_wsubrrr: + case MOP_wwwsubrrre: + case MOP_wsubrrrs: { + exMOpType = kEwSub; + break; + } + case MOP_xcmnrr: + case MOP_xwcmnrre: + case MOP_xcmnrrs: { + exMOpType = kExCmn; + is64BitSize = true; + break; + } + case MOP_wcmnrr: + case MOP_wwcmnrre: + case MOP_wcmnrrs: { + exMOpType = kEwCmn; + break; + } + case MOP_xcmprr: + case MOP_xwcmprre: + case MOP_xcmprrs: { + exMOpType = kExCmp; + is64BitSize = true; + break; + } + case MOP_wcmprr: + case MOP_wwcmprre: + case MOP_wcmprrs: { + exMOpType = kEwCmp; + break; + } + default: { + exMOpType = kExUndef; + } + } +} + +void ExtendShiftPattern::SetLsMOpType(const Insn &use) { + MOperator op = use.GetMachineOpcode(); + switch (op) { + case MOP_xaddrrr: + case MOP_xaddrrrs: { + lsMOpType = kLxAdd; + is64BitSize = true; + break; + } + case MOP_waddrrr: + case MOP_waddrrrs: { + lsMOpType = kLwAdd; + break; + } + case MOP_xsubrrr: + case 
MOP_xsubrrrs: { + lsMOpType = kLxSub; + is64BitSize = true; + break; + } + case MOP_wsubrrr: + case MOP_wsubrrrs: { + lsMOpType = kLwSub; + break; + } + case MOP_xcmnrr: + case MOP_xcmnrrs: { + lsMOpType = kLxCmn; + is64BitSize = true; + break; + } + case MOP_wcmnrr: + case MOP_wcmnrrs: { + lsMOpType = kLwCmn; + break; + } + case MOP_xcmprr: + case MOP_xcmprrs: { + lsMOpType = kLxCmp; + is64BitSize = true; + break; + } + case MOP_wcmprr: + case MOP_wcmprrs: { + lsMOpType = kLwCmp; + break; + } + case MOP_xeorrrr: + case MOP_xeorrrrs: { + lsMOpType = kLxEor; + is64BitSize = true; + break; + } + case MOP_weorrrr: + case MOP_weorrrrs: { + lsMOpType = kLwEor; + break; + } + case MOP_xinegrr: + case MOP_xinegrrs: { + lsMOpType = kLxNeg; + replaceIdx = kInsnSecondOpnd; + is64BitSize = true; + break; + } + case MOP_winegrr: + case MOP_winegrrs: { + lsMOpType = kLwNeg; + replaceIdx = kInsnSecondOpnd; + break; + } + case MOP_xiorrrr: + case MOP_xiorrrrs: { + lsMOpType = kLxIor; + is64BitSize = true; + break; + } + case MOP_wiorrrr: + case MOP_wiorrrrs: { + lsMOpType = kLwIor; + break; + } + default: { + lsMOpType = kLsUndef; + } + } +} + +void ExtendShiftPattern::SelectExtendOrShift(const Insn &def) { + MOperator op = def.GetMachineOpcode(); + switch (op) { + case MOP_xsxtb32: + case MOP_xsxtb64: extendOp = ExtendShiftOperand::kSXTB; + break; + case MOP_xsxth32: + case MOP_xsxth64: extendOp = ExtendShiftOperand::kSXTH; + break; + case MOP_xsxtw64: extendOp = ExtendShiftOperand::kSXTW; + break; + case MOP_xuxtb32: extendOp = ExtendShiftOperand::kUXTB; + break; + case MOP_xuxth32: extendOp = ExtendShiftOperand::kUXTH; + break; + case MOP_xuxtw64: extendOp = ExtendShiftOperand::kUXTW; + break; + case MOP_wlslrri5: + case MOP_xlslrri6: shiftOp = BitShiftOperand::kLSL; + break; + case MOP_xlsrrri6: + case MOP_wlsrrri5: shiftOp = BitShiftOperand::kLSR; + break; + case MOP_xasrrri6: + case MOP_wasrrri5: shiftOp = BitShiftOperand::kASR; + break; + case MOP_wextrrrri5: + case MOP_xextrrrri6: shiftOp = BitShiftOperand::kROR; + break; + default: { + extendOp = ExtendShiftOperand::kUndef; + shiftOp = BitShiftOperand::kUndef; + } + } +} + +/* Optimize ExtendShiftPattern: + * ========================================================== + * nosuffix LSL LSR ASR extrn (def) + * nosuffix | F | LSL | LSR | ASR | extrn | + * LSL | F | LSL | F | F | extrn | + * LSR | F | F | LSR | F | F | + * ASR | F | F | F | ASR | F | + * exten | F | F | F | F |exten(self)| + * (use) + * =========================================================== + */ + + +/* Check whether ExtendShiftPattern optimization can be performed. */ +SuffixType ExtendShiftPattern::CheckOpType(const Operand &lastOpnd) const { + /* Assign values to useType and defType. 
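// ---- Editorial sketch (not part of the original patch) ---------------------
// Standalone model of the table lookup performed by CheckOpType here: the row is
// the suffix already present on the use insn, the column is the operation of the
// def insn, and the entry is the suffix the merged insn may carry (kFail when the
// pair must not be merged).  Enumerator and array names are local assumptions that
// mirror the table documented above.
#include <cstdint>

namespace sketch {
enum Suffix : uint8_t { kNone = 0, kLSL, kLSR, kASR, kExt, kFail };

constexpr Suffix kCombine[5][5] = {
    /* use\def      none    LSL    LSR    ASR    extend */
    /* none   */ { kFail,  kLSL,  kLSR,  kASR,  kExt  },
    /* LSL    */ { kFail,  kLSL,  kFail, kFail, kExt  },
    /* LSR    */ { kFail,  kFail, kLSR,  kFail, kFail },
    /* ASR    */ { kFail,  kFail, kFail, kASR,  kFail },
    /* extend */ { kFail,  kFail, kFail, kFail, kExt  },
};

constexpr Suffix Combine(Suffix use, Suffix def) { return kCombine[use][def]; }

static_assert(Combine(kNone, kLSL) == kLSL, "a plain use accepts any def suffix");
static_assert(Combine(kLSR, kASR) == kFail, "mixed right shifts must not be merged");
}  // namespace sketch
// -----------------------------------------------------------------------------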
*/ + uint32 useType = kNoSuffix; + uint32 defType = shiftOp; + if (extendOp != ExtendShiftOperand::kUndef) { + defType = kExten; + } + if (lastOpnd.IsOpdShift()) { + BitShiftOperand lastShiftOpnd = static_cast(lastOpnd); + useType = lastShiftOpnd.GetShiftOp(); + } else if (lastOpnd.IsOpdExtend()) { + ExtendShiftOperand lastExtendOpnd = static_cast(lastOpnd); + useType = kExten; + /* two insn is exten and exten ,value is exten(oneself) */ + if (useType == defType && extendOp != lastExtendOpnd.GetExtendOp()) { + return kNoSuffix; + } + } + return kDoOptimizeTable[useType][defType]; +} + +constexpr uint32 kExMopTypeSize = 9; +constexpr uint32 kLsMopTypeSize = 15; + +MOperator exMopTable[kExMopTypeSize] = { + MOP_undef, MOP_xxwaddrrre, MOP_wwwaddrrre, MOP_xxwsubrrre, MOP_wwwsubrrre, + MOP_xwcmnrre, MOP_wwcmnrre, MOP_xwcmprre, MOP_wwcmprre +}; +MOperator lsMopTable[kLsMopTypeSize] = { + MOP_undef, MOP_xaddrrrs, MOP_waddrrrs, MOP_xsubrrrs, MOP_wsubrrrs, + MOP_xcmnrrs, MOP_wcmnrrs, MOP_xcmprrs, MOP_wcmprrs, MOP_xeorrrrs, + MOP_weorrrrs, MOP_xinegrrs, MOP_winegrrs, MOP_xiorrrrs, MOP_wiorrrrs +}; +/* new Insn extenType: + * ===================== + * (useMop) (defMop) (newmop) + * | nosuffix | all | all| + * | exten | ex | ex | + * | ls | ex | ls | + * | asr | !asr | F | + * | !asr | asr | F | + * (useMop) (defMop) + * ===================== + */ +void ExtendShiftPattern::ReplaceUseInsn(Insn &use, const Insn &def, uint32 amount) { + AArch64CGFunc &a64CGFunc = static_cast(cgFunc); + uint32 lastIdx = use.GetOperandSize() - k1BitSize; + Operand &lastOpnd = use.GetOperand(lastIdx); + SuffixType optType = CheckOpType(lastOpnd); + Operand *shiftOpnd = nullptr; + if (optType == kNoSuffix) { + return; + }else if (optType == kExten) { + replaceOp = exMopTable[exMOpType]; + if (amount > k4BitSize) { + return; + } + shiftOpnd = &a64CGFunc.CreateExtendShiftOperand(extendOp, amount, static_cast(k64BitSize)); + } else { + replaceOp = lsMopTable[lsMOpType]; + if ((is64BitSize && amount >= k64BitSize) || (!is64BitSize && amount >= k32BitSize)) { + return; + } + shiftOpnd = &a64CGFunc.CreateBitShiftOperand(shiftOp, amount, static_cast(k64BitSize)); + } + if (replaceOp == MOP_undef) { + return; + } + + Insn *replaceUseInsn = nullptr; + Operand &firstOpnd = use.GetOperand(kInsnFirstOpnd); + Operand *secondOpnd = &use.GetOperand(kInsnSecondOpnd); + if (replaceIdx == kInsnSecondOpnd) { /* replace neg insn */ + secondOpnd = &def.GetOperand(kInsnSecondOpnd); + replaceUseInsn = &cgFunc.GetInsnBuilder()->BuildInsn(replaceOp, firstOpnd, *secondOpnd, *shiftOpnd); + } else { + Operand &thirdOpnd = def.GetOperand(kInsnSecondOpnd); + replaceUseInsn = &cgFunc.GetInsnBuilder()->BuildInsn(replaceOp, firstOpnd, *secondOpnd, thirdOpnd, *shiftOpnd); + } + use.GetBB()->ReplaceInsn(use, *replaceUseInsn); + if (PROP_DUMP) { + LogInfo::MapleLogger() << ">>>>>>> In ExtendShiftPattern : <<<<<<<\n"; + LogInfo::MapleLogger() << "=======ReplaceInsn :\n"; + use.Dump(); + LogInfo::MapleLogger() << "=======NewInsn :\n"; + replaceUseInsn->Dump(); + } + /* update ssa info */ + optSsaInfo->ReplaceInsn(use, *replaceUseInsn); + newInsn = replaceUseInsn; + optSuccess = true; +} + +/* + * pattern1: + * UXTB/UXTW X0, W1 <---- def x0 + * .... 
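// ---- Editorial sketch (not part of the original patch) ---------------------
// ReplaceUseInsn above enforces two encoding limits before emitting the merged
// instruction: extended-register forms (e.g. "add x0, x1, w2, SXTW #n") only allow
// a left shift of 0..4, while shifted-register forms need the amount to stay below
// the operand width.  Minimal standalone restatement; the function name is an
// assumption, not MapleBE API.
#include <cstdint>

namespace sketch {
inline bool IsSuffixAmountValid(bool usesExtendForm, bool is64Bit, uint32_t amount) {
  if (usesExtendForm) {
    return amount <= 4;                     // UXTW/SXTW etc. accept at most LSL #4
  }
  return amount < (is64Bit ? 64u : 32u);    // LSL/LSR/ASR amount must be < width
}
}  // namespace sketch
// -----------------------------------------------------------------------------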
<---- (X0 not used) + * AND/SUB/EOR X0, X1, X0 <---- use x0 + * ======> + * AND/SUB/EOR X0, X1, W1 UXTB/UXTW + * + * pattern2: + * LSL/LSR X0, X1, #8 + * ....(X0 not used) + * AND/SUB/EOR X0, X1, X0 + * ======> + * AND/SUB/EOR X0, X1, X1 LSL/LSR #8 + */ +void ExtendShiftPattern::Optimize(Insn &insn) { + uint32 amount = 0; + uint32 offset = 0; + uint32 lastIdx = insn.GetOperandSize() - k1BitSize; + Operand &lastOpnd = insn.GetOperand(lastIdx); + if (lastOpnd.IsOpdShift()) { + auto &lastShiftOpnd = static_cast(lastOpnd); + amount = lastShiftOpnd.GetShiftAmount(); + } else if (lastOpnd.IsOpdExtend()) { + auto &lastExtendOpnd = static_cast(lastOpnd); + amount = lastExtendOpnd.GetShiftAmount(); + } + if (shiftOp != BitShiftOperand::kUndef) { + auto &immOpnd = (shiftOp == BitShiftOperand::kROR ? + static_cast(defInsn->GetOperand(kInsnFourthOpnd)) : + static_cast(defInsn->GetOperand(kInsnThirdOpnd))); + offset = static_cast(immOpnd.GetValue()); + } + amount += offset; + + ReplaceUseInsn(insn, *defInsn, amount); +} + +void ExtendShiftPattern::DoExtendShiftOpt(Insn &insn) { + if (!CheckAllOpndCondition(insn)) { + return; + } + Optimize(*curInsn); + if (optSuccess) { + DoExtendShiftOpt(*newInsn); + } +} + +void ExtendShiftPattern::SwapOpnd(Insn &insn) { + Insn *swapInsn = &cgFunc.GetInsnBuilder()->BuildInsn(insn.GetMachineOpcode(), + insn.GetOperand(kInsnFirstOpnd), + insn.GetOperand(kInsnThirdOpnd), + insn.GetOperand(kInsnSecondOpnd)); + insn.GetBB()->ReplaceInsn(insn, *swapInsn); + optSsaInfo->ReplaceInsn(insn, *swapInsn); + curInsn = swapInsn; + replaceIdx = kInsnThirdOpnd; +} + +bool ExtendShiftPattern::CheckAllOpndCondition(Insn &insn) { + Init(); + SetLsMOpType(insn); + SetExMOpType(insn); + curInsn = &insn; + if (IsSwapInsn(insn)) { + if (CheckCondition(insn)) { + return true; + } + Init(); + SetLsMOpType(insn); + SetExMOpType(insn); + replaceIdx = kInsnSecondOpnd; + if (CheckCondition(insn)) { + SwapOpnd(insn); + return true; + } + } else { + return CheckCondition(insn); + } + return false; +} + +/* check and set: + * exMOpType, lsMOpType, extendOp, shiftOp, defInsn + */ +bool ExtendShiftPattern::CheckCondition(Insn &insn) { + if ((exMOpType == kExUndef) && (lsMOpType == kLsUndef)) { + return false; + } + auto ®Operand = static_cast(insn.GetOperand(replaceIdx)); + regno_t regNo = regOperand.GetRegisterNumber(); + VRegVersion *useVersion = optSsaInfo->FindSSAVersion(regNo); + defInsn = FindDefInsn(useVersion); + // useVersion must not be nullptr when defInsn is not nullptr + if (!defInsn || (useVersion->GetAllUseInsns().size() > 1)) { + return false; + } + SelectExtendOrShift(*defInsn); + /* defInsn must be shift or extend */ + if ((extendOp == ExtendShiftOperand::kUndef) && (shiftOp == BitShiftOperand::kUndef)) { + return false; + } + Operand &defSrcOpnd = defInsn->GetOperand(kInsnSecondOpnd); + CHECK_FATAL(defSrcOpnd.IsRegister(), "defSrcOpnd must be register!"); + if (shiftOp == BitShiftOperand::kROR) { + if (lsMOpType != kLxEor && lsMOpType != kLwEor && lsMOpType != kLxIor && lsMOpType != kLwIor) { + return false; + } + Operand &defThirdOpnd = defInsn->GetOperand(kInsnThirdOpnd); + CHECK_FATAL(defThirdOpnd.IsRegister(), "defThirdOpnd must be register"); + if (static_cast(defSrcOpnd).GetRegisterNumber() != + static_cast(defThirdOpnd).GetRegisterNumber()) { + return false; + } + } + auto ®DefSrc = static_cast(defSrcOpnd); + if (regDefSrc.IsPhysicalRegister()) { + return false; + } + /* + * has Implict cvt + * + * avoid cases as following: + * lsr x2, x2, #8 + * ubfx w2, x2, #0, #32 lsr x2, 
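// ---- Editorial sketch (not part of the original patch) ---------------------
// Concrete numbers for the truncation hazard sketched in the surrounding comment:
// once the 64-bit right shift has been narrowed to 32 bits by the implicit
// conversion, re-applying the shift on the 32-bit view yields a different value,
// which is why LSR/ASR defs are not folded across an implicit cvt.  Plain standard
// C++ below, no MapleBE types.
#include <cassert>
#include <cstdint>

namespace sketch {
inline void TruncationHazardDemo() {
  uint64_t x2 = 0x0000000123456789ULL;
  uint32_t w0 = 0xAAAAAAAAu;

  // Original sequence: lsr x2, x2, #8, then only the low 32 bits are consumed.
  uint32_t narrowedAfterShift = static_cast<uint32_t>(x2 >> 8);    // 0x01234567
  uint32_t original = w0 ^ narrowedAfterShift;

  // Rejected fold "eor w0, w0, w2, LSR #8": the shift now happens after truncation.
  uint32_t shiftedAfterNarrowing = static_cast<uint32_t>(x2) >> 8; // 0x00234567
  uint32_t folded = w0 ^ shiftedAfterNarrowing;

  assert(original != folded);  // bits 32..39 of x2 are lost by the reordering
}
}  // namespace sketch
// -----------------------------------------------------------------------------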
x2, #8 + * eor w0, w0, w2 ===> eor w0, w0, x2 ==\=> eor w0, w0, w2, LSR #8 + * + * the truncation causes the wrong value by shift right + * shift left does not matter + */ + if (useVersion->HasImplicitCvt() && shiftOp != BitShiftOperand::kUndef) { + return false; + } + if ((shiftOp == BitShiftOperand::kLSR || shiftOp == BitShiftOperand::kASR) && + (defSrcOpnd.GetSize() > regOperand.GetSize())) { + return false; + } + regno_t defSrcRegNo = regDefSrc.GetRegisterNumber(); + /* check regDefSrc */ + VRegVersion *replaceUseV = optSsaInfo->FindSSAVersion(defSrcRegNo); + CHECK_FATAL(replaceUseV != nullptr, "useVRegVersion must not be null based on ssa"); + if (replaceUseV->GetAllUseInsns().size() > 1 && shiftOp != BitShiftOperand::kROR) { + return false; + } + return true; +} + +void ExtendShiftPattern::Init() { + replaceOp = MOP_undef; + extendOp = ExtendShiftOperand::kUndef; + shiftOp = BitShiftOperand::kUndef; + defInsn = nullptr; + newInsn = nullptr; + replaceIdx = kInsnThirdOpnd; + optSuccess = false; + exMOpType = kExUndef; + lsMOpType = kLsUndef; + is64BitSize = false; +} + +void ExtendShiftPattern::Run() { + if (!cgFunc.GetMirModule().IsCModule()) { + return; + } + FOR_ALL_BB_REV(bb, &cgFunc) { + FOR_BB_INSNS_REV(insn, bb) { + if (!insn->IsMachineInstruction()) { + continue; + } + DoExtendShiftOpt(*insn); + } + } +} + +void ExtendMovPattern::Run() { + if (!cgFunc.GetMirModule().IsCModule()) { + return; + } + FOR_ALL_BB(bb, &cgFunc) { + FOR_BB_INSNS(insn, bb) { + if (!insn->IsMachineInstruction()) { + continue; + } + if (!CheckCondition(*insn)) { + continue; + } + Optimize(*insn); + } + } +} + +bool ExtendMovPattern::CheckSrcReg(regno_t srcRegNo, uint32 validNum) { + InsnSet srcDefSet; + VRegVersion *useVersion = optSsaInfo->FindSSAVersion(srcRegNo); + CHECK_FATAL(useVersion != nullptr, "useVRegVersion must not be null based on ssa"); + DUInsnInfo *defInfo = useVersion->GetDefInsnInfo(); + if (defInfo == nullptr) { + return false; + } + Insn *insn = defInfo->GetInsn(); + srcDefSet.insert(insn); + /* reserve insn set for non ssa version. 
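// ---- Editorial sketch (not part of the original patch) ---------------------
// The traversal in CheckSrcReg (continued below) proves that every definition
// reaching the source register can only produce values inside the low `validNum`
// bits, e.g. a byte/halfword load or a logical op with a small immediate; when
// that holds, the uxtb/uxth being examined is redundant and becomes a plain mov.
// Toy standalone model over a hypothetical DefNode type, not the MapleBE IR:
#include <cstdint>
#include <vector>

namespace sketch {
enum class DefKind { kLoadByte, kLoadHalf, kOrrImm, kAndReg, kOther };

struct DefNode {
  DefKind kind = DefKind::kOther;
  uint64_t imm = 0;                     // immediate operand, when meaningful
  std::vector<const DefNode*> srcs;     // register inputs feeding this definition
};

// True if the value defined by `def` is known to fit in the low `validBits` bits.
inline bool FitsInLowBits(const DefNode &def, uint32_t validBits) {
  switch (def.kind) {
    case DefKind::kLoadByte: return validBits == 8;
    case DefKind::kLoadHalf: return validBits == 16;
    case DefKind::kOrrImm:   // orr only sets bits present in the immediate or the input
      return (def.imm >> validBits) == 0 &&
             !def.srcs.empty() && FitsInLowBits(*def.srcs[0], validBits);
    case DefKind::kAndReg:   // and keeps only bits common to its inputs
      for (const DefNode *s : def.srcs) {
        if (FitsInLowBits(*s, validBits)) {
          return true;
        }
      }
      return false;
    default:
      return false;          // anything else is treated conservatively
  }
}
}  // namespace sketch
// -----------------------------------------------------------------------------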
*/ + for (auto defInsn : srcDefSet) { + CHECK_FATAL((defInsn != nullptr), "defInsn is null!"); + MOperator mOp = defInsn->GetMachineOpcode(); + switch (mOp) { + case MOP_wiorrri12: + case MOP_weorrri12: { + /* check immVal if mop is OR */ + ImmOperand &imm = static_cast(defInsn->GetOperand(kInsnThirdOpnd)); + uint32 bitNum = static_cast(imm.GetValue()); + if ((bitNum >> validNum) != 0) { + return false; + } + } + case MOP_wandrri12: { + /* check defSrcReg */ + RegOperand &defSrcRegOpnd = static_cast(defInsn->GetOperand(kInsnSecondOpnd)); + regno_t defSrcRegNo = defSrcRegOpnd.GetRegisterNumber(); + if (!CheckSrcReg(defSrcRegNo, validNum)) { + return false; + } + break; + } + case MOP_wandrrr: { + /* check defSrcReg */ + RegOperand &defSrcRegOpnd1 = static_cast(defInsn->GetOperand(kInsnSecondOpnd)); + RegOperand &defSrcRegOpnd2 = static_cast(defInsn->GetOperand(kInsnThirdOpnd)); + regno_t defSrcRegNo1 = defSrcRegOpnd1.GetRegisterNumber(); + regno_t defSrcRegNo2 = defSrcRegOpnd2.GetRegisterNumber(); + if (!CheckSrcReg(defSrcRegNo1, validNum) && !CheckSrcReg(defSrcRegNo2, validNum)) { + return false; + } + break; + } + case MOP_wiorrrr: + case MOP_weorrrr: { + /* check defSrcReg */ + RegOperand &defSrcRegOpnd1 = static_cast(defInsn->GetOperand(kInsnSecondOpnd)); + RegOperand &defSrcRegOpnd2 = static_cast(defInsn->GetOperand(kInsnThirdOpnd)); + regno_t defSrcRegNo1 = defSrcRegOpnd1.GetRegisterNumber(); + regno_t defSrcRegNo2 = defSrcRegOpnd2.GetRegisterNumber(); + if (!CheckSrcReg(defSrcRegNo1, validNum) || !CheckSrcReg(defSrcRegNo2, validNum)) { + return false; + } + break; + } + case MOP_wldrb: { + if (validNum != k8BitSize) { + return false; + } + break; + } + case MOP_wldrh: { + if (validNum != k16BitSize) { + return false; + } + break; + } + default: + return false; + } + } + return true; +} + +bool ExtendMovPattern::BitNotAffected(const Insn &insn, uint32 validNum) { + RegOperand &firstOpnd = static_cast(insn.GetOperand(kInsnFirstOpnd)); + if (firstOpnd.IsPhysicalRegister()) { + return false; + } + RegOperand &secondOpnd = static_cast(insn.GetOperand(kInsnSecondOpnd)); + regno_t desRegNo = firstOpnd.GetRegisterNumber(); + regno_t srcRegNo = secondOpnd.GetRegisterNumber(); + VRegVersion *useVersion = optSsaInfo->FindSSAVersion(desRegNo); + CHECK_FATAL(useVersion != nullptr, "useVRegVersion must not be null based on ssa"); + DUInsnInfo *defInfo = useVersion->GetDefInsnInfo(); + if (defInfo == nullptr) { + return false; + } + if (!CheckSrcReg(srcRegNo, validNum)) { + return false; + } + replaceMop = MOP_wmovrr; + return true; +} + +bool ExtendMovPattern::CheckCondition(Insn &insn) { + MOperator mOp = insn.GetMachineOpcode(); + switch (mOp) { + case MOP_xuxtb32: return BitNotAffected(insn, k8BitSize); + case MOP_xuxth32: return BitNotAffected(insn, k16BitSize); + default: return false; + } +} + +/* No initialization required */ +void ExtendMovPattern::Init() { + replaceMop = MOP_undef; +} + +void ExtendMovPattern::Optimize(Insn &insn) { + insn.SetMOP(AArch64CG::kMd[replaceMop]); +} + +void CopyRegProp::Run() { + FOR_ALL_BB(bb, &cgFunc) { + FOR_BB_INSNS(insn, bb) { + if (!insn->IsMachineInstruction()) { + continue; + } + Init(); + if (!CheckCondition(*insn)) { + continue; + } + Optimize(*insn); + } + } +} + +bool CopyRegProp::IsValidCopyProp(const RegOperand &dstReg, const RegOperand &srcReg) const { + ASSERT(destVersion != nullptr, "find destVersion failed"); + ASSERT(srcVersion != nullptr, "find srcVersion failed"); + LiveInterval *dstll = nullptr; + LiveInterval *srcll = nullptr; + if 
(destVersion->GetOriginalRegNO() == srcVersion->GetOriginalRegNO()) { + return true; + } + regno_t dstRegNO = dstReg.GetRegisterNumber(); + regno_t srcRegNO = srcReg.GetRegisterNumber(); + for (auto useDUInfoIt : destVersion->GetAllUseInsns()) { + if (useDUInfoIt.second == nullptr) { + continue; + } + Insn *useInsn = (useDUInfoIt.second)->GetInsn(); + if (useInsn == nullptr) { + continue; + } + if (useInsn->IsPhi() && dstReg.GetSize() != srcReg.GetSize()) { + return false; + } + + dstll = regll->GetLiveInterval(dstRegNO); + srcll = regll->GetLiveInterval(srcRegNO); + ASSERT(dstll != nullptr, "dstll should not be nullptr"); + ASSERT(srcll != nullptr, "srcll should not be nullptr"); + static_cast(regll)->CheckInterference(*dstll, *srcll); + BB *useBB = useInsn->GetBB(); + if (dstll->IsConflictWith(srcRegNO) && + /* support override value when the version is not transphi */ + (((useBB->IsInPhiDef(srcRegNO) || useBB->IsInPhiList(srcRegNO)) && useBB->HasCriticalEdge()) || + useBB->IsInPhiList(dstRegNO))) { + return false; + } + } + if (dstll && srcll) { + regll->CoalesceLiveIntervals(*dstll, *srcll); + } + return true; +} + +bool CopyRegProp::CheckCondition(Insn &insn) { + if (Globals::GetInstance()->GetTarget()->IsEffectiveCopy(insn)) { + MOperator mOp = insn.GetMachineOpcode(); + if (mOp == MOP_xmovrr || mOp == MOP_wmovrr || mOp == MOP_xvmovs || mOp == MOP_xvmovd) { + Operand &destOpnd = insn.GetOperand(kInsnFirstOpnd); + Operand &srcOpnd = insn.GetOperand(kInsnSecondOpnd); + ASSERT(destOpnd.IsRegister() && srcOpnd.IsRegister(), "must be"); + auto &destReg = static_cast(destOpnd); + auto &srcReg = static_cast(srcOpnd); + if (srcReg.GetRegisterNumber() == RZR) { + insn.SetMOP(AArch64CG::kMd[mOp == MOP_xmovrr ? MOP_xmovri64 : MOP_wmovri32]); + insn.SetOperand(kInsnSecondOpnd, cgFunc.CreateImmOperand(PTY_u64, 0)); + } + if (destReg.IsSSAForm() && srcReg.IsSSAForm()) { + /* case for ExplicitExtendProp */ + auto &propInsns = optSsaInfo->GetSafePropInsns(); + bool isSafeCvt = std::find(propInsns.begin(), propInsns.end(), insn.GetId()) != propInsns.end(); + if (destReg.GetSize() != srcReg.GetSize() && !isSafeCvt) { + VaildateImplicitCvt(destReg, srcReg, insn); + return false; + } + if (destReg.GetValidBitsNum() >= srcReg.GetValidBitsNum()) { + destReg.SetValidBitsNum(srcReg.GetValidBitsNum()); + } else if (!isSafeCvt) { + CHECK_FATAL(false, "do not support explicit extract bit in mov"); + return false; + } + destVersion = optSsaInfo->FindSSAVersion(destReg.GetRegisterNumber()); + ASSERT(destVersion != nullptr, "find Version failed"); + srcVersion = optSsaInfo->FindSSAVersion(srcReg.GetRegisterNumber()); + ASSERT(srcVersion != nullptr, "find Version failed"); + if (!IsValidCopyProp(destReg, srcReg)) { + return false; + } + return true; + } else { + /* should be eliminated by ssa peep */ + } + } + } + return false; +} + +bool CopyRegProp::IsNotSpecialOptimizedInsn(const Insn &insn) { + MOperator curMop = insn.GetMachineOpcode(); + if (curMop != MOP_wbfirri5i5 && curMop != MOP_xbfirri6i6) { + return true; + } + auto &useOpnd = static_cast(insn.GetOperand(kInsnSecondOpnd)); + VRegVersion *version = optSsaInfo->FindSSAVersion(useOpnd.GetRegisterNumber()); + CHECK_FATAL(version != nullptr, "get SSAVersion failed"); + Insn *defInsn = FindDefInsn(version); + if (defInsn == nullptr) { + return true; + } + MOperator defMop = defInsn->GetMachineOpcode(); + if (defMop == MOP_wmovri32 || defMop == MOP_xmovri64) { + return false; + } + + bool hasCrossDUUse = false; + int32 bothDUId = -1; + for (auto duInfoIt : 
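// ---- Editorial sketch (not part of the original patch) ---------------------
// CopyRegProp rewrites uses of the destination of a register copy so they read the
// source directly, e.g.
//
//   mov  R105, R100          // SSA copy
//   add  R110, R105, #4      // ==>  add R110, R100, #4
//
// after which the copy itself becomes dead.  IsValidCopyProp above guards the cases
// where the two SSA versions cannot share a value (conflicting live intervals around
// phis and critical edges).  Toy standalone rename step with hypothetical types:
#include <vector>

namespace sketch {
struct ToyUse { unsigned reg; };

// Redirect every use of `copyDst` to `copySrc`; the real pass does this only after
// the SSA and interference checks have succeeded.
inline void RewriteCopyUses(std::vector<ToyUse> &uses, unsigned copyDst, unsigned copySrc) {
  for (ToyUse &u : uses) {
    if (u.reg == copyDst) {
      u.reg = copySrc;
    }
  }
}
}  // namespace sketch
// -----------------------------------------------------------------------------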
srcVersion->GetAllUseInsns()) { + CHECK_FATAL(duInfoIt.second != nullptr, "get DUInsnInfo failed"); + Insn *useInsn = duInfoIt.second->GetInsn(); + MOperator useMop = useInsn->GetMachineOpcode(); + if (useInsn->GetSSAImpDefOpnd() != nullptr) { + bothDUId = static_cast(useInsn->GetId()); + } + if (useInsn->GetSSAImpDefOpnd() == nullptr && useMop != MOP_xmovrr && useMop != MOP_wmovrr && + useMop != MOP_xvmovs && useMop != MOP_xvmovd && bothDUId > -1 && + useInsn->GetId() > static_cast(bothDUId)) { + hasCrossDUUse = true; + break; + } + } + return hasCrossDUUse; +} + +void CopyRegProp::ReplaceAllUseForCopyProp() { + MapleUnorderedMap &useList = destVersion->GetAllUseInsns(); + Insn *srcRegDefInsn = FindDefInsn(srcVersion); + for (auto it = useList.begin(); it != useList.end();) { + Insn *useInsn = it->second->GetInsn(); + if (srcRegDefInsn != nullptr && srcRegDefInsn->GetSSAImpDefOpnd() != nullptr && + useInsn->GetSSAImpDefOpnd() != nullptr) { + if (IsNotSpecialOptimizedInsn(*useInsn)) { + ++it; + continue; + } + } + auto *a64SSAInfo = static_cast(optSsaInfo); + a64SSAInfo->CheckAsmDUbinding(*useInsn, destVersion, srcVersion); + for (auto &opndIt : it->second->GetOperands()) { + Operand &opnd = useInsn->GetOperand(opndIt.first); + A64ReplaceRegOpndVisitor replaceRegOpndVisitor(cgFunc, *useInsn, opndIt.first, + *destVersion->GetSSAvRegOpnd(), *srcVersion->GetSSAvRegOpnd()); + opnd.Accept(replaceRegOpndVisitor); + srcVersion->AddUseInsn(*optSsaInfo, *useInsn, opndIt.first); + it->second->ClearDU(opndIt.first); + } + it = useList.erase(it); + } +} + +void CopyRegProp::Optimize(Insn &insn) { + ReplaceAllUseForCopyProp(); + if (cgFunc.IsExtendReg(destVersion->GetSSAvRegOpnd()->GetRegisterNumber())) { + cgFunc.InsertExtendSet(srcVersion->GetSSAvRegOpnd()->GetRegisterNumber()); + } +} + +void CopyRegProp::VaildateImplicitCvt(RegOperand &destReg, const RegOperand &srcReg, Insn &movInsn) { + ASSERT(movInsn.GetMachineOpcode() == MOP_xmovrr || movInsn.GetMachineOpcode() == MOP_wmovrr, "NIY explicit CVT"); + if (destReg.GetSize() == k64BitSize && srcReg.GetSize() == k32BitSize) { + movInsn.SetMOP(AArch64CG::kMd[MOP_xuxtw64]); + } else if (destReg.GetSize() == k32BitSize && srcReg.GetSize() == k64BitSize) { + movInsn.SetMOP(AArch64CG::kMd[MOP_xubfxrri6i6]); + movInsn.AddOperand(cgFunc.CreateImmOperand(PTY_i64, 0)); + movInsn.AddOperand(cgFunc.CreateImmOperand(PTY_i64, k32BitSize)); + } else { + CHECK_FATAL(false, " unknown explicit integer cvt, need implement in ssa prop "); + } + destReg.SetValidBitsNum(k32BitSize); +} + +void RedundantPhiProp::Run() { + FOR_ALL_BB(bb, &cgFunc) { + for (auto &phiIt : as_const(bb->GetPhiInsns())) { + Init(); + if (!CheckCondition(*phiIt.second)) { + continue; + } + Optimize(*phiIt.second); + } + } +} + +void RedundantPhiProp::Optimize(Insn &insn) { + optSsaInfo->ReplaceAllUse(destVersion, srcVersion); +} + +bool RedundantPhiProp::CheckCondition(Insn &insn) { + ASSERT(insn.IsPhi(), "must be phi insn here"); + auto &phiOpnd = static_cast(insn.GetOperand(kInsnSecondOpnd)); + if (phiOpnd.IsRedundancy()) { + auto &phiDestReg = static_cast(insn.GetOperand(kInsnFirstOpnd)); + destVersion = optSsaInfo->FindSSAVersion(phiDestReg.GetRegisterNumber()); + ASSERT(destVersion != nullptr, "find Version failed"); + uint32 srcRegNO = phiOpnd.GetOperands().cbegin()->second->GetRegisterNumber(); + srcVersion = optSsaInfo->FindSSAVersion(srcRegNO); + ASSERT(srcVersion != nullptr, "find Version failed"); + return true; + } + return false; +} + +bool ValidBitNumberProp::IsImplicitUse(const 
RegOperand &dstOpnd, const RegOperand &srcOpnd) const { + for (auto destUseIt : destVersion->GetAllUseInsns()) { + Insn *useInsn = destUseIt.second->GetInsn(); + if (useInsn->GetMachineOpcode() == MOP_xuxtw64) { + return true; + } + if (useInsn->GetMachineOpcode() == MOP_xubfxrri6i6) { + auto &lsbOpnd = static_cast(useInsn->GetOperand(kInsnThirdOpnd)); + auto &widthOpnd = static_cast(useInsn->GetOperand(kInsnFourthOpnd)); + if (lsbOpnd.GetValue() == k0BitSize && widthOpnd.GetValue() == k32BitSize) { + return false; + } + } + if (useInsn->IsPhi()) { + auto &defOpnd = static_cast(useInsn->GetOperand(kInsnFirstOpnd)); + if (defOpnd.GetSize() == k32BitSize) { + return false; + } + } + /* if srcOpnd upper 32 bits are valid, it can not prop to mop_x */ + if (srcOpnd.GetSize() == k64BitSize && dstOpnd.GetSize() == k64BitSize) { + const InsnDesc *useMD = &AArch64CG::kMd[useInsn->GetMachineOpcode()]; + for (auto &opndUseIt : as_const(destUseIt.second->GetOperands())) { + const OpndDesc *useProp = useMD->GetOpndDes(opndUseIt.first); + if (useProp->GetSize() == k64BitSize) { + return true; + } + } + } + } + return false; +} + +bool ValidBitNumberProp::CheckCondition(Insn &insn) { + /* extend to all shift pattern in future */ + RegOperand *destOpnd = nullptr; + RegOperand *srcOpnd = nullptr; + if (insn.GetMachineOpcode() == MOP_xuxtw64) { + destOpnd = &static_cast(insn.GetOperand(kInsnFirstOpnd)); + srcOpnd = &static_cast(insn.GetOperand(kInsnSecondOpnd)); + } + if (insn.GetMachineOpcode() == MOP_xubfxrri6i6) { + destOpnd = &static_cast(insn.GetOperand(kInsnFirstOpnd)); + srcOpnd = &static_cast(insn.GetOperand(kInsnSecondOpnd)); + auto &lsb = static_cast(insn.GetOperand(kInsnThirdOpnd)); + auto &width = static_cast(insn.GetOperand(kInsnFourthOpnd)); + if ((lsb.GetValue() != 0) || (width.GetValue() != k32BitSize)) { + return false; + } + } + if (destOpnd != nullptr && destOpnd->IsSSAForm() && srcOpnd != nullptr && srcOpnd->IsSSAForm()) { + destVersion = optSsaInfo->FindSSAVersion(destOpnd->GetRegisterNumber()); + ASSERT(destVersion != nullptr, "find Version failed"); + srcVersion = optSsaInfo->FindSSAVersion(srcOpnd->GetRegisterNumber()); + ASSERT(srcVersion != nullptr, "find Version failed"); + if (destVersion->HasImplicitCvt()) { + return false; + } + if (IsImplicitUse(*destOpnd, *srcOpnd)) { + return false; + } + srcVersion->SetImplicitCvt(); + return true; + } + return false; +} + +void ValidBitNumberProp::Optimize(Insn &insn) { + optSsaInfo->ReplaceAllUse(destVersion, srcVersion); + cgFunc.InsertExtendSet(srcVersion->GetSSAvRegOpnd()->GetRegisterNumber()); +} + +void ValidBitNumberProp::Run() { + FOR_ALL_BB(bb, &cgFunc) { + FOR_BB_INSNS(insn, bb) { + if (!insn->IsMachineInstruction()) { + continue; + } + Init(); + if (!CheckCondition(*insn)) { + continue; + } + Optimize(*insn); + } + } +} + +void FpSpConstProp::Run() { + FOR_ALL_BB(bb, &cgFunc) { + FOR_BB_INSNS(insn, bb) { + if (!insn->IsMachineInstruction()) { + continue; + } + Init(); + if (!CheckCondition(*insn)) { + continue; + } + Optimize(*insn); + } + } +} + +bool FpSpConstProp::CheckCondition(Insn &insn) { + std::set defRegs = insn.GetDefRegs(); + auto &a64CGFunc = static_cast(cgFunc); + if (defRegs.size() <= 1) { + if (insn.ScanReg(RSP)) { + fpSpBase = &a64CGFunc.GetOrCreatePhysicalRegisterOperand(RSP, k64BitSize, kRegTyInt); + /* not safe due to varied sp in alloca */ + if (cgFunc.HasVLAOrAlloca()) { + return false; + } + } + if (insn.ScanReg(RFP)) { + ASSERT(fpSpBase == nullptr, " unexpect for both sp fp using "); + fpSpBase = 
&a64CGFunc.GetOrCreatePhysicalRegisterOperand(RFP, k64BitSize, kRegTyInt); + } + if (fpSpBase == nullptr) { + return false; + } + if (insn.GetMachineOpcode() == MOP_xaddrri12) { + aT = kAArch64Add; + if (GetValidSSAInfo(insn.GetOperand(kInsnFirstOpnd))) { + shiftOpnd = &static_cast(insn.GetOperand(kInsnThirdOpnd)); + return true; + } + } else if (insn.GetMachineOpcode() == MOP_xsubrri12) { + aT = kAArch64Sub; + if (GetValidSSAInfo(insn.GetOperand(kInsnFirstOpnd))) { + shiftOpnd = &static_cast(insn.GetOperand(kInsnThirdOpnd)); + return true; + } + } + } + return false; +} + +bool FpSpConstProp::GetValidSSAInfo(Operand &opnd) { + if (opnd.IsRegister()) { + auto ®Opnd = static_cast(opnd); + if (regOpnd.IsSSAForm()) { + replaced = optSsaInfo->FindSSAVersion(regOpnd.GetRegisterNumber()); + ASSERT(replaced != nullptr, "find ssa version failed in FpSpConstProp"); + return true; + } + } + return false; +} + +int64 FpSpConstProp::ArithmeticFold(int64 valInUse, ArithmeticType useAT) const { + int64 valInDef = shiftOpnd->GetValue(); + int64 returnVal = 0; + CHECK_FATAL(aT == kAArch64Add || aT == kAArch64Sub, "unsupport sp/fp arthimetic in aarch64"); + if (useAT == aT) { + returnVal = valInUse + valInDef; + } else { + returnVal = valInUse - valInDef; + } + return returnVal; +} + +void FpSpConstProp::PropInMem(DUInsnInfo &useDUInfo, Insn &useInsn) { + MOperator useMop = useInsn.GetMachineOpcode(); + if (useInsn.IsAtomic()) { + return; + } + if (useInsn.IsStore() || useInsn.IsLoad()) { + if (useDUInfo.GetOperands().size() == 1) { + auto useOpndIt = useDUInfo.GetOperands().begin(); + if (useOpndIt->first == kInsnSecondOpnd || useOpndIt->first == kInsnThirdOpnd) { + ASSERT(useOpndIt->second == 1, "multiple use in memory opnd"); + auto *a64memOpnd = static_cast(useInsn.GetMemOpnd()); + if (a64memOpnd->IsIntactIndexed() && a64memOpnd->GetAddrMode() == MemOperand::kAddrModeBOi) { + auto *ofstOpnd = static_cast(a64memOpnd->GetOffsetImmediate()); + CHECK_FATAL(ofstOpnd != nullptr, "oldOffsetOpnd is null"); + int64 newVal = ArithmeticFold(ofstOpnd->GetValue(), kAArch64Add); + auto *newOfstImm = &static_cast(cgFunc).CreateOfstOpnd(static_cast(newVal), + k64BitSize); + if (ofstOpnd->GetVary() == kUnAdjustVary || shiftOpnd->GetVary() == kUnAdjustVary) { + newOfstImm->SetVary(kUnAdjustVary); + } + auto *newMem = static_cast(cgFunc).CreateMemOperand( + MemOperand::kAddrModeBOi, a64memOpnd->GetSize(), *fpSpBase, + nullptr, newOfstImm, nullptr); + if (static_cast(cgFunc).IsOperandImmValid(useMop, newMem, useOpndIt->first)) { + useInsn.SetMemOpnd(newMem); + useDUInfo.DecreaseDU(useOpndIt->first); + replaced->CheckDeadUse(useInsn); + } + } + } + } else { + /* + * case : store stack location on stack + * add x1, sp, #8 + * ... 
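// ---- Editorial sketch (not part of the original patch) ---------------------
// Summary of what FpSpConstProp does with a definition "add/sub xN, sp|fp, #c":
// each use of xN is rewritten against sp/fp directly and the constant is folded
// into the user's own immediate (every result must still pass IsOperandImmValid):
//
//   add  x1, sp, #8                 ldr  x0, [sp, #24]
//   ldr  x0, [x1, #16]     ==>      add  x2, sp, #12
//   add  x2, x1, #4
//
// The folding rule mirrors ArithmeticFold above: identical operations accumulate,
// opposite operations cancel.  Standalone restatement with assumed names:
#include <cstdint>

namespace sketch {
enum class ArithOp { kAdd, kSub };

// defVal comes from the sp/fp-relative definition, useVal from the user insn; the
// result is the constant of the rewritten, sp/fp-based user.
inline int64_t FoldSpFpConst(int64_t useVal, ArithOp useOp, int64_t defVal, ArithOp defOp) {
  return (useOp == defOp) ? (useVal + defVal) : (useVal - defVal);
}
}  // namespace sketch
// -----------------------------------------------------------------------------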
+ * store x1 [x1, #16] + * not prop , not benefit to live range yet + */ + return; + } + } +} + +void FpSpConstProp::PropInArith(DUInsnInfo &useDUInfo, Insn &useInsn, ArithmeticType curAT) { + if (useDUInfo.GetOperands().size() == 1) { + auto &a64cgFunc = static_cast(cgFunc); + MOperator useMop = useInsn.GetMachineOpcode(); + ASSERT(useDUInfo.GetOperands().begin()->first == kInsnSecondOpnd, "NIY"); + ASSERT(useDUInfo.GetOperands().begin()->second == 1, "multiple use in add/sub"); + auto &curVal = static_cast(useInsn.GetOperand(kInsnThirdOpnd)); + ImmOperand &newVal = a64cgFunc.CreateImmOperand(ArithmeticFold(curVal.GetValue(), curAT), + curVal.GetSize(), false); + if (newVal.GetValue() < 0) { + newVal.Negate(); + useMop = A64ConstProp::GetReversalMOP(useMop); + } + if (curVal.GetVary() == kUnAdjustVary || shiftOpnd->GetVary() == kUnAdjustVary) { + newVal.SetVary(kUnAdjustVary); + } + if (static_cast(cgFunc).IsOperandImmValid(useMop, &newVal, kInsnThirdOpnd)) { + Insn &newInsn = + cgFunc.GetInsnBuilder()->BuildInsn(useMop, useInsn.GetOperand(kInsnFirstOpnd), *fpSpBase, newVal); + useInsn.GetBB()->ReplaceInsn(useInsn, newInsn); + optSsaInfo->ReplaceInsn(useInsn, newInsn); + } + } else { + CHECK_FATAL(false, "NYI"); + } +} + +void FpSpConstProp::PropInCopy(DUInsnInfo &useDUInfo, Insn &useInsn, MOperator oriMop) { + if (useDUInfo.GetOperands().size() == 1) { + ASSERT(useDUInfo.GetOperands().begin()->first == kInsnSecondOpnd, "NIY"); + ASSERT(useDUInfo.GetOperands().begin()->second == 1, "multiple use in add/sub"); + auto &newVal = *static_cast(shiftOpnd->Clone(*cgFunc.GetMemoryPool())); + Insn &newInsn = cgFunc.GetInsnBuilder()->BuildInsn(oriMop, useInsn.GetOperand(kInsnFirstOpnd), *fpSpBase, newVal); + useInsn.GetBB()->ReplaceInsn(useInsn, newInsn); + optSsaInfo->ReplaceInsn(useInsn, newInsn); + } else { + CHECK_FATAL(false, "NYI"); + } +} + +void FpSpConstProp::Optimize(Insn &insn) { + for (auto &useInsnInfo : replaced->GetAllUseInsns()) { + Insn *useInsn = useInsnInfo.second->GetInsn(); + MOperator useMop = useInsn->GetMachineOpcode(); + PropInMem(*useInsnInfo.second, *useInsn); + switch (useMop) { + case MOP_xmovrr: + case MOP_wmovrr: + PropInCopy(*useInsnInfo.second, *useInsn, insn.GetMachineOpcode()); + break; + case MOP_xaddrri12: + PropInArith(*useInsnInfo.second, *useInsn, kAArch64Add); + break; + case MOP_xsubrri12: + PropInArith(*useInsnInfo.second, *useInsn, kAArch64Sub); + break; + default: + break; + } + } +} + +A64ConstFoldPattern::TypeAndSize A64ConstFoldPattern::SelectFoldTypeAndCheck64BitSize(const Insn &insn) const { + MOperator mOp = insn.GetMachineOpcode(); + switch (mOp) { + case MOP_waddrri12: return std::pair{kAdd, false}; + case MOP_xaddrri12: return std::pair{kAdd, true}; + case MOP_wsubrri12: return std::pair{kSub, false}; + case MOP_xsubrri12: return std::pair{kSub, true}; + case MOP_wlslrri5: return std::pair{kLsl, false}; + case MOP_xlslrri6: return std::pair{kLsl, true}; + case MOP_wlsrrri5: return std::pair{kLsr, false}; + case MOP_xlsrrri6: return std::pair{kLsr, true}; + case MOP_wasrrri5: return std::pair{kAsr, false}; + case MOP_xasrrri6: return std::pair{kAsr, true}; + case MOP_wandrri12: return std::pair{kAnd, false}; + case MOP_xandrri13: return std::pair{kAnd, true}; + case MOP_wiorrri12: return std::pair{kOrr, false}; + case MOP_xiorrri13: return std::pair{kOrr, true}; + case MOP_weorrri12: return std::pair{kEor, false}; + case MOP_xeorrri13: return std::pair{kEor, true}; + default: + return std::pair{kFoldUndef, false}; + } +} + +bool 
A64ConstFoldPattern::IsDefInsnValid(const Insn &curInsn, const Insn &validDefInsn) { + std::pair defInfo = SelectFoldTypeAndCheck64BitSize(validDefInsn); + defFoldType = defInfo.first; + if (defFoldType == kFoldUndef) { + return false; + } + /* do not optimize MOP_x and MOP_w */ + if (is64Bit != defInfo.second) { + return false; + } + ASSERT(curInsn.GetOperand(kInsnFirstOpnd).IsRegister() && + validDefInsn.GetOperand(kInsnSecondOpnd).IsRegister(), "must be"); + dstOpnd = &static_cast(curInsn.GetOperand(kInsnFirstOpnd)); + srcOpnd = &static_cast(validDefInsn.GetOperand(kInsnSecondOpnd)); + if (dstOpnd->IsPhysicalRegister() || srcOpnd->IsPhysicalRegister()) { + return false; + } + optType = constFoldTable[useFoldType][defFoldType]; + if (optType == kOptUndef) { + return false; + } + ASSERT(defInsn->GetOperand(kInsnFirstOpnd).IsRegister(), "must be"); + defDstOpnd = &static_cast(defInsn->GetOperand(kInsnFirstOpnd)); + return true; +} + +bool A64ConstFoldPattern::IsPhiInsnValid(const Insn &curInsn, const Insn &phiInsn) { + std::vector validDefInsns; + auto &phiOpnd = static_cast(phiInsn.GetOperand(kInsnSecondOpnd)); + for (auto useIt : phiOpnd.GetOperands()) { + ASSERT(useIt.second != nullptr, "get phiUseOpnd failed"); + Insn *defPhiInsn = optSsaInfo->GetDefInsn(*useIt.second); + /* check only one layer of phi */ + if (defPhiInsn == nullptr || defPhiInsn->IsPhi()) { + return false; + } + (void)validDefInsns.emplace_back(defPhiInsn); + } + if (validDefInsns.empty()) { + return false; + } + if (!IsDefInsnValid(curInsn, *validDefInsns[0])) { + return false; + } + MOperator mOp = validDefInsns[0]->GetMachineOpcode(); + CHECK_FATAL(validDefInsns[0]->GetOperand(kInsnSecondOpnd).IsRegister(), "check this insn"); + CHECK_FATAL(validDefInsns[0]->GetOperand(kInsnThirdOpnd).IsImmediate(), "check this insn"); + auto &validSrcOpnd = static_cast(validDefInsns[0]->GetOperand(kInsnSecondOpnd)); + auto &validImmOpnd = static_cast(validDefInsns[0]->GetOperand(kInsnThirdOpnd)); + uint32 opndNum = validDefInsns[0]->GetOperandSize(); + for (uint32 insnIdx = 1; insnIdx < validDefInsns.size(); ++insnIdx) { + Insn *insn = validDefInsns[insnIdx]; + if (insn->GetMachineOpcode() != mOp) { + return false; + } + if (insn->GetOperandSize() != opndNum) { + return false; + } + if (!insn->GetOperand(kInsnSecondOpnd).IsRegister() || !insn->GetOperand(kInsnThirdOpnd).IsImmediate()) { + return false; + } + if (!static_cast(insn->GetOperand(kInsnSecondOpnd)).Equals(validSrcOpnd) || + !static_cast(insn->GetOperand(kInsnThirdOpnd)).Equals(validImmOpnd)) { + return false; + } + } + defInsn = validDefInsns[0]; + return true; +} + +bool A64ConstFoldPattern::IsCompleteOptimization() { + VRegVersion *defDstVersion = optSsaInfo->FindSSAVersion(defDstOpnd->GetRegisterNumber()); + ASSERT(defDstVersion != nullptr, "get defDstVersion failed"); + /* check all uses of dstOpnd of defInsn to avoid spill */ + for (auto useInfoIt : defDstVersion->GetAllUseInsns()) { + ASSERT(useInfoIt.second != nullptr, "get duInsnInfo failed"); + Insn *useInsn = useInfoIt.second->GetInsn(); + ASSERT(useInsn != nullptr, "get useInsn failed"); + if (useInsn->IsPhi()) { + continue; + } + std::pair useInfo = SelectFoldTypeAndCheck64BitSize(*useInsn); + if (useInfo.first == kFoldUndef) { + return false; + } + } + return true; +} + +bool A64ConstFoldPattern::CheckCondition(Insn &insn) { + std::pair useInfo = SelectFoldTypeAndCheck64BitSize(insn); + useFoldType = useInfo.first; + if (useFoldType == kFoldUndef) { + return false; + } + is64Bit = useInfo.second; + 
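// ---- Editorial sketch (not part of the original patch) ---------------------
// The pattern recognised here is a chain of two immediate operations on the same
// value, e.g. "add w1, w0, #4" followed by "sub w2, w1, #9".  The two constants are
// combined (see GetNewImmVal further below) and a single instruction is emitted;
// when the combined constant is negative, the opcode is flipped (add<->sub,
// lsl<->lsr) so the encoded immediate stays non-negative.  Simplified standalone
// model of the add/sub case only; names are assumptions, not MapleBE API.
#include <cstdint>

namespace sketch {
struct FoldedArith {
  bool isSub;     // opcode of the single replacement instruction
  int64_t imm;    // always a non-negative immediate
};

inline FoldedArith FoldAddSubChain(bool defIsSub, int64_t defImm, bool useIsSub, int64_t useImm) {
  int64_t total = (defIsSub ? -defImm : defImm) + (useIsSub ? -useImm : useImm);
  return {total < 0, total < 0 ? -total : total};
}
}  // namespace sketch
//
// Example: add w1, w0, #4 ; sub w2, w1, #9  ==>  sub w2, w0, #5.
// -----------------------------------------------------------------------------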
ASSERT(insn.GetOperand(kInsnSecondOpnd).IsRegister(), "check this insn"); + auto &useOpnd = static_cast(insn.GetOperand(kInsnSecondOpnd)); + defInsn = optSsaInfo->GetDefInsn(useOpnd); + if (defInsn == nullptr) { + return false; + } + if (defInsn->IsPhi()) { + return IsPhiInsnValid(insn, *defInsn); + } else { + return IsDefInsnValid(insn, *defInsn); + } +} + +MOperator A64ConstFoldPattern::GetNewMop(bool isNegativeVal, MOperator curMop) const { + MOperator newMop = MOP_undef; + switch (useFoldType) { + case kAdd: + newMop = (isNegativeVal ? (is64Bit ? MOP_xsubrri12 : MOP_wsubrri12) : curMop); + break; + case kSub: + newMop = (isNegativeVal ? curMop : (is64Bit ? MOP_xaddrri12 : MOP_waddrri12)); + break; + case kLsl: + newMop = (isNegativeVal ? (is64Bit ? MOP_xlsrrri6 : MOP_wlsrrri5) : curMop); + break; + case kLsr: + newMop = (isNegativeVal ? (is64Bit ? MOP_xlslrri6 : MOP_wlslrri5) : curMop); + break; + case kAsr: + newMop = (isNegativeVal ? MOP_undef : curMop); + break; + case kAnd: + case kOrr: + case kEor: + return curMop; + default: + return MOP_undef; + } + return newMop; +} + +ImmOperand &A64ConstFoldPattern::GetNewImmOpnd(const ImmOperand &immOpnd, int64 newImmVal) const { + auto &a64Func = static_cast(cgFunc); + switch (useFoldType) { + case kAdd: + case kSub: + case kLsl: + case kLsr: + case kAsr: + return (newImmVal < 0 ? + a64Func.CreateImmOperand(-newImmVal, immOpnd.GetSize(), immOpnd.IsSignedValue()) : + a64Func.CreateImmOperand(newImmVal, immOpnd.GetSize(), immOpnd.IsSignedValue())); + case kAnd: + case kOrr: + case kEor: + return a64Func.CreateImmOperand(newImmVal, immOpnd.GetSize(), immOpnd.IsSignedValue()); + default: + CHECK_FATAL(false, "can not run here"); + } +} + +void A64ConstFoldPattern::ReplaceWithNewInsn(Insn &insn, const ImmOperand &immOpnd, int64 newImmVal) { + auto &a64Func = static_cast(cgFunc); + MOperator curMop = insn.GetMachineOpcode(); + MOperator newMop = GetNewMop(newImmVal < 0, curMop); + ImmOperand &newImmOpnd = GetNewImmOpnd(immOpnd, newImmVal); + if (!a64Func.IsOperandImmValid(newMop, &newImmOpnd, kInsnThirdOpnd)) { + return; + } + if (useFoldType == kLsl || useFoldType == kLsr || useFoldType == kAsr) { + if (newImmVal < 0 || (is64Bit && newImmVal >= k64BitSize) || (!is64Bit && newImmVal >= k32BitSize)) { + return; + } + } + Insn &newInsn = cgFunc.GetInsnBuilder()->BuildInsn(newMop, *dstOpnd, *srcOpnd, newImmOpnd); + insn.GetBB()->ReplaceInsn(insn, newInsn); + /* update ssa info */ + optSsaInfo->ReplaceInsn(insn, newInsn); + if (PROP_DUMP) { + LogInfo::MapleLogger() << ">>>>>>> In A64ConstFoldPattern : <<<<<<<\n"; + LogInfo::MapleLogger() << "=======ReplaceInsn :\n"; + insn.Dump(); + LogInfo::MapleLogger() << "=======NewInsn :\n"; + newInsn.Dump(); + } +} + +int64 A64ConstFoldPattern::GetNewImmVal(const Insn &insn, const ImmOperand &defImmOpnd) const { + ASSERT(insn.GetOperand(kInsnThirdOpnd).IsImmediate(), "check this insn"); + auto &useImmOpnd = static_cast(insn.GetOperand(kInsnThirdOpnd)); + int64 newImmVal = 0; + switch (optType) { + case kPositive: + newImmVal = useImmOpnd.GetValue() + defImmOpnd.GetValue(); + break; + case kNegativeDef: + newImmVal = useImmOpnd.GetValue() - defImmOpnd.GetValue(); + break; + case kNegativeUse: + newImmVal = defImmOpnd.GetValue() - useImmOpnd.GetValue(); + break; + case kNegativeBoth: + newImmVal = -defImmOpnd.GetValue() - useImmOpnd.GetValue(); + break; + case kLogicalAnd: + newImmVal = defImmOpnd.GetValue() & useImmOpnd.GetValue(); + break; + case kLogicalOrr: + newImmVal = defImmOpnd.GetValue() | 
useImmOpnd.GetValue(); + break; + case kLogicalEor: + newImmVal = defImmOpnd.GetValue() ^ useImmOpnd.GetValue(); + break; + default: + CHECK_FATAL(false, "can not be here"); + } + return newImmVal; +} + +void A64ConstFoldPattern::Optimize(Insn &insn) { + ASSERT(defInsn->GetOperand(kInsnThirdOpnd).IsImmediate(), "check this insn"); + auto &defImmOpnd = static_cast(defInsn->GetOperand(kInsnThirdOpnd)); + int64 newImmVal = GetNewImmVal(insn, defImmOpnd); + if (newImmVal == 0 && optType != kLogicalAnd && optType != kLogicalOrr && optType != kLogicalEor) { + VRegVersion *dstVersion = optSsaInfo->FindSSAVersion(dstOpnd->GetRegisterNumber()); + VRegVersion *srcVersion = optSsaInfo->FindSSAVersion(srcOpnd->GetRegisterNumber()); + CHECK_FATAL(dstVersion != nullptr, "get dstVersion failed"); + CHECK_FATAL(srcVersion != nullptr, "get srcVersion failed"); + if (cgFunc.IsExtendReg(dstOpnd->GetRegisterNumber())) { + cgFunc.InsertExtendSet(srcOpnd->GetRegisterNumber()); + } + optSsaInfo->ReplaceAllUse(dstVersion, srcVersion); + } else if (IsCompleteOptimization()) { + ReplaceWithNewInsn(insn, defImmOpnd, newImmVal); + } +} + +void A64ConstFoldPattern::Run() { + FOR_ALL_BB(bb, &cgFunc) { + FOR_BB_INSNS(insn, bb) { + if (!insn->IsMachineInstruction()) { + continue; + } + Init(); + if (!CheckCondition(*insn)) { + continue; + } + Optimize(*insn); + } + } +} + +bool A64PregCopyPattern::DFSFindValidDefInsns(Insn *curDefInsn, std::vector &visitedPhiDefs, + std::unordered_map &visited) { + if (curDefInsn == nullptr) { + return false; + } + /* + * avoid the case as following: + * R113 and R117 define each other. + * [BB5] ---------------------------- + * phi: R113, (R111<4>, R117<9>) | + * / \ | + * / \ | + * [BB6] ---- [BB7] | + * add R116, R113, #4 phi: R117, (R113<5>, R116<6>) | + * / \ | + * / \ | + * [BB8] [BB28] | + * / | + * / | + * [BB9] ------ [BB5] | + * mov R1, R117 -------------------------- + * + * but the cases as following is right: + * (1) + * [BB124] + * add R339, R336, #345 -------- is found twice + * / \ + * / \ + * / [BB125] + * \ / + * \ / + * [BB56] + * phi: R370, (R339<124>, R339<125>) + * | + * | + * [BB61] + * mov R0, R370 + * (2) + * [BB17] + * phi: R242, (R241<14>, R218<53>) ------- is found twice + * / \ + * / \ + * / [BB26] [BB32] + * \ \ / + * \ [BB27] + * \ phi: R273, (R242<26>, R320<32>) + * [BB25] / + * \ [BB42] + * \ / + * [BB43] + * phi: R321, (R242<25>, R273<42>) + * | + * [BB47] + * mov R0, R321 + */ + if (visited[curDefInsn->GetId()] && curDefInsn->IsPhi() && !visitedPhiDefs.empty()) { + auto &curPhiOpnd = static_cast(curDefInsn->GetOperand(kInsnSecondOpnd)); + for (auto &curPhiListIt : curPhiOpnd.GetOperands()) { + auto &curUseOpnd = static_cast(*curPhiListIt.second); + if (std::find(visitedPhiDefs.begin(), visitedPhiDefs.end(), curUseOpnd.GetRegisterNumber()) != + visitedPhiDefs.end()) { + return false; + } + } + } + if (visited[curDefInsn->GetId()]) { + return true; + } + visited[curDefInsn->GetId()] = true; + if (!curDefInsn->IsPhi()) { + CHECK_FATAL(curDefInsn->IsMachineInstruction(), "expect valid insn"); + (void)validDefInsns.emplace_back(curDefInsn); + return true; + } + auto &phiOpnd = static_cast(curDefInsn->GetOperand(kInsnSecondOpnd)); + for (auto &phiListIt : phiOpnd.GetOperands()) { + auto &useOpnd = static_cast(*phiListIt.second); + VRegVersion *useVersion = optSsaInfo->FindSSAVersion(useOpnd.GetRegisterNumber()); + Insn *defInsn = FindDefInsn(useVersion); + if (defInsn == nullptr) { + return false; + } + if (defInsn->IsPhi()) { + auto &curPhiDef = 
static_cast(curDefInsn->GetOperand(kInsnFirstOpnd)); + (void)visitedPhiDefs.emplace_back(curPhiDef.GetRegisterNumber()); + } + if (!DFSFindValidDefInsns(defInsn, visitedPhiDefs, visited)) { + return false; + } + } + return true; +} + +bool A64PregCopyPattern::CheckMultiUsePoints(const Insn *defInsn) const { + Operand &dstOpnd = defInsn->GetOperand(kInsnFirstOpnd); + CHECK_FATAL(dstOpnd.IsRegister(), "dstOpnd must be register"); + VRegVersion *defVersion = optSsaInfo->FindSSAVersion(static_cast(dstOpnd).GetRegisterNumber()); + ASSERT(defVersion != nullptr, "defVersion should not be nullptr"); + /* use: (phi) or (mov preg) */ + for (auto &useInfoIt : defVersion->GetAllUseInsns()) { + DUInsnInfo *useInfo = useInfoIt.second; + CHECK_FATAL(useInfo, "get useDUInfo failed"); + Insn *useInsn = useInfo->GetInsn(); + CHECK_FATAL(useInsn, "get useInsn failed"); + if (!useInsn->IsPhi() && useInsn->GetMachineOpcode() != MOP_wmovrr && useInsn->GetMachineOpcode() != MOP_xmovrr) { + return false; + } + if ((useInsn->GetMachineOpcode() == MOP_wmovrr || useInsn->GetMachineOpcode() == MOP_xmovrr) && + !static_cast(useInsn->GetOperand(kInsnFirstOpnd)).IsPhysicalRegister()) { + return false; + } + } + return true; +} + +bool A64PregCopyPattern::CheckPhiCaseCondition(Insn &defInsn) { + std::unordered_map visited; + std::vector visitedPhiDefs; + if (defInsn.IsPhi()) { + (void)visitedPhiDefs.emplace_back( + static_cast(defInsn.GetOperand(kInsnFirstOpnd)).GetRegisterNumber()); + } + if (!DFSFindValidDefInsns(&defInsn, visitedPhiDefs, visited)) { + return false; + } + if (!CheckValidDefInsn(validDefInsns[0])) { + return false; + } + MOperator defMop = validDefInsns[0]->GetMachineOpcode(); + uint32 defOpndNum = validDefInsns[0]->GetOperandSize(); + for (size_t i = 1; i < validDefInsns.size(); ++i) { + if (defMop != validDefInsns[i]->GetMachineOpcode()) { + return false; + } + if (!CheckMultiUsePoints(validDefInsns[i])) { + return false; + } + for (uint32 idx = 0; idx < defOpndNum; ++idx) { + if (validDefInsns[0]->OpndIsDef(idx) && validDefInsns[i]->OpndIsDef(idx)) { + continue; + } + Operand &opnd1 = validDefInsns[0]->GetOperand(idx); + Operand &opnd2 = validDefInsns[i]->GetOperand(idx); + if (!opnd1.Equals(opnd2) && differIdx == -1) { + differIdx = static_cast(idx); + if (!validDefInsns[0]->GetOperand(static_cast(differIdx)).IsRegister() || + !validDefInsns[i]->GetOperand(static_cast(differIdx)).IsRegister()) { + return false; + } + auto &differOpnd1 = static_cast(validDefInsns[0]->GetOperand(static_cast(differIdx))); + auto &differOpnd2 = static_cast(validDefInsns[1]->GetOperand(static_cast(differIdx))); + /* avoid cc reg */ + if (!differOpnd1.IsOfIntClass() || !differOpnd2.IsOfIntClass() || + differOpnd1.IsPhysicalRegister() || differOpnd2.IsPhysicalRegister()) { + return false; + } + VRegVersion *differVersion1 = optSsaInfo->FindSSAVersion(differOpnd1.GetRegisterNumber()); + VRegVersion *differVersion2 = optSsaInfo->FindSSAVersion(differOpnd2.GetRegisterNumber()); + if (!differVersion1 || !differVersion2) { + return false; + } + if (differVersion1->GetOriginalRegNO() != differVersion2->GetOriginalRegNO()) { + return false; + } + differOrigNO = differVersion1->GetOriginalRegNO(); + } else if (!opnd1.Equals(opnd2) && static_cast(idx) != differIdx) { + return false; + } + } + if (differIdx <= 0) { + return false; + } + } + if (differIdx == -1) { + return false; + } + return true; +} + +bool A64PregCopyPattern::CheckUselessDefInsn(const Insn *defInsn) const { + Operand &dstOpnd = defInsn->GetOperand(kInsnFirstOpnd); 
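// ---- Editorial sketch (not part of the original patch) ---------------------
// CheckPhiCaseCondition above accepts the cross-phi case only when every definition
// reaching the phi has the same opcode and the definitions differ in exactly one
// source operand; that position (differIdx) is the one a new phi is built for
// later.  Toy standalone version of the "exactly one differing position" test over
// plain operand ids; types and names are assumptions, not MapleBE API.
#include <cstddef>
#include <vector>

namespace sketch {
// Returns the single index at which the two operand lists differ, or -1 when they
// are identical or differ in more than one place.
inline int FindSingleDifferIndex(const std::vector<int> &opndsA, const std::vector<int> &opndsB) {
  if (opndsA.size() != opndsB.size()) {
    return -1;
  }
  int differIdx = -1;
  for (size_t i = 0; i < opndsA.size(); ++i) {
    if (opndsA[i] == opndsB[i]) {
      continue;
    }
    if (differIdx != -1) {
      return -1;                 // second mismatch: the pattern does not apply
    }
    differIdx = static_cast<int>(i);
  }
  return differIdx;
}
}  // namespace sketch
//
// In the real pattern the mismatching operands must additionally be integer virtual
// registers that share the same original register number.
// -----------------------------------------------------------------------------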
+ CHECK_FATAL(dstOpnd.IsRegister(), "dstOpnd must be register"); + VRegVersion *defVersion = optSsaInfo->FindSSAVersion(static_cast(dstOpnd).GetRegisterNumber()); + ASSERT(defVersion != nullptr, "defVersion should not be nullptr"); + if (defVersion->GetAllUseInsns().size() == 1) { + return true; + } + /* + * avoid the case as following + * In a loop: + * [BB43] + * phi: R356, (R345<42>, R377<63>) + * / \ + * / \ + * [BB44] \ + * add R377, R356, #1 / + * mov R1, R377 / + * bl / + * \ / + * \ / + * [BB63] + */ + for (auto &useInfoIt : defVersion->GetAllUseInsns()) { + DUInsnInfo *useInfo = useInfoIt.second; + CHECK_FATAL(useInfo, "get useDUInfo failed"); + Insn *useInsn = useInfo->GetInsn(); + CHECK_FATAL(useInsn, "get useInsn failed"); + if (useInsn->IsPhi()) { + auto &phiDefOpnd = static_cast(useInsn->GetOperand(kInsnFirstOpnd)); + uint32 opndNum = defInsn->GetOperandSize(); + for (uint32 i = 0; i < opndNum; ++i) { + if (defInsn->OpndIsDef(i)) { + continue; + } + Operand &opnd = defInsn->GetOperand(i); + if (opnd.IsRegister() && static_cast(opnd).GetRegisterNumber() == phiDefOpnd.GetRegisterNumber()) { + return false; + } + } + } + } + return true; +} + +bool A64PregCopyPattern::CheckValidDefInsn(const Insn *defInsn) { + const auto *md = defInsn->GetDesc(); + CHECK_FATAL(md != nullptr, "expect valid AArch64MD"); + /* this pattern applies to all basicOps */ + if (md->IsMove() || md->IsStore() || md->IsLoad() || md->IsLoadStorePair() || md->IsLoadAddress() || md->IsCall() || + md->IsDMB() || md->IsVectorOp() || md->IsCondDef() || md->IsCondBranch() || md->IsUnCondBranch()) { + return false; + } + uint32 opndNum = defInsn->GetOperandSize(); + for (uint32 i = 0; i < opndNum; ++i) { + Operand &opnd = defInsn->GetOperand(i); + if (!opnd.IsRegister() && !opnd.IsImmediate() && !opnd.IsOpdShift() && !opnd.IsOpdExtend()) { + return false; + } + if (opnd.IsRegister()) { + auto ®Opnd = static_cast(opnd); + if (cgFunc.IsSPOrFP(regOpnd) || regOpnd.IsPhysicalRegister() || + (!regOpnd.IsOfIntClass() && !regOpnd.IsOfFloatOrSIMDClass())) { + return false; + } + } + } + return true; +} + +bool A64PregCopyPattern::CheckCondition(Insn &insn) { + MOperator curMop = insn.GetMachineOpcode(); + if (curMop != MOP_xmovrr && curMop != MOP_wmovrr) { + return false; + } + auto &dstOpnd = static_cast(insn.GetOperand(kInsnFirstOpnd)); + if (!dstOpnd.IsPhysicalRegister()) { + return false; + } + regno_t useRegNO = static_cast(insn.GetOperand(kInsnSecondOpnd)).GetRegisterNumber(); + VRegVersion *useVersion = optSsaInfo->FindSSAVersion(useRegNO); + Insn *defInsn = FindDefInsn(useVersion); + if (defInsn == nullptr) { + return false; + } + Operand &defDstOpnd = defInsn->GetOperand(kInsnFirstOpnd); + /* avoid inline-asm */ + if (!defDstOpnd.IsRegister()) { + return false; + } + if (!CheckMultiUsePoints(defInsn)) { + return false; + } + if (defInsn->IsPhi()) { + isCrossPhi = true; + firstPhiInsn = defInsn; + return CheckPhiCaseCondition(*defInsn); + } else { + if (!CheckValidDefInsn(defInsn)) { + return false; + } + if (!CheckUselessDefInsn(defInsn)) { + return false; + } + (void)validDefInsns.emplace_back(defInsn); + } + return true; +} + +Insn &A64PregCopyPattern::CreateNewPhiInsn(std::unordered_map &newPhiList, Insn *curInsn) { + CHECK_FATAL(!newPhiList.empty(), "empty newPhiList"); + RegOperand *differOrigOpnd = cgFunc.GetVirtualRegisterOperand(differOrigNO); + CHECK_FATAL(differOrigOpnd != nullptr, "get original opnd default"); + PhiOperand &phiList = optSsaInfo->CreatePhiOperand(); + for (auto &it : newPhiList) { + 
phiList.InsertOpnd(it.first, *it.second); + } + Insn &phiInsn = cgFunc.GetCG()->BuildPhiInsn(*differOrigOpnd, phiList); + optSsaInfo->CreateNewInsnSSAInfo(phiInsn); + BB *bb = curInsn->GetBB(); + (void)bb->InsertInsnBefore(*curInsn, phiInsn); + /* */ + bb->AddPhiInsn(differOrigNO, phiInsn); + return phiInsn; +} + +/* + * Check whether the required phi is available, do not insert phi repeatedly. + */ +RegOperand *A64PregCopyPattern::CheckAndGetExistPhiDef(Insn &phiInsn, std::vector &validDifferRegNOs) const { + std::set validDifferOrigRegNOs; + for (regno_t ssaRegNO : validDifferRegNOs) { + VRegVersion *version = optSsaInfo->FindSSAVersion(ssaRegNO); + (void)validDifferOrigRegNOs.insert(version->GetOriginalRegNO()); + } + MapleMap &phiInsns = phiInsn.GetBB()->GetPhiInsns(); + for (auto &phiIt : as_const(phiInsns)) { + auto &def = static_cast(phiIt.second->GetOperand(kInsnFirstOpnd)); + VRegVersion *defVersion = optSsaInfo->FindSSAVersion(def.GetRegisterNumber()); + ASSERT(defVersion != nullptr, "defVersion should not be nullptr"); + /* + * if the phi of the change point has been created (according to original regNO), return the phiDefOpnd. + * But, there is a problem: the phiDefOpnd of the same original regNO is not the required phi. + * For example: (in parentheses is the original regNO) + * add R110(R80), R106(R80), #1 add R122(R80), R118(R80), #1 + * \ / + * \ / + * (1) phi: R123(R80), [R110, R122] + * mov R0, R123 + * It will return R123 of phi(1) because the differOrigNO is 80, but that's not what we want, + * we need to create a new phi(2): R140(R80), [R106, R118]. + * so we need to check whether all phiOpnds have correct ssaRegNO. + */ + if (defVersion->GetOriginalRegNO() == differOrigNO) { + auto &phiOpnd = static_cast(phiIt.second->GetOperand(kInsnSecondOpnd)); + if (phiOpnd.GetOperands().size() == validDifferRegNOs.size()) { + bool exist = true; + for (auto &phiListIt : phiOpnd.GetOperands()) { + VRegVersion *phiUseVersion = optSsaInfo->FindSSAVersion( + static_cast(phiListIt.second)->GetRegisterNumber()); + if (validDifferOrigRegNOs.find(phiUseVersion->GetOriginalRegNO()) == validDifferOrigRegNOs.end()) { + exist = false; + break; + } + } + if (exist) { + return &static_cast(phiIt.second->GetOperand(kInsnFirstOpnd)); + } + } + } + } + return nullptr; +} + +RegOperand &A64PregCopyPattern::DFSBuildPhiInsn(Insn *curInsn, std::unordered_map &visited) { + CHECK_FATAL(curInsn, "curInsn must not be null"); + if (visited[curInsn->GetId()] != nullptr) { + return *visited[curInsn->GetId()]; + } + if (!curInsn->IsPhi()) { + return static_cast(curInsn->GetOperand(static_cast(differIdx))); + } + std::unordered_map differPhiList; + std::vector validDifferRegNOs; + auto &phiOpnd = static_cast(curInsn->GetOperand(kInsnSecondOpnd)); + for (auto &phiListIt : phiOpnd.GetOperands()) { + auto &useOpnd = static_cast(*phiListIt.second); + VRegVersion *useVersion = optSsaInfo->FindSSAVersion(useOpnd.GetRegisterNumber()); + Insn *defInsn = FindDefInsn(useVersion); + CHECK_FATAL(defInsn != nullptr, "get defInsn failed"); + RegOperand &phiDefOpnd = DFSBuildPhiInsn(defInsn, visited); + (void)differPhiList.emplace(phiListIt.first, &phiDefOpnd); + (void)validDifferRegNOs.emplace_back(phiDefOpnd.GetRegisterNumber()); + } + /* + * The phi in control flow may already exists. 
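// ---- Editorial sketch (not part of the original patch) ---------------------
// Restates the reuse test of CheckAndGetExistPhiDef above: an existing phi in the
// block can be reused only if it defines the same original register and each of its
// incoming values maps to one of the original registers of the operands we need.
// Toy standalone version over plain register numbers; names are assumptions.
#include <cstddef>
#include <set>
#include <vector>

namespace sketch {
struct ToyPhi {
  unsigned defOrigReg;                     // original reg number of the phi result
  std::vector<unsigned> incomingOrigRegs;  // original reg numbers of its incoming values
};

inline bool PhiCoversRequiredOperands(const ToyPhi &phi, unsigned requiredDefOrigReg,
                                      const std::set<unsigned> &requiredOrigRegs) {
  if (phi.defOrigReg != requiredDefOrigReg ||
      phi.incomingOrigRegs.size() != requiredOrigRegs.size()) {
    return false;
  }
  for (unsigned r : phi.incomingOrigRegs) {
    if (requiredOrigRegs.count(r) == 0) {
      return false;                        // this phi merges a different set of values
    }
  }
  return true;
}
}  // namespace sketch
// -----------------------------------------------------------------------------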
+ * For example: + * [BB26] [BB45] + * add R191, R103, R187 add R166, R103, R164 + * \ / + * \ / + * [BB27] + * phi: R192, (R191<26>, R166<45>) ------ curInsn + * phi: R194, (R187<26>, R164<45>) ------ the phi witch we need already exists + * / validDifferRegNOs : [187, 164] + * / + * [BB28] [BB46] + * add R215, R103, R211 / + * \ / + * \ / + * [BB29] + * phi: R216, (R215<28>, R192<46>) + * phi: R218, (R211<28>, R194<46>) ------ the phi witch we need already exists + * mov R0, R216 validDifferRegNOs : [211, 194] + */ + RegOperand *existPhiDef = CheckAndGetExistPhiDef(*curInsn, validDifferRegNOs); + if (existPhiDef == nullptr) { + Insn &phiInsn = CreateNewPhiInsn(differPhiList, curInsn); + visited[curInsn->GetId()] = &static_cast(phiInsn.GetOperand(kInsnFirstOpnd)); + existPhiDef = &static_cast(phiInsn.GetOperand(kInsnFirstOpnd)); + } + return *existPhiDef; +} + +void A64PregCopyPattern::Optimize(Insn &insn) { + Insn *defInsn = *validDefInsns.begin(); + MOperator newMop = defInsn->GetMachineOpcode(); + Operand &dstOpnd = insn.GetOperand(kInsnFirstOpnd); + Insn &newInsn = cgFunc.GetInsnBuilder()->BuildInsn(newMop, AArch64CG::kMd[newMop]); + uint32 opndNum = defInsn->GetOperandSize(); + newInsn.ResizeOpnds(opndNum); + if (!isCrossPhi) { + for (uint32 i = 0; i < opndNum; ++i) { + if (defInsn->OpndIsDef(i)) { + newInsn.SetOperand(i, dstOpnd); + } else { + newInsn.SetOperand(i, defInsn->GetOperand(i)); + } + } + } else { + std::vector validDifferRegNOs; + for (Insn *vdInsn : validDefInsns) { + auto &vdOpnd = static_cast(vdInsn->GetOperand(static_cast(differIdx))); + (void)validDifferRegNOs.emplace_back(vdOpnd.GetRegisterNumber()); + } + RegOperand *differPhiDefOpnd = CheckAndGetExistPhiDef(*firstPhiInsn, validDifferRegNOs); + if (differPhiDefOpnd == nullptr) { + std::unordered_map visited; + differPhiDefOpnd = &DFSBuildPhiInsn(firstPhiInsn, visited); + } + CHECK_FATAL(differPhiDefOpnd, "get differPhiDefOpnd failed"); + for (uint32 i = 0; i < opndNum; ++i) { + if (defInsn->OpndIsDef(i)) { + newInsn.SetOperand(i, dstOpnd); + } else if (i == static_cast(differIdx)) { + newInsn.SetOperand(i, *differPhiDefOpnd); + } else { + newInsn.SetOperand(i, defInsn->GetOperand(i)); + } + } + } + insn.GetBB()->ReplaceInsn(insn, newInsn); + /* update ssa info */ + optSsaInfo->ReplaceInsn(insn, newInsn); + + if (PROP_DUMP) { + LogInfo::MapleLogger() << ">>>>>>> In A64PregCopyPattern : <<<<<<<\n"; + LogInfo::MapleLogger() << "======= ReplaceInsn :\n"; + insn.Dump(); + LogInfo::MapleLogger() << "======= NewInsn :\n"; + newInsn.Dump(); + } +} + +void A64PregCopyPattern::Run() { + FOR_ALL_BB(bb, &cgFunc) { + FOR_BB_INSNS(insn, bb) { + if (!insn->IsMachineInstruction()) { + continue; + } + Init(); + if (!CheckCondition(*insn)) { + continue; + } + Optimize(*insn); + } + } + validDefInsns.clear(); + validDefInsns.shrink_to_fit(); +} + +void A64ReplaceRegOpndVisitor::Visit(RegOperand *v) { + (void)v; + insn->SetOperand(idx, *newReg); +} +void A64ReplaceRegOpndVisitor::Visit(MemOperand *a64memOpnd) { + bool changed = false; + CHECK_FATAL(a64memOpnd->IsIntactIndexed(), "NYI post/pre index model"); + StackMemPool tempMemPool(memPoolCtrler, "temp mempool for A64ReplaceRegOpndVisitor"); + auto *cpyMem = a64memOpnd->Clone(tempMemPool); + if (cpyMem->GetBaseRegister() != nullptr && + cpyMem->GetBaseRegister()->GetRegisterNumber() == oldReg->GetRegisterNumber()) { + cpyMem->SetBaseRegister(*static_cast(newReg)); + changed = true; + } + if (cpyMem->GetIndexRegister() != nullptr && + cpyMem->GetIndexRegister()->GetRegisterNumber() 
== oldReg->GetRegisterNumber()) { + CHECK_FATAL(!changed, "base reg is equal to index reg"); + cpyMem->SetIndexRegister(*newReg); + changed = true; + } + if (changed) { + insn->SetMemOpnd(&static_cast(cgFunc)->GetOrCreateMemOpnd(*cpyMem)); + } +} +void A64ReplaceRegOpndVisitor::Visit(ListOperand *v) { + for (auto &it : v->GetOperands()) { + if (it->GetRegisterNumber() == oldReg->GetRegisterNumber()) { + it = newReg; + } + } +} +void A64ReplaceRegOpndVisitor::Visit(PhiOperand *v) { + for (auto &it : v->GetOperands()) { + if (it.second->GetRegisterNumber() == oldReg->GetRegisterNumber()) { + it.second = newReg; + } + } + auto &phiDest = static_cast(insn->GetOperand(kInsnFirstOpnd)); + if (phiDest.GetValidBitsNum() > v->GetLeastCommonValidBit()) { + phiDest.SetValidBitsNum(v->GetLeastCommonValidBit()); + } +} +} diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_ra_opt.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_ra_opt.cpp new file mode 100644 index 0000000000000000000000000000000000000000..4b4615d309809e4d75c8246d071d4f73c008fd1d --- /dev/null +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_ra_opt.cpp @@ -0,0 +1,696 @@ +/* + * Copyright (c) [2021] Futurewei Technologies Co., Ltd. All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan Permissive Software License v2. + * You can use this software according to the terms and conditions of the MulanPSL - 2.0. + * You may obtain a copy of MulanPSL - 2.0 at: + * + * https://opensource.org/licenses/MulanPSL-2.0 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the MulanPSL - 2.0 for more details. + */ + +#include "loop.h" +#include "aarch64_ra_opt.h" + +namespace maplebe { + +#define RAOPT_DUMP CG_DEBUG_FUNC(*cgFunc) + +using namespace std; +bool RaX0Opt::PropagateX0CanReplace(Operand *opnd, regno_t replaceReg) const { + if (opnd != nullptr) { + RegOperand *regopnd = static_cast(opnd); + regno_t regCandidate = regopnd->GetRegisterNumber(); + if (regCandidate == replaceReg) { + return true; + } + } + return false; +} + +/* + * Replace replace_reg with rename_reg. + * return true if there is a redefinition that needs to terminate the propagation. + */ +bool RaX0Opt::PropagateRenameReg(Insn *nInsn, const X0OptInfo &optVal) const { + uint32 renameReg = static_cast(optVal.GetRenameOpnd())->GetRegisterNumber(); + const InsnDesc *md = nInsn->GetDesc(); + int32 lastOpndId = static_cast(nInsn->GetOperandSize() - 1); + for (int32_t i = lastOpndId; i >= 0; i--) { + Operand &opnd = nInsn->GetOperand(static_cast(i)); + + if (opnd.IsList()) { + /* call parameters */ + } else if (opnd.IsMemoryAccessOperand()) { + MemOperand &memopnd = static_cast(opnd); + if (PropagateX0CanReplace(memopnd.GetBaseRegister(), optVal.GetReplaceReg())) { + RegOperand *renameOpnd = static_cast(optVal.GetRenameOpnd()); + memopnd.SetBaseRegister(*renameOpnd); + } + if (PropagateX0CanReplace(memopnd.GetIndexRegister(), optVal.GetReplaceReg())) { + RegOperand *renameOpnd = static_cast(optVal.GetRenameOpnd()); + memopnd.SetIndexRegister(*renameOpnd); + } + } else if (opnd.IsRegister()) { + bool isdef = (md->GetOpndDes(static_cast(i)))->IsRegDef(); + RegOperand ®opnd = static_cast(opnd); + regno_t regCandidate = regopnd.GetRegisterNumber(); + if (isdef) { + /* Continue if both replace_reg & rename_reg are not redefined. 
*/ + if (regCandidate == optVal.GetReplaceReg() || regCandidate == renameReg) { + return true; + } + } else { + if (regCandidate == optVal.GetReplaceReg()) { + nInsn->SetOperand(static_cast(i), *optVal.GetRenameOpnd()); + } + } + } + } + return false; /* false == no redefinition */ +} + +/* Propagate x0 from a call return value to a def of x0. + * This eliminates some local reloads under high register pressure, since + * the use has been replaced by x0. + */ +bool RaX0Opt::PropagateX0DetectX0(const Insn *insn, X0OptInfo &optVal) const { + if (insn->GetMachineOpcode() != MOP_xmovrr && insn->GetMachineOpcode() != MOP_wmovrr) { + return false; + } + RegOperand &movSrc = static_cast(insn->GetOperand(1)); + if (movSrc.GetRegisterNumber() != R0) { + return false; + } + + optVal.SetMovSrc(&movSrc); + return true; +} + +bool RaX0Opt::PropagateX0DetectRedefine(const InsnDesc *md, const Insn *ninsn, const X0OptInfo &optVal, + uint32 index) const { + bool isdef = (md->GetOpndDes(static_cast(index)))->IsRegDef(); + if (isdef) { + RegOperand &opnd = static_cast(ninsn->GetOperand(index)); + if (opnd.GetRegisterNumber() == optVal.GetReplaceReg()) { + return true; + } + } + return false; +} + +bool RaX0Opt::PropagateX0Optimize(const BB *bb, const Insn *insn, X0OptInfo &optVal) const { + bool redefined = false; + for (Insn *ninsn = insn->GetNext(); (ninsn != nullptr) && ninsn != bb->GetLastInsn()->GetNext(); + ninsn = ninsn->GetNext()) { + if (!ninsn->IsMachineInstruction()) { + continue; + } + + if (ninsn->IsCall()) { + break; + } + + /* Will continue as long as the reg being replaced is not redefined. + * Does not need to check for x0 redefinition. The mov instruction src + * being replaced already defines x0 and will terminate this loop. + */ + const InsnDesc *md = ninsn->GetDesc(); + for (uint32 i = 0; i < ninsn->GetDefRegs().size(); i++) { + redefined = PropagateX0DetectRedefine(md, ninsn, optVal, i); + if (redefined) { + break; + } + } + if (redefined) { + break; + } + + /* Look for move where src is the register equivalent to x0. */ + if (ninsn->GetMachineOpcode() != MOP_xmovrr && ninsn->GetMachineOpcode() != MOP_wmovrr) { + continue; + } + + Operand *src = &ninsn->GetOperand(1); + RegOperand *srcreg = static_cast(src); + if (srcreg->GetRegisterNumber() != optVal.GetReplaceReg()) { + continue; + } + + /* Setup for the next optmization pattern. */ + Operand *dst = &ninsn->GetOperand(0); + RegOperand *dstreg = static_cast(dst); + if (dstreg->GetRegisterNumber() != R0) { + /* This is to set up for further propagation later. 
*/ + if (srcreg->GetRegisterNumber() == optVal.GetReplaceReg()) { + if (optVal.GetRenameInsn() != nullptr) { + redefined = true; + break; + } else { + optVal.SetRenameInsn(ninsn); + optVal.SetRenameOpnd(dst); + optVal.SetRenameReg(dstreg->GetRegisterNumber()); + } + } + continue; + } + + if (redefined) { + break; + } + + /* x0 = x0 */ + ninsn->SetOperand(1, *optVal.GetMovSrc()); + break; + } + + return redefined; +} + +bool RaX0Opt::PropagateX0ForCurrBb(BB *bb, const X0OptInfo &optVal) const { + bool redefined = false; + for (Insn *ninsn = optVal.GetRenameInsn()->GetNext(); (ninsn != nullptr) && ninsn != bb->GetLastInsn()->GetNext(); + ninsn = ninsn->GetNext()) { + if (!ninsn->IsMachineInstruction()) { + continue; + } + redefined = PropagateRenameReg(ninsn, optVal); + if (redefined) { + break; + } + } + if (!redefined) { + auto it = bb->GetLiveOutRegNO().find(optVal.GetReplaceReg()); + if (it != bb->GetLiveOutRegNO().end()) { + bb->EraseLiveOutRegNO(it); + } + uint32 renameReg = static_cast(optVal.GetRenameOpnd())->GetRegisterNumber(); + bb->InsertLiveOutRegNO(renameReg); + } + return redefined; +} + +void RaX0Opt::PropagateX0ForNextBb(BB *nextBb, const X0OptInfo &optVal) const { + bool redefined = false; + for (Insn *ninsn = nextBb->GetFirstInsn(); ninsn != nextBb->GetLastInsn()->GetNext(); ninsn = ninsn->GetNext()) { + if (!ninsn->IsMachineInstruction()) { + continue; + } + redefined = PropagateRenameReg(ninsn, optVal); + if (redefined) { + break; + } + } + if (!redefined) { + auto it = nextBb->GetLiveOutRegNO().find(optVal.GetReplaceReg()); + if (it != nextBb->GetLiveOutRegNO().end()) { + nextBb->EraseLiveOutRegNO(it); + } + uint32 renameReg = static_cast(optVal.GetRenameOpnd())->GetRegisterNumber(); + nextBb->InsertLiveOutRegNO(renameReg); + } +} + +/* + * Perform optimization. + * First propagate x0 in a bb. + * Second propagation see comment in function. + */ +void RaX0Opt::PropagateX0() { + FOR_ALL_BB(bb, cgFunc) { + X0OptInfo optVal; + + Insn *insn = bb->GetFirstInsn(); + while ((insn != nullptr) && !insn->IsMachineInstruction()) { + insn = insn->GetNext(); + continue; + } + if (insn == nullptr) { + continue; + } + if (!PropagateX0DetectX0(insn, optVal)) { + continue; + } + + /* At this point the 1st insn is a mov from x0. */ + RegOperand &movDst = static_cast(insn->GetOperand(0)); + optVal.SetReplaceReg(movDst.GetRegisterNumber()); + optVal.ResetRenameInsn(); + bool redefined = PropagateX0Optimize(bb, insn, optVal); + if (redefined || (optVal.GetRenameInsn() == nullptr)) { + continue; + } + + /* Next pattern to help LSRA. Short cross bb live interval. + * Straight line code. Convert reg2 into bb local. + * bb1 + * mov reg2 <- x0 => mov reg2 <- x0 + * mov reg1 <- reg2 mov reg1 <- reg2 + * call call + * bb2 : livein< reg1 reg2 > + * use reg2 use reg1 + * .... + * reg2 not liveout + * + * Can allocate caller register for reg2. 
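+     * After the rewrite reg2 is dead before the call (its last use is the copy into reg1),
+     * so giving reg2 a caller-saved register costs no spill.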
+ * + * Further propagation of very short live interval cross bb reg + */ + if (optVal.GetRenameReg() < kMaxRegNum) { /* dont propagate physical reg */ + continue; + } + BB *nextBb = bb->GetNext(); + if (nextBb == nullptr) { + break; + } + if (bb->GetSuccs().size() != 1 || nextBb->GetPreds().size() != 1) { + continue; + } + if (bb->GetSuccs().front() != nextBb || nextBb->GetPreds().front() != bb) { + continue; + } + if (bb->GetLiveOutRegNO().find(optVal.GetReplaceReg()) == bb->GetLiveOutRegNO().end() || + bb->GetLiveOutRegNO().find(optVal.GetRenameReg()) == bb->GetLiveOutRegNO().end() || + nextBb->GetLiveOutRegNO().find(optVal.GetReplaceReg()) != nextBb->GetLiveOutRegNO().end()) { + continue; + } + /* Replace replace_reg by rename_reg. */ + redefined = PropagateX0ForCurrBb(bb, optVal); + if (redefined) { + continue; + } + PropagateX0ForNextBb(nextBb, optVal); + } +} + +void VregRename::PrintRenameInfo(regno_t regno) const { + VregRenameInfo *info = (regno <= maxRegnoSeen) ? renameInfo[regno] : nullptr; + if (info == nullptr || (info->numDefs == 0 && info->numUses == 0)) { + return; + } + LogInfo::MapleLogger() << "reg: " << regno; + if (info->firstBBLevelSeen != nullptr) { + LogInfo::MapleLogger() << " fromLevel " << info->firstBBLevelSeen->GetInternalFlag2(); + } + if (info->lastBBLevelSeen != nullptr) { + LogInfo::MapleLogger() << " toLevel " << info->lastBBLevelSeen->GetInternalFlag2(); + } + if (info->numDefs > 0) { + LogInfo::MapleLogger() << " defs " << info->numDefs; + } + if (info->numUses > 0) { + LogInfo::MapleLogger() << " uses " << info->numUses; + } + if (info->numDefs > 0) { + LogInfo::MapleLogger() << " innerDefs " << info->numInnerDefs; + } + if (info->numUses > 0) { + LogInfo::MapleLogger() << " innerUses " << info->numInnerUses; + } + LogInfo::MapleLogger() << "\n"; +} + +void VregRename::PrintAllRenameInfo() const { + for (uint32 regno = 0; regno < cgFunc->GetMaxRegNum(); ++regno) { + PrintRenameInfo(regno); + } +} + +bool VregRename::IsProfitableToRename(const VregRenameInfo *info) const{ + if ((info->numInnerDefs == 0) && (info->numUses != info->numInnerUses)) { + return true; + } + return false; +} + +void VregRename::RenameProfitableVreg(RegOperand *ropnd, const CGFuncLoops *loop) { + regno_t vreg = ropnd->GetRegisterNumber(); + VregRenameInfo *info = (vreg <= maxRegnoSeen) ? renameInfo[vreg] : nullptr; + if ((info == nullptr) || loop->GetMultiEntries().size() > 0 || (!IsProfitableToRename(info))) { + return; + } + + uint32 size = (ropnd->GetSize() == k64BitSize) ? k8ByteSize : k4ByteSize; + regno_t newRegno = cgFunc->NewVReg(ropnd->GetRegisterType(), size); + RegOperand *renameVreg = &cgFunc->CreateVirtualRegisterOperand(newRegno); + + const BB *header = loop->GetHeader(); + for (auto pred : header->GetPreds()) { + if (find(loop->GetBackedge().begin(), loop->GetBackedge().end(), pred) != loop->GetBackedge().end()) { + continue; + } + MOperator mOp = (ropnd->GetRegisterType() == kRegTyInt) ? + ((size == k8BitSize) ? MOP_xmovrr : MOP_wmovrr) : + ((size == k8BitSize) ? 
MOP_xvmovd : MOP_xvmovs); + Insn &newInsn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, *renameVreg, *ropnd); + Insn *last = pred->GetLastInsn(); + if (last) { + if (last->IsBranch()) { + last->GetBB()->InsertInsnBefore(*last, newInsn); + } else { + last->GetBB()->InsertInsnAfter(*last, newInsn); + } + } else { + pred->AppendInsn(newInsn); + } + } + + for (auto bb : loop->GetLoopMembers()) { + FOR_BB_INSNS(insn, bb) { + if (insn->IsImmaterialInsn() || !insn->IsMachineInstruction()) { + continue; + } + for (uint32 i = 0; i < insn->GetOperandSize(); ++i) { + Operand *opnd = &insn->GetOperand(i); + if (opnd->IsList()) { + /* call parameters */ + } else if (opnd->IsMemoryAccessOperand()) { + MemOperand *memopnd = static_cast(opnd); + RegOperand *base = static_cast(memopnd->GetBaseRegister()); + MemOperand *newMemOpnd = nullptr; + if (base != nullptr && base->IsVirtualRegister() && base->GetRegisterNumber() == vreg) { + newMemOpnd = static_cast(memopnd->Clone(*cgFunc->GetMemoryPool())); + newMemOpnd->SetBaseRegister(*renameVreg); + insn->SetOperand(i, *newMemOpnd); + } + RegOperand *offset = static_cast(memopnd->GetIndexRegister()); + if (offset != nullptr && offset->IsVirtualRegister() && offset->GetRegisterNumber() == vreg) { + if (newMemOpnd == nullptr) { + newMemOpnd = static_cast(memopnd->Clone(*cgFunc->GetMemoryPool())); + } + newMemOpnd->SetIndexRegister(*renameVreg); + insn->SetOperand(i, *newMemOpnd); + } + } else if (opnd->IsRegister() && static_cast(opnd)->IsVirtualRegister() && + static_cast(opnd)->GetRegisterNumber() == vreg) { + insn->SetOperand(i, *renameVreg); + } + } + } + } +} + +void VregRename::RenameFindLoopVregs(const CGFuncLoops *loop) { + for (auto *bb : loop->GetLoopMembers()) { + FOR_BB_INSNS(insn, bb) { + if (insn->IsImmaterialInsn() || !insn->IsMachineInstruction()) { + continue; + } + for (uint32 i = 0; i < insn->GetOperandSize(); ++i) { + Operand *opnd = &insn->GetOperand(i); + if (opnd->IsList()) { + /* call parameters */ + } else if (opnd->IsMemoryAccessOperand()) { + MemOperand *memopnd = static_cast(opnd); + RegOperand *base = static_cast(memopnd->GetBaseRegister()); + if (base != nullptr && base->IsVirtualRegister()) { + RenameProfitableVreg(base, loop); + } + RegOperand *offset = static_cast(memopnd->GetIndexRegister()); + if (offset != nullptr && offset->IsVirtualRegister()) { + RenameProfitableVreg(offset, loop); + } + } else if (opnd->IsRegister() && static_cast(opnd)->IsVirtualRegister() && + static_cast(opnd)->GetRegisterNumber() != ccRegno) { + RenameProfitableVreg(static_cast(opnd), loop); + } + } + } + } +} + +/* Only the bb level is important, not the bb itself. 
+ * So if multiple bbs have the same level, only one bb represents the level + */ +void VregRename::UpdateVregInfo(regno_t vreg, BB *bb, bool isInner, bool isDef) { + VregRenameInfo *info = renameInfo[vreg]; + if (info == nullptr) { + info = memPool->New(); + renameInfo[vreg] = info; + if (vreg > maxRegnoSeen) { + maxRegnoSeen = vreg; + } + } + if (isDef) { + info->numDefs++; + if (isInner) { + info->numInnerDefs++; + } + } else { + info->numUses++; + if (isInner) { + info->numInnerUses++; + } + } + if (info->firstBBLevelSeen) { + if (info->firstBBLevelSeen->GetInternalFlag2() > bb->GetInternalFlag2()) { + info->firstBBLevelSeen = bb; + } + } else { + info->firstBBLevelSeen = bb; + } + if (info->lastBBLevelSeen) { + if (info->lastBBLevelSeen->GetInternalFlag2() < bb->GetInternalFlag2()) { + info->lastBBLevelSeen = bb; + } + } else { + info->lastBBLevelSeen = bb; + } +} + +void VregRename::RenameGetFuncVregInfo() { + FOR_ALL_BB(bb, cgFunc) { + bool isInner = bb->GetLoop() ? bb->GetLoop()->GetInnerLoops().empty() : false; + FOR_BB_INSNS(insn, bb) { + if (insn->IsImmaterialInsn() || !insn->IsMachineInstruction()) { + continue; + } + const InsnDesc *md = insn->GetDesc(); + for (uint32 i = 0; i < insn->GetOperandSize(); ++i) { + Operand *opnd = &insn->GetOperand(i); + if (opnd->IsList()) { + /* call parameters */ + } else if (opnd->IsMemoryAccessOperand()) { + MemOperand *memopnd = static_cast(opnd); + RegOperand *base = static_cast(memopnd->GetBaseRegister()); + if (base != nullptr && base->IsVirtualRegister()) { + regno_t vreg = base->GetRegisterNumber(); + UpdateVregInfo(vreg, bb, isInner, false); + } + RegOperand *offset = static_cast(memopnd->GetIndexRegister()); + if (offset != nullptr && offset->IsVirtualRegister()) { + regno_t vreg = offset->GetRegisterNumber(); + UpdateVregInfo(vreg, bb, isInner, false); + } + } else if (opnd->IsRegister() && static_cast(opnd)->IsVirtualRegister() && + static_cast(opnd)->GetRegisterNumber() != ccRegno) { + bool isdef = (md->opndMD[i])->IsRegDef(); + regno_t vreg = static_cast(opnd)->GetRegisterNumber(); + UpdateVregInfo(vreg, bb, isInner, isdef); + } + } + } + } +} + +void VregRename::RenameFindVregsToRename(const CGFuncLoops *loop) { + if (loop->GetInnerLoops().empty()) { + RenameFindLoopVregs(loop); + return; + } + for (auto inner : loop->GetInnerLoops()) { + RenameFindVregsToRename(inner); + } +} + + +void VregRename::VregLongLiveRename() { + if (cgFunc->GetLoops().size() == 0) { + return; + } + RenameGetFuncVregInfo(); + for (const auto *lp : cgFunc->GetLoops()) { + RenameFindVregsToRename(lp); + } +} + +bool ParamRegOpt::DominatorAll(uint32 domBB, std::set &refBBs) const { + for (auto it: refBBs) { + if (!domInfo->Dominate(*cgFunc->GetBBFromID(domBB), *cgFunc->GetBBFromID(it))) { + return false; + } + } + return true; +} + +BB* ParamRegOpt::GetCommondDom(std::set &refBBs) { + MapleVector &domOrder = domInfo->GetDtPreOrder(); + uint32 minId = static_cast(domOrder.size()); + for (auto it = domOrder.crbegin(); it != domOrder.crend(); ++it) { + uint32 curBBId = *it; + if (refBBs.find(curBBId) != refBBs.end()) { + minId = curBBId; + } + } + if (DominatorAll(minId, refBBs)) { + BB* domBB = cgFunc->GetBBFromID(minId); + while (domBB->GetLoop() != nullptr) { + domBB = domInfo->GetDom(domBB->GetId()); + } + return domBB; + } + BB *curBB = domInfo->GetDom(minId); + while (curBB != nullptr && curBB != cgFunc->GetFirstBB()) { + if (DominatorAll(curBB->GetId(), refBBs)) { + break; + } + curBB = domInfo->GetDom(curBB->GetId()); + } + if (curBB == nullptr || 
curBB == cgFunc->GetFirstBB()) { + return nullptr; + } + while (curBB->GetLoop() != nullptr) { + curBB = domInfo->GetDom(curBB->GetId()); + } + return curBB; +} + +void ParamRegOpt::SplitAtDomBB(RegOperand &movDest, BB &domBB, Insn &posInsn) const { + if (dumpInfo) { + LogInfo::MapleLogger() << "----cand R" << movDest.GetRegisterNumber() << + " to split at BB" << domBB.GetId() << " \n"; + } + uint32 size = (movDest.GetSize() == k64BitSize) ? k8ByteSize : k4ByteSize; + regno_t newRegno = cgFunc->NewVReg(movDest.GetRegisterType(), size); + RegOperand *renameVreg = &cgFunc->CreateVirtualRegisterOperand(newRegno); + MOperator mOp = (movDest.GetRegisterType() == kRegTyInt) ? + ((size == k8BitSize) ? MOP_xmovrr : MOP_wmovrr) : + ((size == k8BitSize) ? MOP_xvmovd : MOP_xvmovs); + Insn &newInsn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, movDest, *renameVreg); + domBB.InsertInsnBegin(newInsn); + posInsn.SetOperand(kFirstOpnd, *renameVreg); +} + +void ParamRegOpt::CollectRefBBs(RegOperand &movDest, std::set &refBBs) { + regno_t cand = movDest.GetRegisterNumber(); + BB* firstBB = cgFunc->GetFirstBB(); + std::set defBBs; + std::set useBBs; + std::set crossCallBBs; + FOR_ALL_BB(bb, cgFunc) { + bool bbHasCall = false; + FOR_BB_INSNS(insn, bb) { + if (insn->IsImmaterialInsn() || !insn->IsMachineInstruction()) { + continue; + } + bbHasCall = bbHasCall || insn->IsCall(); + if (insn->ScanReg(cand)) { + if (insn->IsRegDefined(cand)) { + (void)defBBs.insert(bb->GetId()); + if (bbHasCall) { + (void)crossCallBBs.insert(bb->GetId()); + } + } else { + (void)useBBs.insert(bb->GetId()); + if (bbHasCall) { + (void)crossCallBBs.insert(bb->GetId()); + } + } + } + } + if (bbHasCall && (bb->GetLiveOutRegNO().find(cand) != bb->GetLiveOutRegNO().end() || + bb->GetLiveInRegNO().find(cand) != bb->GetLiveInRegNO().end())) { + (void)crossCallBBs.insert(bb->GetId()); + } + } + /* expect single def and cross call */ + if (defBBs.size() != 1 || crossCallBBs.empty()) { + return; + } + /* single defBB should be the firstBB */ + if (defBBs.find(firstBB->GetId()) == defBBs.end()) { + return; + } + /* expect no use or call in the firstBB */ + if (useBBs.find(firstBB->GetId()) != useBBs.end() || crossCallBBs.find(firstBB->GetId()) != crossCallBBs.end()) { + return; + } + useBBs.insert(crossCallBBs.cbegin(), crossCallBBs.cend()); + refBBs.insert(useBBs.cbegin(), useBBs.cend()); +} + +void ParamRegOpt::TryToSplitParamReg(RegOperand &movDest, Insn &posInsn) { + std::set useBBs; + CollectRefBBs(movDest, useBBs); + if (useBBs.empty()) { + return; + } + /* common dom */ + BB* firstBB = cgFunc->GetFirstBB(); + BB *domBB = GetCommondDom(useBBs); + BB *secondBB = nullptr; + if (firstBB->GetSuccs().size() == 1) { + secondBB = *firstBB->GetSuccs().begin(); + } + if (domBB == nullptr || domBB == firstBB || domBB == secondBB) { + return; + } + /* do split */ + SplitAtDomBB(movDest, *domBB, posInsn); +} + +void ParamRegOpt::HandleParamReg() { + uint32 formalCount = static_cast(cgFunc->GetFunction().GetFormalCount()); + if (formalCount == 0) { + return; + } + BB* firstBB = cgFunc->GetFirstBB(); + FOR_BB_INSNS(insn, firstBB) { + if (!insn->IsMachineInstruction()) { + continue; + } + if (insn->GetMachineOpcode() != MOP_xmovrr && insn->GetMachineOpcode() != MOP_wmovrr && + insn->GetMachineOpcode() != MOP_xvmovd && insn->GetMachineOpcode() != MOP_xvmovs) { + return; + } + RegOperand &movDest = static_cast(insn->GetOperand(kFirstOpnd)); + RegOperand &movSrc = static_cast(insn->GetOperand(kSecondOpnd)); + if (movSrc.IsVirtualRegister()) { + return; + } + 
if (movSrc.IsPhysicalRegister() && movSrc.GetRegisterNumber() == RSP) { + return; + } + TryToSplitParamReg(movDest, *insn); + } +} + +void AArch64RaOpt::Run() { + RaX0Opt x0Opt(cgFunc); + x0Opt.PropagateX0(); + + if (RAOPT_DUMP) { + LogInfo::MapleLogger() << "Handle func:" << cgFunc->GetName() << ", funcid: " << + cgFunc->GetFunction().GetPuidx() << " \n"; + } + ParamRegOpt argOpt(cgFunc, domInfo); + argOpt.SetDumpInfo(RAOPT_DUMP); + argOpt.HandleParamReg(); + + if (cgFunc->GetMirModule().GetSrcLang() == kSrcLangC && CGOptions::DoVregRename()) { + /* loop detection considers EH bb. That is not handled. So C only for now. */ + LoopFinder *lf = memPool->New(*cgFunc, *memPool); + lf->FormLoopHierarchy(); + VregRename rename(cgFunc, memPool); + Bfs localBfs(*cgFunc, *memPool); + rename.bfs = &localBfs; + rename.bfs->ComputeBlockOrder(); + rename.VregLongLiveRename(); + cgFunc->ClearLoopInfo(); + } +} +} /* namespace maplebe */ diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_rce.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_rce.cpp new file mode 100644 index 0000000000000000000000000000000000000000..27f8c1cfefb3ca2332e96b55857585361bcda155 --- /dev/null +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_rce.cpp @@ -0,0 +1,382 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "aarch64_rce.h" + +namespace maplebe { +void AArch64RedundantComputeElim::Run() { + FOR_ALL_BB(bb, cgFunc) { + if (bb->IsUnreachable()) { + continue; + } + bool opt; + g_count = 0; + do { + /* reset hashSeed and hashSet */ + g_hashSeed = 0; + candidates.clear(); + opt = DoOpt(bb); + ++g_count; + } while (opt); + } + if (CG_RCE_DUMP) { + DumpHash(); + } +} + +bool AArch64RedundantComputeElim::CheckFakeOptmization(const Insn &existInsn) const { + /* insns such as {movrr & zxt/sxt & ...} are optimized by prop */ + MOperator mop = existInsn.GetMachineOpcode(); + if (mop == MOP_wmovrr || mop == MOP_xmovrr || (mop >= MOP_xsxtb32 && mop <= MOP_xuxtw64)) { + return false; + } + /* patterns such as {movz ... 
movk ...} are optimized by LoadFloatPointPattern in cgprepeephole */ + if (mop == MOP_wmovzri16 || mop == MOP_xmovzri16) { + auto &dstOpnd = static_cast(existInsn.GetOperand(kInsnFirstOpnd)); + VRegVersion *dstVersion = ssaInfo->FindSSAVersion(dstOpnd.GetRegisterNumber()); + ASSERT(dstVersion, "get ssa version failed"); + for (auto useIt : dstVersion->GetAllUseInsns()) { + ASSERT(useIt.second, "get DUInsnInfo failed"); + Insn *useInsn = useIt.second->GetInsn(); + ASSERT(useInsn, "get useInsn by ssaVersion failed"); + if (useInsn->GetMachineOpcode() == MOP_wmovkri16 || useInsn->GetMachineOpcode() == MOP_xmovkri16) { + return false; + } + } + } + return true; +} + +void AArch64RedundantComputeElim::CheckCondition(const Insn &existInsn, const Insn &curInsn) { + if (!CheckFakeOptmization(existInsn)) { + doOpt = false; + return; + } + RegOperand *existDefOpnd = nullptr; + RegOperand *curDefOpnd = nullptr; + /* + * the case as following is 'fake' redundancy opportunity: + * [BB8] -------------------- + * phi: R138, (R136<7>, R146<12>) | + * phi: R139, (R131<7>, R147<12>) | + * / \ | + * / \ | + * [BB9] ----------- [BB12] | + * sub R146, R139, #1 | + * sub R147, R139, #1 | + * cbnz R147, label ---- + */ + uint32 opndNum = existInsn.GetOperandSize(); + for (uint32 i = 0; i < opndNum; ++i) { + if (existInsn.OpndIsDef(i) && existInsn.OpndIsUse(i) && curInsn.OpndIsDef(i) && curInsn.OpndIsUse(i)) { + doOpt = true; + return; + } + if (!existInsn.GetOperand(i).IsRegister()) { + CHECK_FATAL(!curInsn.GetOperand(i).IsRegister(), "check the two insns"); + continue; + } + if (existInsn.OpndIsDef(i) && curInsn.OpndIsDef(i)) { + existDefOpnd = &static_cast(existInsn.GetOperand(i)); + curDefOpnd = &static_cast(curInsn.GetOperand(i)); + /* def points without use are not processed */ + VRegVersion *existDefVer = ssaInfo->FindSSAVersion(existDefOpnd->GetRegisterNumber()); + VRegVersion *curDefVer = ssaInfo->FindSSAVersion(curDefOpnd->GetRegisterNumber()); + CHECK_FATAL(existDefVer && curDefVer, "get ssa version failed"); + if (existDefVer->GetAllUseInsns().empty() || curDefVer->GetAllUseInsns().empty()) { + doOpt = false; + return; + } + } + if (existInsn.OpndIsUse(i) && curInsn.OpndIsUse(i)) { + auto &existUseOpnd = static_cast(existInsn.GetOperand(i)); + Insn *predInsn = ssaInfo->GetDefInsn(existUseOpnd); + if (predInsn != nullptr && predInsn->IsPhi()) { + auto &phiOpnd = static_cast(predInsn->GetOperand(kInsnSecondOpnd)); + for (auto &phiListIt : phiOpnd.GetOperands()) { + if (phiListIt.second == nullptr) { + continue; + } + regno_t phiRegNO = phiListIt.second->GetRegisterNumber(); + CHECK_FATAL(existDefOpnd && curDefOpnd, "check the def of this insn"); + if (phiRegNO == existDefOpnd->GetRegisterNumber() || phiRegNO == curDefOpnd->GetRegisterNumber()) { + doOpt = false; + return; + } + } + } + } + } + /* + * 1) avoid spills for propagating across calls: + * mov R100, #1 + * ... 
+ * bl func + * mov R103, #1 + * str R103, [mem] + * + * 2) avoid cc redefine between two same insns that define cc + */ + CHECK_FATAL(existDefOpnd != nullptr && curDefOpnd != nullptr, "invalied defOpnd"); + bool isDefCC = (existDefOpnd->IsOfCC() || curDefOpnd->IsOfCC()); + for (const Insn *cursor = &existInsn; cursor != nullptr && cursor != &curInsn; cursor = cursor->GetNext()) { + if (!cursor->IsMachineInstruction()) { + continue; + } + if (cursor->IsCall()) { + doOpt = false; + return; + } + if (isDefCC && cursor->GetOperand(kInsnFirstOpnd).IsRegister() && + static_cast(cursor->GetOperand(kInsnFirstOpnd)).IsOfCC()) { + doOpt = false; + return; + } + } +} + +std::size_t AArch64RedundantComputeElim::ComputeDefUseHash(const Insn &insn, const RegOperand *replaceOpnd) const { + std::size_t hashSeed = 0; + std::string hashS = std::to_string(insn.GetMachineOpcode()); + uint32 opndNum = insn.GetOperandSize(); + hashS += std::to_string(opndNum); + for (uint i = 0; i < opndNum; ++i) { + Operand &opnd = insn.GetOperand(i); + if (insn.OpndIsDef(i) && insn.OpndIsUse(i)) { + CHECK_FATAL(opnd.IsRegister(), "must be"); + hashS += (replaceOpnd == nullptr ? + static_cast(opnd).GetHashContent() : replaceOpnd->GetHashContent()); + } else if (insn.OpndIsUse(i)) { + Operand::OperandType opndKind = opnd.GetKind(); + if (opndKind == Operand::kOpdImmediate) { + hashS += static_cast(opnd).GetHashContent(); + } else if (opndKind == Operand::kOpdExtend) { + hashS += static_cast(opnd).GetHashContent(); + } else if (opndKind == Operand::kOpdShift) { + hashS += static_cast(opnd).GetHashContent(); + } else { + hashS += std::to_string(++hashSeed); + } + } + } + return std::hash{}(hashS); +} + +DUInsnInfo *AArch64RedundantComputeElim::GetDefUseInsnInfo(VRegVersion &defVersion) { + DUInsnInfo *defUseInfo = nullptr; + for (auto duInfoIt : defVersion.GetAllUseInsns()) { + ASSERT(duInfoIt.second, "get DUInsnInfo failed"); + Insn *useInsn = duInfoIt.second->GetInsn(); + ASSERT(useInsn, "get useInsn by ssaVersion failed"); + for (auto &opndIt : as_const(duInfoIt.second->GetOperands())) { + if (opndIt.first == useInsn->GetBothDefUseOpnd()) { + if (defUseInfo != nullptr) { + doOpt = false; + return nullptr; + } else { + defUseInfo = duInfoIt.second; + break; + } + } + } + } + return defUseInfo; +} + +/* + * useInsns of existVersion that have both def & use must check complete chain + * For example: + * movn R143, #2 + * movk R143(use){implicit-def: R144}, #32767, LSL #16 + * movk R144(use){implicit-def: R145}, #65534, LSL #32 + * ... 
+ * movn R152, #2 ===> can not be optimized + * movk R152(use){implicit-def: R153), #32767, LSL #16 + * movk R153(use){implicit-def: R154), #8198, LSL #48 ----> different + */ +void AArch64RedundantComputeElim::CheckBothDefAndUseChain(RegOperand *curDstOpnd, RegOperand *existDstOpnd) { + if (curDstOpnd == nullptr || existDstOpnd == nullptr) { + doOpt = false; + return; + } + VRegVersion *existDefVer = ssaInfo->FindSSAVersion(existDstOpnd->GetRegisterNumber()); + VRegVersion *curDefVer = ssaInfo->FindSSAVersion(curDstOpnd->GetRegisterNumber()); + CHECK_FATAL(existDefVer && curDefVer, "get ssa version failed"); + DUInsnInfo *existInfo = GetDefUseInsnInfo(*existDefVer); + DUInsnInfo *curInfo = GetDefUseInsnInfo(*curDefVer); + if (existInfo == nullptr) { + return; + } + if (curInfo == nullptr) { + doOpt = false; + return; + } + if (existInfo->GetOperands().size() > 1 || curInfo->GetOperands().size() > 1 || + existInfo->GetOperands().begin()->first != curInfo->GetOperands().begin()->first) { + doOpt = false; + return; + } + uint32 opndIdx = existInfo->GetOperands().cbegin()->first; + Insn *existUseInsn = existInfo->GetInsn(); + Insn *curUseInsn = curInfo->GetInsn(); + CHECK_FATAL(existUseInsn && curUseInsn, "get useInsn failed"); + if (existUseInsn->OpndIsDef(opndIdx) && curUseInsn->OpndIsDef(opndIdx)) { + std::size_t existHash = ComputeDefUseHash(*existUseInsn, nullptr); + std::size_t curHash = ComputeDefUseHash(*curUseInsn, existDstOpnd); + if (existHash != curHash) { + doOpt = false; + return; + } else { + curDstOpnd = curUseInsn->GetSSAImpDefOpnd(); + existDstOpnd = existUseInsn->GetSSAImpDefOpnd(); + CheckBothDefAndUseChain(curDstOpnd, existDstOpnd); + } + } else if (existUseInsn->OpndIsDef(opndIdx)) { + doOpt = false; + return; + } +} + +bool AArch64RedundantComputeElim::IsBothDefUseCase(VRegVersion &version) const { + for (auto infoIt : version.GetAllUseInsns()) { + ASSERT(infoIt.second != nullptr, "get duInsnInfo failed"); + Insn *useInsn = infoIt.second->GetInsn(); + ASSERT(useInsn != nullptr, "get useInsn failed"); + for (auto &opndIt : as_const(infoIt.second->GetOperands())) { + if (useInsn->GetBothDefUseOpnd() == opndIt.first) { + return true; + } + } + } + return false; +} + +bool AArch64RedundantComputeElim::DoOpt(BB *bb) { + bool optimize = false; + FOR_BB_INSNS(insn, bb) { + if (!insn->IsMachineInstruction()) { + continue; + } + doOpt = true; + if (cgFunc->GetName() == "expand_shift" && insn->GetId() == 144) { + std::cout << "add logical shift!" 
<< std::endl; + } + auto iter = candidates.find(insn); + if (iter != candidates.end()) { + /* during iteration, avoid repeated processing of insn, which may access wrong duInsnInfo of ssaVersion */ + if (insn->HasProcessedRHS()) { + continue; + } + Operand *curDst = &insn->GetOperand(kInsnFirstOpnd); + ASSERT(curDst->IsRegister(), "must be"); + auto *curDstOpnd = static_cast(curDst); + regno_t curRegNO = curDstOpnd->GetRegisterNumber(); + if (!insn->IsRegDefined(curRegNO) || static_cast(curDstOpnd)->IsPhysicalRegister()) { + continue; + } + Insn *existInsn = *iter; + Operand *existDst = &existInsn->GetOperand(kInsnFirstOpnd); + ASSERT(existDst->IsRegister(), "must be"); + auto *existDstOpnd = static_cast(existDst); + regno_t existRegNO = (existDstOpnd)->GetRegisterNumber(); + if (!existInsn->IsRegDefined(existRegNO) || static_cast(existDstOpnd)->IsPhysicalRegister()) { + continue; + } + /* two insns have both def & use opnd */ + if (insn->OpndIsUse(kInsnFirstOpnd) && existInsn->OpndIsUse(kInsnFirstOpnd)) { + curDstOpnd = insn->GetSSAImpDefOpnd(); + existDstOpnd = existInsn->GetSSAImpDefOpnd(); + CHECK_FATAL(curDstOpnd && existDstOpnd, "get ssa implicit def opnd failed"); + } + VRegVersion *existDefVer = ssaInfo->FindSSAVersion(existDstOpnd->GetRegisterNumber()); + VRegVersion *curDefVer = ssaInfo->FindSSAVersion(curDstOpnd->GetRegisterNumber()); + CHECK_FATAL(existDefVer && curDefVer, "get ssa version failed"); + isBothDefUse = IsBothDefUseCase(*existDefVer); + if (isBothDefUse) { + CheckBothDefAndUseChain(curDstOpnd, existDstOpnd); + } + if (!doOpt) { + continue; + } + CheckCondition(*existInsn, *insn); + if (!doOpt) { + continue; + } + if (CG_RCE_DUMP) { + Dump(existInsn, insn); + } + Optimize(*bb, *insn, *curDstOpnd, *existDstOpnd); + optimize = true; + } else { + (void)candidates.insert(insn); + } + } + return optimize; +} + +MOperator AArch64RedundantComputeElim::GetNewMop(const RegOperand &curDstOpnd, const RegOperand &existDstOpnd) const { + MOperator newMop = MOP_undef; + bool is64Bit = (curDstOpnd.GetSize() == k64BitSize); + if (curDstOpnd.IsOfFloatOrSIMDClass() && existDstOpnd.IsOfIntClass()) { + newMop = (is64Bit ? MOP_xvmovdr : MOP_xvmovsr); + } else if (curDstOpnd.IsOfIntClass() && existDstOpnd.IsOfFloatOrSIMDClass()) { + newMop = (is64Bit ? MOP_xvmovrd : MOP_xvmovrs); + } else if (curDstOpnd.IsOfFloatOrSIMDClass() && existDstOpnd.IsOfFloatOrSIMDClass()) { + newMop = (is64Bit ? MOP_xvmovd : MOP_xvmovs); + } else { + newMop = (is64Bit ? MOP_xmovrr : MOP_wmovrr); + } + return newMop; +} + +void AArch64RedundantComputeElim::Optimize(BB &curBB, Insn &curInsn, + RegOperand &curDstOpnd, RegOperand &existDstOpnd) const { + /* + * 1) useInsns of existVersion that have both def & use need replace ssaVersion first. + * 2) other cases can be replaced by mov. 
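+   *    e.g. (illustrative register numbers): a repeated "add R120, R101, R102" after an earlier
+   *    "add R110, R101, R102" is rewritten to "mov R120, R110".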
+ */ + if (isBothDefUse) { + VRegVersion *existDefVersion = ssaInfo->FindSSAVersion(existDstOpnd.GetRegisterNumber()); + VRegVersion *curDefVersion = ssaInfo->FindSSAVersion(curDstOpnd.GetRegisterNumber()); + CHECK_FATAL(existDefVersion && curDefVersion, "get ssa version failed"); + ssaInfo->ReplaceAllUse(curDefVersion, existDefVersion); + curInsn.SetProcessRHS(); + } else { + MOperator newMop = GetNewMop(curDstOpnd, existDstOpnd); + Insn &newInsn = cgFunc->GetInsnBuilder()->BuildInsn(newMop, curDstOpnd, existDstOpnd); + curBB.ReplaceInsn(curInsn, newInsn); + ssaInfo->ReplaceInsn(curInsn, newInsn); + newInsn.SetProcessRHS(); + } +} + +void AArch64RedundantComputeElim::DumpHash() const { + LogInfo::MapleLogger() << "\n"; + LogInfo::MapleLogger() << ">>>>>> Insn Hash For " << cgFunc->GetName() << " <<<<<<\n"; + FOR_ALL_BB(bb, cgFunc) { + g_hashSeed = 0; + if (bb->IsUnreachable()) { + continue; + } + cgFunc->DumpBBInfo(bb); + FOR_BB_INSNS(insn, bb) { + LogInfo::MapleLogger() << "[primal form] "; + insn->Dump(); + if (insn->IsMachineInstruction()) { + LogInfo::MapleLogger() << "{ RHSHashCode: " << InsnRHSHash{}(insn) << " }\n"; + } + } + } +} +} /* namespace maplebe */ \ No newline at end of file diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_reaching.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_reaching.cpp new file mode 100644 index 0000000000000000000000000000000000000000..797eca00fdb4188c089276a6cf12b5c2bc8b8f92 --- /dev/null +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_reaching.cpp @@ -0,0 +1,1259 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "aarch64_reaching.h" +#include "aarch64_cg.h" +namespace maplebe { +/* MCC_ClearLocalStackRef clear 1 stack slot, and MCC_DecRefResetPair clear 2 stack slot, + * the stack positins cleared are recorded in callInsn->clearStackOffset + */ +constexpr short kFirstClearMemIndex = 0; +constexpr short kSecondClearMemIndex = 1; + +/* insert pseudo insn for parameters definition */ +void AArch64ReachingDefinition::InitStartGen() { + BB *bb = cgFunc->GetFirstBB(); + + /* Parameters should be define first. */ + AArch64CallConvImpl parmLocator(cgFunc->GetBecommon()); + CCLocInfo pLoc; + for (uint32 i = 0; i < cgFunc->GetFunction().GetFormalCount(); ++i) { + MIRType *type = cgFunc->GetFunction().GetNthParamType(i); + (void)parmLocator.LocateNextParm(*type, pLoc, i == 0, &cgFunc->GetFunction()); + if (pLoc.reg0 == 0) { + /* If is a large frame, parameter addressing mode is based vreg:Vra. */ + continue; + } + + uint64 symSize = cgFunc->GetBecommon().GetTypeSize(type->GetTypeIndex()); + if ((cgFunc->GetMirModule().GetSrcLang() == kSrcLangC) && (symSize > k8ByteSize)) { + /* For C structure passing in one or two registers. */ + symSize = k8ByteSize; + } + RegType regType = (pLoc.reg0 < V0) ? kRegTyInt : kRegTyFloat; + uint32 srcBitSize = ((symSize < k4ByteSize) ? 
k4ByteSize : symSize) * kBitsPerByte; + + MOperator mOp; + if (regType == kRegTyInt) { + if (srcBitSize <= k32BitSize) { + mOp = MOP_pseudo_param_def_w; + } else { + mOp = MOP_pseudo_param_def_x; + } + } else { + if (srcBitSize <= k32BitSize) { + mOp = MOP_pseudo_param_def_s; + } else { + mOp = MOP_pseudo_param_def_d; + } + } + + AArch64CGFunc *aarchCGFunc = static_cast(cgFunc); + + RegOperand ®Opnd = aarchCGFunc->GetOrCreatePhysicalRegisterOperand( + static_cast(pLoc.reg0), srcBitSize, regType); + Insn &pseudoInsn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, regOpnd); + bb->InsertInsnBegin(pseudoInsn); + pseudoInsns.emplace_back(&pseudoInsn); + if (pLoc.reg1 > 0) { + RegOperand ®Opnd1 = aarchCGFunc->GetOrCreatePhysicalRegisterOperand( + static_cast(pLoc.reg1), srcBitSize, regType); + Insn &pseudoInsn1 = cgFunc->GetInsnBuilder()->BuildInsn(mOp, regOpnd1); + bb->InsertInsnBegin(pseudoInsn1); + pseudoInsns.emplace_back(&pseudoInsn1); + } + if (pLoc.reg2 > 0) { + RegOperand ®Opnd2 = aarchCGFunc->GetOrCreatePhysicalRegisterOperand( + static_cast(pLoc.reg2), srcBitSize, regType); + Insn &pseudoInsn1 = cgFunc->GetInsnBuilder()->BuildInsn(mOp, regOpnd2); + bb->InsertInsnBegin(pseudoInsn1); + pseudoInsns.emplace_back(&pseudoInsn1); + } + if (pLoc.reg3 > 0) { + RegOperand ®Opnd3 = aarchCGFunc->GetOrCreatePhysicalRegisterOperand( + static_cast(pLoc.reg3), srcBitSize, regType); + Insn &pseudoInsn1 = cgFunc->GetInsnBuilder()->BuildInsn(mOp, regOpnd3); + bb->InsertInsnBegin(pseudoInsn1); + pseudoInsns.emplace_back(&pseudoInsn1); + } + + { + /* + * define memory address since store param may be transfered to stp and which with the short offset range. + * we can not get the correct definition before RA. + * example: + * add x8, sp, #712 + * stp x0, x1, [x8] // store param: _this Reg40_R313644 + * stp x2, x3, [x8,#16] // store param: Reg41_R333743 Reg42_R333622 + * stp x4, x5, [x8,#32] // store param: Reg43_R401297 Reg44_R313834 + * str x7, [x8,#48] // store param: Reg46_R401297 + */ + MIRSymbol *sym = cgFunc->GetFunction().GetFormal(i); + if (!sym->IsPreg()) { + MIRSymbol *firstSym = cgFunc->GetFunction().GetFormal(i); + const AArch64SymbolAlloc *firstSymLoc = + static_cast(cgFunc->GetMemlayout()->GetSymAllocInfo(firstSym->GetStIndex())); + int32 stOffset = cgFunc->GetBaseOffset(*firstSymLoc); + MIRType *firstType = cgFunc->GetFunction().GetNthParamType(i); + uint32 firstSymSize = cgFunc->GetBecommon().GetTypeSize(firstType->GetTypeIndex()); + uint32 firstStackSize = firstSymSize < k4ByteSize ? k4ByteSize : firstSymSize; + + MemOperand *memOpnd = aarchCGFunc->CreateStackMemOpnd(RFP, stOffset, firstStackSize * kBitsPerByte); + MOperator mopTemp = firstStackSize <= k4ByteSize ? MOP_pseudo_param_store_w : MOP_pseudo_param_store_x; + Insn &pseudoInsnTemp = cgFunc->GetInsnBuilder()->BuildInsn(mopTemp, *memOpnd); + bb->InsertInsnBegin(pseudoInsnTemp); + pseudoInsns.emplace_back(&pseudoInsnTemp); + } + } + } + + /* if function has "bl MCC_InitializeLocalStackRef", should define corresponding memory. 
*/ + AArch64CGFunc *a64CGFunc = static_cast(cgFunc); + + for (uint32 i = 0; i < a64CGFunc->GetRefCount(); ++i) { + MemOperand *memOpnd = a64CGFunc->CreateStackMemOpnd( + RFP, a64CGFunc->GetBeginOffset() + static_cast(i * k8BitSize), k64BitSize); + Insn &pseudoInsn = cgFunc->GetInsnBuilder()->BuildInsn(MOP_pseudo_ref_init_x, *memOpnd); + + bb->InsertInsnBegin(pseudoInsn); + pseudoInsns.emplace_back(&pseudoInsn); + } +} + +/* insert pseudoInsns for ehBB, R0 and R1 are defined in pseudoInsns */ +void AArch64ReachingDefinition::InitEhDefine(BB &bb) { + AArch64CGFunc *aarchCGFunc = static_cast(cgFunc); + + /* Insert MOP_pseudo_eh_def_x R1. */ + RegOperand ®Opnd1 = aarchCGFunc->GetOrCreatePhysicalRegisterOperand(R1, k64BitSize, kRegTyInt); + Insn &pseudoInsn = cgFunc->GetInsnBuilder()->BuildInsn(MOP_pseudo_eh_def_x, regOpnd1); + bb.InsertInsnBegin(pseudoInsn); + pseudoInsns.emplace_back(&pseudoInsn); + + /* insert MOP_pseudo_eh_def_x R0. */ + RegOperand ®Opnd2 = aarchCGFunc->GetOrCreatePhysicalRegisterOperand(R0, k64BitSize, kRegTyInt); + Insn &newPseudoInsn = cgFunc->GetInsnBuilder()->BuildInsn(MOP_pseudo_eh_def_x, regOpnd2); + bb.InsertInsnBegin(newPseudoInsn); + pseudoInsns.emplace_back(&newPseudoInsn); +} + +/* insert pseudoInsns for return value R0/V0 */ +void AArch64ReachingDefinition::AddRetPseudoInsn(BB &bb) { + AArch64reg regNO = static_cast(cgFunc)->GetReturnRegisterNumber(); + if (regNO == kInvalidRegNO) { + return; + } + + if (regNO == R0) { + RegOperand ®Opnd = + static_cast(cgFunc)->GetOrCreatePhysicalRegisterOperand(regNO, k64BitSize, kRegTyInt); + Insn &retInsn = cgFunc->GetInsnBuilder()->BuildInsn(MOP_pseudo_ret_int, regOpnd); + bb.AppendInsn(retInsn); + pseudoInsns.emplace_back(&retInsn); + } else if (regNO == V0) { + RegOperand ®Opnd = + static_cast(cgFunc)->GetOrCreatePhysicalRegisterOperand(regNO, k64BitSize, kRegTyFloat); + Insn &retInsn = cgFunc->GetInsnBuilder()->BuildInsn(MOP_pseudo_ret_float, regOpnd); + bb.AppendInsn(retInsn); + pseudoInsns.emplace_back(&retInsn); + } +} + +void AArch64ReachingDefinition::AddRetPseudoInsns() { + uint32 exitBBSize = cgFunc->GetExitBBsVec().size(); + if (exitBBSize == 0) { + if (cgFunc->GetCleanupBB() != nullptr && cgFunc->GetCleanupBB()->GetPrev() != nullptr) { + AddRetPseudoInsn(*cgFunc->GetCleanupBB()->GetPrev()); + } else { + AddRetPseudoInsn(*cgFunc->GetLastBB()->GetPrev()); + } + } else { + for (uint32 i = 0; i < exitBBSize; ++i) { + AddRetPseudoInsn(*cgFunc->GetExitBB(i)); + } + } +} + +void AArch64ReachingDefinition::GenAllAsmDefRegs(BB &bb, Insn &insn, uint32 index) { + for (auto ® : static_cast(insn.GetOperand(index)).GetOperands()) { + regGen[bb.GetId()]->SetBit(static_cast(reg)->GetRegisterNumber()); + } +} + +void AArch64ReachingDefinition::GenAllAsmUseRegs(BB &bb, Insn &insn, uint32 index) { + for (auto ® : static_cast(insn.GetOperand(index)).GetOperands()) { + regUse[bb.GetId()]->SetBit(static_cast(reg)->GetRegisterNumber()); + } +} + +/* all caller saved register are modified by call insn */ +void AArch64ReachingDefinition::GenAllCallerSavedRegs(BB &bb, Insn &insn) { + if (CGOptions::DoIPARA()) { + std::set callerSaveRegs; + cgFunc->GetRealCallerSaveRegs(insn, callerSaveRegs); + for (auto i : callerSaveRegs) { + regGen[bb.GetId()]->SetBit(i); + } + } else { + for (uint32 i = R0; i <= V31; ++i) { + if (AArch64Abi::IsCallerSaveReg(static_cast(i))) { + regGen[bb.GetId()]->SetBit(i); + } + } + } +} + +/* reg killed killed by call insn */ +bool AArch64ReachingDefinition::IsRegKilledByCallInsn(const Insn &insn, regno_t regNO) 
const { + if (CGOptions::DoIPARA()) { + std::set callerSaveRegs; + cgFunc->GetRealCallerSaveRegs(insn, callerSaveRegs); + return callerSaveRegs.find(regNO) != callerSaveRegs.end(); + } else { + return AArch64Abi::IsCallerSaveReg(static_cast(regNO)); + } +} + +bool AArch64ReachingDefinition::KilledByCallBetweenInsnInSameBB(const Insn &startInsn, + const Insn &endInsn, regno_t regNO) const { + ASSERT(startInsn.GetBB() == endInsn.GetBB(), "two insns must be in same bb"); + if (CGOptions::DoIPARA()) { + for (const Insn *insn = &startInsn; insn != endInsn.GetNext(); insn = insn->GetNext()) { + if (insn->IsMachineInstruction() && insn->IsCall() && IsRegKilledByCallInsn(*insn, regNO)) { + return true; + } + } + return false; + } else { + return HasCallBetweenInsnInSameBB(startInsn, endInsn); + } +} +/* + * find definition for register between startInsn and endInsn. + * startInsn and endInsn is not in same BB + * make sure that in path between startBB and endBB there is no redefine. + */ +std::vector AArch64ReachingDefinition::FindRegDefBetweenInsnGlobal( + uint32 regNO, Insn *startInsn, Insn *endInsn) const { + ASSERT(startInsn->GetBB() != endInsn->GetBB(), "call FindRegDefBetweenInsn please"); + std::vector defInsnVec; + if (startInsn == nullptr || endInsn == nullptr) { + return defInsnVec; + } + /* check startBB */ + BB *startBB = startInsn->GetBB(); + std::vector startBBdefInsnVec = FindRegDefBetweenInsn(regNO, startInsn->GetNext(), startBB->GetLastInsn()); + if (startBBdefInsnVec.size() == 1) { + defInsnVec.emplace_back(*startBBdefInsnVec.begin()); + } + if (startBBdefInsnVec.size() > 1 || + (startBBdefInsnVec.empty() && regOut[startBB->GetId()]->TestBit(regNO))) { + defInsnVec.emplace_back(startInsn); + defInsnVec.emplace_back(endInsn); + return defInsnVec; + } + if (IsCallerSavedReg(regNO) && startInsn->GetNext() != nullptr && + KilledByCallBetweenInsnInSameBB(*startInsn->GetNext(), *startBB->GetLastInsn(), regNO)) { + defInsnVec.emplace_back(startInsn); + defInsnVec.emplace_back(endInsn); + return defInsnVec; + } + /* check endBB */ + BB *endBB = endInsn->GetBB(); + std::vector endBBdefInsnVec = FindRegDefBetweenInsn(regNO, endBB->GetFirstInsn(), endInsn->GetPrev()); + if (endBBdefInsnVec.size() == 1) { + defInsnVec.emplace_back(*endBBdefInsnVec.begin()); + } + if (endBBdefInsnVec.size() > 1 || (endBBdefInsnVec.empty() && regIn[endBB->GetId()]->TestBit(regNO))) { + defInsnVec.emplace_back(startInsn); + defInsnVec.emplace_back(endInsn); + return defInsnVec; + } + if (IsCallerSavedReg(regNO) && endInsn->GetPrev() != nullptr && + KilledByCallBetweenInsnInSameBB(*endBB->GetFirstInsn(), *endInsn->GetPrev(), regNO)) { + defInsnVec.emplace_back(startInsn); + defInsnVec.emplace_back(endInsn); + return defInsnVec; + } + InsnSet defInsnSet; + std::vector visitedBB(kMaxBBNum, kNotVisited); + visitedBB[endBB->GetId()] = kNormalVisited; + visitedBB[startBB->GetId()] = kNormalVisited; + std::list pathStatus; + if (DFSFindRegInfoBetweenBB(*startBB, *endBB, regNO, visitedBB, pathStatus, kDumpRegIn)) { + defInsnVec.emplace_back(endInsn); + } + return defInsnVec; +} + +static bool IsRegInAsmList(Insn *insn, uint32 index, uint32 regNO, InsnSet &insnSet) { + for (auto ® : static_cast(insn->GetOperand(index)).GetOperands()) { + if (static_cast(reg)->GetRegisterNumber() == regNO) { + insnSet.insert(insn); + return true; + } + } + return false; +} + +void AArch64ReachingDefinition::FindRegDefInBB(uint32 regNO, BB &bb, InsnSet &defInsnSet) const { + if (!regGen[bb.GetId()]->TestBit(regNO)) { + return; + } + + 
FOR_BB_INSNS(insn, (&bb)) { + if (!insn->IsMachineInstruction()) { + continue; + } + + if (insn->GetMachineOpcode() == MOP_asm) { + if (IsRegInAsmList(insn, kAsmOutputListOpnd, regNO, defInsnSet)) { + continue; + } + IsRegInAsmList(insn, kAsmClobberListOpnd, regNO, defInsnSet); + continue; + } + if (insn->IsCall() && IsRegKilledByCallInsn(*insn, regNO)) { + (void)defInsnSet.insert(insn); + continue; + } + if (insn->IsRegDefined(regNO)) { + (void)defInsnSet.insert(insn); + } + } +} + +/* check whether call insn changed the stack status or not. */ +bool AArch64ReachingDefinition::CallInsnClearDesignateStackRef(const Insn &callInsn, int64 offset) const { + return offset == callInsn.GetClearStackOffset(kFirstClearMemIndex) || + offset == callInsn.GetClearStackOffset(kSecondClearMemIndex); +} + +/* + * find definition for stack memory operand between startInsn and endInsn. + * startInsn and endInsn must be in same BB and startInsn and endInsn are included + * special case: + * MCC_ClearLocalStackRef clear designate stack position, the designate stack position is thought defined + * for example: + * add x0, x29, #24 + * bl MCC_ClearLocalStackRef + */ +std::vector AArch64ReachingDefinition::FindMemDefBetweenInsn( + uint32 offset, const Insn *startInsn, Insn *endInsn) const { + std::vector defInsnVec; + if (startInsn == nullptr || endInsn == nullptr) { + return defInsnVec; + } + + ASSERT(startInsn->GetBB() == endInsn->GetBB(), "two insns must be in a same BB"); + ASSERT(endInsn->GetId() >= startInsn->GetId(), "two insns must be in a same BB"); + if (!memGen[startInsn->GetBB()->GetId()]->TestBit(offset / kMemZoomSize)) { + return defInsnVec; + } + + for (Insn *insn = endInsn; insn != nullptr && insn != startInsn->GetPrev(); insn = insn->GetPrev()) { + if (!insn->IsMachineInstruction()) { + continue; + } + + if (insn->GetMachineOpcode() == MOP_asm) { + if (insn->IsAsmModMem()) { + defInsnVec.emplace_back(insn); + return defInsnVec; + } + continue; + } + + if (insn->IsCall()) { + if (CallInsnClearDesignateStackRef(*insn, offset)) { + defInsnVec.emplace_back(insn); + return defInsnVec; + } + continue; + } + + if (!(insn->IsStore() || AArch64isa::IsPseudoInstruction(insn->GetMachineOpcode()))) { + continue; + } + + uint32 opndNum = insn->GetOperandSize(); + for (uint32 i = 0; i < opndNum; ++i) { + Operand &opnd = insn->GetOperand(i); + + if (opnd.IsMemoryAccessOperand()) { + auto &memOpnd = static_cast(opnd); + RegOperand *base = memOpnd.GetBaseRegister(); + RegOperand *index = memOpnd.GetIndexRegister(); + + if (base == nullptr || !IsFrameReg(*base) || index != nullptr) { + break; + } + + if (!insn->IsSpillInsn() && cgFunc->IsAfterRegAlloc()) { + break; + } + + ASSERT(memOpnd.GetOffsetImmediate() != nullptr, "offset must be a immediate value"); + int64 memOffset = memOpnd.GetOffsetImmediate()->GetOffsetValue(); + if (memOffset < 0) { + memOffset = stackSize + memOffset; + } + if ((offset == memOffset) || + (insn->IsStorePair() && offset == memOffset + GetEachMemSizeOfPair(insn->GetMachineOpcode()))) { + defInsnVec.emplace_back(insn); + return defInsnVec; + } + } + } + } + return defInsnVec; +} + +void AArch64ReachingDefinition::FindMemDefInBB(uint32 offset, BB &bb, InsnSet &defInsnSet) const { + if (!memGen[bb.GetId()]->TestBit(offset / kMemZoomSize)) { + return; + } + + FOR_BB_INSNS(insn, (&bb)) { + if (!insn->IsMachineInstruction()) { + continue; + } + + if (insn->IsCall()) { + if (insn->GetMachineOpcode() == MOP_asm) { + if (insn->IsAsmModMem()) { + (void)defInsnSet.insert(insn); + } + continue; + 
} + if (CallInsnClearDesignateStackRef(*insn, offset)) { + (void)defInsnSet.insert(insn); + } + continue; + } + + if (!(insn->IsStore() || AArch64isa::IsPseudoInstruction(insn->GetMachineOpcode()))) { + continue; + } + + uint32 opndNum = insn->GetOperandSize(); + for (uint32 i = 0; i < opndNum; ++i) { + Operand &opnd = insn->GetOperand(i); + if (opnd.IsMemoryAccessOperand()) { + auto &memOpnd = static_cast(opnd); + RegOperand *base = memOpnd.GetBaseRegister(); + RegOperand *index = memOpnd.GetIndexRegister(); + + if (base == nullptr || !IsFrameReg(*base) || index != nullptr) { + break; + } + + ASSERT(memOpnd.GetOffsetImmediate() != nullptr, "offset must be a immediate value"); + int64 memOffset = memOpnd.GetOffsetImmediate()->GetOffsetValue(); + if (memOffset < 0) { + memOffset = stackSize + memOffset; + } + if (offset == memOffset) { + (void)defInsnSet.insert(insn); + break; + } + if (insn->IsStorePair() && offset == memOffset + GetEachMemSizeOfPair(insn->GetMachineOpcode())) { + (void)defInsnSet.insert(insn); + break; + } + } + } + } +} + +/* + * find defininition for register Iteratively. + * input: + * startBB: find definnition starting from startBB + * regNO: the No of register to be find + * visitedBB: record these visited BB + * defInsnSet: insn defining register is saved in this set + */ +void AArch64ReachingDefinition::DFSFindDefForRegOpnd(const BB &startBB, uint32 regNO, + std::vector &visitedBB, InsnSet &defInsnSet) const { + std::vector defInsnVec; + for (auto predBB : startBB.GetPreds()) { + if (visitedBB[predBB->GetId()] != kNotVisited) { + continue; + } + visitedBB[predBB->GetId()] = kNormalVisited; + if (regGen[predBB->GetId()]->TestBit(regNO) || (regNO == kRFLAG && predBB->HasCall())) { + defInsnVec.clear(); + defInsnVec = FindRegDefBetweenInsn(regNO, predBB->GetFirstInsn(), predBB->GetLastInsn()); + defInsnSet.insert(defInsnVec.cbegin(), defInsnVec.cend()); + } else if (regIn[predBB->GetId()]->TestBit(regNO)) { + DFSFindDefForRegOpnd(*predBB, regNO, visitedBB, defInsnSet); + } + } + + for (auto predEhBB : startBB.GetEhPreds()) { + if (visitedBB[predEhBB->GetId()] == kEHVisited) { + continue; + } + visitedBB[predEhBB->GetId()] = kEHVisited; + if (regGen[predEhBB->GetId()]->TestBit(regNO) || (regNO == kRFLAG && predEhBB->HasCall())) { + FindRegDefInBB(regNO, *predEhBB, defInsnSet); + } + + if (regIn[predEhBB->GetId()]->TestBit(regNO)) { + DFSFindDefForRegOpnd(*predEhBB, regNO, visitedBB, defInsnSet); + } + } +} + +/* + * find defininition for stack memory iteratively. 
+ * input: + * startBB: find definnition starting from startBB + * offset: the offset of memory to be find + * visitedBB: record these visited BB + * defInsnSet: insn defining register is saved in this set + */ +void AArch64ReachingDefinition::DFSFindDefForMemOpnd(const BB &startBB, uint32 offset, + std::vector &visitedBB, InsnSet &defInsnSet) const { + std::vector defInsnVec; + for (auto predBB : startBB.GetPreds()) { + if (visitedBB[predBB->GetId()] != kNotVisited) { + continue; + } + visitedBB[predBB->GetId()] = kNormalVisited; + if (memGen[predBB->GetId()]->TestBit(offset / kMemZoomSize)) { + defInsnVec.clear(); + defInsnVec = FindMemDefBetweenInsn(offset, predBB->GetFirstInsn(), predBB->GetLastInsn()); + ASSERT(!defInsnVec.empty(), "opnd must be defined in this bb"); + defInsnSet.insert(defInsnVec.cbegin(), defInsnVec.cend()); + } else if (memIn[predBB->GetId()]->TestBit(offset / kMemZoomSize)) { + DFSFindDefForMemOpnd(*predBB, offset, visitedBB, defInsnSet); + } + } + + for (auto predEhBB : startBB.GetEhPreds()) { + if (visitedBB[predEhBB->GetId()] == kEHVisited) { + continue; + } + visitedBB[predEhBB->GetId()] = kEHVisited; + if (memGen[predEhBB->GetId()]->TestBit(offset / kMemZoomSize)) { + FindMemDefInBB(offset, *predEhBB, defInsnSet); + } + + if (memIn[predEhBB->GetId()]->TestBit(offset / kMemZoomSize)) { + DFSFindDefForMemOpnd(*predEhBB, offset, visitedBB, defInsnSet); + } + } +} + +/* + * find defininition for register. + * input: + * insn: the insn in which register is used + * indexOrRegNO: the index of register in insn or the No of register to be find + * isRegNO: if indexOrRegNO is index, this argument is false, else is true + * return: + * the set of definition insns for register + */ +InsnSet AArch64ReachingDefinition::FindDefForRegOpnd(Insn &insn, uint32 indexOrRegNO, bool isRegNO) const { + uint32 regNO = indexOrRegNO; + if (!isRegNO) { + Operand &opnd = insn.GetOperand(indexOrRegNO); + auto ®Opnd = static_cast(opnd); + regNO = regOpnd.GetRegisterNumber(); + } + + std::vector defInsnVec; + if (regGen[insn.GetBB()->GetId()]->TestBit(regNO)) { + defInsnVec = FindRegDefBetweenInsn(regNO, insn.GetBB()->GetFirstInsn(), insn.GetPrev()); + } + InsnSet defInsnSet; + if (!defInsnVec.empty()) { + defInsnSet.insert(defInsnVec.cbegin(), defInsnVec.cend()); + return defInsnSet; + } + std::vector visitedBB(kMaxBBNum, kNotVisited); + if (insn.GetBB()->IsCleanup()) { + DFSFindDefForRegOpnd(*insn.GetBB(), regNO, visitedBB, defInsnSet); + if (defInsnSet.empty()) { + FOR_ALL_BB(bb, cgFunc) { + if (bb->IsCleanup()) { + continue; + } + if (regGen[bb->GetId()]->TestBit(regNO)) { + FindRegDefInBB(regNO, *bb, defInsnSet); + } + } + } + } else { + DFSFindDefForRegOpnd(*insn.GetBB(), regNO, visitedBB, defInsnSet); + } + return defInsnSet; +} + +bool AArch64ReachingDefinition::FindRegUseBetweenInsnGlobal(uint32 regNO, Insn *startInsn, Insn *endInsn, + BB* movBB) const { + if (startInsn == nullptr || endInsn == nullptr) { + return false; + } + if (startInsn->GetBB() == endInsn->GetBB()) { + if (startInsn->GetNextMachineInsn() == endInsn) { + return false; + } else { + return FindRegUsingBetweenInsn(regNO, startInsn->GetNextMachineInsn(), endInsn->GetPreviousMachineInsn()); + } + } else { + /* check Start BB */ + BB* startBB = startInsn->GetBB(); + if (FindRegUsingBetweenInsn(regNO, startInsn->GetNextMachineInsn(), startBB->GetLastInsn())) { + return true; + } + /* check End BB */ + BB *endBB = endInsn->GetBB(); + if (FindRegUsingBetweenInsn(regNO, endBB->GetFirstInsn(), 
endInsn->GetPreviousMachineInsn())) { + return true; + } + /* Global : startBB cannot dominate BB which it doesn't dominate before */ + if (startBB == movBB) { + return false; /* it will not change dominate */ + } + std::vector visitedBB(kMaxBBNum, kNotVisited); + visitedBB[movBB->GetId()] = kNormalVisited; + visitedBB[startBB->GetId()] = kNormalVisited; + if (DFSFindRegDomianBetweenBB(*startBB, regNO, visitedBB)) { + return true; + } + } + return false; +} + +bool AArch64ReachingDefinition::HasRegDefBetweenInsnGlobal(uint32 regNO, Insn &startInsn, Insn &endInsn) const { + CHECK_FATAL((startInsn.GetBB() != endInsn.GetBB()), "Is same BB!"); + /* check Start BB */ + BB* startBB = startInsn.GetBB(); + auto startInsnSet = FindRegDefBetweenInsn(regNO, startInsn.GetNext(), startBB->GetLastInsn()); + if (!startInsnSet.empty()) { + return true; + } + /* check End BB */ + BB *endBB = endInsn.GetBB(); + auto endInsnSet = FindRegDefBetweenInsn(regNO, endBB->GetFirstInsn(), endInsn.GetPrev()); + if (!endInsnSet.empty()) { + return true; + } + if (!startBB->GetSuccs().empty()) { + for (auto *succ : startBB->GetSuccs()) { + if (succ == endBB) { + return (!startInsnSet.empty() && !endInsnSet.empty()); + } + } + } + /* check bb Between start and end */ + std::vector visitedBB(kMaxBBNum, kNotVisited); + visitedBB[startBB->GetId()] = kNormalVisited; + visitedBB[endBB->GetId()] = kNormalVisited; + return DFSFindRegDefBetweenBB(*startBB, *endBB, regNO, visitedBB); +} + +bool AArch64ReachingDefinition::DFSFindRegDefBetweenBB(const BB &startBB, const BB &endBB, uint32 regNO, + std::vector &visitedBB) const { + if (&startBB == &endBB) { + return false; + } + for (auto succBB : startBB.GetSuccs()) { + if (visitedBB[succBB->GetId()] != kNotVisited) { + continue; + } + visitedBB[succBB->GetId()] = kNormalVisited; + if (regGen[succBB->GetId()]->TestBit(regNO)) { + return true; + } + if (DFSFindRegDefBetweenBB(*succBB, endBB, regNO, visitedBB)) { + return true; + } + } + return false; +} + +bool AArch64ReachingDefinition::DFSFindRegDomianBetweenBB(const BB startBB, uint32 regNO, + std::vector &visitedBB) const { + for (auto succBB : startBB.GetSuccs()) { + if (visitedBB[succBB->GetId()] != kNotVisited) { + continue; + } + visitedBB[succBB->GetId()] = kNormalVisited; + if (regIn[succBB->GetId()]->TestBit(regNO)) { + return true; + } else if (regGen[succBB->GetId()]->TestBit(regNO)) { + continue; + } + if (DFSFindRegDomianBetweenBB(*succBB, regNO, visitedBB)) { + return true; + } + } + CHECK_FATAL(startBB.GetEhSuccs().empty(), "C Module have no eh"); + return false; +} + +bool AArch64ReachingDefinition::DFSFindRegInfoBetweenBB(const BB startBB, const BB &endBB, uint32 regNO, + std::vector &visitedBB, + std::list &pathStatus, DumpType infoType) const { + for (auto succBB : startBB.GetSuccs()) { + if (succBB == &endBB) { + for (auto &status : as_const(pathStatus)) { + if (!status) { + return true; + } + } + continue; + } + if (visitedBB[succBB->GetId()] != kNotVisited) { + continue; + } + visitedBB[succBB->GetId()] = kNormalVisited; + /* path is no clean check regInfo */ + bool isPathClean = true; + switch (infoType) { + case kDumpRegUse: { + isPathClean = !regUse[succBB->GetId()]->TestBit(regNO); + break; + } + case kDumpRegGen: { + isPathClean = !regGen[succBB->GetId()]->TestBit(regNO); + break; + } + case kDumpRegIn: { + isPathClean = !(regIn[succBB->GetId()]->TestBit(regNO) || regGen[succBB->GetId()]->TestBit(regNO)); + break; + } + default: + CHECK_FATAL(false, "NIY"); + } + pathStatus.emplace_back(isPathClean); + 
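+ /* recurse into the successor; the path status pushed above is popped after the call so that sibling paths are checked independently */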
if (DFSFindRegInfoBetweenBB(*succBB, endBB, regNO, visitedBB, pathStatus, infoType)) { + return true; + } + pathStatus.pop_back(); + } + CHECK_FATAL(startBB.GetEhSuccs().empty(), "C Module have no eh"); + return false; +} + +bool AArch64ReachingDefinition::FindRegUsingBetweenInsn(uint32 regNO, Insn *startInsn, const Insn *endInsn) const { + if (startInsn == nullptr || endInsn == nullptr) { + return false; + } + + ASSERT(startInsn->GetBB() == endInsn->GetBB(), "two insns must be in a same BB"); + for (Insn *insn = startInsn; insn != nullptr && insn != endInsn->GetNext(); insn = insn->GetNext()) { + if (!insn->IsMachineInstruction()) { + continue; + } + if (insn->GetMachineOpcode() == MOP_asm) { + InsnSet temp; + if (IsRegInAsmList(insn, kAsmInputListOpnd, regNO, temp) || + IsRegInAsmList(insn, kAsmOutputListOpnd, regNO, temp)) { + return true; + } + continue; + } + const InsnDesc *md = insn->GetDesc(); + uint32 opndNum = insn->GetOperandSize(); + for (uint32 i = 0; i < opndNum; ++i) { + Operand &opnd = insn->GetOperand(i); + if (opnd.IsList()) { + auto &listOpnd = static_cast(opnd); + for (auto &listElem : listOpnd.GetOperands()) { + RegOperand *regOpnd = static_cast(listElem); + ASSERT(regOpnd != nullptr, "parameter operand must be RegOperand"); + if (regNO == regOpnd->GetRegisterNumber()) { + return true; + } + } + continue; + } + + auto *regProp = md->opndMD[i]; + if (!regProp->IsUse() && !opnd.IsMemoryAccessOperand()) { + continue; + } + + if (opnd.IsMemoryAccessOperand()) { + auto &memOpnd = static_cast(opnd); + RegOperand *base = memOpnd.GetBaseRegister(); + RegOperand *index = memOpnd.GetIndexRegister(); + if ((base != nullptr && base->GetRegisterNumber() == regNO) || + (index != nullptr && index->GetRegisterNumber() == regNO)) { + return true; + } + } else if (opnd.IsConditionCode()) { + Operand &rflagOpnd = cgFunc->GetOrCreateRflag(); + RegOperand &rflagReg = static_cast(rflagOpnd); + if (rflagReg.GetRegisterNumber() == regNO) { + return true; + } + } else if (opnd.IsRegister() && (static_cast(opnd).GetRegisterNumber() == regNO)) { + return true; + } + } + } + return false; +} + +/* + * find insn using register between startInsn and endInsn. 
+ * startInsn and endInsn must be in same BB and startInsn and endInsn are included + */ +bool AArch64ReachingDefinition::FindRegUseBetweenInsn(uint32 regNO, Insn *startInsn, Insn *endInsn, + InsnSet ®UseInsnSet) const { + bool findFinish = false; + if (startInsn == nullptr || endInsn == nullptr) { + return findFinish; + } + + ASSERT(startInsn->GetBB() == endInsn->GetBB(), "two insns must be in a same BB"); + for (Insn *insn = startInsn; insn != nullptr && insn != endInsn->GetNext(); insn = insn->GetNext()) { + if (!insn->IsMachineInstruction()) { + continue; + } + if (insn->GetMachineOpcode() == MOP_asm) { + IsRegInAsmList(insn, kAsmInputListOpnd, regNO, regUseInsnSet); + if (IsRegInAsmList(insn, kAsmOutputListOpnd, regNO, regUseInsnSet)) { + break; + } + continue; + } + /* if insn is call and regNO is caller-saved register, then regNO will not be used later */ + if (insn->IsCall() && IsRegKilledByCallInsn(*insn, regNO)) { + findFinish = true; + } + + const InsnDesc *md = insn->GetDesc(); + uint32 opndNum = insn->GetOperandSize(); + for (uint32 i = 0; i < opndNum; ++i) { + Operand &opnd = insn->GetOperand(i); + if (opnd.IsList()) { + auto &listOpnd = static_cast(opnd); + for (auto &listElem : listOpnd.GetOperands()) { + auto *regOpnd = static_cast(listElem); + ASSERT(regOpnd != nullptr, "parameter operand must be RegOperand"); + if (regNO == regOpnd->GetRegisterNumber()) { + (void)regUseInsnSet.insert(insn); + } + } + continue; + } else if (opnd.IsMemoryAccessOperand()) { + auto &memOpnd = static_cast(opnd); + RegOperand *baseOpnd = memOpnd.GetBaseRegister(); + if (baseOpnd != nullptr && + (memOpnd.GetAddrMode() == MemOperand::kAddrModeBOi) && + (memOpnd.IsPostIndexed() || memOpnd.IsPreIndexed()) && + baseOpnd->GetRegisterNumber() == regNO) { + findFinish = true; + } + } + + auto *regProp = md->opndMD[i]; + if (regProp->IsDef() && opnd.IsRegister() && + (static_cast(opnd).GetRegisterNumber() == regNO)) { + findFinish = true; + } + + if (!regProp->IsUse() && !opnd.IsMemoryAccessOperand()) { + continue; + } + + if (opnd.IsMemoryAccessOperand()) { + auto &memOpnd = static_cast(opnd); + RegOperand *base = memOpnd.GetBaseRegister(); + RegOperand *index = memOpnd.GetIndexRegister(); + if ((base != nullptr && base->GetRegisterNumber() == regNO) || + (index != nullptr && index->GetRegisterNumber() == regNO)) { + (void)regUseInsnSet.insert(insn); + } + } else if (opnd.IsConditionCode()) { + Operand &rflagOpnd = cgFunc->GetOrCreateRflag(); + RegOperand &rflagReg = static_cast(rflagOpnd); + if (rflagReg.GetRegisterNumber() == regNO) { + (void)regUseInsnSet.insert(insn); + } + } else if (opnd.IsRegister() && (static_cast(opnd).GetRegisterNumber() == regNO)) { + (void)regUseInsnSet.insert(insn); + } + } + + if (findFinish) { + break; + } + } + return findFinish; +} + +/* + * find insn using stack memory operand between startInsn and endInsn. 
+ * startInsn and endInsn must be in same BB and startInsn and endInsn are included + */ +bool AArch64ReachingDefinition::FindMemUseBetweenInsn(uint32 offset, Insn *startInsn, const Insn *endInsn, + InsnSet &memUseInsnSet) const { + bool findFinish = false; + if (startInsn == nullptr || endInsn == nullptr) { + return findFinish; + } + + ASSERT(startInsn->GetBB() == endInsn->GetBB(), "two insns must be in a same BB"); + ASSERT(endInsn->GetId() >= startInsn->GetId(), "end ID must be greater than or equal to start ID"); + + for (Insn *insn = startInsn; insn != nullptr && insn != endInsn->GetNext(); insn = insn->GetNext()) { + if (!insn->IsMachineInstruction()) { + continue; + } + + if (insn->IsCall()) { + if (insn->GetMachineOpcode() == MOP_asm) { + return true; + } + if (CallInsnClearDesignateStackRef(*insn, offset)) { + return true; + } + continue; + } + + const InsnDesc *md = insn->GetDesc(); + uint32 opndNum = insn->GetOperandSize(); + for (uint32 i = 0; i < opndNum; ++i) { + Operand &opnd = insn->GetOperand(i); + if (!opnd.IsMemoryAccessOperand()) { + continue; + } + + auto &memOpnd = static_cast(opnd); + RegOperand *base = memOpnd.GetBaseRegister(); + if (base == nullptr || !IsFrameReg(*base)) { + continue; + } + + ASSERT(memOpnd.GetIndexRegister() == nullptr, "offset must not be Register for frame MemOperand"); + ASSERT(memOpnd.GetOffsetImmediate() != nullptr, "offset must be a immediate value"); + int64 memOffset = memOpnd.GetOffsetImmediate()->GetValue(); + if (memOffset < 0) { + memOffset = stackSize + memOffset; + } + + if (insn->IsStore() || AArch64isa::IsPseudoInstruction(insn->GetMachineOpcode())) { + if (memOffset == offset) { + findFinish = true; + continue; + } + if (insn->IsStorePair() && offset == memOffset + GetEachMemSizeOfPair(insn->GetMachineOpcode())) { + findFinish = true; + continue; + } + } + + if (!md->opndMD[i]->IsUse()) { + continue; + } + + if (offset == memOffset) { + (void)memUseInsnSet.insert(insn); + } else if (insn->IsLoadPair() && offset == memOffset + GetEachMemSizeOfPair(insn->GetMachineOpcode())) { + (void)memUseInsnSet.insert(insn); + } + } + + if (findFinish) { + break; + } + } + return findFinish; +} + +/* find all definition for stack memory operand insn.opnd[index] */ +InsnSet AArch64ReachingDefinition::FindDefForMemOpnd(Insn &insn, uint32 indexOrOffset, bool isOffset) const { + InsnSet defInsnSet; + int64 memOffSet = 0; + if (!isOffset) { + Operand &opnd = insn.GetOperand(indexOrOffset); + ASSERT(opnd.IsMemoryAccessOperand(), "opnd must be MemOperand"); + + auto &memOpnd = static_cast(opnd); + RegOperand *base = memOpnd.GetBaseRegister(); + RegOperand *indexReg = memOpnd.GetIndexRegister(); + + if (base == nullptr || !IsFrameReg(*base) || indexReg) { + return defInsnSet; + } + ASSERT(memOpnd.GetOffsetImmediate() != nullptr, "offset must be a immediate value"); + memOffSet = memOpnd.GetOffsetImmediate()->GetOffsetValue(); + if (memOffSet < 0) { + memOffSet = stackSize + memOffSet; + } + } else { + memOffSet = indexOrOffset; + } + std::vector defInsnVec; + if (memGen[insn.GetBB()->GetId()]->TestBit(static_cast(memOffSet / kMemZoomSize))) { + defInsnVec = FindMemDefBetweenInsn(memOffSet, insn.GetBB()->GetFirstInsn(), insn.GetPrev()); + } + + if (!defInsnVec.empty()) { + defInsnSet.insert(defInsnVec.cbegin(), defInsnVec.cend()); + return defInsnSet; + } + std::vector visitedBB(kMaxBBNum, kNotVisited); + if (insn.GetBB()->IsCleanup()) { + DFSFindDefForMemOpnd(*insn.GetBB(), memOffSet, visitedBB, defInsnSet); + if (defInsnSet.empty()) { + FOR_ALL_BB(bb, 
cgFunc) { + if (bb->IsCleanup()) { + continue; + } + + if (memGen[bb->GetId()]->TestBit(static_cast(memOffSet / kMemZoomSize))) { + FindMemDefInBB(memOffSet, *bb, defInsnSet); + } + } + } + } else { + DFSFindDefForMemOpnd(*insn.GetBB(), memOffSet, visitedBB, defInsnSet); + } + + return defInsnSet; +} + +/* + * find all insn using stack memory operand insn.opnd[index] + * secondMem is used to represent the second stack memory opernad in store pair insn + */ +InsnSet AArch64ReachingDefinition::FindUseForMemOpnd(Insn &insn, uint8 index, bool secondMem) const { + Operand &opnd = insn.GetOperand(index); + ASSERT(opnd.IsMemoryAccessOperand(), "opnd must be MemOperand"); + auto &memOpnd = static_cast(opnd); + RegOperand *base = memOpnd.GetBaseRegister(); + + InsnSet useInsnSet; + if (base == nullptr || !IsFrameReg(*base)) { + return useInsnSet; + } + + ASSERT(memOpnd.GetIndexRegister() == nullptr, "IndexRegister no nullptr"); + ASSERT(memOpnd.GetOffsetImmediate() != nullptr, "offset must be a immediate value"); + int64 memOffSet = memOpnd.GetOffsetImmediate()->GetOffsetValue(); + if (memOffSet < 0) { + memOffSet = stackSize + memOffSet; + } + if (secondMem) { + ASSERT(insn.IsStorePair(), "second MemOperand can only be defined in stp insn"); + memOffSet += GetEachMemSizeOfPair(insn.GetMachineOpcode()); + } + /* memOperand may be redefined in current BB */ + bool findFinish = FindMemUseBetweenInsn(memOffSet, insn.GetNext(), insn.GetBB()->GetLastInsn(), useInsnSet); + std::vector visitedBB(kMaxBBNum, false); + if (findFinish || !memOut[insn.GetBB()->GetId()]->TestBit(static_cast(memOffSet / kMemZoomSize))) { + if (insn.GetBB()->GetEhSuccs().size() != 0) { + DFSFindUseForMemOpnd(*insn.GetBB(), memOffSet, visitedBB, useInsnSet, true); + } + } else { + DFSFindUseForMemOpnd(*insn.GetBB(), memOffSet, visitedBB, useInsnSet, false); + } + if (!insn.GetBB()->IsCleanup() && firstCleanUpBB) { + if (memUse[firstCleanUpBB->GetId()]->TestBit(static_cast(memOffSet / kMemZoomSize))) { + findFinish = FindMemUseBetweenInsn(memOffSet, firstCleanUpBB->GetFirstInsn(), + firstCleanUpBB->GetLastInsn(), useInsnSet); + if (findFinish || !memOut[firstCleanUpBB->GetId()]->TestBit(static_cast(memOffSet / kMemZoomSize))) { + return useInsnSet; + } + } + DFSFindUseForMemOpnd(*firstCleanUpBB, memOffSet, visitedBB, useInsnSet, false); + } + return useInsnSet; +} + +/* + * initialize bb.gen and bb.use + * if it is not computed in first time, bb.gen and bb.use must be cleared firstly + */ +void AArch64ReachingDefinition::InitGenUse(BB &bb, bool firstTime) { + if (!firstTime && (mode & kRDRegAnalysis) != 0) { + regGen[bb.GetId()]->ResetAllBit(); + regUse[bb.GetId()]->ResetAllBit(); + } + if (!firstTime && ((mode & kRDMemAnalysis) != 0)) { + memGen[bb.GetId()]->ResetAllBit(); + memUse[bb.GetId()]->ResetAllBit(); + } + + if (bb.IsEmpty()) { + return; + } + + FOR_BB_INSNS(insn, (&bb)) { + if (!insn->IsMachineInstruction()) { + continue; + } + + if (insn->GetMachineOpcode() == MOP_asm) { + GenAllAsmDefRegs(bb, *insn, kAsmOutputListOpnd); + GenAllAsmDefRegs(bb, *insn, kAsmClobberListOpnd); + GenAllAsmUseRegs(bb, *insn, kAsmInputListOpnd); + continue; + } + if (insn->IsCall() || insn->IsTailCall()) { + GenAllCallerSavedRegs(bb, *insn); + InitMemInfoForClearStackCall(*insn); + } + + const InsnDesc *md = insn->GetDesc(); + uint32 opndNum = insn->GetOperandSize(); + for (uint32 i = 0; i < opndNum; ++i) { + Operand &opnd = insn->GetOperand(i); + auto *regProp = md->opndMD[i]; + if (opnd.IsList() && (mode & kRDRegAnalysis) != 0) { + 
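+ /* list operands (e.g. call argument lists) only appear as uses here, so InitInfoForListOpnd records them in regUse */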
ASSERT(regProp->IsUse(), "ListOperand is used in insn"); + InitInfoForListOpnd(bb, opnd); + } else if (opnd.IsMemoryAccessOperand()) { + InitInfoForMemOperand(*insn, opnd, regProp->IsDef()); + } else if (opnd.IsConditionCode() && ((mode & kRDRegAnalysis) != 0)) { + ASSERT(regProp->IsUse(), "condition code is used in insn"); + InitInfoForConditionCode(bb); + } else if (opnd.IsRegister() && (mode & kRDRegAnalysis) != 0) { + InitInfoForRegOpnd(bb, opnd, regProp->IsDef()); + } + } + } +} + +void AArch64ReachingDefinition::InitMemInfoForClearStackCall(Insn &callInsn) { + if ((mode & kRDMemAnalysis) == 0 || !callInsn.IsClearDesignateStackCall()) { + return; + } + int64 firstOffset = callInsn.GetClearStackOffset(kFirstClearMemIndex); + constexpr int64 defaultValOfClearMemOffset = -1; + if (firstOffset != defaultValOfClearMemOffset) { + memGen[callInsn.GetBB()->GetId()]->SetBit(firstOffset / kMemZoomSize); + } + int64 secondOffset = callInsn.GetClearStackOffset(kSecondClearMemIndex); + if (secondOffset != defaultValOfClearMemOffset) { + memGen[callInsn.GetBB()->GetId()]->SetBit(static_cast(secondOffset / kMemZoomSize)); + } +} + +void AArch64ReachingDefinition::InitInfoForMemOperand(Insn &insn, Operand &opnd, bool isDef) { + ASSERT(opnd.IsMemoryAccessOperand(), "opnd must be MemOperand"); + MemOperand &memOpnd = static_cast(opnd); + RegOperand *base = memOpnd.GetBaseRegister(); + RegOperand *index = memOpnd.GetIndexRegister(); + + if (base == nullptr) { + return; + } + if (((mode & kRDMemAnalysis) != 0) && IsFrameReg(*base)) { + if (index != nullptr) { + SetAnalysisMode(kRDRegAnalysis); + return; + } + CHECK_FATAL(index == nullptr, "Existing [x29 + index] Memory Address"); + ASSERT(memOpnd.GetOffsetImmediate(), "offset must be a immediate value"); + int64 offsetVal = memOpnd.GetOffsetImmediate()->GetOffsetValue(); + if (offsetVal < 0) { + offsetVal = stackSize + offsetVal; + } + if ((offsetVal % kMemZoomSize) != 0) { + SetAnalysisMode(kRDRegAnalysis); + } + + if (!isDef) { + memUse[insn.GetBB()->GetId()]->SetBit(offsetVal / kMemZoomSize); + if (insn.IsLoadPair()) { + int64 nextMemOffset = offsetVal + GetEachMemSizeOfPair(insn.GetMachineOpcode()); + memUse[insn.GetBB()->GetId()]->SetBit(nextMemOffset / kMemZoomSize); + } + } else if (isDef) { + memGen[insn.GetBB()->GetId()]->SetBit(offsetVal / kMemZoomSize); + if (insn.IsStorePair()) { + int64 nextMemOffset = offsetVal + GetEachMemSizeOfPair(insn.GetMachineOpcode()); + memGen[insn.GetBB()->GetId()]->SetBit(nextMemOffset / kMemZoomSize); + } + } + } + + if ((mode & kRDRegAnalysis) != 0) { + regUse[insn.GetBB()->GetId()]->SetBit(base->GetRegisterNumber()); + if (index != nullptr) { + regUse[insn.GetBB()->GetId()]->SetBit(index->GetRegisterNumber()); + } + if (memOpnd.GetAddrMode() == MemOperand::kAddrModeBOi && + (memOpnd.IsPostIndexed() || memOpnd.IsPreIndexed())) { + /* Base operand has changed. 
*/ + regGen[insn.GetBB()->GetId()]->SetBit(base->GetRegisterNumber()); + } + } +} + +void AArch64ReachingDefinition::InitInfoForListOpnd(const BB &bb, const Operand &opnd) { + auto *listOpnd = static_cast(&opnd); + for (auto &listElem : listOpnd->GetOperands()) { + auto *regOpnd = static_cast(listElem); + ASSERT(regOpnd != nullptr, "used Operand in call insn must be Register"); + regUse[bb.GetId()]->SetBit(regOpnd->GetRegisterNumber()); + } +} + +void AArch64ReachingDefinition::InitInfoForConditionCode(const BB &bb) { + Operand &rflagOpnd = cgFunc->GetOrCreateRflag(); + RegOperand &rflagReg = static_cast(rflagOpnd); + regUse[bb.GetId()]->SetBit(rflagReg.GetRegisterNumber()); +} + +void AArch64ReachingDefinition::InitInfoForRegOpnd(const BB &bb, Operand &opnd, bool isDef) { + RegOperand *regOpnd = static_cast(&opnd); + if (!isDef) { + regUse[bb.GetId()]->SetBit(regOpnd->GetRegisterNumber()); + } else { + regGen[bb.GetId()]->SetBit(regOpnd->GetRegisterNumber()); + } +} + +int32 AArch64ReachingDefinition::GetStackSize() const { + const int sizeofFplr = kDivide2 * kIntregBytelen; + return static_cast(static_cast(cgFunc->GetMemlayout())->RealStackFrameSize() + sizeofFplr); +} + +bool AArch64ReachingDefinition::IsCallerSavedReg(uint32 regNO) const { + return AArch64Abi::IsCallerSaveReg(static_cast(regNO)); +} + +int64 AArch64ReachingDefinition::GetEachMemSizeOfPair(MOperator opCode) const { + switch (opCode) { + case MOP_wstp: + case MOP_sstp: + case MOP_wstlxp: + case MOP_wldp: + case MOP_xldpsw: + case MOP_sldp: + case MOP_wldaxp: + return kWordByteNum; + case MOP_xstp: + case MOP_dstp: + case MOP_xstlxp: + case MOP_xldp: + case MOP_dldp: + case MOP_xldaxp: + return kDoubleWordByteNum; + default: + return 0; + } +} +} /* namespace maplebe */ diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_reg_coalesce.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_reg_coalesce.cpp new file mode 100644 index 0000000000000000000000000000000000000000..70b43641f245cd0be7a0d4ea33507e7e24023686 --- /dev/null +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_reg_coalesce.cpp @@ -0,0 +1,428 @@ +/* + * Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "aarch64_reg_coalesce.h" +#include "cg.h" +#include "cg_option.h" +#include "aarch64_isa.h" +#include "aarch64_insn.h" +#include "aarch64_cgfunc.h" +#include "aarch64_cg.h" + +/* + * This phase implements register coalescing on virtual registers: + * it builds live intervals for copy-related virtual registers and merges + * intervals that do not interfere, so the redundant move instructions can be removed. + */ +namespace maplebe { + +#define REGCOAL_DUMP CG_DEBUG_FUNC(*cgFunc) + +bool AArch64LiveIntervalAnalysis::IsUnconcernedReg(const RegOperand &regOpnd) const { + RegType regType = regOpnd.GetRegisterType(); + if (regType == kRegTyCc || regType == kRegTyVary) { + return true; + } + if (regOpnd.GetRegisterNumber() == RZR) { + return true; + } + if (!regOpnd.IsVirtualRegister()) { + return true; + } + return false; +} + +LiveInterval *AArch64LiveIntervalAnalysis::GetOrCreateLiveInterval(regno_t regNO) { + LiveInterval *lr = GetLiveInterval(regNO); + if (lr == nullptr) { + lr = memPool->New<LiveInterval>(alloc); + vregIntervals[regNO] = lr; + lr->SetRegNO(regNO); + } + return lr; +} + +void AArch64LiveIntervalAnalysis::UpdateCallInfo() { + for (auto vregNO : vregLive) { + LiveInterval *lr = GetLiveInterval(vregNO); + if (lr == nullptr) { + return; + } + lr->IncNumCall(); + } +} + +void AArch64LiveIntervalAnalysis::SetupLiveIntervalByOp(const Operand &op, Insn &insn, bool isDef) { + if (!op.IsRegister()) { + return; + } + auto &regOpnd = static_cast<const RegOperand&>(op); + uint32 regNO = regOpnd.GetRegisterNumber(); + if (IsUnconcernedReg(regOpnd)) { + return; + } + LiveInterval *lr = GetOrCreateLiveInterval(regNO); + uint32 point = isDef ? insn.GetId() : (insn.GetId() - 1); + lr->AddRange(insn.GetBB()->GetId(), point, vregLive.find(regNO) != vregLive.end()); + if (lr->GetRegType() == kRegTyUndef) { + lr->SetRegType(regOpnd.GetRegisterType()); + } + if (candidates.find(regNO) != candidates.end()) { + lr->AddRefPoint(&insn, isDef); + } + if (isDef) { + vregLive.erase(regNO); + } else { + vregLive.insert(regNO); + } +} + +void AArch64LiveIntervalAnalysis::ComputeLiveIntervalsForEachDefOperand(Insn &insn) { + const InsnDesc *md = insn.GetDesc(); + uint32 opndNum = insn.GetOperandSize(); + for (uint32 i = 0; i < opndNum; ++i) { + if (insn.GetMachineOpcode() == MOP_asm && (i == kAsmOutputListOpnd || i == kAsmClobberListOpnd)) { + for (auto &opnd : static_cast<ListOperand&>(insn.GetOperand(i)).GetOperands()) { + SetupLiveIntervalByOp(*static_cast<RegOperand*>(opnd), insn, true); + } + continue; + } + Operand &opnd = insn.GetOperand(i); + if (opnd.IsMemoryAccessOperand()) { + auto &memOpnd = static_cast<MemOperand&>(opnd); + if (!memOpnd.IsIntactIndexed()) { + SetupLiveIntervalByOp(opnd, insn, true); + } + } + if (!md->GetOpndDes(i)->IsRegDef()) { + continue; + } + SetupLiveIntervalByOp(opnd, insn, true); + } +} + +void AArch64LiveIntervalAnalysis::ComputeLiveIntervalsForEachUseOperand(Insn &insn) { + const InsnDesc *md = insn.GetDesc(); + uint32 opndNum = insn.GetOperandSize(); + for (uint32 i = 0; i < opndNum; ++i) { + if (insn.GetMachineOpcode() == MOP_asm && i == kAsmInputListOpnd) { + for (auto &opnd : static_cast<ListOperand&>(insn.GetOperand(i)).GetOperands()) { + SetupLiveIntervalByOp(*static_cast<RegOperand*>(opnd), insn, false); + } + continue; + } + if (md->GetOpndDes(i)->IsRegDef() && !md->GetOpndDes(i)->IsRegUse()) { + continue; + } + Operand &opnd = insn.GetOperand(i); + if (opnd.IsList()) { + auto &listOpnd = static_cast<ListOperand&>(opnd); + for (auto &op : listOpnd.GetOperands()) { + SetupLiveIntervalByOp(*op, insn, false); + } + } else if (opnd.IsMemoryAccessOperand()) { + auto &memOpnd = static_cast<MemOperand&>(opnd); + Operand *base = memOpnd.GetBaseRegister(); + Operand *offset =
memOpnd.GetIndexRegister(); + if (base != nullptr) { + SetupLiveIntervalByOp(*base, insn, false); + } + if (offset != nullptr) { + SetupLiveIntervalByOp(*offset, insn, false); + } + } else if (opnd.IsPhi()) { + auto &phiOpnd = static_cast(opnd); + for (auto &opIt : phiOpnd.GetOperands()) { + SetupLiveIntervalByOp(*opIt.second, insn, false); + } + } else { + SetupLiveIntervalByOp(opnd, insn, false); + } + } +} + +/* handle live range for bb->live_out */ +void AArch64LiveIntervalAnalysis::SetupLiveIntervalInLiveOut(regno_t liveOut, const BB &bb, uint32 currPoint) { + --currPoint; + + if (liveOut >= kAllRegNum) { + (void)vregLive.insert(liveOut); + LiveInterval *lr = GetOrCreateLiveInterval(liveOut); + if (lr == nullptr) { + return; + } + lr->AddRange(bb.GetId(), currPoint, false); + return; + } +} + +void AArch64LiveIntervalAnalysis::CollectCandidate() { + for (size_t bbIdx = bfs->sortedBBs.size(); bbIdx > 0; --bbIdx) { + BB *bb = bfs->sortedBBs[bbIdx - 1]; + + FOR_BB_INSNS_SAFE(insn, bb, ninsn) { + if (!insn->IsMachineInstruction()) { + continue; + } + if (IsRegistersCopy(*insn)) { + RegOperand ®Dest = static_cast(insn->GetOperand(kInsnFirstOpnd)); + RegOperand ®Src = static_cast(insn->GetOperand(kInsnSecondOpnd)); + if (regDest.GetRegisterNumber() == regSrc.GetRegisterNumber()) { + continue; + } + if (regDest.IsVirtualRegister()) { + candidates.insert(regDest.GetRegisterNumber()); + } + if (regSrc.IsVirtualRegister()) { + candidates.insert(regSrc.GetRegisterNumber()); + } + } + } + } +} + +bool AArch64LiveIntervalAnalysis::IsRegistersCopy(Insn &insn) { + MOperator mOp = insn.GetMachineOpcode(); + if (mOp == MOP_xmovrr || mOp == MOP_wmovrr || mOp == MOP_xvmovs || mOp == MOP_xvmovd) { + return true; + } + return false; +} + +void AArch64LiveIntervalAnalysis::ComputeLiveIntervals() { + /* colloct refpoints and build interfere only for cands. 
*/ + CollectCandidate(); + + uint32 currPoint = static_cast(cgFunc->GetTotalNumberOfInstructions()) + + static_cast(bfs->sortedBBs.size()); + /* distinguish use/def */ + CHECK_FATAL(currPoint < (INT_MAX >> 2), "integer overflow check"); + currPoint = currPoint << 2; + for (size_t bbIdx = bfs->sortedBBs.size(); bbIdx > 0; --bbIdx) { + BB *bb = bfs->sortedBBs[bbIdx - 1]; + + vregLive.clear(); + for (auto liveOut : bb->GetLiveOutRegNO()) { + SetupLiveIntervalInLiveOut(liveOut, *bb, currPoint); + } + --currPoint; + + if (bb->GetLastInsn() != nullptr && bb->GetLastInsn()->IsMachineInstruction() && bb->GetLastInsn()->IsCall()) { + UpdateCallInfo(); + } + + FOR_BB_INSNS_REV_SAFE(insn, bb, ninsn) { + if (!runAnalysis) { + insn->SetId(currPoint); + } + if (!insn->IsMachineInstruction() && !insn->IsPhi()) { + --currPoint; + if (ninsn != nullptr && ninsn->IsMachineInstruction() && ninsn->IsCall()) { + UpdateCallInfo(); + } + continue; + } + + ComputeLiveIntervalsForEachDefOperand(*insn); + ComputeLiveIntervalsForEachUseOperand(*insn); + + if (ninsn != nullptr && ninsn->IsMachineInstruction() && ninsn->IsCall()) { + UpdateCallInfo(); + } + + /* distinguish use/def */ + currPoint -= 2; + } + for (auto lin : bb->GetLiveInRegNO()) { + if (lin >= kAllRegNum) { + LiveInterval *li = GetLiveInterval(lin); + if (li != nullptr) { + li->AddRange(bb->GetId(), currPoint, currPoint); + } + } + } + /* move one more step for each BB */ + --currPoint; + } + + if (REGCOAL_DUMP) { + LogInfo::MapleLogger() << "\nAfter ComputeLiveIntervals\n"; + Dump(); + } +} + +void AArch64LiveIntervalAnalysis::CheckInterference(LiveInterval &li1, LiveInterval &li2) const { + auto ranges1 = li1.GetRanges(); + auto ranges2 = li2.GetRanges(); + bool conflict = false; + for (auto &range : as_const(ranges1)) { + auto bbid = range.first; + auto posVec1 = range.second; + MapleMap>::const_iterator it = ranges2.find(bbid); + if (it == ranges2.cend()) { + continue; + } else { + /* check overlap */ + auto posVec2 = it->second; + for (auto pos1 : posVec1) { + for (auto pos2 : posVec2) { + if (!((pos1.first < pos2.first && pos1.second < pos2.first) || + (pos2.first < pos1.second && pos2.second < pos1.first))) { + conflict = true; + break; + } + } + } + } + } + if (conflict) { + li1.AddConflict(li2.GetRegNO()); + li2.AddConflict(li1.GetRegNO()); + } + return; +} + +/* replace regDest with regSrc. 
*/ +void AArch64LiveIntervalAnalysis::CoalesceRegPair(RegOperand ®Dest, RegOperand ®Src) { + LiveInterval *lrDest = GetLiveInterval(regDest.GetRegisterNumber()); + LiveInterval *lrSrc = GetLiveInterval(regSrc.GetRegisterNumber()); + CHECK_FATAL(lrDest && lrSrc, "find live interval failed"); + /* replace dest with src */ + if (regDest.GetSize() != regSrc.GetSize()) { + if (!cgFunc->IsExtendReg(regDest.GetRegisterNumber()) && !cgFunc->IsExtendReg(regSrc.GetRegisterNumber())) { + lrDest->AddConflict(lrSrc->GetRegNO()); + lrSrc->AddConflict(lrDest->GetRegNO()); + return; + } + cgFunc->InsertExtendSet(regSrc.GetRegisterNumber()); + } + + regno_t destNO = regDest.GetRegisterNumber(); + /* replace all refPoints */ + for (auto insn : lrDest->GetDefPoint()) { + cgFunc->ReplaceOpndInInsn(regDest, regSrc, *insn, destNO); + } + for (auto insn : lrDest->GetUsePoint()) { + cgFunc->ReplaceOpndInInsn(regDest, regSrc, *insn, destNO); + } + + ASSERT(lrDest && lrSrc, "get live interval failed"); + CoalesceLiveIntervals(*lrDest, *lrSrc); +} + +void AArch64LiveIntervalAnalysis::CollectMoveForEachBB(BB &bb, std::vector &movInsns) const { + FOR_BB_INSNS_SAFE(insn, &bb, ninsn) { + if (!insn->IsMachineInstruction()) { + continue; + } + if (IsRegistersCopy(*insn)) { + auto ®Dest = static_cast(insn->GetOperand(kInsnFirstOpnd)); + auto ®Src = static_cast(insn->GetOperand(kInsnSecondOpnd)); + if (!regSrc.IsVirtualRegister() || !regDest.IsVirtualRegister()) { + continue; + } + if (regSrc.GetRegisterNumber() == regDest.GetRegisterNumber()) { + continue; + } + movInsns.emplace_back(insn); + } + } +} + +void AArch64LiveIntervalAnalysis::CoalesceMoves(std::vector &movInsns, bool phiOnly) { + AArch64CGFunc *a64CGFunc = static_cast(cgFunc); + bool changed = false; + do { + changed = false; + for (auto insn : movInsns) { + RegOperand ®Dest = static_cast(insn->GetOperand(kInsnFirstOpnd)); + RegOperand ®Src = static_cast(insn->GetOperand(kInsnSecondOpnd)); + if (regSrc.GetRegisterNumber() == regDest.GetRegisterNumber()) { + continue; + } + if (!insn->IsPhiMovInsn() && phiOnly) { + continue; + } + if (a64CGFunc->IsRegRematCand(regDest) != a64CGFunc->IsRegRematCand(regSrc)) { + if (insn->IsPhiMovInsn()) { + a64CGFunc->ClearRegRematInfo(regDest); + a64CGFunc->ClearRegRematInfo(regSrc); + } else { + continue; + } + } + if (a64CGFunc->IsRegRematCand(regDest) && a64CGFunc->IsRegRematCand(regSrc) && + !a64CGFunc->IsRegSameRematInfo(regDest, regSrc)) { + if (insn->IsPhiMovInsn()) { + a64CGFunc->ClearRegRematInfo(regDest); + a64CGFunc->ClearRegRematInfo(regSrc); + } else { + continue; + } + } + LiveInterval *li1 = GetLiveInterval(regDest.GetRegisterNumber()); + LiveInterval *li2 = GetLiveInterval(regSrc.GetRegisterNumber()); + if (li1 == nullptr || li2 == nullptr) { + return; + } + CheckInterference(*li1, *li2); + if (!li1->IsConflictWith(regSrc.GetRegisterNumber()) || + (li1->GetDefPoint().size() == 1 && li2->GetDefPoint().size() == 1)) { + if (REGCOAL_DUMP) { + LogInfo::MapleLogger() << "try to coalesce: " << regDest.GetRegisterNumber() << " <- " + << regSrc.GetRegisterNumber() << std::endl; + } + CoalesceRegPair(regDest, regSrc); + changed = true; + } else { + if (insn->IsPhiMovInsn() && phiOnly && REGCOAL_DUMP) { + LogInfo::MapleLogger() << "fail to coalesce: " << regDest.GetRegisterNumber() << " <- " + << regSrc.GetRegisterNumber() << std::endl; + } + } + } + } while (changed); +} + +void AArch64LiveIntervalAnalysis::CoalesceRegisters() { + std::vector movInsns; + AArch64CGFunc *a64CGFunc = static_cast(cgFunc); + if (REGCOAL_DUMP) 
{ + cgFunc->DumpCFGToDot("regcoal-"); + LogInfo::MapleLogger() << "handle function: " << a64CGFunc->GetFunction().GetName() << std::endl; + } + for (size_t bbIdx = bfs->sortedBBs.size(); bbIdx > 0; --bbIdx) { + BB *bb = bfs->sortedBBs[bbIdx - 1]; + + if (!bb->GetCritical()) { + continue; + } + CollectMoveForEachBB(*bb, movInsns); + } + for (size_t bbIdx = bfs->sortedBBs.size(); bbIdx > 0; --bbIdx) { + BB *bb = bfs->sortedBBs[bbIdx - 1]; + + if (bb->GetCritical()) { + continue; + } + CollectMoveForEachBB(*bb, movInsns); + } + + /* handle phi move first. */ + CoalesceMoves(movInsns, true); + + /* clean up dead mov */ + a64CGFunc->CleanupDeadMov(REGCOAL_DUMP); +} + +} /* namespace maplebe */ diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_reg_info.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_reg_info.cpp new file mode 100644 index 0000000000000000000000000000000000000000..179d7f998977d401f01246a79347fd3b6650d91a --- /dev/null +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_reg_info.cpp @@ -0,0 +1,139 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "aarch64_cgfunc.h" +#include "aarch64_cg.h" +#include "becommon.h" + +namespace maplebe { +using namespace maple; + +void AArch64RegInfo::Init() { + for (regno_t regNO = kRinvalid; regNO < kMaxRegNum; ++regNO) { + /* when yieldpoint is enabled, x19 is reserved. */ + if (IsYieldPointReg(regNO)) { + continue; + } + if (regNO == R29 && GetCurrFunction()->UseFP()) { + continue; + } + if (!AArch64Abi::IsAvailableReg(static_cast(regNO))) { + continue; + } + if (AArch64isa::IsGPRegister(static_cast(regNO))) { + AddToIntRegs(regNO); + } else { + AddToFpRegs(regNO); + } + AddToAllRegs(regNO); + } + return; +} + +void AArch64RegInfo::Fini() { + AArch64CGFunc *a64CGFunc = static_cast(GetCurrFunction()); + a64CGFunc->AddtoCalleeSaved(RFP); + a64CGFunc->AddtoCalleeSaved(RLR); + a64CGFunc->NoteFPLRAddedToCalleeSavedList(); +} + +void AArch64RegInfo::SaveCalleeSavedReg(MapleSet savedRegs) { + AArch64CGFunc *a64CGFunc = static_cast(GetCurrFunction()); + for (auto reg: savedRegs) { + a64CGFunc->AddtoCalleeSaved(static_cast(reg)); + } +} + +bool AArch64RegInfo::IsSpecialReg(regno_t regno) const { + AArch64reg reg = static_cast(regno); + if ((reg == RLR) || (reg == RSP)) { + return true; + } + + /* when yieldpoint is enabled, the dedicated register can not be allocated. */ + if (IsYieldPointReg(reg)) { + return true; + } + + return false; +} +bool AArch64RegInfo::IsSpillRegInRA(regno_t regNO, bool has3RegOpnd) { + return AArch64Abi::IsSpillRegInRA(static_cast(regNO), has3RegOpnd); +} +bool AArch64RegInfo::IsCalleeSavedReg(regno_t regno) const { + return AArch64Abi::IsCalleeSavedReg(static_cast(regno)); +} +bool AArch64RegInfo::IsYieldPointReg(regno_t regno) const { + /* when yieldpoint is enabled, x19 is reserved. 
*/ + if (GetCurrFunction()->GetCG()->GenYieldPoint()) { + return (static_cast(regno) == RYP); + } + return false; +} +bool AArch64RegInfo::IsUnconcernedReg(regno_t regNO) const { + /* RFP = 32, RLR = 31, RSP = 33, RZR = 34, ccReg = 68 */ + if ((regNO >= RLR && regNO <= RZR) || regNO == RFP || regNO == kRFLAG) { + return true; + } + + /* when yieldpoint is enabled, the RYP(x19) can not be used. */ + if (IsYieldPointReg(regNO)) { + return true; + } + return false; +} + +bool AArch64RegInfo::IsUnconcernedReg(const RegOperand ®Opnd) const { + RegType regType = regOpnd.GetRegisterType(); + if (regType == kRegTyCc || regType == kRegTyVary) { + return true; + } + uint32 regNO = regOpnd.GetRegisterNumber(); + if (regNO == RZR) { + return true; + } + return IsUnconcernedReg(regNO); +} + +/* r16,r17 are used besides ra. */ +bool AArch64RegInfo::IsReservedReg(regno_t regNO, bool doMultiPass) const { + if (!doMultiPass || GetCurrFunction()->GetMirModule().GetSrcLang() != kSrcLangC) { + return (regNO == R16) || (regNO == R17); + } else { + return (regNO == R16); + } +} + +RegOperand *AArch64RegInfo::GetOrCreatePhyRegOperand(regno_t regNO, uint32 size, maplebe::RegType kind, uint32 flag) { + AArch64CGFunc *aarch64CgFunc = static_cast(GetCurrFunction()); + return &aarch64CgFunc->GetOrCreatePhysicalRegisterOperand(static_cast(regNO), size, kind, flag); +} + +Insn *AArch64RegInfo::BuildStrInsn(uint32 regSize, PrimType stype, RegOperand &phyOpnd, MemOperand &memOpnd) { + AArch64CGFunc *a64CGFunc = static_cast(GetCurrFunction()); + return &a64CGFunc->GetInsnBuilder()->BuildInsn(a64CGFunc->PickStInsn(regSize, stype), phyOpnd, memOpnd); +} + +Insn *AArch64RegInfo::BuildLdrInsn(uint32 regSize, PrimType stype, RegOperand &phyOpnd, MemOperand &memOpnd) { + AArch64CGFunc *a64CGFunc = static_cast(GetCurrFunction()); + return &a64CGFunc->GetInsnBuilder()->BuildInsn(a64CGFunc->PickLdInsn(regSize, stype), phyOpnd, memOpnd); +} + +MemOperand *AArch64RegInfo::AdjustMemOperandIfOffsetOutOfRange(MemOperand *memOpnd, regno_t vrNum, + bool isDest, Insn &insn, regno_t regNum, bool &isOutOfRange) { + AArch64CGFunc *a64CGFunc = static_cast(GetCurrFunction()); + return a64CGFunc->AdjustMemOperandIfOffsetOutOfRange(memOpnd, static_cast(vrNum), isDest, insn, + static_cast(regNum), isOutOfRange); +} +} /* namespace maplebe */ diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_regsaves.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_regsaves.cpp new file mode 100644 index 0000000000000000000000000000000000000000..76c3c17d630cde790d04a810413005a2529956e3 --- /dev/null +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_regsaves.cpp @@ -0,0 +1,856 @@ +/* + * Copyright (c) [2022] Futurewei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "aarch64_regsaves.h" +#include "aarch64_cg.h" +#include "aarch64_live.h" +#include "aarch64_cg.h" +#include "aarch64_proepilog.h" +#include "cg_dominance.h" +#include "cg_ssa_pre.h" +#include "cg_ssu_pre.h" + +namespace maplebe { + +#define RS_DUMP 0 +#define M_LOG LogInfo::MapleLogger() + +#define SKIP_FPLR(REG) \ + if (REG >= R29 && REG < V8) { \ + continue; \ + } + +void AArch64RegSavesOpt::InitData() { + calleeBitsDef = cgFunc->GetMemoryPool()->NewArray(cgFunc->NumBBs()); + errno_t retDef = memset_s(calleeBitsDef, cgFunc->NumBBs() * sizeof(CalleeBitsType), + 0, cgFunc->NumBBs() * sizeof(CalleeBitsType)); + calleeBitsUse = cgFunc->GetMemoryPool()->NewArray(cgFunc->NumBBs()); + errno_t retUse = memset_s(calleeBitsUse, cgFunc->NumBBs() * sizeof(CalleeBitsType), + 0, cgFunc->NumBBs() * sizeof(CalleeBitsType)); + calleeBitsAcc = cgFunc->GetMemoryPool()->NewArray(cgFunc->NumBBs()); + errno_t retAccDef = memset_s(calleeBitsAcc, cgFunc->NumBBs() * sizeof(CalleeBitsType), + 0, cgFunc->NumBBs() * sizeof(CalleeBitsType)); + CHECK_FATAL(retDef == EOK && retUse == EOK && retAccDef == EOK, "memset_s of calleesBits failed"); + + AArch64CGFunc *aarchCGFunc = static_cast(cgFunc); + const MapleVector &sp = aarchCGFunc->GetCalleeSavedRegs(); + if (!sp.empty()) { + if (std::find(sp.begin(), sp.end(), RFP) != sp.end()) { + aarchCGFunc->GetProEpilogSavedRegs().push_back(RFP); + } + if (std::find(sp.begin(), sp.end(), RLR) != sp.end()) { + aarchCGFunc->GetProEpilogSavedRegs().push_back(RLR); + } + } + + for (auto bb : bfs->sortedBBs) { + SetId2bb(bb); + } +} + + +void AArch64RegSavesOpt::CollectLiveInfo(const BB &bb, const Operand &opnd, bool isDef, bool isUse) { + if (!opnd.IsRegister()) { + return; + } + const RegOperand ®Opnd = static_cast(opnd); + regno_t regNO = regOpnd.GetRegisterNumber(); + if (!AArch64Abi::IsCalleeSavedReg(static_cast(regNO)) || + (regNO >= R29 && regNO <= R31)) { + return; /* check only callee-save registers */ + } + RegType regType = regOpnd.GetRegisterType(); + if (regType == kRegTyVary) { + return; + } + if (isDef) { + /* First def */ + if (!IsCalleeBitSet(GetCalleeBitsDef(), bb.GetId(), regNO)) { + SetCalleeBit(GetCalleeBitsDef(), bb.GetId(), regNO); + } + } + if (isUse) { + /* Last use */ + SetCalleeBit(GetCalleeBitsUse(), bb.GetId(), regNO); + } +} + +void AArch64RegSavesOpt::GenerateReturnBBDefUse(const BB &bb) { + PrimType returnType = cgFunc->GetFunction().GetReturnType()->GetPrimType(); + AArch64CGFunc *aarchCGFunc = static_cast(cgFunc); + if (IsPrimitiveFloat(returnType)) { + Operand &phyOpnd = + aarchCGFunc->GetOrCreatePhysicalRegisterOperand(static_cast(V0), k64BitSize, kRegTyFloat); + CollectLiveInfo(bb, phyOpnd, false, true); + } else if (IsPrimitiveInteger(returnType)) { + Operand &phyOpnd = + aarchCGFunc->GetOrCreatePhysicalRegisterOperand(static_cast(R0), k64BitSize, kRegTyInt); + CollectLiveInfo(bb, phyOpnd, false, true); + } +} + +void AArch64RegSavesOpt::ProcessAsmListOpnd(const BB &bb, const Operand &opnd, uint32 idx) { + bool isDef = false; + bool isUse = false; + switch (idx) { + case kAsmOutputListOpnd: + case kAsmClobberListOpnd: { + isDef = true; + break; + } + case kAsmInputListOpnd: { + isUse = true; + break; + } + default: + return; + } + auto &listOpnd = static_cast(opnd); + for (auto &op : listOpnd.GetOperands()) { + CollectLiveInfo(bb, *op, isDef, isUse); + } +} + +void AArch64RegSavesOpt::ProcessListOpnd(const BB &bb, const Operand &opnd) { + auto &listOpnd = static_cast(opnd); + for (auto &op : listOpnd.GetOperands()) { + 
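+ /* every register in a non-asm list operand is recorded as a use only */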
CollectLiveInfo(bb, *op, false, true); + } +} + +void AArch64RegSavesOpt::ProcessMemOpnd(const BB &bb, Operand &opnd) { + auto &memOpnd = static_cast(opnd); + Operand *base = memOpnd.GetBaseRegister(); + Operand *offset = memOpnd.GetIndexRegister(); + if (base != nullptr) { + CollectLiveInfo(bb, *base, !memOpnd.IsIntactIndexed(), true); + } + if (offset != nullptr) { + CollectLiveInfo(bb, *offset, false, true); + } +} + +void AArch64RegSavesOpt::ProcessCondOpnd(const BB &bb) { + Operand &rflag = cgFunc->GetOrCreateRflag(); + CollectLiveInfo(bb, rflag, false, true); +} + +void AArch64RegSavesOpt::ProcessOperands(const Insn &insn, const BB &bb) { + const InsnDesc *md = insn.GetDesc(); + bool isAsm = (insn.GetMachineOpcode() == MOP_asm); + + uint32 opndNum = insn.GetOperandSize(); + for (uint32 i = 0; i < opndNum; ++i) { + Operand &opnd = insn.GetOperand(i); + auto *regProp = md->opndMD[i]; + bool isDef = regProp->IsRegDef(); + bool isUse = regProp->IsRegUse(); + if (opnd.IsList()) { + if (isAsm) { + ProcessAsmListOpnd(bb, opnd, i); + } else { + ProcessListOpnd(bb, opnd); + } + } else if (opnd.IsMemoryAccessOperand()) { + ProcessMemOpnd(bb, opnd); + } else if (opnd.IsConditionCode()) { + ProcessCondOpnd(bb); + } else { + CollectLiveInfo(bb, opnd, isDef, isUse); + } + } /* for all operands */ +} + +static bool IsBackEdge(BB* bb, BB* targ) { + CGFuncLoops *loop = bb->GetLoop(); + if (loop != nullptr && loop->GetHeader() == bb) { + if (find(loop->GetBackedge().begin(), loop->GetBackedge().end(), targ) != loop->GetBackedge().end()) { + return true; + } + } + return false; +} + +void AArch64RegSavesOpt::GenAccDefs() { + /* Set up accumulated callee def bits in all blocks */ + for (auto bb : bfs->sortedBBs) { + if (bb->GetPreds().size() == 0) { + SetCalleeBit(GetCalleeBitsAcc(), bb->GetId(), GetBBCalleeBits(GetCalleeBitsDef(), bb->GetId())); + } else { + CalleeBitsType curbbBits = GetBBCalleeBits(GetCalleeBitsDef(), bb->GetId()); + int64 n = -1; + CalleeBitsType tmp = static_cast(n); + for (auto pred : bb->GetPreds()) { + if (IsBackEdge(bb, pred)) { + continue; + } + tmp &= GetBBCalleeBits(GetCalleeBitsAcc(), pred->GetId()); + } + SetCalleeBit(GetCalleeBitsAcc(), bb->GetId(), curbbBits | tmp); + } + } +} + +/* Record in each local BB the 1st def and the last use of a callee-saved + register */ +void AArch64RegSavesOpt::GenRegDefUse() { + for (auto bbp : bfs->sortedBBs) { + BB &bb = *bbp; + if (bb.GetKind() == BB::kBBReturn) { + GenerateReturnBBDefUse(bb); + } + if (bb.IsEmpty()) { + continue; + } + + FOR_BB_INSNS(insn, &bb) { + if (!insn->IsMachineInstruction()) { + continue; + } + ProcessOperands(*insn, bb); + } /* for all insns */ + } /* for all sortedBBs */ + + GenAccDefs(); + +#if RS_DUMP + M_LOG << "CalleeBits for " << cgFunc->GetName() << ":\n"; + for (BBID i = 1; i < cgFunc->NumBBs(); ++i) { + M_LOG << i << " : " << calleeBitsDef[i] << " " << calleeBitsUse[i] << " " << calleeBitsAcc[i] << "\n"; + } +#endif +} + +bool AArch64RegSavesOpt::CheckForUseBeforeDefPath() { + /* Check if any block has a use without a def as shown in its accumulated + calleeBitsDef from above */ + BBID found = 0; + CalleeBitsType use; + CalleeBitsType acc; + for (BBID bid = 0; bid < cgFunc->NumBBs(); ++bid) { + use = GetBBCalleeBits(GetCalleeBitsUse(), bid); + acc = GetBBCalleeBits(GetCalleeBitsAcc(), bid); + if ((use & acc) != use) { + found = bid; + break; + } + } + if (found) { +#if RS_DUMP + CalleeBitsType mask = 1; + for (uint32 i = 0; i < static_cast(sizeof(CalleeBitsType) << k3BitSize); ++i) { + regno_t reg = 
ReverseRegBitMap(i); + if ((use & mask) != 0 && (acc & mask) == 0) { + M_LOG << "R" << (reg - 1) << " in BB" << found << " is in a use before def path\n"; + } + mask <<= 1; + } +#endif + return true; + } + return false; +} + +void AArch64RegSavesOpt::PrintBBs() const { + M_LOG << "RegSaves LiveIn/Out of BFS nodes:\n"; + for (auto *bb : bfs->sortedBBs) { + M_LOG << "< === > "; + M_LOG << bb->GetId(); + M_LOG << " pred:["; + for (auto predBB : bb->GetPreds()) { + M_LOG << " " << predBB->GetId(); + } + M_LOG << "] succs:["; + for (auto succBB : bb->GetSuccs()) { + M_LOG << " " << succBB->GetId(); + } + M_LOG << "]\n LiveIn of [" << bb->GetId() << "]: "; + for (auto liveIn: bb->GetLiveInRegNO()) { + M_LOG << liveIn << " "; + } + M_LOG << "\n LiveOut of [" << bb->GetId() << "]: "; + for (auto liveOut: bb->GetLiveOutRegNO()) { + M_LOG << liveOut << " "; + } + M_LOG << "\n"; + } +} + +/* 1st def MUST not have preceding save in dominator list. Each dominator + block must not have livein or liveout of the register */ +int32 AArch64RegSavesOpt::CheckCriteria(BB *bb, regno_t reg) const { + /* Already a site to save */ + SavedRegInfo *sp = bbSavedRegs[bb->GetId()]; + if (sp != nullptr && sp->ContainSaveReg(reg)) { + return 1; + } + + /* This preceding block has livein OR liveout of reg */ + MapleSet &liveIn = bb->GetLiveInRegNO(); + MapleSet &liveOut = bb->GetLiveOutRegNO(); + if (liveIn.find(reg) != liveIn.end() || + liveOut.find(reg) != liveOut.end()) { + return 2; + } + + return 0; +} + +/* Return true if reg is already to be saved in its dominator list */ +bool AArch64RegSavesOpt::AlreadySavedInDominatorList(const BB *bb, regno_t reg) const { + BB *aBB = GetDomInfo()->GetDom(bb->GetId()); + +#if RS_DUMP + M_LOG << "Checking dom list starting " << bb->GetId() << " for saved R" << (reg - 1) << ":\n "; +#endif + while (!aBB->GetPreds().empty()) { /* can't go beyond prolog */ +#if RS_DUMP + M_LOG << aBB->GetId() << " "; +#endif + int t = CheckCriteria(aBB, reg); + if (t != 0) { +#if RS_DUMP + std::string str = t == 1 ? " saved here, skip!\n" : " has livein/out, skip!\n"; + M_LOG << " --R" << (reg - 1) << str; +#endif + return true; /* previously saved, inspect next reg */ + } + aBB = GetDomInfo()->GetDom(aBB->GetId()); + } + return false; /* not previously saved, to save at bb */ +} + +BB* AArch64RegSavesOpt::FindLoopDominator(BB *bb, regno_t reg, bool *done) { + BB *bbDom = bb; + while (bbDom->GetLoop() != nullptr) { + bbDom = GetDomInfo()->GetDom(bbDom->GetId()); + if (CheckCriteria(bbDom, reg) != 0) { + *done = true; + break; + } + ASSERT(bbDom, "Can't find dominator for save location"); + } + return bbDom; +} + +/* If the newly found blk is a dominator of blk(s) in the current + to be saved list, remove these blks from bbSavedRegs */ +void AArch64RegSavesOpt::CheckAndRemoveBlksFromCurSavedList(SavedBBInfo *sp, BB *bbDom, regno_t reg) { + for (BB *sbb : sp->GetBBList()) { + for (BB *abb = sbb; !abb->GetPreds().empty();) { + if (abb->GetId() == bbDom->GetId()) { + /* Found! Don't plan to save in abb */ + sp->RemoveBB(sbb); + bbSavedRegs[sbb->GetId()]->RemoveSaveReg(reg); +#if RS_DUMP + M_LOG << " --R" << (reg - 1) << " save removed from BB" << sbb->GetId() << "\n"; +#endif + break; + } + abb = GetDomInfo()->GetDom(abb->GetId()); + } + } +} + +/* Determine callee-save regs save locations and record them in bbSavedRegs. + Save is needed for a 1st def callee-save register at its dominator block + outside any loop. 
*/ +void AArch64RegSavesOpt::DetermineCalleeSaveLocationsDoms() { +#if RS_DUMP + M_LOG << "Determining regsave sites using dom list for " << cgFunc->GetName() << ":\n"; +#endif + for (auto *bb : bfs->sortedBBs) { +#if RS_DUMP + M_LOG << "BB: " << bb->GetId() << "\n"; +#endif + CalleeBitsType c = GetBBCalleeBits(GetCalleeBitsDef(), bb->GetId()); + if (c == 0) { + continue; + } + CalleeBitsType mask = 1; + for (uint32 i = 0; i < static_cast(sizeof(CalleeBitsType) << 3); ++i) { + MapleSet &liveIn = bb->GetLiveInRegNO(); + regno_t reg = ReverseRegBitMap(i); + if ((c & mask) != 0 && liveIn.find(reg) == liveIn.end()) { /* not livein */ + BB *bbDom = bb; /* start from current BB */ + bool done = false; + bbDom = FindLoopDominator(bbDom, reg, &done); + if (done) { + mask <<= 1; + continue; + } + + /* Check if a dominator of bbDom was already a location to save */ + if (AlreadySavedInDominatorList(bbDom, reg)) { + mask <<= 1; + continue; /* no need to save again, next reg */ + } + + /* If the newly found blk is a dominator of blk(s) in the current + to be saved list, remove these blks from bbSavedRegs */ + uint32 creg = i; + SavedBBInfo *sp = regSavedBBs[creg]; + if (sp == nullptr) { + regSavedBBs[creg] = memPool->New(alloc); + } else { + CheckAndRemoveBlksFromCurSavedList(sp, bbDom, reg); + } + regSavedBBs[creg]->InsertBB(bbDom); + + BBID bid = bbDom->GetId(); +#if RS_DUMP + M_LOG << " --R" << (reg - 1); + M_LOG << " to save in " << bid << "\n"; +#endif + SavedRegInfo *ctx = GetbbSavedRegsEntry(bid); + if (!ctx->ContainSaveReg(reg)) { + ctx->InsertSaveReg(reg); + } + } + mask <<= 1; + CalleeBitsType t = c; + t >>= 1; + if (t == 0) { + break; /* short cut */ + } + } + } +} + +void AArch64RegSavesOpt::DetermineCalleeSaveLocationsPre() { + AArch64CGFunc *aarchCGFunc = static_cast(cgFunc); + MapleAllocator sprealloc(memPool); +#if RS_DUMP + M_LOG << "Determining regsave sites using ssa_pre for " << cgFunc->GetName() << ":\n"; +#endif + const MapleVector &callees = aarchCGFunc->GetCalleeSavedRegs(); + /* do 2 regs at a time to force store pairs */ + for (uint32 i = 0; i < callees.size(); ++i) { + AArch64reg reg1 = callees[i]; + SKIP_FPLR(reg1); + AArch64reg reg2 = kRinvalid; + if ((i + 1) < callees.size()) { + reg2 = callees[i + 1]; + SKIP_FPLR(reg2); + ++i; + } + + SsaPreWorkCand wkCand(&sprealloc); + for (BBID bid = 1; bid < static_cast(bbSavedRegs.size()); ++bid) { + /* Set the BB occurrences of this callee-saved register */ + if (IsCalleeBitSet(GetCalleeBitsDef(), bid, reg1) || + IsCalleeBitSet(GetCalleeBitsUse(), bid, reg1)) { + (void)wkCand.occBBs.insert(bid); + } + if (reg2 != kRinvalid) { + if (IsCalleeBitSet(GetCalleeBitsDef(), bid, reg2) || + IsCalleeBitSet(GetCalleeBitsUse(), bid, reg2)) { + (void)wkCand.occBBs.insert(bid); + } + } + } + DoSavePlacementOpt(cgFunc, GetDomInfo(), &wkCand); + if (wkCand.saveAtEntryBBs.empty()) { + /* something gone wrong, skip this reg */ + wkCand.saveAtProlog = true; + } + if (wkCand.saveAtProlog) { + /* Save cannot be applied, skip this reg and place save/restore + in prolog/epilog */ + MapleVector &pe = aarchCGFunc->GetProEpilogSavedRegs(); + if (std::find(pe.begin(), pe.end(), reg1) == pe.end()) { + pe.push_back(reg1); + } + if (reg2 != kRinvalid && std::find(pe.begin(), pe.end(), reg2) == pe.end()) { + pe.push_back(reg2); + } +#if RS_DUMP + M_LOG << "Save R" << (reg1 - 1) << " n/a, do in Pro/Epilog\n"; + if (reg2 != kRinvalid) { + M_LOG << " R " << (reg2 - 1) << " n/a, do in Pro/Epilog\n"; + } +#endif + continue; + } + if (!wkCand.saveAtEntryBBs.empty()) { 
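+ /* each BB chosen by the save-placement analysis becomes a save site for reg1 (and for reg2 when a register pair was formed) */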
+ for (BBID entBB : wkCand.saveAtEntryBBs) { +#if RS_DUMP + std::string r = reg1 <= R28 ? "R" : "V"; + M_LOG << "BB " << entBB << " save for : " << r << (reg1 - 1) << "\n"; + if (reg2 != kRinvalid) { + std::string r2 = reg2 <= R28 ? "R" : "V"; + M_LOG << " : " << r2 << (reg2 - 1) << "\n"; + } +#endif + GetbbSavedRegsEntry(entBB)->InsertSaveReg(reg1); + if (reg2 != kRinvalid) { + GetbbSavedRegsEntry(entBB)->InsertSaveReg(reg2); + } + } + } + } +} + +void AArch64RegSavesOpt::CheckCriticalEdge(BB *bb, AArch64reg reg) { + for (BB *sbb : bb->GetSuccs()) { + if (sbb->GetPreds().size() > 1) { + CHECK_FATAL(false, "critical edge detected"); + } + /* To insert at all succs */ + GetbbSavedRegsEntry(sbb->GetId())->InsertEntryReg(reg); + } +} + +/* Restore cannot be applied, skip this reg and place save/restore + in prolog/epilog */ +void AArch64RegSavesOpt::RevertToRestoreAtEpilog(AArch64reg reg) { + AArch64CGFunc *aarchCGFunc = static_cast(cgFunc); + for (size_t bid = 1; bid < bbSavedRegs.size(); ++bid) { + SavedRegInfo *sp = bbSavedRegs[bid]; + if (sp != nullptr && !sp->GetSaveSet().empty() && sp->ContainSaveReg(reg)) { + sp->RemoveSaveReg(reg); + } + } + MapleVector &pe = aarchCGFunc->GetProEpilogSavedRegs(); + if (std::find(pe.begin(), pe.end(), reg) == pe.end()) { + pe.push_back(reg); + } +#if RS_DUMP + M_LOG << "Restore R" << (reg - 1) << " n/a, do in Pro/Epilog\n"; +#endif +} + +/* Determine calleesave regs restore locations by calling ssu-pre, + previous bbSavedRegs memory is cleared and restore locs recorded in it */ +void AArch64RegSavesOpt::DetermineCalleeRestoreLocations() { + AArch64CGFunc *aarchCGFunc = static_cast(cgFunc); + MapleAllocator sprealloc(memPool); +#if RS_DUMP + M_LOG << "Determining Callee Restore Locations:\n"; +#endif + const MapleVector &callees = aarchCGFunc->GetCalleeSavedRegs(); + for (auto reg : callees) { + SKIP_FPLR(reg); + + SPreWorkCand wkCand(&sprealloc); + for (BBID bid = 1; bid < static_cast(bbSavedRegs.size()); ++bid) { + /* Set the saved BB locations of this callee-saved register */ + SavedRegInfo *sp = bbSavedRegs[bid]; + if (sp != nullptr) { + if (sp->ContainSaveReg(reg)) { + (void)wkCand.saveBBs.insert(bid); + } + } + /* Set the BB occurrences of this callee-saved register */ + if (IsCalleeBitSet(GetCalleeBitsDef(), bid, reg) || + IsCalleeBitSet(GetCalleeBitsUse(), bid, reg)) { + (void)wkCand.occBBs.insert(bid); + } + } + DoRestorePlacementOpt(cgFunc, GetPostDomInfo(), &wkCand); + if (wkCand.saveBBs.empty()) { + /* something gone wrong, skip this reg */ + wkCand.restoreAtEpilog = true; + } + if (wkCand.restoreAtEpilog) { + RevertToRestoreAtEpilog(reg); + continue; + } + if (!wkCand.restoreAtEntryBBs.empty() || !wkCand.restoreAtExitBBs.empty()) { + for (BBID entBB : wkCand.restoreAtEntryBBs) { +#if RS_DUMP + std::string r = reg <= R28 ? 
"r" : "v"; + M_LOG << "BB " << entBB << " restore: " << r << (reg - 1) << "\n"; +#endif + GetbbSavedRegsEntry(entBB)->InsertEntryReg(reg); + } + for (BBID exitBB : wkCand.restoreAtExitBBs) { + BB *bb = GetId2bb(exitBB); + if (bb->GetKind() == BB::kBBIgoto) { + CHECK_FATAL(false, "igoto detected"); + } + Insn *lastInsn = bb->GetLastMachineInsn(); + if (lastInsn != nullptr && (lastInsn->IsBranch() || lastInsn->IsTailCall()) && + lastInsn->GetOperand(0).IsRegister() && + static_cast(lastInsn->GetOperand(0)).GetRegisterNumber() == reg) { + RevertToRestoreAtEpilog(reg); + } + if (lastInsn != nullptr && (lastInsn->IsBranch() || lastInsn->IsTailCall())) { + /* To insert in this block - 1 instr */ + SavedRegInfo *sp = GetbbSavedRegsEntry(exitBB); + sp->InsertExitReg(reg); + sp->insertAtLastMinusOne = true; + } else if (bb->GetSuccs().size() > 1) { + CheckCriticalEdge(bb, reg); + } else { + /* otherwise, BB_FT etc */ + GetbbSavedRegsEntry(exitBB)->InsertExitReg(reg); + } +#if RS_DUMP + std::string r = reg <= R28 ? "R" : "V"; + M_LOG << "BB " << exitBB << " restore: " << r << (reg - 1) << "\n"; +#endif + } + } + } +} + +int32 AArch64RegSavesOpt::FindCalleeBase() const { + int32 offset = static_cast( + static_cast(cgFunc->GetMemlayout())->RealStackFrameSize() - + (static_cast(cgFunc)->SizeOfCalleeSaved() - (kDivide2 * kIntregBytelen) /* FP/LR */) - + cgFunc->GetMemlayout()->SizeOfArgsToStackPass()); + + if (cgFunc->GetFunction().GetAttr(FUNCATTR_varargs)) { + /* GR/VR save areas are above the callee save area */ + AArch64MemLayout *ml = static_cast(cgFunc->GetMemlayout()); + int saveareasize = static_cast(RoundUp(ml->GetSizeOfGRSaveArea(), GetPointerSize() * k2BitSize) + + RoundUp(ml->GetSizeOfVRSaveArea(), GetPointerSize() * k2BitSize)); + offset -= saveareasize; + } + return offset; +} + +void AArch64RegSavesOpt::SetupRegOffsets() { + AArch64CGFunc *aarchCGFunc = static_cast(cgFunc); + const MapleVector &proEpilogRegs = aarchCGFunc->GetProEpilogSavedRegs(); + int32 regsInProEpilog = static_cast(proEpilogRegs.size() - 2); + const MapleVector &callees = aarchCGFunc->GetCalleeSavedRegs(); + + int32 offset = FindCalleeBase(); + for (auto reg : callees) { + SKIP_FPLR(reg); + if (std::count(proEpilogRegs.begin(), proEpilogRegs.end(), reg)) { + continue; + } + if (regOffset.find(reg) == regOffset.end()) { + regOffset[reg] = static_cast(offset + (regsInProEpilog * kBitsPerByte)); + offset += static_cast(kIntregBytelen); + } + } +} + +void AArch64RegSavesOpt::InsertCalleeSaveCode() { + BBID bid = 0; + BB *saveBB = cgFunc->GetCurBB(); + AArch64CGFunc *aarchCGFunc = static_cast(cgFunc); + +#if RS_DUMP + M_LOG << "Inserting Save for " << cgFunc->GetName() << ":\n"; +#endif + int32 offset = FindCalleeBase(); + for (BB *bb : bfs->sortedBBs) { + bid = bb->GetId(); + aarchCGFunc->SetSplitBaseOffset(0); + if (bbSavedRegs[bid] != nullptr && !bbSavedRegs[bid]->GetSaveSet().empty()) { + aarchCGFunc->GetDummyBB()->ClearInsns(); + cgFunc->SetCurBB(*aarchCGFunc->GetDummyBB()); + AArch64reg intRegFirstHalf = kRinvalid; + AArch64reg fpRegFirstHalf = kRinvalid; + for (auto areg : bbSavedRegs[bid]->GetSaveSet()) { + AArch64reg reg = static_cast(areg); + RegType regType = AArch64isa::IsGPRegister(reg) ? kRegTyInt : kRegTyFloat; + AArch64reg &firstHalf = AArch64isa::IsGPRegister(reg) ? intRegFirstHalf : fpRegFirstHalf; + std::string r = reg <= R28 ? 
"R" : "V"; + /* If reg not seen before, record offset and then update */ + if (regOffset.find(areg) == regOffset.end()) { + regOffset[areg] = static_cast(offset); + offset += static_cast(kIntregBytelen); + } + if (firstHalf == kRinvalid) { + /* 1st half in reg pair */ + firstHalf = reg; +#if RS_DUMP + M_LOG << r << (reg - 1) << " save in BB" << bid << " Offset = " << regOffset[reg]<< "\n"; +#endif + } else { + if (regOffset[reg] == (regOffset[firstHalf] + k8ByteSize)) { + /* firstHalf & reg consecutive, make regpair */ + AArch64GenProEpilog::AppendInstructionPushPair(*cgFunc, firstHalf, reg, regType, + static_cast(regOffset[firstHalf])); + } else if (regOffset[firstHalf] == (regOffset[reg] + k8ByteSize)) { + /* reg & firstHalf consecutive, make regpair */ + AArch64GenProEpilog::AppendInstructionPushPair(*cgFunc, reg, firstHalf, regType, + static_cast(regOffset[reg])); + } else { + /* regs cannot be paired */ + AArch64GenProEpilog::AppendInstructionPushSingle(*cgFunc, firstHalf, regType, + static_cast(regOffset[firstHalf])); + AArch64GenProEpilog::AppendInstructionPushSingle(*cgFunc, reg, regType, + static_cast(regOffset[reg])); + } + firstHalf = kRinvalid; +#if RS_DUMP + M_LOG << r << (reg - 1) << " save in BB" << bid << " Offset = " << regOffset[reg]<< "\n"; +#endif + } + } + + if (intRegFirstHalf != kRinvalid) { + AArch64GenProEpilog::AppendInstructionPushSingle(*cgFunc, + intRegFirstHalf, kRegTyInt, static_cast(regOffset[intRegFirstHalf])); + } + + if (fpRegFirstHalf != kRinvalid) { + AArch64GenProEpilog::AppendInstructionPushSingle(*cgFunc, + fpRegFirstHalf, kRegTyFloat, static_cast(regOffset[fpRegFirstHalf])); + } + bb->InsertAtBeginning(*aarchCGFunc->GetDummyBB()); + } + } + cgFunc->SetCurBB(*saveBB); +} + +void AArch64RegSavesOpt::PrintSaveLocs(AArch64reg reg) { + M_LOG << " for save @BB [ "; + for (size_t b = 1; b < bbSavedRegs.size(); ++b) { + if (bbSavedRegs[b] != nullptr && bbSavedRegs[b]->ContainSaveReg(reg)) { + M_LOG << b << " "; + } + } + M_LOG << "]\n"; +} + +void AArch64RegSavesOpt::InsertCalleeRestoreCode() { + BBID bid = 0; + BB *saveBB = cgFunc->GetCurBB(); + AArch64CGFunc *aarchCGFunc = static_cast(cgFunc); + +#if RS_DUMP + M_LOG << "Inserting Restore: \n"; +#endif + int32 offset = FindCalleeBase(); + for (BB *bb : bfs->sortedBBs) { + bid = bb->GetId(); + aarchCGFunc->SetSplitBaseOffset(0); + SavedRegInfo *sp = bbSavedRegs[bid]; + if (sp != nullptr) { + if (sp->GetEntrySet().empty() && sp->GetExitSet().empty()) { + continue; + } + + aarchCGFunc->GetDummyBB()->ClearInsns(); + cgFunc->SetCurBB(*aarchCGFunc->GetDummyBB()); + for (auto areg : sp->GetEntrySet()) { + AArch64reg reg = static_cast(areg); + offset = static_cast(regOffset[areg]); +#if RS_DUMP + std::string r = reg <= R28 ? "R" : "V"; + M_LOG << r << (reg - 1) << " entry restore in BB " << bid << " Saved Offset = " << offset << "\n"; + PrintSaveLocs(reg); +#endif + + /* restore is always the same from saved offset */ + RegType regType = AArch64isa::IsGPRegister(reg) ? kRegTyInt : kRegTyFloat; + AArch64GenProEpilog::AppendInstructionPopSingle(*cgFunc, reg, regType, offset); + } + FOR_BB_INSNS(insn, aarchCGFunc->GetDummyBB()) { + insn->SetDoNotRemove(true); /* do not let ebo remove these restores */ + } + bb->InsertAtBeginning(*aarchCGFunc->GetDummyBB()); + + aarchCGFunc->GetDummyBB()->ClearInsns(); + cgFunc->SetCurBB(*aarchCGFunc->GetDummyBB()); + for (auto areg : sp->GetExitSet()) { + AArch64reg reg = static_cast(areg); + offset = static_cast(regOffset[areg]); +#if RS_DUMP + std::string r = reg <= R28 ? 
"R" : "V"; + M_LOG << r << (reg - 1) << " exit restore in BB " << bid << " Offset = " << offset << "\n"; + PrintSaveLocs(reg); +#endif + + /* restore is always single from saved offset */ + RegType regType = AArch64isa::IsGPRegister(reg) ? kRegTyInt : kRegTyFloat; + AArch64GenProEpilog::AppendInstructionPopSingle(*cgFunc, reg, regType, offset); + } + FOR_BB_INSNS(insn, aarchCGFunc->GetDummyBB()) { + insn->SetDoNotRemove(true); + } + if (sp->insertAtLastMinusOne) { + bb->InsertAtEndMinus1(*aarchCGFunc->GetDummyBB()); + } else { + bb->InsertAtEnd(*aarchCGFunc->GetDummyBB()); + } + } + } + cgFunc->SetCurBB(*saveBB); +} + +/* Callee-save registers save/restore placement optimization */ +void AArch64RegSavesOpt::Run() { + // DotGenerator::GenerateDot("SR", *cgFunc, cgFunc->GetMirModule(), true, cgFunc->GetName()); + if (Globals::GetInstance()->GetOptimLevel() <= CGOptions::kLevel1 || !cgFunc->GetMirModule().IsCModule()) { + return; + } + AArch64CGFunc *aarchCGFunc = static_cast(cgFunc); + + Bfs localBfs(*cgFunc, *memPool); + bfs = &localBfs; + bfs->ComputeBlockOrder(); +#if RS_DUMP + M_LOG << "##Calleeregs Placement for: " << cgFunc->GetName() << "\n"; + PrintBBs(); +#endif + + /* Determined 1st def and last use of all callee-saved registers used + for all BBs */ + InitData(); + GenRegDefUse(); + + if (CGOptions::UseSsaPreSave()) { + /* Use ssapre */ + if (cgFunc->GetNeedStackProtect() || CheckForUseBeforeDefPath()) { + for (auto reg : aarchCGFunc->GetCalleeSavedRegs()) { + if (reg != RFP && reg != RLR) { + aarchCGFunc->GetProEpilogSavedRegs().push_back(reg); + } + } + return; + } + DetermineCalleeSaveLocationsPre(); + } else { + /* Determine save sites at dominators of 1st def with no live-in and + not within loop */ + /* Obsolete, to be deleted */ + DetermineCalleeSaveLocationsDoms(); + } + + /* Determine restore sites */ + DetermineCalleeRestoreLocations(); + +#ifdef VERIFY + /* Verify saves/restores are in pair */ + std::vector rlist = { R19, R20, R21, R22, R23, R24, R25, R26, R27, R28 }; + for (auto reg : rlist) { + M_LOG << "Verify calleeregs_placement data for R" << (reg - 1) << ":\n"; + std::set visited; + uint32 saveBid = 0; + uint32 restoreBid = 0; + Verify(reg, cgFunc->GetFirstBB(), &visited, &saveBid, &restoreBid); + M_LOG << "\nVerify Done\n"; + } +#endif + + /* Assign stack offset to each shrinkwrapped register, skip over the offsets + for registers saved in prolog */ + SetupRegOffsets(); + + /* Generate callee save instrs at found sites */ + InsertCalleeSaveCode(); + + /* Generate callee restores at found sites */ + InsertCalleeRestoreCode(); +} +} /* namespace maplebe */ diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_rematerialize.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_rematerialize.cpp new file mode 100644 index 0000000000000000000000000000000000000000..dd44849a78419ee64476227b1a4353323f3e19ca --- /dev/null +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_rematerialize.cpp @@ -0,0 +1,104 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. 
+ * See the Mulan PSL v2 for more details. + */ + +#include "aarch64_rematerialize.h" +#include "aarch64_color_ra.h" +#include "aarch64_insn.h" +#include "aarch64_cgfunc.h" + +namespace maplebe { +bool AArch64Rematerializer::IsRematerializableForConstval(int64 val, uint32 bitLen) const { + if (val >= -kMax16UnsignedImm && val <= kMax16UnsignedImm) { + return true; + } + auto uval = static_cast(val); + if (IsMoveWidableImmediate(uval, bitLen)) { + return true; + } + return IsBitmaskImmediate(uval, bitLen); +} + +bool AArch64Rematerializer::IsRematerializableForDread(int32 offset) const { + /* check stImm.GetOffset() is in addri12 */ + return IsBitSizeImmediate(static_cast(static_cast(offset)), kMaxImmVal12Bits, 0); +} + +std::vector AArch64Rematerializer::RematerializeForConstval(CGFunc &cgFunc, + RegOperand ®Op, const LiveRange &lr) { + std::vector insns; + auto intConst = static_cast(rematInfo.mirConst); + ImmOperand *immOp = &cgFunc.GetOpndBuilder()->CreateImm( + GetPrimTypeBitSize(intConst->GetType().GetPrimType()), intConst->GetExtValue()); + MOperator movOp = (lr.GetSpillSize() == k32BitSize) ? MOP_wmovri32 : MOP_xmovri64; + insns.push_back(&cgFunc.GetInsnBuilder()->BuildInsn(movOp, regOp, *immOp)); + return insns; +} + +std::vector AArch64Rematerializer::RematerializeForAddrof(CGFunc &cgFunc, + RegOperand ®Op, int32 offset) { + std::vector insns; + auto &a64Func = static_cast(cgFunc); + const MIRSymbol *symbol = rematInfo.sym; + + StImmOperand &stImm = a64Func.CreateStImmOperand(*symbol, offset, 0); + if ((symbol->GetStorageClass() == kScAuto) || (symbol->GetStorageClass() == kScFormal)) { + SymbolAlloc *symLoc = cgFunc.GetMemlayout()->GetSymAllocInfo(symbol->GetStIndex()); + ImmOperand *offsetOp = &cgFunc.GetOpndBuilder()->CreateImm(k64BitSize, + static_cast(cgFunc.GetBaseOffset(*symLoc)) + offset); + Insn *insn = &cgFunc.GetInsnBuilder()->BuildInsn(MOP_xaddrri12, regOp, + *cgFunc.GetBaseReg(*symLoc), *offsetOp); + if (cgFunc.GetCG()->GenerateVerboseCG()) { + std::string comm = "local/formal var: " + symbol->GetName(); + insn->SetComment(comm); + } + insns.push_back(insn); + } else { + Insn *insn = &cgFunc.GetInsnBuilder()->BuildInsn(MOP_xadrp, regOp, stImm); + insns.push_back(insn); + if (!addrUpper && CGOptions::IsPIC() && ((symbol->GetStorageClass() == kScGlobal) || + (symbol->GetStorageClass() == kScExtern))) { + /* ldr x0, [x0, #:got_lo12:Ljava_2Flang_2FSystem_3B_7Cout] */ + OfstOperand &offsetOp = a64Func.CreateOfstOpnd(*symbol, offset, 0); + MemOperand &memOpnd = a64Func.GetOrCreateMemOpnd(MemOperand::kAddrModeBOi, + GetPointerSize() * kBitsPerByte, ®Op, nullptr, &offsetOp, nullptr); + MOperator ldOp = (memOpnd.GetSize() == k64BitSize) ? 
MOP_xldr : MOP_wldr; + insn = &cgFunc.GetInsnBuilder()->BuildInsn(ldOp, regOp, memOpnd); + insns.push_back(insn); + if (offset > 0) { + ImmOperand &ofstOpnd = cgFunc.GetOpndBuilder()->CreateImm(k32BitSize, offset); + insns.push_back(&cgFunc.GetInsnBuilder()->BuildInsn(MOP_xaddrri12, regOp, regOp, ofstOpnd)); + } + } else if (!addrUpper) { + insns.push_back(&cgFunc.GetInsnBuilder()->BuildInsn(MOP_xadrpl12, regOp, regOp, stImm)); + } + } + return insns; +} + +std::vector AArch64Rematerializer::RematerializeForDread(CGFunc &cgFunc, + RegOperand ®Op, int32 offset, PrimType type) { + std::vector insns; + auto &a64Func = static_cast(cgFunc); + RegOperand *regOp64 = &cgFunc.GetOpndBuilder()->CreatePReg(regOp.GetRegisterNumber(), + k64BitSize, regOp.GetRegisterType()); + uint32 dataSize = GetPrimTypeBitSize(type); + MemOperand *spillMemOp = &a64Func.GetOrCreateMemOpndAfterRa(*rematInfo.sym, + offset, dataSize, false, regOp64, insns); + Insn *ldInsn = cgFunc.GetTargetRegInfo()->BuildLdrInsn(dataSize, type, regOp, *spillMemOp); + insns.push_back(ldInsn); + return insns; +} + +} /* namespace maplebe */ diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_schedule.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_schedule.cpp new file mode 100644 index 0000000000000000000000000000000000000000..28b74b6f8e561b6171755763212f29fced444534 --- /dev/null +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_schedule.cpp @@ -0,0 +1,1515 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ + +#include "aarch64_schedule.h" +#include +#include "aarch64_cg.h" +#include "aarch64_operand.h" +#include "aarch64_data_dep_base.h" +#include "control_dep_analysis.h" +#include "pressure.h" + +/* + * This phase is Instruction Scheduling. + * There is a local list scheduling, it is scheduling in basic block. + * The entry is AArch64Schedule::ListScheduling, will traversal all basic block, + * for a basic block: + * 1. build a dependence graph; + * 2. combine clinit pairs and str&ldr pairs; + * 3. reorder instructions. + */ +namespace maplebe { +namespace { +constexpr uint32 kClinitAdvanceCycle = 10; +constexpr uint32 kAdrpLdrAdvanceCycle = 2; +constexpr uint32 kClinitTailAdvanceCycle = 4; +constexpr uint32 kSecondToLastNode = 2; +} + +uint32 AArch64Schedule::maxUnitIndex = 0; +/* reserve two register for special purpose */ +int AArch64Schedule::intRegPressureThreshold = static_cast(R27 - R0); +int AArch64Schedule::fpRegPressureThreshold = static_cast(V30 - V0); +int AArch64Schedule::intCalleeSaveThresholdBase = static_cast(R29 - R19); +int AArch64Schedule::intCalleeSaveThresholdEnhance = static_cast(R30 - R19); +int AArch64Schedule::fpCalleeSaveThreshold = static_cast(R16 - R8); +/* Init schedule's data struction. 
*/ +void AArch64Schedule::Init() { + readyList.clear(); + nodeSize = nodes.size(); + lastSeparatorIndex = 0; + mad->ReleaseAllUnits(); + DepNode *node = nodes[0]; + + ASSERT(node->GetType() == kNodeTypeSeparator, "CG internal error, the first node should be a separator node."); + + if (CGOptions::IsDruteForceSched() || CGOptions::IsSimulateSched()) { + for (auto nodeTemp : nodes) { + nodeTemp->SetVisit(0); + nodeTemp->SetState(kNormal); + nodeTemp->SetSchedCycle(0); + nodeTemp->SetEStart(0); + nodeTemp->SetLStart(0); + } + } + + readyList.emplace_back(node); + node->SetState(kReady); + + /* Init validPredsSize and validSuccsSize. */ + for (auto nodeTemp : nodes) { + nodeTemp->SetValidPredsSize(nodeTemp->GetPreds().size()); + nodeTemp->SetValidSuccsSize(nodeTemp->GetSuccs().size()); + } +} + +/* + * A insn which can be combine should meet this conditions: + * 1. it is str/ldr insn; + * 2. address mode is kAddrModeBOi, [baseReg, offset]; + * 3. the register operand size equal memory operand size; + * 4. if define USE_32BIT_REF, register operand size should be 4 byte; + * 5. for stp/ldp, the imm should be within -512 and 504(64bit), or -256 and 252(32bit); + * 6. pair instr for 8/4 byte registers must have multiple of 8/4 for imm. + * If insn can be combine, return true. + */ +bool AArch64Schedule::CanCombine(const Insn &insn) const { + MOperator opCode = insn.GetMachineOpcode(); + if ((opCode != MOP_xldr) && (opCode != MOP_wldr) && (opCode != MOP_dldr) && (opCode != MOP_sldr) && + (opCode != MOP_xstr) && (opCode != MOP_wstr) && (opCode != MOP_dstr) && (opCode != MOP_sstr)) { + return false; + } + + ASSERT(insn.GetOperand(1).IsMemoryAccessOperand(), "expects mem operands"); + auto &memOpnd = static_cast(insn.GetOperand(1)); + MemOperand::AArch64AddressingMode addrMode = memOpnd.GetAddrMode(); + if ((addrMode != MemOperand::kAddrModeBOi) || !memOpnd.IsIntactIndexed()) { + return false; + } + + auto ®Opnd = static_cast(insn.GetOperand(0)); + if (regOpnd.GetSize() != memOpnd.GetSize()) { + return false; + } + + uint32 size = regOpnd.GetSize() >> kLog2BitsPerByte; +#ifdef USE_32BIT_REF + if (insn.IsAccessRefField() && (size > (kIntregBytelen >> 1))) { + return false; + } +#endif /* USE_32BIT_REF */ + + OfstOperand *offset = memOpnd.GetOffsetImmediate(); + if (offset == nullptr) { + return false; + } + int32 offsetValue = static_cast(offset->GetOffsetValue()); + if (size == kIntregBytelen) { /* 64 bit */ + if ((offsetValue <= kStpLdpImm64LowerBound) || (offsetValue >= kStpLdpImm64UpperBound)) { + return false; + } + } else if (size == (kIntregBytelen >> 1)) { /* 32 bit */ + if ((offsetValue <= kStpLdpImm32LowerBound) || (offsetValue >= kStpLdpImm32UpperBound)) { + return false; + } + } + + /* pair instr for 8/4 byte registers must have multiple of 8/4 for imm */ + if ((static_cast(offsetValue) % size) != 0) { + return false; + } + return true; +} + +/* After building dependence graph, combine str&ldr pairs. 
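+ * Two candidates are only fused when they move same-sized operands off the
+ * same base register at adjacent offsets, e.g. (illustrative only, the
+ * concrete mnemonics depend on the operand size):
+ *   ldr x1, [x0, #8]
+ *   ldr x2, [x0, #16]   ==>   ldp x1, x2, [x0, #8]
+ * The actual merge is delegated to the dep-graph builder's
+ * CombineMemoryAccessPair().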
*/ +void AArch64Schedule::MemoryAccessPairOpt() { + Init(); + std::vector memList; + + while ((!readyList.empty()) || !memList.empty()) { + DepNode *readNode = nullptr; + if (!readyList.empty()) { + readNode = readyList[0]; + readyList.erase(readyList.begin()); + } else { + if (memList[0]->GetType() != kNodeTypeEmpty) { + FindAndCombineMemoryAccessPair(memList); + } + readNode = memList[0]; + memList.erase(memList.begin()); + } + + /* schedule readNode */ + CHECK_FATAL(readNode != nullptr, "readNode is null in MemoryAccessPairOpt"); + readNode->SetState(kScheduled); + + /* add readNode's succs to readyList or memList. */ + for (auto succLink : readNode->GetSuccs()) { + DepNode &succNode = succLink->GetTo(); + succNode.DescreaseValidPredsSize(); + if (succNode.GetValidPredsSize() == 0) { + ASSERT(succNode.GetState() == kNormal, "schedule state should be kNormal"); + succNode.SetState(kReady); + ASSERT(succNode.GetInsn() != nullptr, "insn can't be nullptr!"); + if (CanCombine(*succNode.GetInsn())) { + memList.emplace_back(&succNode); + } else { + readyList.emplace_back(&succNode); + } + } + } + } + + for (auto node : nodes) { + node->SetVisit(0); + node->SetState(kNormal); + } +} + +/* Find and combine correct MemoryAccessPair for memList[0]. */ +void AArch64Schedule::FindAndCombineMemoryAccessPair(const std::vector &memList) { + ASSERT(!memList.empty(), "memList should not be empty"); + CHECK_FATAL(memList[0]->GetInsn() != nullptr, "memList[0]'s insn should not be nullptr"); + MemOperand *currMemOpnd = static_cast(memList[0]->GetInsn()->GetMemOpnd()); + ASSERT(currMemOpnd != nullptr, "opnd should not be nullptr"); + ASSERT(currMemOpnd->IsMemoryAccessOperand(), "opnd should be memOpnd"); + int32 currOffsetVal = static_cast(currMemOpnd->GetOffsetImmediate()->GetOffsetValue()); + MOperator currMop = memList[0]->GetInsn()->GetMachineOpcode(); + /* find a depNode to combine with memList[0], and break; */ + for (auto it = std::next(memList.begin(), 1); it != memList.end(); ++it) { + ASSERT((*it)->GetInsn() != nullptr, "null ptr check"); + + if (currMop == (*it)->GetInsn()->GetMachineOpcode()) { + MemOperand *nextMemOpnd = static_cast((*it)->GetInsn()->GetMemOpnd()); + CHECK_FATAL(nextMemOpnd != nullptr, "opnd should not be nullptr"); + CHECK_FATAL(nextMemOpnd->IsMemoryAccessOperand(), "opnd should be MemOperand"); + int32 nextOffsetVal = static_cast(nextMemOpnd->GetOffsetImmediate()->GetOffsetValue()); + uint32 size = currMemOpnd->GetSize() >> kLog2BitsPerByte; + if ((nextMemOpnd->GetBaseRegister() == currMemOpnd->GetBaseRegister()) && + (nextMemOpnd->GetSize() == currMemOpnd->GetSize()) && + (static_cast(abs(nextOffsetVal - currOffsetVal)) == size)) { + /* + * In ARM Architecture Reference Manual ARMv8, for ARMv8-A architecture profile + * LDP on page K1-6125 declare that ldp can't use same reg + */ + if (((currMop == MOP_xldr) || (currMop == MOP_sldr) || (currMop == MOP_dldr) || (currMop == MOP_wldr)) && + &(memList[0]->GetInsn()->GetOperand(0)) == &((*it)->GetInsn()->GetOperand(0))) { + continue; + } + if (static_cast((*it)->GetInsn()->GetOperand(0)).GetRegisterType() != + static_cast(memList[0]->GetInsn()->GetOperand(0)).GetRegisterType()) { + continue; + } + + if (LIST_SCHED_DUMP_REF) { + LogInfo::MapleLogger() << "Combine insn: " << "\n"; + memList[0]->GetInsn()->Dump(); + (*it)->GetInsn()->Dump(); + } + ddb->CombineMemoryAccessPair(*memList[0], **it, nextOffsetVal > currOffsetVal); + if (LIST_SCHED_DUMP_REF) { + LogInfo::MapleLogger() << "To: " << "\n"; + memList[0]->GetInsn()->Dump(); + } 
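+        /* Only the first matching candidate is fused; stop scanning memList. */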
+ break; + } + } + } +} + +/* combine clinit pairs. */ +void AArch64Schedule::ClinitPairOpt() { + for (auto it = nodes.begin(); it != nodes.end(); ++it) { + auto nextIt = std::next(it, 1); + if (nextIt == nodes.end()) { + return; + } + + if ((*it)->GetInsn()->GetMachineOpcode() == MOP_adrp_ldr) { + if ((*nextIt)->GetInsn()->GetMachineOpcode() == MOP_clinit_tail) { + ddb->CombineClinit(**it, **(nextIt), false); + } else if ((*nextIt)->GetType() == kNodeTypeSeparator) { + nextIt = std::next(nextIt, 1); + if (nextIt == nodes.end()) { + return; + } + if ((*nextIt)->GetInsn()->GetMachineOpcode() == MOP_clinit_tail) { + /* Do something. */ + ddb->CombineClinit(**it, **(nextIt), true); + } + } + } + } +} + +/* Return the next node's index who is kNodeTypeSeparator. */ +uint32 AArch64Schedule::GetNextSepIndex() const { + return ((lastSeparatorIndex + kMaxDependenceNum) < nodeSize) ? (lastSeparatorIndex + kMaxDependenceNum) + : (nodes.size() - 1); +} + +/* Do register pressure schduling. */ +void AArch64Schedule::RegPressureScheduling(BB &bb, MapleVector &nodes) { + RegPressureSchedule *regSchedule = memPool.New(cgFunc, alloc); + /* + * Get physical register amount currently + * undef, Int Reg, Float Reg, Flag Reg + */ + const std::vector kRegNumVec = { 0, V0, (kMaxRegNum - V0) + 1, 1 }; + regSchedule->InitBBInfo(bb, memPool, nodes); + regSchedule->BuildPhyRegInfo(kRegNumVec); + regSchedule->DoScheduling(nodes); +} + +/* + * Compute earliest start of the node, + * return value : the maximum estart. + */ +uint32 AArch64Schedule::ComputeEstart(uint32 cycle) { + std::vector readyNodes; + uint32 maxIndex = GetNextSepIndex(); + + if (CGOptions::IsDebugSched()) { + /* Check validPredsSize. */ + for (uint32 i = lastSeparatorIndex; i <= maxIndex; ++i) { + DepNode *node = nodes[i]; + int32 schedNum = 0; + for (const auto *predLink : node->GetPreds()) { + if (predLink->GetFrom().GetState() == kScheduled) { + ++schedNum; + } + } + ASSERT((node->GetPreds().size() - schedNum) == node->GetValidPredsSize(), "validPredsSize error."); + } + } + + ASSERT(nodes[maxIndex]->GetType() == kNodeTypeSeparator, + "CG internal error, nodes[maxIndex] should be a separator node."); + + (void)readyNodes.insert(readyNodes.begin(), readyList.begin(), readyList.end()); + + uint32 maxEstart = cycle; + for (uint32 i = lastSeparatorIndex; i <= maxIndex; ++i) { + DepNode *node = nodes[i]; + node->SetVisit(0); + } + + for (auto *node : readyNodes) { + ASSERT(node->GetState() == kReady, "CG internal error, all nodes in ready list should be ready."); + if (node->GetEStart() < cycle) { + node->SetEStart(cycle); + } + } + + while (!readyNodes.empty()) { + DepNode *node = readyNodes.front(); + readyNodes.erase(readyNodes.begin()); + + for (const auto *succLink : node->GetSuccs()) { + DepNode &succNode = succLink->GetTo(); + if (succNode.GetType() == kNodeTypeSeparator) { + continue; + } + + if (succNode.GetEStart() < (node->GetEStart() + succLink->GetLatency())) { + succNode.SetEStart(node->GetEStart() + succLink->GetLatency()); + } + maxEstart = (maxEstart < succNode.GetEStart() ? succNode.GetEStart() : maxEstart); + succNode.IncreaseVisit(); + if ((succNode.GetVisit() >= succNode.GetValidPredsSize()) && (succNode.GetType() != kNodeTypeSeparator)) { + readyNodes.emplace_back(&succNode); + } + ASSERT(succNode.GetVisit() <= succNode.GetValidPredsSize(), "CG internal error."); + } + } + + return maxEstart; +} + +/* Compute latest start of the node. 
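+ * This is the backward counterpart of ComputeEstart(): every node starts at
+ * maxEstart and each predecessor is tightened to
+ *   LStart(pred) = min(LStart(pred), LStart(succ) - latency(pred -> succ))
+ * walking back from the separator node; CompareDepNode() later prefers the
+ * node with the smaller LStart, i.e. the more urgent one.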
*/ +void AArch64Schedule::ComputeLstart(uint32 maxEstart) { + /* std::vector is better than std::queue in run time */ + std::vector readyNodes; + uint32 maxIndex = GetNextSepIndex(); + + ASSERT(nodes[maxIndex]->GetType() == kNodeTypeSeparator, + "CG internal error, nodes[maxIndex] should be a separator node."); + + for (uint32 i = lastSeparatorIndex; i <= maxIndex; ++i) { + DepNode *node = nodes[i]; + node->SetLStart(maxEstart); + node->SetVisit(0); + } + + readyNodes.emplace_back(nodes[maxIndex]); + while (!readyNodes.empty()) { + DepNode *node = readyNodes.front(); + readyNodes.erase(readyNodes.begin()); + for (const auto *predLink : node->GetPreds()) { + DepNode &predNode = predLink->GetFrom(); + if (predNode.GetState() == kScheduled) { + continue; + } + + if (predNode.GetLStart() > (node->GetLStart() - predLink->GetLatency())) { + predNode.SetLStart(node->GetLStart() - predLink->GetLatency()); + } + predNode.IncreaseVisit(); + if ((predNode.GetVisit() >= predNode.GetValidSuccsSize()) && (predNode.GetType() != kNodeTypeSeparator)) { + readyNodes.emplace_back(&predNode); + } + + ASSERT(predNode.GetVisit() <= predNode.GetValidSuccsSize(), "CG internal error."); + } + } +} + +/* Compute earliest start and latest start of the node that is in readyList and not be scheduled. */ +void AArch64Schedule::UpdateELStartsOnCycle(uint32 cycle) { + ComputeLstart(ComputeEstart(cycle)); +} + +/* + * If all unit of this node need when it be scheduling is free, this node can be scheduled, + * Return true. + */ +bool DepNode::CanBeScheduled() const { + for (uint32 i = 0; i < unitNum; ++i) { + Unit *unit = units[i]; + if (unit != nullptr) { + if (!unit->IsFree(i)) { + return false; + } + } + } + return true; +} + +/* Mark those unit that this node need occupy unit when it is being scheduled. */ +void DepNode::OccupyUnits() { + for (uint32 i = 0; i < unitNum; ++i) { + Unit *unit = units[i]; + if (unit != nullptr) { + unit->Occupy(*insn, i); + } + } +} + +/* Get unit kind of this node's units[0]. */ +uint32 DepNode::GetUnitKind() const { + uint32 retValue = 0; + if ((units == nullptr) || (units[0] == nullptr)) { + return retValue; + } + + switch (units[0]->GetUnitId()) { + case kUnitIdSlotD: + retValue |= kUnitKindSlot0; + break; + case kUnitIdAgen: + case kUnitIdSlotSAgen: + retValue |= kUnitKindAgen; + break; + case kUnitIdSlotDAgen: + retValue |= kUnitKindAgen; + retValue |= kUnitKindSlot0; + break; + case kUnitIdHazard: + case kUnitIdSlotSHazard: + retValue |= kUnitKindHazard; + break; + case kUnitIdCrypto: + retValue |= kUnitKindCrypto; + break; + case kUnitIdMul: + case kUnitIdSlotSMul: + retValue |= kUnitKindMul; + break; + case kUnitIdDiv: + retValue |= kUnitKindDiv; + break; + case kUnitIdBranch: + case kUnitIdSlotSBranch: + retValue |= kUnitKindBranch; + break; + case kUnitIdStAgu: + retValue |= kUnitKindStAgu; + break; + case kUnitIdLdAgu: + retValue |= kUnitKindLdAgu; + break; + case kUnitIdFpAluS: + case kUnitIdFpAluD: + retValue |= kUnitKindFpAlu; + break; + case kUnitIdFpMulS: + case kUnitIdFpMulD: + retValue |= kUnitKindFpMul; + break; + case kUnitIdFpDivS: + case kUnitIdFpDivD: + retValue |= kUnitKindFpDiv; + break; + case kUnitIdSlot0LdAgu: + retValue |= kUnitKindSlot0; + retValue |= kUnitKindLdAgu; + break; + case kUnitIdSlot0StAgu: + retValue |= kUnitKindSlot0; + retValue |= kUnitKindStAgu; + break; + default: + break; + } + + return retValue; +} + +/* Count unit kinds to an array. Each element of the array indicates the unit kind number of a node set. 
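+ * GetUnitKind() packs the kUnitKind* flags of a node into a bit mask, so the
+ * loop below just peels off the lowest set bit per iteration, roughly:
+ *   while (uint32 idx = static_cast<uint32>(__builtin_ffs(mask))) {
+ *     ++array[idx];                  // ffs() is 1-based, matching kUnitKind*
+ *     mask &= ~(1u << (idx - 1u));
+ *   }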
*/ +void AArch64Schedule::CountUnitKind(const DepNode &depNode, uint32 array[], const uint32 arraySize) const { + (void)arraySize; + ASSERT(arraySize >= kUnitKindLast, "CG internal error. unit kind number is not correct."); + uint32 unitKind = depNode.GetUnitKind(); + uint32 index = static_cast(__builtin_ffs(unitKind)); + while (index != 0) { + ASSERT(index < kUnitKindLast, "CG internal error. index error."); + ++array[index]; + unitKind &= ~(1u << (index - 1u)); + index = static_cast(__builtin_ffs(unitKind)); + } +} + +/* Check if a node use a specific unit kind. */ +bool AArch64Schedule::IfUseUnitKind(const DepNode &depNode, uint32 index) { + uint32 unitKind = depNode.GetUnitKind(); + uint32 idx = static_cast(__builtin_ffs(unitKind)); + while (idx != 0) { + ASSERT(index < kUnitKindLast, "CG internal error. index error."); + if (idx == index) { + return true; + } + unitKind &= ~(1u << (idx - 1u)); + idx = static_cast(__builtin_ffs(unitKind)); + } + + return false; +} + +/* A sample schedule according dependence graph only, to verify correctness of dependence graph. */ +void AArch64Schedule::RandomTest() { + Init(); + nodes.clear(); + + while (!readyList.empty()) { + DepNode *currNode = readyList.back(); + currNode->SetState(kScheduled); + readyList.pop_back(); + nodes.emplace_back(currNode); + + for (auto succLink : currNode->GetSuccs()) { + DepNode &succNode = succLink->GetTo(); + bool ready = true; + for (auto predLink : succNode.GetPreds()) { + DepNode &predNode = predLink->GetFrom(); + if (predNode.GetState() != kScheduled) { + ready = false; + break; + } + } + + if (ready) { + ASSERT(succNode.GetState() == kNormal, "succNode must be kNormal"); + readyList.emplace_back(&succNode); + succNode.SetState(kReady); + } + } + } +} + +/* Remove target from readyList. */ +void AArch64Schedule::EraseNodeFromReadyList(const DepNode &target) { + EraseNodeFromNodeList(target, readyList); +} + +/* Remove target from nodeList. */ +void AArch64Schedule::EraseNodeFromNodeList(const DepNode &target, MapleVector &nodeList) { + for (auto it = nodeList.begin(); it != nodeList.end(); ++it) { + if ((*it) == &target) { + nodeList.erase(it); + return; + } + } + + ASSERT(false, "CG internal error, erase node fail."); +} + +/* Dump all node of availableReadyList schedule information in current cycle. */ +void AArch64Schedule::DumpDebugInfo(const ScheduleProcessInfo &scheduleInfo) { + LogInfo::MapleLogger() << "Current cycle[ " << scheduleInfo.GetCurrCycle() << " ], Available in readyList is : \n"; + for (auto node : scheduleInfo.GetAvailableReadyList()) { + LogInfo::MapleLogger() << "NodeIndex[ " << node->GetIndex() + << " ], Estart[ " << node->GetEStart() << " ], Lstart[ "; + LogInfo::MapleLogger() << node->GetLStart() << " ], slot[ "; + LogInfo::MapleLogger() << + (node->GetReservation() == nullptr ? "SlotNone" : node->GetReservation()->GetSlotName()) << " ], "; + LogInfo::MapleLogger() << "succNodeNum[ " << node->GetSuccs().size() << " ], "; + node->GetInsn()->Dump(); + LogInfo::MapleLogger() << '\n'; + } +} + +/* + * Select a node from availableReadyList according to some heuristic rules, then: + * 1. change targetNode's schedule information; + * 2. try to add successors of targetNode to readyList; + * 3. update unscheduled node set, when targetNode is last kNodeTypeSeparator; + * 4. update AdvanceCycle. 
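+ *
+ * The selection itself reduces to a linear scan that keeps the best candidate
+ * under CompareDepNode (rough sketch, ignoring the register-pressure pre-pass):
+ *   DepNode *target = availableReadyList.front();
+ *   for (DepNode *cand : availableReadyList) {
+ *     if (CompareDepNode(*cand, *target, scheduleInfo)) {
+ *       target = cand;  // cand has the higher priority
+ *     }
+ *   }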
+ */ +void AArch64Schedule::SelectNode(AArch64ScheduleProcessInfo &scheduleInfo) { + auto &availableReadyList = scheduleInfo.GetAvailableReadyList(); + auto it = availableReadyList.begin(); + DepNode *targetNode = *it; + if (availableReadyList.size() > 1) { + CalculateMaxUnitKindCount(scheduleInfo); + if (GetConsiderRegPressure()) { + UpdateReleaseRegInfo(scheduleInfo); + } + ++it; + for (; it != availableReadyList.end(); ++it) { + if (CompareDepNode(**it, *targetNode, scheduleInfo)) { + targetNode = *it; + } + } + } + /* The priority of free-reg node is higher than pipeline */ + while (!targetNode->CanBeScheduled()) { + scheduleInfo.IncCurrCycle(); + mad->AdvanceCycle(); + } + if (GetConsiderRegPressure() && !scheduleInfo.IsFirstSeparator()) { + UpdateLiveRegSet(scheduleInfo, *targetNode); + } + /* push target node into scheduled nodes and turn it into kScheduled state */ + scheduleInfo.PushElemIntoScheduledNodes(targetNode); + + EraseNodeFromReadyList(*targetNode); + + if (CGOptions::IsDebugSched()) { + LogInfo::MapleLogger() << "TargetNode : "; + targetNode->GetInsn()->Dump(); + LogInfo::MapleLogger() << "\n"; + } + + /* Update readyList. */ + UpdateReadyList(*targetNode, readyList, true); + + if (targetNode->GetType() == kNodeTypeSeparator) { + /* If target node is separator node, update lastSeparatorIndex and calculate those depNodes's estart and lstart + * between current separator node and new Separator node. + */ + if (!scheduleInfo.IsFirstSeparator()) { + lastSeparatorIndex += kMaxDependenceNum; + UpdateELStartsOnCycle(scheduleInfo.GetCurrCycle()); + } else { + scheduleInfo.ResetIsFirstSeparator(); + } + } + + UpdateAdvanceCycle(scheduleInfo, *targetNode); +} + +void AArch64Schedule::UpdateAdvanceCycle(AArch64ScheduleProcessInfo &scheduleInfo, const DepNode &targetNode) const { + switch (targetNode.GetInsn()->GetLatencyType()) { + case kLtClinit: + scheduleInfo.SetAdvanceCycle(kClinitAdvanceCycle); + break; + case kLtAdrpLdr: + scheduleInfo.SetAdvanceCycle(kAdrpLdrAdvanceCycle); + break; + case kLtClinitTail: + scheduleInfo.SetAdvanceCycle(kClinitTailAdvanceCycle); + break; + default: + break; + } + + if ((scheduleInfo.GetAdvanceCycle() == 0) && mad->IsFullIssued()) { + if (targetNode.GetEStart() > scheduleInfo.GetCurrCycle()) { + scheduleInfo.SetAdvanceCycle(1 + targetNode.GetEStart() - scheduleInfo.GetCurrCycle()); + } else { + scheduleInfo.SetAdvanceCycle(1); + } + } +} + +/* + * Advance mad's cycle until info's advanceCycle equal zero, + * and then clear info's availableReadyList. + */ +void AArch64Schedule::UpdateScheduleProcessInfo(AArch64ScheduleProcessInfo &info) const { + while (info.GetAdvanceCycle() > 0) { + info.IncCurrCycle(); + mad->AdvanceCycle(); + info.DecAdvanceCycle(); + } + info.ClearAvailableReadyList(); +} + +/* + * Forward traversal readyList, if a node in readyList can be Schedule, add it to availableReadyList. + * Return true, if availableReadyList is not empty. + */ +bool AArch64Schedule::CheckSchedulable(AArch64ScheduleProcessInfo &info) const { + for (auto node : readyList) { + if (GetConsiderRegPressure()) { + info.PushElemIntoAvailableReadyList(node); + } else { + if (node->CanBeScheduled() && node->GetEStart() <= info.GetCurrCycle()) { + info.PushElemIntoAvailableReadyList(node); + } + } + } + return info.AvailableReadyListIsEmpty() ? 
false : true; +} + +/* + * Calculate estimated machine cycle count for an input node series + */ +int AArch64Schedule::CalSeriesCycles(const MapleVector &nodes) const { + int currentCycle = 0; + /* after an instruction is issued, the minimum cycle count for the next instruction is 1 */ + int instructionBaseCycleCount = 1; + std::map scheduledCycleMap; + for (auto node : nodes) { + int latencyCycle = 0; + /* calculate the latest begin time of this node based on its predecessor's issue time and latency */ + for (auto pred : node->GetPreds()) { + DepNode &from = pred->GetFrom(); + int latency = static_cast(pred->GetLatency()); + int fromCycle = scheduledCycleMap[&from]; + if (fromCycle + latency > latencyCycle) { + latencyCycle = fromCycle + latency; + } + } + /* the issue time of this node is the max value between the next cycle and latest begin time */ + if (currentCycle + instructionBaseCycleCount >= latencyCycle) { + currentCycle = currentCycle + instructionBaseCycleCount; + } else { + currentCycle = latencyCycle; + } + /* record this node's issue cycle */ + scheduledCycleMap[node] = currentCycle; + } + return currentCycle; +} + +/* After building dependence graph, schedule insns. */ +uint32 AArch64Schedule::DoSchedule() { + AArch64ScheduleProcessInfo scheduleInfo(nodeSize); + Init(); + UpdateELStartsOnCycle(scheduleInfo.GetCurrCycle()); + InitLiveRegSet(scheduleInfo); + while (!readyList.empty()) { + UpdateScheduleProcessInfo(scheduleInfo); + /* Check if schedulable */ + if (!CheckSchedulable(scheduleInfo)) { + /* Advance cycle. */ + scheduleInfo.SetAdvanceCycle(1); + continue; + } + + if (scheduleInfo.GetLastUpdateCycle() < scheduleInfo.GetCurrCycle()) { + scheduleInfo.SetLastUpdateCycle(scheduleInfo.GetCurrCycle()); + } + + if (CGOptions::IsDebugSched()) { + DumpDebugInfo(scheduleInfo); + } + + /* Select a node to scheduling */ + SelectNode(scheduleInfo); + } + + ASSERT(scheduleInfo.SizeOfScheduledNodes() == nodes.size(), "CG internal error, Not all nodes scheduled."); + + nodes.clear(); + (void)nodes.insert(nodes.begin(), scheduleInfo.GetScheduledNodes().begin(), scheduleInfo.GetScheduledNodes().end()); + /* the second to last node is the true last node, because the last is kNodeTypeSeparator node */ + ASSERT(nodes.size() - 2 >= 0, "size of nodes should be greater than or equal 2"); + return (nodes[nodes.size() - 2]->GetSchedCycle()); +} + +struct RegisterInfoUnit { + RegisterInfoUnit() : intRegNum(0), fpRegNum(0), ccRegNum(0) {} + uint32 intRegNum = 0; + uint32 fpRegNum = 0; + uint32 ccRegNum = 0; +}; + +RegisterInfoUnit GetDepNodeDefType(const DepNode &depNode, CGFunc &f) { + RegisterInfoUnit rIU; + for (auto defRegNO : depNode.GetDefRegnos()) { + RegType defRegTy = AArch64ScheduleProcessInfo::GetRegisterType(f, defRegNO); + if (defRegTy == kRegTyInt) { + rIU.intRegNum++; + } else if (defRegTy == kRegTyFloat) { + rIU.fpRegNum++; + } else if (defRegTy == kRegTyCc) { + rIU.ccRegNum++; + ASSERT(rIU.ccRegNum <= 1, "spill cc reg?"); + } else { + CHECK_FATAL(false, "NIY aarch64 register type"); + } + } + /* call node will not increase reg def pressure */ + if (depNode.GetInsn() != nullptr && depNode.GetInsn()->IsCall()) { + rIU.intRegNum = 0; + rIU.fpRegNum = 0; + } + return rIU; +} + +AArch64Schedule::CSRResult AArch64Schedule::DoCSR(DepNode &node1, DepNode &node2, + AArch64ScheduleProcessInfo &scheduleInfo) const { + RegisterInfoUnit defRIU1 = GetDepNodeDefType(node1, cgFunc); + RegisterInfoUnit defRIU2 = GetDepNodeDefType(node2, cgFunc); + /* do not increase callee save pressure before 
call */ + if (static_cast(scheduleInfo.SizeOfCalleeSaveLiveRegister(true)) >= intCalleeSaveThreshold) { + if (defRIU1.intRegNum > 0 && defRIU2.intRegNum > 0) { + CSRResult csrInfo = ScheduleCrossCall(node1, node2); + if ((csrInfo == kNode1 && defRIU1.intRegNum >= scheduleInfo.GetFreeIntRegs(node1)) || + (csrInfo == kNode2 && defRIU2.intRegNum >= scheduleInfo.GetFreeIntRegs(node2))) { + return csrInfo; + } + } + } + if (static_cast(scheduleInfo.SizeOfCalleeSaveLiveRegister(false)) >= fpCalleeSaveThreshold) { + if (defRIU1.fpRegNum > 0 && defRIU2.fpRegNum > 0) { + CSRResult csrInfo = ScheduleCrossCall(node1, node2); + if ((csrInfo == kNode1 && defRIU1.fpRegNum >= scheduleInfo.GetFreeFpRegs(node1)) || + (csrInfo == kNode2 && defRIU2.fpRegNum >= scheduleInfo.GetFreeFpRegs(node2))) { + return csrInfo; + } + } + } + auto findFreeRegNode = [&](bool isInt)->CSRResult { + auto freeRegNodes = isInt ? scheduleInfo.GetFreeIntRegNodeSet() : scheduleInfo.GetFreeFpRegNodeSet(); + if (freeRegNodes.find(&node1) != freeRegNodes.end() && freeRegNodes.find(&node2) == freeRegNodes.end()) { + return kNode1; + } + if (freeRegNodes.find(&node1) == freeRegNodes.end() && freeRegNodes.find(&node2) != freeRegNodes.end()) { + return kNode2; + } + return kDoCSP; + }; + if (static_cast(scheduleInfo.SizeOfIntLiveRegSet()) >= intRegPressureThreshold) { + if (findFreeRegNode(true) != kDoCSP) { + return findFreeRegNode(true); + } + } + if (static_cast(scheduleInfo.SizeOfFpLiveRegSet()) >= fpRegPressureThreshold) { + if (findFreeRegNode(false) != kDoCSP) { + return findFreeRegNode(false); + } + } + + bool canDoCSPFurther = false; + if (static_cast(scheduleInfo.SizeOfIntLiveRegSet()) >= intRegPressureThreshold) { + if (defRIU1.intRegNum != defRIU2.intRegNum) { + return defRIU1.intRegNum < defRIU2.intRegNum ? kNode1 : kNode2; + } else { + canDoCSPFurther = defRIU1.intRegNum == 0; + } + } + if (static_cast(scheduleInfo.SizeOfFpLiveRegSet()) >= fpRegPressureThreshold) { + if (defRIU1.fpRegNum != defRIU2.fpRegNum) { + return defRIU1.fpRegNum < defRIU2.fpRegNum ? kNode1 : kNode2; + } else { + canDoCSPFurther = (defRIU1.fpRegNum == 0 && canDoCSPFurther); + } + } + /* if both nodes are going to increase reg pressure, do not do CSP further */ + return canDoCSPFurther ? kDoCSP : (node1.GetInsn()->GetId() < node2.GetInsn()->GetId() ? kNode1 : kNode2); +} + +AArch64Schedule::CSRResult AArch64Schedule::ScheduleCrossCall(const DepNode &node1, const DepNode &node2) const { + uint32 node1ID = node1.GetInsn()->GetId(); + uint32 node2ID = node2.GetInsn()->GetId(); + bool order = node1ID < node2ID; /* true -- node1 before node2 false -- node1 after node2 */ + Insn *beginInsn = order ? node1.GetInsn() : node2.GetInsn(); + uint32 finialId = order ? node2ID : node1ID; + for (Insn *checkInsn = beginInsn; (checkInsn != nullptr && checkInsn->GetId() <= finialId); + checkInsn = checkInsn->GetNextMachineInsn()) { + if (checkInsn->IsCall()) { + return order ? 
kNode1 : kNode2; + } + } + return kDoCSP; +}; + +/* + * Comparing priorities of node1 and node2 according to some heuristic rules + * return true if node1's priority is higher + * crp -- consider reg pressure + */ +bool AArch64Schedule::CompareDepNode(DepNode &node1, DepNode &node2, AArch64ScheduleProcessInfo &scheduleInfo) const { + /* + * strategy CSR -- code schedule for register pressure + * if pressure is above the threshold, select the node which can reduce register pressure + */ + if (GetConsiderRegPressure()) { + switch (DoCSR(node1, node2, scheduleInfo)) { + case kNode1: + return true; + case kNode2: + return false; + default: + break; + } + } + /* strategy CSP -- code schedule for CPU pipeline */ + /* less LStart first */ + if (node1.GetLStart() != node2.GetLStart()) { + return node1.GetLStart() < node2.GetLStart(); + } + + /* max unit kind use */ + bool use1 = IfUseUnitKind(node1, maxUnitIndex); + bool use2 = IfUseUnitKind(node2, maxUnitIndex); + if (use1 != use2) { + return use1; + } + + /* slot0 first */ + SlotType slotType1 = node1.GetReservation()->GetSlot(); + SlotType slotType2 = node2.GetReservation()->GetSlot(); + if (slotType1 == kSlots) { + slotType1 = kSlot0; + } + if (slotType2 == kSlots) { + slotType2 = kSlot0; + } + if (slotType1 != slotType2) { + return slotType1 < slotType2; + } + + /* more succNodes fisrt */ + if (node1.GetSuccs().size() != node2.GetSuccs().size()) { + return node1.GetSuccs().size() > node2.GetSuccs().size(); + } + + /* default order */ + return node1.GetInsn()->GetId() < node2.GetInsn()->GetId(); +} + +/* + * Calculate number of every unit that used by avaliableReadyList's nodes and save the max in maxUnitIndex + */ +void AArch64Schedule::CalculateMaxUnitKindCount(ScheduleProcessInfo &scheduleInfo) const { + uint32 unitKindCount[kUnitKindLast] = { 0 }; + for (auto node : scheduleInfo.GetAvailableReadyList()) { + CountUnitKind(*node, unitKindCount, kUnitKindLast); + } + + uint32 maxCount = 0; + maxUnitIndex = 0; + for (size_t i = 1; i < kUnitKindLast; ++i) { + if (maxCount < unitKindCount[i]) { + maxCount = unitKindCount[i]; + maxUnitIndex = i; + } + } +} + +/* + * Update the release reg node set + * When node in this set is scheduled, register pressure can be reduced + */ +void AArch64Schedule::UpdateReleaseRegInfo(AArch64ScheduleProcessInfo &scheduleInfo) { + auto &availableReadyList = scheduleInfo.GetAvailableReadyList(); + scheduleInfo.ClearALLFreeRegNodeSet(); + /* Traverse availableReadyList and add those can reduce register pressure to release reg node set */ + for (auto node : availableReadyList) { + std::set freeRegNO = CanFreeRegister(*node); + if (!freeRegNO.empty()) { + scheduleInfo.VaryFreeRegSet(cgFunc, freeRegNO, *node); + } + } +} + +/* + * return registers which an instruction can release after being scheduled + */ +std::set AArch64Schedule::CanFreeRegister(const DepNode &node) const { + std::set freeRegSet; + for (auto reg : node.GetUseRegnos()) { + if (RegPressureSchedule::IsLastUse(node, reg)) { + freeRegSet.emplace(reg); + } + } + return freeRegSet; +} + +/* + * After an instruction is scheduled, update live reg set + */ +void AArch64Schedule::UpdateLiveRegSet(AArch64ScheduleProcessInfo &scheduleInfo, const DepNode& node) { + /* dealing with def reg, add def reg into the live reg set */ + size_t i = 1; + for (auto &defReg : node.GetDefRegnos()) { + if (scheduleInfo.FindIntLiveReg(defReg) == 0 && scheduleInfo.FindFpLiveReg(defReg) == 0) { + scheduleInfo.VaryLiveRegSet(cgFunc, defReg, true); + } + /* delete dead def reg from 
live reg set because its live range is only 1 cycle */ + if (node.GetRegDefs(i) == nullptr && liveOutRegNo.find(defReg) == liveOutRegNo.end()) { + scheduleInfo.VaryLiveRegSet(cgFunc, defReg, false); + } + ++i; + } + /* dealing with use reg, delete use reg from live reg set if this instruction is last use of it */ + for (auto &useReg : node.GetUseRegnos()) { + if (RegPressureSchedule::IsLastUse(node, useReg)) { + if ((scheduleInfo.FindIntLiveReg(useReg) != 0 || scheduleInfo.FindFpLiveReg(useReg) != 0) && + liveOutRegNo.find(useReg) == liveOutRegNo.end()) { + scheduleInfo.VaryLiveRegSet(cgFunc, useReg, false); + } + } + } +} + +/* + * Initialize the live reg set based on the live in reg information + */ +void AArch64Schedule::InitLiveRegSet(AArch64ScheduleProcessInfo &scheduleInfo) { + if (GetConsiderRegPressure()) { + for (auto reg : liveInRegNo) { + scheduleInfo.VaryLiveRegSet(cgFunc, reg, true); + } + } +} + +/* + * A simulated schedule: + * scheduling instruction in original order to calculate original execute cycles. + */ +uint32 AArch64Schedule::SimulateOnly() { + uint32 currCycle = 0; + uint32 advanceCycle = 0; + Init(); + + for (uint32 i = 0; i < nodes.size();) { + while (advanceCycle > 0) { + ++currCycle; + mad->AdvanceCycle(); + --advanceCycle; + } + + DepNode *targetNode = nodes[i]; + if ((currCycle >= targetNode->GetEStart()) && targetNode->CanBeScheduled()) { + targetNode->SetSimulateCycle(currCycle); + targetNode->OccupyUnits(); + + /* Update estart. */ + for (auto succLink : targetNode->GetSuccs()) { + DepNode &succNode = succLink->GetTo(); + uint32 eStart = currCycle + succLink->GetLatency(); + if (succNode.GetEStart() < eStart) { + succNode.SetEStart(eStart); + } + } + + if (CGOptions::IsDebugSched()) { + LogInfo::MapleLogger() << "[Simulate] TargetNode : "; + targetNode->GetInsn()->Dump(); + LogInfo::MapleLogger() << "\n"; + } + + switch (targetNode->GetInsn()->GetLatencyType()) { + case kLtClinit: + advanceCycle = kClinitAdvanceCycle; + break; + case kLtAdrpLdr: + advanceCycle = kAdrpLdrAdvanceCycle; + break; + case kLtClinitTail: + advanceCycle = kClinitTailAdvanceCycle; + break; + default: + break; + } + + ++i; + } else { + advanceCycle = 1; + } + } + /* the second to last node is the true last node, because the last is kNodeTypeSeparator nod */ + ASSERT(nodes.size() - kSecondToLastNode >= 0, "size of nodes should be greater than or equal 2"); + return (nodes[nodes.size() - kSecondToLastNode]->GetSimulateCycle()); +} + +/* Restore dependence graph to normal CGIR */ +void AArch64Schedule::FinalizeScheduling(BB &bb, const DataDepBase &dataDepBase) { + bb.ClearInsns(); + + const Insn *prevLocInsn = (bb.GetPrev() != nullptr ? bb.GetPrev()->GetLastLoc() : nullptr); + for (auto node : nodes) { + /* Append comments first */ + for (auto comment : node->GetComments()) { + if (comment->GetPrev() != nullptr && comment->GetPrev()->IsDbgInsn()) { + bb.AppendInsn(*comment->GetPrev()); + } + bb.AppendInsn(*comment); + } + /* Append insn */ + if (!node->GetClinitInsns().empty()) { + for (auto clinit : node->GetClinitInsns()) { + bb.AppendInsn(*clinit); + } + } else if (node->GetType() == kNodeTypeNormal) { + if (node->GetInsn()->GetPrev() != nullptr && node->GetInsn()->GetPrev()->IsDbgInsn()) { + bb.AppendInsn(*node->GetInsn()->GetPrev()); + } + bb.AppendInsn(*node->GetInsn()); + } + + /* Append cfi instructions. 
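+ * (comments, clinit expansions and CFI directives were recorded on their
+ * owning DepNode when the dependence graph was built, so re-emitting them per
+ * node keeps debug/unwind info attached to the reordered instructions).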
*/ + for (auto cfi : node->GetCfiInsns()) { + bb.AppendInsn(*cfi); + } + } + bb.SetLastLoc(prevLocInsn); + + for (auto lastComment : dataDepBase.GetLastComments()) { + bb.AppendInsn(*lastComment); + } + dataDepBase.ClearLastComments(); +} + +/* For every node of nodes, update it's bruteForceSchedCycle. */ +void AArch64Schedule::UpdateBruteForceSchedCycle() { + for (auto node : nodes) { + node->SetBruteForceSchedCycle(node->GetSchedCycle()); + } +} + +/* Recursively schedule all of the possible node. */ +void AArch64Schedule::IterateBruteForce(DepNode &targetNode, MapleVector &readyList, uint32 currCycle, + MapleVector &scheduledNodes, uint32 &maxCycleCount, + MapleVector &optimizedScheduledNodes) { + /* Save states. */ + constexpr int32 unitSize = 31; + ASSERT(unitSize == mad->GetAllUnitsSize(), "CG internal error."); + std::vector occupyTable; + occupyTable.resize(unitSize, 0); + mad->SaveStates(occupyTable, unitSize); + + /* Schedule targetNode first. */ + targetNode.SetState(kScheduled); + targetNode.SetSchedCycle(currCycle); + scheduledNodes.emplace_back(&targetNode); + + MapleVector tempList = readyList; + EraseNodeFromNodeList(targetNode, tempList); + targetNode.OccupyUnits(); + + /* Update readyList. */ + UpdateReadyList(targetNode, tempList, true); + + if (targetNode.GetType() == kNodeTypeSeparator) { + /* If target node is separator node, update lastSeparatorIndex. */ + lastSeparatorIndex += kMaxDependenceNum; + } + + if (tempList.empty()) { + ASSERT(scheduledNodes.size() == nodes.size(), "CG internal error, Not all nodes scheduled."); + if (currCycle < maxCycleCount) { + maxCycleCount = currCycle; + UpdateBruteForceSchedCycle(); + optimizedScheduledNodes = scheduledNodes; + } + } else { + uint32 advanceCycle = 0; + switch (targetNode.GetInsn()->GetLatencyType()) { + case kLtClinit: + advanceCycle = kClinitAdvanceCycle; + break; + case kLtAdrpLdr: + advanceCycle = kAdrpLdrAdvanceCycle; + break; + case kLtClinitTail: + advanceCycle = kClinitTailAdvanceCycle; + break; + default: + break; + } + + do { + std::vector availableReadyList; + std::vector tempAvailableList; + while (advanceCycle > 0) { + ++currCycle; + mad->AdvanceCycle(); + --advanceCycle; + } + /* Check EStart. */ + for (auto node : tempList) { + if (node->GetEStart() <= currCycle) { + tempAvailableList.emplace_back(node); + } + } + + if (tempAvailableList.empty()) { + /* Advance cycle. */ + advanceCycle = 1; + continue; + } + + /* Check if schedulable */ + for (auto node : tempAvailableList) { + if (node->CanBeScheduled()) { + availableReadyList.emplace_back(node); + } + } + + if (availableReadyList.empty()) { + /* Advance cycle. */ + advanceCycle = 1; + continue; + } + + for (auto node : availableReadyList) { + IterateBruteForce(*node, tempList, currCycle, scheduledNodes, maxCycleCount, optimizedScheduledNodes); + } + + break; + } while (true); + } + + /* + * Recover states. + * Restore targetNode first. + */ + targetNode.SetState(kReady); + targetNode.SetSchedCycle(0); + scheduledNodes.pop_back(); + mad->RestoreStates(occupyTable, unitSize); + + /* Update readyList. */ + for (auto succLink : targetNode.GetSuccs()) { + DepNode &succNode = succLink->GetTo(); + succNode.IncreaseValidPredsSize(); + succNode.SetState(kNormal); + } + + if (targetNode.GetType() == kNodeTypeSeparator) { + /* If target node is separator node, update lastSeparatorIndex. 
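+ * (this undoes the "+= kMaxDependenceNum" applied when the separator was
+ * scheduled above, so the backtracking search resumes with the original
+ * state).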
*/ + lastSeparatorIndex -= kMaxDependenceNum; + } +} + +/* + * Brute force schedule: + * Finding all possibile schedule list of current bb, and calculate every list's execute cycles, + * return the optimal schedule list and it's cycles. + */ +uint32 AArch64Schedule::DoBruteForceSchedule() { + MapleVector scheduledNodes(alloc.Adapter()); + MapleVector optimizedScheduledNodes(alloc.Adapter()); + + uint32 currCycle = 0; + uint32 maxCycleCount = 0xFFFFFFFF; + Init(); + + /* Schedule First separator. */ + DepNode *targetNode = readyList.front(); + targetNode->SetState(kScheduled); + targetNode->SetSchedCycle(currCycle); + scheduledNodes.emplace_back(targetNode); + readyList.clear(); + + /* Update readyList. */ + UpdateReadyList(*targetNode, readyList, false); + + ASSERT(targetNode->GetType() == kNodeTypeSeparator, "The first node should be separator node."); + ASSERT(!readyList.empty(), "readyList should not be empty."); + + for (auto targetNodeTemp : readyList) { + IterateBruteForce(*targetNodeTemp, readyList, currCycle, scheduledNodes, maxCycleCount, optimizedScheduledNodes); + } + + nodes = optimizedScheduledNodes; + return maxCycleCount; +} + +/* + * Update ready list after the targetNode has been scheduled. + * For every targetNode's successor, if it's all predecessors have been scheduled, + * add it to ready list and update it's information (like state, estart). + */ +void AArch64Schedule::UpdateReadyList(DepNode &targetNode, MapleVector &readyList, bool updateEStart) { + for (auto succLink : targetNode.GetSuccs()) { + DepNode &succNode = succLink->GetTo(); + succNode.DescreaseValidPredsSize(); + if (succNode.GetValidPredsSize() == 0) { + readyList.emplace_back(&succNode); + succNode.SetState(kReady); + + /* Set eStart. */ + if (updateEStart) { + uint32 maxEstart = 0; + for (auto predLink : succNode.GetPreds()) { + DepNode &predNode = predLink->GetFrom(); + uint32 eStart = predNode.GetSchedCycle() + predLink->GetLatency(); + maxEstart = (maxEstart < eStart ? eStart : maxEstart); + } + succNode.SetEStart(maxEstart); + } + } + } +} + +/* For every node of nodes, dump it's Depdence information. */ +void AArch64Schedule::DumpDepGraph(const MapleVector &nodes) const { + for (auto node : nodes) { + ddb->DumpDepNode(*node); + LogInfo::MapleLogger() << "---------- preds ----------" << "\n"; + for (auto pred : node->GetPreds()) { + ddb->DumpDepLink(*pred, &(pred->GetFrom())); + } + LogInfo::MapleLogger() << "---------- succs ----------" << "\n"; + for (auto succ : node->GetSuccs()) { + ddb->DumpDepLink(*succ, &(succ->GetTo())); + } + LogInfo::MapleLogger() << "---------------------------" << "\n"; + } +} + +/* For every node of nodes, dump it's schedule time according simulate type and instruction information. */ +void AArch64Schedule::DumpScheduleResult(const MapleVector &nodes, SimulateType type) const { + for (auto node : nodes) { + LogInfo::MapleLogger() << "cycle[ "; + switch (type) { + case kListSchedule: + LogInfo::MapleLogger() << node->GetSchedCycle(); + break; + case kBruteForce: + LogInfo::MapleLogger() << node->GetBruteForceSchedCycle(); + break; + case kSimulateOnly: + LogInfo::MapleLogger() << node->GetSimulateCycle(); + break; + } + LogInfo::MapleLogger() << " ] "; + node->GetInsn()->Dump(); + LogInfo::MapleLogger() << "\n"; + } +} + +/* Print bb's dependence dot graph information to a file. 
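+ * The output can be rendered with graphviz; an edge looks roughly like
+ * (pointer-based node names and latencies vary per run):
+ *   insn0x3f82a10 -> insn0x3f82b40 [color=red,label= "2"];
+ * where red marks a true-dependence edge and the label is its latency; each
+ * node is emitted as a box labelled with the insn id and mnemonic.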
*/ +void AArch64Schedule::GenerateDot(const BB &bb, const MapleVector &nodes) const { + std::streambuf *coutBuf = std::cout.rdbuf(); /* keep original cout buffer */ + std::ofstream dgFile; + std::streambuf *buf = dgFile.rdbuf(); + std::cout.rdbuf(buf); + + /* construct the file name */ + std::string fileName; + fileName.append(phaseName); + fileName.append("_"); + fileName.append(cgFunc.GetName()); + fileName.append("_BB"); + auto str = std::to_string(bb.GetId()); + fileName.append(str); + fileName.append("_dep_graph.dot"); + + dgFile.open(fileName.c_str(), std::ios::trunc); + if (!dgFile.is_open()) { + LogInfo::MapleLogger(kLlWarn) << "fileName:" << fileName << " open failure.\n"; + return; + } + dgFile << "digraph {\n"; + for (auto node : nodes) { + for (auto succ : node->GetSuccs()) { + dgFile << "insn" << node->GetInsn() << " -> " << "insn" << succ->GetTo().GetInsn(); + dgFile << " ["; + if (succ->GetDepType() == kDependenceTypeTrue) { + dgFile << "color=red,"; + } + dgFile << "label= \"" << succ->GetLatency() << "\""; + dgFile << "];\n"; + } + } + + for (auto node : nodes) { + MOperator mOp = node->GetInsn()->GetMachineOpcode(); + const InsnDesc *md = &AArch64CG::kMd[mOp]; + dgFile << "insn" << node->GetInsn() << "["; + dgFile << "shape=box,label= \" " << node->GetInsn()->GetId() << ":\n"; + dgFile << "{ "; + dgFile << md->name << "\n"; + dgFile << "}\"];\n"; + } + dgFile << "}\n"; + dgFile.flush(); + dgFile.close(); + std::cout.rdbuf(coutBuf); +} + +RegType AArch64ScheduleProcessInfo::GetRegisterType(CGFunc &f, regno_t regNO) { + if (AArch64isa::IsPhysicalRegister(regNO)) { + if (AArch64isa::IsGPRegister(static_cast(regNO))) { + return kRegTyInt; + } else if (AArch64isa::IsFPSIMDRegister(static_cast(regNO))) { + return kRegTyFloat; + } else { + CHECK_FATAL(false, "unknown physical reg"); + } + } else { + RegOperand *curRegOpnd = f.GetVirtualRegisterOperand(regNO); + ASSERT(curRegOpnd != nullptr, "register which is not physical and virtual"); + return curRegOpnd->GetRegisterType(); + } +} + +void AArch64ScheduleProcessInfo::VaryLiveRegSet(CGFunc &f, regno_t regNO, bool isInc) { + RegType registerTy = GetRegisterType(f, regNO); + if (registerTy == kRegTyInt || registerTy == kRegTyVary) { + isInc ? IncIntLiveRegSet(regNO) : DecIntLiveRegSet(regNO); + } else if (registerTy == kRegTyFloat) { + isInc ? 
IncFpLiveRegSet(regNO) : DecFpLiveRegSet(regNO); + } + /* consider other type register */ +} + +void AArch64ScheduleProcessInfo::VaryFreeRegSet(CGFunc &f, std::set regNOs, DepNode &node) { + for (auto regNO : regNOs) { + RegType registerTy = GetRegisterType(f, regNO); + if (registerTy == kRegTyInt || registerTy == kRegTyVary /* memory base register must be int */) { + IncFreeIntRegNode(node); + } else if (registerTy == kRegTyFloat) { + IncFreeFpRegNode(node); + } else if (registerTy == kRegTyCc) { + /* do not count CC reg */ + return; + } else { + /* consider other type register */ + CHECK_FATAL(false, "do not support this type of register"); + } + } +} + +/* Do brute force scheduling and dump scheduling information */ +void AArch64Schedule::BruteForceScheduling(const BB &bb) { + LogInfo::MapleLogger() << "\n\n$$ Function: " << cgFunc.GetName(); + LogInfo::MapleLogger() << "\n BB id = " << bb.GetId() << "; nodes.size = " << nodes.size() << "\n"; + + constexpr uint32 maxBruteForceNum = 50; + if (nodes.size() < maxBruteForceNum) { + GenerateDot(bb, nodes); + uint32 maxBruteForceCycle = DoBruteForceSchedule(); + MapleVector bruteNodes = nodes; + uint32 maxSchedCycle = DoSchedule(); + if (maxBruteForceCycle < maxSchedCycle) { + LogInfo::MapleLogger() << "maxBruteForceCycle = " << maxBruteForceCycle << "; maxSchedCycle = "; + LogInfo::MapleLogger() << maxSchedCycle << "\n"; + LogInfo::MapleLogger() << "\n ## Dump dependence graph ## " << "\n"; + DumpDepGraph(nodes); + LogInfo::MapleLogger() << "\n ** Dump bruteForce scheduling result." << "\n"; + DumpScheduleResult(bruteNodes, kBruteForce); + LogInfo::MapleLogger() << "\n ^^ Dump list scheduling result." << "\n"; + DumpScheduleResult(nodes, kListSchedule); + } + } else { + LogInfo::MapleLogger() << "Skip BruteForce scheduling." << "\n"; + DoSchedule(); + } +} + +/* Do simulate scheduling and dump scheduling information */ +void AArch64Schedule::SimulateScheduling(const BB &bb) { + uint32 originCycle = SimulateOnly(); + MapleVector oldNodes = nodes; + uint32 schedCycle = DoSchedule(); + if (originCycle < schedCycle) { + LogInfo::MapleLogger() << "Worse cycle [ " << (schedCycle - originCycle) << " ]; "; + LogInfo::MapleLogger() << "originCycle = " << originCycle << "; schedCycle = "; + LogInfo::MapleLogger() << schedCycle << "; nodes.size = " << nodes.size(); + LogInfo::MapleLogger() << "; $$ Function: " << cgFunc.GetName(); + LogInfo::MapleLogger() << "; BB id = " << bb.GetId() << "\n"; + LogInfo::MapleLogger() << "\n ** Dump original result." << "\n"; + DumpScheduleResult(oldNodes, kSimulateOnly); + LogInfo::MapleLogger() << "\n ^^ Dump list scheduling result." << "\n"; + DumpScheduleResult(nodes, kListSchedule); + } else if (originCycle > schedCycle) { + LogInfo::MapleLogger() << "Advance cycle [ " << (originCycle - schedCycle) << " ]; "; + LogInfo::MapleLogger() << "originCycle = " << originCycle << "; schedCycle = "; + LogInfo::MapleLogger() << schedCycle << "; nodes.size = " << nodes.size(); + LogInfo::MapleLogger() << "; $$ Function: " << cgFunc.GetName(); + LogInfo::MapleLogger() << "; BB id = " << bb.GetId() << "\n"; + } else { + LogInfo::MapleLogger() << "Equal cycle [ 0 ]; originCycle = " << originCycle; + LogInfo::MapleLogger() << " ], ignore. nodes.size = " << nodes.size() << "\n"; + } +} + +/* + * A local list scheduling. + * Schedule insns in basic blocks. 
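+ * Per basic block the driver roughly does:
+ *   1. build the dependence graph (intraDDA->Run);
+ *   2. before RA: register-pressure aware DoSchedule() (or
+ *      RegPressureScheduling() for return blocks);
+ *   3. after RA: ClinitPairOpt() + MemoryAccessPairOpt(), then DoSchedule()
+ *      or the brute-force / simulate variants under the debug options;
+ *   4. FinalizeScheduling() rewrites the BB in the new order.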
+ */ +void AArch64Schedule::ListScheduling(bool beforeRA) { + InitIDAndLoc(); + + mad = Globals::GetInstance()->GetMAD(); + if (beforeRA) { + RegPressure::SetMaxRegClassNum(kRegisterLast); + } + // construct cdgNode for each BB + auto *cda = memPool.New(cgFunc, memPool); + cda->CreateAllCDGNodes(); + ddb = memPool.New(memPool, cgFunc, *mad); + intraDDA = memPool.New(memPool, cgFunc, *ddb); + FOR_ALL_BB(bb, &cgFunc) { + intraDDA->Run(*bb, nodes); + + if (LIST_SCHED_DUMP_REF) { + GenerateDot(*bb, nodes); + DumpDepGraph(nodes); + } + if (beforeRA) { + liveInRegNo = bb->GetLiveInRegNO(); + liveOutRegNo = bb->GetLiveOutRegNO(); + if (bb->GetKind() != BB::kBBReturn) { + SetConsiderRegPressure(); + DoSchedule(); + } else { + RegPressureScheduling(*bb, nodes); + } + } else { + ClinitPairOpt(); + MemoryAccessPairOpt(); + if (CGOptions::IsDruteForceSched()) { + BruteForceScheduling(*bb); + } else if (CGOptions::IsSimulateSched()) { + SimulateScheduling(*bb); + } else { + DoSchedule(); + } + } + + FinalizeScheduling(*bb, *ddb); + } +} +} /* namespace maplebe */ diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_ssa.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_ssa.cpp new file mode 100644 index 0000000000000000000000000000000000000000..212b82c122b935b1e956722dbaf9348bd5b1d4ad --- /dev/null +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_ssa.cpp @@ -0,0 +1,384 @@ +/* + * Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ + +#include "aarch64_ssa.h" +#include "aarch64_cg.h" + +namespace maplebe { +void AArch64CGSSAInfo::RenameInsn(Insn &insn) { + auto opndNum = static_cast(insn.GetOperandSize()); + const InsnDesc *md = insn.GetDesc(); + if (md->IsPhi()) { + return; + } + for (int i = opndNum - 1; i >= 0; --i) { + Operand &opnd = insn.GetOperand(static_cast(i)); + auto *opndProp = (md->opndMD[static_cast(i)]); + A64SSAOperandRenameVisitor renameVisitor(*this, insn, *opndProp, i); + opnd.Accept(renameVisitor); + } +} + +MemOperand *AArch64CGSSAInfo::CreateMemOperand(MemOperand &memOpnd, bool isOnSSA) const { + return isOnSSA ? 
memOpnd.Clone(*memPool) : + &static_cast(cgFunc)->GetOrCreateMemOpnd(memOpnd); +} + +RegOperand *AArch64CGSSAInfo::GetRenamedOperand(RegOperand &vRegOpnd, bool isDef, Insn &curInsn, uint32 idx) { + if (vRegOpnd.IsVirtualRegister()) { + ASSERT(!vRegOpnd.IsSSAForm(), "Unexpect ssa operand"); + if (isDef) { + VRegVersion *newVersion = CreateNewVersion(vRegOpnd, curInsn, idx); + CHECK_FATAL(newVersion != nullptr, "get ssa version failed"); + return newVersion->GetSSAvRegOpnd(); + } else { + VRegVersion *curVersion = GetVersion(vRegOpnd); + if (curVersion == nullptr) { + curVersion = RenamedOperandSpecialCase(vRegOpnd, curInsn, idx); + } + curVersion->AddUseInsn(*this, curInsn, idx); + return curVersion->GetSSAvRegOpnd(); + } + } + ASSERT(false, "Get Renamed operand failed"); + return nullptr; +} + +VRegVersion *AArch64CGSSAInfo::RenamedOperandSpecialCase(RegOperand &vRegOpnd, Insn &curInsn, uint32 idx) { + LogInfo::MapleLogger() << "WARNING: " << vRegOpnd.GetRegisterNumber() << " has no def info in function : " + << cgFunc->GetName() << " !\n"; + /* occupy operand for no def vreg */ + if (!IncreaseSSAOperand(vRegOpnd.GetRegisterNumber(), nullptr)) { + ASSERT(GetAllSSAOperands().find(vRegOpnd.GetRegisterNumber()) != GetAllSSAOperands().end(), "should find"); + AddNoDefVReg(vRegOpnd.GetRegisterNumber()); + } + VRegVersion *version = CreateNewVersion(vRegOpnd, curInsn, idx); + version->SetDefInsn(nullptr, kDefByNo); + return version; +} + +RegOperand *AArch64CGSSAInfo::CreateSSAOperand(RegOperand &virtualOpnd) { + regno_t ssaRegNO = static_cast(GetAllSSAOperands().size()) + ssaRegNObase; + while (GetAllSSAOperands().count(ssaRegNO) != 0) { + ssaRegNO++; + ssaRegNObase++; + } + RegOperand *newVreg = memPool->New(ssaRegNO, + virtualOpnd.GetSize(), virtualOpnd.GetRegisterType()); + newVreg->SetOpndSSAForm(); + return newVreg; +} + +void AArch64CGSSAInfo::ReplaceInsn(Insn &oriInsn, Insn &newInsn) { + A64OpndSSAUpdateVsitor ssaUpdator(*this); + auto updateInsnSSAInfo = [&ssaUpdator](Insn &curInsn, bool isDelete) { + const InsnDesc *md = curInsn.GetDesc(); + for (uint32 i = 0; i < curInsn.GetOperandSize(); ++i) { + Operand &opnd = curInsn.GetOperand(i); + auto *opndProp = md->opndMD[i]; + if (isDelete) { + ssaUpdator.MarkDecrease(); + } else { + ssaUpdator.MarkIncrease(); + } + ssaUpdator.SetInsnOpndInfo(curInsn, *opndProp, i); + opnd.Accept(ssaUpdator); + } + }; + updateInsnSSAInfo(oriInsn, true); + newInsn.SetId(oriInsn.GetId()); + updateInsnSSAInfo(newInsn, false); + CHECK_FATAL(!ssaUpdator.HasDeleteDef(), "delete def point in replace insn, please check"); +} + +/* do not break binding between input and output operands in asm */ +void AArch64CGSSAInfo::CheckAsmDUbinding(Insn &insn, const VRegVersion *toBeReplaced, VRegVersion *newVersion) { + if (insn.GetMachineOpcode() == MOP_asm) { + for (auto &opndIt : static_cast(insn.GetOperand(kAsmOutputListOpnd)).GetOperands()) { + if (opndIt->IsSSAForm()) { + VRegVersion *defVersion = FindSSAVersion(opndIt->GetRegisterNumber()); + if (defVersion && defVersion->GetOriginalRegNO() == toBeReplaced->GetOriginalRegNO()) { + insn.AddRegBinding(defVersion->GetOriginalRegNO(), newVersion->GetSSAvRegOpnd()->GetRegisterNumber()); + } + } + } + } +} + +void AArch64CGSSAInfo::ReplaceAllUse(VRegVersion *toBeReplaced, VRegVersion *newVersion) { + MapleUnorderedMap &useList = toBeReplaced->GetAllUseInsns(); + for (auto it = useList.begin(); it != useList.end();) { + Insn *useInsn = it->second->GetInsn(); + CheckAsmDUbinding(*useInsn, toBeReplaced, newVersion); + for (auto 
&opndIt : it->second->GetOperands()) { + Operand &opnd = useInsn->GetOperand(opndIt.first); + A64ReplaceRegOpndVisitor replaceRegOpndVisitor( + *cgFunc, *useInsn, opndIt.first, *toBeReplaced->GetSSAvRegOpnd(), *newVersion->GetSSAvRegOpnd()); + opnd.Accept(replaceRegOpndVisitor); + newVersion->AddUseInsn(*this, *useInsn, opndIt.first); + it->second->ClearDU(opndIt.first); + } + it = useList.erase(it); + } +} + +void AArch64CGSSAInfo::CreateNewInsnSSAInfo(Insn &newInsn) { + uint32 opndNum = newInsn.GetOperandSize(); + MarkInsnsInSSA(newInsn); + for (uint32 i = 0; i < opndNum; i++) { + Operand &opnd = newInsn.GetOperand(i); + auto *opndProp = newInsn.GetDesc()->opndMD[i]; + if (opndProp->IsDef() && opndProp->IsUse()) { + CHECK_FATAL(false, "do not support both def and use"); + } + if (opndProp->IsDef()) { + CHECK_FATAL(opnd.IsRegister(), "defOpnd must be reg"); + auto &defRegOpnd = static_cast(opnd); + regno_t defRegNO = defRegOpnd.GetRegisterNumber(); + uint32 defVIdx = IncreaseVregCount(defRegNO); + RegOperand *defSSAOpnd = CreateSSAOperand(defRegOpnd); + newInsn.SetOperand(i, *defSSAOpnd); + auto *defVersion = memPool->New(ssaAlloc, *defSSAOpnd, defVIdx, defRegNO); + auto *defInfo = CreateDUInsnInfo(&newInsn, i); + defVersion->SetDefInsn(defInfo, kDefByInsn); + if (!IncreaseSSAOperand(defSSAOpnd->GetRegisterNumber(), defVersion)) { + CHECK_FATAL(false, "insert ssa operand failed"); + } + uint32 curSSAVregCount = cgFunc->GetSSAvRegCount(); + cgFunc->SetSSAvRegCount(++curSSAVregCount); + } else if (opndProp->IsUse()) { + A64OpndSSAUpdateVsitor ssaUpdator(*this); + ssaUpdator.MarkIncrease(); + ssaUpdator.SetInsnOpndInfo(newInsn, *opndProp, i); + opnd.Accept(ssaUpdator); + } + } +} + +void AArch64CGSSAInfo::DumpInsnInSSAForm(const Insn &insn) const { + MOperator mOp = insn.GetMachineOpcode(); + const InsnDesc *md = insn.GetDesc(); + ASSERT(md != nullptr, "md should not be nullptr"); + + LogInfo::MapleLogger() << "< " << insn.GetId() << " > "; + LogInfo::MapleLogger() << md->name << "(" << mOp << ")"; + + for (uint32 i = 0; i < insn.GetOperandSize(); ++i) { + Operand &opnd = insn.GetOperand(i); + LogInfo::MapleLogger() << " (opnd" << i << ": "; + A64SSAOperandDumpVisitor a64OpVisitor(GetAllSSAOperands()); + opnd.Accept(a64OpVisitor); + if (!a64OpVisitor.HasDumped()) { + A64OpndDumpVisitor dumpVisitor(*md->GetOpndDes(i)); + opnd.Accept(dumpVisitor); + LogInfo::MapleLogger() << ")"; + } + } + if (insn.IsVectorOp()) { + auto &vInsn = static_cast(insn); + if (vInsn.GetNumOfRegSpec() != 0) { + LogInfo::MapleLogger() << " (vecSpec: " << vInsn.GetNumOfRegSpec() << ")"; + } + } + LogInfo::MapleLogger() << "\n"; +} + +void A64SSAOperandRenameVisitor::Visit(RegOperand *v) { + if (v->IsVirtualRegister()) { + if (opndDes->IsRegDef() && opndDes->IsRegUse()) { /* both def use */ + insn->SetOperand(idx, *ssaInfo->GetRenamedOperand(*v, false, *insn, idx)); + RegOperand *ssaDefOpnd = ssaInfo->GetRenamedOperand(*v, true, *insn, idx); + insn->SetSSAImpDefOpnd(ssaDefOpnd); + } else { + insn->SetOperand(idx, *ssaInfo->GetRenamedOperand(*v, opndDes->IsRegDef(), *insn, idx)); + } + } +} + +void A64SSAOperandRenameVisitor::Visit(MemOperand *v) { + RegOperand *base = v->GetBaseRegister(); + RegOperand *index = v->GetIndexRegister(); + bool needCopy = (base != nullptr && base->IsVirtualRegister()) || (index != nullptr && index->IsVirtualRegister()); + if (needCopy) { + MemOperand *cpyMem = ssaInfo->CreateMemOperand(*v, true); + if (base != nullptr && base->IsVirtualRegister()) { + bool isDef = !v->IsIntactIndexed(); + 
cpyMem->SetBaseRegister(*ssaInfo->GetRenamedOperand(*base, isDef, *insn, idx)); + } + if (index != nullptr && index->IsVirtualRegister()) { + cpyMem->SetIndexRegister(*ssaInfo->GetRenamedOperand(*index, false, *insn, idx)); + } + insn->SetMemOpnd(ssaInfo->CreateMemOperand(*cpyMem, false)); + } +} + +void A64SSAOperandRenameVisitor::Visit(ListOperand *v) { + bool isAsm = insn->GetMachineOpcode() == MOP_asm; + /* record the orignal list order */ + std::list tempList; + auto& opndList = v->GetOperands(); + while (!opndList.empty()) { + auto* op = opndList.front(); + opndList.pop_front(); + + if (op->IsSSAForm() || !op->IsVirtualRegister()) { + tempList.push_back(op); + continue; + } + + bool isDef = + isAsm && (idx == kAsmClobberListOpnd || idx == kAsmOutputListOpnd); + RegOperand *renameOpnd = ssaInfo->GetRenamedOperand(*op, isDef, *insn, idx); + tempList.push_back(renameOpnd); + } + ASSERT(v->GetOperands().empty(), "need to clean list"); + v->GetOperands().assign(tempList.begin(), tempList.end()); +} + +void A64OpndSSAUpdateVsitor::Visit(RegOperand *v) { + if (v->IsSSAForm()) { + if (opndDes->IsRegDef() && opndDes->IsRegUse()) { + UpdateRegUse(v->GetRegisterNumber()); + ASSERT(insn->GetSSAImpDefOpnd(), "must be"); + UpdateRegDef(insn->GetSSAImpDefOpnd()->GetRegisterNumber()); + } else { + if (opndDes->IsRegDef()) { + UpdateRegDef(v->GetRegisterNumber()); + } else if (opndDes->IsRegUse()) { + UpdateRegUse(v->GetRegisterNumber()); + } else if (IsPhi()) { + UpdateRegUse(v->GetRegisterNumber()); + } else { + ASSERT(false, "invalid opnd"); + } + } + } +} + +void A64OpndSSAUpdateVsitor::Visit(maplebe::MemOperand *a64MemOpnd) { + RegOperand *base = a64MemOpnd->GetBaseRegister(); + RegOperand *index = a64MemOpnd->GetIndexRegister(); + if (base != nullptr && base->IsSSAForm()) { + if (a64MemOpnd->IsIntactIndexed()) { + UpdateRegUse(base->GetRegisterNumber()); + } else { + UpdateRegDef(base->GetRegisterNumber()); + } + } + if (index != nullptr && index->IsSSAForm()) { + UpdateRegUse(index->GetRegisterNumber()); + } +} + +void A64OpndSSAUpdateVsitor::Visit(PhiOperand *v) { + SetPhi(true); + for (auto phiListIt = v->GetOperands().begin(); phiListIt != v->GetOperands().end(); ++phiListIt) { + Visit(phiListIt->second); + } + SetPhi(false); +} + +void A64OpndSSAUpdateVsitor::Visit(ListOperand *v) { + /* do not handle asm here, so there is no list def */ + if (insn->GetMachineOpcode() == MOP_asm) { + ASSERT(false, "do not support asm yet"); + return; + } + for (auto *op : v->GetOperands()) { + if (op->IsSSAForm()) { + UpdateRegUse(op->GetRegisterNumber()); + } + } +} + +void A64OpndSSAUpdateVsitor::UpdateRegUse(uint32 ssaIdx) { + VRegVersion *curVersion = ssaInfo->FindSSAVersion(ssaIdx); + CHECK_NULL_FATAL(curVersion); + if (isDecrease) { + curVersion->RemoveUseInsn(*insn, idx); + } else { + curVersion->AddUseInsn(*ssaInfo, *insn, idx); + } +} + +void A64OpndSSAUpdateVsitor::UpdateRegDef(uint32 ssaIdx) { + VRegVersion *curVersion = ssaInfo->FindSSAVersion(ssaIdx); + CHECK_NULL_FATAL(curVersion); + if (isDecrease) { + deletedDef.emplace(ssaIdx); + curVersion->MarkDeleted(); + } else { + if (deletedDef.count(ssaIdx) != 0) { + deletedDef.erase(ssaIdx); + curVersion->MarkRecovery(); + } else { + CHECK_FATAL(false, "do no support new define in ssaUpdating"); + } + ASSERT(!insn->IsPhi(), "do no support yet"); + curVersion->SetDefInsn(ssaInfo->CreateDUInsnInfo(insn, idx), kDefByInsn); + } +} + +void A64SSAOperandDumpVisitor::Visit(RegOperand *v) { + ASSERT(!v->IsConditionCode(), "both condi and reg"); + if 
(v->IsSSAForm()) { + std::array prims = { "U", "R", "V", "C", "X", "Vra" }; + std::array classes = { "[U]", "[I]", "[F]", "[CC]", "[X87]", "[Vra]" }; + CHECK_FATAL(v->IsVirtualRegister() && v->IsSSAForm(), "only dump ssa opnd here"); + RegType regType = v->GetRegisterType(); + ASSERT(regType < kRegTyLast, "unexpected regType"); + auto ssaVit = allSSAOperands.find(v->GetRegisterNumber()); + CHECK_FATAL(ssaVit != allSSAOperands.end(), "find ssa version failed"); + LogInfo::MapleLogger() << "ssa_reg:" << prims[regType] << ssaVit->second->GetOriginalRegNO() << "_" + << ssaVit->second->GetVersionIdx() << " class: " << classes[regType] << " validBitNum: [" + << static_cast(v->GetValidBitsNum()) << "]"; + LogInfo::MapleLogger() << ")"; + SetHasDumped(); + } +} + +void A64SSAOperandDumpVisitor::Visit(ListOperand *v) { + for (auto regOpnd : v->GetOperands()) { + if (regOpnd->IsSSAForm()) { + Visit(regOpnd); + continue; + } + } +} + +void A64SSAOperandDumpVisitor::Visit(MemOperand *v) { + if (v->GetBaseRegister() != nullptr && v->GetBaseRegister()->IsSSAForm()) { + LogInfo::MapleLogger() << "Mem: "; + Visit(v->GetBaseRegister()); + if (v->GetAddrMode() == MemOperand::kAddrModeBOi) { + LogInfo::MapleLogger() << "offset:"; + v->GetOffsetOperand()->Dump(); + } + } + if (v->GetIndexRegister() != nullptr && v->GetIndexRegister()->IsSSAForm()) { + ASSERT(v->GetAddrMode() == MemOperand::kAddrModeBOrX, "mem mode false"); + LogInfo::MapleLogger() << "offset:"; + Visit(v->GetIndexRegister()); + } +} + +void A64SSAOperandDumpVisitor::Visit(PhiOperand *v) { + for (auto phiListIt = v->GetOperands().begin(); phiListIt != v->GetOperands().end();) { + Visit(phiListIt->second); + LogInfo::MapleLogger() << " fBB<" << phiListIt->first << ">"; + LogInfo::MapleLogger() << (++phiListIt == v->GetOperands().end() ? ")" : ", "); + } +} +} diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_strldr.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_strldr.cpp new file mode 100644 index 0000000000000000000000000000000000000000..4300efbb0e36543f3b389d1b87bcf53c37154ac2 --- /dev/null +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_strldr.cpp @@ -0,0 +1,1079 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "aarch64_strldr.h" +#include "aarch64_reaching.h" +#include "aarch64_cgfunc.h" +#include "common_utils.h" + +namespace maplebe { +using namespace maple; + +static MOperator SelectMovMop(bool isFloatOrSIMD, bool is64Bit) { + return isFloatOrSIMD ? (is64Bit ? MOP_xvmovd : MOP_xvmovs) + : (is64Bit ? MOP_xmovrr : MOP_wmovrr); +} + +void AArch64StoreLoadOpt::Run() { + DoStoreLoadOpt(); +} + +/* + * Transfer: store x100, [MEM] + * ... // May exist branches. + * load x200, [MEM] + * ==> + * OPT_VERSION_STR_LIVE: + * store x100, [MEM] + * ... // May exist branches. if x100 not dead here. + * mov x200, x100 + * OPT_VERSION_STR_DIE: + * store x100, [MEM] + * mov x9000(new reg), x100 + * ... // May exist branches. if x100 dead here. 
+ * mov x200, x9000 + * Params: + * strInsn: indicate store insn. + * strSrcIdx: index of source register operand of store insn. (x100 in this example) + * memSeq: represent first memOpreand or second memOperand + * memUseInsnSet: insns using memOperand + */ +void AArch64StoreLoadOpt::DoLoadToMoveTransfer(Insn &strInsn, short strSrcIdx, + short memSeq, const InsnSet &memUseInsnSet) { + /* stp instruction need two registers, str only need one register */ + ASSERT(strSrcIdx < kDivide2, "CG internal error."); + /* Find x100's definition insn. */ + InsnSet regDefInsnSet = cgFunc.GetRD()->FindDefForRegOpnd(strInsn, strSrcIdx); + ASSERT(!regDefInsnSet.empty(), "RegOperand is used before defined"); + if (regDefInsnSet.size() != 1) { + return; + } + std::map insnState; + for (auto *ldrInsn : memUseInsnSet) { + insnState[ldrInsn] = true; + } + for (auto *ldrInsn : memUseInsnSet) { + if (!ldrInsn->IsLoad() || (ldrInsn->GetDefRegs().size() > 1) || ldrInsn->GetBB()->IsCleanup()) { + continue; + } + + if (HasMemBarrier(*ldrInsn, strInsn)) { + continue; + } + + /* ldr x200, [mem], mem index is 1, x200 index is 0 */ + InsnSet memDefInsnSet = cgFunc.GetRD()->FindDefForMemOpnd(*ldrInsn, kInsnSecondOpnd); + ASSERT(!memDefInsnSet.empty(), "load insn should have definitions."); + /* If load has multiple definition, continue. */ + if (memDefInsnSet.size() > 1) { + insnState[ldrInsn] = false; + continue; + } + + Operand &resOpnd = ldrInsn->GetOperand(kInsnFirstOpnd); + Operand &srcOpnd = strInsn.GetOperand(static_cast(strSrcIdx)); + if (resOpnd.GetSize() != srcOpnd.GetSize()) { + return; + } + + auto &resRegOpnd = static_cast(resOpnd); + auto &srcRegOpnd = static_cast(srcOpnd); + if (resRegOpnd.GetRegisterType() != srcRegOpnd.GetRegisterType()) { + continue; + } + + /* Check if use operand of store is live at load insn. 
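If it is still live there, the load is rewritten as a plain register move from it
(GenerateMoveLiveInsn); if it dies before the load and we are still before RA, the copy
inserted right after the store is used instead (GenerateMoveDeadInsn).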
*/ + if (cgFunc.GetRD()->RegIsLiveBetweenInsn(srcRegOpnd.GetRegisterNumber(), strInsn, *ldrInsn)) { + GenerateMoveLiveInsn(resRegOpnd, srcRegOpnd, *ldrInsn, strInsn, memSeq); + insnState[ldrInsn] = false; + } else if (!cgFunc.IsAfterRegAlloc()) { + GenerateMoveDeadInsn(resRegOpnd, srcRegOpnd, *ldrInsn, strInsn, memSeq); + } + + if (CG_DEBUG_FUNC(cgFunc)) { + LogInfo::MapleLogger() << "Do store-load optimization 1: str version"; + LogInfo::MapleLogger() << cgFunc.GetName() << '\n'; + LogInfo::MapleLogger() << "Store insn: "; + strInsn.Dump(); + LogInfo::MapleLogger() << "Load insn: "; + ldrInsn->Dump(); + } + } + auto it = memUseInsnSet.begin(); + ++it; + for (; it != memUseInsnSet.end(); ++it) { + Insn *curInsn = *it; + if (insnState[curInsn] == false) { + continue; + } + if (!curInsn->IsLoad() || (curInsn->GetDefRegs().size() > 1) || curInsn->GetBB()->IsCleanup()) { + continue; + } + InsnSet memDefInsnSet = cgFunc.GetRD()->FindDefForMemOpnd(*curInsn, kInsnSecondOpnd); + ASSERT(!memDefInsnSet.empty(), "load insn should have definitions."); + if (memDefInsnSet.size() > 1) { + continue; + } + auto prevIt = it; + do { + --prevIt; + Insn *prevInsn = *prevIt; + if (insnState[prevInsn] == false) { + continue; + } + if (prevInsn->GetBB() != curInsn->GetBB()) { + break; + } + if (!prevInsn->IsLoad() || (prevInsn->GetDefRegs().size() > 1) || prevInsn->GetBB()->IsCleanup()) { + continue; + } + InsnSet memoryDefInsnSet = cgFunc.GetRD()->FindDefForMemOpnd(*curInsn, kInsnSecondOpnd); + ASSERT(!memoryDefInsnSet.empty(), "load insn should have definitions."); + if (memoryDefInsnSet.size() > 1) { + break; + } + Operand &resOpnd = curInsn->GetOperand(kInsnFirstOpnd); + Operand &srcOpnd = prevInsn->GetOperand(kInsnFirstOpnd); + if (resOpnd.GetSize() != srcOpnd.GetSize()) { + continue; + } + + auto &resRegOpnd = static_cast(resOpnd); + auto &srcRegOpnd = static_cast(srcOpnd); + if (resRegOpnd.GetRegisterType() != srcRegOpnd.GetRegisterType()) { + continue; + } + /* Check if use operand of store is live at load insn. */ + if (cgFunc.GetRD()->FindRegDefBetweenInsn(srcRegOpnd.GetRegisterNumber(), + prevInsn->GetNext(), curInsn->GetPrev()).empty()) { + GenerateMoveLiveInsn(resRegOpnd, srcRegOpnd, *curInsn, *prevInsn, memSeq); + insnState[curInsn] = false; + } + break; + } while (prevIt != memUseInsnSet.begin()); + } +} + +void AArch64StoreLoadOpt::GenerateMoveLiveInsn(RegOperand &resRegOpnd, RegOperand &srcRegOpnd, + Insn &ldrInsn, Insn &strInsn, short memSeq) { + MOperator movMop = SelectMovMop(resRegOpnd.IsOfFloatOrSIMDClass(), resRegOpnd.GetSize() == k64BitSize); + Insn *movInsn = nullptr; + if (str2MovMap[&strInsn][memSeq] != nullptr && !cgFunc.IsAfterRegAlloc()) { + Insn *movInsnOfStr = str2MovMap[&strInsn][memSeq]; + auto &vregOpnd = static_cast(movInsnOfStr->GetOperand(kInsnFirstOpnd)); + movInsn = &cgFunc.GetInsnBuilder()->BuildInsn(movMop, resRegOpnd, vregOpnd); + } else { + movInsn = &cgFunc.GetInsnBuilder()->BuildInsn(movMop, resRegOpnd, srcRegOpnd); + } + if (resRegOpnd.GetRegisterNumber() == srcRegOpnd.GetRegisterNumber() && + cgFunc.IsAfterRegAlloc()) { + ldrInsn.GetBB()->RemoveInsn(ldrInsn); + cgFunc.GetRD()->InitGenUse(*ldrInsn.GetBB(), false); + return; + } + movInsn->SetId(ldrInsn.GetId()); + ldrInsn.GetBB()->ReplaceInsn(ldrInsn, *movInsn); + if (CG_DEBUG_FUNC(cgFunc)) { + LogInfo::MapleLogger() << "replace ldrInsn:\n"; + ldrInsn.Dump(); + LogInfo::MapleLogger() << "with movInsn:\n"; + movInsn->Dump(); + } + /* Add comment. 
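The original load's comment is kept and a "...-load live version" tag is appended to the
replacing mov, so the rewrite stays visible in dumps.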
*/ + MapleString newComment = ldrInsn.GetComment(); + if (strInsn.IsStorePair()) { + newComment += "; stp-load live version."; + } else { + newComment += "; str-load live version."; + } + movInsn->SetComment(newComment); + cgFunc.GetRD()->InitGenUse(*ldrInsn.GetBB(), false); +} + +void AArch64StoreLoadOpt::GenerateMoveDeadInsn(RegOperand &resRegOpnd, RegOperand &srcRegOpnd, + Insn &ldrInsn, Insn &strInsn, short memSeq) { + Insn *newMovInsn = nullptr; + RegOperand *vregOpnd = nullptr; + + if (str2MovMap[&strInsn][memSeq] == nullptr) { + RegType regTy = srcRegOpnd.IsOfFloatOrSIMDClass() ? kRegTyFloat : kRegTyInt; + regno_t vRegNO = + cgFunc.NewVReg(regTy, srcRegOpnd.GetSize() <= k32BitSize ? k4ByteSize : k8ByteSize); + /* generate a new vreg, check if the size of DataInfo is big enough */ + if (vRegNO >= cgFunc.GetRD()->GetRegSize(*strInsn.GetBB())) { + cgFunc.GetRD()->EnlargeRegCapacity(vRegNO); + } + vregOpnd = &cgFunc.CreateVirtualRegisterOperand(vRegNO); + MOperator newMop = SelectMovMop(resRegOpnd.IsOfFloatOrSIMDClass(), resRegOpnd.GetSize() == k64BitSize); + newMovInsn = &cgFunc.GetInsnBuilder()->BuildInsn(newMop, *vregOpnd, srcRegOpnd); + newMovInsn->SetId(strInsn.GetId() + memSeq + 1); + strInsn.GetBB()->InsertInsnAfter(strInsn, *newMovInsn); + str2MovMap[&strInsn][memSeq] = newMovInsn; + /* update DataInfo */ + cgFunc.GetRD()->UpdateInOut(*strInsn.GetBB(), true); + } else { + newMovInsn = str2MovMap[&strInsn][memSeq]; + vregOpnd = &static_cast(newMovInsn->GetOperand(kInsnFirstOpnd)); + } + MOperator movMop = SelectMovMop(resRegOpnd.IsOfFloatOrSIMDClass(), resRegOpnd.GetSize() == k64BitSize); + Insn &movInsn = cgFunc.GetInsnBuilder()->BuildInsn(movMop, resRegOpnd, *vregOpnd); + movInsn.SetId(ldrInsn.GetId()); + ldrInsn.GetBB()->ReplaceInsn(ldrInsn, movInsn); + if (CG_DEBUG_FUNC(cgFunc)) { + LogInfo::MapleLogger() << "replace ldrInsn:\n"; + ldrInsn.Dump(); + LogInfo::MapleLogger() << "with movInsn:\n"; + movInsn.Dump(); + } + + /* Add comment. */ + MapleString newComment = ldrInsn.GetComment(); + if (strInsn.IsStorePair()) { + newComment += "; stp-load die version."; + } else { + newComment += "; str-load die version."; + } + movInsn.SetComment(newComment); + cgFunc.GetRD()->InitGenUse(*ldrInsn.GetBB(), false); +} + +bool AArch64StoreLoadOpt::HasMemBarrier(const Insn &ldrInsn, const Insn &strInsn) const { + if (!cgFunc.GetMirModule().IsCModule()) { + return false; + } + const Insn *currInsn = strInsn.GetNext(); + while (currInsn != &ldrInsn) { + if (currInsn == nullptr) { + return false; + } + if (currInsn->IsMachineInstruction() && currInsn->IsCall()) { + return true; + } + currInsn = currInsn->GetNext(); + } + return false; +} + +/* + * Transfer: store wzr, [MEM] + * ... // May exist branches. + * load x200, [MEM] + * ==> + * OPT_VERSION_STP_ZERO / OPT_VERSION_STR_ZERO: + * store wzr, [MEM] + * ... // May exist branches. if x100 not dead here. + * mov x200, wzr + * + * Params: + * stInsn: indicate store insn. + * strSrcIdx: index of source register operand of store insn. (wzr in this example) + * memUseInsnSet: insns using memOperand + */ +void AArch64StoreLoadOpt::DoLoadZeroToMoveTransfer(const Insn &strInsn, short strSrcIdx, + const InsnSet &memUseInsnSet) const { + /* comment for strInsn should be only added once */ + for (auto *ldrInsn : memUseInsnSet) { + /* Currently we don't support useInsn is ldp insn. 
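A load-pair defines two registers, so a single register move cannot replace it; any use insn
with more than one def register is skipped below.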
*/ + if (!ldrInsn->IsLoad() || ldrInsn->GetDefRegs().size() > 1) { + continue; + } + if (HasMemBarrier(*ldrInsn, strInsn)) { + continue; + } + /* ldr reg, [mem], the index of [mem] is 1 */ + InsnSet defInsnForUseInsns = cgFunc.GetRD()->FindDefForMemOpnd(*ldrInsn, 1); + /* If load has multiple definition, continue. */ + if (defInsnForUseInsns.size() > 1) { + continue; + } + + auto &resOpnd = ldrInsn->GetOperand(0); + auto &srcOpnd = strInsn.GetOperand(static_cast(strSrcIdx)); + + if (resOpnd.GetSize() != srcOpnd.GetSize()) { + return; + } + RegOperand &resRegOpnd = static_cast(resOpnd); + MOperator movMop = SelectMovMop(resRegOpnd.IsOfFloatOrSIMDClass(), resRegOpnd.GetSize() == k64BitSize); + Insn &movInsn = cgFunc.GetInsnBuilder()->BuildInsn(movMop, resOpnd, srcOpnd); + movInsn.SetId(ldrInsn->GetId()); + ldrInsn->GetBB()->ReplaceInsn(*ldrInsn, movInsn); + + /* Add comment. */ + MapleString newComment = ldrInsn->GetComment(); + newComment += ", str-load zero version"; + movInsn.SetComment(newComment); + } +} + +bool AArch64StoreLoadOpt::CheckStoreOpCode(MOperator opCode) const { + switch (opCode) { + case MOP_wstr: + case MOP_xstr: + case MOP_sstr: + case MOP_dstr: + case MOP_wstp: + case MOP_xstp: + case MOP_sstp: + case MOP_dstp: + case MOP_wstrb: + case MOP_wstrh: + return true; + default: + return false; + } +} + +void AArch64StoreLoadOpt::MemPropInit() { + propMode = kUndef; + amount = 0; + removeDefInsn = false; +} + +bool AArch64StoreLoadOpt::CheckReplaceReg(Insn &defInsn, Insn &currInsn, InsnSet &replaceRegDefSet, + regno_t replaceRegNo) { + if (replaceRegDefSet.empty()) { + return true; + } + if (defInsn.GetBB() == currInsn.GetBB()) { + /* check replace reg def between defInsn and currInsn */ + Insn *tmpInsn = defInsn.GetNext(); + while (tmpInsn != nullptr && tmpInsn != &currInsn) { + if (replaceRegDefSet.find(tmpInsn) != replaceRegDefSet.end()) { + return false; + } + tmpInsn = tmpInsn->GetNext(); + } + } else { + regno_t defRegno = static_cast(defInsn.GetOperand(kInsnFirstOpnd)).GetRegisterNumber(); + if (defRegno == replaceRegNo) { + uint32 defLoopId = 0; + uint32 curLoopId = 0; + if (defInsn.GetBB()->GetLoop()) { + defLoopId = defInsn.GetBB()->GetLoop()->GetHeader()->GetId(); + } + if (currInsn.GetBB()->GetLoop()) { + curLoopId = currInsn.GetBB()->GetLoop()->GetHeader()->GetId(); + } + if (defLoopId != curLoopId) { + return false; + } + } + AArch64ReachingDefinition *a64RD = static_cast(cgFunc.GetRD()); + if (a64RD->HasRegDefBetweenInsnGlobal(replaceRegNo, defInsn, currInsn)) { + return false; + } + } + + if (replaceRegDefSet.size() == 1 && *replaceRegDefSet.begin() == &defInsn) { + /* lsl x1, x1, #3 <-----should be removed after replace MemOperand of ldrInsn. 
+ * ldr x0, [x0,x1] <-----should be single useInsn for x1 + */ + InsnSet newRegUseSet = cgFunc.GetRD()->FindUseForRegOpnd(defInsn, replaceRegNo, true); + if (newRegUseSet.size() != k1BitSize) { + return false; + } + removeDefInsn = true; + } + return true; +} + +bool AArch64StoreLoadOpt::CheckDefInsn(Insn &defInsn, Insn &currInsn) { + if (defInsn.GetOperandSize() < k2ByteSize) { + return false; + } + for (uint32 i = kInsnSecondOpnd; i < defInsn.GetOperandSize(); i++) { + Operand &opnd = defInsn.GetOperand(i); + if (defInsn.IsMove() && opnd.IsRegister() && !cgFunc.IsSPOrFP(static_cast(opnd))) { + return false; + } + if (opnd.IsRegister()) { + RegOperand &a64OpndTmp = static_cast(opnd); + regno_t replaceRegNo = a64OpndTmp.GetRegisterNumber(); + InsnSet newRegDefSet = cgFunc.GetRD()->FindDefForRegOpnd(currInsn, replaceRegNo, true); + if (!CheckReplaceReg(defInsn, currInsn, newRegDefSet, replaceRegNo)) { + return false; + } + } + } + return true; +} + +bool AArch64StoreLoadOpt::CheckNewAmount(const Insn &insn, uint32 newAmount) { + MOperator mOp = insn.GetMachineOpcode(); + switch (mOp) { + case MOP_wstrb: + case MOP_wldrsb: + case MOP_xldrsb: + case MOP_wldrb: { + return newAmount == 0; + } + case MOP_wstrh: + case MOP_wldrsh: + case MOP_xldrsh: + case MOP_wldrh: { + return (newAmount == 0) || (newAmount == k1BitSize); + } + case MOP_wstr: + case MOP_sstr: + case MOP_wldr: + case MOP_sldr: + case MOP_xldrsw: { + return (newAmount == 0) || (newAmount == k2BitSize); + } + case MOP_qstr: + case MOP_qldr: { + return (newAmount == 0) || (newAmount == k4BitSize); + } + default: { + return (newAmount == 0) || (newAmount == k3ByteSize); + } + } +} + +bool AArch64StoreLoadOpt::CheckNewMemOffset(const Insn &insn, MemOperand *newMemOpnd, uint32 opndIdx) { + AArch64CGFunc &a64CgFunc = static_cast(cgFunc); + if ((newMemOpnd->GetOffsetImmediate() != nullptr) && + !a64CgFunc.IsOperandImmValid(insn.GetMachineOpcode(), newMemOpnd, opndIdx)) { + return false; + } + auto newAmount = newMemOpnd->ShiftAmount(); + if (!CheckNewAmount(insn, newAmount)) { + return false; + } + /* is ldp or stp, addrMode must be BOI */ + if ((opndIdx == kInsnThirdOpnd) && (newMemOpnd->GetAddrMode() != MemOperand::kAddrModeBOi)) { + return false; + } + return true; +} + +MemOperand *AArch64StoreLoadOpt::SelectReplaceExt(const Insn &defInsn, RegOperand &base, bool isSigned) { + MemOperand *newMemOpnd = nullptr; + RegOperand *newOffset = static_cast(&defInsn.GetOperand(kInsnSecondOpnd)); + CHECK_FATAL(newOffset != nullptr, "newOffset is null!"); + /* defInsn is extend, currMemOpnd is same extend or shift */ + bool propExtend = (propMode == kPropShift) || ((propMode == kPropSignedExtend) && isSigned) || + ((propMode == kPropUnsignedExtend) && !isSigned); + if (propMode == kPropOffset) { + newMemOpnd = static_cast(cgFunc).CreateMemOperand( + MemOperand::kAddrModeBOrX, k64BitSize, base, *newOffset, 0, isSigned); + } else if (propExtend) { + newMemOpnd = static_cast(cgFunc).CreateMemOperand( + MemOperand::kAddrModeBOrX, k64BitSize, base, *newOffset, amount, isSigned); + } else { + return nullptr; + } + return newMemOpnd; +} + +MemOperand *AArch64StoreLoadOpt::HandleArithImmDef(RegOperand &replace, + Operand *oldOffset, int64 defVal) { + if (propMode != kPropBase) { + return nullptr; + } + OfstOperand *newOfstImm = nullptr; + if (oldOffset == nullptr) { + newOfstImm = &static_cast(cgFunc).CreateOfstOpnd(static_cast(defVal), k32BitSize); + } else { + auto *ofstOpnd = static_cast(oldOffset); + CHECK_FATAL(ofstOpnd != nullptr, "oldOffsetOpnd is 
null"); + newOfstImm = &static_cast(cgFunc).CreateOfstOpnd( + static_cast(defVal + ofstOpnd->GetValue()), k32BitSize); + } + CHECK_FATAL(newOfstImm != nullptr, "newOffset is null!"); + return static_cast(cgFunc).CreateMemOperand(MemOperand::kAddrModeBOi, k64BitSize, + replace, nullptr, newOfstImm, nullptr); +} + +/* + * limit to adjacent bb to avoid ra spill. + */ +bool AArch64StoreLoadOpt::IsAdjacentBB(Insn &defInsn, Insn &curInsn) const { + if (defInsn.GetBB() == curInsn.GetBB()) { + return true; + } + for (auto *bb : defInsn.GetBB()->GetSuccs()) { + if (bb == curInsn.GetBB()) { + return true; + } + if (bb->IsSoloGoto()) { + BB *tragetBB = CGCFG::GetTargetSuc(*bb); + if (tragetBB == curInsn.GetBB()) { + return true; + } + } + } + return false; +} + +/* + * currAddrMode | defMop | propMode | replaceAddrMode + * ============================================================================= + * boi | addrri | base | boi, update imm(offset) + * | addrrr | base | imm(offset) == 0(nullptr) ? borx : NA + * | subrri | base | boi, update imm(offset) + * | subrrr | base | NA + * | adrpl12 | base | imm(offset) == 0(nullptr) ? literal : NA + * | movrr | base | boi + * | movri | base | NA + * | extend/lsl | base | NA + * ============================================================================= + * borx | addrri | offset | NA + * (noextend) | addrrr | offset | NA + * | subrri | offset | NA + * | subrrr | offset | NA + * | adrpl12 | offset | NA + * | movrr | offset | borx + * | movri | offset | bori + * | extend/lsl | offset | borx(with extend) + * ============================================================================= + * borx | addrri | extend | NA + * (extend) | addrrr | extend | NA + * | subrri | extend | NA + * | subrrr | extend | NA + * | adrpl12 | extend | NA + * | movrr | extend | borx + * | movri | extend | NA + * | extend/lsl | extend | borx(with extend) + * ============================================================================= + */ +MemOperand *AArch64StoreLoadOpt::SelectReplaceMem(Insn &defInsn, Insn &curInsn, + RegOperand &base, Operand *offset) { + MemOperand *newMemOpnd = nullptr; + MOperator opCode = defInsn.GetMachineOpcode(); + RegOperand *replace = static_cast(&defInsn.GetOperand(kInsnSecondOpnd)); + switch (opCode) { + case MOP_xsubrri12: + case MOP_wsubrri12: { + if (!IsAdjacentBB(defInsn, curInsn)) { + break; + } + auto &immOpnd = static_cast(defInsn.GetOperand(kInsnThirdOpnd)); + int64 defVal = -(immOpnd.GetValue()); + newMemOpnd = HandleArithImmDef(*replace, offset, defVal); + break; + } + case MOP_xaddrri12: + case MOP_waddrri12: { + auto &immOpnd = static_cast(defInsn.GetOperand(kInsnThirdOpnd)); + int64 defVal = immOpnd.GetValue(); + newMemOpnd = HandleArithImmDef(*replace, offset, defVal); + break; + } + case MOP_xaddrrr: + case MOP_waddrrr: + case MOP_dadd: + case MOP_sadd: { + if (propMode == kPropBase) { + ImmOperand *ofstOpnd = static_cast(offset); + if (!ofstOpnd->IsZero()) { + break; + } + RegOperand *newOffset = static_cast(&defInsn.GetOperand(kInsnThirdOpnd)); + CHECK_FATAL(newOffset != nullptr, "newOffset is null!"); + newMemOpnd = static_cast(cgFunc).CreateMemOperand( + MemOperand::kAddrModeBOrX, k64BitSize, *replace, newOffset, nullptr, nullptr); + } + break; + } + case MOP_xadrpl12: { + if (propMode == kPropBase) { + ImmOperand *ofstOpnd = static_cast(offset); + CHECK_FATAL(ofstOpnd != nullptr, "oldOffset is null!"); + int64 val = ofstOpnd->GetValue(); + StImmOperand *offset1 = static_cast(&defInsn.GetOperand(kInsnThirdOpnd)); + 
CHECK_FATAL(offset1 != nullptr, "offset1 is null!"); + val += offset1->GetOffset(); + OfstOperand *newOfsetOpnd = &static_cast(cgFunc).CreateOfstOpnd( + static_cast(val), k32BitSize); + CHECK_FATAL(newOfsetOpnd != nullptr, "newOfsetOpnd is null!"); + const MIRSymbol *addr = offset1->GetSymbol(); + /* do not guarantee rodata alignment at Os */ + if (CGOptions::OptimizeForSize() && addr->IsReadOnly()) { + break; + } + newMemOpnd = static_cast(cgFunc).CreateMemOperand( + MemOperand::kAddrModeLo12Li, k64BitSize, *replace, nullptr, newOfsetOpnd, addr); + } + break; + } + case MOP_xmovrr: + case MOP_wmovrr: { + if (propMode == kPropBase) { + OfstOperand *offsetTmp = static_cast(offset); + CHECK_FATAL(offsetTmp != nullptr, "newOffset is null!"); + newMemOpnd = static_cast(cgFunc).CreateMemOperand( + MemOperand::kAddrModeBOi, k64BitSize, *replace, nullptr, offsetTmp, nullptr); + } else if (propMode == kPropOffset) { /* if newOffset is SP, swap base and newOffset */ + if (cgFunc.IsSPOrFP(*replace)) { + newMemOpnd = static_cast(cgFunc).CreateMemOperand( + MemOperand::kAddrModeBOrX, k64BitSize, *replace, &base, nullptr, nullptr); + } else { + newMemOpnd = static_cast(cgFunc).CreateMemOperand( + MemOperand::kAddrModeBOrX, k64BitSize, base, replace, nullptr, nullptr); + } + } else if (propMode == kPropSignedExtend) { + newMemOpnd = static_cast(cgFunc).CreateMemOperand( + MemOperand::kAddrModeBOrX, k64BitSize, base, *replace, amount, true); + } else { + newMemOpnd = static_cast(cgFunc).CreateMemOperand( + MemOperand::kAddrModeBOrX, k64BitSize, base, *replace, amount); + } + break; + } + case MOP_wmovri32: + case MOP_xmovri64: { + if (propMode == kPropOffset) { + ImmOperand *imm = static_cast(&defInsn.GetOperand(kInsnSecondOpnd)); + OfstOperand *newOffset = &static_cast(cgFunc).CreateOfstOpnd( + static_cast(imm->GetValue()), k32BitSize); + CHECK_FATAL(newOffset != nullptr, "newOffset is null!"); + newMemOpnd = static_cast(cgFunc).CreateMemOperand( + MemOperand::kAddrModeBOi, k64BitSize, base, nullptr, newOffset, nullptr); + } + break; + } + case MOP_xlslrri6: + case MOP_wlslrri5: { + ImmOperand *imm = static_cast(&defInsn.GetOperand(kInsnThirdOpnd)); + RegOperand *newOffset = static_cast(&defInsn.GetOperand(kInsnSecondOpnd)); + CHECK_FATAL(newOffset != nullptr, "newOffset is null!"); + uint32 shift = static_cast(imm->GetValue()); + if (propMode == kPropOffset) { + if ((shift < k4ByteSize) && (shift >= 0)) { + newMemOpnd = static_cast(cgFunc).CreateMemOperand( + MemOperand::kAddrModeBOrX, k64BitSize, base, *newOffset, shift); + } + } else if (propMode == kPropShift) { + shift += amount; + if ((shift < k4ByteSize) && (shift >= 0)) { + newMemOpnd = static_cast(cgFunc).CreateMemOperand( + MemOperand::kAddrModeBOrX, k64BitSize, base, *newOffset, shift); + } + } + break; + } + case MOP_xsxtw64: { + newMemOpnd = SelectReplaceExt(defInsn, base, true); + break; + } + case MOP_xuxtw64: { + newMemOpnd = SelectReplaceExt(defInsn, base, false); + break; + } + default: + break; + } + return newMemOpnd; +} + +bool AArch64StoreLoadOpt::ReplaceMemOpnd(Insn &insn, regno_t regNo, RegOperand &base, Operand *offset) { + AArch64ReachingDefinition *a64RD = static_cast(cgFunc.GetRD()); + CHECK_FATAL((a64RD != nullptr), "check a64RD!"); + InsnSet regDefSet = a64RD->FindDefForRegOpnd(insn, regNo, true); + if (regDefSet.size() != k1BitSize) { + return false; + } + Insn *regDefInsn = *regDefSet.begin(); + if (!CheckDefInsn(*regDefInsn, insn)) { + return false; + } + MemOperand *newMemOpnd = SelectReplaceMem(*regDefInsn, insn, base, 
offset); + if (newMemOpnd == nullptr) { + return false; + } + + /* check new memOpnd */ + if (newMemOpnd->GetBaseRegister() != nullptr) { + InsnSet regDefSetForNewBase = + a64RD->FindDefForRegOpnd(insn, newMemOpnd->GetBaseRegister()->GetRegisterNumber(), true); + if (regDefSetForNewBase.size() != k1BitSize) { + return false; + } + } + if (newMemOpnd->GetIndexRegister() != nullptr) { + InsnSet regDefSetForNewIndex = + a64RD->FindDefForRegOpnd(insn, newMemOpnd->GetIndexRegister()->GetRegisterNumber(), true); + if (regDefSetForNewIndex.size() != k1BitSize) { + return false; + } + } + + uint32 opndIdx; + if (insn.IsLoadPair() || insn.IsStorePair()) { + if (newMemOpnd->GetOffsetImmediate() == nullptr) { + return false; + } + opndIdx = kInsnThirdOpnd; + } else { + opndIdx = kInsnSecondOpnd; + } + if (!CheckNewMemOffset(insn, newMemOpnd, opndIdx)) { + return false; + } + if (CG_DEBUG_FUNC(cgFunc)) { + std::cout << "replace insn:" << std::endl; + insn.Dump(); + } + insn.SetOperand(opndIdx, *newMemOpnd); + if (CG_DEBUG_FUNC(cgFunc)) { + std::cout << "new insn:" << std::endl; + insn.Dump(); + } + if (removeDefInsn) { + if (CG_DEBUG_FUNC(cgFunc)) { + std::cout << "remove insn:" << std::endl; + regDefInsn->Dump(); + } + regDefInsn->GetBB()->RemoveInsn(*regDefInsn); + } + cgFunc.GetRD()->InitGenUse(*regDefInsn->GetBB(), false); + cgFunc.GetRD()->UpdateInOut(*insn.GetBB(), false); + cgFunc.GetRD()->UpdateInOut(*insn.GetBB(), true); + return true; +} + +bool AArch64StoreLoadOpt::CanDoMemProp(const Insn *insn) { + if (!cgFunc.GetMirModule().IsCModule()) { + return false; + } + if (!insn->IsMachineInstruction()) { + return false; + } + if (insn->GetMachineOpcode() == MOP_qstr) { + return false; + } + + if (insn->IsLoad() || insn->IsStore()) { + if (insn->IsAtomic()) { + return false; + } + // It is not desired to propagate on 128bit reg with immediate offset + // which may cause linker to issue misalignment error + if (insn->IsAtomic() || insn->GetOperand(0).GetSize() == k128BitSize) { + return false; + } + MemOperand *currMemOpnd = static_cast(insn->GetMemOpnd()); + return currMemOpnd != nullptr; + } + return false; +} + +void AArch64StoreLoadOpt::SelectPropMode(const MemOperand &currMemOpnd) { + MemOperand::AArch64AddressingMode currAddrMode = currMemOpnd.GetAddrMode(); + switch (currAddrMode) { + case MemOperand::kAddrModeBOi: { + if (!currMemOpnd.IsPreIndexed() && !currMemOpnd.IsPostIndexed()) { + propMode = kPropBase; + } + break; + } + case MemOperand::kAddrModeBOrX: { + propMode = kPropOffset; + amount = currMemOpnd.ShiftAmount(); + if (currMemOpnd.GetExtendAsString() == "LSL") { + if (amount != 0) { + propMode = kPropShift; + } + break; + } else if (currMemOpnd.SignedExtend()) { + propMode = kPropSignedExtend; + } else if (currMemOpnd.UnsignedExtend()) { + propMode = kPropUnsignedExtend; + } + break; + } + default: + propMode = kUndef; + } +} + +/* + * Optimize: store x100, [MEM] + * ... // May exist branches. + * load x200, [MEM] + * ==> + * OPT_VERSION_STP_LIVE / OPT_VERSION_STR_LIVE: + * store x100, [MEM] + * ... // May exist branches. if x100 not dead here. + * mov x200, x100 + * OPT_VERSION_STP_DIE / OPT_VERSION_STR_DIE: + * store x100, [MEM] + * mov x9000(new reg), x100 + * ... // May exist branches. if x100 dead here. + * mov x200, x9000 + * + * Note: x100 may be wzr/xzr registers. 
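+ * Illustrative example of the live version (hypothetical registers and stack slot):
+ *   str w1, [sp, #16]
+ *   ...              // w1 still live
+ *   ldr w2, [sp, #16]
+ *   ==>
+ *   str w1, [sp, #16]
+ *   ...
+ *   mov w2, w1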
+ */ +void AArch64StoreLoadOpt::DoStoreLoadOpt() { + AArch64CGFunc &a64CgFunc = static_cast(cgFunc); + if (a64CgFunc.IsIntrnCallForC()) { + return; + } + FOR_ALL_BB(bb, &a64CgFunc) { + FOR_BB_INSNS_SAFE(insn, bb, next) { + MOperator mOp = insn->GetMachineOpcode(); + if (CanDoMemProp(insn)) { + MemProp(*insn); + } + if (a64CgFunc.GetMirModule().IsCModule() && cgFunc.GetRD()->OnlyAnalysisReg()) { + continue; + } + if (!insn->IsMachineInstruction() || !insn->IsStore() || !CheckStoreOpCode(mOp) || + (a64CgFunc.GetMirModule().IsCModule() && !a64CgFunc.IsAfterRegAlloc()) || + (!a64CgFunc.GetMirModule().IsCModule() && a64CgFunc.IsAfterRegAlloc())) { + continue; + } + if (insn->IsStorePair()) { + ProcessStrPair(*insn); + continue; + } + ProcessStr(*insn); + } + } +} + +/* + * PropBase: + * add/sub x1, x2, #immVal1 + * ...(no def of x2) + * ldr/str x0, [x1, #immVal2] + * ======> + * add/sub x1, x2, #immVal1 + * ... + * ldr/str x0, [x2, #(immVal1 + immVal2)/#(-immVal1 + immVal2)] + * + * PropOffset: + * sxtw x2, w2 + * lsl x1, x2, #1~3 + * ...(no def of x2) + * ldr/str x0, [x0, x1] + * ======> + * sxtw x2, w2 + * lsl x1, x2, #1~3 + * ... + * ldr/str x0, [x0, w2, sxtw 1~3] + */ +void AArch64StoreLoadOpt::MemProp(Insn &insn) { + MemPropInit(); + MemOperand *currMemOpnd = static_cast(insn.GetMemOpnd()); + SelectPropMode(*currMemOpnd); + RegOperand *base = currMemOpnd->GetBaseRegister(); + Operand *offset = currMemOpnd->GetOffset(); + bool memReplaced = false; + + if (propMode == kUndef) { + return; + } else if (propMode == kPropBase) { + ImmOperand *immOffset = static_cast(offset); + CHECK_FATAL(immOffset != nullptr, "immOffset is nullptr!"); + regno_t baseRegNo = base->GetRegisterNumber(); + memReplaced = ReplaceMemOpnd(insn, baseRegNo, *base, immOffset); + } else { + RegOperand *regOffset = static_cast(offset); + if (regOffset == nullptr) { + return; + } + regno_t offsetRegNo = regOffset->GetRegisterNumber(); + memReplaced = ReplaceMemOpnd(insn, offsetRegNo, *base, regOffset); + } + + /* if prop success, find more prop chance */ + if (memReplaced) { + MemProp(insn); + } +} + +/* + * Assume stack(FP) will not be varied out of pro/epi log + * PreIndex: + * add/sub x1, x1 #immVal1 + * ...(no def/use of x1) + * ldr/str x0, [x1] + * ======> + * ldr/str x0, [x1, #immVal1]! 
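+ * e.g. (hypothetical immediate): "sub x1, x1, #16; ...; ldr x0, [x1]" becomes "ldr x0, [x1, #-16]!"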
+ *
+ * PostIndex:
+ * ldr/str x0, [x1]
+ * ...(no def/use of x1)
+ * add/sub x1, x1, #immVal1
+ * ======>
+ * ldr/str x0, [x1], #immVal1
+ */
+void AArch64StoreLoadOpt::StrLdrIndexModeOpt(Insn &currInsn) {
+  auto *curMemopnd = static_cast<MemOperand*>(currInsn.GetMemOpnd());
+  ASSERT(curMemopnd != nullptr, " get memopnd failed");
+  /* one instruction cannot define one register twice */
+  if (!CanDoIndexOpt(*curMemopnd) || currInsn.IsRegDefined(curMemopnd->GetBaseRegister()->GetRegisterNumber())) {
+    return;
+  }
+  MemOperand *newMemopnd = SelectIndexOptMode(currInsn, *curMemopnd);
+  if (newMemopnd != nullptr) {
+    currInsn.SetMemOpnd(newMemopnd);
+  }
+}
+
+bool AArch64StoreLoadOpt::CanDoIndexOpt(const MemOperand &currMemOpnd) {
+  if (currMemOpnd.GetAddrMode() != MemOperand::kAddrModeBOi || !currMemOpnd.IsIntactIndexed()) {
+    return false;
+  }
+  ASSERT(currMemOpnd.GetOffsetImmediate() != nullptr, " kAddrModeBOi memopnd have no offset imm");
+  if (!currMemOpnd.GetOffsetImmediate()->IsImmOffset()) {
+    return false;
+  }
+  if (cgFunc.IsSPOrFP(*currMemOpnd.GetBaseRegister())) {
+    return false;
+  }
+  OfstOperand *a64Ofst = currMemOpnd.GetOffsetImmediate();
+  if (a64Ofst == nullptr) {
+    return false;
+  }
+  return a64Ofst->GetValue() == 0;
+}
+
+int64 AArch64StoreLoadOpt::GetOffsetForNewIndex(Insn &defInsn, Insn &insn,
+                                                regno_t baseRegNO, uint32 memOpndSize) const {
+  bool subMode = defInsn.GetMachineOpcode() == MOP_wsubrri12 || defInsn.GetMachineOpcode() == MOP_xsubrri12;
+  bool addMode = defInsn.GetMachineOpcode() == MOP_waddrri12 || defInsn.GetMachineOpcode() == MOP_xaddrri12;
+  if (addMode || subMode) {
+    ASSERT(static_cast<RegOperand&>(defInsn.GetOperand(kInsnFirstOpnd)).GetRegisterNumber() == baseRegNO,
+           "check def opnd");
+    auto &srcOpnd = static_cast<RegOperand&>(defInsn.GetOperand(kInsnSecondOpnd));
+    if (srcOpnd.GetRegisterNumber() == baseRegNO && defInsn.GetBB() == insn.GetBB()) {
+      int64 offsetVal = static_cast<ImmOperand&>(defInsn.GetOperand(kInsnThirdOpnd)).GetValue();
+      if (!MemOperand::IsSIMMOffsetOutOfRange(offsetVal, memOpndSize == k64BitSize, insn.IsLoadStorePair())) {
+        return subMode ? -offsetVal : offsetVal;
+      }
+    }
+  }
+  return kMaxPimm8;  /* simm max value cannot exceed pimm max value */
+}
+
+MemOperand *AArch64StoreLoadOpt::SelectIndexOptMode(Insn &insn, const MemOperand &curMemOpnd) {
+  AArch64ReachingDefinition *a64RD = static_cast<AArch64ReachingDefinition*>(cgFunc.GetRD());
+  ASSERT((a64RD != nullptr), "check a64RD!");
+  regno_t baseRegisterNO = curMemOpnd.GetBaseRegister()->GetRegisterNumber();
+  auto &a64cgFunc = static_cast<AArch64CGFunc&>(cgFunc);
+  /* pre index */
+  InsnSet regDefSet = a64RD->FindDefForRegOpnd(insn, baseRegisterNO, true);
+  if (regDefSet.size() == k1BitSize) {
+    Insn *defInsn = *regDefSet.begin();
+    int64 defOffset = GetOffsetForNewIndex(*defInsn, insn, baseRegisterNO, curMemOpnd.GetSize());
+    if (defOffset < kMaxPimm8) {
+      InsnSet tempCheck;
+      (void)a64RD->FindRegUseBetweenInsn(baseRegisterNO, defInsn->GetNext(), insn.GetPrev(), tempCheck);
+      if (tempCheck.empty() && (defInsn->GetBB() == insn.GetBB())) {
+        auto &newMem =
+            a64cgFunc.CreateMemOpnd(*curMemOpnd.GetBaseRegister(), defOffset, curMemOpnd.GetSize());
+        ASSERT(newMem.GetOffsetImmediate() != nullptr, "need offset for memopnd in this case");
+        newMem.SetIndexOpt(MemOperand::kPreIndex);
+        insn.GetBB()->RemoveInsn(*defInsn);
+        return &newMem;
+      }
+    }
+  }
+  /* post index */
+  std::vector<Insn*> refDefVec = a64RD->FindRegDefBetweenInsn(baseRegisterNO, &insn, insn.GetBB()->GetLastInsn(), true);
+  if (!refDefVec.empty()) {
+    Insn *defInsn = refDefVec.back();
+    int64 defOffset = GetOffsetForNewIndex(*defInsn, insn, baseRegisterNO, curMemOpnd.GetSize());
+    if (defOffset < kMaxPimm8) {
+      InsnSet tempCheck;
+      (void)a64RD->FindRegUseBetweenInsn(baseRegisterNO, insn.GetNext(), defInsn->GetPrev(), tempCheck);
+      if (tempCheck.empty() && (defInsn->GetBB() == insn.GetBB())) {
+        auto &newMem = a64cgFunc.CreateMemOpnd(
+            *curMemOpnd.GetBaseRegister(), defOffset, curMemOpnd.GetSize());
+        ASSERT(newMem.GetOffsetImmediate() != nullptr, "need offset for memopnd in this case");
+        newMem.SetIndexOpt(MemOperand::kPostIndex);
+        insn.GetBB()->RemoveInsn(*defInsn);
+        return &newMem;
+      }
+    }
+  }
+  return nullptr;
+}
+
+void AArch64StoreLoadOpt::ProcessStrPair(Insn &insn) {
+  const short memIndex = 2;
+  short regIndex = 0;
+  Operand &opnd = insn.GetOperand(memIndex);
+  auto &memOpnd = static_cast<MemOperand&>(opnd);
+  RegOperand *base = memOpnd.GetBaseRegister();
+  if ((base == nullptr) || !(cgFunc.GetRD()->IsFrameReg(*base))) {
+    return;
+  }
+  if (cgFunc.IsAfterRegAlloc() && !insn.IsSpillInsn()) {
+    return;
+  }
+  ASSERT(memOpnd.GetIndexRegister() == nullptr, "frame MemOperand must not be exist register index");
+  InsnSet memUseInsnSet;
+  for (int i = 0; i != kMaxMovNum; ++i) {
+    memUseInsnSet.clear();
+    if (i == 0) {
+      regIndex = 0;
+      memUseInsnSet = cgFunc.GetRD()->FindUseForMemOpnd(insn, memIndex);
+    } else {
+      regIndex = 1;
+      memUseInsnSet = cgFunc.GetRD()->FindUseForMemOpnd(insn, memIndex, true);
+    }
+    if (memUseInsnSet.empty()) {
+      return;
+    }
+    auto &regOpnd = static_cast<RegOperand&>(insn.GetOperand(static_cast<uint32>(regIndex)));
+    if (regOpnd.GetRegisterNumber() == RZR) {
+      DoLoadZeroToMoveTransfer(insn, regIndex, memUseInsnSet);
+    } else {
+      DoLoadToMoveTransfer(insn, regIndex, i, memUseInsnSet);
+    }
+  }
+}
+
+void AArch64StoreLoadOpt::ProcessStr(Insn &insn) {
+  /* str x100, [mem], mem index is 1, x100 index is 0; */
+  const short memIndex = 1;
+  const short regIndex = 0;
+  Operand &opnd = insn.GetOperand(memIndex);
+  auto &memOpnd = static_cast<MemOperand&>(opnd);
+  RegOperand *base = memOpnd.GetBaseRegister();
+  if ((base == nullptr) || !(cgFunc.GetRD()->IsFrameReg(*base))) {
+    return;
+  }
+
if (cgFunc.IsAfterRegAlloc() && !insn.IsSpillInsn()) { + return; + } + ASSERT(memOpnd.GetIndexRegister() == nullptr, "frame MemOperand must not be exist register index"); + + InsnSet memUseInsnSet = cgFunc.GetRD()->FindUseForMemOpnd(insn, memIndex); + if (memUseInsnSet.empty()) { + return; + } + + auto *regOpnd = static_cast(&insn.GetOperand(regIndex)); + CHECK_NULL_FATAL(regOpnd); + if (regOpnd->GetRegisterNumber() == RZR) { + DoLoadZeroToMoveTransfer(insn, regIndex, memUseInsnSet); + } else { + DoLoadToMoveTransfer(insn, regIndex, 0, memUseInsnSet); + } + if (cgFunc.IsAfterRegAlloc() && insn.IsSpillInsn()) { + InsnSet newmemUseInsnSet = cgFunc.GetRD()->FindUseForMemOpnd(insn, memIndex); + if (newmemUseInsnSet.empty()) { + insn.GetBB()->RemoveInsn(insn); + } + } +} +} /* namespace maplebe */ diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_tailcall.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_tailcall.cpp new file mode 100644 index 0000000000000000000000000000000000000000..df44817e5197f6325e654be6a4b4df4f4701eaa9 --- /dev/null +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_tailcall.cpp @@ -0,0 +1,116 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co., Ltd. All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan Permissive Software License v2. + * You can use this software according to the terms and conditions of the MulanPSL - 2.0. + * You may obtain a copy of MulanPSL - 2.0 at: + * + * https://opensource.org/licenses/MulanPSL-2.0 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the MulanPSL - 2.0 for more details. + */ +#include "aarch64_tailcall.h" +#include "aarch64_abi.h" +#include "aarch64_cg.h" + +namespace maplebe { +using namespace std; +const std::set kFrameWhiteListFunc { +#include "framewhitelist.def" +}; + +bool AArch64TailCallOpt::IsFuncNeedFrame(Insn &callInsn) const { + auto &target = static_cast(callInsn.GetOperand(0)); + return kFrameWhiteListFunc.find(target.GetName()) != kFrameWhiteListFunc.end(); +} + +bool AArch64TailCallOpt::InsnIsCallCand(Insn &insn) const { + return (insn.GetMachineOpcode() == MOP_xbr || + insn.GetMachineOpcode() == MOP_xblr || + insn.GetMachineOpcode() == MOP_xbl || + insn.GetMachineOpcode() == MOP_xuncond); +} + +bool AArch64TailCallOpt::InsnIsLoadPair(Insn &insn) const { + return (insn.GetMachineOpcode() == MOP_xldr || + insn.GetMachineOpcode() == MOP_xldp || + insn.GetMachineOpcode() == MOP_dldr || + insn.GetMachineOpcode() == MOP_dldp); +} + +bool AArch64TailCallOpt::InsnIsMove(Insn &insn) const { + return (insn.GetMachineOpcode() == MOP_wmovrr || + insn.GetMachineOpcode() == MOP_xmovrr); +} + +bool AArch64TailCallOpt::InsnIsIndirectCall(Insn &insn) const { + return (insn.GetMachineOpcode() == MOP_xblr); +} + +bool AArch64TailCallOpt::InsnIsCall(Insn &insn) const { + return (insn.GetMachineOpcode() == MOP_xbl); +} + +bool AArch64TailCallOpt::InsnIsUncondJump(Insn &insn) const { + return (insn.GetMachineOpcode() == MOP_xuncond); +} + +bool AArch64TailCallOpt::InsnIsAddWithRsp(Insn &insn) const { + if (insn.GetMachineOpcode() == MOP_xaddrri12 || insn.GetMachineOpcode() == MOP_xaddrri24) { + RegOperand ® = static_cast(insn.GetOperand(0)); + if (reg.GetRegisterNumber() == RSP) { + return true; + } + } + return false; +} + +bool AArch64TailCallOpt::OpndIsStackRelatedReg(RegOperand &opnd) const { + return (opnd.GetRegisterNumber() == 
R29 || opnd.GetRegisterNumber() == R31 || opnd.GetRegisterNumber() == RSP); +} + +bool AArch64TailCallOpt::OpndIsR0Reg(RegOperand &opnd) const { + return (opnd.GetRegisterNumber() == R0); +} + +bool AArch64TailCallOpt::OpndIsCalleeSaveReg(RegOperand &opnd) const { + return AArch64Abi::IsCalleeSavedReg(static_cast(opnd.GetRegisterNumber())); +} + +bool AArch64TailCallOpt::IsAddOrSubOp(MOperator mOp) const { + switch (mOp) { + case MOP_xaddrrr: + case MOP_xaddrrrs: + case MOP_xxwaddrrre: + case MOP_xaddrri24: + case MOP_xaddrri12: + case MOP_xsubrrr: + case MOP_xsubrrrs: + case MOP_xxwsubrrre: + case MOP_xsubrri12: + return true; + default: + return false; + } +} + +void AArch64TailCallOpt::ReplaceInsnMopWithTailCall(Insn &insn) { + MOperator insnMop = insn.GetMachineOpcode(); + switch (insnMop) { + case MOP_xbl: { + insn.SetMOP(AArch64CG::kMd[MOP_tail_call_opt_xbl]); + break; + } + case MOP_xblr: { + insn.SetMOP(AArch64CG::kMd[MOP_tail_call_opt_xblr]); + break; + } + default: + CHECK_FATAL(false, "Internal error."); + break; + } +} +} /* namespace maplebe */ diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_utils.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_utils.cpp new file mode 100644 index 0000000000000000000000000000000000000000..4de26aa51e2bcbe651ccb8f03b76a516564c7511 --- /dev/null +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_utils.cpp @@ -0,0 +1,67 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "aarch64_utils.h" +#include "cg_option.h" + +namespace maplebe { + +MemOperand *GetOrCreateMemOperandForNewMOP(CGFunc &cgFunc, + const Insn &loadIns, + MOperator newLoadMop) { + MemPool &memPool = *cgFunc.GetMemoryPool(); + auto *memOp = static_cast(loadIns.GetMemOpnd()); + MOperator loadMop = loadIns.GetMachineOpcode(); + + ASSERT(loadIns.IsLoad() && AArch64CG::kMd[newLoadMop].IsLoad(), + "ins and Mop must be load"); + + MemOperand *newMemOp = memOp; + + uint32 memSize = AArch64CG::kMd[loadMop].GetOperandSize(); + uint32 newMemSize = AArch64CG::kMd[newLoadMop].GetOperandSize(); + if (newMemSize == memSize) { + // if sizes are the same just return old memory operand + return newMemOp; + } + + newMemOp = memOp->Clone(memPool); + newMemOp->SetSize(newMemSize); + + if (!CGOptions::IsBigEndian()) { + return newMemOp; + } + + // for big-endian it's necessary to adjust offset if it's present + if (memOp->GetAddrMode() != MemOperand::kAddrModeBOi || + newMemSize > memSize) { + // currently, it's possible to adjust an offset only for immediate offset + // operand if new size is less than the original one + return nullptr; + } + + auto *newOffOp = static_cast( + memOp->GetOffsetImmediate()->Clone(memPool)); + + newOffOp->AdjustOffset(static_cast((memSize - newMemSize) >> kLog2BitsPerByte)); + newMemOp->SetOffsetOperand(*newOffOp); + + ASSERT(memOp->IsOffsetMisaligned(memSize) || + !newMemOp->IsOffsetMisaligned(newMemSize), + "New offset value is misaligned!"); + + return newMemOp; +} + +} // namespace maplebe diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_validbit_opt.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_validbit_opt.cpp new file mode 100644 index 0000000000000000000000000000000000000000..39e0e0b8e582a49f93c85f048e3e94f3c737a668 --- /dev/null +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_validbit_opt.cpp @@ -0,0 +1,621 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "aarch64_validbit_opt.h" +#include "aarch64_cg.h" + +namespace maplebe { +void AArch64ValidBitOpt::DoOpt(BB &bb, Insn &insn) { + MOperator curMop = insn.GetMachineOpcode(); + switch (curMop) { + case MOP_wandrri12: + case MOP_xandrri13: { + Optimize(bb, insn); + break; + } + case MOP_xuxtb32: + case MOP_xuxth32: + case MOP_xsxtw64: + case MOP_wubfxrri5i5: + case MOP_xubfxrri6i6: + case MOP_wsbfxrri5i5: + case MOP_xsbfxrri6i6: { + Optimize(bb, insn); + break; + } + case MOP_wcsetrc: + case MOP_xcsetrc: { + Optimize(bb, insn); + break; + } + case MOP_bge: + case MOP_blt: { + Optimize(bb, insn); + break; + } + default: + break; + } +} + +void AArch64ValidBitOpt::SetValidBits(Insn &insn) { + MOperator mop = insn.GetMachineOpcode(); + switch (mop) { + case MOP_wuxtb_vb: + case MOP_wuxth_vb: { + auto &dstOpnd = static_cast(insn.GetOperand(kInsnFirstOpnd)); + auto &srcOpnd = static_cast(insn.GetOperand(kInsnSecondOpnd)); + uint32 newVB = (mop == MOP_wuxtb_vb ? 
k8BitSize : k16BitSize); + dstOpnd.SetValidBitsNum(newVB); + MOperator recoverMop = (srcOpnd.GetSize() == k32BitSize ? MOP_wmovrr : MOP_xmovrr); + insn.SetMOP(AArch64CG::kMd[recoverMop]); + break; + } + case MOP_wcsetrc: + case MOP_xcsetrc: { + auto &dstOpnd = static_cast(insn.GetOperand(kInsnFirstOpnd)); + dstOpnd.SetValidBitsNum(k1BitSize); + break; + } + case MOP_wmovri32: + case MOP_xmovri64: { + Operand &srcOpnd = insn.GetOperand(kInsnSecondOpnd); + ASSERT(srcOpnd.IsIntImmediate(), "must be ImmOperand"); + auto &immOpnd = static_cast(srcOpnd); + auto &dstOpnd = static_cast(insn.GetOperand(kInsnFirstOpnd)); + dstOpnd.SetValidBitsNum(GetImmValidBit(immOpnd.GetValue(), dstOpnd.GetSize())); + break; + } + case MOP_xmovrr: + case MOP_wmovrr: { + auto &dstOpnd = static_cast(insn.GetOperand(kInsnFirstOpnd)); + auto &srcOpnd = static_cast(insn.GetOperand(kInsnSecondOpnd)); + if (srcOpnd.IsPhysicalRegister() || dstOpnd.IsPhysicalRegister()) { + return; + } + if (srcOpnd.GetRegisterNumber() == RZR) { + srcOpnd.SetValidBitsNum(k1BitSize); + } + if (!(dstOpnd.GetSize() == k64BitSize && srcOpnd.GetSize() == k32BitSize) && + !(dstOpnd.GetSize() == k32BitSize && srcOpnd.GetSize() == k64BitSize)) { + dstOpnd.SetValidBitsNum(srcOpnd.GetValidBitsNum()); + } + break; + } + case MOP_wlsrrri5: + case MOP_xlsrrri6: { + Operand &opnd = insn.GetOperand(kInsnThirdOpnd); + ASSERT(opnd.IsIntImmediate(), "must be ImmOperand"); + auto shiftBits = static_cast(static_cast(opnd).GetValue()); + auto &dstOpnd = static_cast(insn.GetOperand(kInsnFirstOpnd)); + auto &srcOpnd = static_cast(insn.GetOperand(kInsnSecondOpnd)); + if ((static_cast(srcOpnd.GetValidBitsNum()) - shiftBits) <= 0) { + dstOpnd.SetValidBitsNum(k1BitSize); + } else { + dstOpnd.SetValidBitsNum(srcOpnd.GetValidBitsNum() - shiftBits); + } + break; + } + case MOP_wlslrri5: + case MOP_xlslrri6: { + Operand &opnd = insn.GetOperand(kInsnThirdOpnd); + ASSERT(opnd.IsIntImmediate(), "must be ImmOperand"); + auto shiftBits = static_cast(static_cast(opnd).GetValue()); + auto &dstOpnd = static_cast(insn.GetOperand(kInsnFirstOpnd)); + auto &srcOpnd = static_cast(insn.GetOperand(kInsnSecondOpnd)); + uint32 newVB = ((srcOpnd.GetValidBitsNum() + shiftBits) > srcOpnd.GetSize()) ? + srcOpnd.GetSize() : (srcOpnd.GetValidBitsNum() + shiftBits); + dstOpnd.SetValidBitsNum(newVB); + break; + } + case MOP_wasrrri5: + case MOP_xasrrri6: { + auto &srcOpnd = static_cast(insn.GetOperand(kInsnSecondOpnd)); + if ((mop == MOP_wasrrri5 && srcOpnd.GetValidBitsNum() < k32BitSize) || + (mop == MOP_xasrrri6 && srcOpnd.GetValidBitsNum() < k64BitSize)) { + Operand &opnd = insn.GetOperand(kInsnThirdOpnd); + ASSERT(opnd.IsIntImmediate(), "must be ImmOperand"); + auto shiftBits = static_cast(static_cast(opnd).GetValue()); + auto &dstOpnd = static_cast(insn.GetOperand(kInsnFirstOpnd)); + if ((static_cast(srcOpnd.GetValidBitsNum()) - shiftBits) <= 0) { + dstOpnd.SetValidBitsNum(k1BitSize); + } else { + dstOpnd.SetValidBitsNum(srcOpnd.GetValidBitsNum() - shiftBits); + } + } + break; + } + case MOP_xuxtb32: + case MOP_xuxth32: { + auto &dstOpnd = static_cast(insn.GetOperand(kInsnFirstOpnd)); + auto &srcOpnd = static_cast(insn.GetOperand(kInsnSecondOpnd)); + uint32 srcVB = srcOpnd.GetValidBitsNum(); + uint32 newVB = dstOpnd.GetValidBitsNum(); + newVB = (mop == MOP_xuxtb32) ? ((srcVB < k8BitSize) ? srcVB : k8BitSize) : newVB; + newVB = (mop == MOP_xuxth32) ? ((srcVB < k16BitSize) ? 
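+          /* e.g. srcVB == 5 keeps 5 valid bits, while srcVB == 20 is clamped to 16 for
+           * uxth (and to 8 for uxtb above): the extension can only narrow the range. */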
srcVB : k16BitSize) : newVB; + dstOpnd.SetValidBitsNum(newVB); + break; + } + case MOP_wubfxrri5i5: + case MOP_xubfxrri6i6: { + auto &dstOpnd = static_cast(insn.GetOperand(kInsnFirstOpnd)); + auto &widthOpnd = static_cast(insn.GetOperand(kInsnFourthOpnd)); + dstOpnd.SetValidBitsNum(static_cast(widthOpnd.GetValue())); + break; + } + case MOP_wldrb: + case MOP_wldrh: { + auto &dstOpnd = static_cast(insn.GetOperand(kInsnFirstOpnd)); + uint32 newVB = (mop == MOP_wldrb) ? k8BitSize : k16BitSize; + dstOpnd.SetValidBitsNum(newVB); + break; + } + case MOP_wandrrr: + case MOP_xandrrr: { + auto &dstOpnd = static_cast(insn.GetOperand(kInsnFirstOpnd)); + uint32 src1VB = static_cast(insn.GetOperand(kInsnSecondOpnd)).GetValidBitsNum(); + uint32 src2VB = static_cast(insn.GetOperand(kInsnThirdOpnd)).GetValidBitsNum(); + uint32 newVB = (src1VB <= src2VB ? src1VB : src2VB); + dstOpnd.SetValidBitsNum(newVB); + break; + } + case MOP_wandrri12: + case MOP_xandrri13: { + auto &dstOpnd = static_cast(insn.GetOperand(kInsnFirstOpnd)); + auto &immOpnd = static_cast(insn.GetOperand(kInsnThirdOpnd)); + uint32 src1VB = static_cast(insn.GetOperand(kInsnSecondOpnd)).GetValidBitsNum(); + uint32 src2VB = GetImmValidBit(immOpnd.GetValue(), dstOpnd.GetSize()); + uint32 newVB = (src1VB <= src2VB ? src1VB : src2VB); + dstOpnd.SetValidBitsNum(newVB); + break; + } + case MOP_wiorrrr: + case MOP_xiorrrr: { + auto &dstOpnd = static_cast(insn.GetOperand(kInsnFirstOpnd)); + uint32 src1VB = static_cast(insn.GetOperand(kInsnSecondOpnd)).GetValidBitsNum(); + uint32 src2VB = static_cast(insn.GetOperand(kInsnThirdOpnd)).GetValidBitsNum(); + uint32 newVB = (src1VB >= src2VB ? src1VB : src2VB); + dstOpnd.SetValidBitsNum(newVB); + break; + } + case MOP_wiorrri12: + case MOP_xiorrri13: { + auto &dstOpnd = static_cast(insn.GetOperand(kInsnFirstOpnd)); + auto &immOpnd = static_cast(insn.GetOperand(kInsnThirdOpnd)); + uint32 src1VB = static_cast(insn.GetOperand(kInsnSecondOpnd)).GetValidBitsNum(); + uint32 src2VB = GetImmValidBit(immOpnd.GetValue(), dstOpnd.GetSize()); + uint32 newVB = (src1VB >= src2VB ? src1VB : src2VB); + dstOpnd.SetValidBitsNum(newVB); + break; + } + case MOP_wrevrr16: { + auto &dstOpnd = static_cast(insn.GetOperand(kInsnFirstOpnd)); + auto &srcOpnd = static_cast(insn.GetOperand(kInsnSecondOpnd)); + if (srcOpnd.GetValidBitsNum() <= k16BitSize) { + dstOpnd.SetValidBitsNum(k16BitSize); + } + break; + } + default: + break; + } +} + +bool AArch64ValidBitOpt::SetPhiValidBits(Insn &insn) { + Operand &defOpnd = insn.GetOperand(kInsnFirstOpnd); + ASSERT(defOpnd.IsRegister(), "expect register"); + auto &defRegOpnd = static_cast(defOpnd); + Operand &phiOpnd = insn.GetOperand(kInsnSecondOpnd); + ASSERT(phiOpnd.IsPhi(), "expect phiList"); + auto &phiList = static_cast(phiOpnd); + int32 maxVB = -1; + for (auto phiOpndIt : phiList.GetOperands()) { + if (phiOpndIt.second != nullptr) { + maxVB = (maxVB < static_cast(phiOpndIt.second->GetValidBitsNum())) ? 
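+          /* the merged value may come from any incoming version, so the phi's valid
+           * width is the maximum over all of them (a conservative upper bound). */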
+ static_cast(phiOpndIt.second->GetValidBitsNum()) : maxVB; + } + } + if (maxVB >= static_cast(k0BitSize) && static_cast(maxVB) != defRegOpnd.GetValidBitsNum()) { + defRegOpnd.SetValidBitsNum(static_cast(maxVB)); + return true; + } + return false; +} + +static bool IsZeroRegister(const Operand &opnd) { + if (!opnd.IsRegister()) { + return false; + } + const RegOperand *regOpnd = static_cast(&opnd); + return regOpnd->GetRegisterNumber() == RZR; +} + +bool AndValidBitPattern::CheckImmValidBit(int64 andImm, uint32 andImmVB, int64 shiftImm) const { + if ((__builtin_ffs(static_cast(andImm)) - 1 == shiftImm) && + ((andImm >> shiftImm) == ((1 << (andImmVB - shiftImm)) - 1))) { + return true; + } + return false; +} + +bool AndValidBitPattern::CheckCondition(Insn &insn) { + MOperator mOp = insn.GetMachineOpcode(); + if (mOp == MOP_wandrri12) { + newMop = MOP_wmovrr; + } else if (mOp == MOP_xandrri13) { + newMop = MOP_xmovrr; + } + if (newMop == MOP_undef) { + return false; + } + CHECK_FATAL(insn.GetOperand(kInsnFirstOpnd).IsRegister(), "must be register!"); + CHECK_FATAL(insn.GetOperand(kInsnSecondOpnd).IsRegister(), "must be register!"); + CHECK_FATAL(insn.GetOperand(kInsnThirdOpnd).IsImmediate(), "must be imm!"); + desReg = static_cast(&insn.GetOperand(kInsnFirstOpnd)); + srcReg = static_cast(&insn.GetOperand(kInsnSecondOpnd)); + auto &andImm = static_cast(insn.GetOperand(kInsnThirdOpnd)); + int64 immVal = andImm.GetValue(); + uint32 validBit = srcReg->GetValidBitsNum(); + if (validBit == k8BitSize && immVal == 0xFF) { + return true; + } else if (validBit == k16BitSize && immVal == 0xFFFF) { + return true; + } + /* and R287[32], R286[64], #255 */ + if ((desReg->GetSize() < srcReg->GetSize()) && (srcReg->GetValidBitsNum() > desReg->GetSize())) { + return false; + } + InsnSet useInsns = GetAllUseInsn(*desReg); + if (useInsns.size() == 1) { + Insn *useInsn = *useInsns.begin(); + MOperator useMop = useInsn->GetMachineOpcode(); + if (useMop != MOP_wasrrri5 && useMop != MOP_xasrrri6 && useMop != MOP_wlsrrri5 && useMop != MOP_xlsrrri6) { + return false; + } + Operand &shiftOpnd = useInsn->GetOperand(kInsnThirdOpnd); + CHECK_FATAL(shiftOpnd.IsImmediate(), "must be immediate"); + int64 shiftImm = static_cast(shiftOpnd).GetValue(); + uint32 andImmVB = ValidBitOpt::GetImmValidBit(andImm.GetValue(), desReg->GetSize()); + if ((srcReg->GetValidBitsNum() == andImmVB) && CheckImmValidBit(andImm.GetValue(), andImmVB, shiftImm)) { + return true; + } + } + return false; +} + +void AndValidBitPattern::Run(BB &bb, Insn &insn) { + if (!CheckCondition(insn)) { + return; + } + Insn &newInsn = cgFunc->GetInsnBuilder()->BuildInsn(newMop, *desReg, *srcReg); + bb.ReplaceInsn(insn, newInsn); + /* update ssa info */ + ssaInfo->ReplaceInsn(insn, newInsn); + ssaInfo->InsertSafePropInsn(newInsn.GetId()); + /* dump pattern info */ + if (CG_VALIDBIT_OPT_DUMP) { + std::vector prevs; + prevs.emplace_back(&insn); + DumpAfterPattern(prevs, &insn, &newInsn); + } +} + +bool ExtValidBitPattern::CheckCondition(Insn &insn) { + Operand &dstOpnd = insn.GetOperand(kInsnFirstOpnd); + Operand &srcOpnd = insn.GetOperand(kInsnSecondOpnd); + CHECK_FATAL(dstOpnd.IsRegister() && srcOpnd.IsRegister(), "must be register"); + MOperator mOp = insn.GetMachineOpcode(); + switch (mOp) { + case MOP_xuxtb32: + case MOP_xuxth32: { + if (static_cast(dstOpnd).GetValidBitsNum() != + static_cast(srcOpnd).GetValidBitsNum()) { + return false; + } + newMop = MOP_wmovrr; + break; + } + case MOP_xsxtw64: { + if (static_cast(srcOpnd).GetValidBitsNum() >= k32BitSize) { 
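+        /* bit 31 or a higher bit may still be set, so the sign extension remains
+         * meaningful and must not be rewritten into a plain mov. */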
+ return false; + } + newMop = MOP_xmovrr; + break; + } + case MOP_wubfxrri5i5: + case MOP_xubfxrri6i6: + case MOP_wsbfxrri5i5: + case MOP_xsbfxrri6i6: { + Operand &immOpnd1 = insn.GetOperand(kInsnThirdOpnd); + Operand &immOpnd2 = insn.GetOperand(kInsnFourthOpnd); + CHECK_FATAL(immOpnd1.IsImmediate(), "must be immediate"); + CHECK_FATAL(immOpnd2.IsImmediate(), "must be immediate"); + int64 lsb = static_cast(immOpnd1).GetValue(); + int64 width = static_cast(immOpnd2).GetValue(); + if (lsb != 0 || static_cast(srcOpnd).GetValidBitsNum() > width) { + return false; + } + if ((mOp == MOP_wsbfxrri5i5 || mOp == MOP_xsbfxrri6i6) && + static_cast(srcOpnd).GetValidBitsNum() == width) { + return false; + } + if (mOp == MOP_wubfxrri5i5 || mOp == MOP_wsbfxrri5i5) { + newMop = MOP_wmovrr; + } else if (mOp == MOP_xubfxrri6i6 || mOp == MOP_xsbfxrri6i6) { + newMop = MOP_xmovrr; + } + break; + } + default: + return false; + } + newDstOpnd = &static_cast(dstOpnd); + newSrcOpnd = &static_cast(srcOpnd); + return true; +} + +void ExtValidBitPattern::Run(BB &bb, Insn &insn) { + if (!CheckCondition(insn)) { + return; + } + MOperator mOp = insn.GetMachineOpcode(); + switch (mOp) { + case MOP_xuxtb32: + case MOP_xuxth32: + case MOP_xsxtw64: { + insn.SetMOP(AArch64CG::kMd[newMop]); + if (newDstOpnd->GetSize() > newSrcOpnd->GetSize()) { + ssaInfo->InsertSafePropInsn(insn.GetId()); + } + break; + } + case MOP_wubfxrri5i5: + case MOP_xubfxrri6i6: + case MOP_wsbfxrri5i5: + case MOP_xsbfxrri6i6: { + Insn &newInsn = cgFunc->GetInsnBuilder()->BuildInsn(newMop, *newDstOpnd, *newSrcOpnd); + bb.ReplaceInsn(insn, newInsn); + /* update ssa info */ + ssaInfo->ReplaceInsn(insn, newInsn); + if (newDstOpnd->GetSize() > newSrcOpnd->GetSize() || newDstOpnd->GetSize() != newDstOpnd->GetValidBitsNum()) { + ssaInfo->InsertSafePropInsn(newInsn.GetId()); + } + /* dump pattern info */ + if (CG_VALIDBIT_OPT_DUMP) { + std::vector prevs; + prevs.emplace_back(&insn); + DumpAfterPattern(prevs, &insn, &newInsn); + } + } + default: + return; + } +} + +bool CmpCsetVBPattern::IsContinuousCmpCset(const Insn &curInsn) const { + auto &csetDstReg = static_cast(curInsn.GetOperand(kInsnFirstOpnd)); + CHECK_FATAL(csetDstReg.IsSSAForm(), "dstOpnd must be ssa form"); + VRegVersion *dstVersion = ssaInfo->FindSSAVersion(csetDstReg.GetRegisterNumber()); + ASSERT(dstVersion != nullptr, "find vRegVersion failed"); + for (auto useDUInfoIt : dstVersion->GetAllUseInsns()) { + if (useDUInfoIt.second == nullptr) { + continue; + } + Insn *useInsn = useDUInfoIt.second->GetInsn(); + if (useInsn == nullptr) { + continue; + } + MOperator useMop = useInsn->GetMachineOpcode(); + if (useMop == MOP_wcmpri || useMop == MOP_xcmpri) { + auto &ccDstReg = static_cast(useInsn->GetOperand(kInsnFirstOpnd)); + CHECK_FATAL(ccDstReg.IsSSAForm(), "dstOpnd must be ssa form"); + VRegVersion *ccDstVersion = ssaInfo->FindSSAVersion(ccDstReg.GetRegisterNumber()); + ASSERT(ccDstVersion != nullptr, "find vRegVersion failed"); + for (auto ccUseDUInfoIt : ccDstVersion->GetAllUseInsns()) { + if (ccUseDUInfoIt.second == nullptr) { + continue; + } + Insn *ccUseInsn = ccUseDUInfoIt.second->GetInsn(); + if (ccUseInsn == nullptr) { + continue; + } + MOperator ccUseMop = ccUseInsn->GetMachineOpcode(); + if (ccUseMop == MOP_wcsetrc || ccUseMop == MOP_xcsetrc) { + return true; + } + } + } + } + return false; +} + +bool CmpCsetVBPattern::OpndDefByOneValidBit(const Insn &defInsn) const { + if (defInsn.IsPhi()) { + return (static_cast(cmpInsn->GetOperand(kInsnSecondOpnd)).GetValidBitsNum() == k1BitSize) || + 
(static_cast(cmpInsn->GetOperand(kInsnSecondOpnd)).GetValidBitsNum() == k0BitSize); + } + MOperator defMop = defInsn.GetMachineOpcode(); + switch (defMop) { + case MOP_wcsetrc: + case MOP_xcsetrc: + return true; + case MOP_wmovri32: + case MOP_xmovri64: { + Operand &defOpnd = defInsn.GetOperand(kInsnSecondOpnd); + ASSERT(defOpnd.IsIntImmediate(), "expects ImmOperand"); + auto &defConst = static_cast(defOpnd); + int64 defConstValue = defConst.GetValue(); + return (defConstValue == 0 || defConstValue == 1); + } + case MOP_xmovrr: + case MOP_wmovrr: + return IsZeroRegister(defInsn.GetOperand(kInsnSecondOpnd)); + case MOP_wlsrrri5: + case MOP_xlsrrri6: { + Operand &opnd2 = defInsn.GetOperand(kInsnThirdOpnd); + ASSERT(opnd2.IsIntImmediate(), "expects ImmOperand"); + auto &opndImm = static_cast(opnd2); + int64 shiftBits = opndImm.GetValue(); + return ((defMop == MOP_wlsrrri5 && shiftBits == (k32BitSize - 1)) || + (defMop == MOP_xlsrrri6 && shiftBits == (k64BitSize - 1))); + } + default: + return false; + } +} + +bool CmpCsetVBPattern::CheckCondition(Insn &csetInsn) { + MOperator curMop = csetInsn.GetMachineOpcode(); + if (curMop != MOP_wcsetrc && curMop != MOP_xcsetrc) { + return false; + } + /* combine [continuous cmp & cset] first, to eliminate more insns */ + if (IsContinuousCmpCset(csetInsn)) { + return false; + } + RegOperand &ccReg = static_cast(csetInsn.GetOperand(kInsnThirdOpnd)); + regno_t ccRegNo = ccReg.GetRegisterNumber(); + cmpInsn = ssaInfo->GetDefInsn(ccReg); + CHECK_NULL_FATAL(cmpInsn); + MOperator mop = cmpInsn->GetMachineOpcode(); + if ((mop != MOP_wcmpri) && (mop != MOP_xcmpri)) { + return false; + } + VRegVersion *ccRegVersion = ssaInfo->FindSSAVersion(ccRegNo); + CHECK_NULL_FATAL(ccRegVersion); + if (ccRegVersion->GetAllUseInsns().size() > k1BitSize) { + return false; + } + Operand &cmpSecondOpnd = cmpInsn->GetOperand(kInsnThirdOpnd); + CHECK_FATAL(cmpSecondOpnd.IsIntImmediate(), "expects ImmOperand"); + auto &cmpConst = static_cast(cmpSecondOpnd); + cmpConstVal = cmpConst.GetValue(); + /* get ImmOperand, must be 0 or 1 */ + if ((cmpConstVal != 0) && (cmpConstVal != k1BitSize)) { + return false; + } + Operand &cmpFirstOpnd = cmpInsn->GetOperand(kInsnSecondOpnd); + CHECK_FATAL(cmpFirstOpnd.IsRegister(), "cmpFirstOpnd must be register!"); + RegOperand &cmpReg = static_cast(cmpFirstOpnd); + Insn *defInsn = ssaInfo->GetDefInsn(cmpReg); + if (defInsn == nullptr) { + return false; + } + if (defInsn->GetMachineOpcode() == MOP_wmovrr || defInsn->GetMachineOpcode() == MOP_xmovrr) { + auto &srcOpnd = static_cast(defInsn->GetOperand(kInsnSecondOpnd)); + if (!srcOpnd.IsVirtualRegister()) { + return false; + } + } + return ((cmpReg.GetValidBitsNum() == k1BitSize) || (cmpReg.GetValidBitsNum() == k0BitSize) || + OpndDefByOneValidBit(*defInsn)); +} + +void CmpCsetVBPattern::Run(BB &bb, Insn &csetInsn) { + if (!CheckCondition(csetInsn)) { + return; + } + Operand &csetFirstOpnd = csetInsn.GetOperand(kInsnFirstOpnd); + Operand &cmpFirstOpnd = cmpInsn->GetOperand(kInsnSecondOpnd); + auto &cond = static_cast(csetInsn.GetOperand(kInsnSecondOpnd)); + Insn *newInsn = nullptr; + + /* cmpFirstOpnd == 1 */ + if ((cmpConstVal == 0 && cond.GetCode() == CC_NE) || (cmpConstVal == 1 && cond.GetCode() == CC_EQ)) { + MOperator mopCode = (cmpFirstOpnd.GetSize() == k64BitSize) ? 
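+      /* illustrative effect of this branch (register names are arbitrary):
+       *   cmp  w2, #0
+       *   cset w1, NE      ==>   mov  w1, w2
+       * which is safe because w2 is known to hold only 0 or 1 at this point. */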
MOP_xmovrr : MOP_wmovrr; + newInsn = &cgFunc->GetInsnBuilder()->BuildInsn(mopCode, csetFirstOpnd, cmpFirstOpnd); + } else if ((cmpConstVal == 1 && cond.GetCode() == CC_NE) || (cmpConstVal == 0 && cond.GetCode() == CC_EQ)) { + /* cmpFirstOpnd == 0 */ + MOperator mopCode = (cmpFirstOpnd.GetSize() == k64BitSize) ? MOP_xeorrri13 : MOP_weorrri12; + ImmOperand &one = static_cast(cgFunc)->CreateImmOperand(1, k8BitSize, false); + newInsn = &cgFunc->GetInsnBuilder()->BuildInsn(mopCode, csetFirstOpnd, cmpFirstOpnd, one); + } + if (newInsn == nullptr) { + return; + } + bb.ReplaceInsn(csetInsn, *newInsn); + ssaInfo->ReplaceInsn(csetInsn, *newInsn); + if (CG_VALIDBIT_OPT_DUMP && (newInsn != nullptr)) { + std::vector prevInsns; + prevInsns.emplace_back(cmpInsn); + prevInsns.emplace_back(&csetInsn); + DumpAfterPattern(prevInsns, newInsn, nullptr); + } +} + +void CmpBranchesPattern::SelectNewMop(MOperator mop) { + switch (mop) { + case MOP_bge: { + newMop = is64Bit ? MOP_xtbnz : MOP_wtbnz; + break; + } + case MOP_blt: { + newMop = is64Bit ? MOP_xtbz : MOP_wtbz; + break; + } + default: + break; + } +} + +bool CmpBranchesPattern::CheckCondition(Insn &insn) { + MOperator curMop = insn.GetMachineOpcode(); + if (curMop != MOP_bge && curMop != MOP_blt) { + return false; + } + auto &ccReg = static_cast(insn.GetOperand(kInsnFirstOpnd)); + prevCmpInsn = ssaInfo->GetDefInsn(ccReg); + if (prevCmpInsn == nullptr) { + return false; + } + MOperator cmpMop = prevCmpInsn->GetMachineOpcode(); + if (cmpMop != MOP_wcmpri && cmpMop != MOP_xcmpri) { + return false; + } + is64Bit = (cmpMop == MOP_xcmpri); + auto &cmpUseOpnd = static_cast(prevCmpInsn->GetOperand(kInsnSecondOpnd)); + auto &cmpImmOpnd = static_cast(prevCmpInsn->GetOperand(kInsnThirdOpnd)); + int64 cmpImmVal = cmpImmOpnd.GetValue(); + newImmVal = ValidBitOpt::GetLogValueAtBase2(cmpImmVal); + if (newImmVal < 0 || cmpUseOpnd.GetValidBitsNum() != (newImmVal + 1)) { + return false; + } + SelectNewMop(curMop); + if (newMop == MOP_undef) { + return false; + } + return true; +} + +void CmpBranchesPattern::Run(BB &bb, Insn &insn) { + if (!CheckCondition(insn)) { + return; + } + auto *aarFunc = static_cast(cgFunc); + auto &labelOpnd = static_cast(insn.GetOperand(kInsnSecondOpnd)); + ImmOperand &newImmOpnd = aarFunc->CreateImmOperand(newImmVal, k8BitSize, false); + Insn &newInsn = cgFunc->GetInsnBuilder()->BuildInsn(newMop, prevCmpInsn->GetOperand(kInsnSecondOpnd), + newImmOpnd, labelOpnd); + bb.ReplaceInsn(insn, newInsn); + /* update ssa info */ + ssaInfo->ReplaceInsn(insn, newInsn); + /* dump pattern info */ + if (CG_VALIDBIT_OPT_DUMP) { + std::vector prevs; + prevs.emplace_back(prevCmpInsn); + DumpAfterPattern(prevs, &insn, &newInsn); + } +} +} /* namespace maplebe */ + diff --git a/src/mapleall/maple_be/src/cg/aarch64/aarch64_yieldpoint.cpp b/src/mapleall/maple_be/src/cg/aarch64/aarch64_yieldpoint.cpp new file mode 100644 index 0000000000000000000000000000000000000000..d64980001498bef25d57a01e35b2eadcdabb3852 --- /dev/null +++ b/src/mapleall/maple_be/src/cg/aarch64/aarch64_yieldpoint.cpp @@ -0,0 +1,63 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "aarch64_yieldpoint.h" +#include "aarch64_cgfunc.h" + +namespace maplebe { +using namespace maple; + +void AArch64YieldPointInsertion::Run() { + InsertYieldPoint(); +} + +void AArch64YieldPointInsertion::InsertYieldPoint() const { + AArch64CGFunc *aarchCGFunc = static_cast(cgFunc); + std::string refQueueName = "Ljava_2Flang_2Fref_2FReference_3B_7C_3Cinit_3E_7C_" + "28Ljava_2Flang_2FObject_3BLjava_2Flang_2Fref_2FReferenceQueue_3B_29V"; + if (!CGOptions::IsGCOnly() && (aarchCGFunc->GetName() == refQueueName)) { + /* skip insert yieldpoint in reference constructor, avoid rc verify issue */ + ASSERT(aarchCGFunc->GetYieldPointInsn() != nullptr, "the entry yield point has been inserted"); + aarchCGFunc->GetYieldPointInsn()->GetBB()->RemoveInsn(*aarchCGFunc->GetYieldPointInsn()); + return; + } + + /* + * do not insert yieldpoint in function that not saved X30 into stack, + * because X30 will be changed after yieldpoint is taken. + */ + if (!aarchCGFunc->GetHasProEpilogue()) { + ASSERT (aarchCGFunc->GetYieldPointInsn() != nullptr, "the entry yield point has been inserted"); + aarchCGFunc->GetYieldPointInsn()->GetBB()->RemoveInsn(*aarchCGFunc->GetYieldPointInsn()); + return; + } + /* skip if no GetFirstbb(). */ + if (aarchCGFunc->GetFirstBB() == nullptr) { + return; + } + /* + * The yield point in the entry of the GetFunction() is inserted just after the initialization + * of localrefvars in HandleRCCall. + * for BBs after firstbb. + */ + for (BB *bb = aarchCGFunc->GetFirstBB()->GetNext(); bb != nullptr; bb = bb->GetNext()) { + /* insert a yieldpoint at beginning if BB is BackEdgeDest. */ + if (bb->IsBackEdgeDest()) { + aarchCGFunc->GetDummyBB()->ClearInsns(); + aarchCGFunc->GenerateYieldpoint(*aarchCGFunc->GetDummyBB()); + bb->InsertAtBeginning(*aarchCGFunc->GetDummyBB()); + } + } +} +} /* namespace maplebe */ diff --git a/src/mapleall/maple_be/src/cg/aarch64/mpl_atomic.cpp b/src/mapleall/maple_be/src/cg/aarch64/mpl_atomic.cpp new file mode 100644 index 0000000000000000000000000000000000000000..1b966afc91d26c2dface288fc3910de50e8ac66d --- /dev/null +++ b/src/mapleall/maple_be/src/cg/aarch64/mpl_atomic.cpp @@ -0,0 +1,72 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "mpl_atomic.h" +#include +#include "mpl_logging.h" + +namespace maple { +namespace { +constexpr int32 kMaxSizeOfTab = 6; +}; +MemOrd MemOrdFromU32(uint32 val) { + /* 6 is the size of tab below. 2 is memory_order_consume, it is Disabled. 
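+   * For example, val 1 maps to memory_order_relaxed and val 6 to memory_order_seq_cst,
+   * while val 2 (memory_order_consume) is rejected by the check below.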
*/ + CHECK_FATAL(val <= kMaxSizeOfTab, "Illegal number for MemOrd: %u", val); + CHECK_FATAL(val != 2, "Illegal number for MemOrd: %u", val); + static std::array tab = { + MemOrd::kNotAtomic, + MemOrd::memory_order_relaxed, + /* + * memory_order_consume Disabled. Its semantics is debatable. + * We don't support it now, but reserve the number. Use memory_order_acquire instead. + */ + MemOrd::memory_order_acquire, /* padding entry */ + MemOrd::memory_order_acquire, + MemOrd::memory_order_release, + MemOrd::memory_order_acq_rel, + MemOrd::memory_order_seq_cst, + }; + return tab[val]; +} + +bool MemOrdIsAcquire(MemOrd ord) { + static std::array tab = { + false, /* kNotAtomic */ + false, /* memory_order_relaxed */ + true, /* memory_order_consume */ + true, /* memory_order_acquire */ + false, /* memory_order_release */ + true, /* memory_order_acq_rel */ + true, /* memory_order_seq_cst */ + }; + uint32 tabIndex = static_cast(ord); + CHECK_FATAL(tabIndex <= kMaxSizeOfTab, "Illegal number for MemOrd: %u", tabIndex); + return tab[tabIndex]; +} + +bool MemOrdIsRelease(MemOrd ord) { + static std::array tab = { + false, /* kNotAtomic */ + false, /* memory_order_relaxed */ + false, /* memory_order_consume */ + false, /* memory_order_acquire */ + true, /* memory_order_release */ + true, /* memory_order_acq_rel */ + true, /* memory_order_seq_cst */ + }; + uint32 tabIndex = static_cast(ord); + CHECK_FATAL(tabIndex <= kMaxSizeOfTab, "Illegal number for MemOrd: %u", tabIndex); + return tab[tabIndex]; +} +} /* namespace maple */ diff --git a/src/mapleall/maple_be/src/cg/alignment.cpp b/src/mapleall/maple_be/src/cg/alignment.cpp new file mode 100644 index 0000000000000000000000000000000000000000..bad9a5b690e5e4f96dcb6d48fd50a60ef5ce2fdc --- /dev/null +++ b/src/mapleall/maple_be/src/cg/alignment.cpp @@ -0,0 +1,108 @@ +/* + * Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "alignment.h" +#include "optimize_common.h" +#include "cgfunc.h" +#include "cg.h" +#include "cg_option.h" + +namespace maplebe { +#define ALIGN_ANALYZE_DUMP_NEWPW CG_DEBUG_FUNC(func) + +void AlignAnalysis::AnalysisAlignment() { + FindLoopHeader(); + FindJumpTarget(); + ComputeLoopAlign(); + ComputeJumpAlign(); + if (CGOptions::DoCondBrAlign()) { + ComputeCondBranchAlign(); + } +} + +void AlignAnalysis::Dump() { + MIRSymbol *funcSt = GlobalTables::GetGsymTable().GetSymbolFromStidx(cgFunc->GetFunction().GetStIdx().Idx()); + ASSERT(funcSt != nullptr, "null ptr check"); + LogInfo::MapleLogger() << "\n********* alignment for " << funcSt->GetName() << " *********\n"; + LogInfo::MapleLogger() << "------ jumpTargetBBs: " << jumpTargetBBs.size() << " total ------\n"; + for (auto *jumpLabel : jumpTargetBBs) { + LogInfo::MapleLogger() << " === BB_" << jumpLabel->GetId() << " (" << std::hex << jumpLabel << ")" + << std::dec << " <" << jumpLabel->GetKindName(); + if (jumpLabel->GetLabIdx() != MIRLabelTable::GetDummyLabel()) { + LogInfo::MapleLogger() << "[labeled with " << jumpLabel->GetLabIdx() << "]> ===\n"; + } + if (!jumpLabel->GetPreds().empty()) { + LogInfo::MapleLogger() << "\tpreds: [ "; + for (auto *pred : jumpLabel->GetPreds()) { + LogInfo::MapleLogger() << "BB_" << pred->GetId(); + if (pred->GetLabIdx() != MIRLabelTable::GetDummyLabel()) { + LogInfo::MapleLogger() << "GetLabIdx() << ">"; + } + LogInfo::MapleLogger() << " (" << std::hex << pred << ") " << std::dec << " "; + } + LogInfo::MapleLogger() << "]\n"; + } + if (jumpLabel->GetPrev() != nullptr) { + LogInfo::MapleLogger() << "\tprev: [ "; + LogInfo::MapleLogger() << "BB_" << jumpLabel->GetPrev()->GetId(); + if (jumpLabel->GetPrev()->GetLabIdx() != MIRLabelTable::GetDummyLabel()) { + LogInfo::MapleLogger() << "GetLabIdx() << ">"; + } + LogInfo::MapleLogger() << " (" << std::hex << jumpLabel->GetPrev() << ") " << std::dec << " "; + LogInfo::MapleLogger() << "]\n"; + } + FOR_BB_INSNS_CONST(insn, jumpLabel) { + insn->Dump(); + } + } + LogInfo::MapleLogger() << "\n------ loopHeaderBBs: " << loopHeaderBBs.size() << " total ------\n"; + for (auto *loopHeader : loopHeaderBBs) { + LogInfo::MapleLogger() << " === BB_" << loopHeader->GetId() << " (" << std::hex << loopHeader << ")" + << std::dec << " <" << loopHeader->GetKindName(); + if (loopHeader->GetLabIdx() != MIRLabelTable::GetDummyLabel()) { + LogInfo::MapleLogger() << "[labeled with " << loopHeader->GetLabIdx() << "]> ===\n"; + } + LogInfo::MapleLogger() << "\tLoop Level: " << loopHeader->GetLoop()->GetLoopLevel() << "\n"; + FOR_BB_INSNS_CONST(insn, loopHeader) { + insn->Dump(); + } + } + LogInfo::MapleLogger() << "\n------ alignInfos: " << alignInfos.size() << " total ------\n"; + MapleUnorderedMap::iterator iter; + for (iter = alignInfos.begin(); iter != alignInfos.end(); ++iter) { + BB *bb = iter->first; + LogInfo::MapleLogger() << " === BB_" << bb->GetId() << " (" << std::hex << bb << ")" + << std::dec << " <" << bb->GetKindName(); + if (bb->GetLabIdx() != MIRLabelTable::GetDummyLabel()) { + LogInfo::MapleLogger() << "[labeled with " << bb->GetLabIdx() << "]> ===\n"; + } + LogInfo::MapleLogger() << "\talignPower: " << iter->second << "\n"; + } +} + +bool CgAlignAnalysis::PhaseRun(maplebe::CGFunc &func) { + if (ALIGN_ANALYZE_DUMP_NEWPW) { + DotGenerator::GenerateDot("alignanalysis", func, func.GetMirModule(), true, func.GetName()); + } + MemPool *alignMemPool = GetPhaseMemPool(); + AlignAnalysis *alignAnalysis = func.GetCG()->CreateAlignAnalysis(*alignMemPool, func); + + 
CHECK_FATAL(alignAnalysis != nullptr, "AlignAnalysis instance create failure"); + alignAnalysis->AnalysisAlignment(); + if (ALIGN_ANALYZE_DUMP_NEWPW) { + alignAnalysis->Dump(); + } + return true; +} +} /* namespace maplebe */ diff --git a/src/mapleall/maple_be/src/cg/args.cpp b/src/mapleall/maple_be/src/cg/args.cpp new file mode 100644 index 0000000000000000000000000000000000000000..b647ec327c22840814b195dfabf34b1e651c8ac1 --- /dev/null +++ b/src/mapleall/maple_be/src/cg/args.cpp @@ -0,0 +1,29 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "args.h" +#include "cg.h" +#include "cgfunc.h" + +namespace maplebe { +using namespace maple; +bool CgMoveRegArgs::PhaseRun(maplebe::CGFunc &f) { + MemPool *memPool = GetPhaseMemPool(); + MoveRegArgs *movRegArgs = nullptr; + movRegArgs = f.GetCG()->CreateMoveRegArgs(*memPool, f); + movRegArgs->Run(); + return true; +} + +} /* namespace maplebe */ diff --git a/src/mapleall/maple_be/src/cg/cfgo.cpp b/src/mapleall/maple_be/src/cg/cfgo.cpp new file mode 100644 index 0000000000000000000000000000000000000000..a9c8040b3860a6f6008b15e8e8c848362d11f7d3 --- /dev/null +++ b/src/mapleall/maple_be/src/cg/cfgo.cpp @@ -0,0 +1,915 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "cfgo.h" +#include "cgbb.h" +#include "cg.h" +#include "loop.h" +#include "mpl_logging.h" + +/* + * This phase traverses all basic block of cgFunc and finds special + * basic block patterns, like continuous fallthrough basic block, continuous + * uncondition jump basic block, unreachable basic block and empty basic block, + * then do basic mergering, basic block placement transformations, + * unnecessary jumps elimination, and remove unreachable or empty basic block. + * This optimization is done on control flow graph basis. + */ +namespace maplebe { +using namespace maple; + +#define CFGO_DUMP_NEWPM CG_DEBUG_FUNC(f) + +/* return true if to is put after from and there is no real insns between from and to, */ +bool ChainingPattern::NoInsnBetween(const BB &from, const BB &to) const { + const BB *bb = nullptr; + for (bb = from.GetNext(); bb != nullptr && bb != &to && bb != cgFunc->GetLastBB(); bb = bb->GetNext()) { + if (!bb->IsEmptyOrCommentOnly() || bb->IsUnreachable() || bb->GetKind() != BB::kBBFallthru) { + return false; + } + } + return (bb == &to); +} + +/* return true if insns in bb1 and bb2 are the same except the last goto insn. 
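+ * Non-machine instructions are skipped; the remaining instructions must match
+ * opcode by opcode and operand by operand (compared via Operand::Equals).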
*/ +bool ChainingPattern::DoSameThing(const BB &bb1, const Insn &last1, const BB &bb2, const Insn &last2) const { + const Insn *insn1 = bb1.GetFirstInsn(); + const Insn *insn2 = bb2.GetFirstInsn(); + while (insn1 != nullptr && insn1 != last1.GetNext() && insn2 != nullptr && insn2 != last2.GetNext()) { + if (!insn1->IsMachineInstruction()) { + insn1 = insn1->GetNext(); + continue; + } + if (!insn2->IsMachineInstruction()) { + insn2 = insn2->GetNext(); + continue; + } + if (insn1->GetMachineOpcode() != insn2->GetMachineOpcode()) { + return false; + } + uint32 opndNum = insn1->GetOperandSize(); + for (uint32 i = 0; i < opndNum; ++i) { + Operand &op1 = insn1->GetOperand(i); + Operand &op2 = insn2->GetOperand(i); + if (&op1 == &op2) { + continue; + } + if (!op1.Equals(op2)) { + return false; + } + } + insn1 = insn1->GetNext(); + insn2 = insn2->GetNext(); + } + return (insn1 == last1.GetNext() && insn2 == last2.GetNext()); +} + +/* + * BB2 can be merged into BB1, if + * 1. BB1's kind is fallthrough; + * 2. BB2 has only one predecessor which is BB1 and BB2 is not the lastbb + * 3. BB2 is neither catch BB nor switch case BB + */ +bool ChainingPattern::MergeFallthuBB(BB &curBB) { + BB *sucBB = curBB.GetNext(); + if (sucBB == nullptr || + IsLabelInLSDAOrSwitchTable(sucBB->GetLabIdx()) || + !cgFunc->GetTheCFG()->CanMerge(curBB, *sucBB)) { + return false; + } + Log(curBB.GetId()); + if (checkOnly) { + return false; + } + if (sucBB == cgFunc->GetLastBB()) { + cgFunc->SetLastBB(curBB); + } + cgFunc->GetTheCFG()->MergeBB(curBB, *sucBB, *cgFunc); + keepPosition = true; + return true; +} + +bool ChainingPattern::MergeGotoBB(BB &curBB, BB &sucBB) { + Log(curBB.GetId()); + if (checkOnly) { + return false; + } + cgFunc->GetTheCFG()->MergeBB(curBB, sucBB, *cgFunc); + keepPosition = true; + return true; +} + +bool ChainingPattern::MoveSuccBBAsCurBBNext(BB &curBB, BB &sucBB) { + /* + * without the judge below, there is + * Assembler Error: CFI state restore without previous remember + */ + if (sucBB.GetHasCfi() || (sucBB.GetFirstInsn() != nullptr && sucBB.GetFirstInsn()->IsCfiInsn())) { + return false; + } + Log(curBB.GetId()); + if (checkOnly) { + return false; + } + /* put sucBB as curBB's next. 
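+   * sucBB is unlinked from its current position and re-linked right after curBB;
+   * the trailing goto of curBB is then removed and curBB becomes a fallthrough BB.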
*/ + ASSERT(sucBB.GetPrev() != nullptr, "the target of current goto BB will not be the first bb"); + sucBB.GetPrev()->SetNext(sucBB.GetNext()); + if (sucBB.GetNext() != nullptr) { + sucBB.GetNext()->SetPrev(sucBB.GetPrev()); + } + sucBB.SetNext(curBB.GetNext()); + if (curBB.GetNext() != nullptr) { + curBB.GetNext()->SetPrev(&sucBB); + } + if (sucBB.GetId() == cgFunc->GetLastBB()->GetId()) { + cgFunc->SetLastBB(*(sucBB.GetPrev())); + } else if (curBB.GetId() == cgFunc->GetLastBB()->GetId()) { + cgFunc->SetLastBB(sucBB); + } + sucBB.SetPrev(&curBB); + curBB.SetNext(&sucBB); + curBB.RemoveInsn(*curBB.GetLastInsn()); + curBB.SetKind(BB::kBBFallthru); + return true; +} + +bool ChainingPattern::RemoveGotoInsn(BB &curBB, BB &sucBB) { + Log(curBB.GetId()); + if (checkOnly) { + return false; + } + if (&sucBB != curBB.GetNext()) { + ASSERT(curBB.GetNext() != nullptr, "nullptr check"); + curBB.RemoveSuccs(sucBB); + curBB.PushBackSuccs(*curBB.GetNext()); + curBB.GetNext()->PushBackPreds(curBB); + sucBB.RemovePreds(curBB); + } + curBB.RemoveInsn(*curBB.GetLastInsn()); + curBB.SetKind(BB::kBBFallthru); + return true; +} + +bool ChainingPattern::ClearCurBBAndResetTargetBB(BB &curBB, BB &sucBB) { + if (curBB.GetHasCfi() || (curBB.GetFirstInsn() != nullptr && curBB.GetFirstInsn()->IsCfiInsn())) { + return false; + } + Insn *brInsn = nullptr; + for (brInsn = curBB.GetLastInsn(); brInsn != nullptr; brInsn = brInsn->GetPrev()) { + if (brInsn->IsUnCondBranch()){ + break; + } + } + ASSERT(brInsn != nullptr, "goto BB has no branch"); + BB *newTarget = sucBB.GetPrev(); + ASSERT(newTarget != nullptr, "get prev bb failed in ChainingPattern::ClearCurBBAndResetTargetBB"); + Insn *last1 = newTarget->GetLastInsn(); + if (newTarget->GetKind() == BB::kBBGoto) { + Insn *br = nullptr; + for (br = newTarget->GetLastInsn(); br != newTarget->GetFirstInsn()->GetPrev(); br = br->GetPrev()) { + if (br->IsUnCondBranch()){ + break; + } + } + ASSERT(br != nullptr, "goto BB has no branch"); + last1 = br->GetPrev(); + } + if (last1 == nullptr || !DoSameThing(*newTarget, *last1, curBB, *brInsn->GetPrev())) { + return false; + } + + Log(curBB.GetId()); + if (checkOnly) { + return false; + } + + LabelIdx tgtLabIdx = newTarget->GetLabIdx(); + if (newTarget->GetLabIdx() == MIRLabelTable::GetDummyLabel()) { + tgtLabIdx = cgFunc->CreateLabel(); + newTarget->AddLabel(tgtLabIdx); + } + LabelOperand &brTarget = cgFunc->GetOrCreateLabelOperand(tgtLabIdx); + brInsn->SetOperand(0, brTarget); + curBB.RemoveInsnSequence(*curBB.GetFirstInsn(), *brInsn->GetPrev()); + + curBB.RemoveFromSuccessorList(sucBB); + curBB.PushBackSuccs(*newTarget); + sucBB.RemoveFromPredecessorList(curBB); + newTarget->PushBackPreds(curBB); + + sucBB.GetPrev()->SetUnreachable(false); + keepPosition = true; + return true; +} + +/* + * Following optimizations are performed: + * 1. Basic block merging + * 2. unnecessary jumps elimination + * 3. Remove duplicates Basic block. + */ +bool ChainingPattern::Optimize(BB &curBB) { + if (curBB.GetKind() == BB::kBBFallthru) { + return MergeFallthuBB(curBB); + } + + if (curBB.GetKind() == BB::kBBGoto && !curBB.IsEmpty()) { + Insn* last = curBB.GetLastInsn(); + if (last->IsTailCall()) { + return false; + } + + BB *sucBB = cgFunc->GetTheCFG()->GetTargetSuc(curBB); + /* + * BB2 can be merged into BB1, if + * 1. BB1 ends with a goto; + * 2. BB2 has only one predecessor which is BB1 + * 3. BB2 is of goto kind. Otherwise, the original fall through will be broken + * 4. 
BB2 is neither catch BB nor switch case BB + */ + if (sucBB == nullptr || curBB.GetEhSuccs().size() != sucBB->GetEhSuccs().size()) { + return false; + } + if (!curBB.GetEhSuccs().empty() && (curBB.GetEhSuccs().front() != sucBB->GetEhSuccs().front())) { + return false; + } + if (sucBB->GetKind() == BB::kBBGoto && + !IsLabelInLSDAOrSwitchTable(sucBB->GetLabIdx()) && + cgFunc->GetTheCFG()->CanMerge(curBB, *sucBB)) { + return MergeGotoBB(curBB, *sucBB); + } else if (sucBB != &curBB && + curBB.GetNext() != sucBB && + sucBB != cgFunc->GetLastBB() && + !sucBB->IsPredecessor(*sucBB->GetPrev()) && + !(sucBB->GetNext() != nullptr && sucBB->GetNext()->IsPredecessor(*sucBB)) && + !IsLabelInLSDAOrSwitchTable(sucBB->GetLabIdx()) && + sucBB->GetEhSuccs().empty() && + sucBB->GetKind() != BB::kBBThrow && curBB.GetNext() != nullptr) { + return MoveSuccBBAsCurBBNext(curBB, *sucBB); + } + /* + * Last goto instruction can be removed, if: + * 1. The goto target is physically the next one to current BB. + */ + else if (sucBB == curBB.GetNext() || + (NoInsnBetween(curBB, *sucBB) && !IsLabelInLSDAOrSwitchTable(curBB.GetNext()->GetLabIdx()))) { + return RemoveGotoInsn(curBB, *sucBB); + } + /* + * Clear curBB and target it to sucBB->GetPrev() + * if sucBB->GetPrev() and curBB's insns are the same. + * + * curBB: curBB: + * insn_x0 b prevbb + * b sucBB ... + * ... ==> prevbb: + * prevbb: insn_x0 + * insn_x0 sucBB: + * sucBB: + */ + else if (sucBB != curBB.GetNext() && + !curBB.IsSoloGoto() && + !IsLabelInLSDAOrSwitchTable(curBB.GetLabIdx()) && + sucBB->GetKind() == BB::kBBReturn && + sucBB->GetPreds().size() > 1 && + sucBB->GetPrev() != nullptr && + sucBB->IsPredecessor(*sucBB->GetPrev()) && + (sucBB->GetPrev()->GetKind() == BB::kBBFallthru || sucBB->GetPrev()->GetKind() == BB::kBBGoto)) { + return ClearCurBBAndResetTargetBB(curBB, *sucBB); + } + } + return false; +} + +/* + * curBB: curBB: + * insn_x0 insn_x0 + * b targetBB b BB + * ... ==> ... + * targetBB: targetBB: + * b BB b BB + * ... ... + * BB: BB: + * *------------------------------ + * curBB: curBB: + * insn_x0 insn_x0 + * cond_br brBB cond_br BB + * ... ... + * brBB: ==> brBB: + * b BB b BB + * ... ... + * BB: BB: + * + * conditions: + * 1. 
only goto and comment in brBB; + */ +bool SequentialJumpPattern::Optimize(BB &curBB) { + if (curBB.IsUnreachable()) { + return false; + } + if (curBB.GetKind() == BB::kBBGoto && !curBB.IsEmpty()) { + BB *sucBB = cgFunc->GetTheCFG()->GetTargetSuc(curBB); + CHECK_FATAL(sucBB != nullptr, "sucBB is null in SequentialJumpPattern::Optimize"); + BB *tragetBB = CGCFG::GetTargetSuc(*sucBB); + if ((sucBB != &curBB) && sucBB->IsSoloGoto() && tragetBB != nullptr && tragetBB != sucBB) { + Log(curBB.GetId()); + if (checkOnly) { + return false; + } + cgFunc->GetTheCFG()->RetargetJump(*sucBB, curBB); + SkipSucBB(curBB, *sucBB); + return true; + } + } else if (curBB.GetKind() == BB::kBBIf) { + for (BB *sucBB : curBB.GetSuccs()) { + BB *tragetBB = CGCFG::GetTargetSuc(*sucBB); + if (sucBB != curBB.GetNext() && sucBB->IsSoloGoto() && + tragetBB != nullptr && tragetBB != sucBB) { + Log(curBB.GetId()); + if (checkOnly) { + return false; + } + cgFunc->GetTheCFG()->RetargetJump(*sucBB, curBB); + SkipSucBB(curBB, *sucBB); + return true; + } + } + } else if (curBB.GetKind() == BB::kBBRangeGoto) { + bool changed = false; + for (BB *sucBB : curBB.GetSuccs()) { + if (sucBB != curBB.GetNext() && sucBB->IsSoloGoto() && + cgFunc->GetTheCFG()->GetTargetSuc(*sucBB) != nullptr) { + Log(curBB.GetId()); + if (checkOnly) { + return false; + } + UpdateSwitchSucc(curBB, *sucBB); + cgFunc->GetTheCFG()->FlushUnReachableStatusAndRemoveRelations(*sucBB, *cgFunc); + changed = true; + } + } + return changed; + } + return false; +} + +void SequentialJumpPattern::UpdateSwitchSucc(BB &curBB, BB &sucBB) const { + BB *gotoTarget = cgFunc->GetTheCFG()->GetTargetSuc(sucBB); + CHECK_FATAL(gotoTarget != nullptr, "gotoTarget is null in SequentialJumpPattern::UpdateSwitchSucc"); + const MapleVector &labelVec = curBB.GetRangeGotoLabelVec(); + bool isPred = false; + for (auto label: labelVec) { + if (label == gotoTarget->GetLabIdx()) { + isPred = true; + break; + } + } + for (size_t i = 0; i < labelVec.size(); ++i) { + if (labelVec[i] == sucBB.GetLabIdx()) { + curBB.SetRangeGotoLabel(i, gotoTarget->GetLabIdx()); + } + } + cgFunc->UpdateEmitSt(curBB, sucBB.GetLabIdx(), gotoTarget->GetLabIdx()); + + /* connect curBB, gotoTarget */ + for (auto it = gotoTarget->GetPredsBegin(); it != gotoTarget->GetPredsEnd(); ++it) { + if (*it == &sucBB) { + auto origIt = it; + if (isPred) { + break; + } + if (origIt != gotoTarget->GetPredsBegin()) { + --origIt; + gotoTarget->InsertPred(origIt, curBB); + } else { + gotoTarget->PushFrontPreds(curBB); + } + break; + } + } + for (auto it = curBB.GetSuccsBegin(); it != curBB.GetSuccsEnd(); ++it) { + if (*it == &sucBB) { + auto origIt = it; + curBB.EraseSuccs(it); + if (isPred) { + break; + } + if (origIt != curBB.GetSuccsBegin()) { + --origIt; + curBB.InsertSucc(origIt, *gotoTarget); + } else { + curBB.PushFrontSuccs(*gotoTarget); + } + break; + } + } + /* cut curBB -> sucBB */ + for (auto it = sucBB.GetPredsBegin(); it != sucBB.GetPredsEnd(); ++it) { + if (*it == &curBB) { + sucBB.ErasePreds(it); + } + } + for (auto it = curBB.GetSuccsBegin(); it != curBB.GetSuccsEnd(); ++it) { + if (*it == &sucBB) { + curBB.EraseSuccs(it); + } + } +} + +/* + * preCond: + * sucBB is one of curBB's successor. 
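+ * It is expected to contain nothing but an unconditional goto (both call sites
+ * check IsSoloGoto() before calling this function).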
+ * + * Change curBB's successor to sucBB's successor + */ +void SequentialJumpPattern::SkipSucBB(BB &curBB, BB &sucBB) const { + BB *gotoTarget = cgFunc->GetTheCFG()->GetTargetSuc(sucBB); + CHECK_FATAL(gotoTarget != nullptr, "gotoTarget is null in SequentialJumpPattern::SkipSucBB"); + curBB.RemoveSuccs(sucBB); + curBB.PushBackSuccs(*gotoTarget); + sucBB.RemovePreds(curBB); + gotoTarget->PushBackPreds(curBB); + cgFunc->GetTheCFG()->FlushUnReachableStatusAndRemoveRelations(sucBB, *cgFunc); +} + +/* + * Found pattern + * curBB: curBB: + * ... ==> ... + * cond_br brBB cond1_br ftBB + * ftBB: brBB: + * bl throwfunc ... + * brBB: retBB: + * ... ... + * retBB: ftBB: + * ... bl throwfunc + */ +void FlipBRPattern::RelocateThrowBB(BB &curBB) { + BB *ftBB = curBB.GetNext(); + CHECK_FATAL(ftBB != nullptr, "ifBB has a fall through BB"); + CGCFG *theCFG = cgFunc->GetTheCFG(); + CHECK_FATAL(theCFG != nullptr, "nullptr check"); + BB *retBB = theCFG->FindLastRetBB(); + retBB = (retBB == nullptr ? cgFunc->GetLastBB() : retBB); + if (ftBB->GetKind() != BB::kBBThrow || !ftBB->GetEhSuccs().empty() || + IsLabelInLSDAOrSwitchTable(ftBB->GetLabIdx()) || !retBB->GetEhSuccs().empty()) { + return; + } + BB *brBB = theCFG->GetTargetSuc(curBB); + if (brBB != ftBB->GetNext()) { + return; + } + + EHFunc *ehFunc = cgFunc->GetEHFunc(); + if (ehFunc != nullptr && ehFunc->GetLSDACallSiteTable() != nullptr) { + const MapleVector &callsiteTable = ehFunc->GetLSDACallSiteTable()->GetCallSiteTable(); + for (size_t i = 0; i < callsiteTable.size(); ++i) { + LSDACallSite *lsdaCallsite = callsiteTable[i]; + BB *endTry = cgFunc->GetBBFromLab2BBMap(lsdaCallsite->csLength.GetEndOffset()->GetLabelIdx()); + BB *startTry = cgFunc->GetBBFromLab2BBMap(lsdaCallsite->csLength.GetStartOffset()->GetLabelIdx()); + if (retBB->GetId() >= startTry->GetId() && retBB->GetId() <= endTry->GetId()) { + if (retBB->GetNext()->GetId() < startTry->GetId() || retBB->GetNext()->GetId() > endTry->GetId() || + curBB.GetId() < startTry->GetId() || curBB.GetId() > endTry->GetId()) { + return; + } + } else { + if ((retBB->GetNext()->GetId() >= startTry->GetId() && retBB->GetNext()->GetId() <= endTry->GetId()) || + (curBB.GetId() >= startTry->GetId() && curBB.GetId() <= endTry->GetId())) { + return; + } + } + } + } + /* get branch insn of curBB */ + Insn *curBBBranchInsn = theCFG->FindLastCondBrInsn(curBB); + CHECK_FATAL(curBBBranchInsn != nullptr, "curBB(it is a kBBif) has no branch"); + + /* Reverse the branch */ + uint32 targetIdx = GetJumpTargetIdx(*curBBBranchInsn); + MOperator mOp = FlipConditionOp(curBBBranchInsn->GetMachineOpcode()); + LabelOperand &brTarget = cgFunc->GetOrCreateLabelOperand(*ftBB); + curBBBranchInsn->SetMOP(cgFunc->GetCG()->GetTargetMd(mOp)); + curBBBranchInsn->SetOperand(targetIdx, brTarget); + + /* move ftBB after retBB */ + curBB.SetNext(brBB); + CHECK_NULL_FATAL(brBB); + brBB->SetPrev(&curBB); + + retBB->GetNext()->SetPrev(ftBB); + ftBB->SetNext(retBB->GetNext()); + ftBB->SetPrev(retBB); + retBB->SetNext(ftBB); +} + +/* + * 1. relocate goto BB + * Found pattern (1) ftBB->GetPreds().size() == 1 + * curBB: curBB: cond1_br target + * ... ==> brBB: + * cond_br brBB ... + * ftBB: targetBB: (ftBB,targetBB) + * goto target (2) ftBB->GetPreds().size() > 1 + * brBB: curBB : cond1_br ftBB + * ... brBB: + * targetBB ... + * ftBB + * targetBB + * + * loopHeaderBB: loopHeaderBB: + * ... ... + * cond_br loopExit: cond_br loopHeaderBB + * ftBB: ftBB: + * goto loopHeaderBB: goto loopExit + * + * 3. 
relocate throw BB in RelocateThrowBB() + */ +bool FlipBRPattern::Optimize(BB &curBB) { + if (curBB.GetKind() == BB::kBBIf && !curBB.IsEmpty()) { + BB *ftBB = curBB.GetNext(); + ASSERT(ftBB != nullptr, "ftBB is null in FlipBRPattern::Optimize"); + BB *brBB = cgFunc->GetTheCFG()->GetTargetSuc(curBB); + ASSERT(brBB != nullptr, "brBB is null in FlipBRPattern::Optimize"); + /* Check if it can be optimized */ + if (ftBB->GetKind() == BB::kBBGoto && ftBB->GetNext() == brBB) { + if (!ftBB->GetEhSuccs().empty()) { + return false; + } + Insn *curBBBranchInsn = nullptr; + for (curBBBranchInsn = curBB.GetLastInsn(); curBBBranchInsn != nullptr; + curBBBranchInsn = curBBBranchInsn->GetPrev()) { + if (curBBBranchInsn->IsBranch()) { + break; + } + } + ASSERT(curBBBranchInsn != nullptr, "FlipBRPattern: curBB has no branch"); + Insn *brInsn = nullptr; + for (brInsn = ftBB->GetLastInsn(); brInsn != nullptr; brInsn = brInsn->GetPrev()) { + if (brInsn->IsUnCondBranch()) { + break; + } + } + ASSERT(brInsn != nullptr, "FlipBRPattern: ftBB has no branch"); + + /* Reverse the branch */ + uint32 targetIdx = GetJumpTargetIdx(*curBBBranchInsn); + MOperator mOp = FlipConditionOp(curBBBranchInsn->GetMachineOpcode()); + if (mOp == 0) { + return false; + } + auto it = ftBB->GetSuccsBegin(); + BB *tgtBB = *it; + if (ftBB->GetPreds().size() == 1 && + (ftBB->IsSoloGoto() || + (!IsLabelInLSDAOrSwitchTable(tgtBB->GetLabIdx()) && + cgFunc->GetTheCFG()->CanMerge(*ftBB, *tgtBB)))) { + curBBBranchInsn->SetMOP(cgFunc->GetCG()->GetTargetMd(mOp)); + Operand &brTarget = brInsn->GetOperand(GetJumpTargetIdx(*brInsn)); + curBBBranchInsn->SetOperand(targetIdx, brTarget); + /* Insert ftBB's insn at the beginning of tgtBB. */ + if (!ftBB->IsSoloGoto()) { + ftBB->RemoveInsn(*brInsn); + tgtBB->InsertAtBeginning(*ftBB); + } + /* Patch pred and succ lists */ + ftBB->EraseSuccs(it); + ftBB->PushBackSuccs(*brBB); + it = curBB.GetSuccsBegin(); + CHECK_FATAL(*it != nullptr, "nullptr check"); + if (*it == brBB) { + curBB.EraseSuccs(it); + curBB.PushBackSuccs(*tgtBB); + } else { + ++it; + curBB.EraseSuccs(it); + curBB.PushFrontSuccs(*tgtBB); + } + for (it = tgtBB->GetPredsBegin(); it != tgtBB->GetPredsEnd(); ++it) { + if (*it == ftBB) { + tgtBB->ErasePreds(it); + break; + } + } + tgtBB->PushBackPreds(curBB); + for (it = brBB->GetPredsBegin(); it != brBB->GetPredsEnd(); ++it) { + if (*it == &curBB) { + brBB->ErasePreds(it); + break; + } + } + brBB->PushFrontPreds(*ftBB); + /* Remove instructions from ftBB so curBB falls thru to brBB */ + ftBB->SetFirstInsn(nullptr); + ftBB->SetLastInsn(nullptr); + ftBB->SetKind(BB::kBBFallthru); + } else if (!IsLabelInLSDAOrSwitchTable(ftBB->GetLabIdx()) && + !tgtBB->IsPredecessor(*tgtBB->GetPrev())) { + curBBBranchInsn->SetMOP(cgFunc->GetCG()->GetTargetMd(mOp)); + LabelIdx tgtLabIdx = ftBB->GetLabIdx(); + if (ftBB->GetLabIdx() == MIRLabelTable::GetDummyLabel()) { + tgtLabIdx = cgFunc->CreateLabel(); + ftBB->AddLabel(tgtLabIdx); + } + LabelOperand &brTarget = cgFunc->GetOrCreateLabelOperand(tgtLabIdx); + curBBBranchInsn->SetOperand(targetIdx, brTarget); + curBB.SetNext(brBB); + brBB->SetPrev(&curBB); + ftBB->SetPrev(tgtBB->GetPrev()); + tgtBB->GetPrev()->SetNext(ftBB); + ftBB->SetNext(tgtBB); + tgtBB->SetPrev(ftBB); + + ftBB->RemoveInsn(*brInsn); + ftBB->SetKind(BB::kBBFallthru); + } + } else if (GetPhase() == kCfgoPostRegAlloc && ftBB->GetKind() == BB::kBBGoto && + curBB.GetLoop() != nullptr && curBB.GetLoop() == ftBB->GetLoop() && + ftBB->IsSoloGoto() && + ftBB->GetLoop()->GetHeader() == *(ftBB->GetSuccsBegin()) && 
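+               /* i.e. ftBB is the solo back-edge goto to the loop header and the
+                * conditional target of curBB lies outside the loop, so the branch can
+                * be flipped to jump back to the header while the goto takes the exit. */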
+ !curBB.GetLoop()->IsBBLoopMember((curBB.GetSuccs().front() == ftBB) ? + curBB.GetSuccs().back() : curBB.GetSuccs().front())) { + Insn *curBBBranchInsn = nullptr; + for (curBBBranchInsn = curBB.GetLastInsn(); curBBBranchInsn != nullptr; + curBBBranchInsn = curBBBranchInsn->GetPrev()) { + if (curBBBranchInsn->IsBranch()) { + break; + } + } + ASSERT(curBBBranchInsn != nullptr, "FlipBRPattern: curBB has no branch"); + Insn *brInsn = nullptr; + for (brInsn = ftBB->GetLastInsn(); brInsn != nullptr; brInsn = brInsn->GetPrev()) { + if (brInsn->IsUnCondBranch()) { + break; + } + } + ASSERT(brInsn != nullptr, "FlipBRPattern: ftBB has no branch"); + uint32 condTargetIdx = GetJumpTargetIdx(*curBBBranchInsn); + LabelOperand &condTarget = static_cast(curBBBranchInsn->GetOperand(condTargetIdx)); + MOperator mOp = FlipConditionOp(curBBBranchInsn->GetMachineOpcode()); + if (mOp == 0) { + return false; + } + uint32 gotoTargetIdx = GetJumpTargetIdx(*brInsn); + LabelOperand &gotoTarget = static_cast(brInsn->GetOperand(gotoTargetIdx)); + curBBBranchInsn->SetMOP(cgFunc->GetCG()->GetTargetMd(mOp)); + curBBBranchInsn->SetOperand(condTargetIdx, gotoTarget); + brInsn->SetOperand(gotoTargetIdx, condTarget); + auto it = ftBB->GetSuccsBegin(); + BB *loopHeadBB = *it; + + curBB.RemoveSuccs(*brBB); + brBB->RemovePreds(curBB); + ftBB->RemoveSuccs(*loopHeadBB); + loopHeadBB->RemovePreds(*ftBB); + + curBB.PushBackSuccs(*loopHeadBB); + loopHeadBB->PushBackPreds(curBB); + ftBB->PushBackSuccs(*brBB); + brBB->PushBackPreds(*ftBB); + } else { + RelocateThrowBB(curBB); + } + } + return false; +} + +/* remove a basic block that contains nothing */ +bool EmptyBBPattern::Optimize(BB &curBB) { + if (curBB.IsUnreachable()) { + return false; + } + /* Empty bb and it's not a cleanupBB/returnBB/lastBB/catchBB. */ + if (curBB.GetPrev() != nullptr && !curBB.IsCleanup() && + curBB.GetFirstInsn() == nullptr && curBB.GetLastInsn() == nullptr && &curBB != cgFunc->GetLastBB() && + curBB.GetKind() != BB::kBBReturn && !IsLabelInLSDAOrSwitchTable(curBB.GetLabIdx())) { + Log(curBB.GetId()); + if (checkOnly) { + return false; + } + + BB *sucBB = cgFunc->GetTheCFG()->GetTargetSuc(curBB); + if (sucBB == nullptr || sucBB->IsCleanup()) { + return false; + } + cgFunc->GetTheCFG()->RemoveBB(curBB); + /* removeBB may do nothing. since no need to repeat, always ret false here. */ + return false; + } + return false; +} + +/* + * remove unreachable BB + * condition: + * 1. unreachable BB can't have cfi instruction when postcfgo. + */ +bool UnreachBBPattern::Optimize(BB &curBB) { + if (curBB.IsUnreachable()) { + Log(curBB.GetId()); + if (checkOnly) { + return false; + } + /* if curBB in exitbbsvec,return false. */ + if (cgFunc->IsExitBB(curBB)) { + /* In C some bb follow noreturn calls should remain unreachable */ + curBB.SetUnreachable(cgFunc->GetMirModule().GetSrcLang() == kSrcLangC); + return false; + } + + if (curBB.GetHasCfi() || (curBB.GetFirstInsn() != nullptr && curBB.GetFirstInsn()->IsCfiInsn())) { + return false; + } + + EHFunc *ehFunc = cgFunc->GetEHFunc(); + /* if curBB InLSDA ,replace curBB's label with nextReachableBB before remove it. 
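+     * Otherwise the LSDA call-site table would still refer to a label that no longer
+     * has a reachable block behind it.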
*/ + if (ehFunc != nullptr && ehFunc->NeedFullLSDA() && + cgFunc->GetTheCFG()->InLSDA(curBB.GetLabIdx(), ehFunc)) { + /* find nextReachableBB */ + BB *nextReachableBB = nullptr; + for (BB *bb = &curBB; bb != nullptr; bb = bb->GetNext()) { + if (!bb->IsUnreachable()) { + nextReachableBB = bb; + break; + } + } + CHECK_FATAL(nextReachableBB != nullptr, "nextReachableBB not be nullptr"); + if (nextReachableBB->GetLabIdx() == 0) { + LabelIdx labIdx = cgFunc->CreateLabel(); + nextReachableBB->AddLabel(labIdx); + cgFunc->SetLab2BBMap(labIdx, *nextReachableBB); + } + + ehFunc->GetLSDACallSiteTable()->UpdateCallSite(curBB, *nextReachableBB); + } + if (curBB.GetPrev() != nullptr) { + curBB.GetPrev()->SetNext(curBB.GetNext()); + } + if (curBB.GetNext() != nullptr) { + curBB.GetNext()->SetPrev(curBB.GetPrev()); + } else { + cgFunc->SetLastBB(*(curBB.GetPrev())); + } + + /* flush after remove; */ + for (BB *bb : curBB.GetSuccs()) { + bb->RemovePreds(curBB); + cgFunc->GetTheCFG()->FlushUnReachableStatusAndRemoveRelations(*bb, *cgFunc); + } + for (BB *bb : curBB.GetEhSuccs()) { + bb->RemoveEhPreds(curBB); + cgFunc->GetTheCFG()->FlushUnReachableStatusAndRemoveRelations(*bb, *cgFunc); + } + curBB.ClearSuccs(); + curBB.ClearEhSuccs(); + /* return always be false */ + } + return false; +} + +/* BB_pred1: BB_pred1: + * b curBB insn_x0 + * ... b BB2 + * BB_pred2: ==> ... + * b curBB BB_pred2: + * ... insn_x0 + * curBB: b BB2 + * insn_x0 ... + * b BB2 curBB: + * insn_x0 + * b BB2 + * condition: + * 1. The number of instruct in curBB + * is less than THRESHOLD; + * 2. curBB can't have cfi instruction when postcfgo. + */ +bool DuplicateBBPattern::Optimize(BB &curBB) { + if (curBB.IsUnreachable()) { + return false; + } + if (CGOptions::IsNoDupBB() || CGOptions::OptimizeForSize()) { + return false; + } + + /* curBB can't be in try block */ + if (curBB.GetKind() != BB::kBBGoto || IsLabelInLSDAOrSwitchTable(curBB.GetLabIdx()) || + !curBB.GetEhSuccs().empty()) { + return false; + } + +#if TARGARM32 + FOR_BB_INSNS(insn, (&curBB)) { + if (insn->IsPCLoad() || insn->IsClinit()) { + return false; + } + } +#endif + /* It is possible curBB jump to itself */ + uint32 numPreds = curBB.NumPreds(); + for (BB *bb : curBB.GetPreds()) { + if (bb == &curBB) { + numPreds--; + } + } + + if (numPreds > 1 && cgFunc->GetTheCFG()->GetTargetSuc(curBB) != nullptr && + cgFunc->GetTheCFG()->GetTargetSuc(curBB)->NumPreds() > 1) { + std::vector candidates; + for (BB *bb : curBB.GetPreds()) { + if (bb->GetKind() == BB::kBBGoto && bb->GetNext() != &curBB && bb != &curBB && !bb->IsEmpty()) { + candidates.emplace_back(bb); + } + } + if (candidates.empty()) { + return false; + } + if (curBB.NumInsn() <= kThreshold) { + if (curBB.GetHasCfi() || (curBB.GetFirstInsn() != nullptr && curBB.GetFirstInsn()->IsCfiInsn())) { + return false; + } + Log(curBB.GetId()); + if (checkOnly) { + return false; + } + bool changed = false; + for (BB *bb : candidates) { + if (curBB.GetEhSuccs().size() != bb->GetEhSuccs().size()) { + continue; + } + if (!curBB.GetEhSuccs().empty() && (curBB.GetEhSuccs().front() != bb->GetEhSuccs().front())) { + continue; + } + bb->RemoveInsn(*bb->GetLastInsn()); + FOR_BB_INSNS(insn, (&curBB)) { + Insn *clonedInsn = cgFunc->GetTheCFG()->CloneInsn(*insn); + clonedInsn->SetPrev(nullptr); + clonedInsn->SetNext(nullptr); + clonedInsn->SetBB(nullptr); + bb->AppendInsn(*clonedInsn); + } + bb->RemoveSuccs(curBB); + for (BB *item : curBB.GetSuccs()) { + bb->PushBackSuccs(*item); + item->PushBackPreds(*bb); + } + curBB.RemovePreds(*bb); + changed = 
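+            /* the predecessor now ends with a private copy of curBB's body and
+             * branches directly to curBB's successors */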
true; + } + cgFunc->GetTheCFG()->FlushUnReachableStatusAndRemoveRelations(curBB, *cgFunc); + return changed; + } + } + return false; +} + +/* === new pm === */ +bool CgCfgo::PhaseRun(maplebe::CGFunc &f) { + CFGOptimizer *cfgOptimizer = f.GetCG()->CreateCFGOptimizer(*GetPhaseMemPool(), f); + if (f.IsAfterRegAlloc()) { + (void)GetAnalysisInfoHook()->ForceRunAnalysisPhase, CGFunc>(&CgLoopAnalysis::id, f); + cfgOptimizer->SetPhase(kCfgoPostRegAlloc); + } + const std::string &funcClass = f.GetFunction().GetBaseClassName(); + const std::string &funcName = f.GetFunction().GetBaseFuncName(); + const std::string &name = funcClass + funcName; + if (CFGO_DUMP_NEWPM) { + DotGenerator::GenerateDot("before-cfgo", f, f.GetMirModule()); + } + cfgOptimizer->Run(name); + if (f.IsAfterRegAlloc()) { + GetAnalysisInfoHook()->ForceEraseAnalysisPhase(f.GetUniqueID(), &CgLoopAnalysis::id); + } + if (CFGO_DUMP_NEWPM) { + f.GetTheCFG()->CheckCFG(); + DotGenerator::GenerateDot("after-cfgo", f, f.GetMirModule()); + } + return false; +} +MAPLE_TRANSFORM_PHASE_REGISTER_CANSKIP(CgCfgo, cfgo) + +bool CgPostCfgo::PhaseRun(maplebe::CGFunc &f) { + CFGOptimizer *cfgOptimizer = f.GetCG()->CreateCFGOptimizer(*GetPhaseMemPool(), f); + const std::string &funcClass = f.GetFunction().GetBaseClassName(); + const std::string &funcName = f.GetFunction().GetBaseFuncName(); + const std::string &name = funcClass + funcName; + if (CFGO_DUMP_NEWPM) { + DotGenerator::GenerateDot("before-postcfgo", f, f.GetMirModule()); + } + cfgOptimizer->Run(name); + if (CFGO_DUMP_NEWPM) { + DotGenerator::GenerateDot("after-postcfgo", f, f.GetMirModule()); + } + return false; +} +MAPLE_TRANSFORM_PHASE_REGISTER_CANSKIP(CgPostCfgo, postcfgo) +} /* namespace maplebe */ diff --git a/src/mapleall/maple_be/src/cg/cfi.cpp b/src/mapleall/maple_be/src/cg/cfi.cpp new file mode 100644 index 0000000000000000000000000000000000000000..3c5e64769cd83a2c5a2ab32736f081f5879e5432 --- /dev/null +++ b/src/mapleall/maple_be/src/cg/cfi.cpp @@ -0,0 +1,104 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "cfi.h" +#include "emit.h" + +namespace cfi { +using maplebe::Operand; +using maplebe::MOperator; +using maplebe::CG; +using maplebe::Emitter; +using maplebe::OpndDesc; + +struct CfiDescr { + const std::string name; + uint32 opndCount; + /* create 3 OperandType array to store cfi instruction's operand type */ + std::array opndTypes; +}; + +static CfiDescr cfiDescrTable[kOpCfiLast + 1] = { +#define CFI_DEFINE(k, sub, n, o0, o1, o2) \ + { ".cfi_" #k, n, { Operand::kOpd##o0, Operand::kOpd##o1, Operand::kOpd##o2 } }, +#define ARM_DIRECTIVES_DEFINE(k, sub, n, o0, o1, o2) \ + { "." 
#k, n, { Operand::kOpd##o0, Operand::kOpd##o1, Operand::kOpd##o2 } }, +#include "cfi.def" +#undef CFI_DEFINE +#undef ARM_DIRECTIVES_DEFINE + { ".cfi_undef", 0, { Operand::kOpdUndef, Operand::kOpdUndef, Operand::kOpdUndef } } +}; + +void CfiInsn::Dump() const { + MOperator mOp = GetMachineOpcode(); + CfiDescr &cfiDescr = cfiDescrTable[mOp]; + LogInfo::MapleLogger() << "CFI " << cfiDescr.name; + for (uint32 i = 0; i < static_cast(cfiDescr.opndCount); ++i) { + LogInfo::MapleLogger() << (i == 0 ? " : " : " "); + Operand &curOperand = GetOperand(i); + curOperand.Dump(); + } + LogInfo::MapleLogger() << "\n"; +} + +#if DEBUG +void CfiInsn::Check() const { + CfiDescr &cfiDescr = cfiDescrTable[GetMachineOpcode()]; + /* cfi instruction's 3rd /4th/5th operand must be null */ + for (uint32 i = 0; i < static_cast(cfiDescr.opndCount); ++i) { + Operand &opnd = GetOperand(i); + if (opnd.GetKind() != cfiDescr.opndTypes[i]) { + CHECK_FATAL(false, "incorrect operand in cfi insn"); + } + } +} +#endif + +void RegOperand::Dump() const { + LogInfo::MapleLogger() << "reg: " << regNO << "[ size: " << GetSize() << "] "; +} + +void ImmOperand::Dump() const { + LogInfo::MapleLogger() << "imm: " << val << "[ size: " << GetSize() << "] "; +} + +void StrOperand::Dump() const { + LogInfo::MapleLogger() << str; +} + +void LabelOperand::Dump() const { + LogInfo::MapleLogger() << "label:" << labelIndex; +} +void CFIOpndEmitVisitor::Visit(RegOperand *v) { + emitter.Emit(v->GetRegisterNO()); +} +void CFIOpndEmitVisitor::Visit(ImmOperand *v) { + emitter.Emit(v->GetValue()); +} +void CFIOpndEmitVisitor::Visit(SymbolOperand *v) { + CHECK_FATAL(false, "NIY"); +} +void CFIOpndEmitVisitor::Visit(StrOperand *v) { + emitter.Emit(v->GetStr()); +} +void CFIOpndEmitVisitor::Visit(LabelOperand *v) { + if (emitter.GetCG()->GetMIRModule()->IsCModule()) { + PUIdx pIdx = emitter.GetCG()->GetMIRModule()->CurFunction()->GetPuidx(); + const char *idx = strdup(std::to_string(pIdx).c_str()); + emitter.Emit(".label.").Emit(idx).Emit("__").Emit(v->GetIabelIdx()); + } else { + emitter.Emit(".label.").Emit(v->GetParentFunc()).Emit(v->GetIabelIdx()); + } +} +} /* namespace cfi */ diff --git a/src/mapleall/maple_be/src/cg/cfi_generator.cpp b/src/mapleall/maple_be/src/cg/cfi_generator.cpp new file mode 100644 index 0000000000000000000000000000000000000000..40862914cff3c1e79548b6d4b024f165bcac1845 --- /dev/null +++ b/src/mapleall/maple_be/src/cg/cfi_generator.cpp @@ -0,0 +1,139 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "cfi_generator.h" +#include "cgfunc.h" +#if TARGAARCH64 +#include "aarch64_cfi_generator.h" +#include "aarch64_cgfunc.h" +#endif + +namespace maplebe { +Insn &GenCfi::FindStackDefNextInsn(BB &bb) const { + FOR_BB_INSNS(insn, &bb) { + if (insn->IsStackDef()) { + if (insn->GetNext() == nullptr) { + auto &comment = cgFunc.GetOpndBuilder()->CreateComment("stack alloc end"); + bb.AppendInsn(cgFunc.GetInsnBuilder()->BuildCommentInsn(comment)); + } + return *(insn->GetNext()); + } + } + CHECK_FATAL(false, "bb need a stackdef insn"); +} + +void GenCfi::InsertCFIDefCfaOffset(BB &bb, Insn &insn, int32 &cfiOffset) { + cfiOffset = AddtoOffsetFromCFA(cfiOffset); + Insn &cfiInsn = cgFunc.GetInsnBuilder()->BuildCfiInsn(cfi::OP_CFI_def_cfa_offset).AddOpndChain( + cgFunc.CreateCfiImmOperand(cfiOffset, k64BitSize)); + (void)bb.InsertInsnBefore(insn, cfiInsn); + cgFunc.SetDbgCallFrameOffset(cfiOffset); +} + +void GenCfi::GenerateStartDirective(BB &bb) { + Insn &startprocInsn = cgFunc.GetInsnBuilder()->BuildCfiInsn(cfi::OP_CFI_startproc); + if (bb.GetFirstInsn() != nullptr) { + (void)bb.InsertInsnBefore(*bb.GetFirstInsn(), startprocInsn); + } else { + bb.AppendInsn(startprocInsn); + } + +#if !defined(TARGARM32) + /* + * always generate ".cfi_personality 155, DW.ref.__mpl_personality_v0" for Java methods. + * we depend on this to tell whether it is a java method. (maybe we can get a function attribute to determine it) + */ + if (cgFunc.GetFunction().IsJava()) { + Insn &personality = cgFunc.GetInsnBuilder()->BuildCfiInsn(cfi::OP_CFI_personality_symbol).AddOpndChain( + cgFunc.CreateCfiImmOperand(EHFunc::kTypeEncoding, k8BitSize)).AddOpndChain( + cgFunc.CreateCfiStrOperand("DW.ref.__mpl_personality_v0")); + bb.InsertInsnAfter(startprocInsn, personality); + } +#endif +} + +void GenCfi::GenerateEndDirective(BB &bb) { + bb.AppendInsn(cgFunc.GetInsnBuilder()->BuildCfiInsn(cfi::OP_CFI_endproc)); +} + +void GenCfi::GenerateRegisterStateDirective(BB &bb) { + if (cg.GetMIRModule()->IsCModule()) { + return; + } + + if (&bb == cgFunc.GetLastBB() || bb.GetNext() == nullptr) { + return; + } + + BB *nextBB = bb.GetNext(); + do { + if (nextBB == cgFunc.GetLastBB() || !nextBB->IsEmpty()) { + break; + } + nextBB = nextBB->GetNext(); + } while (nextBB != nullptr); + + if (nextBB != nullptr && !nextBB->IsEmpty()) { + bb.InsertInsnBegin(cgFunc.GetInsnBuilder()->BuildCfiInsn(cfi::OP_CFI_remember_state)); + nextBB->InsertInsnBegin(cgFunc.GetInsnBuilder()->BuildCfiInsn(cfi::OP_CFI_restore_state)); + } +} + +void GenCfi::InsertFirstLocation(BB &bb) { + MIRSymbol *fSym = GlobalTables::GetGsymTable().GetSymbolFromStidx(cgFunc.GetFunction().GetStIdx().Idx()); + + if (fSym == nullptr || !cg.GetCGOptions().WithLoc() || !cg.GetMIRModule()->IsCModule() || + (fSym->GetSrcPosition().FileNum() == 0)) { + return; + } + + uint32 fileNum = fSym->GetSrcPosition().FileNum(); + uint32 lineNum = fSym->GetSrcPosition().LineNum(); + uint32 columnNum = fSym->GetSrcPosition().Column(); + Operand *fileNumOpnd = cgFunc.CreateDbgImmOperand(fileNum); + Operand *lineNumOpnd = cgFunc.CreateDbgImmOperand(lineNum); + Operand *columnNumOpnd = cgFunc.CreateDbgImmOperand(columnNum); + Insn &loc = cgFunc.GetInsnBuilder()->BuildDbgInsn(mpldbg::OP_DBG_loc).AddOpndChain( + *fileNumOpnd).AddOpndChain(*lineNumOpnd).AddOpndChain(*columnNumOpnd); + (void)(bb.InsertInsnBefore(*bb.GetFirstInsn(), loc)); +} + +void GenCfi::Run() { + auto *startBB = cgFunc.GetFirstBB(); + GenerateStartDirective(*startBB); + InsertFirstLocation(*startBB); + + if 
(cgFunc.GetHasProEpilogue()) { + GenerateRegisterSaveDirective(*(cgFunc.GetPrologureBB())); + + FOR_ALL_BB(bb, &cgFunc) { + if (!bb->IsFastPathReturn() && bb->IsNeedRestoreCfi()) { + GenerateRegisterStateDirective(*bb); + GenerateRegisterRestoreDirective(*bb); + } + } + } + + GenerateEndDirective(*(cgFunc.GetLastBB())); +} + +bool CgGenCfi::PhaseRun(maplebe::CGFunc &f) { +#if TARGAARCH64 + GenCfi *genCfi = GetPhaseAllocator()->New(f); + genCfi->Run(); +#endif + return true; +} +MAPLE_TRANSFORM_PHASE_REGISTER(CgGenCfi, gencfi) +} /* namespace maplebe */ diff --git a/src/mapleall/maple_be/src/cg/cg.cpp b/src/mapleall/maple_be/src/cg/cg.cpp new file mode 100644 index 0000000000000000000000000000000000000000..9fe0fc7afb0bd583bde9a8ca2186b5c9b20ab150 --- /dev/null +++ b/src/mapleall/maple_be/src/cg/cg.cpp @@ -0,0 +1,286 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include +#include "emit.h" + +namespace maplebe { +using namespace maple; + +#define JAVALANG (mirModule->IsJavaModule()) + +void Globals::SetTarget(CG &target) { + cg = ⌖ +} +const CG *Globals::GetTarget() const { + ASSERT(cg, " set target info please "); + return cg; +} + +CGFunc *CG::currentCGFunction = nullptr; +std::map> CG::funcWrapLabels; + +CG::~CG() { + if (emitter != nullptr) { + emitter->CloseOutput(); + } + delete memPool; + memPool = nullptr; + mirModule = nullptr; + emitter = nullptr; + currentCGFunction = nullptr; + dbgTraceEnter = nullptr; + dbgTraceExit = nullptr; + dbgFuncProfile = nullptr; +} +/* This function intends to be a more general form of GenFieldOffsetmap. */ +void CG::GenExtraTypeMetadata(const std::string &classListFileName, const std::string &outputBaseName) { + const std::string &cMacroDefSuffix = ".macros.def"; + BECommon *beCommon = Globals::GetInstance()->GetBECommon(); + std::vector classesToGenerate; + + if (classListFileName.empty()) { + /* + * Class list not specified. Visit all classes. + */ + std::set visited; + + for (const auto &tyId : mirModule->GetClassList()) { + MIRType *mirType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyId); + if ((mirType->GetKind() != kTypeClass) && (mirType->GetKind() != kTypeClassIncomplete)) { + continue; /* Skip non-class. Too paranoid. We just enumerated classlist_! */ + } + MIRClassType *classType = static_cast(mirType); + const std::string &name = classType->GetName(); + + if (visited.find(name) != visited.end()) { + continue; /* Skip duplicated class definitions. */ + } + + (void)visited.insert(name); + classesToGenerate.emplace_back(classType); + } + } else { + /* Visit listed classes. 
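Each name read from the list file must resolve to a class type; an unknown name is reported and metadata generation returns early (the nullptr check below).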
*/ + std::ifstream inFile(classListFileName); + CHECK_FATAL(inFile.is_open(), "Failed to open file: %s", classListFileName.c_str()); + std::string str; + + /* check each class name first and expose all unknown classes */ + while (inFile >> str) { + MIRType *type = GlobalTables::GetTypeTable().GetOrCreateClassType(str, *mirModule); + MIRClassType *classType = static_cast(type); + if (classType == nullptr) { + LogInfo::MapleLogger() << " >>>>>>>> unknown class: " << str.c_str() << "\n"; + return; + } + + classesToGenerate.emplace_back(classType); + } + } + + if (cgOption.GenDef()) { + const std::string &outputFileName = outputBaseName + cMacroDefSuffix; + FILE *outputFile = fopen(outputFileName.c_str(), "w"); + if (outputFile == nullptr) { + FATAL(kLncFatal, "open file failed in CG::GenExtraTypeMetadata"); + } + + for (auto classType : classesToGenerate) { + beCommon->GenObjSize(*classType, *outputFile); + beCommon->GenFieldOffsetMap(*classType, *outputFile); + } + fclose(outputFile); + } + + if (cgOption.GenGctib()) { + maple::LogInfo::MapleLogger(kLlErr) << "--gen-gctib-file option not implemented"; + } +} + +void CG::GenPrimordialObjectList(const std::string &outputBaseName) { + const std::string &kPrimorListSuffix = ".primordials.txt"; + if (!cgOption.GenPrimorList()) { + return; + } + + const std::string &outputFileName = outputBaseName + kPrimorListSuffix; + FILE *outputFile = fopen(outputFileName.c_str(), "w"); + if (outputFile == nullptr) { + FATAL(kLncFatal, "open file failed in CG::GenPrimordialObjectList"); + } + + for (StIdx stIdx : mirModule->GetSymbolSet()) { + MIRSymbol *symbol = GlobalTables::GetGsymTable().GetSymbolFromStidx(stIdx.Idx()); + ASSERT(symbol != nullptr, "get symbol from st idx failed"); + if (symbol->IsPrimordialObject()) { + const std::string &name = symbol->GetName(); + fprintf(outputFile, "%s\n", name.c_str()); + } + } + + fclose(outputFile); +} + +void CG::AddStackGuardvar() const { + MIRSymbol *chkGuard = GlobalTables::GetGsymTable().CreateSymbol(kScopeGlobal); + chkGuard->SetNameStrIdx(std::string("__stack_chk_guard")); + chkGuard->SetStorageClass(kScExtern); + chkGuard->SetSKind(kStVar); + CHECK_FATAL(GlobalTables::GetTypeTable().GetTypeTable().size() > PTY_u64, "out of vector range"); + chkGuard->SetTyIdx(GlobalTables::GetTypeTable().GetTypeTable()[PTY_u64]->GetTypeIndex()); + GlobalTables::GetGsymTable().AddToStringSymbolMap(*chkGuard); + + MIRSymbol *chkFunc = GlobalTables::GetGsymTable().CreateSymbol(kScopeGlobal); + chkFunc->SetNameStrIdx(std::string("__stack_chk_fail")); + chkFunc->SetStorageClass(kScText); + chkFunc->SetSKind(kStFunc); + GlobalTables::GetGsymTable().AddToStringSymbolMap(*chkFunc); +} + +#define DBG_TRACE_ENTER MplDtEnter +#define DBG_TRACE_EXIT MplDtExit +#define XSTR(s) str(s) +#define str(s) #s + +void CG::DefineDebugTraceFunctions() { + dbgTraceEnter = GlobalTables::GetGsymTable().CreateSymbol(kScopeGlobal); + dbgTraceEnter->SetNameStrIdx(std::string("__" XSTR(DBG_TRACE_ENTER) "__")); + dbgTraceEnter->SetStorageClass(kScText); + dbgTraceEnter->SetSKind(kStFunc); + + dbgTraceExit = GlobalTables::GetGsymTable().CreateSymbol(kScopeGlobal); + dbgTraceExit->SetNameStrIdx(std::string("__" XSTR(DBG_TRACE_EXIT) "__")); + dbgTraceExit->SetStorageClass(kScText); + dbgTraceExit->SetSKind(kStFunc); + + dbgFuncProfile = GlobalTables::GetGsymTable().CreateSymbol(kScopeGlobal); + dbgFuncProfile->SetNameStrIdx(std::string("__" XSTR(MplFuncProfile) "__")); + dbgFuncProfile->SetStorageClass(kScText); + dbgFuncProfile->SetSKind(kStFunc); +} + +/* 
+ * Add the fields of curStructType to the result. Used to handle recursive + * structures. + */ +static void AppendReferenceOffsets64(const BECommon &beCommon, MIRStructType &curStructType, int64 &curOffset, + std::vector &result) { + /* + * We are going to reimplement BECommon::GetFieldOffset so that we can do + * this in one pass through all fields. + * + * The tricky part is to make sure the object layout described here is + * compatible with the rest of the system. This implies that we need + * something like a "Maple ABI" documented for each platform. + */ + if (curStructType.GetKind() == kTypeClass) { + MIRClassType &curClassTy = static_cast(curStructType); + auto maybeParent = GlobalTables::GetTypeTable().GetTypeFromTyIdx(curClassTy.GetParentTyIdx()); + if (maybeParent != nullptr) { + if (maybeParent->GetKind() == kTypeClass) { + auto parentClassType = static_cast(maybeParent); + AppendReferenceOffsets64(beCommon, *parentClassType, curOffset, result); + } else { + LogInfo::MapleLogger() << "WARNING:: generating objmap for incomplete class\n"; + } + } + } + + for (const auto &fieldPair : curStructType.GetFields()) { + auto fieldNameIdx = fieldPair.first; + auto fieldTypeIdx = fieldPair.second.first; + + auto &fieldName = GlobalTables::GetStrTable().GetStringFromStrIdx(fieldNameIdx); + auto fieldType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(fieldTypeIdx); + auto &fieldTypeName = GlobalTables::GetStrTable().GetStringFromStrIdx(fieldType->GetNameStrIdx()); + auto fieldTypeKind = fieldType->GetKind(); + + auto fieldSize = beCommon.GetTypeSize(fieldTypeIdx); + auto fieldAlign = beCommon.GetTypeAlign(fieldTypeIdx); + int64 myOffset = static_cast(RoundUp(curOffset, fieldAlign)); + int64 nextOffset = myOffset + fieldSize; + + if (!CGOptions::IsQuiet()) { + LogInfo::MapleLogger() << " field: " << fieldName << "\n"; + LogInfo::MapleLogger() << " type: " << fieldTypeIdx << ": " << fieldTypeName << "\n"; + LogInfo::MapleLogger() << " type kind: " << fieldTypeKind << "\n"; + LogInfo::MapleLogger() << " size: " << fieldSize << "\n"; /* int64 */ + LogInfo::MapleLogger() << " align: " << static_cast(fieldAlign) << "\n"; /* int8_t */ + LogInfo::MapleLogger() << " field offset:" << myOffset << "\n"; /* int64 */ + } + + if (fieldTypeKind == kTypePointer) { + if (!CGOptions::IsQuiet()) { + LogInfo::MapleLogger() << " ** Is a pointer field.\n"; + } + result.emplace_back(myOffset); + } + + if ((fieldTypeKind == kTypeArray) || (fieldTypeKind == kTypeStruct) || (fieldTypeKind == kTypeClass) || + (fieldTypeKind == kTypeInterface)) { + if (!CGOptions::IsQuiet()) { + LogInfo::MapleLogger() << " ** ERROR: We are not expecting nested aggregate type. "; + LogInfo::MapleLogger() << "All Java classes are flat -- no nested structs. "; + LogInfo::MapleLogger() << "Please extend me if we are going to work with non-java languages.\n"; + } + } + + curOffset = nextOffset; + } +} + +/* Return a list of offsets of reference fields. */ +std::vector CG::GetReferenceOffsets64(const BECommon &beCommon, MIRStructType &structType) const { + std::vector result; + /* java class layout has already been done in previous phase. 
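For plain structs the offsets are recomputed by AppendReferenceOffsets64 (defined above): each field is placed at RoundUp(curOffset, fieldAlign), e.g. with curOffset = 4 and an 8-byte-aligned pointer field the pointer is recorded at offset 8 and curOffset advances to 16 (illustrative numbers only).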
*/ + if (structType.GetKind() == kTypeClass) { + for (auto fieldInfo : beCommon.GetJClassLayout(static_cast(structType))) { + if (fieldInfo.IsRef()) { + result.emplace_back(static_cast(fieldInfo.GetOffset())); + } + } + } else if (structType.GetKind() != kTypeInterface) { /* interface doesn't have reference fields */ + int64 curOffset = 0; + AppendReferenceOffsets64(beCommon, structType, curOffset, result); + } + + return result; +} + +const std::string CG::ExtractFuncName(const std::string &str) const { + /* 3: length of "_7C" */ + size_t offset = 3; + size_t pos1 = str.find("_7C"); + if (pos1 == std::string::npos) { + return str; + } + size_t pos2 = str.find("_7C", pos1 + offset); + if (pos2 == std::string::npos) { + return str; + } + std::string funcName = str.substr(pos1 + offset, pos2 - pos1 - offset); + /* avoid funcName like __LINE__ and __FILE__ which will be resolved by assembler */ + if (funcName.find("__") != std::string::npos) { + return str; + } + if (funcName == "_3Cinit_3E") { + return "init"; + } + if (funcName == "_3Cclinit_3E") { + return "clinit"; + } + return funcName; +} +} /* namespace maplebe */ diff --git a/src/mapleall/maple_be/src/cg/cg_cfg.cpp b/src/mapleall/maple_be/src/cg/cg_cfg.cpp new file mode 100644 index 0000000000000000000000000000000000000000..4572d2501e052bb7ff7ad0c63d310371659e8800 --- /dev/null +++ b/src/mapleall/maple_be/src/cg/cg_cfg.cpp @@ -0,0 +1,986 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "cg_cfg.h" +#if TARGAARCH64 +#include "aarch64_insn.h" +#elif TARGRISCV64 +#include "riscv64_insn.h" +#endif +#if TARGARM32 +#include "arm32_insn.h" +#endif +#include "cg_option.h" +#include "mpl_logging.h" +#if TARGX86_64 +#include "x64_cgfunc.h" +#include "cg.h" +#endif +#include + +namespace { +using namespace maplebe; +bool CanBBThrow(const BB &bb) { + FOR_BB_INSNS_CONST(insn, &bb) { + if (insn->IsTargetInsn() && insn->CanThrow()) { + return true; + } + } + return false; +} +} + +namespace maplebe { +void CGCFG::BuildCFG() { + /* + * Second Pass: + * Link preds/succs in the BBs + */ + BB *firstBB = cgFunc->GetFirstBB(); + for (BB *curBB = firstBB; curBB != nullptr; curBB = curBB->GetNext()) { + BB::BBKind kind = curBB->GetKind(); + switch (kind) { + case BB::kBBIntrinsic: + /* + * An intrinsic BB append a MOP_wcbnz instruction at the end, check + * AArch64CGFunc::SelectIntrinCall(IntrinsiccallNode *intrinsiccallNode) for details + */ + if (!curBB->GetLastInsn()->IsBranch()) { + break; + } + /* else fall through */ + [[clang::fallthrough]]; + case BB::kBBIf: { + BB *fallthruBB = curBB->GetNext(); + curBB->PushBackSuccs(*fallthruBB); + fallthruBB->PushBackPreds(*curBB); + Insn *branchInsn = curBB->GetLastMachineInsn(); + CHECK_FATAL(branchInsn != nullptr, "machine instruction must be exist in ifBB"); + ASSERT(branchInsn->IsCondBranch(), "must be a conditional branch generated from an intrinsic"); + /* Assume the last non-null operand is the branch target */ + int lastOpndIndex = curBB->GetLastMachineInsn()->GetOperandSize() - 1; + ASSERT(lastOpndIndex > -1, "lastOpndIndex's opnd is greater than -1"); + Operand &lastOpnd = branchInsn->GetOperand(static_cast(lastOpndIndex)); + ASSERT(lastOpnd.IsLabelOpnd(), "label Operand must be exist in branch insn"); + auto &labelOpnd = static_cast(lastOpnd); + BB *brToBB = cgFunc->GetBBFromLab2BBMap(labelOpnd.GetLabelIndex()); + if (fallthruBB->GetId() != brToBB->GetId()) { + curBB->PushBackSuccs(*brToBB); + brToBB->PushBackPreds(*curBB); + } + break; + } + case BB::kBBGoto: { + Insn *insn = curBB->GetLastMachineInsn(); + CHECK_FATAL(insn != nullptr, "machine insn must be exist in gotoBB"); + ASSERT(insn->IsUnCondBranch(), "insn must be a unconditional branch insn"); + LabelIdx labelIdx = static_cast(insn->GetOperand(0)).GetLabelIndex(); + BB *gotoBB = cgFunc->GetBBFromLab2BBMap(labelIdx); + CHECK_FATAL(gotoBB != nullptr, "gotoBB is null"); + curBB->PushBackSuccs(*gotoBB); + gotoBB->PushBackPreds(*curBB); + break; + } + case BB::kBBIgoto: { + for (auto lidx : CG::GetCurCGFunc()->GetMirModule().CurFunction()->GetLabelTab()->GetAddrTakenLabels()) { + BB *igotobb = cgFunc->GetBBFromLab2BBMap(lidx); + CHECK_FATAL(igotobb, "igotobb is null"); + curBB->PushBackSuccs(*igotobb); + igotobb->PushBackPreds(*curBB); + } + break; + } + case BB::kBBRangeGoto: { + std::set bbs; + for (auto labelIdx : curBB->GetRangeGotoLabelVec()) { + BB *gotoBB = cgFunc->GetBBFromLab2BBMap(labelIdx); + bbs.insert(gotoBB); + } + for (auto gotoBB : bbs) { + curBB->PushBackSuccs(*gotoBB); + gotoBB->PushBackPreds(*curBB); + } + break; + } + case BB::kBBThrow: + break; + case BB::kBBFallthru: { + BB *fallthruBB = curBB->GetNext(); + if (fallthruBB != nullptr) { + curBB->PushBackSuccs(*fallthruBB); + fallthruBB->PushBackPreds(*curBB); + } + break; + } + default: + break; + } /* end switch */ + + EHFunc *ehFunc = cgFunc->GetEHFunc(); + /* Check exception table. 
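The LSDA call-site table describes each try region as a [start label, end label] range plus an optional landing pad.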
If curBB is in a try block, add catch BB to its succs */ + if (ehFunc != nullptr && ehFunc->GetLSDACallSiteTable() != nullptr) { + /* Determine if insn in bb can actually except */ + if (CanBBThrow(*curBB)) { + const MapleVector &callsiteTable = ehFunc->GetLSDACallSiteTable()->GetCallSiteTable(); + for (size_t i = 0; i < callsiteTable.size(); ++i) { + LSDACallSite *lsdaCallsite = callsiteTable[i]; + BB *endTry = cgFunc->GetBBFromLab2BBMap(lsdaCallsite->csLength.GetEndOffset()->GetLabelIdx()); + BB *startTry = cgFunc->GetBBFromLab2BBMap(lsdaCallsite->csLength.GetStartOffset()->GetLabelIdx()); + if (curBB->GetId() >= startTry->GetId() && curBB->GetId() <= endTry->GetId() && + lsdaCallsite->csLandingPad.GetEndOffset() != nullptr) { + BB *landingPad = cgFunc->GetBBFromLab2BBMap(lsdaCallsite->csLandingPad.GetEndOffset()->GetLabelIdx()); + curBB->PushBackEhSuccs(*landingPad); + landingPad->PushBackEhPreds(*curBB); + } + } + } + } + } + FindAndMarkUnreachable(*cgFunc); +} + +void CGCFG::CheckCFG() { + FOR_ALL_BB(bb, cgFunc) { + for (BB *sucBB : bb->GetSuccs()) { + bool found = false; + for (BB *sucPred : sucBB->GetPreds()) { + if (sucPred == bb) { + if (found == false) { + found = true; + } else { + LogInfo::MapleLogger() << "dup pred " << sucPred->GetId() << " for sucBB " << sucBB->GetId() << "\n"; + } + } + } + if (found == false) { + LogInfo::MapleLogger() << "non pred for sucBB " << sucBB->GetId() << " for BB " << bb->GetId() << "\n"; + } + } + } + FOR_ALL_BB(bb, cgFunc) { + for (BB *predBB : bb->GetPreds()) { + bool found = false; + for (BB *predSucc : predBB->GetSuccs()) { + if (predSucc == bb) { + if (found == false) { + found = true; + } else { + LogInfo::MapleLogger() << "dup succ " << predSucc->GetId() << " for predBB " << predBB->GetId() << "\n"; + } + } + } + if (found == false) { + LogInfo::MapleLogger() << "non succ for predBB " << predBB->GetId() << " for BB " << bb->GetId() << "\n"; + } + } + } +} + +void CGCFG::CheckCFGFreq() { + auto verifyBBFreq = [this](const BB *bb, uint32 succFreq) { + uint32 res = bb->GetFrequency(); + if ((res != 0 && static_cast(abs(static_cast(res - succFreq)) / res > 1.0)) || + (res == 0 && res != succFreq)) { + // Not included + if (bb->GetSuccs().size() > 1 && bb->GetPreds().size() > 1) { + return; + } + LogInfo::MapleLogger() << cgFunc->GetName() << " curBB: " << bb->GetId() << " freq: " + << bb->GetFrequency() << std::endl; + CHECK_FATAL(false, "Verifyfreq failure BB frequency!"); + } + }; + FOR_ALL_BB(bb, cgFunc) { + if (bb->IsUnreachable() || bb->IsCleanup()) { + continue; + } + uint32 res = 0; + if (bb->GetSuccs().size() > 1) { + for (auto *succBB : bb->GetSuccs()) { + res += succBB->GetFrequency(); + if (succBB->GetPreds().size() > 1) { + LogInfo::MapleLogger() << cgFunc->GetName() << " critical edges: curBB: " << bb->GetId() << std::endl; + CHECK_FATAL(false, "The CFG has critical edges!"); + } + } + verifyBBFreq(bb, res); + } else if (bb->GetSuccs().size() == 1) { + auto *succBB = bb->GetSuccs().front(); + if (succBB->GetPreds().size() == 1) { + verifyBBFreq(bb, succBB->GetFrequency()); + } else if (succBB->GetPreds().size() > 1) { + for (auto *pred : succBB->GetPreds()) { + res += pred->GetFrequency(); + } + verifyBBFreq(succBB, res); + } + } + } + LogInfo::MapleLogger() << "Check Frequency for " << cgFunc->GetName() << " success!\n"; +} + +InsnVisitor *CGCFG::insnVisitor; + +void CGCFG::InitInsnVisitor(CGFunc &func) const { + insnVisitor = func.NewInsnModifier(); +} + +Insn *CGCFG::CloneInsn(Insn &originalInsn) const { + 
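+  /* every clone is counted towards the function's instruction total; the insnVisitor set up by InitInsnVisitor does the actual copy */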
cgFunc->IncTotalNumberOfInstructions(); + return insnVisitor->CloneInsn(originalInsn); +} + +RegOperand *CGCFG::CreateVregFromReg(const RegOperand &pReg) const { + return insnVisitor->CreateVregFromReg(pReg); +} + +/* + * return true if: + * mergee has only one predecessor which is merger, + * or mergee has other comments only predecessors & merger is soloGoto + * mergee can't have cfi instruction when postcfgo. + */ +bool CGCFG::BBJudge(const BB &first, const BB &second) const { + if (first.GetKind() == BB::kBBReturn || second.GetKind() == BB::kBBReturn) { + return false; + } + if (&first == &second) { + return false; + } + if (second.GetPreds().size() == 1 && second.GetPreds().front() == &first) { + return true; + } + for (BB *bb : second.GetPreds()) { + if (bb != &first && !AreCommentAllPreds(*bb)) { + return false; + } + } + return first.IsSoloGoto(); +} + +/* + * Check if a given BB mergee can be merged into BB merger. + * Returns true if: + * 1. mergee has only one predecessor which is merger, or mergee has + * other comments only predecessors. + * 2. merge has only one successor which is mergee. + * 3. mergee can't have cfi instruction when postcfgo. + */ +bool CGCFG::CanMerge(const BB &merger, const BB &mergee) const { + if (!BBJudge(merger, mergee)) { + return false; + } + if (mergee.GetFirstInsn() != nullptr && mergee.GetFirstInsn()->IsCfiInsn()) { + return false; + } + return (merger.GetSuccs().size() == 1) && (merger.GetSuccs().front() == &mergee); +} + +/* Check if the given BB contains only comments and all its predecessors are comments */ +bool CGCFG::AreCommentAllPreds(const BB &bb) { + if (!bb.IsCommentBB()) { + return false; + } + for (BB *pred : bb.GetPreds()) { + if (!AreCommentAllPreds(*pred)) { + return false; + } + } + return true; +} + +/* Merge sucBB into curBB. 
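Besides moving mergee's instructions into merger, this overload also patches function-level state: lastBB, the exit-BB vector for return blocks, commonExitBB preds for infinite loops, and the switch-table emit symbol for range-goto blocks.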
*/ +void CGCFG::MergeBB(BB &merger, BB &mergee, CGFunc &func) { + BB *prevLast = mergee.GetPrev(); + MergeBB(merger, mergee); + if (func.GetLastBB()->GetId() == mergee.GetId()) { + func.SetLastBB(*prevLast); + } + if (mergee.GetKind() == BB::kBBReturn) { + for (size_t i = 0; i < func.ExitBBsVecSize(); ++i) { + if (func.GetExitBB(i) == &mergee) { + func.EraseExitBBsVec(func.GetExitBBsVec().begin() + i); + } + } + func.PushBackExitBBsVec(merger); + } + /* if mergee is infinite loop */ + BB *commonExit = func.GetCommonExitBB(); + auto exitPredIt = std::find(commonExit->GetPredsBegin(), commonExit->GetPredsEnd(), &mergee); + if (exitPredIt != commonExit->GetPredsEnd()) { + commonExit->ErasePreds(exitPredIt); + commonExit->PushBackPreds(merger); + } + + if (mergee.GetKind() == BB::kBBRangeGoto) { + func.AddEmitSt(merger.GetId(), *func.GetEmitSt(mergee.GetId())); + func.DeleteEmitSt(mergee.GetId()); + } +} + +void CGCFG::MergeBB(BB &merger, BB &mergee) { + if (merger.GetKind() == BB::kBBGoto) { + if (!merger.GetLastInsn()->IsBranch()) { + CHECK_FATAL(false, "unexpected insn kind"); + } + merger.RemoveInsn(*merger.GetLastInsn()); + } + merger.AppendBBInsns(mergee); + if (mergee.GetPrev() != nullptr) { + mergee.GetPrev()->SetNext(mergee.GetNext()); + } + if (mergee.GetNext() != nullptr) { + mergee.GetNext()->SetPrev(mergee.GetPrev()); + } + merger.RemoveSuccs(mergee); + if (!merger.GetEhSuccs().empty()) { +#if DEBUG + for (BB *bb : merger.GetEhSuccs()) { + ASSERT((bb != &mergee), "CGCFG::MergeBB: Merging of EH bb"); + } +#endif + } + if (!mergee.GetEhSuccs().empty()) { + for (BB *bb : mergee.GetEhSuccs()) { + bb->RemoveEhPreds(mergee); + bb->PushBackEhPreds(merger); + merger.PushBackEhSuccs(*bb); + } + } + for (BB *bb : mergee.GetSuccs()) { + bb->RemovePreds(mergee); + bb->PushBackPreds(merger); + merger.PushBackSuccs(*bb); + } + merger.SetKind(mergee.GetKind()); + merger.SetNeedRestoreCfi(mergee.IsNeedRestoreCfi()); + mergee.SetNext(nullptr); + mergee.SetPrev(nullptr); + mergee.ClearPreds(); + mergee.ClearSuccs(); + mergee.ClearEhPreds(); + mergee.ClearEhSuccs(); + mergee.SetFirstInsn(nullptr); + mergee.SetLastInsn(nullptr); +} + +/* + * Find all reachable BBs by dfs in cgfunc and mark their field false, then all other bbs should be + * unreachable. 
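+ * Cleanup blocks, switch-table targets and the first/last BB are always seeded as reachable roots, and label-taken blocks are never marked unreachable.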
+ */ +void CGCFG::FindAndMarkUnreachable(CGFunc &func) { + BB *firstBB = func.GetFirstBB(); + std::stack toBeAnalyzedBBs; + toBeAnalyzedBBs.push(firstBB); + std::unordered_set instackBBs; + + BB *bb = firstBB; + /* set all bb's unreacable to true */ + while (bb != nullptr) { + /* Check if bb is the cleanupBB/switchTableBB/firstBB/lastBB of the function */ + if (bb->IsCleanup() || InSwitchTable(bb->GetLabIdx(), func) || bb == func.GetFirstBB() || bb == func.GetLastBB()) { + toBeAnalyzedBBs.push(bb); + } else if (bb->IsLabelTaken() == false) { + bb->SetUnreachable(true); + } + bb = bb->GetNext(); + } + + /* do a dfs to see which bbs are reachable */ + while (!toBeAnalyzedBBs.empty()) { + bb = toBeAnalyzedBBs.top(); + toBeAnalyzedBBs.pop(); + (void)instackBBs.insert(bb->GetId()); + + bb->SetUnreachable(false); + + for (BB *succBB : bb->GetSuccs()) { + if (instackBBs.count(succBB->GetId()) == 0) { + toBeAnalyzedBBs.push(succBB); + (void)instackBBs.insert(succBB->GetId()); + } + } + for (BB *succBB : bb->GetEhSuccs()) { + if (instackBBs.count(succBB->GetId()) == 0) { + toBeAnalyzedBBs.push(succBB); + (void)instackBBs.insert(succBB->GetId()); + } + } + } + FOR_ALL_BB(tmpBB, &func) { + for (MapleList::iterator predIt = tmpBB->GetPredsBegin(); predIt != tmpBB->GetPredsEnd(); ++predIt) { + if ((*predIt)->IsUnreachable()) { + tmpBB->ErasePreds(predIt); + } + } + for (MapleList::iterator predIt = tmpBB->GetEhPredsBegin(); predIt != tmpBB->GetEhPredsEnd(); ++predIt) { + if ((*predIt)->IsUnreachable()) { + tmpBB->ErasePreds(predIt); + } + } + } +} + +/* + * Theoretically, every time you remove from a bb's preds, you should consider invoking this method. + * + * @param bb + * @param func + */ +void CGCFG::FlushUnReachableStatusAndRemoveRelations(BB &bb, const CGFunc &func) const { + bool isFirstBBInfunc = (&bb == func.GetFirstBB()); + bool isLastBBInfunc = (&bb == func.GetLastBB()); + /* Check if bb is the cleanupBB/switchTableBB/firstBB/lastBB of the function */ + if (bb.IsCleanup() || InSwitchTable(bb.GetLabIdx(), func) || isFirstBBInfunc || isLastBBInfunc) { + return; + } + std::stack toBeAnalyzedBBs; + toBeAnalyzedBBs.push(&bb); + std::set instackBBs; + BB *it = nullptr; + while (!toBeAnalyzedBBs.empty()) { + it = toBeAnalyzedBBs.top(); + (void)instackBBs.insert(it->GetId()); + toBeAnalyzedBBs.pop(); + /* Check if bb is the first or the last BB of the function */ + isFirstBBInfunc = (it == func.GetFirstBB()); + isLastBBInfunc = (it == func.GetLastBB()); + bool needFlush = !isFirstBBInfunc && !isLastBBInfunc && !it->IsCleanup() && + (it->GetPreds().empty() || (it->GetPreds().size() == 1 && it->GetEhPreds().front() == it)) && + it->GetEhPreds().empty() && + !InSwitchTable(it->GetLabIdx(), *cgFunc) && + !cgFunc->IsExitBB(*it) && + (it->IsLabelTaken() == false); + if (!needFlush) { + continue; + } + it->SetUnreachable(true); + it->SetFirstInsn(nullptr); + it->SetLastInsn(nullptr); + for (BB *succ : it->GetSuccs()) { + if (instackBBs.count(succ->GetId()) == 0) { + toBeAnalyzedBBs.push(succ); + (void)instackBBs.insert(succ->GetId()); + } + succ->RemovePreds(*it); + succ->RemoveEhPreds(*it); + } + it->ClearSuccs(); + for (BB *succ : it->GetEhSuccs()) { + if (instackBBs.count(succ->GetId()) == 0) { + toBeAnalyzedBBs.push(succ); + (void)instackBBs.insert(succ->GetId()); + } + succ->RemoveEhPreds(*it); + succ->RemovePreds(*it); + } + it->ClearEhSuccs(); + } +} + +void CGCFG::RemoveBB(BB &curBB, bool isGotoIf) const { + BB *sucBB = CGCFG::GetTargetSuc(curBB, false, isGotoIf); + if (sucBB != nullptr) { + 
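+    /* unhook curBB from its branch target first; the fallthrough successor of a goto/if BB is handled just below */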
sucBB->RemovePreds(curBB); + } + BB *fallthruSuc = nullptr; + if (isGotoIf) { + for (BB *succ : curBB.GetSuccs()) { + if (succ == sucBB) { + continue; + } + fallthruSuc = succ; + break; + } + ASSERT(fallthruSuc == curBB.GetNext(), "fallthru succ should be its next bb."); + if (fallthruSuc != nullptr) { + fallthruSuc->RemovePreds(curBB); + } + } + for (BB *preBB : curBB.GetPreds()) { + if (preBB->GetKind() == BB::kBBIgoto) { + sucBB->PushBackPreds(curBB); + return; + } + /* + * If curBB is the target of its predecessor, change + * the jump target. + */ + if (&curBB == GetTargetSuc(*preBB, true, isGotoIf)) { + LabelIdx targetLabel; + if (curBB.GetNext()->GetLabIdx() == 0) { + targetLabel = insnVisitor->GetCGFunc()->CreateLabel(); + curBB.GetNext()->SetLabIdx(targetLabel); + } else { + targetLabel = curBB.GetNext()->GetLabIdx(); + } + insnVisitor->ModifyJumpTarget(targetLabel, *preBB); + } + if (fallthruSuc != nullptr && !fallthruSuc->IsPredecessor(*preBB)) { + preBB->PushBackSuccs(*fallthruSuc); + fallthruSuc->PushBackPreds(*preBB); + } + if (sucBB != nullptr && !sucBB->IsPredecessor(*preBB)) { + preBB->PushBackSuccs(*sucBB); + sucBB->PushBackPreds(*preBB); + } + preBB->RemoveSuccs(curBB); + } + + for (BB *ehSucc : curBB.GetEhSuccs()) { + ehSucc->RemoveEhPreds(curBB); + } + for (BB *ehPred : curBB.GetEhPreds()) { + ehPred->RemoveEhSuccs(curBB); + } + if (curBB.GetNext() != nullptr) { + cgFunc->GetCommonExitBB()->RemovePreds(curBB); + curBB.GetNext()->RemovePreds(curBB); + curBB.GetNext()->SetPrev(curBB.GetPrev()); + } else { + cgFunc->SetLastBB(*curBB.GetPrev()); + } + curBB.GetPrev()->SetNext(curBB.GetNext()); + cgFunc->ClearBBInVec(curBB.GetId()); + /* remove callsite */ + EHFunc *ehFunc = cgFunc->GetEHFunc(); + /* only java try has ehFunc->GetLSDACallSiteTable */ + if (ehFunc != nullptr && ehFunc->GetLSDACallSiteTable() != nullptr) { + ehFunc->GetLSDACallSiteTable()->RemoveCallSite(curBB); + } +} + +void CGCFG::RetargetJump(BB &srcBB, BB &targetBB) const { + insnVisitor->ModifyJumpTarget(srcBB, targetBB); +} + +BB *CGCFG::GetTargetSuc(BB &curBB, bool branchOnly, bool isGotoIf) { + switch (curBB.GetKind()) { + case BB::kBBGoto: + case BB::kBBIntrinsic: + case BB::kBBIf: { + const Insn* origLastInsn = curBB.GetLastMachineInsn(); + if (isGotoIf && (curBB.GetPrev() != nullptr) && + (curBB.GetKind() == BB::kBBGoto || curBB.GetKind() == BB::kBBIf) && + (curBB.GetPrev()->GetKind() == BB::kBBGoto || curBB.GetPrev()->GetKind() == BB::kBBIf)) { + origLastInsn = curBB.GetPrev()->GetLastMachineInsn(); + } + LabelIdx label = insnVisitor->GetJumpLabel(*origLastInsn); + for (BB *bb : curBB.GetSuccs()) { + if (bb->GetLabIdx() == label) { + return bb; + } + } + break; + } + case BB::kBBIgoto: { + for (Insn *insn = curBB.GetLastInsn(); insn != nullptr; insn = insn->GetPrev()) { +#if TARGAARCH64 + if (insn->GetMachineOpcode() == MOP_adrp_label) { + int64 label = static_cast(insn->GetOperand(1)).GetValue(); + for (BB *bb : curBB.GetSuccs()) { + if (bb->GetLabIdx() == static_cast(label)) { + return bb; + } + } + } +#endif + } + /* can also be a MOP_xbr. */ + return nullptr; + } + case BB::kBBFallthru: { + return (branchOnly ? 
nullptr : curBB.GetNext()); + } + case BB::kBBThrow: + return nullptr; + default: + return nullptr; + } + return nullptr; +} + +bool CGCFG::InLSDA(LabelIdx label, const EHFunc *ehFunc) { + /* the function have no exception handle module */ + if (ehFunc == nullptr) { + return false; + } + + if ((label == 0) || ehFunc->GetLSDACallSiteTable() == nullptr) { + return false; + } + if (label == ehFunc->GetLSDACallSiteTable()->GetCSTable().GetEndOffset()->GetLabelIdx() || + label == ehFunc->GetLSDACallSiteTable()->GetCSTable().GetStartOffset()->GetLabelIdx()) { + return true; + } + return ehFunc->GetLSDACallSiteTable()->InCallSiteTable(label); +} + +bool CGCFG::InSwitchTable(LabelIdx label, const CGFunc &func) { + if (label == 0) { + return false; + } + return func.InSwitchTable(label); +} + +bool CGCFG::IsCompareAndBranchInsn(const Insn &insn) const { + return insnVisitor->IsCompareAndBranchInsn(insn); +} + +bool CGCFG::IsAddOrSubInsn(const Insn &insn) const { + return insnVisitor->IsAddOrSubInsn(insn); +} + +Insn *CGCFG::FindLastCondBrInsn(BB &bb) const { + if (bb.GetKind() != BB::kBBIf) { + return nullptr; + } + FOR_BB_INSNS_REV(insn, (&bb)) { + if (insn->IsBranch()) { + return insn; + } + } + return nullptr; +} + +void CGCFG::MarkLabelTakenBB() const { + if (cgFunc->GetMirModule().GetSrcLang() != kSrcLangC) { + return; + } + for (BB *bb = cgFunc->GetFirstBB(); bb != nullptr; bb = bb->GetNext()) { + if (cgFunc->GetFunction().GetLabelTab()->GetAddrTakenLabels().find(bb->GetLabIdx()) != + cgFunc->GetFunction().GetLabelTab()->GetAddrTakenLabels().end()) { + cgFunc->SetHasTakenLabel(); + bb->SetLabelTaken(); + } + } +} + +/* + * analyse the CFG to find the BBs that are not reachable from function entries + * and delete them + */ +void CGCFG::UnreachCodeAnalysis() const { + if (cgFunc->GetMirModule().GetSrcLang() == kSrcLangC && + (cgFunc->HasTakenLabel() || + (cgFunc->GetEHFunc() && cgFunc->GetEHFunc()->GetLSDAHeader()))) { + return; + } + /* + * Find all reachable BBs by dfs in cgfunc and mark their field false, + * then all other bbs should be unreachable. 
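+ * For C code with address-taken labels or a full LSDA the analysis returns early above, since blocks reached only through indirect gotos or exception edges must be kept.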
+ */ + BB *firstBB = cgFunc->GetFirstBB(); + std::forward_list toBeAnalyzedBBs; + toBeAnalyzedBBs.push_front(firstBB); + std::set unreachBBs; + + BB *bb = firstBB; + /* set all bb's unreacable to true */ + while (bb != nullptr) { + /* Check if bb is the firstBB/cleanupBB/returnBB/lastBB of the function */ + if (bb->IsCleanup() || InSwitchTable(bb->GetLabIdx(), *cgFunc) || + bb == cgFunc->GetFirstBB() || bb == cgFunc->GetLastBB() || + (bb->GetKind() == BB::kBBReturn && !cgFunc->GetMirModule().IsCModule())) { + toBeAnalyzedBBs.push_front(bb); + } else { + (void)unreachBBs.insert(bb); + } + if (bb->IsLabelTaken() == false) { + bb->SetUnreachable(true); + } + bb = bb->GetNext(); + } + + /* do a dfs to see which bbs are reachable */ + while (!toBeAnalyzedBBs.empty()) { + bb = toBeAnalyzedBBs.front(); + toBeAnalyzedBBs.pop_front(); + if (!bb->IsUnreachable()) { + continue; + } + bb->SetUnreachable(false); + for (BB *succBB : bb->GetSuccs()) { + toBeAnalyzedBBs.push_front(succBB); + unreachBBs.erase(succBB); + } + for (BB *succBB : bb->GetEhSuccs()) { + toBeAnalyzedBBs.push_front(succBB); + unreachBBs.erase(succBB); + } + } + + /* remove unreachable bb */ + std::set::iterator it; + for (it = unreachBBs.begin(); it != unreachBBs.end(); it++) { + BB *unreachBB = *it; + ASSERT(unreachBB != nullptr, "unreachBB must not be nullptr"); + if (cgFunc->IsExitBB(*unreachBB) && !cgFunc->GetMirModule().IsCModule()) { + unreachBB->SetUnreachable(false); + } + EHFunc *ehFunc = cgFunc->GetEHFunc(); + /* if unreachBB InLSDA ,replace unreachBB's label with nextReachableBB before remove it. */ + if (ehFunc != nullptr && ehFunc->NeedFullLSDA() && + cgFunc->GetTheCFG()->InLSDA(unreachBB->GetLabIdx(), ehFunc)) { + /* find next reachable BB */ + BB* nextReachableBB = nullptr; + for (BB* curBB = unreachBB; curBB != nullptr; curBB = curBB->GetNext()) { + if (!curBB->IsUnreachable()) { + nextReachableBB = curBB; + break; + } + } + CHECK_FATAL(nextReachableBB != nullptr, "nextReachableBB not be nullptr"); + if (nextReachableBB->GetLabIdx() == 0) { + LabelIdx labelIdx = cgFunc->CreateLabel(); + nextReachableBB->AddLabel(labelIdx); + cgFunc->SetLab2BBMap(labelIdx, *nextReachableBB); + } + + ehFunc->GetLSDACallSiteTable()->UpdateCallSite(*unreachBB, *nextReachableBB); + } + + unreachBB->GetPrev()->SetNext(unreachBB->GetNext()); + unreachBB->GetNext()->SetPrev(unreachBB->GetPrev()); + + for (BB *sucBB : unreachBB->GetSuccs()) { + sucBB->RemovePreds(*unreachBB); + } + for (BB *ehSucBB : unreachBB->GetEhSuccs()) { + ehSucBB->RemoveEhPreds(*unreachBB); + } + + unreachBB->ClearSuccs(); + unreachBB->ClearEhSuccs(); + + cgFunc->ClearBBInVec(unreachBB->GetId()); + + /* Clear insns in GOT Map. 
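GOT and constant-pool records attached to the removed BB are dropped as well (the two calls below).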
*/ + cgFunc->ClearUnreachableGotInfos(*unreachBB); + cgFunc->ClearUnreachableConstInfos(*unreachBB); + } +} + +void CGCFG::FindWillExitBBs(BB *bb, std::set *visitedBBs) { + if (visitedBBs->count(bb) != 0) { + return; + } + visitedBBs->insert(bb); + for (BB *predbb : bb->GetPreds()) { + FindWillExitBBs(predbb, visitedBBs); + } +} + +/* + * analyse the CFG to find the BBs that will not reach any function exit; these + * are BBs inside infinite loops; mark their wontExit flag and create + * artificial edges from them to commonExitBB + */ +void CGCFG::WontExitAnalysis() { + std::set visitedBBs; + FindWillExitBBs(cgFunc->GetCommonExitBB(), &visitedBBs); + BB *bb = cgFunc->GetFirstBB(); + while (bb != nullptr) { + if (visitedBBs.count(bb) == 0) { + bb->SetWontExit(true); + if (bb->GetKind() == BB::kBBGoto || bb->GetKind() == BB::kBBThrow) { + // make this bb a predecessor of commonExitBB + cgFunc->GetCommonExitBB()->PushBackPreds(*bb); + } + } + bb = bb->GetNext(); + } +} + +BB *CGCFG::FindLastRetBB() { + FOR_ALL_BB_REV(bb, cgFunc) { + if (bb->GetKind() == BB::kBBReturn) { + return bb; + } + } + return nullptr; +} + +void CGCFG::UpdatePredsSuccsAfterSplit(BB &pred, BB &succ, BB &newBB) const { + /* connext newBB -> succ */ + for (auto it = succ.GetPredsBegin(); it != succ.GetPredsEnd(); ++it) { + if (*it == &pred) { + auto origIt = it; + succ.ErasePreds(it); + if (origIt != succ.GetPredsBegin()) { + --origIt; + succ.InsertPred(origIt, newBB); + } else { + succ.PushFrontPreds(newBB); + } + break; + } + } + newBB.PushBackSuccs(succ); + + /* connext pred -> newBB */ + for (auto it = pred.GetSuccsBegin(); it != pred.GetSuccsEnd(); ++it) { + if (*it == &succ) { + auto origIt = it; + pred.EraseSuccs(it); + if (origIt != succ.GetSuccsBegin()) { + --origIt; + pred.InsertSucc(origIt, newBB); + } else { + pred.PushFrontSuccs(newBB); + } + break; + } + } + newBB.PushBackPreds(pred); + + /* maintain eh info */ + for (auto it = pred.GetEhSuccs().begin(); it != pred.GetEhSuccs().end(); ++it) { + newBB.PushBackEhSuccs(**it); + } + for (auto it = pred.GetEhPredsBegin(); it != pred.GetEhPredsEnd(); ++it) { + newBB.PushBackEhPreds(**it); + } + + /* update phi */ + for (auto phiInsnIt : succ.GetPhiInsns()) { + auto &phiList = static_cast(phiInsnIt.second->GetOperand(kInsnSecondOpnd)); + for (auto phiOpndIt : phiList.GetOperands()) { + uint32 fBBId = phiOpndIt.first; + ASSERT(fBBId != 0, "GetFromBBID = 0"); + BB *predBB = cgFunc->GetBBFromID(fBBId); + if (predBB == &pred) { + phiList.UpdateOpnd(fBBId, newBB.GetId(), *phiOpndIt.second); + break; + } + } + } +} + +#if TARGAARCH64 +BB *CGCFG::BreakCriticalEdge(BB &pred, BB &succ) { + LabelIdx newLblIdx = cgFunc->CreateLabel(); + BB *newBB = cgFunc->CreateNewBB(newLblIdx, false, BB::kBBGoto, pred.GetFrequency()); + newBB->SetCritical(true); + bool isFallThru = pred.GetNext() == ≻ + /* set prev, next */ + if (isFallThru) { + BB *origNext = pred.GetNext(); + origNext->SetPrev(newBB); + newBB->SetNext(origNext); + pred.SetNext(newBB); + newBB->SetPrev(&pred); + newBB->SetKind(BB::kBBFallthru); + } else { + BB *exitBB = cgFunc->GetExitBBsVec().size() == 0 ? 
nullptr : cgFunc->GetExitBB(0); + if (exitBB == nullptr || exitBB->IsUnreachable()) { + cgFunc->GetLastBB()->AppendBB(*newBB); + cgFunc->SetLastBB(*newBB); + } else { + exitBB->AppendBB(*newBB); + if (cgFunc->GetLastBB() == exitBB) { + cgFunc->SetLastBB(*newBB); + } + } + newBB->AppendInsn( + cgFunc->GetInsnBuilder()->BuildInsn(MOP_xuncond, cgFunc->GetOrCreateLabelOperand(succ.GetLabIdx()))); + } + + /* update offset if succ is goto target */ + if (pred.GetKind() == BB::kBBIf) { + Insn *brInsn = FindLastCondBrInsn(pred); + ASSERT(brInsn != nullptr, "null ptr check"); + LabelOperand &brTarget = static_cast(brInsn->GetOperand(AArch64isa::GetJumpTargetIdx(*brInsn))); + if (brTarget.GetLabelIndex() == succ.GetLabIdx()) { + brInsn->SetOperand(AArch64isa::GetJumpTargetIdx(*brInsn), cgFunc->GetOrCreateLabelOperand(newLblIdx)); + } + } else if (pred.GetKind() == BB::kBBRangeGoto) { + const MapleVector &labelVec = pred.GetRangeGotoLabelVec(); + for (size_t i = 0; i < labelVec.size(); ++i) { + if (labelVec[i] == succ.GetLabIdx()) { + /* single edge for multi jump target, so have to replace all. */ + pred.SetRangeGotoLabel(i, newLblIdx); + } + } + cgFunc->UpdateEmitSt(pred, succ.GetLabIdx(), newLblIdx); + } else { + ASSERT(0, "unexpeced bb kind in BreakCriticalEdge"); + } + + /* update pred, succ */ + UpdatePredsSuccsAfterSplit(pred, succ, *newBB); + return newBB; +} + +void CGCFG::ReverseCriticalEdge(BB &cbb) { + CHECK_FATAL(cbb.GetPreds().size() == 1, "critical edge bb has more than 1 preds"); + CHECK_FATAL(cbb.GetSuccs().size() == 1, "critical edge bb has more than 1 succs"); + + BB *pred = *cbb.GetPreds().begin(); + BB *succ = *cbb.GetSuccs().begin(); + + if (pred->GetKind() == BB::kBBIf) { + Insn *brInsn = FindLastCondBrInsn(*pred); + ASSERT(brInsn != nullptr, "null ptr check"); + LabelOperand &brTarget = static_cast(brInsn->GetOperand(AArch64isa::GetJumpTargetIdx(*brInsn))); + if (brTarget.GetLabelIndex() == cbb.GetLabIdx()) { + CHECK_FATAL(succ->GetLabIdx() != MIRLabelTable::GetDummyLabel(), "unexpect label"); + brInsn->SetOperand(AArch64isa::GetJumpTargetIdx(*brInsn), cgFunc->GetOrCreateLabelOperand(succ->GetLabIdx())); + } else { + CHECK_FATAL(false, "pred of critical edge bb do not goto cbb"); + } + } else if (pred->GetKind() == BB::kBBRangeGoto) { + const MapleVector &labelVec = pred->GetRangeGotoLabelVec(); + uint32 index = 0; + CHECK_FATAL(succ->GetLabIdx() != MIRLabelTable::GetDummyLabel(), "unexpect label"); + for (auto label: labelVec) { + /* single edge for multi jump target, so have to replace all. 
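The CFG keeps a single succ edge even when several jump-table slots name cbb, so every matching slot, and afterwards the corresponding entries in the emitted jump-table constant, are rewritten to succ.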
*/ + if (label == cbb.GetLabIdx()) { + pred->SetRangeGotoLabel(index, succ->GetLabIdx()); + } + index++; + } + MIRSymbol *st = cgFunc->GetEmitSt(pred->GetId()); + MIRAggConst *arrayConst = safe_cast(st->GetKonst()); + MIRType *etype = GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_a64)); + MIRConst *mirConst = cgFunc->GetMemoryPool()->New(succ->GetLabIdx(), cgFunc->GetFunction().GetPuidx(), + *etype); + for (size_t i = 0; i < arrayConst->GetConstVec().size(); ++i) { + CHECK_FATAL(arrayConst->GetConstVecItem(i)->GetKind() == kConstLblConst, "not a kConstLblConst"); + MIRLblConst *lblConst = safe_cast(arrayConst->GetConstVecItem(i)); + if (cbb.GetLabIdx() == lblConst->GetValue()) { + arrayConst->SetConstVecItem(i, *mirConst); + } + } + } else { + ASSERT(0, "unexpeced bb kind in BreakCriticalEdge"); + } + + pred->RemoveSuccs(cbb); + pred->PushBackSuccs(*succ); + succ->RemovePreds(cbb); + succ->PushBackPreds(*pred); +} +#endif + +bool CgHandleCFG::PhaseRun(maplebe::CGFunc &f) { + CGCFG *cfg = f.GetMemoryPool()->New(f); + f.SetTheCFG(cfg); + /* build control flow graph */ + f.GetTheCFG()->BuildCFG(); + return false; +} +MAPLE_TRANSFORM_PHASE_REGISTER(CgHandleCFG, handlecfg) + +} /* namespace maplebe */ diff --git a/src/mapleall/maple_be/src/cg/cg_critical_edge.cpp b/src/mapleall/maple_be/src/cg/cg_critical_edge.cpp new file mode 100644 index 0000000000000000000000000000000000000000..37b2226e6567bc64353e4dadc77f9251dbae1f1e --- /dev/null +++ b/src/mapleall/maple_be/src/cg/cg_critical_edge.cpp @@ -0,0 +1,61 @@ +/* + * Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "cg_critical_edge.h" +#include "cg_ssa.h" + +namespace maplebe { +void CriticalEdge::SplitCriticalEdges() { + for (auto it = criticalEdges.begin(); it != criticalEdges.end(); ++it) { + BB *newBB = cgFunc->GetTheCFG()->BreakCriticalEdge(*((*it).first), *((*it).second)); + (void)newBBcreated.emplace(newBB->GetId()); + } +} + +void CriticalEdge::CollectCriticalEdges() { + constexpr int multiPredsNum = 2; + FOR_ALL_BB(bb, cgFunc) { + const auto &preds = bb->GetPreds(); + if (preds.size() < multiPredsNum) { + continue; + } + // current BB is a merge + for (BB *pred : preds) { + if (pred->GetKind() == BB::kBBGoto || pred->GetKind() == BB::kBBIgoto) { + continue; + } + if (pred->GetSuccs().size() > 1) { + // pred has more than one succ + criticalEdges.push_back(std::make_pair(pred, bb)); + } + } + } +} + +bool CgCriticalEdge::PhaseRun(maplebe::CGFunc &f) { + if (Globals::GetInstance()->GetOptimLevel() >= CGOptions::kLevel2 && f.NumBBs() < kBBLimit) { + MemPool *memPool = GetPhaseMemPool(); + CriticalEdge *split = memPool->New(f, *memPool); + f.GetTheCFG()->InitInsnVisitor(f); + split->CollectCriticalEdges(); + split->SplitCriticalEdges(); + } + return false; +} + +void CgCriticalEdge::GetAnalysisDependence(maple::AnalysisDep &aDep) const { + aDep.AddPreserved(); +} +MAPLE_TRANSFORM_PHASE_REGISTER_CANSKIP(CgCriticalEdge, cgsplitcriticaledge) +} /* namespace maplebe */ diff --git a/src/mapleall/maple_be/src/cg/cg_dce.cpp b/src/mapleall/maple_be/src/cg/cg_dce.cpp new file mode 100644 index 0000000000000000000000000000000000000000..658f69c368f8fadf0b7f2d2bdc36cdb4529324bb --- /dev/null +++ b/src/mapleall/maple_be/src/cg/cg_dce.cpp @@ -0,0 +1,45 @@ +/* +* Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. +* +* OpenArkCompiler is licensed under Mulan PSL v2. +* You can use this software according to the terms and conditions of the Mulan PSL v2. +* You may obtain a copy of Mulan PSL v2 at: +* +* http://license.coscl.org.cn/MulanPSL2 +* +* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +* FIT FOR A PARTICULAR PURPOSE. +* See the Mulan PSL v2 for more details. +*/ +#include "cg_dce.h" +#include "cg.h" +namespace maplebe { +void CGDce::DoDce() { + bool tryDceAgain = false; + do { + tryDceAgain = false; + for (auto &ssaIt : GetSSAInfo()->GetAllSSAOperands()) { + if (ssaIt.second != nullptr && !ssaIt.second->IsDeleted()) { + if (RemoveUnuseDef(*ssaIt.second)) { + tryDceAgain = true; + } + } + } + } while (tryDceAgain); +} + +bool CgDce::PhaseRun(maplebe::CGFunc &f) { + CGSSAInfo *ssaInfo = GET_ANALYSIS(CgSSAConstruct, f); + CGDce *cgDce = f.GetCG()->CreateCGDce(*GetPhaseMemPool(), f, *ssaInfo); + cgDce->DoDce(); + return false; +} + +void CgDce::GetAnalysisDependence(maple::AnalysisDep &aDep) const { + aDep.AddRequired(); + aDep.AddPreserved(); +} +MAPLE_TRANSFORM_PHASE_REGISTER_CANSKIP(CgDce, cgdeadcodeelimination) +} + diff --git a/src/mapleall/maple_be/src/cg/cg_dominance.cpp b/src/mapleall/maple_be/src/cg/cg_dominance.cpp new file mode 100644 index 0000000000000000000000000000000000000000..468205daf219723eb2826ef7ebf30611c02beb65 --- /dev/null +++ b/src/mapleall/maple_be/src/cg/cg_dominance.cpp @@ -0,0 +1,545 @@ +/* + * Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "cg_dominance.h" +#include +#include "cg_option.h" +#include "cgfunc.h" + +/* + * This phase build dominance + */ +namespace maplebe { +constexpr uint32 kBBVectorInitialSize = 2; +void DomAnalysis::PostOrderWalk(const BB &bb, int32 &pid, MapleVector &visitedMap) { + ASSERT(bb.GetId() < visitedMap.size(), "index out of range in Dominance::PostOrderWalk"); + if (visitedMap[bb.GetId()]) { + return; + } + visitedMap[bb.GetId()] = true; + for (const BB *suc : bb.GetSuccs()) { + PostOrderWalk(*suc, pid, visitedMap); + } + ASSERT(bb.GetId() < postOrderIDVec.size(), "index out of range in Dominance::PostOrderWalk"); + postOrderIDVec[bb.GetId()] = pid++; +} + +void DomAnalysis::GenPostOrderID() { + ASSERT(!bbVec.empty(), "size to be allocated is 0"); + MapleVector visitedMap(bbVec.size() + 1, false, cgFunc.GetFuncScopeAllocator()->Adapter()); + int32 postOrderID = 0; + PostOrderWalk(commonEntryBB, postOrderID, visitedMap); + // initialize reversePostOrder + int32 maxPostOrderID = postOrderID - 1; + reversePostOrder.resize(static_cast(maxPostOrderID + 1)); + for (size_t i = 0; i < postOrderIDVec.size(); ++i) { + int32 postOrderNo = postOrderIDVec[i]; + if (postOrderNo == -1) { + continue; + } + reversePostOrder[static_cast(maxPostOrderID - postOrderNo)] = bbVec[i]; + } +} + +BB *DomAnalysis::Intersect(BB &bb1, const BB &bb2) { + auto *ptrBB1 = &bb1; + auto *ptrBB2 = &bb2; + while (ptrBB1 != ptrBB2) { + while (postOrderIDVec[ptrBB1->GetId()] < postOrderIDVec[ptrBB2->GetId()]) { + ptrBB1 = GetDom(ptrBB1->GetId()); + } + while (postOrderIDVec[ptrBB2->GetId()] < postOrderIDVec[ptrBB1->GetId()]) { + ptrBB2 = GetDom(ptrBB2->GetId()); + } + } + return ptrBB1; +} + +bool DominanceBase::CommonEntryBBIsPred(const BB &bb) const { + for (const BB *suc : commonEntryBB.GetSuccs()) { + if (suc == &bb) { + return true; + } + } + return false; +} + +// Figure 3 in "A Simple, Fast Dominance Algorithm" by Keith Cooper et al. +void DomAnalysis::ComputeDominance() { + SetDom(commonEntryBB.GetId(), &commonEntryBB); + bool changed; + do { + changed = false; + for (size_t i = 1; i < reversePostOrder.size(); ++i) { + BB *bb = reversePostOrder[i]; + if (bb == nullptr) { + continue; + } + BB *pre = nullptr; + auto it = bb->GetPredsBegin(); + if (CommonEntryBBIsPred(*bb) || bb->GetPreds().empty()) { + pre = &commonEntryBB; + } else { + pre = *it; + } + ++it; + while ((GetDom(pre->GetId()) == nullptr || pre == bb) && it != bb->GetPredsEnd()) { + pre = *it; + ++it; + } + BB *newIDom = pre; + for (; it != bb->GetPredsEnd(); ++it) { + pre = *it; + if (GetDom(pre->GetId()) != nullptr && pre != bb) { + newIDom = Intersect(*pre, *newIDom); + } + } + if (GetDom(bb->GetId()) != newIDom) { + SetDom(bb->GetId(), newIDom); + changed = true; + } + } + } while (changed); +} + +// Figure 5 in "A Simple, Fast Dominance Algorithm" by Keith Cooper et al. 
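+// DF(x) collects the first blocks where x stops dominating: for every join block bb (two or more preds), a
+// runner walks from each pred up the idom chain, adding bb to DF(runner), until it reaches idom(bb).
+// Illustrative sketch (not from this file): with edges 1->2, 1->3, 2->4 and 3->4, idom(4) = 1, so the walk
+// gives DF(2) = DF(3) = {4} and leaves DF(1) empty.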
+void DomAnalysis::ComputeDomFrontiers() { + for (const BB *bb : bbVec) { + if (bb == nullptr || bb == &commonExitBB) { + continue; + } + if (bb->GetPreds().size() < kBBVectorInitialSize) { + continue; + } + for (BB *pre : bb->GetPreds()) { + BB *runner = pre; + while (runner != nullptr && runner != GetDom(bb->GetId()) && runner != &commonEntryBB) { + if (!HasDomFrontier(runner->GetId(), bb->GetId())) { + domFrontier[runner->GetId()].push_back(bb->GetId()); + } + runner = GetDom(runner->GetId()); + } + } + } + // check entry bb's predBB, such as : + // bb1 is commonEntryBB, bb2 is entryBB, bb2 is domFrontier of bb3 and bb7. + // 1 + // | + // 2 <- + // / | + // 3 | + // / \ | + // 4 7--- + // / \ ^ + // | | | + // 5-->6-- + for (BB *succ : commonEntryBB.GetSuccs()) { + if (succ->GetPreds().size() != 1) { // Only deal with one pred bb. + continue; + } + for (BB *pre : succ->GetPreds()) { + BB *runner = pre; + while (runner != GetDom(succ->GetId()) && runner != &commonEntryBB && runner != succ) { + if (!HasDomFrontier(runner->GetId(), succ->GetId())) { + domFrontier[runner->GetId()].push_back(succ->GetId()); + } + runner = GetDom(runner->GetId()); + } + } + } +} + +void DomAnalysis::ComputeDomChildren() { + for (auto *bb : reversePostOrder) { + if (bb == nullptr || GetDom(bb->GetId()) == nullptr) { + continue; + } + BB *parent = GetDom(bb->GetId()); + if (parent == bb) { + continue; + } + domChildren[parent->GetId()].push_back(bb->GetId()); + } +} + +// bbidMarker indicates that the iterDomFrontier results for bbid < bbidMarker +// have been computed +void DomAnalysis::GetIterDomFrontier(const BB *bb, MapleSet *dfset, uint32 bbidMarker, + std::vector &visitedMap) { + if (visitedMap[bb->GetId()]) { + return; + } + visitedMap[bb->GetId()] = true; + for (uint32 frontierbbid : domFrontier[bb->GetId()]) { + (void)dfset->insert(frontierbbid); + if (frontierbbid < bbidMarker) { // union with its computed result + dfset->insert(iterDomFrontier[frontierbbid].begin(), iterDomFrontier[frontierbbid].end()); + } else { // recursive call + BB *frontierbb = bbVec[frontierbbid]; + GetIterDomFrontier(frontierbb, dfset, bbidMarker, visitedMap); + } + } +} + +void DomAnalysis::ComputeIterDomFrontiers() { + for (BB *bb : bbVec) { + if (bb == nullptr || bb == &commonExitBB) { + continue; + } + std::vector visitedMap(bbVec.size(), false); + GetIterDomFrontier(bb, &iterDomFrontier[bb->GetId()], bb->GetId(), visitedMap); + } +} + + +uint32 DomAnalysis::ComputeDtPreorder(const BB &bb, uint32 &num) { + ASSERT(num < dtPreOrder.size(), "index out of range in Dominance::ComputeDtPreorder"); + dtPreOrder[num] = bb.GetId(); + dtDfn[bb.GetId()] = num; + uint32 maxDtDfnOut = num; + ++num; + + for (uint32 k : domChildren[bb.GetId()]) { + maxDtDfnOut = ComputeDtPreorder(*bbVec[k], num); + } + + dtDfnOut[bb.GetId()] = maxDtDfnOut; + return maxDtDfnOut; +} + +// true if b1 dominates b2 +bool DomAnalysis::Dominate(const BB &bb1, const BB &bb2) { + return dtDfn[bb1.GetId()] <= dtDfn[bb2.GetId()] && dtDfnOut[bb1.GetId()] >= dtDfnOut[bb2.GetId()]; +} + +void DomAnalysis::Compute() { + GenPostOrderID(); + ComputeDominance(); + ComputeDomFrontiers(); + ComputeDomChildren(); + ComputeIterDomFrontiers(); + uint32 num = 0; + (void)ComputeDtPreorder(*cgFunc.GetFirstBB(), num); + GetDtPreOrder().resize(num); +} + +void DomAnalysis::Dump() { + for (BB *bb : reversePostOrder) { + LogInfo::MapleLogger() << "postorder no " << postOrderIDVec[bb->GetId()]; + LogInfo::MapleLogger() << " is bb:" << bb->GetId(); + LogInfo::MapleLogger() << " 
im_dom is bb:" << GetDom(bb->GetId())->GetId(); + LogInfo::MapleLogger() << " domfrontier: ["; + for (uint32 id : domFrontier[bb->GetId()]) { + LogInfo::MapleLogger() << id << " "; + } + LogInfo::MapleLogger() << "] domchildren: ["; + for (uint32 id : domChildren[bb->GetId()]) { + LogInfo::MapleLogger() << id << " "; + } + LogInfo::MapleLogger() << "]\n"; + } + LogInfo::MapleLogger() << "\npreorder traversal of dominator tree:"; + for (uint32 id : dtPreOrder) { + LogInfo::MapleLogger() << id << " "; + } + LogInfo::MapleLogger() << "\n\n"; +} + +/* ================= for PostDominance ================= */ +void PostDomAnalysis::PdomPostOrderWalk(const BB &bb, int32 &pid, MapleVector &visitedMap) { + ASSERT(bb.GetId() < visitedMap.size(), "index out of range in Dominance::PdomPostOrderWalk"); + if (bbVec[bb.GetId()] == nullptr) { + return; + } + if (visitedMap[bb.GetId()]) { + return; + } + visitedMap[bb.GetId()] = true; + for (BB *pre : bb.GetPreds()) { + PdomPostOrderWalk(*pre, pid, visitedMap); + } + ASSERT(bb.GetId() < pdomPostOrderIDVec.size(), "index out of range in Dominance::PdomPostOrderWalk"); + pdomPostOrderIDVec[bb.GetId()] = pid++; +} + +void PostDomAnalysis::PdomGenPostOrderID() { + ASSERT(!bbVec.empty(), "call calloc failed in Dominance::PdomGenPostOrderID"); + MapleVector visitedMap(bbVec.size(), false, cgFunc.GetFuncScopeAllocator()->Adapter()); + int32 postOrderID = 0; + PdomPostOrderWalk(commonExitBB, postOrderID, visitedMap); + // initialize pdomReversePostOrder + int32 maxPostOrderID = postOrderID - 1; + pdomReversePostOrder.resize(static_cast(maxPostOrderID + 1)); + for (size_t i = 0; i < pdomPostOrderIDVec.size(); ++i) { + int32 postOrderNo = pdomPostOrderIDVec[i]; + if (postOrderNo == -1) { + continue; + } + pdomReversePostOrder[static_cast(maxPostOrderID - postOrderNo)] = bbVec[i]; + } +} + +BB *PostDomAnalysis::PdomIntersect(BB &bb1, const BB &bb2) { + auto *ptrBB1 = &bb1; + auto *ptrBB2 = &bb2; + while (ptrBB1 != ptrBB2) { + while (pdomPostOrderIDVec[ptrBB1->GetId()] < pdomPostOrderIDVec[ptrBB2->GetId()]) { + ptrBB1 = GetPdom(ptrBB1->GetId()); + } + while (pdomPostOrderIDVec[ptrBB2->GetId()] < pdomPostOrderIDVec[ptrBB1->GetId()]) { + ptrBB2 = GetPdom(ptrBB2->GetId()); + } + } + return ptrBB1; +} + +// Figure 3 in "A Simple, Fast Dominance Algorithm" by Keith Cooper et al. +void PostDomAnalysis::ComputePostDominance() { + SetPdom(commonExitBB.GetId(), &commonExitBB); + bool changed = false; + do { + changed = false; + for (size_t i = 1; i < pdomReversePostOrder.size(); ++i) { + BB *bb = pdomReversePostOrder[i]; + BB *suc = nullptr; + auto it = bb->GetSuccsBegin(); + if (cgFunc.IsExitBB(*bb) || bb->GetSuccs().empty() || + (bb->IsWontExit() && bb->GetKind() == BB::kBBGoto)) { + suc = &commonExitBB; + } else { + suc = *it; + } + ++it; + while ((GetPdom(suc->GetId()) == nullptr || suc == bb) && it != bb->GetSuccsEnd()) { + suc = *it; + ++it; + } + if (GetPdom(suc->GetId()) == nullptr) { + suc = &commonExitBB; + } + BB *newIDom = suc; + for (; it != bb->GetSuccsEnd(); ++it) { + suc = *it; + if (GetPdom(suc->GetId()) != nullptr && suc != bb) { + newIDom = PdomIntersect(*suc, *newIDom); + } + } + if (GetPdom(bb->GetId()) != newIDom) { + SetPdom(bb->GetId(), newIDom); + ASSERT(GetPdom(newIDom->GetId()) != nullptr, "null ptr check"); + changed = true; + } + } + } while (changed); +} + +// Figure 5 in "A Simple, Fast Dominance Algorithm" by Keith Cooper et al. 
+void PostDomAnalysis::ComputePdomFrontiers() { + for (const BB *bb : bbVec) { + if (bb == nullptr || bb == &commonEntryBB) { + continue; + } + if (bb->GetSuccs().size() < kBBVectorInitialSize) { + continue; + } + for (BB *suc : bb->GetSuccs()) { + BB *runner = suc; + while (runner != GetPdom(bb->GetId()) && runner != &commonEntryBB && + GetPdom(runner->GetId()) != nullptr) { // add infinite loop code limit + if (!HasPdomFrontier(runner->GetId(), bb->GetId())) { + pdomFrontier[runner->GetId()].push_back(bb->GetId()); + } + runner = GetPdom(runner->GetId()); + } + } + } +} + +void PostDomAnalysis::ComputePdomChildren() { + for (const BB *bb : bbVec) { + if (bb == nullptr || GetPdom(bb->GetId()) == nullptr) { + continue; + } + const BB *parent = GetPdom(bb->GetId()); + if (parent == bb) { + continue; + } + pdomChildren[parent->GetId()].push_back(bb->GetId()); + } +} + +// bbidMarker indicates that the iterPdomFrontier results for bbid < bbidMarker +// have been computed +void PostDomAnalysis::GetIterPdomFrontier(const BB *bb, MapleSet *dfset, uint32 bbidMarker, + std::vector &visitedMap) { + if (visitedMap[bb->GetId()]) { + return; + } + visitedMap[bb->GetId()] = true; + for (uint32 frontierbbid : pdomFrontier[bb->GetId()]) { + (void)dfset->insert(frontierbbid); + if (frontierbbid < bbidMarker) { // union with its computed result + dfset->insert(iterPdomFrontier[frontierbbid].begin(), iterPdomFrontier[frontierbbid].end()); + } else { // recursive call + BB *frontierbb = bbVec[frontierbbid]; + GetIterPdomFrontier(frontierbb, dfset, bbidMarker, visitedMap); + } + } +} + +void PostDomAnalysis::ComputeIterPdomFrontiers() { + for (BB *bb : bbVec) { + if (bb == nullptr || bb == &commonEntryBB) { + continue; + } + std::vector visitedMap(bbVec.size(), false); + GetIterPdomFrontier(bb, &iterPdomFrontier[bb->GetId()], bb->GetId(), visitedMap); + } +} + +uint32 PostDomAnalysis::ComputePdtPreorder(const BB &bb, uint32 &num) { + ASSERT(num < pdtPreOrder.size(), "index out of range in Dominance::ComputePdtPreOrder"); + pdtPreOrder[num] = bb.GetId(); + pdtDfn[bb.GetId()] = num; + uint32 maxDtDfnOut = num; + ++num; + + for (uint32 k : pdomChildren[bb.GetId()]) { + maxDtDfnOut = ComputePdtPreorder(*bbVec[k], num); + } + + pdtDfnOut[bb.GetId()] = maxDtDfnOut; + return maxDtDfnOut; +} + +// true if b1 postdominates b2 +bool PostDomAnalysis::PostDominate(const BB &bb1, const BB &bb2) { + return pdtDfn[bb1.GetId()] <= pdtDfn[bb2.GetId()] && pdtDfnOut[bb1.GetId()] >= pdtDfnOut[bb2.GetId()]; +} + +void PostDomAnalysis::Dump() { + for (BB *bb : pdomReversePostOrder) { + LogInfo::MapleLogger() << "pdom_postorder no " << pdomPostOrderIDVec[bb->GetId()]; + LogInfo::MapleLogger() << " is bb:" << bb->GetId(); + LogInfo::MapleLogger() << " im_pdom is bb:" << GetPdom(bb->GetId())->GetId(); + LogInfo::MapleLogger() << " pdomfrontier: ["; + for (uint32 id : pdomFrontier[bb->GetId()]) { + LogInfo::MapleLogger() << id << " "; + } + LogInfo::MapleLogger() << "] pdomchildren: ["; + for (uint32 id : pdomChildren[bb->GetId()]) { + LogInfo::MapleLogger() << id << " "; + } + LogInfo::MapleLogger() << "]\n"; + } + LogInfo::MapleLogger() << "\n"; + LogInfo::MapleLogger() << "preorder traversal of post-dominator tree:"; + for (uint32 id : pdtPreOrder) { + LogInfo::MapleLogger() << id << " "; + } + LogInfo::MapleLogger() << "\n\n"; +} + +void PostDomAnalysis::GeneratePdomTreeDot() { + std::streambuf *coutBuf = std::cout.rdbuf(); + std::ofstream pdomFile; + std::streambuf *fileBuf = pdomFile.rdbuf(); + (void)std::cout.rdbuf(fileBuf); + 
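+  // Dump the post-dominator tree in Graphviz format to "pdom_tree_<func>.dot";
+  // std::cout is temporarily redirected to the file and restored at the end.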
+ std::string fileName; + (void)fileName.append("pdom_tree_"); + (void)fileName.append(cgFunc.GetName()); + (void)fileName.append(".dot"); + + pdomFile.open(fileName.c_str(), std::ios::trunc); + if (!pdomFile.is_open()) { + LogInfo::MapleLogger(kLlWarn) << "fileName:" << fileName << " open failed.\n"; + return; + } + pdomFile << "digraph Pdom_" << cgFunc.GetName() << " {\n\n"; + pdomFile << " node [shape=box];\n\n"; + + FOR_ALL_BB_CONST(bb, &cgFunc) { + if (bb->IsUnreachable()) { + continue; + } + pdomFile << " BB_" << bb->GetId(); + pdomFile << "[label= \""; + if (bb == cgFunc.GetFirstBB()) { + pdomFile << "ENTRY\n"; + } + pdomFile << "BB_" << bb->GetId() << "\"];\n"; + } + BB *exitBB = cgFunc.GetCommonExitBB(); + pdomFile << " BB_" << exitBB->GetId(); + pdomFile << "[label= \"EXIT\n"; + pdomFile << "BB_" << exitBB->GetId() << "\"];\n"; + pdomFile << "\n"; + + for (uint32 bbId = 0; bbId < pdomChildren.size(); ++bbId) { + if (pdomChildren[bbId].empty()) { + continue; + } + BB *parent = cgFunc.GetBBFromID(bbId); + CHECK_FATAL(parent != nullptr, "get pdom parent-node failed"); + for (auto childId : pdomChildren[bbId]) { + BB *child = cgFunc.GetBBFromID(childId); + CHECK_FATAL(child != nullptr, "get pdom child-node failed"); + pdomFile << " BB_" << parent->GetId() << " -> " << "BB_" << child->GetId(); + pdomFile << " [dir=none]" << ";\n"; + } + } + pdomFile << "\n"; + + pdomFile << "}\n"; + (void)pdomFile.flush(); + pdomFile.close(); + (void)std::cout.rdbuf(coutBuf); +} + +void PostDomAnalysis::Compute() { + PdomGenPostOrderID(); + ComputePostDominance(); + ComputePdomFrontiers(); + ComputePdomChildren(); + ComputeIterPdomFrontiers(); + uint32 num = 0; + (void)ComputePdtPreorder(GetCommonExitBB(), num); + ResizePdtPreOrder(num); +} + +bool CgDomAnalysis::PhaseRun(maplebe::CGFunc &f) { + MemPool *domMemPool = GetPhaseMemPool(); + domAnalysis = domMemPool->New(f, *domMemPool, *domMemPool, f.GetAllBBs(), + *f.GetFirstBB(), *f.GetCommonExitBB()); + domAnalysis->Compute(); + if (CG_DEBUG_FUNC(f)) { + domAnalysis->Dump(); + } + return false; +} +MAPLE_ANALYSIS_PHASE_REGISTER(CgDomAnalysis, domanalysis) + +bool CgPostDomAnalysis::PhaseRun(maplebe::CGFunc &f) { + MemPool *pdomMemPool = GetPhaseMemPool(); + pdomAnalysis = pdomMemPool->New(f, *pdomMemPool, *pdomMemPool, f.GetAllBBs(), + *f.GetFirstBB(), *f.GetCommonExitBB()); + pdomAnalysis->Compute(); + if (CG_DEBUG_FUNC(f)) { + pdomAnalysis->Dump(); + } + return false; +} +MAPLE_ANALYSIS_PHASE_REGISTER(CgPostDomAnalysis, pdomanalysis) +} /* namespace maplebe */ diff --git a/src/mapleall/maple_be/src/cg/cg_irbuilder.cpp b/src/mapleall/maple_be/src/cg/cg_irbuilder.cpp new file mode 100644 index 0000000000000000000000000000000000000000..cd31bfdb889ff0492edd786a159d31711fbe3fc1 --- /dev/null +++ b/src/mapleall/maple_be/src/cg/cg_irbuilder.cpp @@ -0,0 +1,138 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ + +#include "cg_irbuilder.h" +#include "isa.h" +#include "cg.h" +#include "cfi.h" +#include "dbg.h" + +namespace maplebe { +Insn &InsnBuilder::BuildInsn(MOperator opCode, const InsnDesc &idesc) { + auto *newInsn = mp->New(*mp, opCode); + newInsn->SetInsnDescrption(idesc); + IncreaseInsnNum(); + return *newInsn; +} + +Insn &InsnBuilder::BuildInsn(MOperator opCode, Operand &o0) { + const InsnDesc &tMd = Globals::GetInstance()->GetTarget()->GetTargetMd(opCode); + return BuildInsn(opCode, tMd).AddOpndChain(o0); +} +Insn &InsnBuilder::BuildInsn(MOperator opCode, Operand &o0, Operand &o1) { + const InsnDesc &tMd = Globals::GetInstance()->GetTarget()->GetTargetMd(opCode); + return BuildInsn(opCode, tMd).AddOpndChain(o0).AddOpndChain(o1); +} +Insn &InsnBuilder::BuildInsn(MOperator opCode, Operand &o0, Operand &o1, Operand &o2) { + const InsnDesc &tMd = Globals::GetInstance()->GetTarget()->GetTargetMd(opCode); + return BuildInsn(opCode, tMd).AddOpndChain(o0).AddOpndChain(o1).AddOpndChain(o2); +} + +Insn &InsnBuilder::BuildInsn(MOperator opCode, Operand &o0, Operand &o1, Operand &o2, Operand &o3) { + const InsnDesc &tMd = Globals::GetInstance()->GetTarget()->GetTargetMd(opCode); + return BuildInsn(opCode, tMd).AddOpndChain(o0).AddOpndChain(o1).AddOpndChain(o2).AddOpndChain(o3); +} + +Insn &InsnBuilder::BuildInsn(MOperator opCode, Operand &o0, Operand &o1, Operand &o2, Operand &o3, Operand &o4) { + const InsnDesc &tMd = Globals::GetInstance()->GetTarget()->GetTargetMd(opCode); + Insn &nI = BuildInsn(opCode, tMd); + return nI.AddOpndChain(o0).AddOpndChain(o1).AddOpndChain(o2).AddOpndChain(o3).AddOpndChain(o4); +} + +Insn &InsnBuilder::BuildInsn(MOperator opCode, std::vector &opnds) { + const InsnDesc &tMd = Globals::GetInstance()->GetTarget()->GetTargetMd(opCode); + Insn &nI = BuildInsn(opCode, tMd); + for (auto *opnd : opnds) { + nI.AddOperand(*opnd); + } + return nI; +} + +Insn &InsnBuilder::BuildCfiInsn(MOperator opCode) { + auto *nI = mp->New(*mp, opCode); + IncreaseInsnNum(); + return *nI; +} +Insn &InsnBuilder::BuildDbgInsn(MOperator opCode) { + auto *nI = mp->New(*mp, opCode); + IncreaseInsnNum(); + return *nI; +} +Insn &InsnBuilder::BuildCommentInsn(CommentOperand &comment) { + Insn &insn = BuildInsn(abstract::MOP_comment, InsnDesc::GetAbstractId(abstract::MOP_comment)); + insn.AddOperand(comment); + return insn; +} +VectorInsn &InsnBuilder::BuildVectorInsn(MOperator opCode, const InsnDesc &idesc) { + auto *newInsn = mp->New(*mp, opCode); + newInsn->SetInsnDescrption(idesc); + IncreaseInsnNum(); + return *newInsn; +} + +ImmOperand &OperandBuilder::CreateImm(uint32 size, int64 value, MemPool *mp) { + return mp ? *mp->New(value, size, false) : *alloc.New(value, size, false); +} + +ImmOperand &OperandBuilder::CreateImm(const MIRSymbol &symbol, int64 offset, int32 relocs, MemPool *mp) { + return mp ? *mp->New(symbol, offset, relocs, false) : + *alloc.New(symbol, offset, relocs, false); +} + +MemOperand &OperandBuilder::CreateMem(uint32 size, MemPool *mp) { + return mp ? *mp->New(size) : *alloc.New(size); +} + +MemOperand &OperandBuilder::CreateMem(RegOperand &baseOpnd, int64 offset, uint32 size) { + MemOperand *memOprand = &CreateMem(size); + memOprand->SetBaseRegister(baseOpnd); + memOprand->SetOffsetOperand(CreateImm(baseOpnd.GetSize(), offset)); + return *memOprand; +} + +RegOperand &OperandBuilder::CreateVReg(uint32 size, RegType type, MemPool *mp) { + virtualRegNum++; + regno_t vRegNO = kBaseVirtualRegNO + virtualRegNum; + return mp ? 
*mp->New(vRegNO, size, type) : *alloc.New(vRegNO, size, type); +} + +RegOperand &OperandBuilder::CreateVReg(regno_t vRegNO, uint32 size, RegType type, MemPool *mp) { + return mp ? *mp->New(vRegNO, size, type) : *alloc.New(vRegNO, size, type); +} + +RegOperand &OperandBuilder::CreatePReg(regno_t pRegNO, uint32 size, RegType type, MemPool *mp) { + return mp ? *mp->New(pRegNO, size, type) : *alloc.New(pRegNO, size, type); +} + +ListOperand &OperandBuilder::CreateList(MemPool *mp) { + return mp ? *mp->New(alloc) : *alloc.New(alloc); +} + +FuncNameOperand &OperandBuilder::CreateFuncNameOpnd(MIRSymbol &symbol, MemPool *mp){ + return mp ? *mp->New(symbol) : *alloc.New(symbol); +} + +LabelOperand &OperandBuilder::CreateLabel(const char *parent, LabelIdx idx, MemPool *mp){ + return mp ? *mp->New(parent, idx, *mp) : *alloc.New(parent, idx, *alloc.GetMemPool()); +} + +CommentOperand &OperandBuilder::CreateComment(const std::string &s, MemPool *mp) { + return mp ? *mp->New(s, *mp) : *alloc.New(s, *alloc.GetMemPool()); +} + +CommentOperand &OperandBuilder::CreateComment(const MapleString &s, MemPool *mp) { + return mp ? *mp->New(s.c_str(), *mp) : *alloc.New(s.c_str(), *alloc.GetMemPool()); +} +} diff --git a/src/mapleall/maple_be/src/cg/cg_occur.cpp b/src/mapleall/maple_be/src/cg/cg_occur.cpp new file mode 100644 index 0000000000000000000000000000000000000000..ec87c882bec82988ef7386abc1c5e603c0d39786 --- /dev/null +++ b/src/mapleall/maple_be/src/cg/cg_occur.cpp @@ -0,0 +1,35 @@ +/* + * Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "cg_occur.h" +#include "cg_pre.h" + +/* The methods associated with the data structures that represent occurrences and work candidates for PRE */ +namespace maplebe { +/* return if this occur dominate occ */ +bool CgOccur::IsDominate(DomAnalysis &dom, CgOccur &occ) { + return dom.Dominate(*GetBB(), *occ.GetBB()); +} + +/* compute bucket index for the work candidate in workCandHashTable */ +uint32 PreWorkCandHashTable::ComputeWorkCandHashIndex(const Operand &opnd) { + uint32 hashIdx = static_cast(reinterpret_cast(&opnd) >> k4ByteSize); + return hashIdx % workCandHashLength; +} + +uint32 PreWorkCandHashTable::ComputeStmtWorkCandHashIndex(const Insn &insn) { + uint32 hIdx = (static_cast(insn.GetMachineOpcode())) << k3ByteSize; + return hIdx % workCandHashLength; +} +} // namespace maple diff --git a/src/mapleall/maple_be/src/cg/cg_option.cpp b/src/mapleall/maple_be/src/cg/cg_option.cpp new file mode 100644 index 0000000000000000000000000000000000000000..4d64e1c8ca0f7d93cda2361f0729546b1be1be28 --- /dev/null +++ b/src/mapleall/maple_be/src/cg/cg_option.cpp @@ -0,0 +1,876 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "cg_option.h" +#include +#include +#include "cg_options.h" +#include "driver_options.h" +#include "mpl_logging.h" +#include "parser_opt.h" +#include "mir_parser.h" +#include "string_utils.h" +#include "triple.h" + +namespace maplebe { +using namespace maple; + +const std::string kMplcgVersion = ""; + +bool CGOptions::timePhases = false; +std::string CGOptions::targetArch = ""; +std::unordered_set CGOptions::dumpPhases = {}; +std::unordered_set CGOptions::skipPhases = {}; +std::unordered_map> CGOptions::cyclePatternMap = {}; +std::string CGOptions::skipFrom = ""; +std::string CGOptions::skipAfter = ""; +std::string CGOptions::dumpFunc = "*"; +std::string CGOptions::globalVarProfile = ""; +std::string CGOptions::profileData = ""; +std::string CGOptions::profileFuncData = ""; +std::string CGOptions::profileClassData = ""; +#ifdef TARGARM32 +std::string CGOptions::duplicateAsmFile = ""; +#else +std::string CGOptions::duplicateAsmFile = "maple/mrt/codetricks/arch/arm64/duplicateFunc.s"; +#endif +Range CGOptions::range = Range(); +std::string CGOptions::fastFuncsAsmFile = ""; +Range CGOptions::spillRanges = Range(); +uint8 CGOptions::fastAllocMode = 0; /* 0: fast, 1: spill all */ +bool CGOptions::fastAlloc = false; +uint64 CGOptions::lsraBBOptSize = 150000; +uint64 CGOptions::lsraInsnOptSize = 200000; +uint64 CGOptions::overlapNum = 28; +uint8 CGOptions::rematLevel = 2; +bool CGOptions::optForSize = false; +bool CGOptions::enableHotColdSplit = false; +uint32 CGOptions::alignMinBBSize = 16; +uint32 CGOptions::alignMaxBBSize = 96; +uint32 CGOptions::loopAlignPow = 4; +uint32 CGOptions::jumpAlignPow = 5; +uint32 CGOptions::funcAlignPow = 5; +bool CGOptions::liteProfGen = false; +bool CGOptions::liteProfUse = false; +std::string CGOptions::liteProfile = ""; +std::string CGOptions::instrumentationWhiteList = ""; +std::string CGOptions::litePgoOutputFunction = "main"; +#if TARGAARCH64 || TARGRISCV64 +bool CGOptions::useBarriersForVolatile = false; +#else +bool CGOptions::useBarriersForVolatile = true; +#endif +bool CGOptions::exclusiveEH = false; +bool CGOptions::doEBO = false; +bool CGOptions::doCGSSA = false; +bool CGOptions::doIPARA = true; +bool CGOptions::doCFGO = false; +bool CGOptions::doICO = false; +bool CGOptions::doStoreLoadOpt = false; +bool CGOptions::doGlobalOpt = false; +bool CGOptions::doVregRename = false; +bool CGOptions::doMultiPassColorRA = true; +bool CGOptions::doPrePeephole = false; +bool CGOptions::doPeephole = false; +bool CGOptions::doRetMerge = false; +bool CGOptions::doSchedule = false; +bool CGOptions::doWriteRefFieldOpt = false; +bool CGOptions::dumpOptimizeCommonLog = false; +bool CGOptions::checkArrayStore = false; +bool CGOptions::doPIC = false; +bool CGOptions::noDupBB = false; +bool CGOptions::noCalleeCFI = true; +bool CGOptions::emitCyclePattern = false; +bool CGOptions::insertYieldPoint = false; +bool CGOptions::mapleLinker = false; +bool CGOptions::printFunction = false; +bool CGOptions::nativeOpt = false; +bool CGOptions::lazyBinding = false; +bool CGOptions::hotFix = false; +bool CGOptions::debugSched = false; +bool CGOptions::bruteForceSched = false; +bool CGOptions::simulateSched = false; 
+CGOptions::ABIType CGOptions::abiType = kABIHard; +CGOptions::EmitFileType CGOptions::emitFileType = kAsm; +bool CGOptions::genLongCalls = false; +bool CGOptions::functionSections = false; +bool CGOptions::useFramePointer = false; +bool CGOptions::gcOnly = false; +bool CGOptions::quiet = false; +bool CGOptions::doPatchLongBranch = false; +bool CGOptions::doPreSchedule = false; +bool CGOptions::emitBlockMarker = true; +bool CGOptions::inRange = false; +bool CGOptions::doPreLSRAOpt = false; +bool CGOptions::doLocalRefSpill = false; +bool CGOptions::doCalleeToSpill = false; +bool CGOptions::doRegSavesOpt = false; +bool CGOptions::useSsaPreSave = false; +bool CGOptions::useSsuPreRestore = false; +bool CGOptions::replaceASM = false; +bool CGOptions::generalRegOnly = false; +bool CGOptions::fastMath = false; +bool CGOptions::doAlignAnalysis = false; +bool CGOptions::doCondBrAlign = false; +bool CGOptions::cgBigEndian = false; +bool CGOptions::arm64ilp32 = false; +bool CGOptions::noCommon = false; +bool CGOptions::flavorLmbc = false; + +CGOptions &CGOptions::GetInstance() { + static CGOptions instance; + return instance; +} + +void CGOptions::DecideMplcgRealLevel(bool isDebug) { + if (opts::cg::o0) { + if (isDebug) { + LogInfo::MapleLogger() << "Real Mplcg level: O0\n"; + } + EnableO0(); + } + + if (opts::cg::o1) { + if (isDebug) { + LogInfo::MapleLogger() << "Real Mplcg level: O1\n"; + } + EnableO1(); + } + + if (opts::cg::o2 || opts::cg::os) { + if (opts::cg::os) { + optForSize = true; + } + if (isDebug) { + std::string oLog = (opts::cg::os == true) ? "Os" : "O2"; + LogInfo::MapleLogger() << "Real Mplcg level: " << oLog << "\n"; + } + EnableO2(); + } + if (opts::cg::olitecg) { + if (isDebug) { + LogInfo::MapleLogger() << "Real Mplcg level: LiteCG\n"; + } + EnableLiteCG(); + } +} + +bool CGOptions::SolveOptions(bool isDebug) { + DecideMplcgRealLevel(isDebug); + + for (const auto &opt : cgCategory.GetEnabledOptions()) { + std::string printOpt; + if (isDebug) { + for (const auto &val : opt->GetRawValues()) { + printOpt += opt->GetName() + " " + val + " "; + } + LogInfo::MapleLogger() << "cg options: " << printOpt << '\n'; + } + } + + if (opts::cg::quiet.IsEnabledByUser()) { + SetQuiet(true); + } + + if (opts::verbose.IsEnabledByUser()) { + SetQuiet(false); + } + + if (opts::cg::pie.IsEnabledByUser()) { + opts::cg::pie ? SetOption(CGOptions::kGenPie) : ClearOption(CGOptions::kGenPie); + } + + if (opts::cg::fpic.IsEnabledByUser()) { + if (opts::cg::fpic) { + EnablePIC(); + SetOption(CGOptions::kGenPic); + } else { + DisablePIC(); + ClearOption(CGOptions::kGenPic); + } + } + + if (opts::cg::verboseAsm.IsEnabledByUser()) { + opts::cg::verboseAsm ? SetOption(CGOptions::kVerboseAsm) : ClearOption(CGOptions::kVerboseAsm); + } + + if (opts::cg::verboseCg.IsEnabledByUser()) { + opts::cg::verboseCg ? SetOption(CGOptions::kVerboseCG) : ClearOption(CGOptions::kVerboseCG); + } + + if (opts::cg::maplelinker.IsEnabledByUser()) { + opts::cg::maplelinker ? EnableMapleLinker() : DisableMapleLinker(); + } + + if (opts::cg::fastAlloc.IsEnabledByUser()) { + EnableFastAlloc(); + SetFastAllocMode(opts::cg::fastAlloc); + } + + if (opts::cg::useBarriersForVolatile.IsEnabledByUser()) { + opts::cg::useBarriersForVolatile ? 
EnableBarriersForVolatile() : DisableBarriersForVolatile(); + } + + if (opts::cg::spillRange.IsEnabledByUser()) { + SetRange(opts::cg::spillRange, "--pill-range", GetSpillRanges()); + } + + if (opts::cg::range.IsEnabledByUser()) { + SetRange(opts::cg::range, "--range", GetRange()); + } + + if (opts::cg::timePhases.IsEnabledByUser()) { + opts::cg::timePhases ? EnableTimePhases() : DisableTimePhases(); + } + + if (opts::cg::dumpFunc.IsEnabledByUser()) { + SetDumpFunc(opts::cg::dumpFunc); + } + + if (opts::cg::duplicateAsmList.IsEnabledByUser()) { + SetDuplicateAsmFile(opts::cg::duplicateAsmList); + } + + if (opts::cg::duplicateAsmList2.IsEnabledByUser()) { + SetFastFuncsAsmFile(opts::cg::duplicateAsmList2); + } + + if (opts::stackProtectorStrong.IsEnabledByUser()) { + SetOption(kUseStackProtectorStrong); + } + + if (opts::stackProtectorAll.IsEnabledByUser()) { + SetOption(kUseStackProtectorAll); + } + + if (opts::cg::debug.IsEnabledByUser()) { + SetOption(kDebugFriendly); + SetOption(kWithLoc); + ClearOption(kSuppressFileInfo); + } + + if (opts::cg::gdwarf.IsEnabledByUser()) { + SetOption(kDebugFriendly); + SetOption(kWithLoc); + SetOption(kWithDwarf); + SetParserOption(kWithDbgInfo); + ClearOption(kSuppressFileInfo); + } + + if (opts::cg::gsrc.IsEnabledByUser()) { + SetOption(kDebugFriendly); + SetOption(kWithLoc); + SetOption(kWithSrc); + ClearOption(kWithMpl); + } + + if (opts::cg::gmixedsrc.IsEnabledByUser()) { + SetOption(kDebugFriendly); + SetOption(kWithLoc); + SetOption(kWithSrc); + SetOption(kWithMpl); + } + + if (opts::cg::gmixedasm.IsEnabledByUser()) { + SetOption(kDebugFriendly); + SetOption(kWithLoc); + SetOption(kWithSrc); + SetOption(kWithMpl); + SetOption(kWithAsm); + } + + if (opts::cg::profile.IsEnabledByUser()) { + SetOption(kWithProfileCode); + SetParserOption(kWithProfileInfo); + } + + if (opts::cg::withRaLinearScan.IsEnabledByUser()) { + SetOption(kDoLinearScanRegAlloc); + ClearOption(kDoColorRegAlloc); + } + + if (opts::cg::withRaGraphColor.IsEnabledByUser()) { + SetOption(kDoColorRegAlloc); + ClearOption(kDoLinearScanRegAlloc); + } + + if (opts::cg::printFunc.IsEnabledByUser()) { + opts::cg::printFunc ? EnablePrintFunction() : DisablePrintFunction(); + } + + if (opts::cg::addDebugTrace.IsEnabledByUser()) { + SetOption(kAddDebugTrace); + } + + if (opts::cg::suppressFileinfo.IsEnabledByUser()) { + SetOption(kSuppressFileInfo); + } + + if (opts::cg::patchLongBranch.IsEnabledByUser()) { + SetOption(kPatchLongBranch); + } + + if (opts::cg::constFold.IsEnabledByUser()) { + opts::cg::constFold ? 
SetOption(kConstFold) : ClearOption(kConstFold); + } + + if (opts::cg::dumpCfg.IsEnabledByUser()) { + SetOption(kDumpCFG); + } + + if (opts::cg::classListFile.IsEnabledByUser()) { + SetClassListFile(opts::cg::classListFile); + } + + if (opts::cg::genCMacroDef.IsEnabledByUser()) { + SetOrClear(GetGenerateFlags(), CGOptions::kCMacroDef, opts::cg::genCMacroDef); + } + + if (opts::cg::genGctibFile.IsEnabledByUser()) { + SetOrClear(GetGenerateFlags(), CGOptions::kGctib, opts::cg::genGctibFile); + } + + if (opts::cg::yieldpoint.IsEnabledByUser()) { + SetOrClear(GetGenerateFlags(), CGOptions::kGenYieldPoint, opts::cg::yieldpoint); + } + + if (opts::cg::localRc.IsEnabledByUser()) { + SetOrClear(GetGenerateFlags(), CGOptions::kGenLocalRc, opts::cg::localRc); + } + + if (opts::cg::ehExclusiveList.IsEnabledByUser()) { + SetEHExclusiveFile(opts::cg::ehExclusiveList); + EnableExclusiveEH(); + ParseExclusiveFunc(opts::cg::ehExclusiveList); + } + + if (opts::cg::cyclePatternList.IsEnabledByUser()) { + SetCyclePatternFile(opts::cg::cyclePatternList); + EnableEmitCyclePattern(); + ParseCyclePattern(opts::cg::cyclePatternList); + } + + if (opts::cg::cg.IsEnabledByUser()) { + SetRunCGFlag(opts::cg::cg); + opts::cg::cg ? SetOption(CGOptions::kDoCg) : ClearOption(CGOptions::kDoCg); + } + + if (opts::cg::objmap.IsEnabledByUser()) { + SetGenerateObjectMap(opts::cg::objmap); + } + + if (opts::cg::replaceAsm.IsEnabledByUser()) { + opts::cg::replaceAsm ? EnableReplaceASM() : DisableReplaceASM(); + } + + if (opts::cg::generalRegOnly.IsEnabledByUser()) { + opts::cg::generalRegOnly ? EnableGeneralRegOnly() : DisableGeneralRegOnly(); + } + + if (opts::cg::lazyBinding.IsEnabledByUser()) { + opts::cg::lazyBinding ? EnableLazyBinding() : DisableLazyBinding(); + } + + if (opts::cg::hotFix.IsEnabledByUser()) { + opts::cg::hotFix ? EnableHotFix() : DisableHotFix(); + } + + if (opts::cg::soeCheck.IsEnabledByUser()) { + SetOption(CGOptions::kSoeCheckInsert); + } + + if (opts::cg::checkArraystore.IsEnabledByUser()) { + opts::cg::checkArraystore ? EnableCheckArrayStore() : DisableCheckArrayStore(); + } + + if (opts::cg::ebo.IsEnabledByUser()) { + opts::cg::ebo ? EnableEBO() : DisableEBO(); + } + + if (opts::cg::cfgo.IsEnabledByUser()) { + opts::cg::cfgo ? EnableCFGO() : DisableCFGO(); + } + + if (opts::cg::ico.IsEnabledByUser()) { + opts::cg::ico ? EnableICO() : DisableICO(); + } + + if (opts::cg::storeloadopt.IsEnabledByUser()) { + opts::cg::storeloadopt ? EnableStoreLoadOpt() : DisableStoreLoadOpt(); + } + + if (opts::cg::globalopt.IsEnabledByUser()) { + opts::cg::globalopt ? EnableGlobalOpt() : DisableGlobalOpt(); + } + + // only on master + if (opts::cg::hotcoldsplit.IsEnabledByUser()) { + opts::cg::hotcoldsplit ? EnableHotColdSplit() : DisableHotColdSplit(); + } + + if (opts::cg::prelsra.IsEnabledByUser()) { + opts::cg::prelsra ? EnablePreLSRAOpt() : DisablePreLSRAOpt(); + } + + if (opts::cg::lsraLvarspill.IsEnabledByUser()) { + opts::cg::lsraLvarspill ? EnableLocalRefSpill() : DisableLocalRefSpill(); + } + + if (opts::cg::lsraOptcallee.IsEnabledByUser()) { + opts::cg::lsraOptcallee ? EnableCalleeToSpill() : DisableCalleeToSpill(); + } + + if (opts::cg::prepeep.IsEnabledByUser()) { + opts::cg::prepeep ? EnablePrePeephole() : DisablePrePeephole(); + } + + if (opts::cg::peep.IsEnabledByUser()) { + opts::cg::peep ? EnablePeephole() : DisablePeephole(); + } + + if (opts::cg::retMerge.IsEnabledByUser()) { + opts::cg::retMerge ? 
EnableRetMerge() : DisableRetMerge(); + } + + if (opts::cg::preschedule.IsEnabledByUser()) { + opts::cg::preschedule ? EnablePreSchedule() : DisablePreSchedule(); + } + + if (opts::cg::schedule.IsEnabledByUser()) { + opts::cg::schedule ? EnableSchedule() : DisableSchedule(); + } + + if (opts::cg::vregRename.IsEnabledByUser()) { + opts::cg::vregRename ? EnableVregRename() : DisableVregRename(); + } + + if (opts::cg::fullcolor.IsEnabledByUser()) { + opts::cg::fullcolor ? EnableMultiPassColorRA() : DisableMultiPassColorRA(); + } + + if (opts::cg::writefieldopt.IsEnabledByUser()) { + opts::cg::writefieldopt ? EnableWriteRefFieldOpt() : DisableWriteRefFieldOpt(); + } + + if (opts::cg::dumpOlog.IsEnabledByUser()) { + opts::cg::dumpOlog ? EnableDumpOptimizeCommonLog() : DisableDumpOptimizeCommonLog(); + } + + if (opts::cg::nativeopt.IsEnabledByUser()) { + // FIXME: Disabling Looks strage: should be checked by author of the code + DisableNativeOpt(); + } + + if (opts::cg::dupBb.IsEnabledByUser()) { + opts::cg::dupBb ? DisableNoDupBB() : EnableNoDupBB(); + } + + if (opts::cg::calleeCfi.IsEnabledByUser()) { + opts::cg::calleeCfi ? DisableNoCalleeCFI() : EnableNoCalleeCFI(); + } + + if (opts::cg::proepilogue.IsEnabledByUser()) { + opts::cg::proepilogue ? SetOption(CGOptions::kProEpilogueOpt) + : ClearOption(CGOptions::kProEpilogueOpt); + } + + if (opts::cg::tailcall.IsEnabledByUser()) { + opts::cg::tailcall ? SetOption(CGOptions::kTailCallOpt) + : ClearOption(CGOptions::kTailCallOpt); + } + + if (opts::cg::calleeregsPlacement.IsEnabledByUser()) { + opts::cg::calleeregsPlacement ? EnableRegSavesOpt() : DisableRegSavesOpt(); + } + + if (opts::cg::ssapreSave.IsEnabledByUser()) { + opts::cg::ssapreSave ? EnableSsaPreSave() : DisableSsaPreSave(); + } + + if (opts::cg::ssupreRestore.IsEnabledByUser()) { + opts::cg::ssupreRestore ? EnableSsuPreRestore() : DisableSsuPreRestore(); + } + + if (opts::cg::lsraBb.IsEnabledByUser()) { + SetLSRABBOptSize(opts::cg::lsraBb); + } + + if (opts::cg::lsraInsn.IsEnabledByUser()) { + SetLSRAInsnOptSize(opts::cg::lsraInsn); + } + + if (opts::cg::lsraOverlap.IsEnabledByUser()) { + SetOverlapNum(opts::cg::lsraOverlap); + } + + if (opts::cg::remat.IsEnabledByUser()) { + SetRematLevel(opts::cg::remat); + } + + if (opts::cg::dumpPhases.IsEnabledByUser()) { + SplitPhases(opts::cg::dumpPhases, GetDumpPhases()); + } + + if (opts::cg::target.IsEnabledByUser()) { + SetTargetMachine(opts::cg::target); + } + + if (opts::cg::skipPhases.IsEnabledByUser()) { + SplitPhases(opts::cg::skipPhases, GetSkipPhases()); + } + + if (opts::cg::skipFrom.IsEnabledByUser()) { + SetSkipFrom(opts::cg::skipFrom); + } + + if (opts::cg::skipAfter.IsEnabledByUser()) { + SetSkipAfter(opts::cg::skipAfter); + } + + if (opts::cg::debugSchedule.IsEnabledByUser()) { + opts::cg::debugSchedule ? EnableDebugSched() : DisableDebugSched(); + } + + if (opts::cg::bruteforceSchedule.IsEnabledByUser()) { + opts::cg::bruteforceSchedule ? EnableDruteForceSched() : DisableDruteForceSched(); + } + + if (opts::cg::simulateSchedule.IsEnabledByUser()) { + opts::cg::simulateSchedule ? 
EnableSimulateSched() : DisableSimulateSched(); + } + + if (opts::profile.IsEnabledByUser()) { + SetProfileData(opts::profile); + } + + if (opts::cg::unwindTables.IsEnabledByUser()) { + SetOption(kUseUnwindTables); + } + + if (opts::cg::nativeopt.IsEnabledByUser()) { + DisableNativeOpt(); + } + + if (opts::cg::floatAbi.IsEnabledByUser()) { + SetABIType(opts::cg::floatAbi); + } + + if (opts::cg::filetype.IsEnabledByUser()) { + SetEmitFileType(opts::cg::filetype); + } + + if (opts::cg::longCalls.IsEnabledByUser()) { + opts::cg::longCalls ? EnableLongCalls() : DisableLongCalls(); + } + + if (opts::cg::functionSections.IsEnabledByUser()) { + opts::cg::functionSections ? EnableFunctionSections() : DisableFunctionSections(); + } + + if (opts::cg::omitFramePointer.IsEnabledByUser()) { + opts::cg::omitFramePointer ? DisableFramePointer() : EnableFramePointer(); + } + + if (opts::gcOnly.IsEnabledByUser()) { + opts::gcOnly ? EnableGCOnly() : DisableGCOnly(); + } + + if (opts::cg::fastMath.IsEnabledByUser()) { + opts::cg::fastMath ? EnableFastMath() : DisableFastMath(); + } + + if (opts::cg::alignAnalysis.IsEnabledByUser()) { + opts::cg::alignAnalysis ? EnableAlignAnalysis() : DisableAlignAnalysis(); + } + + if (opts::cg::condbrAlign.IsEnabledByUser()) { + opts::cg::condbrAlign ? EnableCondBrAlign() : DisableCondBrAlign(); + } + + /* big endian can be set with several options: --target, -Be. + * Triple takes to account all these options and allows to detect big endian with IsBigEndian() interface */ + Triple::GetTriple().IsBigEndian() ? EnableBigEndianInCG() : DisableBigEndianInCG(); + (maple::Triple::GetTriple().GetEnvironment() == Triple::GNUILP32) ? EnableArm64ilp32() : DisableArm64ilp32(); + + if (opts::cg::cgSsa.IsEnabledByUser()) { + opts::cg::cgSsa ? EnableCGSSA() : DisableCGSSA(); + } + + if (opts::cg::common.IsEnabledByUser()) { + opts::cg::common ? EnableCommon() : DisableCommon(); + } + + if (opts::cg::alignMinBbSize.IsEnabledByUser()) { + SetAlignMinBBSize(opts::cg::alignMinBbSize); + } + + if (opts::cg::alignMaxBbSize.IsEnabledByUser()) { + SetAlignMaxBBSize(opts::cg::alignMaxBbSize); + } + + if (opts::cg::loopAlignPow.IsEnabledByUser()) { + SetLoopAlignPow(opts::cg::loopAlignPow); + } + + if (opts::cg::jumpAlignPow.IsEnabledByUser()) { + SetJumpAlignPow(opts::cg::jumpAlignPow); + } + + if (opts::cg::funcAlignPow.IsEnabledByUser()) { + SetFuncAlignPow(opts::cg::funcAlignPow); + } + + if (opts::cg::litePgoGen.IsEnabledByUser()) { + opts::cg::litePgoGen ? 
EnableLiteProfGen() : DisableLiteProfGen(); + } + + if (opts::cg::litePgoOutputFunc.IsEnabledByUser()) { + EnableLiteProfGen(); + if (!opts::cg::litePgoOutputFunc.GetValue().empty()) { + SetLitePgoOutputFunction(opts::cg::litePgoOutputFunc); + } + } + + if (opts::cg::instrumentationFile.IsEnabledByUser()) { + SetInstrumentationWhiteList(opts::cg::instrumentationFile); + if (!opts::cg::instrumentationFile.GetValue().empty()) { + EnableLiteProfGen(); + } + } + + if (opts::cg::litePgoFile.IsEnabledByUser()) { + SetLiteProfile(opts::cg::litePgoFile); + if (!opts::cg::litePgoFile.GetValue().empty()) { + EnableLiteProfUse(); + } + } + + /* override some options when loc, dwarf is generated */ + if (WithLoc()) { + DisableSchedule(); + SetOption(kWithSrc); + } + if (WithDwarf()) { + DisableEBO(); + DisableCFGO(); + DisableICO(); + DisableSchedule(); + SetOption(kDebugFriendly); + SetOption(kWithSrc); + SetOption(kWithLoc); + ClearOption(kSuppressFileInfo); + } + + return true; +} + +void CGOptions::ParseExclusiveFunc(const std::string &fileName) { + std::ifstream file(fileName); + if (!file.is_open()) { + ERR(kLncErr, "%s open failed!", fileName.c_str()); + return; + } + std::string content; + while (file >> content) { + ehExclusiveFunctionName.push_back(content); + } +} + +void CGOptions::ParseCyclePattern(const std::string &fileName) const { + std::ifstream file(fileName); + if (!file.is_open()) { + ERR(kLncErr, "%s open failed!", fileName.c_str()); + return; + } + std::string content; + std::string classStr("class: "); + while (getline(file, content)) { + if (content.compare(0, classStr.length(), classStr) == 0) { + std::vector classPatternContent; + std::string patternContent; + while (getline(file, patternContent)) { + if (patternContent.length() == 0) { + break; + } + classPatternContent.push_back(patternContent); + } + std::string className = content.substr(classStr.length()); + CGOptions::cyclePatternMap[className] = move(classPatternContent); + } + } +} + +void CGOptions::SetRange(const std::string &str, const std::string &cmd, Range &subRange) const { + const std::string &tmpStr = str; + size_t comma = tmpStr.find_first_of(",", 0); + subRange.enable = true; + + if (comma != std::string::npos) { + subRange.begin = std::stoul(tmpStr.substr(0, comma), nullptr); + subRange.end = std::stoul(tmpStr.substr(comma + 1, std::string::npos - (comma + 1)), nullptr); + } + CHECK_FATAL(range.begin < range.end, "invalid values for %s=%lu,%lu", cmd.c_str(), subRange.begin, subRange.end); +} + +/* Set default options according to different languages. 
*/ +void CGOptions::SetDefaultOptions(const maple::MIRModule &mod) { + if (mod.IsJavaModule()) { + generateFlag = generateFlag | kGenYieldPoint | kGenLocalRc | kGrootList | kPrimorList; + } + if (mod.GetFlavor() == MIRFlavor::kFlavorLmbc) { + EnableFlavorLmbc(); + } + insertYieldPoint = GenYieldPoint(); +} + +void CGOptions::EnableO0() { + optimizeLevel = kLevel0; + doEBO = false; + doCGSSA = false; + doCFGO = false; + doICO = false; + doPrePeephole = false; + doPeephole = false; + doStoreLoadOpt = false; + doGlobalOpt = false; + doPreLSRAOpt = false; + doLocalRefSpill = false; + doCalleeToSpill = false; + doPreSchedule = false; + doSchedule = false; + doRegSavesOpt = false; + useSsaPreSave = false; + useSsuPreRestore = false; + doWriteRefFieldOpt = false; + doAlignAnalysis = false; + doCondBrAlign = false; + SetOption(kUseUnwindTables); + if (maple::Triple::GetTriple().GetEnvironment() == Triple::GNUILP32) { + ClearOption(kUseStackProtectorStrong); + } else { + SetOption(kUseStackProtectorStrong); + } + ClearOption(kUseStackProtectorAll); + ClearOption(kConstFold); + ClearOption(kProEpilogueOpt); + ClearOption(kTailCallOpt); +} + +void CGOptions::EnableO1() { + optimizeLevel = kLevel1; + doPreLSRAOpt = true; + doCalleeToSpill = true; + SetOption(kConstFold); + SetOption(kProEpilogueOpt); + SetOption(kTailCallOpt); + SetOption(kUseUnwindTables); + ClearOption(kUseStackProtectorStrong); + ClearOption(kUseStackProtectorAll); +} + +void CGOptions::EnableO2() { + optimizeLevel = kLevel2; + doEBO = true; + doCGSSA = true; + doCFGO = true; + doICO = true; + doPrePeephole = true; + doPeephole = true; + doStoreLoadOpt = true; + doGlobalOpt = true; + doPreSchedule = true; + doSchedule = true; + doAlignAnalysis = true; + doCondBrAlign = true; + doRetMerge = true; + SetOption(kConstFold); + SetOption(kUseUnwindTables); + ClearOption(kUseStackProtectorStrong); + ClearOption(kUseStackProtectorAll); +#if TARGARM32 + doPreLSRAOpt = false; + doLocalRefSpill = false; + doCalleeToSpill = false; + doWriteRefFieldOpt = false; + ClearOption(kProEpilogueOpt); + ClearOption(kTailCallOpt); +#else + doPreLSRAOpt = true; + doLocalRefSpill = true; + doCalleeToSpill = true; + doRegSavesOpt = true; + useSsaPreSave = true; + useSsuPreRestore = true; + doWriteRefFieldOpt = true; + SetOption(kProEpilogueOpt); + SetOption(kTailCallOpt); +#endif +} + +void CGOptions::EnableLiteCG() { + optimizeLevel = kLevelLiteCG; + doEBO = false; + doCGSSA = false; + doCFGO = false; + doICO = false; + doPrePeephole = false; + doPeephole = false; + doStoreLoadOpt = false; + doGlobalOpt = false; + doPreLSRAOpt = false; + doLocalRefSpill = false; + doCalleeToSpill = false; + doPreSchedule = false; + doSchedule = false; + doRegSavesOpt = false; + useSsaPreSave = false; + useSsuPreRestore = false; + doWriteRefFieldOpt = false; + doAlignAnalysis = false; + doCondBrAlign = false; + + ClearOption(kUseStackProtectorStrong); + ClearOption(kUseStackProtectorAll); + ClearOption(kConstFold); + ClearOption(kProEpilogueOpt); + ClearOption(kTailCallOpt); +} + +void CGOptions::SetTargetMachine(const std::string &str) { + if (str == "aarch64") { + targetArch = "aarch64"; + } else if (str == "x86_64") { + targetArch = "x86_64"; + } + CHECK_FATAL(false, "unknown target. 
not implement yet"); +} + +void CGOptions::SplitPhases(const std::string &str, std::unordered_set &set) const { + const std::string& tmpStr{ str }; + if ((tmpStr.compare("*") == 0) || (tmpStr.compare("cgir") == 0)) { + (void)set.insert(tmpStr); + return; + } + StringUtils::Split(tmpStr, set, ','); +} + +bool CGOptions::DumpPhase(const std::string &phase) { + return (IS_STR_IN_SET(dumpPhases, "*") || IS_STR_IN_SET(dumpPhases, "cgir") || IS_STR_IN_SET(dumpPhases, phase)); +} + +/* match sub std::string of function name */ +bool CGOptions::FuncFilter(const std::string &name) { + return dumpFunc == "*" || dumpFunc == name; +} +} /* namespace maplebe */ diff --git a/src/mapleall/maple_be/src/cg/cg_options.cpp b/src/mapleall/maple_be/src/cg/cg_options.cpp new file mode 100644 index 0000000000000000000000000000000000000000..635bb79df8ce3ad1e76ffcfa5d80fa9248c7e75e --- /dev/null +++ b/src/mapleall/maple_be/src/cg/cg_options.cpp @@ -0,0 +1,618 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ + +#include "driver_options.h" + +#include +#include +#include + +namespace opts::cg { + +maplecl::Option pie({"-fPIE", "--pie", "-pie"}, + " --pie \tGenerate position-independent executable\n" + " --no-pie\n", + {cgCategory, driverCategory, ldCategory}, + maplecl::DisableWith("--no-pie")); + +maplecl::Option fpic({"-fPIC", "--fpic", "-fpic"}, + " --fpic \tGenerate position-independent shared library\n" + " --no-fpic\n", + {cgCategory, driverCategory, ldCategory}, + maplecl::DisableWith("--no-fpic")); + +maplecl::Option verboseAsm({"--verbose-asm"}, + " --verbose-asm \tAdd comments to asm output\n" + " --no-verbose-asm\n", + {cgCategory}, + maplecl::DisableWith("--no-verbose-asm")); + +maplecl::Option verboseCg({"--verbose-cg"}, + " --verbose-cg \tAdd comments to cg output\n" + " --no-verbose-cg\n", + {cgCategory}, + maplecl::DisableWith("--no-verbose-cg")); + +maplecl::Option maplelinker({"--maplelinker"}, + " --maplelinker \tGenerate the MapleLinker .s format\n" + " --no-maplelinker\n", + {cgCategory}, + maplecl::DisableWith("--no-maplelinker")); + +maplecl::Option quiet({"--quiet"}, + " --quiet \tBe quiet (don't output debug messages)\n" + " --no-quiet\n", + {cgCategory}, + maplecl::DisableWith("--no-quiet")); + +maplecl::Option cg({"--cg"}, + " --cg \tGenerate the output .s file\n" + " --no-cg\n", + {cgCategory}, + maplecl::DisableWith("--no-cg")); + +maplecl::Option replaceAsm({"--replaceasm"}, + " --replaceasm \tReplace the the assembly code\n" + " --no-replaceasm\n", + {cgCategory}, + maplecl::DisableWith("--no-replaceasm")); + +maplecl::Option generalRegOnly({"--general-reg-only"}, + " --general-reg-only \tdisable floating-point or Advanced SIMD registers\n" + " --no-general-reg-only\n", + {cgCategory}, + maplecl::DisableWith("--no-general-reg-only")); + +maplecl::Option lazyBinding({"--lazy-binding"}, + " --lazy-binding \tBind class symbols lazily[default off]\n", + {cgCategory}, + maplecl::DisableWith("--no-lazy-binding")); + +maplecl::Option hotFix({"--hot-fix"}, + " 
--hot-fix \tOpen for App hot fix[default off]\n" + " --no-hot-fix\n", + {cgCategory}, + maplecl::DisableWith("--no-hot-fix")); + +maplecl::Option ebo({"--ebo"}, + " --ebo \tPerform Extend block optimization\n" + " --no-ebo\n", + {cgCategory}, + maplecl::DisableWith("--no-ebo")); + +maplecl::Option cfgo({"--cfgo"}, + " --cfgo \tPerform control flow optimization\n" + " --no-cfgo\n", + {cgCategory}, + maplecl::DisableWith("--no-cfgo")); + +maplecl::Option ico({"--ico"}, + " --ico \tPerform if-conversion optimization\n" + " --no-ico\n", + {cgCategory}, + maplecl::DisableWith("--no-ico")); + +maplecl::Option storeloadopt({"--storeloadopt"}, + " --storeloadopt \tPerform global store-load optimization\n" + " --no-storeloadopt\n", + {cgCategory}, + maplecl::DisableWith("--no-storeloadopt")); + +maplecl::Option globalopt({"--globalopt"}, + " --globalopt \tPerform global optimization\n" + " --no-globalopt\n", + {cgCategory}, + maplecl::DisableWith("--no-globalopt")); + +maplecl::Option hotcoldsplit({"--hotcoldsplit"}, + " --hotcoldsplit \tPerform HotColdSplit optimization\n" + " --no-hotcoldsplit\n", + {cgCategory}, + maplecl::DisableWith("--no-hotcoldsplit")); + +maplecl::Option prelsra({"--prelsra"}, + " --prelsra \tPerform live interval simplification in LSRA\n" + " --no-prelsra\n", + {cgCategory}, + maplecl::DisableWith("--no-prelsra")); + +maplecl::Option lsraLvarspill({"--lsra-lvarspill"}, + " --lsra-lvarspill" + " \tPerform LSRA spill using local ref var stack locations\n" + " --no-lsra-lvarspill\n", + {cgCategory}, + maplecl::DisableWith("--no-lsra-lvarspill")); + +maplecl::Option lsraOptcallee({"--lsra-optcallee"}, + " --lsra-optcallee \tSpill callee if only one def to use\n" + " --no-lsra-optcallee\n", + {cgCategory}, + maplecl::DisableWith("--no-lsra-optcallee")); + +maplecl::Option calleeregsPlacement({"--calleeregs-placement"}, + " --calleeregs-placement \tOptimize placement of callee-save registers\n" + " --no-calleeregs-placement\n", + {cgCategory}, + maplecl::DisableWith("--no-calleeregs-placement")); + +maplecl::Option ssapreSave({"--ssapre-save"}, + " --ssapre-save \tUse ssapre algorithm to save callee-save registers\n" + " --no-ssapre-save\n", + {cgCategory}, + maplecl::DisableWith("--no-ssapre-save")); + +maplecl::Option ssupreRestore({"--ssupre-restore"}, + " --ssupre-restore" + " \tUse ssupre algorithm to restore callee-save registers\n" + " --no-ssupre-restore\n", + {cgCategory}, + maplecl::DisableWith("--no-ssupre-restore")); + +maplecl::Option prepeep({"--prepeep"}, + " --prepeep \tPerform peephole optimization before RA\n" + " --no-prepeep\n", + {cgCategory}, + maplecl::DisableWith("--no-prepeep")); + +maplecl::Option peep({"--peep"}, + " --peep \tPerform peephole optimization after RA\n" + " --no-peep\n", + {cgCategory}, + maplecl::DisableWith("--no-peep")); + +maplecl::Option preschedule({"--preschedule"}, + " --preschedule \tPerform prescheduling\n" + " --no-preschedule\n", + {cgCategory}, + maplecl::DisableWith("--no-preschedule")); + +maplecl::Option schedule({"--schedule"}, + " --schedule \tPerform scheduling\n" + " --no-schedule\n", + {cgCategory}, + maplecl::DisableWith("--no-schedule")); + +maplecl::Option retMerge({"--ret-merge"}, + " --ret-merge \tMerge return bb into a single destination\n" + " --no-ret-merge \tallows for multiple return bb\n", + {cgCategory}, + maplecl::DisableWith("--no-ret-merge")); + +maplecl::Option vregRename({"--vreg-rename"}, + " --vreg-rename" + " \tPerform rename of long live range around loops in coloring RA\n" + " 
--no-vreg-rename\n", + {cgCategory}, + maplecl::DisableWith("--no-vreg-rename")); + +maplecl::Option fullcolor({"--fullcolor"}, + " --fullcolor \tPerform multi-pass coloring RA\n" + " --no-fullcolor\n", + {cgCategory}, + maplecl::DisableWith("--no-fullcolor")); + +maplecl::Option writefieldopt({"--writefieldopt"}, + " --writefieldopt \tPerform WriteRefFieldOpt\n" + " --no-writefieldopt\n", + {cgCategory}, + maplecl::DisableWith("--no-writefieldopt")); + +maplecl::Option dumpOlog({"--dump-olog"}, + " --dump-olog \tDump CFGO and ICO debug information\n" + " --no-dump-olog\n", + {cgCategory}, + maplecl::DisableWith("--no-dump-olog")); + +maplecl::Option nativeopt({"--nativeopt"}, + " --nativeopt \tEnable native opt\n" + " --no-nativeopt\n", + {cgCategory}, + maplecl::DisableWith("--no-nativeopt")); + +maplecl::Option objmap({"--objmap"}, + " --objmap" + " \tCreate object maps (GCTIBs) inside the main output (.s) file\n" + " --no-objmap\n", + {cgCategory}, + maplecl::DisableWith("--no-objmap")); + +maplecl::Option yieldpoint({"--yieldpoint"}, + " --yieldpoint \tGenerate yieldpoints [default]\n" + " --no-yieldpoint\n", + {cgCategory}, + maplecl::DisableWith("--no-yieldpoint")); + +maplecl::Option proepilogue({"--proepilogue"}, + " --proepilogue \tDo tail call optimization and" + " eliminate unnecessary prologue and epilogue.\n" + " --no-proepilogue\n", + {cgCategory}, + maplecl::DisableWith("--no-proepilogue")); + +maplecl::Option localRc({"--local-rc"}, + " --local-rc \tHandle Local Stack RC [default]\n" + " --no-local-rc\n", + {cgCategory}, + maplecl::DisableWith("--no-local-rc")); + +maplecl::Option addDebugTrace({"--add-debug-trace"}, + " --add-debug-trace" + " \tInstrumentation the output .s file to print call traces at runtime\n", + {cgCategory}); + + +maplecl::Option classListFile({"--class-list-file"}, + " --class-list-file" + " \tSet the class list file for the following generation options,\n" + " \tif not given, " + "generate for all visible classes\n" + " \t--class-list-file=class_list_file\n", + {cgCategory}); + +maplecl::Option genCMacroDef({"--gen-c-macro-def"}, + " --gen-c-macro-def" + " \tGenerate a .def file that contains extra type metadata, including the\n" + " \tclass instance sizes and field offsets (default)\n" + " --no-gen-c-macro-def\n", + {cgCategory}, + maplecl::DisableWith("--no-gen-c-macro-def")); + +maplecl::Option genGctibFile({"--gen-gctib-file"}, + " --gen-gctib-file" + " \tGenerate a separate .s file for GCTIBs. 
Usually used together with\n" + " \t--no-objmap (not implemented yet)\n" + " --no-gen-gctib-file\n", + {cgCategory}, + maplecl::DisableWith("--no-gen-gctib-file")); + +maplecl::Option unwindTables({"--unwind-tables"}, + " --unwind-tables \tgenerate unwind tables for function \n" + " --no-unwind-tables\n", + {cgCategory}, + maplecl::DisableWith("--no-unwind-tables")); + +maplecl::Option debug({"-g", "--g"}, + " -g \tGenerate debug information\n", + {cgCategory}); + +maplecl::Option gdwarf({"--gdwarf"}, + " --gdwarf \tGenerate dwarf infomation\n", + {cgCategory}); + +maplecl::Option gsrc({"--gsrc"}, + " --gsrc \tUse original source file instead of mpl file for debugging\n", + {cgCategory}); + +maplecl::Option gmixedsrc({"--gmixedsrc"}, + " --gmixedsrc" + " \tUse both original source file and mpl file for debugging\n", + {cgCategory}); + +maplecl::Option gmixedasm({"--gmixedasm"}, + " --gmixedasm" + " \tComment out both original source file and mpl file for debugging\n", + {cgCategory}); + +maplecl::Option profile({"--p", "-p"}, + " -p \tGenerate profiling infomation\n", + {cgCategory}); + +maplecl::Option withRaLinearScan({"--with-ra-linear-scan"}, + " --with-ra-linear-scan \tDo linear-scan register allocation\n", + {cgCategory}); + +maplecl::Option withRaGraphColor({"--with-ra-graph-color"}, + " --with-ra-graph-color \tDo coloring-based register allocation\n", + {cgCategory}); + +maplecl::Option patchLongBranch({"--patch-long-branch"}, + " --patch-long-branch" + " \tEnable patching long distance branch with jumping pad\n", + {cgCategory}); + +maplecl::Option constFold({"--const-fold"}, + " --const-fold \tEnable constant folding\n" + " --no-const-fold\n", + {cgCategory}, + maplecl::DisableWith("--no-const-fold")); + +maplecl::Option ehExclusiveList({"--eh-exclusive-list"}, + " --eh-exclusive-list \tFor generating gold files in unit testing\n" + " \t--eh-exclusive-list=list_file\n", + {cgCategory}); + +maplecl::Option o0({"-O0", "--O0"}, + " -O0 \tNo optimization.\n", + {cgCategory}); + +maplecl::Option o1({"-O1", "--O1"}, + " -O1 \tDo some optimization.\n", + {cgCategory}); + +maplecl::Option o2({"-O2", "--O2"}, + " -O2 \tDo some optimization.\n", + {cgCategory}); + +maplecl::Option os({"-Os", "--Os"}, + " -Os \tOptimize for size, based on O2.\n", + {cgCategory}); + +maplecl::Option olitecg({"-Olitecg", "--Olitecg"}, + " -Olitecg \tOptimize for litecg.\n", + {cgCategory}); + +maplecl::Option lsraBb({"--lsra-bb"}, + " --lsra-bb=NUM" + " \tSwitch to spill mode if number of bb in function exceeds NUM\n", + {cgCategory}); + +maplecl::Option lsraInsn({"--lsra-insn"}, + " --lsra-insn=NUM" + " \tSwitch to spill mode if number of instructons in function exceeds NUM\n", + {cgCategory}); + +maplecl::Option lsraOverlap({"--lsra-overlap"}, + " --lsra-overlap=NUM \toverlap NUM to decide pre spill in lsra\n", + {cgCategory}); + +maplecl::Option remat({"--remat"}, + " --remat \tEnable rematerialization during register allocation\n" + " \t 0: no rematerialization (default)\n" + " \t >= 1: rematerialize constants\n" + " \t >= 2: rematerialize addresses\n" + " \t >= 3: rematerialize local dreads\n" + " \t >= 4: rematerialize global dreads\n", + {cgCategory}); + +maplecl::Option suppressFileinfo({"--suppress-fileinfo"}, + " --suppress-fileinfo \tFor generating gold files in unit testing\n", + {cgCategory}); + +maplecl::Option dumpCfg({"--dump-cfg"}, + " --dump-cfg\n", + {cgCategory}); + +maplecl::Option target({"--target"}, + " --target=TARGETMACHINE \t generate code for TARGETMACHINE\n", + {cgCategory}, + 
maplecl::optionalValue); + +maplecl::Option dumpPhases({"--dump-phases"}, + " --dump-phases=PHASENAME,..." + " \tEnable debug trace for specified phases in the comma separated list\n", + {cgCategory}); + +maplecl::Option skipPhases({"--skip-phases"}, + " --skip-phases=PHASENAME,..." + " \tSkip the phases specified in the comma separated list\n", + {cgCategory}); + +maplecl::Option skipFrom({"--skip-from"}, + " --skip-from=PHASENAME \tSkip the rest phases from PHASENAME(included)\n", + {cgCategory}); + +maplecl::Option skipAfter({"--skip-after"}, + " --skip-after=PHASENAME \tSkip the rest phases after PHASENAME(excluded)\n", + {cgCategory}); + +maplecl::Option dumpFunc({"--dump-func"}, + " --dump-func=FUNCNAME" + " \tDump/trace only for functions whose names contain FUNCNAME as substring\n" + " \t(can only specify once)\n", + {cgCategory}); + +maplecl::Option timePhases({"--time-phases"}, + " --time-phases \tCollect compilation time stats for each phase\n" + " --no-time-phases \tDon't Collect compilation time stats for each phase\n", + {cgCategory}, + maplecl::DisableWith("--no-time-phases")); + +maplecl::Option useBarriersForVolatile({"--use-barriers-for-volatile"}, + " --use-barriers-for-volatile \tOptimize volatile load/str\n" + " --no-use-barriers-for-volatile\n", + {cgCategory}, + maplecl::DisableWith("--no-use-barriers-for-volatile")); + +maplecl::Option range({"--range"}, + " --range=NUM0,NUM1 \tOptimize only functions in the range [NUM0, NUM1]\n", + {cgCategory}); + +maplecl::Option fastAlloc({"--fast-alloc"}, + " --fast-alloc=[0/1] \tO2 RA fast mode, set to 1 to spill all registers\n", + {cgCategory}); + +maplecl::Option spillRange({"--spill_range"}, + " --spill_range=NUM0,NUM1 \tO2 RA spill registers in the range [NUM0, NUM1]\n", + {cgCategory}); + +maplecl::Option dupBb({"--dup-bb"}, + " --dup-bb \tAllow cfg optimizer to duplicate bb\n" + " --no-dup-bb \tDon't allow cfg optimizer to duplicate bb\n", + {cgCategory}, + maplecl::DisableWith("--no-dup-bb")); + +maplecl::Option calleeCfi({"--callee-cfi"}, + " --callee-cfi \tcallee cfi message will be generated\n" + " --no-callee-cfi \tcallee cfi message will not be generated\n", + {cgCategory}, + maplecl::DisableWith("--no-callee-cfi")); + +maplecl::Option printFunc({"--print-func"}, + " --print-func\n" + " --no-print-func\n", + {cgCategory}, + maplecl::DisableWith("--no-print-func")); + +maplecl::Option cyclePatternList({"--cycle-pattern-list"}, + " --cycle-pattern-list \tFor generating cycle pattern meta\n" + " \t--cycle-pattern-list=list_file\n", + {cgCategory}); + +maplecl::Option duplicateAsmList({"--duplicate_asm_list"}, + " --duplicate_asm_list \tDuplicate asm functions to delete plt call\n" + " \t--duplicate_asm_list=list_file\n", + {cgCategory}); + +maplecl::Option duplicateAsmList2({"--duplicate_asm_list2"}, + " --duplicate_asm_list2" + " \tDuplicate more asm functions to delete plt call\n" + " \t--duplicate_asm_list2=list_file\n", + {cgCategory}); + +maplecl::Option blockMarker({"--block-marker"}, + " --block-marker" + " \tEmit block marker symbols in emitted assembly files\n", + {cgCategory}); + +maplecl::Option soeCheck({"--soe-check"}, + " --soe-check \tInsert a soe check instruction[default off]\n", + {cgCategory}); + +maplecl::Option checkArraystore({"--check-arraystore"}, + " --check-arraystore \tcheck arraystore exception[default off]\n" + " --no-check-arraystore\n", + {cgCategory}, + maplecl::DisableWith("--no-check-arraystore")); + +maplecl::Option debugSchedule({"--debug-schedule"}, + " --debug-schedule \tdump 
scheduling information\n" + " --no-debug-schedule\n", + {cgCategory}, + maplecl::DisableWith("--no-debug-schedule")); + +maplecl::Option bruteforceSchedule({"--bruteforce-schedule"}, + " --bruteforce-schedule \tdo brute force schedule\n" + " --no-bruteforce-schedule\n", + {cgCategory}, + maplecl::DisableWith("--no-bruteforce-schedule")); + +maplecl::Option simulateSchedule({"--simulate-schedule"}, + " --simulate-schedule \tdo simulate schedule\n" + " --no-simulate-schedule\n", + {cgCategory}, + maplecl::DisableWith("--no-simulate-schedule")); + +maplecl::Option crossLoc({"--cross-loc"}, + " --cross-loc \tcross loc insn schedule\n" + " --no-cross-loc\n", + {cgCategory}, + maplecl::DisableWith("--no-cross-loc")); + +maplecl::Option floatAbi({"--float-abi"}, + " --float-abi=name \tPrint the abi type.\n" + " \tname=hard: abi-hard (Default)\n" + " \tname=soft: abi-soft\n" + " \tname=softfp: abi-softfp\n", + {cgCategory}); + +maplecl::Option filetype({"--filetype"}, + " --filetype=name \tChoose a file type.\n" + " \tname=asm: Emit an assembly file (Default)\n" + " \tname=obj: Emit an object file\n" + " \tname=null: not support yet\n", + {cgCategory}); + +maplecl::Option longCalls({"--long-calls"}, + " --long-calls \tgenerate long call\n" + " --no-long-calls\n", + {cgCategory}, + maplecl::DisableWith("--no-long-calls")); + +maplecl::Option functionSections({"--function-sections"}, + " --function-sections \t \n" + " --no-function-sections\n", + {cgCategory}, + maplecl::DisableWith("--no-function-sections")); + +maplecl::Option omitFramePointer({"--omit-frame-pointer", "-fomit-frame-pointer"}, + " --omit-frame-pointer \t do not use frame pointer \n" + " --no-omit-frame-pointer\n", + {cgCategory, driverCategory}, + maplecl::DisableEvery({"--no-omit-frame-pointer", "-fno-omit-frame-pointer"})); + +maplecl::Option fastMath({"--fast-math"}, + " --fast-math \tPerform fast math\n" + " --no-fast-math\n", + {cgCategory}, + maplecl::DisableWith("--no-fast-math")); + +maplecl::Option tailcall({"--tailcall"}, + " --tailcall \tDo tail call optimization\n" + " --no-tailcall\n", + {cgCategory}, + maplecl::DisableWith("--no-tailcall")); + +maplecl::Option alignAnalysis({"--align-analysis"}, + " --align-analysis \tPerform alignanalysis\n" + " --no-align-analysis\n", + {cgCategory}, + maplecl::DisableWith("--no-align-analysis")); + +maplecl::Option cgSsa({"--cg-ssa"}, + " --cg-ssa \tPerform cg ssa\n" + " --no-cg-ssa\n", + {cgCategory}, + maplecl::DisableWith("--no-cg-ssa")); + +maplecl::Option common({"--common", "-fcommon"}, + " --common \t \n" + " --no-common\n", + {cgCategory, driverCategory}, + maplecl::DisableEvery({"--no-common", "-fno-common"})); + +maplecl::Option condbrAlign({"--condbr-align"}, + " --condbr-align \tPerform condbr align\n" + " --no-condbr-align\n", + {cgCategory}, + maplecl::DisableWith("--no-condbr-align")); + +maplecl::Option alignMinBbSize({"--align-min-bb-size"}, + " --align-min-bb-size=NUM" + " \tO2 Minimum bb size for alignment unit:byte\n", + {cgCategory}); + +maplecl::Option alignMaxBbSize({"--align-max-bb-size"}, + " --align-max-bb-size=NUM" + " \tO2 Maximum bb size for alignment unit:byte\n", + {cgCategory}); + +maplecl::Option loopAlignPow({"--loop-align-pow"}, + " --loop-align-pow=NUM \tO2 loop bb align pow (NUM == 0, no loop-align)\n", + {cgCategory}); + +maplecl::Option jumpAlignPow({"--jump-align-pow"}, + " --jump-align-pow=NUM \tO2 jump bb align pow (NUM == 0, no jump-align)\n", + {cgCategory}); + +maplecl::Option funcAlignPow({"--func-align-pow"}, + " --func-align-pow=NUM 
\tO2 func bb align pow (NUM == 0, no func-align)\n", + {cgCategory}); +maplecl::Option litePgoGen({"--lite-pgo-gen"}, + " --lite-pgo-gen \tinstrument CG bb and generate bb-cnt info\n" + " --no-lite-pgo-gen\n", + {cgCategory}, + maplecl::DisableWith("--no-lite-pgo-gen")); +maplecl::Option instrumentationFile ({"--instrumentation-file"}, + "--instrumentation-file=filepath \t instrumentation function white list\n", + {cgCategory}); + +maplecl::Option litePgoOutputFunc ({"--lite-pgo-output-func"}, + "--lite-pgo-output-func=function name \t generate lite profile at the exit of the output function[default main]\n", + {cgCategory}); + +maplecl::Option litePgoFile({"--lite-pgo-file"}, + " --lite-pgo-file=filepath \tlite pgo guide file\n", + {cgCategory}); +} diff --git a/src/mapleall/maple_be/src/cg/cg_pgo_gen.cpp b/src/mapleall/maple_be/src/cg/cg_pgo_gen.cpp new file mode 100644 index 0000000000000000000000000000000000000000..a69ee1515bc91f5df5d118bc04c671faaf7cdd81 --- /dev/null +++ b/src/mapleall/maple_be/src/cg/cg_pgo_gen.cpp @@ -0,0 +1,82 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "cg_pgo_gen.h" +#include "optimize_common.h" +#include "instrument.h" +#include "cg.h" +namespace maplebe { +uint64 CGProfGen::counterIdx = 0; +void CGProfGen::InstrumentFunction() { + instrumenter.PrepareInstrumentInfo(f->GetFirstBB(), f->GetCommonExitBB()); + std::vector iBBs; + instrumenter.GetInstrumentBBs(iBBs, f->GetFirstBB()); + /* skip large bb function currently due to offset in ldr/store */ + if (iBBs.size() > kMaxPimm8) { + return; + } + + uint32 oldTypeTableSize = GlobalTables::GetTypeTable().GetTypeTableSize(); + MIRSymbol *bbCounterTab = GetOrCreateProfSymForFunc(f->GetFunction(), static_cast(iBBs.size() + 1U)); + BECommon *be = Globals::GetInstance()->GetBECommon(); + uint32 newTypeTableSize = GlobalTables::GetTypeTable().GetTypeTableSize(); + if (newTypeTableSize != oldTypeTableSize) { + ASSERT(be, "get be in CGProfGen::InstrumentFunction() failed"); + be->AddNewTypeAfterBecommon(oldTypeTableSize, newTypeTableSize); + } + for (uint32 i = 0; i < iBBs.size(); ++i) { + InstrumentBB(*iBBs[i], *bbCounterTab, i); + } +} + +void CGProfGen::CreateProfileCalls() { + static bool created = false; + if (created) { + return; + } + created = true; + + // Create symbol for recording call times + auto *mirBuilder = f->GetFunction().GetModule()->GetMIRBuilder(); + auto *u64Type = GlobalTables::GetTypeTable().GetUInt64(); + std::string symNameCallTimes = "MCC_profile_call_times"; + auto *symCallTimes = mirBuilder->GetOrCreateGlobalDecl(symNameCallTimes, *u64Type); + MIRIntConst *constVal = GlobalTables::GetIntConstTable().GetOrCreateIntConst(0, *u64Type); + symCallTimes->SetKonst(constVal); + symCallTimes->SetStorageClass(kScGlobal); + + /* Insert ClearProfile */ + CreateClearIcall(*f->GetFirstBB(), "MCC_ClearProfile"); + + /* Insert SaveProfile */ + for (auto *bb : f->GetCommonExitBB()->GetPreds()) { + CreateIcallForWeakSymbol(*bb, "MCC_SaveProfile"); + } +} + +bool 
CgPgoGen::PhaseRun(maplebe::CGFunc &f) { + if (!LiteProfile::IsInWhiteList(f.GetName()) && f.GetName() != CGOptions::GetLitePgoOutputFunction()) { + return false; + } + CHECK_FATAL(f.NumBBs() < LiteProfile::GetBBNoThreshold(), "stop ! bb out of range!"); + + CGProfGen *cgProfGeg = f.GetCG()->CreateCGProfGen(*GetPhaseMemPool(), f); + cgProfGeg->InstrumentFunction(); + if (f.GetName() == CGOptions::GetLitePgoOutputFunction()) { + cgProfGeg->CreateProfileCalls(); + } + return false; +} +MAPLE_TRANSFORM_PHASE_REGISTER(CgPgoGen, cgpgogen) +} diff --git a/src/mapleall/maple_be/src/cg/cg_pgo_use.cpp b/src/mapleall/maple_be/src/cg/cg_pgo_use.cpp new file mode 100644 index 0000000000000000000000000000000000000000..232e6ae4be70b141d0d5a403fb84d15979c92ede --- /dev/null +++ b/src/mapleall/maple_be/src/cg/cg_pgo_use.cpp @@ -0,0 +1,753 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "cg_pgo_use.h" +#include "cg_critical_edge.h" +#include "loop.h" +#include "optimize_common.h" +namespace maplebe { +bool CGProfUse::ApplyPGOData() { + instrumenter.PrepareInstrumentInfo(f->GetFirstBB(), f->GetCommonExitBB()); + std::vector iBBs; + instrumenter.GetInstrumentBBs(iBBs, f->GetFirstBB()); + /* skip large bb function currently due to offset in ldr/store */ + if (iBBs.size() > kMaxPimm8) { + return false; + } + LiteProfile::BBInfo *bbInfo = f->GetFunction().GetModule()->GetLiteProfile().GetFuncBBProf(f->GetName()); + if (bbInfo == nullptr) { + std::cout << "find profile for " << f->GetName() << "Failed" << std::endl; + } + CHECK_FATAL(bbInfo != nullptr, "Get profile Failed"); + + if (bbInfo->counter.size() != iBBs.size()) { + LogInfo::MapleLogger() << f->GetName() << " counter doesn't match profile counter " + << bbInfo->counter.size() << " func real counter " << iBBs.size() << '\n'; + CHECK_FATAL(false, ""); + return false; + } + + for (size_t i = 0; i < iBBs.size(); ++i) { + auto *bbUseInfo = GetOrCreateBBUseInfo(*iBBs[i]); + bbUseInfo->SetCount(bbInfo->counter[i]); + } + + InitBBEdgeInfo(); + ComputeEdgeFreq(); + + ApplyOnBB(); +#if DEBUG + f->GetTheCFG()->CheckCFGFreq(); +#endif + return true; +} + +void CGProfUse::InitBBEdgeInfo() { + const std::vector *> &allEdges = instrumenter.GetAllEdges(); + for (auto &e : allEdges) { + BB *src = e->GetSrcBB(); + BB *dest = e->GetDestBB(); + BBUseInfo *srcUseInfo = GetOrCreateBBUseInfo(*src); + srcUseInfo->AddOutEdge(e); + BBUseInfo *destUseInfo = GetOrCreateBBUseInfo(*dest); + destUseInfo->AddInEdge(e); + } + for (auto &e : allEdges) { + if (e->IsInMST()) { + continue; + } + BB *src = e->GetSrcBB(); + BBUseInfo *srcUseInfo = GetOrCreateBBUseInfo(*src, true); + if (srcUseInfo->GetStatus() && srcUseInfo->GetOutEdgeSize() == 1) { + SetEdgeCount(*e, srcUseInfo->GetCount()); + } else { + BB *dest = e->GetDestBB(); + auto destUseInfo = GetOrCreateBBUseInfo(*dest, true); + if (destUseInfo->GetStatus() && destUseInfo->GetInEdgeSize() == 1) { + SetEdgeCount(*e, destUseInfo->GetCount()); + } + } + if (e->GetStatus()) { + continue; + } + 
SetEdgeCount(*e, 0); + } +} + +void CGProfUse::ComputeEdgeFreq() { + bool change = true; + size_t times = 0; + BB *commonEntry = f->GetFirstBB(); + while (change) { + change = false; + times++; + CHECK_FATAL(times != UINT32_MAX, "parse all edges fail"); + for (BB *curbb = commonEntry; curbb != nullptr; curbb = curbb->GetNext()) { + /* skip isolated bb */ + if (curbb->GetSuccs().empty() && curbb->GetPreds().empty()) { + continue; + } + BBUseInfo *useInfo = GetOrCreateBBUseInfo(*curbb, true); + if (!useInfo) { + continue; + } + ComputeBBFreq(*useInfo, change); + if (useInfo->GetStatus()) { + if (useInfo->GetUnknownOutEdges() == 1) { + uint64 total = 0; + uint64 outCount = SumEdgesCount(useInfo->GetOutEdges()); + if (useInfo->GetCount() > outCount) { + total = useInfo->GetCount() - outCount; + } + ASSERT(useInfo->GetCount() >= outCount, "find bad frequency"); + /* set the only unknown edge frequency */ + SetEdgeCount(*useInfo->GetOnlyUnknownOutEdges(), total); + change = true; + } + if (useInfo->GetUnknownInEdges() == 1) { + uint64 total = 0; + uint64 inCount = SumEdgesCount(useInfo->GetInEdges()); + if (useInfo->GetCount() > inCount) { + total = useInfo->GetCount() - inCount; + } + ASSERT(useInfo->GetCount() >= inCount, "find bad frequency"); + SetEdgeCount(*useInfo->GetOnlyUnknownInEdges(), total); + change = true; + } + } + } + } + if (debugChainLayout) { + LogInfo::MapleLogger() << "parse all edges in " << times << " times" << '\n'; + LogInfo::MapleLogger() << f->GetName() << " succ compute all edges " << '\n'; + } +} + +void CGProfUse::ApplyOnBB() { + BB *commonEntry = f->GetFirstBB(); + for (BB *curbb = commonEntry; curbb != nullptr; curbb = curbb->GetNext()) { + /* skip isolated bb */ + if (curbb->GetSuccs().empty() && curbb->GetPreds().empty()) { + continue; + } + BBUseInfo *useInfo = GetOrCreateBBUseInfo(*curbb, true); + if (!useInfo) { + LogInfo::MapleLogger() << "find use info for bb " << curbb->GetId(); + CHECK_FATAL(false, ""); + } + curbb->SetFrequency(static_cast(useInfo->GetCount())); + if (curbb == f->GetCommonExitBB()) { + continue; + } + curbb->InitEdgeFreq(); + auto outEdges = useInfo->GetOutEdges(); + for (auto *e : outEdges) { + auto *destBB = e->GetDestBB(); + if (destBB == f->GetCommonExitBB()) { + continue; + } + curbb->SetEdgeFreq(*destBB, e->GetCount()); + } + } +} + +void CGProfUse::ComputeBBFreq(BBUseInfo &bbInfo, bool &change) { + uint64 count = 0; + if (!bbInfo.GetStatus()) { + if (bbInfo.GetUnknownOutEdges() == 0) { + count = SumEdgesCount(bbInfo.GetOutEdges()); + bbInfo.SetCount(count); + change = true; + } else if (bbInfo.GetUnknownInEdges() == 0) { + count = SumEdgesCount(bbInfo.GetInEdges()); + bbInfo.SetCount(count); + change = true; + } + } +} + +uint64 CGProfUse::SumEdgesCount(const MapleVector *> &edges) const { + uint64 count = 0; + for (const auto &e : edges) { + count += e->GetCount(); + } + return count; +} + +void CGProfUse::SetEdgeCount(maple::BBUseEdge &e, size_t count) { + if (!e.GetStatus()) { + e.SetCount(count); + BBUseInfo *srcUseInfo = GetOrCreateBBUseInfo(*(e.GetSrcBB()), true); + BBUseInfo *destUseInfo = GetOrCreateBBUseInfo(*(e.GetDestBB()), true); + srcUseInfo->DecreaseUnKnownOutEdges(); + destUseInfo->DecreaseUnKnownInEdges(); + } +} + +BBUseInfo *CGProfUse::GetOrCreateBBUseInfo(const maplebe::BB &bb, bool notCreate) { + auto item = bbProfileInfo.find(bb.GetId()); + if (item != bbProfileInfo.end()) { + return item->second; + } else { + CHECK_FATAL(!notCreate, "do not create new bb useinfo in this case"); + auto *newInfo = 
mp->New>(*mp); + (void)bbProfileInfo.emplace(std::make_pair(bb.GetId(), newInfo)); + return newInfo; + } +} + +void CGProfUse::LayoutBBwithProfile() { + /* initialize */ + laidOut.resize(f->GetAllBBs().size(), false); + /* BB chain layout */ + BuildChainForFunc(); + BBChain *mainChain = bb2chain[f->GetFirstBB()->GetId()]; + for (auto bbId : bbSplit) { + BB *cbb = f->GetBBFromID(bbId); + CHECK_FATAL(cbb, "get bb failed"); + f->GetTheCFG()->ReverseCriticalEdge(*cbb); + } + for (auto it = mainChain->begin(); it != mainChain->end(); ++it) { + if (!bbSplit.count((*it)->GetId())) { + AddBBProf(**it); + } + } +} + +void CGProfUse::InitBBChains() { + uint32 id = 0; + bb2chain.resize(f->GetAllBBs().size(), nullptr); + BB *commonEntry = f->GetFirstBB(); + for (BB *curbb = commonEntry; curbb != nullptr; curbb = curbb->GetNext()) { + // BBChain constructor will update bb2chain + // attention cleanup & unreachable + (void)mp->New(puAlloc, bb2chain, *curbb, id++); + } +} + +void CGProfUse::BuildChainForFunc() { + uint32 validBBNum = 0; + BB *commonEntry = f->GetFirstBB(); + // attention cleanup & unreachable + for (BB *curbb = commonEntry; curbb != nullptr; curbb = curbb->GetNext()) { + if (curbb->IsUnreachable()) { + ASSERT(false, "check unreachable bb"); + continue; + } + if (f->IsExitBB(*curbb)) { + if (curbb->GetPrev() && curbb->GetPrev()->GetKind() == BB::kBBGoto && + curbb->GetPreds().empty() && curbb->GetSuccs().empty()) { + continue; + } + } + ++validBBNum; + } + // --validBBNum; // exclude cleanup BB + LogInfo::MapleLogger() << "\n[Chain layout] " << f->GetName() << ", valid bb num: " << validBBNum << std::endl; + InitBBChains(); + BuildChainForLoops(); + // init ready chains for func + for (BB *curbb = commonEntry; curbb != nullptr; curbb = curbb->GetNext()) { + uint32 bbId = curbb->GetId(); + BBChain *chain = bb2chain[bbId]; + + if (chain->IsReadyToLayout(nullptr)) { + (void)readyToLayoutChains.insert(chain); + } + } + BBChain *entryChain = bb2chain[commonEntry->GetId()]; + DoBuildChain(*commonEntry, *entryChain, nullptr); + + /* merge clean up */ + if (f->GetCleanupBB()) { + BBChain *cleanup = bb2chain[f->GetCleanupBB()->GetId()]; + if (readyToLayoutChains.find(cleanup) == readyToLayoutChains.end()) { + LogInfo::MapleLogger() << "clean up bb is not in ready layout "; + } + entryChain->MergeFrom(cleanup); + } + /* merge symbol label in C which is not in control flow */ + std::vector labelBB; + { + for (BB *curbb = commonEntry; curbb != nullptr; curbb = curbb->GetNext()) { + if (curbb->IsUnreachable()) { + /* delete unreachable bb in cfgo */ + ASSERT(false, "check unreachable bb"); + continue; + } + if (f->IsExitBB(*curbb)) { + if (curbb->GetPrev() && curbb->GetPrev()->GetKind() == BB::kBBGoto && + curbb->GetPreds().empty() && curbb->GetSuccs().empty()) { + continue; + } + } + if (!entryChain->FindBB(*curbb)) { + if (curbb->GetPreds().empty() && CGCFG::InSwitchTable(curbb->GetLabIdx(), *f)) { + labelBB.push_back(curbb); + } else { + LogInfo::MapleLogger() << "In function " << f->GetName() << " bb " << curbb->GetId() << " is no in chain\n"; + } + } + } + + for (auto bb : labelBB) { + BBChain *labelchain = bb2chain[bb->GetId()]; + if (readyToLayoutChains.find(labelchain) == readyToLayoutChains.end()) { + LogInfo::MapleLogger() << "label bb is not in ready layout "; + } + entryChain->MergeFrom(labelchain); + bb->SetNext(nullptr); + bb->SetPrev(nullptr); + } + } + + // To sure all of BBs have been laid out + CHECK_FATAL(entryChain->size() == validBBNum, "has any BB not been laid out?"); +} + +void 
CGProfUse::BuildChainForLoops() { + if (f->GetLoops().empty()) { + return; + } + auto &loops = f->GetLoops(); + // sort loops from inner most to outer most + std::stable_sort(loops.begin(), loops.end(), [](const CGFuncLoops *loop1, const CGFuncLoops *loop2) { + return loop1->GetLoopLevel() > loop2->GetLoopLevel(); + }); + auto *context = mp->New>(f->GetAllBBs().size(), false, puAlloc.Adapter()); + for (auto *loop : loops) { + BuildChainForLoop(*loop, context); + } +} + +void CGProfUse::BuildChainForLoop(CGFuncLoops &loop, MapleVector *context) { + // init loop context + std::fill(context->begin(), context->end(), false); + for (auto *bbMember : loop.GetLoopMembers()) { + CHECK_FATAL(bbMember->GetId() < context->size(), "index out of range"); + (*context)[bbMember->GetId()] = true; + } + // init ready chains for loop + for (auto *bbMember : loop.GetLoopMembers()) { + BBChain *chain = bb2chain[bbMember->GetId()]; + if (chain->IsReadyToLayout(context)) { + (void)readyToLayoutChains.insert(chain); + } + } + // find loop chain starting BB + BB *startBB = FindBestStartBBForLoop(loop, context); + if (startBB == nullptr) { + return; // all blocks in the loop have been laid out, just return + } + BBChain *startChain = bb2chain[startBB->GetId()]; + DoBuildChain(*startBB, *startChain, context); + readyToLayoutChains.clear(); +} + +// Multiple loops may share the same header, we try to find the best unplaced BB in the loop +BB *CGProfUse::FindBestStartBBForLoop(CGFuncLoops &loop, const MapleVector *context) { + auto *headerChain = bb2chain[loop.GetHeader()->GetId()]; + if (headerChain->size() == 1) { + return loop.GetHeader(); + } + // take inner loop chain tail BB as start BB + if (headerChain->size() > 1 && IsBBInCurrContext(*headerChain->GetTail(), context)) { + return headerChain->GetTail(); + } + for (auto *bbMember : loop.GetLoopMembers()) { + if (bb2chain[bbMember->GetId()]->size() == 1) { + return f->GetBBFromID(bbMember->GetId()); + } + } + return nullptr; +} + +bool CGProfUse::IsBBInCurrContext(const BB &bb, const MapleVector *context) const { + if (context == nullptr) { + return true; + } + return (*context)[bb.GetId()]; +} + +void CGProfUse::DoBuildChain(const BB &header, BBChain &chain, const MapleVector *context) { + CHECK_FATAL(bb2chain[header.GetId()] == &chain, "bb2chain mis-match"); + BB *bb = chain.GetTail(); + BB *bestSucc = GetBestSucc(*bb, chain, context, true); + while (bestSucc != nullptr) { + BBChain *succChain = bb2chain[bestSucc->GetId()]; + succChain->UpdateSuccChainBeforeMerged(chain, context, readyToLayoutChains); + chain.MergeFrom(succChain); + (void)readyToLayoutChains.erase(succChain); + bb = chain.GetTail(); + bestSucc = GetBestSucc(*bb, chain, context, true); + } + + if (debugChainLayout) { + bool inLoop = context != nullptr; + LogInfo::MapleLogger() << "Finish forming " << (inLoop ? 
"loop" : "func") << " chain: "; + chain.Dump(); + } +} + +BB *CGProfUse::GetBestSucc(BB &bb, const BBChain &chain, const MapleVector *context, bool considerBetterPred) { + // (1) search in succ + CHECK_FATAL(bb2chain[bb.GetId()] == &chain, "bb2chain mis-match"); + uint64 bestEdgeFreq = 0; + BB *bestSucc = nullptr; + auto iterBB = bb.GetSuccsBegin(); + for (uint32 i = 0; i < bb.GetSuccs().size(); ++i, ++iterBB) { + CHECK_FATAL(iterBB != bb.GetSuccsEnd(), "check unexpect BB"); + BB *succ = *iterBB; + CHECK_FATAL(succ, "check Empty BB"); + if (!IsCandidateSucc(bb, *succ, context)) { + continue; + } + if (considerBetterPred && HasBetterLayoutPred(bb, *succ)) { + continue; + } + uint64 currEdgeFreq = bb.GetEdgeFreq(i); // attention: entryBB->succFreq[i] is always 0 + if (bb.GetId() == 0) { // special case for common entry BB + CHECK_FATAL(bb.GetSuccs().size() == 1, "common entry BB should not have more than 1 succ"); + bestSucc = succ; + break; + } + if (currEdgeFreq > bestEdgeFreq) { // find max edge freq + bestEdgeFreq = currEdgeFreq; + bestSucc = succ; + } + } + if (bestSucc != nullptr) { + if (debugChainLayout) { + LogInfo::MapleLogger() << "Select [range1 succ ]: "; + LogInfo::MapleLogger() << bb.GetId() << " -> " << bestSucc->GetId() << std::endl; + } + return bestSucc; + } + + // (2) search in readyToLayoutChains + uint32 bestFreq = 0; + for (auto it = readyToLayoutChains.begin(); it != readyToLayoutChains.end(); ++it) { + BBChain *readyChain = *it; + BB *header = readyChain->GetHeader(); + if (!IsCandidateSucc(bb, *header, context)) { + continue; + } + bool useBBFreq = false; + if (useBBFreq) { // use bb freq + if (header->GetFrequency() > bestFreq) { // find max bb freq + bestFreq = header->GetFrequency(); + bestSucc = header; + } + } else { // use edge freq + uint32 subBestFreq = 0; + for (auto *pred : header->GetPreds()) { + uint32 curFreq = static_cast(pred->GetEdgeFreq(*header)); + if (curFreq > subBestFreq) { + subBestFreq = curFreq; + } + } + if (subBestFreq > bestFreq) { + bestFreq = subBestFreq; + bestSucc = header; + } else if (subBestFreq == bestFreq && bestSucc != nullptr && + bb2chain[header->GetId()]->GetId() < bb2chain[bestSucc->GetId()]->GetId()) { + bestSucc = header; + } + } + } + if (bestSucc != nullptr) { + (void)readyToLayoutChains.erase(bb2chain[bestSucc->GetId()]); + if (debugChainLayout) { + LogInfo::MapleLogger() << "Select [range2 ready]: "; + LogInfo::MapleLogger() << bb.GetId() << " -> " << bestSucc->GetId() << std::endl; + } + return bestSucc; + } + + // (3) search left part in context by topological sequence + const auto &rpoVec = domInfo->GetReversePostOrder(); + bool searchedAgain = false; + for (uint32 i = rpoSearchPos; i < rpoVec.size(); ++i) { + BB *candBB = rpoVec[i]; + if (IsBBInCurrContext(*candBB, context) && bb2chain[candBB->GetId()] != &chain) { + rpoSearchPos = i; + if (debugChainLayout) { + LogInfo::MapleLogger() << "Select [range3 rpot ]: "; + LogInfo::MapleLogger() << bb.GetId() << " -> " << candBB->GetId() << std::endl; + } + return candBB; + } + if (i == rpoVec.size() - 1 && !searchedAgain) { + i = 0; + searchedAgain = true; + } + } + return nullptr; +} + +bool CGProfUse::HasBetterLayoutPred(const BB &bb, const BB &succ) const { + auto &predList = succ.GetPreds(); + // predList.size() may be 0 if bb is common entry BB + if (predList.size() <= 1) { + return false; + } + uint32 sumEdgeFreq = succ.GetFrequency(); + const double hotEdgeFreqPercent = 0.6; // should further fine tuning + uint64 hotEdgeFreq = static_cast(sumEdgeFreq * 
hotEdgeFreqPercent); + // if edge freq(bb->succ) contribute more than 60% to succ block freq, no better layout pred than bb + for (auto predIt = predList.begin(); predIt != predList.end(); ++predIt) { + if (*predIt == &bb) { + continue; + } + uint64 edgeFreq = (*predIt)->GetEdgeFreq(succ); + if (edgeFreq > (sumEdgeFreq - hotEdgeFreq)) { + return true; + } + } + return false; +} + +bool CGProfUse::IsCandidateSucc(const BB &bb, const BB &succ, const MapleVector *context) { + if (!IsBBInCurrContext(succ, context)) { // succ must be in the current context (current loop or current func) + return false; + } + if (bb2chain[succ.GetId()] == bb2chain[bb.GetId()]) { // bb and succ should belong to different chains + return false; + } + if (succ.GetId() == 1) { // special case, exclude common exit BB + return false; + } + return true; +} + +bool CgPgoUse::PhaseRun(maplebe::CGFunc &f) { + CHECK_FATAL(f.NumBBs() < LiteProfile::GetBBNoThreshold(), "stop ! bb out of range!"); + if (!LiteProfile::IsInWhiteList(f.GetName())) { + return false; + } + + // for 525 538 only + LiteProfile::BBInfo *bbInfo = f.GetFunction().GetModule()->GetLiteProfile().GetFuncBBProf(f.GetName()); + if (!bbInfo) { + return false; + } + + MemPool *memPool = GetPhaseMemPool(); + auto *split = memPool->New(f, *memPool); + f.GetTheCFG()->InitInsnVisitor(f); + split->CollectCriticalEdges(); + split->SplitCriticalEdges(); + MapleSet newbbinsplit = split->CopyNewBBInfo(); + + MaplePhase *it = GetAnalysisInfoHook()-> + ForceRunAnalysisPhase, CGFunc>(&CgDomAnalysis::id, f); + auto *domInfo = static_cast(it)->GetResult(); + + (void)GetAnalysisInfoHook()-> + ForceRunAnalysisPhase, CGFunc>(&CgLoopAnalysis::id, f); + + CHECK_FATAL(domInfo, "find dom failed"); + CGProfUse pUse(f, *memPool, domInfo, newbbinsplit); + if (pUse.ApplyPGOData()) { + pUse.LayoutBBwithProfile(); + } + + if (f.GetName() == "LBM_performStreamCollideTRT") { + LogInfo::MapleLogger() << "After applying pgo : " << f.GetName() << "\n"; + DotGenerator::GenerateDot("litePGO", f, f.GetMirModule(), true); + } + + uint64 count = 0; + FOR_ALL_BB(bb, &f) { + count++; + if (count > f.GetAllBBs().size()) { + CHECK_FATAL(false, "infinte loop"); + } + } + return false; +} + +MAPLE_TRANSFORM_PHASE_REGISTER(CgPgoUse, cgpgouse) + +void RelinkBB(BB &prev, BB &next) { + prev.SetNext(&next); + next.SetPrev(&prev); +} + +void CGProfUse::AddBBProf(BB &bb) { + if (layoutBBs.empty()) { + AddBB(bb); + return; + } + BB *curBB = layoutBBs.back(); + if ((curBB->GetKind() == BB::kBBFallthru || curBB->GetKind() == BB::kBBGoto) && !curBB->GetSuccs().empty()) { + BB *targetBB = curBB->GetSuccs().front(); + CHECK_FATAL(!bbSplit.count(targetBB->GetId()), "check split bb"); + if (curBB->GetKind() == BB::kBBFallthru && (&bb != targetBB)) { + ReTargetSuccBB(*curBB, *targetBB); + } else if (curBB->GetKind() == BB::kBBGoto && (&bb == targetBB)) { + // delete the goto + ChangeToFallthruFromGoto(*curBB); + } + } else if (curBB->GetKind() == BB::kBBIf) { + CHECK_FATAL(curBB->GetSuccs().size() <= kSuccSizeOfIfBB, " if bb have more than 2 succs"); + CHECK_FATAL(!curBB->GetSuccs().empty(), " if bb have no succs"); + BB *targetBB = CGCFG::GetTargetSuc(*curBB); + BB *ftBB = nullptr; + for (BB *sucBB : curBB->GetSuccs()) { + if (sucBB != targetBB) { + ftBB = sucBB; + } + } + if (curBB->GetSuccs().size() == 1 && targetBB != curBB->GetNext()) { + CHECK_FATAL(false, " only 1 succs for if bb and ft not equal to target"); + } + if (curBB->GetSuccs().size() == 1 && targetBB == curBB->GetNext()) { + ftBB = targetBB; + } + if 
(!ftBB && curBB->GetSuccs().size() == kSuccSizeOfIfBB) { + auto bIt = curBB->GetSuccs().begin(); + BB *firstSucc = *bIt++; + BB *secondSucc = *bIt; + if (firstSucc == secondSucc) { + ftBB = targetBB; + } + } + CHECK_FATAL(ftBB, "find ft bb after ifBB failed"); + if (&bb == targetBB) { + CHECK_FATAL(!bbSplit.count(ftBB->GetId()), "check split bb"); + LabelIdx fallthruLabel = ftBB->GetLabIdx(); + if (fallthruLabel == MIRLabelTable::GetDummyLabel()) { + fallthruLabel = f->CreateLabel(); + ftBB->SetLabIdx(fallthruLabel); + } + f->GetTheCFG()->GetInsnModifier()->FlipIfBB(*curBB, fallthruLabel); + } else if (&bb != ftBB) { + CHECK_FATAL(!bbSplit.count(targetBB->GetId()), "check split bb"); + BB *newBB = f->GetTheCFG()->GetInsnModifier()->CreateGotoBBAfterCondBB(*curBB, *ftBB, targetBB == ftBB); + CHECK_FATAL(newBB, "create goto failed"); + newBB->SetFrequency(ftBB->GetFrequency()); + laidOut.push_back(false); + RelinkBB(*curBB, *newBB); + AddBB(*newBB); + curBB = newBB; + } + } else if (curBB->GetKind() == BB::kBBIntrinsic) { + CHECK_FATAL(false, "check intrinsic bb"); + } + RelinkBB(*curBB, bb); + AddBB(bb); +} + +void CGProfUse::ReTargetSuccBB(BB &bb, BB &fallthru) { + LabelIdx fallthruLabel = fallthru.GetLabIdx(); + if (fallthruLabel == MIRLabelTable::GetDummyLabel()) { + fallthruLabel = f->CreateLabel(); + fallthru.SetLabIdx(fallthruLabel); + } + f->GetTheCFG()->GetInsnModifier()->ReTargetSuccBB(bb, fallthruLabel); + bb.SetKind(BB::kBBGoto); +} + +void CGProfUse::ChangeToFallthruFromGoto(BB &bb) { + CHECK_FATAL(bb.GetLastMachineInsn(), "Get last insn in GOTO bb failed"); + bb.RemoveInsn(*bb.GetLastMachineInsn()); + bb.SetKind(BB::kBBFallthru); +} + +void CGProfUse::AddBB(BB &bb) { + CHECK_FATAL(bb.GetId() < laidOut.size(), "index out of range in BBLayout::AddBB"); + CHECK_FATAL(!laidOut[bb.GetId()], "AddBB: bb already laid out"); + layoutBBs.push_back(&bb); + laidOut[bb.GetId()] = true; + + if (bb.GetKind() == BB::kBBReturn) { + CHECK_FATAL(bb.GetSuccs().empty(), " common entry?"); + bb.SetNext(nullptr); + } + + // If the pred bb is goto bb and the target bb of goto bb is the current bb which is be added to layoutBBs, change the + // goto bb to fallthru bb. + if (layoutBBs.size() > 1) { + BB *predBB = layoutBBs.at(layoutBBs.size() - 2); // Get the pred of bb. + if (predBB->GetKind() != BB::kBBGoto) { + return; + } + if (predBB->GetSuccs().front() != &bb) { + return; + } + CHECK_FATAL(false, " implement ft bb to goto bb optimize "); + ChangeToFallthruFromGoto(*predBB); + } +} + +void BBChain::MergeFrom(BBChain *srcChain) { + CHECK_FATAL(this != srcChain, "merge same chain?"); + ASSERT_NOT_NULL(srcChain); + if (srcChain->empty()) { + return; + } + for (BB *bb : *srcChain) { + bbVec.push_back(bb); + bb2chain[bb->GetId()] = this; + } + srcChain->bbVec.clear(); + srcChain->unlaidPredCnt = 0; + srcChain->isCacheValid = false; + isCacheValid = false; // is this necessary? 
+} + +void BBChain::UpdateSuccChainBeforeMerged(const BBChain &destChain, const MapleVector *context, + MapleSet &readyToLayoutChains) { + for (BB *bb : bbVec) { + for (BB *succ : bb->GetSuccs()) { + if (context != nullptr && !(*context)[succ->GetId()]) { + continue; + } + if (bb2chain[succ->GetId()] == this || bb2chain[succ->GetId()] == &destChain) { + continue; + } + BBChain *succChain = bb2chain[succ->GetId()]; + succChain->MayRecalculateUnlaidPredCnt(context); + if (succChain->unlaidPredCnt != 0) { + --succChain->unlaidPredCnt; + } + if (succChain->unlaidPredCnt == 0) { + (void)readyToLayoutChains.insert(succChain); + } + } + } +} + +void BBChain::MayRecalculateUnlaidPredCnt(const MapleVector *context) { + if (isCacheValid) { + return; // If cache is trustable, no need to recalculate + } + unlaidPredCnt = 0; + for (BB *bb : bbVec) { + for (BB *pred : bb->GetPreds()) { + // exclude blocks out of context + if (context != nullptr && !(*context)[pred->GetId()]) { + continue; + } + // exclude blocks within the same chain + if (bb2chain[pred->GetId()] == this) { + continue; + } + ++unlaidPredCnt; + } + } + isCacheValid = true; +} +} diff --git a/src/mapleall/maple_be/src/cg/cg_phasemanager.cpp b/src/mapleall/maple_be/src/cg/cg_phasemanager.cpp new file mode 100644 index 0000000000000000000000000000000000000000..0285694e1fc83ecf98dd8230954d7778e5d7800b --- /dev/null +++ b/src/mapleall/maple_be/src/cg/cg_phasemanager.cpp @@ -0,0 +1,549 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "cg_phasemanager.h" +#include +#include +#include "cg_option.h" +#include "args.h" +#include "label_creation.h" +#include "isel.h" +#include "offset_adjust.h" +#include "alignment.h" +#include "yieldpoint.h" +#include "emit.h" +#include "reg_alloc.h" +#include "target_info.h" +#include "standardize.h" +#if TARGAARCH64 +#include "aarch64_emitter.h" +#include "aarch64_cg.h" +#elif TARGRISCV64 +#include "riscv64_emitter.h" +#elif TARGX86_64 +#include "x64_cg.h" +#include "x64_emitter.h" +#include "string_utils.h" +#endif + +namespace maplebe { +#define JAVALANG (module.IsJavaModule()) +#define CLANG (module.GetSrcLang() == kSrcLangC) + +#define RELEASE(pointer) \ + do { \ + if (pointer != nullptr) { \ + delete pointer; \ + pointer = nullptr; \ + } \ + } while (0) + +namespace { + +void DumpMIRFunc(MIRFunction &func, const char *msg, bool printAlways = false, const char* extraMsg = nullptr) { + bool dumpAll = (CGOptions::GetDumpPhases().find("*") != CGOptions::GetDumpPhases().end()); + bool dumpFunc = CGOptions::FuncFilter(func.GetName()); + if (printAlways || (dumpAll && dumpFunc)) { + LogInfo::MapleLogger() << msg << '\n'; + func.Dump(); + if (extraMsg) { + LogInfo::MapleLogger() << extraMsg << '\n'; + } + } +} + +} /* anonymous namespace */ + +void CgFuncPM::GenerateOutPutFile(MIRModule &m) const { + CHECK_FATAL(cg != nullptr, "cg is null"); + CHECK_FATAL(cg->GetEmitter(), "emitter is null"); + if (!cgOptions->SuppressFileInfo()) { + cg->GetEmitter()->EmitFileInfo(m.GetInputFileName()); + } + if (cgOptions->WithDwarf()) { + cg->GetEmitter()->EmitDIHeader(); + } + InitProfile(m); +} + +bool CgFuncPM::FuncLevelRun(CGFunc &cgFunc, AnalysisDataManager &serialADM) { + bool changed = false; + for (size_t i = 0; i < phasesSequence.size(); ++i) { + SolveSkipFrom(CGOptions::GetSkipFromPhase(), i); + const MaplePhaseInfo *curPhase = MaplePhaseRegister::GetMaplePhaseRegister()->GetPhaseByID(phasesSequence[i]); + if (!IsQuiet()) { + LogInfo::MapleLogger() << "---Run MplCG " << (curPhase->IsAnalysis() ? 
"analysis" : "transform") + << " Phase [ " << curPhase->PhaseName() << " ]---\n"; + } + if (curPhase->IsAnalysis()) { + changed = RunAnalysisPhase, CGFunc>(*curPhase, serialADM, cgFunc) || changed; + } else { + changed = RunTransformPhase, CGFunc>(*curPhase, serialADM, cgFunc) || changed; + DumpFuncCGIR(cgFunc, curPhase->PhaseName()); + } + SolveSkipAfter(CGOptions::GetSkipAfterPhase(), i); + } + return changed; +} + +void CgFuncPM::PostOutPut(MIRModule &m) const { + cg->GetEmitter()->EmitHugeSoRoutines(true); + if (cgOptions->WithDwarf()) { + cg->GetEmitter()->EmitDIFooter(); + } + /* Emit global info */ + EmitGlobalInfo(m); +} + +void MarkUsedStaticSymbol(const StIdx &symbolIdx); +std::map visitedSym; + +void CollectStaticSymbolInVar(MIRConst *mirConst) { + if (mirConst->GetKind() == kConstAddrof) { + auto *addrSymbol = static_cast(mirConst); + MIRSymbol *sym = GlobalTables::GetGsymTable().GetSymbolFromStidx(addrSymbol->GetSymbolIndex().Idx(), true); + if (sym != nullptr) { + MarkUsedStaticSymbol(sym->GetStIdx()); + } + } else if (mirConst->GetKind() == kConstAggConst) { + auto &constVec = static_cast(mirConst)->GetConstVec(); + for (auto &cst : constVec) { + CollectStaticSymbolInVar(cst); + } + } +} + +void MarkUsedStaticSymbol(const StIdx &symbolIdx) { + if (!symbolIdx.IsGlobal()) { + return; + } + MIRSymbol *symbol = GlobalTables::GetGsymTable().GetSymbolFromStidx(symbolIdx.Idx(), true); + if (symbol == nullptr) { + return; + } + if (visitedSym[symbolIdx]) { + return; + } else { + visitedSym[symbolIdx] = true; + } + symbol->ResetIsDeleted(); + if (symbol->IsConst()) { + auto *konst = symbol->GetKonst(); + CollectStaticSymbolInVar(konst); + } +} + +void RecursiveMarkUsedStaticSymbol(const BaseNode *baseNode) { + if (baseNode == nullptr) { + return; + } + Opcode op = baseNode->GetOpCode(); + switch (op) { + case OP_block: { + const BlockNode *blk = static_cast(baseNode); + for (auto &stmt : blk->GetStmtNodes()) { + RecursiveMarkUsedStaticSymbol(&stmt); + } + break; + } + case OP_dassign: { + const DassignNode *dassignNode = static_cast(baseNode); + MarkUsedStaticSymbol(dassignNode->GetStIdx()); + break; + } + case OP_addrof: + case OP_addrofoff: + case OP_dread: { + const AddrofNode *dreadNode = static_cast(baseNode); + MarkUsedStaticSymbol(dreadNode->GetStIdx()); + break; + } + default: { + break; + } + } + for (size_t i = 0; i < baseNode->NumOpnds(); ++i) { + RecursiveMarkUsedStaticSymbol(baseNode->Opnd(i)); + } +} + +void CollectStaticSymbolInFunction(MIRFunction &func) { + RecursiveMarkUsedStaticSymbol(func.GetBody()); +} + +void CgFuncPM::SweepUnusedStaticSymbol(MIRModule &m) const { + if (!m.IsCModule()) { + return; + } + size_t size = GlobalTables::GetGsymTable().GetSymbolTableSize(); + for (size_t i = 0; i < size; ++i) { + MIRSymbol *mirSymbol = GlobalTables::GetGsymTable().GetSymbolFromStidx(static_cast(i)); + if (mirSymbol != nullptr && (mirSymbol->GetSKind() == kStVar || mirSymbol->GetSKind() == kStConst) && + (mirSymbol->GetStorageClass() == kScFstatic || mirSymbol->GetStorageClass() == kScPstatic)) { + mirSymbol->SetIsDeleted(); + } + } + + visitedSym.clear(); + /* scan all funtions */ + std::vector &funcTable = GlobalTables::GetFunctionTable().GetFuncTable(); + /* don't optimize this loop to iterator or range-base loop + * because AddCallGraphNode(mirFunc) will change GlobalTables::GetFunctionTable().GetFuncTable() + */ + for (size_t index = 0; index < funcTable.size(); ++index) { + MIRFunction *mirFunc = funcTable.at(index); + if (mirFunc == nullptr || mirFunc->GetBody() 
== nullptr) { + continue; + } + m.SetCurFunction(mirFunc); + CollectStaticSymbolInFunction(*mirFunc); + /* scan function symbol declaration + * find addrof static const */ + MIRSymbolTable *funcSymTab = mirFunc->GetSymTab(); + if (funcSymTab) { + size_t localSymSize = funcSymTab->GetSymbolTableSize(); + for (uint32 i = 0; i < localSymSize; ++i) { + MIRSymbol *st = funcSymTab->GetSymbolFromStIdx(i); + if (st && st->IsConst()) { + MIRConst *mirConst = st->GetKonst(); + CollectStaticSymbolInVar(mirConst); + } + } + } + } + /* scan global symbol declaration + * find addrof static const */ + auto &symbolSet = m.GetSymbolSet(); + for (auto sit = symbolSet.begin(); sit != symbolSet.end(); ++sit) { + MIRSymbol *s = GlobalTables::GetGsymTable().GetSymbolFromStidx(sit->Idx(), true); + if (s->IsConst()) { + MIRConst *mirConst = s->GetKonst(); + CollectStaticSymbolInVar(mirConst); + } + } +} + +/* =================== new phase manager =================== */ +#ifdef RA_PERF_ANALYSIS +extern void printLSRATime(); +extern void printRATime(); +#endif + +bool CgFuncPM::PhaseRun(MIRModule &m) { + CreateCGAndBeCommon(m); + bool changed = false; + /* reserve static symbol for debugging */ + if (!cgOptions->WithDwarf()) { + SweepUnusedStaticSymbol(m); + } + if (cgOptions->IsRunCG()) { + GenerateOutPutFile(m); + + /* Run the cg optimizations phases */ + PrepareLower(m); + + uint32 countFuncId = 0; + unsigned long rangeNum = 0; + + auto userDefinedOptLevel = cgOptions->GetOptimizeLevel(); + cg->EnrollTargetPhases(this); + + auto admMempool = AllocateMemPoolInPhaseManager("cg phase manager's analysis data manager mempool"); + auto *serialADM = GetManagerMemPool()->New(*(admMempool.get())); + for (auto it = m.GetFunctionList().begin(); it != m.GetFunctionList().end(); ++it) { + ASSERT(serialADM->CheckAnalysisInfoEmpty(), "clean adm before function run"); + MIRFunction *mirFunc = *it; + if (mirFunc->GetBody() == nullptr) { + continue; + } + if (userDefinedOptLevel == CGOptions::kLevel2 && m.HasPartO2List()) { + if (m.IsInPartO2List(mirFunc->GetNameStrIdx())) { + cgOptions->EnableO2(); + } else { + cgOptions->EnableO0(); + } + ClearAllPhases(); + cg->EnrollTargetPhases(this); + cg->UpdateCGOptions(*cgOptions); + Globals::GetInstance()->SetOptimLevel(cgOptions->GetOptimizeLevel()); + } + if (!IsQuiet()) { + LogInfo::MapleLogger() << ">>>>>>>>>>>>>>>>>>>>>>>>>>>>> Optimizing Function < " << mirFunc->GetName() + << " id=" << mirFunc->GetPuidxOrigin() << " >---\n"; + } + /* LowerIR. */ + m.SetCurFunction(mirFunc); + if (cg->DoConstFold()) { + DumpMIRFunc(*mirFunc, "************* before ConstantFold **************"); + ConstantFold cf(m); + (void)cf.Simplify(mirFunc->GetBody()); + } + + if (m.GetFlavor() != MIRFlavor::kFlavorLmbc) { + DoFuncCGLower(m, *mirFunc); + } + /* create CGFunc */ + MIRSymbol *funcSt = GlobalTables::GetGsymTable().GetSymbolFromStidx(mirFunc->GetStIdx().Idx()); + auto funcMp = std::make_unique(memPoolCtrler, funcSt->GetName()); + auto stackMp = std::make_unique(funcMp->GetCtrler(), ""); + MapleAllocator funcScopeAllocator(funcMp.get()); + mirFunc->SetPuidxOrigin(++countFuncId); + CGFunc *cgFunc = cg->CreateCGFunc(m, *mirFunc, *beCommon, *funcMp, *stackMp, funcScopeAllocator, countFuncId); + CHECK_FATAL(cgFunc != nullptr, "Create CG Function failed in cg_phase_manager"); + CG::SetCurCGFunc(*cgFunc); + + if (cgOptions->WithDwarf()) { + cgFunc->SetDebugInfo(m.GetDbgInfo()); + } + /* Run the cg optimizations phases. 
*/ + if (CGOptions::UseRange() && rangeNum >= CGOptions::GetRangeBegin() && rangeNum <= CGOptions::GetRangeEnd()) { + CGOptions::EnableInRange(); + } + changed = FuncLevelRun(*cgFunc, *serialADM); + /* Delete mempool. */ + mirFunc->ReleaseCodeMemory(); + ++rangeNum; + CGOptions::DisableInRange(); + } + PostOutPut(m); +#ifdef RA_PERF_ANALYSIS + if (cgOptions->IsEnableTimePhases()) { + printLSRATime(); + printRATime(); + } +#endif + } else { + LogInfo::MapleLogger(kLlErr) << "Skipped generating .s because -no-cg is given" << '\n'; + } + RELEASE(cg); + RELEASE(beCommon); + return changed; +} + +void CgFuncPM::DumpFuncCGIR(const CGFunc &f, const std::string &phaseName) const { + if (CGOptions::DumpPhase(phaseName) && CGOptions::FuncFilter(f.GetName())) { + LogInfo::MapleLogger() << "\n******** CG IR After " << phaseName << ": *********\n"; + f.DumpCGIR(); + } +} + +void CgFuncPM::EmitGlobalInfo(MIRModule &m) const { + EmitDuplicatedAsmFunc(m); + EmitFastFuncs(m); + if (cgOptions->IsGenerateObjectMap()) { + cg->GenerateObjectMaps(*beCommon); + } + cg->GetEmitter()->EmitGlobalVariable(); + EmitDebugInfo(m); + cg->GetEmitter()->CloseOutput(); +} + +void CgFuncPM::InitProfile(MIRModule &m) const { + if (!CGOptions::IsProfileDataEmpty()) { + uint32 dexNameIdx = m.GetFileinfo(GlobalTables::GetStrTable().GetOrCreateStrIdxFromName("INFO_filename")); + const std::string &dexName = GlobalTables::GetStrTable().GetStringFromStrIdx(GStrIdx(dexNameIdx)); + bool deCompressSucc = m.GetProfile().DeCompress(CGOptions::GetProfileData(), dexName); + if (!deCompressSucc) { + LogInfo::MapleLogger() << "WARN: DeCompress() " << CGOptions::GetProfileData() << "failed in mplcg()\n"; + } + } + if (!CGOptions::GetInstrumentationWhiteList().empty()) { + bool handleSucc = m.GetLiteProfile().HandleInstrumentationWhiteList(CGOptions::GetInstrumentationWhiteList()); + if (!handleSucc) { + LogInfo::MapleLogger() << "WARN: Handle instrumentation white list file " << + CGOptions::GetInstrumentationWhiteList() << + "failed in mplcg\n"; + } + } + if (!CGOptions::GetLiteProfile().empty()) { + bool handleSucc = m.GetLiteProfile().HandleLitePGOFile(CGOptions::GetLiteProfile(), m.GetFileName()); + if (!handleSucc) { + LogInfo::MapleLogger() << "WARN: Handle Lite PGO input file " << CGOptions::GetLiteProfile() << + "failed in mplcg\n"; + } + } +} + +void CgFuncPM::CreateCGAndBeCommon(MIRModule &m) { + ASSERT(cgOptions != nullptr, "New cg phase manager running FAILED :: cgOptions unset"); +#if TARGAARCH64 || TARGRISCV64 + cg = new AArch64CG(m, *cgOptions, cgOptions->GetEHExclusiveFunctionNameVec(), CGOptions::GetCyclePatternMap()); + cg->SetEmitter(*m.GetMemPool()->New(*cg, m.GetOutputFileName())); +#elif TARGARM32 + cg = new Arm32CG(m, *cgOptions, cgOptions->GetEHExclusiveFunctionNameVec(), CGOptions::GetCyclePatternMap()); + cg->SetEmitter(*m.GetMemPool()->New(*cg, m.GetOutputFileName())); +#elif TARGX86_64 + cg = new X64CG(m, *cgOptions); + cg->SetEmitter(*m.GetMemPool()->New(*cg, m.GetOutputFileName())); +#else +#error "unknown platform" +#endif + + + /* + * Must be done before creating any BECommon instances. + * + * BECommon, when constructed, will calculate the type, size and align of all types. As a side effect, it will also + * lower ptr and ref types into a64. That will drop the information of what a ptr or ref points to. + * + * All metadata generation passes which depend on the pointed-to type must be done here. 
+ */ + cg->GenPrimordialObjectList(m.GetBaseName()); + /* We initialize a couple of BECommon's tables using the size information of GlobalTables.type_table_. + * So, BECommon must be allocated after all the parsing is done and user-defined types are all acounted. + */ + beCommon = new BECommon(m); + Globals::GetInstance()->SetBECommon(*beCommon); + Globals::GetInstance()->SetTarget(*cg); + + /* If a metadata generation pass depends on object layout it must be done after creating BECommon. */ + cg->GenExtraTypeMetadata(cgOptions->GetClassListFile(), m.GetBaseName()); + +#if TARGAARCH64 + if (!m.IsCModule()) { + CGOptions::EnableFramePointer(); + } +#endif +} + +void CgFuncPM::PrepareLower(MIRModule &m) { + mirLower = GetManagerMemPool()->New(m, nullptr); + mirLower->Init(); + cgLower = GetManagerMemPool()->New(m, + *beCommon, *GetPhaseMemPool(), cg->GenerateExceptionHandlingCode(), cg->GenerateVerboseCG()); + cgLower->RegisterBuiltIns(); + if (m.IsJavaModule()) { + cgLower->InitArrayClassCacheTableIndex(); + } + cgLower->RegisterExternalLibraryFunctions(); + cgLower->SetCheckLoadStore(CGOptions::IsCheckArrayStore()); + if (cg->IsStackProtectorStrong() || cg->IsStackProtectorAll() || m.HasPartO2List()) { + cg->AddStackGuardvar(); + } +} + +void CgFuncPM::DoFuncCGLower(const MIRModule &m, MIRFunction &mirFunc) const { + if (m.GetFlavor() <= kFeProduced) { + mirLower->SetLowerCG(); + mirLower->SetMirFunc(&mirFunc); + + DumpMIRFunc(mirFunc, "************* before MIRLowerer **************"); + mirLower->LowerFunc(mirFunc); + } + + bool isNotQuiet = !CGOptions::IsQuiet(); + DumpMIRFunc(mirFunc, "************* before CGLowerer **************", isNotQuiet); + + cgLower->LowerFunc(mirFunc); + + DumpMIRFunc(mirFunc, "************* after CGLowerer **************", isNotQuiet, + "************* end CGLowerer **************"); +} + +void CgFuncPM::EmitDuplicatedAsmFunc(MIRModule &m) const { + if (CGOptions::IsDuplicateAsmFileEmpty()) { + return; + } + + std::ifstream duplicateAsmFileFD(CGOptions::GetDuplicateAsmFile()); + + if (!duplicateAsmFileFD.is_open()) { + duplicateAsmFileFD.close(); + ERR(kLncErr, " %s open failed!", CGOptions::GetDuplicateAsmFile().c_str()); + return; + } + std::string contend; + bool onlyForFramework = false; + bool isFramework = IsFramework(m); + + while (getline(duplicateAsmFileFD, contend)) { + if (contend.compare("#Libframework_start") == 0) { + onlyForFramework = true; + } + + if (contend.compare("#Libframework_end") == 0) { + onlyForFramework = false; + } + + if (onlyForFramework && !isFramework) { + continue; + } + + (void)cg->GetEmitter()->Emit(contend + "\n"); + } + duplicateAsmFileFD.close(); +} + +void CgFuncPM::EmitFastFuncs(const MIRModule &m) const { + if (CGOptions::IsFastFuncsAsmFileEmpty() || !(m.IsJavaModule())) { + return; + } + + struct stat buffer; + if (stat(CGOptions::GetFastFuncsAsmFile().c_str(), &buffer) != 0) { + return; + } + + std::ifstream fastFuncsAsmFileFD(CGOptions::GetFastFuncsAsmFile()); + if (fastFuncsAsmFileFD.is_open()) { + std::string contend; + (void)cg->GetEmitter()->Emit("#define ENABLE_LOCAL_FAST_FUNCS 1\n"); + + while (getline(fastFuncsAsmFileFD, contend)) { + (void)cg->GetEmitter()->Emit(contend + "\n"); + } + } + fastFuncsAsmFileFD.close(); +} + +void CgFuncPM::EmitDebugInfo(const MIRModule &m) const { + if (!cgOptions->WithDwarf()) { + return; + } + cg->GetEmitter()->SetupDBGInfo(m.GetDbgInfo()); + cg->GetEmitter()->EmitDIHeaderFileInfo(); + cg->GetEmitter()->EmitDIDebugInfoSection(m.GetDbgInfo()); + 
cg->GetEmitter()->EmitDIDebugAbbrevSection(m.GetDbgInfo()); + cg->GetEmitter()->EmitDIDebugARangesSection(); + cg->GetEmitter()->EmitDIDebugRangesSection(); + cg->GetEmitter()->EmitDIDebugLineSection(); + cg->GetEmitter()->EmitDIDebugStrSection(); +} + +bool CgFuncPM::IsFramework(MIRModule &m) const { + auto &funcList = m.GetFunctionList(); + for (auto it = funcList.begin(); it != funcList.end(); ++it) { + MIRFunction *mirFunc = *it; + ASSERT(mirFunc != nullptr, "nullptr check"); + if (mirFunc->GetBody() != nullptr && + mirFunc->GetName() == "Landroid_2Fos_2FParcel_3B_7CnativeWriteString_7C_28JLjava_2Flang_2FString_3B_29V") { + return true; + } + } + return false; +} +MAPLE_TRANSFORM_PHASE_REGISTER(CgFuncPM, cgFuncPhaseManager) +/* register codegen common phases */ +MAPLE_TRANSFORM_PHASE_REGISTER(CgLayoutFrame, layoutstackframe) +MAPLE_TRANSFORM_PHASE_REGISTER(CgCreateLabel, createstartendlabel) +MAPLE_TRANSFORM_PHASE_REGISTER(InstructionSelector, instructionselector) +MAPLE_TRANSFORM_PHASE_REGISTER(InstructionStandardize, instructionstandardize) +MAPLE_TRANSFORM_PHASE_REGISTER(CgMoveRegArgs, moveargs) +MAPLE_TRANSFORM_PHASE_REGISTER(CgRegAlloc, regalloc) +MAPLE_TRANSFORM_PHASE_REGISTER(CgAlignAnalysis, alignanalysis) +MAPLE_TRANSFORM_PHASE_REGISTER(CgFrameFinalize, framefinalize) +MAPLE_TRANSFORM_PHASE_REGISTER(CgYieldPointInsertion, yieldpoint) +MAPLE_TRANSFORM_PHASE_REGISTER(CgGenProEpiLog, generateproepilog) +MAPLE_TRANSFORM_PHASE_REGISTER(CgIsolateFastPath, isolatefastpath) +} /* namespace maplebe */ diff --git a/src/mapleall/maple_be/src/cg/cg_phi_elimination.cpp b/src/mapleall/maple_be/src/cg/cg_phi_elimination.cpp new file mode 100644 index 0000000000000000000000000000000000000000..09d4c704b7a5dfefd65a1107a7f3484bd64aa64c --- /dev/null +++ b/src/mapleall/maple_be/src/cg/cg_phi_elimination.cpp @@ -0,0 +1,152 @@ +/* + * Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "cg_phi_elimination.h" +#include "cg.h" +#include "cgbb.h" + +namespace maplebe { +void PhiEliminate::TranslateTSSAToCSSA() { + FOR_ALL_BB(bb, cgFunc) { + eliminatedBB.emplace(bb->GetId()); + for (auto phiInsnIt : bb->GetPhiInsns()) { + /* Method I create a temp move for phi-node */ + auto &destReg = static_cast(phiInsnIt.second->GetOperand(kInsnFirstOpnd)); + RegOperand &tempMovDest = cgFunc->GetOrCreateVirtualRegisterOperand(CreateTempRegForCSSA(destReg)); + auto &phiList = static_cast(phiInsnIt.second->GetOperand(kInsnSecondOpnd)); + for (auto phiOpndIt : phiList.GetOperands()) { + uint32 fBBId = phiOpndIt.first; + ASSERT(fBBId != 0, "GetFromBBID = 0"); +#if DEBUG + bool find = false; + for (auto predBB : bb->GetPreds()) { + if (predBB->GetId() == fBBId) { + find = true; + } + } + CHECK_FATAL(find, "dont exited pred for phi-node"); +#endif + PlaceMovInPredBB(fBBId, CreateMov(tempMovDest, *(phiOpndIt.second))); + } + Insn &movInsn = CreateMov(destReg, tempMovDest); + bb->ReplaceInsn(*phiInsnIt.second, movInsn); + } + } + + FOR_ALL_BB(bb, cgFunc) { + FOR_BB_INSNS(insn, bb) { + CHECK_FATAL(eliminatedBB.count(bb->GetId()) != 0, "still have phi"); + if (!insn->IsMachineInstruction()) { + continue; + } + ReCreateRegOperand(*insn); + bb->GetPhiInsns().clear(); + insn->SetSSAImpDefOpnd(nullptr); + } + } + UpdateRematInfo(); + cgFunc->SetSSAvRegCount(0); +} + +void PhiEliminate::UpdateRematInfo() { + if (CGOptions::GetRematLevel() > 0) { + cgFunc->UpdateAllRegisterVregMapping(remateInfoAfterSSA); + } +} + +void PhiEliminate::PlaceMovInPredBB(uint32 predBBId, Insn &movInsn) const { + BB *predBB = cgFunc->GetBBFromID(predBBId); + ASSERT(movInsn.GetOperand(kInsnSecondOpnd).IsRegister(), "unexpect operand"); + if (predBB->GetKind() == BB::kBBFallthru) { + predBB->AppendInsn(movInsn); + } else { + AppendMovAfterLastVregDef(*predBB, movInsn); + } +} + +regno_t PhiEliminate::RecursiveBothDU(RegOperand &ssaOpnd) { + if (!ssaOpnd.IsSSAForm()) { + return ssaOpnd.GetRegisterNumber(); + } + VRegVersion *ssaVersion = GetSSAInfo()->FindSSAVersion(ssaOpnd.GetRegisterNumber()); + ASSERT(ssaVersion != nullptr, "find ssaVersion failed"); + ASSERT(!ssaVersion->IsDeleted(), "ssaVersion has been deleted"); + RegOperand *regForRecreate = &ssaOpnd; + if (GetSSAInfo()->IsNoDefVReg(ssaOpnd.GetRegisterNumber())) { + regForRecreate = MakeRoomForNoDefVreg(ssaOpnd); + } else { + ASSERT(regForRecreate->IsSSAForm(), "Opnd is not in ssa form"); + } + DUInsnInfo *defInfo = ssaVersion->GetDefInsnInfo(); + Insn *defInsn = defInfo != nullptr ? 
defInfo->GetInsn() : nullptr; + if (defInsn != nullptr) { + uint32 defUseIdx = defInsn->GetBothDefUseOpnd(); + if (defUseIdx != kInsnMaxOpnd) { + if (defInfo->GetOperands().count(defUseIdx) > 0) { + CHECK_FATAL(defInfo->GetOperands()[defUseIdx] == 1, "multiple definiation"); + Operand &preOpnd = defInsn->GetOperand(defUseIdx); + ASSERT(preOpnd.IsRegister(), "unexpect operand type"); + return RecursiveBothDU(static_cast(preOpnd)); + } + } + CHECK_FATAL(defInsn->GetMachineOpcode() != MOP_asm, "not implement yet"); + CHECK_FATAL(ssaVersion->GetOriginalRegNO() != kRFLAG, "not implement yet"); + } else { + return ssaVersion->GetOriginalRegNO(); + } + return regForRecreate->GetRegisterNumber(); +} + +regno_t PhiEliminate::GetAndIncreaseTempRegNO() { + while (GetSSAInfo()->GetAllSSAOperands().count(tempRegNO) != 0) { + tempRegNO++; + } + regno_t ori = tempRegNO; + tempRegNO++; + return ori; +} + +RegOperand *PhiEliminate::MakeRoomForNoDefVreg(RegOperand &conflictReg) { + regno_t conflictVregNO = conflictReg.GetRegisterNumber(); + auto rVregIt = replaceVreg.find(conflictVregNO); + if (rVregIt != replaceVreg.end()) { + return rVregIt->second; + } else { + RegOperand *regForRecreate = &CreateTempRegForCSSA(conflictReg); + (void)replaceVreg.emplace(std::pair(conflictVregNO, regForRecreate)); + return regForRecreate; + } +} + +void PhiEliminate::RecordRematInfo(regno_t vRegNO, PregIdx pIdx) { + if (remateInfoAfterSSA.count(vRegNO) != 0) { + if (remateInfoAfterSSA[vRegNO] != pIdx) { + remateInfoAfterSSA.erase(vRegNO); + } + } else { + (void)remateInfoAfterSSA.emplace(std::pair(vRegNO, pIdx)); + } +} + +bool CgPhiElimination::PhaseRun(maplebe::CGFunc &f) { + CGSSAInfo *ssaInfo = GET_ANALYSIS(CgSSAConstruct, f); + PhiEliminate *pe = f.GetCG()->CreatePhiElimintor(*GetPhaseMemPool(), f, *ssaInfo); + pe->TranslateTSSAToCSSA(); + return false; +} +void CgPhiElimination::GetAnalysisDependence(maple::AnalysisDep &aDep) const { + aDep.AddRequired(); +} +MAPLE_TRANSFORM_PHASE_REGISTER(CgPhiElimination, cgphielimination) +} diff --git a/src/mapleall/maple_be/src/cg/cg_pre.cpp b/src/mapleall/maple_be/src/cg/cg_pre.cpp new file mode 100644 index 0000000000000000000000000000000000000000..5730f91da6f5c4bd0972c202ed2cd5a39cefc2ab --- /dev/null +++ b/src/mapleall/maple_be/src/cg/cg_pre.cpp @@ -0,0 +1,222 @@ +/* + * Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
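// [Editor's note — illustrative sketch, not part of this patch] cg_pre.cpp below
// implements the SSAPRE skeleton for code-motion candidates. Its first analysis
// step after phi insertion is down-safety: CGPre::ResetDS clears a phi's down-safe
// flag and propagates the reset backwards along use-def edges, while
// CGPre::ComputeDS starts that propagation from operands of non-down-safe phis that
// have no real use on the incoming path. The toy types below (PhiNode, PhiOpnd) are
// hypothetical stand-ins for CgPhiOcc/CgPhiOpndOcc, trimmed to just those two routines.
#include <cstdio>
#include <vector>

struct PhiNode;

// One phi operand: possibly defined by another phi, possibly covered by a
// real occurrence ("real use") on the incoming path.
struct PhiOpnd {
  PhiNode *def;
  bool hasRealUse;
};

struct PhiNode {
  bool isDownSafe = true;
  std::vector<PhiOpnd> opnds;
};

// Clearing down-safety propagates backwards along use-def edges.
static void ResetDS(PhiNode &phi) {
  if (!phi.isDownSafe) {
    return;
  }
  phi.isDownSafe = false;
  for (PhiOpnd &opnd : phi.opnds) {
    if (opnd.def != nullptr) {
      ResetDS(*opnd.def);
    }
  }
}

// A phi that is not down-safe poisons the defining phis of its operands,
// except along paths already covered by a real occurrence.
static void ComputeDS(const std::vector<PhiNode*> &phis) {
  for (auto it = phis.rbegin(); it != phis.rend(); ++it) {
    if ((*it)->isDownSafe) {
      continue;
    }
    for (PhiOpnd &opnd : (*it)->opnds) {
      if (!opnd.hasRealUse && opnd.def != nullptr) {
        ResetDS(*opnd.def);
      }
    }
  }
}

int main() {
  PhiNode a;
  PhiNode b;               // b's operand comes from a, with no real use in between
  b.isDownSafe = false;    // e.g. an exit is reachable before the value is used again
  b.opnds.push_back({&a, false});
  std::vector<PhiNode*> phis = {&a, &b};
  ComputeDS(phis);
  std::printf("a.isDownSafe = %d\n", static_cast<int>(a.isDownSafe));  // prints 0
  return 0;
}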
+ */ +#include "cg_pre.h" +#include "cg_dominance.h" +#include "aarch64_cg.h" + +namespace maplebe { +/* Implement PRE in cgir */ +void CGPre::ResetDS(CgPhiOcc *phiOcc) { + if (!phiOcc->IsDownSafe()) { + return; + } + + phiOcc->SetIsDownSafe(false); + for (auto *phiOpnd : phiOcc->GetPhiOpnds()) { + auto *defOcc = phiOpnd->GetDef(); + if (defOcc != nullptr && defOcc->GetOccType() == kOccPhiocc) { + ResetDS(static_cast(defOcc)); + } + } +} + +void CGPre::ComputeDS() { + for (auto phiIt = phiOccs.rbegin(); phiIt != phiOccs.rend(); ++phiIt) { + auto *phiOcc = *phiIt; + if (phiOcc->IsDownSafe()) { + continue; + } + for (auto *phiOpnd : phiOcc->GetPhiOpnds()) { + if (phiOpnd->HasRealUse()) { + continue; + } + auto *defOcc = phiOpnd->GetDef(); + if (defOcc != nullptr && defOcc->GetOccType() == kOccPhiocc) { + ResetDS(static_cast(defOcc)); + } + } + } +} + +/* based on ssapre->workCand's realOccs and dfPhiDfns (which now privides all + the inserted phis), create the phi and phiOpnd occ nodes; link them all up in + order of dt_preorder in ssapre->allOccs; the phi occ nodes are in addition + provided in order of dt_preorder in ssapre->phiOccs */ +void CGPre::CreateSortedOccs() { + // merge varPhiDfns to dfPhiDfns + dfPhiDfns.insert(varPhiDfns.begin(), varPhiDfns.end()); + + auto comparator = [this](const CgPhiOpndOcc *occA, const CgPhiOpndOcc *occB) -> bool { + return dom->GetDtDfnItem(occA->GetBB()->GetId()) < dom->GetDtDfnItem(occB->GetBB()->GetId()); + }; + + std::vector phiOpnds; + for (auto dfn : dfPhiDfns) { + uint32 bbId = dom->GetDtPreOrderItem(dfn); + BB *bb = GetBB(bbId); + auto *phiOcc = perCandMemPool->New(*bb, workCand->GetTheOperand(), perCandAllocator); + phiOccs.push_back(phiOcc); + + for (BB *pred : bb->GetPreds()) { + auto phiOpnd = perCandMemPool->New(pred, workCand->GetTheOperand(), phiOcc); + phiOpnds.push_back(phiOpnd); + phiOcc->AddPhiOpnd(*phiOpnd); + phiOpnd->SetPhiOcc(*phiOcc); + } + } + std::sort(phiOpnds.begin(), phiOpnds.end(), comparator); + + auto realOccIt = workCand->GetRealOccs().begin(); + auto exitOccIt = exitOccs.begin(); + auto phiIt = phiOccs.begin(); + auto phiOpndIt = phiOpnds.begin(); + + CgOccur *nextRealOcc = nullptr; + if (realOccIt != workCand->GetRealOccs().end()) { + nextRealOcc = *realOccIt; + } + + CgOccur *nextExitOcc = nullptr; + if (exitOccIt != exitOccs.end()) { + nextExitOcc = *exitOccIt; + } + + CgPhiOcc *nextPhiOcc = nullptr; + if (phiIt != phiOccs.end()) { + nextPhiOcc = *phiIt; + } + + CgPhiOpndOcc *nextPhiOpndOcc = nullptr; + if (phiOpndIt != phiOpnds.end()) { + nextPhiOpndOcc = *phiOpndIt; + } + + CgOccur *pickedOcc; // the next picked occ in order of preorder traveral of dominator tree + do { + pickedOcc = nullptr; + // the 4 kinds of occ must be checked in this order, so it will be right + // if more than 1 has the same dfn + if (nextPhiOcc != nullptr) { + pickedOcc = nextPhiOcc; + } + if (nextRealOcc != nullptr && (pickedOcc == nullptr || + dom->GetDtDfnItem(nextRealOcc->GetBB()->GetId()) < + dom->GetDtDfnItem(pickedOcc->GetBB()->GetId()))) { + pickedOcc = nextRealOcc; + } + if (nextExitOcc != nullptr && (pickedOcc == nullptr || + dom->GetDtDfnItem(nextExitOcc->GetBB()->GetId()) < + dom->GetDtDfnItem(pickedOcc->GetBB()->GetId()))) { + pickedOcc = nextExitOcc; + } + if (nextPhiOpndOcc != nullptr && (pickedOcc == nullptr || + dom->GetDtDfnItem(nextPhiOpndOcc->GetBB()->GetId()) < + dom->GetDtDfnItem(pickedOcc->GetBB()->GetId()))) { + pickedOcc = nextPhiOpndOcc; + } + if (pickedOcc != nullptr) { + allOccs.push_back(pickedOcc); + switch 
(pickedOcc->GetOccType()) { + case kOccReal: + case kOccUse: + case kOccDef: + case kOccStore: + case kOccMembar: { + ++realOccIt; + if (realOccIt != workCand->GetRealOccs().end()) { + nextRealOcc = *realOccIt; + } else { + nextRealOcc = nullptr; + } + break; + } + case kOccExit: { + ++exitOccIt; + if (exitOccIt != exitOccs.end()) { + nextExitOcc = *exitOccIt; + } else { + nextExitOcc = nullptr; + } + break; + } + case kOccPhiocc: { + ++phiIt; + if (phiIt != phiOccs.end()) { + nextPhiOcc = *phiIt; + } else { + nextPhiOcc = nullptr; + } + break; + } + case kOccPhiopnd: { + ++phiOpndIt; + if (phiOpndIt != phiOpnds.end()) { + nextPhiOpndOcc = *phiOpndIt; + } else { + nextPhiOpndOcc = nullptr; + } + break; + } + default: + ASSERT(false, "CreateSortedOccs: unexpected occty"); + break; + } + } + } while (pickedOcc != nullptr); +} + +CgOccur *CGPre::CreateRealOcc(Insn &insn, Operand &opnd, OccType occType) { + uint64 hashIdx = PreWorkCandHashTable::ComputeWorkCandHashIndex(opnd); + PreWorkCand *wkCand = preWorkCandHashTable.GetWorkcandFromIndex(hashIdx); + while (wkCand != nullptr) { + Operand *currOpnd = wkCand->GetTheOperand(); + ASSERT(currOpnd != nullptr, "CreateRealOcc: found workcand with theMeExpr as nullptr"); + if (currOpnd == &opnd) { + break; + } + wkCand = static_cast(wkCand->GetNext()); + } + + CgOccur *newOcc = nullptr; + switch (occType) { + case kOccDef: + newOcc = ssaPreMemPool->New(insn.GetBB(), &insn, &opnd); + break; + case kOccStore: + newOcc = ssaPreMemPool->New(insn.GetBB(), &insn, &opnd); + break; + case kOccUse: + newOcc = ssaPreMemPool->New(insn.GetBB(), &insn, &opnd); + break; + default: + CHECK_FATAL(false, "unsupported occur type"); + break; + } + + if (wkCand != nullptr) { + wkCand->AddRealOccAsLast(*newOcc, GetPUIdx()); + return newOcc; + } + + // workcand not yet created; create a new one and add to worklist + wkCand = ssaPreMemPool->New(ssaPreAllocator, &opnd, GetPUIdx()); + workList.push_back(wkCand); + wkCand->AddRealOccAsLast(*newOcc, GetPUIdx()); + // add to bucket at workcandHashTable[hashIdx] + wkCand->SetNext(*preWorkCandHashTable.GetWorkcandFromIndex(hashIdx)); + preWorkCandHashTable.SetWorkCandAt(hashIdx, *wkCand); + return newOcc; +} +} // namespace maple diff --git a/src/mapleall/maple_be/src/cg/cg_prop.cpp b/src/mapleall/maple_be/src/cg/cg_prop.cpp new file mode 100644 index 0000000000000000000000000000000000000000..8d3339323e9e458e8d27cf02d6fcbc55007894f7 --- /dev/null +++ b/src/mapleall/maple_be/src/cg/cg_prop.cpp @@ -0,0 +1,82 @@ +/* + * Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
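// [Editor's note — illustrative sketch, not part of this patch] cg_prop.cpp below
// drives copy propagation on the SSA form: every virtual-register version has a
// single definition (PropOptimizePattern::FindDefInsn fetches it), so a use whose
// def is a plain register move can be rewritten to use the move's source, and the
// dead copies are then removed by cgDce->DoDce(). A minimal stand-alone
// illustration of following such copy chains; the string-keyed map is a
// hypothetical simplification, not the real VRegVersion/DUInsnInfo machinery.
#include <cstdio>
#include <map>
#include <string>

// copyDef maps an SSA version to the source of the copy that defines it.
// SSA guarantees the chain v3 <- v2 <- v1 is acyclic, so the walk terminates.
static std::string ResolveCopy(const std::map<std::string, std::string> &copyDef,
                               std::string reg) {
  auto it = copyDef.find(reg);
  while (it != copyDef.end()) {
    reg = it->second;
    it = copyDef.find(reg);
  }
  return reg;
}

int main() {
  // v2 <- v1 ; v3 <- v2 ; use(v3)   ==>   use(v1)
  std::map<std::string, std::string> copyDef = {{"v2", "v1"}, {"v3", "v2"}};
  std::printf("use(%s)\n", ResolveCopy(copyDef, "v3").c_str());
  return 0;
}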
+ */ + +#include "loop.h" +#include "cg_prop.h" + +namespace maplebe { +void CGProp::DoCopyProp() { + CopyProp(); + cgDce->DoDce(); +} + +void CGProp::DoTargetProp() { + DoCopyProp(); + /* instruction level opt */ + FOR_ALL_BB(bb, cgFunc) { + FOR_BB_INSNS(insn, bb) { + if (!insn->IsMachineInstruction()) { + continue; + } + TargetProp(*insn); + } + } + /* pattern level opt */ + if (CGOptions::GetInstance().GetOptimizeLevel() == CGOptions::kLevel2) { + PropPatternOpt(); + } +} + +Insn *PropOptimizePattern::FindDefInsn(const VRegVersion *useVersion) const { + if (!useVersion) { + return nullptr; + } + DUInsnInfo *defInfo = useVersion->GetDefInsnInfo(); + if (!defInfo) { + return nullptr; + } + return defInfo->GetInsn(); +} + +bool CgCopyProp::PhaseRun(maplebe::CGFunc &f) { + CGSSAInfo *ssaInfo = GET_ANALYSIS(CgSSAConstruct, f); + LiveIntervalAnalysis *ll = GET_ANALYSIS(CGliveIntervalAnalysis, f); + CGProp *cgProp = f.GetCG()->CreateCGProp(*GetPhaseMemPool(), f, *ssaInfo, *ll); + cgProp->DoCopyProp(); + ll->ClearBFS(); + return false; +} +void CgCopyProp::GetAnalysisDependence(maple::AnalysisDep &aDep) const { + aDep.AddRequired(); + aDep.AddRequired(); + aDep.AddPreserved(); +} +MAPLE_TRANSFORM_PHASE_REGISTER_CANSKIP(CgCopyProp, cgcopyprop) + +bool CgTargetProp::PhaseRun(maplebe::CGFunc &f) { + CGSSAInfo *ssaInfo = GET_ANALYSIS(CgSSAConstruct, f); + LiveIntervalAnalysis *ll = GET_ANALYSIS(CGliveIntervalAnalysis, f); + CGProp *cgProp = f.GetCG()->CreateCGProp(*GetPhaseMemPool(), f, *ssaInfo, *ll); + cgProp->DoTargetProp(); + ll->ClearBFS(); + return false; +} +void CgTargetProp::GetAnalysisDependence(maple::AnalysisDep &aDep) const { + aDep.AddRequired(); + aDep.AddRequired(); + aDep.AddPreserved(); +} +MAPLE_TRANSFORM_PHASE_REGISTER_CANSKIP(CgTargetProp, cgtargetprop) +} diff --git a/src/mapleall/maple_be/src/cg/cg_rce.cpp b/src/mapleall/maple_be/src/cg/cg_rce.cpp new file mode 100644 index 0000000000000000000000000000000000000000..8479ba2a295e31327df41c32afcca239eaebd9e4 --- /dev/null +++ b/src/mapleall/maple_be/src/cg/cg_rce.cpp @@ -0,0 +1,42 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
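// [Editor's note — illustrative sketch, not part of this patch] cg_rce.cpp below
// registers the redundant-computation elimination phase: on SSA form it looks for
// pairs of instructions with an identical right-hand side (the "SameRHSInsnPair"
// reported by RedundantComputeElim::Dump) so the later one can simply reuse the
// earlier result. A local-value-numbering style sketch of that idea; ToyInsn and
// the canonical rhs strings are hypothetical, and the rewrite is only safe here
// because SSA guarantees the source operands are not redefined in between.
#include <cstdio>
#include <string>
#include <unordered_map>
#include <vector>

struct ToyInsn {
  std::string dst;
  std::string rhs;  // canonicalized opcode plus source operands
};

// Within one basic block, replace a repeated computation by a copy of the
// first instruction that produced the same rhs.
static void EliminateSameRhs(std::vector<ToyInsn> &bb) {
  std::unordered_map<std::string, std::string> firstDstForRhs;
  for (ToyInsn &insn : bb) {
    auto it = firstDstForRhs.find(insn.rhs);
    if (it == firstDstForRhs.end()) {
      firstDstForRhs.emplace(insn.rhs, insn.dst);
    } else {
      insn.rhs = "mov " + it->second;  // reuse the earlier value
    }
  }
}

int main() {
  std::vector<ToyInsn> bb = {{"v1", "add v0, #4"}, {"v2", "add v0, #4"}};
  EliminateSameRhs(bb);
  for (const ToyInsn &insn : bb) std::printf("%s = %s\n", insn.dst.c_str(), insn.rhs.c_str());
  return 0;
}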
+ */ +#include "cg_rce.h" + +namespace maplebe { +void RedundantComputeElim::Dump(const Insn *insn1, const Insn *insn2) const { + CHECK_FATAL(insn1 && insn2, "dump insn is null"); + LogInfo::MapleLogger() << ">>>>>> SameRHSInsnPair in BB(" << + insn1->GetBB()->GetId() << ") at {" << g_count << "} <<<<<<\n"; + insn1->Dump(); + insn2->Dump(); +} + +bool CgRedundantCompElim::PhaseRun(maplebe::CGFunc &f) { + CGSSAInfo *ssaInfo = GET_ANALYSIS(CgSSAConstruct, f); + CHECK_FATAL(ssaInfo != nullptr, "Get ssaInfo failed"); + MemPool *mp = GetPhaseMemPool(); + CHECK_FATAL(mp != nullptr, "get memPool failed"); + auto *rce = f.GetCG()->CreateRedundantCompElim(*mp, f, *ssaInfo); + CHECK_FATAL(rce != nullptr, "rce instance create failed"); + rce->Run(); + return true; +} + +void CgRedundantCompElim::GetAnalysisDependence(AnalysisDep &aDep) const { + aDep.AddRequired(); + aDep.AddPreserved(); +} +MAPLE_TRANSFORM_PHASE_REGISTER_CANSKIP(CgRedundantCompElim, cgredundantcompelim) +} /* namespace maplebe */ \ No newline at end of file diff --git a/src/mapleall/maple_be/src/cg/cg_ssa.cpp b/src/mapleall/maple_be/src/cg/cg_ssa.cpp new file mode 100644 index 0000000000000000000000000000000000000000..4b4aa8f09ef73946c965233149c60689094544e0 --- /dev/null +++ b/src/mapleall/maple_be/src/cg/cg_ssa.cpp @@ -0,0 +1,329 @@ +/* + * Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
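// [Editor's note — illustrative sketch, not part of this patch] cg_ssa.cpp below
// builds pruned SSA for the CG IR: CGSSAInfo::PrunedPhiInsertion places a phi for a
// virtual register at a dominance-frontier block only when the register is live-in
// there (phiBB->GetLiveIn()->TestBit), and iterates because the new phi is itself a
// definition; renaming then walks the dominator tree with a per-vreg version stack.
// The helper below shows just the pruned placement step on a toy CFG whose
// dominance frontiers and live-in set are given up front; PrunedPhiBlocks and the
// integer block ids are hypothetical simplifications.
#include <cstdio>
#include <set>
#include <vector>

// domFrontier[b] is the dominance frontier of block b; liveIn holds the blocks
// where the vreg is live on entry; defBlocks holds the blocks that define it.
static std::set<int> PrunedPhiBlocks(const std::vector<std::set<int>> &domFrontier,
                                     const std::set<int> &liveIn,
                                     std::set<int> defBlocks) {
  std::set<int> phiBlocks;
  std::vector<int> workList(defBlocks.begin(), defBlocks.end());
  while (!workList.empty()) {
    int bb = workList.back();
    workList.pop_back();
    for (int df : domFrontier[static_cast<size_t>(bb)]) {
      if (liveIn.count(df) == 0 || phiBlocks.count(df) != 0) {
        continue;  // prune: the vreg is dead here, or a phi was already placed
      }
      phiBlocks.insert(df);
      if (defBlocks.insert(df).second) {
        workList.push_back(df);  // the new phi is itself a definition
      }
    }
  }
  return phiBlocks;
}

int main() {
  // Diamond CFG: 0 -> {1, 2} -> 3; the vreg is defined in 1 and 2 and live into 3.
  std::vector<std::set<int>> domFrontier = {{}, {3}, {3}, {}};
  std::set<int> liveIn = {3};
  std::set<int> phiBlocks = PrunedPhiBlocks(domFrontier, liveIn, {1, 2});
  for (int bb : phiBlocks) std::printf("phi needed in BB%d\n", bb);  // BB3
  return 0;
}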
+ */ +#include "cg_ssa.h" +#include "cg.h" + +#include "optimize_common.h" + +namespace maplebe { +uint32 CGSSAInfo::ssaRegNObase = 100; +void CGSSAInfo::ConstructSSA() { + InsertPhiInsn(); + /* Rename variables */ + RenameVariablesForBB(domInfo->GetCommonEntryBB().GetId()); +#if DEBUG + /* Check phiListOpnd, must be ssaForm */ + FOR_ALL_BB(bb, cgFunc) { + FOR_BB_INSNS(insn, bb) { + if (!insn->IsPhi()) { + continue; + } + Operand &phiListOpnd = insn->GetOperand(kInsnSecondOpnd); + CHECK_FATAL(phiListOpnd.IsPhi(), "unexpect phi operand"); + MapleMap &phiList = static_cast(phiListOpnd).GetOperands(); + for (auto &phiOpndIt : phiList) { + if (!phiOpndIt.second->IsSSAForm()) { + CHECK_FATAL(false, "phiOperand is not ssaForm!"); + } + } + } + } +#endif + cgFunc->SetSSAvRegCount(static_cast(GetAllSSAOperands().size()) + ssaRegNObase + 1); + /* save reversePostOrder of bbs for rectify validbit */ + SetReversePostOrder(); +} + +void CGSSAInfo::MarkInsnsInSSA(Insn &insn) { + CHECK_FATAL(insn.GetId() == 0, "insn is not clean !!"); /* change to assert */ + insnCount += 2; + insn.SetId(static_cast(insnCount)); +} + +void CGSSAInfo::InsertPhiInsn() { + FOR_ALL_BB(bb, cgFunc) { + FOR_BB_INSNS(insn, bb) { + if (!insn->IsMachineInstruction()) { + continue; + } + std::set defRegNOs = insn->GetDefRegs(); + for (auto vRegNO : defRegNOs) { + RegOperand *virtualOpnd = cgFunc->GetVirtualRegisterOperand(vRegNO); + if (virtualOpnd != nullptr) { + PrunedPhiInsertion(*bb, *virtualOpnd); + } + } + } + } +} + +void CGSSAInfo::PrunedPhiInsertion(const BB &bb, RegOperand &virtualOpnd) { + regno_t vRegNO = virtualOpnd.GetRegisterNumber(); + MapleVector frontiers = domInfo->GetDomFrontier(bb.GetId()); + for (auto i : frontiers) { + BB *phiBB = cgFunc->GetBBFromID(i); + CHECK_FATAL(phiBB != nullptr, "get phiBB failed change to ASSERT"); + if (phiBB->HasPhiInsn(vRegNO)) { + continue; + } + if (phiBB->GetLiveIn()->TestBit(vRegNO)) { + CG *codeGen = cgFunc->GetCG(); + PhiOperand &phiList = codeGen->CreatePhiOperand(*memPool, ssaAlloc); + /* do not insert phi opnd when insert phi insn? */ + for (auto prevBB : phiBB->GetPreds()) { + if (prevBB->GetLiveOut()->TestBit(vRegNO)) { + auto *paraOpnd = static_cast(virtualOpnd.Clone(*tempMp)); + phiList.InsertOpnd(prevBB->GetId(), *paraOpnd); + } else { + CHECK_FATAL(false, "multipule BB in"); + } + } + Insn &phiInsn = codeGen->BuildPhiInsn(virtualOpnd, phiList); + MarkInsnsInSSA(phiInsn); + bool insertSuccess = false; + FOR_BB_INSNS(insn, phiBB) { + if (insn->IsMachineInstruction()) { + (void)phiBB->InsertInsnBefore(*insn, phiInsn); + insertSuccess = true; + break; + } + } + if (!insertSuccess) { + phiBB->InsertInsnBegin(phiInsn); + } + phiBB->AddPhiInsn(vRegNO, phiInsn); + PrunedPhiInsertion(*phiBB, virtualOpnd); + } + } +} + +void CGSSAInfo::RenameVariablesForBB(uint32 bbID) { + RenameBB(*cgFunc->GetBBFromID(bbID)); /* rename first BB */ + const auto &domChildren = domInfo->GetDomChildren(bbID); + for (const auto &child : domChildren) { + RenameBB(*cgFunc->GetBBFromID(child)); + } +} + +void CGSSAInfo::RenameBB(BB &bb) { + if (IsBBRenamed(bb.GetId())) { + return; + } + AddRenamedBB(bb.GetId()); + /* record version stack size */ + size_t tempSize = vRegStk.empty() ? 
allSSAOperands.size() + cgFunc->GetFirstMapleIrVRegNO() + 1 : + vRegStk.rbegin()->first + 1; + std::vector oriStackSize(tempSize, -1); + for (auto it : vRegStk) { + ASSERT(it.first < oriStackSize.size(), "out of range"); + oriStackSize[it.first] = static_cast(it.second.size()); + } + RenamePhi(bb); + FOR_BB_INSNS(insn, &bb) { + if (!insn->IsMachineInstruction()) { + continue; + } + MarkInsnsInSSA(*insn); + RenameInsn(*insn); + } + RenameSuccPhiUse(bb); + RenameVariablesForBB(bb.GetId()); + /* stack pop up */ + for (auto &it : vRegStk) { + if (it.first < oriStackSize.size() && oriStackSize[it.first] >= 0) { + while (static_cast(it.second.size()) > oriStackSize[static_cast(it.first)]) { + ASSERT(!it.second.empty(), "empty stack"); + it.second.pop(); + } + } + } +} + +void CGSSAInfo::RenamePhi(BB &bb) { + for (auto phiInsnIt : bb.GetPhiInsns()) { + Insn *phiInsn = phiInsnIt.second; + CHECK_FATAL(phiInsn != nullptr, "get phi insn failed"); + auto *phiDefOpnd = static_cast(&phiInsn->GetOperand(kInsnFirstOpnd)); + VRegVersion *newVst = CreateNewVersion(*phiDefOpnd, *phiInsn, kInsnFirstOpnd, true); + phiInsn->SetOperand(kInsnFirstOpnd, *newVst->GetSSAvRegOpnd()); + } +} + +void CGSSAInfo::RenameSuccPhiUse(const BB &bb) { + for (auto *sucBB : bb.GetSuccs()) { + for (auto phiInsnIt : sucBB->GetPhiInsns()) { + Insn *phiInsn = phiInsnIt.second; + CHECK_FATAL(phiInsn != nullptr, "get phi insn failed"); + Operand *phiListOpnd = &phiInsn->GetOperand(kInsnSecondOpnd); + CHECK_FATAL(phiListOpnd->IsPhi(), "unexpect phi operand"); + MapleMap &phiList = static_cast(phiListOpnd)->GetOperands(); + ASSERT(phiList.size() <= sucBB->GetPreds().size(), "unexpect phiList size need check"); + for (auto phiOpndIt = phiList.begin(); phiOpndIt != phiList.end(); ++phiOpndIt) { + if (phiOpndIt->first == bb.GetId()) { + RegOperand *renamedOpnd = GetRenamedOperand(*(phiOpndIt->second), false, *phiInsn, kInsnSecondOpnd); + phiList[phiOpndIt->first] = renamedOpnd; + } + } + } + } +} + +uint32 CGSSAInfo::IncreaseVregCount(regno_t vRegNO) { + if (vRegDefCount.count(vRegNO) == 0) { + vRegDefCount.emplace(vRegNO, 0); + } else { + vRegDefCount[vRegNO]++; + } + return vRegDefCount[vRegNO]; +} + +bool CGSSAInfo::IncreaseSSAOperand(regno_t vRegNO, VRegVersion *vst) { + if (allSSAOperands.count(vRegNO) != 0) { + return false; + } + allSSAOperands.emplace(vRegNO, vst); + return true; +} + +VRegVersion *CGSSAInfo::CreateNewVersion(RegOperand &virtualOpnd, Insn &defInsn, uint32 idx, bool isDefByPhi) { + regno_t vRegNO = virtualOpnd.GetRegisterNumber(); + uint32 verionIdx = IncreaseVregCount(vRegNO); + RegOperand *ssaOpnd = CreateSSAOperand(virtualOpnd); + auto *newVst = memPool->New(ssaAlloc, *ssaOpnd, verionIdx, vRegNO); + auto *defInfo = CreateDUInsnInfo(&defInsn, idx); + newVst->SetDefInsn(defInfo, isDefByPhi ? kDefByPhi : kDefByInsn); + if (!IncreaseSSAOperand(ssaOpnd->GetRegisterNumber(), newVst)) { + CHECK_FATAL(false, "insert ssa operand failed"); + } + auto it = vRegStk.find(vRegNO); + if (it == vRegStk.end()) { + MapleStack vRegVersionStack(ssaAlloc.Adapter()); + auto ret = vRegStk.emplace(std::pair>(vRegNO, vRegVersionStack)); + CHECK_FATAL(ret.second, "insert failed"); + it = ret.first; + } + it->second.push(newVst); + return newVst; +} + +VRegVersion *CGSSAInfo::GetVersion(const RegOperand &virtualOpnd) { + regno_t vRegNO = virtualOpnd.GetRegisterNumber(); + auto vRegIt = vRegStk.find(vRegNO); + return vRegIt != vRegStk.end() ? 
vRegIt->second.top() : nullptr; +} + +VRegVersion *CGSSAInfo::FindSSAVersion(regno_t ssaRegNO) { + auto it = allSSAOperands.find(ssaRegNO); + return it != allSSAOperands.end() ? it->second : nullptr; +} + +PhiOperand &CGSSAInfo::CreatePhiOperand() { + return cgFunc->GetCG()->CreatePhiOperand(*memPool, ssaAlloc); +} + +void CGSSAInfo::SetReversePostOrder() { + MapleVector &reverse = domInfo->GetReversePostOrder(); + for (auto *bb : reverse) { + if (bb != nullptr) { + reversePostOrder.emplace_back(bb->GetId()); + } + } +} + +Insn *CGSSAInfo::GetDefInsn(const RegOperand &useReg) { + if (!useReg.IsSSAForm()) { + return nullptr; + } + regno_t useRegNO = useReg.GetRegisterNumber(); + VRegVersion *useVersion = FindSSAVersion(useRegNO); + ASSERT(useVersion != nullptr, "useVRegVersion must not be null based on ssa"); + CHECK_FATAL(!useVersion->IsDeleted(), "deleted version"); + DUInsnInfo *defInfo = useVersion->GetDefInsnInfo(); + return defInfo == nullptr ? nullptr : defInfo->GetInsn(); +} + +void CGSSAInfo::DumpFuncCGIRinSSAForm() const { + LogInfo::MapleLogger() << "\n****** SSA CGIR for " << cgFunc->GetName() << " *******\n"; + FOR_ALL_BB_CONST(bb, cgFunc) { + cgFunc->DumpBBInfo(bb); + FOR_BB_INSNS_CONST(insn, bb) { + if (insn->IsCfiInsn() && insn->IsDbgInsn()) { + insn->Dump(); + } else { + DumpInsnInSSAForm(*insn); + } + } + } +} + +void VRegVersion::AddUseInsn(CGSSAInfo &ssaInfo, Insn &useInsn, uint32 idx) { + ASSERT(useInsn.GetId() > 0, "insn should be marked during ssa"); + auto useInsnIt = useInsnInfos.find(useInsn.GetId()); + if (useInsnIt != useInsnInfos.end()) { + useInsnIt->second->IncreaseDU(idx); + } else { + useInsnInfos.insert(std::make_pair(useInsn.GetId(), ssaInfo.CreateDUInsnInfo(&useInsn, idx))); + } +} + +void VRegVersion::RemoveUseInsn(const Insn &useInsn, uint32 idx) { + auto useInsnIt = useInsnInfos.find(useInsn.GetId()); + ASSERT(useInsnIt != useInsnInfos.end(), "use Insn not found"); + useInsnIt->second->DecreaseDU(idx); + if (useInsnIt->second->HasNoDU()) { + useInsnInfos.erase(useInsnIt); + } +} + +void VRegVersion::CheckDeadUse(const Insn &useInsn) { + auto useInsnIt = useInsnInfos.find(useInsn.GetId()); + ASSERT(useInsnIt != useInsnInfos.end(), "use Insn not found"); + if (useInsnIt->second->HasNoDU()) { + useInsnInfos.erase(useInsnIt); + } +} + +void CgSSAConstruct::GetAnalysisDependence(maple::AnalysisDep &aDep) const { + aDep.AddRequired(); + aDep.AddRequired(); + aDep.PreservedAllExcept(); +} + +bool CgSSAConstruct::PhaseRun(maplebe::CGFunc &f) { + if (CG_DEBUG_FUNC(f)) { + DotGenerator::GenerateDot("beforessa", f, f.GetMirModule(), true); + } + MemPool *ssaMemPool = GetPhaseMemPool(); + MemPool *ssaTempMp = ApplyTempMemPool(); + DomAnalysis *domInfo = nullptr; + domInfo = GET_ANALYSIS(CgDomAnalysis, f); + LiveAnalysis *liveInfo = nullptr; + liveInfo = GET_ANALYSIS(CgLiveAnalysis, f); + ssaInfo = f.GetCG()->CreateCGSSAInfo(*ssaMemPool, f, *domInfo, *ssaTempMp); + ssaInfo->ConstructSSA(); + + if (CG_DEBUG_FUNC(f)) { + LogInfo::MapleLogger() << "******** CG IR After ssaconstruct in ssaForm: *********" << "\n"; + ssaInfo->DumpFuncCGIRinSSAForm(); + } + if (liveInfo != nullptr) { + liveInfo->ClearInOutDataInfo(); + } + /* due to change of register number */ + GetAnalysisInfoHook()->ForceEraseAnalysisPhase(f.GetUniqueID(), &CgLiveAnalysis::id); + return true; +} +MAPLE_ANALYSIS_PHASE_REGISTER(CgSSAConstruct, cgssaconstruct) /* both transform & analysis */ +} diff --git a/src/mapleall/maple_be/src/cg/cg_ssa_pre.cpp b/src/mapleall/maple_be/src/cg/cg_ssa_pre.cpp new file 
mode 100644 index 0000000000000000000000000000000000000000..30e92d384122922eb234883ebe3beb9b1a2c9cb9 --- /dev/null +++ b/src/mapleall/maple_be/src/cg/cg_ssa_pre.cpp @@ -0,0 +1,602 @@ +/* + * Copyright (c) [2022] Futureweiwei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "cgfunc.h" +#include "loop.h" +#include "cg_ssa_pre.h" + +namespace maplebe { + +// ================ Step 6: Code Motion ================ +void SSAPre::CodeMotion() { + // pass 1 only doing insertion + for (Occ *occ : allOccs) { + if (occ->occTy != kAOccPhiOpnd) { + continue; + } + PhiOpndOcc *phiOpndOcc = static_cast(occ); + if (phiOpndOcc->insertHere) { + ASSERT(phiOpndOcc->cgbb->GetLoop() == nullptr, "cg_ssapre: save inserted inside loop"); + workCand->saveAtEntryBBs.insert(phiOpndOcc->cgbb->GetId()); + } + } + // pass 2 only doing deletion + for (Occ *occ : realOccs) { + if (occ->occTy != kAOccReal) { + continue; + } + RealOcc *realOcc = static_cast(occ); + if (!realOcc->redundant) { + ASSERT(realOcc->cgbb->GetLoop() == nullptr, "cg_ssapre: save in place inside loop"); + workCand->saveAtEntryBBs.insert(realOcc->cgbb->GetId()); + } + } + if (enabledDebug) { + LogInfo::MapleLogger() << " _______ output _______" << '\n'; + LogInfo::MapleLogger() << " saveAtEntryBBs: ["; + for (uint32 id : workCand->saveAtEntryBBs) { + LogInfo::MapleLogger() << id << " "; + } + LogInfo::MapleLogger() << "]\n\n"; + } +} + +// ================ Step 5: Finalize ================ +// for setting RealOcc's redundant flag and PhiOpndOcc's insertHere flag +void SSAPre::Finalize() { + std::vector availDefVec(classCount + 1, nullptr); + // preorder traversal of dominator tree + for (Occ *occ : allOccs) { + size_t classId = static_cast(occ->classId); + switch (occ->occTy) { + case kAOccPhi: { + PhiOcc *phiOcc = static_cast(occ); + if (phiOcc->WillBeAvail()) { + availDefVec[classId] = phiOcc; + } + break; + } + case kAOccReal: { + RealOcc *realOcc = static_cast(occ); + if (availDefVec[classId] == nullptr || !availDefVec[classId]->IsDominate(dom, occ)) { + realOcc->redundant = false; + availDefVec[classId] = realOcc; + } else { + realOcc->redundant = true; + } + break; + } + case kAOccPhiOpnd: { + PhiOpndOcc *phiOpndOcc = static_cast(occ); + const PhiOcc *phiOcc = phiOpndOcc->defPhiOcc; + if (phiOcc->WillBeAvail()) { + if (phiOpndOcc->def == nullptr || (!phiOpndOcc->hasRealUse && + phiOpndOcc->def->occTy == kAOccPhi && + !static_cast(phiOpndOcc->def)->WillBeAvail())) { + // insert a store + if (phiOpndOcc->cgbb->GetSuccs().size() != 1) { // critical edge + workCand->saveAtProlog = true; + break; + } + phiOpndOcc->insertHere = true; + } else { + phiOpndOcc->def = availDefVec[classId]; + } + } + break; + } + case kAOccExit: + break; + default: + ASSERT(false, "Finalize: unexpected occ type"); + break; + } + if (workCand->saveAtProlog) { + break; + } + } + if (enabledDebug) { + LogInfo::MapleLogger() << " _______ after finalize _______" << '\n'; + if (workCand->saveAtProlog) { + LogInfo::MapleLogger() << "Giving up because of insertion at critical edge" << 
'\n'; + return; + } + for (Occ *occ : allOccs) { + if (occ->occTy == kAOccReal) { + RealOcc *realOcc = static_cast(occ); + if (!realOcc->redundant) { + occ->Dump(); + LogInfo::MapleLogger() << " non-redundant" << '\n'; + } + } else if (occ->occTy == kAOccPhiOpnd) { + PhiOpndOcc *phiOpndOcc = static_cast(occ); + if (phiOpndOcc->insertHere) { + occ->Dump(); + LogInfo::MapleLogger() << " insertHere" << '\n'; + } + } + } + } +} + +// ================ Step 4: WillBeAvail Computation ================ + +void SSAPre::ResetCanBeAvail(PhiOcc *phi) const { + phi->isCanBeAvail = false; + // the following loop finds phi's uses and reset them + for (PhiOcc *phiOcc : phiOccs) { + for (PhiOpndOcc *phiOpndOcc : phiOcc->phiOpnds) { + if (phiOpndOcc->def != nullptr && phiOpndOcc->def == phi) { + if (!phiOpndOcc->hasRealUse && !phiOcc->isDownsafe && phiOcc->isCanBeAvail) { + ResetCanBeAvail(phiOcc); + } + } + } + } +} + +void SSAPre::ComputeCanBeAvail() const { + for (PhiOcc *phiOcc : phiOccs) { + if (!phiOcc->isDownsafe && phiOcc->isCanBeAvail) { + bool existNullUse = false; + for (PhiOpndOcc *phiOpndOcc : phiOcc->phiOpnds) { + if (phiOpndOcc->def == nullptr) { + existNullUse = true; + break; + } + } + if (existNullUse) { + ResetCanBeAvail(phiOcc); + } + } + } +} + +void SSAPre::ResetLater(PhiOcc *phi) const { + phi->isLater = false; + // the following loop finds phi's uses and reset them + for (PhiOcc *phiOcc : phiOccs) { + for (PhiOpndOcc *phiOpndOcc : phiOcc->phiOpnds) { + if (phiOpndOcc->def != nullptr && phiOpndOcc->def == phi) { + if (phiOcc->isLater) { + ResetLater(phiOcc); + } + } + } + } +} + +void SSAPre::ComputeLater() const { + for (PhiOcc *phiOcc : phiOccs) { + phiOcc->isLater = phiOcc->isCanBeAvail; + } + for (PhiOcc *phiOcc : phiOccs) { + if (phiOcc->isLater) { + bool existNonNullUse = false; + for (PhiOpndOcc *phiOpndOcc : phiOcc->phiOpnds) { + if (phiOpndOcc->def != nullptr && phiOpndOcc->hasRealUse) { + existNonNullUse = true; + break; + } + } + if (existNonNullUse || phiOcc->speculativeDownsafe) { + ResetLater(phiOcc); + } + } + } + if (enabledDebug) { + LogInfo::MapleLogger() << " _______ after later computation _______" << '\n'; + for (PhiOcc *phiOcc : phiOccs) { + phiOcc->Dump(); + if (phiOcc->isCanBeAvail) { + LogInfo::MapleLogger() << " canbeAvail"; + } + if (phiOcc->isLater) { + LogInfo::MapleLogger() << " later"; + } + if (phiOcc->isCanBeAvail && !phiOcc->isLater) { + LogInfo::MapleLogger() << " will be Avail"; + } + LogInfo::MapleLogger() << '\n'; + } + } +} + +// ================ Step 3: Downsafe Computation ================ +void SSAPre::ResetDownsafe(const PhiOpndOcc *phiOpnd) const { + if (phiOpnd->hasRealUse) { + return; + } + Occ *defOcc = phiOpnd->def; + if (defOcc == nullptr || defOcc->occTy != kAOccPhi) { + return; + } + PhiOcc *defPhiOcc = static_cast(defOcc); + if (defPhiOcc->speculativeDownsafe) { + return; + } + if (!defPhiOcc->isDownsafe) { + return; + } + defPhiOcc->isDownsafe = false; + for (PhiOpndOcc *phiOpndOcc : defPhiOcc->phiOpnds) { + ResetDownsafe(phiOpndOcc); + } +} + +void SSAPre::ComputeDownsafe() const { + for (PhiOcc *phiOcc : phiOccs) { + if (!phiOcc->isDownsafe) { + // propagate not-Downsafe backward along use-def edges + for (PhiOpndOcc *phiOpndOcc : phiOcc->phiOpnds) { + ResetDownsafe(phiOpndOcc); + } + } + } + if (enabledDebug) { + LogInfo::MapleLogger() << " _______ after downsafe computation _______" << '\n'; + for (PhiOcc *phiOcc : phiOccs) { + phiOcc->Dump(); + if (phiOcc->speculativeDownsafe) { + LogInfo::MapleLogger() << " spec_downsafe /"; + 
} + if (phiOcc->isDownsafe) { + LogInfo::MapleLogger() << " downsafe"; + } + LogInfo::MapleLogger() << '\n'; + } + } +} + +// ================ Step 2: rename ================ +static void PropagateSpeculativeDownsafe(PhiOcc *phiOcc) { + if (phiOcc->speculativeDownsafe) { + return; + } + phiOcc->isDownsafe = true; + phiOcc->speculativeDownsafe = true; + for (PhiOpndOcc *phiOpndOcc : phiOcc->phiOpnds) { + if (phiOpndOcc->def != nullptr && phiOpndOcc->def->occTy == kAOccPhi) { + PhiOcc *nextPhiOcc = static_cast(phiOpndOcc->def); + if (nextPhiOcc->cgbb->GetLoop() != nullptr) { + PropagateSpeculativeDownsafe(nextPhiOcc); + } + } + } +} + +void SSAPre::Rename() { + std::stack occStack; + classCount = 0; + // iterate thru the occurrences in order of preorder traversal of dominator + // tree + for (Occ *occ : allOccs) { + while (!occStack.empty() && !occStack.top()->IsDominate(dom, occ)) { + occStack.pop(); + } + switch (occ->occTy) { + case kAOccExit: + if (!occStack.empty()) { + Occ *topOcc = occStack.top(); + if (topOcc->occTy == kAOccPhi) { + PhiOcc *phiTopOcc = static_cast(topOcc); + if (!phiTopOcc->speculativeDownsafe) { + phiTopOcc->isDownsafe = false; + } + } + } + break; + case kAOccPhi: + // assign new class + occ->classId = ++classCount; + occStack.push(occ); + break; + case kAOccReal: { + if (occStack.empty()) { + // assign new class + occ->classId = ++classCount; + occStack.push(occ); + break; + } + Occ *topOcc = occStack.top(); + occ->classId = topOcc->classId; + if (topOcc->occTy == kAOccPhi) { + occStack.push(occ); + if (occ->cgbb->GetLoop() != nullptr) { + static_cast(topOcc)->isDownsafe = true; + static_cast(topOcc)->speculativeDownsafe = true; + } + } + break; + } + case kAOccPhiOpnd: { + if (occStack.empty()) { + // leave classId as 0 + break; + } + Occ *topOcc = occStack.top(); + occ->def = topOcc; + occ->classId = topOcc->classId; + if (topOcc->occTy == kAOccReal) { + static_cast(occ)->hasRealUse = true; + } + break; + } + default: + ASSERT(false, "Rename: unexpected type of occurrence"); + break; + } + } + // loop thru phiOccs to propagate speculativeDownsafe + for (PhiOcc *phiOcc : phiOccs) { + if (phiOcc->speculativeDownsafe) { + for (PhiOpndOcc *phiOpndOcc : phiOcc->phiOpnds) { + if (phiOpndOcc->def != nullptr && phiOpndOcc->def->occTy == kAOccPhi) { + PhiOcc *nextPhiOcc = static_cast(phiOpndOcc->def); + if (nextPhiOcc->cgbb->GetLoop() != nullptr) { + PropagateSpeculativeDownsafe(nextPhiOcc); + } + } + } + } + } + if (enabledDebug) { + LogInfo::MapleLogger() << " _______ after rename _______" << '\n'; + for (Occ *occ : allOccs) { + occ->Dump(); + if (occ->occTy == kAOccPhi) { + PhiOcc *phiOcc = static_cast(occ); + if (phiOcc->speculativeDownsafe) { + LogInfo::MapleLogger() << " spec_downsafe /"; + } + } + LogInfo::MapleLogger() << '\n'; + } + } +} + +// ================ Step 1: insert phis ================ + +// form pih occ based on the real occ in workCand->realOccs; result is +// stored in phiDfns +void SSAPre::FormPhis() { + for (Occ *occ : realOccs) { + GetIterDomFrontier(occ->cgbb, &phiDfns); + } +} + +// form allOccs inclusive of real, phi, phiOpnd, exit occurrences; +// form phiOccs containing only the phis +void SSAPre::CreateSortedOccs() { + // form phiOpnd occs based on the preds of the phi occs; result is + // stored in phiOpndDfns + std::multiset phiOpndDfns; + for (uint32 dfn : phiDfns) { + const BBId bbId = dom->GetDtPreOrderItem(dfn); + BB *cgbb = cgFunc->GetAllBBs()[bbId]; + for (BB *pred : cgbb->GetPreds()) { + 
(void)phiOpndDfns.insert(dom->GetDtDfnItem(pred->GetId())); + } + } + std::unordered_map> bb2PhiOpndMap; + MapleVector::iterator realOccIt = realOccs.begin(); + MapleVector::iterator exitOccIt = exitOccs.begin(); + MapleSet::iterator phiDfnIt = phiDfns.begin(); + MapleSet::iterator phiOpndDfnIt = phiOpndDfns.begin(); + Occ *nextRealOcc = nullptr; + if (realOccIt != realOccs.end()) { + nextRealOcc = *realOccIt; + } + ExitOcc *nextExitOcc = nullptr; + if (exitOccIt != exitOccs.end()) { + nextExitOcc = *exitOccIt; + } + PhiOcc *nextPhiOcc = nullptr; + if (phiDfnIt != phiDfns.end()) { + nextPhiOcc = preMp->New(cgFunc->GetAllBBs().at(dom->GetDtPreOrderItem(*phiDfnIt)), preAllocator); + } + PhiOpndOcc *nextPhiOpndOcc = nullptr; + if (phiOpndDfnIt != phiOpndDfns.end()) { + nextPhiOpndOcc = preMp->New(cgFunc->GetAllBBs().at(dom->GetDtPreOrderItem(*phiOpndDfnIt))); + auto it = bb2PhiOpndMap.find(dom->GetDtPreOrderItem(*phiOpndDfnIt)); + if (it == bb2PhiOpndMap.end()) { + std::forward_list newlist = { nextPhiOpndOcc }; + bb2PhiOpndMap[dom->GetDtPreOrderItem(*phiOpndDfnIt)] = newlist; + } else { + it->second.push_front(nextPhiOpndOcc); + } + } + Occ *pickedOcc = nullptr; // the next picked occ in order of preorder traversal of dominator tree + do { + pickedOcc = nullptr; + if (nextPhiOcc != nullptr) { + pickedOcc = nextPhiOcc; + } + if (nextRealOcc != nullptr && (pickedOcc == nullptr || dom->GetDtDfnItem(nextRealOcc->cgbb->GetId()) < + dom->GetDtDfnItem(pickedOcc->cgbb->GetId()))) { + pickedOcc = nextRealOcc; + } + if (nextPhiOpndOcc != nullptr && + (pickedOcc == nullptr || *phiOpndDfnIt < dom->GetDtDfnItem(pickedOcc->cgbb->GetId()))) { + pickedOcc = nextPhiOpndOcc; + } + if (nextExitOcc != nullptr && (pickedOcc == nullptr || dom->GetDtDfnItem(nextExitOcc->cgbb->GetId()) < + dom->GetDtDfnItem(pickedOcc->cgbb->GetId()))) { + pickedOcc = nextExitOcc; + } + if (pickedOcc != nullptr) { + allOccs.push_back(pickedOcc); + switch (pickedOcc->occTy) { + case kAOccReal: { + // get the next real occ + CHECK_FATAL(realOccIt != realOccs.end(), "iterator check"); + ++realOccIt; + if (realOccIt != realOccs.end()) { + nextRealOcc = *realOccIt; + } else { + nextRealOcc = nullptr; + } + break; + } + case kAOccExit: { + CHECK_FATAL(exitOccIt != exitOccs.end(), "iterator check"); + ++exitOccIt; + if (exitOccIt != exitOccs.end()) { + nextExitOcc = *exitOccIt; + } else { + nextExitOcc = nullptr; + } + break; + } + case kAOccPhi: { + phiOccs.push_back(static_cast(pickedOcc)); + CHECK_FATAL(phiDfnIt != phiDfns.end(), "iterator check"); + ++phiDfnIt; + if (phiDfnIt != phiDfns.end()) { + nextPhiOcc = preMp->New(cgFunc->GetAllBBs().at(dom->GetDtPreOrderItem(*phiDfnIt)), preAllocator); + } else { + nextPhiOcc = nullptr; + } + break; + } + case kAOccPhiOpnd: { + CHECK_FATAL(phiOpndDfnIt != phiOpndDfns.end(), "iterator check"); + ++phiOpndDfnIt; + if (phiOpndDfnIt != phiOpndDfns.end()) { + nextPhiOpndOcc = preMp->New(cgFunc->GetAllBBs().at(dom->GetDtPreOrderItem(*phiOpndDfnIt))); + auto it = bb2PhiOpndMap.find(dom->GetDtPreOrderItem(*phiOpndDfnIt)); + if (it == bb2PhiOpndMap.end()) { + std::forward_list newlist = { nextPhiOpndOcc }; + bb2PhiOpndMap[dom->GetDtPreOrderItem(*phiOpndDfnIt)] = newlist; + } else { + it->second.push_front(nextPhiOpndOcc); + } + } else { + nextPhiOpndOcc = nullptr; + } + break; + } + default: + ASSERT(false, "CreateSortedOccs: unexpected occTy"); + break; + } + } + } while (pickedOcc != nullptr); + // initialize phiOpnd vector in each PhiOcc node and defPhiOcc in each PhiOpndOcc + for (PhiOcc *phiOcc : 
phiOccs) { + for (BB *pred : phiOcc->cgbb->GetPreds()) { + PhiOpndOcc *phiOpndOcc = bb2PhiOpndMap[pred->GetId()].front(); + phiOcc->phiOpnds.push_back(phiOpndOcc); + phiOpndOcc->defPhiOcc = phiOcc; + bb2PhiOpndMap[pred->GetId()].pop_front(); + } + } + if (enabledDebug) { + LogInfo::MapleLogger() << " _______ after phi insertion _______" << '\n'; + for (Occ *occ : allOccs) { + occ->Dump(); + LogInfo::MapleLogger() << '\n'; + } + } +} + +// ================ Step 0: Preparations ================ + +void SSAPre::PropagateNotAnt(BB *bb, std::set *visitedBBs) { + if (visitedBBs->count(bb) != 0) { + return; + } + visitedBBs->insert(bb); + if (workCand->occBBs.count(bb->GetId()) != 0) { + return; + } + fullyAntBBs[bb->GetId()] = false; + for (BB *predbb : bb->GetPreds()) { + PropagateNotAnt(predbb, visitedBBs); + } +} + +void SSAPre::FormRealsNExits() { + std::set visitedBBs; + if (asEarlyAsPossible) { + for (BB *cgbb : cgFunc->GetExitBBsVec()) { + if (!cgbb->IsUnreachable()) { + PropagateNotAnt(cgbb, &visitedBBs); + } + } + } + + for (uint32 i = 0; i < dom->GetDtPreOrderSize(); i++) { + BBId bbid = dom->GetDtPreOrderItem(i); + BB *cgbb = cgFunc->GetAllBBs()[bbid]; + if (asEarlyAsPossible) { + if (fullyAntBBs[cgbb->GetId()]) { + RealOcc *realOcc = preMp->New(cgbb); + realOccs.push_back(realOcc); + } + } else { + if (workCand->occBBs.count(cgbb->GetId()) != 0) { + RealOcc *realOcc = preMp->New(cgbb); + realOccs.push_back(realOcc); + } + } + if (!cgbb->IsUnreachable() && (cgbb->NumSuccs() == 0 || cgbb->GetKind() == BB::kBBReturn)) { + ExitOcc *exitOcc = preMp->New(cgbb); + exitOccs.push_back(exitOcc); + } + } + if (enabledDebug) { + LogInfo::MapleLogger() << "Placement Optimization for callee-save saves" << '\n'; + LogInfo::MapleLogger() << "-----------------------------------------------" << '\n'; + LogInfo::MapleLogger() << " _______ input _______" << '\n'; + LogInfo::MapleLogger() << " occBBs: ["; + for (uint32 id : workCand->occBBs) { + LogInfo::MapleLogger() << id << " "; + } + LogInfo::MapleLogger() << "]\n"; + } +} + +void SSAPre::ApplySSAPre() { + FormRealsNExits(); + // #1 insert phis; results in allOccs and phiOccs + FormPhis(); // result put in the set phi_bbs + CreateSortedOccs(); + // #2 rename + Rename(); + if (!phiOccs.empty()) { + // #3 DownSafety + ComputeDownsafe(); + // #4 CanBeAvail + ComputeCanBeAvail(); + ComputeLater(); + } + // #5 Finalize + Finalize(); + if (!workCand->saveAtProlog) { + // #6 Code Motion + CodeMotion(); + } +} + +void DoSavePlacementOpt(CGFunc *f, DomAnalysis *dom, SsaPreWorkCand *workCand) { + MemPool *tempMP = memPoolCtrler.NewMemPool("cg_ssa_pre", true); + SSAPre cgssapre(f, dom, tempMP, workCand, false/*asEarlyAsPossible*/, false/*enabledDebug*/); + + cgssapre.ApplySSAPre(); + + memPoolCtrler.DeleteMemPool(tempMP); +} + +} // namespace maplebe diff --git a/src/mapleall/maple_be/src/cg/cg_ssu_pre.cpp b/src/mapleall/maple_be/src/cg/cg_ssu_pre.cpp new file mode 100644 index 0000000000000000000000000000000000000000..65e5529a8e4306e85d6b7aadffe8d784ba047e40 --- /dev/null +++ b/src/mapleall/maple_be/src/cg/cg_ssu_pre.cpp @@ -0,0 +1,603 @@ +/* + * Copyright (c) [2021] Futurewei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
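// [Editor's note — illustrative sketch, not part of this patch] cg_ssu_pre.cpp
// below is the dual of the SSAPRE pass above, run on the post-dominator tree to
// sink callee-save restores as late as possible. One of its preparation steps,
// SSUPre::PropagateNotAvail, walks forward from the entry and marks every block
// reachable without crossing a block that saves or uses the register; such blocks
// are excluded when the real (restore) occurrences are formed. A stand-alone
// rendering of that walk; the integer-id CFG and set names are hypothetical
// simplifications of workCand->occBBs / saveBBs and fullyAvailBBs.
#include <cstdio>
#include <set>
#include <vector>

// succs[b] lists the successor block ids of block b; hasOccOrSave holds the
// blocks that already save or use the callee-saved register.
static void PropagateNotAvail(int bb, const std::vector<std::vector<int>> &succs,
                              const std::set<int> &hasOccOrSave,
                              std::set<int> &notAvail) {
  if (notAvail.count(bb) != 0 || hasOccOrSave.count(bb) != 0) {
    return;  // already visited, or availability begins at this save/occurrence block
  }
  notAvail.insert(bb);
  for (int succ : succs[static_cast<size_t>(bb)]) {
    PropagateNotAvail(succ, succs, hasOccOrSave, notAvail);
  }
}

int main() {
  // 0 -> {1, 2}, 1 -> 2; the register is saved/used only in block 1.
  std::vector<std::vector<int>> succs = {{1, 2}, {2}, {}};
  std::set<int> hasOccOrSave = {1};
  std::set<int> notAvail;
  PropagateNotAvail(0, succs, hasOccOrSave, notAvail);
  for (int bb : notAvail) std::printf("BB%d is not fully available\n", bb);  // BB0, BB2
  return 0;
}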
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "cgfunc.h" +#include "cg_ssu_pre.h" + +namespace maplebe { + +// ================ Step 6: Code Motion ================ +void SSUPre::CodeMotion() { + // pass 1 only donig insertion + for (SOcc *occ : allOccs) { + if (occ->occTy != kSOccLambdaRes) { + continue; + } + SLambdaResOcc *lambdaResOcc = static_cast(occ); + if (lambdaResOcc->insertHere) { + workCand->restoreAtEntryBBs.insert(lambdaResOcc->cgbb->GetId()); + } + } + // pass 2 only doing deletion + for (SOcc *occ : realOccs) { + if (occ->occTy != kSOccReal) { + continue; + } + SRealOcc *realOcc = static_cast(occ); + if (!realOcc->redundant) { + if (realOcc->cgbb->IsWontExit()) { + workCand->restoreAtEpilog = true; + break; + } + workCand->restoreAtExitBBs.insert(realOcc->cgbb->GetId()); + } + } + if (enabledDebug) { + if (workCand->restoreAtEpilog) { + LogInfo::MapleLogger() << "Giving up because of restore inside infinite loop" << '\n'; + return; + } + LogInfo::MapleLogger() << " _______ output _______" << '\n'; + LogInfo::MapleLogger() << " restoreAtEntryBBs: ["; + for (uint32 id : workCand->restoreAtEntryBBs) { + LogInfo::MapleLogger() << id << " "; + } + LogInfo::MapleLogger() << "]\n restoreAtExitBBs: ["; + for (uint32 id : workCand->restoreAtExitBBs) { + LogInfo::MapleLogger() << id << " "; + } + LogInfo::MapleLogger() << "]\n\n"; + } +} + +// ================ Step 5: Finalize ================ +// for setting SRealOcc's redundant flag and SLambdaResOcc's insertHere flag +void SSUPre::Finalize() { + std::vector anticipatedDefVec(classCount + 1, nullptr); + // preorder traversal of post-dominator tree + for (SOcc *occ : allOccs) { + size_t classId = static_cast(occ->classId); + switch (occ->occTy) { + case kSOccLambda: { + auto *lambdaOcc = static_cast(occ); + if (lambdaOcc->WillBeAnt()) { + anticipatedDefVec[classId] = lambdaOcc; + } + break; + } + case kSOccReal: { + auto *realOcc = static_cast(occ); + if (anticipatedDefVec[classId] == nullptr || !anticipatedDefVec[classId]->IsPostDominate(pdom, occ)) { + realOcc->redundant = false; + anticipatedDefVec[classId] = realOcc; + } else { + realOcc->redundant = true; + } + break; + } + case kSOccLambdaRes: { + auto *lambdaResOcc = static_cast(occ); + const SLambdaOcc *lambdaOcc = lambdaResOcc->useLambdaOcc; + if (lambdaOcc->WillBeAnt()) { + if (lambdaResOcc->use == nullptr || (!lambdaResOcc->hasRealUse && + lambdaResOcc->use->occTy == kSOccLambda && + !static_cast(lambdaResOcc->use)->WillBeAnt())) { + // insert a store + if (lambdaResOcc->cgbb->GetPreds().size() != 1) { // critical edge + workCand->restoreAtEpilog = true; + break; + } + lambdaResOcc->insertHere = true; + } else { + lambdaResOcc->use = anticipatedDefVec[classId]; + } + } + break; + } + case kSOccEntry: + case kSOccKill: + break; + default: + ASSERT(false, "Finalize: unexpected occ type"); + break; + } + if (workCand->restoreAtEpilog) { + break; + } + } + if (enabledDebug) { + LogInfo::MapleLogger() << " _______ after finalize _______" << '\n'; + if (workCand->restoreAtEpilog) { + LogInfo::MapleLogger() << "Giving up because of insertion at critical edge" << '\n'; + return; + } + for (SOcc *occ : allOccs) { + if (occ->occTy == kSOccReal) { + SRealOcc *realOcc 
= static_cast(occ); + if (!realOcc->redundant) { + occ->Dump(); + LogInfo::MapleLogger() << " non-redundant" << '\n'; + } + } else if (occ->occTy == kSOccLambdaRes) { + SLambdaResOcc *lambdaResOcc = static_cast(occ); + if (lambdaResOcc->insertHere) { + occ->Dump(); + LogInfo::MapleLogger() << " insertHere" << '\n'; + } + } + } + } +} + +// ================ Step 4: WillBeAnt Computation ================ + +void SSUPre::ResetCanBeAnt(SLambdaOcc *lambda) const { + lambda->isCanBeAnt = false; + // the following loop finds lambda's defs and reset them + for (SLambdaOcc *lambdaOcc : lambdaOccs) { + for (SLambdaResOcc *lambdaResOcc : lambdaOcc->lambdaRes) { + if (lambdaResOcc->use != nullptr && lambdaResOcc->use == lambda) { + if (!lambdaResOcc->hasRealUse && !lambdaOcc->isUpsafe && lambdaOcc->isCanBeAnt) { + ResetCanBeAnt(lambdaOcc); + } + } + } + } +} + +void SSUPre::ComputeCanBeAnt() const { + for (SLambdaOcc *lambdaOcc : lambdaOccs) { + if (!lambdaOcc->isUpsafe && lambdaOcc->isCanBeAnt) { + bool existNullUse = false; + for (SLambdaResOcc *lambdaResOcc : lambdaOcc->lambdaRes) { + if (lambdaResOcc->use == nullptr) { + existNullUse = true; + break; + } + } + if (existNullUse) { + ResetCanBeAnt(lambdaOcc); + } + } + } +} + +void SSUPre::ResetEarlier(SLambdaOcc *lambda) const { + lambda->isEarlier = false; + // the following loop finds lambda's defs and reset them + for (SLambdaOcc *lambdaOcc : lambdaOccs) { + for (SLambdaResOcc *lambdaResOcc : lambdaOcc->lambdaRes) { + if (lambdaResOcc->use != nullptr && lambdaResOcc->use == lambda) { + if (lambdaOcc->isEarlier) { + ResetEarlier(lambdaOcc); + } + } + } + } +} + +void SSUPre::ComputeEarlier() const { + for (SLambdaOcc *lambdaOcc : lambdaOccs) { + lambdaOcc->isEarlier = lambdaOcc->isCanBeAnt; + } + for (SLambdaOcc *lambdaOcc : lambdaOccs) { + if (lambdaOcc->isEarlier) { + bool existNonNullUse = false; + for (SLambdaResOcc *lambdaResOcc : lambdaOcc->lambdaRes) { + if (lambdaResOcc->use != nullptr && lambdaResOcc->hasRealUse) { + existNonNullUse = true; + break; + } + } + if (existNonNullUse) { + ResetEarlier(lambdaOcc); + } + } + } + if (enabledDebug) { + LogInfo::MapleLogger() << " _______ after earlier computation _______" << '\n'; + for (SLambdaOcc *lambdaOcc : lambdaOccs) { + lambdaOcc->Dump(); + if (lambdaOcc->isCanBeAnt) { + LogInfo::MapleLogger() << " canbeant"; + } + if (lambdaOcc->isEarlier) { + LogInfo::MapleLogger() << " earlier"; + } + if (lambdaOcc->isCanBeAnt && !lambdaOcc->isEarlier) { + LogInfo::MapleLogger() << " will be ant"; + } + LogInfo::MapleLogger() << '\n'; + } + } +} + +// ================ Step 3: Upsafe Computation ================ +void SSUPre::ResetUpsafe(const SLambdaResOcc *lambdaRes) const { + if (lambdaRes->hasRealUse) { + return; + } + SOcc *useOcc = lambdaRes->use; + if (useOcc == nullptr || useOcc->occTy != kSOccLambda) { + return; + } + auto *useLambdaOcc = static_cast(useOcc); + if (!useLambdaOcc->isUpsafe) { + return; + } + useLambdaOcc->isUpsafe = false; + for (SLambdaResOcc *lambdaResOcc : useLambdaOcc->lambdaRes) { + ResetUpsafe(lambdaResOcc); + } +} + +void SSUPre::ComputeUpsafe() const { + for (SLambdaOcc *lambdaOcc : lambdaOccs) { + if (!lambdaOcc->isUpsafe) { + // propagate not-upsafe forward along def-use edges + for (SLambdaResOcc *lambdaResOcc : lambdaOcc->lambdaRes) { + ResetUpsafe(lambdaResOcc); + } + } + } + if (enabledDebug) { + LogInfo::MapleLogger() << " _______ after upsafe computation _______" << '\n'; + for (SLambdaOcc *lambdaOcc : lambdaOccs) { + lambdaOcc->Dump(); + if (lambdaOcc->isUpsafe) 
{ + LogInfo::MapleLogger() << " upsafe"; + } + LogInfo::MapleLogger() << '\n'; + } + } +} + +// ================ Step 2: rename ================ +void SSUPre::Rename() { + std::stack occStack; + classCount = 0; + // iterate thru the occurrences in order of preorder traversal of + // post-dominator tree + for (SOcc *occ : allOccs) { + while (!occStack.empty() && !occStack.top()->IsPostDominate(pdom, occ)) { + occStack.pop(); + } + switch (occ->occTy) { + case kSOccKill: + if (!occStack.empty()) { + SOcc *topOcc = occStack.top(); + if (topOcc->occTy == kSOccLambda) { + static_cast(topOcc)->isUpsafe = false; + } + } + occStack.push(occ); + break; + case kSOccEntry: + if (!occStack.empty()) { + SOcc *topOcc = occStack.top(); + if (topOcc->occTy == kSOccLambda) { + static_cast(topOcc)->isUpsafe = false; + } + } + break; + case kSOccLambda: + // assign new class + occ->classId = ++classCount; + occStack.push(occ); + break; + case kSOccReal: { + if (occStack.empty()) { + // assign new class + occ->classId = ++classCount; + occStack.push(occ); + break; + } + SOcc *topOcc = occStack.top(); + if (topOcc->occTy == kSOccKill) { + // assign new class + occ->classId = ++classCount; + occStack.push(occ); + break; + } + ASSERT(topOcc->occTy == kSOccLambda || topOcc->occTy == kSOccReal, + "Rename: unexpected top-of-stack occ"); + occ->classId = topOcc->classId; + if (topOcc->occTy == kSOccLambda) { + occStack.push(occ); + } + break; + } + case kSOccLambdaRes: { + if (occStack.empty()) { + // leave classId as 0 + break; + } + SOcc *topOcc = occStack.top(); + if (topOcc->occTy == kSOccKill) { + // leave classId as 0 + break; + } + ASSERT(topOcc->occTy == kSOccLambda || topOcc->occTy == kSOccReal, + "Rename: unexpected top-of-stack occ"); + occ->use = topOcc; + occ->classId = topOcc->classId; + if (topOcc->occTy == kSOccReal) { + static_cast(occ)->hasRealUse = true; + } + break; + } + default: + ASSERT(false, "Rename: unexpected type of occurrence"); + break; + } + } + if (enabledDebug) { + LogInfo::MapleLogger() << " _______ after rename _______" << '\n'; + for (SOcc *occ : allOccs) { + occ->Dump(); + LogInfo::MapleLogger() << '\n'; + } + } +} + +// ================ Step 1: insert lambdas ================ + +// form lambda occ based on the real occ in workCand->realOccs; result is +// stored in lambdaDfns +void SSUPre::FormLambdas() { + for (SOcc *occ : realOccs) { + if (occ->occTy == kSOccKill) { + continue; + } + GetIterPdomFrontier(occ->cgbb, &lambdaDfns); + } +} + +// form allOccs inclusive of real, kill, lambda, lambdaRes, entry occurrences; +// form lambdaOccs containing only the lambdas +void SSUPre::CreateSortedOccs() { + // form lambdaRes occs based on the succs of the lambda occs; result is + // stored in lambdaResDfns + std::multiset lambdaResDfns; + for (uint32 dfn : lambdaDfns) { + const BBId bbId = pdom->GetPdtPreOrderItem(dfn); + BB *cgbb = cgFunc->GetAllBBs()[bbId]; + for (BB *succ : cgbb->GetSuccs()) { + (void)lambdaResDfns.insert(pdom->GetPdtDfnItem(succ->GetId())); + } + } + std::unordered_map> bb2LambdaResMap; + MapleVector::iterator realOccIt = realOccs.begin(); + MapleVector::iterator entryOccIt = entryOccs.begin(); + MapleSet::iterator lambdaDfnIt = lambdaDfns.begin(); + MapleSet::iterator lambdaResDfnIt = lambdaResDfns.begin(); + SOcc *nextRealOcc = nullptr; + if (realOccIt != realOccs.end()) { + nextRealOcc = *realOccIt; + } + SEntryOcc *nextEntryOcc = nullptr; + if (entryOccIt != entryOccs.end()) { + nextEntryOcc = *entryOccIt; + } + SLambdaOcc *nextLambdaOcc = nullptr; + if 
(lambdaDfnIt != lambdaDfns.end()) { + nextLambdaOcc = + spreMp->New(cgFunc->GetAllBBs().at(pdom->GetPdtPreOrderItem(*lambdaDfnIt)), spreAllocator); + } + SLambdaResOcc *nextLambdaResOcc = nullptr; + if (lambdaResDfnIt != lambdaResDfns.end()) { + nextLambdaResOcc = spreMp->New(cgFunc->GetAllBBs().at(pdom->GetPdtPreOrderItem(*lambdaResDfnIt))); + auto it = bb2LambdaResMap.find(pdom->GetPdtPreOrderItem(*lambdaResDfnIt)); + if (it == bb2LambdaResMap.end()) { + std::forward_list newlist = { nextLambdaResOcc }; + bb2LambdaResMap[pdom->GetPdtPreOrderItem(*lambdaResDfnIt)] = newlist; + } else { + it->second.push_front(nextLambdaResOcc); + } + } + SOcc *pickedOcc = nullptr; // the next picked occ in order of preorder traversal of post-dominator tree + do { + pickedOcc = nullptr; + if (nextLambdaOcc != nullptr) { + pickedOcc = nextLambdaOcc; + } + if (nextRealOcc != nullptr && (pickedOcc == nullptr || pdom->GetPdtDfnItem(nextRealOcc->cgbb->GetId()) < + pdom->GetPdtDfnItem(pickedOcc->cgbb->GetId()))) { + pickedOcc = nextRealOcc; + } + if (nextLambdaResOcc != nullptr && + (pickedOcc == nullptr || *lambdaResDfnIt < pdom->GetPdtDfnItem(pickedOcc->cgbb->GetId()))) { + pickedOcc = nextLambdaResOcc; + } + if (nextEntryOcc != nullptr && (pickedOcc == nullptr || pdom->GetPdtDfnItem(nextEntryOcc->cgbb->GetId()) < + pdom->GetPdtDfnItem(pickedOcc->cgbb->GetId()))) { + pickedOcc = nextEntryOcc; + } + if (pickedOcc != nullptr) { + allOccs.push_back(pickedOcc); + switch (pickedOcc->occTy) { + case kSOccReal: + case kSOccKill: { + // get the next real/kill occ + CHECK_FATAL(realOccIt != realOccs.end(), "iterator check"); + ++realOccIt; + if (realOccIt != realOccs.end()) { + nextRealOcc = *realOccIt; + } else { + nextRealOcc = nullptr; + } + break; + } + case kSOccEntry: { + CHECK_FATAL(entryOccIt != entryOccs.end(), "iterator check"); + ++entryOccIt; + if (entryOccIt != entryOccs.end()) { + nextEntryOcc = *entryOccIt; + } else { + nextEntryOcc = nullptr; + } + break; + } + case kSOccLambda: { + lambdaOccs.push_back(static_cast(pickedOcc)); + CHECK_FATAL(lambdaDfnIt != lambdaDfns.end(), "iterator check"); + ++lambdaDfnIt; + if (lambdaDfnIt != lambdaDfns.end()) { + nextLambdaOcc = + spreMp->New(cgFunc->GetAllBBs().at(pdom->GetPdtPreOrderItem(*lambdaDfnIt)), spreAllocator); + } else { + nextLambdaOcc = nullptr; + } + break; + } + case kSOccLambdaRes: { + CHECK_FATAL(lambdaResDfnIt != lambdaResDfns.end(), "iterator check"); + ++lambdaResDfnIt; + if (lambdaResDfnIt != lambdaResDfns.end()) { + nextLambdaResOcc = + spreMp->New(cgFunc->GetAllBBs().at(pdom->GetPdtPreOrderItem(*lambdaResDfnIt))); + auto it = bb2LambdaResMap.find(pdom->GetPdtPreOrderItem(*lambdaResDfnIt)); + if (it == bb2LambdaResMap.end()) { + std::forward_list newlist = { nextLambdaResOcc }; + bb2LambdaResMap[pdom->GetPdtPreOrderItem(*lambdaResDfnIt)] = newlist; + } else { + it->second.push_front(nextLambdaResOcc); + } + } else { + nextLambdaResOcc = nullptr; + } + break; + } + default: + ASSERT(false, "CreateSortedOccs: unexpected occTy"); + break; + } + } + } while (pickedOcc != nullptr); + // initialize lambdaRes vector in each SLambdaOcc node and useLambdaOcc in each SLambdaResOcc + for (SLambdaOcc *lambdaOcc : lambdaOccs) { + for (BB *succ : lambdaOcc->cgbb->GetSuccs()) { + SLambdaResOcc *lambdaResOcc = bb2LambdaResMap[succ->GetId()].front(); + lambdaOcc->lambdaRes.push_back(lambdaResOcc); + lambdaResOcc->useLambdaOcc = lambdaOcc; + bb2LambdaResMap[succ->GetId()].pop_front(); + } + } + if (enabledDebug) { + LogInfo::MapleLogger() << " _______ after 
lambda insertion _______" << '\n'; + for (SOcc *occ : allOccs) { + occ->Dump(); + LogInfo::MapleLogger() << '\n'; + } + } +} + +// ================ Step 0: Preparations ================ + +void SSUPre::PropagateNotAvail(BB *bb, std::set *visitedBBs) { + if (visitedBBs->count(bb) != 0) { + return; + } + visitedBBs->insert(bb); + if (workCand->occBBs.count(bb->GetId()) != 0 || + workCand->saveBBs.count(bb->GetId()) != 0) { + return; + } + fullyAvailBBs[bb->GetId()] = false; + for (BB *succbb : bb->GetSuccs()) { + PropagateNotAvail(succbb, visitedBBs); + } +} + +void SSUPre::FormReals() { + if (!asLateAsPossible) { + for (uint32 i = 0; i < pdom->GetPdtPreOrderSize(); i++) { + BBId bbid = pdom->GetPdtPreOrderItem(i); + BB *cgbb = cgFunc->GetAllBBs()[bbid]; + if (workCand->saveBBs.count(cgbb->GetId()) != 0) { + SRealOcc *realOcc = spreMp->New(cgbb); + realOccs.push_back(realOcc); + SKillOcc *killOcc = spreMp->New(cgbb); + realOccs.push_back(killOcc); + } else if (workCand->occBBs.count(cgbb->GetId()) != 0) { + SRealOcc *realOcc = spreMp->New(cgbb); + realOccs.push_back(realOcc); + } + } + } else { + std::set visitedBBs; + fullyAvailBBs[cgFunc->GetCommonExitBB()->GetId()] = false; + PropagateNotAvail(cgFunc->GetFirstBB(), &visitedBBs); + for (uint32 i = 0; i < pdom->GetPdtPreOrderSize(); i++) { + BBId bbid = pdom->GetPdtPreOrderItem(i); + BB *cgbb = cgFunc->GetAllBBs()[bbid]; + if (fullyAvailBBs[cgbb->GetId()]) { + SRealOcc *realOcc = spreMp->New(cgbb); + realOccs.push_back(realOcc); + if (workCand->saveBBs.count(cgbb->GetId()) != 0) { + SKillOcc *killOcc = spreMp->New(cgbb); + realOccs.push_back(killOcc); + } + } + } + } + + if (enabledDebug) { + LogInfo::MapleLogger() << "Placement Optimization for callee-save restores" << '\n'; + LogInfo::MapleLogger() << "-----------------------------------------------" << '\n'; + LogInfo::MapleLogger() << " _______ input _______" << '\n'; + LogInfo::MapleLogger() << " occBBs: ["; + for (uint32 id : workCand->occBBs) { + LogInfo::MapleLogger() << id << " "; + } + LogInfo::MapleLogger() << "]\n saveBBs: ["; + for (uint32 id : workCand->saveBBs) { + LogInfo::MapleLogger() << id << " "; + } + LogInfo::MapleLogger() << "]\n"; + } +} + +void SSUPre::ApplySSUPre() { + FormReals(); + // #1 insert lambdas; results in allOccs and lambdaOccs + FormLambdas(); // result put in the set lambda_bbs + CreateSortedOccs(); + // #2 rename + Rename(); + if (!lambdaOccs.empty()) { + // #3 UpSafety + ComputeUpsafe(); + // #4 CanBeAnt + ComputeCanBeAnt(); + ComputeEarlier(); + } + // #5 Finalize + Finalize(); + if (!workCand->restoreAtEpilog) { + // #6 Code Motion + CodeMotion(); + } +} + +void DoRestorePlacementOpt(CGFunc *f, PostDomAnalysis *pdom, SPreWorkCand *workCand) { + MemPool *tempMP = memPoolCtrler.NewMemPool("cg_ssu_pre", true); + SSUPre cgssupre(f, pdom, tempMP, workCand, true, false); /* asLateAsPossible, enabledDebug */ + + cgssupre.ApplySSUPre(); + + memPoolCtrler.DeleteMemPool(tempMP); +} + +} // namespace maplebe diff --git a/src/mapleall/maple_be/src/cg/cg_validbit_opt.cpp b/src/mapleall/maple_be/src/cg/cg_validbit_opt.cpp new file mode 100644 index 0000000000000000000000000000000000000000..586f1135fafff2aec1edeb706254e2424e7363dc --- /dev/null +++ b/src/mapleall/maple_be/src/cg/cg_validbit_opt.cpp @@ -0,0 +1,158 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
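// [Editor's note — illustrative sketch, not part of this patch] cg_validbit_opt.cpp
// below computes, for every register operand, how many low bits can actually be
// non-zero: RectifyValidBitNum seeds the numbers per instruction and iterates over
// the phis in reverse postorder until a fixed point, so that target patterns can
// drop zero/sign extensions whose source already fits, and RecoverValidBitNum
// resets the numbers afterwards. The toy propagation below shows the flavour of
// such a rule for an unsigned byte extension; ToyInsn, the opcode strings and
// ValidBits are hypothetical, not the AArch64 implementation created via
// CreateValidBitOpt.
#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <map>
#include <string>
#include <vector>

struct ToyInsn {
  std::string op;   // "movimm", "uxtb" (zero-extend the low 8 bits), "mov"
  int def;          // defined vreg number
  int use;          // used vreg number, -1 if none
  uint64_t imm;     // immediate for movimm
};

// Number of low bits needed to represent imm (at least 1).
static uint32_t ValidBits(uint64_t imm) {
  uint32_t bits = 0;
  while (imm != 0) {
    ++bits;
    imm >>= 1;
  }
  return bits == 0 ? 1 : bits;
}

int main() {
  std::vector<ToyInsn> bb = {
      {"movimm", 1, -1, 0x3f},  // v1 = 0x3f       -> 6 valid bits
      {"uxtb", 2, 1, 0},        // v2 = zext8(v1)  -> adds no information
  };
  std::map<int, uint32_t> validBits;
  for (ToyInsn &insn : bb) {
    if (insn.op == "movimm") {
      validBits[insn.def] = ValidBits(insn.imm);
    } else if (insn.op == "uxtb") {
      uint32_t srcVb = validBits.count(insn.use) != 0 ? validBits[insn.use] : 64u;
      if (srcVb <= 8) {
        insn.op = "mov";  // the extension is redundant, keep a plain copy
      }
      validBits[insn.def] = std::min<uint32_t>(srcVb, 8u);
    }
  }
  for (const ToyInsn &insn : bb) std::printf("%s v%d\n", insn.op.c_str(), insn.def);
  return 0;
}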
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "cg_validbit_opt.h" +#include "mempool.h" +#include "aarch64_validbit_opt.h" + +namespace maplebe { +InsnSet ValidBitPattern::GetAllUseInsn(const RegOperand &defReg) { + InsnSet allUseInsn; + if ((ssaInfo != nullptr) && defReg.IsSSAForm()) { + VRegVersion *defVersion = ssaInfo->FindSSAVersion(defReg.GetRegisterNumber()); + CHECK_FATAL(defVersion != nullptr, "useVRegVersion must not be null based on ssa"); + for (auto insnInfo : defVersion->GetAllUseInsns()) { + Insn *currInsn = insnInfo.second->GetInsn(); + allUseInsn.emplace(currInsn); + } + } + return allUseInsn; +} + +void ValidBitPattern::DumpAfterPattern(std::vector &prevInsns, const Insn *replacedInsn, const Insn *newInsn) { + LogInfo::MapleLogger() << ">>>>>>> In " << GetPatternName() << " : <<<<<<<\n"; + if (!prevInsns.empty()) { + if ((replacedInsn == nullptr) && (newInsn == nullptr)) { + LogInfo::MapleLogger() << "======= RemoveInsns : {\n"; + } else { + LogInfo::MapleLogger() << "======= PrevInsns : {\n"; + } + for (auto *prevInsn : prevInsns) { + if (prevInsn != nullptr) { + LogInfo::MapleLogger() << "[primal form] "; + prevInsn->Dump(); + if (ssaInfo != nullptr) { + LogInfo::MapleLogger() << "[ssa form] "; + ssaInfo->DumpInsnInSSAForm(*prevInsn); + } + } + } + LogInfo::MapleLogger() << "}\n"; + } + if (replacedInsn != nullptr) { + LogInfo::MapleLogger() << "======= OldInsn :\n"; + LogInfo::MapleLogger() << "[primal form] "; + replacedInsn->Dump(); + if (ssaInfo != nullptr) { + LogInfo::MapleLogger() << "[ssa form] "; + ssaInfo->DumpInsnInSSAForm(*replacedInsn); + } + } + if (newInsn != nullptr) { + LogInfo::MapleLogger() << "======= NewInsn :\n"; + LogInfo::MapleLogger() << "[primal form] "; + newInsn->Dump(); + if (ssaInfo != nullptr) { + LogInfo::MapleLogger() << "[ssa form] "; + ssaInfo->DumpInsnInSSAForm(*newInsn); + } + } +} + +void ValidBitOpt::RectifyValidBitNum() { + FOR_ALL_BB(bb, cgFunc) { + FOR_BB_INSNS(insn, bb) { + if (!insn->IsMachineInstruction()) { + continue; + } + SetValidBits(*insn); + } + } + bool iterate; + /* Use reverse postorder to converge with minimal iterations */ + do { + iterate = false; + MapleVector reversePostOrder = ssaInfo->GetReversePostOrder(); + for (uint32 bbId : reversePostOrder) { + BB *bb = cgFunc->GetBBFromID(bbId); + FOR_BB_INSNS(insn, bb) { + if (!insn->IsPhi()) { + continue; + } + bool change = SetPhiValidBits(*insn); + if (change) { + /* if vb changes once, iterate. 
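A phi's valid-bit width is computed from its operands' widths, so one update can enable another phi later in the order; the pass repeats until no phi changes, i.e. until a fixed point is reached.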
*/ + iterate = true; + } + } + } + } while (iterate); +} + +void ValidBitOpt::RecoverValidBitNum() { + FOR_ALL_BB(bb, cgFunc) { + FOR_BB_INSNS(insn, bb) { + if (!insn->IsMachineInstruction() && !insn->IsPhi()) { + continue; + } + uint32 opndNum = insn->GetOperandSize(); + for (uint32 i = 0; i < opndNum; ++i) { + Operand &opnd = insn->GetOperand(i); + if (!opnd.IsRegister()) { + continue; + } + auto &regOpnd = static_cast<RegOperand&>(opnd); + if (insn->OpndIsDef(i)) { + regOpnd.SetValidBitsNum(regOpnd.GetSize()); + } + } + } + } +} + +void ValidBitOpt::Run() { + /* + * Set validbit of regOpnd before optimization + */ + RectifyValidBitNum(); + FOR_ALL_BB(bb, cgFunc) { + FOR_BB_INSNS(insn, bb) { + if (!insn->IsMachineInstruction()) { + continue; + } + DoOpt(*bb, *insn); + } + } + /* + * Recover validbit of regOpnd after optimization + */ + RecoverValidBitNum(); +} + +bool CgValidBitOpt::PhaseRun(maplebe::CGFunc &f) { + CGSSAInfo *ssaInfo = GET_ANALYSIS(CgSSAConstruct, f); + CHECK_FATAL(ssaInfo != nullptr, "Get ssaInfo failed"); + auto *vbOpt = f.GetCG()->CreateValidBitOpt(*GetPhaseMemPool(), f, *ssaInfo); + CHECK_FATAL(vbOpt != nullptr, "vbOpt instance create failed"); + vbOpt->Run(); + return true; +} + +void CgValidBitOpt::GetAnalysisDependence(AnalysisDep &aDep) const { + aDep.AddRequired<CgSSAConstruct>(); + aDep.AddPreserved<CgSSAConstruct>(); +} +MAPLE_TRANSFORM_PHASE_REGISTER_CANSKIP(CgValidBitOpt, cgvalidbitopt) +} /* namespace maplebe */ + diff --git a/src/mapleall/maple_be/src/cg/cgbb.cpp b/src/mapleall/maple_be/src/cg/cgbb.cpp new file mode 100644 index 0000000000000000000000000000000000000000..1a4b3f72163b5709189dca7ef1998de09e16621b --- /dev/null +++ b/src/mapleall/maple_be/src/cg/cgbb.cpp @@ -0,0 +1,557 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details.
+ */ +#include "cgbb.h" +#include "cgfunc.h" + +namespace maplebe { +constexpr uint32 kCondBrNum = 2; +constexpr uint32 kSwitchCaseNum = 5; + +const std::string BB::bbNames[BB::kBBLast] = { + "BB_ft", + "BB_if", + "BB_goto", + "BB_igoto", + "BB_ret", + "BB_intrinsic", + "BB_rangegoto", + "BB_throw" +}; + +Insn *BB::InsertInsnBefore(Insn &existing, Insn &newInsn) { + Insn *pre = existing.GetPrev(); + newInsn.SetPrev(pre); + newInsn.SetNext(&existing); + existing.SetPrev(&newInsn); + if (pre != nullptr) { + pre->SetNext(&newInsn); + } + if (&existing == firstInsn) { + firstInsn = &newInsn; + } + newInsn.SetBB(this); + return &newInsn; +} + +Insn *BB::InsertInsnAfter(Insn &existing, Insn &newInsn) { + newInsn.SetPrev(&existing); + newInsn.SetNext(existing.GetNext()); + existing.SetNext(&newInsn); + if (&existing == lastInsn) { + lastInsn = &newInsn; + } else if (newInsn.GetNext()) { + newInsn.GetNext()->SetPrev(&newInsn); + } + newInsn.SetBB(this); + internalFlag1++; + return &newInsn; +} + +void BB::ReplaceInsn(Insn &insn, Insn &newInsn) { + if (insn.IsAccessRefField()) { + newInsn.MarkAsAccessRefField(true); + } + if (insn.GetDoNotRemove()) { + newInsn.SetDoNotRemove(true); + } + newInsn.SetPrev(insn.GetPrev()); + newInsn.SetNext(insn.GetNext()); + if (&insn == lastInsn) { + lastInsn = &newInsn; + } else if (newInsn.GetNext() != nullptr) { + newInsn.GetNext()->SetPrev(&newInsn); + } + if (firstInsn == &insn) { + firstInsn = &newInsn; + } else if (newInsn.GetPrev() != nullptr) { + newInsn.GetPrev()->SetNext(&newInsn); + } + newInsn.SetComment(insn.GetComment()); + newInsn.SetBB(this); +} + +void BB::RemoveInsn(Insn &insn) { + if ((firstInsn == &insn) && (lastInsn == &insn)) { + firstInsn = lastInsn = nullptr; + } else if (firstInsn == &insn) { + firstInsn = insn.GetNext(); + } else if (lastInsn == &insn) { + lastInsn = insn.GetPrev(); + } + /* remove insn from lir list */ + Insn *prevInsn = insn.GetPrev(); + Insn *nextInsn = insn.GetNext(); + if (prevInsn != nullptr) { + prevInsn->SetNext(nextInsn); + } + if (nextInsn != nullptr) { + nextInsn->SetPrev(prevInsn); + } +} + +void BB::RemoveInsnPair(Insn &insn, const Insn &nextInsn) { + ASSERT(insn.GetNext() == &nextInsn, "next_insn is supposed to follow insn"); + ASSERT(nextInsn.GetPrev() == &insn, "next_insn is supposed to follow insn"); + if ((firstInsn == &insn) && (lastInsn == &nextInsn)) { + firstInsn = lastInsn = nullptr; + } else if (firstInsn == &insn) { + firstInsn = nextInsn.GetNext(); + } else if (lastInsn == &nextInsn) { + lastInsn = insn.GetPrev(); + } + if (insn.GetPrev() != nullptr) { + insn.GetPrev()->SetNext(nextInsn.GetNext()); + } + if (nextInsn.GetNext() != nullptr) { + nextInsn.GetNext()->SetPrev(insn.GetPrev()); + } +} + +/* Remove insns in this bb from insn1 to insn2. 
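Both endpoints must belong to this BB and insn is expected to precede nextInsn; only the boundary links are rewired, so the whole range from insn through nextInsn (inclusive) is detached in one step, e.g. bb->RemoveInsnSequence(*first, *last).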
*/ +void BB::RemoveInsnSequence(Insn &insn, const Insn &nextInsn) { + ASSERT(insn.GetBB() == this, "remove insn sequence in one bb"); + ASSERT(nextInsn.GetBB() == this, "remove insn sequence in one bb"); + if ((firstInsn == &insn) && (lastInsn == &nextInsn)) { + firstInsn = lastInsn = nullptr; + } else if (firstInsn == &insn) { + firstInsn = nextInsn.GetNext(); + } else if (lastInsn == &nextInsn) { + lastInsn = insn.GetPrev(); + } + + if (insn.GetPrev() != nullptr) { + insn.GetPrev()->SetNext(nextInsn.GetNext()); + } + if (nextInsn.GetNext() != nullptr) { + nextInsn.GetNext()->SetPrev(insn.GetPrev()); + } +} + +/* append all insns from bb into this bb */ +void BB::AppendBBInsns(BB &bb) { + if (firstInsn == nullptr) { + firstInsn = bb.firstInsn; + lastInsn = bb.lastInsn; + if (firstInsn != nullptr) { + FOR_BB_INSNS(i, &bb) { + i->SetBB(this); + } + } + return; + } + if ((bb.firstInsn == nullptr) || (bb.lastInsn == nullptr)) { + return; + } + FOR_BB_INSNS_SAFE(insn, &bb, nextInsn) { + AppendInsn(*insn); + } +} + +/* prepend all insns from bb into this bb */ +void BB::InsertAtBeginning(BB &bb) { + if (bb.firstInsn == nullptr) { /* nothing to add */ + return; + } + + FOR_BB_INSNS(insn, &bb) { + insn->SetBB(this); + } + + if (firstInsn == nullptr) { + firstInsn = bb.firstInsn; + lastInsn = bb.lastInsn; + } else { + bb.lastInsn->SetNext(firstInsn); + firstInsn->SetPrev(bb.lastInsn); + firstInsn = bb.firstInsn; + } + bb.firstInsn = bb.lastInsn = nullptr; +} + +/* append all insns from bb into this bb */ +void BB::InsertAtEnd(BB &bb) { + if (bb.firstInsn == nullptr) { /* nothing to add */ + return; + } + + FOR_BB_INSNS(insn, &bb) { + insn->SetBB(this); + } + + if (firstInsn == nullptr) { + firstInsn = bb.firstInsn; + lastInsn = bb.lastInsn; + } else { + bb.firstInsn->SetPrev(lastInsn); + lastInsn->SetNext(bb.firstInsn); + lastInsn = bb.lastInsn; + } + bb.firstInsn = bb.lastInsn = nullptr; +} + +/* Insert all insns from bb into this bb before the last instr */ +void BB::InsertAtEndMinus1(BB &bb) { + if (bb.firstInsn == nullptr) { /* nothing to add */ + return; + } + + if (NumInsn() == 1) { + InsertAtBeginning(bb); + return; + } + + FOR_BB_INSNS(insn, &bb) { + insn->SetBB(this); + } + + if (firstInsn == nullptr) { + firstInsn = bb.firstInsn; + lastInsn = bb.lastInsn; + } else { + /* Add between prevLast and lastInsn */ + Insn *realLastInsn = GetLastMachineInsn() != nullptr ? 
GetLastMachineInsn() : lastInsn; + Insn *prevLast = realLastInsn->GetPrev(); + if (prevLast == nullptr) { + firstInsn = bb.firstInsn; + } else { + bb.firstInsn->SetPrev(prevLast); + prevLast->SetNext(bb.firstInsn); + } + if (lastInsn == realLastInsn) { + lastInsn->SetPrev(bb.lastInsn); + bb.lastInsn->SetNext(lastInsn); + } else { + realLastInsn->SetPrev(bb.lastInsn); + bb.lastInsn->SetNext(realLastInsn); + } + } + bb.firstInsn = bb.lastInsn = nullptr; +} + +/* Number of instructions excluding DbgInsn and comments */ +int32 BB::NumInsn() const { + int32 bbSize = 0; + FOR_BB_INSNS_CONST(i, this) { + if (i->IsImmaterialInsn() || i->IsDbgInsn()) { + continue; + } + ++bbSize; + } + return bbSize; +} + +bool BB::IsInPhiList(regno_t regNO) { + for (auto phiInsnIt : phiInsnList) { + Insn *phiInsn = phiInsnIt.second; + if (phiInsn == nullptr) { + continue; + } + auto &phiListOpnd = static_cast(phiInsn->GetOperand(kInsnSecondOpnd)); + for (auto phiListIt : phiListOpnd.GetOperands()) { + RegOperand *phiUseOpnd = phiListIt.second; + if (phiUseOpnd == nullptr) { + continue; + } + if (phiUseOpnd->GetRegisterNumber() == regNO) { + return true; + } + } + } + return false; +} + +bool BB::IsInPhiDef(regno_t regNO) { + for (auto phiInsnIt : phiInsnList) { + Insn *phiInsn = phiInsnIt.second; + if (phiInsn == nullptr) { + continue; + } + auto &phiDefOpnd = static_cast(phiInsn->GetOperand(kInsnFirstOpnd)); + if (phiDefOpnd.GetRegisterNumber() == regNO) { + return true; + } + } + return false; +} + +bool BB::HasCriticalEdge() { + constexpr int minPredsNum = 2; + if (preds.size() < minPredsNum) { + return false; + } + for (BB *pred : preds) { + if (pred->GetKind() == BB::kBBGoto || pred->GetKind() == BB::kBBIgoto) { + continue; + } + if (pred->GetSuccs().size() > 1) { + return true; + } + } + return false; +} + +void BB::Dump() const { + LogInfo::MapleLogger() << "=== BB " << this << " <" << GetKindName(); + if (labIdx != 0) { + LogInfo::MapleLogger() << "[labeled with " << labIdx << "]"; + if (labelTaken) { + LogInfo::MapleLogger() << " taken"; + } + } + LogInfo::MapleLogger() << "> <" << id << "> "; + if (isCleanup) { + LogInfo::MapleLogger() << "[is_cleanup] "; + } + if (unreachable) { + LogInfo::MapleLogger() << "[unreachable] "; + } + LogInfo::MapleLogger() << "succs "; + for (auto *bb : succs) { + LogInfo::MapleLogger() << bb->id << " "; + } + LogInfo::MapleLogger() << "preds "; + for (auto *bb : preds) { + LogInfo::MapleLogger() << bb->id << " "; + } + LogInfo::MapleLogger() << "frequency:" << frequency << "===\n"; + + Insn *insn = firstInsn; + while (insn != nullptr) { + insn->Dump(); + insn = insn->GetNext(); + } +} + +bool BB::IsCommentBB() const { + if (GetKind() != kBBFallthru) { + return false; + } + FOR_BB_INSNS_CONST(insn, this) { + if (insn->IsMachineInstruction()) { + return false; + } + } + return true; +} + +/* return true if bb has no real insns. 
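i.e. the BB is either completely empty or a fallthru BB whose instructions are all non-machine (comments/debug), so callers such as GetValidPrev below can skip it when searching for a meaningful neighbouring BB.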
*/ +bool BB::IsEmptyOrCommentOnly() const { + return (IsEmpty() || IsCommentBB()); +} + +bool BB::IsSoloGoto() const { + if (GetKind() != kBBGoto) { + return false; + } + if (GetHasCfi()) { + return false; + } + FOR_BB_INSNS_CONST(insn, this) { + if (!insn->IsMachineInstruction()) { + continue; + } + return (insn->IsUnCondBranch()); + } + return false; +} + +BB *BB::GetValidPrev() { + BB *pre = GetPrev(); + while (pre != nullptr && (pre->IsEmptyOrCommentOnly() || pre->IsUnreachable())) { + pre = pre->GetPrev(); + } + return pre; +} + +bool Bfs::AllPredBBVisited(const BB &bb, long &level) const { + bool isAllPredsVisited = true; + for (const auto *predBB : bb.GetPreds()) { + /* See if pred bb is a loop back edge */ + bool isBackEdge = false; + for (const auto *loopBB : predBB->GetLoopSuccs()) { + if (loopBB == &bb) { + isBackEdge = true; + break; + } + } + if (!isBackEdge && !visitedBBs[predBB->GetId()]) { + isAllPredsVisited = false; + break; + } + level = std::max(level, predBB->GetInternalFlag2()); + } + for (const auto *predEhBB : bb.GetEhPreds()) { + bool isBackEdge = false; + for (const auto *loopBB : predEhBB->GetLoopSuccs()) { + if (loopBB == &bb) { + isBackEdge = true; + break; + } + } + if (!isBackEdge && !visitedBBs[predEhBB->GetId()]) { + isAllPredsVisited = false; + break; + } + level = std::max(level, predEhBB->GetInternalFlag2()); + } + return isAllPredsVisited; +} + +/* + * During live interval construction, bb has only one predecessor and/or one + * successor are stright line bb. It can be considered to be a single large bb + * for the purpose of finding live interval. This is to prevent extending live + * interval of registers unnecessarily when interleaving bb from other paths. + */ +BB *Bfs::MarkStraightLineBBInBFS(BB *bb) { + while (true) { + if ((bb->GetSuccs().size() != 1) || !bb->GetEhSuccs().empty()) { + break; + } + BB *sbb = bb->GetSuccs().front(); + if (visitedBBs[sbb->GetId()]) { + break; + } + if ((sbb->GetPreds().size() != 1) || !sbb->GetEhPreds().empty()) { + break; + } + sortedBBs.push_back(sbb); + visitedBBs[sbb->GetId()] = true; + sbb->SetInternalFlag2(bb->GetInternalFlag2() + 1); + bb = sbb; + } + return bb; +} + +BB *Bfs::SearchForStraightLineBBs(BB &bb) { + if ((bb.GetSuccs().size() != kCondBrNum) || bb.GetEhSuccs().empty()) { + return &bb; + } + BB *sbb1 = bb.GetSuccs().front(); + BB *sbb2 = bb.GetSuccs().back(); + size_t predSz1 = sbb1->GetPreds().size(); + size_t predSz2 = sbb2->GetPreds().size(); + BB *candidateBB = nullptr; + if ((predSz1 == 1) && (predSz2 > kSwitchCaseNum)) { + candidateBB = sbb1; + } else if ((predSz2 == 1) && (predSz1 > kSwitchCaseNum)) { + candidateBB = sbb2; + } else { + return &bb; + } + ASSERT(candidateBB->GetId() < visitedBBs.size(), "index out of range in RA::SearchForStraightLineBBs"); + if (visitedBBs[candidateBB->GetId()]) { + return &bb; + } + if (!candidateBB->GetEhPreds().empty()) { + return &bb; + } + if (candidateBB->GetSuccs().size() != 1) { + return &bb; + } + + sortedBBs.push_back(candidateBB); + visitedBBs[candidateBB->GetId()] = true; + return MarkStraightLineBBInBFS(candidateBB); +} + +void Bfs::BFS(BB &curBB) { + std::queue workList; + workList.push(&curBB); + ASSERT(curBB.GetId() < cgfunc->NumBBs(), "RA::BFS visitedBBs overflow"); + ASSERT(curBB.GetId() < visitedBBs.size(), "index out of range in RA::BFS"); + visitedBBs[curBB.GetId()] = true; + do { + BB *bb = workList.front(); + sortedBBs.push_back(bb); + ASSERT(bb->GetId() < cgfunc->NumBBs(), "RA::BFS visitedBBs overflow"); + visitedBBs[bb->GetId()] = 
true; + workList.pop(); + /* Look for straight line bb */ + bb = MarkStraightLineBBInBFS(bb); + /* Look for an 'if' followed by some straight-line bb */ + bb = SearchForStraightLineBBs(*bb); + for (auto *ibb : bb->GetSuccs()) { + /* See if there are unvisited predecessor */ + if (visitedBBs[ibb->GetId()]) { + continue; + } + long prevLevel = 0; + if (AllPredBBVisited(*ibb, prevLevel)) { + ibb->SetInternalFlag2(prevLevel + 1); + workList.push(ibb); + ASSERT(ibb->GetId() < cgfunc->NumBBs(), "GCRA::BFS visitedBBs overflow"); + visitedBBs[ibb->GetId()] = true; + } + } + } while (!workList.empty()); +} + +void Bfs::ComputeBlockOrder() { + visitedBBs.clear(); + sortedBBs.clear(); + visitedBBs.resize(cgfunc->NumBBs()); + for (uint32 i = 0; i < cgfunc->NumBBs(); ++i) { + visitedBBs[i] = false; + } + BB *cleanupBB = nullptr; + FOR_ALL_BB(bb, cgfunc) { + bb->SetInternalFlag1(0); + bb->SetInternalFlag2(1); + if (bb->IsCleanup()) { + ASSERT(cleanupBB == nullptr, "one cleanupBB in the function only"); + cleanupBB = bb; + } + } + if (cleanupBB != nullptr) { + cleanupBB->SetInternalFlag1(1); + } + + bool changed; + size_t sortedCnt = 0; + bool done = false; + do { + changed = false; + FOR_ALL_BB(bb, cgfunc) { + if (bb->GetInternalFlag1() == 1) { + continue; + } + if (visitedBBs[bb->GetId()]) { + continue; + } + changed = true; + long prevLevel = 0; + if (AllPredBBVisited(*bb, prevLevel)) { + bb->SetInternalFlag2(prevLevel + 1); + BFS(*bb); + } + } + /* Make sure there is no infinite loop. */ + if (sortedCnt == sortedBBs.size()) { + if (!done) { + done = true; + } else { + LogInfo::MapleLogger() << "Error: RA BFS loop " << sortedCnt << " in func " << cgfunc->GetName() << "\n"; + CHECK_FATAL(false, ""); + } + } + sortedCnt = sortedBBs.size(); + } while (changed); + + if (cleanupBB != nullptr) { + sortedBBs.push_back(cleanupBB); + } +} +} /* namespace maplebe */ diff --git a/src/mapleall/maple_be/src/cg/cgfunc.cpp b/src/mapleall/maple_be/src/cg/cgfunc.cpp new file mode 100644 index 0000000000000000000000000000000000000000..9162042dd37b5553cd51d9ba2702f180514daa52 --- /dev/null +++ b/src/mapleall/maple_be/src/cg/cgfunc.cpp @@ -0,0 +1,2352 @@ +/* + * Copyright (c) [2020-2022] Huawei Technologies Co.,Ltd.All rights reserved + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "cgfunc.h" +#if DEBUG +#include +#endif +#include "cg.h" +#include "insn.h" +#include "loop.h" +#include "mir_builder.h" +#include "factory.h" +#include "debug_info.h" +#include "cfgo.h" +#include "optimize_common.h" +#include "me_function.h" + +namespace maplebe { +using namespace maple; + +#define JAVALANG (GetMirModule().IsJavaModule()) + +Operand *HandleDread(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) { + auto &dreadNode = static_cast(expr); + return cgFunc.SelectDread(parent, dreadNode); +} + +Operand *HandleRegread(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) { + (void)parent; + auto ®ReadNode = static_cast(expr); + if (regReadNode.GetRegIdx() == -kSregRetval0 || regReadNode.GetRegIdx() == -kSregRetval1) { + return &cgFunc.ProcessReturnReg(regReadNode.GetPrimType(), -(regReadNode.GetRegIdx())); + } + return cgFunc.SelectRegread(regReadNode); +} + +Operand *HandleConstVal(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) { + auto &constValNode = static_cast(expr); + MIRConst *mirConst = constValNode.GetConstVal(); + ASSERT(mirConst != nullptr, "get constval of constvalnode failed"); + if (mirConst->GetKind() == kConstInt) { + auto *mirIntConst = safe_cast(mirConst); + return cgFunc.SelectIntConst(*mirIntConst); + } else if (mirConst->GetKind() == kConstFloatConst) { + auto *mirFloatConst = safe_cast(mirConst); + return cgFunc.SelectFloatConst(*mirFloatConst, parent); + } else if (mirConst->GetKind() == kConstDoubleConst) { + auto *mirDoubleConst = safe_cast(mirConst); + return cgFunc.SelectDoubleConst(*mirDoubleConst, parent); + } else { + CHECK_FATAL(false, "NYI"); + } + return nullptr; +} + +Operand *HandleConstStr(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) { + (void)parent; + auto &constStrNode = static_cast(expr); +#if TARGAARCH64 || TARGRISCV64 + if (CGOptions::IsArm64ilp32()) { + return cgFunc.SelectStrConst(*cgFunc.GetMemoryPool()->New( + constStrNode.GetStrIdx(), *GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_a32)))); + } else { + return cgFunc.SelectStrConst(*cgFunc.GetMemoryPool()->New( + constStrNode.GetStrIdx(), *GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_a64)))); + } +#else + return cgFunc.SelectStrConst(*cgFunc.GetMemoryPool()->New( + constStrNode.GetStrIdx(), *GlobalTables::GetTypeTable().GetTypeFromTyIdx((TyIdx)PTY_a32))); +#endif +} + +Operand *HandleConstStr16(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) { + (void)parent; + auto &constStr16Node = static_cast(expr); +#if TARGAARCH64 || TARGRISCV64 + if (CGOptions::IsArm64ilp32()) { + return cgFunc.SelectStr16Const(*cgFunc.GetMemoryPool()->New( + constStr16Node.GetStrIdx(), *GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_a32)))); + } else { + return cgFunc.SelectStr16Const(*cgFunc.GetMemoryPool()->New( + constStr16Node.GetStrIdx(), *GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_a64)))); + } +#else + return cgFunc.SelectStr16Const(*cgFunc.GetMemoryPool()->New( + constStr16Node.GetStrIdx(), *GlobalTables::GetTypeTable().GetTypeFromTyIdx((TyIdx)PTY_a32))); +#endif +} + +Operand *HandleAdd(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) { + if (Globals::GetInstance()->GetOptimLevel() >= CGOptions::kLevel2 && expr.Opnd(0)->GetOpCode() == OP_mul && + !IsPrimitiveVector(expr.GetPrimType()) && + !IsPrimitiveFloat(expr.GetPrimType()) && expr.Opnd(0)->Opnd(0)->GetOpCode() != OP_constval && + expr.Opnd(0)->Opnd(1)->GetOpCode() != OP_constval) { + return cgFunc.SelectMadd(static_cast(expr), 
+ *cgFunc.HandleExpr(*expr.Opnd(0), *expr.Opnd(0)->Opnd(0)), + *cgFunc.HandleExpr(*expr.Opnd(0), *expr.Opnd(0)->Opnd(1)), + *cgFunc.HandleExpr(expr, *expr.Opnd(1)), parent); + } else if (Globals::GetInstance()->GetOptimLevel() >= CGOptions::kLevel2 && expr.Opnd(1)->GetOpCode() == OP_mul && + !IsPrimitiveVector(expr.GetPrimType()) && + !IsPrimitiveFloat(expr.GetPrimType()) && expr.Opnd(1)->Opnd(0)->GetOpCode() != OP_constval && + expr.Opnd(1)->Opnd(1)->GetOpCode() != OP_constval) { + return cgFunc.SelectMadd(static_cast(expr), + *cgFunc.HandleExpr(*expr.Opnd(0), *expr.Opnd(1)->Opnd(0)), + *cgFunc.HandleExpr(*expr.Opnd(0), *expr.Opnd(1)->Opnd(1)), + *cgFunc.HandleExpr(expr, *expr.Opnd(0)), parent); + } else { + return cgFunc.SelectAdd(static_cast(expr), *cgFunc.HandleExpr(expr, *expr.Opnd(0)), + *cgFunc.HandleExpr(expr, *expr.Opnd(1)), parent); + } +} + +Operand *HandleCGArrayElemAdd(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) { + return &cgFunc.SelectCGArrayElemAdd(static_cast(expr), parent); +} + +BaseNode *IsConstantInVectorFromScalar(BaseNode *expr) { + if (expr->op != OP_intrinsicop) { + return nullptr; + } + IntrinsicopNode *intrn = static_cast(expr); + switch (intrn->GetIntrinsic()) { + case INTRN_vector_from_scalar_v8u8: case INTRN_vector_from_scalar_v8i8: + case INTRN_vector_from_scalar_v4u16: case INTRN_vector_from_scalar_v4i16: + case INTRN_vector_from_scalar_v2u32: case INTRN_vector_from_scalar_v2i32: + case INTRN_vector_from_scalar_v1u64: case INTRN_vector_from_scalar_v1i64: + case INTRN_vector_from_scalar_v16u8: case INTRN_vector_from_scalar_v16i8: + case INTRN_vector_from_scalar_v8u16: case INTRN_vector_from_scalar_v8i16: + case INTRN_vector_from_scalar_v4u32: case INTRN_vector_from_scalar_v4i32: + case INTRN_vector_from_scalar_v2u64: case INTRN_vector_from_scalar_v2i64: { + if (intrn->Opnd(0) != nullptr && intrn->Opnd(0)->op == OP_constval) { + return intrn->Opnd(0); + } + break; + } + default: + break; + } + return nullptr; +} + +Operand *HandleShift(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) { + BaseNode *cExpr = IsConstantInVectorFromScalar(expr.Opnd(1)); + if (cExpr == nullptr) { + return cgFunc.SelectShift(static_cast(expr), *cgFunc.HandleExpr(expr, *expr.Opnd(0)), + *cgFunc.HandleExpr(expr, *expr.Opnd(1)), parent); + } else { + return cgFunc.SelectShift(static_cast(expr), *cgFunc.HandleExpr(expr, *expr.Opnd(0)), + *cgFunc.HandleExpr(*expr.Opnd(1), *cExpr), parent); + } +} + +Operand *HandleRor(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) { + return cgFunc.SelectRor(static_cast(expr), *cgFunc.HandleExpr(expr, *expr.Opnd(0)), + *cgFunc.HandleExpr(expr, *expr.Opnd(1)), parent); +} + +Operand *HandleMpy(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) { + return cgFunc.SelectMpy(static_cast(expr), *cgFunc.HandleExpr(expr, *expr.Opnd(0)), + *cgFunc.HandleExpr(expr, *expr.Opnd(1)), parent); +} + +Operand *HandleDiv(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) { + return cgFunc.SelectDiv(static_cast(expr), *cgFunc.HandleExpr(expr, *expr.Opnd(0)), + *cgFunc.HandleExpr(expr, *expr.Opnd(1)), parent); +} + +Operand *HandleRem(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) { + return cgFunc.SelectRem(static_cast(expr), *cgFunc.HandleExpr(expr, *expr.Opnd(0)), + *cgFunc.HandleExpr(expr, *expr.Opnd(1)), parent); +} + +Operand *HandleAddrof(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) { + auto &addrofNode = static_cast(expr); + return cgFunc.SelectAddrof(addrofNode, parent, false); +} + +Operand 
*HandleAddrofoff(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) { + auto &addrofoffNode = static_cast(expr); + return cgFunc.SelectAddrofoff(addrofoffNode, parent); +} + +Operand *HandleAddroffunc(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) { + auto &addroffuncNode = static_cast(expr); + return &cgFunc.SelectAddrofFunc(addroffuncNode, parent); +} + +Operand *HandleAddrofLabel(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) { + auto &addrofLabelNode = static_cast(expr); + return &cgFunc.SelectAddrofLabel(addrofLabelNode, parent); +} + +Operand *HandleIread(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) { + auto &ireadNode = static_cast(expr); + return cgFunc.SelectIread(parent, ireadNode); +} + +Operand *HandleIreadoff(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) { + auto &ireadNode = static_cast(expr); + return cgFunc.SelectIreadoff(parent, ireadNode); +} + +Operand *HandleIreadfpoff(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) { + auto &ireadNode = static_cast(expr); + return cgFunc.SelectIreadfpoff(parent, ireadNode); +} + +Operand *HandleSub(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) { + return cgFunc.SelectSub(static_cast(expr), *cgFunc.HandleExpr(expr, *expr.Opnd(0)), + *cgFunc.HandleExpr(expr, *expr.Opnd(1)), parent); +} + +Operand *HandleBand(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) { + return cgFunc.SelectBand(static_cast(expr), *cgFunc.HandleExpr(expr, *expr.Opnd(0)), + *cgFunc.HandleExpr(expr, *expr.Opnd(1)), parent); +} + +Operand *HandleBior(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) { + return cgFunc.SelectBior(static_cast(expr), *cgFunc.HandleExpr(expr, *expr.Opnd(0)), + *cgFunc.HandleExpr(expr, *expr.Opnd(1)), parent); +} + +Operand *HandleBxor(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) { + return cgFunc.SelectBxor(static_cast(expr), *cgFunc.HandleExpr(expr, *expr.Opnd(0)), + *cgFunc.HandleExpr(expr, *expr.Opnd(1)), parent); +} + +Operand *HandleAbs(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) { + (void)parent; + return cgFunc.SelectAbs(static_cast(expr), *cgFunc.HandleExpr(expr, *expr.Opnd(0))); +} + +Operand *HandleBnot(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) { + return cgFunc.SelectBnot(static_cast(expr), *cgFunc.HandleExpr(expr, *expr.Opnd(0)), parent); +} + +Operand *HandleExtractBits(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) { + ExtractbitsNode &node = static_cast(expr); + uint8 bitOffset = node.GetBitsOffset(); + uint8 bitSize = node.GetBitsSize(); + if (!CGOptions::IsBigEndian() && (bitSize == k8BitSize || bitSize == k16BitSize) && + GetPrimTypeBitSize(node.GetPrimType()) != k64BitSize && + (bitOffset == 0 || bitOffset == k8BitSize || bitOffset == k16BitSize || bitOffset == k24BitSize) && + expr.Opnd(0)->GetOpCode() == OP_iread && node.GetOpCode() == OP_extractbits) { + return cgFunc.SelectRegularBitFieldLoad(node, parent); + } + return cgFunc.SelectExtractbits(static_cast(expr), *cgFunc.HandleExpr(expr, *expr.Opnd(0)), parent); +} + +Operand *HandleDepositBits(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) { + return cgFunc.SelectDepositBits(static_cast(expr), *cgFunc.HandleExpr(expr, *expr.Opnd(0)), + *cgFunc.HandleExpr(expr, *expr.Opnd(1)), parent); +} + +Operand *HandleLnot(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) { + return cgFunc.SelectLnot(static_cast(expr), *cgFunc.HandleExpr(expr, *expr.Opnd(0)), parent); +} + +Operand *HandleLand(const BaseNode &parent, BaseNode &expr, CGFunc 
&cgFunc) { + return cgFunc.SelectLand(static_cast(expr), *cgFunc.HandleExpr(expr, *expr.Opnd(0)), + *cgFunc.HandleExpr(expr, *expr.Opnd(1)), parent); +} + +Operand *HandleLor(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) { + if (parent.IsCondBr()) { + return cgFunc.SelectLor(static_cast(expr), *cgFunc.HandleExpr(expr, *expr.Opnd(0)), + *cgFunc.HandleExpr(expr, *expr.Opnd(1)), parent, true); + } else { + return cgFunc.SelectLor(static_cast(expr), *cgFunc.HandleExpr(expr, *expr.Opnd(0)), + *cgFunc.HandleExpr(expr, *expr.Opnd(1)), parent); + } +} + +Operand *HandleMin(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) { + return cgFunc.SelectMin(static_cast(expr), *cgFunc.HandleExpr(expr, *expr.Opnd(0)), + *cgFunc.HandleExpr(expr, *expr.Opnd(1)), parent); +} + +Operand *HandleMax(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) { + return cgFunc.SelectMax(static_cast(expr), *cgFunc.HandleExpr(expr, *expr.Opnd(0)), + *cgFunc.HandleExpr(expr, *expr.Opnd(1)), parent); +} + +Operand *HandleNeg(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) { + return cgFunc.SelectNeg(static_cast(expr), *cgFunc.HandleExpr(expr, *expr.Opnd(0)), parent); +} + +Operand *HandleRecip(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) { + return cgFunc.SelectRecip(static_cast(expr), *cgFunc.HandleExpr(expr, *expr.Opnd(0)), parent); +} + +Operand *HandleSqrt(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) { + return cgFunc.SelectSqrt(static_cast(expr), *cgFunc.HandleExpr(expr, *expr.Opnd(0)), parent); +} + +Operand *HandleCeil(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) { + return cgFunc.SelectCeil(static_cast(expr), *cgFunc.HandleExpr(expr, *expr.Opnd(0)), parent); +} + +Operand *HandleFloor(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) { + return cgFunc.SelectFloor(static_cast(expr), *cgFunc.HandleExpr(expr, *expr.Opnd(0)), parent); +} + +Operand *HandleRetype(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) { + (void)parent; + return cgFunc.SelectRetype(static_cast(expr), *cgFunc.HandleExpr(expr, *expr.Opnd(0))); +} + +Operand *HandleCvt(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) { + return cgFunc.SelectCvt(parent, static_cast(expr), *cgFunc.HandleExpr(expr, *expr.Opnd(0))); +} + +Operand *HandleRound(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) { + return cgFunc.SelectRound(static_cast(expr), *cgFunc.HandleExpr(expr, *expr.Opnd(0)), parent); +} + +Operand *HandleTrunc(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) { + return cgFunc.SelectTrunc(static_cast(expr), *cgFunc.HandleExpr(expr, *expr.Opnd(0)), parent); +} + +static bool HasCompare(const BaseNode *expr) { + if (kOpcodeInfo.IsCompare(expr->GetOpCode())) { + return true; + } + for (size_t i = 0; i < expr->GetNumOpnds(); ++i) { + if (HasCompare(expr->Opnd(i))) { + return true; + } + } + return false; +} + +Operand *HandleSelect(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) { + /* 0,1,2 represent the first opnd and the second opnd and the third opnd of expr */ + bool hasCompare = false; + if (HasCompare(expr.Opnd(1)) || HasCompare(expr.Opnd(2))) { + hasCompare = true; + } + Operand &trueOpnd = *cgFunc.HandleExpr(expr, *expr.Opnd(1)); + Operand &falseOpnd = *cgFunc.HandleExpr(expr, *expr.Opnd(2)); + Operand *cond = cgFunc.HandleExpr(expr, *expr.Opnd(0)); + return cgFunc.SelectSelect(static_cast(expr), *cond, trueOpnd, falseOpnd, parent, hasCompare); +} + +Operand *HandleCmp(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) { + // fix opnd type 
before select insn + PrimType targetPtyp = parent.GetPrimType(); + if (kOpcodeInfo.IsCompare(parent.GetOpCode())) { + targetPtyp = static_cast(parent).GetOpndType(); + } else if (kOpcodeInfo.IsTypeCvt(parent.GetOpCode())) { + targetPtyp = static_cast(parent).FromType(); + } + if (IsPrimitiveInteger(targetPtyp) && targetPtyp != expr.GetPrimType()) { + expr.SetPrimType(targetPtyp); + } + return cgFunc.SelectCmpOp(static_cast(expr), *cgFunc.HandleExpr(expr, *expr.Opnd(0)), + *cgFunc.HandleExpr(expr, *expr.Opnd(1)), parent); +} + +Operand *HandleAlloca(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) { + (void)parent; + return cgFunc.SelectAlloca(static_cast(expr), *cgFunc.HandleExpr(expr, *expr.Opnd(0))); +} + +Operand *HandleMalloc(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) { + (void)parent; + return cgFunc.SelectMalloc(static_cast(expr), *cgFunc.HandleExpr(expr, *expr.Opnd(0))); +} + +Operand *HandleGCMalloc(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) { + (void)parent; + return cgFunc.SelectGCMalloc(static_cast(expr)); +} + +Operand *HandleJarrayMalloc(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) { + (void)parent; + return cgFunc.SelectJarrayMalloc(static_cast(expr), *cgFunc.HandleExpr(expr, *expr.Opnd(0))); +} + +/* Neon intrinsic handling */ +Operand *HandleVectorAddLong(const BaseNode &expr, CGFunc &cgFunc, bool isLow) { + Operand *o1 = cgFunc.HandleExpr(expr, *expr.Opnd(0)); + Operand *o2 = cgFunc.HandleExpr(expr, *expr.Opnd(1)); + return cgFunc.SelectVectorAddLong(expr.GetPrimType(), o1, o2, expr.Opnd(0)->GetPrimType(), isLow); +} + +Operand *HandleVectorAddWiden(const BaseNode &expr, CGFunc &cgFunc, bool isLow) { + Operand *o1 = cgFunc.HandleExpr(expr, *expr.Opnd(0)); + Operand *o2 = cgFunc.HandleExpr(expr, *expr.Opnd(1)); + return cgFunc.SelectVectorAddWiden(o1, expr.Opnd(0)->GetPrimType(), o2, expr.Opnd(1)->GetPrimType(), isLow); +} + +Operand *HandleVectorFromScalar(const IntrinsicopNode &intrnNode, CGFunc &cgFunc) { + return cgFunc.SelectVectorFromScalar(intrnNode.GetPrimType(), cgFunc.HandleExpr(intrnNode, *intrnNode.Opnd(0)), + intrnNode.Opnd(0)->GetPrimType()); +} + +Operand *HandleVectorAbsSubL(const IntrinsicopNode &intrnNode, CGFunc &cgFunc, bool isLow) { + Operand *opnd1 = cgFunc.HandleExpr(intrnNode, *intrnNode.Opnd(0)); /* vector operand 1 */ + Operand *opnd2 = cgFunc.HandleExpr(intrnNode, *intrnNode.Opnd(1)); /* vector operand 2 */ + return cgFunc.SelectVectorAbsSubL(intrnNode.GetPrimType(), opnd1, opnd2, intrnNode.Opnd(0)->GetPrimType(), isLow); +} + +Operand *HandleVectorMerge(const IntrinsicopNode &intrnNode, CGFunc &cgFunc) { + Operand *opnd1 = cgFunc.HandleExpr(intrnNode, *intrnNode.Opnd(0)); /* vector operand1 */ + Operand *opnd2 = cgFunc.HandleExpr(intrnNode, *intrnNode.Opnd(1)); /* vector operand2 */ + BaseNode *index = intrnNode.Opnd(2); /* index operand */ + int32 iNum = 0; + if (index->GetOpCode() == OP_constval) { + MIRConst *mirConst = static_cast(index)->GetConstVal(); + iNum = static_cast(safe_cast(mirConst)->GetExtValue()); + PrimType ty = intrnNode.Opnd(0)->GetPrimType(); + if (!IsPrimitiveVector(ty)) { + iNum = 0; + } else { + iNum *= GetPrimTypeSize(ty) / GetVecLanes(ty); /* 64x2: 0-1 -> 0-8 */ + } + } else { /* 32x4: 0-3 -> 0-12 */ + CHECK_FATAL(0, "VectorMerge does not have const index"); + } + return cgFunc.SelectVectorMerge(intrnNode.GetPrimType(), opnd1, opnd2, iNum); +} + +Operand *HandleVectorGetHigh(const IntrinsicopNode &intrnNode, CGFunc &cgFunc) { + PrimType rType = intrnNode.GetPrimType(); /* 
result operand */ + Operand *opnd1 = cgFunc.HandleExpr(intrnNode, *intrnNode.Opnd(0)); /* vector operand */ + return cgFunc.SelectVectorDup(rType, opnd1, false); +} + +Operand *HandleVectorGetLow(const IntrinsicopNode &intrnNode, CGFunc &cgFunc) { + PrimType rType = intrnNode.GetPrimType(); /* result operand */ + Operand *opnd1 = cgFunc.HandleExpr(intrnNode, *intrnNode.Opnd(0)); /* vector operand */ + return cgFunc.SelectVectorDup(rType, opnd1, true); +} + +Operand *HandleVectorGetElement(const IntrinsicopNode &intrnNode, CGFunc &cgFunc) { + Operand *opnd1 = cgFunc.HandleExpr(intrnNode, *intrnNode.Opnd(0)); /* vector operand */ + PrimType o1Type = intrnNode.Opnd(0)->GetPrimType(); + Operand *opndLane = cgFunc.HandleExpr(intrnNode, *intrnNode.Opnd(1)); + int32 laneNum = -1; + if (opndLane->IsConstImmediate()) { + MIRConst *mirConst = static_cast(intrnNode.Opnd(1))->GetConstVal(); + laneNum = static_cast(safe_cast(mirConst)->GetExtValue()); + } else { + CHECK_FATAL(0, "VectorGetElement does not have lane const"); + } + return cgFunc.SelectVectorGetElement(intrnNode.GetPrimType(), opnd1, o1Type, laneNum); +} + +Operand *HandleVectorPairwiseAdd(const IntrinsicopNode &intrnNode, CGFunc &cgFunc) { + Operand *src = cgFunc.HandleExpr(intrnNode, *intrnNode.Opnd(0)); /* vector src operand */ + PrimType sType = intrnNode.Opnd(0)->GetPrimType(); + return cgFunc.SelectVectorPairwiseAdd(intrnNode.GetPrimType(), src, sType); +} + +Operand *HandleVectorPairwiseAdalp(const IntrinsicopNode &intrnNode, CGFunc &cgFunc) { + BaseNode *arg1 = intrnNode.Opnd(0); + BaseNode *arg2 = intrnNode.Opnd(1); + Operand *src1 = cgFunc.HandleExpr(intrnNode, *arg1); /* vector src operand 1 */ + Operand *src2 = cgFunc.HandleExpr(intrnNode, *arg2); /* vector src operand 2 */ + return cgFunc.SelectVectorPairwiseAdalp(src1, arg1->GetPrimType(), src2, arg2->GetPrimType()); +} + +Operand *HandleVectorSetElement(const IntrinsicopNode &intrnNode, CGFunc &cgFunc) { + BaseNode *arg0 = intrnNode.Opnd(0); /* uint32_t operand */ + Operand *opnd0 = cgFunc.HandleExpr(intrnNode, *arg0); + PrimType aType = arg0->GetPrimType(); + + BaseNode *arg1 = intrnNode.Opnd(1); /* vector operand == result */ + Operand *opnd1 = cgFunc.HandleExpr(intrnNode, *arg1); + PrimType vType = arg1->GetPrimType(); + + BaseNode *arg2 = intrnNode.Opnd(2); /* lane const operand */ + Operand *opnd2 = cgFunc.HandleExpr(intrnNode, *arg2); + int32 laneNum = -1; + if (opnd2->IsConstImmediate()) { + MIRConst *mirConst = static_cast(arg2)->GetConstVal(); + laneNum = static_cast(safe_cast(mirConst)->GetExtValue()); + } else { + CHECK_FATAL(0, "VectorSetElement does not have lane const"); + } + return cgFunc.SelectVectorSetElement(opnd0, aType, opnd1, vType, laneNum); +} + +Operand *HandleVectorReverse(const IntrinsicopNode &intrnNode, CGFunc &cgFunc, uint32 size) { + BaseNode *argExpr = intrnNode.Opnd(0); /* src operand */ + Operand *src = cgFunc.HandleExpr(intrnNode, *argExpr); + MIRType *type = intrnNode.GetIntrinDesc().GetReturnType(); + ASSERT(type != nullptr, "null ptr check"); + auto revVecType = type->GetPrimType(); + return cgFunc.SelectVectorReverse(revVecType, src, revVecType, size); +} + +Operand *HandleVectorShiftNarrow(const IntrinsicopNode &intrnNode, CGFunc &cgFunc, bool isLow) { + PrimType rType = intrnNode.GetPrimType(); /* vector result */ + Operand *opnd1 = cgFunc.HandleExpr(intrnNode, *intrnNode.Opnd(0)); /* vector operand */ + Operand *opnd2 = cgFunc.HandleExpr(intrnNode, *intrnNode.Opnd(1)); /* shift const */ + if (!opnd2->IsConstImmediate()) { + 
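/* the selected narrowing shift requires a compile-time constant shift amount */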
CHECK_FATAL(0, "VectorShiftNarrow does not have shift const"); + } + return cgFunc.SelectVectorShiftRNarrow(rType, opnd1, intrnNode.Opnd(0)->GetPrimType(), opnd2, isLow); +} + +Operand *HandleVectorSubWiden(const IntrinsicopNode &intrnNode, CGFunc &cgFunc, bool isLow, bool isWide) { + PrimType resType = intrnNode.GetPrimType(); /* uint32_t result */ + Operand *o1 = cgFunc.HandleExpr(intrnNode, *intrnNode.Opnd(0)); + Operand *o2 = cgFunc.HandleExpr(intrnNode, *intrnNode.Opnd(1)); + return cgFunc.SelectVectorSubWiden(resType, o1, intrnNode.Opnd(0)->GetPrimType(), + o2, intrnNode.Opnd(1)->GetPrimType(), isLow, isWide); +} + +Operand *HandleVectorSum(const IntrinsicopNode &intrnNode, CGFunc &cgFunc) { + PrimType resType = intrnNode.GetPrimType(); /* uint32_t result */ + Operand *opnd1 = cgFunc.HandleExpr(intrnNode, *intrnNode.Opnd(0)); /* vector operand */ + return cgFunc.SelectVectorSum(resType, opnd1, intrnNode.Opnd(0)->GetPrimType()); +} + +Operand *HandleVectorTableLookup(const IntrinsicopNode &intrnNode, CGFunc &cgFunc) { + PrimType rType = intrnNode.GetPrimType(); /* result operand */ + Operand *opnd1 = cgFunc.HandleExpr(intrnNode, *intrnNode.Opnd(0)); /* vector operand 1 */ + Operand *opnd2 = cgFunc.HandleExpr(intrnNode, *intrnNode.Opnd(1)); /* vector operand 2 */ + return cgFunc.SelectVectorTableLookup(rType, opnd1, opnd2); +} + +Operand *HandleVectorMadd(const IntrinsicopNode &intrnNode, CGFunc &cgFunc) { + Operand *opnd1 = cgFunc.HandleExpr(intrnNode, *intrnNode.Opnd(0)); /* vector operand 1 */ + Operand *opnd2 = cgFunc.HandleExpr(intrnNode, *intrnNode.Opnd(1)); /* vector operand 2 */ + Operand *opnd3 = cgFunc.HandleExpr(intrnNode, *intrnNode.Opnd(2)); /* vector operand 3 */ + PrimType oTyp1 = intrnNode.Opnd(0)->GetPrimType(); + PrimType oTyp2 = intrnNode.Opnd(1)->GetPrimType(); + PrimType oTyp3 = intrnNode.Opnd(2)->GetPrimType(); + return cgFunc.SelectVectorMadd(opnd1, oTyp1, opnd2, oTyp2, opnd3, oTyp3); +} + +Operand *HandleVectorMull(const IntrinsicopNode &intrnNode, CGFunc &cgFunc, bool isLow) { + PrimType rType = intrnNode.GetPrimType(); /* result operand */ + Operand *opnd1 = cgFunc.HandleExpr(intrnNode, *intrnNode.Opnd(0)); /* vector operand 1 */ + Operand *opnd2 = cgFunc.HandleExpr(intrnNode, *intrnNode.Opnd(1)); /* vector operand 2 */ + PrimType oTyp1 = intrnNode.Opnd(0)->GetPrimType(); + PrimType oTyp2 = intrnNode.Opnd(1)->GetPrimType(); + return cgFunc.SelectVectorMull(rType, opnd1, oTyp1, opnd2, oTyp2, isLow); +} + +Operand *HandleVectorNarrow(const IntrinsicopNode &intrnNode, CGFunc &cgFunc, bool isLow) { + PrimType rType = intrnNode.GetPrimType(); /* result operand */ + Operand *opnd1 = cgFunc.HandleExpr(intrnNode, *intrnNode.Opnd(0)); /* vector opnd 1 */ + if (isLow) { + return cgFunc.SelectVectorNarrow(rType, opnd1, intrnNode.Opnd(0)->GetPrimType()); + } else { + Operand *opnd2 = cgFunc.HandleExpr(intrnNode, *intrnNode.Opnd(1)); /* vector opnd 2 */ + return cgFunc.SelectVectorNarrow2(rType, opnd1, intrnNode.Opnd(0)->GetPrimType(), opnd2, + intrnNode.Opnd(1)->GetPrimType()); + } +} + +Operand *HandleVectorWiden(const IntrinsicopNode &intrnNode, CGFunc &cgFunc, bool isLow) { + PrimType rType = intrnNode.GetPrimType(); /* result operand */ + Operand *opnd1 = cgFunc.HandleExpr(intrnNode, *intrnNode.Opnd(0)); /* vector opnd 1 */ + return cgFunc.SelectVectorWiden(rType, opnd1, intrnNode.Opnd(0)->GetPrimType(), isLow); +} + +Operand *HandleVectorMovNarrow(const IntrinsicopNode &intrinsicNode, CGFunc &cgFunc) { + PrimType rType = intrinsicNode.GetPrimType(); /* result operand 
*/ + Operand *opnd = cgFunc.HandleExpr(intrinsicNode, *intrinsicNode.Opnd(0)); /* vector opnd 1 */ + return cgFunc.SelectVectorMovNarrow(rType, opnd, intrinsicNode.Opnd(0)->GetPrimType()); +} + +Operand *HandleIntrinOp(const BaseNode &parent, BaseNode &expr, CGFunc &cgFunc) { + auto &intrinsicopNode = static_cast(expr); + switch (intrinsicopNode.GetIntrinsic()) { + case INTRN_MPL_READ_OVTABLE_ENTRY_LAZY: { + Operand *srcOpnd = cgFunc.HandleExpr(intrinsicopNode, *intrinsicopNode.Opnd(0)); + return cgFunc.SelectLazyLoad(*srcOpnd, intrinsicopNode.GetPrimType()); + } + case INTRN_MPL_READ_STATIC_OFFSET_TAB: { + auto addrOfNode = static_cast(intrinsicopNode.Opnd(0)); + MIRSymbol *st = cgFunc.GetMirModule().CurFunction()->GetLocalOrGlobalSymbol(addrOfNode->GetStIdx()); + auto constNode = static_cast(intrinsicopNode.Opnd(1)); + CHECK_FATAL(constNode != nullptr, "null ptr check"); + auto mirIntConst = static_cast(constNode->GetConstVal()); + return cgFunc.SelectLazyLoadStatic(*st, mirIntConst->GetExtValue(), intrinsicopNode.GetPrimType()); + } + case INTRN_MPL_READ_ARRAYCLASS_CACHE_ENTRY: { + auto addrOfNode = static_cast(intrinsicopNode.Opnd(0)); + MIRSymbol *st = cgFunc.GetMirModule().CurFunction()->GetLocalOrGlobalSymbol(addrOfNode->GetStIdx()); + auto constNode = static_cast(intrinsicopNode.Opnd(1)); + CHECK_FATAL(constNode != nullptr, "null ptr check"); + auto mirIntConst = static_cast(constNode->GetConstVal()); + return cgFunc.SelectLoadArrayClassCache(*st, mirIntConst->GetExtValue(), intrinsicopNode.GetPrimType()); + } + // double + case INTRN_C_sin: + return cgFunc.SelectIntrinsicOpWithOneParam(intrinsicopNode, "sin"); + case INTRN_C_sinh: + return cgFunc.SelectIntrinsicOpWithOneParam(intrinsicopNode, "sinh"); + case INTRN_C_asin: + return cgFunc.SelectIntrinsicOpWithOneParam(intrinsicopNode, "asin"); + case INTRN_C_cos: + return cgFunc.SelectIntrinsicOpWithOneParam(intrinsicopNode, "cos"); + case INTRN_C_cosh: + return cgFunc.SelectIntrinsicOpWithOneParam(intrinsicopNode, "cosh"); + case INTRN_C_acos: + return cgFunc.SelectIntrinsicOpWithOneParam(intrinsicopNode, "acos"); + case INTRN_C_atan: + return cgFunc.SelectIntrinsicOpWithOneParam(intrinsicopNode, "atan"); + case INTRN_C_exp: + return cgFunc.SelectIntrinsicOpWithOneParam(intrinsicopNode, "exp"); + case INTRN_C_log: + return cgFunc.SelectIntrinsicOpWithOneParam(intrinsicopNode, "log"); + case INTRN_C_log10: + return cgFunc.SelectIntrinsicOpWithOneParam(intrinsicopNode, "log10"); + // float + case INTRN_C_sinf: + return cgFunc.SelectIntrinsicOpWithOneParam(intrinsicopNode, "sinf"); + case INTRN_C_sinhf: + return cgFunc.SelectIntrinsicOpWithOneParam(intrinsicopNode, "sinhf"); + case INTRN_C_asinf: + return cgFunc.SelectIntrinsicOpWithOneParam(intrinsicopNode, "asinf"); + case INTRN_C_cosf: + return cgFunc.SelectIntrinsicOpWithOneParam(intrinsicopNode, "cosf"); + case INTRN_C_coshf: + return cgFunc.SelectIntrinsicOpWithOneParam(intrinsicopNode, "coshf"); + case INTRN_C_acosf: + return cgFunc.SelectIntrinsicOpWithOneParam(intrinsicopNode, "acosf"); + case INTRN_C_atanf: + return cgFunc.SelectIntrinsicOpWithOneParam(intrinsicopNode, "atanf"); + case INTRN_C_expf: + return cgFunc.SelectIntrinsicOpWithOneParam(intrinsicopNode, "expf"); + case INTRN_C_logf: + return cgFunc.SelectIntrinsicOpWithOneParam(intrinsicopNode, "logf"); + case INTRN_C_log10f: + return cgFunc.SelectIntrinsicOpWithOneParam(intrinsicopNode, "log10f"); + // int + case INTRN_C_ffs: + return cgFunc.SelectIntrinsicOpWithOneParam(intrinsicopNode, "ffs"); + // libc mem* and 
str* functions as intrinsicops + case INTRN_C_memcmp: + return cgFunc.SelectIntrinsicOpWithNParams(intrinsicopNode, PTY_i32, "memcmp"); + case INTRN_C_strlen: + return cgFunc.SelectIntrinsicOpWithNParams(intrinsicopNode, PTY_u64, "strlen"); + case INTRN_C_strcmp: + return cgFunc.SelectIntrinsicOpWithNParams(intrinsicopNode, PTY_i32, "strcmp"); + case INTRN_C_strncmp: + return cgFunc.SelectIntrinsicOpWithNParams(intrinsicopNode, PTY_i32, "strncmp"); + case INTRN_C_strchr: + return cgFunc.SelectIntrinsicOpWithNParams(intrinsicopNode, PTY_a64, "strchr"); + case INTRN_C_strrchr: + return cgFunc.SelectIntrinsicOpWithNParams(intrinsicopNode, PTY_a64, "strrchr"); + case INTRN_C_rev16_2: + case INTRN_C_rev_4: + case INTRN_C_rev_8: + case INTRN_C_bswap64: + case INTRN_C_bswap32: + case INTRN_C_bswap16: + return cgFunc.SelectBswap(intrinsicopNode, *cgFunc.HandleExpr(expr, *expr.Opnd(0)), parent); + case INTRN_C_clz32: + case INTRN_C_clz64: + return cgFunc.SelectCclz(intrinsicopNode); + case INTRN_C_ctz32: + case INTRN_C_ctz64: + return cgFunc.SelectCctz(intrinsicopNode); + case INTRN_C_popcount32: + case INTRN_C_popcount64: + return cgFunc.SelectCpopcount(intrinsicopNode); + case INTRN_C_parity32: + case INTRN_C_parity64: + return cgFunc.SelectCparity(intrinsicopNode); + case INTRN_C_clrsb32: + case INTRN_C_clrsb64: + return cgFunc.SelectCclrsb(intrinsicopNode); + case INTRN_C_isaligned: + return cgFunc.SelectCisaligned(intrinsicopNode); + case INTRN_C_alignup: + return cgFunc.SelectCalignup(intrinsicopNode); + case INTRN_C_aligndown: + return cgFunc.SelectCaligndown(intrinsicopNode); + case INTRN_C___sync_add_and_fetch_1: + case INTRN_C___sync_add_and_fetch_2: + case INTRN_C___sync_add_and_fetch_4: + case INTRN_C___sync_add_and_fetch_8: + return cgFunc.SelectCSyncFetch(intrinsicopNode, OP_add, false); + case INTRN_C___sync_sub_and_fetch_1: + case INTRN_C___sync_sub_and_fetch_2: + case INTRN_C___sync_sub_and_fetch_4: + case INTRN_C___sync_sub_and_fetch_8: + return cgFunc.SelectCSyncFetch(intrinsicopNode, OP_sub, false); + case INTRN_C___sync_fetch_and_add_1: + case INTRN_C___sync_fetch_and_add_2: + case INTRN_C___sync_fetch_and_add_4: + case INTRN_C___sync_fetch_and_add_8: + return cgFunc.SelectCSyncFetch(intrinsicopNode, OP_add, true); + case INTRN_C___sync_fetch_and_sub_1: + case INTRN_C___sync_fetch_and_sub_2: + case INTRN_C___sync_fetch_and_sub_4: + case INTRN_C___sync_fetch_and_sub_8: + return cgFunc.SelectCSyncFetch(intrinsicopNode, OP_sub, true); + case INTRN_C___sync_bool_compare_and_swap_1: + case INTRN_C___sync_bool_compare_and_swap_2: + case INTRN_C___sync_bool_compare_and_swap_4: + case INTRN_C___sync_bool_compare_and_swap_8: + return cgFunc.SelectCSyncBoolCmpSwap(intrinsicopNode); + case INTRN_C___sync_val_compare_and_swap_1: + case INTRN_C___sync_val_compare_and_swap_2: + case INTRN_C___sync_val_compare_and_swap_4: + case INTRN_C___sync_val_compare_and_swap_8: + return cgFunc.SelectCSyncValCmpSwap(intrinsicopNode); + case INTRN_C___sync_lock_test_and_set_1: + return cgFunc.SelectCSyncLockTestSet(intrinsicopNode, PTY_i8); + case INTRN_C___sync_lock_test_and_set_2: + return cgFunc.SelectCSyncLockTestSet(intrinsicopNode, PTY_i16); + case INTRN_C___sync_lock_test_and_set_4: + return cgFunc.SelectCSyncLockTestSet(intrinsicopNode, PTY_i32); + case INTRN_C___sync_lock_test_and_set_8: + return cgFunc.SelectCSyncLockTestSet(intrinsicopNode, PTY_i64); + case INTRN_C___sync_fetch_and_and_1: + case INTRN_C___sync_fetch_and_and_2: + case INTRN_C___sync_fetch_and_and_4: + case 
INTRN_C___sync_fetch_and_and_8: + return cgFunc.SelectCSyncFetch(intrinsicopNode, OP_band, true); + case INTRN_C___sync_and_and_fetch_1: + case INTRN_C___sync_and_and_fetch_2: + case INTRN_C___sync_and_and_fetch_4: + case INTRN_C___sync_and_and_fetch_8: + return cgFunc.SelectCSyncFetch(intrinsicopNode, OP_band, false); + case INTRN_C___sync_fetch_and_or_1: + case INTRN_C___sync_fetch_and_or_2: + case INTRN_C___sync_fetch_and_or_4: + case INTRN_C___sync_fetch_and_or_8: + return cgFunc.SelectCSyncFetch(intrinsicopNode, OP_bior, true); + case INTRN_C___sync_or_and_fetch_1: + case INTRN_C___sync_or_and_fetch_2: + case INTRN_C___sync_or_and_fetch_4: + case INTRN_C___sync_or_and_fetch_8: + return cgFunc.SelectCSyncFetch(intrinsicopNode, OP_bior, false); + case INTRN_C___sync_fetch_and_xor_1: + case INTRN_C___sync_fetch_and_xor_2: + case INTRN_C___sync_fetch_and_xor_4: + case INTRN_C___sync_fetch_and_xor_8: + return cgFunc.SelectCSyncFetch(intrinsicopNode, OP_bxor, true); + case INTRN_C___sync_xor_and_fetch_1: + case INTRN_C___sync_xor_and_fetch_2: + case INTRN_C___sync_xor_and_fetch_4: + case INTRN_C___sync_xor_and_fetch_8: + return cgFunc.SelectCSyncFetch(intrinsicopNode, OP_bxor, false); + case INTRN_C___sync_synchronize: + return cgFunc.SelectCSyncSynchronize(intrinsicopNode); + case INTRN_C___atomic_load_n: + return cgFunc.SelectCAtomicLoadN(intrinsicopNode); + case INTRN_C___atomic_fetch_add: + return cgFunc.SelectCAtomicFetch(intrinsicopNode, OP_add, true); + case INTRN_C___atomic_fetch_sub: + return cgFunc.SelectCAtomicFetch(intrinsicopNode, OP_sub, true); + case INTRN_C___atomic_fetch_and: + return cgFunc.SelectCAtomicFetch(intrinsicopNode, OP_band, true); + case INTRN_C___atomic_fetch_or: + return cgFunc.SelectCAtomicFetch(intrinsicopNode, OP_bior, true); + case INTRN_C___atomic_fetch_xor: + return cgFunc.SelectCAtomicFetch(intrinsicopNode, OP_bxor, true); + case INTRN_C___atomic_add_fetch: + return cgFunc.SelectCAtomicFetch(intrinsicopNode, OP_add, false); + case INTRN_C___atomic_sub_fetch: + return cgFunc.SelectCAtomicFetch(intrinsicopNode, OP_sub, false); + case INTRN_C___atomic_and_fetch: + return cgFunc.SelectCAtomicFetch(intrinsicopNode, OP_band, false); + case INTRN_C___atomic_or_fetch: + return cgFunc.SelectCAtomicFetch(intrinsicopNode, OP_bior, false); + case INTRN_C___atomic_xor_fetch: + return cgFunc.SelectCAtomicFetch(intrinsicopNode, OP_bxor, false); + + case INTRN_C__builtin_return_address: + case INTRN_C__builtin_extract_return_addr: + return cgFunc.SelectCReturnAddress(intrinsicopNode); + + case INTRN_vector_abs_v8i8: case INTRN_vector_abs_v4i16: + case INTRN_vector_abs_v2i32: case INTRN_vector_abs_v1i64: + case INTRN_vector_abs_v16i8: case INTRN_vector_abs_v8i16: + case INTRN_vector_abs_v4i32: case INTRN_vector_abs_v2i64: + return HandleAbs(parent, intrinsicopNode, cgFunc); + + case INTRN_vector_addl_low_v8i8: case INTRN_vector_addl_low_v8u8: + case INTRN_vector_addl_low_v4i16: case INTRN_vector_addl_low_v4u16: + case INTRN_vector_addl_low_v2i32: case INTRN_vector_addl_low_v2u32: + return HandleVectorAddLong(intrinsicopNode, cgFunc, true); + + case INTRN_vector_addl_high_v8i8: case INTRN_vector_addl_high_v8u8: + case INTRN_vector_addl_high_v4i16: case INTRN_vector_addl_high_v4u16: + case INTRN_vector_addl_high_v2i32: case INTRN_vector_addl_high_v2u32: + return HandleVectorAddLong(intrinsicopNode, cgFunc, false); + + case INTRN_vector_addw_low_v8i8: case INTRN_vector_addw_low_v8u8: + case INTRN_vector_addw_low_v4i16: case INTRN_vector_addw_low_v4u16: + case 
INTRN_vector_addw_low_v2i32: case INTRN_vector_addw_low_v2u32: + return HandleVectorAddWiden(intrinsicopNode, cgFunc, true); + + case INTRN_vector_addw_high_v8i8: case INTRN_vector_addw_high_v8u8: + case INTRN_vector_addw_high_v4i16: case INTRN_vector_addw_high_v4u16: + case INTRN_vector_addw_high_v2i32: case INTRN_vector_addw_high_v2u32: + return HandleVectorAddWiden(intrinsicopNode, cgFunc, false); + + case INTRN_vector_sum_v8u8: case INTRN_vector_sum_v8i8: + case INTRN_vector_sum_v4u16: case INTRN_vector_sum_v4i16: + case INTRN_vector_sum_v2u32: case INTRN_vector_sum_v2i32: + case INTRN_vector_sum_v16u8: case INTRN_vector_sum_v16i8: + case INTRN_vector_sum_v8u16: case INTRN_vector_sum_v8i16: + case INTRN_vector_sum_v4u32: case INTRN_vector_sum_v4i32: + case INTRN_vector_sum_v2u64: case INTRN_vector_sum_v2i64: + return HandleVectorSum(intrinsicopNode, cgFunc); + + case INTRN_vector_from_scalar_v8u8: case INTRN_vector_from_scalar_v8i8: + case INTRN_vector_from_scalar_v4u16: case INTRN_vector_from_scalar_v4i16: + case INTRN_vector_from_scalar_v2u32: case INTRN_vector_from_scalar_v2i32: + case INTRN_vector_from_scalar_v1u64: case INTRN_vector_from_scalar_v1i64: + case INTRN_vector_from_scalar_v16u8: case INTRN_vector_from_scalar_v16i8: + case INTRN_vector_from_scalar_v8u16: case INTRN_vector_from_scalar_v8i16: + case INTRN_vector_from_scalar_v4u32: case INTRN_vector_from_scalar_v4i32: + case INTRN_vector_from_scalar_v2u64: case INTRN_vector_from_scalar_v2i64: + return HandleVectorFromScalar(intrinsicopNode, cgFunc); + + case INTRN_vector_labssub_low_v8u8: case INTRN_vector_labssub_low_v8i8: + case INTRN_vector_labssub_low_v4u16: case INTRN_vector_labssub_low_v4i16: + case INTRN_vector_labssub_low_v2u32: case INTRN_vector_labssub_low_v2i32: + return HandleVectorAbsSubL(intrinsicopNode, cgFunc, true); + + case INTRN_vector_labssub_high_v8u8: case INTRN_vector_labssub_high_v8i8: + case INTRN_vector_labssub_high_v4u16: case INTRN_vector_labssub_high_v4i16: + case INTRN_vector_labssub_high_v2u32: case INTRN_vector_labssub_high_v2i32: + return HandleVectorAbsSubL(intrinsicopNode, cgFunc, false); + + case INTRN_vector_merge_v8u8: case INTRN_vector_merge_v8i8: + case INTRN_vector_merge_v4u16: case INTRN_vector_merge_v4i16: + case INTRN_vector_merge_v2u32: case INTRN_vector_merge_v2i32: + case INTRN_vector_merge_v1u64: case INTRN_vector_merge_v1i64: + case INTRN_vector_merge_v16u8: case INTRN_vector_merge_v16i8: + case INTRN_vector_merge_v8u16: case INTRN_vector_merge_v8i16: + case INTRN_vector_merge_v4u32: case INTRN_vector_merge_v4i32: + case INTRN_vector_merge_v2u64: case INTRN_vector_merge_v2i64: + return HandleVectorMerge(intrinsicopNode, cgFunc); + + case INTRN_vector_set_element_v8u8: case INTRN_vector_set_element_v8i8: + case INTRN_vector_set_element_v4u16: case INTRN_vector_set_element_v4i16: + case INTRN_vector_set_element_v2u32: case INTRN_vector_set_element_v2i32: + case INTRN_vector_set_element_v1u64: case INTRN_vector_set_element_v1i64: + case INTRN_vector_set_element_v16u8: case INTRN_vector_set_element_v16i8: + case INTRN_vector_set_element_v8u16: case INTRN_vector_set_element_v8i16: + case INTRN_vector_set_element_v4u32: case INTRN_vector_set_element_v4i32: + case INTRN_vector_set_element_v2u64: case INTRN_vector_set_element_v2i64: + return HandleVectorSetElement(intrinsicopNode, cgFunc); + + case INTRN_vector_get_high_v16u8: case INTRN_vector_get_high_v16i8: + case INTRN_vector_get_high_v8u16: case INTRN_vector_get_high_v8i16: + case INTRN_vector_get_high_v4u32: case 
INTRN_vector_get_high_v4i32: + case INTRN_vector_get_high_v2u64: case INTRN_vector_get_high_v2i64: + return HandleVectorGetHigh(intrinsicopNode, cgFunc); + + case INTRN_vector_get_low_v16u8: case INTRN_vector_get_low_v16i8: + case INTRN_vector_get_low_v8u16: case INTRN_vector_get_low_v8i16: + case INTRN_vector_get_low_v4u32: case INTRN_vector_get_low_v4i32: + case INTRN_vector_get_low_v2u64: case INTRN_vector_get_low_v2i64: + return HandleVectorGetLow(intrinsicopNode, cgFunc); + + case INTRN_vector_get_element_v8u8: case INTRN_vector_get_element_v8i8: + case INTRN_vector_get_element_v4u16: case INTRN_vector_get_element_v4i16: + case INTRN_vector_get_element_v2u32: case INTRN_vector_get_element_v2i32: + case INTRN_vector_get_element_v1u64: case INTRN_vector_get_element_v1i64: + case INTRN_vector_get_element_v16u8: case INTRN_vector_get_element_v16i8: + case INTRN_vector_get_element_v8u16: case INTRN_vector_get_element_v8i16: + case INTRN_vector_get_element_v4u32: case INTRN_vector_get_element_v4i32: + case INTRN_vector_get_element_v2u64: case INTRN_vector_get_element_v2i64: + return HandleVectorGetElement(intrinsicopNode, cgFunc); + + case INTRN_vector_pairwise_adalp_v8i8: case INTRN_vector_pairwise_adalp_v4i16: + case INTRN_vector_pairwise_adalp_v2i32: case INTRN_vector_pairwise_adalp_v8u8: + case INTRN_vector_pairwise_adalp_v4u16: case INTRN_vector_pairwise_adalp_v2u32: + case INTRN_vector_pairwise_adalp_v16i8: case INTRN_vector_pairwise_adalp_v8i16: + case INTRN_vector_pairwise_adalp_v4i32: case INTRN_vector_pairwise_adalp_v16u8: + case INTRN_vector_pairwise_adalp_v8u16: case INTRN_vector_pairwise_adalp_v4u32: + return HandleVectorPairwiseAdalp(intrinsicopNode, cgFunc); + + case INTRN_vector_pairwise_add_v8u8: case INTRN_vector_pairwise_add_v8i8: + case INTRN_vector_pairwise_add_v4u16: case INTRN_vector_pairwise_add_v4i16: + case INTRN_vector_pairwise_add_v2u32: case INTRN_vector_pairwise_add_v2i32: + case INTRN_vector_pairwise_add_v16u8: case INTRN_vector_pairwise_add_v16i8: + case INTRN_vector_pairwise_add_v8u16: case INTRN_vector_pairwise_add_v8i16: + case INTRN_vector_pairwise_add_v4u32: case INTRN_vector_pairwise_add_v4i32: + return HandleVectorPairwiseAdd(intrinsicopNode, cgFunc); + + case INTRN_vector_madd_v8u8: case INTRN_vector_madd_v8i8: + case INTRN_vector_madd_v4u16: case INTRN_vector_madd_v4i16: + case INTRN_vector_madd_v2u32: case INTRN_vector_madd_v2i32: + return HandleVectorMadd(intrinsicopNode, cgFunc); + + case INTRN_vector_mull_low_v8u8: case INTRN_vector_mull_low_v8i8: + case INTRN_vector_mull_low_v4u16: case INTRN_vector_mull_low_v4i16: + case INTRN_vector_mull_low_v2u32: case INTRN_vector_mull_low_v2i32: + return HandleVectorMull(intrinsicopNode, cgFunc, true); + + case INTRN_vector_mull_high_v8u8: case INTRN_vector_mull_high_v8i8: + case INTRN_vector_mull_high_v4u16: case INTRN_vector_mull_high_v4i16: + case INTRN_vector_mull_high_v2u32: case INTRN_vector_mull_high_v2i32: + return HandleVectorMull(intrinsicopNode, cgFunc, false); + + case INTRN_vector_narrow_low_v8u16: case INTRN_vector_narrow_low_v8i16: + case INTRN_vector_narrow_low_v4u32: case INTRN_vector_narrow_low_v4i32: + case INTRN_vector_narrow_low_v2u64: case INTRN_vector_narrow_low_v2i64: + return HandleVectorNarrow(intrinsicopNode, cgFunc, true); + + case INTRN_vector_narrow_high_v8u16: case INTRN_vector_narrow_high_v8i16: + case INTRN_vector_narrow_high_v4u32: case INTRN_vector_narrow_high_v4i32: + case INTRN_vector_narrow_high_v2u64: case INTRN_vector_narrow_high_v2i64: + return 
HandleVectorNarrow(intrinsicopNode, cgFunc, false); + + case INTRN_vector_reverse_v8u8: case INTRN_vector_reverse_v8i8: + case INTRN_vector_reverse_v4u16: case INTRN_vector_reverse_v4i16: + case INTRN_vector_reverse_v16u8: case INTRN_vector_reverse_v16i8: + case INTRN_vector_reverse_v8u16: case INTRN_vector_reverse_v8i16: + return HandleVectorReverse(intrinsicopNode, cgFunc, k32BitSize); + + case INTRN_vector_reverse16_v16u8: case INTRN_vector_reverse16_v16i8: + case INTRN_vector_reverse16_v8u8: case INTRN_vector_reverse16_v8i8: + return HandleVectorReverse(intrinsicopNode, cgFunc, k16BitSize); + + case INTRN_vector_reverse64_v16u8: case INTRN_vector_reverse64_v16i8: + case INTRN_vector_reverse64_v8u8: case INTRN_vector_reverse64_v8i8: + case INTRN_vector_reverse64_v8u16: case INTRN_vector_reverse64_v8i16: + case INTRN_vector_reverse64_v4u16: case INTRN_vector_reverse64_v4i16: + case INTRN_vector_reverse64_v4u32: case INTRN_vector_reverse64_v4i32: + case INTRN_vector_reverse64_v2u32: case INTRN_vector_reverse64_v2i32: + return HandleVectorReverse(intrinsicopNode, cgFunc, k64BitSize); + + case INTRN_vector_shr_narrow_low_v8u16: case INTRN_vector_shr_narrow_low_v8i16: + case INTRN_vector_shr_narrow_low_v4u32: case INTRN_vector_shr_narrow_low_v4i32: + case INTRN_vector_shr_narrow_low_v2u64: case INTRN_vector_shr_narrow_low_v2i64: + return HandleVectorShiftNarrow(intrinsicopNode, cgFunc, true); + + case INTRN_vector_subl_low_v8i8: case INTRN_vector_subl_low_v8u8: + case INTRN_vector_subl_low_v4i16: case INTRN_vector_subl_low_v4u16: + case INTRN_vector_subl_low_v2i32: case INTRN_vector_subl_low_v2u32: + return HandleVectorSubWiden(intrinsicopNode, cgFunc, true, false); + + case INTRN_vector_subl_high_v8i8: case INTRN_vector_subl_high_v8u8: + case INTRN_vector_subl_high_v4i16: case INTRN_vector_subl_high_v4u16: + case INTRN_vector_subl_high_v2i32: case INTRN_vector_subl_high_v2u32: + return HandleVectorSubWiden(intrinsicopNode, cgFunc, false, false); + + case INTRN_vector_subw_low_v8i8: case INTRN_vector_subw_low_v8u8: + case INTRN_vector_subw_low_v4i16: case INTRN_vector_subw_low_v4u16: + case INTRN_vector_subw_low_v2i32: case INTRN_vector_subw_low_v2u32: + return HandleVectorSubWiden(intrinsicopNode, cgFunc, true, true); + + case INTRN_vector_subw_high_v8i8: case INTRN_vector_subw_high_v8u8: + case INTRN_vector_subw_high_v4i16: case INTRN_vector_subw_high_v4u16: + case INTRN_vector_subw_high_v2i32: case INTRN_vector_subw_high_v2u32: + return HandleVectorSubWiden(intrinsicopNode, cgFunc, false, true); + + case INTRN_vector_table_lookup_v8u8: case INTRN_vector_table_lookup_v8i8: + case INTRN_vector_table_lookup_v16u8: case INTRN_vector_table_lookup_v16i8: + return HandleVectorTableLookup(intrinsicopNode, cgFunc); + + case INTRN_vector_widen_low_v8u8: case INTRN_vector_widen_low_v8i8: + case INTRN_vector_widen_low_v4u16: case INTRN_vector_widen_low_v4i16: + case INTRN_vector_widen_low_v2u32: case INTRN_vector_widen_low_v2i32: + return HandleVectorWiden(intrinsicopNode, cgFunc, true); + + case INTRN_vector_widen_high_v8u8: case INTRN_vector_widen_high_v8i8: + case INTRN_vector_widen_high_v4u16: case INTRN_vector_widen_high_v4i16: + case INTRN_vector_widen_high_v2u32: case INTRN_vector_widen_high_v2i32: + return HandleVectorWiden(intrinsicopNode, cgFunc, false); + + case INTRN_vector_mov_narrow_v2i64: case INTRN_vector_mov_narrow_v2u64: + case INTRN_vector_mov_narrow_v4i32: case INTRN_vector_mov_narrow_v4u32: + case INTRN_vector_mov_narrow_v8i16: case INTRN_vector_mov_narrow_v8u16: + return 
HandleVectorMovNarrow(intrinsicopNode, cgFunc); + default: + ASSERT(false, "Should not reach here."); + return nullptr; + } +} + +using HandleExprFactory = FunctionFactory; +void InitHandleExprFactory() { + RegisterFactoryFunction(OP_dread, HandleDread); + RegisterFactoryFunction(OP_regread, HandleRegread); + RegisterFactoryFunction(OP_constval, HandleConstVal); + RegisterFactoryFunction(OP_conststr, HandleConstStr); + RegisterFactoryFunction(OP_conststr16, HandleConstStr16); + RegisterFactoryFunction(OP_add, HandleAdd); + RegisterFactoryFunction(OP_CG_array_elem_add, HandleCGArrayElemAdd); + RegisterFactoryFunction(OP_ashr, HandleShift); + RegisterFactoryFunction(OP_lshr, HandleShift); + RegisterFactoryFunction(OP_shl, HandleShift); + RegisterFactoryFunction(OP_ror, HandleRor); + RegisterFactoryFunction(OP_mul, HandleMpy); + RegisterFactoryFunction(OP_div, HandleDiv); + RegisterFactoryFunction(OP_rem, HandleRem); + RegisterFactoryFunction(OP_addrof, HandleAddrof); + RegisterFactoryFunction(OP_addrofoff, HandleAddrofoff); + RegisterFactoryFunction(OP_addroffunc, HandleAddroffunc); + RegisterFactoryFunction(OP_addroflabel, HandleAddrofLabel); + RegisterFactoryFunction(OP_iread, HandleIread); + RegisterFactoryFunction(OP_ireadoff, HandleIreadoff); + RegisterFactoryFunction(OP_ireadfpoff, HandleIreadfpoff); + RegisterFactoryFunction(OP_sub, HandleSub); + RegisterFactoryFunction(OP_band, HandleBand); + RegisterFactoryFunction(OP_bior, HandleBior); + RegisterFactoryFunction(OP_bxor, HandleBxor); + RegisterFactoryFunction(OP_abs, HandleAbs); + RegisterFactoryFunction(OP_bnot, HandleBnot); + RegisterFactoryFunction(OP_sext, HandleExtractBits); + RegisterFactoryFunction(OP_zext, HandleExtractBits); + RegisterFactoryFunction(OP_extractbits, HandleExtractBits); + RegisterFactoryFunction(OP_depositbits, HandleDepositBits); + RegisterFactoryFunction(OP_lnot, HandleLnot); + RegisterFactoryFunction(OP_land, HandleLand); + RegisterFactoryFunction(OP_lior, HandleLor); + RegisterFactoryFunction(OP_min, HandleMin); + RegisterFactoryFunction(OP_max, HandleMax); + RegisterFactoryFunction(OP_neg, HandleNeg); + RegisterFactoryFunction(OP_recip, HandleRecip); + RegisterFactoryFunction(OP_sqrt, HandleSqrt); + RegisterFactoryFunction(OP_ceil, HandleCeil); + RegisterFactoryFunction(OP_floor, HandleFloor); + RegisterFactoryFunction(OP_retype, HandleRetype); + RegisterFactoryFunction(OP_cvt, HandleCvt); + RegisterFactoryFunction(OP_round, HandleRound); + RegisterFactoryFunction(OP_trunc, HandleTrunc); + RegisterFactoryFunction(OP_select, HandleSelect); + RegisterFactoryFunction(OP_le, HandleCmp); + RegisterFactoryFunction(OP_ge, HandleCmp); + RegisterFactoryFunction(OP_gt, HandleCmp); + RegisterFactoryFunction(OP_lt, HandleCmp); + RegisterFactoryFunction(OP_ne, HandleCmp); + RegisterFactoryFunction(OP_eq, HandleCmp); + RegisterFactoryFunction(OP_cmp, HandleCmp); + RegisterFactoryFunction(OP_cmpl, HandleCmp); + RegisterFactoryFunction(OP_cmpg, HandleCmp); + RegisterFactoryFunction(OP_alloca, HandleAlloca); + RegisterFactoryFunction(OP_malloc, HandleMalloc); + RegisterFactoryFunction(OP_gcmalloc, HandleGCMalloc); + RegisterFactoryFunction(OP_gcpermalloc, HandleGCMalloc); + RegisterFactoryFunction(OP_gcmallocjarray, HandleJarrayMalloc); + RegisterFactoryFunction(OP_gcpermallocjarray, HandleJarrayMalloc); + RegisterFactoryFunction(OP_intrinsicop, HandleIntrinOp); +} + +void HandleLabel(StmtNode &stmt, CGFunc &cgFunc) { + ASSERT(stmt.GetOpCode() == OP_label, "error"); + auto &label = static_cast(stmt); + BB *newBB = 
cgFunc.StartNewBBImpl(false, label); + newBB->AddLabel(label.GetLabelIdx()); + if (newBB->GetId() == 1) { + newBB->SetFrequency(kFreqBase); + } + cgFunc.SetLab2BBMap(newBB->GetLabIdx(), *newBB); + cgFunc.SetCurBB(*newBB); + + if (cgFunc.GetCleanupLabel() == &label) { + cgFunc.SetCleanupBB(*newBB); + } else if (cgFunc.GetReturnLabel() == &label) { + cgFunc.SetReturnBB(*newBB); + } +} + +void HandleGoto(StmtNode &stmt, CGFunc &cgFunc) { + cgFunc.UpdateFrequency(stmt); + auto &gotoNode = static_cast<GotoNode&>(stmt); + cgFunc.SetCurBBKind(BB::kBBGoto); + cgFunc.SelectGoto(gotoNode); + cgFunc.SetCurBB(*cgFunc.StartNewBB(gotoNode)); + ASSERT(&stmt == &gotoNode, "stmt must be same as gotoNode"); + + if ((gotoNode.GetNext() != nullptr) && (gotoNode.GetNext()->GetOpCode() != OP_label)) { + ASSERT(cgFunc.GetCurBB()->GetPrev()->GetLastStmt() == &stmt, "check the relation between BB and stmt"); + } +} + +void HandleIgoto(StmtNode &stmt, CGFunc &cgFunc) { + auto &igotoNode = static_cast<UnaryStmtNode&>(stmt); + Operand *targetOpnd = cgFunc.HandleExpr(stmt, *igotoNode.Opnd(0)); + cgFunc.SelectIgoto(targetOpnd); + cgFunc.SetCurBB(*cgFunc.StartNewBB(igotoNode)); +} + +void HandleCondbr(StmtNode &stmt, CGFunc &cgFunc) { + cgFunc.UpdateFrequency(stmt); + auto &condGotoNode = static_cast<CondGotoNode&>(stmt); + BaseNode *condNode = condGotoNode.Opnd(0); + ASSERT(condNode != nullptr, "expect first operand of cond br"); + Opcode condOp = condGotoNode.GetOpCode(); + if (condNode->GetOpCode() == OP_constval) { + auto *constValNode = static_cast<ConstvalNode*>(condNode); + if ((constValNode->GetConstVal()->IsZero() && (OP_brfalse == condOp)) || + (!constValNode->GetConstVal()->IsZero() && (OP_brtrue == condOp))) { + auto *gotoStmt = cgFunc.GetMemoryPool()->New<GotoNode>(OP_goto); + gotoStmt->SetOffset(condGotoNode.GetOffset()); + HandleGoto(*gotoStmt, cgFunc); + auto *labelStmt = cgFunc.GetMemoryPool()->New<LabelNode>(); + labelStmt->SetLabelIdx(cgFunc.CreateLabel()); + HandleLabel(*labelStmt, cgFunc); + } + return; + } + cgFunc.SetCurBBKind(BB::kBBIf); + /* if condNode is not a cmp node, cmp it with zero. 
*/ + if (!kOpcodeInfo.IsCompare(condNode->GetOpCode())) { + Operand *opnd0 = cgFunc.HandleExpr(condGotoNode, *condNode); + PrimType primType = condNode->GetPrimType(); + Operand *zeroOpnd = nullptr; + if (IsPrimitiveInteger(primType)) { + zeroOpnd = &cgFunc.CreateImmOperand(primType, 0); + } else { + ASSERT(((PTY_f32 == primType) || (PTY_f64 == primType)), "we don't support half-precision FP operands yet"); + zeroOpnd = &cgFunc.CreateImmOperand(primType, 0); + } + cgFunc.SelectCondGoto(condGotoNode, *opnd0, *zeroOpnd); + cgFunc.SetCurBB(*cgFunc.StartNewBB(condGotoNode)); + return; + } + /* + * Special case: + * bgt (cmp (op0, op1), 0) ==> + * bgt (op0, op1) + * but skip the case cmp(op0, 0) + */ + BaseNode *op0 = condNode->Opnd(0); + ASSERT(op0 != nullptr, "get first opnd of a condNode failed"); + BaseNode *op1 = condNode->Opnd(1); + ASSERT(op1 != nullptr, "get second opnd of a condNode failed"); + if ((op0->GetOpCode() == OP_cmp) && (op1->GetOpCode() == OP_constval)) { + auto *constValNode = static_cast(op1); + MIRConst *mirConst = constValNode->GetConstVal(); + auto *cmpNode = static_cast(op0); + bool skip = false; + if (cmpNode->Opnd(1)->GetOpCode() == OP_constval) { + auto *constVal = static_cast(cmpNode->Opnd(1))->GetConstVal(); + if (constVal->IsZero()) { + skip = true; + } + } + if (!skip && mirConst->IsZero()) { + cgFunc.SelectCondSpecialCase1(condGotoNode, *op0); + cgFunc.SetCurBB(*cgFunc.StartNewBB(condGotoNode)); + return; + } + } + /* + * Special case: + * brfalse(ge (cmpg (op0, op1), 0) ==> + * fcmp op1, op2 + * blo + */ + if ((condGotoNode.GetOpCode() == OP_brfalse) && (condNode->GetOpCode() == OP_ge) && + (op0->GetOpCode() == OP_cmpg) && (op1->GetOpCode() == OP_constval)) { + auto *constValNode = static_cast(op1); + MIRConst *mirConst = constValNode->GetConstVal(); + if (mirConst->IsZero()) { + cgFunc.SelectCondSpecialCase2(condGotoNode, *op0); + cgFunc.SetCurBB(*cgFunc.StartNewBB(condGotoNode)); + return; + } + } + Operand *opnd0 = cgFunc.HandleExpr(*condNode, *condNode->Opnd(0)); + Operand *opnd1 = cgFunc.HandleExpr(*condNode, *condNode->Opnd(1)); + cgFunc.SelectCondGoto(condGotoNode, *opnd0, *opnd1); + cgFunc.SetCurBB(*cgFunc.StartNewBB(condGotoNode)); +} + +void HandleReturn(StmtNode &stmt, CGFunc &cgFunc) { + cgFunc.UpdateFrequency(stmt); + auto &retNode = static_cast(stmt); + cgFunc.HandleRetCleanup(retNode); + ASSERT(retNode.NumOpnds() <= 1, "NYI return nodes number > 1"); + Operand *opnd = nullptr; + if (retNode.NumOpnds() != 0) { + if (!cgFunc.GetFunction().StructReturnedInRegs()) { + opnd = cgFunc.HandleExpr(retNode, *retNode.Opnd(0)); + } else { + cgFunc.SelectReturnSendOfStructInRegs(retNode.Opnd(0)); + } + } + cgFunc.SelectReturn(opnd); + cgFunc.SetCurBBKind(BB::kBBGoto); + cgFunc.SetCurBB(*cgFunc.StartNewBB(retNode)); +} + +void HandleCall(StmtNode &stmt, CGFunc &cgFunc) { + cgFunc.UpdateFrequency(stmt); + auto &callNode = static_cast(stmt); + cgFunc.SelectCall(callNode); + if (cgFunc.GetCurBB()->GetKind() != BB::kBBFallthru) { + cgFunc.SetCurBB(*cgFunc.StartNewBB(callNode)); + } + + StmtNode *prevStmt = stmt.GetPrev(); + if (prevStmt == nullptr || prevStmt->GetOpCode() != OP_catch) { + return; + } + if ((stmt.GetNext() != nullptr) && (stmt.GetNext()->GetOpCode() == OP_label)) { + cgFunc.SetCurBB(*cgFunc.StartNewBBImpl(true, stmt)); + } + cgFunc.HandleCatch(); +} + +void HandleICall(StmtNode &stmt, CGFunc &cgFunc) { + cgFunc.UpdateFrequency(stmt); + auto &icallNode = static_cast(stmt); + cgFunc.GetCurBB()->SetHasCall(); + Operand *opnd0 = 
cgFunc.HandleExpr(stmt, *icallNode.GetNopndAt(0)); + cgFunc.SelectIcall(icallNode, *opnd0); + if (cgFunc.GetCurBB()->GetKind() != BB::kBBFallthru) { + cgFunc.SetCurBB(*cgFunc.StartNewBB(icallNode)); + } +} + +void HandleIntrinCall(StmtNode &stmt, CGFunc &cgFunc) { + auto &call = static_cast(stmt); + cgFunc.SelectIntrinCall(call); +} + +void HandleDassign(StmtNode &stmt, CGFunc &cgFunc) { + auto &dassignNode = static_cast(stmt); + ASSERT(dassignNode.GetOpCode() == OP_dassign, "expect dassign"); + BaseNode *rhs = dassignNode.GetRHS(); + ASSERT(rhs != nullptr, "get rhs of dassignNode failed"); + if (rhs->GetOpCode() == OP_malloc || rhs->GetOpCode() == OP_alloca) { + UnaryStmtNode &uNode = static_cast(stmt); + Operand *opnd0 = cgFunc.HandleExpr(dassignNode, *(uNode.Opnd())); + cgFunc.SelectDassign(dassignNode, *opnd0); + return; + } else if (rhs->GetPrimType() == PTY_agg) { + cgFunc.SelectAggDassign(dassignNode); + return; + } + bool isSaveRetvalToLocal = false; + if (rhs->GetOpCode() == OP_regread) { + isSaveRetvalToLocal = (static_cast(rhs)->GetRegIdx() == -kSregRetval0); + } + Operand *opnd0 = cgFunc.HandleExpr(dassignNode, *rhs); + cgFunc.SelectDassign(dassignNode, *opnd0); + if (isSaveRetvalToLocal) { + cgFunc.GetCurBB()->GetLastInsn()->MarkAsSaveRetValToLocal(); + } +} + +void HandleDassignoff(StmtNode &stmt, CGFunc &cgFunc) { + auto &dassignoffNode = static_cast(stmt); + BaseNode *rhs = dassignoffNode.GetRHS(); + CHECK_FATAL(rhs->GetOpCode() == OP_constval, "dassignoffNode without constval"); + Operand *opnd0 = cgFunc.HandleExpr(dassignoffNode, *rhs); + cgFunc.SelectDassignoff(dassignoffNode, *opnd0); +} + +void HandleRegassign(StmtNode &stmt, CGFunc &cgFunc) { + ASSERT(stmt.GetOpCode() == OP_regassign, "expect regAssign"); + auto ®AssignNode = static_cast(stmt); + bool isSaveRetvalToLocal = false; + BaseNode *operand = regAssignNode.Opnd(0); + ASSERT(operand != nullptr, "get operand of regassignNode failed"); + if (operand->GetOpCode() == OP_regread) { + isSaveRetvalToLocal = (static_cast(operand)->GetRegIdx() == -kSregRetval0); + } + Operand *opnd0 = cgFunc.HandleExpr(regAssignNode, *operand); + cgFunc.SelectRegassign(regAssignNode, *opnd0); + if (isSaveRetvalToLocal) { + cgFunc.GetCurBB()->GetLastInsn()->MarkAsSaveRetValToLocal(); + } +} + +void HandleIassign(StmtNode &stmt, CGFunc &cgFunc) { + ASSERT(stmt.GetOpCode() == OP_iassign, "expect stmt"); + auto &iassignNode = static_cast(stmt); + if ((iassignNode.GetRHS() != nullptr) && iassignNode.GetRHS()->GetPrimType() != PTY_agg) { + cgFunc.SelectIassign(iassignNode); + } else { + BaseNode *addrNode = iassignNode.Opnd(0); + if (addrNode == nullptr) { + return; + } + cgFunc.SelectAggIassign(iassignNode, *cgFunc.HandleExpr(stmt, *addrNode)); + } +} + +void HandleIassignoff(StmtNode &stmt, CGFunc &cgFunc) { + ASSERT(stmt.GetOpCode() == OP_iassignoff, "expect iassignoff"); + auto &iassignoffNode = static_cast(stmt); + cgFunc.SelectIassignoff(iassignoffNode); +} + +void HandleIassignfpoff(StmtNode &stmt, CGFunc &cgFunc) { + ASSERT(stmt.GetOpCode() == OP_iassignfpoff, "expect iassignfpoff"); + auto &iassignfpoffNode = static_cast(stmt); + cgFunc.SelectIassignfpoff(iassignfpoffNode, *cgFunc.HandleExpr(stmt, *stmt.Opnd(0))); +} + +void HandleIassignspoff(StmtNode &stmt, CGFunc &cgFunc) { + ASSERT(stmt.GetOpCode() == OP_iassignspoff, "expect iassignspoff"); + auto &baseNode = static_cast(stmt); /* same as FP */ + BaseNode *rhs = baseNode.GetRHS(); + ASSERT(rhs != nullptr, "get rhs of iassignspoffNode failed"); + Operand *opnd0 = 
cgFunc.HandleExpr(baseNode, *rhs); + cgFunc.SelectIassignspoff(baseNode.GetPrimType(), baseNode.GetOffset(), *opnd0); +} + +void HandleBlkassignoff(StmtNode &stmt, CGFunc &cgFunc) { + ASSERT(stmt.GetOpCode() == OP_blkassignoff, "expect blkassignoff"); + auto &baseNode = static_cast(stmt); + Operand *src = cgFunc.HandleExpr(baseNode, *baseNode.Opnd(1)); + cgFunc.SelectBlkassignoff(baseNode, src); +} + +void HandleEval(const StmtNode &stmt, CGFunc &cgFunc) { + (void)cgFunc.HandleExpr(stmt, *static_cast(stmt).Opnd(0)); +} + +void HandleRangeGoto(StmtNode &stmt, CGFunc &cgFunc) { + cgFunc.UpdateFrequency(stmt); + auto &rangeGotoNode = static_cast(stmt); + cgFunc.SetCurBBKind(BB::kBBRangeGoto); + cgFunc.SelectRangeGoto(rangeGotoNode, *cgFunc.HandleExpr(rangeGotoNode, *rangeGotoNode.Opnd(0))); + cgFunc.SetCurBB(*cgFunc.StartNewBB(rangeGotoNode)); +} + +void HandleMembar(StmtNode &stmt, CGFunc &cgFunc) { + cgFunc.SelectMembar(stmt); + if (stmt.GetOpCode() != OP_membarrelease) { + return; + } +#if TARGAARCH64 || TARGRISCV64 + if (CGOptions::UseBarriersForVolatile()) { + return; + } +#endif + StmtNode *secondStmt = stmt.GetRealNext(); + if (secondStmt == nullptr || + ((secondStmt->GetOpCode() != OP_iassign) && (secondStmt->GetOpCode() != OP_dassign))) { + return; + } + StmtNode *thirdStmt = secondStmt->GetRealNext(); + if (thirdStmt == nullptr || thirdStmt->GetOpCode() != OP_membarstoreload) { + return; + } + cgFunc.SetVolStore(true); + cgFunc.SetVolReleaseInsn(cgFunc.GetCurBB()->GetLastInsn()); +} + +void HandleComment(StmtNode &stmt, CGFunc &cgFunc) { + if (cgFunc.GetCG()->GenerateVerboseAsm() || cgFunc.GetCG()->GenerateVerboseCG()) { + cgFunc.SelectComment(static_cast(stmt)); + } +} + +void HandleCatchOp(const StmtNode &stmt, const CGFunc &cgFunc) { + (void)stmt; + (void)cgFunc; + ASSERT(stmt.GetNext()->GetOpCode() == OP_call, "The next statement of OP_catch should be OP_call."); +} + +void HandleAssertNull(StmtNode &stmt, CGFunc &cgFunc) { + auto &cgAssertNode = static_cast(stmt); + cgFunc.SelectAssertNull(cgAssertNode); +} + +void HandleAbort(const StmtNode &stmt, CGFunc &cgFunc) { + (void)stmt; + cgFunc.SelectAbort(); +} + +void HandleAsm(StmtNode &stmt, CGFunc &cgFunc) { + cgFunc.SelectAsm(static_cast(stmt)); +} + +using HandleStmtFactory = FunctionFactory; +void InitHandleStmtFactory() { + RegisterFactoryFunction(OP_label, HandleLabel); + RegisterFactoryFunction(OP_goto, HandleGoto); + RegisterFactoryFunction(OP_igoto, HandleIgoto); + RegisterFactoryFunction(OP_brfalse, HandleCondbr); + RegisterFactoryFunction(OP_brtrue, HandleCondbr); + RegisterFactoryFunction(OP_return, HandleReturn); + RegisterFactoryFunction(OP_call, HandleCall); + RegisterFactoryFunction(OP_icall, HandleICall); + RegisterFactoryFunction(OP_icallproto, HandleICall); + RegisterFactoryFunction(OP_intrinsiccall, HandleIntrinCall); + RegisterFactoryFunction(OP_intrinsiccallassigned, HandleIntrinCall); + RegisterFactoryFunction(OP_intrinsiccallwithtype, HandleIntrinCall); + RegisterFactoryFunction(OP_intrinsiccallwithtypeassigned, HandleIntrinCall); + RegisterFactoryFunction(OP_dassign, HandleDassign); + RegisterFactoryFunction(OP_dassignoff, HandleDassignoff); + RegisterFactoryFunction(OP_regassign, HandleRegassign); + RegisterFactoryFunction(OP_iassign, HandleIassign); + RegisterFactoryFunction(OP_iassignoff, HandleIassignoff); + RegisterFactoryFunction(OP_iassignfpoff, HandleIassignfpoff); + RegisterFactoryFunction(OP_iassignspoff, HandleIassignspoff); + RegisterFactoryFunction(OP_blkassignoff, HandleBlkassignoff); + 
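// Note: GenerateInstruction dispatches every statement through this factory and fails fast (CHECK_FATAL) on any opcode without a registered handler. +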
RegisterFactoryFunction(OP_eval, HandleEval); + RegisterFactoryFunction(OP_rangegoto, HandleRangeGoto); + RegisterFactoryFunction(OP_membarrelease, HandleMembar); + RegisterFactoryFunction(OP_membaracquire, HandleMembar); + RegisterFactoryFunction(OP_membarstoreload, HandleMembar); + RegisterFactoryFunction(OP_membarstorestore, HandleMembar); + RegisterFactoryFunction(OP_comment, HandleComment); + RegisterFactoryFunction(OP_catch, HandleCatchOp); + RegisterFactoryFunction(OP_abort, HandleAbort); + RegisterFactoryFunction(OP_assertnonnull, HandleAssertNull); + RegisterFactoryFunction(OP_callassertnonnull, HandleAssertNull); + RegisterFactoryFunction(OP_assignassertnonnull, HandleAssertNull); + RegisterFactoryFunction(OP_returnassertnonnull, HandleAssertNull); + RegisterFactoryFunction(OP_asm, HandleAsm); +} + +CGFunc::CGFunc(MIRModule &mod, CG &cg, MIRFunction &mirFunc, BECommon &beCommon, MemPool &memPool, + StackMemPool &stackMp, MapleAllocator &allocator, uint32 funcId) + : vRegTable(allocator.Adapter()), + bbVec(allocator.Adapter()), + vRegOperandTable(allocator.Adapter()), + pRegSpillMemOperands(allocator.Adapter()), + spillRegMemOperands(allocator.Adapter()), + reuseSpillLocMem(allocator.Adapter()), + labelMap(std::less(), allocator.Adapter()), + vregsToPregsMap(std::less(), allocator.Adapter()), + hasVLAOrAlloca(mirFunc.HasVlaOrAlloca()), + dbgParamCallFrameLocations(allocator.Adapter()), + dbgLocalCallFrameLocations(allocator.Adapter()), + cg(&cg), + mirModule(mod), + memPool(&memPool), + stackMp(stackMp), + func(mirFunc), + exitBBVec(allocator.Adapter()), + noReturnCallBBVec(allocator.Adapter()), + extendSet(allocator.Adapter()), + lab2BBMap(allocator.Adapter()), + beCommon(beCommon), + funcScopeAllocator(&allocator), + emitStVec(allocator.Adapter()), + switchLabelCnt(allocator.Adapter()), +#if TARGARM32 + sortedBBs(allocator.Adapter()), + lrVec(allocator.Adapter()), +#endif /* TARGARM32 */ + loops(allocator.Adapter()), + lmbcParamVec(allocator.Adapter()), + scpIdSet(allocator.Adapter()), + shortFuncName(cg.ExtractFuncName(mirFunc.GetName()) + "." 
+ std::to_string(funcId), &memPool) { + mirModule.SetCurFunction(&func); + SetMemlayout(*GetCG()->CreateMemLayout(memPool, beCommon, func, allocator)); + GetMemlayout()->SetCurrFunction(*this); + SetTargetRegInfo(*GetCG()->CreateRegisterInfo(memPool, allocator)); + GetTargetRegInfo()->SetCurrFunction(*this); + if (func.GetAttr(FUNCATTR_varargs) || func.HasVlaOrAlloca()) { + SetHasVLAOrAlloca(true); + } + SetHasAlloca(func.HasVlaOrAlloca()); + + dummyBB = CreateNewBB(); + vRegCount = firstMapleIrVRegNO + func.GetPregTab()->Size(); + firstNonPregVRegNO = vRegCount; + /* maximum register count initial be increased by 1024 */ + maxRegCount = vRegCount + 1024; + if (func.GetMayWriteToAddrofStack()) { + SetStackProtectInfo(kAddrofStack); + } + + insnBuilder = memPool.New(memPool); + opndBuilder = memPool.New(memPool, func.GetPregTab()->Size()); + + vRegTable.resize(maxRegCount); + /* func.GetPregTab()->_preg_table[0] is nullptr, so skip it */ + ASSERT(func.GetPregTab()->PregFromPregIdx(0) == nullptr, "PregFromPregIdx(0) must be nullptr"); + for (size_t i = 1; i < func.GetPregTab()->Size(); ++i) { + PrimType primType = func.GetPregTab()->PregFromPregIdx(i)->GetPrimType(); + uint32 byteLen = GetPrimTypeSize(primType); + if (byteLen < k4ByteSize) { + byteLen = k4ByteSize; + } + if (primType == PTY_u128 || primType == PTY_i128) { + byteLen = k8ByteSize; + } + new (&GetVirtualRegNodeFromPseudoRegIdx(i)) VirtualRegNode(GetRegTyFromPrimTy(primType), byteLen); + } + firstCGGenLabelIdx = func.GetLabelTab()->GetLabelTableSize(); + lSymSize = 0; + if (func.GetSymTab()) { + lSymSize = func.GetSymTab()->GetSymbolTableSize(); + } +} + +CGFunc::~CGFunc() { + mirModule.SetCurFunction(nullptr); +} + +Operand *CGFunc::HandleExpr(const BaseNode &parent, BaseNode &expr) { + auto function = CreateProductFunction(expr.GetOpCode()); + CHECK_FATAL(function != nullptr, "unsupported opCode in HandleExpr()"); + return function(parent, expr, *this); +} + +StmtNode *CGFunc::HandleFirstStmt() { + BlockNode *block = func.GetBody(); + + ASSERT(block != nullptr, "get func body block failed in CGFunc::GenerateInstruction"); + StmtNode *stmt = block->GetFirst(); + if (stmt == nullptr) { + return nullptr; + } + bool withFreqInfo = func.HasFreqMap() && !func.GetLastFreqMap().empty(); + if (withFreqInfo) { + frequency = kFreqBase; + } + ASSERT(stmt->GetOpCode() == OP_label, "The first statement should be a label"); + HandleLabel(*stmt, *this); + firstBB = curBB; + stmt = stmt->GetNext(); + if (stmt == nullptr) { + return nullptr; + } + curBB = StartNewBBImpl(false, *stmt); + curBB->SetFrequency(frequency); + if (JAVALANG) { + HandleRCCall(true); + } + return stmt; +} + +bool CGFunc::CheckSkipMembarOp(const StmtNode &stmt) { + StmtNode *nextStmt = stmt.GetRealNext(); + if (nextStmt == nullptr) { + return false; + } + + Opcode opCode = stmt.GetOpCode(); + if (((opCode == OP_membaracquire) || (opCode == OP_membarrelease)) && (nextStmt->GetOpCode() == stmt.GetOpCode())) { + return true; + } + if ((opCode == OP_membarstorestore) && (nextStmt->GetOpCode() == OP_membarrelease)) { + return true; + } + if ((opCode == OP_membarstorestore) && func.IsConstructor() && MemBarOpt(stmt)) { + return true; + } +#if TARGAARCH64 || TARGRISCV64 + if ((!CGOptions::UseBarriersForVolatile()) && (nextStmt->GetOpCode() == OP_membaracquire)) { + isVolLoad = true; + } +#endif /* TARGAARCH64 */ + return false; +} + +void CGFunc::RemoveUnreachableBB() { + OptimizationPattern *pattern = memPool->New(*this); + for (BB *bb = firstBB; bb != nullptr; bb = 
bb->GetNext()) { + (void)pattern->Optimize(*bb); + if (bb->GetPreds().size() == 0 && bb->GetSuccs().size() == 0) { + auto it = find(noReturnCallBBVec.begin(), noReturnCallBBVec.end(), bb); + if (it != noReturnCallBBVec.end()) { + (void)noReturnCallBBVec.erase(it); + } + } + } +} + +void CGFunc::GenerateLoc(StmtNode *stmt, SrcPosition &lastSrcPos, SrcPosition &lastMplPos) { + /* insert Insn for .loc before cg for the stmt */ + if (cg->GetCGOptions().WithLoc() && stmt->op != OP_label && stmt->op != OP_comment) { + /* if original src file location info is availiable for this stmt, + * use it and skip mpl file location info for this stmt + */ + bool hasLoc = false; + SrcPosition &newSrcPos = stmt->GetSrcPos(); + if (!newSrcPos.IsValid()) { + return; + } + + if (cg->GetCGOptions().WithSrc() && !lastSrcPos.IsEq(newSrcPos)) { + /* .loc for original src file */ + Operand *o0 = CreateDbgImmOperand(newSrcPos.FileNum()); + Operand *o1 = CreateDbgImmOperand(newSrcPos.LineNum()); + Operand *o2 = CreateDbgImmOperand(newSrcPos.Column()); + Insn &loc = + GetInsnBuilder()->BuildDbgInsn(mpldbg::OP_DBG_loc).AddOpndChain(*o0).AddOpndChain(*o1).AddOpndChain(*o2); + curBB->AppendInsn(loc); + lastSrcPos.UpdateWith(newSrcPos); + hasLoc = true; + } + /* .loc for mpl file, skip if already has .loc from src for this stmt */ + if (cg->GetCGOptions().WithMpl() && !hasLoc && !lastMplPos.IsEqMpl(newSrcPos)) { + Operand *o0 = CreateDbgImmOperand(1); + Operand *o1 = CreateDbgImmOperand(newSrcPos.MplLineNum()); + Operand *o2 = CreateDbgImmOperand(0); + Insn &loc = + GetInsnBuilder()->BuildDbgInsn(mpldbg::OP_DBG_loc).AddOpndChain(*o0).AddOpndChain(*o1).AddOpndChain(*o2); + curBB->AppendInsn(loc); + lastMplPos.UpdateWith(newSrcPos); + } + } +} + +void CGFunc::GenerateScopeLabel(StmtNode *stmt, SrcPosition &lastSrcPos, bool &posDone) { + /* insert lable for scope begin and end .LScp.1B .LScp.1E */ + MIRFunction &mirFunc = GetFunction(); + DebugInfo *dbgInfo = GetMirModule().GetDbgInfo(); + if (cg->GetCGOptions().WithDwarf() && stmt->op != OP_comment) { + SrcPosition newSrcPos = stmt->GetSrcPos(); + if (!newSrcPos.IsValid()) { + return; + } + // check if newSrcPos is done + if (posDone && lastSrcPos.IsEq(newSrcPos)) { + return; + } + std::unordered_set idSetB; + std::unordered_set idSetE; + idSetB.clear(); + idSetE.clear(); + dbgInfo->GetCrossScopeId(&mirFunc, idSetB, true, lastSrcPos, newSrcPos); + dbgInfo->GetCrossScopeId(&mirFunc, idSetE, false, lastSrcPos, newSrcPos); + for (auto id : idSetE) { + // skip if begin label is not in yet + if (scpIdSet.find(id) == scpIdSet.end()) { + continue; + } + Operand *o0 = CreateDbgImmOperand(id); + Operand *o1 = CreateDbgImmOperand(1); + Insn &scope = GetInsnBuilder()->BuildDbgInsn(mpldbg::OP_DBG_scope).AddOpndChain(*o0).AddOpndChain(*o1); + curBB->AppendInsn(scope); + (void)scpIdSet.erase(id); + } + for (auto id : idSetB) { + // skip if begin label is already in + if (scpIdSet.find(id) != scpIdSet.end()) { + continue; + } + Operand *o0 = CreateDbgImmOperand(id); + Operand *o1 = CreateDbgImmOperand(0); + Insn &scope = GetInsnBuilder()->BuildDbgInsn(mpldbg::OP_DBG_scope).AddOpndChain(*o0).AddOpndChain(*o1); + curBB->AppendInsn(scope); + (void)scpIdSet.insert(id); + } + lastSrcPos.UpdateWith(newSrcPos); + posDone = false; + } +} + +int32 CGFunc::GetFreqFromStmt(uint32 stmtId) { + int32 freq = GetFunction().GetFreqFromLastStmt(stmtId); + if (freq != -1) { + return freq; + } + return GetFunction().GetFreqFromFirstStmt(stmtId); +} + +LmbcFormalParamInfo *CGFunc::GetLmbcFormalParamInfo(uint32 
offset) { + MapleVector<LmbcFormalParamInfo*> &paramVec = GetLmbcParamVec(); + for (auto *param : paramVec) { + uint32 paramOffset = param->GetOffset(); + uint32 paramSize = param->GetSize(); + if (paramOffset <= offset && offset < (paramOffset + paramSize)) { + return param; + } + } + return nullptr; +} + +/* + * For formals of lmbc, the formal list is deleted if there is no + * passing of aggregate by value. + */ +void CGFunc::CreateLmbcFormalParamInfo() { + if (GetMirModule().GetFlavor() != MIRFlavor::kFlavorLmbc) { + return; + } + PrimType primType; + uint32 offset; + uint32 typeSize; + MIRFunction &lmbcFunc = GetFunction(); + if (lmbcFunc.GetFormalCount() > 0) { + /* Whenever lmbc cannot delete call type info, the prototype is available */ + uint32 stackOffset = 0; + for (size_t idx = 0; idx < lmbcFunc.GetFormalCount(); ++idx) { + MIRSymbol *sym = lmbcFunc.GetFormal(idx); + MIRType *type; + TyIdx tyIdx; + if (sym) { + tyIdx = lmbcFunc.GetFormalDefVec()[idx].formalTyIdx; + type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx); + } else { + FormalDef vec = const_cast<MIRFunction*>(GetBecommon().GetMIRModule().CurFunction())->GetFormalDefAt(idx); + tyIdx = vec.formalTyIdx; + type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx); + } + primType = type->GetPrimType(); + offset = stackOffset; + typeSize = static_cast<uint32>(GetBecommon().GetTypeSize(tyIdx)); + stackOffset += (typeSize + 7) & (-8); + LmbcFormalParamInfo *info = GetMemoryPool()->New<LmbcFormalParamInfo>(primType, offset, typeSize); + lmbcParamVec.push_back(info); + if (idx == 0 && lmbcFunc.IsFirstArgReturn()) { + info->SetIsReturn(); + } + if (type->GetKind() == kTypeStruct) { + MIRStructType *structType = static_cast<MIRStructType*>(type); + info->SetType(structType); + uint32 fpSize; + uint32 numFpRegs = FloatParamRegRequired(structType, fpSize); + if (numFpRegs > 0) { + info->SetIsPureFloat(); + info->SetNumRegs(numFpRegs); + info->SetFpSize(fpSize); + } + } + } + } else { + /* No aggregate pass by value here */ + for (StmtNode *stmt = lmbcFunc.GetBody()->GetFirst(); stmt != nullptr; stmt = stmt->GetNext()) { + if (stmt == nullptr) { + break; + } + if (stmt->GetOpCode() == OP_label) { + continue; + } + if (stmt->GetOpCode() != OP_regassign) { + break; + } + RegassignNode *regAssignNode = static_cast<RegassignNode*>(stmt); + BaseNode *operand = regAssignNode->Opnd(0); + if (operand->GetOpCode() != OP_ireadfpoff) { + break; + } + IreadFPoffNode *ireadNode = static_cast<IreadFPoffNode*>(operand); + primType = ireadNode->GetPrimType(); + if (ireadNode->GetOffset() < 0) { + continue; + } + offset = static_cast<uint32>(ireadNode->GetOffset()); + typeSize = GetPrimTypeSize(primType); + CHECK_FATAL((offset % k8ByteSize) == 0, ""); /* scalar only, no struct for now */ + LmbcFormalParamInfo *info = GetMemoryPool()->New<LmbcFormalParamInfo>(primType, offset, typeSize); + lmbcParamVec.push_back(info); + } + } + std::sort(lmbcParamVec.begin(), lmbcParamVec.end(), + [] (const LmbcFormalParamInfo *x, const LmbcFormalParamInfo *y) + { return x->GetOffset() < y->GetOffset(); } + ); + + /* When a scalar param address is taken, its regassign is not in the 1st block */ + for (StmtNode *stmt = lmbcFunc.GetBody()->GetFirst(); stmt != nullptr; stmt = stmt->GetNext()) { + if (stmt == nullptr) { + break; + } + if (stmt->GetOpCode() == OP_label) { + continue; + } + if (stmt->GetOpCode() != OP_regassign) { + break; + } + RegassignNode *regAssignNode = static_cast<RegassignNode*>(stmt); + BaseNode *operand = regAssignNode->Opnd(0); + if (operand->GetOpCode() != OP_ireadfpoff) { + break; + } + IreadFPoffNode *ireadNode = static_cast<IreadFPoffNode*>(operand); + if (ireadNode->GetOffset() < 0) { + continue; + } + 
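// Map this ireadfpoff offset back to its formal and record that the formal is read through a regassign. +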
LmbcFormalParamInfo *info = GetLmbcFormalParamInfo(static_cast(ireadNode->GetOffset())); + ASSERT_NOT_NULL(info); + info->SetHasRegassign(); + } + + AssignLmbcFormalParams(); +} + +void CGFunc::GenerateInstruction() { + InitHandleExprFactory(); + InitHandleStmtFactory(); + StmtNode *secondStmt = HandleFirstStmt(); + + /* First Pass: Creates the doubly-linked list of BBs (next,prev) */ + volReleaseInsn = nullptr; + + SrcPosition pos = GetFunction().GetScope()->GetRangeLow(); + if (!pos.IsValid()) { + pos = GetFunction().GetSrcPosition(); + } + SrcPosition lastScpPos = pos; + SrcPosition lastStmtPos = pos; + SrcPosition lastLocPos = SrcPosition(); + SrcPosition lastMplPos = SrcPosition(); + std::set bbFreqSet; + bool posDone = false; + scpIdSet.clear(); + for (StmtNode *stmt = secondStmt; stmt != nullptr; stmt = stmt->GetNext()) { + /* insert Insn for scope begin/end labels */ + if (lastStmtPos.IsBfOrEq(stmt->GetSrcPos())) { + GenerateScopeLabel(stmt, lastScpPos, posDone); + lastStmtPos = stmt->GetSrcPos(); + } + /* insert Insn for .loc before cg for the stmt */ + GenerateLoc(stmt, lastLocPos, lastMplPos); + BB *tmpBB = curBB; + isVolLoad = false; + if (CheckSkipMembarOp(*stmt)) { + continue; + } + bool tempLoad = isVolLoad; + auto function = CreateProductFunction(stmt->GetOpCode()); + CHECK_FATAL(function != nullptr, "unsupported opCode or has been lowered before"); + function(*stmt, *this); + /* skip the membar acquire if it is just after the iread. ldr + membaraquire->ldar */ + if (tempLoad && !isVolLoad) { + stmt = stmt->GetNext(); + } + int32 freq = GetFreqFromStmt(stmt->GetStmtID()); + if (freq != -1) { + if (tmpBB != curBB) { + if (curBB->GetFirstInsn() == nullptr && curBB->GetLabIdx() == 0 && bbFreqSet.count(tmpBB->GetId()) == 0) { + tmpBB->SetFrequency(static_cast(freq)); + bbFreqSet.insert(tmpBB->GetId()); + } else if ((curBB->GetFirstInsn() != nullptr || curBB->GetLabIdx() != 0) && + bbFreqSet.count(curBB->GetId()) == 0) { + curBB->SetFrequency(static_cast(freq)); + bbFreqSet.insert(tmpBB->GetId()); + } + } else if (bbFreqSet.count(curBB->GetId()) == 0) { + curBB->SetFrequency(static_cast(freq)); + bbFreqSet.insert(curBB->GetId()); + } + } + + /* + * skip the membarstoreload if there is the pattern for volatile write( membarrelease + store + membarstoreload ) + * membarrelease + store + membarstoreload -> stlr + */ + if (volReleaseInsn != nullptr) { + if ((stmt->GetOpCode() != OP_membarrelease) && (stmt->GetOpCode() != OP_comment)) { + if (!isVolStore) { + /* remove the generated membar release insn. */ + curBB->RemoveInsn(*volReleaseInsn); + /* skip the membarstoreload. 
*/ + stmt = stmt->GetNext(); + } + volReleaseInsn = nullptr; + isVolStore = false; + } + } + } + + /* Set lastbb's frequency */ + BlockNode *block = func.GetBody(); + ASSERT(block != nullptr, "get func body block failed in CGFunc::GenerateInstruction"); + curBB->SetLastStmt(*block->GetLast()); + curBB->SetFrequency(frequency); + lastBB = curBB; + /* All stmts are handled */ + frequency = 0; +} + +LabelIdx CGFunc::CreateLabel() { + MIRSymbol *funcSt = GlobalTables::GetGsymTable().GetSymbolFromStidx(func.GetStIdx().Idx()); + ASSERT(funcSt != nullptr, "Get func failed at CGFunc::CreateLabel"); + std::string funcName = funcSt->GetName(); + std::string labelStr = funcName.append(std::to_string(labelIdx++)); + return func.GetOrCreateLableIdxFromName(labelStr); +} + +MIRSymbol *CGFunc::GetRetRefSymbol(BaseNode &expr) { + Opcode opcode = expr.GetOpCode(); + if (opcode != OP_dread) { + return nullptr; + } + auto &retExpr = static_cast(expr); + MIRSymbol *symbol = mirModule.CurFunction()->GetLocalOrGlobalSymbol(retExpr.GetStIdx()); + ASSERT(symbol != nullptr, "get symbol in mirmodule failed"); + if (symbol->IsRefType()) { + MIRSymbol *sym = nullptr; + for (uint32 i = 0; i < func.GetFormalCount(); i++) { + sym = func.GetFormal(i); + if (sym == symbol) { + return nullptr; + } + } + return symbol; + } + return nullptr; +} + +void CGFunc::TraverseAndClearCatchMark(BB &bb) { + /* has bb been visited */ + if (bb.GetInternalFlag3() != 0) { + return; + } + bb.SetIsCatch(false); + bb.SetInternalFlag3(1); + for (auto succBB : bb.GetSuccs()) { + TraverseAndClearCatchMark(*succBB); + } +} + +/* + * Two types of successor edges, normal and eh. Any bb which is not + * reachable by a normal successor edge is considered to be in a + * catch block. + * Marking it as a catch block does not automatically make it into + * a catch block. Unreachables can be marked as such too. + */ +void CGFunc::MarkCatchBBs() { + /* First, suspect all bb to be in catch */ + FOR_ALL_BB(bb, this) { + bb->SetIsCatch(true); + bb->SetInternalFlag3(0); /* mark as not visited */ + } + /* Eliminate cleanup section from catch */ + FOR_ALL_BB(bb, this) { + if (bb->IsCleanup()) { + bb->SetIsCatch(false); + ASSERT(bb->GetSuccs().size() <= 1, "MarkCatchBBs incorrect cleanup label"); + BB *succ = nullptr; + if (!bb->GetSuccs().empty()) { + succ = bb->GetSuccs().front(); + } else { + continue; + } + ASSERT(succ != nullptr, "Get front succsBB failed"); + while (1) { + ASSERT(succ->GetSuccs().size() <= 1, "MarkCatchBBs incorrect cleanup label"); + succ->SetIsCatch(false); + if (!succ->GetSuccs().empty()) { + succ = succ->GetSuccs().front(); + } else { + break; + } + } + } + } + /* Unmark all normally reachable bb as NOT catch. */ + TraverseAndClearCatchMark(*firstBB); +} + +/* + * Mark CleanupBB + * Note: Cleanup bb and func body bbs are seperated, no edges between them. + * No ehSuccs or eh_prevs between cleanup bbs. + */ +void CGFunc::MarkCleanupBB() const { + /* there is no cleanup BB in the function */ + if (cleanupBB == nullptr) { + return; + } + ASSERT(ExitbbNotInCleanupArea(*cleanupBB), "exitBB created in cleanupArea."); + ASSERT(cleanupBB->GetEhSuccs().empty(), "CG internal error. Cleanup bb should not have ehSuccs."); + +#if DEBUG /* Please don't remove me. */ + /* Check if all of the cleanup bb is at bottom of the function. 
*/ + bool isCleanupArea = true; + if (!mirModule.IsCModule()) { + FOR_ALL_BB_REV_CONST(bb, this) { + if (bb == GetLastBB()) { + continue; + } + if (isCleanupArea) { + ASSERT(bb->IsCleanup(), "CG internal error, cleanup BBs should be at the bottom of the function."); + } else { + ASSERT(!bb->IsCleanup(), "CG internal error, cleanup BBs should be at the bottom of the function."); + } + + if (bb == cleanupBB) { + isCleanupArea = false; + } + } + } +#endif /* DEBUG */ +} + +bool CGFunc::ExitbbNotInCleanupArea(const BB &bb) const { + for (const BB *nextBB = bb.GetNext(); nextBB != nullptr; nextBB = nextBB->GetNext()) { + if (nextBB->GetKind() == BB::kBBReturn) { + return false; + } + } + return true; +} + +/* + * Do mem barrier optimization for constructor funcs as follow: + * membarstorestore + * write field of this_ ==> write field of this_ + * membarrelease membarrelease. + */ +bool CGFunc::MemBarOpt(const StmtNode &membar) { + if (func.GetFormalCount() == 0) { + return false; + } + MIRSymbol *thisSym = func.GetFormal(0); + if (thisSym == nullptr) { + return false; + } + StmtNode *stmt = membar.GetNext(); + for (; stmt != nullptr; stmt = stmt->GetNext()) { + BaseNode *base = nullptr; + if (stmt->GetOpCode() == OP_comment) { + continue; + } else if (stmt->GetOpCode() == OP_iassign) { + base = static_cast(stmt)->Opnd(0); + } else if (stmt->GetOpCode() == OP_call) { + auto *callNode = static_cast(stmt); + MIRFunction *fn = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(callNode->GetPUIdx()); + MIRSymbol *fsym = GetMirModule().CurFunction()->GetLocalOrGlobalSymbol(fn->GetStIdx(), false); + ASSERT(fsym != nullptr, "null ptr check"); + if (fsym->GetName() == "MCC_WriteRefFieldNoDec") { + base = callNode->Opnd(0); + } + } + if (base != nullptr) { + Opcode op = base->GetOpCode(); + if (op == OP_regread && thisSym->IsPreg() && + thisSym->GetPreg()->GetPregNo() == static_cast(base)->GetRegIdx()) { + continue; + } + if ((op == OP_dread || op == OP_addrof) && !thisSym->IsPreg() && + static_cast(base)->GetStIdx() == thisSym->GetStIdx()) { + continue; + } + } + break; + } + + CHECK_NULL_FATAL(stmt); + return stmt->GetOpCode() == OP_membarrelease; +} + +void CGFunc::MakeupScopeLabels(BB &bb) { + /* insert leftover scope-end labels */ + if (!scpIdSet.empty()) { + std::set::reverse_iterator rit; + for (rit=scpIdSet.rbegin(); rit != scpIdSet.rend(); ++rit) { + Operand *o0 = CreateDbgImmOperand(*rit); + Operand *o1 = CreateDbgImmOperand(1); + Insn &scope = GetInsnBuilder()->BuildDbgInsn(mpldbg::OP_DBG_scope).AddOpndChain(*o0).AddOpndChain(*o1); + bb.AppendInsn(scope); + } + } +} + +void CGFunc::ProcessExitBBVec() { + ASSERT(exitBBVec.size() == 1, "there must be one BB_return in func"); + MakeupScopeLabels(*exitBBVec[0]); +} + +void CGFunc::AddCommonExitBB() { + // create fake commonExitBB + commonExitBB = CreateNewBB(true, BB::kBBFallthru, 0); + ASSERT(commonExitBB != nullptr, "cannot create fake commonExitBB"); + for (BB *cgbb : exitBBVec) { + if (!cgbb->IsUnreachable()) { + commonExitBB->PushBackPreds(*cgbb); + } + } + for (BB *cgbb : noReturnCallBBVec) { + commonExitBB->PushBackPreds(*cgbb); + } +} + +void CGFunc::UpdateCallBBFrequency() { + if (!func.HasFreqMap() || func.GetLastFreqMap().empty()) { + return; + } + FOR_ALL_BB(bb, this) { + if (bb->GetKind() != BB::kBBFallthru || !bb->HasCall()) { + continue; + } + ASSERT(bb->GetSuccs().size() <= 1, "fallthru BB has only one successor."); + if (!bb->GetSuccs().empty()) { + bb->SetFrequency((*(bb->GetSuccsBegin()))->GetFrequency()); + } + } +} + +void 
CGFunc::HandleFunction() { + /* select instruction */ + GenerateInstruction(); + ProcessExitBBVec(); + LmbcGenSaveSpForAlloca(); + + if (func.IsJava()) { + GenerateCleanupCodeForExtEpilog(*cleanupBB); + } else if (!func.GetModule()->IsCModule()) { + GenerateCleanupCode(*cleanupBB); + } + GenSaveMethodInfoCode(*firstBB); + /* build control flow graph */ + theCFG = memPool->New(*this); + theCFG->BuildCFG(); + RemoveUnreachableBB(); + AddCommonExitBB(); + if (mirModule.GetSrcLang() != kSrcLangC) { + MarkCatchBBs(); + } + MarkCleanupBB(); + DetermineReturnTypeofCall(); + theCFG->MarkLabelTakenBB(); + theCFG->UnreachCodeAnalysis(); + if (mirModule.GetSrcLang() == kSrcLangC) { + theCFG->WontExitAnalysis(); + } + if (CGOptions::IsLazyBinding() && !GetCG()->IsLibcore()) { + ProcessLazyBinding(); + } + if (GetCG()->DoPatchLongBranch()) { + PatchLongBranch(); + } + if (CGOptions::DoEnableHotColdSplit()) { + theCFG->CheckCFGFreq(); + } + NeedStackProtect(); +} + +void CGFunc::AddDIESymbolLocation(const MIRSymbol *sym, SymbolAlloc *loc, bool isParam) { + ASSERT(debugInfo != nullptr, "debugInfo is null!"); + ASSERT(loc->GetMemSegment() != nullptr, "only support those variable that locate at stack now"); + DBGDie *sdie = debugInfo->GetLocalDie(&func, sym->GetNameStrIdx()); + if (sdie == nullptr) { + return; + } + + DBGExprLoc *exprloc = sdie->GetExprLoc(); + CHECK_FATAL(exprloc != nullptr, "exprloc is null in CGFunc::AddDIESymbolLocation"); + exprloc->SetSymLoc(loc); + + GetDbgCallFrameLocations(isParam).push_back(exprloc); +} + +void CGFunc::DumpCFG() const { + MIRSymbol *funcSt = GlobalTables::GetGsymTable().GetSymbolFromStidx(func.GetStIdx().Idx()); + ASSERT(funcSt != nullptr, "null ptr check"); + LogInfo::MapleLogger() << "\n****** CFG built by CG for " << funcSt->GetName() << " *******\n"; + FOR_ALL_BB_CONST(bb, this) { + LogInfo::MapleLogger() << "=== BB ( " << std::hex << bb << std::dec << " ) <" << bb->GetKindName() << "> ===\n"; + LogInfo::MapleLogger() << "BB id:" << bb->GetId() << "\n"; + if (!bb->GetPreds().empty()) { + LogInfo::MapleLogger() << " pred [ "; + for (auto *pred : bb->GetPreds()) { + LogInfo::MapleLogger() << pred->GetId() << " "; + } + LogInfo::MapleLogger() << "]\n"; + } + if (!bb->GetSuccs().empty()) { + LogInfo::MapleLogger() << " succ [ "; + for (auto *succ : bb->GetSuccs()) { + LogInfo::MapleLogger() << succ->GetId() << " "; + } + LogInfo::MapleLogger() << "]\n"; + } + const StmtNode *stmt = bb->GetFirstStmt(); + if (stmt != nullptr) { + bool done = false; + do { + done = stmt == bb->GetLastStmt(); + stmt->Dump(1); + LogInfo::MapleLogger() << "\n"; + stmt = stmt->GetNext(); + } while (!done); + } else { + LogInfo::MapleLogger() << "\n"; + } + } +} + +void CGFunc::DumpBBInfo(const BB *bb) const { + LogInfo::MapleLogger() << "=== BB " << " <" << bb->GetKindName(); + if (bb->GetLabIdx() != MIRLabelTable::GetDummyLabel()) { + LogInfo::MapleLogger() << "[labeled with " << bb->GetLabIdx(); + LogInfo::MapleLogger() << " ==> @" << func.GetLabelName(bb->GetLabIdx()) << "]"; + } + + LogInfo::MapleLogger() << "> <" << bb->GetId() << "> "; + if (bb->GetLoop()) { + LogInfo::MapleLogger() << "[Loop level " << bb->GetLoop()->GetLoopLevel(); + LogInfo::MapleLogger() << ", head BB " << bb->GetLoop()->GetHeader()->GetId() << "]"; + } + if (bb->IsCleanup()) { + LogInfo::MapleLogger() << "[is_cleanup] "; + } + if (bb->IsUnreachable()) { + LogInfo::MapleLogger() << "[unreachable] "; + } + if (!bb->GetSuccs().empty()) { + LogInfo::MapleLogger() << "succs: "; + for (auto *succBB : 
bb->GetSuccs()) { + LogInfo::MapleLogger() << succBB->GetId() << " "; + } + } + if (!bb->GetPreds().empty()) { + LogInfo::MapleLogger() << "preds: "; + for (auto *predBB : bb->GetPreds()) { + LogInfo::MapleLogger() << predBB->GetId() << " "; + } + } + if (!bb->GetEhSuccs().empty()) { + LogInfo::MapleLogger() << "eh_succs: "; + for (auto *ehSuccBB : bb->GetEhSuccs()) { + LogInfo::MapleLogger() << ehSuccBB->GetId() << " "; + } + } + if (!bb->GetEhPreds().empty()) { + LogInfo::MapleLogger() << "eh_preds: "; + for (auto *ehPredBB : bb->GetEhPreds()) { + LogInfo::MapleLogger() << ehPredBB->GetId() << " "; + } + } + LogInfo::MapleLogger() << "===\n"; + LogInfo::MapleLogger() << "frequency:" << bb->GetFrequency() << "\n"; +} + +void CGFunc::DumpCGIR() const { + MIRSymbol *funcSt = GlobalTables::GetGsymTable().GetSymbolFromStidx(func.GetStIdx().Idx()); + ASSERT(funcSt != nullptr, "null ptr check"); + LogInfo::MapleLogger() << "\n****** CGIR for " << funcSt->GetName() << " *******\n"; + FOR_ALL_BB_CONST(bb, this) { + if (bb->IsUnreachable()) { + continue; + } + DumpBBInfo(bb); + FOR_BB_INSNS_CONST(insn, bb) { + insn->Dump(); + } + } +} + +void CGFunc::DumpLoop() const { + for (const auto *lp : loops) { + lp->PrintLoops(*lp); + } +} + +void CGFunc::ClearLoopInfo() { + loops.clear(); + loops.shrink_to_fit(); + FOR_ALL_BB(bb, this) { + bb->ClearLoopPreds(); + bb->ClearLoopSuccs(); + } +} + +void CGFunc::DumpCFGToDot(const std::string &fileNamePrefix) { + std::ofstream file(fileNamePrefix + GetName()); + file << "digraph {" << std::endl; + for (auto *bb : GetAllBBs()) { + if (bb == nullptr) { + continue; + } + auto &succs = bb->GetSuccs(); + if (succs.empty()) { + continue; + } + file << " " << bb->GetId() << "->{"; + for (auto *succ : succs) { + file << succ->GetId() << " "; + } + file << "};"; + } + file << "}" << std::endl; +} + +void CGFunc::PatchLongBranch() { + for (BB *bb = firstBB->GetNext(); bb != nullptr; bb = bb->GetNext()) { + bb->SetInternalFlag1(bb->GetInternalFlag1() + bb->GetPrev()->GetInternalFlag1()); + } + BB *next = nullptr; + for (BB *bb = firstBB; bb != nullptr; bb = next) { + next = bb->GetNext(); + if (bb->GetKind() != BB::kBBIf && bb->GetKind() != BB::kBBGoto) { + continue; + } + Insn *insn = bb->GetLastMachineInsn(); + while (insn->IsImmaterialInsn()) { + insn = insn->GetPrev(); + } + BB *tbb = GetBBFromLab2BBMap(GetLabelInInsn(*insn)); + if ((tbb->GetInternalFlag1() - bb->GetInternalFlag1()) < MaxCondBranchDistance()) { + continue; + } + InsertJumpPad(insn); + } +} + +void CGFunc::UpdateAllRegisterVregMapping(MapleMap &newMap) { + vregsToPregsMap.clear(); + for (auto it : newMap) { + vregsToPregsMap[it.first] = it.second; + } +} + +/* there are two stack protector: + * 1. stack protector all: for all function + * 2. 
stack protector strong: for some functions that + * <1> invoke the alloca function; + * <2> use stack address (address of or array symbol); + * <3> callee uses a return stack slot; + * */ +void CGFunc::NeedStackProtect() { + ASSERT(GetNeedStackProtect() == false, "no stack protect default"); + CG *currCG = GetCG(); + if (currCG->IsStackProtectorAll()) { + SetNeedStackProtect(true); + return; + } + + if (!currCG->IsStackProtectorStrong()) { + return; + } + + if (HasAlloca()) { + SetNeedStackProtect(true); + return; + } + + /* check if the function uses a stack address or a callee function returns via a stack slot */ + auto stackProInfo = GetStackProtectInfo(); + if ((stackProInfo & kAddrofStack) != 0 || (stackProInfo & kRetureStackSlot) != 0) { + SetNeedStackProtect(true); + return; + } +} + +bool CgHandleFunction::PhaseRun(maplebe::CGFunc &f) { + f.HandleFunction(); + if (!f.GetCG()->GetCGOptions().DoEmitCode() || f.GetCG()->GetCGOptions().DoDumpCFG()) { + f.DumpCFG(); + } + return false; +} +MAPLE_TRANSFORM_PHASE_REGISTER(CgHandleFunction, handlefunction) + +bool CgFixCFLocOsft::PhaseRun(maplebe::CGFunc &f) { + if (f.GetCG()->GetCGOptions().WithDwarf()) { + f.DBGFixCallFrameLocationOffsets(); + } + return false; +} +MAPLE_TRANSFORM_PHASE_REGISTER(CgFixCFLocOsft, dbgfixcallframeoffsets) +} /* namespace maplebe */ diff --git a/src/mapleall/maple_be/src/cg/control_dep_analysis.cpp b/src/mapleall/maple_be/src/cg/control_dep_analysis.cpp new file mode 100644 index 0000000000000000000000000000000000000000..4952edf920e6b812f72fe0e9ba79fe811e9939df --- /dev/null +++ b/src/mapleall/maple_be/src/cg/control_dep_analysis.cpp @@ -0,0 +1,504 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ + +#include "control_dep_analysis.h" +#include "mpl_logging.h" + +namespace maplebe { +void ControlDepAnalysis::Run() { + pdom->GeneratePdomTreeDot(); + BuildCFGInfo(); + ConstructFCDG(); + ComputeRegions(); +} + +/* Augment CFG info */ +void ControlDepAnalysis::BuildCFGInfo() { + CHECK_FATAL(cgFunc.GetCommonExitBB() != nullptr, "there must be a virtual ExitBB in cfg"); + cfgMST->BuildEdges(cgFunc.GetFirstBB(), cgFunc.GetCommonExitBB()); + // denote back-edge on CFGEdge + for (auto cfgEdge : cfgMST->GetAllEdges()) { + BB *srcBB = cfgEdge->GetSrcBB(); + BB *destBB = cfgEdge->GetDestBB(); + for (auto loop : cgFunc.GetLoops()) { + if (loop->IsBackEdge(*srcBB, *destBB)) { + cfgEdge->SetIsBackEdge(); + } + } + } + // denote the condition on CFGEdge except for back-edge + for (auto &cfgEdge : cfgMST->GetAllEdges()) { + if (cfgEdge->IsBackEdge()) { + continue; + } + BB *srcBB = cfgEdge->GetSrcBB(); + BB *destBB = cfgEdge->GetDestBB(); + CHECK_FATAL(srcBB != nullptr, "get srcBB of cfgEdge failed"); + if (srcBB == cgFunc.GetFirstBB()) { + CHECK_FATAL(srcBB->GetSuccsSize() == 1, "EntryBB should have only one succ"); + cfgEdge->SetCondition(0); + continue; + } else if (srcBB == cgFunc.GetCommonExitBB()) { + continue; + } + BB::BBKind srcKind = srcBB->GetKind(); + switch (srcKind) { + case BB::kBBFallthru: + case BB::kBBGoto: + case BB::kBBIgoto: + case BB::kBBReturn: + cfgEdge->SetCondition(0); + break; + case BB::kBBIntrinsic: + if (!srcBB->GetLastMachineInsn()->IsBranch()) { + // set default cond number + cfgEdge->SetCondition(0); + } + /* else fall through */ + [[clang::fallthrough]]; + case BB::kBBIf: { + Insn *branchInsn = srcBB->GetLastMachineInsn(); + CHECK_FATAL(branchInsn != nullptr, "ifBB must have a machine insn at the end"); + ASSERT(branchInsn->IsCondBranch(), "ifBB must have a conditional branch insn at the end"); + int lastOpndIdx = static_cast(branchInsn->GetOperandSize()) - 1; + ASSERT(lastOpndIdx > -1, "lastOpndIdx must greater than -1"); + Operand &lastOpnd = branchInsn->GetOperand(static_cast(lastOpndIdx)); + ASSERT(lastOpnd.IsLabelOpnd(), "lastOpnd must be labelOpnd in branchInsn"); + BB *jumpBB = cgFunc.GetBBFromLab2BBMap(static_cast(lastOpnd).GetLabelIndex()); + if (jumpBB == destBB) { + cfgEdge->SetCondition(0); + } else { + cfgEdge->SetCondition(1); + } + break; + } + case BB::kBBRangeGoto: { + // Each successor cfgEdge is assigned a different cond number + cfgEdge->SetCondition(static_cast(GetAndAccSuccedCondNum(srcBB->GetId()))); + break; + } + default: + // these kindBBs set default cond number [kBBNoReturn kBBThrow kBBLast] + cfgEdge->SetCondition(0); + break; + } + } +} + +/* Construct forward control dependence graph */ +void ControlDepAnalysis::ConstructFCDG() { + CreateAllCDGNodes(); + /* 1. Collect all edges(A, B) in CFG that B does not post-dom A */ + for (auto cfgEdge : cfgMST->GetAllEdges()) { + if (cfgEdge->IsBackEdge()) { + continue; + } + BB *srcBB = cfgEdge->GetSrcBB(); + BB *destBB = cfgEdge->GetDestBB(); + CHECK_FATAL(srcBB != nullptr && destBB != nullptr, "get edge-connected nodes in cfg failed"); + if (srcBB == cgFunc.GetCommonExitBB()) { + continue; + } + if (!pdom->PostDominate(*destBB, *srcBB)) { + AddNonPdomEdges(cfgEdge); + } + } + + /* 2. 
Determine control dependence by traversal backward in the post-dom tree for every bbEdge in nonPdomEdges */ + for (auto candiEdge : nonPdomEdges) { + BB *srcBB = candiEdge->GetSrcBB(); + BB *destBB = candiEdge->GetDestBB(); + CHECK_FATAL(srcBB != nullptr && destBB != nullptr, "get edge-connected nodes in nonPdomEdges failed"); + /* + * Find the nearest common ancestor (L) of srcBB and destBB in the pdom-tree : + * (1) L == parent of srcBB in the pdom-tree (immediate dominator of srcBB) + * (2) L == srcBB + */ + BB *ancestor = (pdom->GetPdom(destBB->GetId()) == srcBB) ? srcBB : pdom->GetPdom(srcBB->GetId()); + BB *curBB = destBB; + while (curBB != ancestor && curBB != cgFunc.GetCommonExitBB()) { + (void)BuildControlDependence(*srcBB, *curBB, candiEdge->GetCondition()); + curBB = pdom->GetPdom(curBB->GetId()); + } + } +} + +/* + * Divide regions for the CDGNodes : + * Traverse the post-dominator tree by means of a post-order to + * assure that all children in the post-dominator tree are visited before their parent. + */ +void ControlDepAnalysis::ComputeRegions() { + // The default bbId starts from 1 + std::vector visited(fcdg->GetFCDGNodeSize(), false); + for (uint32 bbId = 1; bbId < fcdg->GetFCDGNodeSize(); ++bbId) { + if (!visited[bbId]) { + ComputeRegionForCurNode(bbId, visited); + } + } + ComputeRegionForNonDepNodes(); +} + +/* Nodes that don't have any control dependency are divided into a region */ +void ControlDepAnalysis::ComputeRegionForNonDepNodes() { + CDGRegion *curRegion = nullptr; + CDGNode *mergeNode = nullptr; + for (auto node : fcdg->GetAllFCDGNodes()) { + if (node == nullptr) { + continue; + } + if (node->GetInEdgesNum() != 0) { + continue; + } + if (curRegion == nullptr) { + curRegion = node->GetRegion(); + CHECK_FATAL(curRegion != nullptr, "each CDGNode must be in a region"); + mergeNode = node; + } else if (node->GetRegion() != curRegion) { + // Merge Region + CHECK_FATAL(mergeNode != nullptr, "invalid non-dep cdgNode"); + MergeRegions(*mergeNode, *node); + } + } +} + +/* Recursively computes the region of each node */ +void ControlDepAnalysis::ComputeRegionForCurNode(uint32 curBBId, std::vector &visited) { + if (visited[curBBId]) { + return; + } + visited[curBBId] = true; + MapleVector children = pdom->GetPdomChildrenItem(curBBId); + if (!children.empty()) { + // Check that each child of the node has been computed + for (auto childId : children) { + if (!visited[childId]) { + ComputeRegionForCurNode(childId, visited); + } + } + } + /* Leaf nodes and the nodes whose children have been computed in the pdom-tree that can be merged region */ + CreateAndDivideRegion(curBBId); +} + +void ControlDepAnalysis::CreateAndDivideRegion(uint32 pBBId) { + /* 1. Visit every CDGNode:N, Get and Create the region of the control dependence set */ + CDGNode *parentNode = fcdg->GetCDGNodeFromId(CDGNodeId(pBBId)); + CHECK_FATAL(parentNode != nullptr, "get CDGNode failed"); + CDGRegion *region = FindExistRegion(*parentNode); + if (region == nullptr) { + region = CreateFCDGRegion(*parentNode); + } else { + region->AddCDGNode(parentNode); + parentNode->SetRegion(*region); + } + MapleVector ®ionNodes = region->GetRegionNodes(); + /* 2. 
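/*
 * Illustrative sketch (not part of this patch): what the region partition above amounts to.
 * CDG nodes whose control-dependence sets are identical end up in the same region, so they
 * can later be treated as one scheduling unit.  The real pass walks the post-dominator tree
 * bottom-up and merges as it goes; the toy below simply groups by the full set.  Control
 * dependences are modelled here as (controllingBB, condition) pairs; all data is invented.
 */
#include <cstdint>
#include <iostream>
#include <map>
#include <set>
#include <utility>

using BBId = std::uint32_t;
using CDSet = std::set<std::pair<BBId, std::int32_t>>;  // {(controlling BB, condition)}

int main() {
  std::map<BBId, CDSet> controlDeps = {
      {2, {{1, 0}}},  // BB2 depends on (BB1, cond 0)
      {3, {{1, 1}}},  // BB3 depends on (BB1, cond 1)
      {4, {}},        // BB4 always executes
      {5, {{1, 0}}},  // BB5 has the same CD set as BB2
  };
  std::map<CDSet, std::uint32_t> regionIds;
  std::uint32_t nextRegion = 0;
  for (const auto &[bb, cds] : controlDeps) {
    auto [it, inserted] = regionIds.emplace(cds, nextRegion);
    if (inserted) {
      ++nextRegion;
    }
    std::cout << "BB" << bb << " -> region #" << it->second << "\n";
  }
  return 0;  // BB2 and BB5 share a region; BB3 and BB4 each get their own
}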
Visit each immediate child of N in the post-dom tree, compute the intersection of CDs */ + BB *curBB = parentNode->GetBB(); + CHECK_FATAL(curBB != nullptr, "get bb of CDGNode failed"); + for (auto childBBId : pdom->GetPdomChildrenItem(curBB->GetId())) { + CDGNode *childNode = fcdg->GetCDGNodeFromId(CDGNodeId(childBBId)); + if (std::find(regionNodes.begin(), regionNodes.end(), childNode) != regionNodes.end()) { + continue; + } + if (IsISEqualToCDs(*parentNode, *childNode)) { + MergeRegions(*parentNode, *childNode); + } + } +} + +/* Check whether the region corresponding to the control dependence set exists */ +CDGRegion *ControlDepAnalysis::FindExistRegion(CDGNode &node) { + MapleVector &allRegions = fcdg->GetAllRegions(); + MapleVector &curCDs = node.GetAllInEdges(); + // Nodes that don't have control dependencies are processed in a unified method at last + if (curCDs.empty()) { + return nullptr; + } + for (auto region : allRegions) { + if (region == nullptr) { + continue; + } + MapleVector ®ionCDs = region->GetCDEdges(); + if (regionCDs.size() != curCDs.size()) { + continue; + } + bool isAllCDExist = true; + for (auto curCD : curCDs) { + CHECK_FATAL(curCD != nullptr, "invalid control dependence edge"); + bool isOneCDExist = false; + for (auto regionCD : regionCDs) { + CHECK_FATAL(regionCD != nullptr, "invalid control dependence edge"); + if (IsSameControlDependence(*curCD, *regionCD)) { + isOneCDExist = true; + break; + } + } + if (!isOneCDExist) { + isAllCDExist = false; + break; + } + } + if (isAllCDExist) { + return region; + } + } + return nullptr; +} + +/* + * Check whether the intersection(IS) of the control dependency set of the parent node (CDs) + * and the child node is equal to the control dependency set of the parent node + */ +bool ControlDepAnalysis::IsISEqualToCDs(CDGNode &parent, CDGNode &child) { + MapleVector &parentCDs = parent.GetAllInEdges(); + MapleVector &childCDs = child.GetAllInEdges(); + // Nodes that don't have control dependencies are processed in a unified method at last + if (parentCDs.empty() || childCDs.empty()) { + return false; + } + bool equal = true; + for (auto parentCD : parentCDs) { + CHECK_FATAL(parentCD != nullptr, "invalid CDGEdge in parentCDs"); + for (auto childCD : childCDs) { + if (!IsSameControlDependence(*parentCD, *childCD)) { + equal = false; + continue; + } + } + if (!equal) { + return false; + } + } + return true; +} + +/* Merge regions of parentNode and childNode */ +void ControlDepAnalysis::MergeRegions(CDGNode &mergeNode, CDGNode &candiNode) { + CDGRegion *oldRegion = candiNode.GetRegion(); + CHECK_FATAL(oldRegion != nullptr, "get child's CDGRegion failed"); + + // Set newRegion of all memberNodes in oldRegion of child + CDGRegion *mergeRegion = mergeNode.GetRegion(); + CHECK_FATAL(mergeRegion != nullptr, "get parent's CDGRegion failed"); + for (auto node : oldRegion->GetRegionNodes()) { + node->SetRegion(*mergeRegion); + mergeRegion->AddCDGNode(node); + oldRegion->RemoveCDGNode(node); + } + + if (oldRegion->GetRegionNodeSize() == 0) { + fcdg->RemoveRegionById(oldRegion->GetRegionId()); + } +} + +CDGEdge *ControlDepAnalysis::BuildControlDependence(const BB &fromBB, const BB &toBB, int32 condition) { + auto *fromNode = fcdg->GetCDGNodeFromId(CDGNodeId(fromBB.GetId())); + auto *toNode = fcdg->GetCDGNodeFromId(CDGNodeId(toBB.GetId())); + CHECK_FATAL(fromNode != nullptr && toNode != nullptr, "get CDGNode failed"); + auto *cdgEdge = cdgMemPool.New(*fromNode, *toNode, condition); + + fromNode->AddOutEdges(cdgEdge); + 
toNode->AddInEdges(cdgEdge); + fcdg->AddFCDGEdge(cdgEdge); + return cdgEdge; +} + +/* Create CDGNode for every BB */ +void ControlDepAnalysis::CreateAllCDGNodes() { + fcdg = cdgMemPool.New(cgFunc, cdgAlloc); + FOR_ALL_BB(bb, &cgFunc) { + if (bb->IsUnreachable()) { + continue; + } + auto *node = cdgMemPool.New(CDGNodeId(bb->GetId()), *bb, cdgAlloc); + if (bb == cgFunc.GetFirstBB()) { + node->SetEntryNode(); + } + bb->SetCDGNode(node); + fcdg->AddFCDGNode(*node); + } + // Create CDGNode for exitBB + BB *exitBB = cgFunc.GetCommonExitBB(); + auto *exitNode = cdgMemPool.New(CDGNodeId(exitBB->GetId()), *exitBB, cdgAlloc); + exitNode->SetExitNode(); + exitBB->SetCDGNode(exitNode); + fcdg->AddFCDGNode(*exitNode); +} + +CDGRegion *ControlDepAnalysis::CreateFCDGRegion(CDGNode &curNode) { + MapleVector cdEdges = curNode.GetAllInEdges(); + auto *region = cdgMemPool.New(CDGRegionId(lastRegionId++), cdgAlloc); + region->AddCDEdgeSet(cdEdges); + region->AddCDGNode(&curNode); + fcdg->AddRegion(*region); + curNode.SetRegion(*region); + return region; +} + +void ControlDepAnalysis::GenerateFCDGDot() const { + CHECK_FATAL(fcdg != nullptr, "construct FCDG failed"); + MapleVector &allNodes = fcdg->GetAllFCDGNodes(); + MapleVector &allEdges = fcdg->GetAllFCDGEdges(); + MapleVector &allRegions = fcdg->GetAllRegions(); + + std::streambuf *coutBuf = std::cout.rdbuf(); + std::ofstream fcdgFile; + std::streambuf *fileBuf = fcdgFile.rdbuf(); + (void)std::cout.rdbuf(fileBuf); + + /* Define the output file name */ + std::string fileName; + (void)fileName.append("fcdg_"); + (void)fileName.append(cgFunc.GetName()); + (void)fileName.append(".dot"); + + fcdgFile.open(fileName.c_str(), std::ios::trunc); + if (!fcdgFile.is_open()) { + LogInfo::MapleLogger(kLlWarn) << "fileName:" << fileName << " open failed.\n"; + return; + } + fcdgFile << "digraph FCDG_" << cgFunc.GetName() << " {\n\n"; + fcdgFile << " node [shape=box,style=filled,color=lightgrey];\n\n"; + + /* Dump nodes style */ + for (auto node : allNodes) { + if (node == nullptr) { + continue; + } + BB *bb = node->GetBB(); + CHECK_FATAL(bb != nullptr, "get bb of CDGNode failed"); + fcdgFile << " BB_" << bb->GetId(); + fcdgFile << "[label= \""; + if (node->IsEntryNode()) { + fcdgFile << "ENTRY\n"; + } else if (node->IsExitNode()) { + fcdgFile << "EXIT\n"; + } + fcdgFile << "BB_" << bb->GetId() << " Label_" << bb->GetLabIdx() << ":\n"; + fcdgFile << " { " << bb->GetKindName() << " }\"];\n"; + } + fcdgFile << "\n"; + + /* Dump edges style */ + for (auto edge : allEdges) { + CDGNode &fromNode = edge->GetFromNode(); + CDGNode &toNode = edge->GetToNode(); + fcdgFile << " BB_" << fromNode.GetBB()->GetId() << " -> " << "BB_" << toNode.GetBB()->GetId(); + fcdgFile << " [label = \""; + fcdgFile << edge->GetCondition() << "\"];\n"; + } + fcdgFile << "\n"; + + /* Dump region style using cluster in dot language */ + for (auto region : allRegions) { + if (region == nullptr) { + continue; + } + CHECK_FATAL(region->GetRegionNodeSize() != 0, "invalid region"); + fcdgFile << " subgraph cluster_" << region->GetRegionId() << " {\n"; + fcdgFile << " color=red;\n"; + fcdgFile << " label = \"region #" << region->GetRegionId() << "\";\n"; + MapleVector &memberNodes = region->GetRegionNodes(); + for (auto node : memberNodes) { + fcdgFile << " BB_" << node->GetBB()->GetId() << ";\n"; + } + fcdgFile << "}\n\n"; + } + + fcdgFile << "}\n"; + (void)fcdgFile.flush(); + fcdgFile.close(); + (void)std::cout.rdbuf(coutBuf); +} + +void ControlDepAnalysis::GenerateCFGDot() const { + std::streambuf *coutBuf 
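/*
 * Illustrative sketch (not part of this patch): roughly the kind of text the FCDG dump above
 * writes for a tiny two-block function, with one cluster per region.  Exact block ids and
 * labels depend on the function being compiled; a generated file can be rendered with
 * Graphviz, e.g.  dot -Tpng fcdg_foo.dot -o fcdg_foo.png
 */
#include <iostream>

int main() {
  const char *sampleFcdgDot =
      "digraph FCDG_foo {\n"
      "  node [shape=box,style=filled,color=lightgrey];\n"
      "  BB_1[label= \"ENTRY BB_1 { if }\"];\n"
      "  BB_2[label= \"BB_2 { return }\"];\n"
      "  BB_1 -> BB_2 [label = \"0\"];\n"
      "  subgraph cluster_0 {\n"
      "    color=red;\n"
      "    label = \"region #0\";\n"
      "    BB_2;\n"
      "  }\n"
      "}\n";
  std::cout << sampleFcdgDot;
  return 0;
}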
= std::cout.rdbuf(); + std::ofstream cfgFile; + std::streambuf *fileBuf = cfgFile.rdbuf(); + (void)std::cout.rdbuf(fileBuf); + + /* Define the output file name */ + std::string fileName; + (void)fileName.append("cfg_before_cdg_"); + (void)fileName.append(cgFunc.GetName()); + (void)fileName.append(".dot"); + + cfgFile.open(fileName.c_str(), std::ios::trunc); + if (!cfgFile.is_open()) { + LogInfo::MapleLogger(kLlWarn) << "fileName:" << fileName << " open failed.\n"; + return; + } + + cfgFile << "digraph CFG_" << cgFunc.GetName() << " {\n\n"; + cfgFile << " node [shape=box];\n\n"; + + /* Dump nodes style */ + FOR_ALL_BB_CONST(bb, &cgFunc) { + if (bb->IsUnreachable()) { + continue; + } + cfgFile << " BB_" << bb->GetId(); + cfgFile << "[label= \""; + if (bb == cgFunc.GetFirstBB()) { + cfgFile << "ENTRY\n"; + } + cfgFile << "BB_" << bb->GetId() << " Label_" << bb->GetLabIdx() << ":\n"; + cfgFile << " { " << bb->GetKindName() << " }\"];\n"; + } + BB *exitBB = cgFunc.GetCommonExitBB(); + cfgFile << " BB_" << exitBB->GetId(); + cfgFile << "[label= \"EXIT\n"; + cfgFile << "BB_" << exitBB->GetId() << "\"];\n"; + cfgFile << "\n"; + + /* Dump edges style */ + for (auto cfgEdge : cfgMST->GetAllEdges()) { + BB *srcBB = cfgEdge->GetSrcBB(); + BB *destBB = cfgEdge->GetDestBB(); + CHECK_FATAL(srcBB != nullptr && destBB != nullptr, "get wrong cfg-edge"); + if (srcBB == cgFunc.GetCommonExitBB()) { + continue; + } + cfgFile << " BB_" << srcBB->GetId() << " -> " << "BB_" << destBB->GetId(); + cfgFile << " [label = \""; + cfgFile << cfgEdge->GetCondition() << "\"];\n"; + } + cfgFile << "\n"; + + cfgFile << "}\n"; + (void)cfgFile.flush(); + cfgFile.close(); + (void)std::cout.rdbuf(coutBuf); +} + +void CgControlDepAnalysis::GetAnalysisDependence(maple::AnalysisDep &aDep) const { + aDep.AddRequired(); +} + +bool CgControlDepAnalysis::PhaseRun(maplebe::CGFunc &f) { + MemPool *cdgMemPool = GetPhaseMemPool(); + MemPool *tmpMemPool = ApplyTempMemPool(); + CHECK_FATAL(cdgMemPool != nullptr && tmpMemPool != nullptr, "get memPool failed"); + PostDomAnalysis *pdomInfo = GET_ANALYSIS(CgPostDomAnalysis, f); + CHECK_FATAL(pdomInfo != nullptr, "get result of PostDomAnalysis failed"); + auto *cfgmst = cdgMemPool->New, maplebe::BB>>(*cdgMemPool); + cda = cdgMemPool->New(f, *cdgMemPool, *tmpMemPool, + *pdomInfo, *cfgmst); + cda->Run(); + return true; +} +MAPLE_ANALYSIS_PHASE_REGISTER(CgControlDepAnalysis, cgcontroldepanalysis) +} /* namespace maplebe */ diff --git a/src/mapleall/maple_be/src/cg/data_dep_analysis.cpp b/src/mapleall/maple_be/src/cg/data_dep_analysis.cpp new file mode 100644 index 0000000000000000000000000000000000000000..c055683ece14296f6fd4dca4734ca544bbe47ea7 --- /dev/null +++ b/src/mapleall/maple_be/src/cg/data_dep_analysis.cpp @@ -0,0 +1,271 @@ +/* +* Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. +* +* OpenArkCompiler is licensed under Mulan PSL v2. +* You can use this software according to the terms and conditions of the Mulan PSL v2. +* You may obtain a copy of Mulan PSL v2 at: +* +* http://license.coscl.org.cn/MulanPSL2 +* +* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +* FIT FOR A PARTICULAR PURPOSE. +* See the Mulan PSL v2 for more details. 
+*/ + +#include "data_dep_analysis.h" +#include "control_dep_analysis.h" +#include "aarch64_cg.h" + +namespace maplebe { +void IntraDataDepAnalysis::Run(BB &bb, MapleVector &dataNodes) { + if (bb.IsUnreachable()) { + return; + } + MemPool *localMp = memPoolCtrler.NewMemPool("dda for bb mempool", true); + auto *localAlloc = new MapleAllocator(localMp); + InitCurNodeInfo(*localMp, *localAlloc, bb, dataNodes); + uint32 nodeSum = 1; + MapleVector comments(intraAlloc.Adapter()); + const Insn *locInsn = bb.GetFirstLoc(); + FOR_BB_INSNS(insn, &bb) { + if (!insn->IsMachineInstruction()) { + ddb.ProcessNonMachineInsn(*insn, comments, dataNodes, locInsn); + continue; + } + /* Add a pseudo node to separate dependence graph when appropriate */ + ddb.SeparateDependenceGraph(dataNodes, nodeSum); + /* Generate a DepNode */ + DepNode *ddgNode = ddb.GenerateDepNode(*insn, dataNodes, nodeSum, comments); + /* Build Dependency for may-throw insn */ + ddb.BuildMayThrowInsnDependency(*insn); + /* Build Dependency for each operand of insn */ + ddb.BuildOpndDependency(*insn); + /* Build Dependency for special insn */ + ddb.BuildSpecialInsnDependency(*insn, dataNodes); + /* Build Dependency for ambi insn if needed */ + ddb.BuildAmbiInsnDependency(*insn); + /* Update stack and heap dependency */ + ddb.UpdateStackAndHeapDependency(*ddgNode, *insn, *locInsn); + if (insn->IsFrameDef()) { + ddb.SetLastFrameDefInsn(insn); + } + /* Separator exists */ + uint32 separatorIndex = ddb.GetSeparatorIndex(); + ddb.AddDependence(*dataNodes[separatorIndex], *insn->GetDepNode(), kDependenceTypeSeparator); + /* Update register use and register def */ + ddb.UpdateRegUseAndDef(*insn, *ddgNode, dataNodes); + } + AddEndSeparatorNode(bb, dataNodes); + ddb.CopyAndClearComments(comments); + ClearCurNodeInfo(localMp, localAlloc); +} + +/* Init dataDepBase data struct */ +void IntraDataDepAnalysis::InitCurNodeInfo(MemPool &tmpMp, MapleAllocator &tmpAlloc, BB &bb, + MapleVector &dataNodes) { + CDGNode *curCDGNode = bb.GetCDGNode(); + CHECK_FATAL(curCDGNode != nullptr, "invalid cdgNode from bb"); + ddb.SetCDGNode(curCDGNode); + // Need to move to target subclass + uint32 maxRegNum = (cgFunc.IsAfterRegAlloc() ? 
AArch64reg::kAllRegNum : cgFunc.GetMaxVReg()); + curCDGNode->InitDataDepInfo(tmpMp, tmpAlloc, maxRegNum); + /* Analysis live-in registers in catch BB */ + ddb.AnalysisAmbiInsns(bb); + /* Clear all dependence nodes and push the first separator node */ + dataNodes.clear(); + DepNode *pseudoSepNode = ddb.BuildSeparatorNode(); + (void)dataNodes.emplace_back(pseudoSepNode); + curCDGNode->AddPseudoSepNodes(pseudoSepNode); + ddb.SetSeparatorIndex(0); + + if (!cgFunc.IsAfterRegAlloc()) { + /* assume first pseudo_dependence_separator insn of current bb define live-in's registers */ + Insn *pseudoSepInsn = pseudoSepNode->GetInsn(); + for (auto ®NO : bb.GetLiveInRegNO()) { + curCDGNode->SetLatestDefInsn(regNO, pseudoSepInsn); + pseudoSepNode->AddDefReg(regNO); + pseudoSepNode->SetRegDefs(pseudoSepNode->GetDefRegnos().size(), nullptr); + } + } +} + +/* Clear local mempool and data-dep-info for cur cdgNode */ +void IntraDataDepAnalysis::ClearCurNodeInfo(MemPool *tmpMp, MapleAllocator *tmpAlloc) { + delete tmpAlloc; + memPoolCtrler.DeleteMemPool(tmpMp); + CDGNode *curCDGNode = ddb.GetCDGNode(); + curCDGNode->ClearDataDepInfo(); +} + +/* Add a separatorNode to the end of a nodes + * before RA: add all live-out registers to Uses of this separatorNode + */ +void IntraDataDepAnalysis::AddEndSeparatorNode(BB &bb, MapleVector &nodes) { + CDGNode *curCDGNode = bb.GetCDGNode(); + CHECK_FATAL(curCDGNode != nullptr, "invalid cdgNode from bb"); + DepNode *separatorNode = ddb.BuildSeparatorNode(); + (void)nodes.emplace_back(separatorNode); + curCDGNode->AddPseudoSepNodes(separatorNode); + ddb.BuildDepsSeparator(*separatorNode, nodes); + + bool beforeRA = !cgFunc.IsAfterRegAlloc(); + if (beforeRA) { + /* for all live-out register of current bb */ + for (auto ®NO : bb.GetLiveOutRegNO()) { + if (curCDGNode->GetLatestDefInsn(regNO) != nullptr) { + curCDGNode->AppendUseInsnChain(regNO, separatorNode->GetInsn(), intraMp, beforeRA); + separatorNode->AddUseReg(regNO); + CHECK_FATAL(curCDGNode->GetUseInsnChain(regNO) != nullptr, "get useInsnChain failed"); + separatorNode->SetRegUses(*curCDGNode->GetUseInsnChain(regNO)); + } + } + } +} + +void InterDataDepAnalysis::Run(CDGRegion ®ion, MapleVector &dataNodes) { + uint32 nodeSum = 1; + MapleVector comments(interAlloc.Adapter()); + // Visit CDGNodes in the region follow the topological order of CFG + ComputeTopologicalOrderInRegion(region); + // Init data dependence info for the entire region + GlobalInit(dataNodes); + ddb.SeparateDependenceGraph(dataNodes, nodeSum); + for (std::size_t idx = 0; idx < readyNodes.size(); ++idx) { + CDGNode *cdgNode = readyNodes[idx]; + BB *curBB = cdgNode->GetBB(); + CHECK_FATAL(curBB != nullptr, "get bb from CDGNode failed"); + // Init data dependence info for cur cdgNode + LocalInit(*curBB, *cdgNode, dataNodes, idx); + const Insn *locInsn = curBB->GetFirstLoc(); + FOR_BB_INSNS(insn, curBB) { + if (!insn->IsMachineInstruction()) { + ddb.ProcessNonMachineInsn(*insn, comments, dataNodes, locInsn); + continue; + } + /* Add a pseudo node to separate dependence graph when appropriate */ + ddb.SeparateDependenceGraph(dataNodes, nodeSum); + /* Generate a DepNode */ + DepNode *ddgNode = ddb.GenerateDepNode(*insn, dataNodes, nodeSum, comments); + /* Build Dependency for may-throw insn */ + ddb.BuildMayThrowInsnDependency(*insn); + /* Build Dependency for each operand of insn */ + ddb.BuildOpndDependency(*insn); + /* Build Dependency for special insn */ + ddb.BuildSpecialInsnDependency(*insn, dataNodes); + /* Build Dependency for ambi insn if needed */ + 
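/*
 * Illustrative sketch (not part of this patch): the boundary convention used above.  Before
 * register allocation the leading pseudo separator is treated as the definer of every
 * live-in register and the trailing separator as a user of every live-out register, so
 * values that cross the block still have a def/use anchor inside it.  Toy data structures,
 * not the MapleBE ones.
 */
#include <cstdint>
#include <iostream>
#include <map>
#include <set>

using RegNo = std::uint32_t;
constexpr int kLeadingSeparator = -1;   // stands in for the first pseudo node
constexpr int kTrailingSeparator = -2;  // stands in for the last pseudo node

int main() {
  std::set<RegNo> liveIn = {100, 101};
  std::set<RegNo> liveOut = {101};
  std::map<RegNo, int> latestDef;  // regNO -> defining "insn id"
  std::multimap<RegNo, int> uses;  // regNO -> using "insn id"

  for (RegNo r : liveIn) {
    latestDef[r] = kLeadingSeparator;  // live-ins are "defined" at block entry
  }
  // ... the real pass now walks the instructions and updates latestDef/uses ...
  for (RegNo r : liveOut) {
    if (latestDef.count(r) != 0) {
      uses.emplace(r, kTrailingSeparator);  // live-outs are "used" at block exit
    }
  }
  for (const auto &[r, user] : uses) {
    std::cout << "R" << r << " is kept alive until pseudo insn " << user << "\n";
  }
  return 0;
}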
ddb.BuildAmbiInsnDependency(*insn); + /* Update stack and heap dependency */ + ddb.UpdateStackAndHeapDependency(*ddgNode, *insn, *locInsn); + if (insn->IsFrameDef()) { + ddb.SetLastFrameDefInsn(insn); + } + /* Separator exists */ + uint32 separatorIndex = ddb.GetSeparatorIndex(); + ddb.AddDependence(*dataNodes[separatorIndex], *insn->GetDepNode(), kDependenceTypeSeparator); + /* Update register use and register def */ + ddb.UpdateRegUseAndDef(*insn, *ddgNode, dataNodes); + } + ddb.CopyAndClearComments(comments); + } +} + +void InterDataDepAnalysis::GlobalInit(MapleVector &dataNodes) { + dataNodes.clear(); + // Need Check: where to record the pseudoSepNode? cdgNode? pseudoNode is of BB or of Region? + DepNode *pseudoSepNode = ddb.BuildSeparatorNode(); + (void)dataNodes.emplace_back(pseudoSepNode); + ddb.SetSeparatorIndex(0); +} + +void InterDataDepAnalysis::LocalInit(BB &bb, CDGNode &cdgNode, MapleVector &dataNodes, std::size_t idx) { + ddb.SetCDGNode(&cdgNode); + cdgNode.ClearDataDepInfo(); + /* Analysis live-in registers in catch BB */ + ddb.AnalysisAmbiInsns(bb); + + if (!cgFunc.IsAfterRegAlloc() && idx == 0) { + /* assume first pseudo_dependence_separator insn of current region define live-in's registers for first bb */ + DepNode *pseudoSepNode = dataNodes[0]; + Insn *pseudoSepInsn = pseudoSepNode->GetInsn(); + for (auto ®NO : bb.GetLiveInRegNO()) { + cdgNode.SetLatestDefInsn(regNO, pseudoSepInsn); + pseudoSepNode->AddDefReg(regNO); + pseudoSepNode->SetRegDefs(pseudoSepNode->GetDefRegnos().size(), nullptr); + } + } +} + +void InterDataDepAnalysis::ComputeTopologicalOrderInRegion(CDGRegion ®ion) { + MapleVector controlNodes = region.GetRegionNodes(); + InitRestNodes(controlNodes); + for (auto cdgNode : restNodes) { + // Check whether CFG preds of the CDGNode are in the cur region + BB *bb = cdgNode->GetBB(); + CHECK_FATAL(bb != nullptr, "get bb from CDGNode failed"); + bool hasNonPredsInRegion = true; + for (auto predIt = bb->GetPredsBegin(); predIt != bb->GetPredsEnd(); ++predIt) { + CDGNode *predNode = (*predIt)->GetCDGNode(); + CHECK_FATAL(predNode != nullptr, "get CDGNode from bb failed"); + if (predNode->GetRegion() == ®ion) { + hasNonPredsInRegion = false; + break; + } + } + if (hasNonPredsInRegion) { + AddReadyNode(cdgNode); + } + } +} + +void InterDataDepAnalysis::GenerateInterDDGDot(MapleVector &dataNodes) { + std::streambuf *coutBuf = std::cout.rdbuf(); + std::ofstream iddgFile; + std::streambuf *fileBuf = iddgFile.rdbuf(); + (void)std::cout.rdbuf(fileBuf); + + /* Define the output file name */ + std::string fileName; + (void)fileName.append("interDDG_"); + (void)fileName.append(cgFunc.GetName()); + (void)fileName.append(".dot"); + + iddgFile.open(fileName.c_str(), std::ios::trunc); + if (!iddgFile.is_open()) { + LogInfo::MapleLogger(kLlWarn) << "fileName:" << fileName << " open failed.\n"; + return; + } + iddgFile << "digraph InterDDG_" << cgFunc.GetName() << " {\n\n"; + iddgFile << " node [shape=box];\n\n"; + + /* Dump nodes style */ + for (auto node : dataNodes) { + MOperator mOp = node->GetInsn()->GetMachineOpcode(); + // Need move to target + const InsnDesc *md = &AArch64CG::kMd[mOp]; + iddgFile << " insn_" << node->GetInsn() << "["; + iddgFile << "label = \"" << node->GetInsn()->GetId() << ":\n"; + iddgFile << "{ " << md->name << "}\"];\n"; + } + iddgFile << "\n"; + + /* Dump edges style */ + for (auto node : dataNodes) { + for (auto succ : node->GetSuccs()) { + iddgFile << " insn" << node->GetInsn() << " -> " << "insn" << succ->GetTo().GetInsn(); + iddgFile <<" ["; + if 
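/*
 * Illustrative sketch (not part of this patch): the intent of the "ready node" seeding in
 * ComputeTopologicalOrderInRegion above is a topological order of the region's blocks,
 * i.e. Kahn's algorithm restricted to CFG edges whose two ends are both inside the region.
 * Toy graph below, not MapleBE data.
 */
#include <cstdint>
#include <iostream>
#include <map>
#include <queue>
#include <set>
#include <vector>

using BBId = std::uint32_t;

std::vector<BBId> RegionTopoOrder(const std::set<BBId> &region,
                                  const std::multimap<BBId, BBId> &cfgEdges) {
  std::map<BBId, std::uint32_t> inDegree;
  std::multimap<BBId, BBId> inner;  // edges with both ends in the region
  for (BBId bb : region) {
    inDegree[bb] = 0;
  }
  for (const auto &[src, dst] : cfgEdges) {
    if (region.count(src) != 0 && region.count(dst) != 0) {
      inner.emplace(src, dst);
      ++inDegree[dst];
    }
  }
  std::queue<BBId> ready;
  for (const auto &[bb, deg] : inDegree) {
    if (deg == 0) {
      ready.push(bb);  // no predecessor inside the region: visit first
    }
  }
  std::vector<BBId> order;
  while (!ready.empty()) {
    BBId bb = ready.front();
    ready.pop();
    order.push_back(bb);
    auto range = inner.equal_range(bb);
    for (auto it = range.first; it != range.second; ++it) {
      if (--inDegree[it->second] == 0) {
        ready.push(it->second);
      }
    }
  }
  return order;
}

int main() {
  std::set<BBId> region = {2, 3, 4};
  std::multimap<BBId, BBId> cfg = {{1, 2}, {2, 3}, {2, 4}, {3, 4}};
  for (BBId bb : RegionTopoOrder(region, cfg)) {
    std::cout << "BB" << bb << " ";
  }
  std::cout << "\n";  // prints: BB2 BB3 BB4
  return 0;
}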
(succ->GetDepType() == kDependenceTypeTrue) { + iddgFile << "color=red,"; + } + iddgFile << "label= \"" << succ->GetLatency() << "\""; + iddgFile << "];\n"; + } + } + iddgFile << "\n"; + + iddgFile << "}\n"; + (void)iddgFile.flush(); + iddgFile.close(); + (void)std::cout.rdbuf(coutBuf); +} +} /* namespace maplebe */ diff --git a/src/mapleall/maple_be/src/cg/data_dep_base.cpp b/src/mapleall/maple_be/src/cg/data_dep_base.cpp new file mode 100644 index 0000000000000000000000000000000000000000..8ed355d25d5f880e43cb35784ad6c149be3bda30 --- /dev/null +++ b/src/mapleall/maple_be/src/cg/data_dep_base.cpp @@ -0,0 +1,593 @@ +/* +* Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. +* +* OpenArkCompiler is licensed under Mulan PSL v2. +* You can use this software according to the terms and conditions of the Mulan PSL v2. +* You may obtain a copy of Mulan PSL v2 at: +* +* http://license.coscl.org.cn/MulanPSL2 +* +* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +* FIT FOR A PARTICULAR PURPOSE. +* See the Mulan PSL v2 for more details. +*/ + +#include "data_dep_base.h" + +namespace maplebe { +void DataDepBase::ProcessNonMachineInsn(Insn &insn, MapleVector &comments, MapleVector &dataNodes, + const Insn *&locInsn) { + CHECK_FATAL(!insn.IsMachineInstruction(), "need non-machine-instruction"); + if (insn.IsImmaterialInsn()) { + if (!insn.IsComment()) { + locInsn = &insn; + } else { + (void)comments.emplace_back(&insn); + } + } else if (insn.IsCfiInsn()) { + if (!dataNodes.empty()) { + dataNodes.back()->AddCfiInsn(insn); + } + } +} + +/* + * If the instruction's number of current basic block more than kMaxDependenceNum, + * then insert some pseudo separator node to split basic block. + */ +void DataDepBase::SeparateDependenceGraph(MapleVector &nodes, uint32 &nodeSum) { + if ((nodeSum > 0) && ((nodeSum % kMaxDependenceNum) == 0)) { + ASSERT(nodeSum == nodes.size(), "CG internal error, nodeSum should equal to nodes.size."); + /* Add a pseudo node to separate dependence graph */ + DepNode *separatorNode = BuildSeparatorNode(); + separatorNode->SetIndex(nodeSum); + (void)nodes.emplace_back(separatorNode); + curCDGNode->AddPseudoSepNodes(separatorNode); + BuildDepsSeparator(*separatorNode, nodes); + + if (beforeRA) { + /* for all live-out register of current bb */ + BB *curBB = curCDGNode->GetBB(); + CHECK_FATAL(curBB != nullptr, "get bb from cdgNode failed"); + for (auto ®NO : curBB->GetLiveOutRegNO()) { + if (curCDGNode->GetLatestDefInsn(regNO) != nullptr) { + curCDGNode->AppendUseInsnChain(regNO, separatorNode->GetInsn(), memPool, beforeRA); + separatorNode->AddUseReg(regNO); + CHECK_FATAL(curCDGNode->GetUseInsnChain(regNO)->insn != nullptr, "get useInsn failed"); + separatorNode->SetRegUses(*curCDGNode->GetUseInsnChain(regNO)); + } + } + } + curCDGNode->ClearDepDataVec(); + separatorIndex = nodeSum++; + } +} + +/* + * Generate a data depNode. + * insn : create depNode for the instruction. + * nodes : a vector to store depNode. + * nodeSum : the new depNode's index. + * comments : those comment insn between last no-comment's insn and insn. 
+ */ +DepNode *DataDepBase::GenerateDepNode(Insn &insn, MapleVector &nodes, + uint32 &nodeSum, MapleVector &comments) { + Reservation *rev = mad.FindReservation(insn); + ASSERT(rev != nullptr, "get reservation of insn failed"); + auto *depNode = memPool.New(insn, alloc, rev->GetUnit(), rev->GetUnitNum(), *rev); + if (beforeRA) { + auto *regPressure = memPool.New(alloc); + depNode->SetRegPressure(*regPressure); + depNode->InitPressure(); + } + depNode->SetIndex(nodeSum++); + (void)nodes.emplace_back(depNode); + insn.SetDepNode(*depNode); + + constexpr size_t vectorSize = 5; + depNode->ReservePreds(vectorSize); + depNode->ReserveSuccs(vectorSize); + + if (!comments.empty()) { + depNode->SetComments(comments); + comments.clear(); + } + return depNode; +} + +void DataDepBase::BuildDepsLastCallInsn(Insn &insn) { + Insn *lastCallInsn = curCDGNode->GetLastCallInsn(); + if (lastCallInsn != nullptr) { + AddDependence(*lastCallInsn->GetDepNode(), *insn.GetDepNode(), kDependenceTypeControl); + } + curCDGNode->SetLastCallInsn(&insn); +} + +void DataDepBase::BuildMayThrowInsnDependency(Insn &insn) { + /* Build dependency for may throw insn */ + if (insn.MayThrow()) { + BuildDepsMayThrowInsn(insn); + Insn *lastFrameDef = curCDGNode->GetLastFrameDefInsn(); + if (lastFrameDef != nullptr) { + AddDependence(*lastFrameDef->GetDepNode(), *insn.GetDepNode(), kDependenceTypeThrow); + } else if (!IsIntraBlockAnalysis()) { + BuildInterBlockSpecialDataInfoDependency(*insn.GetDepNode(), false, kDependenceTypeThrow, kLastFrameDef); + } + } +} + +void DataDepBase::BuildAmbiInsnDependency(Insn &insn) { + const auto &defRegnos = insn.GetDepNode()->GetDefRegnos(); + for (const auto ®NO : defRegnos) { + if (IfInAmbiRegs(regNO)) { + BuildDepsAmbiInsn(insn); + break; + } + } +} + +/* + * Build data dependence between control register and last call instruction. + * insn : instruction that with control register operand. + * isDest : if the control register operand is a destination operand. + */ +void DataDepBase::BuildDepsBetweenControlRegAndCall(Insn &insn, bool isDest) { + Insn *lastCallInsn = curCDGNode->GetLastCallInsn(); + if (lastCallInsn == nullptr) { + return; + } + if (isDest) { + AddDependence(*lastCallInsn->GetDepNode(), *insn.GetDepNode(), kDependenceTypeOutput); + return; + } + AddDependence(*lastCallInsn->GetDepNode(), *insn.GetDepNode(), kDependenceTypeAnti); +} + +/* Build control data dependence for branch/ret instructions */ +void DataDepBase::BuildDepsControlAll(Insn &insn, const MapleVector &nodes) { + DepNode *depNode = insn.GetDepNode(); + for (uint32 i = separatorIndex; i < depNode->GetIndex(); ++i) { + AddDependence(*nodes[i], *depNode, kDependenceTypeControl); + } +} + +/* A pseudo separator node depends all the other nodes */ +void DataDepBase::BuildDepsSeparator(DepNode &newSepNode, MapleVector &nodes) { + uint32 nextSepIndex = (separatorIndex + kMaxDependenceNum) < nodes.size() ? 
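/*
 * Illustrative sketch (not part of this patch): the separator discipline used above.  After
 * every kMaxDependenceNum real nodes a pseudo separator is appended and the nodes of the
 * finished window are connected to it, so later windows only need to depend on the
 * separator instead of on every earlier node.  kMaxDependenceNum's real value lives in the
 * scheduler headers; 100 below is only a stand-in.
 */
#include <cstdint>
#include <iostream>
#include <vector>

constexpr std::uint32_t kToyMaxDependenceNum = 100;  // stand-in for kMaxDependenceNum

int main() {
  const std::uint32_t insnCount = 230;
  std::vector<std::uint32_t> separatorIndices = {0};  // a leading separator always exists
  std::uint32_t nodeSum = 1;                          // index 0 is the leading separator
  for (std::uint32_t i = 0; i < insnCount; ++i) {
    if ((nodeSum % kToyMaxDependenceNum) == 0) {
      separatorIndices.push_back(nodeSum);  // close the current window
      ++nodeSum;
    }
    ++nodeSum;  // the instruction's own DepNode
  }
  separatorIndices.push_back(nodeSum);  // the trailing separator added at block end
  for (std::uint32_t idx : separatorIndices) {
    std::cout << "separator at node index " << idx << "\n";
  }
  return 0;
}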
(separatorIndex + kMaxDependenceNum) + : static_cast(nodes.size() - 1); + newSepNode.ReservePreds(nextSepIndex - separatorIndex); + newSepNode.ReserveSuccs(nextSepIndex - separatorIndex); + for (uint32 i = separatorIndex; i < nextSepIndex; ++i) { + AddDependence(*nodes[i], newSepNode, kDependenceTypeSeparator); + } +} + +/* Build data dependence of may throw instructions */ +void DataDepBase::BuildDepsMayThrowInsn(Insn &insn) { + if (IsIntraBlockAnalysis()) { + MapleVector ambiInsns = curCDGNode->GetAmbiguousInsns(); + AddDependence4InsnInVectorByType(ambiInsns, insn, kDependenceTypeThrow); + } else { + BuildInterBlockSpecialDataInfoDependency(*insn.GetDepNode(), false, kDependenceTypeThrow, kAmbiguous); + } +} + +/* + * Build data dependence of ambiguous instruction. + * ambiguous instruction: instructions that can not across may throw instructions + */ +void DataDepBase::BuildDepsAmbiInsn(Insn &insn) { + if (IsIntraBlockAnalysis()) { + MapleVector mayThrows = curCDGNode->GetMayThrowInsns(); + AddDependence4InsnInVectorByType(mayThrows, insn, kDependenceTypeThrow); + } else { + BuildInterBlockSpecialDataInfoDependency(*insn.GetDepNode(), false, kDependenceTypeThrow, kMayThrows); + } + curCDGNode->AddAmbiguousInsn(&insn); +} + +/* Build data dependence of destination register operand */ +void DataDepBase::BuildDepsDefReg(Insn &insn, regno_t regNO) { + DepNode *node = insn.GetDepNode(); + node->AddDefReg(regNO); + /* + * 1. For building intra-block data dependence, only require the data flow info of the curBB(cur CDGNode) + * 2. For building inter-block data dependence, require the data flow info of all BBs on the pred path in CFG + */ + /* Build anti dependence */ + // Build intra block data dependence + RegList *regList = curCDGNode->GetUseInsnChain(regNO); + while (regList != nullptr) { + CHECK_NULL_FATAL(regList->insn); + AddDependence(*regList->insn->GetDepNode(), *node, kDependenceTypeAnti); + regList = regList->next; + } + // Build inter block data dependence + if (!IsIntraBlockAnalysis()) { + BuildInterBlockDefUseDependency(*node, regNO, kDependenceTypeAnti, false); + } + + /* Build output dependence */ + // Build intra block data dependence + Insn *defInsn = curCDGNode->GetLatestDefInsn(regNO); + if (defInsn != nullptr) { + AddDependence(*defInsn->GetDepNode(), *node, kDependenceTypeOutput); + } else if (!IsIntraBlockAnalysis()) { + // Build inter block data dependence + BuildInterBlockDefUseDependency(*node, regNO, kDependenceTypeOutput, true); + } +} + +/* Build data dependence of source register operand */ +void DataDepBase::BuildDepsUseReg(Insn &insn, regno_t regNO) { + DepNode *node = insn.GetDepNode(); + node->AddUseReg(regNO); + + // Build intra block data dependence + Insn *defInsn = curCDGNode->GetLatestDefInsn(regNO); + if (defInsn != nullptr) { + AddDependence(*defInsn->GetDepNode(), *node, kDependenceTypeTrue); + } else if (!IsIntraBlockAnalysis()) { + // Build inter block data dependence + BuildInterBlockDefUseDependency(*node, regNO, kDependenceTypeTrue, true); + } +} + +/* Update stack and heap dependency */ +void DataDepBase::UpdateStackAndHeapDependency(DepNode &depNode, Insn &insn, const Insn &locInsn) { + if (!insn.MayThrow()) { + return; + } + depNode.SetLocInsn(locInsn); + curCDGNode->AddMayThrowInsn(&insn); + if (IsIntraBlockAnalysis()) { + AddDependence4InsnInVectorByType(curCDGNode->GetStackDefInsns(), insn, kDependenceTypeThrow); + AddDependence4InsnInVectorByType(curCDGNode->GetHeapDefInsns(), insn, kDependenceTypeThrow); + } else { + 
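/*
 * Illustrative sketch (not part of this patch): the three register dependence kinds the two
 * builders above create.  A def consults the pending use chain (anti/WAR) and the latest
 * def (output/WAW); a use consults only the latest def (true/RAW).  This is a toy model;
 * the real pass keeps its chains in the CDGNode and clears them in its own update step.
 */
#include <cstdint>
#include <iostream>
#include <map>
#include <string>
#include <vector>

using RegNo = std::uint32_t;
using InsnId = std::uint32_t;

struct ToyDepGraph {
  std::map<RegNo, InsnId> latestDef;
  std::map<RegNo, std::vector<InsnId>> useChain;
  std::vector<std::string> edges;

  void Def(InsnId insn, RegNo r) {
    for (InsnId user : useChain[r]) {
      edges.push_back("anti   " + std::to_string(user) + " -> " + std::to_string(insn));
    }
    if (latestDef.count(r) != 0) {
      edges.push_back("output " + std::to_string(latestDef[r]) + " -> " + std::to_string(insn));
    }
    latestDef[r] = insn;
    useChain[r].clear();  // a new def starts a new use chain (simplification)
  }
  void Use(InsnId insn, RegNo r) {
    if (latestDef.count(r) != 0) {
      edges.push_back("true   " + std::to_string(latestDef[r]) + " -> " + std::to_string(insn));
    }
    useChain[r].push_back(insn);
  }
};

int main() {
  ToyDepGraph g;
  g.Def(1, 100);  // insn 1: def R100
  g.Use(2, 100);  // insn 2: use R100        -> true   1 -> 2
  g.Def(3, 100);  // insn 3: def R100 again  -> anti   2 -> 3, output 1 -> 3
  for (const auto &e : g.edges) {
    std::cout << e << "\n";
  }
  return 0;
}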
BuildInterBlockSpecialDataInfoDependency(depNode, false, kDependenceTypeThrow, kStackDefs); + BuildInterBlockSpecialDataInfoDependency(depNode, false, kDependenceTypeThrow, kHeapDefs); + } +} + +void DataDepBase::BuildSeparatorNodeDependency(MapleVector &dataNodes, Insn &insn) { + AddDependence(*dataNodes[separatorIndex], *insn.GetDepNode(), kDependenceTypeSeparator); +} + +/* For inter data dependence analysis */ +void DataDepBase::BuildInterBlockDefUseDependency(DepNode &curDepNode, regno_t regNO, DepType depType, + bool isDef) { + CHECK_FATAL(!IsIntraBlockAnalysis(), "must be inter block data dependence analysis"); + BB *curBB = curCDGNode->GetBB(); + CHECK_FATAL(curBB != nullptr, "get bb from cdgNode failed"); + CDGRegion *curRegion = curCDGNode->GetRegion(); + CHECK_FATAL(curRegion != nullptr, "get region from cdgNode failed"); + std::vector visited(curRegion->GetMaxBBIdInRegion(), false); + if (isDef) { + BuildPredPathDefDependencyDFS(*curBB, visited, curDepNode, regNO, depType); + } else { + BuildPredPathUseDependencyDFS(*curBB, visited, curDepNode, regNO, depType); + } +} + +void DataDepBase::BuildPredPathDefDependencyDFS(BB &curBB, std::vector &visited, DepNode &depNode, + regno_t regNO, DepType depType) { + if (visited[curBB.GetId()]) { + return; + } + CDGNode *cdgNode = curBB.GetCDGNode(); + CHECK_FATAL(cdgNode != nullptr, "get cdgNode from bb failed"); + CDGRegion *curRegion = cdgNode->GetRegion(); + CHECK_FATAL(curRegion != nullptr, "get region from cdgNode failed"); + if (curRegion->GetRegionId() != curCDGNode->GetRegion()->GetRegionId()) { + return; + } + Insn *curDefInsn = cdgNode->GetLatestDefInsn(regNO); + if (curDefInsn != nullptr) { + visited[curBB.GetId()] = true; + AddDependence(*curDefInsn->GetDepNode(), depNode, depType); + return; + } + for (auto predIt = curBB.GetPredsBegin(); predIt != curBB.GetPredsEnd(); ++predIt) { + BuildPredPathDefDependencyDFS(**predIt, visited, depNode, regNO, depType); + } +} + +void DataDepBase::BuildPredPathUseDependencyDFS(BB &curBB, std::vector &visited, DepNode &depNode, + regno_t regNO, DepType depType) { + if (visited[curBB.GetId()]) { + return; + } + CDGNode *cdgNode = curBB.GetCDGNode(); + CHECK_FATAL(cdgNode != nullptr, "get cdgNode from bb failed"); + CDGRegion *curRegion = cdgNode->GetRegion(); + CHECK_FATAL(curRegion != nullptr, "get region from cdgNode failed"); + if (curRegion->GetRegionId() != curCDGNode->GetRegion()->GetRegionId()) { + return; + } + visited[curBB.GetId()] = true; + RegList *useChain = cdgNode->GetUseInsnChain(regNO); + while (useChain != nullptr) { + Insn *useInsn = useChain->insn; + CHECK_FATAL(useInsn != nullptr, "get useInsn failed"); + AddDependence(*useInsn->GetDepNode(), depNode, depType); + } + for (auto predIt = curBB.GetPredsBegin(); predIt != curBB.GetPredsEnd(); ++predIt) { + BuildPredPathDefDependencyDFS(**predIt, visited, depNode, regNO, depType); + } +} + +void DataDepBase::BuildInterBlockSpecialDataInfoDependency(DepNode &curDepNode, bool needCmp, DepType depType, + DataDepBase::DataFlowInfoType infoType) { + CHECK_FATAL(!IsIntraBlockAnalysis(), "must be inter block data dependence analysis"); + BB *curBB = curCDGNode->GetBB(); + CHECK_FATAL(curBB != nullptr, "get bb from cdgNode failed"); + CDGRegion *curRegion = curCDGNode->GetRegion(); + CHECK_FATAL(curRegion != nullptr, "get region from cdgNode failed"); + std::vector visited(curRegion->GetMaxBBIdInRegion(), false); + BuildPredPathSpecialDataInfoDependencyDFS(*curBB, visited, needCmp, curDepNode, depType, infoType); +} + +void 
DataDepBase::BuildPredPathSpecialDataInfoDependencyDFS(BB &curBB, std::vector &visited, bool needCmp, + DepNode &depNode, DepType depType, + DataDepBase::DataFlowInfoType infoType) { + if (visited[curBB.GetId()]) { + return; + } + CDGNode *cdgNode = curBB.GetCDGNode(); + CHECK_FATAL(cdgNode != nullptr, "get cdgNode from bb failed"); + CDGRegion *curRegion = cdgNode->GetRegion(); + CHECK_FATAL(curRegion != nullptr, "get region from cdgNode failed"); + if (curRegion->GetRegionId() != curCDGNode->GetRegion()->GetRegionId()) { + return; + } + + switch (infoType) { + case kMembar: { + Insn *membarInsn = cdgNode->GetMembarInsn(); + if (membarInsn != nullptr) { + visited[curBB.GetId()] = true; + AddDependence(*membarInsn->GetDepNode(), depNode, depType); + } + break; + } + case kLastCall: { + Insn *lastCallInsn = cdgNode->GetLastCallInsn(); + if (lastCallInsn != nullptr) { + visited[curBB.GetId()] = true; + AddDependence(*lastCallInsn->GetDepNode(), depNode, depType); + } + break; + } + case kLastFrameDef: { + Insn *lastFrameDef = cdgNode->GetLastFrameDefInsn(); + if (lastFrameDef != nullptr) { + visited[curBB.GetId()] = true; + AddDependence(*lastFrameDef->GetDepNode(), depNode, depType); + } + break; + } + case kStackUses: { + visited[curBB.GetId()] = true; + MapleVector stackUses = cdgNode->GetStackUseInsns(); + if (needCmp) { + AddDependence4InsnInVectorByTypeAndCmp(stackUses, *depNode.GetInsn(), depType); + } else { + AddDependence4InsnInVectorByType(stackUses, *depNode.GetInsn(), depType); + } + break; + } + case kStackDefs: { + visited[curBB.GetId()] = true; + MapleVector stackDefs = cdgNode->GetStackDefInsns(); + if (needCmp) { + AddDependence4InsnInVectorByTypeAndCmp(stackDefs, *depNode.GetInsn(), depType); + } else { + AddDependence4InsnInVectorByType(stackDefs, *depNode.GetInsn(), depType); + } + break; + } + case kHeapUses: { + visited[curBB.GetId()] = true; + MapleVector heapUses = cdgNode->GetHeapUseInsns(); + if (needCmp) { + AddDependence4InsnInVectorByTypeAndCmp(heapUses, *depNode.GetInsn(), depType); + } else { + AddDependence4InsnInVectorByType(heapUses, *depNode.GetInsn(), depType); + } + break; + } + case kHeapDefs: { + visited[curBB.GetId()] = true; + MapleVector heapDefs = cdgNode->GetHeapDefInsns(); + if (needCmp) { + AddDependence4InsnInVectorByTypeAndCmp(heapDefs, *depNode.GetInsn(), depType); + } else { + AddDependence4InsnInVectorByType(heapDefs, *depNode.GetInsn(), depType); + } + break; + } + case kMayThrows: { + visited[curBB.GetId()] = true; + MapleVector mayThrows = cdgNode->GetMayThrowInsns(); + AddDependence4InsnInVectorByType(mayThrows, *depNode.GetInsn(), depType); + break; + } + case kAmbiguous: { + visited[curBB.GetId()] = true; + MapleVector ambiInsns = cdgNode->GetAmbiguousInsns(); + AddDependence4InsnInVectorByType(ambiInsns, *depNode.GetInsn(), depType); + break; + } + default: { + visited[curBB.GetId()] = true; + break; + } + } + for (auto predIt = curBB.GetPredsBegin(); predIt != curBB.GetPredsEnd(); ++predIt) { + BuildPredPathSpecialDataInfoDependencyDFS(**predIt, visited, needCmp, depNode, depType, infoType); + } +} + +/* + * Add data dependence edge : + * Two data dependence node has a unique edge. + * True data dependence overwrites other dependence. 
+ */ +void DataDepBase::AddDependence(DepNode &fromNode, DepNode &toNode, DepType depType) { + /* Can not build a self loop dependence */ + if (&fromNode == &toNode) { + return; + } + /* Check if exist edge */ + if (!fromNode.GetSuccs().empty()) { + DepLink *depLink = fromNode.GetSuccs().back(); + if (&(depLink->GetTo()) == &toNode) { + if (depLink->GetDepType() != kDependenceTypeTrue && depType == kDependenceTypeTrue) { + /* Has existed edge, replace it */ + depLink->SetDepType(kDependenceTypeTrue); + depLink->SetLatency(static_cast(mad.GetLatency(*fromNode.GetInsn(), *toNode.GetInsn()))); + } + return; + } + } + auto *depLink = memPool.New(fromNode, toNode, depType); + if (depType == kDependenceTypeTrue) { + depLink->SetLatency(static_cast(mad.GetLatency(*fromNode.GetInsn(), *toNode.GetInsn()))); + } + fromNode.AddSucc(*depLink); + toNode.AddPred(*depLink); +} + +void DataDepBase::AddDependence4InsnInVectorByType(MapleVector &insns, Insn &insn, const DepType &type) { + for (auto anyInsn : insns) { + AddDependence(*anyInsn->GetDepNode(), *insn.GetDepNode(), type); + } +} + +void DataDepBase::AddDependence4InsnInVectorByTypeAndCmp(MapleVector &insns, Insn &insn, const DepType &type) { + for (auto anyInsn : insns) { + if (anyInsn != &insn) { + AddDependence(*anyInsn->GetDepNode(), *insn.GetDepNode(), type); + } + } +} + +/* Combine two data dependence nodes to one */ +void DataDepBase::CombineDependence(DepNode &firstNode, const DepNode &secondNode, bool isAcrossSeparator, + bool isMemCombine) { + if (isAcrossSeparator) { + /* Clear all latency of the second node. */ + for (auto predLink : secondNode.GetPreds()) { + predLink->SetLatency(0); + } + for (auto succLink : secondNode.GetSuccs()) { + succLink->SetLatency(0); + } + return; + } + std::set uniqueNodes; + + for (auto predLink : firstNode.GetPreds()) { + if (predLink->GetDepType() == kDependenceTypeTrue) { + predLink->SetLatency( + static_cast(mad.GetLatency(*predLink->GetFrom().GetInsn(), *firstNode.GetInsn()))); + } + (void)uniqueNodes.insert(&predLink->GetFrom()); + } + for (auto predLink : secondNode.GetPreds()) { + if (&predLink->GetFrom() != &firstNode) { + if (uniqueNodes.insert(&(predLink->GetFrom())).second) { + AddDependence(predLink->GetFrom(), firstNode, predLink->GetDepType()); + } + } + predLink->SetLatency(0); + } + uniqueNodes.clear(); + for (auto succLink : firstNode.GetSuccs()) { + if (succLink->GetDepType() == kDependenceTypeTrue) { + succLink->SetLatency( + static_cast(mad.GetLatency(*succLink->GetFrom().GetInsn(), *firstNode.GetInsn()))); + } + (void)uniqueNodes.insert(&(succLink->GetTo())); + } + for (auto succLink : secondNode.GetSuccs()) { + if (uniqueNodes.insert(&(succLink->GetTo())).second) { + AddDependence(firstNode, succLink->GetTo(), succLink->GetDepType()); + if (isMemCombine) { + succLink->GetTo().IncreaseValidPredsSize(); + } + } + succLink->SetLatency(0); + } +} + +/* Remove self data dependence (self loop) in data dependence graph. 
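/*
 * Illustrative sketch (not part of this patch): the edge-upgrade rule implemented in
 * AddDependence above.  If an edge between the same two nodes already exists, a stronger
 * true (RAW) dependence replaces its kind and gets a real latency; weaker kinds never
 * downgrade an edge.  The real code only inspects the most recently added successor; the
 * toy keys on the node pair for brevity, and the latencies are invented numbers rather
 * than values from the machine description.
 */
#include <iostream>
#include <map>
#include <utility>

enum class DepKind { kTrue, kAnti, kOutput, kControl };

struct ToyEdge {
  DepKind kind;
  unsigned latency;
};

void AddEdge(std::map<std::pair<int, int>, ToyEdge> &edges, int from, int to,
             DepKind kind, unsigned trueLatency) {
  auto key = std::make_pair(from, to);
  auto it = edges.find(key);
  if (it == edges.end()) {
    edges[key] = {kind, kind == DepKind::kTrue ? trueLatency : 0U};
    return;
  }
  if (it->second.kind != DepKind::kTrue && kind == DepKind::kTrue) {
    it->second = {DepKind::kTrue, trueLatency};  // upgrade the existing edge
  }
}

int main() {
  std::map<std::pair<int, int>, ToyEdge> edges;
  AddEdge(edges, 1, 2, DepKind::kAnti, 2);  // first contact: anti, latency 0
  AddEdge(edges, 1, 2, DepKind::kTrue, 2);  // upgraded to true, latency 2
  for (const auto &[key, e] : edges) {
    std::cout << key.first << " -> " << key.second << " latency " << e.latency << "\n";
  }
  return 0;
}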
*/ +void DataDepBase::RemoveSelfDeps(Insn &insn) { + DepNode *node = insn.GetDepNode(); + ASSERT(node->GetSuccs().back()->GetTo().GetInsn() == &insn, "Is not a self dependence."); + ASSERT(node->GetPreds().back()->GetFrom().GetInsn() == &insn, "Is not a self dependence."); + node->RemoveSucc(); + node->RemovePred(); +} + +/* Check if in intra-block data dependence analysis */ +bool DataDepBase::IsIntraBlockAnalysis() const { + if (curCDGNode->GetRegion() == nullptr || curCDGNode->GetRegion()->GetRegionNodes().size() == 1) { + return true; + } + return false; +} + +/* Check if regNO is in ehInRegs. */ +bool DataDepBase::IfInAmbiRegs(regno_t regNO) const { + if (!curCDGNode->HasAmbiRegs()) { + return false; + } + MapleSet ehInRegs = curCDGNode->GetEhInRegs(); + if (ehInRegs.find(regNO) != ehInRegs.end()) { + return true; + } + return false; +} + +/* Return data dependence type name */ +const std::string &DataDepBase::GetDepTypeName(DepType depType) const { + ASSERT(depType <= kDependenceTypeNone, "array boundary check failed"); + return kDepTypeName[depType]; +} + +/* Print data dep node information */ +void DataDepBase::DumpDepNode(const DepNode &node) const { + node.GetInsn()->Dump(); + uint32 num = node.GetUnitNum(); + LogInfo::MapleLogger() << "unit num : " << num << ", "; + for (uint32 i = 0; i < num; ++i) { + const Unit *unit = node.GetUnitByIndex(i); + if (unit != nullptr) { + PRINT_VAL(unit->GetName()); + } else { + PRINT_VAL("none"); + } + } + LogInfo::MapleLogger() << '\n'; + node.DumpSchedInfo(); + if (beforeRA) { + node.DumpRegPressure(); + } +} + +/* Print dep link information */ +void DataDepBase::DumpDepLink(const DepLink &link, const DepNode *node) const { + PRINT_VAL(GetDepTypeName(link.GetDepType())); + PRINT_STR_VAL("Latency: ", link.GetLatency()); + if (node != nullptr) { + node->GetInsn()->Dump(); + return; + } + LogInfo::MapleLogger() << "from : "; + link.GetFrom().GetInsn()->Dump(); + LogInfo::MapleLogger() << "to : "; + link.GetTo().GetInsn()->Dump(); +} +} /* namespace maplebe */ diff --git a/src/mapleall/maple_be/src/cg/dbg.cpp b/src/mapleall/maple_be/src/cg/dbg.cpp new file mode 100644 index 0000000000000000000000000000000000000000..a5fecd9b8c63e8230aa80989cd1bbfd71034fe4a --- /dev/null +++ b/src/mapleall/maple_be/src/cg/dbg.cpp @@ -0,0 +1,78 @@ +/* + * Copyright (C) [2020-2022] Futurewei Technologies, Inc. All rights reverved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "dbg.h" +#include "emit.h" + +namespace mpldbg { +using maplebe::Operand; +using maplebe::MOperator; +using maplebe::CG; +using maplebe::Emitter; +using maplebe::OpndDesc; + +struct DbgDescr { + const std::string name; + uint32 opndCount; + /* create 3 OperandType array to store dbg instruction's operand type */ + std::array opndTypes; +}; + +static DbgDescr dbgDescrTable[kOpDbgLast + 1] = { +#define DBG_DEFINE(k, sub, n, o0, o1, o2) \ + { #k, n, { Operand::kOpd##o0, Operand::kOpd##o1, Operand::kOpd##o2 } }, +#include "dbg.def" +#undef DBG_DEFINE + { "undef", 0, { Operand::kOpdUndef, Operand::kOpdUndef, Operand::kOpdUndef } } +}; + +void DbgInsn::Dump() const { + MOperator mOp = GetMachineOpcode(); + DbgDescr &dbgDescr = dbgDescrTable[mOp]; + LogInfo::MapleLogger() << "DBG " << dbgDescr.name; + for (uint32 i = 0; i < dbgDescr.opndCount; ++i) { + LogInfo::MapleLogger() << (i == 0 ? " : " : " "); + Operand &curOperand = GetOperand(i); + curOperand.Dump(); + } + LogInfo::MapleLogger() << "\n"; +} + +#if DEBUG +void DbgInsn::Check() const { + DbgDescr &dbgDescr = dbgDescrTable[GetMachineOpcode()]; + /* dbg instruction's 3rd /4th/5th operand must be null */ + for (uint32 i = 0; i < dbgDescr.opndCount; ++i) { + Operand &opnd = GetOperand(i); + if (opnd.GetKind() != dbgDescr.opndTypes[i]) { + CHECK_FATAL(false, "incorrect operand in debug insn"); + } + } +} +#endif + +uint32 DbgInsn::GetLoc() const { + if (mOp != OP_DBG_loc) { + return 0; + } + return static_cast(static_cast(opnds[0])->GetVal()); +} + +void ImmOperand::Dump() const { + LogInfo::MapleLogger() << " " << val; +} +void DBGOpndEmitVisitor::Visit(ImmOperand *v) { + emitter.Emit(v->GetVal()); +} +} /* namespace maplebe */ diff --git a/src/mapleall/maple_be/src/cg/ebo.cpp b/src/mapleall/maple_be/src/cg/ebo.cpp new file mode 100644 index 0000000000000000000000000000000000000000..258808e83bf2572f4efa862a5091ba75bfe2a5df --- /dev/null +++ b/src/mapleall/maple_be/src/cg/ebo.cpp @@ -0,0 +1,1306 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#if TARGAARCH64 +#include "aarch64_ebo.h" +#elif TARGRISCV64 +#include "riscv64_ebo.h" +#endif +#if TARGARM32 +#include "arm32_ebo.h" +#endif +#include "securec.h" + +#include "optimize_common.h" + +/* + * The Optimizations include forward propagation, common expression elimination, constant folding, + * dead code elimination and some target optimizations. The main entry of the optimization is run. + * When the Optimization level is less than O2, it can only perform in single block. and in O2 it + * can perform it a sequence of blocks. + */ +namespace maplebe { +using namespace maple; + +#define EBO_DUMP CG_DEBUG_FUNC(*cgFunc) +#define EBO_DUMP_NEWPM CG_DEBUG_FUNC(f) +#define TRUE_OPND cgFunc->GetTrueOpnd() + +constexpr uint32 kEboOpndHashLength = 521; +constexpr uint32 kEboMaxBBNums = 200; + +/* Return the opndInfo for the first mem operand of insn. 
*/ +MemOpndInfo *Ebo::GetMemInfo(InsnInfo &insnInfo) const { + Insn *insn = insnInfo.insn; + CHECK_FATAL(insn != nullptr, "insnInfo.insn is nullptr!"); + CHECK_FATAL(insn->AccessMem(), "insn is not access memory!"); + uint32 opndNum = insn->GetOperandSize(); + if (insn->IsLoad()) { + for (uint32 i = 0; i < opndNum; ++i) { + if (insn->GetOperand(i).IsMemoryAccessOperand()) { + return static_cast(insnInfo.origOpnd[i]); + } + } + } else if (insn->IsStore()) { + int32 resId = 0; + for (uint32 i = 0; i < opndNum; ++i) { + if (insn->OpndIsDef(i)) { + if (insn->GetOperand(i).IsMemoryAccessOperand()) { + return static_cast(insnInfo.result[resId]); + } else { + resId++; + } + } + } + } + return nullptr; +} + +void Ebo::EnlargeSpaceForLA(Insn &csetInsn) const { + CHECK_FATAL(live != nullptr, "no live info!"); + live->EnlargeSpaceForLiveAnalysis(*csetInsn.GetBB()); +} + +bool Ebo::IsFrameReg(Operand &opnd) const { + if (!opnd.IsRegister()) { + return false; + } + RegOperand ® = static_cast(opnd); + return cgFunc->IsFrameReg(reg); +} + +Operand *Ebo::GetZeroOpnd(uint32 size) const { +#if TARGAARCH64 || TARGRISCV64 + return size > k64BitSize ? nullptr : &cgFunc->GetZeroOpnd(size); +#else + return nullptr; +#endif +} + +bool Ebo::IsSaveReg(const Operand &opnd) const { + if (!opnd.IsRegister()) { + return false; + } + const RegOperand ® = static_cast(opnd); + return cgFunc->IsSaveReg(reg, *cgFunc->GetFunction().GetReturnType(), cgFunc->GetBecommon()); +} + +bool Ebo::IsPhysicalReg(const Operand &opnd) const { + if (!opnd.IsRegister()) { + return false; + } + const RegOperand ® = static_cast(opnd); + return reg.IsPhysicalRegister(); +} + +bool Ebo::HasAssignedReg(const Operand &opnd) const { + if (!opnd.IsRegister()) { + return false; + } + const auto ® = static_cast(opnd); + return reg.IsVirtualRegister() ? (!IsInvalidReg(reg)) : true; +} + +bool Ebo::IsOfSameClass(const Operand &op0, const Operand &op1) const { + if (!op0.IsRegister() || !op1.IsRegister()) { + return false; + } + const auto ®0 = static_cast(op0); + const auto ®1 = static_cast(op1); + return reg0.GetRegisterType() == reg1.GetRegisterType(); +} + +/* return true if opnd of bb is available. */ +bool Ebo::OpndAvailableInBB(const BB &bb, OpndInfo *info) const { + if (info == nullptr) { + return false; + } + if (info->opnd == nullptr) { + return false; + } + + Operand *op = info->opnd; + if (IsConstantImmOrReg(*op)) { + return true; + } + + int32 hashVal = 0; + if (op->IsRegShift() || op->IsRegister()) { + hashVal = -1; + } else { + hashVal = info->hashVal; + } + if (GetOpndInfo(*op, hashVal) != info) { + return false; + } + /* global operands aren't supported at low levels of optimization. */ + if ((Globals::GetInstance()->GetOptimLevel() < CGOptions::kLevel2) && (&bb != info->bb)) { + return false; + } + if (beforeRegAlloc && IsPhysicalReg(*op)) { + return false; + } + return true; +} + +bool Ebo::ForwardPropCheck(const Operand *opndReplace, const OpndInfo &opndInfo, const Operand &opnd, Insn &insn) { + if (opndReplace == nullptr) { + return false; + } + if ((opndInfo.replacementInfo != nullptr) && opndInfo.replacementInfo->redefined) { + return false; + } +#if TARGARM32 + /* for arm32, disable forwardProp in strd insn. 
*/ + if (insn.GetMachineOpcode() == MOP_strd) { + return false; + } + if (opndInfo.mayReDef) { + return false; + } +#endif + if (!(IsConstantImmOrReg(*opndReplace) || + ((OpndAvailableInBB(*insn.GetBB(), opndInfo.replacementInfo) || RegistersIdentical(opnd, *opndReplace)) && + (HasAssignedReg(opnd) == HasAssignedReg(*opndReplace))))) { + return false; + } + /* if beforeRA, replace op should not be PhysicalRe */ + return !beforeRegAlloc || !IsPhysicalReg(*opndReplace); +} + +bool Ebo::RegForwardCheck(Insn &insn, const Operand &opnd, const Operand *opndReplace, Operand &oldOpnd, + const OpndInfo *tmpInfo) const { + if (IsConstantImmOrReg(opnd)) { + return false; + } + if (!(!beforeRegAlloc || (HasAssignedReg(oldOpnd) == HasAssignedReg(*opndReplace)) || IsZeroRegister(opnd) || + !insn.IsMove())) { + return false; + } + std::set defRegs = insn.GetDefRegs(); + if (!(defRegs.empty() || + ((opnd.IsRegister() && !defRegs.count(static_cast(opnd).GetRegisterNumber())) || + !beforeRegAlloc))) { + return false; + } + if (!(beforeRegAlloc || !IsFrameReg(oldOpnd))) { + return false; + } + if (insn.GetBothDefUseOpnd() != kInsnMaxOpnd) { + return false; + } + if (IsPseudoRet(insn)) { + return false; + } + + return ((IsOfSameClass(oldOpnd, *opndReplace) && (oldOpnd.GetSize() <= opndReplace->GetSize())) || + ((tmpInfo != nullptr) && IsMovToSIMDVmov(insn, *tmpInfo->insn))); +} + +/* For Memory Operand, its info was stored in a hash table, this function is to compute its hash value. */ +int32 Ebo::ComputeOpndHash(const Operand &opnd) const { + uint64 hashIdx = reinterpret_cast(&opnd) >> k4ByteSize; + return static_cast(hashIdx % kEboOpndHashLength); +} + +/* Store the operand information. Store it to the vRegInfo if is register. otherwise put it to the hash table. */ +void Ebo::SetOpndInfo(const Operand &opnd, OpndInfo *opndInfo, int32 hashVal) { + /* opnd is Register or RegShift */ + if (hashVal == -1) { + const RegOperand ® = GetRegOperand(opnd); + vRegInfo[reg.GetRegisterNumber()] = opndInfo; + return; + } + + CHECK_FATAL(static_cast(static_cast(hashVal)) < exprInfoTable.size(), + "SetOpndInfo hashval outof range!"); + opndInfo->hashVal = hashVal; + opndInfo->hashNext = exprInfoTable.at(hashVal); + exprInfoTable.at(hashVal) = opndInfo; +} + +/* Used to change the info of opnd from opndinfo to newinfo. 
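/*
 * Illustrative sketch (not part of this patch): the operand hashing scheme above.
 * Non-register operands are hashed by their address into a fixed table of
 * kEboOpndHashLength (521) buckets and chained through hashNext, while registers bypass
 * the table and are indexed directly by register number.  Toy reimplementation with
 * invented names.
 */
#include <cstdint>
#include <iostream>

constexpr std::uint32_t kToyOpndHashLength = 521;  // same prime as kEboOpndHashLength

struct ToyOpndInfo {
  const void *opnd = nullptr;
  ToyOpndInfo *hashNext = nullptr;  // chain of operands that share a bucket
};

std::int32_t ComputeToyOpndHash(const void *opnd) {
  std::uintptr_t hashIdx = reinterpret_cast<std::uintptr_t>(opnd) >> 4;  // drop alignment bits
  return static_cast<std::int32_t>(hashIdx % kToyOpndHashLength);
}

int main() {
  static ToyOpndInfo *table[kToyOpndHashLength] = {nullptr};
  static int dummyOperand = 0;
  ToyOpndInfo info;
  info.opnd = &dummyOperand;
  std::int32_t h = ComputeToyOpndHash(info.opnd);
  info.hashNext = table[h];  // push onto the bucket's chain
  table[h] = &info;
  std::cout << "operand hashed to bucket " << h << "\n";
  return 0;
}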
*/ +void Ebo::UpdateOpndInfo(const Operand &opnd, OpndInfo &opndInfo, OpndInfo *newInfo, int32 hashVal) { + if (hashVal == -1) { + const RegOperand ® = GetRegOperand(opnd); + vRegInfo[reg.GetRegisterNumber()] = newInfo; + return; + } + ASSERT(hashVal < exprInfoTable.size(), "SetOpndInfo hashval outof range!"); + OpndInfo *info = exprInfoTable.at(hashVal); + if (newInfo != nullptr) { + newInfo->hashNext = opndInfo.hashNext; + opndInfo.hashNext = nullptr; + if (info == &opndInfo) { + exprInfoTable.at(hashVal) = newInfo; + return; + } + while (info != nullptr) { + if (info->hashNext == &opndInfo) { + info->hashNext = newInfo; + return; + } + info = info->hashNext; + } + return; + } + if (info == &opndInfo) { + exprInfoTable.at(hashVal) = opndInfo.hashNext; + return; + } + while (info != nullptr) { + if (info->hashNext == &opndInfo) { + info->hashNext = opndInfo.next; + opndInfo.hashNext = nullptr; + return; + } + info = info->hashNext; + } +} + +/* return true if op1 op2 is equal */ +bool Ebo::OperandEqual(const Operand &op1, const Operand &op2) const { + if (&op1 == &op2) { + return true; + } + if (op1.GetKind() != op2.GetKind()) { + return false; + } + return OperandEqSpecial(op1, op2); +} + +OpndInfo *Ebo::GetOpndInfo(const Operand &opnd, int32 hashVal) const { + if (hashVal < 0) { + const RegOperand ® = GetRegOperand(opnd); + auto it = vRegInfo.find(reg.GetRegisterNumber()); + return it != vRegInfo.end() ? it->second : nullptr; + } + /* do not find prev memOpend */ + if (opnd.IsMemoryAccessOperand()) { + return nullptr; + } + ASSERT(hashVal < exprInfoTable.size(), "SetOpndInfo hashval outof range!"); + OpndInfo *info = exprInfoTable.at(hashVal); + while (info != nullptr) { + if (&opnd == info->opnd) { + return info; + } + info = info->hashNext; + } + return nullptr; +} + +/* Create a opndInfo for opnd. */ +OpndInfo *Ebo::GetNewOpndInfo(BB &bb, Insn *insn, Operand &opnd, int32 hashVal) { + OpndInfo *opndInfo = nullptr; + if (opnd.IsMemoryAccessOperand()) { + opndInfo = eboMp->New(opnd); + } else { + opndInfo = eboMp->New(opnd); + } + /* Initialize the entry. */ + opndInfo->hashVal = hashVal; + opndInfo->opnd = &opnd; + opndInfo->bb = &bb; + opndInfo->insn = insn; + opndInfo->prev = lastOpndInfo; + if (firstOpndInfo == nullptr) { + firstOpndInfo = opndInfo; + } else { + lastOpndInfo->next = opndInfo; + } + lastOpndInfo = opndInfo; + return opndInfo; +} + +/* Update the use infomation for localOpnd because of its use insn currentInsn. 
*/ +OpndInfo *Ebo::OperandInfoUse(BB ¤tBB, Operand &localOpnd) { + if (!(localOpnd.IsRegister() || localOpnd.IsRegShift()) && !localOpnd.IsMemoryAccessOperand()) { + return nullptr; + } + int hashVal = 0; + /* only arm32 has regShift */ + if (localOpnd.IsRegister() || localOpnd.IsRegShift()) { + hashVal = -1; + } else { + hashVal = ComputeOpndHash(localOpnd); + } + OpndInfo *opndInfo = GetOpndInfo(localOpnd, hashVal); + + if (opndInfo == nullptr) { + opndInfo = GetNewOpndInfo(currentBB, nullptr, localOpnd, hashVal); + SetOpndInfo(localOpnd, opndInfo, hashVal); + } + IncRef(*opndInfo); + return opndInfo; +} + +/* return true if op0 is identical with op1 */ +bool Ebo::RegistersIdentical(const Operand &op0, const Operand &op1) const { + if (&op0 == &op1) { + return true; + } + if (!(op0.IsRegister() && op1.IsRegister())) { + return false; + } + const RegOperand ®0 = static_cast(op0); + const RegOperand ®1 = static_cast(op1); + return ((reg0.IsPhysicalRegister() || !IsInvalidReg(reg0)) && + (reg1.IsPhysicalRegister() || !IsInvalidReg(reg1)) && + (reg0.GetRegisterType() == reg1.GetRegisterType()) && + (reg0.GetRegisterNumber() == reg1.GetRegisterNumber())); +} + +InsnInfo *Ebo::GetNewInsnInfo(Insn &insn) { + InsnInfo *insnInfo = eboMp->New(*eboMp, insn); + insnInfo->prev = lastInsnInfo; + if (firstInsnInfo == nullptr) { + firstInsnInfo = insnInfo; + } else { + lastInsnInfo->next = insnInfo; + } + lastInsnInfo = insnInfo; + insnInfo->next = nullptr; + return insnInfo; +} + +uint32 Ebo::ComputeHashVal(Insn &insn, const MapleVector &opndInfos) const { + uint32 hashVal = 0; + if (insn.AccessMem()) { + hashVal = kEboDefaultMemHash; + if (insn.NoAlias()) { + hashVal = kEboNoAliasMemHash; + } + MemOperand *memOpnd = static_cast(insn.GetMemOpnd()); + if (memOpnd != nullptr) { + Operand *baseReg = memOpnd->GetBaseRegister(); + if ((baseReg != nullptr) && IsFrameReg(*baseReg)) { + hashVal = kEboSpillMemHash; + } + } + } else if (Globals::GetInstance()->GetTarget()->IsEffectiveCopy(insn)) { + hashVal = kEboCopyInsnHash; + } else { + uint32 opndNum = insn.GetOperandSize(); + hashVal = insn.GetMachineOpcode(); + for (uint32 i = 0; i < opndNum; ++i) { + hashVal += static_cast(reinterpret_cast(opndInfos.at(i))); + } + hashVal = static_cast(kEboReservedInsnHash + EBO_EXP_INSN_HASH(hashVal)); + } + return hashVal; +} + +/* computeHashVal of insn */ +void Ebo::HashInsn(Insn &insn, const MapleVector &origInfo, const MapleVector &opndInfos) { + uint32 hashVal = ComputeHashVal(insn, opndInfos); + /* Create a new insnInfo entry and add the new insn to the hash table. */ + InsnInfo *insnInfo = GetNewInsnInfo(insn); + insnInfo->bb = insn.GetBB(); + insnInfo->insn = &insn; + insnInfo->hashIndex = hashVal; + insnInfo->same = insnInfoTable.at(hashVal); + + if (!beforeRegAlloc) { + if ((insn.IsCall() || insn.IsTailCall() || insn.IsAsmInsn()) && !insn.GetIsThrow()) { + DefineCallerSaveRegisters(*insnInfo); + } else if (IsClinitCheck(insn)) { + DefineClinitSpecialRegisters(*insnInfo); + } + } + uint32 opndNum = insn.GetOperandSize(); + for (uint32 i = 0; i < opndNum; ++i) { + /* Copy all the opndInfo entries for the operands. */ + insnInfo->origOpnd.emplace_back(origInfo.at(i)); + insnInfo->optimalOpnd.emplace_back(opndInfos.at(i)); + /* Keep the result info. 
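/*
 * Illustrative sketch (not part of this patch): why hashing an insn by its opcode plus the
 * value-numbered operand infos (as HashInsn above does) finds common subexpressions -- two
 * insns that apply the same opcode to the same operand values collide in the table, so the
 * second one can reuse the first one's result.  This is plain local value numbering with
 * invented names, not the insnInfoTable chaining itself.
 */
#include <cstdint>
#include <iostream>
#include <map>
#include <string>
#include <tuple>

using ValueNum = std::uint32_t;
using ExprKey = std::tuple<std::string, ValueNum, ValueNum>;  // (opcode, vn(op0), vn(op1))

int main() {
  std::map<ExprKey, ValueNum> exprTable;  // plays the role of the insn hash table
  std::map<std::string, ValueNum> regVn = {{"R1", 1}, {"R2", 2}};
  ValueNum nextVn = 3;

  auto visit = [&](const std::string &dst, const std::string &op,
                   const std::string &a, const std::string &b) {
    ExprKey key{op, regVn[a], regVn[b]};
    auto [it, inserted] = exprTable.emplace(key, nextVn);
    if (inserted) {
      ++nextVn;
      std::cout << dst << " = " << op << " " << a << ", " << b << "  (new value)\n";
    } else {
      std::cout << dst << " = " << op << " " << a << ", " << b
                << "  (redundant, reuse value #" << it->second << ")\n";
    }
    regVn[dst] = it->second;
  };

  visit("R3", "add", "R1", "R2");  // new value #3
  visit("R4", "add", "R1", "R2");  // same expression: candidate for elimination
  return 0;
}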
*/ + if (!insn.OpndIsDef(i)) { + continue; + } + auto genOpndInfoDef = [this, insnInfo](Operand &op) { + OpndInfo *opndInfo = nullptr; + if ((&op != TRUE_OPND) && + ((op.IsRegister() && (&op) != GetZeroOpnd(op.GetSize())) || + (op.IsMemoryAccessOperand() && (static_cast(op)).GetBaseRegister() != nullptr))) { + opndInfo = OperandInfoDef(*insnInfo->bb, *insnInfo->insn, op); + opndInfo->insnInfo = insnInfo; + } + insnInfo->result.emplace_back(opndInfo); + }; + Operand &op = insn.GetOperand(i); + if (op.IsList() && !static_cast(op).GetOperands().empty()) { + for (auto operand : static_cast(op).GetOperands()) { + genOpndInfoDef(*operand); + } + } else { + genOpndInfoDef(op); + } + } + SetInsnInfo(hashVal, *insnInfo); +} + +/* do decref of orig_info, refCount will be set to 0 */ +void Ebo::RemoveUses(uint32 opndNum, const MapleVector &origInfo) const { + OpndInfo *info = nullptr; + for (uint32 i = 0; i < opndNum; ++i) { + info = origInfo.at(i); + if (info != nullptr) { + DecRef(*info); + if (info->opnd->IsMemoryAccessOperand()) { + MemOpndInfo *memInfo = static_cast(info); + OpndInfo *baseInfo = memInfo->GetBaseInfo(); + OpndInfo *offsetInfo = memInfo->GetOffsetInfo(); + if (baseInfo != nullptr) { + DecRef(*baseInfo); + } + if (offsetInfo != nullptr) { + DecRef(*offsetInfo); + } + } + } + } +} + +OpndInfo *Ebo::BuildMemOpndInfo(BB &bb, Insn &insn, Operand &opnd, uint32 opndIndex) { + auto *memOpnd = static_cast(&opnd); + Operand *base = memOpnd->GetBaseRegister(); + Operand *offset = memOpnd->GetOffset(); + OpndInfo *baseInfo = nullptr; + OpndInfo *offsetInfo = nullptr; + if (base != nullptr) { + if (!memOpnd->IsIntactIndexed()) { + baseInfo = OperandInfoUse(bb, *base); + baseInfo = OperandInfoDef(bb, insn, *base); + return baseInfo; + } else { + baseInfo = OperandInfoUse(bb, *base); + } + /* forward prop for base register. */ + if ((baseInfo != nullptr) && base->IsRegister()) { + auto *baseReg = static_cast(base); + Operand *replaceOpnd = baseInfo->replacementOpnd; + OpndInfo *replaceInfo = baseInfo->replacementInfo; + if ((replaceInfo != nullptr) && (replaceOpnd != nullptr) && !cgFunc->IsSPOrFP(*baseReg) && + (!beforeRegAlloc || (!IsPhysicalReg(*replaceOpnd) && !IsPhysicalReg(*base))) && + IsOfSameClass(*base, *replaceOpnd) && memOpnd->IsIntactIndexed() && + (base->GetSize() <= replaceOpnd->GetSize()) && + /* In case that replace opnd was redefined. 
*/ + !replaceInfo->redefined) { + MemOperand *newMem = static_cast(memOpnd->Clone(*cgFunc->GetMemoryPool())); + CHECK_FATAL(newMem != nullptr, "newMem is null in Ebo::BuildAllInfo(BB *bb)"); + newMem->SetBaseRegister(*static_cast(replaceOpnd)); + insn.SetOperand(opndIndex, *newMem); + DecRef(*baseInfo); + IncRef(*replaceInfo); + baseInfo = replaceInfo; + } + } + } + if ((offset != nullptr) && offset->IsRegister()) { + offsetInfo = OperandInfoUse(bb, *offset); + } + OpndInfo *opndInfo = OperandInfoUse(bb, insn.GetOperand(opndIndex)); + CHECK_FATAL(opndInfo != nullptr, "opndInfo should not be null ptr"); + MemOpndInfo *memInfo = static_cast(opndInfo); + if (baseInfo != nullptr) { + memInfo->SetBaseInfo(*baseInfo); + } + if (offsetInfo != nullptr) { + memInfo->SetOffsetInfo(*offsetInfo); + } + return memInfo; +} + +OpndInfo *Ebo::BuildOperandInfo(BB &bb, Insn &insn, Operand &opnd, uint32 opndIndex, + MapleVector &origInfos) { + if (opnd.IsList()) { + ListOperand *listOpnd = static_cast(&opnd); + for (auto op : listOpnd->GetOperands()) { + OperandInfoUse(bb, *op); + } + return nullptr; + } + ASSERT(opndIndex < origInfos.size(), "SetOpndInfo hashval outof range!"); + if (opnd.IsConditionCode()) { + Operand &rFlag = cgFunc->GetOrCreateRflag(); + OperandInfoUse(bb, rFlag); + /* if operand is Opnd_cond, the orig_info store the info of rFlag. */ + OpndInfo *tempOpndInfo = GetOpndInfo(rFlag, -1); + origInfos.at(opndIndex) = tempOpndInfo; + return nullptr; + } + + if (!(opnd.IsRegister() || opnd.IsRegShift()) && !opnd.IsMemoryAccessOperand()) { + return nullptr; + } + + if (opnd.IsMemoryAccessOperand()) { + OpndInfo *memInfo = BuildMemOpndInfo(bb, insn, opnd, opndIndex); + CHECK_FATAL(memInfo != nullptr, "build memopnd info failed in Ebo::BuildAllInfo"); + origInfos.at(opndIndex) = memInfo; + return nullptr; + } + OpndInfo *opndInfo = OperandInfoUse(bb, opnd); + origInfos.at(opndIndex) = opndInfo; + return opndInfo; +} + +bool Ebo::ForwardPropagateOpnd(Insn &insn, Operand *&opnd, uint32 opndIndex, + OpndInfo *&opndInfo, MapleVector &origInfos) { + CHECK_FATAL(opnd != nullptr, "nullptr check"); + Operand *opndReplace = opndInfo->replacementOpnd; + /* Don't propagate physical registers before register allocation. */ + if (beforeRegAlloc && (opndReplace != nullptr) && (IsPhysicalReg(*opndReplace) || IsPhysicalReg(*opnd))) { + return false; + } + + /* forward propagation of constants */ + CHECK_FATAL(opndIndex < origInfos.size(), "SetOpndInfo hashval outof range!"); + if (!ForwardPropCheck(opndReplace, *opndInfo, *opnd, insn)) { + return false; + } + Operand *oldOpnd = opnd; + opnd = opndInfo->replacementOpnd; + opndInfo = opndInfo->replacementInfo; + + /* constant prop. */ + if (opnd->IsIntImmediate() && oldOpnd->IsRegister()) { + if (DoConstProp(insn, opndIndex, *opnd)) { + DecRef(*origInfos.at(opndIndex)); + /* Update the actual expression info. */ + origInfos.at(opndIndex) = opndInfo; + } + } + /* move reg, wzr, store vreg, mem ==> store wzr, mem */ +#if TARGAARCH64 || TARGRISCV64 + if (IsZeroRegister(*opnd) && opndIndex == 0 && + (insn.GetMachineOpcode() == MOP_wstr || insn.GetMachineOpcode() == MOP_xstr)) { + if (EBO_DUMP) { + LogInfo::MapleLogger() << "===replace operand " << opndIndex << " of insn: \n"; + insn.Dump(); + LogInfo::MapleLogger() << "the new insn is:\n"; + } + insn.SetOperand(opndIndex, *opnd); + DecRef(*origInfos.at(opndIndex)); + /* Update the actual expression info. 
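+ * e.g. "mov w1, wzr; str w1, [sp,#8]" is rewritten so the store uses wzr directly,
+ * and the now-dead mov is left to the unused-insn removal that follows.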
*/ + origInfos.at(opndIndex) = opndInfo; + if (EBO_DUMP) { + insn.Dump(); + } + } +#endif + /* forward prop for registers. */ + if (!RegForwardCheck(insn, *opnd, opndReplace, *oldOpnd, origInfos.at(opndIndex))) { + return false; + } + /* Copies to and from the same register are not needed. */ + if (!beforeRegAlloc && Globals::GetInstance()->GetTarget()->IsEffectiveCopy(insn) && (opndIndex == kInsnSecondOpnd) && + RegistersIdentical(*opnd, insn.GetOperand(kInsnFirstOpnd)) && !LiveOutOfBB(*opnd, *(insn.GetBB()))) { + if (EBO_DUMP) { + LogInfo::MapleLogger() << "===replace operand " << opndIndex << " of insn: \n"; + insn.Dump(); + LogInfo::MapleLogger() << "===Remove the new insn because Copies to and from the same register. \n"; + } + return true; + } + if (static_cast(opnd)->GetRegisterNumber() == RSP) { + /* Disallow optimization with stack pointer */ + return false; + } + + if (EBO_DUMP) { + LogInfo::MapleLogger() << "===replace operand " << opndIndex << " of insn: \n"; + insn.Dump(); + LogInfo::MapleLogger() << "the new insn is:\n"; + } + DecRef(*origInfos.at(opndIndex)); + insn.SetOperand(opndIndex, *opnd); + + if (EBO_DUMP) { + insn.Dump(); + } + IncRef(*opndInfo); + /* Update the actual expression info. */ + origInfos.at(opndIndex) = opndInfo; + /* extend the live range of the replacement operand. */ + if ((opndInfo->bb != insn.GetBB()) && opnd->IsRegister()) { + MarkOpndLiveIntoBB(*opnd, *insn.GetBB(), *opndInfo->bb); + } + return false; +} + +/* + * this func do only one of the following optimization: + * 1. Remove DupInsns + * 2. SpecialSequence OPT + * 3. Remove Redundant "Load" + * 4. Constant Fold + */ +void Ebo::SimplifyInsn(Insn &insn, bool &insnReplaced, bool opndsConstant, + const MapleVector &opnds, const MapleVector &opndInfos, + const MapleVector &origInfos) { + if (insn.AccessMem()) { + if (!insnReplaced) { + insnReplaced = SpecialSequence(insn, origInfos); + } + return; + } + if (Globals::GetInstance()->GetTarget()->IsEffectiveCopy(insn)) { + if (!insnReplaced) { + insnReplaced = SpecialSequence(insn, opndInfos); + } + return; + } + if (!insnReplaced && !insn.HasSideEffects()) { + uint32 opndNum = insn.GetOperandSize(); + if (opndsConstant && (opndNum > 1)) { + if (!insn.GetDefRegs().empty()) { + insnReplaced = Csel2Cset(insn, opnds); + } + } + if (insnReplaced) { + return; + } + if (opndNum >= 2) { + /* special case */ + if (!insn.GetDefRegs().empty() && ResIsNotDefAndUse(insn)) { + if ((opndNum == 3) && (insn.GetDefRegs().size() == 1) && + (((kInsnSecondOpnd < opnds.size()) && (opnds[kInsnSecondOpnd] != nullptr) && + IsConstantImmOrReg(*opnds[kInsnSecondOpnd])) || + ((kInsnThirdOpnd < opnds.size()) && (opnds[kInsnThirdOpnd] != nullptr) && + IsConstantImmOrReg(*opnds[kInsnThirdOpnd])))) { + insnReplaced = SimplifyConstOperand(insn, opnds, opndInfos); + } + } + if (!insnReplaced) { + insnReplaced = SpecialSequence(insn, origInfos); + } + } + } +} + +/* + * this func do: + * 1. delete DupInsn if SimplifyInsn failed. + * 2. buildInsnInfo if delete DupInsn failed(func HashInsn do this). + * 3. update replaceInfo. + */ +void Ebo::FindRedundantInsns(BB &bb, Insn *&insn, const Insn *prev, bool insnReplaced, + MapleVector &opnds, MapleVector &opndInfos, + const MapleVector &origInfos) { + CHECK_FATAL(insn != nullptr, "nullptr check"); + if (!insnReplaced) { + CHECK_FATAL(origInfos.size() != 0, "null ptr check"); + CHECK_FATAL(opndInfos.size() != 0, "null ptr check"); + HashInsn(*insn, origInfos, opndInfos); + /* Processing the result of the insn. 
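+ * For an effective copy the destination's OpndInfo remembers the source operand as
+ * its replacement, which is what later forward propagation consumes
+ * (e.g. "mov w1, w0; add w2, w1, #4" can then become "add w2, w0, #4"); no
+ * replacement is recorded for fmov, and insns whose operand is both defined and
+ * used have it cleared.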
*/ + if ((Globals::GetInstance()->GetTarget()->IsEffectiveCopy(*insn) || + !insn->GetDefRegs().empty()) && !insn->IsSpecialIntrinsic()) { + Operand *res = &insn->GetOperand(kInsnFirstOpnd); + if ((res != nullptr) && (res != TRUE_OPND) && (res != GetZeroOpnd(res->GetSize()))) { + CHECK_FATAL(lastInsnInfo != nullptr, "lastInsnInfo is null!"); + OpndInfo *opndInfo = lastInsnInfo->result[0]; + /* Don't propagate for fmov insns. */ + if (Globals::GetInstance()->GetTarget()->IsEffectiveCopy(*insn) && (opndInfo != nullptr) && !IsFmov(*insn)) { + CHECK_FATAL(!opnds.empty(), "null container!"); + opndInfo->replacementOpnd = opnds[kInsnSecondOpnd]; + opndInfo->replacementInfo = opndInfos[kInsnSecondOpnd]; + } else if (insn->GetBothDefUseOpnd() != kInsnMaxOpnd && (opndInfo != nullptr)) { + opndInfo->replacementOpnd = nullptr; + opndInfo->replacementInfo = nullptr; + } + } + } + insn = insn->GetNext(); + } else { + uint32 opndNum = insn->GetOperandSize(); + RemoveUses(opndNum, origInfos); + /* If insn is replaced, reanalyze the new insn to have more opportunities. */ + insn = (prev == nullptr ? bb.GetFirstInsn() : prev->GetNext()); + } +} + +void Ebo::PreProcessSpecialInsn(Insn &insn) { + DefineReturnUseRegister(insn); + + if (insn.IsCall() || insn.IsClinit()) { + DefineCallUseSpecialRegister(insn); + } +} + +/* + * this func do : + * 1.build opereand info of bb; + * 2.do Forward propagation after regalloc; + * 3.simplify the insn,include Constant folding,redundant insns elimination. + */ +void Ebo::BuildAllInfo(BB &bb) { + if (EBO_DUMP) { + LogInfo::MapleLogger() << "===Enter BuildOperandinfo of bb:" << bb.GetId() << "===\n"; + } + Insn *insn = bb.GetFirstInsn(); + while ((insn != nullptr) && (insn != bb.GetLastInsn()->GetNext())) { + if (!insn->IsTargetInsn()) { + insn = insn->GetNext(); + continue; + } + PreProcessSpecialInsn(*insn); + uint32 opndNum = insn->GetOperandSize(); + if (!insn->IsMachineInstruction () || opndNum == 0) { + insn = insn->GetNext(); + continue; + } + MapleVector opnds(eboAllocator.Adapter()); + MapleVector opndInfos(eboAllocator.Adapter()); + MapleVector origInfos(eboAllocator.Adapter()); + Insn *prev = insn->GetPrev(); + bool insnReplaced = false; + bool opndsConstant = true; + /* start : Process all the operands. */ + for (uint32 i = 0; i < opndNum; ++i) { + if (!insn->OpndIsUse(i)) { + opnds.emplace_back(nullptr); + opndInfos.emplace_back(nullptr); + origInfos.emplace_back(nullptr); + continue; + } + Operand *opnd = &(insn->GetOperand(i)); + opnds.emplace_back(opnd); + opndInfos.emplace_back(nullptr); + origInfos.emplace_back(nullptr); + if (IsConstantImmOrReg(*opnd)) { + continue; + } + OpndInfo *opndInfo = BuildOperandInfo(bb, *insn, *opnd, i, origInfos); + if (opndInfo == nullptr) { + continue; + } + + /* Don't do propagation for special intrinsic insn. */ + if (!insn->IsSpecialIntrinsic()) { + insnReplaced = ForwardPropagateOpnd(*insn, opnd, i, opndInfo, origInfos); + } + if (insnReplaced) { + continue; + } + opnds.at(i) = opnd; + opndInfos.at(i) = opndInfo; + if (!IsConstantImmOrReg(*opnd)) { + opndsConstant = false; + } + } /* End : Process all the operands. */ +#if TARGARM32 + Arm32Insn *currArm32Insn = static_cast(insn); + if (currArm32Insn->IsCondExecution()) { + Operand &rFlag = cgFunc->GetOrCreateRflag(); + OperandInfoUse(bb, rFlag); + } +#endif + + if (insnReplaced) { + RemoveUses(opndNum, origInfos); + Insn *temp = insn->GetNext(); + bb.RemoveInsn(*insn); + insn = temp; + continue; + } + + /* simplify the insn. 
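+ * SimplifyInsn attempts constant folding, csel-to-cset conversion and
+ * special-sequence rewrites; FindRedundantInsns then either re-scans a replaced
+ * insn or hashes it and records copy-replacement info for later propagation.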
*/ + if (!insn->IsSpecialIntrinsic()) { + SimplifyInsn(*insn, insnReplaced, opndsConstant, opnds, opndInfos, origInfos); + } + FindRedundantInsns(bb, insn, prev, insnReplaced, opnds, opndInfos, origInfos); + } +} + +/* Decrement the use counts for the actual operands of an insnInfo. */ +void Ebo::RemoveInsn(InsnInfo &info) const { + Insn *insn = info.insn; + CHECK_FATAL(insn != nullptr, "get insn in info failed in Ebo::RemoveInsn"); + uint32 opndNum = insn->GetOperandSize(); + OpndInfo *opndInfo = nullptr; + for (uint32 i = 0; i < opndNum; i++) { + if (!insn->OpndIsUse(i)) { + continue; + } + opndInfo = info.origOpnd[i]; + if (opndInfo != nullptr) { + DecRef(*opndInfo); + Operand *opndTemp = opndInfo->opnd; + if (opndTemp == nullptr) { + continue; + } + if (opndTemp->IsMemoryAccessOperand()) { + MemOpndInfo *memInfo = static_cast(opndInfo); + OpndInfo *baseInfo = memInfo->GetBaseInfo(); + OpndInfo *offInfo = memInfo->GetOffsetInfo(); + if (baseInfo != nullptr) { + DecRef(*baseInfo); + } + if (offInfo != nullptr) { + DecRef(*offInfo); + } + } + } + } +#if TARGARM32 + Arm32CGFunc *a32CGFunc = static_cast(cgFunc); + auto &gotInfosMap = a32CGFunc->GetGotInfosMap(); + for (auto it = gotInfosMap.begin(); it != gotInfosMap.end();) { + if (it->first == insn) { + it = gotInfosMap.erase(it); + } else { + ++it; + } + } + auto &constInfosMap = a32CGFunc->GetConstInfosMap(); + for (auto it = constInfosMap.begin(); it != constInfosMap.end();) { + if (it->first == insn) { + it = constInfosMap.erase(it); + } else { + ++it; + } + } +#endif +} + +/* Mark opnd is live between def bb and into bb. */ +void Ebo::MarkOpndLiveIntoBB(const Operand &opnd, BB &into, BB &def) const { + if (live == nullptr) { + return; + } + if (&into == &def) { + return; + } + CHECK_FATAL(opnd.IsRegister(), "expect register here."); + const RegOperand ® = static_cast(opnd); + into.SetLiveInBit(reg.GetRegisterNumber()); + def.SetLiveOutBit(reg.GetRegisterNumber()); +} + +/* return insn information if has insnInfo,else,return lastInsnInfo */ +InsnInfo *Ebo::LocateInsnInfo(const OpndInfo &info) const { + if (info.insn != nullptr) { + if (info.insnInfo != nullptr) { + return info.insnInfo; + } else { + InsnInfo *insnInfo = lastInsnInfo; + int32 limit = 50; + for (; (insnInfo != nullptr) && (limit != 0); insnInfo = insnInfo->prev, limit--) { + if (insnInfo->insn == info.insn) { + return insnInfo; + } + } + } + } + return nullptr; +} + +/* redundant insns elimination */ +void Ebo::RemoveUnusedInsns(BB &bb, bool normal) { + OpndInfo *opndInfo = nullptr; + Operand *opnd = nullptr; + + if (firstInsnInfo == nullptr) { + return; + } + + for (InsnInfo *insnInfo = lastInsnInfo; insnInfo != nullptr; insnInfo = insnInfo->prev) { + Insn *insn = insnInfo->insn; + if ((insn == nullptr) || (insn->GetBB() == nullptr)) { + continue; + } + /* stop looking for insn when it goes out of bb. 
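+ * (insnInfos are chained across the whole extended block, so this backward walk
+ * ends at the first insn that belongs to another BB).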
*/ + if (insn->GetBB() != &bb) { + break; + } + + uint32 resNum = insn->GetDefRegs().size(); + if (IsLastAndBranch(bb, *insn)) { + goto insn_is_needed; + } + + if (insn->IsClinit()) { + goto insn_is_needed; + } + + if ((resNum == 0) || IsGlobalNeeded(*insn) || insn->IsStore() || + IsDecoupleStaticOp(*insn) || insn->GetBothDefUseOpnd() != kInsnMaxOpnd) { + goto insn_is_needed; + } + + /* last insn of a 64x1 function is a float, 64x1 function may not be a float */ + if (cgFunc->GetFunction().GetAttr(FUNCATTR_oneelem_simd) && + insnInfo == lastInsnInfo) { + goto insn_is_needed; + } + + if (insn->GetMachineOpcode() == MOP_asm || insn->IsAtomic()) { + goto insn_is_needed; + } + + /* Check all result that can be removed. */ + for (uint32 i = 0; i < resNum; ++i) { + opndInfo = insnInfo->result[i]; + /* A couple of checks. */ + if (opndInfo == nullptr) { + continue; + } + if ((opndInfo->bb != &bb) || (opndInfo->insn == nullptr)) { + goto insn_is_needed; + } + opnd = opndInfo->opnd; + if (opnd == GetZeroOpnd(opnd->GetSize())) { + continue; + } +/* this part optimize some spacial case after RA. */ + if (!beforeRegAlloc && Globals::GetInstance()->GetTarget()->IsEffectiveCopy(*insn) && opndInfo && + insn->GetOperand(kInsnSecondOpnd).IsImmediate() && IsSameRedefine(bb, *insn, *opndInfo)) { + goto can_be_removed; + } + /* end special case optimize */ + if ((beforeRegAlloc && IsPhysicalReg(*opnd)) || (IsSaveReg(*opnd) && !opndInfo->redefinedInBB)) { + goto insn_is_needed; + } + /* Copies to and from the same register are not needed. */ + if (Globals::GetInstance()->GetTarget()->IsEffectiveCopy(*insn)) { + if (HasAssignedReg(*opnd) && HasAssignedReg(insn->GetOperand(kInsnSecondOpnd)) && + RegistersIdentical(*opnd, insn->GetOperand(kInsnSecondOpnd))) { + /* We may be able to get rid of the copy, but be sure that the operand is marked live into this block. */ + if ((insnInfo->origOpnd[kInsnSecondOpnd] != nullptr) && (&bb != insnInfo->origOpnd[kInsnSecondOpnd]->bb)) { + MarkOpndLiveIntoBB(*opnd, bb, *insnInfo->origOpnd[kInsnSecondOpnd]->bb); + } + /* propagate use count for this opnd to it's input operand. */ + if (opndInfo->same != nullptr) { + opndInfo->same->refCount += opndInfo->refCount; + } + + /* remove the copy causes the previous def to reach the end of the block. */ + if (!opndInfo->redefined && (opndInfo->same != nullptr)) { + opndInfo->same->redefined = false; + opndInfo->same->redefinedInBB = false; + } + goto can_be_removed; + } + } + /* there must bo no direct references to the operand. */ + if (!normal || (opndInfo->refCount != 0)) { + goto insn_is_needed; + } + /* + * When O1, the vreg who live out of bb should be recognized. + * The regs for clinit is also be marked to recognize it can't be deleted. so extend it to O2. 
+ */ + if (opnd->IsRegister()) { + RegOperand *reg = static_cast(opnd); + if (beforeRegAlloc && !reg->IsBBLocalVReg()) { + goto insn_is_needed; + } + } + /* Volatile || sideeffect */ + if (opndInfo->insn->IsVolatile() || opndInfo->insn->HasSideEffects()) { + goto insn_is_needed; + } + + if (!opndInfo->redefinedInBB && LiveOutOfBB(*opnd, *opndInfo->bb)) { + goto insn_is_needed; + } + + if (opndInfo->redefinedInBB && opndInfo->redefinedInsn != nullptr && + opndInfo->redefinedInsn->GetBothDefUseOpnd() != kInsnMaxOpnd) { + goto insn_is_needed; + } + } + + if (!normal || insnInfo->mustNotBeRemoved || insn->GetDoNotRemove()) { + goto insn_is_needed; + } +can_be_removed: + if (EBO_DUMP) { + LogInfo::MapleLogger() << "< ==== Remove Unused insn in bb:" << bb.GetId() << "====\n"; + insn->Dump(); + } + RemoveInsn(*insnInfo); + bb.RemoveInsn(*insn); + insnInfo->insn = nullptr; + insnInfo->bb = nullptr; + for (uint32 i = 0; i < resNum; i++) { + opndInfo = insnInfo->result[i]; + if (opndInfo == nullptr) { + continue; + } + if (opndInfo->redefined && (opndInfo->same != nullptr)) { + OpndInfo *next = opndInfo->same; + next->redefined = true; + if (opndInfo->redefinedInBB && (opndInfo->same->bb == &bb)) { + next->redefinedInBB = true; + } + } + if (!opndInfo->redefinedInBB && (opndInfo->same != nullptr) && (opndInfo->same->bb == &bb)) { + opndInfo->same->redefinedInBB = false; + } + if (!opndInfo->redefined && (opndInfo->same != nullptr)) { + opndInfo->same->redefined = false; + opndInfo->same->redefinedInBB = false; + } + } + optSuccess = true; + continue; +insn_is_needed: + if (!bb.GetEhSuccs().empty()) { + for (uint32 i = 0; i < resNum; i++) { + opndInfo = insnInfo->result[i]; + if ((opndInfo != nullptr) && (opndInfo->opnd != nullptr) && (opndInfo->same != nullptr)) { + UpdateNextInfo(*opndInfo); + } + } + } + + if (!bb.GetEhPreds().empty()) { + for (uint32 i = 0; i < insn->GetOperandSize(); ++i) { + opndInfo = insnInfo->origOpnd[i]; + if ((opndInfo != nullptr) && (opndInfo->opnd != nullptr) && (opndInfo->same != nullptr)) { + UpdateNextInfo(*opndInfo); + } + if ((opndInfo != nullptr) && opndInfo->opnd && (&bb != opndInfo->bb) && opndInfo->opnd->IsRegister()) { + MarkOpndLiveIntoBB(*opndInfo->opnd, bb, *opndInfo->bb); + } + } + } + } /* end proccess insnInfo in currBB */ +} + +void Ebo::UpdateNextInfo(const OpndInfo &opndInfo) { + OpndInfo *nextInfo = opndInfo.same; + while (nextInfo != nullptr) { + if (nextInfo->insn != nullptr) { + InsnInfo *info = LocateInsnInfo(*nextInfo); + if (info != nullptr) { + info->mustNotBeRemoved = true; + } else { + /* + * Couldn't find the insnInfo entry. Make sure that the operand has + * a use count so that the defining insn will not be deleted. 
+ */ + nextInfo->refCount += opndInfo.refCount; + } + } + nextInfo = nextInfo->same; + } +} + +/* back up to last saved OpndInfo */ +void Ebo::BackupOpndInfoList(OpndInfo *saveLast) { + if (lastOpndInfo == saveLast) { + return; + } + OpndInfo *opndInfo = lastOpndInfo; + while (opndInfo != saveLast) { + int32 hashVal = 0; + if (opndInfo->opnd->IsRegister() || opndInfo->opnd->IsRegShift()) { + hashVal = -1; + } else { + hashVal = opndInfo->hashVal; + } + UpdateOpndInfo(*opndInfo->opnd, *opndInfo, opndInfo->same, hashVal); + opndInfo = opndInfo->prev; + } + if (saveLast != nullptr) { + saveLast->next = nullptr; + lastOpndInfo = saveLast; + } else { + firstOpndInfo = nullptr; + lastOpndInfo = nullptr; + } +} + +/* back up to last saved insn */ +void Ebo::BackupInsnInfoList(InsnInfo *saveLast) { + if (lastInsnInfo == saveLast) { + return; + } + InsnInfo *insnInfo = lastInsnInfo; + while (insnInfo != saveLast) { + SetInsnInfo(insnInfo->hashIndex, *(insnInfo->same)); + insnInfo = insnInfo->prev; + } + if (saveLast != nullptr) { + saveLast->next = nullptr; + lastInsnInfo = saveLast; + } else { + firstInsnInfo = nullptr; + lastInsnInfo = nullptr; + } +} + +/* add bb to eb ,and build operandinfo of bb */ +void Ebo::AddBB2EB(BB &bb) { + OpndInfo *saveLastOpndInfo = lastOpndInfo; + InsnInfo *saveLastInsnInfo = lastInsnInfo; + SetBBVisited(bb); + bbNum++; + BuildAllInfo(bb); + /* Stop adding BB to EB if the bbs in the current EB exceeds kEboMaxBBNums */ + if (bbNum < kEboMaxBBNums) { + for (auto *bbSucc : bb.GetSuccs()) { + if ((bbSucc->GetPreds().size() == 1) && IsNotVisited(*bbSucc)) { + AddBB2EB(*bbSucc); + } + } + } + + RemoveUnusedInsns(bb, true); + /* Remove information about Operand's and Insn's in this block. */ + BackupOpndInfoList(saveLastOpndInfo); + BackupInsnInfoList(saveLastInsnInfo); + bbNum--; +} + +/* Perform EBO */ +void Ebo::EboProcess() { + FOR_ALL_BB(bb, cgFunc) { + if (IsNotVisited(*bb)) { + bbNum = 0; + AddBB2EB(*bb); + } + } +} + +/* Perform EBO on O1 which the optimization can only be in a single block. */ +void Ebo::EboProcessSingleBB() { + FOR_ALL_BB(bb, cgFunc) { + OpndInfo *saveLastOpndInfo = lastOpndInfo; + InsnInfo *saveLastInsnInfo = lastInsnInfo; + BuildAllInfo(*bb); + RemoveUnusedInsns(*bb, true); + /* Remove information about Operand's and Insn's in this block. */ + BackupOpndInfoList(saveLastOpndInfo); + BackupInsnInfoList(saveLastInsnInfo); + } +} + +void Ebo::EboInit() { + visitedBBs.resize(cgFunc->NumBBs()); + for (uint32 i = 0; i < cgFunc->NumBBs(); ++i) { + visitedBBs[i] = false; + } + exprInfoTable.resize(kEboMaxOpndHash); + for (uint32 i = 0; i < kEboMaxOpndHash; ++i) { + exprInfoTable.at(i) = nullptr; + } + insnInfoTable.resize(kEboMaxInsnHash); + for (uint32 i = 0; i < kEboMaxInsnHash; ++i) { + insnInfoTable.at(i) = nullptr; + } + if (!beforeRegAlloc) { + BuildCallerSaveRegisters(); + } + optSuccess = false; +} + +/* perform EB optimizations right after instruction selection. */ +void Ebo::Run() { + EboInit(); + if (Globals::GetInstance()->GetOptimLevel() >= CGOptions::kLevel2) { + EboProcess(); + } else { + EboProcessSingleBB(); /* Perform SingleBB Optimization when -O1. 
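+ * For C modules Run() re-enters itself as long as the previous pass removed
+ * anything (optSuccess), so EBO effectively iterates to a fixed point.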
*/ + } + if (optSuccess && cgFunc->GetMirModule().IsCModule()) { + Run(); + } +} + +/* === new pm === */ +bool CgEbo0::PhaseRun(maplebe::CGFunc &f) { + if (EBO_DUMP_NEWPM) { + DotGenerator::GenerateDot("ebo0", f, f.GetMirModule()); + } + LiveAnalysis *live = GET_ANALYSIS(CgLiveAnalysis, f); + MemPool *eboMp = GetPhaseMemPool(); + Ebo *ebo = nullptr; +#if TARGAARCH64 || TARGRISCV64 + ebo = eboMp->New(f, *eboMp, live, true, PhaseName()); +#endif +#if TARGARM32 + ebo = eboMp->New(f, *eboMp, live, true, "ebo0"); +#endif + ebo->Run(); + /* the live range info may changed, so invalid the info. */ + if (live != nullptr) { + live->ClearInOutDataInfo(); + } + return true; +} + +void CgEbo0::GetAnalysisDependence(maple::AnalysisDep &aDep) const { + aDep.AddRequired(); + aDep.AddPreserved(); +} +MAPLE_TRANSFORM_PHASE_REGISTER_CANSKIP(CgEbo0, ebo) + +bool CgEbo1::PhaseRun(maplebe::CGFunc &f) { + if (EBO_DUMP_NEWPM) { + DotGenerator::GenerateDot(PhaseName(), f, f.GetMirModule(), true); + } + LiveAnalysis *live = GET_ANALYSIS(CgLiveAnalysis, f); + MemPool *eboMp = GetPhaseMemPool(); + Ebo *ebo = nullptr; +#if TARGAARCH64 || TARGRISCV64 + ebo = eboMp->New(f, *eboMp, live, true, PhaseName()); +#endif +#if TARGARM32 + ebo = eboMp->New(f, *eboMp, live, true, PhaseName()); +#endif + ebo->Run(); + /* the live range info may changed, so invalid the info. */ + if (live != nullptr) { + live->ClearInOutDataInfo(); + } + return true; +} + +void CgEbo1::GetAnalysisDependence(maple::AnalysisDep &aDep) const { + aDep.AddRequired(); + aDep.AddPreserved(); +} +MAPLE_TRANSFORM_PHASE_REGISTER_CANSKIP(CgEbo1, ebo1) + +bool CgPostEbo::PhaseRun(maplebe::CGFunc &f) { + if (EBO_DUMP_NEWPM) { + DotGenerator::GenerateDot(PhaseName(), f, f.GetMirModule()); + } + LiveAnalysis *live = GET_ANALYSIS(CgLiveAnalysis, f); + MemPool *eboMp = GetPhaseMemPool(); + Ebo *ebo = nullptr; +#if TARGAARCH64 || TARGRISCV64 + ebo = eboMp->New(f, *eboMp, live, false, PhaseName()); +#endif +#if TARGARM32 + ebo = eboMp->New(f, *eboMp, live, false, PhaseName()); +#endif + ebo->Run(); + /* the live range info may changed, so invalid the info. */ + if (live != nullptr) { + live->ClearInOutDataInfo(); + } + return true; +} + +void CgPostEbo::GetAnalysisDependence(maple::AnalysisDep &aDep) const { + aDep.AddRequired(); + aDep.AddPreserved(); +} +MAPLE_TRANSFORM_PHASE_REGISTER_CANSKIP(CgPostEbo, postebo) +} /* namespace maplebe */ diff --git a/src/mapleall/maple_be/src/cg/eh_func.cpp b/src/mapleall/maple_be/src/cg/eh_func.cpp new file mode 100644 index 0000000000000000000000000000000000000000..d761e65c26e8aba3d5c39eea883a2c4091447768 --- /dev/null +++ b/src/mapleall/maple_be/src/cg/eh_func.cpp @@ -0,0 +1,739 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "eh_func.h" +#include "cgfunc.h" +#include "cg.h" +#include "mir_builder.h" +#include "switch_lowerer.h" + +namespace maplebe { +using namespace maple; + +void EHFunc::CollectEHInformation(std::vector> &catchVec) { + MIRFunction &mirFunc = cgFunc->GetFunction(); + MIRModule *mirModule = mirFunc.GetModule(); + CHECK_FATAL(mirModule != nullptr, "mirModule is nullptr in CGFunc::BuildEHFunc"); + BlockNode *blkNode = mirFunc.GetBody(); + CHECK_FATAL(blkNode != nullptr, "current function body is nullptr in CGFunc::BuildEHFunc"); + EHTry *lastTry = nullptr; /* record last try */ + /* + * curTry: record the current try wrapping the current statement, + * reset to null when meet a endtry + */ + EHTry *curTry = nullptr; + StmtNode *nextStmt = nullptr; + + /* collect all try-catch blocks */ + for (StmtNode *stmt = blkNode->GetFirst(); stmt != nullptr; stmt = nextStmt) { + nextStmt = stmt->GetNext(); + Opcode op = stmt->GetOpCode(); + switch (op) { + case OP_try: { + TryNode *tryNode = static_cast(stmt); + EHTry *ehTry = cgFunc->GetMemoryPool()->New(*(cgFunc->GetFuncScopeAllocator()), *tryNode); + lastTry = ehTry; + curTry = ehTry; + AddTry(*ehTry); + break; + } + case OP_endtry: { + ASSERT(lastTry != nullptr, "lastTry is nullptr when current node is endtry"); + lastTry->SetEndtryNode(*stmt); + lastTry = nullptr; + curTry = nullptr; + break; + } + case OP_catch: { + CatchNode *catchNode = static_cast(stmt); + ASSERT(stmt->GetPrev()->GetOpCode() == OP_label, "catch's previous node is not a label"); + LabelNode *labelStmt = static_cast(stmt->GetPrev()); + catchVec.emplace_back(std::pair(labelStmt->GetLabelIdx(), catchNode)); + /* rename the type of <*void> to <*Throwable> */ + for (uint32 i = 0; i < catchNode->Size(); i++) { + MIRType *ehType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(catchNode->GetExceptionTyIdxVecElement(i)); + ASSERT(ehType->GetKind() == kTypePointer, "ehType must be kTypePointer."); + MIRPtrType *ehPointedTy = static_cast(ehType); + if (ehPointedTy->GetPointedTyIdx() == static_cast(PTY_void)) { + ASSERT(mirModule->GetThrowableTyIdx() != 0, "throwable type id is 0"); + const MIRType *throwType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(mirModule->GetThrowableTyIdx()); + MIRType *pointerType = cgFunc->GetBecommon().BeGetOrCreatePointerType(*throwType); + catchNode->SetExceptionTyIdxVecElement(pointerType->GetTypeIndex(), i); + } + } + break; + } + case OP_throw: { + if (!cgFunc->GetCG()->GetCGOptions().GenerateExceptionHandlingCode() || + (cgFunc->GetCG()->IsExclusiveEH() && cgFunc->GetCG()->IsExclusiveFunc(mirFunc))) { + /* remove the statment */ + BlockNode *bodyNode = mirFunc.GetBody(); + bodyNode->RemoveStmt(stmt); + break; + } + UnaryStmtNode *throwNode = static_cast(stmt); + EHThrow *ehReThrow = cgFunc->GetMemoryPool()->New(*throwNode); + ehReThrow->SetJavaTry(curTry); + AddRethrow(*ehReThrow); + break; + } + case OP_block: + CHECK_FATAL(false, "should've lowered earlier"); + default: + break; + } + } +} + +void EHTry::DumpEHTry(const MIRModule &mirModule [[maybe_unused]]) { + if (tryNode != nullptr) { + tryNode->Dump(); + } + + if (endTryNode != nullptr) { + endTryNode->Dump(); + } + + for (const auto *currCatch : catchVec) { + if (currCatch == nullptr) { + continue; + } + currCatch->Dump(); + } +} + +void EHThrow::ConvertThrowToRuntime(CGFunc &cgFunc, BaseNode &arg) const { + MIRFunction &mirFunc = cgFunc.GetFunction(); + MIRModule *mirModule = mirFunc.GetModule(); + MIRFunction *calleeFunc = mirModule->GetMIRBuilder()->GetOrCreateFunction( + 
"MCC_ThrowException", static_cast(PTY_void)); + cgFunc.GetBecommon().UpdateTypeTable(*calleeFunc->GetMIRFuncType()); + calleeFunc->SetNoReturn(); + MapleVector args(mirModule->GetMIRBuilder()->GetCurrentFuncCodeMpAllocator()->Adapter()); + args.emplace_back(&arg); + CallNode *callAssign = mirModule->GetMIRBuilder()->CreateStmtCall(calleeFunc->GetPuidx(), args); + mirFunc.GetBody()->ReplaceStmt1WithStmt2(rethrow, callAssign); +} + +void EHThrow::ConvertThrowToRethrow(CGFunc &cgFunc) const { + MIRFunction &mirFunc = cgFunc.GetFunction(); + MIRModule *mirModule = mirFunc.GetModule(); + MIRBuilder *mirBuilder = mirModule->GetMIRBuilder(); + MIRFunction *unFunc = mirBuilder->GetOrCreateFunction("MCC_RethrowException", static_cast(PTY_void)); + cgFunc.GetBecommon().UpdateTypeTable(*unFunc->GetMIRFuncType()); + unFunc->SetNoReturn(); + MapleVector args(mirBuilder->GetCurrentFuncCodeMpAllocator()->Adapter()); + args.emplace_back(rethrow->Opnd(0)); + CallNode *callNode = mirBuilder->CreateStmtCall(unFunc->GetPuidx(), args); + mirFunc.GetBody()->ReplaceStmt1WithStmt2(rethrow, callNode); +} + +void EHThrow::Lower(CGFunc &cgFunc) { + BaseNode *opnd0 = rethrow->Opnd(0); + ASSERT(((opnd0->GetPrimType() == GetLoweredPtrType()) || (opnd0->GetPrimType() == PTY_ref)), + "except a dread of a pointer to get its type"); + MIRFunction &mirFunc = cgFunc.GetFunction(); + MIRModule *mirModule = mirFunc.GetModule(); + MIRBuilder *mirBuilder = mirModule->GetMIRBuilder(); + ASSERT(mirBuilder != nullptr, "get mirBuilder failed in EHThrow::Lower"); + MIRSymbol *mirSymbol = nullptr; + BaseNode *arg = nullptr; + MIRType *pstType = nullptr; + switch (opnd0->GetOpCode()) { + case OP_dread: { + DreadNode *drNode = static_cast(opnd0); + mirSymbol = mirFunc.GetLocalOrGlobalSymbol(drNode->GetStIdx()); + ASSERT(mirSymbol != nullptr, "get symbol failed in EHThrow::Lower"); + pstType = mirSymbol->GetType(); + arg = drNode->CloneTree(mirModule->GetCurFuncCodeMPAllocator()); + break; + } + case OP_iread: { + IreadNode *irNode = static_cast(opnd0); + MIRPtrType *pointerTy = + static_cast(GlobalTables::GetTypeTable().GetTypeFromTyIdx(irNode->GetTyIdx())); + if (irNode->GetFieldID() != 0) { + MIRType *pointedTy = GlobalTables::GetTypeTable().GetTypeFromTyIdx(pointerTy->GetPointedTyIdx()); + MIRStructType *structTy = nullptr; + if (pointedTy->GetKind() != kTypeJArray) { + structTy = static_cast(pointedTy); + } else { + /* it's a Jarray type. 
using it's parent's field info: java.lang.Object */ + structTy = static_cast(pointedTy)->GetParentType(); + } + ASSERT(structTy != nullptr, "structTy is nullptr in EHThrow::Lower "); + pstType = structTy->GetFieldType(irNode->GetFieldID()); + } else { + pstType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(pointerTy->GetPointedTyIdx()); + } + arg = irNode->CloneTree(mirModule->GetCurFuncCodeMPAllocator()); + break; + } + case OP_regread: { + RegreadNode *rrNode = static_cast(opnd0); + MIRPreg *pReg = mirFunc.GetPregTab()->PregFromPregIdx(rrNode->GetRegIdx()); + ASSERT(pReg->GetPrimType() == GetLoweredPtrType(), "must be a pointer type"); + pstType = pReg->GetMIRType(); + arg = rrNode->CloneTree(mirModule->GetCurFuncCodeMPAllocator()); + break; + } + case OP_retype: { + RetypeNode *retypeNode = static_cast(opnd0); + pstType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(retypeNode->GetTyIdx()); + arg = retypeNode->CloneTree(mirModule->GetCurFuncCodeMPAllocator()); + break; + } + case OP_cvt: { + TypeCvtNode *cvtNode = static_cast(opnd0); + PrimType prmType = cvtNode->GetPrimType(); + // prmType supposed to be Pointer. + if ((prmType == PTY_ptr) || (prmType == PTY_ref) || (prmType == PTY_a32) || (prmType == PTY_a64)) { + ConvertThrowToRethrow(cgFunc); + } + return; + } + default: + ASSERT(false, " NYI throw something"); + } + CHECK_FATAL(pstType != nullptr, "pstType is null in EHThrow::Lower"); + if (pstType->GetKind() != kTypePointer) { + LogInfo::MapleLogger() << "Error in function " << mirFunc.GetName() << "\n"; + rethrow->Dump(); + LogInfo::MapleLogger() << "pstType is supposed to be Pointer, but is not"; + pstType->Dump(0); + CHECK_FATAL(false, "throw operand type kind must be kTypePointer"); + } + + MIRType *stType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(pstType)->GetPointedTyIdx()); + if (!IsUnderTry()) { + /* + * in this case the throw happens without a try...endtry wrapping it, need to generate lsda. 
+ * insert 2 labels before and after throw + */ + LabelNode *throwBeginLbl = mirBuilder->CreateStmtLabel(mirBuilder->CreateLabIdx(mirFunc)); + LabelNode *throwEndLbl = mirBuilder->CreateStmtLabel(mirBuilder->CreateLabIdx(mirFunc)); + BlockNode *bodyNode = mirFunc.GetBody(); + bodyNode->InsertBefore(rethrow, throwBeginLbl); + bodyNode->InsertAfter(rethrow, throwEndLbl); + startLabel = throwBeginLbl; + endLabel = throwEndLbl; + } + + if (stType->GetKind() == kTypeClass) { + ConvertThrowToRuntime(cgFunc, *arg); + } else { + ConvertThrowToRethrow(cgFunc); + } +} + +EHFunc::EHFunc(CGFunc &func) + : cgFunc(&func), + tryVec(func.GetFuncScopeAllocator()->Adapter()), + ehTyTable(func.GetFuncScopeAllocator()->Adapter()), + ty2IndexTable(std::less(), func.GetFuncScopeAllocator()->Adapter()), + rethrowVec(func.GetFuncScopeAllocator()->Adapter()) {} + +EHFunc *CGFunc::BuildEHFunc() { + EHFunc *newEHFunc = GetMemoryPool()->New(*this); + SetEHFunc(*newEHFunc); + std::vector> catchVec; + newEHFunc->CollectEHInformation(catchVec); + newEHFunc->MergeCatchToTry(catchVec); + newEHFunc->BuildEHTypeTable(catchVec); + newEHFunc->InsertEHSwitchTable(); + newEHFunc->InsertCxaAfterEachCatch(catchVec); + newEHFunc->GenerateCleanupLabel(); + + GetBecommon().BeGetOrCreatePointerType(*GlobalTables::GetTypeTable().GetVoid()); + if (newEHFunc->NeedFullLSDA()) { + newEHFunc->CreateLSDA(); + } else if (newEHFunc->HasThrow()) { + newEHFunc->LowerThrow(); + } + if (GetCG()->GetCGOptions().GenerateExceptionHandlingCode()) { + newEHFunc->CreateTypeInfoSt(); + } + + return newEHFunc; +} + +bool EHFunc::NeedFullLSDA() const { + if (cgFunc->GetFunction().IsJava()) { + return HasTry(); + } else { + return false; + } +} + +bool EHFunc::NeedFastLSDA() const { + if (cgFunc->GetFunction().IsJava()) { + return !HasTry(); + } else { + return false; + } +} + +bool EHFunc::HasTry() const { + return !tryVec.empty(); +} + +void EHFunc::CreateTypeInfoSt() const { + MIRFunction &mirFunc = cgFunc->GetFunction(); + bool ctorDefined = false; + if (mirFunc.GetAttr(FUNCATTR_constructor) && !mirFunc.GetAttr(FUNCATTR_static) && (mirFunc.GetBody() != nullptr)) { + ctorDefined = true; + } + + if (!ctorDefined) { + return; + } + + const auto *classType = static_cast(mirFunc.GetClassType()); + if (cgFunc->GetMirModule().IsCModule() && classType == nullptr) { + return; + } + ASSERT(classType != nullptr, ""); + if (classType->GetMethods().empty() && (classType->GetFieldsSize() == 0)) { + return; + } + + if (classType->GetExceptionRootType() == nullptr) { + return; /* not a exception type */ + } +} + +void EHFunc::LowerThrow() { + MIRFunction &mirFunc = cgFunc->GetFunction(); + /* just lower without building LSDA */ + for (EHThrow *rethrow : rethrowVec) { + BaseNode *opnd0 = rethrow->GetRethrow()->Opnd(0); + /* except a dread of a point to get its type */ + switch (opnd0->GetOpCode()) { + case OP_retype: { + RetypeNode *retypeNode = static_cast(opnd0); + ASSERT(GlobalTables::GetTypeTable().GetTypeFromTyIdx(retypeNode->GetTyIdx())->GetKind() == kTypePointer, + "expecting a pointer type"); + rethrow->ConvertThrowToRuntime(*cgFunc, *retypeNode->CloneTree( + mirFunc.GetModule()->GetCurFuncCodeMPAllocator())); + break; + } + case OP_dread: { + DreadNode *drNode = static_cast(opnd0); + ASSERT(mirFunc.GetLocalOrGlobalSymbol(drNode->GetStIdx())->GetType()->GetKind() == kTypePointer, + "expect pointer type"); + rethrow->ConvertThrowToRuntime(*cgFunc, *drNode->CloneTree( + mirFunc.GetModule()->GetCurFuncCodeMPAllocator())); + break; + } + case OP_iread: { + 
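+        /* For an iread the field/pointee type is resolved first (a JArray falls back
+         * to its parent's java.lang.Object field info) to check that it is a pointer,
+         * then the throw is converted into the MCC_ThrowException runtime call. */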
IreadNode *irNode = static_cast(opnd0); + MIRPtrType *receiverPtrType = nullptr; + if (irNode->GetFieldID() != 0) { + MIRPtrType *pointerTy = + static_cast(GlobalTables::GetTypeTable().GetTypeFromTyIdx(irNode->GetTyIdx())); + MIRType *pointedTy = GlobalTables::GetTypeTable().GetTypeFromTyIdx(pointerTy->GetPointedTyIdx()); + MIRStructType *structTy = nullptr; + if (pointedTy->GetKind() != kTypeJArray) { + structTy = static_cast(pointedTy); + } else { + /* it's a Jarray type. using it's parent's field info: java.lang.Object */ + structTy = static_cast(pointedTy)->GetParentType(); + } + ASSERT(structTy != nullptr, "structTy is nullptr in EHFunc::LowerThrow"); + receiverPtrType = + static_cast(structTy->GetFieldType(irNode->GetFieldID())); + } else { + receiverPtrType = + static_cast(GlobalTables::GetTypeTable().GetTypeFromTyIdx(irNode->GetTyIdx())); + receiverPtrType = static_cast( + GlobalTables::GetTypeTable().GetTypeFromTyIdx(receiverPtrType->GetPointedTyIdx())); + } + ASSERT(receiverPtrType->GetKind() == kTypePointer, "expecting a pointer type"); + rethrow->ConvertThrowToRuntime(*cgFunc, *irNode->CloneTree( + mirFunc.GetModule()->GetCurFuncCodeMPAllocator())); + break; + } + case OP_regread: { + RegreadNode *rrNode = static_cast(opnd0); + ASSERT(mirFunc.GetPregTab()->PregFromPregIdx(rrNode->GetRegIdx())->GetPrimType() == GetLoweredPtrType(), + "expect GetLoweredPtrType()"); + ASSERT(mirFunc.GetPregTab()->PregFromPregIdx(rrNode->GetRegIdx())->GetMIRType()->GetKind() == kTypePointer, + "expect pointer type"); + rethrow->ConvertThrowToRuntime(*cgFunc, *rrNode->CloneTree( + mirFunc.GetModule()->GetCurFuncCodeMPAllocator())); + break; + } + case OP_constval: { + ConstvalNode *constValNode = static_cast(opnd0); + BaseNode *newNode = constValNode->CloneTree(mirFunc.GetModule()->GetCurFuncCodeMPAllocator()); + ASSERT(newNode != nullptr, "nullptr check"); + rethrow->ConvertThrowToRuntime(*cgFunc, *newNode); + break; + } + case OP_cvt: { + TypeCvtNode *cvtNode = static_cast(opnd0); + PrimType prmType = cvtNode->GetPrimType(); + // prmType supposed to be Pointer. 
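+          // Only pointer-like results (PTY_ptr/ref/a32/a64) are lowered here; any
+          // other cvt result is left as it is.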
+ if ((prmType == PTY_ptr) || (prmType == PTY_ref) || (prmType == PTY_a32) || (prmType == PTY_a64)) { + BaseNode *newNode = cvtNode->CloneTree(mirFunc.GetModule()->GetCurFuncCodeMPAllocator()); + rethrow->ConvertThrowToRuntime(*cgFunc, *newNode); + } + break; + } + default: + ASSERT(false, "unexpected or NYI"); + } + } +} + +/* + * merge catch to try + */ +void EHFunc::MergeCatchToTry(const std::vector> &catchVec) { + size_t tryOffsetCount; + for (auto *ehTry : tryVec) { + tryOffsetCount = ehTry->GetTryNode()->GetOffsetsCount(); + for (size_t i = 0; i < tryOffsetCount; i++) { + auto o = ehTry->GetTryNode()->GetOffset(i); + for (const auto &catchVecPair : catchVec) { + LabelIdx lbIdx = catchVecPair.first; + if (lbIdx == o) { + ehTry->PushBackCatchVec(*catchVecPair.second); + break; + } + } + } + CHECK_FATAL(ehTry->GetCatchVecSize() == tryOffsetCount, "EHTry instance offset does not equal catch node amount."); + } +} + +/* catchvec is going to be released by the caller */ +void EHFunc::BuildEHTypeTable(const std::vector> &catchVec) { + if (!catchVec.empty()) { + /* the first one assume to be <*void> */ + TyIdx voidTyIdx(PTY_void); + ehTyTable.emplace_back(voidTyIdx); + ty2IndexTable[voidTyIdx] = 0; + /* create void pointer and update becommon's size table */ + cgFunc->GetBecommon().UpdateTypeTable(*GlobalTables::GetTypeTable().GetVoidPtr()); + } + + /* create the type table for this function, just iterate each catch */ + CatchNode *jCatchNode = nullptr; + size_t catchNodeSize; + for (const auto &catchVecPair : catchVec) { + jCatchNode = catchVecPair.second; + catchNodeSize = jCatchNode->Size(); + for (size_t i = 0; i < catchNodeSize; i++) { + MIRType *mirTy = GlobalTables::GetTypeTable().GetTypeFromTyIdx(jCatchNode->GetExceptionTyIdxVecElement(i)); + ASSERT(mirTy->GetKind() == kTypePointer, "mirTy is not pointer type"); + TyIdx ehTyIdx = static_cast(mirTy)->GetPointedTyIdx(); + if (ty2IndexTable.find(ehTyIdx) != ty2IndexTable.end()) { + continue; + } + + ty2IndexTable[ehTyIdx] = ehTyTable.size(); + ehTyTable.emplace_back(ehTyIdx); + MIRClassType *catchType = static_cast(GlobalTables::GetTypeTable().GetTypeFromTyIdx(ehTyIdx)); + MIRClassType *rootType = catchType->GetExceptionRootType(); + if (rootType == nullptr) { + rootType = static_cast(GlobalTables::GetTypeTable().GetOrCreateClassType( + "Ljava_2Flang_2FThrowable_3B", *GlobalTables::GetGsymTable().GetModule())); + catchType->SetParentTyIdx(rootType->GetTypeIndex()); + } + } + } +} + +void EHFunc::DumpEHFunc() const { + MIRModule &mirModule = *cgFunc->GetFunction().GetModule(); + for (uint32 i = 0; i < this->tryVec.size(); i++) { + LogInfo::MapleLogger() << "\n========== start " << i << " th eh:\n"; + EHTry *ehTry = tryVec[i]; + ehTry->DumpEHTry(mirModule); + LogInfo::MapleLogger() << "========== end " << i << " th eh =========\n"; + } + + LogInfo::MapleLogger() << "\n========== start LSDA type table ========\n"; + for (uint32 i = 0; i < this->ehTyTable.size(); i++) { + LogInfo::MapleLogger() << i << " vector to "; + GlobalTables::GetTypeTable().GetTypeFromTyIdx(ehTyTable[i])->Dump(0); + LogInfo::MapleLogger() << "\n"; + } + LogInfo::MapleLogger() << "========== end LSDA type table ========\n"; + + LogInfo::MapleLogger() << "\n========== start type-index map ========\n"; + for (const auto &ty2indexTablePair : ty2IndexTable) { + GlobalTables::GetTypeTable().GetTypeFromTyIdx(ty2indexTablePair.first)->Dump(0); + LogInfo::MapleLogger() << " map to "; + LogInfo::MapleLogger() << ty2indexTablePair.second << "\n"; + } + LogInfo::MapleLogger() << 
"========== end type-index map ========\n"; +} + +/* + * cleanup_label is an LabelNode, and placed just before endLabel. + * cleanup_label is the first statement of cleanupbb. + * the layout of clean up code is: + * //return bb + * ... + * //cleanup bb = lastbb->prev; cleanupbb->PrependBB(retbb) + * cleanup_label: + * ... + * //lastbb + * endLabel: + * .cfi_endproc + * .Label.xx.end: + * .size + */ +void EHFunc::GenerateCleanupLabel() { + MIRModule *mirModule = cgFunc->GetFunction().GetModule(); + cgFunc->SetCleanupLabel(*mirModule->GetMIRBuilder()->CreateStmtLabel(CreateLabel(".LCLEANUP"))); + BlockNode *blockNode = cgFunc->GetFunction().GetBody(); + blockNode->InsertBefore(cgFunc->GetEndLabel(), cgFunc->GetCleanupLabel()); +} + +void EHFunc::InsertDefaultLabelAndAbortFunc(BlockNode &blkNode, SwitchNode &switchNode, + const StmtNode &beforeEndLabel) const { + MIRModule &mirModule = *cgFunc->GetFunction().GetModule(); + LabelIdx dfLabIdx = cgFunc->GetFunction().GetLabelTab()->CreateLabel(); + cgFunc->GetFunction().GetLabelTab()->AddToStringLabelMap(dfLabIdx); + StmtNode *dfLabStmt = mirModule.GetMIRBuilder()->CreateStmtLabel(dfLabIdx); + blkNode.InsertAfter(&beforeEndLabel, dfLabStmt); + MIRFunction *calleeFunc = mirModule.GetMIRBuilder()->GetOrCreateFunction("abort", static_cast(PTY_void)); + calleeFunc->SetAttr(FUNCATTR_noreturn); + cgFunc->GetBecommon().UpdateTypeTable(*calleeFunc->GetMIRFuncType()); + MapleVector args(mirModule.GetMIRBuilder()->GetCurrentFuncCodeMpAllocator()->Adapter()); + CallNode *callExit = mirModule.GetMIRBuilder()->CreateStmtCall(calleeFunc->GetPuidx(), args); + blkNode.InsertAfter(dfLabStmt, callExit); + switchNode.SetDefaultLabel(dfLabIdx); +} + +void EHFunc::FillSwitchTable(SwitchNode &switchNode, const EHTry &ehTry) { + CatchNode *catchNode = nullptr; + MIRType *exceptionType = nullptr; + MIRPtrType *ptType = nullptr; + size_t catchVecSize = ehTry.GetCatchVecSize(); + /* update switch node's cases */ + for (size_t i = 0; i < catchVecSize; i++) { + catchNode = ehTry.GetCatchNodeAt(i); + for (size_t j = 0; j < catchNode->Size(); j++) { + exceptionType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(catchNode->GetExceptionTyIdxVecElement(j)); + ptType = static_cast(exceptionType); + MapleMap::iterator ty2IdxIt = ty2IndexTable.find(ptType->GetPointedTyIdx()); + ASSERT(ty2IdxIt != ty2IndexTable.end(), "find tyIdx failed!"); + uint32 tableIdx = ty2IdxIt->second; + LabelNode *catchLabelNode = static_cast(catchNode->GetPrev()); + CasePair p(tableIdx, catchLabelNode->GetLabelIdx()); + bool inserted = false; + for (auto x : switchNode.GetSwitchTable()) { + if (x == p) { + inserted = true; + break; + } + } + if (!inserted) { + switchNode.InsertCasePair(p); + } + } + } +} + +/* this is also the landing pad code. */ +void EHFunc::InsertEHSwitchTable() { + MIRModule &mirModule = *cgFunc->GetFunction().GetModule(); + BlockNode *blockNode = cgFunc->GetFunction().GetBody(); + CHECK_FATAL(blockNode != nullptr, "get function body failed in EHThrow::InsertEHSwitchTable"); + StmtNode *endLabelPrevNode = nullptr; + SwitchNode *switchNode = nullptr; + for (auto *ehTry : tryVec) { + endLabelPrevNode = cgFunc->GetEndLabel()->GetPrev(); + /* + * get the next statement of the trynode. 
when no throw happend in try block, jump to the statement directly + * create a switch statement and insert after tryend; + */ + switchNode = mirModule.CurFuncCodeMemPool()->New(mirModule); + /* create a new label as default, and if program excute here, error it */ + InsertDefaultLabelAndAbortFunc(*blockNode, *switchNode, *endLabelPrevNode); + /* create s special symbol that use the second return of __builtin_eh_return() */ + MIRSymbol *mirSymbol = mirModule.GetMIRBuilder()->CreateSymbol(TyIdx(PTY_i32), "__eh_index__", kStVar, kScAuto, + &cgFunc->GetFunction(), kScopeLocal); + switchNode->SetSwitchOpnd(mirModule.GetMIRBuilder()->CreateExprDread(*mirSymbol)); + FillSwitchTable(*switchNode, *ehTry); + SwitchLowerer switchLower(mirModule, *switchNode, *cgFunc->GetFuncScopeAllocator()); + blockNode->InsertBlockAfter(*switchLower.LowerSwitch(), endLabelPrevNode); + ehTry->SetFallthruGoto(endLabelPrevNode->GetNext()); + } + if (!CGOptions::IsQuiet()) { + cgFunc->GetFunction().Dump(); + } +} + +LabelIdx EHFunc::CreateLabel(const std::string &cstr) { + MIRSymbol *mirSymbol = GlobalTables::GetGsymTable().GetSymbolFromStidx(cgFunc->GetFunction().GetStIdx().Idx()); + CHECK_FATAL(mirSymbol != nullptr, "get function symbol failed in EHFunc::CreateLabel"); + std::string funcName = mirSymbol->GetName(); + std::string labStr = funcName.append(cstr).append(std::to_string(labelIdx++)); + return cgFunc->GetFunction().GetOrCreateLableIdxFromName(labStr); +} + +/* think about moving this to BELowerer where LowerThrownval is already written */ +void EHFunc::InsertCxaAfterEachCatch(const std::vector> &catchVec) const { + MIRModule &mirModule = *cgFunc->GetFunction().GetModule(); + BlockNode *funcBody = cgFunc->GetFunction().GetBody(); + CatchNode *jCatchNode = nullptr; + TyIdx voidPTy = GlobalTables::GetTypeTable().GetVoidPtr()->GetTypeIndex(); + for (const auto &catchVecPair : catchVec) { + jCatchNode = catchVecPair.second; + MIRFunction *calleeFunc = mirModule.GetMIRBuilder()->GetOrCreateFunction("MCC_JavaBeginCatch", voidPTy); + cgFunc->GetBecommon().UpdateTypeTable(*calleeFunc->GetMIRFuncType()); + RegreadNode *retRegRead0 = mirModule.CurFuncCodeMemPool()->New(); + retRegRead0->SetRegIdx(-kSregRetval0); + retRegRead0->SetPrimType(GetLoweredPtrType()); + MapleVector args(mirModule.GetMIRBuilder()->GetCurrentFuncCodeMpAllocator()->Adapter()); + args.emplace_back(retRegRead0); + CallNode *callAssign = mirModule.GetMIRBuilder()->CreateStmtCall(calleeFunc->GetPuidx(), args); + funcBody->InsertAfter(jCatchNode, callAssign); + } +} + +void EHFunc::CreateLSDAHeader() { + constexpr uint8 startEncoding = 0xff; + constexpr uint8 typeEncoding = 0x9b; + constexpr uint8 callSiteEncoding = 0x1; + MIRBuilder *mirBuilder = cgFunc->GetFunction().GetModule()->GetMIRBuilder(); + + LSDAHeader *lsdaHeaders = cgFunc->GetMemoryPool()->New(); + LabelIdx lsdaHdLblIdx = CreateLabel("LSDAHD"); /* LSDA head */ + LabelNode *lsdaHdLblNode = mirBuilder->CreateStmtLabel(lsdaHdLblIdx); + lsdaHeaders->SetLSDALabel(*lsdaHdLblNode); + + LabelIdx lsdaTTStartIdx = CreateLabel("LSDAALLS"); /* LSDA all start; */ + LabelNode *lsdaTTLblNode = mirBuilder->CreateStmtLabel(lsdaTTStartIdx); + LabelIdx lsdaTTEndIdx = CreateLabel("LSDAALLE"); /* LSDA all end; */ + LabelNode *lsdaCSTELblNode = mirBuilder->CreateStmtLabel(lsdaTTEndIdx); + lsdaHeaders->SetTTypeOffset(lsdaTTLblNode, lsdaCSTELblNode); + + lsdaHeaders->SetLPStartEncoding(startEncoding); + lsdaHeaders->SetTTypeEncoding(typeEncoding); + lsdaHeaders->SetCallSiteEncoding(callSiteEncoding); + 
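+  /* These values follow the usual DWARF EH encodings: 0xff is DW_EH_PE_omit (no
+   * @LPStart), 0x9b is DW_EH_PE_indirect | DW_EH_PE_pcrel | DW_EH_PE_sdata4 for
+   * @TType, and 0x1 is DW_EH_PE_uleb128 for the call-site table. */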
lsdaHeader = lsdaHeaders; +} + +void EHFunc::FillLSDACallSiteTable() { + constexpr uint8 callSiteFirstAction = 0x1; + MIRBuilder *mirBuilder = cgFunc->GetFunction().GetModule()->GetMIRBuilder(); + BlockNode *bodyNode = cgFunc->GetFunction().GetBody(); + + lsdaCallSiteTable = cgFunc->GetMemoryPool()->New(*cgFunc->GetFuncScopeAllocator()); + LabelIdx lsdaCSTStartIdx = CreateLabel("LSDACSTS"); /* LSDA callsite table start; */ + LabelNode *lsdaCSTStartLabel = mirBuilder->CreateStmtLabel(lsdaCSTStartIdx); + LabelIdx lsdaCSTEndIdx = CreateLabel("LSDACSTE"); /* LSDA callsite table end; */ + LabelNode *lsdaCSTEndLabel = mirBuilder->CreateStmtLabel(lsdaCSTEndIdx); + lsdaCallSiteTable->SetCSTable(lsdaCSTStartLabel, lsdaCSTEndLabel); + + /* create LDSACallSite for each EHTry instance */ + for (auto *ehTry : tryVec) { + ASSERT(ehTry != nullptr, "null ptr check"); + /* replace try with a label which is the callsite_start */ + LabelIdx csStartLblIdx = CreateLabel("LSDACS"); + LabelNode *csLblNode = mirBuilder->CreateStmtLabel(csStartLblIdx); + LabelIdx csEndLblIdx = CreateLabel("LSDACE"); + LabelNode *ceLblNode = mirBuilder->CreateStmtLabel(csEndLblIdx); + TryNode *tryNode = ehTry->GetTryNode(); + bodyNode->ReplaceStmt1WithStmt2(tryNode, csLblNode); + StmtNode *endTryNode = ehTry->GetEndtryNode(); + bodyNode->ReplaceStmt1WithStmt2(endTryNode, ceLblNode); + + LabelNode *ladpadEndLabel = nullptr; + if (ehTry->GetFallthruGoto()) { + ladpadEndLabel = mirBuilder->CreateStmtLabel(CreateLabel("LSDALPE")); + bodyNode->InsertBefore(ehTry->GetFallthruGoto(), ladpadEndLabel); + } else { + ladpadEndLabel = ceLblNode; + } + /* When there is only one catch, the exception table is optimized. */ + if (ehTry->GetCatchVecSize() == 1) { + ladpadEndLabel = static_cast(ehTry->GetCatchNodeAt(0)->GetPrev()); + } + + LSDACallSite *lsdaCallSite = cgFunc->GetMemoryPool()->New(); + LabelPair csStart(cgFunc->GetStartLabel(), csLblNode); + LabelPair csLength(csLblNode, ceLblNode); + LabelPair csLandingPad(cgFunc->GetStartLabel(), ladpadEndLabel); + lsdaCallSite->Init(csStart, csLength, csLandingPad, callSiteFirstAction); + ehTry->SetLSDACallSite(*lsdaCallSite); + lsdaCallSiteTable->PushBack(*lsdaCallSite); + } +} + +void EHFunc::CreateLSDA() { + constexpr uint8 callSiteCleanUpAction = 0x0; + /* create header */ + CreateLSDAHeader(); + /* create and fill callsite table */ + FillLSDACallSiteTable(); + + for (auto *rethrow : rethrowVec) { + ASSERT(rethrow != nullptr, "null ptr check"); + /* replace throw (void * obj) with call __java_rethrow and unwind resume */ + rethrow->Lower(*cgFunc); + if (rethrow->HasLSDA()) { + LSDACallSite *lsdaCallSite = cgFunc->GetMemoryPool()->New(); + LabelPair csStart(cgFunc->GetStartLabel(), rethrow->GetStartLabel()); + LabelPair csLength(rethrow->GetStartLabel(), rethrow->GetEndLabel()); + LabelPair csLandingPad(nullptr, nullptr); + lsdaCallSite->Init(csStart, csLength, csLandingPad, callSiteCleanUpAction); + lsdaCallSiteTable->PushBack(*lsdaCallSite); + } + } + + /* LSDAAction table */ + CreateLSDAAction(); +} + +void EHFunc::CreateLSDAAction() { + constexpr uint8 actionTableNextEncoding = 0x7d; + /* iterate each try and its corresponding catch */ + LSDAActionTable *actionTable = cgFunc->GetMemoryPool()->New(*cgFunc->GetFuncScopeAllocator()); + lsdaActionTable = actionTable; + + for (auto *ehTry : tryVec) { + LSDAAction *lastAction = nullptr; + for (int32 j = static_cast(ehTry->GetCatchVecSize()) - 1; j >= 0; --j) { + CatchNode *catchNode = ehTry->GetCatchNodeAt(j); + ASSERT(catchNode != nullptr, "null 
ptr check"); + for (uint32 idx = 0; idx < catchNode->Size(); ++idx) { + MIRPtrType *ptType = static_cast( + GlobalTables::GetTypeTable().GetTypeFromTyIdx(catchNode->GetExceptionTyIdxVecElement(idx))); + uint32 tyIndex = ty2IndexTable[ptType->GetPointedTyIdx()]; /* get the index of ptType of ehTyTable; */ + ASSERT(tyIndex != 0, "exception type index not allow equal zero"); + LSDAAction *lsdaAction = + cgFunc->GetMemoryPool()->New(tyIndex, lastAction == nullptr ? 0 : actionTableNextEncoding); + lastAction = lsdaAction; + actionTable->PushBack(*lsdaAction); + } + } + + /* record actionTable group offset, per LSDAAction object in actionTable occupy 2 bytes */ + ehTry->SetCSAction((actionTable->Size() - 1) * 2 + 1); + } +} + +bool CgBuildEHFunc::PhaseRun(maplebe::CGFunc &f) { + f.BuildEHFunc(); + return false; +} +MAPLE_TRANSFORM_PHASE_REGISTER(CgBuildEHFunc, buildehfunc) +} /* namespace maplebe */ diff --git a/src/mapleall/maple_be/src/cg/emit.cpp b/src/mapleall/maple_be/src/cg/emit.cpp new file mode 100644 index 0000000000000000000000000000000000000000..53c7a1ba050c3999790d871b17e27f9f65987b0c --- /dev/null +++ b/src/mapleall/maple_be/src/cg/emit.cpp @@ -0,0 +1,3723 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "emit.h" +#include +#ifdef _WIN32 +#include +#endif +#include "reflection_analysis.h" +#include "muid_replacement.h" +#include "metadata_layout.h" +#include "string_utils.h" +using namespace namemangler; + +namespace { +using namespace maple; +constexpr uint32 kSizeOfHugesoRoutine = 3; +constexpr uint32 kFromDefIndexMask32Mod = 0x40000000; + +int32 GetPrimitiveTypeSize(const std::string &name) { + if (name.length() != 1) { + return -1; + } + char typeName = name[0]; + switch (typeName) { + case 'Z': + return static_cast(GetPrimTypeSize(PTY_u1)); + case 'B': + return static_cast(GetPrimTypeSize(PTY_i8)); + case 'S': + return static_cast(GetPrimTypeSize(PTY_i16)); + case 'C': + return static_cast(GetPrimTypeSize(PTY_u16)); + case 'I': + return static_cast(GetPrimTypeSize(PTY_i32)); + case 'J': + return static_cast(GetPrimTypeSize(PTY_i64)); + case 'F': + return static_cast(GetPrimTypeSize(PTY_f32)); + case 'D': + return static_cast(GetPrimTypeSize(PTY_f64)); + case 'V': + return static_cast(GetPrimTypeSize(PTY_void)); + default: + return -1; + } +} +DBGDieAttr *LFindAttribute(MapleVector &vec, DwAt key) { + for (DBGDieAttr *at : vec) + if (at->GetDwAt() == key) { + return at; + } + return nullptr; +} + +DBGAbbrevEntry *LFindAbbrevEntry(MapleVector &abbvec, unsigned int key) { + for (DBGAbbrevEntry *daie : abbvec) { + if (!daie) { + continue; + } + if (daie->GetAbbrevId() == key) { + return daie; + } + } + ASSERT(0, ""); + return nullptr; +} + +bool LShouldEmit(unsigned int dwform) { + return dwform != DW_FORM_flag_present; +} + +DBGDie *LFindChildDieWithName(DBGDie *die, DwTag tag, const GStrIdx key) { + for (DBGDie *c : die->GetSubDieVec()) { + if (c->GetTag() == tag) { + for (DBGDieAttr *a : c->GetAttrVec()) { + if (a->GetDwAt() == DW_AT_name) 
{ + if ((a->GetDwForm() == DW_FORM_string || a->GetDwForm() == DW_FORM_strp) && a->GetId() == key.GetIdx()) { + return c; + } else { + break; + } + } + } + } + } + return nullptr; +} + +DBGDieAttr *LFindDieAttr(DBGDie *die, DwAt attrname) { + for (DBGDieAttr *attr : die->GetAttrVec()) { + if (attr->GetDwAt() == attrname) { + return attr; + } + } + return nullptr; +} + +static void LUpdateAttrValue(DBGDieAttr *attr, int64_t newval) { + attr->SetI(int32_t(newval)); +} +} + +namespace maplebe { +using namespace maple; +using namespace cfi; + +void Emitter::EmitLabelRef(LabelIdx labIdx) { + PUIdx pIdx = GetCG()->GetMIRModule()->CurFunction()->GetPuidx(); + char *idx = strdup(std::to_string(pIdx).c_str()); + outStream << ".L." << idx << "__" << labIdx; + free(idx); + idx = nullptr; +} + +void Emitter::EmitStmtLabel(LabelIdx labIdx) { + EmitLabelRef(labIdx); + outStream << ":\n"; +} + +void Emitter::EmitLabelPair(const LabelPair &pairLabel) { + ASSERT(pairLabel.GetEndOffset() || pairLabel.GetStartOffset(), "NYI"); + EmitLabelRef(pairLabel.GetEndOffset()->GetLabelIdx()); + outStream << " - "; + EmitLabelRef(pairLabel.GetStartOffset()->GetLabelIdx()); + outStream << "\n"; +} + +void Emitter::EmitLabelForFunc(const MIRFunction *func, LabelIdx labIdx) { + char *idx = strdup(std::to_string(func->GetPuidx()).c_str()); + outStream << ".L." << idx << "__" << labIdx; + free(idx); + idx = nullptr; +} + +AsmLabel Emitter::GetTypeAsmInfoName(PrimType primType) const { + uint32 size = GetPrimTypeSize(primType); + /* case x : x occupies bytes of pty */ + switch (size) { + case k1ByteSize: + return kAsmByte; + case k2ByteSize: +#if TARGAARCH64 || TARGRISCV64 + return kAsmShort; +#else + return kAsmValue; +#endif + case k4ByteSize: + return kAsmLong; + case k8ByteSize: + return kAsmQuad; + default: + ASSERT(false, "NYI"); + break; + } + return kAsmLong; +} + +void Emitter::EmitFileInfo(const std::string &fileName) { +#if defined(_WIN32) || defined(DARWIN) + char *curDirName = getcwd(nullptr, 0); +#else + char *curDirName = get_current_dir_name(); +#endif + CHECK_FATAL(curDirName != nullptr, "null ptr check "); + Emit(asmInfo->GetCmnt()); + std::string path(curDirName); +#ifdef _WIN32 + std::string cgFile(path.append("\\mplcg")); +#else + std::string cgFile(path.append("/mplcg")); +#endif + Emit(cgFile); + Emit("\n"); + + std::string compile("Compiling "); + Emit(asmInfo->GetCmnt()); + Emit(compile); + Emit("\n"); + + std::string beOptions("Be options"); + Emit(asmInfo->GetCmnt()); + Emit(beOptions); + Emit("\n"); + + path = curDirName; + path.append("/").append(fileName); + /* strip path before out/ */ + std::string out = "/out/"; + size_t pos = path.find(out.c_str(), 0, out.length()); + if (pos != std::string::npos) { + path.erase(0, pos + 1); + } + std::string irFile("\""); + irFile.append(path).append("\""); + Emit(asmInfo->GetFile()); + Emit(irFile); + Emit("\n"); + + /* save directory path in index 8 */ + SetFileMapValue(0, path); + + /* .file #num src_file_name */ + if (cg->GetCGOptions().WithLoc()) { + /* .file 1 mpl_file_name */ + if (cg->GetCGOptions().WithAsm()) { + Emit("\t// "); + } + Emit(asmInfo->GetFile()); + Emit("1 "); + Emit(irFile); + Emit("\n"); + SetFileMapValue(1, irFile); /* save ir file in 1 */ + if (cg->GetCGOptions().WithSrc()) { + /* insert a list of src files */ + uint32 i = 2; + for (auto it : cg->GetMIRModule()->GetSrcFileInfo()) { + if (cg->GetCGOptions().WithAsm()) { + Emit("\t// "); + } + Emit(asmInfo->GetFile()); + Emit(it.second).Emit(" \""); + std::string kStr = 
GlobalTables::GetStrTable().GetStringFromStrIdx(it.first); + Emit(kStr); + Emit("\"\n"); + SetFileMapValue(i++, kStr); + } + } + } + free(curDirName); + + EmitInlineAsmSection(); +#if TARGARM32 + Emit("\t.syntax unified\n"); + /* + * "The arm instruction set is a subset of + * the most commonly used 32-bit ARM instructions." + * http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.ddi0210c/CACBCAAE.html + */ + Emit("\t.arm\n"); + Emit("\t.fpu vfpv4\n"); + Emit("\t.arch armv7-a\n"); + Emit("\t.eabi_attribute Tag_ABI_PCS_RW_data, 1\n"); + Emit("\t.eabi_attribute Tag_ABI_PCS_RO_data, 1\n"); + Emit("\t.eabi_attribute Tag_ABI_PCS_GOT_use, 2\n"); + if (CGOptions::GetABIType() == CGOptions::kABIHard) { + Emit("\t.eabi_attribute Tag_ABI_VFP_args, 1\n"); + } + Emit("\t.eabi_attribute Tag_ABI_FP_denormal, 1\n"); + Emit("\t.eabi_attribute Tag_ABI_FP_exceptions, 1\n"); + Emit("\t.eabi_attribute Tag_ABI_FP_number_model, 3\n"); + Emit("\t.eabi_attribute Tag_ABI_align_needed, 1\n"); + Emit("\t.eabi_attribute Tag_ABI_align_preserved, 1\n"); + Emit("\t.eabi_attribute Tag_ABI_enum_size, 2\n"); + Emit("\t.eabi_attribute 30, 6\n"); + Emit("\t.eabi_attribute Tag_CPU_unaligned_access, 1\n"); + Emit("\t.eabi_attribute Tag_ABI_PCS_wchar_t, 4\n"); +#endif /* TARGARM32 */ +} + +void Emitter::EmitInlineAsmSection() { + MapleVector &asmSections = cg->GetMIRModule()->GetAsmDecls(); + if (!asmSections.empty()) { + Emit("#APP\n"); + for (auto &singleSection : asmSections) { + Emit("\t"); + Emit(singleSection); + Emit("\n"); + } + Emit("#NO_APP\n"); + } +} +void Emitter::EmitAsmLabel(AsmLabel label) { + switch (label) { + case kAsmData: { + (void)Emit(asmInfo->GetData()); + (void)Emit("\n"); + return; + } + case kAsmText: { + (void)Emit(asmInfo->GetText()); + (void)Emit("\n"); + return; + } + case kAsmType: { + (void)Emit(asmInfo->GetType()); + return; + } + case kAsmByte: { + (void)Emit(asmInfo->GetByte()); + return; + } + case kAsmShort: { + (void)Emit(asmInfo->GetShort()); + return; + } + case kAsmValue: { + (void)Emit(asmInfo->GetValue()); + return; + } + case kAsmLong: { + (void)Emit(asmInfo->GetLong()); + return; + } + case kAsmQuad: { + (void)Emit(asmInfo->GetQuad()); + return; + } + case kAsmZero: + (void)Emit(asmInfo->GetZero()); + return; + default: + ASSERT(false, "should not run here"); + return; + } +} + +void Emitter::EmitAsmLabel(const MIRSymbol &mirSymbol, AsmLabel label) { + MIRType *mirType = mirSymbol.GetType(); + std::string symName; + if (mirSymbol.GetStorageClass() == kScPstatic && mirSymbol.IsLocal()) { + PUIdx pIdx = GetCG()->GetMIRModule()->CurFunction()->GetPuidx(); + symName = mirSymbol.GetName() + std::to_string(pIdx); + } else { + symName = mirSymbol.GetName(); + } + if (mirSymbol.GetAsmAttr() != UStrIdx(0) && + (mirSymbol.GetStorageClass() == kScPstatic || mirSymbol.GetStorageClass() == kScPstatic)) { + std::string asmSection = GlobalTables::GetUStrTable().GetStringFromStrIdx(mirSymbol.GetAsmAttr()); + symName = asmSection; + } + if (Globals::GetInstance()->GetBECommon()->IsEmptyOfTypeAlignTable()) { + ASSERT(false, "container empty check"); + } + + switch (label) { + case kAsmGlbl: { + Emit(asmInfo->GetGlobal()); + Emit(symName); + Emit("\n"); + return; + } + case kAsmHidden: { + Emit(asmInfo->GetHidden()); + Emit(symName); + Emit("\n"); + return; + } + case kAsmLocal: { + Emit(asmInfo->GetLocal()); + Emit(symName); + Emit("\n"); + return; + } + case kAsmWeak: { + Emit(asmInfo->GetWeak()); + Emit(symName); + Emit("\n"); + return; + } + case kAsmZero: { + uint64 size = 
Globals::GetInstance()->GetBECommon()->GetTypeSize(mirType->GetTypeIndex()); + EmitNullConstant(size); + return; + } + case kAsmComm: { + std::string size; + if (isFlexibleArray) { + size = std::to_string(Globals::GetInstance()->GetBECommon()->GetTypeSize(mirType->GetTypeIndex()) + arraySize); + } else { + size = std::to_string(Globals::GetInstance()->GetBECommon()->GetTypeSize(mirType->GetTypeIndex())); + } + (void)Emit(asmInfo->GetComm()).Emit(symName).Emit(", ").Emit(size).Emit(", "); +#if PECOFF +#if TARGARM || TARGAARCH64 || TARGARK || TARGRISCV64 + std::string align = std::to_string( + static_cast(log2(Globals::GetInstance()->GetBECommon()->GetTypeAlign(mirType->GetTypeIndex())))); +#else + std::string align = std::to_string( + Globals::GetInstance()->GetBECommon()->GetTypeAlign(mirType->GetTypeIndex())); +#endif + emit(align.c_str()); +#else /* ELF */ + /* output align, symbol name begin with "classInitProtectRegion" align is 4096 */ + MIRTypeKind kind = mirSymbol.GetType()->GetKind(); + MIRStorageClass storage = mirSymbol.GetStorageClass(); + if (symName.find("classInitProtectRegion") == 0) { + Emit(4096); + } else if (((kind == kTypeStruct) || (kind == kTypeClass) || (kind == kTypeArray) || (kind == kTypeUnion)) && + ((storage == kScGlobal) || (storage == kScPstatic) || (storage == kScFstatic))) { + int32 align = Globals::GetInstance()->GetBECommon()->GetTypeAlign(mirType->GetTypeIndex()); + if (GetPointerSize() < align) { + (void)Emit(std::to_string(align)); + } else { + (void)Emit(std::to_string(k8ByteSize)); + } + } else { + (void)Emit(std::to_string(Globals::GetInstance()->GetBECommon()->GetTypeAlign(mirType->GetTypeIndex()))); + } +#endif + Emit("\n"); + return; + } + case kAsmAlign: { + uint8 align = mirSymbol.GetAttrs().GetAlignValue(); + if (align == 0) { + if (mirSymbol.GetType()->GetKind() == kTypeStruct || + mirSymbol.GetType()->GetKind() == kTypeClass || + mirSymbol.GetType()->GetKind() == kTypeArray || + mirSymbol.GetType()->GetKind() == kTypeUnion) { +#if TARGX86 || TARGX86_64 + return; +#else + align = kAlignOfU8; +#endif + } else { + align = Globals::GetInstance()->GetBECommon()->GetTypeAlign(mirSymbol.GetType()->GetTypeIndex()); +#if TARGARM32 || TARGAARCH64 || TARGARK || TARGRISCV64 + if (CGOptions::IsArm64ilp32() && mirSymbol.GetType()->GetPrimType() == PTY_a32) { + align = kAlignOfU8; + } else { + align = static_cast(log2(align)); + } +#endif + } + } + Emit(asmInfo->GetAlign()); + Emit(std::to_string(align)); + Emit("\n"); + return; + } + case kAsmSyname: { + Emit(symName); + Emit(":\n"); + return; + } + case kAsmSize: { + Emit(asmInfo->GetSize()); + Emit(symName); + Emit(", "); +#if TARGX86 || TARGX86_64 + Emit(".-"); + Emit(symName); +#else + std::string size; + if (isFlexibleArray) { + size = std::to_string( + Globals::GetInstance()->GetBECommon()->GetTypeSize(mirType->GetTypeIndex()) + arraySize); + } else { + size = std::to_string(Globals::GetInstance()->GetBECommon()->GetTypeSize(mirType->GetTypeIndex())); + } + Emit(size); +#endif + Emit("\n"); + return; + } + case kAsmType: { + Emit(asmInfo->GetType()); + if (GetCG()->GetMIRModule()->IsCModule() && (symName == "sys_nerr" || symName == "sys_errlist")) { + /* eliminate warning from deprecated C name */ + Emit("strerror"); + } else { + Emit(symName); + } + Emit(","); + Emit(asmInfo->GetAtobt()); + Emit("\n"); + return; + } + default: + ASSERT(false, "should not run here"); + return; + } +} + +void Emitter::EmitNullConstant(uint64 size) { + EmitAsmLabel(kAsmZero); + Emit(std::to_string(size)); + Emit("\n"); 
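+  /* Note: kAsmZero maps to the assembler's zero-fill directive (typically ".zero"), so the calls above
+   * emit a line such as ".zero <size>", reserving <size> zero-initialized bytes; callers use this for
+   * uninitialized elements and for struct/array padding. */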
+} + +void Emitter::EmitCombineBfldValue(StructEmitInfo &structEmitInfo) { + uint8 charBitWidth = GetPrimTypeSize(PTY_i8) * kBitsPerByte; + auto emitBfldValue = [&structEmitInfo, charBitWidth, this](bool flag) { + while (structEmitInfo.GetCombineBitFieldWidth() > charBitWidth) { + uint8 shift = flag ? (structEmitInfo.GetCombineBitFieldWidth() - charBitWidth) : 0U; + uint64 tmp = (structEmitInfo.GetCombineBitFieldValue() >> shift) & 0x00000000000000ffUL; + EmitAsmLabel(kAsmByte); + Emit(std::to_string(tmp)); + Emit("\n"); + structEmitInfo.DecreaseCombineBitFieldWidth(charBitWidth); + uint64 value = flag ? + structEmitInfo.GetCombineBitFieldValue() - (tmp << structEmitInfo.GetCombineBitFieldWidth()) : + structEmitInfo.GetCombineBitFieldValue() >> charBitWidth; + structEmitInfo.SetCombineBitFieldValue(value); + } + }; + if (CGOptions::IsBigEndian()) { + /* + * If the total number of bits in the bit field is not a multiple of 8, + * the bits must be aligned to 8 bits to prevent errors in the emit. + */ + auto width = static_cast(RoundUp(structEmitInfo.GetCombineBitFieldWidth(), charBitWidth)); + if (structEmitInfo.GetCombineBitFieldWidth() < width) { + structEmitInfo.SetCombineBitFieldValue(structEmitInfo.GetCombineBitFieldValue() << + (width - structEmitInfo.GetCombineBitFieldWidth())); + structEmitInfo.IncreaseCombineBitFieldWidth(static_cast( + width - structEmitInfo.GetCombineBitFieldWidth())); + } + emitBfldValue(true); + } else { + emitBfldValue(false); + } + if (structEmitInfo.GetCombineBitFieldWidth() != 0) { + EmitAsmLabel(kAsmByte); + uint64 value = structEmitInfo.GetCombineBitFieldValue() & 0x00000000000000ffUL; + Emit(std::to_string(value)); + Emit("\n"); + } + CHECK_FATAL(charBitWidth != 0, "divide by zero"); + if ((structEmitInfo.GetNextFieldOffset() % charBitWidth) != 0) { + uint8 value = charBitWidth - (structEmitInfo.GetNextFieldOffset() % charBitWidth); + structEmitInfo.IncreaseNextFieldOffset(value); + } + structEmitInfo.SetTotalSize(structEmitInfo.GetNextFieldOffset() / charBitWidth); + structEmitInfo.SetCombineBitFieldValue(0); + structEmitInfo.SetCombineBitFieldWidth(0); +} + +void Emitter::EmitBitFieldConstant(StructEmitInfo &structEmitInfo, MIRConst &mirConst, const MIRType *nextType, + uint64 fieldOffset) { + MIRType &mirType = mirConst.GetType(); + if (fieldOffset > structEmitInfo.GetNextFieldOffset()) { + uint16 curFieldOffset = structEmitInfo.GetNextFieldOffset() - structEmitInfo.GetCombineBitFieldWidth(); + structEmitInfo.SetCombineBitFieldWidth(fieldOffset - curFieldOffset); + EmitCombineBfldValue(structEmitInfo); + ASSERT(structEmitInfo.GetNextFieldOffset() <= fieldOffset, + "structEmitInfo's nextFieldOffset should be <= fieldOffset"); + structEmitInfo.SetNextFieldOffset(fieldOffset); + } + uint32 fieldSize = static_cast(mirType).GetFieldSize(); + MIRIntConst &fieldValue = static_cast(mirConst); + /* Truncate the size of FieldValue to the bit field size. 
*/ + if (fieldSize < fieldValue.GetActualBitWidth()) { + fieldValue.Trunc(fieldSize); + } + /* Clear higher Bits for signed value */ + if (structEmitInfo.GetCombineBitFieldValue() != 0) { + structEmitInfo.SetCombineBitFieldValue((~(~0ULL << structEmitInfo.GetCombineBitFieldWidth())) & + structEmitInfo.GetCombineBitFieldValue()); + } + if (CGOptions::IsBigEndian()) { + uint64 beValue = static_cast(fieldValue.GetExtValue()); + if (fieldValue.IsNegative()) { + beValue = beValue - ((beValue >> fieldSize) << fieldSize); + } + structEmitInfo.SetCombineBitFieldValue( + (structEmitInfo.GetCombineBitFieldValue() << fieldSize) + beValue); + } else { + structEmitInfo.SetCombineBitFieldValue((fieldValue.GetExtValue() << structEmitInfo.GetCombineBitFieldWidth()) + + structEmitInfo.GetCombineBitFieldValue()); + } + structEmitInfo.IncreaseCombineBitFieldWidth(fieldSize); + structEmitInfo.IncreaseNextFieldOffset(fieldSize); + if ((nextType == nullptr) || (kTypeBitField != nextType->GetKind())) { + /* emit structEmitInfo->combineBitFieldValue */ + EmitCombineBfldValue(structEmitInfo); + } +} + +void Emitter::EmitStr(const std::string& mplStr, bool emitAscii, bool emitNewline) { + const char *str = mplStr.c_str(); + size_t len = mplStr.size(); + + if (emitAscii) { + Emit("\t.ascii\t\""); /* Do not terminate with \0 */ + } else { + Emit("\t.string\t\""); + } + + /* + * don't expand special character in a writeout to .s, + * convert all \s to \\s in string for storing in .string + */ + for (size_t i = 0; i < len; i++) { + /* Referred to GNU AS: 3.6.1.1 Strings */ + constexpr int kBufSize = 5; + constexpr int kFirstChar = 0; + constexpr int kSecondChar = 1; + constexpr int kThirdChar = 2; + constexpr int kLastChar = 4; + char buf[kBufSize]; + if (isprint(*str)) { + buf[kFirstChar] = *str; + buf[kSecondChar] = 0; + if (*str == '\\' || *str == '\"') { + buf[kFirstChar] = '\\'; + buf[kSecondChar] = *str; + buf[kThirdChar] = 0; + } + Emit(buf); + } else if (*str == '\b') { + Emit("\\b"); + } else if (*str == '\n') { + Emit("\\n"); + } else if (*str == '\r') { + Emit("\\r"); + } else if (*str == '\t') { + Emit("\\t"); + } else if (*str == '\0') { + buf[kFirstChar] = '\\'; + buf[kSecondChar] = '0'; + buf[kThirdChar] = 0; + Emit(buf); + } else { + /* all others, print as number */ + int ret = snprintf_s(buf, sizeof(buf), k4BitSize, "\\%03o", (*str) & 0xFF); + if (ret < 0) { + FATAL(kLncFatal, "snprintf_s failed"); + } + buf[kLastChar] = '\0'; + Emit(buf); + } + str++; + } + + Emit("\""); + if (emitNewline) { + Emit("\n"); + } +} + +void Emitter::EmitStrConstant(const MIRStrConst &mirStrConst, bool isIndirect) { + if (isIndirect) { + uint32 strId = mirStrConst.GetValue().GetIdx(); + + if (stringPtr.find(mirStrConst.GetValue()) == stringPtr.end()) { + stringPtr.insert(mirStrConst.GetValue()); + } + if (CGOptions::IsArm64ilp32()) { + (void)Emit("\t.word\t").Emit(".LSTR__").Emit(std::to_string(strId).c_str()); + } else { +#if TARGAARCH64 + (void)Emit("\t.xword\t").Emit(".LSTR__").Emit(std::to_string(strId).c_str()); +#elif TARGX86_64 + EmitAsmLabel(kAsmQuad); + (void)Emit(".LSTR__").Emit(std::to_string(strId).c_str()); +#else + CHECK_FATAL(false, "target not supported"); +#endif + } + return; + } + + const std::string ustr = GlobalTables::GetUStrTable().GetStringFromStrIdx(mirStrConst.GetValue()); + size_t len = ustr.size(); + if (isFlexibleArray) { + arraySize += static_cast(len) + 1; + } + EmitStr(ustr, false, false); +} + +void Emitter::EmitStr16Constant(const MIRStr16Const &mirStr16Const) { + Emit("\t.byte "); + /* 
note: for now, u16string is emitted 2 bytes without any \u indication */ + const std::u16string &str16 = GlobalTables::GetU16StrTable().GetStringFromStrIdx(mirStr16Const.GetValue()); + constexpr int bufSize = 9; + char buf[bufSize]; + char16_t c = str16[0]; + /* fetch the type of char16_t c's top 8 bit data */ + int ret1 = snprintf_s(buf, sizeof(buf), bufSize - 1, "%d,%d", (c >> 8) & 0xFF, c & 0xFF); + if (ret1 < 0) { + FATAL(kLncFatal, "snprintf_s failed"); + } + buf[bufSize - 1] = '\0'; + Emit(buf); + for (uint32 i = 1; i < str16.length(); ++i) { + c = str16[i]; + /* fetch the type of char16_t c's top 8 bit data */ + int ret2 = snprintf_s(buf, sizeof(buf), bufSize - 1, ",%d,%d", (c >> 8) & 0xFF, c & 0xFF); + if (ret2 < 0) { + FATAL(kLncFatal, "snprintf_s failed"); + } + buf[bufSize - 1] = '\0'; + Emit(buf); + } + if ((str16.length() & 0x1) == 1) { + Emit(",0,0"); + } +} + +void Emitter::EmitScalarConstant(MIRConst &mirConst, bool newLine, bool flag32, bool isIndirect) { + MIRType &mirType = mirConst.GetType(); + AsmLabel asmName = GetTypeAsmInfoName(mirType.GetPrimType()); + switch (mirConst.GetKind()) { + case kConstInt: { + MIRIntConst &intCt = static_cast(mirConst); + uint32 sizeInBits = GetPrimTypeBitSize(mirType.GetPrimType()); + if (intCt.GetActualBitWidth() > sizeInBits) { + intCt.Trunc(sizeInBits); + } + if (flag32) { + EmitAsmLabel(AsmLabel::kAsmLong); + } else { + EmitAsmLabel(asmName); + } + Emit(intCt.GetValue()); + if (isFlexibleArray) { + arraySize += (sizeInBits / kBitsPerByte); + } + break; + } + case kConstFloatConst: { + MIRFloatConst &floatCt = static_cast(mirConst); + EmitAsmLabel(asmName); + Emit(std::to_string(floatCt.GetIntValue())); + if (isFlexibleArray) { + arraySize += k4ByteFloatSize; + } + break; + } + case kConstDoubleConst: { + MIRDoubleConst &doubleCt = static_cast(mirConst); + EmitAsmLabel(asmName); + Emit(std::to_string(doubleCt.GetIntValue())); + if (isFlexibleArray) { + arraySize += k8ByteDoubleSize; + } + break; + } + case kConstStrConst: { + MIRStrConst &strCt = static_cast(mirConst); + if (cg->GetMIRModule()->IsCModule()) { + EmitStrConstant(strCt, isIndirect); + } else { + EmitStrConstant(strCt); + } + break; + } + case kConstStr16Const: { + MIRStr16Const &str16Ct = static_cast(mirConst); + EmitStr16Constant(str16Ct); + break; + } + case kConstAddrof: { + MIRAddrofConst &symAddr = static_cast(mirConst); + StIdx stIdx = symAddr.GetSymbolIndex(); + MIRSymbol *symAddrSym = stIdx.IsGlobal() ? 
GlobalTables::GetGsymTable().GetSymbolFromStidx(stIdx.Idx()) + : CG::GetCurCGFunc()->GetMirModule().CurFunction()->GetSymTab()->GetSymbolFromStIdx(stIdx.Idx()); + ASSERT(symAddrSym != nullptr, "null ptr check"); + std::string str; + if (CGOptions::IsArm64ilp32()) { + str = ".word"; + } else { + str = ".quad"; + } + if (stIdx.IsGlobal() == false && symAddrSym->GetStorageClass() == kScPstatic) { + PUIdx pIdx = GetCG()->GetMIRModule()->CurFunction()->GetPuidx(); + (void)Emit("\t" + str + "\t" + symAddrSym->GetName() + std::to_string(pIdx)); + } else { + (void)Emit("\t" + str + "\t" + symAddrSym->GetName()); + } + if (symAddr.GetOffset() != 0) { + (void)Emit(" + ").Emit(symAddr.GetOffset()); + } + if (symAddr.GetFieldID() > 1) { + MIRStructType *structType = static_cast(symAddrSym->GetType()); + ASSERT(structType != nullptr, "EmitScalarConstant: non-zero fieldID for non-structure"); + (void)Emit(" + ").Emit(Globals::GetInstance()->GetBECommon()->GetFieldOffset( + *structType, symAddr.GetFieldID()).first); + } + break; + } + case kConstAddrofFunc: { + MIRAddroffuncConst &funcAddr = static_cast(mirConst); + MIRFunction *func = GlobalTables::GetFunctionTable().GetFuncTable().at(funcAddr.GetValue()); + MIRSymbol *symAddrSym = GlobalTables::GetGsymTable().GetSymbolFromStidx(func->GetStIdx().Idx()); + std::string str; + if (CGOptions::IsArm64ilp32()) { + str = ".word"; + } else { + str = ".quad"; + } + (void)Emit("\t" + str + "\t" + symAddrSym->GetName()); + break; + } + case kConstLblConst: { + MIRLblConst &lbl = static_cast(mirConst); + if (CGOptions::IsArm64ilp32()) { + (void)Emit("\t.word\t"); + } else { + EmitAsmLabel(kAsmQuad); + } + EmitLabelRef(lbl.GetValue()); + break; + } + default: + ASSERT(false, "NYI"); + break; + } + if (newLine) { + Emit("\n"); + } +} + +void Emitter::EmitAddrofFuncConst(const MIRSymbol &mirSymbol, MIRConst &elemConst, size_t idx) { + MIRAddroffuncConst &funcAddr = static_cast(elemConst); + const std::string stName = mirSymbol.GetName(); + MIRFunction *func = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(funcAddr.GetValue()); + const std::string &funcName = func->GetName(); + if ((idx == kFuncDefNameIndex) && mirSymbol.IsMuidFuncInfTab()) { + Emit("\t.long\t.Label.name."); + Emit(funcName + " - ."); + Emit("\n"); + return; + } + if ((idx == kFuncDefSizeIndex) && mirSymbol.IsMuidFuncInfTab()) { + Emit("\t.long\t.Label.end."); + Emit(funcName + " - "); + Emit(funcName + "\n"); + return; + } + if ((idx == static_cast(MethodProperty::kPaddrData)) && mirSymbol.IsReflectionMethodsInfo()) { +#ifdef USE_32BIT_REF + Emit("\t.long\t"); +#else + +#if TARGAARCH64 || TARGRISCV64 || TARGX86_64 + EmitAsmLabel(kAsmQuad); +#else + Emit("\t.word\t"); +#endif + +#endif /* USE_32BIT_REF */ + Emit(funcName + " - .\n"); + return; + } + if (((idx == static_cast(MethodInfoCompact::kPaddrData)) && mirSymbol.IsReflectionMethodsInfoCompact()) || + ((idx == static_cast(ClassRO::kClinitAddr)) && mirSymbol.IsReflectionClassInfoRO())) { + Emit("\t.long\t"); + Emit(funcName + " - .\n"); + return; + } + + if (mirSymbol.IsReflectionMethodAddrData()) { +#ifdef USE_32BIT_REF + Emit("\t.long\t"); +#else + +#if TARGAARCH64 || TARGRISCV64 || TARGX86_64 + EmitAsmLabel(kAsmQuad); +#else + Emit("\t.word\t"); +#endif + +#endif /* USE_32BIT_REF */ + Emit(funcName + " - .\n"); + return; + } + + if (idx == kFuncDefAddrIndex && mirSymbol.IsMuidFuncDefTab()) { +#if defined(USE_32BIT_REF) + Emit("\t.long\t"); +#else + +#if TARGAARCH64 || TARGRISCV64 || TARGX86_64 + EmitAsmLabel(kAsmQuad); +#else + 
Emit("\t.word\t"); +#endif + +#endif /* USE_32BIT_REF */ + if (CGOptions::IsLazyBinding() && !cg->IsLibcore()) { + /* + * Check enum BindingState defined in Mpl_Binding.h, + * 6 means kBindingStateMethodDef:6 offset away from base __BindingProtectRegion__. + */ +#if defined(USE_32BIT_REF) + Emit("0x6\n"); /* Fix it in runtime, __BindingProtectRegion__ + kBindingStateMethodDef:6. */ +#else + Emit("__BindingProtectRegion__ + 6\n"); +#endif /* USE_32BIT_REF */ + } else { +#if defined(USE_32BIT_REF) +#if defined(MPL_LNK_ADDRESS_VIA_BASE) + Emit(funcName + "\n"); +#else /* MPL_LNK_ADDRESS_VIA_BASE */ + Emit(funcName + "-.\n"); +#endif /* MPL_LNK_ADDRESS_VIA_BASE */ +#else /* USE_32BIT_REF */ + Emit(funcName + "\n"); +#endif /* USE_32BIT_REF */ + } + return; + } + + if (idx == kFuncDefAddrIndex && mirSymbol.IsMuidFuncDefOrigTab()) { + if (CGOptions::IsLazyBinding() && !cg->IsLibcore()) { +#if defined(USE_32BIT_REF) + Emit("\t.long\t"); +#else + +#if TARGAARCH64 || TARGRISCV64 || TARGX86_64 + EmitAsmLabel(kAsmQuad); +#else + Emit("\t.word\t"); +#endif + +#endif /* USE_32BIT_REF */ +#if defined(USE_32BIT_REF) +#if defined(MPL_LNK_ADDRESS_VIA_BASE) + Emit(funcName + "\n"); +#else /* MPL_LNK_ADDRESS_VIA_BASE */ + Emit(funcName + "-.\n"); +#endif /* MPL_LNK_ADDRESS_VIA_BASE */ +#else /* USE_32BIT_REF */ + Emit(funcName + "\n"); +#endif /* USE_32BIT_REF */ + } + return; + } + +#if TARGAARCH64 || TARGRISCV64 || TARGX86_64 + EmitAsmLabel(kAsmQuad); +#else + Emit("\t.word\t"); +#endif + Emit(funcName); + if ((stName.find(VTAB_PREFIX_STR) == 0) || (stName.find(ITAB_PREFIX_STR) == 0) || + (stName.find(ITAB_CONFLICT_PREFIX_STR) == 0)) { + Emit(" - .\n"); + return; + } + if (cg->GetCGOptions().GeneratePositionIndependentExecutable()) { + Emit(" - "); + Emit(stName); + } + Emit("\n"); +} + +void Emitter::EmitAddrofSymbolConst(const MIRSymbol &mirSymbol, MIRConst &elemConst, size_t idx) { + MIRAddrofConst &symAddr = static_cast(elemConst); + const std::string stName = mirSymbol.GetName(); + + MIRSymbol *symAddrSym = GlobalTables::GetGsymTable().GetSymbolFromStidx(symAddr.GetSymbolIndex().Idx()); + const std::string &symAddrName = symAddrSym->GetName(); + + if (((idx == static_cast(FieldProperty::kPOffset)) && mirSymbol.IsReflectionFieldsInfo()) || + mirSymbol.IsReflectionFieldOffsetData()) { +#if USE_32BIT_REF + Emit("\t.long\t"); +#else + +#if TARGAARCH64 || TARGRISCV64 || TARGX86_64 + EmitAsmLabel(kAsmQuad); +#else + Emit("\t.word\t"); +#endif + +#endif /* USE_32BIT_REF */ + Emit(symAddrName + " - .\n"); + return; + } + + if (((idx == static_cast(FieldPropertyCompact::kPOffset)) && mirSymbol.IsReflectionFieldsInfoCompact()) || + ((idx == static_cast(MethodProperty::kSigName)) && mirSymbol.IsReflectionMethodsInfo()) || + ((idx == static_cast(MethodSignatureProperty::kParameterTypes)) && + mirSymbol.IsReflectionMethodSignature())) { + Emit("\t.long\t"); + Emit(symAddrName + " - .\n"); + return; + } + + if (((idx == static_cast(MethodProperty::kDeclarclass)) || + (idx == static_cast(MethodProperty::kPaddrData))) && mirSymbol.IsReflectionMethodsInfo()) { +#if USE_32BIT_REF + Emit("\t.long\t"); +#else + +#if TARGAARCH64 || TARGRISCV64 || TARGX86_64 + EmitAsmLabel(kAsmQuad); +#else + Emit("\t.word\t"); +#endif + +#endif /* USE_32BIT_REF */ + if (idx == static_cast(MethodProperty::kDeclarclass)) { + Emit(symAddrName + " - .\n"); + } else { + Emit(symAddrName + " - . 
+ 2\n"); + } + return; + } + + if ((idx == static_cast(MethodInfoCompact::kPaddrData)) && mirSymbol.IsReflectionMethodsInfoCompact()) { + Emit("\t.long\t"); + Emit(symAddrName + " - . + 2\n"); + return; + } + + if ((idx == static_cast(FieldProperty::kDeclarclass)) && mirSymbol.IsReflectionFieldsInfo()) { +#if USE_32BIT_REF + Emit("\t.long\t"); +#else + +#if TARGAARCH64 || TARGRISCV64 || TARGX86_64 + EmitAsmLabel(kAsmQuad); +#else + Emit("\t.word\t"); +#endif + +#endif /* USE_32BIT_REF */ + Emit(symAddrName + " - .\n"); + return; + } + + if ((idx == kDataDefAddrIndex) && (mirSymbol.IsMuidDataUndefTab() || mirSymbol.IsMuidDataDefTab())) { + if (symAddrSym->IsReflectionClassInfo()) { + Emit(".LDW.ref." + symAddrName + ":\n"); + } + Emit(kPtrPrefixStr + symAddrName + ":\n"); +#if defined(USE_32BIT_REF) + Emit("\t.long\t"); +#else + +#if TARGAARCH64 || TARGRISCV64 || TARGX86_64 + EmitAsmLabel(kAsmQuad); +#else + Emit("\t.word\t"); +#endif + +#endif /* USE_32BIT_REF */ + if (mirSymbol.IsMuidDataUndefTab()) { + if (CGOptions::IsLazyBinding() && !cg->IsLibcore()) { + if (symAddrSym->IsReflectionClassInfo()) { + /* + * Check enum BindingState defined in Mpl_Binding.h, + * 1 means kBindingStateCinfUndef:1 offset away from base __BindingProtectRegion__. + */ +#if defined(USE_32BIT_REF) + Emit("0x1\n"); /* Fix it in runtime, __BindingProtectRegion__ + kBindingStateCinfUndef:1. */ +#else + Emit("__BindingProtectRegion__ + 1\n"); +#endif /* USE_32BIT_REF */ + } else { + /* + * Check enum BindingState defined in Mpl_Binding.h, + * 3 means kBindingStateDataUndef:3 offset away from base __BindingProtectRegion__. + */ +#if defined(USE_32BIT_REF) + Emit("0x3\n"); /* Fix it in runtime, __BindingProtectRegion__ + kBindingStateDataUndef:3. */ +#else + Emit("__BindingProtectRegion__ + 3\n"); +#endif /* USE_32BIT_REF */ + } + } else { + Emit("0\n"); + } + } else { + if (CGOptions::IsLazyBinding() && !cg->IsLibcore()) { + if (symAddrSym->IsReflectionClassInfo()) { + /* + * Check enum BindingState defined in Mpl_Binding.h, + * 2 means kBindingStateCinfDef:2 offset away from base __BindingProtectRegion__. + */ +#if defined(USE_32BIT_REF) + Emit("0x2\n"); /* Fix it in runtime, __BindingProtectRegion__ + kBindingStateCinfDef:2. */ +#else + Emit("__BindingProtectRegion__ + 2\n"); +#endif /* USE_32BIT_REF */ + } else { + /* + * Check enum BindingState defined in Mpl_Binding.h, + * 4 means kBindingStateDataDef:4 offset away from base __BindingProtectRegion__. + */ +#if defined(USE_32BIT_REF) + Emit("0x4\n"); /* Fix it in runtime, __BindingProtectRegion__ + kBindingStateDataDef:4. 
*/ +#else + Emit("__BindingProtectRegion__ + 4\n"); +#endif /* USE_32BIT_REF */ + } + } else { +#if defined(USE_32BIT_REF) +#if defined(MPL_LNK_ADDRESS_VIA_BASE) + Emit(symAddrName + "\n"); +#else /* MPL_LNK_ADDRESS_VIA_BASE */ + Emit(symAddrName + "-.\n"); +#endif /* MPL_LNK_ADDRESS_VIA_BASE */ +#else /* USE_32BIT_REF */ + Emit(symAddrName + "\n"); +#endif /* USE_32BIT_REF */ + } + } + return; + } + + if (idx == kDataDefAddrIndex && mirSymbol.IsMuidDataDefOrigTab()) { + if (CGOptions::IsLazyBinding() && !cg->IsLibcore()) { +#if defined(USE_32BIT_REF) + Emit("\t.long\t"); +#else + +#if TARGAARCH64 || TARGRISCV64 || TARGX86_64 + EmitAsmLabel(kAsmQuad); +#else + Emit("\t.word\t"); +#endif + +#endif /* USE_32BIT_REF */ + +#if defined(USE_32BIT_REF) +#if defined(MPL_LNK_ADDRESS_VIA_BASE) + Emit(symAddrName + "\n"); +#else /* MPL_LNK_ADDRESS_VIA_BASE */ + Emit(symAddrName + "-.\n"); +#endif /* MPL_LNK_ADDRESS_VIA_BASE */ +#else /* USE_32BIT_REF */ + Emit(symAddrName + "\n"); +#endif /* USE_32BIT_REF */ + } + return; + } + + if (StringUtils::StartsWith(stName, kLocalClassInfoStr)) { +#if TARGAARCH64 || TARGRISCV64 || TARGX86_64 + EmitAsmLabel(kAsmQuad); +#else + Emit("\t.word\t"); +#endif + Emit(symAddrName); + Emit(" - . + ").Emit(kDataRefIsOffset); + Emit("\n"); + return; + } +#ifdef USE_32BIT_REF + if (mirSymbol.IsReflectionHashTabBucket() || (stName.find(ITAB_PREFIX_STR) == 0) || + (mirSymbol.IsReflectionClassInfo() && (idx == static_cast(ClassProperty::kInfoRo)))) { + Emit("\t.word\t"); + } else { +#if TARGAARCH64 || TARGRISCV64 || TARGX86_64 + EmitAsmLabel(kAsmQuad); +#else + Emit("\t.word\t"); +#endif + } +#else + +#if TARGAARCH64 || TARGRISCV64 || TARGX86_64 + EmitAsmLabel(kAsmQuad); +#else + Emit("\t.word\t"); +#endif + +#endif /* USE_32BIT_REF */ + + if ((stName.find(ITAB_CONFLICT_PREFIX_STR) == 0) || (stName.find(ITAB_PREFIX_STR) == 0)) { + Emit(symAddrName + " - .\n"); + return; + } + if (mirSymbol.IsMuidRangeTab()) { + if (idx == kRangeBeginIndex) { + Emit(symAddrSym->GetMuidTabName() + "_begin\n"); + } else { + Emit(symAddrSym->GetMuidTabName() + "_end\n"); + } + return; + } + + if (symAddrName.find(GCTIB_PREFIX_STR) == 0) { + Emit(cg->FindGCTIBPatternName(symAddrName)); + } else { + Emit(symAddrName); + } + + if ((((idx == static_cast(ClassRO::kIfields)) || (idx == static_cast(ClassRO::kMethods))) && + mirSymbol.IsReflectionClassInfoRO()) || + mirSymbol.IsReflectionHashTabBucket()) { + Emit(" - ."); + if (symAddrSym->IsReflectionFieldsInfoCompact() || + symAddrSym->IsReflectionMethodsInfoCompact()) { + /* Mark the least significant bit as 1 for compact fieldinfo */ + Emit(" + ").Emit(MethodFieldRef::kMethodFieldRefIsCompact); + } + } else if (mirSymbol.IsReflectionClassInfo()) { + if ((idx == static_cast(ClassProperty::kItab)) || + (idx == static_cast(ClassProperty::kVtab)) || + (idx == static_cast(ClassProperty::kInfoRo))) { + Emit(" - . + ").Emit(kDataRefIsOffset); + } else if (idx == static_cast(ClassProperty::kGctib)) { + if (cg->FindGCTIBPatternName(symAddrName).find(REF_PREFIX_STR) == 0) { + Emit(" - . + ").Emit(kGctibRefIsIndirect); + } else { + Emit(" - ."); + } + } + } else if (mirSymbol.IsReflectionClassInfoRO()) { + if (idx == static_cast(ClassRO::kSuperclass)) { + Emit(" - . 
+ ").Emit(kDataRefIsOffset); + } + } + + if (cg->GetCGOptions().GeneratePositionIndependentExecutable()) { + Emit(" - "); + Emit(stName); + } + Emit("\n"); +} + +MIRAddroffuncConst *Emitter::GetAddroffuncConst(const MIRSymbol &mirSymbol, MIRAggConst &aggConst) const { + MIRAddroffuncConst *innerFuncAddr = nullptr; + size_t addrIndex = mirSymbol.IsReflectionMethodsInfo() ? static_cast(MethodProperty::kPaddrData) : + static_cast(MethodInfoCompact::kPaddrData); + MIRConst *pAddrConst = aggConst.GetConstVecItem(addrIndex); + if (pAddrConst->GetKind() == kConstAddrof) { + /* point addr data. */ + MIRAddrofConst *pAddr = safe_cast(pAddrConst); + MIRSymbol *symAddrSym = GlobalTables::GetGsymTable().GetSymbolFromStidx(pAddr->GetSymbolIndex().Idx()); + MIRAggConst *methodAddrAggConst = safe_cast(symAddrSym->GetKonst()); + MIRAggConst *addrAggConst = safe_cast(methodAddrAggConst->GetConstVecItem(0)); + MIRConst *funcAddrConst = addrAggConst->GetConstVecItem(0); + if (funcAddrConst->GetKind() == kConstAddrofFunc) { + /* func sybmol. */ + innerFuncAddr = safe_cast(funcAddrConst); + } else if (funcAddrConst->GetKind() == kConstInt) { + /* def table index, replaced by def table for lazybinding. */ + std::string funcDefTabName = namemangler::kMuidFuncDefTabPrefixStr + cg->GetMIRModule()->GetFileNameAsPostfix(); + MIRSymbol *funDefTabSy = GlobalTables::GetGsymTable().GetSymbolFromStrIdx( + GlobalTables::GetStrTable().GetStrIdxFromName(funcDefTabName)); + MIRAggConst &funDefTabAggConst = static_cast(*funDefTabSy->GetKonst()); + MIRIntConst *funcAddrIndexConst = safe_cast(funcAddrConst); + uint64 indexDefTab = static_cast(funcAddrIndexConst->GetExtValue()); + MIRAggConst *defTabAggConst = safe_cast(funDefTabAggConst.GetConstVecItem(indexDefTab)); + MIRConst *funcConst = defTabAggConst->GetConstVecItem(0); + if (funcConst->GetKind() == kConstAddrofFunc) { + innerFuncAddr = safe_cast(funcConst); + } + } + } else if (pAddrConst->GetKind() == kConstAddrofFunc) { + innerFuncAddr = safe_cast(pAddrConst); + } + return innerFuncAddr; +} + +int64 Emitter::GetFieldOffsetValue(const std::string &className, const MIRIntConst &intConst, + const std::map &strIdx2Type) const { + uint64 idx = static_cast(intConst.GetExtValue()); + bool isDefTabIndex = ((idx & 0x1) != 0); + int64 fieldIdx = static_cast(idx >> 1); + if (isDefTabIndex) { + /* it's def table index. */ + return fieldIdx; + } else { + /* really offset. 
*/ + uint8 charBitWidth = GetPrimTypeSize(PTY_i8) * kBitsPerByte; + GStrIdx strIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(className); + auto it = strIdx2Type.find(strIdx); + CHECK_FATAL(it->second != nullptr, "valid iterator check"); + ASSERT(it != strIdx2Type.end(), "Can not find type"); + MIRType &ty = *it->second; + MIRStructType &structType = static_cast(ty); + std::pair fieldOffsetPair = + Globals::GetInstance()->GetBECommon()->GetFieldOffset(structType, fieldIdx); + int64 fieldOffset = fieldOffsetPair.first * static_cast(charBitWidth) + fieldOffsetPair.second; + return fieldOffset; + } +} + +void Emitter::InitRangeIdx2PerfixStr() { + rangeIdx2PrefixStr[RangeIdx::kVtabAndItab] = kMuidVtabAndItabPrefixStr; + rangeIdx2PrefixStr[RangeIdx::kItabConflict] = kMuidItabConflictPrefixStr; + rangeIdx2PrefixStr[RangeIdx::kVtabOffset] = kMuidVtabOffsetPrefixStr; + rangeIdx2PrefixStr[RangeIdx::kFieldOffset] = kMuidFieldOffsetPrefixStr; + rangeIdx2PrefixStr[RangeIdx::kValueOffset] = kMuidValueOffsetPrefixStr; + rangeIdx2PrefixStr[RangeIdx::kLocalClassInfo] = kMuidLocalClassInfoStr; + rangeIdx2PrefixStr[RangeIdx::kConststr] = kMuidConststrPrefixStr; + rangeIdx2PrefixStr[RangeIdx::kSuperclass] = kMuidSuperclassPrefixStr; + rangeIdx2PrefixStr[RangeIdx::kGlobalRootlist] = kMuidGlobalRootlistPrefixStr; + rangeIdx2PrefixStr[RangeIdx::kClassmetaData] = kMuidClassMetadataPrefixStr; + rangeIdx2PrefixStr[RangeIdx::kClassBucket] = kMuidClassMetadataBucketPrefixStr; + rangeIdx2PrefixStr[RangeIdx::kJavatext] = kMuidJavatextPrefixStr; + rangeIdx2PrefixStr[RangeIdx::kDataSection] = kMuidDataSectionStr; + rangeIdx2PrefixStr[RangeIdx::kJavajni] = kRegJNITabPrefixStr; + rangeIdx2PrefixStr[RangeIdx::kJavajniFunc] = kRegJNIFuncTabPrefixStr; + rangeIdx2PrefixStr[RangeIdx::kDecoupleStaticKey] = kDecoupleStaticKeyStr; + rangeIdx2PrefixStr[RangeIdx::kDecoupleStaticValue] = kDecoupleStaticValueStr; + rangeIdx2PrefixStr[RangeIdx::kBssStart] = kBssSectionStr; + rangeIdx2PrefixStr[RangeIdx::kLinkerSoHash] = kLinkerHashSoStr; + rangeIdx2PrefixStr[RangeIdx::kArrayClassCache] = kArrayClassCacheTable; + rangeIdx2PrefixStr[RangeIdx::kArrayClassCacheName] = kArrayClassCacheNameTable; +} + +void Emitter::EmitIntConst(const MIRSymbol &mirSymbol, MIRAggConst &aggConst, uint32 itabConflictIndex, + const std::map &strIdx2Type, size_t idx) { + MIRConst *elemConst = aggConst.GetConstVecItem(idx); + const std::string stName = mirSymbol.GetName(); + + MIRIntConst *intConst = safe_cast(elemConst); + ASSERT(intConst != nullptr, "Uexpected const type"); + + /* ignore abstract function addr */ + if ((idx == static_cast(MethodInfoCompact::kPaddrData)) && mirSymbol.IsReflectionMethodsInfoCompact()) { + return; + } + + if (((idx == static_cast(MethodProperty::kVtabIndex)) && (mirSymbol.IsReflectionMethodsInfo())) || + ((idx == static_cast(MethodInfoCompact::kVtabIndex)) && mirSymbol.IsReflectionMethodsInfoCompact())) { + MIRAddroffuncConst *innerFuncAddr = GetAddroffuncConst(mirSymbol, aggConst); + if (innerFuncAddr != nullptr) { + Emit(".Label.name." 
+ GlobalTables::GetFunctionTable().GetFunctionFromPuidx( + innerFuncAddr->GetValue())->GetName()); + Emit(":\n"); + } + } + /* refer to DeCouple::GenOffsetTableType */ + constexpr int fieldTypeIdx = 2; + constexpr int methodTypeIdx = 2; + bool isClassInfo = (idx == static_cast(ClassRO::kClassName) || + idx == static_cast(ClassRO::kAnnotation)) && mirSymbol.IsReflectionClassInfoRO(); + bool isMethodsInfo = (idx == static_cast(MethodProperty::kMethodName) || + idx == static_cast(MethodProperty::kSigName) || + idx == static_cast(MethodProperty::kAnnotation)) && mirSymbol.IsReflectionMethodsInfo(); + bool isFieldsInfo = (idx == static_cast(FieldProperty::kTypeName) || + idx == static_cast(FieldProperty::kName) || + idx == static_cast(FieldProperty::kAnnotation)) && mirSymbol.IsReflectionFieldsInfo(); + bool isMethodSignature = (idx == static_cast(MethodSignatureProperty::kSignatureOffset)) && + mirSymbol.IsReflectionMethodSignature(); + /* RegisterTable has been Int Array, visit element instead of field. */ + bool isInOffsetTab = (idx == 1 || idx == methodTypeIdx) && + (StringUtils::StartsWith(stName, kVtabOffsetTabStr) || + StringUtils::StartsWith(stName, kFieldOffsetTabStr)); + /* The 1 && 2 of Decouple static struct is the string name */ + bool isStaticStr = (idx == 1 || idx == 2) && aggConst.GetConstVec().size() == kSizeOfDecoupleStaticStruct && + StringUtils::StartsWith(stName, kDecoupleStaticKeyStr); + /* process conflict table index larger than itabConflictIndex * 2 + 2 element */ + bool isConflictPerfix = (idx >= (static_cast(itabConflictIndex) * 2 + 2)) && (idx % 2 == 0) && + StringUtils::StartsWith(stName, ITAB_CONFLICT_PREFIX_STR); + bool isArrayClassCacheName = mirSymbol.IsArrayClassCacheName(); + if (isClassInfo || isMethodsInfo || isFieldsInfo || mirSymbol.IsRegJNITab() || isInOffsetTab || + isStaticStr || isConflictPerfix || isArrayClassCacheName || isMethodSignature) { + /* compare with all 1s */ + uint32 index = static_cast((safe_cast(elemConst))->GetExtValue()) & 0xFFFFFFFF; + bool isHotReflectStr = (index & 0x00000003) != 0; /* use the last two bits of index in this expression */ + std::string hotStr; + if (isHotReflectStr) { + uint32 tag = (index & 0x00000003) - kCStringShift; /* use the last two bits of index in this expression */ + if (tag == kLayoutBootHot) { + hotStr = kReflectionStartHotStrtabPrefixStr; + } else if (tag == kLayoutBothHot) { + hotStr = kReflectionBothHotStrTabPrefixStr; + } else { + hotStr = kReflectionRunHotStrtabPrefixStr; + } + } + std::string reflectStrTabPrefix = isHotReflectStr ? 
hotStr : kReflectionStrtabPrefixStr; + std::string strTabName = reflectStrTabPrefix + cg->GetMIRModule()->GetFileNameAsPostfix(); + /* left shift 2 bit to get low 30 bit data for MIRIntConst */ + elemConst = GlobalTables::GetIntConstTable().GetOrCreateIntConst(index >> 2, elemConst->GetType()); + intConst = safe_cast(elemConst); + aggConst.SetItem(static_cast(idx), intConst, aggConst.GetFieldIdItem(idx)); +#ifdef USE_32BIT_REF + if (stName.find(ITAB_CONFLICT_PREFIX_STR) == 0) { + EmitScalarConstant(*elemConst, false, true); + } else { + EmitScalarConstant(*elemConst, false); + } +#else + EmitScalarConstant(*elemConst, false); +#endif /* USE_32BIT_REF */ + Emit("+" + strTabName); + if (mirSymbol.IsRegJNITab() || mirSymbol.IsReflectionMethodsInfo() || mirSymbol.IsReflectionFieldsInfo() || + mirSymbol.IsArrayClassCacheName() || mirSymbol.IsReflectionMethodSignature()) { + Emit("-."); + } + if (StringUtils::StartsWith(stName, kDecoupleStaticKeyStr)) { + Emit("-."); + } + if (mirSymbol.IsReflectionClassInfoRO()) { + if (idx == static_cast(ClassRO::kAnnotation)) { + Emit("-."); + } else if (idx == static_cast(ClassRO::kClassName)) { + /* output in hex format to show it is a flag of bits. */ + std::stringstream ss; + ss << std::hex << "0x" << MByteRef::kPositiveOffsetBias; + Emit(" - . + " + ss.str()); + } + } + if (StringUtils::StartsWith(stName, ITAB_PREFIX_STR)) { + Emit("-."); + } + if (StringUtils::StartsWith(stName, ITAB_CONFLICT_PREFIX_STR)) { + /* output in hex format to show it is a flag of bits. */ + std::stringstream ss; + ss << std::hex << "0x" << MByteRef32::kPositiveOffsetBias; + Emit(" - . + " + ss.str()); + } + if ((idx == 1 || idx == methodTypeIdx) && StringUtils::StartsWith(stName, kVtabOffsetTabStr)) { + Emit("-."); + } + if ((idx == 1 || idx == fieldTypeIdx) && StringUtils::StartsWith(stName, kFieldOffsetTabStr)) { + Emit("-."); + } + Emit("\n"); + } else if (idx == kFuncDefAddrIndex && mirSymbol.IsMuidFuncUndefTab()) { +#if defined(USE_32BIT_REF) + Emit("\t.long\t"); +#else + EmitAsmLabel(kAsmQuad); +#endif /* USE_32BIT_REF */ + if (CGOptions::IsLazyBinding() && !cg->IsLibcore()) { + /* + * Check enum BindingState defined in Mpl_Binding.h, + * 5 means kBindingStateMethodUndef:5 offset away from base __BindingProtectRegion__. + */ +#if defined(USE_32BIT_REF) + Emit("0x5\n"); /* Fix it in runtime, __BindingProtectRegion__ + kBindingStateMethodUndef:5. */ +#else + Emit("__BindingProtectRegion__ + 5\n"); +#endif /* USE_32BIT_REF */ + } else { + Emit("0\n"); + } + } else if (idx == static_cast(FieldProperty::kPClassType) && mirSymbol.IsReflectionFieldsInfo()) { +#ifdef USE_32BIT_REF + Emit("\t.long\t"); + const int width = 4; +#else + EmitAsmLabel(kAsmQuad); + const int width = 8; +#endif /* USE_32BIT_REF */ + uint32 muidDataTabAddr = static_cast((safe_cast(elemConst))->GetExtValue()); + if (muidDataTabAddr != 0) { + bool isDefTabIndex = (muidDataTabAddr & kFromDefIndexMask32Mod) == kFromDefIndexMask32Mod; + std::string muidDataTabPrefix = isDefTabIndex ? 
kMuidDataDefTabPrefixStr : kMuidDataUndefTabPrefixStr; + std::string muidDataTabName = muidDataTabPrefix + cg->GetMIRModule()->GetFileNameAsPostfix(); + (void)Emit(muidDataTabName + "+"); + uint32 muidDataTabIndex = muidDataTabAddr & 0x3FFFFFFF; /* high 2 bit is the mask of muid tab */ + (void)Emit(std::to_string(muidDataTabIndex * width)); + (void)Emit("-.\n"); + } else { + (void)Emit(muidDataTabAddr); + Emit("\n"); + } + return; + } else if (mirSymbol.IsRegJNIFuncTab()) { + std::string strTabName = kRegJNITabPrefixStr + cg->GetMIRModule()->GetFileNameAsPostfix(); + EmitScalarConstant(*elemConst, false); +#ifdef TARGARM32 + (void)Emit("+" + strTabName).Emit("+").Emit(MByteRef::kPositiveOffsetBias).Emit("-.\n"); +#else + Emit("+" + strTabName + "\n"); +#endif + } else if (mirSymbol.IsReflectionMethodAddrData()) { +#ifdef USE_32BIT_REF + Emit("\t.long\t"); +#else + EmitAsmLabel(kAsmQuad); +#endif /* USE_32BIT_REF */ + Emit(intConst->GetValue()); + Emit("\n"); + } else if (mirSymbol.IsReflectionFieldOffsetData()) { + /* Figure out instance field offset now. */ + size_t prefixStrLen = strlen(kFieldOffsetDataPrefixStr); + size_t pos = stName.find("_FieldID_"); + std::string typeName = stName.substr(prefixStrLen, pos - prefixStrLen); +#ifdef USE_32BIT_REF + std::string widthFlag = ".long"; +#else + std::string widthFlag = ".quad"; +#endif /* USE_32BIT_REF */ + int64 fieldOffset = GetFieldOffsetValue(typeName, *intConst, strIdx2Type); + uint64 fieldIdx = static_cast(intConst->GetExtValue()); + bool isDefTabIndex = ((fieldIdx & 0x1) != 0); + if (isDefTabIndex) { + /* it's def table index. */ + Emit("\t// " + typeName + " static field, data def table index " + std::to_string(fieldOffset) + "\n"); + } else { + /* really offset. */ + fieldIdx >>= 1; + Emit("\t// " + typeName + "\t field" + std::to_string(fieldIdx) + "\n"); + } + Emit("\t" + widthFlag + "\t" + std::to_string(fieldOffset) + "\n"); + } else if (((idx == static_cast(FieldProperty::kPOffset)) && mirSymbol.IsReflectionFieldsInfo()) || + ((idx == static_cast(FieldPropertyCompact::kPOffset)) && + mirSymbol.IsReflectionFieldsInfoCompact())) { + std::string typeName; + std::string widthFlag; +#ifdef USE_32BIT_REF + const int width = 4; +#else + const int width = 8; +#endif /* USE_32BIT_REF */ + if (mirSymbol.IsReflectionFieldsInfo()) { + typeName = stName.substr(strlen(kFieldsInfoPrefixStr)); +#ifdef USE_32BIT_REF + widthFlag = ".long"; +#else + widthFlag = ".quad"; +#endif /* USE_32BIT_REF */ + } else { + size_t prefixStrLen = strlen(kFieldsInfoCompactPrefixStr); + typeName = stName.substr(prefixStrLen); + widthFlag = ".long"; + } + int64 fieldIdx = intConst->GetExtValue(); + MIRSymbol *pOffsetData = GlobalTables::GetGsymTable().GetSymbolFromStrIdx( + GlobalTables::GetStrTable().GetStrIdxFromName(kFieldOffsetDataPrefixStr + typeName)); + if (pOffsetData != nullptr) { + fieldIdx *= width; + std::string fieldOffset = kFieldOffsetDataPrefixStr + typeName; + Emit("\t" + widthFlag + "\t" + std::to_string(fieldIdx) + " + " + fieldOffset + " - .\n"); + } else { + /* pOffsetData null, means FieldMeta.offset is really offset */ + int64 fieldOffset = GetFieldOffsetValue(typeName, *intConst, strIdx2Type); + Emit("\t// " + typeName + "\t field" + std::to_string(fieldIdx) + "\n"); + Emit("\t" + widthFlag + "\t" + std::to_string(fieldOffset) + "\n"); + } + } else if ((idx == static_cast(ClassProperty::kObjsize)) && mirSymbol.IsReflectionClassInfo()) { + std::string delimiter = "$$"; + std::string typeName = + stName.substr(strlen(CLASSINFO_PREFIX_STR), 
stName.find(delimiter) - strlen(CLASSINFO_PREFIX_STR)); + uint32 objSize = 0; + std::string comments; + + if (typeName.size() > 1 && typeName[0] == '$') { + /* fill element size for array class; */ + std::string newTypeName = typeName.substr(1); + /* another $(arraysplitter) */ + if (newTypeName.find("$") == std::string::npos) { + CHECK_FATAL(false, "can not find $ in std::string"); + } + typeName = newTypeName.substr(newTypeName.find("$") + 1); + int32 pTypeSize; + + /* we only need to calculate primitive type in arrays. */ + if ((pTypeSize = GetPrimitiveTypeSize(typeName)) != -1) { + objSize = static_cast(pTypeSize); + } + comments = "// elemobjsize"; + } else { + comments = "// objsize"; + } + + if (objSize == 0) { + GStrIdx strIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(typeName); + auto it = strIdx2Type.find(strIdx); + ASSERT(it != strIdx2Type.end(), "Can not find type"); + MIRType *mirType = it->second; + ASSERT_NOT_NULL(mirType); + objSize = Globals::GetInstance()->GetBECommon()->GetTypeSize(mirType->GetTypeIndex()); + } + /* objSize should not exceed 16 bits */ + CHECK_FATAL(objSize <= 0xffff, "Error:the objSize is too large"); + Emit("\t.short\t" + std::to_string(objSize) + comments + "\n"); + } else if (mirSymbol.IsMuidRangeTab()) { + MIRIntConst *subIntCt = safe_cast(elemConst); + int flag = static_cast(subIntCt->GetExtValue()); + InitRangeIdx2PerfixStr(); + if (rangeIdx2PrefixStr.find(flag) == rangeIdx2PrefixStr.end()) { + EmitScalarConstant(*elemConst, false); + Emit("\n"); + return; + } + std::string prefix = rangeIdx2PrefixStr[flag]; +#if TARGAARCH64 || TARGRISCV64 || TARGX86_64 + EmitAsmLabel(kAsmQuad); +#else + (void)Emit("\t.word\t"); +#endif + if (idx == kRangeBeginIndex) { + Emit(prefix + "_begin\n"); + } else { + Emit(prefix + "_end\n"); + } + } else { +#ifdef USE_32BIT_REF + if (StringUtils::StartsWith(stName, ITAB_CONFLICT_PREFIX_STR) || StringUtils::StartsWith(stName, ITAB_PREFIX_STR) || + StringUtils::StartsWith(stName, VTAB_PREFIX_STR)) { + EmitScalarConstant(*elemConst, false, true); + } else { + EmitScalarConstant(*elemConst, false); + } +#else + EmitScalarConstant(*elemConst, false); +#endif /* USE_32BIT_REF */ + Emit("\n"); + } +} + +void Emitter::EmitConstantTable(const MIRSymbol &mirSymbol, MIRConst &mirConst, + const std::map &strIdx2Type) { + const std::string stName = mirSymbol.GetName(); + MIRAggConst &aggConst = static_cast(mirConst); + uint32 itabConflictIndex = 0; + for (size_t i = 0; i < aggConst.GetConstVec().size(); ++i) { + MIRConst *elemConst = aggConst.GetConstVecItem(i); + if (i == 0 && StringUtils::StartsWith(stName, ITAB_CONFLICT_PREFIX_STR)) { +#ifdef USE_32BIT_REF + itabConflictIndex = static_cast((safe_cast(elemConst))->GetValue()) & 0xffff; +#else + itabConflictIndex = safe_cast(elemConst)->GetExtValue() & 0xffffffff; +#endif + } + if (IsPrimitiveScalar(elemConst->GetType().GetPrimType())) { + if (elemConst->GetKind() == kConstAddrofFunc) { /* addroffunc const */ + EmitAddrofFuncConst(mirSymbol, *elemConst, i); + } else if (elemConst->GetKind() == kConstAddrof) { /* addrof symbol const */ + EmitAddrofSymbolConst(mirSymbol, *elemConst, i); + } else { /* intconst */ + EmitIntConst(mirSymbol, aggConst, itabConflictIndex, strIdx2Type, i); + } + } else if (elemConst->GetType().GetKind() == kTypeArray || elemConst->GetType().GetKind() == kTypeStruct) { + if (StringUtils::StartsWith(mirSymbol.GetName(), namemangler::kOffsetTabStr) && (i == 0 || i == 1)) { + /* EmitOffsetValueTable */ +#ifdef USE_32BIT_REF + Emit("\t.long\t"); 
+#else + EmitAsmLabel(kAsmQuad); +#endif + if (i == 0) { + (void)Emit(namemangler::kVtabOffsetTabStr + cg->GetMIRModule()->GetFileNameAsPostfix() + " - .\n"); + } else { + (void)Emit(namemangler::kFieldOffsetTabStr + cg->GetMIRModule()->GetFileNameAsPostfix() + " - .\n"); + } + } else { + EmitConstantTable(mirSymbol, *elemConst, strIdx2Type); + } + } + } +} + +void Emitter::EmitArrayConstant(MIRConst &mirConst) { + MIRType &mirType = mirConst.GetType(); + MIRAggConst &arrayCt = static_cast(mirConst); + MIRArrayType &arrayType = static_cast(mirType); + size_t uNum = arrayCt.GetConstVec().size(); + uint32 dim = arrayType.GetSizeArrayItem(0); + TyIdx scalarIdx = arrayType.GetElemTyIdx(); + MIRType *subTy = GlobalTables::GetTypeTable().GetTypeFromTyIdx(scalarIdx); + if (uNum == 0 && dim != 0) { + while (subTy->GetKind() == kTypeArray) { + MIRArrayType *aSubTy = static_cast(subTy); + if (aSubTy->GetSizeArrayItem(0) > 0) { + dim *= (aSubTy->GetSizeArrayItem(0)); + } + scalarIdx = aSubTy->GetElemTyIdx(); + subTy = GlobalTables::GetTypeTable().GetTypeFromTyIdx(scalarIdx); + } + } + for (size_t i = 0; i < uNum; ++i) { + MIRConst *elemConst = arrayCt.GetConstVecItem(i); + if (IsPrimitiveVector(subTy->GetPrimType())) { + EmitVectorConstant(*elemConst); + } else if (IsPrimitiveScalar(elemConst->GetType().GetPrimType())) { + if (cg->GetMIRModule()->IsCModule()) { + bool strLiteral = false; + if (arrayType.GetDim() == 1) { + MIRType *ety = arrayType.GetElemType(); + if (ety->GetPrimType() == PTY_i8 || ety->GetPrimType() == PTY_u8) { + strLiteral = true; + } + } + EmitScalarConstant(*elemConst, true, false, strLiteral == false); + } else { + EmitScalarConstant(*elemConst); + } + } else if (elemConst->GetType().GetKind() == kTypeArray) { + EmitArrayConstant(*elemConst); + } else if (elemConst->GetType().GetKind() == kTypeStruct || elemConst->GetType().GetKind() == kTypeClass || + elemConst->GetType().GetKind() == kTypeUnion) { + EmitStructConstant(*elemConst); + } else if (elemConst->GetKind() == kConstAddrofFunc) { + EmitScalarConstant(*elemConst); + } else { + ASSERT(false, "should not run here"); + } + } + int64 iNum = (arrayType.GetSizeArrayItem(0) > 0) ? 
(static_cast(arrayType.GetSizeArrayItem(0))) - uNum : 0; + if (iNum > 0) { + if (!cg->GetMIRModule()->IsCModule()) { + CHECK_FATAL(!Globals::GetInstance()->GetBECommon()->IsEmptyOfTypeSizeTable(), "container empty check"); + CHECK_FATAL(!arrayCt.GetConstVec().empty(), "container empty check"); + } + if (uNum > 0) { + uint64 unInSizeInByte = static_cast(iNum) * static_cast( + Globals::GetInstance()->GetBECommon()->GetTypeSize(arrayCt.GetConstVecItem(0)->GetType().GetTypeIndex())); + if (unInSizeInByte != 0) { + EmitNullConstant(unInSizeInByte); + } + } else { + uint64 size = Globals::GetInstance()->GetBECommon()->GetTypeSize(scalarIdx.GetIdx()) * dim; + Emit("\t.zero\t").Emit(static_cast(size)).Emit("\n"); + } + } +} + +void Emitter::EmitVectorConstant(MIRConst &mirConst) { + MIRType &mirType = mirConst.GetType(); + MIRAggConst &vecCt = static_cast(mirConst); + size_t uNum = vecCt.GetConstVec().size(); + for (size_t i = 0; i < uNum; ++i) { + MIRConst *elemConst = vecCt.GetConstVecItem(i); + if (IsPrimitiveScalar(elemConst->GetType().GetPrimType())) { + bool strLiteral = false; + EmitScalarConstant(*elemConst, true, false, strLiteral == false); + } else { + ASSERT(false, "should not run here"); + } + } + size_t lanes = GetVecLanes(mirType.GetPrimType()); + if (lanes > uNum) { + MIRIntConst zConst(0, vecCt.GetConstVecItem(0)->GetType()); + for (size_t i = uNum; i < lanes; i++) { + EmitScalarConstant(zConst, true, false, false); + } + } +} + +void Emitter::EmitStructConstant(MIRConst &mirConst) { + uint32 subStructFieldCounts = 0; + EmitStructConstant(mirConst, subStructFieldCounts); +} + +void Emitter::EmitStructConstant(MIRConst &mirConst, uint32 &subStructFieldCounts) { + StructEmitInfo *sEmitInfo = cg->GetMIRModule()->GetMemPool()->New(); + CHECK_FATAL(sEmitInfo != nullptr, "create a new struct emit info failed in Emitter::EmitStructConstant"); + MIRType &mirType = mirConst.GetType(); + MIRAggConst &structCt = static_cast(mirConst); + MIRStructType &structType = static_cast(mirType); + auto structPack = static_cast(structType.GetTypeAttrs().GetPack()); + /* all elements of struct. */ + uint8 num; + if (structType.GetKind() == kTypeUnion) { + num = 1; + } else { + num = static_cast(structType.GetFieldsSize()); + } + BECommon *beCommon = Globals::GetInstance()->GetBECommon(); + /* total size of emitted elements size. 
*/ + uint32 size = beCommon->GetTypeSize(structType.GetTypeIndex()); + uint32 fieldIdx = 1; + if (structType.GetKind() == kTypeUnion) { + fieldIdx = structCt.GetFieldIdItem(0); + } + for (uint32 i = 0; i < num; ++i) { + if (((i + 1) == num) && cg->GetMIRModule()->GetSrcLang() == kSrcLangC) { + isFlexibleArray = beCommon->GetHasFlexibleArray(mirType.GetTypeIndex().GetIdx()); + arraySize = 0; + } + MIRConst *elemConst; + if (structType.GetKind() == kTypeStruct) { + elemConst = structCt.GetAggConstElement(i + 1); + } else { + elemConst = structCt.GetAggConstElement(fieldIdx); + } + MIRType *elemType = structType.GetElemType(i); + if (structType.GetKind() == kTypeUnion) { + elemType = &(elemConst->GetType()); + } + MIRType *nextElemType = nullptr; + if (i != static_cast(num - 1)) { + nextElemType = structType.GetElemType(i + 1); + } + uint64 elemSize = beCommon->GetTypeSize(elemType->GetTypeIndex()); + uint8 charBitWidth = GetPrimTypeSize(PTY_i8) * kBitsPerByte; + if (elemType->GetKind() == kTypeBitField) { + if (elemConst == nullptr) { + MIRIntConst *zeroFill = GlobalTables::GetIntConstTable().GetOrCreateIntConst(0, *elemType); + elemConst = zeroFill; + } + uint64 fieldOffset = static_cast(static_cast(beCommon->GetFieldOffset( + structType, fieldIdx).first)) * static_cast(charBitWidth) + static_cast( + static_cast(beCommon->GetFieldOffset(structType, fieldIdx).second)); + EmitBitFieldConstant(*sEmitInfo, *elemConst, nextElemType, fieldOffset); + } else { + if (elemConst != nullptr) { + if (IsPrimitiveVector(elemType->GetPrimType())) { + EmitVectorConstant(*elemConst); + } else if (IsPrimitiveScalar(elemType->GetPrimType())) { + EmitScalarConstant(*elemConst, true, false, true); + } else if (elemType->GetKind() == kTypeArray) { + if (elemType->GetSize() != 0) { + EmitArrayConstant(*elemConst); + } + } else if ((elemType->GetKind() == kTypeStruct) || (elemType->GetKind() == kTypeClass) || + (elemType->GetKind() == kTypeUnion)) { + EmitStructConstant(*elemConst, subStructFieldCounts); + fieldIdx += subStructFieldCounts; + } else { + ASSERT(false, "should not run here"); + } + } else { + EmitNullConstant(elemSize); + } + sEmitInfo->IncreaseTotalSize(elemSize); + sEmitInfo->SetNextFieldOffset(sEmitInfo->GetTotalSize() * charBitWidth); + } + + if (nextElemType != nullptr && kTypeBitField != nextElemType->GetKind()) { + ASSERT(i < static_cast(num - 1), "NYI"); + uint8 nextAlign = Globals::GetInstance()->GetBECommon()->GetTypeAlign(nextElemType->GetTypeIndex()); + auto fieldAttr = structType.GetFields()[i + 1].second.second; + nextAlign = fieldAttr.IsPacked() ? 1 : std::min(nextAlign, structPack); + ASSERT(nextAlign != 0, "expect non-zero"); + /* append size, append 0 when align need. */ + uint64 totalSize = sEmitInfo->GetTotalSize(); + uint64 psize = (totalSize % nextAlign == 0) ? 0 : (nextAlign - (totalSize % nextAlign)); + if (psize != 0) { + EmitNullConstant(psize); + sEmitInfo->IncreaseTotalSize(psize); + sEmitInfo->SetNextFieldOffset(sEmitInfo->GetTotalSize() * charBitWidth); + } + /* element is uninitialized, emit null constant. */ + } + fieldIdx++; + } + if (structType.GetKind() == kTypeStruct) { + /* The reason of subtracting one is that fieldIdx adds one at the end of the cycle. 
*/ + subStructFieldCounts = fieldIdx - 1; + } else if (structType.GetKind() == kTypeUnion) { + subStructFieldCounts = static_cast(beCommon->GetStructFieldCount(structType.GetTypeIndex())); + } + + isFlexibleArray = false; + uint64 opSize = size - sEmitInfo->GetTotalSize(); + if (opSize != 0) { + EmitNullConstant(opSize); + } +} + +/* BlockMarker is for Debugging/Profiling */ +void Emitter::EmitBlockMarker(const std::string &markerName, const std::string §ionName, + bool withAddr, const std::string &addrName) { + /* + * .type $marker_name$, %object + * .global $marker_name$ + * .data + * .align 3 + * $marker_name$: + * .quad 0xdeadbeefdeadbeef + * .size $marker_name$, 8 + */ + Emit(asmInfo->GetType()); + Emit(markerName); + Emit(", %object\n"); + if (CGOptions::IsEmitBlockMarker()) { /* exposed as global symbol, for profiling */ + Emit(asmInfo->GetGlobal()); + } else { /* exposed as local symbol, for release. */ + Emit(asmInfo->GetLocal()); + } + Emit(markerName); + Emit("\n"); + + if (!sectionName.empty()) { + Emit("\t.section ." + sectionName); + if (sectionName.find("ro") == 0) { + Emit(",\"a\",%progbits\n"); + } else { + Emit(",\"aw\",%progbits\n"); + } + } else { + EmitAsmLabel(kAsmData); + } + Emit(asmInfo->GetAlign()); +#if TARGX86 || TARGX86_64 + Emit("8\n" + markerName + ":\n"); +#else + Emit("3\n" + markerName + ":\n"); +#endif + EmitAsmLabel(kAsmQuad); + if (withAddr) { + Emit(addrName + "\n"); + } else { + Emit("0xdeadbeefdeadbeef\n"); /* hexspeak in aarch64 represents crash or dead lock */ + } + Emit(asmInfo->GetSize()); + Emit(markerName + ", 8\n"); +} + +void Emitter::EmitLiteral(const MIRSymbol &literal, const std::map &strIdx2Type) { + /* + * .type _C_STR_xxxx, %object + * .local _C_STR_xxxx + * .data + * .align 3 + * _C_STR_xxxx: + * .quad __cinf_Ljava_2Flang_2FString_3B + * .... + * .size _C_STR_xxxx, 40 + */ + if (literal.GetStorageClass() == kScUnused) { + return; + } + EmitAsmLabel(literal, kAsmType); + /* literal should always be fstatic and readonly? */ + EmitAsmLabel(literal, kAsmLocal); /* alwasy fstatic */ + (void)Emit("\t.section\t." 
+ std::string(kMapleLiteralString) + ",\"aw\", %progbits\n"); + EmitAsmLabel(literal, kAsmAlign); + EmitAsmLabel(literal, kAsmSyname); + /* literal is an array */ + MIRConst *mirConst = literal.GetKonst(); + CHECK_FATAL(mirConst != nullptr, "mirConst should not be nullptr in EmitLiteral"); + if (literal.HasAddrOfValues()) { + EmitConstantTable(literal, *mirConst, strIdx2Type); + } else { + EmitArrayConstant(*mirConst); + } + EmitAsmLabel(literal, kAsmSize); +} + +void Emitter::EmitFuncLayoutInfo(const MIRSymbol &layout) { + /* + * .type $marker_name$, %object + * .global $marker_name$ + * .data + * .align 3 + * $marker_name$: + * .quad funcaddr + * .size $marker_name$, 8 + */ + MIRConst *mirConst = layout.GetKonst(); + MIRAggConst *aggConst = safe_cast(mirConst); + ASSERT(aggConst != nullptr, "null ptr check"); + if (aggConst->GetConstVec().size() != static_cast(LayoutType::kLayoutTypeCount)) { + maple::LogInfo::MapleLogger(kLlErr) << "something wrong happen in funclayoutsym\t" + << "constVec size\t" << aggConst->GetConstVec().size() << "\n"; + return; + } + for (size_t i = 0; i < static_cast(LayoutType::kLayoutTypeCount); ++i) { + std::string markerName = "__MBlock_" + GetLayoutTypeString(i) + "_func_start"; + CHECK_FATAL(aggConst->GetConstVecItem(i)->GetKind() == kConstAddrofFunc, "expect kConstAddrofFunc type"); + MIRAddroffuncConst *funcAddr = safe_cast(aggConst->GetConstVecItem(i)); + ASSERT(funcAddr != nullptr, "null ptr check"); + Emit(asmInfo->GetType()); + Emit(markerName + ", %object\n"); + Emit(asmInfo->GetGlobal()); + Emit(markerName + "\n"); + EmitAsmLabel(kAsmData); +#if TARGX86 || TARGX86_64 + EmitAsmLabel(layout, kAsmAlign); + Emit(markerName + ":\n"); +#else + Emit(asmInfo->GetAlign()); + Emit("3\n" + markerName + ":\n"); +#endif + +#if TARGAARCH64 || TARGRISCV64 || TARGX86_64 + EmitAsmLabel(kAsmQuad); +#else + Emit("\t.word "); +#endif + Emit(GlobalTables::GetFunctionTable().GetFunctionFromPuidx(funcAddr->GetValue())->GetName()); + Emit("\n"); + Emit(asmInfo->GetSize()); + Emit(markerName + ", 8\n"); + } +} + +void Emitter::EmitStaticFields(const std::vector &fields) { + for (auto *itSymbol : fields) { + EmitAsmLabel(*itSymbol, kAsmType); + /* literal should always be fstatic and readonly? 
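     As a rough sketch (the symbol name here is invented), each static-field
     entry below comes out along the lines of:
       .type  __staticFoo, %object
       .local __staticFoo
       .data
       .align <n>
     __staticFoo:
       ... array constant payload ...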
*/ + EmitAsmLabel(*itSymbol, kAsmLocal); /* alwasy fstatic */ + EmitAsmLabel(kAsmData); + EmitAsmLabel(*itSymbol, kAsmAlign); + EmitAsmLabel(*itSymbol, kAsmSyname); + /* literal is an array */ + MIRConst *mirConst = itSymbol->GetKonst(); + EmitArrayConstant(*mirConst); + } +} + +void Emitter::EmitLiterals(std::vector> &literals, + const std::map &strIdx2Type) { + /* + * load literals profile + * currently only used here, so declare it as local + */ + if (cg->GetMIRModule()->GetProfile().GetLiteralProfileSize() == 0) { + for (const auto &literalPair : literals) { + EmitLiteral(*(literalPair.first), strIdx2Type); + } + return; + } + /* emit hot literal start symbol */ + EmitBlockMarker("__MBlock_literal_hot_begin", "", false); + /* + * emit literals into .data section + * emit literals in the profile first + */ + for (auto &literalPair : literals) { + if (cg->GetMIRModule()->GetProfile().CheckLiteralHot(literalPair.first->GetName())) { + /* it's in the literal profiling data, means it's "hot" */ + EmitLiteral(*(literalPair.first), strIdx2Type); + literalPair.second = true; + } + } + /* emit hot literal end symbol */ + EmitBlockMarker("__MBlock_literal_hot_end", "", false); + + /* emit cold literal start symbol */ + EmitBlockMarker("__MBlock_literal_cold_begin", "", false); + /* emit other literals (not in the profile) next. */ + for (const auto &literalPair : literals) { + if (!literalPair.second) { + /* not emit yet */ + EmitLiteral(*(literalPair.first), strIdx2Type); + } + } + /* emit cold literal end symbol */ + EmitBlockMarker("__MBlock_literal_cold_end", "", false); +} + +void Emitter::GetHotAndColdMetaSymbolInfo(const std::vector &mirSymbolVec, + std::vector &hotFieldInfoSymbolVec, + std::vector &coldFieldInfoSymbolVec, const std::string &prefixStr, + bool forceCold) const { + bool isHot = false; + for (auto mirSymbol : mirSymbolVec) { + CHECK_FATAL(prefixStr.length() < mirSymbol->GetName().length(), "string length check"); + std::string name = mirSymbol->GetName().substr(prefixStr.length()); + std::string klassJavaDescriptor; + namemangler::DecodeMapleNameToJavaDescriptor(name, klassJavaDescriptor); + if (prefixStr == kFieldsInfoPrefixStr) { + isHot = cg->GetMIRModule()->GetProfile().CheckFieldHot(klassJavaDescriptor); + } else if (prefixStr == kMethodsInfoPrefixStr) { + isHot = cg->GetMIRModule()->GetProfile().CheckMethodHot(klassJavaDescriptor); + } else { + isHot = cg->GetMIRModule()->GetProfile().CheckClassHot(klassJavaDescriptor); + } + if (isHot && !forceCold) { + hotFieldInfoSymbolVec.emplace_back(mirSymbol); + } else { + coldFieldInfoSymbolVec.emplace_back(mirSymbol); + } + } +} + +void Emitter::EmitMetaDataSymbolWithMarkFlag(const std::vector &mirSymbolVec, + const std::map &strIdx2Type, + const std::string &prefixStr, const std::string §ionName, + bool isHotFlag) { + if (cg->GetMIRModule()->IsCModule()) { + return; + } + if (mirSymbolVec.empty()) { + return; + } + const std::string &markString = "__MBlock" + prefixStr; + const std::string &hotOrCold = isHotFlag ? 
"hot" : "cold"; + EmitBlockMarker((markString + hotOrCold + "_begin"), sectionName, false); + if (prefixStr == kFieldsInfoCompactPrefixStr || prefixStr == kMethodsInfoCompactPrefixStr || + prefixStr == kFieldOffsetDataPrefixStr || prefixStr == kMethodAddrDataPrefixStr) { + for (auto s : mirSymbolVec) { + EmitMethodFieldSequential(*s, strIdx2Type, sectionName); + } + } else { + for (auto s : mirSymbolVec) { + EmitClassInfoSequential(*s, strIdx2Type, sectionName); + } + } + EmitBlockMarker((markString + hotOrCold + "_end"), sectionName, false); +} + +void Emitter::MarkVtabOrItabEndFlag(const std::vector &mirSymbolVec) const { + for (auto mirSymbol : mirSymbolVec) { + auto *aggConst = safe_cast(mirSymbol->GetKonst()); + if ((aggConst == nullptr) || (aggConst->GetConstVec().empty())) { + continue; + } + size_t size = aggConst->GetConstVec().size(); + MIRConst *elemConst = aggConst->GetConstVecItem(size - 1); + ASSERT(elemConst != nullptr, "null ptr check"); + if (elemConst->GetKind() == kConstAddrofFunc) { + maple::LogInfo::MapleLogger(kLlErr) << "ERROR: the last vtab/itab content should not be funcAddr\n"; + } else { + if (elemConst->GetKind() != kConstInt) { + CHECK_FATAL(elemConst->GetKind() == kConstAddrof, "must be"); + continue; + } + MIRIntConst *tabConst = static_cast(elemConst); +#ifdef USE_32BIT_REF + /* #define COLD VTAB ITAB END FLAG 0X4000000000000000 */ + tabConst = GlobalTables::GetIntConstTable().GetOrCreateIntConst( + static_cast(tabConst->GetValue()) | 0X40000000, tabConst->GetType()); +#else + /* #define COLD VTAB ITAB END FLAG 0X4000000000000000 */ + tabConst = GlobalTables::GetIntConstTable().GetOrCreateIntConst( + static_cast(tabConst->GetExtValue()) | 0X4000000000000000, tabConst->GetType()); +#endif + aggConst->SetItem(static_cast(size) - 1, tabConst, aggConst->GetFieldIdItem(size - 1)); + } + } +} + +void Emitter::EmitStringPointers() { + if (CGOptions::OptimizeForSize()) { + (void)Emit(asmInfo->GetSection()).Emit(".rodata,\"aMS\",@progbits,1").Emit("\n"); +#if TARGX86 || TARGX86_64 + Emit("\t.align 8\n"); +#else + Emit("\t.align 3\n"); +#endif + } else { + (void)Emit(asmInfo->GetSection()).Emit(".rodata").Emit("\n"); + } + for (auto idx: localStrPtr) { + if (idx == 0) { + continue; + } + if (!CGOptions::OptimizeForSize()) { +#if TARGX86 || TARGX86_64 + Emit("\t.align 8\n"); +#else + Emit("\t.align 3\n"); +#endif + } + uint32 strId = idx.GetIdx(); + std::string str = GlobalTables::GetUStrTable().GetStringFromStrIdx(idx); + (void)Emit(".LUstr_").Emit(strId).Emit(":\n"); + std::string mplstr(str); + EmitStr(mplstr, false, true); + } + for (auto idx: stringPtr) { + if (idx == 0) { + continue; + } + if (!CGOptions::OptimizeForSize()) { +#if TARGX86 || TARGX86_64 + Emit("\t.align 8\n"); +#else + Emit("\t.align 3\n"); +#endif + } + uint32 strId = idx.GetIdx(); + std::string str = GlobalTables::GetUStrTable().GetStringFromStrIdx(idx); +#if TARGX86 || TARGX86_64 + Emit(asmInfo->GetAlign()); + Emit("8\n"); +#endif + Emit(".LSTR__").Emit(strId).Emit(":\n"); + std::string mplstr(str); + EmitStr(mplstr, false, true); + } +} + +void Emitter::EmitLocalVariable(const CGFunc &cgFunc) { + MIRSymbolTable *lSymTab = cgFunc.GetMirModule().CurFunction()->GetSymTab(); + if (lSymTab == nullptr) { + return; + } + + size_t lsize = lSymTab->GetSymbolTableSize(); + for (size_t i = 0; i < lsize; i++) { + if (i < cgFunc.GetLSymSize() && !cg->GetMIRModule()->IsCModule()) { + continue; + } + MIRSymbol *st = lSymTab->GetSymbolFromStIdx(static_cast(i)); + if (st == nullptr || st->GetStorageClass() != 
kScPstatic) { + continue; + } + /* + * Local static names can repeat. + * Append the current program unit index to the name. + */ + PUIdx puIdx = cgFunc.GetMirModule().CurFunction()->GetPuidx(); + std::string localName = st->GetName() + std::to_string(puIdx); + static std::vector emittedLocalSym; + if (std::find(emittedLocalSym.begin(), emittedLocalSym.end(), localName) != emittedLocalSym.end()) { + continue; + } + emittedLocalSym.push_back(localName); + + MIRType *ty = st->GetType(); + MIRConst *ct = st->GetKonst(); + if (ct == nullptr) { + EmitUninitializedSymbol(*st); + continue; + } + /* cg created data should be located in .text */ + /* [cgFunc.GetLSymSize(), lSymTab->GetSymbolTableSize()) -> cg created symbol */ + if (i < cgFunc.GetLSymSize()) { + if (st->IsThreadLocal()) { + (void)Emit("\t.section\t.tdata,\"awT\",@progbits\n"); + } else { + Emit(asmInfo->GetSection()); + Emit(asmInfo->GetData()); + Emit("\n"); + } + } else { + CHECK_FATAL(st->GetStorageClass() == kScPstatic && st->GetSKind() == kStConst, "cg should create constant!"); + /* cg created data should be located in .text */ + (void)Emit("\t.section\t.text\n"); + } + EmitAsmLabel(*st, kAsmAlign); + EmitAsmLabel(*st, kAsmLocal); + if (kTypeStruct == ty->GetKind() || kTypeUnion == ty->GetKind() || kTypeClass == ty->GetKind()) { + EmitAsmLabel(*st, kAsmSyname); + EmitStructConstant(*ct); + continue; + } + if (kTypeArray != ty->GetKind()) { + EmitAsmLabel(*st, kAsmSyname); + EmitScalarConstant(*ct, true, false, true /* isIndirect */); + continue; + } + if (ty->GetSize() != 0) { + EmitAsmLabel(*st, kAsmSyname); + EmitArrayConstant(*ct); + } + } +} + +void Emitter::EmitGlobalVar(const MIRSymbol &globalVar) { + EmitAsmLabel(globalVar, kAsmType); + if (globalVar.sectionAttr != UStrIdx(0)) { /* check section info if it is from inline asm */ + Emit("\t.section\t"); + Emit(GlobalTables::GetUStrTable().GetStringFromStrIdx(globalVar.sectionAttr)); + Emit(",\"aw\",%progbits\n"); + } else { + EmitAsmLabel(globalVar, kAsmLocal); + } + EmitAsmLabel(globalVar, kAsmComm); +} + +void Emitter::EmitGlobalVars(std::vector> &globalVars) { + if (GetCG()->IsLmbc() && GetCG()->GetGP() != nullptr) { + (void)Emit(asmInfo->GetLocal()).Emit("\t").Emit(GetCG()->GetGP()->GetName()).Emit("\n"); + (void)Emit(asmInfo->GetComm()).Emit("\t").Emit(GetCG()->GetGP()->GetName()); + (void)Emit(", ").Emit(GetCG()->GetMIRModule()->GetGlobalMemSize()).Emit(", ").Emit("8\n"); + } + /* load globalVars profile */ + if (globalVars.empty()) { + return; + } + std::unordered_set hotVars; + std::ifstream inFile; + if (!CGOptions::IsGlobalVarProFileEmpty()) { + inFile.open(CGOptions::GetGlobalVarProFile()); + if (inFile.fail()) { + maple::LogInfo::MapleLogger(kLlErr) << "Cannot open globalVar profile file " << CGOptions::GetGlobalVarProFile() + << "\n"; + } + } + if (CGOptions::IsGlobalVarProFileEmpty() || inFile.fail()) { + for (const auto &globalVarPair : globalVars) { + EmitGlobalVar(*(globalVarPair.first)); + } + return; + } + std::string globalVarName; + while (inFile >> globalVarName) { + (void)hotVars.insert(globalVarName); + } + inFile.close(); + bool hotBeginSet = false; + bool coldBeginSet = false; + for (auto &globalVarPair : globalVars) { + if (hotVars.find(globalVarPair.first->GetName()) != hotVars.end()) { + if (!hotBeginSet) { + /* emit hot globalvar start symbol */ + EmitBlockMarker("__MBlock_globalVars_hot_begin", "", true, globalVarPair.first->GetName()); + hotBeginSet = true; + } + EmitGlobalVar(*(globalVarPair.first)); + globalVarPair.second = true; + } + } 
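  /*
   * Rough sketch of the layout this produces when a globalVar profile is present
   * (the variable names stand in for whatever globals the module defines):
   *   __MBlock_globalVars_hot_begin:  .quad <first hot global>
   *   ...hot globals...
   *   __MBlock_globalVars_cold_begin: .quad <first cold global>
   *   ...cold globals...
   *   __MBlock_globalVars_cold_end:   .quad <size of last global>+<last global>
   */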
+ for (const auto &globalVarPair : globalVars) { + if (!globalVarPair.second) { /* not emit yet */ + if (!coldBeginSet) { + /* emit hot globalvar start symbol */ + EmitBlockMarker("__MBlock_globalVars_cold_begin", "", true, globalVarPair.first->GetName()); + coldBeginSet = true; + } + EmitGlobalVar(*(globalVarPair.first)); + } + } + MIRSymbol *endSym = globalVars.back().first; + MIRType *mirType = endSym->GetType(); + ASSERT_NOT_NULL(endSym); + ASSERT_NOT_NULL(mirType); + const std::string kStaticVarEndAdd = + std::to_string(Globals::GetInstance()->GetBECommon()->GetTypeSize(mirType->GetTypeIndex())) + "+" + + endSym->GetName(); + EmitBlockMarker("__MBlock_globalVars_cold_end", "", true, kStaticVarEndAdd); +} + +void Emitter::EmitUninitializedSymbolsWithPrefixSection(const MIRSymbol &symbol, const std::string §ionName) { + EmitAsmLabel(symbol, kAsmType); + Emit(asmInfo->GetSection()); + auto sectionConstrains = symbol.IsThreadLocal() ? ",\"awT\"," : ",\"aw\","; + (void)Emit(sectionName).Emit(sectionConstrains); + if (sectionName == ".bss" || StringUtils::StartsWith(sectionName, ".bss.") || + sectionName == ".tbss" || StringUtils::StartsWith(sectionName, ".tbss.")) { + Emit("%nobits\n"); + } else { + Emit("%progbits\n"); + } + if (symbol.GetAttr(ATTR_weak)) { + EmitAsmLabel(symbol, kAsmWeak); + } else if (symbol.GetStorageClass() == kScGlobal) { + EmitAsmLabel(symbol, kAsmGlbl); + } + EmitAsmLabel(symbol, kAsmAlign); + EmitAsmLabel(symbol, kAsmSyname); + EmitAsmLabel(symbol, kAsmZero); + EmitAsmLabel(symbol, kAsmSize); +} + +void Emitter::EmitUninitializedSymbol(const MIRSymbol &mirSymbol) { + if (mirSymbol.sectionAttr != UStrIdx(0)) { + auto §ionName = GlobalTables::GetUStrTable().GetStringFromStrIdx(mirSymbol.sectionAttr); + EmitUninitializedSymbolsWithPrefixSection(mirSymbol, sectionName); + } else if (mirSymbol.IsThreadLocal()) { + EmitUninitializedSymbolsWithPrefixSection(mirSymbol, ".tbss"); + } else if (CGOptions::IsNoCommon() || mirSymbol.GetAttr(ATTR_static_init_zero)) { + EmitUninitializedSymbolsWithPrefixSection(mirSymbol, ".bss"); + } else { + if (mirSymbol.GetStorageClass() != kScGlobal) { + EmitAsmLabel(mirSymbol, kAsmLocal); + } + EmitAsmLabel(mirSymbol, kAsmType); + EmitAsmLabel(mirSymbol, kAsmComm); + } +} + +void Emitter::EmitGlobalVariable() { + std::vector typeStVec; + std::vector typeNameStVec; + std::map strIdx2Type; + + /* Create name2type map which will be used by reflection. 
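     For instance, the name index of a class type such as
     Ljava_2Flang_2FString_3B maps to its MIRClassType here, so the
     constant-table emission further down can look a type up by its mangled name.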
*/ + for (MIRType *type : GlobalTables::GetTypeTable().GetTypeTable()) { + if (type == nullptr || (type->GetKind() != kTypeClass && type->GetKind() != kTypeInterface)) { + continue; + } + GStrIdx strIdx = type->GetNameStrIdx(); + strIdx2Type[strIdx] = type; + } + + /* sort symbols; classinfo-->field-->method */ + size_t size = GlobalTables::GetGsymTable().GetSymbolTableSize(); + std::vector classInfoVec; + std::vector vtabVec; + std::vector staticFieldsVec; + std::vector> globalVarVec; + std::vector itabVec; + std::vector itabConflictVec; + std::vector vtabOffsetVec; + std::vector fieldOffsetVec; + std::vector valueOffsetVec; + std::vector localClassInfoVec; + std::vector constStrVec; + std::vector> literalVec; + std::vector muidVec = { nullptr }; + std::vector fieldOffsetDatas; + std::vector methodAddrDatas; + std::vector methodSignatureDatas; + std::vector staticDecoupleKeyVec; + std::vector staticDecoupleValueVec; + std::vector superClassStVec; + std::vector arrayClassCacheVec; + std::vector arrayClassCacheNameVec; + + for (size_t i = 0; i < size; ++i) { + MIRSymbol *mirSymbol = GlobalTables::GetGsymTable().GetSymbolFromStidx(i); + if (mirSymbol == nullptr || mirSymbol->IsDeleted() || mirSymbol->GetStorageClass() == kScUnused) { + continue; + } + if (mirSymbol->GetSKind() == kStFunc) { + EmitAliasAndRef(*mirSymbol); + } + + if (mirSymbol->GetName().find(VTAB_PREFIX_STR) == 0) { + vtabVec.emplace_back(mirSymbol); + continue; + } else if (mirSymbol->GetName().find(ITAB_PREFIX_STR) == 0) { + itabVec.emplace_back(mirSymbol); + continue; + } else if (mirSymbol->GetName().find(ITAB_CONFLICT_PREFIX_STR) == 0) { + itabConflictVec.emplace_back(mirSymbol); + continue; + } else if (mirSymbol->GetName().find(kVtabOffsetTabStr) == 0) { + vtabOffsetVec.emplace_back(mirSymbol); + continue; + } else if (mirSymbol->GetName().find(kFieldOffsetTabStr) == 0) { + fieldOffsetVec.emplace_back(mirSymbol); + continue; + } else if (mirSymbol->GetName().find(kOffsetTabStr) == 0) { + valueOffsetVec.emplace_back(mirSymbol); + continue; + } else if (mirSymbol->IsArrayClassCache()) { + arrayClassCacheVec.emplace_back(mirSymbol); + continue; + } else if (mirSymbol->IsArrayClassCacheName()) { + arrayClassCacheNameVec.emplace_back(mirSymbol); + continue; + } else if (mirSymbol->GetName().find(kLocalClassInfoStr) == 0) { + localClassInfoVec.emplace_back(mirSymbol); + continue; + } else if (StringUtils::StartsWith(mirSymbol->GetName(), namemangler::kDecoupleStaticKeyStr)) { + staticDecoupleKeyVec.emplace_back(mirSymbol); + continue; + } else if (StringUtils::StartsWith(mirSymbol->GetName(), namemangler::kDecoupleStaticValueStr)) { + staticDecoupleValueVec.emplace_back(mirSymbol); + continue; + } else if (mirSymbol->IsLiteral()) { + literalVec.emplace_back(std::make_pair(mirSymbol, false)); + continue; + } else if (mirSymbol->IsConstString() || mirSymbol->IsLiteralPtr()) { + MIRConst *mirConst = mirSymbol->GetKonst(); + if (mirConst != nullptr && mirConst->GetKind() == kConstAddrof) { + constStrVec.emplace_back(mirSymbol); + continue; + } + } else if (mirSymbol->IsReflectionClassInfoPtr()) { + /* _PTR__cinf is emitted in dataDefTab and dataUndefTab */ + continue; + } else if (mirSymbol->IsMuidTab()) { + muidVec[0] = mirSymbol; + EmitMuidTable(muidVec, strIdx2Type, mirSymbol->GetMuidTabName()); + continue; + } else if (mirSymbol->IsCodeLayoutInfo()) { + if (!GetCG()->GetMIRModule()->IsCModule()) { + EmitFuncLayoutInfo(*mirSymbol); + } + continue; + } else if (mirSymbol->GetName().find(kStaticFieldNamePrefixStr) == 0) { + 
staticFieldsVec.emplace_back(mirSymbol); + continue; + } else if (mirSymbol->GetName().find(kGcRootList) == 0) { + EmitGlobalRootList(*mirSymbol); + continue; + } else if (mirSymbol->GetName().find(kFunctionProfileTabPrefixStr) == 0) { + muidVec[0] = mirSymbol; + EmitMuidTable(muidVec, strIdx2Type, kFunctionProfileTabPrefixStr); + continue; + } else if (mirSymbol->IsReflectionFieldOffsetData()) { + fieldOffsetDatas.emplace_back(mirSymbol); + continue; + } else if (mirSymbol->IsReflectionMethodAddrData()) { + methodAddrDatas.emplace_back(mirSymbol); + continue; + } else if (mirSymbol->IsReflectionSuperclassInfo()) { + superClassStVec.emplace_back(mirSymbol); + continue; + } else if (mirSymbol->IsReflectionMethodSignature()) { + methodSignatureDatas.push_back(mirSymbol); + continue; + } + + if (mirSymbol->IsReflectionInfo()) { + if (mirSymbol->IsReflectionClassInfo()) { + classInfoVec.emplace_back(mirSymbol); + } + continue; + } + /* symbols we do not emit here. */ + if (mirSymbol->GetSKind() == kStFunc || mirSymbol->GetSKind() == kStJavaClass || + mirSymbol->GetSKind() == kStJavaInterface) { + continue; + } + if (mirSymbol->GetStorageClass() == kScTypeInfo) { + typeStVec.emplace_back(mirSymbol); + continue; + } + if (mirSymbol->GetStorageClass() == kScTypeInfoName) { + typeNameStVec.emplace_back(mirSymbol); + continue; + } + if (mirSymbol->GetStorageClass() == kScTypeCxxAbi) { + continue; + } + + MIRType *mirType = mirSymbol->GetType(); + if (mirType == nullptr) { + continue; + } + if (GetCG()->GetMIRModule()->IsCModule() && mirSymbol->GetStorageClass() == kScExtern) { + /* only emit weak & initialized extern at present */ + if (mirSymbol->IsWeak() || mirSymbol->IsConst()) { + EmitAsmLabel(*mirSymbol, kAsmWeak); + } else { + continue; + } + } + /* + * emit uninitialized global/static variables. + * these variables store in .comm section. + */ + if ((mirSymbol->GetStorageClass() == kScGlobal || mirSymbol->GetStorageClass() == kScFstatic) && + !mirSymbol->IsConst()) { + if (mirSymbol->IsGctibSym()) { + /* GCTIB symbols are generated in GenerateObjectMaps */ + continue; + } + if (mirSymbol->GetStorageClass() != kScGlobal && cg->GetMIRModule()->IsJavaModule()) { + globalVarVec.emplace_back(std::make_pair(mirSymbol, false)); + continue; + } + EmitUninitializedSymbol(*mirSymbol); + continue; + } + + /* emit initialized global/static variables. */ + if (mirSymbol->GetStorageClass() == kScGlobal || + (mirSymbol->GetStorageClass() == kScExtern && GetCG()->GetMIRModule()->IsCModule()) || + (mirSymbol->GetStorageClass() == kScFstatic && !mirSymbol->IsReadOnly())) { + /* Emit section */ + EmitAsmLabel(*mirSymbol, kAsmType); + if (mirSymbol->IsReflectionStrTab()) { + std::string sectionName = ".reflection_strtab"; + if (mirSymbol->GetName().find(kReflectionStartHotStrtabPrefixStr) == 0) { + sectionName = ".reflection_start_hot_strtab"; + } else if (mirSymbol->GetName().find(kReflectionBothHotStrTabPrefixStr) == 0) { + sectionName = ".reflection_both_hot_strtab"; + } else if (mirSymbol->GetName().find(kReflectionRunHotStrtabPrefixStr) == 0) { + sectionName = ".reflection_run_hot_strtab"; + } + Emit("\t.section\t" + sectionName + ",\"a\",%progbits\n"); + } else if (mirSymbol->GetName().find(kDecoupleOption) == 0) { + Emit("\t.section\t." + std::string(kDecoupleStr) + ",\"a\",%progbits\n"); + } else if (mirSymbol->IsRegJNITab()) { + Emit("\t.section\t.reg_jni_tab,\"a\", %progbits\n"); + } else if (mirSymbol->GetName().find(kCompilerVersionNum) == 0) { + Emit("\t.section\t." 
+ std::string(kCompilerVersionNumStr) + ",\"a\", %progbits\n"); + } else if (mirSymbol->GetName().find(kSourceMuid) == 0) { + Emit("\t.section\t." + std::string(kSourceMuidSectionStr) + ",\"a\", %progbits\n"); + } else if (mirSymbol->GetName().find(kCompilerMfileStatus) == 0) { + Emit("\t.section\t." + std::string(kCompilerMfileStatus) + ",\"a\", %progbits\n"); + } else if (mirSymbol->IsRegJNIFuncTab()) { + Emit("\t.section\t.reg_jni_func_tab,\"aw\", %progbits\n"); + } else if (mirSymbol->IsReflectionPrimitiveClassInfo()) { + Emit("\t.section\t.primitive_classinfo,\"awG\", %progbits,__primitive_classinfo__,comdat\n"); + } else if (mirSymbol->IsReflectionHashTabBucket()) { + std::string stName = mirSymbol->GetName(); + const std::string delimiter = "$$"; + if (stName.find(delimiter) == std::string::npos) { + FATAL(kLncFatal, "Can not find delimiter in target "); + } + std::string secName = stName.substr(0, stName.find(delimiter)); + /* remove leading "__" in sec name. */ + secName.erase(0, 2); + Emit("\t.section\t." + secName + ",\"a\",%progbits\n"); + } else { + bool isThreadLocal = mirSymbol->IsThreadLocal(); + if (cg->GetMIRModule()->IsJavaModule()) { + (void)Emit("\t.section\t." + std::string(kMapleGlobalVariable) + ",\"aw\", @progbits\n"); + } else if (mirSymbol->sectionAttr != UStrIdx(0)) { + auto §ionName = GlobalTables::GetUStrTable().GetStringFromStrIdx(mirSymbol->sectionAttr); + auto sectionConstrains = isThreadLocal ? ",\"awT\"," : ",\"aw\","; + (void)Emit("\t.section\t" + sectionName + sectionConstrains + "@progbits\n"); + } else if (isThreadLocal) { + (void)Emit("\t.section\t.tdata,\"awT\",@progbits\n"); + } else { + (void)Emit("\t.data\n"); + } + } + /* Emit size and align by type */ + if (mirSymbol->GetStorageClass() == kScGlobal) { + if (mirSymbol->GetAttr(ATTR_weak) || mirSymbol->IsReflectionPrimitiveClassInfo()) { + EmitAsmLabel(*mirSymbol, kAsmWeak); + } else { + EmitAsmLabel(*mirSymbol, kAsmGlbl); + } + if (theMIRModule->IsJavaModule()) { + EmitAsmLabel(*mirSymbol, kAsmHidden); + } + } else if (mirSymbol->GetStorageClass() == kScFstatic) { + if (mirSymbol->sectionAttr == UStrIdx(0)) { + EmitAsmLabel(*mirSymbol, kAsmLocal); + } + } + if (mirSymbol->IsReflectionStrTab()) { /* reflection-string-tab also aligned to 8B boundaries. 
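     The operand differs per target because GNU as reads .align differently: on
     x86-64 ".align 8" means 8 bytes directly, while on AArch64 ".align 3" is a
     power of two, i.e. 2^3 = 8 bytes, so both request the same 8-byte alignment.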
*/ + Emit(asmInfo->GetAlign()); +#if TARGX86 || TARGX86_64 + Emit("8\n"); +#else + Emit("3\n"); +#endif + } else { + EmitAsmLabel(*mirSymbol, kAsmAlign); + } + EmitAsmLabel(*mirSymbol, kAsmSyname); + MIRConst *mirConst = mirSymbol->GetKonst(); + if (IsPrimitiveVector(mirType->GetPrimType())) { + EmitVectorConstant(*mirConst); + } else if (IsPrimitiveScalar(mirType->GetPrimType())) { + if (!CGOptions::IsArm64ilp32()) { + if (IsAddress(mirType->GetPrimType())) { + uint32 sizeinbits = GetPrimTypeBitSize(mirConst->GetType().GetPrimType()); + CHECK_FATAL(sizeinbits == k64BitSize, "EmitGlobalVariable: pointer must be of size 8"); + } + } + if (cg->GetMIRModule()->IsCModule()) { + EmitScalarConstant(*mirConst, true, false, true); + } else { + EmitScalarConstant(*mirConst); + } + } else if (mirType->GetKind() == kTypeArray) { + if (mirSymbol->HasAddrOfValues()) { + EmitConstantTable(*mirSymbol, *mirConst, strIdx2Type); + } else { + EmitArrayConstant(*mirConst); + } + } else if (mirType->GetKind() == kTypeStruct || mirType->GetKind() == kTypeClass || + mirType->GetKind() == kTypeUnion) { + if (mirSymbol->HasAddrOfValues()) { + EmitConstantTable(*mirSymbol, *mirConst, strIdx2Type); + } else { + EmitStructConstant(*mirConst); + } + } else { + ASSERT(false, "NYI"); + } + EmitAsmLabel(*mirSymbol, kAsmSize); + /* emit constant float/double */ + } else if (mirSymbol->IsReadOnly()) { + MIRConst *mirConst = mirSymbol->GetKonst(); + if (mirConst->GetKind() == maple::kConstStrConst) { + auto strCt = static_cast(mirConst); + localStrPtr.push_back(strCt->GetValue()); + } else { + EmitAsmLabel(*mirSymbol, kAsmType); + (void)Emit(asmInfo->GetSection()).Emit(asmInfo->GetRodata()).Emit("\n"); + if (!CGOptions::OptimizeForSize()) { + EmitAsmLabel(*mirSymbol, kAsmAlign); + } + EmitAsmLabel(*mirSymbol, kAsmSyname); + EmitScalarConstant(*mirConst); + } + } else if (mirSymbol->GetStorageClass() == kScPstatic) { + EmitAsmLabel(*mirSymbol, kAsmType); + Emit(asmInfo->GetSection()); + Emit(asmInfo->GetData()); + Emit("\n"); + EmitAsmLabel(*mirSymbol, kAsmAlign); + EmitAsmLabel(*mirSymbol, kAsmLocal); + MIRConst *ct = mirSymbol->GetKonst(); + if (ct == nullptr) { + EmitAsmLabel(*mirSymbol, kAsmComm); + } else if (IsPrimitiveScalar(mirType->GetPrimType())) { + EmitAsmLabel(*mirSymbol, kAsmSyname); + EmitScalarConstant(*ct, true, false, true); + } else if (kTypeArray == mirType->GetKind()) { + EmitAsmLabel(*mirSymbol, kAsmSyname); + EmitArrayConstant(*ct); + } else if (kTypeStruct == mirType->GetKind() || kTypeClass == mirType->GetKind() || + kTypeUnion == mirType->GetKind()) { + EmitAsmLabel(*mirSymbol, kAsmSyname); + EmitStructConstant(*ct); + } else { + CHECK_FATAL(0, "Unknown type in Global pstatic"); + } + } + } /* end proccess all mirSymbols. */ + EmitStringPointers(); + /* emit global var */ + EmitGlobalVars(globalVarVec); + /* emit literal std::strings */ + EmitLiterals(literalVec, strIdx2Type); + /* emit static field std::strings */ + EmitStaticFields(staticFieldsVec); + + if (GetCG()->GetMIRModule()->IsCModule()) { + return; + } + + EmitMuidTable(constStrVec, strIdx2Type, kMuidConststrPrefixStr); + + /* emit classinfo, field, method */ + std::vector fieldInfoStVec; + std::vector fieldInfoStCompactVec; + std::vector methodInfoStVec; + std::vector methodInfoStCompactVec; + + std::string sectionName = kMuidClassMetadataPrefixStr; + Emit("\t.section ." 
+ sectionName + ",\"aw\",%progbits\n"); + Emit(sectionName + "_begin:\n"); + + for (size_t i = 0; i < classInfoVec.size(); ++i) { + MIRSymbol *mirSymbol = classInfoVec[i]; + if (mirSymbol != nullptr && mirSymbol->GetKonst() != nullptr && mirSymbol->IsReflectionClassInfo()) { + /* Emit classinfo */ + EmitClassInfoSequential(*mirSymbol, strIdx2Type, sectionName); + std::string stName = mirSymbol->GetName(); + std::string className = stName.substr(strlen(CLASSINFO_PREFIX_STR)); + /* Get classinfo ro symbol */ + MIRSymbol *classInfoROSt = GlobalTables::GetGsymTable().GetSymbolFromStrIdx( + GlobalTables::GetStrTable().GetStrIdxFromName(CLASSINFO_RO_PREFIX_STR + className)); + EmitClassInfoSequential(*classInfoROSt, strIdx2Type, sectionName); + /* Get fields */ + MIRSymbol *fieldSt = GlobalTables::GetGsymTable().GetSymbolFromStrIdx( + GlobalTables::GetStrTable().GetStrIdxFromName(kFieldsInfoPrefixStr + className)); + MIRSymbol *fieldStCompact = GlobalTables::GetGsymTable().GetSymbolFromStrIdx( + GlobalTables::GetStrTable().GetStrIdxFromName(kFieldsInfoCompactPrefixStr + className)); + /* Get methods */ + MIRSymbol *methodSt = GlobalTables::GetGsymTable().GetSymbolFromStrIdx( + GlobalTables::GetStrTable().GetStrIdxFromName(kMethodsInfoPrefixStr + className)); + MIRSymbol *methodStCompact = GlobalTables::GetGsymTable().GetSymbolFromStrIdx( + GlobalTables::GetStrTable().GetStrIdxFromName(kMethodsInfoCompactPrefixStr + className)); + + if (fieldSt != nullptr) { + fieldInfoStVec.emplace_back(fieldSt); + } + if (fieldStCompact != nullptr) { + fieldInfoStCompactVec.emplace_back(fieldStCompact); + } + if (methodSt != nullptr) { + methodInfoStVec.emplace_back(methodSt); + } + if (methodStCompact != nullptr) { + methodInfoStCompactVec.emplace_back(methodStCompact); + } + } + } + Emit(sectionName + "_end:\n"); + + std::vector hotVtabStVec; + std::vector coldVtabStVec; + std::vector hotItabStVec; + std::vector coldItabStVec; + std::vector hotItabCStVec; + std::vector coldItabCStVec; + std::vector hotMethodsInfoCStVec; + std::vector coldMethodsInfoCStVec; + std::vector hotFieldsInfoCStVec; + std::vector coldFieldsInfoCStVec; + GetHotAndColdMetaSymbolInfo(vtabVec, hotVtabStVec, coldVtabStVec, VTAB_PREFIX_STR, + ((CGOptions::IsLazyBinding() || CGOptions::IsHotFix()) && !cg->IsLibcore())); + GetHotAndColdMetaSymbolInfo(itabVec, hotItabStVec, coldItabStVec, ITAB_PREFIX_STR, + ((CGOptions::IsLazyBinding() || CGOptions::IsHotFix()) && !cg->IsLibcore())); + GetHotAndColdMetaSymbolInfo(itabConflictVec, hotItabCStVec, coldItabCStVec, ITAB_CONFLICT_PREFIX_STR, + ((CGOptions::IsLazyBinding() || CGOptions::IsHotFix()) && !cg->IsLibcore())); + GetHotAndColdMetaSymbolInfo(fieldInfoStVec, hotFieldsInfoCStVec, coldFieldsInfoCStVec, kFieldsInfoPrefixStr); + GetHotAndColdMetaSymbolInfo(methodInfoStVec, hotMethodsInfoCStVec, coldMethodsInfoCStVec, kMethodsInfoPrefixStr); + + std::string sectionNameIsEmpty; + std::string fieldSectionName("rometadata.field"); + std::string methodSectionName("rometadata.method"); + + /* fieldinfo */ + EmitMetaDataSymbolWithMarkFlag(hotFieldsInfoCStVec, strIdx2Type, kFieldsInfoPrefixStr, fieldSectionName, true); + EmitMetaDataSymbolWithMarkFlag(coldFieldsInfoCStVec, strIdx2Type, kFieldsInfoPrefixStr, fieldSectionName, false); + EmitMetaDataSymbolWithMarkFlag(fieldInfoStCompactVec, strIdx2Type, kFieldsInfoCompactPrefixStr, fieldSectionName, + false); + /* methodinfo */ + EmitMetaDataSymbolWithMarkFlag(hotMethodsInfoCStVec, strIdx2Type, kMethodsInfoPrefixStr, methodSectionName, true); + 
EmitMetaDataSymbolWithMarkFlag(coldMethodsInfoCStVec, strIdx2Type, kMethodsInfoPrefixStr, methodSectionName, false); + EmitMetaDataSymbolWithMarkFlag(methodInfoStCompactVec, strIdx2Type, kMethodsInfoCompactPrefixStr, methodSectionName, + false); + + /* itabConflict */ + MarkVtabOrItabEndFlag(coldItabCStVec); + EmitMuidTable(hotItabCStVec, strIdx2Type, kMuidItabConflictPrefixStr); + EmitMetaDataSymbolWithMarkFlag(coldItabCStVec, strIdx2Type, ITAB_CONFLICT_PREFIX_STR, kMuidColdItabConflictPrefixStr, + false); + + /* + * vtab + * And itab to vtab section + */ + for (auto sym : hotItabStVec) { + hotVtabStVec.emplace_back(sym); + } + for (auto sym : coldItabStVec) { + coldVtabStVec.emplace_back(sym); + } + MarkVtabOrItabEndFlag(coldVtabStVec); + EmitMuidTable(hotVtabStVec, strIdx2Type, kMuidVtabAndItabPrefixStr); + EmitMetaDataSymbolWithMarkFlag(coldVtabStVec, strIdx2Type, VTAB_AND_ITAB_PREFIX_STR, kMuidColdVtabAndItabPrefixStr, + false); + + /* vtab_offset */ + EmitMuidTable(vtabOffsetVec, strIdx2Type, kMuidVtabOffsetPrefixStr); + /* field_offset */ + EmitMuidTable(fieldOffsetVec, strIdx2Type, kMuidFieldOffsetPrefixStr); + /* value_offset */ + EmitMuidTable(valueOffsetVec, strIdx2Type, kMuidValueOffsetPrefixStr); + /* local clasinfo */ + EmitMuidTable(localClassInfoVec, strIdx2Type, kMuidLocalClassInfoStr); + /* Emit decouple static */ + EmitMuidTable(staticDecoupleKeyVec, strIdx2Type, kDecoupleStaticKeyStr); + EmitMuidTable(staticDecoupleValueVec, strIdx2Type, kDecoupleStaticValueStr); + + /* super class */ + EmitMuidTable(superClassStVec, strIdx2Type, kMuidSuperclassPrefixStr); + + /* field offset rw */ + EmitMetaDataSymbolWithMarkFlag(fieldOffsetDatas, strIdx2Type, kFieldOffsetDataPrefixStr, sectionNameIsEmpty, false); + /* method address rw */ + EmitMetaDataSymbolWithMarkFlag(methodAddrDatas, strIdx2Type, kMethodAddrDataPrefixStr, sectionNameIsEmpty, false); + /* method address ro */ + std::string methodSignatureSectionName("romethodsignature"); + EmitMetaDataSymbolWithMarkFlag(methodSignatureDatas, strIdx2Type, kMethodSignaturePrefixStr, + methodSignatureSectionName, false); + + /* array class cache table */ + EmitMuidTable(arrayClassCacheVec, strIdx2Type, kArrayClassCacheTable); + /* array class cache name table */ + EmitMuidTable(arrayClassCacheNameVec, strIdx2Type, kArrayClassCacheNameTable); + +#if !defined(TARGARM32) + /* finally emit __gxx_personality_v0 DW.ref */ + if (!cg->GetMIRModule()->IsCModule()) { + EmitDWRef("__mpl_personality_v0"); + } +#endif +} +void Emitter::EmitAddressString(const std::string &address) { +#if TARGAARCH64 || TARGRISCV64 || TARGX86_64 + EmitAsmLabel(kAsmQuad); + Emit(address); +#else + Emit("\t.word\t" + address); +#endif +} +void Emitter::EmitGlobalRootList(const MIRSymbol &mirSymbol) { + Emit("\t.section .maple.gcrootsmap").Emit(",\"aw\",%progbits\n"); + std::vector nameVec; + std::string name = mirSymbol.GetName(); + nameVec.emplace_back(name); + nameVec.emplace_back(name + "Size"); + bool gcrootsFlag = true; + uint64 vecSize = 0; + for (const auto &gcrootsName : nameVec) { +#if TARGAARCH64 || TARGRISCV64 || TARGX86_64 + Emit("\t.type\t" + gcrootsName + ", @object\n" + "\t.p2align 3\n"); +#else + Emit("\t.type\t" + gcrootsName + ", %object\n" + "\t.p2align 3\n"); +#endif + Emit("\t.global\t" + gcrootsName + "\n"); + if (gcrootsFlag) { + Emit(kMuidGlobalRootlistPrefixStr).Emit("_begin:\n"); + } + Emit(gcrootsName + ":\n"); + if (gcrootsFlag) { + MIRAggConst *aggConst = safe_cast(mirSymbol.GetKonst()); + if (aggConst == nullptr) { + continue; + } + 
size_t i = 0; + while (i < aggConst->GetConstVec().size()) { + MIRConst *elemConst = aggConst->GetConstVecItem(i); + if (elemConst->GetKind() == kConstAddrof) { + MIRAddrofConst *symAddr = safe_cast(elemConst); + CHECK_FATAL(symAddr != nullptr, "nullptr of symAddr"); + MIRSymbol *symAddrSym = GlobalTables::GetGsymTable().GetSymbolFromStidx(symAddr->GetSymbolIndex().Idx()); + const std::string &symAddrName = symAddrSym->GetName(); + EmitAddressString(symAddrName + "\n"); + } else { + EmitScalarConstant(*elemConst); + } + i++; + } + vecSize = i; + } else { + EmitAddressString(std::to_string(vecSize) + "\n"); + } + Emit("\t.size\t" + gcrootsName + ",.-").Emit(gcrootsName + "\n"); + if (gcrootsFlag) { + Emit(kMuidGlobalRootlistPrefixStr).Emit("_end:\n"); + } + gcrootsFlag = false; + } +} + +void Emitter::EmitMuidTable(const std::vector &vec, const std::map &strIdx2Type, + const std::string §ionName) { + MIRSymbol *st = nullptr; + if (!vec.empty()) { + st = vec[0]; + } + if (st != nullptr && st->IsMuidRoTab()) { + Emit("\t.section ." + sectionName + ",\"a\",%progbits\n"); + } else { + Emit("\t.section ." + sectionName + ",\"aw\",%progbits\n"); + } + if (!CGOptions::DoLiteProfGen()) { + Emit(sectionName + "_begin:\n"); + } + bool isConstString = sectionName == kMuidConststrPrefixStr; + for (size_t i = 0; i < vec.size(); i++) { + MIRSymbol *st1 = vec[i]; + ASSERT(st1 != nullptr, "null ptr check"); + if (st1->GetStorageClass() == kScUnused) { + continue; + } + EmitAsmLabel(*st1, kAsmType); + if (st1->GetStorageClass() == kScFstatic) { + EmitAsmLabel(*st1, kAsmLocal); + } else { + EmitAsmLabel(*st1, kAsmGlbl); + EmitAsmLabel(*st1, kAsmHidden); + } + EmitAsmLabel(*st1, kAsmAlign); + EmitAsmLabel(*st1, kAsmSyname); + MIRConst *mirConst = st1->GetKonst(); + CHECK_FATAL(mirConst != nullptr, "mirConst should not be nullptr in EmitMuidTable"); + if (mirConst->GetKind() == kConstAddrof) { + MIRAddrofConst *symAddr = safe_cast(mirConst); + CHECK_FATAL(symAddr != nullptr, "call static_cast failed in EmitMuidTable"); + MIRSymbol *symAddrSym = GlobalTables::GetGsymTable().GetSymbolFromStidx(symAddr->GetSymbolIndex().Idx()); + if (isConstString) { + EmitAddressString(symAddrSym->GetName() + " - . + "); + Emit(kDataRefIsOffset); + Emit("\n"); + } else { + EmitAddressString(symAddrSym->GetName() + "\n"); + } + } else if (mirConst->GetKind() == kConstInt) { + EmitScalarConstant(*mirConst, true); + } else { + EmitConstantTable(*st1, *mirConst, strIdx2Type); + } + EmitAsmLabel(*st1, kAsmSize); + } + if (!CGOptions::DoLiteProfGen()) { + Emit(sectionName + "_end:\n"); + } +} + +void Emitter::EmitClassInfoSequential(const MIRSymbol &mirSymbol, const std::map &strIdx2Type, + const std::string §ionName) { + EmitAsmLabel(mirSymbol, kAsmType); + if (!sectionName.empty()) { + Emit("\t.section ." 
+ sectionName); + if (StringUtils::StartsWith(sectionName, "ro")) { + Emit(",\"a\",%progbits\n"); + } else { + Emit(",\"aw\",%progbits\n"); + } + } else { + EmitAsmLabel(kAsmData); + } + EmitAsmLabel(mirSymbol, kAsmGlbl); + EmitAsmLabel(mirSymbol, kAsmHidden); + EmitAsmLabel(mirSymbol, kAsmAlign); + EmitAsmLabel(mirSymbol, kAsmSyname); + MIRConst *mirConst = mirSymbol.GetKonst(); + CHECK_FATAL(mirConst != nullptr, "mirConst should not be nullptr in EmitClassInfoSequential"); + EmitConstantTable(mirSymbol, *mirConst, strIdx2Type); + EmitAsmLabel(mirSymbol, kAsmSize); +} + +void Emitter::EmitMethodDeclaringClass(const MIRSymbol &mirSymbol, const std::string §ionName) { + std::string symName = mirSymbol.GetName(); + std::string emitSyName = symName + "_DeclaringClass"; + std::string declaringClassName = symName.substr(strlen(kFieldsInfoCompactPrefixStr) + 1); + Emit(asmInfo->GetType()); + Emit(emitSyName + ", %object\n"); + if (!sectionName.empty()) { + Emit("\t.section ." + sectionName + "\n"); + } else { + EmitAsmLabel(kAsmData); + } + Emit(asmInfo->GetLocal()); + Emit(emitSyName + "\n"); + Emit(asmInfo->GetAlign()); + Emit(" 2\n" + emitSyName + ":\n"); + Emit("\t.long\t"); + Emit(CLASSINFO_PREFIX_STR + declaringClassName + " - .\n"); + Emit(asmInfo->GetSize()); + Emit(emitSyName + ", 4\n"); +} + +void Emitter::EmitMethodFieldSequential(const MIRSymbol &mirSymbol, + const std::map &strIdx2Type, + const std::string §ionName) { + std::string symName = mirSymbol.GetName(); + if (symName.find(kMethodsInfoCompactPrefixStr) != std::string::npos) { + EmitMethodDeclaringClass(mirSymbol, sectionName); + } + EmitAsmLabel(mirSymbol, kAsmType); + if (!sectionName.empty()) { + Emit("\t.section ." + sectionName + "\n"); + } else { + EmitAsmLabel(kAsmData); + } + EmitAsmLabel(mirSymbol, kAsmLocal); + + /* Emit(2) is 4 bit align */ + Emit(asmInfo->GetAlign()).Emit(2).Emit("\n"); + EmitAsmLabel(mirSymbol, kAsmSyname); + MIRConst *ct = mirSymbol.GetKonst(); + EmitConstantTable(mirSymbol, *ct, strIdx2Type); + std::string symbolName = mirSymbol.GetName(); + Emit("\t.size\t" + symbolName + ", .-"); + Emit(symbolName + "\n"); +} + +void Emitter::EmitDWRef(const std::string &name) { + /* + * .hidden DW.ref._ZTI3xxx + * .weak DW.ref._ZTI3xxx + * .section .data.DW.ref._ZTI3xxx,"awG",@progbits,DW.ref._ZTI3xxx,comdat + * .align 3 + * .type DW.ref._ZTI3xxx, %object + * .size DW.ref._ZTI3xxx, 8 + * DW.ref._ZTI3xxx: + * .xword _ZTI3xxx + */ + Emit("\t.hidden DW.ref." + name + "\n"); + Emit("\t.weak DW.ref." + name + "\n"); + Emit("\t.section .data.DW.ref." + name + ",\"awG\",%progbits,DW.ref."); + Emit(name + ",comdat\n"); + Emit(asmInfo->GetAlign()); +#if TARGX86 || TARGX86_64 + Emit("8\n"); +#else + Emit("3\n"); +#endif + Emit("\t.type DW.ref." + name + ", \%object\n"); + Emit("\t.size DW.ref." + name + ",8\n"); + Emit("DW.ref." 
+ name + ":\n"); +#if TARGAARCH64 || TARGRISCV64 + Emit("\t.xword " + name + "\n"); +#else + Emit("\t.word " + name + "\n"); +#endif +} + +void Emitter::EmitDecSigned(int64 num) { + std::ios::fmtflags flag(outStream.flags()); + outStream << std::dec << num; + outStream.flags(flag); +} + +void Emitter::EmitDecUnsigned(uint64 num) { + std::ios::fmtflags flag(outStream.flags()); + outStream << std::dec << num; + outStream.flags(flag); +} + +void Emitter::EmitHexUnsigned(uint64 num) { + std::ios::fmtflags flag(outStream.flags()); + outStream << "0x" << std::hex << num; + outStream.flags(flag); +} + +#define XSTR(s) STR(s) +#define STR(s) #s + +void Emitter::EmitDIHeader() { + if (cg->GetMIRModule()->GetSrcLang() == kSrcLangC) { + (void)Emit("\t.section ." + std::string("c_text") + ",\"ax\"\n"); + } else { + (void)Emit("\t.section ." + std::string(namemangler::kMuidJavatextPrefixStr) + ",\"ax\"\n"); + } + Emit(".L" XSTR(TEXT_BEGIN) ":\n"); +} + +void Emitter::EmitDIFooter() { + if (cg->GetMIRModule()->GetSrcLang() == kSrcLangC) { + (void)Emit("\t.section ." + std::string("c_text") + ",\"ax\"\n"); + } else { + (void)Emit("\t.section ." + std::string(namemangler::kMuidJavatextPrefixStr) + ",\"ax\"\n"); + } + Emit(".L" XSTR(TEXT_END) ":\n"); +} + +void Emitter::EmitDIHeaderFileInfo() { + Emit("// dummy header file 1\n"); + Emit("// dummy header file 2\n"); + Emit("// dummy header file 3\n"); +} + +void Emitter::AddLabelDieToLabelIdxMapping(DBGDie *lblDie, LabelIdx lblIdx) { + InsertLabdie2labidxTable(lblDie, lblIdx); +} + +LabelIdx Emitter::GetLabelIdxForLabelDie(DBGDie *lblDie) { + auto it = labdie2labidxTable.find(lblDie); + CHECK_FATAL(it != labdie2labidxTable.end(), ""); + return it->second; +} + +void Emitter::ApplyInPrefixOrder(DBGDie *die, const std::function &func) { + func(die); + ASSERT(die, ""); + if (die->GetSubDieVec().size() > 0) { + for (auto c : die->GetSubDieVec()) { + ApplyInPrefixOrder(c, func); + } + /* mark the end of the sibling list */ + func(nullptr); + } +} + +void Emitter::EmitDIFormSpecification(unsigned int dwform) { + switch (dwform) { + case DW_FORM_string: + Emit(".string "); + break; + case DW_FORM_strp: + case DW_FORM_data4: + case DW_FORM_ref4: + Emit(".4byte "); + break; + case DW_FORM_data1: + Emit(".byte "); + break; + case DW_FORM_data2: + Emit(".2byte "); + break; + case DW_FORM_data8: + Emit(".8byte "); + break; + case DW_FORM_sec_offset: + /* if DWARF64, should be .8byte? */ + Emit(".4byte "); + break; + case DW_FORM_addr: /* Should we use DWARF64? 
for now, we generate .8byte as gcc does for DW_FORM_addr */ + Emit(".8byte "); + break; + case DW_FORM_exprloc: + Emit(".uleb128 "); + break; + default: + CHECK_FATAL(maple::GetDwFormName(dwform) != nullptr, + "GetDwFormName() return null in Emitter::EmitDIFormSpecification"); + LogInfo::MapleLogger() << "unhandled : " << maple::GetDwFormName(dwform) << std::endl; + ASSERT(0, "NYI"); + } +} + +void Emitter::EmitDIAttrValue(DBGDie *die, DBGDieAttr *attr, DwAt attrName, DwTag tagName, DebugInfo *di) { + MapleVector &attrvec = die->GetAttrVec(); + + static MIRFunction *lastMIRFunc = nullptr; + + switch (attr->GetDwForm()) { + case DW_FORM_string: { + const std::string &name = GlobalTables::GetStrTable().GetStringFromStrIdx(attr->GetId()); + Emit("\"").Emit(name).Emit("\""); + Emit(CMNT "len = "); + EmitDecUnsigned(name.length() + 1); + } break; + case DW_FORM_strp: + Emit(".L" XSTR(DEBUG_STR_LABEL)); + outStream << attr->GetId(); + break; + case DW_FORM_data1: +#if DEBUG + if (attr->GetI() == kDbgDefaultVal) { + EmitHexUnsigned(attr->GetI()); + } else +#endif + EmitHexUnsigned(uint8_t(attr->GetI())); + break; + case DW_FORM_data2: +#if DEBUG + if (attr->GetI() == kDbgDefaultVal) { + EmitHexUnsigned(attr->GetI()); + } else +#endif + EmitHexUnsigned(uint16_t(attr->GetI())); + break; + case DW_FORM_data4: +#if DEBUG + if (attr->GetI() == kDbgDefaultVal) { + EmitHexUnsigned(attr->GetI()); + } else +#endif + EmitHexUnsigned(uint32_t(attr->GetI())); + break; + case DW_FORM_data8: + if (attrName == DW_AT_high_pc) { + if (tagName == DW_TAG_compile_unit) { + Emit(".L" XSTR(TEXT_END) "-.L" XSTR(TEXT_BEGIN)); + } else if (tagName == DW_TAG_subprogram) { + DBGDieAttr *name = LFindAttribute(attrvec, DW_AT_name); + if (name == nullptr) { + DBGDieAttr *spec = LFindAttribute(attrvec, DW_AT_specification); + CHECK_FATAL(spec != nullptr, "spec is null in Emitter::EmitDIAttrValue"); + DBGDie *decl = di->GetDie(spec->GetId()); + name = LFindAttribute(decl->GetAttrVec(), DW_AT_name); + } + CHECK_FATAL(name != nullptr, "name is null in Emitter::EmitDIAttrValue"); + const std::string &str = GlobalTables::GetStrTable().GetStringFromStrIdx(name->GetId()); + + MIRBuilder *mirbuilder = GetCG()->GetMIRModule()->GetMIRBuilder(); + MIRFunction *mfunc = mirbuilder->GetFunctionFromName(str); + lastMIRFunc = mfunc; + MapleMap >::iterator it = + CG::GetFuncWrapLabels().find(mfunc); + if (it != CG::GetFuncWrapLabels().end()) { + EmitLabelForFunc(mfunc, (*it).second.second); /* end label */ + } else { + EmitLabelRef(attr->GetId()); /* maybe deadbeef */ + } + Emit("-"); + if (it != CG::GetFuncWrapLabels().end()) { + EmitLabelForFunc(mfunc, (*it).second.first); /* start label */ + } else { + DBGDieAttr *lowpc = LFindAttribute(attrvec, DW_AT_low_pc); + CHECK_FATAL(lowpc != nullptr, "lowpc is null in Emitter::EmitDIAttrValue"); + EmitLabelRef(lowpc->GetId()); /* maybe deadbeef */ + } + } else if (tagName == DW_TAG_lexical_block) { + auto i = static_cast(attr->GetU()); + if (GetCG()->GetMIRModule()->GetDbgInfo()->IsScopeIdEmited(lastMIRFunc, i)) { + (void)Emit(".LScp." + std::to_string(i) + "E-.LScp." 
+ std::to_string(i) + "B"); + } else { + (void)Emit(0); + } + } + } else { + EmitHexUnsigned(static_cast(static_cast(attr->GetI()))); + } + break; + case DW_FORM_sec_offset: + if (attrName == DW_AT_stmt_list) { + Emit(".L"); + Emit(XSTR(DEBUG_LINE_0)); + } + break; + case DW_FORM_addr: + if (attrName == DW_AT_low_pc) { + if (tagName == DW_TAG_compile_unit) { + Emit(".L" XSTR(TEXT_BEGIN)); + } else if (tagName == DW_TAG_subprogram) { + /* if decl, name should be found; if def, we try DW_AT_specification */ + DBGDieAttr *name = LFindAttribute(attrvec, DW_AT_name); + if (name == nullptr) { + DBGDieAttr *spec = LFindAttribute(attrvec, DW_AT_specification); + CHECK_FATAL(spec != nullptr, "spec is null in Emitter::EmitDIAttrValue"); + DBGDie *decl = di->GetDie(spec->GetId()); + name = LFindAttribute(decl->GetAttrVec(), DW_AT_name); + } + CHECK_FATAL(name != nullptr, "name is null in Emitter::EmitDIAttrValue"); + const std::string &str = GlobalTables::GetStrTable().GetStringFromStrIdx(name->GetId()); + MIRBuilder *mirbuilder = GetCG()->GetMIRModule()->GetMIRBuilder(); + MIRFunction *mfunc = mirbuilder->GetFunctionFromName(str); + MapleMap >::iterator + it = CG::GetFuncWrapLabels().find(mfunc); + if (it != CG::GetFuncWrapLabels().end()) { + EmitLabelForFunc(mfunc, (*it).second.first); /* it is a */ + } else { + EmitLabelRef(attr->GetId()); /* maybe deadbeef */ + } + } else if (tagName == DW_TAG_label) { + LabelIdx labelIdx = GetLabelIdxForLabelDie(die); + DBGDie *subpgm = die->GetParent(); + ASSERT(subpgm->GetTag() == DW_TAG_subprogram, "Label DIE should be a child of a Subprogram DIE"); + DBGDieAttr *fnameAttr = LFindAttribute(subpgm->GetAttrVec(), DW_AT_name); + if (!fnameAttr) { + DBGDieAttr *specAttr = LFindAttribute(subpgm->GetAttrVec(), DW_AT_specification); + CHECK_FATAL(specAttr, "pointer is null"); + DBGDie *twin = di->GetDie(static_cast(specAttr->GetU())); + fnameAttr = LFindAttribute(twin->GetAttrVec(), DW_AT_name); + } + CHECK_FATAL(fnameAttr, ""); + const std::string &fnameStr = GlobalTables::GetStrTable().GetStringFromStrIdx(fnameAttr->GetId()); + auto *res = memPool->New(fnameStr.c_str(), labelIdx, *memPool); + cfi::CFIOpndEmitVisitor cfiVisitor(*this); + res->Accept(cfiVisitor); + } else if (tagName == DW_TAG_lexical_block) { + auto i = static_cast(attr->GetU()); + if (GetCG()->GetMIRModule()->GetDbgInfo()->IsScopeIdEmited(lastMIRFunc, i)) { + (void)Emit(".LScp." + std::to_string(i) + "B"); + } else { + (void)Emit(0); + } + } + } else if (attrName == DW_AT_high_pc) { + if (tagName == DW_TAG_compile_unit) { + Emit(".L" XSTR(TEXT_END) "-.L" XSTR(TEXT_BEGIN)); + } + } else { + Emit("XXX--ADDR--XXX"); + } + break; + case DW_FORM_ref4: + if (attrName == DW_AT_type) { + DBGDie *die0 = di->GetDie(static_cast(attr->GetU())); + if (die0->GetOffset() != 0) { + EmitHexUnsigned(die0->GetOffset()); + } else { + /* unknown type, missing mplt */ + EmitHexUnsigned(di->GetDummyTypeDie()->GetOffset()); + Emit(CMNT "Warning: dummy type used"); + } + } else if (attrName == DW_AT_specification || attrName == DW_AT_sibling) { + DBGDie *die0 = di->GetDie(static_cast(attr->GetU())); + ASSERT(die0->GetOffset(), ""); + EmitHexUnsigned(die0->GetOffset()); + } else if (attrName == DW_AT_object_pointer) { + GStrIdx thisIdx = GlobalTables::GetStrTable().GetStrIdxFromName(kDebugMapleThis); + DBGDie *that = LFindChildDieWithName(die, DW_TAG_formal_parameter, thisIdx); + /* need to find the this or self based on the source language + what is the name for 'this' used in mapleir? 
+ this has to be with respect to a function */ + if (that) { + EmitHexUnsigned(that->GetOffset()); + } else { + EmitHexUnsigned(attr->GetU()); + } + } else { + Emit(" OFFSET "); + EmitHexUnsigned(attr->GetU()); + } + break; + case DW_FORM_exprloc: { + DBGExprLoc *elp = attr->GetPtr(); + switch (elp->GetOp()) { + case DW_OP_call_frame_cfa: + EmitHexUnsigned(1); + (void)Emit(CMNT "size"); + Emit("\n\t.byte "); + EmitHexUnsigned(elp->GetOp()); + (void)Emit(CMNT); + (void)Emit(maple::GetDwOpName(elp->GetOp())); + break; + case DW_OP_addr: + EmitHexUnsigned(k9ByteSize); + (void)Emit(CMNT "size"); + Emit("\n\t.byte "); + EmitHexUnsigned(elp->GetOp()); + Emit(CMNT); + (void)Emit(maple::GetDwOpName(elp->GetOp())); + Emit("\n\t.8byte "); + (void)Emit(GlobalTables::GetStrTable().GetStringFromStrIdx( + static_cast(elp->GetGvarStridx())).c_str()); + break; + case DW_OP_fbreg: + EmitHexUnsigned(1 + namemangler::GetSleb128Size(elp->GetFboffset())); + (void)Emit(CMNT "uleb128 size"); + Emit("\n\t.byte "); + EmitHexUnsigned(elp->GetOp()); + (void)Emit(CMNT); + (void)Emit(maple::GetDwOpName(elp->GetOp())); + Emit("\n\t.sleb128 "); + EmitDecSigned(elp->GetFboffset()); + (void)Emit(CMNT "fboffset"); + break; + case DW_OP_breg0: + case DW_OP_breg1: + case DW_OP_breg2: + case DW_OP_breg3: + case DW_OP_breg4: + case DW_OP_breg5: + case DW_OP_breg6: + case DW_OP_breg7: + EmitHexUnsigned(k2ByteSize); + (void)Emit(CMNT "size"); + Emit("\n\t.byte "); + EmitHexUnsigned(elp->GetOp()); + Emit(CMNT); + (void)Emit(maple::GetDwOpName(elp->GetOp())); + Emit("\n\t.sleb128 "); + EmitDecSigned(0); + (void)Emit(CMNT "offset"); + break; + default: + EmitHexUnsigned(uintptr_t(elp)); + break; + } + } break; + default: + CHECK_FATAL(maple::GetDwFormName(attr->GetDwForm()) != nullptr, + "GetDwFormName return null in Emitter::EmitDIAttrValue"); + LogInfo::MapleLogger() << "unhandled : " << maple::GetDwFormName(attr->GetDwForm()) << std::endl; + ASSERT(0, "NYI"); + } +} + +void Emitter::EmitDIDebugInfoSection(DebugInfo *mirdi) { + /* From DWARF Standard Specification V4. 7.5.1 + collect section size */ + Emit("\t.section\t.debug_info,\"\",@progbits\n"); + /* label to mark start of the .debug_info section */ + Emit(".L" XSTR(DEBUG_INFO_0) ":\n"); + /* $ 7.5.1.1 */ + Emit("\t.4byte\t"); + EmitHexUnsigned(mirdi->GetDebugInfoLength()); + Emit(CMNT "section length\n"); + /* DWARF version. uhalf. */ + Emit("\t.2byte\t"); + /* 4 for version 4. */ + EmitHexUnsigned(kDwarfVersion); + Emit("\n"); + /* debug_abbrev_offset. 4byte for 32-bit, 8byte for 64-bit */ + Emit("\t.4byte\t.L" XSTR(DEBUG_ABBREV_0) "\n"); + /* address size. ubyte */ + Emit("\t.byte\t"); + EmitHexUnsigned(kSizeOfPTR); + Emit("\n"); + /* + * 7.5.1.2 type unit header + * currently empty... 
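 * As an illustration, the 7.5.1.1 compilation-unit header written just above
 * comes out roughly as:
 *   .4byte <unit length>
 *   .2byte 0x4                                   (DWARF version)
 *   .4byte <label at the start of .debug_abbrev>
 *   .byte  0x8                                   (address size on a 64-bit target)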
+ * + * 7.5.2 Debugging Information Entry (DIE) + */ + Emitter *emitter = this; + MapleVector &abbrevVec = mirdi->GetAbbrevVec(); + ApplyInPrefixOrder(mirdi->GetCompUnit(), [&abbrevVec, &emitter, &mirdi](DBGDie *die) { + if (!die) { + /* emit the null entry and return */ + emitter->Emit("\t.byte 0x0\n"); + return; + } + if (!die->GetKeep()) { + return; + } + bool verbose = emitter->GetCG()->GenerateVerboseAsm(); + if (verbose) { + emitter->Emit("\n"); + } + emitter->Emit("\t.uleb128 "); + emitter->EmitHexUnsigned(die->GetAbbrevId()); + if (verbose) { + emitter->Emit(CMNT); + CHECK_FATAL(maple::GetDwTagName(die->GetTag()) != nullptr, + "GetDwTagName(die->GetTag()) return null in Emitter::EmitDIDebugInfoSection"); + emitter->Emit(maple::GetDwTagName(die->GetTag())); + emitter->Emit(" Offset= "); + emitter->EmitHexUnsigned(die->GetOffset()); + emitter->Emit(" ("); + emitter->EmitDecUnsigned(die->GetOffset()); + emitter->Emit(" ), Size= "); + emitter->EmitHexUnsigned(die->GetSize()); + emitter->Emit(" ("); + emitter->EmitDecUnsigned(die->GetSize()); + emitter->Emit(" )\n"); + } else { + emitter->Emit("\n"); + } + DBGAbbrevEntry *diae = LFindAbbrevEntry(abbrevVec, die->GetAbbrevId()); + CHECK_FATAL(diae != nullptr, "diae is null in Emitter::EmitDIDebugInfoSection"); + MapleVector &apl = diae->GetAttrPairs(); /* attribute pair list */ + + std::string sfile, spath; + if (diae->GetTag() == DW_TAG_compile_unit && sfile.empty()) { + /* get full source path from fileMap[2] */ + if (emitter->GetFileMap().size() > k2ByteSize) { /* have src file map */ + std::string srcPath = emitter->GetFileMap()[k2ByteSize]; + size_t t = srcPath.rfind("/"); + ASSERT(t != std::string::npos, ""); + sfile = srcPath.substr(t + 1); + spath = srcPath.substr(0, t); + } + } + + for (size_t i = 0; i < diae->GetAttrPairs().size(); i += k2ByteSize) { + DBGDieAttr *attr = LFindAttribute(die->GetAttrVec(), DwAt(apl[i])); + ASSERT_NOT_NULL(attr); + if (!attr->GetKeep()) { + continue; + } + if (!LShouldEmit(unsigned(apl[i + 1]))) { + continue; + } + /* update DW_AT_name and DW_AT_comp_dir attrs under DW_TAG_compile_unit + to be C/C++ */ + if (!sfile.empty()) { + if (attr->GetDwAt() == DW_AT_name) { + attr->SetId(GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(sfile).GetIdx()); + emitter->GetCG()->GetMIRModule()->GetDbgInfo()->AddStrps(attr->GetId()); + } else if (attr->GetDwAt() == DW_AT_comp_dir) { + attr->SetId(GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(spath).GetIdx()); + emitter->GetCG()->GetMIRModule()->GetDbgInfo()->AddStrps(attr->GetId()); + } + } + emitter->Emit("\t"); + emitter->EmitDIFormSpecification(unsigned(apl[i + 1])); + emitter->EmitDIAttrValue(die, attr, unsigned(apl[i]), diae->GetTag(), mirdi); + if (verbose) { + emitter->Emit(CMNT); + emitter->Emit(maple::GetDwAtName(unsigned(apl[i]))); + emitter->Emit(" : "); + emitter->Emit(maple::GetDwFormName(unsigned(apl[i + 1]))); + if (apl[i + 1] == DW_FORM_strp || apl[i + 1] == DW_FORM_string) { + emitter->Emit(" : "); + emitter->Emit(GlobalTables::GetStrTable().GetStringFromStrIdx(attr->GetId()).c_str()); + } else if (apl[i] == DW_AT_data_member_location) { + emitter->Emit(" : "); + emitter->Emit(apl[i + 1]).Emit(" attr= "); + emitter->EmitHexUnsigned(uintptr_t(attr)); + } + } + emitter->Emit("\n"); + } + }); +} + +void Emitter::EmitDIDebugAbbrevSection(DebugInfo *mirdi) { + Emit("\t.section\t.debug_abbrev,\"\",@progbits\n"); + Emit(".L" XSTR(DEBUG_ABBREV_0) ":\n"); + + /* construct a list of DI abbrev entries + 1. DW_TAG_compile_unit 0x11 + 2. 
DW_TAG_subprogram 0x2e */ + bool verbose = GetCG()->GenerateVerboseAsm(); + for (DBGAbbrevEntry *diae : mirdi->GetAbbrevVec()) { + if (!diae) { + continue; + } + /* ID */ + if (verbose) { + Emit("\n"); + } + Emit("\t.uleb128 "); + EmitHexUnsigned(diae->GetAbbrevId()); + if (verbose) { + Emit(CMNT "Abbrev Entry ID"); + } + Emit("\n"); + /* TAG */ + Emit("\t.uleb128 "); + EmitHexUnsigned(diae->GetTag()); + CHECK_FATAL(maple::GetDwTagName(diae->GetTag()) != nullptr, + "GetDwTagName return null in Emitter::EmitDIDebugAbbrevSection"); + if (verbose) { + Emit(CMNT); + Emit(maple::GetDwTagName(diae->GetTag())); + } + Emit("\n"); + + MapleVector &apl = diae->GetAttrPairs(); /* attribute pair list */ + /* children? */ + Emit("\t.byte "); + EmitHexUnsigned(diae->GetWithChildren()); + if (verbose) { + Emit(diae->GetWithChildren() ? CMNT "DW_CHILDREN_yes" : CMNT "DW_CHILDREN_no"); + } + Emit("\n"); + + for (size_t i = 0; i < diae->GetAttrPairs().size(); i += k2ByteSize) { + /* odd entry -- DW_AT_*, even entry -- DW_FORM_* */ + Emit("\t.uleb128 "); + EmitHexUnsigned(apl[i]); + CHECK_FATAL(maple::GetDwAtName(unsigned(apl[i])) != nullptr, + "GetDwAtName return null in Emitter::EmitDIDebugAbbrevSection"); + if (verbose) { + Emit(CMNT); + Emit(maple::GetDwAtName(unsigned(apl[i]))); + } + Emit("\n"); + Emit("\t.uleb128 "); + EmitHexUnsigned(apl[i + 1]); + CHECK_FATAL(maple::GetDwFormName(unsigned(apl[i + 1])) != nullptr, + "GetDwFormName return null in Emitter::EmitDIDebugAbbrevSection"); + if (verbose) { + Emit(CMNT); + Emit(maple::GetDwFormName(unsigned(apl[i + 1]))); + } + Emit("\n"); + } + /* end of an abbreviation record */ + Emit("\t.byte 0x0\n"); + Emit("\t.byte 0x0\n"); + } + Emit("\t.byte 0x0\n"); +} + +void Emitter::EmitDIDebugARangesSection() { + Emit("\t.section\t.debug_aranges,\"\",@progbits\n"); +} + +void Emitter::EmitDIDebugRangesSection() { + Emit("\t.section\t.debug_ranges,\"\",@progbits\n"); +} + +void Emitter::EmitDIDebugLineSection() { + Emit("\t.section\t.debug_line,\"\",@progbits\n"); + Emit(".L" XSTR(DEBUG_LINE_0) ":\n"); +} + +void Emitter::EmitDIDebugStrSection() { + Emit("\t.section\t.debug_str,\"MS\",@progbits,1\n"); + for (auto it : GetCG()->GetMIRModule()->GetDbgInfo()->GetStrps()) { + Emit(".L" XSTR(DEBUG_STR_LABEL)); + outStream << it; + Emit(":\n"); + const std::string &name = GlobalTables::GetStrTable().GetStringFromStrIdx(it); + Emit("\t.string \"").Emit(name).Emit("\"\n"); + } +} + +void Emitter::FillInClassByteSize(DBGDie *die, DBGDieAttr *byteSizeAttr) const { + ASSERT(byteSizeAttr->GetDwForm() == DW_FORM_data1 || byteSizeAttr->GetDwForm() == DW_FORM_data2 || + byteSizeAttr->GetDwForm() == DW_FORM_data4 || byteSizeAttr->GetDwForm() == DW_FORM_data8, + "Unknown FORM value for DW_AT_byte_size"); + if (static_cast(byteSizeAttr->GetI()) == kDbgDefaultVal) { + /* get class size */ + DBGDieAttr *typeAttr = LFindDieAttr(die, DW_AT_type); + CHECK_FATAL(typeAttr != nullptr, "at_type is nullptr in Emitter::FillInClassByteSize"); + /* hope this is a global type */ + uint32 tid = typeAttr->GetId(); + CHECK_FATAL(tid < Globals::GetInstance()->GetBECommon()->GetSizeOfTypeSizeTable(), + "index out of range in Emitter::FillInClassByteSize"); + int64_t byteSize = static_cast(Globals::GetInstance()->GetBECommon()->GetTypeSize(tid)); + LUpdateAttrValue(byteSizeAttr, byteSize); + } +} + +void Emitter::SetupDBGInfo(DebugInfo *mirdi) { + Emitter *emitter = this; + MapleVector &abbrevVec = mirdi->GetAbbrevVec(); + ApplyInPrefixOrder(mirdi->GetCompUnit(), [&abbrevVec, &emitter](DBGDie *die) { 
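+    // For every DIE kept in the tree, patch target-dependent attributes (frame base for
+    // subprograms, byte sizes and member offsets for aggregate types) before DIE sizes
+    // and offsets are computed at the end of this function.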
+ if (!die || !die->GetKeep()) { + return; + } + + CHECK_FATAL(maple::GetDwTagName(die->GetTag()) != nullptr, + "maple::GetDwTagName(die->GetTag()) is nullptr in Emitter::SetupDBGInfo"); + if (die->GetAbbrevId() == 0) { + LogInfo::MapleLogger() << maple::GetDwTagName(die->GetTag()) << std::endl; + } + CHECK_FATAL(die->GetAbbrevId() < abbrevVec.size(), "index out of range in Emitter::SetupDBGInfo"); + ASSERT(abbrevVec[die->GetAbbrevId()]->GetAbbrevId() == die->GetAbbrevId(), ""); + DBGAbbrevEntry *diae = abbrevVec[die->GetAbbrevId()]; + switch (diae->GetTag()) { + case DW_TAG_subprogram: { + DBGExprLoc *exprloc = emitter->memPool->New(emitter->GetCG()->GetMIRModule()); + exprloc->GetSimpLoc()->SetDwOp(DW_OP_call_frame_cfa); + die->SetAttr(DW_AT_frame_base, exprloc); + } break; + case DW_TAG_structure_type: + case DW_TAG_union_type: + case DW_TAG_class_type: + case DW_TAG_interface_type: { + DBGDieAttr *byteSizeAttr = LFindDieAttr(die, DW_AT_byte_size); + if (byteSizeAttr) { + emitter->FillInClassByteSize(die, byteSizeAttr); + } + /* get the type from tid instead of name as it could be changed by type alias */ + DBGDieAttr *typeAttr = LFindDieAttr(die, DW_AT_type); + CHECK_FATAL(typeAttr != nullptr, "at_type is null in Emitter::SetupDBGInfo"); + uint32 tid = typeAttr->GetId(); + MIRType *mty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(tid); + MIRStructType *sty = static_cast(mty); + CHECK_FATAL(sty != nullptr, "pointer cast failed"); + CHECK_FATAL(sty->GetTypeIndex().GetIdx() < + Globals::GetInstance()->GetBECommon()->GetSizeOfStructFieldCountTable(), ""); + uint32 embeddedIDs = 0; + MIRStructType *prevSubstruct = nullptr; + for (size_t i = 0; i < sty->GetFields().size(); i++) { + TyIdx fieldtyidx = sty->GetFieldsElemt(i).second.first; + MIRType *fieldty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(fieldtyidx); + if (prevSubstruct) { + embeddedIDs += static_cast(Globals::GetInstance()->GetBECommon()->GetStructFieldCount( + static_cast(prevSubstruct->GetTypeIndex().GetIdx()))); + } + prevSubstruct = fieldty->EmbeddedStructType(); + FieldID fieldID = static_cast(i + embeddedIDs) + 1; + int offset = Globals::GetInstance()->GetBECommon()->GetFieldOffset(*sty, fieldID).first; + GStrIdx fldName = sty->GetFieldsElemt(i).first; + DBGDie *cdie = LFindChildDieWithName(die, DW_TAG_member, fldName); + CHECK_FATAL(cdie != nullptr, "cdie is null in Emitter::SetupDBGInfo"); + DBGDieAttr *mloc = LFindDieAttr(cdie, DW_AT_data_member_location); + CHECK_FATAL(mloc != nullptr, "mloc is null in Emitter::SetupDBGInfo"); + DBGAbbrevEntry *childDiae = abbrevVec[cdie->GetAbbrevId()]; + CHECK_FATAL(childDiae != nullptr, "child_diae is null in Emitter::SetupDBGInfo"); + LUpdateAttrValue(mloc, offset); + } + } break; + default: + break; + } + }); + + /* compute DIE sizes and offsets */ + mirdi->ComputeSizeAndOffsets(); +} + +void Emitter::EmitAliasAndRef(const MIRSymbol &sym) { + MIRFunction *mFunc = sym.GetFunction(); + if (mFunc == nullptr || !mFunc->GetAttr(FUNCATTR_alias)) { + return; + } + if (mFunc->GetAttr(FUNCATTR_extern)) { + Emit(asmInfo->GetGlobal()).Emit(mFunc->GetName()).Emit("\n"); + } + auto &aliasPrefix = mFunc->GetAttr(FUNCATTR_weakref) ? 
asmInfo->GetWeakref() : asmInfo->GetSet();
+  Emit(aliasPrefix);
+  Emit(sym.GetName()).Emit(",").Emit(mFunc->GetAttrs().GetAliasFuncName()).Emit("\n");
+}
+
+void Emitter::EmitHugeSoRoutines(bool lastRoutine) {
+  if (!lastRoutine && (javaInsnCount < (static_cast(hugeSoSeqence) *
+      static_cast(kHugeSoInsnCountThreshold)))) {
+    return;
+  }
+  for (auto &target : hugeSoTargets) {
+    (void)Emit("\t.section\t." + std::string(namemangler::kMuidJavatextPrefixStr) + ",\"ax\"\n");
+#if TARGX86 || TARGX86_64
+    Emit("\t.align\t8\n");
+#else
+    Emit("\t.align 3\n");
+#endif
+    std::string routineName = target + HugeSoPostFix();
+    Emit("\t.type\t" + routineName + ", %function\n");
+    Emit(routineName + ":\n");
+    Emit("\tadrp\tx17, :got:" + target + "\n");
+    Emit("\tldr\tx17, [x17, :got_lo12:" + target + "]\n");
+    Emit("\tbr\tx17\n");
+    javaInsnCount += kSizeOfHugesoRoutine;
+  }
+  hugeSoTargets.clear();
+  ++hugeSoSeqence;
+}
+
+void ImmOperand::Dump() const {
+  LogInfo::MapleLogger() << "imm:" << value;
+}
+
+void LabelOperand::Dump() const {
+  LogInfo::MapleLogger() << "label:" << labelIndex;
+}
+} /* namespace maplebe */
diff --git a/src/mapleall/maple_be/src/cg/global.cpp b/src/mapleall/maple_be/src/cg/global.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..3801745db379d8eb1ff5c9c1a12b432f06c55f81
--- /dev/null
+++ b/src/mapleall/maple_be/src/cg/global.cpp
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+#if TARGAARCH64
+#include "aarch64_global.h"
+#elif TARGRISCV64
+#include "riscv64_global.h"
+#endif
+#if TARGARM32
+#include "arm32_global.h"
+#endif
+#include "reaching.h"
+#include "cgfunc.h"
+#include "live.h"
+/*
+ * This phase does some optimizations using the use-def chain and def-use chain.
+ * Each function in Run() is an optimization. It mainly includes 2 parts:
+ * 1. find the number of valid bits for a register by finding the definition insn of the register,
+ *    and then using the valid bits to delete redundant insns.
+ * 2. copy propagate:
+ *    a. forward copy propagate
+ *       this optimization aims to optimize the following:
+ *         mov x100, x200;
+ *         BBs:
+ *         ...
+ *         mOp ..., x100   // multiple sites that use x100
+ *       =>
+ *         mov x200, x200
+ *         BBs:
+ *         ...
+ *         mOp ..., x200   // multiple sites that used x100
+ *    b. backward copy propagate
+ *       this optimization aims to optimize the following:
+ *         mOp x200, ...   // Define insn of x200
+ *         ...
+ *         mOp ..., x200   // use site of x200
+ *         mov x100, x200;
+ *       =>
+ *         mOp x100, ...   // Define insn of x200
+ *         ...
+ *         mOp ..., x100   // use site of x200
+ *         mov x100, x100;
+ *
+ * NOTE: after an insn is modified, the UD-chain and DU-chain must be maintained by the modifying code itself.
+ *       Currently several common interfaces have been implemented in RD, but they must be used properly;
+ *       specific instructions for their use can be found at the beginning of the corresponding function.
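+ *
+ * (Illustrative note: the forward rewrite above is only safe if x200 is not redefined between
+ * the "mov x100, x200" and each rewritten use, and the backward rewrite is only safe if x100 is
+ * not used or redefined between the define insn and the final mov; this is exactly the kind of
+ * information the UD/DU chains provide, which is why they must stay consistent after every change.)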
+ */ +namespace maplebe { +using namespace maple; + +bool CgGlobalOpt::PhaseRun(maplebe::CGFunc &f) { + ReachingDefinition *reachingDef = nullptr; + LiveAnalysis *live = nullptr; + if (Globals::GetInstance()->GetOptimLevel() >= CGOptions::kLevel2) { + reachingDef = GET_ANALYSIS(CgReachingDefinition, f); + live = GET_ANALYSIS(CgLiveAnalysis, f); + } + if (reachingDef == nullptr || !f.GetRDStatus()) { + GetAnalysisInfoHook()->ForceEraseAnalysisPhase(f.GetUniqueID(), &CgReachingDefinition::id); + return false; + } + reachingDef->SetAnalysisMode(kRDAllAnalysis); + GlobalOpt *globalOpt = nullptr; +#if TARGAARCH64 || TARGRISCV64 + globalOpt = GetPhaseAllocator()->New(f); +#endif +#if TARGARM32 + globalOpt = GetPhaseAllocator()->New(f); +#endif + globalOpt->Run(); + if (live != nullptr) { + live->ClearInOutDataInfo(); + } + return true; +} + +void CgGlobalOpt::GetAnalysisDependence(maple::AnalysisDep &aDep) const { + aDep.AddRequired(); + aDep.AddRequired(); + aDep.PreservedAllExcept(); +} +MAPLE_TRANSFORM_PHASE_REGISTER_CANSKIP(CgGlobalOpt, globalopt) + +} /* namespace maplebe */ diff --git a/src/mapleall/maple_be/src/cg/global_schedule.cpp b/src/mapleall/maple_be/src/cg/global_schedule.cpp new file mode 100644 index 0000000000000000000000000000000000000000..efab5310db4399defd033645f2d9dd25dc5202c1 --- /dev/null +++ b/src/mapleall/maple_be/src/cg/global_schedule.cpp @@ -0,0 +1,54 @@ +/* +* Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. +* +* OpenArkCompiler is licensed under Mulan PSL v2. +* You can use this software according to the terms and conditions of the Mulan PSL v2. +* You may obtain a copy of Mulan PSL v2 at: +* +* http://license.coscl.org.cn/MulanPSL2 +* +* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +* FIT FOR A PARTICULAR PURPOSE. +* See the Mulan PSL v2 for more details. 
+*/ + +#include "global_schedule.h" +#include "optimize_common.h" +#include "aarch64_data_dep_base.h" +#include "cg.h" + +namespace maplebe { +void GlobalSchedule::Run() { + FCDG *fcdg = cda.GetFCDG(); + CHECK_FATAL(fcdg != nullptr, "control dependence analysis failed"); + cda.GenerateFCDGDot(); + cda.GenerateCFGDot(); + DotGenerator::GenerateDot("globalsched", cgFunc, cgFunc.GetMirModule(), + true, cgFunc.GetName()); + for (auto region : fcdg->GetAllRegions()) { + if (region == nullptr) { + continue; + } + idda.Run(*region, dataNodes); + idda.GenerateInterDDGDot(dataNodes); + } +} + +void CgGlobalSchedule::GetAnalysisDependence(maple::AnalysisDep &aDep) const { + aDep.AddRequired(); +} + +bool CgGlobalSchedule::PhaseRun(maplebe::CGFunc &f) { + MemPool *gsMemPool = GetPhaseMemPool(); + ControlDepAnalysis *cda = GET_ANALYSIS(CgControlDepAnalysis, f); + MAD *mad = Globals::GetInstance()->GetMAD(); + // Need move to target + auto *ddb = gsMemPool->New(*gsMemPool, f, *mad); + auto *idda = gsMemPool->New(f, *gsMemPool, *ddb); + auto *globalSched = gsMemPool->New(*gsMemPool, f, *cda, *idda); + globalSched->Run(); + return true; +} +MAPLE_TRANSFORM_PHASE_REGISTER_CANSKIP(CgGlobalSchedule, globalschedule) +} /* namespace maplebe */ diff --git a/src/mapleall/maple_be/src/cg/ico.cpp b/src/mapleall/maple_be/src/cg/ico.cpp new file mode 100644 index 0000000000000000000000000000000000000000..078260aeb49139edc3cb2ea4af4f2c358ff92cfd --- /dev/null +++ b/src/mapleall/maple_be/src/cg/ico.cpp @@ -0,0 +1,91 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "ico.h" +#include "cg_option.h" +#ifdef TARGAARCH64 +#include "aarch64_ico.h" +#include "aarch64_isa.h" +#include "aarch64_insn.h" +#elif TARGRISCV64 +#include "riscv64_ico.h" +#include "riscv64_isa.h" +#include "riscv64_insn.h" +#elif TARGARM32 +#include "arm32_ico.h" +#include "arm32_isa.h" +#include "arm32_insn.h" +#endif +#include "cg.h" + +/* + * This phase implements if-conversion optimization, + * which tries to convert conditional branches into cset/csel instructions + */ +#define ICO_DUMP_NEWPM CG_DEBUG_FUNC(f) +namespace maplebe { +Insn *ICOPattern::FindLastCmpInsn(BB &bb) const { + if (bb.GetKind() != BB::kBBIf) { + return nullptr; + } + FOR_BB_INSNS_REV(insn, (&bb)) { + if (cgFunc->GetTheCFG()->GetInsnModifier()->IsCompareInsn(*insn)) { + return insn; + } + } + return nullptr; +} + +std::vector ICOPattern::GetLabelOpnds(Insn &insn) const { + std::vector labelOpnds; + for (uint32 i = 0; i < insn.GetOperandSize(); i++) { + if (insn.GetOperand(i).IsLabelOpnd()) { + labelOpnds.emplace_back(static_cast(&insn.GetOperand(i))); + } + } + return labelOpnds; +} + +bool CgIco::PhaseRun(maplebe::CGFunc &f) { + LiveAnalysis *live = GET_ANALYSIS(CgLiveAnalysis, f); + if (ICO_DUMP_NEWPM) { + DotGenerator::GenerateDot("ico-before", f, f.GetMirModule()); + } + MemPool *memPool = GetPhaseMemPool(); + IfConversionOptimizer *ico = nullptr; +#if TARGAARCH64 || TARGRISCV64 + ico = memPool->New(f, *memPool); +#endif +#if TARGARM32 + ico = memPool->New(f, *memPool); +#endif + const std::string &funcClass = f.GetFunction().GetBaseClassName(); + const std::string &funcName = f.GetFunction().GetBaseFuncName(); + std::string name = funcClass + funcName; + ico->Run(name); + if (ICO_DUMP_NEWPM) { + DotGenerator::GenerateDot("ico-after", f, f.GetMirModule()); + } + /* the live range info may changed, so invalid the info. */ + if (live != nullptr) { + live->ClearInOutDataInfo(); + } + return false; +} +void CgIco::GetAnalysisDependence(maple::AnalysisDep &aDep) const { + aDep.AddRequired(); + aDep.PreservedAllExcept(); +} +MAPLE_TRANSFORM_PHASE_REGISTER_CANSKIP(CgIco, ico) +} /* namespace maplebe */ diff --git a/src/mapleall/maple_be/src/cg/insn.cpp b/src/mapleall/maple_be/src/cg/insn.cpp new file mode 100644 index 0000000000000000000000000000000000000000..a74c72254a4682fb2a9ff05b1867b6b62efacadb --- /dev/null +++ b/src/mapleall/maple_be/src/cg/insn.cpp @@ -0,0 +1,350 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ + +#include "insn.h" +#include "isa.h" +#include "cg.h" +namespace maplebe { +bool Insn::IsMachineInstruction() const { + return md && md->IsPhysicalInsn() && Globals::GetInstance()->GetTarget()->IsTargetInsn(mOp); +} +/* phi is not physical insn */ +bool Insn::IsPhi() const { + return md ? md->IsPhi() : false; +} +bool Insn::IsLoad() const { + return md ? md->IsLoad() : false; +} +bool Insn::IsStore() const { + return md ? md->IsStore() : false; +} +bool Insn::IsMove() const { + return md ? 
md->IsMove() : false;
+}
+bool Insn::IsBranch() const {
+  return md ? md->IsBranch() : false;
+}
+bool Insn::IsCondBranch() const {
+  return md ? md->IsCondBranch() : false;
+}
+bool Insn::IsUnCondBranch() const {
+  return md ? md->IsUnCondBranch() : false;
+}
+bool Insn::IsBasicOp() const {
+  return md ? md->IsBasicOp() : false;
+}
+bool Insn::IsConversion() const {
+  return md ? md->IsConversion() : false;
+}
+bool Insn::IsUnaryOp() const {
+  return md ? md->IsUnaryOp() : false;
+}
+bool Insn::IsShift() const {
+  return md ? md->IsShift() : false;
+}
+bool Insn::IsCall() const {
+  return md ? md->IsCall() : false;
+}
+bool Insn::IsTailCall() const {
+  return md ? md->IsTailCall() : false;
+}
+bool Insn::IsAsmInsn() const {
+  return md ? md->IsInlineAsm() : false;
+}
+bool Insn::IsDMBInsn() const {
+  return md ? md->IsDMB() : false;
+}
+bool Insn::IsAtomic() const {
+  return md ? md->IsAtomic() : false;
+}
+bool Insn::IsVolatile() const {
+  return md ? md->IsVolatile() : false;
+}
+bool Insn::IsMemAccessBar() const {
+  return md ? md->IsMemAccessBar() : false;
+}
+bool Insn::IsMemAccess() const {
+  return md ? md->IsMemAccess() : false;
+}
+bool Insn::CanThrow() const {
+  return md ? md->CanThrow() : false;
+}
+bool Insn::IsVectorOp() const {
+  return md ? md->IsVectorOp() : false;
+}
+bool Insn::HasLoop() const {
+  return md ? md->HasLoop() : false;
+}
+uint32 Insn::GetLatencyType() const {
+  return md ? md->GetLatencyType() : 0;
+}
+uint32 Insn::GetAtomicNum() const {
+  return md ? md->GetAtomicNum() : 0;
+}
+bool Insn::IsSpecialIntrinsic() const {
+  return md ? md->IsSpecialIntrinsic() : false;
+}
+bool Insn::IsLoadPair() const {
+  return md ? md->IsLoadPair() : false;
+}
+bool Insn::IsStorePair() const {
+  return md ? md->IsStorePair() : false;
+}
+bool Insn::IsLoadStorePair() const {
+  return md ? md->IsLoadStorePair() : false;
+}
+bool Insn::IsLoadLabel() const {
+  return md && md->IsLoad() && GetOperand(kInsnSecondOpnd).GetKind() == Operand::kOpdBBAddress;
+}
+bool Insn::OpndIsDef(uint32 id) const {
+  return md ? md->GetOpndDes(id)->IsDef() : false;
+}
+bool Insn::OpndIsUse(uint32 id) const {
+  return md ?
md->GetOpndDes(id)->IsUse() : false; +} +bool Insn::IsClinit() const { + return Globals::GetInstance()->GetTarget()->IsClinitInsn(mOp); +} +bool Insn::IsComment() const { + return mOp == abstract::MOP_comment && !md->IsPhysicalInsn(); +} + +bool Insn::IsImmaterialInsn() const { + return IsComment(); +} +Operand *Insn::GetMemOpnd() const { + for (uint32 i = 0; i < opnds.size(); ++i) { + Operand &opnd = GetOperand(i); + if (opnd.IsMemoryAccessOperand()) { + return &opnd; + } + } + return nullptr; +} +void Insn::SetMemOpnd(MemOperand *memOpnd) { + for (uint32 i = 0; i < static_cast(opnds.size()); ++i) { + Operand &opnd = GetOperand(i); + if (opnd.IsMemoryAccessOperand()) { + SetOperand(i, *memOpnd); + return; + } + } +} + +bool Insn::IsRegDefined(regno_t regNO) const { + return GetDefRegs().count(regNO); +} + +std::set Insn::GetDefRegs() const { + std::set defRegNOs; + size_t opndNum = opnds.size(); + for (uint32 i = 0; i < opndNum; ++i) { + Operand &opnd = GetOperand(i); + auto *regProp = md->opndMD[i]; + bool isDef = regProp->IsDef(); + if (!isDef && !opnd.IsMemoryAccessOperand()) { + continue; + } + if (opnd.IsList()) { + for (auto *op : static_cast(opnd).GetOperands()) { + ASSERT(op != nullptr, "invalid operand in list operand"); + defRegNOs.emplace(op->GetRegisterNumber()); + } + } else if (opnd.IsMemoryAccessOperand()) { + auto &memOpnd = static_cast(opnd); + RegOperand *base = memOpnd.GetBaseRegister(); + if (base != nullptr) { + if (memOpnd.GetAddrMode() == MemOperand::kAddrModeBOi && + (memOpnd.IsPostIndexed() || memOpnd.IsPreIndexed())) { + ASSERT(!defRegNOs.count(base->GetRegisterNumber()), "duplicate def in one insn"); + defRegNOs.emplace(base->GetRegisterNumber()); + } + } + } else if (opnd.IsConditionCode() || opnd.IsRegister()) { + defRegNOs.emplace(static_cast(opnd).GetRegisterNumber()); + } + } + return defRegNOs; +} + +#if DEBUG +void Insn::Check() const { + if (!md) { + CHECK_FATAL(false, " need machine description for target insn "); + } + for (uint32 i = 0; i < GetOperandSize(); ++i) { + Operand &opnd = GetOperand(i); + if (opnd.GetKind() != md->GetOpndDes(i)->GetOperandType()) { + CHECK_FATAL(false, " operand type does not match machine description "); + } + } +} +#endif + +Insn *Insn::Clone(MemPool &memPool) const { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *Insn::GetCallTargetOperand() const { + ASSERT(IsCall() || IsTailCall(), "should be call"); + return &GetOperand(kInsnFirstOpnd); +} + +ListOperand *Insn::GetCallArgumentOperand() { + ASSERT(IsCall(), "should be call"); + ASSERT(GetOperand(1).IsList(), "should be list"); + return &static_cast(GetOperand(kInsnSecondOpnd)); +} + + +void Insn::CommuteOperands(uint32 dIndex, uint32 sIndex) { + Operand *tempCopy = opnds[sIndex]; + opnds[sIndex] = opnds[dIndex]; + opnds[dIndex] = tempCopy; +} + +uint32 Insn::GetBothDefUseOpnd() const { + size_t opndNum = opnds.size(); + uint32 opndIdx = kInsnMaxOpnd; + if (md->GetAtomicNum() > 1) { + return opndIdx; + } + for (uint32 i = 0; i < opndNum; ++i) { + auto *opndProp = md->GetOpndDes(i); + if (opndProp->IsRegUse() && opndProp->IsDef()) { + ASSERT(opndIdx == kInsnMaxOpnd, "Do not support yet"); + opndIdx = i; + } + if (opnds[i]->IsMemoryAccessOperand()) { + auto *MemOpnd = static_cast(opnds[i]); + if (!MemOpnd->IsIntactIndexed()) { + ASSERT(opndIdx == kInsnMaxOpnd, "Do not support yet"); + opndIdx = i; + } + } + } + return opndIdx; +} + +uint32 Insn::GetMemoryByteSize() const { + ASSERT(IsMemAccess(), "must be memory access insn"); + uint32 res = 0; + for (size_t 
i = 0 ; i < opnds.size(); ++i) { + if (md->GetOpndDes(i)->GetOperandType() == Operand::kOpdMem) { + res = md->GetOpndDes(i)->GetSize(); + } + } + ASSERT(res, "cannot access empty memory"); + if (IsLoadStorePair()) { + res = res << 1; + } + res = res >> 3; + return res; +} + +bool Insn::ScanReg(regno_t regNO) const { + uint32 opndNum = GetOperandSize(); + for (uint32 i = 0; i < opndNum; ++i) { + Operand &opnd = GetOperand(i); + if (opnd.IsList()) { + auto &listOpnd = static_cast(opnd); + for (auto listElem : listOpnd.GetOperands()) { + auto *regOpnd = static_cast(listElem); + ASSERT(regOpnd != nullptr, "parameter operand must be RegOperand"); + if (regNO == regOpnd->GetRegisterNumber()) { + return true; + } + } + } else if (opnd.IsMemoryAccessOperand()) { + auto &memOpnd = static_cast(opnd); + RegOperand *base = memOpnd.GetBaseRegister(); + RegOperand *index = memOpnd.GetIndexRegister(); + if ((base != nullptr && base->GetRegisterNumber() == regNO) || + (index != nullptr && index->GetRegisterNumber() == regNO)) { + return true; + } + } else if (opnd.IsRegister()) { + if (static_cast(opnd).GetRegisterNumber() == regNO) { + return true; + } + } + } + return false; +} + +bool Insn::MayThrow() const { + if (md->IsMemAccess() && !IsLoadLabel()) { + auto *memOpnd = static_cast(GetMemOpnd()); + ASSERT(memOpnd != nullptr, "CG invalid memory operand."); + if (memOpnd->IsStackMem()) { + return false; + } + } + return md->CanThrow(); +} + +void Insn::SetMOP(const InsnDesc &idesc) { + mOp = idesc.GetOpc(); + md = &idesc; +} + +void Insn::Dump() const { +ASSERT(md != nullptr, "md should not be nullptr"); + LogInfo::MapleLogger() << "< " << GetId() << " > "; + LogInfo::MapleLogger() << md->name << "(" << mOp << ")"; + + for (uint32 i = 0; i < GetOperandSize(); ++i) { + Operand &opnd = GetOperand(i); + LogInfo::MapleLogger() << " (opnd" << i << ": "; + Globals::GetInstance()->GetTarget()->DumpTargetOperand(opnd, *md->GetOpndDes(i)); + LogInfo::MapleLogger() << ")"; + } + + if (ssaImplicitDefOpnd != nullptr) { + LogInfo::MapleLogger() << " (implicitDefOpnd: "; + std::array prims = { "U", "R", "V", "C", "X", "Vra" }; + uint32 regType = ssaImplicitDefOpnd->GetRegisterType(); + ASSERT(regType < kRegTyLast, "unexpected regType"); + LogInfo::MapleLogger() << (ssaImplicitDefOpnd->IsVirtualRegister() ? "vreg:" : " reg:") << prims[regType]; + regno_t reg = ssaImplicitDefOpnd->GetRegisterNumber(); + reg = ssaImplicitDefOpnd->IsVirtualRegister() ? 
reg : (reg - 1); + LogInfo::MapleLogger() << reg; + uint32 vb = ssaImplicitDefOpnd->GetValidBitsNum(); + if (ssaImplicitDefOpnd->GetValidBitsNum() != ssaImplicitDefOpnd->GetSize()) { + LogInfo::MapleLogger() << " Vb: [" << vb << "]"; + } + LogInfo::MapleLogger() << " Sz: [" << ssaImplicitDefOpnd->GetSize() << "]" ; + LogInfo::MapleLogger() << ")"; + } + + if (IsVectorOp()) { + auto *vInsn = static_cast(this); + if (vInsn->GetNumOfRegSpec() != 0) { + LogInfo::MapleLogger() << " (vecSpec: " << vInsn->GetNumOfRegSpec() << ")"; + } + } + LogInfo::MapleLogger() << "\n"; +} + +VectorRegSpec *VectorInsn::GetAndRemoveRegSpecFromList() { + if (regSpecList.size() == 0) { + VectorRegSpec *vecSpec = CG::GetCurCGFuncNoConst()->GetMemoryPool()->New() ; + return vecSpec; + } + VectorRegSpec *ret = regSpecList.back(); + regSpecList.pop_back(); + return ret; +} +} diff --git a/src/mapleall/maple_be/src/cg/instruction_selection.cpp b/src/mapleall/maple_be/src/cg/instruction_selection.cpp new file mode 100644 index 0000000000000000000000000000000000000000..df804ae7dd0788c6d6d47b18a73c706443ade05c --- /dev/null +++ b/src/mapleall/maple_be/src/cg/instruction_selection.cpp @@ -0,0 +1,19 @@ +/* +* Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. +* +* OpenArkCompiler is licensed under Mulan PSL v2. +* You can use this software according to the terms and conditions of the Mulan PSL v2. +* You may obtain a copy of Mulan PSL v2 at: +* +* http://license.coscl.org.cn/MulanPSL2 +* +* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +* FIT FOR A PARTICULAR PURPOSE. +* See the Mulan PSL v2 for more details. +*/ + +#include "instruction_selection.h" +namespace maplebe { +bool CgIsel::PhaseRun(maplebe::CGFunc &f) {} +} /* namespace maplebe */ \ No newline at end of file diff --git a/src/mapleall/maple_be/src/cg/isa.cpp b/src/mapleall/maple_be/src/cg/isa.cpp new file mode 100644 index 0000000000000000000000000000000000000000..966ab3de81445f0133713260e7a616a3e22594e3 --- /dev/null +++ b/src/mapleall/maple_be/src/cg/isa.cpp @@ -0,0 +1,31 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ + +#include "isa.h" +namespace maplebe { +#define DEFINE_MOP(op, ...) const OpndDesc OpndDesc::op = __VA_ARGS__; +#include "operand.def" +#undef DEFINE_MOP +#define DEFINE_MOP(op, ...) {abstract::op, __VA_ARGS__}, +const InsnDesc InsnDesc::abstractId[abstract::kMopLast] = { +#include "abstract_mmir.def" +}; +#undef DEFINE_MOP + +bool InsnDesc::IsSame(const InsnDesc &left, + std::function cmp) const { + return cmp == nullptr ? 
false : cmp(left, *this); +} +} diff --git a/src/mapleall/maple_be/src/cg/isel.cpp b/src/mapleall/maple_be/src/cg/isel.cpp new file mode 100644 index 0000000000000000000000000000000000000000..45a030c8f982530876f4fbbed9e136a5020ef518 --- /dev/null +++ b/src/mapleall/maple_be/src/cg/isel.cpp @@ -0,0 +1,1524 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ + +#include "isel.h" +#include +#include +#include "factory.h" +#include "cg.h" + +namespace maplebe { +/* register, imm , memory, cond */ +#define DEF_FAST_ISEL_MAPPING_INT(SIZE) \ +MOperator fastIselMapI##SIZE[Operand::OperandType::kOpdPhi][Operand::OperandType::kOpdPhi] = { \ +{abstract::MOP_copy_rr_##SIZE, abstract::MOP_copy_ri_##SIZE, abstract::MOP_load_##SIZE, abstract::MOP_undef}, \ +{abstract::MOP_undef, abstract::MOP_undef, abstract::MOP_undef, abstract::MOP_undef}, \ +{abstract::MOP_str_##SIZE, abstract::MOP_undef, abstract::MOP_undef, abstract::MOP_undef}, \ +{abstract::MOP_undef, abstract::MOP_undef, abstract::MOP_undef, abstract::MOP_undef}, \ +}; +#define DEF_FAST_ISEL_MAPPING_FLOAT(SIZE) \ +MOperator fastIselMapF##SIZE[Operand::OperandType::kOpdPhi][Operand::OperandType::kOpdPhi] = { \ +{abstract::MOP_copy_ff_##SIZE, abstract::MOP_copy_fi_##SIZE, abstract::MOP_load_f_##SIZE, abstract::MOP_undef}, \ +{abstract::MOP_undef, abstract::MOP_undef, abstract::MOP_undef, abstract::MOP_undef}, \ +{abstract::MOP_str_f_##SIZE, abstract::MOP_undef, abstract::MOP_undef, abstract::MOP_undef}, \ +{abstract::MOP_undef, abstract::MOP_undef, abstract::MOP_undef, abstract::MOP_undef}, \ +}; + +DEF_FAST_ISEL_MAPPING_INT(8) +DEF_FAST_ISEL_MAPPING_INT(16) +DEF_FAST_ISEL_MAPPING_INT(32) +DEF_FAST_ISEL_MAPPING_INT(64) +DEF_FAST_ISEL_MAPPING_FLOAT(8) +DEF_FAST_ISEL_MAPPING_FLOAT(16) +DEF_FAST_ISEL_MAPPING_FLOAT(32) +DEF_FAST_ISEL_MAPPING_FLOAT(64) + +#define DEF_SEL_MAPPING_TBL(SIZE) \ +MOperator SelMapping##SIZE(bool isInt, uint32 x, uint32 y) { \ + return isInt ? fastIselMapI##SIZE[x][y] : fastIselMapF##SIZE[x][y]; \ +} +#define USE_SELMAPPING_TBL(SIZE) \ +{SIZE, SelMapping##SIZE} + +DEF_SEL_MAPPING_TBL(8); +DEF_SEL_MAPPING_TBL(16); +DEF_SEL_MAPPING_TBL(32); +DEF_SEL_MAPPING_TBL(64); + +std::map> fastIselMappingTable = { + USE_SELMAPPING_TBL(8), + USE_SELMAPPING_TBL(16), + USE_SELMAPPING_TBL(32), + USE_SELMAPPING_TBL(64)}; + +MOperator GetFastIselMop(Operand::OperandType dTy, Operand::OperandType sTy, PrimType type) { + uint32 bitSize = GetPrimTypeBitSize(type); + bool isInteger = IsPrimitiveInteger(type); + auto tableDriven = fastIselMappingTable.find(bitSize); + if (tableDriven != fastIselMappingTable.end()) { + auto funcIt = tableDriven->second; + return funcIt(isInteger, dTy, sTy); + } else { + CHECK_FATAL(false, "unsupport type"); + } + return abstract::MOP_undef; +} + +#define DEF_EXTEND_MAPPING_TBL(TYPE) [](bool isSigned) -> MOperator { \ + return isSigned ? 
abstract::MOP_sext_rr_##TYPE : abstract::MOP_zext_rr_##TYPE; \ +} +using fromToTy = std::pair; /* std::pair */ +#define DEF_USE_EXTEND_MAPPING_TBL(FROMSIZE, TOSIZE) \ +{{k##FROMSIZE##BitSize, k##TOSIZE##BitSize}, DEF_EXTEND_MAPPING_TBL(TOSIZE##_##FROMSIZE)} + +std::map> fastCvtMappingTableI = { + DEF_USE_EXTEND_MAPPING_TBL(8, 16), /* Extend Mapping */ + DEF_USE_EXTEND_MAPPING_TBL(8, 32), + DEF_USE_EXTEND_MAPPING_TBL(8, 64), + DEF_USE_EXTEND_MAPPING_TBL(16, 32), + DEF_USE_EXTEND_MAPPING_TBL(16, 64), + DEF_USE_EXTEND_MAPPING_TBL(32, 64), +}; +#undef DEF_USE_EXTEND_MAPPING_TBL +#undef DEF_EXTEND_MAPPING_TBL + +static MOperator GetFastCvtMopI(uint32 fromSize, uint32 toSize, bool isSigned) { + if (toSize < k8BitSize || toSize > k64BitSize) { + CHECK_FATAL(false, "unsupport type"); + } + if (fromSize < k8BitSize || fromSize > k64BitSize) { + CHECK_FATAL(false, "unsupport type"); + } + /* Extend: fromSize < toSize */ + auto tableDriven = fastCvtMappingTableI.find({fromSize, toSize}); + if (tableDriven == fastCvtMappingTableI.end()) { + CHECK_FATAL(false, "unsupport cvt"); + } + MOperator mOp = tableDriven->second(isSigned); + if (mOp == abstract::MOP_undef) { + CHECK_FATAL(false, "unsupport cvt"); + } + return mOp; +} + +/* + * fast get MOperator + * such as : and, or, shl ... + */ +#define DEF_MOPERATOR_MAPPING_FUNC(TYPE) [](uint32 bitSize)->MOperator { \ + /* 8-bits, 16-bits, 32-bits, 64-bits */ \ + constexpr static std::array fastMapping_##TYPE = \ + {abstract::MOP_##TYPE##_8, abstract::MOP_##TYPE##_16, abstract::MOP_##TYPE##_32, abstract::MOP_##TYPE##_64}; \ + return fastMapping_##TYPE[GetBitIndex(bitSize)]; \ +} + +#define DEF_FLOAT_MOPERATOR_MAPPING_FUNC(TYPE) [](uint32 bitSize)->MOperator { \ + /* 8-bits, 16-bits, 32-bits, 64-bits */ \ + constexpr static std::array fastMapping_f_##TYPE = \ + {abstract::MOP_##TYPE##_f_8, abstract::MOP_##TYPE##_f_16, \ + abstract::MOP_##TYPE##_f_32, abstract::MOP_##TYPE##_f_64}; \ + return fastMapping_f_##TYPE[GetBitIndex(bitSize)]; \ +} + +void HandleDassign(StmtNode &stmt, MPISel &iSel) { + ASSERT(stmt.GetOpCode() == OP_dassign, "expect dassign"); + auto &dassignNode = static_cast(stmt); + BaseNode *rhs = dassignNode.GetRHS(); + ASSERT(rhs != nullptr, "get rhs of dassignNode failed"); + Operand* opndRhs = iSel.HandleExpr(dassignNode, *rhs); + if (opndRhs == nullptr) { + return; + } + iSel.SelectDassign(dassignNode, *opndRhs); +} + +void HandleDassignoff(StmtNode &stmt, MPISel &iSel) { + auto &dassignoffNode = static_cast(stmt); + BaseNode *rhs = dassignoffNode.GetRHS(); + CHECK_FATAL(rhs->GetOpCode() == OP_constval, "dassignoffNode without constval"); + Operand *opnd0 = iSel.HandleExpr(dassignoffNode, *rhs); + iSel.SelectDassignoff(dassignoffNode, *opnd0); +} + +void HandleIassign(StmtNode &stmt, MPISel &iSel) { + ASSERT(stmt.GetOpCode() == OP_iassign, "expect iassign"); + auto &iassignNode = static_cast(stmt); + BaseNode *rhs = iassignNode.GetRHS(); + ASSERT(rhs != nullptr, "null ptr check"); + Operand *opndRhs = iSel.HandleExpr(stmt, *rhs); + BaseNode *addr = iassignNode.Opnd(0); + ASSERT(addr != nullptr, "null ptr check"); + Operand *opndAddr = iSel.HandleExpr(stmt, *addr); + if (opndRhs == nullptr || opndAddr == nullptr) { + return; + } + if (rhs->GetPrimType() != PTY_agg) { + iSel.SelectIassign(iassignNode, *opndAddr, *opndRhs); + } else { + iSel.SelectAggIassign(iassignNode, *opndAddr, *opndRhs); + } +} + +void HandleRegassign(StmtNode &stmt, MPISel &iSel) { + ASSERT(stmt.GetOpCode() == OP_regassign, "expect regAssign"); + auto ®AssignNode = 
static_cast(stmt); + BaseNode *operand = regAssignNode.Opnd(0); + ASSERT(operand != nullptr, "get operand of regassignNode failed"); + Operand *opnd0 = iSel.HandleExpr(regAssignNode, *operand); + iSel.SelectRegassign(regAssignNode, *opnd0); +} + +void HandleIassignoff(StmtNode &stmt, MPISel &iSel) { + auto &iassignoffNode = static_cast(stmt); + iSel.SelectIassignoff(iassignoffNode); +} + +void HandleLabel(StmtNode &stmt, const MPISel &iSel) { + CGFunc *cgFunc = iSel.GetCurFunc(); + ASSERT(stmt.GetOpCode() == OP_label, "error"); + auto &label = static_cast(stmt); + BB *newBB = cgFunc->StartNewBBImpl(false, label); + newBB->AddLabel(label.GetLabelIdx()); + cgFunc->SetLab2BBMap(static_cast(newBB->GetLabIdx()), *newBB); + cgFunc->SetCurBB(*newBB); + + if (cgFunc->GetCleanupLabel() == &label) { + cgFunc->SetCleanupBB(*newBB); + } else if (cgFunc->GetReturnLabel() == &label) { + cgFunc->SetReturnBB(*newBB); + } +} + +void HandleGoto(StmtNode &stmt, MPISel &iSel) { + CGFunc *cgFunc = iSel.GetCurFunc(); + cgFunc->UpdateFrequency(stmt); + auto &gotoNode = static_cast(stmt); + ASSERT(gotoNode.GetOpCode() == OP_goto, "expect goto"); + cgFunc->SetCurBBKind(BB::kBBGoto); + iSel.SelectGoto(gotoNode); + cgFunc->SetCurBB(*cgFunc->StartNewBB(gotoNode)); + ASSERT(&stmt == &gotoNode, "stmt must be same as gotoNoe"); + if ((gotoNode.GetNext() != nullptr) && (gotoNode.GetNext()->GetOpCode() != OP_label)) { + ASSERT(cgFunc->GetCurBB()->GetPrev()->GetLastStmt() == &stmt, "check the relation between BB and stmt"); + } +} + +void HandleIntrinCall(StmtNode &stmt, MPISel &iSel) { + auto &call = static_cast(stmt); + iSel.SelectIntrinCall(call); +} + +void HandleRangeGoto(StmtNode &stmt, MPISel &iSel) { + CGFunc *cgFunc = iSel.GetCurFunc(); + auto &rangeGotoNode = static_cast(stmt); + ASSERT(rangeGotoNode.GetOpCode() == OP_rangegoto, "expect rangegoto"); + BaseNode *srcNode = rangeGotoNode.Opnd(0); + Operand *srcOpnd = iSel.HandleExpr(rangeGotoNode, *srcNode); + cgFunc->SetCurBBKind(BB::kBBRangeGoto); + iSel.SelectRangeGoto(rangeGotoNode, *srcOpnd); +} + +void HandleIgoto(StmtNode &stmt, MPISel &iSel) { + CGFunc *cgFunc = iSel.GetCurFunc(); + auto &igotoNode = static_cast(stmt); + BaseNode *targetNode = igotoNode.Opnd(0); + Operand *targetOpnd = iSel.HandleExpr(igotoNode, *targetNode); + iSel.SelectIgoto(*targetOpnd); + cgFunc->SetCurBBKind(BB::kBBIgoto); + cgFunc->SetCurBB(*cgFunc->StartNewBB(igotoNode)); +} + +void HandleReturn(StmtNode &stmt, MPISel &iSel) { + CGFunc *cgFunc = iSel.GetCurFunc(); + auto &retNode = static_cast(stmt); + ASSERT(retNode.NumOpnds() <= 1, "NYI return nodes number > 1"); + if (retNode.NumOpnds() != 0) { + Operand *opnd = iSel.HandleExpr(retNode, *retNode.Opnd(0)); + iSel.SelectReturn(retNode, *opnd); + } + iSel.SelectReturn(); + /* return stmt will jump to the ret BB, so curBB is gotoBB */ + cgFunc->SetCurBBKind(BB::kBBGoto); + cgFunc->SetCurBB(*cgFunc->StartNewBB(retNode)); +} + +void HandleComment(StmtNode &stmt [[maybe_unused]], MPISel &iSel [[maybe_unused]]) { + return; +} + +void HandleIcall(StmtNode &stmt, MPISel &iSel) { + ASSERT(stmt.GetOpCode() == OP_icall || stmt.GetOpCode() == OP_icallproto, "error"); + auto &iCallNode = static_cast(stmt); + Operand *opnd0 = iSel.HandleExpr(iCallNode, *iCallNode.Opnd(0)); + iSel.SelectIcall(iCallNode, *opnd0); + iSel.SelectCallCommon(stmt, iSel); +} + +void HandleCall(StmtNode &stmt, MPISel &iSel) { + ASSERT(stmt.GetOpCode() == OP_call, "error"); + auto &callNode = static_cast(stmt); + iSel.SelectCall(callNode); + iSel.SelectCallCommon(stmt, 
iSel); +} + +void HandleCondbr(StmtNode &stmt, MPISel &iSel) { + CGFunc *cgFunc = iSel.GetCurFunc(); + auto &condGotoNode = static_cast(stmt); + BaseNode *condNode = condGotoNode.Opnd(0); + ASSERT(condNode != nullptr, "expect first operand of cond br"); + /* select cmpOp Insn and get the result "opnd0". However, the opnd0 is not used + * in most backend architectures */ + Operand *opnd0 = iSel.HandleExpr(stmt, *condNode); + iSel.SelectCondGoto(condGotoNode, *condNode, *opnd0); + cgFunc->SetCurBB(*cgFunc->StartNewBB(condGotoNode)); +} + +Operand *HandleAddrof(const BaseNode &parent, BaseNode &expr, MPISel &iSel) { + auto &addrofNode = static_cast(expr); + return iSel.SelectAddrof(addrofNode, parent); +} + +Operand *HandleAddroffunc(const BaseNode &parent, BaseNode &expr, MPISel &iSel) { + auto &addrofNode = static_cast(expr); + return iSel.SelectAddrofFunc(addrofNode, parent); +} + +Operand *HandleAddrofLabel(const BaseNode &parent, BaseNode &expr, MPISel &iSel) { + auto &addrofLabelNode = static_cast(expr); + return iSel.SelectAddrofLabel(addrofLabelNode, parent); +} + +Operand *HandleShift(const BaseNode &parent, BaseNode &expr, MPISel &iSel) { + return iSel.SelectShift(static_cast(expr), *iSel.HandleExpr(expr, *expr.Opnd(0)), + *iSel.HandleExpr(expr, *expr.Opnd(1)), parent); +} + +Operand *HandleCvt(const BaseNode &parent, BaseNode &expr, MPISel &iSel) { + return iSel.SelectCvt(parent, static_cast(expr), *iSel.HandleExpr(expr, *expr.Opnd(0))); +} + +Operand *HandleExtractBits(const BaseNode &parent, BaseNode &expr, MPISel &iSel) { + return iSel.SelectExtractbits(parent, static_cast(expr), *iSel.HandleExpr(expr, *expr.Opnd(0))); +} + +Operand *HandleDread(const BaseNode &parent, BaseNode &expr, MPISel &iSel) { + auto &dreadNode = static_cast(expr); + return iSel.SelectDread(parent, dreadNode); +} + +Operand *HandleAdd(const BaseNode &parent, BaseNode &expr, MPISel &iSel) { + return iSel.SelectAdd(static_cast(expr), *iSel.HandleExpr(expr, *expr.Opnd(0)), + *iSel.HandleExpr(expr, *expr.Opnd(1)), parent); +} + +Operand *HandleBior(const BaseNode &parent, BaseNode &expr, MPISel &iSel) { + return iSel.SelectBior(static_cast(expr), *iSel.HandleExpr(expr, *expr.Opnd(0)), + *iSel.HandleExpr(expr, *expr.Opnd(1)), parent); +} + +Operand *HandleBxor(const BaseNode &parent, BaseNode &expr, MPISel &iSel) { + return iSel.SelectBxor(static_cast(expr), *iSel.HandleExpr(expr, *expr.Opnd(0)), + *iSel.HandleExpr(expr, *expr.Opnd(1)), parent); +} + +Operand *HandleSub(const BaseNode &parent, BaseNode &expr, MPISel &iSel) { + return iSel.SelectSub(static_cast(expr), *iSel.HandleExpr(expr, *expr.Opnd(0)), + *iSel.HandleExpr(expr, *expr.Opnd(1)), parent); +} + +Operand *HandleNeg(const BaseNode &parent, BaseNode &expr, MPISel &iSel) { + return iSel.SelectNeg(static_cast(expr), *iSel.HandleExpr(expr, *expr.Opnd(0)), parent); +} + +Operand *HandleDiv(const BaseNode &parent, BaseNode &expr, MPISel &iSel) { + return iSel.SelectDiv(static_cast(expr), *iSel.HandleExpr(expr, *expr.Opnd(0)), + *iSel.HandleExpr(expr, *expr.Opnd(1)), parent); +} + +Operand *HandleRem(const BaseNode &parent, BaseNode &expr, MPISel &iSel) { + return iSel.SelectRem(static_cast(expr), *iSel.HandleExpr(expr, *expr.Opnd(0)), + *iSel.HandleExpr(expr, *expr.Opnd(1)), parent); +} + +Operand *HandleBand(const BaseNode &parent, BaseNode &expr, MPISel &iSel) { + return iSel.SelectBand(static_cast(expr), *iSel.HandleExpr(expr, *expr.Opnd(0)), + *iSel.HandleExpr(expr, *expr.Opnd(1)), parent); +} + +Operand *HandleMpy(const BaseNode &parent, BaseNode 
&expr, MPISel &iSel) { + return iSel.SelectMpy(static_cast(expr), *iSel.HandleExpr(expr, *expr.Opnd(0)), + *iSel.HandleExpr(expr, *expr.Opnd(1)), parent); +} + +Operand *HandleConstStr(const BaseNode &parent [[maybe_unused]], BaseNode &expr, MPISel &iSel) { + auto &constStrNode = static_cast(expr); + return iSel.SelectStrLiteral(constStrNode); +} + +Operand *HandleTrunc(const BaseNode &parent, BaseNode &expr, MPISel &iSel) { + return iSel.SelectCvt(parent, static_cast(expr), *iSel.HandleExpr(expr, *expr.Opnd(0))); +} + +Operand *HandleConstVal(const BaseNode &parent [[maybe_unused]], BaseNode &expr, const MPISel &iSel) { + auto &constValNode = static_cast(expr); + MIRConst *mirConst = constValNode.GetConstVal(); + ASSERT(mirConst != nullptr, "get constval of constvalnode failed"); + if (mirConst->GetKind() == kConstInt) { + auto *mirIntConst = safe_cast(mirConst); + return iSel.SelectIntConst(*mirIntConst, constValNode.GetPrimType()); + } else if (mirConst->GetKind() == kConstDoubleConst) { + auto *mirDoubleConst = safe_cast(mirConst); + return iSel.SelectDoubleConst(*mirDoubleConst, constValNode.GetPrimType()); + } else { + CHECK_FATAL(false, "NIY"); + } + return nullptr; +} + +Operand *HandleRegread(const BaseNode &parent, BaseNode &expr, MPISel &iSel) { + (void)parent; + auto ®ReadNode = static_cast(expr); + /* handle return Val */ + if (regReadNode.GetRegIdx() == -kSregRetval0 || regReadNode.GetRegIdx() == -kSregRetval1) { + return &iSel.ProcessReturnReg(regReadNode.GetPrimType(), -(regReadNode.GetRegIdx())); + } + return iSel.SelectRegread(regReadNode); +} + +Operand *HandleIread(const BaseNode &parent, BaseNode &expr, MPISel &iSel) { + auto &ireadNode = static_cast(expr); + return iSel.SelectIread(parent, ireadNode); +} +Operand *HandleIreadoff(const BaseNode &parent, BaseNode &expr, MPISel &iSel) { + auto &ireadNode = static_cast(expr); + return iSel.SelectIreadoff(parent, ireadNode); +} + +Operand *HandleBnot(const BaseNode &parent, BaseNode &expr, MPISel &iSel) { + return iSel.SelectBnot(static_cast(expr), *iSel.HandleExpr(expr, *expr.Opnd(0)), parent); +} + +void HandleEval(const StmtNode &stmt, MPISel &iSel) { + (void)iSel.HandleExpr(stmt, *static_cast(stmt).Opnd(0)); +} + +Operand *HandleDepositBits(const BaseNode &parent, BaseNode &expr, MPISel &iSel) { + return iSel.SelectDepositBits(static_cast(expr), *iSel.HandleExpr(expr, *expr.Opnd(0)), + *iSel.HandleExpr(expr, *expr.Opnd(1)), parent); +} + +Operand *HandleCmp(const BaseNode &parent, BaseNode &expr, MPISel &iSel) { + // fix opnd type before select insn + PrimType targetPtyp = parent.GetPrimType(); + if (kOpcodeInfo.IsCompare(parent.GetOpCode())) { + targetPtyp = static_cast(parent).GetOpndType(); + } else if (kOpcodeInfo.IsTypeCvt(parent.GetOpCode())) { + targetPtyp = static_cast(parent).FromType(); + } + if (IsPrimitiveInteger(targetPtyp) && targetPtyp != expr.GetPrimType()) { + expr.SetPrimType(targetPtyp); + } + return iSel.SelectCmpOp(static_cast(expr), *iSel.HandleExpr(expr, *expr.Opnd(0)), + *iSel.HandleExpr(expr, *expr.Opnd(1)), parent); +} + +Operand *HandleAbs(const BaseNode &parent, BaseNode &expr, MPISel &iSel) { + return iSel.SelectAbs(static_cast(expr), *iSel.HandleExpr(expr, *expr.Opnd(0))); +} + +Operand *HandleAlloca(const BaseNode &parent, BaseNode &expr, MPISel &iSel) { + return iSel.SelectAlloca(static_cast(expr), *iSel.HandleExpr(expr, *expr.Opnd(0))); +} + +Operand *HandleCGArrayElemAdd(const BaseNode &parent, BaseNode &expr, MPISel &iSel) { + return iSel.SelectCGArrayElemAdd(static_cast(expr), 
parent); +} + +void HandleAsm(StmtNode &stmt, MPISel &iSel) { + iSel.SelectAsm(static_cast(stmt)); +} + +Operand *HandleSelect(const BaseNode &parent, BaseNode &expr, MPISel &iSel) { + /* 0,1,2 represent the first opnd and the second opnd and the third opnd of expr */ + Operand &trueOpnd = *iSel.HandleExpr(expr, *expr.Opnd(1)); + Operand &falseOpnd = *iSel.HandleExpr(expr, *expr.Opnd(2)); + Operand &condOpnd = *iSel.HandleExpr(expr, *expr.Opnd(0)); + if (condOpnd.IsImmediate()) { + return (static_cast(condOpnd).GetValue() == 0) ? &falseOpnd : &trueOpnd; + } + return iSel.SelectSelect(static_cast(expr), condOpnd, trueOpnd, falseOpnd, parent); +} + +Operand *HandleMin(const BaseNode &parent, BaseNode &expr, MPISel &iSel) { + return iSel.SelectMin(static_cast(expr), *iSel.HandleExpr(expr, *expr.Opnd(0)), + *iSel.HandleExpr(expr, *expr.Opnd(1)), parent); +} + +Operand *HandleMax(const BaseNode &parent, BaseNode &expr, MPISel &iSel) { + return iSel.SelectMax(static_cast(expr), *iSel.HandleExpr(expr, *expr.Opnd(0)), + *iSel.HandleExpr(expr, *expr.Opnd(1)), parent); +} +Operand *HandleRetype(const BaseNode &parent, BaseNode &expr, MPISel &iSel) { + return iSel.SelectRetype(static_cast(expr), *iSel.HandleExpr(expr, *expr.Opnd(0))); +} + +Operand *HandleIntrinOp(const BaseNode &parent, BaseNode &expr, MPISel &iSel) { + auto &intrinsicopNode = static_cast(expr); + switch (intrinsicopNode.GetIntrinsic()) { + case INTRN_C_rev16_2: + case INTRN_C_rev_4: + case INTRN_C_rev_8: + return iSel.SelectBswap(intrinsicopNode, *iSel.HandleExpr(expr, *expr.Opnd(0)), parent); + + default: + ASSERT(false, "NIY, unsupported intrinsicop."); + return nullptr; + } +} + +using HandleStmtFactory = FunctionFactory; +using HandleExprFactory = FunctionFactory; +namespace isel { +void InitHandleStmtFactory() { + RegisterFactoryFunction(OP_label, HandleLabel); + RegisterFactoryFunction(OP_dassign, HandleDassign); + RegisterFactoryFunction(OP_dassignoff, HandleDassignoff); + RegisterFactoryFunction(OP_iassign, HandleIassign); + RegisterFactoryFunction(OP_iassignoff, HandleIassignoff); + RegisterFactoryFunction(OP_regassign, HandleRegassign); + RegisterFactoryFunction(OP_return, HandleReturn); + RegisterFactoryFunction(OP_comment, HandleComment); + RegisterFactoryFunction(OP_call, HandleCall); + RegisterFactoryFunction(OP_icall, HandleIcall); + RegisterFactoryFunction(OP_icallproto, HandleIcall); + RegisterFactoryFunction(OP_goto, HandleGoto); + RegisterFactoryFunction(OP_intrinsiccall, HandleIntrinCall); + RegisterFactoryFunction(OP_rangegoto, HandleRangeGoto); + RegisterFactoryFunction(OP_igoto, HandleIgoto); + RegisterFactoryFunction(OP_brfalse, HandleCondbr); + RegisterFactoryFunction(OP_brtrue, HandleCondbr); + RegisterFactoryFunction(OP_eval, HandleEval); + RegisterFactoryFunction(OP_asm, HandleAsm); +} +void InitHandleExprFactory() { + RegisterFactoryFunction(OP_dread, HandleDread); + RegisterFactoryFunction(OP_add, HandleAdd); + RegisterFactoryFunction(OP_sub, HandleSub); + RegisterFactoryFunction(OP_neg, HandleNeg); + RegisterFactoryFunction(OP_mul, HandleMpy); + RegisterFactoryFunction(OP_constval, HandleConstVal); + RegisterFactoryFunction(OP_regread, HandleRegread); + RegisterFactoryFunction(OP_addrof, HandleAddrof); + RegisterFactoryFunction(OP_addroffunc, HandleAddroffunc); + RegisterFactoryFunction(OP_addroflabel, HandleAddrofLabel); + RegisterFactoryFunction(OP_shl, HandleShift); + RegisterFactoryFunction(OP_lshr, HandleShift); + RegisterFactoryFunction(OP_ashr, HandleShift); + RegisterFactoryFunction(OP_cvt, 
HandleCvt); + RegisterFactoryFunction(OP_zext, HandleExtractBits); + RegisterFactoryFunction(OP_sext, HandleExtractBits); + RegisterFactoryFunction(OP_extractbits, HandleExtractBits); + RegisterFactoryFunction(OP_depositbits, HandleDepositBits); + RegisterFactoryFunction(OP_band, HandleBand); + RegisterFactoryFunction(OP_bior, HandleBior); + RegisterFactoryFunction(OP_bxor, HandleBxor); + RegisterFactoryFunction(OP_iread, HandleIread); + RegisterFactoryFunction(OP_ireadoff, HandleIreadoff); + RegisterFactoryFunction(OP_bnot, HandleBnot); + RegisterFactoryFunction(OP_div, HandleDiv); + RegisterFactoryFunction(OP_rem, HandleRem); + RegisterFactoryFunction(OP_conststr, HandleConstStr); + RegisterFactoryFunction(OP_le, HandleCmp); + RegisterFactoryFunction(OP_ge, HandleCmp); + RegisterFactoryFunction(OP_gt, HandleCmp); + RegisterFactoryFunction(OP_lt, HandleCmp); + RegisterFactoryFunction(OP_ne, HandleCmp); + RegisterFactoryFunction(OP_eq, HandleCmp); + RegisterFactoryFunction(OP_abs, HandleAbs); + RegisterFactoryFunction(OP_alloca, HandleAlloca); + RegisterFactoryFunction(OP_CG_array_elem_add, HandleCGArrayElemAdd); + RegisterFactoryFunction(OP_select, HandleSelect); + RegisterFactoryFunction(OP_min, HandleMin); + RegisterFactoryFunction(OP_max, HandleMax); + RegisterFactoryFunction(OP_retype, HandleRetype); + RegisterFactoryFunction(OP_trunc, HandleTrunc); + RegisterFactoryFunction(OP_intrinsicop, HandleIntrinOp); +} +} + +Operand *MPISel::HandleExpr(const BaseNode &parent, BaseNode &expr) { + auto function = CreateProductFunction(expr.GetOpCode()); + CHECK_FATAL(function != nullptr, "unsupported opCode in HandleExpr()"); + return function(parent, expr, *this); +} + +void MPISel::DoMPIS() { + isel::InitHandleStmtFactory(); + isel::InitHandleExprFactory(); + StmtNode *secondStmt = HandleFuncEntry(); + for (StmtNode *stmt = secondStmt; stmt != nullptr; stmt = stmt->GetNext()) { + auto function = CreateProductFunction(stmt->GetOpCode()); + CHECK_FATAL(function != nullptr, "unsupported opCode or has been lowered before"); + function(*stmt, *this); + } + HandleFuncExit(); +} + +PrimType MPISel::GetIntegerPrimTypeFromSize(bool isSigned, uint32 bitSize) const { + static constexpr std::array signedPrimType = {PTY_i8, PTY_i16, PTY_i32, PTY_i64}; + static constexpr std::array unsignedPrimType = {PTY_u8, PTY_u16, PTY_u32, PTY_u64}; + BitIndex index = GetBitIndex(bitSize); + return isSigned ? 
signedPrimType[index] : unsignedPrimType[index]; +} + +void MPISel::SelectCallCommon(StmtNode &stmt, const MPISel &iSel) { + CGFunc *cgFunc = iSel.GetCurFunc(); + if (cgFunc->GetCurBB()->GetKind() != BB::kBBFallthru) { + cgFunc->SetCurBB(*cgFunc->StartNewBB(stmt)); + } + StmtNode *prevStmt = stmt.GetPrev(); + if (prevStmt == nullptr || prevStmt->GetOpCode() != OP_catch) { + return; + } + if ((stmt.GetNext() != nullptr) && (stmt.GetNext()->GetOpCode() == OP_label)) { + cgFunc->SetCurBB(*cgFunc->StartNewBBImpl(true, stmt)); + } +} + +void MPISel::SelectBasicOp(Operand &resOpnd, Operand &opnd0, Operand &opnd1, MOperator mOp, PrimType primType) { + RegOperand &firstOpnd = SelectCopy2Reg(opnd0, primType); + RegOperand &secondOpnd = SelectCopy2Reg(opnd1, primType); + Insn &insn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, InsnDesc::GetAbstractId(mOp)); + (void)insn.AddOpndChain(resOpnd).AddOpndChain(firstOpnd).AddOpndChain(secondOpnd); + cgFunc->GetCurBB()->AppendInsn(insn); +} + +std::pair MPISel::GetFieldIdAndMirTypeFromMirNode(const BaseNode &node) { + FieldID fieldId = 0; + MIRType *mirType = nullptr; + if (node.GetOpCode() == maple::OP_iread) { + /* mirType stored in an addr. */ + auto &iread = static_cast(node); + fieldId = iread.GetFieldID(); + MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(iread.GetTyIdx()); + MIRPtrType *pointerType = static_cast(type); + ASSERT(pointerType != nullptr, "expect a pointer type at iread node"); + mirType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(pointerType->GetPointedTyIdx()); + if (mirType->GetKind() == kTypeArray) { + MIRArrayType *arrayType = static_cast(mirType); + mirType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(arrayType->GetElemTyIdx()); + } + } else if (node.GetOpCode() == maple::OP_dassign) { + /* mirSymbol */ + auto &dassign = static_cast(node); + fieldId = dassign.GetFieldID(); + MIRSymbol *symbol = cgFunc->GetFunction().GetLocalOrGlobalSymbol(dassign.GetStIdx()); + mirType = symbol->GetType(); + } else if (node.GetOpCode() == maple::OP_dread) { + /* mirSymbol */ + auto &dread = static_cast(node); + fieldId = dread.GetFieldID(); + MIRSymbol *symbol = cgFunc->GetFunction().GetLocalOrGlobalSymbol(dread.GetStIdx()); + mirType = symbol->GetType(); + } else if (node.GetOpCode() == maple::OP_iassign) { + auto &iassign = static_cast(node); + fieldId = iassign.GetFieldID(); + AddrofNode &addrofNode = static_cast(iassign.GetAddrExprBase()); + MIRType *iassignMirType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(iassign.GetTyIdx()); + MIRPtrType *pointerType = nullptr; + if (iassignMirType->GetPrimType() == PTY_agg) { + MIRSymbol *addrSym = cgFunc->GetMirModule().CurFunction()->GetLocalOrGlobalSymbol(addrofNode.GetStIdx()); + MIRType *addrMirType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(addrSym->GetTyIdx()); + addrMirType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(addrMirType->GetTypeIndex()); + ASSERT(addrMirType->GetKind() == kTypePointer, "non-pointer"); + pointerType = static_cast(addrMirType); + } else { + ASSERT(iassignMirType->GetKind() == kTypePointer, "non-pointer"); + pointerType = static_cast(iassignMirType); + } + mirType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(pointerType->GetPointedTyIdx()); + } else { + CHECK_FATAL(false, "unsupported OpCode"); + } + return {fieldId, mirType}; +} + +MirTypeInfo MPISel::GetMirTypeInfoFormFieldIdAndMirType(FieldID fieldId, MIRType *mirType) { + MirTypeInfo mirTypeInfo; + /* fixup primType and offset */ + if (fieldId != 0) { + ASSERT((mirType->IsMIRStructType() 
|| mirType->IsMIRUnionType()), "non-structure"); + MIRStructType *structType = static_cast(mirType); + mirType = structType->GetFieldType(fieldId); + mirTypeInfo.offset = static_cast(cgFunc->GetBecommon().GetFieldOffset(*structType, fieldId).first); + } + mirTypeInfo.primType = mirType->GetPrimType(); + // aggSize for AggType + if (mirTypeInfo.primType == maple::PTY_agg) { + mirTypeInfo.size = cgFunc->GetBecommon().GetTypeSize(mirType->GetTypeIndex()); + } + return mirTypeInfo; +} + +MirTypeInfo MPISel::GetMirTypeInfoFromMirNode(const BaseNode &node) { + auto [fieldId, mirType] = GetFieldIdAndMirTypeFromMirNode(node); + return GetMirTypeInfoFormFieldIdAndMirType(fieldId, mirType); +} + +void MPISel::SelectDassign(const DassignNode &stmt, Operand &opndRhs) { + /* mirSymbol info */ + MIRSymbol *symbol = cgFunc->GetFunction().GetLocalOrGlobalSymbol(stmt.GetStIdx()); + MirTypeInfo symbolInfo = GetMirTypeInfoFromMirNode(stmt); + CHECK_NULL_FATAL(symbol); + /* Get symbol location */ + MemOperand &symbolMem = GetOrCreateMemOpndFromSymbol(*symbol, stmt.GetFieldID()); + /* rhs mirType info */ + PrimType rhsType = stmt.GetRHS()->GetPrimType(); + /* Generate Insn */ + if (rhsType == PTY_agg) { + /* Agg Type */ + SelectAggDassign(symbolInfo, symbolMem, opndRhs); + return; + } + PrimType memType = symbolInfo.primType; + if (memType == PTY_agg) { + memType = PTY_a64; + } + SelectCopy(symbolMem, opndRhs, memType, rhsType); + return; +} + +void MPISel::SelectDassignoff(DassignoffNode &stmt, Operand &opnd0) { + MIRSymbol *symbol = cgFunc->GetFunction().GetLocalOrGlobalSymbol(stmt.stIdx); + PrimType primType = stmt.GetPrimType(); + uint32 bitSize = GetPrimTypeBitSize(primType); + MemOperand &memOpnd = GetOrCreateMemOpndFromSymbol(*symbol, bitSize, stmt.offset); + + SelectCopy(memOpnd, opnd0, primType); +} + +void MPISel::SelectIassign(const IassignNode &stmt, Operand &opndAddr, Operand &opndRhs) { + /* mirSymbol info */ + MirTypeInfo symbolInfo = GetMirTypeInfoFromMirNode(stmt); + /* handle Lhs, generate (%Rxx) via Rxx */ + PrimType memType = symbolInfo.primType; + if (memType == PTY_agg) { + memType = PTY_a64; + } + RegOperand &lhsBaseOpnd = SelectCopy2Reg(opndAddr, stmt.Opnd(0)->GetPrimType()); + MemOperand &lhsMemOpnd = cgFunc->GetOpndBuilder()->CreateMem(lhsBaseOpnd, symbolInfo.offset, + GetPrimTypeBitSize(memType)); + /* handle Rhs, get R## from Rhs */ + PrimType rhsType = stmt.GetRHS()->GetPrimType(); + /* mov %R##, (%Rxx) */ + SelectCopy(lhsMemOpnd, opndRhs, memType, rhsType); +} + +void MPISel::SelectIassignoff(const IassignoffNode &stmt) { + Operand *addr = HandleExpr(stmt, *stmt.Opnd(0)); + ASSERT(addr != nullptr, "null ptr check"); + Operand *rhs = HandleExpr(stmt, *stmt.Opnd(1)); + ASSERT(rhs != nullptr, "null ptr check"); + + int32 offset = stmt.GetOffset(); + PrimType primType = stmt.GetPrimType(); + uint32 bitSize = GetPrimTypeBitSize(primType); + RegOperand &addrReg = SelectCopy2Reg(*addr, PTY_a64); + RegOperand &rhsReg = SelectCopy2Reg(*rhs, primType); + + MemOperand &memOpnd = cgFunc->GetOpndBuilder()->CreateMem(addrReg, offset, bitSize); + SelectCopy(memOpnd, rhsReg, primType); +} + +ImmOperand *MPISel::SelectIntConst(MIRIntConst &intConst, PrimType primType) const { + return &cgFunc->GetOpndBuilder()->CreateImm(GetPrimTypeBitSize(primType), intConst.GetExtValue()); +} + +Operand *MPISel::SelectShift(const BinaryNode &node, Operand &opnd0, + Operand &opnd1, const BaseNode &parent [[maybe_unused]]) { + PrimType primType = node.GetPrimType(); + RegOperand *resOpnd = nullptr; + Opcode opcode 
= node.GetOpCode(); + + if (IsPrimitiveInteger(primType)) { + resOpnd = &(cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(primType), + cgFunc->GetRegTyFromPrimTy(primType))); + RegOperand ®Opnd0 = SelectCopy2Reg(opnd0, primType, node.Opnd(0)->GetPrimType()); + SelectShift(*resOpnd, regOpnd0, opnd1, opcode, primType, node.Opnd(1)->GetPrimType()); + } else { + CHECK_FATAL(false, "NIY vector cvt"); + } + return resOpnd; +} + +void MPISel::SelectShift(Operand &resOpnd, Operand &opnd0, Operand &opnd1, Opcode shiftDirect, + PrimType opnd0Type, PrimType opnd1Type) { + if (opnd1.IsIntImmediate() && static_cast(opnd1).GetValue() == 0) { + SelectCopy(resOpnd, opnd0, opnd0Type); + return; + } + + uint32 dsize = GetPrimTypeBitSize(opnd0Type); + MOperator mOp = abstract::MOP_undef; + if (shiftDirect == OP_shl) { + const static auto fastShlMappingFunc = DEF_MOPERATOR_MAPPING_FUNC(shl); + mOp = fastShlMappingFunc(dsize); + } else if (shiftDirect == OP_ashr) { + const static auto fastAshrMappingFunc = DEF_MOPERATOR_MAPPING_FUNC(ashr); + mOp = fastAshrMappingFunc(dsize); + } else if (shiftDirect == OP_lshr) { + const static auto fastLshrMappingFunc = DEF_MOPERATOR_MAPPING_FUNC(lshr); + mOp = fastLshrMappingFunc(dsize); + } else { + CHECK_FATAL(false, "NIY, Not support shiftdirect case"); + } + RegOperand &firstOpnd = SelectCopy2Reg(opnd0, opnd0Type); + RegOperand &secondOpnd = SelectCopy2Reg(opnd1, opnd1Type); + Insn &insn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, InsnDesc::GetAbstractId(mOp)); + insn.AddOpndChain(resOpnd).AddOpndChain(firstOpnd).AddOpndChain(secondOpnd); + cgFunc->GetCurBB()->AppendInsn(insn); +} + +void MPISel::SelectRegassign(RegassignNode &stmt, Operand &opnd0) { + PrimType rhsType = stmt.Opnd(0)->GetPrimType(); + PregIdx pregIdx = stmt.GetRegIdx(); + PrimType regType = stmt.GetPrimType(); + RegOperand ®Opnd = cgFunc->GetOpndBuilder()->CreateVReg(cgFunc->GetVirtualRegNOFromPseudoRegIdx(pregIdx), + GetPrimTypeBitSize(regType), cgFunc->GetRegTyFromPrimTy(regType)); + SelectCopy(regOpnd, opnd0, regType, rhsType); + if ((Globals::GetInstance()->GetOptimLevel() == CGOptions::kLevel0) && (pregIdx >= 0)) { + const SymbolAlloc *symLoc = cgFunc->GetMemlayout()->GetSpillLocOfPseduoRegister(pregIdx); + int64 offset = static_cast(cgFunc->GetBaseOffset(*symLoc)); + MIRPreg *preg = cgFunc->GetFunction().GetPregTab()->PregFromPregIdx(pregIdx); + uint32 bitLen = GetPrimTypeSize(preg->GetPrimType()) * kBitsPerByte; + RegOperand &base = GetTargetBasicPointer(PTY_u64); + MemOperand *dest = &cgFunc->GetOpndBuilder()->CreateMem(base, offset, bitLen); + SelectCopy(*dest, regOpnd, preg->GetPrimType(), regType); + } +} + +RegOperand *MPISel::SelectRegread(RegreadNode &expr) { + PregIdx pregIdx = expr.GetRegIdx(); + PrimType rhsType = expr.GetPrimType(); + RegOperand ® = cgFunc->GetOpndBuilder()->CreateVReg(cgFunc->GetVirtualRegNOFromPseudoRegIdx(pregIdx), + GetPrimTypeSize(rhsType) * kBitsPerByte, cgFunc->GetRegTyFromPrimTy(rhsType)); + if (Globals::GetInstance()->GetOptimLevel() == CGOptions::kLevel0) { + const SymbolAlloc *symLoc = cgFunc->GetMemlayout()->GetSpillLocOfPseduoRegister(pregIdx); + int64 offset = static_cast(cgFunc->GetBaseOffset(*symLoc)); + MIRPreg *preg = cgFunc->GetFunction().GetPregTab()->PregFromPregIdx(pregIdx); + uint32 bitLen = GetPrimTypeSize(preg->GetPrimType()) * kBitsPerByte; + RegOperand &base = GetTargetBasicPointer(PTY_u64); + MemOperand *src = &cgFunc->GetOpndBuilder()->CreateMem(base, offset, bitLen); + SelectCopy(reg, *src, rhsType, preg->GetPrimType()); + } + return 
® +} + +Operand *MPISel::SelectDread(const BaseNode &parent [[maybe_unused]], const AddrofNode &expr) { + /* get mirSymbol info*/ + MIRSymbol *symbol = cgFunc->GetFunction().GetLocalOrGlobalSymbol(expr.GetStIdx()); + MirTypeInfo symbolInfo = GetMirTypeInfoFromMirNode(expr); + PrimType symbolType = symbolInfo.primType; + /* Get symbol location */ + MemOperand &symbolMem = GetOrCreateMemOpndFromSymbol(*symbol, expr.GetFieldID()); + PrimType primType = expr.GetPrimType(); + /* for AggType, return it's location in stack. */ + if (symbolType == maple::PTY_agg) { + CHECK_FATAL(primType == maple::PTY_agg, "NIY"); + return &symbolMem; + } + /* for BasicType, load symbolVal to register. */ + RegOperand ®Opnd = cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(primType), + cgFunc->GetRegTyFromPrimTy(primType)); + /* Generate Insn */ + SelectCopy(regOpnd, symbolMem, primType, symbolType); + return ®Opnd; +} + +Operand *MPISel::SelectAdd(const BinaryNode &node, Operand &opnd0, + Operand &opnd1, const BaseNode &parent [[maybe_unused]]) { + PrimType primType = node.GetPrimType(); + RegOperand &resReg = cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(primType), + cgFunc->GetRegTyFromPrimTy(primType)); + RegOperand ®Opnd0 = SelectCopy2Reg(opnd0, primType, node.Opnd(0)->GetPrimType()); + RegOperand ®Opnd1 = SelectCopy2Reg(opnd1, primType, node.Opnd(1)->GetPrimType()); + SelectAdd(resReg, regOpnd0, regOpnd1, primType); + return &resReg; +} + +Operand *MPISel::SelectBand(const BinaryNode &node, Operand &opnd0, + Operand &opnd1, const BaseNode &parent [[maybe_unused]]) { + PrimType primType = node.GetPrimType(); + RegOperand &resReg = cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(primType), + cgFunc->GetRegTyFromPrimTy(primType)); + RegOperand ®Opnd0 = SelectCopy2Reg(opnd0, primType, node.Opnd(0)->GetPrimType()); + RegOperand ®Opnd1 = SelectCopy2Reg(opnd1, primType, node.Opnd(1)->GetPrimType()); + SelectBand(resReg, regOpnd0, regOpnd1, primType); + return &resReg; +} + +Operand *MPISel::SelectSub(const BinaryNode &node, Operand &opnd0, Operand &opnd1, + const BaseNode &parent [[maybe_unused]]) { + PrimType primType = node.GetPrimType(); + RegOperand &resOpnd = cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(primType), + cgFunc->GetRegTyFromPrimTy(primType)); + RegOperand ®Opnd0 = SelectCopy2Reg(opnd0, primType, node.Opnd(0)->GetPrimType()); + RegOperand ®Opnd1 = SelectCopy2Reg(opnd1, primType, node.Opnd(1)->GetPrimType()); + SelectSub(resOpnd, regOpnd0, regOpnd1, primType); + return &resOpnd; +} + +void MPISel::SelectExtractbits(RegOperand &resOpnd, RegOperand &opnd0, uint8 bitOffset, + uint8 bitSize, PrimType primType) { + uint32 primBitSize = GetPrimTypeBitSize(primType); + bool isSigned = IsSignedInteger(primType); + if (bitOffset == 0 && !isSigned) { + /* + * resOpnd = opnd0 & ((1 << bitSize) - 1) + */ + ImmOperand &imm = cgFunc->GetOpndBuilder()->CreateImm(primBitSize, + (static_cast(1) << bitSize) - 1); + SelectBand(resOpnd, opnd0, imm, primType); + } else { + /* + * tmpOpnd = opnd0 << (primBitSize - bitSize - bitOffset) + * resOpnd = tmpOpnd >> (primBitSize - bitSize) + * if signed : use sar; else use shr + */ + RegOperand &tmpOpnd = cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(primType), + cgFunc->GetRegTyFromPrimTy(primType)); + ImmOperand &imm1Opnd = cgFunc->GetOpndBuilder()->CreateImm(primBitSize, + primBitSize - bitSize - bitOffset); + SelectShift(tmpOpnd, opnd0, imm1Opnd, OP_shl, primType, primType); + Opcode opcode = isSigned ? 
OP_ashr : OP_lshr; + ImmOperand &imm2Opnd = cgFunc->GetOpndBuilder()->CreateImm(primBitSize, primBitSize - bitSize); + SelectShift(resOpnd, tmpOpnd, imm2Opnd, opcode, primType, primType); + } +} + +Operand *MPISel::SelectExtractbits(const BaseNode &parent [[maybe_unused]], + const ExtractbitsNode &node, Operand &opnd0) { + PrimType fromType = node.Opnd(0)->GetPrimType(); + PrimType toType = node.GetPrimType(); + uint8 bitSize = node.GetBitsSize(); + RegOperand &resOpnd = cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(toType), + cgFunc->GetRegTyFromPrimTy(toType)); + if (IsPrimitiveInteger(toType)) { + // OP_extractbits or bitSize < 8-bit or bitSize is not pow of 2 + if (node.GetOpCode() == OP_extractbits || bitSize < k8BitSize || (bitSize & (bitSize - 1)) != 0) { + SelectCopy(resOpnd, opnd0, toType, fromType); + SelectExtractbits(resOpnd, resOpnd, node.GetBitsOffset(), bitSize, toType); + } else { + PrimType opndType = GetIntegerPrimTypeFromSize(node.GetOpCode() == OP_sext, bitSize); + RegOperand &tmpRegOpnd = SelectCopy2Reg(opnd0, opndType, fromType); + SelectIntCvt(resOpnd, tmpRegOpnd, toType, opndType); + } + } else { + CHECK_FATAL(false, "NIY vector cvt"); + } + return &resOpnd; +} + +Operand *MPISel::SelectCvt(const BaseNode &parent [[maybe_unused]], const TypeCvtNode &node, Operand &opnd0) { + PrimType fromType = node.Opnd(0)->GetPrimType(); + PrimType toType = node.GetPrimType(); + if (fromType == toType) { + return &opnd0; + } + RegOperand *resOpnd = &cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(toType), + cgFunc->GetRegTyFromPrimTy(toType)); + if (IsPrimitiveInteger(toType) && IsPrimitiveInteger(fromType)) { + SelectIntCvt(*resOpnd, opnd0, toType, fromType); + } else if (IsPrimitiveFloat(toType) && IsPrimitiveInteger(fromType)) { + SelectCvtInt2Float(*resOpnd, opnd0, toType, fromType); + } else if (IsPrimitiveFloat(toType) && IsPrimitiveFloat(fromType)) { + SelectFloatCvt(*resOpnd, opnd0, toType, fromType); + } else if (IsPrimitiveInteger(toType) && IsPrimitiveFloat(fromType)) { + SelectCvtFloat2Int(*resOpnd, opnd0, toType, fromType); + } else { + CHECK_FATAL(false, "NIY cvt"); + } + return resOpnd; +} + + +void MPISel::SelectCvtFloat2Int(RegOperand &resOpnd, Operand &opnd0, PrimType toType, PrimType fromType) { + uint32 toSize = GetPrimTypeBitSize(toType); + bool isSigned = !IsPrimitiveUnsigned(toType); + RegOperand &tmpFloatOpnd = cgFunc->GetOpndBuilder()->CreateVReg(toSize, kRegTyFloat); + SelectFloatCvt(tmpFloatOpnd, opnd0, toType, fromType); + MOperator mOp = abstract::MOP_undef; + if (toSize == k32BitSize) { + mOp = isSigned ? abstract::MOP_cvt_rf_i32 : abstract::MOP_cvt_rf_u32; + } else if (toSize == k64BitSize) { + mOp = isSigned ? abstract::MOP_cvt_rf_i64 : abstract::MOP_cvt_rf_u64; + } else { + CHECK_FATAL(false, "niy"); + } + Insn &insn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, InsnDesc::GetAbstractId(mOp)); + (void)insn.AddOpndChain(resOpnd).AddOpndChain(tmpFloatOpnd); + cgFunc->GetCurBB()->AppendInsn(insn); +} + +void MPISel::SelectCvtInt2Float(RegOperand &resOpnd, Operand &opnd0, PrimType toType, PrimType fromType) { + uint32 fromSize = GetPrimTypeBitSize(fromType); + bool isSigned = !IsPrimitiveUnsigned(fromType); + MOperator mOp = abstract::MOP_undef; + PrimType newFromType = PTY_begin; + if (fromSize == k32BitSize) { + mOp = isSigned ? abstract::MOP_cvt_fr_i32 : abstract::MOP_cvt_fr_u32; + newFromType = PTY_f32; + } else if (fromSize == k64BitSize) { + mOp = isSigned ? 
abstract::MOP_cvt_fr_i64 : abstract::MOP_cvt_fr_u64;
+    newFromType = PTY_f64;
+  } else {
+    CHECK_FATAL(false, "niy");
+  }
+  RegOperand &regOpnd0 = SelectCopy2Reg(opnd0, fromType);
+  RegOperand &tmpFloatOpnd = cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(newFromType),
+      cgFunc->GetRegTyFromPrimTy(newFromType));
+  Insn &insn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, InsnDesc::GetAbstractId(mOp));
+  (void)insn.AddOpndChain(tmpFloatOpnd).AddOpndChain(regOpnd0);
+  cgFunc->GetCurBB()->AppendInsn(insn);
+  SelectFloatCvt(resOpnd, tmpFloatOpnd, toType, newFromType);
+}
+
+void MPISel::SelectIntCvt(RegOperand &resOpnd, Operand &opnd0, PrimType toType, PrimType fromType) {
+  uint32 fromSize = GetPrimTypeBitSize(fromType);
+  uint32 toSize = GetPrimTypeBitSize(toType);
+  /*
+   * It is redundant to insert "nop" casts (unsigned 32 -> signed 32) in abstract CG IR;
+   * the signedness of operands is already shown in the expression.
+   */
+  RegOperand &regOpnd0 = SelectCopy2Reg(opnd0, fromType);
+  if (toSize <= fromSize) {
+    resOpnd = cgFunc->GetOpndBuilder()->CreateVReg(regOpnd0.GetRegisterNumber(),
+        GetPrimTypeBitSize(toType), cgFunc->GetRegTyFromPrimTy(toType));
+    return;
+  }
+  bool isSigned = !IsPrimitiveUnsigned(fromType);
+  MOperator mOp = GetFastCvtMopI(fromSize, toSize, isSigned);
+  Insn &insn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, InsnDesc::GetAbstractId(mOp));
+  (void)insn.AddOpndChain(resOpnd).AddOpndChain(regOpnd0);
+  cgFunc->GetCurBB()->AppendInsn(insn);
+  return;
+}
+
+void MPISel::SelectFloatCvt(RegOperand &resOpnd, Operand &opnd0, PrimType toType, PrimType fromType) {
+  uint32 fromSize = GetPrimTypeBitSize(fromType);
+  uint32 toSize = GetPrimTypeBitSize(toType);
+  RegOperand &regOpnd0 = SelectCopy2Reg(opnd0, fromType);
+  if (fromSize == toSize) {
+    resOpnd = regOpnd0;
+    return;
+  }
+  MOperator mOp = abstract::MOP_undef;
+  if (fromSize == k32BitSize && toSize == k64BitSize) {
+    mOp = abstract::MOP_cvt_ff_64_32;
+  } else if (fromSize == k64BitSize && toSize == k32BitSize) {
+    mOp = abstract::MOP_cvt_ff_32_64;
+  } else {
+    CHECK_FATAL(false, "niy");
+  }
+  Insn &insn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, InsnDesc::GetAbstractId(mOp));
+  (void)insn.AddOpndChain(resOpnd).AddOpndChain(regOpnd0);
+  cgFunc->GetCurBB()->AppendInsn(insn);
+  return;
+}
+
+void MPISel::SelectSub(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) {
+  MOperator mOp = abstract::MOP_undef;
+  if (IsPrimitiveInteger(primType)) {
+    const static auto fastSubMappingFunc = DEF_MOPERATOR_MAPPING_FUNC(sub);
+    mOp = fastSubMappingFunc(GetPrimTypeBitSize(primType));
+  } else {
+    const static auto fastSubFloatMappingFunc = DEF_FLOAT_MOPERATOR_MAPPING_FUNC(sub);
+    mOp = fastSubFloatMappingFunc(GetPrimTypeBitSize(primType));
+  }
+  SelectBasicOp(resOpnd, opnd0, opnd1, mOp, primType);
+}
+
+void MPISel::SelectBand(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) {
+  const static auto fastBandMappingFunc = DEF_MOPERATOR_MAPPING_FUNC(and);
+  MOperator mOp = fastBandMappingFunc(GetPrimTypeBitSize(primType));
+  SelectBasicOp(resOpnd, opnd0, opnd1, mOp, primType);
+}
+
+void MPISel::SelectAdd(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) {
+  MOperator mOp = abstract::MOP_undef;
+  if (IsPrimitiveInteger(primType)) {
+    const static auto fastAddMappingFunc = DEF_MOPERATOR_MAPPING_FUNC(add);
+    mOp = fastAddMappingFunc(GetPrimTypeBitSize(primType));
+  } else {
+    const static auto fastAddFloatMappingFunc = DEF_FLOAT_MOPERATOR_MAPPING_FUNC(add);
+    mOp =
fastAddFloatMappingFunc(GetPrimTypeBitSize(primType)); + } + SelectBasicOp(resOpnd, opnd0, opnd1, mOp, primType); +} + +Operand* MPISel::SelectNeg(const UnaryNode &node, Operand &opnd0, const BaseNode &parent [[maybe_unused]]) { + PrimType dtype = node.GetPrimType(); + + RegOperand *resOpnd = nullptr; + if (!IsPrimitiveVector(dtype)) { + resOpnd = &cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(dtype), + cgFunc->GetRegTyFromPrimTy(dtype)); + RegOperand ®Opnd0 = SelectCopy2Reg(opnd0, dtype, node.Opnd(0)->GetPrimType()); + SelectNeg(*resOpnd, regOpnd0, dtype); + } else { + /* vector operand */ + CHECK_FATAL(false, "NIY"); + } + return resOpnd; +} + +void MPISel::SelectNeg(Operand &resOpnd, Operand &opnd0, PrimType primType) const { + MOperator mOp = abstract::MOP_undef; + if (IsPrimitiveInteger(primType)) { + const static auto fastNegMappingFunc = DEF_MOPERATOR_MAPPING_FUNC(neg); + mOp = fastNegMappingFunc(GetPrimTypeBitSize(primType)); + } else { + const static auto fastNegFloatMappingFunc = DEF_FLOAT_MOPERATOR_MAPPING_FUNC(neg); + mOp = fastNegFloatMappingFunc(GetPrimTypeBitSize(primType)); + } + Insn &insn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, InsnDesc::GetAbstractId(mOp)); + (void)insn.AddOpndChain(resOpnd).AddOpndChain(opnd0); + cgFunc->GetCurBB()->AppendInsn(insn); +} + +Operand *MPISel::SelectBior(const BinaryNode &node, Operand &opnd0, + Operand &opnd1, const BaseNode &parent [[maybe_unused]]) { + PrimType primType = node.GetPrimType(); + RegOperand *resOpnd = &cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(primType), + cgFunc->GetRegTyFromPrimTy(primType)); + RegOperand ®Opnd0 = SelectCopy2Reg(opnd0, primType, node.Opnd(0)->GetPrimType()); + RegOperand ®Opnd1 = SelectCopy2Reg(opnd1, primType, node.Opnd(1)->GetPrimType()); + SelectBior(*resOpnd, regOpnd0, regOpnd1, primType); + return resOpnd; +} + +void MPISel::SelectBior(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) { + const static auto fastBiorMappingFunc = DEF_MOPERATOR_MAPPING_FUNC(or); + MOperator mOp = fastBiorMappingFunc(GetPrimTypeBitSize(primType)); + SelectBasicOp(resOpnd, opnd0, opnd1, mOp, primType); +} + +Operand *MPISel::SelectBxor(const BinaryNode &node, Operand &opnd0, + Operand &opnd1, const BaseNode &parent [[maybe_unused]]) { + PrimType primType = node.GetPrimType(); + RegOperand *resOpnd = &cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(primType), + cgFunc->GetRegTyFromPrimTy(primType)); + RegOperand ®Opnd0 = SelectCopy2Reg(opnd0, primType, node.Opnd(0)->GetPrimType()); + RegOperand ®Opnd1 = SelectCopy2Reg(opnd1, primType, node.Opnd(1)->GetPrimType()); + SelectBxor(*resOpnd, regOpnd0, regOpnd1, primType); + return resOpnd; +} + +void MPISel::SelectBxor(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) { + const static auto fastBxorMappingFunc = DEF_MOPERATOR_MAPPING_FUNC(xor); + MOperator mOp = fastBxorMappingFunc(GetPrimTypeBitSize(primType)); + SelectBasicOp(resOpnd, opnd0, opnd1, mOp, primType); +} + +MemOperand *MPISel::GetOrCreateMemOpndFromIreadNode(const IreadNode &expr, PrimType primType, int offset) { + /* get rhs*/ + Operand *addrOpnd = HandleExpr(expr, *expr.Opnd(0)); + RegOperand &addrOnReg = SelectCopy2Reg(*addrOpnd, PTY_a64); + /* Generate memOpnd */ + MemOperand &memOpnd = cgFunc->GetOpndBuilder()->CreateMem(addrOnReg, + offset, GetPrimTypeBitSize(primType)); + return &memOpnd; +} + +Operand *MPISel::SelectIread(const BaseNode &parent [[maybe_unused]], const IreadNode &expr, int extraOffset) { + /* get lhs mirType info */ + 
MirTypeInfo lhsInfo = GetMirTypeInfoFromMirNode(expr); + /* get memOpnd */ + MemOperand &memOpnd = *GetOrCreateMemOpndFromIreadNode(expr, lhsInfo.primType, lhsInfo.offset + extraOffset); + /* for AggType, return addr it self. */ + if (lhsInfo.primType == PTY_agg) { + return &memOpnd; + } + /* for BasicType, load val in addr to register. */ + PrimType primType = expr.GetPrimType(); + RegOperand &result = cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(primType), + cgFunc->GetRegTyFromPrimTy(primType)); + SelectCopy(result, memOpnd, primType, lhsInfo.primType); + return &result; +} + +Operand *MPISel::SelectIreadoff(const BaseNode &parent [[maybe_unused]], const IreadoffNode &ireadoff) { + int32 offset = ireadoff.GetOffset(); + PrimType primType = ireadoff.GetPrimType(); + uint32 bitSize = GetPrimTypeBitSize(primType); + + Operand *addrOpnd = HandleExpr(ireadoff, *ireadoff.Opnd(0)); + RegOperand &addrOnReg = SelectCopy2Reg(*addrOpnd, PTY_a64); + MemOperand &memOpnd = cgFunc->GetOpndBuilder()->CreateMem(addrOnReg, offset, bitSize); + RegOperand &result = cgFunc->GetOpndBuilder()->CreateVReg(bitSize, + cgFunc->GetRegTyFromPrimTy(primType)); + SelectCopy(result, memOpnd, primType); + return &result; +} + +static inline uint64 CreateDepositBitsImm1(uint32 primBitSize, uint8 bitOffset, uint8 bitSize) { + /* $imm1 = 1(primBitSize - bitSize - bitOffset)0(bitSize)1(bitOffset) */ + uint64 val = UINT64_MAX; // 0xFFFFFFFFFFFFFFFF + if (bitSize + bitOffset >= primBitSize) { + val = 0; + } else { + val <<= (bitSize + bitOffset); + } + val |= (static_cast(1) << bitOffset) - 1; + return val; +} + +Operand *MPISel::SelectDepositBits(const DepositbitsNode &node, Operand &opnd0, Operand &opnd1, + const BaseNode &parent [[maybe_unused]]) { + uint8 bitOffset = node.GetBitsOffset(); + uint8 bitSize = node.GetBitsSize(); + PrimType primType = node.GetPrimType(); + uint32 primBitSize = GetPrimTypeBitSize(primType); + ASSERT((primBitSize == k64BitSize) || (bitOffset < k32BitSize), "wrong bitSize"); + ASSERT(bitSize < k64BitSize, "wrong bitSize"); + + RegOperand &resOpnd = cgFunc->GetOpndBuilder()->CreateVReg(primBitSize, + cgFunc->GetRegTyFromPrimTy(primType)); + /* + * resOpnd = (opnd0 and $imm1) or ((opnd1 << bitOffset) and (~$imm1)); + * $imm1 = 1(primBitSize - bitSize - bitOffset)0(bitSize)1(bitOffset) + */ + uint64 imm1Val = CreateDepositBitsImm1(primBitSize, bitOffset, bitSize); + ImmOperand &imm1Opnd = cgFunc->GetOpndBuilder()->CreateImm(primBitSize, + static_cast(imm1Val)); + /* and */ + SelectBand(resOpnd, opnd0, imm1Opnd, primType); + if (opnd1.IsIntImmediate()) { + /* opnd1 is immediate, imm2 = (opnd1.val << bitOffset) & (~$imm1) */ + int64 imm2Val = (static_cast(opnd1).GetValue() << bitOffset) & (~imm1Val); + ImmOperand &imm2Opnd = cgFunc->GetOpndBuilder()->CreateImm(primBitSize, imm2Val); + /* or */ + SelectBior(resOpnd, resOpnd, imm2Opnd, primType); + } else { + RegOperand &tmpOpnd = cgFunc->GetOpndBuilder()->CreateVReg(primBitSize, + cgFunc->GetRegTyFromPrimTy(primType)); + SelectCopy(tmpOpnd, opnd1, primType, node.Opnd(1)->GetPrimType()); + /* shift -- (opnd1 << bitOffset) */ + ImmOperand &countOpnd = cgFunc->GetOpndBuilder()->CreateImm(primBitSize, bitOffset); + SelectShift(tmpOpnd, tmpOpnd, countOpnd, OP_shl, primType, primType); + /* and (~$imm1) */ + ImmOperand &nonImm1Opnd = cgFunc->GetOpndBuilder()->CreateImm(primBitSize, (~imm1Val)); + SelectBand(tmpOpnd, tmpOpnd, nonImm1Opnd, primType); + /* or */ + SelectBior(resOpnd, resOpnd, tmpOpnd, primType); + } + return &resOpnd; +} + +Operand 
*MPISel::SelectAbs(UnaryNode &node, Operand &opnd0) { + PrimType primType = node.GetPrimType(); + if (IsPrimitiveVector(primType)) { + CHECK_FATAL(false, "NIY"); + } else if (IsPrimitiveFloat(primType)) { + CHECK_FATAL(false, "NIY"); + } else if (IsUnsignedInteger(primType)) { + return &opnd0; + } else { + /* + * abs(x) = (x XOR y) - y + * y = x >>> (bitSize - 1) + */ + uint32 bitSize = GetPrimTypeBitSize(primType); + CHECK_FATAL(bitSize == k64BitSize || bitSize == k32BitSize, "only support 32-bits or 64-bits"); + RegOperand ®Opnd0 = SelectCopy2Reg(opnd0, primType); + ImmOperand &immOpnd = cgFunc->GetOpndBuilder()->CreateImm(bitSize, bitSize - 1); + RegOperand ®Opndy = cgFunc->GetOpndBuilder()->CreateVReg(bitSize, + cgFunc->GetRegTyFromPrimTy(primType)); + SelectShift(regOpndy, regOpnd0, immOpnd, OP_ashr, primType, primType); + RegOperand &tmpOpnd = cgFunc->GetOpndBuilder()->CreateVReg(bitSize, + cgFunc->GetRegTyFromPrimTy(primType)); + SelectBxor(tmpOpnd, regOpnd0, regOpndy, primType); + RegOperand &resOpnd = cgFunc->GetOpndBuilder()->CreateVReg(bitSize, + cgFunc->GetRegTyFromPrimTy(primType)); + SelectSub(resOpnd, tmpOpnd, regOpndy, primType); + return &resOpnd; + } +} + +Operand *MPISel::SelectAlloca(UnaryNode &node, Operand &opnd0) { + ASSERT(node.GetPrimType() == PTY_a64, "wrong type"); + PrimType srcType = node.Opnd(0)->GetPrimType(); + RegOperand &sizeOpnd = cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(PTY_u64), + cgFunc->GetRegTyFromPrimTy(PTY_u64)); + SelectCopy(sizeOpnd, opnd0, PTY_u64, srcType); + + /* stack byte alignment */ + uint32 stackPtrAlignment = cgFunc->GetMemlayout()->GetStackPtrAlignment(); + RegOperand &aliOp = cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(PTY_u64), + cgFunc->GetRegTyFromPrimTy(PTY_u64)); + SelectAdd(aliOp, sizeOpnd, cgFunc->GetOpndBuilder()->CreateImm(k64BitSize, + stackPtrAlignment - 1), PTY_u64); + ImmOperand &shiftOpnd = cgFunc->GetOpndBuilder()->CreateImm(k64BitSize, + __builtin_ctz(stackPtrAlignment)); + SelectShift(aliOp, aliOp, shiftOpnd, OP_lshr, PTY_u64, PTY_u64); + SelectShift(aliOp, aliOp, shiftOpnd, OP_shl, PTY_u64, PTY_u64); + + RegOperand &spOpnd = GetTargetStackPointer(PTY_u64); + SelectSub(spOpnd, spOpnd, aliOp, PTY_u64); + + RegOperand &resOpnd = cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(PTY_u64), + cgFunc->GetRegTyFromPrimTy(PTY_u64)); + uint32 argsToStkpassSize = cgFunc->GetMemlayout()->SizeOfArgsToStackPass(); + if (argsToStkpassSize > 0) { + SelectAdd(resOpnd, spOpnd, cgFunc->GetOpndBuilder()->CreateImm(k64BitSize, argsToStkpassSize), PTY_u64); + } else { + SelectCopy(resOpnd, spOpnd, PTY_u64); + } + return &resOpnd; +} + +Operand *MPISel::SelectCGArrayElemAdd(BinaryNode &node, const BaseNode &parent) { + BaseNode *opnd0 = node.Opnd(0); + BaseNode *opnd1 = node.Opnd(1); + ASSERT(opnd1->GetOpCode() == OP_constval, "NIY, opnd1->op should be OP_constval."); + + switch (opnd0->GetOpCode()) { + case OP_regread: { + return SelectRegread(static_cast(*opnd0)); + } + case OP_addrof: { + Operand *addrOpnd = SelectAddrof(static_cast(*opnd0), node); + + /* OP_constval */ + ConstvalNode *constvalNode = static_cast(opnd1); + MIRConst *mirConst = constvalNode->GetConstVal(); + ASSERT(mirConst->GetKind() == kConstInt, "NIY"); + MIRIntConst *mirIntConst = static_cast(mirConst); + Operand *immOpnd = SelectIntConst(*mirIntConst, constvalNode->GetPrimType()); + + Operand &resOpnd = cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(PTY_a64), + cgFunc->GetRegTyFromPrimTy(PTY_a64)); + SelectAdd(resOpnd, 
*addrOpnd, *immOpnd, node.GetPrimType()); + return &resOpnd; + } + default: + CHECK_FATAL(false, "cannot handle opnd0."); + } +} + +StmtNode *MPISel::HandleFuncEntry() { + MIRFunction &mirFunc = cgFunc->GetFunction(); + BlockNode *block = mirFunc.GetBody(); + + ASSERT(block != nullptr, "get func body block failed in CGFunc::GenerateInstruction"); + + StmtNode *stmt = block->GetFirst(); + if (stmt == nullptr) { + return nullptr; + } + ASSERT(stmt->GetOpCode() == OP_label, "The first statement should be a label"); + HandleLabel(*stmt, *this); + cgFunc->SetFirstBB(*cgFunc->GetCurBB()); + stmt = stmt->GetNext(); + if (stmt == nullptr) { + return nullptr; + } + cgFunc->SetCurBB(*cgFunc->StartNewBBImpl(false, *stmt)); + bool withFreqInfo = mirFunc.HasFreqMap() && !mirFunc.GetLastFreqMap().empty(); + if (withFreqInfo) { + cgFunc->GetCurBB()->SetFrequency(kFreqBase); + } + + return stmt; +} + +/* This function loads src to a register, the src can be an imm, mem or a label. + * Once the source and result(destination) types are different, + * implicit conversion is executed here.*/ +RegOperand &MPISel::SelectCopy2Reg(Operand &src, PrimType toType, PrimType fromType) { + uint32 fromSize = GetPrimTypeBitSize(fromType); + uint32 toSize = GetPrimTypeBitSize(toType); + if (src.IsRegister() && fromSize == toSize) { + return static_cast(src); + } + RegOperand &dest = cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(toType), + cgFunc->GetRegTyFromPrimTy(toType)); + if (fromSize != toSize) { + SelectCopy(dest, src, toType, fromType); + } else { + SelectCopy(dest, src, toType); + } + return dest; +} +/* Pretty sure that implicit type conversions will not occur. */ +RegOperand &MPISel::SelectCopy2Reg(Operand &src, PrimType dtype) { + ASSERT(src.GetSize() == GetPrimTypeBitSize(dtype), "NIY"); + if (src.IsRegister()) { + return static_cast(src); + } + RegOperand &dest = cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(dtype), + cgFunc->GetRegTyFromPrimTy(dtype)); + SelectCopy(dest, src, dtype); + return dest; +} +/* This function copy/load/store src to a dest, Once the src and dest types + * are different, implicit conversion is executed here. */ +void MPISel::SelectCopy(Operand &dest, Operand &src, PrimType toType, PrimType fromType) { + if (GetPrimTypeBitSize(fromType) != GetPrimTypeBitSize(toType)) { + RegOperand &srcRegOpnd = SelectCopy2Reg(src, fromType); + RegOperand &dstRegOpnd = cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(toType), + cgFunc->GetRegTyFromPrimTy(toType)); + SelectIntCvt(dstRegOpnd, srcRegOpnd, toType, fromType); + SelectCopy(dest, dstRegOpnd, toType); + } else { + SelectCopy(dest, src, toType); + } +} + +/* Pretty sure that implicit type conversions will not occur. 
*/ +void MPISel::SelectCopy(Operand &dest, Operand &src, PrimType type) { + ASSERT(dest.GetSize() == src.GetSize(), "NIY"); + if (dest.GetKind() == Operand::kOpdRegister) { + SelectCopyInsn(dest, src, type); + } else if (dest.GetKind() == Operand::kOpdMem) { + if (src.GetKind() != Operand::kOpdRegister) { + RegOperand &tempReg = cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(type), + cgFunc->GetRegTyFromPrimTy(type)); + SelectCopyInsn(tempReg, src, type); + SelectCopyInsn(dest, tempReg, type); + } else { + SelectCopyInsn(dest, src, type); + } + }else { + CHECK_FATAL(false, "NIY, CPU supports more than memory and registers"); + } +} + +void MPISel::SelectCopyInsn(Operand &dest, Operand &src, PrimType type) { + MOperator mop = GetFastIselMop(dest.GetKind(), src.GetKind(), type); + CHECK_FATAL(mop != abstract::MOP_undef, "get mop failed"); + Insn &insn = cgFunc->GetInsnBuilder()->BuildInsn(mop, InsnDesc::GetAbstractId(mop)); + /* For Store, md defines [src, dest] */ + if (insn.IsStore()) { + (void)insn.AddOpndChain(src).AddOpndChain(dest); + } else { + (void)insn.AddOpndChain(dest).AddOpndChain(src); + } + cgFunc->GetCurBB()->AppendInsn(insn); +} + +Operand *MPISel::SelectBnot(const UnaryNode &node, Operand &opnd0, const BaseNode &parent [[maybe_unused]]) { + PrimType dtype = node.GetPrimType(); + + RegOperand *resOpnd = nullptr; + if (!IsPrimitiveVector(dtype)) { + resOpnd = &cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(dtype), + cgFunc->GetRegTyFromPrimTy(dtype)); + RegOperand ®Opnd0 = SelectCopy2Reg(opnd0, dtype, node.Opnd(0)->GetPrimType()); + SelectBnot(*resOpnd, regOpnd0, dtype); + } else { + /* vector operand */ + CHECK_FATAL(false, "NIY"); + } + return resOpnd; +} + +void MPISel::SelectBnot(Operand &resOpnd, Operand &opnd0, PrimType primType) { + const static auto fastBnotMappingFunc = DEF_MOPERATOR_MAPPING_FUNC(not); + MOperator mOp = fastBnotMappingFunc(GetPrimTypeBitSize(primType)); + Insn &insn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, InsnDesc::GetAbstractId(mOp)); + (void)insn.AddOpndChain(resOpnd).AddOpndChain(opnd0); + cgFunc->GetCurBB()->AppendInsn(insn); +} + +Operand *MPISel::SelectMin(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) { + PrimType primType = node.GetPrimType(); + RegOperand &resOpnd = cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(primType), + cgFunc->GetRegTyFromPrimTy(primType)); + SelectMin(resOpnd, opnd0, opnd1, primType); + return &resOpnd; +} + +void MPISel::SelectMin(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) { + SelectMinOrMax(true, resOpnd, opnd0, opnd1, primType); +} + +Operand *MPISel::SelectLiteral(MIRDoubleConst &c, MIRFunction &func, uint32 labelIdx) const { + MIRSymbol *st = func.GetSymTab()->CreateSymbol(kScopeLocal); + std::string lblStr(".LB_"); + MIRSymbol *funcSt = GlobalTables::GetGsymTable().GetSymbolFromStidx(func.GetStIdx().Idx()); + std::string funcName = funcSt->GetName(); + (void)lblStr.append(funcName).append(std::to_string(labelIdx)); + st->SetNameStrIdx(lblStr); + st->SetStorageClass(kScPstatic); + st->SetSKind(kStConst); + st->SetKonst(&c); + PrimType primType = c.GetType().GetPrimType(); + st->SetTyIdx(TyIdx(primType)); + uint32 typeBitSize = GetPrimTypeBitSize(primType); + + if (cgFunc->GetMirModule().IsCModule()) { + return &GetOrCreateMemOpndFromSymbol(*st, typeBitSize, 0); + } + return nullptr; +} + +Operand *MPISel::SelectMax(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) { + PrimType primType = node.GetPrimType(); 
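+  // Max shares the min lowering: the result vreg is created here and SelectMinOrMax is
+  // called with its first argument false (true selects min, false selects max), mirroring SelectMin.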
+ RegOperand &resOpnd = cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(primType), + cgFunc->GetRegTyFromPrimTy(primType)); + SelectMax(resOpnd, opnd0, opnd1, primType); + return &resOpnd; +} + +void MPISel::SelectMax(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) { + SelectMinOrMax(false, resOpnd, opnd0, opnd1, primType); +} + +Operand *MPISel::SelectRetype(TypeCvtNode &node, Operand &opnd0) { + PrimType fromType = node.Opnd(0)->GetPrimType(); + PrimType toType = node.GetPrimType(); + ASSERT(GetPrimTypeSize(fromType) == GetPrimTypeSize(toType), "retype bit widith doesn' match"); + if (IsPrimitivePoint(fromType) && IsPrimitivePoint(toType)) { + return &SelectCopy2Reg(opnd0, toType); + } + if (IsPrimitiveVector(fromType) || IsPrimitiveVector(toType)) { + return &SelectCopy2Reg(opnd0, toType); + } + if (IsPrimitiveInteger(fromType) && IsPrimitiveInteger(toType)) { + return &SelectCopy2Reg(opnd0, toType, fromType); + } + CHECK_FATAL(false, "NIY, retype"); + return nullptr; +} + +void MPISel::HandleFuncExit() const { + BlockNode *block = cgFunc->GetFunction().GetBody(); + ASSERT(block != nullptr, "get func body block failed in CGFunc::GenerateInstruction"); + cgFunc->GetCurBB()->SetLastStmt(*block->GetLast()); + /* Set lastbb's frequency */ + cgFunc->SetLastBB(*cgFunc->GetCurBB()); + /* the last BB is return BB */ + cgFunc->GetLastBB()->SetKind(BB::kBBReturn); + + cgFunc->AddCommonExitBB(); +} + +bool InstructionSelector::PhaseRun(maplebe::CGFunc &f) { + MPISel *mpIS = f.GetCG()->CreateMPIsel(*GetPhaseMemPool(), f); + mpIS->DoMPIS(); + return true; +} +} diff --git a/src/mapleall/maple_be/src/cg/isolate_fastpath.cpp b/src/mapleall/maple_be/src/cg/isolate_fastpath.cpp new file mode 100644 index 0000000000000000000000000000000000000000..ebcdc28cd077a29065f12bac4b9af29d47a08da1 --- /dev/null +++ b/src/mapleall/maple_be/src/cg/isolate_fastpath.cpp @@ -0,0 +1,32 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "isolate_fastpath.h" +#if TARGAARCH64 +#include "aarch64_isolate_fastpath.h" +#endif +#include "cgfunc.h" + +namespace maplebe { +using namespace maple; + +bool CgIsolateFastPath::PhaseRun(maplebe::CGFunc &f) { + IsolateFastPath *isolateFastPath = nullptr; +#if TARGAARCH64 + isolateFastPath = GetPhaseAllocator()->New(f); +#endif + isolateFastPath->Run(); + return false; +} +} /* namespace maplebe */ diff --git a/src/mapleall/maple_be/src/cg/label_creation.cpp b/src/mapleall/maple_be/src/cg/label_creation.cpp new file mode 100644 index 0000000000000000000000000000000000000000..e329da4937ceeb49986ddd1bd2d9b7419b94cda0 --- /dev/null +++ b/src/mapleall/maple_be/src/cg/label_creation.cpp @@ -0,0 +1,73 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "label_creation.h" +#include "cgfunc.h" +#include "cg.h" +#include "debug_info.h" + +namespace maplebe { +using namespace maple; + +void LabelCreation::Run() { + CreateStartEndLabel(); +} + +void LabelCreation::CreateStartEndLabel() const { + ASSERT(cgFunc != nullptr, "expect a cgfunc before CreateStartEndLabel"); + MIRBuilder *mirBuilder = cgFunc->GetFunction().GetModule()->GetMIRBuilder(); + ASSERT(mirBuilder != nullptr, "get mirbuilder failed in CreateStartEndLabel"); + + /* create start label */ + LabelIdx startLblIdx = cgFunc->CreateLabel(); + LabelNode *startLabel = mirBuilder->CreateStmtLabel(startLblIdx); + cgFunc->SetStartLabel(*startLabel); + cgFunc->GetFunction().GetBody()->InsertFirst(startLabel); + + /* creat return label */ + LabelIdx returnLblIdx = cgFunc->CreateLabel(); + LabelNode *returnLabel = mirBuilder->CreateStmtLabel(returnLblIdx); + cgFunc->SetReturnLabel(*returnLabel); + cgFunc->GetFunction().GetBody()->InsertLast(returnLabel); + + /* create end label */ + LabelIdx endLblIdx = cgFunc->CreateLabel(); + LabelNode *endLabel = mirBuilder->CreateStmtLabel(endLblIdx); + cgFunc->SetEndLabel(*endLabel); + cgFunc->GetFunction().GetBody()->InsertLast(endLabel); + ASSERT(cgFunc->GetFunction().GetBody()->GetLast() == endLabel, "last stmt must be a endLabel"); + + /* create function's low/high pc if dwarf enabled */ + MIRFunction *func = &cgFunc->GetFunction(); + CG *cg = cgFunc->GetCG(); + if (cg->GetCGOptions().WithDwarf()) { + DebugInfo *di = cg->GetMIRModule()->GetDbgInfo(); + DBGDie *fdie = di->GetDie(func); + fdie->SetAttr(DW_AT_low_pc, startLblIdx); + fdie->SetAttr(DW_AT_high_pc, endLblIdx); + } + + /* add start/end labels into the static map table in class cg */ + if (!CG::IsInFuncWrapLabels(func)) { + CG::SetFuncWrapLabels(func, std::make_pair(startLblIdx, endLblIdx)); + } +} + +bool CgCreateLabel::PhaseRun(maplebe::CGFunc &f) { + MemPool *memPool = GetPhaseMemPool(); + LabelCreation *labelCreate = memPool->New(f); + labelCreate->Run(); + return false; +} +} /* namespace maplebe */ diff --git a/src/mapleall/maple_be/src/cg/live.cpp b/src/mapleall/maple_be/src/cg/live.cpp new file mode 100644 index 0000000000000000000000000000000000000000..60e7070b4be8d90a8cda5812a0cefc68603555df --- /dev/null +++ b/src/mapleall/maple_be/src/cg/live.cpp @@ -0,0 +1,431 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "live.h" +#include +#include "cg.h" +#include "cg_option.h" +#include "cgfunc.h" + +/* + * This phase build two sets: liveOutRegno and liveInRegno of each BB. + * This algorithm mainly include 3 parts: + * 1. initialize and get def[]/use[] of each BB; + * 2. 
build live_in and live_out based on this algorithm + * Out[B] = U In[S] //S means B's successor; + * In[B] = use[B] U (Out[B]-def[B]); + * 3. deal with cleanup BB. + */ +namespace maplebe { +#define LIVE_ANALYZE_DUMP_NEWPM CG_DEBUG_FUNC(f) + +void LiveAnalysis::InitAndGetDefUse() { + FOR_ALL_BB(bb, cgFunc) { + if (!bb->GetEhPreds().empty()) { + InitEhDefine(*bb); + } + InitBB(*bb); + GetBBDefUse(*bb); + if (bb->GetEhPreds().empty()) { + continue; + } + bb->RemoveInsn(*bb->GetFirstInsn()->GetNext()); + cgFunc->DecTotalNumberOfInstructions(); + bb->RemoveInsn(*bb->GetFirstInsn()); + cgFunc->DecTotalNumberOfInstructions(); + } +} + +/* Out[BB] = Union all of In[Succs(BB)] */ +bool LiveAnalysis::GenerateLiveOut(BB &bb) const { + const auto bbLiveOutBak(bb.GetLiveOut()->GetInfo()); + for (auto succBB : bb.GetSuccs()) { + if (succBB->GetLiveInChange() && !succBB->GetLiveIn()->NoneBit()) { + bb.LiveOutOrBits(*succBB->GetLiveIn()); + } + if (!succBB->GetEhSuccs().empty()) { + for (auto ehSuccBB : succBB->GetEhSuccs()) { + bb.LiveOutOrBits(*ehSuccBB->GetLiveIn()); + } + } + } + for (auto ehSuccBB : bb.GetEhSuccs()) { + if (ehSuccBB->GetLiveInChange() && !ehSuccBB->GetLiveIn()->NoneBit()) { + bb.LiveOutOrBits(*ehSuccBB->GetLiveIn()); + } + } + return !bb.GetLiveOut()->IsEqual(bbLiveOutBak); +} + +/* In[BB] = use[BB] Union (Out[BB]-def[BB]) */ +bool LiveAnalysis::GenerateLiveIn(BB &bb) { + LocalMapleAllocator allocator(stackMp); + const auto bbLiveInBak(bb.GetLiveIn()->GetInfo()); + if (!bb.GetInsertUse()) { + bb.SetLiveInInfo(*bb.GetUse()); + bb.SetInsertUse(true); + } + SparseDataInfo &bbLiveOut = bb.GetLiveOut()->Clone(allocator); + if (!bbLiveOut.NoneBit()) { + bbLiveOut.Difference(*bb.GetDef()); + bb.LiveInOrBits(bbLiveOut); + } + + if (!bb.GetEhSuccs().empty()) { + /* If bb has eh successors, check if multi-gen exists. */ + SparseDataInfo allInOfEhSuccs(cgFunc->GetMaxVReg(), allocator); + for (auto ehSucc : bb.GetEhSuccs()) { + allInOfEhSuccs.OrBits(*ehSucc->GetLiveIn()); + } + allInOfEhSuccs.AndBits(*bb.GetDef()); + bb.LiveInOrBits(allInOfEhSuccs); + } + + if (!bb.GetLiveIn()->IsEqual(bbLiveInBak)) { + return true; + } + return false; +} + +/* building liveIn and liveOut of each BB. */ +void LiveAnalysis::BuildInOutforFunc() { + iteration = 0; + bool hasChange; + do { + ++iteration; + hasChange = false; + FOR_ALL_BB_REV(bb, cgFunc) { + if (!GenerateLiveOut(*bb) && bb->GetInsertUse()) { + continue; + } + if (GenerateLiveIn(*bb)) { + bb->SetLiveInChange(true); + hasChange = true; + } else { + bb->SetLiveInChange(false); + } + } + } while (hasChange); +} + +/* reset to liveout/in_regno */ +void LiveAnalysis::ResetLiveSet() { + FOR_ALL_BB(bb, cgFunc) { + bb->GetLiveIn()->GetBitsOfInfo>(bb->GetLiveInRegNO()); + bb->GetLiveOut()->GetBitsOfInfo>(bb->GetLiveOutRegNO()); + } +} + +/* entry function for LiveAnalysis */ +void LiveAnalysis::AnalysisLive() { + InitAndGetDefUse(); + BuildInOutforFunc(); + InsertInOutOfCleanupBB(); +} + +void LiveAnalysis::DealWithInOutOfCleanupBB() { + const BB *cleanupBB = cgFunc->GetCleanupBB(); + if (cleanupBB == nullptr) { + return; + } + for (size_t i = 0; i != cleanupBB->GetLiveIn()->Size(); ++i) { + if (!cleanupBB->GetLiveIn()->TestBit(i)) { + continue; + } + if (CleanupBBIgnoreReg(regno_t(i))) { + continue; + } + /* + * a param vreg may used in cleanup bb. So this param vreg will live on the whole function + * since everywhere in function body may occur exceptions. 
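+     * Hence the loop below adds the register to liveOut of every non-cleanup BB, and to
+     * liveIn of every non-cleanup BB (other than the entry BB) whose def set does not contain it.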
+ */ + FOR_ALL_BB(bb, cgFunc) { + if (bb->IsCleanup()) { + continue; + } + /* If bb is not a cleanup bb, then insert reg to both livein and liveout. */ + if ((bb != cgFunc->GetFirstBB()) && !bb->GetDef()->TestBit(i)) { + bb->SetLiveInBit(i); + } + bb->SetLiveOutBit(i); + } + } +} + +void LiveAnalysis::InsertInOutOfCleanupBB() { + LocalMapleAllocator allocator(stackMp); + const BB *cleanupBB = cgFunc->GetCleanupBB(); + if (cleanupBB == nullptr) { + return; + } + if (cleanupBB->GetLiveIn() == nullptr || cleanupBB->GetLiveIn()->NoneBit()) { + return; + } + SparseDataInfo &cleanupBBLi = cleanupBB->GetLiveIn()->Clone(allocator); + /* registers need to be ignored: (reg < 8) || (29 <= reg && reg <= 32) */ + for (uint32 i = 1; i < 8; ++i) { + cleanupBBLi.ResetBit(i); + } + for (uint32 j = 29; j <= 32; ++j) { + cleanupBBLi.ResetBit(j); + } + + FOR_ALL_BB(bb, cgFunc) { + if (bb->IsCleanup()) { + continue; + } + if (bb != cgFunc->GetFirstBB()) { + cleanupBBLi.Difference(*bb->GetDef()); + bb->LiveInOrBits(cleanupBBLi); + } + bb->LiveOutOrBits(cleanupBBLi); + } +} + +/* + * entry of get def/use of bb. + * getting the def or use info of each regopnd as parameters of CollectLiveInfo(). +*/ +void LiveAnalysis::GetBBDefUse(BB &bb) { + if (bb.GetKind() == BB::kBBReturn) { + GenerateReturnBBDefUse(bb); + } + if (bb.IsEmpty()) { + return; + } + bb.DefResetAllBit(); + bb.UseResetAllBit(); + + FOR_BB_INSNS_REV(insn, &bb) { + if (!insn->IsMachineInstruction() && !insn->IsPhi()) { + continue; + } + + bool isAsm = insn->IsAsmInsn(); + const InsnDesc *md = insn->GetDesc(); + if (insn->IsCall() || insn->IsTailCall()) { + ProcessCallInsnParam(bb, *insn); + } + uint32 opndNum = insn->GetOperandSize(); + for (uint32 i = 0; i < opndNum; ++i) { + const OpndDesc *opndDesc = md->GetOpndDes(i); + ASSERT(opndDesc != nullptr, "null ptr check"); + Operand &opnd = insn->GetOperand(i); + if (opnd.IsList()) { + if (isAsm) { + ProcessAsmListOpnd(bb, opnd, i); + } else { + ProcessListOpnd(bb, opnd, opndDesc->IsDef()); + } + } else if (opnd.IsMemoryAccessOperand()) { + ProcessMemOpnd(bb, opnd); + } else if (opnd.IsConditionCode()) { + ProcessCondOpnd(bb); + } else if (opnd.IsPhi()) { + auto &phiOpnd = static_cast(opnd); + for (auto opIt : phiOpnd.GetOperands()) { + CollectLiveInfo(bb, *opIt.second, false, true); + } + } else { + bool isDef = opndDesc->IsRegDef(); + bool isUse = opndDesc->IsRegUse(); + CollectLiveInfo(bb, opnd, isDef, isUse); + } + } + } +} + +/* build use and def sets of each BB according to the type of regOpnd. 
*/ +void LiveAnalysis::CollectLiveInfo(BB &bb, const Operand &opnd, bool isDef, bool isUse) const { + if (!opnd.IsRegister()) { + return; + } + const RegOperand ®Opnd = static_cast(opnd); + regno_t regNO = regOpnd.GetRegisterNumber(); + RegType regType = regOpnd.GetRegisterType(); + if (regType == kRegTyVary) { + return; + } + if (isDef) { + bb.SetDefBit(regNO); + if (!isUse) { + bb.UseResetBit(regNO); + } + } + if (isUse) { + bb.SetUseBit(regNO); + bb.DefResetBit(regNO); + } +} + +void LiveAnalysis::ProcessAsmListOpnd(BB &bb, Operand &opnd, uint32 idx) const { + bool isDef = false; + bool isUse = false; + switch (idx) { + case kAsmOutputListOpnd: + case kAsmClobberListOpnd: { + isDef = true; + break; + } + case kAsmInputListOpnd: { + isUse = true; + break; + } + default: + return; + } + ListOperand &listOpnd = static_cast(opnd); + for (auto op : listOpnd.GetOperands()) { + CollectLiveInfo(bb, *op, isDef, isUse); + } +} + +void LiveAnalysis::ProcessListOpnd(BB &bb, Operand &opnd, bool isDef) const { + ListOperand &listOpnd = static_cast(opnd); + for (auto op : listOpnd.GetOperands()) { + CollectLiveInfo(bb, *op, isDef, !isDef); + } +} + +void LiveAnalysis::ProcessMemOpnd(BB &bb, Operand &opnd) const { + auto &memOpnd = static_cast(opnd); + Operand *base = memOpnd.GetBaseRegister(); + Operand *offset = memOpnd.GetIndexRegister(); + if (base != nullptr) { + CollectLiveInfo(bb, *base, !memOpnd.IsIntactIndexed(), true); + } + if (offset != nullptr) { + CollectLiveInfo(bb, *offset, false, true); + } +} + +void LiveAnalysis::ProcessCondOpnd(BB &bb) const { + Operand &rflag = cgFunc->GetOrCreateRflag(); + CollectLiveInfo(bb, rflag, false, true); +} + +/* dump the current info of def/use/livein/liveout */ +void LiveAnalysis::Dump() const { + MIRSymbol *funcSt = GlobalTables::GetGsymTable().GetSymbolFromStidx(cgFunc->GetFunction().GetStIdx().Idx()); + ASSERT(funcSt != nullptr, "null ptr check"); + LogInfo::MapleLogger() << "\n--------- liveness for " << funcSt->GetName() << " iteration "; + LogInfo::MapleLogger() << iteration << " ---------\n"; + FOR_ALL_BB(bb, cgFunc) { + LogInfo::MapleLogger() << " === BB_" << bb->GetId() << " (" << std::hex << bb << ") " + << std::dec << " <" << bb->GetKindName(); + if (bb->GetLabIdx() != MIRLabelTable::GetDummyLabel()) { + LogInfo::MapleLogger() << "[labeled with " << bb->GetLabIdx() << "]"; + } + LogInfo::MapleLogger() << "> idx " << bb->GetId() << " ===\n"; + + if (!bb->GetPreds().empty()) { + LogInfo::MapleLogger() << " pred [ "; + for (auto *pred : bb->GetPreds()) { + LogInfo::MapleLogger() << pred->GetId() << " (" << std::hex << pred << ") " << std::dec << " "; + } + LogInfo::MapleLogger() << "]\n"; + } + if (!bb->GetSuccs().empty()) { + LogInfo::MapleLogger() << " succ [ "; + for (auto *succ : bb->GetSuccs()) { + LogInfo::MapleLogger() << succ->GetId() << " (" << std::hex << succ << ") " << std::dec << " "; + } + LogInfo::MapleLogger() << "]\n"; + } + + const SparseDataInfo *infoDef = nullptr; + LogInfo::MapleLogger() << " DEF: "; + infoDef = bb->GetDef(); + DumpInfo(*infoDef); + + const SparseDataInfo *infoUse = nullptr; + LogInfo::MapleLogger() << "\n USE: "; + infoUse = bb->GetUse(); + DumpInfo(*infoUse); + + const SparseDataInfo *infoLiveIn = nullptr; + LogInfo::MapleLogger() << "\n Live IN: "; + infoLiveIn = bb->GetLiveIn(); + DumpInfo(*infoLiveIn); + + const SparseDataInfo *infoLiveOut = nullptr; + LogInfo::MapleLogger() << "\n Live OUT: "; + infoLiveOut = bb->GetLiveOut(); + DumpInfo(*infoLiveOut); + LogInfo::MapleLogger() << "\n"; + } + 
LogInfo::MapleLogger() << "---------------------------\n"; +} + +void LiveAnalysis::DumpInfo(const SparseDataInfo &info) const { + uint32 count = 1; + std::set res; + info.GetInfo().ConvertToSet(res); + for (uint32 x : res) { + LogInfo::MapleLogger() << x << " "; + ++count; + /* 20 output one line */ + if ((count % 20) == 0) { + LogInfo::MapleLogger() << "\n"; + } + } + LogInfo::MapleLogger() << '\n'; +} + +/* initialize dependent info and container of BB. */ +void LiveAnalysis::InitBB(BB &bb) { + bb.SetLiveInChange(true); + bb.SetInsertUse(false); + bb.ClearLiveInRegNO(); + bb.ClearLiveOutRegNO(); + const uint32 maxRegCount = cgFunc->GetSSAvRegCount() > cgFunc->GetMaxVReg() ? + cgFunc->GetSSAvRegCount() : cgFunc->GetMaxVReg(); + bb.SetLiveIn(*NewLiveIn(maxRegCount)); + bb.SetLiveOut(*NewLiveOut(maxRegCount)); + bb.SetDef(*NewDef(maxRegCount)); + bb.SetUse(*NewUse(maxRegCount)); +} + +void LiveAnalysis::ClearInOutDataInfo() { + FOR_ALL_BB(bb, cgFunc) { + bb->SetLiveInChange(false); + bb->DefClearDataInfo(); + bb->UseClearDataInfo(); + bb->LiveInClearDataInfo(); + bb->LiveOutClearDataInfo(); + } +} + +void LiveAnalysis::EnlargeSpaceForLiveAnalysis(BB &currBB) { + regno_t currMaxVRegNO = cgFunc->GetMaxVReg(); + if (currMaxVRegNO >= currBB.GetLiveIn()->Size()) { + FOR_ALL_BB(bb, cgFunc) { + bb->LiveInEnlargeCapacity(currMaxVRegNO); + bb->LiveOutEnlargeCapacity(currMaxVRegNO); + } + } +} + +bool CgLiveAnalysis::PhaseRun(maplebe::CGFunc &f) { + MemPool *liveMemPool = GetPhaseMemPool(); + live = f.GetCG()->CreateLiveAnalysis(*liveMemPool, f); + CHECK_FATAL(live != nullptr, "NIY"); + live->AnalysisLive(); + if (LIVE_ANALYZE_DUMP_NEWPM) { + live->Dump(); + } + return false; +} +MAPLE_ANALYSIS_PHASE_REGISTER(CgLiveAnalysis, liveanalysis) +} /* namespace maplebe */ diff --git a/src/mapleall/maple_be/src/cg/local_opt.cpp b/src/mapleall/maple_be/src/cg/local_opt.cpp new file mode 100644 index 0000000000000000000000000000000000000000..65b5639c324c401e7e500e9d63c848ce97be6337 --- /dev/null +++ b/src/mapleall/maple_be/src/cg/local_opt.cpp @@ -0,0 +1,89 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "local_opt.h" +#include "cg.h" +#include "mpl_logging.h" +#if defined TARGX86_64 +#include "x64_reaching.h" +#endif +/* + * this phase does optimization on local level(single bb or super bb) + * this phase requires liveanalysis + */ +namespace maplebe { +void LocalOpt::DoLocalCopyPropOptmize() { + DoLocalCopyProp(); +} + +void LocalPropOptimizePattern::Run() { + FOR_ALL_BB(bb, &cgFunc) { + FOR_BB_INSNS(insn, bb) { + if (!insn->IsMachineInstruction()) { + continue; + } + if (!CheckCondition(*insn)) { + continue; + } + Optimize(*bb, *insn); + } + } +} + +bool LocalCopyProp::PhaseRun(maplebe::CGFunc &f) { + MemPool *mp = GetPhaseMemPool(); + LiveAnalysis *liveInfo = nullptr; + liveInfo = GET_ANALYSIS(CgLiveAnalysis, f); + liveInfo->ResetLiveSet(); + auto *reachingDef = f.GetCG()->CreateReachingDefinition(*mp, f); + LocalOpt *localOpt = f.GetCG()->CreateLocalOpt(*mp, f, *reachingDef); + localOpt->DoLocalCopyPropOptmize(); + return false; +} + +void LocalCopyProp::GetAnalysisDependence(maple::AnalysisDep &aDep) const { + aDep.AddRequired(); + aDep.AddPreserved(); +} + +bool RedundantDefRemove::CheckCondition(Insn &insn) { + uint32 opndNum = insn.GetOperandSize(); + const InsnDesc *md = insn.GetDesc(); + std::vector defOpnds; + for (uint32 i = 0; i < opndNum; ++i) { + Operand &opnd = insn.GetOperand(i); + auto *opndDesc = md->opndMD[i]; + if (opndDesc->IsDef() && opndDesc->IsUse()) { + return false; + } + if (opnd.IsList()) { + continue; + } + if (opndDesc->IsDef()) { + defOpnds.emplace_back(&opnd); + } + } + if (defOpnds.size() != 1 || !defOpnds[0]->IsRegister()) { + return false; + } + auto ®Def = static_cast(*defOpnds[0]); + auto &liveOutRegSet = insn.GetBB()->GetLiveOutRegNO(); + if (liveOutRegSet.find(regDef.GetRegisterNumber()) != liveOutRegSet.end()) { + return false; + } + return true; +} + +MAPLE_TRANSFORM_PHASE_REGISTER_CANSKIP(LocalCopyProp, localcopyprop) +} /* namespace maplebe */ diff --git a/src/mapleall/maple_be/src/cg/loop.cpp b/src/mapleall/maple_be/src/cg/loop.cpp new file mode 100644 index 0000000000000000000000000000000000000000..7e752bb0a18d298e5992c315d901fda2c0991479 --- /dev/null +++ b/src/mapleall/maple_be/src/cg/loop.cpp @@ -0,0 +1,679 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "loop.h" +#include "cg.h" +#include "optimize_common.h" + +namespace maplebe { +#define LOOP_ANALYSIS_DUMP_NEWPM CG_DEBUG_FUNC(f) + +static void PrintLoopInfo(const LoopHierarchy &loop) { + LogInfo::MapleLogger() << "header " << loop.GetHeader()->GetId(); + if (loop.GetOtherLoopEntries().size() != 0) { + LogInfo::MapleLogger() << " multi-header "; + for (auto en : loop.GetOtherLoopEntries()) { + LogInfo::MapleLogger() << en->GetId() << " "; + } + } + if (loop.GetOuterLoop() != nullptr) { + LogInfo::MapleLogger() << " parent " << loop.GetOuterLoop()->GetHeader()->GetId(); + } + LogInfo::MapleLogger() << " backedge "; + for (auto *bb : loop.GetBackedge()) { + LogInfo::MapleLogger() << bb->GetId() << " "; + } + LogInfo::MapleLogger() << "\n members "; + for (auto *bb : loop.GetLoopMembers()) { + LogInfo::MapleLogger() << bb->GetId() << " "; + } + if (!loop.GetInnerLoops().empty()) { + LogInfo::MapleLogger() << "\n inner_loop_headers "; + for (auto *inner : loop.GetInnerLoops()) { + LogInfo::MapleLogger() << inner->GetHeader()->GetId() << " "; + } + } + LogInfo::MapleLogger() << "\n"; +} + +static void PrintInner(const LoopHierarchy &loop, uint32 level) { + for (auto *inner : loop.GetInnerLoops()) { + LogInfo::MapleLogger() << "loop-level-" << level << "\n"; + PrintLoopInfo(*inner); + PrintInner(*inner, level + 1); + } +} + +void LoopHierarchy::PrintLoops(const std::string &name) const { + LogInfo::MapleLogger() << name << "\n"; + for (const LoopHierarchy *loop = this; loop != nullptr; loop = loop->next) { + PrintLoopInfo(*loop); + } + for (const LoopHierarchy *loop = this; loop != nullptr; loop = loop->next) { + PrintInner(*loop, 1); + } +} + +void CGFuncLoops::CheckOverlappingInnerLoops(const MapleVector &iLoops, + const MapleVector &loopMem) const { + for (auto iloop : iLoops) { + CHECK_FATAL(iloop->loopMembers.size() > 0, "Empty loop"); + for (auto bb: iloop->loopMembers) { + if (find(loopMem.begin(), loopMem.end(), bb) != loopMem.end()) { + LogInfo::MapleLogger() << "Error: inconsistent loop member"; + CHECK_FATAL(0, "loop member overlap with inner loop"); + } + } + CheckOverlappingInnerLoops(iloop->innerLoops, loopMem); + } +} + +void CGFuncLoops::CheckLoops() const { + // Make sure backedge -> header relationship holds + for (auto bEdge: backedge) { + if (find(bEdge->GetSuccs().begin(), bEdge->GetSuccs().end(), header) == bEdge->GetSuccs().end()) { + bool inOtherEntry = false; + for (auto entry: multiEntries) { + if (find(bEdge->GetSuccs().begin(), bEdge->GetSuccs().end(), entry) != bEdge->GetSuccs().end()) { + inOtherEntry = true; + break; + } + } + if (inOtherEntry == false) { + if (find(bEdge->GetEhSuccs().begin(), bEdge->GetEhSuccs().end(), header) == bEdge->GetEhSuccs().end()) { + LogInfo::MapleLogger() << "Error: inconsistent loop backedge"; + CHECK_FATAL(0, "loop backedge does not go to loop header"); + } + } + } + if (find(header->GetPreds().begin(), header->GetPreds().end(), bEdge) == header->GetPreds().end()) { + bool inOtherEntry = false; + for (auto entry: multiEntries) { + if (find(entry->GetPreds().begin(), entry->GetPreds().end(), bEdge) != entry->GetPreds().end()) { + inOtherEntry = true; + break; + } + } + if (inOtherEntry == false) { + if (find(header->GetEhPreds().begin(), header->GetEhPreds().end(), bEdge) == header->GetEhPreds().end()) { + LogInfo::MapleLogger() << "Error: inconsistent loop header"; + CHECK_FATAL(0, "loop header does not have a backedge"); + } + } + } + } + + // Make sure containing loop members do not overlap + 
CheckOverlappingInnerLoops(innerLoops, loopMembers); + + if (innerLoops.empty() == false) { + for (auto lp : innerLoops) { + lp->CheckLoops(); + } + } +} + +void CGFuncLoops::PrintLoops(const CGFuncLoops &funcLoop) const { + LogInfo::MapleLogger() << "loop_level(" << funcLoop.loopLevel << ") "; + LogInfo::MapleLogger() << "header " << funcLoop.GetHeader()->GetId() << " "; + if (funcLoop.multiEntries.size() != 0) { + LogInfo::MapleLogger() << "other-header "; + for (auto bb : funcLoop.multiEntries) { + LogInfo::MapleLogger() << bb->GetId() << " "; + } + } + if (funcLoop.GetOuterLoop() != nullptr) { + LogInfo::MapleLogger() << "parent " << funcLoop.GetOuterLoop()->GetHeader()->GetId() << " "; + } + LogInfo::MapleLogger() << "backedge "; + for (auto *bb : funcLoop.GetBackedge()) { + LogInfo::MapleLogger() << bb->GetId() << " "; + } + LogInfo::MapleLogger() << "\n members "; + for (auto *bb : funcLoop.GetLoopMembers()) { + LogInfo::MapleLogger() << bb->GetId() << " "; + } + LogInfo::MapleLogger() << "\n exits "; + for (auto *bb : funcLoop.GetExits()) { + LogInfo::MapleLogger() << bb->GetId() << " "; + } + LogInfo::MapleLogger() << "\n"; + if (!funcLoop.GetInnerLoops().empty()) { + LogInfo::MapleLogger() << " inner_loop_headers "; + for (auto *inner : funcLoop.GetInnerLoops()) { + LogInfo::MapleLogger() << inner->GetHeader()->GetId() << " "; + } + LogInfo::MapleLogger() << "\n"; + for (auto *inner : funcLoop.GetInnerLoops()) { + PrintLoops(*inner); + } + } +} + +bool CGFuncLoops::IsBBLoopMember(const BB *bb) const { + return (*(std::find(loopMembers.begin(), loopMembers.end(), bb)) == bb); +} + +// partial loop body found with formLoop is NOT really needed in down stream +// It should be simplied later +void LoopFinder::FormLoop(BB* headBB, BB* backBB) { + ASSERT(headBB != nullptr && backBB != nullptr, "headBB or backBB is nullptr"); + LoopHierarchy *simpleLoop = memPool->New(*memPool); + + if (headBB != backBB) { + ASSERT(!dfsBBs.empty(), "dfsBBs is empty"); + ASSERT(onPathBBs[headBB->GetId()], "headBB is not on execution path"); + std::stack tempStk; + + tempStk.push(dfsBBs.top()); + dfsBBs.pop(); + + while (tempStk.top() != headBB && !dfsBBs.empty()) { + tempStk.push(dfsBBs.top()); + dfsBBs.pop(); + } + + while (!tempStk.empty()) { + BB *topBB = tempStk.top(); + tempStk.pop(); + + if (onPathBBs[topBB->GetId()]) { + simpleLoop->InsertLoopMembers(*topBB); + } + dfsBBs.push(topBB); + } + } + // Note: backBB is NOT on dfsBBs + simpleLoop->InsertLoopMembers(*backBB); + simpleLoop->SetHeader(*headBB); + simpleLoop->InsertBackedge(*backBB); + simpleLoop->InsertBackBBEdge(*backBB, *headBB); + + if (loops) { + loops->SetPrev(simpleLoop); + } + simpleLoop->SetNext(loops); + loops = simpleLoop; +} + +void LoopFinder::SeekBackEdge(BB* bb, MapleList succs) { + for (const auto succBB : succs) { + if (!visitedBBs[succBB->GetId()]) { + dfsBBs.push(succBB); + } else { + if (onPathBBs[succBB->GetId()]) { + FormLoop(succBB, bb); + bb->PushBackLoopSuccs(*succBB); + succBB->PushBackLoopPreds(*bb); + } + } + } +} + +void LoopFinder::SeekCycles() { + while (!dfsBBs.empty()) { + BB *bb = dfsBBs.top(); + if (visitedBBs[bb->GetId()]) { + onPathBBs[bb->GetId()] = false; + dfsBBs.pop(); + continue; + } + + visitedBBs[bb->GetId()] = true; + onPathBBs[bb->GetId()] = true; + SeekBackEdge(bb, bb->GetSuccs()); + SeekBackEdge(bb, bb->GetEhSuccs()); + } +} + +void LoopFinder::MarkExtraEntryAndEncl() { + ASSERT(dfsBBs.empty(), "dfsBBs is NOT empty"); + std::vector loopEnclosure; + loopEnclosure.resize(cgFunc->NumBBs()); + 
std::vector startProcess; + startProcess.resize(cgFunc->NumBBs()); + std::vector origEntries; + origEntries.resize(cgFunc->NumBBs()); + std::vector newEntries; + newEntries.resize(cgFunc->NumBBs()); + + for (LoopHierarchy *loop = loops; loop != nullptr; loop = loop->GetNext()) { + fill(visitedBBs.begin(), visitedBBs.end(), false); + fill(loopEnclosure.begin(), loopEnclosure.end(), nullptr); + fill(startProcess.begin(), startProcess.end(), false); + fill(origEntries.begin(), origEntries.end(), nullptr); + fill(newEntries.begin(), newEntries.end(), nullptr); + + for (auto *bb : loop->GetLoopMembers()) { + loopEnclosure[bb->GetId()] = bb; + } + origEntries[loop->GetHeader()->GetId()] = loop->GetHeader(); + + // Form loop closure from the primary entry. At end collect all other entries + bool changed = false; + dfsBBs.push(loop->GetHeader()); + while (true) { + while (!dfsBBs.empty()) { + BB *bb = dfsBBs.top(); + visitedBBs[bb->GetId()] = true; + if (startProcess[bb->GetId()]) { + dfsBBs.pop(); + for (const auto succBB : bb->GetSuccs()) { + if (loopEnclosure[bb->GetId()] == nullptr && + loopEnclosure[succBB->GetId()] != nullptr && + succBB != loop->GetHeader()) { + changed = true; + loopEnclosure[bb->GetId()] = bb; + break; + } + } + continue; + } else { + startProcess[bb->GetId()] = true; + for (const auto succBB : bb->GetSuccs()) { + if (!visitedBBs[succBB->GetId()]) { + dfsBBs.push(succBB); + } + } + } + } + + // Repeat till no new item is added in + if (changed) { + dfsBBs.push(loop->GetHeader()); + changed = false; + fill(visitedBBs.begin(), visitedBBs.end(), false); + fill(startProcess.begin(), startProcess.end(), false); + continue; + } + + // Collect all entries + bool foundNewEntry = false; + fill(visitedBBs.begin(), visitedBBs.end(), false); + FOR_ALL_BB(bb, cgFunc) { + if (!visitedBBs[bb->GetId()]) { + dfsBBs.push(bb); + visitedBBs[bb->GetId()] = true; + while (!dfsBBs.empty()) { + BB *currBB = dfsBBs.top(); + visitedBBs[currBB->GetId()] = true; + dfsBBs.pop(); + for (const auto succBB : currBB->GetSuccs()) { + // check if entering a loop. 
+ if ((loopEnclosure[succBB->GetId()] != nullptr) && + (loopEnclosure[currBB->GetId()] == nullptr)) { + newEntries[succBB->GetId()] = succBB; + if (origEntries[succBB->GetId()] == nullptr) { + foundNewEntry = true; + } + } + if (!visitedBBs[succBB->GetId()]) { + dfsBBs.push(succBB); + } + } + } + } + } + if (foundNewEntry) { + origEntries = newEntries; + for (const auto bb : newEntries) { + if (bb != nullptr) { + dfsBBs.push(bb); + } + } + fill(visitedBBs.begin(), visitedBBs.end(), false); + fill(startProcess.begin(), startProcess.end(), false); + fill(newEntries.begin(), newEntries.end(), nullptr); + } else { + break; + } + } + + // Setup loop body + for (size_t id = 0; id < loopEnclosure.size(); id++) { + if (loopEnclosure[id] != nullptr) { + loop->InsertLoopMembers(*loopEnclosure[id]); + } + } + + // Setup head and extra entries + for (const auto bb : newEntries) { + if (bb != nullptr) { + loop->InsertBBToOtherLoopEntries(bb); + } + } + loop->EraseBBFromOtherLoopEntries(loop->GetHeader()); + } +} + +bool LoopFinder::HasSameHeader(const LoopHierarchy *lp1, const LoopHierarchy *lp2) const { + if (lp1->GetHeader() == lp2->GetHeader()) { + return true; + } + for (auto other1 : lp1->GetOtherLoopEntries()) { + if (lp2->GetHeader() == other1) { + return true; + } + for (auto other2 : lp2->GetOtherLoopEntries()) { + if (other2 == other1) { + return true; + } + } + } + return false; +} + +void LoopFinder::MergeLoops() { + for (LoopHierarchy *loopHierarchy1 = loops; loopHierarchy1 != nullptr; loopHierarchy1 = loopHierarchy1->GetNext()) { + for (LoopHierarchy *loopHierarchy2 = loopHierarchy1->GetNext(); loopHierarchy2 != nullptr; + loopHierarchy2 = loopHierarchy2->GetNext()) { + // Different loop bodies imply different loops + bool sameLoop = true; + if (loopHierarchy1->GetLoopMembers().size() == loopHierarchy2->GetLoopMembers().size()) { + for (auto *bb : loopHierarchy2->GetLoopMembers()) { + if (find(loopHierarchy1->GetLoopMembers().begin(), loopHierarchy1->GetLoopMembers().end(), bb) == + loopHierarchy1->GetLoopMembers().end()) { + sameLoop = false; + break; + } + } + if (sameLoop) { + for (auto *bb : loopHierarchy1->GetLoopMembers()) { + if (find(loopHierarchy2->GetLoopMembers().begin(), loopHierarchy2->GetLoopMembers().end(), bb) == + loopHierarchy2->GetLoopMembers().end()) { + sameLoop = false; + break; + } + } + } + if (sameLoop) { + loopHierarchy2->GetPrev()->SetNext(loopHierarchy2->GetNext()); + if (loopHierarchy2->GetNext() != nullptr) { + loopHierarchy2->GetNext()->SetPrev(loopHierarchy2->GetPrev()); + } + continue; + } + } + if (HasSameHeader(loopHierarchy1, loopHierarchy2) == false) { + continue; + } + for (auto *bb : loopHierarchy2->GetLoopMembers()) { + loopHierarchy1->InsertLoopMembers(*bb); + } + if (loopHierarchy1->GetHeader() != loopHierarchy2->GetHeader()) { + loopHierarchy1->InsertBBToOtherLoopEntries(loopHierarchy2->GetHeader()); + } + for (auto bb : loopHierarchy2->GetOtherLoopEntries()) { + if (loopHierarchy1->GetHeader() != bb) { + loopHierarchy1->InsertBBToOtherLoopEntries(bb); + } + } + for (auto *bb : loopHierarchy2->GetBackedge()) { + loopHierarchy1->InsertBackedge(*bb); + } + loopHierarchy2->GetPrev()->SetNext(loopHierarchy2->GetNext()); + if (loopHierarchy2->GetNext() != nullptr) { + loopHierarchy2->GetNext()->SetPrev(loopHierarchy2->GetPrev()); + } + } + } +} + +void LoopFinder::SortLoops() { + LoopHierarchy *head = nullptr; + LoopHierarchy *next1 = nullptr; + LoopHierarchy *next2 = nullptr; + bool swapped; + do { + swapped = false; + for (LoopHierarchy 
*loopHierarchy1 = loops; loopHierarchy1 != nullptr;) { + /* remember loopHierarchy1's prev in case if loopHierarchy1 moved */ + head = loopHierarchy1; + next1 = loopHierarchy1->GetNext(); + for (LoopHierarchy *loopHierarchy2 = loopHierarchy1->GetNext(); loopHierarchy2 != nullptr;) { + next2 = loopHierarchy2->GetNext(); + + if (loopHierarchy1->GetLoopMembers().size() > loopHierarchy2->GetLoopMembers().size()) { + if (head->GetPrev() == nullptr) { + /* remove loopHierarchy2 from list */ + loopHierarchy2->GetPrev()->SetNext(loopHierarchy2->GetNext()); + if (loopHierarchy2->GetNext() != nullptr) { + loopHierarchy2->GetNext()->SetPrev(loopHierarchy2->GetPrev()); + } + /* link loopHierarchy2 as head */ + loops = loopHierarchy2; + loopHierarchy2->SetPrev(nullptr); + loopHierarchy2->SetNext(head); + head->SetPrev(loopHierarchy2); + } else { + loopHierarchy2->GetPrev()->SetNext(loopHierarchy2->GetNext()); + if (loopHierarchy2->GetNext() != nullptr) { + loopHierarchy2->GetNext()->SetPrev(loopHierarchy2->GetPrev()); + } + head->GetPrev()->SetNext(loopHierarchy2); + loopHierarchy2->SetPrev(head->GetPrev()); + loopHierarchy2->SetNext(head); + head->SetPrev(loopHierarchy2); + } + head = loopHierarchy2; + swapped = true; + } + loopHierarchy2 = next2; + } + loopHierarchy1 = next1; + } + } while (swapped); +} + +void LoopFinder::UpdateOuterForInnerLoop(BB *bb, LoopHierarchy *outer) { + if (outer == nullptr) { + return; + } + for (auto ito = outer->GetLoopMembers().begin(); ito != outer->GetLoopMembers().end();) { + if (*ito == bb) { + ito = outer->EraseLoopMembers(ito); + } else { + ++ito; + } + } + if (outer->GetOuterLoop() != nullptr) { + UpdateOuterForInnerLoop(bb, const_cast(outer->GetOuterLoop())); + } +} + +void LoopFinder::UpdateOuterLoop(const LoopHierarchy *loop) { + for (auto inner : loop->GetInnerLoops()) { + UpdateOuterLoop(inner); + } + for (auto *bb : loop->GetLoopMembers()) { + UpdateOuterForInnerLoop(bb, const_cast(loop->GetOuterLoop())); + } +} + +void LoopFinder::CreateInnerLoop(LoopHierarchy &inner, LoopHierarchy &outer) { + outer.InsertInnerLoops(inner); + inner.SetOuterLoop(outer); + if (loops == &inner) { + loops = inner.GetNext(); + } else { + LoopHierarchy *prev = loops; + for (LoopHierarchy *loopHierarchy1 = loops->GetNext(); loopHierarchy1 != nullptr; + loopHierarchy1 = loopHierarchy1->GetNext()) { + if (loopHierarchy1 == &inner) { + prev->SetNext(prev->GetNext()->GetNext()); + } + prev = loopHierarchy1; + } + } +} + +static void FindLoopExits(LoopHierarchy *loop) { + for (auto *bb : loop->GetLoopMembers()) { + for (auto succ : bb->GetSuccs()) { + if (find(loop->GetLoopMembers().begin(), loop->GetLoopMembers().end(), succ) == loop->GetLoopMembers().end()) { + loop->InsertExit(*bb); + } + } + } + for (auto *inner : loop->GetInnerLoops()) { + FindLoopExits(inner); + } +} + +void LoopFinder::DetectInnerLoop() { + for (LoopHierarchy *loop = loops; loop != nullptr; loop = loop->GetNext()) { + FindLoopExits(loop); + } + bool innerCreated; + do { + innerCreated = false; + for (LoopHierarchy *loopHierarchy1 = loops; loopHierarchy1 != nullptr; + loopHierarchy1 = loopHierarchy1->GetNext()) { + for (LoopHierarchy *loopHierarchy2 = loopHierarchy1->GetNext(); loopHierarchy2 != nullptr; + loopHierarchy2 = loopHierarchy2->GetNext()) { + if (loopHierarchy1->GetHeader() != loopHierarchy2->GetHeader()) { + const auto &loopHierarchy2Members = loopHierarchy2->GetLoopMembers(); + if (find(loopHierarchy2Members.begin(), loopHierarchy2Members.end(), loopHierarchy1->GetHeader()) != + 
loopHierarchy2Members.end()) { + bool allin = true; + // Make sure body is included + for (auto *bb1 : loopHierarchy1->GetLoopMembers()) { + if (find(loopHierarchy2Members.begin(), loopHierarchy2Members.end(), bb1) == + loopHierarchy2Members.end()) { + allin = false; + break; + } + } + if (allin) { + CreateInnerLoop(*loopHierarchy1, *loopHierarchy2); + innerCreated = true; + } + } + if (innerCreated) { + break; + } + } + } + if (innerCreated) { + break; + } + } + } while (innerCreated); + + for (LoopHierarchy *outer = loops; outer != nullptr; outer = outer->GetNext()) { + UpdateOuterLoop(outer); + } +} + +static void CopyLoopInfo(const LoopHierarchy *from, CGFuncLoops *to, CGFuncLoops *parent, MemPool *memPool) { + to->SetHeader(*const_cast(from->GetHeader())); + for (auto bb : from->GetOtherLoopEntries()) { + to->AddMultiEntries(*bb); + } + for (auto *bb : from->GetLoopMembers()) { + to->AddLoopMembers(*bb); + bb->SetLoop(*to); + } + for (auto *bb : from->GetBackedge()) { + to->AddBackedge(*bb); + } + for (auto &backPair : from->GetBackBBEdges()) { + CHECK_FATAL(backPair.first != nullptr, "get invalid backEdge info"); + CHECK_FATAL(!backPair.second->empty(), "get invalid backEdge info"); + for (auto headers : *(backPair.second)) { + to->AddBackBBEdge(*backPair.first, *headers); + } + } + for (auto *bb : from->GetExits()) { + to->AddExit(*bb); + } + if (!from->GetInnerLoops().empty()) { + for (auto *inner : from->GetInnerLoops()) { + auto *floop = memPool->New(*memPool); + to->AddInnerLoops(*floop); + floop->SetLoopLevel(to->GetLoopLevel() + 1); + CopyLoopInfo(inner, floop, to, memPool); + } + } + if (parent != nullptr) { + to->SetOuterLoop(*parent); + } +} + +void LoopFinder::UpdateCGFunc() const { + for (LoopHierarchy *loop = loops; loop != nullptr; loop = loop->GetNext()) { + auto *floop = cgFunc->GetMemoryPool()->New(*cgFunc->GetMemoryPool()); + cgFunc->PushBackLoops(*floop); + floop->SetLoopLevel(1); /* top level */ + CopyLoopInfo(loop, floop, nullptr, cgFunc->GetMemoryPool()); + } +} + +void LoopFinder::FormLoopHierarchy() { + visitedBBs.clear(); + visitedBBs.resize(cgFunc->NumBBs(), false); + sortedBBs.clear(); + sortedBBs.resize(cgFunc->NumBBs(), nullptr); + onPathBBs.clear(); + onPathBBs.resize(cgFunc->NumBBs(), false); + + FOR_ALL_BB(bb, cgFunc) { + bb->SetLevel(0); + } + bool changed; + do { + changed = false; + FOR_ALL_BB(bb, cgFunc) { + if (!visitedBBs[bb->GetId()]) { + dfsBBs.push(bb); + SeekCycles(); + changed = true; + } + } + } while (changed); + + MarkExtraEntryAndEncl(); + /* + * FIX : Should merge the partial loops at the time of initial + * construction. And make the linked list as a sorted set, + * then the merge and sort phases below can go away. 
+ * + * Start merging the loops with the same header + */ + MergeLoops(); + /* order loops from least number of members */ + SortLoops(); + DetectInnerLoop(); + UpdateCGFunc(); +} + +bool CgLoopAnalysis::PhaseRun(maplebe::CGFunc &f) { + f.ClearLoopInfo(); + MemPool *loopMemPool = GetPhaseMemPool(); + LoopFinder *loopFinder = loopMemPool->New(f, *loopMemPool); + loopFinder->FormLoopHierarchy(); + + if (LOOP_ANALYSIS_DUMP_NEWPM) { + /* do dot gen after detection so the loop backedge can be properly colored using the loop info */ + DotGenerator::GenerateDot("buildloop", f, f.GetMirModule(), true, f.GetName()); + } +#if DEBUG + for (const auto *lp : f.GetLoops()) { + lp->CheckLoops(); + } +#endif + return false; +} +MAPLE_ANALYSIS_PHASE_REGISTER(CgLoopAnalysis, loopanalysis) +} /* namespace maplebe */ diff --git a/src/mapleall/maple_be/src/cg/memlayout.cpp b/src/mapleall/maple_be/src/cg/memlayout.cpp new file mode 100644 index 0000000000000000000000000000000000000000..0c242edf32f3460e0090483bde7da582d1e7ada6 --- /dev/null +++ b/src/mapleall/maple_be/src/cg/memlayout.cpp @@ -0,0 +1,87 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "memlayout.h" +#include "cgfunc.h" + +namespace maplebe { +using namespace maple; + +/* + * Go over all outgoing calls in the function body and get the maximum space + * needed for storing the actuals based on the actual parameters and the ABI. + * These are usually those arguments that cannot be passed + * through registers because a call passes more than 8 arguments, or + * they cannot be fit in a pair of registers. + + * This assumes that all nesting of statements has been removed, + * so that all the statements are at only one block level. + */ +uint32 MemLayout::FindLargestActualArea(int32 &aggCopySize) { + StmtNode *stmt = mirFunction->GetBody()->GetFirst(); + if (stmt == nullptr) { + return 0; + } + uint32 maxActualSize = 0; + uint32 maxParamStackSize = 0; // Size of parameter stack requirement + uint32 maxCopyStackSize = 0; // Size of aggregate param stack copy requirement + for (; stmt != nullptr; stmt = stmt->GetNext()) { + Opcode opCode = stmt->GetOpCode(); + if ((opCode < OP_call || opCode > OP_xintrinsiccallassigned) && opCode != OP_icallproto) { + continue; + } + if (opCode == OP_intrinsiccallwithtypeassigned || opCode == OP_intrinsiccallwithtype || + opCode == OP_intrinsiccallassigned || opCode == OP_intrinsiccall) { + /* + * Some intrinsics, such as MPL_ATOMIC_EXCHANGE_PTR, are handled by CG, + * and map to machine code sequences. We ignore them because they are not + * function calls. 
+ */ + continue; + } + /* + * if the following check fails, most likely dex has invoke-custom etc + * that is not supported yet + */ + DCHECK((opCode == OP_call || opCode == OP_icall || opCode == OP_icallproto), "Not lowered to call or icall?"); + int32 copySize; + uint32 size = ComputeStackSpaceRequirementForCall(*stmt, copySize, opCode == OP_icall || opCode == OP_icallproto); + if (size > maxParamStackSize) { + maxParamStackSize = size; + } + if (static_cast(copySize) > maxCopyStackSize) { + maxCopyStackSize = static_cast(copySize); + } + if ((maxParamStackSize + maxCopyStackSize) > maxActualSize) { + maxActualSize = maxParamStackSize + maxCopyStackSize; + } + } + aggCopySize = static_cast(maxCopyStackSize); + /* GetPointerSize() * 2's pow 2 is 4, set the low 4 bit of maxActualSize to 0 */ + if (CGOptions::IsArm64ilp32()) { + maxActualSize = RoundUp(maxActualSize, k8ByteSize * 2); + } else { + maxActualSize = RoundUp(maxActualSize, GetPointerSize() * 2); + } + return maxActualSize; +} + +bool CgLayoutFrame::PhaseRun(maplebe::CGFunc &f) { + if (CGOptions::IsPrintFunction()) { + LogInfo::MapleLogger() << f.GetName() << "\n"; + } + f.LayoutStackFrame(); + return false; +} +} /* namespace maplebe */ diff --git a/src/mapleall/maple_be/src/cg/offset_adjust.cpp b/src/mapleall/maple_be/src/cg/offset_adjust.cpp new file mode 100644 index 0000000000000000000000000000000000000000..ed94c8593df9a59d31e13288ff336758134db09e --- /dev/null +++ b/src/mapleall/maple_be/src/cg/offset_adjust.cpp @@ -0,0 +1,41 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "offset_adjust.h" +#if TARGAARCH64 +#include "aarch64_offset_adjust.h" +#elif TARGRISCV64 +#include "riscv64_offset_adjust.h" +#endif +#if TARGARM32 +#include "arm32_offset_adjust.h" +#endif + +#include "cgfunc.h" + +namespace maplebe { +using namespace maple; +bool CgFrameFinalize::PhaseRun(maplebe::CGFunc &f) { + FrameFinalize *offsetAdjustment = nullptr; +#if TARGAARCH64 || TARGRISCV64 + offsetAdjustment = GetPhaseAllocator()->New(f); +#endif +#if TARGARM32 + offsetAdjustment = GetPhaseAllocator()->New(f); +#endif + offsetAdjustment->Run(); + return false; +} + +} /* namespace maplebe */ diff --git a/src/mapleall/maple_be/src/cg/operand.cpp b/src/mapleall/maple_be/src/cg/operand.cpp new file mode 100644 index 0000000000000000000000000000000000000000..35df6e6b82cfd9c8d040f5cd0eea9c1b58ea3a64 --- /dev/null +++ b/src/mapleall/maple_be/src/cg/operand.cpp @@ -0,0 +1,242 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. 
+ * See the Mulan PSL v2 for more details. + */ +#include "operand.h" +#include "common_utils.h" +#include "mpl_logging.h" + +namespace maplebe { +bool IsMoveWidableImmediate(uint64 val, uint32 bitLen) { + if (bitLen == k64BitSize) { + /* 0xHHHH000000000000 or 0x0000HHHH00000000, return true */ + if (((val & ((static_cast(0xffff)) << k48BitSize)) == val) || + ((val & ((static_cast(0xffff)) << k32BitSize)) == val)) { + return true; + } + } else { + /* get lower 32 bits */ + val &= static_cast(0xffffffff); + } + /* 0x00000000HHHH0000 or 0x000000000000HHHH, return true */ + return ((val & ((static_cast(0xffff)) << k16BitSize)) == val || + (val & ((static_cast(0xffff)) << 0)) == val); +} + +bool BetterUseMOVZ(uint64 val) { + int32 n16zerosChunks = 0; + int32 n16onesChunks = 0; + uint64 sa = 0; + /* a 64 bits number is split 4 chunks, each chunk has 16 bits. check each chunk whether is all 1 or is all 0 */ + for (uint64 i = 0; i < k4BitSize; ++i, sa += k16BitSize) { + uint64 chunkVal = (val >> (static_cast(sa))) & 0x0000FFFFUL; + if (chunkVal == 0) { + ++n16zerosChunks; + } else if (chunkVal == 0xFFFFUL) { + ++n16onesChunks; + } + } + /* + * note that since we already check if the value + * can be movable with as a single mov instruction, + * we should not exepct either n_16zeros_chunks>=3 or n_16ones_chunks>=3 + */ +#if DEBUG + constexpr uint32 kN16ChunksCheck = 2; + ASSERT(n16zerosChunks <= kN16ChunksCheck, "n16zerosChunks ERR"); + ASSERT(n16onesChunks <= kN16ChunksCheck, "n16onesChunks ERR"); +#endif + return (n16zerosChunks >= n16onesChunks); +} + +bool RegOperand::operator==(const RegOperand &o) const { + regno_t myRn = GetRegisterNumber(); + uint32 mySz = GetSize(); + uint32 myFl = regFlag; + regno_t otherRn = o.GetRegisterNumber(); + uint32 otherSz = o.GetSize(); + uint32 otherFl = o.regFlag; + + if (IsPhysicalRegister()) { + return (myRn == otherRn && mySz == otherSz && myFl == otherFl); + } + return (myRn == otherRn && mySz == otherSz); +} + +bool RegOperand::operator<(const RegOperand &o) const { + regno_t myRn = GetRegisterNumber(); + uint32 mySz = GetSize(); + uint32 myFl = regFlag; + regno_t otherRn = o.GetRegisterNumber(); + uint32 otherSz = o.GetSize(); + uint32 otherFl = o.regFlag; + return myRn < otherRn || (myRn == otherRn && mySz < otherSz) || + (myRn == otherRn && mySz == otherSz && myFl < otherFl); +} + +Operand *MemOperand::GetOffset() const { + switch (addrMode) { + case kAddrModeBOi: + return GetOffsetOperand(); + case kAddrModeBOrX: + return GetIndexRegister(); + case kAddrModeLiteral: + break; + case kAddrModeLo12Li: + break; + default: + ASSERT(false, "error memoperand dump"); + break; + } + return nullptr; +} + +bool MemOperand::Equals(Operand &op) const { + if (!op.IsMemoryAccessOperand()) { + return false; + } + return Equals(static_cast(op)); +} + +bool MemOperand::Equals(const MemOperand &op) const { + if (&op == this) { + return true; + } + + if (addrMode == op.GetAddrMode()) { + switch (addrMode) { + case kAddrModeBOi: + return (GetBaseRegister()->Equals(*op.GetBaseRegister()) && + GetOffsetImmediate()->Equals(*op.GetOffsetImmediate())); + case kAddrModeBOrX: + return (GetBaseRegister()->Equals(*op.GetBaseRegister()) && + GetIndexRegister()->Equals(*op.GetIndexRegister()) && + GetExtendAsString() == op.GetExtendAsString() && + ShiftAmount() == op.ShiftAmount()); + case kAddrModeLiteral: + return GetSymbolName() == op.GetSymbolName(); + case kAddrModeLo12Li: + return (GetBaseRegister()->Equals(*op.GetBaseRegister()) && + GetSymbolName() == op.GetSymbolName() && 
+ GetOffsetImmediate()->Equals(*op.GetOffsetImmediate())); + default: + ASSERT(false, "error memoperand"); + break; + } + } + return false; +} + +bool MemOperand::Less(const Operand &right) const { + if (&right == this) { + return false; + } + + /* For different type. */ + if (GetKind() != right.GetKind()) { + return GetKind() < right.GetKind(); + } + + const MemOperand *rightOpnd = static_cast(&right); + if (addrMode != rightOpnd->addrMode) { + return addrMode < rightOpnd->addrMode; + } + + switch (addrMode) { + case kAddrModeBOi: { + ASSERT(idxOpt == kIntact, "Should not compare pre/post index addressing."); + + RegOperand *baseReg = GetBaseRegister(); + RegOperand *rbaseReg = rightOpnd->GetBaseRegister(); + int32 nRet = baseReg->RegCompare(*rbaseReg); + if (nRet == 0) { + Operand *ofstOpnd = GetOffsetOperand(); + const Operand *rofstOpnd = rightOpnd->GetOffsetOperand(); + return ofstOpnd->Less(*rofstOpnd); + } + return nRet < 0; + } + case kAddrModeBOrX: { + if (noExtend != rightOpnd->noExtend) { + return noExtend; + } + if (!noExtend && extend != rightOpnd->extend) { + return extend < rightOpnd->extend; + } + RegOperand *indexReg = GetIndexRegister(); + const RegOperand *rindexReg = rightOpnd->GetIndexRegister(); + return indexReg->Less(*rindexReg); + } + case kAddrModeLiteral: { + return static_cast(GetSymbol()) < static_cast(rightOpnd->GetSymbol()); + } + case kAddrModeLo12Li: { + if (GetSymbol() != rightOpnd->GetSymbol()) { + return static_cast(GetSymbol()) < static_cast(rightOpnd->GetSymbol()); + } + Operand *ofstOpnd = GetOffsetOperand(); + const Operand *rofstOpnd = rightOpnd->GetOffsetOperand(); + return ofstOpnd->Less(*rofstOpnd); + } + default: + ASSERT(false, "Internal error."); + return false; + } +} + +const char *CondOperand::ccStrs[kCcLast] = { + "EQ", "NE", "CS", "HS", "CC", "LO", "MI", "PL", "VS", "VC", "HI", "LS", "GE", "LT", "GT", "LE", "AL" +}; + +bool CondOperand::Less(const Operand &right) const { + if (&right == this) { + return false; + } + + /* For different type. */ + if (GetKind() != right.GetKind()) { + return GetKind() < right.GetKind(); + } + + const CondOperand *rightOpnd = static_cast(&right); + + /* The same type. */ + if (cc == CC_AL || rightOpnd->cc == CC_AL) { + return false; + } + return cc < rightOpnd->cc; +} + +uint32 PhiOperand::GetLeastCommonValidBit() const{ + uint32 leastCommonVb = 0; + for (auto phiOpnd : phiList) { + uint32 curVb = phiOpnd.second->GetValidBitsNum(); + if (curVb > leastCommonVb) { + leastCommonVb = curVb; + } + } + return leastCommonVb; +} +bool PhiOperand::IsRedundancy() const { + uint32 srcSsaIdx = 0; + for (auto phiOpnd : phiList) { + if (srcSsaIdx == 0) { + srcSsaIdx = phiOpnd.second->GetRegisterNumber(); + } + if (srcSsaIdx != phiOpnd.second->GetRegisterNumber()) { + return false; + } + } + return true; +} +} /* namespace maplebe */ diff --git a/src/mapleall/maple_be/src/cg/optimize_common.cpp b/src/mapleall/maple_be/src/cg/optimize_common.cpp new file mode 100644 index 0000000000000000000000000000000000000000..eba06363011710ca479156b2a066b56a5b219d55 --- /dev/null +++ b/src/mapleall/maple_be/src/cg/optimize_common.cpp @@ -0,0 +1,308 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "optimize_common.h" +#include "cgbb.h" +#include "cg.h" +#include "cg_option.h" +#include "loop.h" +#include "securec.h" + +/* This file provides common class and function for cfgo and ico. */ +namespace maplebe { +void Optimizer::Run(const std::string &funcName, bool checkOnly) { + /* Initialize cfg optimization patterns */ + InitOptimizePatterns(); + + /* For each pattern, search cgFunc for optimization */ + for (OptimizationPattern *p : diffPassPatterns) { + p->Search2Op(checkOnly); + } + /* Search the cgFunc for multiple possible optimizations in one pass */ + if (!singlePassPatterns.empty()) { + BB *curBB = cgFunc->GetFirstBB(); + bool flag = false; + while (curBB != nullptr) { + for (OptimizationPattern *p : singlePassPatterns) { + if (p->Optimize(*curBB)) { + flag = p->IsKeepPosition(); + p->SetKeepPosition(false); + break; + } + } + + if (flag) { + flag = false; + } else { + curBB = curBB->GetNext(); + } + } + } + + if (CGOptions::IsDumpOptimizeCommonLog()) { + constexpr int arrSize = 80; + char post[arrSize]; + errno_t cpyRet = strcpy_s(post, arrSize, "post-"); + CHECK_FATAL(cpyRet == EOK, "call strcpy_s failed"); + errno_t catRes = strcat_s(post, arrSize, name); + CHECK_FATAL(catRes == EOK, "call strcat_s failed "); + OptimizeLogger::GetLogger().Print(funcName); + } + OptimizeLogger::GetLogger().ClearLocal(); +} + +void OptimizationPattern::Search2Op(bool noOptimize) { + checkOnly = noOptimize; + BB *curBB = cgFunc->GetFirstBB(); + while (curBB != nullptr) { + bool changed = false; + do { + changed = Optimize(*curBB); + } while (changed); + if (keepPosition) { + keepPosition = false; + } else { + curBB = curBB->GetNext(); + } + } +} + +void OptimizationPattern::Log(uint32 bbID) { + OptimizeLogger::GetLogger().Log(patternName.c_str()); + DotGenerator::SetColor(bbID, dotColor.c_str()); +} + +std::map DotGenerator::coloringMap; + +void DotGenerator::SetColor(uint32 bbID, const std::string &color) { + coloringMap[bbID] = color; +} + +std::string DotGenerator::GetFileName(const MIRModule &mirModule, const std::string &filePreFix) { + std::string fileName; + if (!filePreFix.empty()) { + fileName.append(filePreFix); + fileName.append("-"); + } + fileName.append(mirModule.GetFileName()); + for (uint32 i = 0; i < fileName.length(); i++) { + if (fileName[i] == ';' || fileName[i] == '/' || fileName[i] == '|') { + fileName[i] = '_'; + } + } + + fileName.append(".dot"); + return fileName; +} + +static bool IsBackEdgeForLoop(const CGFuncLoops &loop, const BB &from, const BB &to) { + const BB *header = loop.GetHeader(); + if (header->GetId() == to.GetId()) { + for (auto *be : loop.GetBackedge()) { + if (be->GetId() == from.GetId()) { + return true; + } + } + } + for (auto *inner : loop.GetInnerLoops()) { + if (IsBackEdgeForLoop(*inner, from, to)) { + return true; + } + } + return false; +} +bool DotGenerator::IsBackEdge(const CGFunc &cgFunction, const BB &from, const BB &to) { + for (const auto *loop : cgFunction.GetLoops()) { + if (IsBackEdgeForLoop(*loop, from, to)) { + return true; + } + } + return false; +} + +void DotGenerator::DumpEdge(const CGFunc &cgFunction, std::ofstream &cfgFileOfStream, bool isIncludeEH) { + 
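+  /*
+   * For a CFG with an edge BB1 -> BB2 and a loop back edge BB3 -> BB1, the loop below
+   * emits lines of the form
+   *   BB1 -> BB2 [color=green];
+   *   BB3 -> BB1 [color=red];
+   * i.e. back edges (as classified by IsBackEdge above) are drawn red, ordinary edges
+   * green, and EH successor edges are also drawn red when isIncludeEH is set.
+   */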
FOR_ALL_BB_CONST(bb, &cgFunction) { + for (auto *succBB : bb->GetSuccs()) { + cfgFileOfStream << "BB" << bb->GetId(); + cfgFileOfStream << " -> " + << "BB" << succBB->GetId(); + if (IsBackEdge(cgFunction, *bb, *succBB)) { + cfgFileOfStream << " [color=red]"; + } else { + cfgFileOfStream << " [color=green]"; + } + cfgFileOfStream << ";\n"; + } + if (isIncludeEH) { + for (auto *ehSuccBB : bb->GetEhSuccs()) { + cfgFileOfStream << "BB" << bb->GetId(); + cfgFileOfStream << " -> " + << "BB" << ehSuccBB->GetId(); + cfgFileOfStream << "[color=red]"; + cfgFileOfStream << ";\n"; + } + } + } +} + +bool DotGenerator::FoundListOpndRegNum(ListOperand &listOpnd, const Insn &insnObj, regno_t vReg) { + bool exist = false; + for (auto op : listOpnd.GetOperands()) { + RegOperand *regOpnd = static_cast(op); + if (op->IsRegister() && regOpnd->GetRegisterNumber() == vReg) { + LogInfo::MapleLogger() << "BB" << insnObj.GetBB()->GetId() << " [style=filled, fillcolor=red];\n"; + exist = true; + break; + } + } + return exist; +} + +bool DotGenerator::FoundMemAccessOpndRegNum(const MemOperand &memOperand, const Insn &insnObj, regno_t vReg) { + Operand *base = memOperand.GetBaseRegister(); + Operand *offset = memOperand.GetIndexRegister(); + bool exist = false; + if (base != nullptr && base->IsRegister()) { + RegOperand *regOpnd = static_cast(base); + if (regOpnd->GetRegisterNumber() == vReg) { + LogInfo::MapleLogger() << "BB" << insnObj.GetBB()->GetId() << " [style=filled, fillcolor=red];\n"; + exist = true; + } + } else if (offset != nullptr && offset->IsRegister()) { + RegOperand *regOpnd = static_cast(offset); + if (regOpnd->GetRegisterNumber() == vReg) { + LogInfo::MapleLogger() << "BB" << insnObj.GetBB()->GetId() << " [style=filled, fillcolor=red];\n"; + exist = true; + } + } + return exist; +} + +bool DotGenerator::FoundNormalOpndRegNum(const RegOperand ®Opnd, const Insn &insnObj, regno_t vReg) { + bool exist = false; + if (regOpnd.GetRegisterNumber() == vReg) { + LogInfo::MapleLogger() << "BB" << insnObj.GetBB()->GetId() << " [style=filled, fillcolor=red];\n"; + exist = true; + } + return exist; +} + +void DotGenerator::DumpBBInstructions(const CGFunc &cgFunction, regno_t vReg, std::ofstream &cfgFile) { + FOR_ALL_BB_CONST(bb, &cgFunction) { + if (vReg != 0) { + FOR_BB_INSNS_CONST(insn, bb) { + bool found = false; + uint32 opndNum = insn->GetOperandSize(); + for (uint32 i = 0; i < opndNum; ++i) { + Operand &opnd = insn->GetOperand(i); + if (opnd.IsList()) { + auto &listOpnd = static_cast(opnd); + found = FoundListOpndRegNum(listOpnd, *insn, vReg); + } else if (opnd.IsMemoryAccessOperand()) { + auto &memOpnd = static_cast(opnd); + found = FoundMemAccessOpndRegNum(memOpnd, *insn, vReg); + } else { + if (opnd.IsRegister()) { + auto ®Opnd = static_cast(opnd); + found = FoundNormalOpndRegNum(regOpnd, *insn, vReg); + } + } + if (found) { + break; + } + } + if (found) { + break; + } + } + } + cfgFile << "BB" << bb->GetId() << "["; + auto it = coloringMap.find(bb->GetId()); + if (it != coloringMap.end()) { + cfgFile << "style=filled,fillcolor=" << it->second << ","; + } + if (bb->GetKind() == BB::kBBIf) { + cfgFile << "shape=diamond,label= \" BB" << bb->GetId() << ":\n"; + } else { + cfgFile << "shape=box,label= \" BB" << bb->GetId() << ":\n"; + } + cfgFile << "{ "; + cfgFile << bb->GetKindName() << "\n"; + cfgFile << bb->GetFrequency() << "\n"; + if (bb->GetLabIdx() != 0) { + cfgFile << "LabIdx=" << bb->GetLabIdx() << "\n"; + } + cfgFile << "}\"];\n"; + } +} + +/* Generate dot file for cfg */ +void 
DotGenerator::GenerateDot(const std::string &preFix, const CGFunc &cgFunc, const MIRModule &mod, + bool includeEH, const std::string fname, regno_t vReg) { + std::ofstream cfgFile; + std::streambuf *coutBuf = std::cout.rdbuf(); /* keep original cout buffer */ + std::streambuf *buf = cfgFile.rdbuf(); + std::cout.rdbuf(buf); + std::string fileName = GetFileName(mod, (preFix + "-" + fname)); + + cfgFile.open(fileName, std::ios::trunc); + CHECK_FATAL(cfgFile.is_open(), "Failed to open output file: %s", fileName.c_str()); + cfgFile << "digraph {\n"; + /* dump edge */ + DumpEdge(cgFunc, cfgFile, includeEH); + + /* dump instruction in each BB */ + DumpBBInstructions(cgFunc, vReg, cfgFile); + + cfgFile << "}\n"; + coloringMap.clear(); + cfgFile.flush(); + cfgFile.close(); + std::cout.rdbuf(coutBuf); +} + +void OptimizeLogger::Print(const std::string &funcName) { + if (!localStat.empty()) { + LogInfo::MapleLogger() << funcName << '\n'; + for (const auto &localStatPair : localStat) { + LogInfo::MapleLogger() << "Optimized " << localStatPair.first << ":" << localStatPair.second << "\n"; + } + + ClearLocal(); + LogInfo::MapleLogger() << "Total:" << '\n'; + for (const auto &globalStatPair : globalStat) { + LogInfo::MapleLogger() << "Optimized " << globalStatPair.first << ":" << globalStatPair.second << "\n"; + } + } +} + +void OptimizeLogger::Log(const std::string &patternName) { + auto itemInGlobal = globalStat.find(patternName); + if (itemInGlobal != globalStat.end()) { + itemInGlobal->second++; + } else { + (void)globalStat.emplace(std::pair(patternName, 1)); + } + auto itemInLocal = localStat.find(patternName); + if (itemInLocal != localStat.end()) { + itemInLocal->second++; + } else { + (void)localStat.emplace(std::pair(patternName, 1)); + } +} + +void OptimizeLogger::ClearLocal() { + localStat.clear(); +} +} /* namespace maplebe */ diff --git a/src/mapleall/maple_be/src/cg/peep.cpp b/src/mapleall/maple_be/src/cg/peep.cpp new file mode 100644 index 0000000000000000000000000000000000000000..2469d5c3cd14e748b801833387aeec299040edab --- /dev/null +++ b/src/mapleall/maple_be/src/cg/peep.cpp @@ -0,0 +1,724 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "peep.h" +#include "cg.h" +#include "mpl_logging.h" +#include "common_utils.h" +#if TARGAARCH64 +#include "aarch64_peep.h" +#elif TARGRISCV64 +#include "riscv64_peep.h" +#elif defined TARGX86_64 +#include "x64_peep.h" +#endif +#if TARGARM32 +#include "arm32_peep.h" +#endif + +namespace maplebe { +#if TARGAARCH64 +bool CGPeepPattern::IsCCRegCrossVersion(Insn &startInsn, Insn &endInsn, const RegOperand &ccReg) const { + if (startInsn.GetBB() != endInsn.GetBB()) { + return true; + } + CHECK_FATAL(ssaInfo != nullptr, "must have ssaInfo"); + CHECK_FATAL(ccReg.IsSSAForm(), "cc reg must be ssa form"); + for (auto *curInsn = startInsn.GetNext(); curInsn != nullptr && curInsn != &endInsn; curInsn = curInsn->GetNext()) { + if (!curInsn->IsMachineInstruction()) { + continue; + } + if (curInsn->IsCall()) { + return true; + } + uint32 opndNum = curInsn->GetOperandSize(); + for (uint32 i = 0; i < opndNum; ++i) { + Operand &opnd = curInsn->GetOperand(i); + if (!opnd.IsRegister()) { + continue; + } + auto ®Opnd = static_cast(opnd); + if (!curInsn->IsRegDefined(regOpnd.GetRegisterNumber())) { + continue; + } + if (static_cast(opnd).IsOfCC()) { + VRegVersion *ccVersion = ssaInfo->FindSSAVersion(ccReg.GetRegisterNumber()); + VRegVersion *curCCVersion = ssaInfo->FindSSAVersion(regOpnd.GetRegisterNumber()); + CHECK_FATAL(ccVersion != nullptr && curCCVersion != nullptr, "RegVersion must not be null based on ssa"); + CHECK_FATAL(!ccVersion->IsDeleted() && !curCCVersion->IsDeleted(), "deleted version"); + if (ccVersion->GetVersionIdx() != curCCVersion->GetVersionIdx()) { + return true; + } + } + } + } + return false; +} + +int64 CGPeepPattern::GetLogValueAtBase2(int64 val) const { + return (__builtin_popcountll(static_cast(val)) == 1) ? (__builtin_ffsll(val) - 1) : -1; +} + +InsnSet CGPeepPattern::GetAllUseInsn(const RegOperand &defReg) const { + InsnSet allUseInsn; + if ((ssaInfo != nullptr) && defReg.IsSSAForm()) { + VRegVersion *defVersion = ssaInfo->FindSSAVersion(defReg.GetRegisterNumber()); + CHECK_FATAL(defVersion != nullptr, "useVRegVersion must not be null based on ssa"); + for (auto insnInfo : defVersion->GetAllUseInsns()) { + Insn *secondInsn = insnInfo.second->GetInsn(); + allUseInsn.emplace(secondInsn); + } + } + return allUseInsn; +} + +void CGPeepPattern::DumpAfterPattern(std::vector &prevInsns, const Insn *replacedInsn, const Insn *newInsn) { + LogInfo::MapleLogger() << ">>>>>>> In " << GetPatternName() << " : <<<<<<<\n"; + if (!prevInsns.empty()) { + if ((replacedInsn == nullptr) && (newInsn == nullptr)) { + LogInfo::MapleLogger() << "======= RemoveInsns : {\n"; + } else { + LogInfo::MapleLogger() << "======= PrevInsns : {\n"; + } + for (auto *prevInsn : prevInsns) { + if (prevInsn != nullptr) { + LogInfo::MapleLogger() << "[primal form] "; + prevInsn->Dump(); + if (ssaInfo != nullptr) { + LogInfo::MapleLogger() << "[ssa form] "; + ssaInfo->DumpInsnInSSAForm(*prevInsn); + } + } + } + LogInfo::MapleLogger() << "}\n"; + } + if (replacedInsn != nullptr) { + LogInfo::MapleLogger() << "======= OldInsn :\n"; + LogInfo::MapleLogger() << "[primal form] "; + replacedInsn->Dump(); + if (ssaInfo != nullptr) { + LogInfo::MapleLogger() << "[ssa form] "; + ssaInfo->DumpInsnInSSAForm(*replacedInsn); + } + } + if (newInsn != nullptr) { + LogInfo::MapleLogger() << "======= NewInsn :\n"; + LogInfo::MapleLogger() << "[primal form] "; + newInsn->Dump(); + if (ssaInfo != nullptr) { + LogInfo::MapleLogger() << "[ssa form] "; + ssaInfo->DumpInsnInSSAForm(*newInsn); + } + } +} + +/* Check if a regOpnd is 
live after insn. True if live, otherwise false. */ +bool CGPeepPattern::IfOperandIsLiveAfterInsn(const RegOperand ®Opnd, Insn &insn) { + for (Insn *nextInsn = insn.GetNext(); nextInsn != nullptr; nextInsn = nextInsn->GetNext()) { + if (!nextInsn->IsMachineInstruction()) { + continue; + } + int32 lastOpndId = static_cast(nextInsn->GetOperandSize() - 1); + for (int32 i = lastOpndId; i >= 0; --i) { + Operand &opnd = nextInsn->GetOperand(static_cast(i)); + if (opnd.IsMemoryAccessOperand()) { + auto &mem = static_cast(opnd); + Operand *base = mem.GetBaseRegister(); + Operand *offset = mem.GetOffset(); + + if (base != nullptr && base->IsRegister()) { + auto *tmpRegOpnd = static_cast(base); + if (tmpRegOpnd->GetRegisterNumber() == regOpnd.GetRegisterNumber()) { + return true; + } + } + if (offset != nullptr && offset->IsRegister()) { + auto *tmpRegOpnd = static_cast(offset); + if (tmpRegOpnd->GetRegisterNumber() == regOpnd.GetRegisterNumber()) { + return true; + } + } + } else if (opnd.IsList()) { + auto &opndList = static_cast(opnd).GetOperands(); + if (find(opndList.begin(), opndList.end(), ®Opnd) != opndList.end()) { + return true; + } + } + + if (!opnd.IsRegister()) { + continue; + } + auto &tmpRegOpnd = static_cast(opnd); + if (opnd.IsRegister() && tmpRegOpnd.GetRegisterNumber() != regOpnd.GetRegisterNumber()) { + continue; + } + const InsnDesc *md = nextInsn->GetDesc(); + auto *regProp = (md->opndMD[static_cast(i)]); + bool isUse = regProp->IsUse(); + /* if noUse Redefined, no need to check live-out. */ + return isUse; + } + } + /* Check if it is live-out. */ + return FindRegLiveOut(regOpnd, *insn.GetBB()); +} + +/* entrance for find if a regOpnd is live-out. */ +bool CGPeepPattern::FindRegLiveOut(const RegOperand ®Opnd, const BB &bb) { + /* + * Each time use peephole, index is initialized by the constructor, + * and the internal_flags3 should be cleared. + */ + if (PeepOptimizer::index == 0) { + FOR_ALL_BB(currbb, cgFunc) { + currbb->SetInternalFlag3(0); + } + } + /* before each invoke check function, increase index. */ + ++PeepOptimizer::index; + return CheckOpndLiveinSuccs(regOpnd, bb); +} + +/* Check regOpnd in succs/ehSuccs. True is live-out, otherwise false. 
*/ +bool CGPeepPattern::CheckOpndLiveinSuccs(const RegOperand ®Opnd, const BB &bb) const { + for (auto succ : bb.GetSuccs()) { + ASSERT(succ->GetInternalFlag3() <= PeepOptimizer::index, "internal error."); + if (succ->GetInternalFlag3() == PeepOptimizer::index) { + continue; + } + succ->SetInternalFlag3(PeepOptimizer::index); + ReturnType result = IsOpndLiveinBB(regOpnd, *succ); + if (result == kResNotFind) { + if (CheckOpndLiveinSuccs(regOpnd, *succ)) { + return true; + } + continue; + } else if (result == kResUseFirst) { + return true; + } else if (result == kResDefFirst) { + continue; + } + } + for (auto ehSucc : bb.GetEhSuccs()) { + ASSERT(ehSucc->GetInternalFlag3() <= PeepOptimizer::index, "internal error."); + if (ehSucc->GetInternalFlag3() == PeepOptimizer::index) { + continue; + } + ehSucc->SetInternalFlag3(PeepOptimizer::index); + ReturnType result = IsOpndLiveinBB(regOpnd, *ehSucc); + if (result == kResNotFind) { + if (CheckOpndLiveinSuccs(regOpnd, *ehSucc)) { + return true; + } + continue; + } else if (result == kResUseFirst) { + return true; + } else if (result == kResDefFirst) { + continue; + } + } + return CheckRegLiveinReturnBB(regOpnd, bb); +} + +/* Check if the reg is used in return BB */ +bool CGPeepPattern::CheckRegLiveinReturnBB(const RegOperand ®Opnd, const BB &bb) const { +#if TARGAARCH64 || TARGRISCV64 + if (bb.GetKind() == BB::kBBReturn) { + regno_t regNO = regOpnd.GetRegisterNumber(); + RegType regType = regOpnd.GetRegisterType(); + if (regType == kRegTyVary) { + return false; + } + PrimType returnType = cgFunc->GetFunction().GetReturnType()->GetPrimType(); + regno_t returnReg = R0; + if (IsPrimitiveFloat(returnType)) { + returnReg = V0; + } else if (IsPrimitiveInteger(returnType)) { + returnReg = R0; + } + if (regNO == returnReg) { + return true; + } + } +#endif + return false; +} + +/* + * Check regNO in current bb: + * kResUseFirst:first find use point; kResDefFirst:first find define point; + * kResNotFind:cannot find regNO, need to continue searching. 
+ */ +ReturnType CGPeepPattern::IsOpndLiveinBB(const RegOperand ®Opnd, const BB &bb) const { + FOR_BB_INSNS_CONST(insn, &bb) { + if (!insn->IsMachineInstruction()) { + continue; + } + const InsnDesc *md = insn->GetDesc(); + int32 lastOpndId = static_cast(insn->GetOperandSize() - 1); + for (int32 i = lastOpndId; i >= 0; --i) { + Operand &opnd = insn->GetOperand(static_cast(i)); + auto *regProp = (md->opndMD[static_cast(i)]); + if (opnd.IsConditionCode()) { + if (regOpnd.GetRegisterNumber() == kRFLAG) { + bool isUse = regProp->IsUse(); + if (isUse) { + return kResUseFirst; + } + ASSERT(regProp->IsDef(), "register should be redefined."); + return kResDefFirst; + } + } else if (opnd.IsList()) { + auto &listOpnd = static_cast(opnd); + if (insn->GetMachineOpcode() == MOP_asm) { + if (static_cast(i) == kAsmOutputListOpnd || static_cast(i) == kAsmClobberListOpnd) { + for (auto op : listOpnd.GetOperands()) { + if (op->GetRegisterNumber() == regOpnd.GetRegisterNumber()) { + return kResDefFirst; + } + } + continue; + } else if (static_cast(i) != kAsmInputListOpnd) { + continue; + } + /* fall thru for kAsmInputListOpnd */ + } + for (auto op : listOpnd.GetOperands()) { + if (op->GetRegisterNumber() == regOpnd.GetRegisterNumber()) { + return kResUseFirst; + } + } + } else if (opnd.IsMemoryAccessOperand()) { + auto &mem = static_cast(opnd); + Operand *base = mem.GetBaseRegister(); + Operand *offset = mem.GetOffset(); + + if (base != nullptr) { + ASSERT(base->IsRegister(), "internal error."); + auto *tmpRegOpnd = static_cast(base); + if (tmpRegOpnd->GetRegisterNumber() == regOpnd.GetRegisterNumber()) { + return kResUseFirst; + } + } + if (offset != nullptr && offset->IsRegister()) { + auto *tmpRegOpnd = static_cast(offset); + if (tmpRegOpnd->GetRegisterNumber() == regOpnd.GetRegisterNumber()) { + return kResUseFirst; + } + } + } else if (opnd.IsRegister()) { + auto &tmpRegOpnd = static_cast(opnd); + if (tmpRegOpnd.GetRegisterNumber() == regOpnd.GetRegisterNumber()) { + bool isUse = regProp->IsUse(); + if (isUse) { + return kResUseFirst; + } + ASSERT(regProp->IsDef(), "register should be redefined."); + return kResDefFirst; + } + } + } + } + return kResNotFind; +} + +int PeepPattern::LogValueAtBase2(int64 val) const { + return (__builtin_popcountll(static_cast(val)) == 1) ? (__builtin_ffsll(val) - 1) : (-1); +} + +/* Check if a regOpnd is live after insn. True if live, otherwise false. 
*/ +bool PeepPattern::IfOperandIsLiveAfterInsn(const RegOperand ®Opnd, Insn &insn) { + for (Insn *nextInsn = insn.GetNext(); nextInsn != nullptr; nextInsn = nextInsn->GetNext()) { + if (!nextInsn->IsMachineInstruction()) { + continue; + } + int32 lastOpndId = static_cast(nextInsn->GetOperandSize() - 1); + for (int32 i = lastOpndId; i >= 0; --i) { + Operand &opnd = nextInsn->GetOperand(static_cast(i)); + if (opnd.IsMemoryAccessOperand()) { + auto &mem = static_cast(opnd); + Operand *base = mem.GetBaseRegister(); + Operand *offset = mem.GetOffset(); + + if (base != nullptr && base->IsRegister()) { + auto *tmpRegOpnd = static_cast(base); + if (tmpRegOpnd->GetRegisterNumber() == regOpnd.GetRegisterNumber()) { + return true; + } + } + if (offset != nullptr && offset->IsRegister()) { + auto *tmpRegOpnd = static_cast(offset); + if (tmpRegOpnd->GetRegisterNumber() == regOpnd.GetRegisterNumber()) { + return true; + } + } + } else if (opnd.IsList()) { + auto &opndList = static_cast(opnd).GetOperands(); + if (find(opndList.begin(), opndList.end(), ®Opnd) != opndList.end()) { + return true; + } + } + + if (!opnd.IsRegister()) { + continue; + } + auto &tmpRegOpnd = static_cast(opnd); + if (opnd.IsRegister() && tmpRegOpnd.GetRegisterNumber() != regOpnd.GetRegisterNumber()) { + continue; + } + const InsnDesc *md = nextInsn->GetDesc(); + auto *regProp = (md->opndMD[static_cast(i)]); + bool isUse = regProp->IsUse(); + /* if noUse Redefined, no need to check live-out. */ + return isUse; + } + } + /* Check if it is live-out. */ + return FindRegLiveOut(regOpnd, *insn.GetBB()); +} + +/* entrance for find if a regOpnd is live-out. */ +bool PeepPattern::FindRegLiveOut(const RegOperand ®Opnd, const BB &bb) { + /* + * Each time use peephole, index is initialized by the constructor, + * and the internal_flags3 should be cleared. + */ + if (PeepOptimizer::index == 0) { + FOR_ALL_BB(currbb, &cgFunc) { + currbb->SetInternalFlag3(0); + } + } + /* before each invoke check function, increase index. */ + ++PeepOptimizer::index; + return CheckOpndLiveinSuccs(regOpnd, bb); +} + +/* Check regOpnd in succs/ehSuccs. True is live-out, otherwise false. 
*/ +bool PeepPattern::CheckOpndLiveinSuccs(const RegOperand ®Opnd, const BB &bb) const { + for (auto succ : bb.GetSuccs()) { + ASSERT(succ->GetInternalFlag3() <= PeepOptimizer::index, "internal error."); + if (succ->GetInternalFlag3() == PeepOptimizer::index) { + continue; + } + succ->SetInternalFlag3(PeepOptimizer::index); + ReturnType result = IsOpndLiveinBB(regOpnd, *succ); + if (result == kResNotFind) { + if (CheckOpndLiveinSuccs(regOpnd, *succ)) { + return true; + } + continue; + } else if (result == kResUseFirst) { + return true; + } else if (result == kResDefFirst) { + continue; + } + } + for (auto ehSucc : bb.GetEhSuccs()) { + ASSERT(ehSucc->GetInternalFlag3() <= PeepOptimizer::index, "internal error."); + if (ehSucc->GetInternalFlag3() == PeepOptimizer::index) { + continue; + } + ehSucc->SetInternalFlag3(PeepOptimizer::index); + ReturnType result = IsOpndLiveinBB(regOpnd, *ehSucc); + if (result == kResNotFind) { + if (CheckOpndLiveinSuccs(regOpnd, *ehSucc)) { + return true; + } + continue; + } else if (result == kResUseFirst) { + return true; + } else if (result == kResDefFirst) { + continue; + } + } + return CheckRegLiveinReturnBB(regOpnd, bb); +} + +/* Check if the reg is used in return BB */ +bool PeepPattern::CheckRegLiveinReturnBB(const RegOperand ®Opnd, const BB &bb) const { +#if TARGAARCH64 || TARGRISCV64 + if (bb.GetKind() == BB::kBBReturn) { + regno_t regNO = regOpnd.GetRegisterNumber(); + RegType regType = regOpnd.GetRegisterType(); + if (regType == kRegTyVary) { + return false; + } + PrimType returnType = cgFunc.GetFunction().GetReturnType()->GetPrimType(); + regno_t returnReg = R0; + if (IsPrimitiveFloat(returnType)) { + returnReg = V0; + } else if (IsPrimitiveInteger(returnType)) { + returnReg = R0; + } + if (regNO == returnReg) { + return true; + } + } +#endif + return false; +} + +/* + * Check regNO in current bb: + * kResUseFirst:first find use point; kResDefFirst:first find define point; + * kResNotFind:cannot find regNO, need to continue searching. 
+ */ +ReturnType PeepPattern::IsOpndLiveinBB(const RegOperand ®Opnd, const BB &bb) const { + FOR_BB_INSNS_CONST(insn, &bb) { + if (!insn->IsMachineInstruction()) { + continue; + } + const InsnDesc *md = insn->GetDesc(); + int32 lastOpndId = static_cast(insn->GetOperandSize() - 1); + for (int32 i = lastOpndId; i >= 0; --i) { + Operand &opnd = insn->GetOperand(static_cast(i)); + auto *regProp = (md->opndMD[static_cast(i)]); + if (opnd.IsConditionCode()) { + if (regOpnd.GetRegisterNumber() == kRFLAG) { + bool isUse = regProp->IsUse(); + if (isUse) { + return kResUseFirst; + } + ASSERT(regProp->IsDef(), "register should be redefined."); + return kResDefFirst; + } + } else if (opnd.IsList()) { + auto &listOpnd = static_cast(opnd); + if (insn->GetMachineOpcode() == MOP_asm) { + if (static_cast(i) == kAsmOutputListOpnd || static_cast(i) == kAsmClobberListOpnd) { + for (auto op : listOpnd.GetOperands()) { + if (op->GetRegisterNumber() == regOpnd.GetRegisterNumber()) { + return kResDefFirst; + } + } + continue; + } else if (static_cast(i) != kAsmInputListOpnd) { + continue; + } + /* fall thru for kAsmInputListOpnd */ + } + for (auto op : listOpnd.GetOperands()) { + if (op->GetRegisterNumber() == regOpnd.GetRegisterNumber()) { + return kResUseFirst; + } + } + } else if (opnd.IsMemoryAccessOperand()) { + auto &mem = static_cast(opnd); + Operand *base = mem.GetBaseRegister(); + Operand *offset = mem.GetOffset(); + + if (base != nullptr) { + ASSERT(base->IsRegister(), "internal error."); + auto *tmpRegOpnd = static_cast(base); + if (tmpRegOpnd->GetRegisterNumber() == regOpnd.GetRegisterNumber()) { + return kResUseFirst; + } + } + if (offset != nullptr && offset->IsRegister()) { + auto *tmpRegOpnd = static_cast(offset); + if (tmpRegOpnd->GetRegisterNumber() == regOpnd.GetRegisterNumber()) { + return kResUseFirst; + } + } + } else if (opnd.IsRegister()) { + auto &tmpRegOpnd = static_cast(opnd); + if (tmpRegOpnd.GetRegisterNumber() == regOpnd.GetRegisterNumber()) { + bool isUse = regProp->IsUse(); + if (isUse) { + return kResUseFirst; + } + ASSERT(regProp->IsDef(), "register should be redefined."); + return kResDefFirst; + } + } + } + } + return kResNotFind; +} + +bool PeepPattern::IsMemOperandOptPattern(const Insn &insn, Insn &nextInsn) { + /* Check if base register of nextInsn and the dest operand of insn are identical. */ + auto *memOpnd = static_cast(nextInsn.GetMemOpnd()); + ASSERT(memOpnd != nullptr, "null ptr check"); + /* Only for AddrMode_B_OI addressing mode. */ + if (memOpnd->GetAddrMode() != MemOperand::kAddrModeBOi) { + return false; + } + /* Only for immediate is 0. */ + if (memOpnd->GetOffsetImmediate()->GetOffsetValue() != 0) { + return false; + } + /* Only for intact memory addressing. */ + if (!memOpnd->IsIntactIndexed()) { + return false; + } + + auto &oldBaseOpnd = static_cast(insn.GetOperand(kInsnFirstOpnd)); + /* Check if dest operand of insn is idential with base register of nextInsn. */ + if (memOpnd->GetBaseRegister() != &oldBaseOpnd) { + return false; + } + +#ifdef USE_32BIT_REF + if (nextInsn.IsAccessRefField() && nextInsn.GetOperand(kInsnFirstOpnd).GetSize() > k32BitSize) { + return false; + } +#endif + /* Check if x0 is used after ldr insn, and if it is in live-out. 
*/ + if (IfOperandIsLiveAfterInsn(oldBaseOpnd, nextInsn)) { + return false; + } + return true; +} + +template +void PeepOptimizer::Run() { + auto *patterMatcher = peepOptMemPool->New(cgFunc, peepOptMemPool); + patterMatcher->InitOpts(); + FOR_ALL_BB(bb, &cgFunc) { + FOR_BB_INSNS_SAFE(insn, bb, nextInsn) { + if (!insn->IsMachineInstruction()) { + continue; + } + patterMatcher->Run(*bb, *insn); + } + } +} + +int32 PeepOptimizer::index = 0; + +void PeepHoleOptimizer::Peephole0() { + auto memPool = std::make_unique(memPoolCtrler, "peepholeOptObj"); + PeepOptimizer peepOptimizer(*cgFunc, memPool.get()); +#if TARGAARCH64 || TARGRISCV64 + peepOptimizer.Run(); +#endif +#if TARGARM32 + peepOptimizer.Run(); +#endif +} + +void PeepHoleOptimizer::PeepholeOpt() { + auto memPool = std::make_unique(memPoolCtrler, "peepholeOptObj"); + PeepOptimizer peepOptimizer(*cgFunc, memPool.get()); +#if TARGAARCH64 || TARGRISCV64 + peepOptimizer.Run(); +#endif +#if TARGARM32 + peepOptimizer.Run(); +#endif +} + +void PeepHoleOptimizer::PrePeepholeOpt() { + auto memPool = std::make_unique(memPoolCtrler, "peepholeOptObj"); + PeepOptimizer peepOptimizer(*cgFunc, memPool.get()); +#if TARGAARCH64 || TARGRISCV64 + peepOptimizer.Run(); +#endif +#if TARGARM32 + peepOptimizer.Run(); +#endif +} + +void PeepHoleOptimizer::PrePeepholeOpt1() { + auto memPool = std::make_unique(memPoolCtrler, "peepholeOptObj"); + PeepOptimizer peepOptimizer(*cgFunc, memPool.get()); +#if TARGAARCH64 || TARGRISCV64 + peepOptimizer.Run(); +#endif +#if TARGARM32 + peepOptimizer.Run(); +#endif +} + +/* === SSA form === */ +bool CgPeepHole::PhaseRun(maplebe::CGFunc &f) { + CGSSAInfo *cgssaInfo = GET_ANALYSIS(CgSSAConstruct, f); + CHECK_FATAL((cgssaInfo != nullptr), "Get ssaInfo failed!"); + MemPool *mp = GetPhaseMemPool(); + auto *cgpeep = mp->New(f, mp, cgssaInfo); + CHECK_FATAL((cgpeep != nullptr), "Creat AArch64CGPeepHole failed!"); + cgpeep->Run(); + return false; +} + +void CgPeepHole::GetAnalysisDependence(AnalysisDep &aDep) const { + aDep.AddRequired(); + aDep.AddPreserved(); +} +MAPLE_TRANSFORM_PHASE_REGISTER_CANSKIP(CgPeepHole, cgpeephole) +#endif +/* === Physical Pre Form === */ +bool CgPrePeepHole::PhaseRun(maplebe::CGFunc &f) { + MemPool *mp = GetPhaseMemPool(); + #if defined TARGAARCH64 + auto *cgpeep = mp->New(f, mp); + #elif defined TARGX86_64 + auto *cgpeep = mp->New(f, mp); + #endif + CHECK_FATAL(cgpeep != nullptr, "PeepHoleOptimizer instance create failure"); + cgpeep->Run(); + return false; +} +MAPLE_TRANSFORM_PHASE_REGISTER_CANSKIP(CgPrePeepHole, cgprepeephole) + +/* === Physical Post Form === */ +bool CgPostPeepHole::PhaseRun(maplebe::CGFunc &f) { + MemPool *mp = GetPhaseMemPool(); + #if defined TARGAARCH64 + auto *cgpeep = mp->New(f, mp); + #elif defined TARGX86_64 + auto *cgpeep = mp->New(f, mp); + #endif + CHECK_FATAL(cgpeep != nullptr, "PeepHoleOptimizer instance create failure"); + cgpeep->Run(); + return false; +} +MAPLE_TRANSFORM_PHASE_REGISTER_CANSKIP(CgPostPeepHole, cgpostpeephole) + +#if TARGAARCH64 +bool CgPrePeepHole0::PhaseRun(maplebe::CGFunc &f) { + auto *peep = GetPhaseMemPool()->New(&f); + CHECK_FATAL(peep != nullptr, "PeepHoleOptimizer instance create failure"); + peep->PrePeepholeOpt(); + return false; +} +MAPLE_TRANSFORM_PHASE_REGISTER_CANSKIP(CgPrePeepHole0, prepeephole) + +bool CgPrePeepHole1::PhaseRun(maplebe::CGFunc &f) { + auto *peep = GetPhaseMemPool()->New(&f); + CHECK_FATAL(peep != nullptr, "PeepHoleOptimizer instance create failure"); + peep->PrePeepholeOpt1(); + return false; +} 
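// --- Illustrative aside (not part of the patch): the liveness query used by the patterns ---
// IfOperandIsLiveAfterInsn()/FindRegLiveOut()/CheckOpndLiveinSuccs() above answer one
// question: starting right after an instruction, is the register used before it is redefined
// on some path? The first touch decides (use => live, def => dead); blocks that never touch
// it forward the query to their successors, with a visited set standing in for the
// InternalFlag3/PeepOptimizer::index marking. A simplified standalone sketch; InsnSketch and
// BlockSketch are hypothetical types, not the MapleBE Insn/BB classes.
#include <cstdint>
#include <unordered_set>
#include <vector>

enum class Touch { kUse, kDef, kNone };

struct InsnSketch {
  Touch touch;  // how this insn touches the register being queried
};

struct BlockSketch {
  uint32_t id;
  std::vector<InsnSketch> insns;
  std::vector<BlockSketch*> succs;
};

bool IsLiveInto(const BlockSketch &bb, std::unordered_set<uint32_t> &visited) {
  if (!visited.insert(bb.id).second) {
    return false;  // already explored for this query
  }
  for (const InsnSketch &insn : bb.insns) {
    if (insn.touch == Touch::kUse) {
      return true;   // used before any redefinition: live
    }
    if (insn.touch == Touch::kDef) {
      return false;  // redefined first: dead along this path
    }
  }
  for (const BlockSketch *succ : bb.succs) {
    if (IsLiveInto(*succ, visited)) {
      return true;
    }
  }
  return false;
}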
+MAPLE_TRANSFORM_PHASE_REGISTER_CANSKIP(CgPrePeepHole1, prepeephole1) + +bool CgPeepHole0::PhaseRun(maplebe::CGFunc &f) { + auto *peep = GetPhaseMemPool()->New(&f); + CHECK_FATAL(peep != nullptr, "PeepHoleOptimizer instance create failure"); + peep->Peephole0(); + return false; +} +MAPLE_TRANSFORM_PHASE_REGISTER_CANSKIP(CgPeepHole0, peephole0) + +bool CgPeepHole1::PhaseRun(maplebe::CGFunc &f) { + auto *peep = GetPhaseMemPool()->New(&f); + CHECK_FATAL(peep != nullptr, "PeepHoleOptimizer instance create failure"); + peep->PeepholeOpt(); + return false; +} +MAPLE_TRANSFORM_PHASE_REGISTER_CANSKIP(CgPeepHole1, peephole) +#endif + +} /* namespace maplebe */ diff --git a/src/mapleall/maple_be/src/cg/pressure.cpp b/src/mapleall/maple_be/src/cg/pressure.cpp new file mode 100644 index 0000000000000000000000000000000000000000..65369ba5707882185658aad3d6f7271b80210807 --- /dev/null +++ b/src/mapleall/maple_be/src/cg/pressure.cpp @@ -0,0 +1,37 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "pressure.h" +#if TARGAARCH64 +#include "aarch64_schedule.h" +#elif TARGRISCV64 +#include "riscv64_schedule.h" +#endif +#include "deps.h" + +namespace maplebe { +/* ------- RegPressure function -------- */ +int32 RegPressure::maxRegClassNum = 0; + +/* print regpressure information */ +void RegPressure::DumpRegPressure() const { + PRINT_STR_VAL("Priority: ", priority); + PRINT_STR_VAL("maxDepth: ", maxDepth); + PRINT_STR_VAL("near: ", near); + PRINT_STR_VAL("callNum: ", callNum); + + LogInfo::MapleLogger() << "\n"; +} +} /* namespace maplebe */ + diff --git a/src/mapleall/maple_be/src/cg/proepilog.cpp b/src/mapleall/maple_be/src/cg/proepilog.cpp new file mode 100644 index 0000000000000000000000000000000000000000..53e392d7e1bd30f7c03c120713f19f8944b2d10e --- /dev/null +++ b/src/mapleall/maple_be/src/cg/proepilog.cpp @@ -0,0 +1,47 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "proepilog.h" +#if TARGAARCH64 +#include "aarch64_proepilog.h" +#elif TARGRISCV64 +#include "riscv64_proepilog.h" +#endif +#if TARGARM32 +#include "arm32_proepilog.h" +#endif +#if TARGX86_64 +#include "x64_proepilog.h" +#endif +#include "cgfunc.h" +#include "cg.h" + +namespace maplebe { +using namespace maple; + +bool CgGenProEpiLog::PhaseRun(maplebe::CGFunc &f) { + GenProEpilog *genPE = nullptr; +#if TARGAARCH64 || TARGRISCV64 + genPE = GetPhaseAllocator()->New(f, *ApplyTempMemPool()); +#endif +#if TARGARM32 + genPE = GetPhaseAllocator()->New(f); +#endif +#if TARGX86_64 + genPE = GetPhaseAllocator()->New(f); +#endif + genPE->Run(); + return false; +} +} /* namespace maplebe */ diff --git a/src/mapleall/maple_be/src/cg/ra_opt.cpp b/src/mapleall/maple_be/src/cg/ra_opt.cpp new file mode 100644 index 0000000000000000000000000000000000000000..72af928e06ef62b83b6e0ea3c00bf14dfb09e5d1 --- /dev/null +++ b/src/mapleall/maple_be/src/cg/ra_opt.cpp @@ -0,0 +1,55 @@ +/* + * Copyright (c) [2021] Futurewei Technologies Co., Ltd. All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan Permissive Software License v2. + * You can use this software according to the terms and conditions of the MulanPSL - 2.0. + * You may obtain a copy of MulanPSL - 2.0 at: + * + * https://opensource.org/licenses/MulanPSL-2.0 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the MulanPSL - 2.0 for more details. + */ + +#include "cgfunc.h" +#if TARGAARCH64 +#include "aarch64_ra_opt.h" +#elif TARGRISCV64 +#include "riscv64_ra_opt.h" +#endif + +namespace maplebe { +using namespace maple; + +bool CgRaOpt::PhaseRun(maplebe::CGFunc &f) { + MemPool *memPool = GetPhaseMemPool(); + RaOpt *raOpt = nullptr; +#if TARGAARCH64 + raOpt = memPool->New(f, *memPool); +#elif || TARGRISCV64 + raOpt = memPool->New(f, *memPool); +#endif + + if (raOpt) { + LiveAnalysis *live = GET_ANALYSIS(CgLiveAnalysis, f); + live->ResetLiveSet(); + MaplePhase *phase = GetAnalysisInfoHook()-> + ForceRunAnalysisPhase, CGFunc>(&CgDomAnalysis::id, f); + DomAnalysis *dom = static_cast(phase)->GetResult(); + raOpt->SetDomInfo(dom); + raOpt->Run(); + /* the live range info may changed, so invalid the info. */ + if (live != nullptr) { + live->ClearInOutDataInfo(); + } + } + return false; +} +void CgRaOpt::GetAnalysisDependence(maple::AnalysisDep &aDep) const { + aDep.AddRequired(); + aDep.PreservedAllExcept(); +} +MAPLE_TRANSFORM_PHASE_REGISTER_CANSKIP(CgRaOpt, raopt) +} /* namespace maplebe */ diff --git a/src/mapleall/maple_be/src/cg/reaching.cpp b/src/mapleall/maple_be/src/cg/reaching.cpp new file mode 100644 index 0000000000000000000000000000000000000000..a2797a8aa835db48afb9355246110c8b9c2382a1 --- /dev/null +++ b/src/mapleall/maple_be/src/cg/reaching.cpp @@ -0,0 +1,1464 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */
+#if TARGAARCH64
+#include "aarch64_reaching.h"
+#include "aarch64_isa.h"
+#elif TARGRISCV64
+#include "riscv64_reaching.h"
+#endif
+#if TARGARM32
+#include "arm32_reaching.h"
+#endif
+#include "cg_option.h"
+#include "cgfunc.h"
+#include "cg.h"
+
+/*
+ * This phase builds bb->in and bb->out information for stack MemOperands and RegOperands. Each bit in DataInfo
+ * represents whether the register or memory slot is live or not. To save storage space, the stack offset is
+ * divided by 4, since the offset is always a multiple of 4.
+ * The algorithm mainly includes 2 parts:
+ * 1. initialize each BB
+ *    (1) insert pseudoInsns for function parameters, ehBBs, and the return register R0/V0
+ *    (2) init bb->gen, bb->use, bb->out
+ * 2. build in and out
+ *    (1) In[BB] = Union all of out[Parents(bb)]
+ *    (2) Out[BB] = gen[BB] union in[BB]
+ * Additionally, this phase provides several common functions about data flow; users can call these functions
+ * conveniently in optimization phases.
+ */
+namespace maplebe {
+ReachingDefinition::ReachingDefinition(CGFunc &func, MemPool &memPool)
+    : AnalysisResult(&memPool), cgFunc(&func), rdAlloc(&memPool), stackMp(func.GetStackMemPool()),
+      pseudoInsns(rdAlloc.Adapter()), kMaxBBNum(cgFunc->NumBBs() + 1), normalBBSet(rdAlloc.Adapter()),
+      cleanUpBBSet(rdAlloc.Adapter()) {}
+
+/* check whether the opnd is the stack (frame) register or not */
+bool ReachingDefinition::IsFrameReg(const Operand &opnd) const {
+  if (!opnd.IsRegister()) {
+    return false;
+  }
+  auto &reg = static_cast(opnd);
+  return cgFunc->IsFrameReg(reg);
+}
+
+/* initialize bb->out; bb->out only includes the generated DataInfo */
+void ReachingDefinition::InitOut(const BB &bb) {
+  if ((mode & kRDRegAnalysis) != 0) {
+    *regOut[bb.GetId()] = *regGen[bb.GetId()];
+  }
+  if ((mode & kRDMemAnalysis) != 0) {
+    *memOut[bb.GetId()] = *memGen[bb.GetId()];
+  }
+}
+
+/* when the DataInfo objects will not be used later, they should be cleared. */
+void ReachingDefinition::ClearDefUseInfo() {
+  for (auto insn : pseudoInsns) {
+    /* Keep return pseudo to extend the return register liveness to 'ret'.
+     * Backward propagation can move the return register definition far from the return.
+     */
+#ifndef TARGX86_64
+    if (insn->GetMachineOpcode() == MOP_pseudo_ret_int || insn->GetMachineOpcode() == MOP_pseudo_ret_float) {
+      continue;
+    }
+#endif
+    insn->GetBB()->RemoveInsn(*insn);
+  }
+  FOR_ALL_BB(bb, cgFunc) {
+    delete (regGen[bb->GetId()]);
+    regGen[bb->GetId()] = nullptr;
+    delete (regUse[bb->GetId()]);
+    regUse[bb->GetId()] = nullptr;
+    delete (regIn[bb->GetId()]);
+    regIn[bb->GetId()] = nullptr;
+    delete (regOut[bb->GetId()]);
+    regOut[bb->GetId()] = nullptr;
+    delete (memGen[bb->GetId()]);
+    memGen[bb->GetId()] = nullptr;
+    delete (memUse[bb->GetId()]);
+    memUse[bb->GetId()] = nullptr;
+    delete (memIn[bb->GetId()]);
+    memIn[bb->GetId()] = nullptr;
+    delete (memOut[bb->GetId()]);
+    memOut[bb->GetId()] = nullptr;
+  }
+  regGen.clear();
+  regGen.shrink_to_fit();
+  regUse.clear();
+  regUse.shrink_to_fit();
+  regIn.clear();
+  regIn.shrink_to_fit();
+  regOut.clear();
+  regOut.shrink_to_fit();
+  memGen.clear();
+  memGen.shrink_to_fit();
+  memUse.clear();
+  memUse.shrink_to_fit();
+  memIn.clear();
+  memIn.shrink_to_fit();
+  memOut.clear();
+  memOut.shrink_to_fit();
+  cgFunc->SetRD(nullptr);
+}
+
+/*
+ * find used insns for register.
+ * input: + * insn: the insn in which register is defined + * regNO: the No of register + * isRegNO: this argument is used to form function overloading + * return: + * the set of used insns for register + */ +InsnSet ReachingDefinition::FindUseForRegOpnd(Insn &insn, uint32 indexOrRegNO, bool isRegNO) const { + InsnSet useInsnSet; + uint32 regNO = indexOrRegNO; + if (!isRegNO) { + Operand &opnd = insn.GetOperand(indexOrRegNO); + auto ®Opnd = static_cast(opnd); + regNO = regOpnd.GetRegisterNumber(); + } + /* register may be redefined in current bb */ + bool findFinish = FindRegUseBetweenInsn(regNO, insn.GetNext(), insn.GetBB()->GetLastInsn(), useInsnSet); + std::vector visitedBB(kMaxBBNum, false); + if (findFinish || !regOut[insn.GetBB()->GetId()]->TestBit(regNO)) { + if (!insn.GetBB()->GetEhSuccs().empty()) { + DFSFindUseForRegOpnd(*insn.GetBB(), regNO, visitedBB, useInsnSet, true); + } + } else { + DFSFindUseForRegOpnd(*insn.GetBB(), regNO, visitedBB, useInsnSet, false); + } + + if (!insn.GetBB()->IsCleanup() && firstCleanUpBB != nullptr) { + if (regUse[firstCleanUpBB->GetId()]->TestBit(regNO)) { + findFinish = FindRegUseBetweenInsn(regNO, firstCleanUpBB->GetFirstInsn(), + firstCleanUpBB->GetLastInsn(), useInsnSet); + if (findFinish || !regOut[firstCleanUpBB->GetId()]->TestBit(regNO)) { + return useInsnSet; + } + } + DFSFindUseForRegOpnd(*firstCleanUpBB, regNO, visitedBB, useInsnSet, false); + } + + return useInsnSet; +} + +/* + * find used insns for register iteratively. + * input: + * startBB: find used insns starting from startBB + * regNO: the No of register to be find + * visitedBB: record these visited BB + * useInsnSet: used insns of register is saved in this set + */ +void ReachingDefinition::DFSFindUseForRegOpnd(const BB &startBB, uint32 regNO, std::vector &visitedBB, + InsnSet &useInsnSet, bool onlyFindForEhSucc = false) const { + if (!onlyFindForEhSucc) { + for (auto succBB : startBB.GetSuccs()) { + if (!regIn[succBB->GetId()]->TestBit(regNO)) { + continue; + } + if (visitedBB[succBB->GetId()]) { + continue; + } + visitedBB[succBB->GetId()] = true; + bool findFinish = false; + if (regUse[succBB->GetId()]->TestBit(regNO)) { + findFinish = FindRegUseBetweenInsn(regNO, succBB->GetFirstInsn(), succBB->GetLastInsn(), useInsnSet); + } else if (regGen[succBB->GetId()]->TestBit(regNO)) { + findFinish = true; + } + if (!findFinish && regOut[succBB->GetId()]->TestBit(regNO)) { + DFSFindUseForRegOpnd(*succBB, regNO, visitedBB, useInsnSet, false); + } + } + } + + for (auto ehSuccBB : startBB.GetEhSuccs()) { + if (!regIn[ehSuccBB->GetId()]->TestBit(regNO)) { + continue; + } + if (visitedBB[ehSuccBB->GetId()]) { + continue; + } + visitedBB[ehSuccBB->GetId()] = true; + + bool findFinish = false; + if (regUse[ehSuccBB->GetId()]->TestBit(regNO)) { + findFinish = FindRegUseBetweenInsn(regNO, ehSuccBB->GetFirstInsn(), ehSuccBB->GetLastInsn(), useInsnSet); + } else if (regGen[ehSuccBB->GetId()]->TestBit(regNO)) { + findFinish = true; + } + if (!findFinish && regOut[ehSuccBB->GetId()]->TestBit(regNO)) { + DFSFindUseForRegOpnd(*ehSuccBB, regNO, visitedBB, useInsnSet, false); + } + } +} + +/* check whether register defined in regDefInsn has used insns */ +bool ReachingDefinition::RegHasUsePoint(uint32 regNO, Insn ®DefInsn) const { + InsnSet useInsnSet; + bool findFinish = FindRegUseBetweenInsn(regNO, regDefInsn.GetNext(), regDefInsn.GetBB()->GetLastInsn(), useInsnSet); + if (!useInsnSet.empty()) { + return true; + } + if (!findFinish) { + std::vector visitedBB(kMaxBBNum, false); + return 
RegIsUsedInOtherBB(*regDefInsn.GetBB(), regNO, visitedBB); + } + return false; +} + +/* check whether register is used in other BB except startBB */ +bool ReachingDefinition::RegIsUsedInOtherBB(const BB &startBB, uint32 regNO, std::vector &visitedBB) const { + InsnSet useInsnSet; + for (auto succBB : startBB.GetSuccs()) { + if (!regIn[succBB->GetId()]->TestBit(regNO)) { + continue; + } + if (visitedBB[succBB->GetId()]) { + continue; + } + visitedBB[succBB->GetId()] = true; + bool findFinish = false; + if (regUse[succBB->GetId()]->TestBit(regNO)) { + if (!regGen[succBB->GetId()]->TestBit(regNO)) { + return true; + } + useInsnSet.clear(); + findFinish = FindRegUseBetweenInsn(regNO, succBB->GetFirstInsn(), succBB->GetLastInsn(), useInsnSet); + if (!useInsnSet.empty()) { + return true; + } + } else if (regGen[succBB->GetId()]->TestBit(regNO)) { + findFinish = true; + } + if (!findFinish && regOut[succBB->GetId()]->TestBit(regNO)) { + if (RegIsUsedInOtherBB(*succBB, regNO, visitedBB)) { + return true; + } + } + } + + for (auto ehSuccBB : startBB.GetEhSuccs()) { + if (!regIn[ehSuccBB->GetId()]->TestBit(regNO)) { + continue; + } + if (visitedBB[ehSuccBB->GetId()]) { + continue; + } + visitedBB[ehSuccBB->GetId()] = true; + + bool findFinish = false; + if (regUse[ehSuccBB->GetId()]->TestBit(regNO)) { + if (!regGen[ehSuccBB->GetId()]->TestBit(regNO)) { + return true; + } + useInsnSet.clear(); + findFinish = FindRegUseBetweenInsn(regNO, ehSuccBB->GetFirstInsn(), ehSuccBB->GetLastInsn(), useInsnSet); + if (!useInsnSet.empty()) { + return true; + } + } else if (regGen[ehSuccBB->GetId()]->TestBit(regNO)) { + findFinish = true; + } + if (!findFinish && regOut[ehSuccBB->GetId()]->TestBit(regNO)) { + if (RegIsUsedInOtherBB(*ehSuccBB, regNO, visitedBB)) { + return true; + } + } + } + + return false; +} + +bool ReachingDefinition::RegIsUsedInCleanUpBB(uint32 regNO) const { + if (firstCleanUpBB == nullptr) { + return false; + } + InsnSet useInsnSet; + if (regUse[firstCleanUpBB->GetId()]->TestBit(regNO)) { + bool findFinish = FindRegUseBetweenInsn(regNO, firstCleanUpBB->GetFirstInsn(), + firstCleanUpBB->GetLastInsn(), useInsnSet); + if (!useInsnSet.empty()) { + return true; + } + if (findFinish) { + return false; + } + } + + std::vector visitedBB(kMaxBBNum, false); + DFSFindUseForRegOpnd(*firstCleanUpBB, regNO, visitedBB, useInsnSet, false); + if (useInsnSet.empty()) { + return true; + } + + return false; +} + +/* + * find used insns for stack memory operand iteratively. 
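+ * (each DataInfo bit indexes a 4-byte stack slot, i.e. bit = offset / kMemZoomSize, matching the
+ * divide-by-4 encoding described at the top of this file)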
+ * input: + * startBB: find used insns starting from startBB + * offset: the offset of memory to be find + * visitedBB: record these visited BB + * useInsnSet: used insns of stack memory operand is saved in this set + */ +void ReachingDefinition::DFSFindUseForMemOpnd(const BB &startBB, uint32 offset, std::vector &visitedBB, + InsnSet &useInsnSet, bool onlyFindForEhSucc = false) const { + if (!onlyFindForEhSucc) { + for (auto succBB : startBB.GetSuccs()) { + if (!memIn[succBB->GetId()]->TestBit(offset / kMemZoomSize)) { + continue; + } + if (visitedBB[succBB->GetId()]) { + continue; + } + visitedBB[succBB->GetId()] = true; + bool findFinish = false; + if (memUse[succBB->GetId()]->TestBit(offset / kMemZoomSize)) { + findFinish = FindMemUseBetweenInsn(offset, succBB->GetFirstInsn(), succBB->GetLastInsn(), useInsnSet); + } else if (memGen[succBB->GetId()]->TestBit(offset / kMemZoomSize)) { + findFinish = true; + } + if (!findFinish && memOut[succBB->GetId()]->TestBit(offset / kMemZoomSize)) { + DFSFindUseForMemOpnd(*succBB, offset, visitedBB, useInsnSet); + } + } + } + + for (auto ehSuccBB : startBB.GetEhSuccs()) { + if (!memIn[ehSuccBB->GetId()]->TestBit(offset / kMemZoomSize)) { + continue; + } + if (visitedBB[ehSuccBB->GetId()]) { + continue; + } + visitedBB[ehSuccBB->GetId()] = true; + bool findFinish = false; + if (memUse[ehSuccBB->GetId()]->TestBit(offset / kMemZoomSize)) { + findFinish = FindMemUseBetweenInsn(offset, ehSuccBB->GetFirstInsn(), ehSuccBB->GetLastInsn(), useInsnSet); + } else if (memGen[ehSuccBB->GetId()]->TestBit(offset / kMemZoomSize)) { + findFinish = true; + } + if (!findFinish && memOut[ehSuccBB->GetId()]->TestBit(offset / kMemZoomSize)) { + DFSFindUseForMemOpnd(*ehSuccBB, offset, visitedBB, useInsnSet); + } + } +} + +/* Out[BB] = gen[BB] union in[BB]. if bb->out changed, return true. */ +bool ReachingDefinition::GenerateOut(const BB &bb) { + bool outInfoChanged = false; + if ((mode & kRDRegAnalysis) != 0) { + LocalMapleAllocator alloc(stackMp); + DataInfo &bbRegOutBak = regOut[bb.GetId()]->Clone(alloc); + *regOut[bb.GetId()] = *(regIn[bb.GetId()]); + regOut[bb.GetId()]->OrBits(*regGen[bb.GetId()]); + if (!regOut[bb.GetId()]->IsEqual(bbRegOutBak)) { + outInfoChanged = true; + } + } + + if ((mode & kRDMemAnalysis) != 0) { + LocalMapleAllocator alloc(stackMp); + DataInfo &bbMemOutBak = memOut[bb.GetId()]->Clone(alloc); + *memOut[bb.GetId()] = *memIn[bb.GetId()]; + memOut[bb.GetId()]->OrBits(*memGen[bb.GetId()]); + if (!memOut[bb.GetId()]->IsEqual(bbMemOutBak)) { + outInfoChanged = true; + } + } + return outInfoChanged; +} + +bool ReachingDefinition::GenerateOut(const BB &bb, const std::set &infoIndex, const bool isReg) { + bool outInfoChanged = false; + if (isReg) { + for (auto index : infoIndex) { + uint64 bbRegOutBak = regOut[bb.GetId()]->GetElem(index); + regOut[bb.GetId()]->SetElem(index, regIn[bb.GetId()]->GetElem(index)); + regOut[bb.GetId()]->OrDesignateBits(*regGen[bb.GetId()], index); + if (!outInfoChanged && (bbRegOutBak != regOut[bb.GetId()]->GetElem(index))) { + outInfoChanged = true; + } + } + } else { + for (auto index : infoIndex) { + uint64 bbMemOutBak = memOut[bb.GetId()]->GetElem(index); + memOut[bb.GetId()]->SetElem(index, memIn[bb.GetId()]->GetElem(index)); + memOut[bb.GetId()]->OrDesignateBits(*memGen[bb.GetId()], index); + if (bbMemOutBak != memOut[bb.GetId()]->GetElem(index)) { + outInfoChanged = true; + } + } + } + return outInfoChanged; +} + + +/* In[BB] = Union all of out[Parents(bb)]. return true if bb->in changed. 
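+ * (e.g. if BB4 has predecessors BB2 and BB3, this computes in[BB4] |= out[BB2] | out[BB3] over both
+ * normal and EH predecessors, and returns true only when some bit of in[BB4] is newly set)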
*/ +bool ReachingDefinition::GenerateIn(const BB &bb) { + bool inInfoChanged = false; + if ((mode & kRDRegAnalysis) != 0) { + LocalMapleAllocator alloc(stackMp); + DataInfo &bbRegInBak = regIn[bb.GetId()]->Clone(alloc); + for (auto predBB : bb.GetPreds()) { + regIn[bb.GetId()]->OrBits(*regOut[predBB->GetId()]); + } + for (auto predEhBB : bb.GetEhPreds()) { + regIn[bb.GetId()]->OrBits(*regOut[predEhBB->GetId()]); + } + + if (!regIn[bb.GetId()]->IsEqual(bbRegInBak)) { + inInfoChanged = true; + } + } + if ((mode & kRDMemAnalysis) != 0) { + LocalMapleAllocator alloc(stackMp); + DataInfo &memInBak = memIn[bb.GetId()]->Clone(alloc); + for (auto predBB : bb.GetPreds()) { + memIn[bb.GetId()]->OrBits(*memOut[predBB->GetId()]); + } + for (auto predEhBB : bb.GetEhPreds()) { + memIn[bb.GetId()]->OrBits(*memOut[predEhBB->GetId()]); + } + + if (!memIn[bb.GetId()]->IsEqual(memInBak)) { + inInfoChanged = true; + } + } + return inInfoChanged; +} + +/* In[BB] = Union all of out[Parents(bb)]. return true if bb->in changed. */ +bool ReachingDefinition::GenerateIn(const BB &bb, const std::set &infoIndex, const bool isReg) { + bool inInfoChanged = false; + + if (isReg) { + for (auto index : infoIndex) { + uint64 bbRegInBak = regIn[bb.GetId()]->GetElem(index); + regIn[bb.GetId()]->SetElem(index, 0ULL); + for (auto predBB : bb.GetPreds()) { + regIn[bb.GetId()]->OrDesignateBits(*regOut[predBB->GetId()], index); + } + for (auto predEhBB : bb.GetEhPreds()) { + regIn[bb.GetId()]->OrDesignateBits(*regOut[predEhBB->GetId()], index); + } + + if (bbRegInBak != regIn[bb.GetId()]->GetElem(index)) { + inInfoChanged = true; + } + } + } else { + for (auto index : infoIndex) { + uint64 bbMemInBak = memIn[bb.GetId()]->GetElem(index); + memIn[bb.GetId()]->SetElem(index, 0ULL); + for (auto predBB : bb.GetPreds()) { + memIn[bb.GetId()]->OrDesignateBits(*memOut[predBB->GetId()], index); + } + for (auto predEhBB : bb.GetEhPreds()) { + memIn[bb.GetId()]->OrDesignateBits(*memOut[predEhBB->GetId()], index); + } + + if (bbMemInBak != memIn[bb.GetId()]->GetElem(index)) { + inInfoChanged = true; + } + } + } + return inInfoChanged; +} + + +/* In[firstCleanUpBB] = Union all of out[bbNormalSet] */ +bool ReachingDefinition::GenerateInForFirstCleanUpBB() { + CHECK_NULL_FATAL(firstCleanUpBB); + if ((mode & kRDRegAnalysis) != 0) { + regIn[firstCleanUpBB->GetId()]->ResetAllBit(); + } + if ((mode & kRDMemAnalysis) != 0) { + memIn[firstCleanUpBB->GetId()]->ResetAllBit(); + } + + for (auto normalBB : normalBBSet) { + if ((mode & kRDRegAnalysis) != 0) { + regIn[firstCleanUpBB->GetId()]->OrBits(*regOut[normalBB->GetId()]); + } + + if ((mode & kRDMemAnalysis) != 0) { + memIn[firstCleanUpBB->GetId()]->OrBits(*memOut[normalBB->GetId()]); + } + } + + return ((regIn[firstCleanUpBB->GetId()] != nullptr && regIn[firstCleanUpBB->GetId()]->Size() > 0) || + (memIn[firstCleanUpBB->GetId()] != nullptr && memIn[firstCleanUpBB->GetId()]->Size() > 0)); +} + +bool ReachingDefinition::GenerateInForFirstCleanUpBB(bool isReg, const std::set &infoIndex) { + CHECK_NULL_FATAL(firstCleanUpBB); + bool inInfoChanged = false; + if (isReg) { + for (auto index : infoIndex) { + uint64 regInElemBak = regIn[firstCleanUpBB->GetId()]->GetElem(index); + regIn[firstCleanUpBB->GetId()]->SetElem(index, 0ULL); + for (auto &normalBB : normalBBSet) { + regIn[firstCleanUpBB->GetId()]->OrDesignateBits(*regOut[normalBB->GetId()], index); + } + if (!inInfoChanged && (regIn[firstCleanUpBB->GetId()]->GetElem(index) != regInElemBak)) { + inInfoChanged = true; + } + } + } else { + for (auto index : 
infoIndex) { + uint64 memInElemBak = memIn[firstCleanUpBB->GetId()]->GetElem(index); + memIn[firstCleanUpBB->GetId()]->SetElem(index, 0ULL); + for (auto &normalBB : normalBBSet) { + memIn[firstCleanUpBB->GetId()]->OrDesignateBits(*memOut[normalBB->GetId()], index); + } + if (!inInfoChanged && (memIn[firstCleanUpBB->GetId()]->GetElem(index) != memInElemBak)) { + inInfoChanged = true; + } + } + } + return inInfoChanged; +} + +/* allocate memory for DataInfo of bb */ +void ReachingDefinition::InitRegAndMemInfo(const BB &bb) { + if ((mode & kRDRegAnalysis) != 0) { + const uint32 kMaxRegCount = cgFunc->GetMaxVReg(); + regGen[bb.GetId()] = new DataInfo(kMaxRegCount, rdAlloc); + regUse[bb.GetId()] = new DataInfo(kMaxRegCount, rdAlloc); + regIn[bb.GetId()] = new DataInfo(kMaxRegCount, rdAlloc); + regOut[bb.GetId()] = new DataInfo(kMaxRegCount, rdAlloc); + } + + if ((mode & kRDMemAnalysis) != 0) { + const int32 kStackSize = GetStackSize(); + memGen[bb.GetId()] = new DataInfo((kStackSize / kMemZoomSize), rdAlloc); + memUse[bb.GetId()] = new DataInfo((kStackSize / kMemZoomSize), rdAlloc); + memIn[bb.GetId()] = new DataInfo((kStackSize / kMemZoomSize), rdAlloc); + memOut[bb.GetId()] = new DataInfo((kStackSize / kMemZoomSize), rdAlloc); + } +} + +/* insert pseudoInsns for function parameters, ehBB, and return R0/V0. init bb->gen, bb->use, bb->out */ +void ReachingDefinition::Initialize() { + InitDataSize(); + AddRetPseudoInsns(); + FOR_ALL_BB(bb, cgFunc) { + InitRegAndMemInfo(*bb); + } + FOR_ALL_BB(bb, cgFunc) { + if (bb == cgFunc->GetFirstBB()) { + InitStartGen(); + } + if (!bb->GetEhPreds().empty()) { + InitEhDefine(*bb); + } + InitGenUse(*bb); + InitOut(*bb); + + if (bb->IsCleanup()) { + (void)cleanUpBBSet.insert(bb); + } else { + (void)normalBBSet.insert(bb); + } + } + maxInsnNO = 0; + FOR_ALL_BB(bb, cgFunc) { + FOR_BB_INSNS(insn, bb) { + insn->SetId(maxInsnNO); + maxInsnNO += kInsnNoInterval; + } + } +} + +void ReachingDefinition::InitDataSize() { + /* to visit vec[cgFunc->NumBBs()], size should be cgFunc->NumBBs() + 1 */ + const uint32 dataSize = cgFunc->NumBBs() + 1; + regIn.resize(dataSize); + regOut.resize(dataSize); + regGen.resize(dataSize); + regUse.resize(dataSize); + memIn.resize(dataSize); + memOut.resize(dataSize); + memGen.resize(dataSize); + memUse.resize(dataSize); +} + +/* compute bb->in, bb->out for each BB execpt cleanup BB */ +void ReachingDefinition::BuildInOutForFuncBody() { + std::unordered_set normalBBSetBak(normalBBSet.begin(), normalBBSet.end()); + std::unordered_set::iterator setItr; + while (!normalBBSetBak.empty()) { + setItr = normalBBSetBak.begin(); + BB *bb = *setItr; + ASSERT(bb != nullptr, "null ptr check"); + (void)normalBBSetBak.erase(setItr); + + if (GenerateIn(*bb)) { + if (GenerateOut(*bb)) { + for (auto succ : bb->GetSuccs()) { + (void)normalBBSetBak.insert(succ); + } + + for (auto ehSucc : bb->GetEhSuccs()) { + (void)normalBBSetBak.insert(ehSucc); + } + } + } + } + ASSERT(normalBBSetBak.empty(), "CG internal error."); +} + +void ReachingDefinition::BuildInOutForFuncBodyBFS() { + std::vector inQueued(kMaxBBNum, false); + std::vector firstVisited(kMaxBBNum, true); + std::queue worklist; + for (auto *bb : normalBBSet) { + worklist.push(bb); + inQueued[bb->GetId()] = true; + } + + while (!worklist.empty()) { + auto *curBB = worklist.front(); + worklist.pop(); + inQueued[curBB->GetId()] = false; + if (GenerateOut(*curBB) || firstVisited[curBB->GetId()]) { + firstVisited[curBB->GetId()] = false; + GenerateIn(*curBB, worklist, inQueued); + } + } +} + +/* In[BB] = 
Union all of out[Parents(bb)]. add succ to worklist, if it is changed and not in worklist */ +void ReachingDefinition::GenerateIn(const BB &bb, std::queue &worklist, std::vector &inQueued) { + if ((mode & kRDRegAnalysis) != 0) { + for (auto *succBB : bb.GetSuccs()) { + if (regIn[succBB->GetId()]->OrBitsCheck(*regOut[bb.GetId()]) && !inQueued[succBB->GetId()]) { + worklist.push(succBB); + inQueued[succBB->GetId()] = true; + } + } + for (auto succEhBB : bb.GetEhSuccs()) { + if (regIn[succEhBB->GetId()]->OrBitsCheck(*regOut[bb.GetId()]) && !inQueued[succEhBB->GetId()]) { + worklist.push(succEhBB); + inQueued[succEhBB->GetId()] = true; + } + } + } + if ((mode & kRDMemAnalysis) != 0) { + for (auto *succBB : bb.GetSuccs()) { + if (memIn[succBB->GetId()]->OrBitsCheck(*memOut[bb.GetId()]) && !inQueued[succBB->GetId()]) { + worklist.push(succBB); + inQueued[succBB->GetId()] = true; + } + } + for (auto succEhBB : bb.GetEhSuccs()) { + if (memIn[succEhBB->GetId()]->OrBitsCheck(*memOut[bb.GetId()]) && !inQueued[succEhBB->GetId()]) { + worklist.push(succEhBB); + inQueued[succEhBB->GetId()] = true; + } + } + } +} + +/* if bb->out changed, update in and out */ +void ReachingDefinition::UpdateInOut(BB &changedBB) { + InitGenUse(changedBB, false); + if (!GenerateOut(changedBB)) { + return; + } + + std::unordered_set bbSet; + std::unordered_set::iterator setItr; + + for (auto succ : changedBB.GetSuccs()) { + (void)bbSet.insert(succ); + } + + for (auto ehSucc : changedBB.GetEhSuccs()) { + (void)bbSet.insert(ehSucc); + } + + while (!bbSet.empty()) { + setItr = bbSet.begin(); + BB *bb = *setItr; + ASSERT(bb != nullptr, "null ptr check"); + bbSet.erase(setItr); + + if (GenerateIn(*bb)) { + if (GenerateOut(*bb)) { + for (auto succ : bb->GetSuccs()) { + (void)bbSet.insert(succ); + } + + for (auto ehSucc : bb->GetEhSuccs()) { + (void)bbSet.insert(ehSucc); + } + } + } + } + + if (!changedBB.IsCleanup() && firstCleanUpBB != nullptr) { + BuildInOutForCleanUpBB(); + } +} + +void ReachingDefinition::UpdateInOut(BB &changedBB, bool isReg) { + std::set changedInfoIndex; + if (isReg) { + LocalMapleAllocator alloc(stackMp); + DataInfo &genInfoBak = regGen[changedBB.GetId()]->Clone(alloc); + InitGenUse(changedBB, false); + genInfoBak.EorBits(*regGen[changedBB.GetId()]); + genInfoBak.GetNonZeroElemsIndex(changedInfoIndex); + } else { + LocalMapleAllocator alloc(stackMp); + DataInfo &genInfoBak = memGen[changedBB.GetId()]->Clone(alloc); + InitGenUse(changedBB, false); + genInfoBak.EorBits(*memGen[changedBB.GetId()]); + genInfoBak.GetNonZeroElemsIndex(changedInfoIndex); + } + if (changedInfoIndex.empty()) { + return; + } + if (!GenerateOut(changedBB, changedInfoIndex, isReg)) { + return; + } + std::set bbSet; + std::set::iterator setItr; + for (auto &succ : changedBB.GetSuccs()) { + (void)bbSet.insert(succ); + } + + for (auto &ehSucc : changedBB.GetEhSuccs()) { + (void)bbSet.insert(ehSucc); + } + while (!bbSet.empty()) { + setItr = bbSet.begin(); + BB *bb = *setItr; + bbSet.erase(setItr); + if (GenerateIn(*bb, changedInfoIndex, isReg)) { + if (GenerateOut(*bb, changedInfoIndex, isReg)) { + for (auto &succ : bb->GetSuccs()) { + (void)bbSet.insert(succ); + } + for (auto &ehSucc : bb->GetEhSuccs()) { + (void)bbSet.insert(ehSucc); + } + } + } + } + + if (!changedBB.IsCleanup() && firstCleanUpBB != nullptr) { + BuildInOutForCleanUpBB(isReg, changedInfoIndex); + } +} + + +/* compute bb->in, bb->out for cleanup BBs */ +void ReachingDefinition::BuildInOutForCleanUpBB() { + ASSERT(firstCleanUpBB != nullptr, "firstCleanUpBB must not be 
nullptr"); + if (GenerateInForFirstCleanUpBB()) { + GenerateOut(*firstCleanUpBB); + } + std::unordered_set cleanupBBSetBak(cleanUpBBSet.begin(), cleanUpBBSet.end()); + std::unordered_set::iterator setItr; + + while (!cleanupBBSetBak.empty()) { + setItr = cleanupBBSetBak.begin(); + BB *bb = *setItr; + cleanupBBSetBak.erase(setItr); + if (GenerateIn(*bb)) { + if (GenerateOut(*bb)) { + for (auto succ : bb->GetSuccs()) { + (void)cleanupBBSetBak.insert(succ); + } + for (auto ehSucc : bb->GetEhSuccs()) { + (void)cleanupBBSetBak.insert(ehSucc); + } + } + } + } + ASSERT(cleanupBBSetBak.empty(), "CG internal error."); +} + +void ReachingDefinition::BuildInOutForCleanUpBB(bool isReg, const std::set &index) { + ASSERT(firstCleanUpBB != nullptr, "firstCleanUpBB must not be nullptr"); + if (GenerateInForFirstCleanUpBB(isReg, index)) { + GenerateOut(*firstCleanUpBB, index, isReg); + } + std::unordered_set cleanupBBSetBak(cleanUpBBSet.begin(), cleanUpBBSet.end()); + std::unordered_set::iterator setItr; + while (!cleanupBBSetBak.empty()) { + setItr = cleanupBBSetBak.begin(); + BB *bb = *setItr; + cleanupBBSetBak.erase(setItr); + if (GenerateIn(*bb, index, isReg)) { + if (GenerateOut(*bb, index, isReg)) { + for (auto &succ : bb->GetSuccs()) { + (void)cleanupBBSetBak.insert(succ); + } + for (auto &ehSucc : bb->GetEhSuccs()) { + (void)cleanupBBSetBak.insert(ehSucc); + } + } + } + } + ASSERT(cleanupBBSetBak.empty(), "CG internal error."); +} + +/* entry for ReachingDefinition Analysis, mode represent to analyze RegOperand, MemOperand or both of them */ +void ReachingDefinition::AnalysisStart() { + if (!cgFunc->GetFirstBB()) { + return; + } + stackSize = static_cast(GetStackSize()); + Initialize(); + /* Build in/out for function body first. (Except cleanup bb) */ + BuildInOutForFuncBodyBFS(); + /* If cleanup bb exists, build in/out for cleanup bbs. firstCleanUpBB->in = Union all non-cleanup bb's out. 
*/ + if (firstCleanUpBB != nullptr) { + BuildInOutForCleanUpBB(); + } + cgFunc->SetRD(this); +} + +/* check whether currentBB can reach endBB according to control flow */ +bool ReachingDefinition::CanReachEndBBFromCurrentBB(const BB ¤tBB, const BB &endBB, + std::vector &traversedBBSet) const { + if (¤tBB == &endBB) { + return true; + } + for (auto predBB : endBB.GetPreds()) { + if (traversedBBSet[predBB->GetId()]) { + continue; + } + traversedBBSet[predBB->GetId()] = true; + if (predBB == ¤tBB) { + return true; + } + if (CanReachEndBBFromCurrentBB(currentBB, *predBB, traversedBBSet)) { + return true; + } + } + for (auto ehPredBB : endBB.GetEhPreds()) { + if (traversedBBSet[ehPredBB->GetId()]) { + continue; + } + traversedBBSet[ehPredBB->GetId()] = true; + if (ehPredBB == ¤tBB) { + return true; + } + if (CanReachEndBBFromCurrentBB(currentBB, *ehPredBB, traversedBBSet)) { + return true; + } + } + return false; +} + +/* check whether register may be redefined form startBB to endBB */ +bool ReachingDefinition::IsLiveInAllPathBB(uint32 regNO, const BB &startBB, const BB &endBB, + std::vector &visitedBB, bool isFirstNo) const { + for (auto succ : startBB.GetSuccs()) { + if (visitedBB[succ->GetId()]) { + continue; + } + visitedBB[succ->GetId()] = true; + if (isFirstNo && CheckRegLiveinReturnBB(regNO, *succ)) { + return false; + } + std::vector traversedPathSet(kMaxBBNum, false); + bool canReachEndBB = true; + if (regGen[succ->GetId()]->TestBit(regNO)) { + canReachEndBB = CanReachEndBBFromCurrentBB(*succ, endBB, traversedPathSet); + if (canReachEndBB) { + return false; + } + } + if (!canReachEndBB) { + continue; + } + bool isLive = IsLiveInAllPathBB(regNO, *succ, endBB, visitedBB, isFirstNo); + if (!isLive) { + return false; + } + } + + for (auto ehSucc : startBB.GetEhSuccs()) { + if (visitedBB[ehSucc->GetId()]) { + continue; + } + visitedBB[ehSucc->GetId()] = true; + if (isFirstNo && CheckRegLiveinReturnBB(regNO, *ehSucc)) { + return false; + } + std::vector traversedPathSet(kMaxBBNum, false); + bool canReachEndBB = true; + if (regGen[ehSucc->GetId()]->TestBit(regNO)) { + canReachEndBB = CanReachEndBBFromCurrentBB(*ehSucc, endBB, traversedPathSet); + if (canReachEndBB) { + return false; + } + } + if (!canReachEndBB) { + continue; + } + bool isLive = IsLiveInAllPathBB(regNO, *ehSucc, endBB, visitedBB, isFirstNo); + if (!isLive) { + return false; + } + } + return true; +} + +/* Check if the reg is used in return BB */ +bool ReachingDefinition::CheckRegLiveinReturnBB(uint32 regNO, const BB &bb) const { +#if TARGAARCH64 || TARGRISCV64 + if (bb.GetKind() == BB::kBBReturn) { + PrimType returnType = cgFunc->GetFunction().GetReturnType()->GetPrimType(); + regno_t returnReg = R0; + if (IsPrimitiveFloat(returnType)) { + returnReg = V0; + } else if (IsPrimitiveInteger(returnType)) { + returnReg = R0; + } + if (regNO == returnReg) { + return true; + } + } +#endif + return false; +} + +bool ReachingDefinition::RegIsUsedIncaller(uint32 regNO, Insn &startInsn, Insn &endInsn) const { + if (startInsn.GetBB() != endInsn.GetBB()) { + return false; + } + if (startInsn.GetNext() == &endInsn || &startInsn == &endInsn) { + return false; + } + auto regDefVec = FindRegDefBetweenInsn(regNO, startInsn.GetNext(), endInsn.GetPrev()); + if (!regDefVec.empty()) { + return false; + } + if (IsCallerSavedReg(regNO) && startInsn.GetNext() != nullptr && + KilledByCallBetweenInsnInSameBB(*startInsn.GetNext(), *(startInsn.GetBB()->GetLastInsn()), regNO)) { + return true; + } + if (CheckRegLiveinReturnBB(regNO, *startInsn.GetBB())) { 
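+    /* regNO carries the function's return value in a return BB (see CheckRegLiveinReturnBB above),
+     * so it counts as used by the caller */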
+ return true; + } + return false; +} + +/* check whether control flow can reach endInsn from startInsn */ +bool ReachingDefinition::RegIsLiveBetweenInsn(uint32 regNO, Insn &startInsn, Insn &endInsn, bool isBack, + bool isFirstNo) const { + ASSERT(&startInsn != &endInsn, "startInsn is not equal to endInsn"); + if (startInsn.GetBB() == endInsn.GetBB()) { + /* register is difined more than once */ + if (startInsn.GetId() > endInsn.GetId()) { + if (!isBack) { + return false; + } else { + return true; + } + } + if (startInsn.GetNext() == &endInsn) { + return true; + } + if (regGen[startInsn.GetBB()->GetId()]->TestBit(regNO)) { + std::vector regDefVec; + if (isBack) { + regDefVec = FindRegDefBetweenInsn(regNO, startInsn.GetNext(), endInsn.GetPrev()); + } else { + regDefVec = FindRegDefBetweenInsn(regNO, &startInsn, endInsn.GetPrev()); + } + if (!regDefVec.empty()) { + return false; + } + } + if (IsCallerSavedReg(regNO) && KilledByCallBetweenInsnInSameBB(*startInsn.GetNext(), *endInsn.GetPrev(), regNO)) { + return false; + } + return true; + } + + if (&startInsn != startInsn.GetBB()->GetLastInsn() && + regGen[startInsn.GetBB()->GetId()]->TestBit(regNO) && + !FindRegDefBetweenInsn(regNO, startInsn.GetNext(), startInsn.GetBB()->GetLastInsn()).empty()) { + return false; + } + + if (&startInsn != startInsn.GetBB()->GetLastInsn() && + IsCallerSavedReg(regNO) && + KilledByCallBetweenInsnInSameBB(*startInsn.GetNext(), *startInsn.GetBB()->GetLastInsn(), regNO)) { + return false; + } + + if (&endInsn != endInsn.GetBB()->GetFirstInsn() && + regGen[endInsn.GetBB()->GetId()]->TestBit(regNO) && + !FindRegDefBetweenInsn(regNO, endInsn.GetBB()->GetFirstInsn(), endInsn.GetPrev()).empty()) { + return false; + } + + if (&endInsn != endInsn.GetBB()->GetFirstInsn() && + IsCallerSavedReg(regNO) && + KilledByCallBetweenInsnInSameBB(*endInsn.GetBB()->GetFirstInsn(), *endInsn.GetPrev(), regNO)) { + return false; + } + + std::vector visitedBB(kMaxBBNum, false); + return IsLiveInAllPathBB(regNO, *startInsn.GetBB(), *endInsn.GetBB(), visitedBB, isFirstNo); +} + +static bool SetDefInsnVecForAsm(Insn *insn, uint32 index, uint32 regNO, std::vector &defInsnVec) { + for (auto reg : static_cast(insn->GetOperand(index)).GetOperands()) { + if (static_cast(reg)->GetRegisterNumber() == regNO) { + defInsnVec.emplace_back(insn); + return true; + } + } + return false; +} + +std::vector ReachingDefinition::FindRegDefBetweenInsn( + uint32 regNO, Insn *startInsn, Insn *endInsn, bool findAll, bool analysisDone) const { + std::vector defInsnVec; + if (startInsn == nullptr || endInsn == nullptr) { + return defInsnVec; + } + + ASSERT(startInsn->GetBB() == endInsn->GetBB(), "two insns must be in a same BB"); + if (analysisDone && !regGen[startInsn->GetBB()->GetId()]->TestBit(regNO)) { + return defInsnVec; + } + + for (Insn *insn = endInsn; insn != nullptr && insn != startInsn->GetPrev(); insn = insn->GetPrev()) { + if (!insn->IsMachineInstruction()) { + continue; + } + + if (insn->IsAsmInsn()) { + if (SetDefInsnVecForAsm(insn, kAsmOutputListOpnd, regNO, defInsnVec) || + SetDefInsnVecForAsm(insn, kAsmClobberListOpnd, regNO, defInsnVec)) { + if (findAll) { + defInsnVec.emplace_back(insn); + } else { + return defInsnVec; + } + } + } + if (insn->IsCall() && IsRegKilledByCallInsn(*insn, regNO)) { + defInsnVec.emplace_back(insn); + if (!findAll) { + return defInsnVec; + } + } + if (insn->IsRegDefined(regNO)) { + defInsnVec.emplace_back(insn); + if (!findAll) { + return defInsnVec; + } + } + } + return defInsnVec; +} + +bool 
ReachingDefinition::RegIsUsedOrDefBetweenInsn(uint32 regNO, Insn &startInsn, Insn &endInsn) const { + ASSERT(&startInsn != &endInsn, "startInsn is not equal to endInsn"); + if (startInsn.GetBB() == endInsn.GetBB()) { + /* register is difined more than once */ + if (startInsn.GetId() > endInsn.GetId()) { + return false; + } + if (startInsn.GetNext() == &endInsn) { + return true; + } + if (regGen[startInsn.GetBB()->GetId()]->TestBit(regNO) && + !FindRegDefBetweenInsn(regNO, startInsn.GetNext(), endInsn.GetPrev()).empty()) { + return false; + } + if (regUse[startInsn.GetBB()->GetId()]->TestBit(regNO)) { + InsnSet useInsnSet; + FindRegUseBetweenInsn(regNO, startInsn.GetNext(), endInsn.GetPrev(), useInsnSet); + if (!useInsnSet.empty()) { + return false; + } + } + if (IsCallerSavedReg(regNO) && KilledByCallBetweenInsnInSameBB(*startInsn.GetNext(), *endInsn.GetPrev(), regNO)) { + return false; + } + return true; + } + + if (&startInsn != startInsn.GetBB()->GetLastInsn() && + regGen[startInsn.GetBB()->GetId()]->TestBit(regNO) && + !FindRegDefBetweenInsn(regNO, startInsn.GetNext(), startInsn.GetBB()->GetLastInsn()).empty()) { + return false; + } + + if (regUse[startInsn.GetBB()->GetId()]->TestBit(regNO)) { + InsnSet useInsnSet; + FindRegUseBetweenInsn(regNO, startInsn.GetNext(), startInsn.GetBB()->GetLastInsn(), useInsnSet); + if (!useInsnSet.empty()) { + return false; + } + } + + if (&startInsn != startInsn.GetBB()->GetLastInsn() && + IsCallerSavedReg(regNO) && + KilledByCallBetweenInsnInSameBB(*startInsn.GetNext(), *startInsn.GetBB()->GetLastInsn(), regNO)) { + return false; + } + + if (&endInsn != endInsn.GetBB()->GetFirstInsn() && + regGen[endInsn.GetBB()->GetId()]->TestBit(regNO) && + !FindRegDefBetweenInsn(regNO, endInsn.GetBB()->GetFirstInsn(), endInsn.GetPrev()).empty()) { + return false; + } + + if (regUse[startInsn.GetBB()->GetId()]->TestBit(regNO)) { + InsnSet useInsnSet; + FindRegUseBetweenInsn(regNO, endInsn.GetBB()->GetFirstInsn(), endInsn.GetPrev(), useInsnSet); + if (!useInsnSet.empty()) { + return false; + } + } + + if (&endInsn != endInsn.GetBB()->GetFirstInsn() && + IsCallerSavedReg(regNO) && + KilledByCallBetweenInsnInSameBB(*endInsn.GetBB()->GetFirstInsn(), *endInsn.GetPrev(), regNO)) { + return false; + } + + std::vector visitedBB(kMaxBBNum, false); + return IsUseOrDefInAllPathBB(regNO, *startInsn.GetBB(), *endInsn.GetBB(), visitedBB); +} + +/* check whether register may be redefined form in the same BB */ +bool ReachingDefinition::IsUseOrDefBetweenInsn(uint32 regNO, const BB &curBB, + const Insn &startInsn, Insn &endInsn) const { + if (regGen[curBB.GetId()]->TestBit(regNO)) { + if (!FindRegDefBetweenInsn(regNO, startInsn.GetNext(), endInsn.GetPrev()).empty()) { + return false; + } + } + if (regUse[curBB.GetId()]->TestBit(regNO)) { + InsnSet useInsnSet; + FindRegUseBetweenInsn(regNO, startInsn.GetNext(), endInsn.GetPrev(), useInsnSet); + if (!useInsnSet.empty()) { + return false; + } + } + return true; +} + +/* check whether register may be redefined form startBB to endBB */ +bool ReachingDefinition::IsUseOrDefInAllPathBB(uint32 regNO, const BB &startBB, const BB &endBB, + std::vector &visitedBB) const { + for (auto succ : startBB.GetSuccs()) { + if (visitedBB[succ->GetId()] || succ == &endBB) { + continue; + } + visitedBB[succ->GetId()] = true; + std::vector traversedPathSet(kMaxBBNum, false); + bool canReachEndBB = true; + if (regGen[succ->GetId()]->TestBit(regNO) || regUse[succ->GetId()]->TestBit(regNO) || + (succ->HasCall() && IsCallerSavedReg(regNO))) { + canReachEndBB = 
CanReachEndBBFromCurrentBB(*succ, endBB, traversedPathSet); + if (canReachEndBB) { + return false; + } + } + if (!canReachEndBB) { + continue; + } + bool isLive = IsUseOrDefInAllPathBB(regNO, *succ, endBB, visitedBB); + if (!isLive) { + return false; + } + } + + for (auto ehSucc : startBB.GetEhSuccs()) { + if (visitedBB[ehSucc->GetId()]) { + continue; + } + visitedBB[ehSucc->GetId()] = true; + std::vector traversedPathSet(kMaxBBNum, false); + bool canReachEndBB = true; + if (regGen[ehSucc->GetId()]->TestBit(regNO) || regUse[ehSucc->GetId()]->TestBit(regNO)) { + canReachEndBB = CanReachEndBBFromCurrentBB(*ehSucc, endBB, traversedPathSet); + if (canReachEndBB) { + return false; + } + } + if (!canReachEndBB) { + continue; + } + bool isLive = IsUseOrDefInAllPathBB(regNO, *ehSucc, endBB, visitedBB); + if (!isLive) { + return false; + } + } + return true; +} + +bool ReachingDefinition::HasCallBetweenInsnInSameBB(const Insn &startInsn, const Insn &endInsn) const { + ASSERT(startInsn.GetBB() == endInsn.GetBB(), "two insns must be in same bb"); + for (const Insn *insn = &startInsn; insn != endInsn.GetNext(); insn = insn->GetNext()) { + if (insn->IsMachineInstruction() && insn->IsCall()) { + return true; + } + } + return false; +} + +/* operand is only defined in startBB, and only used in endBB. + * so traverse from endBB to startBB, all paths reach startBB finally. + * startBB and endBB are different, and call insns in both of them are not counted. + * whether startBB and endBB are in a loop is not counted. + */ +bool ReachingDefinition::HasCallInPath(const BB &startBB, const BB &endBB, std::vector &visitedBB) const { + ASSERT(&startBB != &endBB, "startBB and endBB are not counted"); + std::queue bbQueue; + bbQueue.push(&endBB); + visitedBB[endBB.GetId()] = true; + while (!bbQueue.empty()) { + const BB *bb = bbQueue.front(); + bbQueue.pop(); + for (auto predBB : bb->GetPreds()) { + if (predBB == &startBB || visitedBB[predBB->GetId()]) { + continue; + } + if (predBB->HasCall()) { + return true; + } + visitedBB[predBB->GetId()] = true; + bbQueue.push(predBB); + } + for (auto ehPredBB : bb->GetEhPreds()) { + if (ehPredBB == &startBB || visitedBB[ehPredBB->GetId()]) { + continue; + } + if (ehPredBB->HasCall()) { + return true; + } + visitedBB[ehPredBB->GetId()] = true; + bbQueue.push(ehPredBB); + } + } + return false; +} + +/* because of time cost, this function is not precise, BB in loop is not counted */ +bool ReachingDefinition::HasCallBetweenDefUse(const Insn &defInsn, const Insn &useInsn) const { + if (defInsn.GetBB()->GetId() == useInsn.GetBB()->GetId()) { + if (&useInsn == defInsn.GetNext()) { + return false; + } + if (useInsn.GetId() > defInsn.GetId()) { + return HasCallBetweenInsnInSameBB(defInsn, *useInsn.GetPrev()); + } + /* useInsn is in front of defInsn, we think there is call insn between them conservatively */ + return true; + } + /* check defInsn->GetBB() */ + if (&defInsn != defInsn.GetBB()->GetLastInsn() && + defInsn.GetBB()->HasCall() && + HasCallBetweenInsnInSameBB(*defInsn.GetNext(), *defInsn.GetBB()->GetLastInsn())) { + return true; + } + /* check useInsn->GetBB() */ + if (&useInsn != useInsn.GetBB()->GetFirstInsn() && + useInsn.GetBB()->HasCall() && + HasCallBetweenInsnInSameBB(*useInsn.GetBB()->GetFirstInsn(), *useInsn.GetPrev())) { + return true; + } + std::vector visitedBB(kMaxBBNum, false); + return HasCallInPath(*defInsn.GetBB(), *useInsn.GetBB(), visitedBB); +} + +void ReachingDefinition::EnlargeRegCapacity(uint32 size) { + FOR_ALL_BB(bb, cgFunc) { + 
regIn[bb->GetId()]->EnlargeCapacityToAdaptSize(size); + regOut[bb->GetId()]->EnlargeCapacityToAdaptSize(size); + regGen[bb->GetId()]->EnlargeCapacityToAdaptSize(size); + regUse[bb->GetId()]->EnlargeCapacityToAdaptSize(size); + } +} + +void ReachingDefinition::DumpInfo(const BB &bb, DumpType flag) const { + const DataInfo *info = nullptr; + switch (flag) { + case kDumpRegGen: + LogInfo::MapleLogger() << " regGen:\n"; + info = regGen[bb.GetId()]; + break; + case kDumpRegUse: + LogInfo::MapleLogger() << " regUse:\n"; + info = regUse[bb.GetId()]; + break; + case kDumpRegIn: + LogInfo::MapleLogger() << " regIn:\n"; + info = regIn[bb.GetId()]; + break; + case kDumpRegOut: + LogInfo::MapleLogger() << " regOut:\n"; + info = regOut[bb.GetId()]; + break; + case kDumpMemGen: + LogInfo::MapleLogger() << " memGen:\n"; + info = memGen[bb.GetId()]; + break; + case kDumpMemIn: + LogInfo::MapleLogger() << " memIn:\n"; + info = memIn[bb.GetId()]; + break; + case kDumpMemOut: + LogInfo::MapleLogger() << " memOut:\n"; + info = memOut[bb.GetId()]; + break; + case kDumpMemUse: + LogInfo::MapleLogger() << " memUse:\n"; + info = memUse[bb.GetId()]; + break; + default: + return; + } + ASSERT(info != nullptr, "null ptr check"); + uint32 count = 1; + LogInfo::MapleLogger() << " "; + for (uint32 i = 0; i != info->Size(); ++i) { + if (info->TestBit(i)) { + count += 1; + if (kDumpMemGen <= flag && flag <= kDumpMemUse) { + /* Each element i means a 4 byte stack slot. */ + LogInfo::MapleLogger() << (i * 4) << " "; + } else { + LogInfo::MapleLogger() << i << " "; + } + /* 10 output per line */ + if (count % 10 == 0) { + LogInfo::MapleLogger() << "\n"; + LogInfo::MapleLogger() << " "; + } + } + } + + LogInfo::MapleLogger() << "\n"; +} + +void ReachingDefinition::DumpBBCGIR(const BB &bb) const { + if (bb.IsCleanup()) { + LogInfo::MapleLogger() << "[is_cleanup] "; + } + if (bb.IsUnreachable()) { + LogInfo::MapleLogger() << "[unreachable] "; + } + if (bb.GetSuccs().size() != 0) { + LogInfo::MapleLogger() << " succs: "; + for (auto *succBB : bb.GetSuccs()) { + LogInfo::MapleLogger() << succBB->GetId() << " "; + } + } + if (bb.GetEhSuccs().size() != 0) { + LogInfo::MapleLogger() << " eh_succs: "; + for (auto *ehSuccBB : bb.GetEhSuccs()) { + LogInfo::MapleLogger() << ehSuccBB->GetId() << " "; + } + } + LogInfo::MapleLogger() << "\n"; + + FOR_BB_INSNS_CONST(insn, &bb) { + LogInfo::MapleLogger() << " "; + insn->Dump(); + } + LogInfo::MapleLogger() << "\n"; +} + +void ReachingDefinition::Dump(uint32 flag) const { + MIRSymbol *mirSymbol = GlobalTables::GetGsymTable().GetSymbolFromStidx(cgFunc->GetFunction().GetStIdx().Idx()); + ASSERT(mirSymbol != nullptr, "get symbol in function failed in ReachingDefinition::Dump"); + LogInfo::MapleLogger() << "\n---- Reaching definition analysis for " << mirSymbol->GetName(); + LogInfo::MapleLogger() << " ----\n"; + FOR_ALL_BB(bb, cgFunc) { + LogInfo::MapleLogger() << " === BB_" << bb->GetId() << " ===\n"; + + if ((flag & kDumpBBCGIR) != 0) { + DumpBBCGIR(*bb); + } + + if ((flag & kDumpRegIn) != 0) { + DumpInfo(*bb, kDumpRegIn); + } + + if ((flag & kDumpRegUse) != 0) { + DumpInfo(*bb, kDumpRegUse); + } + + if ((flag & kDumpRegGen) != 0) { + DumpInfo(*bb, kDumpRegGen); + } + + if ((flag & kDumpRegOut) != 0) { + DumpInfo(*bb, kDumpRegOut); + } + + if ((flag & kDumpMemIn) != 0) { + DumpInfo(*bb, kDumpMemIn); + } + + if ((flag & kDumpMemGen) != 0) { + DumpInfo(*bb, kDumpMemGen); + } + + if ((flag & kDumpMemOut) != 0) { + DumpInfo(*bb, kDumpMemOut); + } + + if ((flag & kDumpMemUse) != 0) { + 
DumpInfo(*bb, kDumpMemUse); + } + } + LogInfo::MapleLogger() << "------------------------------------------------------\n"; +} + +bool CgReachingDefinition::PhaseRun(maplebe::CGFunc &f) { +#if TARGAARCH64 || TARGRISCV64 + reachingDef = GetPhaseAllocator()->New(f, *GetPhaseMemPool()); +#endif +#if TARGARM32 + reachingDef = GetPhaseAllocator()->New(f, *GetPhaseMemPool()); +#endif + reachingDef->SetAnalysisMode(kRDAllAnalysis); + reachingDef->AnalysisStart(); + return false; +} +MAPLE_ANALYSIS_PHASE_REGISTER(CgReachingDefinition, reachingdefinition) + +bool CgClearRDInfo::PhaseRun(maplebe::CGFunc &f) { + if (f.GetRDStatus()) { + f.GetRD()->ClearDefUseInfo(); + } + return false; +} +MAPLE_TRANSFORM_PHASE_REGISTER(CgClearRDInfo, clearrdinfo) +} /* namespace maplebe */ diff --git a/src/mapleall/maple_be/src/cg/reg_alloc.cpp b/src/mapleall/maple_be/src/cg/reg_alloc.cpp new file mode 100644 index 0000000000000000000000000000000000000000..04442defd2703e2006aa53484f661da07f1d2d8e --- /dev/null +++ b/src/mapleall/maple_be/src/cg/reg_alloc.cpp @@ -0,0 +1,178 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "reg_alloc.h" +#include "live.h" +#include "loop.h" +#include "cg_dominance.h" +#include "mir_lower.h" +#include "securec.h" +#include "reg_alloc_basic.h" +#include "reg_alloc_lsra.h" +#include "cg.h" +#if TARGAARCH64 +#include "aarch64_color_ra.h" +#endif + +namespace maplebe { + +#ifdef RA_PERF_ANALYSIS +static long loopAnalysisUS = 0; +static long liveAnalysisUS = 0; +static long createRAUS = 0; +static long raUS = 0; +static long cleanupUS = 0; +static long totalUS = 0; +extern void printRATime() { + std::cout << "============================================================\n"; + std::cout << " RA sub-phase time information \n"; + std::cout << "============================================================\n"; + std::cout << "loop analysis cost: " << loopAnalysisUS << "us \n"; + std::cout << "live analysis cost: " << liveAnalysisUS << "us \n"; + std::cout << "create RA cost: " << createRAUS << "us \n"; + std::cout << "doRA cost: " << raUS << "us \n"; + std::cout << "cleanup cost: " << cleanupUS << "us \n"; + std::cout << "RA total cost: " << totalUS << "us \n"; + std::cout << "============================================================\n"; +} +#endif + +bool CgRegAlloc::PhaseRun(maplebe::CGFunc &f) { + bool success = false; + +#ifdef RA_PERF_ANALYSIS + auto begin = std::chrono::system_clock::now(); +#endif + + /* loop Analysis */ +#ifdef RA_PERF_ANALYSIS + auto start = std::chrono::system_clock::now(); +#endif + if (Globals::GetInstance()->GetOptimLevel() > CGOptions::kLevel0) { + (void)GetAnalysisInfoHook()->ForceRunAnalysisPhase, CGFunc>(&CgLoopAnalysis::id, f); + } + +#if TARGAARCH64 + /* dom Analysis */ + DomAnalysis *dom = nullptr; + if (Globals::GetInstance()->GetOptimLevel() > CGOptions::kLevel0 && + f.GetCG()->GetCGOptions().DoColoringBasedRegisterAllocation()) { + MaplePhase *it = GetAnalysisInfoHook()->ForceRunAnalysisPhase, CGFunc>( + 
&CgDomAnalysis::id, f); + dom = static_cast(it)->GetResult(); + CHECK_FATAL(dom != nullptr, "null ptr check"); + } +#endif + +#ifdef RA_PERF_ANALYSIS + auto end = std::chrono::system_clock::now(); + loopAnalysisUS += std::chrono::duration_cast(end - start).count(); +#endif + + while (success == false) { + MemPool *phaseMp = GetPhaseMemPool(); +#ifdef RA_PERF_ANALYSIS + start = std::chrono::system_clock::now(); +#endif + /* live analysis */ + LiveAnalysis *live = nullptr; + if (Globals::GetInstance()->GetOptimLevel() > CGOptions::kLevel0) { + MaplePhase *it = GetAnalysisInfoHook()->ForceRunAnalysisPhase, CGFunc>( + &CgLiveAnalysis::id, f); + live = static_cast(it)->GetResult(); + CHECK_FATAL(live != nullptr, "null ptr check"); + /* revert liveanalysis result container. */ + live->ResetLiveSet(); + } +#ifdef RA_PERF_ANALYSIS + end = std::chrono::system_clock::now(); + liveAnalysisUS += std::chrono::duration_cast(end - start).count(); +#endif + +#ifdef RA_PERF_ANALYSIS + start = std::chrono::system_clock::now(); +#endif + /* create register allocator */ + RegAllocator *regAllocator = nullptr; + MemPool *tempMP = nullptr; + if (Globals::GetInstance()->GetOptimLevel() == CGOptions::kLevel0) { + regAllocator = phaseMp->New(f, *phaseMp); + } else if (Globals::GetInstance()->GetOptimLevel() == CGOptions::kLevelLiteCG) { +#if TARGX86_64 + regAllocator = phaseMp->New(f, *phaseMp); +#endif +#if TARGAARCH64 + maple::LogInfo::MapleLogger(kLlErr) << "Error: -LiteCG option is unsupported for aarch64.\n"; +#endif + } else { +#if TARGAARCH64 + if (f.GetCG()->GetCGOptions().DoLinearScanRegisterAllocation()) { + regAllocator = phaseMp->New(f, *phaseMp); + } else if (f.GetCG()->GetCGOptions().DoColoringBasedRegisterAllocation()) { + tempMP = memPoolCtrler.NewMemPool("colrRA", true); + regAllocator = phaseMp->New(f, *tempMP, *dom); + } else { + maple::LogInfo::MapleLogger(kLlErr) << + "Warning: We only support Linear Scan and GraphColor register allocation\n"; + } +#endif +#if TARGX86_64 + maple::LogInfo::MapleLogger(kLlErr) << + "Error: We only support -O0, and -LiteCG for x64.\n"; +#endif + } +#ifdef RA_PERF_ANALYSIS + end = std::chrono::system_clock::now(); + createRAUS += std::chrono::duration_cast(end - start).count(); +#endif + +#ifdef RA_PERF_ANALYSIS + start = std::chrono::system_clock::now(); +#endif + /* do register allocation */ + CHECK_FATAL(regAllocator != nullptr, "regAllocator is null in CgDoRegAlloc::Run"); + f.SetIsAfterRegAlloc(); + success = regAllocator->AllocateRegisters(); +#ifdef RA_PERF_ANALYSIS + end = std::chrono::system_clock::now(); + raUS += std::chrono::duration_cast(end - start).count(); +#endif + +#ifdef RA_PERF_ANALYSIS + start = std::chrono::system_clock::now(); +#endif + /* the live range info may changed, so invalid the info. 
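+       * (ClearInOutDataInfo below releases the per-BB in/out sets; if another RA round is needed,
+       * live analysis is re-run at the top of the while loop)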
*/ + if (live != nullptr) { + live->ClearInOutDataInfo(); + } + if (Globals::GetInstance()->GetOptimLevel() > CGOptions::kLevel0) { + GetAnalysisInfoHook()->ForceEraseAnalysisPhase(f.GetUniqueID(), &CgLiveAnalysis::id); + } +#ifdef RA_PERF_ANALYSIS + end = std::chrono::system_clock::now(); + cleanupUS += std::chrono::duration_cast(end - start).count(); +#endif + memPoolCtrler.DeleteMemPool(tempMP); + } + if (Globals::GetInstance()->GetOptimLevel() > CGOptions::kLevel0) { + GetAnalysisInfoHook()->ForceEraseAnalysisPhase(f.GetUniqueID(), &CgLoopAnalysis::id); + } + +#ifdef RA_PERF_ANALYSIS + end = std::chrono::system_clock::now(); + totalUS += std::chrono::duration_cast(end - begin).count(); +#endif + return false; +} +} /* namespace maplebe */ diff --git a/src/mapleall/maple_be/src/cg/reg_alloc_basic.cpp b/src/mapleall/maple_be/src/cg/reg_alloc_basic.cpp new file mode 100644 index 0000000000000000000000000000000000000000..4c413d49c6c7a9040f3ac6a3eaac0e0e2e1eb8e2 --- /dev/null +++ b/src/mapleall/maple_be/src/cg/reg_alloc_basic.cpp @@ -0,0 +1,453 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "reg_alloc_basic.h" +#include "cg.h" + +namespace maplebe { +/* +* NB. As an optimization we can use X8 as a scratch (temporary) +* register if the return value is not returned through memory. 
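+* (on AArch64, X8 is the indirect-result register, so it is only needed when the result is
+* returned through memory)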
+*/ +Operand *DefaultO0RegAllocator::HandleRegOpnd(Operand &opnd) { + ASSERT(opnd.IsRegister(), "Operand should be register operand"); + auto ®Opnd = static_cast(opnd); + if (regOpnd.IsOfCC()) { + return &opnd; + } + if (!regInfo->IsVirtualRegister(regOpnd)) { + availRegSet[regOpnd.GetRegisterNumber()] = false; + (void)liveReg.insert(regOpnd.GetRegisterNumber()); + return ®Opnd; + } + auto regMapIt = regMap.find(regOpnd.GetRegisterNumber()); + if (regMapIt != regMap.end()) { /* already allocated this register */ + ASSERT(regMapIt->second < regInfo->GetAllRegNum(), "must be a physical register"); + regno_t newRegNO = regMapIt->second; + availRegSet[newRegNO] = false; /* make sure the real register can not be allocated and live */ + (void)liveReg.insert(newRegNO); + (void)allocatedSet.insert(&opnd); + return &cgFunc->GetOpndBuilder()->CreatePReg(newRegNO, regOpnd.GetSize(), regOpnd.GetRegisterType()); + } + if (AllocatePhysicalRegister(regOpnd)) { + (void)allocatedSet.insert(&opnd); + auto regMapItSecond = regMap.find(regOpnd.GetRegisterNumber()); + ASSERT(regMapItSecond != regMap.end(), " ERROR: can not find register number in regmap "); + return &cgFunc->GetOpndBuilder()->CreatePReg(regMapItSecond->second, regOpnd.GetSize(), regOpnd.GetRegisterType()); + } + + /* use 0 register as spill register */ + regno_t regNO = 0; + return &cgFunc->GetOpndBuilder()->CreatePReg(regNO, regOpnd.GetSize(), regOpnd.GetRegisterType()); +} + +Operand *DefaultO0RegAllocator::HandleMemOpnd(Operand &opnd) { + ASSERT(opnd.IsMemoryAccessOperand(), "Operand should be memory access operand"); + auto *memOpnd = static_cast(&opnd); + Operand *res = nullptr; + if (memOpnd->GetBaseRegister() != nullptr) { + res = AllocSrcOpnd(*memOpnd->GetBaseRegister()); + memOpnd->SetBaseRegister(static_cast(*res)); + } + if (memOpnd->GetIndexRegister() != nullptr) { + res = AllocSrcOpnd(*memOpnd->GetIndexRegister()); + memOpnd->SetIndexRegister(static_cast(*res)); + } + (void)allocatedSet.insert(&opnd); + return memOpnd; +} + +Operand *DefaultO0RegAllocator::AllocSrcOpnd(Operand &opnd) { + if (opnd.IsRegister()) { + if (regInfo->IsUnconcernedReg(static_cast(opnd))) { + return &opnd; + } + return HandleRegOpnd(opnd); + } else if (opnd.IsMemoryAccessOperand()) { + return HandleMemOpnd(opnd); + } + ASSERT(false, "NYI"); + return nullptr; +} + +Operand *DefaultO0RegAllocator::AllocDestOpnd(Operand &opnd, const Insn &insn) { + if (!opnd.IsRegister()) { + ASSERT(false, "result operand must be of type register"); + return nullptr; + } + auto ®Opnd = static_cast(opnd); + if (regInfo->IsUnconcernedReg(static_cast(opnd))) { + return &opnd; + } + if (!regInfo->IsVirtualRegister(regOpnd)) { + auto reg = regOpnd.GetRegisterNumber(); + availRegSet[reg] = true; + uint32 id = GetRegLivenessId(regOpnd.GetRegisterNumber()); + if (id != 0 && id <= insn.GetId()) { + ReleaseReg(reg); + } + return &opnd; + } + + auto regMapIt = regMap.find(regOpnd.GetRegisterNumber()); + if (regMapIt != regMap.end()) { + regno_t reg = regMapIt->second; + if (!insn.IsCondDef()) { + uint32 id = GetRegLivenessId(regOpnd.GetRegisterNumber()); + if (id != 0 && id <= insn.GetId()) { + ReleaseReg(reg); + } + } + } else { + /* AllocatePhysicalRegister insert a mapping from vreg no to phy reg no into regMap */ + if (AllocatePhysicalRegister(regOpnd)) { + regMapIt = regMap.find(regOpnd.GetRegisterNumber()); + if (!insn.IsCondDef()) { + uint32 id = GetRegLivenessId(regOpnd.GetRegisterNumber()); + if (id && (id <= insn.GetId())) { + ReleaseReg(regMapIt->second); + } + } + } else { + 
/* For register spill. use 0 register as spill register */ + regno_t regNO = 0; + return &cgFunc->GetOpndBuilder()->CreatePReg(regNO, regOpnd.GetSize(), regOpnd.GetRegisterType()); + } + } + (void)allocatedSet.insert(&opnd); + return &cgFunc->GetOpndBuilder()->CreatePReg(regMapIt->second, regOpnd.GetSize(), regOpnd.GetRegisterType()); +} + +void DefaultO0RegAllocator::InitAvailReg() { + for (auto it : regInfo->GetAllRegs()) { + availRegSet[it] = true; + } +} + +void DefaultO0RegAllocator::ReleaseReg(const RegOperand ®Opnd) { + ReleaseReg(regMap[regOpnd.GetRegisterNumber()]); +} + +void DefaultO0RegAllocator::ReleaseReg(regno_t reg) { + ASSERT(reg < regInfo->GetAllRegNum(), "can't release virtual register"); + liveReg.erase(reg); + /* Special registers can not be allocated */ + if (!regInfo->IsSpecialReg(reg)) { + availRegSet[reg] = true; + } +} + +/* trying to allocate a physical register to opnd. return true if success */ +bool DefaultO0RegAllocator::AllocatePhysicalRegister(const RegOperand &opnd) { + RegType regType = opnd.GetRegisterType(); + regno_t regNo = opnd.GetRegisterNumber(); + auto ®Bank = regInfo->GetRegsFromType(regType); + regno_t regStart = *regBank.begin(); + regno_t regEnd = *regBank.rbegin(); + + const auto opndRegIt = regLiveness.find(regNo); + for (regno_t reg = regStart; reg <= regEnd; ++reg) { + if (!availRegSet[reg]) { + continue; + } + + if (opndRegIt != regLiveness.end()) { + const auto regIt = regLiveness.find(reg); + ASSERT(opndRegIt->second.size() == 1, "NIY, opnd reg liveness range must be 1."); + if (regIt != regLiveness.end() && + CheckRangesOverlap(opndRegIt->second.front(), regIt->second)) { + continue; + } + } + + regMap[opnd.GetRegisterNumber()] = reg; + availRegSet[reg] = false; + (void)liveReg.insert(reg); /* this register is live now */ + return true; + } + return false; +} + +/* If opnd is a callee saved register, save it in the prolog and restore it in the epilog */ +void DefaultO0RegAllocator::SaveCalleeSavedReg(const RegOperand ®Opnd) { + regno_t regNO = regOpnd.GetRegisterNumber(); + auto phyReg = regInfo->IsVirtualRegister(regOpnd) ? regMap[regNO] : regNO; + /* when yieldpoint is enabled, skip the reserved register for yieldpoint. */ + if (cgFunc->GetCG()->GenYieldPoint() && (regInfo->IsYieldPointReg(phyReg))) { + return; + } + + if (regInfo->IsCalleeSavedReg(phyReg)) { + calleeSaveUsed.insert(phyReg); + } +} + +uint32 DefaultO0RegAllocator::GetRegLivenessId(regno_t regNo) { + auto regIt = regLiveness.find(regNo); + return ((regIt == regLiveness.end()) ? 0 : regIt->second.back().second); +} + +bool DefaultO0RegAllocator::CheckRangesOverlap(const std::pair &range1, + const MapleVector> &ranges2) const { + /* + * Check whether range1 and ranges2 overlap. + * The ranges2 is sorted. 
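+   * std::lower_bound locates the first range in ranges2 whose end lies beyond the start of
+   * range1; because ranges2 is sorted, that is the only candidate that can intersect range1.
+   * Two closed ranges overlap iff max(range1.first, range2.first) <= min(range1.second, range2.second).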
+ */ + auto pos = std::lower_bound(ranges2.begin(), ranges2.end(), range1, + [](const std::pair &r2, const std::pair &r1) { + return r1.first >= r2.second; + }); + if (pos == ranges2.end()) { + return false; + } + auto &range2 = *pos; + if (std::max(range1.first, range2.first) <= std::min(range1.second, range2.second)) { + return true; + } + return false; +} + +void DefaultO0RegAllocator::SetupRegLiveness(BB *bb) { + regLiveness.clear(); + + uint32 id = 1; + FOR_BB_INSNS_REV(insn, bb) { + if (!insn->IsMachineInstruction()) { + continue; + } + insn->SetId(id); + id++; + uint32 opndNum = insn->GetOperandSize(); + const InsnDesc *curMd = insn->GetDesc(); + for (uint32 i = 0; i < opndNum; i++) { + Operand &opnd = insn->GetOperand(i); + const OpndDesc *opndDesc = curMd->GetOpndDes(i); + if (opnd.IsRegister()) { + /* def-use is processed by use */ + SetupRegLiveness(static_cast(opnd), insn->GetId(), !opndDesc->IsUse()); + } else if (opnd.IsMemoryAccessOperand()) { + SetupRegLiveness(static_cast(opnd), insn->GetId()); + } else if (opnd.IsList()) { + SetupRegLiveness(static_cast(opnd), insn->GetId(), opndDesc->IsDef()); + } + } + } + + /* clear the last empty range */ + for (auto ®LivenessIt : regLiveness) { + auto ®LivenessRanges = regLivenessIt.second; + if (regLivenessRanges.back().first == 0) { + regLivenessRanges.pop_back(); + } + } +} + +void DefaultO0RegAllocator::SetupRegLiveness(MemOperand &opnd, uint32 insnId) { + /* base regOpnd is use in O0 */ + if (opnd.GetBaseRegister()) { + SetupRegLiveness(*opnd.GetBaseRegister(), insnId, false); + } + /* index regOpnd must be use */ + if (opnd.GetIndexRegister()) { + SetupRegLiveness(*opnd.GetIndexRegister(), insnId, false); + } +} + +void DefaultO0RegAllocator::SetupRegLiveness(ListOperand &opnd, uint32 insnId, bool isDef) { + for (RegOperand *regOpnd : opnd.GetOperands()) { + SetupRegLiveness(*regOpnd, insnId, isDef); + } +} + +void DefaultO0RegAllocator::SetupRegLiveness(RegOperand &opnd, uint32 insnId, bool isDef) { + MapleVector> ranges(alloc.Adapter()); + auto regLivenessIt = regLiveness.emplace(opnd.GetRegisterNumber(), ranges).first; + auto ®LivenessRanges = regLivenessIt->second; + if (regLivenessRanges.empty()) { + regLivenessRanges.push_back(std::make_pair(0, 0)); + } + auto ®LivenessLastRange = regLivenessRanges.back(); + if (regLivenessLastRange.first == 0) { + regLivenessLastRange.first = insnId; + } + regLivenessLastRange.second = insnId; + + /* create new range, only phyReg need to be segmented */ + if (isDef && regInfo->IsAvailableReg(opnd.GetRegisterNumber())) { + regLivenessRanges.push_back(std::make_pair(0, 0)); + } +} + +void DefaultO0RegAllocator::AllocHandleDestList(Insn &insn, Operand &opnd, uint32 idx) { + if (!opnd.IsList()) { + return; + } + auto *listOpnds = &static_cast(opnd); + auto *listOpndsNew = &cgFunc->GetOpndBuilder()->CreateList(); + for (auto *dstOpnd : listOpnds->GetOperands()) { + if (allocatedSet.find(dstOpnd) != allocatedSet.end()) { + auto ®Opnd = static_cast(*dstOpnd); + SaveCalleeSavedReg(regOpnd); + listOpndsNew->PushOpnd( + cgFunc->GetOpndBuilder()->CreatePReg( + regMap[regOpnd.GetRegisterNumber()], regOpnd.GetSize(), regOpnd.GetRegisterType())); + continue; /* already allocated */ + } + RegOperand *regOpnd = static_cast(AllocDestOpnd(*dstOpnd, insn)); + ASSERT(regOpnd != nullptr, "null ptr check"); + auto physRegno = regOpnd->GetRegisterNumber(); + availRegSet[physRegno] = false; + (void)liveReg.insert(physRegno); + listOpndsNew->PushOpnd( + cgFunc->GetOpndBuilder()->CreatePReg(physRegno, 
regOpnd->GetSize(), regOpnd->GetRegisterType())); + } + insn.SetOperand(idx, *listOpndsNew); + for (auto *dstOpnd : listOpndsNew->GetOperands()) { + uint32 id = GetRegLivenessId(dstOpnd->GetRegisterNumber()); + if (id != 0 && id <= insn.GetId()) { + ReleaseReg(*dstOpnd); + } + } +} + +void DefaultO0RegAllocator::AllocHandleDest(Insn &insn, Operand &opnd, uint32 idx) { + if (allocatedSet.find(&opnd) != allocatedSet.end()) { + /* free the live range of this register */ + auto ®Opnd = static_cast(opnd); + SaveCalleeSavedReg(regOpnd); + if (insn.IsAtomicStore() || insn.IsSpecialIntrinsic()) { + /* remember the physical machine register assigned */ + regno_t regNO = regOpnd.GetRegisterNumber(); + rememberRegs.push_back(regInfo->IsVirtualRegister(regOpnd) ? regMap[regNO] : regNO); + } else if (!insn.IsCondDef()) { + uint32 id = GetRegLivenessId(regOpnd.GetRegisterNumber()); + if (id != 0 && id <= insn.GetId()) { + ReleaseReg(regOpnd); + } + } + insn.SetOperand(idx, cgFunc->GetOpndBuilder()->CreatePReg( + regMap[regOpnd.GetRegisterNumber()], regOpnd.GetSize(), regOpnd.GetRegisterType())); + return; /* already allocated */ + } + + if (opnd.IsRegister()) { + insn.SetOperand(idx, *AllocDestOpnd(opnd, insn)); + SaveCalleeSavedReg(static_cast(opnd)); + } +} + +void DefaultO0RegAllocator::AllocHandleSrcList(Insn &insn, Operand &opnd, uint32 idx) { + if (!opnd.IsList()) { + return; + } + auto *listOpnds = &static_cast(opnd); + auto *listOpndsNew = &cgFunc->GetOpndBuilder()->CreateList(); + for (auto *srcOpnd : listOpnds->GetOperands()) { + if (allocatedSet.find(srcOpnd) != allocatedSet.end()) { + auto *regOpnd = static_cast(srcOpnd); + regno_t reg = regMap[regOpnd->GetRegisterNumber()]; + availRegSet[reg] = false; + (void)liveReg.insert(reg); /* this register is live now */ + listOpndsNew->PushOpnd( + cgFunc->GetOpndBuilder()->CreatePReg(reg, regOpnd->GetSize(), regOpnd->GetRegisterType())); + continue; /* already allocated */ + } + RegOperand *regOpnd = static_cast(AllocSrcOpnd(*srcOpnd)); + CHECK_NULL_FATAL(regOpnd); + listOpndsNew->PushOpnd(*regOpnd); + } + insn.SetOperand(idx, *listOpndsNew); +} + +void DefaultO0RegAllocator::AllocHandleSrc(Insn &insn, Operand &opnd, uint32 idx) { + if (allocatedSet.find(&opnd) != allocatedSet.end() && opnd.IsRegister()) { + auto *regOpnd = &static_cast(opnd); + regno_t reg = regMap[regOpnd->GetRegisterNumber()]; + availRegSet[reg] = false; + (void)liveReg.insert(reg); /* this register is live now */ + insn.SetOperand( + idx, cgFunc->GetOpndBuilder()->CreatePReg(reg, regOpnd->GetSize(), regOpnd->GetRegisterType())); + } else { + Operand *srcOpnd = AllocSrcOpnd(opnd); + CHECK_NULL_FATAL(srcOpnd); + insn.SetOperand(idx, *srcOpnd); + } +} + +bool DefaultO0RegAllocator::AllocateRegisters() { + InitAvailReg(); + cgFunc->SetIsAfterRegAlloc(); + + FOR_ALL_BB_REV(bb, cgFunc) { + if (bb->IsEmpty()) { + continue; + } + + SetupRegLiveness(bb); + FOR_BB_INSNS_REV(insn, bb) { + if (!insn->IsMachineInstruction()) { + continue; + } + + /* handle inline assembly first due to specific def&use order */ + if (insn->IsAsmInsn()) { + AllocHandleDestList(*insn, insn->GetOperand(kAsmClobberListOpnd), kAsmClobberListOpnd); + AllocHandleDestList(*insn, insn->GetOperand(kAsmOutputListOpnd), kAsmOutputListOpnd); + AllocHandleSrcList(*insn, insn->GetOperand(kAsmInputListOpnd), kAsmInputListOpnd); + } + + const InsnDesc *curMd = insn->GetDesc(); + + for (uint32 i = 0; i < insn->GetOperandSize() && !insn->IsAsmInsn(); ++i) { /* the dest registers */ + Operand &opnd = insn->GetOperand(i); + if 
(!(opnd.IsRegister() && curMd->GetOpndDes(i)->IsDef())) { + continue; + } + if (opnd.IsList()) { + AllocHandleDestList(*insn, opnd, i); + } else { + AllocHandleDest(*insn, opnd, i); + } + } + + for (uint32 i = 0; i < insn->GetOperandSize() && !insn->IsAsmInsn(); ++i) { /* the src registers */ + Operand &opnd = insn->GetOperand(i); + if (!((opnd.IsRegister() && curMd->GetOpndDes(i)->IsUse()) || opnd.IsMemoryAccessOperand())) { + continue; + } + if (opnd.IsList()) { + AllocHandleSrcList(*insn, opnd, i); + } else { + AllocHandleSrc(*insn, opnd, i); + } + } + + /* hack. a better way to handle intrinsics? */ + for (auto rememberReg : rememberRegs) { + ASSERT(rememberReg != regInfo->GetInvalidReg(), "not a valid register"); + ReleaseReg(rememberReg); + } + rememberRegs.clear(); + } + } + /* + * we store both FP/LR if using FP or if not using FP, but func has a call + * Using FP, record it for saving + * notice the order here : the first callee saved reg is expected to be RFP. + */ + regInfo->Fini(); + regInfo->SaveCalleeSavedReg(calleeSaveUsed); + return true; +} +} /* namespace maplebe */ diff --git a/src/mapleall/maple_be/src/cg/reg_alloc_lsra.cpp b/src/mapleall/maple_be/src/cg/reg_alloc_lsra.cpp new file mode 100644 index 0000000000000000000000000000000000000000..fc6fe9957180f4a93d478e634901947279057a5f --- /dev/null +++ b/src/mapleall/maple_be/src/cg/reg_alloc_lsra.cpp @@ -0,0 +1,2462 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */
+
+#include "reg_alloc_lsra.h"
+#include <fstream>   /* std::ofstream, used by PrintLiveRanges below */
+#include <iomanip>   /* std::setw, used by PrintLiveInterval */
+#include <iostream>  /* std::cout, used by the RA_PERF_ANALYSIS dump */
+
+namespace maplebe {
+/*
+ * ==================
+ * = Linear Scan RA
+ * ==================
+ */
+#define LSRA_DUMP (CG_DEBUG_FUNC(*cgFunc))
+
+namespace {
+constexpr uint32 kSpilled = 1;
+constexpr uint32 kSpecialIntSpillReg = 16;
+constexpr uint32 kMinLiveIntervalLength = 20;
+constexpr uint32 kPrintedActiveListLength = 10;
+constexpr uint32 kMinRangesSize = 2;
+}
+
+static uint32 insnNumBeforRA = 0;
+
+#define IN_SPILL_RANGE \
+  (cgFunc->GetName().find(CGOptions::GetDumpFunc()) != std::string::npos && (++debugSpillCnt > 0) && \
+  (CGOptions::GetSpillRangesBegin() < debugSpillCnt) && (debugSpillCnt < CGOptions::GetSpillRangesEnd()))
+
+#undef LSRA_GRAPH
+
+#ifdef RA_PERF_ANALYSIS
+static long bfsUS = 0;
+static long liveIntervalUS = 0;
+static long holesUS = 0;
+static long lsraUS = 0;
+static long finalizeUS = 0;
+static long totalUS = 0;
+
+extern void printLSRATime() {
+  std::cout << "============================================================\n";
+  std::cout << " LSRA sub-phase time information \n";
+  std::cout << "============================================================\n";
+  std::cout << "BFS BB sorting cost: " << bfsUS << "us \n";
+  std::cout << "live interval computing cost: " << liveIntervalUS << "us \n";
+  std::cout << "live range approximation cost: " << holesUS << "us \n";
+  std::cout << "LSRA cost: " << lsraUS << "us \n";
+  std::cout << "finalize cost: " << finalizeUS << "us \n";
+  std::cout << "LSRA total cost: " << totalUS << "us \n";
+  std::cout << "============================================================\n";
+}
+#endif
+
+/*
+ * This LSRA implementation is an interpretation of the [Poletto97] paper.
+ * BFS BB ordering is used to order the instructions. The live intervals are based on
+ * this instruction order. All vreg defines should come before a use, else a warning is
+ * given.
+ * Live intervals are traversed in order, from lower instruction order to higher order.
+ * When a live interval is encountered for the first time, it is assumed to be live and placed
+ * inside the 'active' structure until the vreg's last access. While a vreg
+ * is in 'active', it occupies a physical register and no other vreg can
+ * be allocated the same physical register.
+ */
+void LSRALinearScanRegAllocator::PrintRegSet(const MapleSet<uint32> &set, const std::string &str) const {
+  LogInfo::MapleLogger() << str;
+  for (auto reg : set) {
+    LogInfo::MapleLogger() << " " << reg;
+  }
+  LogInfo::MapleLogger() << "\n";
+}
+
+bool LSRALinearScanRegAllocator::CheckForReg(Operand &opnd, const Insn &insn, const LiveInterval &li, regno_t regNO,
+                                             bool isDef) const {
+  if (!opnd.IsRegister()) {
+    return false;
+  }
+  auto &regOpnd = static_cast<RegOperand&>(opnd);
+  if (regOpnd.GetRegisterType() == kRegTyCc || regOpnd.GetRegisterType() == kRegTyVary) {
+    return false;
+  }
+  if (regOpnd.GetRegisterNumber() == regNO) {
+    LogInfo::MapleLogger() << "set object circle at " << insn.GetId() << "," << li.GetRegNO() <<
+        " size 5 fillcolor rgb \"";
+    if (isDef) {
+      LogInfo::MapleLogger() << "black\"\n";
+    } else {
+      LogInfo::MapleLogger() << "orange\"\n";
+    }
+  }
+  return true;
+}
+
+/*
+ * This is a support routine to compute the overlapping live intervals in graph form.
+ * The output file can be viewed by gnuplot.
+ * Despite the function name saying LiveRanges, it is using live intervals.
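+ * Output goes to two files: plot.pg holds the gnuplot script and plot.dat holds the data,
+ * one row per instruction id and one column per virtual register in [minVregNum, maxVregNum].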
+ */ +void LSRALinearScanRegAllocator::PrintLiveRanges() const { + /* ================= Output to plot.pg =============== */ + std::ofstream out("plot.pg"); + CHECK_FATAL(out.is_open(), "Failed to open output file: plot.pg"); + std::streambuf *coutBuf = LogInfo::MapleLogger().rdbuf(); /* old buf */ + LogInfo::MapleLogger().rdbuf(out.rdbuf()); /* new buf */ + + LogInfo::MapleLogger() << "#!/usr/bin/gnuplot\n"; + LogInfo::MapleLogger() << "#maxInsnNum " << maxInsnNum << "\n"; + LogInfo::MapleLogger() << "#minVregNum " << minVregNum << "\n"; + LogInfo::MapleLogger() << "#maxVregNum " << maxVregNum << "\n"; + LogInfo::MapleLogger() << "reset\nset terminal png\n"; + LogInfo::MapleLogger() << "set xrange [1:" << maxInsnNum << "]\n"; + LogInfo::MapleLogger() << "set grid\nset style data linespoints\n"; + LogInfo::MapleLogger() << "set datafile missing '0'\n"; + std::vector> graph; + graph.resize(maxVregNum); + for (uint32 i = 0; i < maxVregNum; ++i) { + graph[i].resize(maxInsnNum); + } + uint32 minY = 0xFFFFFFFF; + uint32 maxY = 0; + for (auto *li : liveIntervalsArray) { + if (li == nullptr || li->GetRegNO() == 0) { + continue; + } + uint32 regNO = li->GetRegNO(); + if ((li->GetLastUse() - li->GetFirstDef()) < kMinLiveIntervalLength) { + continue; + } + if (regNO < minY) { + minY = regNO; + } + if (regNO > maxY) { + maxY = regNO; + } + uint32 n; + for (n = 0; n <= (li->GetFirstDef() - 1); ++n) { + graph[regNO - minVregNum][n] = 0; + } + if (li->GetLastUse() >= n) { + for (; n <= (li->GetLastUse() - 1); ++n) { + graph[regNO - minVregNum][n] = regNO; + } + } + for (; n < maxInsnNum; ++n) { + graph[regNO - minVregNum][n] = 0; + } + + for (auto *bb : bfs->sortedBBs) { + FOR_BB_INSNS(insn, bb) { + const InsnDesc *md = insn->GetDesc(); + uint32 opndNum = insn->GetOperandSize(); + for (uint32 iSecond = 0; iSecond < opndNum; ++iSecond) { + Operand &opnd = insn->GetOperand(iSecond); + const OpndDesc *regProp = md->GetOpndDes(iSecond); + ASSERT(regProp != nullptr, "pointer is null in LSRALinearScanRegAllocator::PrintLiveRanges"); + bool isDef = regProp->IsRegDef(); + if (opnd.IsList()) { + auto &listOpnd = static_cast(opnd); + for (auto op : listOpnd.GetOperands()) { + (void)CheckForReg(*op, *insn, *li, regNO, isDef); + } + } else if (opnd.IsMemoryAccessOperand()) { + auto &memOpnd = static_cast(opnd); + Operand *base = memOpnd.GetBaseRegister(); + Operand *offset = memOpnd.GetIndexRegister(); + if (base != nullptr && !CheckForReg(*base, *insn, *li, regNO, false)) { + continue; + } + if (offset != nullptr && !CheckForReg(*offset, *insn, *li, regNO, false)) { + continue; + } + } else { + (void)CheckForReg(opnd, *insn, *li, regNO, isDef); + } + } + } + } + } + LogInfo::MapleLogger() << "set yrange [" << (minY - 1) << ":" << (maxY + 1) << "]\n"; + + LogInfo::MapleLogger() << "plot \"plot.dat\" using 1:2 title \"R" << minVregNum << "\""; + for (uint32 i = 1; i < ((maxVregNum - minVregNum) + 1); ++i) { + LogInfo::MapleLogger() << ", \\\n\t\"\" using 1:" << (i + kDivide2) << " title \"R" << (minVregNum + i) << "\""; + } + LogInfo::MapleLogger() << ";\n"; + + /* ================= Output to plot.dat =============== */ + std::ofstream out2("plot.dat"); + CHECK_FATAL(out2.is_open(), "Failed to open output file: plot.dat"); + LogInfo::MapleLogger().rdbuf(out2.rdbuf()); /* new buf */ + LogInfo::MapleLogger() << "##reg"; + for (uint32 i = minVregNum; i <= maxVregNum; ++i) { + LogInfo::MapleLogger() << " R" << i; + } + LogInfo::MapleLogger() << "\n"; + for (uint32 n = 0; n < maxInsnNum; ++n) { + LogInfo::MapleLogger() 
<< (n + 1); + for (uint32 i = minVregNum; i <= maxVregNum; ++i) { + LogInfo::MapleLogger() << " " << graph[i - minVregNum][n]; + } + LogInfo::MapleLogger() << "\n"; + } + LogInfo::MapleLogger().rdbuf(coutBuf); +} + +void LSRALinearScanRegAllocator::PrintLiveInterval(const LiveInterval &li, const std::string &str) const { + LogInfo::MapleLogger() << str << "\n"; + if (li.GetIsCall() != nullptr) { + LogInfo::MapleLogger() << " firstDef " << li.GetFirstDef(); + LogInfo::MapleLogger() << " isCall"; + } else if (li.GetPhysUse() > 0) { + LogInfo::MapleLogger() << "\tregNO " << li.GetRegNO(); + LogInfo::MapleLogger() << " firstDef " << li.GetFirstDef(); + LogInfo::MapleLogger() << " physUse " << li.GetPhysUse(); + LogInfo::MapleLogger() << " endByCall " << li.IsEndByCall(); + } else { + /* show regno/firstDef/lastUse with 5/8/8 width respectively */ + LogInfo::MapleLogger() << "\tregNO " << std::setw(5) << li.GetRegNO(); + LogInfo::MapleLogger() << " firstDef " << std::setw(8) << li.GetFirstDef(); + LogInfo::MapleLogger() << " lastUse " << std::setw(8) << li.GetLastUse(); + LogInfo::MapleLogger() << " assigned " << li.GetAssignedReg(); + LogInfo::MapleLogger() << " refCount " << li.GetRefCount(); + LogInfo::MapleLogger() << " priority " << li.GetPriority(); + } + LogInfo::MapleLogger() << " object_address 0x" << std::hex << &li << std::dec << "\n"; +} + +void LSRALinearScanRegAllocator::PrintParamQueue(const std::string &str) { + LogInfo::MapleLogger() << str << "\n"; + for (SingleQue &que : intParamQueue) { + if (que.empty()) { + continue; + } + LiveInterval *li = que.front(); + LiveInterval *last = que.back(); + PrintLiveInterval(*li, ""); + while (li != last) { + que.pop_front(); + que.push_back(li); + li = que.front(); + PrintLiveInterval(*li, ""); + } + que.pop_front(); + que.push_back(li); + } +} + +void LSRALinearScanRegAllocator::PrintCallQueue(const std::string &str) const { + LogInfo::MapleLogger() << str << "\n"; + for (auto *li : callList) { + PrintLiveInterval(*li, ""); + } +} + +void LSRALinearScanRegAllocator::PrintActiveList(const std::string &str, uint32 len) const { + uint32 count = 0; + LogInfo::MapleLogger() << str << " " << active.size() << "\n"; + for (auto *li : active) { + PrintLiveInterval(*li, ""); + ++count; + if ((len != 0) && (count == len)) { + break; + } + } +} + +void LSRALinearScanRegAllocator::PrintActiveListSimple() const { + for (const auto *li : active) { + uint32 assignedReg = li->GetAssignedReg(); + if (li->GetStackSlot() == kSpilled) { + assignedReg = kSpecialIntSpillReg; + } + LogInfo::MapleLogger() << li->GetRegNO() << "(" << assignedReg << ", "; + if (li->GetPhysUse() > 0) { + LogInfo::MapleLogger() << "p) "; + } else { + LogInfo::MapleLogger() << li->GetFirstAcrossedCall(); + } + LogInfo::MapleLogger() << "<" << li->GetFirstDef() << "," << li->GetLastUse() << ">) "; + } + LogInfo::MapleLogger() << "\n"; +} + +void LSRALinearScanRegAllocator::PrintLiveIntervals() const { + /* vreg LogInfo */ + for (auto *li : liveIntervalsArray) { + if (li == nullptr || li->GetRegNO() == 0) { + continue; + } + PrintLiveInterval(*li, ""); + } + LogInfo::MapleLogger() << "\n"; + /* preg LogInfo */ + for (auto param : intParamQueue) { + for (auto *li : param) { + if (li == nullptr || li->GetRegNO() == 0) { + continue; + } + PrintLiveInterval(*li, ""); + } + } + LogInfo::MapleLogger() << "\n"; +} + +void LSRALinearScanRegAllocator::DebugCheckActiveList() const { + LiveInterval *prev = nullptr; + for (auto *li : active) { + if (prev != nullptr) { + if ((li->GetRegNO() <= 
regInfo->GetLastParamsFpReg()) && (prev->GetRegNO() > regInfo->GetLastParamsFpReg())) { + if (li->GetFirstDef() < prev->GetFirstDef()) { + LogInfo::MapleLogger() << "ERRer: active list with out of order phys + vreg\n"; + PrintLiveInterval(*prev, "prev"); + PrintLiveInterval(*li, "current"); + PrintActiveList("Active", kPrintedActiveListLength); + } + } + if ((li->GetRegNO() <= regInfo->GetLastParamsFpReg()) && (prev->GetRegNO() <= regInfo->GetLastParamsFpReg())) { + if (li->GetFirstDef() < prev->GetFirstDef()) { + LogInfo::MapleLogger() << "ERRer: active list with out of order phys reg use\n"; + PrintLiveInterval(*prev, "prev"); + PrintLiveInterval(*li, "current"); + PrintActiveList("Active", kPrintedActiveListLength); + } + } + } else { + prev = li; + } + } +} + +/* + * Prepare the free physical register pool for allocation. + * When a physical register is allocated, it is removed from the pool. + * The physical register is re-inserted into the pool when the associated live + * interval has ended. + */ +void LSRALinearScanRegAllocator::InitFreeRegPool() { + for (regno_t regNO = regInfo->GetInvalidReg(); regNO < regInfo->GetAllRegNum(); ++regNO) { + if (!regInfo->IsAvailableReg(regNO)) { + continue; + } + if (regInfo->IsGPRegister(regNO)) { + if (regInfo->IsYieldPointReg(regNO)) { + continue; + } + /* ExtraSpillReg */ + if (regInfo->IsSpillRegInRA(regNO, needExtraSpillReg)) { + intSpillRegSet.push_back(regNO - firstIntReg); + continue; + } + if (regInfo->IsPreAssignedReg(regNO)) { + /* Parameters/Return registers */ + (void)intParamRegSet.insert(regNO - firstIntReg); + intParamMask |= 1u << (regNO - firstIntReg); + } else if (regInfo->IsCalleeSavedReg(regNO)) { + /* callee-saved registers */ + (void)intCalleeRegSet.insert(regNO - firstIntReg); + intCalleeMask |= 1u << (regNO - firstIntReg); + } else { + /* caller-saved registers */ + (void)intCallerRegSet.insert(regNO - firstIntReg); + intCallerMask |= 1u << (regNO - firstIntReg); + } + } else { + /* fp ExtraSpillReg */ + if (regInfo->IsSpillRegInRA(regNO, needExtraSpillReg)) { + fpSpillRegSet.push_back(regNO - firstFpReg); + continue; + } + if (regInfo->IsPreAssignedReg(regNO)) { + /* fp Parameters/Return registers */ + (void)fpParamRegSet.insert(regNO - firstFpReg); + fpParamMask |= 1u << (regNO - firstFpReg); + } else if (regInfo->IsCalleeSavedReg(regNO)) { + /* fp callee-saved registers */ + (void)fpCalleeRegSet.insert(regNO - firstFpReg); + fpCalleeMask |= 1u << (regNO - firstFpReg); + } else { + /* fp caller-saved registers */ + (void)fpCallerRegSet.insert(regNO - firstFpReg); + fpCallerMask |= 1u << (regNO - firstFpReg); + } + } + } + + if (LSRA_DUMP) { + PrintRegSet(intCallerRegSet, "ALLOCATABLE_INT_CALLER"); + PrintRegSet(intCalleeRegSet, "ALLOCATABLE_INT_CALLEE"); + PrintRegSet(intParamRegSet, "ALLOCATABLE_INT_PARAM"); + PrintRegSet(fpCallerRegSet, "ALLOCATABLE_FP_CALLER"); + PrintRegSet(fpCalleeRegSet, "ALLOCATABLE_FP_CALLEE"); + PrintRegSet(fpParamRegSet, "ALLOCATABLE_FP_PARAM"); + LogInfo::MapleLogger() << "INT_SPILL_REGS"; + for (uint32 intSpillRegNO : intSpillRegSet) { + LogInfo::MapleLogger() << " " << intSpillRegNO; + } + LogInfo::MapleLogger() << "\n"; + LogInfo::MapleLogger() << "FP_SPILL_REGS"; + for (uint32 fpSpillRegNO : fpSpillRegSet) { + LogInfo::MapleLogger() << " " << fpSpillRegNO; + } + LogInfo::MapleLogger() << "\n"; + LogInfo::MapleLogger() << std::hex; + LogInfo::MapleLogger() << "INT_CALLER_MASK " << intCallerMask << "\n"; + LogInfo::MapleLogger() << "INT_CALLEE_MASK " << intCalleeMask << "\n"; + 
LogInfo::MapleLogger() << "INT_PARAM_MASK " << intParamMask << "\n"; + LogInfo::MapleLogger() << "FP_CALLER_FP_MASK " << fpCallerMask << "\n"; + LogInfo::MapleLogger() << "FP_CALLEE_FP_MASK " << fpCalleeMask << "\n"; + LogInfo::MapleLogger() << "FP_PARAM_FP_MASK " << fpParamMask << "\n"; + LogInfo::MapleLogger() << std::dec; + } +} + +/* Remember calls for caller/callee allocation. */ +void LSRALinearScanRegAllocator::RecordCall(Insn &insn) { + /* Maintain call at the beginning of active list */ + auto *li = memPool->New(alloc); + li->SetFirstDef(insn.GetId()); + li->SetIsCall(insn); + callList.push_back(li); +} + +void LSRALinearScanRegAllocator::RecordPhysRegs(const RegOperand ®Opnd, uint32 insnNum, bool isDef) { + RegType regType = regOpnd.GetRegisterType(); + uint32 regNO = regOpnd.GetRegisterNumber(); + if (regType == kRegTyCc || regType == kRegTyVary) { + return; + } + if (regInfo->IsUntouchableReg(regNO)) { + return; + } + if (!regInfo->IsPreAssignedReg(regNO)) { + return; + } + if (isDef) { + /* parameter/return register def is assumed to be live until a call. */ + auto *li = memPool->New(alloc); + li->SetRegNO(regNO); + li->SetRegType(regType); + li->SetStackSlot(0xFFFFFFFF); + li->SetFirstDef(insnNum); + li->SetPhysUse(insnNum); + li->SetAssignedReg(regNO); + + if (regType == kRegTyInt) { + intParamQueue[regInfo->GetIntParamRegIdx(regNO)].push_back(li); + } else { + fpParamQueue[regInfo->GetFpParamRegIdx(regNO)].push_back(li); + } + } else { + if (regType == kRegTyInt) { + CHECK_FATAL(!intParamQueue[regInfo->GetIntParamRegIdx(regNO)].empty(), + "was not defined before use, impossible"); + LiveInterval *li = intParamQueue[regInfo->GetIntParamRegIdx(regNO)].back(); + li->SetPhysUse(insnNum); + } else { + CHECK_FATAL(!fpParamQueue[regInfo->GetFpParamRegIdx(regNO)].empty(), + "was not defined before use, impossible"); + LiveInterval *li = fpParamQueue[regInfo->GetFpParamRegIdx(regNO)].back(); + li->SetPhysUse(insnNum); + } + } +} + +void LSRALinearScanRegAllocator::UpdateLiveIntervalState(const BB &bb, LiveInterval &li) const { + if (bb.IsCatch()) { + li.SetInCatchState(); + } else { + li.SetNotInCatchState(); + } + + if (bb.GetInternalFlag1() != 0) { + li.SetInCleanupState(); + } else { + li.SetNotInCleanupState(bb.GetId() == 1); + } +} + +/* main entry function for live interval computation. */ +void LSRALinearScanRegAllocator::SetupLiveInterval(Operand &opnd, Insn &insn, bool isDef, uint32 &nUses) { + if (!opnd.IsRegister()) { + return; + } + auto ®Opnd = static_cast(opnd); + uint32 insnNum = insn.GetId(); + if (regOpnd.IsPhysicalRegister()) { + RecordPhysRegs(regOpnd, insnNum, isDef); + return; + } + RegType regType = regOpnd.GetRegisterType(); + if (regType == kRegTyCc || regType == kRegTyVary) { + return; + } + + LiveInterval *li = nullptr; + uint32 regNO = regOpnd.GetRegisterNumber(); + if (liveIntervalsArray[regNO] == nullptr) { + li = memPool->New(alloc); + li->SetRegNO(regNO); + li->SetStackSlot(0xFFFFFFFF); + liveIntervalsArray[regNO] = li; + } else { + li = liveIntervalsArray[regNO]; + } + li->SetRegType(regType); + + BB *curBB = insn.GetBB(); + if (isDef) { + /* never set to 0 before, why consider this condition ? */ + if (li->GetFirstDef() == 0) { + li->SetFirstDef(insnNum); + li->SetLastUse(insnNum + 1); + } else if (!curBB->IsUnreachable()) { + if (li->GetLastUse() < insnNum || li->IsUseBeforeDef()) { + li->SetLastUse(insnNum + 1); + } + } + /* + * try-catch related + * Not set when extending live interval with bb's livein in ComputeLiveInterval. 
+ */ + li->SetResultCount(li->GetResultCount() + 1); + } else { + if (li->GetFirstDef() == 0) { + ASSERT(false, "SetupLiveInterval: use before def"); + } + /* + * In ComputeLiveInterval when extending live interval using + * live-out information, li created does not have a type. + */ + if (!curBB->IsUnreachable()) { + li->SetLastUse(insnNum); + } + ++nUses; + } + UpdateLiveIntervalState(*curBB, *li); + + li->SetRefCount(li->GetRefCount() + 1); + + uint32 index = regNO / (sizeof(uint64) * k8ByteSize); + uint64 bit = regNO % (sizeof(uint64) * k8ByteSize); + if ((regUsedInBB[index] & (static_cast(1) << bit)) != 0) { + li->SetMultiUseInBB(true); + } + regUsedInBB[index] |= (static_cast(1) << bit); + + if (minVregNum > regNO) { + minVregNum = regNO; + } + if (maxVregNum < regNO) { + maxVregNum = regNO; + } + + /* setup the def/use point for it */ + ASSERT(regNO < liveIntervalsArray.size(), "out of range of vector liveIntervalsArray"); +} + +/* + * Support 'hole' in LSRA. + * For a live interval, there might be multiple segments of live ranges, + * and between these segments a 'hole'. + * Some other short lived vreg can go into these 'holes'. + * + * from : starting instruction sequence id + * to : ending instruction sequence id + */ +void LSRALinearScanRegAllocator::LiveInterval::AddRange(uint32 from, uint32 to) { + if (ranges.empty()) { + ranges.push_back(std::pair(from, to)); + } else { + if (to < ranges.front().first) { + (void)ranges.insert(ranges.cbegin(), std::pair(from, to)); + } else if (to >= ranges.front().second && from < ranges.front().first) { + ranges.front().first = from; + ranges.front().second = to; + } else if (to >= ranges.front().first && from < ranges.front().first) { + ranges.front().first = from; + } else if (from > ranges.front().second) { + ASSERT(false, "No possible on reverse traverse."); + } + } +} + +void LSRALinearScanRegAllocator::LiveInterval::AddUsePos(uint32 pos) { + (void)usePositions.insert(pos); +} + +/* See if a vreg can fit in one of the holes of a longer live interval. */ +uint32 LSRALinearScanRegAllocator::FillInHole(LiveInterval &li) { + MapleSet::iterator it; + for (it = active.begin(); it != active.end(); ++it) { + auto *ili = static_cast(*it); + + /* + * If ili is part in cleanup, the hole info will be not correct, + * since cleanup bb do not have edge to normal func bb, and the + * live-out info will not correct. 
+ */ + if (!ili->IsAllOutCleanup() || ili->IsInCatch()) { + continue; + } + + if (ili->GetRegType() != li.GetRegType() || ili->GetStackSlot() != 0xFFFFFFFF || ili->GetLiChild() != nullptr || + ili->GetAssignedReg() == 0) { + continue; + } + for (const auto &inner : ili->GetHoles()) { + if (inner.first <= li.GetFirstDef() && inner.second >= li.GetLastUse()) { + ili->SetLiChild(&li); + li.SetLiParent(ili); + li.SetAssignedReg(ili->GetAssignedReg()); + /* If assigned physical register is callee save register, set shouldSave false; */ + regno_t phyReg = regInfo->GetInvalidReg(); + if (li.GetRegType() == kRegTyInt || li.GetRegType() == kRegTyFloat) { + phyReg = li.GetAssignedReg(); + } else { + ASSERT(false, "FillInHole, Invalid register type"); + } + + if (regInfo->IsAvailableReg(phyReg) && regInfo->IsCalleeSavedReg(phyReg)) { + li.SetShouldSave(false); + } + return ili->GetAssignedReg(); + } else if (inner.first > li.GetLastUse()) { + break; + } + } + } + return 0; +} + +void LSRALinearScanRegAllocator::SetupIntervalRangesByOperand(Operand &opnd, const Insn &insn, uint32 blockFrom, + bool isDef, bool isUse) { + auto ®Opnd = static_cast(opnd); + RegType regType = regOpnd.GetRegisterType(); + if (regType != kRegTyCc && regType != kRegTyVary) { + regno_t regNO = regOpnd.GetRegisterNumber(); + if (regNO > regInfo->GetAllRegNum()) { + if (isDef) { + if (!liveIntervalsArray[regNO]->GetRanges().empty()) { + liveIntervalsArray[regNO]->GetRanges().front().first = insn.GetId(); + liveIntervalsArray[regNO]->UsePositionsInsert(insn.GetId()); + } + } + if (isUse) { + liveIntervalsArray[regNO]->AddRange(blockFrom, insn.GetId()); + liveIntervalsArray[regNO]->UsePositionsInsert(insn.GetId()); + } + } + } +} + +void LSRALinearScanRegAllocator::BuildIntervalRangesForEachOperand(const Insn &insn, uint32 blockFrom) { + const InsnDesc *md = insn.GetDesc(); + uint32 opndNum = insn.GetOperandSize(); + for (uint32 i = 0; i < opndNum; ++i) { + Operand &opnd = insn.GetOperand(i); + if (opnd.IsMemoryAccessOperand()) { + auto &memOpnd = static_cast(opnd); + Operand *base = memOpnd.GetBaseRegister(); + Operand *offset = memOpnd.GetIndexRegister(); + if (base != nullptr && base->IsRegister()) { + SetupIntervalRangesByOperand(*base, insn, blockFrom, false, true); + } + if (offset != nullptr && offset->IsRegister()) { + SetupIntervalRangesByOperand(*offset, insn, blockFrom, false, true); + } + } else if (opnd.IsRegister()) { + const OpndDesc *regProp = md->GetOpndDes(i); + ASSERT(regProp != nullptr, "pointer is null in LSRALinearScanRegAllocator::BuildIntervalRangesForEachOperand"); + bool isDef = regProp->IsRegDef(); + bool isUse = regProp->IsRegUse(); + SetupIntervalRangesByOperand(opnd, insn, blockFrom, isDef, isUse); + } + } +} + +/* Support finding holes by searching for ranges where holes exist. */ +void LSRALinearScanRegAllocator::BuildIntervalRanges() { + size_t bbIdx = bfs->sortedBBs.size(); + if (bbIdx == 0) { + return; + } + + do { + --bbIdx; + BB *bb = bfs->sortedBBs[bbIdx]; + if (bb->GetFirstInsn() == nullptr || bb->GetLastInsn() == nullptr) { + continue; + } + uint32 blockFrom = bb->GetFirstInsn()->GetId(); + uint32 blockTo = bb->GetLastInsn()->GetId() + 1; + + for (auto regNO : bb->GetLiveOutRegNO()) { + if (regNO < regInfo->GetAllRegNum()) { + /* Do not consider physical regs. */ + continue; + } + liveIntervalsArray[regNO]->AddRange(blockFrom, blockTo); + } + + FOR_BB_INSNS_REV(insn, bb) { + BuildIntervalRangesForEachOperand(*insn, blockFrom); + } + } while (bbIdx != 0); + + /* Build holes. 
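+   * A hole is the gap between two consecutive live ranges of one interval; only gaps wider
+   * than kMinRangesSize instruction ids are recorded, and intervals with fewer than
+   * kMinRangesSize ranges are skipped.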
*/ + for (uint32 i = 0; i < cgFunc->GetMaxVReg(); ++i) { + LiveInterval *li = liveIntervalsArray[i]; + if (li == nullptr) { + continue; + } + if (li->GetRangesSize() < kMinRangesSize) { + continue; + } + + auto it = li->GetRanges().begin(); + auto itPrev = it++; + for (; it != li->GetRanges().end(); ++it) { + if (((*it).first - (*itPrev).second) > kMinRangesSize) { + li->HolesPushBack((*itPrev).second, (*it).first); + } + itPrev = it; + } + } +} + +/* Extend live interval with live-in info */ +void LSRALinearScanRegAllocator::UpdateLiveIntervalByLiveIn(const BB &bb, uint32 insnNum) { + for (const auto ®NO : bb.GetLiveInRegNO()) { + if (regNO < regInfo->GetAllRegNum()) { + /* Do not consider physical regs. */ + continue; + } + LiveInterval *liOuter = liveIntervalsArray[regNO]; + if (liOuter != nullptr || (bb.IsEmpty() && bb.GetId() != 1)) { + continue; + } + /* + * try-catch related + * Since it is livein but not seen before, its a use before def + * spill it temporarily + */ + auto *li = memPool->New(alloc); + li->SetRegNO(regNO); + li->SetStackSlot(kSpilled); + liveIntervalsArray[regNO] = li; + li->SetFirstDef(insnNum); + li->SetUseBeforeDef(true); + + if (!bb.IsUnreachable()) { + if (bb.GetId() != 1) { + LogInfo::MapleLogger() << "ERROR: " << regNO << " use before def in bb " << bb.GetId() << " : " << + cgFunc->GetName() << "\n"; + ASSERT(false, "There should only be [use before def in bb 1], temporarily."); + } + LogInfo::MapleLogger() << "WARNING: " << regNO << " use before def in bb " << bb.GetId() << " : " << + cgFunc->GetName() << "\n"; + } + UpdateLiveIntervalState(bb, *li); + } +} + +/* traverse live in regNO, for each live in regNO create a new liveinterval */ +void LSRALinearScanRegAllocator::UpdateParamLiveIntervalByLiveIn(const BB &bb, uint32 insnNum) { + for (const auto ®NO : bb.GetLiveInRegNO()) { + if (!regInfo->IsPreAssignedReg(regNO)) { + continue; + } + auto *li = memPool->New(alloc); + li->SetRegNO(regNO); + li->SetStackSlot(0xFFFFFFFF); + li->SetFirstDef(insnNum); + li->SetPhysUse(insnNum); + li->SetAssignedReg(regNO); + + if (regInfo->IsGPRegister(regNO)) { + li->SetRegType(kRegTyInt); + intParamQueue[regInfo->GetIntParamRegIdx(regNO)].push_back(li); + } else { + li->SetRegType(kRegTyFloat); + fpParamQueue[regInfo->GetFpParamRegIdx(regNO)].push_back(li); + } + } +} + +void LSRALinearScanRegAllocator::ComputeLiveIn(BB &bb, uint32 insnNum) { + UpdateLiveIntervalByLiveIn(bb, insnNum); + + if (LSRA_DUMP) { + LogInfo::MapleLogger() << "bb(" << bb.GetId() << ")LIVEOUT:"; + for (const auto &liveOutRegNO : bb.GetLiveOutRegNO()) { + LogInfo::MapleLogger() << " " << liveOutRegNO; + } + LogInfo::MapleLogger() << ".\n"; + LogInfo::MapleLogger() << "bb(" << bb.GetId() << ")LIVEIN:"; + for (const auto &liveInRegNO : bb.GetLiveInRegNO()) { + LogInfo::MapleLogger() << " " << liveInRegNO; + } + LogInfo::MapleLogger() << ".\n"; + } + + regUsedInBBSz = (cgFunc->GetMaxVReg() / (sizeof(uint64) * k8ByteSize) + 1); + regUsedInBB = new uint64[regUsedInBBSz]; + CHECK_FATAL(regUsedInBB != nullptr, "alloc regUsedInBB memory failure."); + errno_t ret = memset_s(regUsedInBB, regUsedInBBSz * sizeof(uint64), 0, regUsedInBBSz * sizeof(uint64)); + if (ret != EOK) { + CHECK_FATAL(false, "call memset_s failed in LSRALinearScanRegAllocator::ComputeLiveInterval()"); + } + + if (bb.GetFirstInsn() == nullptr) { + return; + } + if (!bb.GetEhPreds().empty()) { + bb.InsertLiveInRegNO(firstIntReg); + bb.InsertLiveInRegNO(firstIntReg + 1); + } + UpdateParamLiveIntervalByLiveIn(bb, insnNum); + if 
(!bb.GetEhPreds().empty()) { + bb.EraseLiveInRegNO(firstIntReg); + bb.EraseLiveInRegNO(firstIntReg + 1); + } +} + +void LSRALinearScanRegAllocator::ComputeLiveOut(BB &bb, uint32 insnNum) { + /* + * traverse live out regNO + * for each live out regNO if the last corresponding live interval is created within this bb + * update this lastUse of li to the end of BB + */ + for (const auto ®NO : bb.GetLiveOutRegNO()) { + if (regInfo->IsPreAssignedReg(static_cast(regNO))) { + LiveInterval *liOut = nullptr; + if (regInfo->IsGPRegister(regNO)) { + if (intParamQueue[regInfo->GetIntParamRegIdx(regNO)].empty()) { + continue; + } + liOut = intParamQueue[regInfo->GetIntParamRegIdx(regNO)].back(); + if (bb.GetFirstInsn() && liOut->GetFirstDef() >= bb.GetFirstInsn()->GetId()) { + liOut->SetPhysUse(insnNum); + } + } else { + if (fpParamQueue[regInfo->GetFpParamRegIdx(regNO)].empty()) { + continue; + } + liOut = fpParamQueue[regInfo->GetFpParamRegIdx(regNO)].back(); + if (bb.GetFirstInsn() && liOut->GetFirstDef() >= bb.GetFirstInsn()->GetId()) { + liOut->SetPhysUse(insnNum); + } + } + } + /* Extend live interval with live-out info */ + LiveInterval *li = liveIntervalsArray[regNO]; + if (li != nullptr && !bb.IsEmpty()) { + li->SetLastUse(bb.GetLastInsn()->GetId()); + UpdateLiveIntervalState(bb, *li); + } + } +} + +void LSRALinearScanRegAllocator::ComputeLiveIntervalForEachOperand(Insn &insn) { + uint32 numUses = 0; + const InsnDesc *md = insn.GetDesc(); + uint32 opndNum = insn.GetOperandSize(); + /* + * we need to process src opnd first just in case the src/dest vreg are the same and the src vreg belongs to the + * last interval. + */ + for (int32 i = opndNum - 1; i >= 0; --i) { + Operand &opnd = insn.GetOperand(static_cast(i)); + const OpndDesc *opndDesc = md->GetOpndDes(i); + ASSERT(opndDesc != nullptr, "ptr null check."); + if (opnd.IsList()) { + auto &listOpnd = static_cast(opnd); + for (auto &op : listOpnd.GetOperands()) { + SetupLiveInterval(*op, insn, opndDesc->IsDef(), numUses); + } + } else if (opnd.IsMemoryAccessOperand()) { + bool isDef = false; + auto &memOpnd = static_cast(opnd); + Operand *base = memOpnd.GetBaseRegister(); + Operand *offset = memOpnd.GetIndexRegister(); + if (base != nullptr) { + SetupLiveInterval(*base, insn, isDef, numUses); + } + if (offset != nullptr) { + SetupLiveInterval(*offset, insn, isDef, numUses); + } + } else { + /* Specifically, the "use-def" opnd is treated as a "use" opnd */ + bool isUse = opndDesc->IsRegUse(); + SetupLiveInterval(opnd, insn, !isUse, numUses); + } + } + if (numUses >= regInfo->GetNormalUseOperandNum()) { + needExtraSpillReg = true; + } +} + +void LSRALinearScanRegAllocator::ComputeLiveInterval() { + calleeUseCnt.resize(regInfo->GetAllRegNum()); + liveIntervalsArray.resize(cgFunc->GetMaxVReg()); + /* LiveInterval queue for each param/return register */ + lastIntParamLi.resize(regInfo->GetIntRegsParmsNum()); + lastFpParamLi.resize(regInfo->GetFloatRegsParmsNum()); + uint32 insnNum = 1; + for (BB *bb : bfs->sortedBBs) { + ComputeLiveIn(*bb, insnNum); + FOR_BB_INSNS(insn, bb) { + insn->SetId(insnNum); + /* skip comment and debug insn */ + if (insn->IsImmaterialInsn() || !insn->IsMachineInstruction()) { + continue; + } + /* RecordCall */ + if (insn->IsCall()) { + if (!insn->GetIsThrow() || !bb->GetEhSuccs().empty()) { + RecordCall(*insn); + } + } + + ComputeLiveIntervalForEachOperand(*insn); + + /* handle return value for call insn */ + if (insn->IsCall()) { + /* For all backend architectures so far, adopt all RetRegs as Def via this insn, + * and then 
their live begins. + * next optimization, you can determine which registers are actually used. + */ + RegOperand *retReg = nullptr; + if (insn->GetRetType() == Insn::kRegInt) { + for (int i = 0; i < regInfo->GetIntRetRegsNum(); i++) { + retReg = regInfo->GetOrCreatePhyRegOperand(regInfo->GetIntRetReg(i), + k64BitSize, kRegTyInt); + RecordPhysRegs(*retReg, insnNum, true); + } + } else { + for (int i = 0; i < regInfo->GetFpRetRegsNum(); i++) { + retReg = regInfo->GetOrCreatePhyRegOperand(regInfo->GetFpRetReg(i), + k64BitSize, kRegTyFloat); + RecordPhysRegs(*retReg, insnNum, true); + } + } + } + ++insnNum; + } + + ComputeLiveOut(*bb, insnNum); + + delete[] regUsedInBB; + regUsedInBB = nullptr; + maxInsnNum = insnNum - 1; /* insn_num started from 1 */ + } + + for (auto *li : liveIntervalsArray) { + if (li == nullptr || li->GetRegNO() == 0) { + continue; + } + if (li->GetIsCall() != nullptr || (li->GetPhysUse() > 0)) { + continue; + } + if (li->GetLastUse() > li->GetFirstDef()) { + li->SetPriority(static_cast(li->GetRefCount()) / + static_cast(li->GetLastUse() - li->GetFirstDef())); + } else { + li->SetPriority(static_cast(li->GetRefCount()) / + static_cast(li->GetFirstDef() - li->GetLastUse())); + } + } + + if (LSRA_DUMP) { + PrintLiveIntervals(); + } + +} + +/* A physical register is freed at the end of the live interval. Return to pool. */ +void LSRALinearScanRegAllocator::ReturnPregToSet(const LiveInterval &li, uint32 preg) { + if (preg == 0) { + return; + } + if (li.GetRegType() == kRegTyInt) { + preg -= firstIntReg; + } else if (li.GetRegType() == kRegTyFloat) { + preg -= firstFpReg; + } else { + ASSERT(false, "ReturnPregToSet: Invalid reg type"); + } + if (LSRA_DUMP) { + LogInfo::MapleLogger() << "\trestoring preg " << preg << " as allocatable\n"; + } + uint32 mask = 1u << preg; + if (preg == kSpecialIntSpillReg && li.GetStackSlot() == 0xFFFFFFFF) { + /* this reg is temporary used for liveinterval which lastUse-firstDef == 1 */ + return; + } + if (li.GetRegType() == kRegTyInt) { + if ((intCallerMask & mask) > 0) { + (void)intCallerRegSet.insert(preg); + } else if ((intCalleeMask & mask) > 0) { + (void)intCalleeRegSet.insert(preg); + } else if ((intParamMask & mask) > 0) { + (void)intParamRegSet.insert(preg); + } else { + ASSERT(false, "ReturnPregToSet: Unknown caller/callee type"); + } + } else if ((fpCallerMask & mask) > 0) { + (void)fpCallerRegSet.insert(preg); + } else if ((fpCalleeMask & mask) > 0) { + (void)fpCalleeRegSet.insert(preg); + } else if ((fpParamMask & mask) > 0) { + (void)fpParamRegSet.insert(preg); + } else { + ASSERT(false, "ReturnPregToSet invalid physical register"); + } +} + +/* A physical register is removed from allocation as it is assigned. 
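+ * The register is erased from whichever allocatable set (caller-saved, callee-saved or
+ * parameter) it came from, mirroring ReturnPregToSet above.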
*/ +void LSRALinearScanRegAllocator::ReleasePregToSet(const LiveInterval &li, uint32 preg) { + if (preg == 0) { + return; + } + if (li.GetRegType() == kRegTyInt) { + preg -= firstIntReg; + } else if (li.GetRegType() == kRegTyFloat) { + preg -= firstFpReg; + } else { + ASSERT(false, "ReleasePregToSet: Invalid reg type"); + } + if (LSRA_DUMP) { + LogInfo::MapleLogger() << "\treleasing preg " << preg << " as allocatable\n"; + } + uint32 mask = 1u << preg; + if (preg == kSpecialIntSpillReg && li.GetStackSlot() == 0xFFFFFFFF) { + /* this reg is temporary used for liveinterval which lastUse-firstDef == 1 */ + return; + } + if (li.GetRegType() == kRegTyInt) { + if ((intCallerMask & mask) > 0) { + intCallerRegSet.erase(preg); + } else if ((intCalleeMask & mask) > 0) { + intCalleeRegSet.erase(preg); + } else if ((intParamMask & mask) > 0) { + intParamRegSet.erase(preg); + } else { + ASSERT(false, "ReleasePregToSet: Unknown caller/callee type"); + } + } else if ((fpCallerMask & mask) > 0) { + fpCallerRegSet.erase(preg); + } else if ((fpCalleeMask & mask) > 0) { + fpCalleeRegSet.erase(preg); + } else if ((fpParamMask & mask) > 0) { + fpParamRegSet.erase(preg); + } else { + ASSERT(false, "ReleasePregToSet invalid physical register"); + } +} + +/* update active in retire */ +void LSRALinearScanRegAllocator::UpdateActiveAtRetirement(uint32 insnID) { + /* Retire live intervals from active list */ + MapleSet::iterator it; + for (it = active.begin(); it != active.end(); /* erase will update */) { + auto *li = static_cast(*it); + if (li->GetLastUse() > insnID) { + break; + } + /* Is it phys reg which can be pre-Assignment? */ + if (li->GetRegNO() >= firstIntReg && regInfo->IsPreAssignedReg(li->GetRegNO())) { + if (li->GetPhysUse() != 0 && li->GetPhysUse() <= insnID) { + it = active.erase(it); + if (li->GetPhysUse() != 0) { + ReturnPregToSet(*li, li->GetRegNO()); + } + if (LSRA_DUMP) { + PrintLiveInterval(*li, "\tRemoving phys_reg li\n"); + } + } else { + ++it; + } + continue; + } else if (li->GetRegNO() >= firstFpReg && regInfo->IsPreAssignedReg(li->GetRegNO())) { + if (li->GetPhysUse() != 0 && li->GetPhysUse() <= insnID) { + it = active.erase(it); + if (li->GetPhysUse() != 0) { + ReturnPregToSet(*li, li->GetRegNO()); + } + if (LSRA_DUMP) { + PrintLiveInterval(*li, "\tRemoving phys_reg li\n"); + } + } else { + ++it; + } + continue; + } + /* + * live interval ended for this reg in active + * release physical reg assigned to free reg pool + */ + if (li->GetLiParent() != nullptr) { + li->SetLiParentChild(nullptr); + li->SetLiParent(nullptr); + } else { + ReturnPregToSet(*li, li->GetAssignedReg()); + } + if (LSRA_DUMP) { + LogInfo::MapleLogger() << "Removing " << "(" << li->GetAssignedReg() << ")" << "from regset\n"; + PrintLiveInterval(*li, "\tRemoving virt_reg li\n"); + } + it = active.erase(it); + } +} + +/* Remove a live interval from 'active' list. */ +void LSRALinearScanRegAllocator::RetireFromActive(const Insn &insn) { + uint32 insnID = insn.GetId(); + /* + * active list is sorted based on increasing lastUse + * any operand whose use is greater than current + * instruction number is still in use. + * If the use is less than or equal to instruction number + * then it is possible to retire this live interval and + * reclaim the physical register associated with it. 
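+   * Calls whose position has already been passed are popped from callList here as well, and
+   * any parameter-register interval held in lastIntParamLi/lastFpParamLi is pushed back onto
+   * its queue before the active list itself is pruned.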
+ */ + if (LSRA_DUMP) { + LogInfo::MapleLogger() << "RetireFromActive instr_num " << insnID << "\n"; + } + /* Retire call from call queue */ + for (auto it = callList.cbegin(); it != callList.cend();) { + auto *li = static_cast(*it); + if (li->GetFirstDef() > insnID) { + break; + } + callList.pop_front(); + /* at here, it is invalidated */ + it = callList.begin(); + } + + for (uint32 i = 0; i < intParamQueue.size(); ++i) { + /* push back the param not yet use <- as only one is popped, just push it back again */ + if (lastIntParamLi[i] != nullptr) { + intParamQueue[i].push_front(lastIntParamLi[i]); + (void)intParamRegSet.insert(lastIntParamLi[i]->GetAssignedReg() - firstIntReg); + lastIntParamLi[i] = nullptr; + } + if (lastFpParamLi[i] != nullptr) { + fpParamQueue[i].push_front(lastFpParamLi[i]); + (void)fpParamRegSet.insert(lastFpParamLi[i]->GetAssignedReg() - firstFpReg); + lastFpParamLi[i] = nullptr; + } + } + + UpdateActiveAtRetirement(insnID); +} + +/* the return value is a physical reg */ +uint32 LSRALinearScanRegAllocator::GetRegFromSet(MapleSet &set, regno_t offset, LiveInterval &li, + regno_t forcedReg) const { + uint32 regNO; + if (forcedReg > 0) { + /* forced_reg is a caller save reg */ + regNO = forcedReg; + } else { + CHECK(!set.empty(), "set is null in LSRALinearScanRegAllocator::GetRegFromSet"); + regNO = *(set.begin()); + } + set.erase(regNO); + if (LSRA_DUMP) { + LogInfo::MapleLogger() << "\tAssign " << regNO << "\n"; + } + regNO += offset; /* Mapping into Maplecg reg */ + li.SetAssignedReg(regNO); + if (LSRA_DUMP) { + PrintRegSet(set, "Reg Set AFTER"); + PrintLiveInterval(li, "LiveInterval after assignment"); + } + return regNO; +} + +/* + * Handle adrp register assignment. Use the same register for the next + * instruction. + */ +uint32 LSRALinearScanRegAllocator::AssignSpecialPhysRegPattern(const Insn &insn, LiveInterval &li) { + Insn *nInsn = insn.GetNext(); + if (nInsn == nullptr || !nInsn->IsMachineInstruction() || nInsn->IsDMBInsn()) { + return 0; + } + + const InsnDesc *md = insn.GetDesc(); + if (!md->GetOpndDes(0)->IsRegDef()) { + return 0; + } + Operand &opnd = nInsn->GetOperand(0); + if (!opnd.IsRegister()) { + return 0; + } + auto ®Opnd = static_cast(opnd); + if (!regOpnd.IsPhysicalRegister()) { + return 0; + } + uint32 regNO = regOpnd.GetRegisterNumber(); + if (!regInfo->IsPreAssignedReg(regNO)) { + return 0; + } + + /* next insn's dest is a physical param reg 'regNO' */ + bool match = false; + uint32 opndNum = nInsn->GetOperandSize(); + for (uint32 i = 1; i < opndNum; ++i) { + Operand &src = nInsn->GetOperand(i); + if (src.IsMemoryAccessOperand()) { + auto &memOpnd = static_cast(src); + Operand *base = memOpnd.GetBaseRegister(); + if (base != nullptr) { + auto *regSrc = static_cast(base); + uint32 srcRegNO = regSrc->GetRegisterNumber(); + if (li.GetRegNO() == srcRegNO) { + match = true; + break; + } + } + Operand *offset = memOpnd.GetIndexRegister(); + if (offset != nullptr) { + auto *regSrc = static_cast(offset); + uint32 srcRegNO = regSrc->GetRegisterNumber(); + if (li.GetRegNO() == srcRegNO) { + match = true; + break; + } + } + } else if (src.IsRegister()) { + auto ®Src = static_cast(src); + uint32 srcRegNO = regSrc.GetRegisterNumber(); + if (li.GetRegNO() == srcRegNO) { + ASSERT(md->GetOpndDes(i) != nullptr, + "pointer is null in LSRALinearScanRegAllocator::AssignSpecialPhysRegPattern"); + if (md->GetOpndDes(i)->IsRegDef()) { + break; + } + match = true; + break; + } + } + } + if (match && li.GetLastUse() > nInsn->GetId()) { + return 0; + } + /* dest of 
adrp is src of next insn */ + if (match) { + return GetRegFromSet(intParamRegSet, firstIntReg, li, regNO - firstIntReg); + } + return 0; +} + +uint32 LSRALinearScanRegAllocator::FindAvailablePhyRegByFastAlloc(LiveInterval &li) { + uint32 regNO = 0; + if (li.GetRegType() == kRegTyInt) { + if (!intCalleeRegSet.empty()) { + regNO = GetRegFromSet(intCalleeRegSet, firstIntReg, li); + li.SetShouldSave(false); + } else if (!intCallerRegSet.empty()) { + regNO = GetRegFromSet(intCallerRegSet, firstIntReg, li); + li.SetShouldSave(true); + } else { + li.SetShouldSave(false); + } + } else if (li.GetRegType() == kRegTyFloat) { + if (!fpCalleeRegSet.empty()) { + regNO = GetRegFromSet(fpCalleeRegSet, firstFpReg, li); + li.SetShouldSave(false); + } else if (!fpCallerRegSet.empty()) { + regNO = GetRegFromSet(fpCallerRegSet, firstFpReg, li); + li.SetShouldSave(true); + } else { + li.SetShouldSave(false); + } + } + return regNO; +} + +bool LSRALinearScanRegAllocator::NeedSaveAcrossCall(LiveInterval &li) { + bool saveAcrossCall = false; + for (auto *cli : std::as_const(callList)) { + if (cli->GetFirstDef() > li.GetLastUse()) { + break; + } + /* Determine if live interval crosses the call */ + if ((cli->GetFirstDef() > li.GetFirstDef()) && (cli->GetFirstDef() < li.GetLastUse())) { + li.SetShouldSave(true); + /* Need to spill/fill around this call */ + saveAcrossCall = true; + break; + } + } + return saveAcrossCall; +} + +uint32 LSRALinearScanRegAllocator::FindAvailablePhyReg(LiveInterval &li, const Insn &insn, + bool isIntReg) { + uint32 regNO = 0; + MapleSet &callerRegSet = isIntReg ? intCallerRegSet : fpCallerRegSet; + MapleSet &calleeRegSet = isIntReg ? intCalleeRegSet : fpCalleeRegSet; + MapleSet ¶mRegSet = isIntReg ? intParamRegSet : fpParamRegSet; + regno_t reg0 = isIntReg ? firstIntReg : firstFpReg; + + /* See if register is live accross a call */ + bool saveAcrossCall = NeedSaveAcrossCall(li); + if (saveAcrossCall) { + if (LSRA_DUMP) { + LogInfo::MapleLogger() << "\t\tlive interval crosses a call\n"; + } + if (regNO == 0) { + if (!li.IsInCatch() && !li.IsAllInCleanupOrFirstBB() && !calleeRegSet.empty()) { + /* call in live interval, use callee if available */ + regNO = GetRegFromSet(calleeRegSet, reg0, li); + /* Since it is callee saved, no need to continue search */ + li.SetShouldSave(false); + } else if (li.IsMultiUseInBB()) { + /* + * allocate caller save if there are multiple uses in one bb + * else it is no different from spilling + */ + if (!callerRegSet.empty()) { + regNO = GetRegFromSet(callerRegSet, reg0, li); + } else if (!paramRegSet.empty()) { + regNO = GetRegFromSet(paramRegSet, reg0, li); + } + } + } + if (regNO == 0) { + /* No register left for allocation */ + regNO = FillInHole(li); + if (regNO == 0) { + li.SetShouldSave(false); + } + } + return regNO; + } else { + if (LSRA_DUMP) { + LogInfo::MapleLogger() << "\t\tlive interval does not cross a call\n"; + } + if (isIntReg) { + regNO = AssignSpecialPhysRegPattern(insn, li); + if (regNO != 0) { + return regNO; + } + } + if (!paramRegSet.empty()) { + regNO = GetRegFromSet(paramRegSet, reg0, li); + } + if (regNO == 0) { + if (!callerRegSet.empty()) { + regNO = GetRegFromSet(callerRegSet, reg0, li); + } else if (!calleeRegSet.empty()) { + regNO = GetRegFromSet(calleeRegSet, reg0, li); + } else { + regNO = FillInHole(li); + } + } + return regNO; + } +} + +/* Return a phys register number for the live interval. 
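+ * Returns 0 when no physical register can be found, which makes AssignPhysRegs return
+ * nullptr so that the interval ends up being spilled.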
*/ +uint32 LSRALinearScanRegAllocator::FindAvailablePhyReg(LiveInterval &li, const Insn &insn) { + if (fastAlloc) { + return FindAvailablePhyRegByFastAlloc(li); + } + uint32 regNO = 0; + if (li.GetRegType() == kRegTyInt) { + regNO = FindAvailablePhyReg(li, insn, true); + } else if (li.GetRegType() == kRegTyFloat) { + regNO = FindAvailablePhyReg(li, insn, false); + } + return regNO; +} + +/* Spill and reload for caller saved registers. */ +void LSRALinearScanRegAllocator::InsertCallerSave(Insn &insn, Operand &opnd, bool isDef) { + auto ®Opnd = static_cast(opnd); + uint32 vRegNO = regOpnd.GetRegisterNumber(); + if (vRegNO >= liveIntervalsArray.size()) { + CHECK_FATAL(false, "index out of range in LSRALinearScanRegAllocator::InsertCallerSave"); + } + LiveInterval *rli = liveIntervalsArray[vRegNO]; + RegType regType = regOpnd.GetRegisterType(); + + isSpillZero = false; + if (!isDef) { + uint32 mask; + uint32 regBase; + if (regType == kRegTyInt) { + mask = intBBDefMask; + regBase = firstIntReg; + } else { + mask = fpBBDefMask; + regBase = firstFpReg; + } + if ((mask & (1u << (rli->GetAssignedReg() - regBase))) > 0) { + if (LSRA_DUMP) { + LogInfo::MapleLogger() << "InsertCallerSave " << rli->GetAssignedReg() << " skipping due to local def\n"; + } + return; + } + } + + if (!rli->IsShouldSave()) { + return; + } + + uint32 regSize = regOpnd.GetSize(); + PrimType spType; + + if (regType == kRegTyInt) { + spType = (regSize <= k32BitSize) ? PTY_i32 : PTY_i64; + intBBDefMask |= (1u << (rli->GetAssignedReg() - firstIntReg)); + } else { + spType = (regSize <= k32BitSize) ? PTY_f32 : PTY_f64; + fpBBDefMask |= (1u << (rli->GetAssignedReg() - firstFpReg)); + } + + if (LSRA_DUMP) { + LogInfo::MapleLogger() << "InsertCallerSave " << vRegNO << "\n"; + } + + if (!isDef && !rli->IsCallerSpilled()) { + LogInfo::MapleLogger() << "WARNING: " << vRegNO << " caller restore without spill in bb " + << insn.GetBB()->GetId() << " : " << cgFunc->GetName() << "\n"; + } + rli->SetIsCallerSpilled(true); + + MemOperand *memOpnd = nullptr; + RegOperand *phyOpnd = nullptr; + + phyOpnd = regInfo->GetOrCreatePhyRegOperand(static_cast(rli->GetAssignedReg()), regSize, + regType); + std::string comment; + bool isOutOfRange = false; + if (isDef) { + memOpnd = GetSpillMem(vRegNO, true, insn, static_cast(intSpillRegSet[0] + firstIntReg), + isOutOfRange); + Insn *stInsn = regInfo->BuildStrInsn(regSize, spType, *phyOpnd, *memOpnd); + comment = " SPILL for caller_save " + std::to_string(vRegNO); + ++callerSaveSpillCount; + if (rli->GetLastUse() == insn.GetId()) { + cgFunc->FreeSpillRegMem(vRegNO); + comment += " end"; + } + stInsn->SetComment(comment); + if (isOutOfRange) { + insn.GetBB()->InsertInsnAfter(*insn.GetNext(), *stInsn); + } else { + insn.GetBB()->InsertInsnAfter(insn, *stInsn); + } + } else { + memOpnd = GetSpillMem(vRegNO, false, insn, static_cast(intSpillRegSet[0] + firstIntReg), + isOutOfRange); + Insn *ldInsn = regInfo->BuildLdrInsn(regSize, spType, *phyOpnd, *memOpnd); + comment = " RELOAD for caller_save " + std::to_string(vRegNO); + ++callerSaveReloadCount; + if (rli->GetLastUse() == insn.GetId()) { + cgFunc->FreeSpillRegMem(vRegNO); + comment += " end"; + } + ldInsn->SetComment(comment); + insn.GetBB()->InsertInsnBefore(insn, *ldInsn); + } +} + +/* Shell function to find a physical register for an operand. 
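+ * Returns the physical register operand on success, or nullptr when the interval is already
+ * spilled (its stack slot is set) or no physical register is available.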
*/ +RegOperand *LSRALinearScanRegAllocator::AssignPhysRegs(Operand &opnd, const Insn &insn) { + auto ®Opnd = static_cast(opnd); + uint32 vRegNO = regOpnd.GetRegisterNumber(); + RegType regType = regOpnd.GetRegisterType(); + if (vRegNO >= liveIntervalsArray.size()) { + CHECK_FATAL(false, "index out of range in LSRALinearScanRegAllocator::AssignPhysRegs"); + } + LiveInterval *li = liveIntervalsArray[vRegNO]; + + /* + * if only def, no use, then should assign a new phyreg, + * otherwise, there may be conflict + */ + if (li->GetAssignedReg() != 0 && (li->GetLastUse() != 0 || li->GetPhysUse() != 0)) { + if (regInfo->IsCalleeSavedReg(li->GetAssignedReg())) { + ++calleeUseCnt[li->GetAssignedReg()]; + } + if (li->GetStackSlot() == 0xFFFFFFFF) { + return regInfo->GetOrCreatePhyRegOperand( + static_cast(li->GetAssignedReg()), opnd.GetSize(), regType); + } else { + /* need to reload */ + return nullptr; + } + } + + /* pre spilled: */ + if (li->GetStackSlot() != 0xFFFFFFFF) { + return nullptr; + } + + if (LSRA_DUMP) { + uint32 activeSz = active.size(); + LogInfo::MapleLogger() << "\tAssignPhysRegs-active_sz " << activeSz << "\n"; + } + + uint32 regNO = FindAvailablePhyReg(*li, insn); + if (regNO != 0) { + if (regInfo->IsCalleeSavedReg(regNO)) { + if (!CGOptions::DoCalleeToSpill()) { + if (LSRA_DUMP) { + LogInfo::MapleLogger() << "\tCallee-save register for save/restore in prologue/epilogue: " << regNO << "\n"; + } + cgFunc->AddtoCalleeSaved(regNO); + } + ++calleeUseCnt[regNO]; + } + return regInfo->GetOrCreatePhyRegOperand( + static_cast(li->GetAssignedReg()), opnd.GetSize(), regType); + } + + return nullptr; +} + +MemOperand *LSRALinearScanRegAllocator::GetSpillMem(uint32 vRegNO, bool isDest, Insn &insn, + regno_t regNO, bool &isOutOfRange) const { + MemOperand *memOpnd = cgFunc->GetOrCreatSpillMem(vRegNO); + return regInfo->AdjustMemOperandIfOffsetOutOfRange(memOpnd, vRegNO, isDest, insn, regNO, isOutOfRange); +} + +/* Set a vreg in live interval as being marked for spill. */ +void LSRALinearScanRegAllocator::SetOperandSpill(Operand &opnd) { + auto ®Opnd = static_cast(opnd); + uint32 regNO = regOpnd.GetRegisterNumber(); + if (LSRA_DUMP) { + LogInfo::MapleLogger() << "SetOperandSpill " << regNO; + LogInfo::MapleLogger() << "(" << liveIntervalsArray[regNO]->GetFirstAcrossedCall(); + LogInfo::MapleLogger() << ", refCount " << liveIntervalsArray[regNO]->GetRefCount() << ")\n"; + } + + ASSERT(regNO < liveIntervalsArray.size(), + "index out of vector size in LSRALinearScanRegAllocator::SetOperandSpill"); + LiveInterval *li = liveIntervalsArray[regNO]; + li->SetStackSlot(kSpilled); +} + +/* + * Generate spill/reload for an operand. + * spill_idx : one of 3 phys regs set aside for the purpose of spills. + */ +void LSRALinearScanRegAllocator::SpillOperand(Insn &insn, Operand &opnd, bool isDef, uint32 spillIdx) { + /* + * Insert spill (def) and fill (use) instructions for the operand. + * Keep track of the 'slot' (base 0). The actual slot on the stack + * will be some 'base_slot_offset' + 'slot' off FP. + * For simplification, entire 64bit register is spilled/filled. + * + * For example, a virtual register home 'slot' on the stack is location 5. + * This represents a 64bit slot (8bytes). The base_slot_offset + * from the base 'slot' determined by whoever is added, off FP. + * stack address is ( FP - (5 * 8) + base_slot_offset ) + * So the algorithm is simple, for each virtual register that is not + * allocated, it has to have a home address on the stack (a slot). 
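+   * (Illustrative arithmetic: assuming base_slot_offset is 0, slot 5 lives at
+   * FP - 40 and slot 6 at FP - 48, i.e. each slot is one 8-byte step further
+   * below FP.)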
+ * A class variable is used, start from 0, increment by 1. + * Since LiveInterval already represent unique regNO information, + * just add a slot number to it. Subsequent reference to a regNO + * will either get an allocated physical register or a slot number + * for computing the stack location. + * + * This function will also determine the operand to be a def or use. + * For def, spill instruction(s) is appended after the insn. + * For use, spill instruction(s) is prepended before the insn. + * Use FP - (slot# *8) for now. Will recompute if base_slot_offset + * is not 0. + * + * The total number of slots used will be used to compute the stack + * frame size. This will require some interface external to LSRA. + * + * For normal instruction, two spill regs should be enough. The caller + * controls which ones to use. + * For more complex operations, need to break down the instruction. + * eg. store v1 -> [v2 + v3] // 3 regs needed + * => p1 <- v2 // address part 1 + * p2 <- v3 // address part 2 + * p1 <- p1 + p2 // freeing up p2 + * p2 <- v1 + * store p2 -> [p1] + * or we can allocate more registers to the spill register set + * For store multiple, need to break it down into two or more instr. + */ + auto ®Opnd = static_cast(opnd); + uint32 regNO = regOpnd.GetRegisterNumber(); + if (LSRA_DUMP) { + LogInfo::MapleLogger() << "SpillOperand " << regNO << "\n"; + } + + regno_t spReg; + PrimType spType; + CHECK_FATAL(regNO < liveIntervalsArray.size(), "index out of range in LSRALinearScanRegAllocator::SpillOperand"); + LiveInterval *li = liveIntervalsArray[regNO]; + ASSERT(!li->IsShouldSave(), "SpillOperand: Should not be caller"); + uint32 regSize = regOpnd.GetSize(); + RegType regType = regOpnd.GetRegisterType(); + + if (li->GetRegType() == kRegTyInt) { + ASSERT((spillIdx < intSpillRegSet.size()), "SpillOperand: ran out int spill reg"); + spReg = intSpillRegSet[spillIdx] + firstIntReg; + spType = (regSize <= k32BitSize) ? PTY_i32 : PTY_i64; + } else if (li->GetRegType() == kRegTyFloat) { + ASSERT((spillIdx < fpSpillRegSet.size()), "SpillOperand: ran out fp spill reg"); + spReg = fpSpillRegSet[spillIdx] + firstFpReg; + spType = (regSize <= k32BitSize) ? PTY_f32 : PTY_f64; + } else { + CHECK_FATAL(false, "SpillOperand: Should be int or float type"); + } + + bool isOutOfRange = false; + RegOperand *phyOpnd = nullptr; + if (isSpillZero) { + phyOpnd = &cgFunc->GetZeroOpnd(regSize); + } else { + phyOpnd = regInfo->GetOrCreatePhyRegOperand(static_cast(spReg), regSize, regType); + } + li->SetAssignedReg(phyOpnd->GetRegisterNumber()); + + MemOperand *memOpnd = nullptr; + if (isDef) { + /* + * Need to assign spReg (one of the two spill reg) to the destination of the insn. + * spill_vreg <- opn1 op opn2 + * to + * spReg <- opn1 op opn2 + * store spReg -> spillmem + */ + li->SetStackSlot(kSpilled); + + ++spillCount; + memOpnd = GetSpillMem(regNO, true, insn, + static_cast(intSpillRegSet[spillIdx + 1] + firstIntReg), isOutOfRange); + Insn *stInsn = regInfo->BuildStrInsn(regSize, spType, *phyOpnd, *memOpnd); + std::string comment = " SPILL vreg:" + std::to_string(regNO); + if (li->GetLastUse() == insn.GetId()) { + cgFunc->FreeSpillRegMem(regNO); + comment += " end"; + } + stInsn->SetComment(comment); + if (isOutOfRange) { + insn.GetBB()->InsertInsnAfter(*insn.GetNext(), *stInsn); + } else { + insn.GetBB()->InsertInsnAfter(insn, *stInsn); + } + } else { + /* Here, reverse of isDef, change either opn1 or opn2 to the spReg. 
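+     * Sketch of the reload emitted below:
+     *   ldr  spReg, [spill slot of vreg]   // BuildLdrInsn, inserted before insn
+     *   ...  insn then reads spReg in place of the spilled vreg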
*/ + if (li->GetStackSlot() == 0xFFFFFFFF) { + LogInfo::MapleLogger() << "WARNING: " << regNO << " assigned " << li->GetAssignedReg() << + " restore without spill in bb " << insn.GetBB()->GetId() << " : " << + cgFunc->GetName() << "\n"; + } + ++reloadCount; + memOpnd = GetSpillMem(regNO, false, insn, + static_cast(intSpillRegSet[spillIdx] + firstIntReg), isOutOfRange); + Insn *ldInsn = regInfo->BuildLdrInsn(regSize, spType, *phyOpnd, *memOpnd); + std::string comment = " RELOAD vreg" + std::to_string(regNO); + if (li->GetLastUse() == insn.GetId()) { + cgFunc->FreeSpillRegMem(regNO); + comment += " end"; + } + ldInsn->SetComment(comment); + insn.GetBB()->InsertInsnBefore(insn, *ldInsn); + } +} + +RegOperand *LSRALinearScanRegAllocator::HandleSpillForInsn(const Insn &insn, Operand &opnd) { + /* choose the lowest priority li to spill */ + auto ®Opnd = static_cast(opnd); + uint32 regNO = regOpnd.GetRegisterNumber(); + ASSERT(regNO < liveIntervalsArray.size(), + "index out of range of MapleVector in LSRALinearScanRegAllocator::HandleSpillForInsn"); + LiveInterval *li = liveIntervalsArray[regNO]; + RegType regType = regOpnd.GetRegisterType(); + LiveInterval *spillLi = nullptr; + FindLowestPrioInActive(spillLi, regType, true); + + /* + * compare spill_li with current li + * spill_li is null and li->SetStackSlot(Spilled) when the li is spilled due to LiveIntervalAnalysis + */ + if (spillLi == nullptr || spillLi->GetLiParent() || spillLi->GetLiChild() || li->GetStackSlot() == kSpilled || + li->GetFirstDef() != insn.GetId() || li->GetPriority() < spillLi->GetPriority() || + li->GetRefCount() < spillLi->GetRefCount() || + !(regInfo->IsCalleeSavedReg(spillLi->GetAssignedReg()))) { + /* spill current li */ + if (LSRA_DUMP) { + LogInfo::MapleLogger() << "Flexible Spill: still spill " << li->GetRegNO() << ".\n"; + } + SetOperandSpill(opnd); + return nullptr; + } + + ReturnPregToSet(*spillLi, spillLi->GetAssignedReg()); + RegOperand *newOpnd = AssignPhysRegs(opnd, insn); + if (newOpnd == nullptr) { + ReleasePregToSet(*spillLi, spillLi->GetAssignedReg()); + SetOperandSpill(opnd); + return nullptr; + } + + if (LSRA_DUMP) { + LogInfo::MapleLogger() << "Flexible Spill: " << spillLi->GetRegNO() << " instead of " << li->GetRegNO() << ".\n"; + PrintLiveInterval(*spillLi, "TO spill: "); + PrintLiveInterval(*li, "Instead of: "); + } + + /* spill this live interval */ + active.erase(itFinded); + spillLi->SetStackSlot(kSpilled); + + return newOpnd; +} + +bool LSRALinearScanRegAllocator::OpndNeedAllocation(const Insn &insn, Operand &opnd, bool isDef, uint32 insnNum) { + if (!opnd.IsRegister()) { + return false; + } + auto ®Opnd = static_cast(opnd); + RegType regType = regOpnd.GetRegisterType(); + uint32 regNO = regOpnd.GetRegisterNumber(); + if (regType == kRegTyCc || regType == kRegTyVary) { + return false; + } + if (regInfo->IsUntouchableReg(regNO)) { + return false; + } + + if (regOpnd.IsPhysicalRegister()) { + if (isDef) { + if (regType == kRegTyInt) { + if (!regInfo->IsPreAssignedReg(regNO)) { + return false; + } + if (intParamQueue[regInfo->GetIntParamRegIdx(regNO)].empty()) { + return false; + } + LiveInterval *li = intParamQueue[regInfo->GetIntParamRegIdx(regNO)].front(); + /* li may have been inserted by InsertParamToActive */ + if (li->GetFirstDef() == insnNum) { + CHECK_FATAL(intParamRegSet.find(regNO - firstIntReg) != intParamRegSet.end(), "impossible"); + intParamRegSet.erase(regNO - firstIntReg); + (void)active.insert(li); + CHECK_FATAL(active.find(li) != active.end(), "impossible"); + 
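+          /* li is now tracked in 'active'; drop it from the pending parameter
+           * queue so the same interval is not activated twice. */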
intParamQueue[regInfo->GetIntParamRegIdx(regNO)].pop_front(); + } + } else { + if (regNO > regInfo->GetLastParamsFpReg() || fpParamQueue[regInfo->GetFpParamRegIdx(regNO)].empty()) { + return false; + } + LiveInterval *li = fpParamQueue[regInfo->GetFpParamRegIdx(regNO)].front(); + /* li may have been inserted by InsertParamToActive */ + if (li->GetFirstDef() == insnNum) { + CHECK_FATAL(fpParamRegSet.find(regNO - firstIntReg) != fpParamRegSet.end(), "impossible"); + fpParamRegSet.erase(regInfo->GetFpParamRegIdx(regNO)); + (void)active.insert(li); + CHECK_FATAL(active.find(li) != active.end(), "impossible"); + fpParamQueue[regInfo->GetFpParamRegIdx(regNO)].pop_front(); + } + } + } + return false; + } + /* This is a virtual register */ + return true; +} + +void LSRALinearScanRegAllocator::InsertParamToActive(Operand &opnd) { + auto ®Opnd = static_cast(opnd); + uint32 regNO = regOpnd.GetRegisterNumber(); + CHECK_FATAL(regNO < liveIntervalsArray.size(), + "index out of range in LSRALinearScanRegAllocator::InsertParamToActive"); + LiveInterval *li = liveIntervalsArray[regNO]; + /* Search for parameter registers that is in the live range to insert into queue */ + if (li->GetRegType() == kRegTyInt) { + for (uint32 i = 0; i < intParamQueue.size(); ++i) { + if (intParamQueue[i].empty()) { + continue; + } + LiveInterval *pli = intParamQueue[i].front(); + do { + if ((pli->GetFirstDef() <= li->GetFirstDef()) && (pli->GetPhysUse() <= li->GetFirstDef())) { + /* just discard it */ + intParamQueue[i].pop_front(); + if (intParamQueue[i].empty()) { + break; + } + pli = intParamQueue[i].front(); + } else { + break; + } + } while (true); + if ((pli->GetFirstDef() < li->GetLastUse()) && (pli->GetPhysUse() > li->GetFirstDef())) { + if (intParamRegSet.find(pli->GetAssignedReg() - firstIntReg) != intParamRegSet.end()) { + /* reserve this param register and active the its first use */ + lastIntParamLi[i] = pli; + intParamRegSet.erase(pli->GetAssignedReg() - firstIntReg); + intParamQueue[i].pop_front(); + } + } + } + } else { + ASSERT((li->GetRegType() == kRegTyFloat), "InsertParamToActive: Incorrect register type"); + for (uint32 i = 0; i < fpParamQueue.size(); ++i) { + if (fpParamQueue[i].empty()) { + continue; + } + LiveInterval *pli = fpParamQueue[i].front(); + do { + if ((pli->GetFirstDef() <= li->GetFirstDef()) && (pli->GetPhysUse() <= li->GetFirstDef())) { + /* just discard it */ + fpParamQueue[i].pop_front(); + if (fpParamQueue[i].empty()) { + break; + } + pli = fpParamQueue[i].front(); + } else { + break; + } + } while (true); + if ((pli->GetFirstDef() < li->GetLastUse()) && (pli->GetPhysUse() > li->GetFirstDef())) { + if (fpParamRegSet.find(i) != fpParamRegSet.end()) { + lastFpParamLi[i] = pli; + fpParamRegSet.erase(i); + fpParamQueue[i].pop_front(); + } + } + } + } +} + +/* Insert a live interval into the 'active' list. 
*/ +void LSRALinearScanRegAllocator::InsertToActive(Operand &opnd, uint32 insnNum) { + auto ®Opnd = static_cast(opnd); + uint32 regNO = regOpnd.GetRegisterNumber(); + CHECK_FATAL(regNO < liveIntervalsArray.size(), + "index out of range in LSRALinearScanRegAllocator::InsertToActive"); + LiveInterval *li = liveIntervalsArray[regNO]; + if (li->GetLastUse() <= insnNum) { + /* insert first, and retire later, then the assigned reg can be released */ + (void)active.insert(li); + if (LSRA_DUMP) { + PrintLiveInterval(*li, "LiveInterval is skip due to past insn num --- opt to remove redunant insn"); + } + return; + } + (void)active.insert(li); +} + +/* find the lowest one and erase it from active */ +void LSRALinearScanRegAllocator::FindLowestPrioInActive(LiveInterval *&targetLi, RegType regType, bool startRa) { + float lowestPrio = 100.0; + bool found = false; + MapleSet::iterator it; + MapleSet::iterator lowestIt; + for (it = active.begin(); it != active.end(); ++it) { + auto *li = static_cast(*it); + if (startRa && li->GetPhysUse() != 0) { + continue; + } + if (li->GetPriority() < lowestPrio && li->GetRegType() == regType) { + lowestPrio = li->GetPriority(); + lowestIt = it; + found = true; + } + } + if (found) { + targetLi = *lowestIt; + itFinded = lowestIt; + } +} + +/* Calculate the weight of a live interval for pre-spill and flexible spill */ +void LSRALinearScanRegAllocator::LiveIntervalAnalysis() { + for (uint32 bbIdx = 0; bbIdx < bfs->sortedBBs.size(); ++bbIdx) { + BB *bb = bfs->sortedBBs[bbIdx]; + + FOR_BB_INSNS(insn, bb) { + insnNumBeforRA++; + /* 1 calculate live interfere */ + if (insn->IsImmaterialInsn() || !insn->IsMachineInstruction() || insn->GetId() == 0) { + /* New instruction inserted by reg alloc (ie spill) */ + continue; + } + /* 1.1 simple retire from active */ + MapleSet::iterator it; + for (it = active.begin(); it != active.end(); /* erase will update */) { + auto *li = static_cast(*it); + if (li->GetLastUse() > insn->GetId()) { + break; + } + it = active.erase(it); + } + const InsnDesc *md = insn->GetDesc(); + uint32 opndNum = insn->GetOperandSize(); + for (uint32 i = 0; i < opndNum; ++i) { + const OpndDesc *regProp = md->GetOpndDes(i); + ASSERT(regProp != nullptr, "pointer is null in LSRALinearScanRegAllocator::LiveIntervalAnalysis"); + bool isDef = regProp->IsRegDef(); + Operand &opnd = insn->GetOperand(i); + if (isDef) { + auto ®Opnd = static_cast(opnd); + if (regOpnd.IsVirtualRegister() && regOpnd.GetRegisterType() != kRegTyCc) { + /* 1.2 simple insert to active */ + uint32 regNO = regOpnd.GetRegisterNumber(); + LiveInterval *li = liveIntervalsArray[regNO]; + if (li->GetFirstDef() == insn->GetId()) { + (void)active.insert(li); + } + } + } + } + + /* 2 get interfere info, and analysis */ + uint32 interNum = active.size(); + if (LSRA_DUMP) { + LogInfo::MapleLogger() << "In insn " << insn->GetId() << ", " << interNum << " overlap live intervals.\n"; + LogInfo::MapleLogger() << "\n"; + } + + /* 2.2 interfere with each other, analysis which to spill */ + while (interNum > CGOptions::GetOverlapNum()) { + LiveInterval *lowestLi = nullptr; + FindLowestPrioInActive(lowestLi); + if (lowestLi != nullptr) { + if (LSRA_DUMP) { + PrintLiveInterval(*lowestLi, "Pre spilled: "); + } + lowestLi->SetStackSlot(kSpilled); + active.erase(itFinded); + interNum = active.size(); + } else { + break; + } + } + } + } + active.clear(); +} + +/* Iterate through the operands of an instruction for allocation. 
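+ * List, memory and plain register operands are each routed through
+ * OpndNeedAllocation and AssignPhysRegs; a def that cannot get a register falls
+ * back to HandleSpillForInsn, while an unallocated use is simply marked for
+ * spilling via SetOperandSpill.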
*/ +void LSRALinearScanRegAllocator::AssignPhysRegsForInsn(Insn &insn) { + const InsnDesc *md = insn.GetDesc(); + /* At the beginning of the landing pad, we handle the x1, x2 as if they are implicitly defined. */ + if (!insn.GetBB()->GetEhPreds().empty() && &insn == insn.GetBB()->GetFirstInsn()) { + if (!intParamQueue[0].empty()) { + LiveInterval *li = intParamQueue[0].front(); + if (li->GetFirstDef() == insn.GetId()) { + intParamRegSet.erase(li->GetAssignedReg() - firstIntReg); + (void)active.insert(li); + intParamQueue[0].pop_front(); + } + } + + if (!intParamQueue[1].empty()) { + LiveInterval *li = intParamQueue[1].front(); + if (li->GetFirstDef() == insn.GetId()) { + intParamRegSet.erase(li->GetAssignedReg() - firstIntReg); + (void)active.insert(li); + intParamQueue[1].pop_front(); + } + } + } + + if (LSRA_DUMP) { + LogInfo::MapleLogger() << "active in " << insn.GetId() << " :"; + PrintActiveListSimple(); + } + uint32 opndNum = insn.GetOperandSize(); + for (uint32 i = 0; i < opndNum; ++i) { + const OpndDesc *regProp = md->GetOpndDes(i); + ASSERT(regProp != nullptr, "pointer is null in LSRALinearScanRegAllocator::AssignPhysRegsForInsn"); + bool isDef = regProp->IsRegDef(); + Operand &opnd = insn.GetOperand(i); + RegOperand *newOpnd = nullptr; + if (opnd.IsList()) { + auto &listOpnd = static_cast(opnd); + for (auto op : listOpnd.GetOperands()) { + if (!OpndNeedAllocation(insn, *op, isDef, insn.GetId())) { + continue; + } + if (isDef && !fastAlloc) { + InsertParamToActive(*op); + } + newOpnd = AssignPhysRegs(*op, insn); + if (newOpnd != nullptr) { + if (isDef) { + InsertToActive(*op, insn.GetId()); + } + } else { + /* + * If dest and both src are spilled, src will use both of the + * spill registers. + * dest can use any spill reg, choose 0 + */ + if (isDef) { + newOpnd = HandleSpillForInsn(insn, *op); + if (newOpnd != nullptr) { + InsertToActive(*op, insn.GetId()); + } + } else { + SetOperandSpill(*op); + } + } + } + } else if (opnd.IsMemoryAccessOperand()) { + auto &memOpnd = static_cast(opnd); + Operand *base = memOpnd.GetBaseRegister(); + Operand *offset = memOpnd.GetIndexRegister(); + isDef = false; + if (base != nullptr) { + if (OpndNeedAllocation(insn, *base, isDef, insn.GetId())) { + newOpnd = AssignPhysRegs(*base, insn); + if (newOpnd == nullptr) { + SetOperandSpill(*base); + } + /* add ASSERT here. */ + } + } + if (offset != nullptr) { + if (!OpndNeedAllocation(insn, *offset, isDef, insn.GetId())) { + continue; + } + newOpnd = AssignPhysRegs(*offset, insn); + if (newOpnd == nullptr) { + SetOperandSpill(*offset); + } + } + } else { + if (!OpndNeedAllocation(insn, opnd, isDef, insn.GetId())) { + continue; + } + if (isDef && !fastAlloc) { + InsertParamToActive(opnd); + } + newOpnd = AssignPhysRegs(opnd, insn); + if (newOpnd != nullptr) { + if (isDef) { + InsertToActive(opnd, insn.GetId()); + } + } else { + /* + * If dest and both src are spilled, src will use both of the + * spill registers. 
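+         * (E.g., hypothetically with two spill registers s0 and s1: the first
+         * spilled source is reloaded into s0 and the second into s1.)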
+ * dest can use any spill reg, choose 0 + */ + if (isDef) { + newOpnd = HandleSpillForInsn(insn, opnd); + if (newOpnd != nullptr) { + InsertToActive(opnd, insn.GetId()); + } + } else { + SetOperandSpill(opnd); + } + } + } + } +} + +/* Replace Use-Def Opnd */ +RegOperand *LSRALinearScanRegAllocator::GetReplaceUdOpnd(Insn &insn, Operand &opnd, uint32 &spillIdx) { + if (!opnd.IsRegister()) { + return nullptr; + } + const auto *regOpnd = static_cast(&opnd); + + uint32 vRegNO = regOpnd->GetRegisterNumber(); + RegType regType = regOpnd->GetRegisterType(); + if (regType == kRegTyCc || regType == kRegTyVary) { + return nullptr; + } + if (regInfo->IsUntouchableReg(vRegNO)) { + return nullptr; + } + if (regOpnd->IsPhysicalRegister()) { + return nullptr; + } + + ASSERT(vRegNO < liveIntervalsArray.size(), + "index out of range of MapleVector in LSRALinearScanRegAllocator::GetReplaceUdOpnd"); + LiveInterval *li = liveIntervalsArray[vRegNO]; + + regno_t regNO = li->GetAssignedReg(); + if (regInfo->IsCalleeSavedReg(regNO)) { + cgFunc->AddtoCalleeSaved(regNO); + } + + if (li->IsShouldSave()) { + InsertCallerSave(insn, opnd, false); + } else if (li->GetStackSlot() == kSpilled) { + SpillOperand(insn, opnd, false, spillIdx); + SpillOperand(insn, opnd, true, spillIdx); + ++spillIdx; + } + RegOperand *phyOpnd = regInfo->GetOrCreatePhyRegOperand( + static_cast(li->GetAssignedReg()), opnd.GetSize(), regType); + + return phyOpnd; +} + +/* + * Create an operand with physical register assigned, or a spill register + * in the case where a physical register cannot be assigned. + */ +RegOperand *LSRALinearScanRegAllocator::GetReplaceOpnd(Insn &insn, Operand &opnd, uint32 &spillIdx, bool isDef) { + if (!opnd.IsRegister()) { + return nullptr; + } + const auto *regOpnd = static_cast(&opnd); + + uint32 vRegNO = regOpnd->GetRegisterNumber(); + RegType regType = regOpnd->GetRegisterType(); + if (regType == kRegTyCc || regType == kRegTyVary) { + return nullptr; + } + if (regInfo->IsUntouchableReg(vRegNO)) { + return nullptr; + } + if (regOpnd->IsPhysicalRegister()) { + return nullptr; + } + + ASSERT(vRegNO < liveIntervalsArray.size(), + "index out of range of MapleVector in LSRALinearScanRegAllocator::GetReplaceOpnd"); + LiveInterval *li = liveIntervalsArray[vRegNO]; + + regno_t regNO = li->GetAssignedReg(); + if (regInfo->IsCalleeSavedReg(regNO)) { + cgFunc->AddtoCalleeSaved(regNO); + } + + if (li->IsShouldSave()) { + InsertCallerSave(insn, opnd, isDef); + } else if (li->GetStackSlot() == kSpilled) { + spillIdx = isDef ? 0 : spillIdx; + SpillOperand(insn, opnd, isDef, spillIdx); + if (!isDef) { + ++spillIdx; + } + } + RegOperand *phyOpnd = regInfo->GetOrCreatePhyRegOperand( + static_cast(li->GetAssignedReg()), opnd.GetSize(), regType); + + return phyOpnd; +} + +/* Try to estimate if spill callee should be done based on even/odd for stp in prolog. */ +void LSRALinearScanRegAllocator::CheckSpillCallee() { + if (CGOptions::DoCalleeToSpill()) { + uint32 pairCnt = 0; + for (size_t idx = 0; idx < sizeof(uint32); ++idx) { + if ((intCalleeMask & (1ULL << idx)) != 0 && calleeUseCnt[idx] != 0) { + ++pairCnt; + } + } + if ((pairCnt & 0x01) != 0) { + shouldOptIntCallee = true; + } + + for (size_t idx = 0; idx < sizeof(uint32); ++idx) { + if ((fpCalleeMask & (1ULL << idx)) != 0 && calleeUseCnt[idx] != 0) { + ++pairCnt; + } + } + if ((pairCnt & 0x01) != 0) { + shouldOptFpCallee = true; + } + } +} + +/* Iterate through all instructions and change the vreg to preg. 
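+ * For every insn the operands are rewritten in three passes: plain uses first,
+ * then use-def operands, then defs; memory operands are cloned before their
+ * base/index registers are replaced, and spill/reload code is inserted by
+ * GetReplaceOpnd/GetReplaceUdOpnd as needed.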
*/ +void LSRALinearScanRegAllocator::FinalizeRegisters() { + CheckSpillCallee(); + for (BB *bb : bfs->sortedBBs) { + intBBDefMask = 0; + fpBBDefMask = 0; + + FOR_BB_INSNS(insn, bb) { + if (insn->IsImmaterialInsn() || !insn->IsMachineInstruction() || insn->GetId() == 0) { + continue; + } + if (insn->IsCall()) { + intBBDefMask = 0; + fpBBDefMask = 0; + } + + uint32 spillIdx = 0; + const InsnDesc *md = insn->GetDesc(); + uint opndNum = insn->GetOperandSize(); + /* Handle source(use) opernads first */ + for (uint32 i = 0; i < opndNum; ++i) { + const OpndDesc *regProp = md->GetOpndDes(i); + ASSERT(regProp != nullptr, "pointer is null in LSRALinearScanRegAllocator::FinalizeRegisters"); + bool isDef = regProp->IsRegDef(); + if (isDef) { + continue; + } + Operand &opnd = insn->GetOperand(i); + RegOperand *phyOpnd = nullptr; + if (opnd.IsList()) { + /* For arm32, not arm64 */ + } else if (opnd.IsMemoryAccessOperand()) { + auto *memOpnd = + static_cast(static_cast(opnd).Clone(*cgFunc->GetMemoryPool())); + ASSERT(memOpnd != nullptr, "memopnd is null in LSRALinearScanRegAllocator::FinalizeRegisters"); + insn->SetOperand(i, *memOpnd); + Operand *base = memOpnd->GetBaseRegister(); + Operand *offset = memOpnd->GetIndexRegister(); + if (base != nullptr) { + phyOpnd = GetReplaceOpnd(*insn, *base, spillIdx, false); + if (phyOpnd != nullptr) { + memOpnd->SetBaseRegister(*phyOpnd); + } + } + if (offset != nullptr) { + phyOpnd = GetReplaceOpnd(*insn, *offset, spillIdx, false); + if (phyOpnd != nullptr) { + memOpnd->SetIndexRegister(*phyOpnd); + } + } + } else { + phyOpnd = GetReplaceOpnd(*insn, opnd, spillIdx, false); + if (phyOpnd != nullptr) { + insn->SetOperand(i, *phyOpnd); + } + } + } + /* Handle ud(use-def) opernads */ + for (uint32 i = 0; i < opndNum; ++i) { + const OpndDesc *regProp = md->GetOpndDes(i); + ASSERT(regProp != nullptr, "pointer is null in LSRALinearScanRegAllocator::FinalizeRegisters"); + Operand &opnd = insn->GetOperand(i); + bool isUseDef = regProp->IsRegDef() && regProp->IsRegUse(); + if (!isUseDef) { + continue; + } + RegOperand *phyOpnd = GetReplaceUdOpnd(*insn, opnd, spillIdx); + if (phyOpnd != nullptr) { + insn->SetOperand(i, *phyOpnd); + } + } + /* Handle dest(def) opernads last */ + for (uint32 i = 0; i < opndNum; ++i) { + const OpndDesc *regProp = md->GetOpndDes(i); + ASSERT(regProp != nullptr, "pointer is null in LSRALinearScanRegAllocator::FinalizeRegisters"); + Operand &opnd = insn->GetOperand(i); + bool isUse = (regProp->IsRegUse()) || (opnd.IsMemoryAccessOperand()); + if (isUse) { + continue; + } + isSpillZero = false; + RegOperand *phyOpnd = GetReplaceOpnd(*insn, opnd, spillIdx, true); + if (phyOpnd != nullptr) { + insn->SetOperand(i, *phyOpnd); + if (isSpillZero) { + insn->GetBB()->RemoveInsn(*insn); + } + } + } + } + } +} + +void LSRALinearScanRegAllocator::SetAllocMode() { + if (CGOptions::IsFastAlloc()) { + if (CGOptions::GetFastAllocMode() == 0) { + fastAlloc = true; + } else { + spillAll = true; + } + /* In-Range spill range can still be specified (only works with --dump-func=). 
*/ + } else if (cgFunc->NumBBs() > CGOptions::GetLSRABBOptSize()) { + /* instruction size is checked in ComputeLieveInterval() */ + fastAlloc = true; + } + + if (LSRA_DUMP) { + if (fastAlloc) { + LogInfo::MapleLogger() << "fastAlloc mode on\n"; + } + if (spillAll) { + LogInfo::MapleLogger() << "spillAll mode on\n"; + } + } +} + +void LSRALinearScanRegAllocator::LinearScanRegAllocator() { + if (LSRA_DUMP) { + PrintParamQueue("Initial param queue"); + PrintCallQueue("Initial call queue"); + } + /* handle param register */ + for (auto &intParam : intParamQueue) { + if (!intParam.empty() && intParam.front()->GetFirstDef() == 0) { + LiveInterval *li = intParam.front(); + intParamRegSet.erase(li->GetAssignedReg() - firstIntReg); + (void)active.insert(li); + intParam.pop_front(); + } + } + for (auto &fpParam : fpParamQueue) { + if (!fpParam.empty() && fpParam.front()->GetFirstDef() == 0) { + LiveInterval *li = fpParam.front(); + fpParamRegSet.erase(li->GetAssignedReg() - firstFpReg); + (void)active.insert(li); + fpParam.pop_front(); + } + } + + for (BB *bb : bfs->sortedBBs) { + if (LSRA_DUMP) { + LogInfo::MapleLogger() << "======New BB=====" << bb->GetId() << " " << std::hex << bb << std::dec << "\n"; + } + FOR_BB_INSNS(insn, bb) { + if (!insn->IsMachineInstruction()) { + continue; + } + if (insn->GetId() == 0) { + /* New instruction inserted by reg alloc (ie spill) */ + continue; + } + if (LSRA_DUMP) { + LogInfo::MapleLogger() << "======New Insn=====" << insn->GetId() << " " << insn->GetBB()->GetId() << "\n"; + insn->Dump(); + } + RetireFromActive(*insn); +#ifdef LSRA_DEBUG + DebugCheckActiveList(); +#endif + AssignPhysRegsForInsn(*insn); + if (LSRA_DUMP) { + LogInfo::MapleLogger() << "======After Alloc=====" << insn->GetId() << " " << insn->GetBB()->GetId() << "\n"; + insn->Dump(); + } + } + } +} + +/* Main entrance for the LSRA register allocator */ +bool LSRALinearScanRegAllocator::AllocateRegisters() { + cgFunc->SetIsAfterRegAlloc(); + SetAllocMode(); +#ifdef RA_PERF_ANALYSIS + auto begin = std::chrono::system_clock::now(); +#endif + if (LSRA_DUMP) { + const MIRModule &mirModule = cgFunc->GetMirModule(); + DotGenerator::GenerateDot("RA", *cgFunc, mirModule); + DotGenerator::GenerateDot("RAe", *cgFunc, mirModule, true); + LogInfo::MapleLogger() << "Entering LinearScanRegAllocator\n"; + } +/* ================= ComputeBlockOrder =============== */ +#ifdef RA_PERF_ANALYSIS + auto start = std::chrono::system_clock::now(); +#endif + /* + * The basic blocks are sorted into a linear order for allocation. + * initialize block ordering + * Can be either breadth first or depth first. + * To avoid use before set, we prefer breadth first. + * TODO: why we need sort BB here? can it be merged with ISel? 
+ */ + Bfs localBfs(*cgFunc, *memPool); + bfs = &localBfs; + bfs->ComputeBlockOrder(); +#ifdef RA_PERF_ANALYSIS + auto end = std::chrono::system_clock::now(); + bfsUS += std::chrono::duration_cast(end - start).count(); +#endif + +/* ================= LiveInterval =============== */ +#ifdef RA_PERF_ANALYSIS + start = std::chrono::system_clock::now(); +#endif + ComputeLiveInterval(); + +#ifdef LSRA_GRAPH + PrintLiveRanges(); +#endif + + LiveIntervalAnalysis(); +#ifdef RA_PERF_ANALYSIS + end = std::chrono::system_clock::now(); + liveIntervalUS += std::chrono::duration_cast(end - start).count(); +#endif + +/* ================= LiveRange =============== */ +#ifdef RA_PERF_ANALYSIS + start = std::chrono::system_clock::now(); +#endif + /* using live interval + holes to approximate live ranges */ + BuildIntervalRanges(); +#ifdef RA_PERF_ANALYSIS + end = std::chrono::system_clock::now(); + holesUS += std::chrono::duration_cast(end - start).count(); +#endif +/* ================= InitFreeRegPool =============== */ + InitFreeRegPool(); + +/* ================= LinearScanRegAllocator =============== */ +#ifdef RA_PERF_ANALYSIS + start = std::chrono::system_clock::now(); +#endif + LinearScanRegAllocator(); +#ifdef RA_PERF_ANALYSIS + end = std::chrono::system_clock::now(); + lsraUS += std::chrono::duration_cast(end - start).count(); +#endif + +#ifdef RA_PERF_ANALYSIS + start = std::chrono::system_clock::now(); +#endif + FinalizeRegisters(); +#ifdef RA_PERF_ANALYSIS + end = std::chrono::system_clock::now(); + finalizeUS += std::chrono::duration_cast(end - start).count(); +#endif + + if (LSRA_DUMP) { + LogInfo::MapleLogger() << "Total " << spillCount << " spillCount in " << cgFunc->GetName() << " \n"; + LogInfo::MapleLogger() << "Total " << reloadCount << " reloadCount\n"; + LogInfo::MapleLogger() << "Total " << "(" << spillCount << "+ " << callerSaveSpillCount << ") = " << + (spillCount + callerSaveSpillCount) << " SPILL\n"; + LogInfo::MapleLogger() << "Total " << "(" << reloadCount << "+ " << callerSaveReloadCount << ") = " << + (reloadCount + callerSaveReloadCount) << " RELOAD\n"; + uint32_t insertInsn = spillCount + callerSaveSpillCount + reloadCount + callerSaveReloadCount; + float rate = (float(insertInsn) / float(insnNumBeforRA)); + LogInfo::MapleLogger() <<"insn Num Befor RA:"<< insnNumBeforRA <<", insert " << insertInsn << + " insns: " << ", insertInsn/insnNumBeforRA: "<< rate <<"\n"; + } + + bfs = nullptr; /* bfs is not utilized outside the function. */ + +#ifdef RA_PERF_ANALYSIS + end = std::chrono::system_clock::now(); + totalUS += std::chrono::duration_cast(end - begin).count(); +#endif + + return true; +} + +} /* namespace maplebe */ diff --git a/src/mapleall/maple_be/src/cg/reg_coalesce.cpp b/src/mapleall/maple_be/src/cg/reg_coalesce.cpp new file mode 100644 index 0000000000000000000000000000000000000000..c41a6762ebc891dd8ab74381d60695b5b2b2b911 --- /dev/null +++ b/src/mapleall/maple_be/src/cg/reg_coalesce.cpp @@ -0,0 +1,115 @@ +/* + * Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "reg_coalesce.h" +#include "cg_option.h" +#ifdef TARGAARCH64 +#include "aarch64_reg_coalesce.h" +#include "aarch64_isa.h" +#include "aarch64_insn.h" +#endif +#include "cg.h" + +/* + * This phase implements if-conversion optimization, + * which tries to convert conditional branches into cset/csel instructions + */ +namespace maplebe { + +void LiveIntervalAnalysis::Run() { + Analysis(); + CoalesceRegisters(); + ClearBFS(); +} + +void LiveIntervalAnalysis::DoAnalysis() { + runAnalysis = true; + Analysis(); +} + +void LiveIntervalAnalysis::Analysis() { + bfs = memPool->New(*cgFunc, *memPool); + bfs->ComputeBlockOrder(); + ComputeLiveIntervals(); +} + +/* bfs is not utilized outside the function. */ +void LiveIntervalAnalysis::ClearBFS() { + bfs = nullptr; +} + +void LiveIntervalAnalysis::Dump() { + for (auto it : vregIntervals) { + LiveInterval *li = it.second; + li->Dump(); + li->DumpDefs(); + li->DumpUses(); + } +} + +void LiveIntervalAnalysis::CoalesceLiveIntervals(LiveInterval &lrDest, LiveInterval &lrSrc) { + if (cgFunc->IsExtendReg(lrDest.GetRegNO())) { + cgFunc->InsertExtendSet(lrSrc.GetRegNO()); + } + cgFunc->RemoveFromExtendSet(lrDest.GetRegNO()); + /* merge destlr to srclr */ + lrSrc.MergeRanges(lrDest); + /* update conflicts */ + lrSrc.MergeConflict(lrDest); + for (auto reg : lrDest.GetConflict()) { + LiveInterval *conf = GetLiveInterval(reg); + if (conf) { + conf->AddConflict(lrSrc.GetRegNO()); + } + } + /* merge refpoints */ + lrSrc.MergeRefPoints(lrDest); + vregIntervals.erase(lrDest.GetRegNO()); +} + +bool CGliveIntervalAnalysis::PhaseRun(maplebe::CGFunc &f) { + LiveAnalysis *live = GET_ANALYSIS(CgLiveAnalysis, f); + live->ResetLiveSet(); + MemPool *memPool = GetPhaseMemPool(); + liveInterval = f.GetCG()->CreateLLAnalysis(*memPool, f); + liveInterval->DoAnalysis(); + return false; +} +void CGliveIntervalAnalysis::GetAnalysisDependence(AnalysisDep &aDep) const { + aDep.AddRequired(); + aDep.AddRequired(); +} +MAPLE_ANALYSIS_PHASE_REGISTER_CANSKIP(CGliveIntervalAnalysis, cgliveintervalananlysis) + +bool CgRegCoalesce::PhaseRun(maplebe::CGFunc &f) { + LiveAnalysis *live = GET_ANALYSIS(CgLiveAnalysis, f); + live->ResetLiveSet(); + MemPool *memPool = GetPhaseMemPool(); + LiveIntervalAnalysis *ll = f.GetCG()->CreateLLAnalysis(*memPool, f); + ll->Run(); + /* the live range info may changed, so invalid the info. */ + if (live != nullptr) { + live->ClearInOutDataInfo(); + } + return false; +} + +void CgRegCoalesce::GetAnalysisDependence(maple::AnalysisDep &aDep) const { + aDep.AddRequired(); + aDep.AddRequired(); + aDep.PreservedAllExcept(); +} +MAPLE_TRANSFORM_PHASE_REGISTER(CgRegCoalesce, cgregcoalesce) + +} /* namespace maplebe */ diff --git a/src/mapleall/maple_be/src/cg/regsaves.cpp b/src/mapleall/maple_be/src/cg/regsaves.cpp new file mode 100644 index 0000000000000000000000000000000000000000..e37a768ca3348285b533a964e16f6f5fdf5cf91c --- /dev/null +++ b/src/mapleall/maple_be/src/cg/regsaves.cpp @@ -0,0 +1,77 @@ +/* + * Copyright (c) [2022] Futurewei Technologies Co., Ltd. All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan Permissive Software License v2. + * You can use this software according to the terms and conditions of the MulanPSL - 2.0. 
+ * You may obtain a copy of MulanPSL - 2.0 at: + * + * https://opensource.org/licenses/MulanPSL-2.0 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the MulanPSL - 2.0 for more details. + */ + +#include "cgfunc.h" +#if TARGAARCH64 +#include "aarch64_regsaves.h" +#elif TARGRISCV64 +#include "riscv64_regsaves.h" +#endif + +namespace maplebe { +using namespace maple; + +bool CgRegSavesOpt::PhaseRun(maplebe::CGFunc &f) { + if (Globals::GetInstance()->GetOptimLevel() <= CGOptions::kLevel1 || + f.GetMirModule().GetFlavor() == MIRFlavor::kFlavorLmbc) { + return false; + } + + /* Perform loop analysis, result to be obtained in CGFunc */ + (void)GetAnalysisInfoHook()->ForceRunAnalysisPhase, CGFunc>(&CgLoopAnalysis::id, f); + + /* Perform live analysis, result to be obtained in CGFunc */ + LiveAnalysis *live = nullptr; + MaplePhase *it = GetAnalysisInfoHook()-> + ForceRunAnalysisPhase, CGFunc>(&CgLiveAnalysis::id, f); + live = static_cast(it)->GetResult(); + CHECK_FATAL(live != nullptr, "null ptr check"); + /* revert liveanalysis result container. */ + live->ResetLiveSet(); + + /* Perform dom analysis, result to be inserted into AArch64RegSavesOpt object */ + DomAnalysis *dom = nullptr; + PostDomAnalysis *pdom = nullptr; + if (Globals::GetInstance()->GetOptimLevel() >= CGOptions::kLevel1 && + f.GetCG()->GetCGOptions().DoColoringBasedRegisterAllocation()) { + MaplePhase *phase = GetAnalysisInfoHook()-> + ForceRunAnalysisPhase, CGFunc>(&CgDomAnalysis::id, f); + dom = static_cast(phase)->GetResult(); + CHECK_FATAL(dom != nullptr, "null ptr check"); + phase = GetAnalysisInfoHook()-> + ForceRunAnalysisPhase, CGFunc>(&CgPostDomAnalysis::id, f); + pdom = static_cast(phase)->GetResult(); + CHECK_FATAL(pdom != nullptr, "null ptr check"); + } + + MemPool *memPool = GetPhaseMemPool(); + RegSavesOpt *regSavesOpt = nullptr; +#if TARGAARCH64 + regSavesOpt = memPool->New(f, *memPool, *dom, *pdom); +#elif || TARGRISCV64 + regSavesOpt = memPool->New(f, *memPool); +#endif + + if (regSavesOpt) { + regSavesOpt->SetEnabledDebug(false); /* To turn on debug trace */ + if (regSavesOpt->GetEnabledDebug()) { + dom->Dump(); + } + regSavesOpt->Run(); + } + return true; +} +MAPLE_TRANSFORM_PHASE_REGISTER(CgRegSavesOpt, regsaves) +} /* namespace maplebe */ diff --git a/src/mapleall/maple_be/src/cg/rematerialize.cpp b/src/mapleall/maple_be/src/cg/rematerialize.cpp new file mode 100644 index 0000000000000000000000000000000000000000..a9db0a12cc54c2ed0e5938bde0b29bb40fb83a1b --- /dev/null +++ b/src/mapleall/maple_be/src/cg/rematerialize.cpp @@ -0,0 +1,154 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ + +#include "rematerialize.h" +#if TARGAARCH64 +#include "aarch64_color_ra.h" +#endif + +namespace maplebe { + +bool Rematerializer::IsRematerializableForAddrof(CGFunc &cgFunc, const LiveRange &lr) const { + const MIRSymbol *symbol = rematInfo.sym; + if (symbol->IsDeleted()) { + return false; + } + if (symbol->IsThreadLocal()) { + return false; + } + /* cost too much to remat */ + if ((symbol->GetStorageClass() == kScFormal) && (symbol->GetSKind() == kStVar) && + ((fieldID != 0) || + (cgFunc.GetBecommon().GetTypeSize(symbol->GetType()->GetTypeIndex().GetIdx()) > + k16ByteSize))) { + return false; + } + if (!addrUpper && CGOptions::IsPIC() && ((symbol->GetStorageClass() == kScGlobal) || + (symbol->GetStorageClass() == kScExtern))) { + /* check if in loop */ + bool useInLoop = false; + bool defOutLoop = false; + for (auto luIt: lr.GetLuMap()) { + BB *bb = cgFunc.GetBBFromID(luIt.first); + LiveUnit *curLu = luIt.second; + if (bb->GetLoop() != nullptr && curLu->GetUseNum() != 0) { + useInLoop = true; + } + if (bb->GetLoop() == nullptr && curLu->GetDefNum() != 0) { + defOutLoop = true; + } + } + return !(useInLoop && defOutLoop); + } + return true; +} + +bool Rematerializer::IsRematerializableForDread(CGFunc &cgFunc, RematLevel rematLev) const { + const MIRSymbol *symbol = rematInfo.sym; + if (symbol->IsDeleted()) { + return false; + } + // cost greater than benefit + if (symbol->IsThreadLocal()) { + return false; + } + MIRStorageClass storageClass = symbol->GetStorageClass(); + if ((storageClass == kScAuto) || (storageClass == kScFormal)) { + /* cost too much to remat. */ + return false; + } + int32 offset = 0; + if (fieldID != 0) { + ASSERT(symbol->GetType()->IsMIRStructType(), "non-zero fieldID for non-structure"); + MIRStructType *structType = static_cast(symbol->GetType()); + offset = cgFunc.GetBecommon().GetFieldOffset(*structType, fieldID).first; + } + if (rematLev < kRematDreadGlobal && !symbol->IsLocal()) { + return false; + } + return IsRematerializableForDread(offset); +} + +bool Rematerializer::IsRematerializable(CGFunc &cgFunc, RematLevel rematLev, + const LiveRange &lr) const { + if (rematLev == kRematOff) { + return false; + } + switch (op) { + case OP_undef: + return false; + case OP_constval: { + const MIRConst *mirConst = rematInfo.mirConst; + if (mirConst->GetKind() != kConstInt) { + return false; + } + const MIRIntConst *intConst = static_cast(rematInfo.mirConst); + int64 val = intConst->GetExtValue(); + return IsRematerializableForConstval(val, lr.GetSpillSize()); + } + case OP_addrof: { + if (rematLev < kRematAddr) { + return false; + } + return IsRematerializableForAddrof(cgFunc, lr); + } + case OP_dread: { + if (rematLev < kRematDreadLocal) { + return false; + } + return IsRematerializableForDread(cgFunc, rematLev); + } + default: + return false; + } +} + +std::vector Rematerializer::Rematerialize(CGFunc &cgFunc, RegOperand ®Op, + const LiveRange &lr) { + switch (op) { + case OP_constval: { + ASSERT(rematInfo.mirConst->GetKind() == kConstInt, "Unsupported constant"); + return RematerializeForConstval(cgFunc, regOp, lr); + } + case OP_dread: { + const MIRSymbol *symbol = rematInfo.sym; + PrimType symType = symbol->GetType()->GetPrimType(); + int32 offset = 0; + if (fieldID != 0) { + ASSERT(symbol->GetType()->IsMIRStructType(), "non-zero fieldID for non-structure"); + MIRStructType *structType = static_cast(symbol->GetType()); + symType = structType->GetFieldType(fieldID)->GetPrimType(); + offset = cgFunc.GetBecommon().GetFieldOffset(*structType, fieldID).first; + } + 
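+      /* Rematerialize by reloading from the symbol's home location (plus any
+       * field offset) instead of refilling from a spill slot. */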
return RematerializeForDread(cgFunc, regOp, offset, symType); + } + case OP_addrof: { + const MIRSymbol *symbol = rematInfo.sym; + int32 offset = 0; + if (fieldID != 0) { + ASSERT(symbol->GetType()->IsMIRStructType(), "non-zero fieldID for non-structure"); + MIRStructType *structType = static_cast(symbol->GetType()); + offset = cgFunc.GetBecommon().GetFieldOffset(*structType, fieldID).first; + } + return RematerializeForAddrof(cgFunc, regOp, offset); + } + default: + ASSERT(false, "Unexpected op in live range"); + } + + return std::vector(); +} + +} /* namespace maplebe */ \ No newline at end of file diff --git a/src/mapleall/maple_be/src/cg/schedule.cpp b/src/mapleall/maple_be/src/cg/schedule.cpp new file mode 100644 index 0000000000000000000000000000000000000000..920b5bad2306d5a2f3f29771f76e705820caa33a --- /dev/null +++ b/src/mapleall/maple_be/src/cg/schedule.cpp @@ -0,0 +1,940 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#if TARGAARCH64 +#include "aarch64_schedule.h" +#elif TARGRISCV64 +#include "riscv64_schedule.h" +#endif +#if TARGARM32 +#include "arm32_schedule.h" +#endif +#include "cg.h" +#include "optimize_common.h" + +#undef PRESCHED_DEBUG + +namespace maplebe { +/* pressure standard value; pressure under this value will not lead to spill operation */ +static constexpr int kPressureStandard = 27; +/* optimistic scheduling option */ +static constexpr bool kOptimisticScheduling = false; +/* brute maximum count limit option */ +static constexpr bool kBruteMaximumLimit = true; +/* brute maximum count */ +static constexpr int kSchedulingMaximumCount = 20000; + +/* ---- RegPressureSchedule function ---- */ +void RegPressureSchedule::InitBBInfo(BB &b, MemPool &memPool, const MapleVector &nodes) { + bb = &b; + liveReg.clear(); + scheduledNode.clear(); + readyList.clear(); + maxPriority = 0; + maxPressure = memPool.NewArray(RegPressure::GetMaxRegClassNum()); + curPressure = memPool.NewArray(RegPressure::GetMaxRegClassNum()); + physicalRegNum = memPool.NewArray(RegPressure::GetMaxRegClassNum()); + for (auto node : nodes) { + node->SetState(kNormal); + } +} + +/* return register type according to register number */ +RegType RegPressureSchedule::GetRegisterType(regno_t reg) const { + return cgFunc.GetRegisterType(reg); +} + +/* Get amount of every physical register */ +void RegPressureSchedule::BuildPhyRegInfo(const std::vector ®NumVec) { + FOR_ALL_REGCLASS(i) { + physicalRegNum[i] = regNumVec[i]; + } +} + +/* Initialize pre-scheduling split point in BB */ +void RegPressureSchedule::InitPartialSplitters(const MapleVector &nodes) { + bool addFirstAndLastNodeIndex = false; + constexpr uint32 kSecondLastNodeIndexFromBack = 2; + constexpr uint32 kLastNodeIndexFromBack = 1; + constexpr uint32 kFirstNodeIndex = 0; + constexpr uint32 kMiniMumBBSize = 2; + /* Add split point for the last instruction in return BB */ + if (bb->GetKind() == BB::kBBReturn && nodes.size() > kMiniMumBBSize) { + splitterIndexes.emplace_back(nodes.size() - kSecondLastNodeIndexFromBack); + 
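+    /* Together with the first/last indexes added below, the sorted split points
+     * become {0, size - 2, size - 1}; e.g. a 10-node return BB (illustrative) is
+     * split at 0, 8 and 9. */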
addFirstAndLastNodeIndex = true; + } + /* Add first and last node as split point if needed */ + if (addFirstAndLastNodeIndex) { + splitterIndexes.emplace_back(nodes.size() - kLastNodeIndexFromBack); + splitterIndexes.emplace_back(kFirstNodeIndex); + } + std::sort(splitterIndexes.begin(), splitterIndexes.end(), std::less{}); +} + +/* initialize register pressure information according to bb's live-in data. + * initialize node's valid preds size. + */ +void RegPressureSchedule::Init(const MapleVector &nodes) { + readyList.clear(); + scheduledNode.clear(); + liveReg.clear(); + liveInRegNO.clear(); + liveOutRegNO.clear(); + liveInRegNO = bb->GetLiveInRegNO(); + liveOutRegNO = bb->GetLiveOutRegNO(); + + FOR_ALL_REGCLASS(i) { + curPressure[i] = 0; + maxPressure[i] = 0; + } + + for (auto *node : nodes) { + /* calculate the node uses'register pressure */ + for (auto &useReg : node->GetUseRegnos()) { + CalculatePressure(*node, useReg, false); + } + + /* calculate the node defs'register pressure */ + size_t i = 0; + for (auto &defReg : node->GetDefRegnos()) { + CalculatePressure(*node, defReg, true); + RegType regType = GetRegisterType(defReg); + /* if no use list, a register is only defined, not be used */ + if (node->GetRegDefs(i) == nullptr && liveOutRegNO.find(defReg) == liveOutRegNO.end()) { + node->IncDeadDefByIndex(regType); + } + ++i; + } + /* Calculate pred size of the node */ + CalculatePredSize(*node); + } + + DepNode *firstNode = nodes.front(); + readyList.emplace_back(firstNode); + firstNode->SetState(kReady); + scheduledNode.reserve(nodes.size()); + constexpr size_t readyListSize = 10; + readyList.reserve(readyListSize); +} + +void RegPressureSchedule::SortReadyList() { + std::sort(readyList.begin(), readyList.end(), DepNodePriorityCmp); +} + +/* return true if nodes1 first. */ +bool RegPressureSchedule::DepNodePriorityCmp(const DepNode *node1, const DepNode *node2) { + CHECK_NULL_FATAL(node1); + CHECK_NULL_FATAL(node2); + int32 priority1 = node1->GetPriority(); + int32 priority2 = node2->GetPriority(); + if (priority1 != priority2) { + return priority1 > priority2; + } + + int32 numCall1 = node1->GetNumCall(); + int32 numCall2 = node2->GetNumCall(); + if (node1->GetIncPressure() == true && node2->GetIncPressure() == true) { + if (numCall1 != numCall2) { + return numCall1 > numCall2; + } + } + + int32 near1 = node1->GetNear(); + int32 near2 = node1->GetNear(); + int32 depthS1 = node1->GetMaxDepth() + near1; + int32 depthS2 = node2->GetMaxDepth() + near2; + if (depthS1 != depthS2) { + return depthS1 > depthS2; + } + + if (near1 != near2) { + return near1 > near2; + } + + if (numCall1 != numCall2) { + return numCall1 > numCall2; + } + + size_t succsSize1 = node1->GetSuccs().size(); + size_t succsSize2 = node1->GetSuccs().size(); + if (succsSize1 != succsSize2) { + return succsSize1 < succsSize2; + } + + if (node1->GetHasPreg() != node2->GetHasPreg()) { + return node1->GetHasPreg(); + } + + return node1->GetInsn()->GetId() < node2->GetInsn()->GetId(); +} + +/* set a node's incPressure is true, when a class register inscrease */ +void RegPressureSchedule::ReCalculateDepNodePressure(DepNode &node) const { + /* if there is a type of register pressure increases, set incPressure as true. */ + auto &pressures = node.GetPressure(); + node.SetIncPressure(pressures[kRegisterInt] > 0); +} + +/* calculate the maxDepth of every node in nodes. */ +void RegPressureSchedule::CalculateMaxDepth(const MapleVector &nodes) const { + /* from the last node to first node. 
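+   * so each node's maxDepth/numCall can be accumulated from successors already
+   * visited; e.g. for a dependence chain a -> b -> c the resulting maxDepth
+   * values are 2, 1 and 0 (illustrative).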
*/ + for (auto it = nodes.rbegin(); it != nodes.rend(); ++it) { + /* init call count */ + if ((*it)->GetInsn()->IsCall()) { + (*it)->SetNumCall(1); + } + /* traversing each successor of it. */ + for (auto succ : (*it)->GetSuccs()) { + DepNode &to = succ->GetTo(); + if ((*it)->GetMaxDepth() < (to.GetMaxDepth() + 1)) { + (*it)->SetMaxDepth(to.GetMaxDepth() + 1); + } + + if (to.GetInsn()->IsCall() && ((*it)->GetNumCall() < to.GetNumCall() + 1)) { + (*it)->SetNumCall(to.GetNumCall() + 1); + } else if ((*it)->GetNumCall() < to.GetNumCall()) { + (*it)->SetNumCall(to.GetNumCall()); + } + } + } +} + +/* calculate the near of every successor of the node. */ +void RegPressureSchedule::CalculateNear(const DepNode &node) { + for (auto succ : node.GetSuccs()) { + DepNode &to = succ->GetTo(); + if (succ->GetDepType() == kDependenceTypeTrue && to.GetNear() < node.GetNear() + 1) { + to.SetNear(node.GetNear() + 1); + } + } +} + +/* return true if it is last time using the regNO. */ +bool RegPressureSchedule::IsLastUse(const DepNode &node, regno_t regNO) { + size_t i = 0; + for (auto reg : node.GetUseRegnos()) { + if (reg == regNO) { + break; + } + ++i; + } + RegList *regList = node.GetRegUses(i); + + /* + * except the node, if there are insn that has no scheduled in regNO's regList, + * then it is not the last time using the regNO, return false. + */ + while (regList != nullptr) { + CHECK_NULL_FATAL(regList->insn); + DepNode *useNode = regList->insn->GetDepNode(); + ASSERT(useNode != nullptr, "get depend node failed in RegPressureSchedule::IsLastUse"); + if ((regList->insn != node.GetInsn()) && (useNode->GetState() != kScheduled)) { + return false; + } + regList = regList->next; + } + return true; +} + +void RegPressureSchedule::CalculatePressure(DepNode &node, regno_t reg, bool def) const { + RegType regType = GetRegisterType(reg); + /* if def a register, register pressure increase. */ + if (def) { + node.IncPressureByIndex(regType); + } else { + /* if it is the last time using the reg, register pressure decrease. */ + if (IsLastUse(node, reg)) { + node.DecPressureByIndex(regType); + } + } +} + +/* update live reg information. */ +void RegPressureSchedule::UpdateLiveReg(const DepNode &node, regno_t reg, bool def) { + if (def) { + if (liveReg.find(reg) == liveReg.end()) { + (void)liveReg.insert(reg); +#ifdef PRESCHED_DEBUG + LogInfo::MapleLogger() << "Add new def R" << reg << " to live reg list \n"; +#endif + } + /* if no use list, a register is only defined, not be used */ + size_t i = 1; + for (auto defReg : node.GetDefRegnos()) { + if (defReg == reg) { + break; + } + ++i; + } + if (node.GetRegDefs(i) == nullptr && liveOutRegNO.find(reg) == liveOutRegNO.end()) { +#ifdef PRESCHED_DEBUG + LogInfo::MapleLogger() << "Remove dead def " << reg << " from live reg list \n"; +#endif + liveReg.erase(reg); + } else if (node.GetRegDefs(i) != nullptr) { +#ifdef PRESCHED_DEBUG + auto regList = node.GetRegDefs(i); + LogInfo::MapleLogger() << i << " Live def, dump use insn here \n"; + while (regList != nullptr) { + node.GetRegDefs(i)->insn->Dump(); + regList = regList->next; + } +#endif + } + } else { + if (IsLastUse(node, reg)) { + if (liveReg.find(reg) != liveReg.end() && liveOutRegNO.find(reg) == liveOutRegNO.end()) { +#ifdef PRESCHED_DEBUG + LogInfo::MapleLogger() << "Remove last use R" << reg << " from live reg list\n"; +#endif + liveReg.erase(reg); + } + } + } +} + +/* update register pressure information. 
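+ * The scheduled node's pressure deltas (minus its dead defs) are folded into
+ * curPressure and the peak is recorded in maxPressure; for each register the
+ * node reads, the next unscheduled user that is the register's last use has
+ * its pressure decremented.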
*/ +void RegPressureSchedule::UpdateBBPressure(const DepNode &node) { + size_t idx = 0; + for (auto ® : node.GetUseRegnos()) { +#ifdef PRESCHED_DEBUG + LogInfo::MapleLogger() << "Use Reg : R" << reg << "\n"; + UpdateLiveReg(node, reg, false); + if (liveReg.find(reg) == liveReg.end()) { + ++idx; + continue; + } +#endif + + /* find all insn that use the reg, if a insn use the reg lastly, insn'pressure - 1 */ + RegList *regList = node.GetRegUses(idx); + + while (regList != nullptr) { + CHECK_NULL_FATAL(regList->insn); + DepNode *useNode = regList->insn->GetDepNode(); + if (useNode->GetState() == kScheduled) { + regList = regList->next; + continue; + } + + if (IsLastUse(*useNode, reg)) { + RegType regType = GetRegisterType(reg); + useNode->DecPressureByIndex(regType); + } + break; + } + ++idx; + } + +#ifdef PRESCHED_DEBUG + for (auto &defReg : node.GetDefRegnos()) { + UpdateLiveReg(node, defReg, true); + } +#endif + + const auto &pressures = node.GetPressure(); + const auto &deadDefNum = node.GetDeadDefNum(); +#ifdef PRESCHED_DEBUG + LogInfo::MapleLogger() << "\nnode's pressure: "; + for (auto pressure : pressures) { + LogInfo::MapleLogger() << pressure << " "; + } + LogInfo::MapleLogger() << "\n"; +#endif + + FOR_ALL_REGCLASS(i) { + curPressure[i] += pressures[i]; + curPressure[i] -= deadDefNum[i]; + if (curPressure[i] > maxPressure[i]) { + maxPressure[i] = curPressure[i]; + } + } +} + +/* update node priority and try to update the priority of all node's ancestor. */ +void RegPressureSchedule::UpdatePriority(DepNode &node) { + std::vector workQueue; + workQueue.emplace_back(&node); + node.SetPriority(maxPriority++); + do { + DepNode *nowNode = workQueue.front(); + (void)workQueue.erase(workQueue.begin()); + for (auto pred : nowNode->GetPreds()) { + DepNode &from = pred->GetFrom(); + if (from.GetState() != kScheduled && from.GetPriority() < maxPriority) { + from.SetPriority(maxPriority); + workQueue.emplace_back(&from); + } + } + } while (!workQueue.empty()); +} + +/* return true if all node's pred has been scheduled. */ +bool RegPressureSchedule::CanSchedule(const DepNode &node) const { + return node.GetValidPredsSize() == 0; +} + +/* + * delete node from readylist and + * add the successor of node to readyList when + * 1. successor has no been scheduled; + * 2. successor's has been scheduled or the dependence between node and successor is true-dependence. 
+ */ +void RegPressureSchedule::UpdateReadyList(const DepNode &node) { + /* delete node from readylist */ + for (auto it = readyList.begin(); it != readyList.end(); ++it) { + if (*it == &node) { + readyList.erase(it); + break; + } + } + /* update dependency information of the successors and add nodes into readyList */ + for (auto *succ : node.GetSuccs()) { + DepNode &succNode = succ->GetTo(); + if (!partialSet.empty() && (partialSet.find(&succNode) == partialSet.end())) { + continue; + } + succNode.DescreaseValidPredsSize(); + if (((succ->GetDepType() == kDependenceTypeTrue) || CanSchedule(succNode)) && (succNode.GetState() == kNormal)) { + readyList.emplace_back(&succNode); + succNode.SetState(kReady); + } + } +} +/* + * Another version of UpdateReadyList for brute force ready list update + * The difference is to store the state change status for the successors for later restoring + */ +void RegPressureSchedule::BruteUpdateReadyList(const DepNode &node, std::vector &changedToReady) { + /* delete node from readylist */ + for (auto it = readyList.begin(); it != readyList.end(); ++it) { + if (*it == &node) { + readyList.erase(it); + break; + } + } + /* update dependency information of the successors and add nodes into readyList */ + for (auto *succ : node.GetSuccs()) { + DepNode &succNode = succ->GetTo(); + if (!partialSet.empty() && (partialSet.find(&succNode) == partialSet.end())) { + continue; + } + succNode.DescreaseValidPredsSize(); + if (((succ->GetDepType() == kDependenceTypeTrue) || CanSchedule(succNode)) && (succNode.GetState() == kNormal)) { + readyList.emplace_back(&succNode); + succNode.SetState(kReady); + changedToReady.emplace_back(true); + } else { + changedToReady.emplace_back(false); + } + } +} + +/* + * Restore the ready list status when finishing one brute scheduling series generation + */ +void RegPressureSchedule::RestoreReadyList(DepNode &node, std::vector &changedToReady) { + uint32 i = 0; + /* restore state information of the successors and delete them from readyList */ + for (auto *succ : node.GetSuccs()) { + DepNode &succNode = succ->GetTo(); + succNode.IncreaseValidPredsSize(); + if (changedToReady.at(i)) { + succNode.SetState(kNormal); + for (auto it = readyList.begin(); it != readyList.end(); ++it) { + if (*it == &succNode) { + readyList.erase(it); + break; + } + } + } + ++i; + } + /* add the node back into the readyList */ + readyList.emplace_back(&node); +} +/* choose a node to schedule */ +DepNode *RegPressureSchedule::ChooseNode() { + DepNode *node = nullptr; + for (auto *it : readyList) { + if (!it->GetIncPressure() && !it->GetHasNativeCallRegister()) { + if (CanSchedule(*it)) { + return it; + } else if (node == nullptr) { + node = it; + } + } + } + if (node == nullptr) { + node = readyList.front(); + } + return node; +} + +void RegPressureSchedule::DumpBBLiveInfo() const { + LogInfo::MapleLogger() << "Live In: "; + for (auto reg : bb->GetLiveInRegNO()) { + LogInfo::MapleLogger() << "R" <GetLiveOutRegNO()) { + LogInfo::MapleLogger() << "R" << reg << " "; + } + LogInfo::MapleLogger() << "\n"; +} + +void RegPressureSchedule::DumpReadyList() const { + LogInfo::MapleLogger() << "readyList: " << "\n"; + for (DepNode *it : readyList) { + if (CanSchedule(*it)) { + LogInfo::MapleLogger() << it->GetInsn()->GetId() << "CS "; + } else { + LogInfo::MapleLogger() << it->GetInsn()->GetId() << "NO "; + } + } + LogInfo::MapleLogger() << "\n"; +} + +void RegPressureSchedule::DumpSelectInfo(const DepNode &node) const { + LogInfo::MapleLogger() << "select a node: " << "\n"; + 
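+  /* Print the chosen node's scheduling info, its register pressure, the insn
+   * itself and the live register set at this point. */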
node.DumpSchedInfo(); + node.DumpRegPressure(); + node.GetInsn()->Dump(); + + LogInfo::MapleLogger() << "liveReg: "; + for (auto reg : liveReg) { + LogInfo::MapleLogger() << "R" << reg << " "; + } + LogInfo::MapleLogger() << "\n"; + + LogInfo::MapleLogger() << "\n"; +} + +void RegPressureSchedule::DumpDependencyInfo(const MapleVector &nodes) { + LogInfo::MapleLogger() << "Dump Dependency Begin \n"; + for (auto node : nodes) { + LogInfo::MapleLogger() << "Insn \n"; + node->GetInsn()->Dump(); + LogInfo::MapleLogger() << "Successors \n"; + /* update dependency information of the successors and add nodes into readyList */ + for (auto *succ : node->GetSuccs()) { + DepNode &succNode = succ->GetTo(); + succNode.GetInsn()->Dump(); + } + } + LogInfo::MapleLogger() << "Dump Dependency End \n"; +} + +void RegPressureSchedule::ReportScheduleError() const { + LogInfo::MapleLogger() << "Error No Equal Length for Series" << "\n"; + DumpDependencyInfo(originalNodeSeries); + for (auto node : scheduledNode) { + node->GetInsn()->Dump(); + } + LogInfo::MapleLogger() << "Original One" << "\n"; + for (auto node : originalNodeSeries) { + node->GetInsn()->Dump(); + } + LogInfo::MapleLogger() << "Error No Equal Length for End" << "\n"; +} + +void RegPressureSchedule::ReportScheduleOutput() const { + LogInfo::MapleLogger() << "Original Pressure : " << originalPressure << " \n"; + LogInfo::MapleLogger() << "Scheduled Pressure : " << scheduledPressure << " \n"; + if (originalPressure > scheduledPressure) { + LogInfo::MapleLogger() << "Pressure Reduced by : " << (originalPressure - scheduledPressure) << " \n"; + return; + } else if (originalPressure == scheduledPressure) { + LogInfo::MapleLogger() << "Pressure Not Changed \n"; + } else { + LogInfo::MapleLogger() << "Pressure Increased by : " << (scheduledPressure - originalPressure) << " \n"; + } + LogInfo::MapleLogger() << "Pressure Not Reduced, Restore Node Series \n"; +} + +void RegPressureSchedule::DumpBBPressureInfo() const { + LogInfo::MapleLogger() << "curPressure: "; + FOR_ALL_REGCLASS(i) { + LogInfo::MapleLogger() << curPressure[i] << " "; + } + LogInfo::MapleLogger() << "\n"; + + LogInfo::MapleLogger() << "maxPressure: "; + FOR_ALL_REGCLASS(i) { + LogInfo::MapleLogger() << maxPressure[i] << " "; + } + LogInfo::MapleLogger() << "\n"; +} + +void RegPressureSchedule::DoScheduling(MapleVector &nodes) { + /* Store the original series */ + originalNodeSeries.clear(); + for (auto node : nodes) { + originalNodeSeries.emplace_back(node); + } + InitPartialSplitters(nodes); +#if PRESCHED_DEBUG + LogInfo::MapleLogger() << "\n Calculate Pressure Info for Schedule Input Series \n"; +#endif + originalPressure = CalculateRegisterPressure(nodes); +#if PRESCHED_DEBUG + LogInfo::MapleLogger() << "Original pressure : " << originalPressure << "\n"; +#endif + /* Original pressure is small enough, skip pre-scheduling */ + if (originalPressure < kPressureStandard) { +#if PRESCHED_DEBUG + LogInfo::MapleLogger() << "Original pressure is small enough, skip pre-scheduling \n"; +#endif + return; + } + if (splitterIndexes.empty()) { + LogInfo::MapleLogger() << "No splitter, normal scheduling \n"; + if (!kOptimisticScheduling) { + HeuristicScheduling(nodes); + } else { + InitBruteForceScheduling(nodes); + BruteForceScheduling(); + if (optimisticScheduledNodes.size() == nodes.size() && minPressure < originalPressure) { + nodes.clear(); + for (auto node : optimisticScheduledNodes) { + nodes.emplace_back(node); + } + } + } + } else { + /* Split the node list into multiple parts based on 
split point and conduct scheduling */
+    PartialScheduling(nodes);
+  }
+  scheduledPressure = CalculateRegisterPressure(nodes);
+  EmitSchedulingSeries(nodes);
+}
+
+void RegPressureSchedule::HeuristicScheduling(MapleVector<DepNode*> &nodes) {
+#ifdef PRESCHED_DEBUG
+  LogInfo::MapleLogger() << "--------------- bb " << bb->GetId() << " begin scheduling -------------" << "\n";
+  DumpBBLiveInfo();
+#endif
+
+  /* initialize register pressure information and readyList. */
+  Init(nodes);
+  CalculateMaxDepth(nodes);
+  while (!readyList.empty()) {
+    /* calculate register pressure */
+    for (DepNode *it : readyList) {
+      ReCalculateDepNodePressure(*it);
+    }
+    if (readyList.size() > 1) {
+      SortReadyList();
+    }
+
+    /* choose a node that can be scheduled currently. */
+    DepNode *node = ChooseNode();
+#ifdef PRESCHED_DEBUG
+    DumpBBPressureInfo();
+    DumpReadyList();
+    LogInfo::MapleLogger() << "first tmp select node: " << node->GetInsn()->GetId() << "\n";
+#endif
+
+    while (!CanSchedule(*node)) {
+      UpdatePriority(*node);
+      SortReadyList();
+      node = readyList.front();
+#ifdef PRESCHED_DEBUG
+      LogInfo::MapleLogger() << "update ready list: " << "\n";
+      DumpReadyList();
+#endif
+    }
+
+    scheduledNode.emplace_back(node);
+    /* mark node as scheduled */
+    node->SetState(kScheduled);
+    UpdateBBPressure(*node);
+    CalculateNear(*node);
+    UpdateReadyList(*node);
+#ifdef PRESCHED_DEBUG
+    DumpSelectInfo(*node);
+#endif
+  }
+
+#ifdef PRESCHED_DEBUG
+  LogInfo::MapleLogger() << "---------------------------------- end --------------------------------" << "\n";
+#endif
+  /* update nodes according to scheduledNode. */
+  nodes.clear();
+  for (auto node : scheduledNode) {
+    nodes.emplace_back(node);
+  }
+}
+/*
+ * Calculate the register pressure for the current BB based on an instruction series
+ */
+int RegPressureSchedule::CalculateRegisterPressure(MapleVector<DepNode*> &nodes) {
+  /* Initialize the live, live-in, live-out and max pressure information */
+  liveReg.clear();
+  liveInRegNO = bb->GetLiveInRegNO();
+  liveOutRegNO = bb->GetLiveOutRegNO();
+  std::vector<ScheduleState> restoreStateSeries;
+  int maximumPressure = 0;
+  /* Mock all the nodes to kScheduled status for pressure calculation */
+  for (auto node : nodes) {
+    restoreStateSeries.emplace_back(node->GetState());
+    node->SetState(kScheduled);
+  }
+  /* Update the live register set according to the instruction series */
+  for (auto node : nodes) {
+    for (auto &reg : node->GetUseRegnos()) {
+      UpdateLiveReg(*node, reg, false);
+    }
+    for (auto &defReg : node->GetDefRegnos()) {
+      UpdateLiveReg(*node, defReg, true);
+    }
+    int currentPressure = static_cast<int>(liveReg.size());
+    if (currentPressure > maximumPressure) {
+      maximumPressure = currentPressure;
+    }
+#ifdef PRESCHED_DEBUG
+    node->GetInsn()->Dump();
+    LogInfo::MapleLogger() << "Dump Live Reg : " << "\n";
+    for (auto reg : liveReg) {
+      LogInfo::MapleLogger() << "R" << reg << " ";
+    }
+    LogInfo::MapleLogger() << "\n";
+#endif
+  }
+  /* Restore the schedule state */
+  uint32 i = 0;
+  for (auto node : nodes) {
+    node->SetState(restoreStateSeries.at(i));
+    ++i;
+  }
+  return maximumPressure;
+}
+
+/*
+ * Split the series into multiple parts and conduct pre-scheduling in every part
+ */
+void RegPressureSchedule::PartialScheduling(MapleVector<DepNode*> &nodes) {
+  for (size_t i = 0; i < splitterIndexes.size() - 1; ++i) {
+    constexpr uint32 lastTwoNodeIndex = 2;
+    auto begin = static_cast<uint32>(splitterIndexes.at(i));
+    auto end = static_cast<uint32>(splitterIndexes.at(i + 1));
+    for (uint32 j = begin; j < end; ++j) {
+      partialList.emplace_back(nodes.at(j));
+    }
+    if (i == splitterIndexes.size() - 
lastTwoNodeIndex) { + partialList.emplace_back(nodes.at(end)); + } + for (auto node : partialList) { + partialSet.insert(node); + } + HeuristicScheduling(partialList); + for (auto node : partialList) { + partialScheduledNode.emplace_back(node); + } + partialList.clear(); + partialSet.clear(); + } + nodes.clear(); + /* Construct overall scheduling output */ + for (auto node : partialScheduledNode) { + nodes.emplace_back(node); + } +} + +/* + * Brute-force scheduling algorithm + * It enumerates all the possible schedule series and pick a best one + */ +void RegPressureSchedule::BruteForceScheduling() { + /* stop brute force scheduling when exceeding the count limit */ + if (kBruteMaximumLimit && (scheduleSeriesCount > kSchedulingMaximumCount)) { + return; + } + int defaultPressureValue = -1; + /* ReadyList is empty, scheduling is over */ + if (readyList.empty()) { + if (originalNodeSeries.size() != scheduledNode.size()) { +#ifdef PRESCHED_DEBUG + ReportScheduleError(); +#endif + return; + } + ++scheduleSeriesCount; + int currentPressure = CalculateRegisterPressure(scheduledNode); + if (minPressure == defaultPressureValue || currentPressure < minPressure) { + minPressure = currentPressure; + /* update better scheduled series */ + optimisticScheduledNodes.clear(); + for (auto node : scheduledNode) { + optimisticScheduledNodes.emplace_back(node); + } + return; + } + return; + } + /* store the current status of the ready list */ + std::vector innerList; + for (auto tempNode : readyList) { + innerList.emplace_back(tempNode); + } + for (auto *node : innerList) { + if (CanSchedule(*node)) { + /* update readyList and node dependency info */ + std::vector changedToReady; + BruteUpdateReadyList(*node, changedToReady); + scheduledNode.emplace_back(node); + node->SetState(kScheduled); + BruteForceScheduling(); + node->SetState(kReady); + /* restore readyList and node dependency info */ + RestoreReadyList(*node, changedToReady); + scheduledNode.pop_back(); + } + } +} + +/* + * Calculate the pred size based on the dependency information + */ +void RegPressureSchedule::CalculatePredSize(DepNode &node) { + constexpr uint32 emptyPredsSize = 0; + node.SetValidPredsSize(emptyPredsSize); + for (auto pred : node.GetPreds()) { + DepNode &from = pred->GetFrom(); + if (!partialSet.empty() && (partialSet.find(&from) == partialSet.end())) { + continue; + } else { + node.IncreaseValidPredsSize(); + } + } +} + +void RegPressureSchedule::InitBruteForceScheduling(MapleVector &nodes) { + /* Calculate pred size of the node */ + for (auto node : nodes) { + CalculatePredSize(*node); + } + readyList.clear(); + optimisticScheduledNodes.clear(); + scheduledNode.clear(); + DepNode *firstNode = nodes.front(); + firstNode->SetState(kReady); + readyList.emplace_back(firstNode); +} + +/* + * Give out the pre-scheduling output based on new register pressure + */ +void RegPressureSchedule::EmitSchedulingSeries(MapleVector &nodes) { +#ifdef PRESCHED_DEBUG + ReportScheduleOutput(); +#endif + if (originalPressure <= scheduledPressure) { + /* Restore the original series */ + nodes.clear(); + for (auto node : originalNodeSeries) { + nodes.emplace_back(node); + } + } +} + +/* + * ------------- Schedule function ---------- + * calculate and mark each insn id, each BB's firstLoc and lastLoc. + */ +void Schedule::InitIDAndLoc() { + uint32 id = 0; + FOR_ALL_BB(bb, &cgFunc) { + bb->SetLastLoc(bb->GetPrev() ? 
bb->GetPrev()->GetLastLoc() : nullptr); + FOR_BB_INSNS(insn, bb) { + insn->SetId(id++); +#if DEBUG + insn->AppendComment(" Insn id: " + std::to_string(insn->GetId())); +#endif + if (insn->IsImmaterialInsn() && !insn->IsComment()) { + bb->SetLastLoc(insn); + } else if (!bb->GetFirstLoc() && insn->IsMachineInstruction()) { + bb->SetFirstLoc(*bb->GetLastLoc()); + } + } + } +} + +/* === new pm === */ +bool CgPreScheduling::PhaseRun(maplebe::CGFunc &f) { + if (f.HasAsm()) { + return true; + } + if (LIST_SCHED_DUMP_NEWPM) { + LogInfo::MapleLogger() << "Before CgDoPreScheduling : " << f.GetName() << "\n"; + DotGenerator::GenerateDot("preschedule", f, f.GetMirModule(), true); + } + auto *live = GET_ANALYSIS(CgLiveAnalysis, f); + /* revert liveanalysis result container. */ + ASSERT(live != nullptr, "nullptr check"); + live->ResetLiveSet(); + + Schedule *schedule = nullptr; +#if TARGAARCH64 || TARGRISCV64 + schedule = GetPhaseAllocator()->New(f, *GetPhaseMemPool(), *live, PhaseName()); +#endif +#if TARGARM32 + schedule = GetPhaseAllocator()->New(f, *GetPhaseMemPool(), *live, PhaseName()); +#endif + schedule->ListScheduling(true); + live->ClearInOutDataInfo(); + + return true; +} + +void CgPreScheduling::GetAnalysisDependence(maple::AnalysisDep &aDep) const { + aDep.AddRequired(); + aDep.PreservedAllExcept(); +} +MAPLE_TRANSFORM_PHASE_REGISTER_CANSKIP(CgPreScheduling, prescheduling) + +bool CgScheduling::PhaseRun(maplebe::CGFunc &f) { + if (f.HasAsm()) { + return true; + } + if (LIST_SCHED_DUMP_NEWPM) { + LogInfo::MapleLogger() << "Before CgDoScheduling : " << f.GetName() << "\n"; + DotGenerator::GenerateDot("scheduling", f, f.GetMirModule(), true); + } + auto *live = GET_ANALYSIS(CgLiveAnalysis, f); + /* revert liveanalysis result container. */ + ASSERT(live != nullptr, "nullptr check"); + live->ResetLiveSet(); + + Schedule *schedule = nullptr; +#if TARGAARCH64 || TARGRISCV64 + schedule = GetPhaseAllocator()->New(f, *GetPhaseMemPool(), *live, PhaseName()); +#endif +#if TARGARM32 + schedule = GetPhaseAllocator()->New(f, *GetPhaseMemPool(), *live, PhaseName()); +#endif + schedule->ListScheduling(false); + live->ClearInOutDataInfo(); + + return true; +} + +void CgScheduling::GetAnalysisDependence(maple::AnalysisDep &aDep) const { + aDep.AddRequired(); + aDep.PreservedAllExcept(); +} +MAPLE_TRANSFORM_PHASE_REGISTER_CANSKIP(CgScheduling, scheduling) +} /* namespace maplebe */ diff --git a/src/mapleall/maple_be/src/cg/standardize.cpp b/src/mapleall/maple_be/src/cg/standardize.cpp new file mode 100644 index 0000000000000000000000000000000000000000..cf14b253f5931e460dbebcc406cbdd0131ed9079 --- /dev/null +++ b/src/mapleall/maple_be/src/cg/standardize.cpp @@ -0,0 +1,92 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "isel.h" +#include "standardize.h" +#include "cg.h" + +namespace maplebe { + +void Standardize::DoStandardize() { + /* two address mapping first */ + FOR_ALL_BB(bb, cgFunc) { + FOR_BB_INSNS(insn, bb) { + if (insn->IsMachineInstruction()) { + continue; + } + if (NeedAddressMapping(*insn)) { + AddressMapping(*insn); + } + } + } + + /* standardize for each op */ + FOR_ALL_BB(bb, cgFunc) { + FOR_BB_INSNS(insn, bb) { + if (insn->IsMachineInstruction()) { + continue; + } + if (insn->IsMove()) { + StdzMov(*insn); + } else if (insn->IsStore() || insn->IsLoad()) { + StdzStrLdr(*insn); + } else if (insn->IsBasicOp()) { + StdzBasicOp(*insn); + } else if (insn->IsUnaryOp()) { + StdzUnaryOp(*insn, *cgFunc); + } else if (insn->IsConversion()) { + StdzCvtOp(*insn, *cgFunc); + } else if (insn->IsShift()) { + StdzShiftOp(*insn, *cgFunc); + } else { + LogInfo::MapleLogger() << "Need STDZ function for " << insn->GetDesc()->GetName() << "\n"; + CHECK_FATAL(false, "NIY"); + } + } + } +} + +void Standardize::AddressMapping(Insn &insn) const { + Operand &dest = insn.GetOperand(kInsnFirstOpnd); + Operand &src1 = insn.GetOperand(kInsnSecondOpnd); + uint32 destSize = dest.GetSize(); + MOperator mOp = abstract::MOP_undef; + switch (destSize) { + case k8BitSize: + mOp = abstract::MOP_copy_rr_8; + break; + case k16BitSize: + mOp = abstract::MOP_copy_rr_16; + break; + case k32BitSize: + mOp = abstract::MOP_copy_rr_32; + break; + case k64BitSize: + mOp = abstract::MOP_copy_rr_64; + break; + default: + break; + } + CHECK_FATAL(mOp != abstract::MOP_undef, "do two address mapping failed"); + Insn &newInsn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, InsnDesc::GetAbstractId(mOp)); + (void)newInsn.AddOpndChain(dest).AddOpndChain(src1); + (void)insn.GetBB()->InsertInsnBefore(insn, newInsn); +} + +bool InstructionStandardize::PhaseRun(maplebe::CGFunc &f) { + Standardize *stdz = f.GetCG()->CreateStandardize(*GetPhaseMemPool(), f); + stdz->DoStandardize(); + return true; +} +} diff --git a/src/mapleall/maple_be/src/cg/strldr.cpp b/src/mapleall/maple_be/src/cg/strldr.cpp new file mode 100644 index 0000000000000000000000000000000000000000..c621bf4450f25d6d09ea953ba048758b434cdbc0 --- /dev/null +++ b/src/mapleall/maple_be/src/cg/strldr.cpp @@ -0,0 +1,59 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#if TARGAARCH64 +#include "aarch64_strldr.h" +#elif TARGRISCV64 +#include "riscv64_strldr.h" +#endif +#if TARGARM32 +#include "arm32_strldr.h" +#endif +#include "reaching.h" +#include "cg.h" +#include "optimize_common.h" + +namespace maplebe { +using namespace maple; +#define SCHD_DUMP_NEWPM CG_DEBUG_FUNC(f) +bool CgStoreLoadOpt::PhaseRun(maplebe::CGFunc &f) { + if (SCHD_DUMP_NEWPM) { + DotGenerator::GenerateDot("storeloadopt", f, f.GetMirModule(), true); + } + ReachingDefinition *reachingDef = nullptr; + if (Globals::GetInstance()->GetOptimLevel() >= CGOptions::kLevel2) { + reachingDef = GET_ANALYSIS(CgReachingDefinition, f); + } + if (reachingDef == nullptr || !f.GetRDStatus()) { + GetAnalysisInfoHook()->ForceEraseAnalysisPhase(f.GetUniqueID(), &CgReachingDefinition::id); + return false; + } + (void)GetAnalysisInfoHook()->ForceRunAnalysisPhase, CGFunc>(&CgLoopAnalysis::id, f); + + StoreLoadOpt *storeLoadOpt = nullptr; +#if TARGAARCH64 || TARGRISCV64 + storeLoadOpt = GetPhaseMemPool()->New(f, *GetPhaseMemPool()); +#endif +#if TARGARM32 + storeLoadOpt = GetPhaseMemPool()->New(f, *GetPhaseMemPool()); +#endif + storeLoadOpt->Run(); + return true; +} +void CgStoreLoadOpt::GetAnalysisDependence(maple::AnalysisDep &aDep) const { + aDep.AddRequired(); + aDep.SetPreservedAll(); +} +MAPLE_TRANSFORM_PHASE_REGISTER_CANSKIP(CgStoreLoadOpt, storeloadopt) +} /* namespace maplebe */ diff --git a/src/mapleall/maple_be/src/cg/tailcall.cpp b/src/mapleall/maple_be/src/cg/tailcall.cpp new file mode 100644 index 0000000000000000000000000000000000000000..34b90aa5f1d1baaf1077be96b9ed521e4616adaf --- /dev/null +++ b/src/mapleall/maple_be/src/cg/tailcall.cpp @@ -0,0 +1,270 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co., Ltd. All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan Permissive Software License v2. + * You can use this software according to the terms and conditions of the MulanPSL - 2.0. + * You may obtain a copy of MulanPSL - 2.0 at: + * + * https://opensource.org/licenses/MulanPSL-2.0 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the MulanPSL - 2.0 for more details. + */ + +#include "tailcall.h" +#include "cgfunc.h" + +namespace maplebe { +using namespace maple; + +/* tailcallopt cannot be used if stack address of this function is taken and passed, + not checking the passing for now, just taken */ +bool TailCallOpt::IsStackAddrTaken() { + FOR_ALL_BB(bb, &cgFunc) { + FOR_BB_INSNS_REV(insn, bb) { + if (!IsAddOrSubOp(insn->GetMachineOpcode())) { + continue; + } + for (uint32 i = 0; i < insn->GetOperandSize(); i++) { + if (insn->GetOperand(i).IsRegister()) { + RegOperand ® = static_cast(insn->GetOperand(i)); + if (OpndIsStackRelatedReg(reg)) { + return true; + } + } + } + } + } + return false; +} + +/* + * Remove redundant mov and mark optimizable bl/blr insn in the BB. + * Return value: true to call this modified block again. 
+ */ +bool TailCallOpt::OptimizeTailBB(BB &bb, MapleSet &callInsns, const BB &exitBB) const { + Insn *lastInsn = bb.GetLastInsn(); + if (bb.NumInsn() == 1 && lastInsn->IsMachineInstruction() && + !AArch64isa::IsPseudoInstruction(lastInsn->GetMachineOpcode()) && !InsnIsCallCand(*bb.GetLastInsn())) { + return false; + } + FOR_BB_INSNS_REV_SAFE(insn, &bb, prevInsn) { + if (!insn->IsMachineInstruction() || AArch64isa::IsPseudoInstruction(insn->GetMachineOpcode())) { + continue; + } + if (InsnIsLoadPair(*insn)) { + if (bb.GetKind() == BB::kBBReturn) { + RegOperand ® = static_cast(insn->GetOperand(0)); + if (OpndIsCalleeSaveReg(reg)) { + continue; /* inserted restore from calleeregs-placement, ignore */ + } + } + return false; + } else if (InsnIsMove(*insn)) { + CHECK_FATAL(insn->GetOperand(0).IsRegister(), "operand0 is not register"); + CHECK_FATAL(insn->GetOperand(1).IsRegister(), "operand1 is not register"); + auto ®1 = static_cast(insn->GetOperand(0)); + auto ®2 = static_cast(insn->GetOperand(1)); + if (!OpndIsR0Reg(reg1) || !OpndIsR0Reg(reg2)) { + return false; + } + bb.RemoveInsn(*insn); + continue; + } else if (InsnIsIndirectCall(*insn)) { + if (insn->GetOperand(0).IsRegister()) { + RegOperand ® = static_cast(insn->GetOperand(0)); + if (OpndIsCalleeSaveReg(reg)) { + return false; /* can't tailcall, register will be overwritten by restore */ + } + } + (void)callInsns.insert(insn); + return false; + } else if (InsnIsCall(*insn)) { + (void)callInsns.insert(insn); + return false; + } else if (InsnIsUncondJump(*insn)) { + LabelOperand &bLab = static_cast(insn->GetOperand(0)); + if (exitBB.GetLabIdx() == bLab.GetLabelIndex()) { + continue; + } + return false; + } else { + return false; + } + } + return true; +} + +/* Recursively invoke this function for all predecessors of exitBB */ +void TailCallOpt::TailCallBBOpt(BB &bb, MapleSet &callInsns, BB &exitBB) { + /* callsite also in the return block as in "if () return; else foo();" + call in the exit block */ + if (!bb.IsEmpty() && !OptimizeTailBB(bb, callInsns, exitBB)) { + return; + } + + for (auto tmpBB : bb.GetPreds()) { + if (tmpBB->GetSuccs().size() != 1 || !tmpBB->GetEhSuccs().empty() || + (tmpBB->GetKind() != BB::kBBFallthru && tmpBB->GetKind() != BB::kBBGoto)) { + continue; + } + + if (OptimizeTailBB(*tmpBB, callInsns, exitBB)) { + TailCallBBOpt(*tmpBB, callInsns, exitBB); + } + } +} + +/* + * If a function without callee-saved register, and end with a function call, + * then transfer bl/blr to b/br. + * Return value: true if function do not need Prologue/Epilogue. false otherwise. + */ +bool TailCallOpt::DoTailCallOpt() { + /* Count how many call insns in the whole function. 
*/ + uint32 nCount = 0; + bool hasGetStackClass = false; + + FOR_ALL_BB(bb, &cgFunc) { + FOR_BB_INSNS(insn, bb) { + if (insn->IsCall()) { + /* + * lib call "savectx, vfork, getcontext" which might cause fault + * not in whitelist yet + */ + if (InsnIsCall(*insn) && IsFuncNeedFrame(*insn)) { + hasGetStackClass = true; + } + ++nCount; + } + } + } + if ((nCount > 0 && cgFunc.GetFunction().GetAttr(FUNCATTR_interface)) || hasGetStackClass) { + return false; + } + + if (nCount == 0) { + // no bl instr in any bb + return true; + } + + size_t exitBBSize = cgFunc.GetExitBBsVec().size(); + /* For now to reduce complexity */ + + BB *exitBB = nullptr; + if (exitBBSize == 0) { + if (cgFunc.GetCleanupBB() != nullptr && cgFunc.GetCleanupBB()->GetPrev() != nullptr) { + exitBB = cgFunc.GetCleanupBB()->GetPrev(); + } else { + exitBB = cgFunc.GetLastBB()->GetPrev(); + } + } else { + exitBB = cgFunc.GetExitBBsVec().front(); + } + uint32 i = 1; + size_t optCount = 0; + do { + MapleSet callInsns(tmpAlloc.Adapter()); + TailCallBBOpt(*exitBB, callInsns, *exitBB); + if (callInsns.size() != 0) { + optCount += callInsns.size(); + (void)exitBB2CallSitesMap.emplace(exitBB, callInsns); + } + if (i < exitBBSize) { + exitBB = cgFunc.GetExitBBsVec()[i]; + ++i; + } else { + break; + } + } while (true); + + /* unequal means regular calls exist in function */ + return nCount == optCount; +} + +void TailCallOpt::ConvertToTailCalls(MapleSet &callInsnsMap) { + BB *exitBB = GetCurTailcallExitBB(); + + /* ExitBB is filled only by now. If exitBB has restore of SP indicating extra stack space has + been allocated, such as a function call with more than 8 args, argument with large aggr etc */ + int64 argsToStkPassSize = cgFunc.GetMemlayout()->SizeOfArgsToStackPass(); + if (!cgFunc.HasVLAOrAlloca() && argsToStkPassSize > 0) { + return; + } + FOR_BB_INSNS(insn, exitBB) { + if (InsnIsAddWithRsp(*insn)) { + return; + } + } + + /* Replace all of the call insns. 
*/ + for (Insn *callInsn : callInsnsMap) { + ReplaceInsnMopWithTailCall(*callInsn); + BB *bb = callInsn->GetBB(); + if (bb->GetKind() == BB::kBBGoto) { + bb->SetKind(BB::kBBFallthru); + if (InsnIsUncondJump(*bb->GetLastInsn())) { + bb->RemoveInsn(*bb->GetLastInsn()); + } + } + ASSERT(bb->GetSuccs().size() <= 1, "expect no succ or single succ"); + for (auto sBB: bb->GetSuccs()) { + bb->RemoveSuccs(*sBB); + sBB->RemovePreds(*bb); + bb->SetKind(BB::kBBReturn); + cgFunc.PushBackExitBBsVec(*bb); + cgFunc.GetCommonExitBB()->PushBackPreds(*bb); + /* if next bb is exit BB */ + if (sBB->GetKind() == BB::kBBReturn && sBB->GetPreds().empty() && + !CGCFG::InSwitchTable(sBB->GetLabIdx(), cgFunc)) { + auto it = std::find(cgFunc.GetExitBBsVec().begin(), cgFunc.GetExitBBsVec().end(), sBB); + CHECK_FATAL(it != cgFunc.GetExitBBsVec().end(), "find unuse exit failed"); + cgFunc.EraseExitBBsVec(it); + cgFunc.GetTheCFG()->RemoveBB(*sBB); + } + break; + } + } +} + +void TailCallOpt::TideExitBB() { + cgFunc.GetTheCFG()->UnreachCodeAnalysis(); + std::vector realRets; + for (auto *exitBB : cgFunc.GetExitBBsVec()) { + if (!exitBB->GetPreds().empty()) { + (void)realRets.emplace_back(exitBB); + } + } + cgFunc.ClearExitBBsVec(); + for (auto *cand: realRets) { + cgFunc.PushBackExitBBsVec(*cand); + } +} + +void TailCallOpt::Run() { + stackProtect = cgFunc.GetNeedStackProtect(); + if (cgFunc.GetCG()->DoTailCall() && !IsStackAddrTaken() && !stackProtect) { + (void)DoTailCallOpt(); // return value == "no call instr/only or 1 tailcall" + } + if (cgFunc.GetMirModule().IsCModule() && !exitBB2CallSitesMap.empty()) { + cgFunc.GetTheCFG()->InitInsnVisitor(cgFunc); + for (auto pair : exitBB2CallSitesMap) { + BB *curExitBB = pair.first; + MapleSet& callInsnsMap = pair.second; + SetCurTailcallExitBB(curExitBB); + ConvertToTailCalls(callInsnsMap); + } + TideExitBB(); + } +} + +bool CgTailCallOpt::PhaseRun(maplebe::CGFunc &f) { + TailCallOpt *tailCallOpt = f.GetCG()->CreateCGTailCallOpt(*GetPhaseMemPool(), f); + tailCallOpt->Run(); + return false; +} + +MAPLE_TRANSFORM_PHASE_REGISTER_CANSKIP(CgTailCallOpt, tailcallopt) +} /* namespace maplebe */ diff --git a/src/mapleall/maple_be/src/cg/x86_64/x64_MPIsel.cpp b/src/mapleall/maple_be/src/cg/x86_64/x64_MPIsel.cpp new file mode 100644 index 0000000000000000000000000000000000000000..a0ab915ee2d448f25422d6815c5feb0c41060eaf --- /dev/null +++ b/src/mapleall/maple_be/src/cg/x86_64/x64_MPIsel.cpp @@ -0,0 +1,1278 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ + +#include "x64_MPISel.h" +#include "x64_memlayout.h" +#include "x64_cgfunc.h" +#include "x64_isa_tbl.h" +#include "x64_cg.h" +#include "isel.h" + +namespace maplebe { +/* Field-ID 0 is assigned to the top level structure. (Field-ID also defaults to 0 if it is not a structure.) 
*/ +MemOperand &X64MPIsel::GetOrCreateMemOpndFromSymbol(const MIRSymbol &symbol, FieldID fieldId) const { + PrimType symType; + int32 fieldOffset = 0; + if (fieldId == 0) { + symType = symbol.GetType()->GetPrimType(); + } else { + MIRType *mirType = symbol.GetType(); + ASSERT((mirType->IsMIRStructType() || mirType->IsMIRUnionType()), "non-structure"); + MIRStructType *structType = static_cast(mirType); + symType = structType->GetFieldType(fieldId)->GetPrimType(); + fieldOffset = static_cast(cgFunc->GetBecommon().GetFieldOffset(*structType, fieldId).first); + } + uint32 opndSz = (symType == PTY_agg) ? k64BitSize : GetPrimTypeBitSize(symType); + return GetOrCreateMemOpndFromSymbol(symbol, opndSz, fieldOffset); +} + +MemOperand &X64MPIsel::GetOrCreateMemOpndFromSymbol(const MIRSymbol &symbol, uint32 opndSize, int64 offset) const { + MIRStorageClass storageClass = symbol.GetStorageClass(); + MemOperand *result = nullptr; + RegOperand *stackBaseReg = nullptr; + if ((storageClass == kScAuto) || (storageClass == kScFormal)) { + auto *symloc = static_cast(cgFunc->GetMemlayout()->GetSymAllocInfo(symbol.GetStIndex())); + ASSERT(symloc != nullptr, "sym loc should have been defined"); + stackBaseReg = static_cast(cgFunc)->GetBaseReg(*symloc); + int stOfst = cgFunc->GetBaseOffset(*symloc); + /* Create field symbols in aggregate structure */ + result = &GetCurFunc()->GetOpndBuilder()->CreateMem(opndSize); + result->SetBaseRegister(*stackBaseReg); + result->SetOffsetOperand(GetCurFunc()->GetOpndBuilder()->CreateImm( + k64BitSize, stOfst + offset)); + CHECK_FATAL(result != nullptr, "NIY"); + return *result; + } + if ((storageClass == kScGlobal) || (storageClass == kScExtern) || + (storageClass == kScPstatic) || (storageClass == kScFstatic)) { + stackBaseReg = &GetCurFunc()->GetOpndBuilder()->CreatePReg(x64::RIP, k64BitSize, kRegTyInt); + result = &GetCurFunc()->GetOpndBuilder()->CreateMem(opndSize); + ImmOperand &stOfstOpnd = GetCurFunc()->GetOpndBuilder()->CreateImm(symbol, offset, 0); + result->SetBaseRegister(*stackBaseReg); + result->SetOffsetOperand(stOfstOpnd); + CHECK_FATAL(result != nullptr, "NIY"); + return *result; + } + CHECK_FATAL(false, "NIY"); + return *result; +} + +void X64MPIsel::SelectReturn(NaryStmtNode &retNode, Operand &opnd) { + MIRType *retType = cgFunc->GetFunction().GetReturnType(); + X64CallConvImpl retLocator(cgFunc->GetBecommon()); + CCLocInfo retMech; + retLocator.LocateRetVal(*retType, retMech); + if (retMech.GetRegCount() == 0) { + return; + } + std::vector retRegs; + if (!cgFunc->GetFunction().StructReturnedInRegs() || + retNode.Opnd(0)->GetOpCode() == OP_constval) { + PrimType oriPrimType = retMech.GetPrimTypeOfReg0(); + regno_t retReg = retMech.GetReg0(); + ASSERT(retReg != kRinvalid, "NIY"); + RegOperand &retOpnd = cgFunc->GetOpndBuilder()->CreatePReg(retReg, GetPrimTypeBitSize(oriPrimType), + cgFunc->GetRegTyFromPrimTy(oriPrimType)); + retRegs.push_back(&retOpnd); + SelectCopy(retOpnd, opnd, oriPrimType, retNode.Opnd(0)->GetPrimType()); + } else { + CHECK_FATAL(opnd.IsMemoryAccessOperand(), "NIY"); + MemOperand &memOpnd = static_cast(opnd); + ImmOperand *offsetOpnd = memOpnd.GetOffsetOperand(); + RegOperand *baseOpnd = memOpnd.GetBaseRegister(); + + PrimType oriPrimType0 = retMech.GetPrimTypeOfReg0(); + regno_t retReg0 = retMech.GetReg0(); + ASSERT(retReg0 != kRinvalid, "NIY"); + RegOperand &retOpnd0 = cgFunc->GetOpndBuilder()->CreatePReg(retReg0, GetPrimTypeBitSize(oriPrimType0), + cgFunc->GetRegTyFromPrimTy(oriPrimType0)); + MemOperand &rhsMemOpnd0 = 
cgFunc->GetOpndBuilder()->CreateMem(GetPrimTypeBitSize(oriPrimType0)); + rhsMemOpnd0.SetBaseRegister(*baseOpnd); + rhsMemOpnd0.SetOffsetOperand(*offsetOpnd); + retRegs.push_back(&retOpnd0); + SelectCopy(retOpnd0, rhsMemOpnd0, oriPrimType0); + + regno_t retReg1 = retMech.GetReg1(); + if (retReg1 != kRinvalid) { + PrimType oriPrimType1 = retMech.GetPrimTypeOfReg1(); + RegOperand &retOpnd1 = cgFunc->GetOpndBuilder()->CreatePReg(retReg1, GetPrimTypeBitSize(oriPrimType1), + cgFunc->GetRegTyFromPrimTy(oriPrimType1)); + MemOperand &rhsMemOpnd1 = cgFunc->GetOpndBuilder()->CreateMem(GetPrimTypeBitSize(oriPrimType1)); + ImmOperand &newOffsetOpnd = static_cast(*offsetOpnd->Clone(*cgFunc->GetMemoryPool())); + newOffsetOpnd.SetValue(newOffsetOpnd.GetValue() + GetPrimTypeSize(oriPrimType0)); + rhsMemOpnd1.SetBaseRegister(*baseOpnd); + rhsMemOpnd1.SetOffsetOperand(newOffsetOpnd); + retRegs.push_back(&retOpnd1); + SelectCopy(retOpnd1, rhsMemOpnd1, oriPrimType1); + } + } + /* for optimization ,insert pseudo ret ,in case rax,rdx is removed*/ + SelectPseduoForReturn(retRegs); +} + +void X64MPIsel::SelectPseduoForReturn(std::vector &retRegs) { + for (auto retReg : retRegs) { + MOperator mop = x64::MOP_pseudo_ret_int; + Insn &pInsn = cgFunc->GetInsnBuilder()->BuildInsn(mop, X64CG::kMd[mop]); + cgFunc->GetCurBB()->AppendInsn(pInsn); + pInsn.AddOpndChain(*retReg); + } +} + +void X64MPIsel::SelectReturn() { + /* jump to epilogue */ + MOperator mOp = x64::MOP_jmpq_l; + LabelNode *endLabel = cgFunc->GetEndLabel(); + auto endLabelName = ".L." + std::to_string(cgFunc->GetUniqueID()) + "__" + std::to_string(endLabel->GetLabelIdx()); + LabelOperand &targetOpnd = cgFunc->GetOpndBuilder()->CreateLabel(endLabelName.c_str(), endLabel->GetLabelIdx()); + Insn &jmpInsn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp]); + jmpInsn.AddOpndChain(targetOpnd); + cgFunc->GetCurBB()->AppendInsn(jmpInsn); + cgFunc->GetExitBBsVec().emplace_back(cgFunc->GetCurBB()); +} + +void X64MPIsel::CreateCallStructParamPassByStack(MemOperand &memOpnd, int32 symSize, int32 baseOffset) { + int32 copyTime = RoundUp(symSize, GetPointerSize()) / GetPointerSize(); + for (int32 i = 0; i < copyTime; ++i) { + MemOperand &addrMemOpnd = cgFunc->GetOpndBuilder()->CreateMem(k64BitSize); + addrMemOpnd.SetBaseRegister(*memOpnd.GetBaseRegister()); + ImmOperand &newImmOpnd = static_cast(*memOpnd.GetOffsetOperand()->Clone(*cgFunc->GetMemoryPool())); + newImmOpnd.SetValue(newImmOpnd.GetValue() + i * GetPointerSize()); + addrMemOpnd.SetOffsetOperand(newImmOpnd); + RegOperand &spOpnd = cgFunc->GetOpndBuilder()->CreatePReg(x64::RSP, k64BitSize, kRegTyInt); + Operand &stMemOpnd = cgFunc->GetOpndBuilder()->CreateMem(spOpnd, + (baseOffset + i * GetPointerSize()), k64BitSize); + SelectCopy(stMemOpnd, addrMemOpnd, PTY_u64); + } +} + +void X64MPIsel::CreateCallStructParamPassByReg(MemOperand &memOpnd, regno_t regNo, uint32 parmNum) { + CHECK_FATAL(parmNum < kMaxStructParamByReg, "Exceeded maximum allowed fp parameter registers for struct passing"); + RegOperand &parmOpnd = cgFunc->GetOpndBuilder()->CreatePReg(regNo, k64BitSize, kRegTyInt); + MemOperand &addrMemOpnd = cgFunc->GetOpndBuilder()->CreateMem(k64BitSize); + addrMemOpnd.SetBaseRegister(*memOpnd.GetBaseRegister()); + ImmOperand &newImmOpnd = static_cast(*memOpnd.GetOffsetOperand()->Clone(*cgFunc->GetMemoryPool())); + newImmOpnd.SetValue(newImmOpnd.GetValue() + parmNum * GetPointerSize()); + addrMemOpnd.SetOffsetOperand(newImmOpnd); + paramPassByReg.push_back({&parmOpnd, &addrMemOpnd, PTY_a64}); +} + 
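+/*
+ * Worked example (hypothetical 24-byte struct, for illustration only):
+ *   CreateCallStructParamPassByStack copies RoundUp(24, 8) / 8 = 3 eight-byte chunks,
+ *   loading chunk i from [base + offset + i * 8] and storing it to [RSP + baseOffset + i * 8].
+ * A struct small enough for registers goes through CreateCallStructParamPassByReg instead,
+ * which only queues one {parmReg, srcMem, PTY_a64} entry per eightbyte in paramPassByReg;
+ * the actual copies are emitted later in SelectParmList, so the argument registers are
+ * written only once, immediately before the call.
+ */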
+std::tuple X64MPIsel::GetMemOpndInfoFromAggregateNode(BaseNode &argExpr) { + /* get mirType info */ + auto [fieldId, mirType] = GetFieldIdAndMirTypeFromMirNode(argExpr); + MirTypeInfo symInfo = GetMirTypeInfoFormFieldIdAndMirType(fieldId, mirType); + /* get symbol memOpnd info */ + MemOperand *symMemOpnd = nullptr; + if (argExpr.GetOpCode() == OP_dread) { + AddrofNode &dread = static_cast(argExpr); + MIRSymbol *symbol = cgFunc->GetFunction().GetLocalOrGlobalSymbol(dread.GetStIdx()); + symMemOpnd = &GetOrCreateMemOpndFromSymbol(*symbol, dread.GetFieldID()); + } else if (argExpr.GetOpCode() == OP_iread) { + IreadNode &iread = static_cast(argExpr); + symMemOpnd = GetOrCreateMemOpndFromIreadNode(iread, symInfo.primType, symInfo.offset); + } else { + CHECK_FATAL(false, "unsupported opcode"); + } + return {symMemOpnd, symInfo.size, mirType}; +} + +void X64MPIsel::SelectParmListForAggregate(BaseNode &argExpr, X64CallConvImpl &parmLocator, bool isArgUnused) { + auto [argOpnd, argSize, mirType] = GetMemOpndInfoFromAggregateNode(argExpr); + ASSERT(argOpnd->IsMemoryAccessOperand(), "wrong opnd"); + MemOperand &memOpnd = static_cast(*argOpnd); + + CCLocInfo ploc; + parmLocator.LocateNextParm(*mirType, ploc); + if (isArgUnused) { + return; + } + + /* create call struct param pass */ + if (argSize > k16ByteSize || ploc.reg0 == kRinvalid) { + CreateCallStructParamPassByStack(memOpnd, argSize, ploc.memOffset); + } else { + CHECK_FATAL(ploc.fpSize == 0, "Unknown call parameter state"); + CreateCallStructParamPassByReg(memOpnd, ploc.reg0, 0); + if (ploc.reg1 != kRinvalid) { + CreateCallStructParamPassByReg(memOpnd, ploc.reg1, 1); + } + if (ploc.reg2 != kRinvalid) { + CreateCallStructParamPassByReg(memOpnd, ploc.reg2, 2); + } + if (ploc.reg3 != kRinvalid) { + CreateCallStructParamPassByReg(memOpnd, ploc.reg3, 3); + } + } +} + +/* + * SelectParmList generates an instrunction for each of the parameters + * to load the parameter value into the corresponding register. + * We return a list of registers to the call instruction because + * they may be needed in the register allocation phase. 
+ * fp Num is a return value which is the number of vector + * registers used; + */ +void X64MPIsel::SelectParmList(StmtNode &naryNode, ListOperand &srcOpnds, uint32 &fpNum) { + paramPassByReg.clear(); + fpNum = 0; + /* for IcallNode, the 0th operand is the function pointer */ + size_t argBegin = 0; + if (naryNode.GetOpCode() == OP_icall || naryNode.GetOpCode() == OP_icallproto) { + ++argBegin; + } + + MIRFunction *callee = nullptr; + if (naryNode.GetOpCode() == OP_call) { + PUIdx calleePuIdx = static_cast(naryNode).GetPUIdx(); + callee = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(calleePuIdx); + } + X64CallConvImpl parmLocator(cgFunc->GetBecommon()); + CCLocInfo ploc; + for (size_t i = argBegin; i < naryNode.NumOpnds(); ++i) { + BaseNode *argExpr = naryNode.Opnd(i); + ASSERT(argExpr != nullptr, "not null check"); + PrimType primType = argExpr->GetPrimType(); + ASSERT(primType != PTY_void, "primType should not be void"); + bool isArgUnused = (callee != nullptr && callee->GetFuncDesc().IsArgUnused(i)); + if (primType == PTY_agg) { + SelectParmListForAggregate(*argExpr, parmLocator, isArgUnused); + continue; + } + + Operand *argOpnd = HandleExpr(naryNode, *argExpr); + ASSERT(argOpnd != nullptr, "not null check"); + MIRType *mirType = GlobalTables::GetTypeTable().GetTypeTable()[static_cast(primType)]; + parmLocator.LocateNextParm(*mirType, ploc); + + /* skip unused args */ + if (isArgUnused) { + continue; + } + + if (ploc.reg0 != x64::kRinvalid) { + /* load to the register. */ + RegOperand &parmRegOpnd = cgFunc->GetOpndBuilder()->CreatePReg(ploc.reg0, + GetPrimTypeBitSize(primType), cgFunc->GetRegTyFromPrimTy(primType)); + paramPassByReg.push_back({&parmRegOpnd, argOpnd, primType}); + if (x64::IsFPSIMDRegister(static_cast(ploc.reg0))) { + fpNum++; + } + } else { + /* load to stack memory */ + RegOperand &baseOpnd = cgFunc->GetOpndBuilder()->CreatePReg(x64::RSP, k64BitSize, + cgFunc->GetRegTyFromPrimTy(primType)); + MemOperand &actMemOpnd = cgFunc->GetOpndBuilder()->CreateMem(baseOpnd, ploc.memOffset, + GetPrimTypeBitSize(primType)); + SelectCopy(actMemOpnd, *argOpnd, primType); + } + ASSERT(ploc.reg1 == 0, "SelectCall NIY"); + } + + /* param pass by reg */ + for (auto [regOpnd, argOpnd, primType] : paramPassByReg) { + ASSERT(regOpnd != nullptr, "not null check"); + ASSERT(argOpnd != nullptr, "not null check"); + SelectCopy(*regOpnd, *argOpnd, primType); + srcOpnds.PushOpnd(*regOpnd); + } +} + +bool X64MPIsel::IsParamStructCopy(const MIRSymbol &symbol) { + if (symbol.GetStorageClass() == kScFormal && + cgFunc->GetBecommon().GetTypeSize(symbol.GetTyIdx().GetIdx()) > k16ByteSize) { + return true; + } + return false; +} + +void X64MPIsel::SelectIntAggCopyReturn(MemOperand &symbolMem, uint64 aggSize) { + CHECK_FATAL((aggSize > 0) && (aggSize <= k16ByteSize), "out of range."); + RegOperand *baseOpnd = symbolMem.GetBaseRegister(); + int32 stOffset = symbolMem.GetOffsetOperand()->GetValue(); + bool isCopyOneReg = (aggSize <= k8ByteSize); + int32 extraSize = (aggSize % k8ByteSize) * kBitsPerByte; + if (extraSize == 0) { + extraSize = k64BitSize; + } else if (extraSize <= k8BitSize) { + extraSize = k8BitSize; + } else if (extraSize <= k16BitSize) { + extraSize = k16BitSize; + } else if (extraSize <= k32BitSize) { + extraSize = k32BitSize; + } else { + extraSize = k64BitSize; + } + /* generate move from return registers(rax, rdx) to mem of symbol */ + PrimType extraTy = GetIntegerPrimTypeFromSize(false, extraSize); + /* mov %rax mem */ + RegOperand ®Rhs0 = 
cgFunc->GetOpndBuilder()->CreatePReg(x64::RAX, + (isCopyOneReg ? extraSize : k64BitSize), kRegTyInt); + MemOperand &memSymbo0 = cgFunc->GetOpndBuilder()->CreateMem(*baseOpnd, + static_cast(stOffset), isCopyOneReg ? extraSize : k64BitSize); + SelectCopy(memSymbo0, regRhs0, isCopyOneReg ? extraTy : PTY_u64); + /* mov %rdx mem */ + if (!isCopyOneReg) { + RegOperand ®Rhs1 = cgFunc->GetOpndBuilder()->CreatePReg(x64::RDX, extraSize, kRegTyInt); + MemOperand &memSymbo1 = cgFunc->GetOpndBuilder()->CreateMem(*baseOpnd, + static_cast(stOffset + k8ByteSize), extraSize); + SelectCopy(memSymbo1, regRhs1, extraTy); + } + return; +} + +void X64MPIsel::SelectAggCopy(MemOperand &lhs, MemOperand &rhs, uint32 copySize) { + /* in x86-64, 8 bytes data is copied at a time */ + uint32 copyTimes = copySize / k8ByteSize; + uint32 extraCopySize = copySize % k8ByteSize; + ImmOperand *stOfstLhs = lhs.GetOffsetOperand(); + ImmOperand *stOfstRhs = rhs.GetOffsetOperand(); + RegOperand *baseLhs = lhs.GetBaseRegister(); + RegOperand *baseRhs = rhs.GetBaseRegister(); + if (copySize < 40U) { + for (int32 i = 0; i < copyTimes; ++i) { + /* prepare dest addr */ + MemOperand &memOpndLhs = cgFunc->GetOpndBuilder()->CreateMem(k64BitSize); + memOpndLhs.SetBaseRegister(*baseLhs); + ImmOperand &newStOfstLhs = static_cast(*stOfstLhs->Clone(*cgFunc->GetMemoryPool())); + newStOfstLhs.SetValue(newStOfstLhs.GetValue() + i * k8ByteSize); + memOpndLhs.SetOffsetOperand(newStOfstLhs); + /* prepare src addr */ + MemOperand &memOpndRhs = cgFunc->GetOpndBuilder()->CreateMem(k64BitSize); + memOpndRhs.SetBaseRegister(*baseRhs); + ImmOperand &newStOfstRhs = static_cast(*stOfstRhs->Clone(*cgFunc->GetMemoryPool())); + newStOfstRhs.SetValue(newStOfstRhs.GetValue() + i * k8ByteSize); + memOpndRhs.SetOffsetOperand(newStOfstRhs); + /* copy data */ + SelectCopy(memOpndLhs, memOpndRhs, PTY_a64); + } + } else { + /* adopt rep insn in x64's isa */ + std::vector opndVec; + opndVec.push_back(PrepareMemcpyParm(lhs, MOP_leaq_m_r)); + opndVec.push_back(PrepareMemcpyParm(rhs, MOP_leaq_m_r)); + opndVec.push_back(PrepareMemcpyParm(copySize)); + SelectLibCallNoReturn("memcpy", opndVec, PTY_a64); + return; + } + /* take care of extra content at the end less than the unit */ + if (extraCopySize == 0) { + return; + } + extraCopySize = ((extraCopySize <= k4ByteSize) ? 
k4ByteSize : k8ByteSize) * kBitsPerByte; + PrimType extraTy = GetIntegerPrimTypeFromSize(false, extraCopySize); + MemOperand &memOpndLhs = cgFunc->GetOpndBuilder()->CreateMem(extraCopySize); + memOpndLhs.SetBaseRegister(*baseLhs); + ImmOperand &newStOfstLhs = static_cast(*stOfstLhs->Clone(*cgFunc->GetMemoryPool())); + newStOfstLhs.SetValue(newStOfstLhs.GetValue() + copyTimes * k8ByteSize); + memOpndLhs.SetOffsetOperand(newStOfstLhs); + MemOperand &memOpndRhs = cgFunc->GetOpndBuilder()->CreateMem(extraCopySize); + memOpndRhs.SetBaseRegister(*baseRhs); + ImmOperand &newStOfstRhs = static_cast(*stOfstRhs->Clone(*cgFunc->GetMemoryPool())); + newStOfstRhs.SetValue(newStOfstRhs.GetValue() + copyTimes * k8ByteSize); + memOpndRhs.SetOffsetOperand(newStOfstRhs); + SelectCopy(memOpndLhs, memOpndRhs, extraTy); +} + +void X64MPIsel::SelectLibCallNoReturn(const std::string &funcName, std::vector &opndVec, PrimType primType) { + /* generate libcall withou return value */ + std::vector pt(opndVec.size(), primType); + SelectLibCallNArg(funcName, opndVec, pt); + return; +} + +void X64MPIsel::SelectLibCallNArg(const std::string &funcName, std::vector &opndVec, + std::vector pt) { + std::string newName = funcName; + MIRSymbol *st = GlobalTables::GetGsymTable().CreateSymbol(kScopeGlobal); + st->SetNameStrIdx(newName); + st->SetStorageClass(kScExtern); + st->SetSKind(kStFunc); + + /* setup the type of the callee function */ + std::vector vec; + std::vector vecAt; + for (size_t i = 1; i < opndVec.size(); ++i) { + vec.emplace_back(GlobalTables::GetTypeTable().GetTypeTable()[static_cast(pt[i])]->GetTypeIndex()); + vecAt.emplace_back(TypeAttrs()); + } + + /* only support no return function */ + MIRType *retType = GlobalTables::GetTypeTable().GetTypeTable().at(static_cast(PTY_void)); + st->SetTyIdx(cgFunc->GetBecommon().BeGetOrCreateFunctionType(retType->GetTypeIndex(), vec, vecAt)->GetTypeIndex()); + + /* setup actual parameters */ + ListOperand ¶mOpnds = cgFunc->GetOpndBuilder()->CreateList(); + + X64CallConvImpl parmLocator(cgFunc->GetBecommon()); + CCLocInfo ploc; + for (size_t i = 0; i < opndVec.size(); ++i) { + ASSERT(pt[i] != PTY_void, "primType check"); + MIRType *ty; + ty = GlobalTables::GetTypeTable().GetTypeTable()[static_cast(pt[i])]; + Operand *stOpnd = opndVec[i]; + ASSERT(stOpnd->IsRegister(), "exp result should be reg"); + RegOperand *expRegOpnd = static_cast(stOpnd); + parmLocator.LocateNextParm(*ty, ploc); + if (ploc.reg0 != 0) { /* load to the register */ + RegOperand &parmRegOpnd = cgFunc->GetOpndBuilder()->CreatePReg(ploc.reg0, + expRegOpnd->GetSize(), cgFunc->GetRegTyFromPrimTy(pt[i])); + SelectCopy(parmRegOpnd, *expRegOpnd, pt[i]); + paramOpnds.PushOpnd(parmRegOpnd); + } + ASSERT(ploc.reg1 == 0, "SelectCall NYI"); + } + + MIRSymbol *sym = cgFunc->GetFunction().GetLocalOrGlobalSymbol(st->GetStIdx(), false); + Operand &targetOpnd = cgFunc->GetOpndBuilder()->CreateFuncNameOpnd(*sym); + ListOperand &retOpnds = cgFunc->GetOpndBuilder()->CreateList(); + Insn &callInsn = AppendCall(x64::MOP_callq_l, targetOpnd, paramOpnds, retOpnds); + + callInsn.SetRetType(Insn::kRegInt); + if (retType != nullptr) { + callInsn.SetRetSize(static_cast(retType->GetSize())); + callInsn.SetIsCallReturnUnsigned(IsUnsignedInteger(retType->GetPrimType())); + } + return; +} + +Operand *X64MPIsel::SelectDoubleConst(MIRDoubleConst &doubleConst, PrimType primType) const { + uint32 labelIdxTmp = cgFunc->GetLabelIdx(); + Operand *result = SelectLiteral(doubleConst, cgFunc->GetFunction(), labelIdxTmp++); + 
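+  /* labelIdxTmp was post-incremented above; write the updated label index back so the next literal gets a fresh label */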
cgFunc->SetLabelIdx(labelIdxTmp); + return result; +} + +RegOperand *X64MPIsel::PrepareMemcpyParm(MemOperand &memOperand, MOperator mOp) { + RegOperand ®Result = cgFunc->GetOpndBuilder()->CreateVReg(k64BitSize, kRegTyInt); + Insn &addrInsn = (cgFunc->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp])); + addrInsn.AddOpndChain(memOperand).AddOpndChain(regResult); + cgFunc->GetCurBB()->AppendInsn(addrInsn); + return ®Result; +} + +RegOperand *X64MPIsel::PrepareMemcpyParm(uint64 copySize) { + RegOperand ®Result = cgFunc->GetOpndBuilder()->CreateVReg(k64BitSize, kRegTyInt); + ImmOperand &sizeOpnd = cgFunc->GetOpndBuilder()->CreateImm(k64BitSize, copySize); + SelectCopy(regResult, sizeOpnd, PTY_i64); + return ®Result; +} + +void X64MPIsel::SelectAggDassign(MirTypeInfo &lhsInfo, MemOperand &symbolMem, Operand &opndRhs) { + /* rhs is Func Return, it must be from Regread */ + if (opndRhs.IsRegister()) { + SelectIntAggCopyReturn(symbolMem, lhsInfo.size); + return; + } + /* In generally, rhs is from Dread/Iread */ + CHECK_FATAL(opndRhs.IsMemoryAccessOperand(), "Aggregate Type RHS must be mem"); + MemOperand &memRhs = static_cast(opndRhs); + SelectAggCopy(symbolMem, memRhs, lhsInfo.size); +} + +void X64MPIsel::SelectAggIassign(IassignNode &stmt, Operand &AddrOpnd, Operand &opndRhs) { + /* mirSymbol info */ + MirTypeInfo symbolInfo = GetMirTypeInfoFromMirNode(stmt); + MIRType *stmtMirType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(stmt.GetTyIdx()); + + /* In generally, RHS is from Dread/Iread */ + CHECK_FATAL(opndRhs.IsMemoryAccessOperand(), "Aggregate Type RHS must be mem"); + MemOperand &memRhs = static_cast(opndRhs); + ImmOperand *stOfstSrc = memRhs.GetOffsetOperand(); + RegOperand *baseSrc = memRhs.GetBaseRegister(); + + if (stmtMirType->GetPrimType() == PTY_agg) { + /* generate move to regs for agg return */ + RegOperand *result[kFourRegister] = { nullptr }; /* up to 2 int or 4 fp */ + uint32 numRegs = (symbolInfo.size <= k8ByteSize) ? kOneRegister : kTwoRegister; + PrimType retPrimType = (symbolInfo.size <= k4ByteSize) ? PTY_u32 : PTY_u64; + for (int i = 0; i < numRegs; i++) { + MemOperand &rhsMemOpnd = cgFunc->GetOpndBuilder()->CreateMem(GetPrimTypeBitSize(retPrimType)); + rhsMemOpnd.SetBaseRegister(*baseSrc); + ImmOperand &newStOfstSrc = static_cast(*stOfstSrc->Clone(*cgFunc->GetMemoryPool())); + newStOfstSrc.SetValue(newStOfstSrc.GetValue() + i * k8ByteSize); + rhsMemOpnd.SetOffsetOperand(newStOfstSrc); + regno_t regNo = (i == 0) ? 
x64::RAX : x64::RDX; + result[i] = &cgFunc->GetOpndBuilder()->CreatePReg(regNo, GetPrimTypeBitSize(retPrimType), + cgFunc->GetRegTyFromPrimTy(retPrimType)); + SelectCopy(*(result[i]), rhsMemOpnd, retPrimType); + } + } else { + RegOperand *lhsAddrOpnd = &SelectCopy2Reg(AddrOpnd, stmt.Opnd(0)->GetPrimType()); + MemOperand &symbolMem = cgFunc->GetOpndBuilder()->CreateMem(*lhsAddrOpnd, symbolInfo.offset, + GetPrimTypeBitSize(PTY_u64)); + SelectAggCopy(symbolMem, memRhs, symbolInfo.size); + } +} + +Insn &X64MPIsel::AppendCall(x64::X64MOP_t mOp, Operand &targetOpnd, + ListOperand ¶mOpnds, ListOperand &retOpnds) { + Insn &callInsn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp]); + callInsn.AddOpndChain(targetOpnd).AddOpndChain(paramOpnds).AddOpndChain(retOpnds); + cgFunc->GetCurBB()->AppendInsn(callInsn); + cgFunc->GetCurBB()->SetHasCall(); + cgFunc->GetFunction().SetHasCall(); + return callInsn; +} + +void X64MPIsel::SelectCalleeReturn(MIRType *retType, ListOperand &retOpnds) { + if (retType == nullptr) { + return; + } + auto retSize = retType->GetSize() * kBitsPerByte; + if (retType->GetPrimType() != PTY_agg || retSize <= k128BitSize) { + if (retSize > k0BitSize) { + retOpnds.PushOpnd(cgFunc->GetOpndBuilder()->CreatePReg(x64::RAX, k64BitSize, kRegTyInt)); + } + if (retSize > k64BitSize) { + retOpnds.PushOpnd(cgFunc->GetOpndBuilder()->CreatePReg(x64::RDX, k64BitSize, kRegTyInt)); + } + } +} + +void X64MPIsel::SelectCall(CallNode &callNode) { + MIRFunction *fn = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(callNode.GetPUIdx()); + MIRSymbol *fsym = GlobalTables::GetGsymTable().GetSymbolFromStidx(fn->GetStIdx().Idx(), false); + Operand &targetOpnd = cgFunc->GetOpndBuilder()->CreateFuncNameOpnd(*fsym); + + ListOperand ¶mOpnds = cgFunc->GetOpndBuilder()->CreateList(); + uint32 fpNum = 0; + SelectParmList(callNode, paramOpnds, fpNum); + /* x64abi: rax = with variable arguments passes information about the number of vector registers used */ + if (fn->IsVarargs()) { + ImmOperand &fpNumImm = cgFunc->GetOpndBuilder()->CreateImm(k64BitSize, fpNum); + RegOperand &raxOpnd = cgFunc->GetOpndBuilder()->CreatePReg(x64::RAX, k64BitSize, kRegTyInt); + SelectCopy(raxOpnd, fpNumImm, PTY_i64); + } + + MIRType *retType = fn->GetReturnType(); + ListOperand &retOpnds = cgFunc->GetOpndBuilder()->CreateList(); + SelectCalleeReturn(retType, retOpnds); + + Insn &callInsn = AppendCall(x64::MOP_callq_l, targetOpnd, paramOpnds, retOpnds); + callInsn.SetRetType(Insn::kRegInt); + if (retType != nullptr) { + callInsn.SetRetSize(static_cast(retType->GetSize())); + callInsn.SetIsCallReturnUnsigned(IsUnsignedInteger(retType->GetPrimType())); + } +} + +void X64MPIsel::SelectIcall(IcallNode &iCallNode, Operand &opnd0) { + RegOperand &targetOpnd = SelectCopy2Reg(opnd0, iCallNode.Opnd(0)->GetPrimType()); + ListOperand ¶mOpnds = cgFunc->GetOpndBuilder()->CreateList(); + uint32 fpNum = 0; + SelectParmList(iCallNode, paramOpnds, fpNum); + MIRType *retType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(iCallNode.GetRetTyIdx()); + if (iCallNode.GetOpCode() == OP_icallproto) { + MIRPtrType *ptrType = static_cast(retType); + MIRFuncType *calleeType = static_cast(ptrType->GetPointedType()); + retType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(calleeType->GetRetTyIdx()); + } + ListOperand &retOpnds = cgFunc->GetOpndBuilder()->CreateList(); + SelectCalleeReturn(retType, retOpnds); + + Insn &callInsn = AppendCall(x64::MOP_callq_r, targetOpnd, paramOpnds, retOpnds); + callInsn.SetRetType(Insn::kRegInt); + if (retType != 
nullptr) { + callInsn.SetRetSize(static_cast(retType->GetSize())); + callInsn.SetIsCallReturnUnsigned(IsUnsignedInteger(retType->GetPrimType())); + } +} + +Operand &X64MPIsel::ProcessReturnReg(PrimType primType, int32 sReg) { + return GetTargetRetOperand(primType, sReg); +} + +void X64MPIsel::SelectGoto(GotoNode &stmt) { + MOperator mOp = x64::MOP_jmpq_l; + auto funcName = ".L." + std::to_string(cgFunc->GetUniqueID()) + "__" + std::to_string(stmt.GetOffset()); + LabelOperand &targetOpnd = cgFunc->GetOpndBuilder()->CreateLabel(funcName.c_str(), stmt.GetOffset()); + Insn &jmpInsn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp]); + cgFunc->GetCurBB()->AppendInsn(jmpInsn); + jmpInsn.AddOpndChain(targetOpnd); + cgFunc->GetCurBB()->SetKind(BB::kBBGoto); + return; +} + +void X64MPIsel::SelectIgoto(Operand &opnd0) { + CHECK_FATAL(opnd0.IsRegister(), "only register implemented!"); + MOperator mOp = x64::MOP_jmpq_r; + Insn &jmpInsn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp]); + jmpInsn.AddOpndChain(opnd0); + cgFunc->GetCurBB()->AppendInsn(jmpInsn); + return; +} + +/* This function is to generate an inline function to generate the va_list data structure */ +/* type $__va_list align(8), + @__gr_top <* void> align(8), + @__vr_top <* void> align(8), + @__gr_offs i32 align(4), + @__vr_offs i32 align(4)}> + } +*/ +void X64MPIsel::GenCVaStartIntrin(RegOperand &opnd, uint32 stkOffset) { + /* FPLR only pushed in regalloc() after intrin function */ + RegOperand &fpOpnd = cgFunc->GetOpndBuilder()->CreatePReg(RFP, k64BitSize, kRegTyInt); + + uint32 fpLrLength = k16BitSize; + /* __stack */ + if (stkOffset != 0) { + stkOffset += fpLrLength; + } + + /* isvary reset StackFrameSize */ + ImmOperand &vaListOnPassArgStackOffset = cgFunc->GetOpndBuilder()->CreateImm(k64BitSize, stkOffset); + RegOperand &vReg = cgFunc->GetOpndBuilder()->CreateVReg(k64BitSize, kRegTyInt); + SelectAdd(vReg, fpOpnd, vaListOnPassArgStackOffset, GetLoweredPtrType()); + + // The 8-byte data in the a structure needs to use this mop. 
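+  // (__stack, __gr_top and __vr_top are pointer-sized fields, so they are stored with the 64-bit movq;
+  //  the 4-byte __gr_offs/__vr_offs fields further down switch to the 32-bit movl.)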
+ MOperator mOp = x64::MOP_movq_r_m; + + /* mem operand in va_list struct (lhs) */ + MemOperand &vaList = GetCurFunc()->GetOpndBuilder()->CreateMem(opnd, 0, k64BitSize); + Insn &fillInStkOffsetInsn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp]); + fillInStkOffsetInsn.AddOpndChain(vReg).AddOpndChain(vaList); + cgFunc->GetCurBB()->AppendInsn(fillInStkOffsetInsn); + + /* __gr_top ; it's the same as __stack before the 1st va_arg */ + stkOffset = 0; + ImmOperand &grTopOffset = cgFunc->GetOpndBuilder()->CreateImm(k64BitSize, stkOffset); + SelectSub(vReg, fpOpnd, grTopOffset, PTY_a64); + + /* mem operand in va_list struct (lhs) */ + MemOperand &vaListGRTop = GetCurFunc()->GetOpndBuilder()->CreateMem(opnd, k8BitSize, k64BitSize); + Insn &fillInGRTopInsn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp]); + fillInGRTopInsn.AddOpndChain(vReg).AddOpndChain(vaListGRTop); + cgFunc->GetCurBB()->AppendInsn(fillInGRTopInsn); + + /* __vr_top */ + int32 grAreaSize = static_cast(static_cast(cgFunc->GetMemlayout())->GetSizeOfGRSaveArea()); + stkOffset += grAreaSize; + stkOffset += k8BitSize; + ImmOperand &vaListVRTopOffset = cgFunc->GetOpndBuilder()->CreateImm(k64BitSize, stkOffset); + SelectSub(vReg, fpOpnd, vaListVRTopOffset, PTY_a64); + + MemOperand &vaListVRTop = GetCurFunc()->GetOpndBuilder()->CreateMem(opnd, k16BitSize, k64BitSize); + Insn &fillInVRTopInsn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp]); + fillInVRTopInsn.AddOpndChain(vReg).AddOpndChain(vaListVRTop); + cgFunc->GetCurBB()->AppendInsn(fillInVRTopInsn); + + // The 4-byte data in the a structure needs to use this mop. + mOp = x64::MOP_movl_r_m; + + /* __gr_offs */ + int32 grOffs = 0 - grAreaSize; + ImmOperand &vaListGROffsOffset = cgFunc->GetOpndBuilder()->CreateImm(k32BitSize, grOffs); + RegOperand &grOffsRegOpnd = SelectCopy2Reg(vaListGROffsOffset, PTY_a32); + + MemOperand &vaListGROffs = GetCurFunc()->GetOpndBuilder()->CreateMem(opnd, k24BitSize, k64BitSize); + Insn &fillInGROffsInsn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp]); + fillInGROffsInsn.AddOpndChain(grOffsRegOpnd).AddOpndChain(vaListGROffs); + cgFunc->GetCurBB()->AppendInsn(fillInGROffsInsn); + + /* __vr_offs */ + int32 vrOffs = static_cast(0UL - static_cast(static_cast( + cgFunc->GetMemlayout())->GetSizeOfVRSaveArea())); + ImmOperand &vaListVROffsOffset = cgFunc->GetOpndBuilder()->CreateImm(k32BitSize, vrOffs); + RegOperand &vrOffsRegOpnd = SelectCopy2Reg(vaListVROffsOffset, PTY_a32); + + MemOperand &vaListVROffs = GetCurFunc()->GetOpndBuilder()->CreateMem(opnd, k24BitSize + 4, k64BitSize); + Insn &fillInVROffsInsn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp]); + fillInVROffsInsn.AddOpndChain(vrOffsRegOpnd).AddOpndChain(vaListVROffs); + cgFunc->GetCurBB()->AppendInsn(fillInVROffsInsn); +} + +/* The second parameter in function va_start does not need to be concerned here, + * it is mainly used in proepilog */ +void X64MPIsel::SelectCVaStart(const IntrinsiccallNode &intrnNode) { + ASSERT(intrnNode.NumOpnds() == 2, "must be 2 operands"); + /* 2 operands, but only 1 needed. Don't need to emit code for second operand + * + * va_list is a passed struct with an address, load its address + */ + BaseNode *argExpr = intrnNode.Opnd(0); + Operand *opnd = HandleExpr(intrnNode, *argExpr); + RegOperand &opnd0 = SelectCopy2Reg(*opnd, GetLoweredPtrType()); /* first argument of intrinsic */ + + /* Find beginning of unnamed arg on stack. + * Ex. void foo(int i1, int i2, ... int i8, struct S r, struct S s, ...) 
+ * where struct S has size 32, address of r and s are on stack but they are named. + */ + X64CallConvImpl parmLocator(cgFunc->GetBecommon()); + CCLocInfo pLoc; + uint32 stkSize = 0; + for (uint32 i = 0; i < cgFunc->GetFunction().GetFormalCount(); i++) { + MIRType *ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(cgFunc->GetFunction().GetNthParamTyIdx(i)); + parmLocator.LocateNextParm(*ty, pLoc); + if (pLoc.reg0 == kRinvalid) { /* on stack */ + stkSize = static_cast(pLoc.memOffset + pLoc.memSize); + } + } + + stkSize = static_cast(RoundUp(stkSize, GetPointerSize())); + + GenCVaStartIntrin(opnd0, stkSize); + + return; +} + +void X64MPIsel::SelectIntrinCall(IntrinsiccallNode &intrinsiccallNode) { + MIRIntrinsicID intrinsic = intrinsiccallNode.GetIntrinsic(); + + if (intrinsic == INTRN_C_va_start) { + SelectCVaStart(intrinsiccallNode); + return; + } + if (intrinsic == INTRN_C_stack_save || intrinsic == INTRN_C_stack_restore) { + return; + } + + CHECK_FATAL(false, "Intrinsic %d: %s not implemented by the X64 CG.", intrinsic, GetIntrinsicName(intrinsic)); +} + +void X64MPIsel::SelectRangeGoto(RangeGotoNode &rangeGotoNode, Operand &srcOpnd) { + MIRType *etype = GlobalTables::GetTypeTable().GetTypeFromTyIdx((TyIdx)PTY_a64); + std::vector sizeArray; + const SmallCaseVector &switchTable = rangeGotoNode.GetRangeGotoTable(); + sizeArray.emplace_back(switchTable.size()); + MemPool *memPool = cgFunc->GetMemoryPool(); + MIRArrayType *arrayType = memPool->New(etype->GetTypeIndex(), sizeArray); + MIRAggConst *arrayConst = memPool->New(cgFunc->GetMirModule(), *arrayType); + for (const auto &itPair : switchTable) { + LabelIdx labelIdx = itPair.second; + cgFunc->GetCurBB()->PushBackRangeGotoLabel(labelIdx); + MIRConst *mirConst = memPool->New(labelIdx, cgFunc->GetFunction().GetPuidx(), *etype); + arrayConst->AddItem(mirConst, 0); + } + MIRSymbol *lblSt = cgFunc->GetFunction().GetSymTab()->CreateSymbol(kScopeLocal); + lblSt->SetStorageClass(kScFstatic); + lblSt->SetSKind(kStConst); + lblSt->SetTyIdx(arrayType->GetTypeIndex()); + lblSt->SetKonst(arrayConst); + std::string lblStr(".L_"); + uint32 labelIdxTmp = cgFunc->GetLabelIdx(); + lblStr.append(std::to_string(cgFunc->GetUniqueID())).append("_LOCAL_CONST.").append(std::to_string(labelIdxTmp++)); + cgFunc->SetLabelIdx(labelIdxTmp); + lblSt->SetNameStrIdx(lblStr); + cgFunc->AddEmitSt(cgFunc->GetCurBB()->GetId(), *lblSt); + ImmOperand &stOpnd = cgFunc->GetOpndBuilder()->CreateImm(*lblSt, 0, 0); + /* get index */ + PrimType srcType = rangeGotoNode.Opnd(0)->GetPrimType(); + RegOperand &opnd0 = SelectCopy2Reg(srcOpnd, srcType); + int32 minIdx = switchTable[0].first; + ImmOperand &opnd1 = cgFunc->GetOpndBuilder()->CreateImm(GetPrimTypeBitSize(srcType), + -minIdx - rangeGotoNode.GetTagOffset()); + RegOperand &indexOpnd = cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(srcType), kRegTyInt); + SelectAdd(indexOpnd, opnd0, opnd1, srcType); + + /* load the displacement into a register by accessing memory at base + index * 8 */ + /* mov .L_xxx_LOCAL_CONST.x(%baseReg, %indexOpnd, 8), %dstRegOpnd */ + MemOperand &dstMemOpnd = cgFunc->GetOpndBuilder()->CreateMem(GetPrimTypeBitSize(PTY_a64)); + RegOperand &baseReg = cgFunc->GetOpndBuilder()->CreatePReg(x64::RBP, GetPrimTypeBitSize(PTY_i64), kRegTyInt); + dstMemOpnd.SetBaseRegister(baseReg); + dstMemOpnd.SetIndexRegister(indexOpnd); + dstMemOpnd.SetOffsetOperand(stOpnd); + dstMemOpnd.SetScaleOperand(cgFunc->GetOpndBuilder()->CreateImm(baseReg.GetSize(), k8ByteSize)); + + /* jumping to the absolute address which is 
stored in dstRegOpnd */ + MOperator mOp = x64::MOP_jmpq_m; + Insn &jmpInsn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp]); + jmpInsn.AddOpndChain(dstMemOpnd); + cgFunc->GetCurBB()->AppendInsn(jmpInsn); +} + +Operand *X64MPIsel::SelectAddrof(AddrofNode &expr, const BaseNode &parent) { + /* get mirSymbol info*/ + MIRSymbol *symbol = cgFunc->GetFunction().GetLocalOrGlobalSymbol(expr.GetStIdx()); + /* of AddrofNode must be either ptr, a32 or a64 */ + PrimType ptype = expr.GetPrimType(); + RegOperand &resReg = cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(ptype), + cgFunc->GetRegTyFromPrimTy(ptype)); + MemOperand &memOperand = GetOrCreateMemOpndFromSymbol(*symbol, expr.GetFieldID()); + uint pSize = GetPrimTypeSize(ptype); + MOperator mOp; + if (pSize <= k4ByteSize) { + mOp = x64::MOP_leal_m_r; + } else if (pSize <= k8ByteSize) { + mOp = x64::MOP_leaq_m_r; + } else { + CHECK_FATAL(false, "NIY"); + } + Insn &addrInsn = (cgFunc->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp])); + addrInsn.AddOpndChain(memOperand).AddOpndChain(resReg); + cgFunc->GetCurBB()->AppendInsn(addrInsn); + return &resReg; +} + +Operand *X64MPIsel::SelectAddrofFunc(AddroffuncNode &expr, const BaseNode &parent) { + uint32 instrSize = static_cast(expr.SizeOfInstr()); + /* must be either a32 or a64. */ + PrimType primType = (instrSize == k8ByteSize) ? PTY_a64 : (instrSize == k4ByteSize) ? PTY_a32 : PTY_begin; + CHECK_FATAL(primType != PTY_begin, "prim-type of Func Addr must be either a32 or a64!"); + MIRFunction *mirFunction = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(expr.GetPUIdx()); + MIRSymbol *symbol = mirFunction->GetFuncSymbol(); + MIRStorageClass storageClass = symbol->GetStorageClass(); + RegOperand &resReg = cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(primType), + cgFunc->GetRegTyFromPrimTy(primType)); + if (storageClass == maple::kScText && symbol->GetSKind() == maple::kStFunc) { + ImmOperand &stOpnd = cgFunc->GetOpndBuilder()->CreateImm(*symbol, 0, 0); + X64MOP_t mOp = x64::MOP_movabs_s_r; + Insn &addrInsn = (cgFunc->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp])); + addrInsn.AddOpndChain(stOpnd).AddOpndChain(resReg); + cgFunc->GetCurBB()->AppendInsn(addrInsn); + } else { + CHECK_FATAL(false, "NIY"); + } + return &resReg; +} + +Operand *X64MPIsel::SelectAddrofLabel(AddroflabelNode &expr, const BaseNode &parent) { + PrimType primType = expr.GetPrimType(); + uint32 bitSize = GetPrimTypeBitSize(primType); + RegOperand &resOpnd = cgFunc->GetOpndBuilder()->CreateVReg(bitSize, + cgFunc->GetRegTyFromPrimTy(primType)); + RegOperand &baseOpnd = cgFunc->GetOpndBuilder()->CreatePReg(x64::RIP, bitSize, + cgFunc->GetRegTyFromPrimTy(primType)); + + auto labelStr = ".L." 
+ std::to_string(cgFunc->GetUniqueID()) + "__" + std::to_string(expr.GetOffset()); + MIRSymbol *labelSym = cgFunc->GetFunction().GetSymTab()->CreateSymbol(kScopeLocal); + ASSERT(labelSym != nullptr, "null ptr check"); + labelSym->SetStorageClass(kScFstatic); + labelSym->SetSKind(kStConst); + labelSym->SetNameStrIdx(labelStr); + MIRType *etype = GlobalTables::GetTypeTable().GetTypeFromTyIdx((TyIdx)PTY_a64); + ASSERT(etype != nullptr, "null ptr check"); + auto *labelConst = cgFunc->GetMemoryPool()->New(expr.GetOffset(), + cgFunc->GetFunction().GetPuidx(), *etype); + ASSERT(labelConst != nullptr, "null ptr check"); + labelSym->SetKonst(labelConst); + ImmOperand &stOpnd = cgFunc->GetOpndBuilder()->CreateImm(*labelSym, 0, 0); + + MemOperand &memOpnd = cgFunc->GetOpndBuilder()->CreateMem(bitSize); + memOpnd.SetBaseRegister(baseOpnd); + memOpnd.SetOffsetOperand(stOpnd); + + X64MOP_t mOp = x64::MOP_begin; + if (bitSize <= k32BitSize) { + mOp = x64::MOP_leal_m_r; + } else if (bitSize <= k64BitSize) { + mOp = x64::MOP_leaq_m_r; + } else { + CHECK_FATAL(false, "NIY"); + } + Insn &addrInsn = (cgFunc->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp])); + addrInsn.AddOpndChain(memOpnd).AddOpndChain(resOpnd); + cgFunc->GetCurBB()->AppendInsn(addrInsn); + return &resOpnd; +} + +static X64MOP_t PickJmpInsn(Opcode brOp, Opcode cmpOp, bool isFloat, bool isSigned) { + switch (cmpOp) { + case OP_ne: + return (brOp == OP_brtrue) ? MOP_jne_l : MOP_je_l; + case OP_eq: + return (brOp == OP_brtrue) ? MOP_je_l : MOP_jne_l; + case OP_lt: + return (brOp == OP_brtrue) ? (isSigned ? MOP_jl_l : MOP_jb_l) + : (isSigned ? MOP_jge_l : MOP_jae_l); + case OP_le: + return (brOp == OP_brtrue) ? (isSigned ? MOP_jle_l : MOP_jbe_l) + : (isSigned ? MOP_jg_l : MOP_ja_l); + case OP_gt: + return (brOp == OP_brtrue) ? (isFloat ? MOP_ja_l : (isSigned ? MOP_jg_l : MOP_ja_l)) + : (isSigned ? MOP_jle_l : MOP_jbe_l); + case OP_ge: + return (brOp == OP_brtrue) ? (isSigned ? MOP_jge_l : MOP_jae_l) + : (isSigned ? MOP_jl_l : MOP_jb_l); + default: + CHECK_FATAL(false, "PickJmpInsn error"); + } +} + +/* + * handle brfalse/brtrue op, opnd0 can be a compare node or non-compare node + * such as a dread for example + */ +void X64MPIsel::SelectCondGoto(CondGotoNode &stmt, BaseNode &condNode, Operand &opnd0) { + Opcode opcode = stmt.GetOpCode(); + X64MOP_t jmpOperator = x64::MOP_begin; + if (opnd0.IsImmediate()) { + ASSERT(opnd0.IsIntImmediate(), "only support int immediate"); + ASSERT(opcode == OP_brtrue || opcode == OP_brfalse, "unsupported opcode"); + ImmOperand &immOpnd0 = static_cast(opnd0); + if ((opcode == OP_brtrue && !(immOpnd0.GetValue() != 0)) || + (opcode == OP_brfalse && !(immOpnd0.GetValue() == 0))) { + return; + } + jmpOperator = x64::MOP_jmpq_l; + cgFunc->SetCurBBKind(BB::kBBGoto); + } else { + PrimType primType; + Opcode condOpcode = condNode.GetOpCode(); + if (!kOpcodeInfo.IsCompare(condOpcode)) { + primType = condNode.GetPrimType(); + ImmOperand &imm0 = cgFunc->GetOpndBuilder()->CreateImm(GetPrimTypeBitSize(primType), 0); + SelectCmp(opnd0, imm0, primType); + condOpcode = OP_ne; + } else { + primType = static_cast(condNode).GetOpndType(); + } + bool isFloat = IsPrimitiveFloat(primType); + jmpOperator = PickJmpInsn(opcode, condOpcode, isFloat, IsSignedInteger(primType)); + cgFunc->SetCurBBKind(BB::kBBIf); + } + /* gen targetOpnd, .L.xxx__xx */ + auto funcName = ".L." 
+ std::to_string(cgFunc->GetUniqueID()) + "__" + std::to_string(stmt.GetOffset()); + LabelOperand &targetOpnd = cgFunc->GetOpndBuilder()->CreateLabel(funcName.c_str(), stmt.GetOffset()); + /* select jump Insn */ + Insn &jmpInsn = (cgFunc->GetInsnBuilder()->BuildInsn(jmpOperator, X64CG::kMd[jmpOperator])); + jmpInsn.AddOpndChain(targetOpnd); + cgFunc->GetCurBB()->AppendInsn(jmpInsn); +} + +Operand *X64MPIsel::SelectStrLiteral(ConststrNode &constStr) { + std::string labelStr; + labelStr.append(".LUstr_"); + labelStr.append(std::to_string(constStr.GetStrIdx())); + MIRSymbol *labelSym = GlobalTables::GetGsymTable().GetSymbolFromStrIdx( + GlobalTables::GetStrTable().GetStrIdxFromName(labelStr)); + MIRType *etype = GlobalTables::GetTypeTable().GetTypeFromTyIdx((TyIdx)PTY_a64); + auto *c = cgFunc->GetMemoryPool()->New(constStr.GetStrIdx(), *etype); + if (labelSym == nullptr) { + labelSym = cgFunc->GetMirModule().GetMIRBuilder()->CreateGlobalDecl(labelStr, c->GetType()); + labelSym->SetStorageClass(kScFstatic); + labelSym->SetSKind(kStConst); + /* c may be local, we need a global node here */ + labelSym->SetKonst(cgFunc->NewMirConst(*c)); + } + if (c->GetPrimType() == PTY_ptr) { + ImmOperand &stOpnd = cgFunc->GetOpndBuilder()->CreateImm(*labelSym, 0, 0); + RegOperand &addrOpnd = cgFunc->GetOpndBuilder()->CreateVReg(k64BitSize, cgFunc->GetRegTyFromPrimTy(PTY_a64)); + Insn &addrOfInsn = (cgFunc->GetInsnBuilder()->BuildInsn(x64::MOP_movabs_s_r, X64CG::kMd[x64::MOP_movabs_s_r])); + addrOfInsn.AddOpndChain(stOpnd).AddOpndChain(addrOpnd); + cgFunc->GetCurBB()->AppendInsn(addrOfInsn); + return &addrOpnd; + } + CHECK_FATAL(false, "Unsupported const string type"); + return nullptr; +} + +Operand &X64MPIsel::GetTargetRetOperand(PrimType primType, int32 sReg) { + uint32 bitSize = GetPrimTypeBitSize(primType); + regno_t retReg = 0; + switch (sReg) { + case kSregRetval0: + retReg = IsPrimitiveFloat(primType) ? x64::V0 : x64::RAX; + break; + case kSregRetval1: + retReg = x64::RDX; + break; + default: + CHECK_FATAL(false, "GetTargetRetOperand: NIY"); + break; + } + RegOperand &parmRegOpnd = cgFunc->GetOpndBuilder()->CreatePReg(retReg, bitSize, + cgFunc->GetRegTyFromPrimTy(primType)); + return parmRegOpnd; +} + +Operand *X64MPIsel::SelectMpy(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) { + PrimType dtype = node.GetPrimType(); + RegOperand *resOpnd = nullptr; + if (!IsPrimitiveVector(dtype)) { + resOpnd = &cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(dtype), + cgFunc->GetRegTyFromPrimTy(dtype)); + RegOperand ®Opnd0 = SelectCopy2Reg(opnd0, dtype, node.Opnd(0)->GetPrimType()); + RegOperand ®Opnd1 = SelectCopy2Reg(opnd1, dtype, node.Opnd(1)->GetPrimType()); + SelectMpy(*resOpnd, regOpnd0, regOpnd1, dtype); + } else { + /* vector operand */ + CHECK_FATAL(false, "NIY"); + } + + return resOpnd; +} + +void X64MPIsel::SelectMpy(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) { + uint32 bitSize = GetPrimTypeBitSize(primType); + SelectCopy(resOpnd, opnd0, primType); + RegOperand ®Opnd1 = SelectCopy2Reg(opnd1, primType); + if (IsSignedInteger(primType) || IsUnsignedInteger(primType)) { + X64MOP_t mOp = (bitSize == k64BitSize) ? x64::MOP_imulq_r_r : + (bitSize == k32BitSize) ? x64::MOP_imull_r_r : (bitSize == k16BitSize) ? 
x64::MOP_imulw_r_r : x64::MOP_begin; + CHECK_FATAL(mOp != x64::MOP_begin, "NIY mapping"); + Insn &insn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp]); + insn.AddOpndChain(regOpnd1).AddOpndChain(resOpnd); + cgFunc->GetCurBB()->AppendInsn(insn); + } else if (IsPrimitiveFloat(primType)) { + X64MOP_t mOp = (bitSize == k64BitSize) ? x64::MOP_mulfd_r_r : + (bitSize == k32BitSize) ? x64::MOP_mulfs_r_r : x64::MOP_begin; + CHECK_FATAL(mOp != x64::MOP_begin, "NIY mapping"); + Insn &insn = cgFunc->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp]); + insn.AddOpndChain(regOpnd1).AddOpndChain(resOpnd); + cgFunc->GetCurBB()->AppendInsn(insn); + } +} + +/* + * Dividend(EDX:EAX) / Divisor(reg/mem32) = Quotient(EAX) Remainder(EDX) + * IDIV instruction perform signed division of EDX:EAX by the contents of 32-bit register or memory location and + * store the quotient in EAX and the remainder in EDX. + * The instruction truncates non-integral results towards 0. The sign of the remainder is always the same as the sign + * of the dividend, and the absolute value of the remainder is less than the absolute value of the divisor. + * An overflow generates a #DE (divide error) exception, rather than setting the OF flag. + * To avoid overflow problems, precede this instruction with a CDQ instruction to sign-extend the dividend Divisor. + * CDQ Sign-extend EAX into EDX:EAX. This action helps avoid overflow problems in signed number arithmetic. + */ +Operand *X64MPIsel::SelectDiv(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) { + PrimType primType = node.GetPrimType(); + Operand *resOpnd = nullptr; + if (!IsPrimitiveVector(primType)) { + RegOperand ®Opnd0 = SelectCopy2Reg(opnd0, primType, node.Opnd(0)->GetPrimType()); + RegOperand ®Opnd1 = SelectCopy2Reg(opnd1, primType, node.Opnd(1)->GetPrimType()); + resOpnd = SelectDivRem(regOpnd0, regOpnd1, primType, node.GetOpCode()); + } else { + /* vector operand */ + CHECK_FATAL(false, "NIY"); + } + return resOpnd; +} + +Operand *X64MPIsel::SelectRem(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) { + PrimType primType = node.GetPrimType(); + Operand *resOpnd = nullptr; + if (!IsPrimitiveVector(primType)) { + RegOperand ®Opnd0 = SelectCopy2Reg(opnd0, primType, node.Opnd(0)->GetPrimType()); + RegOperand ®Opnd1 = SelectCopy2Reg(opnd1, primType, node.Opnd(1)->GetPrimType()); + resOpnd = SelectDivRem(regOpnd0, regOpnd1, primType, node.GetOpCode()); + } else { + /* vector operand */ + CHECK_FATAL(false, "NIY"); + } + return resOpnd; +} + +Operand *X64MPIsel::SelectDivRem(RegOperand &opnd0, RegOperand &opnd1, PrimType primType, Opcode opcode) { + ASSERT(opcode == OP_div || opcode == OP_rem, "unsupported opcode"); + if(IsSignedInteger(primType) || IsUnsignedInteger(primType)) { + uint32 bitSize = GetPrimTypeBitSize(primType); + /* copy dividend to eax */ + RegOperand &raxOpnd = cgFunc->GetOpndBuilder()->CreatePReg(x64::RAX, bitSize, + cgFunc->GetRegTyFromPrimTy(primType)); + SelectCopy(raxOpnd, opnd0, primType); + + RegOperand &rdxOpnd = cgFunc->GetOpndBuilder()->CreatePReg(x64::RDX, bitSize, + cgFunc->GetRegTyFromPrimTy(primType)); + bool isSigned = IsSignedInteger(primType); + if (isSigned) { + /* cdq edx:eax = sign-extend of eax*/ + X64MOP_t cvtMOp = (bitSize == k64BitSize) ? x64::MOP_cqo : + (bitSize == k32BitSize) ? x64::MOP_cdq : + (bitSize == k16BitSize) ? 
x64::MOP_cwd : x64::MOP_begin; + CHECK_FATAL(cvtMOp != x64::MOP_begin, "NIY mapping"); + Insn &cvtInsn = cgFunc->GetInsnBuilder()->BuildInsn(cvtMOp, raxOpnd, rdxOpnd); + cgFunc->GetCurBB()->AppendInsn(cvtInsn); + } else { + /* set edx = 0 */ + SelectCopy(rdxOpnd, cgFunc->GetOpndBuilder()->CreateImm(bitSize, 0), primType); + } + /* div */ + X64MOP_t divMOp = (bitSize == k64BitSize) ? (isSigned ? x64::MOP_idivq_r : x64::MOP_divq_r) : + (bitSize == k32BitSize) ? (isSigned ? x64::MOP_idivl_r : x64::MOP_divl_r) : + (bitSize == k16BitSize) ? (isSigned ? x64::MOP_idivw_r : x64::MOP_divw_r) : + x64::MOP_begin; + CHECK_FATAL(divMOp != x64::MOP_begin, "NIY mapping"); + Insn &insn = cgFunc->GetInsnBuilder()->BuildInsn(divMOp, opnd1, raxOpnd, rdxOpnd); + cgFunc->GetCurBB()->AppendInsn(insn); + /* return */ + RegOperand &resOpnd = cgFunc->GetOpndBuilder()->CreateVReg(bitSize, + cgFunc->GetRegTyFromPrimTy(primType)); + SelectCopy(resOpnd, ((opcode == OP_div) ? raxOpnd : rdxOpnd), primType); + return &resOpnd; + } else if (IsPrimitiveFloat(primType)) { + X64MOP_t divMOp = x64::MOP_divsd_r; + Insn &insn = cgFunc->GetInsnBuilder()->BuildInsn(divMOp, opnd1, opnd0); + cgFunc->GetCurBB()->AppendInsn(insn); + return &opnd0; + } else { + CHECK_FATAL(false, "NIY"); + } +} + +Operand *X64MPIsel::SelectCmpOp(CompareNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) { + PrimType dtype = node.GetPrimType(); + PrimType primOpndType = node.GetOpndType(); + RegOperand *resOpnd = nullptr; + RegOperand ®Opnd0 = SelectCopy2Reg(opnd0, primOpndType, node.Opnd(0)->GetPrimType()); + RegOperand ®Opnd1 = SelectCopy2Reg(opnd1, primOpndType, node.Opnd(1)->GetPrimType()); + if (!IsPrimitiveVector(node.GetPrimType())) { + resOpnd = &cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(dtype), + cgFunc->GetRegTyFromPrimTy(dtype)); + SelectCmp(regOpnd0, regOpnd1, primOpndType); + Opcode parentOp = parent.GetOpCode(); + if (parentOp == OP_brfalse || parentOp == OP_brtrue || parentOp == OP_select) { + return resOpnd; + } + SelectCmpResult(*resOpnd, node.GetOpCode(), dtype, primOpndType); + } else { + /* vector operand */ + CHECK_FATAL(false, "NIY"); + } + return resOpnd; +} + +void X64MPIsel::SelectCmp(Operand &opnd0, Operand &opnd1, PrimType primType) { + x64::X64MOP_t cmpMOp = x64::MOP_begin; + if (IsPrimitiveInteger(primType)) { + cmpMOp = GetCmpMop(opnd0.GetKind(), opnd1.GetKind(), primType); + } else if (IsPrimitiveFloat(primType)) { + cmpMOp = x64::MOP_ucomisd_r_r; + } else { + CHECK_FATAL(false, "NIY"); + } + ASSERT(cmpMOp != x64::MOP_begin, "unsupported mOp"); + Insn &cmpInsn = (cgFunc->GetInsnBuilder()->BuildInsn(cmpMOp, X64CG::kMd[cmpMOp])); + cmpInsn.AddOpndChain(opnd1).AddOpndChain(opnd0); + cgFunc->GetCurBB()->AppendInsn(cmpInsn); +} + +void X64MPIsel::SelectCmpResult(RegOperand &resOpnd, Opcode opCode, PrimType primType, PrimType primOpndType) { + bool isSigned = !IsPrimitiveUnsigned(primOpndType); + /* set result -> u8 */ + RegOperand &tmpResOpnd = cgFunc->GetOpndBuilder()->CreateVReg(k8BitSize, cgFunc->GetRegTyFromPrimTy(PTY_u8)); + x64::X64MOP_t setMOp = GetSetCCMop(opCode, tmpResOpnd.GetKind(), isSigned); + ASSERT(setMOp != x64::MOP_begin, "unsupported mOp"); + Insn &setInsn = cgFunc->GetInsnBuilder()->BuildInsn(setMOp, X64CG::kMd[setMOp]); + setInsn.AddOpndChain(tmpResOpnd); + cgFunc->GetCurBB()->AppendInsn(setInsn); + /* cvt u8 -> primType */ + SelectIntCvt(resOpnd, tmpResOpnd, primType, PTY_u8); +} + +Operand *X64MPIsel::SelectSelect(TernaryNode &expr, Operand &cond, Operand &trueOpnd, Operand 
&falseOpnd, + const BaseNode &parent) { + PrimType dtype = expr.GetPrimType(); + RegOperand &resOpnd = cgFunc->GetOpndBuilder()->CreateVReg(GetPrimTypeBitSize(dtype), + cgFunc->GetRegTyFromPrimTy(dtype)); + RegOperand &trueRegOpnd = SelectCopy2Reg(trueOpnd, dtype, expr.Opnd(1)->GetPrimType()); + RegOperand &falseRegOpnd = SelectCopy2Reg(falseOpnd, dtype, expr.Opnd(2)->GetPrimType()); + Opcode cmpOpcode; + PrimType cmpPrimType; + if (kOpcodeInfo.IsCompare(expr.Opnd(0)->GetOpCode())) { + CompareNode* cmpNode = static_cast(expr.Opnd(0)); + ASSERT(cmpNode != nullptr, "null ptr check"); + cmpOpcode = cmpNode->GetOpCode(); + cmpPrimType = cmpNode->GetOpndType(); + } else { + cmpPrimType = expr.Opnd(0)->GetPrimType(); + cmpOpcode = OP_ne; + ImmOperand &immOpnd = cgFunc->GetOpndBuilder()->CreateImm(GetPrimTypeBitSize(cmpPrimType), 0); + SelectCmp(cond, immOpnd, cmpPrimType); + } + SelectSelect(resOpnd, trueRegOpnd, falseRegOpnd, dtype, cmpOpcode, cmpPrimType); + return &resOpnd; +} + +void X64MPIsel::SelectSelect(Operand &resOpnd, Operand &trueOpnd, Operand &falseOpnd, PrimType primType, + Opcode cmpOpcode, PrimType cmpPrimType) { + CHECK_FATAL(!IsPrimitiveFloat(primType), "NIY"); + bool isSigned = !IsPrimitiveUnsigned(primType); + uint32 bitSize = GetPrimTypeBitSize(primType); + if (bitSize == k8BitSize) { + /* cmov unsupported 8bit, cvt to 32bit */ + PrimType cvtType = isSigned ? PTY_i32 : PTY_u32; + RegOperand &tmpResOpnd = cgFunc->GetOpndBuilder()->CreateVReg(k32BitSize, kRegTyInt); + Operand &tmpTrueOpnd = SelectCopy2Reg(trueOpnd, cvtType, primType); + Operand &tmpFalseOpnd = SelectCopy2Reg(falseOpnd, cvtType, primType); + SelectSelect(tmpResOpnd, tmpTrueOpnd, tmpFalseOpnd, cvtType, cmpOpcode, cmpPrimType); + SelectCopy(resOpnd, tmpResOpnd, primType, cvtType); + return; + } + RegOperand &tmpOpnd = SelectCopy2Reg(trueOpnd, primType); + SelectCopy(resOpnd, falseOpnd, primType); + x64::X64MOP_t cmovMop = GetCMovCCMop(cmpOpcode, bitSize, !IsPrimitiveUnsigned(cmpPrimType)); + ASSERT(cmovMop != x64::MOP_begin, "unsupported mOp"); + Insn &comvInsn = cgFunc->GetInsnBuilder()->BuildInsn(cmovMop, X64CG::kMd[cmovMop]); + comvInsn.AddOpndChain(tmpOpnd).AddOpndChain(resOpnd); + cgFunc->GetCurBB()->AppendInsn(comvInsn); +} + +void X64MPIsel::SelectMinOrMax(bool isMin, Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) { + if (IsPrimitiveInteger(primType)) { + SelectCmp(opnd0, opnd1, primType); + Opcode cmpOpcode = isMin ? OP_lt : OP_gt; + SelectSelect(resOpnd, opnd0, opnd1, primType, cmpOpcode, primType); + } else { + CHECK_FATAL(false, "NIY type max or min"); + } +} + +Operand *X64MPIsel::SelectBswap(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) { + PrimType dtype = node.GetPrimType(); + auto bitWidth = GetPrimTypeBitSize(dtype); + // bswap only support 32/64-bit, xchg support 16-bit -- xchg al, ah + CHECK_FATAL(bitWidth == k16BitSize || bitWidth == k32BitSize || + bitWidth == k64BitSize, "NIY, unsupported bitWidth."); + + RegOperand *resOpnd = nullptr; + + if (bitWidth == k16BitSize) { + /* + * For 16-bit, use xchg, such as: xchg ah, al. So, the register must support high 8-bit. + * For x64, we can use RAX(AH:AL), RBX(BH:BL), RCX(CH:CL), RDX(DH:DL). + * The RA does not perform special processing for the high 8-bit case. + * So, we use the RAX regiser in here. 
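As a value-level illustration of the two byte-swap paths handled here (not backend code): a 16-bit swap exchanges the two bytes, which is exactly what the `xchg ah, al` sequence computes, while the wider cases reverse all bytes as `bswap` does. The helper names below are invented.

```cpp
// Sketch of what the generated code computes, assuming the usual byte-swap semantics.
#include <cstdint>

constexpr uint16_t Bswap16(uint16_t v) {
  // Equivalent to xchg ah, al on the low 16 bits of RAX.
  return static_cast<uint16_t>((v >> 8) | (v << 8));
}

constexpr uint32_t Bswap32(uint32_t v) {
  // Equivalent to bswapl: full byte reversal.
  return (v >> 24) | ((v >> 8) & 0x0000FF00u) | ((v << 8) & 0x00FF0000u) | (v << 24);
}

static_assert(Bswap16(0x1234) == 0x3412, "16-bit case: two bytes exchanged");
static_assert(Bswap32(0x12345678u) == 0x78563412u, "32/64-bit case: all bytes reversed");
```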
+ */ + resOpnd = &cgFunc->GetOpndBuilder()->CreatePReg(x64::RAX, bitWidth, + cgFunc->GetRegTyFromPrimTy(dtype)); + SelectCopy(*resOpnd, opnd0, dtype, node.Opnd(0)->GetPrimType()); + RegOperand &lowerOpnd = cgFunc->GetOpndBuilder()->CreatePReg(x64::RAX, k8BitSize, + cgFunc->GetRegTyFromPrimTy(dtype)); + RegOperand &highOpnd = cgFunc->GetOpndBuilder()->CreatePReg(x64::RAX, k8BitSize, + cgFunc->GetRegTyFromPrimTy(dtype)); + highOpnd.SetHigh8Bit(); + x64::X64MOP_t xchgMop = MOP_xchgb_r_r; + Insn &xchgInsn = cgFunc->GetInsnBuilder()->BuildInsn(xchgMop, X64CG::kMd[xchgMop]); + xchgInsn.AddOpndChain(highOpnd).AddOpndChain(lowerOpnd); + cgFunc->GetCurBB()->AppendInsn(xchgInsn); + } else { + resOpnd = &cgFunc->GetOpndBuilder()->CreateVReg(bitWidth, + cgFunc->GetRegTyFromPrimTy(dtype)); + SelectCopy(*resOpnd, opnd0, dtype, node.Opnd(0)->GetPrimType()); + x64::X64MOP_t bswapMop = (bitWidth == k64BitSize) ? MOP_bswapq_r : MOP_bswapl_r; + Insn &bswapInsn = cgFunc->GetInsnBuilder()->BuildInsn(bswapMop, X64CG::kMd[bswapMop]); + bswapInsn.AddOperand(*resOpnd); + cgFunc->GetCurBB()->AppendInsn(bswapInsn); + } + return resOpnd; +} + +RegOperand &X64MPIsel::GetTargetStackPointer(PrimType primType) { + return cgFunc->GetOpndBuilder()->CreatePReg(x64::RSP, GetPrimTypeBitSize(primType), + cgFunc->GetRegTyFromPrimTy(primType)); +} + +RegOperand &X64MPIsel::GetTargetBasicPointer(PrimType primType) { + return cgFunc->GetOpndBuilder()->CreatePReg(x64::RBP, GetPrimTypeBitSize(primType), + cgFunc->GetRegTyFromPrimTy(primType)); +} + +void X64MPIsel::SelectAsm(AsmNode &node) { + cgFunc->SetHasAsm(); + CHECK_FATAL(false, "NIY"); +} +} diff --git a/src/mapleall/maple_be/src/cg/x86_64/x64_abi.cpp b/src/mapleall/maple_be/src/cg/x86_64/x64_abi.cpp new file mode 100644 index 0000000000000000000000000000000000000000..0c3aff7ba3ebdc9673f2c540b8bd5cd60eb7c32d --- /dev/null +++ b/src/mapleall/maple_be/src/cg/x86_64/x64_abi.cpp @@ -0,0 +1,151 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "x64_cgfunc.h" +#include "becommon.h" +#include "x64_isa.h" + +namespace maplebe { +using namespace maple; +namespace x64 { +bool IsAvailableReg(X64reg reg) { + switch (reg) { +/* integer registers */ +#define INT_REG(ID, PREF8, PREF8_16, PREF16, PREF32, PREF64, canBeAssigned, isCalleeSave, \ + isParam, isSpill, isExtraSpill) \ + case R##ID: \ + return canBeAssigned; +#define INT_REG_ALIAS(ALIAS, ID) +#include "x64_int_regs.def" +#undef INT_REG +#undef INT_REG_ALIAS +/* fp-simd registers */ +#define FP_SIMD_REG(ID, P8, P16, P32, P64, P128, canBeAssigned, isCalleeSave, \ + isParam, isSpill, isExtraSpill) \ + case V##ID: \ + return canBeAssigned; +#include "x64_fp_simd_regs.def" +#undef FP_SIMD_REG + default: + return false; + } +} + +bool IsCallerSaveReg(X64reg regNO) { + return (regNO == R0) || (regNO == R4) || (R2 <= regNO && regNO <= R3) || + (R6 <= regNO && regNO <= R7) || (R8 <= regNO && regNO <= R11) || + (V2 <= regNO && regNO <= V7) || (V16 <= regNO && regNO <= V23); +} + +bool IsCalleeSavedReg(X64reg reg) { + switch (reg) { +/* integer registers */ +#define INT_REG(ID, PREF8, PREF8_16, PREF16, PREF32, PREF64, canBeAssigned, isCalleeSave, \ + isParam, isSpill, isExtraSpill) \ + case R##ID: \ + return isCalleeSave; +#define INT_REG_ALIAS(ALIAS, ID) +#include "x64_int_regs.def" +#undef INT_REG +#undef INT_REG_ALIAS +/* fp-simd registers */ +#define FP_SIMD_REG(ID, P8, P16, P32, P64, P128, canBeAssigned, isCalleeSave, \ + isParam, isSpill, isExtraSpill) \ + case V##ID: \ + return isCalleeSave; +#include "x64_fp_simd_regs.def" +#undef FP_SIMD_REG + default: + return false; + } +} + +bool IsParamReg(X64reg reg) { + switch (reg) { +/* integer registers */ +#define INT_REG(ID, PREF8, PREF8_16, PREF16, PREF32, PREF64, canBeAssigned, isCalleeSave, \ + isParam, isSpill, isExtraSpill) \ + case R##ID: \ + return isParam; +#define INT_REG_ALIAS(ALIAS, ID) +#include "x64_int_regs.def" +#undef INT_REG +#undef INT_REG_ALIAS +/* fp-simd registers */ +#define FP_SIMD_REG(ID, P8, P16, P32, P64, P128, canBeAssigned, isCalleeSave, \ + isParam, isSpill, isExtraSpill) \ + case V##ID: \ + return isParam; +#include "x64_fp_simd_regs.def" +#undef FP_SIMD_REG + default: + return false; + } +} + +bool IsSpillReg(X64reg reg) { + switch (reg) { +/* integer registers */ +#define INT_REG(ID, PREF8, PREF8_16, PREF16, PREF32, PREF64, canBeAssigned, isCalleeSave, \ + isParam, isSpill, isExtraSpill) \ + case R##ID: \ + return isSpill; +#define INT_REG_ALIAS(ALIAS, ID) +#include "x64_int_regs.def" +#undef INT_REG +#undef INT_REG_ALIAS +/* fp-simd registers */ +#define FP_SIMD_REG(ID, P8, P16, P32, P64, P128, canBeAssigned, isCalleeSave, \ + isParam, isSpill, isExtraSpill) \ + case V##ID: \ + return isSpill; +#include "x64_fp_simd_regs.def" +#undef FP_SIMD_REG + default: + return false; + } +} + +bool IsExtraSpillReg(X64reg reg) { + switch (reg) { +/* integer registers */ +#define INT_REG(ID, PREF8, PREF8_16, PREF16, PREF32, PREF64, canBeAssigned, isCalleeSave, \ + isParam, isSpill, isExtraSpill) \ + case R##ID: \ + return isExtraSpill; +#define INT_REG_ALIAS(ALIAS, ID) +#include "x64_int_regs.def" +#undef INT_REG +#undef INT_REG_ALIAS +/* fp-simd registers */ +#define FP_SIMD_REG(ID, P8, P16, P32, P64, P128, canBeAssigned, isCalleeSave, \ + isParam, isSpill, isExtraSpill) \ + case V##ID: \ + return isExtraSpill; +#include "x64_fp_simd_regs.def" +#undef FP_SIMD_REG + default: + return false; + } +} + +bool IsSpillRegInRA(X64reg regNO, bool has3RegOpnd) { + /* if has 3 RegOpnd, previous reg used to spill. 
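The register predicates in this file are generated by re-including the register .def tables with different X-macro definitions, so each query extracts one column of the table. Below is a self-contained sketch of that pattern using a made-up two-row table; it is not the real `x64_int_regs.def` column layout.

```cpp
// X-macro sketch: the table rows live in one macro, and each query re-expands the
// table with a definition that keeps only the column it cares about.
#include <cstdint>

#define DEMO_REG_TABLE(X)                                       \
  X(kDemoRax, /*canBeAssigned=*/true, /*isCalleeSave=*/false)   \
  X(kDemoRbx, /*canBeAssigned=*/true, /*isCalleeSave=*/true)

enum DemoReg : uint8_t {
#define DEMO_REG(ID, canBeAssigned, isCalleeSave) ID,
  DEMO_REG_TABLE(DEMO_REG)
#undef DEMO_REG
  kDemoRegLast
};

constexpr bool DemoIsCalleeSaved(DemoReg reg) {
  switch (reg) {
#define DEMO_REG(ID, canBeAssigned, isCalleeSave) \
    case ID: return isCalleeSave;
    DEMO_REG_TABLE(DEMO_REG)
#undef DEMO_REG
    default: return false;
  }
}

static_assert(!DemoIsCalleeSaved(kDemoRax) && DemoIsCalleeSaved(kDemoRbx),
              "each query pulls one column out of the shared table");
```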
*/ + if (has3RegOpnd) { + return IsSpillReg(regNO) || IsExtraSpillReg(regNO); + } + return IsSpillReg(regNO); +} +} /* namespace x64 */ +} /* namespace maplebe */ diff --git a/src/mapleall/maple_be/src/cg/x86_64/x64_args.cpp b/src/mapleall/maple_be/src/cg/x86_64/x64_args.cpp new file mode 100644 index 0000000000000000000000000000000000000000..8ed44aa639461c1c7e45bd0b9d87c18f67caff44 --- /dev/null +++ b/src/mapleall/maple_be/src/cg/x86_64/x64_args.cpp @@ -0,0 +1,298 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "x64_cg.h" +#include "x64_isa.h" +#include "x64_MPISel.h" + +namespace maplebe { +using namespace maple; + +void X64MoveRegArgs::Run() { + MoveVRegisterArgs(); + MoveRegisterArgs(); +} + +void X64MoveRegArgs::CollectRegisterArgs(std::map &argsList, + std::vector &indexList, + std::map &pairReg, + std::vector &numFpRegs, + std::vector &fpSize) const { + X64CGFunc *x64CGFunc = static_cast(cgFunc); + uint32 numFormal = static_cast(x64CGFunc->GetFunction().GetFormalCount()); + numFpRegs.resize(numFormal); + fpSize.resize(numFormal); + X64CallConvImpl parmlocator(x64CGFunc->GetBecommon()); + CCLocInfo ploc; + uint32 start = 0; + if (numFormal) { + MIRFunction *func = const_cast(x64CGFunc->GetBecommon().GetMIRModule().CurFunction()); + if (x64CGFunc->GetBecommon().HasFuncReturnType(*func)) { + TyIdx tyIdx = x64CGFunc->GetBecommon().GetFuncReturnType(*func); + if (x64CGFunc->GetBecommon().GetTypeSize(tyIdx) <= k16ByteSize) { + start = 1; + } + } + } + for (uint32 i = start; i < numFormal; ++i) { + MIRType *ty = x64CGFunc->GetFunction().GetNthParamType(i); + parmlocator.LocateNextParm(*ty, ploc, i == 0, &x64CGFunc->GetFunction()); + if (ploc.reg0 == kRinvalid) { + continue; + } + X64reg reg0 = static_cast(ploc.reg0); + MIRSymbol *sym = x64CGFunc->GetFunction().GetFormal(i); + if (sym->IsPreg()) { + continue; + } + argsList[i] = reg0; + indexList.emplace_back(i); + if (ploc.reg1 == kRinvalid) { + continue; + } + if (ploc.numFpPureRegs) { + uint32 index = i; + numFpRegs[index] = ploc.numFpPureRegs; + fpSize[index] = ploc.fpSize; + continue; + } + pairReg[i] = static_cast(ploc.reg1); + } +} + +ArgInfo X64MoveRegArgs::GetArgInfo(std::map &argsList, + uint32 argIndex, std::vector &numFpRegs, std::vector &fpSize) const { + X64CGFunc *x64CGFunc = static_cast(cgFunc); + ArgInfo argInfo; + argInfo.reg = argsList[argIndex]; + argInfo.mirTy = x64CGFunc->GetFunction().GetNthParamType(argIndex); + argInfo.symSize = x64CGFunc->GetBecommon().GetTypeSize(argInfo.mirTy->GetTypeIndex()); + argInfo.memPairSecondRegSize = 0; + argInfo.doMemPairOpt = false; + argInfo.createTwoStores = false; + argInfo.isTwoRegParm = false; + if ((argInfo.symSize > k8ByteSize) && (argInfo.symSize <= k16ByteSize)) { + argInfo.isTwoRegParm = true; + if (numFpRegs[argIndex] > kOneRegister) { + argInfo.symSize = argInfo.stkSize = fpSize[argIndex]; + } else { + if (argInfo.symSize > k12ByteSize) { + argInfo.memPairSecondRegSize = k8ByteSize; + } else { + /* Round to 4 the stack space required for 
storing the struct */ + argInfo.memPairSecondRegSize = k4ByteSize; + } + argInfo.doMemPairOpt = true; + argInfo.symSize = argInfo.stkSize = GetPointerSize(); + } + } else if (argInfo.symSize > k16ByteSize) { + /* For large struct passing, a pointer to the copy is used. */ + argInfo.symSize = argInfo.stkSize = GetPointerSize(); + } else if ((argInfo.mirTy->GetPrimType() == PTY_agg) && (argInfo.symSize < k8ByteSize)) { + argInfo.symSize = argInfo.stkSize = k8ByteSize; + } else { + argInfo.stkSize = (argInfo.symSize < k4ByteSize) ? k4ByteSize : argInfo.symSize; + if (argInfo.symSize > k4ByteSize) { + argInfo.symSize = k8ByteSize; + } + } + + if (GetVecLanes(argInfo.mirTy->GetPrimType()) > 0) { + /* vector type */ + CHECK_FATAL(false, "NIY"); + } + + argInfo.regType = (argInfo.reg < V0) ? kRegTyInt : kRegTyFloat; + argInfo.sym = x64CGFunc->GetFunction().GetFormal(argIndex); + CHECK_NULL_FATAL(argInfo.sym); + argInfo.symLoc = static_cast(x64CGFunc->GetMemlayout()-> + GetSymAllocInfo(argInfo.sym->GetStIndex())); + CHECK_NULL_FATAL(argInfo.symLoc); + return argInfo; +} + +void X64MoveRegArgs::GenerateMovInsn(ArgInfo &argInfo, X64reg reg2) { + /* reg2 is required when the struct size is between 8-16 bytes */ + X64CGFunc *x64CGFunc = static_cast(cgFunc); + int32 stOffset = x64CGFunc->GetBaseOffset(*argInfo.symLoc); + RegOperand *baseOpnd = static_cast(x64CGFunc->GetBaseReg(*argInfo.symLoc)); + uint32 opndSize = argInfo.symSize * kBitsPerByte; + RegOperand ®Opnd = x64CGFunc->GetOpndBuilder()->CreatePReg(argInfo.reg, + opndSize, argInfo.regType); + MemOperand *memOpnd = &x64CGFunc->GetOpndBuilder()->CreateMem(*baseOpnd, stOffset, opndSize); + + MOperator mOp; + if (opndSize == k64BitSize) { + mOp = x64::MOP_movq_r_m; + } else if (opndSize == k32BitSize) { + mOp = x64::MOP_movl_r_m; + } else if (opndSize == k16BitSize) { + mOp = x64::MOP_movw_r_m; + } else if (opndSize == k8BitSize) { + mOp = x64::MOP_movb_r_m; + } else { + CHECK_FATAL(false, "NIY"); + } + Insn &insn = x64CGFunc->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp]); + insn.AddOpndChain(regOpnd).AddOpndChain(*memOpnd); + x64CGFunc->GetCurBB()->AppendInsn(insn); + if (reg2 != kRinvalid) { + RegOperand ®Opnd2 = x64CGFunc->GetOpndBuilder()->CreatePReg(reg2, opndSize, argInfo.regType); + MemOperand *memOpnd2 = &x64CGFunc->GetOpndBuilder()->CreateMem(*baseOpnd, stOffset + 8, opndSize); + Insn &insn2 = x64CGFunc->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp]); + insn2.AddOpndChain(regOpnd2).AddOpndChain(*memOpnd2); + x64CGFunc->GetCurBB()->AppendInsn(insn2); + } +} + +void X64MoveRegArgs::MoveRegisterArgs() { + X64CGFunc *x64CGFunc = static_cast(cgFunc); + BB *formerCurBB = x64CGFunc->GetCurBB(); + x64CGFunc->GetDummyBB()->ClearInsns(); + x64CGFunc->SetCurBB(*x64CGFunc->GetDummyBB()); + + /* <[0], maplebe::R0>; <[1], maplebe::V0> */ + std::map movePara; + /* [0], [1] */ + std::vector moveParaIndex; + std::map pairReg; + std::vector numFpRegs; + std::vector fpSize; + CollectRegisterArgs(movePara, moveParaIndex, pairReg, numFpRegs, fpSize); + + for (auto indexItem = moveParaIndex.begin(); indexItem != moveParaIndex.end(); ++indexItem) { + uint32 index = *indexItem; + ArgInfo argInfo = GetArgInfo(movePara, index, numFpRegs, fpSize); + GenerateMovInsn(argInfo, pairReg[index]); + } + + x64CGFunc->GetFirstBB()->InsertAtBeginning(*x64CGFunc->GetDummyBB()); + x64CGFunc->SetCurBB(*formerCurBB); +} + +void X64MoveRegArgs::LoadStackArgsToVReg(MIRSymbol &mirSym) { + ASSERT(mirSym.GetStorageClass() == kScFormal, "NIY, vreg parameters should be kScFormal 
type."); + X64CGFunc *x64CGFunc = static_cast(cgFunc); + PrimType stype = mirSym.GetType()->GetPrimType(); + uint32 opndSize = GetPrimTypeBitSize(stype); + auto symLoc = static_cast(x64CGFunc->GetMemlayout()-> + GetSymAllocInfo(mirSym.GetStIndex())); + int32 stOffset = x64CGFunc->GetBaseOffset(*symLoc); + RegOperand *baseOpnd = static_cast(x64CGFunc->GetBaseReg(*symLoc)); + MemOperand &memOpnd = x64CGFunc->GetOpndBuilder()->CreateMem(*baseOpnd, stOffset, opndSize); + PregIdx pregIdx = x64CGFunc->GetFunction().GetPregTab()->GetPregIdxFromPregno(mirSym.GetPreg()->GetPregNo()); + RegOperand &dstRegOpnd = x64CGFunc->GetOpndBuilder()->CreateVReg( + x64CGFunc->GetVirtualRegNOFromPseudoRegIdx(pregIdx), opndSize, cgFunc->GetRegTyFromPrimTy(stype)); + + MOperator mOp; + if (opndSize == k64BitSize) { + mOp = x64::MOP_movq_m_r; + } else if (opndSize == k32BitSize) { + mOp = x64::MOP_movl_m_r; + } else if (opndSize == k16BitSize) { + mOp = x64::MOP_movw_m_r; + } else if (opndSize == k8BitSize) { + mOp = x64::MOP_movb_m_r; + } else { + CHECK_FATAL(false, "NIY"); + } + Insn &insn = x64CGFunc->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp]); + insn.AddOpndChain(memOpnd).AddOpndChain(dstRegOpnd); + if (x64CGFunc->GetCG()->GenerateVerboseCG()) { + std::string key = "param: %%"; + key += std::to_string(mirSym.GetPreg()->GetPregNo()); + insn.SetComment(key); + } + x64CGFunc->GetCurBB()->InsertInsnBegin(insn); +} + +void X64MoveRegArgs::MoveArgsToVReg(const CCLocInfo &ploc, MIRSymbol &mirSym) { + ASSERT(mirSym.GetStorageClass() == kScFormal, "NIY, vreg parameters should be kScFormal type."); + X64CGFunc *x64CGFunc = static_cast(cgFunc); + RegType regType = (ploc.reg0 < V0) ? kRegTyInt : kRegTyFloat; + PrimType stype = mirSym.GetType()->GetPrimType(); + uint32 byteSize = GetPrimTypeSize(stype); + uint32 srcBitSize = ((byteSize < k4ByteSize) ? 
k4ByteSize : byteSize) * kBitsPerByte; + PregIdx pregIdx = x64CGFunc->GetFunction().GetPregTab()->GetPregIdxFromPregno(mirSym.GetPreg()->GetPregNo()); + RegOperand &dstRegOpnd = x64CGFunc->GetOpndBuilder()->CreateVReg( + x64CGFunc->GetVirtualRegNOFromPseudoRegIdx(pregIdx), srcBitSize, regType); + RegOperand &srcRegOpnd = x64CGFunc->GetOpndBuilder()->CreateVReg(ploc.reg0, srcBitSize, regType); + + MOperator mOp; + if (srcBitSize == k64BitSize) { + mOp = x64::MOP_movq_r_r; + } else if (srcBitSize == k32BitSize) { + mOp = x64::MOP_movl_r_r; + } else if (srcBitSize == k16BitSize) { + mOp = x64::MOP_movw_r_r; + } else if (srcBitSize == k8BitSize) { + mOp = x64::MOP_movb_r_r; + } else { + CHECK_FATAL(false, "NIY"); + } + Insn &insn = x64CGFunc->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp]); + insn.AddOpndChain(srcRegOpnd).AddOpndChain(dstRegOpnd); + if (x64CGFunc->GetCG()->GenerateVerboseCG()) { + std::string key = "param: %%"; + key += std::to_string(mirSym.GetPreg()->GetPregNo()); + insn.SetComment(key); + } + x64CGFunc->GetCurBB()->InsertInsnBegin(insn); +} + +void X64MoveRegArgs::MoveVRegisterArgs() { + X64CGFunc *x64CGFunc = static_cast(cgFunc); + BB *formerCurBB = x64CGFunc->GetCurBB(); + x64CGFunc->GetDummyBB()->ClearInsns(); + x64CGFunc->SetCurBB(*x64CGFunc->GetDummyBB()); + X64CallConvImpl parmlocator(x64CGFunc->GetBecommon()); + CCLocInfo ploc; + + uint32 formalCount = static_cast(x64CGFunc->GetFunction().GetFormalCount()); + uint32 start = 0; + if (formalCount) { + MIRFunction *func = const_cast(x64CGFunc->GetBecommon().GetMIRModule().CurFunction()); + if (x64CGFunc->GetBecommon().HasFuncReturnType(*func)) { + TyIdx idx = x64CGFunc->GetBecommon().GetFuncReturnType(*func); + if (x64CGFunc->GetBecommon().GetTypeSize(idx) <= k16BitSize) { + start = 1; + } + } + } + for (uint32 i = start; i < formalCount; ++i) { + MIRType *ty = x64CGFunc->GetFunction().GetNthParamType(i); + parmlocator.LocateNextParm(*ty, ploc, i == 0, &x64CGFunc->GetFunction()); + MIRSymbol *sym = x64CGFunc->GetFunction().GetFormal(i); + + /* load locarefvar formals to store in the reflocals. */ + if (x64CGFunc->GetFunction().GetNthParamAttr(i).GetAttr(ATTR_localrefvar) && ploc.reg0 == kRinvalid) { + CHECK_FATAL(false, "NIY"); + } + + if (!sym->IsPreg()) { + continue; + } + + if (ploc.reg0 == kRinvalid) { + /* load stack parameters to the vreg. */ + LoadStackArgsToVReg(*sym); + } else { + MoveArgsToVReg(ploc, *sym); + } + } + + x64CGFunc->GetFirstBB()->InsertAtBeginning(*x64CGFunc->GetDummyBB()); + x64CGFunc->SetCurBB(*formerCurBB); +} +} /* namespace maplebe */ diff --git a/src/mapleall/maple_be/src/cg/x86_64/x64_call_conv.cpp b/src/mapleall/maple_be/src/cg/x86_64/x64_call_conv.cpp new file mode 100644 index 0000000000000000000000000000000000000000..8e3c4adf654a7b924945cd48d42692bd2595afbe --- /dev/null +++ b/src/mapleall/maple_be/src/cg/x86_64/x64_call_conv.cpp @@ -0,0 +1,207 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
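The argument-moving helpers above repeatedly map an operand width onto one of `movb`/`movw`/`movl`/`movq`. A standalone sketch of that mapping is given below; the helper name is invented for illustration.

```cpp
// Illustrative only: the width-to-mnemonic choice made when moving incoming arguments.
#include <cstdint>

inline const char *MovMnemonicForBits(uint32_t bitSize) {
  switch (bitSize) {
    case 8:  return "movb";
    case 16: return "movw";
    case 32: return "movl";
    case 64: return "movq";
    default: return nullptr;  // any other width hits CHECK_FATAL(false, "NIY") in the backend
  }
}
```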
+ */ +#include "x64_cgfunc.h" +#include "becommon.h" +#include "abi.h" +#include "x64_call_conv.h" +namespace maplebe { +using namespace maple; +using namespace x64; +constexpr int kMaxRegCount = 4; + +int32 ClassifyAggregate(MIRType &mirType, uint64 sizeOfTy, ArgumentClass classes[kMaxRegCount]) { + /* + * 1. If the size of an object is larger than four eightbytes, or it contains unaligned + * fields, it has class MEMORY; + * 2. for the processors that do not support the __m256 type, if the size of an object + * is larger than two eightbytes and the first eightbyte is not SSE or any other eightbyte + * is not SSEUP, it still has class MEMORY. + * This in turn ensures that for rocessors that do support the __m256 type, if the size of + * an object is four eightbytes and the first eightbyte is SSE and all other eightbytes are + * SSEUP, it can be passed in a register. + *(Currently, assume that m256 is not supported) + */ + if (sizeOfTy > k2EightBytesSize) { + classes[0] = kMemoryClass; + } else if (sizeOfTy > k1EightBytesSize) { + classes[0] = kIntegerClass; + classes[1] = kIntegerClass; + } else { + classes[0] = kIntegerClass; + } + return static_cast(sizeOfTy); +} + +int32 Classification(const BECommon &be, MIRType &mirType, ArgumentClass classes[kMaxRegCount]) { + switch (mirType.GetPrimType()) { + /* + * Arguments of types void, (signed and unsigned) _Bool, char, short, int, + * long, long long, and pointers are in the INTEGER class. + */ + case PTY_void: + case PTY_u1: + case PTY_u8: + case PTY_i8: + case PTY_u16: + case PTY_i16: + case PTY_a32: + case PTY_u32: + case PTY_i32: + case PTY_a64: + case PTY_ptr: + case PTY_ref: + case PTY_u64: + case PTY_i64: + classes[0] = kIntegerClass; + return k8ByteSize; + /* + * Arguments of type __int128 offer the same operations as INTEGERs, + * yet they do not fit into one general purpose register but require + * two registers. + */ + case PTY_i128: + case PTY_u128: + classes[0] = kIntegerClass; + classes[1] = kIntegerClass; + return k16ByteSize; + case PTY_f32: + case PTY_f64: + classes[0] = kFloatClass; + return k8ByteSize; + case PTY_agg: { + /* + * The size of each argument gets rounded up to eightbytes, + * Therefore the stack will always be eightbyte aligned. + */ + uint64 sizeOfTy = RoundUp(be.GetTypeSize(mirType.GetTypeIndex()), k8ByteSize); + if (sizeOfTy == 0) { + return 0; + } + /* If the size of an object is larger than four eightbytes, it has class MEMORY */ + if ((sizeOfTy > k4EightBytesSize)) { + classes[0] = kMemoryClass; + return static_cast(sizeOfTy); + } + return ClassifyAggregate(mirType, sizeOfTy, classes); + } + default: + CHECK_FATAL(false, "NYI"); + } + return 0; +} + +void X64CallConvImpl::InitCCLocInfo(CCLocInfo &pLoc) const { + pLoc.reg0 = kRinvalid; + pLoc.reg1 = kRinvalid; + pLoc.reg2 = kRinvalid; + pLoc.reg3 = kRinvalid; + pLoc.memOffset = nextStackArgAdress; + pLoc.fpSize = 0; + pLoc.numFpPureRegs = 0; +} + +int32 X64CallConvImpl::LocateNextParm(MIRType &mirType, CCLocInfo &pLoc, bool isFirst, MIRFunction *tFunc) { + InitCCLocInfo(pLoc); + ArgumentClass classes[kMaxRegCount] = { kNoClass }; /* Max of four Regs. 
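As a value-level sketch of what the simplified aggregate classification above decides: sizes are rounded up to eightbytes, anything up to two eightbytes is passed in INTEGER registers, and larger aggregates fall back to MEMORY (floating-point eightbytes are not yet distinguished for aggregates here). The names below are illustrative, not compiler types.

```cpp
// Models the decision made by Classification/ClassifyAggregate for PTY_agg.
#include <cstdint>

enum class DemoClass { kInteger, kMemory };

struct DemoClassification {
  DemoClass cls;
  uint32_t eightbytes;  // number of 8-byte chunks used when not passed in MEMORY
};

inline DemoClassification ClassifyAggBySize(uint64_t byteSize) {
  // Size 0 is handled separately in the real code (nothing to pass).
  const uint64_t rounded = (byteSize + 7) / 8 * 8;  // RoundUp(size, k8ByteSize)
  if (rounded > 16) {                               // more than two eightbytes
    return {DemoClass::kMemory, 0};
  }
  return {DemoClass::kInteger, static_cast<uint32_t>(rounded / 8)};
}

// e.g. ClassifyAggBySize(12) -> {kInteger, 2}: two GP registers;
//      ClassifyAggBySize(24) -> {kMemory, 0}: passed on the stack.
```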
*/ + int32 alignedTySize = Classification(beCommon, mirType, classes); + if (alignedTySize == 0) { + return 0; + } + pLoc.memSize = alignedTySize; + ++paramNum; + if (classes[0] == kIntegerClass) { + if (alignedTySize == k8ByteSize) { + pLoc.reg0 = AllocateGPParmRegister(); + ASSERT(nextGeneralParmRegNO <= kNumIntParmRegs, "RegNo should be pramRegNO"); + } else if (alignedTySize == k16ByteSize) { + AllocateTwoGPParmRegisters(pLoc); + ASSERT(nextGeneralParmRegNO <= kNumIntParmRegs, "RegNo should be pramRegNO"); + } + } else if (classes[0] == kFloatClass) { + if (alignedTySize == k8ByteSize) { + pLoc.reg0 = AllocateSIMDFPRegister(); + ASSERT(nextGeneralParmRegNO <= kNumFloatParmRegs, "RegNo should be pramRegNO"); + } else { + CHECK_FATAL(false, "niy"); + } + } + if (pLoc.reg0 == kRinvalid || classes[0] == kMemoryClass) { + /* being passed in memory */ + nextStackArgAdress = pLoc.memOffset + alignedTySize; + } + return 0; +} + +int32 X64CallConvImpl::LocateRetVal(MIRType &retType, CCLocInfo &pLoc) { + InitCCLocInfo(pLoc); + ArgumentClass classes[kMaxRegCount] = { kNoClass }; /* Max of four Regs. */ + int32 alignedTySize = Classification(beCommon, retType, classes); + if (alignedTySize == 0) { + return 0; /* size 0 ret val */ + } + if (classes[0] == kIntegerClass) { + /* If the class is INTEGER, the next available register of the sequence %rax, */ + /* %rdx is used. */ + CHECK_FATAL(alignedTySize <= k16ByteSize, "LocateRetVal: illegal number of regs"); + pLoc.regCount = alignedTySize; + if (alignedTySize == k8ByteSize) { + pLoc.reg0 = AllocateGPReturnRegister(); + ASSERT(nextGeneralReturnRegNO <= kNumIntReturnRegs, "RegNo should be pramRegNO"); + } else if (alignedTySize == k16ByteSize) { + AllocateTwoGPReturnRegisters(pLoc); + ASSERT(nextGeneralReturnRegNO <= kNumIntReturnRegs, "RegNo should be pramRegNO"); + } + if (nextGeneralReturnRegNO == kOneRegister) { + pLoc.primTypeOfReg0 = retType.GetPrimType() == PTY_agg ? PTY_u64 : retType.GetPrimType(); + } else if (nextGeneralReturnRegNO == kTwoRegister) { + pLoc.primTypeOfReg0 = retType.GetPrimType() == PTY_agg ? PTY_u64 : retType.GetPrimType(); + pLoc.primTypeOfReg1 = retType.GetPrimType() == PTY_agg ? PTY_u64 : retType.GetPrimType(); + } + return 0; + } else if (classes[0] == kFloatClass) { + /* If the class is SSE, the next available vector register of the sequence %xmm0, */ + /* %xmm1 is used. */ + CHECK_FATAL(alignedTySize <= k16ByteSize, "LocateRetVal: illegal number of regs"); + pLoc.regCount = alignedTySize; + if (alignedTySize == k8ByteSize) { + pLoc.reg0 = AllocateSIMDFPRegister(); + ASSERT(nextGeneralParmRegNO <= kNumFloatParmRegs, "RegNo should be pramRegNO"); + } else if (alignedTySize == k16ByteSize) { + CHECK_FATAL(false, "niy"); + } + if (nextFloatRegNO == kOneRegister) { + pLoc.primTypeOfReg0 = retType.GetPrimType() == PTY_agg ? PTY_f64 : retType.GetPrimType(); + } else if (nextFloatRegNO == kTwoRegister) { + CHECK_FATAL(false, "niy"); + } + return 0; + } + if (pLoc.reg0 == kRinvalid || classes[0] == kMemoryClass) { + /* + * the caller provides space for the return value and passes + * the address of this storage in %rdi as if it were the first + * argument to the function. In effect, this address becomes a + * “hidden” first argument. + * On return %rax will contain the address that has been passed + * in by the caller in %rdi. + * Currently, this scenario is not fully supported. 
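For intuition, this is what the return-value rules above mean at the source level: an aggregate of up to two eightbytes comes back in %rax (and %rdx), while a larger one is written through the hidden result pointer described in the surrounding comment. The struct names below are made up for illustration.

```cpp
// Illustrative C++ only -- shows which return path each size takes under these rules.
struct TwoWords { long a; long b; };     // 16 bytes: reg0 = %rax, reg1 = %rdx
struct FourWords { long a, b, c, d; };   // 32 bytes: MEMORY class, caller passes &result in %rdi

TwoWords MakeTwo() { return {1, 2}; }            // values travel back in registers
FourWords MakeFour() { return {1, 2, 3, 4}; }    // values go through the hidden buffer
```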
+ */ + pLoc.reg0 = AllocateGPReturnRegister(); + return 0; + } + CHECK_FATAL(false, "NYI"); + return 0; +} +} /* namespace maplebe */ diff --git a/src/mapleall/maple_be/src/cg/x86_64/x64_cfgo.cpp b/src/mapleall/maple_be/src/cg/x86_64/x64_cfgo.cpp new file mode 100644 index 0000000000000000000000000000000000000000..b3a34fe6680b9ac055b1541c374949a51a2e2758 --- /dev/null +++ b/src/mapleall/maple_be/src/cg/x86_64/x64_cfgo.cpp @@ -0,0 +1,35 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ + +#include "x64_cfgo.h" +#include "x64_isa.h" + +namespace maplebe { +/* Initialize cfg optimization patterns */ +void X64CFGOptimizer::InitOptimizePatterns() { + diffPassPatterns.emplace_back(memPool->New(*cgFunc)); + diffPassPatterns.emplace_back(memPool->New(*cgFunc)); + diffPassPatterns.emplace_back(memPool->New(*cgFunc)); + diffPassPatterns.emplace_back(memPool->New(*cgFunc)); + diffPassPatterns.emplace_back(memPool->New(*cgFunc)); +} + +uint32 X64FlipBRPattern::GetJumpTargetIdx(const Insn &insn) { + return x64::GetJumpTargetIdx(insn); +} +MOperator X64FlipBRPattern::FlipConditionOp(MOperator flippedOp) { + return x64::FlipConditionOp(flippedOp); +} +} diff --git a/src/mapleall/maple_be/src/cg/x86_64/x64_cg.cpp b/src/mapleall/maple_be/src/cg/x86_64/x64_cg.cpp new file mode 100644 index 0000000000000000000000000000000000000000..d58b82d54848e2c8adf999d902bcb5dbb166cbc6 --- /dev/null +++ b/src/mapleall/maple_be/src/cg/x86_64/x64_cg.cpp @@ -0,0 +1,96 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ + +#include "x64_cg.h" +#include "x64_cgfunc.h" +#include "x64_isa.h" +namespace maplebe { +using namespace x64; +#define DEFINE_MOP(...) 
{__VA_ARGS__}, +const InsnDesc X64CG::kMd[kMopLast] = { +#include "abstract_mmir.def" +#include "x64_md.def" +}; +#undef DEFINE_MOP + +std::array, kIntRegTypeNum> X64CG::intRegNames = { + std::array { + "err", "al", "bl", "cl", "dl", "spl", "bpl", "sil", "dil", "r8b", "r9b", "r10b", "r11b", "r12b", "r13b", + "r14b", "r15b", "err1", "errMaxRegNum" + }, std::array { + "err", "ah", "bh", "ch", "dh", "err0", "err1", "err2", "err3", "err4", "err5", "err6", "err7", "err8", "err9", + "err10", "err11", "err12", "errMaxRegNum" + }, std::array { + "err", "ax", "bx", "cx", "dx", "sp", "bp", "si", "di", "r8w", "r9w", "r10w", "r11w", "r12w", "r13w", + "r14w", "r15w", "err1", "errMaxRegNum" + }, std::array { + "err", "eax", "ebx", "ecx", "edx", "esp", "ebp", "esi", "edi", "r8d", "r9d", "r10d", "r11d", "r12d", "r13d", + "r14d", "r15d", "err1", "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7", "xmm8", "xmm9", "xmm10", + "xmm11", "xmm12", "xmm13", "xmm14", "xmm15", "errMaxRegNum" + }, std::array { + "err", "rax", "rbx", "rcx", "rdx", "rsp", "rbp", "rsi", "rdi", "r8", "r9", "r10", "r11", "r12", "r13", + "r14", "r15", "rip", "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7", "xmm8", "xmm9", "xmm10", + "xmm11", "xmm12", "xmm13", "xmm14", "xmm15", "errMaxRegNum" + }, +}; + +void X64CG::EnrollTargetPhases(maple::MaplePhaseManager *pm) const { +#include "x64_phases.def" +} + +CGFunc *X64CG::CreateCGFunc(MIRModule &mod, MIRFunction &mirFunc, BECommon &bec, MemPool &memPool, + StackMemPool &stackMp, MapleAllocator &mallocator, uint32 funcId) { + return memPool.New(mod, *this, mirFunc, bec, memPool, stackMp, mallocator, funcId); +} + +bool X64CG::IsEffectiveCopy(Insn &insn) const { + return false; +} +bool X64CG::IsTargetInsn(MOperator mOp) const { + return (mOp >= MOP_movb_r_r && mOp <= MOP_pseudo_ret_int); +} +bool X64CG::IsClinitInsn(MOperator mOp) const { + return false; +} + +Insn &X64CG::BuildPhiInsn(RegOperand &defOpnd, Operand &listParam) { + CHECK_FATAL(false, "NIY"); + Insn *a = nullptr; + return *a; +} + +PhiOperand &X64CG::CreatePhiOperand(MemPool &mp, MapleAllocator &mAllocator) { + CHECK_FATAL(false, "NIY"); + PhiOperand *a = nullptr; + return *a; +} + +void X64CG::DumpTargetOperand(Operand &opnd, const OpndDesc &opndDesc) const { + X64OpndDumpVisitor visitor(opndDesc); + opnd.Accept(visitor); +} + +bool X64CG::IsExclusiveFunc(MIRFunction &mirFunc) { + return false; +} + +/* NOTE: Consider making be_common a field of CG. */ +void X64CG::GenerateObjectMaps(BECommon &beCommon) {} + +/* Used for GCTIB pattern merging */ +std::string X64CG::FindGCTIBPatternName(const std::string &name) const { + return ""; +} +} diff --git a/src/mapleall/maple_be/src/cg/x86_64/x64_cgfunc.cpp b/src/mapleall/maple_be/src/cg/x86_64/x64_cgfunc.cpp new file mode 100644 index 0000000000000000000000000000000000000000..af38d2fca193381b330470fdce3d9351b52ea735 --- /dev/null +++ b/src/mapleall/maple_be/src/cg/x86_64/x64_cgfunc.cpp @@ -0,0 +1,918 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. 
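The `intRegNames` tables above let the emitter pick an assembler name for the same physical register at different operand widths. Below is a tiny self-contained sketch of that lookup, using a made-up two-register subset rather than the real table.

```cpp
// Illustrative only: one register id, several width-dependent assembler names.
#include <array>
#include <cstdint>
#include <string_view>

enum DemoWidth : uint8_t { kName8 = 0, kName16, kName32, kName64, kWidthNum };

constexpr std::array<std::array<std::string_view, kWidthNum>, 2> kDemoRegNames = {{
    {"al", "ax", "eax", "rax"},   // the RAX family
    {"bl", "bx", "ebx", "rbx"},   // the RBX family
}};

constexpr std::string_view DemoRegName(uint32_t regIdx, DemoWidth w) {
  return kDemoRegNames[regIdx][w];
}

static_assert(DemoRegName(0, kName32) == "eax", "the width selects the name, not the register");
```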
+ * See the Mulan PSL v2 for more details. + */ + +#include +#include "x64_cgfunc.h" +#include "x64_memlayout.h" +#include "x64_isa.h" + +namespace maplebe { +/* null implementation yet */ +void X64CGFunc::GenSaveMethodInfoCode(BB &bb) { + CHECK_FATAL(false, "NIY"); +} +void X64CGFunc::GenerateCleanupCode(BB &bb) { + CHECK_FATAL(false, "NIY"); +} +bool X64CGFunc::NeedCleanup() { + CHECK_FATAL(false, "NIY"); + return false; +} +void X64CGFunc::GenerateCleanupCodeForExtEpilog(BB &bb) { + CHECK_FATAL(false, "NIY"); +} +uint32 X64CGFunc::FloatParamRegRequired(MIRStructType *structType, uint32 &fpSize) { + CHECK_FATAL(false, "NIY"); + return 0; +} +void X64CGFunc::AssignLmbcFormalParams() { + CHECK_FATAL(false, "NIY"); +} +void X64CGFunc::LmbcGenSaveSpForAlloca() { + CHECK_FATAL(false, "NIY"); +} +void X64CGFunc::MergeReturn() { + CHECK_FATAL(false, "NIY"); +} +void X64CGFunc::DetermineReturnTypeofCall() { + CHECK_FATAL(false, "NIY"); +} +void X64CGFunc::HandleRCCall(bool begin, const MIRSymbol *retRef) { + CHECK_FATAL(false, "NIY"); +} +void X64CGFunc::HandleRetCleanup(NaryStmtNode &retNode) { + CHECK_FATAL(false, "NIY"); +} +void X64CGFunc::SelectDassign(DassignNode &stmt, Operand &opnd0) { + CHECK_FATAL(false, "NIY"); +} +void X64CGFunc::SelectDassignoff(DassignoffNode &stmt, Operand &opnd0) { + CHECK_FATAL(false, "NIY"); +} +void X64CGFunc::SelectRegassign(RegassignNode &stmt, Operand &opnd0) { + CHECK_FATAL(false, "NIY"); +} +void X64CGFunc::SelectAbort() { + CHECK_FATAL(false, "NIY"); +} +void X64CGFunc::SelectAssertNull(UnaryStmtNode &stmt) { + CHECK_FATAL(false, "NIY"); +} +void X64CGFunc::SelectAsm(AsmNode &node) { + CHECK_FATAL(false, "NIY"); +} +void X64CGFunc::SelectAggDassign(DassignNode &stmt) { + CHECK_FATAL(false, "NIY"); +} +void X64CGFunc::SelectIassign(IassignNode &stmt) { + CHECK_FATAL(false, "NIY"); +} +void X64CGFunc::SelectIassignoff(IassignoffNode &stmt) { + CHECK_FATAL(false, "NIY"); +} +void X64CGFunc::SelectIassignfpoff(IassignFPoffNode &stmt, Operand &opnd) { + CHECK_FATAL(false, "NIY"); +} +void X64CGFunc::SelectIassignspoff(PrimType pTy, int32 offset, Operand &opnd) { + CHECK_FATAL(false, "NIY"); +} +void X64CGFunc::SelectBlkassignoff(BlkassignoffNode &bNode, Operand *src) { + CHECK_FATAL(false, "NIY"); +} +void X64CGFunc::SelectAggIassign(IassignNode &stmt, Operand &lhsAddrOpnd) { + CHECK_FATAL(false, "NIY"); +} +void X64CGFunc::SelectReturnSendOfStructInRegs(BaseNode *x) { + CHECK_FATAL(false, "NIY"); +} +void X64CGFunc::SelectReturn(Operand *opnd) { + CHECK_FATAL(false, "NIY"); +} +void X64CGFunc::SelectIgoto(Operand *opnd0) { + CHECK_FATAL(false, "NIY"); +} +void X64CGFunc::SelectCondGoto(CondGotoNode &stmt, Operand &opnd0, Operand &opnd1) { + CHECK_FATAL(false, "NIY"); +} +void X64CGFunc::SelectCondSpecialCase1(CondGotoNode &stmt, BaseNode &opnd0) { + CHECK_FATAL(false, "NIY"); +} +void X64CGFunc::SelectCondSpecialCase2(const CondGotoNode &stmt, BaseNode &opnd0) { + CHECK_FATAL(false, "NIY"); +} +void X64CGFunc::SelectGoto(GotoNode &stmt) { + CHECK_FATAL(false, "NIY"); +} +void X64CGFunc::SelectCall(CallNode &callNode) { + CHECK_FATAL(false, "NIY"); +} +void X64CGFunc::SelectIcall(IcallNode &icallNode, Operand &fptrOpnd) { + CHECK_FATAL(false, "NIY"); +} +void X64CGFunc::SelectIntrinCall(IntrinsiccallNode &intrinsiccallNode) { + CHECK_FATAL(false, "NIY"); +} +Operand *X64CGFunc::SelectIntrinsicOpWithOneParam(IntrinsicopNode &intrinopNode, std::string name) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectCclz(IntrinsicopNode 
&intrinopNode) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectCctz(IntrinsicopNode &intrinopNode) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectCpopcount(IntrinsicopNode &intrinopNode) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectCparity(IntrinsicopNode &intrinopNode) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectCclrsb(IntrinsicopNode &intrinopNode) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectCisaligned(IntrinsicopNode &intrinopNode) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectCalignup(IntrinsicopNode &intrinopNode) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectCaligndown(IntrinsicopNode &intrinopNode) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectCSyncBoolCmpSwap(IntrinsicopNode &intrinopNode) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectCSyncValCmpSwap(IntrinsicopNode &intrinopNode) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectCSyncLockTestSet(IntrinsicopNode &intrinopNode, PrimType pty) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectBswap(IntrinsicopNode &node, Operand &opnd0, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectCSyncFetch(IntrinsicopNode &intrinsicopNode, Opcode op, bool fetchBefore) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectCSyncSynchronize(IntrinsicopNode &intrinsicopNode) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectCAtomicLoadN(IntrinsicopNode &intrinsicopNode) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectCAtomicExchangeN(const IntrinsiccallNode &intrinsiccallNode) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectCAtomicFetch(IntrinsicopNode &intrinsicopNode, Opcode op, bool fetchBefore) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectCReturnAddress(IntrinsicopNode &intrinopNode) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +void X64CGFunc::SelectCAtomicExchange(const IntrinsiccallNode &intrinsiccallNode) { + CHECK_FATAL(false, "NIY"); +} +void X64CGFunc::SelectMembar(StmtNode &membar) { + CHECK_FATAL(false, "NIY"); +} +void X64CGFunc::SelectComment(CommentNode &comment) { + CHECK_FATAL(false, "NIY"); +} +void X64CGFunc::HandleCatch() { + CHECK_FATAL(false, "NIY"); +} +Operand *X64CGFunc::SelectDread(const BaseNode &parent, AddrofNode &expr) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +RegOperand *X64CGFunc::SelectRegread(RegreadNode &expr) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectAddrof(AddrofNode &expr, const BaseNode &parent, bool isAddrofoff) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectAddrofoff(AddrofoffNode &expr, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} + +Operand &X64CGFunc::SelectAddrofFunc(AddroffuncNode &expr, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); + Operand *a; + return *a; +} +Operand &X64CGFunc::SelectAddrofLabel(AddroflabelNode &expr, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); + Operand *a; + return *a; +} +Operand *X64CGFunc::SelectIread(const BaseNode &parent, IreadNode &expr, int extraOffset, + PrimType finalBitFieldDestType) 
{ + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectIreadoff(const BaseNode &parent, IreadoffNode &ireadoff) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectIreadfpoff(const BaseNode &parent, IreadFPoffNode &ireadoff) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectIntConst(MIRIntConst &intConst) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectFloatConst(MIRFloatConst &floatConst, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectDoubleConst(MIRDoubleConst &doubleConst, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectStrConst(MIRStrConst &strConst) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectStr16Const(MIRStr16Const &strConst) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +void X64CGFunc::SelectAdd(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) { + CHECK_FATAL(false, "NIY"); +} +Operand *X64CGFunc::SelectAdd(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +void X64CGFunc::SelectMadd(Operand &resOpnd, Operand &opndM0, Operand &opndM1, Operand &opnd1, PrimType primType) { + CHECK_FATAL(false, "NIY"); +} +Operand *X64CGFunc::SelectMadd(BinaryNode &node, Operand &opndM0, Operand &opndM1, Operand &opnd1, + const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectRor(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand &X64CGFunc::SelectCGArrayElemAdd(BinaryNode &node, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); + Operand *a; + return *a; +} +Operand *X64CGFunc::SelectShift(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +void X64CGFunc::SelectMpy(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) { + CHECK_FATAL(false, "NIY"); +} +Operand *X64CGFunc::SelectMpy(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectRem(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +void X64CGFunc::SelectDiv(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) { + CHECK_FATAL(false, "NIY"); +} +Operand *X64CGFunc::SelectDiv(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectSub(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +void X64CGFunc::SelectSub(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) { + CHECK_FATAL(false, "NIY"); +} +Operand *X64CGFunc::SelectBand(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +void X64CGFunc::SelectBand(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) { + CHECK_FATAL(false, "NIY"); +} +Operand *X64CGFunc::SelectLand(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectLor(BinaryNode &node, Operand &opnd0, 
Operand &opnd1, const BaseNode &parent, + bool parentIsBr) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +void X64CGFunc::SelectMin(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) { + CHECK_FATAL(false, "NIY"); +} +Operand *X64CGFunc::SelectMin(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +void X64CGFunc::SelectMax(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) { + CHECK_FATAL(false, "NIY"); +} +Operand *X64CGFunc::SelectMax(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectCmpOp(CompareNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectBior(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +void X64CGFunc::SelectBior(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) { + CHECK_FATAL(false, "NIY"); +} +Operand *X64CGFunc::SelectBxor(BinaryNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +void X64CGFunc::SelectBxor(Operand &resOpnd, Operand &opnd0, Operand &opnd1, PrimType primType) { + CHECK_FATAL(false, "NIY"); +} +Operand *X64CGFunc::SelectAbs(UnaryNode &node, Operand &opnd0) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectBnot(UnaryNode &node, Operand &opnd0, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectExtractbits(ExtractbitsNode &node, Operand &opnd0, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectDepositBits(DepositbitsNode &node, Operand &opnd0, Operand &opnd1, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectRegularBitFieldLoad(ExtractbitsNode &node, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectLnot(UnaryNode &node, Operand &opnd0, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectNeg(UnaryNode &node, Operand &opnd0, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectRecip(UnaryNode &node, Operand &opnd0, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectSqrt(UnaryNode &node, Operand &opnd0, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectCeil(TypeCvtNode &node, Operand &opnd0, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectFloor(TypeCvtNode &node, Operand &opnd0, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectRetype(TypeCvtNode &node, Operand &opnd0) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectRound(TypeCvtNode &node, Operand &opnd0, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectCvt(const BaseNode &parent, TypeCvtNode &node, Operand &opnd0) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectTrunc(TypeCvtNode &node, Operand &opnd0, const BaseNode &parent) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} 
+Operand *X64CGFunc::SelectSelect(TernaryNode &node, Operand &cond, Operand &opnd0, Operand &opnd1, + const BaseNode &parent, bool hasCompare) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectMalloc(UnaryNode &call, Operand &opnd0) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +RegOperand &X64CGFunc::SelectCopy(Operand &src, PrimType srcType, PrimType dstType) { + CHECK_FATAL(false, "NIY"); + RegOperand *a; + return *a; +} +Operand *X64CGFunc::SelectAlloca(UnaryNode &call, Operand &opnd0) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectGCMalloc(GCMallocNode &call) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectJarrayMalloc(JarrayMallocNode &call, Operand &opnd0) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +void X64CGFunc::SelectRangeGoto(RangeGotoNode &rangeGotoNode, Operand &opnd0) { + CHECK_FATAL(false, "NIY"); +} +Operand *X64CGFunc::SelectLazyLoad(Operand &opnd0, PrimType primType) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectLazyLoadStatic(MIRSymbol &st, int64 offset, PrimType primType) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectLoadArrayClassCache(MIRSymbol &st, int64 offset, PrimType primType) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +void X64CGFunc::GenerateYieldpoint(BB &bb) { + CHECK_FATAL(false, "NIY"); +} +Operand &X64CGFunc::ProcessReturnReg(PrimType primType, int32 sReg) { + CHECK_FATAL(false, "NIY"); + Operand *a; + return *a; +} +Operand &X64CGFunc::GetOrCreateRflag() { + CHECK_FATAL(false, "NIY"); + Operand *a; + return *a; +} +const Operand *X64CGFunc::GetRflag() const { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +const Operand *X64CGFunc::GetFloatRflag() const { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +const LabelOperand *X64CGFunc::GetLabelOperand(LabelIdx labIdx) const { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +LabelOperand &X64CGFunc::GetOrCreateLabelOperand(LabelIdx labIdx) { + std::string lableName = ".L." 
+ std::to_string(GetUniqueID()) + + "__" + std::to_string(labIdx); + return GetOpndBuilder()->CreateLabel(lableName.c_str(), labIdx); +} +LabelOperand &X64CGFunc::GetOrCreateLabelOperand(BB &bb) { + CHECK_FATAL(false, "NIY"); + LabelOperand *a; + return *a; +} +RegOperand &X64CGFunc::CreateVirtualRegisterOperand(regno_t vRegNO) { + CHECK_FATAL(false, "NIY"); + RegOperand *a; + return *a; +} +RegOperand &X64CGFunc::GetOrCreateVirtualRegisterOperand(regno_t vRegNO) { + CHECK_FATAL(false, "NIY"); + RegOperand *a; + return *a; +} +RegOperand &X64CGFunc::GetOrCreateVirtualRegisterOperand(RegOperand ®Opnd) { + CHECK_FATAL(false, "NIY"); + RegOperand *a; + return *a; +} +RegOperand &X64CGFunc::GetOrCreateFramePointerRegOperand() { + CHECK_FATAL(false, "NIY"); + RegOperand *a; + return *a; +} +RegOperand &X64CGFunc::GetOrCreateStackBaseRegOperand() { + return GetOpndBuilder()->CreatePReg(x64::RBP, GetPointerSize() * kBitsPerByte, kRegTyInt); +} +RegOperand &X64CGFunc::GetZeroOpnd(uint32 size) { + CHECK_FATAL(false, "NIY"); + RegOperand *a; + return *a; +} +Operand &X64CGFunc::CreateCfiRegOperand(uint32 reg, uint32 size) { + CHECK_FATAL(false, "NIY"); + Operand *a; + return *a; +} +Operand &X64CGFunc::GetTargetRetOperand(PrimType primType, int32 sReg) { + CHECK_FATAL(false, "NIY"); + Operand *a; + return *a; +} +Operand &X64CGFunc::CreateImmOperand(PrimType primType, int64 val) { + CHECK_FATAL(false, "NIY"); + Operand *a; + return *a; +} +void X64CGFunc::ReplaceOpndInInsn(RegOperand ®Dest, RegOperand ®Src, Insn &insn, regno_t regno) { + CHECK_FATAL(false, "NIY"); +} +void X64CGFunc::CleanupDeadMov(bool dump) { + CHECK_FATAL(false, "NIY"); +} +void X64CGFunc::GetRealCallerSaveRegs(const Insn &insn, std::set &realCallerSave) { + CHECK_FATAL(false, "NIY"); +} +bool X64CGFunc::IsFrameReg(const RegOperand &opnd) const { + CHECK_FATAL(false, "NIY"); + return false; +} +RegOperand *X64CGFunc::SelectVectorAddLong(PrimType rTy, Operand *o1, Operand *o2, PrimType oty, bool isLow) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +RegOperand *X64CGFunc::SelectVectorAddWiden(Operand *o1, PrimType oty1, Operand *o2, PrimType oty2, bool isLow) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +RegOperand *X64CGFunc::SelectVectorAbs(PrimType rType, Operand *o1) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +RegOperand *X64CGFunc::SelectVectorBinOp(PrimType rType, Operand *o1, PrimType oTyp1, Operand *o2, + PrimType oTyp2, Opcode opc) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +RegOperand *X64CGFunc::SelectVectorBitwiseOp(PrimType rType, Operand *o1, PrimType oty1, Operand *o2, + PrimType oty2, Opcode opc) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +RegOperand *X64CGFunc::SelectVectorCompareZero(Operand *o1, PrimType oty1, Operand *o2, Opcode opc) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +RegOperand *X64CGFunc::SelectVectorCompare(Operand *o1, PrimType oty1, Operand *o2, PrimType oty2, Opcode opc) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +RegOperand *X64CGFunc::SelectVectorFromScalar(PrimType pType, Operand *opnd, PrimType sType) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +RegOperand *X64CGFunc::SelectVectorDup(PrimType rType, Operand *src, bool getLow) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +RegOperand *X64CGFunc::SelectVectorGetElement(PrimType rType, Operand *src, PrimType sType, int32 lane) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +RegOperand *X64CGFunc::SelectVectorAbsSubL(PrimType rType, Operand *o1, Operand *o2, PrimType oTy, 
bool isLow) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +RegOperand *X64CGFunc::SelectVectorMadd(Operand *o1, PrimType oTyp1, Operand *o2, PrimType oTyp2, Operand *o3, + PrimType oTyp3) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +RegOperand *X64CGFunc::SelectVectorMerge(PrimType rTyp, Operand *o1, Operand *o2, int32 iNum) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +RegOperand *X64CGFunc::SelectVectorMull(PrimType rType, Operand *o1, PrimType oTyp1, Operand *o2, PrimType oTyp2, + bool isLow) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +RegOperand *X64CGFunc::SelectVectorNarrow(PrimType rType, Operand *o1, PrimType otyp) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +RegOperand *X64CGFunc::SelectVectorNarrow2(PrimType rType, Operand *o1, PrimType oty1, Operand *o2, PrimType oty2) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +RegOperand *X64CGFunc::SelectVectorNeg(PrimType rType, Operand *o1) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +RegOperand *X64CGFunc::SelectVectorNot(PrimType rType, Operand *o1) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +RegOperand *X64CGFunc::SelectVectorPairwiseAdalp(Operand *src1, PrimType sty1, Operand *src2, PrimType sty2) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +RegOperand *X64CGFunc::SelectVectorPairwiseAdd(PrimType rType, Operand *src, PrimType sType) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +RegOperand *X64CGFunc::SelectVectorReverse(PrimType rtype, Operand *src, PrimType stype, uint32 size) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +RegOperand *X64CGFunc::SelectVectorSetElement(Operand *eOp, PrimType eTyp, Operand *vOpd, PrimType vTyp, + int32 lane) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +RegOperand *X64CGFunc::SelectVectorShift(PrimType rType, Operand *o1, PrimType oty1, Operand *o2, PrimType oty2, + Opcode opc) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +RegOperand *X64CGFunc::SelectVectorShiftImm(PrimType rType, Operand *o1, Operand *imm, int32 sVal, Opcode opc) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +RegOperand *X64CGFunc::SelectVectorShiftRNarrow(PrimType rType, Operand *o1, PrimType oType, Operand *o2, bool isLow) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +RegOperand *X64CGFunc::SelectVectorSubWiden(PrimType resType, Operand *o1, PrimType otyp1, Operand *o2, PrimType otyp2, + bool isLow, bool isWide) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +RegOperand *X64CGFunc::SelectVectorSum(PrimType rtype, Operand *o1, PrimType oType) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +RegOperand *X64CGFunc::SelectVectorTableLookup(PrimType rType, Operand *o1, Operand *o2) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +RegOperand *X64CGFunc::SelectVectorWiden(PrimType rType, Operand *o1, PrimType otyp, bool isLow) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +RegOperand *X64CGFunc::SelectVectorMovNarrow(PrimType rType, Operand *opnd, PrimType oType) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +Operand *X64CGFunc::SelectIntrinsicOpWithNParams(IntrinsicopNode &intrinopNode, PrimType retType, + const std::string &name) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +void X64CGFunc::ProcessLazyBinding() { + CHECK_FATAL(false, "NIY"); +} +void X64CGFunc::DBGFixCallFrameLocationOffsets() { + CHECK_FATAL(false, "NIY"); +} +MemOperand *X64CGFunc::GetPseudoRegisterSpillMemoryOperand(PregIdx idx) { + CHECK_FATAL(false, "NIY"); + return nullptr; +} +int32 X64CGFunc::GetBaseOffset(const 
SymbolAlloc &symbolAlloc) { + const auto *symAlloc = static_cast(&symbolAlloc); + /* Call Frame layout of X64 + * Refer to layout in x64_memlayout.h. + * Do Not change this unless you know what you do + * memlayout like this + * rbp position + * ArgsReg -- + * Locals | -- FrameSize + * Spill | + * ArgsStk -- + */ + constexpr const int32 sizeofFplr = 2 * kIntregBytelen; + // baseOffset is the offset of this symbol based on the rbp position. + int32 baseOffset = symAlloc->GetOffset(); + MemSegmentKind sgKind = symAlloc->GetMemSegment()->GetMemSegmentKind(); + auto *memLayout = static_cast(this->GetMemlayout()); + if (sgKind == kMsSpillReg) { + /* spill = -(Locals + ArgsReg + baseOffset + kSizeOfPtr) */ + return -(memLayout->GetSizeOfLocals() + memLayout->SizeOfArgsRegisterPassed() + baseOffset + GetPointerSize()); + } + else if (sgKind == kMsLocals) { + /* Locals = baseOffset-(Locals + ArgsReg) */ + return baseOffset - (memLayout->GetSizeOfLocals() + memLayout->SizeOfArgsRegisterPassed()); + } else if (sgKind == kMsArgsRegPassed) { + /* ArgsReg = baseOffset-(ArgsReg) */ + return baseOffset - memLayout->SizeOfArgsRegisterPassed(); + } else if (sgKind == kMsArgsStkPassed) { + return baseOffset + sizeofFplr; + } else { + CHECK_FATAL(false, "sgKind check"); + } + return 0; +} + +RegOperand *X64CGFunc::GetBaseReg(const maplebe::SymbolAlloc &symAlloc) { + MemSegmentKind sgKind = symAlloc.GetMemSegment()->GetMemSegmentKind(); + ASSERT(((sgKind == kMsArgsRegPassed) || (sgKind == kMsLocals) || (sgKind == kMsRefLocals) || + (sgKind == kMsArgsToStkPass) || (sgKind == kMsArgsStkPassed)), "NIY"); + if (sgKind == kMsLocals || sgKind == kMsArgsRegPassed || sgKind == kMsArgsStkPassed) { + return &GetOpndBuilder()->CreatePReg(x64::RBP, GetPointerSize() * kBitsPerByte, kRegTyInt); + } else { + CHECK_FATAL(false, "NIY sgKind"); + } + return nullptr; +} + +void X64CGFunc::FreeSpillRegMem(regno_t vrNum) { + MemOperand *memOpnd = nullptr; + + auto p = spillRegMemOperands.find(vrNum); + if (p != spillRegMemOperands.end()) { + memOpnd = p->second; + } + + if ((memOpnd == nullptr) && IsVRegNOForPseudoRegister(vrNum)) { + auto pSecond = pRegSpillMemOperands.find(GetPseudoRegIdxFromVirtualRegNO(vrNum)); + if (pSecond != pRegSpillMemOperands.end()) { + memOpnd = pSecond->second; + } + } + + if (memOpnd == nullptr) { + ASSERT(false, "free spillreg have no mem"); + return; + } + + uint32 size = memOpnd->GetSize(); + MapleUnorderedMap::iterator iter; + if ((iter = reuseSpillLocMem.find(size)) != reuseSpillLocMem.end()) { + iter->second->Add(*memOpnd); + } else { + reuseSpillLocMem[size] = memPool->New(*GetFuncScopeAllocator()); + reuseSpillLocMem[size]->Add(*memOpnd); + } +} + +MemOperand *X64CGFunc::GetOrCreatSpillMem(regno_t vrNum) { + /* NOTES: must used in RA, not used in other place. 
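 * Lookup order in the code below: the pseudo-register spill map is checked first, then the
 * per-vreg spill map, then the size-keyed reuse pool (reuseSpillLocMem); only if all of these
 * miss is a fresh k64BitSize slot created against the stack base register (rbp).
 * Illustrative offset with assumed segment sizes: by GetBaseOffset above, a spill slot at
 * segment offset 0 with 24 bytes of locals, 16 bytes of register-passed args and an 8-byte
 * pointer ends up at -(24 + 16 + 0 + 8) = -48 relative to rbp.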
*/ + if (IsVRegNOForPseudoRegister(vrNum)) { + auto p = pRegSpillMemOperands.find(GetPseudoRegIdxFromVirtualRegNO(vrNum)); + if (p != pRegSpillMemOperands.end()) { + return p->second; + } + } + + auto p = spillRegMemOperands.find(vrNum); + if (p == spillRegMemOperands.end()) { + uint32 memBitSize = k64BitSize; + auto it = reuseSpillLocMem.find(memBitSize); + if (it != reuseSpillLocMem.end()) { + MemOperand *memOpnd = it->second->GetOne(); + if (memOpnd != nullptr) { + spillRegMemOperands.emplace(std::pair(vrNum, memOpnd)); + return memOpnd; + } + } + + RegOperand &baseOpnd = GetOrCreateStackBaseRegOperand(); + int32 offset = GetOrCreatSpillRegLocation(vrNum); + MemOperand *memOpnd = &GetOpndBuilder()->CreateMem(baseOpnd, offset, memBitSize); + spillRegMemOperands.emplace(std::pair(vrNum, memOpnd)); + return memOpnd; + } else { + return p->second; + } +} + +void X64OpndDumpVisitor::Visit(maplebe::RegOperand *v) { + DumpOpndPrefix(); + LogInfo::MapleLogger() << "reg "; + DumpRegInfo(*v); + DumpSize(*v); + const OpndDesc *regDesc = GetOpndDesc(); + LogInfo::MapleLogger() << " ["; + if (regDesc->IsRegDef()) { + LogInfo::MapleLogger() << "DEF "; + } + if (regDesc->IsRegUse()) { + LogInfo::MapleLogger() << "USE"; + } + LogInfo::MapleLogger() << "]"; + DumpOpndSuffix(); +} + +void X64OpndDumpVisitor::Visit(CommentOperand *v) { + LogInfo::MapleLogger() << ":#" << v->GetComment(); +} + +void X64OpndDumpVisitor::Visit(maplebe::ImmOperand *v) { + DumpOpndPrefix(); + if (v->IsStImmediate()) { + LogInfo::MapleLogger() << v->GetName(); + LogInfo::MapleLogger() << "+offset:" << v->GetValue(); + } else { + LogInfo::MapleLogger() << "imm:" << v->GetValue(); + } + DumpSize(*v); + DumpOpndSuffix(); +} + +void X64OpndDumpVisitor::Visit(maplebe::MemOperand *v) { + DumpOpndPrefix(); + LogInfo::MapleLogger() << "mem "; + if (v->GetBaseRegister() != nullptr) { + DumpRegInfo(*v->GetBaseRegister()); + if (v->GetOffsetOperand() != nullptr) { + LogInfo::MapleLogger() << " + " << v->GetOffsetOperand()->GetValue(); + } + } + DumpSize(*v); + DumpOpndSuffix(); +} +void X64OpndDumpVisitor::DumpRegInfo(maplebe::RegOperand &v) { + if (v.GetRegisterNumber() > kBaseVirtualRegNO) { + LogInfo::MapleLogger() << "V" << v.GetRegisterNumber(); + } else { + bool r32 = (v.GetSize() == k32BitSize); + LogInfo::MapleLogger() << "%" + << X64CG::intRegNames[(r32 ? X64CG::kR32List : X64CG::kR64List)][v.GetRegisterNumber()]; + } +} + +void X64OpndDumpVisitor::Visit(maplebe::FuncNameOperand *v) { + DumpOpndPrefix(); + LogInfo::MapleLogger() << "funcname "; + LogInfo::MapleLogger() << v->GetName(); + DumpSize(*v); + DumpOpndSuffix(); +} + +void X64OpndDumpVisitor::Visit(maplebe::ListOperand *v) { + DumpOpndPrefix(); + LogInfo::MapleLogger() << "list "; + + MapleList opndList = v->GetOperands(); + for (auto it = opndList.begin(); it != opndList.end();) { + (*it)->Dump(); + LogInfo::MapleLogger() << (++it == opndList.end() ? 
"" : " ,"); + } + DumpSize(*v); + DumpOpndSuffix(); +} + +void X64OpndDumpVisitor::Visit(maplebe::LabelOperand *v) { + DumpOpndPrefix(); + LogInfo::MapleLogger() << "label "; + LogInfo::MapleLogger() << v->GetLabelIndex(); + DumpSize(*v); + DumpOpndSuffix(); +} + +void X64OpndDumpVisitor::Visit(PhiOperand *v) { + CHECK_FATAL(false, "NIY"); +} + +void X64OpndDumpVisitor::Visit(CondOperand *v) { + CHECK_FATAL(false, "do not use this operand, it will be eliminated soon"); +} +void X64OpndDumpVisitor::Visit(StImmOperand *v) { + CHECK_FATAL(false, "do not use this operand, it will be eliminated soon"); +} +void X64OpndDumpVisitor::Visit(BitShiftOperand *v) { + CHECK_FATAL(false, "do not use this operand, it will be eliminated soon"); +} +void X64OpndDumpVisitor::Visit(ExtendShiftOperand *v) { + CHECK_FATAL(false, "do not use this operand, it will be eliminated soon"); +} +} diff --git a/src/mapleall/maple_be/src/cg/x86_64/x64_emitter.cpp b/src/mapleall/maple_be/src/cg/x86_64/x64_emitter.cpp new file mode 100644 index 0000000000000000000000000000000000000000..ab229f3d61fbbeffe1df2878fb315c21b2e6d6dd --- /dev/null +++ b/src/mapleall/maple_be/src/cg/x86_64/x64_emitter.cpp @@ -0,0 +1,307 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ + +#include "x64_cgfunc.h" +#include "x64_cg.h" +#include "x64_emitter.h" +#include "insn.h" + +namespace maplebe { +void X64Emitter::EmitRefToMethodDesc(FuncEmitInfo &funcEmitInfo, Emitter &emitter) {} +void X64Emitter::EmitRefToMethodInfo(FuncEmitInfo &funcEmitInfo, Emitter &emitter) {} +void X64Emitter::EmitMethodDesc(FuncEmitInfo &funcEmitInfo, Emitter &emitter) {} +void X64Emitter::EmitFastLSDA(FuncEmitInfo &funcEmitInfo) {} +void X64Emitter::EmitFullLSDA(FuncEmitInfo &funcEmitInfo) {} +void X64Emitter::EmitJavaInsnAddr(FuncEmitInfo &funcEmitInfo) {} + +void X64Emitter::EmitBBHeaderLabel(FuncEmitInfo &funcEmitInfo, const std::string &name, LabelIdx labIdx) { + CGFunc &cgFunc = funcEmitInfo.GetCGFunc(); + CG *currCG = cgFunc.GetCG(); + Emitter &emitter = *(currCG->GetEmitter()); + + PUIdx pIdx = currCG->GetMIRModule()->CurFunction()->GetPuidx(); + const char *puIdx = strdup(std::to_string(pIdx).c_str()); + const std::string &labelName = cgFunc.GetFunction().GetLabelTab()->GetName(labIdx); + if (currCG->GenerateVerboseCG()) { + emitter.Emit(".L.").Emit(puIdx).Emit("__").Emit(labIdx).Emit(":\t"); + if (!labelName.empty() && labelName.at(0) != '@') { + /* If label name has @ as its first char, it is not from MIR */ + emitter.Emit("// MIR: @").Emit(labelName).Emit("\n"); + } else { + emitter.Emit("\n"); + } + } else { + emitter.Emit(".L.").Emit(puIdx).Emit("__").Emit(labIdx).Emit(":\n"); + } +} + +void X64OpndEmitVisitor::Visit(maplebe::RegOperand *v) { + ASSERT(v->IsRegister(), "NIY"); + /* check legality of register operand: reg no. should not be larger than 100 or equal to 0 */ + ASSERT(v->IsPhysicalRegister(), "register is still virtual"); + ASSERT(v->GetRegisterNumber() > 0, "register no. 
is 0: ERR"); + /* Mapping with physical register after register allocation is done + * try table-driven register mapping ? */ + uint8 regType = -1; + switch (v->GetSize()) { + case k8BitSize: + regType = v->IsHigh8Bit() ? X64CG::kR8HighList : X64CG::kR8LowList; + break; + case k16BitSize: + regType = X64CG::kR16List; + break; + case k32BitSize: + regType = X64CG::kR32List; + break; + case k64BitSize: + regType = X64CG::kR64List; + break; + default: + CHECK_FATAL(false, "unkown reg size"); + break; + } + emitter.Emit("%").Emit(X64CG::intRegNames[regType][v->GetRegisterNumber()]); +} + +void X64OpndEmitVisitor::Visit(maplebe::ImmOperand *v) { + ASSERT(v->IsImmediate(), "NIY"); + emitter.Emit("$"); + if (v->GetKind() == maplebe::Operand::kOpdStImmediate) { + /* symbol form imm */ + emitter.Emit(v->GetName()); + } else { + /* general imm */ + emitter.Emit(v->GetValue()); + } + return; +} + +void X64OpndEmitVisitor::Visit(maplebe::MemOperand *v) { + if (v->GetOffsetOperand() != nullptr) { + if (v->GetOffsetOperand()->GetKind() == maplebe::Operand::kOpdStImmediate) { + /* symbol form offset */ + emitter.Emit(v->GetOffsetOperand()->GetName()); + MIRStorageClass storageClass = v->GetOffsetOperand()->GetSymbol()->GetStorageClass(); + bool isLocalVar = v->GetOffsetOperand()->GetSymbol()->IsLocal(); + if (storageClass == kScPstatic && isLocalVar) { + PUIdx pIdx = emitter.GetCG()->GetMIRModule()->CurFunction()->GetPuidx(); + emitter.Emit(pIdx); + } + if (v->GetOffsetOperand()->GetValue() != 0) { + emitter.Emit("+").Emit(v->GetOffsetOperand()->GetValue()); + } + + } else { + /* general offset */ + emitter.Emit(v->GetOffsetOperand()->GetValue()); + } + } + emitter.Emit("("); + if (v->GetBaseRegister() != nullptr) { + /* Emit RBP or EBP only when index register doesn't exist */ + if ((v->GetIndexRegister() != nullptr && v->GetBaseRegister()->GetRegisterNumber() != x64::RBP) || + v->GetIndexRegister() == nullptr) { + Visit(v->GetBaseRegister()); + } + } + if (v->GetIndexRegister() != nullptr) { + emitter.Emit(", "); + Visit(v->GetIndexRegister()); + emitter.Emit(", ").Emit(v->GetScaleOperand()->GetValue()); + } + emitter.Emit(")"); +} + +void X64OpndEmitVisitor::Visit(maplebe::LabelOperand *v) { + ASSERT(v->IsLabel(), "NIY"); + const MapleString &labelName = v->GetParentFunc(); + /* If this label indicates a bb's addr (named as: ".L." + UniqueID + "__" + Offset), + * prefix "$" is not required. 
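 * Illustrative emissions (label names assumed): a basic-block label such as ".L.23__7" is
 * printed verbatim, e.g. "jmp .L.23__7", whereas a label whose name does not begin with '.'
 * is printed with the '$' immediate prefix.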
*/ + if (!labelName.empty() && labelName[0] != '.') { + emitter.Emit("$"); + } + emitter.Emit(labelName); +} + +void X64OpndEmitVisitor::Visit(maplebe::FuncNameOperand *v) { + emitter.Emit(v->GetName()); +} + +void X64OpndEmitVisitor::Visit(maplebe::ListOperand *v) { + CHECK_FATAL(false, "do not run here"); +} + +void X64OpndEmitVisitor::Visit(maplebe::StImmOperand *v) { + CHECK_FATAL(false, "do not run here"); +} + +void X64OpndEmitVisitor::Visit(maplebe::CondOperand *v) { + CHECK_FATAL(false, "do not run here"); +} + +void X64OpndEmitVisitor::Visit(maplebe::BitShiftOperand *v) { + CHECK_FATAL(false, "do not run here"); +} + +void X64OpndEmitVisitor::Visit(maplebe::ExtendShiftOperand *v) { + CHECK_FATAL(false, "do not run here"); +} + +void X64OpndEmitVisitor::Visit(maplebe::CommentOperand *v) { + CHECK_FATAL(false, "do not run here"); +} + +void X64OpndEmitVisitor::Visit(maplebe::OfstOperand *v) { + CHECK_FATAL(false, "do not run here"); +} + +void DumpTargetASM(Emitter &emitter, Insn &insn) { + emitter.Emit("\t"); + const InsnDesc &curMd = X64CG::kMd[insn.GetMachineOpcode()]; + + /* Get Operands Number */ + size_t size = 0; + std::string format(curMd.format); + for (char c : format) { + if (c != ',') { + size = size + 1; + } + } + +#if DEBUG + insn.Check(); +#endif + + emitter.Emit(curMd.GetName()).Emit("\t"); + /* In AT&T assembly syntax, Indirect jump/call operands are indicated + * with asterisk "*" (as opposed to direct). + * Direct jump/call: jmp .L.xxx__x or callq funcName + * Indirect jump/call: jmp *%rax; jmp *(%rax) or callq *%rax; callq *(%rax) + */ + if (curMd.IsCall() || curMd.IsUnCondBranch()) { + const OpndDesc* opndDesc = curMd.GetOpndDes(0); + if (opndDesc->IsRegister() || opndDesc->IsMem()) { + emitter.Emit("*"); + } + } + + for (int i = 0; i < size; i++) { + Operand *opnd = &insn.GetOperand(i); + X64OpndEmitVisitor visitor(emitter); + opnd->Accept(visitor); + if (i != size - 1) { + emitter.Emit(",\t"); + } + } + emitter.Emit("\n"); +} + +void EmitFunctionHeader(FuncEmitInfo &funcEmitInfo) { + CGFunc &cgFunc = funcEmitInfo.GetCGFunc(); + CG *currCG = cgFunc.GetCG(); + const MIRSymbol *funcSymbol = cgFunc.GetFunction().GetFuncSymbol(); + Emitter &emitter = *currCG->GetEmitter(); + + if (cgFunc.GetFunction().GetAttr(FUNCATTR_section)) { + const std::string §ionName = cgFunc.GetFunction().GetAttrs().GetPrefixSectionName(); + (void)emitter.Emit("\t.section " + sectionName).Emit(",\"ax\",@progbits\n"); + } else { + emitter.EmitAsmLabel(kAsmText); + } + emitter.EmitAsmLabel(*funcSymbol, kAsmAlign); + + if (funcSymbol->GetFunction()->GetAttr(FUNCATTR_weak)) { + emitter.EmitAsmLabel(*funcSymbol, kAsmWeak); + emitter.EmitAsmLabel(*funcSymbol, kAsmHidden); + } else if (funcSymbol->GetFunction()->GetAttr(FUNCATTR_local)) { + emitter.EmitAsmLabel(*funcSymbol, kAsmLocal); + } else if (!funcSymbol->GetFunction()->GetAttr(FUNCATTR_static)) { + emitter.EmitAsmLabel(*funcSymbol, kAsmGlbl); + if (!currCG->GetMIRModule()->IsCModule()) { + emitter.EmitAsmLabel(*funcSymbol, kAsmHidden); + } + } + emitter.EmitAsmLabel(kAsmType); + emitter.Emit(funcSymbol->GetName()).Emit(", %function\n"); + emitter.EmitAsmLabel(*funcSymbol, kAsmSyname); +} + +/* Specially, emit switch table here */ +void EmitJmpTable(Emitter &emitter, CGFunc &cgFunc) { + const MIRSymbol *funcSymbol = cgFunc.GetFunction().GetFuncSymbol(); + for (auto &it : cgFunc.GetEmitStVec()) { + MIRSymbol *st = it.second; + ASSERT(st->IsReadOnly(), "NYI"); + emitter.Emit("\n"); + emitter.EmitAsmLabel(*funcSymbol, kAsmAlign); + 
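    // Illustrative output of the loop body below, with an assumed PUIdx of 5 and three case
    // labels 12, 13 and 14 (".quad" is assumed to be what EmitAsmLabel(kAsmQuad) prints):
    //   <table symbol name>:
    //     .quad .L.5__12
    //     .quad .L.5__13
    //     .quad .L.5__14
    // i.e. one absolute address per case label, taken from the read-only MIRAggConst attached
    // to the function's switch table symbol.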
emitter.Emit(st->GetName() + ":\n"); + MIRAggConst *arrayConst = safe_cast(st->GetKonst()); + CHECK_FATAL(arrayConst != nullptr, "null ptr check"); + PUIdx pIdx = cgFunc.GetMirModule().CurFunction()->GetPuidx(); + const std::string &idx = strdup(std::to_string(pIdx).c_str()); + for (size_t i = 0; i < arrayConst->GetConstVec().size(); i++) { + MIRLblConst *lblConst = safe_cast(arrayConst->GetConstVecItem(i)); + CHECK_FATAL(lblConst != nullptr, "null ptr check"); + emitter.EmitAsmLabel(kAsmQuad); + (void)emitter.Emit(".L." + idx).Emit("__").Emit(lblConst->GetValue()); + (void)emitter.Emit("\n"); + } + (void)emitter.Emit("\n"); + } +} + +void X64Emitter::Run(FuncEmitInfo &funcEmitInfo) { + CGFunc &cgFunc = funcEmitInfo.GetCGFunc(); + X64CGFunc &x64CGFunc = static_cast(cgFunc); + CG *currCG = cgFunc.GetCG(); + const MIRSymbol *funcSymbol = cgFunc.GetFunction().GetFuncSymbol(); + Emitter &emitter = *currCG->GetEmitter(); + /* emit function header */ + EmitFunctionHeader(funcEmitInfo); + + /* emit instructions */ + const std::string &funcName = std::string(cgFunc.GetShortFuncName().c_str()); + FOR_ALL_BB(bb, &x64CGFunc) { + if (bb->IsUnreachable()) { + continue; + } + if (currCG->GenerateVerboseCG()) { + emitter.Emit("// freq:").Emit(bb->GetFrequency()).Emit("\n"); + } + /* emit bb headers */ + if (bb->GetLabIdx() != MIRLabelTable::GetDummyLabel()) { + EmitBBHeaderLabel(funcEmitInfo, funcName, bb->GetLabIdx()); + } + + FOR_BB_INSNS(insn, bb) { + DumpTargetASM(emitter, *insn); + } + } + + EmitJmpTable(emitter, cgFunc); + + emitter.EmitAsmLabel(*funcSymbol, kAsmSize); +} + +bool CgEmission::PhaseRun(maplebe::CGFunc &f) { + Emitter *emitter = f.GetCG()->GetEmitter(); + CHECK_NULL_FATAL(emitter); + AsmFuncEmitInfo funcEmitInfo(f); + emitter->EmitLocalVariable(f); + static_cast(emitter)->Run(funcEmitInfo); + return false; +} +MAPLE_TRANSFORM_PHASE_REGISTER(CgEmission, cgemit) +} /* namespace maplebe */ diff --git a/src/mapleall/maple_be/src/cg/x86_64/x64_isa.cpp b/src/mapleall/maple_be/src/cg/x86_64/x64_isa.cpp new file mode 100644 index 0000000000000000000000000000000000000000..770caf0000379b47b1fbc1f8793ccaecacdc1de8 --- /dev/null +++ b/src/mapleall/maple_be/src/cg/x86_64/x64_isa.cpp @@ -0,0 +1,54 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ + +#include "x64_isa.h" +#include "insn.h" + +namespace maplebe { +namespace x64 { +MOperator FlipConditionOp(MOperator flippedOp) { + switch (flippedOp) { + case X64MOP_t::MOP_je_l: + return X64MOP_t::MOP_jne_l; + case X64MOP_t::MOP_jne_l: + return X64MOP_t::MOP_je_l; + case X64MOP_t::MOP_ja_l: + return X64MOP_t::MOP_jbe_l; + case X64MOP_t::MOP_jbe_l: + return X64MOP_t::MOP_ja_l; + case X64MOP_t::MOP_jae_l: + return X64MOP_t::MOP_jb_l; + case X64MOP_t::MOP_jb_l: + return X64MOP_t::MOP_jae_l; + case X64MOP_t::MOP_jg_l: + return X64MOP_t::MOP_jle_l; + case X64MOP_t::MOP_jle_l: + return X64MOP_t::MOP_jg_l; + case X64MOP_t::MOP_jge_l: + return X64MOP_t::MOP_jl_l; + case X64MOP_t::MOP_jl_l: + return X64MOP_t::MOP_jge_l; + default: + break; + } + return X64MOP_t::MOP_begin; +} + +uint32 GetJumpTargetIdx(const Insn &insn) { + CHECK_FATAL(insn.IsCondBranch() || insn.IsUnCondBranch(), "Not a jump insn"); + return kInsnFirstOpnd; +} +} /* namespace x64 */ +} /* namespace maplebe */ diff --git a/src/mapleall/maple_be/src/cg/x86_64/x64_live.cpp b/src/mapleall/maple_be/src/cg/x86_64/x64_live.cpp new file mode 100644 index 0000000000000000000000000000000000000000..994d33a5f3d940cfd5c5f0d2f784e68d3d73ddd1 --- /dev/null +++ b/src/mapleall/maple_be/src/cg/x86_64/x64_live.cpp @@ -0,0 +1,27 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "x64_live.h" +#include "x64_cg.h" + +namespace maplebe { +static const std::set intParamRegSet = {RDI, RSI, RDX, RCX, R8, R9}; + +bool X64LiveAnalysis::CleanupBBIgnoreReg(regno_t reg) { + if (intParamRegSet.find(reg) != intParamRegSet.end()) { + return true; + } + return false; +} +} /* namespace maplebe */ diff --git a/src/mapleall/maple_be/src/cg/x86_64/x64_local_opt.cpp b/src/mapleall/maple_be/src/cg/x86_64/x64_local_opt.cpp new file mode 100644 index 0000000000000000000000000000000000000000..1fea1d297f38c7c365047a87e5044c47a6a74289 --- /dev/null +++ b/src/mapleall/maple_be/src/cg/x86_64/x64_local_opt.cpp @@ -0,0 +1,126 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "x64_local_opt.h" +#include "x64_reaching.h" +#include "operand.h" +#include "x64_cg.h" + +namespace maplebe { +void X64LocalOpt::DoLocalCopyProp() { + LocalOptimizeManager optManager(*cgFunc, *GetRDInfo()); + optManager.Optimize(); + optManager.Optimize(); +} + +bool CopyRegProp::CheckCondition(Insn &insn) { + MOperator mOp = insn.GetMachineOpcode(); + if (mOp != MOP_movb_r_r && mOp != MOP_movw_r_r && mOp != MOP_movl_r_r && mOp != MOP_movq_r_r) { + return false; + } + ASSERT(insn.GetOperand(kInsnFirstOpnd).IsRegister(), "expects registers"); + ASSERT(insn.GetOperand(kInsnSecondOpnd).IsRegister(), "expects registers"); + auto ®Use = static_cast(insn.GetOperand(kInsnFirstOpnd)); + auto ®Def = static_cast(insn.GetOperand(kInsnSecondOpnd)); + if (regUse.GetRegisterNumber() == regDef.GetRegisterNumber()) { + return false; + } + auto &liveOutRegSet = insn.GetBB()->GetLiveOutRegNO(); + if (liveOutRegSet.find(regDef.GetRegisterNumber()) != liveOutRegSet.end()) { + return false; + } + return true; +} + +void CopyRegProp::Optimize(BB &bb, Insn &insn) { + InsnSet useInsnSet; + Insn *nextInsn = insn.GetNextMachineInsn(); + if (nextInsn == nullptr) { + return; + } + auto ®Def = static_cast(insn.GetOperand(kInsnSecondOpnd)); + reachingDef->FindRegUseBetweenInsn(regDef.GetRegisterNumber(), nextInsn, bb.GetLastInsn(), useInsnSet); + bool redefined = false; + auto &replaceOpnd = static_cast(insn.GetOperand(kInsnFirstOpnd)); + for (Insn *tInsn : useInsnSet) { + std::vector defInsnVec = reachingDef->FindRegDefBetweenInsn(replaceOpnd.GetRegisterNumber(), + &insn, tInsn, false, false); + if (defInsnVec.size() > 0) { + redefined = true; + } + if (redefined) { + break; + } + propagateOperand(*tInsn, regDef, replaceOpnd); + } + return; +} + +bool CopyRegProp::propagateOperand(Insn &insn, RegOperand& oldOpnd, RegOperand& replaceOpnd) { + bool propagateSuccess = false; + uint32 opndNum = insn.GetOperandSize(); + const InsnDesc *md = insn.GetDesc(); + if (insn.IsShift() && oldOpnd.GetRegisterNumber() == x64::RCX) { + return false; + } + if (insn.GetMachineOpcode() == MOP_pseudo_ret_int) { + return false; + } + for (int i = 0; i < opndNum; i++) { + Operand &opnd = insn.GetOperand(i); + if (opnd.IsList()) { + /* list operands are used by call, + * which can not be propagated + */ + continue; + } + + auto *regProp = md->opndMD[i]; + if (regProp->IsUse() && !regProp->IsDef() && opnd.IsRegister()) { + RegOperand ®Opnd = static_cast(opnd); + if (RegOperand::IsSameReg(regOpnd, oldOpnd)) { + insn.SetOperand(i, replaceOpnd); + propagateSuccess = true; + } + } + } + return propagateSuccess; +} + +void X64RedundantDefRemove::Optimize(BB &bb, Insn &insn) { + const InsnDesc *md = insn.GetDesc(); + RegOperand *regDef = nullptr; + uint32 opndNum = insn.GetOperandSize(); + for (uint32 i = 0; i < opndNum; ++i) { + Operand &opnd = insn.GetOperand(i); + auto *opndDesc = md->opndMD[i]; + if (opndDesc->IsRegDef()) { + regDef = static_cast(&opnd); + } + } + InsnSet useInsnSet; + Insn *nextInsn = insn.GetNextMachineInsn(); + if (nextInsn == nullptr) { + return; + } + reachingDef->FindRegUseBetweenInsn(regDef->GetRegisterNumber(), + nextInsn, bb.GetLastInsn(), useInsnSet); + if (useInsnSet.size() == 0) { + bb.RemoveInsn(insn); + return; + } + return; +} +} + diff --git a/src/mapleall/maple_be/src/cg/x86_64/x64_memlayout.cpp b/src/mapleall/maple_be/src/cg/x86_64/x64_memlayout.cpp new file mode 100644 index 0000000000000000000000000000000000000000..2bd58bdc9a39a902bc5a84de6fe2f712d454bdfd --- /dev/null +++ 
b/src/mapleall/maple_be/src/cg/x86_64/x64_memlayout.cpp @@ -0,0 +1,292 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "x64_memlayout.h" +#include "x64_cgfunc.h" +#include "becommon.h" +#include "mir_nodes.h" +#include "x64_call_conv.h" +#include "cg.h" + +namespace maplebe { +using namespace maple; + +uint32 X64MemLayout::ComputeStackSpaceRequirementForCall(StmtNode &stmt, int32 &aggCopySize, bool isIcall) { + /* instantiate a parm locator */ + X64CallConvImpl parmLocator(be); + uint32 sizeOfArgsToStkPass = 0; + size_t i = 0; + /* An indirect call's first operand is the invocation target */ + if (isIcall) { + ++i; + } + + aggCopySize = 0; + for (uint32 anum = 0; i < stmt.NumOpnds(); ++i, ++anum) { + BaseNode *opnd = stmt.Opnd(i); + MIRType *ty = nullptr; + if (opnd->GetPrimType() != PTY_agg) { + ty = GlobalTables::GetTypeTable().GetTypeTable()[static_cast(opnd->GetPrimType())]; + } else { + Opcode opndOpcode = opnd->GetOpCode(); + ASSERT(opndOpcode == OP_dread || opndOpcode == OP_iread, "opndOpcode should be OP_dread or OP_iread"); + if (opndOpcode == OP_dread) { + DreadNode *dread = static_cast(opnd); + MIRSymbol *sym = be.GetMIRModule().CurFunction()->GetLocalOrGlobalSymbol(dread->GetStIdx()); + ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(sym->GetTyIdx()); + if (dread->GetFieldID() != 0) { + ASSERT(ty->GetKind() == kTypeStruct || ty->GetKind() == kTypeClass || + ty->GetKind() == kTypeUnion, "expect struct or class"); + if (ty->GetKind() == kTypeStruct || ty->GetKind() == kTypeUnion) { + ty = static_cast(ty)->GetFieldType(dread->GetFieldID()); + } else { + ty = static_cast(ty)->GetFieldType(dread->GetFieldID()); + } + } + } else { + /* OP_iread */ + IreadNode *iread = static_cast(opnd); + ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(iread->GetTyIdx()); + ASSERT(ty->GetKind() == kTypePointer, "expect pointer"); + ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(ty)->GetPointedTyIdx()); + if (iread->GetFieldID() != 0) { + ASSERT(ty->GetKind() == kTypeStruct || ty->GetKind() == kTypeClass || + ty->GetKind() == kTypeUnion, "expect struct or class"); + if (ty->GetKind() == kTypeStruct || ty->GetKind() == kTypeUnion) { + ty = static_cast(ty)->GetFieldType(iread->GetFieldID()); + } else { + ty = static_cast(ty)->GetFieldType(iread->GetFieldID()); + } + } + } + } + CCLocInfo ploc; + aggCopySize += parmLocator.LocateNextParm(*ty, ploc); + if (ploc.reg0 != 0) { + continue; /* passed in register, so no effect on actual area */ + } + sizeOfArgsToStkPass = RoundUp(ploc.memOffset + ploc.memSize, GetPointerSize()); + } + return sizeOfArgsToStkPass; +} + +void X64MemLayout::SetSizeAlignForTypeIdx(uint32 typeIdx, uint32 &size, uint32 &align) const { + align = be.GetTypeAlign(typeIdx); + size = static_cast(be.GetTypeSize(typeIdx)); +} + +void X64MemLayout::LayoutVarargParams() { + uint32 nIntRegs = 0; + uint32 nFpRegs = 0; + X64CallConvImpl parmlocator(be); + CCLocInfo ploc; + MIRFunction *func = mirFunction; + if 
(be.GetMIRModule().IsCModule() && func->GetAttr(FUNCATTR_varargs)) { + for (uint32 i = 0; i < func->GetFormalCount(); i++) { + if (i == 0) { + if (be.HasFuncReturnType(*func)) { + TyIdx tidx = be.GetFuncReturnType(*func); + if (be.GetTypeSize(tidx.GetIdx()) <= k16ByteSize) { + continue; + } + } + } + MIRType *ty = func->GetNthParamType(i); + parmlocator.LocateNextParm(*ty, ploc, i == 0, func); + if (ploc.reg0 != kRinvalid) { + /* The range here is R0 to R15. However, not all registers in the range are parameter registers. + * If necessary later, you can add parameter register checks. */ + if (ploc.reg0 >= R0 && ploc.reg0 <= R15) { + nIntRegs++; + } else if (ploc.reg0 >= V0 && ploc.reg0 <= V7) { + nFpRegs++; + } + } + if (ploc.reg1 != kRinvalid) { + if (ploc.reg1 >= R0 && ploc.reg1 <= R15) { + nIntRegs++; + } else if (ploc.reg1 >= V0 && ploc.reg1 <= V7) { + nFpRegs++; + } + } + if (ploc.reg2 != kRinvalid) { + if (ploc.reg2 >= R0 && ploc.reg2 <= R15) { + nIntRegs++; + } else if (ploc.reg2 >= V0 && ploc.reg2 <= V7) { + nFpRegs++; + } + } + if (ploc.reg3 != kRinvalid) { + if (ploc.reg3 >= R0 && ploc.reg3 <= R15) { + nIntRegs++; + } else if (ploc.reg2 >= V0 && ploc.reg2 <= V7) { + nFpRegs++; + } + } + } + + SetSizeOfGRSaveArea((k6BitSize - nIntRegs) * GetPointerSize()); + SetSizeOfVRSaveArea((k6BitSize - nFpRegs) * GetPointerSize() * k2ByteSize); + } +} + +void X64MemLayout::LayoutFormalParams() { + X64CallConvImpl parmLocator(be); + CCLocInfo ploc; + for (size_t i = 0; i < mirFunction->GetFormalCount(); ++i) { + MIRSymbol *sym = mirFunction->GetFormal(i); + uint32 stIndex = sym->GetStIndex(); + X64SymbolAlloc *symLoc = memAllocator->GetMemPool()->New(); + SetSymAllocInfo(stIndex, *symLoc); + if (i == 0) { + // The function name here is not appropriate, it should be to determine + // whether the function returns a structure less than 16 bytes. At this + // time, the first parameter is a structure occupant, which has no + // practical significance. 
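      // Illustrative case with a hypothetical signature: for "struct S foo(int x)" where S is
      // small enough to be returned in registers, formal 0 is only that return-value placeholder;
      // it is parked in the register-passed-args segment below and skipped, so layout of the
      // real parameters starts at formal 1.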
+ if (be.HasFuncReturnType(*mirFunction)) { + symLoc->SetMemSegment(GetSegArgsRegPassed()); + symLoc->SetOffset(GetSegArgsRegPassed().GetSize()); + continue; + } + } + + MIRType *ty = mirFunction->GetNthParamType(i); + uint32 ptyIdx = ty->GetTypeIndex(); + parmLocator.LocateNextParm(*ty, ploc, i == 0, mirFunction); + uint32 size = 0; + uint32 align = 0; + if (ploc.reg0 != kRinvalid) { + if (!sym->IsPreg()) { + SetSizeAlignForTypeIdx(ptyIdx, size, align); + symLoc->SetMemSegment(GetSegArgsRegPassed()); + if (ty->GetPrimType() == PTY_agg && be.GetTypeSize(ptyIdx) > k4ByteSize) { + /* struct param aligned on 8 byte boundary unless it is small enough */ + align = GetPointerSize(); + } + segArgsRegPassed.SetSize(static_cast(RoundUp(segArgsRegPassed.GetSize(), align))); + symLoc->SetOffset(segArgsRegPassed.GetSize()); + segArgsRegPassed.SetSize(segArgsRegPassed.GetSize() + size); + } + } else { + SetSizeAlignForTypeIdx(ptyIdx, size, align); + symLoc->SetMemSegment(GetSegArgsStkPassed()); + segArgsStkPassed.SetSize(static_cast(RoundUp(segArgsStkPassed.GetSize(), align))); + symLoc->SetOffset(segArgsStkPassed.GetSize()); + segArgsStkPassed.SetSize(segArgsStkPassed.GetSize() + size); + segArgsStkPassed.SetSize(static_cast(RoundUp(segArgsStkPassed.GetSize(), GetPointerSize()))); + } + } +} + +void X64MemLayout::LayoutLocalVariables() { + uint32 symTabSize = mirFunction->GetSymTab()->GetSymbolTableSize(); + for (uint32 i = 0; i < symTabSize; ++i) { + MIRSymbol *sym = mirFunction->GetSymTab()->GetSymbolFromStIdx(i); + if (sym == nullptr || sym->GetStorageClass() != kScAuto || sym->IsDeleted()) { + continue; + } + uint32 stIndex = sym->GetStIndex(); + TyIdx tyIdx = sym->GetTyIdx(); + X64SymbolAlloc *symLoc = memAllocator->GetMemPool()->New(); + SetSymAllocInfo(stIndex, *symLoc); + CHECK_FATAL(!symLoc->IsRegister(), "expect not register"); + + symLoc->SetMemSegment(segLocals); + MIRType *ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx); + uint32 align = be.GetTypeAlign(tyIdx); + if (ty->GetPrimType() == PTY_agg && align < k8BitSize) { + segLocals.SetSize(static_cast(RoundUp(segLocals.GetSize(), k8BitSize))); + } else { + segLocals.SetSize(static_cast(RoundUp(segLocals.GetSize(), align))); + } + symLoc->SetOffset(segLocals.GetSize()); + segLocals.SetSize(segLocals.GetSize() + be.GetTypeSize(tyIdx)); + } +} + +void X64MemLayout::AssignSpillLocationsToPseudoRegisters() { + MIRPregTable *pregTab = cgFunc->GetFunction().GetPregTab(); + + /* BUG: n_regs include index 0 which is not a valid preg index. 
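   * As a consequence spillLocTable is sized to nRegs but slot 0 is never filled; the loop
   * below deliberately starts at index 1.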
*/ + size_t nRegs = pregTab->Size(); + spillLocTable.resize(nRegs); + for (size_t i = 1; i < nRegs; ++i) { + PrimType pType = pregTab->PregFromPregIdx(i)->GetPrimType(); + X64SymbolAlloc *symLoc = memAllocator->GetMemPool()->New(); + symLoc->SetMemSegment(segLocals); + segLocals.SetSize(RoundUp(segLocals.GetSize(), GetPrimTypeSize(pType))); + symLoc->SetOffset(segLocals.GetSize()); + MIRType *mirTy = GlobalTables::GetTypeTable().GetTypeTable()[pType]; + segLocals.SetSize(segLocals.GetSize() + be.GetTypeSize(mirTy->GetTypeIndex())); + spillLocTable[i] = symLoc; + } +} + +SymbolAlloc *X64MemLayout::AssignLocationToSpillReg(regno_t vrNum) { + X64SymbolAlloc *symLoc = memAllocator->GetMemPool()->New(); + symLoc->SetMemSegment(segSpillReg); + uint32 regSize = GetPointerSize(); + segSpillReg.SetSize(RoundUp(segSpillReg.GetSize(), regSize)); + symLoc->SetOffset(segSpillReg.GetSize()); + segSpillReg.SetSize(segSpillReg.GetSize() + regSize); + SetSpillRegLocInfo(vrNum, *symLoc); + return symLoc; +} + +void X64MemLayout::LayoutReturnRef(int32 &structCopySize, int32 &maxParmStackSize) { + segArgsToStkPass.SetSize(FindLargestActualArea(structCopySize)); + maxParmStackSize = static_cast(segArgsToStkPass.GetSize()); + if (Globals::GetInstance()->GetOptimLevel() == CGOptions::kLevel0) { + AssignSpillLocationsToPseudoRegisters(); + } + segLocals.SetSize(static_cast(RoundUp(segLocals.GetSize(), GetPointerSize()))); +} + +void X64MemLayout::LayoutStackFrame(int32 &structCopySize, int32 &maxParmStackSize) { + LayoutVarargParams(); + LayoutFormalParams(); + + // Need to be aligned ? + segArgsRegPassed.SetSize(RoundUp(segArgsRegPassed.GetSize(), GetPointerSize())); + segArgsStkPassed.SetSize(RoundUp(segArgsStkPassed.GetSize(), GetPointerSize() + GetPointerSize())); + + /* allocate the local variables in the stack */ + LayoutLocalVariables(); + LayoutReturnRef(structCopySize, maxParmStackSize); + + // Need to adapt to the cc interface. + structCopySize = 0; + // Scenes with more than 6 parameters are not yet enabled. + maxParmStackSize = 0; + + cgFunc->SetUseFP(cgFunc->UseFP() || static_cast(StackFrameSize()) > kMaxPimm32); +} + +uint64 X64MemLayout::StackFrameSize() const { + uint64 total = Locals().GetSize() + segArgsRegPassed.GetSize() + segArgsToStkPass.GetSize() + + segGrSaveArea.GetSize() + segVrSaveArea.GetSize() + segSpillReg.GetSize(); + return RoundUp(total, stackPtrAlignment); +} + +int32 X64MemLayout::GetGRSaveAreaBaseLoc() { + int32 total = static_cast(RoundUp(GetSizeOfGRSaveArea(), stackPtrAlignment)); + return total; +} + +int32 X64MemLayout::GetVRSaveAreaBaseLoc() { + int32 total = static_cast(RoundUp(GetSizeOfGRSaveArea(), stackPtrAlignment) + + RoundUp(GetSizeOfVRSaveArea(), stackPtrAlignment)); + return total; +} +} /* namespace maplebe */ diff --git a/src/mapleall/maple_be/src/cg/x86_64/x64_optimize_common.cpp b/src/mapleall/maple_be/src/cg/x86_64/x64_optimize_common.cpp new file mode 100644 index 0000000000000000000000000000000000000000..125369f0bbee242043f82c910f6cdffb7023ac10 --- /dev/null +++ b/src/mapleall/maple_be/src/cg/x86_64/x64_optimize_common.cpp @@ -0,0 +1,182 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "x64_optimize_common.h" +#include "x64_cgfunc.h" +#include "cgbb.h" +#include "cg.h" + +namespace maplebe { +void X64InsnVisitor::ModifyJumpTarget(Operand &targetOperand, BB &bb) { + Insn *jmpInsn = bb.GetLastInsn(); + if (bb.GetKind() == BB::kBBIgoto) { + CHECK_FATAL(targetOperand.IsLabel(), "NIY"); + CHECK_FATAL(false, "NIY"); + } + jmpInsn->SetOperand(x64::GetJumpTargetIdx(*jmpInsn), targetOperand); +} + +void X64InsnVisitor::ModifyJumpTarget(LabelIdx targetLabel, BB &bb) { + std::string lableName = ".L." + std::to_string(GetCGFunc()->GetUniqueID()) + + "__" + std::to_string(targetLabel); + ModifyJumpTarget(GetCGFunc()->GetOpndBuilder()->CreateLabel(lableName.c_str(), targetLabel), bb); +} + +void X64InsnVisitor::ModifyJumpTarget(BB &newTarget, BB &bb) { + ModifyJumpTarget(newTarget.GetLastInsn()->GetOperand( + x64::GetJumpTargetIdx(*newTarget.GetLastInsn())), bb); +} + +Insn *X64InsnVisitor::CloneInsn(Insn &originalInsn) { + MemPool *memPool = const_cast(CG::GetCurCGFunc()->GetMemoryPool()); + if (originalInsn.IsTargetInsn()) { + if (!originalInsn.IsVectorOp()) { + return memPool->Clone(originalInsn); + } else { + auto *insn = memPool->Clone(*static_cast(&originalInsn)); + insn->SetRegSpecList(static_cast(originalInsn).GetRegSpecList()); + return insn; + } + } else if (originalInsn.IsCfiInsn()) { + return memPool->Clone(*static_cast(&originalInsn)); + } else if (originalInsn.IsDbgInsn()) { + return memPool->Clone(*static_cast(&originalInsn)); + } + if (originalInsn.IsComment()) { + return memPool->Clone(originalInsn); + } + CHECK_FATAL(false, "Cannot clone"); + return nullptr; +} + +/* + * Precondition: The given insn is a jump instruction. + * Get the jump target label operand index from the given instruction. + * Note: MOP_jmp_m, MOP_jmp_r is a jump instruction, but the target is unknown at compile time. 
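 * For those indirect forms the target operand is not a LabelOperand, so the lookup below falls
 * through to the assert-and-return-0 path; callers are expected to pass direct jumps, whose
 * target label is the first operand (see GetJumpTargetIdx, which returns kInsnFirstOpnd).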
+ */ +LabelIdx X64InsnVisitor::GetJumpLabel(const Insn &insn) const { + uint32 operandIdx = x64::GetJumpTargetIdx(insn); + if (insn.GetOperand(operandIdx).IsLabelOpnd()) { + return static_cast(insn.GetOperand(operandIdx)).GetLabelIndex(); + } + ASSERT(false, "Operand is not label"); + return 0; +} + +bool X64InsnVisitor::IsCompareInsn(const Insn &insn) const { + switch (insn.GetMachineOpcode()) { + case x64::MOP_cmpb_r_r: + case x64::MOP_cmpb_m_r: + case x64::MOP_cmpb_i_r: + case x64::MOP_cmpb_r_m: + case x64::MOP_cmpb_i_m: + case x64::MOP_cmpw_r_r: + case x64::MOP_cmpw_m_r: + case x64::MOP_cmpw_i_r: + case x64::MOP_cmpw_r_m: + case x64::MOP_cmpw_i_m: + case x64::MOP_cmpl_r_r: + case x64::MOP_cmpl_m_r: + case x64::MOP_cmpl_i_r: + case x64::MOP_cmpl_r_m: + case x64::MOP_cmpl_i_m: + case x64::MOP_cmpq_r_r: + case x64::MOP_cmpq_m_r: + case x64::MOP_cmpq_i_r: + case x64::MOP_cmpq_r_m: + case x64::MOP_cmpq_i_m: + case x64::MOP_testq_r_r: + return true; + default: + return false; + } +} + +bool X64InsnVisitor::IsCompareAndBranchInsn(const Insn &insn) const { + return false; +} + +bool X64InsnVisitor::IsAddOrSubInsn(const Insn &insn) const { + switch (insn.GetMachineOpcode()) { + case x64::MOP_addb_r_r: + case x64::MOP_addw_r_r: + case x64::MOP_addl_r_r: + case x64::MOP_addq_r_r: + case x64::MOP_addb_m_r: + case x64::MOP_addw_m_r: + case x64::MOP_addl_m_r: + case x64::MOP_addq_m_r: + case x64::MOP_addb_i_r: + case x64::MOP_addw_i_r: + case x64::MOP_addl_i_r: + case x64::MOP_addq_i_r: + case x64::MOP_addb_r_m: + case x64::MOP_addw_r_m: + case x64::MOP_addl_r_m: + case x64::MOP_addq_r_m: + case x64::MOP_addb_i_m: + case x64::MOP_addw_i_m: + case x64::MOP_addl_i_m: + case x64::MOP_addq_i_m: + case x64::MOP_subb_r_r: + case x64::MOP_subw_r_r: + case x64::MOP_subl_r_r: + case x64::MOP_subq_r_r: + case x64::MOP_subb_m_r: + case x64::MOP_subw_m_r: + case x64::MOP_subl_m_r: + case x64::MOP_subq_m_r: + case x64::MOP_subb_i_r: + case x64::MOP_subw_i_r: + case x64::MOP_subl_i_r: + case x64::MOP_subq_i_r: + case x64::MOP_subb_r_m: + case x64::MOP_subw_r_m: + case x64::MOP_subl_r_m: + case x64::MOP_subq_r_m: + case x64::MOP_subb_i_m: + case x64::MOP_subw_i_m: + case x64::MOP_subl_i_m: + case x64::MOP_subq_i_m: + return true; + default: + return false; + } +} + +RegOperand *X64InsnVisitor::CreateVregFromReg(const RegOperand &pReg) { + return &GetCGFunc()->GetOpndBuilder()->CreateVReg(pReg.GetRegisterNumber(), + pReg.GetSize(), pReg.GetRegisterType()); +} + +void X64InsnVisitor::ReTargetSuccBB(BB &bb, LabelIdx newTarget) const { + ASSERT(false, "not implement in X86_64"); + (void)bb; + (void)newTarget; + return; +} +void X64InsnVisitor::FlipIfBB(BB &bb, LabelIdx ftLabel) const { + ASSERT(false, "not implement in X86_64"); + (void)bb; + (void)ftLabel; + return; +} +BB *X64InsnVisitor::CreateGotoBBAfterCondBB(BB &bb, BB &fallthru, bool isTargetFallthru) const { + ASSERT(false, "not implement in X86_64"); + (void)bb; + (void)fallthru; + (void)isTargetFallthru; + return nullptr; +} +} /* namespace maplebe */ diff --git a/src/mapleall/maple_be/src/cg/x86_64/x64_peep.cpp b/src/mapleall/maple_be/src/cg/x86_64/x64_peep.cpp new file mode 100644 index 0000000000000000000000000000000000000000..08356b89bdc1932b0c49dc1176026053011468f5 --- /dev/null +++ b/src/mapleall/maple_be/src/cg/x86_64/x64_peep.cpp @@ -0,0 +1,75 @@ + /* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. 
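Editorial note: a quick sketch of how a CFG-level optimization might drive the visitor above. RedirectBranch is a hypothetical caller added for illustration; only GetJumpLabel and ModifyJumpTarget from this file are relied on.

    // Point the terminating jump of jmpBB at newTargetLabel unless it already goes there.
    void RedirectBranch(X64InsnVisitor &visitor, BB &jmpBB, LabelIdx newTargetLabel) {
      Insn *jmp = jmpBB.GetLastInsn();
      if (visitor.GetJumpLabel(*jmp) != newTargetLabel) {
        visitor.ModifyJumpTarget(newTargetLabel, jmpBB);
      }
    }

ModifyJumpTarget(LabelIdx, BB&) builds the ".L.<funcUID>__<labelIdx>" label operand itself, so callers only need to supply the target label index.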
+ * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "x64_peep.h" +#include "cg.h" +#include "mpl_logging.h" +#include "common_utils.h" +#include "cg_option.h" +#include "x64_cg.h" + +namespace maplebe { +void X64CGPeepHole::Run() { + FOR_ALL_BB(bb, cgFunc) { + FOR_BB_INSNS_SAFE(insn, bb, nextInsn) { + if (!insn->IsMachineInstruction()) { + continue; + } + if (ssaInfo == nullptr) { + DoNormalOptimize(*bb, *insn); + } + } + } +} + +bool X64CGPeepHole::DoSSAOptimize(BB &bb, Insn &insn) { + CHECK_FATAL(false, "x64 does not support ssa optimize"); + return false; +} + +bool RemoveMovingtoSameRegPattern::CheckCondition(Insn &insn) { + ASSERT(insn.GetOperand(kInsnFirstOpnd).IsRegister(), "expects registers"); + ASSERT(insn.GetOperand(kInsnSecondOpnd).IsRegister(), "expects registers"); + auto ®1 = static_cast(insn.GetOperand(kInsnFirstOpnd)); + auto ®2 = static_cast(insn.GetOperand(kInsnSecondOpnd)); + /* remove mov x0,x0 when it cast i32 to i64 */ + if ((reg1.GetRegisterNumber() == reg2.GetRegisterNumber()) && (reg1.GetSize() >= reg2.GetSize())) { + return true; + } + return false; +} + +void RemoveMovingtoSameRegPattern::Run(BB &bb, Insn &insn) { + /* remove mov x0,x0 when it cast i32 to i64 */ + if (CheckCondition(insn)) { + bb.RemoveInsn(insn); + } +} + +void X64CGPeepHole::DoNormalOptimize(BB &bb, Insn &insn) { + MOperator thisMop = insn.GetMachineOpcode(); + manager = peepMemPool->New(*cgFunc, bb, insn); + switch (thisMop) { + case MOP_movb_r_r: + case MOP_movw_r_r: + case MOP_movl_r_r: + case MOP_movq_r_r: { + manager->NormalPatternOpt(true); + break; + } + default: + break; + } + } +} /* namespace maplebe */ diff --git a/src/mapleall/maple_be/src/cg/x86_64/x64_proepilog.cpp b/src/mapleall/maple_be/src/cg/x86_64/x64_proepilog.cpp new file mode 100644 index 0000000000000000000000000000000000000000..d541dcf3978d95a6a6764d7aaf45ebfa7814ad1c --- /dev/null +++ b/src/mapleall/maple_be/src/cg/x86_64/x64_proepilog.cpp @@ -0,0 +1,193 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
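Editorial note: the RemoveMovingtoSameRegPattern check above reduces to a width-aware identity test on the two register operands. A standalone restatement over plain integers, purely illustrative:

    #include <cstdint>

    // A reg-to-reg move is judged removable when both operands name the same register
    // and the first operand is at least as wide as the second (the test CheckCondition
    // performs on the RegOperand pair).
    bool IsRedundantRegMove(uint32_t reg1No, uint32_t reg1Bits,
                            uint32_t reg2No, uint32_t reg2Bits) {
      return reg1No == reg2No && reg1Bits >= reg2Bits;
    }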
+ */ +#include "x64_proepilog.h" +#include "x64_memlayout.h" +#include "x64_isa.h" +#include "isel.h" +#include "x64_cg.h" + +namespace maplebe { +using namespace maple; + +bool X64GenProEpilog::NeedProEpilog() { + return true; +} +void X64GenProEpilog::GenerateCalleeSavedRegs(bool isPush) { + X64CGFunc &x64cgFunc = static_cast(cgFunc); + const auto &calleeSavedRegs = x64cgFunc.GetCalleeSavedRegs(); + if (calleeSavedRegs.empty()) { + return; + } + /* CalleeSave(0) = -(FrameSize + CalleeReg - ArgsStk) */ + X64MemLayout *memLayout = static_cast(cgFunc.GetMemlayout()); + int64 offset = -(memLayout->StackFrameSize() + static_cast(cgFunc).SizeOfCalleeSaved() - + memLayout->SizeOfArgsToStackPass()); + RegOperand &baseReg = cgFunc.GetOpndBuilder()->CreatePReg(x64::RBP, k64BitSize, kRegTyInt); + for (const auto ® : calleeSavedRegs) { + RegType regType = IsGPRegister(reg) ? kRegTyInt : kRegTyFloat; + uint32 regByteSize = IsGPRegister(reg) ? kIntregBytelen : kFpregBytelen; + uint32 regSize = regByteSize * kBitsPerByte; + ASSERT((regSize == k32BitSize || regSize == k64BitSize), "only supported 32/64-bits"); + RegOperand &calleeReg = cgFunc.GetOpndBuilder()->CreatePReg(reg, regSize, regType); + MemOperand &memOpnd = cgFunc.GetOpndBuilder()->CreateMem(baseReg, offset, regSize); + if (isPush) { + GeneratePushCalleeSavedRegs(calleeReg, memOpnd, regSize); + } else { + GeneratePopCalleeSavedRegs(calleeReg, memOpnd, regSize); + } + offset += regByteSize; + } +} + +void X64GenProEpilog::GeneratePushCalleeSavedRegs(RegOperand ®Opnd, MemOperand &memOpnd, uint32 regSize) { + MOperator mMovrmOp = (regSize == k32BitSize) ? x64::MOP_movl_r_m : x64::MOP_movq_r_m; + Insn ©Insn = cgFunc.GetInsnBuilder()->BuildInsn(mMovrmOp, X64CG::kMd[mMovrmOp]); + copyInsn.AddOpndChain(regOpnd).AddOpndChain(memOpnd); + cgFunc.GetCurBB()->AppendInsn(copyInsn); +} + +void X64GenProEpilog::GeneratePopCalleeSavedRegs(RegOperand ®Opnd, MemOperand &memOpnd, uint32 regSize) { + MOperator mMovrmOp = (regSize == k32BitSize) ? x64::MOP_movl_m_r : x64::MOP_movq_m_r; + Insn ©Insn = cgFunc.GetInsnBuilder()->BuildInsn(mMovrmOp, X64CG::kMd[mMovrmOp]); + copyInsn.AddOpndChain(memOpnd).AddOpndChain(regOpnd); + cgFunc.GetCurBB()->AppendInsn(copyInsn); +} + +void X64GenProEpilog::GeneratePushUnnamedVarargRegs() { + if (cgFunc.GetMirModule().IsCModule() && cgFunc.GetFunction().GetAttr(FUNCATTR_varargs)) { + X64MemLayout *memlayout = static_cast(cgFunc.GetMemlayout()); + uint8 size = GetPointerSize(); + uint32 dataSizeBits = size * kBitsPerByte; + int64 offset = -memlayout->GetGRSaveAreaBaseLoc(); + if (memlayout->GetSizeOfGRSaveArea() % kX64StackPtrAlignment) { + offset += size; /* End of area should be aligned. 
Hole between VR and GR area */ + } + uint32 start_regno = k6BitSize - (memlayout->GetSizeOfGRSaveArea() / size); + ASSERT(start_regno <= k6BitSize, "Incorrect starting GR regno for GR Save Area"); + + /* Parameter registers in x86: %rdi, %rsi, %rdx, %rcx, %r8, %r9 */ + std::vector paramRegs = {RDI, RSI, RDX, RCX, R8, R9}; + for (uint32 i = start_regno; i < paramRegs.size(); i++) { + MOperator mMovrmOp = x64::MOP_movq_r_m; + RegOperand &opndFpReg = cgFunc.GetOpndBuilder()->CreatePReg(x64::RBP, k64BitSize, kRegTyInt); + MemOperand &memOpnd = cgFunc.GetOpndBuilder()->CreateMem(opndFpReg, offset, dataSizeBits); + Insn ©Insn = cgFunc.GetInsnBuilder()->BuildInsn(mMovrmOp, X64CG::kMd[mMovrmOp]); + RegOperand ®Opnd = cgFunc.GetOpndBuilder()->CreatePReg(paramRegs[i], k64BitSize, kRegTyInt); + copyInsn.AddOpndChain(regOpnd).AddOpndChain(memOpnd); + cgFunc.GetCurBB()->AppendInsn(copyInsn); + offset += size; + } + + if (!CGOptions::UseGeneralRegOnly()) { + offset = -memlayout->GetVRSaveAreaBaseLoc(); + start_regno = k6BitSize - (memlayout->GetSizeOfVRSaveArea() / (size * k2BitSize)); + ASSERT(start_regno <= k6BitSize, "Incorrect starting GR regno for VR Save Area"); + for (uint32 i = start_regno + static_cast(V0); i < static_cast(V6); i++) { + MOperator mMovrmOp = x64::MOP_movq_r_m; + RegOperand &opndFpReg = cgFunc.GetOpndBuilder()->CreatePReg(x64::RBP, k64BitSize, kRegTyInt); + MemOperand &memOpnd = cgFunc.GetOpndBuilder()->CreateMem(opndFpReg, offset, dataSizeBits); + Insn ©Insn = cgFunc.GetInsnBuilder()->BuildInsn(mMovrmOp, X64CG::kMd[mMovrmOp]); + RegOperand ®Opnd = cgFunc.GetOpndBuilder()->CreatePReg(static_cast(i), k64BitSize, kRegTyInt); + copyInsn.AddOpndChain(regOpnd).AddOpndChain(memOpnd); + + cgFunc.GetCurBB()->AppendInsn(copyInsn); + offset += (size * k2BitSize); + } + } + } +} + +void X64GenProEpilog::GenerateProlog(BB &bb) { + auto &x64CGFunc = static_cast(cgFunc); + BB *formerCurBB = cgFunc.GetCurBB(); + x64CGFunc.GetDummyBB()->ClearInsns(); + x64CGFunc.GetDummyBB()->SetIsProEpilog(true); + cgFunc.SetCurBB(*x64CGFunc.GetDummyBB()); + + /* push %rbp */ + MOperator mPushrOp = x64::MOP_pushq_r; + Insn &pushInsn = cgFunc.GetInsnBuilder()->BuildInsn(mPushrOp, X64CG::kMd[mPushrOp]); + RegOperand &opndFpReg = cgFunc.GetOpndBuilder()->CreatePReg(x64::RBP, k64BitSize, kRegTyInt); + pushInsn.AddOpndChain(opndFpReg); + cgFunc.GetCurBB()->AppendInsn(pushInsn); + + /* mov %rsp, %rbp */ + MOperator mMovrrOp = x64::MOP_movq_r_r; + Insn ©Insn = cgFunc.GetInsnBuilder()->BuildInsn(mMovrrOp, X64CG::kMd[mMovrrOp]); + RegOperand &opndSpReg = cgFunc.GetOpndBuilder()->CreatePReg(x64::RSP, k64BitSize, kRegTyInt); + copyInsn.AddOpndChain(opndSpReg).AddOpndChain(opndFpReg); + cgFunc.GetCurBB()->AppendInsn(copyInsn); + + /* sub $framesize, %rsp */ + if (cgFunc.GetFunction().HasCall() || cgFunc.HasVLAOrAlloca()) { + MOperator mSubirOp = x64::MOP_subq_i_r; + Insn &subInsn = cgFunc.GetInsnBuilder()->BuildInsn(mSubirOp, X64CG::kMd[mSubirOp]); + auto *memLayout = static_cast(cgFunc.GetMemlayout()); + int64 trueFrameSize = memLayout->StackFrameSize() + + static_cast(cgFunc).SizeOfCalleeSaved(); + ImmOperand &opndImm = cgFunc.GetOpndBuilder()->CreateImm(k32BitSize, trueFrameSize); + subInsn.AddOpndChain(opndImm).AddOpndChain(opndSpReg); + cgFunc.GetCurBB()->AppendInsn(subInsn); + } + + GenerateCalleeSavedRegs(true); + GeneratePushUnnamedVarargRegs(); + + bb.InsertAtBeginning(*x64CGFunc.GetDummyBB()); + x64CGFunc.GetDummyBB()->SetIsProEpilog(false); + cgFunc.SetCurBB(*formerCurBB); +} + +void 
X64GenProEpilog::GenerateEpilog(BB &bb) { + auto &x64CGFunc = static_cast(cgFunc); + BB *formerCurBB = cgFunc.GetCurBB(); + x64CGFunc.GetDummyBB()->ClearInsns(); + x64CGFunc.GetDummyBB()->SetIsProEpilog(true); + cgFunc.SetCurBB(*x64CGFunc.GetDummyBB()); + + GenerateCalleeSavedRegs(false); + + if (cgFunc.GetFunction().HasCall() || cgFunc.HasVLAOrAlloca()) { + /* + * leave equal with + * mov rsp rbp + * pop rbp + */ + MOperator mLeaveOp = x64::MOP_leaveq; + Insn &popInsn = cgFunc.GetInsnBuilder()->BuildInsn(mLeaveOp, X64CG::kMd[mLeaveOp]); + cgFunc.GetCurBB()->AppendInsn(popInsn); + } else { + /* pop %rbp */ + MOperator mPopOp = x64::MOP_popq_r; + Insn &pushInsn = cgFunc.GetInsnBuilder()->BuildInsn(mPopOp, X64CG::kMd[mPopOp]); + RegOperand &opndFpReg = cgFunc.GetOpndBuilder()->CreatePReg(x64::RBP, k64BitSize, kRegTyInt); + pushInsn.AddOpndChain(opndFpReg); + cgFunc.GetCurBB()->AppendInsn(pushInsn); + } + /* ret */ + MOperator mRetOp = x64::MOP_retq; + Insn &retInsn = cgFunc.GetInsnBuilder()->BuildInsn(mRetOp, X64CG::kMd[mRetOp]); + cgFunc.GetCurBB()->AppendInsn(retInsn); + + bb.AppendBBInsns(*x64CGFunc.GetDummyBB()); + x64CGFunc.GetDummyBB()->SetIsProEpilog(false); + cgFunc.SetCurBB(*formerCurBB); +} + +void X64GenProEpilog::Run() { + GenerateProlog(*(cgFunc.GetFirstBB())); + GenerateEpilog(*(cgFunc.GetLastBB())); +} +} /* namespace maplebe */ diff --git a/src/mapleall/maple_be/src/cg/x86_64/x64_reaching.cpp b/src/mapleall/maple_be/src/cg/x86_64/x64_reaching.cpp new file mode 100644 index 0000000000000000000000000000000000000000..ba9d9dbb9793f1357d10517a76db6daab5749cf6 --- /dev/null +++ b/src/mapleall/maple_be/src/cg/x86_64/x64_reaching.cpp @@ -0,0 +1,238 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "x64_reaching.h" +#include "x64_cg.h" +#include "insn.h" +#include "isa.h" +namespace maplebe { +/* find insn using register between startInsn and endInsn +* startInsn and endInsn must be in the same BB. 
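* (Editorial note, added for clarity: the scan below records every use of regNO in
* regUseInsnSet and returns true as soon as the register can no longer carry the same
* value before endInsn, that is, when an instruction defines regNO, when a call is seen
* and regNO is caller-saved, or when a div/idiv is seen and regNO is RAX, which the
* divide reads and rewrites implicitly.)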
+*/ +bool X64ReachingDefinition::FindRegUseBetweenInsn(uint32 regNO, Insn *startInsn, + Insn *endInsn, InsnSet ®UseInsnSet) const { + ASSERT(startInsn->GetBB() == endInsn->GetBB(), "two insns must be in a same BB"); + bool findFinish = false; + if (startInsn == nullptr || endInsn == nullptr) { + return findFinish; + } + for (Insn *insn = startInsn; insn != nullptr && insn != endInsn->GetNext(); insn = insn->GetNext()) { + if (!insn->IsMachineInstruction()) { + continue; + } + /* if insn is call and regNO is caller-saved register, then regNO will not be used later */ + if (insn->IsCall() && IsRegKilledByCallInsn(*insn, regNO)) { + findFinish = true; + } + + if (IsDiv(*insn) && regNO == x64::RAX) { + /* div insn use rax implicitly */ + findFinish = true; + } + + const InsnDesc *md = insn->GetDesc(); + uint32 opndNum = insn->GetOperandSize(); + for (uint32 i = 0; i < opndNum; ++i) { + Operand &opnd = insn->GetOperand(i); + /* handle def or def use */ + auto *regProp = md->opndMD[i]; + if (regProp->IsDef() && opnd.IsRegister() && + (static_cast(opnd).GetRegisterNumber() == regNO)) { + findFinish = true; + } + + if (opnd.IsList()) { + auto &listOpnd = static_cast(opnd); + for (auto listElem : listOpnd.GetOperands()) { + RegOperand *regOpnd = static_cast(listElem); + ASSERT(regOpnd != nullptr, "parameter operand must be RegOperand"); + if (regNO == regOpnd->GetRegisterNumber()) { + (void)regUseInsnSet.insert(insn); + } + } + continue; + } + if (!regProp->IsUse() && !opnd.IsMemoryAccessOperand()) { + continue; + } + + /* handle use */ + if (opnd.IsMemoryAccessOperand()) { + auto &memOpnd = static_cast(opnd); + RegOperand *base = memOpnd.GetBaseRegister(); + RegOperand *index = memOpnd.GetIndexRegister(); + if ((base != nullptr && base->GetRegisterNumber() == regNO) || + (index != nullptr && index->GetRegisterNumber() == regNO)) { + (void)regUseInsnSet.insert(insn); + } + } else if (opnd.IsConditionCode()) { + Operand &rflagOpnd = cgFunc->GetOrCreateRflag(); + RegOperand &rflagReg = static_cast(rflagOpnd); + if (rflagReg.GetRegisterNumber() == regNO) { + (void)regUseInsnSet.insert(insn); + } + } else if (opnd.IsRegister() && (static_cast(opnd).GetRegisterNumber() == regNO)) { + (void)regUseInsnSet.insert(insn); + } + } + if (findFinish) { + break; + } + } + return findFinish; +} + +std::vector X64ReachingDefinition::FindRegDefBetweenInsnGlobal(uint32 regNO, + Insn *startInsn, Insn *endInsn) const { + CHECK_FATAL(false, "x64_reaching analysis not implemented yet!"); + return {}; +} + +std::vector X64ReachingDefinition::FindMemDefBetweenInsn(uint32 offset, + const Insn *startInsn, Insn *endInsn) const { + CHECK_FATAL(false, "x64_reaching analysis not implemented yet!"); + return {}; +} + +bool X64ReachingDefinition::FindRegUseBetweenInsnGlobal(uint32 regNO, Insn *startInsn, Insn *endInsn, BB* movBB) const { + CHECK_FATAL(false, "x64_reaching analysis not implemented yet!"); + return false; +} + +bool X64ReachingDefinition::FindMemUseBetweenInsn(uint32 offset, Insn *startInsn, const Insn *endInsn, + InsnSet &useInsnSet) const { + CHECK_FATAL(false, "x64_reaching analysis not implemented yet!"); + return false; +} + +bool X64ReachingDefinition::HasRegDefBetweenInsnGlobal(uint32 regNO, Insn &startInsn, Insn &endInsn) { + CHECK_FATAL(false, "x64_reaching analysis not implemented yet!"); + return false; +} + +bool X64ReachingDefinition::DFSFindRegDefBetweenBB(const BB &startBB, const BB &endBB, uint32 regNO, + std::vector &visitedBB) const { + CHECK_FATAL(false, "x64_reaching analysis not implemented 
yet!"); + return false; +} + +InsnSet X64ReachingDefinition::FindDefForRegOpnd(Insn &insn, uint32 indexOrRegNO, bool isRegNO) const { + CHECK_FATAL(false, "x64_reaching analysis not implemented yet!"); + return {}; +} + +InsnSet X64ReachingDefinition::FindDefForMemOpnd(Insn &insn, uint32 indexOrOffset, bool isOffset) const { + CHECK_FATAL(false, "x64_reaching analysis not implemented yet!"); + return {}; +} + +InsnSet X64ReachingDefinition::FindUseForMemOpnd(Insn &insn, uint8 index, bool secondMem) const { + CHECK_FATAL(false, "x64_reaching analysis not implemented yet!"); + return {}; +} + +bool X64ReachingDefinition::FindRegUsingBetweenInsn(uint32 regNO, Insn *startInsn, const Insn *endInsn) const { + CHECK_FATAL(false, "x64_reaching analysis not implemented yet!"); + return false; +} + +void X64ReachingDefinition::InitStartGen() { + CHECK_FATAL(false, "x64_reaching analysis not implemented yet!"); + return; +} + +void X64ReachingDefinition::InitEhDefine(BB &bb) { + CHECK_FATAL(false, "x64_reaching analysis not implemented yet!"); + return; +} + +void X64ReachingDefinition::InitGenUse(BB &bb, bool firstTime) { + CHECK_FATAL(false, "x64_reaching analysis not implemented yet!"); + return; +} + +void X64ReachingDefinition::GenAllAsmDefRegs(BB &bb, Insn &insn, uint32 index) { + CHECK_FATAL(false, "x64_reaching analysis not implemented yet!"); + return; +} + +void X64ReachingDefinition::GenAllAsmUseRegs(BB &bb, Insn &insn, uint32 index) { + CHECK_FATAL(false, "x64_reaching analysis not implemented yet!"); + return; +} + +void X64ReachingDefinition::GenAllCallerSavedRegs(BB &bb, Insn &insn) { + CHECK_FATAL(false, "x64_reaching analysis not implemented yet!"); + return; +} + +bool X64ReachingDefinition::KilledByCallBetweenInsnInSameBB(const Insn &startInsn, + const Insn &endInsn, regno_t regNO) const { + CHECK_FATAL(false, "x64_reaching analysis not implemented yet!"); + return false; +} + +void X64ReachingDefinition::AddRetPseudoInsn(BB &bb) { + CHECK_FATAL(false, "x64_reaching analysis not implemented yet!"); + return; +} + +bool X64ReachingDefinition::IsCallerSavedReg(uint32 regNO) const { + CHECK_FATAL(false, "x64_reaching analysis not implemented yet!"); + return false; +} + +void X64ReachingDefinition::FindRegDefInBB(uint32 regNO, BB &bb, InsnSet &defInsnSet) const { + CHECK_FATAL(false, "x64_reaching analysis not implemented yet!"); + return; +} + +void X64ReachingDefinition::FindMemDefInBB(uint32 offset, BB &bb, InsnSet &defInsnSet) const { + CHECK_FATAL(false, "x64_reaching analysis not implemented yet!"); + return; +} + +void X64ReachingDefinition::DFSFindDefForRegOpnd(const BB &startBB, uint32 regNO, std::vector &visitedBB, + InsnSet &defInsnSet) const { + CHECK_FATAL(false, "x64_reaching analysis not implemented yet!"); + return; +} + +void X64ReachingDefinition::DFSFindDefForMemOpnd(const BB &startBB, uint32 offset, std::vector &visitedBB, + InsnSet &defInsnSet) const { + CHECK_FATAL(false, "x64_reaching analysis not implemented yet!"); + return; +} + +int32 X64ReachingDefinition::GetStackSize() const { + CHECK_FATAL(false, "x64_reaching analysis not implemented yet!"); + return 0; +}; + +void X64ReachingDefinition::AddRetPseudoInsns() { + CHECK_FATAL(false, "x64_reaching analysis not implemented yet!"); + return; +}; + +/* reg killed killed by call insn */ +bool X64ReachingDefinition::IsRegKilledByCallInsn(const Insn &insn, regno_t regNO) const { + return x64::IsCallerSaveReg((X64reg)regNO); +} + +bool X64ReachingDefinition::IsDiv(const Insn &insn) const { + MOperator mOp = 
insn.GetMachineOpcode(); + return (MOP_idivw_r <= mOp && mOp <= MOP_divq_m); +} + +} /* namespace maplebe */ diff --git a/src/mapleall/maple_be/src/cg/x86_64/x64_reg_info.cpp b/src/mapleall/maple_be/src/cg/x86_64/x64_reg_info.cpp new file mode 100644 index 0000000000000000000000000000000000000000..63a3460430f5067208df4fd0734a243fc5348901 --- /dev/null +++ b/src/mapleall/maple_be/src/cg/x86_64/x64_reg_info.cpp @@ -0,0 +1,147 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "x64_reg_info.h" +#include "x64_cgfunc.h" +#include "x64_cg.h" + +namespace maplebe { +using namespace maple; +using namespace x64; +void X64RegInfo::Init() { + for (regno_t regNO = kRinvalid; regNO < kMaxRegNum; ++regNO) { + /* when yieldpoint is enabled, the RYP(R12) can not be used. */ + if (IsYieldPointReg(static_cast(regNO))) { + continue; + } + if (!x64::IsAvailableReg(static_cast(regNO))) { + continue; + } + if (x64::IsGPRegister(static_cast(regNO))) { + AddToIntRegs(regNO); + } else { + AddToFpRegs(regNO); + } + AddToAllRegs(regNO); + } + return; +} + +void X64RegInfo::SaveCalleeSavedReg(MapleSet savedRegs) { + X64CGFunc *x64CGFunc = static_cast(GetCurrFunction()); + for (auto reg: savedRegs) { + x64CGFunc->AddtoCalleeSaved(static_cast(reg)); + } +} + +bool X64RegInfo::IsSpecialReg(regno_t regno) const { + X64reg reg = static_cast(regno); + if ((reg == RBP) || (reg == RSP)) { + return true; + } + + /* when yieldpoint is enabled, the dedicated register(RYP) can not be allocated. */ + if (IsYieldPointReg(reg)) { + return true; + } + return false; +} + +bool X64RegInfo::IsCalleeSavedReg(regno_t regno) const { + return x64::IsCalleeSavedReg(static_cast(regno)); +} + +bool X64RegInfo::IsYieldPointReg(regno_t regno) const { + return false; +} + +bool X64RegInfo::IsUnconcernedReg(regno_t regNO) const { + /* when yieldpoint is enabled, the RYP(R12) can not be used. 
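 * (Editorial note: X64RegInfo::IsYieldPointReg above always returns false, so this
 * yield-point reservation is currently a no-op for x86-64.)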
*/ + if (IsYieldPointReg(static_cast(regNO))) { + return true; + } + return false; +} + +bool X64RegInfo::IsUnconcernedReg(const RegOperand ®Opnd) const { + RegType regType = regOpnd.GetRegisterType(); + if (regType == kRegTyCc || regType == kRegTyVary) { + return true; + } + uint32 regNO = regOpnd.GetRegisterNumber(); + return IsUnconcernedReg(regNO); +} + +void X64RegInfo::Fini() { +} + +RegOperand *X64RegInfo::GetOrCreatePhyRegOperand(regno_t regNO, uint32 size, RegType kind, uint32 flag) { + return &(GetCurrFunction()->GetOpndBuilder()->CreatePReg(regNO, size, kind)); +} + +Insn *X64RegInfo::BuildStrInsn(uint32 regSize, PrimType stype, RegOperand &phyOpnd, MemOperand &memOpnd) { + X64MOP_t mOp = x64::MOP_begin; + switch (regSize) { + case k8BitSize: + mOp = x64::MOP_movb_r_m; + break; + case k16BitSize: + mOp = x64::MOP_movw_r_m; + break; + case k32BitSize: + mOp = x64::MOP_movl_r_m; + break; + case k64BitSize: + mOp = x64::MOP_movq_r_m; + break; + default: + CHECK_FATAL(false, "NIY"); + break; + } + Insn &insn = GetCurrFunction()->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp]); + insn.AddOpndChain(phyOpnd).AddOpndChain(memOpnd); + return &insn; +} + +Insn *X64RegInfo::BuildLdrInsn(uint32 regSize, PrimType stype, RegOperand &phyOpnd, MemOperand &memOpnd) { + X64MOP_t mOp = x64::MOP_begin; + switch (regSize) { + case k8BitSize: + mOp = x64::MOP_movb_m_r; + break; + case k16BitSize: + mOp = x64::MOP_movw_m_r; + break; + case k32BitSize: + mOp = x64::MOP_movl_m_r; + break; + case k64BitSize: + mOp = x64::MOP_movq_m_r; + break; + default: + CHECK_FATAL(false, "NIY"); + break; + } + Insn &insn = GetCurrFunction()->GetInsnBuilder()->BuildInsn(mOp, X64CG::kMd[mOp]); + insn.AddOpndChain(memOpnd).AddOpndChain(phyOpnd); + return &insn; +} + +MemOperand *X64RegInfo::AdjustMemOperandIfOffsetOutOfRange(MemOperand *memOpnd, regno_t vrNum, + bool isDest, Insn &insn, regno_t regNum, bool &isOutOfRange) { + isOutOfRange = false; + return memOpnd; +} + +} /* namespace maplebe */ diff --git a/src/mapleall/maple_be/src/cg/x86_64/x64_standardize.cpp b/src/mapleall/maple_be/src/cg/x86_64/x64_standardize.cpp new file mode 100644 index 0000000000000000000000000000000000000000..a506e0e28fdff46c7bcdaef3fee4bcd6f93fe861 --- /dev/null +++ b/src/mapleall/maple_be/src/cg/x86_64/x64_standardize.cpp @@ -0,0 +1,186 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ + +#include "x64_standardize.h" +#include "x64_isa.h" +#include "x64_cg.h" +#include "insn.h" + +namespace maplebe { +#define DEFINE_MAPPING(ABSTRACT_IR, X64_MOP, ...) 
{ABSTRACT_IR, X64_MOP}, +std::unordered_map x64AbstractMapping = { +#include "x64_abstract_mapping.def" +}; + +static inline X64MOP_t GetMopFromAbstraceIRMop(MOperator mOp) { + auto iter = x64AbstractMapping.find(mOp); + if (iter == x64AbstractMapping.end()) { + CHECK_FATAL(false, "NIY mapping"); + } + CHECK_FATAL(iter->second != x64::MOP_begin, "NIY mapping"); + return iter->second; +} + +void X64Standardize::StdzMov(maplebe::Insn &insn) { + X64MOP_t directlyMappingMop = GetMopFromAbstraceIRMop(insn.GetMachineOpcode()); + insn.SetMOP(X64CG::kMd[directlyMappingMop]); + insn.CommuteOperands(kInsnFirstOpnd, kInsnSecondOpnd); +} + +void X64Standardize::StdzStrLdr(Insn &insn) { + /* abstract ir store is in same order with x86, so reverse twice */ + if (insn.IsStore()) { + insn.CommuteOperands(kInsnFirstOpnd, kInsnSecondOpnd); + } + StdzMov(insn); +} + +void X64Standardize::StdzBasicOp(Insn &insn) { + X64MOP_t directlyMappingMop = GetMopFromAbstraceIRMop(insn.GetMachineOpcode()); + insn.SetMOP(X64CG::kMd[directlyMappingMop]); + Operand &dest = insn.GetOperand(kInsnFirstOpnd); + Operand &src2 = insn.GetOperand(kInsnThirdOpnd); + insn.CleanAllOperand(); + insn.AddOpndChain(src2).AddOpndChain(dest); +} + +void X64Standardize::StdzUnaryOp(Insn &insn, CGFunc &cgFunc) { + MOperator mOp = insn.GetMachineOpcode(); + if (mOp == abstract::MOP_neg_f_32 || mOp == abstract::MOP_neg_f_64) { + StdzFloatingNeg(insn, cgFunc); + return; + } + X64MOP_t directlyMappingMop = GetMopFromAbstraceIRMop(insn.GetMachineOpcode()); + insn.SetMOP(X64CG::kMd[directlyMappingMop]); + Operand &dest = insn.GetOperand(kInsnFirstOpnd); + insn.CleanAllOperand(); + insn.AddOpndChain(dest); +} + +void X64Standardize::StdzCvtOp(Insn &insn, CGFunc &cgFunc) { + uint32 OpndDesSize = insn.GetDesc()->GetOpndDes(kInsnFirstOpnd)->GetSize(); + uint32 destSize = OpndDesSize; + uint32 OpndSrcSize = insn.GetDesc()->GetOpndDes(kInsnSecondOpnd)->GetSize(); + uint32 srcSize = OpndSrcSize; + switch (insn.GetMachineOpcode()) { + case abstract::MOP_zext_rr_64_8: + case abstract::MOP_zext_rr_64_16: + case abstract::MOP_zext_rr_64_32: + destSize = k32BitSize; + break; + case abstract::MOP_cvt_fr_u32: + srcSize = k64BitSize; + break; + case abstract::MOP_cvt_rf_u32: + destSize = k64BitSize; + break; + default: + break; + } + MOperator directlyMappingMop = GetMopFromAbstraceIRMop(insn.GetMachineOpcode()); + if (directlyMappingMop != abstract::MOP_undef) { + insn.SetMOP(X64CG::kMd[directlyMappingMop]); + Operand *opnd0 = &insn.GetOperand(kInsnSecondOpnd); + RegOperand *src = static_cast(opnd0); + if (srcSize != OpndSrcSize) { + src = &cgFunc.GetOpndBuilder()->CreateVReg(src->GetRegisterNumber(), + srcSize, src->GetRegisterType()); + } + Operand *opnd1 = &insn.GetOperand(kInsnFirstOpnd); + RegOperand *dest = static_cast(opnd1); + if (destSize != OpndDesSize) { + dest = &cgFunc.GetOpndBuilder()->CreateVReg(dest->GetRegisterNumber(), + destSize, dest->GetRegisterType()); + } + insn.CleanAllOperand(); + insn.AddOpndChain(*src).AddOpndChain(*dest); + } else { + CHECK_FATAL(false, "NIY mapping"); + } +} + +/* x86 does not have floating point neg instruction + * neg_f operand0 operand1 + * ==> + * movd xmm0 R1 + * 64: movabsq 0x8000000000000000 R2 + * xorq R2 R1 + * 32: xorl 0x80000000 R1 + * movd R1 xmm0 +*/ +void X64Standardize::StdzFloatingNeg(Insn &insn, CGFunc &cgFunc) { + MOperator mOp = insn.GetMachineOpcode(); + uint32 bitSize = mOp == abstract::MOP_neg_f_32 ? 
k32BitSize : k64BitSize; + + // mov dest -> tmpOperand0 + MOperator movOp = mOp == abstract::MOP_neg_f_32 ? x64::MOP_movd_fr_r : x64::MOP_movq_fr_r; + RegOperand *tmpOperand0 = &cgFunc.GetOpndBuilder()->CreateVReg(bitSize, kRegTyInt); + Insn &movInsn0 = cgFunc.GetInsnBuilder()->BuildInsn(movOp, X64CG::kMd[movOp]); + Operand &dest = insn.GetOperand(kInsnFirstOpnd); + movInsn0.AddOpndChain(dest).AddOpndChain(*tmpOperand0); + insn.GetBB()->InsertInsnBefore(insn, movInsn0); + + // 32 : xorl 0x80000000 tmpOperand0 + // 64 : movabs 0x8000000000000000 tmpOperand1 + // xorq tmpOperand1 tmpOperand0 + ImmOperand &imm = cgFunc.GetOpndBuilder()->CreateImm(bitSize, (static_cast(1) << (bitSize - 1))); + if (mOp == abstract::MOP_neg_f_64) { + Operand *tmpOperand1 = &cgFunc.GetOpndBuilder()->CreateVReg(k64BitSize, kRegTyInt); + Insn &movabs = cgFunc.GetInsnBuilder()->BuildInsn(x64::MOP_movabs_i_r, X64CG::kMd[x64::MOP_movabs_i_r]); + movabs.AddOpndChain(imm).AddOpndChain(*tmpOperand1); + insn.GetBB()->InsertInsnBefore(insn, movabs); + + MOperator xorOp = x64::MOP_xorq_r_r; + Insn &xorq = cgFunc.GetInsnBuilder()->BuildInsn(xorOp, X64CG::kMd[xorOp]); + xorq.AddOpndChain(*tmpOperand1).AddOpndChain(*tmpOperand0); + insn.GetBB()->InsertInsnBefore(insn, xorq); + } else { + MOperator xorOp = x64::MOP_xorl_i_r; + Insn &xorq = cgFunc.GetInsnBuilder()->BuildInsn(xorOp, X64CG::kMd[xorOp]); + xorq.AddOpndChain(imm).AddOpndChain(*tmpOperand0); + insn.GetBB()->InsertInsnBefore(insn, xorq); + } + + // mov tmpOperand0 -> dest + Insn &movq = cgFunc.GetInsnBuilder()->BuildInsn(movOp, X64CG::kMd[movOp]); + movq.AddOpndChain(*tmpOperand0).AddOpndChain(dest); + insn.GetBB()->InsertInsnBefore(insn, movq); + + insn.GetBB()->RemoveInsn(insn); + return; +} + +void X64Standardize::StdzShiftOp(Insn &insn, CGFunc &cgFunc) { + RegOperand *countOpnd = static_cast(&insn.GetOperand(kInsnThirdOpnd)); + /* count operand cvt -> PTY_u8 */ + if (countOpnd->GetSize() != GetPrimTypeBitSize(PTY_u8)) { + countOpnd = &cgFunc.GetOpndBuilder()->CreateVReg(countOpnd->GetRegisterNumber(), + GetPrimTypeBitSize(PTY_u8), countOpnd->GetRegisterType()); + } + /* copy count operand to cl(rcx) register */ + RegOperand &clOpnd = cgFunc.GetOpndBuilder()->CreatePReg(x64::RCX, GetPrimTypeBitSize(PTY_u8), kRegTyInt); + X64MOP_t copyMop = x64::MOP_movb_r_r; + Insn ©Insn = cgFunc.GetInsnBuilder()->BuildInsn(copyMop, X64CG::kMd[copyMop]); + copyInsn.AddOpndChain(*countOpnd).AddOpndChain(clOpnd); + insn.GetBB()->InsertInsnBefore(insn, copyInsn); + /* shift OP */ + X64MOP_t directlyMappingMop = GetMopFromAbstraceIRMop(insn.GetMachineOpcode()); + insn.SetMOP(X64CG::kMd[directlyMappingMop]); + RegOperand &destOpnd = static_cast(insn.GetOperand(kInsnFirstOpnd)); + insn.CleanAllOperand(); + insn.AddOpndChain(clOpnd).AddOpndChain(destOpnd); +} + +} diff --git a/src/mapleall/maple_be/src/cg/yieldpoint.cpp b/src/mapleall/maple_be/src/cg/yieldpoint.cpp new file mode 100644 index 0000000000000000000000000000000000000000..082e19251ba3f836f8c0fa5e08608d459d75738e --- /dev/null +++ b/src/mapleall/maple_be/src/cg/yieldpoint.cpp @@ -0,0 +1,42 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
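Editorial note: the StdzFloatingNeg lowering above is the classic sign-bit trick, needed because x86-64 has no floating-point negate instruction. The same computation on plain values, as a self-contained sketch:

    #include <cstdint>
    #include <cstring>

    // Negate a double by flipping its sign bit, the scalar analogue of
    // movq xmm -> gpr; xor 0x8000000000000000; movq gpr -> xmm.
    double NegateViaSignBit(double x) {
      uint64_t bits;
      std::memcpy(&bits, &x, sizeof(bits));
      bits ^= static_cast<uint64_t>(1) << 63;
      std::memcpy(&x, &bits, sizeof(bits));
      return x;
    }

For the 32-bit case the generated code xors with 0x80000000 directly; only the 64-bit constant needs the extra movabs into a temporary register, because xorq cannot take a 64-bit immediate.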
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "yieldpoint.h" +#include "loop.h" +#if TARGAARCH64 +#include "aarch64_yieldpoint.h" +#elif TARGRISCV64 +#include "riscv64_yieldpoint.h" +#endif +#if TARGARM32 +#include "arm32_yieldpoint.h" +#endif +#include "cgfunc.h" + +namespace maplebe { +using namespace maple; + +bool CgYieldPointInsertion::PhaseRun(maplebe::CGFunc &f) { + YieldPointInsertion *yieldPoint = nullptr; + (void)GetAnalysisInfoHook()->ForceRunAnalysisPhase, CGFunc>(&CgLoopAnalysis::id, f); +#if TARGAARCH64 || TARGRISCV64 + yieldPoint = GetPhaseAllocator()->New(f); +#endif +#if TARGARM32 + yieldPoint = GetPhaseAllocator()->New(f); +#endif + yieldPoint->Run(); + return false; +} +} /* namespace maplebe */ diff --git a/src/mapleall/maple_driver/BUILD.gn b/src/mapleall/maple_driver/BUILD.gn new file mode 100644 index 0000000000000000000000000000000000000000..009891ecc246174732c62bcb8c4f0034fca0bfc1 --- /dev/null +++ b/src/mapleall/maple_driver/BUILD.gn @@ -0,0 +1,101 @@ +# +#Copyright(c)[2020 - 2021] Huawei Technologies Co., Ltd.All rights reserved. +# +#OpenArkCompiler is licensed under Mulan PSL v2. +#You can use this software according to the terms and conditions of the Mulan PSL v2. +#You may obtain a copy of Mulan PSL v2 at: +# +#http: // license.coscl.org.cn/MulanPSL2 +# +#THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +#EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON - INFRINGEMENT, MERCHANTABILITY OR +#FIT FOR A PARTICULAR PURPOSE. +#See the Mulan PSL v2 for more details. 
+# +configs = [ "${MAPLEALL_ROOT}:mapleallcompilecfg" ] + +include_directories = [ + "${MAPLEALL_ROOT}", + "${MAPLEALL_ROOT}/maple_be/include", + "${MAPLEALL_ROOT}/maple_be/include/cg", + "${MAPLEALL_ROOT}/maple_be/include/be", + "${MAPLEALL_ROOT}/maple_be/include/ad", + "${MAPLEALL_ROOT}/maple_be/include/ad/target", + "${MAPLE_BUILD_OUTPUT}/common/target", + "${MAPLEALL_ROOT}/maple_be/include/cg/aarch64", + "${MAPLEALL_ROOT}/maple_be/include/be/aarch64", + "${MAPLEALL_ROOT}/maple_driver/defs", + "${MAPLEALL_ROOT}/maple_driver/include", + "${MAPLEALL_ROOT}/maple_util/include", + "${MAPLEALL_ROOT}/mpl2mpl/include", + "${MAPLEALL_ROOT}/maple_me/include", + "${MAPLEALL_ROOT}/maple_ipa/include", + "${MAPLEALL_ROOT}/maple_ipa/include/old", + "${MAPLEALL_ROOT}/maple_phase/include", + "${MAPLEALL_ROOT}/maple_ir/include", + "${MAPLEALL_ROOT}/mempool/include", + "${THIRD_PARTY_ROOT}/bounds_checking_function/include", + "${THIRD_PARTY_ROOT}/llvm_modified/llvm/include/llvm/BinaryFormat", +] + +executable("maple") { + sources = [ + "src/as_compiler.cpp", + "src/compiler.cpp", + "src/compiler_factory.cpp", + "src/dex2mpl_compiler.cpp", + "src/driver_runner.cpp", + "src/ipa_compiler.cpp", + "src/jbc2mpl_compiler.cpp", + "src/cpp2mpl_compiler.cpp", + "src/clang_compiler.cpp", + "src/ld_compiler.cpp", + "src/maple.cpp", + "src/maple_comb_compiler_wrapper.cpp", + "src/maple_comb_compiler.cpp", + "src/mpl_options.cpp", + "src/mplcg_compiler.cpp", + "src/hided_options.cpp", + ] + + include_dirs = include_directories + + deps = [ + ":libdriver_option", + ":libmaple_driver", + "${MAPLEALL_ROOT}/maple_be:libcg", + "${MAPLEALL_ROOT}/maple_be:libmplad", + "${MAPLEALL_ROOT}/maple_be:libmplbe", + "${MAPLEALL_ROOT}/maple_ipa:libmplipa", + "${MAPLEALL_ROOT}/maple_ir:libmplir", + "${MAPLEALL_ROOT}/maple_me:libmplme", + "${MAPLEALL_ROOT}/maple_me:libmplmewpo", + "${MAPLEALL_ROOT}/maple_phase:libmplphase", + "${MAPLEALL_ROOT}/maple_phase:libmplphase", + "${MAPLEALL_ROOT}/maple_util:libmplutil", + "${MAPLEALL_ROOT}/maple_util:libcommandline", + "${MAPLEALL_ROOT}/mempool:libmempool", + "${MAPLEALL_ROOT}/mpl2mpl:libmpl2mpl", + "${THIRD_PARTY_ROOT}/bounds_checking_function:libHWSecureC", + ] +} + +static_library("libmaple_driver") { + sources = [ "src/triple.cpp" ] + + include_dirs = [ "${MAPLEALL_ROOT}/maple_driver/include", + "${MAPLEALL_ROOT}/maple_util/include" + ] + + output_dir = "${root_out_dir}/lib/${HOST_ARCH}" +} + +static_library("libdriver_option") { + sources = [ "src/driver_options.cpp" ] + + include_dirs = [ "${MAPLEALL_ROOT}/maple_driver/include", + "${MAPLEALL_ROOT}/maple_util/include" + ] + + output_dir = "${root_out_dir}/lib/${HOST_ARCH}" +} diff --git a/src/mapleall/maple_driver/CMakeLists.txt b/src/mapleall/maple_driver/CMakeLists.txt new file mode 100755 index 0000000000000000000000000000000000000000..f2d9dc73dd2a1893ee46bbbf598f7a2cd802fbe7 --- /dev/null +++ b/src/mapleall/maple_driver/CMakeLists.txt @@ -0,0 +1,104 @@ +# +# Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN AS IS BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+# + +set(inc_libmaple + ${MAPLEALL_ROOT} + ${MAPLEALL_ROOT}/maple_be/include + ${MAPLEALL_ROOT}/maple_be/include/cg + ${MAPLEALL_ROOT}/maple_be/include/be + ${MAPLEALL_ROOT}/maple_be/include/ad + ${MAPLEALL_ROOT}/maple_be/include/ad/target + ${MAPLE_BUILD_OUTPUT}/common/target + ${MAPLEALL_ROOT}/maple_be/include/cg/aarch64 + ${MAPLEALL_ROOT}/maple_be/include/be/aarch64 + ${MAPLEALL_ROOT}/maple_driver/defs + ${MAPLEALL_ROOT}/maple_driver/include + ${MAPLEALL_ROOT}/maple_util/include + ${MAPLEALL_ROOT}/mpl2mpl/include + ${MAPLEALL_ROOT}/maple_me/include + ${MAPLEALL_ROOT}/maple_ipa/include + ${MAPLEALL_ROOT}/maple_ipa/include/old + ${MAPLEALL_ROOT}/maple_phase/include + ${MAPLEALL_ROOT}/maple_ir/include + ${MAPLEALL_ROOT}/mempool/include + ${THIRD_PARTY_ROOT}/bounds_checking_function/include + ${THIRD_PARTY_ROOT}/llvm_modified/llvm/include/llvm/BinaryFormat +) + +set(src_libmaple + src/as_compiler.cpp + src/compiler.cpp + src/compiler_factory.cpp + src/dex2mpl_compiler.cpp + src/driver_runner.cpp + src/ipa_compiler.cpp + src/jbc2mpl_compiler.cpp + src/cpp2mpl_compiler.cpp + src/clang_compiler.cpp + src/ld_compiler.cpp + src/maple.cpp + src/maple_comb_compiler_wrapper.cpp + src/maple_comb_compiler.cpp + src/mpl_options.cpp + src/mplcg_compiler.cpp + src/hided_options.cpp +) + +#libmaple +add_library(libmaple STATIC ${src_libmaple}) +set_target_properties(libmaple PROPERTIES + COMPILE_FLAGS "" + INCLUDE_DIRECTORIES "${inc_libmaple}" + LINK_LIBRARIES "" + RUNTIME_OUTPUT_DIRECTORY "${MAPLE_BUILD_OUTPUT}/lib/${HOST_ARCH}" +) + +set(src_libmaple_driver + src/triple.cpp +) + +set(inc_libmaple_driver + ${MAPLEALL_ROOT}/maple_driver/include + ${MAPLEALL_ROOT}/maple_util/include +) + +#libmaple_driver +add_library(libmaple_driver STATIC ${src_libmaple_driver}) +set_target_properties(libmaple_driver PROPERTIES + COMPILE_FLAGS "" + INCLUDE_DIRECTORIES "${inc_libmaple_driver}" + LINK_LIBRARIES "" + ARCHIVE_OUTPUT_DIRECTORY "${MAPLE_BUILD_OUTPUT}/lib/${HOST_ARCH}" +) + +set(src_libdriver_option + src/driver_options.cpp +) + +set(inc_libdriver_option + ${MAPLEALL_ROOT}/maple_driver/include + ${MAPLEALL_ROOT}/maple_util/include +) + +#libdriver_option +add_library(libdriver_option STATIC ${src_libdriver_option}) +set_target_properties(libdriver_option PROPERTIES + COMPILE_FLAGS "" + INCLUDE_DIRECTORIES "${inc_libdriver_option}" + LINK_LIBRARIES "" + ARCHIVE_OUTPUT_DIRECTORY "${MAPLE_BUILD_OUTPUT}/lib/${HOST_ARCH}" +) + + diff --git a/src/mapleall/maple_driver/defs/default/O0_options_clang.def b/src/mapleall/maple_driver/defs/default/O0_options_clang.def new file mode 100644 index 0000000000000000000000000000000000000000..fb70797f01067addccfdeb08a9b7a1d6fd843524 --- /dev/null +++ b/src/mapleall/maple_driver/defs/default/O0_options_clang.def @@ -0,0 +1 @@ +{"-emit-ast", "", false}, \ No newline at end of file diff --git a/src/mapleall/maple_driver/defs/default/O0_options_cpp2mpl.def b/src/mapleall/maple_driver/defs/default/O0_options_cpp2mpl.def new file mode 100644 index 0000000000000000000000000000000000000000..04bbc03d702e2d0ceb7b15a16077132a597ff6b5 --- /dev/null +++ b/src/mapleall/maple_driver/defs/default/O0_options_cpp2mpl.def @@ -0,0 +1 @@ +{"--enable-variable-array", "", false}, diff --git a/src/mapleall/maple_driver/defs/default/O0_options_dex2mpl.def b/src/mapleall/maple_driver/defs/default/O0_options_dex2mpl.def new file mode 100644 index 0000000000000000000000000000000000000000..2878491cab305fe95e99cca35ce2d8187761ccd0 --- /dev/null +++ 
b/src/mapleall/maple_driver/defs/default/O0_options_dex2mpl.def @@ -0,0 +1,4 @@ +// option name, option value, append maple root path? +{ "-j100", "", false }, +{ "-litprofile", "out/target/product/maple_arm64/lib/codetricks/profile/meta.list", true }, +{ "-refine-catch", "", false }, diff --git a/src/mapleall/maple_driver/defs/default/O0_options_jbc2mpl.def b/src/mapleall/maple_driver/defs/default/O0_options_jbc2mpl.def new file mode 100644 index 0000000000000000000000000000000000000000..4d82bd2e54cfc7894a41d3c87013c5904f03c759 --- /dev/null +++ b/src/mapleall/maple_driver/defs/default/O0_options_jbc2mpl.def @@ -0,0 +1,15 @@ +/* + * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan PSL v1. + * You can use this software according to the terms and conditions of the Mulan PSL v1. + * You may obtain a copy of Mulan PSL v1 at: + * + * http://license.coscl.org.cn/MulanPSL + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v1 for more details. + */ +// option name, option value, append maple root path? diff --git a/src/mapleall/maple_driver/defs/default/O0_options_ld.def b/src/mapleall/maple_driver/defs/default/O0_options_ld.def new file mode 100644 index 0000000000000000000000000000000000000000..9e26dfeeb6e641a33dae4961196235bdb965b21b --- /dev/null +++ b/src/mapleall/maple_driver/defs/default/O0_options_ld.def @@ -0,0 +1 @@ +{} \ No newline at end of file diff --git a/src/mapleall/maple_driver/defs/default/O0_options_me.def b/src/mapleall/maple_driver/defs/default/O0_options_me.def new file mode 100644 index 0000000000000000000000000000000000000000..ddb4ef6084bcae13795a469adaa0f751c1f6d481 --- /dev/null +++ b/src/mapleall/maple_driver/defs/default/O0_options_me.def @@ -0,0 +1,16 @@ +/* + * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan PSL v1. + * You can use this software according to the terms and conditions of the Mulan PSL v1. + * You may obtain a copy of Mulan PSL v1 at: + * + * http://license.coscl.org.cn/MulanPSL + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v1 for more details. + */ +// option name, option value, append maple root path? +{ "--quiet", "", false }, diff --git a/src/mapleall/maple_driver/defs/default/O0_options_mpl2mpl.def b/src/mapleall/maple_driver/defs/default/O0_options_mpl2mpl.def new file mode 100644 index 0000000000000000000000000000000000000000..bda5ba0de368fd40763d76d96f8ce8585cafb599 --- /dev/null +++ b/src/mapleall/maple_driver/defs/default/O0_options_mpl2mpl.def @@ -0,0 +1,20 @@ +/* + * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan PSL v1. + * You can use this software according to the terms and conditions of the Mulan PSL v1. + * You may obtain a copy of Mulan PSL v1 at: + * + * http://license.coscl.org.cn/MulanPSL + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. 
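Editorial note: each row in these .def files is a triple of option name, option value, and a flag saying whether the value should be resolved against the maple root directory, as the recurring header comment states. How the driver ingests them is not shown in this patch; the following is only a plausible sketch of the usual include-into-an-initializer pattern, and every name in it (DefaultOptionRow, kO0Dex2mplOptions, ResolveValue) is invented for illustration.

    #include <string>

    struct DefaultOptionRow {
      const char *name;      // e.g. "-litprofile"
      const char *value;     // e.g. a profile or list path relative to the maple root
      bool appendMapleRoot;  // resolve value against the maple root path when true
    };

    static const DefaultOptionRow kO0Dex2mplOptions[] = {
    #include "default/O0_options_dex2mpl.def"
    };

    std::string ResolveValue(const DefaultOptionRow &row, const std::string &mapleRoot) {
      return row.appendMapleRoot ? mapleRoot + "/" + row.value : std::string(row.value);
    }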
+ * See the Mulan PSL v1 for more details. + */ +// option name, option value, append maple root path? +{ "--quiet", "", false }, +{ "-regnativefunc", "", false }, +{ "--maplelinker", "", false }, +{ "--profile", "out/target/product/maple_arm64/lib/codetricks/profile.pv/meta.list", true }, +{ "--maplelinker-nolocal", "", false }, diff --git a/src/mapleall/maple_driver/defs/default/O0_options_mpl2mpl_c.def b/src/mapleall/maple_driver/defs/default/O0_options_mpl2mpl_c.def new file mode 100644 index 0000000000000000000000000000000000000000..c015aa6e5225dc457651ad1c1e5570cee881ba39 --- /dev/null +++ b/src/mapleall/maple_driver/defs/default/O0_options_mpl2mpl_c.def @@ -0,0 +1,16 @@ +/* + * Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan PSL v1. + * You can use this software according to the terms and conditions of the Mulan PSL v1. + * You may obtain a copy of Mulan PSL v1 at: + * + * http://license.coscl.org.cn/MulanPSL + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v1 for more details. + */ +// option name, option value, append maple root path? +{ "--quiet", "", false }, diff --git a/src/mapleall/maple_driver/defs/default/O0_options_mplcg.def b/src/mapleall/maple_driver/defs/default/O0_options_mplcg.def new file mode 100644 index 0000000000000000000000000000000000000000..c92727c76559c1b50577a3aeb6a8e2b27fb7dce9 --- /dev/null +++ b/src/mapleall/maple_driver/defs/default/O0_options_mplcg.def @@ -0,0 +1,21 @@ +/* + * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan PSL v1. + * You can use this software according to the terms and conditions of the Mulan PSL v1. + * You may obtain a copy of Mulan PSL v1 at: + * + * http://license.coscl.org.cn/MulanPSL + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v1 for more details. + */ +// option name, option value, append maple root path? +{ "--quiet", "", false }, +{ "--no-pie", "", false }, +{ "--fpic", "", false }, +{ "--verbose-asm", "", false }, +{ "--maplelinker", "", false }, +{ "--duplicate_asm_list", "out/target/product/maple_arm64/lib/codetricks/asm/duplicateFunc.s", true }, diff --git a/src/mapleall/maple_driver/defs/default/O0_options_mplcg_c.def b/src/mapleall/maple_driver/defs/default/O0_options_mplcg_c.def new file mode 100644 index 0000000000000000000000000000000000000000..1ab10cf1a3f4000518600963eb32399065e8beb0 --- /dev/null +++ b/src/mapleall/maple_driver/defs/default/O0_options_mplcg_c.def @@ -0,0 +1,18 @@ +/* + * Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan PSL v1. + * You can use this software according to the terms and conditions of the Mulan PSL v1. + * You may obtain a copy of Mulan PSL v1 at: + * + * http://license.coscl.org.cn/MulanPSL + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v1 for more details. 
+ */ +// option name, option value, append maple root path? +{ "-O0", "", false }, +{ "--quiet", "", false }, +{ "--fpic", "", false }, diff --git a/src/mapleall/maple_driver/defs/default/O2_options_dex2mpl.def b/src/mapleall/maple_driver/defs/default/O2_options_dex2mpl.def new file mode 100644 index 0000000000000000000000000000000000000000..2878491cab305fe95e99cca35ce2d8187761ccd0 --- /dev/null +++ b/src/mapleall/maple_driver/defs/default/O2_options_dex2mpl.def @@ -0,0 +1,4 @@ +// option name, option value, append maple root path? +{ "-j100", "", false }, +{ "-litprofile", "out/target/product/maple_arm64/lib/codetricks/profile/meta.list", true }, +{ "-refine-catch", "", false }, diff --git a/src/mapleall/maple_driver/defs/default/O2_options_me.def b/src/mapleall/maple_driver/defs/default/O2_options_me.def new file mode 100644 index 0000000000000000000000000000000000000000..47d61b942e8d94dc16a89820ec14811c9d4aa398 --- /dev/null +++ b/src/mapleall/maple_driver/defs/default/O2_options_me.def @@ -0,0 +1,21 @@ +/* + * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan PSL v1. + * You can use this software according to the terms and conditions of the Mulan PSL v1. + * You may obtain a copy of Mulan PSL v1 at: + * + * http://license.coscl.org.cn/MulanPSL + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v1 for more details. + */ +// option name, option value, append maple root path? +{ "-O2", "", false }, +{ "--quiet", "", false }, +{ "--inlinefunclist", "out/target/product/maple_arm64/lib/codetricks/profile.pv/inline_funcs.list", true }, +{ "--no-nativeopt", "", false }, +{ "--no-ignoreipa", "", false }, +{ "--enable-ea", "", false }, diff --git a/src/mapleall/maple_driver/defs/default/O2_options_me_c.def b/src/mapleall/maple_driver/defs/default/O2_options_me_c.def new file mode 100644 index 0000000000000000000000000000000000000000..8ed5de607fd8829c860c032deeeba28d8d62b122 --- /dev/null +++ b/src/mapleall/maple_driver/defs/default/O2_options_me_c.def @@ -0,0 +1,17 @@ +/* + * Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan PSL v1. + * You can use this software according to the terms and conditions of the Mulan PSL v1. + * You may obtain a copy of Mulan PSL v1 at: + * + * http://license.coscl.org.cn/MulanPSL + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v1 for more details. + */ +// option name, option value, append maple root path? +{ "-O2", "", false }, +{ "--quiet", "", false }, diff --git a/src/mapleall/maple_driver/defs/default/O2_options_mpl2mpl.def b/src/mapleall/maple_driver/defs/default/O2_options_mpl2mpl.def new file mode 100644 index 0000000000000000000000000000000000000000..b74863dcc076f8c50f6db0fba121730d11a6c773 --- /dev/null +++ b/src/mapleall/maple_driver/defs/default/O2_options_mpl2mpl.def @@ -0,0 +1,22 @@ +/* + * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan PSL v1. + * You can use this software according to the terms and conditions of the Mulan PSL v1. 
+ * You may obtain a copy of Mulan PSL v1 at: + * + * http://license.coscl.org.cn/MulanPSL + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v1 for more details. + */ +// option name, option value, append maple root path? +{ "-O2", "", false }, +{ "--quiet", "", false }, +{ "--regnativefunc", "", false }, +{ "--no-nativeopt", "", false }, +{ "--maplelinker", "", false }, +{ "--profile", "out/target/product/maple_arm64/lib/codetricks/profile.pv/meta.list", true }, +{ "--maplelinker-nolocal", "", false }, \ No newline at end of file diff --git a/src/mapleall/maple_driver/defs/default/O2_options_mpl2mpl_c.def b/src/mapleall/maple_driver/defs/default/O2_options_mpl2mpl_c.def new file mode 100644 index 0000000000000000000000000000000000000000..83c259a8086271412158d2716f8bfe626c2259d0 --- /dev/null +++ b/src/mapleall/maple_driver/defs/default/O2_options_mpl2mpl_c.def @@ -0,0 +1,17 @@ +/* + * Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan PSL v1. + * You can use this software according to the terms and conditions of the Mulan PSL v1. + * You may obtain a copy of Mulan PSL v1 at: + * + * http://license.coscl.org.cn/MulanPSL + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v1 for more details. + */ +// option name, option value, append maple root path? +{ "-O2", "", false }, +{ "--quiet", "", false }, \ No newline at end of file diff --git a/src/mapleall/maple_driver/defs/default/O2_options_mplcg.def b/src/mapleall/maple_driver/defs/default/O2_options_mplcg.def new file mode 100644 index 0000000000000000000000000000000000000000..fc489f6b0e90e884a9fd48e09a88023994b3569b --- /dev/null +++ b/src/mapleall/maple_driver/defs/default/O2_options_mplcg.def @@ -0,0 +1,23 @@ +/* + * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan PSL v1. + * You can use this software according to the terms and conditions of the Mulan PSL v1. + * You may obtain a copy of Mulan PSL v1 at: + * + * http://license.coscl.org.cn/MulanPSL + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v1 for more details. + */ +// option name, option value, append maple root path? +{ "-O2", "", false }, +{ "--quiet", "", false }, +{ "--no-pie", "", false }, +{ "--verbose-asm", "", false }, +{ "--fpic", "", false }, +{ "--maplelinker", "", false }, +{ "--gen-c-macro-def", "", false }, +{ "--duplicate_asm_list", "out/target/product/maple_arm64/lib/codetricks/asm/duplicateFunc.s", true }, diff --git a/src/mapleall/maple_driver/defs/default/O2_options_mplcg_c.def b/src/mapleall/maple_driver/defs/default/O2_options_mplcg_c.def new file mode 100644 index 0000000000000000000000000000000000000000..b4cb79a23af2ab5b3423374163ed9e77ef27bb89 --- /dev/null +++ b/src/mapleall/maple_driver/defs/default/O2_options_mplcg_c.def @@ -0,0 +1,18 @@ +/* + * Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. 
+ * + * OpenArkCompiler is licensed under the Mulan PSL v1. + * You can use this software according to the terms and conditions of the Mulan PSL v1. + * You may obtain a copy of Mulan PSL v1 at: + * + * http://license.coscl.org.cn/MulanPSL + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v1 for more details. + */ +// option name, option value, append maple root path? +{ "-O2", "", false }, +{ "--quiet", "", false }, +{ "--fpic", "", false }, diff --git a/src/mapleall/maple_driver/defs/default/O2_options_mplipa.def b/src/mapleall/maple_driver/defs/default/O2_options_mplipa.def new file mode 100644 index 0000000000000000000000000000000000000000..1791acef4470a9f8718fc7644eb56a8d51f70110 --- /dev/null +++ b/src/mapleall/maple_driver/defs/default/O2_options_mplipa.def @@ -0,0 +1,3 @@ +// option name, option value, append maple root path? +{ "--quiet", "", false }, +{ "--effectipa", "", false }, diff --git a/src/mapleall/maple_driver/defs/default/Os_options_me.def b/src/mapleall/maple_driver/defs/default/Os_options_me.def new file mode 100644 index 0000000000000000000000000000000000000000..020c5c8c83a6ac4e2191e75fd943ab86af17a7e3 --- /dev/null +++ b/src/mapleall/maple_driver/defs/default/Os_options_me.def @@ -0,0 +1,17 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan PSL v1. + * You can use this software according to the terms and conditions of the Mulan PSL v1. + * You may obtain a copy of Mulan PSL v1 at: + * + * http://license.coscl.org.cn/MulanPSL + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v1 for more details. + */ +// option name, option value, append maple root path? +{ "-Os", "", false }, +{ "--quiet", "", false }, diff --git a/src/mapleall/maple_driver/defs/default/Os_options_mpl2mpl.def b/src/mapleall/maple_driver/defs/default/Os_options_mpl2mpl.def new file mode 100644 index 0000000000000000000000000000000000000000..55189dc380c24a08bdfa24e4b4a6882d441bcd9e --- /dev/null +++ b/src/mapleall/maple_driver/defs/default/Os_options_mpl2mpl.def @@ -0,0 +1,17 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan PSL v1. + * You can use this software according to the terms and conditions of the Mulan PSL v1. + * You may obtain a copy of Mulan PSL v1 at: + * + * http://license.coscl.org.cn/MulanPSL + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v1 for more details. + */ +// option name, option value, append maple root path? 
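// Editor's note (annotation, not part of this patch): each row in these .def files is a
// brace initializer for MplOption(key, value, needRootPath), and default_options.def
// (added later in this change) builds its option tables by #including them inside a
// static array, roughly like the hypothetical table below:
//
//   static MplOption kExampleDefaults[] = {               // hypothetical name, illustration only
//     { "-Os", "", false },                               // flag with no value
//     { "--profile", "lib/codetricks/meta.list", true },  // needRootPath: value gets the Maple root prepended
//   };
//
// When the third field is true, the driver rewrites the value via
// FileUtils::AppendMapleRootIfNeeded() before the tool is invoked.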
+{ "-Os", "", false }, +{ "--quiet", "", false }, \ No newline at end of file diff --git a/src/mapleall/maple_driver/defs/default/Os_options_mplcg.def b/src/mapleall/maple_driver/defs/default/Os_options_mplcg.def new file mode 100644 index 0000000000000000000000000000000000000000..197ad1f12714d1c2c4c7411435c6eff8f49bc232 --- /dev/null +++ b/src/mapleall/maple_driver/defs/default/Os_options_mplcg.def @@ -0,0 +1,18 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan PSL v1. + * You can use this software according to the terms and conditions of the Mulan PSL v1. + * You may obtain a copy of Mulan PSL v1 at: + * + * http://license.coscl.org.cn/MulanPSL + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v1 for more details. + */ +// option name, option value, append maple root path? +{ "-Os", "", false }, +{ "--quiet", "", false }, +{ "--fpic", "", false }, diff --git a/src/mapleall/maple_driver/defs/default_options.def b/src/mapleall/maple_driver/defs/default_options.def new file mode 100644 index 0000000000000000000000000000000000000000..9b8978ca917e949aa7fb5d0a50b814c866e5de39 --- /dev/null +++ b/src/mapleall/maple_driver/defs/default_options.def @@ -0,0 +1,136 @@ +/* + * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan PSL v1. + * You can use this software according to the terms and conditions of the Mulan PSL v1. + * You may obtain a copy of Mulan PSL v1 at: + * + * http://license.coscl.org.cn/MulanPSL + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v1 for more details. 
+ */ +#ifndef MAPLE_DRIVER_INCLUDE_DEFAULT_OPTIONS_H +#define MAPLE_DRIVER_INCLUDE_DEFAULT_OPTIONS_H + +namespace maple { +// O0 ME options +static MplOption kMeDefaultOptionsO0[] = { +#ifdef ANDROID +#include "android/O0_options_me.def" +#else +#include "default/O0_options_me.def" +#endif +}; +// O2 ME options +static MplOption kMeDefaultOptionsO2[] = { +#ifdef ANDROID +#include "android/O2_options_me.def" +#else +#include "default/O2_options_me.def" +#endif +}; +// Os ME options +static MplOption kMeDefaultOptionsOs[] = { +#include "default/Os_options_me.def" +}; +// O0 mpl2mpl options +static MplOption kMpl2MplDefaultOptionsO0[] = { +#ifdef ANDROID +#include "android/O0_options_mpl2mpl.def" +#else +#include "default/O0_options_mpl2mpl.def" +#endif +}; +// O2 mpl2mpl options +static MplOption kMpl2MplDefaultOptionsO2[] = { +#ifdef ANDROID +#include "android/O2_options_mpl2mpl.def" +#else +#include "default/O2_options_mpl2mpl.def" +#endif +}; +// Os mpl2mpl options +static MplOption kMpl2MplDefaultOptionsOs[] = { +#include "default/Os_options_mpl2mpl.def" +}; +// O0 mplcg options +static MplOption kMplcgDefaultOptionsO0[] = { +#ifdef ANDROID +#include "android/O0_options_mplcg.def" +#else +#include "default/O0_options_mplcg.def" +#endif +}; +// O2 mplcg options +static MplOption kMplcgDefaultOptionsO2[] = { +#ifdef ANDROID +#include "android/O2_options_mplcg.def" +#else +#include "default/O2_options_mplcg.def" +#endif +}; +// Os mplcg options +static MplOption kMplcgDefaultOptionsOs[] = { +#include "default/Os_options_mplcg.def" +}; +// O2 mplipa options +static MplOption kMplipaDefaultOptionsO2[] = { +#ifdef ANDROID +#include "android/O2_options_mplipa.def" +#else +#include "default/O2_options_mplipa.def" +#endif +}; +// O0 dex2mpl options +static MplOption kDex2mplDefaultOptionsO0[] = { +#ifdef ANDROID +#include "android/O0_options_dex2mpl.def" +#else +#include "default/O0_options_dex2mpl.def" +#endif +}; +// O2 dex2mpl options +static MplOption kDex2mplDefaultOptionsO2[] = { +#ifdef ANDROID +#include "android/O2_options_dex2mpl.def" +#else +#include "default/O2_options_dex2mpl.def" +#endif +}; +// O0 mpl2mpl options for C language +static MplOption kMpl2MplDefaultOptionsO0ForC[] = { +#include "default/O0_options_mpl2mpl_c.def" +}; +// O2 mpl2mpl options for C language +static MplOption kMpl2MplDefaultOptionsO2ForC[] = { +#include "default/O2_options_mpl2mpl_c.def" +}; +// O0 mplcg options for C language +static MplOption kMplcgDefaultOptionsO0ForC[] = { +#include "default/O0_options_mplcg_c.def" +}; +// O2 ME options for C language +static MplOption kMeDefaultOptionsO2ForC[] = { +#include "default/O2_options_me_c.def" +}; +// O2 mplcg options for C language +static MplOption kMplcgDefaultOptionsO2ForC[] = { +#include "default/O2_options_mplcg_c.def" +}; +// O0 cpp2mpl options +static MplOption kCpp2MplDefaultOptionsForAst[] = { +#include "default/O0_options_cpp2mpl.def" +}; +// O0 clang options +static MplOption kClangDefaultOptions[] = { +#include "default/O0_options_clang.def" +}; +// O0 ld options +static MplOption kLdDefaultOptions[] = { +#include "default/O0_options_ld.def" +}; +} // namespace maple +#endif // MAPLE_DRIVER_INCLUDE_DEFAULT_OPTIONS_H diff --git a/src/mapleall/maple_driver/include/compiler.h b/src/mapleall/maple_driver/include/compiler.h new file mode 100644 index 0000000000000000000000000000000000000000..7e77f7966afda8baa6dc430b2102c65c80f29c1d --- /dev/null +++ b/src/mapleall/maple_driver/include/compiler.h @@ -0,0 +1,331 @@ +/* + * Copyright (c) [2019-2020] Huawei 
Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLE_DRIVER_INCLUDE_COMPILER_H +#define MAPLE_DRIVER_INCLUDE_COMPILER_H +#include +#include +#include "error_code.h" +#include "mpl_options.h" +#include "cg_option.h" +#include "me_option.h" +#include "option.h" +#include "mir_module.h" +#include "mir_parser.h" +#include "driver_runner.h" +#include "bin_mplt.h" + +namespace maple { +const std::string kBinNameNone = ""; +const std::string kBinNameJbc2mpl = "jbc2mpl"; +const std::string kBinNameCpp2mpl = "hir2mpl"; +const std::string kBinNameClang = "clang"; +const std::string kBinNameDex2mpl = "dex2mpl"; +const std::string kBinNameMplipa = "mplipa"; +const std::string kBinNameMe = "me"; +const std::string kBinNameMpl2mpl = "mpl2mpl"; +const std::string kBinNameMplcg = "mplcg"; +const std::string kBinNameMapleComb = "maplecomb"; +const std::string kBinNameMapleCombWrp = "maplecombwrp"; +const std::string kMachine = "aarch64-"; +const std::string kVendor = "unknown-"; +const std::string kOperatingSystem = "linux-gnu-"; +const std::string kLdFlag = "ld"; +const std::string kGccFlag = "gcc"; +const std::string kGppFlag = "g++"; +const std::string kAsFlag = "as"; +const std::string kInputPhase = "input"; +const std::string kBinNameLd = kMachine + kOperatingSystem + kLdFlag; +const std::string kBinNameAs = kMachine + kOperatingSystem + kAsFlag; +const std::string kBinNameGcc = kMachine + kOperatingSystem + kGccFlag; +const std::string kBinNameGpp = kMachine + kOperatingSystem + kGppFlag; + +constexpr char kGccBeIlp32SysrootPathEnv[] = "GCC_BIGEND_ILP32_SYSROOT_PATH"; +constexpr char kGccBeSysrootPathEnv[] = "GCC_BIGEND_SYSROOT_PATH"; +constexpr char kGccBePathEnv[] = "GCC_BIGEND_PATH"; + +class Compiler { + public: + explicit Compiler(const std::string &name) : name(name) {} + + virtual ~Compiler() = default; + + virtual ErrorCode Compile(MplOptions &options, const Action &action, + std::unique_ptr &theModule); + + virtual void GetTmpFilesToDelete(const MplOptions &mplOptions [[maybe_unused]], + const Action &action [[maybe_unused]], + std::vector &tempFiles [[maybe_unused]]) const {} + + virtual std::unordered_set GetFinalOutputs(const MplOptions &mplOptions [[maybe_unused]], + const Action &action [[maybe_unused]]) const { + return std::unordered_set(); + } + + virtual void PrintCommand(const MplOptions&, const Action&) const {} + + protected: + virtual std::string GetBinPath(const MplOptions &mplOptions) const; + virtual const std::string &GetBinName() const { + return kBinNameNone; + } + + /* Default behaviour ToolName==BinName, But some tools have another behaviour: + * AsCompiler: ToolName=kAsFlag, BinName=kMachine + kOperatingSystem + kAsFlag + */ + virtual const std::string &GetTool() const { + return GetBinName(); + } + + virtual std::string GetInputFileName(const MplOptions &options [[maybe_unused]], const Action &action) const { + return action.GetInputFile(); + } + + virtual DefaultOption GetDefaultOptions(const MplOptions &options [[maybe_unused]], + const Action &action [[maybe_unused]]) 
const { + return DefaultOption(); + } + + virtual void AppendOutputOption(std::vector &, const std::string &) const { + return; + } + + private: + const std::string name; + std::vector MakeOption(const MplOptions &options, + const Action &action) const; + void AppendDefaultOptions(std::vector &finalOptions, + const std::vector &defaultOptions, + bool isDebug) const; + void AppendExtraOptions(std::vector &finalOptions, const MplOptions &options, + bool isDebug, const Action &action) const; + void AppendInputsAsOptions(std::vector &finalOptions, + const MplOptions &mplOptions, const Action &action) const; + void ReplaceOrInsertOption(std::vector &finalOptions, + const std::string &key, const std::string &value) const; + std::vector MakeDefaultOptions(const MplOptions &options, + const Action &action) const; + int Exe(const MplOptions &mplOptions, const std::vector &options) const; + const std::string &GetName() const { + return name; + } +}; + +class Jbc2MplCompiler : public Compiler { + public: + explicit Jbc2MplCompiler(const std::string &name) : Compiler(name) {} + + ~Jbc2MplCompiler() = default; + + private: + const std::string &GetBinName() const override; + DefaultOption GetDefaultOptions(const MplOptions &options, const Action &action) const override; + void GetTmpFilesToDelete(const MplOptions &mplOptions, const Action &action, + std::vector &tempFiles) const override; + std::unordered_set GetFinalOutputs(const MplOptions &mplOptions, + const Action &action) const override; +}; + +class ClangCompiler : public Compiler { + public: + explicit ClangCompiler(const std::string &name) : Compiler(name) {} + + ~ClangCompiler() = default; + + private: + const std::string &GetBinName() const override; + std::string GetBinPath(const MplOptions &mplOptions) const override; + DefaultOption GetDefaultOptions(const MplOptions &options, const Action &action) const override; + void GetTmpFilesToDelete(const MplOptions &mplOptions, const Action &action, + std::vector &tempFiles) const override; + std::unordered_set GetFinalOutputs(const MplOptions &mplOptions, + const Action &action) const override ; + void AppendOutputOption(std::vector &finalOptions, const std::string &name) const override; +}; + +class ClangCompilerBeILP32 : public ClangCompiler { + public: + explicit ClangCompilerBeILP32(const std::string &name) : ClangCompiler(name) {} + private: + DefaultOption GetDefaultOptions(const MplOptions &options, const Action &action) const override; +}; + +class Cpp2MplCompiler : public Compiler { + public: + explicit Cpp2MplCompiler(const std::string &name) : Compiler(name) {} + + ~Cpp2MplCompiler() = default; + + private: + std::string GetBinPath(const MplOptions &mplOptions) const override; + const std::string &GetBinName() const override; + std::string GetInputFileName(const MplOptions &options, const Action &action) const override; + DefaultOption GetDefaultOptions(const MplOptions &options, const Action &action) const override; + void GetTmpFilesToDelete(const MplOptions &mplOptions, const Action &action, + std::vector &tempFiles) const override; + std::unordered_set GetFinalOutputs(const MplOptions &mplOptions, + const Action &action) const override; + void AppendOutputOption(std::vector &finalOptions, const std::string &name) const override; +}; + +class Dex2MplCompiler : public Compiler { + public: + explicit Dex2MplCompiler(const std::string &name) : Compiler(name) {} + + ~Dex2MplCompiler() = default; +#ifdef INTERGRATE_DRIVER + ErrorCode Compile(MplOptions &options, const Action &action, + 
std::unique_ptr &theModule) override; +#endif + + void PrintCommand(const MplOptions &options, const Action &action) const override; + + private: + const std::string &GetBinName() const override; + DefaultOption GetDefaultOptions(const MplOptions &options, const Action &action) const override; + void GetTmpFilesToDelete(const MplOptions &mplOptions, const Action &action, + std::vector &tempFiles) const override; + std::unordered_set GetFinalOutputs(const MplOptions &mplOptions, + const Action &action) const override; +#ifdef INTERGRATE_DRIVER + void PostDex2Mpl(std::unique_ptr &theModule) const; + bool MakeDex2mplOptions(const MplOptions &options); +#endif +}; + +class IpaCompiler : public Compiler { + public: + explicit IpaCompiler(const std::string &name) : Compiler(name) {} + + ~IpaCompiler() = default; + + private: + const std::string &GetBinName() const override; + DefaultOption GetDefaultOptions(const MplOptions &options, const Action &action) const override; + std::string GetInputFileName(const MplOptions &options, const Action &action) const override; +}; + +class MapleCombCompiler : public Compiler { + public: + explicit MapleCombCompiler(const std::string &name) : Compiler(name) {} + + ~MapleCombCompiler() = default; + + ErrorCode Compile(MplOptions &options, const Action &action, + std::unique_ptr &theModule) override; + void PrintCommand(const MplOptions &options, const Action &action) const override; + std::string GetInputFileName(const MplOptions &options, const Action &action) const override; + + private: + std::unordered_set GetFinalOutputs(const MplOptions &mplOptions, + const Action &action) const override; + void GetTmpFilesToDelete(const MplOptions &mplOptions, const Action &action, + std::vector &tempFiles) const override; + ErrorCode MakeMeOptions(const MplOptions &options, DriverRunner &runner) const; + ErrorCode MakeMpl2MplOptions(const MplOptions &options, DriverRunner &runner) const; + std::string DecideOutExe(const MplOptions &options) const; + std::string GetStringOfSafetyOption() const; +}; + +class MplcgCompiler : public Compiler { + public: + explicit MplcgCompiler(const std::string &name) : Compiler(name) {} + + ~MplcgCompiler() = default; + ErrorCode Compile(MplOptions &options, const Action &action, + std::unique_ptr &theModule) override; + void PrintMplcgCommand(const MplOptions &options, const Action &action, const MIRModule &md) const; + void SetOutputFileName(const MplOptions &options, const Action &action, const MIRModule &md); + std::string GetInputFile(const MplOptions &options, const Action &action, const MIRModule *md) const; + private: + DefaultOption GetDefaultOptions(const MplOptions &options, const Action &action) const override; + ErrorCode GetMplcgOptions(MplOptions &options, const Action &action, const MIRModule *theModule) const; + ErrorCode MakeCGOptions(const MplOptions &options) const; + const std::string &GetBinName() const override; + std::string baseName; + std::string outputFile; +}; + +class MapleCombCompilerWrp : public Compiler { + public: + explicit MapleCombCompilerWrp(const std::string &name) : Compiler(name) {} + ~MapleCombCompilerWrp() = default; + + std::string GetInputFileName(const MplOptions &options, const Action &action) const override; + + private: + std::string GetBinPath(const MplOptions &mplOptions) const override; + const std::string &GetBinName() const override; + DefaultOption GetDefaultOptions(const MplOptions &options, const Action &action) const override; + void GetTmpFilesToDelete(const MplOptions 
&mplOptions, const Action &action, + std::vector &tempFiles) const override; + std::unordered_set GetFinalOutputs(const MplOptions &mplOptions, + const Action &action) const override; +}; + +// Build .s to .o +class AsCompiler : public Compiler { + public: + explicit AsCompiler(const std::string &name) : Compiler(name) {} + + ~AsCompiler() = default; + + private: + std::string GetBinPath(const MplOptions &mplOptions) const override; + const std::string &GetBinName() const override; + const std::string &GetTool() const override; + DefaultOption GetDefaultOptions(const MplOptions &options, const Action &action) const override; + std::string GetInputFileName(const MplOptions &options, const Action &action) const override; + void GetTmpFilesToDelete(const MplOptions &mplOptions, const Action &action, + std::vector &tempFiles) const override; + std::unordered_set GetFinalOutputs(const MplOptions &mplOptions, + const Action &action) const override; + void AppendOutputOption(std::vector &finalOptions, const std::string &name) const override; +}; + +class AsCompilerBeILP32 : public AsCompiler { + public: + explicit AsCompilerBeILP32(const std::string &name) : AsCompiler(name) {} + private: + std::string GetBinPath(const MplOptions &mplOptions) const override; + const std::string &GetBinName() const override; + DefaultOption GetDefaultOptions(const MplOptions &options, const Action &action) const override; +}; + +// Build .o to .so +class LdCompiler : public Compiler { + public: + explicit LdCompiler(const std::string &name) : Compiler(name) {} + + ~LdCompiler() = default; + + private: + std::string GetBinPath(const MplOptions &mplOptions) const override; + const std::string &GetBinName() const override; + const std::string &GetTool() const override; + DefaultOption GetDefaultOptions(const MplOptions &options, const Action &action) const override; + std::string GetInputFileName(const MplOptions &options, const Action &action) const override; + void AppendOutputOption(std::vector &finalOptions, const std::string &name) const override; +}; + +class LdCompilerBeILP32 : public LdCompiler { + public: + explicit LdCompilerBeILP32(const std::string &name) : LdCompiler(name) {} + private: + std::string GetBinPath(const MplOptions &mplOptions) const override; + const std::string &GetBinName() const override; +}; + +} // namespace maple +#endif // MAPLE_DRIVER_INCLUDE_COMPILER_H diff --git a/src/mapleall/maple_driver/include/compiler_factory.h b/src/mapleall/maple_driver/include/compiler_factory.h new file mode 100644 index 0000000000000000000000000000000000000000..bd5f417c50c4b2d8f7eab3e69a9b10d39072e205 --- /dev/null +++ b/src/mapleall/maple_driver/include/compiler_factory.h @@ -0,0 +1,113 @@ +/* + * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MAPLE_DRIVER_INCLUDE_COMPILER_FACTORY_H +#define MAPLE_DRIVER_INCLUDE_COMPILER_FACTORY_H +#include +#include "compiler.h" +#include "error_code.h" +#include "mir_module.h" +#include "mir_parser.h" +#include "triple.h" + +namespace maple { + +class Toolchain { + using SupportedCompilers = std::unordered_map>; + SupportedCompilers compilers; + + public: + Compiler *Find(const std::string &toolName) { + auto it = compilers.find(toolName); + if (it != compilers.end()) { + return it->second.get(); + } + return nullptr; + } + + const SupportedCompilers &GetSupportedCompilers() const { + return compilers; + } + + virtual ~Toolchain() = default; + + protected: + template + void AddCompiler(const std::string &toolName) { + (void)compilers.emplace(std::make_pair(toolName, std::make_unique(toolName))); + } +}; + +class Aarch64Toolchain : public Toolchain { + public: + Aarch64Toolchain() { + AddCompiler("jbc2mpl"); + AddCompiler("dex2mpl"); + AddCompiler("hir2mpl"); + AddCompiler("clang"); + AddCompiler("mplipa"); + AddCompiler("me"); + AddCompiler("mpl2mpl"); + AddCompiler("mplcg"); + AddCompiler("maplecomb"); + AddCompiler("maplecombwrp"); + AddCompiler("as"); + AddCompiler("ld"); + } +}; + +class Aarch64BeILP32Toolchain : public Toolchain { + public: + Aarch64BeILP32Toolchain() { + AddCompiler("jbc2mpl"); + AddCompiler("dex2mpl"); + AddCompiler("hir2mpl"); + AddCompiler("clang"); + AddCompiler("mplipa"); + AddCompiler("me"); + AddCompiler("mpl2mpl"); + AddCompiler("mplcg"); + AddCompiler("maplecomb"); + AddCompiler("maplecombwrp"); + AddCompiler("as"); + AddCompiler("ld"); + } +}; + +class CompilerFactory { + public: + static CompilerFactory &GetInstance(); + CompilerFactory(const CompilerFactory&) = delete; + CompilerFactory(CompilerFactory&&) = delete; + CompilerFactory &operator=(const CompilerFactory&) = delete; + CompilerFactory &operator=(CompilerFactory&&) = delete; + ~CompilerFactory() = default; + + ErrorCode Compile(MplOptions &mplOptions); + Toolchain *GetToolChain(); + + private: + CompilerFactory() = default; + + ErrorCode Select(const MplOptions &mplOptions, std::vector &selectedActions); + ErrorCode Select(Action &action, std::vector &selectedActions); + ErrorCode DeleteTmpFiles(const MplOptions &mplOptions, + const std::vector &tempFiles) const; + + bool compileFinished = false; + std::unique_ptr theModule; + std::unique_ptr toolchain; +}; +} // namespace maple +#endif // MAPLE_DRIVER_INCLUDE_COMPILER_FACTORY_H diff --git a/src/mapleall/maple_driver/include/driver_options.h b/src/mapleall/maple_driver/include/driver_options.h new file mode 100644 index 0000000000000000000000000000000000000000..61a57df5d896e03f2ebbe9a2aff3ae44cd27237c --- /dev/null +++ b/src/mapleall/maple_driver/include/driver_options.h @@ -0,0 +1,116 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ + +#ifndef MAPLE_DRIVER_INCLUDE_DRIVER_OPTIONS_H +#define MAPLE_DRIVER_INCLUDE_DRIVER_OPTIONS_H + +#include "cl_option.h" +#include "cl_parser.h" + +static maplecl::OptionCategory &driverCategory = maplecl::CommandLine::GetCommandLine().defaultCategory; + +static maplecl::OptionCategory &clangCategory = maplecl::CommandLine::GetCommandLine().clangCategory; +static maplecl::OptionCategory &hir2mplCategory = maplecl::CommandLine::GetCommandLine().hir2mplCategory; +static maplecl::OptionCategory &mpl2mplCategory = maplecl::CommandLine::GetCommandLine().mpl2mplCategory; +static maplecl::OptionCategory &meCategory = maplecl::CommandLine::GetCommandLine().meCategory; +static maplecl::OptionCategory &cgCategory = maplecl::CommandLine::GetCommandLine().cgCategory; +static maplecl::OptionCategory &asCategory = maplecl::CommandLine::GetCommandLine().asCategory; +static maplecl::OptionCategory &ldCategory = maplecl::CommandLine::GetCommandLine().ldCategory; + +static maplecl::OptionCategory &dex2mplCategory = maplecl::CommandLine::GetCommandLine().dex2mplCategory; +static maplecl::OptionCategory &jbc2mplCategory = maplecl::CommandLine::GetCommandLine().jbc2mplCategory; +static maplecl::OptionCategory &ipaCategory = maplecl::CommandLine::GetCommandLine().ipaCategory; + +namespace opts { + +/* ##################### BOOL Options ############################################################### */ + +extern maplecl::Option version; +extern maplecl::Option ignoreUnkOpt; +extern maplecl::Option o0; +extern maplecl::Option o1; +extern maplecl::Option o2; +extern maplecl::Option os; +extern maplecl::Option verify; +extern maplecl::Option decoupleStatic; +extern maplecl::Option bigEndian; +extern maplecl::Option gcOnly; +extern maplecl::Option timePhase; +extern maplecl::Option genMeMpl; +extern maplecl::Option compileWOLink; +extern maplecl::Option genVtable; +extern maplecl::Option verbose; +extern maplecl::Option debug; +extern maplecl::Option withDwarf; +extern maplecl::Option withIpa; +extern maplecl::Option npeNoCheck; +extern maplecl::Option npeStaticCheck; +extern maplecl::Option npeDynamicCheck; +extern maplecl::Option npeDynamicCheckSilent; +extern maplecl::Option npeDynamicCheckAll; +extern maplecl::Option boundaryNoCheck; +extern maplecl::Option boundaryStaticCheck; +extern maplecl::Option boundaryDynamicCheck; +extern maplecl::Option boundaryDynamicCheckSilent; +extern maplecl::Option safeRegionOption; +extern maplecl::Option printDriverPhases; +extern maplecl::Option ldStatic; +extern maplecl::Option maplePhase; +extern maplecl::Option genMapleBC; +extern maplecl::Option genLMBC; +extern maplecl::Option profileGen; +extern maplecl::Option profileUse; +extern maplecl::Option stackProtectorStrong; +extern maplecl::Option stackProtectorAll; +extern maplecl::Option inlineAsWeak; + +/* ##################### STRING Options ############################################################### */ + +extern maplecl::Option help; +extern maplecl::Option infile; +extern maplecl::Option mplt; +extern maplecl::Option partO2; +extern maplecl::List jbc2mplOpt; +extern maplecl::List hir2mplOpt; +extern maplecl::List clangOpt; +extern maplecl::List asOpt; +extern maplecl::List ldOpt; +extern maplecl::List dex2mplOpt; +extern maplecl::List mplipaOpt; +extern maplecl::List mplcgOpt; +extern maplecl::List meOpt; +extern maplecl::List mpl2mplOpt; +extern maplecl::Option profile; +extern maplecl::Option run; +extern maplecl::Option optionOpt; +extern maplecl::List ldLib; +extern maplecl::List ldLibPath; +extern 
maplecl::List enableMacro; +extern maplecl::List disableMacro; +extern maplecl::List includeDir; +extern maplecl::List includeSystem; +extern maplecl::Option output; +extern maplecl::Option saveTempOpt; +extern maplecl::Option target; + +/* ##################### DIGITAL Options ############################################################### */ + +extern maplecl::Option helpLevel; + +/* #################################################################################################### */ + +} /* opts */ + +#endif /* MAPLE_DRIVER_INCLUDE_DRIVER_OPTIONS_H */ diff --git a/src/mapleall/maple_driver/include/driver_runner.h b/src/mapleall/maple_driver/include/driver_runner.h new file mode 100644 index 0000000000000000000000000000000000000000..4eb40886088e31f32645113f545570badfc50cf3 --- /dev/null +++ b/src/mapleall/maple_driver/include/driver_runner.h @@ -0,0 +1,118 @@ +/* + * Copyright (c) [2019-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLE_DRIVER_INCLUDE_DRIVER_RUNNER_H +#define MAPLE_DRIVER_INCLUDE_DRIVER_RUNNER_H + +#include +#include +#include "me_option.h" +#include "module_phase_manager.h" +#include "error_code.h" +#include "cg.h" +#include "cg_option.h" +#include "cg_phasemanager.h" +#include "maple_phase_manager.h" +namespace maple { +using namespace maplebe; + +extern const std::string mplCG; +extern const std::string mpl2Mpl; +extern const std::string mplME; + +class DriverRunner final { + public: + DriverRunner(MIRModule *theModule, const std::vector &exeNames, InputFileType inpFileType, + const std::string &mpl2mplInput, const std::string &meInput, const std::string &actualInput, + bool dwarf, bool fileParsed = false, bool timePhases = false, + bool genVtableImpl = false, bool genMeMpl = false, bool genMapleBC = false, bool genLMBC = false) + : theModule(theModule), + exeNames(exeNames), + mpl2mplInput(mpl2mplInput), + meInput(meInput), + actualInput(actualInput), + withDwarf(dwarf), + fileParsed(fileParsed), + timePhases(timePhases), + genVtableImpl(genVtableImpl), + genMeMpl(genMeMpl), + genMapleBC(genMapleBC), + genLMBC(genLMBC), + inputFileType(inpFileType) { + auto lastDot = actualInput.find_last_of("."); + baseName = (lastDot == std::string::npos) ? actualInput : actualInput.substr(0, lastDot); + } + + DriverRunner(MIRModule *theModule, const std::vector &exeNames, InputFileType inpFileType, + const std::string &actualInput, bool dwarf, bool fileParsed = false, bool timePhases = false, + bool genVtableImpl = false, bool genMeMpl = false, bool genMapleBC = false, bool genLMBC = false) + : DriverRunner(theModule, exeNames, inpFileType, "", "", actualInput, dwarf, + fileParsed, timePhases, genVtableImpl, genMeMpl, genMapleBC, genLMBC) { + auto lastDot = actualInput.find_last_of("."); + baseName = (lastDot == std::string::npos) ? 
actualInput : actualInput.substr(0, lastDot); + } + + ~DriverRunner() = default; + + ErrorCode Run(); + void RunNewPM(const std::string &output, const std::string &vtableImplFile); + void ProcessCGPhase(const std::string &output, const std::string &originBaseName) const; + void SetCGInfo(CGOptions *newCgOptions, const std::string &newCgInput) { + this->cgOptions = newCgOptions; + this->cgInput = newCgInput; + } + ErrorCode ParseInput() const; + ErrorCode ParseSrcLang(MIRSrcLang &srcLang) const; + void SolveCrossModuleInJava(MIRParser &parser) const; + void SolveCrossModuleInC(MIRParser &parser) const; + void SetPrintOutExe(const std::string outExe) { + printOutExe = outExe; + } + + void SetMpl2mplOptions(Options *options) { + mpl2mplOptions = options; + } + + void SetMeOptions(MeOption *options) { + meOptions = options; + } + + private: + std::string GetPostfix(); + void ProcessMpl2mplAndMePhases(const std::string &output, const std::string &vtableImplFile); + CGOptions *cgOptions = nullptr; + std::string cgInput; + void InitProfile() const; + MIRModule *theModule; + std::vector exeNames = {}; + Options *mpl2mplOptions = nullptr; + std::string mpl2mplInput; + MeOption *meOptions = nullptr; + std::string meInput; + std::string actualInput; + bool withDwarf = false; + bool fileParsed = false; + bool timePhases = false; + bool genVtableImpl = false; + bool genMeMpl = false; + bool genMapleBC = false; + bool genLMBC = false; + std::string printOutExe = ""; + std::string baseName; + std::string outputFile; + InputFileType inputFileType; +}; +} // namespace maple + +#endif // MAPLE_DRIVER_INCLUDE_DRIVER_RUNNER_H diff --git a/src/mapleall/maple_driver/include/mpl_options.h b/src/mapleall/maple_driver/include/mpl_options.h new file mode 100644 index 0000000000000000000000000000000000000000..6422cd4acd7a555a1ad7f71b245b81285d151827 --- /dev/null +++ b/src/mapleall/maple_driver/include/mpl_options.h @@ -0,0 +1,413 @@ +/* + * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MAPLE_DRIVER_INCLUDE_MPL_OPTIONS_H +#define MAPLE_DRIVER_INCLUDE_MPL_OPTIONS_H +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "driver_options.h" +#include "error_code.h" +#include "file_utils.h" +#include "mpl_logging.h" +#include "mir_module.h" + +namespace maple { +enum InputFileType { + kFileTypeNone, + kFileTypeClass, + kFileTypeJar, + kFileTypeAst, + kFileTypeCpp, + kFileTypeC, + kFileTypeDex, + kFileTypeMpl, + kFileTypeVtableImplMpl, + kFileTypeS, + kFileTypeObj, + kFileTypeBpl, + kFileTypeMeMpl, + kFileTypeMbc, + kFileTypeLmbc, +}; + +enum OptimizationLevel { + kO0, + kO1, + kO2, + kCLangO0, + kCLangO2, +}; + +enum RunMode { + kAutoRun, + kCustomRun, + kUnkownRun +}; + +enum SafetyCheckMode { + kNoCheck, + kStaticCheck, + kDynamicCheck, + kDynamicCheckSilent +}; + +class Compiler; + +class InputInfo { + public: + explicit InputInfo(const std::string &inputFile) + : inputFile(inputFile) { + inputFileType = GetInputFileType(inputFile); + + inputName = FileUtils::GetFileName(inputFile, true); + inputFolder = FileUtils::GetFileFolder(inputFile); + outputFolder = inputFolder; + outputName = FileUtils::GetFileName(inputFile, false); + fullOutput = outputFolder + outputName; + } + + ~InputInfo() = default; + static InputFileType GetInputFileType(const std::string &inputFilePath) { + InputFileType fileType = InputFileType::kFileTypeNone; + std::string extensionName = FileUtils::GetFileExtension(inputFilePath); + if (extensionName == "class") { + fileType = InputFileType::kFileTypeClass; + } + else if (extensionName == "dex") { + fileType = InputFileType::kFileTypeDex; + } + else if (extensionName == "c") { + fileType = InputFileType::kFileTypeC; + } + else if (extensionName == "cpp") { + fileType = InputFileType::kFileTypeCpp; + } + else if (extensionName == "ast") { + fileType = InputFileType::kFileTypeAst; + } + else if (extensionName == "jar") { + fileType = InputFileType::kFileTypeJar; + } + else if (extensionName == "mpl" || extensionName == "bpl") { + if (inputFilePath.find("VtableImpl") == std::string::npos) { + if (inputFilePath.find(".me.mpl") != std::string::npos) { + fileType = InputFileType::kFileTypeMeMpl; + } else { + fileType = extensionName == "mpl" ? 
InputFileType::kFileTypeMpl : InputFileType::kFileTypeBpl; + } + } else { + fileType = InputFileType::kFileTypeVtableImplMpl; + } + } else if (extensionName == "s") { + fileType = InputFileType::kFileTypeS; + } else if (extensionName == "o") { + fileType = InputFileType::kFileTypeObj; + } else if (extensionName == "mbc") { + fileType = InputFileType::kFileTypeMbc; + } else if (extensionName == "lmbc") { + fileType = InputFileType::kFileTypeLmbc; + } + + return fileType; + } + + InputFileType GetInputFileType() const { + return inputFileType; + } + + const std::string &GetInputFile() const { + return inputFile; + } + + const std::string &GetOutputFolder() const { + return outputFolder; + } + + const std::string &GetOutputName() const { + return outputName; + } + + const std::string &GetFullOutputName() const { + return fullOutput; + } + + private: + std::string inputFile = ""; + InputFileType inputFileType = InputFileType::kFileTypeNone; + + std::string inputName = ""; + std::string inputFolder = ""; + std::string outputName = ""; + std::string outputFolder = ""; + std::string fullOutput = ""; +}; + +class Action { + public: + Action(const std::string &tool, const InputInfo *const inputInfo) + : inputInfo(inputInfo), tool(tool) {} + + Action(const std::string &tool, const InputInfo *const inputInfo, + std::unique_ptr &inAction) + : inputInfo(inputInfo), tool(tool) { + inputActions.push_back(std::move(inAction)); + } + + Action(const std::string &tool, std::vector> &inActions, + const InputInfo *const inputInfo) + : inputInfo(inputInfo), tool(tool) { + for (auto &inAction : inActions) { + linkInputFiles.push_back(inAction->GetInputFile()); + } + + std::move(begin(inActions), end(inActions), std::back_inserter(inputActions)); + } + + ~Action() = default; + + const std::string &GetTool() const { + return tool; + } + + const std::string &GetInputFile() const { + return inputInfo->GetInputFile(); + } + + const std::string &GetOutputFolder() const { + return inputInfo->GetOutputFolder(); + } + + const std::string &GetOutputName() const { + return inputInfo->GetOutputName(); + } + + const std::string &GetFullOutputName() const { + return inputInfo->GetFullOutputName(); + } + + InputFileType GetInputFileType() const { + return inputInfo->GetInputFileType(); + } + + const std::vector &GetLinkFiles() const { + return linkInputFiles; + } + + const std::vector> &GetInputActions() const { + return inputActions; + } + + Compiler *GetCompiler() const { + return compilerTool; + } + + void SetCompiler(Compiler *compiler) { + compilerTool = compiler; + } + + bool IsItFirstRealAction() const { + /* First action is always "Input". + * But first real action will be a tool from kMapleCompilers. 
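   * Editor's note (annotation, not part of this patch): as a rough illustration, when the
   * driver compiles a single C file the action chain is built along the lines of
   *   input -> clang -> hir2mpl -> maplecomb -> mplcg -> as -> ld
   * with each Action holding its predecessors in inputActions; the check below asks
   * whether this action's direct predecessor is that synthetic "input" node.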
+ */ + if (inputActions.size() > 0 && inputActions[0]->tool == "input") { + return true; + } + return false; + } + + private: + const InputInfo *inputInfo; + + std::string tool = ""; + std::string exeFolder = ""; + std::vector linkInputFiles; + + Compiler *compilerTool = nullptr; + + /* This vector contains a links to previous actions in Action tree */ + std::vector> inputActions; +}; + +class MplOption { + public: + MplOption(){needRootPath = false;} + MplOption(const std::string &key, const std::string &value, bool needRootPath = false) + : key(key), + value(value), + needRootPath(needRootPath) { + CHECK_FATAL(!key.empty(), "MplOption got an empty key."); + } + + ~MplOption() = default; + + const std::string &GetKey() const { + return key; + } + + const std::string &GetValue() const { + return value; + } + + void SetValue(std::string val) { + value = val; + } + + void SetKey(const std::string &k) { + key = k; + } + + bool GetNeedRootPath() const { + return needRootPath; + } + + private: + // option key + std::string key; + // option value + std::string value; + bool needRootPath; +}; + +struct DefaultOption { + std::unique_ptr mplOptions; + uint32_t length = 0; +}; + +class MplOptions { + public: + using ExeOptMapType = std::unordered_map>; + + MplOptions() = default; + MplOptions(const MplOptions &options) = delete; + MplOptions &operator=(const MplOptions &options) = delete; + ~MplOptions() = default; + + ErrorCode Parse(int argc, char **argv); + + const ExeOptMapType &GetExeOptions() const { + return exeOptions; + } + + const std::vector &GetInputFiles() const { + return inputFiles; + } + + const std::string &GetExeFolder() const { + return exeFolder; + } + + const RunMode &GetRunMode() const { + return runMode; + } + + const std::vector &GetSaveFiles() const { + return saveFiles; + } + + const std::vector &GetRunningExes() const { + return runningExes; + } + + const std::vector &GetSelectedExes() const { + return selectedExes; + } + + bool HasSetGeneralRegOnly() const { + return generalRegOnly; + } + + SafetyCheckMode GetNpeCheckMode() const { + return npeCheckMode; + } + + SafetyCheckMode GetBoundaryCheckMode() const { + return boundaryCheckMode; + } + + const std::vector> &GetActions() const { + return rootActions; + } + + maplecl::OptionCategory *GetCategory(const std::string &tool) const; + ErrorCode AppendCombOptions(MIRSrcLang srcLang); + ErrorCode AppendMplcgOptions(MIRSrcLang srcLang); + std::string GetInputFileNameForPrint(const Action * const action) const; + void PrintCommand(const Action * const action); + void ConnectOptStr(std::string &optionStr, const std::string &exeName, bool &firstComb, std::string &runStr); + std::string GetCommonOptionsStr() const; + void PrintDetailCommand(const Action * const action, bool isBeforeParse); + inline void PrintDetailCommand(bool isBeforeParse) { + PrintDetailCommand(nullptr, isBeforeParse); + } + + private: + ErrorCode CheckInputFiles(); + ErrorCode HandleOptions(); + void HandleExtraOptions(); + ErrorCode HandleEarlyOptions(); + ErrorCode DecideRunningPhases(); + ErrorCode DecideRunningPhases(const std::vector &runExes); + std::unique_ptr DecideRunningPhasesByType(const InputInfo *const inputInfo, bool isMultipleFiles); + ErrorCode MFCreateActionByExe(const std::string &exe, std::unique_ptr ¤tAction, + const InputInfo *const inputInfo, bool &wasWrpCombCompilerCreated) const; + ErrorCode SFCreateActionByExe(const std::string &exe, std::unique_ptr ¤tAction, + const InputInfo *const inputInfo, bool &isCombCompiler) const; + InputInfo 
*AllocateInputInfo(const std::string &inputFile); + ErrorCode AppendDefaultOptions(const std::string &exeName, MplOption mplOptions[], unsigned int length); + void DumpAppendedOptions(const std::string &exeName, + const MplOption mplOptions[], unsigned int length) const; + void UpdateRunningExe(const std::string &args); + void UpdateExeOptions(const std::string &options, const std::string &tool); + ErrorCode UpdateExeOptions(const std::string &args); + void DumpActionTree(const Action &action, int indents) const; + void DumpActionTree() const; + + std::vector inputFiles; + std::string exeFolder; + RunMode runMode = RunMode::kUnkownRun; + std::vector saveFiles = {}; + std::vector runningExes = {}; + std::vector selectedExes = {}; + std::ostringstream printExtraOptStr; + + /* exeOptions is used to forward options to necessary tool. + * As example: --ld-opt="opt1 opt2" will be forwarded to linker */ + ExeOptMapType exeOptions; + + bool hasPrinted = false; + bool generalRegOnly = false; + SafetyCheckMode npeCheckMode = SafetyCheckMode::kNoCheck; + SafetyCheckMode boundaryCheckMode = SafetyCheckMode::kNoCheck; + + std::vector> inputInfos; + std::vector> rootActions; +}; + +enum Level { + kLevelZero = 0, + kLevelOne = 1, + kLevelTwo = 2, + kLevelThree = 3, + kLevelFour = 4 +}; + +} // namespace maple +#endif // MAPLE_DRIVER_INCLUDE_MPL_OPTIONS_H diff --git a/src/mapleall/maple_driver/include/safe_exe.h b/src/mapleall/maple_driver/include/safe_exe.h new file mode 100644 index 0000000000000000000000000000000000000000..28cfa4df2672a7e6d816f811a875d2c255a90742 --- /dev/null +++ b/src/mapleall/maple_driver/include/safe_exe.h @@ -0,0 +1,303 @@ +/* + * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLE_DRIVER_INCLUDE_SAFE_EXE_H +#define MAPLE_DRIVER_INCLUDE_SAFE_EXE_H + +/* To start a new process for dex2mpl/mplipa, we need sys/wait on unix-like systems to + * make it complete. 
However, there is not a sys/wait.h for mingw, so we used createprocess + * in windows.h instead + */ +#ifdef _WIN32 +#include +#else +#include +#endif + +#include +#include +#include +#include "error_code.h" +#include "mpl_logging.h" +#include "mpl_options.h" +#include "string_utils.h" +#include "securec.h" + +namespace maple { +class SafeExe { + public: +#ifndef _WIN32 + static ErrorCode HandleCommand(const std::string &cmd, const std::string &args) { + std::vector vectorArgs = ParseArgsVector(cmd, args); + // extra space for exe name and args + char **argv = new char *[vectorArgs.size() + 1]; + // argv[0] is program name + // copy args + for (size_t j = 0; j < vectorArgs.size(); ++j) { + size_t strLength = vectorArgs[j].size(); + argv[j] = new char[strLength + 1]; + strncpy_s(argv[j], strLength + 1, vectorArgs[j].c_str(), strLength); + argv[j][strLength] = '\0'; + } + // end of arguments sentinel is nullptr + argv[vectorArgs.size()] = nullptr; + pid_t pid = fork(); + ErrorCode ret = kErrorNoError; + if (pid == 0) { + // child process + fflush(nullptr); + if (execv(cmd.c_str(), argv) < 0) { + for (size_t j = 0; j < vectorArgs.size(); ++j) { + delete [] argv[j]; + } + delete [] argv; + exit(1); + } + } else { + // parent process + int status = -1; + waitpid(pid, &status, 0); + if (!WIFEXITED(status)) { + LogInfo::MapleLogger() << "Error while Exe, cmd: " << cmd << " args: " << args << '\n'; + ret = kErrorCompileFail; + } else if (WEXITSTATUS(status) != 0) { + LogInfo::MapleLogger() << "Error while Exe, cmd: " << cmd << " args: " << args << '\n'; + ret = kErrorCompileFail; + } + } + + for (size_t j = 0; j < vectorArgs.size(); ++j) { + delete [] argv[j]; + } + delete [] argv; + return ret; + } + + static ErrorCode HandleCommand(const std::string &cmd, + const std::vector &options) { + size_t argIndex; + char **argv; + std::tie(argv, argIndex) = GenerateUnixArguments(cmd, options); + + LogInfo::MapleLogger() << "Run: " << cmd; + for (auto &opt : options) { + LogInfo::MapleLogger() << " " << opt.GetKey() << " " << opt.GetValue(); + } + LogInfo::MapleLogger() << "\n"; + + pid_t pid = fork(); + ErrorCode ret = kErrorNoError; + if (pid == 0) { + // child process + fflush(nullptr); + if (execv(cmd.c_str(), argv) < 0) { + /* last argv[argIndex] is nullptr, so it's j < argIndex (NOT j <= argIndex) */ + for (size_t j = 0; j < argIndex; ++j) { + delete [] argv[j]; + } + delete [] argv; + exit(1); + } + } else { + // parent process + int status = -1; + waitpid(pid, &status, 0); + if (!WIFEXITED(status)) { + ret = kErrorCompileFail; + } else if (WEXITSTATUS(status) != 0) { + ret = kErrorCompileFail; + } + + if (ret != kErrorNoError) { + LogInfo::MapleLogger() << "Error while Exe, cmd: " << cmd << " args: "; + for (auto &opt : options) { + LogInfo::MapleLogger() << opt.GetKey() << " " << opt.GetValue(); + } + LogInfo::MapleLogger() << "\n"; + } + } + + /* last argv[argIndex] is nullptr, so it's j < argIndex (NOT j <= argIndex) */ + for (size_t j = 0; j < argIndex; ++j) { + delete [] argv[j]; + } + delete [] argv; + return ret; + } +#else + static ErrorCode HandleCommand(const std::string &cmd, const std::string &args) { + ErrorCode ret = ErrorCode::kErrorNoError; + + STARTUPINFO startInfo; + PROCESS_INFORMATION pInfo; + DWORD exitCode; + + errno_t retSafe = memset_s(&startInfo, sizeof(STARTUPINFO), 0, sizeof(STARTUPINFO)); + CHECK_FATAL(retSafe == EOK, "memset_s for StartUpInfo failed when HandleComand"); + + startInfo.cb = sizeof(STARTUPINFO); + + char* appName = strdup(cmd.c_str()); + char* cmdLine = 
strdup(args.c_str());
+    CHECK_FATAL(appName != nullptr, "strdup for appName failed");
+    CHECK_FATAL(cmdLine != nullptr, "strdup for cmdLine failed");
+
+    bool success = CreateProcess(appName, cmdLine, NULL, NULL, FALSE,
+                                 NORMAL_PRIORITY_CLASS, NULL, NULL, &startInfo, &pInfo);
+    CHECK_FATAL(success != 0, "CreateProcess failed when HandleCommand");
+
+    WaitForSingleObject(pInfo.hProcess, INFINITE);
+    GetExitCodeProcess(pInfo.hProcess, &exitCode);
+
+    if (exitCode != 0) {
+      LogInfo::MapleLogger() << "Error while Exe, cmd: " << cmd << " args: " << args
+                             << " exitCode: " << exitCode << '\n';
+      ret = ErrorCode::kErrorCompileFail;
+    }
+
+    free(appName);
+    free(cmdLine);
+    appName = nullptr;
+    cmdLine = nullptr;
+    return ret;
+  }
+
+  static ErrorCode HandleCommand(const std::string &cmd,
+                                 const std::vector<MplOption> &options) {
+    ErrorCode ret = ErrorCode::kErrorNoError;
+
+    STARTUPINFO startInfo;
+    PROCESS_INFORMATION pInfo;
+    DWORD exitCode;
+
+    errno_t retSafe = memset_s(&startInfo, sizeof(STARTUPINFO), 0, sizeof(STARTUPINFO));
+    CHECK_FATAL(retSafe == EOK, "memset_s for StartUpInfo failed when HandleCommand");
+
+    startInfo.cb = sizeof(STARTUPINFO);
+    std::string argString;
+    for (auto &opt : options) {
+      argString += opt.GetKey() + " " + opt.GetValue() + " ";
+    }
+
+    char* appName = strdup(cmd.c_str());
+    char* cmdLine = strdup(argString.c_str());
+    CHECK_FATAL(appName != nullptr, "strdup for appName failed");
+    CHECK_FATAL(cmdLine != nullptr, "strdup for cmdLine failed");
+
+    bool success = CreateProcess(appName, cmdLine, NULL, NULL, FALSE,
+                                 NORMAL_PRIORITY_CLASS, NULL, NULL, &startInfo, &pInfo);
+    CHECK_FATAL(success != 0, "CreateProcess failed when HandleCommand");
+
+    WaitForSingleObject(pInfo.hProcess, INFINITE);
+    GetExitCodeProcess(pInfo.hProcess, &exitCode);
+
+    if (exitCode != 0) {
+      LogInfo::MapleLogger() << "Error while Exe, cmd: " << cmd << " args: " << argString
+                             << " exitCode: " << exitCode << '\n';
+      ret = ErrorCode::kErrorCompileFail;
+    }
+
+    free(appName);
+    free(cmdLine);
+    appName = nullptr;
+    cmdLine = nullptr;
+    return ret;
+  }
+#endif
+
+  static ErrorCode Exe(const std::string &cmd, const std::string &args) {
+    LogInfo::MapleLogger() << "Starting: " << cmd << " " << args << '\n';
+    if (StringUtils::HasCommandInjectionChar(cmd) || StringUtils::HasCommandInjectionChar(args)) {
+      LogInfo::MapleLogger() << "Error while Exe, cmd: " << cmd << " args: " << args << '\n';
+      return kErrorCompileFail;
+    }
+    ErrorCode ret = HandleCommand(cmd, args);
+    return ret;
+  }
+
+  static ErrorCode Exe(const std::string &cmd,
+                       const std::vector<MplOption> &options) {
+    if (StringUtils::HasCommandInjectionChar(cmd)) {
+      LogInfo::MapleLogger() << "Error while Exe, cmd: " << cmd << '\n';
+      return kErrorCompileFail;
+    }
+    ErrorCode ret = HandleCommand(cmd, options);
+    return ret;
+  }
+
+ private:
+  static std::vector<std::string> ParseArgsVector(const std::string &cmd, const std::string &args) {
+    std::vector<std::string> tmpArgs;
+    StringUtils::Split(args, tmpArgs, ' ');
+    // remove ' ' in vector
+    for (auto iter = tmpArgs.begin(); iter != tmpArgs.end();) {
+      if (*iter == " " || *iter == "") {
+        iter = tmpArgs.erase(iter);
+      } else {
+        ++iter;
+      }
+    }
+    (void)tmpArgs.insert(tmpArgs.cbegin(), cmd);
+    return tmpArgs;
+  }
+
+  static std::tuple<char **, size_t> GenerateUnixArguments(const std::string &cmd,
+                                                           const std::vector<MplOption> &options) {
+    /* argSize=2, because we reserve 1st arg as exe binary, and another arg as last nullptr arg */
+    size_t argSize = 2;
+
+    /* Calculate how many args are needed.
+ * (* 2) is needed, because we have key and value arguments in each option + */ + argSize += options.size() * 2; + + /* extra space for exe name and args */ + char **argv = new char *[argSize]; + + // argv[0] is program name + // copy args + auto cmdSize = cmd.size() + 1; // +1 for NUL terminal + argv[0] = new char[cmdSize]; + strncpy_s(argv[0], cmdSize, cmd.c_str(), cmdSize); // c_str includes NUL terminal + + /* Allocate and fill all arguments */ + size_t argIndex = 1; // firts index is reserved for cmd, so it starts with 1 + for (auto &opt : options) { + auto key = opt.GetKey(); + auto val = opt.GetValue(); + /* +1 for NUL terminal */ + auto keySize = key.size() + 1; + auto valSize = val.size() + 1; + + if (keySize != 1) { + argv[argIndex] = new char[keySize]; + strncpy_s(argv[argIndex], keySize, key.c_str(), keySize); + ++argIndex; + } + + if (valSize != 1) { + argv[argIndex] = new char[valSize]; + strncpy_s(argv[argIndex], valSize, val.c_str(), valSize); + ++argIndex; + } + } + + // end of arguments sentinel is nullptr + argv[argIndex] = nullptr; + + return std::make_tuple(argv, argIndex); + } +}; +} // namespace maple +#endif // MAPLE_DRIVER_INCLUDE_SAFE_EXE_H diff --git a/src/mapleall/maple_driver/include/triple.h b/src/mapleall/maple_driver/include/triple.h new file mode 100644 index 0000000000000000000000000000000000000000..601025136ede63b26021244186a9cb6dc7230ce2 --- /dev/null +++ b/src/mapleall/maple_driver/include/triple.h @@ -0,0 +1,78 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MAPLE_TRIPLE_H +#define MAPLE_TRIPLE_H + +#include +#include + +#include +#include + +namespace maple { + +class Triple { + public: + /* Currently, only aarch64 is supported */ + enum ArchType { + UnknownArch, + aarch64, + aarch64_be, + LastArchType + }; + + /* Currently, only ILP32 and LP64 are supported */ + enum EnvironmentType { + UnknownEnvironment, + GNU, + GNUILP32, + LastEnvironmentType + }; + + ArchType GetArch() const { return arch; } + EnvironmentType GetEnvironment() const { return environment; } + + bool IsBigEndian() const { + return (GetArch() == ArchType::aarch64_be); + } + + std::string Str() const; + std::string GetArchName() const; + std::string GetEnvironmentName() const; + + static Triple &GetTriple() { + static Triple triple; + return triple; + } + Triple(const Triple &) = delete; + Triple &operator=(const Triple &) = delete; + + void Init(const std::string &target); + void Init(); + + private: + std::string data; + ArchType arch; + EnvironmentType environment; + + Triple() : arch(UnknownArch), environment(UnknownEnvironment) {} + + Triple::ArchType ParseArch(std::string_view archStr) const; + Triple::EnvironmentType ParseEnvironment(std::string_view archStr) const; +}; + +} // namespace maple + +#endif /* MAPLE_TRIPLE_H */ diff --git a/src/mapleall/maple_driver/src/as_compiler.cpp b/src/mapleall/maple_driver/src/as_compiler.cpp new file mode 100644 index 0000000000000000000000000000000000000000..599471adeebce3ecf23793b6f299e1c6766b6795 --- /dev/null +++ b/src/mapleall/maple_driver/src/as_compiler.cpp @@ -0,0 +1,120 @@ +/* + * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "compiler.h" +#include "file_utils.h" +#include "triple.h" +#include "default_options.def" + +namespace maple { +static const std::string kAarch64BeIlp32As = "aarch64_be-linux-gnuilp32-as"; +static const std::string kAarch64BeAs = "aarch64_be-linux-gnu-as"; + +std::string AsCompilerBeILP32::GetBinPath(const MplOptions &mplOptions [[maybe_unused]]) const { + std::string gccPath = FileUtils::SafeGetenv(kGccBePathEnv) + "/"; + const std::string &gccTool = Triple::GetTriple().GetEnvironment() == Triple::EnvironmentType::GNUILP32 ? 
+ kAarch64BeIlp32As : kAarch64BeAs; + std::string gccToolPath = gccPath + gccTool; + + if (!FileUtils::IsFileExists(gccToolPath)) { + LogInfo::MapleLogger(kLlErr) << kGccBePathEnv << " environment variable must be set as the path to " + << gccTool << "\n"; + CHECK_FATAL(false, "%s environment variable must be set as the path to %s\n", + kGccBePathEnv, gccTool.c_str()); + } + + return gccPath; +} + +const std::string &AsCompilerBeILP32::GetBinName() const { + if (Triple::GetTriple().GetEnvironment() == Triple::EnvironmentType::GNUILP32) { + return kAarch64BeIlp32As; + } else { + return kAarch64BeAs; + } +} + +DefaultOption AsCompilerBeILP32::GetDefaultOptions(const MplOptions &options, const Action &action) const { + auto &triple = Triple::GetTriple(); + if (triple.GetArch() != Triple::ArchType::aarch64_be || + triple.GetEnvironment() == Triple::EnvironmentType::UnknownEnvironment) { + CHECK_FATAL(false, "ClangCompilerBeILP32 supports only aarch64_be GNU/GNUILP32 targets\n"); + } + + uint32_t len = 1; // for -o option + if (triple.GetEnvironment() == Triple::EnvironmentType::GNUILP32) { + ++len; // for -mabi=ilp32 + } + DefaultOption defaultOptions = { std::make_unique(len), len }; + + defaultOptions.mplOptions[0].SetKey("-o"); + defaultOptions.mplOptions[0].SetValue(action.GetFullOutputName() + ".o"); + + if (triple.GetEnvironment() == Triple::EnvironmentType::GNUILP32) { + defaultOptions.mplOptions[1].SetKey("-mabi=ilp32"); + defaultOptions.mplOptions[1].SetValue(""); + } + + return defaultOptions; +} + +std::string AsCompiler::GetBinPath(const MplOptions &mplOptions [[maybe_unused]]) const { +#ifdef ANDROID + return "prebuilts/gcc/linux-x86/aarch64/aarch64-linux-android-4.9/bin/"; +#else + return FileUtils::SafeGetenv(kMapleRoot) + "/tools/bin/"; +#endif +} + +const std::string &AsCompiler::GetBinName() const { + return kBinNameAs; +} + +/* the tool name must be the same as exeName field in Descriptor structure */ +const std::string &AsCompiler::GetTool() const { + return kAsFlag; +} + +DefaultOption AsCompiler::GetDefaultOptions(const MplOptions &options, const Action &action) const { + uint32_t len = 1; // for -o option + DefaultOption defaultOptions = { std::make_unique(len), len }; + + defaultOptions.mplOptions[0].SetKey("-o"); + defaultOptions.mplOptions[0].SetValue(action.GetFullOutputName() + ".o"); + + return defaultOptions; +} + +std::string AsCompiler::GetInputFileName(const MplOptions &options [[maybe_unused]], const Action &action) const { + return action.GetFullOutputName() + ".s"; +} + +void AsCompiler::GetTmpFilesToDelete(const MplOptions &mplOptions [[maybe_unused]], const Action &action, + std::vector &tempFiles) const { + tempFiles.push_back(action.GetFullOutputName() + ".o"); +} + +std::unordered_set AsCompiler::GetFinalOutputs(const MplOptions &mplOptions [[maybe_unused]], + const Action &action) const { + auto finalOutputs = std::unordered_set(); + (void)finalOutputs.insert(action.GetFullOutputName() + ".o"); + return finalOutputs; +} + +void AsCompiler::AppendOutputOption(std::vector &finalOptions, + const std::string &name) const { + (void)finalOptions.emplace_back("-o", name); +} + +} // namespace maple diff --git a/src/mapleall/maple_driver/src/clang_compiler.cpp b/src/mapleall/maple_driver/src/clang_compiler.cpp new file mode 100644 index 0000000000000000000000000000000000000000..dcc81219df10a073996f0f5e2a6930270758b579 --- /dev/null +++ b/src/mapleall/maple_driver/src/clang_compiler.cpp @@ -0,0 +1,153 @@ +/* + * Copyright (c) [2021] Huawei Technologies 
Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include +#include +#include +#include "compiler.h" +#include "file_utils.h" +#include "mpl_timer.h" +#include "triple.h" +#include "default_options.def" + +namespace maple { + +DefaultOption ClangCompilerBeILP32::GetDefaultOptions(const MplOptions &options, + const Action &action) const { + auto &triple = Triple::GetTriple(); + if (triple.GetArch() != Triple::ArchType::aarch64_be || + triple.GetEnvironment() == Triple::EnvironmentType::UnknownEnvironment) { + CHECK_FATAL(false, "ClangCompilerBeILP32 supports only aarch64_be GNU/GNUILP32 targets\n"); + } + + uint32_t additionalLen = 4; // -o and --target + uint32_t fullLen = (sizeof(kClangDefaultOptions) / sizeof(MplOption)) + additionalLen; + DefaultOption defaultOptions = { std::make_unique(fullLen), fullLen }; + + defaultOptions.mplOptions[0].SetKey("-o"); + defaultOptions.mplOptions[0].SetValue(action.GetFullOutputName() + ".ast"); + + defaultOptions.mplOptions[1].SetKey("-target"); + defaultOptions.mplOptions[1].SetValue(triple.Str()); + + if (triple.GetEnvironment() == Triple::EnvironmentType::GNUILP32) { + defaultOptions.mplOptions[2].SetKey("--sysroot=" + FileUtils::SafeGetenv(kGccBeIlp32SysrootPathEnv)); + } else { + defaultOptions.mplOptions[2].SetKey("--sysroot=" + FileUtils::SafeGetenv(kGccBeSysrootPathEnv)); + } + defaultOptions.mplOptions[2].SetValue(""); + + defaultOptions.mplOptions[3].SetKey("-U__SIZEOF_INT128__"); + defaultOptions.mplOptions[3].SetValue(""); + + for (uint32_t i = additionalLen, j = 0; i < fullLen; ++i, ++j) { + defaultOptions.mplOptions[i] = kClangDefaultOptions[j]; + } + for (uint32_t i = additionalLen; i < fullLen; ++i) { + defaultOptions.mplOptions[i].SetValue( + FileUtils::AppendMapleRootIfNeeded(defaultOptions.mplOptions[i].GetNeedRootPath(), + defaultOptions.mplOptions[i].GetValue(), + options.GetExeFolder())); + } + + return defaultOptions; +} + +std::string ClangCompiler::GetBinPath(const MplOptions &mplOptions [[maybe_unused]]) const { + return FileUtils::SafeGetenv(kMapleRoot) + "/tools/bin/"; +} + +const std::string &ClangCompiler::GetBinName() const { + return kBinNameClang; +} + +static uint32_t FillSpecialDefaulOpt(std::unique_ptr &opt, + const Action &action) { + uint32_t additionalLen = 1; // for -o option + + auto &triple = Triple::GetTriple(); + if (triple.GetArch() != Triple::ArchType::aarch64 || + triple.GetEnvironment() != Triple::EnvironmentType::GNU) { + CHECK_FATAL(false, "Use -target option to select another toolchain\n"); + } + + additionalLen += 3; // 3 options are filled below + opt = std::make_unique(additionalLen); + + opt[0].SetKey("-isystem"); + opt[0].SetValue(FileUtils::SafeGetenv(kMapleRoot) + + "/tools/gcc-linaro-7.5.0/aarch64-linux-gnu/libc/usr/include"); + + opt[1].SetKey("-isystem"); + opt[1].SetValue(FileUtils::SafeGetenv(kMapleRoot) + + "/tools/gcc-linaro-7.5.0/lib/gcc/aarch64-linux-gnu/7.5.0/include"); + + opt[2].SetKey("-target"); + opt[2].SetValue(triple.Str()); + + /* Set last option as -o option */ + opt[additionalLen - 1].SetKey("-o"); 
+ opt[additionalLen - 1].SetValue(action.GetFullOutputName() + ".ast"); + + return additionalLen; +} + +DefaultOption ClangCompiler::GetDefaultOptions(const MplOptions &options, const Action &action) const { + DefaultOption defaultOptions; + uint32_t fullLen = 0; + uint32_t defaultLen = 0; + uint32_t additionalLen = 0; + std::unique_ptr additionalOptions; + + additionalLen = FillSpecialDefaulOpt(additionalOptions, action); + defaultLen = sizeof(kClangDefaultOptions) / sizeof(MplOption); + fullLen = defaultLen + additionalLen; + + defaultOptions = { std::make_unique(fullLen), fullLen }; + + for (uint32_t i = 0; i < defaultLen; ++i) { + defaultOptions.mplOptions[i] = kClangDefaultOptions[i]; + } + for (uint32_t defInd = defaultLen, additionalInd = 0; + additionalInd < additionalLen; ++additionalInd) { + defaultOptions.mplOptions[defInd++] = additionalOptions[additionalInd]; + } + + for (uint32_t i = 0; i < defaultOptions.length; ++i) { + defaultOptions.mplOptions[i].SetValue( + FileUtils::AppendMapleRootIfNeeded(defaultOptions.mplOptions[i].GetNeedRootPath(), + defaultOptions.mplOptions[i].GetValue(), + options.GetExeFolder())); + } + return defaultOptions; +} + +void ClangCompiler::GetTmpFilesToDelete(const MplOptions &mplOptions [[maybe_unused]], const Action &action, + std::vector &tempFiles) const { + tempFiles.push_back(action.GetFullOutputName() + ".ast"); +} + +std::unordered_set ClangCompiler::GetFinalOutputs(const MplOptions &mplOptions [[maybe_unused]], + const Action &action) const { + std::unordered_set finalOutputs; + (void)finalOutputs.insert(action.GetFullOutputName() + ".ast"); + return finalOutputs; +} + +void ClangCompiler::AppendOutputOption(std::vector &finalOptions, + const std::string &name) const { + (void)finalOptions.emplace_back("-o", name); +} + +} diff --git a/src/mapleall/maple_driver/src/compiler.cpp b/src/mapleall/maple_driver/src/compiler.cpp new file mode 100644 index 0000000000000000000000000000000000000000..1d7befe40713ced5c30ddc0ada227d48caa1efb9 --- /dev/null +++ b/src/mapleall/maple_driver/src/compiler.cpp @@ -0,0 +1,183 @@ +/* + * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
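[Reviewer note, not part of the patch] The GetDefaultOptions overloads above all follow the same pattern: size a heap array from a static option table (sizeof(table) / sizeof(MplOption)), fill a few computed entries such as -o and -target by hand, and copy the table behind them. A condensed stand-alone illustration of that pattern, with a plain struct standing in for MplOption and made-up flags in place of kClangDefaultOptions (whose real contents live in default_options.def):

#include <cstddef>
#include <memory>
#include <string>
#include <utility>

struct Opt {
  std::string key;
  std::string value;
};

// Stand-in for a static default-option table; these flags are only examples.
static const Opt kDefaults[] = {
    {"-emit-ast", ""},
    {"-fno-common", ""},
};

std::pair<std::unique_ptr<Opt[]>, size_t> BuildOptions(const std::string &outFile,
                                                       const std::string &targetTriple) {
  constexpr size_t kComputed = 2;  // -o and -target are filled by hand below
  constexpr size_t kDefaultLen = sizeof(kDefaults) / sizeof(kDefaults[0]);
  const size_t fullLen = kComputed + kDefaultLen;

  auto opts = std::make_unique<Opt[]>(fullLen);
  opts[0] = {"-o", outFile};
  opts[1] = {"-target", targetTriple};
  for (size_t i = 0; i < kDefaultLen; ++i) {
    opts[kComputed + i] = kDefaults[i];  // static defaults follow the computed entries
  }
  return {std::move(opts), fullLen};
}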
+ */ +#include "compiler.h" +#include "driver_options.h" +#include "file_utils.h" +#include "safe_exe.h" +#include "mpl_timer.h" + +namespace maple { + +int Compiler::Exe(const MplOptions &mplOptions, + const std::vector &options) const { + std::ostringstream ostrStream; + ostrStream << GetBinPath(mplOptions) << GetBinName(); + std::string binPath = ostrStream.str(); + return SafeExe::Exe(binPath, options); +} + +std::string Compiler::GetBinPath(const MplOptions &mplOptions) const { +#ifdef MAPLE_PRODUCT_EXECUTABLE // build flag -DMAPLE_PRODUCT_EXECUTABLE + std::string binPath = std::string(MAPLE_PRODUCT_EXECUTABLE); + if (binPath.empty()) { + binPath = mplOptions.GetExeFolder(); + } else { + binPath = binPath + kFileSeperatorChar; + } +#else + std::string binPath = mplOptions.GetExeFolder(); +#endif + return binPath; +} + +ErrorCode Compiler::Compile(MplOptions &options, const Action &action, + std::unique_ptr &theModule [[maybe_unused]]) { + MPLTimer timer = MPLTimer(); + LogInfo::MapleLogger() << "Starting " << GetName() << '\n'; + timer.Start(); + + std::vector generatedOptions = MakeOption(options, action); + if (generatedOptions.empty()) { + return kErrorInvalidParameter; + } + if (Exe(options, generatedOptions) != 0) { + return kErrorCompileFail; + } + timer.Stop(); + LogInfo::MapleLogger() << (GetName() + " consumed ") << timer.Elapsed() << "s\n"; + return kErrorNoError; +} + +std::vector Compiler::MakeOption(const MplOptions &options, + const Action &action) const { + std::vector finalOptions; + std::vector defaultOptions = MakeDefaultOptions(options, action); + + AppendInputsAsOptions(finalOptions, options, action); + AppendDefaultOptions(finalOptions, defaultOptions, opts::debug); + AppendExtraOptions(finalOptions, options, opts::debug, action); + + return finalOptions; +} + +void Compiler::AppendDefaultOptions(std::vector &finalOptions, + const std::vector &defaultOptions, + bool isDebug) const { + for (const auto &defaultIt : defaultOptions) { + finalOptions.push_back(defaultIt); + } + + if (isDebug) { + LogInfo::MapleLogger() << Compiler::GetName() << " Default Options: "; + for (const auto &defaultIt : defaultOptions) { + LogInfo::MapleLogger() << defaultIt.GetKey() << " " + << defaultIt.GetValue(); + } + LogInfo::MapleLogger() << '\n'; + } +} + +void Compiler::AppendExtraOptions(std::vector &finalOptions, const MplOptions &options, + bool isDebug, const Action &action) const { + const std::string &binName = GetTool(); + + if (isDebug) { + LogInfo::MapleLogger() << Compiler::GetName() << " Extra Options: "; + } + + /* Append options setting by: --run=binName --option="-opt1 -opt2" */ + auto &exeOptions = options.GetExeOptions(); + auto it = exeOptions.find(binName); + if (it != exeOptions.end()) { + for (auto &opt : it->second) { + (void)finalOptions.emplace_back(opt, ""); + if (isDebug) { + LogInfo::MapleLogger() << opt << " "; + } + } + } + + maplecl::OptionCategory *category = options.GetCategory(binName); + ASSERT(category != nullptr, "Undefined tool: %s", binName.data()); + + /* Append options setting directly for special category. 
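[Reviewer note, not part of the patch] Compiler::Compile above is essentially a thin "assemble the option list, shell out, report the elapsed time" wrapper around SafeExe::Exe. A self-contained sketch of that control flow, with std::chrono in place of MPLTimer and a plain callable in place of the external process launch (RunStep and its parameters are illustrative only):

#include <chrono>
#include <functional>
#include <iostream>
#include <string>
#include <vector>

enum class Error { kNone, kInvalidParameter, kCompileFail };

// runTool stands in for SafeExe::Exe; it receives the final argument list and
// returns the child's exit status.
Error RunStep(const std::string &name,
              const std::vector<std::string> &args,
              const std::function<int(const std::vector<std::string> &)> &runTool) {
  std::cout << "Starting " << name << '\n';
  const auto start = std::chrono::steady_clock::now();

  if (args.empty()) {
    return Error::kInvalidParameter;  // nothing to run, mirrors the empty-option check above
  }
  if (runTool(args) != 0) {
    return Error::kCompileFail;
  }

  const std::chrono::duration<double> elapsed = std::chrono::steady_clock::now() - start;
  std::cout << name << " consumed " << elapsed.count() << "s\n";
  return Error::kNone;
}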
Example: --verbose */ + for (const auto &opt : category->GetEnabledOptions()) { + for (const auto &val : opt->GetRawValues()) { + if (opt->GetEqualType() == maplecl::EqualType::kWithEqual) { + (void)finalOptions.emplace_back(opt->GetName() + "=" + val, ""); + } else { + (void)finalOptions.emplace_back(opt->GetName(), val); + } + + if (isDebug) { + LogInfo::MapleLogger() << opt->GetName() << " " << val << " "; + } + } + } + + /* output file can not be specified for several last actions. As exaple: + * If last actions are assembly tool for 2 files (to get file1.o, file2.o), + * we can not have one output name for them. */ + if (opts::output.IsEnabledByUser() && options.GetActions().size() == 1) { + /* Set output file for last compilation tool */ + if (&action == options.GetActions()[0].get()) { + /* the tool may not support "-o" for output option */ + AppendOutputOption(finalOptions, opts::output.GetValue()); + } + } + + if (isDebug) { + LogInfo::MapleLogger() << '\n'; + } +} + +void Compiler::ReplaceOrInsertOption(std::vector &finalOptions, + const std::string &key, const std::string &value) const { + bool wasFound = false; + for (auto &opt : finalOptions) { + if (opt.GetKey() == key) { + opt.SetValue(value); + wasFound = true; + } + } + + if (!wasFound) { + (void)finalOptions.emplace_back(MplOption(key, value)); + } +} + +void Compiler::AppendInputsAsOptions(std::vector &finalOptions, + const MplOptions &mplOptions, const Action &action) const { + std::vector splittedInputFileNames; + std::string inputFileNames = GetInputFileName(mplOptions, action); + StringUtils::Split(inputFileNames, splittedInputFileNames, ' '); + + for (auto &inputFileName : splittedInputFileNames) { + finalOptions.emplace_back(MplOption(inputFileName, "")); + } +} + +std::vector Compiler::MakeDefaultOptions(const MplOptions &options, + const Action &action) const { + DefaultOption rawDefaultOptions = GetDefaultOptions(options, action); + std::vector defaultOptions; + if (rawDefaultOptions.mplOptions != nullptr) { + for (uint32_t i = 0; i < rawDefaultOptions.length; ++i) { + defaultOptions.push_back(rawDefaultOptions.mplOptions[i]); + } + } + return defaultOptions; +} +} // namespace maple diff --git a/src/mapleall/maple_driver/src/compiler_factory.cpp b/src/mapleall/maple_driver/src/compiler_factory.cpp new file mode 100644 index 0000000000000000000000000000000000000000..1deec2920d56b6cb6adf4d18ee4a19910d6a6d3a --- /dev/null +++ b/src/mapleall/maple_driver/src/compiler_factory.cpp @@ -0,0 +1,181 @@ +/* + * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
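[Reviewer note, not part of the patch] ReplaceOrInsertOption above implements a small "update if present, otherwise append" helper over the flat option vector. The same idea over a vector of key/value pairs (a stand-in for std::vector<MplOption>):

#include <string>
#include <utility>
#include <vector>

using OptList = std::vector<std::pair<std::string, std::string>>;

// Overwrite the value of every existing entry with a matching key; if the key
// does not occur at all, append a new entry at the end.
void ReplaceOrInsert(OptList &options, const std::string &key, const std::string &value) {
  bool found = false;
  for (auto &opt : options) {
    if (opt.first == key) {
      opt.second = value;
      found = true;
    }
  }
  if (!found) {
    options.emplace_back(key, value);
  }
}

For example, ReplaceOrInsert(opts, "-o", "out.o") either retargets every existing "-o" entry or appends one.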
+ */ +#include "compiler_factory.h" +#include +#include "driver_options.h" +#include "file_utils.h" +#include "string_utils.h" +#include "mpl_logging.h" + +using namespace maple; + +CompilerFactory &CompilerFactory::GetInstance() { + static CompilerFactory instance; + return instance; +} + +ErrorCode CompilerFactory::DeleteTmpFiles(const MplOptions &mplOptions, + const std::vector &tempFiles) const { + int ret = 0; + for (const std::string &tmpFile : tempFiles) { + bool isSave = false; + for (auto saveFile : mplOptions.GetSaveFiles()) { + if (!saveFile.empty() && std::regex_match(tmpFile, std::regex(StringUtils::Replace(saveFile, "*", ".*?")))) { + isSave = true; + break; + } + } + + auto &inputs = mplOptions.GetInputFiles(); + if (!isSave && (std::find(inputs.begin(), inputs.end(), tmpFile) == inputs.end())) { + bool isNeedRemove = true; + /* If we compile several files we can have several last Actions, + * so we need to NOT remove output files for each last Action. + */ + for (auto &lastAction : mplOptions.GetActions()) { + auto finalOutputs = lastAction->GetCompiler()->GetFinalOutputs(mplOptions, *lastAction); + /* do not remove output files */ + if (finalOutputs.find(tmpFile) != finalOutputs.end()) { + isNeedRemove = false; + } + } + + if (isNeedRemove) { + (void)FileUtils::Remove(tmpFile); + } + } + } + return ret == 0 ? kErrorNoError : kErrorFileNotFound; +} + +Toolchain *CompilerFactory::GetToolChain() { + if (toolchain == nullptr) { + if (maple::Triple::GetTriple().GetArch() == Triple::ArchType::aarch64_be) { + toolchain = std::make_unique(); + } else { + toolchain = std::make_unique(); + } + } + + return toolchain.get(); +} + +ErrorCode CompilerFactory::Select(Action &action, std::vector &selectedActions) { + ErrorCode ret = kErrorNoError; + + /* Traverse Action tree recursively and select compilers in + * "from leaf(clang) to root(ld)" order */ + for (const std::unique_ptr &a : action.GetInputActions()) { + if (a == nullptr) { + LogInfo::MapleLogger(kLlErr) << "Action is not Initialized\n"; + return kErrorToolNotFound; + } + + ret = Select(*a, selectedActions); + if (ret != kErrorNoError) { + return ret; + } + } + + Toolchain *toolChain = GetToolChain(); + if (toolChain == nullptr) { + LogInfo::MapleLogger(kLlErr) << "Wrong ToolChain\n"; + return kErrorToolNotFound; + } + Compiler *compiler = toolChain->Find(action.GetTool()); + + if (compiler == nullptr) { + if (action.GetTool() != "input") { + LogInfo::MapleLogger(kLlErr) << "Fatal error: " << action.GetTool() + << " tool is not supported" << "\n"; + LogInfo::MapleLogger(kLlErr) << "Supported Tool: "; + + auto print = [](const auto &supportedComp) { std::cout << " " << supportedComp.first; }; + (void)std::for_each(toolChain->GetSupportedCompilers().begin(), + toolChain->GetSupportedCompilers().end(), print); + LogInfo::MapleLogger(kLlErr) << "\n"; + + return kErrorToolNotFound; + } + } else { + action.SetCompiler(compiler); + selectedActions.push_back(&action); + } + + return ret; +} + +ErrorCode CompilerFactory::Select(const MplOptions &mplOptions, std::vector &selectedActions) { + for (const std::unique_ptr &action : mplOptions.GetActions()) { + if (action == nullptr) { + LogInfo::MapleLogger(kLlErr) << "Action is not Initialized\n"; + return kErrorToolNotFound; + } + ErrorCode ret = Select(*action, selectedActions); + if (ret != kErrorNoError) { + return ret; + } + } + + return selectedActions.empty() ? 
kErrorToolNotFound : kErrorNoError; +} + +ErrorCode CompilerFactory::Compile(MplOptions &mplOptions) { + if (compileFinished) { + LogInfo::MapleLogger() << + "Failed! Compilation has been completed in previous time and multi-instance compilation is not supported\n"; + return kErrorCompileFail; + } + + /* Actions owner is MplOption, so while MplOption is alive we can use raw pointers here */ + std::vector actions; + ErrorCode ret = Select(mplOptions, actions); + if (ret != kErrorNoError) { + return ret; + } + + for (auto *action : actions) { + if (action == nullptr) { + LogInfo::MapleLogger() << "Failed! Compiler is null." << "\n"; + return kErrorCompileFail; + } + + Compiler *compiler = action->GetCompiler(); + if (compiler == nullptr) { + return kErrorToolNotFound; + } + + ret = compiler->Compile(mplOptions, *action, this->theModule); + if (ret != kErrorNoError) { + return ret; + } + } + if (opts::debug) { + mplOptions.PrintDetailCommand(false); + } + // Compiler finished + compileFinished = true; + + if (!opts::saveTempOpt.IsEnabledByUser() || !mplOptions.GetSaveFiles().empty()) { + std::vector tmpFiles; + + for (auto *action : actions) { + action->GetCompiler()->GetTmpFilesToDelete(mplOptions, *action, tmpFiles); + } + + ret = DeleteTmpFiles(mplOptions, tmpFiles); + } + return ret; +} diff --git a/src/mapleall/maple_driver/src/cpp2mpl_compiler.cpp b/src/mapleall/maple_driver/src/cpp2mpl_compiler.cpp new file mode 100644 index 0000000000000000000000000000000000000000..501e88a0b794f9548d515dadfd1767f1b88ee874 --- /dev/null +++ b/src/mapleall/maple_driver/src/cpp2mpl_compiler.cpp @@ -0,0 +1,80 @@ +/* + * Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
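[Reviewer note, not part of the patch] DeleteTmpFiles above decides whether a temporary file was requested via --save-temps=... by turning each saved pattern into a regular expression (the '*' wildcard becomes ".*?") and matching the candidate path against it. A minimal sketch of that test, with a local string-replace helper standing in for StringUtils::Replace:

#include <regex>
#include <string>
#include <vector>

// Replace every occurrence of `from` (assumed non-empty) in `str` with `to`.
static std::string ReplaceAll(std::string str, const std::string &from, const std::string &to) {
  for (size_t pos = 0; (pos = str.find(from, pos)) != std::string::npos; pos += to.size()) {
    str.replace(pos, from.size(), to);
  }
  return str;
}

// True if `file` matches any user-supplied --save-temps pattern such as "*.ast" or "*.o".
bool IsSavedFile(const std::string &file, const std::vector<std::string> &savePatterns) {
  for (const auto &pattern : savePatterns) {
    if (pattern.empty()) {
      continue;
    }
    if (std::regex_match(file, std::regex(ReplaceAll(pattern, "*", ".*?")))) {
      return true;
    }
  }
  return false;
}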
+ */ +#include "compiler.h" +#include "file_utils.h" +#include "mpl_logging.h" +#include "default_options.def" + +namespace maple { +std::string Cpp2MplCompiler::GetBinPath(const MplOptions &mplOptions [[maybe_unused]]) const{ + return FileUtils::SafeGetenv(kMapleRoot) + "/output/" + + FileUtils::SafeGetenv("MAPLE_BUILD_TYPE") + "/bin/"; +} + +const std::string &Cpp2MplCompiler::GetBinName() const { + return kBinNameCpp2mpl; +} + +std::string Cpp2MplCompiler::GetInputFileName(const MplOptions &options [[maybe_unused]], const Action &action) const { + if (action.IsItFirstRealAction()) { + return action.GetInputFile(); + } + // Get base file name + auto idx = action.GetOutputName().find(".ast"); + std::string outputName = action.GetOutputName(); + if (idx != std::string::npos) { + outputName = action.GetOutputName().substr(0, idx); + } + return action.GetOutputFolder() + outputName + ".ast"; +} + +DefaultOption Cpp2MplCompiler::GetDefaultOptions(const MplOptions &options, + const Action &action [[maybe_unused]]) const { + uint32_t len = sizeof(kCpp2MplDefaultOptionsForAst) / sizeof(MplOption); + DefaultOption defaultOptions = { std::make_unique(len), len }; + + for (uint32_t i = 0; i < len; ++i) { + defaultOptions.mplOptions[i] = kCpp2MplDefaultOptionsForAst[i]; + } + + for (uint32_t i = 0; i < defaultOptions.length; ++i) { + defaultOptions.mplOptions[i].SetValue( + FileUtils::AppendMapleRootIfNeeded(defaultOptions.mplOptions[i].GetNeedRootPath(), + defaultOptions.mplOptions[i].GetValue(), + options.GetExeFolder())); + } + return defaultOptions; +} + +void Cpp2MplCompiler::GetTmpFilesToDelete(const MplOptions &mplOptions [[maybe_unused]], const Action &action, + std::vector &tempFiles) const { + tempFiles.push_back(action.GetFullOutputName() + ".mpl"); + tempFiles.push_back(action.GetFullOutputName() + ".mplt"); +} + +std::unordered_set Cpp2MplCompiler::GetFinalOutputs(const MplOptions &mplOptions [[maybe_unused]], + const Action &action) const { + std::unordered_set finalOutputs; + (void)finalOutputs.insert(action.GetFullOutputName() + ".mpl"); + (void)finalOutputs.insert(action.GetFullOutputName() + ".mplt"); + return finalOutputs; +} + +void Cpp2MplCompiler::AppendOutputOption(std::vector &finalOptions, + const std::string &name) const { + (void)finalOptions.emplace_back("-o", name); +} + +} // namespace maple diff --git a/src/mapleall/maple_driver/src/dex2mpl_compiler.cpp b/src/mapleall/maple_driver/src/dex2mpl_compiler.cpp new file mode 100644 index 0000000000000000000000000000000000000000..a9c7bf1536ed3ff7ce4ec97bf2fbecc52aa8397b --- /dev/null +++ b/src/mapleall/maple_driver/src/dex2mpl_compiler.cpp @@ -0,0 +1,263 @@ +/* + * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
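[Reviewer note, not part of the patch] Cpp2MplCompiler::GetInputFileName above reconstructs its input path by stripping a trailing ".ast" from the previous stage's output name before re-appending it. A small sketch of that suffix handling (RemoveSuffix/DeriveAstInput are illustrative helpers; the driver uses find() on the first match, while this sketch anchors the check at the end of the name):

#include <string>

// Drop `suffix` from the end of `name` if present; otherwise return `name` unchanged.
static std::string RemoveSuffix(const std::string &name, const std::string &suffix) {
  if (name.size() >= suffix.size() &&
      name.compare(name.size() - suffix.size(), suffix.size(), suffix) == 0) {
    return name.substr(0, name.size() - suffix.size());
  }
  return name;
}

// E.g. DeriveAstInput("out/", "foo.ast") and DeriveAstInput("out/", "foo")
// both yield "out/foo.ast".
std::string DeriveAstInput(const std::string &outputFolder, const std::string &outputName) {
  return outputFolder + RemoveSuffix(outputName, ".ast") + ".ast";
}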
+ */ +#include +#include "compiler.h" +#include "default_options.def" +#ifdef INTERGRATE_DRIVER +#include "dex2mpl_runner.h" +#include "mir_function.h" +#endif + +namespace maple { +const std::string &Dex2MplCompiler::GetBinName() const { + return kBinNameDex2mpl; +} + +DefaultOption Dex2MplCompiler::GetDefaultOptions(const MplOptions &options, + const Action &action [[maybe_unused]]) const { + uint32_t len = 0; + MplOption *kDex2mplDefaultOptions = nullptr; + + if (opts::o0) { + len = sizeof(kDex2mplDefaultOptionsO0) / sizeof(MplOption); + kDex2mplDefaultOptions = kDex2mplDefaultOptionsO0; + } else if (opts::o2) { + len = sizeof(kDex2mplDefaultOptionsO2) / sizeof(MplOption); + kDex2mplDefaultOptions = kDex2mplDefaultOptionsO2; + } + + if (kDex2mplDefaultOptions == nullptr) { + return DefaultOption(); + } + + DefaultOption defaultOptions = { std::make_unique(len), len }; + for (uint32_t i = 0; i < len; ++i) { + defaultOptions.mplOptions[i] = kDex2mplDefaultOptions[i]; + } + + for (unsigned int i = 0; i < defaultOptions.length; ++i) { + defaultOptions.mplOptions[i].SetValue( + FileUtils::AppendMapleRootIfNeeded(defaultOptions.mplOptions[i].GetNeedRootPath(), + defaultOptions.mplOptions[i].GetValue(), + options.GetExeFolder())); + } + return defaultOptions; +} + +void Dex2MplCompiler::GetTmpFilesToDelete(const MplOptions &mplOptions [[maybe_unused]], const Action &action, + std::vector &tempFiles) const { + tempFiles.push_back(action.GetFullOutputName() + ".mpl"); + tempFiles.push_back(action.GetFullOutputName() + ".mplt"); +} + +std::unordered_set Dex2MplCompiler::GetFinalOutputs(const MplOptions &mplOptions [[maybe_unused]], + const Action &action) const { + auto finalOutputs = std::unordered_set(); + (void)finalOutputs.insert(action.GetFullOutputName() + ".mpl"); + (void)finalOutputs.insert(action.GetFullOutputName() + ".mplt"); + return finalOutputs; +} + +#ifdef INTERGRATE_DRIVER +void Dex2MplCompiler::PostDex2Mpl(std::unique_ptr &theModule) const { + // for each function + for (auto *func : theModule->GetFunctionList()) { + if (func == nullptr) { + continue; + } + + MIRSymbolTable *symTab = func->GetSymTab(); + // for each symbol + for (size_t i = 0; i != symTab->GetSymbolTableSize(); ++i) { + MIRSymbol *currSymbol = symTab->GetSymbolFromStIdx(i); + if (currSymbol == nullptr) { + continue; + } + // (1) replace void ptr with void ref + if (theModule->IsJavaModule() && currSymbol->GetType() == GlobalTables::GetTypeTable().GetVoidPtr()) { + MIRType *voidRef = GlobalTables::GetTypeTable().GetOrCreatePointerType( + *GlobalTables::GetTypeTable().GetVoid(), PTY_ref); + currSymbol->SetTyIdx(voidRef->GetTypeIndex()); + } + // (2) replace String ref with String ptr if symbol's name starts with "L_STR" + if (currSymbol->GetType()->GetKind() == kTypePointer && currSymbol->GetName().find("L_STR") == 0) { + MIRType *ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(currSymbol->GetTyIdx()); + auto *ptrTy = static_cast(ty->CopyMIRTypeNode()); + ASSERT(ptrTy != nullptr, "null ptr check"); + ptrTy->SetPrimType(PTY_ptr); + TyIdx newTyIdx = GlobalTables::GetTypeTable().GetOrCreateMIRType(ptrTy); + delete ptrTy; + currSymbol->SetTyIdx(newTyIdx); + } + } + + // (3) reset pregIndex of pregTab if function has body + if (func->GetBody() != nullptr) { + uint32 maxPregNo = 0; + for (uint32 i = 0; i < func->GetFormalCount(); ++i) { + MIRSymbol *formalSt = func->GetFormal(i); + if (formalSt->IsPreg()) { + // no special register appears in the formals + uint32 pRegNo = 
static_cast(formalSt->GetPreg()->GetPregNo()); + if (pRegNo > maxPregNo) { + maxPregNo = pRegNo; + } + } + } + if (func->GetPregTab() == nullptr) { + continue; + } + func->GetPregTab()->SetIndex(maxPregNo + 1); + } + } + + // (4) fix unmatched MIRConst type of global symbols + for (size_t i = 0; i != GlobalTables::GetGsymTable().GetSymbolTableSize(); ++i) { + MIRSymbol *symbol = GlobalTables::GetGsymTable().GetSymbolFromStidx(i); + if (symbol == nullptr || !symbol->IsConst()) { + continue; + } + TyIdx stTyIdx = symbol->GetTyIdx(); + if (stTyIdx == 0) { + continue; + } + MIRType *stType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(stTyIdx); + MIRConst *mirConst = symbol->GetKonst(); + + if (mirConst == nullptr || mirConst->GetKind() != kConstInt) { + continue; + } + if (static_cast(mirConst)->GetValue() != 0) { + continue; + } + MIRType &valueType = mirConst->GetType(); + if (valueType.GetTypeIndex() != stTyIdx) { + auto *newIntConst = theModule->GetMemPool()->New(0, *stType); + symbol->SetValue({newIntConst}); + } + } + + // (5) remove type attr `rcunowned` of local symbol in rclocalunowned function specified by dex2mpl + for (auto *func : theModule->GetFunctionList()) { + if (func == nullptr || !func->GetAttr(FUNCATTR_rclocalunowned)) { + continue; + } + MIRSymbolTable *symTab = func->GetSymTab(); + for (size_t i = 0; i != symTab->GetSymbolTableSize(); ++i) { + MIRSymbol *symbol = symTab->GetSymbolFromStIdx(i); + if (symbol == nullptr) { + continue; + } + if (symbol->GetAttr(ATTR_rcunowned)) { + symbol->ResetAttr(ATTR_rcunowned); + } + } + } + + // 1: MIRStructType::isImported has different meaning for dex2mpl and binary mplt importer. + // for dex2mpl, `isImported` means whether a type is imported from mplt file instead of dex file, so all types from + // mplt are marked imported. But for binary mplt importer, `isImported` means whether a type is loaded successfully, + // so only complete types are marked imported. + // The workaround is to reset `isImported` according to the completeness of a type. 
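[Reviewer note, not part of the patch] Dex2MplCompiler::GetDefaultOptions above picks one of two static option tables depending on whether -O0 or -O2 was requested, and falls back to an empty DefaultOption otherwise. The selection idiom in isolation (the table contents below are placeholders, not the real kDex2mplDefaultOptionsO0/O2 entries):

#include <cstddef>
#include <string>
#include <vector>

struct Opt {
  std::string key;
  std::string value;
};

static const Opt kTableO0[] = {{"--placeholder-o0", ""}};
static const Opt kTableO2[] = {{"--placeholder-o0", ""}, {"--placeholder-o2", ""}};

// Return a copy of the table matching the requested optimization level, or an
// empty list when no table applies (mirroring the `return DefaultOption();` path).
std::vector<Opt> SelectDefaults(bool isO0, bool isO2) {
  const Opt *table = nullptr;
  size_t len = 0;
  if (isO0) {
    table = kTableO0;
    len = sizeof(kTableO0) / sizeof(kTableO0[0]);
  } else if (isO2) {
    table = kTableO2;
    len = sizeof(kTableO2) / sizeof(kTableO2[0]);
  }
  return table == nullptr ? std::vector<Opt>() : std::vector<Opt>(table, table + len);
}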
+ for (MIRType *type : GlobalTables::GetTypeTable().GetTypeTable()) { + if (type == nullptr) { + continue; + } + MIRTypeKind typeKind = type->GetKind(); + if (typeKind == kTypeStructIncomplete || typeKind == kTypeClassIncomplete || typeKind == kTypeInterfaceIncomplete) { + auto *structType = static_cast(type); + structType->SetIsImported(false); + } else if (typeKind == kTypeClass || typeKind == kTypeInterface) { + auto *structType = static_cast(type); + structType->SetIsImported(true); + } + } +} +#endif + +void Dex2MplCompiler::PrintCommand(const MplOptions &options, const Action &action) const { + std::string runStr = "--run="; + std::string optionStr = "--option=\""; + std::string connectSym = ""; + if (options.GetExeOptions().find(kBinNameDex2mpl) != options.GetExeOptions().end()) { + runStr += "dex2mpl"; + auto inputDex2mplOptions = options.GetExeOptions().find(kBinNameDex2mpl); + for (auto &opt : inputDex2mplOptions->second) { + optionStr += " --" + opt; + } + } + optionStr += "\""; + LogInfo::MapleLogger() << "Starting:" << options.GetExeFolder() << "maple " << runStr << " " << optionStr + << " --infile " << GetInputFileName(options, action) << '\n'; +} + +#ifdef INTERGRATE_DRIVER +bool Dex2MplCompiler::MakeDex2mplOptions(const MplOptions &options) { + Dex2mplOptions &dex2mplOptions = Dex2mplOptions::GetInstance(); + dex2mplOptions.LoadDefault(); + auto it = options.GetExeOptions().find(kBinNameDex2mpl); + if (it == options.GetExeOptions().end()) { + LogInfo::MapleLogger() << "no dex2mpl input options\n"; + return false; + } + bool result = dex2mplOptions.SolveOptions(it->second, options.HasSetDebugFlag()); + if (result == false) { + LogInfo::MapleLogger() << "Meet error dex2mpl options\n"; + return false; + } + return true; +} + +ErrorCode Dex2MplCompiler::Compile(MplOptions &options, const Action &action, + std::unique_ptr &theModule) { + Dex2mplOptions &dex2mplOptions = Dex2mplOptions::GetInstance(); + bool result = MakeDex2mplOptions(options); + if (!result) { + return ErrorCode::kErrorCompileFail; + } + // .dex + std::string dexFileName = action.GetInputFile(); + theModule = std::make_unique(dexFileName); + + const auto &runningExes = options.GetRunningExes(); + bool isDex2mplFinalExe = (runningExes[runningExes.size() - 1] == kBinNameDex2mpl); + std::unique_ptr dex2mpl = std::make_unique(dexFileName, dex2mplOptions, + std::move(theModule), options.HasSetSaveTmps(), isDex2mplFinalExe); + if (dex2mpl == nullptr) { + ERR(kLncErr, "new Dex2mplRunner failed."); + return ErrorCode::kErrorCompileFail; + } + LogInfo::MapleLogger() << "Starting dex2mpl" << '\n'; + int ret = dex2mpl->Init(); + if (ret != 0) { + return ErrorCode::kErrorCompileFail; + } + ret = dex2mpl->Run(); + if (ret != 0) { + ERR(kLncErr, "(ToIDEUser)dex2mpl failed."); + return ErrorCode::kErrorCompileFail; + } + // Check that whether the kBinNameDex2mpl is the final compiler + // If not, we need to call PostDex2Mpl() to deal with some differences in theModule to + // adapt to the needs of maplecomb + if (runningExes[runningExes.size() - 1] != kBinNameDex2mpl) { + dex2mpl->MoveMirModule(theModule); + PostDex2Mpl(theModule); + } + PrintCommand(options, action); + return ErrorCode::kErrorNoError; +} +#endif +} // namespace maple diff --git a/src/mapleall/maple_driver/src/driver_options.cpp b/src/mapleall/maple_driver/src/driver_options.cpp new file mode 100644 index 0000000000000000000000000000000000000000..1f035bc54bad20b2658233ac3e97edb44f40af0b --- /dev/null +++ b/src/mapleall/maple_driver/src/driver_options.cpp @@ 
-0,0 +1,326 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ + +#include "driver_options.h" +#include "cl_option.h" + +namespace opts { + +/* ##################### BOOL Options ############################################################### */ + +maplecl::Option version({"--version", "-v"}, + " --version [command] \tPrint version and exit.\n", + {driverCategory}); + +maplecl::Option ignoreUnkOpt({"--ignore-unknown-options"}, + " --ignore-unknown-options \tIgnore unknown compilation options\n", + {driverCategory}); + +maplecl::Option o0({"--O0", "-O0"}, + " -O0 \tNo optimization. (Default)\n", + {driverCategory}); + +maplecl::Option o1({"--O1", "-O1"}, + " -O1 \tDo some optimization.\n", + {driverCategory}); + +maplecl::Option o2({"--O2", "-O2"}, + " -O2 \tDo more optimization.\n", + {driverCategory}); + +maplecl::Option os({"--Os", "-Os"}, + " -Os \tOptimize for size, based on O2.\n", + {driverCategory, hir2mplCategory}); + +maplecl::Option verify({"--verify"}, + " --verify \tVerify mpl file\n", + {driverCategory, dex2mplCategory, mpl2mplCategory}); + +maplecl::Option decoupleStatic({"--decouple-static", "-decouple-static"}, + " --decouple-static \tDecouple the static method and field\n" + " --no-decouple-static \tDon't decouple the static method and field\n", + {driverCategory, dex2mplCategory, meCategory, mpl2mplCategory}, + maplecl::DisableWith("--no-decouple-static")); + +maplecl::Option gcOnly({"--gconly", "-gconly"}, + " --gconly \tMake gconly is enable\n" + " --no-gconly \tDon't make gconly is enable\n", + {driverCategory, dex2mplCategory, meCategory, + mpl2mplCategory, cgCategory}, + maplecl::DisableWith("--no-gconly")); + +maplecl::Option timePhase({"-time-phases"}, + " -time-phases \tTiming phases and print percentages\n", + {driverCategory}); + +maplecl::Option genMeMpl({"--genmempl"}, + " --genmempl \tGenerate me.mpl file\n", + {driverCategory}); + +maplecl::Option compileWOLink({"-c"}, + " -c \tCompile the source files without linking\n", + {driverCategory}); + +maplecl::Option genVtable({"--genVtableImpl"}, + " --genVtableImpl \tGenerate VtableImpl.mpl file\n", + {driverCategory}); + +maplecl::Option verbose({"-verbose"}, + " -verbose \tPrint informations\n", + {driverCategory, jbc2mplCategory, hir2mplCategory, + meCategory, mpl2mplCategory, cgCategory}); + +maplecl::Option debug({"--debug"}, + " --debug \tPrint debug info.\n", + {driverCategory}); + +maplecl::Option withDwarf({"-g"}, + " --debug \tPrint debug info.\n", + {driverCategory}); + +maplecl::Option withIpa({"--with-ipa"}, + " --with-ipa \tRun IPA when building\n" + " --no-with-ipa \n", + {driverCategory}, + maplecl::DisableWith("--no-with-ipa")); + +maplecl::Option npeNoCheck({"--no-npe-check"}, + " --no-npe-check \tDisable null pointer check (Default)\n", + {driverCategory}); + +maplecl::Option npeStaticCheck({"--npe-check-static"}, + " --npe-check-static \tEnable null pointer static check only\n", + {driverCategory}); + +maplecl::Option npeDynamicCheck({"--npe-check-dynamic"}, + " 
--npe-check-dynamic \tEnable null " + "pointer dynamic check with static warning\n", + {driverCategory}); + +maplecl::Option npeDynamicCheckSilent({"--npe-check-dynamic-silent"}, + " --npe-check-dynamic-silent \tEnable null pointer dynamic " + "without static warning\n", + {driverCategory}); + +maplecl::Option npeDynamicCheckAll({"--npe-check-dynamic-all"}, + " --npe-check-dynamic-all \tKeep dynamic check before dereference, " + "used with --npe-check-dynamic* options\n", + {driverCategory}); + +maplecl::Option boundaryNoCheck({"--no-boundary-check"}, + " --no-boundary-check \tDisable boundary check (Default)\n", + {driverCategory}); + +maplecl::Option boundaryStaticCheck({"--boundary-check-static"}, + " --boundary-check-static \tEnable boundary static check\n", + {driverCategory}); + +maplecl::Option boundaryDynamicCheck({"--boundary-check-dynamic"}, + " --boundary-check-dynamic \tEnable boundary dynamic check " + "with static warning\n", + {driverCategory}); + +maplecl::Option boundaryDynamicCheckSilent({"--boundary-check-dynamic-silent"}, + " --boundary-check-dynamic-silent \tEnable boundary dynamic " + "check without static warning\n", + {driverCategory}); + +maplecl::Option safeRegionOption({"--safe-region"}, + " --safe-region \tEnable safe region\n", + {driverCategory}); + +maplecl::Option printDriverPhases({"--print-driver-phases"}, + " --print-driver-phases \tPrint Driver Phases\n", + {driverCategory}); + +maplecl::Option ldStatic({"-static", "--static"}, + " -static \tForce the linker to link a program statically\n", + {driverCategory, ldCategory}); + +maplecl::Option maplePhase({"--maple-phase"}, + " --maple-phase \tRun maple phase only\n --no-maple-phase\n", + {driverCategory}, + maplecl::DisableWith("--maple-toolchain"), + maplecl::Init(true)); + +maplecl::Option genMapleBC({"--genmaplebc"}, + " --genmaplebc \tGenerate .mbc file\n", + {driverCategory}); + +maplecl::Option genLMBC({"--genlmbc"}, + " --genlmbc \tGenerate .lmbc file\n", + {driverCategory, mpl2mplCategory}); + +maplecl::Option profileGen({"--profileGen"}, + " --profileGen \tGenerate profile data for static languages\n", + {driverCategory, meCategory, mpl2mplCategory, cgCategory}); + +maplecl::Option profileUse({"--profileUse"}, + " --profileUse \tOptimize static languages with profile data\n", + {driverCategory, mpl2mplCategory}); + +maplecl::Option stackProtectorStrong({"--stack-protector-strong"}, + " --stack-protector-strong \tadd stack guard for some function\n", + {driverCategory, meCategory, cgCategory}); + +maplecl::Option stackProtectorAll({"--stack-protector-all"}, + " --stack-protector-all \tadd stack guard for all functions\n", + {driverCategory, meCategory, cgCategory}); + +maplecl::Option inlineAsWeak({"-inline-as-weak", "--inline-as-weak"}, + " --inline-as-weak \tSet inline functions as weak symbols" + " as it's in C++\n", {driverCategory, hir2mplCategory}); + +/* ##################### STRING Options ############################################################### */ +maplecl::Option help({"--help", "-h"}, + " --help \tPrint help\n", + {driverCategory}, + maplecl::optionalValue); + +maplecl::Option infile({"--infile"}, + " --infile file1,file2,file3 \tInput files.\n", + {driverCategory}); + +maplecl::Option mplt({"--mplt", "-mplt"}, + " --mplt=file1,file2,file3 \tImport mplt files.\n", + {driverCategory, dex2mplCategory, jbc2mplCategory}); + +maplecl::Option partO2({"--partO2"}, + " --partO2 \tSet func list for O2\n", + {driverCategory}); + +maplecl::List jbc2mplOpt({"--jbc2mpl-opt"}, + " --jbc2mpl-opt 
\tSet options for jbc2mpl\n", + {driverCategory}); + +maplecl::List hir2mplOpt({"--hir2mpl-opt"}, + " --hir2mpl-opt \tSet options for hir2mpl\n", + {driverCategory}); + +maplecl::List clangOpt({"--clang-opt"}, + " --clang-opt \tSet options for clang as AST generator\n", + {driverCategory}); + +maplecl::List asOpt({"--as-opt"}, + " --as-opt \tSet options for as\n", + {driverCategory}); + +maplecl::List ldOpt({"--ld-opt"}, + " --ld-opt \tSet options for ld\n", + {driverCategory}); + +maplecl::List dex2mplOpt({"--dex2mpl-opt"}, + " --dex2mpl-opt \tSet options for dex2mpl\n", + {driverCategory}); + +maplecl::List mplipaOpt({"--mplipa-opt"}, + " --mplipa-opt \tSet options for mplipa\n", + {driverCategory}); + +maplecl::List mplcgOpt({"--mplcg-opt"}, + " --mplcg-opt \tSet options for mplcg\n", + {driverCategory}); + +maplecl::List meOpt({"--me-opt"}, + " --me-opt \tSet options for me\n", + {driverCategory}); + +maplecl::List mpl2mplOpt({"--mpl2mpl-opt"}, + " --mpl2mpl-opt \tSet options for mpl2mpl\n", + {driverCategory}); + +maplecl::Option profile({"--profile"}, + " --profile \tFor PGO optimization\n" + " \t--profile=list_file\n", + {driverCategory, dex2mplCategory, mpl2mplCategory, cgCategory}); + +maplecl::Option run({"--run"}, + " --run=cmd1:cmd2 \tThe name of executables that are going\n" + " \tto execute. IN SEQUENCE.\n" + " \tSeparated by \":\".Available exe names:\n" + " \tjbc2mpl, me, mpl2mpl, mplcg\n" + " \tInput file must match the tool can\n" + " \thandle\n", + {driverCategory}); + +maplecl::Option optionOpt({"--option"}, + " --option=\"opt1:opt2\" \tOptions for each executable,\n" + " \tseparated by \":\".\n" + " \tThe sequence must match the sequence in\n" + " \t--run.\n", + {driverCategory}); + +maplecl::List ldLib({"-l"}, + " -l \tLinks with a library file\n", + {driverCategory, ldCategory}, + maplecl::joinedValue); + +maplecl::List ldLibPath({"-L"}, + " -L \tAdd directory to library search path\n", + {driverCategory, ldCategory}, + maplecl::joinedValue); + +maplecl::List enableMacro({"-D"}, + " -D = \tDefine to " + "(or 1 if omitted)\n", + {driverCategory, clangCategory}, + maplecl::joinedValue); + +maplecl::List disableMacro({"-U"}, + " -U \tUndefine macro \n", + {driverCategory, clangCategory}, + maplecl::joinedValue); + +maplecl::List includeDir({"-I"}, + " -I \tAdd directory to include search path\n", + {driverCategory, clangCategory}, + maplecl::joinedValue); + +maplecl::List includeSystem({"--isystem"}, + " -isystem \tAdd directory to SYSTEM include search path\n", + {driverCategory, clangCategory}, + maplecl::joinedValue); + +maplecl::Option output({"-o"}, + " -o \tPlace the output into \n", + {driverCategory}, + maplecl::Init("a.out")); + +maplecl::Option saveTempOpt({"--save-temps"}, + " --save-temps \tDo not delete intermediate files.\n" + " \t--save-temps Save all intermediate files.\n" + " \t--save-temps=file1,file2,file3 Save the\n" + " \ttarget files.\n", + {driverCategory}, + maplecl::optionalValue); + +maplecl::Option target({"--target", "-target"}, + " --target= \tDescribe target platform\n" + " \t\t\t\tExample: --target=aarch64-gnu or --target=aarch64_be-gnuilp32\n", + {driverCategory, hir2mplCategory, dex2mplCategory, ipaCategory}); + +/* ##################### DIGITAL Options ############################################################### */ + +maplecl::Option helpLevel({"--level"}, + " --level=NUM \tPrint the help info of specified level.\n" + " \tNUM=0: All options (Default)\n" + " \tNUM=1: Product options\n" + " \tNUM=2: Experimental options\n" + " \tNUM=3: 
Debug options\n", + {driverCategory}); + +/* #################################################################################################### */ + +} /* namespace opts */ diff --git a/src/mapleall/maple_driver/src/driver_runner.cpp b/src/mapleall/maple_driver/src/driver_runner.cpp new file mode 100644 index 0000000000000000000000000000000000000000..3673d0047ba73b999fab831ecf268781f0480fbc --- /dev/null +++ b/src/mapleall/maple_driver/src/driver_runner.cpp @@ -0,0 +1,369 @@ +/* + * Copyright (c) [2019-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "driver_runner.h" +#include +#include "compiler.h" +#include "mpl_timer.h" +#include "mir_function.h" +#include "mir_parser.h" +#include "file_utils.h" +#include "constantfold.h" +#include "lower.h" +#include "me_phase_manager.h" +#include "lfo_loop_vec.h" +#include "seqvec.h" + +using namespace maplebe; + +#define JAVALANG (theModule->IsJavaModule()) +#define CLANG (theModule->IsCModule()) + +#define CHECK_MODULE(errorCode...) \ + do { \ + if (theModule == nullptr) { \ + LogInfo::MapleLogger() << "Fatal error: the module is null" << '\n'; \ + return errorCode; \ + } \ + } while (0) + +#define RELEASE(pointer) \ + do { \ + if (pointer != nullptr) { \ + delete pointer; \ + pointer = nullptr; \ + } \ + } while (0) + +#define ADD_PHASE(name, condition) \ + if ((condition)) { \ + phases.push_back(std::string(name)); \ + } + +#define ADD_EXTRA_PHASE(name, timephases, timeStart) \ + if (timephases) { \ + auto duration = std::chrono::system_clock::now() - (timeStart); \ + extraPhasesTime.emplace_back(std::chrono::duration_cast(duration).count()); \ + extraPhasesName.emplace_back(name); \ + } + +namespace maple { +const std::string kMplCg = "mplcg"; +const std::string kMpl2mpl = "mpl2mpl"; +const std::string kMplMe = "me"; + +enum OptLevel { + kLevelO0, + kLevelO1, + kLevelO2 +}; + +ErrorCode DriverRunner::Run() { + CHECK_MODULE(kErrorExit); + + if (exeNames.empty()) { + LogInfo::MapleLogger() << "Fatal error: no exe specified" << '\n'; + return kErrorExit; + } + std::string originBaseName = baseName; + outputFile = baseName; + (void)outputFile.append(GetPostfix()); + if (mpl2mplOptions != nullptr || meOptions != nullptr) { + std::string vtableImplFile = originBaseName; + std::string postFix = ""; + if (theModule->GetSrcLang() == kSrcLangC) { + postFix = ".me"; + } else { + postFix = ".VtableImpl"; + } + (void)vtableImplFile.append(postFix + ".mpl"); + (void)originBaseName.append(postFix); + ProcessMpl2mplAndMePhases(outputFile, vtableImplFile); + } + return kErrorNoError; +} + +std::string DriverRunner::GetPostfix() { + if (printOutExe == kMplMe) { + return ".me.mpl"; + } + if (printOutExe == kMpl2mpl) { + return ".VtableImpl.mpl"; + } + if (printOutExe == kMplCg) { + if (theModule->GetSrcLang() == kSrcLangC) { + return ".s"; + } else { + return ".VtableImpl.s"; + } + } + return ""; +} + +// trim both leading and trailing space and tab +static void TrimString(std::string &str) { + size_t pos = str.find_first_not_of(kSpaceTabStr); + if (pos 
!= std::string::npos) { + str = str.substr(pos); + } else { + str.clear(); + } + pos = str.find_last_not_of(kSpaceTabStr); + if (pos != std::string::npos) { + str = str.substr(0, pos + 1); + } +} + +void DriverRunner::SolveCrossModuleInJava(MIRParser &parser) const { + if (MeOption::optLevel < kLevelO2 || Options::lazyBinding || + Options::skipPhase == "inline" || Options::buildApp != 0 || + !Options::useInline || !Options::useCrossModuleInline) { + return; + } + std::string originBaseName = baseName; + // read in optimized mpl routines + const MapleVector &inputMplt = theModule->GetImportedMplt(); + auto it = inputMplt.cbegin(); + for (++it; it != inputMplt.cend(); ++it) { + const std::string &curStr = *it; + auto lastDotInner = curStr.find_last_of("."); + std::string tmp = (lastDotInner == std::string::npos) ? curStr : curStr.substr(0, lastDotInner); + if (tmp.find("framework") != std::string::npos && originBaseName.find("framework") != std::string::npos) { + continue; + } + // Skip the import file + if (tmp.find(FileUtils::GetFileName(originBaseName, true)) != std::string::npos) { + continue; + } + size_t index = curStr.rfind("."); + CHECK_FATAL(index != std::string::npos, "can not find ."); + + std::string inputInline = curStr.substr(0, index + 1) + "mplt_inline"; + std::ifstream optFile(inputInline); + if (!optFile.is_open()) { + continue; + } + + LogInfo::MapleLogger() << "Starting parse " << inputInline << '\n'; + bool parsed = parser.ParseInlineFuncBody(optFile); + if (!parsed) { + parser.EmitError(actualInput); + } + optFile.close(); + } +} + +void DriverRunner::SolveCrossModuleInC(MIRParser &parser) const { + if (MeOption::optLevel < kLevelO2 || !Options::useInline || + !Options::useCrossModuleInline || Options::skipPhase == "inline" || + Options::importFileList == "") { + return; + } + char absPath[PATH_MAX]; + if (theModule->GetFileName().size() > PATH_MAX || realpath(theModule->GetFileName().c_str(), absPath) == nullptr) { + CHECK_FATAL(false, "invalid file path"); + } + std::ifstream infile(Options::importFileList); + if (!infile.is_open()) { + LogInfo::MapleLogger(kLlErr) << "Cannot open importfilelist file " << Options::importFileList << '\n'; + } + LogInfo::MapleLogger() << "[CROSS_MODULE] read importfile from list: " << Options::importFileList << '\n'; + std::string input; + while (getline(infile, input)) { + TrimString(input); + if (input.empty() || input.find(absPath) != std::string::npos) { + continue; + } + std::ifstream optFile(input); + if (!optFile.is_open()) { + abort(); + } + LogInfo::MapleLogger() << "Starting parse " << input << '\n'; + bool parsed = parser.ParseInlineFuncBody(optFile); + if (!parsed) { + parser.EmitError(actualInput); + } + optFile.close(); + } + infile.close(); +} + +ErrorCode DriverRunner::ParseInput() const { + CHECK_MODULE(kErrorExit); + LogInfo::MapleLogger() << "Starting parse input" << '\n'; + MPLTimer timer; + timer.Start(); + MIRParser parser(*theModule); + ErrorCode ret = kErrorNoError; + if (!fileParsed) { + if (inputFileType != kFileTypeBpl && + inputFileType != kFileTypeMbc && + inputFileType != kFileTypeLmbc) { + bool parsed = parser.ParseMIR(0, 0, false, true); + if (!parsed) { + ret = kErrorExit; + parser.EmitError(actualInput); + } + } else { + BinaryMplImport binMplt(*theModule); + binMplt.SetImported(false); + std::string modid = theModule->GetFileName(); + bool imported = binMplt.Import(modid, true); + if (!imported) { + ret = kErrorExit; + LogInfo::MapleLogger() << "Cannot open .bpl file: %s" << modid << '\n'; + } + } + } 
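[Reviewer note, not part of the patch] SolveCrossModuleInC above drives cross-module inlining for C by walking the --importfilelist file line by line: each line is trimmed, blank lines and the module's own path are skipped, and every remaining entry is opened and fed to ParseInlineFuncBody. A reduced sketch of that loop, with a generic callback in place of the parser (the driver aborts on an unopenable entry, which this sketch merely skips):

#include <fstream>
#include <functional>
#include <iostream>
#include <string>

// Trim leading/trailing spaces and tabs, like TrimString above.
static void Trim(std::string &s) {
  const char *kSpaceTab = " \t";
  size_t begin = s.find_first_not_of(kSpaceTab);
  if (begin == std::string::npos) {
    s.clear();
    return;
  }
  size_t end = s.find_last_not_of(kSpaceTab);
  s = s.substr(begin, end - begin + 1);
}

// Read an import list and hand every usable path to `consume`; entries containing
// `selfPath` are skipped so a module does not import its own inline bodies.
void ReadImportList(const std::string &listFile, const std::string &selfPath,
                    const std::function<void(std::ifstream &)> &consume) {
  std::ifstream list(listFile);
  if (!list.is_open()) {
    std::cerr << "Cannot open import file list " << listFile << '\n';
    return;
  }
  std::string line;
  while (std::getline(list, line)) {
    Trim(line);
    if (line.empty() || line.find(selfPath) != std::string::npos) {
      continue;
    }
    std::ifstream entry(line);
    if (!entry.is_open()) {
      continue;
    }
    consume(entry);
  }
}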
+ if (CLANG) { + SolveCrossModuleInC(parser); + } + timer.Stop(); + LogInfo::MapleLogger() << "Parse consumed " << timer.Elapsed() << "s" << '\n'; + return ret; +} + +ErrorCode DriverRunner::ParseSrcLang(MIRSrcLang &srcLang) const { + ErrorCode ret = kErrorNoError; + if (inputFileType != kFileTypeBpl && + inputFileType != kFileTypeMbc && + inputFileType != kFileTypeLmbc) { + MIRParser parser(*theModule); + bool parsed = parser.ParseSrcLang(srcLang); + if (!parsed) { + ret = kErrorExit; + parser.EmitError(actualInput); + } + } else { + BinaryMplImport binMplt(*theModule); + std::string modid = theModule->GetFileName(); + bool imported = binMplt.ImportForSrcLang(modid, srcLang); + if (!imported) { + ret = kErrorExit; + LogInfo::MapleLogger() << "Cannot open .bpl file: %s" << modid << '\n'; + } + } + return ret; +} + +void DriverRunner::RunNewPM(const std::string &output, const std::string &vtableImplFile) { + LogInfo::MapleLogger() << "Processing maplecomb in new phasemanager" << '\n'; + auto pmMemPool = std::make_unique(memPoolCtrler, "PM module mempool"); + const MaplePhaseInfo *curPhase = MaplePhaseRegister::GetMaplePhaseRegister()->GetPhaseByID(&MEBETopLevelManager::id); + auto *topLevelPhaseManager = static_cast(curPhase->GetConstructor()(pmMemPool.get())); + topLevelPhaseManager->SetRunMpl2Mpl(mpl2mplOptions != nullptr); + topLevelPhaseManager->SetRunMe(meOptions != nullptr); + topLevelPhaseManager->SetQuiet(Options::quiet); + if (timePhases) { + topLevelPhaseManager->InitTimeHandler(); + } + MeFuncPM::genMeMpl = genMeMpl; + MeFuncPM::genMapleBC = genMapleBC; + MeFuncPM::genLMBC = genLMBC; + MeFuncPM::timePhases = timePhases; + MPLTimer timer; + timer.Start(); + topLevelPhaseManager->DoPhasesPopulate(*theModule); + topLevelPhaseManager->Run(*theModule); + if (timePhases) { + topLevelPhaseManager->DumpPhaseTime(); + } + // emit after module phase + if (printOutExe == kMpl2mpl || printOutExe == kMplMe) { + theModule->Emit(output); + } else if (genVtableImpl || Options::emitVtableImpl) { + theModule->Emit(vtableImplFile); + } + pmMemPool.reset(); + timer.Stop(); + LogInfo::MapleLogger() << "maplecomb consumed " << timer.Elapsed() << "s" << '\n'; + // dump vectorized loop counter here + { + LogInfo::MapleLogger() << "\n" << LoopVectorization::vectorizedLoop << " loop vectorized\n"; + LogInfo::MapleLogger() << "\n" << SeqVectorize::seqVecStores << " sequencestores vectorized\n"; + LogInfo::MapleLogger() << "\n" << LfoUnrollOneLoop::countOfLoopsUnrolled << " loops unrolled\n"; + } +} + +void DriverRunner::ProcessMpl2mplAndMePhases(const std::string &output, const std::string &vtableImplFile) { + CHECK_MODULE(); + theMIRModule = theModule; + if (mpl2mplOptions != nullptr || meOptions != nullptr) { + // multi-thread is not supported for now. 
+ MeOption::threads = 1; + // main entry of newpm for me&mpl2mpl + RunNewPM(output, vtableImplFile); + } +} + +void DriverRunner::ProcessCGPhase(const std::string &output, const std::string &originBaseName) const { + CHECK_MODULE(); + theMIRModule = theModule; + if (withDwarf && !theModule->IsWithDbgInfo()) { + LogInfo::MapleLogger() << "set up debug info " << '\n'; + theMIRModule->GetDbgInfo()->BuildDebugInfo(); +#if DEBUG + if (cgOptions) { + cgOptions->SetOption(CGOptions::kVerboseAsm); + } +#endif + } + if (cgOptions == nullptr) { + return; + } + LogInfo::MapleLogger() << "Processing mplcg in new phaseManager" << '\n'; + MPLTimer timer; + timer.Start(); + theModule->SetBaseName(originBaseName); + theModule->SetOutputFileName(output); + cgOptions->SetDefaultOptions(*theModule); + if (timePhases) { + CGOptions::EnableTimePhases(); + } + Globals::GetInstance()->SetOptimLevel(cgOptions->GetOptimizeLevel()); + MAD mad; + Globals::GetInstance()->SetMAD(mad); + + auto cgPhaseManager = std::make_unique(memPoolCtrler, "cg function phasemanager"); + const MaplePhaseInfo *cgPMInfo = MaplePhaseRegister::GetMaplePhaseRegister()->GetPhaseByID(&CgFuncPM::id); + auto *cgfuncPhaseManager = static_cast(cgPMInfo->GetConstructor()(cgPhaseManager.get())); + cgfuncPhaseManager->SetQuiet(CGOptions::IsQuiet()); + if (timePhases) { + cgfuncPhaseManager->InitTimeHandler(); + } + /* It is a specifc work around (need refactor) */ + cgfuncPhaseManager->SetCGOptions(cgOptions); + (void) cgfuncPhaseManager->PhaseRun(*theModule); + if (timePhases) { + cgfuncPhaseManager->DumpPhaseTime(); + } + timer.Stop(); + if (theMIRModule->GetDbgInfo() != nullptr) { + theMIRModule->GetDbgInfo()->ClearDebugInfo(); + } + theMIRModule->ReleasePragmaMemPool(); + LogInfo::MapleLogger() << "Mplcg consumed " << timer.ElapsedMilliseconds() << "ms" << '\n'; +} + +void DriverRunner::InitProfile() const { + if (!cgOptions->IsProfileDataEmpty()) { + uint32 dexNameIdx = theModule->GetFileinfo(GlobalTables::GetStrTable().GetOrCreateStrIdxFromName("INFO_filename")); + const std::string &dexName = GlobalTables::GetStrTable().GetStringFromStrIdx(GStrIdx(dexNameIdx)); + bool deCompressSucc = theModule->GetProfile().DeCompress(CGOptions::GetProfileData(), dexName); + if (!deCompressSucc) { + LogInfo::MapleLogger() << "WARN: DeCompress() " << CGOptions::GetProfileData() << "failed in mplcg()\n"; + } + } +} +} // namespace maple diff --git a/src/mapleall/maple_driver/src/hided_options.cpp b/src/mapleall/maple_driver/src/hided_options.cpp new file mode 100644 index 0000000000000000000000000000000000000000..34eff7775df8deec2b99e654791fd8d832a2756f --- /dev/null +++ b/src/mapleall/maple_driver/src/hided_options.cpp @@ -0,0 +1,159 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
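[Reviewer note, not part of the patch] The driver options above and the hidden clang pass-through options that follow all register flags through the maplecl::Option / maplecl::List templates, with features such as a DisableWith alias, an Init default, optional or joined values, and per-tool categories. The real template lives in cl_option.h; purely as an illustration of the enable/disable-alias idea (not the maplecl implementation), a boolean flag could be modelled like this:

#include <optional>
#include <string>
#include <utility>
#include <vector>

// Simplified stand-in for a boolean command-line flag with an optional
// "--no-..." alias and a default value; maplecl's real Option<> is far richer.
class BoolFlag {
 public:
  explicit BoolFlag(std::vector<std::string> names, bool defaultValue = false)
      : names(std::move(names)), value(defaultValue) {}

  BoolFlag &DisableWith(std::string name) {
    disableName = std::move(name);
    return *this;
  }

  // Returns true if `arg` was recognized (either an enabling or disabling spelling).
  bool TryParse(const std::string &arg) {
    for (const auto &n : names) {
      if (arg == n) {
        value = true;
        setByUser = true;
        return true;
      }
    }
    if (disableName && arg == *disableName) {
      value = false;
      setByUser = true;
      return true;
    }
    return false;
  }

  bool IsEnabledByUser() const { return setByUser; }
  operator bool() const { return value; }

 private:
  std::vector<std::string> names;
  std::optional<std::string> disableName;
  bool value;
  bool setByUser = false;
};

With such a stand-in, a flag like gcOnly above would be declared roughly as BoolFlag({"--gconly", "-gconly"}).DisableWith("--no-gconly"); the real maplecl additionally records the option categories so each tool only sees the flags registered for it.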
+ */
+
+#include "driver_options.h"
+#include "cl_option.h"
+#include
+
+namespace opts {
+
+maplecl::Option MD({"-MD"},
+    " -MD \tWrite a depfile containing user and system headers\n",
+    {driverCategory, clangCategory}, maplecl::hide);
+
+maplecl::Option MT({"-MT"},
+    " -MT \tSpecify name of main file output in depfile\n",
+    {driverCategory, clangCategory}, maplecl::hide, maplecl::joinedValue);
+
+maplecl::Option MF({"-MF"},
+    " -MF \tWrite depfile output from -MD, -M to <file>\n",
+    {driverCategory, clangCategory}, maplecl::hide, maplecl::joinedValue);
+
+/* Should we use std option in hir2mpl ??? */
+maplecl::Option std({"-std"},
+    " -std \tIgnored\n",
+    {driverCategory, clangCategory});
+
+/* ##################### Warnings Options ############################################################### */
+
+maplecl::Option wUnusedMacro({"-Wunused-macros"},
+    " -Wunused-macros \twarning: macro is not used\n",
+    {driverCategory, clangCategory}, maplecl::hide);
+
+maplecl::Option wBadFunctionCast({"-Wbad-function-cast"},
+    " -Wbad-function-cast \twarning: "
+    "cast from function call of type A to non-matching type B\n",
+    {driverCategory, clangCategory}, maplecl::hide);
+
+maplecl::Option wStrictPrototypes({"-Wstrict-prototypes"},
+    " -Wstrict-prototypes \twarning: "
+    "Warn if a function is declared or defined without specifying the argument types\n",
+    {driverCategory, clangCategory}, maplecl::hide);
+
+maplecl::Option wUndef({"-Wundef"},
+    " -Wundef \twarning: "
+    "Warn if an undefined identifier is evaluated in an #if directive. "
+    "Such identifiers are replaced with zero\n",
+    {driverCategory, clangCategory}, maplecl::hide);
+
+maplecl::Option wCastQual({"-Wcast-qual"},
+    " -Wcast-qual \twarning: "
+    "Warn whenever a pointer is cast so as to remove a type qualifier from the target type. "
+    "For example, warn if a const char * is cast to an ordinary char *\n",
+    {driverCategory, clangCategory}, maplecl::hide);
+
+maplecl::Option wMissingFieldInitializers({"-Wmissing-field-initializers"},
+    " -Wmissing-field-initializers\twarning: "
+    "Warn if a structure's initializer has some fields missing\n",
+    {driverCategory, clangCategory}, maplecl::hide,
+    maplecl::DisableWith("-Wno-missing-field-initializers"));
+
+maplecl::Option wUnusedParameter({"-Wunused-parameter"},
+    " -Wunused-parameter \twarning: "
+    "Warn whenever a function parameter is unused aside from its declaration\n",
+    {driverCategory, clangCategory}, maplecl::hide,
+    maplecl::DisableWith("-Wno-unused-parameter"));
+
+maplecl::Option wAll({"-Wall"},
+    " -Wall \tThis enables all the warnings about constructions "
+    "that some users consider questionable\n",
+    {driverCategory, clangCategory}, maplecl::hide);
+
+maplecl::Option wExtra({"-Wextra"},
+    " -Wextra \tEnable some extra warning flags that are not enabled by -Wall\n",
+    {driverCategory, clangCategory}, maplecl::hide);
+
+maplecl::Option wWriteStrings({"-Wwrite-strings"},
+    " -Wwrite-strings \tWhen compiling C, give string constants the type "
+    "const char[length] so that copying the address of one into "
+    "a non-const char * pointer produces a warning\n",
+    {driverCategory, clangCategory}, maplecl::hide);
+
+maplecl::Option wVla({"-Wvla"},
+    " -Wvla \tWarn if a variable-length array is used in the code\n",
+    {driverCategory, clangCategory}, maplecl::hide);
+
+maplecl::Option wFormatSecurity({"-Wformat-security"},
+    " -Wformat-security \tWarn about uses of format "
+    "functions that represent possible security problems\n",
+    {driverCategory, clangCategory}, maplecl::hide);
+
+maplecl::Option wShadow({"-Wshadow"},
+    " -Wshadow \tWarn whenever a local variable "
+    "or type declaration shadows another variable\n",
+    {driverCategory, clangCategory}, maplecl::hide);
+
+maplecl::Option wTypeLimits({"-Wtype-limits"},
+    " -Wtype-limits \tWarn if a comparison is always true or always "
+    "false due to the limited range of the data type\n",
+    {driverCategory, clangCategory}, maplecl::hide);
+
+maplecl::Option wSignCompare({"-Wsign-compare"},
+    " -Wsign-compare \tWarn when a comparison between signed and "
+    "unsigned values could produce an incorrect result when the signed value is converted "
+    "to unsigned\n",
+    {driverCategory, clangCategory}, maplecl::hide);
+
+maplecl::Option wShiftNegativeValue({"-Wshift-negative-value"},
+    " -Wshift-negative-value \tWarn if left "
+    "shifting a negative value\n",
+    {driverCategory, clangCategory}, maplecl::hide);
+
+maplecl::Option wPointerArith({"-Wpointer-arith"},
+    " -Wpointer-arith \tWarn about anything that depends on the "
+    "“size of” a function type or of void\n",
+    {driverCategory, clangCategory}, maplecl::hide);
+
+maplecl::Option wIgnoredQualifiers({"-Wignored-qualifiers"},
+    " -Wignored-qualifiers \tWarn if the return type of a "
+    "function has a type qualifier such as const\n",
+    {driverCategory, clangCategory}, maplecl::hide);
+
+maplecl::Option wFormat({"-Wformat"},
+    " -Wformat \tCheck calls to printf and scanf, etc., "
+    "to make sure that the arguments supplied have types appropriate "
+    "to the format string specified\n",
+    {driverCategory, clangCategory}, maplecl::hide);
+
+maplecl::Option wFloatEqual({"-Wfloat-equal"},
+    " -Wfloat-equal \tWarn if floating-point values are used "
+    "in equality comparisons\n",
+    {driverCategory, clangCategory}, maplecl::hide);
+
+maplecl::Option wDateTime({"-Wdate-time"},
+ " -Wdate-time \tWarn when macros __TIME__, __DATE__ or __TIMESTAMP__ " + "are encountered as they might prevent bit-wise-identical reproducible compilations\n", + {driverCategory, clangCategory}, maplecl::hide); + +maplecl::Option wImplicitFallthrough({"-Wimplicit-fallthrough"}, + " -Wimplicit-fallthrough \tWarn when a switch case falls through\n", + {driverCategory, clangCategory}, maplecl::hide); + +maplecl::Option wShiftOverflow({"-Wshift-overflow"}, + " -Wshift-overflow \tWarn about left shift overflows\n", + {driverCategory, clangCategory}, maplecl::hide, + maplecl::DisableWith("-Wno-shift-overflow")); + +} /* namespace opts */ diff --git a/src/mapleall/maple_driver/src/ipa_compiler.cpp b/src/mapleall/maple_driver/src/ipa_compiler.cpp new file mode 100644 index 0000000000000000000000000000000000000000..86fbaacae4653e27460d090d8830138b428f93dc --- /dev/null +++ b/src/mapleall/maple_driver/src/ipa_compiler.cpp @@ -0,0 +1,53 @@ +/* + * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "compiler.h" +#include "default_options.def" + +namespace maple { +const std::string &IpaCompiler::GetBinName() const { + return kBinNameMplipa; +} + +DefaultOption IpaCompiler::GetDefaultOptions(const MplOptions &options, const Action &action [[maybe_unused]]) const { + uint32_t len = 0; + MplOption *kMplipaDefaultOptions = nullptr; + + if (opts::o2) { + len = sizeof(kMplipaDefaultOptionsO2) / sizeof(MplOption); + kMplipaDefaultOptions = kMplipaDefaultOptionsO2; + } + + if (kMplipaDefaultOptions == nullptr) { + return DefaultOption(); + } + + DefaultOption defaultOptions = { std::make_unique(len), len }; + for (uint32_t i = 0; i < len; ++i) { + defaultOptions.mplOptions[i] = kMplipaDefaultOptions[i]; + } + + for (uint32_t i = 0; i < defaultOptions.length; ++i) { + defaultOptions.mplOptions[i].SetValue( + FileUtils::AppendMapleRootIfNeeded(defaultOptions.mplOptions[i].GetNeedRootPath(), + defaultOptions.mplOptions[i].GetValue(), + options.GetExeFolder())); + } + return defaultOptions; +} + +std::string IpaCompiler::GetInputFileName(const MplOptions &options [[maybe_unused]], const Action &action) const { + return action.GetFullOutputName() + ".mpl"; +} +} // namespace maple diff --git a/src/mapleall/maple_driver/src/jbc2mpl_compiler.cpp b/src/mapleall/maple_driver/src/jbc2mpl_compiler.cpp new file mode 100644 index 0000000000000000000000000000000000000000..bce6d2ec7dfa1e5db8862b196a663600ac47dbc0 --- /dev/null +++ b/src/mapleall/maple_driver/src/jbc2mpl_compiler.cpp @@ -0,0 +1,41 @@ +/* + * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "compiler.h" +#include "default_options.def" + +namespace maple { +const std::string &Jbc2MplCompiler::GetBinName() const { + return kBinNameJbc2mpl; +} + +DefaultOption Jbc2MplCompiler::GetDefaultOptions(const MplOptions &options [[maybe_unused]], + const Action &action [[maybe_unused]]) const { + return DefaultOption(); +} + +void Jbc2MplCompiler::GetTmpFilesToDelete(const MplOptions &mplOptions [[maybe_unused]], const Action &action, + std::vector &tempFiles) const { + tempFiles.push_back(action.GetFullOutputName() + ".mpl"); + tempFiles.push_back(action.GetFullOutputName() + ".mplt"); +} + +std::unordered_set Jbc2MplCompiler::GetFinalOutputs(const MplOptions &mplOptions [[maybe_unused]], + const Action &action) const { + std::unordered_set finalOutputs; + (void)finalOutputs.insert(action.GetFullOutputName() + ".mpl"); + (void)finalOutputs.insert(action.GetFullOutputName() + ".mplt"); + return finalOutputs; +} +} // namespace maple diff --git a/src/mapleall/maple_driver/src/ld_compiler.cpp b/src/mapleall/maple_driver/src/ld_compiler.cpp new file mode 100644 index 0000000000000000000000000000000000000000..f77bad356b69738b5db00c9fe0da782c15e538ed --- /dev/null +++ b/src/mapleall/maple_driver/src/ld_compiler.cpp @@ -0,0 +1,106 @@ +/* + * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "compiler.h" +#include "file_utils.h" +#include "triple.h" +#include "default_options.def" + +namespace maple { + +static const std::string kAarch64BeIlp32Gcc = "aarch64_be-linux-gnuilp32-gcc"; +static const std::string kAarch64BeGcc = "aarch64_be-linux-gnu-gcc"; + +std::string LdCompilerBeILP32::GetBinPath(const MplOptions &mplOptions [[maybe_unused]]) const { + std::string gccPath = FileUtils::SafeGetenv(kGccBePathEnv) + "/"; + const std::string &gccTool = Triple::GetTriple().GetEnvironment() == Triple::EnvironmentType::GNUILP32 ? 
+ kAarch64BeIlp32Gcc : kAarch64BeGcc; + std::string gccToolPath = gccPath + gccTool; + + if (!FileUtils::IsFileExists(gccToolPath)) { + LogInfo::MapleLogger(kLlErr) << kGccBePathEnv << " environment variable must be set as the path to " + << gccTool << "\n"; + CHECK_FATAL(false, "%s environment variable must be set as the path to %s\n", + kGccBePathEnv, gccTool.c_str()); + } + + return gccPath; +} + +const std::string &LdCompilerBeILP32::GetBinName() const { + if (Triple::GetTriple().GetEnvironment() == Triple::EnvironmentType::GNUILP32) { + return kAarch64BeIlp32Gcc; + } else { + return kAarch64BeGcc; + } +} + +std::string LdCompiler::GetBinPath(const MplOptions &mplOptions [[maybe_unused]]) const { +#ifdef ANDROID + return "prebuilts/gcc/linux-x86/aarch64/aarch64-linux-android-4.9/bin/"; +#else + return FileUtils::SafeGetenv(kMapleRoot) + "/tools/bin/"; +#endif +} + +// Required to use ld instead of gcc; ld will be implemented later +const std::string &LdCompiler::GetBinName() const { + return kBinNameGcc; +} + +/* the tool name must be the same as exeName field in Descriptor structure */ +const std::string &LdCompiler::GetTool() const { + return kLdFlag; +} + +DefaultOption LdCompiler::GetDefaultOptions(const MplOptions &options, const Action &action [[maybe_unused]]) const { + uint32_t len = sizeof(kLdDefaultOptions) / sizeof(MplOption); + DefaultOption defaultOptions = { std::make_unique(len), len }; + + for (uint32_t i = 0; i < len; ++i) { + defaultOptions.mplOptions[i] = kLdDefaultOptions[i]; + } + + for (uint32_t i = 0; i < defaultOptions.length; ++i) { + defaultOptions.mplOptions[i].SetValue( + FileUtils::AppendMapleRootIfNeeded(defaultOptions.mplOptions[i].GetNeedRootPath(), + defaultOptions.mplOptions[i].GetValue(), + options.GetExeFolder())); + } + return defaultOptions; +} + +std::string LdCompiler::GetInputFileName(const MplOptions &options [[maybe_unused]], const Action &action) const { + std::string files; + + bool isFirstEntry = true; + for (const auto &file : action.GetLinkFiles()) { + /* Split Input files with " "; (except first entry) */ + if (isFirstEntry) { + isFirstEntry = false; + } else { + files += " "; + } + + files += StringUtils::GetStrBeforeLast(file, ".") + ".o"; + } + return files; +} + +void LdCompiler::AppendOutputOption(std::vector &finalOptions, + const std::string &name) const { + (void)finalOptions.emplace_back("-o", name); +} + +} // namespace maple diff --git a/src/mapleall/maple_driver/src/maple.cpp b/src/mapleall/maple_driver/src/maple.cpp new file mode 100644 index 0000000000000000000000000000000000000000..d969cfca7d52890952e69ad166dfaac1752535fb --- /dev/null +++ b/src/mapleall/maple_driver/src/maple.cpp @@ -0,0 +1,32 @@ +/* + * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "compiler_factory.h" +#include "error_code.h" +#include "mpl_options.h" +#include "mpl_sighandler.h" + +using namespace maple; + +int main(int argc, char **argv) { + SigHandler::EnableAll(); + + MplOptions mplOptions; + int ret = static_cast(mplOptions.Parse(argc, argv)); + if (ret == kErrorNoError) { + ret = CompilerFactory::GetInstance().Compile(mplOptions); + } + PrintErrorMessage(ret); + return ret; +} diff --git a/src/mapleall/maple_driver/src/maple_comb_compiler.cpp b/src/mapleall/maple_driver/src/maple_comb_compiler.cpp new file mode 100644 index 0000000000000000000000000000000000000000..73e5fb46c653a02785234ec987d2e3d0d9ad2a04 --- /dev/null +++ b/src/mapleall/maple_driver/src/maple_comb_compiler.cpp @@ -0,0 +1,276 @@ +/* + * Copyright (c) [2019-2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "compiler.h" +#include "driver_options.h" +#include "string_utils.h" +#include "mpl_logging.h" +#include "driver_runner.h" +#include "inline.h" +#include "me_phase_manager.h" +#include "constantfold.h" + +namespace maple { + +std::string MapleCombCompiler::GetInputFileName(const MplOptions &options [[maybe_unused]], + const Action &action) const { + if (action.IsItFirstRealAction()) { + return action.GetInputFile(); + } + if (action.GetInputFileType() == InputFileType::kFileTypeVtableImplMpl) { + return action.GetFullOutputName() + ".VtableImpl.mpl"; + } + if (action.GetInputFileType() == InputFileType::kFileTypeBpl) { + return action.GetFullOutputName() + ".bpl"; + } + if (action.GetInputFileType() == InputFileType::kFileTypeMbc) { + return action.GetFullOutputName() + ".mbc"; + } + if (action.GetInputFileType() == InputFileType::kFileTypeLmbc) { + return action.GetFullOutputName() + ".lmbc"; + } + return action.GetFullOutputName() + ".mpl"; +} + +void MapleCombCompiler::GetTmpFilesToDelete(const MplOptions &mplOptions [[maybe_unused]], const Action &action, + std::vector &tempFiles) const { + std::string filePath = action.GetFullOutputName() + ".data.muid"; + tempFiles.push_back(filePath); + filePath = action.GetFullOutputName() + ".func.muid"; + tempFiles.push_back(filePath); + for (auto iter = tempFiles.begin(); iter != tempFiles.end();) { + std::ifstream infile; + infile.open(*iter); + if (infile.fail()) { + iter = tempFiles.erase(iter); + } else { + ++iter; + } + infile.close(); + } +} + +std::unordered_set MapleCombCompiler::GetFinalOutputs(const MplOptions &mplOptions [[maybe_unused]], + const Action &action) const { + std::unordered_set finalOutputs; + (void)finalOutputs.insert(action.GetFullOutputName() + ".VtableImpl.mpl"); + return finalOutputs; +} + +void MapleCombCompiler::PrintCommand(const MplOptions &options, const Action &action) const { + std::string runStr = "--run="; + std::ostringstream optionStr; + optionStr << "--option=\""; + std::string connectSym = ""; + bool firstComb = false; + if (options.GetExeOptions().find(kBinNameMe) != options.GetExeOptions().end()) { + runStr += "me"; + auto it = options.GetExeOptions().find(kBinNameMe); + for (auto 
&opt : it->second) { + optionStr << " " << opt; + } + firstComb = true; + } + if (options.GetExeOptions().find(kBinNameMpl2mpl) != options.GetExeOptions().end()) { + if (firstComb) { + runStr += ":mpl2mpl"; + optionStr << ":"; + } else { + runStr += "mpl2mpl"; + } + auto it = options.GetExeOptions().find(kBinNameMpl2mpl); + for (auto &opt : it->second) { + optionStr << " " << opt; + } + } + + std::string driverOptions = options.GetCommonOptionsStr(); + + optionStr << "\""; + LogInfo::MapleLogger() << "Starting:" << options.GetExeFolder() << "maple " << runStr << " " + << optionStr.str() << " " << driverOptions << GetInputFileName(options, action) << '\n'; +} + +std::string MapleCombCompiler::GetStringOfSafetyOption() const { + std::string safetyOptionStr = ""; + if (MeOption::safeRegionMode) { + safetyOptionStr += "safe-region "; + } + if (MeOption::isNpeCheckAll) { + safetyOptionStr += "npe-check-dynamic-all "; + } + switch (MeOption::npeCheckMode) { + case kStaticCheck: + safetyOptionStr += "npe-check-static "; + break; + case kDynamicCheck: + safetyOptionStr += "npe-check-dynamic "; + break; + case kDynamicCheckSilent: + safetyOptionStr += "npe-check-dynamic-silent "; + break; + default: + break; + } + switch (MeOption::boundaryCheckMode) { + case kStaticCheck: + safetyOptionStr += "boundary-check-static "; + break; + case kDynamicCheck: + safetyOptionStr += "boundary-check-dynamic "; + break; + case kDynamicCheckSilent: + safetyOptionStr += "boundary-check-dynamic-silent "; + break; + default: + break; + } + return safetyOptionStr; +} + +ErrorCode MapleCombCompiler::MakeMeOptions(const MplOptions &options, DriverRunner &runner) const { + auto it = std::find(options.GetRunningExes().begin(), options.GetRunningExes().end(), kBinNameMe); + if (it == options.GetRunningExes().end()) { + return kErrorNoError; + } + + auto itOpt = options.GetExeOptions().find(kBinNameMe); + if (itOpt != options.GetExeOptions().end()) { + const auto &meExeOpts = itOpt->second; + const std::deque strMeOptions(meExeOpts.begin(), meExeOpts.end()); + (void)maplecl::CommandLine::GetCommandLine().HandleInputArgs(strMeOptions, meCategory); + } + + bool result = MeOption::GetInstance().SolveOptions(opts::debug); + if (!result) { + LogInfo::MapleLogger() << "Meet error me options\n"; + return kErrorCompileFail; + } + MeOption::generalRegOnly = options.HasSetGeneralRegOnly(); + MeOption::npeCheckMode = options.GetNpeCheckMode(); + MeOption::isNpeCheckAll = opts::npeDynamicCheckAll; + MeOption::boundaryCheckMode = options.GetBoundaryCheckMode(); + MeOption::safeRegionMode = opts::safeRegionOption; + if (MeOption::optLevel == 0) { + std::string safetyOptionStr = GetStringOfSafetyOption(); + if (!safetyOptionStr.empty()) { + (void)safetyOptionStr.erase(safetyOptionStr.cend() - 1); + WARN(kLncWarn, "warning: The safety option %s must be used in conjunction with O2 mode", + safetyOptionStr.c_str()); + } + } + + // Set me options for driver runner + runner.SetMeOptions(&MeOption::GetInstance()); + return kErrorNoError; +} + +ErrorCode MapleCombCompiler::MakeMpl2MplOptions(const MplOptions &options, DriverRunner &runner) const { + auto it = std::find(options.GetRunningExes().begin(), options.GetRunningExes().end(), kBinNameMpl2mpl); + if (it == options.GetRunningExes().end()) { + return kErrorNoError; + } + + auto itOpt = options.GetExeOptions().find(kBinNameMpl2mpl); + if (itOpt != options.GetExeOptions().end()) { + const auto &mpl2mplExeOpts = itOpt->second; + const std::deque strMpl2mplOptions(mpl2mplExeOpts.begin(), 
mpl2mplExeOpts.end()); + (void)maplecl::CommandLine::GetCommandLine().HandleInputArgs(strMpl2mplOptions, mpl2mplCategory); + } + + auto &mpl2mplOption = Options::GetInstance(); + bool result = mpl2mplOption.SolveOptions(opts::debug); + if (!result) { + LogInfo::MapleLogger() << "Meet error mpl2mpl options\n"; + return kErrorCompileFail; + } + // Set mpl2mpl options for driver runner + runner.SetMpl2mplOptions(&Options::GetInstance()); + return kErrorNoError; +} + +std::string MapleCombCompiler::DecideOutExe(const MplOptions &options) const { + std::string printOutExe = ""; + auto &selectExes = options.GetSelectedExes(); + if (selectExes[selectExes.size() - 1] == kBinNameMapleComb) { + auto it = std::find(options.GetRunningExes().begin(), options.GetRunningExes().end(), kBinNameMpl2mpl); + if (it != options.GetRunningExes().end()) { + printOutExe = kBinNameMpl2mpl; + return printOutExe; + } + it = std::find(options.GetRunningExes().begin(), options.GetRunningExes().end(), kBinNameMe); + if (it != options.GetRunningExes().end()) { + printOutExe = kBinNameMe; + return printOutExe; + } + } + return selectExes[selectExes.size() - 1]; +} + +ErrorCode MapleCombCompiler::Compile(MplOptions &options, const Action &action, + std::unique_ptr &theModule) { + std::string fileName = GetInputFileName(options, action); + bool fileParsed = true; + if (theModule == nullptr) { + theModule = std::make_unique(fileName); + fileParsed = false; + } + options.PrintCommand(&action); + LogInfo::MapleLogger() << "Starting maplecomb\n"; + theModule->InitPartO2List(opts::partO2); + DriverRunner runner(theModule.get(), options.GetSelectedExes(), action.GetInputFileType(), fileName, + fileName, fileName, opts::withDwarf, fileParsed, + opts::timePhase, opts::genVtable, + opts::genMeMpl, opts::genMapleBC, + opts::genLMBC); + ErrorCode ret = kErrorNoError; + + MIRParser parser(*theModule); + MIRSrcLang srcLang = kSrcLangUnknown; + ret = runner.ParseSrcLang(srcLang); + if (ret != kErrorNoError) { + return ret; + } + theModule->SetSrcLang(srcLang); + + // Add running phases and default options according to the srcLang (only for auto mode) + ret = options.AppendCombOptions(theModule->GetSrcLang()); + if (ret != kErrorNoError) { + return ret; + } + + ret = MakeMeOptions(options, runner); + if (ret != kErrorNoError) { + return ret; + } + ret = MakeMpl2MplOptions(options, runner); + if (ret != kErrorNoError) { + return ret; + } + runner.SetPrintOutExe(DecideOutExe(options)); + + // Parse the input file + ret = runner.ParseInput(); + if (ret != kErrorNoError) { + return ret; + } + + if (opts::debug) { + PrintCommand(options, action); + } + ErrorCode nErr = runner.Run(); + logInfo.PrintUserWarnMessages(); + return nErr; +} +} // namespace maple diff --git a/src/mapleall/maple_driver/src/maple_comb_compiler_wrapper.cpp b/src/mapleall/maple_driver/src/maple_comb_compiler_wrapper.cpp new file mode 100644 index 0000000000000000000000000000000000000000..f20e1c04be7982c292f933dde5c0af3b86a9e1d3 --- /dev/null +++ b/src/mapleall/maple_driver/src/maple_comb_compiler_wrapper.cpp @@ -0,0 +1,76 @@ +/* + * Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include +#include "compiler.h" +#include "types_def.h" + +namespace maple { + +// FixMe +static const std::string kTmpBin = "maple"; + +const std::string &MapleCombCompilerWrp::GetBinName() const { + return kTmpBin; +} + +std::string MapleCombCompilerWrp::GetBinPath(const MplOptions &mplOptions [[maybe_unused]]) const { + return FileUtils::SafeGetenv(kMapleRoot) + "/output/" + + FileUtils::SafeGetenv("MAPLE_BUILD_TYPE") + "/bin/"; +} + +DefaultOption MapleCombCompilerWrp::GetDefaultOptions(const MplOptions &options [[maybe_unused]], + const Action &action [[maybe_unused]]) const { + /* need to add --maple-phase option to run only maple phase. + * linker will be called as separated step (AsCompiler). + */ + opts::maplePhase.SetValue(true); + + /* opts::infile must be cleared because we should run compilation for each file separately. + * Separated input file are set in Actions. + */ + opts::infile.Clear(); + + return DefaultOption(); +} + +std::string MapleCombCompilerWrp::GetInputFileName(const MplOptions &options [[maybe_unused]], + const Action &action) const { + if (action.IsItFirstRealAction()) { + return action.GetInputFile(); + } + + InputFileType fileType = action.GetInputFileType(); + auto fullOutput = action.GetFullOutputName(); + if (fileType == InputFileType::kFileTypeVtableImplMpl) { + return fullOutput + ".VtableImpl.mpl"; + } + if (fileType == InputFileType::kFileTypeBpl) { + return fullOutput + ".bpl"; + } + return fullOutput + ".mpl"; +} + +void MapleCombCompilerWrp::GetTmpFilesToDelete(const MplOptions &mplOptions [[maybe_unused]], const Action &action, + std::vector &tempFiles) const { + tempFiles.push_back(action.GetFullOutputName() + ".s"); +} + +std::unordered_set MapleCombCompilerWrp::GetFinalOutputs(const MplOptions &mplOptions [[maybe_unused]], + const Action &action) const { + std::unordered_set finalOutputs; + (void)finalOutputs.insert(action.GetFullOutputName() + ".s"); + return finalOutputs; +} +} // namespace maple diff --git a/src/mapleall/maple_driver/src/mpl_options.cpp b/src/mapleall/maple_driver/src/mpl_options.cpp new file mode 100644 index 0000000000000000000000000000000000000000..f7e810e38bd11282f35678c7a7be3f606e325d46 --- /dev/null +++ b/src/mapleall/maple_driver/src/mpl_options.cpp @@ -0,0 +1,899 @@ +/* + * Copyright (c) [2019-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "mpl_options.h" +#include +#include +#include +#include +#include "compiler.h" +#include "compiler_factory.h" +#include "file_utils.h" +#include "mpl_logging.h" +#include "string_utils.h" +#include "version.h" +#include "default_options.def" +#include "me_option.h" +#include "option.h" +#include "cg_option.h" +#include "driver_options.h" +#include "triple.h" + + +namespace maple { +using namespace maplebe; + +/* tool -> OptionCategory map: ld -> ldCategory, me -> meCategory and etc... */ +static std::unordered_map exeCategories = { + {"maple", &driverCategory}, + {maple::kBinNameClang, &clangCategory}, + {maple::kBinNameCpp2mpl, &hir2mplCategory}, + {maple::kBinNameMpl2mpl, &mpl2mplCategory}, + {maple::kBinNameMe, &meCategory}, + {maple::kBinNameMplcg, &cgCategory}, + {maple::kAsFlag, &asCategory}, + {maple::kLdFlag, &ldCategory}, + {maple::kBinNameDex2mpl, &dex2mplCategory}, + {maple::kBinNameJbc2mpl, &jbc2mplCategory}, + {maple::kBinNameMplipa, &ipaCategory} +}; + +#ifdef ANDROID +const std::string kMapleDriverVersion = "MapleDriver " + std::to_string(Version::GetMajorVersion()) + "." + + std::to_string(Version::GetMinorVersion()) + " 20190929"; +#else +const std::string kMapleDriverVersion = "Maple Version : " + Version::GetVersionStr(); +#endif + +const std::vector kMapleCompilers = { "jbc2mpl", "hir2mpl", + "dex2mpl", "mplipa", "as", "ld", + "me", "mpl2mpl", "mplcg", "clang"}; + +ErrorCode MplOptions::Parse(int argc, char **argv) { + (void)maplecl::CommandLine::GetCommandLine().Parse(argc, argv); + exeFolder = FileUtils::GetFileFolder(FileUtils::GetExecutable()); + + // We should recognize O0, O2 and run options firstly to decide the real options + ErrorCode ret = HandleEarlyOptions(); + if (ret != kErrorNoError) { + return ret; + } + + /* Check whether the input files were valid */ + ret = CheckInputFiles(); + if (ret != kErrorNoError) { + return ret; + } + + // Decide runningExes for default options(O0, O2) by input files + if (runMode != RunMode::kCustomRun) { + ret = DecideRunningPhases(); + if (ret != kErrorNoError) { + return ret; + } + } else { // kCustomRun + /* kCustomRun run mode is set if --run=tool1:tool2 option is used. + * This Option is parsed on DecideRunType step. DecideRunType fills runningExes vector. + * DecideRunningPhases(runningExes) creates ActionsTree in kCustomRun mode. + * Maybe we can create Actions tree in DecideRunType in order to not use runningExes? 
+ */ + ret = DecideRunningPhases(runningExes); + if (ret != kErrorNoError) { + return ret; + } + } + + ret = HandleOptions(); + if (ret != kErrorNoError) { + return ret; + } + + return ret; +} + +ErrorCode MplOptions::HandleOptions() { + if (opts::output.IsEnabledByUser() && GetActions().size() > 1) { + LogInfo::MapleLogger(kLlErr) << "Cannot specify -o when generating multiple output\n"; + return kErrorInvalidParameter; + } + + if (opts::saveTempOpt.IsEnabledByUser()) { + opts::genMeMpl.SetValue(true); + opts::genVtable.SetValue(true); + StringUtils::Split(opts::saveTempOpt, saveFiles, ','); + } + + if (opts::target.IsEnabledByUser()) { + Triple::GetTriple().Init(opts::target.GetValue()); + } else { + Triple::GetTriple().Init(); + } + + if (!opts::safeRegionOption) { + if (opts::npeNoCheck) { + npeCheckMode = SafetyCheckMode::kNoCheck; + } + + if (opts::npeStaticCheck) { + npeCheckMode = SafetyCheckMode::kStaticCheck; + } + + if (opts::boundaryNoCheck) { + boundaryCheckMode = SafetyCheckMode::kNoCheck; + } + + if (opts::boundaryStaticCheck) { + boundaryCheckMode = SafetyCheckMode::kStaticCheck; + } + } else { /* safeRegionOption is eanbled */ + npeCheckMode = SafetyCheckMode::kDynamicCheck; + boundaryCheckMode = SafetyCheckMode::kDynamicCheck; + } + + if (opts::npeDynamicCheck) { + npeCheckMode = SafetyCheckMode::kDynamicCheck; + } + + if (opts::npeDynamicCheckSilent) { + npeCheckMode = SafetyCheckMode::kDynamicCheckSilent; + } + + if (opts::boundaryDynamicCheck) { + boundaryCheckMode = SafetyCheckMode::kDynamicCheck; + } + + if (opts::boundaryDynamicCheckSilent) { + boundaryCheckMode = SafetyCheckMode::kDynamicCheckSilent; + } + + HandleExtraOptions(); + + return kErrorNoError; +} + +ErrorCode MplOptions::HandleEarlyOptions() { + if (opts::version) { + LogInfo::MapleLogger() << kMapleDriverVersion << "\n"; + + /* exit, if only one "version" option is set. 
Else: continue compilation */ + if (driverCategory.GetEnabledOptions().size() == 1) { + return kErrorExitHelp; + } + } + + if (opts::printDriverPhases) { + DumpActionTree(); + return kErrorExitHelp; + } + + if (opts::help.IsEnabledByUser()) { + if (auto it = exeCategories.find(opts::help.GetValue()); it != exeCategories.end()) { + maplecl::CommandLine::GetCommandLine().HelpPrinter(*it->second); + } else { + maple::LogInfo::MapleLogger() << "USAGE: maple [options]\n\n" + " Example 1: /maple --run=me:mpl2mpl:mplcg " + "--option=\"[MEOPT]:[MPL2MPLOPT]:[MPLCGOPT]\"\n" + " --mplt=MPLTPATH inputFile.mpl\n" + " Example 2: /maple -O2 --mplt=mpltPath inputFile.dex\n\n" + "==============================\n" + " Options:\n"; + maplecl::CommandLine::GetCommandLine().HelpPrinter(); + } + return kErrorExitHelp; + } + + if (opts::o0.IsEnabledByUser() || + opts::o1.IsEnabledByUser() || + opts::o2.IsEnabledByUser() || + opts::os.IsEnabledByUser()) { + if (opts::run.IsEnabledByUser()) { + /* -Ox and --run should not appear at the same time */ + LogInfo::MapleLogger(kLlErr) << "Cannot set auto mode and run mode at the same time!\n"; + return kErrorInvalidParameter; + } else { + runMode = RunMode::kAutoRun; + } + } else if (opts::run.IsEnabledByUser()) { + runMode = RunMode::kCustomRun; + + UpdateRunningExe(opts::run); + if (!opts::optionOpt.GetValue().empty()) { + if (UpdateExeOptions(opts::optionOpt) != kErrorNoError) { + return kErrorInvalidParameter; + } + } + } else { + runMode = RunMode::kAutoRun; + opts::o0.SetValue(true); // enable default -O0 + } + + return kErrorNoError; +} + +void MplOptions::HandleExtraOptions() { + for (const auto &val : opts::clangOpt.GetValues()) { + UpdateExeOptions(val, kBinNameClang); + } + + for (const auto &val : opts::hir2mplOpt.GetValues()) { + UpdateExeOptions(val, kBinNameCpp2mpl); + } + + for (const auto &val : opts::mpl2mplOpt.GetValues()) { + UpdateExeOptions(val, kBinNameMpl2mpl); + printExtraOptStr << " --mpl2mpl-opt=" << "\"" << val << "\""; + } + + for (const auto &val : opts::meOpt.GetValues()) { + UpdateExeOptions(val, kBinNameMe); + printExtraOptStr << " --me-opt=" << "\"" << val << "\""; + } + + for (const auto &val : opts::mplcgOpt.GetValues()) { + UpdateExeOptions(val, kBinNameMplcg); + printExtraOptStr << " --mplcg-opt=" << "\"" << val << "\""; + } + + for (const auto &val : opts::asOpt.GetValues()) { + UpdateExeOptions(val, kAsFlag); + } + + for (const auto &val : opts::ldOpt.GetValues()) { + UpdateExeOptions(val, kLdFlag); + } + + for (const auto &val : opts::dex2mplOpt.GetValues()) { + UpdateExeOptions(val, kBinNameDex2mpl); + } + + for (const auto &val : opts::jbc2mplOpt.GetValues()) { + UpdateExeOptions(val, kBinNameJbc2mpl); + } + + for (const auto &val : opts::mplipaOpt.GetValues()) { + UpdateExeOptions(val, kBinNameMplipa); + } + + // A workaround to pass --general-reg-only from the cg options to global options + auto it = exeOptions.find(kBinNameMplcg); + if (it != exeOptions.end()) { + for (auto &opt : std::as_const(it->second)) { + if (opt == "--general-reg-only") { + generalRegOnly = true; + break; + } + } + } +} + +std::unique_ptr MplOptions::DecideRunningPhasesByType(const InputInfo *const inputInfo, + bool isMultipleFiles) { + InputFileType inputFileType = inputInfo->GetInputFileType(); + std::unique_ptr currentAction = std::make_unique(kInputPhase, inputInfo); + std::unique_ptr newAction; + + bool isNeedMapleComb = true; + bool isNeedMplcg = true; + bool isNeedAs = true; + switch (inputFileType) { + case InputFileType::kFileTypeC: + case 
InputFileType::kFileTypeCpp: + UpdateRunningExe(kBinNameClang); + newAction = std::make_unique(kBinNameClang, inputInfo, currentAction); + currentAction = std::move(newAction); + [[clang::fallthrough]]; + case InputFileType::kFileTypeAst: + UpdateRunningExe(kBinNameCpp2mpl); + newAction = std::make_unique(kBinNameCpp2mpl, inputInfo, currentAction); + currentAction = std::move(newAction); + break; + case InputFileType::kFileTypeJar: + // fall-through + case InputFileType::kFileTypeClass: + UpdateRunningExe(kBinNameJbc2mpl); + newAction = std::make_unique(kBinNameJbc2mpl, inputInfo, currentAction); + currentAction = std::move(newAction); + isNeedAs = false; + break; + case InputFileType::kFileTypeDex: + UpdateRunningExe(kBinNameDex2mpl); + newAction = std::make_unique(kBinNameDex2mpl, inputInfo, currentAction); + currentAction = std::move(newAction); + isNeedAs = false; + break; + case InputFileType::kFileTypeMpl: + break; + case InputFileType::kFileTypeMeMpl: + case InputFileType::kFileTypeVtableImplMpl: + isNeedMapleComb = false; + break; + case InputFileType::kFileTypeS: + isNeedMplcg = false; + isNeedMapleComb = false; + break; + case InputFileType::kFileTypeBpl: + break; + case InputFileType::kFileTypeObj: + isNeedMplcg = false; + isNeedMapleComb = false; + isNeedAs = false; + break; + case InputFileType::kFileTypeNone: + return nullptr; + default: + return nullptr; + } + + if (opts::maplePhase == true) { + isNeedAs = false; + } + + if (isNeedMapleComb) { + if (isMultipleFiles) { + selectedExes.push_back(kBinNameMapleCombWrp); + newAction = std::make_unique(kBinNameMapleCombWrp, inputInfo, currentAction); + currentAction = std::move(newAction); + } else { + selectedExes.push_back(kBinNameMapleComb); + newAction = std::make_unique(kBinNameMapleComb, inputInfo, currentAction); + currentAction = std::move(newAction); + } + } + if (isNeedMplcg && !isMultipleFiles) { + selectedExes.push_back(kBinNameMplcg); + runningExes.push_back(kBinNameMplcg); + newAction = std::make_unique(kBinNameMplcg, inputInfo, currentAction); + currentAction = std::move(newAction); + } + + if (isNeedAs) { + UpdateRunningExe(kAsFlag); + newAction = std::make_unique(kAsFlag, inputInfo, currentAction); + currentAction = std::move(newAction); + } + + if (!opts::compileWOLink) { + UpdateRunningExe(kLdFlag); + /* "Linking step" Action can have several inputActions. + * Each inputAction links to previous Actions to create the action tree. + * For linking step, inputActions are all assembly actions. + * Linking step Action is created outside this function because + * we must create all assembly actions (for all input files) before. + */ + } + + return currentAction; +} + +ErrorCode MplOptions::DecideRunningPhases() { + ErrorCode ret = kErrorNoError; + std::vector> linkActions; + std::unique_ptr lastAction; + + bool isMultipleFiles = (inputInfos.size() > 1); + + for (auto &inputInfo : inputInfos) { + CHECK_FATAL(inputInfo != nullptr, "InputInfo must be created!!"); + + lastAction = DecideRunningPhasesByType(inputInfo.get(), isMultipleFiles); + + /* Add a message interface for correct exit with compilation error. And use it here instead of CHECK_FATAL. */ + CHECK_FATAL(lastAction != nullptr, "Incorrect input file type: %s", + inputInfo->GetInputFile().c_str()); + + if ((lastAction->GetTool() == kAsFlag && !opts::compileWOLink) || + lastAction->GetTool() == kInputPhase) { + /* 1. For linking step, inputActions are all assembly actions; + * 2. 
If we try to link with maple driver, inputActions are all kInputPhase objects;
+       */
+      linkActions.push_back(std::move(lastAction));
+    } else {
+      rootActions.push_back(std::move(lastAction));
+    }
+  }
+
+  if (!linkActions.empty()) {
+    /* "a.out" is the default output file name - fix if it's needed */
+    auto currentAction = std::make_unique<Action>(kLdFlag, linkActions, AllocateInputInfo("a.out"));
+    rootActions.push_back(std::move(currentAction));
+  }
+
+  return ret;
+}
+
+ErrorCode MplOptions::MFCreateActionByExe(const std::string &exe, std::unique_ptr<Action> &currentAction,
+                                          const InputInfo *const inputInfo, bool &wasWrpCombCompilerCreated) const {
+  ErrorCode ret = kErrorNoError;
+
+  if (exe == kBinNameMe || exe == kBinNameMpl2mpl || exe == kBinNameMplcg) {
+    if (!wasWrpCombCompilerCreated) {
+      auto newAction = std::make_unique<Action>(kBinNameMapleCombWrp, inputInfo, currentAction);
+      currentAction = std::move(newAction);
+      wasWrpCombCompilerCreated = true;
+    } else {
+      return ret;
+    }
+  }
+
+  else {
+    auto newAction = std::make_unique<Action>(exe, inputInfo, currentAction);
+    currentAction = std::move(newAction);
+  }
+
+  return ret;
+}
+
+ErrorCode MplOptions::SFCreateActionByExe(const std::string &exe, std::unique_ptr<Action> &currentAction,
+                                          const InputInfo *const inputInfo, bool &isCombCompiler) const {
+  ErrorCode ret = kErrorNoError;
+
+  if (exe == kBinNameMe || exe == kBinNameMpl2mpl) {
+    if (!isCombCompiler) {
+      auto newAction = std::make_unique<Action>(kBinNameMapleComb, inputInfo, currentAction);
+      currentAction = std::move(newAction);
+      isCombCompiler = true;
+    } else {
+      return ret;
+    }
+  }
+
+  else {
+    auto newAction = std::make_unique<Action>(exe, inputInfo, currentAction);
+    currentAction = std::move(newAction);
+  }
+
+  return ret;
+}
+
+ErrorCode MplOptions::DecideRunningPhases(const std::vector<std::string> &runExes) {
+  ErrorCode ret = kErrorNoError;
+
+  bool isMultipleFiles = (inputInfos.size() > 1);
+
+  for (auto &inputInfo : inputInfos) {
+    CHECK_FATAL(inputInfo != nullptr, "InputInfo must be created!!");
+    /* MplOptions is the owner of all InputInfos. MplOptions is alive during compilation,
+     * so we can use a raw pointer inside an Action.
+ */ + const InputInfo *const rawInputInfo = inputInfo.get(); + + bool isCombCompiler = false; + bool wasWrpCombCompilerCreated = false; + + auto currentAction = std::make_unique(kInputPhase, inputInfo.get()); + + for (const auto &exe : runExes) { + if (isMultipleFiles) { + ret = MFCreateActionByExe(exe, currentAction, rawInputInfo, wasWrpCombCompilerCreated); + if (ret != kErrorNoError) { + return ret; + } + } else { + ret = SFCreateActionByExe(exe, currentAction, rawInputInfo, isCombCompiler); + if (ret != kErrorNoError) { + return ret; + } + } + } + + rootActions.push_back(std::move(currentAction)); + } + + return ret; +} + +void MplOptions::DumpActionTree() const { + for (auto &rNode : rootActions) { + DumpActionTree(*rNode, 0); + } +} + +void MplOptions::DumpActionTree(const Action &action, int indents) const { + for (const std::unique_ptr &a : action.GetInputActions()) { + DumpActionTree(*a, indents + 1); + } + + if (indents != 0) { + LogInfo::MapleLogger() << "|"; + /* print indents */ + for (int i = 0; i < indents; ++i) { + LogInfo::MapleLogger() << "-"; + } + } + + if (action.GetTool() == kInputPhase) { + LogInfo::MapleLogger() << action.GetTool() << " " << action.GetInputFile() << '\n'; + } else { + LogInfo::MapleLogger() << action.GetTool() << '\n'; + } +} + +std::string MplOptions::GetCommonOptionsStr() const { + std::string driverOptions; + static const std::vector extraExclude = { + &opts::run, &opts::optionOpt, &opts::infile, &opts::mpl2mplOpt, &opts::meOpt,&opts::mplcgOpt, + &opts::o0, &opts::o1, &opts::o2, &opts::os + }; + + for (auto const &opt : driverCategory.GetEnabledOptions()) { + if (!(std::find(std::begin(extraExclude), std::end(extraExclude), opt) != std::end(extraExclude))) { + for (const auto &val : opt->GetRawValues()) { + if (!val.empty()) { + driverOptions += opt->GetName() + " " + val + " "; + } else { + driverOptions += opt->GetName() + " "; + } + } + } + } + + return driverOptions; +} + +InputInfo *MplOptions::AllocateInputInfo(const std::string &inputFile) { + auto inputInfo = std::make_unique(inputFile); + InputInfo *ret = inputInfo.get(); + + inputInfos.push_back(std::move(inputInfo)); + + /* inputInfo continue to exist in inputInfos vector of unique_ptr so we can return raw pointer */ + return ret; +} + +ErrorCode MplOptions::CheckInputFiles() { + auto &badArgs = maplecl::CommandLine::GetCommandLine().badCLArgs; + + /* Set input files with --infile="file1 file2" option */ + if (opts::infile.IsEnabledByUser()) { + if (StringUtils::Trim(opts::infile).empty()) { + return kErrorFileNotFound; + } + + std::vector splitsInputFiles; + StringUtils::Split(opts::infile, splitsInputFiles, ','); + + /* inputInfo describes each input file for driver */ + for (auto &inFile : splitsInputFiles) { + if (FileUtils::IsFileExists(inFile)) { + inputFiles.push_back(inFile); + inputInfos.push_back(std::make_unique(inFile)); + } else { + LogInfo::MapleLogger(kLlErr) << "File does not exist: " << inFile << "\n"; + return kErrorFileNotFound; + } + } + } + + /* Set input files directly: maple file1 file2 */ + for (auto &arg : badArgs) { + if (FileUtils::IsFileExists(arg.first)) { + inputFiles.push_back(arg.first); + inputInfos.push_back(std::make_unique(arg.first)); + } else { + LogInfo::MapleLogger(kLlErr) << "Unknown option or non-existent input file: " << arg.first << "\n"; + if (!opts::ignoreUnkOpt) { + return kErrorInvalidParameter; + } + } + } + + if (inputFiles.empty()) { + return kErrorFileNotFound; + } + + return kErrorNoError; +} + +ErrorCode 
MplOptions::AppendCombOptions(MIRSrcLang srcLang) { + ErrorCode ret = kErrorNoError; + if (runMode == RunMode::kCustomRun) { + return ret; + } + + if (opts::o0) { + ret = AppendDefaultOptions(kBinNameMe, kMeDefaultOptionsO0, sizeof(kMeDefaultOptionsO0) / sizeof(MplOption)); + if (ret != kErrorNoError) { + return ret; + } + if (srcLang != kSrcLangC) { + ret = AppendDefaultOptions(kBinNameMpl2mpl, kMpl2MplDefaultOptionsO0, + sizeof(kMpl2MplDefaultOptionsO0) / sizeof(MplOption)); + } else { + ret = AppendDefaultOptions(kBinNameMpl2mpl, kMpl2MplDefaultOptionsO0ForC, + sizeof(kMpl2MplDefaultOptionsO0ForC) / sizeof(MplOption)); + } + } else if (opts::o2) { + if (opts::withIpa) { + UpdateRunningExe(kBinNameMplipa); + } + if (srcLang != kSrcLangC) { + ret = AppendDefaultOptions(kBinNameMe, kMeDefaultOptionsO2, + sizeof(kMeDefaultOptionsO2) / sizeof(MplOption)); + if (ret != kErrorNoError) { + return ret; + } + ret = AppendDefaultOptions(kBinNameMpl2mpl, kMpl2MplDefaultOptionsO2, + sizeof(kMpl2MplDefaultOptionsO2) / sizeof(MplOption)); + } else { + ret = AppendDefaultOptions(kBinNameMe, kMeDefaultOptionsO2ForC, + sizeof(kMeDefaultOptionsO2ForC) / sizeof(MplOption)); + if (ret != kErrorNoError) { + return ret; + } + ret = AppendDefaultOptions(kBinNameMpl2mpl, kMpl2MplDefaultOptionsO2ForC, + sizeof(kMpl2MplDefaultOptionsO2ForC) / sizeof(MplOption)); + } + } else if (opts::os) { + if (srcLang == kSrcLangJava) { + return kErrorNotImplement; + } + ret = AppendDefaultOptions(kBinNameMe, kMeDefaultOptionsOs, + sizeof(kMeDefaultOptionsOs) / sizeof(MplOption)); + if (ret != kErrorNoError) { + return ret; + } + ret = AppendDefaultOptions(kBinNameMpl2mpl, kMpl2MplDefaultOptionsOs, + sizeof(kMpl2MplDefaultOptionsOs) / sizeof(MplOption)); + } + + if (ret != kErrorNoError) { + return ret; + } + + return ret; +} + +ErrorCode MplOptions::AppendMplcgOptions(MIRSrcLang srcLang) { + ErrorCode ret = kErrorNoError; + if (runMode == RunMode::kCustomRun) { + return ret; + } + if (opts::o0) { + if (srcLang != kSrcLangC) { + ret = AppendDefaultOptions(kBinNameMplcg, kMplcgDefaultOptionsO0, + sizeof(kMplcgDefaultOptionsO0) / sizeof(MplOption)); + } else { + ret = AppendDefaultOptions(kBinNameMplcg, kMplcgDefaultOptionsO0ForC, + sizeof(kMplcgDefaultOptionsO0ForC) / sizeof(MplOption)); + } + } else if (opts::o2) { + if (srcLang != kSrcLangC) { + ret = AppendDefaultOptions(kBinNameMplcg, kMplcgDefaultOptionsO2, + sizeof(kMplcgDefaultOptionsO2) / sizeof(MplOption)); + } else { + ret = AppendDefaultOptions(kBinNameMplcg, kMplcgDefaultOptionsO2ForC, + sizeof(kMplcgDefaultOptionsO2ForC) / sizeof(MplOption)); + } + } else if (opts::os) { + if (srcLang == kSrcLangJava) { + return kErrorNotImplement; + } + ret = AppendDefaultOptions(kBinNameMplcg, kMplcgDefaultOptionsOs, + sizeof(kMplcgDefaultOptionsOs) / sizeof(MplOption)); + } + + if (ret != kErrorNoError) { + return ret; + } + + return ret; +} + +void MplOptions::DumpAppendedOptions(const std::string &exeName, + const MplOption mplOptions[], unsigned int length) const { + LogInfo::MapleLogger() << exeName << " Default Options: "; + for (size_t i = 0; i < length; ++i) { + LogInfo::MapleLogger() << mplOptions[i].GetKey() << " " + << mplOptions[i].GetValue() << " "; + } + LogInfo::MapleLogger() << "\n"; + + LogInfo::MapleLogger() << exeName << " Extra Options: "; + auto it = exeOptions.find(exeName); + if (it != exeOptions.end()) { + for (auto &opt : it->second) { + LogInfo::MapleLogger() << opt << " "; + } + } + + LogInfo::MapleLogger() << "\n"; +} + +ErrorCode 
MplOptions::AppendDefaultOptions(const std::string &exeName, + MplOption mplOptions[], unsigned int length) { + if (opts::debug) { + DumpAppendedOptions(exeName, mplOptions, length); + } + + for (unsigned int i = 0; i < length; ++i) { + mplOptions[i].SetValue(FileUtils::AppendMapleRootIfNeeded(mplOptions[i].GetNeedRootPath(), + mplOptions[i].GetValue(), GetExeFolder())); + auto &key = mplOptions[i].GetKey(); + auto &val = mplOptions[i].GetValue(); + + if (!val.empty()) { + exeOptions[exeName].push_front(val); + } + if (!key.empty()) { + exeOptions[exeName].push_front(key); + } + } + + auto iter = std::find(runningExes.begin(), runningExes.end(), exeName); + if (iter == runningExes.end()) { + runningExes.push_back(exeName); + } + return kErrorNoError; +} + +void MplOptions::UpdateExeOptions(const std::string &options, const std::string &tool) { + std::vector splittedOptions; + StringUtils::Split(options, splittedOptions, ' '); + + auto &toolOptions = exeOptions[tool]; // generate empty entry, if it does not exist + for (auto &opt : splittedOptions) { + if (!opt.empty()) { + toolOptions.push_back(opt); + } + } +} + +ErrorCode MplOptions::UpdateExeOptions(const std::string &args) { + std::vector options; + StringUtils::Split(args, options, ':'); + + /* The number of a tools and options for them must be the same */ + if (options.size() != runningExes.size()) { + LogInfo::MapleLogger(kLlErr) << "The --run and --option are not matched, please check them." + << "(Too many or too few)\n"; + return kErrorInvalidParameter; + } + + auto tool = runningExes.begin(); + for (auto &opt : options) { + UpdateExeOptions(opt, *tool); + ++tool; + } + + return kErrorNoError; +} + +maplecl::OptionCategory *MplOptions::GetCategory(const std::string &tool) const { + auto it = exeCategories.find(tool); + if (it == exeCategories.end()) { + return nullptr; + } + + return it->second; +} + +void MplOptions::UpdateRunningExe(const std::string &args) { + std::vector results; + StringUtils::Split(args, results, ':'); + for (size_t i = 0; i < results.size(); ++i) { + auto iter = std::find(runningExes.begin(), runningExes.end(), results[i]); + if (iter == runningExes.end()) { + runningExes.push_back(results[i]); + selectedExes.push_back(results[i]); + } + } +} + +std::string MplOptions::GetInputFileNameForPrint(const Action * const action) const { + auto genInputs = [](const auto &container) { + std::string inputs; + for (const auto &in : container) { + inputs += " " + in; + } + return inputs; + }; + + if (!runningExes.empty()) { + if (runningExes[0] == kBinNameMe || runningExes[0] == kBinNameMpl2mpl || + runningExes[0] == kBinNameMplcg) { + return genInputs(GetInputFiles()); + } + } + + if (action == nullptr) { + return genInputs(GetInputFiles()); + } + + if (action->GetInputFileType() == InputFileType::kFileTypeVtableImplMpl) { + return action->GetFullOutputName() + ".VtableImpl.mpl"; + } + if (action->GetInputFileType() == InputFileType::kFileTypeBpl) { + return action->GetFullOutputName() + ".bpl"; + } + if (action->GetInputFileType() == InputFileType::kFileTypeMbc) { + return action->GetFullOutputName() + ".mbc"; + } + if (action->GetInputFileType() == InputFileType::kFileTypeLmbc) { + return action->GetFullOutputName() + ".lmbc"; + } + return action->GetFullOutputName() + ".mpl"; +} + +void MplOptions::PrintCommand(const Action * const action) { + if (hasPrinted) { + return; + } + + std::ostringstream optionStr; + if (runMode == RunMode::kAutoRun) { + if (opts::o0) { + optionStr << " -O0"; + } else if (opts::o1) { + 
optionStr << " -O1"; + } else if (opts::o2) { + optionStr << " -O2"; + } else if (opts::os) { + optionStr << " -Os"; + } + + std::string driverOptions = GetCommonOptionsStr(); + auto inputs = GetInputFileNameForPrint(action); + LogInfo::MapleLogger() << "Starting:" << exeFolder << "maple" << optionStr.str() + << printExtraOptStr.str() << " " << driverOptions << inputs << '\n'; + } + if (runMode == RunMode::kCustomRun) { + PrintDetailCommand(action, true); + } + hasPrinted = true; +} + +void MplOptions::ConnectOptStr(std::string &optionStr, const std::string &exeName, bool &firstComb, + std::string &runStr) { + std::string connectSym = ""; + if (exeOptions.find(exeName) != exeOptions.end()) { + if (!firstComb) { + runStr += (":" + exeName); + optionStr += ":"; + } else { + runStr += exeName; + firstComb = false; + } + auto it = exeOptions.find(exeName); + for (auto &opt : std::as_const(it->second)) { + optionStr += (" " + opt); + } + } +} + +void MplOptions::PrintDetailCommand(const Action * const action, bool isBeforeParse) { + if (exeOptions.find(kBinNameMe) == exeOptions.end() && exeOptions.find(kBinNameMpl2mpl) == exeOptions.end() && + exeOptions.find(kBinNameMplcg) == exeOptions.end()) { + return; + } + std::string runStr = "--run="; + std::string optionStr; + optionStr += "--option=\""; + bool firstComb = true; + ConnectOptStr(optionStr, kBinNameMe, firstComb, runStr); + ConnectOptStr(optionStr, kBinNameMpl2mpl, firstComb, runStr); + ConnectOptStr(optionStr, kBinNameMplcg, firstComb, runStr); + optionStr += "\""; + + std::string driverOptions = GetCommonOptionsStr(); + auto inputs = GetInputFileNameForPrint(action); + + if (isBeforeParse) { + LogInfo::MapleLogger() << "Starting:" << exeFolder << "maple " << runStr << " " << optionStr << " " + << printExtraOptStr.str() << " " << driverOptions << inputs << '\n'; + } else { + LogInfo::MapleLogger() << "Finished:" << exeFolder << "maple " << runStr << " " << optionStr << " " + << driverOptions << inputs << '\n'; + } +} +} // namespace maple diff --git a/src/mapleall/maple_driver/src/mplcg_compiler.cpp b/src/mapleall/maple_driver/src/mplcg_compiler.cpp new file mode 100644 index 0000000000000000000000000000000000000000..61578109cefaa0d084db5307b2401c00e9b527ee --- /dev/null +++ b/src/mapleall/maple_driver/src/mplcg_compiler.cpp @@ -0,0 +1,232 @@ +/* + * Copyright (c) [2019-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "compiler.h" +#include "driver_options.h" +#include "default_options.def" +#include "mpl_logging.h" +#include "mpl_timer.h" +#include "driver_runner.h" + +namespace maple { +using namespace maplebe; + +DefaultOption MplcgCompiler::GetDefaultOptions(const MplOptions &options, + const Action &action [[maybe_unused]]) const { + uint32_t len = 0; + MplOption *kMplcgDefaultOptions = nullptr; + + if (opts::o0) { + len = sizeof(kMplcgDefaultOptionsO0) / sizeof(MplOption); + kMplcgDefaultOptions = kMplcgDefaultOptionsO0; + } else if (opts::o2) { + len = sizeof(kMplcgDefaultOptionsO2) / sizeof(MplOption); + kMplcgDefaultOptions = kMplcgDefaultOptionsO2; + } + + if (kMplcgDefaultOptions == nullptr) { + return DefaultOption(); + } + + DefaultOption defaultOptions = { std::make_unique(len), len }; + for (uint32_t i = 0; i < len; ++i) { + defaultOptions.mplOptions[i] = kMplcgDefaultOptions[i]; + } + + for (uint32_t i = 0; i < defaultOptions.length; ++i) { + defaultOptions.mplOptions[i].SetValue( + FileUtils::AppendMapleRootIfNeeded(defaultOptions.mplOptions[i].GetNeedRootPath(), + defaultOptions.mplOptions[i].GetValue(), + options.GetExeFolder())); + } + return defaultOptions; +} + +const std::string &MplcgCompiler::GetBinName() const { + return kBinNameMplcg; +} + +std::string MplcgCompiler::GetInputFile(const MplOptions &options [[maybe_unused]], const Action &action, + const MIRModule *md) const { + if (action.IsItFirstRealAction()) { + return action.GetInputFile(); + } + // Get base file name + auto idx = action.GetOutputName().find(".VtableImpl"); + std::string outputName = action.GetOutputName(); + if (idx != std::string::npos) { + outputName = action.GetOutputName().substr(0, idx); + } + if (md != nullptr && md->GetSrcLang() == kSrcLangC) { + return action.GetOutputFolder() + outputName + ".me.mpl"; + } + return action.GetOutputFolder() + outputName + ".VtableImpl.mpl"; +} + +void MplcgCompiler::SetOutputFileName(const MplOptions &options, const Action &action, const MIRModule &md) { + if (md.GetSrcLang() == kSrcLangC) { + baseName = action.GetFullOutputName(); + } else { + baseName = action.GetOutputFolder() + FileUtils::GetFileName(GetInputFile(options, action, &md), false); + } + outputFile = baseName + ".s"; +} + +void MplcgCompiler::PrintMplcgCommand(const MplOptions &options, const Action &action, + const MIRModule &md) const { + std::string runStr = "--run="; + std::string optionStr = "--option=\""; + std::string connectSym = ""; + if (options.GetExeOptions().find(kBinNameMplcg) != options.GetExeOptions().end()) { + runStr += "mplcg"; + auto it = options.GetExeOptions().find(kBinNameMplcg); + if (it == options.GetExeOptions().end()) { + return; + } + for (auto &opt : it->second) { + optionStr += (" " + opt); + } + } + optionStr += "\""; + + std::string driverOptions = options.GetCommonOptionsStr(); + + LogInfo::MapleLogger() << "Starting:" << options.GetExeFolder() << "maple " << runStr << " " << optionStr << " " + << driverOptions << "--infile " << GetInputFile(options, action, &md) << '\n'; +} + +ErrorCode MplcgCompiler::MakeCGOptions(const MplOptions &options) const { + auto it = std::find(options.GetRunningExes().begin(), options.GetRunningExes().end(), kBinNameMplcg); + if (it == options.GetRunningExes().end()) { + return kErrorNoError; + } + CGOptions &cgOption = CGOptions::GetInstance(); + cgOption.SetOption(CGOptions::kDefaultOptions); +#if DEBUG + /* for convinence .loc is generated by default for debug maple compiler */ + cgOption.SetOption(CGOptions::kWithLoc); 
+#endif
+  /* use maple flags to set cg flags */
+  if (opts::withDwarf) {
+    cgOption.SetOption(CGOptions::kWithDwarf);
+#if DEBUG
+    cgOption.SetOption(CGOptions::kVerboseAsm);
+#endif
+  }
+  cgOption.SetGenerateFlags(CGOptions::kDefaultGflags);
+
+  auto itOpt = options.GetExeOptions().find(kBinNameMplcg);
+  if (itOpt != options.GetExeOptions().end()) {
+    const auto &cgExeOpts = itOpt->second;
+    const std::deque<std::string_view> strCgOptions(cgExeOpts.begin(), cgExeOpts.end());
+    (void)maplecl::CommandLine::GetCommandLine().HandleInputArgs(strCgOptions, cgCategory);
+  }
+
+  bool result = cgOption.SolveOptions(opts::debug);
+  if (!result) {
+    LogInfo::MapleLogger() << "Invalid mplcg options\n";
+    return kErrorCompileFail;
+  }
+  return kErrorNoError;
+}
+
+ErrorCode MplcgCompiler::GetMplcgOptions(MplOptions &options, const Action &action,
+                                         const MIRModule *theModule) const {
+  ErrorCode ret;
+  if (options.GetRunMode() == RunMode::kAutoRun) {
+    if (theModule == nullptr) {
+      std::string fileName = GetInputFile(options, action, theModule);
+      MIRModule module(fileName);
+      std::unique_ptr<MIRParser> theParser;
+      theParser.reset(new MIRParser(module));
+      MIRSrcLang srcLang = kSrcLangUnknown;
+      bool parsed = theParser->ParseSrcLang(srcLang);
+      if (!parsed) {
+        return kErrorCompileFail;
+      }
+      ret = options.AppendMplcgOptions(srcLang);
+      if (ret != kErrorNoError) {
+        return kErrorCompileFail;
+      }
+    } else {
+      ret = options.AppendMplcgOptions(theModule->GetSrcLang());
+      if (ret != kErrorNoError) {
+        return kErrorCompileFail;
+      }
+    }
+  }
+
+  ret = MakeCGOptions(options);
+  return ret;
+}
+
+ErrorCode MplcgCompiler::Compile(MplOptions &options, const Action &action,
+                                 std::unique_ptr<MIRModule> &theModule) {
+  ErrorCode ret = GetMplcgOptions(options, action, theModule.get());
+  if (ret != kErrorNoError) {
+    return kErrorCompileFail;
+  }
+  CGOptions &cgOption = CGOptions::GetInstance();
+  std::string fileName = GetInputFile(options, action, theModule.get());
+  bool fileRead = true;
+  if (theModule == nullptr) {
+    MPLTimer timer;
+    timer.Start();
+    fileRead = false;
+    theModule = std::make_unique<MIRModule>(fileName);
+    theModule->SetWithMe(
+        std::find(options.GetRunningExes().begin(), options.GetRunningExes().end(),
+                  kBinNameMe) != options.GetRunningExes().end());
+    if (action.GetInputFileType() != kFileTypeBpl &&
+        action.GetInputFileType() != kFileTypeMbc &&
+        action.GetInputFileType() != kFileTypeLmbc) {
+      std::unique_ptr<MIRParser> theParser;
+      theParser.reset(new MIRParser(*theModule));
+      bool parsed = theParser->ParseMIR(0, cgOption.GetParserOption());
+      if (parsed) {
+        if (!CGOptions::IsQuiet() && (theParser->GetWarning().size() != 0)) {
+          theParser->EmitWarning(fileName);
+        }
+      } else {
+        if (theParser != nullptr) {
+          theParser->EmitError(fileName);
+        }
+        return kErrorCompileFail;
+      }
+    } else {
+      BinaryMplImport binMplt(*theModule);
+      binMplt.SetImported(false);
+      std::string modid = theModule->GetFileName();
+      bool imported = binMplt.Import(modid, true);
+      if (!imported) {
+        return kErrorCompileFail;
+      }
+    }
+    timer.Stop();
+    LogInfo::MapleLogger() << "Mplcg Parser consumed " << timer.ElapsedMilliseconds() << "ms\n";
+  }
+  SetOutputFileName(options, action, *theModule);
+  theModule->SetInputFileName(fileName);
+  LogInfo::MapleLogger() << "Starting mplcg\n";
+  DriverRunner runner(theModule.get(), options.GetSelectedExes(), action.GetInputFileType(), fileName,
+                      opts::withDwarf, fileRead, opts::timePhase);
+  if (opts::debug) {
+    PrintMplcgCommand(options, action, *theModule);
+  }
+  runner.SetPrintOutExe(kBinNameMplcg);
+  runner.SetCGInfo(&cgOption, fileName);
+  runner.ProcessCGPhase(outputFile, baseName);
+  return kErrorNoError;
+}
+} // namespace maple
diff --git a/src/mapleall/maple_driver/src/triple.cpp b/src/mapleall/maple_driver/src/triple.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..fe206a8b4fd304141d0d3c4dee50fe9457d6f919
--- /dev/null
+++ b/src/mapleall/maple_driver/src/triple.cpp
@@ -0,0 +1,142 @@
+/*
+ * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+
+#include "triple.h"
+#include "driver_options.h"
+
+namespace opts {
+
+maplecl::Option<bool> bigendian({"-Be", "--Be", "--BigEndian", "-be", "--be", "-mbig-endian"},
+                                " --BigEndian/-Be \tUsing BigEndian\n"
+                                " --no-BigEndian \tUsing LittleEndian\n",
+                                {driverCategory, hir2mplCategory, dex2mplCategory, ipaCategory},
+                                maplecl::DisableWith("--no-BigEndian"));
+
+maplecl::Option<bool> ilp32({"--ilp32", "-ilp32", "--arm64-ilp32"},
+                            " --ilp32 \tarm64 with a 32-bit ABI instead of a 64-bit ABI\n",
+                            {driverCategory, hir2mplCategory, dex2mplCategory, ipaCategory});
+
+maplecl::Option<std::string> mabi({"-mabi"},
+                                  " -mabi= \tSpecify integer and floating-point calling convention\n",
+                                  {driverCategory, hir2mplCategory, dex2mplCategory, ipaCategory});
+
+}
+
+namespace maple {
+
+Triple::ArchType Triple::ParseArch(std::string_view archStr) const {
+  if (maple::utils::Contains({"aarch64", "aarch64_le"}, archStr)) {
+    return Triple::ArchType::aarch64;
+  } else if (maple::utils::Contains({"aarch64_be"}, archStr)) {
+    return Triple::ArchType::aarch64_be;
+  }
+
+  // Currently Triple supports only aarch64
+  return Triple::UnknownArch;
+}
+
+Triple::EnvironmentType Triple::ParseEnvironment(std::string_view archStr) const {
+  if (maple::utils::Contains({"ilp32", "gnu_ilp32", "gnuilp32"}, archStr)) {
+    return Triple::EnvironmentType::GNUILP32;
+  } else if (maple::utils::Contains({"gnu"}, archStr)) {
+    return Triple::EnvironmentType::GNU;
+  }
+
+  // Currently Triple supports only ilp32 and the default gnu/LP64 ABI
+  return Triple::UnknownEnvironment;
+}
+
+void Triple::Init() {
+  /* Currently Triple is used only to configure aarch64: be/le, ILP32/LP64.
+   * Other architectures (TARGX86_64, TARGX86, TARGARM32, TARGVM) are configured with the compiler build config. */
+#if TARGAARCH64
+  arch = (opts::bigendian) ? Triple::ArchType::aarch64_be : Triple::ArchType::aarch64;
+  environment = (opts::ilp32) ? Triple::EnvironmentType::GNUILP32 : Triple::EnvironmentType::GNU;
+
+  if (opts::mabi.IsEnabledByUser()) {
+    auto tmpEnvironment = ParseEnvironment(opts::mabi.GetValue());
+    if (tmpEnvironment != Triple::UnknownEnvironment) {
+      environment = tmpEnvironment;
+    }
+  }
+#endif
+}
+
+void Triple::Init(const std::string &target) {
+  data = target;
+
+  /* Currently Triple is used only to configure aarch64: be/le, ILP32/LP64.
+   * Other architectures (TARGX86_64, TARGX86, TARGARM32, TARGVM) are configured with the compiler build config. */
+#if TARGAARCH64
+  Init();
+
+  std::vector<std::string_view> components;
+  maple::StringUtils::SplitSV(data, components, '-');
+  if (components.size() == 0) {  // at least one component is required
+    return;
+  }
+
+  auto tmpArch = ParseArch(components[0]);  // checked first so the arch setting from opts::bigendian is not overwritten
+  if (tmpArch == Triple::UnknownArch) {
+    return;
+  }
+  arch = tmpArch;
+
+  /* Try to detect the environment in the option.
+   * For example, it can be: aarch64-none-linux-gnu or aarch64-linux-gnu or aarch64-gnu, where gnu is the environment */
+  for (size_t i = 1; i < components.size(); ++i) {
+    auto tmpEnvironment = ParseEnvironment(components[i]);
+    if (tmpEnvironment != Triple::UnknownEnvironment) {
+      environment = tmpEnvironment;
+      break;
+    }
+  }
+#endif
+}
+
+std::string Triple::GetArchName() const {
+  switch (arch) {
+    case ArchType::aarch64_be: return "aarch64_be";
+    case ArchType::aarch64: return "aarch64";
+    default: ASSERT(false, "Unknown Architecture Type\n");
+  }
+  return "";
+}
+
+std::string Triple::GetEnvironmentName() const {
+  switch (environment) {
+    case EnvironmentType::GNUILP32: return "gnu_ilp32";
+    case EnvironmentType::GNU: return "gnu";
+    default: ASSERT(false, "Unknown Environment Type\n");
+  }
+  return "";
+}
+
+std::string Triple::Str() const {
+  if (!data.empty()) {
+    return data;
+  }
+
+  if (GetArch() != ArchType::UnknownArch &&
+      GetEnvironment() != Triple::EnvironmentType::UnknownEnvironment) {
+    /* only the linux platform is supported, so "-linux-" is hardcoded */
+    return GetArchName() + "-linux-" + GetEnvironmentName();
+  }
+
+  CHECK_FATAL(false, "Only aarch64/aarch64_be GNU/GNUILP32 targets are supported\n");
+  return data;
+}
+
+} // namespace maple
diff --git a/src/mapleall/maple_ipa/BUILD.gn b/src/mapleall/maple_ipa/BUILD.gn
new file mode 100755
index 0000000000000000000000000000000000000000..bc10ad127092957797d10efd3684f245f269764b
--- /dev/null
+++ b/src/mapleall/maple_ipa/BUILD.gn
@@ -0,0 +1,48 @@
+#
+# Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved.
+#
+# OpenArkCompiler is licensed under Mulan PSL v2.
+# You can use this software according to the terms and conditions of the Mulan PSL v2.
+# You may obtain a copy of Mulan PSL v2 at:
+#
+#     http://license.coscl.org.cn/MulanPSL2
+#
+# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+# FIT FOR A PARTICULAR PURPOSE.
+# See the Mulan PSL v2 for more details.
+# +include_directories = [ + "${MAPLEALL_ROOT}/maple_ir/include", + "${MAPLEALL_ROOT}/mempool/include", + "${MAPLEALL_ROOT}/maple_util/include", + "${MAPLEALL_ROOT}/maple_me/include", + "${MAPLEALL_ROOT}/maple_ipa/include", + "${MAPLEALL_ROOT}/maple_ipa/include/old", + "${MAPLEALL_ROOT}/mpl2mpl/include", + "${MAPLEALL_ROOT}/maple_phase/include", + "${MAPLEALL_ROOT}/maple_driver/include", + "${THIRD_PARTY_ROOT}/bounds_checking_function/include", +] + +src_libmplipa = [ + "src/old/ipa_escape_analysis.cpp", + "src/old/do_ipa_escape_analysis.cpp", + "src/old/ea_connection_graph.cpp", + "src/old/ipa_option.cpp", + "src/ipa_side_effect.cpp", + "src/ipa_phase_manager.cpp", + "src/prop_return_null.cpp", + "src/prop_parameter_type.cpp", + "src/ipa_collect.cpp", + "src/ipa_clone.cpp", + "src/region_identify.cpp", +] + +configs = [ "${MAPLEALL_ROOT}:mapleallcompilecfg" ] + +static_library("libmplipa") { + sources = src_libmplipa + include_dirs = include_directories + output_dir = "${root_out_dir}/lib/${HOST_ARCH}" +} diff --git a/src/mapleall/maple_ipa/CMakeLists.txt b/src/mapleall/maple_ipa/CMakeLists.txt new file mode 100755 index 0000000000000000000000000000000000000000..8737cfad45938602d35aaeefda5f3e6ab83f7fc7 --- /dev/null +++ b/src/mapleall/maple_ipa/CMakeLists.txt @@ -0,0 +1,46 @@ +# +# Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# + +set(src_libmplipa + src/old/ipa_escape_analysis.cpp + src/old/do_ipa_escape_analysis.cpp + src/old/ea_connection_graph.cpp + src/old/ipa_option.cpp + src/ipa_side_effect.cpp + src/ipa_phase_manager.cpp + src/prop_return_null.cpp + src/prop_parameter_type.cpp + src/ipa_collect.cpp + src/ipa_clone.cpp + src/region_identify.cpp +) + +add_library(libmplipa STATIC ${src_libmplipa}) +target_include_directories(libmplipa PRIVATE + ${MAPLEALL_ROOT}/maple_ir/include + ${MAPLEALL_ROOT}/mempool/include + ${MAPLEALL_ROOT}/maple_util/include + ${MAPLEALL_ROOT}/maple_me/include + ${MAPLEALL_ROOT}/maple_ipa/include + ${MAPLEALL_ROOT}/maple_ipa/include/old + ${MAPLEALL_ROOT}/mpl2mpl/include + ${MAPLEALL_ROOT}/maple_phase/include + ${MAPLEALL_ROOT}/maple_driver/include + ${THIRD_PARTY_ROOT}/bounds_checking_function/include +) + +set_target_properties(libmplipa PROPERTIES + ARCHIVE_OUTPUT_DIRECTORY ${MAPLE_BUILD_OUTPUT}/lib/${HOST_ARCH} +) \ No newline at end of file diff --git a/src/mapleall/maple_ipa/include/func_desc.def b/src/mapleall/maple_ipa/include/func_desc.def new file mode 100644 index 0000000000000000000000000000000000000000..14c3641f3f165284f8d3dcc99f003d9880eb5b61 --- /dev/null +++ b/src/mapleall/maple_ipa/include/func_desc.def @@ -0,0 +1,539 @@ + /* + * Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +//assert.h +{"assert", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"static_assert", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, + +//fenv.h +{"feclearexcept", {FI::kUnknown, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"fetestexcept", {FI::kPure, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"feraiseexcept", {FI::kUnknown, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"fegetexceptflag", {FI::kPure, RI::kNoAlias, {PI::kReadMemoryOnly, PI::kReadSelfOnly}}}, +{"fesetexceptflag", {FI::kUnknown, RI::kNoAlias, {PI::kReadMemoryOnly, PI::kReadSelfOnly}}}, +{"fesetround", {FI::kUnknown, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"fegetround", {FI::kPure, RI::kNoAlias, {}}}, +{"fegetenv", {FI::kPure, RI::kNoAlias, {PI::kReadMemoryOnly}}}, +{"fesetenv", {FI::kUnknown, RI::kNoAlias, {PI::kReadMemoryOnly}}}, +{"feholdexcept", {FI::kUnknown, RI::kNoAlias, {PI::kReadMemoryOnly}}}, +{"feupdateenv", {FI::kUnknown, RI::kNoAlias, {PI::kReadMemoryOnly}}}, + +//ctype.h +{"isalnum", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"isalpha", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"islower", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"isupper", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"isdigit", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"isxdigit", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"iscntrl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"isgraph", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"isspace", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"isblank", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"isprint", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"ispunct", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"tolower", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"toupper", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"atof", {FI::kPure, RI::kNoAlias, {PI::kReadMemoryOnly}}}, +{"atoi", {FI::kPure, RI::kNoAlias, {PI::kReadMemoryOnly}}}, +{"atol", {FI::kPure, RI::kNoAlias, {PI::kReadMemoryOnly}}}, +{"atoll", {FI::kPure, RI::kNoAlias, {PI::kReadMemoryOnly}}}, +{"strtol", {FI::kUnknown, RI::kNoAlias, {PI::kReadMemoryOnly, PI::kWriteMemoryOnly, PI::kReadSelfOnly}}}, +{"strtoll", {FI::kUnknown, RI::kNoAlias, {PI::kReadMemoryOnly, PI::kWriteMemoryOnly, PI::kReadSelfOnly}}}, +{"strtoul", {FI::kUnknown, RI::kNoAlias, {PI::kReadMemoryOnly, PI::kWriteMemoryOnly, PI::kReadSelfOnly}}}, +{"strtoull", {FI::kUnknown, RI::kNoAlias, {PI::kReadMemoryOnly, PI::kWriteMemoryOnly, PI::kReadSelfOnly}}}, +{"strtof", {FI::kUnknown, RI::kNoAlias, {PI::kReadMemoryOnly, PI::kWriteMemoryOnly}}}, +{"strtod", {FI::kUnknown, RI::kNoAlias, {PI::kReadMemoryOnly, PI::kWriteMemoryOnly}}}, +{"strtold", {FI::kUnknown, RI::kNoAlias, {PI::kReadMemoryOnly, PI::kWriteMemoryOnly}}}, +{"strtoimax", {FI::kUnknown, RI::kNoAlias, {PI::kReadMemoryOnly, PI::kWriteMemoryOnly, PI::kReadSelfOnly}}}, +{"strtoumax", {FI::kUnknown, RI::kNoAlias, {PI::kReadMemoryOnly, PI::kWriteMemoryOnly, PI::kReadSelfOnly}}}, +{"strcpy", {FI::kUnknown, RI::kAliasParam0, {PI::kWriteMemoryOnly, PI::kReadMemoryOnly}}}, +{"strcpy_s", {FI::kUnknown, RI::kNoAlias, {PI::kWriteMemoryOnly, PI::kReadSelfOnly, 
PI::kReadMemoryOnly}}}, +{"strncpy", {FI::kUnknown, RI::kAliasParam0, {PI::kWriteMemoryOnly, PI::kReadMemoryOnly, PI::kReadSelfOnly}}}, +{"strncpy_s", {FI::kUnknown, RI::kNoAlias, {PI::kWriteMemoryOnly, PI::kReadSelfOnly, PI::kReadMemoryOnly, PI::kReadSelfOnly}}}, +{"strcat", {FI::kUnknown, RI::kAliasParam0, {PI::kWriteMemoryOnly, PI::kReadMemoryOnly}}}, +{"strcat_s", {FI::kUnknown, RI::kNoAlias, {PI::kWriteMemoryOnly, PI::kReadSelfOnly, PI::kReadMemoryOnly}}}, +{"strncat", {FI::kUnknown, RI::kAliasParam0, {PI::kWriteMemoryOnly, PI::kReadMemoryOnly, PI::kReadSelfOnly}}}, +{"strncat_s", {FI::kUnknown, RI::kNoAlias, {PI::kWriteMemoryOnly, PI::kReadSelfOnly, PI::kReadMemoryOnly, PI::kReadSelfOnly}}}, +{"strxfrm", {FI::kUnknown, RI::kNoAlias, {PI::kWriteMemoryOnly, PI::kReadMemoryOnly, PI::kReadSelfOnly}}}, +{"strdup", {FI::kUnknown, RI::kNoAlias, {PI::kReadMemoryOnly}}}, +{"strndup", {FI::kUnknown, RI::kNoAlias, {PI::kReadMemoryOnly, PI::kReadSelfOnly}}}, +{"strlen", {FI::kPure, RI::kNoAlias, {PI::kReadMemoryOnly}}}, +{"strnlen_s", {FI::kPure, RI::kNoAlias, {PI::kReadMemoryOnly, PI::kReadSelfOnly}}}, +{"strcmp", {FI::kPure, RI::kNoAlias, {PI::kReadMemoryOnly, PI::kReadMemoryOnly}}}, +{"strncmp", {FI::kPure, RI::kNoAlias, {PI::kReadMemoryOnly, PI::kReadMemoryOnly, PI::kReadSelfOnly}}}, +{"strcoll", {FI::kPure, RI::kNoAlias, {PI::kReadMemoryOnly, PI::kReadMemoryOnly}}}, +{"strchr", {FI::kPure, RI::kNoAlias, {PI::kReadMemoryOnly, PI::kReadSelfOnly}}}, +{"strrchr", {FI::kPure, RI::kAliasParam0, {PI::kReadMemoryOnly, PI::kReadSelfOnly}}}, +{"strspn", {FI::kPure, RI::kNoAlias, {PI::kReadMemoryOnly, PI::kReadMemoryOnly}}}, +{"strcspn", {FI::kPure, RI::kNoAlias, {PI::kReadMemoryOnly, PI::kReadMemoryOnly}}}, +{"strpbrk", {FI::kPure, RI::kNoAlias, {PI::kReadMemoryOnly, PI::kReadMemoryOnly}}}, +{"strstr", {FI::kPure, RI::kNoAlias, {PI::kReadMemoryOnly, PI::kReadMemoryOnly}}}, +{"strtok", {FI::kPure, RI::kNoAlias, {PI::kReadMemoryOnly, PI::kReadMemoryOnly}}}, +{"strtok_s", {FI::kUnknown, RI::kNoAlias, {PI::kReadMemoryOnly, PI::kWriteMemoryOnly, PI::kReadMemoryOnly, PI::kWriteMemoryOnly}}}, +{"memchr", {FI::kPure, RI::kNoAlias, {PI::kReadMemoryOnly, PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"memcmp", {FI::kPure, RI::kNoAlias, {PI::kReadMemoryOnly, PI::kReadMemoryOnly, PI::kReadSelfOnly}}}, +{"memset", {FI::kUnknown, RI::kAliasParam0, {PI::kWriteMemoryOnly, PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"memset_s", {FI::kUnknown, RI::kNoAlias, {PI::kWriteMemoryOnly, PI::kReadSelfOnly, PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"memcpy", {FI::kUnknown, RI::kAliasParam0, {PI::kWriteMemoryOnly, PI::kReadMemoryOnly, PI::kReadSelfOnly}}}, +{"memcpy_s", {FI::kUnknown, RI::kNoAlias, {PI::kWriteMemoryOnly, PI::kReadSelfOnly, PI::kReadMemoryOnly, PI::kReadSelfOnly}}}, +{"memmove", {FI::kUnknown, RI::kAliasParam0, {PI::kWriteMemoryOnly, PI::kReadMemoryOnly, PI::kReadSelfOnly}}}, +{"memmove_s", {FI::kUnknown, RI::kNoAlias, {PI::kWriteMemoryOnly, PI::kReadSelfOnly, PI::kReadMemoryOnly, PI::kReadSelfOnly}}}, +{"memccpy", {FI::kUnknown, RI::kAliasParam0, {PI::kWriteMemoryOnly, PI::kReadMemoryOnly, PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"strerror", {FI::kPure, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"strerror_s", {FI::kUnknown, RI::kNoAlias, {PI::kWriteMemoryOnly, PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"strerrorlen_s", {FI::kPure, RI::kNoAlias, {PI::kReadMemoryOnly}}}, + +//complex.h +{"CMPLXF", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"CMPLX", {FI::kConst, RI::kNoAlias, 
{PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"CMPLXL", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"crealf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"creal", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"creall", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"cimagf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"cimag", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"cimagl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"cabsf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"cabs", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"cabsl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"cargf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"carg", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"cargl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"conjf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"conj", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"conjl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"cprojf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"cproj", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"cprojl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"crealf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"creal", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"creall", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"cexpf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"cexp", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"cexpl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"clogf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"clog", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"clogl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"cpowf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"cpow", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"cpowl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"csqrtf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"csqrt", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"csqrtl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"csinf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"csin", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"csinl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"ccosf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"ccos", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"ccosl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"ctanf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"ctan", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"ctanl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"casinf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"casin", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"casinl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"cacosf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"cacos", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"cacosl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"catanf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"catan", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"catanl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"csinhf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"csinh", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"csinhl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"ccoshf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, 
+{"ccosh", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"ccoshl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"ctanhf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"ctanh", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"ctanhl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"casinhf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"casinh", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"casinhl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"cacoshf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"cacosh", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"cacoshl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"catanhf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"catanh", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"catanhl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, + +//local.h +{"setlocale", {FI::kUnknown, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadMemoryOnly}}}, +{"localeconv", {FI::kUnknown, RI::kNoAlias, {}}}, + +//math.h +{"abs", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"labs", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"llabs", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"imaxabs", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"div", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"ldiv", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"lldiv", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"imaxdiv", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"fabs", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"fabsf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"fabsl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"fabs32", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"fabs64", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"fabs128", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"fmodf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"fmod", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"fmodl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"remainderf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"remainder", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"remainderl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"remquo", {FI::kUnknown, RI::kAliasParam2, {PI::kReadSelfOnly, PI::kReadSelfOnly, PI::kWriteMemoryOnly}}}, +{"remquof", {FI::kUnknown, RI::kAliasParam2, {PI::kReadSelfOnly, PI::kReadSelfOnly, PI::kWriteMemoryOnly}}}, +{"remquol", {FI::kUnknown, RI::kAliasParam2, {PI::kReadSelfOnly, PI::kReadSelfOnly, PI::kWriteMemoryOnly}}}, +{"fma", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"fmaf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"fmal", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"fmaxf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"fmax", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"fmaxl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"fminf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"fmin", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"fminl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, 
PI::kReadSelfOnly}}}, +{"fdimf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"fdim", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"fdiml", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"nanf", {FI::kPure, RI::kNoAlias, {PI::kReadMemoryOnly}}}, +{"nan", {FI::kPure, RI::kNoAlias, {PI::kReadMemoryOnly}}}, +{"nanl", {FI::kPure, RI::kNoAlias, {PI::kReadMemoryOnly}}}, +{"nan32", {FI::kPure, RI::kNoAlias, {PI::kReadMemoryOnly}}}, +{"nan64", {FI::kPure, RI::kNoAlias, {PI::kReadMemoryOnly}}}, +{"nan128", {FI::kPure, RI::kNoAlias, {PI::kReadMemoryOnly}}}, +{"exp", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"expf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"expl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"exp2", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"exp2f", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"exp2l", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"expm1", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"expm1f", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"expm1l", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"log", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"logf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"logl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"log10", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"log10f", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"log10l", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"log2", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"log2f", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"log2l", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"log1p", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"log1pf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"log1pl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"pow", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"powf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"powl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"sqrt", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"sqrtf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"sqrtl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"cbrt", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"cbrtf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"cbrtl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"hypot", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"hypotf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"hypotl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"sin", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"sinf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"sinl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"cos", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"cosf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"cosl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"tan", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"tanf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"tanl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"asin", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"asinf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"asinl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"acos", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"acosf", {FI::kConst, 
RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"acosl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"atan", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"atanf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"atanl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"atan2", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"atan2f", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"atan2l", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"sinh", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"sinhf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"sinhl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"cosh", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"coshf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"coshl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"tanh", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"tanhf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"tanhl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"asinh", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"asinhf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"asinhl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"acosh", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"acoshf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"acoshl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"atanh", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"atanhf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"atanhl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"erf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"erff", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"erfl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"erfc", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"erfcf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"erfcl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"tgamma", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"tgammaf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"tgammal", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"lgamma", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"lgammaf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"lgammal", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"ceil", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"ceilf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"ceill", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"floor", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"floorf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"floorl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"trunc", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"truncf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"truncl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"round", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"roundf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"roundl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"lround", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"lroundf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"lroundl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"llround", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"llroundf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"llroundl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"nearbyint", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, 
+{"nearbyintf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"nearbyintl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"rint", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"rintf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"rintl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"lrint", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"lrintf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"lrintl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"llrint", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"llrintf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"llrintl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"frexp", {FI::kUnknown, RI::kAliasParam1, {PI::kReadSelfOnly, PI::kWriteMemoryOnly}}}, +{"frexpf", {FI::kUnknown, RI::kAliasParam1, {PI::kReadSelfOnly, PI::kWriteMemoryOnly}}}, +{"frexpl", {FI::kUnknown, RI::kAliasParam1, {PI::kReadSelfOnly, PI::kWriteMemoryOnly}}}, +{"ldexp", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"ldexpf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"ldexpl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"modf", {FI::kUnknown, RI::kAliasParam1, {PI::kReadSelfOnly, PI::kWriteMemoryOnly}}}, +{"modff", {FI::kUnknown, RI::kAliasParam1, {PI::kReadSelfOnly, PI::kWriteMemoryOnly}}}, +{"modfl", {FI::kUnknown, RI::kAliasParam1, {PI::kReadSelfOnly, PI::kWriteMemoryOnly}}}, +{"scalbn", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"scalbnf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"scalbnl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"scalbln", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"scalblnf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"scalblnl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"ilogb", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"ilogbf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"ilogbl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"logb", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"logbf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"logbl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"nextafter", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"nextafterf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"nextafterl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"nexttoward", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"nexttowardf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"nexttowardl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"copysign", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"copysignf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"copysignl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"fpclassify", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"isfinite", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"isinf", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"isnan", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"isnormal", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"signbit", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"isgreater", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, 
+{"isgreaterequal", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"isless", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"islessequal", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"islessgreater", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"isunordered", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, + +//setjmp.h +{"abort", {FI::kConst, RI::kNoAlias, {}}}, +{"exit", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"quick_exit", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"_Exit", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"atexit", {FI::kPure, RI::kNoAlias, {PI::kReadMemoryOnly}}}, +{"at_quick_exit", {FI::kPure, RI::kNoAlias, {PI::kReadMemoryOnly}}}, +{"system", {FI::kPure, RI::kNoAlias, {PI::kReadMemoryOnly}}}, +{"getenv", {FI::kPure, RI::kNoAlias, {PI::kReadMemoryOnly}}}, +{"getenv_s", {FI::kPure, RI::kNoAlias, {PI::kReadMemoryOnly, PI::kReadMemoryOnly, PI::kReadSelfOnly, PI::kReadMemoryOnly}}}, +{"signal", {FI::kUnknown, RI::kAliasParam1, {PI::kReadSelfOnly, PI::kWriteMemoryOnly}}}, +{"raise", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"setjmp", {FI::kUnknown, RI::kAliasParam0, {PI::kWriteMemoryOnly}}}, +{"longjmp", {FI::kPure, RI::kNoAlias, {PI::kReadMemoryOnly, PI::kReadSelfOnly}}}, + +//stdarg.h +{"va_start", {FI::kPure, RI::kNoAlias, {PI::kReadMemoryOnly, PI::kReadSelfOnly}}}, +{"va_arg", {FI::kUnknown, RI::kAliasParam0, {PI::kWriteMemoryOnly, PI::kReadMemoryOnly}}}, +{"va_copy", {FI::kUnknown, RI::kAliasParam0, {PI::kWriteMemoryOnly, PI::kReadMemoryOnly}}}, +{"va_end", {FI::kUnknown, RI::kNoAlias, {PI::kWriteMemoryOnly}}}, + +//stdlib.h +{"malloc", {FI::kUnknown, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"calloc", {FI::kUnknown, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"realloc", {FI::kUnknown, RI::kNoAlias, {PI::kReadMemoryOnly, PI::kReadSelfOnly}}}, +{"free", {FI::kUnknown, RI::kNoAlias, {PI::kWriteMemoryOnly}}}, +{"aligned_alloc", {FI::kUnknown, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"mblen", {FI::kPure, RI::kNoAlias, {PI::kReadMemoryOnly, PI::kReadSelfOnly}}}, +{"mbtowc", {FI::kUnknown, RI::kNoAlias, {PI::kWriteMemoryOnly, PI::kReadMemoryOnly, PI::kReadSelfOnly}}}, +{"wctomb", {FI::kUnknown, RI::kNoAlias, {PI::kWriteMemoryOnly, PI::kReadSelfOnly}}}, +{"wctomb_s", {FI::kUnknown, RI::kNoAlias, {PI::kWriteMemoryOnly, PI::kWriteMemoryOnly, PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"mbstowcs", {FI::kUnknown, RI::kNoAlias, {PI::kWriteMemoryOnly, PI::kReadMemoryOnly, PI::kReadSelfOnly}}}, +{"mbstowcs_s", {FI::kUnknown, RI::kNoAlias, {PI::kWriteMemoryOnly, PI::kWriteMemoryOnly, PI::kReadSelfOnly, PI::kReadMemoryOnly, PI::kReadSelfOnly}}}, +{"mbsinit", {FI::kUnknown, RI::kNoAlias, {PI::kWriteMemoryOnly}}}, +{"btowc", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"wctob", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"mbrlen", {FI::kPure, RI::kNoAlias, {PI::kReadMemoryOnly, PI::kReadSelfOnly, PI::kReadMemoryOnly}}}, +{"mbrtowc", {FI::kUnknown, RI::kNoAlias, {PI::kWriteMemoryOnly, PI::kReadMemoryOnly, PI::kReadSelfOnly, PI::kReadMemoryOnly}}}, +{"wcrtomb", {FI::kUnknown, RI::kNoAlias, {PI::kWriteMemoryOnly, PI::kReadSelfOnly, PI::kReadMemoryOnly}}}, +{"wcrtomb_s", {FI::kUnknown, RI::kNoAlias, {PI::kWriteMemoryOnly, PI::kWriteMemoryOnly, PI::kReadSelfOnly, PI::kReadSelfOnly, PI::kReadMemoryOnly}}}, +{"mbsrtowcs", {FI::kUnknown, RI::kNoAlias, {PI::kWriteMemoryOnly, PI::kReadWriteMemory, 
PI::kReadSelfOnly, PI::kReadMemoryOnly}}}, +{"mbsrtowcs_s", {FI::kUnknown, RI::kNoAlias, {PI::kWriteMemoryOnly, PI::kWriteMemoryOnly, PI::kReadSelfOnly, PI::kReadMemoryOnly, PI::kReadSelfOnly, PI::kReadMemoryOnly}}}, +{"wcsrtombs", {FI::kUnknown, RI::kNoAlias, {PI::kWriteMemoryOnly, PI::kReadWriteMemory, PI::kReadSelfOnly, PI::kReadMemoryOnly}}}, +{"wcsrtombs_s", {FI::kUnknown, RI::kNoAlias, {PI::kWriteMemoryOnly, PI::kWriteMemoryOnly, PI::kReadSelfOnly, PI::kReadMemoryOnly, PI::kReadSelfOnly, PI::kReadMemoryOnly}}}, + +//wctype.h +{"iswalnum", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"iswalpha", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"iswlower", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"iswupper", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"iswdigit", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"iswxdigit", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"iswcntrl", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"iswgraph", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"iswspace", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"iswblank", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"iswprint", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"iswpunct", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"iswctype", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"towlower", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"towupper", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"towctrans", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, + +//time.h +{"difftime", {FI::kConst, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"time", {FI::kPure, RI::kNoAlias, {PI::kWriteMemoryOnly}}}, +{"timespec_get", {FI::kUnknown, RI::kNoAlias, {PI::kWriteMemoryOnly, PI::kReadSelfOnly}}}, +{"timespec_getes", {FI::kUnknown, RI::kNoAlias, {PI::kWriteMemoryOnly, PI::kReadSelfOnly}}}, +{"asctime", {FI::kUnknown, RI::kNoAlias, {PI::kReadMemoryOnly}}}, +{"asctime_r", {FI::kUnknown, RI::kAliasParam1, {PI::kReadMemoryOnly, PI::kWriteMemoryOnly}}}, +{"asctime_s", {FI::kUnknown, RI::kNoAlias, {PI::kWriteMemoryOnly, PI::kReadSelfOnly, PI::kReadMemoryOnly}}}, +{"ctime", {FI::kUnknown, RI::kNoAlias, {PI::kReadMemoryOnly}}}, +{"ctime_r", {FI::kUnknown, RI::kAliasParam1, {PI::kReadMemoryOnly, PI::kWriteMemoryOnly}}}, +{"ctime_s", {FI::kUnknown, RI::kNoAlias, {PI::kWriteMemoryOnly, PI::kReadSelfOnly, PI::kReadMemoryOnly}}}, +{"strftime", {FI::kUnknown, RI::kAliasParam0, {PI::kWriteMemoryOnly, PI::kReadSelfOnly, PI::kReadMemoryOnly, PI::kReadMemoryOnly}}}, +{"wcsftime", {FI::kUnknown, RI::kAliasParam0, {PI::kWriteMemoryOnly, PI::kReadSelfOnly, PI::kReadMemoryOnly, PI::kReadMemoryOnly}}}, +{"gmtime", {FI::kUnknown, RI::kNoAlias, {PI::kReadMemoryOnly}}}, +{"gmtime_r", {FI::kUnknown, RI::kAliasParam1, {PI::kReadMemoryOnly, PI::kWriteMemoryOnly}}}, +{"gmtime_s", {FI::kUnknown, RI::kAliasParam1, {PI::kReadMemoryOnly, PI::kWriteMemoryOnly}}}, +{"localtime", {FI::kUnknown, RI::kNoAlias, {PI::kReadMemoryOnly}}}, +{"localtime_r", {FI::kUnknown, RI::kAliasParam1, {PI::kReadMemoryOnly, PI::kWriteMemoryOnly}}}, +{"localtime_s", {FI::kUnknown, RI::kAliasParam1, {PI::kReadMemoryOnly, PI::kWriteMemoryOnly}}}, +{"mktime", {FI::kUnknown, RI::kNoAlias, {PI::kUnknown}}}, + +//stdalign.h +{"offsetof", {FI::kPure, RI::kNoAlias, {PI::kReadMemoryOnly, PI::kReadSelfOnly}}}, + +//stdio.h +{"fopen", {FI::kUnknown, RI::kNoAlias, {PI::kReadMemoryOnly, PI::kReadMemoryOnly}}}, +{"fopen_s", {FI::kUnknown, 
RI::kNoAlias, {PI::kWriteMemoryOnly, PI::kReadMemoryOnly, PI::kReadMemoryOnly}}}, +{"freopen", {FI::kUnknown, RI::kNoAlias, {PI::kReadMemoryOnly, PI::kReadMemoryOnly, PI::kReadMemoryOnly}}}, +{"freopen_s", {FI::kUnknown, RI::kNoAlias, {PI::kWriteMemoryOnly, PI::kReadMemoryOnly, PI::kReadMemoryOnly, PI::kReadMemoryOnly}}}, +{"fclose", {FI::kPure, RI::kNoAlias, {PI::kReadMemoryOnly}}}, +{"fflush", {FI::kPure, RI::kNoAlias, {PI::kReadMemoryOnly}}}, +{"setbuf", {FI::kUnknown, RI::kNoAlias, {PI::kWriteMemoryOnly, PI::kReadMemoryOnly}}}, +{"setvbuf", {FI::kUnknown, RI::kNoAlias, {PI::kWriteMemoryOnly, PI::kReadMemoryOnly, PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"fwide", {FI::kUnknown, RI::kNoAlias, {PI::kWriteMemoryOnly, PI::kReadSelfOnly}}}, +{"fread", {FI::kUnknown, RI::kNoAlias, {PI::kWriteMemoryOnly, PI::kReadSelfOnly, PI::kReadSelfOnly, PI::kReadMemoryOnly}}}, +{"fwrite", {FI::kUnknown, RI::kNoAlias, {PI::kReadMemoryOnly, PI::kReadSelfOnly, PI::kReadSelfOnly, PI::kWriteMemoryOnly}}}, +{"fgetc", {FI::kPure, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"getc", {FI::kPure, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"fgets", {FI::kUnknown, RI::kAliasParam0, {PI::kWriteMemoryOnly, PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"fputc", {FI::kUnknown, RI::kAliasParam1, {PI::kReadSelfOnly, PI::kWriteMemoryOnly}}}, +{"putc", {FI::kUnknown, RI::kAliasParam1, {PI::kReadSelfOnly, PI::kWriteMemoryOnly}}}, +{"fputs", {FI::kUnknown, RI::kAliasParam1, {PI::kReadMemoryOnly, PI::kWriteMemoryOnly}}}, +{"getchar", {FI::kUnknown, RI::kNoAlias, {}}}, +{"gets", {FI::kUnknown, RI::kAliasParam0, {PI::kWriteMemoryOnly}}}, +{"gets_s", {FI::kUnknown, RI::kAliasParam0, {PI::kWriteMemoryOnly, PI::kReadSelfOnly}}}, +{"putchar", {FI::kUnknown, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"puts", {FI::kUnknown, RI::kNoAlias, {PI::kReadMemoryOnly}}}, +{"ungetc", {FI::kUnknown, RI::kAliasParam1, {PI::kReadSelfOnly, PI::kWriteMemoryOnly}}}, +{"fgetwc", {FI::kPure, RI::kNoAlias, {PI::kReadMemoryOnly}}}, +{"getwc", {FI::kPure, RI::kNoAlias, {PI::kReadMemoryOnly}}}, +{"fgetws", {FI::kUnknown, RI::kAliasParam0, {PI::kWriteMemoryOnly, PI::kReadSelfOnly, PI::kReadMemoryOnly}}}, +{"fputwc", {FI::kUnknown, RI::kNoAlias, {PI::kReadSelfOnly, PI::kWriteMemoryOnly}}}, +{"putwc", {FI::kUnknown, RI::kNoAlias, {PI::kReadSelfOnly, PI::kWriteMemoryOnly}}}, +{"fputws", {FI::kUnknown, RI::kNoAlias, {PI::kReadMemoryOnly, PI::kWriteMemoryOnly}}}, +{"getwchar", {FI::kUnknown, RI::kNoAlias, {}}}, +{"putwchar", {FI::kUnknown, RI::kNoAlias, {PI::kReadSelfOnly}}}, +{"ungetwc", {FI::kUnknown, RI::kAliasParam1, {PI::kReadSelfOnly, PI::kWriteMemoryOnly}}}, +{"scanf", {FI::kUnknown, RI::kNoAlias, {PI::kReadMemoryOnly, PI::kWriteMemoryOnly, PI::kWriteMemoryOnly, PI::kWriteMemoryOnly, PI::kWriteMemoryOnly, PI::kWriteMemoryOnly}}}, +{"fscanf", {FI::kUnknown, RI::kNoAlias, {PI::kReadMemoryOnly, PI::kReadMemoryOnly, PI::kWriteMemoryOnly, PI::kWriteMemoryOnly, PI::kWriteMemoryOnly, PI::kWriteMemoryOnly}}}, +{"sscanf", {FI::kUnknown, RI::kNoAlias, {PI::kReadMemoryOnly, PI::kReadMemoryOnly, PI::kWriteMemoryOnly, PI::kWriteMemoryOnly, PI::kWriteMemoryOnly, PI::kWriteMemoryOnly}}}, +{"scanf_s", {FI::kUnknown, RI::kNoAlias, {PI::kReadMemoryOnly, PI::kWriteMemoryOnly, PI::kWriteMemoryOnly, PI::kWriteMemoryOnly, PI::kWriteMemoryOnly, PI::kWriteMemoryOnly}}}, +{"fscanf_s", {FI::kUnknown, RI::kNoAlias, {PI::kReadMemoryOnly, PI::kReadMemoryOnly, PI::kWriteMemoryOnly, PI::kWriteMemoryOnly, PI::kWriteMemoryOnly, PI::kWriteMemoryOnly}}}, +{"sscanf_s", {FI::kUnknown, RI::kNoAlias, 
{PI::kReadMemoryOnly, PI::kReadMemoryOnly, PI::kWriteMemoryOnly, PI::kWriteMemoryOnly, PI::kWriteMemoryOnly, PI::kWriteMemoryOnly}}}, +{"printf", {FI::kPure, RI::kNoAlias, {PI::kReadMemoryOnly, PI::kReadWriteMemory, PI::kReadWriteMemory, PI::kReadWriteMemory, PI::kReadWriteMemory, PI::kReadWriteMemory}}}, +{"fprintf", {FI::kUnknown, RI::kNoAlias, {PI::kWriteMemoryOnly, PI::kReadMemoryOnly, PI::kReadWriteMemory, PI::kReadWriteMemory, PI::kReadWriteMemory, PI::kReadWriteMemory}}}, +{"sprintf", {FI::kUnknown, RI::kNoAlias, {PI::kWriteMemoryOnly, PI::kReadMemoryOnly, PI::kReadWriteMemory, PI::kReadWriteMemory, PI::kReadWriteMemory, PI::kReadWriteMemory}}}, +{"snprintf", {FI::kUnknown, RI::kNoAlias, {PI::kWriteMemoryOnly, PI::kReadMemoryOnly, PI::kReadMemoryOnly, PI::kReadWriteMemory, PI::kReadWriteMemory, PI::kReadWriteMemory}}}, +{"printf_s", {FI::kPure, RI::kNoAlias, {PI::kReadMemoryOnly, PI::kReadWriteMemory, PI::kReadWriteMemory, PI::kReadWriteMemory, PI::kReadWriteMemory, PI::kReadWriteMemory}}}, +{"fprintf_s", {FI::kUnknown, RI::kNoAlias, {PI::kWriteMemoryOnly, PI::kReadMemoryOnly, PI::kReadWriteMemory, PI::kReadWriteMemory, PI::kReadWriteMemory, PI::kReadWriteMemory}}}, +{"sprintf_s", {FI::kUnknown, RI::kNoAlias, {PI::kWriteMemoryOnly, PI::kReadMemoryOnly, PI::kReadWriteMemory, PI::kReadWriteMemory, PI::kReadWriteMemory, PI::kReadWriteMemory}}}, +{"snprintf_s", {FI::kUnknown, RI::kNoAlias, {PI::kWriteMemoryOnly, PI::kReadMemoryOnly, PI::kReadMemoryOnly, PI::kReadWriteMemory, PI::kReadWriteMemory, PI::kReadWriteMemory}}}, +{"wscanf", {FI::kUnknown, RI::kNoAlias, {PI::kReadMemoryOnly, PI::kWriteMemoryOnly, PI::kWriteMemoryOnly, PI::kWriteMemoryOnly, PI::kWriteMemoryOnly, PI::kWriteMemoryOnly}}}, +{"fwscanf", {FI::kUnknown, RI::kNoAlias, {PI::kReadMemoryOnly, PI::kReadMemoryOnly, PI::kWriteMemoryOnly, PI::kWriteMemoryOnly, PI::kWriteMemoryOnly, PI::kWriteMemoryOnly}}}, +{"swscanf", {FI::kUnknown, RI::kNoAlias, {PI::kReadMemoryOnly, PI::kReadMemoryOnly, PI::kWriteMemoryOnly, PI::kWriteMemoryOnly, PI::kWriteMemoryOnly, PI::kWriteMemoryOnly}}}, +{"wscanf_s", {FI::kUnknown, RI::kNoAlias, {PI::kReadMemoryOnly, PI::kWriteMemoryOnly, PI::kWriteMemoryOnly, PI::kWriteMemoryOnly, PI::kWriteMemoryOnly, PI::kWriteMemoryOnly}}}, +{"fwscanf_s", {FI::kUnknown, RI::kNoAlias, {PI::kReadMemoryOnly, PI::kReadMemoryOnly, PI::kWriteMemoryOnly, PI::kWriteMemoryOnly, PI::kWriteMemoryOnly, PI::kWriteMemoryOnly}}}, +{"swscanf_s", {FI::kUnknown, RI::kNoAlias, {PI::kReadMemoryOnly, PI::kReadMemoryOnly, PI::kWriteMemoryOnly, PI::kWriteMemoryOnly, PI::kWriteMemoryOnly, PI::kWriteMemoryOnly}}}, +{"wprintf", {FI::kPure, RI::kNoAlias, {PI::kReadMemoryOnly, PI::kReadWriteMemory, PI::kReadWriteMemory, PI::kReadWriteMemory, PI::kReadWriteMemory, PI::kReadWriteMemory}}}, +{"fwprintf", {FI::kUnknown, RI::kNoAlias, {PI::kWriteMemoryOnly, PI::kReadMemoryOnly, PI::kReadWriteMemory, PI::kReadWriteMemory, PI::kReadWriteMemory, PI::kReadWriteMemory}}}, +{"swprintf", {FI::kUnknown, RI::kNoAlias, {PI::kWriteMemoryOnly, PI::kReadMemoryOnly, PI::kReadMemoryOnly, PI::kReadWriteMemory, PI::kReadWriteMemory, PI::kReadWriteMemory}}}, +{"wprintf_s", {FI::kPure, RI::kNoAlias, {PI::kReadMemoryOnly, PI::kReadWriteMemory, PI::kReadWriteMemory, PI::kReadWriteMemory, PI::kReadWriteMemory, PI::kReadWriteMemory}}}, +{"fwprintf_s", {FI::kUnknown, RI::kNoAlias, {PI::kWriteMemoryOnly, PI::kReadMemoryOnly, PI::kReadWriteMemory, PI::kReadWriteMemory, PI::kReadWriteMemory, PI::kReadWriteMemory}}}, +{"swprintf_s", {FI::kUnknown, RI::kNoAlias, 
{PI::kWriteMemoryOnly, PI::kReadMemoryOnly, PI::kReadMemoryOnly, PI::kReadWriteMemory, PI::kReadWriteMemory, PI::kReadWriteMemory}}}, +{"snwprintf_s", {FI::kUnknown, RI::kNoAlias, {PI::kWriteMemoryOnly, PI::kReadMemoryOnly, PI::kReadMemoryOnly, PI::kReadWriteMemory, PI::kReadWriteMemory, PI::kReadWriteMemory}}}, +{"ftell", {FI::kPure, RI::kNoAlias, {PI::kReadMemoryOnly}}}, +{"fgetpos", {FI::kUnknown, RI::kNoAlias, {PI::kReadMemoryOnly, PI::kWriteMemoryOnly}}}, +{"fseek", {FI::kUnknown, RI::kNoAlias, {PI::kWriteMemoryOnly, PI::kReadSelfOnly, PI::kReadSelfOnly}}}, +{"fsetpos", {FI::kUnknown, RI::kNoAlias, {PI::kWriteMemoryOnly, PI::kReadMemoryOnly}}}, +{"rewind", {FI::kUnknown, RI::kNoAlias, {PI::kWriteMemoryOnly}}}, +{"clearerr", {FI::kUnknown, RI::kNoAlias, {PI::kWriteMemoryOnly}}}, +{"feof", {FI::kPure, RI::kNoAlias, {PI::kReadMemoryOnly}}}, +{"ferror", {FI::kPure, RI::kNoAlias, {PI::kReadMemoryOnly}}}, +{"perror", {FI::kPure, RI::kNoAlias, {PI::kReadMemoryOnly}}}, +{"remove", {FI::kUnknown, RI::kNoAlias, {PI::kWriteMemoryOnly}}}, +{"rename", {FI::kUnknown, RI::kNoAlias, {PI::kReadMemoryOnly, PI::kWriteMemoryOnly}}}, +{"tmpnam", {FI::kUnknown, RI::kAliasParam0, {PI::kWriteMemoryOnly}}}, +{"tmpnam_s", {FI::kUnknown, RI::kNoAlias, {PI::kWriteMemoryOnly, PI::kReadSelfOnly}}}, + +//for java +{"MCC_GetOrInsertLiteral", {FI::kPure, RI::kUnknown, {PI::kReadMemoryOnly}}}, + +// user config +{"Hlog", {FI::kPure, RI::kNoAlias, {PI::kReadSelfOnly, PI::kReadSelfOnly, PI::kReadSelfOnly, PI::kReadSelfOnly, PI::kReadSelfOnly, PI::kReadSelfOnly}}} diff --git a/src/mapleall/maple_ipa/include/ipa_clone.h b/src/mapleall/maple_ipa/include/ipa_clone.h new file mode 100644 index 0000000000000000000000000000000000000000..ccb3f4ac78a3a142f02e7b92c60a134d62034cda --- /dev/null +++ b/src/mapleall/maple_ipa/include/ipa_clone.h @@ -0,0 +1,84 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MAPLE_IPA_INCLUDE_IPACLONE_H +#define MAPLE_IPA_INCLUDE_IPACLONE_H +#include "mir_module.h" +#include "mir_function.h" +#include "mir_builder.h" +#include "mempool.h" +#include "mempool_allocator.h" +#include "class_hierarchy_phase.h" +#include "me_ir.h" +#include "maple_phase_manager.h" +namespace maple { +constexpr uint32 kNumOfImpExprUpper = 64; +class IpaClone : public AnalysisResult { + public: + IpaClone(MIRModule *mod, MemPool *memPool, MIRBuilder &builder) + : AnalysisResult(memPool), mirModule(mod), allocator(memPool), mirBuilder(builder), curFunc(nullptr) {} + ~IpaClone() override { + mirModule = nullptr; + curFunc = nullptr; + } + + static MIRSymbol *IpaCloneLocalSymbol(const MIRSymbol &oldSym, const MIRFunction &newFunc); + static void IpaCloneSymbols(MIRFunction &newFunc, const MIRFunction &oldFunc); + static void IpaCloneLabels(MIRFunction &newFunc, const MIRFunction &oldFunc); + static void IpaClonePregTable(MIRFunction &newFunc, const MIRFunction &oldFunc); + MIRFunction *IpaCloneFunction(MIRFunction &originalFunction, const std::string &fullName) const; + MIRFunction *IpaCloneFunctionWithFreq(MIRFunction &originalFunction, + const std::string &fullName, uint64_t callSiteFreq) const; + bool IsBrCondOrIf(Opcode op) const; + void DoIpaClone(); + void InitParams(); + void CopyFuncInfo(MIRFunction &originalFunction, MIRFunction &newFunc) const; + void IpaCloneArgument(MIRFunction &originalFunction, ArgVector &argument) const; + void RemoveUnneedParameter(MIRFunction *newFunc, uint32 paramIndex, int64_t value) const; + void DecideCloneFunction(std::vector &result, uint32 paramIndex, std::map> &evalMap) const; + void ReplaceIfCondtion(MIRFunction *newFunc, std::vector &result, uint64_t res) const; + void RemoveSwitchCase(MIRFunction &newFunc, SwitchNode &switchStmt, std::vector &calleeValue) const; + void RemoveUnneedSwitchCase(MIRFunction &newFunc, std::vector &result, + std::vector &calleeValue) const; + bool CheckImportantExprHasBr(const std::vector &exprVec) const; + void EvalCompareResult(std::vector &result, std::map> &evalMap, + std::map> &summary, uint32 index) const; + void EvalImportantExpression(MIRFunction *func, std::vector &result); + bool CheckCostModel(uint32 paramIndex, std::vector &calleeValue, std::vector &result) const; + void ComupteValue(const IntVal& value, const IntVal& paramValue, const CompareNode &cond, uint64_t &bitRes) const; + void CloneNoImportantExpressFunction(MIRFunction *func, uint32 paramIndex) const; + void ModifyParameterSideEffect(MIRFunction *newFunc, uint32 paramIndex) const; + + private: + MIRModule *mirModule; + MapleAllocator allocator; + MIRBuilder &mirBuilder; + MIRFunction *curFunc; + uint32 numOfCloneVersions = 0; + uint32 numOfImpExprLowBound = 0; + uint32 numOfImpExprHighBound = 0; + uint32 numOfCallSiteLowBound = 0; + uint32 numOfCallSiteUpBound = 0; + uint32 numOfConstpropValue = 0; +}; +MAPLE_MODULE_PHASE_DECLARE_BEGIN(M2MIpaClone) + IpaClone *GetResult() { + return cl; + } + IpaClone *cl = nullptr; +OVERRIDE_DEPENDENCE +MAPLE_MODULE_PHASE_DECLARE_END +} // namespace maple +#endif // MAPLE_IPA_INCLUDE_IPACLONE_H diff --git a/src/mapleall/maple_ipa/include/ipa_collect.h b/src/mapleall/maple_ipa/include/ipa_collect.h new file mode 100644 index 0000000000000000000000000000000000000000..090b3bc5ebcf8a324de8cbc70f1be48f7560171a --- /dev/null +++ b/src/mapleall/maple_ipa/include/ipa_collect.h @@ -0,0 +1,126 @@ +/* + * Copyright (c) [2021-2022] Huawei Technologies Co.,Ltd.All rights reserved. 
+ * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLE_IPA_INCLUDE_COLLECT_IPA_INFO_H +#define MAPLE_IPA_INCLUDE_COLLECT_IPA_INFO_H +#include "mir_nodes.h" +#include "mir_builder.h" +#include "call_graph.h" +#include "me_ir.h" +#include "me_irmap.h" +#include "dominance.h" +#include "class_hierarchy.h" +#include "maple_phase.h" +#include "mir_module.h" +#include "stmt_identify.h" +#include "maple_phase_manager.h" +namespace maple { +using StmtIndex = size_t; +using StmtInfoId = size_t; + +union ParamValue { + bool valueBool; + int64_t valueInt; + float valueFloat; + double valueDouble; +}; + +enum ValueType { + kBool, + kInt, + kFloat, + kDouble, +}; + +class CollectIpaInfo { + public: + CollectIpaInfo(MIRModule &mod, MemPool &memPool) + : module(mod), + builder(*mod.GetMIRBuilder()), + curFunc(nullptr), + allocator(&memPool), + stmtInfoToIntegerMap(allocator.Adapter()), + integerString(allocator.Adapter()), + stmtInfoVector(allocator.Adapter()) {} + virtual ~CollectIpaInfo() = default; + + void RunOnScc(SCCNode &scc); + void TraverseStmtInfo(size_t position); + void UpdateCaleeParaAboutFloat(MeStmt &meStmt, float paramValue, uint32 index, CallerSummary &summary); + void UpdateCaleeParaAboutDouble(MeStmt &meStmt, double paramValue, uint32 index, CallerSummary &summary); + void UpdateCaleeParaAboutInt(MeStmt &meStmt, int64_t paramValue, uint32 index, CallerSummary &summary); + bool IsConstKindValue(MeExpr *expr) const; + bool CheckImpExprStmt(const MeStmt &meStmt) const; + bool CollectBrImportantExpression(const MeStmt &meStmt, uint32 &index) const; + void TransformStmtToIntegerSeries(MeStmt &meStmt); + DefUsePositions &GetDefUsePositions(OriginalSt &ost, StmtInfoId position); + void CollectDefUsePosition(ScalarMeExpr &var, StmtInfoId position, + std::unordered_set &cycleCheck); + void CollectJumpInfo(MeStmt &meStmt); + void SetLabel(size_t currStmtInfoId, LabelIdx label); + StmtInfoId GetRealFirstStmtInfoId(BB &bb); + void TraverseMeExpr(MeExpr &meExpr, StmtInfoId position, + std::unordered_set &cycleCheck); + void TraverseMeStmt(MeStmt &meStmt); + bool CollectSwitchImportantExpression(const MeStmt &meStmt, uint32 &index) const; + bool CollectImportantExpression(const MeStmt &meStmt, uint32 &index) const; + bool IsParameterOrUseParameter(const VarMeExpr *varExpr, uint32 &index) const; + void ReplaceMeStmtWithStmtNode(StmtNode *stmt, StmtInfoId position); + void Perform(MeFunction &func); + void Dump(); + + void PushInvalidKeyBack(uint invalidKey) { + integerString.emplace_back(invalidKey); + stmtInfoVector.emplace_back(StmtInfo(nullptr, -1u, allocator)); + } + + MapleVector &GetIntegerString() { + return integerString; + } + + MapleVector &GetStmtInfo() { + return stmtInfoVector; + } + + uint GetCurrNewStmtIndex() { + return ++currNewStmtIndex; + } + + uint GetTotalStmtInfoCount() { + return currNewStmtIndex; + } + + void SetDataMap(AnalysisDataManager *map) { + dataMap = map; + } + + MapleAllocator &GetAllocator() { + return allocator; + } + private: + MIRModule &module; + MIRBuilder &builder; + MIRFunction *curFunc; + 
AnalysisDataManager *dataMap; + MapleAllocator allocator; + MapleUnorderedMap stmtInfoToIntegerMap; + MapleVector integerString; + MapleVector stmtInfoVector; + StmtIndex currNewStmtIndex = 0; +}; +MAPLE_SCC_PHASE_DECLARE_BEGIN(SCCCollectIpaInfo, maple::SCCNode) +OVERRIDE_DEPENDENCE +MAPLE_SCC_PHASE_DECLARE_END +} // namespace maple +#endif // MAPLE_IPA_INCLUDE_COLLECT_IPA_INFO_H diff --git a/src/mapleall/maple_ipa/include/ipa_phase_manager.h b/src/mapleall/maple_ipa/include/ipa_phase_manager.h new file mode 100644 index 0000000000000000000000000000000000000000..9c3d61f68af5b9b76b33871cd6a87c220728b98a --- /dev/null +++ b/src/mapleall/maple_ipa/include/ipa_phase_manager.h @@ -0,0 +1,87 @@ +/* + * Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLE_IPA_INCLUDE_IPA_PHASE_MANAGER_H +#define MAPLE_IPA_INCLUDE_IPA_PHASE_MANAGER_H +#include +#include "mempool.h" +#include "mempool_allocator.h" +#include "mir_module.h" +#include "mir_function.h" +#include "me_phase_manager.h" +#include "ipa_collect.h" + +namespace maple { +/* ==== new phase manager ==== */ +class IpaSccPM : public SccPM { + public: + explicit IpaSccPM(MemPool *memPool) : SccPM(memPool, &id) {} + void Init(MIRModule &m); + bool PhaseRun(MIRModule &m) override; + PHASECONSTRUCTOR(IpaSccPM); + ~IpaSccPM() override {} + std::string PhaseName() const override; + CollectIpaInfo *GetResult() { + return ipaInfo; + } + private: + CollectIpaInfo *ipaInfo = nullptr; + void GetAnalysisDependence(AnalysisDep &aDep) const override; + virtual void DoPhasesPopulate(const MIRModule &mirModule); +}; + +class SCCPrepare : public MapleSccPhase>, public MaplePhaseManager { + public: + explicit SCCPrepare(MemPool *mp) : MapleSccPhase>(&id, mp), MaplePhaseManager(*mp) {} + ~SCCPrepare() override = default; + std::string PhaseName() const override; + PHASECONSTRUCTOR(SCCPrepare); + bool PhaseRun(SCCNode &scc) override; + void Dump(const MeFunction &f, const std::string phaseName) const; + AnalysisDataManager *GetResult() { + return result; + } + private: + AnalysisDataManager *result = nullptr; +}; + +class SCCEmit : public MapleSccPhase>, public MaplePhaseManager { + public: + explicit SCCEmit(MemPool *mp) : MapleSccPhase>(&id, mp), MaplePhaseManager(*mp) {} + ~SCCEmit() override = default; + std::string PhaseName() const override; + PHASECONSTRUCTOR(SCCEmit); + bool PhaseRun(SCCNode &scc) override; + void Dump(MeFunction &f, const std::string phaseName) const; + private: + void GetAnalysisDependence(maple::AnalysisDep &aDep) const override; +}; + +class SCCProfile : public MapleSccPhase>, public MaplePhaseManager { + public: + explicit SCCProfile(MemPool *mp) : MapleSccPhase>(&id, mp), MaplePhaseManager(*mp) {} + ~SCCProfile() override { + result = nullptr; + } + std::string PhaseName() const override; + PHASECONSTRUCTOR(SCCProfile); + bool PhaseRun(SCCNode &scc) override; + AnalysisDataManager *GetResult() { + return result; + } + private: + AnalysisDataManager *result = nullptr; +}; +} // namespace maple +#endif // 
MAPLE_IPA_INCLUDE_IPA_PHASE_MANAGER_H diff --git a/src/mapleall/maple_ipa/include/ipa_side_effect.h b/src/mapleall/maple_ipa/include/ipa_side_effect.h new file mode 100644 index 0000000000000000000000000000000000000000..f7f84be13fb80bb002f4f76fa9d5c1ed090e8e2c --- /dev/null +++ b/src/mapleall/maple_ipa/include/ipa_side_effect.h @@ -0,0 +1,73 @@ +/* + * Copyright (c) [2021-2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLE_IPA_INCLUDE_IPASIDEEFFECT_H +#define MAPLE_IPA_INCLUDE_IPASIDEEFFECT_H +#include "me_phase_manager.h" +#include "ipa_phase_manager.h" + +namespace maple { +class SideEffect { + public: + SideEffect(MeFunction *meFunc, Dominance *dom, AliasClass *alias, CallGraph *cg) + : meFunc(meFunc), dom(dom), alias(alias), callGraph(cg) { + defGlobal = false; + defArg = false; + useGlobal = false; + vstsValueAliasWithFormal.resize(std::min(meFunc->GetMirFunc()->GetFormalCount(), kMaxParamCount)); + } + ~SideEffect() { + alias = nullptr; + dom = nullptr; + meFunc = nullptr; + curFuncDesc = nullptr; + } + bool Perform(MeFunction &f); + static const FuncDesc &GetFuncDesc(MeFunction &f); + static const FuncDesc &GetFuncDesc(MIRFunction &f); + static const std::map &GetWhiteList(); + + private: + void DealWithOperand(MeExpr *expr); + void DealWithOst(OStIdx ostIdx); + void DealWithStmt(MeStmt &stmt); + void PropAllInfoFromCallee(const MeStmt &call, MIRFunction &callee); + void PropParamInfoFromCallee(const MeStmt &call, MIRFunction &callee); + void PropInfoFromOpnd(MeExpr &opnd, const PI &calleeParamInfo); + void ParamInfoUpdater(size_t vstIdx, const PI &calleeParamInfo); + void DealWithOst(const OriginalSt *ost); + void DealWithReturn(const RetMeStmt &retMeStmt) const; + void AnalysisFormalOst(); + void SolveVarArgs(MeFunction &f) const; + void CollectFormalOst(MeFunction &f); + void CollectAllLevelOst(size_t vstIdx, std::set &result); + + std::set> analysisLater; + std::vector> vstsValueAliasWithFormal; + MeFunction *meFunc = nullptr; + FuncDesc *curFuncDesc = nullptr; + Dominance *dom = nullptr; + AliasClass *alias = nullptr; + CallGraph *callGraph = nullptr; + + bool defGlobal = false; + bool defArg = false; + bool useGlobal = false; +}; + +MAPLE_SCC_PHASE_DECLARE_BEGIN(SCCSideEffect, SCCNode) +OVERRIDE_DEPENDENCE +MAPLE_SCC_PHASE_DECLARE_END +} // namespace maple +#endif // MAPLE_IPA_INCLUDE_IPASIDEEFFECT_H diff --git a/src/mapleall/maple_ipa/include/old/do_ipa_escape_analysis.h b/src/mapleall/maple_ipa/include/old/do_ipa_escape_analysis.h new file mode 100644 index 0000000000000000000000000000000000000000..615c79b024ccfd689a4542f7714305d5dfa462e5 --- /dev/null +++ b/src/mapleall/maple_ipa/include/old/do_ipa_escape_analysis.h @@ -0,0 +1,41 @@ +/* + * Copyright (c) [2019-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef INCLUDE_MAPLEIPA_INCLUDE_IPAESCAPEANALYSIS_H +#define INCLUDE_MAPLEIPA_INCLUDE_IPAESCAPEANALYSIS_H + +namespace maple { +#ifdef NOT_USED +class DoIpaEA : public MeFuncPhase { + public: + explicit DoIpaEA(MePhaseID id) : MeFuncPhase(id) {} + ~DoIpaEA() = default; + AnalysisResult *Run(MeFunction*, MeFuncResultMgr*, ModuleResultMgr*) override; + std::string PhaseName() const override { + return "ipaea"; + } +}; + +class DoIpaEAOpt : public MeFuncPhase { + public: + explicit DoIpaEAOpt(MePhaseID id) : MeFuncPhase(id) {} + ~DoIpaEAOpt() = default; + AnalysisResult *Run(MeFunction*, MeFuncResultMgr*, ModuleResultMgr*) override; + std::string PhaseName() const override { + return "ipaeaopt"; + } +}; +#endif +} +#endif // INCLUDE_MAPLEIPA_INCLUDE_IPAESCAPEANALYSIS_H diff --git a/src/mapleall/maple_ipa/include/old/ea_connection_graph.h b/src/mapleall/maple_ipa/include/old/ea_connection_graph.h new file mode 100644 index 0000000000000000000000000000000000000000..6a4de2f388f11d13c0045bf655c03b8ef207b7ab --- /dev/null +++ b/src/mapleall/maple_ipa/include/old/ea_connection_graph.h @@ -0,0 +1,697 @@ +/* + * Copyright (c) [2019] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MAPLEIPA_INCLUDE_ESCAPEANALYSIS_H +#define MAPLEIPA_INCLUDE_ESCAPEANALYSIS_H +#include +#include +#include +#include "call_graph.h" +#include "me_ir.h" +#include "irmap.h" + +namespace maple { +enum NodeKind { + kObejectNode, + kReferenceNode, + kActualNode, + kFieldNode, + kPointerNode +}; + +enum EAStatus { + kNoEscape, + kReturnEscape, + kArgumentEscape, + kGlobalEscape +}; + +const inline std::string EscapeName(EAStatus esc) { + switch (esc) { + case kNoEscape: + return "NoEsc"; + case kReturnEscape: + return "RetEsc"; + case kArgumentEscape: + return "ArgEsc"; + case kGlobalEscape: + return "GlobalEsc"; + default: + return ""; + } +} + +class Location { + public: + Location(const std::string &modName, uint32 fileId, uint32 lineId) + : modName(modName), + fileId(fileId), + lineId(lineId) {}; + ~Location() = default; + + const std::string &GetModName() const { + return modName; + } + + uint32 GetFileId() const { + return fileId; + } + + uint32 GetLineId() const { + return lineId; + } + + private: + std::string modName; + uint32 fileId; + uint32 lineId; +}; + +class EACGBaseNode; +class EACGObjectNode; +class EACGFieldNode; +class EACGRefNode; +class EACGActualNode; +class EACGPointerNode; + +class EAConnectionGraph { + public: + friend class BinaryMplExport; + friend class BinaryMplImport; + friend class EACGBaseNode; + friend class EACGObjectNode; + friend class EACGFieldNode; + friend class EACGRefNode; + friend class EACGPointerNode; + // If import is false, need init globalNode. + EAConnectionGraph(MIRModule *m, MapleAllocator *allocator, const GStrIdx &funcName, bool import = false) + : mirModule(m), + alloc(allocator), + nodes(allocator->Adapter()), + expr2Nodes(allocator->Adapter()), + funcArgNodes(allocator->Adapter()), + callSite2Nodes(allocator->Adapter()), + funcStIdx(funcName), + hasUpdated(false), + needConv(false), + imported(import), + exprIdMax(0), + globalObj(nullptr), + globalRef(nullptr), + globalField(nullptr) {}; + ~EAConnectionGraph() = default; + + EACGObjectNode *CreateObjectNode(MeExpr *expr, EAStatus initialEas, bool isPh, TyIdx tyIdx); + EACGRefNode *CreateReferenceNode(MeExpr *expr, EAStatus initialEas, bool isStatic); + EACGActualNode *CreateActualNode(EAStatus initialEas, bool isReurtn, bool isPh, uint8 argIdx, + uint32 callSiteInfo); + EACGFieldNode *CreateFieldNode(MeExpr *expr, EAStatus initialEas, FieldID fId, EACGObjectNode *belongTo, bool isPh); + EACGPointerNode *CreatePointerNode(MeExpr *expr, EAStatus initialEas, int inderictL); + EACGBaseNode *GetCGNodeFromExpr(MeExpr *me); + EACGFieldNode *GetOrCreateFieldNodeFromIdx(EACGObjectNode &obj, int32 fieldID); + EACGActualNode *GetReturnNode() const; + const MapleVector *GetFuncArgNodeVector() const; + void TouchCallSite(uint32 callSiteInfo); + MapleVector *GetCallSiteArgNodeVector(uint32 callSite); + bool ExprCanBeOptimized(MeExpr &expr); + + bool CGHasUpdated() const { + return hasUpdated; + } + + void UnSetCGUpdateFlag() { + hasUpdated = false; + } + + void SetCGHasUpdated() { + hasUpdated = true; + } + + void SetExprIdMax(int max) { + exprIdMax = max; + } + + void SetNeedConservation() { + needConv = true; + } + + bool GetNeedConservation() const { + return needConv; + } + + GStrIdx GetFuncNameStrIdx() const { + return funcStIdx; + } + + EACGObjectNode *GetGlobalObject() { + return globalObj; + } + + const EACGObjectNode *GetGlobalObject() const { + return globalObj; + } + + EACGRefNode *GetGlobalReference() { + return globalRef; + } + + const EACGRefNode *GetGlobalReference() const { 
+ return globalRef; + } + + const MapleVector &GetNodes() const { + return nodes; + } + + void ResizeNodes(size_t size, EACGBaseNode *val) { + nodes.resize(size, val); + } + + EACGBaseNode *GetNode(uint32 idx) const { + CHECK_FATAL(idx < nodes.size(), "array check fail"); + return nodes[idx]; + } + + void SetNodeAt(size_t index, EACGBaseNode *val) { + nodes[index] = val; + } + + const MapleVector &GetFuncArgNodes() const { + return funcArgNodes; + } + + const MapleMap*> &GetCallSite2Nodes() const { + return callSite2Nodes; + } + + void InitGlobalNode(); + void AddMaps2Object(EACGObjectNode *caller, EACGObjectNode *callee); + void UpdateExprOfNode(EACGBaseNode &node, MeExpr *me); + void UpdateExprOfGlobalRef(MeExpr *me); + void PropogateEAStatus(); + bool MergeCG(MapleVector &caller, const MapleVector *callee); + void TrimGlobalNode() const; + void UpdateEACGFromCaller(const MapleVector &callerCallSiteArg, + const MapleVector &calleeFuncArg); + void DumpDotFile(const IRMap *irMap, bool dumpPt, MapleVector *dumpVec = nullptr); + void DeleteEACG() const; + void RestoreStatus(bool old); + void CountObjEAStatus() const; + + const std::string &GetFunctionName() const { + return GlobalTables::GetStrTable().GetStringFromStrIdx(funcStIdx); + } + + private: + MIRModule *mirModule; + MapleAllocator *alloc; + MapleVector nodes; + MapleMap*> expr2Nodes; + // this vector contain func arg nodes first in declaration order and the last is return node + MapleVector funcArgNodes; + MapleMap*> callSite2Nodes; + GStrIdx funcStIdx; + bool hasUpdated; + bool needConv; + bool imported; + int exprIdMax; + EACGObjectNode *globalObj; + EACGRefNode *globalRef; + EACGFieldNode *globalField; + // this is used as a tmp varible for merge cg + std::map> callee2Caller; + void CheckArgNodeOrder(MapleVector &funcArgV); + void UpdateCallerNodes(const MapleVector &caller, const MapleVector &callee); + void UpdateCallerRetNode(MapleVector &caller, const MapleVector &callee); + void UpdateCallerEdges(); + void UpdateCallerEdgesInternal(EACGObjectNode *node1, int32 fieldID, EACGObjectNode *node2); + void UpdateNodes(const EACGBaseNode &actualInCallee, EACGBaseNode &actualInCaller, bool firstTime); + void UpdateCallerWithCallee(EACGObjectNode &objInCaller, const EACGObjectNode &objInCallee, bool firstTime); + + void SetCGUpdateFlag() { + hasUpdated = true; + } +}; + +class EACGBaseNode { + public: + friend class BinaryMplExport; + friend class BinaryMplImport; + friend class EACGObjectNode; + friend class EACGFieldNode; + friend class EACGActualNode; + friend class EACGRefNode; + friend class EACGPointerNode; + friend class EAConnectionGraph; + + EACGBaseNode(MIRModule *m, MapleAllocator *a, NodeKind nk, EAConnectionGraph *ec) + : locInfo(nullptr), mirModule(m), alloc(a), kind(nk), meExpr(nullptr), eaStatus(kNoEscape), id(0), eaCG(ec) {} + + EACGBaseNode(MIRModule *m, MapleAllocator *a, NodeKind nk, EAConnectionGraph &ec, MeExpr *expr, EAStatus initialEas, + int i) + : locInfo(nullptr), mirModule(m), alloc(a), kind(nk), meExpr(expr), eaStatus(initialEas), id(i), eaCG(&ec) { + ec.SetCGUpdateFlag(); + } + + virtual ~EACGBaseNode() = default; + + virtual bool IsFieldNode() const { + return kind == kFieldNode; + } + + virtual bool IsObjectNode() const { + return kind == kObejectNode; + } + + virtual bool IsReferenceNode() const { + return kind == kReferenceNode; + } + + virtual bool IsActualNode() const { + return kind == kActualNode; + } + + virtual bool IsPointerNode() const { + return kind == kPointerNode; + } + + virtual const 
MeExpr *GetMeExpr() const { + return meExpr; + } + + virtual void SetMeExpr(MeExpr &newExpr) { + if (IsFieldNode() && newExpr.GetMeOp() != kMeOpIvar && newExpr.GetMeOp() != kMeOpOp) { + CHECK_FATAL(false, "must be kMeOpIvar or kMeOpOp"); + } else if (IsReferenceNode() && newExpr.GetMeOp() != kMeOpVar && newExpr.GetMeOp() != kMeOpReg && + newExpr.GetMeOp() != kMeOpAddrof && newExpr.GetMeOp() != kMeOpConststr) { + CHECK_FATAL(false, "must be kMeOpVar, kMeOpReg, kMeOpAddrof or kMeOpConststr"); + } + meExpr = &newExpr; + } + + const std::set &GetPointsToSet() const { + CHECK_FATAL(!IsPointerNode(), "must be pointer node"); + return pointsTo; + }; + + virtual bool AddOutNode(EACGBaseNode &newOut); + + virtual EAStatus GetEAStatus() const { + return eaStatus; + } + + virtual const std::set &GetInSet() const { + return in; + } + + virtual void InsertInSet(EACGBaseNode *val) { + (void)in.insert(val); + } + + virtual const std::set &GetOutSet() const { + CHECK_FATAL(IsActualNode(), "must be actual node"); + return out; + } + + virtual void InsertOutSet(EACGBaseNode *val) { + (void)out.insert(val); + } + + virtual bool UpdateEAStatus(EAStatus newEas) { + if (newEas > eaStatus) { + eaStatus = newEas; + PropagateEAStatusForNode(this); + eaCG->SetCGUpdateFlag(); + return true; + } + return false; + } + + bool IsBelongTo(const EAConnectionGraph *cg) const { + return this->eaCG == cg; + } + + const EAConnectionGraph *GetEACG() const { + return eaCG; + } + + EAConnectionGraph *GetEACG() { + return eaCG; + } + + void SetEACG(EAConnectionGraph *cg) { + this->eaCG = cg; + } + + void SetID(int setId) { + this->id = static_cast(setId); + } + + bool CanIgnoreRC() const; + + protected: + Location *locInfo; + MIRModule *mirModule; + MapleAllocator *alloc; + NodeKind kind; + MeExpr *meExpr; + EAStatus eaStatus; + size_t id; + // OBJ<->Field will not in following Set + std::set in; + std::set out; + std::set pointsTo; + EAConnectionGraph *eaCG; + + virtual void CheckAllConnectionInNodes(); + virtual std::string GetName(const IRMap *irMap) const; + virtual void DumpDotFile(std::ostream&, std::map&, bool, const IRMap *irMap = nullptr) = 0; + virtual void PropagateEAStatusForNode(const EACGBaseNode *subRoot) const; + virtual void GetNodeFormatInDot(std::string &label, std::string &color) const; + virtual bool UpdatePointsTo(const std::set &cPointsTo); + + virtual void SetEAStatus(EAStatus status) { + this->eaStatus = status; + } + + virtual NodeKind GetNodeKind() const { + return kind; + } + + private: + virtual bool ReplaceByGlobalNode() { + CHECK_FATAL(false, "impossible"); + return false; + } +}; + +class EACGPointerNode : public EACGBaseNode { + public: + friend class BinaryMplExport; + friend class BinaryMplImport; + EACGPointerNode(MIRModule *md, MapleAllocator *alloc, EAConnectionGraph *ec) + : EACGBaseNode(md, alloc, kPointerNode, ec), indirectLevel(0) {} + + EACGPointerNode(MIRModule *md, MapleAllocator *alloc, EAConnectionGraph &ec, MeExpr *expr, EAStatus initialEas, int i, + int indirectL) + : EACGBaseNode(md, alloc, kPointerNode, ec, expr, initialEas, i), indirectLevel(indirectL) {}; + ~EACGPointerNode() = default; + + void SetLocation(Location *loc) { + this->locInfo = loc; + } + + int GetIndirectLevel() const { + return indirectLevel; + } + + bool AddOutNode(EACGBaseNode &newOut) override { + if (indirectLevel == 1) { + CHECK_FATAL(!newOut.IsPointerNode(), "must be pointer node"); + (void)pointingTo.insert(&newOut); + (void)out.insert(&newOut); + (void)newOut.in.insert(this); + } else { + 
pointingTo.insert(&newOut); + CHECK_FATAL(pointingTo.size() == 1, "the size must be one"); + CHECK_FATAL(newOut.IsPointerNode(), "must be pointer node"); + CHECK_FATAL((indirectLevel - static_cast(newOut).GetIndirectLevel()) == 1, "must be one"); + (void)out.insert(&newOut); + (void)newOut.in.insert(this); + } + return false; + } + + const std::set &GetPointingTo() const { + return pointingTo; + } + + bool UpdatePointsTo(const std::set&) override { + CHECK_FATAL(false, "impossible to update PointsTo"); + return true; + }; + + void PropagateEAStatusForNode(const EACGBaseNode*) const override { + CHECK_FATAL(false, "impossible to propagate EA status for node"); + } + + void DumpDotFile(std::ostream &fout, std::map &dumped, bool dumpPt, + const IRMap *irMap = nullptr) override; + void CheckAllConnectionInNodes() override {} + + private: + int indirectLevel; + std::set pointingTo; + bool ReplaceByGlobalNode() override { + CHECK_FATAL(false, "impossible to replace by global node"); + return true; + } +}; + +class EACGObjectNode : public EACGBaseNode { + public: + friend class EACGFieldNode; + friend class BinaryMplExport; + friend class BinaryMplImport; + EACGObjectNode(MIRModule *md, MapleAllocator *alloc, EAConnectionGraph *ec) + : EACGBaseNode(md, alloc, kObejectNode, ec), rcOperations(0), ignorRC(false), isPhantom(false) {} + + EACGObjectNode(MIRModule *md, MapleAllocator *alloc, EAConnectionGraph &ec, MeExpr *expr, EAStatus initialEas, int i, + bool isPh) + : EACGBaseNode(md, alloc, kObejectNode, ec, expr, initialEas, i), rcOperations(0), ignorRC(false), + isPhantom(isPh) { + (void)pointsBy.insert(this); + (void)pointsTo.insert(this); + }; + ~EACGObjectNode() = default; + bool IsPhantom() const { + return isPhantom; + }; + + void SetLocation(Location *loc) { + this->locInfo = loc; + } + + const std::map &GetFieldNodeMap() const { + return fieldNodes; + } + + EACGFieldNode *GetFieldNodeFromIdx(FieldID fId) { + if (fieldNodes.find(-1) != fieldNodes.end()) { // -1 expresses global + return fieldNodes[-1]; + } + if (fieldNodes.find(fId) == fieldNodes.end()) { + return nullptr; + } + return fieldNodes[fId]; + } + + bool AddOutNode(EACGBaseNode &newOut) override; + bool UpdatePointsTo(const std::set&) override { + CHECK_FATAL(false, "impossible"); + return true; + }; + + bool IsPointedByFieldNode() const; + void PropagateEAStatusForNode(const EACGBaseNode *subRoot) const override; + void DumpDotFile(std::ostream &fout, std::map &dumped, bool dumpPt, + const IRMap *irMap = nullptr) override; + void CheckAllConnectionInNodes() override; + + void Insert2PointsBy(EACGBaseNode *node) { + (void)pointsBy.insert(node); + } + + void EraseNodeFromPointsBy(EACGBaseNode *node) { + pointsBy.erase(node); + } + + void IncresRCOperations() { + ++rcOperations; + } + + void IncresRCOperations(int num) { + rcOperations += num; + } + + int GetRCOperations() const { + return rcOperations; + } + + bool GetIgnorRC() const { + return ignorRC; + } + + void SetIgnorRC(bool ignore) { + ignorRC = ignore; + } + + private: + std::set pointsBy; + int rcOperations; + bool ignorRC; + bool isPhantom; + std::map fieldNodes; + bool ReplaceByGlobalNode() override; +}; + +class EACGRefNode : public EACGBaseNode { + public: + friend class BinaryMplExport; + friend class BinaryMplImport; + EACGRefNode(MIRModule *md, MapleAllocator *alloc, EAConnectionGraph *ec) + : EACGBaseNode(md, alloc, kReferenceNode, ec), isStaticField(false), sym(nullptr), version(0) {} + + EACGRefNode(MIRModule *md, MapleAllocator *alloc, EAConnectionGraph &ec, 
MeExpr *expr, EAStatus initialEas, int i, + bool isS = false) + : EACGBaseNode(md, alloc, kReferenceNode, ec, expr, initialEas, i), + isStaticField(isS), + sym(nullptr), + version(0) {}; + ~EACGRefNode() = default; + bool IsStaticRef() const { + return isStaticField; + }; + void SetSymbolAndVersion(MIRSymbol *mirSym, int versionIdx) { + if (sym != nullptr) { + CHECK_FATAL(sym == mirSym, "must be sym"); + CHECK_FATAL(versionIdx == version, "must be version "); + } + sym = mirSym; + version = versionIdx; + }; + + void DumpDotFile(std::ostream &fout, std::map &dumped, bool dumpPt, + const IRMap *irMap = nullptr) override; + + private: + bool isStaticField; + MIRSymbol *sym; + int version; + bool ReplaceByGlobalNode() override; +}; +class EACGFieldNode : public EACGBaseNode { + public: + friend class BinaryMplExport; + friend class BinaryMplImport; + friend class EACGObjectNode; + EACGFieldNode(MIRModule *md, MapleAllocator *alloc, EAConnectionGraph *ec) + : EACGBaseNode(md, alloc, kFieldNode, ec), + fieldID(0), + isPhantom(false), + sym(nullptr), + version(0), + mirFieldId(0) {} + + EACGFieldNode(MIRModule *md, MapleAllocator *alloc, EAConnectionGraph &ec, MeExpr *expr, EAStatus initialEas, int i, + FieldID fId, EACGObjectNode *bt, bool isPh) + : EACGBaseNode(md, alloc, kFieldNode, ec, expr, initialEas, i), + fieldID(fId), + isPhantom(isPh), + sym(nullptr), + version(0), + mirFieldId(0) { + bt->fieldNodes[fieldID] = this; + (void)belongsTo.insert(bt); + }; + + ~EACGFieldNode() = default; + + FieldID GetFieldID() const { + return fieldID; + }; + + void SetFieldID(FieldID id) { + fieldID = id; + } + + bool IsPhantom() const { + return isPhantom; + } + + const std::set &GetBelongsToObj() const { + return belongsTo; + } + + void AddBelongTo(EACGObjectNode *newObj) { + (void)belongsTo.insert(newObj); + } + + void SetSymbolAndVersion(MIRSymbol *mirSym, int versionIdx, FieldID fID) { + if (sym != nullptr) { + CHECK_FATAL(sym == mirSym, "must be mirSym"); + CHECK_FATAL(version == versionIdx, "must be version"); + CHECK_FATAL(mirFieldId == fID, "must be mir FieldId"); + } + sym = mirSym; + version = versionIdx; + mirFieldId = fID; + }; + + void DumpDotFile(std::ostream &fout, std::map &dumped, bool dumpPt, + const IRMap *irMap = nullptr) override; + + private: + FieldID fieldID; + std::set belongsTo; + bool isPhantom; + MIRSymbol *sym; + int version; + FieldID mirFieldId; + bool ReplaceByGlobalNode() override; +}; + +class EACGActualNode : public EACGBaseNode { + public: + friend class BinaryMplExport; + friend class BinaryMplImport; + EACGActualNode(MIRModule *md, MapleAllocator *alloc, EAConnectionGraph *ec) + : EACGBaseNode(md, alloc, kActualNode, ec), isReturn(false), isPhantom(false), argIdx(0), callSiteInfo(0) {}; + EACGActualNode(MIRModule *md, MapleAllocator *alloc, EAConnectionGraph &ec, MeExpr *expr, EAStatus initialEas, int i, + bool isR, bool isPh, uint8 aI, uint32 callSite) + : EACGBaseNode(md, alloc, kActualNode, ec, expr, initialEas, i), + isReturn(isR), + isPhantom(isPh), + argIdx(aI), + callSiteInfo(callSite) {}; + ~EACGActualNode() = default; + + bool IsReturn() const { + return isReturn; + }; + + bool IsPhantom() const { + return isPhantom; + }; + + uint32 GetArgIndex() const { + return argIdx; + }; + + uint32 GetCallSite() const { + return callSiteInfo; + } + + void DumpDotFile(std::ostream &fout, std::map &dumped, bool dumpPt, + const IRMap *irMap = nullptr) override; + + private: + bool isReturn; + bool isPhantom; + uint8 argIdx; + uint32 callSiteInfo; + bool 
ReplaceByGlobalNode() override; +}; +} // namespace maple +#endif diff --git a/src/mapleall/maple_ipa/include/old/ipa_escape_analysis.h b/src/mapleall/maple_ipa/include/old/ipa_escape_analysis.h new file mode 100644 index 0000000000000000000000000000000000000000..dabf2ad9720f4189f55a41769199ac6480de787f --- /dev/null +++ b/src/mapleall/maple_ipa/include/old/ipa_escape_analysis.h @@ -0,0 +1,104 @@ +/* + * Copyright (c) [2019] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEIPA_INCLUDE_IPAESCAPEANALYSIS_H +#define MAPLEIPA_INCLUDE_IPAESCAPEANALYSIS_H + +#include +#include "class_hierarchy.h" +#include "call_graph.h" +#include "irmap.h" +#include "me_function.h" +#include "ea_connection_graph.h" +#include "intrinsics.h" + +namespace maple { +class IPAEscapeAnalysis { + public: + static constexpr int kCalleeCandidateLimit = 600; + static constexpr int kFuncInSCCLimit = 200000; + static constexpr int kSCCConvergenceLimit = 20; + static constexpr int kCalleeNodeLimit = 100000; + static constexpr int kRCOperLB = 0; + static constexpr bool kDebug = false; + + IPAEscapeAnalysis(KlassHierarchy *khTmp, IRMap *irMapTmp, MeFunction *funcTmp, MemPool *mp, CallGraph *pcgTmp) + : kh(khTmp), + irMap(irMapTmp), + ssaTab(&irMap->GetSSATab()), + mirModule(&irMapTmp->GetMIRModule()), + func(funcTmp), + eaCG(func->GetMirFunc()->GetEACG()), + pcg(pcgTmp), + allocator(mp), + cgChangedInSCC(false), + tempCount(0), + retVar(nullptr) {} + ~IPAEscapeAnalysis() = default; + void ConstructConnGraph(); + void DoOptimization(); + + private: + TyIdx GetAggElemType(const MIRType &aggregate) const; + bool IsSpecialEscapedObj(const MeExpr &alloc) const; + EACGRefNode *GetOrCreateCGRefNodeForVar(VarMeExpr &var, bool createObjNode = false); + EACGRefNode *GetOrCreateCGRefNodeForAddrof(AddrofMeExpr &var, bool createObjNode = false); + EACGRefNode *GetOrCreateCGRefNodeForReg(RegMeExpr ®, bool createObjNode = false); + EACGRefNode *GetOrCreateCGRefNodeForVarOrReg(MeExpr &var, bool createObjNode = false); + void GetArrayBaseNodeForReg(std::vector &nodes, RegMeExpr ®Var, MeStmt &stmt); + void GetOrCreateCGFieldNodeForIvar(std::vector &fieldNodes, IvarMeExpr &ivar, MeStmt &stmt, + bool createObjNode); + void GetOrCreateCGFieldNodeForIAddrof(std::vector &fieldNodes, OpMeExpr &expr, MeStmt &stmt, + bool createObjNode); + EACGObjectNode *GetOrCreateCGObjNode(MeExpr *expr, const MeStmt *stmt = nullptr, EAStatus easOfPhanObj = kNoEscape); + void GetCGNodeForMeExpr(std::vector &nodes, MeExpr &expr, MeStmt &stmt, bool createObjNode); + void CollectDefStmtForReg(std::set &visited, std::set &defStmts, RegMeExpr ®Var); + void UpdateEscConnGraphWithStmt(MeStmt &stmt); + void UpdateEscConnGraphWithPhi(const BB &bb); + void HandleParaAtFuncEntry(); + void HandleParaAtCallSite(uint32 callInfo, CallMeStmt &call); + void HandleSingleCallee(CallMeStmt &callMeStmt); + bool HandleSpecialCallee(CallMeStmt *callMeStmt); + void HandleMultiCallees(const CallMeStmt &callMeStmt); + EAConnectionGraph *GetEAConnectionGraph(MIRFunction &function) 
const; + void ProcessNoAndRetEscObj(); + void ProcessRetStmt(); + VarMeExpr *CreateEATempVarWithName(const std::string &name) const; + VarMeExpr *CreateEATempVar(); + VarMeExpr *GetOrCreateEARetTempVar(); + VarMeExpr *CreateEATempVarMeExpr(OriginalSt &ost) const; + OriginalSt *CreateEATempOstWithName(const std::string &name) const; + OriginalSt *CreateEATempOst(); + OriginalSt *CreateEARetTempOst() const; + VarMeExpr *GetOrCreateEARetTempVarMeExpr(OriginalSt &ost); + void CountObjRCOperations(); + void DeleteRedundantRC(); + + KlassHierarchy *kh; + IRMap *irMap; + SSATab *ssaTab; + MIRModule *mirModule; + MeFunction *func; + EAConnectionGraph *eaCG; + CallGraph *pcg; + MapleAllocator allocator; + bool cgChangedInSCC; + uint32 tempCount; + std::vector noAndRetEscObj; + VarMeExpr *retVar; + std::vector noAndRetEscOst; + std::vector gcStmts; +}; +} // namespace maple +#endif diff --git a/src/mapleall/maple_ipa/include/old/ipa_option.h b/src/mapleall/maple_ipa/include/old/ipa_option.h new file mode 100644 index 0000000000000000000000000000000000000000..5b44a8d24e3bc1d950132a069070910f48f254e4 --- /dev/null +++ b/src/mapleall/maple_ipa/include/old/ipa_option.h @@ -0,0 +1,67 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLE_IPA_OPTION_H +#define MAPLE_IPA_OPTION_H +#include +#include "mir_parser.h" +#include "opcode_info.h" +#include "option.h" +#include "bin_mpl_export.h" +#include "me_phase_manager.h" + +namespace maple { +class IpaOption { + public: + static IpaOption &GetInstance(); + + ~IpaOption() = default; + + bool SolveOptions() const; + + bool ParseCmdline(int argc, char **argv, std::vector &fileNames) const; + + private: + IpaOption() = default; +}; + +class MeFuncPM1 : public MeFuncPM { + public: + explicit MeFuncPM1(MemPool *memPool) : MeFuncPM(memPool) { + SetPhaseID(&MeFuncPM1::id); + } + PHASECONSTRUCTOR(MeFuncPM1); + std::string PhaseName() const override; + ~MeFuncPM1() override {} + + private: + void GetAnalysisDependence(AnalysisDep &aDep) const override; + void DoPhasesPopulate(const MIRModule &m) override; +}; + +class MeFuncPM2 : public MeFuncPM { + public: + explicit MeFuncPM2(MemPool *memPool) : MeFuncPM(memPool) { + SetPhaseID(&MeFuncPM2::id); + } + PHASECONSTRUCTOR(MeFuncPM2); + std::string PhaseName() const override; + ~MeFuncPM2() override {} + + private: + void GetAnalysisDependence(AnalysisDep &aDep) const override; + void DoPhasesPopulate(const MIRModule &m) override; +}; +} // namespace maple +#endif // MAPLE_IPA_OPTION_H diff --git a/src/mapleall/maple_ipa/include/old/mrt_info.def b/src/mapleall/maple_ipa/include/old/mrt_info.def new file mode 100644 index 0000000000000000000000000000000000000000..e8bd87cf7da66941db95af8c79fab90745d78d47 --- /dev/null +++ b/src/mapleall/maple_ipa/include/old/mrt_info.def @@ -0,0 +1,639 @@ +/* + * Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. 
+ * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +// pure, defArg, defGlobal, retGlobal, throwEh, retArg, defPrivate, name +{ true, false, false, false, false, false, false, "MCC_GetOrInsertLiteral"}, +{ true, false, false, false, false, false, false, "MCC_StringAppend_StringString"}, +{ true, false, false, false, false, false, false, "MCC_CStrToJStr"}, +{ true, false, false, false, false, false, false, "MCC_String_Equals_NotallCompress"}, +{ true, false, false, false, false, false, false, "MCC_StringAppend"}, +{ true, false, false, false, false, false, false, "MCC_StringAppend_StringInt"}, +{ true, false, false, false, false, false, false, "MCC_StringAppend_StringJcharString"}, +{ true, false, false, false, true, false, false, "MCC_ThrowStringIndexOutOfBoundsException"}, +{ true, false, false, false, true, false, false, "Native_java_lang_String_charAt__I"}, +{ false, true, false, false, true, false, false, "Native_java_lang_String_getCharsNoCheck__II_3CI"}, +{ true, false, false, false, true, false, false, "Native_java_lang_String_toCharArray__"}, +{ true, false, false, false, true, false, false, "Native_java_lang_String_fastSubstring__II"}, +{ true, false, false, false, true, false, false, "Native_java_lang_String_compareTo__Ljava_lang_String_2"}, +{ true, false, false, false, true, false, false, "Native_java_lang_String_intern__"}, +{ true, false, false, false, true, false, false, "Native_java_lang_String_doReplace__CC"}, +{ true, false, false, false, true, false, false, "Native_java_lang_String_concat__Ljava_lang_String_2"}, +{ true, false, false, false, true, false, false, "Native_java_lang_String_fastIndexOf__II"}, +{ true, false, false, false, true, false, false, "Native_java_lang_Object_clone_Ljava_lang_Object__"}, +{ false, false, false, true, true, false, false, "Native_Thread_currentThread"}, + +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CisProxy_7C_28_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetName_7C_28_29Ljava_2Flang_2FString_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetTypeName_7C_28_29Ljava_2Flang_2FString_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7Ccast_7C_28Ljava_2Flang_2FObject_3B_29Ljava_2Flang_2FObject_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetPrimitiveClass_7C_28Ljava_2Flang_2FString_3B_29Ljava_2Flang_2FClass_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetComponentType_7C_28_29Ljava_2Flang_2FClass_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetDeclaredField_7C_28Ljava_2Flang_2FString_3B_29Ljava_2Flang_2Freflect_2FField_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CtoString_7C_28_29Ljava_2Flang_2FString_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetGenericInterfaces_7C_28_29ALjava_2Flang_2Freflect_2FType_3B"}, +{ true, false, false, false, false, false, false, 
"Ljava_2Flang_2FClass_3B_7CisInstance_7C_28Ljava_2Flang_2FObject_3B_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CforName_7C_28Ljava_2Flang_2FString_3B_29Ljava_2Flang_2FClass_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CforName_7C_28Ljava_2Flang_2FString_3BZLjava_2Flang_2FClassLoader_3B_29Ljava_2Flang_2FClass_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CisAssignableFrom_7C_28Ljava_2Flang_2FClass_3B_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetModifiers_7C_28_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetConstructor_7C_28ALjava_2Flang_2FClass_3B_29Ljava_2Flang_2Freflect_2FConstructor_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetConstructors_7C_28_29ALjava_2Flang_2Freflect_2FConstructor_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CisPrimitive_7C_28_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CisArray_7C_28_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetInterfaces_7C_28_29ALjava_2Flang_2FClass_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetSimpleName_7C_28_29Ljava_2Flang_2FString_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetSuperclass_7C_28_29Ljava_2Flang_2FClass_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetDeclaredMethods_7C_28_29ALjava_2Flang_2Freflect_2FMethod_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetDeclaredFields_7C_28_29ALjava_2Flang_2Freflect_2FField_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetDeclaredConstructors_7C_28_29ALjava_2Flang_2Freflect_2FConstructor_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetDeclaredConstructor_7C_28ALjava_2Flang_2FClass_3B_29Ljava_2Flang_2Freflect_2FConstructor_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetDeclaredMethod_7C_28Ljava_2Flang_2FString_3BALjava_2Flang_2FClass_3B_29Ljava_2Flang_2Freflect_2FMethod_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CisInterface_7C_28_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetCanonicalName_7C_28_29Ljava_2Flang_2FString_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CisEnum_7C_28_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetAnnotation_7C_28Ljava_2Flang_2FClass_3B_29Ljava_2Flang_2Fannotation_2FAnnotation_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetAnnotations_7C_28_29ALjava_2Flang_2Fannotation_2FAnnotation_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetAnnotationsByType_7C_28Ljava_2Flang_2FClass_3B_29ALjava_2Flang_2Fannotation_2FAnnotation_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetDeclaredAnnotation_7C_28Ljava_2Flang_2FClass_3B_29Ljava_2Flang_2Fannotation_2FAnnotation_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetDeclaredAnnotations_7C_28_29ALjava_2Flang_2Fannotation_2FAnnotation_3B"}, +{ true, false, false, false, false, false, false, 
"Ljava_2Flang_2FClass_3B_7CcannotCastMsg_7C_28Ljava_2Flang_2FObject_3B_29Ljava_2Flang_2FString_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CclassForName_7C_28Ljava_2Flang_2FString_3BZLjava_2Flang_2FClassLoader_3B_29Ljava_2Flang_2FClass_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CclassNameImpliesTopLevel_7C_28_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CfindInterfaceMethod_7C_28Ljava_2Flang_2FString_3BALjava_2Flang_2FClass_3B_29Ljava_2Flang_2Freflect_2FMethod_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetPublicMethodRecursive_7C_28Ljava_2Flang_2FString_3BALjava_2Flang_2FClass_3B_29Ljava_2Flang_2Freflect_2FMethod_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetConstructor0_7C_28ALjava_2Flang_2FClass_3BI_29Ljava_2Flang_2Freflect_2FConstructor_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetDeclaredConstructorInternal_7C_28ALjava_2Flang_2FClass_3B_29Ljava_2Flang_2Freflect_2FConstructor_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetDeclaredConstructorsInternal_7C_28Z_29ALjava_2Flang_2Freflect_2FConstructor_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetDeclaredMethodInternal_7C_28Ljava_2Flang_2FString_3BALjava_2Flang_2FClass_3B_29Ljava_2Flang_2Freflect_2FMethod_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetEnclosingConstructorNative_7C_28_29Ljava_2Flang_2Freflect_2FConstructor_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetEnclosingMethodNative_7C_28_29Ljava_2Flang_2Freflect_2FMethod_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetInnerClassFlags_7C_28I_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetInnerClassName_7C_28_29Ljava_2Flang_2FString_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetInterfacesInternal_7C_28_29ALjava_2Flang_2FClass_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetMethod_7C_28Ljava_2Flang_2FString_3BALjava_2Flang_2FClass_3BZ_29Ljava_2Flang_2Freflect_2FMethod_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetNameNative_7C_28_29Ljava_2Flang_2FString_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetPublicDeclaredFields_7C_28_29ALjava_2Flang_2Freflect_2FField_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetPublicFieldRecursive_7C_28Ljava_2Flang_2FString_3B_29Ljava_2Flang_2Freflect_2FField_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetPublicFieldsRecursive_7C_28Ljava_2Futil_2FList_3B_29V"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetPublicMethodsInternal_7C_28Ljava_2Futil_2FList_3B_29V"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetDeclaredMethodsUnchecked_7C_28Z_29ALjava_2Flang_2Freflect_2FMethod_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetSignatureAnnotation_7C_28_29ALjava_2Flang_2FString_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetSignatureAttribute_7C_28_29Ljava_2Flang_2FString_3B"}, +{ true, false, false, false, 
false, false, false, "Ljava_2Flang_2FClass_3B_7CisDeclaredAnnotationPresent_7C_28Ljava_2Flang_2FClass_3B_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CisLocalOrAnonymousClass_7C_28_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CisLocalClass_7C_28_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CisAnonymousClass_7C_28_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CresolveName_7C_28Ljava_2Flang_2FString_3B_29Ljava_2Flang_2FString_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CasSubclass_7C_28Ljava_2Flang_2FClass_3B_29Ljava_2Flang_2FClass_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CdesiredAssertionStatus_7C_28_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetAccessFlags_7C_28_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetEnclosingClass_7C_28_29Ljava_2Flang_2FClass_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetClasses_7C_28_29ALjava_2Flang_2FClass_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetDeclaredClasses_7C_28_29ALjava_2Flang_2FClass_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetDeclaredFieldsUnchecked_7C_28Z_29ALjava_2Flang_2Freflect_2FField_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetDeclaringClass_7C_28_29Ljava_2Flang_2FClass_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetEnclosingConstructor_7C_28_29Ljava_2Flang_2Freflect_2FConstructor_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetEnclosingMethod_7C_28_29Ljava_2Flang_2Freflect_2FMethod_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetEnumConstants_7C_28_29ALjava_2Flang_2FObject_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetEnumConstantsShared_7C_28_29ALjava_2Flang_2FObject_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetField_7C_28Ljava_2Flang_2FString_3B_29Ljava_2Flang_2Freflect_2FField_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetFields_7C_28_29ALjava_2Flang_2Freflect_2FField_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetGenericSuperclass_7C_28_29Ljava_2Flang_2Freflect_2FType_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetInstanceMethod_7C_28Ljava_2Flang_2FString_3BALjava_2Flang_2FClass_3B_29Ljava_2Flang_2Freflect_2FMethod_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetMethod_7C_28Ljava_2Flang_2FString_3BALjava_2Flang_2FClass_3B_29Ljava_2Flang_2Freflect_2FMethod_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetMethods_7C_28_29ALjava_2Flang_2Freflect_2FMethod_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetPackage_7C_28_29Ljava_2Flang_2FPackage_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetPackageName_24_7C_28_29Ljava_2Flang_2FString_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetProtectionDomain_7C_28_29Ljava_2Fsecurity_2FProtectionDomain_3B"}, +{ true, 
false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetSigners_7C_28_29ALjava_2Flang_2FObject_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CisMemberClass_7C_28_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CgetTypeParameters_7C_28_29ALjava_2Flang_2Freflect_2FTypeVariable_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CisAnnotation_7C_28_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CisAnnotationPresent_7C_28Ljava_2Flang_2FClass_3B_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CisFinalizable_7C_28_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CisSynthetic_7C_28_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7CtoGenericString_7C_28_29Ljava_2Flang_2FString_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FClass_3B_7ChashCode_7C_28_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7Cmin_7C_28II_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7Cmax_7C_28JJ_29J"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7Cmin_7C_28JJ_29J"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7Cmax_7C_28II_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7Cabs_7C_28J_29J"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7Cabs_7C_28I_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7Clog10_7C_28D_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7Csignum_7C_28F_29F"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7CaddExact_7C_28JJ_29J"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7CmultiplyExact_7C_28JJ_29J"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7CfloorDiv_7C_28JJ_29J"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7CfloorMod_7C_28JJ_29J"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7CsubtractExact_7C_28JJ_29J"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7CtoIntExact_7C_28J_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7CmultiplyExact_7C_28II_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7CaddExact_7C_28II_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7CsubtractExact_7C_28II_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7Cmin_7C_28FF_29F"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7Csqrt_7C_28D_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7Cabs_7C_28D_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7Cmax_7C_28DD_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7Cmin_7C_28DD_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7CcopySign_7C_28DD_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7CgetExponent_7C_28D_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7Cmax_7C_28FF_29F"}, +{ true, false, false, false, 
false, false, false, "Ljava_2Flang_2FMath_3B_7Cfloor_7C_28D_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7Ctan_7C_28D_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7Cacos_7C_28D_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7Ccos_7C_28D_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7Csin_7C_28D_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7Casin_7C_28D_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7Catan_7C_28D_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7Catan2_7C_28DD_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7Cpow_7C_28DD_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7Cround_7C_28D_29J"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7Clog_7C_28D_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7Cceil_7C_28D_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7CpowerOfTwoD_7C_28I_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7CIEEEremainder_7C_28DD_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7Cabs_7C_28F_29F"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7Ccbrt_7C_28D_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7CcopySign_7C_28FF_29F"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7Ccosh_7C_28D_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7CdecrementExact_7C_28I_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7CdecrementExact_7C_28J_29J"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7Cexp_7C_28D_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7Cexpm1_7C_28D_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7CfloorDiv_7C_28II_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7CfloorMod_7C_28II_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7CgetExponent_7C_28F_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7Chypot_7C_28DD_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7CincrementExact_7C_28I_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7CincrementExact_7C_28J_29J"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7Clog1p_7C_28D_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7CnegateExact_7C_28I_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7CnegateExact_7C_28J_29J"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7CnextAfter_7C_28DD_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7CnextAfter_7C_28FD_29F"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7CnextDown_7C_28D_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7CnextDown_7C_28F_29F"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7CnextUp_7C_28D_29D"}, +{ true, 
false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7CnextUp_7C_28F_29F"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7CpowerOfTwoF_7C_28I_29F"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7Crint_7C_28D_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7Cround_7C_28F_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7Cscalb_7C_28DI_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7Cscalb_7C_28FI_29F"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7Csignum_7C_28D_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7Csinh_7C_28D_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7Ctanh_7C_28D_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7CtoDegrees_7C_28D_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7CtoRadians_7C_28D_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7Culp_7C_28D_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FMath_3B_7Culp_7C_28F_29F"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7Clog_7C_28D_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7Csqrt_7C_28D_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7CIEEEremainder_7C_28DD_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7Cabs_7C_28D_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7Cabs_7C_28F_29F"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7Cabs_7C_28I_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7Cabs_7C_28J_29J"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7Cacos_7C_28D_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7CaddExact_7C_28II_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7CaddExact_7C_28JJ_29J"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7Casin_7C_28D_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7Catan_7C_28D_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7Catan2_7C_28DD_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7Ccbrt_7C_28D_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7Cceil_7C_28D_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7CfloorOrCeil_7C_28DDDD_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7CcopySign_7C_28DD_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7CcopySign_7C_28FF_29F"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7Ccos_7C_28D_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7Ccosh_7C_28D_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7Cexp_7C_28D_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7Cexpm1_7C_28D_29D"}, +{ true, 
false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7Cfloor_7C_28D_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7CfloorDiv_7C_28II_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7CfloorDiv_7C_28JJ_29J"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7CfloorMod_7C_28II_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7CfloorMod_7C_28JJ_29J"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7CgetExponent_7C_28D_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7CgetExponent_7C_28F_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7Chypot_7C_28DD_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7Clog10_7C_28D_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7Clog1p_7C_28D_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7Cmax_7C_28DD_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7Cmax_7C_28FF_29F"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7Cmax_7C_28II_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7Cmax_7C_28JJ_29J"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7Cmin_7C_28DD_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7Cmin_7C_28FF_29F"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7Cmin_7C_28II_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7Cmin_7C_28JJ_29J"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7CmultiplyExact_7C_28II_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7CmultiplyExact_7C_28JJ_29J"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7CnextAfter_7C_28DD_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7CnextAfter_7C_28FD_29F"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7CnextDown_7C_28D_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7CnextDown_7C_28F_29F"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7CnextUp_7C_28D_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7CnextUp_7C_28F_29F"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7Cpow_7C_28DD_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7Crint_7C_28D_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7Cround_7C_28F_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7Cround_7C_28D_29J"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7Cscalb_7C_28DI_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7Cscalb_7C_28FI_29F"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7Csignum_7C_28D_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7Csignum_7C_28F_29F"}, +{ 
true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7Csin_7C_28D_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7Csinh_7C_28D_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7CsubtractExact_7C_28II_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7CsubtractExact_7C_28JJ_29J"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7Ctan_7C_28D_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7Ctanh_7C_28D_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7CtoDegrees_7C_28D_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7CtoIntExact_7C_28J_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7CtoRadians_7C_28D_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7Culp_7C_28D_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FStrictMath_3B_7Culp_7C_28F_29F"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FDouble_3B_7Ccompare_7C_28DD_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FDouble_3B_7CdoubleToLongBits_7C_28D_29J"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FDouble_3B_7ChashCode_7C_28D_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FDouble_3B_7CvalueOf_7C_28D_29Ljava_2Flang_2FDouble_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FDouble_3B_7ClongBitsToDouble_7C_28J_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FDouble_3B_7CisNaN_7C_28D_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FDouble_3B_7CisInfinite_7C_28D_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FDouble_3B_7CtoString_7C_28D_29Ljava_2Flang_2FString_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FDouble_3B_7CdoubleValue_7C_28_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FDouble_3B_7CparseDouble_7C_28Ljava_2Flang_2FString_3B_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FDouble_3B_7CdoubleToRawLongBits_7C_28D_29J"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FDouble_3B_7CisFinite_7C_28D_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FDouble_3B_7Cmax_7C_28DD_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FDouble_3B_7Cmin_7C_28DD_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FDouble_3B_7Csum_7C_28DD_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FDouble_3B_7CtoHexString_7C_28D_29Ljava_2Flang_2FString_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FDouble_3B_7CvalueOf_7C_28Ljava_2Flang_2FString_3B_29Ljava_2Flang_2FDouble_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FDouble_3B_7CbyteValue_7C_28_29B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FDouble_3B_7CcompareTo_7C_28Ljava_2Flang_2FDouble_3B_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FDouble_3B_7CcompareTo_7C_28Ljava_2Flang_2FObject_3B_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FDouble_3B_7Cequals_7C_28Ljava_2Flang_2FObject_3B_29Z"}, +{ true, false, false, false, 
false, false, false, "Ljava_2Flang_2FDouble_3B_7CfloatValue_7C_28_29F"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FDouble_3B_7ChashCode_7C_28_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FDouble_3B_7CintValue_7C_28_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FDouble_3B_7CisInfinite_7C_28_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FDouble_3B_7CisNaN_7C_28_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FDouble_3B_7ClongValue_7C_28_29J"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FDouble_3B_7CshortValue_7C_28_29S"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FDouble_3B_7CtoString_7C_28_29Ljava_2Flang_2FString_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FFloat_3B_7CfloatToIntBits_7C_28F_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FFloat_3B_7CintBitsToFloat_7C_28I_29F"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FFloat_3B_7CisNaN_7C_28F_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FFloat_3B_7CtoString_7C_28F_29Ljava_2Flang_2FString_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FFloat_3B_7CvalueOf_7C_28Ljava_2Flang_2FString_3B_29Ljava_2Flang_2FFloat_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FFloat_3B_7CfloatValue_7C_28_29F"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FFloat_3B_7CfloatToRawIntBits_7C_28F_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FFloat_3B_7CparseFloat_7C_28Ljava_2Flang_2FString_3B_29F"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FFloat_3B_7CisFinite_7C_28F_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FFloat_3B_7CisInfinite_7C_28F_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FFloat_3B_7CvalueOf_7C_28F_29Ljava_2Flang_2FFloat_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FFloat_3B_7Ccompare_7C_28FF_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FFloat_3B_7ChashCode_7C_28F_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FFloat_3B_7Cmax_7C_28FF_29F"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FFloat_3B_7Cmin_7C_28FF_29F"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FFloat_3B_7Csum_7C_28FF_29F"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FFloat_3B_7CtoHexString_7C_28F_29Ljava_2Flang_2FString_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FFloat_3B_7CbyteValue_7C_28_29B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FFloat_3B_7CcompareTo_7C_28Ljava_2Flang_2FFloat_3B_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FFloat_3B_7CcompareTo_7C_28Ljava_2Flang_2FObject_3B_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FFloat_3B_7CdoubleValue_7C_28_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FFloat_3B_7Cequals_7C_28Ljava_2Flang_2FObject_3B_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FFloat_3B_7ChashCode_7C_28_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FFloat_3B_7CintValue_7C_28_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FFloat_3B_7CisInfinite_7C_28_29Z"}, +{ 
true, false, false, false, false, false, false, "Ljava_2Flang_2FFloat_3B_7CisNaN_7C_28_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FFloat_3B_7ClongValue_7C_28_29J"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FFloat_3B_7CshortValue_7C_28_29S"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FFloat_3B_7CtoString_7C_28_29Ljava_2Flang_2FString_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7Cdigit_7C_28CI_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7Ccompare_7C_28CC_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CvalueOf_7C_28C_29Ljava_2Flang_2FCharacter_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CcharCount_7C_28I_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CcodePointAt_7C_28Ljava_2Flang_2FCharSequence_3BI_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisHighSurrogate_7C_28C_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisLowSurrogate_7C_28C_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CtoCodePoint_7C_28CC_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CcodePointAt_7C_28ACI_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CcodePointAtImpl_7C_28ACII_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CcodePointAt_7C_28ACII_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CcodePointBefore_7C_28Ljava_2Flang_2FCharSequence_3BI_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CcodePointBefore_7C_28ACI_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CcodePointBeforeImpl_7C_28ACII_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CcodePointBefore_7C_28ACII_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CcodePointCount_7C_28Ljava_2Flang_2FCharSequence_3BII_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CcodePointCount_7C_28ACII_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CcodePointCountImpl_7C_28ACII_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7Cdigit_7C_28II_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CdigitImpl_7C_28II_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CforDigit_7C_28II_29C"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CgetDirectionality_7C_28C_29B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CgetDirectionality_7C_28I_29B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CgetType_7C_28I_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CgetDirectionalityImpl_7C_28I_29B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CgetName_7C_28I_29Ljava_2Flang_2FString_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisValidCodePoint_7C_28I_29Z"}, +{ true, 
false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CgetNameImpl_7C_28I_29Ljava_2Flang_2FString_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CgetNumericValue_7C_28C_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CgetNumericValue_7C_28I_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CgetNumericValueImpl_7C_28I_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CgetType_7C_28C_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CgetTypeImpl_7C_28I_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7ChashCode_7C_28C_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7ChighSurrogate_7C_28I_29C"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisAlphabetic_7C_28I_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisAlphabeticImpl_7C_28I_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisBmpCodePoint_7C_28I_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisDefined_7C_28C_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisDefined_7C_28I_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisDefinedImpl_7C_28I_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisDigit_7C_28C_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisDigit_7C_28I_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisDigitImpl_7C_28I_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisISOControl_7C_28C_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisISOControl_7C_28I_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisIdentifierIgnorable_7C_28C_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisIdentifierIgnorable_7C_28I_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisIdentifierIgnorableImpl_7C_28I_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisIdeographic_7C_28I_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisIdeographicImpl_7C_28I_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisJavaIdentifierPart_7C_28C_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisJavaIdentifierPart_7C_28I_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisJavaIdentifierStart_7C_28C_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisJavaIdentifierStart_7C_28I_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisJavaLetter_7C_28C_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisJavaLetterOrDigit_7C_28C_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisLetter_7C_28C_29Z"}, +{ true, false, false, false, false, false, false, 
"Ljava_2Flang_2FCharacter_3B_7CisLetter_7C_28I_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisLetterImpl_7C_28I_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisLetterOrDigit_7C_28C_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisLetterOrDigit_7C_28I_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisLetterOrDigitImpl_7C_28I_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisLowerCase_7C_28C_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisLowerCase_7C_28I_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisLowerCaseImpl_7C_28I_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisMirrored_7C_28C_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisMirrored_7C_28I_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisMirroredImpl_7C_28I_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisSpace_7C_28C_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisSpaceChar_7C_28C_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisSpaceChar_7C_28I_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisSpaceCharImpl_7C_28I_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisSupplementaryCodePoint_7C_28I_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisSurrogate_7C_28C_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisSurrogatePair_7C_28CC_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisTitleCase_7C_28C_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisTitleCase_7C_28I_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisTitleCaseImpl_7C_28I_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisUnicodeIdentifierPart_7C_28C_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisUnicodeIdentifierPart_7C_28I_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisUnicodeIdentifierPartImpl_7C_28I_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisUnicodeIdentifierStart_7C_28C_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisUnicodeIdentifierStart_7C_28I_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisUnicodeIdentifierStartImpl_7C_28I_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisUpperCase_7C_28C_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisUpperCase_7C_28I_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisUpperCaseImpl_7C_28I_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisWhitespace_7C_28C_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisWhitespace_7C_28I_29Z"}, +{ true, false, 
false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CisWhitespaceImpl_7C_28I_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7ClowSurrogate_7C_28I_29C"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CoffsetByCodePoints_7C_28Ljava_2Flang_2FCharSequence_3BII_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CoffsetByCodePoints_7C_28ACIIII_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CoffsetByCodePointsImpl_7C_28ACIIII_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CreverseBytes_7C_28C_29C"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CtoChars_7C_28I_29AC"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CtoLowerCase_7C_28C_29C"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CtoLowerCase_7C_28I_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CtoLowerCaseImpl_7C_28I_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CtoString_7C_28C_29Ljava_2Flang_2FString_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CtoTitleCase_7C_28C_29C"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CtoTitleCase_7C_28I_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CtoTitleCaseImpl_7C_28I_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CtoUpperCase_7C_28C_29C"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CtoUpperCase_7C_28I_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CtoUpperCaseImpl_7C_28I_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CcharValue_7C_28_29C"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CcompareTo_7C_28Ljava_2Flang_2FCharacter_3B_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CcompareTo_7C_28Ljava_2Flang_2FObject_3B_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7Cequals_7C_28Ljava_2Flang_2FObject_3B_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7ChashCode_7C_28_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FCharacter_3B_7CtoString_7C_28_29Ljava_2Flang_2FString_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Futil_2FDate_3B_7CgetCalendarSystem_7C_28I_29Lsun_2Futil_2Fcalendar_2FBaseCalendar_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Futil_2FDate_3B_7CgetCalendarDate_7C_28_29Lsun_2Futil_2Fcalendar_2FBaseCalendar_24Date_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Futil_2FDate_3B_7CgetCalendarSystem_7C_28J_29Lsun_2Futil_2Fcalendar_2FBaseCalendar_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Futil_2FDate_3B_7CgetJulianCalendar_7C_28_29Lsun_2Futil_2Fcalendar_2FBaseCalendar_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Futil_2FDate_3B_7CgetCalendarSystem_7C_28Lsun_2Futil_2Fcalendar_2FBaseCalendar_24Date_3B_29Lsun_2Futil_2Fcalendar_2FBaseCalendar_3B"}, +{ true, false, false, false, false, false, false, 
"Ljava_2Futil_2FDate_3B_7CgetMillisOf_7C_28Ljava_2Futil_2FDate_3B_29J"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2Freflect_2FField_3B_7CgetAnnotationNative_7C_28Ljava_2Flang_2FClass_3B_29Ljava_2Flang_2Fannotation_2FAnnotation_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2Freflect_2FField_3B_7CgetNameInternal_7C_28_29Ljava_2Flang_2FString_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2Freflect_2FField_3B_7CgetSignatureAnnotation_7C_28_29ALjava_2Flang_2FString_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2Freflect_2FField_3B_7CgetSignatureAttribute_7C_28_29Ljava_2Flang_2FString_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2Freflect_2FField_3B_7CgetDeclaringClass_7C_28_29Ljava_2Flang_2FClass_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2Freflect_2FField_3B_7CgetName_7C_28_29Ljava_2Flang_2FString_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2Freflect_2FField_3B_7CgetType_7C_28_29Ljava_2Flang_2FClass_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2Freflect_2FField_3B_7Cget_7C_28Ljava_2Flang_2FObject_3B_29Ljava_2Flang_2FObject_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2Freflect_2FField_3B_7CgetAnnotation_7C_28Ljava_2Flang_2FClass_3B_29Ljava_2Flang_2Fannotation_2FAnnotation_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2Freflect_2FField_3B_7CgetAnnotationsByType_7C_28Ljava_2Flang_2FClass_3B_29ALjava_2Flang_2Fannotation_2FAnnotation_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2Freflect_2FField_3B_7CgetArtField_7C_28_29J"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2Freflect_2FField_3B_7CgetBoolean_7C_28Ljava_2Flang_2FObject_3B_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2Freflect_2FField_3B_7CgetByte_7C_28Ljava_2Flang_2FObject_3B_29B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2Freflect_2FField_3B_7CgetChar_7C_28Ljava_2Flang_2FObject_3B_29C"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2Freflect_2FField_3B_7CgetDeclaredAnnotations_7C_28_29ALjava_2Flang_2Fannotation_2FAnnotation_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2Freflect_2FField_3B_7CgetDexFieldIndex_7C_28_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2Freflect_2FField_3B_7CgetDouble_7C_28Ljava_2Flang_2FObject_3B_29D"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2Freflect_2FField_3B_7CgetFloat_7C_28Ljava_2Flang_2FObject_3B_29F"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2Freflect_2FField_3B_7CgetGenericType_7C_28_29Ljava_2Flang_2Freflect_2FType_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2Freflect_2FField_3B_7CgetInt_7C_28Ljava_2Flang_2FObject_3B_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2Freflect_2FField_3B_7CgetLong_7C_28Ljava_2Flang_2FObject_3B_29J"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2Freflect_2FField_3B_7CgetModifiers_7C_28_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2Freflect_2FField_3B_7CgetOffset_7C_28_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2Freflect_2FField_3B_7CgetShort_7C_28Ljava_2Flang_2FObject_3B_29S"}, +{ true, false, false, false, false, false, false, 
"Landroid_2Ficu_2Futil_2FCalendar_3B_7CgetRegionForCalendar_7C_28Landroid_2Ficu_2Futil_2FULocale_3B_29Ljava_2Flang_2FString_3B"}, +{ true, false, false, false, false, false, false, "Landroid_2Ficu_2Futil_2FCalendar_3B_7CgetType_7C_28_29Ljava_2Flang_2FString_3B"}, + +{ false, true, false, false, true, false, false, "Native_java_lang_System_arraycopy__Ljava_lang_Object_2ILjava_lang_Object_2II"}, +{ false, true, false, false, true, false, false, "Native_java_lang_System_arraycopyByteUnchecked___3BI_3BII"}, +{ false, true, false, false, true, false, false, "Native_java_lang_System_arraycopyBooleanUnchecked___3ZI_3ZII"}, +{ false, true, false, false, true, false, false, "Native_java_lang_System_arraycopyShortUnchecked___3SI_3SII"}, +{ false, true, false, false, true, false, false, "Native_java_lang_System_arraycopyLongUnchecked___3JI_3JII"}, +{ false, true, false, false, true, false, false, "Native_java_lang_System_arraycopyIntUnchecked___3II_3III"}, +{ false, true, false, false, true, false, false, "Native_java_lang_System_arraycopyFloatUnchecked___3FI_3FII"}, +{ false, true, false, false, true, false, false, "Native_java_lang_System_arraycopyDoubleUnchecked___3DI_3DII"}, +{ false, true, false, false, true, false, false, "Native_java_lang_System_arraycopyCharUnchecked___3CI_3CII"}, + +{ false, false, false, true, true, false, false, "Ljava_2Flang_2FThread_3B_7CcurrentThread_7C_28_29Ljava_2Flang_2FThread_3B"}, +{ true, false, false, false, true, false, false, "Ljava_2Flang_2FStringFactory_3B_7CnewStringFromChars_7C_28IIAC_29Ljava_2Flang_2FString_3B"}, +{ true, false, false, false, true, false, false, "Ljava_2Flang_2FString_3B_7CfastSubstring_7C_28II_29Ljava_2Flang_2FString_3B"}, +{ true, false, false, false, true, false, false, "Ljava_2Flang_2FString_3B_7CcompareTo_7C_28Ljava_2Flang_2FString_3B_29I"}, +{ false, true, false, false, true, false, false, "Ljava_2Flang_2FString_3B_7CgetCharsNoCheck_7C_28IIACI_29V"}, +{ true, false, false, false, true, false, false, "Ljava_2Flang_2FStringFactory_3B_7CnewStringFromBytes_7C_28ABIII_29Ljava_2Flang_2FString_3B"}, +{ true, false, false, false, true, false, false, "Ljava_2Flang_2FStringFactory_3B_7CnewStringFromString_7C_28Ljava_2Flang_2FString_3B_29Ljava_2Flang_2FString_3B"}, +{ true, false, false, false, true, false, false, "Ljava_2Flang_2FString_3B_7CtoCharArray_7C_28_29AC"}, +{ true, false, false, false, true, false, false, "Ljava_2Flang_2FString_3B_7Cconcat_7C_28Ljava_2Flang_2FString_3B_29Ljava_2Flang_2FString_3B"}, +{ true, false, false, false, true, false, false, "Ljava_2Flang_2FString_3B_7CfastSubstring_7C_28II_29Ljava_2Flang_2FString_3B"}, +{ true, false, false, false, true, false, false, "Ljava_2Flang_2FString_3B_7Cintern_7C_28_29Ljava_2Flang_2FString_3B"}, +{ true, false, false, false, true, false, false, "Ljava_2Flang_2FString_3B_7CdoReplace_7C_28CC_29Ljava_2Flang_2FString_3B"}, +{ true, false, false, false, true, false, false, "Ljava_2Flang_2FString_3B_7CfastIndexOf_7C_28II_29I"}, +{ false, true, false, false, true, false, false, "Ljava_2Flang_2FSystem_3B_7Carraycopy_7C_28Ljava_2Flang_2FObject_3BILjava_2Flang_2FObject_3BII_29V"}, +{ false, true, false, false, true, false, false, "Ljava_2Flang_2FSystem_3B_7CarraycopyCharUnchecked_7C_28ACIACII_29V"}, +{ false, true, false, false, true, false, false, "Ljava_2Flang_2FSystem_3B_7CarraycopyByteUnchecked_7C_28ABIABII_29V"}, +{ false, true, false, false, true, false, false, "Ljava_2Flang_2FSystem_3B_7CarraycopyShortUnchecked_7C_28ASIASII_29V"}, +{ false, true, false, false, true, false, false, 
"Ljava_2Flang_2FSystem_3B_7CarraycopyIntUnchecked_7C_28AIIAIII_29V"}, +{ false, true, false, false, true, false, false, "Ljava_2Flang_2FSystem_3B_7CarraycopyLongUnchecked_7C_28AJIAJII_29V"}, +{ false, true, false, false, true, false, false, "Ljava_2Flang_2FSystem_3B_7CarraycopyFloatUnchecked_7C_28AFIAFII_29V"}, +{ false, true, false, false, true, false, false, "Ljava_2Flang_2FSystem_3B_7CarraycopyDoubleUnchecked_7C_28ADIADII_29V"}, +{ false, true, false, false, true, false, false, "Ljava_2Flang_2FSystem_3B_7CarraycopyBooleanUnchecked_7C_28AZIAZII_29V"}, + +{ true, false, false, false, true, false, false, "Native_java_lang_StringFactory_newStringFromChars__II_3C"}, +{ true, false, false, false, true, false, false, "Native_java_lang_StringFactory_newStringFromString__Ljava_lang_String_2"}, +{ true, false, false, false, true, false, false, "Native_java_lang_StringFactory_newStringFromBytes___3BIII"}, + +{ false, false, false, true, false, false, false, "Ljava_2Flang_2FThrowable_3B_7CnativeFillInStackTrace_7C_28_29Ljava_2Flang_2FObject_3B"}, + +{ false, true, false, false, false, false, true, "Ljava_2Futil_2FMap_3B_7Cput_7C_28Ljava_2Flang_2FObject_3BLjava_2Flang_2FObject_3B_29Ljava_2Flang_2FObject_3B"}, +{ false, true, false, false, false, false, true, "Lsun_2Fmisc_2FUnsafe_3B_7CcompareAndSwapObject_7C_28Ljava_2Flang_2FObject_3BJLjava_2Flang_2FObject_3BLjava_2Flang_2FObject_3B_29Z"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FObject_3B_7CidentityHashCodeNative_7C_28Ljava_2Flang_2FObject_3B_29I"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FString_3B_7CcharAt_7C_28I_29C"}, +{ true, false, false, false, false, false, false, "Ljava_2Futil_2FProperties_3B_7CgetProperty_7C_28Ljava_2Flang_2FString_3B_29Ljava_2Flang_2FString_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Futil_2FProperties_3B_7CgetProperty_7C_28Ljava_2Flang_2FString_3BLjava_2Flang_2FString_3B_29Ljava_2Flang_2FString_3B"}, + +{ true, false, false, false, true, false, true, "Lsun_2Fnet_2FNetProperties_3B_7Cget_7C_28Ljava_2Flang_2FString_3B_29Ljava_2Flang_2FString_3B"}, +{ true, false, false, false, true, false, true, "Lsun_2Fnet_2FNetProperties_3B_7CgetBoolean_7C_28Ljava_2Flang_2FString_3B_29Ljava_2Flang_2FBoolean_3B"}, +{ true, false, false, false, true, false, true, "Lsun_2Fnet_2FNetProperties_3B_7CgetInteger_7C_28Ljava_2Flang_2FString_3BI_29Ljava_2Flang_2FInteger_3B"}, +{ true, false, false, false, true, false, false, "Ljava_2Flang_2Fref_2FReference_3B_7CgetReferent_7C_28_29Ljava_2Flang_2FObject_3B", }, +{ true, false, false, false, true, false, false, "Ljava_2Flang_2FSystem_3B_7CnanoTime_7C_28_29J"}, +{ true, false, false, false, true, false, false, "Llibcore_2Fio_2FLinux_3B_7Cgettid_7C_28_29I"}, +{ true, false, false, false, true, false, false, "Llibcore_2Fio_2FLinux_3B_7Cgetpid_7C_28_29I"}, +{ true, false, false, false, true, false, false, "Ljava_2Flang_2FSystem_3B_7CcurrentTimeMillis_7C_28_29J"}, +{ true, false, false, false, true, false, false, "Lsun_2Futil_2Flocale_2FBaseLocale_24Cache_3B_7Cget_7C_28Ljava_2Flang_2FObject_3B_29Ljava_2Flang_2FObject_3B"}, +{ true, false, false, false, false, false, false, "Ljava_2Flang_2FString_3B_7Cformat_7C_28Ljava_2Flang_2FString_3BALjava_2Flang_2FObject_3B_29Ljava_2Flang_2FString_3B"}, +{ true, false, false, false, false, false, false, "Landroid_2Ficu_2Futil_2FCalendar_3B_7CisEquivalentTo_7C_28Landroid_2Ficu_2Futil_2FCalendar_3B_29Z"}, +{ true, false, false, false, false, false, false, 
"Landroid_2Ficu_2Futil_2FGregorianCalendar_3B_7CisEquivalentTo_7C_28Landroid_2Ficu_2Futil_2FCalendar_3B_29Z"}, +{ true, false, false, false, false, false, false, "Landroid_2Ficu_2Futil_2FAnnualTimeZoneRule_3B_7CisEquivalentTo_7C_28Landroid_2Ficu_2Futil_2FTimeZoneRule_3B_29Z"}, +{ true, false, false, false, false, false, false, "Landroid_2Ficu_2Futil_2FGregorianCalendar_3B_7CisEquivalentTo_7C_28Landroid_2Ficu_2Futil_2FCalendar_3B_29Z"}, + +{ true, false, false, false, true, false, false, "Landroid_2Fview_2FSurface_3B_7CnativeIsValid_7C_28J_29Z"}, +{ true, false, false, false, true, false, false, "Lsun_2Fmisc_2FUnsafe_3B_7CgetObjectVolatile_7C_28Ljava_2Flang_2FObject_3BJ_29Ljava_2Flang_2FObject_3B"}, +{ true, false, false, false, true, false, false, "Ljava_2Flang_2FThread_3B_7CnativeHoldsLock_7C_28Ljava_2Flang_2FObject_3B_29Z"}, +{ true, false, false, false, true, false, false, "Landroid_2Futil_2FLog_3B_7Clogger__entry__max__payload__native_7C_28_29I"}, +{ true, false, false, false, true, false, false, "Landroid_2Fos_2FSystemProperties_3B_7Cnative__get__int_7C_28Ljava_2Flang_2FString_3BI_29I"}, +{ true, false, false, false, true, false, false, "Ljava_2Flang_2FThread_3B_7CnativeGetStatus_7C_28Z_29I"}, +{ false, true, false, false, true, false, false, "Ljava_2Ftime_2Fformat_2FDateTimeFormatterBuilder_24ReducedPrinterParser_3B_7Clambda_24setValue_240_24DateTimeFormatterBuilder_24ReducedPrinterParser_7C_28Ljava_2Ftime_2Fformat_2FDateTimeParseContext_3BJIILjava_2Ftime_2Fchrono_2FChronology_3B_29V"}, + +{ false, false, true, false, true, false, false, "Landroid_2Fos_2FMessageQueue_3B_7CnativePollOnce_7C_28JI_29V"}, +{ false, false, true, false, true, false, false, "Landroid_2Fos_2FTrace_3B_7CnativeTraceBegin_7C_28JLjava_2Flang_2FString_3B_29V"}, +{ false, false, true, false, true, false, false, "Landroid_2Futil_2FLog_3B_7Cprintln__native_7C_28IILjava_2Flang_2FString_3BLjava_2Flang_2FString_3B_29I"}, +{ false, false, true, false, true, false, false, "Landroid_2Fos_2FSystemProperties_3B_7Cnative__add__change__callback_7C_28_29V"}, +{ false, false, true, false, true, false, false, "Landroid_2Fos_2FProcess_3B_7CreadProcFile_7C_28Ljava_2Flang_2FString_3BAIALjava_2Flang_2FString_3BAJAF_29Z"}, +{ false, false, true, false, true, false, false, "Llibcore_2Fio_2FLinux_3B_7Cstat_7C_28Ljava_2Flang_2FString_3B_29Landroid_2Fsystem_2FStructStat_3B"}, +{ false, false, true, false, true, false, false, "Ljava_2Fio_2FFileInputStream_3B_7Copen0_7C_28Ljava_2Flang_2FString_3B_29V"}, +{ false, false, true, false, true, false, false, "Landroid_2Fos_2FProcess_3B_7CgetGidForName_7C_28Ljava_2Flang_2FString_3B_29I"}, +{ false, false, true, false, true, false, false, "Landroid_2Futil_2FEventLog_3B_7CwriteEvent_7C_28IALjava_2Flang_2FObject_3B_29I"}, +{ false, false, true, false, true, false, false, "Ljava_2Flang_2FObject_3B_7CinternalClone_7C_28_29Ljava_2Flang_2FObject_3B"}, +{ false, false, true, false, true, false, false, "Llibcore_2Fio_2FLinux_3B_7CwriteBytes_7C_28Ljava_2Fio_2FFileDescriptor_3BLjava_2Flang_2FObject_3BII_29I"}, +{ false, false, true, false, true, false, false, "Landroid_2Fos_2FProcess_3B_7CgetPidsForCommands_7C_28ALjava_2Flang_2FString_3B_29AI"}, +{ false, false, true, false, true, false, false, "Ljava_2Fio_2FUnixFileSystem_3B_7CcreateFileExclusively0_7C_28Ljava_2Flang_2FString_3B_29Z"}, +{ false, false, true, false, true, false, false, "Ljava_2Fio_2FUnixFileSystem_3B_7Clist0_7C_28Ljava_2Fio_2FFile_3B_29ALjava_2Flang_2FString_3B"}, +{ false, false, true, false, true, false, false, 
"Llibcore_2Fio_2FLinux_3B_7Cclose_7C_28Ljava_2Fio_2FFileDescriptor_3B_29V"}, +{ false, false, true, false, true, false, false, "Llibcore_2Fio_2FAsynchronousCloseMonitor_3B_7CsignalBlockedThreads_7C_28Ljava_2Fio_2FFileDescriptor_3B_29V"}, +{ false, false, true, false, true, false, false, "Llibcore_2Fio_2FLinux_3B_7CreadBytes_7C_28Ljava_2Fio_2FFileDescriptor_3BLjava_2Flang_2FObject_3BII_29I"}, +{ false, false, true, false, true, false, false, "Landroid_2Fos_2FBinder_3B_7CsetThreadStrictModePolicy_7C_28I_29V"}, +{ false, false, true, false, true, false, false, "Ljava_2Futil_2Fregex_2FMatcher_3B_7CfindNextImpl_7C_28JAI_29Z"}, +{ false, false, true, false, true, false, false, "Ljava_2Futil_2Fregex_2FMatcher_3B_7CsetInputImpl_7C_28JLjava_2Flang_2FString_3BII_29V"}, +{ false, false, true, false, true, false, false, "Llibcore_2Ficu_2FNativeConverter_3B_7CsetCallbackDecode_7C_28JIILjava_2Flang_2FString_3B_29V"}, +{ false, false, true, false, true, false, false, "Llibcore_2Fio_2FLinux_3B_7Copen_7C_28Ljava_2Flang_2FString_3BII_29Ljava_2Fio_2FFileDescriptor_3B"}, +{ false, false, true, false, true, false, false, "Landroid_2Fos_2FBinderProxy_3B_7CtransactNative_7C_28ILandroid_2Fos_2FParcel_3BLandroid_2Fos_2FParcel_3BI_29Z"}, +{ false, false, true, false, true, false, false, "Landroid_2Fos_2FBinder_3B_7CrestoreCallingIdentity_7C_28J_29V"}, +{ false, false, true, false, true, false, false, "Landroid_2Fnet_2FLocalSocketImpl_3B_7Creadba__native_7C_28ABIILjava_2Fio_2FFileDescriptor_3B_29I"}, +{ false, false, true, false, true, false, false, "Landroid_2Fos_2FProcess_3B_7CgetPids_7C_28Ljava_2Flang_2FString_3BAI_29AI"}, +{ false, false, true, false, true, false, false, "Landroid_2Fos_2FTrace_3B_7CnativeAsyncTraceBegin_7C_28JLjava_2Flang_2FString_3BI_29V"}, +{ false, false, true, false, true, false, false, "Landroid_2Fos_2FTrace_3B_7CnativeAsyncTraceEnd_7C_28JLjava_2Flang_2FString_3BI_29V"}, +{ false, false, true, false, true, false, false, "Landroid_2Fos_2FParcel_3B_7CnativeAppendFrom_7C_28JJII_29J"}, +{ false, false, true, false, true, false, false, "Ljava_2Fio_2FFileDescriptor_3B_7CisSocket_7C_28I_29Z"}, +{ false, false, true, false, true, false, false, "Landroid_2Fos_2FProcess_3B_7CsetProcessGroup_7C_28II_29V"}, +{ false, false, true, false, true, false, false, "Landroid_2Fos_2FProcess_3B_7CsetThreadPriority_7C_28I_29V"}, +{ false, false, true, false, true, false, false, "Landroid_2Fos_2FParcel_3B_7CnativeEnforceInterface_7C_28JLjava_2Flang_2FString_3B_29V"}, +{ false, false, true, false, true, false, false, "Landroid_2Fos_2FParcel_3B_7CnativeWriteFloat_7C_28JF_29V"}, +{ false, false, true, false, true, false, false, "Landroid_2Fos_2FParcel_3B_7CnativeWriteString_7C_28JLjava_2Flang_2FString_3B_29V"}, +{ false, false, true, false, true, false, false, "Llibcore_2Fio_2FLinux_3B_7CioctlInetAddress_7C_28Ljava_2Fio_2FFileDescriptor_3BILjava_2Flang_2FString_3B_29Ljava_2Fnet_2FInetAddress_3B"}, +{ false, false, true, false, true, false, false, "Ljava_2Flang_2FObject_3B_7CidentityHashCodeNative_7C_28Ljava_2Flang_2FObject_3B_29I"}, +{ false, false, true, false, true, false, false, "Landroid_2Fos_2FBinder_3B_7CgetCallingPid_7C_28_29I"}, +{ false, false, true, false, true, false, false, "Landroid_2Fos_2FBinder_3B_7CflushPendingCommands_7C_28_29V"}, +{ false, false, true, false, true, false, false, "Lsun_2Fmisc_2FUnsafe_3B_7CcompareAndSwapLong_7C_28Ljava_2Flang_2FObject_3BJJJ_29Z"}, +{ false, false, true, false, true, false, false, "Landroid_2Fos_2FBinder_3B_7CgetCallingUid_7C_28_29I"}, +{ false, false, true, false, 
true, false, false, "Landroid_2Fos_2FParcel_3B_7CnativeWriteFileDescriptor_7C_28JLjava_2Fio_2FFileDescriptor_3B_29J"}, +{ false, false, true, false, true, false, false, "Landroid_2Fos_2FParcel_3B_7CnativeReadStrongBinder_7C_28J_29Landroid_2Fos_2FIBinder_3B"}, +{ false, false, true, false, true, false, false, "Landroid_2Fos_2FBinderProxy_3B_7ClinkToDeath_7C_28Landroid_2Fos_2FIBinder_24DeathRecipient_3BI_29V"}, +{ false, false, true, false, true, false, false, "Landroid_2Fos_2FParcel_3B_7CnativeReadLong_7C_28J_29J"}, +{ false, false, true, false, true, false, false, "Landroid_2Fos_2FParcel_3B_7CnativeWriteLong_7C_28JJ_29V"}, +{ false, false, true, false, true, false, false, "Lsun_2Fmisc_2FUnsafe_3B_7CputObject_7C_28Ljava_2Flang_2FObject_3BJLjava_2Flang_2FObject_3B_29V"}, +{ false, false, true, false, true, false, false, "Lsun_2Fmisc_2FUnsafe_3B_7CputOrderedObject_7C_28Ljava_2Flang_2FObject_3BJLjava_2Flang_2FObject_3B_29V"}, +{ false, false, true, false, true, false, false, "Landroid_2Fos_2FParcel_3B_7CnativeUnmarshall_7C_28JABII_29J"}, +{ false, false, true, false, true, false, false, "Landroid_2Fview_2FSurfaceControl_3B_7CnativeIsRogSupport_7C_28_29I"}, +{ false, false, true, false, true, false, false, "Ljava_2Fio_2FUnixFileSystem_3B_7CcheckAccess0_7C_28Ljava_2Fio_2FFile_3BI_29Z"}, +{ false, false, true, false, true, false, false, "Landroid_2Futil_2FMemoryIntArray_3B_7CnativeGet_7C_28IJI_29I"}, +{ false, false, true, false, true, false, false, "Ljava_2Flang_2FObject_3B_7CnotifyAll_7C_28_29V"}, +{ false, false, true, false, true, false, false, "Landroid_2Futil_2FEventLog_3B_7CwriteEvent_7C_28II_29I"}, +{ false, false, true, false, true, false, false, "Llibcore_2Fio_2FLinux_3B_7CgetsockoptLinger_7C_28Ljava_2Fio_2FFileDescriptor_3BII_29Landroid_2Fsystem_2FStructLinger_3B"}, +{ false, false, true, false, true, false, false, "Ljava_2Fio_2FUnixFileSystem_3B_7Ccanonicalize0_7C_28Ljava_2Flang_2FString_3B_29Ljava_2Flang_2FString_3B"}, +{ false, false, true, false, true, false, false, "Landroid_2Fgraphics_2FRegion_3B_7CnativeOp_7C_28JJJI_29Z"}, +{ false, false, true, false, true, false, false, "Landroid_2Futil_2FMemoryIntArray_3B_7CnativeSet_7C_28IJII_29V"}, +{ false, false, true, false, true, false, false, "Ljava_2Fio_2FUnixFileSystem_3B_7Crename0_7C_28Ljava_2Fio_2FFile_3BLjava_2Fio_2FFile_3B_29Z"}, +{ false, false, true, false, true, false, false, "Ljava_2Flang_2FClass_3B_7CclassForName_7C_28Ljava_2Flang_2FString_3BZLjava_2Flang_2FClassLoader_3B_29Ljava_2Flang_2FClass_3B"}, +{ false, false, true, false, true, false, false, "Ldalvik_2Fsystem_2FVMRuntime_3B_7CnewUnpaddedArray_7C_28Ljava_2Flang_2FClass_3BI_29Ljava_2Flang_2FObject_3B"}, +{ false, false, true, false, true, false, false, "Llibcore_2Fio_2FLinux_3B_7Cchmod_7C_28Ljava_2Flang_2FString_3BI_29V"}, +{ false, false, true, false, true, false, false, "Landroid_2Fos_2FUEventObserver_3B_7CnativeSetup_7C_28_29V"}, +{ false, false, true, false, true, false, false, "Landroid_2Fview_2FSurfaceControl_3B_7CnativeSetActiveConfig_7C_28Landroid_2Fos_2FIBinder_3BI_29Z"}, +{ false, false, true, false, true, false, false, "Ljava_2Fio_2FFileDescriptor_3B_7Csync_7C_28_29V"}, +{ false, false, true, false, true, false, false, "Landroid_2Fgraphics_2FMatrix_3B_7CnGetValues_7C_28JAF_29V"}, +{ false, false, true, false, true, false, false, "Landroid_2Fgraphics_2FBaseCanvas_3B_7CnDrawColor_7C_28JII_29V"}, +{ false, false, true, false, true, false, false, "Landroid_2Fgraphics_2FRegion_3B_7CnativeSetRect_7C_28JIIII_29Z"}, +{ false, false, true, false, true, false, false, 
"Landroid_2Fos_2FDebug_3B_7CgetPss_7C_28IAJAJ_29J"}, +{ false, false, true, false, true, false, false, "Landroid_2Fcontent_2Fres_2FXmlBlock_3B_7CnativeNext_7C_28J_29I"}, +{ false, false, true, false, true, false, false, "Lsun_2Fnio_2Fch_2FFileDispatcherImpl_3B_7Cread0_7C_28Ljava_2Fio_2FFileDescriptor_3BJI_29I"}, +{ false, false, true, false, true, false, false, "Llibcore_2Fio_2FLinux_3B_7Cfstat_7C_28Ljava_2Fio_2FFileDescriptor_3B_29Landroid_2Fsystem_2FStructStat_3B"}, +{ false, false, true, false, true, false, false, "Lsun_2Fnio_2Ffs_2FUnixNativeDispatcher_3B_7Copen0_7C_28JII_29I"}, +{ false, false, true, false, true, false, false, "Llibcore_2Futil_2FNativeAllocationRegistry_3B_7CapplyFreeFunction_7C_28JJ_29V"}, +{ false, false, true, false, true, false, false, "Ldalvik_2Fsystem_2FVMRuntime_3B_7CregisterNativeFree_7C_28I_29V"}, +{ false, false, true, false, true, false, false, "Ljava_2Flang_2FThread_3B_7Cinterrupted_7C_28_29Z"}, +{ false, false, true, false, true, false, false, "Landroid_2Fos_2FParcel_3B_7CnativeCreateByteArray_7C_28J_29AB"}, +{ false, false, true, false, true, false, false, "Ljava_2Flang_2FClass_3B_7CgetDeclaredFields_7C_28_29ALjava_2Flang_2Freflect_2FField_3B"}, +{ false, false, true, false, true, false, false, "Landroid_2Fview_2FSurfaceControl_3B_7CnativeGetContentFrameStats_7C_28JLandroid_2Fview_2FWindowContentFrameStats_3B_29Z"}, +{ false, false, true, false, true, false, false, "Lsun_2Fnio_2Fch_2FFileDispatcherImpl_3B_7Cwrite0_7C_28Ljava_2Fio_2FFileDescriptor_3BJI_29I"}, +{ false, false, true, false, true, false, false, "Ljava_2Flang_2FThread_3B_7CnativeInterrupt_7C_28_29V"}, +{ false, false, true, false, true, false, false, "Ljava_2Flang_2FThread_3B_7CnativeSetPriority_7C_28I_29V"}, +{ false, false, true, false, true, false, false, "Ljava_2Flang_2FRuntime_3B_7CnativeExit_7C_28I_29V"}, +{ false, false, true, false, true, false, false, "Llibcore_2Fio_2FLinux_3B_7Cstrerror_7C_28I_29Ljava_2Flang_2FString_3B"}, +{ false, false, true, false, true, false, false, "Landroid_2Fcontent_2Fres_2FXmlBlock_3B_7CnativeDestroy_7C_28J_29V"}, +{ false, false, true, false, true, false, false, "Landroid_2Fos_2FProcess_3B_7CgetThreadPriority_7C_28I_29I"}, +{ false, false, true, false, true, false, false, "Landroid_2Fos_2FFileObserver_24ObserverThread_3B_7Cinit_7C_28_29I"}, +{ false, false, true, false, true, false, false, "Landroid_2Fview_2FSurface_3B_7CnativeSyncFrameInfo_7C_28JJ_29V"}, +{ false, false, true, false, true, false, false, "Llibcore_2Ficu_2FICU_3B_7CgetCurrencyCode_7C_28Ljava_2Flang_2FString_3B_29Ljava_2Flang_2FString_3B"}, +{ false, false, true, false, true, false, false, "Landroid_2Fos_2FSystemProperties_3B_7Cnative__get_7C_28Ljava_2Flang_2FString_3BLjava_2Flang_2FString_3B_29Ljava_2Flang_2FString_3B"}, +{ false, false, true, false, true, false, false, "Ldalvik_2Fsystem_2FVMRuntime_3B_7Cproperties_7C_28_29ALjava_2Flang_2FString_3B"}, +{ false, false, true, false, true, false, false, "Ljava_2Futil_2Fregex_2FMatcher_3B_7CopenImpl_7C_28J_29J"}, +{ false, false, true, false, true, false, false, "Ljava_2Futil_2Fregex_2FPattern_3B_7CcompileImpl_7C_28Ljava_2Flang_2FString_3BI_29J"}, +{ false, false, true, false, true, false, false, "Ljava_2Flang_2FThrowable_3B_7CnativeFillInStackTrace_7C_28_29Ljava_2Flang_2FObject_3B"}, +{ false, false, true, false, true, false, false, "Llibcore_2Ficu_2FNativeConverter_3B_7CopenConverter_7C_28Ljava_2Flang_2FString_3B_29J"}, +{ false, false, true, false, true, false, false, 
"Landroid_2Fview_2FDisplayEventReceiver_3B_7CnativeInit_7C_28Ljava_2Flang_2Fref_2FWeakReference_3BLandroid_2Fos_2FMessageQueue_3BI_29J"}, +{ false, false, true, false, true, false, false, "Landroid_2Fcontent_2Fres_2FStringBlock_3B_7CnativeGetString_7C_28JI_29Ljava_2Flang_2FString_3B"}, +{ false, false, true, false, true, false, false, "Llibcore_2Futil_2FCharsetUtils_3B_7CtoUtf8Bytes_7C_28Ljava_2Flang_2FString_3BII_29AB"}, diff --git a/src/mapleall/maple_ipa/include/prop_parameter_type.h b/src/mapleall/maple_ipa/include/prop_parameter_type.h new file mode 100644 index 0000000000000000000000000000000000000000..e03811db6417aa519f69d8038dab0915de8cdcc3 --- /dev/null +++ b/src/mapleall/maple_ipa/include/prop_parameter_type.h @@ -0,0 +1,61 @@ +/* + * Copyright (c) [2021-2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLE_IPA_INCLUDE_PROP_PARAM_TYPE_H +#define MAPLE_IPA_INCLUDE_PROP_PARAM_TYPE_H +#include "mir_nodes.h" +#include "mir_builder.h" +#include "call_graph.h" +#include "me_ir.h" +#include "me_irmap.h" +#include "dominance.h" +#include "class_hierarchy.h" +#include "maple_phase.h" +#include "ipa_phase_manager.h" +namespace maple { +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wunused-private-field" + +class PropParamType { + public: + PropParamType(MemPool &memPool, MapleAllocator &alloc, MIRModule &mod, CallGraph &cg, AnalysisDataManager &dataMap) + : memPool(memPool), alloc(alloc), module(mod), builder(*mod.GetMIRBuilder()), + cg(cg), dataMap(dataMap), curFunc(nullptr), debug(false) {} + virtual ~PropParamType() = default; + bool CheckOpndZero(const MeExpr *expr) const; + void ResolveCallStmt(MeStmt &meStmt); + void InsertNullCheck(CallMeStmt &callStmt, const std::string &funcName, uint32 index, MeExpr &receiver); + bool CheckCondtionStmt(const MeStmt &meStmt) const; + void ResolveIreadExpr(MeExpr &expr); + void TraversalMeStmt(MeStmt &meStmt); + void RunOnScc(maple::SCCNode &scc); + void Prop(MIRFunction &func); + + private: + MemPool &memPool; + MapleAllocator &alloc; + MIRModule &module; + MIRBuilder &builder; + CallGraph &cg; + AnalysisDataManager &dataMap; + std::map formalMapLocal; + MIRFunction *curFunc; + bool debug; +}; +#pragma clang diagnostic pop +MAPLE_SCC_PHASE_DECLARE_BEGIN(SCCPropParamType, maple::SCCNode) +OVERRIDE_DEPENDENCE +MAPLE_SCC_PHASE_DECLARE_END +} // namespace maple +#endif // MAPLE_IPA_INCLUDE_PROP_PARAM_TYPE_H diff --git a/src/mapleall/maple_ipa/include/prop_return_null.h b/src/mapleall/maple_ipa/include/prop_return_null.h new file mode 100644 index 0000000000000000000000000000000000000000..d900be109e660f24dce70cc23f7eba9159b7eb63 --- /dev/null +++ b/src/mapleall/maple_ipa/include/prop_return_null.h @@ -0,0 +1,69 @@ +/* + * Copyright (c) [2021-2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLE_IPA_INCLUDE_PROP_RETURN_ATTR_H +#define MAPLE_IPA_INCLUDE_PROP_RETURN_ATTR_H +#include "mir_nodes.h" +#include "mir_builder.h" +#include "call_graph.h" +#include "me_ir.h" +#include "me_irmap.h" +#include "dominance.h" +#include "class_hierarchy.h" +#include "maple_phase.h" +#include "ipa_phase_manager.h" +namespace maple { +class PropReturnAttr { + public: + PropReturnAttr(MemPool &memPool, MapleAllocator &alloc, MIRModule &mod, CallGraph &cg, AnalysisDataManager &dataMap) + : memPool(memPool), alloc(alloc), module(mod), builder(*mod.GetMIRBuilder()), + cg(cg), dataMap(dataMap), inferredRetTyIdx(0), + retTy(kNotSeen), maybeNull(true), debug(false) {} + virtual ~PropReturnAttr() = default; + TyIdx GetInferredTyIdx(MeExpr &expr) const; + void InsertNullCheck(const CallMeStmt &callStmt, MeExpr &receiver) const; + void PropVarInferredType(VarMeExpr &varMeExpr) const; + void PropIvarInferredType(IvarMeExpr &ivar) const; + void VisitVarPhiNode(MePhiNode &varPhi) const; + void VisitMeExpr(MeExpr *meExpr) const; + void ReturnTyIdxInferring(const RetMeStmt &retMeStmt); + void TraversalMeStmt(MeStmt &meStmt); + void TraversalBB(BB *bb); + void Perform(MeFunction &func); + void Initialize(maple::SCCNode &scc) const; + void Prop(maple::SCCNode &scc); + bool PhaseRun(maple::SCCNode &scc); + + private: + MemPool &memPool; + MapleAllocator &alloc; + MIRModule &module; + MIRBuilder &builder; + CallGraph &cg; + AnalysisDataManager &dataMap; + TyIdx inferredRetTyIdx; + enum TagRetTyIdx { + kNotSeen, + kSeen, + kFailed + } retTy; + bool maybeNull; + bool debug; +}; + +MAPLE_SCC_PHASE_DECLARE_BEGIN(SCCPropReturnAttr, maple::SCCNode) +OVERRIDE_DEPENDENCE +MAPLE_SCC_PHASE_DECLARE_END +} // namespace maple +#endif // MAPLE_IPA_INCLUDE_PROP_RETURN_ATTR_H diff --git a/src/mapleall/maple_ipa/include/region_identify.h b/src/mapleall/maple_ipa/include/region_identify.h new file mode 100644 index 0000000000000000000000000000000000000000..5ca4f169a3065b8f3c003bf980e5c6bec97f344d --- /dev/null +++ b/src/mapleall/maple_ipa/include/region_identify.h @@ -0,0 +1,263 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MAPLE_IPA_INCLUDE_REGION_IDENTIFY_H +#define MAPLE_IPA_INCLUDE_REGION_IDENTIFY_H + +#include +#include +#include +#include "cfg_primitive_types.h" +#include "ipa_collect.h" +#include "mir_nodes.h" +#include "opcodes.h" +#include "ptr_list_ref.h" +#include "stmt_identify.h" +#include "suffix_array.h" + +namespace maple { +constexpr size_t kGroupSizeLimit = 2; +using GroupId = size_t; +using StmtIterator = PtrListRefIterator; +using SymbolRegPair = std::pair; +using RegionReturnVector = std::vector; + +class RegionCandidate { + public: + RegionCandidate(StmtInfoId startId, StmtInfoId endId, StmtInfo *start, StmtInfo *end, MIRFunction* function) + : startId(startId), endId(endId), start(start), end(end), function(function), length(endId - startId + 1) {} + virtual ~RegionCandidate() = default; + void CollectRegionInputAndOutput(StmtInfo &stmtInfo, CollectIpaInfo &ipaInfo); + const bool HasDefinitionOutofRegion(DefUsePositions &defUse) const; + bool HasJumpOutOfRegion(StmtInfo &stmtInfo, bool isStart); + bool IsLegal(CollectIpaInfo &ipaInfo); + + static SymbolRegPair GetSymbolRegPair(BaseNode &node) { + switch (node.GetOpCode()) { + case OP_regread: { + return SymbolRegPair(StIdx(0), static_cast(node).GetRegIdx()); + } + case OP_regassign: { + return SymbolRegPair(StIdx(0), static_cast(node).GetRegIdx()); + } + case OP_addrof: + case OP_dread: { + return SymbolRegPair(static_cast(node).GetStIdx(), PregIdx(0)); + } + case OP_dassign: { + return SymbolRegPair(static_cast(node).GetStIdx(), PregIdx(0)); + } + case OP_asm: + case OP_callassigned: + case OP_icallassigned: + case OP_icallprotoassigned: + case OP_intrinsiccallassigned: { + auto *callReturnVector = node.GetCallReturnVector(); + auto &pair = callReturnVector->front(); + return callReturnVector->empty() ? 
SymbolRegPair(0, 0) : SymbolRegPair(pair.first, pair.second.GetPregIdx()); + } + default: { + CHECK_FATAL(false, "unexpected op code"); + return SymbolRegPair(StIdx(0), PregIdx(0)); + } + } + } + + StmtInfo *GetStart() { + return start; + } + + StmtInfo *GetEnd() { + return end; + } + + MIRFunction *GetFunction() { + return function; + } + + void Dump() { + LogInfo::MapleLogger() << "range: " << startId << "->" << endId << "\n"; + function->SetCurrentFunctionToThis(); + for (auto &pair : regionOutputs) { + if (pair.second != 0) { + LogInfo::MapleLogger() << "output: reg %" << pair.second << "\n"; + } else { + LogInfo::MapleLogger() << "output: var %" << function->GetSymbolTabItem(pair.first.Idx())->GetName() << "\n"; + } + } + auto *currStmt = start->GetStmtNode(); + auto endPosition = end->GetStmtNode()->GetStmtInfoId(); + while (currStmt != nullptr && (currStmt->GetOpCode() == OP_label || currStmt->GetStmtInfoId() <= endPosition)) { + currStmt->Dump(); + currStmt = currStmt->GetNext(); + } + } + + void SetGroupId(GroupId id) { + groupId = id; + } + + GroupId GetGroupId() { + return groupId; + } + + StmtInfoId GetStartId() { + return startId; + } + + StmtInfoId GetEndId() { + return endId; + } + + size_t GetLength() { + return length; + } + + std::set &GetRegionInPuts() { + return regionInputs; + } + + std::set &GetRegionOutPuts() { + return regionOutputs; + } + + std::unordered_set &GetStmtJumpToEnd() { + return stmtJumpToEnd; + } + + PrimType GetOutputPrimType() { + if (regionOutputs.empty()) { + return PTY_void; + } + auto stIdx = regionOutputs.begin()->first; + auto pregIdx = regionOutputs.begin()->second; + if (stIdx.Idx() != 0) { + return function->GetSymbolTabItem(stIdx.Idx())->GetType()->GetPrimType(); + } else { + return function->GetPregItem(pregIdx)->GetPrimType(); + } + } + + bool IsOverlapWith(RegionCandidate &rhs) { + return (startId >= rhs.GetStartId() && startId <= rhs.GetEndId()) || + (rhs.GetStartId() >= startId && rhs.GetStartId() <= endId); + } + + bool IsRegionInput(BaseNode &node) { + auto pair = GetSymbolRegPair(node); + return regionInputs.find(pair) != regionInputs.end(); + } + + template + void TraverseRegion(Functor processor) { + auto &stmtList = start->GetCurrBlock()->GetStmtNodes(); + auto begin = StmtNodes::iterator(start->GetStmtNode()); + for (auto it = begin; it != stmtList.end() && it->GetStmtInfoId() <= endId ; ++it) { + processor(it); + } + } + + private: + StmtInfoId startId; + StmtInfoId endId; + StmtInfo* start; + StmtInfo* end; + MIRFunction *function; + size_t length; + GroupId groupId = kInvalidIndex; + std::set regionOutputs; + std::set regionInputs; + std::unordered_set stmtJumpToEnd; +}; + +class RegionGroup { + public: + RegionGroup() = default; + + virtual ~RegionGroup() = default; + + void Dump() { + LogInfo::MapleLogger() << "region group: " << groupId << "\n"; + LogInfo::MapleLogger() << "region number: " << groups.size() << "\n"; + if (groups.size() < kGroupSizeLimit) { + return; + } + for (auto ®ion : groups) { + LogInfo::MapleLogger() << "region candidates: " << region.GetStart()->GetStmtNode()->GetStmtInfoId() << "->" + << region.GetEnd()->GetStmtNode()->GetStmtInfoId() << "\n"; + LogInfo::MapleLogger() << "origin range: " << region.GetStartId() << "->" + << region.GetEndId() << "\n"; + region.Dump(); + } + } + + std::vector &GetGroups() { + return groups; + } + + uint32 GetGroupId() { + return groupId; + } + + void SetGroupId(GroupId id) { + groupId = id; + } + + int64 GetCost() { + return cost; + } + + void SetCost(int64 newCost) { 
+ cost = newCost; + } + + private: + std::vector groups; + uint32 groupId; + int64 cost = 0; +}; + +class RegionIdentify { + public: + explicit RegionIdentify(CollectIpaInfo *ipaInfo) : ipaInfo(ipaInfo) {} + virtual ~RegionIdentify() = default; + void RegionInit(); + std::vector &GetRegionGroups() { + return regionGroups; + } + + private: + void CreateRegionCandidates(SuffixArray &sa); + void CreateRegionGroups(std::vector ®ions); + void ClearSrcMappings(); + bool CheckOverlapAmongGroupRegions(RegionGroup &group, RegionCandidate ®ion); + bool IsRegionLegal(uint startPosition, uint endPosition); + bool CheckCompatibilifyAmongRegionComponents(BaseNode &lhs, BaseNode& rhs); + bool CheckCompatibilifyBetweenSrcs(BaseNode &lhs, BaseNode& rhs); + bool HasSameStructure(RegionCandidate &lhs, RegionCandidate &rhs); + bool CompareSymbolStructure(const StIdx leftIdx, const StIdx rightIdx); + bool CompareRegStructure(const PregIdx leftIdx, const PregIdx rightIdx); + bool CompareConstStructure(const MIRConst *leftConst, const MIRConst *rightConst); + StmtInfo *GetNearestNonnullStmtInfo(StmtInfoId index, bool forward); + + CollectIpaInfo *ipaInfo; + std::vector regionGroups; + std::unordered_map symMap; + std::unordered_map leftRegMap; + std::unordered_map rightRegMap; + std::unordered_map leftConstMap; + std::unordered_map rightConstMap; +}; +} +#endif // MAPLE_IPA_INCLUDE_REGION_IDENTIFY_H diff --git a/src/mapleall/maple_ipa/include/stmt_identify.h b/src/mapleall/maple_ipa/include/stmt_identify.h new file mode 100644 index 0000000000000000000000000000000000000000..b84a57d8a353dd838b6bbd41f4e71dbcbca9c4c9 --- /dev/null +++ b/src/mapleall/maple_ipa/include/stmt_identify.h @@ -0,0 +1,255 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MAPLE_IPA_INCLUDE_STMT_IDENTIFY_H +#define MAPLE_IPA_INCLUDE_STMT_IDENTIFY_H + +#include +#include +#include +#include +#include +#include "irmap.h" +#include "me_ir.h" +#include "mir_nodes.h" +#include "opcodes.h" +#include "types_def.h" +namespace maple { +constexpr PUIdx kInvalidPuIdx = std::numeric_limits::max(); +constexpr size_t kInvalidIndex = std::numeric_limits::max(); +constexpr LabelIdx kInvalidLabelIdx = std::numeric_limits::max(); + +struct DefUsePositions { + MapleVector definePositions; + MapleVector usePositions; + explicit DefUsePositions(MapleAllocator &alloc) + : definePositions(alloc.Adapter()), + usePositions(alloc.Adapter()) {} +}; + +class StmtInfo { + public: + StmtInfo(MeStmt *stmt, PUIdx puIdx, MapleAllocator &alloc) + : allocator(alloc), + meStmt(stmt), + puIdx(puIdx), + hashCandidate(allocator.Adapter()), + symbolDefUse(allocator.Adapter()), + regDefUse(allocator.Adapter()), + locationsJumpFrom(allocator.Adapter()), + locationsJumpTo(allocator.Adapter()) { + if (stmt) { + CreateHashCandidate(); + } + } + virtual ~StmtInfo() = default; + + bool IsValid() { + switch (hashCandidate[0]) { + case OP_switch: + case OP_return: + case OP_asm: + return false; + default: + return valid; + } + } + + bool IsCall() { + switch (hashCandidate[0]) { + case OP_call: + case OP_callassigned: + case OP_icall: + case OP_icallassigned: + case OP_icallprotoassigned:{ + return true; + } + default: { + return false; + } + } + } + + void CreateHashCandidate() { + hashCandidate.emplace_back(meStmt->GetOp()); + if (meStmt->GetOp() == OP_call || meStmt->GetOp() == OP_callassigned) { + hashCandidate.emplace_back(static_cast(meStmt)->GetPUIdx()); + } + if (meStmt->GetOp() == OP_intrinsiccall || + meStmt->GetOp() == OP_intrinsiccallassigned || + meStmt->GetOp() == OP_intrinsiccallwithtypeassigned) { + hashCandidate.emplace_back(static_cast(meStmt)->GetIntrinsic()); + } + if (meStmt->GetVarLHS() != nullptr) { + GetExprHashCandidate(*meStmt->GetVarLHS()); + } + if (meStmt->GetOp() == OP_iassign) { + GetExprHashCandidate(*static_cast(meStmt)->GetLHSVal()); + } + hashCandidate.emplace_back(meStmt->NumMeStmtOpnds()); + for (auto i = 0; i < meStmt->NumMeStmtOpnds(); ++i) { + GetExprHashCandidate(*meStmt->GetOpnd(i)); + } + } + + void GetExprHashCandidate(MeExpr &meExpr) { + hashCandidate.emplace_back(meExpr.GetOp()); + hashCandidate.emplace_back(meExpr.GetPrimType()); + if (meExpr.GetMeOp() == kMeOpIvar) { + auto &ivar = static_cast(meExpr); + MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(ivar.GetTyIdx()); + hashCandidate.emplace_back(static_cast(type)->GetPointedType()->GetPrimType()); + hashCandidate.emplace_back(ivar.GetFieldID()); + valid &= ivar.GetFieldID() == 0; + } + if (meExpr.GetMeOp() == kMeOpVar) { + auto &var = static_cast(meExpr); + hashCandidate.emplace_back(var.GetFieldID()); + valid &= var.GetFieldID() == 0; + } + if (meExpr.GetMeOp() == kMeOpAddrof) { + auto &addr = static_cast(meExpr); + hashCandidate.emplace_back(addr.GetFieldID()); + valid &= addr.GetFieldID() == 0; + } + if (meExpr.GetMeOp() == kMeOpOp) { + auto &opExpr = static_cast(meExpr); + hashCandidate.emplace_back(opExpr.GetFieldID()); + valid &= opExpr.GetFieldID() == 0; + } + for (auto i = 0; i < meExpr.GetNumOpnds(); ++i) { + GetExprHashCandidate(*meExpr.GetOpnd(i)); + } + } + + void DumpMeStmt(const IRMap *irMap) const { + if (meStmt) { + meStmt->Dump(irMap); + } + } + + void DumpStmtNode() const { + if (stmt) { + stmt->Dump(0); + } + } + + void ClearMeStmt() { + meStmt = nullptr; + } + 
+ MeStmt *GetMeStmt() { + return meStmt; + } + + void SetStmtNode(StmtNode *node) { + stmt = node; + } + + StmtNode *GetStmtNode() { + return stmt; + } + + void SetCurrBlock(BlockNode *block) { + currBlock = block; + } + + BlockNode *GetCurrBlock() { + return currBlock; + } + + const bool operator==(const StmtInfo &rhs) const { + if (hashCandidate.size() != rhs.hashCandidate.size()) { + return false; + } + for (auto i = 0; i < hashCandidate.size(); ++i) { + if (hashCandidate[i] != rhs.hashCandidate[i]) { + return false; + } + } + return true; + } + + const uint8 GetHashCandidateAt(uint index) const { + return hashCandidate[index]; + } + + const size_t GetHashCandidateSize() const { + return hashCandidate.size(); + } + + const PUIdx GetPuIdx() const { + return puIdx; + } + + const uint64 GetFrequency() const { + return frequency; + } + + void SetFrequency(uint64 freq) { + frequency = freq; + } + + DefUsePositions &GetDefUsePositions(OriginalSt &ost) { + if (ost.IsPregOst()) { + return regDefUse.insert({ost.GetPregIdx(), DefUsePositions(allocator)}).first->second; + } else { + return symbolDefUse.insert( + {ost.GetMIRSymbol()->GetStIdx(), DefUsePositions(allocator)}).first->second; + } + } + + MapleUnorderedMap &GetSymbolDefUse() { + return symbolDefUse; + } + + MapleUnorderedMap &GetRegDefUse() { + return regDefUse; + } + + MapleVector &GetLocationsJumpFrom() { + return locationsJumpFrom; + } + + MapleVector &GetLocationsJumpTo() { + return locationsJumpTo; + } + + private: + MapleAllocator &allocator; + StmtNode *stmt = nullptr; + BlockNode *currBlock = nullptr; + MeStmt *meStmt = nullptr; + PUIdx puIdx = kInvalidPuIdx; + uint64 frequency = 0; + bool valid = true; + MapleVector hashCandidate; + MapleUnorderedMap symbolDefUse; + MapleUnorderedMap regDefUse; + MapleVector locationsJumpFrom; + MapleVector locationsJumpTo; +}; + +class StmtInfoHash { + public: + size_t operator()(const StmtInfo &stmtInfo) const { + auto hashCode = stmtInfo.GetHashCandidateAt(0); + for (auto i = 1; i < stmtInfo.GetHashCandidateSize(); ++i) { + hashCode ^= stmtInfo.GetHashCandidateAt(i); + } + return hashCode; + } +}; +} +#endif // MAPLE_IPA_INCLUDE_STMT_IDENTIFY_H diff --git a/src/mapleall/maple_ipa/src/ipa_clone.cpp b/src/mapleall/maple_ipa/src/ipa_clone.cpp new file mode 100644 index 0000000000000000000000000000000000000000..f164000454a4a42c418812cf1115a872f0dfed4e --- /dev/null +++ b/src/mapleall/maple_ipa/src/ipa_clone.cpp @@ -0,0 +1,630 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "ipa_clone.h" +#include "clone.h" +#include "mir_symbol.h" +#include "func_desc.h" +#include "inline.h" + +// For some funcs, when we can ignore their return-values, we clone a new func of +// them without return-values. We configure a list to save these funcs and clone +// at the very beginning so that clones can also enjoy the optimizations after. +// This mainly contains the clone of funcbody(include labels, symbols, arguments, +// etc.) and the update of the new func infomation. 
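+//
+// A minimal sketch of the constant-parameter case handled below; the C function
+// names are hypothetical, only the shape of the transformation is taken from
+// this file:
+//
+//   // before: every call site passes the same constant for the first parameter
+//   int foo(int x) { return x + 1; }
+//   ... foo(3) ... foo(3) ...
+//
+//   // after DoIpaClone: a file-static clone materialises the constant at entry,
+//   // drops the formal, and the call sites are retargeted to it
+//   static int foo.constprop.0() { int x = 3; return x + 1; }
+//   ... foo.constprop.0() ... foo.constprop.0() ...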
+namespace maple { +void IpaClone::InitParams() { + // the option for the clone parameter + if (Options::optForSize) { + numOfCloneVersions = 2; + numOfImpExprLowBound = 2; + numOfImpExprHighBound = 5; + numOfCallSiteLowBound = 2; + numOfCallSiteUpBound = 10; + numOfConstpropValue = 2; + } else { + numOfCloneVersions = Options::numOfCloneVersions; + numOfImpExprLowBound = Options::numOfImpExprLowBound; + numOfImpExprHighBound = Options::numOfImpExprHighBound; + numOfCallSiteLowBound = Options::numOfCallSiteLowBound; + numOfCallSiteUpBound = Options::numOfCallSiteUpBound; + numOfConstpropValue = Options::numOfConstpropValue; + } +} + +bool IpaClone::IsBrCondOrIf(Opcode op) const { + return op == OP_brfalse || op == OP_brtrue || op == OP_if; +} + +MIRSymbol *IpaClone::IpaCloneLocalSymbol(const MIRSymbol &oldSym, const MIRFunction &newFunc) { + MemPool *newMP = newFunc.GetDataMemPool(); + MIRSymbol *newSym = newMP->New(oldSym); + if (oldSym.GetSKind() == kStConst) { + newSym->SetKonst(oldSym.GetKonst()->Clone(*newMP)); + } else if (oldSym.GetSKind() == kStPreg) { + newSym->SetPreg(newMP->New(*oldSym.GetPreg())); + } else if (oldSym.GetSKind() == kStFunc) { + CHECK_FATAL(false, "%s has unexpected local func symbol", oldSym.GetName().c_str()); + } + return newSym; +} + +void IpaClone::IpaCloneSymbols(MIRFunction &newFunc, const MIRFunction &oldFunc) { + size_t symTabSize = oldFunc.GetSymbolTabSize(); + for (size_t i = oldFunc.GetFormalCount() + 1; i < symTabSize; ++i) { + MIRSymbol *sym = oldFunc.GetSymbolTabItem(static_cast(i)); + if (sym == nullptr) { + continue; + } + MIRSymbol *newSym = IpaCloneLocalSymbol(*sym, newFunc); + if (!newFunc.GetSymTab()->AddStOutside(newSym)) { + CHECK_FATAL(false, "%s already existed in func %s", sym->GetName().c_str(), newFunc.GetName().c_str()); + } + } +} + +void IpaClone::IpaCloneLabels(MIRFunction &newFunc, const MIRFunction &oldFunc) { + size_t labelTabSize = oldFunc.GetLabelTab()->GetLabelTableSize(); + for (size_t i = 1; i < labelTabSize; ++i) { + GStrIdx strIdx = oldFunc.GetLabelTab()->GetSymbolFromStIdx(static_cast(i)); + (void)newFunc.GetLabelTab()->AddLabel(strIdx); + } +} + +void IpaClone::IpaClonePregTable(MIRFunction &newFunc, const MIRFunction &oldFunc) { + newFunc.AllocPregTab(); + size_t pregTableSize = oldFunc.GetPregTab()->Size(); + MIRPregTable *newPregTable = newFunc.GetPregTab(); + for (size_t i = 0; i < pregTableSize; ++i) { + MIRPreg *temp = const_cast(oldFunc.GetPregTab()->GetPregTableItem(static_cast(i))); + if (temp != nullptr) { + PregIdx id = newPregTable->CreatePreg(temp->GetPrimType(), temp->GetMIRType()); + MIRPreg *newPreg = newPregTable->PregFromPregIdx(id); + if (newPreg == nullptr || newPreg->GetPregNo() != temp->GetPregNo()) { + ASSERT(false, "The cloned pregNo isn't consistent"); + } + } + } +} + +// IpaClone a function +MIRFunction *IpaClone::IpaCloneFunction(MIRFunction &originalFunction, const std::string &fullName) const { + MapleAllocator cgAlloc(originalFunction.GetDataMemPool()); + ArgVector argument(cgAlloc.Adapter()); + IpaCloneArgument(originalFunction, argument); + MIRType *retType = originalFunction.GetReturnType(); + MIRFunction *newFunc = + mirBuilder.CreateFunction(fullName, *retType, argument, false, originalFunction.GetBody() != nullptr); + CHECK_FATAL(newFunc != nullptr, "create cloned function failed"); + mirBuilder.GetMirModule().AddFunction(newFunc); + newFunc->SetFlag(originalFunction.GetFlag()); + newFunc->SetSrcPosition(originalFunction.GetSrcPosition()); + 
newFunc->SetFuncAttrs(originalFunction.GetFuncAttrs()); + newFunc->SetBaseClassFuncNames(GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(fullName)); + newFunc->GetFuncSymbol()->SetAppearsInCode(true); + newFunc->SetPuidxOrigin(newFunc->GetPuidx()); + if (originalFunction.GetBody() != nullptr) { + CopyFuncInfo(originalFunction, *newFunc); + newFunc->SetBody( + originalFunction.GetBody()->CloneTree(newFunc->GetCodeMempoolAllocator())); + IpaCloneSymbols(*newFunc, originalFunction); + IpaCloneLabels(*newFunc, originalFunction); + IpaClonePregTable(*newFunc, originalFunction); + } + newFunc->SetFuncDesc(originalFunction.GetFuncDesc()); + // All the cloned functions cannot be accessed from other transform unit. + newFunc->SetAttr(FUNCATTR_static); + return newFunc; +} + +MIRFunction *IpaClone::IpaCloneFunctionWithFreq(MIRFunction &originalFunction, + const std::string &fullName, uint64_t callSiteFreq) const { + MapleAllocator cgAlloc(originalFunction.GetDataMemPool()); + ArgVector argument(cgAlloc.Adapter()); + IpaCloneArgument(originalFunction, argument); + MIRType *retType = originalFunction.GetReturnType(); + MIRFunction *newFunc = + mirBuilder.CreateFunction(fullName, *retType, argument, false, originalFunction.GetBody() != nullptr); + CHECK_FATAL(newFunc != nullptr, "create cloned function failed"); + mirBuilder.GetMirModule().AddFunction(newFunc); + newFunc->SetFlag(originalFunction.GetFlag()); + newFunc->SetSrcPosition(originalFunction.GetSrcPosition()); + newFunc->SetFuncAttrs(originalFunction.GetFuncAttrs()); + newFunc->SetBaseClassFuncNames(GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(fullName)); + newFunc->GetFuncSymbol()->SetAppearsInCode(true); + newFunc->SetPuidxOrigin(newFunc->GetPuidx()); + FuncProfInfo *origProfData = originalFunction.GetFuncProfData(); + auto *moduleMp = mirBuilder.GetMirModule().GetMemPool(); + FuncProfInfo *newProfData = moduleMp->New(&mirBuilder.GetMirModule().GetMPAllocator(), + newFunc->GetPuidx(), 0, 0); // skip checksum information + newFunc->SetFuncProfData(newProfData); + newProfData->SetFuncFrequency(callSiteFreq); + newProfData->SetFuncRealFrequency(callSiteFreq); + // original function need to update frequency by real entry value + // update real left frequency + origProfData->SetFuncRealFrequency(origProfData->GetFuncRealFrequency() - callSiteFreq); + if (originalFunction.GetBody() != nullptr) { + CopyFuncInfo(originalFunction, *newFunc); + BlockNode *newbody = originalFunction.GetBody()->CloneTreeWithFreqs(newFunc->GetCodeMempoolAllocator(), + newProfData->GetStmtFreqs(), origProfData->GetStmtFreqs(), + callSiteFreq, /* numer */ + origProfData->GetFuncFrequency(), /* denom */ + static_cast(kKeepOrigFreq) | static_cast(kUpdateFreqbyScale)); + newFunc->SetBody(newbody); + IpaCloneSymbols(*newFunc, originalFunction); + IpaCloneLabels(*newFunc, originalFunction); + IpaClonePregTable(*newFunc, originalFunction); + } + newFunc->SetFuncDesc(originalFunction.GetFuncDesc()); + // All the cloned functions cannot be accessed from other transform unit. 
+ newFunc->SetAttr(FUNCATTR_static); + return newFunc; +} + +void IpaClone::IpaCloneArgument(MIRFunction &originalFunction, ArgVector &argument) const { + for (size_t i = 0; i < originalFunction.GetFormalCount(); ++i) { + auto &formalName = originalFunction.GetFormalName(i); + argument.emplace_back(ArgPair(formalName, originalFunction.GetNthParamType(i))); + } +} + +void IpaClone::CopyFuncInfo(MIRFunction &originalFunction, MIRFunction &newFunc) const { + const auto &funcNameIdx = newFunc.GetBaseFuncNameStrIdx(); + const auto &fullNameIdx = newFunc.GetNameStrIdx(); + const auto &classNameIdx = newFunc.GetBaseClassNameStrIdx(); + const static auto &metaFullNameIdx = mirBuilder.GetOrCreateStringIndex(kFullNameStr); + const static auto &metaClassNameIdx = mirBuilder.GetOrCreateStringIndex(kClassNameStr); + const static auto &metaFuncNameIdx = mirBuilder.GetOrCreateStringIndex(kFuncNameStr); + MIRInfoVector &fnInfo = originalFunction.GetInfoVector(); + const MapleVector &infoIsString = originalFunction.InfoIsString(); + size_t size = fnInfo.size(); + for (size_t i = 0; i < size; ++i) { + if (fnInfo[i].first == metaFullNameIdx) { + newFunc.PushbackMIRInfo(std::pair(fnInfo[i].first, fullNameIdx)); + } else if (fnInfo[i].first == metaFuncNameIdx) { + newFunc.PushbackMIRInfo(std::pair(fnInfo[i].first, funcNameIdx)); + } else if (fnInfo[i].first == metaClassNameIdx) { + newFunc.PushbackMIRInfo(std::pair(fnInfo[i].first, classNameIdx)); + } else { + newFunc.PushbackMIRInfo(std::pair(fnInfo[i].first, fnInfo[i].second)); + } + newFunc.PushbackIsString(infoIsString[i]); + } +} + +bool IpaClone::CheckCostModel(uint32 paramIndex, std::vector &calleeValue, + std::vector &result) const { + uint32 impSize = 0; + for (auto impExpr : result) { + if (curFunc->GetStmtNodeFromMeId(impExpr.GetStmtId()) == nullptr) { + continue; + } + if (curFunc->GetStmtNodeFromMeId(impExpr.GetStmtId())->IsCondBr() || + curFunc->GetStmtNodeFromMeId(impExpr.GetStmtId())->GetOpCode() == OP_if) { + ++impSize; + } + } + if (impSize >= numOfImpExprHighBound) { + return true; + } + auto &calleeInfo = mirModule->GetCalleeParamAboutInt(); + CalleePair keyPair(curFunc->GetPuidx(), paramIndex); + uint32 callSiteSize = 0; + for (auto &value : calleeValue) { + callSiteSize += static_cast(calleeInfo[keyPair][value].size()); + } + if (callSiteSize >= numOfCallSiteUpBound) { + return true; + } + if (callSiteSize < numOfCallSiteLowBound || impSize < numOfImpExprLowBound) { + return false; + } + // Later: we will consider the body size + return true; +} + +void IpaClone::ReplaceIfCondtion(MIRFunction *newFunc, std::vector &result, uint64_t res) const { + ASSERT(newFunc != nullptr, "null ptr check"); + MemPool *currentFunMp = newFunc->GetCodeMempool(); + auto elemPrimType = PTY_u8; + MIRType *type = GlobalTables::GetTypeTable().GetPrimType(elemPrimType); + MIRConst *constVal = nullptr; + for (int32 index = static_cast(result.size()) - 1; index >= 0; --index) { + uint32 stmtId = result[static_cast(index)].GetStmtId(); + StmtNode *newReplace = newFunc->GetStmtNodeFromMeId(stmtId); + ASSERT(newReplace != nullptr, "null ptr check"); + if (newReplace->GetOpCode() == OP_switch) { + continue; + } + if (newReplace->GetOpCode() != OP_if && newReplace->GetOpCode() != OP_brtrue && + newReplace->GetOpCode() != OP_brfalse) { + ASSERT(false, "ERROR: cann't find the replace statement"); + } + IfStmtNode *ifStmtNode = static_cast(newReplace); + constVal = GlobalTables::GetIntConstTable().GetOrCreateIntConst(static_cast(res & 0x1), *type); + res >>= 1; + 
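+    // The bit just consumed from res is the outcome EvalCompareResult recorded for
+    // this statement (the last important expression sits in the lowest bit), so the
+    // branch condition below is frozen to that constant in the cloned body.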
ConstvalNode *constNode = currentFunMp->New(constVal->GetType().GetPrimType(), constVal); + ifStmtNode->SetOpnd(constNode, 0); + } + return; +} + +void IpaClone::RemoveSwitchCase(MIRFunction &newFunc, SwitchNode &switchStmt, std::vector &calleeValue) const { + auto iter = switchStmt.GetSwitchTable().begin(); + while (iter < switchStmt.GetSwitchTable().end()) { + bool isNeed = false; + for (size_t j = 0; j < calleeValue.size(); ++j) { + int64_t value = calleeValue[j]; + if (switchStmt.GetSwitchOpnd()->GetOpCode() == OP_neg) { + value = -value; + } + if (value == iter->first) { + isNeed = true; + break; + } + } + if (!isNeed) { + iter = switchStmt.GetSwitchTable().erase(iter); + } else { + ++iter; + } + } + if (switchStmt.GetSwitchTable().size() == calleeValue.size()) { + StmtNode *stmt = newFunc.GetBody()->GetFirst(); + for (; stmt != newFunc.GetBody()->GetLast(); stmt = stmt->GetNext()) { + if (stmt->GetOpCode() == OP_label && + static_cast(stmt)->GetLabelIdx() == switchStmt.GetDefaultLabel()) { + switchStmt.SetDefaultLabel(0); + newFunc.GetBody()->RemoveStmt(stmt); + break; + } + } + } +} + +void IpaClone::RemoveUnneedSwitchCase(MIRFunction &newFunc, std::vector &result, + std::vector &calleeValue) const { + for (size_t i = 0; i < result.size(); ++i) { + uint32 stmtId = result[i].GetStmtId(); + StmtNode *newSwitch = newFunc.GetStmtNodeFromMeId(stmtId); + if (newSwitch->GetOpCode() != OP_switch) { + continue; + } + SwitchNode *switchStmtNode = static_cast(newSwitch); + RemoveSwitchCase(newFunc, *switchStmtNode, calleeValue); + } +} + +void IpaClone::ModifyParameterSideEffect(MIRFunction *newFunc, uint32 paramIndex) const { + ASSERT(newFunc != nullptr, "null ptr check"); + auto &desc = newFunc->GetFuncDesc(); + if (paramIndex >= kMaxParamCount) { + return; + } + for (size_t idx = paramIndex; idx < kMaxParamCount - 1; ++idx) { + desc.SetParamInfo(idx, desc.GetParamInfo(idx + 1)); + } + desc.SetParamInfo(kMaxParamCount - 1, PI::kUnknown); + return; +} + +void IpaClone::RemoveUnneedParameter(MIRFunction *newFunc, uint32 paramIndex, int64_t value) const { + ASSERT(newFunc != nullptr, "null ptr check"); + if (newFunc->GetBody() != nullptr) { + MemPool *newFuncMP = newFunc->GetCodeMempool(); + // Create the const value + MIRType *type = GlobalTables::GetTypeTable().GetPrimType(PTY_i64); + MIRIntConst *constVal = GlobalTables::GetIntConstTable().GetOrCreateIntConst(static_cast(value), *type); + ConstvalNode *constNode = newFuncMP->New(constVal->GetType().GetPrimType(), constVal); + // Create the dassign statement. + DassignNode *dass = newFuncMP->New(); + MIRSymbol *sym = newFunc->GetFormal(paramIndex); + dass->SetStIdx(sym->GetStIdx()); + dass->SetOpnd(constNode, 0); + dass->SetFieldID(0); + // Insert this dassign statment to the body. + newFunc->GetBody()->InsertFirst(dass); + // Remove the unneed function parameter. + auto &formalVec = newFunc->GetFormalDefVec(); + for (size_t i = paramIndex; i < newFunc->GetFormalCount() - 1; ++i) { + formalVec[i] = formalVec[i + 1]; + } + formalVec.resize(formalVec.size() - 1); + sym->SetStorageClass(kScAuto); + // fix the paramTypelist && paramTypeAttrs. 
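+    // The removed formal must also disappear from the function's MIRFuncType below,
+    // otherwise the symbol table and the prototype would disagree about the arity.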
+ MIRFuncType *funcType = newFunc->GetMIRFuncType(); + std::vector paramTypeList; + std::vector paramTypeAttrsList; + for (size_t i = 0; i < newFunc->GetParamTypes().size(); i++) { + if (i != paramIndex) { + paramTypeList.push_back(funcType->GetParamTypeList()[i]); + paramTypeAttrsList.push_back(funcType->GetParamAttrsList()[i]); + } + } + MIRSymbol *funcSymbol = newFunc->GetFuncSymbol(); + ASSERT(funcSymbol != nullptr, "null ptr check"); + funcSymbol->SetTyIdx(GlobalTables::GetTypeTable().GetOrCreateFunctionType(funcType->GetRetTyIdx(), paramTypeList, + paramTypeAttrsList, funcType->IsVarargs(), funcType->GetRetAttrs())->GetTypeIndex()); + auto *newFuncType = static_cast(funcSymbol->GetType()); + newFunc->SetMIRFuncType(newFuncType); + // Modify the parameter sideeffect + ModifyParameterSideEffect(newFunc, paramIndex); + } + return; +} + +// Clone Function steps: +// 1. clone Function && replace the condtion +// 2. modify the callsite and update the call_graph +void IpaClone::DecideCloneFunction(std::vector &result, uint32 paramIndex, + std::map> &evalMap) const { + uint32 puidx = curFunc->GetPuidx(); + CalleePair keyPair(puidx, paramIndex); + auto &calleeInfo = mirModule->GetCalleeParamAboutInt(); + uint32 index = 0; + for (auto &eval : std::as_const(evalMap)) { + uint64_t evalValue = eval.first; + std::vector calleeValue = eval.second; + if (!CheckCostModel(paramIndex, calleeValue, result)) { + continue; + } + if (index > numOfCloneVersions) { + break; + } + std::string newFuncName = curFunc->GetName() + ".clone." + std::to_string(index++); + InlineTransformer::ConvertPStaticToFStatic(*curFunc); + MIRFunction *newFunc = nullptr; + if (Options::profileUse && curFunc->GetFuncProfData()) { + uint64_t clonedSiteFreqs = 0; + for (auto &value: calleeValue) { + for (auto &callSite : calleeInfo[keyPair][value]) { + MIRFunction *callerFunc = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(callSite.GetPuidx()); + uint32 stmtId = callSite.GetStmtId(); + CallNode *oldCallNode = static_cast(callerFunc->GetStmtNodeFromMeId(stmtId)); + if (oldCallNode == nullptr) { + continue; + } + uint64_t callsiteFreq = callerFunc->GetFuncProfData()->GetStmtFreq(stmtId); + clonedSiteFreqs += callsiteFreq; + } + } + newFunc = IpaCloneFunctionWithFreq(*curFunc, newFuncName, clonedSiteFreqs); + } else { + newFunc = IpaCloneFunction(*curFunc, newFuncName); + } + ReplaceIfCondtion(newFunc, result, evalValue); + RemoveUnneedSwitchCase(*newFunc, result, calleeValue); + for (auto &value: calleeValue) { + bool optCallerParam = false; + if (calleeValue.size() == 1) { + optCallerParam = true; + // If the callleeValue just have one value, it means we can add a dassign stmt. 
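+        // That is, the clone only ever sees this single constant, so RemoveUnneedParameter
+        // materialises it as "formal = constant" at function entry, drops the formal from
+        // the signature, and the matching argument is erased at every call site below.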
+ RemoveUnneedParameter(newFunc, paramIndex, value); + } + for (auto &callSite : calleeInfo[keyPair][value]) { + MIRFunction *callerFunc = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(callSite.GetPuidx()); + if (callerFunc == nullptr) { + CHECK_FATAL(callSite.GetPuidx() != 0, "something wrong in calleeInfo?"); + continue; // func has been removed from funcTable by CallGraph, see RemoveFileStaticRootNodes for details + } + uint32 stmtId = callSite.GetStmtId(); + CallNode *oldCallNode = static_cast(callerFunc->GetStmtNodeFromMeId(stmtId)); + if (oldCallNode == nullptr) { + continue; + } + oldCallNode->SetPUIdx(newFunc->GetPuidx()); + if (optCallerParam) { + for (size_t i = paramIndex; i < oldCallNode->GetNopndSize() - 1; ++i) { + oldCallNode->SetNOpndAt(i, oldCallNode->GetNopndAt(i + 1)); + } + oldCallNode->GetNopnd().resize(static_cast(oldCallNode->GetNumOpnds() - 1)); + oldCallNode->SetNumOpnds(static_cast(oldCallNode->GetNumOpnds() - 1)); + } + } + } + } +} + +void IpaClone::ComupteValue(const IntVal& value, const IntVal& paramValue, + const CompareNode &cond, uint64_t &bitRes) const { + if (cond.GetOpCode() == OP_gt) { + bitRes = static_cast(value > paramValue) | (bitRes << 1); + } else if (cond.GetOpCode() == OP_eq) { + bitRes = static_cast(value == paramValue) | (bitRes << 1); + } else if (cond.GetOpCode() == OP_lt) { + bitRes = static_cast(value < paramValue) | (bitRes << 1); + } else if (cond.GetOpCode() == OP_ge) { + bitRes = static_cast(value >= paramValue) | (bitRes << 1); + } else if (cond.GetOpCode() == OP_le) { + bitRes = static_cast(value <= paramValue) | (bitRes << 1); + } else if (cond.GetOpCode() == OP_ne) { + bitRes = static_cast(value != paramValue) | (bitRes << 1); + } +} + +void IpaClone::EvalCompareResult(std::vector &result, std::map> &evalMap, + std::map> &summary, uint32 index) const { + for (auto &it: std::as_const(summary)) { + int64 value = it.first; + uint64_t bitRes = 0; + bool runFlag = false; + for (auto &expr : result) { + StmtNode *stmt = curFunc->GetStmtNodeFromMeId(expr.GetStmtId()); + if (stmt == nullptr || expr.GetParamIndex() != index) { + continue; + } + runFlag = true; + if (stmt->GetOpCode() == OP_switch) { + continue; + } + IfStmtNode* ifStmt = static_cast(stmt); + CompareNode *cond = static_cast(ifStmt->Opnd(0)); + if (cond->Opnd(0)->GetOpCode() == OP_intrinsicop && + static_cast(cond->Opnd(0))->GetIntrinsic() == INTRN_C___builtin_expect) { + cond = static_cast(static_cast(cond->Opnd(0))->Opnd(0)); + } + PrimType primType = cond->GetOpndType(); + BaseNode *opnd1 = cond->Opnd(0)->GetOpCode() == OP_constval ? 
cond->Opnd(0) : cond->Opnd(1); + ConstvalNode *constNode = static_cast(opnd1); + MIRIntConst *constVal = safe_cast(constNode->GetConstVal()); + ASSERT(constVal, "invalid const type"); + if (primType != PTY_i64 && primType != PTY_u64 && primType != PTY_i32 && primType != PTY_u32 && + primType != PTY_i16 && primType != PTY_u16 && primType != PTY_i8 && primType != PTY_u8) { + runFlag = false; + break; + } + IntVal paramValue = { constVal->GetValue(), primType }; + IntVal newValue = { static_cast(value), primType }; + ComupteValue(newValue, paramValue, *cond, bitRes); + } + if (runFlag) { + (void)evalMap[bitRes].emplace_back(value); + } + } + return; +} + +void IpaClone::EvalImportantExpression(MIRFunction *func, std::vector &result) { + int paramSize = static_cast(func->GetFormalCount()); + uint32 puidx = func->GetPuidx(); + auto &calleeInfo = mirModule->GetCalleeParamAboutInt(); + for (int index = 0; index < paramSize; ++index) { + CalleePair keyPair(puidx, index); + if (calleeInfo.find(keyPair) == calleeInfo.end()) { + continue; + } + std::map > evalMap; + EvalCompareResult(result, evalMap, calleeInfo[keyPair], static_cast(index)); + // Later: Now we just the consider one parameter important expression + std::vector filterRes; + if (!evalMap.empty()) { + bool hasBrExpr = false; + for (auto &expr : result) { + if (expr.GetParamIndex() == static_cast(index) && + func->GetStmtNodeFromMeId(expr.GetStmtId()) != nullptr) { + hasBrExpr = IsBrCondOrIf(func->GetStmtNodeFromMeId(expr.GetStmtId())->GetOpCode()) || hasBrExpr; + (void)filterRes.emplace_back(expr); + // Resolve most numOfImpExprUpper important expression + if (filterRes.size() > kNumOfImpExprUpper) { + break; + } + } + } + if (hasBrExpr) { + DecideCloneFunction(filterRes, static_cast(index), evalMap); + return; + } + } + } +} + +void IpaClone::CloneNoImportantExpressFunction(MIRFunction *func, uint32 paramIndex) const { + uint32 puidx = curFunc->GetPuidx(); + CalleePair keyPair(puidx, paramIndex); + auto &calleeInfo = mirModule->GetCalleeParamAboutInt(); + std::string newFuncName = func->GetName() + ".constprop." 
+ std::to_string(paramIndex); + InlineTransformer::ConvertPStaticToFStatic(*func); + MIRFunction *newFunc = nullptr; + if (Options::profileUse && func->GetFuncProfData()) { + uint64_t clonedSiteFreqs = 0; + int64_t value = calleeInfo[keyPair].cbegin()->first; + for (auto &callSite : std::as_const(calleeInfo[keyPair][value])) { + MIRFunction *callerFunc = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(callSite.GetPuidx()); + uint32 stmtId = callSite.GetStmtId(); + CallNode *oldCallNode = static_cast(callerFunc->GetStmtNodeFromMeId(stmtId)); + if (oldCallNode == nullptr) { + continue; + } + uint64_t callsiteFreq = callerFunc->GetFuncProfData()->GetStmtFreq(stmtId); + clonedSiteFreqs += callsiteFreq; + } + newFunc = IpaCloneFunctionWithFreq(*func, newFuncName, clonedSiteFreqs); + } else { + newFunc = IpaCloneFunction(*func, newFuncName); + } + int64_t value = calleeInfo[keyPair].begin()->first; + RemoveUnneedParameter(newFunc, paramIndex, value); + for (auto &callSite : calleeInfo[keyPair][value]) { + MIRFunction *callerFunc = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(callSite.GetPuidx()); + if (callerFunc == nullptr) { + CHECK_FATAL(callSite.GetPuidx() != 0, "something wrong in calleeInfo?"); + continue; // func has been removed from funcTable by CallGraph, see RemoveFileStaticRootNodes for details + } + uint32 stmtId = callSite.GetStmtId(); + CallNode *oldCallNode = static_cast(callerFunc->GetStmtNodeFromMeId(stmtId)); + if (oldCallNode == nullptr) { + continue; + } + oldCallNode->SetPUIdx(newFunc->GetPuidx()); + for (size_t i = paramIndex; i < oldCallNode->GetNopndSize() - 1; ++i) { + oldCallNode->SetNOpndAt(i, oldCallNode->GetNopndAt(i + 1)); + } + oldCallNode->GetNopnd().resize(static_cast(oldCallNode->GetNumOpnds() - 1)); + oldCallNode->SetNumOpnds(static_cast(oldCallNode->GetNumOpnds() - 1)); + } +} + +bool IpaClone::CheckImportantExprHasBr(const std::vector &exprVec) const { + for (auto expr : exprVec) { + if (curFunc->GetStmtNodeFromMeId(expr.GetStmtId()) != nullptr && + IsBrCondOrIf(curFunc->GetStmtNodeFromMeId(expr.GetStmtId())->GetOpCode())) { + return true; + } + } + return false; +} + +void IpaClone::DoIpaClone() { + InitParams(); + for (uint32 i = 0; i < GlobalTables::GetFunctionTable().GetFuncTable().size(); ++i) { + MIRFunction *func = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(i); + if (func == nullptr) { + continue; + } + if (Options::stackProtectorStrong && func->GetMayWriteToAddrofStack()) { + continue; + } + curFunc = func; + std::map> &funcImportantExpr = mirModule->GetFuncImportantExpr(); + if (funcImportantExpr.find(func->GetPuidx()) != funcImportantExpr.end() && + CheckImportantExprHasBr(funcImportantExpr[func->GetPuidx()])) { + EvalImportantExpression(func, funcImportantExpr[func->GetPuidx()]); + } else { + auto &calleeInfo = mirModule->GetCalleeParamAboutInt(); + for (uint index = 0; index < func->GetFormalCount(); ++index) { + CalleePair keyPair(func->GetPuidx(), index); + if (calleeInfo.find(keyPair) != calleeInfo.end() && calleeInfo[keyPair].size() == 1 && + (calleeInfo[keyPair].begin())->second.size() > numOfConstpropValue) { + CloneNoImportantExpressFunction(func, index); + break; + } + } + } + } +} + +void M2MIpaClone::GetAnalysisDependence(AnalysisDep &aDep) const { + aDep.AddRequired(); + aDep.PreservedAllExcept(); +} + +bool M2MIpaClone::PhaseRun(maple::MIRModule &m) { + maple::MIRBuilder dexMirBuilder(&m); + cl = GetPhaseAllocator()->New(&m, GetPhaseMemPool(), dexMirBuilder); + cl->DoIpaClone(); + 
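+  // Cloning adds new functions and retargets call sites to new PU indices, so the
+  // cached call graph is invalidated here and rebuilt before later phases use it.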
GetAnalysisInfoHook()->ForceEraseAnalysisPhase(m.GetUniqueID(), &M2MCallGraph::id); + (void)GetAnalysisInfoHook()->ForceRunAnalysisPhase(&M2MCallGraph::id, m); + return true; +} +} // namespace maple diff --git a/src/mapleall/maple_ipa/src/ipa_collect.cpp b/src/mapleall/maple_ipa/src/ipa_collect.cpp new file mode 100644 index 0000000000000000000000000000000000000000..4340bdc50564cd570ce31f766d127163b931ec43 --- /dev/null +++ b/src/mapleall/maple_ipa/src/ipa_collect.cpp @@ -0,0 +1,384 @@ +/* + * Copyright (c) [2021-2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "ipa_collect.h" +#include "call_graph.h" +#include "inline_analyzer.h" +#include "maple_phase.h" +#include "option.h" +#include "string_utils.h" +#include "inline_summary.h" +#include "me_stack_protect.h" +#include "ipa_phase_manager.h" + +namespace maple { +void CollectIpaInfo::UpdateCaleeParaAboutFloat(MeStmt &meStmt, float paramValue, uint32 index, CallerSummary &summary) { + auto *callMeStmt = static_cast(&meStmt); + MIRFunction &called = callMeStmt->GetTargetFunction(); + CalleePair calleeKey(called.GetPuidx(), index); + std::map>> &calleeParamAboutFloat = + module.GetCalleeParamAboutFloat(); + calleeParamAboutFloat[calleeKey][paramValue].emplace_back(summary); +} + +void CollectIpaInfo::UpdateCaleeParaAboutDouble(MeStmt &meStmt, double paramValue, uint32 index, + CallerSummary &summary) { + auto *callMeStmt = static_cast(&meStmt); + MIRFunction &called = callMeStmt->GetTargetFunction(); + CalleePair calleeKey(called.GetPuidx(), index); + std::map>> &calleeParamAboutDouble = + module.GetCalleeParamAboutDouble(); + calleeParamAboutDouble[calleeKey][paramValue].emplace_back(summary); +} + +void CollectIpaInfo::UpdateCaleeParaAboutInt(MeStmt &meStmt, int64_t paramValue, uint32 index, CallerSummary &summary) { + auto *callMeStmt = static_cast(&meStmt); + MIRFunction &called = callMeStmt->GetTargetFunction(); + CalleePair calleeKey(called.GetPuidx(), index); + std::map>> &calleeParamAboutInt = + module.GetCalleeParamAboutInt(); + calleeParamAboutInt[calleeKey][paramValue].emplace_back(summary); +} + +bool CollectIpaInfo::IsConstKindValue(MeExpr *expr) const { + if (expr->GetMeOp() != kMeOpConst) { + return false; + } + MIRConst *constV = static_cast(expr)->GetConstVal(); + return constV->GetKind() == kConstInt || constV->GetKind() == kConstFloatConst || + constV->GetKind() == kConstDoubleConst; +} + +bool CollectIpaInfo::CheckImpExprStmt(const MeStmt &meStmt) const { + auto *node = meStmt.GetOpnd(0); + return IsConstKindValue(node->GetOpnd(0)) || IsConstKindValue(node->GetOpnd(1)); +} + +bool CollectIpaInfo::IsParameterOrUseParameter(const VarMeExpr *varExpr, uint32 &index) const { + OriginalSt *sym = varExpr->GetOst(); + MIRSymbol *paramSym = sym->GetMIRSymbol(); + if (sym->IsFormal() && sym->GetIndirectLev() == 0 && varExpr->IsDefByNo() && !varExpr->IsVolatile()) { + for (uint32 i = 0; i < curFunc->GetFormalCount(); i++) { + MIRSymbol *formalSt = curFunc->GetFormal(i); + if (formalSt != nullptr && 
paramSym->GetNameStrIdx() == formalSt->GetNameStrIdx()) { + index = i; + return true; + } + } + } + return false; +} + +// Now we just resolve two cases, we will collect more case in the future. +bool CollectIpaInfo::CollectBrImportantExpression(const MeStmt &meStmt, uint32 &index) const { + auto *opnd = meStmt.GetOpnd(0); + if (opnd->GetOp() == OP_eq || opnd->GetOp() == OP_ne || opnd->GetOp() == OP_gt || + opnd->GetOp() == OP_ge || opnd->GetOp() == OP_lt || opnd->GetOp() == OP_le) { + if (CheckImpExprStmt(meStmt)) { + auto subOpnd0 = opnd->GetOpnd(0); + auto subOpnd1 = opnd->GetOpnd(1); + MeExpr *expr = IsConstKindValue(subOpnd0) ? subOpnd1 : subOpnd0; + if (expr->GetOp() == OP_dread) { + if (IsParameterOrUseParameter(static_cast(expr), index)) { + return true; + } + } + } + } + return false; +} + +bool CollectIpaInfo::CollectSwitchImportantExpression(const MeStmt &meStmt, uint32 &index) const { + auto *expr = meStmt.GetOpnd(0); + if (expr->GetOp() == OP_neg) { + expr = expr->GetOpnd(0); + } + if (expr->GetOp() == OP_dread) { + return IsParameterOrUseParameter(static_cast(expr), index); + } + return false; +} + +bool CollectIpaInfo::CollectImportantExpression(const MeStmt &meStmt, uint32 &index) const { + if (meStmt.GetOp() == OP_switch) { + return CollectSwitchImportantExpression(meStmt, index); + } else { + return CollectBrImportantExpression(meStmt, index); + } +} + +void CollectIpaInfo::TraverseMeStmt(MeStmt &meStmt) { + if (Options::doOutline) { + TransformStmtToIntegerSeries(meStmt); + } + Opcode op = meStmt.GetOp(); + if (meStmt.GetOp() == OP_brfalse || meStmt.GetOp() == OP_brtrue || meStmt.GetOp() == OP_switch) { + uint32 index = 0; + if (CollectImportantExpression(meStmt, index)) { + ImpExpr imp(meStmt.GetMeStmtId(), index); + module.GetFuncImportantExpr()[curFunc->GetPuidx()].emplace_back(imp); + return; + } + } + if (op != OP_callassigned && op != OP_call) { + return; + } + auto *callMeStmt = static_cast(&meStmt); + MIRFunction &called = callMeStmt->GetTargetFunction(); + if (called.IsExtern() || called.IsVarargs()) { + return; + } + for (uint32 i = 0; i < callMeStmt->NumMeStmtOpnds() && i < called.GetFormalCount(); ++i) { + if (callMeStmt->GetOpnd(i)->GetMeOp() == kMeOpConst) { + ConstMeExpr *constExpr = static_cast(callMeStmt->GetOpnd(i)); + MIRSymbol *formalSt = called.GetFormal(i); + // Some vargs2 We cann't get the actual type + if (formalSt == nullptr) { + continue; + } + if (constExpr->GetConstVal()->GetKind() == kConstInt) { + if (IsPrimitiveInteger(formalSt->GetType()->GetPrimType())) { + CallerSummary summary(curFunc->GetPuidx(), callMeStmt->GetMeStmtId()); + auto *intConst = safe_cast(constExpr->GetConstVal()); + IntVal value = { intConst->GetValue(), formalSt->GetType()->GetPrimType() }; + UpdateCaleeParaAboutInt(meStmt, value.GetExtValue(), i, summary); + } + } else if (constExpr->GetConstVal()->GetKind() == kConstFloatConst) { + if (IsPrimitiveFloat(formalSt->GetType()->GetPrimType())) { + CallerSummary summary(curFunc->GetPuidx(), callMeStmt->GetMeStmtId()); + auto *floatConst = safe_cast(constExpr->GetConstVal()); + UpdateCaleeParaAboutFloat(meStmt, floatConst->GetValue(), i, summary); + } + } else if (constExpr->GetConstVal()->GetKind() == kConstDoubleConst) { + if (formalSt->GetType()->GetPrimType() == PTY_f64) { + CallerSummary summary(curFunc->GetPuidx(), callMeStmt->GetMeStmtId()); + auto *doubleConst = safe_cast(constExpr->GetConstVal()); + UpdateCaleeParaAboutDouble(meStmt, doubleConst->GetValue(), i, summary); + } + } + } + } +} + +void 
CollectIpaInfo::TransformStmtToIntegerSeries(MeStmt &meStmt) { + if (!Options::doOutline) { + return; + } + auto stmtInfo = StmtInfo(&meStmt, curFunc->GetPuidx(), allocator); + auto integerValue = stmtInfoToIntegerMap[stmtInfo]; + if (integerValue == 0) { + integerValue = stmtInfoToIntegerMap[stmtInfo] = GetCurrNewStmtIndex(); + } + meStmt.SetStmtInfoId(stmtInfoVector.size()); + (void)integerString.emplace_back(integerValue); + (void)stmtInfoVector.emplace_back(std::move(stmtInfo)); +} + +void CollectIpaInfo::Perform(MeFunction &func) { + // Pre-order traverse the dominance tree, so that each def is traversed + // before its use + for (auto *bb : func.GetCfg()->GetAllBBs()) { + if (bb == nullptr) { + continue; + } + // traversal on stmt + for (auto &meStmt : bb->GetMeStmts()) { + TraverseMeStmt(meStmt); + } + } + if (Options::enableGInline) { + auto dominancePhase = static_cast( + dataMap->GetVaildAnalysisPhase(func.GetUniqueID(), &MEDominance::id)); + Dominance *dom = dominancePhase->GetDomResult(); + CHECK_NULL_FATAL(dom); + Dominance *pdom = dominancePhase->GetPdomResult(); + CHECK_NULL_FATAL(pdom); + auto *meLoop = static_cast( + dataMap->GetVaildAnalysisPhase(func.GetUniqueID(), &MELoopAnalysis::id))->GetResult(); + InlineSummaryCollector collector(module.GetInlineSummaryAlloc(), func, *dom, *pdom, *meLoop); + collector.CollectInlineSummary(); + } + if (Options::stackProtectorStrong) { + bool mayWriteStack = FuncMayWriteStack(func); + if (mayWriteStack) { + func.GetMirFunc()->SetMayWriteToAddrofStack(); + } + } +} + +void CollectIpaInfo::CollectDefUsePosition(ScalarMeExpr &scalar, StmtInfoId stmtInfoId, + std::unordered_set &cycleCheck) { + if (cycleCheck.find(&scalar) != cycleCheck.end()) { + return; + } + (void)cycleCheck.insert(&scalar); + auto *ost = scalar.GetOst(); + if (!ost->IsLocal() || ost->GetIndirectLev() != 0) { + return; + } + auto &defUsePosition = stmtInfoVector[stmtInfoId].GetDefUsePositions(*ost); + switch (scalar.GetDefBy()) { + case kDefByNo: { + if (ost->IsFormal()) { + defUsePosition.definePositions.push_back(kInvalidIndex); + } + break; + } + case kDefByPhi: { + for (auto *scalarOpnd : scalar.GetDefPhi().GetOpnds()) { + CollectDefUsePosition(static_cast(*scalarOpnd), stmtInfoId, cycleCheck); + } + break; + } + default: { + auto defStmtInfoId = scalar.GetDefByMeStmt()->GetStmtInfoId(); + defUsePosition.definePositions.push_back(defStmtInfoId); + if (scalar.GetDefBy() != kDefByChi) { + auto &defUsePositionOfDefStmt = stmtInfoVector[defStmtInfoId].GetDefUsePositions(*ost); + defUsePositionOfDefStmt.usePositions.push_back(stmtInfoId); + break; + } + CollectDefUsePosition(*scalar.GetDefChi().GetRHS(), stmtInfoId, cycleCheck); + break; + } + } +} + +void CollectIpaInfo::TraverseMeExpr(MeExpr &meExpr, StmtInfoId stmtInfoId, + std::unordered_set &cycleCheck) { + if (meExpr.IsScalar()) { + CollectDefUsePosition(static_cast(meExpr), stmtInfoId, cycleCheck); + cycleCheck.clear(); + return; + } + for (size_t i = 0; i < meExpr.GetNumOpnds(); ++i) { + TraverseMeExpr(*meExpr.GetOpnd(i), stmtInfoId, cycleCheck); + } +} + +void CollectIpaInfo::SetLabel(size_t currStmtInfoId, LabelIdx label) { + auto *bb = curFunc->GetMeFunc()->GetCfg()->GetLabelBBAt(label); + CHECK_NULL_FATAL(bb); + auto jumpToStmtInfoId = GetRealFirstStmtInfoId(*bb); + (void)stmtInfoVector[currStmtInfoId].GetLocationsJumpTo().emplace_back(jumpToStmtInfoId); + (void)stmtInfoVector[jumpToStmtInfoId].GetLocationsJumpFrom().emplace_back(currStmtInfoId); +} + +void CollectIpaInfo::CollectJumpInfo(MeStmt &meStmt) { + 
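+  // Link each branch, goto and switch to the statements it can reach: the target
+  // records this statement in its jump-from list and this statement records the
+  // target in its jump-to list, which the region-identify pass consults when it
+  // checks whether a candidate region is entered or left through such edges.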
auto stmtInfoId = meStmt.GetStmtInfoId(); + switch (meStmt.GetOp()) { + case OP_brtrue: + case OP_brfalse: { + auto label = static_cast(meStmt).GetOffset(); + SetLabel(stmtInfoId, label); + break; + } + case OP_goto: { + auto label = static_cast(meStmt).GetOffset(); + SetLabel(stmtInfoId, label); + break; + } + case OP_switch: { + auto &switchStmt = static_cast(meStmt); + SetLabel(stmtInfoId, switchStmt.GetDefaultLabel()); + for (auto casePair : switchStmt.GetSwitchTable()) { + SetLabel(stmtInfoId, casePair.second); + } + break; + } + default: { + break; + } + } +} + +StmtInfoId CollectIpaInfo::GetRealFirstStmtInfoId(BB &bb) { + if (!bb.IsMeStmtEmpty()) { + return bb.GetFirstMe()->GetStmtInfoId(); + } + CHECK_FATAL(bb.GetSucc().size() == 1, "empty bb followed with illegal succ size"); + return GetRealFirstStmtInfoId(*bb.GetSucc().front()); +} + +void CollectIpaInfo::TraverseStmtInfo(size_t position) { + std::unordered_set cycleCheck; + for (size_t i = position; i < stmtInfoVector.size(); ++i) { + auto *meStmt = stmtInfoVector[i].GetMeStmt(); + if (meStmt == nullptr) { + continue; + } + CollectJumpInfo(*meStmt); + for (size_t opndIndex = 0; opndIndex < meStmt->NumMeStmtOpnds(); ++opndIndex) { + TraverseMeExpr(*meStmt->GetOpnd(opndIndex), i, cycleCheck); + } + if (meStmt->GetOp() == OP_iassign && static_cast(meStmt)->GetLHSVal()->GetBase()) { + TraverseMeExpr(*static_cast(meStmt)->GetLHSVal()->GetBase(), i, cycleCheck); + } + auto *muList = meStmt->GetMuList(); + if (muList == nullptr) { + continue; + } + for (auto &mu : *muList) { + CollectDefUsePosition(*mu.second, i, cycleCheck); + } + } +} + +void CollectIpaInfo::RunOnScc(SCCNode &scc) { + for (auto *cgNode : scc.GetNodes()) { + auto currStmtPosition = stmtInfoVector.size(); + curFunc = cgNode->GetMIRFunction(); + MeFunction *meFunc = curFunc->GetMeFunc(); + Perform(*meFunc); + if (Options::doOutline) { + PushInvalidKeyBack(GetCurrNewStmtIndex()); + TraverseStmtInfo(currStmtPosition); + } + } +} + +void CollectIpaInfo::Dump() { + LogInfo::MapleLogger() << "integer string: "; + for (auto ele : integerString) { + LogInfo::MapleLogger() << ele << " "; + } + LogInfo::MapleLogger() << "\n"; + + LogInfo::MapleLogger() << "stmtnode"; + for (size_t i = 0; i < stmtInfoVector.size(); ++i) { + LogInfo::MapleLogger() << i << ": " << "\n"; + auto &stmtInfo = stmtInfoVector[i]; + if (stmtInfo.GetPuIdx() != kInvalidPuIdx) { + auto *function = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(stmtInfo.GetPuIdx()); + module.SetCurFunction(function); + } + stmtInfoVector[i].DumpStmtNode(); + } + LogInfo::MapleLogger() << "\n"; +} + +void SCCCollectIpaInfo::GetAnalysisDependence(maple::AnalysisDep &aDep) const { + aDep.AddRequired(); + aDep.SetPreservedAll(); +} + +bool SCCCollectIpaInfo::PhaseRun(maple::SCCNode &scc) { + AnalysisDataManager *dataMap = GET_ANALYSIS(SCCPrepare, scc); + auto *hook = GetAnalysisInfoHook(); + auto *ipaInfo = static_cast(hook->GetBindingPM())->GetResult(); + ipaInfo->SetDataMap(dataMap); + ipaInfo->RunOnScc(scc); + return true; +} +} // namespace maple diff --git a/src/mapleall/maple_ipa/src/ipa_phase_manager.cpp b/src/mapleall/maple_ipa/src/ipa_phase_manager.cpp new file mode 100644 index 0000000000000000000000000000000000000000..073d9fd874ddf302978076cbbd1eb4cc7153ce31 --- /dev/null +++ b/src/mapleall/maple_ipa/src/ipa_phase_manager.cpp @@ -0,0 +1,260 @@ +/* + * Copyright (c) [2021-2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. 
+ * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "ipa_phase_manager.h" +#include "pme_emit.h" +#include "mpl_profdata_parser.h" +#include "ipa_collect.h" +#include "ipa_side_effect.h" +#include "prop_return_null.h" +#include "prop_parameter_type.h" + +#define JAVALANG (mirModule.IsJavaModule()) +#define CLANG (mirModule.IsCModule()) + +namespace maple { +void IpaSccPM::Init(MIRModule &m) { + SetQuiet(true); + m.SetInIPA(true); + MeOption::mergeStmts = false; + MeOption::propDuringBuild = false; + MeOption::layoutWithPredict = false; + ipaInfo = GetPhaseAllocator()->New(m, *GetPhaseMemPool()); + DoPhasesPopulate(m); +} + +bool IpaSccPM::PhaseRun(MIRModule &m) { + if (theMIRModule->HasPartO2List()) { + return false; + } + bool oldProp = MeOption::propDuringBuild; + bool oldMerge = MeOption::mergeStmts; + bool oldLayout = MeOption::layoutWithPredict; + Init(m); + bool changed = false; + auto admMempool = AllocateMemPoolInPhaseManager("Ipa Phase Manager's Analysis Data Manager mempool"); + auto *serialADM = GetManagerMemPool()->New(*(admMempool.get())); + CallGraph *cg = GET_ANALYSIS(M2MCallGraph, m); + // Need reverse sccV + const MapleVector*> &topVec = cg->GetSCCTopVec(); + for (MapleVector*>::const_reverse_iterator it = topVec.rbegin(); it != topVec.rend(); ++it) { + if (!IsQuiet()) { + LogInfo::MapleLogger() << ">>>>>>>>>>> Optimizing SCC ---\n"; + (*it)->Dump(); + } + auto meFuncMP = std::make_unique(memPoolCtrler, "maple_ipa per-scc mempool"); + auto meFuncStackMP = std::make_unique(memPoolCtrler, ""); + bool runScc = false; + for (auto *cgNode : (*it)->GetNodes()) { + MIRFunction *func = cgNode->GetMIRFunction(); + if (func->IsEmpty()) { + continue; + } + runScc = true; + m.SetCurFunction(func); + MemPool *versMP = new ThreadLocalMemPool(memPoolCtrler, "first verst mempool"); + MeFunction &meFunc = *(meFuncMP->New(&m, func, meFuncMP.get(), *meFuncStackMP, versMP, "unknown")); + func->SetMeFunc(&meFunc); + meFunc.PartialInit(); + if (!IsQuiet()) { + LogInfo::MapleLogger() << "---Preparing Function for scc phase < " << func->GetName() << " > ---\n"; + } + meFunc.IPAPrepare(); + } + if (!runScc) { + continue; + } + for (size_t i = 0; i < phasesSequence.size(); ++i) { + const MaplePhaseInfo *curPhase = MaplePhaseRegister::GetMaplePhaseRegister()->GetPhaseByID(phasesSequence[i]); + if (!IsQuiet()) { + LogInfo::MapleLogger() << "---Run scc " << (curPhase->IsAnalysis() ? 
"analysis" : "transform") + << " Phase [ " << curPhase->PhaseName() << " ]---\n"; + } + changed |= RunAnalysisPhase>, SCCNode>(*curPhase, *serialADM, **it); + } + serialADM->EraseAllAnalysisPhase(); + } + MeOption::mergeStmts = oldMerge; + MeOption::propDuringBuild = oldProp; + MeOption::layoutWithPredict = oldLayout; + if (Options::dumpPhase == "outline") { + ipaInfo->Dump(); + } + m.SetInIPA(false); + return changed; +} + +void IpaSccPM::DoPhasesPopulate(const MIRModule &mirModule) { + (void)mirModule; + if (Options::profileGen) { + AddPhase("sccprofile", true); + } else { + AddPhase("sccprepare", true); + AddPhase("prop_param_type", MeOption::npeCheckMode != SafetyCheckMode::kNoCheck); + AddPhase("prop_return_attr", MeOption::npeCheckMode != SafetyCheckMode::kNoCheck); + AddPhase("collect_ipa_info", true); + AddPhase("sccsideeffect", Options::sideEffect); + AddPhase("sccemit", true); + } +} + +void IpaSccPM::GetAnalysisDependence(maple::AnalysisDep &aDep) const { + aDep.AddRequired(); + aDep.AddRequired(); + aDep.AddPreserved(); + aDep.AddPreserved(); + if (Options::profileUse) { + aDep.AddRequired(); + aDep.AddPreserved(); + } +} + +void SCCPrepare::Dump(const MeFunction &f, const std::string phaseName) const { + if (Options::dumpIPA && (Options::dumpFunc == f.GetName() || Options::dumpFunc == "*")) { + LogInfo::MapleLogger() << ">>>>> Dump after " << phaseName << " <<<<<\n"; + f.Dump(false); + LogInfo::MapleLogger() << ">>>>> Dump after End <<<<<\n\n"; + } +} + +bool SCCPrepare::PhaseRun(SCCNode &scc) { + SetQuiet(true); + AddPhase("mecfgbuild", true); + if (Options::profileUse) { + AddPhase("splitcriticaledge", true); + AddPhase("profileUse", true); + } + AddPhase("ssatab", true); + AddPhase("aliasclass", true); + AddPhase("ssa", true); + AddPhase("irmapbuild", true); + AddPhase("objSize", true); + AddPhase("hprop", true); + AddPhase("identloops", Options::enableGInline); // for me_predict when collecting inline summary + + // Not like other phasemanager which use temp mempool to hold analysis results generated from the sub phases. + // Here we use GetManagerMemPool which lives longer than this phase(manager) itself to hold all the analysis result. + // So the following phase can access the result in this phase. + result = GetManagerMemPool()->New(*GetPhaseMemPool()); + for (auto *cgNode : scc.GetNodes()) { + MIRFunction *func = cgNode->GetMIRFunction(); + if (func->IsEmpty()) { + continue; + } + MIRModule &m = *func->GetModule(); + m.SetCurFunction(func); + MeFunction &meFunc = *func->GetMeFunc(); + for (size_t i = 0; i < phasesSequence.size(); ++i) { + const MaplePhaseInfo *phase = MaplePhaseRegister::GetMaplePhaseRegister()->GetPhaseByID(phasesSequence[i]); + if (!IsQuiet()) { + LogInfo::MapleLogger() << " >> Prepare " << (phase->IsAnalysis() ? 
"analysis" : "transform") + << " Phase [ " << phase->PhaseName() << " ] <<\n"; + } + if (phase->IsAnalysis()) { + (void)RunAnalysisPhase(*phase, *result, meFunc, 1); + } else { + (void)RunTransformPhase(*phase, *result, meFunc, 1); + } + Dump(meFunc, phase->PhaseName()); + } + } + return false; +} + +void SCCEmit::Dump(MeFunction &f, const std::string phaseName) const { + if (Options::dumpIPA && (f.GetName() == Options::dumpFunc || Options::dumpFunc == "*")) { + LogInfo::MapleLogger() << ">>>>> Dump after " << phaseName << " <<<<<\n"; + f.GetMirFunc()->Dump(); + LogInfo::MapleLogger() << ">>>>> Dump after End <<<<<\n\n"; + } +} + +bool SCCEmit::PhaseRun(SCCNode &scc) { + SetQuiet(true); + auto *map = GET_ANALYSIS(SCCPrepare, scc); + if (map == nullptr) { + return false; + } + auto admMempool = AllocateMemPoolInPhaseManager("Ipa Phase Manager's Analysis Data Manager mempool"); + auto *serialADM = GetManagerMemPool()->New(*(admMempool.get())); + serialADM->CopyAnalysisResultFrom(*map); + for (auto *cgNode : scc.GetNodes()) { + MIRFunction *func = cgNode->GetMIRFunction(); + if (func->GetBody() == nullptr) { + continue; + } + MIRModule &m = *func->GetModule(); + m.SetCurFunction(func); + const MaplePhaseInfo *phase = MaplePhaseRegister::GetMaplePhaseRegister()->GetPhaseByID(&MEPreMeEmission::id); + if (!IsQuiet()) { + LogInfo::MapleLogger() << " ---call " << (phase->IsAnalysis() ? "analysis" : "transform") + << " Phase [ " << phase->PhaseName() << " ]---\n"; + } + (void)RunAnalysisPhase(*phase, *serialADM, *func->GetMeFunc()); + Dump(*func->GetMeFunc(), phase->PhaseName()); + delete func->GetMeFunc()->GetPmeMempool(); + func->GetMeFunc()->SetPmeMempool(nullptr); + } + serialADM->EraseAllAnalysisPhase(); + return false; +} + +void SCCEmit::GetAnalysisDependence(maple::AnalysisDep &aDep) const { + aDep.AddRequired(); +} + +bool SCCProfile::PhaseRun(SCCNode &scc) { + SetQuiet(true); + AddPhase("mecfgbuild", true); + if (Options::profileGen) { + AddPhase("splitcriticaledge", true); + AddPhase("profileGen", true); + } + AddPhase("profgenEmit", true); + // Not like other phasemanager which use temp mempool to hold analysis results generated from the sub phases. + // Here we use GetManagerMemPool which lives longer than this phase(manager) itself to hold all the analysis result. + // So the following phase can access the result in this phase. + result = GetManagerMemPool()->New(*GetPhaseMemPool()); + for (auto *cgNode : scc.GetNodes()) { + MIRFunction *func = cgNode->GetMIRFunction(); + if (func->IsEmpty()) { + continue; + } + MIRModule &m = *func->GetModule(); + m.SetCurFunction(func); + MeFunction &meFunc = *func->GetMeFunc(); + for (size_t i = 0; i < phasesSequence.size(); ++i) { + const MaplePhaseInfo *phase = MaplePhaseRegister::GetMaplePhaseRegister()->GetPhaseByID(phasesSequence[i]); + if (!IsQuiet()) { + LogInfo::MapleLogger() << " >> Prepare " << (phase->IsAnalysis() ? 
"analysis" : "transform") + << " Phase [ " << phase->PhaseName() << " ] <<\n"; + } + if (phase->IsAnalysis()) { + (void)RunAnalysisPhase(*phase, *result, meFunc, 1); + } else { + (void)RunTransformPhase(*phase, *result, meFunc, 1); + } + } + } + return false; +} + +MAPLE_ANALYSIS_PHASE_REGISTER(SCCPrepare, sccprepare) +MAPLE_ANALYSIS_PHASE_REGISTER(SCCProfile, sccprofile) +MAPLE_ANALYSIS_PHASE_REGISTER(SCCCollectIpaInfo, collect_ipa_info); +MAPLE_ANALYSIS_PHASE_REGISTER(SCCPropReturnAttr, prop_return_attr); +MAPLE_TRANSFORM_PHASE_REGISTER(SCCPropParamType, prop_param_type); +MAPLE_ANALYSIS_PHASE_REGISTER(SCCSideEffect, sccsideeffect) +MAPLE_ANALYSIS_PHASE_REGISTER(SCCEmit, sccemit) +} // namespace maple diff --git a/src/mapleall/maple_ipa/src/ipa_side_effect.cpp b/src/mapleall/maple_ipa/src/ipa_side_effect.cpp new file mode 100644 index 0000000000000000000000000000000000000000..d332332df698f956937220d31639e929de1e7b30 --- /dev/null +++ b/src/mapleall/maple_ipa/src/ipa_side_effect.cpp @@ -0,0 +1,446 @@ +/* + * Copyright (c) [2021-2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "ipa_side_effect.h" +#include "func_desc.h" +#include "inline_analyzer.h" +namespace maple { +const std::map whiteList = { +#include "func_desc.def" +}; + +const FuncDesc &SideEffect::GetFuncDesc(MeFunction &f) { + return SideEffect::GetFuncDesc(*f.GetMirFunc()); +} + +const FuncDesc &SideEffect::GetFuncDesc(MIRFunction &f) { + auto it = whiteList.find(f.GetName()); + if (it != whiteList.end()) { + return it->second; + } + return f.GetFuncDesc(); +} + +const std::map &SideEffect::GetWhiteList() { + return whiteList; +} + +void SideEffect::ParamInfoUpdater(size_t vstIdx, const PI &calleeParamInfo) { + for (size_t callerFormalIdx = 0; callerFormalIdx < vstsValueAliasWithFormal.size(); ++callerFormalIdx) { + auto &formalValueAlias = vstsValueAliasWithFormal[callerFormalIdx]; + if (formalValueAlias.find(vstIdx) != formalValueAlias.end()) { + curFuncDesc->SetParamInfoNoBetterThan(callerFormalIdx, calleeParamInfo); + } + } +} + +void SideEffect::PropInfoFromOpnd(MeExpr &opnd, const PI &calleeParamInfo) { + MeExpr &base = opnd.GetAddrExprBase(); + OriginalSt *ost = nullptr; + switch (base.GetMeOp()) { + case kMeOpVar: { + auto &dread = static_cast(base); + ost = dread.GetOst(); + for (auto vstIdx : ost->GetVersionsIndices()) { + ParamInfoUpdater(vstIdx, calleeParamInfo); + } + break; + } + case kMeOpAddrof: { + AddrofMeExpr &addrofMeExpr = static_cast(base); + // As in CollectFormalOst, this is conservative to make sure it's right. + // For example: + // void callee(int *p) : write memory that p points to. + // call callee(&x) : this will modify x but we prop info of 'write memory' to x. 
+ ost = addrofMeExpr.GetOst(); + ASSERT(ost != nullptr, "null ptr check"); + for (auto vstIdx : ost->GetVersionsIndices()) { + ParamInfoUpdater(vstIdx, calleeParamInfo); + } + break; + } + case kMeOpOp: { + if (base.GetOp() == OP_select) { + PropInfoFromOpnd(*base.GetOpnd(kSecondOpnd), calleeParamInfo); + PropInfoFromOpnd(*base.GetOpnd(kThirdOpnd), calleeParamInfo); + } + break; + } + default: + break; + } +} + +void SideEffect::PropParamInfoFromCallee(const MeStmt &call, MIRFunction &callee) { + const FuncDesc &desc = callee.GetFuncDesc(); + size_t skipFirstOpnd = kOpcodeInfo.IsICall(call.GetOp()) ? 1 : 0; + size_t actualParaCount = call.NumMeStmtOpnds() - skipFirstOpnd; + for (size_t formalIdx = 0; formalIdx < actualParaCount; ++formalIdx) { + MeExpr *opnd = call.GetOpnd(formalIdx + skipFirstOpnd); + PropInfoFromOpnd(*opnd, desc.GetParamInfo(formalIdx)); + } +} + +void SideEffect::PropAllInfoFromCallee(const MeStmt &call, MIRFunction &callee) { + const FuncDesc &desc = callee.GetFuncDesc(); + if (!desc.IsPure() && !desc.IsConst()) { + curFuncDesc->SetFuncInfoNoBetterThan(FI::kUnknown); + } + if (desc.IsPure()) { + curFuncDesc->SetFuncInfoNoBetterThan(FI::kPure); + } + if (desc.IsConst()) { + curFuncDesc->SetFuncInfoNoBetterThan(FI::kConst); + } + PropParamInfoFromCallee(call, callee); +} + +void SideEffect::DealWithStmt(MeStmt &stmt) { + for (size_t i = 0; i < stmt.NumMeStmtOpnds(); ++i) { + DealWithOperand(stmt.GetOpnd(i)); + } + RetMeStmt *ret = safe_cast(&stmt); + if (ret != nullptr) { + DealWithReturn(*ret); + } + CallMeStmt *call = safe_cast(&stmt); + if (call != nullptr) { + MIRFunction *calleeFunc = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(call->GetPUIdx()); + PropAllInfoFromCallee(*call, *calleeFunc); + } + IcallMeStmt *icall = safe_cast(&stmt); + if (icall != nullptr) { + MIRFunction *mirFunc = meFunc->GetMirFunc(); + CGNode *icallCGNode = callGraph->GetCGNode(mirFunc); + CallInfo callInfo(stmt.GetMeStmtId()); + CHECK_NULL_FATAL(icallCGNode); + auto &callees = icallCGNode->GetCallee(); + auto it = callees.find(&callInfo); + if (it == callees.end() || it->second->empty()) { + // no candidates found, process conservatively + for (size_t formalIdx = 1; formalIdx < icall->NumMeStmtOpnds(); ++formalIdx) { + PropInfoFromOpnd(*icall->GetOpnd(formalIdx), PI::kUnknown); + } + } else { + for (auto *cgNode : *it->second) { + MIRFunction *calleeFunc = cgNode->GetMIRFunction(); + PropAllInfoFromCallee(*icall, *calleeFunc); + } + } + } + if (stmt.GetMuList() == nullptr) { + return; + } + // this may cause some kWriteMemoryOnly regard as kReadWriteMemory. + // Example: {a.f = b} mulist in return stmt will regard param a as used. 
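+ // DealWithOst only downgrades formals recorded in analysisLater: once their memory shows up in a mu-list they become kReadWriteMemory.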
+ for (auto &mu : std::as_const(*stmt.GetMuList())) { + DealWithOst(mu.first); + } +} + +void SideEffect::DealWithOst(OStIdx ostIdx) { + OriginalSt *ost = meFunc->GetMeSSATab()->GetSymbolOriginalStFromID(ostIdx); + DealWithOst(ost); +} + +void SideEffect::DealWithOst(const OriginalSt *ost) { + if (ost == nullptr) { + return; + } + for (auto &pair : analysisLater) { + if (pair.first == ost) { + curFuncDesc->SetParamInfoNoBetterThan(pair.second, PI::kReadWriteMemory); + return; + } + } +} + +void SideEffect::DealWithOperand(MeExpr *expr) { + if (expr == nullptr) { + return; + } + for (uint32 i = 0; i < expr->GetNumOpnds(); ++i) { + DealWithOperand(expr->GetOpnd(i)); + } + switch (expr->GetMeOp()) { + case kMeOpVar: { + ScalarMeExpr *dread = static_cast(expr); + OriginalSt *ost = dread->GetOst(); + DealWithOst(ost); + break; + } + case kMeOpIvar: { + auto *base = static_cast(expr)->GetBase(); + if (base->GetMeOp() == kMeOpVar) { + ScalarMeExpr *dread = static_cast(base); + DealWithOst(dread->GetOst()); + } + break; + } + default: + break; + } + return; +} + +void SideEffect::DealWithReturn(const RetMeStmt &retMeStmt) const { + if (retMeStmt.NumMeStmtOpnds() == 0) { + return; + } + MeExpr *ret = retMeStmt.GetOpnd(0); + if (ret->GetPrimType() == PTY_agg) { + curFuncDesc->SetReturnInfo(RI::kUnknown); + return; + } + if (!IsAddress(ret->GetPrimType())) { + return; + } + if (ret->GetType() != nullptr && ret->GetType()->IsMIRPtrType()) { + auto *ptrType = static_cast(ret->GetType()); + if (ptrType->GetPointedType()->GetPrimType() == PTY_agg) { + curFuncDesc->SetReturnInfo(RI::kUnknown); + return; + } + } + OriginalSt *retOst = nullptr; + size_t vstIdxOfRet = 0; + if (ret->IsScalar()) { + retOst = static_cast(ret)->GetOst(); + vstIdxOfRet = static_cast(ret)->GetVstIdx(); + } else if (ret->GetMeOp() == kMeOpIvar) { + auto *base = static_cast(ret)->GetBase(); + if (base->IsScalar()) { + retOst = static_cast(base)->GetOst(); + vstIdxOfRet = static_cast(base)->GetVstIdx(); + } + } + if (retOst == nullptr) { + return; + } + if (retOst->IsFormal()) { + curFuncDesc->SetReturnInfo(RI::kUnknown); + return; + } + std::set result; + alias->GetValueAliasSetOfVst(vstIdxOfRet, result); + for (auto valueAliasVstIdx : result) { + auto *meExpr = meFunc->GetIRMap()->GetVerst2MeExprTableItem(static_cast(valueAliasVstIdx)); + // meExpr of valueAliasVstIdx not created in IRMap, it must not occured in hssa-mefunction + if (meExpr == nullptr) { + continue; + } + OriginalSt *aliasOst = nullptr; + if (meExpr->GetMeOp() == kMeOpAddrof) { + auto ostIdx = static_cast(meExpr)->GetOstIdx(); + aliasOst = meFunc->GetMeSSATab()->GetOriginalStFromID(ostIdx); + } else if (meExpr->IsScalar()) { + aliasOst = static_cast(meExpr)->GetOst(); + } else { + CHECK_FATAL(false, "not supported meExpr"); + } + ASSERT(aliasOst != nullptr, "null ptr check"); + if (aliasOst->IsFormal()) { + curFuncDesc->SetReturnInfo(RI::kUnknown); + } + } +} + +void SideEffect::SolveVarArgs(MeFunction &f) const { + MIRFunction *func = f.GetMirFunc(); + if (func->IsVarargs()) { + for (size_t i = func->GetFormalCount(); i < kMaxParamCount; ++i) { + curFuncDesc->SetParamInfoNoBetterThan(i, PI::kUnknown); + } + curFuncDesc->SetFuncInfoNoBetterThan(FI::kUnknown); + } +} + +void SideEffect::CollectAllLevelOst(size_t vstIdx, std::set &result) { + (void)result.insert(vstIdx); + auto *nextLevelOsts = meFunc->GetMeSSATab()->GetNextLevelOsts(vstIdx); + if (nextLevelOsts == nullptr) { + return; + } + for (auto *nlOst : *nextLevelOsts) { + for (auto vstIdOfNextLevelOst : 
nlOst->GetVersionsIndices()) { + CollectAllLevelOst(vstIdOfNextLevelOst, result); + } + } +} + +void SideEffect::CollectFormalOst(MeFunction &f) { + MIRFunction *func = f.GetMirFunc(); + for (auto *ost : f.GetMeSSATab()->GetOriginalStTable().GetOriginalStVector()) { + if (ost == nullptr) { + continue; + } + if (!ost->IsLocal()) { + curFuncDesc->SetFuncInfoNoBetterThan(FI::kPure); + if (ost->GetVersionsIndices().size() > 1) { + curFuncDesc->SetFuncInfoNoBetterThan(FI::kUnknown); + } + } + if (ost->IsFormal() && ost->GetIndirectLev() == 0) { + auto idx = func->GetFormalIndex(ost->GetMIRSymbol()); + if (idx >= kMaxParamCount) { + continue; + } + + // Put level -1 ost into it, so we can get a conservative result. + // Because when we solve all vstsValueAliasWithFormal we regard every ost in it as lev 0. + + std::set vstValueAliasFormal; + if (ost->IsAddressTaken()) { + CollectAllLevelOst(ost->GetPointerVstIdx(), vstsValueAliasWithFormal[idx]); + alias->GetValueAliasSetOfVst(ost->GetPointerVstIdx(), vstValueAliasFormal); + } + CollectAllLevelOst(ost->GetZeroVersionIndex(), vstsValueAliasWithFormal[idx]); + alias->GetValueAliasSetOfVst(ost->GetZeroVersionIndex(), vstValueAliasFormal); + + for (size_t vstIdx: vstValueAliasFormal) { + auto *meExpr = meFunc->GetIRMap()->GetVerst2MeExprTableItem(static_cast(vstIdx)); + if (meExpr == nullptr || meExpr->GetMeOp() == kMeOpAddrof) { + // corresponding ScalarMeExpr has not been created in irmap for vstIdx. + CollectAllLevelOst(vstIdx, vstsValueAliasWithFormal[idx]); + continue; + } + CHECK_FATAL(meExpr->IsScalar(), "not supported MeExpr type"); + CHECK_FATAL(static_cast(meExpr)->GetVstIdx() == vstIdx, "VersionSt index must be equal"); + auto *aliasOst = static_cast(meExpr)->GetOst(); + if (aliasOst != ost) { + for (auto vstIdxOfAliasOst : aliasOst->GetVersionsIndices()) { + CollectAllLevelOst(vstIdxOfAliasOst, vstsValueAliasWithFormal[idx]); + } + } + } + } + } +} + +void SideEffect::AnalysisFormalOst() { + for (size_t formalIndex = 0; formalIndex < vstsValueAliasWithFormal.size(); ++formalIndex) { + for (size_t vstIdx : vstsValueAliasWithFormal[formalIndex]) { + curFuncDesc->SetParamInfoNoBetterThan(formalIndex, PI::kReadSelfOnly); + auto *meExpr = meFunc->GetIRMap()->GetVerst2MeExprTableItem(static_cast(vstIdx)); + if (meExpr == nullptr) { + continue; + } + if (meExpr->GetMeOp() == kMeOpAddrof) { + curFuncDesc->SetParamInfoNoBetterThan(formalIndex, PI::kUnknown); + curFuncDesc->SetFuncInfoNoBetterThan(FI::kUnknown); + continue; + } + CHECK_FATAL(meExpr->IsScalar(), "must be me scalar"); + auto *ost = static_cast(meExpr)->GetOst(); + if (ost->GetIndirectLev() == 0 && ost->GetVersionsIndices().size() == 1) { + curFuncDesc->SetParamInfoNoBetterThan(formalIndex, PI::kReadSelfOnly); + continue; + } + if (ost->GetIndirectLev() == 1) { + if (ost->GetVersionsIndices().size() == 1) { + curFuncDesc->SetParamInfoNoBetterThan(formalIndex, PI::kReadMemoryOnly); + curFuncDesc->SetFuncInfoNoBetterThan(FI::kPure); + } else { + analysisLater.insert(std::make_pair(ost, formalIndex)); + curFuncDesc->SetParamInfoNoBetterThan(formalIndex, PI::kWriteMemoryOnly); + curFuncDesc->SetFuncInfoNoBetterThan(FI::kUnknown); + } + continue; + } + if (ost->GetIndirectLev() > 1) { + if (ost->GetVersionsIndices().size() == 1) { + curFuncDesc->SetParamInfoNoBetterThan(formalIndex, PI::kReadMemoryOnly); + curFuncDesc->SetFuncInfoNoBetterThan(FI::kPure); + } else { + curFuncDesc->SetParamInfoNoBetterThan(formalIndex, PI::kUnknown); + curFuncDesc->SetFuncInfoNoBetterThan(FI::kUnknown); + } 
+ } + } + } +} + +bool SideEffect::Perform(MeFunction &f) { + MIRFunction *func = f.GetMirFunc(); + curFuncDesc = &func->GetFuncDesc(); + FuncDesc oldDesc = *curFuncDesc; + + if (func->GetFuncDesc().IsConfiged()) { + return false; + } + SolveVarArgs(f); + CollectFormalOst(f); + AnalysisFormalOst(); + for (auto *node : dom->GetReversePostOrder()) { + auto bb = f.GetCfg()->GetBBFromID(BBId(node->GetID())); + for (auto &stmt : bb->GetMeStmts()) { + DealWithStmt(stmt); + } + } + return !curFuncDesc->Equals(oldDesc); +} + +bool SCCSideEffect::PhaseRun(SCCNode &scc) { + for (CGNode *node : scc.GetNodes()) { + MIRFunction *func = node->GetMIRFunction(); + if (func != nullptr && !func->GetFuncDesc().IsConfiged()) { + func->InitFuncDescToBest(); + func->GetFuncDesc().SetReturnInfo(RI::kUnknown); + if (func->GetParamSize() > kMaxParamCount) { + func->GetFuncDesc().SetFuncInfoNoBetterThan(FI::kUnknown); + } + } + } + bool changed = true; + while (changed) { + changed = false; + auto *map = GET_ANALYSIS(SCCPrepare, scc); + for (CGNode *node : scc.GetNodes()) { + MIRFunction *func = node->GetMIRFunction(); + if (func == nullptr) { + continue; + } + MeFunction *meFunc = func->GetMeFunc(); + if (meFunc == nullptr || meFunc->GetCfg()->NumBBs() == 0) { + continue; + } + auto *phase = map->GetVaildAnalysisPhase(meFunc->GetUniqueID(), &MEDominance::id); + Dominance *dom = static_cast(phase)->GetDomResult(); + phase = map->GetVaildAnalysisPhase(meFunc->GetUniqueID(), &MEAliasClass::id); + AliasClass *alias = static_cast(phase)->GetResult(); + + phase = map->GetVaildAnalysisPhase(meFunc->GetUniqueID(), &MESSATab::id); + SSATab *meSSATab = static_cast(phase)->GetResult(); + CHECK_FATAL(meSSATab == meFunc->GetMeSSATab(), "IPA_PM may be wrong."); + MaplePhase *it = GetAnalysisInfoHook()->GetOverIRAnalyisData(*func->GetModule()); + CallGraph *cg = static_cast(it)->GetResult(); + SideEffect se(meFunc, dom, alias, cg); + changed = changed || se.Perform(*meFunc); + } + } + if (Options::dumpIPA) { + for (CGNode *node : scc.GetNodes()) { + MIRFunction *func = node->GetMIRFunction(); + FuncDesc &desc = func->GetFuncDesc(); + LogInfo::MapleLogger() << "funcid: " << func->GetPuidx() << " funcName: " << func->GetName() << std::endl; + desc.Dump(); + } + } + return false; +} + +void SCCSideEffect::GetAnalysisDependence(maple::AnalysisDep &aDep) const { + aDep.AddRequired(); +} +} // namespace maple diff --git a/src/mapleall/maple_ipa/src/old/do_ipa_escape_analysis.cpp b/src/mapleall/maple_ipa/src/old/do_ipa_escape_analysis.cpp new file mode 100644 index 0000000000000000000000000000000000000000..5fa3df4246586f998ef37e718dc9e3d703d8f050 --- /dev/null +++ b/src/mapleall/maple_ipa/src/old/do_ipa_escape_analysis.cpp @@ -0,0 +1,106 @@ +/* + * Copyright (c) [2019-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "do_ipa_escape_analysis.h" + +namespace maple { +#ifdef NOT_USED +AnalysisResult *DoIpaEA::Run(MeFunction *func, MeFuncResultMgr *m, ModuleResultMgr *mrm) { + if (func == nullptr) { + return nullptr; + } + MIRFunction *mirFunc = func->GetMirFunc(); + const std::map &summaryMap = mirFunc->GetModule()->GetEASummary(); + if (!mirFunc->GetModule()->IsInIPA() && summaryMap.size() == 0) { + return nullptr; + } + CHECK_FATAL(mrm != nullptr, "Needs module result manager for ipa"); + KlassHierarchy *kh = static_cast(mrm->GetAnalysisResult(MoPhase_CHA, &func->GetMIRModule())); + CHECK_FATAL(kh != nullptr, "KlassHierarchy phase has problem"); + MeIRMap *irMap = static_cast(m->GetAnalysisResult(MeFuncPhase_IRMAPBUILD, func)); + CHECK_FATAL(irMap != nullptr, "irMap phase has problem"); + + CallGraph *pcg = nullptr; + if (mirFunc->GetModule()->IsInIPA()) { + pcg = static_cast(mrm->GetAnalysisResult(MoPhase_CALLGRAPH_ANALYSIS, &func->GetMIRModule())); + } + MemPool *eaMemPool = memPoolCtrler.NewMemPool(PhaseName(), false /* isLcalPool */); + mirFunc->GetModule()->SetCurFunction(mirFunc); + + if (IPAEscapeAnalysis::kDebug) { + LogInfo::MapleLogger() << "=======IPAEA BEGIN======== " << mirFunc->GetName() << std::endl; + } + + IPAEscapeAnalysis ipaEA(kh, irMap, func, eaMemPool, pcg); + ipaEA.ConstructConnGraph(); + func->GetMirFunc()->GetEACG()->TrimGlobalNode(); + if (!mirFunc->GetModule()->IsInIPA()) { + auto it = summaryMap.find(func->GetMirFunc()->GetNameStrIdx()); + if (it != summaryMap.end() && it->second != nullptr) { + it->second->DeleteEACG(); + } + } + if (!mirFunc->GetModule()->IsInIPA() && IPAEscapeAnalysis::kDebug) { + func->GetMirFunc()->GetEACG()->CountObjEAStatus(); + } + if (IPAEscapeAnalysis::kDebug) { + LogInfo::MapleLogger() << "=======IPAEA END========" << mirFunc->GetName() << std::endl; + } + + delete eaMemPool; + return nullptr; +} + +AnalysisResult *DoIpaEAOpt::Run(MeFunction *func, MeFuncResultMgr *mgr, ModuleResultMgr *mrm) { + if (func == nullptr) { + return nullptr; + } + MIRFunction *mirFunc = func->GetMirFunc(); + const std::map &summaryMap = mirFunc->GetModule()->GetEASummary(); + if (!mirFunc->GetModule()->IsInIPA() && summaryMap.size() == 0) { + return nullptr; + } + CHECK_FATAL(mrm != nullptr, "Needs module result manager for ipa"); + KlassHierarchy *kh = static_cast(mrm->GetAnalysisResult(MoPhase_CHA, &func->GetMIRModule())); + CHECK_FATAL(kh != nullptr, "KlassHierarchy phase has problem"); + MeIRMap *irMap = static_cast(mgr->GetAnalysisResult(MeFuncPhase_IRMAPBUILD, func)); + CHECK_FATAL(irMap != nullptr, "irMap phase has problem"); + + mgr->InvalidAnalysisResult(MeFuncPhase_MELOOP, func); + IdentifyLoops *meLoop = static_cast(mgr->GetAnalysisResult(MeFuncPhase_MELOOP, func)); + CHECK_FATAL(meLoop != nullptr, "meLoop phase has problem"); + meLoop->MarkBB(); + + CallGraph *pcg = nullptr; + if (mirFunc->GetModule()->IsInIPA()) { + pcg = static_cast(mrm->GetAnalysisResult(MoPhase_CALLGRAPH_ANALYSIS, &func->GetMIRModule())); + } + MemPool *eaMemPool = memPoolCtrler.NewMemPool(PhaseName(), false /* isLcalPool */); + mirFunc->GetModule()->SetCurFunction(mirFunc); + + if (IPAEscapeAnalysis::kDebug) { + LogInfo::MapleLogger() << "=======IPAEAOPT BEGIN======== " << mirFunc->GetName() << std::endl; + } + + IPAEscapeAnalysis ipaEA(kh, irMap, func, eaMemPool, pcg); + ipaEA.DoOptimization(); + if (IPAEscapeAnalysis::kDebug) { + LogInfo::MapleLogger() << "=======IPAEAOPT END========" << mirFunc->GetName() << std::endl; + } + + delete eaMemPool; + return nullptr; +} 
+#endif +} diff --git a/src/mapleall/maple_ipa/src/old/ea_connection_graph.cpp b/src/mapleall/maple_ipa/src/old/ea_connection_graph.cpp new file mode 100644 index 0000000000000000000000000000000000000000..a313567e96d06b529f485775ed3c4df4b5a5ef16 --- /dev/null +++ b/src/mapleall/maple_ipa/src/old/ea_connection_graph.cpp @@ -0,0 +1,1054 @@ +/* + * Copyright (c) [2019-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "ea_connection_graph.h" + +namespace maple { +constexpr maple::uint32 kInvalid = 0xffffffff; +void EACGBaseNode::CheckAllConnectionInNodes() { +#ifdef DEBUG + for (EACGBaseNode *inNode : in) { + ASSERT_NOT_NULL(eaCG->nodes[inNode->id - 1]); + ASSERT(eaCG->nodes[inNode->id - 1] == inNode, "must be inNode"); + } + for (EACGBaseNode *outNode : out) { + ASSERT_NOT_NULL(eaCG->nodes[outNode->id - 1]); + ASSERT(eaCG->nodes[outNode->id - 1] == outNode, "must be outNode"); + } + for (EACGObjectNode *obj : pointsTo) { + ASSERT_NOT_NULL(eaCG->nodes[obj->id - 1]); + ASSERT(eaCG->nodes[obj->id - 1] == obj, "must be obj"); + } + if (IsFieldNode()) { + for (EACGObjectNode *obj : static_cast(this)->GetBelongsToObj()) { + ASSERT_NOT_NULL(eaCG->nodes[obj->id - 1]); + ASSERT(eaCG->nodes[obj->id - 1] == obj, "must be obj"); + } + } +#endif +} + +bool EACGBaseNode::AddOutNode(EACGBaseNode &newOut) { + if (out.find(&newOut) != out.end()) { + return false; + } + bool newIsLocal = newOut.UpdateEAStatus(eaStatus); + if (eaStatus == kGlobalEscape && pointsTo.size() > 0) { + if (newIsLocal) { + eaCG->SetCGUpdateFlag(); + } + return newIsLocal; + } + (void)out.insert(&newOut); + (void)newOut.in.insert(this); + ASSERT(newOut.pointsTo.size() != 0, "must be greater than zero"); + bool hasChanged = UpdatePointsTo(newOut.pointsTo); + eaCG->SetCGUpdateFlag(); + return hasChanged; +} + +void EACGBaseNode::PropagateEAStatusForNode(const EACGBaseNode *subRoot [[maybe_unused]]) const { + for (EACGBaseNode *outNode : out) { + (void)outNode->UpdateEAStatus(eaStatus); + } +} + +std::string EACGBaseNode::GetName(const IRMap *irMap) const { + std::string name; + if (irMap == nullptr || meExpr == nullptr) { + name += std::to_string(id); + } else { + name += std::to_string(id); + name += "\\n"; + if (meExpr->GetMeOp() == kMeOpVar) { + VarMeExpr *varMeExpr = static_cast(meExpr); + const MIRSymbol *sym = varMeExpr->GetOst()->GetMIRSymbol(); + name += ((sym->GetStIdx().IsGlobal() ? "$" : "%") + sym->GetName() + "\\nmx" + + std::to_string(meExpr->GetExprID()) + " (field)" + std::to_string(varMeExpr->GetFieldID())); + } else if (meExpr->GetMeOp() == kMeOpIvar) { + IvarMeExpr *ivarMeExpr = static_cast(meExpr); + MeExpr *base = ivarMeExpr->GetBase(); + VarMeExpr *varMeExpr = nullptr; + if (base->GetMeOp() == kMeOpVar) { + varMeExpr = static_cast(base); + } else { + name += std::to_string(id); + return name; + } + const MIRSymbol *sym = varMeExpr->GetOst()->GetMIRSymbol(); + name += (std::string("base :") + (sym->GetStIdx().IsGlobal() ? 
"$" : "%") + sym->GetName() + "\\nmx" + + std::to_string(meExpr->GetExprID()) + " (field)" + std::to_string(ivarMeExpr->GetFieldID())); + } else if (meExpr->GetOp() == OP_gcmalloc || meExpr->GetOp() == OP_gcmallocjarray) { + name += "mx" + std::to_string(meExpr->GetExprID()); + } + } + return name; +} + +bool EACGBaseNode::UpdatePointsTo(const std::set &cPointsTo) { + size_t oldPtSize = pointsTo.size(); + pointsTo.insert(cPointsTo.cbegin(), cPointsTo.cend()); + if (oldPtSize == pointsTo.size()) { + return false; + } + for (EACGObjectNode *pt : pointsTo) { + pt->Insert2PointsBy(this); + } + for (EACGBaseNode *pred : in) { + (void)pred->UpdatePointsTo(pointsTo); + } + return true; +} + +void EACGBaseNode::GetNodeFormatInDot(std::string &label, std::string &color) const { + switch (GetEAStatus()) { + case kNoEscape: + label += "NoEscape"; + color = "darkgreen"; + break; + case kArgumentEscape: + label += "ArgEscape"; + color = "brown"; + break; + case kReturnEscape: + label += "RetEscape"; + color = "orange"; + break; + case kGlobalEscape: + label += "GlobalEscape"; + color = "red"; + break; + } +} + +bool EACGBaseNode::CanIgnoreRC() const { + for (auto obj : pointsTo) { + if (!obj->GetIgnorRC()) { + return false; + } + } + return true; +} + +void EACGObjectNode::CheckAllConnectionInNodes() { +#ifdef DEBUG + for (EACGBaseNode *inNode : in) { + ASSERT_NOT_NULL(eaCG->nodes[inNode->id - 1]); + ASSERT(eaCG->nodes[inNode->id - 1] == inNode, "must be inNode"); + } + for (EACGBaseNode *outNode : out) { + ASSERT_NOT_NULL(eaCG->nodes[outNode->id - 1]); + ASSERT(eaCG->nodes[outNode->id - 1] == outNode, "must be outNode"); + } + for (EACGBaseNode *pBy : pointsBy) { + ASSERT_NOT_NULL(eaCG->nodes[pBy->id - 1]); + ASSERT(eaCG->nodes[pBy->id - 1] == pBy, "must be pBy"); + } + for (auto fieldPair : fieldNodes) { + EACGFieldNode *field = fieldPair.second; + ASSERT(field->fieldID == fieldPair.first, "must be fieldPair.first"); + ASSERT_NOT_NULL(eaCG->nodes[field->id - 1]); + ASSERT(eaCG->nodes[field->id - 1] == field, "must be filed"); + } +#endif +} + +bool EACGObjectNode::IsPointedByFieldNode() const { + for (EACGBaseNode *pBy : pointsBy) { + if (pBy->IsFieldNode()) { + return true; + } + } + return false; +} + +bool EACGObjectNode::AddOutNode(EACGBaseNode &newOut) { + ASSERT(newOut.IsFieldNode(), "must be fieldNode"); + EACGFieldNode *field = static_cast(&newOut); + fieldNodes[field->GetFieldID()] = field; + (void)newOut.UpdateEAStatus(eaStatus); + field->AddBelongTo(this); + return true; +} + +bool EACGObjectNode::ReplaceByGlobalNode() { + ASSERT(out.size() == 0, "must be zero"); + for (EACGBaseNode *node : pointsBy) { + node->pointsTo.erase(this); + (void)node->pointsTo.insert(eaCG->GetGlobalObject()); + } + pointsBy.clear(); + for (EACGBaseNode *inNode : in) { + (void)inNode->out.erase(this); + (void)inNode->out.insert(eaCG->GetGlobalObject()); + } + in.clear(); + for (auto fieldPair : fieldNodes) { + EACGFieldNode *field = fieldPair.second; + field->belongsTo.erase(this); + } + fieldNodes.clear(); + if (meExpr != nullptr) { + eaCG->expr2Nodes[meExpr]->clear(); + eaCG->expr2Nodes[meExpr]->insert(eaCG->GetGlobalObject()); + } + ASSERT(eaCG->nodes[id - 1] == this, "must be"); + eaCG->nodes[id - 1] = nullptr; + return true; +} + +void EACGObjectNode::PropagateEAStatusForNode(const EACGBaseNode *subRoot [[maybe_unused]]) const { + for (auto fieldNodePair : fieldNodes) { + EACGFieldNode *field = fieldNodePair.second; + (void)field->UpdateEAStatus(eaStatus); + } +} + +void 
EACGObjectNode::DumpDotFile(std::ostream &fout, std::map &dumped, bool dumpPt, + const IRMap *irMap) { + if (dumped[this]) { + return; + } + dumped[this] = true; + + std::string name = GetName(nullptr); + std::string label = GetName(irMap) + " Object\\n"; + std::string color; + GetNodeFormatInDot(label, color); + std::string style; + if (IsPhantom()) { + style = "dotted"; + } else { + style = "bold"; + } + fout << name << " [shape=box, label=\"" << label << "\", fontcolor=" << color << ", style=" << style << "];\n"; + for (auto fieldPair : std::as_const(fieldNodes)) { + EACGBaseNode *field = fieldPair.second; + fout << name << "->" << field->GetName(nullptr) << ";" << "\n"; + } + for (auto fieldPair : std::as_const(fieldNodes)) { + EACGBaseNode *field = fieldPair.second; + field->DumpDotFile(fout, dumped, dumpPt, irMap); + } +} + +void EACGRefNode::DumpDotFile(std::ostream &fout, std::map &dumped, bool dumpPt, + const IRMap *irMap) { + if (dumped[this]) { + return; + } + dumped[this] = true; + + std::string name = GetName(nullptr); + std::string label = GetName(irMap) + " Reference\\n"; + if (IsStaticRef()) { + label += "Static\\n"; + } + std::string color; + GetNodeFormatInDot(label, color); + fout << name << " [shape=ellipse, label=\"" << label << "\", fontcolor=" << color << "];" << "\n"; + if (dumpPt) { + for (auto obj : pointsTo) { + fout << name << "->" << obj->GetName(nullptr) << ";" << "\n"; + } + for (auto obj : pointsTo) { + obj->DumpDotFile(fout, dumped, dumpPt, irMap); + } + } else { + for (auto outNode : out) { + std::string edgeStyle; + if (!outNode->IsObjectNode()) { + edgeStyle = " [style =\"dotted\"]"; + } + fout << name << "->" << outNode->GetName(nullptr) << edgeStyle << ";" << "\n"; + } + for (auto outNode : out) { + outNode->DumpDotFile(fout, dumped, dumpPt, irMap); + } + } +} + +bool EACGRefNode::ReplaceByGlobalNode() { + for (EACGBaseNode *inNode : in) { + ASSERT(inNode->id > 3, "must be greater than three"); // the least valid idx is 3 + (void)inNode->out.erase(this); + (void)inNode->out.insert(eaCG->GetGlobalReference()); + } + in.clear(); + for (EACGBaseNode *outNode : out) { + (void)outNode->in.erase(this); + } + out.clear(); + for (EACGObjectNode *base : pointsTo) { + base->EraseNodeFromPointsBy(this); + } + pointsTo.clear(); + if (meExpr != nullptr) { + eaCG->expr2Nodes[meExpr]->clear(); + eaCG->expr2Nodes[meExpr]->insert(eaCG->GetGlobalReference()); + } + ASSERT(eaCG->nodes[id - 1] == this, "must be this"); + eaCG->nodes[id - 1] = nullptr; + return true; +} + +void EACGPointerNode::DumpDotFile(std::ostream &fout, std::map &dumped, bool dumpPt, + const IRMap *irMap) { + if (dumped[this]) { + return; + } + dumped[this] = true; + std::string name = GetName(nullptr); + std::string label = GetName(irMap) + "\\nPointer Indirect Level : " + std::to_string(indirectLevel) + "\\n"; + std::string color; + GetNodeFormatInDot(label, color); + fout << name << " [shape=ellipse, label=\"" << label << "\", fontcolor=" << color << "];" << "\n"; + for (EACGBaseNode *outNode : out) { + fout << name << "->" << outNode->GetName(nullptr) << " [style =\"dotted\", color = \"blue\"];" << "\n"; + } + for (auto outNode : out) { + outNode->DumpDotFile(fout, dumped, dumpPt, irMap); + } +} + +void EACGActualNode::DumpDotFile(std::ostream &fout, std::map &dumped, bool dumpPt, + const IRMap *irMap) { + if (dumped[this]) { + return; + } + dumped[this] = true; + + std::string name = GetName(nullptr); + std::string label; + if (IsReturn()) { + label = GetName(irMap) + "\\nRet Idx : " + 
std::to_string(GetArgIndex()) + "\\n"; + } else { + label = GetName(irMap) + "\\nArg Idx : " + std::to_string(GetArgIndex()) + + " Call Site : " + std::to_string(GetCallSite()) + "\\n"; + } + std::string style; + if (IsPhantom()) { + style = "dotted"; + } else { + style = "bold"; + } + std::string color; + GetNodeFormatInDot(label, color); + fout << name << " [shape=ellipse, label=\"" << label << "\", fontcolor=" << color << ", style=" << style << "];\n"; + if (dumpPt) { + for (auto obj : pointsTo) { + fout << name << "->" << obj->GetName(nullptr) << ";\n"; + } + for (auto obj : pointsTo) { + obj->DumpDotFile(fout, dumped, dumpPt, irMap); + } + } else { + for (auto outNode : out) { + std::string edgeStyle; + if (!outNode->IsObjectNode()) { + edgeStyle = " [style =\"dotted\"]"; + } + fout << name << "->" << outNode->GetName(nullptr) << edgeStyle << ";\n"; + } + for (auto outNode : out) { + outNode->DumpDotFile(fout, dumped, dumpPt, irMap); + } + } +} + +bool EACGActualNode::ReplaceByGlobalNode() { + ASSERT(callSiteInfo == kInvalid, "must be invalid"); + ASSERT(out.size() == 1, "the size of out must be one"); + ASSERT(pointsTo.size() == 1, "the size of pointsTo must be one"); + for (EACGBaseNode *inNode : in) { + inNode->out.erase(this); + } + in.clear(); + return false; +} + +void EACGFieldNode::DumpDotFile(std::ostream &fout, std::map &dumped, bool dumpPt, + const IRMap *irMap) { + if (dumped[this]) { + return; + } + dumped[this] = true; + std::string name = GetName(nullptr); + std::string label = GetName(irMap) + "\\nFIdx : " + std::to_string(GetFieldID()) + "\\n"; + std::string color; + GetNodeFormatInDot(label, color); + std::string style; + if (IsPhantom()) { + style = "dotted"; + } else { + style = "bold"; + } + fout << name << " [shape=circle, label=\"" << label << "\", fontcolor=" << color << ", style=" << style << + ", margin=0];\n"; + if (dumpPt) { + for (auto obj : pointsTo) { + fout << name << "->" << obj->GetName(nullptr) << ";\n"; + } + for (auto obj : pointsTo) { + obj->DumpDotFile(fout, dumped, dumpPt, irMap); + } + } else { + for (auto outNode : out) { + std::string edgeStyle; + if (!outNode->IsObjectNode()) { + edgeStyle = " [style =\"dotted\"]"; + } + fout << name << "->" << outNode->GetName(nullptr) << edgeStyle << ";\n"; + } + for (auto outNode : out) { + outNode->DumpDotFile(fout, dumped, dumpPt, irMap); + } + } +} + +bool EACGFieldNode::ReplaceByGlobalNode() { + for (EACGObjectNode *obj : pointsTo) { + obj->pointsBy.erase(this); + } + pointsTo.clear(); + (void)pointsTo.insert(eaCG->GetGlobalObject()); + for (EACGBaseNode *outNode : out) { + outNode->in.erase(this); + } + out.clear(); + (void)out.insert(eaCG->GetGlobalObject()); + bool canDelete = true; + std::set tmp = belongsTo; + for (EACGObjectNode *obj : tmp) { + if (obj->GetEAStatus() != kGlobalEscape) { + canDelete = false; + } else { + belongsTo.erase(obj); + } + } + if (canDelete) { + ASSERT(eaCG->nodes[id - 1] == this, "must be this"); + eaCG->nodes[id - 1] = nullptr; + for (EACGBaseNode *inNode : in) { + ASSERT(!inNode->IsObjectNode(), "must be ObjectNode"); + inNode->out.erase(this); + (void)inNode->out.insert(eaCG->globalField); + } + for (auto exprPair : eaCG->expr2Nodes) { + size_t eraseSize = exprPair.second->erase(this); + if (eraseSize != 0 && exprPair.first->GetMeOp() != kMeOpIvar && exprPair.first->GetMeOp() != kMeOpOp) { + ASSERT(false, "must be kMeOpIvar or kMeOpOp"); + } + if (exprPair.second->size() == 0) { + exprPair.second->insert(eaCG->globalField); + } + } + in.clear(); + return true; + } + 
return false; +} + +void EAConnectionGraph::DeleteEACG() const { + for (EACGBaseNode *node : nodes) { + if (node == nullptr) { + continue; + } + delete node; + node = nullptr; + } +} + +void EAConnectionGraph::TrimGlobalNode() const { + for (EACGBaseNode *node : nodes) { + if (node == nullptr) { + continue; + } + constexpr int leastIdx = 3; + if (node->id <= leastIdx) { + continue; + } + bool canDelete = false; + if (node->GetEAStatus() == kGlobalEscape) { + canDelete = node->ReplaceByGlobalNode(); + } +#ifdef DEBUG + node->CheckAllConnectionInNodes(); +#endif + if (canDelete) { + delete node; + node = nullptr; + } + } +} + +void EAConnectionGraph::InitGlobalNode() { + globalObj = CreateObjectNode(nullptr, kNoEscape, true, TyIdx(0)); + globalRef = CreateReferenceNode(nullptr, kNoEscape, true); + (void)globalRef->AddOutNode(*globalObj); + (void)globalRef->AddOutNode(*globalRef); + globalField = CreateFieldNode(nullptr, kNoEscape, -1, globalObj, true); // -1 expresses global + (void)globalField->AddOutNode(*globalObj); + (void)globalField->AddOutNode(*globalRef); + (void)globalField->AddOutNode(*globalField); + (void)globalRef->AddOutNode(*globalField); + globalObj->eaStatus = kGlobalEscape; + globalField->eaStatus = kGlobalEscape; + globalRef->eaStatus = kGlobalEscape; +} + +EACGObjectNode *EAConnectionGraph::CreateObjectNode(MeExpr *expr, EAStatus initialEas, + bool isPh, TyIdx tyIdx [[maybe_unused]]) { + EACGObjectNode *newObjNode = + new (std::nothrow) EACGObjectNode(mirModule, alloc, *this, expr, initialEas, nodes.size() + 1, isPh); + ASSERT_NOT_NULL(newObjNode); + nodes.push_back(newObjNode); + if (expr != nullptr) { + if (expr2Nodes.find(expr) == expr2Nodes.end()) { + expr2Nodes[expr] = alloc->GetMemPool()->New>(alloc->Adapter()); + expr2Nodes[expr]->insert(newObjNode); + } else { + ASSERT(false, "must find expr"); + } + } + return newObjNode; +} + +EACGPointerNode *EAConnectionGraph::CreatePointerNode(MeExpr *expr, EAStatus initialEas, int inderictL) { + EACGPointerNode *newPointerNode = + new (std::nothrow) EACGPointerNode(mirModule, alloc, *this, expr, initialEas, nodes.size() + 1, inderictL); + ASSERT_NOT_NULL(newPointerNode); + nodes.push_back(newPointerNode); + if (expr != nullptr) { + if (expr2Nodes.find(expr) == expr2Nodes.end()) { + expr2Nodes[expr] = alloc->GetMemPool()->New>(alloc->Adapter()); + expr2Nodes[expr]->insert(newPointerNode); + } else { + ASSERT(false, "must find expr"); + } + } + return newPointerNode; +} + +EACGRefNode *EAConnectionGraph::CreateReferenceNode(MeExpr *expr, EAStatus initialEas, bool isStatic) { + EACGRefNode *newRefNode = + new (std::nothrow) EACGRefNode(mirModule, alloc, *this, expr, initialEas, nodes.size() + 1, isStatic); + ASSERT_NOT_NULL(newRefNode); + nodes.push_back(newRefNode); + if (expr != nullptr) { + if (expr2Nodes.find(expr) == expr2Nodes.end()) { + expr2Nodes[expr] = alloc->GetMemPool()->New>(alloc->Adapter()); + expr2Nodes[expr]->insert(newRefNode); + } else { + ASSERT(false, "must find expr"); + } + if (expr->GetMeOp() != kMeOpVar && expr->GetMeOp() != kMeOpAddrof && + expr->GetMeOp() != kMeOpReg && expr->GetMeOp() != kMeOpOp) { + ASSERT(false, "must be kMeOpVar, kMeOpAddrof, kMeOpReg or kMeOpOp"); + } + } + return newRefNode; +} + +void EAConnectionGraph::TouchCallSite(uint32 callSiteInfo) { + CHECK_FATAL(callSite2Nodes.find(callSiteInfo) != callSite2Nodes.end(), "find failed"); + if (callSite2Nodes[callSiteInfo] == nullptr) { + MapleVector *tmp = alloc->GetMemPool()->New>(alloc->Adapter()); + callSite2Nodes[callSiteInfo] = tmp; 
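+ // CreateActualNode later appends this call site's argument/return nodes to the vector reserved here.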
+ } +} + +EACGActualNode *EAConnectionGraph::CreateActualNode(EAStatus initialEas, bool isReurtn, bool isPh, + uint8 argIdx, uint32 callSiteInfo) { + MeExpr *expr = nullptr; + ASSERT(isPh, "must be ph"); + ASSERT(callSiteInfo != 0, "must not be zero"); + EACGActualNode *newActNode = new (std::nothrow) EACGActualNode( + mirModule, alloc, *this, expr, initialEas, nodes.size() + 1, isReurtn, isPh, argIdx, callSiteInfo); + ASSERT_NOT_NULL(newActNode); + nodes.push_back(newActNode); + if (expr != nullptr) { + if (expr2Nodes.find(expr) == expr2Nodes.end()) { + expr2Nodes[expr] = alloc->GetMemPool()->New>(alloc->Adapter()); + expr2Nodes[expr]->insert(newActNode); + } else { + ASSERT(false, "must find expr"); + } + } + if (callSiteInfo != kInvalid) { + ASSERT(callSite2Nodes[callSiteInfo] != nullptr, "must touched before"); + callSite2Nodes[callSiteInfo]->push_back(newActNode); +#ifdef DEBUG + CheckArgNodeOrder(*callSite2Nodes[callSiteInfo]); +#endif + } else { + funcArgNodes.push_back(newActNode); + } + return newActNode; +} + +EACGFieldNode *EAConnectionGraph::CreateFieldNode(MeExpr *expr, EAStatus initialEas, FieldID fId, + EACGObjectNode *belongTo, bool isPh) { + EACGFieldNode *newFieldNode = new (std::nothrow) EACGFieldNode( + mirModule, alloc, *this, expr, initialEas, nodes.size() + 1, fId, belongTo, isPh); + ASSERT_NOT_NULL(newFieldNode); + nodes.push_back(newFieldNode); + if (expr != nullptr) { + if (expr2Nodes.find(expr) == expr2Nodes.end()) { + expr2Nodes[expr] = alloc->GetMemPool()->New>(alloc->Adapter()); + expr2Nodes[expr]->insert(newFieldNode); + } else { + expr2Nodes[expr]->insert(newFieldNode); + } + if (expr->GetMeOp() != kMeOpIvar && expr->GetMeOp() != kMeOpOp) { + ASSERT(false, "must be kMeOpIvar or kMeOpOp"); + } + } + return newFieldNode; +} + +EACGBaseNode *EAConnectionGraph::GetCGNodeFromExpr(MeExpr *me) { + if (expr2Nodes.find(me) == expr2Nodes.end()) { + return nullptr; + } + return *(expr2Nodes[me]->begin()); +} + +void EAConnectionGraph::UpdateExprOfNode(EACGBaseNode &node, MeExpr *me) { + if (expr2Nodes.find(me) == expr2Nodes.end()) { + expr2Nodes[me] = alloc->GetMemPool()->New>(alloc->Adapter()); + expr2Nodes[me]->insert(&node); + } else { + if (node.IsFieldNode()) { + expr2Nodes[me]->insert(&node); + } else { + if (expr2Nodes[me]->find(&node) == expr2Nodes[me]->end()) { + CHECK_FATAL(false, "must be filed node"); + } + } + } + node.SetMeExpr(*me); +} + +void EAConnectionGraph::UpdateExprOfGlobalRef(MeExpr *me) { + UpdateExprOfNode(*globalRef, me); +} + +EACGActualNode *EAConnectionGraph::GetReturnNode() const { + if (funcArgNodes.size() == 0) { + return nullptr; + } + EACGActualNode *ret = static_cast(funcArgNodes[funcArgNodes.size() - 1]); + if (ret->IsReturn()) { + return ret; + } + return nullptr; +} +#ifdef DEBUG +void EAConnectionGraph::CheckArgNodeOrder(MapleVector &funcArgV) { + uint8 preIndex = 0; + for (size_t i = 0; i < funcArgV.size(); ++i) { + ASSERT(funcArgV[i]->IsActualNode(), "must be ActualNode"); + EACGActualNode *actNode = static_cast(funcArgV[i]); + if (i == funcArgV.size() - 1) { + if (actNode->IsReturn()) { + continue; + } else { + ASSERT(actNode->GetArgIndex() >= preIndex, "must be greater than preIndex"); + } + } else { + ASSERT(!actNode->IsReturn(), "must be return"); + ASSERT(actNode->GetArgIndex() >= preIndex, "must be greater than preIndex"); + } + preIndex = actNode->GetArgIndex(); + } +} +#endif +bool EAConnectionGraph::ExprCanBeOptimized(MeExpr &expr) { + if (expr2Nodes.find(&expr) == expr2Nodes.end()) { + MeExpr *rhs = nullptr; + if 
(expr.GetMeOp() == kMeOpVar) { + ASSERT(static_cast(&expr)->GetDefBy() == kDefByStmt, "must be kDefByStmt"); + ASSERT(static_cast(&expr)->GetDefStmt()->GetOp() == OP_dassign, "must be OP_dassign"); + MeStmt *defStmt = static_cast(&expr)->GetDefStmt(); + DassignMeStmt *dassignStmt = static_cast(defStmt); + rhs = dassignStmt->GetRHS(); + } else if (expr.GetMeOp() == kMeOpReg) { + ASSERT(static_cast(&expr)->GetDefBy() == kDefByStmt, "must be kDefByStmt"); + ASSERT(static_cast(&expr)->GetDefStmt()->GetOp() == OP_regassign, "must be OP_regassign"); + MeStmt *defStmt = static_cast(&expr)->GetDefStmt(); + AssignMeStmt *regassignStmt = static_cast(defStmt); + rhs = regassignStmt->GetRHS(); + } else { + CHECK_FATAL(false, "impossible"); + } + ASSERT(expr2Nodes.find(rhs) != expr2Nodes.end(), "impossible"); + expr = *rhs; + } + MapleSet &nodesTmp = *expr2Nodes[&expr]; + + for (EACGBaseNode *node : nodesTmp) { + for (EACGObjectNode *obj : node->GetPointsToSet()) { + if (obj->GetEAStatus() != kNoEscape && obj->GetEAStatus() != kReturnEscape) { + return false; + } + } + } + return true; +} + +MapleVector *EAConnectionGraph::GetCallSiteArgNodeVector(uint32 callSite) { + CHECK_FATAL(callSite2Nodes.find(callSite) != callSite2Nodes.end(), "find failed"); + ASSERT_NOT_NULL(callSite2Nodes[callSite]); + return callSite2Nodes[callSite]; +} + +// if we have scc of connection graph, it will be more efficient. +void EAConnectionGraph::PropogateEAStatus() { + bool oldStatus = CGHasUpdated(); + do { + UnSetCGUpdateFlag(); + for (EACGBaseNode *node : nodes) { + if (node == nullptr) { + continue; + } + if (node->IsObjectNode()) { + EACGObjectNode *obj = static_cast(node); + for (auto fieldPair : obj->GetFieldNodeMap()) { + EACGBaseNode *field = fieldPair.second; + (void)field->UpdateEAStatus(obj->GetEAStatus()); + } + } else { + for (EACGBaseNode *pointsToNode : node->GetPointsToSet()) { + (void)pointsToNode->UpdateEAStatus(node->GetEAStatus()); + } + } + } + ASSERT(!CGHasUpdated(), "must be Updated"); + } while (CGHasUpdated()); + RestoreStatus(oldStatus); +} + +const MapleVector *EAConnectionGraph::GetFuncArgNodeVector() const { + return &funcArgNodes; +} + +// this func is called from callee context +void EAConnectionGraph::UpdateEACGFromCaller(const MapleVector &callerCallSiteArg, + const MapleVector &calleeFuncArg) { + ASSERT(abs(static_cast(callerCallSiteArg.size()) - static_cast(calleeFuncArg.size())) <= 1, "greater than"); + + UnSetCGUpdateFlag(); + for (uint32 i = 0; i < callerCallSiteArg.size(); ++i) { + EACGBaseNode *callerNode = callerCallSiteArg[i]; + ASSERT_NOT_NULL(callerNode); + ASSERT(callerNode->IsActualNode(), "must be ActualNode"); + if ((i == callerCallSiteArg.size() - 1) && static_cast(callerNode)->IsReturn()) { + continue; + } + bool hasGlobalEA = false; + for (EACGObjectNode *obj : callerNode->GetPointsToSet()) { + if (obj->GetEAStatus() == kGlobalEscape) { + hasGlobalEA = true; + break; + } + } + if (hasGlobalEA) { + EACGBaseNode *calleeNode = (calleeFuncArg)[i]; + for (EACGObjectNode *obj : calleeNode->GetPointsToSet()) { + (void)obj->UpdateEAStatus(kGlobalEscape); + } + } + } + if (CGHasUpdated()) { + PropogateEAStatus(); + } + TrimGlobalNode(); +} + +void EAConnectionGraph::DumpDotFile(const IRMap *irMap, bool dumpPt, MapleVector *dumpVec) { + if (dumpVec == nullptr) { + dumpVec = &nodes; + } + std::filebuf fb; + std::string outFile = GlobalTables::GetStrTable().GetStringFromStrIdx(funcStIdx) + "-connectiongraph.dot"; + fb.open(outFile, std::ios::trunc | std::ios::out); + 
CHECK_FATAL(fb.is_open(), "open file failed"); + std::ostream cgDotFile(&fb); + cgDotFile << "digraph connectiongraph{\n"; + std::map dumped; + for (auto node : nodes) { + dumped[node] = false; + } + for (EACGBaseNode *node : *dumpVec) { + if (node == nullptr) { + continue; + } + if (dumped[node]) { + continue; + } + node->DumpDotFile(cgDotFile, dumped, dumpPt, irMap); + dumped[node] = true; + } + cgDotFile << "}\n"; + fb.close(); +} + +void EAConnectionGraph::CountObjEAStatus() const { + int sum = 0; + int eaCount[4]; // There are four EAStatus. + for (size_t i = 0; i < 4; ++i) { + eaCount[i] = 0; + } + for (EACGBaseNode *node : nodes) { + if (node == nullptr) { + continue; + } + + if (node->IsObjectNode()) { + EACGObjectNode *objNode = static_cast(node); + if (!objNode->IsPhantom()) { + CHECK_FATAL(objNode->locInfo != nullptr, "Impossible"); + MIRType *type = nullptr; + const MeExpr *expr = objNode->GetMeExpr(); + CHECK_FATAL(expr != nullptr, "Impossible"); + if (expr->GetOp() == OP_gcmalloc || expr->GetOp() == OP_gcpermalloc) { + TyIdx tyIdx = static_cast(expr)->GetTyIdx(); + type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx); + } else { + TyIdx tyIdx = static_cast(expr)->GetTyIdx(); + type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx); + } + LogInfo::MapleLogger() << "[LOCATION] [" << objNode->locInfo->GetModName() << " " << + objNode->locInfo->GetFileId() << " " << objNode->locInfo->GetLineId() << " " << + EscapeName(objNode->GetEAStatus()) << " " << expr->GetExprID() << " "; + type->Dump(0, false); + LogInfo::MapleLogger() << "]\n"; + ++sum; + ++eaCount[node->GetEAStatus()]; + } + } + } + LogInfo::MapleLogger() << "[gcmalloc object statistics] " << + GlobalTables::GetStrTable().GetStringFromStrIdx(funcStIdx) << " " << + "Gcmallocs: " << sum << " " << "NoEscape: " << eaCount[kNoEscape] << " " << + "RetEscape: " << eaCount[kReturnEscape] << " " << "ArgEscape: " << eaCount[kArgumentEscape] << " " << + "GlobalEscape: " << eaCount[kGlobalEscape] << "\n"; +} + +void EAConnectionGraph::RestoreStatus(bool old) { + if (old) { + SetCGHasUpdated(); + } else { + UnSetCGUpdateFlag(); + } +} + +// Update caller's ConnectionGraph using callee's summary information. +// If the callee's summary is not found, we just mark all the pointsTo nodes of caller's actual node to GlobalEscape. +// Otherwise, we do these steps: +// 1, update caller nodes using callee's summary, new node might be added into caller's CG in this step. +// 2, update caller edges using callee's summary, new points-to edge might be added into caller's CG in this step. 
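+// 3, map the callee return node's points-to objects back to the caller's return actual node (UpdateCallerRetNode).
+// MergeCG returns true only when the caller's graph actually changed.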
+bool EAConnectionGraph::MergeCG(MapleVector &caller, const MapleVector *callee) { + TrimGlobalNode(); + bool cgChanged = false; + bool oldStatus = CGHasUpdated(); + UnSetCGUpdateFlag(); + if (callee == nullptr) { + for (EACGBaseNode *actualInCaller : caller) { + for (EACGObjectNode *p : actualInCaller->GetPointsToSet()) { + (void)p->UpdateEAStatus(EAStatus::kGlobalEscape); + } + } + cgChanged = CGHasUpdated(); + if (!cgChanged) { + RestoreStatus(oldStatus); + } + TrimGlobalNode(); + return cgChanged; + } + size_t callerSize = caller.size(); + size_t calleeSize = callee->size(); + if (callerSize > calleeSize) { + ASSERT((callerSize - calleeSize) <= 1, "must be one in EAConnectionGraph::MergeCG()"); + } else { + ASSERT((calleeSize - callerSize) <= 1, "must be one in EAConnectionGraph::MergeCG()"); + } + if (callerSize == 0 || calleeSize == 0) { + cgChanged = CGHasUpdated(); + if (!cgChanged) { + RestoreStatus(oldStatus); + } + return cgChanged; + } + if ((callerSize != calleeSize) && + (callerSize != calleeSize + 1 || static_cast(callee->back())->IsReturn()) && + (callerSize != calleeSize - 1 || !static_cast(callee->back())->IsReturn())) { + ASSERT(false, "Impossible"); + } + + callee2Caller.clear(); + UpdateCallerNodes(caller, *callee); + UpdateCallerEdges(); + UpdateCallerRetNode(caller, *callee); + callee2Caller.clear(); + + cgChanged = CGHasUpdated(); + if (!cgChanged) { + RestoreStatus(oldStatus); + } + TrimGlobalNode(); + return cgChanged; +} + +void EAConnectionGraph::AddMaps2Object(EACGObjectNode *caller, EACGObjectNode *callee) { + if (callee2Caller.find(callee) == callee2Caller.end()) { + std::set callerSet; + callee2Caller[callee] = callerSet; + } + (void)callee2Caller[callee].insert(caller); +} + +void EAConnectionGraph::UpdateCallerRetNode(MapleVector &caller, + const MapleVector &callee) { + EACGActualNode *lastInCaller = static_cast(caller.back()); + EACGActualNode *lastInCallee = static_cast(callee.back()); + if (!lastInCaller->IsReturn()) { + return; + } + CHECK_FATAL(lastInCaller->GetOutSet().size() == 1, "Impossible"); + for (EACGBaseNode *callerRetNode : lastInCaller->GetOutSet()) { + for (EACGObjectNode *calleeRetNode : lastInCallee->GetPointsToSet()) { + for (EACGObjectNode *objInCaller : callee2Caller[calleeRetNode]) { + auto pointsToSet = callerRetNode->GetPointsToSet(); + if (pointsToSet.find(objInCaller) == pointsToSet.end()) { + (void)callerRetNode->AddOutNode(*objInCaller); + } + } + } + } +} + +// Update caller node by adding some nodes which are mapped from callee. +void EAConnectionGraph::UpdateCallerNodes(const MapleVector &caller, + const MapleVector &callee) { + const size_t callerSize = caller.size(); + const size_t calleeSize = callee.size(); + const size_t actualCount = ((callerSize < calleeSize) ? callerSize : calleeSize); + bool firstTime = true; + + for (size_t i = 0; i < actualCount; ++i) { + EACGBaseNode *actualInCaller = caller.at(i); + EACGBaseNode *actualInCallee = callee.at(i); + UpdateNodes(*actualInCallee, *actualInCaller, firstTime); + } +} + +// Update caller edges using information from callee. 
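+// For every callee object already mapped into the caller, each field points-to edge p.f -> q is replayed on the
+// corresponding caller objects via UpdateCallerEdgesInternal.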
+void EAConnectionGraph::UpdateCallerEdges() { + std::set set; + for (auto pair : std::as_const(callee2Caller)) { + (void)set.insert(pair.first); + } + for (EACGObjectNode *p : set) { + for (auto tempPair : p->GetFieldNodeMap()) { + int32 fieldID = tempPair.first; + EACGBaseNode *fieldNode = tempPair.second; + for (EACGObjectNode *q : fieldNode->GetPointsToSet()) { + UpdateCallerEdgesInternal(p, fieldID, q); + } + } + } +} + +// Update caller edges using information of given ObjectNode from callee. +void EAConnectionGraph::UpdateCallerEdgesInternal(EACGObjectNode *node1, int32 fieldID, EACGObjectNode *node2) { + CHECK_FATAL(callee2Caller.find(node1) != callee2Caller.end(), "find failed"); + CHECK_FATAL(callee2Caller.find(node2) != callee2Caller.end(), "find failed"); + for (EACGObjectNode *p1 : callee2Caller[node1]) { + for (EACGObjectNode *q1 : callee2Caller[node2]) { + EACGFieldNode *fieldNode = p1->GetFieldNodeFromIdx(fieldID); + if (fieldNode == nullptr) { + CHECK_NULL_FATAL(node1); + fieldNode = node1->GetFieldNodeFromIdx(fieldID); + CHECK_FATAL(fieldNode != nullptr, "fieldNode must not be nullptr because we have handled it before!"); + CHECK_FATAL(fieldNode->IsBelongTo(this), "must be belong to this"); + (void)p1->AddOutNode(*fieldNode); + } + (void)fieldNode->AddOutNode(*q1); + } + } +} + +void EAConnectionGraph::UpdateNodes(const EACGBaseNode &actualInCallee, EACGBaseNode &actualInCaller, bool firstTime) { + ASSERT(actualInCallee.GetPointsToSet().size() > 0, "actualInCallee->GetPointsToSet().size() must gt 0!"); + for (EACGObjectNode *objInCallee : actualInCallee.GetPointsToSet()) { + if (actualInCaller.GetPointsToSet().size() == 0) { + std::set &mapsTo = callee2Caller[objInCallee]; + if (mapsTo.size() > 0) { + for (EACGObjectNode *temp : mapsTo) { + (void)actualInCaller.AddOutNode(*temp); + } + } else if (objInCallee->IsBelongTo(this)) { + ASSERT(false, "must be belong to this"); + } else { + EACGObjectNode *phantom = CreateObjectNode(nullptr, actualInCaller.GetEAStatus(), true, TyIdx(0)); + (void)actualInCaller.AddOutNode(*phantom); + AddMaps2Object(phantom, objInCallee); + UpdateCallerWithCallee(*phantom, *objInCallee, firstTime); + } + } else { + for (EACGObjectNode *objInCaller : actualInCaller.GetPointsToSet()) { + std::set &mapsTo = callee2Caller[objInCallee]; + if (mapsTo.find(objInCaller) == mapsTo.end()) { + AddMaps2Object(objInCaller, objInCallee); + UpdateCallerWithCallee(*objInCaller, *objInCallee, firstTime); + } + } + } + } +} + +// The escape state of the nodes in MapsTo(which is the object node in caller) is marked +// GlobalEscape if the escape state of object node in callee is GlobalEscape. +// Otherwise, the escape state of the caller nodes is not affected. +void EAConnectionGraph::UpdateCallerWithCallee(EACGObjectNode &objInCaller, const EACGObjectNode &objInCallee, + bool firstTime) { + if (objInCallee.GetEAStatus() == EAStatus::kGlobalEscape) { + (void)objInCaller.UpdateEAStatus(EAStatus::kGlobalEscape); + } + + // At this moment, a node in caller is mapped to the corresponding node in callee, + // we need make sure that all the field nodes also exist in caller. If not, + // we create both the field node and the phantom object node it should point to for the caller. 
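+ // GetOrCreateFieldNodeFromIdx creates the missing (phantom) field node on demand, and UpdateNodes then maps its points-to set recursively.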
+ for (auto tempPair : objInCallee.GetFieldNodeMap()) { + EACGFieldNode *fieldInCaller = objInCaller.GetFieldNodeFromIdx(tempPair.first); + EACGFieldNode *fieldInCallee = tempPair.second; + if (fieldInCaller == nullptr && fieldInCallee->IsBelongTo(this)) { + (void)objInCaller.AddOutNode(*fieldInCallee); + } + fieldInCaller = GetOrCreateFieldNodeFromIdx(objInCaller, tempPair.first); + UpdateNodes(*fieldInCallee, *fieldInCaller, firstTime); + } +} + +EACGFieldNode *EAConnectionGraph::GetOrCreateFieldNodeFromIdx(EACGObjectNode &obj, int32 fieldID) { + EACGFieldNode *ret = obj.GetFieldNodeFromIdx(fieldID); + if (ret == nullptr) { + // this node is always phantom + ret = CreateFieldNode(nullptr, obj.GetEAStatus(), fieldID, &obj, true); + } + return ret; +} +} // namespace maple diff --git a/src/mapleall/maple_ipa/src/old/ipa_escape_analysis.cpp b/src/mapleall/maple_ipa/src/old/ipa_escape_analysis.cpp new file mode 100644 index 0000000000000000000000000000000000000000..440c97bc05b82ca803e6e7604f159b8dccd71626 --- /dev/null +++ b/src/mapleall/maple_ipa/src/old/ipa_escape_analysis.cpp @@ -0,0 +1,1614 @@ +/* + * Copyright (c) [2019-2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "ipa_escape_analysis.h" +#include +#include "me_cfg.h" + +namespace maple { +constexpr maple::uint32 kInvalid = 0xffffffff; +static bool IsExprRefOrPtr(const MeExpr &expr) { + return expr.GetPrimType() == PTY_ref || expr.GetPrimType() == PTY_ptr; +} + +static bool IsTypeRefOrPtr(PrimType type) { + return type == PTY_ref || type == PTY_ptr; +} + +static bool IsGlobal(const SSATab &ssaTab, const VarMeExpr &expr) { + const OriginalSt *symOst = ssaTab.GetOriginalStFromID(expr.GetOstIdx()); + ASSERT(symOst != nullptr, "null ptr check"); + if (symOst->GetMIRSymbol()->GetStIdx().IsGlobal()) { + return true; + } + return false; +} + +static bool IsGlobal(const SSATab &ssaTab, const AddrofMeExpr &expr) { + const OriginalSt *symOst = ssaTab.GetOriginalStFromID(expr.GetOstIdx()); + ASSERT(symOst != nullptr, "null ptr check"); + if (symOst->GetMIRSymbol()->GetStIdx().IsGlobal()) { + return true; + } + return false; +} + +static bool IsZeroConst(const VarMeExpr *expr) { + if (expr == nullptr) { + return false; + } + if (expr->GetDefBy() != kDefByStmt) { + return false; + } + MeStmt *stmt = expr->GetDefStmt(); + if (stmt->GetOp() != OP_dassign) { + return false; + } + DassignMeStmt *dasgn = static_cast(stmt); + if (dasgn->GetRHS()->GetMeOp() != kMeOpConst) { + return false; + } + ConstMeExpr *constExpr = static_cast(dasgn->GetRHS()); + if (constExpr->GetConstVal()->GetKind() == kConstInt && constExpr->GetConstVal()->IsZero()) { + return true; + } + return false; +} + +static bool StartWith(const std::string &str, const std::string &head) { + return str.compare(0, head.size(), head) == 0; +} + +static bool IsVirtualVar(const SSATab &ssaTab, const VarMeExpr &expr) { + const OriginalSt *ost = ssaTab.GetOriginalStFromID(expr.GetOstIdx()); + ASSERT(ost != nullptr, "null ptr check"); + return ost->GetIndirectLev() > 0; +} + +static 
+static bool IsInWhiteList(const MIRFunction &func) {
+  std::vector<std::string> whiteList = {
+      "MCC_Reflect_Check_Casting_Array",
+      "MCC_Reflect_Check_Casting_NoArray",
+      "MCC_ThrowStringIndexOutOfBoundsException",
+      "MCC_ArrayMap_String_Int_clear",
+      "MCC_ArrayMap_String_Int_put",
+      "MCC_ArrayMap_String_Int_getOrDefault",
+      "MCC_ArrayMap_String_Int_size",
+      "MCC_ThrowSecurityException",
+      "MCC_String_Equals_NotallCompress",
+      "memcmpMpl",
+      "Native_java_lang_String_compareTo__Ljava_lang_String_2",
+      "Native_java_lang_String_getCharsNoCheck__II_3CI",
+      "Native_java_lang_String_toCharArray__",
+      "Native_java_lang_System_arraycopyBooleanUnchecked___3ZI_3ZII",
+      "Native_java_lang_System_arraycopyByteUnchecked___3BI_3BII",
+      "Native_java_lang_System_arraycopyCharUnchecked___3CI_3CII",
+      "Native_java_lang_System_arraycopyDoubleUnchecked___3DI_3DII",
+      "Native_java_lang_System_arraycopyFloatUnchecked___3FI_3FII",
+      "Native_java_lang_System_arraycopyIntUnchecked___3II_3III",
+      "Native_java_lang_System_arraycopy__Ljava_lang_Object_2ILjava_lang_Object_2II",
+      "Native_java_lang_System_arraycopyLongUnchecked___3JI_3JII",
+      "Native_java_lang_System_arraycopyShortUnchecked___3SI_3SII",
+      "getpriority",
+      "setpriority"
+  };
+  for (const std::string &name : whiteList) {
+    if (func.GetName() == name) {
+      // close all the whitelist
+      return false;
+    }
+  }
+  return false;
+}
+
+static bool IsNoSideEffect(CallMeStmt &call) {
+  CallMeStmt &callAssign = utils::ToRef(&call);
+  MIRFunction &mirFunc = callAssign.GetTargetFunction();
+  if (IsInWhiteList(mirFunc)) {
+    return true;
+  }
+  // Non-nullptr means it has return value
+  CHECK_FATAL(callAssign.GetMustDefList() != nullptr, "Impossible");
+  if (callAssign.GetMustDefList()->size() == 1) {
+    if (callAssign.GetMustDefListItem(0).GetLHS()->GetMeOp() != kMeOpVar &&
+        callAssign.GetMustDefListItem(0).GetLHS()->GetMeOp() != kMeOpReg) {
+      CHECK_FATAL(false, "NYI");
+    }
+    if (IsExprRefOrPtr(*callAssign.GetMustDefListItem(0).GetLHS())) {
+      return false;
+    }
+  }
+
+  const MapleVector<MeExpr*> &opnds = call.GetOpnds();
+  const size_t size = opnds.size();
+  for (size_t i = 0; i < size; ++i) {
+    if (IsExprRefOrPtr(*opnds[i])) {
+      return false;
+    }
+  }
+  return true;
+}
+
+static bool IsRegAssignStmtForClassMeta(const AssignMeStmt &regAssign) {
+  MeExpr &rhs = utils::ToRef(regAssign.GetRHS());
+  if (rhs.GetOp() == OP_add) {
+    return true;
+  }
+
+  if (instance_of<IvarMeExpr>(rhs)) {
+    IvarMeExpr &ivar = static_cast<IvarMeExpr&>(rhs);
+    MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(ivar.GetTyIdx());
+    MIRPtrType &ptrType = utils::ToRef(safe_cast<MIRPtrType>(type));
+    if (ptrType.GetPointedType()->GetName() == "__class_meta__" ||
+        (ptrType.GetPointedType()->GetName() == "Ljava_2Flang_2FObject_3B" && ivar.GetFieldID() == 1)) {
+      return true;
+    }
+  }
+  return false;
+}
+
+TyIdx IPAEscapeAnalysis::GetAggElemType(const MIRType &aggregate) const {
+  switch (aggregate.GetKind()) {
+    case kTypePointer: {
+      const MIRPtrType *pointType = static_cast<const MIRPtrType*>(&aggregate);
+      const MIRType *pointedType = pointType->GetPointedType();
+      switch (pointedType->GetKind()) {
+        case kTypeClass:
+        case kTypeScalar:
+          return pointedType->GetTypeIndex();
+        case kTypePointer:
+        case kTypeJArray:
+          return GetAggElemType(*pointedType);
+        default:
+          return TyIdx(0);
+      }
+    }
+    case kTypeJArray: {
+      const MIRJarrayType *arrType = static_cast<const MIRJarrayType*>(&aggregate);
+      const MIRType *elemType = arrType->GetElemType();
+      CHECK_NULL_FATAL(elemType);
+      switch (elemType->GetKind()) {
+        case kTypeScalar:
+          return elemType->GetTypeIndex();
+        case kTypePointer:
+        case kTypeJArray:
+          return
GetAggElemType(*elemType); + default: // Not sure what type is + return TyIdx(0); + } + } + default: + CHECK_FATAL(false, "Should not reach here"); + return TyIdx(0); // to eliminate compilation warning + } +} + +// check whether the newly allocated object implements Runnable, Throwable, extends Reference or has a finalizer +bool IPAEscapeAnalysis::IsSpecialEscapedObj(const MeExpr &alloc) const { + if (alloc.GetOp() == OP_gcpermalloc || alloc.GetOp() == OP_gcpermallocjarray) { + return true; + } + TyIdx tyIdx; + const static TyIdx runnableInterface = kh->GetKlassFromLiteral("Ljava_2Flang_2FRunnable_3B")->GetTypeIdx(); + if (alloc.GetOp() == OP_gcmalloc) { + tyIdx = static_cast(&alloc)->GetTyIdx(); + } else { + CHECK_FATAL(alloc.GetOp() == OP_gcmallocjarray, "must be OP_gcmallocjarray"); + MIRType *arrType = + GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(&alloc)->GetTyIdx()); + tyIdx = GetAggElemType(*arrType); + if (tyIdx == TyIdx(0)) { + return true; // deal as escape + } + } + MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx); + if (type->GetKind() == kTypeScalar) { + return false; + } + Klass *klass = kh->GetKlassFromTyIdx(tyIdx); + CHECK_FATAL(klass, "impossible"); + for (Klass *inter : klass->GetImplInterfaces()) { + if (inter->GetTypeIdx() == runnableInterface) { + return true; + } + } + if (klass->HasFinalizer() || klass->IsExceptionKlass()) { + return true; + } + + // check subclass of Reference class, such as WeakReference, PhantomReference, SoftReference and Cleaner + const static Klass *referenceKlass = kh->GetKlassFromLiteral("Ljava_2Flang_2Fref_2FReference_3B"); + if (kh->IsSuperKlass(referenceKlass, klass)) { + return true; + } + return false; +} + +EACGRefNode *IPAEscapeAnalysis::GetOrCreateCGRefNodeForReg(RegMeExpr ®, bool createObjNode) { + EACGBaseNode *node = eaCG->GetCGNodeFromExpr(®); + EACGRefNode *refNode = nullptr; + if (node == nullptr) { + refNode = eaCG->CreateReferenceNode(®, kNoEscape, false); + cgChangedInSCC = true; + } else { + refNode = static_cast(node); + } + if (node == nullptr && createObjNode) { + EACGObjectNode *objNode = GetOrCreateCGObjNode(nullptr, nullptr, refNode->GetEAStatus()); + (void)refNode->AddOutNode(*objNode); + } + return refNode; +} + +EACGRefNode *IPAEscapeAnalysis::GetOrCreateCGRefNodeForAddrof(AddrofMeExpr &var, bool createObjNode) { + if (IsGlobal(*ssaTab, var)) { + eaCG->UpdateExprOfGlobalRef(&var); + return eaCG->GetGlobalReference(); + } + EACGBaseNode *node = eaCG->GetCGNodeFromExpr(&var); + EACGRefNode *refNode = nullptr; + if (node == nullptr) { + refNode = eaCG->CreateReferenceNode(&var, kNoEscape, false); + cgChangedInSCC = true; + } else { + refNode = static_cast(node); + } + if (node == nullptr && createObjNode) { + EACGObjectNode *objNode = GetOrCreateCGObjNode(nullptr, nullptr, refNode->GetEAStatus()); + (void)refNode->AddOutNode(*objNode); + } + return refNode; +} + +EACGRefNode *IPAEscapeAnalysis::GetOrCreateCGRefNodeForVar(VarMeExpr &var, bool createObjNode) { + if (IsGlobal(*ssaTab, var)) { + eaCG->UpdateExprOfGlobalRef(&var); + return eaCG->GetGlobalReference(); + } + EACGBaseNode *node = eaCG->GetCGNodeFromExpr(&var); + EACGRefNode *refNode = nullptr; + if (node == nullptr) { + refNode = eaCG->CreateReferenceNode(&var, kNoEscape, false); + cgChangedInSCC = true; + } else { + refNode = static_cast(node); + } + if (node == nullptr && createObjNode) { + EACGObjectNode *objNode = GetOrCreateCGObjNode(nullptr, nullptr, refNode->GetEAStatus()); + (void)refNode->AddOutNode(*objNode); + } + 
return refNode; +} + +EACGRefNode *IPAEscapeAnalysis::GetOrCreateCGRefNodeForVarOrReg(MeExpr &var, bool createObjNode) { + if (var.GetMeOp() == kMeOpVar) { + return GetOrCreateCGRefNodeForVar(static_cast(var), createObjNode); + } else if (var.GetMeOp() == kMeOpReg) { + return GetOrCreateCGRefNodeForReg(static_cast(var), createObjNode); + } + CHECK_FATAL(false, "Impossible"); + return nullptr; +} + +static FieldID GetBaseFieldId(const KlassHierarchy &kh, const TyIdx &tyIdx, FieldID fieldId) { + FieldID ret = fieldId; + Klass *klass = kh.GetKlassFromTyIdx(tyIdx); + CHECK_FATAL(klass != nullptr, "Impossible"); + Klass *super = klass->GetSuperKlass(); + if (super == nullptr) { + return ret; + } + MIRStructType *structType = super->GetMIRStructType(); + TyIdx typeIdx = structType->GetFieldTyIdx(fieldId - 1); + while (typeIdx != 0u) { + --ret; + klass = super; + super = klass->GetSuperKlass(); + if (super == nullptr) { + return ret; + } + structType = super->GetMIRStructType(); + typeIdx = structType->GetFieldTyIdx(ret - 1); + } + return ret; +} + +void IPAEscapeAnalysis::GetOrCreateCGFieldNodeForIvar(std::vector &fieldNodes, IvarMeExpr &ivar, + MeStmt &stmt, bool createObjNode) { + MeExpr *base = ivar.GetBase(); + FieldID fieldId = ivar.GetFieldID(); + std::vector baseNodes; + if (base->GetMeOp() == kMeOpReg && fieldId == 0) { + GetArrayBaseNodeForReg(baseNodes, static_cast(*base), stmt); + } else { + GetCGNodeForMeExpr(baseNodes, *base, stmt, true); + } + bool ifHandled = (eaCG->GetCGNodeFromExpr(&ivar) != nullptr); + if (ivar.GetFieldID() != 0) { + MIRPtrType *ptrType = static_cast(GlobalTables::GetTypeTable().GetTypeFromTyIdx(ivar.GetTyIdx())); + TyIdx tyIdx = ptrType->GetPointedTyIdx(); + fieldId = GetBaseFieldId(*kh, tyIdx, ivar.GetFieldID()); + } + for (const auto &baseNode : baseNodes) { + for (const auto &objNode : baseNode->GetPointsToSet()) { + EACGFieldNode *fieldNode = objNode->GetFieldNodeFromIdx(fieldId); + if (!ifHandled && fieldNode != nullptr) { + eaCG->UpdateExprOfNode(*fieldNode, &ivar); + } else if (!ifHandled && fieldNode == nullptr) { + fieldNode = eaCG->CreateFieldNode(&ivar, objNode->GetEAStatus(), fieldId, objNode, false); + cgChangedInSCC = true; + if (createObjNode) { + EACGObjectNode *phanObjNode = GetOrCreateCGObjNode(nullptr); + (void)fieldNode->AddOutNode(*phanObjNode); + } + } + if (fieldNode != nullptr) { + fieldNodes.push_back(fieldNode); + } + } + } +} + +void IPAEscapeAnalysis::GetOrCreateCGFieldNodeForIAddrof(std::vector &fieldNodes, OpMeExpr &expr, + MeStmt &stmt, bool createObjNode) { + MeExpr *base = expr.GetOpnd(0); + FieldID fieldId = expr.GetFieldID(); + std::vector baseNodes; + if (base->GetMeOp() == kMeOpReg && fieldId == 0) { + GetArrayBaseNodeForReg(baseNodes, static_cast(*base), stmt); + } else { + GetCGNodeForMeExpr(baseNodes, *base, stmt, true); + } + bool ifHandled = (eaCG->GetCGNodeFromExpr(&expr) != nullptr); + if (expr.GetFieldID() != 0) { + MIRPtrType *ptrType = static_cast(GlobalTables::GetTypeTable().GetTypeFromTyIdx(expr.GetTyIdx())); + TyIdx tyIdx = ptrType->GetPointedTyIdx(); + fieldId = GetBaseFieldId(*kh, tyIdx, expr.GetFieldID()); + } + for (const auto &baseNode : baseNodes) { + for (const auto &objNode : baseNode->GetPointsToSet()) { + EACGFieldNode *fieldNode = objNode->GetFieldNodeFromIdx(fieldId); + if (!ifHandled && fieldNode != nullptr) { + eaCG->UpdateExprOfNode(*fieldNode, &expr); + } else if (!ifHandled && fieldNode == nullptr) { + fieldNode = eaCG->CreateFieldNode(&expr, objNode->GetEAStatus(), fieldId, objNode, false); + 
cgChangedInSCC = true; + if (createObjNode) { + EACGObjectNode *phanObjNode = GetOrCreateCGObjNode(nullptr); + (void)fieldNode->AddOutNode(*phanObjNode); + } + } + if (fieldNode != nullptr) { + fieldNodes.push_back(fieldNode); + } + } + } +} + +EACGObjectNode *IPAEscapeAnalysis::GetOrCreateCGObjNode(MeExpr *expr, const MeStmt *stmt, EAStatus easOfPhanObj) { + EAStatus eas = kNoEscape; + TyIdx tyIdx; + Location *location = nullptr; + bool isPhantom; + if (expr != nullptr) { + EACGBaseNode *cgNode = eaCG->GetCGNodeFromExpr(expr); + if (cgNode != nullptr) { + CHECK_FATAL(cgNode->IsObjectNode(), "should be object"); + EACGObjectNode *objNode = static_cast(cgNode); + return objNode; + } + if (expr->IsGcmalloc()) { + CHECK_FATAL(stmt != nullptr, "Impossible"); + location = mirModule->GetMemPool()->New(mirModule->GetFileName(), stmt->GetSrcPosition().FileNum(), + stmt->GetSrcPosition().LineNum()); + isPhantom = false; + if (IsSpecialEscapedObj(*expr)) { + eas = kGlobalEscape; + } + if (expr->GetOp() == OP_gcmalloc || expr->GetOp() == OP_gcpermalloc) { + tyIdx = static_cast(expr)->GetTyIdx(); + } else { + tyIdx = static_cast(expr)->GetTyIdx(); + } + } else { + isPhantom = true; + eas = easOfPhanObj; + tyIdx = kInitTyIdx; + } + } else { // null alloc means creating phantom object + isPhantom = true; + eas = easOfPhanObj; + tyIdx = kInitTyIdx; + } + if (eas == kGlobalEscape) { + eaCG->UpdateExprOfNode(*eaCG->GetGlobalObject(), expr); + return eaCG->GetGlobalObject(); + } + cgChangedInSCC = true; + EACGObjectNode *objectNode = eaCG->CreateObjectNode(expr, eas, isPhantom, tyIdx); + if (location != nullptr) { + objectNode->SetLocation(location); + } + return objectNode; +} + +void IPAEscapeAnalysis::CollectDefStmtForReg(std::set &visited, std::set &defStmts, + RegMeExpr ®Var) { + if (regVar.GetDefBy() == kDefByStmt) { + AssignMeStmt *regAssignStmt = static_cast(regVar.GetDefStmt()); + (void)defStmts.insert(regAssignStmt); + } else if (regVar.GetDefBy() == kDefByPhi) { + if (visited.find(®Var) == visited.end()) { + (void)visited.insert(®Var); + MePhiNode ®PhiNode = regVar.GetDefPhi(); + for (auto ® : regPhiNode.GetOpnds()) { + CollectDefStmtForReg(visited, defStmts, static_cast(*reg)); + } + } + } else { + CHECK_FATAL(false, "not kDefByStmt or kDefByPhi"); + } +} + +void IPAEscapeAnalysis::GetArrayBaseNodeForReg(std::vector &nodes, RegMeExpr ®Var, MeStmt &stmt) { + std::set defStmts; + std::set visited; + CollectDefStmtForReg(visited, defStmts, regVar); + for (auto ®AssignStmt : defStmts) { + MeExpr *rhs = regAssignStmt->GetRHS(); + CHECK_FATAL(rhs != nullptr, "Impossible"); + CHECK_FATAL(rhs->GetOp() == OP_array, "Impossible, funcName: %s", func->GetName().c_str()); + NaryMeExpr *array = static_cast(rhs); + CHECK_FATAL(array->GetOpnds().size() > 0, "access array->GetOpnds() failed"); + MeExpr *base = array->GetOpnd(0); + std::vector baseNodes; + GetCGNodeForMeExpr(baseNodes, *base, stmt, true); + for (auto baseNode : baseNodes) { + nodes.push_back(baseNode); + } + } +} + +void IPAEscapeAnalysis::GetCGNodeForMeExpr(std::vector &nodes, MeExpr &expr, MeStmt &stmt, + bool createObjNode) { + if (expr.GetMeOp() == kMeOpVar) { + VarMeExpr *var = static_cast(&expr); + EACGRefNode *refNode = GetOrCreateCGRefNodeForVar(*var, createObjNode); + nodes.push_back(refNode); + } else if (expr.GetMeOp() == kMeOpIvar) { + IvarMeExpr *ivar = static_cast(&expr); + GetOrCreateCGFieldNodeForIvar(nodes, *ivar, stmt, createObjNode); + } else if (expr.IsGcmalloc()) { + EACGObjectNode *objNode = GetOrCreateCGObjNode(&expr, 
&stmt); + nodes.push_back(objNode); + } else if (expr.GetMeOp() == kMeOpReg) { + RegMeExpr *regVar = static_cast(&expr); + if (regVar->GetRegIdx() < 0) { + eaCG->UpdateExprOfNode(*eaCG->GetGlobalObject(), &expr); + EACGObjectNode *objNode = eaCG->GetGlobalObject(); + nodes.push_back(objNode); + } else { + if (regVar->GetDefBy() != kDefByStmt && regVar->GetDefBy() != kDefByMustDef) { + CHECK_FATAL(false, "impossible"); + } + EACGRefNode *refNode = GetOrCreateCGRefNodeForReg(*regVar, createObjNode); + nodes.push_back(refNode); + } + } else if (expr.GetMeOp() == kMeOpOp && (expr.GetOp() == OP_retype || expr.GetOp() == OP_cvt)) { + MeExpr *retypeRhs = (static_cast(&expr))->GetOpnd(0); + if (IsExprRefOrPtr(*retypeRhs)) { + GetCGNodeForMeExpr(nodes, *retypeRhs, stmt, createObjNode); + } else { + EACGObjectNode *objNode = nullptr; + VarMeExpr *var = static_cast(retypeRhs); + if (IsZeroConst(var)) { + objNode = GetOrCreateCGObjNode(&expr, nullptr, kNoEscape); + } else { + eaCG->UpdateExprOfNode(*eaCG->GetGlobalObject(), &expr); + objNode = eaCG->GetGlobalObject(); + } + nodes.push_back(objNode); + } + } else if (expr.GetMeOp() == kMeOpOp && expr.GetOp() == OP_select) { + OpMeExpr *opMeExpr = static_cast(&expr); + EACGBaseNode *refNode = eaCG->GetCGNodeFromExpr(opMeExpr); + if (refNode == nullptr) { + refNode = eaCG->CreateReferenceNode(opMeExpr, kNoEscape, false); + for (size_t i = 1; i < 3; ++i) { // OP_select expr has three operands. + std::vector opndNodes; + GetCGNodeForMeExpr(opndNodes, *opMeExpr->GetOpnd(i), stmt, true); + for (auto opndNode : opndNodes) { + (void)refNode->AddOutNode(*opndNode); + } + } + } + nodes.push_back(refNode); + } else if (expr.GetMeOp() == kMeOpAddrof && expr.GetOp() == OP_addrof) { + AddrofMeExpr *var = static_cast(&expr); + EACGRefNode *refNode = GetOrCreateCGRefNodeForAddrof(*var, createObjNode); + nodes.push_back(refNode); + } else if (expr.GetMeOp() == kMeOpOp && expr.GetOp() == OP_iaddrof) { + OpMeExpr *opExpr = static_cast(&expr); + GetOrCreateCGFieldNodeForIAddrof(nodes, *opExpr, stmt, createObjNode); + } else if (expr.GetMeOp() == kMeOpNary && + (expr.GetOp() == OP_intrinsicopwithtype || expr.GetOp() == OP_intrinsicop)) { + NaryMeExpr *naryMeExpr = static_cast(&expr); + if (naryMeExpr->GetIntrinsic() == INTRN_JAVA_CONST_CLASS) { + // get some class's "Class", metadata + eaCG->UpdateExprOfNode(*eaCG->GetGlobalObject(), &expr); + EACGObjectNode *objNode = eaCG->GetGlobalObject(); + nodes.push_back(objNode); + } else if (naryMeExpr->GetIntrinsic() == INTRN_JAVA_MERGE) { + CHECK_FATAL(naryMeExpr->GetOpnds().size() == 1, "must have one opnd"); + MeExpr *opnd = naryMeExpr->GetOpnd(0); + if (IsExprRefOrPtr(*opnd)) { + GetCGNodeForMeExpr(nodes, *opnd, stmt, createObjNode); + } else { + eaCG->UpdateExprOfNode(*eaCG->GetGlobalObject(), &expr); + EACGObjectNode *objNode = eaCG->GetGlobalObject(); + nodes.push_back(objNode); + } + } else { + stmt.Dump(irMap); + CHECK_FATAL(false, "NYI"); + } + } else if (expr.GetMeOp() == kMeOpNary && expr.GetOp() == OP_array) { + NaryMeExpr *array = static_cast(&expr); + CHECK_FATAL(array->GetOpnds().size() > 0, "access array->GetOpnds() failed"); + MeExpr *arrayBase = array->GetOpnd(0); + GetCGNodeForMeExpr(nodes, *arrayBase, stmt, createObjNode); + } else if (expr.GetMeOp() == kMeOpConst) { + ConstMeExpr *constExpr = static_cast(&expr); + EACGObjectNode *objNode = nullptr; + if (constExpr->GetConstVal()->GetKind() == kConstInt && constExpr->IsZero()) { + objNode = GetOrCreateCGObjNode(&expr, nullptr, kNoEscape); + } else { + 
eaCG->UpdateExprOfNode(*eaCG->GetGlobalObject(), &expr); + objNode = eaCG->GetGlobalObject(); + } + nodes.push_back(objNode); + } else if (expr.GetMeOp() == kMeOpConststr) { + nodes.push_back(eaCG->GetGlobalReference()); + eaCG->UpdateExprOfGlobalRef(&expr); + } else { + stmt.Dump(irMap); + CHECK_FATAL(false, "NYI funcName: %s", func->GetName().c_str()); + } +} + +void IPAEscapeAnalysis::UpdateEscConnGraphWithStmt(MeStmt &stmt) { + switch (stmt.GetOp()) { + case OP_dassign: { + DassignMeStmt *dasgn = static_cast(&stmt); + if (!IsExprRefOrPtr(*dasgn->GetLHS())) { + break; + } + CHECK_FATAL(IsExprRefOrPtr(*dasgn->GetRHS()), "type mis-match"); + EACGRefNode *lhsNode = GetOrCreateCGRefNodeForVar(*static_cast(dasgn->GetVarLHS()), false); + + std::vector rhsNodes; + GetCGNodeForMeExpr(rhsNodes, *dasgn->GetRHS(), stmt, true); + for (const auto &rhsNode : rhsNodes) { + cgChangedInSCC = (lhsNode->AddOutNode(*rhsNode) ? true : cgChangedInSCC); + } + break; + } + case OP_iassign: { + IassignMeStmt *iasgn = static_cast(&stmt); + if (!IsExprRefOrPtr(*iasgn->GetLHSVal())) { + break; + } + CHECK_FATAL(IsExprRefOrPtr(*iasgn->GetRHS()), "type mis-match"); + // get or create field nodes for lhs (may need to create a phantom object node) + std::vector lhsNodes; + GetOrCreateCGFieldNodeForIvar(lhsNodes, *iasgn->GetLHSVal(), stmt, false); + std::vector rhsNodes; + GetCGNodeForMeExpr(rhsNodes, *iasgn->GetRHS(), stmt, true); + for (const auto &lhsNode : lhsNodes) { + for (const auto &rhsNode : rhsNodes) { + cgChangedInSCC = (lhsNode->AddOutNode(*rhsNode) ? true : cgChangedInSCC); + } + } + break; + } + case OP_maydassign: { + MaydassignMeStmt *mdass = static_cast(&stmt); + CHECK_FATAL(mdass->GetChiList() != nullptr, "Impossible"); + if (mdass->GetChiList()->empty() || !IsExprRefOrPtr(*mdass->GetRHS())) { + break; + } + for (auto &it : std::as_const(*mdass->GetChiList())) { + ChiMeNode *chi = it.second; + CHECK_FATAL(IsExprRefOrPtr(*chi->GetLHS()), "type mis-match"); + EACGRefNode *lhsNode = GetOrCreateCGRefNodeForVar(*static_cast(chi->GetLHS()), false); + std::vector rhsNodes; + GetCGNodeForMeExpr(rhsNodes, *mdass->GetRHS(), stmt, true); + for (const auto &rhsNode : rhsNodes) { + cgChangedInSCC = (lhsNode->AddOutNode(*rhsNode) ? true : cgChangedInSCC); + } + } + break; + } + case OP_regassign: { + AssignMeStmt *regasgn = static_cast(&stmt); + CHECK_FATAL(regasgn->GetLHS() != nullptr, "Impossible"); + CHECK_FATAL(regasgn->GetRHS() != nullptr, "Impossible"); + if (!IsExprRefOrPtr(*regasgn->GetLHS())) { + break; + } + CHECK_FATAL(IsExprRefOrPtr(*regasgn->GetRHS()), "type mis-match"); + if (IsRegAssignStmtForClassMeta(*regasgn) || regasgn->GetRHS()->GetOp() == OP_array) { + break; + } + EACGRefNode *lhsNode = GetOrCreateCGRefNodeForReg(*regasgn->GetLHS(), false); + std::vector rhsNodes; + GetCGNodeForMeExpr(rhsNodes, *regasgn->GetRHS(), stmt, true); + for (const auto &rhsNode : rhsNodes) { + cgChangedInSCC = (lhsNode->AddOutNode(*rhsNode) ? 
true : cgChangedInSCC); + } + break; + } + case OP_throw: { + ThrowMeStmt *throwStmt = static_cast(&stmt); + std::vector nodes; + GetCGNodeForMeExpr(nodes, *throwStmt->GetOpnd(), stmt, true); + for (const auto &node : nodes) { + for (const auto &objNode : node->GetPointsToSet()) { + if (objNode->GetEAStatus() != kGlobalEscape) { + (void)objNode->UpdateEAStatus(kGlobalEscape); + cgChangedInSCC = true; + } + } + } + break; + } + case OP_return: { + RetMeStmt *retMeStmt = static_cast(&stmt); + EACGActualNode *retNode = eaCG->GetReturnNode(); + MIRFunction *mirFunc = func->GetMirFunc(); + if (!IsTypeRefOrPtr(mirFunc->GetReturnType()->GetPrimType())) { + break; + } + if (retNode == nullptr && retMeStmt->GetOpnds().size() > 0) { + retNode = eaCG->CreateActualNode(kReturnEscape, true, true, + static_cast(mirFunc->GetFormalCount()), kInvalid); + cgChangedInSCC = true; + } + for (const auto &expr : retMeStmt->GetOpnds()) { + if (!IsExprRefOrPtr(*expr)) { + continue; + } + if (expr->GetMeOp() != kMeOpVar && expr->GetMeOp() != kMeOpReg) { + CHECK_FATAL(false, "should be"); + } + EACGRefNode *refNode = GetOrCreateCGRefNodeForVarOrReg(*expr, true); + cgChangedInSCC = (retNode->AddOutNode(*refNode) ? true : cgChangedInSCC); + } + break; + } + case OP_icall: + case OP_customcall: + case OP_polymorphiccall: + case OP_virtualcall: + case OP_virtualicall: + case OP_superclasscall: + case OP_interfacecall: + case OP_interfaceicall: + case OP_xintrinsiccall: + case OP_icallassigned: + case OP_customcallassigned: + case OP_polymorphiccallassigned: + case OP_xintrinsiccallassigned: { + CHECK_FATAL(false, "NYI"); + break; + } + case OP_intrinsiccall: { + IntrinsiccallMeStmt *intrn = static_cast(&stmt); + if (intrn->GetIntrinsic() != INTRN_MPL_CLEANUP_LOCALREFVARS && + intrn->GetIntrinsic() != INTRN_MPL_CLEANUP_LOCALREFVARS_SKIP && + intrn->GetIntrinsic() != INTRN_MCCSetPermanent && + intrn->GetIntrinsic() != INTRN_MCCIncRef && + intrn->GetIntrinsic() != INTRN_MCCDecRef && + intrn->GetIntrinsic() != INTRN_MCCIncDecRef && + intrn->GetIntrinsic() != INTRN_MCCDecRefReset && + intrn->GetIntrinsic() != INTRN_MCCIncDecRefReset && + intrn->GetIntrinsic() != INTRN_MCCWrite && + intrn->GetIntrinsic() != INTRN_MCCWriteNoDec && + intrn->GetIntrinsic() != INTRN_MCCWriteNoInc && + intrn->GetIntrinsic() != INTRN_MCCWriteNoRC && + intrn->GetIntrinsic() != INTRN_MCCWriteReferent && + intrn->GetIntrinsic() != INTRN_MCCWriteS && + intrn->GetIntrinsic() != INTRN_MCCWriteSNoInc && + intrn->GetIntrinsic() != INTRN_MCCWriteSNoDec && + intrn->GetIntrinsic() != INTRN_MCCWriteSNoRC && + intrn->GetIntrinsic() != INTRN_MCCWriteSVol && + intrn->GetIntrinsic() != INTRN_MCCWriteSVolNoInc && + intrn->GetIntrinsic() != INTRN_MCCWriteSVolNoDec && + intrn->GetIntrinsic() != INTRN_MCCWriteSVolNoRC && + intrn->GetIntrinsic() != INTRN_MCCWriteVol && + intrn->GetIntrinsic() != INTRN_MCCWriteVolNoInc && + intrn->GetIntrinsic() != INTRN_MCCWriteVolNoDec && + intrn->GetIntrinsic() != INTRN_MCCWriteVolNoRC && + intrn->GetIntrinsic() != INTRN_MCCWriteVolWeak && + intrn->GetIntrinsic() != INTRN_MCCWriteWeak && + intrn->GetIntrinsic() != INTRN_MCCDecRefResetPair) { + CHECK_FATAL(false, "intrnId: %d in function: %s", intrn->GetIntrinsic(), func->GetName().c_str()); + } + + if (intrn->GetIntrinsic() == INTRN_MPL_CLEANUP_LOCALREFVARS || + intrn->GetIntrinsic() == INTRN_MPL_CLEANUP_LOCALREFVARS_SKIP || + intrn->GetIntrinsic() == INTRN_MCCSetPermanent || + intrn->GetIntrinsic() == INTRN_MCCIncRef || + intrn->GetIntrinsic() == INTRN_MCCDecRef || + 
intrn->GetIntrinsic() == INTRN_MCCIncDecRef || + intrn->GetIntrinsic() == INTRN_MCCDecRefReset || + intrn->GetIntrinsic() == INTRN_MCCIncDecRefReset || + intrn->GetIntrinsic() == INTRN_MCCWriteReferent || + intrn->GetIntrinsic() == INTRN_MCCDecRefResetPair) { + break; + } + + CHECK_FATAL(intrn->GetOpnds().size() > 1, "must be"); + const size_t opndIdx = 2; + MeExpr *lhs = intrn->GetOpnd(intrn->NumMeStmtOpnds() - opndIdx); + MeExpr *rhs = intrn->GetOpnds().back(); + std::vector lhsNodes; + GetCGNodeForMeExpr(lhsNodes, *lhs, stmt, false); + std::vector rhsNodes; + GetCGNodeForMeExpr(rhsNodes, *rhs, stmt, true); + for (auto lhsNode : lhsNodes) { + for (auto rhsNode : rhsNodes) { + (void)lhsNode->AddOutNode(*rhsNode); + } + } + break; + } + case OP_call: + case OP_callassigned: + case OP_superclasscallassigned: + case OP_interfaceicallassigned: + case OP_interfacecallassigned: + case OP_virtualicallassigned: + case OP_virtualcallassigned: { + CallMeStmt *callMeStmt = static_cast(&stmt); + MIRFunction &mirFunc = callMeStmt->GetTargetFunction(); + uint32 callInfo = callMeStmt->GetStmtID(); + if (callInfo == 0) { + if (mirFunc.GetName() != "MCC_SetObjectPermanent" && mirFunc.GetName() != "MCC_DecRef_NaiveRCFast") { + CHECK_FATAL(false, "funcName: %s", mirFunc.GetName().c_str()); + } + break; + } + eaCG->TouchCallSite(callInfo); + + // If a function has no reference parameter or return value, then skip it. + if (IsNoSideEffect(*callMeStmt)) { + HandleParaAtCallSite(callInfo, *callMeStmt); + break; + } + + HandleParaAtCallSite(callInfo, *callMeStmt); + if (stmt.GetOp() == OP_call || stmt.GetOp() == OP_callassigned || stmt.GetOp() == OP_superclasscallassigned) { + if (IPAEscapeAnalysis::kDebug) { + LogInfo::MapleLogger() << "[INVOKE] call func " << mirFunc.GetName() << "\n"; + } + HandleSingleCallee(*callMeStmt); + } else { + if (IPAEscapeAnalysis::kDebug) { + LogInfo::MapleLogger() << "[INVOKE] vcall func " << mirFunc.GetName() << "\n"; + } + HandleMultiCallees(*callMeStmt); + } + break; + } + // mainly for JAVA_CLINIT_CHECK + case OP_intrinsiccallwithtype: { + IntrinsiccallMeStmt *intrnMestmt = static_cast(&stmt); + if (intrnMestmt->GetIntrinsic() != INTRN_JAVA_CLINIT_CHECK && + intrnMestmt->GetIntrinsic() != INTRN_JAVA_CHECK_CAST) { + CHECK_FATAL(false, "intrnId: %d in function: %s", intrnMestmt->GetIntrinsic(), func->GetName().c_str()); + } + // 1. INTRN_JAVA_CLINIT_CHECK: Because all the operations in clinit are to initialize the static field + // of the Class, this will not affect the escape status of any reference or object node. + // 2. INTRN_JAVA_CHECK_CAST: When mephase precheckcast is enabled, this will happen, we only hava to solve + // the next dassign stmt. + break; + } + // mainly for JAVA_ARRAY_FILL and JAVA_POLYMORPHIC_CALL + case OP_intrinsiccallassigned: { + IntrinsiccallMeStmt *intrnMestmt = static_cast(&stmt); + if (intrnMestmt->GetIntrinsic() == INTRN_JAVA_POLYMORPHIC_CALL) { + // this intrinsiccall is MethodHandle.invoke, it is a native method. + const MapleVector &opnds = intrnMestmt->GetOpnds(); + const size_t size = opnds.size(); + for (size_t i = 0; i < size; ++i) { + MeExpr *var = opnds[i]; + // we only solve reference node. 
+ if (!IsExprRefOrPtr(*var)) { + continue; + } + std::vector nodes; + GetCGNodeForMeExpr(nodes, *var, *intrnMestmt, true); + for (auto realArgNode : nodes) { + for (EACGObjectNode *obj : realArgNode->GetPointsToSet()) { + (void)obj->UpdateEAStatus(kGlobalEscape); + } + } + } + CHECK_FATAL(intrnMestmt->GetMustDefList()->size() <= 1, "Impossible"); + if (intrnMestmt->GetMustDefList()->size() == 0) { + break; + } + if (intrnMestmt->GetMustDefListItem(0).GetLHS()->GetMeOp() != kMeOpVar && + intrnMestmt->GetMustDefListItem(0).GetLHS()->GetMeOp() != kMeOpReg) { + CHECK_FATAL(false, "impossible"); + } + if (!IsExprRefOrPtr(*intrnMestmt->GetMustDefListItem(0).GetLHS())) { + break; + } + EACGRefNode *realRetNode = GetOrCreateCGRefNodeForVarOrReg(*intrnMestmt->GetMustDefListItem(0).GetLHS(), true); + for (EACGObjectNode *obj : realRetNode->GetPointsToSet()) { + (void)obj->UpdateEAStatus(kGlobalEscape); + } + break; + } else if (intrnMestmt->GetIntrinsic() == INTRN_JAVA_ARRAY_FILL) { + // JAVA_ARRAY_FILL can be skipped. + break; + } else { + if (intrnMestmt->GetIntrinsic() != INTRN_MCCIncRef && + intrnMestmt->GetIntrinsic() != INTRN_MCCLoadRef && + intrnMestmt->GetIntrinsic() != INTRN_MCCLoadRefS && + intrnMestmt->GetIntrinsic() != INTRN_MCCLoadRefSVol && + intrnMestmt->GetIntrinsic() != INTRN_MCCLoadRefVol && + intrnMestmt->GetIntrinsic() != INTRN_MCCLoadWeak && + intrnMestmt->GetIntrinsic() != INTRN_MCCLoadWeakVol) { + CHECK_FATAL(false, "intrnId: %d in function: %s", intrnMestmt->GetIntrinsic(), func->GetName().c_str()); + } + + CHECK_FATAL(intrnMestmt->GetMustDefList()->size() == 1, "Impossible"); + if (intrnMestmt->GetMustDefListItem(0).GetLHS()->GetMeOp() != kMeOpVar && + intrnMestmt->GetMustDefListItem(0).GetLHS()->GetMeOp() != kMeOpReg) { + CHECK_FATAL(false, "impossible"); + } + + if (!IsExprRefOrPtr(*intrnMestmt->GetMustDefListItem(0).GetLHS())) { + break; + } + EACGRefNode *retNode = GetOrCreateCGRefNodeForVarOrReg(*intrnMestmt->GetMustDefListItem(0).GetLHS(), false); + MeExpr *rhs = intrnMestmt->GetOpnds().back(); + std::vector rhsNodes; + GetCGNodeForMeExpr(rhsNodes, *rhs, stmt, true); + for (auto rhsNode : rhsNodes) { + (void)retNode->AddOutNode(*rhsNode); + } + break; + } + } + // mainly for JAVA_CHECK_CAST and JAVA_FILL_NEW_ARRAY + case OP_intrinsiccallwithtypeassigned: { + IntrinsiccallMeStmt *intrnMestmt = static_cast(&stmt); + if (intrnMestmt->GetIntrinsic() == INTRN_JAVA_CHECK_CAST) { + // We regard this as dassign + CHECK_FATAL(intrnMestmt->GetMustDefList()->size() <= 1, "Impossible"); + if (intrnMestmt->GetMustDefList()->size() == 0) { + break; + } + CHECK_FATAL(intrnMestmt->GetMustDefListItem(0).GetLHS()->GetMeOp() == kMeOpVar, "must be kMeOpVar"); + VarMeExpr *lhs = static_cast(intrnMestmt->GetMustDefListItem(0).GetLHS()); + if (!IsExprRefOrPtr(*lhs)) { + break; + } + CHECK_FATAL(intrnMestmt->GetOpnds().size() == 1, "Impossible"); + CHECK_FATAL(intrnMestmt->GetOpnd(0)->GetMeOp() == kMeOpVar, "must be kMeOpVar"); + VarMeExpr *rhs = static_cast(intrnMestmt->GetOpnd(0)); + CHECK_FATAL(IsExprRefOrPtr(*rhs), "type mis-match"); + EACGRefNode *lhsNode = GetOrCreateCGRefNodeForVar(*lhs, false); + EACGRefNode *rhsNode = GetOrCreateCGRefNodeForVar(*rhs, true); + (void)lhsNode->AddOutNode(*rhsNode); + } else if (intrnMestmt->GetIntrinsic() == INTRN_JAVA_FILL_NEW_ARRAY) { + CHECK_FATAL(intrnMestmt->GetMustDefList()->size() == 1, "Impossible"); + CHECK_FATAL(intrnMestmt->GetMustDefListItem(0).GetLHS()->GetMeOp() == kMeOpVar, "must be kMeOpVar"); + VarMeExpr *lhs = 
static_cast(intrnMestmt->GetMustDefListItem(0).GetLHS()); + if (!IsExprRefOrPtr(*lhs)) { + break; + } + EACGRefNode *lhsNode = GetOrCreateCGRefNodeForVar(*lhs, true); + CHECK_FATAL(intrnMestmt->GetOpnds().size() >= 1, "Impossible"); + for (MeExpr *expr : intrnMestmt->GetOpnds()) { + CHECK_FATAL(expr->GetMeOp() == kMeOpVar, "Impossible"); + VarMeExpr *rhs = static_cast(expr); + if (!IsExprRefOrPtr(*rhs)) { + continue; + } + EACGRefNode *rhsNode = GetOrCreateCGRefNodeForVar(*rhs, true); + for (const auto &objNode : lhsNode->GetPointsToSet()) { + // for array case, only one field node represents all elements + EACGFieldNode *fieldNode = objNode->GetFieldNodeFromIdx(0); + if (fieldNode == nullptr) { + fieldNode = eaCG->CreateFieldNode(nullptr, objNode->GetEAStatus(), 0, objNode, true); + } + (void)fieldNode->AddOutNode(*rhsNode); + } + } + } else { + CHECK_FATAL(false, "intrnId: %d in function: %s", intrnMestmt->GetIntrinsic(), func->GetName().c_str()); + } + break; + } + + default: + break; + } +} + +EAConnectionGraph *IPAEscapeAnalysis::GetEAConnectionGraph(MIRFunction &function) const { + if (function.GetEACG() != nullptr) { + return function.GetEACG(); + } + const std::map &summaryMap = mirModule->GetEASummary(); + GStrIdx nameStrIdx = function.GetNameStrIdx(); + auto it = summaryMap.find(nameStrIdx); + if (it != summaryMap.end() && it->second != nullptr) { + return it->second; + } + return nullptr; +} + +void IPAEscapeAnalysis::HandleParaAtCallSite(uint32 callInfo, CallMeStmt &call) { + MapleVector *argVector = eaCG->GetCallSiteArgNodeVector(callInfo); + if (argVector != nullptr && argVector->size() > 0) { + // We have handled this callsite before, skip it. + return; + } + const MapleVector &opnds = call.GetOpnds(); + const uint32 size = static_cast(opnds.size()); + + bool isOptIcall = (call.GetOp() == OP_interfaceicallassigned || call.GetOp() == OP_virtualicallassigned); + uint32 firstParmIdx = (isOptIcall ? 1 : 0); + + for (uint32 i = firstParmIdx; i < size; ++i) { + MeExpr *var = opnds[i]; + // we only solve reference node. + if (!IsExprRefOrPtr(*var) || var->GetOp() == OP_add) { + continue; + } + // for func(u, v), we assume that there exists assignment: a1 = u; a2 = v; + // a1, a2 are phantomArgNode and u, v are realArgNode, we add edge from a1 to u, etc. + EACGActualNode *phantomArgNode = + eaCG->CreateActualNode(kNoEscape, false, true, static_cast(i), callInfo); + // node for u, v. + std::vector nodes; + GetCGNodeForMeExpr(nodes, *var, call, true); + for (auto realArgNode : nodes) { + (void)phantomArgNode->AddOutNode(*realArgNode); + } + } + // Non-nullptr means it has return value + CHECK_FATAL(call.GetMustDefList() != nullptr, "Impossible"); + if (call.GetMustDefList()->size() == 1) { + if (call.GetMustDefListItem(0).GetLHS()->GetMeOp() != kMeOpVar && + call.GetMustDefListItem(0).GetLHS()->GetMeOp() != kMeOpReg) { + CHECK_FATAL(false, "NYI"); + } + if (!IsExprRefOrPtr(*call.GetMustDefListItem(0).GetLHS())) { + return; + } + // for x = func(u, v), we assume that there exists assignment: r = x; + // r is a phantom return node, and x is the real return node, we add edge from r to x. 
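+    // Illustrative example (hypothetical names): for "x = func(u, v)" the callsite
+    // ends up with phantom actual nodes a1 -> u and a2 -> v (created in the loop above)
+    // and a phantom return node r -> x (created below); MergeCG later matches these
+    // callsite nodes against the formal and return nodes of the callee's summary graph.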
+ EACGActualNode *phantomRetNode = + eaCG->CreateActualNode(kNoEscape, true, true, static_cast(size), callInfo); + // node for x + EACGRefNode *realRetNode = GetOrCreateCGRefNodeForVarOrReg(*call.GetMustDefListItem(0).GetLHS(), true); + (void)phantomRetNode->AddOutNode(*realRetNode); + } +} + +void IPAEscapeAnalysis::HandleSingleCallee(CallMeStmt &callMeStmt) { + uint32 callInfoId = callMeStmt.GetStmtID(); + MIRFunction &calleeCandidate = callMeStmt.GetTargetFunction(); + if (IPAEscapeAnalysis::kDebug) { + LogInfo::MapleLogger() << "[MERGECG] ready to merge func " << calleeCandidate.GetName() << "\n"; + } + if (calleeCandidate.IsAbstract()) { + CHECK_FATAL(false, "Impossible"); + if (IPAEscapeAnalysis::kDebug) { + LogInfo::MapleLogger() << "[MERGECG] skip to merge func because it is abstract." << "\n"; + } + return; + } + + EAConnectionGraph *calleeSummary = GetEAConnectionGraph(calleeCandidate); + + if (!mirModule->IsInIPA()) { + // This phase is in maplecomb, we need handle single callee differently + if (calleeSummary == nullptr) { + if (!calleeCandidate.IsNative() && !calleeCandidate.IsEmpty()) { + CHECK_FATAL(false, "Impossible"); + } + bool changedAfterMerge = eaCG->MergeCG(*eaCG->GetCallSiteArgNodeVector(callInfoId), nullptr); + if (changedAfterMerge) { + cgChangedInSCC = true; + } + } else { + MapleVector *caller = eaCG->GetCallSiteArgNodeVector(callInfoId); + const MapleVector *callee = calleeSummary->GetFuncArgNodeVector(); + bool changedAfterMerge = eaCG->MergeCG(*caller, callee); + if (changedAfterMerge) { + cgChangedInSCC = true; + } + } + return; + } + + CGNode *callerNode = pcg->GetCGNode(func->GetMirFunc()); + CHECK_FATAL(callerNode != nullptr, "Impossible, funcName: %s", func->GetName().c_str()); + CGNode *calleeNode = pcg->GetCGNode(&calleeCandidate); + CHECK_FATAL(calleeNode != nullptr, "Impossible, funcName: %s", calleeCandidate.GetName().c_str()); + if (calleeNode->GetSCCNode() == callerNode->GetSCCNode() && + (eaCG->GetNeedConservation() || + callerNode->GetSCCNode()->GetNodes().size() > IPAEscapeAnalysis::kFuncInSCCLimit)) { + bool changedAfterMerge = eaCG->MergeCG(*eaCG->GetCallSiteArgNodeVector(callInfoId), nullptr); + if (changedAfterMerge) { + cgChangedInSCC = true; + } + if (IPAEscapeAnalysis::kDebug) { + LogInfo::MapleLogger() << "[MERGECG] skip to merge func because NeedConservation." << "\n"; + } + return; + } + if (calleeSummary == nullptr && calleeCandidate.GetBody() != nullptr && !calleeCandidate.IsNative()) { + if (IPAEscapeAnalysis::kDebug) { + LogInfo::MapleLogger() << "[MERGECG] skip to merge func because this is first loop in scc." 
<< "\n"; + } + return; + } + if (calleeSummary == nullptr) { + bool changedAfterMerge = eaCG->MergeCG(*eaCG->GetCallSiteArgNodeVector(callInfoId), nullptr); + if (changedAfterMerge) { + cgChangedInSCC = true; + } + } else { + MapleVector *caller = eaCG->GetCallSiteArgNodeVector(callInfoId); + const MapleVector *callee = calleeSummary->GetFuncArgNodeVector(); + bool changedAfterMerge = eaCG->MergeCG(*caller, callee); + if (changedAfterMerge) { + cgChangedInSCC = true; + } + } +} + +void IPAEscapeAnalysis::HandleMultiCallees(const CallMeStmt &callMeStmt) { + uint32 callInfoId = callMeStmt.GetStmtID(); + bool changedAfterMerge = eaCG->MergeCG(*eaCG->GetCallSiteArgNodeVector(callInfoId), nullptr); + if (changedAfterMerge) { + cgChangedInSCC = true; + } +} + +void IPAEscapeAnalysis::UpdateEscConnGraphWithPhi(const BB &bb) { + const MapleMap &mePhiList = bb.GetMePhiList(); + for (auto it = mePhiList.begin(); it != mePhiList.end(); ++it) { + MePhiNode *phiNode = it->second; + auto *lhs = phiNode->GetLHS(); + if (lhs->GetMeOp() != kMeOpVar) { + continue; + } + if (!IsExprRefOrPtr(*lhs) || phiNode->GetOpnds().empty() || + IsVirtualVar(*ssaTab, static_cast(*lhs))) { + continue; + } + EACGRefNode *lhsNode = GetOrCreateCGRefNodeForVar(static_cast(*lhs), false); + for (auto itt = phiNode->GetOpnds().begin(); itt != phiNode->GetOpnds().end(); ++itt) { + auto *var = static_cast(*itt); + EACGRefNode *rhsNode = GetOrCreateCGRefNodeForVar(*var, true); + cgChangedInSCC = (lhsNode->AddOutNode(*rhsNode) ? true : cgChangedInSCC); + } + } +} + +void IPAEscapeAnalysis::HandleParaAtFuncEntry() { + if (!mirModule->IsInIPA()) { + CHECK_FATAL(eaCG == nullptr, "Impossible"); + } + + if (eaCG != nullptr) { + return; + } + MIRFunction *mirFunc = func->GetMirFunc(); + eaCG = mirModule->GetMemPool()->New( + mirModule, &mirModule->GetMPAllocator(), mirFunc->GetNameStrIdx()); + eaCG->InitGlobalNode(); + OriginalStTable &ostTab = ssaTab->GetOriginalStTable(); + // create actual node for formal parameter + for (size_t i = 0; i < mirFunc->GetFormalCount(); ++i) { + MIRSymbol *mirSt = mirFunc->GetFormal(i); + OriginalSt *ost = ostTab.FindOrCreateSymbolOriginalSt(*mirSt, mirFunc->GetPuidx(), 0); + VarMeExpr *formal = irMap->GetOrCreateZeroVersionVarMeExpr(*ost); + if (IsExprRefOrPtr(*formal)) { + EACGActualNode *actualNode = + eaCG->CreateActualNode(kArgumentEscape, false, true, static_cast(i), kInvalid); + EACGObjectNode *objNode = eaCG->CreateObjectNode(nullptr, kNoEscape, true, kInitTyIdx); + (void)actualNode->AddOutNode(*objNode); + EACGRefNode *formalNode = eaCG->CreateReferenceNode(formal, kNoEscape, false); + (void)formalNode->AddOutNode(*actualNode); + } + } +} + +void IPAEscapeAnalysis::ConstructConnGraph() { + HandleParaAtFuncEntry(); + auto cfg = func->GetCfg(); + cfg->BuildSCC(); + const MapleVector &sccTopologicalVec = cfg->GetSccTopologicalVec(); + for (size_t i = 0; i < sccTopologicalVec.size(); ++i) { + SCCOfBBs *scc = sccTopologicalVec[i]; + CHECK_FATAL(scc != nullptr, "nullptr check"); + if (scc->GetBBs().size() > 1) { + cfg->BBTopologicalSort(*scc); + } + cgChangedInSCC = true; + bool analyzeAgain = true; + while (analyzeAgain) { + analyzeAgain = false; + cgChangedInSCC = false; + for (BB *bb : scc->GetBBs()) { + if (bb == cfg->GetCommonEntryBB() || bb == cfg->GetCommonExitBB()) { + continue; + } + UpdateEscConnGraphWithPhi(*bb); + for (MeStmt *stmt = to_ptr(bb->GetMeStmts().begin()); stmt != nullptr; stmt = stmt->GetNextMeStmt()) { + UpdateEscConnGraphWithStmt(*stmt); + } + } + if (scc->HasCycle() && 
cgChangedInSCC) { + analyzeAgain = true; + } + } + } + eaCG->PropogateEAStatus(); + func->GetMirFunc()->SetEACG(eaCG); +} + +void IPAEscapeAnalysis::DoOptimization() { + CountObjRCOperations(); + ProcessNoAndRetEscObj(); + ProcessRetStmt(); + DeleteRedundantRC(); +} + +VarMeExpr *IPAEscapeAnalysis::CreateEATempVarWithName(const std::string &name) const { + const auto &strIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(name); + VarMeExpr *var = irMap->CreateNewVar(strIdx, PTY_ref, false); + return var; +} + +OriginalSt *IPAEscapeAnalysis::CreateEATempOst() { + std::string name = std::string("__EATemp__").append(std::to_string(++tempCount)); + return CreateEATempOstWithName(name); +} + +OriginalSt *IPAEscapeAnalysis::CreateEARetTempOst() const { + std::string name = std::string("__EARetTemp__"); + return CreateEATempOstWithName(name); +} + +OriginalSt *IPAEscapeAnalysis::CreateEATempOstWithName(const std::string &name) const { + MIRSymbol *symbol = func->GetMIRModule().GetMIRBuilder()->CreateLocalDecl(name, + *GlobalTables::GetTypeTable().GetRef()); + OriginalSt *ost = ssaTab->CreateSymbolOriginalSt(*symbol, func->GetMirFunc()->GetPuidx(), 0); + ost->SetZeroVersionIndex(irMap->GetVerst2MeExprTable().size()); + irMap->GetVerst2MeExprTable().push_back(nullptr); + ost->PushbackVersionsIndices(ost->GetZeroVersionIndex()); + return ost; +} + +VarMeExpr *IPAEscapeAnalysis::CreateEATempVarMeExpr(OriginalSt &ost) const { + VarMeExpr *var = irMap->CreateVarMeExprVersion(&ost); + return var; +} + +VarMeExpr *IPAEscapeAnalysis::GetOrCreateEARetTempVarMeExpr(OriginalSt &ost) { + if (retVar != nullptr) { + return retVar; + } + retVar = CreateEATempVarMeExpr(ost); + return retVar; +} + +VarMeExpr *IPAEscapeAnalysis::CreateEATempVar() { + std::string name = std::string("__EATemp__").append(std::to_string(++tempCount)); + return CreateEATempVarWithName(name); +} + +VarMeExpr *IPAEscapeAnalysis::GetOrCreateEARetTempVar() { + if (retVar != nullptr) { + return retVar; + } + std::string name = std::string("__EARetTemp__"); + retVar = CreateEATempVarWithName(name); + return retVar; +} + +void IPAEscapeAnalysis::ProcessNoAndRetEscObj() { + MeCFG *cfg = func->GetCfg(); + for (BB *bb : cfg->GetAllBBs()) { + if (bb == cfg->GetCommonEntryBB() || bb == cfg->GetCommonExitBB() || bb == nullptr || + bb->GetAttributes(kBBAttrIsInLoopForEA)) { + continue; + } + + for (MeStmt *stmt = to_ptr(bb->GetMeStmts().begin()); stmt != nullptr; stmt = stmt->GetNextMeStmt()) { + if (stmt->GetOp() == OP_dassign || stmt->GetOp() == OP_regassign || stmt->GetOp() == OP_iassign || + stmt->GetOp() == OP_maydassign) { + MeExpr *rhs = stmt->GetRHS(); + CHECK_FATAL(rhs != nullptr, "nullptr check"); + if (!rhs->IsGcmalloc()) { + continue; + } + CHECK_FATAL(func->GetMirFunc()->GetEACG() != nullptr, "Impossible"); + EACGBaseNode *node = func->GetMirFunc()->GetEACG()->GetCGNodeFromExpr(rhs); + CHECK_FATAL(node != nullptr, "nullptr check"); + CHECK_FATAL(node->IsObjectNode(), "impossible"); + EAStatus eaStatus = node->GetEAStatus(); + if ((eaStatus == kNoEscape) && (!static_cast(node)->IsPointedByFieldNode()) && + (static_cast(node)->GetRCOperations() >= kRCOperLB)) { + static_cast(node)->SetIgnorRC(true); + gcStmts.push_back(stmt); + OriginalSt *ost = CreateEATempOst(); + noAndRetEscOst.push_back(ost); + } + } + } + } + if (noAndRetEscOst.size() == 0) { + return; + } + BB *firstBB = cfg->GetFirstBB(); + CHECK_FATAL(firstBB != nullptr, "Impossible"); + for (size_t i = 0; i < noAndRetEscOst.size(); ++i) { + OriginalSt *ost = 
noAndRetEscOst[i]; + MeStmt *stmt = gcStmts.at(i); + BB *curBB = stmt->GetBB(); + VarMeExpr *initVar = CreateEATempVarMeExpr(*ost); + MeExpr *zeroExpr = irMap->CreateIntConstMeExpr(0, PTY_ref); + DassignMeStmt *initStmt = static_cast(irMap->CreateAssignMeStmt(*initVar, *zeroExpr, *firstBB)); + firstBB->AddMeStmtFirst(initStmt); + + VarMeExpr *var = CreateEATempVarMeExpr(*ost); + noAndRetEscObj.push_back(var); + ScalarMeExpr *lhs = stmt->GetLHS(); + CHECK_FATAL(lhs != nullptr, "nullptr check"); + DassignMeStmt *newStmt = static_cast(irMap->CreateAssignMeStmt(*var, *lhs, *curBB)); + curBB->InsertMeStmtAfter(stmt, newStmt); + IntrinsiccallMeStmt *meStmt = irMap->NewInPool(OP_intrinsiccall, INTRN_MCCSetObjectPermanent); + meStmt->PushBackOpnd(var); + curBB->InsertMeStmtAfter(newStmt, meStmt); + } +} + +void IPAEscapeAnalysis::ProcessRetStmt() { + if (noAndRetEscObj.size() == 0) { + return; + } + MeCFG *cfg = func->GetCfg(); + BB *firstBB = cfg->GetFirstBB(); + OriginalSt *ost = CreateEARetTempOst(); + VarMeExpr *initVar = CreateEATempVarMeExpr(*ost); + MeExpr *zeroExpr = irMap->CreateIntConstMeExpr(0, PTY_ref); + DassignMeStmt *newStmt = static_cast(irMap->CreateAssignMeStmt(*initVar, *zeroExpr, *firstBB)); + ASSERT(firstBB != nullptr, "null ptr check"); + firstBB->AddMeStmtFirst(newStmt); + + for (BB *bb : cfg->GetAllBBs()) { + if (bb == cfg->GetCommonEntryBB() || bb == cfg->GetCommonExitBB() || bb == nullptr) { + continue; + } + for (MeStmt *stmt = to_ptr(bb->GetMeStmts().begin()); stmt != nullptr; stmt = stmt->GetNextMeStmt()) { + if (stmt->GetOp() == OP_return) { + RetMeStmt *retMeStmt = static_cast(stmt); + CHECK_FATAL(retMeStmt->GetOpnds().size() <= 1, "must less than one"); + VarMeExpr *var = GetOrCreateEARetTempVarMeExpr(*ost); + for (const auto &expr : retMeStmt->GetOpnds()) { + if (IsExprRefOrPtr(*expr)) { + DassignMeStmt *newStmtTmp = static_cast(irMap->CreateAssignMeStmt(*var, *expr, *bb)); + bb->InsertMeStmtBefore(stmt, newStmtTmp); + } + } + IntrinsiccallMeStmt *meStmt = irMap->NewInPool( + OP_intrinsiccall, INTRN_MPL_CLEANUP_NORETESCOBJS); + meStmt->PushBackOpnd(var); + for (auto opnd : noAndRetEscObj) { + meStmt->PushBackOpnd(opnd); + } + bb->InsertMeStmtBefore(stmt, meStmt); + } + } + } +} + +void IPAEscapeAnalysis::CountObjRCOperations() { + MeCFG *cfg = func->GetCfg(); + for (BB *bb : cfg->GetAllBBs()) { + if (bb == cfg->GetCommonEntryBB() || bb == cfg->GetCommonExitBB() || bb == nullptr) { + continue; + } + for (MeStmt *stmt = to_ptr(bb->GetMeStmts().begin()); stmt != nullptr; stmt = stmt->GetNextMeStmt()) { + switch (stmt->GetOp()) { + case OP_intrinsiccall: { + IntrinsiccallMeStmt *intrn = static_cast(stmt); + switch (intrn->GetIntrinsic()) { + case INTRN_MCCIncRef: + case INTRN_MCCIncDecRef: + case INTRN_MCCIncDecRefReset: { + CHECK_FATAL(eaCG->GetCGNodeFromExpr(intrn->GetOpnd(0)) != nullptr, "nullptr check"); + std::vector nodes; + GetCGNodeForMeExpr(nodes, *intrn->GetOpnd(0), *intrn, false); + for (auto refNode : nodes) { + for (auto objNode : refNode->GetPointsToSet()) { + objNode->IncresRCOperations(); + } + } + break; + } + case INTRN_MCCWrite: + case INTRN_MCCWriteNoDec: + case INTRN_MCCWriteS: + case INTRN_MCCWriteSNoDec: + case INTRN_MCCWriteSVol: + case INTRN_MCCWriteSVolNoDec: + case INTRN_MCCWriteVol: + case INTRN_MCCWriteVolNoDec: + case INTRN_MCCWriteVolWeak: + case INTRN_MCCWriteWeak: { + CHECK_FATAL(eaCG->GetCGNodeFromExpr(intrn->GetOpnds().back()) != nullptr, "nullptr check"); + std::vector nodes; + GetCGNodeForMeExpr(nodes, *intrn->GetOpnds().back(), 
*intrn, false); + for (auto refNode : nodes) { + for (auto objNode : refNode->GetPointsToSet()) { + objNode->IncresRCOperations(); + } + } + break; + } + default: + break; + } + break; + } + case OP_intrinsiccallassigned: { + IntrinsiccallMeStmt *intrn = static_cast(stmt); + switch (intrn->GetIntrinsic()) { + case INTRN_MCCIncRef: + case INTRN_MCCLoadRef: + case INTRN_MCCLoadRefS: + case INTRN_MCCLoadRefSVol: + case INTRN_MCCLoadRefVol: + case INTRN_MCCLoadWeak: + case INTRN_MCCLoadWeakVol: { + CHECK_FATAL(intrn->GetMustDefList()->size() == 1, "Impossible"); + if (intrn->GetMustDefListItem(0).GetLHS()->GetMeOp() != kMeOpVar && + intrn->GetMustDefListItem(0).GetLHS()->GetMeOp() != kMeOpReg) { + CHECK_FATAL(false, "must be kMeOpVar or kMeOpReg"); + } + + if (!IsExprRefOrPtr(*intrn->GetMustDefListItem(0).GetLHS())) { + break; + } + EACGBaseNode *refNode = eaCG->GetCGNodeFromExpr(intrn->GetMustDefListItem(0).GetLHS()); + CHECK_FATAL(refNode != nullptr, "nullptr check"); + for (auto objNode : refNode->GetPointsToSet()) { + objNode->IncresRCOperations(); + } + break; + } + default: + break; + } + break; + } + case OP_call: + case OP_callassigned: + case OP_superclasscallassigned: { + CallMeStmt *callMeStmt = static_cast(stmt); + + // If a function has no reference parameter or return value, then skip it. + if (IsNoSideEffect(*callMeStmt)) { + break; + } + MIRFunction &calleeCandidate = callMeStmt->GetTargetFunction(); + std::string fName = calleeCandidate.GetName(); + if (fName == "MCC_GetOrInsertLiteral" || + fName == "MCC_GetCurrentClassLoader" || + fName == "Native_Thread_currentThread" || + fName == "Native_java_lang_StringFactory_newStringFromBytes___3BIII" || + fName == "Native_java_lang_StringFactory_newStringFromChars__II_3C" || + fName == "Native_java_lang_StringFactory_newStringFromString__Ljava_lang_String_2" || + fName == "Native_java_lang_String_intern__" || + fName == "MCC_StringAppend" || + fName == "MCC_StringAppend_StringInt" || + fName == "MCC_StringAppend_StringJcharString" || + fName == "MCC_StringAppend_StringString") { + break; + } + const MapleVector &opnds = callMeStmt->GetOpnds(); + const size_t size = opnds.size(); + + bool isOptIcall = + (callMeStmt->GetOp() == OP_interfaceicallassigned || callMeStmt->GetOp() == OP_virtualicallassigned); + size_t firstParmIdx = (isOptIcall ? 1 : 0); + + bool isSpecialCall = false; + if (fName == "Native_java_lang_Object_clone_Ljava_lang_Object__" || + fName == "Native_java_lang_String_concat__Ljava_lang_String_2" || + fName == + "Ljava_2Flang_2FAbstractStringBuilder_3B_7CappendCLONEDignoreret_7C_28Ljava_2Flang_2FString_3B_29V" || + StartWith(fName, "Ljava_2Flang_2FAbstractStringBuilder_3B_7Cappend_7C") || + StartWith(fName, "Ljava_2Flang_2FStringBuilder_3B_7Cappend_7C")) { + CallMeStmt *call = static_cast(callMeStmt); + CHECK_FATAL(call->GetMustDefList() != nullptr, "funcName: %s", fName.c_str()); + CHECK_FATAL(call->GetMustDefList()->size() <= 1, "funcName: %s", fName.c_str()); + if (call->GetMustDefList() != nullptr && call->GetMustDefList()->size() == 0) { + break; + } + isSpecialCall = true; + } + + for (size_t i = firstParmIdx; i < size; ++i) { + MeExpr *var = opnds[i]; + // we only solve reference node. 
+ if (!IsExprRefOrPtr(*var) || var->GetOp() == OP_add) { + continue; + } + CHECK_NULL_FATAL(eaCG->GetCGNodeFromExpr(var)); + std::vector nodes; + GetCGNodeForMeExpr(nodes, *var, *callMeStmt, false); + CHECK_FATAL(nodes.size() > 0, "the size must not be zero"); + for (EACGBaseNode *refNode : nodes) { + for (auto objNode : refNode->GetPointsToSet()) { + objNode->IncresRCOperations(); + } + } + if (isSpecialCall) { + break; + } + } + break; + } + case OP_intrinsiccallwithtypeassigned: { + CHECK_FATAL(false, "must not be OP_intrinsiccallwithtypeassigned"); + break; + } + default: + break; + } + } + } + + for (EACGBaseNode *node : eaCG->GetNodes()) { + if (node == nullptr || !node->IsObjectNode()) { + continue; + } + EACGObjectNode *obj = static_cast(node); + if (obj->IsPhantom()) { + continue; + } + if (obj->IsPointedByFieldNode()) { + obj->IncresRCOperations(kRCOperLB); + } + } +} + +void IPAEscapeAnalysis::DeleteRedundantRC() { + MeCFG *cfg = func->GetCfg(); + for (BB *bb : cfg->GetAllBBs()) { + if (bb == cfg->GetCommonEntryBB() || bb == cfg->GetCommonExitBB() || bb == nullptr) { + continue; + } + for (MeStmt *stmt = to_ptr(bb->GetMeStmts().begin()); stmt != nullptr; stmt = stmt->GetNextMeStmt()) { + if (stmt->GetOp() == OP_intrinsiccall) { + IntrinsiccallMeStmt *intrn = static_cast(stmt); + switch (intrn->GetIntrinsic()) { + case INTRN_MCCIncRef: + case INTRN_MCCDecRef: + case INTRN_MCCIncDecRef: { + bool canRemoveStmt = true; + for (auto expr : intrn->GetOpnds()) { + if (eaCG->GetCGNodeFromExpr(expr) == nullptr) { + canRemoveStmt = false; + break; + } + std::vector nodes; + GetCGNodeForMeExpr(nodes, *expr, *intrn, false); + for (auto node : nodes) { + if (!node->CanIgnoreRC()) { + canRemoveStmt = false; + break; + } + } + if (!canRemoveStmt) { + break; + } + } + if (canRemoveStmt) { + bb->RemoveMeStmt(stmt); + } + break; + } + case INTRN_MCCIncDecRefReset: + case INTRN_MCCDecRefReset: { + bool canRemoveStmt = true; + for (auto expr : intrn->GetOpnds()) { + if (expr->GetMeOp() != kMeOpAddrof) { + if (eaCG->GetCGNodeFromExpr(expr) == nullptr) { + canRemoveStmt = false; + break; + } + std::vector nodes; + GetCGNodeForMeExpr(nodes, *expr, *intrn, false); + for (auto node : nodes) { + if (!node->CanIgnoreRC()) { + canRemoveStmt = false; + break; + } + } + if (!canRemoveStmt) { + break; + } + } else { + AddrofMeExpr *addrof = static_cast(expr); + const OriginalSt *ost = addrof->GetOst(); + ASSERT(ost != nullptr, "null ptr check"); + for (auto index : ost->GetVersionsIndices()) { + if (ost->IsFormal()) { + canRemoveStmt = false; + break; + } + if (index == ost->GetZeroVersionIndex()) { + continue; + } + MeExpr *var = irMap->GetMeExprByVerID(static_cast(index)); + if (var == nullptr) { + continue; + } + if (eaCG->GetCGNodeFromExpr(var) == nullptr) { + canRemoveStmt = false; + break; + } + std::vector nodes; + GetCGNodeForMeExpr(nodes, *var, *intrn, false); + for (auto node : nodes) { + if (!node->CanIgnoreRC()) { + canRemoveStmt = false; + break; + } + } + } + if (!canRemoveStmt) { + break; + } + } + } + if (canRemoveStmt) { + bb->RemoveMeStmt(stmt); + } + break; + } + default: + break; + } + } + } + } +} +} diff --git a/src/mapleall/maple_ipa/src/old/ipa_option.cpp b/src/mapleall/maple_ipa/src/old/ipa_option.cpp new file mode 100644 index 0000000000000000000000000000000000000000..410f0d9ad8bf1ecfeec10666fbf19a903b34e1d1 --- /dev/null +++ b/src/mapleall/maple_ipa/src/old/ipa_option.cpp @@ -0,0 +1,98 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. 
+ * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "ipa_option.h" +#include "driver_options.h" +#include "file_utils.h" +#include "mpl_logging.h" +#include "triple.h" + +namespace maple { + +namespace opts::ipa { + maplecl::Option help({"--help", "-h"}, + " -h --help \tPrint usage and exit.Available command names:\n", + {ipaCategory}); + + maplecl::Option o1({"--O1", "-O1"}, + " --O1 \tEnable basic inlining\n", + {ipaCategory}); + + maplecl::Option o2({"--O2", "-O2"}, + " --O2 \tEnable greedy inlining\n", + {ipaCategory}); + + maplecl::Option effectipa({"--effectipa", "-effectipa"}, + " --effectipa \tEnable method side effect for ipa\n", + {ipaCategory}); + + maplecl::Option inlinefunclist({"--inlinefunclist", "-inlinefunclist"}, + " --inlinefunclist= \tInlining related configuration\n", + {ipaCategory}); + + maplecl::Option quiet({"--quiet", "-quiet"}, + " --quiet \tDisable out debug info\n", + {ipaCategory}); +} + +IpaOption &IpaOption::GetInstance() { + static IpaOption instance; + return instance; +} + +bool IpaOption::SolveOptions() const { + if (::opts::target.IsEnabledByUser()) { + Triple::GetTriple().Init(::opts::target.GetValue()); + } else { + Triple::GetTriple().Init(); + } + + if (opts::ipa::help.IsEnabledByUser()) { + maplecl::CommandLine::GetCommandLine().HelpPrinter(ipaCategory); + return false; + } + + if (opts::ipa::quiet.IsEnabledByUser()) { + MeOption::quiet = true; + Options::quiet = true; + } + + maplecl::CopyIfEnabled(MeOption::inlineFuncList, opts::ipa::inlinefunclist); + + return true; +} + +bool IpaOption::ParseCmdline(int argc, char **argv, std::vector &fileNames) const { + // Default value + MeOption::inlineFuncList = ""; + + (void)maplecl::CommandLine::GetCommandLine().Parse(argc, static_cast(argv), ipaCategory); + bool result = SolveOptions(); + if (!result) { + return false; + } + + auto &badArgs = maplecl::CommandLine::GetCommandLine().badCLArgs; + for (auto &arg : badArgs) { + if (FileUtils::IsFileExists(arg.first)) { + fileNames.push_back(arg.first); + } else { + return false; + } + } + + return true; +} +} // namespace maple + diff --git a/src/mapleall/maple_ipa/src/prop_parameter_type.cpp b/src/mapleall/maple_ipa/src/prop_parameter_type.cpp new file mode 100644 index 0000000000000000000000000000000000000000..b3dafdf149d526a9aecdc606be79895d9452b53b --- /dev/null +++ b/src/mapleall/maple_ipa/src/prop_parameter_type.cpp @@ -0,0 +1,237 @@ +/* + * Copyright (c) [2021-2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */
+#include "prop_parameter_type.h"
+#include "call_graph.h"
+#include "maple_phase.h"
+#include "option.h"
+#include "string_utils.h"
+#include "mir_function.h"
+#include "me_dominance.h"
+
+namespace maple {
+bool PropParamType::CheckOpndZero(const MeExpr *expr) const {
+  if (expr->GetMeOp() == kMeOpConst &&
+      static_cast<const ConstMeExpr*>(expr)->IsZero()) {
+    return true;
+  }
+  return false;
+}
+
+bool PropParamType::CheckCondtionStmt(const MeStmt &meStmt) const {
+  auto *node = meStmt.GetOpnd(0);
+  auto subOpnd0 = node->GetOpnd(0);
+  auto subOpnd1 = node->GetOpnd(1);
+  return CheckOpndZero(subOpnd0) || CheckOpndZero(subOpnd1);
+}
+
+void PropParamType::ResolveIreadExpr(MeExpr &expr) {
+  switch (expr.GetMeOp()) {
+    case kMeOpIvar: {
+      auto *ivarMeExpr = static_cast<IvarMeExpr*>(&expr);
+      const MeExpr *base = ivarMeExpr->GetBase();
+      if (base->GetMeOp() == kMeOpNary && base->GetOp() == OP_array) {
+        base = base->GetOpnd(0);
+      }
+      if (base->GetMeOp() == kMeOpVar) {
+        const VarMeExpr *varMeExpr = static_cast<const VarMeExpr*>(base);
+        MIRSymbol *sym = varMeExpr->GetOst()->GetMIRSymbol();
+        if (sym->IsFormal() && formalMapLocal[sym] != PointerAttr::kPointerNull) {
+          formalMapLocal[sym] = PointerAttr::kPointerNoNull;
+        }
+      }
+      break;
+    }
+    default: {
+      for (uint32 i = 0; i < expr.GetNumOpnds(); ++i) {
+        auto *subExpr = expr.GetOpnd(i);
+        ResolveIreadExpr(*subExpr);
+      }
+    }
+  }
+}
+
+void PropParamType::InsertNullCheck(CallMeStmt &callStmt, const std::string &funcName,
+                                    uint32 index, MeExpr &receiver) {
+  auto *irMap = curFunc->GetMeFunc()->GetIRMap();
+  GStrIdx stridx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(funcName);
+  CallAssertNonnullMeStmt *nullCheck = irMap->New<CallAssertNonnullMeStmt>(OP_callassertnonnull,
+      stridx, index, builder.GetCurrentFunction()->GetNameStrIdx());
+  nullCheck->SetBB(callStmt.GetBB());
+  nullCheck->SetSrcPos(callStmt.GetSrcPosition());
+  nullCheck->SetMeStmtOpndValue(&receiver);
+  callStmt.GetBB()->InsertMeStmtBefore(&callStmt, nullCheck);
+}
+
+void PropParamType::ResolveCallStmt(MeStmt &meStmt) {
+  auto *callMeStmt = static_cast<CallMeStmt*>(&meStmt);
+  PUIdx puidx = callMeStmt->GetPUIdx();
+  MIRFunction *calledFunc = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(puidx);
+
+  // If a formal is passed through the function call, conservatively treat it as possibly null.
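+  // The loop below walks this call's mu-list and downgrades every formal that is not already
+  // proven nonnull to PointerAttr::kPointerNull, since the callee's use of the value is not
+  // visible at this point.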
+ for (auto &map : std::as_const(*meStmt.GetMuList())) { + OStIdx idx = map.first; + SSATab *ssaTab = static_cast(dataMap.GetVaildAnalysisPhase(curFunc->GetMeFunc()->GetUniqueID(), + &MESSATab::id))->GetResult(); + OriginalSt *ostTemp = ssaTab->GetSymbolOriginalStFromID(idx); + ASSERT(ostTemp != nullptr, "null ptr check"); + MIRSymbol *tempSymbol = ostTemp->GetMIRSymbol(); + if (tempSymbol != nullptr && tempSymbol->IsFormal() && curFunc->GetParamNonull(tempSymbol) != + PointerAttr::kPointerNoNull) { + formalMapLocal[tempSymbol] = PointerAttr::kPointerNull; + } + } + // Insert the assertstmt && analysis curFunc parameter + if (calledFunc->IsExtern() || calledFunc->IsEmpty()) { + return; + } + for (uint32 i = 0; i < calledFunc->GetFormalCount(); i++) { + MIRSymbol *formalSt = calledFunc->GetFormal(i); + if (formalSt->GetType()->GetKind() == kTypePointer) { + if (calledFunc->CheckParamNullType(formalSt) && + calledFunc->GetParamNonull(formalSt) == PointerAttr::kPointerNoNull) { + InsertNullCheck(*callMeStmt, calledFunc->GetName(), i, *callMeStmt->GetOpnd(i)); + MIRSymbol *calledFuncFormalSt = calledFunc->GetFormal(i); + if (calledFuncFormalSt->IsFormal() && + curFunc->GetParamNonull(calledFuncFormalSt) != PointerAttr::kPointerNull) { + formalMapLocal[calledFuncFormalSt] = PointerAttr::kPointerNoNull; + } + } + } + } +} + +void PropParamType::TraversalMeStmt(MeStmt &meStmt) { + if (meStmt.GetOp() == OP_brfalse || meStmt.GetOp() == OP_brtrue) { + auto *opnd = meStmt.GetOpnd(0); + if (opnd->GetOp() != OP_eq && opnd->GetOp() != OP_ne && opnd->GetOp() != OP_gt && opnd->GetOp() != OP_ge) { + for (uint32 i = 0; i < meStmt.NumMeStmtOpnds(); ++i) { + auto *expr = meStmt.GetOpnd(i); + ResolveIreadExpr(*expr); + } + return; + } + if (CheckCondtionStmt(meStmt)) { + auto subOpnd0 = opnd->GetOpnd(0); + auto subOpnd1 = opnd->GetOpnd(1); + MeExpr *expr = CheckOpndZero(subOpnd0) ? 
subOpnd1 : subOpnd0; + if (expr->GetOp() == OP_dread) { + VarMeExpr *varExpr = static_cast(expr); + MIRSymbol *sym = varExpr->GetOst()->GetMIRSymbol(); + if (sym->IsFormal()) { + formalMapLocal[sym] = PointerAttr::kPointerNull; + return; + } + if (meStmt.GetMuList() == nullptr) { + return; + } + for (auto map : *meStmt.GetMuList()) { + OStIdx idx = map.first; + SSATab *ssaTab = static_cast(dataMap.GetVaildAnalysisPhase(curFunc->GetMeFunc()->GetUniqueID(), + &MESSATab::id))->GetResult(); + ASSERT(ssaTab != nullptr, "null ptr check"); + OriginalSt *ostTemp = ssaTab->GetSymbolOriginalStFromID(idx); + ASSERT(ostTemp != nullptr, "null ptr check"); + MIRSymbol *tempSymbol = ostTemp->GetMIRSymbol(); + if (tempSymbol->IsFormal()) { + formalMapLocal[tempSymbol] = PointerAttr::kPointerNull; + return; + } + } + } + } + } else if (meStmt.GetOp() == OP_callassigned || meStmt.GetOp() == OP_call) { + ResolveCallStmt(meStmt); + } else { + for (uint32 i = 0; i < meStmt.NumMeStmtOpnds(); ++i) { + auto *expr = meStmt.GetOpnd(i); + ResolveIreadExpr(*expr); + } + } +} + +void PropParamType::RunOnScc(maple::SCCNode &scc) { + for (auto *cgNode : scc.GetNodes()) { + MIRFunction *func = cgNode->GetMIRFunction(); + if (func->IsEmpty()) { + continue; + } + formalMapLocal.clear(); + curFunc = func; + for (uint32 i = 0; i < func->GetFormalCount(); i++) { + MIRSymbol *formalSt = func->GetFormal(i); + if (formalSt->GetType()->GetKind() != kTypePointer) { + continue; + } + if (formalSt->GetAttr(ATTR_nonnull)) { + func->SetParamNonull(formalSt, PointerAttr::kPointerNoNull); + formalMapLocal[formalSt] = PointerAttr::kPointerNoNull; + } + } + Prop(*func); + for (auto it = formalMapLocal.begin(); it != formalMapLocal.end(); ++it) { + func->SetParamNonull(it->first, it->second); + if (it->second == PointerAttr::kPointerNoNull) { + static_cast(it->first)->SetAttr(ATTR_nonnull); + uint32 index = func->GetFormalIndex(it->first); + if (index != 0xffffffff) { + FormalDef &formalDef = const_cast(func->GetFormalDefAt(index)); + formalDef.formalAttrs.SetAttr(ATTR_nonnull); + } + } + } + } +} + +void PropParamType::Prop(MIRFunction &func) { + for (uint32 i = 0; i < func.GetFormalCount(); i++) { + MIRSymbol *formalSt = func.GetFormal(i); + if (formalSt->GetType()->GetKind() == kTypePointer) { + formalMapLocal[formalSt] = PointerAttr::kPointerUndeiced; + } + } + auto dom = static_cast( + dataMap.GetVaildAnalysisPhase(curFunc->GetMeFunc()->GetUniqueID(), &MEDominance::id))->GetDomResult(); + CHECK_NULL_FATAL(dom); + for (auto *node : dom->GetReversePostOrder()) { + if (node == nullptr) { + return; + } + auto bb = func.GetMeFunc()->GetCfg()->GetBBFromID(BBId(node->GetID())); + // traversal on stmt + for (auto &meStmt : bb->GetMeStmts()) { + TraversalMeStmt(meStmt); + } + } +} + +void SCCPropParamType::GetAnalysisDependence(maple::AnalysisDep &aDep) const { + aDep.AddRequired(); + aDep.SetPreservedAll(); +} + +bool SCCPropParamType::PhaseRun(maple::SCCNode &scc) { + MIRModule *m = ((scc.GetNodes()[0])->GetMIRFunction())->GetModule(); + auto *memPool = GetPhaseMemPool(); + MapleAllocator alloc = MapleAllocator(memPool); + MaplePhase *it = GetAnalysisInfoHook()->FindOverIRAnalyisData(*m); + CallGraph *cg = static_cast(it)->GetResult(); + CHECK_FATAL(cg != nullptr, "Expecting a valid CallGraph, found nullptr"); + AnalysisDataManager *dataMap = GET_ANALYSIS(SCCPrepare, scc); + PropParamType prop(*memPool, alloc, *m, *cg, *dataMap); + prop.RunOnScc(scc); + return true; +} +} diff --git a/src/mapleall/maple_ipa/src/prop_return_null.cpp 
b/src/mapleall/maple_ipa/src/prop_return_null.cpp new file mode 100644 index 0000000000000000000000000000000000000000..9551775c64285b1b97874f3960a56d93cbaef083 --- /dev/null +++ b/src/mapleall/maple_ipa/src/prop_return_null.cpp @@ -0,0 +1,486 @@ +/* + * Copyright (c) [2021-2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "prop_return_null.h" +#include "call_graph.h" +#include "maple_phase.h" +#include "maple_phase.h" +#include "option.h" +#include "string_utils.h" +#include "mir_function.h" +#include "me_dominance.h" + +namespace maple { +static bool MaybeNull(const MeExpr &expr) { + if (expr.GetMeOp() == kMeOpVar) { + return static_cast(&expr)->GetMaybeNull(); + } else if (expr.GetMeOp() == kMeOpIvar) { + return static_cast(&expr)->GetMaybeNull(); + } else if (expr.GetOp() == OP_retype) { + MeExpr *retypeRHS = (static_cast(&expr))->GetOpnd(0); + return MaybeNull(*retypeRHS); + } + return true; +} + +TyIdx PropReturnAttr::GetInferredTyIdx(MeExpr &expr) const { + if (expr.GetMeOp() == kMeOpVar) { + auto *varMeExpr = static_cast(&expr); + if (varMeExpr->GetInferredTyIdx() == 0u) { + // If varMeExpr->inferredTyIdx has not been set, we can double check + // if it is coming from a static final field + const OriginalSt *ost = varMeExpr->GetOst(); + const MIRSymbol *mirSym = ost->GetMIRSymbol(); + if (mirSym->IsStatic() && mirSym->IsFinal() && mirSym->GetInferredTyIdx() != kInitTyIdx && + mirSym->GetInferredTyIdx() != kNoneTyIdx) { + varMeExpr->SetInferredTyIdx(mirSym->GetInferredTyIdx()); + } + if (mirSym->GetType()->GetKind() == kTypePointer) { + MIRType *pointedType = (static_cast(mirSym->GetType()))->GetPointedType(); + if (pointedType->GetKind() == kTypeClass) { + if ((static_cast(pointedType))->IsFinal()) { + varMeExpr->SetInferredTyIdx(pointedType->GetTypeIndex()); + } + } + } + } + return varMeExpr->GetInferredTyIdx(); + } else if (expr.GetMeOp() == kMeOpIvar) { + return static_cast(&expr)->GetInferredTyIdx(); + } else if (expr.GetOp() == OP_retype) { + MeExpr *retypeRHS = (static_cast(&expr))->GetOpnd(0); + return GetInferredTyIdx(*retypeRHS); + } + return TyIdx(0); +} + +void PropReturnAttr::PropVarInferredType(VarMeExpr &varMeExpr) const { + if (varMeExpr.GetDefBy() == kDefByStmt) { + DassignMeStmt &defStmt = utils::ToRef(safe_cast(varMeExpr.GetDefStmt())); + MeExpr *rhs = defStmt.GetRHS(); + if (rhs->GetOp() == OP_gcmalloc) { + varMeExpr.SetInferredTyIdx(static_cast(rhs)->GetTyIdx()); + varMeExpr.SetMaybeNull(false); + if (PropReturnAttr::debug) { + MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(varMeExpr.GetInferredTyIdx()); + LogInfo::MapleLogger() << "[PROP-RETURN-ATTR] [TYPE-INFERRING] mx" << varMeExpr.GetExprID() << " "; + type->Dump(0, false); + LogInfo::MapleLogger() << '\n'; + } + } else if (rhs->GetOp() == OP_gcmallocjarray) { + varMeExpr.SetInferredTyIdx(static_cast(rhs)->GetTyIdx()); + varMeExpr.SetMaybeNull(false); + if (PropReturnAttr::debug) { + MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(varMeExpr.GetInferredTyIdx()); + 
LogInfo::MapleLogger() << "[PROP-RETURN-ATTR] [TYPE-INFERRING] mx" << varMeExpr.GetExprID() << " "; + type->Dump(0, false); + LogInfo::MapleLogger() << '\n'; + } + } else if (!MaybeNull(*rhs)) { + varMeExpr.SetMaybeNull(false); + } else { + TyIdx tyIdx = GetInferredTyIdx(*rhs); + if (tyIdx != 0u) { + varMeExpr.SetInferredTyIdx(tyIdx); + if (PropReturnAttr::debug) { + MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(varMeExpr.GetInferredTyIdx()); + LogInfo::MapleLogger() << "[PROP-RETURN-ATTR] [TYPE-INFERRING] mx" << varMeExpr.GetExprID() << " "; + type->Dump(0, false); + LogInfo::MapleLogger() << '\n'; + } + } + } + } else if (varMeExpr.GetDefBy() == kDefByPhi) { + if (PropReturnAttr::debug) { + LogInfo::MapleLogger() << "[PROP-RETURN-ATTR] [TYPE-INFERRING] " << "Def by phi " << '\n'; + } + } +} + +void PropReturnAttr::PropIvarInferredType(IvarMeExpr &ivar) const { + IassignMeStmt *defStmt = ivar.GetDefStmt(); + if (defStmt == nullptr) { + return; + } + MeExpr *rhs = defStmt->GetRHS(); + CHECK_NULL_FATAL(rhs); + if (rhs->GetOp() == OP_gcmalloc) { + ivar.GetInferredTyIdx() = static_cast(rhs)->GetTyIdx(); + ivar.SetMaybeNull(false); + if (PropReturnAttr::debug) { + MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(ivar.GetInferredTyIdx()); + LogInfo::MapleLogger() << "[PROP-RETURN-ATTR] [TYPE-INFERRING] mx" << ivar.GetExprID() << " "; + type->Dump(0, false); + LogInfo::MapleLogger() << '\n'; + } + } else if (rhs->GetOp() == OP_gcmallocjarray) { + ivar.GetInferredTyIdx() = static_cast(rhs)->GetTyIdx(); + ivar.SetMaybeNull(false); + } else if (!MaybeNull(*rhs)) { + ivar.SetMaybeNull(false); + } else { + TyIdx tyIdx = GetInferredTyIdx(*rhs); + if (tyIdx != 0u) { + ivar.SetInferredTyidx(tyIdx); + if (PropReturnAttr::debug) { + MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(ivar.GetInferredTyIdx()); + LogInfo::MapleLogger() << "[PROP-RETURN-ATTR] [TYPE-INFERRING] mx" << ivar.GetExprID() << " "; + type->Dump(0, false); + LogInfo::MapleLogger() << '\n'; + } + } + } +} + +void PropReturnAttr::VisitVarPhiNode(MePhiNode &varPhi) const { + MapleVector opnds = varPhi.GetOpnds(); + auto *lhs = varPhi.GetLHS(); + // RegPhiNode cases NYI + if (lhs == nullptr || lhs->GetMeOp() != kMeOpVar) { + return; + } + VarMeExpr *lhsVar = static_cast(varPhi.GetLHS()); + for (size_t i = 0; i < opnds.size(); ++i) { + VarMeExpr *opnd = static_cast(opnds[i]); + PropVarInferredType(*opnd); + if (MaybeNull(*opnd)) { + return; + } + } + lhsVar->SetMaybeNull(false); + return; +} + +void PropReturnAttr::VisitMeExpr(MeExpr *meExpr) const { + if (meExpr == nullptr) { + return; + } + MeExprOp meOp = meExpr->GetMeOp(); + switch (meOp) { + case kMeOpVar: { + auto *varExpr = static_cast(meExpr); + PropVarInferredType(*varExpr); + break; + } + case kMeOpReg: + break; + case kMeOpIvar: { + auto *iVar = static_cast(meExpr); + PropIvarInferredType(*iVar); + break; + } + case kMeOpOp: { + auto *meOpExpr = static_cast(meExpr); + for (uint32 i = 0; i < kOperandNumTernary; ++i) { + VisitMeExpr(meOpExpr->GetOpnd(i)); + } + break; + } + case kMeOpNary: { + auto *naryMeExpr = static_cast(meExpr); + for (MeExpr *opnd : naryMeExpr->GetOpnds()) { + VisitMeExpr(opnd); + } + break; + } + case kMeOpAddrof: + case kMeOpAddroffunc: + case kMeOpAddroflabel: + case kMeOpGcmalloc: + case kMeOpConst: + case kMeOpConststr: + case kMeOpConststr16: + case kMeOpSizeoftype: + case kMeOpFieldsDist: + break; + default: + CHECK_FATAL(false, "MeOP NIY"); + break; + } +} + +void PropReturnAttr::ReturnTyIdxInferring(const 
RetMeStmt &retMeStmt) { + const MapleVector &opnds = retMeStmt.GetOpnds(); + CHECK_FATAL(opnds.size() <= 1, "Assume at most one return value for now"); + for (size_t i = 0; i < opnds.size(); ++i) { + MeExpr *opnd = opnds[i]; + TyIdx tyIdx = GetInferredTyIdx(*opnd); + if (retTy == kNotSeen) { + // seen the first return stmt + retTy = kSeen; + inferredRetTyIdx = tyIdx; + if (!MaybeNull(*opnd)) { + maybeNull = false; + } + } else if (retTy == kSeen) { + // has seen an nonull before, check if they agreed + if (inferredRetTyIdx != tyIdx) { + retTy = kFailed; + inferredRetTyIdx = TyIdx(0); // not agreed, cleared. + } + if (MaybeNull(*opnd) || maybeNull) { + maybeNull = true; // not agreed, cleared. + } + } + } +} + +void PropReturnAttr::TraversalMeStmt(MeStmt &meStmt) { + Opcode op = meStmt.GetOp(); + switch (op) { + case OP_dassign: { + auto *varMeStmt = static_cast(&meStmt); + VisitMeExpr(varMeStmt->GetRHS()); + break; + } + case OP_regassign: { + auto *regMeStmt = static_cast(&meStmt); + VisitMeExpr(regMeStmt->GetRHS()); + break; + } + case OP_maydassign: { + auto *maydStmt = static_cast(&meStmt); + VisitMeExpr(maydStmt->GetRHS()); + break; + } + case OP_iassign: { + auto *ivarStmt = static_cast(&meStmt); + VisitMeExpr(ivarStmt->GetRHS()); + break; + } + case OP_syncenter: + case OP_syncexit: { + auto *syncMeStmt = static_cast(&meStmt); + const MapleVector &opnds = syncMeStmt->GetOpnds(); + for (size_t i = 0; i < opnds.size(); ++i) { + MeExpr *opnd = opnds[i]; + VisitMeExpr(opnd); + } + break; + } + case OP_throw: { + auto *thrMeStmt = static_cast(&meStmt); + VisitMeExpr(thrMeStmt->GetOpnd()); + break; + } + case OP_assertnonnull: + case OP_eval: + case OP_igoto: + case OP_free: { + auto *unaryStmt = static_cast(&meStmt); + VisitMeExpr(unaryStmt->GetOpnd()); + break; + } + case OP_asm: + case OP_call: + case OP_virtualcall: + case OP_virtualicall: + case OP_superclasscall: + case OP_interfacecall: + case OP_interfaceicall: + case OP_customcall: + case OP_polymorphiccall: + case OP_callassigned: + case OP_virtualcallassigned: + case OP_virtualicallassigned: + case OP_superclasscallassigned: + case OP_interfacecallassigned: + case OP_interfaceicallassigned: + case OP_customcallassigned: + case OP_polymorphiccallassigned: { + auto *callMeStmt = static_cast(&meStmt); + const MapleVector &opnds = callMeStmt->GetOpnds(); + for (size_t i = 0; i < opnds.size(); ++i) { + MeExpr *opnd = opnds[i]; + VisitMeExpr(opnd); + } + break; + } + case OP_icall: + case OP_icallproto: + case OP_icallprotoassigned: + case OP_icallassigned: { + auto *icallMeStmt = static_cast(&meStmt); + const MapleVector &opnds = icallMeStmt->GetOpnds(); + for (size_t i = 0; i < opnds.size(); ++i) { + MeExpr *opnd = opnds[i]; + VisitMeExpr(opnd); + } + break; + } + case OP_intrinsiccallwithtype: + case OP_intrinsiccall: + case OP_xintrinsiccall: + case OP_intrinsiccallwithtypeassigned: + case OP_intrinsiccallassigned: + case OP_xintrinsiccallassigned: { + auto *intrinCallStmt = static_cast(&meStmt); + const MapleVector &opnds = intrinCallStmt->GetOpnds(); + for (size_t i = 0; i < opnds.size(); ++i) { + MeExpr *opnd = opnds[i]; + VisitMeExpr(opnd); + } + break; + } + case OP_brtrue: + case OP_brfalse: { + auto *condGotoStmt = static_cast(&meStmt); + VisitMeExpr(condGotoStmt->GetOpnd()); + break; + } + case OP_switch: { + auto *switchStmt = static_cast(&meStmt); + VisitMeExpr(switchStmt->GetOpnd()); + break; + } + case OP_return: { + auto *retMeStmt = static_cast(&meStmt); + const MapleVector &opnds = retMeStmt->GetOpnds(); + for 
(size_t i = 0; i < opnds.size(); ++i) {
+        MeExpr *opnd = opnds[i];
+        VisitMeExpr(opnd);
+      }
+      ReturnTyIdxInferring(*retMeStmt);
+      break;
+    }
+    CASE_OP_ASSERT_BOUNDARY {
+      auto *assMeStmt = static_cast<NaryMeStmt*>(&meStmt);
+      VisitMeExpr(assMeStmt->GetOpnd(0));
+      VisitMeExpr(assMeStmt->GetOpnd(1));
+      break;
+    }
+    case OP_jstry:
+    case OP_jscatch:
+    case OP_finally:
+    case OP_endtry:
+    case OP_cleanuptry:
+    case OP_try:
+    case OP_catch:
+    case OP_goto:
+    case OP_gosub:
+    case OP_retsub:
+    case OP_comment:
+    case OP_membaracquire:
+    case OP_membarrelease:
+    case OP_membarstoreload:
+    case OP_membarstorestore:
+    case OP_callassertnonnull:
+      break;
+    default:
+      CHECK_FATAL(false, "unexpected stmt or NYI");
+  }
+  if (meStmt.GetOp() != OP_callassigned) {
+    return;
+  }
+  MapleVector<MustDefMeNode> *mustDefList = meStmt.GetMustDefList();
+  if (mustDefList->empty()) {
+    return;
+  }
+  MeExpr *meLHS = mustDefList->front().GetLHS();
+  if (meLHS->GetMeOp() != kMeOpVar) {
+    return;
+  }
+  auto *lhsVar = static_cast<VarMeExpr*>(meLHS);
+  auto *callMeStmt = static_cast<CallMeStmt*>(&meStmt);
+  MIRFunction &called = callMeStmt->GetTargetFunction();
+  if (called.GetAttr(FUNCATTR_nonnull)) {
+    lhsVar->SetMaybeNull(false);
+  }
+}
+
+void PropReturnAttr::TraversalBB(BB *bb) {
+  if (bb == nullptr) {
+    return;
+  }
+  // traverse var phi nodes
+  MapleMap<OStIdx, MePhiNode*> &mePhiList = bb->GetMePhiList();
+  for (auto it = mePhiList.cbegin(); it != mePhiList.cend(); ++it) {
+    MePhiNode *phiMeNode = it->second;
+    if (phiMeNode == nullptr || phiMeNode->GetLHS()->GetMeOp() != kMeOpVar) {
+      continue;
+    }
+    VisitVarPhiNode(*phiMeNode);
+  }
+  // traverse reg phi nodes (NYI)
+  // traverse stmts
+  for (auto &meStmt : bb->GetMeStmts()) {
+    TraversalMeStmt(meStmt);
+  }
+}
+
+void PropReturnAttr::Perform(MeFunction &func) {
+  // Pre-order traverse the dominance tree, so that each def is traversed
+  // before its use
+  std::vector<bool> bbVisited(func.GetCfg()->GetAllBBs().size(), false);
+  Dominance *dom = static_cast<MEDominance*>(dataMap.GetVaildAnalysisPhase(func.GetUniqueID(),
+      &MEDominance::id))->GetDomResult();
+  for (auto *bb : dom->GetReversePostOrder()) {
+    TraversalBB(func.GetCfg()->GetBBFromID(BBId(bb->GetID())));
+  }
+  MIRFunction *mirFunc = func.GetMirFunc();
+  if (mirFunc == nullptr) {
+    return;
+  }
+  if (retTy == kSeen && !maybeNull) {
+    mirFunc->SetRetrunAttrKind(kPointerNoNull);
+    mirFunc->SetAttr(FUNCATTR_nonnull);
+  }
+}
+
+void PropReturnAttr::Initialize(maple::SCCNode &scc) const {
+  for (auto *cgNode : scc.GetNodes()) {
+    MIRFunction *func = cgNode->GetMIRFunction();
+    if (func->IsEmpty()) {
+      continue;
+    }
+    if (func->GetAttr(FUNCATTR_nonnull)) {
+      func->SetRetrunAttrKind(kPointerNoNull);
+    }
+  }
+}
+
+void PropReturnAttr::Prop(maple::SCCNode &scc) {
+  for (auto *cgNode : scc.GetNodes()) {
+    retTy = kNotSeen;
+    maybeNull = true;
+    MIRFunction *func = cgNode->GetMIRFunction();
+    if (func->IsEmpty() || func->GetReturnType()->GetKind() != kTypePointer) {
+      continue;
+    }
+    if (func->GetRetrunAttrKind() == PointerAttr::kPointerNoNull) {
+      continue;
+    }
+    MeFunction *meFunc = func->GetMeFunc();
+    Perform(*meFunc);
+  }
+}
+
+void SCCPropReturnAttr::GetAnalysisDependence(maple::AnalysisDep &aDep) const {
+  aDep.AddRequired();
+}
+
+bool SCCPropReturnAttr::PhaseRun(maple::SCCNode &scc) {
+  MIRModule *m = ((scc.GetNodes()[0])->GetMIRFunction())->GetModule();
+  auto *memPool = GetPhaseMemPool();
+  MapleAllocator alloc = MapleAllocator(memPool);
+  MaplePhase *it = GetAnalysisInfoHook()->GetTopLevelAnalyisData(*m);
+  CallGraph *cg = static_cast(it)->GetResult();
+  CHECK_FATAL(cg != nullptr, "Expecting
a valid CallGraph, found nullptr"); + AnalysisDataManager *dataMap = GET_ANALYSIS(SCCPrepare, scc); + PropReturnAttr prop(*memPool, alloc, *m, *cg, *dataMap); + prop.Initialize(scc); + prop.Prop(scc); + return true; +} +} diff --git a/src/mapleall/maple_ipa/src/region_identify.cpp b/src/mapleall/maple_ipa/src/region_identify.cpp new file mode 100644 index 0000000000000000000000000000000000000000..14a1bfce71321a71b9b1fc3eebbf74a0d073f600 --- /dev/null +++ b/src/mapleall/maple_ipa/src/region_identify.cpp @@ -0,0 +1,373 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "region_identify.h" +#include +#include +#include "ipa_collect.h" +#include "opcodes.h" +#include "stmt_identify.h" + +namespace { + constexpr size_t kRegionInputLimit = 8; + constexpr size_t kRegionOutputLimit = 1; + constexpr size_t kRegionMinInvalidStmtNumber = 4; +} + +namespace maple { +const bool RegionCandidate::HasDefinitionOutofRegion(DefUsePositions &defUse) const { + for (auto defPos : defUse.definePositions) { + if (defPos > endId || defPos < startId) { + return true; + } + } + return false; +} + +void RegionCandidate::CollectRegionInputAndOutput(StmtInfo &stmtInfo, CollectIpaInfo &ipaInfo) { + for (auto defUsePositions : stmtInfo.GetRegDefUse()) { + auto ®Idx = defUsePositions.first; + auto src = SymbolRegPair(StIdx(0), defUsePositions.first); + for (auto usePos : defUsePositions.second.usePositions) { + if (usePos >= startId && usePos <= endId) { + continue; + } + (void)regionOutputs.insert(src); + auto &useStmtInfo = ipaInfo.GetStmtInfo()[usePos]; + auto iter = useStmtInfo.GetRegDefUse().find(regIdx); + if (iter != useStmtInfo.GetRegDefUse().end() && + HasDefinitionOutofRegion(iter->second)) { + (void)regionInputs.insert(src); + } + } + if (HasDefinitionOutofRegion(defUsePositions.second)) { + (void)regionInputs.insert(src); + } + } + for (auto defUsePositions : stmtInfo.GetSymbolDefUse()) { + auto &stIdx = defUsePositions.first; + auto src = SymbolRegPair(stIdx, PregIdx(0)); + for (auto usePos : defUsePositions.second.usePositions) { + if (usePos >= startId && usePos <= endId) { + continue; + } + (void)regionOutputs.insert(src); + auto &useStmtInfo = ipaInfo.GetStmtInfo()[usePos]; + auto iter = useStmtInfo.GetSymbolDefUse().find(stIdx); + if (iter != useStmtInfo.GetSymbolDefUse().end() && + HasDefinitionOutofRegion(iter->second)) { + (void)regionInputs.insert(src); + } + } + if (HasDefinitionOutofRegion(defUsePositions.second)) { + (void)regionInputs.insert(src); + } + } +} + +bool RegionCandidate::HasJumpOutOfRegion(StmtInfo &stmtInfo, bool isStart) { + for (auto location : stmtInfo.GetLocationsJumpTo()) { + if (location == endId + 1) { + (void)stmtJumpToEnd.insert(stmtInfo.GetStmtNode()); + } + if (location > endId + 1 || location < startId) { + return true; + } + } + if (isStart) { + return false; + } + for (auto location : stmtInfo.GetLocationsJumpFrom()) { + if (location < startId || location > endId) { + return true; + } + } + return false; +} + +bool 
RegionCandidate::IsLegal(CollectIpaInfo &ipaInfo) { + std::unordered_set stmtTypes; + uint32 validStmts = 0; + bool hasCall = false; + for (auto i = startId; i <= endId; ++i) { + auto &stmtInfo = ipaInfo.GetStmtInfo()[i]; + auto currOp = stmtInfo.GetHashCandidateAt(0); + if (!(currOp == OP_comment) && !(currOp == OP_label) && !(currOp == OP_eval)) { + if (currOp == OP_dassign || currOp == OP_iassign || currOp == OP_regassign) { + (void)stmtTypes.insert(OP_dassign); + } else { + (void)stmtTypes.insert(static_cast(currOp)); + } + validStmts++; + } + if (HasJumpOutOfRegion(stmtInfo, i == startId)) { + return false; + } + if (!stmtInfo.IsValid()) { + return false; + } + if (stmtInfo.IsCall()) { + hasCall = true; + } + CollectRegionInputAndOutput(stmtInfo, ipaInfo); + if (regionInputs.size() > kRegionInputLimit || regionOutputs.size() > kRegionOutputLimit) { + return false; + } + } + return stmtTypes.size() > 1 && (validStmts > kRegionMinInvalidStmtNumber && hasCall); +} + +void RegionIdentify::RegionInit() { + std::vector integerString; + auto &rawString = ipaInfo->GetIntegerString(); + integerString.assign(rawString.begin(), rawString.end()); + if (integerString.empty()) { + return; + } + (void)integerString.emplace_back(0); + (void)ipaInfo->GetStmtInfo().emplace_back( + StmtInfo(nullptr, kInvalidPuIdx, ipaInfo->GetAllocator())); + SuffixArray sa(integerString, integerString.size(), ipaInfo->GetCurrNewStmtIndex()); + sa.Run(true); + CreateRegionCandidates(sa); +} + +void RegionIdentify::CreateRegionCandidates(SuffixArray &sa) { + for (auto *subStrings : sa.GetRepeatedSubStrings()) { + std::vector candidates; + for (auto occurrence : subStrings->GetOccurrences()) { + auto startPosition = occurrence.first; + auto endPosition = occurrence.second; + auto *startStmtInfo = GetNearestNonnullStmtInfo(startPosition, true); + auto *endStmtInfo = GetNearestNonnullStmtInfo(endPosition, false); + auto *function = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(startStmtInfo->GetPuIdx()); + auto currRegion = RegionCandidate(startPosition, endPosition, startStmtInfo, endStmtInfo, function); + if (currRegion.IsLegal(*ipaInfo)) { + (void)candidates.emplace_back(currRegion); + } + } + delete subStrings; + subStrings = nullptr; + if (!(candidates.size() > 1)) { + continue; + } + CreateRegionGroups(candidates); + } +} + +void RegionIdentify::CreateRegionGroups(std::vector ®ions) { + for (size_t i = 0; i < regions.size(); ++i) { + auto &currRegion = regions[i]; + if (currRegion.GetGroupId() != kInvalidIndex) { + continue; + } + auto currGroup = RegionGroup(); + GroupId newGroupId = regionGroups.size(); + currGroup.SetGroupId(newGroupId); + currRegion.SetGroupId(newGroupId); + (void)currGroup.GetGroups().emplace_back(currRegion); + for (size_t j = i + 1; j < regions.size(); ++j) { + auto &nextRegion = regions[j]; + if (CheckOverlapAmongGroupRegions(currGroup, nextRegion)) { + continue; + } + if (HasSameStructure(currRegion, nextRegion)) { + nextRegion.SetGroupId(newGroupId); + (void)currGroup.GetGroups().emplace_back(nextRegion); + } + ClearSrcMappings(); + } + if (currGroup.GetGroups().size() < kGroupSizeLimit) { + continue; + } + (void)regionGroups.emplace_back(currGroup); + } +} + +void RegionIdentify::ClearSrcMappings() { + symMap.clear(); + leftRegMap.clear(); + rightRegMap.clear(); + leftConstMap.clear(); + rightConstMap.clear(); +} + +StmtInfo *RegionIdentify::GetNearestNonnullStmtInfo(StmtInfoId index, bool forward) { + auto &stmtInfoVector = ipaInfo->GetStmtInfo(); + while 
(stmtInfoVector[index].GetStmtNode() == nullptr) { + index = forward ? index + 1 : index - 1; + } + return &stmtInfoVector[index]; +} + +bool RegionIdentify::CheckOverlapAmongGroupRegions(RegionGroup &group, RegionCandidate ®ion) { + for (auto &groupedRegion : group.GetGroups()) { + if (region.IsOverlapWith(groupedRegion)) { + return true; + } + } + return false; +} + +bool RegionIdentify::CheckCompatibilifyBetweenSrcs(BaseNode &lhs, BaseNode &rhs) { + switch (lhs.GetOpCode()) { + case OP_dassign: { + auto leftStIdx = static_cast(lhs).GetStIdx(); + auto rightStIdx = static_cast(rhs).GetStIdx(); + if (!CompareSymbolStructure(leftStIdx, rightStIdx)) { + return false; + } + break; + } + case OP_regassign: { + auto leftRegIdx = static_cast(lhs).GetRegIdx(); + auto rightRegIdx = static_cast(rhs).GetRegIdx(); + if (!CompareRegStructure(leftRegIdx, rightRegIdx)) { + return false; + } + break; + } + case OP_addrof: + case OP_dread: { + auto leftStIdx = static_cast(lhs).GetStIdx(); + auto rightStIdx = static_cast(rhs).GetStIdx(); + return CompareSymbolStructure(leftStIdx, rightStIdx); + } + case OP_regread: { + auto leftRegIdx = static_cast(lhs).GetRegIdx(); + auto rightRegIdx = static_cast(rhs).GetRegIdx(); + return CompareRegStructure(leftRegIdx, rightRegIdx); + } + case OP_constval: { + auto *leftVal = static_cast(lhs).GetConstVal(); + auto *rightVal = static_cast(rhs).GetConstVal(); + return CompareConstStructure(leftVal, rightVal); + } + default: { + break; + } + } + return true; +} + +bool RegionIdentify::CheckCompatibilifyAmongRegionComponents(BaseNode &lhs, BaseNode &rhs) { + if (lhs.GetOpCode() != rhs.GetOpCode()) { + return false; + } + + if (lhs.GetOpCode() == OP_block) { + auto *leftStmt = static_cast(lhs).GetFirst(); + auto *rightStmt = static_cast(rhs).GetFirst(); + while (leftStmt != nullptr && rightStmt != nullptr) { + if (!CheckCompatibilifyAmongRegionComponents(*leftStmt, *rightStmt)) { + return false; + } + leftStmt = leftStmt->GetNext(); + rightStmt = rightStmt->GetNext(); + } + return (leftStmt == nullptr) && (rightStmt == nullptr); + } + + if (!CheckCompatibilifyBetweenSrcs(lhs, rhs)) { + return false; + } + + if (lhs.GetNumOpnds() != rhs.GetNumOpnds()) { + return false; + } + + for (size_t i = 0; i < lhs.GetNumOpnds(); ++i) { + if (!CheckCompatibilifyAmongRegionComponents(*lhs.Opnd(i), *rhs.Opnd(i))) { + return false; + } + } + return true; +} + +bool RegionIdentify::CompareConstStructure(const MIRConst *leftConst, const MIRConst *rightConst) { + if (leftConstMap.find(leftConst) != leftConstMap.end()) { + if (leftConstMap[leftConst] != rightConst) { + return false; + } + } else { + leftConstMap[leftConst] = rightConst; + } + if (rightConstMap.find(rightConst) != rightConstMap.end()) { + if (rightConstMap[rightConst] != leftConst) { + return false; + } + } else { + rightConstMap[rightConst] = leftConst; + } + return true; +} + +bool RegionIdentify::CompareRegStructure(const PregIdx leftIdx, const PregIdx rightIdx) { + if (leftRegMap.find(leftIdx) != leftRegMap.end()) { + if (leftRegMap[leftIdx] != rightIdx) { + return false; + } + } else { + leftRegMap[leftIdx] = rightIdx; + } + if (rightRegMap.find(rightIdx) != rightRegMap.end()) { + if (rightRegMap[rightIdx] != leftIdx) { + return false; + } + } else { + rightRegMap[rightIdx] = leftIdx; + } + return true; +} + +bool RegionIdentify::CompareSymbolStructure(const StIdx leftIdx, const StIdx rightIdx) { + if (leftIdx.IsGlobal() != rightIdx.IsGlobal()) { + return false; + } + if (leftIdx.IsGlobal() && rightIdx.IsGlobal()) { + 
return leftIdx == rightIdx; + } + if (symMap.find(leftIdx) != symMap.end()) { + if (symMap[leftIdx] != rightIdx) { + return false; + } + } else { + symMap[leftIdx] = rightIdx; + } + if (symMap.find(rightIdx) != symMap.end()) { + if (symMap[rightIdx] != leftIdx) { + return false; + } + } else { + symMap[rightIdx] = leftIdx; + } + return true; +} + +bool RegionIdentify::HasSameStructure(RegionCandidate &lhs, RegionCandidate &rhs) { + if (lhs.GetRegionOutPuts().size() != rhs.GetRegionOutPuts().size()) { + return false; + } + auto *leftStmt = lhs.GetStart()->GetStmtNode(); + auto *rightStmt = rhs.GetStart()->GetStmtNode(); + while (leftStmt && rightStmt && leftStmt->GetStmtInfoId() <= lhs.GetEndId()) { + if (!CheckCompatibilifyAmongRegionComponents(*leftStmt, *rightStmt)) { + return false; + } + leftStmt = leftStmt->GetNext(); + rightStmt = rightStmt->GetNext(); + } + return true; +} +} diff --git a/src/mapleall/maple_ir/BUILD.gn b/src/mapleall/maple_ir/BUILD.gn new file mode 100755 index 0000000000000000000000000000000000000000..79155184d4f909a4ad848b8a783c71f17953a0fe --- /dev/null +++ b/src/mapleall/maple_ir/BUILD.gn @@ -0,0 +1,109 @@ +# +# Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# +include_directories = [ + "${MAPLEALL_ROOT}/maple_ir/include", + "${MAPLEALL_ROOT}/mpl2mpl/include", + "${MAPLEALL_ROOT}/maple_util/include", + "${MAPLEALL_ROOT}/maple_driver/include", + "${MAPLEALL_ROOT}/mempool/include", + "${THIRD_PARTY_ROOT}/bounds_checking_function/include", + "${MAPLEALL_ROOT}/maple_ipa/include", + "${MAPLEALL_ROOT}/maple_ipa/include/old", + "${MAPLEALL_ROOT}/maple_me/include", + "${MAPLEALL_ROOT}/maple_phase/include", + "${THIRD_PARTY_ROOT}/llvm_modified/llvm/include/llvm/BinaryFormat", +] + +src_libmplir = [ + "src/global_tables.cpp", + "src/intrinsics.cpp", + "src/lexer.cpp", + "src/mir_symbol_builder.cpp", + "src/mir_builder.cpp", + "src/mir_const.cpp", + "src/mir_scope.cpp", + "src/mir_function.cpp", + "src/mir_lower.cpp", + "src/mir_module.cpp", + "src/verification.cpp", + "src/verify_annotation.cpp", + "src/verify_mark.cpp", + "src/mir_nodes.cpp", + "src/mir_symbol.cpp", + "src/mir_type.cpp", + "src/mir_enum.cpp", + "src/opcode_info.cpp", + "src/option.cpp", + "src/mpl2mpl_options.cpp", + "src/parser.cpp", + "src/mir_parser.cpp", + "src/mir_pragma.cpp", + "src/printing.cpp", + "src/bin_func_import.cpp", + "src/bin_func_export.cpp", + "src/bin_mpl_import.cpp", + "src/bin_mpl_export.cpp", + "src/debug_info.cpp", + "src/debug_info_util.cpp", + "${MAPLEALL_ROOT}/maple_ipa/src/old/ea_connection_graph.cpp", +] + +src_irbuild = [ "src/driver.cpp" ] + +configs = [ "${MAPLEALL_ROOT}:mapleallcompilecfg" ] + +cflags_cc += [ "-DSTORE_BACK_VTABLE_REF_AFTER_LOAD=1" ] + +static_library("libmplir") { + sources = src_libmplir + include_dirs = include_directories + output_dir = "${root_out_dir}/lib/${HOST_ARCH}" + deps = [ + "${MAPLEALL_ROOT}/maple_driver:libdriver_option", + "${MAPLEALL_ROOT}/maple_phase:libmplphase", + "${MAPLEALL_ROOT}/maple_util:libmplutil", + 
"${MAPLEALL_ROOT}/maple_util:libcommandline" + ] +} + +executable("irbuild") { + sources = src_irbuild + include_dirs = include_directories + deps = [ + ":libmplir", + "${MAPLEALL_ROOT}/maple_phase:libmplphase", + "${MAPLEALL_ROOT}/maple_util:libmplutil", + "${MAPLEALL_ROOT}/maple_util:libcommandline", + "${MAPLEALL_ROOT}/mempool:libmempool", + "${MAPLEALL_ROOT}/mpl2mpl:libmpl2mpl", + "${THIRD_PARTY_ROOT}/bounds_checking_function:libHWSecureC", + ] +} + +src_mpldbg = [ "src/mpl_dbg.cpp" ] + +executable("mpldbg") { + sources = src_mpldbg + include_dirs = include_directories + deps = [ + ":libmplir", + "${MAPLEALL_ROOT}/maple_phase:libmplphase", + "${MAPLEALL_ROOT}/maple_util:libmplutil", + "${MAPLEALL_ROOT}/maple_util:libcommandline", + "${MAPLEALL_ROOT}/mempool:libmempool", + "${MAPLEALL_ROOT}/mpl2mpl:libmpl2mpl", + "${THIRD_PARTY_ROOT}/bounds_checking_function:libHWSecureC", + ] +} diff --git a/src/mapleall/maple_ir/CMakeLists.txt b/src/mapleall/maple_ir/CMakeLists.txt new file mode 100755 index 0000000000000000000000000000000000000000..24d556a4e6767de9a4f1c0a5ad40b85fe1265310 --- /dev/null +++ b/src/mapleall/maple_ir/CMakeLists.txt @@ -0,0 +1,115 @@ +# +# Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. +# +# OpenArkCompiler is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN AS IS BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR +# FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# + +set(inc_libmplir + ${MAPLEALL_ROOT}/maple_ir/include + ${MAPLEALL_ROOT}/mpl2mpl/include + ${MAPLEALL_ROOT}/maple_util/include + ${MAPLEALL_ROOT}/maple_driver/include + ${MAPLEALL_ROOT}/mempool/include + ${THIRD_PARTY_ROOT}/bounds_checking_function/include + ${MAPLEALL_ROOT}/maple_ipa/include + ${MAPLEALL_ROOT}/maple_ipa/include/old + ${MAPLEALL_ROOT}/maple_me/include + ${MAPLEALL_ROOT}/maple_phase/include + ${THIRD_PARTY_ROOT}/llvm_modified/llvm/include/llvm/BinaryFormat +) + +set(src_libmplir + src/global_tables.cpp + src/intrinsics.cpp + src/lexer.cpp + src/mir_symbol_builder.cpp + src/mir_builder.cpp + src/mir_const.cpp + src/mir_scope.cpp + src/mir_function.cpp + src/mir_lower.cpp + src/mir_module.cpp + src/verification.cpp + src/verify_annotation.cpp + src/verify_mark.cpp + src/mir_nodes.cpp + src/mir_symbol.cpp + src/mir_type.cpp + src/mir_enum.cpp + src/opcode_info.cpp + src/option.cpp + src/mpl2mpl_options.cpp + src/parser.cpp + src/mir_parser.cpp + src/mir_pragma.cpp + src/printing.cpp + src/bin_func_import.cpp + src/bin_func_export.cpp + src/bin_mpl_import.cpp + src/bin_mpl_export.cpp + src/debug_info.cpp + src/debug_info_util.cpp + ${MAPLEALL_ROOT}/maple_ipa/src/old/ea_connection_graph.cpp +) + +set(src_irbuild "src/driver.cpp") + +set(src_mpldbg "src/mpl_dbg.cpp") + +set(deps_libmplir + libdriver_option + libmplphase + libmplutil + libcommandline +) + +set(deps_irbuild + libmplir + libmplphase + libmplutil + libcommandline + libmempool + libmpl2mpl + libHWSecureC + libdriver_option +) + +set(CMAKE_CXX_FLAGS + "${CMAKE_CXX_FLAGS} -DSTORE_BACK_VTABLE_REF_AFTER_LOAD=1") + +#libmplir +add_library(libmplir STATIC ${src_libmplir}) +set_target_properties(libmplir PROPERTIES + COMPILE_FLAGS "" + INCLUDE_DIRECTORIES "${inc_libmplir}" + LINK_LIBRARIES "${deps_libmplir}" + 
ARCHIVE_OUTPUT_DIRECTORY "${MAPLE_BUILD_OUTPUT}/lib/${HOST_ARCH}" +) + +#irbuild +add_executable(irbuild "${src_irbuild}") +set_target_properties(irbuild PROPERTIES + COMPILE_FLAGS "" + INCLUDE_DIRECTORIES "${inc_libmplir}" + LINK_LIBRARIES "${deps_irbuild}" + RUNTIME_OUTPUT_DIRECTORY "${MAPLE_BUILD_OUTPUT}/bin" +) + +#mpldbg +add_executable(mpldbg "${src_mpldbg}") +set_target_properties(mpldbg PROPERTIES + COMPILE_FLAGS "" + INCLUDE_DIRECTORIES "${inc_libmplir}" + LINK_LIBRARIES "${deps_irbuild}" + RUNTIME_OUTPUT_DIRECTORY "${MAPLE_BUILD_OUTPUT}/bin" +) + diff --git a/src/mapleall/maple_ir/include/all_attributes.def b/src/mapleall/maple_ir/include/all_attributes.def new file mode 100644 index 0000000000000000000000000000000000000000..61957ab1b6743221293f8ca73e918830844b9ad2 --- /dev/null +++ b/src/mapleall/maple_ir/include/all_attributes.def @@ -0,0 +1,116 @@ +/* + * Copyright (c) [2019-2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan PSL v1. + * You can use this software according to the terms and conditions of the Mulan PSL v1. + * You may obtain a copy of Mulan PSL v1 at: + * + * http://license.coscl.org.cn/MulanPSL + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v1 for more details. + */ +/* all possible attribute names from typeattrs.def, funcattrs.def and fieldattrs.def */ + ATTR(const) + ATTR(final) + ATTR(generic) + ATTR(implicit) + ATTR(private) + ATTR(protected) + ATTR(public) + ATTR(static) + ATTR(synthetic) + ATTR(used) + ATTR(hiddenapiblack) + ATTR(hiddenapigrey) +#ifdef FUNC_ATTR + ATTR(bridge) + ATTR(constructor) + ATTR(critical_native) + ATTR(declared_synchronized) + ATTR(default) + ATTR(destructor) + ATTR(delete) + ATTR(fast_native) + ATTR(inline) + ATTR(always_inline) + ATTR(gnu_inline) + ATTR(noinline) + ATTR(native) + ATTR(strict) + ATTR(varargs) + ATTR(virtual) + ATTR(nosideeffect) + ATTR(pure) + ATTR(noexcept) + ATTR(nodefargeffect) + ATTR(nodefeffect) + ATTR(noretglobal) + ATTR(nothrow_exception) + ATTR(noretarg) + ATTR(noprivate_defeffect) + ATTR(ipaseen) + ATTR(rclocalunowned) + ATTR(callersensitive) + ATTR(weakref) + ATTR(safed) + ATTR(unsafed) + ATTR(noreturn) +#endif +#if defined(FUNC_ATTR) || defined(TYPE_ATTR) + ATTR(abstract) + ATTR(extern) + ATTR(interface) + ATTR(local) + ATTR(optimized) + ATTR(synchronized) + ATTR(weak) +#endif +#if defined(TYPE_ATTR) || defined(FIELD_ATTR) +#include "memory_order_attrs.def" + ATTR(enum) + ATTR(restrict) + ATTR(transient) + ATTR(volatile) + ATTR(rcunowned) + ATTR(rcweak) + ATTR(final_boundary_size) + ATTR(tls_static) + ATTR(tls_dynamic) +#endif +#ifdef TYPE_ATTR + ATTR(annotation) + ATTR(readonly) + ATTR(verified) + ATTR(localrefvar) + ATTR(rcunownedthis) + ATTR(incomplete_array) + ATTR(may_alias) + ATTR(static_init_zero) +#endif +#ifdef FUNC_ATTR + ATTR(firstarg_return) + ATTR(called_once) +#endif +#ifdef STMT_ATTR + ATTR(insaferegion) +#endif + ATTR(oneelem_simd) + ATTR(nonnull) + ATTR(section) + ATTR(asmattr) +#if defined(FUNC_ATTR) && !defined(NOCONTENT_ATTR) + ATTR(alias) + ATTR(constructor_priority) + ATTR(destructor_priority) +#endif +#if (defined(TYPE_ATTR) || defined(FIELD_ATTR)) && !defined(NOCONTENT_ATTR) + ATTR(pack) +#endif +#ifdef FUNC_ATTR + ATTR(initialization) + ATTR(termination) + ATTR(outlined) +#endif diff --git 
a/src/mapleall/maple_ir/include/bin_mir_file.h b/src/mapleall/maple_ir/include/bin_mir_file.h new file mode 100644 index 0000000000000000000000000000000000000000..2cc010c8d37c9911083903289b6064051bda0507 --- /dev/null +++ b/src/mapleall/maple_ir/include/bin_mir_file.h @@ -0,0 +1,48 @@ +/* + * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLE_IR_INCLUDE_BIN_MIR_FILE_H +#define MAPLE_IR_INCLUDE_BIN_MIR_FILE_H +#include +#include "types_def.h" + +namespace maple { +const std::string kBinMirFileID = "HWCMPL"; // for magic in file header +constexpr uint8 kVersionMajor = 0; // experimental version +constexpr uint8 kVersionMinor = 1; +constexpr int kMagicSize = 7; + +enum BinMirFileType { + kMjsvmFileTypeCmplV1, + kMjsvmFileTypeCmpl, // kCmpl v2 is the release version of + kMjsvmFileTypeUnknown +}; + +inline uint8 MakeVersionNum(uint8 major, uint8 minor) { + uint8 mj = major & 0x0Fu; + uint8 mn = minor & 0x0Fu; + constexpr uint8 shiftNum = 4; + return (mj << shiftNum) | mn; +} + +// file header for binary format kMmpl, 8B in total +// Note the header is different with the specification +struct BinMIRFileHeader { + char magic[kMagicSize]; // “HWCMPL”, or "HWLOS_" + uint8 segNum; // number of segments (e.g. one raw IR file is a segment unit) + uint8 type; // enum of type of VM file (e.g. MapleIR, TE) + uint8 version; // version of IR format (should be major.minor) +}; +} // namespace maple +#endif // MAPLE_IR_INCLUDE_BIN_MIR_FILE_H diff --git a/src/mapleall/maple_ir/include/bin_mpl_export.h b/src/mapleall/maple_ir/include/bin_mpl_export.h new file mode 100644 index 0000000000000000000000000000000000000000..3f849583dd17eefcce0899245f2a6a02a159bd05 --- /dev/null +++ b/src/mapleall/maple_ir/include/bin_mpl_export.h @@ -0,0 +1,261 @@ +/* + * Copyright (c) [2019-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MAPLE_IR_INCLUDE_BIN_MPL_EXPORT_H +#define MAPLE_IR_INCLUDE_BIN_MPL_EXPORT_H +#include "mir_module.h" +#include "mir_nodes.h" +#include "mir_function.h" +#include "mir_preg.h" +#include "mir_enum.h" +#include "parser_opt.h" +#include "ea_connection_graph.h" + +namespace maple { +enum : uint8 { + kBinString = 1, + kBinUsrString = kBinString, + kBinInitConst = 2, + kBinSymbol = 3, + kBinFunction = 4, + kBinCallinfo = 5, + kBinKindTypeScalar = 6, + kBinKindTypeByName = 7, + kBinKindTypePointer = 8, + kBinKindTypeFArray = 9, + kBinKindTypeJarray = 10, + kBinKindTypeArray = 11, + kBinKindTypeFunction = 12, + kBinKindTypeParam = 13, + kBinKindTypeInstantVector = 14, + kBinKindTypeGenericInstant = 15, + kBinKindTypeBitField = 16, + kBinKindTypeStruct = 17, // for kTypeStruct, kTypeStructIncomplete and kTypeUnion + kBinKindTypeClass = 18, // for kTypeClass, and kTypeClassIncomplete + kBinKindTypeInterface = 19, // for kTypeInterface, and kTypeInterfaceIncomplete + kBinKindConstInt = 20, + kBinKindConstAddrof = 21, + kBinKindConstAddrofFunc = 22, + kBinKindConstStr = 23, + kBinKindConstStr16 = 24, + kBinKindConstFloat = 25, + kBinKindConstDouble = 26, + kBinKindConstAgg = 27, + kBinKindConstSt = 28, + kBinContentStart = 29, + kBinStrStart = 30, + kBinTypeStart = 31, + kBinCgStart = 32, + kBinSeStart = 33, + kBinFinish = 34, + kStartMethod = 35, + kBinEaCgNode = 36, + kBinEaCgActNode = 37, + kBinEaCgFieldNode = 38, + kBinEaCgRefNode = 39, + kBinEaCgObjNode = 40, + kBinEaCgStart = 41, + kBinEaStart = 42, + kBinNodeBlock = 43, + kBinEnumeration = 44, + kBinEnumStart = 45, + kBinReturnvals = 46, + kBinTypeTabStart = 47, + kBinSymStart = 48, + kBinSymTabStart = 49, + kBinFuncIdInfoStart = 50, + kBinFormalStart = 51, + kBinPreg = 52, + kBinSpecialReg = 53, + kBinLabel = 54, + kBinTypenameStart = 55, + kBinHeaderStart = 56, + kBinAliasMapStart = 57, +// kBinKindTypeViaTypename : 58, +// kBinKindSymViaSymname : 59, +// kBinKindFuncViaSymname : 60, + kBinFunctionBodyStart = 61, + kBinFormalWordsTypeTagged = 62, + kBinFormalWordsRefCounted = 63, + kBinLocalWordsTypeTagged = 64, + kBinLocalWordsRefCounter = 65, + kBinKindConstAddrofLabel = 66, + kBinKindConstAddrofLocal = 67, +}; + +// this value is used to check wether a file is a binary mplt file +constexpr int32 kMpltMagicNumber = 0xC0FFEE; +class BinaryMplExport { + public: + explicit BinaryMplExport(MIRModule &md); + virtual ~BinaryMplExport() = default; + + void Export(const std::string &fname, std::unordered_set *dumpFuncSet); + void WriteNum(int64 x); + void Write(uint8 b); + void OutputType(const TyIdx &tyIdx); + void WriteFunctionBodyField(uint64 contentIdx, std::unordered_set *dumpFuncSet); + void OutputConst(MIRConst *constVal); + void OutputConstBase(const MIRConst &constVal); + void OutputTypeBase(const MIRType &type); + void OutputTypePairs(const MIRInstantVectorType &type); + void OutputStr(const GStrIdx &gstr); + void OutputUsrStr(UStrIdx ustr); + void OutputTypeAttrs(const TypeAttrs &ta); + void OutputPragmaElement(const MIRPragmaElement &e); + void OutputPragma(const MIRPragma &p); + void OutputFieldPair(const FieldPair &fp); + void OutputMethodPair(const MethodPair &memPool); + void OutputFieldsOfStruct(const FieldVector &fields); + void OutputMethodsOfStruct(const MethodVector &methods); + void OutputStructTypeData(const MIRStructType &type); + void OutputImplementedInterfaces(const std::vector &interfaces); + void OutputInfoIsString(const std::vector &infoIsString); + void OutputInfo(const std::vector &info, const 
std::vector &infoIsString); + void OutputPragmaVec(const std::vector &pragmaVec); + void OutputClassTypeData(const MIRClassType &type); + void OutputSymbol(MIRSymbol *sym); + void OutputEnumeration(MIREnum *mirEnum); + void OutputFunction(PUIdx puIdx); + void OutputInterfaceTypeData(const MIRInterfaceType &type); + void OutputSrcPos(const SrcPosition &pos); + void OutputAliasMap(MapleMap &aliasVarMap); + void OutputInfoVector(const MIRInfoVector &infoVector, const MapleVector &infoVectorIsString); + void OutputFuncIdInfo(MIRFunction *func); + void OutputLocalSymbol(MIRSymbol *sym); + void OutputPreg(MIRPreg *preg); + void OutputLabel(LabelIdx lidx); + void OutputLocalTypeNameTab(const MIRTypeNameTable *typeNameTab); + void OutputFormalsStIdx(MIRFunction *func); + void OutputFuncViaSym(PUIdx puIdx); + void OutputExpression(BaseNode *e); + void OutputBaseNode(const BaseNode *b); + void OutputReturnValues(const CallReturnVector *retv); + void OutputBlockNode(BlockNode *block); + + const MIRModule &GetMIRModule() const { + return mod; + } + + bool not2mplt; // this export is not to an mplt file + MIRFunction *curFunc = nullptr; + + private: + using CallSite = std::pair; + void WriteEaField(const CallGraph &cg); + void WriteEaCgField(EAConnectionGraph *eaCg); + void OutEaCgNode(EACGBaseNode &node); + void OutEaCgBaseNode(const EACGBaseNode &node, bool firstPart); + void OutEaCgFieldNode(EACGFieldNode &field); + void OutEaCgRefNode(const EACGRefNode &ref); + void OutEaCgActNode(const EACGActualNode &act); + void OutEaCgObjNode(EACGObjectNode &obj); + void WriteCgField(uint64 contentIdx, const CallGraph *cg); + void WriteSeField(); + void OutputCallInfo(CallInfo &callInfo); + void WriteContentField4mplt(int fieldNum, uint64 *fieldStartP); + void WriteContentField4nonmplt(int fieldNum, uint64 *fieldStartP); + void WriteContentField4nonJava(int fieldNum, uint64 *fieldStartP); + void WriteStrField(uint64 contentIdx); + void WriteHeaderField(uint64 contentIdx); + void WriteTypeField(uint64 contentIdx, bool useClassList = true); + void Init(); + void WriteSymField(uint64 contentIdx); + void WriteInt(int32 x); + uint8 Read(); + int32 ReadInt(); + void WriteInt64(int64 x); + void WriteAsciiStr(const std::string &str); + void Fixup(size_t i, uint32 x); + void DumpBuf(const std::string &name); + void AppendAt(const std::string &name, int32 offset); + void ExpandFourBuffSize(); + void WriteEnumField(uint64 contentIdx); + + MIRModule &mod; + size_t bufI = 0; + std::vector buf; + std::unordered_map gStrMark; + std::unordered_map funcMark; + std::string importFileName; + std::unordered_map uStrMark; + std::unordered_map symMark; + std::unordered_map typMark; + std::unordered_map localSymMark; + std::unordered_map localPregMark; + std::unordered_map labelMark; + friend class UpdateMplt; + std::unordered_map callInfoMark; + std::map *func2SEMap = nullptr; + std::unordered_map eaNodeMark; + bool inIPA = false; + static int typeMarkOffset; // offset of mark (tag in binmplimport) resulting from duplicated function +}; + +class UpdateMplt { + public: + UpdateMplt() = default; + ~UpdateMplt() = default; + class ManualSideEffect { + public: + ManualSideEffect(std::string name, bool p, bool u, bool d, bool o, bool e) + : funcName(name), pure(p), defArg(u), def(d), object(o), exception(e) {}; + virtual ~ManualSideEffect() = default; + + const std::string &GetFuncName() const { + return funcName; + } + + bool GetPure() const { + return pure; + } + + bool GetDefArg() const { + return defArg; + } + + bool GetDef() const { 
+ return def; + } + + bool GetObject() const { + return object; + } + + bool GetException() const { + return exception; + } + + bool GetPrivateUse() const { + return privateUse; + } + + bool GetPrivateDef() const { + return privateDef; + } + + private: + std::string funcName; + bool pure; + bool defArg; + bool def; + bool object; + bool exception; + bool privateUse = false; + bool privateDef = false; + }; + void UpdateCgField(BinaryMplt &binMplt, const CallGraph &cg) const; +}; +} // namespace maple +#endif // MAPLE_IR_INCLUDE_BIN_MPL_EXPORT_H diff --git a/src/mapleall/maple_ir/include/bin_mpl_import.h b/src/mapleall/maple_ir/include/bin_mpl_import.h new file mode 100644 index 0000000000000000000000000000000000000000..bc3ba10fb8fbe9c3e4ae0330c8c438e3eee64e03 --- /dev/null +++ b/src/mapleall/maple_ir/include/bin_mpl_import.h @@ -0,0 +1,176 @@ +/* + * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLE_IR_INCLUDE_BIN_MPL_IMPORT_H +#define MAPLE_IR_INCLUDE_BIN_MPL_IMPORT_H +#include "mir_module.h" +#include "mir_nodes.h" +#include "mir_preg.h" +#include "parser_opt.h" +#include "mir_builder.h" +#include "ea_connection_graph.h" +namespace maple { +class BinaryMplImport { + public: + explicit BinaryMplImport(MIRModule &md) : mod(md), mirBuilder(&md) {} + BinaryMplImport &operator=(const BinaryMplImport&) = delete; + BinaryMplImport(const BinaryMplImport&) = delete; + + virtual ~BinaryMplImport() { + for (MIRStructType *structPtr : tmpStruct) { + delete structPtr; + } + for (MIRClassType *classPtr : tmpClass) { + delete classPtr; + } + for (MIRInterfaceType *interfacePtr : tmpInterface) { + delete interfacePtr; + } + } + + uint64 GetBufI() const { + return bufI; + } + void SetBufI(uint64 bufIVal) { + bufI = bufIVal; + } + + bool IsBufEmpty() const { + return buf.empty(); + } + size_t GetBufSize() const { + return buf.size(); + } + + int32 GetContent(int64 key) const { + return content.at(key); + } + + void SetImported(bool importedVal) { + imported = importedVal; + } + + bool Import(const std::string &fname, bool readSymbols = false, bool readSe = false); + bool ImportForSrcLang(const std::string &fname, MIRSrcLang &srcLang); + MIRSymbol *GetOrCreateSymbol(TyIdx tyIdx, GStrIdx strIdx, MIRSymKind mclass, MIRStorageClass sclass, + MIRFunction *func, uint8 scpID); + int32 ReadInt(); + int64 ReadNum(); + private: + void ReadContentField(); + void ReadStrField(); + void ReadHeaderField(); + void ReadTypeField(); + void ReadSymField(); + void ReadSymTabField(); + void ReadCgField(); + EAConnectionGraph *ReadEaCgField(); + void ReadEaField(); + EACGBaseNode &InEaCgNode(EAConnectionGraph &newEaCg); + void InEaCgBaseNode(EACGBaseNode &base, EAConnectionGraph &newEaCg, bool firstPart); + void InEaCgActNode(EACGActualNode &actual); + void InEaCgFieldNode(EACGFieldNode &field, EAConnectionGraph &newEaCg); + void InEaCgObjNode(EACGObjectNode &obj, EAConnectionGraph &newEaCg); + void InEaCgRefNode(EACGRefNode &ref); + CallInfo *ImportCallInfo(); + void MergeDuplicated(PUIdx 
methodPuidx, std::vector &targetSet, std::vector &newSet); + void ReadSeField(); + void Jump2NextField(); + void Reset(); + void SkipTotalSize(); + void ImportFieldsOfStructType(FieldVector &fields, uint32 methodSize); + MIRType &InsertInTypeTables(MIRType &ptype); + void InsertInHashTable(MIRType &type); + void SetupEHRootType(); + void UpdateMethodSymbols(); + void ImportConstBase(MIRConstKind &kind, MIRTypePtr &type); + MIRConst *ImportConst(MIRFunction *func); + GStrIdx ImportStr(); + UStrIdx ImportUsrStr(); + MIRType *CreateMirType(MIRTypeKind kind, GStrIdx strIdx, int64 tag) const; + MIRGenericInstantType *CreateMirGenericInstantType(GStrIdx strIdx) const; + MIRBitFieldType *CreateBitFieldType(uint8 fieldsize, PrimType pt, GStrIdx strIdx) const; + void CompleteAggInfo(TyIdx tyIdx); + TyIdx ImportType(bool forPointedType = false); + TyIdx ImportTypeNonJava(); + void ImportTypeBase(PrimType &primType, GStrIdx &strIdx, bool &nameIsLocal); + void InSymTypeTable(); + void ImportTypePairs(std::vector &insVecType); + TypeAttrs ImportTypeAttrs(); + MIRPragmaElement *ImportPragmaElement(); + MIRPragma *ImportPragma(); + void ImportFieldPair(FieldPair &fp); + void ImportMethodPair(MethodPair &memPool); + void ImportMethodsOfStructType(MethodVector &methods); + void ImportStructTypeData(MIRStructType &type); + void ImportInterfacesOfClassType(std::vector &interfaces); + void ImportInfoIsStringOfStructType(MIRStructType &type); + void ImportInfoOfStructType(MIRStructType &type); + void ImportPragmaOfStructType(MIRStructType &type); + void SetClassTyidxOfMethods(MIRStructType &type) const; + void ImportClassTypeData(MIRClassType &type); + void ImportInterfaceTypeData(MIRInterfaceType &type); + PUIdx ImportFunction(); + MIRSymbol *InSymbol(MIRFunction *func); + void ImportInfoVector(MIRInfoVector &infoVector, MapleVector &infoVectorIsString); + void ImportLocalTypeNameTable(MIRTypeNameTable *typeNameTab); + void ImportFuncIdInfo(MIRFunction *func); + void ImportEnumeration(); + MIRSymbol *ImportLocalSymbol(MIRFunction *func); + PregIdx ImportPreg(MIRFunction *func); + LabelIdx ImportLabel(MIRFunction *func); + void ImportFormalsStIdx(MIRFunction *func); + void ImportAliasMap(MIRFunction *func); + void ImportSrcPos(SrcPosition &pos); + void ImportBaseNode(Opcode &o, PrimType &typ); + PUIdx ImportFuncViaSym(MIRFunction *func); + BaseNode *ImportExpression(MIRFunction *func); + void ImportReturnValues(MIRFunction *func, CallReturnVector *retv); + BlockNode *ImportBlockNode(MIRFunction *func); + void ReadFunctionBodyField(); + void ReadEnumField(); + void ReadFileAt(const std::string &name, int32 offset); + uint8 Read(); + int64 ReadInt64(); + void ReadAsciiStr(std::string &str); + int32 GetIPAFileIndex(std::string &name); + + bool inCG = false; + bool inIPA = false; + bool imported = true; // used only by irbuild to convert to ascii + bool importingFromMplt = false; // decided based on magic number + uint64 bufI = 0; + std::vector buf; + std::map content; + MIRModule &mod; + MIRBuilder mirBuilder; + std::vector gStrTab; + std::vector uStrTab; + std::vector tmpStruct; + std::vector tmpClass; + std::vector tmpInterface; + std::vector typTab; + std::vector funcTab; + std::vector symTab; + std::vector localSymTab; + std::vector localPregTab; + std::vector localLabelTab; + std::vector callInfoTab; + std::vector eaCgTab; + std::vector methodSymbols; + std::vector definedLabels; + std::string importFileName; +}; +} // namespace maple +#endif // MAPLE_IR_INCLUDE_BIN_MPL_IMPORT_H diff --git 
a/src/mapleall/maple_ir/include/bin_mplt.h b/src/mapleall/maple_ir/include/bin_mplt.h new file mode 100644 index 0000000000000000000000000000000000000000..e3e8359c1f9280775c68c1ae3c8b657172afdc7a --- /dev/null +++ b/src/mapleall/maple_ir/include/bin_mplt.h @@ -0,0 +1,68 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLE_IR_INCLUDE_BIN_MPLT_H +#define MAPLE_IR_INCLUDE_BIN_MPLT_H +#include "mir_module.h" +#include "mir_nodes.h" +#include "mir_preg.h" +#include "parser_opt.h" +#include "bin_mpl_export.h" +#include "bin_mpl_import.h" + +namespace maple { +class BinaryMplt { + public: + + explicit BinaryMplt(MIRModule &md) : mirModule(md), binImport(md), binExport(md) {} + + virtual ~BinaryMplt() = default; + + void Export(const std::string &suffix, std::unordered_set *dumpFuncSet = nullptr) { + binExport.Export(suffix, dumpFuncSet); + } + + bool Import(const std::string &modID, bool readCG = false, bool readSE = false) { + importFileName = modID; + return binImport.Import(modID, readCG, readSE); + } + + const MIRModule &GetMod() const { + return mirModule; + } + + BinaryMplImport &GetBinImport() { + return binImport; + } + + BinaryMplExport &GetBinExport() { + return binExport; + } + + std::string &GetImportFileName() { + return importFileName; + } + + void SetImportFileName(const std::string &fileName) { + importFileName = fileName; + } + + private: + MIRModule &mirModule; + BinaryMplImport binImport; + BinaryMplExport binExport; + std::string importFileName; +}; +} // namespace maple +#endif // MAPLE_IR_INCLUDE_BIN_MPLT_H diff --git a/src/mapleall/maple_ir/include/binary_op.def b/src/mapleall/maple_ir/include/binary_op.def new file mode 100644 index 0000000000000000000000000000000000000000..a4a3104c76aab3caf44328daab5f90844877eff4 --- /dev/null +++ b/src/mapleall/maple_ir/include/binary_op.def @@ -0,0 +1,43 @@ +/* + * Copyright (c) [2019] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan PSL v1. + * You can use this software according to the terms and conditions of the Mulan PSL v1. + * You may obtain a copy of Mulan PSL v1 at: + * + * http://license.coscl.org.cn/MulanPSL + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v1 for more details. 
+ */ +BINARYOP(add) +BINARYOP(ashr) +BINARYOP(band) +BINARYOP(bior) +BINARYOP(bxor) +BINARYOP(cand) +BINARYOP(cior) +BINARYOP(cmp) +BINARYOP(cmpl) +BINARYOP(cmpg) +BINARYOP(div) +BINARYOP(eq) +BINARYOP(gt) +BINARYOP(land) +BINARYOP(lior) +BINARYOP(le) +BINARYOP(lshr) +BINARYOP(lt) +BINARYOP(max) +BINARYOP(min) +BINARYOP(mul) +BINARYOP(ne) +BINARYOP(ge) +BINARYOP(rem) +BINARYOP(shl) +BINARYOP(ror) +BINARYOP(sub) +BINARYOP(CG_array_elem_add) + diff --git a/src/mapleall/maple_ir/include/cfg_primitive_types.h b/src/mapleall/maple_ir/include/cfg_primitive_types.h new file mode 100644 index 0000000000000000000000000000000000000000..65f9ba78c5551e326e96d80244cf00ecac74d6f0 --- /dev/null +++ b/src/mapleall/maple_ir/include/cfg_primitive_types.h @@ -0,0 +1,101 @@ +/* + * Copyright (c) [2019] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLE_IR_INCLUDE_CFG_PRIMITIVE_TYPES_H +#define MAPLE_IR_INCLUDE_CFG_PRIMITIVE_TYPES_H + +#include "types_def.h" + +namespace maple { +uint8 GetPointerSize(); // Circular include dependency with mir_type.h + +// Declaration of enum PrimType +#define LOAD_ALGO_PRIMARY_TYPE +enum PrimType { + PTY_begin, // PrimType begin +#define PRIMTYPE(P) PTY_##P, +#include "prim_types.def" + PTY_end, // PrimType end +#undef PRIMTYPE +}; + +constexpr PrimType kPtyInvalid = PTY_begin; +// just for test, no primitive type for derived SIMD types to be defined +constexpr PrimType kPtyDerived = PTY_end; + +struct PrimitiveTypeProperty { + PrimType type; + + PrimitiveTypeProperty(PrimType type, bool isInteger, bool isUnsigned, + bool isAddress, bool isFloat, bool isPointer, + bool isSimple, bool isDynamic, bool isDynamicAny, + bool isDynamicNone, bool isVector) : + type(type), isInteger(isInteger), isUnsigned(isUnsigned), + isAddress(isAddress), isFloat(isFloat), isPointer(isPointer), + isSimple(isSimple), isDynamic(isDynamic), isDynamicAny(isDynamicAny), + isDynamicNone(isDynamicNone), isVector(isVector) {} + + bool IsInteger() const { return isInteger; } + bool IsUnsigned() const { return isUnsigned; } + + bool IsAddress() const { + if (type == PTY_u64 || type == PTY_u32) { + if ((type == PTY_u64 && GetPointerSize() == 8) || + (type == PTY_u32 && GetPointerSize() == 4)) { + return true; + } else { + return false; + } + } else { + return isAddress; + } + } + + bool IsFloat() const { return isFloat; } + + bool IsPointer() const { + if (type == PTY_u64 || type == PTY_u32) { + if ((type == PTY_u64 && GetPointerSize() == 8) || + (type == PTY_u32 && GetPointerSize() == 4)) { + return true; + } else { + return false; + } + } else { + return isPointer; + } + } + + bool IsSimple() const { return isSimple; } + bool IsDynamic() const { return isDynamic; } + bool IsDynamicAny() const { return isDynamicAny; } + bool IsDynamicNone() const { return isDynamicNone; } + bool IsVector() const { return isVector; } + +private: + bool isInteger; + bool isUnsigned; + bool isAddress; + bool isFloat; + bool isPointer; + bool isSimple; + bool isDynamic; + bool isDynamicAny; + bool isDynamicNone; + bool 
isVector; +}; + +const PrimitiveTypeProperty &GetPrimitiveTypeProperty(PrimType pType); +} // namespace maple +#endif // MAPLE_IR_INCLUDE_CFG_PRIMITIVE_TYPES_H diff --git a/src/mapleall/maple_ir/include/cmpl.h b/src/mapleall/maple_ir/include/cmpl.h new file mode 100644 index 0000000000000000000000000000000000000000..4f3a80c36db1c55fc21b88ce40244936c46f807a --- /dev/null +++ b/src/mapleall/maple_ir/include/cmpl.h @@ -0,0 +1,349 @@ +/* + * Copyright (c) [2019] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +// This define the lowest level MAPLE IR data structures that are compatible +// with both the C++ and C coding environments of MAPLE +#ifndef MAPLE_INCLUDE_VM_CMPL_V2 +#define MAPLE_INCLUDE_VM_CMPL_V2 +// Still need constant value from MIR +#include +#include "mir_config.h" +#include "types_def.h" +#include "opcodes.h" +#include "prim_types.h" +#include "intrinsics.h" +#include "mir_module.h" + +namespace maple { +extern char appArray[]; +constexpr uint32 kTwoBitVectors = 2; +struct MirFuncT { // 28B + uint16 frameSize; + uint16 upFormalSize; + uint16 moduleID; + uint32 funcSize; // size of code in words + uint8 *formalWordsTypetagged; // bit vector where the Nth bit tells whether + // the Nth word in the formal parameters area + // addressed upward from %%FP (that means + // the word at location (%%FP + N*4)) has + // typetag; if yes, the typetag is the word + // at (%%FP + N*4 + 4); the bitvector's size + // is given by BlockSize2BitvectorSize(upFormalSize) + uint8 *localWordsTypetagged; // bit vector where the Nth bit tells whether + // the Nth word in the local stack frame + // addressed downward from %%FP (that means + // the word at location (%%FP - N*4)) has + // typetag; if yes, the typetag is the word + // at (%%FP - N*4 + 4); the bitvector's size + // is given by BlockSize2BitvectorSize(frameSize) + uint8 *formalWordsRefCounted; // bit vector where the Nth bit tells whether + // the Nth word in the formal parameters area + // addressed upward from %%FP (that means + // the word at location (%%FP + N*4)) points to + // a dynamic memory block that needs reference + // count; the bitvector's size is given by + // BlockSize2BitvectorSize(upFormalSize) + uint8 *localWordsRefCounted; // bit vector where the Nth bit tells whether + // the Nth word in the local stack frame + // addressed downward from %%FP (that means + // the word at location (%%FP - N*4)) points to + // a dynamic memory block that needs reference + // count; the bitvector's size is given by + // BlockSize2BitvectorSize(frameSize) + // removed. 
label table size + // lbl2stmt table, removed; + // the first statement immediately follow MirFuncT + // since it starts with expression, BaseNodeT* is returned + void *FirstInst() const { + return reinterpret_cast(const_cast(this)) + sizeof(MirFuncT); + } + + // there are 4 bitvectors that follow the function code + uint32 FuncCodeSize() const { + return funcSize - (kTwoBitVectors * BlockSize2BitVectorSize(upFormalSize)) - + (kTwoBitVectors * BlockSize2BitVectorSize(frameSize)); + } +}; + +struct MirModuleT { + public: + MIRFlavor flavor; // should be kCmpl + MIRSrcLang srcLang; // the source language + uint16 id; + uint32 globalMemSize; // size of storage space for all global variables + uint8 *globalBlkMap; // the memory map of the block containing all the + // globals, for specifying static initializations + uint8 *globalWordsTypetagged; // bit vector where the Nth bit tells whether + // the Nth word in globalBlkMap has typetag; + // if yes, the typetag is the N+1th word; the + // bitvector's size is given by + // BlockSize2BitvectorSize(globalMemSize) + uint8 *globalWordsRefCounted; // bit vector where the Nth bit tells whether + // the Nth word points to a reference-counted + // dynamic memory block; the bitvector's size + // is given by BlockSize2BitvectorSize(globalMemSize) + PUIdx mainFuncID; // the entry function; 0 if no main function + uint32 numFuncs; // because puIdx 0 is reserved, numFuncs is also the highest puIdx + MirFuncT **funcs; // list of all funcs in the module. +#if 1 // the js2mpl buld always set HAVE_MMAP to 1 // binmir file mmap info + int binMirImageFd; // file handle for mmap +#endif // HAVE_MMAP + void *binMirImageStart; // binimage memory start + uint32 binMirImageLength; // binimage memory size + MirFuncT *FuncFromPuIdx(PUIdx puIdx) const { + MIR_ASSERT(puIdx <= numFuncs); // puIdx starts from 1 + return funcs[puIdx - 1]; + } + + MirModuleT() = default; + ~MirModuleT() = default; + MirFuncT *MainFunc() const { + return (mainFuncID == 0) ? static_cast(nullptr) : FuncFromPuIdx(mainFuncID); + } + + void SetCurFunction(MirFuncT *f) { + curFunction = f; + } + + MirFuncT *GetCurFunction() const { + return curFunction; + } + + MIRSrcLang GetSrcLang() const { + return srcLang; + } + + private: + MirFuncT *curFunction = nullptr; +}; + +// At this stage, MirConstT don't need all information in MIRConst +// Note: only be used within Constval node: +// Warning: it's different from full feature MIR. +// only support 32bit int const (lower 32bit). higher 32bit are tags +union MirIntConstT { + int64 value; + uint32 val[2]; // ARM target load/store 2 32bit val instead of 1 64bit +}; + +// currently in VM, only intconst are used. +using MirConstT = MirIntConstT; +// +// It's a stacking of POD data structure to allow precise memory layout +// control and emulate the inheritance relationship of corresponding C++ +// data structures to keep the interface consistent (as much as possible). +// +// Rule: +// 1. base struct should be the first member (to allow safe pointer casting) +// 2. each node (just ops, no data) should be of either 4B or 8B. +// 3. casting the node to proper base type to access base type's fields. +// +// Current memory layout of nodes follows the postfix notation: +// Each operand instruction is positioned immediately before its parent or +// next operand. Memory layout of sub-expressions tree is done recursively. +// E.g. the code for (a + b) contains 3 instructions, starting with the READ a, +// READ b, and then followed by ADD. 
+// For (a + (b - c)), it is: +// +// READ a +// READ b +// READ c +// SUB +// ADD +// +// BaseNodeT is an abstraction of expression. +struct BaseNodeT { // 4B + Opcode op; + PrimType ptyp; + uint8 typeFlag; // a flag to speed up type related operations in the VM + uint8 numOpnds; // only used for N-ary operators, switch and rangegoto + // operands immediately before each node + virtual size_t NumOpnds() const { + if (op == OP_switch || op == OP_rangegoto) { + return 1; + } + return numOpnds; + } + + virtual uint8 GetNumOpnds() const { + return numOpnds; + } + virtual void SetNumOpnds(uint8 num) { + numOpnds = num; + } + + virtual Opcode GetOpCode() const { + return op; + } + + virtual void SetOpCode(Opcode o) { + op = o; + } + + virtual PrimType GetPrimType() const { + return ptyp; + } + + virtual void SetPrimType(PrimType type) { + ptyp = type; + } + + BaseNodeT() : op(OP_undef), ptyp(kPtyInvalid), typeFlag(0), numOpnds(0) {} + + virtual ~BaseNodeT() = default; +}; + +// typeFlag is a 8bit flag to provide short-cut information for its +// associated PrimType, because many type related information extraction +// is not very lightweight. +// Here is the convention: +// | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 | +// dyn f i sc c (log2(size)) +// +// bit 0 - bit 3 is for type size information. (now not used in VM?) +// bit 0-2 represents the size of concrete types (not void/aggregate) +// it's the result of log2 operation on the real size to fit in 3 bits. +// which has the following correspondence: +// | 2 | 1 | 0 | type size (in Bytes) +// 0 0 0 1 +// 0 0 1 2 +// 0 1 0 4 +// 0 1 1 8 +// 1 0 0 16 +// +// bit 3 is the flag of "concrete types", i.e., types we know the type +// details. +// when it's 1, the bit0-2 size are valid +// when it's 0, the size of the type is 0, and bit0-2 are meaningless. +// +// bit 4 is for scalar types (1 if it's a scalar type) +// bit 5 is for integer types (1 if it's an integer type) +// bit 6 is for floating types (1 if it's a floating type) +// bit 7 is for dynamic types (1 if it's a dynamic type) +// +// refer to mirtypes.h/mirtypes.cpp in maple_ir directory for more information. 
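The typeFlag byte described above is exactly what the mask constants that follow decode. A minimal standalone sketch of the convention, using local copies of the documented mask values and a hypothetical 8-byte static float; the helper checks here are stand-ins for the inline IsFloat/IsScalarType/IsDynType functions defined below:

#include <cassert>
#include <cstdint>

int main() {
  // Local copies of the documented bit masks (illustrative only).
  const uint8_t kDyn = 0x80, kFloat = 0x40, kInteger = 0x20, kScalar = 0x10;
  const uint8_t kConcrete = 0x08, kSize = 0x07;      // bits 0-2 hold log2(size in bytes)

  // Encode an 8-byte static float: float, scalar, concrete, not dynamic, log2(8) == 3.
  uint8_t flag = kFloat | kScalar | kConcrete | 3;   // == 0x5B

  assert((flag & kDyn) == 0);                        // IsDynType would be false
  assert((flag & (kDyn | kFloat)) == kFloat);        // IsFloat: statically floating
  assert((flag & kScalar) != 0);                     // IsScalarType
  assert((flag & kInteger) == 0);                    // not an integer type
  assert((1u << (flag & kSize)) == 8);               // size in bytes recovered from bits 0-2
  return 0;
}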
+const int32 kTypeflagZero = 0x00; +const int32 kTypeflagDynMask = 0x80; +const int32 kTypeflagFloatMask = 0x40; +const int32 kTypeflagIntergerMask = 0x20; +const int32 kTypeflagScalarMask = 0x10; +const int32 kTypeflagConcreteMask = 0x08; +const int32 kTypeflagSizeMask = 0x07; +const int32 kTypeflagDynFloatMask = (kTypeflagDynMask | kTypeflagFloatMask); +const int32 kTypeflagDynIntergerMask = (kTypeflagDynMask | kTypeflagIntergerMask); +inline bool IsDynType(uint8 typeFlag) { + return ((typeFlag & kTypeflagDynMask) != kTypeflagZero); +} + +inline bool IsDynFloat(uint8 typeFlag) { + return ((typeFlag & kTypeflagDynFloatMask) == kTypeflagDynFloatMask); +} + +inline bool IsDynInteger(uint8 typeFlag) { + return ((typeFlag & kTypeflagDynIntergerMask) == kTypeflagDynIntergerMask); +} + +// IsFloat means "is statically floating types", i.e., float, but not dynamic +inline bool IsFloat(uint8 typeFlag) { + return ((typeFlag & kTypeflagDynFloatMask) == kTypeflagFloatMask); +} + +inline bool IsScalarType(uint8 typeFlag) { + return ((typeFlag & kTypeflagScalarMask) != kTypeflagZero); +} + +inline Opcode GetOpcode(const BaseNodeT &nodePtr) { + return nodePtr.op; +} + +inline PrimType GetPrimType(const BaseNodeT &nodePtr) { + return nodePtr.ptyp; +} + +inline uint32 GetOperandsNum(const BaseNodeT &nodePtr) { + return nodePtr.numOpnds; +} + +using UnaryNodeT = BaseNodeT; // alias +struct TypecvtNodeT : public BaseNodeT { // 8B + PrimType fromPTyp; + uint8 fromTypeFlag; // a flag to speed up type related operations + uint8 padding[2]; + PrimType FromType() const { + return fromPTyp; + } +}; + +struct ExtractbitsNodeT : public BaseNodeT { // 8B + uint8 bOffset; + uint8 bSize; + uint16 padding; +}; + +struct IreadoffNodeT : public BaseNodeT { // 8B + int32 offset; +}; + +using BinaryNodeT = BaseNodeT; +// Add expression types to compare node, to +// facilitate the evaluation of postorder stored kCmpl +// Note: the two operands should have the same type if they're +// not dynamic types +struct CompareNodeT : public BaseNodeT { // 8B + PrimType opndType; // type of operands. + uint8 opndTypeFlag; // typeFlag of opntype. + uint8 padding[2]; // every compare node has two opnds. +}; + +using TernaryNodeT = BaseNodeT; +using NaryNodeT = BaseNodeT; +// need to guarantee MIRIntrinsicID is 4B +// Note: this is not supported by c++0x +struct IntrinsicopNodeT : public BaseNodeT { // 8B + MIRIntrinsicID intrinsic; +}; + +struct ConstvalNodeT : public BaseNodeT { // 4B + 8B const value + MirConstT *Constval() const { + auto *tempPtr = const_cast(this); + return (reinterpret_cast(reinterpret_cast(tempPtr) + sizeof(ConstvalNodeT))); + } +}; + +// full MIR exported a pointer to MirConstT +inline MirConstT *GetConstval(const ConstvalNodeT &node) { + return node.Constval(); +} + +// SizeoftypeNode shouldn't be seen here +// ArrayNode shouldn't be seen here +struct AddrofNodeT : public BaseNodeT { // 12B + StIdx stIdx; + FieldID fieldID; +}; + +using DreadNodeT = AddrofNodeT; // same shape. 
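ConstvalNodeT above relies on the postfix layout convention documented earlier in this header: the constant is not a member, it occupies the bytes immediately after the 4-byte node, and Constval() recovers it by stepping past sizeof(ConstvalNodeT). A standalone sketch of that recovery with hypothetical stand-in types; the payload is read as two 32-bit words, mirroring MirIntConstT::val, so the access stays 4-byte aligned:

#include <cstdint>
#include <iostream>

// Stand-in for a 4-byte node header (not the real BaseNodeT).
struct TinyNode {
  uint8_t op, ptyp, typeFlag, numOpnds;
  // Same pointer arithmetic as ConstvalNodeT::Constval(): the value lives
  // directly after the header rather than in a member field.
  const uint32_t *Payload() const {
    return reinterpret_cast<const uint32_t*>(
        reinterpret_cast<const uint8_t*>(this) + sizeof(TinyNode));
  }
};

// Header immediately followed by its constant, as the postfix layout stores it.
struct Holder {
  TinyNode node;
  uint32_t words[2];  // low/high halves, cf. MirIntConstT::val
};

int main() {
  Holder h = {};
  h.words[0] = 42;
  std::cout << h.node.Payload()[0] << "\n";  // prints 42, recovered purely from the layout
  return 0;
}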
+struct AddroffuncNodeT : public BaseNodeT { // 8B + PUIdx puIdx; // 32bit now +}; + +struct RegreadNodeT : public BaseNodeT { // 8B + PregIdx regIdx; // 32bit, negative if special register +}; + +struct AddroflabelNodeT : public BaseNodeT { // 8B + uint32 offset; +}; +} // namespace maple +#endif // MAPLE_INCLUDE_VM_CMPL_V2 diff --git a/src/mapleall/maple_ir/include/debug_info.h b/src/mapleall/maple_ir/include/debug_info.h new file mode 100644 index 0000000000000000000000000000000000000000..937d18a2131423ab886ff7e2f9640d8524851075 --- /dev/null +++ b/src/mapleall/maple_ir/include/debug_info.h @@ -0,0 +1,857 @@ +/* + * Copyright (C) [2021-2022] Futurewei Technologies, Inc. All rights reverved. + * + * OpenArkCompiler is licensed under the Mulan Permissive Software License v2. + * You can use this software according to the terms and conditions of the MulanPSL - 2.0. + * You may obtain a copy of MulanPSL - 2.0 at: + * + * https://opensource.org/licenses/MulanPSL-2.0 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the MulanPSL - 2.0 for more details. + */ + +#ifndef MAPLE_IR_INCLUDE_DBG_INFO_H +#define MAPLE_IR_INCLUDE_DBG_INFO_H +#include + +#include "mpl_logging.h" +#include "types_def.h" +#include "prim_types.h" +#include "mir_nodes.h" +#include "mir_scope.h" +#include "namemangler.h" +#include "lexer.h" +#include "Dwarf.h" + +namespace maple { +// for more color code: http://ascii-table.com/ansi-escape-sequences.php +#define RESET "\x1B[0m" +#define BOLD "\x1B[1m" +#define RED "\x1B[31m" +#define GRN "\x1B[32m" +#define YEL "\x1B[33m" + +const uint32 kDbgDefaultVal = 0xdeadbeef; +#define HEX(val) std::hex << "0x" << (val) << std::dec + +class MIRModule; +class MIRType; +class MIRSymbol; +class MIRSymbolTable; +class MIRTypeNameTable; +class DBGBuilder; +class DBGCompileMsgInfo; +class MIRLexer; +class MIREnum; + +// for compiletime warnings +class DBGLine { + public: + DBGLine(uint32 lnum, const char *l) : lineNum(lnum), codeLine(l) {} + virtual ~DBGLine() {} + + void Dump() const { + LogInfo::MapleLogger() << "LINE: " << lineNum << " " << codeLine << std::endl; + } + + private: + uint32 lineNum; + const char *codeLine; +}; + +#define MAXLINELEN 4096 + +class DBGCompileMsgInfo { + public: + DBGCompileMsgInfo(); + virtual ~DBGCompileMsgInfo() {} + void ClearLine(uint32 n); + void SetErrPos(uint32 lnum, uint32 cnum); + void UpdateMsg(uint32 lnum, const char *line); + void EmitMsg(); + + private: + uint32 startLine; // mod 3 + uint32 errLNum; + uint32 errCNum; + uint32 errPos; + uint32 lineNum[3]; + uint8 codeLine[3][MAXLINELEN]; // 3 round-robin line buffers +}; + +enum DBGDieKind { kDwTag, kDwAt, kDwOp, kDwAte, kDwForm, kDwCfa }; + +using DwTag = uint32; // for DW_TAG_* +using DwAt = uint32; // for DW_AT_* +using DwOp = uint32; // for DW_OP_* +using DwAte = uint32; // for DW_ATE_* +using DwForm = uint32; // for DW_FORM_* +using DwCfa = uint32; // for DW_CFA_* + +class DBGDieAttr; + +class DBGExpr { + public: + explicit DBGExpr(MIRModule *m) : dwOp(0), value(kDbgDefaultVal), opnds(m->GetMPAllocator().Adapter()) {} + + DBGExpr(MIRModule *m, DwOp op) : dwOp(op), value(kDbgDefaultVal), opnds(m->GetMPAllocator().Adapter()) {} + + virtual ~DBGExpr() {} + + void AddOpnd(uint64 val) { + opnds.push_back(val); + } + + int GetVal() const { + return value; + } + + void SetVal(int v) { + value = v; + } + + DwOp GetDwOp() const 
{ + return dwOp; + } + + void SetDwOp(DwOp op) { + dwOp = op; + } + + MapleVector &GetOpnd() { + return opnds; + } + + size_t GetOpndSize() const { + return opnds.size(); + } + + void Clear() { + return opnds.clear(); + } + + private: + DwOp dwOp; + // for local var fboffset, global var strIdx + int value; + MapleVector opnds; +}; + +class DBGExprLoc { + public: + explicit DBGExprLoc(MIRModule *m) : module(m), exprVec(m->GetMPAllocator().Adapter()), symLoc(nullptr) { + simpLoc = m->GetMemPool()->New(module); + } + + DBGExprLoc(MIRModule *m, DwOp op) : module(m), exprVec(m->GetMPAllocator().Adapter()), symLoc(nullptr) { + simpLoc = m->GetMemPool()->New(module, op); + } + + virtual ~DBGExprLoc() {} + + bool IsSimp() const { + return (exprVec.size() == 0 && simpLoc->GetVal() != static_cast(kDbgDefaultVal)); + } + + int GetFboffset() const { + return simpLoc->GetVal(); + } + + void SetFboffset(int offset) { + simpLoc->SetVal(offset); + } + + int GetGvarStridx() const { + return simpLoc->GetVal(); + } + + void SetGvarStridx(int idx) { + simpLoc->SetVal(idx); + } + + DwOp GetOp() const { + return simpLoc->GetDwOp(); + } + + uint32 GetSize() const { + return static_cast(simpLoc->GetOpndSize()); + } + + void ClearOpnd() { + simpLoc->Clear(); + } + + void AddSimpLocOpnd(uint64 val) { + simpLoc->AddOpnd(val); + } + + DBGExpr *GetSimpLoc() const { + return simpLoc; + } + + void *GetSymLoc() { + return symLoc; + } + + void SetSymLoc(void *loc) { + symLoc = loc; + } + + void Dump() const; + + private: + MIRModule *module; + DBGExpr *simpLoc; + MapleVector exprVec; + void *symLoc; +}; + +class DBGDieAttr { + public: + explicit DBGDieAttr(DBGDieKind k) : dieKind(k), dwAttr(DW_AT_deleted), dwForm(DW_FORM_GNU_strp_alt) { + value.u = kDbgDefaultVal; + } + + virtual ~DBGDieAttr() = default; + + size_t SizeOf(DBGDieAttr *attr) const; + + void AddSimpLocOpnd(uint64 val) { + value.ptr->AddSimpLocOpnd(val); + } + + void ClearSimpLocOpnd() { + value.ptr->ClearOpnd(); + } + + void Dump(int indent); + + DBGDieKind GetKind() const { + return dieKind; + } + + void SetKind(DBGDieKind kind) { + dieKind = kind; + } + + DwAt GetDwAt() const { + return dwAttr; + } + + void SetDwAt(DwAt at) { + dwAttr = at; + } + + DwForm GetDwForm() const { + return dwForm; + } + + void SetDwForm(DwForm form) { + dwForm = form; + } + + int32 GetI() const { + return value.i; + } + + void SetI(int32 val) { + value.i = val; + } + + uint32 GetId() const { + return value.id; + } + + void SetId(uint32 val) { + value.id = val; + } + + int64 GetJ() const { + return value.j; + } + + void SetJ(int64 val) { + value.j = val; + } + + uint64 GetU() const { + return value.u; + } + + void SetU(uint64 val) { + value.u = val; + } + + float GetF() const { + return value.f; + } + + void SetF(float val) { + value.f = val; + } + + double GetD() const { + return value.d; + } + + void SetD(double val) { + value.d = val; + } + + DBGExprLoc *GetPtr() { + return value.ptr; + } + + void SetPtr(DBGExprLoc *val) { + value.ptr = val; + } + + void SetKeep(bool flag) { + keep = flag; + } + + bool GetKeep() const { + return keep; + } + + private: + DBGDieKind dieKind; + DwAt dwAttr; + DwForm dwForm; // type for the attribute value + union { + int32 i; + uint32 id; // dieId when dwForm is of DW_FORM_ref + // strIdx when dwForm is of DW_FORM_string + int64 j; + uint64 u; + float f; + double d; + + DBGExprLoc *ptr; + } value; + bool keep = true; +}; + +class DBGDie { + public: + DBGDie(MIRModule *m, DwTag tag); + virtual ~DBGDie() {} + void AddSubVec(DBGDie *die); + void 
AddAttr(DBGDieAttr *attr); + void AddAttr(DwAt at, DwForm form, uint64 val, bool keep = true); + void AddSimpLocAttr(DwAt at, DwForm form, DwOp op, uint64 val); + void AddGlobalLocAttr(DwAt at, DwForm form, uint64 val); + void AddFrmBaseAttr(DwAt at, DwForm form); + DBGExprLoc *GetExprLoc(); + bool SetAttr(DwAt attr, uint64 val); + bool SetAttr(DwAt attr, int64 val); + bool SetAttr(DwAt attr, uint32 val); + bool SetAttr(DwAt attr, int32 val); + bool SetAttr(DwAt attr, float val); + bool SetAttr(DwAt attr, double val); + bool SetAttr(DwAt attr, DBGExprLoc *ptr); + bool SetSimpLocAttr(DwAt attr, int64 val); + void ResetParentDie() const; + void Dump(int indent); + + uint32 GetId() const { + return id; + } + + void SetId(uint32 val) { + id = val; + } + + DwTag GetTag() const { + return tag; + } + + void SetTag(DwTag val) { + tag = val; + } + + bool GetWithChildren() const { + return withChildren; + } + + void SetWithChildren(bool val) { + withChildren = val; + } + + bool GetKeep() const { + return keep; + } + + void SetKeep(bool val) { + keep = val; + } + + DBGDie *GetParent() const { + return parent; + } + + void SetParent(DBGDie *val) { + parent = val; + } + + DBGDie *GetSibling() const { + return sibling; + } + + void SetSibling(DBGDie *val) { + sibling = val; + } + + DBGDie *GetFirstChild() const { + return firstChild; + } + + void SetFirstChild(DBGDie *val) { + firstChild = val; + } + + uint32 GetAbbrevId() const { + return abbrevId; + } + + void SetAbbrevId(uint32 val) { + abbrevId = val; + } + + uint32 GetTyIdx() const { + return tyIdx; + } + + void SetTyIdx(uint32 val) { + tyIdx = val; + } + + uint32 GetOffset() const { + return offset; + } + + void SetOffset(uint32 val) { + offset = val; + } + + uint32 GetSize() const { + return size; + } + + void SetSize(uint32 val) { + size = val; + } + + const MapleVector &GetAttrVec() const { + return attrVec; + } + + MapleVector &GetAttrVec() { + return attrVec; + } + + const MapleVector &GetSubDieVec() const { + return subDieVec; + } + + MapleVector &GetSubDieVec() { + return subDieVec; + } + + uint32 GetSubDieVecSize() const { + return static_cast(subDieVec.size()); + } + + DBGDie *GetSubDieVecAt(uint32 i) const { + return subDieVec[i]; + } + + // link ExprLoc to die's + void LinkExprLoc(DBGDie *die) { + for (auto &at : attrVec) { + if (at->GetDwAt() == DW_AT_location) { + DBGExprLoc *loc = die->GetExprLoc(); + at->SetPtr(loc); + } + } + } + + private: + MIRModule *module; + DwTag tag; + uint32 id; // starts from 1 which is root die compUnit + bool withChildren; + bool keep; // whether emit into .s + DBGDie *parent; + DBGDie *sibling; + DBGDie *firstChild; + uint32 abbrevId; // id in .debug_abbrev + uint32 tyIdx; // for type TAG + uint32 offset; // Dwarf CU relative offset + uint32 size; // DIE Size in .debug_info + MapleVector attrVec; + MapleVector subDieVec; +}; + +class DBGAbbrevEntry { + public: + DBGAbbrevEntry(MIRModule *m, DBGDie *die); + virtual ~DBGAbbrevEntry() {} + bool Equalto(DBGAbbrevEntry *entry); + void Dump(int indent); + + DwTag GetTag() const { + return tag; + } + + void SetTag(DwTag val) { + tag = val; + } + + uint32 GetAbbrevId() const { + return abbrevId; + } + + void SetAbbrevId(uint32 val) { + abbrevId = val; + } + + bool GetWithChildren() const { + return withChildren; + } + + void SetWithChildren(bool val) { + withChildren = val; + } + + MapleVector &GetAttrPairs() { + return attrPairs; + } + + private: + DwTag tag; + uint32 abbrevId; + bool withChildren; + MapleVector attrPairs; // kDwAt kDwForm pairs +}; + +class 
DBGAbbrevEntryVec { + public: + DBGAbbrevEntryVec(MIRModule *m, DwTag tag) : tag(tag), entryVec(m->GetMPAllocator().Adapter()) {} + + virtual ~DBGAbbrevEntryVec() {} + + uint32 GetId(MapleVector &attrs); + void Dump(int indent); + + DwTag GetTag() const { + return tag; + } + + void SetTag(DwTag val) { + tag = val; + } + + const MapleVector &GetEntryvec() const { + return entryVec; + } + + MapleVector &GetEntryvec() { + return entryVec; + } + + private: + DwTag tag; + MapleVector entryVec; +}; + +struct ScopePos { + uint32 id; + SrcPosition pos; +}; + +enum EmitStatus : uint8 { + kBeginEmited = 0, + kEndEmited = 1, +}; +constexpr uint8 kAllEmited = 3; + +class DebugInfo { + public: + explicit DebugInfo(MIRModule *m) + : module(m), + compUnit(nullptr), + dummyTypeDie(nullptr), + lexer(nullptr), + maxId(1), + mplSrcIdx(0), + debugInfoLength(0), + curFunction(nullptr), + compileMsg(nullptr), + parentDieStack(m->GetMPAllocator().Adapter()), + idDieMap(std::less(), m->GetMPAllocator().Adapter()), + abbrevVec(m->GetMPAllocator().Adapter()), + tagAbbrevMap(std::less(), m->GetMPAllocator().Adapter()), + baseTypeMap(std::less(), m->GetMPAllocator().Adapter()), + tyIdxDieIdMap(std::less(), m->GetMPAllocator().Adapter()), + stridxDieIdMap(std::less(), m->GetMPAllocator().Adapter()), + globalStridxDieIdMap(std::less(), m->GetMPAllocator().Adapter()), + funcDefStrIdxDieIdMap(std::less(), m->GetMPAllocator().Adapter()), + typeDefTyIdxMap(std::less(), m->GetMPAllocator().Adapter()), + pointedPointerMap(std::less(), m->GetMPAllocator().Adapter()), + funcLstrIdxDieIdMap(std::less(), m->GetMPAllocator().Adapter()), + funcLstrIdxLabIdxMap(std::less(), m->GetMPAllocator().Adapter()), + funcScopeLows(std::less(), m->GetMPAllocator().Adapter()), + funcScopeHighs(std::less(), m->GetMPAllocator().Adapter()), + funcScopeIdStatus(std::less(), m->GetMPAllocator().Adapter()), + globalTypeAliasMap(std::less(), m->GetMPAllocator().Adapter()), + constTypeDieMap(std::less(), m->GetMPAllocator().Adapter()), + volatileTypeDieMap(std::less(), m->GetMPAllocator().Adapter()), + strps(std::less(), m->GetMPAllocator().Adapter()) { + /* valid entry starting from index 1 as abbrevid starting from 1 as well */ + abbrevVec.push_back(nullptr); + InitMsg(); + varPtrPrefix = std::string(namemangler::kPtrPrefixStr); + } + + virtual ~DebugInfo() {} + + void BuildDebugInfo(); + void ClearDebugInfo(); + void Dump(int indent); + + DBGDie *GetDie(const MIRFunction *func); + DBGDie *GetLocalDie(MIRFunction *func, GStrIdx strIdx); + DBGDieAttr *CreateAttr(DwAt at, DwForm form, uint64 val) const; + + bool IsScopeIdEmited(MIRFunction *func, uint32 scopeId); + + void GetCrossScopeId(MIRFunction *func, + std::unordered_set &idSet, + bool isLow, + const SrcPosition &oldSrcPos, + const SrcPosition &newSrcPos); + + void ComputeSizeAndOffsets(); + + DBGDie *GetDie(uint32 id) { + return idDieMap[id]; + } + + void SetIdDieMap(uint32 i, DBGDie *die) { + idDieMap[i] = die; + } + + DBGDie *GetDummyTypeDie() { + return dummyTypeDie; + } + + DBGDie *GetParentDie() { + return parentDieStack.top(); + } + + void ResetParentDie() { + parentDieStack.clear(); + parentDieStack.push(compUnit); + } + + uint32 GetDebugInfoLength() const { + return debugInfoLength; + } + + void SetFuncScopeIdStatus(MIRFunction *func, uint32 scopeId, EmitStatus status) { + if (funcScopeIdStatus[func].find(scopeId) == funcScopeIdStatus[func].end()) { + funcScopeIdStatus[func][scopeId] = 0; + } + funcScopeIdStatus[func][scopeId] |= (1U << status); + } + + MapleVector &GetAbbrevVec() { + 
return abbrevVec; + } + + void AddStrps(uint32 val) { + strps.insert(val); + } + + MapleSet &GetStrps() { + return strps; + } + + uint32 GetMaxId() const { + return maxId; + } + + uint32 GetIncMaxId() { + return maxId++; + } + + DBGDie *GetCompUnit() const { + return compUnit; + } + + size_t GetParentDieSize() const { + return parentDieStack.size(); + } + + void SetErrPos(uint32 lnum, uint32 cnum) { + compileMsg->SetErrPos(lnum, cnum); + } + + void UpdateMsg(uint32 lnum, const char *line) { + compileMsg->UpdateMsg(lnum, line); + } + + void EmitMsg() { + compileMsg->EmitMsg(); + } + + private: + MIRModule *module; + DBGDie *compUnit; // root die: compilation unit + DBGDie *dummyTypeDie; // workaround for unknown types + MIRLexer *lexer; + uint32 maxId; + GStrIdx mplSrcIdx; + uint32 debugInfoLength; + MIRFunction *curFunction; + + // for compilation messages + DBGCompileMsgInfo *compileMsg; + + MapleStack parentDieStack; + MapleMap idDieMap; + MapleVector abbrevVec; // valid entry starting from index 1 + MapleMap tagAbbrevMap; + MapleMap> baseTypeMap; // baseTypeMap: + + // to be used when derived type references a base type die + MapleMap tyIdxDieIdMap; + MapleMap stridxDieIdMap; + // save global var die, global var string idx to die id + MapleMap globalStridxDieIdMap; + MapleMap funcDefStrIdxDieIdMap; + MapleMap typeDefTyIdxMap; // prevtyIdxtypidx_map + MapleMap pointedPointerMap; + MapleMap> funcLstrIdxDieIdMap; + MapleMap> funcLstrIdxLabIdxMap; + + MapleMap> funcScopeLows; + MapleMap> funcScopeHighs; + + /* save functions's scope id that has been emited */ + MapleMap> funcScopeIdStatus; + + /* alias type */ + MapleMap globalTypeAliasMap; + MapleMap constTypeDieMap; + MapleMap volatileTypeDieMap; + + MapleSet strps; + std::string varPtrPrefix; + + void InitMsg() { + compileMsg = module->GetMemPool()->New(); + } + + void Init(); + void InitBaseTypeMap(); + void Finish(); + void SetupCU(); + + void BuildDebugInfoEnums(); + void BuildDebugInfoContainers(); + void BuildDebugInfoGlobalSymbols(); + void BuildDebugInfoFunctions(); + + // build tree to populate withChildren, sibling, firstChild + // also insert DW_AT_sibling attributes when needed + void BuildDieTree(); + + void BuildAbbrev(); + + uint32 GetAbbrevId(DBGAbbrevEntryVec *vec, DBGAbbrevEntry *entry) const; + + DBGDie *GetGlobalDie(const GStrIdx &strIdx); + + void SetLocalDie(GStrIdx strIdx, const DBGDie *die); + void SetLocalDie(MIRFunction *func, GStrIdx strIdx, const DBGDie *die); + DBGDie *GetLocalDie(GStrIdx strIdx); + + LabelIdx GetLabelIdx(GStrIdx strIdx); + LabelIdx GetLabelIdx(MIRFunction *func, GStrIdx strIdx); + void SetLabelIdx(const GStrIdx &strIdx, LabelIdx labIdx); + void SetLabelIdx(MIRFunction *func, const GStrIdx &strIdx, LabelIdx labIdx); + void InsertBaseTypeMap(const std::string &inputName, const std::string &outpuName, PrimType type); + + DBGDie *GetIdDieMapAt(uint32 i) { + return idDieMap[i]; + } + + void PushParentDie(DBGDie *die) { + parentDieStack.push(die); + } + + void PopParentDie() { + parentDieStack.pop(); + } + + MIRFunction *GetCurFunction() { + return curFunction; + } + + void SetCurFunction(MIRFunction *func) { + curFunction = func; + } + + void SetTyidxDieIdMap(const TyIdx tyIdx, const DBGDie *die) { + tyIdxDieIdMap[tyIdx.GetIdx()] = die->GetId(); + } + + DBGDie *CreateVarDie(MIRSymbol *sym); + DBGDie *CreateVarDie(MIRSymbol *sym, const GStrIdx &strIdx); // use alt name + DBGDie *CreateFormalParaDie(MIRFunction *func, uint32 idx, bool isDef); + DBGDie *CreateFieldDie(maple::FieldPair pair); + DBGDie 
*CreateBitfieldDie(const MIRBitFieldType *type, const GStrIdx &sidx, uint32 &prevBits); + void CreateStructTypeFieldsDies(const MIRStructType *structType, DBGDie *die); + void CreateStructTypeParentFieldsDies(const MIRStructType *structType, DBGDie *die); + void CreateStructTypeMethodsDies(const MIRStructType *structType, DBGDie *die); + DBGDie *CreateStructTypeDie(GStrIdx strIdx, const MIRStructType *structType, bool update = false); + DBGDie *CreateClassTypeDie(const GStrIdx &strIdx, const MIRClassType *classType); + DBGDie *CreateInterfaceTypeDie(const GStrIdx &strIdx, const MIRInterfaceType *interfaceType); + DBGDie *CreatePointedFuncTypeDie(MIRFuncType *fType); + void CreateFuncLocalSymbolsDies(MIRFunction *func, DBGDie *die); + + DBGDie *GetOrCreateLabelDie(LabelIdx labid); + DBGDie *GetOrCreateFuncDeclDie(MIRFunction *func); + DBGDie *GetOrCreateFuncDefDie(MIRFunction *func); + DBGDie *GetOrCreatePrimTypeDie(MIRType *ty); + DBGDie *GetOrCreateBaseTypeDie(const MIRType *type); + DBGDie *GetOrCreateTypeDie(TyIdx tyidx); + DBGDie *GetOrCreateTypeDie(MIRType *type); + DBGDie *GetOrCreateTypeDieWithAttr(AttrKind attr, DBGDie *typeDie); + DBGDie *GetOrCreateTypeDieWithAttr(TypeAttrs attrs, DBGDie *typeDie); + DBGDie *GetOrCreatePointTypeDie(const MIRPtrType *ptrType); + DBGDie *GetOrCreateArrayTypeDie(const MIRArrayType *arrayType); + DBGDie *GetOrCreateStructTypeDie(const MIRType *type); + DBGDie *GetOrCreateTypedefDie(GStrIdx stridx, TyIdx tyidx); + DBGDie *GetOrCreateEnumTypeDie(uint32 idx); + DBGDie *GetOrCreateEnumTypeDie(MIREnum *mirEnum); + DBGDie *GetOrCreateTypeByNameDie(const MIRType &type); + + GStrIdx GetPrimTypeCName(PrimType pty); + + void AddScopeDie(MIRScope *scope); + DBGDie *GetAliasVarTypeDie(const MIRAliasVars &aliasVar, TyIdx tyidx); + void HandleTypeAlias(MIRScope &scope); + void AddAliasDies(MIRScope &scope, bool isLocal); + void CollectScopePos(MIRFunction *func, MIRScope *scope); + + // Functions for calculating the size and offset of each DW_TAG_xxx and DW_AT_xxx + void ComputeSizeAndOffset(DBGDie *die, uint32 &cuOffset); +}; +} // namespace maple +#endif // MAPLE_IR_INCLUDE_DBG_INFO_H diff --git a/src/mapleall/maple_ir/include/dex2mpl/dexintrinsic.def b/src/mapleall/maple_ir/include/dex2mpl/dexintrinsic.def new file mode 100644 index 0000000000000000000000000000000000000000..cfa6016f0c175e433a07da9b661bfd3c28911ad6 --- /dev/null +++ b/src/mapleall/maple_ir/include/dex2mpl/dexintrinsic.def @@ -0,0 +1,17 @@ +DEF_MIR_INTRINSIC(JAVA_INTERFACE_CALL,\ + "__dex_interface_call", DEFAULT_NUM_INSN, INTRNISJAVA, kArgTyDynany, kArgTyDynany, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JAVA_PRINT,\ + "printf", DEFAULT_NUM_INSN, INTRNISJAVA, kArgTyVoid, kArgTyDynany, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JAVA_CLINIT_CHECK_SGET,\ + "__dex_clinit_check_sget", DEFAULT_NUM_INSN, INTRNISJAVA, kArgTyVoid, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JAVA_CLINIT_CHECK_SPUT,\ + "__dex__clinit_check_sput", DEFAULT_NUM_INSN, INTRNISJAVA, kArgTyVoid, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JAVA_CLINIT_CHECK_NEW,\ + "__dex_clinit_check_new", DEFAULT_NUM_INSN, INTRNISJAVA, kArgTyVoid, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JAVA_STR_TO_JSTR,\ + "__dex_str_to_jstr", DEFAULT_NUM_INSN, INTRNISJAVA, kArgTyPtr, kArgTyPtr, kArgTyUndef, 
kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +// __dex_random is used to generate a random value used in callback cfg +DEF_MIR_INTRINSIC(JAVA_RANDOM,\ + "__dex_random", DEFAULT_NUM_INSN, INTRNISJAVA | INTRNNOSIDEEFFECT, kArgTyI32, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JAVA_THROW_CLASSCAST,\ + "MCC_ThrowClassCastException", DEFAULT_NUM_INSN, INTRNISJAVA | INTRNNEVERRETURN, kArgTyVoid, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) diff --git a/src/mapleall/maple_ir/include/func_desc.h b/src/mapleall/maple_ir/include/func_desc.h new file mode 100644 index 0000000000000000000000000000000000000000..3cea54fb68613884d45569e81d82b9c785f3e5cf --- /dev/null +++ b/src/mapleall/maple_ir/include/func_desc.h @@ -0,0 +1,205 @@ +/* + * Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLE_IR_INCLUDE_FUNC_DESC_H +#define MAPLE_IR_INCLUDE_FUNC_DESC_H +#include "mpl_logging.h" +namespace maple { + +enum class FI { + kUnknown = 0, + kPure, // means this function will not modify any global memory. + kConst, // means this function will not read/modify any global memory. +}; + +static std::string kFIStr[] = { + "kUnknown", "kPure", "kConst" +}; + +enum class RI { + kUnknown = 0, // for ptr value, don't know anything. + kNoAlias, // for ptr value, no alias with any other ptr when this method is returned. As in malloc. + kAliasParam0, // for ptr value, it may alias with first param. As in memcpy. + kAliasParam1, + kAliasParam2, + kAliasParam3, + kAliasParam4, + kAliasParam5, +}; + +static std::string kRIStr[] = { + "kUnknown", + "kNoAlias", + "kAliasParam0", + "kAliasParam1", + "kAliasParam2", + "kAliasParam3", + "kAliasParam4", + "kAliasParam5" +}; + +enum class PI { + kUnknown = 0, // for ptr param, may read/write every level memory. + kReadWriteMemory, // for ptr param, only read & write the memory it points to. + kWriteMemoryOnly, // for ptr param, only write the memory it points to. + kReadMemoryOnly, // for ptr param, only read the memory it points to. + kReadSelfOnly, // for ptr param, only read the ptr itself, do not dereference. + kUnused, // this param is not used in this function. +}; + +static std::string kPIStr[] = { + "kUnknown", + "kReadWriteMemory", + "kWriteMemoryOnly", + "kReadMemoryOnly", + "kReadSelfOnly", + "kUnused" +}; + +// most function has less than 6 parameters. 
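These enums are ordered so that a larger value is a stronger guarantee (FuncDesc::InitToBest below starts from kConst / kNoAlias / kUnused), and the SetFuncInfoNoBetterThan / SetParamInfoNoBetterThan helpers merge new facts by taking the minimum, so an analysis result can only become more conservative. A minimal sketch of that merge rule with a hypothetical stand-in enum:

#include <algorithm>
#include <cassert>

// Stand-in ordered like FI: larger value = stronger guarantee.
enum class Info { kUnknown = 0, kPure = 1, kConst = 2 };

// Same idea as FuncDesc::SetFuncInfoNoBetterThan: merging never upgrades.
void MergeNoBetterThan(Info &current, Info incoming) {
  current = static_cast<Info>(
      std::min(static_cast<int>(current), static_cast<int>(incoming)));
}

int main() {
  Info summary = Info::kConst;               // optimistic starting point
  MergeNoBetterThan(summary, Info::kPure);   // one path only proves "pure"
  assert(summary == Info::kPure);
  MergeNoBetterThan(summary, Info::kConst);  // a later, stronger fact cannot win it back
  assert(summary == Info::kPure);
  return 0;
}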
+const size_t kMaxParamCount = 6; +struct FuncDesc { + FI funcInfo{}; + RI returnInfo{}; + PI paramInfo[kMaxParamCount]{}; + bool configed = false; + + void InitToBest() { + funcInfo = FI::kConst; + returnInfo = RI::kNoAlias; + for (size_t idx = 0; idx < kMaxParamCount; ++idx) { + paramInfo[idx] = PI::kUnused; + } + } + + bool Equals(const FuncDesc &desc) const { + if (funcInfo != desc.funcInfo) { + return false; + } + if (returnInfo != desc.returnInfo) { + return false; + } + for (size_t idx = 0; idx < kMaxParamCount; ++idx) { + if (paramInfo[idx] != desc.paramInfo[idx]) { + return false; + } + } + return true; + } + + bool IsConfiged() const { + return configed; + } + + void SetConfiged() { + configed = true; + } + + bool IsConst() const { + return funcInfo == FI::kConst; + } + + bool IsPure() const { + return funcInfo == FI::kPure; + } + + bool IsReturnNoAlias() const { + return returnInfo == RI::kNoAlias; + } + bool IsReturnAlias() const { + return returnInfo >= RI::kAliasParam0; + } + + void CheckReturnInfo() const { + CHECK_FATAL(returnInfo >= RI::kAliasParam0, "Impossible."); + } + + const PI GetParamInfo(size_t index) const { + if (index >= kMaxParamCount) { + return PI::kUnknown; + } + return paramInfo[index]; + } + + bool IsArgReadSelfOnly(size_t index) const { + if (index >= kMaxParamCount) { + return false; + } + return paramInfo[index] == PI::kReadSelfOnly; + } + + bool IsArgReadMemoryOnly(size_t index) const { + if (index >= kMaxParamCount) { + return false; + } + return paramInfo[index] == PI::kReadMemoryOnly; + } + + bool IsArgWriteMemoryOnly(size_t index) const { + if (index >= kMaxParamCount) { + return false; + } + return paramInfo[index] == PI::kWriteMemoryOnly; + } + + bool IsArgUnused(size_t index) const { + if (index >= kMaxParamCount) { + return false; + } + return paramInfo[index] == PI::kUnused; + } + + void SetFuncInfo(const FI fi) { + funcInfo = fi; + } + + void SetFuncInfoNoBetterThan(const FI fi) { + auto oldValue = static_cast(funcInfo); + auto newValue = static_cast(fi); + if (newValue < oldValue) { + SetFuncInfo(static_cast(newValue)); + } + } + + void SetReturnInfo(const RI ri) { + returnInfo = ri; + } + + void SetParamInfo(const size_t idx, const PI pi) { + if (idx >= kMaxParamCount) { + return; + } + paramInfo[idx] = pi; + } + + void SetParamInfoNoBetterThan(const size_t idx, const PI pi) { + size_t oldValue = static_cast(paramInfo[idx]); + size_t newValue = static_cast(pi); + if (newValue < oldValue) { + SetParamInfo(idx, static_cast(newValue)); + } + } + + void Dump(size_t numParam = kMaxParamCount) { + auto dumpCount = numParam > kMaxParamCount ? kMaxParamCount : numParam; + LogInfo::MapleLogger() << kFIStr[static_cast(funcInfo)] + << " " << kRIStr[static_cast(returnInfo)]; + for (size_t i = 0; i < dumpCount; ++i) { + LogInfo::MapleLogger() << " " << kPIStr[static_cast(paramInfo[i])]; + } + LogInfo::MapleLogger() << "\n"; + } +}; +} // namespace maple +#endif // MAPLE_IR_INCLUDE_FUNC_DESC_H diff --git a/src/mapleall/maple_ir/include/global_tables.h b/src/mapleall/maple_ir/include/global_tables.h new file mode 100644 index 0000000000000000000000000000000000000000..7b7250be474ffde0493176f40b1dc25cbca2fd30 --- /dev/null +++ b/src/mapleall/maple_ir/include/global_tables.h @@ -0,0 +1,878 @@ +/* + * Copyright (c) [2019-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLE_IR_INCLUDE_GLOBAL_TABLES_H +#define MAPLE_IR_INCLUDE_GLOBAL_TABLES_H +#include +#include +#include +#include +#include +#include "thread_env.h" +#include "mempool.h" +#include "mempool_allocator.h" +#include "types_def.h" +#include "prim_types.h" +#include "mir_module.h" +#include "namemangler.h" +#include "mir_type.h" +#include "mir_const.h" +#include "mir_enum.h" + +namespace maple { +using TyIdxFieldAttrPair = std::pair; +using FieldPair = std::pair; +using FieldVector = std::vector; + +class BinaryMplImport; // circular dependency exists, no other choice + +// to facilitate the use of unordered_map +class TyIdxHash { + public: + std::size_t operator()(const TyIdx &tyIdx) const { + return std::hash{}(tyIdx); + } +}; + +// to facilitate the use of unordered_map +class GStrIdxHash { + public: + std::size_t operator()(const GStrIdx &gStrIdx) const { + return std::hash{}(gStrIdx); + } +}; + +// to facilitate the use of unordered_map +class UStrIdxHash { + public: + std::size_t operator()(const UStrIdx &uStrIdx) const { + return std::hash{}(uStrIdx); + } +}; + +class IntConstKey { + friend class IntConstHash; + friend class IntConstCmp; + public: + IntConstKey(int64 v, TyIdx tyIdx) : val(v), tyIdx(tyIdx) {} + virtual ~IntConstKey() {} + private: + int64 val; + TyIdx tyIdx; +}; + +class IntConstHash { + public: + std::size_t operator() (const IntConstKey &key) const { + return std::hash{}(key.val) ^ (std::hash{}(static_cast(key.tyIdx)) << 1); + } +}; + +class IntConstCmp { + public: + bool operator() (const IntConstKey &lkey, const IntConstKey &rkey) const { + return lkey.val == rkey.val && lkey.tyIdx == rkey.tyIdx; + } +}; + +class TypeTable { + friend BinaryMplImport; + public: + static MIRType *voidPtrType; + + TypeTable(); + TypeTable(const TypeTable&) = delete; + TypeTable &operator=(const TypeTable&) = delete; + ~TypeTable(); + + std::vector &GetTypeTable() { + return typeTable; + } + + const std::vector &GetTypeTable() const { + return typeTable; + } + + auto &GetTypeHashTable() const { + return typeHashTable; + } + + auto &GetPtrTypeMap() const { + return ptrTypeMap; + } + + auto &GetRefTypeMap() const { + return refTypeMap; + } + + MIRType *GetTypeFromTyIdx(TyIdx tyIdx) { + return const_cast(const_cast(this)->GetTypeFromTyIdx(tyIdx)); + } + const MIRType *GetTypeFromTyIdx(TyIdx tyIdx) const { + CHECK_FATAL(tyIdx < typeTable.size(), "array index out of range"); + return typeTable.at(tyIdx); + } + + MIRType *GetTypeFromTyIdx(uint32 index) const { + CHECK_FATAL(index < typeTable.size(), "array index out of range"); + return typeTable.at(index); + } + + PrimType GetPrimTypeFromTyIdx(const TyIdx &tyIdx) const { + CHECK_FATAL(tyIdx < typeTable.size(), "array index out of range"); + return typeTable.at(tyIdx)->GetPrimType(); + } + + void SetTypeWithTyIdx(const TyIdx &tyIdx, MIRType &type); + MIRType *GetOrCreateMIRTypeNode(MIRType &pType); + + TyIdx GetOrCreateMIRType(MIRType *pType) { + return GetOrCreateMIRTypeNode(*pType)->GetTypeIndex(); + } + + uint32 GetTypeTableSize() const { + return static_cast(typeTable.size()); + } + + // Get primtive types. 
+ MIRType *GetPrimType(PrimType primType) const { + ASSERT(primType < typeTable.size(), "array index out of range"); + return typeTable.at(primType); + } + + MIRType *GetFloat() const { + ASSERT(PTY_f32 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_f32); + } + + MIRType *GetDouble() const { + ASSERT(PTY_f64 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_f64); + } + + MIRType *GetFloat128() const { + ASSERT(PTY_f128 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_f128); + } + + MIRType *GetUInt1() const { + ASSERT(PTY_u1 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_u1); + } + + MIRType *GetUInt8() const { + ASSERT(PTY_u8 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_u8); + } + + MIRType *GetInt8() const { + ASSERT(PTY_i8 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_i8); + } + + MIRType *GetUInt16() const { + ASSERT(PTY_u16 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_u16); + } + + MIRType *GetInt16() const { + ASSERT(PTY_i16 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_i16); + } + + MIRType *GetInt32() const { + ASSERT(PTY_i32 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_i32); + } + + MIRType *GetUInt32() const { + ASSERT(PTY_u32 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_u32); + } + + MIRType *GetInt64() const { + ASSERT(PTY_i64 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_i64); + } + + MIRType *GetUInt64() const { + ASSERT(PTY_u64 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_u64); + } + + MIRType *GetPtr() const { + ASSERT(PTY_ptr < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_ptr); + } + +#ifdef USE_ARM32_MACRO + MIRType *GetUIntType() const { + ASSERT(PTY_u32 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_u32); + } + + MIRType *GetPtrType() const { + ASSERT(PTY_u32 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_u32); + } +#else + MIRType *GetUIntType() const { + ASSERT(PTY_u64 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_u64); + } + + MIRType *GetPtrType() const { + ASSERT(PTY_ptr < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_ptr); + } +#endif + +#ifdef USE_32BIT_REF + MIRType *GetCompactPtr() const { + ASSERT(PTY_u32 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_u32); + } + +#else + MIRType *GetCompactPtr() const { + ASSERT(PTY_u64 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_u64); + } + +#endif + MIRType *GetRef() const { + ASSERT(PTY_ref < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_ref); + } + + MIRType *GetAddr32() const { + ASSERT(PTY_a32 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_a32); + } + + MIRType *GetAddr64() const { + ASSERT(PTY_a64 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_a64); + } + + MIRType *GetVoid() const { + ASSERT(PTY_void < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_void); + } + +#ifdef DYNAMICLANG + MIRType *GetDynundef() const { + ASSERT(PTY_dynundef < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_dynundef); + } + + MIRType *GetDynany() const { + ASSERT(PTY_dynany < 
typeTable.size(), "array index out of range"); + return typeTable.at(PTY_dynany); + } + + MIRType *GetDyni32() const { + ASSERT(PTY_dyni32 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_dyni32); + } + + MIRType *GetDynf64() const { + ASSERT(PTY_dynf64 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_dynf64); + } + + MIRType *GetDynf32() const { + ASSERT(PTY_dynf32 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_dynf32); + } + + MIRType *GetDynstr() const { + ASSERT(PTY_dynstr < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_dynstr); + } + + MIRType *GetDynobj() const { + ASSERT(PTY_dynobj < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_dynobj); + } + + MIRType *GetDynbool() const { + ASSERT(PTY_dynbool < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_dynbool); + } + +#endif + MIRType *GetUnknown() const { + ASSERT(PTY_unknown < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_unknown); + } + // vector type + MIRType *GetV4Int32() const { + ASSERT(PTY_v4i32 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_v4i32); + } + + MIRType *GetV2Int32() const { + ASSERT(PTY_v2i32 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_v2i32); + } + + MIRType *GetV4UInt32() const { + ASSERT(PTY_v4u32 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_v4u32); + } + MIRType *GetV2UInt32() const { + ASSERT(PTY_v2u32 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_v2u32); + } + + MIRType *GetV4Int16() const { + ASSERT(PTY_v4i16 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_v4i16); + } + MIRType *GetV8Int16() const { + ASSERT(PTY_v8i16 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_v8i16); + } + + MIRType *GetV4UInt16() const { + ASSERT(PTY_v4u16 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_v4u16); + } + MIRType *GetV8UInt16() const { + ASSERT(PTY_v8u16 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_v8u16); + } + + MIRType *GetV8Int8() const { + ASSERT(PTY_v8i8 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_v8i8); + } + MIRType *GetV16Int8() const { + ASSERT(PTY_v16i8 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_v16i8); + } + + MIRType *GetV8UInt8() const { + ASSERT(PTY_v8u8 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_v8u8); + } + MIRType *GetV16UInt8() const { + ASSERT(PTY_v16u8 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_v16u8); + } + MIRType *GetV2Int64() const { + ASSERT(PTY_v2i64 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_v2i64); + } + MIRType *GetV2UInt64() const { + ASSERT(PTY_v2u64 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_v2u64); + } + + MIRType *GetV2Float32() const { + ASSERT(PTY_v2f32 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_v2f32); + } + MIRType *GetV4Float32() const { + ASSERT(PTY_v4f32 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_v4f32); + } + MIRType *GetV2Float64() const { + ASSERT(PTY_v2f64 < typeTable.size(), "array index out of range"); + return typeTable.at(PTY_v2f64); + } + + // Get or Create derived types. 
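The GetOrCreate* family declared next appears to hash-cons types: a candidate type is looked up in typeHashTable through the Hash/Equal functors further down, and only when no structurally equal entry exists is a copy appended to typeTable and handed out (cf. CreateType and PutToHashTable). A standalone sketch of that get-or-create pattern with hypothetical stand-in types:

#include <cstddef>
#include <functional>
#include <iostream>
#include <unordered_set>
#include <vector>

// Stand-in for a type node: identity is (kind, pointee), like a pointer type.
struct Ty {
  int kind;
  unsigned pointee;
  std::size_t index;  // position in the owning table, cf. MIRType::typeIndex
};

struct TyHash {
  std::size_t operator()(const Ty *t) const {
    return std::hash<int>{}(t->kind) ^ (std::hash<unsigned>{}(t->pointee) << 1);
  }
};
struct TyEqual {
  bool operator()(const Ty *a, const Ty *b) const {
    return a->kind == b->kind && a->pointee == b->pointee;
  }
};

struct TinyTypeTable {
  std::vector<Ty*> table;                              // owning list, position == Ty::index
  std::unordered_set<Ty*, TyHash, TyEqual> interned;   // structural lookup

  Ty *GetOrCreate(Ty candidate) {
    auto it = interned.find(&candidate);
    if (it != interned.end()) {
      return *it;                                      // already interned: reuse it
    }
    Ty *fresh = new Ty(candidate);                     // never freed here, for brevity
    fresh->index = table.size();
    table.push_back(fresh);
    interned.insert(fresh);
    return fresh;
  }
};

int main() {
  TinyTypeTable tt;
  Ty *a = tt.GetOrCreate({1, 7, 0});
  Ty *b = tt.GetOrCreate({1, 7, 0});                   // structurally equal: same node back
  std::cout << (a == b) << " " << tt.table.size() << "\n";  // prints "1 1"
  return 0;
}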
+ MIRType *GetOrCreatePointerType(const TyIdx &pointedTyIdx, PrimType primType = PTY_ptr, + const TypeAttrs &attrs = TypeAttrs()); + MIRType *GetOrCreatePointerType(const MIRType &pointTo, PrimType primType = PTY_ptr, + const TypeAttrs &attrs = TypeAttrs()); + const MIRType *GetPointedTypeIfApplicable(MIRType &type) const; + MIRType *GetPointedTypeIfApplicable(MIRType &type); + MIRType *GetVoidPtr() const { + ASSERT(voidPtrType != nullptr, "voidPtrType should not be null"); + return voidPtrType; + } + + void UpdateMIRType(const MIRType &pType, const TyIdx tyIdx); + MIRArrayType *GetOrCreateArrayType(const MIRType &elem, uint8 dim, const uint32 *sizeArray, + const TypeAttrs &attrs = TypeAttrs()); + // For one dimention array + MIRArrayType *GetOrCreateArrayType(const MIRType &elem, uint32 size, const TypeAttrs &attrs = TypeAttrs()); + MIRType *GetOrCreateFarrayType(const MIRType &elem); + MIRType *GetOrCreateJarrayType(const MIRType &elem); + MIRType *GetOrCreateFunctionType(const TyIdx &retTyIdx, const std::vector &vecType, + const std::vector &vecAttrs, bool isVarg = false, + const TypeAttrs &retAttrs = TypeAttrs()); + MIRType *GetOrCreateStructType(const std::string &name, const FieldVector &fields, const FieldVector &prntFields, + MIRModule &module) { + return GetOrCreateStructOrUnion(name, fields, prntFields, module); + } + + MIRType *GetOrCreateUnionType(const std::string &name, const FieldVector &fields, const FieldVector &parentFields, + MIRModule &module) { + return GetOrCreateStructOrUnion(name, fields, parentFields, module, false); + } + + MIRType *GetOrCreateClassType(const std::string &name, MIRModule &module) { + return GetOrCreateClassOrInterface(name, module, true); + } + + MIRType *GetOrCreateInterfaceType(const std::string &name, MIRModule &module) { + return GetOrCreateClassOrInterface(name, module, false); + } + + void PushIntoFieldVector(FieldVector &fields, const std::string &name, const MIRType &type) const; + void AddFieldToStructType(MIRStructType &structType, const std::string &fieldName, const MIRType &fieldType) const; + + TyIdx lastDefaultTyIdx; + private: + using MIRTypePtr = MIRType*; + struct Hash { + size_t operator()(const MIRTypePtr &ty) const { + return ty->GetHashIndex(); + } + }; + + struct Equal { + bool operator()(const MIRTypePtr &tx, const MIRTypePtr &ty) const { + return tx->EqualTo(*ty); + } + }; + + // create an entry in typeTable for the type node + MIRType *CreateType(const MIRType &oldType) { + MIRType *newType = oldType.CopyMIRTypeNode(); + newType->SetTypeIndex(TyIdx(typeTable.size())); + typeTable.push_back(newType); + return newType; + } + + void PushNull() { typeTable.push_back(nullptr); } + void PopBack() { typeTable.pop_back(); } + + void CreateMirTypeNodeAt(MIRType &pType, TyIdx tyIdxUsed, MIRModule *module, bool isObject, bool isIncomplete); + MIRType *CreateAndUpdateMirTypeNode(MIRType &pType); + MIRType *GetOrCreateStructOrUnion(const std::string &name, const FieldVector &fields, const FieldVector &printFields, + MIRModule &module, bool forStruct = true, + const TypeAttrs &attrs = TypeAttrs()); + MIRType *GetOrCreateClassOrInterface(const std::string &name, MIRModule &module, bool forClass); + + MIRType *CreateMirType(uint32 primTypeIdx) const; + void PutToHashTable(MIRType *mirType); + + std::unordered_set typeHashTable; + std::unordered_map ptrTypeMap; + std::unordered_map refTypeMap; + std::vector typeTable; + mutable std::shared_timed_mutex mtx; +}; + +class StrPtrHash { + public: + size_t operator()(const std::string *str) 
const { + return std::hash{}(*str); + } + + size_t operator()(const std::u16string *str) const { + return std::hash{}(*str); + } +}; + +class StrPtrEqual { + public: + bool operator()(const std::string *str1, const std::string *str2) const { + return *str1 == *str2; + } + + bool operator()(const std::u16string *str1, const std::u16string *str2) const { + return *str1 == *str2; + } +}; + +// T can be std::string or std::u16string +// U can be GStrIdx, UStrIdx, or U16StrIdx +template +class StringTable { + public: + StringTable() = default; + StringTable(const StringTable&) = delete; + StringTable &operator=(const StringTable&) = delete; + + ~StringTable() { + stringTableMap.clear(); + for (auto it : stringTable) { + delete it; + } + } + + void Init() { + // initialize 0th entry of stringTable with an empty string + T *ptr = new T; + stringTable.push_back(ptr); + } + + U GetStrIdxFromName(const T &str) const { + if (ThreadEnv::IsMeParallel()) { + std::shared_lock lock(mtx); + auto it = stringTableMap.find(&str); + if (it == stringTableMap.end()) { + return U(0); + } + return it->second; + } + auto it = stringTableMap.find(&str); + if (it == stringTableMap.end()) { + return U(0); + } + return it->second; + } + + U GetOrCreateStrIdxFromName(const T &str) { + U strIdx = GetStrIdxFromName(str); + if (strIdx == 0u) { + if (ThreadEnv::IsMeParallel()) { + std::unique_lock lock(mtx); + strIdx.reset(stringTable.size()); + T *newStr = new T(str); + stringTable.push_back(newStr); + stringTableMap[newStr] = strIdx; + return strIdx; + } + strIdx.reset(stringTable.size()); + T *newStr = new T(str); + stringTable.push_back(newStr); + stringTableMap[newStr] = strIdx; + } + return strIdx; + } + + size_t StringTableSize() const { + if (ThreadEnv::IsMeParallel()) { + std::shared_lock lock(mtx); + return stringTable.size(); + } + return stringTable.size(); + } + + const T &GetStringFromStrIdx(U strIdx) const { + if (ThreadEnv::IsMeParallel()) { + std::shared_lock lock(mtx); + ASSERT(strIdx < stringTable.size(), "array index out of range"); + return *stringTable[strIdx]; + } + ASSERT(strIdx < stringTable.size(), "array index out of range"); + return *stringTable[strIdx]; + } + + const T &GetStringFromStrIdx(uint32 idx) const { + ASSERT(idx < stringTable.size(), "array index out of range"); + return *stringTable[idx]; + } + + private: + std::vector stringTable; // index is uint32 + std::unordered_map stringTableMap; + mutable std::shared_timed_mutex mtx; +}; + +class FPConstTable { + public: + FPConstTable(const FPConstTable &p) = delete; + FPConstTable &operator=(const FPConstTable &p) = delete; + ~FPConstTable(); + + // get the const from floatConstTable or create a new one + MIRFloatConst *GetOrCreateFloatConst(float floatVal); + // get the const from doubleConstTable or create a new one + MIRDoubleConst *GetOrCreateDoubleConst(double doubleVal); + + static std::unique_ptr Create() { + auto p = std::unique_ptr(new FPConstTable()); + p->PostInit(); + return p; + } + + private: + FPConstTable() : floatConstTable(), doubleConstTable() {} + void PostInit(); + MIRFloatConst *DoGetOrCreateFloatConst(float floatVal); + MIRDoubleConst *DoGetOrCreateDoubleConst(double doubleVal); + MIRFloatConst *DoGetOrCreateFloatConstThreadSafe(float floatVal); + MIRDoubleConst *DoGetOrCreateDoubleConstThreadSafe(double doubleVal); + std::shared_timed_mutex floatMtx; + std::shared_timed_mutex doubleMtx; + std::unordered_map floatConstTable; // map float const value to the table; + std::unordered_map doubleConstTable; // map double 
const value to the table; + MIRFloatConst *nanFloatConst = nullptr; + MIRFloatConst *infFloatConst = nullptr; + MIRFloatConst *minusInfFloatConst = nullptr; + MIRFloatConst *minusZeroFloatConst = nullptr; + MIRDoubleConst *nanDoubleConst = nullptr; + MIRDoubleConst *infDoubleConst = nullptr; + MIRDoubleConst *minusInfDoubleConst = nullptr; + MIRDoubleConst *minusZeroDoubleConst = nullptr; +}; + +class IntConstTable { + public: + IntConstTable(const IntConstTable &p) = delete; + IntConstTable &operator=(const IntConstTable &p) = delete; + ~IntConstTable(); + + MIRIntConst *GetOrCreateIntConst(const IntVal &val, MIRType &type); + MIRIntConst *GetOrCreateIntConst(uint64 val, MIRType &type); + + static std::unique_ptr Create() { + auto p = std::unique_ptr(new IntConstTable()); + return p; + } + + private: + IntConstTable() = default; + MIRIntConst *DoGetOrCreateIntConst(uint64 val, MIRType &type); + MIRIntConst *DoGetOrCreateIntConstTreadSafe(uint64 val, MIRType &type); + std::shared_timed_mutex mtx; + std::unordered_map intConstTable; +}; + +// STypeNameTable is only used to store class and interface types. +// Each module maintains its own MIRTypeNameTable. +class STypeNameTable { + public: + STypeNameTable() = default; + virtual ~STypeNameTable() = default; + + const std::unordered_map &GetGStridxToTyidxMap() const { + return gStrIdxToTyIdxMap; + } + + TyIdx GetTyIdxFromGStrIdx(GStrIdx idx) const { + const auto it = gStrIdxToTyIdxMap.find(idx); + if (it == gStrIdxToTyIdxMap.cend()) { + return TyIdx(0); + } + return it->second; + } + + void SetGStrIdxToTyIdx(GStrIdx gStrIdx, TyIdx tyIdx) { + gStrIdxToTyIdxMap[gStrIdx] = tyIdx; + } + + private: + std::unordered_map gStrIdxToTyIdxMap; +}; + +class FunctionTable { + public: + FunctionTable() { + funcTable.push_back(nullptr); + } // puIdx 0 is reserved + + virtual ~FunctionTable() = default; + + std::vector &GetFuncTable() { + return funcTable; + } + + MIRFunction *GetFunctionFromPuidx(PUIdx pIdx) const { + CHECK_FATAL(pIdx < funcTable.size(), "Invalid puIdx"); + return funcTable.at(pIdx); + } + + void SetFunctionItem(uint32 pIdx, MIRFunction *func) { + CHECK_FATAL(pIdx < funcTable.size(), "Invalid puIdx"); + funcTable[pIdx] = func; + } + + private: + std::vector funcTable; // index is PUIdx +}; + +class GSymbolTable { + public: + GSymbolTable(); + GSymbolTable(const GSymbolTable&) = delete; + GSymbolTable &operator=(const GSymbolTable&) = delete; + ~GSymbolTable(); + + MIRModule *GetModule() { + return module; + } + + void SetModule(MIRModule *m) { + module = m; + } + + bool IsValidIdx(size_t idx) const { + return idx < symbolTable.size(); + } + + MIRSymbol *GetSymbolFromStidx(uint32 idx, bool checkFirst = false) const { + if (checkFirst && idx >= symbolTable.size()) { + return nullptr; + } + ASSERT(IsValidIdx(idx), "symbol table index out of range"); + return symbolTable[idx]; + } + + void SetStrIdxStIdxMap(GStrIdx strIdx, StIdx stIdx) { + strIdxToStIdxMap[strIdx] = stIdx; + } + + StIdx GetStIdxFromStrIdx(GStrIdx idx) const { + const auto it = strIdxToStIdxMap.find(idx); + if (it == strIdxToStIdxMap.cend()) { + return StIdx(); + } + return it->second; + } + + MIRSymbol *GetSymbolFromStrIdx(GStrIdx idx, bool checkFirst = false) const { + return GetSymbolFromStidx(GetStIdxFromStrIdx(idx).Idx(), checkFirst); + } + + size_t GetSymbolTableSize() const { + return symbolTable.size(); + } + + MIRSymbol *GetSymbol(uint32 idx) const { + ASSERT(idx < symbolTable.size(), "array index out of range"); + return symbolTable.at(idx); + } + + MIRSymbol 
*CreateSymbol(uint8 scopeID); + bool AddToStringSymbolMap(const MIRSymbol &st); + bool RemoveFromStringSymbolMap(const MIRSymbol &st); + void Dump(bool isLocal, int32 indent = 0) const; + + private: + MIRModule *module = nullptr; + // hash table mapping string index to st index + std::unordered_map strIdxToStIdxMap; + std::vector symbolTable; // map symbol idx to symbol node +}; + +class ConstPool { + public: + std::unordered_map &GetConstU16StringPool() { + return constU16StringPool; + } + + void InsertConstPool(GStrIdx strIdx, MIRConst *cst) { + (void)constMap.emplace(strIdx, cst); + } + + MIRConst *GetConstFromPool(GStrIdx strIdx) { + return constMap[strIdx]; + } + + void PutLiteralNameAsImported(GStrIdx gIdx) { + (void)importedLiteralNames.insert(gIdx); + } + + bool LookUpLiteralNameFromImported(GStrIdx gIdx) { + return importedLiteralNames.find(gIdx) != importedLiteralNames.end(); + } + + protected: + std::unordered_map constMap; + std::set importedLiteralNames; + + private: + std::unordered_map constU16StringPool; +}; + +class GlobalTables { + public: + static GlobalTables &GetGlobalTables(); + + static StringTable &GetStrTable() { + return globalTables.gStringTable; + } + + static StringTable &GetUStrTable() { + return globalTables.uStrTable; + } + + static StringTable &GetU16StrTable() { + return globalTables.u16StringTable; + } + + static TypeTable &GetTypeTable() { + return globalTables.typeTable; + } + + static FPConstTable &GetFpConstTable() { + return *(globalTables.fpConstTablePtr); + } + + static STypeNameTable &GetTypeNameTable() { + return globalTables.typeNameTable; + } + + static FunctionTable &GetFunctionTable() { + return globalTables.functionTable; + } + + static GSymbolTable &GetGsymTable() { + return globalTables.gSymbolTable; + } + + static ConstPool &GetConstPool() { + return globalTables.constPool; + } + + static IntConstTable &GetIntConstTable() { + return *(globalTables.intConstTablePtr); + } + + static EnumTable &GetEnumTable() { + return globalTables.enumTable; + } + + GlobalTables(const GlobalTables &globalTables) = delete; + GlobalTables(const GlobalTables &&globalTables) = delete; + GlobalTables &operator=(const GlobalTables &globalTables) = delete; + GlobalTables &operator=(const GlobalTables &&globalTables) = delete; + + private: + GlobalTables() : fpConstTablePtr(FPConstTable::Create()), + intConstTablePtr(IntConstTable::Create()) { + gStringTable.Init(); + uStrTable.Init(); + u16StringTable.Init(); + } + virtual ~GlobalTables() = default; + static GlobalTables globalTables; + + TypeTable typeTable; + STypeNameTable typeNameTable; + FunctionTable functionTable; + GSymbolTable gSymbolTable; + ConstPool constPool; + std::unique_ptr fpConstTablePtr; + std::unique_ptr intConstTablePtr; + StringTable gStringTable; + StringTable uStrTable; + StringTable u16StringTable; + EnumTable enumTable; +}; + +inline MIRType &GetTypeFromTyIdx(TyIdx idx) { + return *(GlobalTables::GetTypeTable().GetTypeFromTyIdx(idx)); +} +} // namespace maple +#endif // MAPLE_IR_INCLUDE_GLOBAL_TABLES_H diff --git a/src/mapleall/maple_ir/include/intrinsic_c.def b/src/mapleall/maple_ir/include/intrinsic_c.def new file mode 100644 index 0000000000000000000000000000000000000000..cc34f9b8f75ec0480bb989ac659df702b24da445 --- /dev/null +++ b/src/mapleall/maple_ir/include/intrinsic_c.def @@ -0,0 +1,325 @@ +/* + * Copyright (c) [2021] Huawei Technologies Co., Ltd. All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan Permissive Software License v2. 
+ * You can use this software according to the terms and conditions of the MulanPSL - 2.0. + * You may obtain a copy of MulanPSL - 2.0 at: + * + * https://opensource.org/licenses/MulanPSL-2.0 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the MulanPSL - 2.0 for more details. + */ + +// DEF_MIR_INTRINSIC(STR, NAME, NUM_INSN, INTRN_CLASS, RETURN_TYPE, ARG0, ARG1, ARG2, ARG3, ARG4, ARG5) +DEF_MIR_INTRINSIC(C_strcmp,\ + "strcmp", 1, INTRNNOSIDEEFFECT | INTRNISPURE, kArgTyI32, kArgTyPtr, kArgTyPtr) +DEF_MIR_INTRINSIC(C_strncmp,\ + "strncmp", 1, INTRNNOSIDEEFFECT | INTRNISPURE, kArgTyI32, kArgTyPtr, kArgTyPtr, kArgTyU32) +DEF_MIR_INTRINSIC(C_strcpy,\ + "strcpy", 1, INTRNWRITEFIRSTOPND | INTRNREADSECONDOPND, \ + kArgTyPtr, kArgTyPtr, kArgTyPtr) +DEF_MIR_INTRINSIC(C_strncpy,\ + "strncpy", 1, INTRNWRITEFIRSTOPND | INTRNREADSECONDOPND | INTRNREADTHIRDOPND, \ + kArgTyPtr, kArgTyPtr, kArgTyPtr, kArgTyU64) +DEF_MIR_INTRINSIC(C_strlen,\ + "strlen", 1, INTRNNOSIDEEFFECT | INTRNISPURE, kArgTyU64, kArgTyPtr) +DEF_MIR_INTRINSIC(C_strchr,\ + "strchr", 1, INTRNNOSIDEEFFECT | INTRNISPURE, kArgTyPtr, kArgTyPtr, kArgTyU32) +DEF_MIR_INTRINSIC(C_strrchr,\ + "strrchr", 1, INTRNNOSIDEEFFECT | INTRNISPURE, kArgTyPtr, kArgTyPtr, kArgTyU32) +DEF_MIR_INTRINSIC(C_memcmp,\ + "memcmp", 1, INTRNNOSIDEEFFECT | INTRNISPURE, kArgTyI32, kArgTyPtr, kArgTyPtr, kArgTyU64) +DEF_MIR_INTRINSIC(C_memcpy,\ + "memcpy", 1, INTRNWRITEFIRSTOPND | INTRNREADSECONDOPND | INTRNREADTHIRDOPND, \ + kArgTyPtr, kArgTyPtr, kArgTyPtr, kArgTyU64) +DEF_MIR_INTRINSIC(C_memmove,\ + "memmove", 1, 0, kArgTyPtr, kArgTyPtr, kArgTyPtr, kArgTyU64) +DEF_MIR_INTRINSIC(C_memset,\ + "memset", 1, INTRNWRITEFIRSTOPND | INTRNREADSECONDOPND | INTRNREADTHIRDOPND, \ + kArgTyPtr, kArgTyPtr, kArgTyI32, kArgTyU64) +DEF_MIR_INTRINSIC(C_acosf,\ + "acosf", 1, INTRNISPURE, kArgTyF32, kArgTyF32) +DEF_MIR_INTRINSIC(C_asinf,\ + "asinf", 1, INTRNISPURE, kArgTyF32, kArgTyF32) +DEF_MIR_INTRINSIC(C_atanf,\ + "atanf", 1, INTRNISPURE, kArgTyF32, kArgTyF32) +DEF_MIR_INTRINSIC(C_cosf,\ + "cosf", 1, INTRNISPURE, kArgTyF32, kArgTyF32) +DEF_MIR_INTRINSIC(C_coshf,\ + "coshf", 1, INTRNISPURE, kArgTyF32, kArgTyF32) +DEF_MIR_INTRINSIC(C_expf,\ + "expf", 1, INTRNISPURE, kArgTyF32, kArgTyF32) +DEF_MIR_INTRINSIC(C_logf,\ + "logf", 1, INTRNISPURE, kArgTyF32, kArgTyF32) +DEF_MIR_INTRINSIC(C_log10f,\ + "log10f", 1, INTRNISPURE, kArgTyF32, kArgTyF32) +DEF_MIR_INTRINSIC(C_sinf,\ + "sinf", 1, INTRNISPURE, kArgTyF32, kArgTyF32) +DEF_MIR_INTRINSIC(C_sinhf,\ + "sinhf", 1, INTRNISPURE, kArgTyF32, kArgTyF32) +DEF_MIR_INTRINSIC(C_acos,\ + "acos", 1, INTRNISPURE, kArgTyF64, kArgTyF64) +DEF_MIR_INTRINSIC(C_asin,\ + "asin", 1, INTRNISPURE, kArgTyF64, kArgTyF64) +DEF_MIR_INTRINSIC(C_atan,\ + "atan", 1, INTRNISPURE, kArgTyF64, kArgTyF64) +DEF_MIR_INTRINSIC(C_cos,\ + "cos", 1, INTRNISPURE, kArgTyF64, kArgTyF64) +DEF_MIR_INTRINSIC(C_cosh,\ + "cosh", 1, INTRNISPURE, kArgTyF64, kArgTyF64) +DEF_MIR_INTRINSIC(C_exp,\ + "exp", 1, INTRNISPURE, kArgTyF64, kArgTyF64) +DEF_MIR_INTRINSIC(C_log,\ + "log", 1, INTRNISPURE, kArgTyF64, kArgTyF64) +DEF_MIR_INTRINSIC(C_log10,\ + "log10", 1, INTRNISPURE, kArgTyF64, kArgTyF64) +DEF_MIR_INTRINSIC(C_sin,\ + "sin", 1, INTRNISPURE, kArgTyF64, kArgTyF64) +DEF_MIR_INTRINSIC(C_sinh,\ + "sinh", 1, INTRNISPURE, kArgTyF64, kArgTyF64) +DEF_MIR_INTRINSIC(C_ffs,\ + "ffs", 4, INTRNISPURE, kArgTyI32, kArgTyI32) 
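For reference, clients consume this table by defining DEF_MIR_INTRINSIC over the columns they need and then including the aggregate intrinsics.def (which is expected to pull in this file); the MIRIntrinsicID enum in intrinsic_op.h, added later in this patch, is a concrete instance of the pattern:

enum MIRIntrinsicID {
#define DEF_MIR_INTRINSIC(STR, NAME, NUM_INSN, INTRN_CLASS, RETURN_TYPE, ...) INTRN_##STR,
#include "intrinsics.def"
#undef DEF_MIR_INTRINSIC
};

Under that expansion the C_strcmp entry above becomes the enumerator INTRN_C_strcmp; other clients can expand the same macro differently to recover the runtime name, the instruction-count estimate, the property flags, and the argument types.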
+DEF_MIR_INTRINSIC(C_va_start,\ + "va_start", 10,\ + INTRNISPURE | INTRNISSPECIAL | INTRNWRITEFIRSTOPND | INTRNREADSECONDOPND, \ + kArgTyVoid, kArgTyPtr, kArgTyI32) +DEF_MIR_INTRINSIC(C_constant_p,\ + "constant_p", 0, 0, kArgTyI32, kArgTyDynany) +DEF_MIR_INTRINSIC(C_clz32,\ + "clz32", 1, INTRNISPURE, kArgTyI32, kArgTyU32) +DEF_MIR_INTRINSIC(C_clz64,\ + "clz64", 1, INTRNISPURE, kArgTyI32, kArgTyU64) +DEF_MIR_INTRINSIC(C_ctz32,\ + "ctz32", 2, INTRNISPURE, kArgTyI32, kArgTyU32) +DEF_MIR_INTRINSIC(C_ctz64,\ + "ctz64", 2, INTRNISPURE, kArgTyI32, kArgTyU64) +DEF_MIR_INTRINSIC(C_popcount32,\ + "popcount32", DEFAULT_NUM_INSN, INTRNISPURE, kArgTyI32, kArgTyU32) +DEF_MIR_INTRINSIC(C_popcount64,\ + "popcount64", DEFAULT_NUM_INSN, INTRNISPURE, kArgTyI32, kArgTyU64) +DEF_MIR_INTRINSIC(C_parity32,\ + "parity32", DEFAULT_NUM_INSN, INTRNISPURE, kArgTyI32, kArgTyU32) +DEF_MIR_INTRINSIC(C_parity64,\ + "parity64", DEFAULT_NUM_INSN, INTRNISPURE, kArgTyI32, kArgTyU64) +DEF_MIR_INTRINSIC(C_clrsb32,\ + "clrsb32", DEFAULT_NUM_INSN, INTRNISPURE, kArgTyI32, kArgTyU32) +DEF_MIR_INTRINSIC(C_clrsb64,\ + "clrsb64", DEFAULT_NUM_INSN, INTRNISPURE, kArgTyI32, kArgTyU64) +DEF_MIR_INTRINSIC(C_isaligned,\ + "isaligned", DEFAULT_NUM_INSN, INTRNISPURE, kArgTyU1, kArgTyPtr, kArgTyU64) +DEF_MIR_INTRINSIC(C_alignup,\ + "alignup", DEFAULT_NUM_INSN, INTRNISPURE, kArgTyU1, kArgTyPtr, kArgTyU64) +DEF_MIR_INTRINSIC(C_aligndown,\ + "aligndown", DEFAULT_NUM_INSN, INTRNISPURE, kArgTyU1, kArgTyPtr, kArgTyU64) +DEF_MIR_INTRINSIC(C_rev16_2,\ + "rev16", DEFAULT_NUM_INSN, INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyI16, kArgTyI16) +DEF_MIR_INTRINSIC(C_rev_4,\ + "rev", DEFAULT_NUM_INSN, INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyI32, kArgTyI32) +DEF_MIR_INTRINSIC(C_rev_8,\ + "rev", DEFAULT_NUM_INSN, INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyI64, kArgTyI64) +DEF_MIR_INTRINSIC(C_bswap16,\ + "rev16", DEFAULT_NUM_INSN, INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyI16, kArgTyI16) +DEF_MIR_INTRINSIC(C_bswap32,\ + "rev", DEFAULT_NUM_INSN, INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyI32, kArgTyI32) +DEF_MIR_INTRINSIC(C_bswap64,\ + "rev", DEFAULT_NUM_INSN, INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyI64, kArgTyI64) +DEF_MIR_INTRINSIC(C_stack_save,\ + "stack_save", DEFAULT_NUM_INSN, INTRNISPURE | INTRNISSPECIAL, kArgTyPtr) +DEF_MIR_INTRINSIC(C_stack_restore,\ + "stack_restore", DEFAULT_NUM_INSN, INTRNISPURE | INTRNISSPECIAL, kArgTyPtr) +// sync +DEF_MIR_INTRINSIC(C___sync_add_and_fetch_1,\ + "__sync_add_and_fetch_1", 5, INTRNATOMIC, kArgTyU8, kArgTyPtr, kArgTyU8) +DEF_MIR_INTRINSIC(C___sync_add_and_fetch_2,\ + "__sync_add_and_fetch_2", 5, INTRNATOMIC, kArgTyU16, kArgTyPtr, kArgTyU16) +DEF_MIR_INTRINSIC(C___sync_add_and_fetch_4,\ + "__sync_add_and_fetch_4", 5, INTRNATOMIC, kArgTyU32, kArgTyPtr, kArgTyU32) +DEF_MIR_INTRINSIC(C___sync_add_and_fetch_8,\ + "__sync_add_and_fetch_8", 5, INTRNATOMIC, kArgTyU64, kArgTyPtr, kArgTyU64) +DEF_MIR_INTRINSIC(C___sync_sub_and_fetch_1,\ + "__sync_sub_and_fetch_1", 5, INTRNATOMIC, kArgTyU8, kArgTyPtr, kArgTyU8) +DEF_MIR_INTRINSIC(C___sync_sub_and_fetch_2,\ + "__sync_sub_and_fetch_2", 5, INTRNATOMIC, kArgTyU16, kArgTyPtr, kArgTyU16) +DEF_MIR_INTRINSIC(C___sync_sub_and_fetch_4,\ + "__sync_sub_and_fetch_4", 5, INTRNATOMIC, kArgTyU32, kArgTyPtr, kArgTyU32) +DEF_MIR_INTRINSIC(C___sync_sub_and_fetch_8,\ + "__sync_sub_and_fetch_8", 5, INTRNATOMIC, kArgTyU64, kArgTyPtr, kArgTyU64) +DEF_MIR_INTRINSIC(C___sync_fetch_and_add_1,\ + "__sync_fetch_and_add_1", 5, INTRNATOMIC, kArgTyU8, kArgTyPtr, kArgTyU8) +DEF_MIR_INTRINSIC(C___sync_fetch_and_add_2,\ + 
"__sync_fetch_and_add_2", 5, INTRNATOMIC, kArgTyU16, kArgTyPtr, kArgTyU16) +DEF_MIR_INTRINSIC(C___sync_fetch_and_add_4,\ + "__sync_fetch_and_add_4", 5, INTRNATOMIC, kArgTyU32, kArgTyPtr, kArgTyU32) +DEF_MIR_INTRINSIC(C___sync_fetch_and_add_8,\ + "__sync_fetch_and_add_8", 5, INTRNATOMIC, kArgTyU64, kArgTyPtr, kArgTyU64) +DEF_MIR_INTRINSIC(C___sync_fetch_and_sub_1,\ + "__sync_fetch_and_sub_1", 5, INTRNATOMIC, kArgTyU8, kArgTyPtr, kArgTyU8) +DEF_MIR_INTRINSIC(C___sync_fetch_and_sub_2,\ + "__sync_fetch_and_sub_2", 5, INTRNATOMIC, kArgTyU16, kArgTyPtr, kArgTyU16) +DEF_MIR_INTRINSIC(C___sync_fetch_and_sub_4,\ + "__sync_fetch_and_sub_4", 5, INTRNATOMIC, kArgTyU32, kArgTyPtr, kArgTyU32) +DEF_MIR_INTRINSIC(C___sync_fetch_and_sub_8,\ + "__sync_fetch_and_sub_8", 5, INTRNATOMIC, kArgTyU64, kArgTyPtr, kArgTyU64) +DEF_MIR_INTRINSIC(C___sync_bool_compare_and_swap_1,\ + "__sync_bool_compare_and_swap_1", 5, INTRNATOMIC, kArgTyU1, kArgTyPtr, kArgTyU8, kArgTyU8) +DEF_MIR_INTRINSIC(C___sync_bool_compare_and_swap_2,\ + "__sync_bool_compare_and_swap_2", 5, INTRNATOMIC, kArgTyU1, kArgTyPtr, kArgTyU16, kArgTyU16) +DEF_MIR_INTRINSIC(C___sync_bool_compare_and_swap_4,\ + "__sync_bool_compare_and_swap_4", 5, INTRNATOMIC, kArgTyU1, kArgTyPtr, kArgTyU32, kArgTyU32) +DEF_MIR_INTRINSIC(C___sync_bool_compare_and_swap_8,\ + "__sync_bool_compare_and_swap_8", 5, INTRNATOMIC, kArgTyU1, kArgTyPtr, kArgTyU64, kArgTyU64) +DEF_MIR_INTRINSIC(C___sync_val_compare_and_swap_1,\ + "__sync_val_compare_and_swap_1", 5, INTRNATOMIC, kArgTyU8, kArgTyPtr, kArgTyU8, kArgTyU8) +DEF_MIR_INTRINSIC(C___sync_val_compare_and_swap_2,\ + "__sync_val_compare_and_swap_2", 5, INTRNATOMIC, kArgTyU16, kArgTyPtr, kArgTyU16, kArgTyU16) +DEF_MIR_INTRINSIC(C___sync_val_compare_and_swap_4,\ + "__sync_val_compare_and_swap_4", 5, INTRNATOMIC, kArgTyU32, kArgTyPtr, kArgTyU32, kArgTyU32) +DEF_MIR_INTRINSIC(C___sync_val_compare_and_swap_8,\ + "__sync_val_compare_and_swap_8", 5, INTRNATOMIC, kArgTyU64, kArgTyPtr, kArgTyU64, kArgTyU64) +DEF_MIR_INTRINSIC(C___sync_lock_test_and_set_1,\ + "__sync_lock_test_and_set_1", 5, INTRNATOMIC, kArgTyU8, kArgTyPtr, kArgTyU8) +DEF_MIR_INTRINSIC(C___sync_lock_test_and_set_2,\ + "__sync_lock_test_and_set_2", 5, INTRNATOMIC, kArgTyU16, kArgTyPtr, kArgTyU16) +DEF_MIR_INTRINSIC(C___sync_lock_test_and_set_4,\ + "__sync_lock_test_and_set_4", 5, INTRNATOMIC, kArgTyU32, kArgTyPtr, kArgTyU32) +DEF_MIR_INTRINSIC(C___sync_lock_test_and_set_8,\ + "__sync_lock_test_and_set_8", 5, INTRNATOMIC, kArgTyU64, kArgTyPtr, kArgTyU64) +DEF_MIR_INTRINSIC(C___sync_lock_release_8,\ + "__sync_lock_release_8", 1, INTRNATOMIC, kArgTyVoid, kArgTyPtr) +DEF_MIR_INTRINSIC(C___sync_lock_release_4,\ + "__sync_lock_release_4", 1, INTRNATOMIC, kArgTyVoid, kArgTyPtr) +DEF_MIR_INTRINSIC(C___sync_lock_release_2,\ + "__sync_lock_release_2", 1, INTRNATOMIC, kArgTyVoid, kArgTyPtr) +DEF_MIR_INTRINSIC(C___sync_lock_release_1,\ + "__sync_lock_release_1", 1, INTRNATOMIC, kArgTyVoid, kArgTyPtr) +DEF_MIR_INTRINSIC(C___sync_fetch_and_and_1,\ + "__sync_fetch_and_and_1", 5, INTRNATOMIC, kArgTyU8, kArgTyPtr, kArgTyU8) +DEF_MIR_INTRINSIC(C___sync_fetch_and_and_2,\ + "__sync_fetch_and_and_2", 5, INTRNATOMIC, kArgTyU16, kArgTyPtr, kArgTyU16) +DEF_MIR_INTRINSIC(C___sync_fetch_and_and_4,\ + "__sync_fetch_and_and_4", 5, INTRNATOMIC, kArgTyU32, kArgTyPtr, kArgTyU32) +DEF_MIR_INTRINSIC(C___sync_fetch_and_and_8,\ + "__sync_fetch_and_and_8", 5, INTRNATOMIC, kArgTyU64, kArgTyPtr, kArgTyU64) +DEF_MIR_INTRINSIC(C___sync_fetch_and_or_1,\ + "__sync_fetch_and_or_1", 5, INTRNATOMIC, kArgTyU8, 
kArgTyPtr, kArgTyU8) +DEF_MIR_INTRINSIC(C___sync_fetch_and_or_2,\ + "__sync_fetch_and_or_2", 5, INTRNATOMIC, kArgTyU16, kArgTyPtr, kArgTyU16) +DEF_MIR_INTRINSIC(C___sync_fetch_and_or_4,\ + "__sync_fetch_and_or_4", 5, INTRNATOMIC, kArgTyU32, kArgTyPtr, kArgTyU32) +DEF_MIR_INTRINSIC(C___sync_fetch_and_or_8,\ + "__sync_fetch_and_or_8", 5, INTRNATOMIC, kArgTyU64, kArgTyPtr, kArgTyU64) +DEF_MIR_INTRINSIC(C___sync_fetch_and_xor_1,\ + "__sync_fetch_and_xor_1", 5, INTRNATOMIC, kArgTyU8, kArgTyPtr, kArgTyU8) +DEF_MIR_INTRINSIC(C___sync_fetch_and_xor_2,\ + "__sync_fetch_and_xor_2", 5, INTRNATOMIC, kArgTyU16, kArgTyPtr, kArgTyU16) +DEF_MIR_INTRINSIC(C___sync_fetch_and_xor_4,\ + "__sync_fetch_and_xor_4", 5, INTRNATOMIC, kArgTyU32, kArgTyPtr, kArgTyU32) +DEF_MIR_INTRINSIC(C___sync_fetch_and_xor_8,\ + "__sync_fetch_and_xor_8", 5, INTRNATOMIC, kArgTyU64, kArgTyPtr, kArgTyU64) +DEF_MIR_INTRINSIC(C___sync_fetch_and_nand_1,\ + "__sync_fetch_and_nand_1", 5, INTRNATOMIC, kArgTyU8, kArgTyPtr, kArgTyU8) +DEF_MIR_INTRINSIC(C___sync_fetch_and_nand_2,\ + "__sync_fetch_and_nand_2", 5, INTRNATOMIC, kArgTyU16, kArgTyPtr, kArgTyU16) +DEF_MIR_INTRINSIC(C___sync_fetch_and_nand_4,\ + "__sync_fetch_and_nand_4", 5, INTRNATOMIC, kArgTyU32, kArgTyPtr, kArgTyU32) +DEF_MIR_INTRINSIC(C___sync_fetch_and_nand_8,\ + "__sync_fetch_and_nand_8", 5, INTRNATOMIC, kArgTyU64, kArgTyPtr, kArgTyU64) +DEF_MIR_INTRINSIC(C___sync_and_and_fetch_1,\ + "__sync_and_and_fetch_1", 5, INTRNATOMIC, kArgTyU8, kArgTyPtr, kArgTyU8) +DEF_MIR_INTRINSIC(C___sync_and_and_fetch_2,\ + "__sync_and_and_fetch_2", 5, INTRNATOMIC, kArgTyU16, kArgTyPtr, kArgTyU16) +DEF_MIR_INTRINSIC(C___sync_and_and_fetch_4,\ + "__sync_and_and_fetch_4", 5, INTRNATOMIC, kArgTyU32, kArgTyPtr, kArgTyU32) +DEF_MIR_INTRINSIC(C___sync_and_and_fetch_8,\ + "__sync_and_and_fetch_8", 5, INTRNATOMIC, kArgTyU64, kArgTyPtr, kArgTyU64) +DEF_MIR_INTRINSIC(C___sync_or_and_fetch_1,\ + "__sync_or_and_fetch_1", 5, INTRNATOMIC, kArgTyU8, kArgTyPtr, kArgTyU8) +DEF_MIR_INTRINSIC(C___sync_or_and_fetch_2,\ + "__sync_or_and_fetch_2", 5, INTRNATOMIC, kArgTyU16, kArgTyPtr, kArgTyU16) +DEF_MIR_INTRINSIC(C___sync_or_and_fetch_4,\ + "__sync_or_and_fetch_4", 5, INTRNATOMIC, kArgTyU32, kArgTyPtr, kArgTyU32) +DEF_MIR_INTRINSIC(C___sync_or_and_fetch_8,\ + "__sync_or_and_fetch_8", 5, INTRNATOMIC, kArgTyU64, kArgTyPtr, kArgTyU64) +DEF_MIR_INTRINSIC(C___sync_xor_and_fetch_1,\ + "__sync_xor_and_fetch_1", 5, INTRNATOMIC, kArgTyU8, kArgTyPtr, kArgTyU8) +DEF_MIR_INTRINSIC(C___sync_xor_and_fetch_2,\ + "__sync_xor_and_fetch_2", 5, INTRNATOMIC, kArgTyU16, kArgTyPtr, kArgTyU16) +DEF_MIR_INTRINSIC(C___sync_xor_and_fetch_4,\ + "__sync_xor_and_fetch_4", 5, INTRNATOMIC, kArgTyU32, kArgTyPtr, kArgTyU32) +DEF_MIR_INTRINSIC(C___sync_xor_and_fetch_8,\ + "__sync_xor_and_fetch_8", 5, INTRNATOMIC, kArgTyU64, kArgTyPtr, kArgTyU64) +DEF_MIR_INTRINSIC(C___sync_nand_and_fetch_1,\ + "__sync_nand_and_fetch_1", 5, INTRNATOMIC, kArgTyU8, kArgTyPtr, kArgTyU8) +DEF_MIR_INTRINSIC(C___sync_nand_and_fetch_2,\ + "__sync_nand_and_fetch_2", 5, INTRNATOMIC, kArgTyU16, kArgTyPtr, kArgTyU16) +DEF_MIR_INTRINSIC(C___sync_nand_and_fetch_4,\ + "__sync_nand_and_fetch_4", 5, INTRNATOMIC, kArgTyU32, kArgTyPtr, kArgTyU32) +DEF_MIR_INTRINSIC(C___sync_nand_and_fetch_8,\ + "__sync_nand_and_fetch_8", 5, INTRNATOMIC, kArgTyU64, kArgTyPtr, kArgTyU64) +DEF_MIR_INTRINSIC(C___sync_synchronize,\ + "__sync_synchronize", DEFAULT_NUM_INSN, INTRNATOMIC, kArgTyUndef) + +DEF_MIR_INTRINSIC(C__builtin_return_address,\ + "__builtin_return_address", 1, INTRNISPURE, kArgTyPtr, 
kArgTyU32) +DEF_MIR_INTRINSIC(C__builtin_extract_return_addr,\ + "__builtin_extract_return_addr", 1, INTRNISPURE, kArgTyPtr, kArgTyPtr) +DEF_MIR_INTRINSIC(C___builtin_expect,\ + "__builtin_expect", 0, INTRNISPURE, kArgTyI32, kArgTyI32, kArgTyI32) +DEF_MIR_INTRINSIC(C___builtin_object_size,\ + "__builtin_object_size", 0, INTRNNOSIDEEFFECT | INTRNISPURE, kArgTyU64, kArgTyPtr, kArgTyI32) +// atomic +DEF_MIR_INTRINSIC(C___atomic_load_n,\ + "__atomic_load_n", 1, INTRNATOMIC, kArgTyDynany, kArgTyPtr, kArgTyI32) +DEF_MIR_INTRINSIC(C___atomic_load,\ + "__atomic_load", 1, INTRNATOMIC, kArgTyVoid, kArgTyPtr, kArgTyPtr, kArgTyI32) +DEF_MIR_INTRINSIC(C___atomic_store_n,\ + "__atomic_store_n", 1, INTRNATOMIC, kArgTyVoid, kArgTyPtr, kArgTyDynany, kArgTyI32) +DEF_MIR_INTRINSIC(C___atomic_store,\ + "__atomic_store", 1, INTRNATOMIC, kArgTyVoid, kArgTyPtr, kArgTyPtr, kArgTyI32) +DEF_MIR_INTRINSIC(C___atomic_exchange_n,\ + "__atomic_exchange_n", 2, INTRNATOMIC, kArgTyDynany, kArgTyPtr, kArgTyDynany, kArgTyI32) +DEF_MIR_INTRINSIC(C___atomic_exchange,\ + "__atomic_exchange", 1, INTRNATOMIC, kArgTyVoid, kArgTyPtr, kArgTyPtr, kArgTyPtr, kArgTyI32) +DEF_MIR_INTRINSIC(C___atomic_add_fetch,\ + "__atomic_add_fetch", DEFAULT_NUM_INSN, INTRNATOMIC, kArgTyDynany, kArgTyPtr, kArgTyDynany, kArgTyI32) +DEF_MIR_INTRINSIC(C___atomic_sub_fetch,\ + "__atomic_sub_fetch", DEFAULT_NUM_INSN, INTRNATOMIC, kArgTyDynany, kArgTyPtr, kArgTyDynany, kArgTyI32) +DEF_MIR_INTRINSIC(C___atomic_and_fetch,\ + "__atomic_and_fetch", DEFAULT_NUM_INSN, INTRNATOMIC, kArgTyDynany, kArgTyPtr, kArgTyDynany, kArgTyI32) +DEF_MIR_INTRINSIC(C___atomic_xor_fetch,\ + "__atomic_xor_fetch", DEFAULT_NUM_INSN, INTRNATOMIC, kArgTyDynany, kArgTyPtr, kArgTyDynany, kArgTyI32) +DEF_MIR_INTRINSIC(C___atomic_or_fetch,\ + "__atomic_or_fetch", DEFAULT_NUM_INSN, INTRNATOMIC, kArgTyDynany, kArgTyPtr, kArgTyDynany, kArgTyI32) +DEF_MIR_INTRINSIC(C___atomic_nand_fetch,\ + "__atomic_nand_fetch", DEFAULT_NUM_INSN, INTRNATOMIC, kArgTyDynany, kArgTyPtr, kArgTyDynany, kArgTyI32) +DEF_MIR_INTRINSIC(C___atomic_fetch_add,\ + "__atomic_fetch_add", DEFAULT_NUM_INSN, INTRNATOMIC, kArgTyDynany, kArgTyPtr, kArgTyDynany, kArgTyI32) +DEF_MIR_INTRINSIC(C___atomic_fetch_sub,\ + "__atomic_fetch_sub", DEFAULT_NUM_INSN, INTRNATOMIC, kArgTyDynany, kArgTyPtr, kArgTyDynany, kArgTyI32) +DEF_MIR_INTRINSIC(C___atomic_fetch_and,\ + "__atomic_fetch_and", DEFAULT_NUM_INSN, INTRNATOMIC, kArgTyDynany, kArgTyPtr, kArgTyDynany, kArgTyI32) +DEF_MIR_INTRINSIC(C___atomic_fetch_xor,\ + "__atomic_fetch_xor", DEFAULT_NUM_INSN, INTRNATOMIC, kArgTyDynany, kArgTyPtr, kArgTyDynany, kArgTyI32) +DEF_MIR_INTRINSIC(C___atomic_fetch_or,\ + "__atomic_fetch_or", DEFAULT_NUM_INSN, INTRNATOMIC, kArgTyDynany, kArgTyPtr, kArgTyDynany, kArgTyI32) +DEF_MIR_INTRINSIC(C___atomic_fetch_nand,\ + "__atomic_fetch_nand", DEFAULT_NUM_INSN, INTRNATOMIC, kArgTyDynany, kArgTyPtr, kArgTyDynany, kArgTyI32) +DEF_MIR_INTRINSIC(C___atomic_test_and_set,\ + "__atomic_test_and_set", DEFAULT_NUM_INSN, INTRNATOMIC, kArgTyU1, kArgTyPtr, kArgTyI32) +DEF_MIR_INTRINSIC(C___atomic_clear,\ + "__atomic_clear", DEFAULT_NUM_INSN, INTRNATOMIC, kArgTyVoid, kArgTyPtr, kArgTyI32) +DEF_MIR_INTRINSIC(C___atomic_thread_fence,\ + "__atomic_thread_fence", DEFAULT_NUM_INSN, INTRNATOMIC, kArgTyVoid, kArgTyI32) +DEF_MIR_INTRINSIC(C___atomic_signal_fence,\ + "__atomic_signal_fence", DEFAULT_NUM_INSN, INTRNATOMIC, kArgTyVoid, kArgTyI32) +DEF_MIR_INTRINSIC(C___atomic_always_lock_free,\ + "__atomic_always_lock_free", DEFAULT_NUM_INSN, INTRNATOMIC, kArgTyU1, kArgTyU64, 
kArgTyPtr) +DEF_MIR_INTRINSIC(C___atomic_is_lock_free,\ + "__atomic_is_lock_free", DEFAULT_NUM_INSN, INTRNATOMIC, kArgTyU1, kArgTyU64, kArgTyPtr) +DEF_MIR_INTRINSIC(C___atomic_compare_exchange_n,\ + "__atomic_compare_exchange_n", DEFAULT_NUM_INSN, INTRNATOMIC, kArgTyU1, kArgTyPtr, kArgTyPtr, kArgTyDynany, kArgTyU1, kArgTyI32, kArgTyI32) +DEF_MIR_INTRINSIC(C___atomic_compare_exchange,\ + "__atomic_compare_exchange", DEFAULT_NUM_INSN, INTRNATOMIC, kArgTyU1, kArgTyPtr, kArgTyPtr, kArgTyPtr, kArgTyU1, kArgTyI32, kArgTyI32) diff --git a/src/mapleall/maple_ir/include/intrinsic_dai.def b/src/mapleall/maple_ir/include/intrinsic_dai.def new file mode 100644 index 0000000000000000000000000000000000000000..b1c398a7a5f8058fef5c6a3861c8de2e10c3ff3a --- /dev/null +++ b/src/mapleall/maple_ir/include/intrinsic_dai.def @@ -0,0 +1,20 @@ +DEF_MIR_INTRINSIC(MCC_DeferredConstClass,\ + "MCC_DeferredConstClass", DEFAULT_NUM_INSN, INTRNISJAVA, kArgTyRef, kArgTyRef, kArgTyRef, kArgTyRef) +DEF_MIR_INTRINSIC(MCC_DeferredInstanceOf,\ + "MCC_DeferredInstanceOf", DEFAULT_NUM_INSN, INTRNISJAVA, kArgTyU1, kArgTyRef, kArgTyRef, kArgTyRef, kArgTyRef) +DEF_MIR_INTRINSIC(MCC_DeferredCheckCast,\ + "MCC_DeferredCheckCast", DEFAULT_NUM_INSN, INTRNISJAVA, kArgTyRef, kArgTyRef, kArgTyRef, kArgTyRef, kArgTyRef) +DEF_MIR_INTRINSIC(MCC_DeferredNewInstance,\ + "MCC_DeferredNewInstance", DEFAULT_NUM_INSN, INTRNISJAVA, kArgTyRef, kArgTyRef, kArgTyRef, kArgTyRef) +DEF_MIR_INTRINSIC(MCC_DeferredNewArray,\ + "MCC_DeferredNewArray", DEFAULT_NUM_INSN, INTRNISJAVA, kArgTyRef, kArgTyRef, kArgTyRef, kArgTyRef, kArgTyI32) +DEF_MIR_INTRINSIC(MCC_DeferredFillNewArray,\ + "MCC_DeferredFillNewArray", DEFAULT_NUM_INSN, INTRNISJAVA, kArgTyRef, kArgTyRef, kArgTyRef, kArgTyRef, kArgTyI32, kArgTyDynany, kArgTyDynany) +DEF_MIR_INTRINSIC(MCC_DeferredLoadField,\ + "MCC_DeferredLoadField", DEFAULT_NUM_INSN, INTRNISJAVA, kArgTyDynany, kArgTyRef, kArgTyRef, kArgTyRef, kArgTyRef, kArgTyRef, kArgTyRef) +DEF_MIR_INTRINSIC(MCC_DeferredStoreField,\ + "MCC_DeferredStoreField", DEFAULT_NUM_INSN, INTRNISJAVA, kArgTyVoid, kArgTyRef, kArgTyRef, kArgTyRef, kArgTyRef, kArgTyRef, kArgTyRef, kArgTyRef) +DEF_MIR_INTRINSIC(MCC_DeferredInvoke,\ + "MCC_DeferredInvoke", DEFAULT_NUM_INSN, INTRNISJAVA, kArgTyDynany, kArgTyRef, kArgTyI32, kArgTyRef, kArgTyRef, kArgTyRef, kArgTyRef) +DEF_MIR_INTRINSIC(MCC_DeferredClinitCheck,\ + "MCC_DeferredClinitCheck", DEFAULT_NUM_INSN, INTRNISJAVA, kArgTyVoid, kArgTyRef, kArgTyRef, kArgTyRef) diff --git a/src/mapleall/maple_ir/include/intrinsic_java.def b/src/mapleall/maple_ir/include/intrinsic_java.def new file mode 100644 index 0000000000000000000000000000000000000000..f49b138b40bff8a70ddf640cdad7cbe9f423d8d4 --- /dev/null +++ b/src/mapleall/maple_ir/include/intrinsic_java.def @@ -0,0 +1,39 @@ +/* + * Copyright (c) [2019] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan PSL v1. + * You can use this software according to the terms and conditions of the Mulan PSL v1. + * You may obtain a copy of Mulan PSL v1 at: + * + * http://license.coscl.org.cn/MulanPSL + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v1 for more details. 
+ */ +// DEF_MIR_INTRINSIC(STR, NAME, NUM_INSN, INTRN_CLASS, RETURN_TYPE, ARG0, ARG1, ARG2, ARG3, ARG4, ARG5) +DEF_MIR_INTRINSIC(JAVA_ARRAY_LENGTH,\ + "__java_array_length", DEFAULT_NUM_INSN, INTRNISJAVA | INTRNNOSIDEEFFECT | INTRNISPURE, kArgTyI32, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JAVA_ARRAY_FILL,\ + "__java_array_fill", DEFAULT_NUM_INSN, INTRNISJAVA, kArgTyVoid, kArgTyDynany, kArgTyDynany, kArgTyI32, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JAVA_FILL_NEW_ARRAY,\ + "__java_fill_new_array", DEFAULT_NUM_INSN, INTRNISJAVA, kArgTyRef, kArgTyI32, kArgTyDynany, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JAVA_CHECK_CAST,\ + "__java_check_cast", DEFAULT_NUM_INSN, INTRNISJAVA | INTRNNOSIDEEFFECT, kArgTyDynany, kArgTyDynany, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JAVA_CONST_CLASS,\ + "__java_const_class", DEFAULT_NUM_INSN, INTRNISJAVA, kArgTyDynany, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JAVA_INSTANCE_OF,\ + "__java_instance_of", DEFAULT_NUM_INSN, INTRNISJAVA, kArgTyU1, kArgTyDynany, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JAVA_ISASSIGNABLEFROM,\ + "__java_isAssignableFrom", DEFAULT_NUM_INSN, INTRNISJAVA, kArgTyU1, kArgTyDynany, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JAVA_MERGE,\ + "__java_merge", DEFAULT_NUM_INSN, INTRNISJAVA, kArgTyPtr, kArgTyDynany, kArgTyDynany, kArgTyDynany, kArgTyDynany, kArgTyDynany, kArgTyUndef) +DEF_MIR_INTRINSIC(JAVA_CLINIT_CHECK,\ + "__java_clinit_check", DEFAULT_NUM_INSN, INTRNISJAVA | INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JAVA_POLYMORPHIC_CALL,\ + "__java_polymorphic_call", DEFAULT_NUM_INSN, INTRNISJAVA, kArgTyDynany, kArgTyDynany, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JAVA_THROW_ARITHMETIC,\ + "MCC_ThrowArithmeticException", DEFAULT_NUM_INSN, INTRNISJAVA | INTRNNEVERRETURN, kArgTyVoid, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JAVA_GET_CLASS,\ + "MCC_GetClass", DEFAULT_NUM_INSN, INTRNISJAVA | INTRNNOSIDEEFFECT, kArgTyPtr, kArgTyPtr, kArgTyPtr, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) \ No newline at end of file diff --git a/src/mapleall/maple_ir/include/intrinsic_js.def b/src/mapleall/maple_ir/include/intrinsic_js.def new file mode 100644 index 0000000000000000000000000000000000000000..e366aff88e14264b1c82d10ceb1cc9f1c04ecaf6 --- /dev/null +++ b/src/mapleall/maple_ir/include/intrinsic_js.def @@ -0,0 +1,118 @@ +/* + * Copyright (c) [2021] Huawei Technologies Co., Ltd. All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan Permissive Software License v2. + * You can use this software according to the terms and conditions of the MulanPSL - 2.0. + * You may obtain a copy of MulanPSL - 2.0 at: + * + * https://opensource.org/licenses/MulanPSL-2.0 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the MulanPSL - 2.0 for more details. 
+ */ + +// DEF_MIR_INTRINSIC(STR, NAME, NUM_INSN, INTRN_CLASS, RETURN_TYPE, ARG0, ARG1, ARG2, ARG3, ARG4, ARG5) +DEF_MIR_INTRINSIC(JS_INIT_CONTEXT,\ + "__js_init_context", DEFAULT_NUM_INSN, INTRNISJS, kArgTyVoid, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JS_REQUIRE,\ + "__js_require", DEFAULT_NUM_INSN, INTRNISJS | INTRNNOSIDEEFFECT, kArgTyDynany, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JS_GET_BIOBJECT,\ + "__jsobj_get_or_create_builtin", DEFAULT_NUM_INSN, INTRNISJS | INTRNISPURE, kArgTySimpleobj, kArgTyU32, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JS_GET_BISTRING,\ + "__jsstr_get_builtin", DEFAULT_NUM_INSN, INTRNISJS | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTySimplestr, kArgTyU32, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JSOP_THIS,\ + "__jsop_this", DEFAULT_NUM_INSN, INTRNISJS | INTRNNOSIDEEFFECT, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JSOP_ADD,\ + "__jsop_add", DEFAULT_NUM_INSN, INTRNISJS | INTRNISJSBINARY | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyDynany, kArgTyDynany, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JSOP_CONCAT,\ + "__jsstr_concat_2", DEFAULT_NUM_INSN, INTRNISJS | INTRNISJSBINARY | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTySimplestr, kArgTySimplestr, kArgTySimplestr, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JSOP_STRICTEQ,\ + "__jsop_stricteq", DEFAULT_NUM_INSN, INTRNISJS | INTRNISJSBINARY | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyU1, kArgTyDynany, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JSSTR_STRICTEQ,\ + "__jsstr_equal", DEFAULT_NUM_INSN, INTRNISJS | INTRNISJSBINARY | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyU1, kArgTySimplestr, kArgTySimplestr, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JSOP_STRICTNE,\ + "__jsop_strictne", DEFAULT_NUM_INSN, INTRNISJS | INTRNISJSBINARY | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyU1, kArgTyDynany, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JSSTR_STRICTNE,\ + "__jsstr_ne", DEFAULT_NUM_INSN, INTRNISJS | INTRNISJSBINARY | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyU1, kArgTySimplestr, kArgTySimplestr, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JSOP_INSTANCEOF,\ + "__jsop_instanceof", DEFAULT_NUM_INSN, INTRNISJS | INTRNISJSBINARY | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyU1, kArgTyDynany, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JSOP_IN,\ + "__jsop_in", DEFAULT_NUM_INSN, INTRNISJS | INTRNISJSBINARY | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyU1, kArgTyDynany, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JSOP_OR,\ + "__jsop_or", DEFAULT_NUM_INSN, INTRNISJS | INTRNISJSBINARY | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyI32, kArgTyDynany, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JSOP_AND,\ + "__jsop_and", DEFAULT_NUM_INSN, INTRNISJS | INTRNISJSBINARY | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyI32, kArgTyDynany, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JSOP_TYPEOF,\ + "__jsop_typeof", DEFAULT_NUM_INSN, INTRNISJS | INTRNISJSUNARY | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyDynany, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, 
kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JS_NEW,\ + "__js_new", DEFAULT_NUM_INSN, INTRNISJS | INTRNNOSIDEEFFECT, kArgTyPtr, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JS_STRING,\ + "__js_ToString", DEFAULT_NUM_INSN, INTRNISJS | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTySimplestr, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JSSTR_LENGTH,\ + "__jsstr_get_length", DEFAULT_NUM_INSN, INTRNISJS | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyU32, kArgTySimplestr, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JS_BOOLEAN,\ + "__js_ToBoolean", DEFAULT_NUM_INSN, INTRNISJS | INTRNISJSUNARY | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyU1, kArgTyDynany, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JS_NUMBER,\ + "__js_ToNumber", DEFAULT_NUM_INSN, INTRNISJS | INTRNISJSUNARY | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyI32, kArgTyDynany, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JS_INT32,\ + "__js_ToInt32", DEFAULT_NUM_INSN, INTRNISJS | INTRNISJSUNARY | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyI32, kArgTyDynany, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JS_PRINT,\ + "__jsop_print", DEFAULT_NUM_INSN, INTRNISJS | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyDynany, kArgTyDynany, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JS_ERROR,\ + "__js_error", DEFAULT_NUM_INSN, INTRNISJS | INTRNISPURE | INTRNNOSIDEEFFECT | INTRNNEVERRETURN, kArgTyDynany, kArgTyDynany, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JS_EVAL,\ + "__js_eval", DEFAULT_NUM_INSN, kIntrnUndef, kArgTyDynany, kArgTyDynany, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JS_ICALL,\ + "__js_icall", DEFAULT_NUM_INSN, INTRNISJS | INTRNRETURNSTRUCT, kArgTyDynany, kArgTyA32, kArgTyU32, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JSOP_CALL, + "__jsop_call", DEFAULT_NUM_INSN, INTRNISJS, kArgTyDynany, kArgTyDynany, kArgTyDynany, kArgTyPtr, kArgTyU32, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JSOP_CCALL,\ + "__jsop_ccall", DEFAULT_NUM_INSN, INTRNISJS, kArgTyDynany, kArgTyDynany, kArgTyDynany, kArgTyPtr, kArgTyU32, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JSOP_NEW, + "__jsop_new", DEFAULT_NUM_INSN, INTRNISJS | INTRNNOSIDEEFFECT, kArgTyDynany, kArgTyDynany, kArgTyDynany, kArgTyPtr, kArgTyU32, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JS_SETTIMEOUT, + "__js_setTimeout", DEFAULT_NUM_INSN, INTRNISJS | INTRNNOSIDEEFFECT, kArgTyDynany, kArgTyDynany, kArgTyI32, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JS_SETCYCLEHEADER,\ + "__js_setCycleHeader", DEFAULT_NUM_INSN, INTRNISJS, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JS_NEW_OBJECT_0,\ + "__js_new_obj_obj_0", DEFAULT_NUM_INSN, INTRNISJS | INTRNNOSIDEEFFECT, kArgTySimpleobj, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JS_NEW_OBJECT_1,\ + "__js_new_obj_obj_1", DEFAULT_NUM_INSN, INTRNISJS | INTRNNOSIDEEFFECT, kArgTySimpleobj, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JSOP_SETPROP,\ + "__jsop_setprop", DEFAULT_NUM_INSN, INTRNISJS, kArgTyVoid, kArgTyDynany, kArgTyDynany, kArgTyDynany, kArgTyUndef, kArgTyUndef, 
kArgTyUndef) +DEF_MIR_INTRINSIC(JSOP_GETPROP,\ + "__jsop_getprop", DEFAULT_NUM_INSN, INTRNISJS | INTRNLOADMEM | INTRNNOSIDEEFFECT, kArgTyDynany, kArgTyDynany, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JSOP_DELPROP,\ + "__jsop_delprop", DEFAULT_NUM_INSN, INTRNISJS, kArgTyDynany, kArgTyDynany, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JSOP_SETPROP_BY_NAME,\ + "__jsop_setprop_by_name", DEFAULT_NUM_INSN, INTRNISJS, kArgTyVoid, kArgTyDynany, kArgTySimplestr, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JSOP_GETPROP_BY_NAME,\ + "__jsop_getprop_by_name", DEFAULT_NUM_INSN, INTRNISJS | INTRNLOADMEM | INTRNNOSIDEEFFECT, kArgTyDynany, kArgTyDynany, kArgTySimplestr, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JSOP_SETPROP_BY_INDEX,\ + "__jsop_setprop_by_index", DEFAULT_NUM_INSN, INTRNISJS, kArgTyVoid, kArgTyDynany, kArgTyU32, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JSOP_GETPROP_BY_INDEX,\ + "__jsop_getprop_by_index", DEFAULT_NUM_INSN, INTRNISJS | INTRNLOADMEM | INTRNNOSIDEEFFECT, kArgTyDynany, kArgTyDynany, kArgTyU32, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JSOP_INITPROP_BY_NAME,\ + "__jsop_initprop", DEFAULT_NUM_INSN, INTRNISJS, kArgTyVoid, kArgTyDynany, kArgTyDynany, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JSOP_INITPROP_GETTER,\ + "__jsop_initprop_getter", DEFAULT_NUM_INSN, INTRNISJS, kArgTyDynany, kArgTyDynany, kArgTyDynany, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JSOP_INITPROP_SETTER,\ + "__jsop_initprop_setter", DEFAULT_NUM_INSN, INTRNISJS, kArgTyDynany, kArgTyDynany, kArgTyDynany, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JS_NEW_FUNCTION,\ + "__js_new_function", DEFAULT_NUM_INSN, INTRNISJS, kArgTyDynany, kArgTyPtr, kArgTyPtr, kArgTyU32, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JS_NEW_ARR_ELEMS,\ + "__js_new_arr_elems", DEFAULT_NUM_INSN, INTRNISJS | INTRNNOSIDEEFFECT, kArgTySimpleobj, kArgTyPtr, kArgTyU32, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JS_NEW_ARR_LENGTH,\ + "__js_new_arr_length", DEFAULT_NUM_INSN, INTRNISJS | INTRNNOSIDEEFFECT, kArgTySimpleobj, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JSOP_LENGTH,\ + "__jsop_length", DEFAULT_NUM_INSN, INTRNISJS | INTRNLOADMEM | INTRNNOSIDEEFFECT | INTRNISPURE, kArgTyI32, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JSOP_NEW_ITERATOR,\ + "__jsop_valueto_iterator", DEFAULT_NUM_INSN, INTRNISJS, kArgTyPtr, kArgTyDynany, kArgTyU32, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JSOP_NEXT_ITERATOR,\ + "__jsop_iterator_next", DEFAULT_NUM_INSN, INTRNISJS, kArgTyDynany, kArgTyPtr, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JSOP_MORE_ITERATOR,\ + "__jsop_more_iterator", DEFAULT_NUM_INSN, INTRNISJS, kArgTyU32, kArgTyPtr, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JS_ADDSYSEVENTLISTENER,\ + "__js_add_sysevent_listener", DEFAULT_NUM_INSN, INTRNISJS, kArgTyU32, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) \ No newline at end of file diff --git a/src/mapleall/maple_ir/include/intrinsic_js_eng.def b/src/mapleall/maple_ir/include/intrinsic_js_eng.def new file mode 100644 index 
0000000000000000000000000000000000000000..f21cda4431815f7e09e1c8d50c04dd45e69ec3a9 --- /dev/null +++ b/src/mapleall/maple_ir/include/intrinsic_js_eng.def @@ -0,0 +1,34 @@ +/* + * Copyright (c) [2021] Huawei Technologies Co., Ltd. All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan Permissive Software License v2. + * You can use this software according to the terms and conditions of the MulanPSL - 2.0. + * You may obtain a copy of MulanPSL - 2.0 at: + * + * https://opensource.org/licenses/MulanPSL-2.0 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the MulanPSL - 2.0 for more details. + */ + +// DEF_MIR_INTRINSIC(STR, NAME, INTRN_CLASS, RETURN_TYPE, ARG0, ARG1, ARG2, ARG3, ARG4, ARG5) +DEF_MIR_INTRINSIC(JS_GET_ARGUMENTOBJECT,\ + "__jsobj_get_or_create_argument", INTRNISJS | INTRNISPURE, kArgTySimpleobj, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JS_GET_ERROR_OBJECT,\ + "__jsobj_get_or_create_error", INTRNISJS | INTRNISPURE, kArgTySimpleobj, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JS_GET_EVALERROR_OBJECT,\ + "__jsobj_get_or_create_evalError", INTRNISJS | INTRNISPURE, kArgTySimpleobj, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JS_GET_RANGEERROR_OBJECT,\ + "__jsobj_get_or_create_rangeError", INTRNISJS | INTRNISPURE, kArgTySimpleobj, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JS_GET_REFERENCEERROR_OBJECT,\ + "__jsobj_get_or_create_referenceError", INTRNISJS | INTRNISPURE, kArgTySimpleobj, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JS_GET_SYNTAXERROR_OBJECT,\ + "__jsobj_get_or_create_syntaxError", INTRNISJS | INTRNISPURE, kArgTySimpleobj, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JS_GET_TYPEERROR_OBJECT,\ + "__jsobj_get_or_create_typeError", INTRNISJS | INTRNISPURE, kArgTySimpleobj, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JS_GET_URIERROR_OBJECT,\ + "__jsobj_get_or_create_uriError", INTRNISJS | INTRNISPURE, kArgTySimpleobj, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(JSOP_ASSERTVALUE, + "__jsop_assert_value", INTRNISJS, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) \ No newline at end of file diff --git a/src/mapleall/maple_ir/include/intrinsic_op.h b/src/mapleall/maple_ir/include/intrinsic_op.h new file mode 100644 index 0000000000000000000000000000000000000000..b929c1692a33591340b77ee34867a1e27107d769 --- /dev/null +++ b/src/mapleall/maple_ir/include/intrinsic_op.h @@ -0,0 +1,25 @@ +/* + * Copyright (c) [2019] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. 
+ * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLE_IR_INCLUDE_INTRINSIC_OP_H +#define MAPLE_IR_INCLUDE_INTRINSIC_OP_H + +namespace maple { +enum MIRIntrinsicID { +#define DEF_MIR_INTRINSIC(STR, NAME, NUM_INSN, INTRN_CLASS, RETURN_TYPE, ...) INTRN_##STR, +#include "intrinsics.def" +#undef DEF_MIR_INTRINSIC +}; +} // namespace maple +#endif // MAPLE_IR_INCLUDE_INTRINSIC_OP_H diff --git a/src/mapleall/maple_ir/include/intrinsic_vector.def b/src/mapleall/maple_ir/include/intrinsic_vector.def new file mode 100644 index 0000000000000000000000000000000000000000..d49c968021f787f611eec2e43e92307586451752 --- /dev/null +++ b/src/mapleall/maple_ir/include/intrinsic_vector.def @@ -0,0 +1,1244 @@ +/* + * Copyright (c) [2021] Futurewei Technologies, Inc. + * + * OpenArkCompiler is licensed under the Mulan Permissive Software License v2. + * You can use this software according to the terms and conditions of the + * MulanPSL - 2.0. You may obtain a copy of MulanPSL - 2.0 at: + * + * https://opensource.org/licenses/MulanPSL-2.0 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY + * KIND, EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO + * NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. See the + * MulanPSL - 2.0 for more details. + */ + +// DEF_MIR_INTRINSIC(STR, NAME, INTRN_CLASS, RETURN_TYPE, +// ARG0, ARG1, ARG2, ARG3, ARG4, ARG5) + +// vecTy vector_abs(vecTy src) +// Create a vector by getting the absolute value of the elements in src. +DEF_MIR_INTRINSIC(vector_abs_v8i8, "vector_abs_v8i8", + 1, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8I8, + kArgTyV8I8) +DEF_MIR_INTRINSIC(vector_abs_v4i16, "vector_abs_v4i16", + 1, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I16, + kArgTyV4I16) +DEF_MIR_INTRINSIC(vector_abs_v2i32, "vector_abs_v2i32", + 1, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2I32, + kArgTyV2I32) +DEF_MIR_INTRINSIC(vector_abs_v1i64, "vector_abs_v1i64", + 1, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV1I64, + kArgTyV1I64) +DEF_MIR_INTRINSIC(vector_abs_v2f32, "vector_abs_v2f32", + 1, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2F32, + kArgTyV2F32) +DEF_MIR_INTRINSIC(vector_abs_v1f64, "vector_abs_v1f64", + 1, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV1F64, + kArgTyV1F64) +DEF_MIR_INTRINSIC(vector_abs_v16i8, "vector_abs_v16i8", + 1, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV16I8, + kArgTyV16I8) +DEF_MIR_INTRINSIC(vector_abs_v8i16, "vector_abs_v8i16", + 1, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8I16, + kArgTyV8I16) +DEF_MIR_INTRINSIC(vector_abs_v4i32, "vector_abs_v4i32", + 1, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I32, + kArgTyV4I32) +DEF_MIR_INTRINSIC(vector_abs_v2i64, "vector_abs_v2i64", + 1, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2I64, + kArgTyV2I64) +DEF_MIR_INTRINSIC(vector_abs_v4f32, "vector_abs_v4f32", + 1, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4F32, + kArgTyV4F32) +DEF_MIR_INTRINSIC(vector_abs_v2f64, "vector_abs_v2f64", + 1, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2F64, + kArgTyV2F64) + +// vecTy vector_mov_narrow(vecTy src) +// copies each element of the operand vector to the corresponding element of the destination vector. +// The result element is half the width of the operand element, and values are saturated to the result width. +// The results are the same type as the operands. 
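A concrete illustration of the narrowing described above, with hypothetical lane values:

// vector_mov_narrow_v8u16 applied to the v8u16 value {300, 7, 0, 65535, 1, 2, 3, 4}
// produces the v8u8 value                            {255, 7, 0,   255, 1, 2, 3, 4}
// (lanes that do not fit in the narrower element saturate to its maximum)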
+DEF_MIR_INTRINSIC(vector_mov_narrow_v8u16, "vector_mov_narrow_v8u16", 1, + INTRNISVECTOR | INTRNISPURE |INTRNNOSIDEEFFECT, kArgTyV8U8, kArgTyV8U16) +DEF_MIR_INTRINSIC(vector_mov_narrow_v4u32, "vector_mov_narrow_v4u32", 1, + INTRNISVECTOR | INTRNISPURE |INTRNNOSIDEEFFECT, kArgTyV4U16, kArgTyV4U32) +DEF_MIR_INTRINSIC(vector_mov_narrow_v2u64, "vector_mov_narrow_v2u64", 1, + INTRNISVECTOR | INTRNISPURE |INTRNNOSIDEEFFECT, kArgTyV2U32, kArgTyV2U64) +DEF_MIR_INTRINSIC(vector_mov_narrow_v8i16, "vector_mov_narrow_v8i16", 1, + INTRNISVECTOR | INTRNISPURE |INTRNNOSIDEEFFECT, kArgTyV8I8, kArgTyV8I16) +DEF_MIR_INTRINSIC(vector_mov_narrow_v4i32, "vector_mov_narrow_v4i32", 1, + INTRNISVECTOR | INTRNISPURE |INTRNNOSIDEEFFECT, kArgTyV4I16, kArgTyV4I32) +DEF_MIR_INTRINSIC(vector_mov_narrow_v2i64, "vector_mov_narrow_v2i64", 1, + INTRNISVECTOR | INTRNISPURE |INTRNNOSIDEEFFECT, kArgTyV2I32, kArgTyV2I64) + +// vecTy vector_addl_low(vecTy src1, vecTy src2) +// Add each element of the source vector to second source +// put the result into the destination vector. +DEF_MIR_INTRINSIC(vector_addl_low_v8i8, "vector_addl_low_v8i8", + 1, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8I16, + kArgTyV8I8, kArgTyV8I8) +DEF_MIR_INTRINSIC(vector_addl_low_v4i16, "vector_addl_low_v4i16", + 1, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I32, + kArgTyV4I16, kArgTyV4I16) +DEF_MIR_INTRINSIC(vector_addl_low_v2i32, "vector_addl_low_v2i32", + 1, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2I64, + kArgTyV2I32, kArgTyV2I32) +DEF_MIR_INTRINSIC(vector_addl_low_v8u8, "vector_addl_low_v8u8", + 1, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8U16, + kArgTyV8U8, kArgTyV8U8) +DEF_MIR_INTRINSIC(vector_addl_low_v4u16, "vector_addl_low_v4u16", + 1, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4U32, + kArgTyV4U16, kArgTyV4U16) +DEF_MIR_INTRINSIC(vector_addl_low_v2u32, "vector_addl_low_v2u32", + 1, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2U64, + kArgTyV2U32, kArgTyV2U32) + +// vecTy vector_addl_high(vecTy src1, vecTy src2) +// Add each element of the source vector to upper half of second source +// put the result into the destination vector. +DEF_MIR_INTRINSIC(vector_addl_high_v8i8, "vector_addl_high_v8i8", + 1, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8I16, + kArgTyV16I8, kArgTyV16I8) +DEF_MIR_INTRINSIC(vector_addl_high_v4i16, "vector_addl_high_v4i16", + 1, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I32, + kArgTyV8I16, kArgTyV8I16) +DEF_MIR_INTRINSIC(vector_addl_high_v2i32, "vector_addl_high_v2i32", + 1, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2I64, + kArgTyV4I32, kArgTyV4I32) +DEF_MIR_INTRINSIC(vector_addl_high_v8u8, "vector_addl_high_v8u8", + 1, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8U16, + kArgTyV16U8, kArgTyV16U8) +DEF_MIR_INTRINSIC(vector_addl_high_v4u16, "vector_addl_high_v4u16", + 1, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4U32, + kArgTyV8U16, kArgTyV8U16) +DEF_MIR_INTRINSIC(vector_addl_high_v2u32, "vector_addl_high_v2u32", + 1, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2U64, + kArgTyV4U32, kArgTyV4U32) + +// vecTy vector_addw_low(vecTy src1, vecTy src2) +// Add each element of the source vector to second source +// widen the result into the destination vector. 
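The add-long and add-wide families differ only in the width of the first operand, as the argument types below make explicit; a sketch with hypothetical lane values:

// vector_addl_low_v8i8 : (v8i8 a, v8i8 b)  -> v8i16,  lane i = (int16)a[i] + (int16)b[i]
// vector_addw_low_v8i8 : (v8i16 a, v8i8 b) -> v8i16,  lane i = a[i] + (int16)b[i]
// e.g. a[i] = 100, b[i] = 50 gives 150 in both cases, with no wrap-around at 8 bits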
+DEF_MIR_INTRINSIC(vector_addw_low_v8i8, "vector_addw_low_v8i8", + 1, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8I16, + kArgTyV8I16, kArgTyV8I8) +DEF_MIR_INTRINSIC(vector_addw_low_v4i16, "vector_addw_low_v4i16", + 1, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I32, + kArgTyV4I32, kArgTyV4I16) +DEF_MIR_INTRINSIC(vector_addw_low_v2i32, "vector_addw_low_v2i32", + 1, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2I64, + kArgTyV2I64, kArgTyV2I32) +DEF_MIR_INTRINSIC(vector_addw_low_v8u8, "vector_addw_low_v8u8", + 1, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8U16, + kArgTyV8U16, kArgTyV8U8) +DEF_MIR_INTRINSIC(vector_addw_low_v4u16, "vector_addw_low_v4u16", + 1, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4U32, + kArgTyV4U32, kArgTyV4U16) +DEF_MIR_INTRINSIC(vector_addw_low_v2u32, "vector_addw_low_v2u32", + 1, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2U64, + kArgTyV2U64, kArgTyV2U32) + +// vecTy vector_addw_high(vecTy src1, vecTy src2) +// Add each element of the source vector to upper half of second source +// widen the result into the destination vector. +DEF_MIR_INTRINSIC(vector_addw_high_v8i8, "vector_addw_high_v8i8", + 1, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8I16, + kArgTyV8I16, kArgTyV16I8) +DEF_MIR_INTRINSIC(vector_addw_high_v4i16, "vector_addw_high_v4i16", + 1, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I32, + kArgTyV4I32, kArgTyV8I16) +DEF_MIR_INTRINSIC(vector_addw_high_v2i32, "vector_addw_high_v2i32", + 1, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2I64, + kArgTyV2I64, kArgTyV4I32) +DEF_MIR_INTRINSIC(vector_addw_high_v8u8, "vector_addw_high_v8u8", + 1, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8U16, + kArgTyV8U16, kArgTyV16U8) +DEF_MIR_INTRINSIC(vector_addw_high_v4u16, "vector_addw_high_v4u16", + 1, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4U32, + kArgTyV4U32, kArgTyV8U16) +DEF_MIR_INTRINSIC(vector_addw_high_v2u32, "vector_addw_high_v2u32", + 1, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2U64, + kArgTyV2U64, kArgTyV4U32) + +// vecTy vector_from_scalar(scalarTy value) +// Create a vector by repeating the scalar value for each element in the +// vector. 
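For example (illustrative values):

// vector_from_scalar_v4i32(7)    ->  {7, 7, 7, 7}             (v4i32)
// vector_from_scalar_v8u8(0xAB)  ->  {0xAB, 0xAB, ..., 0xAB}  (v8u8, all 8 lanes)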
+DEF_MIR_INTRINSIC(vector_from_scalar_v2i64, "vector_from_scalar_v2i64", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2I64, + kArgTyI64) +DEF_MIR_INTRINSIC(vector_from_scalar_v4i32, "vector_from_scalar_v4i32", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I32, + kArgTyI32) +DEF_MIR_INTRINSIC(vector_from_scalar_v8i16, "vector_from_scalar_v8i16", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8I16, + kArgTyI16) +DEF_MIR_INTRINSIC(vector_from_scalar_v16i8, "vector_from_scalar_v16i8", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV16I8, + kArgTyI8) +DEF_MIR_INTRINSIC(vector_from_scalar_v2u64, "vector_from_scalar_v2u64", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2U64, + kArgTyU64) +DEF_MIR_INTRINSIC(vector_from_scalar_v4u32, "vector_from_scalar_v4u32", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4U32, + kArgTyU32) +DEF_MIR_INTRINSIC(vector_from_scalar_v8u16, "vector_from_scalar_v8u16", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8U16, + kArgTyU16) +DEF_MIR_INTRINSIC(vector_from_scalar_v16u8, "vector_from_scalar_v16u8", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV16U8, + kArgTyU8) +DEF_MIR_INTRINSIC(vector_from_scalar_v2f64, "vector_from_scalar_v2f64", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2F64, + kArgTyF64) +DEF_MIR_INTRINSIC(vector_from_scalar_v4f32, "vector_from_scalar_v4f32", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4F32, + kArgTyF32) +DEF_MIR_INTRINSIC(vector_from_scalar_v1i64, "vector_from_scalar_v1i64", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV1I64, + kArgTyI64) +DEF_MIR_INTRINSIC(vector_from_scalar_v2i32, "vector_from_scalar_v2i32", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2I32, + kArgTyI32) +DEF_MIR_INTRINSIC(vector_from_scalar_v4i16, "vector_from_scalar_v4i16", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I16, + kArgTyI16) +DEF_MIR_INTRINSIC(vector_from_scalar_v8i8, "vector_from_scalar_v8i8", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8I8, + kArgTyI8) +DEF_MIR_INTRINSIC(vector_from_scalar_v1u64, "vector_from_scalar_v1u64", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV1U64, + kArgTyU64) +DEF_MIR_INTRINSIC(vector_from_scalar_v2u32, "vector_from_scalar_v2u32", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2U32, + kArgTyU32) +DEF_MIR_INTRINSIC(vector_from_scalar_v4u16, "vector_from_scalar_v4u16", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4U16, + kArgTyU16) +DEF_MIR_INTRINSIC(vector_from_scalar_v8u8, "vector_from_scalar_v8u8", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8U8, + kArgTyU8) +DEF_MIR_INTRINSIC(vector_from_scalar_v1f64, "vector_from_scalar_v1f64", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV1F64, + kArgTyF64) +DEF_MIR_INTRINSIC(vector_from_scalar_v2f32, "vector_from_scalar_v2f32", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2F32, + kArgTyF32) + +// vecTy2 vector_labssub(vectTy1 src2, vectTy2 src2) +// Create a widened vector by getting the abs value of subtracted arguments. 
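+// Sketch of the element-wise behaviour described above (illustrative only; dst/src are placeholders).
+// For vector_labssub_low_v8i8 below (return V8I16, args V8I8 and V8I8):
+//   for (i = 0; i < 8; ++i) dst[i] = (int16_t)abs((int16_t)src1[i] - (int16_t)src2[i]);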
+DEF_MIR_INTRINSIC(vector_labssub_low_v8i8, "vector_labssub_low_v8i8", + 1, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8I16, + kArgTyV8I8, kArgTyV8I8) +DEF_MIR_INTRINSIC(vector_labssub_low_v4i16, "vector_labssub_low_v4i16", + 1, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I32, + kArgTyV4I16, kArgTyV4I16) +DEF_MIR_INTRINSIC(vector_labssub_low_v2i32, "vector_labssub_low_v2i32", + 1, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2I64, + kArgTyV2I32, kArgTyV2I32) +DEF_MIR_INTRINSIC(vector_labssub_low_v8u8, "vector_labssub_low_v8u8", + 1, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8U16, + kArgTyV8U8, kArgTyV8U8) +DEF_MIR_INTRINSIC(vector_labssub_low_v4u16, "vector_labssub_low_v4u16", + 1, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4U32, + kArgTyV4U16, kArgTyV4U16) +DEF_MIR_INTRINSIC(vector_labssub_low_v2u32, "vector_labssub_low_v2u32", + 1, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2U64, + kArgTyV2U32, kArgTyV2U32) + +// vecTy2 vector_labssub_high(vectTy1 src2, vectTy2 src2) +// Create a widened vector by getting the abs value of subtracted high args. +DEF_MIR_INTRINSIC(vector_labssub_high_v8i8, "vector_labssub_high_v8i8", + 1, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8I16, + kArgTyV16I8, kArgTyV16I8) +DEF_MIR_INTRINSIC(vector_labssub_high_v4i16, "vector_labssub_high_v4i16", + 1, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I32, + kArgTyV8I16, kArgTyV8I16) +DEF_MIR_INTRINSIC(vector_labssub_high_v2i32, "vector_labssub_high_v2i32", + 1, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2I64, + kArgTyV4I32, kArgTyV4I32) +DEF_MIR_INTRINSIC(vector_labssub_high_v8u8, "vector_labssub_high_v8u8", + 1, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8U16, + kArgTyV16U8, kArgTyV16U8) +DEF_MIR_INTRINSIC(vector_labssub_high_v4u16, "vector_labssub_high_v4u16", + 1, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4U32, + kArgTyV8U16, kArgTyV8U16) +DEF_MIR_INTRINSIC(vector_labssub_high_v2u32, "vector_labssub_high_v2u32", + 1, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2U64, + kArgTyV4U32, kArgTyV4U32) + +// vecTy2 vector_madd(vecTy2 accum, vecTy1 src1, vecTy1 src2) +// Multiply the elements of src1 and src2, then accumulate into accum. +// Elements of vecTy2 are twice as long as elements of vecTy1. +DEF_MIR_INTRINSIC(vector_madd_v2i32, "vector_madd_v2i32", + 1, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2I64, + kArgTyV2I64, kArgTyV2I32, kArgTyV2I32) +DEF_MIR_INTRINSIC(vector_madd_v4i16, "vector_madd_v4i16", + 1, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I32, + kArgTyV4I32, kArgTyV4I16, kArgTyV4I16) +DEF_MIR_INTRINSIC(vector_madd_v8i8, "vector_madd_v8i8", + 1, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8I16, + kArgTyV8I16, kArgTyV8I8, kArgTyV8I8) +DEF_MIR_INTRINSIC(vector_madd_v2u32, "vector_madd_v2u32", + 1, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2U64, + kArgTyV2U64, kArgTyV2U32, kArgTyV2U32) +DEF_MIR_INTRINSIC(vector_madd_v4u16, "vector_madd_v4u16", + 1, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4U32, + kArgTyV4U32, kArgTyV4U16, kArgTyV4U16) +DEF_MIR_INTRINSIC(vector_madd_v8u8, "vector_madd_v8u8", + 1, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8U16, + kArgTyV8U16, kArgTyV8U8, kArgTyV8U8) + +// vecTy2 vector_mull_low(vecTy1 src1, vecTy1 src2) +// Multiply the elements of src1 and src2. Elements of vecTy2 are twice as +// long as elements of vecTy1. 
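+// Illustrative sketch of the widening multiply described above (placeholder names).
+// For vector_mull_low_v2i32 below (return V2I64, args V2I32 and V2I32):
+//   for (i = 0; i < 2; ++i) dst[i] = (int64_t)src1[i] * (int64_t)src2[i];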
+DEF_MIR_INTRINSIC(vector_mull_low_v2i32, "vector_mull_low_v2i32", + 1, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2I64, + kArgTyV2I32, kArgTyV2I32) +DEF_MIR_INTRINSIC(vector_mull_low_v4i16, "vector_mull_low_v4i16", + 1, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I32, + kArgTyV4I16, kArgTyV4I16) +DEF_MIR_INTRINSIC(vector_mull_low_v8i8, "vector_mull_low_v8i8", + 1, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8I16, + kArgTyV8I8, kArgTyV8I8) +DEF_MIR_INTRINSIC(vector_mull_low_v2u32, "vector_mull_low_v2u32", + 1, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2U64, + kArgTyV2U32, kArgTyV2U32) +DEF_MIR_INTRINSIC(vector_mull_low_v4u16, "vector_mull_low_v4u16", + 1, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4U32, + kArgTyV4U16, kArgTyV4U16) +DEF_MIR_INTRINSIC(vector_mull_low_v8u8, "vector_mull_low_v8u8", + 1, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8U16, + kArgTyV8U8, kArgTyV8U8) + +// vecTy2 vector_mull_high(vecTy1 src1, vecTy1 src2) +// Multiply the upper elements of src1 and src2. Elements of vecTy2 are twice +// as long as elements of vecTy1. +DEF_MIR_INTRINSIC(vector_mull_high_v2i32, "vector_mull_high_v2i32", + 1, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2I64, + kArgTyV2I32, kArgTyV2I32) +DEF_MIR_INTRINSIC(vector_mull_high_v4i16, "vector_mull_high_v4i16", + 1, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I32, + kArgTyV4I16, kArgTyV4I16) +DEF_MIR_INTRINSIC(vector_mull_high_v8i8, "vector_mull_high_v8i8", + 1, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8I16, + kArgTyV8I8, kArgTyV8I8) +DEF_MIR_INTRINSIC(vector_mull_high_v2u32, "vector_mull_high_v2u32", + 1, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2U64, + kArgTyV2U32, kArgTyV2U32) +DEF_MIR_INTRINSIC(vector_mull_high_v4u16, "vector_mull_high_v4u16", + 1, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4U32, + kArgTyV4U16, kArgTyV4U16) +DEF_MIR_INTRINSIC(vector_mull_high_v8u8, "vector_mull_high_v8u8", + 1, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8U16, + kArgTyV8U8, kArgTyV8U8) + +// vecTy vector_merge(vecTy src1, vecTy src2, int n) +// Create a vector by concatenating the high elements of src1, starting +// with the nth element, followed by the low elements of src2. 
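+// Rough sketch of the merge described above for an N-element vector (illustrative only):
+//   for (i = 0; i < N; ++i) dst[i] = (n + i < N) ? src1[n + i] : src2[n + i - N];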
+DEF_MIR_INTRINSIC(vector_merge_v2i64, "vector_merge_v2i64", + 1, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2I64, + kArgTyV2I64, kArgTyV2I64, kArgTyI32) +DEF_MIR_INTRINSIC(vector_merge_v4i32, "vector_merge_v4i32", + 1, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I32, + kArgTyV4I32, kArgTyV4I32, kArgTyI32) +DEF_MIR_INTRINSIC(vector_merge_v8i16, "vector_merge_v8i16", + 1, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8I16, + kArgTyV8I16, kArgTyV8I16, kArgTyI32) +DEF_MIR_INTRINSIC(vector_merge_v16i8, "vector_merge_v16i8", + 1, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV16I8, + kArgTyV16I8, kArgTyV16I8, kArgTyI32) +DEF_MIR_INTRINSIC(vector_merge_v2u64, "vector_merge_v2u64", + 1, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2U64, + kArgTyV2U64, kArgTyV2U64, kArgTyI32) +DEF_MIR_INTRINSIC(vector_merge_v4u32, "vector_merge_v4u32", + 1, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4U32, + kArgTyV4U32, kArgTyV4U32, kArgTyI32) +DEF_MIR_INTRINSIC(vector_merge_v8u16, "vector_merge_v8u16", + 1, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8U16, + kArgTyV8U16, kArgTyV8U16, kArgTyI32) +DEF_MIR_INTRINSIC(vector_merge_v16u8, "vector_merge_v16u8", + 1, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV16U8, + kArgTyV16U8, kArgTyV16U8, kArgTyI32) +DEF_MIR_INTRINSIC(vector_merge_v2f64, "vector_merge_v2f64", + 1, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2F64, + kArgTyV2F64, kArgTyV2F64, kArgTyI32) +DEF_MIR_INTRINSIC(vector_merge_v4f32, "vector_merge_v4f32", + 1, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4F32, + kArgTyV4F32, kArgTyV4F32, kArgTyI32) +DEF_MIR_INTRINSIC(vector_merge_v1i64, "vector_merge_v1i64", + 1, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV1I64, + kArgTyV1I64, kArgTyV1I64, kArgTyI32) +DEF_MIR_INTRINSIC(vector_merge_v2i32, "vector_merge_v2i32", + 1, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2I32, + kArgTyV2I32, kArgTyV2I32, kArgTyI32) +DEF_MIR_INTRINSIC(vector_merge_v4i16, "vector_merge_v4i16", + 1, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I16, + kArgTyV4I16, kArgTyV4I16, kArgTyI32) +DEF_MIR_INTRINSIC(vector_merge_v8i8, "vector_merge_v8i8", + 1, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8I8, + kArgTyV8I8, kArgTyV8I8, kArgTyI32) +DEF_MIR_INTRINSIC(vector_merge_v1u64, "vector_merge_v1u64", + 1, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV1U64, + kArgTyV1U64, kArgTyV1U64, kArgTyI32) +DEF_MIR_INTRINSIC(vector_merge_v2u32, "vector_merge_v2u32", + 1, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2U32, + kArgTyV2U32, kArgTyV2U32, kArgTyI32) +DEF_MIR_INTRINSIC(vector_merge_v4u16, "vector_merge_v4u16", + 1, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4U16, + kArgTyV4U16, kArgTyV4U16, kArgTyI32) +DEF_MIR_INTRINSIC(vector_merge_v8u8, "vector_merge_v8u8", + 1, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8U8, + kArgTyV8U8, kArgTyV8U8, kArgTyI32) +DEF_MIR_INTRINSIC(vector_merge_v1f64, "vector_merge_v1f64", + 1, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV1F64, + kArgTyV1F64, kArgTyV1F64, kArgTyI32) +DEF_MIR_INTRINSIC(vector_merge_v2f32, "vector_merge_v2f32", + 1, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2F32, + kArgTyV2F32, kArgTyV2F32, kArgTyI32) + +// vecTy2 vector_get_low(vecTy1 src) +// Create a vector from the low part of the source vector. 
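+// Illustrative example (not generated code): vector_get_low_v4i32 returns the two
+// low lanes of its V4I32 argument as a V2I32, i.e. dst[i] = src[i] for i = 0, 1.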
+DEF_MIR_INTRINSIC(vector_get_low_v2i64, "vector_get_low_v2i64", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV1I64, + kArgTyV2I64) +DEF_MIR_INTRINSIC(vector_get_low_v4i32, "vector_get_low_v4i32", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2I32, + kArgTyV4I32) +DEF_MIR_INTRINSIC(vector_get_low_v8i16, "vector_get_low_v8i16", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I16, + kArgTyV8I16) +DEF_MIR_INTRINSIC(vector_get_low_v16i8, "vector_get_low_v16i8", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8I8, + kArgTyV16I8) +DEF_MIR_INTRINSIC(vector_get_low_v2u64, "vector_get_low_v2u64", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV1U64, + kArgTyV2U64) +DEF_MIR_INTRINSIC(vector_get_low_v4u32, "vector_get_low_v4u32", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2U32, + kArgTyV4U32) +DEF_MIR_INTRINSIC(vector_get_low_v8u16, "vector_get_low_v8u16", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4U16, + kArgTyV8U16) +DEF_MIR_INTRINSIC(vector_get_low_v16u8, "vector_get_low_v16u8", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8U8, + kArgTyV16U8) +DEF_MIR_INTRINSIC(vector_get_low_v2f64, "vector_get_low_v2f64", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV1F64, + kArgTyV2F64) +DEF_MIR_INTRINSIC(vector_get_low_v4f32, "vector_get_low_v4f32", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2F32, + kArgTyV4F32) + +// vecTy2 vector_get_low(vecTy1 src) +// Create a vector from the high part of the source vector. +DEF_MIR_INTRINSIC(vector_get_high_v2i64, "vector_get_high_v2i64", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV1I64, + kArgTyV2I64) +DEF_MIR_INTRINSIC(vector_get_high_v4i32, "vector_get_high_v4i32", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2I32, + kArgTyV4I32) +DEF_MIR_INTRINSIC(vector_get_high_v8i16, "vector_get_high_v8i16", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I16, + kArgTyV8I16) +DEF_MIR_INTRINSIC(vector_get_high_v16i8, "vector_get_high_v16i8", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8I8, + kArgTyV16I8) +DEF_MIR_INTRINSIC(vector_get_high_v2u64, "vector_get_high_v2u64", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV1U64, + kArgTyV2U64) +DEF_MIR_INTRINSIC(vector_get_high_v4u32, "vector_get_high_v4u32", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2U32, + kArgTyV4U32) +DEF_MIR_INTRINSIC(vector_get_high_v8u16, "vector_get_high_v8u16", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4U16, + kArgTyV8U16) +DEF_MIR_INTRINSIC(vector_get_high_v16u8, "vector_get_high_v16u8", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8U8, + kArgTyV16U8) +DEF_MIR_INTRINSIC(vector_get_high_v2f64, "vector_get_high_v2f64", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV1F64, + kArgTyV2F64) +DEF_MIR_INTRINSIC(vector_get_high_v4f32, "vector_get_high_v4f32", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2F32, + kArgTyV4F32) + +// scalarTy vector_get_element(vecTy src, int n) +// Get the nth element of the source vector. 
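+// Sketch of the behaviour described above (placeholder names, illustrative only):
+//   vector_get_element_v4i32(src, n) == src[n]   // with 0 <= n < 4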
+DEF_MIR_INTRINSIC(vector_get_element_v2i64, "vector_get_element_v2i64", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyI64, + kArgTyV2I64, kArgTyI32) +DEF_MIR_INTRINSIC(vector_get_element_v4i32, "vector_get_element_v4i32", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyI32, + kArgTyV4I32, kArgTyI32) +DEF_MIR_INTRINSIC(vector_get_element_v8i16, "vector_get_element_v8i16", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyI16, + kArgTyV8I16, kArgTyI32) +DEF_MIR_INTRINSIC(vector_get_element_v16i8, "vector_get_element_v16i8", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyI8, + kArgTyV16I8, kArgTyI32) +DEF_MIR_INTRINSIC(vector_get_element_v2u64, "vector_get_element_v2u64", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyU64, + kArgTyV2U64, kArgTyI32) +DEF_MIR_INTRINSIC(vector_get_element_v4u32, "vector_get_element_v4u32", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyU32, + kArgTyV4U32, kArgTyI32) +DEF_MIR_INTRINSIC(vector_get_element_v8u16, "vector_get_element_v8u16", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyU16, + kArgTyV8U16, kArgTyI32) +DEF_MIR_INTRINSIC(vector_get_element_v16u8, "vector_get_element_v16u8", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyU8, + kArgTyV16U8, kArgTyI32) +DEF_MIR_INTRINSIC(vector_get_element_v2f64, "vector_get_element_v2f64", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyF64, + kArgTyV2F64, kArgTyI32) +DEF_MIR_INTRINSIC(vector_get_element_v4f32, "vector_get_element_v4f32", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyF32, + kArgTyV4F32, kArgTyI32) +DEF_MIR_INTRINSIC(vector_get_element_v1i64, "vector_get_element_v1i64", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyI64, + kArgTyV1I64, kArgTyI32) +DEF_MIR_INTRINSIC(vector_get_element_v2i32, "vector_get_element_v2i32", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyI32, + kArgTyV2I32, kArgTyI32) +DEF_MIR_INTRINSIC(vector_get_element_v4i16, "vector_get_element_v4i16", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyI16, + kArgTyV4I16, kArgTyI32) +DEF_MIR_INTRINSIC(vector_get_element_v8i8, "vector_get_element_v8i8", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyI8, + kArgTyV8I8, kArgTyI32) +DEF_MIR_INTRINSIC(vector_get_element_v1u64, "vector_get_element_v1u64", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyU64, + kArgTyV1U64, kArgTyI32) +DEF_MIR_INTRINSIC(vector_get_element_v2u32, "vector_get_element_v2u32", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyU32, + kArgTyV2U32, kArgTyI32) +DEF_MIR_INTRINSIC(vector_get_element_v4u16, "vector_get_element_v4u16", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyU16, + kArgTyV4U16, kArgTyI32) +DEF_MIR_INTRINSIC(vector_get_element_v8u8, "vector_get_element_v8u8", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyU8, + kArgTyV8U8, kArgTyI32) +DEF_MIR_INTRINSIC(vector_get_element_v1f64, "vector_get_element_v1f64", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyF64, + kArgTyV1F64, kArgTyI32) +DEF_MIR_INTRINSIC(vector_get_element_v2f32, "vector_get_element_v2f32", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyF32, + 
kArgTyV2F32, kArgTyI32) + +// vecTy vector_set_element(ScalarTy value, VecTy vec, int n) +// Set the nth element of the source vector to value. +DEF_MIR_INTRINSIC(vector_set_element_v2i64, "vector_set_element_v2i64", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2I64, + kArgTyI64, kArgTyV2I64, kArgTyI32) +DEF_MIR_INTRINSIC(vector_set_element_v4i32, "vector_set_element_v4i32", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I32, + kArgTyI32, kArgTyV4I32, kArgTyI32) +DEF_MIR_INTRINSIC(vector_set_element_v8i16, "vector_set_element_v8i16", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8I16, + kArgTyI16, kArgTyV8I16, kArgTyI32) +DEF_MIR_INTRINSIC(vector_set_element_v16i8, "vector_set_element_v16i8", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV16I8, + kArgTyI8, kArgTyV16I8, kArgTyI32) +DEF_MIR_INTRINSIC(vector_set_element_v2u64, "vector_set_element_v2u64", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2U64, + kArgTyU64, kArgTyV2U64, kArgTyI32) +DEF_MIR_INTRINSIC(vector_set_element_v4u32, "vector_set_element_v4u32", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4U32, + kArgTyU32, kArgTyV4U32, kArgTyI32) +DEF_MIR_INTRINSIC(vector_set_element_v8u16, "vector_set_element_v8u16", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8U16, + kArgTyU16, kArgTyV8U16, kArgTyI32) +DEF_MIR_INTRINSIC(vector_set_element_v16u8, "vector_set_element_v16u8", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV16U8, + kArgTyU8, kArgTyV16U8, kArgTyI32) +DEF_MIR_INTRINSIC(vector_set_element_v2f64, "vector_set_element_v2f64", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2F64, + kArgTyF64, kArgTyV2F64, kArgTyI32) +DEF_MIR_INTRINSIC(vector_set_element_v4f32, "vector_set_element_v4f32", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4F32, + kArgTyF32, kArgTyV4F32, kArgTyI32) +DEF_MIR_INTRINSIC(vector_set_element_v1i64, "vector_set_element_v1i64", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV1I64, + kArgTyI64, kArgTyV1I64, kArgTyI32) +DEF_MIR_INTRINSIC(vector_set_element_v2i32, "vector_set_element_v2i32", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2I32, + kArgTyI32, kArgTyV2I32, kArgTyI32) +DEF_MIR_INTRINSIC(vector_set_element_v4i16, "vector_set_element_v4i16", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I16, + kArgTyI16, kArgTyV4I16, kArgTyI32) +DEF_MIR_INTRINSIC(vector_set_element_v8i8, "vector_set_element_v8i8", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8I8, + kArgTyI8, kArgTyV8I8, kArgTyI32) +DEF_MIR_INTRINSIC(vector_set_element_v1u64, "vector_set_element_v1u64", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV1U64, + kArgTyU64, kArgTyV1U64, kArgTyI32) +DEF_MIR_INTRINSIC(vector_set_element_v2u32, "vector_set_element_v2u32", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2U32, + kArgTyU32, kArgTyV2U32, kArgTyI32) +DEF_MIR_INTRINSIC(vector_set_element_v4u16, "vector_set_element_v4u16", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4U16, + kArgTyU16, kArgTyV4U16, kArgTyI32) +DEF_MIR_INTRINSIC(vector_set_element_v8u8, "vector_set_element_v8u8", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8U8, + 
kArgTyU8, kArgTyV8U8, kArgTyI32) +DEF_MIR_INTRINSIC(vector_set_element_v1f64, "vector_set_element_v1f64", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV1F64, + kArgTyF64, kArgTyV1F64, kArgTyI32) +DEF_MIR_INTRINSIC(vector_set_element_v2f32, "vector_set_element_v2f32", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2F32, + kArgTyF32, kArgTyV2F32, kArgTyI32) + +// vecTy2 vector_widen_low(vecTy1 src) +// Widen each element of the 64-bit argument to double size of the +// original width to a 128-bit destination vector. +DEF_MIR_INTRINSIC(vector_widen_low_v2i32, "vector_widen_low_v2i32", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2I64, + kArgTyV2I32) +DEF_MIR_INTRINSIC(vector_widen_low_v4i16, "vector_widen_low_v4i16", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I32, + kArgTyV4I16) +DEF_MIR_INTRINSIC(vector_widen_low_v8i8, "vector_widen_low_v8i8", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8I16, + kArgTyV8I8) +DEF_MIR_INTRINSIC(vector_widen_low_v2u32, "vector_widen_low_v2u32", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2U64, + kArgTyV2U32) +DEF_MIR_INTRINSIC(vector_widen_low_v4u16, "vector_widen_low_v4u16", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4U32, + kArgTyV4U16) +DEF_MIR_INTRINSIC(vector_widen_low_v8u8, "vector_widen_low_v8u8", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8U16, + kArgTyV8U8) + +// vecTy2 vector_widen_high(vecTy1 src) +// Widen each upper element of the 128-bit source vector to double size of +// the original width into a 128-bit destination vector. +DEF_MIR_INTRINSIC(vector_widen_high_v2i32, "vector_widen_high_v2i32", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2I64, + kArgTyV4I32) +DEF_MIR_INTRINSIC(vector_widen_high_v4i16, "vector_widen_high_v4i16", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I32, + kArgTyV8I16) +DEF_MIR_INTRINSIC(vector_widen_high_v8i8, "vector_widen_high_v8i8", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8I16, + kArgTyV16I8) +DEF_MIR_INTRINSIC(vector_widen_high_v2u32, "vector_widen_high_v2u32", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2U64, + kArgTyV4U32) +DEF_MIR_INTRINSIC(vector_widen_high_v4u16, "vector_widen_high_v4u16", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4U32, + kArgTyV8U16) +DEF_MIR_INTRINSIC(vector_widen_high_v8u8, "vector_widen_high_v8u8", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8U16, + kArgTyV16U8) + +// vecTy2 vector_narrow_low(vecTy1 src) +// Narrow each element of the 128-bit source vector to half of the original width, +// then write it to the lower half of the destination vector. 
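+// Illustrative sketch of the truncating narrow described above (placeholder names).
+// For vector_narrow_low_v2i64 below (return V2I32, arg V2I64):
+//   for (i = 0; i < 2; ++i) dst[i] = (int32_t)src[i];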
+DEF_MIR_INTRINSIC(vector_narrow_low_v2i64, "vector_narrow_low_v2i64", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2I32, + kArgTyV2I64) +DEF_MIR_INTRINSIC(vector_narrow_low_v4i32, "vector_narrow_low_v4i32", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I16, + kArgTyV4I32) +DEF_MIR_INTRINSIC(vector_narrow_low_v8i16, "vector_narrow_low_v8i16", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8I8, + kArgTyV8I16) +DEF_MIR_INTRINSIC(vector_narrow_low_v2u64, "vector_narrow_low_v2u64", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2U32, + kArgTyV2U64) +DEF_MIR_INTRINSIC(vector_narrow_low_v4u32, "vector_narrow_low_v4u32", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4U16, + kArgTyV4U32) +DEF_MIR_INTRINSIC(vector_narrow_low_v8u16, "vector_narrow_low_v8u16", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8U8, + kArgTyV8U16) + +// vecTy2 vector_narrow_high(vecTy1 src) +// Narrow each element of the upper source vector to half of the original width, +// concatenate with the first 64-bit arg into a 128-bit destination vector. +DEF_MIR_INTRINSIC(vector_narrow_high_v2i64, "vector_narrow_high_v2i64", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I32, + kArgTyV2I32, kArgTyV2I64) +DEF_MIR_INTRINSIC(vector_narrow_high_v4i32, "vector_narrow_high_v4i32", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8I16, + kArgTyV4I16, kArgTyV4I32) +DEF_MIR_INTRINSIC(vector_narrow_high_v8i16, "vector_narrow_high_v8i16", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV16I8, + kArgTyV8I8, kArgTyV8I16) +DEF_MIR_INTRINSIC(vector_narrow_high_v2u64, "vector_narrow_high_v2u64", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4U32, + kArgTyV2U32, kArgTyV2U64) +DEF_MIR_INTRINSIC(vector_narrow_high_v4u32, "vector_narrow_high_v4u32", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8U16, + kArgTyV4U16, kArgTyV4U32) +DEF_MIR_INTRINSIC(vector_narrow_high_v8u16, "vector_narrow_high_v8u16", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV16U8, + kArgTyV8U8, kArgTyV8U16) + +// vecTy vector_pairwise_adalp(vecTy src1, vecTy2 src2) +// Pairwise add of src2 then accumulate into src1 as dest +DEF_MIR_INTRINSIC(vector_pairwise_adalp_v8i8, "vector_pairwise_adalp_v8i8", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I16, + kArgTyV4I16, kArgTyV8I8) +DEF_MIR_INTRINSIC(vector_pairwise_adalp_v4i16, "vector_pairwise_adalp_v4i16", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2I32, + kArgTyV2I32, kArgTyV4I16) +DEF_MIR_INTRINSIC(vector_pairwise_adalp_v2i32, "vector_pairwise_adalp_v2i32", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV1I64, + kArgTyV1I64, kArgTyV2I32) +DEF_MIR_INTRINSIC(vector_pairwise_adalp_v8u8, "vector_pairwise_adalp_v8u8", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4U16, + kArgTyV4U16, kArgTyV8U8) +DEF_MIR_INTRINSIC(vector_pairwise_adalp_v4u16, "vector_pairwise_adalp_v4u16", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2U32, + kArgTyV2U32, kArgTyV4U16) +DEF_MIR_INTRINSIC(vector_pairwise_adalp_v2u32, "vector_pairwise_adalp_v2u32", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV1U64, + kArgTyV1U64, kArgTyV2U32) 
+DEF_MIR_INTRINSIC(vector_pairwise_adalp_v16i8, "vector_pairwise_adalp_v16i8", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8I16, + kArgTyV8I16, kArgTyV16I8) +DEF_MIR_INTRINSIC(vector_pairwise_adalp_v8i16, "vector_pairwise_adalp_v8i16", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I32, + kArgTyV4I32, kArgTyV8I16) +DEF_MIR_INTRINSIC(vector_pairwise_adalp_v4i32, "vector_pairwise_adalp_v4i32", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2I64, + kArgTyV2I64, kArgTyV4I32) +DEF_MIR_INTRINSIC(vector_pairwise_adalp_v16u8, "vector_pairwise_adalp_v16u8", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8U16, + kArgTyV8U16, kArgTyV16U8) +DEF_MIR_INTRINSIC(vector_pairwise_adalp_v8u16, "vector_pairwise_adalp_v8u16", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4U32, + kArgTyV4U32, kArgTyV8U16) +DEF_MIR_INTRINSIC(vector_pairwise_adalp_v4u32, "vector_pairwise_adalp_v4u32", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2U64, + kArgTyV2U64, kArgTyV4U32) + +// vecTy2 vector_pairwise_add(vecTy1 src) +// Add pairs of elements from the source vector and put the result into the +// destination vector, whose element size is twice and the number of +// elements is half of the source vector type. +DEF_MIR_INTRINSIC(vector_pairwise_add_v4i32, "vector_pairwise_add_v4i32", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2I64, + kArgTyV4I32) +DEF_MIR_INTRINSIC(vector_pairwise_add_v8i16, "vector_pairwise_add_v8i16", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I32, + kArgTyV8I16) +DEF_MIR_INTRINSIC(vector_pairwise_add_v16i8, "vector_pairwise_add_v16i8", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8I16, + kArgTyV16I8) +DEF_MIR_INTRINSIC(vector_pairwise_add_v4u32, "vector_pairwise_add_v4u32", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2U64, + kArgTyV4U32) +DEF_MIR_INTRINSIC(vector_pairwise_add_v8u16, "vector_pairwise_add_v8u16", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4U32, + kArgTyV8U16) +DEF_MIR_INTRINSIC(vector_pairwise_add_v16u8, "vector_pairwise_add_v16u8", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8U16, + kArgTyV16U8) +DEF_MIR_INTRINSIC(vector_pairwise_add_v2i32, "vector_pairwise_add_v2i32", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV1I64, + kArgTyV2I32) +DEF_MIR_INTRINSIC(vector_pairwise_add_v4i16, "vector_pairwise_add_v4i16", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2I32, + kArgTyV4I16) +DEF_MIR_INTRINSIC(vector_pairwise_add_v8i8, "vector_pairwise_add_v8i8", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I16, + kArgTyV8I8) +DEF_MIR_INTRINSIC(vector_pairwise_add_v2u32, "vector_pairwise_add_v2u32", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV1U64, + kArgTyV2U32) +DEF_MIR_INTRINSIC(vector_pairwise_add_v4u16, "vector_pairwise_add_v4u16", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2U32, + kArgTyV4U16) +DEF_MIR_INTRINSIC(vector_pairwise_add_v8u8, "vector_pairwise_add_v8u8", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4U16, + kArgTyV8U8) + +// vecTy vector_reverse(vecTy src) +// Create a vector by reversing the order of the elements in src. 
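+// Rough sketch following the description above, for an N-element vector (illustrative only):
+//   for (i = 0; i < N; ++i) dst[i] = src[N - 1 - i];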
+DEF_MIR_INTRINSIC(vector_reverse_v2i64, "vector_reverse_v2i64", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2I64, + kArgTyV2I64) +DEF_MIR_INTRINSIC(vector_reverse_v4i32, "vector_reverse_v4i32", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I32, + kArgTyV4I32) +DEF_MIR_INTRINSIC(vector_reverse_v8i16, "vector_reverse_v8i16", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8I16, + kArgTyV8I16) +DEF_MIR_INTRINSIC(vector_reverse_v16i8, "vector_reverse_v16i8", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV16I8, + kArgTyV16I8) +DEF_MIR_INTRINSIC(vector_reverse_v2u64, "vector_reverse_v2u64", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2U64, + kArgTyV2U64) +DEF_MIR_INTRINSIC(vector_reverse_v4u32, "vector_reverse_v4u32", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4U32, + kArgTyV4U32) +DEF_MIR_INTRINSIC(vector_reverse_v8u16, "vector_reverse_v8u16", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8U16, + kArgTyV8U16) +DEF_MIR_INTRINSIC(vector_reverse_v16u8, "vector_reverse_v16u8", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV16U8, + kArgTyV16U8) +DEF_MIR_INTRINSIC(vector_reverse_v2f64, "vector_reverse_v2f64", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2F64, + kArgTyV2F64) +DEF_MIR_INTRINSIC(vector_reverse_v4f32, "vector_reverse_v4f32", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4F32, + kArgTyV4F32) +DEF_MIR_INTRINSIC(vector_reverse_v1i64, "vector_reverse_v1i64", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV1I64, + kArgTyV1I64) +DEF_MIR_INTRINSIC(vector_reverse_v2i32, "vector_reverse_v2i32", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2I32, + kArgTyV2I32) +DEF_MIR_INTRINSIC(vector_reverse_v4i16, "vector_reverse_v4i16", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I16, + kArgTyV4I16) +DEF_MIR_INTRINSIC(vector_reverse_v8i8, "vector_reverse_v8i8", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8I8, + kArgTyV8I8) +DEF_MIR_INTRINSIC(vector_reverse_v1u64, "vector_reverse_v1u64", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV1U64, + kArgTyV1U64) +DEF_MIR_INTRINSIC(vector_reverse_v2u32, "vector_reverse_v2u32", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2U32, + kArgTyV2U32) +DEF_MIR_INTRINSIC(vector_reverse_v4u16, "vector_reverse_v4u16", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4U16, + kArgTyV4U16) +DEF_MIR_INTRINSIC(vector_reverse_v8u8, "vector_reverse_v8u8", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8U8, + kArgTyV8U8) +DEF_MIR_INTRINSIC(vector_reverse_v1f64, "vector_reverse_v1f64", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV1F64, + kArgTyV1F64) +DEF_MIR_INTRINSIC(vector_reverse_v2f32, "vector_reverse_v2f32", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2F32, + kArgTyV2F32) + +// vector_reverse16 with 8-bit elements +DEF_MIR_INTRINSIC(vector_reverse16_v16u8, "vector_reverse16_v16u8", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV16U8, + kArgTyV16U8) +DEF_MIR_INTRINSIC(vector_reverse16_v16i8, "vector_reverse16_v16i8", + DEFAULT_NUM_INSN, INTRNISVECTOR | 
INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV16I8, + kArgTyV16I8) +DEF_MIR_INTRINSIC(vector_reverse16_v8u8, "vector_reverse16_v8u8", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8U8, + kArgTyV8U8) +DEF_MIR_INTRINSIC(vector_reverse16_v8i8, "vector_reverse16_v8i8", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8I8, + kArgTyV8I8) + +// vector_reverse64 with 8-bit elements +DEF_MIR_INTRINSIC(vector_reverse64_v16u8, "vector_reverse64_v16u8", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV16U8, + kArgTyV16U8) +DEF_MIR_INTRINSIC(vector_reverse64_v16i8, "vector_reverse64_v16i8", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV16I8, + kArgTyV16I8) +DEF_MIR_INTRINSIC(vector_reverse64_v8u8, "vector_reverse64_v8u8", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8U8, + kArgTyV8U8) +DEF_MIR_INTRINSIC(vector_reverse64_v8i8, "vector_reverse64_v8i8", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8I8, + kArgTyV8I8) + +// vector_reverse64 with 16-bit elements +DEF_MIR_INTRINSIC(vector_reverse64_v8u16, "vector_reverse64_v8u16", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8U16, + kArgTyV8U16) +DEF_MIR_INTRINSIC(vector_reverse64_v8i16, "vector_reverse64_v8i16", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8I16, + kArgTyV8I16) +DEF_MIR_INTRINSIC(vector_reverse64_v4u16, "vector_reverse64_v4u16", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4U16, + kArgTyV4U16) +DEF_MIR_INTRINSIC(vector_reverse64_v4i16, "vector_reverse64_v4i16", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I16, + kArgTyV4I16) + +// vector_reverse64 with 32-bit elements +DEF_MIR_INTRINSIC(vector_reverse64_v4u32, "vector_reverse64_v4u32", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4U32, + kArgTyV4U32) +DEF_MIR_INTRINSIC(vector_reverse64_v4i32, "vector_reverse64_v4i32", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I32, + kArgTyV4I32) +DEF_MIR_INTRINSIC(vector_reverse64_v2u32, "vector_reverse64_v2u32", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2U32, + kArgTyV2U32) +DEF_MIR_INTRINSIC(vector_reverse64_v2i32, "vector_reverse64_v2i32", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2I32, + kArgTyV2I32) + +// vecTy2 vector_shift_narrow_low(vecTy1 src, const int n) +// Shift each element in the vector right by n, narrow each element to half +// of the original width (truncating), then write the result to the lower +// half of the destination vector. 
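+// Illustrative sketch of the shift-right-and-narrow described above (placeholder names).
+// For vector_shr_narrow_low_v2i64 below (return V2I32, args V2I64 and I32 n):
+//   for (i = 0; i < 2; ++i) dst[i] = (int32_t)(src[i] >> n);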
+DEF_MIR_INTRINSIC(vector_shr_narrow_low_v2i64, "vector_shr_narrow_low_v2i64", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2I32, + kArgTyV2I64, kArgTyI32) +DEF_MIR_INTRINSIC(vector_shr_narrow_low_v4i32, "vector_shr_narrow_low_v4i32", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I16, + kArgTyV4I32, kArgTyI32) +DEF_MIR_INTRINSIC(vector_shr_narrow_low_v8i16, "vector_shr_narrow_low_v8i16", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8I8, + kArgTyV8I16, kArgTyI32) +DEF_MIR_INTRINSIC(vector_shr_narrow_low_v2u64, "vector_shr_narrow_low_v2u64", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2U32, + kArgTyV2U64, kArgTyI32) +DEF_MIR_INTRINSIC(vector_shr_narrow_low_v4u32, "vector_shr_narrow_low_v4u32", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4U16, + kArgTyV4U32, kArgTyI32) +DEF_MIR_INTRINSIC(vector_shr_narrow_low_v8u16, "vector_shr_narrow_low_v8u16", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8U8, + kArgTyV8U16, kArgTyI32) + +// scalarTy vector_sum(vecTy src) +// Sum all of the elements in the vector into a scalar. +DEF_MIR_INTRINSIC(vector_sum_v2i64, "vector_sum_v2i64", + 2, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyI64, + kArgTyV2I64) +DEF_MIR_INTRINSIC(vector_sum_v4i32, "vector_sum_v4i32", + 2, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyI32, + kArgTyV4I32) +DEF_MIR_INTRINSIC(vector_sum_v8i16, "vector_sum_v8i16", + 2, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyI16, + kArgTyV8I16) +DEF_MIR_INTRINSIC(vector_sum_v16i8, "vector_sum_v16i8", + 2, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyI8, + kArgTyV16I8) +DEF_MIR_INTRINSIC(vector_sum_v2u64, "vector_sum_v2u64", + 2, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyU64, + kArgTyV2U64) +DEF_MIR_INTRINSIC(vector_sum_v4u32, "vector_sum_v4u32", + 2, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyU32, + kArgTyV4U32) +DEF_MIR_INTRINSIC(vector_sum_v8u16, "vector_sum_v8u16", + 2, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyU16, + kArgTyV8U16) +DEF_MIR_INTRINSIC(vector_sum_v16u8, "vector_sum_v16u8", + 2, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyU8, + kArgTyV16U8) +DEF_MIR_INTRINSIC(vector_sum_v2f64, "vector_sum_v2f64", + 2, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyF64, + kArgTyV2F64) +DEF_MIR_INTRINSIC(vector_sum_v4f32, "vector_sum_v4f32", + 2, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyF32, + kArgTyV4F32) +DEF_MIR_INTRINSIC(vector_sum_v1i64, "vector_sum_v1i64", + 2, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyI64, + kArgTyV1I64) +DEF_MIR_INTRINSIC(vector_sum_v2i32, "vector_sum_v2i32", + 2, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyI32, + kArgTyV2I32) +DEF_MIR_INTRINSIC(vector_sum_v4i16, "vector_sum_v4i16", + 2, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyI16, + kArgTyV4I16) +DEF_MIR_INTRINSIC(vector_sum_v8i8, "vector_sum_v8i8", + 2, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyI8, + kArgTyV8I8) +DEF_MIR_INTRINSIC(vector_sum_v1u64, "vector_sum_v1u64", + 2, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyU64, + kArgTyV1U64) +DEF_MIR_INTRINSIC(vector_sum_v2u32, "vector_sum_v2u32", + 2, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyU32, + kArgTyV2U32) +DEF_MIR_INTRINSIC(vector_sum_v4u16, "vector_sum_v4u16", + 2, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyU16, + kArgTyV4U16) 
+DEF_MIR_INTRINSIC(vector_sum_v8u8, "vector_sum_v8u8", + 2, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyU8, + kArgTyV8U8) +DEF_MIR_INTRINSIC(vector_sum_v1f64, "vector_sum_v1f64", + 2, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyF64, + kArgTyV1F64) +DEF_MIR_INTRINSIC(vector_sum_v2f32, "vector_sum_v2f32", + 2, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyF32, + kArgTyV2F32) + +// vecTy table_lookup(vecTy tbl, vecTy idx) +// Performs a table vector lookup. +DEF_MIR_INTRINSIC(vector_table_lookup_v2i64, "vector_table_lookup_v2i64", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2I64, + kArgTyV2I64, kArgTyV2I64) +DEF_MIR_INTRINSIC(vector_table_lookup_v4i32, "vector_table_lookup_v4i32", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I32, + kArgTyV4I32, kArgTyV4I32) +DEF_MIR_INTRINSIC(vector_table_lookup_v8i16, "vector_table_lookup_v8i16", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8I16, + kArgTyV8I16, kArgTyV8I16) +DEF_MIR_INTRINSIC(vector_table_lookup_v16i8, "vector_table_lookup_v16i8", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV16I8, + kArgTyV16I8, kArgTyV16I8) +DEF_MIR_INTRINSIC(vector_table_lookup_v2u64, "vector_table_lookup_v2u64", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2U64, + kArgTyV2U64, kArgTyV2U64) +DEF_MIR_INTRINSIC(vector_table_lookup_v4u32, "vector_table_lookup_v4u32", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4U32, + kArgTyV4U32, kArgTyV4U32) +DEF_MIR_INTRINSIC(vector_table_lookup_v8u16, "vector_table_lookup_v8u16", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8U16, + kArgTyV8U16, kArgTyV8U16) +DEF_MIR_INTRINSIC(vector_table_lookup_v16u8, "vector_table_lookup_v16u8", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV16U8, + kArgTyV16U8, kArgTyV16U8) +DEF_MIR_INTRINSIC(vector_table_lookup_v2f64, "vector_table_lookup_v2f64", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2F64, + kArgTyV2F64, kArgTyV2F64) +DEF_MIR_INTRINSIC(vector_table_lookup_v4f32, "vector_table_lookup_v4f32", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4F32, + kArgTyV4F32, kArgTyV4F32) +DEF_MIR_INTRINSIC(vector_table_lookup_v1i64, "vector_table_lookup_v1i64", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV1I64, + kArgTyV1I64, kArgTyV1I64) +DEF_MIR_INTRINSIC(vector_table_lookup_v2i32, "vector_table_lookup_v2i32", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2I32, + kArgTyV2I32, kArgTyV2I32) +DEF_MIR_INTRINSIC(vector_table_lookup_v4i16, "vector_table_lookup_v4i16", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I16, + kArgTyV4I16, kArgTyV4I16) +DEF_MIR_INTRINSIC(vector_table_lookup_v8i8, "vector_table_lookup_v8i8", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8I8, + kArgTyV8I8, kArgTyV8I8) +DEF_MIR_INTRINSIC(vector_table_lookup_v1u64, "vector_table_lookup_v1u64", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV1U64, + kArgTyV1U64, kArgTyV1U64) +DEF_MIR_INTRINSIC(vector_table_lookup_v2u32, "vector_table_lookup_v2u32", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2U32, + kArgTyV2U32, kArgTyV2U32) +DEF_MIR_INTRINSIC(vector_table_lookup_v4u16, "vector_table_lookup_v4u16", + DEFAULT_NUM_INSN, INTRNISVECTOR 
| INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4U16, + kArgTyV4U16, kArgTyV4U16) +DEF_MIR_INTRINSIC(vector_table_lookup_v8u8, "vector_table_lookup_v8u8", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8U8, + kArgTyV8U8, kArgTyV8U8) +DEF_MIR_INTRINSIC(vector_table_lookup_v1f64, "vector_table_lookup_v1f64", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV1F64, + kArgTyV1F64, kArgTyV1F64) +DEF_MIR_INTRINSIC(vector_table_lookup_v2f32, "vector_table_lookup_v2f32", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2F32, + kArgTyV2F32, kArgTyV2F32) + +// vecArrTy vector_zip(vecTy a, vecTy b) +// Interleave the upper half of elements from a and b into the destination +// vector. +DEF_MIR_INTRINSIC(vector_zip_v2i32, "vector_zip_v2i32", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT | INTRNISSPECIAL, + kArgTyAgg, kArgTyV2I32, kArgTyV2I32) +DEF_MIR_INTRINSIC(vector_zip_v4i16, "vector_zip_v4i16", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT | INTRNISSPECIAL, + kArgTyAgg, kArgTyV4I16, kArgTyV4I16) +DEF_MIR_INTRINSIC(vector_zip_v8i8, "vector_zip_v8i8", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT | INTRNISSPECIAL, + kArgTyAgg, kArgTyV8I8, kArgTyV8I8) +DEF_MIR_INTRINSIC(vector_zip_v2u32, "vector_zip_v2u32", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT | INTRNISSPECIAL, + kArgTyAgg, kArgTyV2U32, kArgTyV2U32) +DEF_MIR_INTRINSIC(vector_zip_v4u16, "vector_zip_v4u16", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT | INTRNISSPECIAL, + kArgTyAgg, kArgTyV4U16, kArgTyV4U16) +DEF_MIR_INTRINSIC(vector_zip_v8u8, "vector_zip_v8u8", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT | INTRNISSPECIAL, + kArgTyAgg, kArgTyV8U8, kArgTyV8U8) +DEF_MIR_INTRINSIC(vector_zip_v2f32, "vector_zip_v2f32", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT | INTRNISSPECIAL, + kArgTyAgg, kArgTyV2F32, kArgTyV2F32) + +// vecTy vector_load(scalarTy *ptr) +// Load the elements pointed to by ptr into a vector. 
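+// Illustrative example (placeholder names): vector_load_v4i32(ptr) behaves like
+//   for (i = 0; i < 4; ++i) dst[i] = ((int32_t *)ptr)[i];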
+DEF_MIR_INTRINSIC(vector_load_v2i64, "vector_load_v2i64", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNLOADMEM | INTRNNOSIDEEFFECT, kArgTyV2I64, + kArgTyPtr) +DEF_MIR_INTRINSIC(vector_load_v4i32, "vector_load_v4i32", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNLOADMEM | INTRNNOSIDEEFFECT, kArgTyV4I32, + kArgTyPtr) +DEF_MIR_INTRINSIC(vector_load_v8i16, "vector_load_v8i16", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNLOADMEM | INTRNNOSIDEEFFECT, kArgTyV8I16, + kArgTyPtr) +DEF_MIR_INTRINSIC(vector_load_v16i8, "vector_load_v16i8", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNLOADMEM | INTRNNOSIDEEFFECT, kArgTyV16I8, + kArgTyPtr) +DEF_MIR_INTRINSIC(vector_load_v2u64, "vector_load_v2u64", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNLOADMEM | INTRNNOSIDEEFFECT, kArgTyV2U64, + kArgTyPtr) +DEF_MIR_INTRINSIC(vector_load_v4u32, "vector_load_v4u32", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNLOADMEM | INTRNNOSIDEEFFECT, kArgTyV4U32, + kArgTyPtr) +DEF_MIR_INTRINSIC(vector_load_v8u16, "vector_load_v8u16", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNLOADMEM | INTRNNOSIDEEFFECT, kArgTyV8U16, + kArgTyPtr) +DEF_MIR_INTRINSIC(vector_load_v16u8, "vector_load_v16u8", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNLOADMEM | INTRNNOSIDEEFFECT, kArgTyV16U8, + kArgTyPtr) +DEF_MIR_INTRINSIC(vector_load_v2f64, "vector_load_v2f64", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNLOADMEM | INTRNNOSIDEEFFECT, kArgTyV2F64, + kArgTyPtr) +DEF_MIR_INTRINSIC(vector_load_v4f32, "vector_load_v4f32", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNLOADMEM | INTRNNOSIDEEFFECT, kArgTyV4F32, + kArgTyPtr) +DEF_MIR_INTRINSIC(vector_load_v1i64, "vector_load_v1i64", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNLOADMEM | INTRNNOSIDEEFFECT, kArgTyV1I64, + kArgTyPtr) +DEF_MIR_INTRINSIC(vector_load_v2i32, "vector_load_v2i32", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNLOADMEM | INTRNNOSIDEEFFECT, kArgTyV2I32, + kArgTyPtr) +DEF_MIR_INTRINSIC(vector_load_v4i16, "vector_load_v4i16", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNLOADMEM | INTRNNOSIDEEFFECT, kArgTyV4I16, + kArgTyPtr) +DEF_MIR_INTRINSIC(vector_load_v8i8, "vector_load_v8i8", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNLOADMEM | INTRNNOSIDEEFFECT, kArgTyV8I8, + kArgTyPtr) +DEF_MIR_INTRINSIC(vector_load_v1u64, "vector_load_v1u64", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNLOADMEM | INTRNNOSIDEEFFECT, kArgTyV1U64, + kArgTyPtr) +DEF_MIR_INTRINSIC(vector_load_v2u32, "vector_load_v2u32", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNLOADMEM | INTRNNOSIDEEFFECT, kArgTyV2U32, + kArgTyPtr) +DEF_MIR_INTRINSIC(vector_load_v4u16, "vector_load_v4u16", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNLOADMEM | INTRNNOSIDEEFFECT, kArgTyV4U16, + kArgTyPtr) +DEF_MIR_INTRINSIC(vector_load_v8u8, "vector_load_v8u8", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNLOADMEM | INTRNNOSIDEEFFECT, kArgTyV8U8, + kArgTyPtr) +DEF_MIR_INTRINSIC(vector_load_v1f64, "vector_load_v1f64", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNLOADMEM | INTRNNOSIDEEFFECT, kArgTyV1F64, + kArgTyPtr) +DEF_MIR_INTRINSIC(vector_load_v2f32, "vector_load_v2f32", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNLOADMEM | INTRNNOSIDEEFFECT, kArgTyV2F32, + kArgTyPtr) + +// void vector_store(scalarTy *ptr, vecTy src) +// Store the elements from src into the memory pointed to by ptr. 
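+// Illustrative example (placeholder names): vector_store_v4i32(ptr, src) behaves like
+//   for (i = 0; i < 4; ++i) ((int32_t *)ptr)[i] = src[i];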
+DEF_MIR_INTRINSIC(vector_store_v2i64, "vector_store_v2i64", DEFAULT_NUM_INSN, INTRNISVECTOR, + kArgTyVoid, kArgTyPtr, kArgTyV2I64) +DEF_MIR_INTRINSIC(vector_store_v4i32, "vector_store_v4i32", DEFAULT_NUM_INSN, INTRNISVECTOR, + kArgTyVoid, kArgTyPtr, kArgTyV4I32) +DEF_MIR_INTRINSIC(vector_store_v8i16, "vector_store_v8i16", DEFAULT_NUM_INSN, INTRNISVECTOR, + kArgTyVoid, kArgTyPtr, kArgTyV8I16) +DEF_MIR_INTRINSIC(vector_store_v16i8, "vector_store_v16i8", DEFAULT_NUM_INSN, INTRNISVECTOR, + kArgTyVoid, kArgTyPtr, kArgTyV16I8) +DEF_MIR_INTRINSIC(vector_store_v2u64, "vector_store_v2u64", DEFAULT_NUM_INSN, INTRNISVECTOR, + kArgTyVoid, kArgTyPtr, kArgTyV2U64) +DEF_MIR_INTRINSIC(vector_store_v4u32, "vector_store_v4u32", DEFAULT_NUM_INSN, INTRNISVECTOR, + kArgTyVoid, kArgTyPtr, kArgTyV4U32) +DEF_MIR_INTRINSIC(vector_store_v8u16, "vector_store_v8u16", DEFAULT_NUM_INSN, INTRNISVECTOR, + kArgTyVoid, kArgTyPtr, kArgTyV8U16) +DEF_MIR_INTRINSIC(vector_store_v16u8, "vector_store_v16u8", DEFAULT_NUM_INSN, INTRNISVECTOR, + kArgTyVoid, kArgTyPtr, kArgTyV16U8) +DEF_MIR_INTRINSIC(vector_store_v2f64, "vector_store_v2f64", DEFAULT_NUM_INSN, INTRNISVECTOR, + kArgTyVoid, kArgTyPtr, kArgTyV2F64) +DEF_MIR_INTRINSIC(vector_store_v4f32, "vector_store_v4f32", DEFAULT_NUM_INSN, INTRNISVECTOR, + kArgTyVoid, kArgTyPtr, kArgTyV4F32) +DEF_MIR_INTRINSIC(vector_store_v1i64, "vector_store_v1i64", DEFAULT_NUM_INSN, INTRNISVECTOR, + kArgTyVoid, kArgTyPtr, kArgTyV1I64) +DEF_MIR_INTRINSIC(vector_store_v2i32, "vector_store_v2i32", DEFAULT_NUM_INSN, INTRNISVECTOR, + kArgTyVoid, kArgTyPtr, kArgTyV2I32) +DEF_MIR_INTRINSIC(vector_store_v4i16, "vector_store_v4i16", DEFAULT_NUM_INSN, INTRNISVECTOR, + kArgTyVoid, kArgTyPtr, kArgTyV4I16) +DEF_MIR_INTRINSIC(vector_store_v8i8, "vector_store_v8i8", DEFAULT_NUM_INSN, INTRNISVECTOR, + kArgTyVoid, kArgTyPtr, kArgTyV8I8) +DEF_MIR_INTRINSIC(vector_store_v1u64, "vector_store_v1u64", DEFAULT_NUM_INSN, INTRNISVECTOR, + kArgTyVoid, kArgTyPtr, kArgTyV1U64) +DEF_MIR_INTRINSIC(vector_store_v2u32, "vector_store_v2u32", DEFAULT_NUM_INSN, INTRNISVECTOR, + kArgTyVoid, kArgTyPtr, kArgTyV2U32) +DEF_MIR_INTRINSIC(vector_store_v4u16, "vector_store_v4u16", DEFAULT_NUM_INSN, INTRNISVECTOR, + kArgTyVoid, kArgTyPtr, kArgTyV4U16) +DEF_MIR_INTRINSIC(vector_store_v8u8, "vector_store_v8u8", DEFAULT_NUM_INSN, INTRNISVECTOR, + kArgTyVoid, kArgTyPtr, kArgTyV8U8) +DEF_MIR_INTRINSIC(vector_store_v1f64, "vector_store_v1f64", DEFAULT_NUM_INSN, INTRNISVECTOR, + kArgTyVoid, kArgTyPtr, kArgTyV1F64) +DEF_MIR_INTRINSIC(vector_store_v2f32, "vector_store_v2f32", DEFAULT_NUM_INSN, INTRNISVECTOR, + kArgTyVoid, kArgTyPtr, kArgTyV2F32) + +// vecTy vector_subl_low(vecTy src1, vecTy src2) +// Subtract each element of the source vector to second source +// widen the result into the destination vector. 
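+// Sketch of the widening subtract described above (illustrative only; dst/src are placeholders).
+// For vector_subl_low_v8i8 below (return V8I16, args V8I8 and V8I8):
+//   for (i = 0; i < 8; ++i) dst[i] = (int16_t)src1[i] - (int16_t)src2[i];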
+DEF_MIR_INTRINSIC(vector_subl_low_v8i8, "vector_subl_low_v8i8", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8I16, + kArgTyV8I8, kArgTyV8I8) +DEF_MIR_INTRINSIC(vector_subl_low_v4i16, "vector_subl_low_v4i16", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I32, + kArgTyV4I16, kArgTyV4I16) +DEF_MIR_INTRINSIC(vector_subl_low_v2i32, "vector_subl_low_v2i32", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2I64, + kArgTyV2I32, kArgTyV2I32) +DEF_MIR_INTRINSIC(vector_subl_low_v8u8, "vector_subl_low_v8u8", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8U16, + kArgTyV8U8, kArgTyV8U8) +DEF_MIR_INTRINSIC(vector_subl_low_v4u16, "vector_subl_low_v4u16", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4U32, + kArgTyV4U16, kArgTyV4U16) +DEF_MIR_INTRINSIC(vector_subl_low_v2u32, "vector_subl_low_v2u32", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2U64, + kArgTyV2U32, kArgTyV2U32) + +// vecTy vector_subl_high(vecTy src1, vecTy src2) +// Subtract each element of the source vector to upper half of second source +// widen the result into the destination vector. +DEF_MIR_INTRINSIC(vector_subl_high_v8i8, "vector_subl_high_v8i8", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8I16, + kArgTyV16I8, kArgTyV16I8) +DEF_MIR_INTRINSIC(vector_subl_high_v4i16, "vector_subl_high_v4i16", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I32, + kArgTyV8I16, kArgTyV8I16) +DEF_MIR_INTRINSIC(vector_subl_high_v2i32, "vector_subl_high_v2i32", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2I64, + kArgTyV4I32, kArgTyV4I32) +DEF_MIR_INTRINSIC(vector_subl_high_v8u8, "vector_subl_high_v8u8", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8U16, + kArgTyV16U8, kArgTyV16U8) +DEF_MIR_INTRINSIC(vector_subl_high_v4u16, "vector_subl_high_v4u16", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4U32, + kArgTyV8U16, kArgTyV8U16) +DEF_MIR_INTRINSIC(vector_subl_high_v2u32, "vector_subl_high_v2u32", + DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2U64, + kArgTyV4U32, kArgTyV4U32) + +// vecTy vector_subw_low(vecTy src1, vecTy src2) +// Subtract each element of the source vector to second source +// widen the result into the destination vector. 
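+// Sketch of the mixed-width subtract described above (illustrative only; dst/src are placeholders).
+// For vector_subw_low_v8i8 below (return V8I16, args V8I16 and V8I8):
+//   for (i = 0; i < 8; ++i) dst[i] = src1[i] - (int16_t)src2[i];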
+DEF_MIR_INTRINSIC(vector_subw_low_v8i8, "vector_subw_low_v8i8",
+                  DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8I16,
+                  kArgTyV8I16, kArgTyV8I8)
+DEF_MIR_INTRINSIC(vector_subw_low_v4i16, "vector_subw_low_v4i16",
+                  DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I32,
+                  kArgTyV4I32, kArgTyV4I16)
+DEF_MIR_INTRINSIC(vector_subw_low_v2i32, "vector_subw_low_v2i32",
+                  DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2I64,
+                  kArgTyV2I64, kArgTyV2I32)
+DEF_MIR_INTRINSIC(vector_subw_low_v8u8, "vector_subw_low_v8u8",
+                  DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8U16,
+                  kArgTyV8U16, kArgTyV8U8)
+DEF_MIR_INTRINSIC(vector_subw_low_v4u16, "vector_subw_low_v4u16",
+                  DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4U32,
+                  kArgTyV4U32, kArgTyV4U16)
+DEF_MIR_INTRINSIC(vector_subw_low_v2u32, "vector_subw_low_v2u32",
+                  DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2U64,
+                  kArgTyV2U64, kArgTyV2U32)
+
+// vecTy vector_subw_high(vecTy src1, vecTy src2)
+// Subtract each element of the upper half of the second source vector, after widening it, from
+// the corresponding element of the wide first source vector and write the result into the
+// destination vector.
+DEF_MIR_INTRINSIC(vector_subw_high_v8i8, "vector_subw_high_v8i8",
+                  DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8I16,
+                  kArgTyV8I16, kArgTyV16I8)
+DEF_MIR_INTRINSIC(vector_subw_high_v4i16, "vector_subw_high_v4i16",
+                  DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4I32,
+                  kArgTyV4I32, kArgTyV8I16)
+DEF_MIR_INTRINSIC(vector_subw_high_v2i32, "vector_subw_high_v2i32",
+                  DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2I64,
+                  kArgTyV2I64, kArgTyV4I32)
+DEF_MIR_INTRINSIC(vector_subw_high_v8u8, "vector_subw_high_v8u8",
+                  DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV8U16,
+                  kArgTyV8U16, kArgTyV16U8)
+DEF_MIR_INTRINSIC(vector_subw_high_v4u16, "vector_subw_high_v4u16",
+                  DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV4U32,
+                  kArgTyV4U32, kArgTyV8U16)
+DEF_MIR_INTRINSIC(vector_subw_high_v2u32, "vector_subw_high_v2u32",
+                  DEFAULT_NUM_INSN, INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT, kArgTyV2U64,
+                  kArgTyV2U64, kArgTyV4U32)
diff --git a/src/mapleall/maple_ir/include/intrinsics.def b/src/mapleall/maple_ir/include/intrinsics.def
new file mode 100644
index 0000000000000000000000000000000000000000..097769a147d8e1dc949adb1537d114da1ccdc4ed
--- /dev/null
+++ b/src/mapleall/maple_ir/include/intrinsics.def
@@ -0,0 +1,155 @@
+/*
+ * Copyright (c) [2019-2021] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under the Mulan PSL v1.
+ * You can use this software according to the terms and conditions of the Mulan PSL v1.
+ * You may obtain a copy of Mulan PSL v1 at:
+ *
+ *     http://license.coscl.org.cn/MulanPSL
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
+ * FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v1 for more details.
+ */ +// DEF_MIR_INTRINSIC(STR, NAME, NUM_INSN, INTRN_CLASS, RETURN_TYPE, ARG0, ARG1, ARG2, ARG3, ARG4, ARG5) +#define DEFAULT_NUM_INSN 3 +DEF_MIR_INTRINSIC(UNDEFINED,\ + nullptr, DEFAULT_NUM_INSN, kIntrnUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(DEX_ATOMIC_INC,\ + "__dex_ainc", DEFAULT_NUM_INSN, kIntrnIsAtomic, kArgTyI32, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(DEX_ATOMIC_DEC,\ + "__dex_adec", DEFAULT_NUM_INSN, kIntrnIsAtomic, kArgTyI32, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(MPL_ATOMIC_EXCHANGE_PTR,\ + "__mpl_atomic_exchange_ptr", DEFAULT_NUM_INSN, kIntrnIsAtomic, kArgTyPtr, kArgTyPtr, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(MPL_CLINIT_CHECK,\ + "__mpl_clinit_check", DEFAULT_NUM_INSN, INTRNISJAVA | INTRNNOSIDEEFFECT | INTRNISSPECIAL, kArgTyVoid, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(MPL_PROF_COUNTER_INC,\ + "__mpl_prof_counter_inc", DEFAULT_NUM_INSN, INTRNNOSIDEEFFECT | INTRNISSPECIAL, kArgTyVoid, kArgTyU32, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(MPL_CLEAR_STACK,\ + "__mpl_clear_stack", DEFAULT_NUM_INSN, kIntrnUndef, kArgTyVoid, kArgTyDynany, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(MPL_GET_VTAB_FUNC,\ + "MCC_getFuncPtrFromVtab", DEFAULT_NUM_INSN, kIntrnUndef, kArgTyA64, kArgTyDynany, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(MPL_READ_STATIC_OFFSET_TAB,\ + "__mpl_read_static_offset", DEFAULT_NUM_INSN, INTRNISJAVA | INTRNNOSIDEEFFECT, kArgTyDynany, kArgTyDynany, kArgTyU32, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(MPL_READ_OVTABLE_ENTRY,\ + "__mpl_const_offset", DEFAULT_NUM_INSN, INTRNISPURE, kArgTyA32, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(MPL_READ_OVTABLE_ENTRY2,\ + "__mpl_const_offset2", DEFAULT_NUM_INSN, INTRNISPURE, kArgTyA32, kArgTyDynany, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(MPL_READ_OVTABLE_ENTRY_LAZY,\ + "__mpl_const_offset_lazy", DEFAULT_NUM_INSN, INTRNNOSIDEEFFECT, kArgTyA32, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(MPL_READ_OVTABLE_ENTRY_VTAB_LAZY,\ + "__mpl_const_offset_vtab_lazy", DEFAULT_NUM_INSN, INTRNISPURE, kArgTyDynany, kArgTyDynany, kArgTyDynany, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(MPL_READ_OVTABLE_ENTRY_FIELD_LAZY,\ + "__mpl_const_offset_field_lazy", DEFAULT_NUM_INSN, INTRNISPURE, kArgTyA32, kArgTyDynany, kArgTyDynany, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(MPL_BOUNDARY_CHECK,\ + "", DEFAULT_NUM_INSN, INTRNISJAVA | INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyU1, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(MPL_READ_ARRAYCLASS_CACHE_ENTRY,\ + "__mpl_const_arrayclass_cache", DEFAULT_NUM_INSN, kIntrnUndef, kArgTyPtr, kArgTyU32, kArgTyDynany, kArgTyDynany, kArgTyUndef, kArgTyUndef, kArgTyUndef) + +// start of RC Intrinsics with one parameters +DEF_MIR_INTRINSIC(MCCSetPermanent,\ + "MCC_SetObjectPermanent", DEFAULT_NUM_INSN, INTRNISRC | INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyRef) +DEF_MIR_INTRINSIC(MCCIncRef,\ + "MCC_IncRef_NaiveRCFast", DEFAULT_NUM_INSN, INTRNISRC | 
INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyRef) +DEF_MIR_INTRINSIC(MCCDecRef,\ + "MCC_DecRef_NaiveRCFast", DEFAULT_NUM_INSN, INTRNISRC | INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyRef) +DEF_MIR_INTRINSIC(MCCDecRefReset,\ + "MCC_ClearLocalStackRef", DEFAULT_NUM_INSN, INTRNISRC | INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyPtr) +DEF_MIR_INTRINSIC(MCCLoadRefSVol,\ + "MCC_LoadVolatileStaticField", DEFAULT_NUM_INSN, INTRNISRC | INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyRef) +DEF_MIR_INTRINSIC(MCCLoadRefS,\ + "MCC_LoadRefStatic", DEFAULT_NUM_INSN, INTRNISRC | INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyRef) +DEF_MIR_INTRINSIC(MCCSetObjectPermanent,\ + "MCC_SetObjectPermanent", DEFAULT_NUM_INSN, INTRNISRC | INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyRef) + +// start of RC Intrinsics with two parameters +DEF_MIR_INTRINSIC(MCCCheck,\ + "MCC_CheckRefCount", DEFAULT_NUM_INSN, INTRNISRC | INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyRef, kArgTyU32) +DEF_MIR_INTRINSIC(MCCCheckArrayStore,\ + "MCC_Reflect_Check_Arraystore", DEFAULT_NUM_INSN, INTRNISRC | INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyRef, kArgTyRef) +DEF_MIR_INTRINSIC(MCCIncDecRef,\ + "MCC_IncDecRef_NaiveRCFast", DEFAULT_NUM_INSN, INTRNISRC | INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyRef, kArgTyRef) +DEF_MIR_INTRINSIC(MCCIncDecRefReset,\ + "MCC_IncDecRefReset", DEFAULT_NUM_INSN, INTRNISRC | INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyRef, kArgTyPtr) +DEF_MIR_INTRINSIC(MCCDecRefResetPair,\ + "MCC_DecRefResetPair", DEFAULT_NUM_INSN, INTRNISRC | INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyPtr, kArgTyPtr) +DEF_MIR_INTRINSIC(MCCLoadWeakVol,\ + "MCC_LoadVolatileWeakField", DEFAULT_NUM_INSN, INTRNISRC | INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyRef, kArgTyRef) +DEF_MIR_INTRINSIC(MCCLoadWeak,\ + "MCC_LoadWeakField", DEFAULT_NUM_INSN, INTRNISRC | INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyRef, kArgTyRef) +DEF_MIR_INTRINSIC(MCCLoadRef,\ + "MCC_LoadRefField_NaiveRCFast", DEFAULT_NUM_INSN, INTRNISRC | INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyRef, kArgTyRef) +DEF_MIR_INTRINSIC(MCCLoadRefVol,\ + "MCC_LoadVolatileField", DEFAULT_NUM_INSN, INTRNISRC | INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyRef, kArgTyRef) +DEF_MIR_INTRINSIC(MCCWriteReferent,\ + "MCC_WriteReferent", DEFAULT_NUM_INSN, INTRNISRC | INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyRef, kArgTyRef) +DEF_MIR_INTRINSIC(MCCWriteSVolNoInc,\ + "MCC_WriteVolatileStaticFieldNoInc", DEFAULT_NUM_INSN, INTRNISRC | INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyRef, kArgTyRef) +DEF_MIR_INTRINSIC(MCCWriteSVolNoDec,\ + "MCC_WriteVolatileStaticFieldNoDec", DEFAULT_NUM_INSN, INTRNISRC | INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyRef, kArgTyRef) +DEF_MIR_INTRINSIC(MCCWriteSVolNoRC,\ + "MCC_WriteVolatileStaticFieldNoRC", DEFAULT_NUM_INSN, INTRNISRC | INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyRef, kArgTyRef) +DEF_MIR_INTRINSIC(MCCWriteSVol,\ + "MCC_WriteVolatileStaticField", DEFAULT_NUM_INSN, INTRNISRC | INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyRef, kArgTyRef) +DEF_MIR_INTRINSIC(MCCWriteSNoInc,\ + "MCC_WriteRefFieldStaticNoInc", DEFAULT_NUM_INSN, INTRNISRC | INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyRef, kArgTyRef) +DEF_MIR_INTRINSIC(MCCWriteSNoDec,\ + "MCC_WriteRefFieldStaticNoDec", DEFAULT_NUM_INSN, INTRNISRC | INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyRef, kArgTyRef) +DEF_MIR_INTRINSIC(MCCWriteSNoRC,\ + "MCC_WriteRefFieldStaticNoRC", DEFAULT_NUM_INSN, INTRNISRC | INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyRef, kArgTyRef) +DEF_MIR_INTRINSIC(MCCWriteS,\ + "MCC_WriteRefFieldStatic", DEFAULT_NUM_INSN, INTRNISRC | INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyRef, kArgTyRef) + +// start of RC intrinsics with three parameters +DEF_MIR_INTRINSIC(MCCWriteVolNoInc,\ + 
"MCC_WriteVolatileFieldNoInc", DEFAULT_NUM_INSN, INTRNISRC | INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyRef, kArgTyRef, kArgTyRef) +DEF_MIR_INTRINSIC(MCCWriteVolNoDec,\ + "MCC_WriteVolatileFieldNoDec", DEFAULT_NUM_INSN, INTRNISRC | INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyRef, kArgTyRef, kArgTyRef) +DEF_MIR_INTRINSIC(MCCWriteVolNoRC,\ + "MCC_WriteVolatileFieldNoRC", DEFAULT_NUM_INSN, INTRNISRC | INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyRef, kArgTyRef, kArgTyRef) +DEF_MIR_INTRINSIC(MCCWriteVol,\ + "MCC_WriteVolatileField", DEFAULT_NUM_INSN, INTRNISRC | INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyRef, kArgTyRef, kArgTyRef) +DEF_MIR_INTRINSIC(MCCWriteNoInc,\ + "MCC_WriteRefFieldNoInc", DEFAULT_NUM_INSN, INTRNISRC | INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyRef, kArgTyRef, kArgTyRef) +DEF_MIR_INTRINSIC(MCCWriteNoDec,\ + "MCC_WriteRefFieldNoDec", DEFAULT_NUM_INSN, INTRNISRC | INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyRef, kArgTyRef, kArgTyRef) +DEF_MIR_INTRINSIC(MCCWriteNoRC,\ + "MCC_WriteRefFieldNoRC", DEFAULT_NUM_INSN, INTRNISRC | INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyRef, kArgTyRef, kArgTyRef) +DEF_MIR_INTRINSIC(MCCWrite,\ + "MCC_WriteRefField", DEFAULT_NUM_INSN, INTRNISRC | INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyRef, kArgTyRef, kArgTyRef) +DEF_MIR_INTRINSIC(MCCWriteVolWeak,\ + "MCC_WriteVolatileWeakField", DEFAULT_NUM_INSN, INTRNISRC | INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyRef, kArgTyRef, kArgTyRef) +DEF_MIR_INTRINSIC(MCCWriteWeak,\ + "MCC_WriteWeakField", DEFAULT_NUM_INSN, INTRNISRC | INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyRef, kArgTyRef, kArgTyRef) + +DEF_MIR_INTRINSIC(MPL_CLEANUP_LOCALREFVARS,\ + "__mpl_cleanup_localrefvars", DEFAULT_NUM_INSN, INTRNISJAVA | INTRNNOSIDEEFFECT | INTRNISSPECIAL, kArgTyUndef, kArgTyRef, kArgTyRef, kArgTyRef, kArgTyRef, kArgTyRef, kArgTyRef) +DEF_MIR_INTRINSIC(MPL_CLEANUP_LOCALREFVARS_SKIP,\ + "__mpl_cleanup_localrefvars_skip", DEFAULT_NUM_INSN, INTRNISJAVA | INTRNNOSIDEEFFECT | INTRNISSPECIAL, kArgTyUndef, kArgTyRef, kArgTyRef, kArgTyRef, kArgTyRef, kArgTyRef, kArgTyRef) +DEF_MIR_INTRINSIC(MPL_MEMSET_LOCALVAR,\ + "", DEFAULT_NUM_INSN, kIntrnUndef, kArgTyPtr, kArgTyU32, kArgTyU8, kArgTyU32, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(MPL_SET_CLASS,\ + "", DEFAULT_NUM_INSN, kIntrnUndef, kArgTyPtr, kArgTyPtr, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) +DEF_MIR_INTRINSIC(MPL_CLEANUP_NORETESCOBJS,\ + "__mpl_cleanup_noretescobjs", DEFAULT_NUM_INSN, INTRNISJAVA | INTRNNOSIDEEFFECT | INTRNISSPECIAL, kArgTyUndef, kArgTyRef, kArgTyRef,\ + kArgTyRef, kArgTyRef, kArgTyRef, kArgTyRef) + +// start of GC Intrinsics +DEF_MIR_INTRINSIC(MCCGCCheck,\ + "MCC_CheckObjAllocated", DEFAULT_NUM_INSN, INTRNISRC | INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyRef) + +// start of Profile Intrinsics +DEF_MIR_INTRINSIC(MCCSaveProf,\ + "MCC_SaveProfile", DEFAULT_NUM_INSN, INTRNISRC | INTRNNOSIDEEFFECT, kArgTyVoid, kArgTyRef) + +#include "intrinsic_java.def" +#include "simplifyintrinsics.def" +#include "intrinsic_c.def" +#include "intrinsic_js.def" +#include "intrinsic_js_eng.def" +#include "dex2mpl/dexintrinsic.def" +#include "intrinsic_dai.def" +#include "intrinsic_vector.def" +DEF_MIR_INTRINSIC(LAST,\ + nullptr, DEFAULT_NUM_INSN, kIntrnUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef, kArgTyUndef) diff --git a/src/mapleall/maple_ir/include/intrinsics.h b/src/mapleall/maple_ir/include/intrinsics.h new file mode 100644 index 0000000000000000000000000000000000000000..91e4249dbe6dabdc4fa8cd6b68f9d7d0faef0032 --- /dev/null +++ 
b/src/mapleall/maple_ir/include/intrinsics.h @@ -0,0 +1,222 @@ +/* + * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLE_IR_INCLUDE_INTRINSICS_H +#define MAPLE_IR_INCLUDE_INTRINSICS_H +#include "prim_types.h" +#include "intrinsic_op.h" + +namespace maple { +enum IntrinProperty { + kIntrnUndef, + kIntrnIsJs, + kIntrnIsJsUnary, + kIntrnIsJsBinary, + kIntrnIsJava, + kIntrnIsJavaUnary, + kIntrnIsJavaBinary, + kIntrnIsReturnStruct, + kIntrnNoSideEffect, // read only + kIntrnIsLoadMem, + kIntrnIsPure, + kIntrnNeverReturn, + kIntrnIsAtomic, + kIntrnIsRC, + kIntrnIsSpecial, + kIntrnIsVector, + // the opnd is marked as "WRITE" but not "READ" => write only + // the opnd is marked as "READ" but not "WRITE" => read only + // the opnd is marked with nothing but has side effect => write & read + // the opnd is marked as "WRITE" and "READ" => write & read + kIntrnWriteFirstOpnd, + kIntrnWriteSecondOpnd, + kIntrnWriteThirdOpnd, + kIntrnWriteFourthOpnd, + kIntrnWriteFifthOpnd, + kIntrnWriteSixthOpnd, + kIntrnReadFirstOpnd, + kIntrnReadSecondOpnd, + kIntrnReadThirdOpnd, + kIntrnReadFourthOpnd, + kIntrnReadFifthOpnd, + kIntrnReadSixthOpnd, +}; + +enum IntrinArgType { + kArgTyUndef, + kArgTyVoid, + kArgTyI8, + kArgTyI16, + kArgTyI32, + kArgTyI64, + kArgTyU8, + kArgTyU16, + kArgTyU32, + kArgTyU64, + kArgTyU1, + kArgTyPtr, + kArgTyRef, + kArgTyA32, + kArgTyA64, + kArgTyF32, + kArgTyF64, + kArgTyF128, + kArgTyC64, + kArgTyC128, + kArgTyAgg, + kArgTyV2I64, + kArgTyV4I32, + kArgTyV8I16, + kArgTyV16I8, + kArgTyV2U64, + kArgTyV4U32, + kArgTyV8U16, + kArgTyV16U8, + kArgTyV2F64, + kArgTyV4F32, + kArgTyV1I64, + kArgTyV2I32, + kArgTyV4I16, + kArgTyV8I8, + kArgTyV1U64, + kArgTyV2U32, + kArgTyV4U16, + kArgTyV8U8, + kArgTyV1F64, + kArgTyV2F32, +#ifdef DYNAMICLANG + kArgTyDynany, + kArgTyDynu32, + kArgTyDyni32, + kArgTyDynundef, + kArgTyDynnull, + kArgTyDynhole, + kArgTyDynbool, + kArgTyDynf64, + kArgTyDynf32, + kArgTySimplestr, + kArgTyDynstr, + kArgTySimpleobj, + kArgTyDynobj +#endif +}; + +constexpr uint32 INTRNISJS = 1U << kIntrnIsJs; +constexpr uint32 INTRNISJSUNARY = 1U << kIntrnIsJsUnary; +constexpr uint32 INTRNISJSBINARY = 1U << kIntrnIsJsBinary; +constexpr uint32 INTRNISJAVA = 1U << kIntrnIsJava; +constexpr uint32 INTRNNOSIDEEFFECT = 1U << kIntrnNoSideEffect; +constexpr uint32 INTRNRETURNSTRUCT = 1U << kIntrnIsReturnStruct; +constexpr uint32 INTRNLOADMEM = 1U << kIntrnIsLoadMem; +constexpr uint32 INTRNISPURE = 1U << kIntrnIsPure; +constexpr uint32 INTRNNEVERRETURN = 1U << kIntrnNeverReturn; +constexpr uint32 INTRNATOMIC = 1U << kIntrnIsAtomic; +constexpr uint32 INTRNISRC = 1U << kIntrnIsRC; +constexpr uint32 INTRNISSPECIAL = 1U << kIntrnIsSpecial; +constexpr uint32 INTRNISVECTOR = 1U << kIntrnIsVector; +constexpr uint32 INTRNWRITEFIRSTOPND = 1U << kIntrnWriteFirstOpnd; +constexpr uint32 INTRNWRITESECONDOPND = 1U << kIntrnWriteSecondOpnd; +constexpr uint32 INTRNWRITETHIRDOPND = 1U << kIntrnWriteThirdOpnd; +constexpr uint32 INTRNWRITEFOURTHOPND = 1U << kIntrnWriteFourthOpnd; 
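// Editor's note (illustrative only, not part of the patch): each of these constants is a
// single-bit mask, so an intrinsic's properties field is built by OR-ing flags together and
// queried with a bitwise AND, for example:
//   constexpr uint32 kProps = INTRNISVECTOR | INTRNISPURE | INTRNNOSIDEEFFECT;
//   static_assert((kProps & INTRNISPURE) != 0, "pure bit is set");
//   static_assert((kProps & INTRNISRC) == 0, "RC bit is not set");
// The remaining operand read/write masks continue below.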
+constexpr uint32 INTRNWRITEFIFTHOPND = 1U << kIntrnWriteFifthOpnd;
+constexpr uint32 INTRNWRITESIXTHOPND = 1U << kIntrnWriteSixthOpnd;
+constexpr uint32 INTRNREADFIRSTOPND = 1U << kIntrnReadFirstOpnd;
+constexpr uint32 INTRNREADSECONDOPND = 1U << kIntrnReadSecondOpnd;
+constexpr uint32 INTRNREADTHIRDOPND = 1U << kIntrnReadThirdOpnd;
+constexpr uint32 INTRNREADFOURTHOPND = 1U << kIntrnReadFourthOpnd;
+constexpr uint32 INTRNREADFIFTHOPND = 1U << kIntrnReadFifthOpnd;
+constexpr uint32 INTRNREADSIXTHOPND = 1U << kIntrnReadSixthOpnd;
+class MIRType;    // circular dependency exists, no other choice
+class MIRModule;  // circular dependency exists, no other choice
+struct IntrinDesc {
+  static constexpr int kMaxArgsNum = 7;
+  const char *name;
+  size_t numInsn;
+  uint32 properties;
+  IntrinArgType argTypes[1 + kMaxArgsNum];  // argTypes[0] is the return type
+  bool IsJS() const {
+    return static_cast<bool>(properties & INTRNISJS);
+  }
+
+  bool IsJava() const {
+    return static_cast<bool>(properties & INTRNISJAVA);
+  }
+
+  bool IsJsUnary() const {
+    return static_cast<bool>(properties & INTRNISJSUNARY);
+  }
+
+  bool IsJsBinary() const {
+    return static_cast<bool>(properties & INTRNISJSBINARY);
+  }
+
+  bool IsJsOp() const {
+    return static_cast<bool>(properties & INTRNISJSUNARY) || static_cast<bool>(properties & INTRNISJSBINARY);
+  }
+
+  bool IsLoadMem() const {
+    return static_cast<bool>(properties & INTRNLOADMEM);
+  }
+
+  bool IsJsReturnStruct() const {
+    return static_cast<bool>(properties & INTRNRETURNSTRUCT);
+  }
+
+  bool IsPure() const {
+    return static_cast<bool>(properties & INTRNISPURE);
+  }
+
+  bool IsNeverReturn() const {
+    return static_cast<bool>(properties & INTRNNEVERRETURN);
+  }
+
+  bool IsAtomic() const {
+    return static_cast<bool>(properties & INTRNATOMIC);
+  }
+
+  bool IsRC() const {
+    return static_cast<bool>(properties & INTRNISRC);
+  }
+
+  bool IsSpecial() const {
+    return static_cast<bool>(properties & INTRNISSPECIAL);
+  }
+
+  bool HasNoSideEffect() const {
+    return static_cast<bool>(properties & INTRNNOSIDEEFFECT);
+  }
+
+  bool IsVectorOp() const {
+    return static_cast<bool>(properties & INTRNISVECTOR);
+  }
+
+  size_t GetNumInsn() const {
+    return numInsn;
+  }
+
+  bool IsNthOpndMarkedToWrite(uint32 opndIdx) const;
+  bool IsNthOpndMarkedToRead(uint32 opndIdx) const;
+  bool ReadNthOpnd(uint32 opndIdx) const;
+  bool WriteNthOpnd(uint32 opndIdx) const;
+  MIRType *GetReturnType() const;
+  MIRType *GetArgType(uint32 index) const;
+  MIRType *GetTypeFromArgTy(IntrinArgType argType) const;
+  static MIRType *jsValueType;
+  static MIRModule *mirModule;
+  static void InitMIRModule(MIRModule *mod);
+  static MIRType *GetOrCreateJSValueType();
+  static IntrinDesc intrinTable[INTRN_LAST + 1];
+};
+}  // namespace maple
+#endif  // MAPLE_IR_INCLUDE_INTRINSICS_H
diff --git a/src/mapleall/maple_ir/include/ir_safe_cast_traits.def b/src/mapleall/maple_ir/include/ir_safe_cast_traits.def
new file mode 100644
index 0000000000000000000000000000000000000000..14ed1a367b4ea5f8b738103b6412af97f7ae2988
--- /dev/null
+++ b/src/mapleall/maple_ir/include/ir_safe_cast_traits.def
@@ -0,0 +1,247 @@
+/*
+ * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved.
+ *
+ * OpenArkCompiler is licensed under the Mulan PSL v1.
+ * You can use this software according to the terms and conditions of the Mulan PSL v1.
+ * You may obtain a copy of Mulan PSL v1 at: + * + * http://license.coscl.org.cn/MulanPSL + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v1 for more details. + */ +#include +#include "opcode_info.h" + +namespace maple { +#ifdef LOAD_SAFE_CAST_FOR_MIR_CONST +#undef LOAD_SAFE_CAST_FOR_MIR_CONST +REGISTER_SAFE_CAST(MIRIntConst, from.GetKind() == kConstInt); +REGISTER_SAFE_CAST(MIRAddrofConst, from.GetKind() == kConstAddrof); +REGISTER_SAFE_CAST(MIRAddroffuncConst, from.GetKind() == kConstAddrofFunc); +REGISTER_SAFE_CAST(MIRLblConst, from.GetKind() == kConstLblConst); +REGISTER_SAFE_CAST(MIRStrConst, from.GetKind() == kConstStrConst); +REGISTER_SAFE_CAST(MIRStr16Const, from.GetKind() == kConstStr16Const); +REGISTER_SAFE_CAST(MIRFloatConst, from.GetKind() == kConstFloatConst); +REGISTER_SAFE_CAST(MIRDoubleConst, from.GetKind() == kConstDoubleConst); +REGISTER_SAFE_CAST(MIRFloat128Const, from.GetKind() == kConstFloat128Const); +REGISTER_SAFE_CAST(MIRAggConst, from.GetKind() == kConstAggConst); +REGISTER_SAFE_CAST(MIRStConst, from.GetKind() == kConstStConst); +#endif + +#ifdef LOAD_SAFE_CAST_FOR_MIR_TYPE +#undef LOAD_SAFE_CAST_FOR_MIR_TYPE +REGISTER_SAFE_CAST(MIRPtrType, from.GetKind() == kTypePointer); +REGISTER_SAFE_CAST(MIRArrayType, from.GetKind() == kTypeArray); +REGISTER_SAFE_CAST(MIRFarrayType, from.GetKind() == kTypeFArray || + instance_of(from)); +REGISTER_SAFE_CAST(MIRStructType, from.GetKind() == kTypeStruct || + from.GetKind() == kTypeStructIncomplete || + from.GetKind() == kTypeUnion || + instance_of(from) || + instance_of(from)); +REGISTER_SAFE_CAST(MIRJarrayType, from.GetKind() == kTypeJArray); +REGISTER_SAFE_CAST(MIRClassType, from.GetKind() == kTypeClass || + from.GetKind() == kTypeClassIncomplete); +REGISTER_SAFE_CAST(MIRInterfaceType, from.GetKind() == kTypeInterface || + from.GetKind() == kTypeInterfaceIncomplete); +REGISTER_SAFE_CAST(MIRBitFieldType, from.GetKind() == kTypeBitField); +REGISTER_SAFE_CAST(MIRFuncType, from.GetKind() == kTypeFunction); +REGISTER_SAFE_CAST(MIRTypeByName, from.GetKind() == kTypeByName); +REGISTER_SAFE_CAST(MIRTypeParam, from.GetKind() == kTypeParam); +REGISTER_SAFE_CAST(MIRInstantVectorType, from.GetKind() == kTypeInstantVector); +REGISTER_SAFE_CAST(MIRGenericInstantType, from.GetKind() == kTypeGenericInstant); +#endif + +#ifdef LOAD_SAFE_CAST_FOR_MIR_NODE +#undef LOAD_SAFE_CAST_FOR_MIR_NODE +REGISTER_SAFE_CAST(UnaryNode, from.GetOpCode() == OP_abs || + from.GetOpCode() == OP_bnot || + from.GetOpCode() == OP_lnot || + from.GetOpCode() == OP_neg || + from.GetOpCode() == OP_recip || + from.GetOpCode() == OP_sqrt || + from.GetOpCode() == OP_alloca || + from.GetOpCode() == OP_malloc || + instance_of(from) || + instance_of(from) || + instance_of(from) || + instance_of(from) || + instance_of(from)); +REGISTER_SAFE_CAST(TypeCvtNode, from.GetOpCode() == OP_ceil || + from.GetOpCode() == OP_cvt || + from.GetOpCode() == OP_floor || + from.GetOpCode() == OP_round || + from.GetOpCode() == OP_trunc || + instance_of(from)); +REGISTER_SAFE_CAST(RetypeNode, from.GetOpCode() == OP_retype); +REGISTER_SAFE_CAST(ExtractbitsNode, from.GetOpCode() == OP_extractbits || + from.GetOpCode() == OP_sext || + from.GetOpCode() == OP_zext); +REGISTER_SAFE_CAST(GCMallocNode, from.GetOpCode() == OP_gcmalloc || + from.GetOpCode() = OP_gcpermalloc); +REGISTER_SAFE_CAST(JarrayMallocNode, 
from.GetOpCode() == OP_gcmallocjarray ||
+                    from.GetOpCode() == OP_gcpermallocjarray);
+REGISTER_SAFE_CAST(IreadNode, from.GetOpCode() == OP_iread ||
+                    from.GetOpCode() == OP_iaddrof);
+REGISTER_SAFE_CAST(IreadoffNode, from.GetOpCode() == OP_ireadoff);
+REGISTER_SAFE_CAST(IreadFPoffNode, from.GetOpCode() == OP_ireadfpoff);
+REGISTER_SAFE_CAST(BinaryNode, from.GetOpCode() == OP_add ||
+                    from.GetOpCode() == OP_sub ||
+                    from.GetOpCode() == OP_mul ||
+                    from.GetOpCode() == OP_div ||
+                    from.GetOpCode() == OP_rem ||
+                    from.GetOpCode() == OP_ashr ||
+                    from.GetOpCode() == OP_lshr ||
+                    from.GetOpCode() == OP_shl ||
+                    from.GetOpCode() == OP_max ||
+                    from.GetOpCode() == OP_min ||
+                    from.GetOpCode() == OP_band ||
+                    from.GetOpCode() == OP_bior ||
+                    from.GetOpCode() == OP_bxor ||
+                    from.GetOpCode() == OP_CG_array_elem_add ||
+                    from.GetOpCode() == OP_land ||
+                    from.GetOpCode() == OP_lior ||
+                    from.GetOpCode() == OP_cand ||
+                    from.GetOpCode() == OP_cior ||
+                    instance_of(from) ||
+                    instance_of(from) ||
+                    instance_of(from));
+REGISTER_SAFE_CAST(CompareNode, from.GetOpCode() == OP_eq ||
+                    from.GetOpCode() == OP_ge ||
+                    from.GetOpCode() == OP_gt ||
+                    from.GetOpCode() == OP_le ||
+                    from.GetOpCode() == OP_lt ||
+                    from.GetOpCode() == OP_ne ||
+                    from.GetOpCode() == OP_cmp ||
+                    from.GetOpCode() == OP_cmpl ||
+                    from.GetOpCode() == OP_cmpg);
+REGISTER_SAFE_CAST(DepositbitsNode, from.GetOpCode() == OP_depositbits);
+REGISTER_SAFE_CAST(ResolveFuncNode, from.GetOpCode() == OP_resolveinterfacefunc ||
+                    from.GetOpCode() == OP_resolvevirtualfunc);
+REGISTER_SAFE_CAST(TernaryNode, from.GetOpCode() == OP_select);
+REGISTER_SAFE_CAST(NaryNode, instance_of(from) ||
+                    instance_of(from));
+REGISTER_SAFE_CAST(IntrinsicopNode, from.GetOpCode() == OP_intrinsicop ||
+                    from.GetOpCode() == OP_intrinsicopwithtype);
+REGISTER_SAFE_CAST(ConstvalNode, from.GetOpCode() == OP_constval);
+REGISTER_SAFE_CAST(ConststrNode, from.GetOpCode() == OP_conststr);
+REGISTER_SAFE_CAST(Conststr16Node, from.GetOpCode() == OP_conststr16);
+REGISTER_SAFE_CAST(SizeoftypeNode, from.GetOpCode() == OP_sizeoftype);
+REGISTER_SAFE_CAST(FieldsDistNode, from.GetOpCode() == OP_fieldsdist);
+REGISTER_SAFE_CAST(ArrayNode, from.GetOpCode() == OP_array);
+REGISTER_SAFE_CAST(AddrofNode, from.GetOpCode() == OP_dread ||
+                    from.GetOpCode() == OP_addrof);
+REGISTER_SAFE_CAST(RegreadNode, from.GetOpCode() == OP_regread);
+REGISTER_SAFE_CAST(AddroffuncNode, from.GetOpCode() == OP_addroffunc);
+REGISTER_SAFE_CAST(AddroflabelNode, from.GetOpCode() == OP_addroflabel);
+REGISTER_SAFE_CAST(StmtNode, from.GetOpCode() == OP_finally ||
+                    from.GetOpCode() == OP_cleanuptry ||
+                    from.GetOpCode() == OP_endtry ||
+                    from.GetOpCode() == OP_retsub ||
+                    from.GetOpCode() == OP_membaracquire ||
+                    from.GetOpCode() == OP_membarrelease ||
+                    from.GetOpCode() == OP_membarstoreload ||
+                    from.GetOpCode() == OP_membarstorestore ||
+                    instance_of(from) ||
+                    instance_of(from) ||
+                    instance_of(from) ||
+                    instance_of(from) ||
+                    instance_of(from) ||
+                    instance_of(from) ||
+                    instance_of(from) ||
+                    instance_of(from) ||
+                    instance_of(from) ||
+                    instance_of(from) ||
+                    instance_of(from) ||
+                    instance_of(from) ||
+                    instance_of(from) ||
+                    instance_of(from));
+REGISTER_SAFE_CAST(IassignNode, from.GetOpCode() == OP_iassign);
+REGISTER_SAFE_CAST(GotoNode, from.GetOpCode() == OP_goto ||
+                    from.GetOpCode() == OP_gosub);
+REGISTER_SAFE_CAST(JsTryNode, from.GetOpCode() == OP_jstry);
+REGISTER_SAFE_CAST(TryNode, from.GetOpCode() == OP_try);
+REGISTER_SAFE_CAST(CatchNode, from.GetOpCode() == OP_catch);
+REGISTER_SAFE_CAST(SwitchNode,
from.GetOpCode() == OP_switch); +REGISTER_SAFE_CAST(MultiwayNode, from.GetOpCode() == OP_multiway); +REGISTER_SAFE_CAST(UnaryStmtNode, from.GetOpCode() == OP_eval || + from.GetOpCode() == OP_throw || + from.GetOpCode() == OP_free || + from.GetOpCode() == OP_decref || + from.GetOpCode() == OP_incref || + from.GetOpCode() == OP_decrefreset || + (kOpcodeInfo.IsAssertNonnull(from.GetOpCode()) && + !kOpcodeInfo.IsCallAssertNonnull(from.GetOpCode())) || + instance_of(from) || + instance_of(from) || + instance_of(from) || + instance_of(from) || + instance_of(from) || + instance_of(from) || + instance_of(from)); +REGISTER_SAFE_CAST(CallAssertNonnullStmtNode, from.GetOpCode() == OP_callassertnonnull); +REGISTER_SAFE_CAST(DassignNode, from.GetOpCode() == OP_dassign || + from.GetOpCode() == OP_maydassign); +REGISTER_SAFE_CAST(RegassignNode, from.GetOpCode() == OP_regassign); +REGISTER_SAFE_CAST(CondGotoNode, from.GetOpCode() == OP_brtrue || + from.GetOpCode() == OP_brfalse); +REGISTER_SAFE_CAST(RangeGotoNode, from.GetOpCode() == OP_rangegoto); +REGISTER_SAFE_CAST(BlockNode, from.GetOpCode() == OP_block); +REGISTER_SAFE_CAST(IfStmtNode, from.GetOpCode() == OP_if); +REGISTER_SAFE_CAST(WhileStmtNode, from.GetOpCode() == OP_while || + from.GetOpCode() == OP_dowhile); +REGISTER_SAFE_CAST(DoloopNode, from.GetOpCode() == OP_doloop); +REGISTER_SAFE_CAST(ForeachelemNode, from.GetOpCode() == OP_foreachelem); +REGISTER_SAFE_CAST(BinaryStmtNode, from.GetOpCode() == OP_assertge || + from.GetOpCode() == OP_assertlt || + instance_of(from)); +REGISTER_SAFE_CAST(IassignoffNode, from.GetOpCode() == OP_iassignoff); +REGISTER_SAFE_CAST(IassignFPoffNode, from.GetOpCode() == OP_iassignfpoff); +REGISTER_SAFE_CAST(NaryStmtNode, from.GetOpCode() == OP_return || + from.GetOpCode() == OP_syncenter || + from.GetOpCode() == OP_syncexit || + instance_of(from) || + instance_of(from) || + instance_of(from)); +REGISTER_SAFE_CAST(CallNode, from.GetOpCode() == OP_call || + from.GetOpCode() == OP_virtualcall || + from.GetOpCode() == OP_superclasscall || + from.GetOpCode() == OP_interfacecall || + from.GetOpCode() == OP_customcall || + from.GetOpCode() == OP_polymorphiccall || + from.GetOpCode() == OP_interfaceicall || + from.GetOpCode() == OP_virtualicall || + from.GetOpCode() == OP_callassigned || + from.GetOpCode() == OP_virtualcallassigned || + from.GetOpCode() == OP_superclasscallassigned || + from.GetOpCode() == OP_interfacecallassigned || + from.GetOpCode() == OP_customcallassigned || + from.GetOpCode() == OP_polymorphiccallassigned || + from.GetOpCode() == OP_interfaceicallassigned || + from.GetOpCode() == OP_virtualicallassigned || + instance_of(from)); +REGISTER_SAFE_CAST(IcallNode, from.GetOpCode() == OP_icall || + from.GetOpCode() == OP_icallassigned || + from.GetOpCode() == OP_icallproto || + from.GetOpCode() == OP_icallprotoassigned); +REGISTER_SAFE_CAST(IntrinsiccallNode, from.GetOpCode() == OP_intrinsiccall || + from.GetOpCode() == OP_intrinsiccallwithtype || + from.GetOpCode() == OP_xintrinsiccall || + from.GetOpCode() == OP_intrinsiccallassigned || + from.GetOpCode() == OP_intrinsiccallwithtypeassigned || + from.GetOpCode() == OP_xintrinsiccallassigned); +REGISTER_SAFE_CAST(CallinstantNode, from.GetOpCode() == OP_callinstant || + from.GetOpCode() == OP_virtualcallinstant || + from.GetOpCode() == OP_superclasscallinstant || + from.GetOpCode() == OP_interfacecallinstant || + from.GetOpCode() == OP_callinstantassigned || + from.GetOpCode() == OP_virtualcallinstantassigned || + from.GetOpCode() == 
OP_superclasscallinstantassigned || + from.GetOpCode() == OP_interfacecallinstantassigned); +REGISTER_SAFE_CAST(LabelNode, from.GetOpCode() == OP_label); +REGISTER_SAFE_CAST(CommentNode, from.GetOpCode() == OP_comment); +#endif +} diff --git a/src/mapleall/maple_ir/include/java_eh_lower.h b/src/mapleall/maple_ir/include/java_eh_lower.h new file mode 100644 index 0000000000000000000000000000000000000000..5462ea04b899ef303bcfcf1495712a9df7c9fe8a --- /dev/null +++ b/src/mapleall/maple_ir/include/java_eh_lower.h @@ -0,0 +1,48 @@ +/* + * Copyright (c) [2019-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLE_IR_INCLUDE_JAVA_EH_LOWER_H +#define MAPLE_IR_INCLUDE_JAVA_EH_LOWER_H +#include "phase_impl.h" +#include "class_hierarchy.h" +#include "maple_phase_manager.h" + +namespace maple { +class JavaEHLowerer : public FuncOptimizeImpl { + public: + JavaEHLowerer(MIRModule &mod, KlassHierarchy *kh, bool dump) : FuncOptimizeImpl(mod, kh, dump) {} + ~JavaEHLowerer() = default; + + FuncOptimizeImpl *Clone() override { + return new JavaEHLowerer(*this); + } + + void ProcessFunc(MIRFunction *func) override; + + private: + BlockNode *DoLowerBlock(BlockNode&); + BaseNode *DoLowerExpr(BaseNode&, BlockNode&); + BaseNode *DoLowerDiv(BinaryNode&, BlockNode&); + void DoLowerBoundaryCheck(IntrinsiccallNode&, BlockNode&); + BaseNode *DoLowerRem(BinaryNode &expr, BlockNode &blkNode) { + return DoLowerDiv(expr, blkNode); + } + + uint32 divSTIndex = 0; // The index of divide operand and result. + bool useRegTmp = Options::usePreg; // Use register to save temp variable or not. +}; + +MAPLE_MODULE_PHASE_DECLARE(M2MJavaEHLowerer) +} // namespace maple +#endif // MAPLE_IR_INCLUDE_JAVA_EH_LOWER_H diff --git a/src/mapleall/maple_ir/include/keywords.def b/src/mapleall/maple_ir/include/keywords.def new file mode 100644 index 0000000000000000000000000000000000000000..14c906656fa9038ba5477726b305ce102dd1773d --- /dev/null +++ b/src/mapleall/maple_ir/include/keywords.def @@ -0,0 +1,106 @@ +/* + * Copyright (c) [2019] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan PSL v1. + * You can use this software according to the terms and conditions of the Mulan PSL v1. + * You may obtain a copy of Mulan PSL v1 at: + * + * http://license.coscl.org.cn/MulanPSL + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v1 for more details. + */ + // opcode keywords +#define OPCODE(X, Y, Z, S) KEYWORD(X) +#include "opcodes.def" +#undef OPCODE + // primitive types +#define LOAD_ALGO_PRIMARY_TYPE +#define PRIMTYPE(P) KEYWORD(P) +#include "prim_types.def" +#undef PRIMTYPE + // intrinsic names +#undef DEF_MIR_INTRINSIC +#define DEF_MIR_INTRINSIC(X, NAME, NUM_INSN, INTRN_CLASS, RETURN_TYPE, ...) 
KEYWORD(X) +#include "intrinsics.def" +#undef DEF_MIR_INTRINSIC + KEYWORD(else) + // declaration keywords + KEYWORD(var) + KEYWORD(tempvar) + KEYWORD(reg) + KEYWORD(type) + KEYWORD(func) + KEYWORD(struct) + KEYWORD(structincomplete) + KEYWORD(union) + KEYWORD(class) + KEYWORD(classincomplete) + KEYWORD(interfaceincomplete) + KEYWORD(javaclass) + KEYWORD(javainterface) + // type attribute keywords +#define FUNC_ATTR +#define TYPE_ATTR +#define FIELD_ATTR +#define ATTR(X) KEYWORD(X) +#include "all_attributes.def" +#undef ATTR +#undef FUNC_ATTR +#undef TYPE_ATTR +#undef FIELD_ATTR + KEYWORD(align) + // per-function declaration keywords + KEYWORD(framesize) + KEYWORD(upformalsize) + KEYWORD(moduleid) + KEYWORD(funcsize) + KEYWORD(funcid) + KEYWORD(formalwordstypetagged) + KEYWORD(localwordstypetagged) + KEYWORD(formalwordsrefcounted) + KEYWORD(localwordsrefcounted) + // per-module declaration keywords + KEYWORD(flavor) + KEYWORD(srclang) + KEYWORD(globalmemsize) + KEYWORD(globalmemmap) + KEYWORD(globalwordstypetagged) + KEYWORD(globalwordsrefcounted) + KEYWORD(id) + KEYWORD(numfuncs) + KEYWORD(entryfunc) + // file related declaration keywords + KEYWORD(fileinfo) + KEYWORD(filedata) + KEYWORD(srcfileinfo) + KEYWORD(funcinfo) + // special float constants + KEYWORD(nanf) + KEYWORD(nan) + KEYWORD(inff) + KEYWORD(inf) + // pragma + KEYWORD(pragma) + KEYWORD(param) + KEYWORD(func_ex) + KEYWORD(func_var) + // staticvalue + KEYWORD(staticvalue) + // import + KEYWORD(import) + KEYWORD(importpath) + // source position information + KEYWORD(LOC) + // dwarf related + KEYWORD(SCOPE) + KEYWORD(ALIAS) + KEYWORD(ENUMERATION) + KEYWORD(TYPEALIAS) + // storage class + KEYWORD(pstatic) + KEYWORD(fstatic) + // file-scope asm + KEYWORD(asmdecl) diff --git a/src/mapleall/maple_ir/include/lexer.h b/src/mapleall/maple_ir/include/lexer.h new file mode 100644 index 0000000000000000000000000000000000000000..0752b34c558d66c1a0a5042b92825d7c9085d223 --- /dev/null +++ b/src/mapleall/maple_ir/include/lexer.h @@ -0,0 +1,171 @@ +/* + * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MAPLE_IR_INCLUDE_LEXER_H +#define MAPLE_IR_INCLUDE_LEXER_H +#include +#include "cstdio" +#include "types_def.h" +#include "tokens.h" +#include "mempool_allocator.h" +#include "mir_module.h" + +namespace maple { +class MIRParser; // circular dependency exists, no other choice +class MIRLexer { + friend MIRParser; + + public: + explicit MIRLexer(MIRModule &mod); + ~MIRLexer() { + airFile = nullptr; + if (airFileInternal.is_open()) { + airFileInternal.close(); + } + } + + void PrepareForFile(const std::string &filename); + void PrepareForString(const std::string &src); + TokenKind NextToken(); + TokenKind LexToken(); + TokenKind GetTokenKind() const { + return kind; + } + + uint32 GetLineNum() const { + return lineNum; + } + + uint32 GetCurIdx() const { + return curIdx; + } + + // get the identifier name after the % or $ prefix + const std::string &GetName() const { + return name; + } + + uint64 GetTheIntVal() const { + return theIntVal; + } + + float GetTheFloatVal() const { + return theFloatVal; + } + + double GetTheDoubleVal() const { + return theDoubleVal; + } + + std::string GetTokenString() const; // for error reporting purpose + + private: + MIRModule &module; + // for storing the different types of constant values + uint64 theIntVal = 0; // also indicates preg number under TK_preg + float theFloatVal = 0.0; + double theDoubleVal = 0.0; + MapleVector seenComments; + std::ifstream *airFile = nullptr; + std::ifstream airFileInternal; + std::string line; + size_t lineBufSize = 0; // the allocated size of line(buffer). + uint32 currentLineSize = 0; + uint32 curIdx = 0; + uint32 lineNum = 0; + TokenKind kind = TK_invalid; + std::string name = ""; // store the name token without the % or $ prefix + MapleUnorderedMap keywordMap; + + void RemoveReturnInline(std::string &removeLine) const { + if (removeLine.empty()) { + return; + } + if (removeLine.back() == '\n') { + removeLine.pop_back(); + } + if (removeLine.back() == '\r') { + removeLine.pop_back(); + } + } + + int ReadALine(); // read a line from MIR (text) file. + void GenName(); + TokenKind GetConstVal(); + TokenKind GetSpecialFloatConst(); + TokenKind GetHexConst(uint32 valStart, bool negative); + TokenKind GetIntConst(uint32 valStart, bool negative); + TokenKind GetFloatConst(uint32 valStart, uint32 startIdx, bool negative); + TokenKind GetSpecialTokenUsingOneCharacter(char c); + TokenKind GetTokenWithPrefixDollar(); + TokenKind GetTokenWithPrefixPercent(); + TokenKind GetTokenWithPrefixAmpersand(); + TokenKind GetTokenWithPrefixAtOrCircumflex(char prefix); + TokenKind GetTokenWithPrefixExclamation(); + TokenKind GetTokenWithPrefixQuotation(); + TokenKind GetTokenWithPrefixDoubleQuotation(); + TokenKind GetTokenSpecial(); + + char GetCharAt(uint32 idx) const { + return line[idx]; + } + + char GetCharAtWithUpperCheck(uint32 idx) const { + return idx < currentLineSize ? line[idx] : 0; + } + + char GetCharAtWithLowerCheck(uint32 idx) const { + return idx >= 0 ? line[idx] : 0; + } + + char GetCurrentCharWithUpperCheck() { + return curIdx < currentLineSize ? line[curIdx] : 0; + } + + char GetNextCurrentCharWithUpperCheck() { + ++curIdx; + return curIdx < currentLineSize ? 
line[curIdx] : 0; + } + + void SetFile(std::ifstream &file) { + airFile = &file; + } + + std::ifstream *GetFile() const { + return airFile; + } +}; + +inline bool IsPrimitiveType(TokenKind tk) { + return (tk >= TK_void) && (tk < TK_unknown); +} + +inline bool IsVarName(TokenKind tk) { + return (tk == TK_lname) || (tk == TK_gname); +} + +inline bool IsExprBinary(TokenKind tk) { + return (tk >= TK_add) && (tk <= TK_sub); +} + +inline bool IsConstValue(TokenKind tk) { + return (tk >= TK_intconst) && (tk <= TK_doubleconst); +} + +inline bool IsConstAddrExpr(TokenKind tk) { + return (tk == TK_addrof) || (tk == TK_addroffunc) || (tk == TK_addroflabel) || + (tk == TK_conststr) || (tk == TK_conststr16); +} +} // namespace maple +#endif // MAPLE_IR_INCLUDE_LEXER_H diff --git a/src/mapleall/maple_ir/include/memory_order_attrs.def b/src/mapleall/maple_ir/include/memory_order_attrs.def new file mode 100644 index 0000000000000000000000000000000000000000..e7a241ad03b3c953400f2d9bebc97c690b9cba50 --- /dev/null +++ b/src/mapleall/maple_ir/include/memory_order_attrs.def @@ -0,0 +1,20 @@ +/* + * Copyright (c) [2019] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan PSL v1. + * You can use this software according to the terms and conditions of the Mulan PSL v1. + * You may obtain a copy of Mulan PSL v1 at: + * + * http://license.coscl.org.cn/MulanPSL + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v1 for more details. + */ + ATTR(memory_order_relaxed) + ATTR(memory_order_consume) + ATTR(memory_order_acquire) + ATTR(memory_order_release) + ATTR(memory_order_acq_rel) + ATTR(memory_order_seq_cst) diff --git a/src/mapleall/maple_ir/include/metadata_layout.h b/src/mapleall/maple_ir/include/metadata_layout.h new file mode 100644 index 0000000000000000000000000000000000000000..86b975177a1a4d1174f28ac721e4783d70d274a7 --- /dev/null +++ b/src/mapleall/maple_ir/include/metadata_layout.h @@ -0,0 +1,352 @@ +/* + * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef METADATA_LAYOUT_H +#define METADATA_LAYOUT_H +#include + +// metadata layout is shared between maple compiler and runtime, thus not in namespace maplert +// some of the reference field of metadata is stored as relative offset +// for example, declaring class of Fields/Methods +// which can be negative +#ifdef USE_32BIT_REF +using MetaRef = uint32_t; // consistent with reffield_t in address.h +#else +using MetaRef = uintptr_t; // consistent iwth reffield_t in address.h +#endif // USE_32BIT_REF + +// DataRefOffset aims to represent a reference to data in maple file, which is already an offset. +// DataRefOffset is meant to have pointer size. +// All Xx32 data types defined in this file aim to use 32 bits to save 64-bit address, and thus are +// specific for 64-bit platforms. 
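// Editor's sketch (an assumption for illustration only; the real inline implementations are not
// part of this header excerpt): a self-relative offset field of this kind is typically encoded
// as "target address minus address of the field" and decoded by adding the field's own address
// back, roughly:
//   void SetDataRef(T ref)  { refOffset = static_cast<int32_t>(reinterpret_cast<intptr_t>(ref) -
//                                                              reinterpret_cast<intptr_t>(this)); }
//   T    GetDataRef() const { return reinterpret_cast<T>(reinterpret_cast<intptr_t>(this) + refOffset); }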
+struct DataRefOffset32 { + int32_t refOffset; + template + inline void SetDataRef(T ref); + template + inline T GetDataRef() const; + inline int32_t GetRawValue() const; + inline void SetRawValue(int32_t value); +}; + +struct DataRefOffsetPtr { + intptr_t refOffset; + template + inline void SetDataRef(T ref); + template + inline T GetDataRef() const; + inline intptr_t GetRawValue() const; + inline void SetRawValue(intptr_t value); +}; + +struct DataRefOffset { +#ifdef USE_32BIT_REF + DataRefOffset32 refOffset; +#else + DataRefOffsetPtr refOffset; +#endif + template + inline void SetDataRef(T ref); + template + inline T GetDataRef() const; + inline intptr_t GetRawValue() const; + inline void SetRawValue(intptr_t value); +}; + +struct MethodFieldRef { + // MethodFieldRef aims to represent a reference to fields/methods in maple file, which is already an offset. + // also, offset LSB may set 1, to indicate that it is compact fields/methods. + enum MethodFieldRefFormat { + kMethodFieldRefIsCompact = 1, + }; + DataRefOffsetPtr refOffset; + template + inline void SetDataRef(T ref); + template + inline T GetDataRef() const; + inline bool IsCompact() const; + template + inline T GetCompactData() const; + inline intptr_t GetRawValue() const; + inline void SetRawValue(intptr_t value); +}; + +// DataRef aims for reference to data in maple file (generated by maple compiler) and is aligned to at least 4 bytes. +// Perhaps MDataRef is more fit, still DataRef is chosen to make it common. +// DataRef allows 4 formats of value: +// 0. "label_name" for direct reference +// 1. "label_name - . + 1" for padding unused +// 2. "label_name - . + 2" for reference in offset format +// 3. "indirect.label_name - . + 3" for indirect reference +// this format aims to support lld which does not support expression "global_symbol - ." +// DataRef is self-decoded by also encoding the format and is defined for binary compatibility. +// If no compatibility problem is involved, DataRefOffsetPtr is preferred. +enum DataRefFormat { + kDataRefIsDirect = 0, // must be 0 + kDataRefPadding = 1, // unused + kDataRefIsOffset = 2, + kDataRefIsIndirect = 3, // read-only + kDataRefBitMask = 3, +}; + +struct DataRef32 { + // be careful when *refVal* is treated as an offset which is a signed integer actually. + uint32_t refVal; + template + inline T GetDataRef() const; + template + inline void SetDataRef(T ref, DataRefFormat format = kDataRefIsDirect); + template + inline T GetRawValue() const; +}; + +struct DataRef { + uintptr_t refVal; + template + inline T GetDataRef() const; + template + inline void SetDataRef(const T ref, const DataRefFormat format = kDataRefIsDirect); + template + inline T GetRawValue() const; +}; +// GctibRef aims to represent a reference to gctib in maple file, which is an offset by default. +// GctibRef is meant to have pointer size and aligned to at least 4 bytes. +// GctibRef allows 2 formats of value: +// 0. "label_name - . + 0" for reference in offset format +// 1. "indirect.label_name - . + 1" for indirect reference +// this format aims to support lld which does not support expression "global_symbol - ." +// GctibRef is self-decoded by also encoding the format and is defined for binary compatibility. +// If no compatibility problem is involved, DataRef is preferred. +enum GctibRefFormat { + kGctibRefIsOffset = 0, // default + kGctibRefIsIndirect = 1, + kGctibRefBitMask = 3 +}; + +struct GctibRef32 { + // be careful when *refVal* is treated as an offset which is a signed integer actually. 
+ uint32_t refVal; + template + inline T GetGctibRef() const; + template + inline void SetGctibRef(T ref, GctibRefFormat format = kGctibRefIsOffset); +}; + +struct GctibRef { + uintptr_t refVal; + template + inline T GetGctibRef() const; + template + inline void SetGctibRef(const T ref, const GctibRefFormat format = kGctibRefIsOffset); +}; + +// MByteRef is meant to represent a reference to data defined in maple file. It is a direct reference or an offset. +// MByteRef is self-encoded/decoded and aligned to 1 byte. +// Unlike DataRef, the format of MByteRef is determined by its value. +struct MByteRef { + uintptr_t refVal; // initializer prefers this field to be a pointer + +#if defined(__arm__) || defined(USE_ARM32_MACRO) + // assume address range 0 ~ 256MB is unused in arm runtime + // kEncodedOffsetMin ~ kEncodedOffsetMax is the value range of encoded offset + static constexpr intptr_t kOffsetBound = 128 * 1024 * 1024; + static constexpr intptr_t kOffsetMin = -kOffsetBound; + static constexpr intptr_t kOffsetMax = kOffsetBound; + + static constexpr intptr_t kPositiveOffsetBias = 128 * 1024 * 1024; + static constexpr intptr_t kEncodedOffsetMin = kPositiveOffsetBias + kOffsetMin; + static constexpr intptr_t kEncodedOffsetMax = kPositiveOffsetBias + kOffsetMax; +#else + enum { + kBiasBitPosition = sizeof(refVal) * 8 - 4, // the most significant 4 bits + }; + + static constexpr uintptr_t kOffsetBound = 256 * 1024 * 1024; // according to kDsoLoadedAddessEnd = 0xF0000000 + static constexpr uintptr_t kPositiveOffsetMin = 0; + static constexpr uintptr_t kPositiveOffsetMax = kOffsetBound; + + static constexpr uintptr_t kPositiveOffsetBias = static_cast(6) << kBiasBitPosition; + static constexpr uintptr_t kEncodedPosOffsetMin = kPositiveOffsetMin + kPositiveOffsetBias; + static constexpr uintptr_t kEncodedPosOffsetMax = kPositiveOffsetMax + kPositiveOffsetBias; +#endif + + template + inline T GetRef() const; + template + inline void SetRef(const T ref); + inline bool IsOffset() const; +}; + +struct MByteRef32 { + uint32_t refVal; + static constexpr uint32_t kOffsetBound = 256 * 1024 * 1024; // according to kDsoLoadedAddessEnd = 0xF0000000 + static constexpr uint32_t kPositiveOffsetMin = 0; + static constexpr uint32_t kPositiveOffsetMax = kOffsetBound; + + static constexpr uint32_t kPositiveOffsetBias = 0x60000000; // the most significant 4 bits 0110 + static constexpr uint32_t kEncodedPosOffsetMin = kPositiveOffsetMin + kPositiveOffsetBias; + static constexpr uint32_t kEncodedPosOffsetMax = kPositiveOffsetMax + kPositiveOffsetBias; + + static constexpr uint32_t kDirectRefMin = 0xC0000000; // according to kDsoLoadedAddessStart = 0xC0000000 + static constexpr uint32_t kDirectRefMax = 0xF0000000; // according to kDsoLoadedAddessEnd = 0xF0000000 + + static constexpr int32_t kNegativeOffsetMin = -(256 * 1024 * 1024); // -kOffsetBound + static constexpr int32_t kNegativeOffsetMax = 0; + + template + inline T GetRef() const; + template + inline void SetRef(T ref); + inline bool IsOffset() const; + inline bool IsPositiveOffset() const; + inline bool IsNegativeOffset() const; +}; + +// MethodMeta defined in methodmeta.h +// FieldMeta defined in fieldmeta.h +// MethodDesc contains MethodMetadata and stack map +struct MethodDesc { + // relative offset for method metadata relative to current PC. + // method metadata is in compact format if this offset is odd. 
+ uint32_t metadataOffset; + + int16_t localRefOffset; + uint16_t localRefNumber; + + // stack map for a methed might be placed here +}; + +// Note: class init in maplebe and cg is highly dependent on this type. +// update aarch64rtsupport.h if you modify this definition. +struct ClassMetadataRO { + MByteRef className; + MethodFieldRef fields; // point to info of fields + MethodFieldRef methods; // point to info of methods + union { // Element classinfo of array, others parent classinfo + DataRef superclass; + DataRef componentClass; + }; + + uint16_t numOfFields; + uint16_t numOfMethods; + +#ifndef USE_32BIT_REF + uint16_t flag; + uint16_t numOfSuperclasses; + uint32_t padding; +#endif // !USE_32BIT_REF + + uint32_t mod; + DataRefOffset32 annotation; + DataRefOffset32 clinitAddr; +}; + +static constexpr size_t kPageSize = 4096; +static constexpr size_t kCacheLine = 64; + +// according to kSpaceAnchor and kFireBreak defined in bp_allocator.cpp +// the address of this readable page is set as kProtectedMemoryStart for java class +static constexpr uintptr_t kClInitStateAddrBase = 0xc0000000 - (1u << 20) * 2; + +// In Kirin 980, 2 mmap memory address with odd number of page distances may have unreasonable L1&L2 cache conflict. +// kClassInitializedState is used as the init state for class that has no method, it's will be loaded in many +// place for Decouple build App. if we set the value to kClInitStateAddrBase(0xbfe00000), it may conflict with the +// yieldpoind test address globalPollingPage which is defined in yieldpoint.cpp. +// Hence we add 1 cache line (64 byte) offset here to avoid such conflict +static constexpr uintptr_t kClassInitializedState = kClInitStateAddrBase + kCacheLine; + +extern "C" uint8_t classInitProtectRegion[]; + +// Note there is no state to indicate a class is already initialized. +// Any state beyond listed below is treated as initialized. +enum ClassInitState { + kClassInitStateMin = 0, + kClassUninitialized = 1, + kClassInitializing = 2, + kClassInitFailed = 3, + kClassInitialized = 4, + kClassInitStateMax = 4, +}; + +enum SEGVAddr { + kSEGVAddrRangeStart = kPageSize + 0, + + // Note any readable address is treated as Initialized. + kSEGVAddrForClassInitStateMin = kSEGVAddrRangeStart + kClassInitStateMin, + kSEGVAddrForClassUninitialized = kSEGVAddrForClassInitStateMin + kClassUninitialized, + kSEGVAddrForClassInitializing = kSEGVAddrForClassInitStateMin + kClassInitializing, + kSEGVAddrForClassInitFailed = kSEGVAddrForClassInitStateMin + kClassInitFailed, + kSEGVAddrFoClassInitStateMax = kSEGVAddrForClassInitStateMin + kClassInitStateMax, + + kSEGVAddrRangeEnd, +}; + +struct ClassMetadata { + // object common fields + MetaRef shadow; // point to classinfo of java/lang/Class + int32_t monitor; + + // other fields + uint16_t clIndex; // 8bit ClassLoader index, used for querying the address of related ClassLoader instance. 
+ union { + uint16_t objSize; + uint16_t componentSize; + } sizeInfo; + +#ifdef USE_32BIT_REF // for alignment purpose + uint16_t flag; + uint16_t numOfSuperclasses; +#endif // USE_32BIT_REF + + DataRef iTable; // iTable of current class, used for interface call, will insert the content into classinfo + DataRef vTable; // vTable of current class, used for virtual call, will insert the content into classinfo + GctibRef gctib; // for rc + +#ifdef USE_32BIT_REF + DataRef32 classInfoRo; + DataRef32 cacheFalseClass; +#else + DataRef classInfoRo; +#endif + + union { + uintptr_t initState; // a readable address for initState means initialized + DataRef cacheTrueClass; + }; + + public: + static inline intptr_t OffsetOfInitState() { + ClassMetadata *base = nullptr; + return reinterpret_cast(&(base->initState)); + } + + uintptr_t GetInitStateRawValue() const { + return __atomic_load_n(&initState, __ATOMIC_ACQUIRE); + } + + template + void SetInitStateRawValue(T val) { + __atomic_store_n(&initState, reinterpret_cast(val), __ATOMIC_RELEASE); + } +}; + +// function to set Class/Field/Method metadata's shadow field to avoid type conversion +// Note 1: here we don't do NULL-check and type-compatibility check +// NOte 2: C should be of jclass/ClassMetata* type +template +static inline void MRTSetMetadataShadow(M *meta, C cls) { + meta->shadow = static_cast(reinterpret_cast(cls)); +} + +#endif // METADATA_LAYOUT_H diff --git a/src/mapleall/maple_ir/include/mir_builder.h b/src/mapleall/maple_ir/include/mir_builder.h new file mode 100644 index 0000000000000000000000000000000000000000..5fa1c63502ec240651f6a8a446c7d9816ba521e7 --- /dev/null +++ b/src/mapleall/maple_ir/include/mir_builder.h @@ -0,0 +1,362 @@ +/* + * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MAPLE_IR_INCLUDE_MIR_BUILDER_H +#define MAPLE_IR_INCLUDE_MIR_BUILDER_H +#include +#include +#include +#include +#ifdef _WIN32 +#include +#endif +#include "opcodes.h" +#include "prim_types.h" +#include "mir_type.h" +#include "mir_const.h" +#include "mir_symbol.h" +#include "mir_nodes.h" +#include "mir_module.h" +#include "mir_preg.h" +#include "mir_function.h" +#include "printing.h" +#include "intrinsic_op.h" +#include "opcode_info.h" +#include "global_tables.h" + +namespace maple { +using ArgPair = std::pair; +using ArgVector = MapleVector; +class MIRBuilder { + public: + enum MatchStyle { + kUpdateFieldID = 0, // do not match but traverse to update fieldID + kMatchTopField = 1, // match top level field only + kMatchAnyField = 2, // match any field + kParentFirst = 4, // traverse parent first + kFoundInChild = 8, // found in child + }; + + explicit MIRBuilder(MIRModule *module) + : mirModule(module), + incompleteTypeRefedSet(mirModule->GetMPAllocator().Adapter()) {} + + virtual ~MIRBuilder() = default; + + virtual void SetCurrentFunction(MIRFunction &fun) { + mirModule->SetCurFunction(&fun); + } + + virtual MIRFunction *GetCurrentFunction() const { + return mirModule->CurFunction(); + } + MIRFunction *GetCurrentFunctionNotNull() const { + MIRFunction *func = GetCurrentFunction(); + CHECK_FATAL(func != nullptr, "nullptr check"); + return func; + } + + MIRModule &GetMirModule() const { + return *mirModule; + } + + const MapleSet &GetIncompleteTypeRefedSet() const { + return incompleteTypeRefedSet; + } + + std::vector> &GetExtraFieldsTuples() { + return extraFieldsTuples; + } + + unsigned int GetLineNum() const { + return lineNum; + } + void SetLineNum(unsigned int num) { + lineNum = num; + } + + GStrIdx GetOrCreateStringIndex(const std::string &str) const { + return GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(str); + } + + GStrIdx GetOrCreateStringIndex(GStrIdx strIdx, const std::string &str) const { + std::string firstString(GlobalTables::GetStrTable().GetStringFromStrIdx(strIdx)); + firstString.append(str); + return GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(firstString); + } + + GStrIdx GetStringIndex(const std::string &str) const { + return GlobalTables::GetStrTable().GetStrIdxFromName(str); + } + + MIRFunction *GetOrCreateFunction(const std::string &str, TyIdx retTyIdx); + MIRFunction *GetFunctionFromSymbol(const MIRSymbol &funcSymbol) const; + MIRFunction *GetFunctionFromStidx(StIdx stIdx); + MIRFunction *GetFunctionFromName(const std::string &str); + // For compiler-generated metadata struct + void AddIntFieldConst(const MIRStructType &sType, MIRAggConst &newConst, uint32 fieldID, int64 constValue) const; + void AddAddrofFieldConst(const MIRStructType &structType, MIRAggConst &newConst, uint32 fieldID, + const MIRSymbol &fieldSymbol); + void AddAddroffuncFieldConst(const MIRStructType &structType, MIRAggConst &newConst, uint32 fieldID, + const MIRSymbol &funcSymbol); + + bool TraverseToNamedField(MIRStructType &structType, GStrIdx nameIdx, uint32 &fieldID); + bool TraverseToNamedFieldWithTypeAndMatchStyle(MIRStructType &structType, GStrIdx nameIdx, TyIdx typeIdx, + uint32 &fieldID, unsigned int matchStyle); + void TraverseToNamedFieldWithType(MIRStructType &structType, GStrIdx nameIdx, TyIdx typeIdx, uint32 &fieldID, + uint32 &idx); + + FieldID GetStructFieldIDFromNameAndType(MIRType &type, const std::string &name, TyIdx idx, unsigned int matchStyle); + FieldID GetStructFieldIDFromNameAndType(MIRType &type, const std::string &name, TyIdx idx); + 
FieldID GetStructFieldIDFromNameAndTypeParentFirst(MIRType &type, const std::string &name, TyIdx idx); + FieldID GetStructFieldIDFromNameAndTypeParentFirstFoundInChild(MIRType &type, const std::string &name, TyIdx idx); + + FieldID GetStructFieldIDFromFieldName(MIRType &type, const std::string &name); + FieldID GetStructFieldIDFromFieldNameParentFirst(MIRType *type, const std::string &name); + + void SetStructFieldIDFromFieldName(MIRStructType &structType, const std::string &name, GStrIdx newStrIdx, + const MIRType &newFieldType) const; + // for creating Function. + MIRSymbol *GetFunctionArgument(MIRFunction &fun, uint32 index) const { + CHECK(index < fun.GetFormalCount(), "index out of range in GetFunctionArgument"); + return fun.GetFormal(index); + } + + MIRFunction *CreateFunction(const std::string &name, const MIRType &returnType, const ArgVector &arguments, + bool isVarg = false, bool createBody = true) const; + MIRFunction *CreateFunction(StIdx stIdx, bool addToTable = true) const; + virtual void UpdateFunction(MIRFunction&, const MIRType*, const ArgVector&) {} + + MIRSymbol *GetSymbolFromEnclosingScope(StIdx stIdx) const; + virtual MIRSymbol *GetOrCreateLocalDecl(const std::string &str, const MIRType &type); + MIRSymbol *GetLocalDecl(const std::string &str) const; + MIRSymbol *CreateLocalDecl(const std::string &str, const MIRType &type) const; + MIRSymbol *GetOrCreateGlobalDecl(const std::string &str, const MIRType &type) const; + MIRSymbol *GetGlobalDecl(const std::string &str) const; + MIRSymbol *GetDecl(const std::string &str) const; + MIRSymbol *CreateGlobalDecl(const std::string &str, + const MIRType &type, + MIRStorageClass sc = kScGlobal) const; + MIRSymbol *GetOrCreateDeclInFunc(const std::string &str, const MIRType &type, MIRFunction &func) const; + // for creating Expression + ConstvalNode *CreateConstval(MIRConst *mirConst); + ConstvalNode *CreateIntConst(uint64 val, PrimType pty); + ConstvalNode *CreateFloatConst(float val); + ConstvalNode *CreateDoubleConst(double val); + ConstvalNode *CreateFloat128Const(const uint64 *val); + ConstvalNode *GetConstInt(MemPool &memPool, int val) const; + ConstvalNode *GetConstInt(int val) { + return CreateIntConst(static_cast(static_cast(val)), PTY_i32); + } + + ConstvalNode *GetConstUInt1(bool val) { + return CreateIntConst(val, PTY_u1); + } + + ConstvalNode *GetConstUInt8(uint8 val) { + return CreateIntConst(val, PTY_u8); + } + + ConstvalNode *GetConstUInt16(uint16 val) { + return CreateIntConst(val, PTY_u16); + } + + ConstvalNode *GetConstUInt32(uint32 val) { + return CreateIntConst(val, PTY_u32); + } + + ConstvalNode *GetConstUInt64(uint64 val) { + return CreateIntConst(val, PTY_u64); + } + + ConstvalNode *CreateAddrofConst(BaseNode &node); + ConstvalNode *CreateAddroffuncConst(const BaseNode &node); + ConstvalNode *CreateStrConst(const BaseNode &node); + ConstvalNode *CreateStr16Const(const BaseNode &node); + SizeoftypeNode *CreateExprSizeoftype(const MIRType &type); + FieldsDistNode *CreateExprFieldsDist(const MIRType &type, FieldID field1, FieldID field2); + AddrofNode *CreateExprAddrof(FieldID fieldID, const MIRSymbol &symbol, MemPool *memPool = nullptr); + AddrofNode *CreateExprAddrof(FieldID fieldID, StIdx symbolStIdx, MemPool *memPool = nullptr); + AddroffuncNode *CreateExprAddroffunc(PUIdx puIdx, MemPool *memPool = nullptr); + AddrofNode *CreateExprDread(const MIRType &type, FieldID fieldID, const MIRSymbol &symbol); + AddrofNode *CreateExprDread(PrimType ptyp, FieldID fieldID, const MIRSymbol &symbol); + virtual AddrofNode 
*CreateExprDread(MIRType &type, MIRSymbol &symbol); + virtual AddrofNode *CreateExprDread(MIRSymbol &symbol); + AddrofNode *CreateExprDread(PregIdx pregID, PrimType pty); + AddrofNode *CreateExprDread(MIRSymbol &symbol, uint16 fieldID); + DreadoffNode *CreateExprDreadoff(Opcode op, PrimType pty, const MIRSymbol &symbol, int32 offset); + RegreadNode *CreateExprRegread(PrimType pty, PregIdx regIdx); + IreadNode *CreateExprIread(PrimType primType, TyIdx ptrTypeIdx, FieldID fieldID, BaseNode *addr); + IreadNode *CreateExprIread(const MIRType &returnType, const MIRType &ptrType, FieldID fieldID, BaseNode *addr); + IreadoffNode *CreateExprIreadoff(PrimType pty, int32 offset, BaseNode *opnd0); + IreadFPoffNode *CreateExprIreadFPoff(PrimType pty, int32 offset); + IaddrofNode *CreateExprIaddrof(const MIRType &returnType, const MIRType &ptrType, FieldID fieldID, BaseNode *addr); + IaddrofNode *CreateExprIaddrof(PrimType returnTypePty, TyIdx ptrTypeIdx, FieldID fieldID, BaseNode *addr); + BinaryNode *CreateExprBinary(Opcode opcode, const MIRType &type, BaseNode *opnd0, BaseNode *opnd1); + BinaryNode *CreateExprBinary(Opcode opcode, PrimType pty, BaseNode *opnd0, BaseNode *opnd1) { + MIRType *ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(pty)); + return CreateExprBinary(opcode, *ty, opnd0, opnd1); + } + TernaryNode *CreateExprTernary(Opcode opcode, const MIRType &type, BaseNode *opnd0, BaseNode *opnd1, BaseNode *opnd2); + CompareNode *CreateExprCompare(Opcode opcode, const MIRType &type, const MIRType &opndType, BaseNode *opnd0, + BaseNode *opnd1); + UnaryNode *CreateExprUnary(Opcode opcode, const MIRType &type, BaseNode *opnd); + GCMallocNode *CreateExprGCMalloc(Opcode opcode, const MIRType &pType, const MIRType &type); + JarrayMallocNode *CreateExprJarrayMalloc(Opcode opcode, const MIRType &pType, const MIRType &type, BaseNode *opnd); + TypeCvtNode *CreateExprTypeCvt(Opcode o, PrimType toPrimType, PrimType fromPrimType, BaseNode &opnd); + TypeCvtNode *CreateExprTypeCvt(Opcode o, const MIRType &type, const MIRType &fromType, BaseNode *opnd); + ExtractbitsNode *CreateExprExtractbits(Opcode o, const MIRType &type, uint32 bOffset, uint32 bSize, BaseNode *opnd); + ExtractbitsNode *CreateExprExtractbits(Opcode o, PrimType type, uint32 bOffset, uint32 bSize, BaseNode *opnd); + DepositbitsNode *CreateExprDepositbits(Opcode o, PrimType type, uint32 bOffset, uint32 bSize, + BaseNode *leftOpnd, BaseNode* rightOpnd); + RetypeNode *CreateExprRetype(const MIRType &type, const MIRType &fromType, BaseNode *opnd); + RetypeNode *CreateExprRetype(const MIRType &type, PrimType fromType, BaseNode *opnd); + ArrayNode *CreateExprArray(const MIRType &arrayType); + ArrayNode *CreateExprArray(const MIRType &arrayType, BaseNode *op); + ArrayNode *CreateExprArray(const MIRType &arrayType, BaseNode *op1, BaseNode *op2); + ArrayNode *CreateExprArray(const MIRType &arrayType, std::vector ops); + IntrinsicopNode *CreateExprIntrinsicop(MIRIntrinsicID id, Opcode op, PrimType primType, const TyIdx &tyIdx, + const MapleVector &ops); + IntrinsicopNode *CreateExprIntrinsicop(MIRIntrinsicID idx, Opcode opCode, const MIRType &type, + const MapleVector &ops); + // for creating Statement. 
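Editor's note, not part of the patch: the expression factories above compose with the statement factories declared next. A hedged sketch, assuming mirBuilder is the module's MIRBuilder* and SetCurrentFunction has already been called for the function under construction; the variable name "counter" is illustrative.

  MIRType *i32Ty = GlobalTables::GetTypeTable().GetInt32();
  MIRSymbol *counter = mirBuilder->GetOrCreateLocalDecl("counter", *i32Ty);

  // counter = counter + 42
  BaseNode *lhs = mirBuilder->CreateExprDread(*counter);
  BaseNode *rhs = mirBuilder->CreateIntConst(42, PTY_i32);
  BaseNode *sum = mirBuilder->CreateExprBinary(OP_add, PTY_i32, lhs, rhs);
  DassignNode *assign = mirBuilder->CreateStmtDassign(*counter, 0, sum);
  mirBuilder->AddStmtInCurrentFunctionBody(*assign);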
+ NaryStmtNode *CreateStmtReturn(BaseNode *rVal); + NaryStmtNode *CreateStmtNary(Opcode op, BaseNode *rVal); + NaryStmtNode *CreateStmtNary(Opcode op, const MapleVector &rVals); + AssertNonnullStmtNode *CreateStmtAssertNonnull(Opcode op, BaseNode *rVal, GStrIdx funcNameIdx); + CallAssertNonnullStmtNode *CreateStmtCallAssertNonnull(Opcode op, BaseNode *rVal, GStrIdx callFuncNameIdx, + size_t paramIndex, GStrIdx stmtFuncNameIdx); + CallAssertBoundaryStmtNode *CreateStmtCallAssertBoundary(Opcode op, const MapleVector &rVals, + GStrIdx funcNameIdx, size_t paramIndex, + GStrIdx stmtFuncNameIdx); + AssertBoundaryStmtNode *CreateStmtAssertBoundary(Opcode op, const MapleVector &rVals, GStrIdx funcNameIdx); + UnaryStmtNode *CreateStmtUnary(Opcode op, BaseNode *rVal); + UnaryStmtNode *CreateStmtThrow(BaseNode *rVal); + DassignNode *CreateStmtDassign(const MIRSymbol &symbol, FieldID fieldID, BaseNode *src); + DassignNode *CreateStmtDassign(StIdx sIdx, FieldID fieldID, BaseNode *src); + RegassignNode *CreateStmtRegassign(PrimType pty, PregIdx regIdx, BaseNode *src); + IassignNode *CreateStmtIassign(const MIRType &type, FieldID fieldID, BaseNode *addr, BaseNode *src); + IassignoffNode *CreateStmtIassignoff(PrimType pty, int32 offset, BaseNode *addr, BaseNode *src); + IassignFPoffNode *CreateStmtIassignFPoff(Opcode op, PrimType pty, int32 offset, BaseNode *src); + CallNode *CreateStmtCall(PUIdx puIdx, const MapleVector &args, Opcode opCode = OP_call); + CallNode *CreateStmtCall(const std::string &callee, const MapleVector &args); + CallNode *CreateStmtVirtualCall(PUIdx puIdx, const MapleVector &args) { + return CreateStmtCall(puIdx, args, OP_virtualcall); + } + + CallNode *CreateStmtSuperclassCall(PUIdx puIdx, const MapleVector &args) { + return CreateStmtCall(puIdx, args, OP_superclasscall); + } + + CallNode *CreateStmtInterfaceCall(PUIdx puIdx, const MapleVector &args) { + return CreateStmtCall(puIdx, args, OP_interfacecall); + } + + IcallNode *CreateStmtIcall(const MapleVector &args); + IcallNode *CreateStmtIcallAssigned(const MapleVector &args, const MIRSymbol &ret); + IcallNode *CreateStmtIcallproto(const MapleVector &args, const TyIdx &prototypeIdx); + IcallNode *CreateStmtIcallprotoAssigned(const MapleVector &args, const MIRSymbol &ret, + const TyIdx &prototypeIdx); + // For Call, VirtualCall, SuperclassCall, InterfaceCall + IntrinsiccallNode *CreateStmtIntrinsicCall(MIRIntrinsicID idx, const MapleVector &arguments, + TyIdx tyIdx = TyIdx()); + IntrinsiccallNode *CreateStmtXintrinsicCall(MIRIntrinsicID idx, const MapleVector &arguments); + CallNode *CreateStmtCallAssigned(PUIdx puIdx, const MIRSymbol *ret, Opcode op = OP_callassigned); + CallNode *CreateStmtCallAssigned(PUIdx puIdx, const MapleVector &args, const MIRSymbol *ret, + Opcode opcode = OP_callassigned, TyIdx tyIdx = TyIdx()); + CallNode *CreateStmtCallRegassigned(PUIdx, PregIdx, Opcode); + CallNode *CreateStmtCallRegassigned(PUIdx puIdx, PregIdx pRegIdx, Opcode opcode, BaseNode *arg); + CallNode *CreateStmtCallRegassigned(PUIdx puIdx, const MapleVector &args, PregIdx pRegIdx, Opcode opcode); + IntrinsiccallNode *CreateStmtIntrinsicCallAssigned(MIRIntrinsicID idx, const MapleVector &args, + PregIdx retPregIdx); + IntrinsiccallNode *CreateStmtIntrinsicCallAssigned(MIRIntrinsicID idx, const MapleVector &args, + const MIRSymbol *ret, TyIdx tyIdx = TyIdx()); + IntrinsiccallNode *CreateStmtXintrinsicCallAssigned(MIRIntrinsicID idx, const MapleVector &args, + const MIRSymbol *ret); + IfStmtNode *CreateStmtIf(BaseNode *cond); + IfStmtNode 
*CreateStmtIfThenElse(BaseNode *cond); + DoloopNode *CreateStmtDoloop(StIdx doVarStIdx, bool isPReg, BaseNode *startExp, BaseNode *contExp, + BaseNode *incrExp); + SwitchNode *CreateStmtSwitch(BaseNode *opnd, LabelIdx defaultLabel, const CaseVector &switchTable); + GotoNode *CreateStmtGoto(Opcode o, LabelIdx labIdx); + JsTryNode *CreateStmtJsTry(LabelIdx cLabIdx, LabelIdx fLabIdx); + TryNode *CreateStmtTry(const MapleVector &cLabIdxs); + CatchNode *CreateStmtCatch(const MapleVector &tyIdxVec); + LabelIdx GetOrCreateMIRLabel(const std::string &name) const; + LabelIdx CreateLabIdx(MIRFunction &mirFunc) const; + LabelNode *CreateStmtLabel(LabelIdx labIdx); + StmtNode *CreateStmtComment(const std::string &cmnt); + CondGotoNode *CreateStmtCondGoto(BaseNode *cond, Opcode op, LabelIdx labIdx); + void AddStmtInCurrentFunctionBody(StmtNode &stmt) const; + MIRSymbol *GetSymbol(TyIdx tyIdx, const std::string &name, MIRSymKind mClass, MIRStorageClass sClass, + uint8 scpID, bool sameType) const; + MIRSymbol *GetSymbol(TyIdx tyIdx, GStrIdx strIdx, MIRSymKind mClass, MIRStorageClass sClass, + uint8 scpID, bool sameType) const; + MIRSymbol *GetOrCreateSymbol(TyIdx tyIdx, const std::string &name, MIRSymKind mClass, MIRStorageClass sClass, + MIRFunction *func, uint8 scpID, bool sametype) const; + MIRSymbol *GetOrCreateSymbol(TyIdx tyIdx, GStrIdx strIdx, MIRSymKind mClass, MIRStorageClass sClass, + MIRFunction *func, uint8 scpID, bool sameType) const; + MIRSymbol *CreatePregFormalSymbol(TyIdx tyIdx, PregIdx pRegIdx, MIRFunction &func) const; + // for creating symbol + MIRSymbol *CreateSymbol(TyIdx tyIdx, const std::string &name, MIRSymKind mClass, MIRStorageClass sClass, + MIRFunction *func, uint8 scpID) const; + MIRSymbol *CreateSymbol(TyIdx tyIdx, GStrIdx strIdx, MIRSymKind mClass, MIRStorageClass sClass, + MIRFunction *func, uint8 scpID) const; + MIRSymbol *CreateConstStringSymbol(const std::string &symbolName, const std::string &content); + MIRSymbol *GetOrCreateLocalDecl(const std::string &str, TyIdx tyIdx, MIRSymbolTable &symbolTable, + bool &created) const; + // for creating nodes + AddrofNode *CreateAddrof(const MIRSymbol &st, PrimType pty = PTY_ptr); + AddrofNode *CreateDread(const MIRSymbol &st, PrimType pty); + virtual MemPool *GetCurrentFuncCodeMp(); + virtual MapleAllocator *GetCurrentFuncCodeMpAllocator(); + virtual MemPool *GetCurrentFuncDataMp(); + + virtual void GlobalLock() {} + virtual void GlobalUnlock() {} + + private: + MIRSymbol *GetOrCreateGlobalDecl(const std::string &str, TyIdx tyIdx, bool &created) const; + + MIRModule *mirModule; + MapleSet incompleteTypeRefedSet; + // + std::vector> extraFieldsTuples; + unsigned int lineNum = 0; +}; + +class MIRBuilderExt : public MIRBuilder { + public: + explicit MIRBuilderExt(MIRModule *module, pthread_mutex_t *mutex = nullptr); + virtual ~MIRBuilderExt() = default; + + void SetCurrentFunction(MIRFunction &func) override { + curFunction = &func; + } + + MIRFunction *GetCurrentFunction() const override { + return curFunction; + } + + MemPool *GetCurrentFuncCodeMp() override; + MapleAllocator *GetCurrentFuncCodeMpAllocator() override; + void GlobalLock() override; + void GlobalUnlock() override; + + private: + MIRFunction *curFunction = nullptr; + pthread_mutex_t *mutex = nullptr; +}; +} // namespace maple +#endif // MAPLE_IR_INCLUDE_MIR_BUILDER_H diff --git a/src/mapleall/maple_ir/include/mir_config.h b/src/mapleall/maple_ir/include/mir_config.h new file mode 100644 index 
0000000000000000000000000000000000000000..7e753473baf5325cecb344cf0f87bc2db2c0e35e --- /dev/null +++ b/src/mapleall/maple_ir/include/mir_config.h @@ -0,0 +1,118 @@ +/* + * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +// configuration definition for code in maple_ir namespace +#ifndef MAPLE_IR_INCLUDE_MIR_CONFIG_H +#define MAPLE_IR_INCLUDE_MIR_CONFIG_H + +// MIR_FEATURE_FULL = 1 : for host/server size building, by default. +// MIR_FEATURE_FULL = 0 : for resource-constrained devices. optimized for memory size +#if !defined(MIR_FEATURE_FULL) +#define MIR_FEATURE_FULL 1 // default to full feature building, for debugging +#endif // MIR_FEATURE_FULL define + +// MIR_DEBUG = 0 : for release building. +// MIR_DEBUG = 1 : for debug building. +#ifndef MIR_DEBUG +#define MIR_DEBUG 0 // currently default to none. turn it on explicitly +#endif // MIR_DEBUG + +// MIR_DEBUG_LEVEL = 0: no debuging information at all. +// 1: with error information. +// 2: with severe warning information +// 3: with normal warning information +// 4: with normal information +// 5: with everything +// +#ifndef MIR_DEBUG_LEVEL +#define MIR_DEBUG_LEVEL 0 +#endif // MIR_DEBUG_LEVEL +// assertion +#if !MIR_FEATURE_FULL +#define MIR_ASSERT(...) \ + do { \ + } while (0) +#define MIR_PRINTF(...) \ + do { \ + } while (0) +#define MIR_INFO(...) \ + do { \ + } while (0) +#define MIR_ERROR(...) \ + do { \ + } while (0) +#define MIR_WARNING(...) \ + do { \ + } while (0) +#define MIR_CAST_TO(var, totype) ((totype)(var)) +#include +#if DEBUG +#include +#define MIR_FATAL(...) \ + do { \ + printf("FATAL ERROR: (%s:%d) ", __FILE__, __LINE__); \ + printf(__VA_ARGS__); \ + exit(1); \ + } while (0) +#else +#define MIR_FATAL(...) \ + do { \ + exit(1); \ + } while (0) +#endif // DEBUG +#else // MIR_FEATURE_FULL +#include +#include +#include + +namespace maple { +#define MIR_ASSERT(...) assert(__VA_ARGS__) +#define MIR_FATAL(...) \ + do { \ + fprintf(stderr, "FATAL ERROR: (%s:%d) ", __FILE__, __LINE__); \ + fprintf(stderr, __VA_ARGS__); \ + exit(EXIT_FAILURE); \ + } while (0) +#define MIR_ERROR(...) \ + do { \ + fprintf(stderr, "ERROR: (%s:%d) ", __FILE__, __LINE__); \ + fprintf(stderr, __VA_ARGS__); \ + } while (0) +#define MIR_WARNING(...) \ + do { \ + fprintf(stderr, "WARNING: (%s:%d) ", __FILE__, __LINE__); \ + fprintf(stderr, __VA_ARGS__); \ + } while (0) +#define MIR_PRINTF(...) printf(__VA_ARGS__) +#define MIR_INFO(...) printf(__VA_ARGS__) +#define MIR_CAST_TO(var, totype) static_cast(var) +#endif // !MIR_FEATURE_FULL +#if MIR_DEBUG +#else +#endif // MIR_DEBUG + +// MIR specific configurations. +// Note: fix size definition cannot handle arbitary long MIR lines, such +// as those array initialization lines. 
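Editor's note, not part of the patch: a minimal sketch of how the reporting macros above are meant to be used; the helper and its thresholds are hypothetical, and kMirMaxLineSize is the constant defined immediately below.

  static void CheckMirLineLength(size_t len) {
    MIR_ASSERT(len > 0 && "unexpected empty line");
    if (len >= static_cast<size_t>(kMirMaxLineSize)) {
      MIR_FATAL("input line of %zu characters exceeds kMirMaxLineSize\n", len);
    } else if (len > static_cast<size_t>(kMirMaxLineSize) / 2) {
      MIR_WARNING("input line of %zu characters is close to the limit\n", len);
    }
  }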
+constexpr int kMirMaxLineSize = 3072; // a max of 3K characters per line initially +// LIBRARY API availability +#if MIR_FEATURE_FULL +#define HAVE_STRTOD 1 // strtod +#define HAVE_MALLOC 1 // malloc/free +#else // compact VM +#define HAVE_STRTOD 1 // strtod in current libc +#define HAVE_MALLOC 0 // no malloc/free in current libc +#endif // MIR_FEATURE_FULL +} // namespace maple +#endif // MAPLE_IR_INCLUDE_MIR_CONFIG_H diff --git a/src/mapleall/maple_ir/include/mir_const.h b/src/mapleall/maple_ir/include/mir_const.h new file mode 100644 index 0000000000000000000000000000000000000000..fd1f0f9460671e5514a504f26983ded753753955 --- /dev/null +++ b/src/mapleall/maple_ir/include/mir_const.h @@ -0,0 +1,618 @@ +/* + * Copyright (c) [2019-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLE_IR_INCLUDE_MIR_CONST_H +#define MAPLE_IR_INCLUDE_MIR_CONST_H +#include +#include "mir_type.h" +#include "mpl_int_val.h" + +namespace maple { +class MIRConst; // circular dependency exists, no other choice +using MIRConstPtr = MIRConst*; +#if MIR_FEATURE_FULL +class MIRSymbol; // circular dependency exists, no other choice +enum MIRConstKind { + kConstInvalid, + kConstInt, + kConstAddrof, + kConstAddrofFunc, + kConstLblConst, + kConstStrConst, + kConstStr16Const, + kConstFloatConst, + kConstDoubleConst, + kConstFloat128Const, + kConstAggConst, + kConstStConst +}; + +class MIRConst { + public: + explicit MIRConst(MIRType &type, MIRConstKind constKind = kConstInvalid) + : type(&type), kind(constKind) {} + + virtual ~MIRConst() = default; + + virtual void Dump(const MIRSymbolTable *localSymTab = nullptr) const { + (void)localSymTab; + } + + virtual bool IsZero() const { + return false; + } + + virtual bool IsOne() const { + return false; + } + + virtual bool IsMagicNum() const { + return false; + } + + // NO OP + virtual void Neg() {} + + virtual bool operator==(const MIRConst &rhs) const { + return &rhs == this; + } + + virtual MIRConst *Clone(MemPool &memPool) const = 0; + + MIRConstKind GetKind() const { + return kind; + } + + MIRType &GetType() { + return *type; + } + + const MIRType &GetType() const { + return *type; + } + + void SetType(MIRType &t) { + type = &t; + } + + private: + MIRType *type; + MIRConstKind kind; +}; + +class MIRIntConst : public MIRConst { + public: + MIRIntConst(uint64 val, MIRType &type) + : MIRConst(type, kConstInt), value(val, type.GetPrimType()) {} + + MIRIntConst(const IntVal &val, MIRType &type) : MIRConst(type, kConstInt), value(val) { + [[maybe_unused]] PrimType pType = type.GetPrimType(); + ASSERT(IsPrimitiveInteger(pType) && GetPrimTypeActualBitSize(pType) <= value.GetBitWidth(), + "Constant is tried to be constructed with non-integral type or bit-width is not appropriate for it"); + } + + /// @return number of used bits in the value + uint8 GetActualBitWidth() const; + + void Trunc(uint8 width) { + value.TruncInPlace(width); + } + + void Dump(const MIRSymbolTable *localSymTab) const override; + + bool IsNegative() const { + return value.IsSigned() && 
value.GetSignBit(); + } + + bool IsPositive() const { + return !IsNegative() && value != 0; + } + + bool IsZero() const override { + return value == 0; + } + + bool IsOne() const override { + return value == 1; + } + + void Neg() override { + value = -value; + } + + const IntVal &GetValue() const { + return value; + } + + int64 GetExtValue(uint8 size = 0) const { + return value.GetExtValue(size); + } + + int64 GetSXTValue(uint8 size = 0) const { + return value.GetSXTValue(size); + } + + uint64 GetZXTValue(uint8 size = 0) const { + return value.GetZXTValue(size); + } + + void SetValue(int64 val) const { + (void)val; + CHECK_FATAL(false, "Can't Use This Interface in This Object"); + } + + bool operator==(const MIRConst &rhs) const override; + + MIRIntConst *Clone(MemPool &memPool [[maybe_unused]]) const override { + CHECK_FATAL(false, "Can't Use This Interface in This Object"); + } + + private: + IntVal value; +}; + +class MIRAddrofConst : public MIRConst { + public: + MIRAddrofConst(StIdx sy, FieldID fi, MIRType &ty) + : MIRConst(ty, kConstAddrof), stIdx(sy), fldID(fi), offset(0) {} + + MIRAddrofConst(StIdx sy, FieldID fi, MIRType &ty, int32 ofst) + : MIRConst(ty, kConstAddrof), stIdx(sy), fldID(fi), offset(ofst) {} + + ~MIRAddrofConst() = default; + + StIdx GetSymbolIndex() const { + return stIdx; + } + + void SetSymbolIndex(StIdx idx) { + stIdx = idx; + } + + FieldID GetFieldID() const { + return fldID; + } + + int32 GetOffset() const { + return offset; + } + + void Dump(const MIRSymbolTable *localSymTab) const override; + + bool operator==(const MIRConst &rhs) const override; + + MIRAddrofConst *Clone(MemPool &memPool) const override { + return memPool.New(*this); + } + + private: + StIdx stIdx; + FieldID fldID; + int32 offset; +}; + +class MIRAddroffuncConst : public MIRConst { + public: + MIRAddroffuncConst(PUIdx idx, MIRType &ty) + : MIRConst(ty, kConstAddrofFunc), puIdx(idx) {} + + ~MIRAddroffuncConst() = default; + + PUIdx GetValue() const { + return puIdx; + } + + void Dump(const MIRSymbolTable *localSymTab) const override; + + bool operator==(const MIRConst &rhs) const override; + + MIRAddroffuncConst *Clone(MemPool &memPool) const override { + return memPool.New(*this); + } + + private: + PUIdx puIdx; +}; + +class MIRLblConst : public MIRConst { + public: + MIRLblConst(LabelIdx val, PUIdx pidx, MIRType &type) + : MIRConst(type, kConstLblConst), value(val), puIdx(pidx) {} + + ~MIRLblConst() = default; + + void Dump(const MIRSymbolTable *localSymTab) const override; + bool operator==(const MIRConst &rhs) const override; + + MIRLblConst *Clone(MemPool &memPool) const override { + return memPool.New(*this); + } + + LabelIdx GetValue() const { + return value; + } + + PUIdx GetPUIdx() const { + return puIdx; + } + + private: + LabelIdx value; + PUIdx puIdx; +}; + +class MIRStrConst : public MIRConst { + public: + MIRStrConst(UStrIdx val, MIRType &type) : MIRConst(type, kConstStrConst), value(val) {} + + MIRStrConst(const std::string &str, MIRType &type); + + ~MIRStrConst() = default; + + void Dump(const MIRSymbolTable *localSymTab) const override; + bool operator==(const MIRConst &rhs) const override; + + MIRStrConst *Clone(MemPool &memPool) const override { + return memPool.New(*this); + } + + UStrIdx GetValue() const { + return value; + } + + static PrimType GetPrimType() { + return kPrimType; + } + + private: + UStrIdx value; + static const PrimType kPrimType = PTY_ptr; +}; + +class MIRStr16Const : public MIRConst { + public: + MIRStr16Const(const U16StrIdx &val, MIRType &type) : 
MIRConst(type, kConstStr16Const), value(val) {} + + MIRStr16Const(const std::u16string &str, MIRType &type); + ~MIRStr16Const() = default; + + static PrimType GetPrimType() { + return kPrimType; + } + + void Dump(const MIRSymbolTable *localSymTab) const override; + bool operator==(const MIRConst &rhs) const override; + + MIRStr16Const *Clone(MemPool &memPool) const override { + return memPool.New(*this); + } + + U16StrIdx GetValue() const { + return value; + } + + private: + static const PrimType kPrimType = PTY_ptr; + U16StrIdx value; +}; + +class MIRFloatConst : public MIRConst { + public: + using value_type = float; + MIRFloatConst(float val, MIRType &type) : MIRConst(type, kConstFloatConst) { + value.floatValue = val; + } + + ~MIRFloatConst() = default; + + void SetFloatValue(float fvalue) { + value.floatValue = fvalue; + } + + value_type GetFloatValue() const { + return value.floatValue; + } + + static PrimType GetPrimType() { + return kPrimType; + } + + int32 GetIntValue() const { + return value.intValue; + } + + value_type GetValue() const { + return GetFloatValue(); + } + + void Dump(const MIRSymbolTable *localSymTab) const override; + bool IsZero() const override { + return fabs(value.floatValue) <= 1e-6; + } + + bool IsGeZero() const { + return value.floatValue >= 0; + } + + bool IsNeg() const { + return ((static_cast(value.intValue) & 0x80000000) == 0x80000000); + } + + bool IsOne() const override { + return fabs(value.floatValue - 1) <= 1e-6; + }; + bool IsAllBitsOne() const { + return fabs(value.floatValue + 1) <= 1e-6; + }; + void Neg() override { + value.floatValue = -value.floatValue; + } + + bool operator==(const MIRConst &rhs) const override; + + MIRFloatConst *Clone(MemPool &memPool) const override { + return memPool.New(*this); + } + + private: + static const PrimType kPrimType = PTY_f32; + union { + value_type floatValue; + int32 intValue; + } value; +}; + +class MIRDoubleConst : public MIRConst { + public: + using value_type = double; + MIRDoubleConst(double val, MIRType &type) : MIRConst(type, kConstDoubleConst) { + value.dValue = val; + } + + ~MIRDoubleConst() = default; + + uint32 GetIntLow32() const { + auto unsignVal = static_cast(value.intValue); + return static_cast(unsignVal & 0xffffffff); + } + + uint32 GetIntHigh32() const { + auto unsignVal = static_cast(value.intValue); + return static_cast((unsignVal & 0xffffffff00000000) >> 32); + } + + int64 GetIntValue() const { + return value.intValue; + } + + value_type GetValue() const { + return value.dValue; + } + + static PrimType GetPrimType() { + return kPrimType; + } + + void Dump(const MIRSymbolTable *localSymTab) const override; + bool IsZero() const override { + return fabs(value.dValue) <= 1e-15; + } + + bool IsGeZero() const { + return value.dValue >= 0; + } + + bool IsNeg() const { + return ((static_cast(value.intValue) & 0x8000000000000000LL) == 0x8000000000000000LL); + } + + bool IsOne() const override { + return fabs(value.dValue - 1) <= 1e-15; + }; + bool IsAllBitsOne() const { + return fabs(value.dValue + 1) <= 1e-15; + }; + void Neg() override { + value.dValue = -value.dValue; + } + + bool operator==(const MIRConst &rhs) const override; + + MIRDoubleConst *Clone(MemPool &memPool) const override { + return memPool.New(*this); + } + + private: + static const PrimType kPrimType = PTY_f64; + union { + value_type dValue; + int64 intValue; + } value; +}; + +class MIRFloat128Const : public MIRConst { + public: + MIRFloat128Const(const uint64 &val, MIRType &type) : MIRConst(type, kConstFloat128Const) { + 
value = &val; + } + + ~MIRFloat128Const() = default; + + const uint64 *GetIntValue() const { + return value; + } + + static PrimType GetPrimType() { + return kPrimType; + } + + bool IsZero() const override { + MIR_ASSERT(value && "value must not be nullptr!"); + return value[0] == 0 && value[1] == 0; + } + + bool IsOne() const override { + MIR_ASSERT(value && "value must not be nullptr!"); + return value[0] == 0 && value[1] == 0x3FFF000000000000; + }; + bool IsAllBitsOne() const { + MIR_ASSERT(value && "value must not be nullptr!"); + return (value[0] == 0xffffffffffffffff && value[1] == 0xffffffffffffffff); + }; + bool operator==(const MIRConst &rhs) const override; + + MIRFloat128Const *Clone(MemPool &memPool) const override { + auto *res = memPool.New(*this); + return res; + } + + void Dump(const MIRSymbolTable *localSymTab) const override; + + private: + static const PrimType kPrimType = PTY_f128; + // value[0]: Low 64 bits; value[1]: High 64 bits. + const uint64 *value; +}; + +class MIRAggConst : public MIRConst { + public: + MIRAggConst(MIRModule &mod, MIRType &type) + : MIRConst(type, kConstAggConst), + constVec(mod.GetMPAllocator().Adapter()), + fieldIdVec(mod.GetMPAllocator().Adapter()) {} + + ~MIRAggConst() = default; + + MIRConst *GetAggConstElement(unsigned int fieldId) { + for (size_t i = 0; i < fieldIdVec.size(); ++i) { + if (fieldId == fieldIdVec[i]) { + return constVec[i]; + } + } + return nullptr; + } + + void SetFieldIdOfElement(uint32 index, uint32 fieldId) { + ASSERT(index < fieldIdVec.size(), "index out of range"); + fieldIdVec[index] = fieldId; + } + + const MapleVector &GetConstVec() const { + return constVec; + } + + MapleVector &GetConstVec() { + return constVec; + } + + const MIRConstPtr &GetConstVecItem(size_t index) const { + CHECK_FATAL(index < constVec.size(), "index out of range"); + return constVec[index]; + } + + MIRConstPtr &GetConstVecItem(size_t index) { + CHECK_FATAL(index < constVec.size(), "index out of range"); + return constVec[index]; + } + + void SetConstVecItem(size_t index, MIRConst& st) { + CHECK_FATAL(index < constVec.size(), "index out of range"); + constVec[index] = &st; + } + + uint32 GetFieldIdItem(size_t index) const { + ASSERT(index < fieldIdVec.size(), "index out of range"); + return fieldIdVec[index]; + } + + void SetItem(uint32 index, MIRConst *mirConst, uint32 fieldId) { + CHECK_FATAL(index < constVec.size(), "index out of range"); + constVec[index] = mirConst; + fieldIdVec[index] = fieldId; + } + + void AddItem(MIRConst *mirConst, uint32 fieldId) { + constVec.push_back(mirConst); + fieldIdVec.push_back(fieldId); + } + + void PushBack(MIRConst *elem) { + AddItem(elem, 0); + } + + void Dump(const MIRSymbolTable *localSymTab) const override; + bool operator==(const MIRConst &rhs) const override; + + MIRAggConst *Clone(MemPool &memPool) const override { + return memPool.New(*this); + } + + private: + MapleVector constVec; + MapleVector fieldIdVec; +}; + +// the const has one or more symbols +class MIRStConst : public MIRConst { + public: + MIRStConst(MIRModule &mod, MIRType &type) + : MIRConst(type, kConstStConst), + stVec(mod.GetMPAllocator().Adapter()), + stOffsetVec(mod.GetMPAllocator().Adapter()) {} + + const MapleVector &GetStVec() const { + return stVec; + } + void PushbackSymbolToSt(MIRSymbol *sym) { + stVec.push_back(sym); + } + + MIRSymbol *GetStVecItem(size_t index) { + CHECK_FATAL(index < stVec.size(), "array index out of range"); + return stVec[index]; + } + + const MapleVector &GetStOffsetVec() const { + return stOffsetVec; 
+ } + void PushbackOffsetToSt(uint32 offset) { + stOffsetVec.push_back(offset); + } + + uint32 GetStOffsetVecItem(size_t index) const { + CHECK_FATAL(index < stOffsetVec.size(), "array index out of range"); + return stOffsetVec[index]; + } + + MIRStConst *Clone(MemPool &memPool) const override { + auto *res = memPool.New(*this); + return res; + } + + ~MIRStConst() = default; + + private: + MapleVector stVec; // symbols that in the st const + MapleVector stOffsetVec; // symbols offset +}; +#endif // MIR_FEATURE_FULL + +bool IsDivSafe(const MIRIntConst& dividend, const MIRIntConst& divisor, PrimType pType); + +} // namespace maple + +#define LOAD_SAFE_CAST_FOR_MIR_CONST +#include "ir_safe_cast_traits.def" + +#endif // MAPLE_IR_INCLUDE_MIR_CONST_H diff --git a/src/mapleall/maple_ir/include/mir_enum.h b/src/mapleall/maple_ir/include/mir_enum.h new file mode 100644 index 0000000000000000000000000000000000000000..92f7fb9368415955b615d1ba610e976a3cc799d2 --- /dev/null +++ b/src/mapleall/maple_ir/include/mir_enum.h @@ -0,0 +1,88 @@ +/* + * Copyright (c) [2022] Futurewei Technologies, Inc. All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ + +#ifndef MAPLEIR_INCLUDE_MIR_ENUMERATION_H +#define MAPLEIR_INCLUDE_MIR_ENUMERATION_H + +namespace maple { + +using EnumElem = std::pair; +class MIREnum { + public: + explicit MIREnum(PrimType ptyp, GStrIdx stridx) + : primType(ptyp), nameStrIdx(stridx) {} + ~MIREnum() = default; + + void NewElement(GStrIdx sidx, IntVal value) { + elements.push_back(EnumElem(sidx, value)); + } + + void AddNextElement(GStrIdx sidx) { + if (elements.empty()) { + elements.push_back(EnumElem(sidx, IntVal(0, primType))); + return; + } + IntVal newValue = elements.back().second + 1; + elements.push_back(EnumElem(sidx, newValue)); + } + + void SetPrimType(PrimType pt) { + primType = pt; + } + + PrimType GetPrimType() const { + return primType; + } + + const std::vector &GetElements() const { + return elements; + } + + GStrIdx GetNameIdx() const { + return nameStrIdx; + } + + const std::string &GetName() const; + void Dump() const; + + private: + PrimType primType = PTY_i32; // must be integer primtype + GStrIdx nameStrIdx{ 0 }; // name of this enum in global string table + std::vector elements{}; +}; + +struct EnumTable { + std::vector enumTable; + + ~EnumTable() { + for (MIREnum *mirEnum : enumTable) { + if (mirEnum == nullptr) { + continue; + } + delete mirEnum; + mirEnum = nullptr; + } + } + + void Dump() { + for (MIREnum *mirEnum : enumTable) { + mirEnum->Dump(); + } + } +}; + +} /* namespace maple */ + +#endif /* MAPLEIR_INCLUDE_MIR_ENUMERATION_H */ diff --git a/src/mapleall/maple_ir/include/mir_function.h b/src/mapleall/maple_ir/include/mir_function.h new file mode 100644 index 0000000000000000000000000000000000000000..2363180fa4fd458e6c613cfa682bb1ae3d5fe1c0 --- /dev/null +++ b/src/mapleall/maple_ir/include/mir_function.h @@ -0,0 +1,1423 @@ +/* + * Copyright (c) [2019-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. 
+ * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLE_IR_INCLUDE_MIR_FUNCTION_H +#define MAPLE_IR_INCLUDE_MIR_FUNCTION_H +#include +#include "mir_module.h" +#include "mir_const.h" +#include "mir_symbol.h" +#include "mir_preg.h" +#include "intrinsics.h" +#include "file_layout.h" +#include "mir_nodes.h" +#include "mir_type.h" +#include "mir_scope.h" +#include "profile.h" +#include "func_desc.h" + +#define DEBUGME true + +namespace maple { +enum PointerAttr: uint32_t { + kPointerUndeiced = 0x1, + kPointerNull = 0x2, + kPointerNoNull = 0x3 +}; + +enum FuncAttrProp : uint32_t { + kNoThrowException = 0x1, + kNoRetNewlyAllocObj = 0x2, + kNoDefEffect = 0x4, + kNoDefArgEffect = 0x8, + kPureFunc = 0x10, + kIpaSeen = 0x20, + kUseEffect = 0x40, + kDefEffect = 0x80 +}; + +// describe a formal definition in a function declaration +class FormalDef { + public: + GStrIdx formalStrIdx = GStrIdx(0); // used when processing the prototype + MIRSymbol *formalSym = nullptr; // used in the function definition + TyIdx formalTyIdx = TyIdx(); + TypeAttrs formalAttrs = TypeAttrs(); // the formal's type attributes + + FormalDef() {}; + virtual ~FormalDef() {} + FormalDef(MIRSymbol *s, const TyIdx &tidx, const TypeAttrs &at) : formalSym(s), formalTyIdx(tidx), formalAttrs(at) {} + FormalDef(const GStrIdx &sidx, MIRSymbol *s, const TyIdx &tidx, const TypeAttrs &at) + : formalStrIdx(sidx), formalSym(s), formalTyIdx(tidx), formalAttrs(at) {} +}; + +class InlineSummary; // circular dependency exists, no other choice +class MeFunction; // circular dependency exists, no other choice +class EAConnectionGraph; // circular dependency exists, no other choice +class MIRFunction { + public: + MIRFunction(MIRModule *mod, StIdx idx) + : module(mod), + symbolTableIdx(idx) {} + + ~MIRFunction() = default; + + void Dump(bool withoutBody = false); + void DumpUpFormal(int32 indent) const; + void DumpFrame(int32 indent) const; + void DumpFuncBody(int32 indent); + void DumpScope() const; + const MIRSymbol *GetFuncSymbol() const; + MIRSymbol *GetFuncSymbol(); + + void SetBaseClassFuncNames(GStrIdx strIdx); + void SetMemPool(MemPool *memPool) { + SetCodeMemPool(memPool); + codeMemPoolAllocator.SetMemPool(codeMemPool); + } + + /// update signature_strIdx, basefunc_strIdx, baseclass_strIdx, basefunc_withtype_strIdx + /// without considering baseclass_strIdx, basefunc_strIdx's original non-zero values + /// \param strIdx full_name strIdx of the new function name + void OverrideBaseClassFuncNames(GStrIdx strIdx); + const std::string &GetName() const; + + GStrIdx GetNameStrIdx() const; + + const std::string &GetBaseClassName() const; + + const std::string &GetBaseFuncName() const; + + const std::string &GetBaseFuncNameWithType() const; + + const std::string &GetBaseFuncSig() const; + + const std::string &GetSignature() const; + + GStrIdx GetBaseClassNameStrIdx() const { + return baseClassStrIdx; + } + + GStrIdx GetBaseFuncNameStrIdx() const { + return baseFuncStrIdx; + } + + GStrIdx GetBaseFuncNameWithTypeStrIdx() const { + return baseFuncWithTypeStrIdx; + } + + GStrIdx GetBaseFuncSigStrIdx() const { + return baseFuncSigStrIdx; + } + + void 
SetBaseClassNameStrIdx(GStrIdx id) { + baseClassStrIdx = id; + } + + void SetBaseFuncNameStrIdx(GStrIdx id) { + baseFuncStrIdx = id; + } + + void SetBaseFuncNameWithTypeStrIdx(GStrIdx id) { + baseFuncWithTypeStrIdx = id; + } + + const MIRType *GetReturnType() const; + MIRType *GetReturnType(); + bool IsReturnVoid() const { + return GetReturnType()->GetPrimType() == PTY_void; + } + TyIdx GetReturnTyIdx() const { + CHECK_FATAL(funcType != nullptr, "funcType is nullptr"); + return funcType->GetRetTyIdx(); + } + void SetReturnTyIdx(TyIdx tyidx) { + CHECK_FATAL(funcType != nullptr, "funcType is nullptr"); + funcType->SetRetTyIdx(tyidx); + } + + const MIRType *GetClassType() const; + TyIdx GetClassTyIdx() const { + return classTyIdx; + } + void SetClassTyIdx(TyIdx tyIdx) { + classTyIdx = tyIdx; + } + void SetClassTyIdx(uint32 idx) { + classTyIdx.reset(idx); + } + + void AddArgument(MIRSymbol *st) { + ASSERT(st != nullptr, "null ptr check"); + FormalDef formalDef(st->GetNameStrIdx(), st, st->GetTyIdx(), st->GetAttrs()); + formalDefVec.push_back(formalDef); + } + + void AddFormalDef(const FormalDef &formalDef) { + formalDefVec.push_back(formalDef); + } + + size_t GetParamSize() const { + CHECK_FATAL(funcType != nullptr, "funcType is nullptr"); + return funcType->GetParamTypeList().size(); + } + + auto &GetParamTypes() const { + CHECK_FATAL(funcType != nullptr, "funcType is nullptr"); + return funcType->GetParamTypeList(); + } + + TyIdx GetNthParamTyIdx(size_t i) const { + ASSERT(i < funcType->GetParamTypeList().size(), "array index out of range"); + return funcType->GetParamTypeList()[i]; + } + + const MIRType *GetNthParamType(size_t i) const; + MIRType *GetNthParamType(size_t i); + + const TypeAttrs &GetNthParamAttr(size_t i) const { + ASSERT(i < formalDefVec.size(), "array index out of range"); + ASSERT(formalDefVec[i].formalSym != nullptr, "null ptr check"); + return formalDefVec[i].formalSym->GetAttrs(); + } + + void UpdateFuncTypeAndFormals(const std::vector &symbols, bool clearOldArgs = false); + void UpdateFuncTypeAndFormalsAndReturnType(const std::vector &symbols, const TyIdx &retTyIdx, + bool clearOldArgs = false); + LabelIdx GetOrCreateLableIdxFromName(const std::string &name); + GStrIdx GetLabelStringIndex(LabelIdx labelIdx) const { + CHECK_FATAL(labelTab != nullptr, "labelTab is nullptr"); + ASSERT(labelIdx < labelTab->Size(), "index out of range in GetLabelStringIndex"); + return labelTab->GetSymbolFromStIdx(labelIdx); + } + const std::string &GetLabelName(LabelIdx labelIdx) const { + GStrIdx strIdx = GetLabelStringIndex(labelIdx); + return GlobalTables::GetStrTable().GetStringFromStrIdx(strIdx); + } + + const MIRSymbol *GetLocalOrGlobalSymbol(const StIdx &idx, bool checkFirst = false) const; + MIRSymbol *GetLocalOrGlobalSymbol(const StIdx &idx, bool checkFirst = false); + + void SetAttrsFromSe(uint8 specialEffect); + + const FuncAttrs &GetAttrs() const { + return funcAttrs; + } + + void SetAttrs(FuncAttrs attr) { + funcAttrs = attr; + } + + bool GetAttr(FuncAttrKind attrKind) const { + return funcAttrs.GetAttr(attrKind); + } + + void SetAttr(FuncAttrKind attrKind) { + funcAttrs.SetAttr(attrKind); + } + + void UnSetAttr(FuncAttrKind attrKind) { + funcAttrs.SetAttr(attrKind, true); + } + + bool IsVarargs() const { + return funcAttrs.GetAttr(FUNCATTR_varargs); + } + + bool IsWeak() const { + return funcAttrs.GetAttr(FUNCATTR_weak); + } + + bool IsStatic() const { + return funcAttrs.GetAttr(FUNCATTR_static); + } + + bool IsInline() const { + return funcAttrs.GetAttr(FUNCATTR_inline); + } 
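Editor's note, not part of the patch: a hedged sketch of how a pass might consult and update these attributes; both helper functions are hypothetical, and SetNoThrowException is one of the setters declared just below.

  // Leave weak and varargs functions alone.
  static bool CanBeRewritten(const MIRFunction &func) {
    return !func.IsWeak() && !func.IsVarargs();
  }

  // Mark a compiler-generated wrapper.
  static void MarkGeneratedWrapper(MIRFunction &func) {
    func.SetAttr(FUNCATTR_inline);   // hint for the inliner
    func.SetNoThrowException();      // the generated body contains no throwing calls
  }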
+ + bool IsExtern() const { + return funcAttrs.GetAttr(FUNCATTR_extern); + } + + bool IsNative() const { + return funcAttrs.GetAttr(FUNCATTR_native); + } + + bool IsFinal() const { + return funcAttrs.GetAttr(FUNCATTR_final); + } + + bool IsAbstract() const { + return funcAttrs.GetAttr(FUNCATTR_abstract); + } + + bool IsPublic() const { + return funcAttrs.GetAttr(FUNCATTR_public); + } + + bool IsPrivate() const { + return funcAttrs.GetAttr(FUNCATTR_private); + } + + bool IsProtected() const { + return funcAttrs.GetAttr(FUNCATTR_protected); + } + + bool IsConstructor() const { + return funcAttrs.GetAttr(FUNCATTR_constructor); + } + + bool IsLocal() const { + return funcAttrs.GetAttr(FUNCATTR_local); + } + + bool IsNoDefArgEffect() const { + return funcAttrs.GetAttr(FUNCATTR_nodefargeffect); + } + + bool IsNoDefEffect() const { + return funcAttrs.GetAttr(FUNCATTR_nodefeffect); + } + + bool IsNoRetGlobal() const { + return funcAttrs.GetAttr(FUNCATTR_noretglobal); + } + + bool IsNoThrowException() const { + return funcAttrs.GetAttr(FUNCATTR_nothrow_exception); + } + + bool IsNoRetArg() const { + return funcAttrs.GetAttr(FUNCATTR_noretarg); + } + + bool IsNoPrivateDefEffect() const { + return funcAttrs.GetAttr(FUNCATTR_noprivate_defeffect); + } + + bool IsIpaSeen() const { + return funcAttrs.GetAttr(FUNCATTR_ipaseen); + } + + bool IsPure() const { + return funcAttrs.GetAttr(FUNCATTR_pure); + } + + bool IsFirstArgReturn() const { + return funcAttrs.GetAttr(FUNCATTR_firstarg_return); + } + + bool IsUnSafe() const { + return !funcAttrs.GetAttr(FUNCATTR_safed) || funcAttrs.GetAttr(FUNCATTR_unsafed); + } + + bool IsSafe() const { + return funcAttrs.GetAttr(FUNCATTR_safed); + } + + void SetVarArgs() { + funcAttrs.SetAttr(FUNCATTR_varargs); + } + + void SetNoDefArgEffect() { + funcAttrs.SetAttr(FUNCATTR_nodefargeffect); + } + + void SetNoDefEffect() { + funcAttrs.SetAttr(FUNCATTR_nodefeffect); + } + + void SetNoRetGlobal() { + funcAttrs.SetAttr(FUNCATTR_noretglobal); + } + + void SetNoThrowException() { + funcAttrs.SetAttr(FUNCATTR_nothrow_exception); + } + + void SetNoRetArg() { + funcAttrs.SetAttr(FUNCATTR_noretarg); + } + + void SetNoPrivateDefEffect() { + funcAttrs.SetAttr(FUNCATTR_noprivate_defeffect); + } + + void SetIpaSeen() { + funcAttrs.SetAttr(FUNCATTR_ipaseen); + } + + void SetPure() { + funcAttrs.SetAttr(FUNCATTR_pure); + } + + void SetFirstArgReturn() { + funcAttrs.SetAttr(FUNCATTR_firstarg_return); + } + + void UnsetNoDefArgEffect() { + funcAttrs.SetAttr(FUNCATTR_nodefargeffect, true); + } + + void UnsetNoDefEffect() { + funcAttrs.SetAttr(FUNCATTR_nodefeffect, true); + } + + void UnsetNoRetGlobal() { + funcAttrs.SetAttr(FUNCATTR_noretglobal, true); + } + + void UnsetNoThrowException() { + funcAttrs.SetAttr(FUNCATTR_nothrow_exception, true); + } + + void UnsetPure() { + funcAttrs.SetAttr(FUNCATTR_pure, true); + } + + void UnsetNoRetArg() { + funcAttrs.SetAttr(FUNCATTR_noretarg, true); + } + + void UnsetNoPrivateDefEffect() { + funcAttrs.SetAttr(FUNCATTR_noprivate_defeffect, true); + } + + bool HasCall() const; + void SetHasCall(); + + bool IsReturnStruct() const; + void SetReturnStruct(); + void SetReturnStruct(const MIRType &retType); + + bool IsUserFunc() const; + void SetUserFunc(); + + bool IsInfoPrinted() const; + void SetInfoPrinted(); + void ResetInfoPrinted(); + + void SetNoReturn(); + bool NeverReturns() const; + + void SetHasSetjmp(); + bool HasSetjmp() const; + + void SetHasAsm(); + bool HasAsm() const; + + void SetStructReturnedInRegs(); + bool StructReturnedInRegs() const; + 
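Editor's note, not part of the patch: the flag setters above are typically driven by a single scan over the function body. A sketch under that assumption; RecordBodyFacts and its parameters are made up for illustration.

  static void RecordBodyFacts(MIRFunction &func, bool sawCall, bool sawAsm, bool sawSetjmp) {
    if (sawCall) {
      func.SetHasCall();     // later phases use this when reasoning about side effects
    }
    if (sawAsm) {
      func.SetHasAsm();      // inline asm limits what the optimizer may assume
    }
    if (sawSetjmp) {
      func.SetHasSetjmp();   // setjmp makes local state observable across calls
    }
  }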
+ void SetReturnStruct(const MIRType *retType); + + bool IsEmpty() const; + bool IsClinit() const; + uint32 GetInfo(GStrIdx strIdx) const; + uint32 GetInfo(const std::string &string) const; + bool IsAFormal(const MIRSymbol *st) const { + for (const auto &formalDef : formalDefVec) { + if (st == formalDef.formalSym) { + return true; + } + } + return false; + } + + uint32 GetFormalIndex(const MIRSymbol *symbol) const { + for (size_t i = 0; i < formalDefVec.size(); ++i) { + if (formalDefVec[i].formalSym == symbol) { + return i; + } + } + return 0xffffffff; + } + + FormalDef &GetFormalDefFromMIRSymbol(const MIRSymbol *symbol) { + for (auto &formalDef : formalDefVec) { + if (formalDef.formalSym == symbol) { + return formalDef; + } + } + CHECK_FATAL(false, "Impossible."); + } + + bool IsAFormalName(const GStrIdx idx) const { + for (const auto &formalDef : formalDefVec) { + if (idx == formalDef.formalStrIdx) { + return true; + } + } + return false; + } + + const FormalDef GetFormalFromName(const GStrIdx idx) const { + for (size_t i = 0; i < formalDefVec.size(); ++i) { + if (formalDefVec[i].formalStrIdx == idx) { + return formalDefVec[i]; + } + } + return FormalDef(); + } + + // tell whether this function is a Java method + bool IsJava() const { + return classTyIdx != 0u; + } + + const MIRType *GetNodeType(const BaseNode &node) const; + +#ifdef DEBUGME + void SetUpGDBEnv(); + void ResetGDBEnv(); +#endif + void ReleaseMemory() { + if (codeMemPoolTmp != nullptr) { + delete codeMemPoolTmp; + codeMemPoolTmp = nullptr; + } + } + + void ReleaseCodeMemory() { + if (codeMemPool != nullptr) { + codeMemPoolAllocator.SetMemPool(nullptr); + delete codeMemPool; + SetMemPool(nullptr); + } + } + + MemPool *GetCodeMempool() { + if (useTmpMemPool) { + if (codeMemPoolTmp == nullptr) { + codeMemPoolTmp = new ThreadLocalMemPool(memPoolCtrler, "func code mempool"); + codeMemPoolTmpAllocator.SetMemPool(codeMemPoolTmp); + } + return codeMemPoolTmp; + } + if (codeMemPool == nullptr) { + codeMemPool = new ThreadLocalMemPool(memPoolCtrler, "func code mempool"); + codeMemPoolAllocator.SetMemPool(codeMemPool); + } + return codeMemPool; + } + + MapleAllocator &GetCodeMemPoolAllocator() { + GetCodeMempool(); + if (useTmpMemPool) { + return codeMemPoolTmpAllocator; + } + return codeMemPoolAllocator; + } + + MapleAllocator &GetCodeMempoolAllocator() { + if (codeMemPool == nullptr) { + codeMemPool = new ThreadLocalMemPool(memPoolCtrler, "func code mempool"); + codeMemPoolAllocator.SetMemPool(codeMemPool); + } + return codeMemPoolAllocator; + } + + TyIdx GetFuncRetStructTyIdx() { + TyIdx tyIdx = GetFormalDefAt(0).formalTyIdx; + MIRType *ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx); + CHECK_FATAL(ty->GetKind() == kTypePointer, "Fake param not a pointer"); + MIRPtrType *pType = static_cast(ty); + tyIdx = pType->GetPointedTyIdx(); + CHECK_FATAL(GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx)->IsStructType(), + "Must be struct return type"); + return tyIdx; + } + + void EnterFormals(); + void NewBody(); + + MIRModule *GetModule() { + return module; + } + + void SetCurrentFunctionToThis() { + module->SetCurFunction(this); + } + + PUIdx GetPuidx() const { + return puIdx; + } + void SetPuidx(PUIdx idx) { + puIdx = idx; + } + + PUIdx GetPuidxOrigin() const { + return puIdxOrigin; + } + void SetPuidxOrigin(PUIdx idx) { + puIdxOrigin = idx; + } + + StIdx GetStIdx() const { + return symbolTableIdx; + } + void SetStIdx(StIdx stIdx) { + symbolTableIdx = stIdx; + } + + int32 GetSCCId() const { + return sccID; + } + void 
SetSCCId(int32 id) { + sccID = id; + } + + MIRFuncType *GetMIRFuncType() { + return funcType; + } + void SetMIRFuncType(MIRFuncType *type) { + funcType = type; + } + + TyIdx GetInferredReturnTyIdx() const { + return inferredReturnTyIdx; + } + + void SetInferredReturnTyIdx(TyIdx tyIdx) { + inferredReturnTyIdx = tyIdx; + } + + MIRTypeNameTable *GetTypeNameTab() const { + return typeNameTab; + } + + void AllocTypeNameTab() { + if (typeNameTab == nullptr) { + typeNameTab = module->GetMemPool()->New(module->GetMPAllocator()); + } + } + bool HaveTypeNameTab() const { + return typeNameTab != nullptr; + } + const MapleMap &GetGStrIdxToTyIdxMap() const { + CHECK_FATAL(typeNameTab != nullptr, "typeNameTab is nullptr"); + return typeNameTab->GetGStrIdxToTyIdxMap(); + } + TyIdx GetTyIdxFromGStrIdx(GStrIdx idx) const { + CHECK_FATAL(typeNameTab != nullptr, "typeNameTab is nullptr"); + return typeNameTab->GetTyIdxFromGStrIdx(idx); + } + void SetGStrIdxToTyIdx(GStrIdx gStrIdx, TyIdx tyIdx) { + CHECK_FATAL(typeNameTab != nullptr, "typeNameTab is nullptr"); + typeNameTab->SetGStrIdxToTyIdx(gStrIdx, tyIdx); + } + + const std::string &GetLabelTabItem(LabelIdx labelIdx) const { + CHECK_FATAL(labelTab != nullptr, "labelTab is nullptr"); + return labelTab->GetName(labelIdx); + } + + void AllocLabelTab() { + if (labelTab == nullptr) { + labelTab = module->GetMemPool()->New(module->GetMPAllocator()); + } + } + + MIRPregTable *GetPregTab() const { + return pregTab; + } + + void SetPregTab(MIRPregTable *tab) { + pregTab = tab; + } + void AllocPregTab() { + if (pregTab == nullptr) { + pregTab = module->GetMemPool()->New(&module->GetMPAllocator()); + } + } + MIRPreg *GetPregItem(PregIdx idx) { + return const_cast(const_cast(this)->GetPregItem(idx)); + } + const MIRPreg *GetPregItem(PregIdx idx) const { + return pregTab->PregFromPregIdx(idx); + } + + BlockNode *GetBody() { + return body; + } + const BlockNode *GetBody() const { + return body; + } + void SetBody(BlockNode *node) { + body = node; + } + + SrcPosition &GetSrcPosition() { + ASSERT(GetFuncSymbol() != nullptr, "null ptr check"); + return GetFuncSymbol()->GetSrcPosition(); + } + + void SetSrcPosition(const SrcPosition &position) { + ASSERT(GetFuncSymbol() != nullptr, "null ptr check"); + GetFuncSymbol()->SetSrcPosition(position); + } + + const FuncAttrs &GetFuncAttrs() const { + return funcAttrs; + } + FuncAttrs &GetFuncAttrs() { + return funcAttrs; + } + + void SetFuncAttrs(const FuncAttrs &attrs) { + funcAttrs = attrs; + } + void SetFuncAttrs(uint64 attrFlag) { + funcAttrs.SetAttrFlag(attrFlag); + } + + uint32 GetFlag() const { + return flag; + } + void SetFlag(uint32 newFlag) { + flag = newFlag; + } + + uint16 GetHashCode() const { + return hashCode; + } + void SetHashCode(uint16 newHashCode) { + hashCode = newHashCode; + } + + void SetFileIndex(uint32 newFileIndex) { + fileIndex = newFileIndex; + } + + MIRInfoVector &GetInfoVector() { + return info; + } + + const MIRInfoPair &GetInfoPair(size_t i) const { + return info.at(i); + } + + void PushbackMIRInfo(const MIRInfoPair &pair) { + info.push_back(pair); + } + + void SetMIRInfoNum(size_t idx, uint32 num) { + info[idx].second = num; + } + + MapleVector &InfoIsString() { + return infoIsString; + } + + void PushbackIsString(bool isString) { + infoIsString.push_back(isString); + } + + void SetupScope() { + if (!scope) { + scope = module->GetMemPool()->New(module, this); + } + } + + MIRScope *GetScope() { + SetupScope(); + return scope; + } + + void SetScope(MIRScope *scp) { + scope = scp; + } + + bool 
NeedEmitAliasInfo() const { + return scope && !scope->IsEmpty(); + } + + MapleMap &GetAliasVarMap() { + SetupScope(); + return scope->GetAliasVarMap(); + } + + void SetAliasVarMap(GStrIdx idx, const MIRAliasVars &vars) { + SetupScope(); + scope->SetAliasVarMap(idx, vars); + } + + bool HasVlaOrAlloca() const { + return hasVlaOrAlloca; + } + + void SetVlaOrAlloca(bool has) { + hasVlaOrAlloca = has; + } + + // Default freq is the lastStmtFreq + bool HasFreqMap() const { + return freqLastMap != nullptr; + } + + bool HasFirstFreqMap() const { + return freqFirstMap != nullptr; + } + + const MapleMap &GetFirstFreqMap() const { + return *freqFirstMap; + } + + void SetFirstFreqMap(uint32 stmtID, uint64 freq) { + if (freqFirstMap == nullptr) { + freqFirstMap = module->GetMemPool()->New>(module->GetMPAllocator().Adapter()); + } + (*freqFirstMap)[stmtID] = freq; + } + + const MapleMap &GetLastFreqMap() const { + return *freqLastMap; + } + + int64 GetFreqFromLastStmt(uint32 stmtId) const { + if (freqLastMap == nullptr) { + return -1; + } + if ((*freqLastMap).find(stmtId) == (*freqLastMap).end()) { + return -1; + } + return (*freqLastMap)[stmtId]; + } + + int64 GetFreqFromFirstStmt(uint32 stmtId) const { + if (freqFirstMap == nullptr) { + return -1; + } + if ((*freqFirstMap).find(stmtId) == (*freqFirstMap).end()) { + return -1; + } + return (*freqFirstMap)[stmtId]; + } + + void SetLastFreqMap(uint32 stmtID, uint64 freq) { + if (freqLastMap == nullptr) { + freqLastMap = module->GetMemPool()->New>(module->GetMPAllocator().Adapter()); + } + (*freqLastMap)[stmtID] = freq; + } + + bool WithLocInfo() const { + return withLocInfo; + } + void SetWithLocInfo(bool withInfo) { + withLocInfo = withInfo; + } + + bool IsDirty() const { + return isDirty; + } + void SetDirty(bool dirty) { + isDirty = dirty; + } + + bool IsFromMpltInline() const { + return fromMpltInline; + } + void SetFromMpltInline(bool isInline) { + fromMpltInline = isInline; + } + + uint8 GetLayoutType() const { + return layoutType; + } + void SetLayoutType(uint8 type) { + layoutType = type; + } + + uint32 GetCallTimes() const { + return callTimes; + } + void SetCallTimes(uint32 times) { + callTimes = times; + } + + uint32 GetFrameSize() const { + return frameSize; + } + void SetFrameSize(uint32 size) { + frameSize = size; + } + + uint32 GetUpFormalSize() const { + return upFormalSize; + } + void SetUpFormalSize(uint32 size) { + upFormalSize = size; + } + + uint32 GetOutParmSize() const { + return outParmSize; + } + void SetOutParmSize(uint32 size) { + outParmSize = size; + } + + uint16 GetModuleId() const { + return moduleID; + } + void SetModuleID(uint16 id) { + moduleID = id; + } + + uint32 GetFuncSize() const { + return funcSize; + } + void SetFuncSize(uint32 size) { + funcSize = size; + } + + uint32 GetTempCount() const { + return tempCount; + } + void IncTempCount() { + ++tempCount; + } + + uint8 *GetFormalWordsTypeTagged() const { + return formalWordsTypeTagged; + } + void SetFormalWordsTypeTagged(uint8 *tagged) { + formalWordsTypeTagged = tagged; + } + uint8 **GetFwtAddress() { + return &formalWordsTypeTagged; + } + + uint8 *GetLocalWordsTypeTagged() const { + return localWordsTypeTagged; + } + void SetLocalWordsTypeTagged(uint8 *tagged) { + localWordsTypeTagged = tagged; + } + uint8 **GetLwtAddress() { + return &localWordsTypeTagged; + } + + uint8 *GetFormalWordsRefCounted() const { + return formalWordsRefCounted; + } + void SetFormalWordsRefCounted(uint8 *counted) { + formalWordsRefCounted = counted; + } + uint8 **GetFwrAddress() { + 
return &formalWordsRefCounted; + } + + uint8 *GetLocalWordsRefCounted() const { + return localWordsRefCounted; + } + void SetLocalWordsRefCounted(uint8 *counted) { + localWordsRefCounted = counted; + } + + MeFunction *GetMeFunc() { + return meFunc; + } + + void SetMeFunc(MeFunction *func) { + meFunc = func; + } + + EAConnectionGraph *GetEACG() { + return eacg; + } + void SetEACG(EAConnectionGraph *eacgVal) { + eacg = eacgVal; + } + + void SetFormalDefVec(const MapleVector &currFormals) { + formalDefVec = currFormals; + } + + MapleVector &GetFormalDefVec() { + return formalDefVec; + } + + const FormalDef &GetFormalDefAt(size_t i) const { + return formalDefVec[i]; + } + + FormalDef &GetFormalDefAt(size_t i) { + return formalDefVec[i]; + } + + const MIRSymbol *GetFormal(size_t i) const { + return formalDefVec[i].formalSym; + } + + MIRSymbol *GetFormal(size_t i) { + return formalDefVec[i].formalSym; + } + + const std::string &GetFormalName(size_t i) const { + auto *formal = formalDefVec[i].formalSym; + if (formal != nullptr) { + return formal->GetName(); + } + return GlobalTables::GetStrTable().GetStringFromStrIdx(formalDefVec[i].formalStrIdx); + } + + size_t GetFormalCount() const { + return formalDefVec.size(); + } + + void ClearFormals() { + formalDefVec.clear(); + } + + void ClearArguments() { + formalDefVec.clear(); + funcType->GetParamTypeList().clear(); + funcType->GetParamAttrsList().clear(); + } + + size_t GetSymbolTabSize() const { + ASSERT(symTab != nullptr, "symTab is nullptr"); + return symTab->GetSymbolTableSize(); + } + MIRSymbol *GetSymbolTabItem(uint32 idx, bool checkFirst = false) const { + return symTab->GetSymbolFromStIdx(idx, checkFirst); + } + const MIRSymbolTable *GetSymTab() const { + return symTab; + } + MIRSymbolTable *GetSymTab() { + return symTab; + } + void AllocSymTab() { + if (symTab == nullptr) { + symTab = module->GetMemPool()->New(module->GetMPAllocator()); + } + } + MIRLabelTable *GetLabelTab() const { + CHECK_FATAL(labelTab != nullptr, "must be"); + return labelTab; + } + MIRLabelTable *GetLabelTab() { + if (labelTab == nullptr) { + labelTab = module->GetMemPool()->New(module->GetMPAllocator()); + } + return labelTab; + } + void SetLabelTab(MIRLabelTable *currLabelTab) { + labelTab = currLabelTab; + } + + const MapleSet &GetRetRefSym() const { + return retRefSym; + } + void InsertMIRSymbol(MIRSymbol *sym) { + (void)retRefSym.insert(sym); + } + + MemPool *GetDataMemPool() const { + return module->GetMemPool(); + } + + MemPool *GetCodeMemPool() { + if (codeMemPool == nullptr) { + codeMemPool = new ThreadLocalMemPool(memPoolCtrler, "func code mempool"); + codeMemPoolAllocator.SetMemPool(codeMemPool); + } + return codeMemPool; + } + + void SetCodeMemPool(MemPool *currCodeMemPool) { + codeMemPool = currCodeMemPool; + } + + MapleAllocator &GetCodeMPAllocator() { + GetCodeMemPool(); + return codeMemPoolAllocator; + } + + void AddFuncGenericDeclare(GenericDeclare *g) { + genericDeclare.push_back(g); + } + + void AddFuncGenericArg(AnnotationType *a) { + genericArg.push_back(a); + } + + void AddFuncGenericRet(AnnotationType *r) { + genericRet = r; + } + + void AddFuncLocalGenericVar(const GStrIdx &str, AnnotationType *at) { + genericLocalVar[str] = at; + } + + MapleVector &GetFuncGenericDeclare() { + return genericDeclare; + } + + MapleVector &GetFuncGenericArg() { + return genericArg; + } + + void SetRetrunAttrKind(const PointerAttr kind) { + returnKind = kind; + } + + PointerAttr GetRetrunAttrKind() const { + return returnKind; + } + + AnnotationType 
*GetFuncGenericRet() { + return genericRet; + } + + AnnotationType *GetFuncLocalGenericVar(const GStrIdx &str) { + if (genericLocalVar.find(str) == genericLocalVar.end()) { + return nullptr; + } + return genericLocalVar[str]; + } + + StmtNode *FindStmtWithId(StmtNode *stmt, uint32 stmtId) { + while (stmt != nullptr) { + StmtNode *next = stmt->GetNext(); + switch (stmt->GetOpCode()) { + case OP_dowhile: + case OP_while: { + WhileStmtNode *wnode = static_cast(stmt); + if (wnode->GetBody() != nullptr && wnode->GetBody()->GetFirst() != nullptr) { + StmtNode *res = FindStmtWithId(wnode->GetBody()->GetFirst(), stmtId); + if (res != nullptr) { + return res; + } + } + break; + } + case OP_if: { + if (stmt->GetMeStmtID() == stmtId) { + return stmt; + } + IfStmtNode *inode = static_cast(stmt); + if (inode->GetThenPart() != nullptr && inode->GetThenPart()->GetFirst() != nullptr) { + StmtNode *res = FindStmtWithId(inode->GetThenPart()->GetFirst(), stmtId); + if (res != nullptr) { + return res; + } + } + if (inode->GetElsePart() != nullptr && inode->GetElsePart()->GetFirst() != nullptr) { + StmtNode *res = FindStmtWithId(inode->GetElsePart()->GetFirst(), stmtId); + if (res != nullptr) { + return res; + } + } + break; + } + case OP_callassigned: + case OP_call: + case OP_switch: + case OP_brtrue: + case OP_brfalse: { + if (stmt->GetMeStmtID() == stmtId) { + return stmt; + } + break; + } + default: { + break; + } + } + stmt = next; + } + return nullptr; + } + + StmtNode *GetStmtNodeFromMeId(uint32 stmtId) { + if (GetBody() == nullptr) { + return nullptr; + } + StmtNode *stmt = GetBody()->GetFirst(); + return FindStmtWithId(stmt, stmtId); + } + + MemPool *GetCodeMemPoolTmp() { + if (codeMemPoolTmp == nullptr) { + codeMemPoolTmp = new ThreadLocalMemPool(memPoolCtrler, "func code mempool"); + codeMemPoolTmpAllocator.SetMemPool(codeMemPoolTmp); + } + return codeMemPoolTmp; + } + + bool CheckParamNullType(MIRSymbol *sym) { + return paramNonullTypeMap.find(sym) != paramNonullTypeMap.end(); + } + + PointerAttr GetParamNonull(MIRSymbol *sym) { + return paramNonullTypeMap[sym]; + } + + void SetParamNonull(MIRSymbol *sym, PointerAttr type) { + paramNonullTypeMap[sym] = type; + } + + void CopyReferedRegs(std::set regs) { + for (auto reg : regs) { + referedPregs.insert(reg); + } + } + + MapleSet GetReferedRegs() const { + return referedPregs; + } + + bool IsReferedRegsValid() const { + return referedRegsValid; + } + + void SetReferedRegsValid(bool val) { + referedRegsValid = val; + } + + FuncDesc &GetFuncDesc() { + return funcDesc; + } + + void SetFuncDesc(const FuncDesc &value) { + funcDesc = value; + } + + void SetProfCtrTbl(MIRSymbol *pct) { + CHECK_FATAL(Options::profileGen, "This is only for profileGen"); + profCtrTbl = pct; + } + + MIRSymbol *GetProfCtrTbl() { + return profCtrTbl; + } + + void SetNumCtrs(uint32 num) { + CHECK_FATAL(Options::profileGen, "This is only for profileGen"); + nCtrs = num; + } + + uint32 GetNumCtrs() const { + return nCtrs; + } + + void SetFileLineNoChksum(uint64 chksum) { + CHECK_FATAL(Options::profileGen, "This is only for profileGen"); + fileLinenoChksum = chksum; + } + + uint64 GetFileLineNoChksum() const { + return fileLinenoChksum; + } + + void SetCFGChksum(uint64 chksum) { + CHECK_FATAL(Options::profileGen, "This is only for profileGen"); + cfgChksum = chksum; + } + + uint64 GetCFGChksum() const { + return cfgChksum; + } + + void InitFuncDescToBest() { + funcDesc.InitToBest(); + } + + const FuncDesc &GetFuncDesc() const { + return funcDesc; + } + + void AddProfileDesc(uint64 
hash, uint32 start, uint32 end) { + profileDesc = module->GetMemPool()->New(hash, start, end); + } + + const IRProfileDesc *GetProfInf() { + if (profileDesc == nullptr) { + // return profileDesc with default value + profileDesc = module->GetMemPool()->New(); + } + return profileDesc; + } + + bool IsVisited() const { + return isVisited; + } + void SetIsVisited() { + isVisited = true; + } + + InlineSummary *GetInlineSummary() { + return inlineSummary; + } + + const InlineSummary *GetInlineSummary() const { + return inlineSummary; + } + + void DiscardInlineSummary() { + inlineSummary = nullptr; + } + + InlineSummary *GetOrCreateInlineSummary(); + + void SetFuncProfData(FuncProfInfo *data) { + funcProfData = data; + } + FuncProfInfo* GetFuncProfData() { + return funcProfData; + } + FuncProfInfo* GetFuncProfData() const { + return funcProfData; + } + void SetStmtFreq(uint32_t stmtID, uint64_t freq) { + ASSERT((funcProfData != nullptr && freq > 0), "nullptr check"); + funcProfData->SetStmtFreq(stmtID, static_cast(freq)); + } + + void SetMayWriteToAddrofStack() { + mayWriteToAddrofStack = true; + } + + void UnsetMayWriteToAddrofStack() { + mayWriteToAddrofStack = false; + } + + bool GetMayWriteToAddrofStack() const { + return mayWriteToAddrofStack; + } + + void CheckMayWriteToAddrofStack() { + checkedMayWriteToAddrofStack = true; + } + + bool IsMayWriteToAddrofStackChecked() const { + // record the info about whether writing to address of stack is checked in me + return checkedMayWriteToAddrofStack; + } + + MIRFunction *GetFuncAlias(); + + private: + MIRModule *module; // the module that owns this function + PUIdx puIdx = 0; // the PU index of this function + PUIdx puIdxOrigin = 0; // the original puIdx when initial generation + StIdx symbolTableIdx; // the symbol table index of this function + int32 sccID = -1; // the scc id of this function, for mplipa + MIRFuncType *funcType = nullptr; + TyIdx inferredReturnTyIdx{0}; // the actual return type of of this function (may be a + // subclass of the above). 0 means can not be inferred. 
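Illustrative usage sketch (not part of the header above) for the lazily allocated statement-frequency maps declared earlier in this class; the helper names and the already-constructed MIRFunction are assumptions for the example only.

#include "mir_function.h"

// Hypothetical helper: the first write allocates freqLastMap from the module
// mempool, later writes just update the entry.
void RecordLastStmtFreq(maple::MIRFunction &func, maple::uint32 stmtId, maple::uint64 freq) {
  func.SetLastFreqMap(stmtId, freq);
}

// Hypothetical helper: GetFreqFromLastStmt already returns -1 when the map or
// the id is missing, so the HasFreqMap() guard only makes the contract explicit.
maple::int64 QueryLastStmtFreq(const maple::MIRFunction &func, maple::uint32 stmtId) {
  return func.HasFreqMap() ? func.GetFreqFromLastStmt(stmtId) : -1;
}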
+ TyIdx classTyIdx{0}; // class/interface type this function belongs to + MapleVector formalDefVec{module->GetMPAllocator().Adapter()}; // the formals in function definition + MapleSet retRefSym{module->GetMPAllocator().Adapter()}; + + MapleVector genericDeclare{module->GetMPAllocator().Adapter()}; + MapleVector genericArg{module->GetMPAllocator().Adapter()}; + MapleMap genericLocalVar{module->GetMPAllocator().Adapter()}; + AnnotationType *genericRet = nullptr; + + MIRSymbolTable *symTab = nullptr; + MIRTypeNameTable *typeNameTab = nullptr; + MIRLabelTable *labelTab = nullptr; + MIRPregTable *pregTab = nullptr; + MemPool *codeMemPool = nullptr; + MapleAllocator codeMemPoolAllocator{nullptr}; + uint32 callTimes = 0; + BlockNode *body = nullptr; + FuncAttrs funcAttrs{}; + uint32 flag = 0; + uint16 hashCode = 0; // for methodmetadata order + uint32 fileIndex = 0; // this function belongs to which file, used by VM for plugin manager + MIRInfoVector info{module->GetMPAllocator().Adapter()}; + MapleVector infoIsString{module->GetMPAllocator().Adapter()}; // tells if an entry has string value + MIRScope *scope = nullptr; + MapleMap *freqFirstMap = nullptr; // save bb frequency in its first_stmt, key is stmtId + MapleMap *freqLastMap = nullptr; // save bb frequency in its last_stmt, key is stmtId + MapleSet referedPregs{module->GetMPAllocator().Adapter()}; + bool referedRegsValid = false; + bool hasVlaOrAlloca = false; + bool withLocInfo = true; + bool isVisited = false; // only used in inline phase. + bool isDirty = false; + bool fromMpltInline = false; // Whether this function is imported from mplt_inline file or not. + uint8_t layoutType = kLayoutUnused; + uint32 frameSize = 0; + uint32 upFormalSize = 0; + uint32 outParmSize = 0; + uint16 moduleID = 0; + uint32 funcSize = 0; // size of code in words + uint32 tempCount = 0; + uint8 *formalWordsTypeTagged = nullptr; // bit vector where the Nth bit tells whether + // the Nth word in the formal parameters area + // addressed upward from %%FP (that means + // the word at location (%%FP + N*4)) has + // typetag; if yes, the typetag is the word + // at (%%FP + N*4 + 4); the bitvector's size + // is given by BlockSize2BitvectorSize(upFormalSize) + uint8 *localWordsTypeTagged = nullptr; // bit vector where the Nth bit tells whether + // the Nth word in the local stack frame + // addressed downward from %%FP (that means + // the word at location (%%FP - N*4)) has + // typetag; if yes, the typetag is the word + // at (%%FP - N*4 + 4); the bitvector's size + // is given by BlockSize2BitvectorSize(frameSize) + uint8 *formalWordsRefCounted = nullptr; // bit vector where the Nth bit tells whether + // the Nth word in the formal parameters area + // addressed upward from %%FP (that means + // the word at location (%%FP + N*4)) points to + // a dynamic memory block that needs reference + // count; the bitvector's size is given by + // BlockSize2BitvectorSize(upFormalSize) + uint8 *localWordsRefCounted = nullptr; // bit vector where the Nth bit tells whether + // the Nth word in the local stack frame + // addressed downward from %%FP (that means + // the word at location (%%FP - N*4)) points to + // a dynamic memory block that needs reference + // count; the bitvector's size is given by + // BlockSize2BitvectorSize(frameSize) + // removed. 
label table size + // lbl2stmt table, removed; + // to hold unmangled class and function names + MeFunction *meFunc = nullptr; + EAConnectionGraph *eacg = nullptr; + IRProfileDesc *profileDesc = nullptr; + GStrIdx baseClassStrIdx{0}; // the string table index of base class name + GStrIdx baseFuncStrIdx{0}; // the string table index of base function name + // the string table index of base function name mangled with type info + GStrIdx baseFuncWithTypeStrIdx{0}; + // funcname + types of args, no type of retv + GStrIdx baseFuncSigStrIdx{0}; + GStrIdx signatureStrIdx{0}; + MemPool *codeMemPoolTmp{nullptr}; + MapleAllocator codeMemPoolTmpAllocator{nullptr}; + bool useTmpMemPool = false; + PointerAttr returnKind = PointerAttr::kPointerUndeiced; + MapleMap paramNonullTypeMap{module->GetMPAllocator().Adapter()}; + FuncDesc funcDesc{}; + MIRSymbol *profCtrTbl = nullptr; + uint32 nCtrs = 0; // number of counters + uint64 fileLinenoChksum = 0; + uint64 cfgChksum = 0; + FuncProfInfo *funcProfData = nullptr; + InlineSummary *inlineSummary = nullptr; + void DumpFlavorLoweredThanMmpl() const; + MIRFuncType *ReconstructFormals(const std::vector &symbols, bool clearOldArgs); + bool mayWriteToAddrofStack = false; + bool checkedMayWriteToAddrofStack = false; +}; +} // namespace maple +#endif // MAPLE_IR_INCLUDE_MIR_FUNCTION_H diff --git a/src/mapleall/maple_ir/include/mir_lower.h b/src/mapleall/maple_ir/include/mir_lower.h new file mode 100644 index 0000000000000000000000000000000000000000..104a0a932aed185e70955c2aab803e6f2e5ba0c7 --- /dev/null +++ b/src/mapleall/maple_ir/include/mir_lower.h @@ -0,0 +1,174 @@ +/* + * Copyright (c) [2019] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLE_IR_INCLUDE_MIR_LOWER_H +#define MAPLE_IR_INCLUDE_MIR_LOWER_H +#include + +#include "mir_builder.h" +#include "opcodes.h" + +namespace maple { +// The base value for branch probability notes and edge probabilities. 
+static constexpr int32 kProbAll = 10000; +static constexpr int32 kProbLikely = 9000; +static constexpr int32 kProbUnlikely = kProbAll - kProbLikely; +constexpr uint32 kNodeFirstOpnd = 0; +constexpr uint32 kNodeSecondOpnd = 1; +constexpr uint32 kNodeThirdOpnd = 2; +enum MirLowerPhase : uint8 { + kLowerUnder, + kLowerMe, + kLowerExpandArray, + kLowerBe, + kLowerCG, + kLowerLNO +}; + +constexpr uint32 kShiftLowerMe = 1U << kLowerMe; +constexpr uint32 kShiftLowerExpandArray = 1U << kLowerExpandArray; +constexpr uint32 kShiftLowerBe = 1U << kLowerBe; +constexpr uint32 kShiftLowerCG = 1U << kLowerCG; +constexpr uint32 kShiftLowerLNO = 1U << kLowerLNO; +// check if a block node ends with an unconditional jump +inline bool OpCodeNoFallThrough(Opcode opCode) { + return opCode == OP_goto || opCode == OP_return || opCode == OP_switch || opCode == OP_throw || opCode == OP_gosub || + opCode == OP_retsub; +} + +inline bool IfStmtNoFallThrough(const IfStmtNode &ifStmt) { + return OpCodeNoFallThrough(ifStmt.GetThenPart()->GetLast()->GetOpCode()); +} + +class MIRLower { + public: + static const std::set kSetArrayHotFunc; + + MIRLower(MIRModule &mod, MIRFunction *f) : mirModule(mod), mirFunc(f) {} + + virtual ~MIRLower() = default; + + const MIRFunction *GetMirFunc() const { + return mirFunc; + } + + void SetMirFunc(MIRFunction *f) { + mirFunc = f; + } + + void Init() { + mirBuilder = mirModule.GetMemPool()->New(&mirModule); + } + + virtual BlockNode *LowerIfStmt(IfStmtNode &ifStmt, bool recursive); + BlockNode *LowerSwitchStmt(SwitchNode *switchNode); + virtual BlockNode *LowerWhileStmt(WhileStmtNode &whileStmt); + BlockNode *LowerDowhileStmt(WhileStmtNode &doWhileStmt); + BlockNode *LowerDoloopStmt(DoloopNode &doloop); + BlockNode *LowerBlock(BlockNode &block); + BaseNode *LowerEmbeddedCandCior(BaseNode *x, StmtNode *curstmt, BlockNode *blk); + void LowerCandCior(BlockNode &block); + void LowerBuiltinExpect(BlockNode &block) const; + void LowerFunc(MIRFunction &func); + BaseNode *LowerFarray(ArrayNode *array); + BaseNode *LowerCArray(ArrayNode *array); + void ExpandArrayMrt(MIRFunction &func); + IfStmtNode *ExpandArrayMrtIfBlock(IfStmtNode &node); + WhileStmtNode *ExpandArrayMrtWhileBlock(WhileStmtNode &node); + DoloopNode *ExpandArrayMrtDoloopBlock(DoloopNode &node); + ForeachelemNode *ExpandArrayMrtForeachelemBlock(ForeachelemNode &node); + BlockNode *ExpandArrayMrtBlock(BlockNode &block); + void AddArrayMrtMpl(BaseNode &exp, BlockNode &newBlock); + MIRFuncType *FuncTypeFromFuncPtrExpr(BaseNode *x); + void SetLowerME() { + lowerPhase |= kShiftLowerMe; + } + + void SetLowerLNO() { + lowerPhase |= kShiftLowerLNO; + } + + void SetLowerExpandArray() { + lowerPhase |= kShiftLowerExpandArray; + } + + void SetLowerBE() { + lowerPhase |= kShiftLowerBe; + } + + void SetLowerCG() { + lowerPhase |= kShiftLowerCG; + } + + uint8 GetOptLevel() const { + return optLevel; + } + + void SetOptLevel(uint8 optlvl) { + optLevel = optlvl; + } + + bool IsLowerME() const { + return lowerPhase & kShiftLowerMe; + } + + bool IsLowerLNO() const { + return lowerPhase & kShiftLowerLNO; + } + + bool IsLowerExpandArray() const { + return lowerPhase & kShiftLowerExpandArray; + } + + bool IsLowerBE() const { + return lowerPhase & kShiftLowerBe; + } + + bool IsLowerCG() const { + return lowerPhase & kShiftLowerCG; + } + + static bool ShouldOptArrayMrt(const MIRFunction &func); + + virtual bool InLFO() const { + return false; + } + + FuncProfInfo *GetFuncProfData() const { + return mirFunc->GetFuncProfData(); + } + void 
CopyStmtFrequency(StmtNode *newStmt, StmtNode *oldStmt) { + ASSERT(GetFuncProfData() != nullptr, "nullptr check"); + if (newStmt == oldStmt) { + return; + } + uint64_t freq = GetFuncProfData()->GetStmtFreq(oldStmt->GetStmtID()); + GetFuncProfData()->SetStmtFreq(newStmt->GetStmtID(), freq); + } + + protected: + MIRModule &mirModule; + + private: + MIRFunction *mirFunc; + MIRBuilder *mirBuilder = nullptr; + uint32 lowerPhase = 0; + uint8 optLevel = 0; + LabelIdx CreateCondGotoStmt(Opcode op, BlockNode &blk, const IfStmtNode &ifStmt); + void CreateBrFalseStmt(BlockNode &blk, const IfStmtNode &ifStmt); + void CreateBrTrueStmt(BlockNode &blk, const IfStmtNode &ifStmt); + void CreateBrFalseAndGotoStmt(BlockNode &blk, const IfStmtNode &ifStmt); +}; +} // namespace maple +#endif // MAPLE_IR_INCLUDE_MIR_LOWER_H diff --git a/src/mapleall/maple_ir/include/mir_module.h b/src/mapleall/maple_ir/include/mir_module.h new file mode 100644 index 0000000000000000000000000000000000000000..83eae82df72adaab66a74afa3ebb10212d8d045c --- /dev/null +++ b/src/mapleall/maple_ir/include/mir_module.h @@ -0,0 +1,803 @@ +/* + * Copyright (c) [2019-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLE_IR_INCLUDE_MIR_MODULE_H +#define MAPLE_IR_INCLUDE_MIR_MODULE_H +#include "intrinsics.h" +#include "mpl_logging.h" +#include "mpl_profdata.h" +#include "muid.h" +#include "namemangler.h" +#include "prim_types.h" +#include "profile.h" +#include "types_def.h" +#if MIR_FEATURE_FULL +#include +#include +#include +#include +#include +#include + +#include "maple_string.h" +#include "mempool.h" +#include "mempool_allocator.h" +#include "thread_env.h" +#endif // MIR_FEATURE_FULL + +namespace maple { +class CallInfo; // circular dependency exists, no other choice +class MIRModule; // circular dependency exists, no other choice +class MIRBuilder; // circular dependency exists, no other choice +class MIRScope; +using MIRModulePtr = MIRModule *; +using MIRBuilderPtr = MIRBuilder *; + +enum MIRFlavor { + kFlavorUnknown, + kFeProduced, + kMeProduced, + kBeLowered, + kFlavorMbc, + kMmpl, + kCmplV1, + kCmpl, // == CMPLv2 + kFlavorLmbc, +}; + +enum MIRSrcLang { + kSrcLangUnknown, + kSrcLangC, + kSrcLangJs, + kSrcLangCPlusPlus, + kSrcLangJava, + kSrcLangChar, + // SrcLangSwift : when clang adds support for Swift. +}; + +class CalleePair { + public: + CalleePair(PUIdx id, int32_t index) : id(id), index(index) {} + bool operator<(const CalleePair &func) const { + if (id < func.id) { + return true; + } else if (id == func.id && index < func.index) { + return true; + } else { + return false; + } + } + + private: + PUIdx id; + int32_t index; +}; + +class CallerSummary { + public: + CallerSummary(PUIdx id, uint32 stmtId) : id(id), stmtId(stmtId) {} + PUIdx GetPuidx() const { + return id; + }; + uint32 GetStmtId() const { + return stmtId; + } + + private: + PUIdx id; + uint32 stmtId; +}; + +// This data structure is for the ipa-cp. Important expresstion is about the condtion statement. 
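Minimal sketch (not part of the patch) of how the MIRLower interface declared above is typically driven; the driver function name and the assumption that module/func come from an already-built MIRModule are illustrative.

#include "mir_lower.h"

// Hypothetical driver for the example only.
void LowerFuncForMe(maple::MIRModule &module, maple::MIRFunction &func) {
  maple::MIRLower mirLower(module, &func);
  mirLower.Init();                 // allocates the MIRBuilder from the module mempool
  mirLower.SetLowerME();           // sets the kShiftLowerMe bit in lowerPhase
  mirLower.SetLowerExpandArray();  // also request array expansion
  mirLower.SetOptLevel(2);
  mirLower.LowerFunc(func);        // structured if/while/doloop forms go through the Lower* helpers
}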
+class ImpExpr { + public: + ImpExpr(uint32 stmtId, uint32 paramIndex) : stmtId(stmtId), paramIndex(paramIndex) {} + uint32 GetStmtId() const { + return stmtId; + } + uint32 GetParamIndex() const { + return paramIndex; + } + + private: + uint32 stmtId; + uint32 paramIndex; +}; + +// blksize gives the size of the memory block in bytes; there are (blksize+3)/4 +// words; 1 bit for each word, so the bit vector's length in bytes is +// ((blksize+3)/4+7)/8 +static inline uint32 BlockSize2BitVectorSize(uint32 blkSize) { + uint32 bitVectorLen = ((blkSize + 3) / 4 + 7) / 8; + return ((bitVectorLen + 3) >> 2) << 2; // round up to word boundary +} + +#if MIR_FEATURE_FULL +class MIRType; // circular dependency exists, no other choice +class MIRFunction; // circular dependency exists, no other choice +class MIRSymbol; // circular dependency exists, no other choice +class MIRSymbolTable; // circular dependency exists, no other choice +class MIRFloatConst; // circular dependency exists, no other choice +class MIRDoubleConst; // circular dependency exists, no other choice +class MIRBuilder; // circular dependency exists, no other choice +class DebugInfo; // circular dependency exists, no other choice +class BinaryMplt; // circular dependency exists, no other choice +class EAConnectionGraph; // circular dependency exists, no other choice +using MIRInfoPair = std::pair; +using MIRInfoVector = MapleVector; +using MIRDataPair = std::pair>; +using MIRDataVector = MapleVector; +constexpr int kMaxEncodedValueLen = 10; +struct EncodedValue { + uint8 encodedValue[kMaxEncodedValueLen] = {0}; +}; + +class MIRTypeNameTable { + public: + explicit MIRTypeNameTable(MapleAllocator &allocator) : gStrIdxToTyIdxMap(std::less(), allocator.Adapter()) {} + + ~MIRTypeNameTable() = default; + + const MapleMap &GetGStrIdxToTyIdxMap() const { + return gStrIdxToTyIdxMap; + } + + TyIdx GetTyIdxFromGStrIdx(GStrIdx idx) const { + auto it = gStrIdxToTyIdxMap.find(idx); + if (it == gStrIdxToTyIdxMap.end()) { + return TyIdx(0); + } + return it->second; + } + + void SetGStrIdxToTyIdx(GStrIdx gStrIdx, TyIdx tyIdx) { + gStrIdxToTyIdxMap[gStrIdx] = tyIdx; + } + + size_t Size() const { + return gStrIdxToTyIdxMap.size(); + } + + private: + MapleMap gStrIdxToTyIdxMap; +}; + +class MIRModule { + public: + bool firstInline = true; + using CallSite = std::pair; + + explicit MIRModule(const std::string &fn = ""); + MIRModule(MIRModule &p) = delete; + MIRModule &operator=(const MIRModule &module) = delete; + ~MIRModule(); + + MemPool *GetMemPool() const { + return memPool; + } + MemPool *GetPragmaMemPool() { + return pragmaMemPool; + } + MapleAllocator &GetPragmaMPAllocator() { + return pragmaMemPoolAllocator; + } + const MapleAllocator &GetMPAllocator() const { + return memPoolAllocator; + } + + MapleAllocator &GetInlineSummaryAlloc() { + return inlineSummaryAlloc; + } + + void ReleaseInlineSummaryAlloc() noexcept { + if (inlineSummaryAlloc.GetMemPool() != nullptr) { + delete inlineSummaryAlloc.GetMemPool(); + inlineSummaryAlloc.SetMemPool(nullptr); + } + } + + void ReleasePragmaMemPool() { + if (pragmaMemPool) { + memPoolCtrler.DeleteMemPool(pragmaMemPool); + } + pragmaMemPool = nullptr; + } + + MapleAllocator &GetMPAllocator() { + return memPoolAllocator; + } + + const auto &GetFunctionList() const { + return functionList; + } + auto &GetFunctionList() { + return functionList; + } + + const MapleVector &GetImportedMplt() const { + return importedMplt; + } + void PushbackImportedMplt(const std::string &importFileName) { + 
importedMplt.push_back(importFileName); + } + + MIRTypeNameTable *GetTypeNameTab() { + return typeNameTab; + } + + const MapleVector &GetTypeDefOrder() const { + return typeDefOrder; + } + void PushbackTypeDefOrder(GStrIdx gstrIdx) { + typeDefOrder.push_back(gstrIdx); + } + + void AddClass(TyIdx tyIdx); + void RemoveClass(TyIdx tyIdx); + + void SetCurFunction(MIRFunction *f) { + if (ThreadEnv::IsMeParallel()) { + std::lock_guard guard(curFunctionMutex); + auto tid = std::this_thread::get_id(); + curFunctionMap[tid] = f; + return; // DO NOT delete the return statement + } + curFunction = f; + } + + MIRSrcLang GetSrcLang() const { + return srcLang; + } + + const MapleSet &GetSymbolSet() const { + return symbolSet; + } + + const MapleVector &GetSymbolDefOrder() const { + return symbolDefOrder; + } + + Profile &GetProfile() { + return profile; + } + + MplProfileData *GetMapleProfile() { + return mplProfile; + } + void SetMapleProfile(MplProfileData *info) { + mplProfile = info; + } + + LiteProfile &GetLiteProfile() { + return liteProfile; + } + + void SetSomeSymbolNeedForDecl(bool s) { + someSymbolNeedForwDecl = s; + } + + MIRFunction *CurFunction() const { + if (ThreadEnv::IsMeParallel()) { + std::lock_guard guard(curFunctionMutex); + auto tid = std::this_thread::get_id(); + auto pair = curFunctionMap.find(tid); + return pair->second; + } + return curFunction; + } + + MemPool *CurFuncCodeMemPool() const; + MapleAllocator *CurFuncCodeMemPoolAllocator() const; + MapleAllocator &GetCurFuncCodeMPAllocator() const; + void AddExternStructType(TyIdx tyIdx); + void AddExternStructType(const MIRType *t); + void AddSymbol(StIdx stIdx); + void AddSymbol(const MIRSymbol *s); + void AddFunction(MIRFunction *pf) { + functionList.push_back(pf); + } + + void DumpGlobals(bool emitStructureType = true) const; + void Dump(bool emitStructureType = true, const std::unordered_set *dumpFuncSet = nullptr) const; + void DumpToFile(const std::string &fileNameStr, bool emitStructureType = true) const; + void DumpInlineCandidateToFile(const std::string &fileNameStr); + void DumpDefType(); + const std::string &GetFileNameFromFileNum(uint32 fileNum) const; + + void DumpToHeaderFile(bool binaryMplt, const std::string &outputName = ""); + void DumpToCxxHeaderFile(std::set &leafClasses, const std::string &pathToOutf) const; + void DumpClassToFile(const std::string &path) const; + void DumpFunctionList(const std::unordered_set *dumpFuncSet) const; + void DumpGlobalArraySymbol() const; + void Emit(const std::string &outFileName) const; + uint32 GetAndIncFloatNum() { + return floatNum++; + } + + void SetEntryFunction(MIRFunction *f) { + entryFunc = f; + } + + MIRFunction *GetEntryFunction() const { + return entryFunc; + } + + MIRFunction *FindEntryFunction(); + uint32 GetFileinfo(GStrIdx strIdx) const; + void OutputAsciiMpl(const char *phaseName, const char *suffix, + const std::unordered_set *dumpFuncSet = nullptr, bool emitStructureType = true, + bool binaryform = false); + void OutputFunctionListAsciiMpl(const std::string &phaseName); + const std::string &GetFileName() const { + return fileName; + } + + std::string GetFileNameAsPostfix() const; + void SetFileName(const std::string &name) { + fileName = name; + } + + std::string GetProfileDataFileName() const { + std::string profileDataFileName = GetFileName().substr(0, GetFileName().find_last_of(".")); + const char *gcovPath = std::getenv("GCOV_PREFIX"); + std::string gcovPrefix = gcovPath ? 
gcovPath : ""; + if (!gcovPrefix.empty() && (gcovPrefix.back() != '/')) { + gcovPrefix.append("/"); + } + profileDataFileName = gcovPrefix + profileDataFileName; + return profileDataFileName; + } + + bool IsJavaModule() const { + return srcLang == kSrcLangJava; + } + + bool IsCModule() const { + return srcLang == kSrcLangC || srcLang == kSrcLangCPlusPlus; + } + + bool IsCPlusPlusModule() const { + return srcLang == kSrcLangCPlusPlus; + } + + bool IsCharModule() const { + return srcLang == kSrcLangChar; + } + + void AddSuperCall(const std::string &func) { + (void)superCallSet.insert(func); + } + + bool FindSuperCall(const std::string &func) const { + return superCallSet.find(func) != superCallSet.end(); + } + + void ReleaseCurFuncMemPoolTmp() const; + void SetUseFuncCodeMemPoolTmp() { + useFuncCodeMemPoolTmp = true; + } + + void ResetUseFuncCodeMemPoolTmp() { + useFuncCodeMemPoolTmp = false; + } + + void SetFuncInfoPrinted() const; + size_t GetOptFuncsSize() const { + return optimizedFuncs.size(); + } + + void AddOptFuncs(MIRFunction *func) { + optimizedFuncs.emplace(func); + } + + const MapleSet &GetOptFuncs() const { + return optimizedFuncs; + } + + bool IsOptFunc(MIRFunction *func) const { + if (std::find(optimizedFuncs.begin(), optimizedFuncs.end(), func) != optimizedFuncs.end()) { + return true; + } + return false; + } + + void AddOptFuncsType(MIRType *type) { + optimizedFuncsType.emplace(type); + } + + const MapleMap*> &GetPuIdxFieldInitializedMap() const { + std::shared_lock lock(fieldMapMutex); + return puIdxFieldInitializedMap; + } + void SetPuIdxFieldSet(PUIdx puIdx, MapleSet *fieldIDSet) { + std::unique_lock lock(fieldMapMutex); + puIdxFieldInitializedMap[puIdx] = fieldIDSet; + } + + std::map>> &GetCalleeParamAboutInt() { + return calleeParamAboutInt; + } + + std::map>> &GetCalleeParamAboutFloat() { + return calleeParamAboutFloat; + } + + std::map>> &GetCalleeParamAboutDouble() { + return calleeParamAboutDouble; + } + + std::map> &GetFuncImportantExpr() { + return funcImportantExpr; + } + + const MapleSet &GetInlineGlobals() const { + return inliningGlobals; + } + void InsertInlineGlobal(uint32_t global) { + (void)inliningGlobals.insert(global); + } + + const MapleSet *GetPUIdxFieldInitializedMapItem(PUIdx key) const { + std::shared_lock lock(fieldMapMutex); + auto it = puIdxFieldInitializedMap.find(key); + if (it != puIdxFieldInitializedMap.end()) { + return it->second; + } + return nullptr; + } + + std::ostream &GetOut() const { + return out; + } + + const MIRBuilderPtr &GetMIRBuilder() const { + return mirBuilder; + } + + const std::string &GetEntryFuncName() const { + return entryFuncName; + } + void SetEntryFuncName(const std::string &entryFunctionName) { + entryFuncName = entryFunctionName; + } + + TyIdx GetThrowableTyIdx() const { + return throwableTyIdx; + } + void SetThrowableTyIdx(TyIdx throwableTypeIndex) { + throwableTyIdx = throwableTypeIndex; + } + + bool GetWithProfileInfo() const { + return withProfileInfo; + } + void SetWithProfileInfo(bool withProfInfo) { + withProfileInfo = withProfInfo; + } + + BinaryMplt *GetBinMplt() { + return binMplt; + } + void SetBinMplt(BinaryMplt *binaryMplt) { + binMplt = binaryMplt; + } + + bool IsInIPA() const { + return inIPA; + } + bool IsWithMe() const { + return withMe; + } + void SetWithMe(bool isWithMe) { + withMe = isWithMe; + } + void SetInIPA(bool isInIPA) { + inIPA = isInIPA; + } + + MIRInfoVector &GetFileInfo() { + return fileInfo; + } + void PushFileInfoPair(MIRInfoPair pair) { + fileInfo.push_back(pair); + } + void 
SetFileInfo(const MIRInfoVector &fileInf) { + fileInfo = fileInf; + } + + MapleVector &GetFileInfoIsString() { + return fileInfoIsString; + } + void SetFileInfoIsString(const MapleVector &fileInfoIsStr) { + fileInfoIsString = fileInfoIsStr; + } + void PushFileInfoIsString(bool isString) { + fileInfoIsString.push_back(isString); + } + + const MIRDataVector &GetFileData() const { + return fileData; + } + void PushbackFileData(const MIRDataPair &pair) { + fileData.push_back(pair); + } + + const MIRInfoVector &GetSrcFileInfo() const { + return srcFileInfo; + } + void PushbackFileInfo(const MIRInfoPair &pair) { + srcFileInfo.push_back(pair); + } + + const MIRFlavor &GetFlavor() const { + return flavor; + } + void SetFlavor(MIRFlavor flv) { + flavor = flv; + } + + void SetSrcLang(MIRSrcLang sourceLanguage) { + srcLang = sourceLanguage; + } + + uint16 GetID() const { + return id; + } + + void SetID(uint16 num) { + id = num; + } + + uint32 GetGlobalMemSize() const { + return globalMemSize; + } + void SetGlobalMemSize(uint32 globalMemberSize) { + globalMemSize = globalMemberSize; + } + + uint8 *GetGlobalBlockMap() { + return globalBlkMap; + } + void SetGlobalBlockMap(uint8 *globalBlockMap) { + globalBlkMap = globalBlockMap; + } + + uint8 *GetGlobalWordsTypeTagged() { + return globalWordsTypeTagged; + } + void SetGlobalWordsTypeTagged(uint8 *globalWordsTyTagged) { + globalWordsTypeTagged = globalWordsTyTagged; + } + + uint8 *GetGlobalWordsRefCounted() { + return globalWordsRefCounted; + } + void SetGlobalWordsRefCounted(uint8 *counted) { + globalWordsRefCounted = counted; + } + + uint32 GetNumFuncs() const { + return numFuncs; + } + + void SetNumFuncs(uint32 numFunc) { + numFuncs = numFunc; + } + + MapleVector &GetImportFiles() { + return importFiles; + } + + void PushbackImportPath(GStrIdx path) { + importPaths.push_back(path); + } + + MapleVector &GetAsmDecls() { + return asmDecls; + } + + const MapleSet &GetClassList() const { + return classList; + } + + const std::map> &GetMethod2TargetMap() const { + return method2TargetMap; + } + + std::vector &GetMemFromMethod2TargetMap(PUIdx methodPuIdx) { + return method2TargetMap[methodPuIdx]; + } + + void SetMethod2TargetMap(const std::map> &map) { + method2TargetMap = map; + } + + void AddMemToMethod2TargetMap(PUIdx idx, const std::vector &callSite) { + method2TargetMap[idx] = callSite; + } + + bool HasTargetHash(PUIdx idx, uint32 key) const { + auto it = method2TargetHash.find(idx); + if (it == method2TargetHash.end()) { + return false; + } + return it->second.find(key) != it->second.end(); + } + void InsertTargetHash(PUIdx idx, uint32 key) { + (void)method2TargetHash[idx].insert(key); + } + void AddValueToMethod2TargetHash(PUIdx idx, const std::unordered_set &value) { + method2TargetHash[idx] = value; + } + + const std::map &GetEASummary() const { + return eaSummary; + } + void SetEAConnectionGraph(GStrIdx funcNameIdx, EAConnectionGraph *eaCg) { + eaSummary[funcNameIdx] = eaCg; + } + + DebugInfo *GetDbgInfo() const { + return dbgInfo; + } + + MIRScope *GetScope() const { + return scope; + } + + void SetWithDbgInfo(bool v) { + withDbgInfo = v; + } + + bool IsWithDbgInfo() const { + return withDbgInfo; + } + + bool HasPartO2List() const { + return hasPartO2List; + } + + void SetHasPartO2List(bool value) { + hasPartO2List = value; + } + + void InitPartO2List(const std::string &list); + bool IsInPartO2List(const GStrIdx &idx) const { + return partO2FuncList.count(idx) > 0; + } + + void SetBaseName(const std::string &curbaseName) { + baseName = curbaseName; 
+ } + const std::string &GetBaseName() const { + return baseName; + } + void SetOutputFileName(const std::string &curOFileName) { + outputFileName = curOFileName; + } + const std::string &GetOutputFileName() const { + return outputFileName; + } + void SetInputFileName(const std::string &curInFileName) { + inputFileName = curInFileName; + } + const std::string &GetInputFileName() const { + return inputFileName; + } + + uint32 GetUniqueID() const { + return UINT_MAX; + } + + bool HasNotWarned(uint32 position, uint32 stmtOriginalID); + + private: + void DumpTypeTreeToCxxHeaderFile(MIRType &ty, std::unordered_set &dumpedClasses) const; + + MemPool *memPool; + MemPool *pragmaMemPool; + MapleAllocator memPoolAllocator; + MapleAllocator pragmaMemPoolAllocator; + MapleAllocator inlineSummaryAlloc; // For allocating function inline summary + MapleList functionList; // function table in the order of the appearance of function bodies; it + // excludes prototype-only functions + MapleVector importedMplt; + MIRTypeNameTable *typeNameTab; + MapleVector typeDefOrder; + + MapleSet externStructTypeSet; + MapleSet symbolSet; + MapleVector symbolDefOrder; + Profile profile; + MplProfileData *mplProfile; + LiteProfile liteProfile; + bool someSymbolNeedForwDecl = false; // some symbols' addressses used in initialization + + std::ostream &out; + MIRBuilder *mirBuilder; + std::string entryFuncName = ""; // name of the entry function + std::string fileName; + TyIdx throwableTyIdx{0}; // a special type that is the base of java exception type. only used for java + bool withProfileInfo = false; + + DebugInfo *dbgInfo = nullptr; + bool withDbgInfo = false; + MIRScope *scope = nullptr; + + // for cg in mplt + BinaryMplt *binMplt = nullptr; + bool inIPA = false; + bool withMe = true; + MIRInfoVector fileInfo; // store info provided under fileInfo keyword + MapleVector fileInfoIsString; // tells if an entry has string value + MIRDataVector fileData; + MIRInfoVector srcFileInfo; // store info provided under srcFileInfo keyword + MIRFlavor flavor = kFlavorUnknown; + MIRSrcLang srcLang = kSrcLangUnknown; // the source language + uint16 id = 0xffff; + uint32 globalMemSize = 0; // size of storage space for all global variables + uint8 *globalBlkMap = nullptr; // the memory map of the block containing all the + // globals, for specifying static initializations + uint8 *globalWordsTypeTagged = nullptr; // bit vector where the Nth bit tells whether + // the Nth word in globalBlkMap has typetag; + // if yes, the typetag is the N+1th word; the + // bitvector's size is given by + // BlockSize2BitvectorSize(globalMemSize) + uint8 *globalWordsRefCounted = nullptr; // bit vector where the Nth bit tells whether + // the Nth word points to a reference-counted + // dynamic memory block; the bitvector's size + // is given by BlockSize2BitvectorSize(globalMemSize) + uint32 numFuncs = 0; // because puIdx 0 is reserved, numFuncs is also the highest puIdx + MapleVector importFiles; + MapleVector importPaths; + MapleVector asmDecls; + MapleSet classList; + + std::map> method2TargetMap; + std::map> method2TargetHash; + std::map eaSummary; + + bool useFuncCodeMemPoolTmp = false; + MIRFunction *entryFunc = nullptr; + uint32 floatNum = 0; + // curFunction for single thread, curFunctionMap for multiple threads + std::map curFunctionMap; + mutable std::mutex curFunctionMutex; + MIRFunction *curFunction; + MapleSet optimizedFuncs; + MapleSet optimizedFuncsType; + // Add the field for decouple optimization + std::unordered_set superCallSet; + // 
record all the fields that are initialized in the constructor. module scope, + // if puIdx doesn't appear in this map, it writes to all field id + // if puIdx appears in the map, but it's corresponding MapleSet is nullptr, it writes nothing fieldID + // if puIdx appears in the map, and the value of first corresponding MapleSet is 0, the puIdx appears in this module + // and writes to all field id otherwise, it writes the field ids in MapleSet + MapleMap*> puIdxFieldInitializedMap; + mutable std::shared_timed_mutex fieldMapMutex; + MapleSet inliningGlobals; // global symbols accessed, used for inlining + bool hasPartO2List = false; + MapleSet partO2FuncList; + std::string inputFileName = ""; + std::string baseName = ""; + std::string outputFileName = ""; + MapleMap> safetyWarningMap; // indexed map for large module. + std::map>> calleeParamAboutInt; + std::map>> calleeParamAboutDouble; + std::map>> calleeParamAboutFloat; + std::map> funcImportantExpr; +}; +#endif // MIR_FEATURE_FULL +} // namespace maple +#endif // MAPLE_IR_INCLUDE_MIR_MODULE_H diff --git a/src/mapleall/maple_ir/include/mir_nodes.h b/src/mapleall/maple_ir/include/mir_nodes.h new file mode 100644 index 0000000000000000000000000000000000000000..4457509c6c8c1a1e5980c8f65cced68ed3952ef1 --- /dev/null +++ b/src/mapleall/maple_ir/include/mir_nodes.h @@ -0,0 +1,3791 @@ +/* + * Copyright (c) [2019-2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLE_IR_INCLUDE_MIR_NODES_H +#define MAPLE_IR_INCLUDE_MIR_NODES_H +#include +#include +#include + +#include "cmpl.h" +#include "maple_string.h" +#include "mir_const.h" +#include "mir_module.h" +#include "mir_type.h" +#include "opcode_info.h" +#include "opcodes.h" +#include "ptr_list_ref.h" +#include "src_position.h" + +namespace maple { +constexpr size_t kFirstOpnd = 0; +constexpr size_t kSecondOpnd = 1; +constexpr size_t kThirdOpnd = 2; + +extern MIRModule *theMIRModule; +extern void EmitStr(const MapleString &mplStr); + +class MIRPregTable; // circular dependency exists, no other choice +class TypeTable; // circular dependency exists, no other choice +class VerifyResult; // circular dependency exists, no other choice +class CallNode; + +struct RegFieldPair { + public: + RegFieldPair() = default; + + RegFieldPair(FieldID fidx, PregIdx pidx) : fieldID(fidx), pregIdx(pidx) {} + + bool IsReg() const { + return pregIdx > 0; + } + + FieldID GetFieldID() const { + return fieldID; + } + + PregIdx GetPregIdx() const { + return pregIdx; + } + + void SetFieldID(FieldID fld) { + fieldID = fld; + } + + void SetPregIdx(PregIdx idx) { + pregIdx = idx; + } + + private: + FieldID fieldID = 0; + PregIdx pregIdx = 0; +}; + +using CallReturnPair = std::pair; +using CallReturnVector = MapleVector; +// Made public so that other modules (such as maplebe) can print intrinsic names +// in debug information or comments in assembly files. 
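A worked example (not part of the patch) for BlockSize2BitVectorSize defined in mir_module.h above, spelling out the arithmetic from its comment; the wrapper function is hypothetical.

#include "mir_module.h"

// For a 64-byte block: (64 + 3) / 4 = 16 words, one bit per word gives
// (16 + 7) / 8 = 2 bytes, and rounding up to a 4-byte word boundary yields 4.
maple::uint32 ExampleBitVectorBytes() {
  return maple::BlockSize2BitVectorSize(64);  // == 4
}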
+const char *GetIntrinsicName(MIRIntrinsicID intrn); +class BaseNode : public BaseNodeT { + public: + explicit BaseNode(Opcode o) { + op = o; + ptyp = kPtyInvalid; + typeFlag = 0; + numOpnds = 0; + } + + BaseNode(Opcode o, uint8 numOpr) { + op = o; + ptyp = kPtyInvalid; + typeFlag = 0; + numOpnds = numOpr; + } + + BaseNode(const Opcode o, const PrimType typ, uint8 numOpr) { + op = o; + ptyp = typ; + typeFlag = 0; + numOpnds = numOpr; + } + + virtual ~BaseNode() = default; + + virtual BaseNode *CloneTree(MapleAllocator &allocator) const { + return allocator.GetMemPool()->New(*this); + } + + virtual void DumpBase(int32 indent) const; + + virtual void Dump(int32 indent) const { + DumpBase(indent); + } + + void Dump() const { + Dump(0); + LogInfo::MapleLogger() << '\n'; + } + + virtual uint8 SizeOfInstr() const { + return kOpcodeInfo.GetTableItemAt(GetOpCode()).instrucSize; + } + + const char *GetOpName() const; + bool MayThrowException() const; + size_t NumOpnds() const override { + return numOpnds; + } + + virtual BaseNode *Opnd(size_t) const { + ASSERT(0, "override needed"); + return nullptr; + } + + virtual void SetOpnd(BaseNode*, size_t) { + ASSERT(0, "This should not happen"); + } + + virtual bool IsLeaf() const { + return true; + } + + virtual CallReturnVector *GetCallReturnVector() { + return nullptr; + } + + virtual MIRType *GetCallReturnType() { + return nullptr; + } + + virtual bool IsUnaryNode() const { + return false; + } + + virtual bool IsBinaryNode() const { + return false; + } + + virtual bool IsTernaryNode() const { + return false; + } + + virtual bool IsNaryNode() const { + return false; + } + + bool IsCondBr() const { + return kOpcodeInfo.IsCondBr(GetOpCode()); + } + + bool IsConstval() const { + return op == OP_constval; + } + + bool IsConstExpr() const { + return op == OP_constval || op == OP_conststr || op == OP_conststr16; + } + + virtual bool Verify() const { + return true; + } + + virtual bool Verify(VerifyResult&) const { + return Verify(); + } + + virtual bool IsSSANode() const { + return false; + } + + virtual bool IsSameContent(const BaseNode *node) const { + return false; + } +}; + +class UnaryNode : public BaseNode { + public: + explicit UnaryNode(Opcode o) : BaseNode(o, 1) {} + + UnaryNode(Opcode o, PrimType typ) : BaseNode(o, typ, 1) {} + + UnaryNode(Opcode o, PrimType typ, BaseNode *expr) : BaseNode(o, typ, 1), uOpnd(expr) {} + + virtual ~UnaryNode() override = default; + + void DumpOpnd(const MIRModule &mod, int32 indent) const; + void DumpOpnd(int32 indent) const; + void Dump(int32 indent) const override; + bool Verify() const override; + + bool Verify(VerifyResult&) const override { + return Verify(); + } + + UnaryNode *CloneTree(MapleAllocator &allocator) const override { + auto *node = allocator.GetMemPool()->New(*this); + node->SetOpnd(uOpnd->CloneTree(allocator), 0); + return node; + } + + BaseNode *Opnd(size_t) const override { + return uOpnd; + } + + size_t NumOpnds() const override { + return 1; + } + + void SetOpnd(BaseNode *node, size_t) override { + uOpnd = node; + } + + bool IsLeaf() const override { + return false; + } + + bool IsUnaryNode() const override { + return true; + } + + bool IsSameContent(const BaseNode *node) const override; + + private: + BaseNode *uOpnd = nullptr; +}; + +class TypeCvtNode : public UnaryNode { + public: + explicit TypeCvtNode(Opcode o) : UnaryNode(o) {} + + TypeCvtNode(Opcode o, PrimType typ) : UnaryNode(o, typ) {} + + TypeCvtNode(Opcode o, PrimType typ, PrimType fromtyp, BaseNode *expr) + : UnaryNode(o, typ, 
expr), fromPrimType(fromtyp) {} + + virtual ~TypeCvtNode() = default; + + void Dump(int32 indent) const override; + bool Verify() const override; + + bool Verify(VerifyResult&) const override { + return Verify(); + } + + TypeCvtNode *CloneTree(MapleAllocator &allocator) const override { + auto *node = allocator.GetMemPool()->New(*this); + node->SetOpnd(Opnd(0)->CloneTree(allocator), 0); + return node; + } + + PrimType FromType() const { + return fromPrimType; + } + + void SetFromType(PrimType from) { + fromPrimType = from; + } + + bool IsSameContent(const BaseNode *node) const override; + + private: + PrimType fromPrimType = kPtyInvalid; +}; + +// used for retype +class RetypeNode : public TypeCvtNode { + public: + RetypeNode() : TypeCvtNode(OP_retype) {} + + explicit RetypeNode(PrimType typ) : TypeCvtNode(OP_retype, typ) {} + + RetypeNode(PrimType typ, PrimType fromtyp, TyIdx idx, BaseNode *expr) + : TypeCvtNode(OP_retype, typ, fromtyp, expr), tyIdx(idx) {} + + virtual ~RetypeNode() = default; + void Dump(int32 indent) const override; + bool Verify(VerifyResult &verifyResult) const override; + + RetypeNode *CloneTree(MapleAllocator &allocator) const override { + auto *node = allocator.GetMemPool()->New(*this); + node->SetOpnd(Opnd(0)->CloneTree(allocator), 0); + return node; + } + + const TyIdx &GetTyIdx() const { + return tyIdx; + } + + void SetTyIdx(const TyIdx tyIdxVal) { + tyIdx = tyIdxVal; + } + + private: + bool VerifyPrimTypesAndOpnd() const; + bool CheckFromJarray(const MIRType &from, const MIRType &to, VerifyResult &verifyResult) const; + bool VerifyCompleteMIRType(const MIRType &from, const MIRType &to, bool isJavaRefType, + VerifyResult &verifyResult) const; + bool VerifyJarrayDimention(const MIRJarrayType &from, const MIRJarrayType &to, VerifyResult &verifyResult) const; + bool IsJavaAssignable(const MIRType &from, const MIRType &to, VerifyResult &verifyResult) const; + + bool BothPointerOrJarray(const MIRType &from, const MIRType &to) const { + if (from.GetKind() != to.GetKind()) { + return false; + } + return from.IsMIRPtrType() || from.IsMIRJarrayType(); + } + + bool IsInterfaceOrClass(const MIRType &mirType) const { + return mirType.IsMIRClassType() || mirType.IsMIRInterfaceType(); + } + + bool IsJavaRefType(const MIRType &mirType) const { + return mirType.IsMIRJarrayType() || mirType.IsMIRClassType() || mirType.IsMIRInterfaceType(); + } + + TyIdx tyIdx = TyIdx(0); +}; + +// used for extractbits, sext, zext +class ExtractbitsNode : public UnaryNode { + public: + explicit ExtractbitsNode(Opcode o) : UnaryNode(o) {} + + ExtractbitsNode(Opcode o, PrimType typ) : UnaryNode(o, typ) {} + + ExtractbitsNode(Opcode o, PrimType typ, uint8 offset, uint8 size) + : UnaryNode(o, typ), bitsOffset(offset), bitsSize(size) {} + + ExtractbitsNode(Opcode o, PrimType typ, uint8 offset, uint8 size, BaseNode *expr) + : UnaryNode(o, typ, expr), bitsOffset(offset), bitsSize(size) {} + + virtual ~ExtractbitsNode() = default; + + void Dump(int32 indent) const override; + bool Verify() const override; + + ExtractbitsNode *CloneTree(MapleAllocator &allocator) const override { + auto *node = allocator.GetMemPool()->New(*this); + node->SetOpnd(Opnd(0)->CloneTree(allocator), 0); + return node; + } + + uint8 GetBitsOffset() const { + return bitsOffset; + } + + void SetBitsOffset(uint8 offset) { + bitsOffset = offset; + } + + uint8 GetBitsSize() const { + return bitsSize; + } + + void SetBitsSize(uint8 size) { + bitsSize = size; + } + + private: + uint8 bitsOffset = 0; + uint8 bitsSize = 0; +}; + +class 
GCMallocNode : public BaseNode { + public: + explicit GCMallocNode(Opcode o) : BaseNode(o) {} + + GCMallocNode(Opcode o, PrimType typ, TyIdx tIdx) : BaseNode(o, typ, 0), tyIdx(tIdx) {} + + virtual ~GCMallocNode() = default; + + void Dump(int32 indent) const override; + + GCMallocNode *CloneTree(MapleAllocator &allocator) const override { + auto *node = allocator.GetMemPool()->New(*this); + return node; + } + + TyIdx GetTyIdx() const { + return tyIdx; + } + + void SetTyIdx(TyIdx idx) { + tyIdx = idx; + } + + void SetOrigPType(PrimType type) { + origPrimType = type; + } + + private: + TyIdx tyIdx = TyIdx(0); + PrimType origPrimType = kPtyInvalid; +}; + +class JarrayMallocNode : public UnaryNode { + public: + explicit JarrayMallocNode(Opcode o) : UnaryNode(o) {} + + JarrayMallocNode(Opcode o, PrimType typ) : UnaryNode(o, typ) {} + + JarrayMallocNode(Opcode o, PrimType typ, TyIdx typeIdx) : UnaryNode(o, typ), tyIdx(typeIdx) {} + + JarrayMallocNode(Opcode o, PrimType typ, TyIdx typeIdx, BaseNode *opnd) : UnaryNode(o, typ, opnd), tyIdx(typeIdx) {} + + virtual ~JarrayMallocNode() = default; + + void Dump(int32 indent) const override; + + JarrayMallocNode *CloneTree(MapleAllocator &allocator) const override { + auto *node = allocator.GetMemPool()->New(*this); + node->SetOpnd(Opnd(0)->CloneTree(allocator), 0); + return node; + } + + TyIdx GetTyIdx() const { + return tyIdx; + } + + void SetTyIdx(TyIdx idx) { + tyIdx = idx; + } + + private: + TyIdx tyIdx = TyIdx(0); +}; + +// iaddrof also use this node +class IreadNode : public UnaryNode { + public: + explicit IreadNode(Opcode o) : UnaryNode(o) {} + + IreadNode(Opcode o, PrimType typ) : UnaryNode(o, typ) {} + + IreadNode(Opcode o, PrimType typ, TyIdx typeIdx, FieldID fid) : UnaryNode(o, typ), tyIdx(typeIdx), fieldID(fid) {} + + IreadNode(Opcode o, PrimType typ, TyIdx typeIdx, FieldID fid, BaseNode *expr) + : UnaryNode(o, typ, expr), tyIdx(typeIdx), fieldID(fid) {} + + virtual ~IreadNode() = default; + void Dump(int32 indent) const override; + bool Verify() const override; + + IreadNode *CloneTree(MapleAllocator &allocator) const override { + auto *node = allocator.GetMemPool()->New(*this); + node->SetOpnd(Opnd(0)->CloneTree(allocator), 0); + return node; + } + + const TyIdx &GetTyIdx() const { + return tyIdx; + } + + void SetTyIdx(const TyIdx tyIdxVal) { + tyIdx = tyIdxVal; + } + + FieldID GetFieldID() const { + return fieldID; + } + + void SetFieldID(FieldID fieldIDVal) { + fieldID = fieldIDVal; + } + + bool IsSameContent(const BaseNode *node) const override; + + // the base of an address expr is either a leaf or an iread + BaseNode &GetAddrExprBase() const { + BaseNode *base = Opnd(0); + while (base->NumOpnds() != 0 && base->GetOpCode() != OP_iread) { + base = base->Opnd(0); + } + return *base; + } + + bool IsVolatile() const; + + MIRType *GetType() const; + + protected: + TyIdx tyIdx = TyIdx(0); + FieldID fieldID = 0; +}; + +// IaddrofNode has the same member fields and member methods as IreadNode +using IaddrofNode = IreadNode; + +class IreadoffNode : public UnaryNode { + public: + IreadoffNode() : UnaryNode(OP_ireadoff) {} + + IreadoffNode(PrimType ptyp, int32 ofst) : UnaryNode(OP_ireadoff, ptyp), offset(ofst) {} + + IreadoffNode(PrimType ptyp, BaseNode *opnd, int32 ofst) : UnaryNode(OP_ireadoff, ptyp, opnd), offset(ofst) {} + + virtual ~IreadoffNode() = default; + + void Dump(int32 indent) const override; + bool Verify() const override; + + IreadoffNode *CloneTree(MapleAllocator &allocator) const override { + auto *node = 
allocator.GetMemPool()->New(*this); + node->SetOpnd(Opnd(0)->CloneTree(allocator), 0); + return node; + } + + int32 GetOffset() const { + return offset; + } + + void SetOffset(int32 offsetValue) { + offset = offsetValue; + } + + bool IsSameContent(const BaseNode *node) const override; + + private: + int32 offset = 0; +}; + +class IreadFPoffNode : public BaseNode { + public: + IreadFPoffNode() : BaseNode(OP_ireadfpoff) {} + + IreadFPoffNode(PrimType ptyp, int32 ofst) : BaseNode(OP_ireadfpoff, ptyp, 0), offset(ofst) {} + + virtual ~IreadFPoffNode() = default; + + void Dump(int32 indent) const override; + bool Verify() const override; + + IreadFPoffNode *CloneTree(MapleAllocator &allocator) const override { + auto *node = allocator.GetMemPool()->New(*this); + return node; + } + + int32 GetOffset() const { + return offset; + } + + void SetOffset(int32 offsetValue) { + offset = offsetValue; + } + + bool IsSameContent(const BaseNode *node) const override; + + private: + int32 offset = 0; +}; + +class IreadPCoffNode : public IreadFPoffNode { + public: + IreadPCoffNode(Opcode o, PrimType typ, uint8 numopns) { + op = o; + ptyp = typ; + numOpnds = numopns; + } + ~IreadPCoffNode() override {} +}; + +using AddroffPCNode = IreadPCoffNode; + +class BinaryOpnds { + public: + virtual ~BinaryOpnds() = default; + + virtual void Dump(int32 indent) const; + + BaseNode *GetBOpnd(size_t i) const { + CHECK_FATAL(i < kOperandNumBinary, "Invalid operand idx in BinaryOpnds"); + return bOpnd[i]; + } + + void SetBOpnd(BaseNode *node, size_t i) { + CHECK_FATAL(i < kOperandNumBinary, "Invalid operand idx in BinaryOpnds"); + bOpnd[i] = node; + } + + virtual bool IsSameContent(const BaseNode *node) const; + + private: + BaseNode *bOpnd[kOperandNumBinary]; +}; + +class BinaryNode : public BaseNode, public BinaryOpnds { + public: + explicit BinaryNode(Opcode o) : BaseNode(o, kOperandNumBinary) {} + + BinaryNode(Opcode o, PrimType typ) : BaseNode(o, typ, kOperandNumBinary) {} + + BinaryNode(Opcode o, PrimType typ, BaseNode *l, BaseNode *r) : BaseNode(o, typ, kOperandNumBinary) { + SetBOpnd(l, 0); + SetBOpnd(r, 1); + } + + virtual ~BinaryNode() = default; + + using BaseNode::Dump; + void Dump(int32 indent) const override; + bool Verify() const override; + + BinaryNode *CloneTree(MapleAllocator &allocator) const override { + auto *node = allocator.GetMemPool()->New(*this); + node->SetBOpnd(GetBOpnd(0)->CloneTree(allocator), 0); + node->SetBOpnd(GetBOpnd(1)->CloneTree(allocator), 1); + return node; + } + + bool IsCommutative() const { + switch (GetOpCode()) { + case OP_add: + case OP_mul: + case OP_band: + case OP_bior: + case OP_bxor: + case OP_land: + case OP_lior: + return true; + default: + return false; + } + } + + BaseNode *Opnd(size_t i) const override { + ASSERT(i < kOperandNumBinary, "invalid operand idx in BinaryNode"); + ASSERT(i >= 0, "invalid operand idx in BinaryNode"); + return GetBOpnd(i); + } + + size_t NumOpnds() const override { + return kOperandNumBinary; + } + + void SetOpnd(BaseNode *node, size_t i = 0) override { + SetBOpnd(node, i); + } + + bool IsLeaf() const override { + return false; + } + + bool IsBinaryNode() const override { + return true; + } + bool IsSameContent(const BaseNode *node) const override; +}; + +class CompareNode : public BinaryNode { + public: + explicit CompareNode(Opcode o) : BinaryNode(o) {} + + CompareNode(Opcode o, PrimType typ) : BinaryNode(o, typ) {} + + CompareNode(Opcode o, PrimType typ, PrimType otype, BaseNode *l, BaseNode *r) + : BinaryNode(o, typ, l, r), opndType(otype) 
{} + + virtual ~CompareNode() = default; + + using BinaryNode::Dump; + void Dump(int32 indent) const override; + bool Verify() const override; + + CompareNode *CloneTree(MapleAllocator &allocator) const override { + auto *node = allocator.GetMemPool()->New(*this); + node->SetBOpnd(GetBOpnd(0)->CloneTree(allocator), 0); + node->SetBOpnd(GetBOpnd(1)->CloneTree(allocator), 1); + return node; + } + + PrimType GetOpndType() const { + return opndType; + } + + void SetOpndType(PrimType type) { + opndType = type; + } + + private: + PrimType opndType = kPtyInvalid; // type of operands. +}; + +class DepositbitsNode : public BinaryNode { + public: + DepositbitsNode() : BinaryNode(OP_depositbits) {} + + DepositbitsNode(Opcode o, PrimType typ) : BinaryNode(o, typ) {} + + DepositbitsNode(Opcode o, PrimType typ, uint8 offset, uint8 size, BaseNode *l, BaseNode *r) + : BinaryNode(o, typ, l, r), bitsOffset(offset), bitsSize(size) {} + + virtual ~DepositbitsNode() = default; + + void Dump(int32 indent) const override; + bool Verify() const override; + + DepositbitsNode *CloneTree(MapleAllocator &allocator) const override { + auto *node = allocator.GetMemPool()->New(*this); + node->SetBOpnd(GetBOpnd(0)->CloneTree(allocator), 0); + node->SetBOpnd(GetBOpnd(1)->CloneTree(allocator), 1); + return node; + } + + uint8 GetBitsOffset() const { + return bitsOffset; + } + + void SetBitsOffset(uint8 offset) { + bitsOffset = offset; + } + + uint8 GetBitsSize() const { + return bitsSize; + } + + void SetBitsSize(uint8 size) { + bitsSize = size; + } + + private: + uint8 bitsOffset = 0; + uint8 bitsSize = 0; +}; + +// used for resolveinterfacefunc, resolvevirtualfunc +// bOpnd[0] stores base vtab/itab address +// bOpnd[1] stores offset +class ResolveFuncNode : public BinaryNode { + public: + explicit ResolveFuncNode(Opcode o) : BinaryNode(o) {} + + ResolveFuncNode(Opcode o, PrimType typ) : BinaryNode(o, typ) {} + + ResolveFuncNode(Opcode o, PrimType typ, PUIdx idx) : BinaryNode(o, typ), puIdx(idx) {} + + ResolveFuncNode(Opcode o, PrimType typ, PUIdx pIdx, BaseNode *opnd0, BaseNode *opnd1) + : BinaryNode(o, typ, opnd0, opnd1), puIdx(pIdx) {} + + virtual ~ResolveFuncNode() = default; + + void Dump(int32 indent) const override; + + ResolveFuncNode *CloneTree(MapleAllocator &allocator) const override { + auto *node = allocator.GetMemPool()->New(*this); + node->SetBOpnd(GetBOpnd(0)->CloneTree(allocator), 0); + node->SetBOpnd(GetBOpnd(1)->CloneTree(allocator), 1); + return node; + } + + BaseNode *GetTabBaseAddr() const { + return GetBOpnd(0); + } + + BaseNode *GetOffset() const { + return GetBOpnd(1); + } + + PUIdx GetPuIdx() const { + return puIdx; + } + + void SetPUIdx(PUIdx idx) { + puIdx = idx; + } + + private: + PUIdx puIdx = 0; +}; + +class TernaryNode : public BaseNode { + public: + explicit TernaryNode(Opcode o) : BaseNode(o, kOperandNumTernary) {} + + TernaryNode(Opcode o, PrimType typ) : BaseNode(o, typ, kOperandNumTernary) {} + + TernaryNode(Opcode o, PrimType typ, BaseNode *e0, BaseNode *e1, BaseNode *e2) : BaseNode(o, typ, kOperandNumTernary) { + topnd[0] = e0; + topnd[1] = e1; + topnd[2] = e2; + } + + virtual ~TernaryNode() = default; + + void Dump(int32 indent) const override; + bool Verify() const override; + + TernaryNode *CloneTree(MapleAllocator &allocator) const override { + auto *node = allocator.GetMemPool()->New(*this); + node->topnd[0] = topnd[0]->CloneTree(allocator); + node->topnd[1] = topnd[1]->CloneTree(allocator); + node->topnd[2] = topnd[2]->CloneTree(allocator); + return node; + } + + BaseNode 
*Opnd(size_t i) const override { + CHECK_FATAL(i < kOperandNumTernary, "array index out of range"); + return topnd[i]; + } + + size_t NumOpnds() const override { + return kOperandNumTernary; + } + + void SetOpnd(BaseNode *node, size_t i = 0) override { + CHECK_FATAL(i < kOperandNumTernary, "array index out of range"); + topnd[i] = node; + } + + bool IsLeaf() const override { + return false; + } + + bool IsTernaryNode() const override { + return true; + } + + private: + BaseNode *topnd[kOperandNumTernary] = {nullptr, nullptr, nullptr}; +}; + +class NaryOpnds { + public: + explicit NaryOpnds(MapleAllocator &mpallocter) : nOpnd(mpallocter.Adapter()) {} + + virtual ~NaryOpnds() = default; + + virtual void Dump(int32 indent) const; + bool VerifyOpnds() const; + + const MapleVector &GetNopnd() const { + return nOpnd; + } + + MapleVector &GetNopnd() { + return nOpnd; + } + + size_t GetNopndSize() const { + return nOpnd.size(); + } + + BaseNode *GetNopndAt(size_t i) const { + CHECK_FATAL(i < nOpnd.size(), "array index out of range"); + return nOpnd[i]; + } + + void SetNOpndAt(size_t i, BaseNode *opnd) { + CHECK_FATAL(i < nOpnd.size(), "array index out of range"); + nOpnd[i] = opnd; + } + + void SetNOpnd(const MapleVector &val) { + nOpnd = val; + } + + private: + MapleVector nOpnd; +}; + +class NaryNode : public BaseNode, public NaryOpnds { + public: + NaryNode(MapleAllocator &allocator, Opcode o) : BaseNode(o), NaryOpnds(allocator) {} + + NaryNode(const MIRModule &mod, Opcode o) : NaryNode(mod.GetCurFuncCodeMPAllocator(), o) {} + + NaryNode(MapleAllocator &allocator, Opcode o, PrimType typ) : BaseNode(o, typ, 0), NaryOpnds(allocator) {} + + NaryNode(const MIRModule &mod, Opcode o, PrimType typ) : NaryNode(mod.GetCurFuncCodeMPAllocator(), o, typ) {} + + NaryNode(MapleAllocator &allocator, const NaryNode &node) + : BaseNode(node.GetOpCode(), node.GetPrimType(), node.numOpnds), NaryOpnds(allocator) {} + + NaryNode(const MIRModule &mod, const NaryNode &node) : NaryNode(mod.GetCurFuncCodeMPAllocator(), node) {} + + NaryNode(NaryNode &node) = delete; + NaryNode &operator=(const NaryNode &node) = delete; + virtual ~NaryNode() = default; + + void Dump(int32 indent) const override; + + NaryNode *CloneTree(MapleAllocator &allocator) const override { + auto *node = allocator.GetMemPool()->New(allocator, *this); + for (size_t i = 0; i < GetNopndSize(); ++i) { + node->GetNopnd().push_back(GetNopndAt(i)->CloneTree(allocator)); + } + return node; + } + + BaseNode *Opnd(size_t i) const override { + return GetNopndAt(i); + } + + size_t NumOpnds() const override { + ASSERT(numOpnds == GetNopndSize(), "NaryNode has wrong numOpnds field"); + return GetNopndSize(); + } + + void SetOpnd(BaseNode *node, size_t i = 0) override { + ASSERT(i < GetNopnd().size(), "array index out of range"); + SetNOpndAt(i, node); + } + + bool IsLeaf() const override { + return false; + } + + bool Verify() const override { + return true; + } + + bool IsNaryNode() const override { + return true; + } +}; + +class IntrinsicopNode : public NaryNode { + public: + IntrinsicopNode(MapleAllocator &allocator, Opcode o, TyIdx typeIdx = TyIdx()) + : NaryNode(allocator, o), intrinsic(INTRN_UNDEFINED), tyIdx(typeIdx) {} + + IntrinsicopNode(const MIRModule &mod, Opcode o, TyIdx typeIdx = TyIdx()) + : IntrinsicopNode(mod.GetCurFuncCodeMPAllocator(), o, typeIdx) {} + + IntrinsicopNode(MapleAllocator &allocator, Opcode o, PrimType typ, TyIdx typeIdx = TyIdx()) + : NaryNode(allocator, o, typ), intrinsic(INTRN_UNDEFINED), tyIdx(typeIdx) {} + + 
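+  // Illustrative usage sketch (not part of the header itself; assumes a MIRModule `mod` and an
+  // existing IntrinsicopNode `orig` are in scope). The n-ary expression nodes above share one
+  // pattern: every MIRModule-based constructor delegates to the MapleAllocator-based one through
+  // mod.GetCurFuncCodeMPAllocator(), and CloneTree() deep-copies each operand out of the
+  // allocator's memory pool:
+  //
+  //   MapleAllocator &alloc = mod.GetCurFuncCodeMPAllocator();
+  //   IntrinsicopNode *copy = orig->CloneTree(alloc);  // copies intrinsic id, tyIdx and all operands
+  //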
IntrinsicopNode(const MIRModule &mod, Opcode o, PrimType typ, TyIdx typeIdx = TyIdx()) + : IntrinsicopNode(mod.GetCurFuncCodeMPAllocator(), o, typ, typeIdx) {} + + IntrinsicopNode(MapleAllocator &allocator, const IntrinsicopNode &node) + : NaryNode(allocator, node), intrinsic(node.GetIntrinsic()), tyIdx(node.GetTyIdx()) {} + + IntrinsicopNode(const MIRModule &mod, const IntrinsicopNode &node) + : IntrinsicopNode(mod.GetCurFuncCodeMPAllocator(), node) {} + + IntrinsicopNode(IntrinsicopNode &node) = delete; + IntrinsicopNode &operator=(const IntrinsicopNode &node) = delete; + virtual ~IntrinsicopNode() = default; + + void Dump(int32 indent) const override; + bool Verify() const override; + bool Verify(VerifyResult &verifyResult) const override; + + IntrinsicopNode *CloneTree(MapleAllocator &allocator) const override { + auto *node = allocator.GetMemPool()->New(allocator, *this); + for (size_t i = 0; i < GetNopndSize(); ++i) { + node->GetNopnd().push_back(GetNopndAt(i)->CloneTree(allocator)); + } + node->SetNumOpnds(GetNopndSize()); + return node; + } + + MIRIntrinsicID GetIntrinsic() const { + return intrinsic; + } + + void SetIntrinsic(MIRIntrinsicID intrinsicID) { + intrinsic = intrinsicID; + } + + TyIdx GetTyIdx() const { + return tyIdx; + } + + void SetTyIdx(TyIdx idx) { + tyIdx = idx; + } + + // IntrinDesc query + const IntrinDesc &GetIntrinDesc() const { + return IntrinDesc::intrinTable[intrinsic]; + } + + bool VerifyJArrayLength(VerifyResult &verifyResult) const; + + private: + MIRIntrinsicID intrinsic; + TyIdx tyIdx; +}; + +class ConstvalNode : public BaseNode { + public: + ConstvalNode() : BaseNode(OP_constval) {} + + explicit ConstvalNode(PrimType typ) : BaseNode(OP_constval, typ, 0) {} + + explicit ConstvalNode(MIRConst *constv) : BaseNode(OP_constval), constVal(constv) {} + + ConstvalNode(PrimType typ, MIRConst *constv) : BaseNode(OP_constval, typ, 0), constVal(constv) {} + virtual ~ConstvalNode() = default; + void Dump(int32 indent) const override; + + ConstvalNode *CloneTree(MapleAllocator &allocator) const override { + return allocator.GetMemPool()->New(*this); + } + + const MIRConst *GetConstVal() const { + return constVal; + } + + MIRConst *GetConstVal() { + return constVal; + } + + void SetConstVal(MIRConst *val) { + constVal = val; + } + + bool IsSameContent(const BaseNode *node) const override; + + private: + MIRConst *constVal = nullptr; +}; + +class ConststrNode : public BaseNode { + public: + ConststrNode() : BaseNode(OP_conststr) {} + + explicit ConststrNode(UStrIdx i) : BaseNode(OP_conststr), strIdx(i) {} + + ConststrNode(PrimType typ, UStrIdx i) : BaseNode(OP_conststr, typ, 0), strIdx(i) {} + + virtual ~ConststrNode() = default; + + void Dump(int32 indent) const override; + bool IsSameContent(const BaseNode *node) const override; + + ConststrNode *CloneTree(MapleAllocator &allocator) const override { + return allocator.GetMemPool()->New(*this); + } + + UStrIdx GetStrIdx() const { + return strIdx; + } + + void SetStrIdx(UStrIdx idx) { + strIdx = idx; + } + + private: + UStrIdx strIdx = UStrIdx(0); +}; + +class Conststr16Node : public BaseNode { + public: + Conststr16Node() : BaseNode(OP_conststr16) {} + + explicit Conststr16Node(U16StrIdx i) : BaseNode(OP_conststr16), strIdx(i) {} + + Conststr16Node(PrimType typ, U16StrIdx i) : BaseNode(OP_conststr16, typ, 0), strIdx(i) {} + + virtual ~Conststr16Node() = default; + + void Dump(int32 indent) const override; + bool IsSameContent(const BaseNode *node) const override; + + Conststr16Node *CloneTree(MapleAllocator 
&allocator) const override { + return allocator.GetMemPool()->New(*this); + } + + U16StrIdx GetStrIdx() const { + return strIdx; + } + + void SetStrIdx(U16StrIdx idx) { + strIdx = idx; + } + + private: + U16StrIdx strIdx = U16StrIdx(0); +}; + +class SizeoftypeNode : public BaseNode { + public: + SizeoftypeNode() : BaseNode(OP_sizeoftype) {} + + explicit SizeoftypeNode(TyIdx t) : BaseNode(OP_sizeoftype), tyIdx(t) {} + + SizeoftypeNode(PrimType type, TyIdx t) : BaseNode(OP_sizeoftype, type, 0), tyIdx(t) {} + + virtual ~SizeoftypeNode() = default; + + void Dump(int32 indent) const override; + bool Verify() const override; + + SizeoftypeNode *CloneTree(MapleAllocator &allocator) const override { + return allocator.GetMemPool()->New(*this); + } + + TyIdx GetTyIdx() const { + return tyIdx; + } + + void SetTyIdx(TyIdx idx) { + tyIdx = idx; + } + + private: + TyIdx tyIdx = TyIdx(0); +}; + +class FieldsDistNode : public BaseNode { + public: + FieldsDistNode() : BaseNode(OP_fieldsdist) {} + + FieldsDistNode(TyIdx t, FieldID f1, FieldID f2) : BaseNode(OP_fieldsdist), tyIdx(t), fieldID1(f1), fieldID2(f2) {} + + FieldsDistNode(PrimType typ, TyIdx t, FieldID f1, FieldID f2) + : BaseNode(OP_fieldsdist, typ, 0), tyIdx(t), fieldID1(f1), fieldID2(f2) {} + + virtual ~FieldsDistNode() = default; + + void Dump(int32 indent) const override; + + FieldsDistNode *CloneTree(MapleAllocator &allocator) const override { + return allocator.GetMemPool()->New(*this); + } + + TyIdx GetTyIdx() const { + return tyIdx; + } + + void SetTyIdx(TyIdx idx) { + tyIdx = idx; + } + + FieldID GetFiledID1() const { + return fieldID1; + } + + void SetFiledID1(FieldID id) { + fieldID1 = id; + } + + FieldID GetFiledID2() const { + return fieldID2; + } + + void SetFiledID2(FieldID id) { + fieldID2 = id; + } + + private: + TyIdx tyIdx = TyIdx(0); + FieldID fieldID1 = 0; + FieldID fieldID2 = 0; +}; + +class ArrayNode : public NaryNode { + public: + explicit ArrayNode(MapleAllocator &allocator) : NaryNode(allocator, OP_array) {} + + explicit ArrayNode(const MIRModule &mod) : ArrayNode(mod.GetCurFuncCodeMPAllocator()) {} + + ArrayNode(MapleAllocator &allocator, PrimType typ, TyIdx idx) : NaryNode(allocator, OP_array, typ), tyIdx(idx) {} + + ArrayNode(const MIRModule &mod, PrimType typ, TyIdx idx) : ArrayNode(mod.GetCurFuncCodeMPAllocator(), typ, idx) {} + + ArrayNode(MapleAllocator &allocator, PrimType typ, TyIdx idx, bool bcheck) + : NaryNode(allocator, OP_array, typ), tyIdx(idx), boundsCheck(bcheck) {} + + ArrayNode(const MIRModule &mod, PrimType typ, TyIdx idx, bool bcheck) + : ArrayNode(mod.GetCurFuncCodeMPAllocator(), typ, idx, bcheck) {} + + ArrayNode(MapleAllocator &allocator, const ArrayNode &node) + : NaryNode(allocator, node), tyIdx(node.tyIdx), boundsCheck(node.boundsCheck) {} + + ArrayNode(const MIRModule &mod, const ArrayNode &node) : ArrayNode(mod.GetCurFuncCodeMPAllocator(), node) {} + + ArrayNode(ArrayNode &node) = delete; + ArrayNode &operator=(const ArrayNode &node) = delete; + virtual ~ArrayNode() = default; + + void Dump(int32 indent) const override; + bool Verify() const override; + bool IsSameBase(ArrayNode *arry); + + size_t NumOpnds() const override { + ASSERT(numOpnds == GetNopndSize(), "ArrayNode has wrong numOpnds field"); + return GetNopndSize(); + } + + ArrayNode *CloneTree(MapleAllocator &allocator) const override { + auto *node = allocator.GetMemPool()->New(allocator, *this); + for (size_t i = 0; i < GetNopndSize(); ++i) { + node->GetNopnd().push_back(GetNopndAt(i)->CloneTree(allocator)); + } + node->boundsCheck 
= boundsCheck; + node->SetNumOpnds(GetNopndSize()); + return node; + } + + const MIRType *GetArrayType(const TypeTable &tt) const; + MIRType *GetArrayType(const TypeTable &tt); + + BaseNode *GetIndex(size_t i) const { + return Opnd(i + 1); + } + + const BaseNode *GetDim(const MIRModule &mod, TypeTable &tt, int i) const; + BaseNode *GetDim(const MIRModule &mod, TypeTable &tt, int i); + + BaseNode *GetBase() const { + return Opnd(0); + } + + TyIdx GetTyIdx() const { + return tyIdx; + } + + void SetTyIdx(TyIdx idx) { + tyIdx = idx; + } + + bool GetBoundsCheck() const { + return boundsCheck; + } + + void SetBoundsCheck(bool check) { + boundsCheck = check; + } + + private: + TyIdx tyIdx; + bool boundsCheck = true; +}; + +class AddrofNode : public BaseNode { + public: + explicit AddrofNode(Opcode o) : BaseNode(o), stIdx() {} + + AddrofNode(Opcode o, PrimType typ) : AddrofNode(o, typ, StIdx(), 0) {} + + AddrofNode(Opcode o, PrimType typ, StIdx sIdx, FieldID fid) : BaseNode(o, typ, 0), stIdx(sIdx), fieldID(fid) {} + + virtual ~AddrofNode() = default; + + void Dump(int32 indent) const override; + bool Verify() const override; + bool CheckNode(const MIRModule &mod) const; + + AddrofNode *CloneTree(MapleAllocator &allocator) const override { + return allocator.GetMemPool()->New(*this); + } + + StIdx GetStIdx() const { + return stIdx; + } + + void SetStIdx(StIdx idx) { + stIdx = idx; + } + + void SetStFullIdx(uint32 idx) { + stIdx.SetFullIdx(idx); + } + + FieldID GetFieldID() const { + return fieldID; + } + + void SetFieldID(FieldID fieldIDVal) { + fieldID = fieldIDVal; + } + + bool IsVolatile(const MIRModule &mod) const; + + bool IsSameContent(const BaseNode *node) const override; + + bool MayAccessSameMemory(const BaseNode *node) const; + private: + StIdx stIdx; + FieldID fieldID = 0; +}; + +// DreadNode has the same member fields and member methods as AddrofNode +using DreadNode = AddrofNode; + +class DreadoffNode : public BaseNode { + public: + explicit DreadoffNode(Opcode o) : BaseNode(o), stIdx() {} + + DreadoffNode(Opcode o, PrimType typ) : BaseNode(o, typ, 0), stIdx() {} + + virtual ~DreadoffNode() = default; + + void Dump(int32 indent) const override; + + DreadoffNode *CloneTree(MapleAllocator &allocator) const override { + return allocator.GetMemPool()->New(*this); + } + + bool IsVolatile(const MIRModule &mod) const; + + bool IsSameContent(const BaseNode *node) const override; + + public: + StIdx stIdx; + int32 offset = 0; +}; + +// AddrofoffNode has the same member fields and member methods as DreadoffNode +using AddrofoffNode = DreadoffNode; + +class RegreadNode : public BaseNode { + public: + RegreadNode() : BaseNode(OP_regread) {} + + explicit RegreadNode(PregIdx pIdx) : BaseNode(OP_regread), regIdx(pIdx) {} + + RegreadNode(PrimType primType, PregIdx pIdx) : RegreadNode(pIdx) { + ptyp = primType; + } + + virtual ~RegreadNode() = default; + + void Dump(int32 indent) const override; + bool Verify() const override; + + RegreadNode *CloneTree(MapleAllocator &allocator) const override { + return allocator.GetMemPool()->New(*this); + } + + PregIdx GetRegIdx() const { + return regIdx; + } + void SetRegIdx(PregIdx reg) { + regIdx = reg; + } + + bool IsSameContent(const BaseNode *node) const override; + + private: + PregIdx regIdx = 0; // 32bit, negative if special register +}; + +class AddroffuncNode : public BaseNode { + public: + AddroffuncNode() : BaseNode(OP_addroffunc) {} + + AddroffuncNode(PrimType typ, PUIdx pIdx) : BaseNode(OP_addroffunc, typ, 0), puIdx(pIdx) {} + + virtual 
~AddroffuncNode() = default; + + void Dump(int32 indent) const override; + bool Verify() const override; + + AddroffuncNode *CloneTree(MapleAllocator &allocator) const override { + return allocator.GetMemPool()->New(*this); + } + + PUIdx GetPUIdx() const { + return puIdx; + } + + void SetPUIdx(PUIdx puIdxValue) { + puIdx = puIdxValue; + } + + bool IsSameContent(const BaseNode *node) const override; + + private: + PUIdx puIdx = 0; // 32bit now +}; + +class AddroflabelNode : public BaseNode { + public: + AddroflabelNode() : BaseNode(OP_addroflabel) {} + + explicit AddroflabelNode(uint32 ofst) : BaseNode(OP_addroflabel), offset(ofst) {} + + virtual ~AddroflabelNode() = default; + + void Dump(int32 indent) const override; + bool Verify() const override; + + AddroflabelNode *CloneTree(MapleAllocator &allocator) const override { + return allocator.GetMemPool()->New(*this); + } + + uint32 GetOffset() const { + return offset; + } + + void SetOffset(uint32 offsetValue) { + offset = offsetValue; + } + + bool IsSameContent(const BaseNode *node) const override; + + private: + LabelIdx offset = 0; +}; + +// for cleanuptry, jscatch, finally, retsub, endtry, membaracquire, membarrelease, +// membarstoreload, membarstorestore +class StmtNode : public BaseNode, public PtrListNodeBase { + public: + static std::atomic stmtIDNext; // for assigning stmtID, initialized to 1; 0 is reserved + static uint32 lastPrintedLineNum; // used during printing ascii output + static uint16 lastPrintedColumnNum; + + explicit StmtNode(Opcode o) : BaseNode(o), PtrListNodeBase(), stmtID(stmtIDNext), stmtOriginalID(stmtIDNext) { + ++stmtIDNext; + } + + StmtNode(Opcode o, uint8 numOpr) + : BaseNode(o, numOpr), PtrListNodeBase(), stmtID(stmtIDNext), stmtOriginalID(stmtIDNext) { + ++stmtIDNext; + } + + StmtNode(Opcode o, PrimType typ, uint8 numOpr) + : BaseNode(o, typ, numOpr), PtrListNodeBase(), stmtID(stmtIDNext), stmtOriginalID(stmtIDNext) { + ++stmtIDNext; + } + + // used for NaryStmtNode when clone + StmtNode(Opcode o, PrimType typ, uint8 numOpr, const SrcPosition &srcPosition, uint32 stmtOriginalID, StmtAttrs attrs) + : BaseNode(o, typ, numOpr), + PtrListNodeBase(), + srcPosition(srcPosition), + stmtID(stmtIDNext), + stmtOriginalID(stmtOriginalID), + stmtAttrs(attrs) { + ++stmtIDNext; + } + + virtual ~StmtNode() = default; + + using BaseNode::Dump; + void DumpBase(int32 indent) const override; + void Dump(int32 indent) const override; + void InsertAfterThis(StmtNode &pos); + void InsertBeforeThis(StmtNode &pos); + + StmtNode *CloneTree(MapleAllocator &allocator) const override { + auto *s = allocator.GetMemPool()->New(*this); + s->SetStmtID(stmtIDNext++); + s->SetMeStmtID(meStmtID); + return s; + } + + bool Verify() const override { + return true; + } + + bool Verify(VerifyResult&) const override { + return Verify(); + } + // ISO/IEC 9899:1999 7.21 + // stmt is expanded from ArrayOfChar func defined in + virtual void SetExpandFromArrayOfCharFunc(bool) {} + + virtual bool IsExpandedFromArrayOfCharFunc() const { + return false; + } + + const SrcPosition &GetSrcPos() const { + return srcPosition; + } + + SrcPosition &GetSrcPos() { + return srcPosition; + } + + void SetSrcPos(SrcPosition pos) { + srcPosition = pos; + } + + void SetInlinedSrcPos(uint32 lineNum, uint32 fileNum, const GStrIdx &idx) { + srcPosition.SetInlinedFuncStrIdx(idx); + srcPosition.SetInlinedLineNum(lineNum); + srcPosition.SetInlinedFileNum(fileNum); + } + + uint32 GetStmtID() const { + return stmtID; + } + + void SetStmtID(uint32 id) { + stmtID = id; + } + + 
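+  // Illustrative sketch of the statement-ID scheme (assumes a MapleAllocator `alloc` and a
+  // StmtNode `stmt`): each newly constructed StmtNode draws a unique stmtID from the static
+  // counter stmtIDNext, while stmtOriginalID keeps the ID of the first definition and is left
+  // unchanged by CloneTree():
+  //
+  //   StmtNode *copy = stmt->CloneTree(alloc);
+  //   // copy->GetStmtID()     != stmt->GetStmtID()      -- a fresh ID is assigned on clone
+  //   // copy->GetOriginalID() == stmt->GetOriginalID()  -- the first-definition ID is carried over
+  //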
uint32 GetOriginalID() const { + return stmtOriginalID; + } + + void SetOriginalID(uint32 id) { + stmtOriginalID = id; + } + + uint32 GetMeStmtID() const { + return meStmtID; + } + + void SetMeStmtID(uint32 id) { + meStmtID = id; + } + + StmtNode *GetRealNext() const; + + virtual BaseNode *GetRHS() const { + return nullptr; + } + + bool GetIsLive() const { + return isLive; + } + + void SetIsLive(bool live) const { + isLive = live; + } + + bool IsInSafeRegion() const { + return stmtAttrs.GetAttr(STMTATTR_insaferegion); + } + + void SetInSafeRegion() { + stmtAttrs.SetAttr(STMTATTR_insaferegion); + } + + void CopySafeRegionAttr(const StmtAttrs &stmtAttr) { + this->stmtAttrs.AppendAttr(stmtAttr.GetTargetAttrFlag(STMTATTR_insaferegion)); + } + + const StmtAttrs &GetStmtAttrs() const { + return stmtAttrs; + } + + void SetStmtInfoId(size_t index) { + stmtInfoId = index; + } + + const uint32 GetStmtInfoId() const { + return stmtInfoId; + } + + bool operator==(const StmtNode &rhs) { + return this == &rhs; + } + + protected: + SrcPosition srcPosition; + + private: + uint32 stmtID; // a unique ID assigned to it + uint32 stmtOriginalID; // first define id, no change when clone, need copy when emit from MeStmt + uint32 meStmtID = 0; // Need copy when emit from MeStmt, attention:this just for two stmt(if && call) + uint32 stmtInfoId = -1u; + mutable bool isLive = false; // only used for dse to save compile time + // mutable to keep const-ness at most situation + StmtAttrs stmtAttrs; +}; + +class IassignNode : public StmtNode { + public: + IassignNode() : IassignNode(TyIdx(0), 0, nullptr, nullptr) {} + + IassignNode(TyIdx tyIdx, FieldID fieldID, BaseNode *addrOpnd, BaseNode *rhsOpnd) + : StmtNode(OP_iassign), tyIdx(tyIdx), fieldID(fieldID), addrExpr(addrOpnd), rhs(rhsOpnd) { + BaseNodeT::SetNumOpnds(kOperandNumBinary); + } + + virtual ~IassignNode() = default; + + TyIdx GetTyIdx() const { + return tyIdx; + } + + MIRType *GetLHSType() const; + + void SetTyIdx(TyIdx idx) { + tyIdx = idx; + } + + FieldID GetFieldID() const { + return fieldID; + } + + void SetFieldID(FieldID fid) { + fieldID = fid; + } + + BaseNode *Opnd(size_t i) const override { + if (i == 0) { + return addrExpr; + } + return rhs; + } + + size_t NumOpnds() const override { + return kOperandNumBinary; + } + + void SetOpnd(BaseNode *node, size_t i) override { + if (i == 0) { + addrExpr = node; + } else { + rhs = node; + } + } + + void Dump(int32 indent) const override; + bool Verify() const override; + + IassignNode *CloneTree(MapleAllocator &allocator) const override { + auto *bn = allocator.GetMemPool()->New(*this); + bn->SetStmtID(stmtIDNext++); + bn->SetOpnd(addrExpr->CloneTree(allocator), 0); + bn->SetRHS(rhs->CloneTree(allocator)); + bn->SetExpandFromArrayOfCharFunc(fromAoCFunc); + return bn; + } + + // the base of an address expr is either a leaf or an iread + BaseNode &GetAddrExprBase() const { + BaseNode *base = addrExpr; + while (base->NumOpnds() != 0 && base->GetOpCode() != OP_iread) { + base = base->Opnd(0); + } + return *base; + } + + void SetAddrExpr(BaseNode *exp) { + addrExpr = exp; + } + + BaseNode *GetRHS() const override { + return rhs; + } + + void SetRHS(BaseNode *node) { + rhs = node; + } + + bool AssigningVolatile() const; + + bool IsExpandedFromArrayOfCharFunc() const override { + return fromAoCFunc; + } + + void SetExpandFromArrayOfCharFunc(bool flag) override { + fromAoCFunc = flag; + } + + private: + TyIdx tyIdx; + FieldID fieldID; + bool fromAoCFunc = false; // Array of char func, defined in + public: + BaseNode 
*addrExpr; + BaseNode *rhs; +}; + +// goto and gosub +class GotoNode : public StmtNode { + public: + explicit GotoNode(Opcode o) : StmtNode(o) {} + + GotoNode(Opcode o, uint32 ofst) : StmtNode(o), offset(ofst) {} + + virtual ~GotoNode() = default; + + void Dump(int32 indent) const override; + + GotoNode *CloneTree(MapleAllocator &allocator) const override { + auto *g = allocator.GetMemPool()->New(*this); + g->SetStmtID(stmtIDNext++); + return g; + } + + uint32 GetOffset() const { + return offset; + } + + void SetOffset(uint32 o) { + offset = o; + } + + private: + uint32 offset = 0; +}; + +// jstry +class JsTryNode : public StmtNode { + public: + JsTryNode() : StmtNode(OP_jstry) {} + + JsTryNode(uint16 catchofst, uint16 finallyofset) + : StmtNode(OP_jstry), catchOffset(catchofst), finallyOffset(finallyofset) {} + + virtual ~JsTryNode() = default; + + void Dump(int32 indent) const override; + + JsTryNode *CloneTree(MapleAllocator &allocator) const override { + auto *t = allocator.GetMemPool()->New(*this); + t->SetStmtID(stmtIDNext++); + return t; + } + + uint16 GetCatchOffset() const { + return catchOffset; + } + + void SetCatchOffset(uint32 offset) { + catchOffset = offset; + } + + uint16 GetFinallyOffset() const { + return finallyOffset; + } + + void SetFinallyOffset(uint32 offset) { + finallyOffset = offset; + } + + private: + uint16 catchOffset = 0; + uint16 finallyOffset = 0; +}; + +// try, cpptry +class TryNode : public StmtNode { + public: + explicit TryNode(MapleAllocator &allocator) : StmtNode(OP_try), offsets(allocator.Adapter()) {} + + explicit TryNode(const MapleVector &offsets) : StmtNode(OP_try), offsets(offsets) {} + + explicit TryNode(const MIRModule &mod) : TryNode(mod.GetCurFuncCodeMPAllocator()) {} + + TryNode(TryNode &node) = delete; + TryNode &operator=(const TryNode &node) = delete; + virtual ~TryNode() = default; + + using StmtNode::Dump; + void Dump(int32 indent) const override; + + MapleVector &GetOffsets() { + return offsets; + } + + LabelIdx GetOffset(size_t i) const { + ASSERT(i < offsets.size(), "array index out of range"); + return offsets.at(i); + } + + void SetOffset(LabelIdx offsetValue, size_t i) { + ASSERT(i < offsets.size(), "array index out of range"); + offsets[i] = offsetValue; + } + + void AddOffset(LabelIdx offsetValue) { + offsets.push_back(offsetValue); + } + + void ResizeOffsets(size_t offsetSize) { + offsets.resize(offsetSize); + } + + void SetOffsets(const MapleVector &offsetsValue) { + offsets = offsetsValue; + } + + size_t GetOffsetsCount() const { + return offsets.size(); + } + + MapleVector::iterator GetOffsetsBegin() { + return offsets.begin(); + } + + MapleVector::iterator GetOffsetsEnd() { + return offsets.end(); + } + + void OffsetsInsert(MapleVector::iterator a, MapleVector::iterator b, + MapleVector::iterator c) { + (void)offsets.insert(a, b, c); + } + + TryNode *CloneTree(MapleAllocator &allocator) const override { + auto *node = allocator.GetMemPool()->New(allocator); + node->SetStmtID(stmtIDNext++); + for (size_t i = 0; i < offsets.size(); ++i) { + node->AddOffset(offsets[i]); + } + return node; + } + + private: + MapleVector offsets; +}; + +// catch +class CatchNode : public StmtNode { + public: + explicit CatchNode(MapleAllocator &allocator) : StmtNode(OP_catch), exceptionTyIdxVec(allocator.Adapter()) {} + + explicit CatchNode(const MapleVector &tyIdxVec) : StmtNode(OP_catch), exceptionTyIdxVec(tyIdxVec) {} + + explicit CatchNode(const MIRModule &mod) : CatchNode(mod.GetCurFuncCodeMPAllocator()) {} + + CatchNode(CatchNode &node) = 
delete; + CatchNode &operator=(const CatchNode &node) = delete; + virtual ~CatchNode() = default; + + using StmtNode::Dump; + void Dump(int32 indent) const override; + + TyIdx GetExceptionTyIdxVecElement(size_t i) const { + CHECK_FATAL(i < exceptionTyIdxVec.size(), "array index out of range"); + return exceptionTyIdxVec[i]; + } + + const MapleVector &GetExceptionTyIdxVec() const { + return exceptionTyIdxVec; + } + + size_t Size() const { + return exceptionTyIdxVec.size(); + } + + void SetExceptionTyIdxVecElement(TyIdx idx, size_t i) { + CHECK_FATAL(i < exceptionTyIdxVec.size(), "array index out of range"); + exceptionTyIdxVec[i] = idx; + } + + void SetExceptionTyIdxVec(MapleVector vec) { + exceptionTyIdxVec = vec; + } + + void PushBack(TyIdx idx) { + exceptionTyIdxVec.push_back(idx); + } + + CatchNode *CloneTree(MapleAllocator &allocator) const override { + auto *node = allocator.GetMemPool()->New(allocator); + node->SetStmtID(stmtIDNext++); + for (size_t i = 0; i < Size(); ++i) { + node->PushBack(GetExceptionTyIdxVecElement(i)); + } + return node; + } + + private: + MapleVector exceptionTyIdxVec; +}; + +// cppcatch +class CppCatchNode : public StmtNode { + public: + explicit CppCatchNode(const TyIdx &idx) : StmtNode(OP_cppcatch), exceptionTyIdx(idx) {} + explicit CppCatchNode() : CppCatchNode(TyIdx(0)) {} + + explicit CppCatchNode(const CppCatchNode &node) = delete; + CppCatchNode &operator=(const CppCatchNode &node) = delete; + ~CppCatchNode() = default; + + void Dump(int32 indent) const override; + + CppCatchNode *CloneTree(MapleAllocator &allocator) const override { + CppCatchNode *node = allocator.GetMemPool()->New(); + node->SetStmtID(stmtIDNext++); + node->exceptionTyIdx = exceptionTyIdx; + return node; + } + + CppCatchNode *CloneTree(const MIRModule &mod) const { + return CppCatchNode::CloneTree(*mod.CurFuncCodeMemPoolAllocator()); + } + + public: + TyIdx exceptionTyIdx; +}; + +using CasePair = std::pair; +using CaseVector = MapleVector; +class SwitchNode : public StmtNode { + public: + explicit SwitchNode(MapleAllocator &allocator) : StmtNode(OP_switch, 1), switchTable(allocator.Adapter()) {} + + explicit SwitchNode(const MIRModule &mod) : SwitchNode(mod.GetCurFuncCodeMPAllocator()) {} + + SwitchNode(MapleAllocator &allocator, LabelIdx label) : SwitchNode(allocator, label, nullptr) {} + + SwitchNode(MapleAllocator &allocator, LabelIdx label, BaseNode *opnd) + : StmtNode(OP_switch, 1), switchOpnd(opnd), defaultLabel(label), switchTable(allocator.Adapter()) {} + + SwitchNode(const MIRModule &mod, LabelIdx label) : SwitchNode(mod.GetCurFuncCodeMPAllocator(), label) {} + + SwitchNode(MapleAllocator &allocator, const SwitchNode &node) + : StmtNode(node.GetOpCode(), node.GetPrimType(), node.numOpnds), + defaultLabel(node.GetDefaultLabel()), + switchTable(allocator.Adapter()) {} + + SwitchNode(const MIRModule &mod, const SwitchNode &node) : SwitchNode(mod.GetCurFuncCodeMPAllocator(), node) {} + + SwitchNode(SwitchNode &node) = delete; + SwitchNode &operator=(const SwitchNode &node) = delete; + virtual ~SwitchNode() = default; + + void Dump(int32 indent) const override; + bool Verify() const override; + + SwitchNode *CloneTree(MapleAllocator &allocator) const override { + auto *node = allocator.GetMemPool()->New(allocator, *this); + node->SetSwitchOpnd(switchOpnd->CloneTree(allocator)); + for (size_t i = 0; i < switchTable.size(); ++i) { + node->InsertCasePair(switchTable[i]); + } + return node; + } + + BaseNode *Opnd(size_t) const override { + return switchOpnd; + } + + void 
SetOpnd(BaseNode *node, size_t) override { + switchOpnd = node; + } + + BaseNode *GetSwitchOpnd() const { + return switchOpnd; + } + + void SetSwitchOpnd(BaseNode *node) { + switchOpnd = node; + } + + LabelIdx GetDefaultLabel() const { + return defaultLabel; + } + + void SetDefaultLabel(LabelIdx idx) { + defaultLabel = idx; + } + + const CaseVector &GetSwitchTable() const { + return switchTable; + } + + CaseVector &GetSwitchTable() { + return switchTable; + } + + CasePair GetCasePair(size_t idx) const { + ASSERT(idx < switchTable.size(), "out of range in SwitchNode::GetCasePair"); + return switchTable.at(idx); + } + + void SetSwitchTable(CaseVector vec) { + switchTable = vec; + } + + void InsertCasePair(CasePair pair) { + switchTable.push_back(pair); + } + + void UpdateCaseLabelAt(size_t i, LabelIdx idx) { + switchTable[i] = std::make_pair(switchTable[i].first, idx); + } + + void SortCasePair(bool func(const CasePair&, const CasePair&)) { + std::sort(switchTable.begin(), switchTable.end(), func); + } + + private: + BaseNode *switchOpnd = nullptr; + LabelIdx defaultLabel = 0; + CaseVector switchTable; +}; + +using MCasePair = std::pair; +using MCaseVector = MapleVector; +class MultiwayNode : public StmtNode { + public: + explicit MultiwayNode(MapleAllocator &allocator) : StmtNode(OP_multiway, 1), multiWayTable(allocator.Adapter()) {} + + explicit MultiwayNode(const MIRModule &mod) : MultiwayNode(mod.GetCurFuncCodeMPAllocator()) {} + + MultiwayNode(MapleAllocator &allocator, LabelIdx label) + : StmtNode(OP_multiway, 1), defaultLabel(label), multiWayTable(allocator.Adapter()) {} + + MultiwayNode(const MIRModule &mod, LabelIdx label) : MultiwayNode(mod.GetCurFuncCodeMPAllocator(), label) {} + + MultiwayNode(MapleAllocator &allocator, const MultiwayNode &node) + : StmtNode(node.GetOpCode(), node.GetPrimType(), node.numOpnds, node.GetSrcPos(), node.GetOriginalID(), + node.GetStmtAttrs()), + defaultLabel(node.defaultLabel), + multiWayTable(allocator.Adapter()) {} + + MultiwayNode(const MIRModule &mod, const MultiwayNode &node) : MultiwayNode(mod.GetCurFuncCodeMPAllocator(), node) {} + + MultiwayNode(MultiwayNode &node) = delete; + MultiwayNode &operator=(const MultiwayNode &node) = delete; + virtual ~MultiwayNode() = default; + + void Dump(int32 indent) const override; + + MultiwayNode *CloneTree(MapleAllocator &allocator) const override { + auto *nd = allocator.GetMemPool()->New(allocator, *this); + nd->multiWayOpnd = static_cast(multiWayOpnd->CloneTree(allocator)); + for (size_t i = 0; i < multiWayTable.size(); ++i) { + BaseNode *node = multiWayTable[i].first->CloneTree(allocator); + MCasePair pair(static_cast(node), multiWayTable[i].second); + nd->multiWayTable.push_back(pair); + } + return nd; + } + + BaseNode *Opnd(size_t i) const override { + return *(&multiWayOpnd + static_cast(i)); + } + + const BaseNode *GetMultiWayOpnd() const { + return multiWayOpnd; + } + + void SetMultiWayOpnd(BaseNode *multiwayOpndPara) { + multiWayOpnd = multiwayOpndPara; + } + + void SetDefaultlabel(LabelIdx defaultLabelPara) { + defaultLabel = defaultLabelPara; + } + + void AppendElemToMultiWayTable(const MCasePair &mCasrPair) { + multiWayTable.push_back(mCasrPair); + } + + const MCaseVector &GetMultiWayTable() const { + return multiWayTable; + } + + private: + BaseNode *multiWayOpnd = nullptr; + LabelIdx defaultLabel = 0; + MCaseVector multiWayTable; +}; + +// eval, throw, free, decref, incref, decrefreset, assertnonnull, igoto +class UnaryStmtNode : public StmtNode { + public: + explicit UnaryStmtNode(Opcode o) 
: StmtNode(o, 1) {} + + UnaryStmtNode(Opcode o, PrimType typ) : StmtNode(o, typ, 1) {} + + UnaryStmtNode(Opcode o, PrimType typ, BaseNode *opnd) : StmtNode(o, typ, 1), uOpnd(opnd) {} + + virtual ~UnaryStmtNode() = default; + + using StmtNode::Dump; + void Dump(int32 indent) const override; + void DumpOpnd(const MIRModule &mod, int32 indent) const; + void DumpOpnd(int32 indent) const; + + bool Verify() const override { + return uOpnd->Verify(); + } + + bool Verify(VerifyResult &verifyResult) const override { + if (GetOpCode() == OP_throw && !VerifyThrowable(verifyResult)) { + return false; + } + return uOpnd->Verify(verifyResult); + } + + UnaryStmtNode *CloneTree(MapleAllocator &allocator) const override { + auto *node = allocator.GetMemPool()->New(*this); + node->SetStmtID(stmtIDNext++); + node->SetOpnd(uOpnd->CloneTree(allocator), 0); + return node; + } + + bool IsLeaf() const override { + return false; + } + + BaseNode *GetRHS() const override { + return Opnd(0); + } + + virtual void SetRHS(BaseNode *rhs) { + this->SetOpnd(rhs, 0); + } + + BaseNode *Opnd(size_t i = 0) const override { + (void)i; + return uOpnd; + } + + void SetOpnd(BaseNode *node, size_t) override { + uOpnd = node; + } + + private: + bool VerifyThrowable(VerifyResult &verifyResult) const; + BaseNode *uOpnd = nullptr; +}; + +// dassign, maydassign +class DassignNode : public UnaryStmtNode { + public: + DassignNode() : UnaryStmtNode(OP_dassign), stIdx() {} + + explicit DassignNode(PrimType typ) : UnaryStmtNode(OP_dassign, typ), stIdx() {} + + DassignNode(PrimType typ, BaseNode *opnd) : UnaryStmtNode(OP_dassign, typ, opnd), stIdx() {} + + DassignNode(PrimType typ, BaseNode *opnd, StIdx idx, FieldID fieldID) + : UnaryStmtNode(OP_dassign, typ, opnd), stIdx(idx), fieldID(fieldID) {} + + DassignNode(BaseNode *opnd, StIdx idx, FieldID fieldID) : DassignNode(kPtyInvalid, opnd, idx, fieldID) {} + + virtual ~DassignNode() = default; + + void Dump(int32 indent) const override; + bool Verify() const override; + + DassignNode *CloneTree(MapleAllocator &allocator) const override { + auto *node = allocator.GetMemPool()->New(*this); + node->SetStmtID(stmtIDNext++); + node->SetOpnd(Opnd(0)->CloneTree(allocator), 0); + return node; + } + + size_t NumOpnds() const override { + return 1; + } + + bool IsIdentityDassign() const { + BaseNode *rhs = GetRHS(); + if (rhs->GetOpCode() != OP_dread) { + return false; + } + auto *dread = static_cast(rhs); + return (stIdx == dread->GetStIdx()); + } + + BaseNode *GetRHS() const override { + return UnaryStmtNode::GetRHS(); + } + + void SetRHS(BaseNode *rhs) override { + UnaryStmtNode::SetOpnd(rhs, 0); + } + + StIdx GetStIdx() const { + return stIdx; + } + void SetStIdx(StIdx s) { + stIdx = s; + } + + const FieldID &GetFieldID() const { + return fieldID; + } + + void SetFieldID(FieldID f) { + fieldID = f; + } + + bool AssigningVolatile(const MIRModule &mod) const; + + private: + StIdx stIdx; + FieldID fieldID = 0; +}; + +class DassignoffNode : public UnaryStmtNode { + public: + DassignoffNode() : UnaryStmtNode(OP_dassignoff), stIdx() {} + + explicit DassignoffNode(PrimType typ) : UnaryStmtNode(OP_dassignoff, typ), stIdx() {} + + DassignoffNode(PrimType typ, BaseNode *opnd) : UnaryStmtNode(OP_dassignoff, typ, opnd), stIdx() {} + + DassignoffNode(const StIdx &lhsStIdx, int32 dOffset, PrimType rhsType, BaseNode *rhsNode) + : DassignoffNode(rhsType, rhsNode) { + stIdx = lhsStIdx; + offset = dOffset; + } + virtual ~DassignoffNode() = default; + + void Dump(int32 indent) const override; + + DassignoffNode 
*CloneTree(MapleAllocator &allocator) const override { + auto *node = allocator.GetMemPool()->New(*this); + node->SetStmtID(stmtIDNext++); + node->SetOpnd(Opnd(0)->CloneTree(allocator), 0); + return node; + } + + size_t NumOpnds() const override { + return 1; + } + + BaseNode *GetRHS() const override { + return UnaryStmtNode::GetRHS(); + } + + void SetRHS(BaseNode *rhs) override { + UnaryStmtNode::SetOpnd(rhs, 0); + } + + public: + StIdx stIdx; + int32 offset = 0; +}; + +class RegassignNode : public UnaryStmtNode { + public: + RegassignNode() : UnaryStmtNode(OP_regassign) {} + + RegassignNode(PrimType primType, PregIdx idx, BaseNode *opnd) + : UnaryStmtNode(OP_regassign, primType, opnd), regIdx(idx) {} + + virtual ~RegassignNode() = default; + + void Dump(int32 indent) const override; + bool Verify() const override; + + RegassignNode *CloneTree(MapleAllocator &allocator) const override { + auto *node = allocator.GetMemPool()->New(*this); + node->SetStmtID(stmtIDNext++); + node->SetOpnd(Opnd(0)->CloneTree(allocator), 0); + return node; + } + + BaseNode *GetRHS() const override { + return UnaryStmtNode::GetRHS(); + } + + void SetRHS(BaseNode *rhs) override { + UnaryStmtNode::SetOpnd(rhs, 0); + } + + PregIdx GetRegIdx() const { + return regIdx; + } + void SetRegIdx(PregIdx idx) { + regIdx = idx; + } + + private: + PregIdx regIdx = 0; // 32bit, negative if special register +}; + +// brtrue and brfalse +class CondGotoNode : public UnaryStmtNode { + public: + static const int32 probAll; + explicit CondGotoNode(Opcode o) : CondGotoNode(o, 0, nullptr) {} + + CondGotoNode(Opcode o, uint32 offset, BaseNode *opnd) : UnaryStmtNode(o, kPtyInvalid, opnd), offset(offset) { + BaseNodeT::SetNumOpnds(kOperandNumUnary); + } + + virtual ~CondGotoNode() = default; + + void Dump(int32 indent) const override; + bool Verify() const override; + + uint32 GetOffset() const { + return offset; + } + + void SetOffset(uint32 offsetValue) { + offset = offsetValue; + } + + bool IsBranchProbValid() const { + return branchProb > 0 && branchProb < probAll; + } + + int32 GetBranchProb() const { + return branchProb; + } + + void SetBranchProb(int32 prob) { + branchProb = prob; + } + + void ReverseBranchProb() { + if (IsBranchProbValid()) { + branchProb = probAll - branchProb; + } + } + + void InvalidateBranchProb() { + if (IsBranchProbValid()) { + branchProb = -1; + } + } + + CondGotoNode *CloneTree(MapleAllocator &allocator) const override { + auto *node = allocator.GetMemPool()->New(*this); + node->SetStmtID(stmtIDNext++); + node->SetOpnd(Opnd(0)->CloneTree(allocator), 0); + return node; + } + + private: + uint32 offset; + int32 branchProb = -1; // branch probability, a negative number indicates that the probability is invalid +}; + +using SmallCasePair = std::pair; +using SmallCaseVector = MapleVector; +class RangeGotoNode : public UnaryStmtNode { + public: + explicit RangeGotoNode(MapleAllocator &allocator) + : UnaryStmtNode(OP_rangegoto), rangegotoTable(allocator.Adapter()) {} + + explicit RangeGotoNode(const MIRModule &mod) : RangeGotoNode(mod.GetCurFuncCodeMPAllocator()) {} + + RangeGotoNode(MapleAllocator &allocator, const RangeGotoNode &node) + : UnaryStmtNode(node), tagOffset(node.tagOffset), rangegotoTable(allocator.Adapter()) {} + + RangeGotoNode(const MIRModule &mod, const RangeGotoNode &node) + : RangeGotoNode(mod.GetCurFuncCodeMPAllocator(), node) {} + + RangeGotoNode(RangeGotoNode &node) = delete; + RangeGotoNode &operator=(const RangeGotoNode &node) = delete; + virtual ~RangeGotoNode() = default; + + void 
Dump(int32 indent) const override; + bool Verify() const override; + RangeGotoNode *CloneTree(MapleAllocator &allocator) const override { + auto *node = allocator.GetMemPool()->New(allocator, *this); + node->SetOpnd(Opnd(0)->CloneTree(allocator), 0); + for (size_t i = 0; i < rangegotoTable.size(); ++i) { + node->rangegotoTable.push_back(rangegotoTable[i]); + } + return node; + } + + const SmallCaseVector &GetRangeGotoTable() const { + return rangegotoTable; + } + + const SmallCasePair &GetRangeGotoTableItem(size_t i) const { + return rangegotoTable.at(i); + } + + void SetRangeGotoTable(SmallCaseVector rt) { + rangegotoTable = rt; + } + + void AddRangeGoto(uint32 tag, LabelIdx idx) { + rangegotoTable.push_back(SmallCasePair(tag, idx)); + } + + int32 GetTagOffset() const { + return tagOffset; + } + + void SetTagOffset(int32 offset) { + tagOffset = offset; + } + + private: + int32 tagOffset = 0; + // add each tag to tagOffset field to get the actual tag values + SmallCaseVector rangegotoTable; +}; + +class BlockNode : public StmtNode { + public: + using StmtNodes = PtrListRef; + + BlockNode() : StmtNode(OP_block) {} + + ~BlockNode() override { + stmtNodeList.clear(); + } + + void AddStatement(StmtNode *stmt); + void AppendStatementsFromBlock(BlockNode &blk); + void InsertFirst(StmtNode *stmt); // Insert stmt as the first + void InsertLast(StmtNode *stmt); // Insert stmt as the last + void ReplaceStmtWithBlock(StmtNode &stmtNode, BlockNode &blk); + void ReplaceStmt1WithStmt2(const StmtNode *stmtNode1, StmtNode *stmtNode2); + void RemoveStmt(const StmtNode *stmtNode1); + void InsertBefore(const StmtNode *stmtNode1, StmtNode *stmtNode2); // Insert ss2 before ss1 in current block. + void InsertAfter(const StmtNode *stmtNode1, StmtNode *stmtNode2); // Insert ss2 after ss1 in current block. 
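+  // Illustrative sketch (assumes a MapleAllocator `alloc` and two already-built statements
+  // `s1` and `s2`): BlockNode keeps its statements in a PtrListRef, so the helpers above and
+  // below link and unlink existing nodes rather than copying them:
+  //
+  //   BlockNode *blk = alloc.GetMemPool()->New<BlockNode>();
+  //   blk->AddStatement(s1);      // append s1 at the tail
+  //   blk->InsertBefore(s1, s2);  // s2 now precedes s1
+  //   blk->RemoveStmt(s1);        // unlink s1; blk->GetFirst() == s2
+  //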
+ // insert all the stmts in inblock to the current block after stmt1 + void InsertBlockAfter(BlockNode &inblock, const StmtNode *stmt1); + void Dump(int32 indent, const MIRSymbolTable *theSymTab, MIRPregTable *thePregTab, bool withInfo, bool isFuncbody, + MIRFlavor flavor) const; + bool Verify() const override; + bool Verify(VerifyResult &verifyResult) const override; + + void Dump(int32 indent) const override { + Dump(indent, nullptr, nullptr, false, false, kFlavorUnknown); + } + + BlockNode *CloneTree(MapleAllocator &allocator) const override; + + BlockNode *CloneTreeWithSrcPosition(const MIRModule &mod, const GStrIdx &idx = GStrIdx(), bool setInlinedPos = false, + const SrcPosition &inlinedPosition = SrcPosition()); + + BlockNode *CloneTreeWithFreqs(MapleAllocator &allocator, std::unordered_map &toFreqs, + std::unordered_map &fromFreqs, uint64_t numer, uint64_t denom, + uint32_t updateOp); + + bool IsEmpty() const { + return stmtNodeList.empty(); + } + + void ResetBlock() { + stmtNodeList.clear(); + } + + StmtNode *GetFirst() { + return &(stmtNodeList.front()); + } + + const StmtNode *GetFirst() const { + return &(stmtNodeList.front()); + } + + void SetFirst(StmtNode *node) { + stmtNodeList.update_front(node); + } + + StmtNode *GetLast() { + return &(stmtNodeList.back()); + } + + const StmtNode *GetLast() const { + return &(stmtNodeList.back()); + } + + void SetLast(StmtNode *node) { + stmtNodeList.update_back(node); + } + + StmtNodes &GetStmtNodes() { + return stmtNodeList; + } + + const StmtNodes &GetStmtNodes() const { + return stmtNodeList; + } + + private: + StmtNodes stmtNodeList; +}; + +struct CallBackData { + public: + virtual ~CallBackData() {} + virtual void Free() {} +}; + +using CallBack = void (*)(const BlockNode &oldBlock, BlockNode &newBlock, + const StmtNode &oldStmt, StmtNode &newStmt, CallBackData *data); + +class BlockCallBack { + public: + BlockCallBack(CallBack hook, CallBackData *callBackData) : callBack(hook), data(callBackData) {} + + ~BlockCallBack() { + if (data != nullptr) { + data->Free(); + } + } + + void Invoke(const BlockNode &oldBlock, BlockNode &newBlock, + const StmtNode &oldStmt, StmtNode &newStmt) { + if (callBack != nullptr) { + callBack(oldBlock, newBlock, oldStmt, newStmt, data); + } + } + + CallBack GetCallBack() const { + return callBack; + } + + private: + CallBack callBack; + CallBackData *data = nullptr; +}; + +class BlockCallBackMgr { + public: + static void AddCallBack(CallBack hook, CallBackData *data) { + auto *node = new BlockCallBack(hook, data); + callBackList.push_back(node); + } + + static void RemoveCallBack(CallBack hook) { + for (auto it = callBackList.begin(); it != callBackList.end(); ++it) { + auto *node = *it; + if (node->GetCallBack() == hook) { + delete node; + callBackList.erase(it); + return; + } + } + } + + static void ClearCallBacks() { + for (auto *node : callBackList) { + delete node; + } + callBackList.clear(); + } + + static void InvokeCallBacks(const BlockNode &oldBlock, BlockNode &newBlock, + const StmtNode &oldStmt, StmtNode &newStmt) { + for (auto *node : callBackList) { + node->Invoke(oldBlock, newBlock, oldStmt, newStmt); + } + } + + private: + static std::list callBackList; +}; + +class IfStmtNode : public UnaryStmtNode { + public: + IfStmtNode() : UnaryStmtNode(OP_if) { + numOpnds = kOperandNumTernary; + } + + virtual ~IfStmtNode() = default; + + void Dump(int32 indent) const override; + bool Verify() const override; + + IfStmtNode *CloneTree(MapleAllocator &allocator) const override { + auto *node = 
allocator.GetMemPool()->New(*this); + node->SetStmtID(stmtIDNext++); + node->SetOpnd(Opnd()->CloneTree(allocator), 0); + node->thenPart = thenPart->CloneTree(allocator); + if (elsePart != nullptr) { + node->elsePart = elsePart->CloneTree(allocator); + } + node->SetMeStmtID(GetMeStmtID()); + return node; + } + + IfStmtNode *CloneTreeWithFreqs(MapleAllocator &allocator, std::unordered_map &toFreqs, + std::unordered_map &fromFreqs, uint64_t numer, uint64_t denom, + uint32_t updateOp) { + auto *node = allocator.GetMemPool()->New(*this); + node->SetStmtID(stmtIDNext++); + node->SetOpnd(Opnd()->CloneTree(allocator), 0); + if (fromFreqs.count(GetStmtID()) > 0) { + uint64_t oldFreq = fromFreqs[GetStmtID()]; + uint64_t newFreq = numer == 0 ? 0 : (denom > 0 ? (oldFreq * numer / denom) : oldFreq); + toFreqs[node->GetStmtID()] = (newFreq > 0 || numer == 0) ? newFreq : 1; + if (updateOp & kUpdateOrigFreq) { + uint64_t left = ((oldFreq - newFreq) > 0 || oldFreq == 0) ? (oldFreq - newFreq) : 1; + fromFreqs[GetStmtID()] = left; + } + } + node->thenPart = thenPart->CloneTreeWithFreqs(allocator, toFreqs, fromFreqs, numer, denom, updateOp); + if (elsePart != nullptr) { + node->elsePart = elsePart->CloneTreeWithFreqs(allocator, toFreqs, fromFreqs, numer, denom, updateOp); + } + node->SetMeStmtID(GetMeStmtID()); + return node; + } + + BaseNode *Opnd(size_t i = 0) const override { + if (i == 0) { + return UnaryStmtNode::Opnd(0); + } else if (i == 1) { + return thenPart; + } else if (i == 2) { + ASSERT(elsePart != nullptr, "IfStmtNode has wrong numOpnds field, the elsePart is nullptr"); + ASSERT(numOpnds == kOperandNumTernary, "IfStmtNode has wrong numOpnds field, the elsePart is nullptr"); + return elsePart; + } + ASSERT(false, "IfStmtNode has wrong numOpnds field: %u", NumOpnds()); + return nullptr; + } + + BlockNode *GetThenPart() const { + return thenPart; + } + + void SetThenPart(BlockNode *node) { + thenPart = node; + } + + BlockNode *GetElsePart() const { + return elsePart; + } + + void SetElsePart(BlockNode *node) { + elsePart = node; + } + + size_t NumOpnds() const override { + if (elsePart == nullptr) { + return kOperandNumBinary; + } + return kOperandNumTernary; + } + + private: + BlockNode *thenPart = nullptr; + BlockNode *elsePart = nullptr; +}; + +// for both while and dowhile +class WhileStmtNode : public UnaryStmtNode { + public: + explicit WhileStmtNode(Opcode o) : UnaryStmtNode(o) { + BaseNodeT::SetNumOpnds(kOperandNumBinary); + } + + virtual ~WhileStmtNode() = default; + + void Dump(int32 indent) const override; + bool Verify() const override; + + WhileStmtNode *CloneTree(MapleAllocator &allocator) const override { + auto *node = allocator.GetMemPool()->New(*this); + node->SetStmtID(stmtIDNext++); + node->SetOpnd(Opnd(0)->CloneTree(allocator), 0); + node->body = body->CloneTree(allocator); + return node; + } + + WhileStmtNode *CloneTreeWithFreqs(MapleAllocator &allocator, std::unordered_map &toFreqs, + std::unordered_map &fromFreqs, uint64_t numer, uint64_t denom, + uint32_t updateOp) { + auto *node = allocator.GetMemPool()->New(*this); + node->SetStmtID(stmtIDNext++); + if (fromFreqs.count(GetStmtID()) > 0) { + int64_t oldFreq = fromFreqs[GetStmtID()]; + int64_t newFreq = + numer == 0 ? 0 : (denom > 0 ? static_cast(static_cast(oldFreq) * numer / denom) : oldFreq); + toFreqs[node->GetStmtID()] = (newFreq > 0 || numer == 0) ? static_cast(newFreq) : 1; + if (updateOp & kUpdateOrigFreq) { + int64_t left = (oldFreq - newFreq) > 0 ? 
(oldFreq - newFreq) : 1; + fromFreqs[GetStmtID()] = static_cast(left); + } + } + node->SetOpnd(Opnd(0)->CloneTree(allocator), 0); + node->body = body->CloneTreeWithFreqs(allocator, toFreqs, fromFreqs, numer, denom, updateOp); + return node; + } + + void SetBody(BlockNode *node) { + body = node; + } + + BlockNode *GetBody() const { + return body; + } + + BaseNode *Opnd(size_t i = 0) const override { + if (i == 0) { + return UnaryStmtNode::Opnd(); + } else if (i == 1) { + return body; + } + ASSERT(false, "WhileStmtNode has wrong numOpnds field: %u", NumOpnds()); + return nullptr; + } + + private: + BlockNode *body = nullptr; +}; + +class DoloopNode : public StmtNode { + public: + DoloopNode() : DoloopNode(StIdx(), false, nullptr, nullptr, nullptr, nullptr) {} + + DoloopNode(StIdx doVarStIdx, bool isPReg, BaseNode *startExp, BaseNode *contExp, BaseNode *incrExp, BlockNode *doBody) + : StmtNode(OP_doloop, kOperandNumDoloop), + doVarStIdx(doVarStIdx), + isPreg(isPReg), + startExpr(startExp), + condExpr(contExp), + incrExpr(incrExp), + doBody(doBody) {} + + virtual ~DoloopNode() = default; + + void DumpDoVar(const MIRModule &mod) const; + void Dump(int32 indent) const override; + bool Verify() const override; + + DoloopNode *CloneTree(MapleAllocator &allocator) const override { + auto *node = allocator.GetMemPool()->New(*this); + node->SetStmtID(stmtIDNext++); + node->SetStartExpr(startExpr->CloneTree(allocator)); + node->SetContExpr(GetCondExpr()->CloneTree(allocator)); + node->SetIncrExpr(GetIncrExpr()->CloneTree(allocator)); + node->SetDoBody(GetDoBody()->CloneTree(allocator)); + return node; + } + + DoloopNode *CloneTreeWithFreqs(MapleAllocator &allocator, std::unordered_map &toFreqs, + std::unordered_map &fromFreqs, uint64_t numer, uint64_t denom, + uint32_t updateOp) { + auto *node = allocator.GetMemPool()->New(*this); + node->SetStmtID(stmtIDNext++); + if (fromFreqs.count(GetStmtID()) > 0) { + uint64_t oldFreq = fromFreqs[GetStmtID()]; + uint64_t newFreq = oldFreq; + if ((updateOp & kUpdateFreqbyScale) != 0) { // used in inline/clone + newFreq = numer == 0 ? 0 : (denom > 0 ? (oldFreq * numer / denom) : oldFreq); + } else if ((updateOp & kUpdateUnrolledFreq) != 0) { // used in unrolled part + uint64_t bodyFreq = fromFreqs[GetDoBody()->GetStmtID()]; + newFreq = denom > 0 ? (bodyFreq * numer / denom + (oldFreq - bodyFreq)) : oldFreq; + } else if ((updateOp & kUpdateUnrollRemainderFreq) != 0) { // used in unrolled remainder + uint64_t bodyFreq = fromFreqs[GetDoBody()->GetStmtID()]; + newFreq = denom > 0 ? 
(((bodyFreq * numer) % denom) + (oldFreq - bodyFreq)) : oldFreq; + } + toFreqs[node->GetStmtID()] = static_cast(newFreq); + ASSERT(oldFreq >= newFreq, "sanity check"); + if ((updateOp & kUpdateOrigFreq) != 0) { + uint64_t left = oldFreq - newFreq; + fromFreqs[GetStmtID()] = left; + } + } + node->SetStartExpr(startExpr->CloneTree(allocator)); + node->SetContExpr(GetCondExpr()->CloneTree(allocator)); + node->SetIncrExpr(GetIncrExpr()->CloneTree(allocator)); + node->SetDoBody(GetDoBody()->CloneTreeWithFreqs(allocator, toFreqs, fromFreqs, numer, denom, updateOp)); + return node; + } + + void SetDoVarStIdx(StIdx idx) { + doVarStIdx = idx; + } + + PregIdx GetDoVarPregIdx() const { + return static_cast(doVarStIdx.FullIdx()); + } + + const StIdx &GetDoVarStIdx() const { + return doVarStIdx; + } + + void SetDoVarStFullIdx(uint32 idx) { + doVarStIdx.SetFullIdx(idx); + } + + void SetIsPreg(bool isPregVal) { + isPreg = isPregVal; + } + + bool IsPreg() const { + return isPreg; + } + + void SetStartExpr(BaseNode *node) { + startExpr = node; + } + + BaseNode *GetStartExpr() const { + return startExpr; + } + + void SetContExpr(BaseNode *node) { + condExpr = node; + } + + BaseNode *GetCondExpr() const { + return condExpr; + } + + void SetIncrExpr(BaseNode *node) { + incrExpr = node; + } + + BaseNode *GetIncrExpr() const { + return incrExpr; + } + + void SetDoBody(BlockNode *node) { + doBody = node; + } + + BlockNode *GetDoBody() const { + return doBody; + } + + BaseNode *Opnd(size_t i) const override { + if (i == 0) { + return startExpr; + } + if (i == 1) { + return condExpr; + } + if (i == 2) { + return incrExpr; + } + return *(&doBody + i - 3); + } + + size_t NumOpnds() const override { + return kOperandNumDoloop; + } + + void SetOpnd(BaseNode *node, size_t i) override { + if (i == 0) { + startExpr = node; + } + if (i == 1) { + SetContExpr(node); + } + if (i == 2) { + incrExpr = node; + } else { + *(&doBody + i - 3) = static_cast(node); + } + } + + private: + static constexpr int kOperandNumDoloop = 4; + StIdx doVarStIdx; // must be local; cast to PregIdx for preg + bool isPreg; + BaseNode *startExpr; + BaseNode *condExpr; + BaseNode *incrExpr; + BlockNode *doBody; +}; + +class ForeachelemNode : public StmtNode { + public: + ForeachelemNode() : StmtNode(OP_foreachelem) { + BaseNodeT::SetNumOpnds(kOperandNumUnary); + } + + virtual ~ForeachelemNode() = default; + + const StIdx &GetElemStIdx() const { + return elemStIdx; + } + + void SetElemStIdx(StIdx elemStIdxValue) { + elemStIdx = elemStIdxValue; + } + + const StIdx &GetArrayStIdx() const { + return arrayStIdx; + } + + void SetArrayStIdx(StIdx arrayStIdxValue) { + arrayStIdx = arrayStIdxValue; + } + + BlockNode *GetLoopBody() const { + return loopBody; + } + + void SetLoopBody(BlockNode *loopBodyValue) { + loopBody = loopBodyValue; + } + + BaseNode *Opnd(size_t) const override { + return loopBody; + } + + size_t NumOpnds() const override { + return numOpnds; + } + + void Dump(int32 indent) const override; + + ForeachelemNode *CloneTree(MapleAllocator &allocator) const override { + auto *node = allocator.GetMemPool()->New(*this); + node->SetStmtID(stmtIDNext++); + node->SetLoopBody(loopBody->CloneTree(allocator)); + return node; + } + + private: + StIdx elemStIdx; // must be local symbol + StIdx arrayStIdx; // symbol table entry of the array/collection variable + BlockNode *loopBody = nullptr; +}; + +// used by assertge, assertlt +class BinaryStmtNode : public StmtNode, public BinaryOpnds { + public: + explicit BinaryStmtNode(Opcode o) : StmtNode(o, 
kOperandNumBinary) {} + + virtual ~BinaryStmtNode() = default; + + void Dump(int32 indent) const override; + bool Verify() const override; + BinaryStmtNode *CloneTree(MapleAllocator &allocator) const override { + auto *node = allocator.GetMemPool()->New(*this); + node->SetStmtID(stmtIDNext++); + node->SetBOpnd(GetBOpnd(0)->CloneTree(allocator), 0); + node->SetBOpnd(GetBOpnd(1)->CloneTree(allocator), 1); + return node; + } + + BaseNode *Opnd(size_t i) const override { + ASSERT(i < kOperandNumBinary, "Invalid operand idx in BinaryStmtNode"); + ASSERT(i >= 0, "Invalid operand idx in BinaryStmtNode"); + return GetBOpnd(i); + } + + size_t NumOpnds() const override { + return kOperandNumBinary; + } + + void SetOpnd(BaseNode *node, size_t i) override { + SetBOpnd(node, i); + } + + bool IsLeaf() const override { + return false; + } +}; + +class IassignoffNode : public BinaryStmtNode { + public: + IassignoffNode() : BinaryStmtNode(OP_iassignoff) {} + + explicit IassignoffNode(int32 ofst) : BinaryStmtNode(OP_iassignoff), offset(ofst) {} + + IassignoffNode(PrimType primType, int32 offset, BaseNode *addrOpnd, BaseNode *srcOpnd) : IassignoffNode(offset) { + BaseNodeT::SetPrimType(primType); + SetBOpnd(addrOpnd, 0); + SetBOpnd(srcOpnd, 1); + } + + virtual ~IassignoffNode() = default; + + void Dump(int32 indent) const override; + bool Verify() const override; + + IassignoffNode *CloneTree(MapleAllocator &allocator) const override { + auto *node = allocator.GetMemPool()->New(*this); + node->SetStmtID(stmtIDNext++); + node->SetBOpnd(GetBOpnd(0)->CloneTree(allocator), 0); + node->SetBOpnd(GetBOpnd(1)->CloneTree(allocator), 1); + return node; + } + + int32 GetOffset() const { + return offset; + } + + void SetOffset(int32 newOffset) { + offset = newOffset; + } + + private: + int32 offset = 0; +}; + +// for iassignfpoff, iassignspoff, iassignpcoff +class IassignFPoffNode : public UnaryStmtNode { + public: + explicit IassignFPoffNode(Opcode o) : UnaryStmtNode(o) {} + + explicit IassignFPoffNode(Opcode o, int32 ofst) : UnaryStmtNode(o), offset(ofst) {} + + IassignFPoffNode(Opcode o, PrimType primType, int32 offset, BaseNode *src) : IassignFPoffNode(o, offset) { + BaseNodeT::SetPrimType(primType); + UnaryStmtNode::SetOpnd(src, 0); + } + + virtual ~IassignFPoffNode() = default; + + void Dump(int32 indent) const override; + bool Verify() const override; + + IassignFPoffNode *CloneTree(MapleAllocator &allocator) const override { + auto *node = allocator.GetMemPool()->New(*this); + node->SetStmtID(stmtIDNext++); + node->SetOpnd(Opnd(0)->CloneTree(allocator), 0); + return node; + } + + void SetOffset(int32 ofst) { + offset = ofst; + } + + int32 GetOffset() const { + return offset; + } + + private: + int32 offset = 0; +}; + +using IassignPCoffNode = IassignFPoffNode; + +class BlkassignoffNode : public BinaryStmtNode { + public: + BlkassignoffNode() : BinaryStmtNode(OP_blkassignoff) { + ptyp = PTY_agg; + ptyp = PTY_agg; + alignLog2 = 0; + offset = 0; + } + explicit BlkassignoffNode(int32 ofst, int32 bsize) : BinaryStmtNode(OP_blkassignoff), offset(ofst), blockSize(bsize) { + ptyp = PTY_agg; + alignLog2 = 0; + } + explicit BlkassignoffNode(int32 ofst, int32 bsize, BaseNode *dest, BaseNode *src) + : BinaryStmtNode(OP_blkassignoff), offset(ofst), blockSize(bsize) { + ptyp = PTY_agg; + alignLog2 = 0; + SetBOpnd(dest, 0); + SetBOpnd(src, 1); + } + ~BlkassignoffNode() = default; + + void Dump(int32 indent) const override; + + BlkassignoffNode *CloneTree(MapleAllocator &allocator) const override { + BlkassignoffNode *node = 
allocator.GetMemPool()->New(offset, blockSize); + node->SetStmtID(stmtIDNext++); + node->SetBOpnd(GetBOpnd(0)->CloneTree(allocator), 0); + node->SetBOpnd(GetBOpnd(1)->CloneTree(allocator), 1); + return node; + } + + uint32 GetAlign() const { + uint32 res = 1; + for (uint32 i = 0; i < alignLog2; i++) { + res *= 2; + } + return res; + } + + void SetAlign(uint32 x) { + if (x == 0) { + alignLog2 = 0; + return; + } + ASSERT((~(x - 1) & x) == x, "SetAlign called with non power of 2"); + uint32 res = 0; + while (x != 1) { + x >>= 1; + ++res; + } + alignLog2 = res; + } + + uint32 alignLog2 : 4; + int32 offset : 28; + int32 blockSize = 0; +}; + +// used by return, syncenter, syncexit +class NaryStmtNode : public StmtNode, public NaryOpnds { + public: + NaryStmtNode(MapleAllocator &allocator, Opcode o) : StmtNode(o), NaryOpnds(allocator) {} + + NaryStmtNode(const MIRModule &mod, Opcode o) : NaryStmtNode(mod.GetCurFuncCodeMPAllocator(), o) {} + + NaryStmtNode(MapleAllocator &allocator, const NaryStmtNode &node) + // do not use stmt copy constructor + : StmtNode(node.GetOpCode(), node.GetPrimType(), node.numOpnds, node.GetSrcPos(), node.GetOriginalID(), + node.GetStmtAttrs()), + NaryOpnds(allocator) {} + + NaryStmtNode(const MIRModule &mod, const NaryStmtNode &node) : NaryStmtNode(mod.GetCurFuncCodeMPAllocator(), node) {} + + explicit NaryStmtNode(const NaryStmtNode &node) = delete; + NaryStmtNode &operator=(const NaryStmtNode &node) = delete; + virtual ~NaryStmtNode() = default; + + void Dump(int32 indent) const override; + bool Verify() const override; + + NaryStmtNode *CloneTree(MapleAllocator &allocator) const override { + auto *node = allocator.GetMemPool()->New(allocator, *this); + for (size_t i = 0; i < GetNopndSize(); ++i) { + node->GetNopnd().push_back(GetNopndAt(i)->CloneTree(allocator)); + } + node->SetNumOpnds(GetNopndSize()); + return node; + } + + BaseNode *Opnd(size_t i) const override { + return GetNopndAt(i); + } + + void SetOpnd(BaseNode *node, size_t i) override { + ASSERT(i < GetNopnd().size(), "array index out of range"); + SetNOpndAt(i, node); + } + + size_t NumOpnds() const override { + ASSERT(numOpnds == GetNopndSize(), "NaryStmtNode has wrong numOpnds field"); + return GetNopndSize(); + } + + void SetOpnds(const MapleVector &arguments) { + SetNOpnd(arguments); + SetNumOpnds(arguments.size()); + } + + void PushOpnd(BaseNode *node) { + if (node != nullptr) { + GetNopnd().push_back(node); + } + SetNumOpnds(GetNopndSize()); + } + + void InsertOpnd(BaseNode *node, size_t idx) { + if (node == nullptr || idx > GetNopndSize()) { + return; + } + auto begin = GetNopnd().begin(); + for (size_t i = 0; i < idx; ++i) { + ++begin; + } + (void)GetNopnd().insert(begin, node); + SetNumOpnds(GetNopndSize()); + } +}; + +class SafetyCheckStmtNode { + public: + explicit SafetyCheckStmtNode(GStrIdx funcNameIdx) : funcNameIdx(funcNameIdx) {} + SafetyCheckStmtNode(const SafetyCheckStmtNode &stmtNode) : funcNameIdx(stmtNode.GetFuncNameIdx()) {} + SafetyCheckStmtNode &operator=(const SafetyCheckStmtNode &stmtNode) { + funcNameIdx = stmtNode.GetFuncNameIdx(); + return *this; + } + + virtual ~SafetyCheckStmtNode() = default; + + std::string GetFuncName() const; + + GStrIdx GetFuncNameIdx() const { + return funcNameIdx; + } + + void Dump() const { + LogInfo::MapleLogger() << " <&" << GetFuncName() << ">"; + } + + private: + GStrIdx funcNameIdx; +}; + +// used by callassertnonnull, callassertle +class SafetyCallCheckStmtNode { + public: + SafetyCallCheckStmtNode(GStrIdx callFuncNameIdx, size_t paramIndex, 
GStrIdx stmtFuncNameIdx) + : callFuncNameIdx(callFuncNameIdx), paramIndex(paramIndex), stmtFuncNameIdx(stmtFuncNameIdx) {} + explicit SafetyCallCheckStmtNode(const SafetyCallCheckStmtNode &stmtNode) + : callFuncNameIdx(stmtNode.GetFuncNameIdx()), + paramIndex(stmtNode.GetParamIndex()), + stmtFuncNameIdx(stmtNode.GetStmtFuncNameIdx()) {} + + virtual ~SafetyCallCheckStmtNode() = default; + + std::string GetFuncName() const; + GStrIdx GetFuncNameIdx() const { + return callFuncNameIdx; + } + std::string GetStmtFuncName() const; + size_t GetParamIndex() const { + return paramIndex; + } + + GStrIdx GetStmtFuncNameIdx() const { + return stmtFuncNameIdx; + } + + void Dump() const { + LogInfo::MapleLogger() << " <&" << GetFuncName() << ", " << paramIndex << ", &" << GetStmtFuncName() << ">"; + } + + private: + GStrIdx callFuncNameIdx; + size_t paramIndex; + GStrIdx stmtFuncNameIdx; +}; + +// used by callassertnonnull +class CallAssertNonnullStmtNode : public UnaryStmtNode, public SafetyCallCheckStmtNode { + public: + CallAssertNonnullStmtNode(Opcode o, GStrIdx callFuncNameIdx, size_t paramIndex, GStrIdx stmtFuncNameIdx) + : UnaryStmtNode(o), SafetyCallCheckStmtNode(callFuncNameIdx, paramIndex, stmtFuncNameIdx) {} + ~CallAssertNonnullStmtNode() override {} + + void Dump(int32 indent) const override; + + CallAssertNonnullStmtNode *CloneTree(MapleAllocator &allocator) const override { + auto *node = allocator.GetMemPool()->New(*this); + node->SetStmtID(stmtIDNext++); + node->SetOpnd(Opnd()->CloneTree(allocator), 0); + return node; + } +}; + +// used by assertnonnull +class AssertNonnullStmtNode : public UnaryStmtNode, public SafetyCheckStmtNode { + public: + AssertNonnullStmtNode(Opcode o, GStrIdx funcNameIdx) : UnaryStmtNode(o), SafetyCheckStmtNode(funcNameIdx) {} + ~AssertNonnullStmtNode() override {} + + void Dump(int32 indent) const override; + + AssertNonnullStmtNode *CloneTree(MapleAllocator &allocator) const override { + auto *node = allocator.GetMemPool()->New(*this); + node->SetStmtID(stmtIDNext++); + node->SetOpnd(Opnd()->CloneTree(allocator), 0); + return node; + } +}; + +// used by assertle +class AssertBoundaryStmtNode : public NaryStmtNode, public SafetyCheckStmtNode { + public: + AssertBoundaryStmtNode(MapleAllocator &allocator, Opcode o, GStrIdx funcNameIdx) + : NaryStmtNode(allocator, o), SafetyCheckStmtNode(funcNameIdx) {} + ~AssertBoundaryStmtNode() override {} + + AssertBoundaryStmtNode(MapleAllocator &allocator, const AssertBoundaryStmtNode &stmtNode) + : NaryStmtNode(allocator, stmtNode), SafetyCheckStmtNode(stmtNode) {} + + AssertBoundaryStmtNode(const MIRModule &mod, Opcode o, GStrIdx funcNameIdx) + : AssertBoundaryStmtNode(mod.GetCurFuncCodeMPAllocator(), o, funcNameIdx) {} + + void Dump(int32 indent) const override; + + AssertBoundaryStmtNode *CloneTree(MapleAllocator &allocator) const override { + auto *node = allocator.GetMemPool()->New(allocator, *this); + node->SetStmtID(stmtIDNext++); + for (size_t i = 0; i < GetNopndSize(); ++i) { + node->GetNopnd().push_back(GetNopndAt(i)->CloneTree(allocator)); + } + node->SetNumOpnds(GetNopndSize()); + return node; + } +}; + +// used by callassertle +class CallAssertBoundaryStmtNode : public NaryStmtNode, public SafetyCallCheckStmtNode { + public: + CallAssertBoundaryStmtNode(MapleAllocator &allocator, Opcode o, GStrIdx funcNameIdx, size_t paramIndex, + GStrIdx stmtFuncNameIdx) + : NaryStmtNode(allocator, o), SafetyCallCheckStmtNode(funcNameIdx, paramIndex, stmtFuncNameIdx) {} + ~CallAssertBoundaryStmtNode() override {} + + 
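// Note (added, illustrative): a callassertle carries the called function's name, the index of the
 + // parameter being bounds-checked, and the statement's function name, so its textual dump has the
 + // form <&func, paramIndex, &stmtFunc> (see SafetyCallCheckStmtNode::Dump() above; the names here
 + // are placeholders, not identifiers from this file). +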
CallAssertBoundaryStmtNode(MapleAllocator &allocator, const CallAssertBoundaryStmtNode &stmtNode) + : NaryStmtNode(allocator, stmtNode), SafetyCallCheckStmtNode(stmtNode) {} + + CallAssertBoundaryStmtNode(const MIRModule &mod, Opcode o, GStrIdx funcNameIdx, size_t paramIndex, + GStrIdx stmtFuncNameIdx) + : CallAssertBoundaryStmtNode(mod.GetCurFuncCodeMPAllocator(), o, funcNameIdx, paramIndex, stmtFuncNameIdx) {} + + void Dump(int32 indent) const override; + + CallAssertBoundaryStmtNode *CloneTree(MapleAllocator &allocator) const override { + auto *node = allocator.GetMemPool()->New(allocator, *this); + node->SetStmtID(stmtIDNext++); + for (size_t i = 0; i < GetNopndSize(); ++i) { + node->GetNopnd().push_back(GetNopndAt(i)->CloneTree(allocator)); + } + node->SetNumOpnds(GetNopndSize()); + return node; + } +}; + +// used by call, virtualcall, virtualicall, superclasscall, interfacecall, +// interfaceicall, customcall +// polymorphiccall +// callassigned, virtualcallassigned, virtualicallassigned, +// superclasscallassigned, interfacecallassigned, interfaceicallassigned, +// customcallassigned +// polymorphiccallassigned +class CallNode : public NaryStmtNode { + public: + CallNode(MapleAllocator &allocator, Opcode o) : NaryStmtNode(allocator, o), returnValues(allocator.Adapter()) {} + + CallNode(MapleAllocator &allocator, Opcode o, PUIdx idx) : CallNode(allocator, o, idx, TyIdx()) {} + + CallNode(MapleAllocator &allocator, Opcode o, PUIdx idx, TyIdx tdx) + : NaryStmtNode(allocator, o), puIdx(idx), tyIdx(tdx), returnValues(allocator.Adapter()) {} + + CallNode(const MIRModule &mod, Opcode o) : CallNode(mod.GetCurFuncCodeMPAllocator(), o) {} + + CallNode(const MIRModule &mod, Opcode o, PUIdx idx, TyIdx tdx) + : CallNode(mod.GetCurFuncCodeMPAllocator(), o, idx, tdx) {} + + CallNode(MapleAllocator &allocator, const CallNode &node) + : NaryStmtNode(allocator, node), puIdx(node.GetPUIdx()), tyIdx(node.tyIdx), returnValues(allocator.Adapter()) {} + + CallNode(const MIRModule &mod, const CallNode &node) : CallNode(mod.GetCurFuncCodeMPAllocator(), node) {} + + CallNode(CallNode &node) = delete; + CallNode &operator=(const CallNode &node) = delete; + virtual ~CallNode() = default; + virtual void Dump(int32 indent, bool newline) const; + bool Verify() const override; + MIRType *GetCallReturnType() override; + const MIRSymbol *GetCallReturnSymbol(const MIRModule &mod) const; + + CallNode *CloneTree(MapleAllocator &allocator) const override { + auto *node = allocator.GetMemPool()->New(allocator, *this); + for (size_t i = 0; i < GetNopndSize(); ++i) { + node->GetNopnd().push_back(GetNopndAt(i)->CloneTree(allocator)); + } + for (size_t i = 0; i < returnValues.size(); ++i) { + node->GetReturnVec().push_back(returnValues[i]); + } + node->SetNumOpnds(GetNopndSize()); + return node; + } + + PUIdx GetPUIdx() const { + return puIdx; + } + + void SetPUIdx(const PUIdx idx) { + puIdx = idx; + } + + TyIdx GetTyIdx() const { + return tyIdx; + } + + void SetTyIdx(TyIdx idx) { + tyIdx = idx; + } + + CallReturnVector &GetReturnVec() { + return returnValues; + } + + CallReturnPair GetReturnPair(size_t idx) const { + ASSERT(idx < returnValues.size(), "out of range in CallNode::GetReturnPair"); + return returnValues.at(idx); + } + + void SetReturnPair(CallReturnPair retVal, size_t idx) { + ASSERT(idx < returnValues.size(), "out of range in CallNode::GetReturnPair"); + returnValues.at(idx) = retVal; + } + + const CallReturnVector &GetReturnVec() const { + return returnValues; + } + + CallReturnPair GetNthReturnVec(size_t i) 
const { + ASSERT(i < returnValues.size(), "array index out of range"); + return returnValues[i]; + } + + void SetReturnVec(const CallReturnVector &vec) { + returnValues = vec; + } + + size_t NumOpnds() const override { + ASSERT(numOpnds == GetNopndSize(), "CallNode has wrong numOpnds field"); + return GetNopndSize(); + } + + void Dump(int32 indent) const override { + Dump(indent, true); + } + + CallReturnVector *GetCallReturnVector() override { + return &returnValues; + } + + void SetCallReturnVector(const CallReturnVector &value) { + returnValues = value; + } + + void SetEnclosingBlock(BlockNode *value) { + enclosingBlk = value; + } + + BlockNode *GetEnclosingBlock() { + return enclosingBlk; + } + + private: + PUIdx puIdx = 0; + TyIdx tyIdx = TyIdx(0); + CallReturnVector returnValues; + BlockNode *enclosingBlk = nullptr; +}; + +// icall, icallassigned, icallproto and icallprotoassigned +class IcallNode : public NaryStmtNode { + public: + IcallNode(MapleAllocator &allocator, Opcode o) + : NaryStmtNode(allocator, o), retTyIdx(0), returnValues(allocator.Adapter()) { + BaseNodeT::SetNumOpnds(kOperandNumUnary); + } + + IcallNode(MapleAllocator &allocator, Opcode o, TyIdx idx) + : NaryStmtNode(allocator, o), retTyIdx(idx), returnValues(allocator.Adapter()) { + BaseNodeT::SetNumOpnds(kOperandNumUnary); + } + + IcallNode(const MIRModule &mod, Opcode o) : IcallNode(mod.GetCurFuncCodeMPAllocator(), o) {} + + IcallNode(const MIRModule &mod, Opcode o, TyIdx idx) : IcallNode(mod.GetCurFuncCodeMPAllocator(), o, idx) {} + + IcallNode(MapleAllocator &allocator, const IcallNode &node) + : NaryStmtNode(allocator, node), retTyIdx(node.retTyIdx), returnValues(allocator.Adapter()) {} + + IcallNode(const MIRModule &mod, const IcallNode &node) : IcallNode(mod.GetCurFuncCodeMPAllocator(), node) {} + + IcallNode(IcallNode &node) = delete; + IcallNode &operator=(const IcallNode &node) = delete; + virtual ~IcallNode() = default; + + virtual void Dump(int32 indent, bool newline) const; + bool Verify() const override; + MIRType *GetCallReturnType() override; + const MIRSymbol *GetCallReturnSymbol(const MIRModule &mod) const; + IcallNode *CloneTree(MapleAllocator &allocator) const override { + auto *node = allocator.GetMemPool()->New(allocator, *this); + for (size_t i = 0; i < GetNopndSize(); ++i) { + node->GetNopnd().push_back(GetNopndAt(i)->CloneTree(allocator)); + } + for (size_t i = 0; i < returnValues.size(); ++i) { + node->returnValues.push_back(returnValues[i]); + } + node->SetNumOpnds(GetNopndSize()); + return node; + } + + TyIdx GetRetTyIdx() const { + return retTyIdx; + } + + void SetRetTyIdx(TyIdx idx) { + retTyIdx = idx; + } + + const CallReturnVector &GetReturnVec() const { + return returnValues; + } + + CallReturnVector &GetReturnVec() { + return returnValues; + } + + void SetReturnVec(const CallReturnVector &vec) { + returnValues = vec; + } + + size_t NumOpnds() const override { + ASSERT(numOpnds == GetNopndSize(), "IcallNode has wrong numOpnds field"); + return GetNopndSize(); + } + + void Dump(int32 indent) const override { + Dump(indent, true); + } + + CallReturnVector *GetCallReturnVector() override { + return &returnValues; + } + + private: + TyIdx retTyIdx; + // for icall: return type for callee; + // for icallproto: return ptr for callee function type + // the 0th operand is the function pointer + CallReturnVector returnValues; +}; + +// used by intrinsiccall and xintrinsiccall +class IntrinsiccallNode : public NaryStmtNode { + public: + IntrinsiccallNode(MapleAllocator &allocator, Opcode o) + : 
NaryStmtNode(allocator, o), intrinsic(INTRN_UNDEFINED), tyIdx(0), returnValues(allocator.Adapter()) {} + + IntrinsiccallNode(MapleAllocator &allocator, Opcode o, MIRIntrinsicID id) + : NaryStmtNode(allocator, o), intrinsic(id), tyIdx(0), returnValues(allocator.Adapter()) {} + + IntrinsiccallNode(const MIRModule &mod, Opcode o) : IntrinsiccallNode(mod.GetCurFuncCodeMPAllocator(), o) {} + + IntrinsiccallNode(const MIRModule &mod, Opcode o, MIRIntrinsicID id) + : IntrinsiccallNode(mod.GetCurFuncCodeMPAllocator(), o, id) {} + + IntrinsiccallNode(MapleAllocator &allocator, const IntrinsiccallNode &node) + : NaryStmtNode(allocator, node), + intrinsic(node.GetIntrinsic()), + tyIdx(node.tyIdx), + returnValues(allocator.Adapter()) {} + + IntrinsiccallNode(const MIRModule &mod, const IntrinsiccallNode &node) + : IntrinsiccallNode(mod.GetCurFuncCodeMPAllocator(), node) {} + + IntrinsiccallNode(IntrinsiccallNode &node) = delete; + IntrinsiccallNode &operator=(const IntrinsiccallNode &node) = delete; + virtual ~IntrinsiccallNode() = default; + + virtual void Dump(int32 indent, bool newline) const; + bool Verify() const override; + MIRType *GetCallReturnType() override; + + IntrinsiccallNode *CloneTree(MapleAllocator &allocator) const override { + auto *node = allocator.GetMemPool()->New(allocator, *this); + for (size_t i = 0; i < GetNopndSize(); ++i) { + node->GetNopnd().push_back(GetNopndAt(i)->CloneTree(allocator)); + } + for (size_t i = 0; i < returnValues.size(); ++i) { + node->GetReturnVec().push_back(returnValues[i]); + } + node->SetNumOpnds(GetNopndSize()); + return node; + } + + MIRIntrinsicID GetIntrinsic() const { + return intrinsic; + } + + void SetIntrinsic(MIRIntrinsicID id) { + intrinsic = id; + } + + TyIdx GetTyIdx() const { + return tyIdx; + } + + void SetTyIdx(TyIdx idx) { + tyIdx = idx; + } + + CallReturnVector &GetReturnVec() { + return returnValues; + } + + const CallReturnVector &GetReturnVec() const { + return returnValues; + } + + void SetReturnVec(const CallReturnVector &vec) { + returnValues = vec; + } + + size_t NumOpnds() const override { + ASSERT(numOpnds == GetNopndSize(), "IntrinsiccallNode has wrong numOpnds field"); + return GetNopndSize(); + } + + void Dump(int32 indent) const override { + Dump(indent, true); + } + + CallReturnVector *GetCallReturnVector() override { + return &returnValues; + } + + CallReturnPair &GetCallReturnPair(uint32 i) { + ASSERT(i < returnValues.size(), "array index out of range"); + return returnValues.at(i); + } + + private: + MIRIntrinsicID intrinsic; + TyIdx tyIdx; + CallReturnVector returnValues; +}; + +// used by callinstant, virtualcallinstant, superclasscallinstant and +// interfacecallinstant, callinstantassigned, virtualcallinstantassigned, +// superclasscallinstantassigned and interfacecallinstantassigned +class CallinstantNode : public CallNode { + public: + CallinstantNode(MapleAllocator &allocator, Opcode o, TyIdx tIdx) : CallNode(allocator, o), instVecTyIdx(tIdx) {} + + CallinstantNode(const MIRModule &mod, Opcode o, TyIdx tIdx) + : CallinstantNode(mod.GetCurFuncCodeMPAllocator(), o, tIdx) {} + + CallinstantNode(MapleAllocator &allocator, const CallinstantNode &node) + : CallNode(allocator, node), instVecTyIdx(node.instVecTyIdx) {} + + CallinstantNode(const MIRModule &mod, const CallinstantNode &node) + : CallinstantNode(mod.GetCurFuncCodeMPAllocator(), node) {} + + CallinstantNode(CallinstantNode &node) = delete; + CallinstantNode &operator=(const CallinstantNode &node) = delete; + virtual ~CallinstantNode() = default; + + void 
Dump(int32 indent, bool newline) const override; + void Dump(int32 indent) const override { + Dump(indent, true); + } + + CallinstantNode *CloneTree(MapleAllocator &allocator) const override { + auto *node = allocator.GetMemPool()->New(allocator, *this); + for (size_t i = 0; i < GetNopndSize(); ++i) { + node->GetNopnd().push_back(GetNopndAt(i)->CloneTree(allocator)); + } + for (size_t i = 0; i < GetReturnVec().size(); ++i) { + node->GetReturnVec().push_back(GetNthReturnVec(i)); + } + node->SetNumOpnds(GetNopndSize()); + return node; + } + + CallReturnVector *GetCallReturnVector() override { + return &GetReturnVec(); + } + + private: + TyIdx instVecTyIdx; +}; + +class LabelNode : public StmtNode { + public: + LabelNode() : StmtNode(OP_label) {} + + explicit LabelNode(LabelIdx idx) : StmtNode(OP_label), labelIdx(idx) {} + + virtual ~LabelNode() = default; + + void Dump(int32 indent) const override; + + LabelNode *CloneTree(MapleAllocator &allocator) const override { + auto *l = allocator.GetMemPool()->New(*this); + l->SetStmtID(stmtIDNext++); + return l; + } + + LabelIdx GetLabelIdx() const { + return labelIdx; + } + + void SetLabelIdx(LabelIdx idx) { + labelIdx = idx; + } + + private: + LabelIdx labelIdx = 0; +}; + +class CommentNode : public StmtNode { + public: + explicit CommentNode(const MapleAllocator &allocator) : StmtNode(OP_comment), comment(allocator.GetMemPool()) {} + + explicit CommentNode(const MIRModule &mod) : CommentNode(mod.GetCurFuncCodeMPAllocator()) {} + + CommentNode(const MapleAllocator &allocator, const std::string &cmt) + : StmtNode(OP_comment), comment(cmt, allocator.GetMemPool()) {} + + CommentNode(const MIRModule &mod, const std::string &cmt) : CommentNode(mod.GetCurFuncCodeMPAllocator(), cmt) {} + + CommentNode(const MapleAllocator &allocator, const CommentNode &node) + : StmtNode(node.GetOpCode(), node.GetPrimType()), comment(node.comment, allocator.GetMemPool()) {} + + CommentNode(const MIRModule &mod, const CommentNode &node) : CommentNode(mod.GetCurFuncCodeMPAllocator(), node) {} + + CommentNode(CommentNode &node) = delete; + CommentNode &operator=(const CommentNode &node) = delete; + virtual ~CommentNode() = default; + + void Dump(int32 indent) const override; + + CommentNode *CloneTree(MapleAllocator &allocator) const override { + auto *c = allocator.GetMemPool()->New(allocator, *this); + return c; + } + + const MapleString &GetComment() const { + return comment; + } + + void SetComment(MapleString com) { + comment = com; + } + + void SetComment(const std::string &str) { + comment = str; + } + + void SetComment(const char *str) { + comment = str; + } + + void Append(const std::string &str) { + comment.append(str); + } + + private: + MapleString comment; +}; + +enum AsmQualifierKind : unsigned { // they are alreadgy Maple IR keywords + kASMvolatile, + kASMinline, + kASMgoto, +}; + +class AsmNode : public NaryStmtNode { + public: + explicit AsmNode(MapleAllocator *alloc) + : NaryStmtNode(*alloc, OP_asm), + asmString(alloc->GetMemPool()), + inputConstraints(alloc->Adapter()), + asmOutputs(alloc->Adapter()), + outputConstraints(alloc->Adapter()), + clobberList(alloc->Adapter()), + gotoLabels(alloc->Adapter()), + qualifiers(0) {} + + AsmNode(MapleAllocator &allocator, const AsmNode &node) + : NaryStmtNode(allocator, node), + asmString(node.asmString, allocator.GetMemPool()), + inputConstraints(allocator.Adapter()), + asmOutputs(allocator.Adapter()), + outputConstraints(allocator.Adapter()), + clobberList(allocator.Adapter()), + gotoLabels(allocator.Adapter()), + 
qualifiers(node.qualifiers) {} + + virtual ~AsmNode() = default; + + AsmNode *CloneTree(MapleAllocator &allocator) const override; + + void SetQualifier(AsmQualifierKind x) { + qualifiers |= (1U << static_cast(x)); + } + + bool GetQualifier(AsmQualifierKind x) const { + return (qualifiers & (1U << static_cast(x))) != 0; + } + + CallReturnVector *GetCallReturnVector() override { + return &asmOutputs; + } + + void SetHasWriteInputs() { + hasWriteInputs = true; + } + + bool HasWriteInputs() const { + return hasWriteInputs; + } + + bool IsSameContent(const BaseNode *node) const override; + void DumpOutputs(int32 indent, std::string &uStr) const; + void DumpInputOperands(int32 indent, std::string &uStr) const; + void Dump(int32 indent) const override; + + MapleString asmString; + MapleVector inputConstraints; // length is numOpnds + CallReturnVector asmOutputs; + MapleVector outputConstraints; // length is returnValues.size() + MapleVector clobberList; + MapleVector gotoLabels; + uint32 qualifiers; + + private: + bool hasWriteInputs = false; +}; + +void DumpCallReturns(const MIRModule &mod, CallReturnVector nrets, int32 indent); +bool HasIreadExpr(const BaseNode *expr); +size_t MaxDepth(const BaseNode *expr); +} // namespace maple + +#define LOAD_SAFE_CAST_FOR_MIR_NODE +#include "ir_safe_cast_traits.def" + +#endif // MAPLE_IR_INCLUDE_MIR_NODES_H diff --git a/src/mapleall/maple_ir/include/mir_parser.h b/src/mapleall/maple_ir/include/mir_parser.h new file mode 100644 index 0000000000000000000000000000000000000000..f8a2897d8d4a179aa35376ed63ac65f7745b1b17 --- /dev/null +++ b/src/mapleall/maple_ir/include/mir_parser.h @@ -0,0 +1,340 @@ +/* + * Copyright (c) [2019-2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MAPLE_IR_INCLUDE_MIR_PARSER_H +#define MAPLE_IR_INCLUDE_MIR_PARSER_H +#include "mir_module.h" +#include "lexer.h" +#include "mir_nodes.h" +#include "mir_preg.h" +#include "mir_scope.h" +#include "parser_opt.h" + +namespace maple { +using BaseNodePtr = BaseNode*; +using StmtNodePtr = StmtNode*; +using BlockNodePtr = BlockNode*; + +class FormalDef; + +class MIRParser { + public: + explicit MIRParser(MIRModule &md) + : lexer(md), + mod(md), + definedLabels(mod.GetMPAllocator().Adapter()) { + safeRegionFlag.push(false); + } + + ~MIRParser() = default; + + MIRFunction *CreateDummyFunction(); + void ResetCurrentFunction() { + mod.SetCurFunction(dummyFunction); + } + + bool ParseLoc(); + bool ParseLocStmt(StmtNodePtr &stmt); + bool ParsePosition(SrcPosition &pos); + bool ParseOneScope(MIRScope &scope); + bool ParseScope(); + bool ParseScopeStmt(StmtNodePtr &stmt); + bool ParseOneAlias(GStrIdx &strIdx, MIRAliasVars &aliasVar); + bool ParseAlias(); + bool ParseAliasStmt(StmtNodePtr &stmt); + bool ParseTypeAlias(MIRScope &scope); + uint8 *ParseWordsInfo(uint32 size); + bool ParseSwitchCase(int64&, LabelIdx&); + bool ParseExprOneOperand(BaseNodePtr &expr); + bool ParseExprTwoOperand(BaseNodePtr &opnd0, BaseNodePtr &opnd1); + bool ParseExprNaryOperand(MapleVector &opndVec); + bool IsDelimitationTK(TokenKind tk) const; + Opcode GetOpFromToken(TokenKind tk) const; + bool IsStatement(TokenKind tk) const; + PrimType GetPrimitiveType(TokenKind tk) const; + MIRIntrinsicID GetIntrinsicID(TokenKind tk) const; + bool ParseScalarValue(MIRConstPtr &stype, MIRType &type); + bool ParseConstAddrLeafExpr(MIRConstPtr &cexpr); + bool ParseInitValue(MIRConstPtr &theConst, TyIdx tyIdx, bool allowEmpty = false); + bool ParseDeclaredSt(StIdx &stidx); + void CreateFuncMIRSymbol(PUIdx &puidx, GStrIdx strIdx); + bool ParseDeclaredFunc(PUIdx &puidx); + bool ParseTypeAttrs(TypeAttrs &attrs); + bool ParseVarTypeAttrs(MIRSymbol &st); + bool CheckAlignTk(); + bool ParseAlignAttrs(TypeAttrs &tA); + bool ParsePackAttrs(); + bool ParseFieldAttrs(FieldAttrs &attrs); + bool ParseFuncAttrs(FuncAttrs &attrs); + void SetAttrContent(FuncAttrs &attrs, FuncAttrKind x, const MIRLexer &lexer) const; + bool CheckPrimAndDerivedType(TokenKind tokenKind, TyIdx &tyIdx); + bool ParsePrimType(TyIdx &tyIdx); + bool ParseFarrayType(TyIdx &arrayTyIdx); + bool ParseArrayType(TyIdx &arrayTyIdx); + bool ParseBitFieldType(TyIdx &fieldTyIdx); + bool ParsePragmaElement(MIRPragmaElement &elem); + bool ParsePragmaElementForArray(MIRPragmaElement &elem); + bool ParsePragmaElementForAnnotation(MIRPragmaElement &elem); + bool ParsePragma(MIRStructType &type); + bool ParseFields(MIRStructType &type); + bool ParseStructType(TyIdx &styIdx, const GStrIdx &strIdx = GStrIdx(0)); + bool ParseClassType(TyIdx &styidx, const GStrIdx &strIdx = GStrIdx(0)); + bool ParseInterfaceType(TyIdx &sTyIdx, const GStrIdx &strIdx = GStrIdx(0)); + bool ParseDefinedTypename(TyIdx &definedTyIdx, MIRTypeKind kind = kTypeUnknown); + bool ParseTypeParam(TyIdx &definedTyIdx); + bool ParsePointType(TyIdx &tyIdx); + bool ParseFuncType(TyIdx &tyIdx); + bool ParseGenericInstantVector(MIRInstantVectorType &insVecType); + bool ParseDerivedType(TyIdx &tyIdx, MIRTypeKind kind = kTypeUnknown, const GStrIdx &strIdx = GStrIdx(0)); + bool ParseType(TyIdx &tyIdx); + bool ParseEnumeration(); + bool ParseStatement(StmtNodePtr &stmt); + bool ParseSpecialReg(PregIdx &pRegIdx); + bool ParsePseudoReg(PrimType primType, PregIdx &pRegIdx); + bool ParseStmtBlock(BlockNodePtr &blk); + bool 
ParsePrototype(MIRFunction &func, MIRSymbol &funcSymbol, TyIdx &funcTyIdx); + bool ParseFunction(uint32 fileIdx = 0); + bool ParseStorageClass(MIRSymbol &symbol) const; + bool ParseDeclareVarInitValue(MIRSymbol &symbol); + bool ParseDeclareVar(MIRSymbol&); + bool ParseDeclareReg(MIRSymbol &symbol, const MIRFunction &func); + bool ParseDeclareFormal(FormalDef &formalDef); + bool ParsePrototypeRemaining(MIRFunction &func, std::vector &vecTyIdx, + std::vector &vecAttrs, bool &varArgs); + + // Stmt Parser + bool ParseStmtDassign(StmtNodePtr &stmt); + bool ParseStmtDassignoff(StmtNodePtr &stmt); + bool ParseStmtRegassign(StmtNodePtr &stmt); + bool ParseStmtIassign(StmtNodePtr &stmt); + bool ParseStmtIassignoff(StmtNodePtr &stmt); + bool ParseStmtIassignFPoff(StmtNodePtr &stmt); + bool ParseStmtBlkassignoff(StmtNodePtr &stmt); + bool ParseStmtDoloop(StmtNodePtr &stmt); + bool ParseStmtForeachelem(StmtNodePtr &stmt); + bool ParseStmtDowhile(StmtNodePtr &stmt); + bool ParseStmtIf(StmtNodePtr &stmt); + bool ParseStmtWhile(StmtNodePtr &stmt); + bool ParseStmtLabel(StmtNodePtr &stmt); + bool ParseStmtGoto(StmtNodePtr &stmt); + bool ParseStmtBr(StmtNodePtr &stmt); + bool ParseStmtSwitch(StmtNodePtr &stmt); + bool ParseStmtRangegoto(StmtNodePtr &stmt); + bool ParseStmtMultiway(StmtNodePtr &stmt); + PUIdx EnterUndeclaredFunction(bool isMcount = false); // for -pg in order to add "void _mcount()" + bool ParseStmtCall(StmtNodePtr &stmt); + bool ParseStmtCallMcount(StmtNodePtr &stmt); // for -pg in order to add "void _mcount()" to all the functions + bool ParseStmtIcall(StmtNodePtr &stmt, Opcode op); + bool ParseStmtIcall(StmtNodePtr &stmt); + bool ParseStmtIcallassigned(StmtNodePtr &stmt); + bool ParseStmtIcallproto(StmtNodePtr &stmt); + bool ParseStmtIcallprotoassigned(StmtNodePtr &stmt); + bool ParseStmtIntrinsiccall(StmtNodePtr &stmt, bool isAssigned); + bool ParseStmtIntrinsiccall(StmtNodePtr &stmt); + bool ParseStmtIntrinsiccallassigned(StmtNodePtr &stmt); + bool ParseStmtIntrinsiccallwithtype(StmtNodePtr &stmt, bool isAssigned); + bool ParseStmtIntrinsiccallwithtype(StmtNodePtr &stmt); + bool ParseStmtIntrinsiccallwithtypeassigned(StmtNodePtr &stmt); + bool ParseCallReturnPair(CallReturnPair &retpair); + bool ParseCallReturns(CallReturnVector &retsvec); + bool ParseBinaryStmt(StmtNodePtr &stmt, Opcode op); + bool ParseNaryStmtAssert(StmtNodePtr &stmt, Opcode op); + bool ParseNaryStmtAssertGE(StmtNodePtr &stmt); + bool ParseNaryStmtAssertLT(StmtNodePtr &stmt); + bool ParseNaryStmtCalcassertGE(StmtNodePtr &stmt); + bool ParseNaryStmtCalcassertLT(StmtNodePtr &stmt); + bool ParseNaryStmtCallAssertLE(StmtNodePtr &stmt); + bool ParseNaryStmtReturnAssertLE(StmtNodePtr &stmt); + bool ParseNaryStmtAssignAssertLE(StmtNodePtr &stmt); + bool ParseNaryStmt(StmtNodePtr &stmt, Opcode op); + bool ParseNaryStmtReturn(StmtNodePtr &stmt); + bool ParseNaryStmtSyncEnter(StmtNodePtr &stmt); + bool ParseNaryStmtSyncExit(StmtNodePtr &stmt); + bool ParseStmtJsTry(StmtNodePtr &stmt); + bool ParseStmtTry(StmtNodePtr &stmt); + bool ParseStmtCatch(StmtNodePtr &stmt); + bool ParseUnaryStmt(Opcode op, StmtNodePtr &stmt); + bool ParseUnaryStmtThrow(StmtNodePtr &stmt); + bool ParseUnaryStmtDecRef(StmtNodePtr &stmt); + bool ParseUnaryStmtIncRef(StmtNodePtr &stmt); + bool ParseUnaryStmtDecRefReset(StmtNodePtr &stmt); + bool ParseUnaryStmtIGoto(StmtNodePtr &stmt); + bool ParseUnaryStmtEval(StmtNodePtr &stmt); + bool ParseUnaryStmtFree(StmtNodePtr &stmt); + bool ParseUnaryStmtAssertNonNullCheck(Opcode op, StmtNodePtr &stmt); + bool 
ParseUnaryStmtAssertNonNull(StmtNodePtr &stmt); + bool ParseUnaryStmtCallAssertNonNull(StmtNodePtr &stmt); + bool ParseUnaryStmtAssignAssertNonNull(StmtNodePtr &stmt); + bool ParseUnaryStmtReturnAssertNonNull(StmtNodePtr &stmt); + bool ParseStmtMarker(StmtNodePtr &stmt); + bool ParseStmtGosub(StmtNodePtr &stmt); + bool ParseStmtAsm(StmtNodePtr &stmt); + bool ParseStmtSafeRegion(StmtNodePtr &stmt); + + // Expression Parser + bool ParseExpression(BaseNodePtr &expr); + bool ParseExprDread(BaseNodePtr &expr); + bool ParseExprDreadoff(BaseNodePtr &expr); + bool ParseExprRegread(BaseNodePtr &expr); + bool ParseExprBinary(BaseNodePtr &expr); + bool ParseExprCompare(BaseNodePtr &expr); + bool ParseExprDepositbits(BaseNodePtr &expr); + bool ParseExprConstval(BaseNodePtr &expr); + bool ParseExprConststr(BaseNodePtr &expr); + bool ParseExprConststr16(BaseNodePtr &expr); + bool ParseExprSizeoftype(BaseNodePtr &expr); + bool ParseExprFieldsDist(BaseNodePtr &expr); + bool ParseExprIreadIaddrof(IreadNode &expr); + bool ParseExprIread(BaseNodePtr &expr); + bool ParseExprIreadoff(BaseNodePtr &expr); + bool ParseExprIreadFPoff(BaseNodePtr &expr); + bool ParseExprIaddrof(BaseNodePtr &expr); + bool ParseExprAddrof(BaseNodePtr &expr); + bool ParseExprAddrofoff(BaseNodePtr &expr); + bool ParseExprAddroffunc(BaseNodePtr &expr); + bool ParseExprAddroflabel(BaseNodePtr &expr); + bool ParseExprUnary(BaseNodePtr &expr); + bool ParseExprJarray(BaseNodePtr &expr); + bool ParseExprSTACKJarray(BaseNodePtr &expr); + bool ParseExprGCMalloc(BaseNodePtr &expr); + bool ParseExprExtractbits(BaseNodePtr &expr); + bool ParseExprTyconvert(BaseNodePtr &expr); + bool ParseExprRetype(BaseNodePtr &expr); + bool ParseExprTernary(BaseNodePtr &expr); + bool ParseExprArray(BaseNodePtr &expr); + bool ParseExprIntrinsicop(BaseNodePtr &expr); + bool ParseNaryExpr(NaryStmtNode &stmtNode); + + // funcName and paramIndex is out parameter + bool ParseCallAssertInfo(std::string &funcName, int *paramIndex, std::string &stmtFuncName); + bool ParseAssertInfo(std::string &funcName); + bool ParseTypeDefine(); + bool ParseJavaClassInterface(MIRSymbol &symbol, bool isClass); + bool ParseIntrinsicId(IntrinsicopNode &intrnOpNode); + void Error(const std::string &str); + void Warning(const std::string &str); + void FixForwardReferencedTypeForOneAgg(MIRType *type); + void FixupForwardReferencedTypeByMap(); + + const std::string &GetError(); + const std::string &GetWarning() const; + bool ParseFuncInfo(void); + void PrepareParsingMIR(); + void PrepareParsingMplt(); + bool ParseSrcLang(MIRSrcLang &srcLang); + bool ParseMIR(uint32 fileIdx = 0, uint32 option = 0, bool isIPA = false, bool isComb = false); + bool ParseMIR(std::ifstream &mplFile); // the main entry point + bool ParseInlineFuncBody(std::ifstream &mplFile); + bool ParseMPLT(std::ifstream &mpltFile, const std::string &importFileName); + bool ParseMPLTStandalone(std::ifstream &mpltFile, const std::string &importFileName); + bool ParseTypeFromString(const std::string &src, TyIdx &tyIdx); + void EmitError(const std::string &fileName); + void EmitWarning(const std::string &fileName) const; + uint32 GetOptions() const { + return options; + } + + private: + // func ptr map for ParseMIR() + using FuncPtrParseMIRForElem = bool (MIRParser::*)(); + static std::map funcPtrMapForParseMIR; + static std::map InitFuncPtrMapForParseMIR(); + + bool TypeCompatible(const TyIdx &typeIdx1, const TyIdx &typeIdx2) const; + bool IsTypeIncomplete(MIRType *type) const; + + // func for ParseMIR + bool ParseMIRForFunc(); + 
bool ParseMIRForVar(); + bool ParseMIRForClass(); + bool ParseMIRForInterface(); + bool ParseMIRForFlavor(); + bool ParseMIRForSrcLang(); + bool ParseMIRForGlobalMemSize(); + bool ParseMIRForGlobalMemMap(); + bool ParseMIRForGlobalWordsTypeTagged(); + bool ParseMIRForGlobalWordsRefCounted(); + bool ParseMIRForID(); + bool ParseMIRForNumFuncs(); + bool ParseMIRForEntryFunc(); + bool ParseMIRForFileInfo(); + bool ParseMIRForFileData(); + bool ParseMIRForSrcFileInfo(); + bool ParseMIRForImport(); + bool ParseMIRForImportPath(); + bool ParseMIRForAsmdecl(); + + // func for ParseExpr + using FuncPtrParseExpr = bool (MIRParser::*)(BaseNodePtr &ptr); + static std::map funcPtrMapForParseExpr; + static std::map InitFuncPtrMapForParseExpr(); + + // func and param for ParseStmt + using FuncPtrParseStmt = bool (MIRParser::*)(StmtNodePtr &stmt); + static std::map funcPtrMapForParseStmt; + static std::map InitFuncPtrMapForParseStmt(); + + // func and param for ParseStmtBlock + using FuncPtrParseStmtBlock = bool (MIRParser::*)(); + static std::map funcPtrMapForParseStmtBlock; + static std::map InitFuncPtrMapForParseStmtBlock(); + void ParseStmtBlockForSeenComment(BlockNodePtr blk, uint32 mplNum); + bool ParseStmtBlockForVar(TokenKind stmtTK); + bool ParseStmtBlockForVar(); + bool ParseStmtBlockForTempVar(); + bool ParseStmtBlockForReg(); + bool ParseStmtBlockForType(); + bool ParseStmtBlockForFrameSize(); + bool ParseStmtBlockForUpformalSize(); + bool ParseStmtBlockForModuleID(); + bool ParseStmtBlockForFuncSize(); + bool ParseStmtBlockForFuncID(); + bool ParseStmtBlockForFormalWordsTypeTagged(); + bool ParseStmtBlockForLocalWordsTypeTagged(); + bool ParseStmtBlockForFormalWordsRefCounted(); + bool ParseStmtBlockForLocalWordsRefCounted(); + bool ParseStmtBlockForFuncInfo(); + + // common func + void SetSrcPos(SrcPosition &srcPosition, uint32 mplNum) const; + + // func for ParseExpr + Opcode paramOpForStmt = OP_undef; + TokenKind paramTokenKindForStmt = TK_invalid; + // func and param for ParseStmtBlock + MIRFunction *paramCurrFuncForParseStmtBlock = nullptr; + MIRLexer lexer; + MIRModule &mod; + std::string message; + std::string warningMessage; + uint32 options = kKeepFirst; + MapleVector definedLabels; // true if label at labidx is defined + MIRFunction *dummyFunction = nullptr; + MIRFunction *curFunc = nullptr; + uint16 lastFileNum = 0; // to remember first number after LOC + uint32 lastLineNum = 0; // to remember second number after LOC + uint16 lastColumnNum = 0; // to remember third number after LOC + uint32 firstLineNum = 0; // to track function starting line + std::map typeDefIdxMap; // map previous declared tyIdx + bool firstImport = true; // Mark the first imported mplt file + bool paramParseLocalType = false; // param for ParseTypeDefine + uint32 paramFileIdx = 0; // param for ParseMIR() + bool paramIsIPA = false; + bool paramIsComb = false; + TokenKind paramTokenKind = TK_invalid; + std::vector paramImportFileList; + std::stack safeRegionFlag; +}; +} // namespace maple +#endif // MAPLE_IR_INCLUDE_MIR_PARSER_H diff --git a/src/mapleall/maple_ir/include/mir_pragma.h b/src/mapleall/maple_ir/include/mir_pragma.h new file mode 100644 index 0000000000000000000000000000000000000000..ac0c8566a8df296280b816d24671a70015c5ce61 --- /dev/null +++ b/src/mapleall/maple_ir/include/mir_pragma.h @@ -0,0 +1,269 @@ +/* + * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. 
+ * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLE_IR_INCLUDE_MIR_PRAGMA_H +#define MAPLE_IR_INCLUDE_MIR_PRAGMA_H +#include "types_def.h" +#include "prim_types.h" +#include "mir_module.h" +#include "mpl_logging.h" +#include "mempool_allocator.h" + +namespace maple { +class MIRModule; // circular dependency exists, no other choice +class MIRType; // circular dependency exists, no other choice +class MIRFunction; // circular dependency exists, no other choice +class MIRSymbol; // circular dependency exists, no other choice +class MIRSymbolTable; // circular dependency exists, no other choice +class MIRTypeNameTable; // circular dependency exists, no other choice +enum PragmaKind { + kPragmaUnknown, + kPragmaClass, + kPragmaFunc, + kPragmaField, + kPragmaParam, + kPragmaPkg, + kPragmaVar, + kPragmaGlbvar, + kPragmaFuncExecptioni, + kPragmaFuncVar +}; + +enum PragmaVisibility { + kVisBuild, + kVisRuntime, + kVisSystem, + kVisMaple +}; + +enum PragmaValueType { + kValueByte = 0x00, // (none; must be 0) ubyte[1] + kValueShort = 0x02, // size - 1 (0…1) ubyte[size] + kValueChar = 0x03, // size - 1 (0…1) ubyte[size] + kValueInt = 0x04, // size - 1 (0…3) ubyte[size] + kValueLong = 0x06, // size - 1 (0…7) ubyte[size] + kValueFloat = 0x10, // size - 1 (0…3) ubyte[size] + kValueDouble = 0x11, // size - 1 (0…7) ubyte[size] + kValueMethodType = 0x15, // size - 1 (0…3) ubyte[size] + kValueMethodHandle = 0x16, // size - 1 (0…3) ubyte[size] + kValueString = 0x17, // size - 1 (0…3) ubyte[size] + kValueType = 0x18, // size - 1 (0…3) ubyte[size] + kValueField = 0x19, // size - 1 (0…3) ubyte[size] + kValueMethod = 0x1a, // size - 1 (0…3) ubyte[size] + kValueEnum = 0x1b, // size - 1 (0…3) ubyte[size] + kValueArray = 0x1c, // (none; must be 0) encoded_array + kValueAnnotation = 0x1d, // (none; must be 0) encoded_annotation + kValueNull = 0x1e, // (none; must be 0) (none) + kValueBoolean = 0x1f // boolean (0…1) (none) +}; + +class MIRPragmaElement { + public: + explicit MIRPragmaElement(MIRModule &m) : MIRPragmaElement(m.GetPragmaMPAllocator()) { + val.d = 0; + } + + explicit MIRPragmaElement(MapleAllocator &subElemAllocator) + : subElemVec(subElemAllocator.Adapter()) { + subElemVec.clear(); + val.d = 0; + } + + ~MIRPragmaElement() = default; + void Dump(int indent) const; + void SubElemVecPushBack(MIRPragmaElement *elem) { + subElemVec.push_back(elem); + } + + const MapleVector &GetSubElemVec() const { + return subElemVec; + } + + const MIRPragmaElement *GetSubElement(uint64 i) const { + return subElemVec[i]; + } + + MapleVector &GetSubElemVec() { + return subElemVec; + } + + const GStrIdx GetNameStrIdx() const { + return nameStrIdx; + } + + const GStrIdx GetTypeStrIdx() const { + return typeStrIdx; + } + + PragmaValueType GetType() const { + return valueType; + } + + int32 GetI32Val() const { + return val.i; + } + + int64 GetI64Val() const { + return val.j; + } + + uint64 GetU64Val() const { + return val.u; + } + + float GetFloatVal() const { + return val.f; + } + + double GetDoubleVal() const { + return val.d; + } + + void SetTypeStrIdx(GStrIdx strIdx) { + typeStrIdx = strIdx; + } + + void 
SetNameStrIdx(GStrIdx strIdx) { + nameStrIdx = strIdx; + } + + void SetType(PragmaValueType type) { + valueType = type; + } + + void SetI32Val(int32 valI) { + this->val.i = valI; + } + + void SetI64Val(int64 valJ) { + this->val.j = valJ; + } + + void SetU64Val(uint64 valU) { + this->val.u = valU; + } + + void SetFloatVal(float valF) { + this->val.f = valF; + } + + void SetDoubleVal(double valD) { + this->val.d = valD; + } + + private: + GStrIdx nameStrIdx{ 0 }; + GStrIdx typeStrIdx{ 0 }; + PragmaValueType valueType = kValueNull; + union { + int32 i; + int64 j; + uint64 u; + float f; + double d; + } val; + MapleVector subElemVec; +}; + +class MIRPragma { + public: + explicit MIRPragma(MIRModule &m) : MIRPragma(m, m.GetPragmaMPAllocator()) {} + + MIRPragma(MIRModule &m, MapleAllocator &elemAllocator) + : mod(&m), + elementVec(elemAllocator.Adapter()) {} + + ~MIRPragma() = default; + MIRPragmaElement *GetPragmaElemFromSignature(const std::string &signature); + void Dump(int indent) const; + void PushElementVector(MIRPragmaElement *elem) { + elementVec.push_back(elem); + } + + void ClearElementVector() { + elementVec.clear(); + } + + PragmaKind GetKind() const { + return pragmaKind; + } + + uint8 GetVisibility() const { + return visibility; + } + + const GStrIdx GetStrIdx() const { + return strIdx; + } + + const TyIdx GetTyIdx() const { + return tyIdx; + } + + const TyIdx GetTyIdxEx() const { + return tyIdxEx; + } + + int32 GetParamNum() const { + return paramNum; + } + + const MapleVector &GetElementVector() const { + return elementVec; + } + + const MIRPragmaElement *GetNthElement(uint32 i) const { + return elementVec[i]; + } + + void ElementVecPushBack(MIRPragmaElement *elem) { + elementVec.push_back(elem); + } + + void SetKind(PragmaKind kind) { + pragmaKind = kind; + } + + void SetVisibility(uint8 visValue) { + visibility = visValue; + } + + void SetStrIdx(GStrIdx idx) { + strIdx = idx; + } + + void SetTyIdx(TyIdx idx) { + tyIdx = idx; + } + + void SetTyIdxEx(TyIdx idx) { + tyIdxEx = idx; + } + + void SetParamNum(int32 num) { + paramNum = num; + } + + private: + MIRModule *mod; + PragmaKind pragmaKind = kPragmaUnknown; + uint8 visibility = 0; + GStrIdx strIdx{ 0 }; + TyIdx tyIdx{ 0 }; + TyIdx tyIdxEx{ 0 }; + int32 paramNum = -1; // paramNum th param in function, -1 not for param annotation + MapleVector elementVec; +}; +} // namespace maple +#endif // MAPLE_IR_INCLUDE_MIR_PRAGMA_H diff --git a/src/mapleall/maple_ir/include/mir_preg.h b/src/mapleall/maple_ir/include/mir_preg.h new file mode 100644 index 0000000000000000000000000000000000000000..12d22eaa0bde5e6c0e58835b921ea58fae08ca6e --- /dev/null +++ b/src/mapleall/maple_ir/include/mir_preg.h @@ -0,0 +1,230 @@ +/* + * Copyright (c) [2019-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MAPLE_IR_INCLUDE_MIR_PREG_H +#define MAPLE_IR_INCLUDE_MIR_PREG_H +#if MIR_FEATURE_FULL +#include +#include "mir_module.h" +#include "global_tables.h" +#endif // MIR_FEATURE_FULL + +namespace maple { +extern void PrintIndentation(int32 indent); + +// these special registers are encoded by negating the enumeration +enum SpecialReg : signed int { + kSregSp = 1, + kSregFp = 2, + kSregGp = 3, + kSregThrownval = 4, + kSregMethodhdl = 5, + kSregRetval0 = 6, + kSregRetval1 = 7, + kSregLast = 8, +}; +#if MIR_FEATURE_FULL +class MIRPreg { + public: + explicit MIRPreg(uint32 n = 0) : MIRPreg(n, kPtyInvalid, nullptr) {} + + MIRPreg(uint32 n, PrimType ptyp) : primType(ptyp), pregNo(n) {} + + MIRPreg(uint32 n, PrimType ptyp, MIRType *mType) : primType(ptyp), pregNo(n), mirType(mType) {} + + ~MIRPreg() = default; + void SetNeedRC(bool newNeedRC = true) { + this->needRC = newNeedRC; + } + + bool NeedRC() const { + return needRC; + } + + bool IsRef() const { + return mirType != nullptr && primType == PTY_ref; + } + + PrimType GetPrimType() const { + return primType; + } + + void SetPrimType(PrimType pty) { + primType = pty; + } + + Opcode GetOp() const { + return op; + } + + void SetOp(Opcode o) { + this->op = o; + } + + int32 GetPregNo() const { + return pregNo; + } + + void SetPregNo(int32 newPregNo) { + this->pregNo = newPregNo; + } + + MIRType *GetMIRType() const { + return mirType; + } + + void SetMIRType(MIRType *newMirType) { + this->mirType = newMirType; + } + union RematInfo { + const MIRConst *mirConst; // used only when op is OP_constval + const MIRSymbol *sym; // used only when op is OP_addrof or OP_dread + } rematInfo; + FieldID fieldID = 0; // used only when op is OP_addrof or OP_dread + bool addrUpper = false; // used only when op is OP_addrof to indicate upper bits of address + + private: + PrimType primType = kPtyInvalid; + bool needRC = false; + Opcode op = OP_undef; // OP_constval, OP_addrof or OP_dread if rematerializable + int32 pregNo; // the number in maple IR after the % + MIRType *mirType = nullptr; +}; + +class MIRPregTable { + public: + explicit MIRPregTable(MapleAllocator *allocator) + : pregNoToPregIdxMap(allocator->Adapter()), + pregTable(allocator->Adapter()), + mAllocator(allocator) { + pregTable.push_back(nullptr); + specPregTable[0].SetPregNo(0); + specPregTable[kSregSp].SetPregNo(-kSregSp); + specPregTable[kSregFp].SetPregNo(-kSregFp); + specPregTable[kSregGp].SetPregNo(-kSregGp); + specPregTable[kSregThrownval].SetPregNo(-kSregThrownval); + specPregTable[kSregMethodhdl].SetPregNo(-kSregMethodhdl); + specPregTable[kSregRetval0].SetPregNo(-kSregRetval0); + specPregTable[kSregRetval1].SetPregNo(-kSregRetval1); + for (uint32 i = 0; i < kSregLast; ++i) { + specPregTable[i].SetPrimType(PTY_unknown); + } + } + + ~MIRPregTable(); + + PregIdx CreatePreg(PrimType primType, MIRType *mtype = nullptr) { + ASSERT(!mtype || mtype->GetPrimType() == PTY_ref || mtype->GetPrimType() == PTY_ptr, "ref or ptr type"); + uint32 index = ++maxPregNo; + auto *preg = mAllocator->GetMemPool()->New(index, primType, mtype); + return AddPreg(*preg); + } + + PregIdx ClonePreg(const MIRPreg &rfpreg) { + PregIdx idx = CreatePreg(rfpreg.GetPrimType(), rfpreg.GetMIRType()); + MIRPreg *preg = pregTable[static_cast(idx)]; + preg->SetNeedRC(rfpreg.NeedRC()); + return idx; + } + + MIRPreg *PregFromPregIdx(PregIdx pregidx) { + if (pregidx < 0) { // special register + return &specPregTable[-pregidx]; + } else { + return pregTable.at(static_cast(pregidx)); + } + } + + PregIdx 
GetPregIdxFromPregno(uint32 pregNo) { + auto it = pregNoToPregIdxMap.find(pregNo); + return (it == pregNoToPregIdxMap.end()) ? PregIdx(0) : it->second; + } + + void DumpPregsWithTypes(int32 indent) { + MapleVector &pregtable = pregTable; + for (uint32 i = 1; i < pregtable.size(); i++) { + MIRPreg *mirpreg = pregtable[i]; + if (mirpreg->GetMIRType() == nullptr) { + continue; + } + PrintIndentation(indent); + LogInfo::MapleLogger() << "reg "; + LogInfo::MapleLogger() << "%" << mirpreg->GetPregNo(); + LogInfo::MapleLogger() << " "; + mirpreg->GetMIRType()->Dump(0); + LogInfo::MapleLogger() << " " << (mirpreg->NeedRC() ? 1 : 0); + LogInfo::MapleLogger() << "\n"; + } + } + + size_t Size() const { + return pregTable.size(); + } + + PregIdx AddPreg(MIRPreg &preg) { + PregIdx idx = static_cast(pregTable.size()); + pregTable.push_back(&preg); + ASSERT(pregNoToPregIdxMap.find(preg.GetPregNo()) == pregNoToPregIdxMap.end(), "The same pregno is already taken"); + pregNoToPregIdxMap[preg.GetPregNo()] = idx; + return idx; + } + + PregIdx EnterPregNo(uint32 pregNo, PrimType ptyp, MIRType *ty = nullptr) { + PregIdx idx = GetPregIdxFromPregno(pregNo); + if (idx == 0) { + if (pregNo > maxPregNo) { + maxPregNo = pregNo; + } + MIRPreg *preg = mAllocator->GetMemPool()->New(pregNo, ptyp, ty); + return AddPreg(*preg); + } + return idx; + } + + MapleVector &GetPregTable() { + return pregTable; + } + + const MapleVector &GetPregTable() const { + return pregTable; + } + + const MIRPreg *GetPregTableItem(const uint32 index) const { + CHECK_FATAL(index < pregTable.size(), "array index out of range"); + return pregTable[index]; + } + + void SetPregNoToPregIdxMapItem(uint32 key, PregIdx value) { + pregNoToPregIdxMap[key] = value; + } + + uint32 GetMaxPregNo() const { + return maxPregNo; + } + + void SetMaxPregNo(uint32 index) { + maxPregNo = index; + } + + private: + uint32 maxPregNo = 0; // the max pregNo that has been allocated + MapleMap pregNoToPregIdxMap; // for quick lookup based on pregno + MapleVector pregTable; + MIRPreg specPregTable[kSregLast]; // for the MIRPreg nodes corresponding to special registers + MapleAllocator *mAllocator; +}; + +#endif // MIR_FEATURE_FULL +} // namespace maple +#endif // MAPLE_IR_INCLUDE_MIR_PREG_H diff --git a/src/mapleall/maple_ir/include/mir_scope.h b/src/mapleall/maple_ir/include/mir_scope.h new file mode 100644 index 0000000000000000000000000000000000000000..7d404f42efa0d12ab44369aefbdab2866f6ddf5c --- /dev/null +++ b/src/mapleall/maple_ir/include/mir_scope.h @@ -0,0 +1,198 @@ +/* + * Copyright (c) [2022] Futurewei Technologies, Inc. All rights reverved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MAPLE_IR_INCLUDE_MIR_SCOPE_H +#define MAPLE_IR_INCLUDE_MIR_SCOPE_H +#include +#include "mir_module.h" +#include "mir_type.h" +#include "src_position.h" + +namespace maple { +// mapping src variable to mpl variables to display debug info +enum AliasTypeKind : uint8 { + kATKType, + kATKString, + kATKEnum, +}; + +struct MIRAliasVars { + GStrIdx mplStrIdx; // maple varialbe name + AliasTypeKind atk; + unsigned index; + bool isLocal; + GStrIdx sigStrIdx; + TypeAttrs attrs; +}; + +class MIRAlias { + public: + explicit MIRAlias(MIRModule *mod) : module(mod) {} + ~MIRAlias() = default; + + bool IsEmpty() const { + return aliasVarMap.size() == 0; + } + + void SetAliasVarMap(GStrIdx idx, const MIRAliasVars &vars) { + aliasVarMap[idx] = vars; + } + + MapleMap &GetAliasVarMap() { + return aliasVarMap; + } + + void Dump(int32 indent, bool isLocal = true) const; + + private: + MIRModule *module; + // source to maple variable alias + MapleMap aliasVarMap { module->GetMPAllocator().Adapter() }; +}; + +class MIRTypeAlias { + public: + explicit MIRTypeAlias(MIRModule *mod) : module(mod) {} + virtual ~MIRTypeAlias() = default; + + bool IsEmpty() const { + return typeAliasMap.size() == 0; + } + + bool Exist(GStrIdx idx) const { + return typeAliasMap.find(idx) != typeAliasMap.end(); + } + + const MapleMap &GetTypeAliasMap() const { + return typeAliasMap; + } + + TyIdx GetTyIdxFromMap(GStrIdx idx) const { + auto it = typeAliasMap.find(idx); + if (it == typeAliasMap.cend()) { + return TyIdx(0); + } + return it->second; + } + + void SetTypeAliasMap(GStrIdx gStrIdx, TyIdx tyIdx) { + typeAliasMap[gStrIdx] = tyIdx; + } + + void Dump(int32 indent) const; + + private: + MIRModule *module; + MapleMap typeAliasMap { module->GetMPAllocator().Adapter() }; +}; + +class MIRScope { + public: + explicit MIRScope(MIRModule *mod, MIRFunction *f = nullptr); + ~MIRScope() = default; + + bool IsEmpty() const { + return (alias == nullptr || alias->IsEmpty()) && + (typeAlias == nullptr || typeAlias->IsEmpty()) && + subScopes.size() == 0; + } + + bool IsSubScope(const MIRScope *scp) const; + bool HasJoinScope(const MIRScope *scp1, const MIRScope *scp2) const; + bool HasSameRange(const MIRScope *s1, const MIRScope *s2) const; + + unsigned GetId() const { + return id; + } + + void SetId(unsigned i) { + id = i; + } + + const SrcPosition &GetRangeLow() const { + return range.first; + } + + const SrcPosition &GetRangeHigh() const { + return range.second; + } + + void SetRange(SrcPosition low, SrcPosition high) { + // The two positions that were changed by the #line directive may be not in the same file. 
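+    // For example (illustrative values): a scope that opens at line 10 and closes at line 42 of the
 +    // same file must satisfy low.IsBfOrEq(high); if a #line directive placed low and high in
 +    // different files, IsSameFile() is false and the ordering assertion below is skipped.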
+ ASSERT(!low.IsSameFile(high) || low.IsBfOrEq(high), "wrong order of low and high"); + range.first = low; + range.second = high; + } + + void SetAliasVarMap(GStrIdx idx, const MIRAliasVars &vars) { + alias->SetAliasVarMap(idx, vars); + } + + MapleMap &GetAliasVarMap() { + return alias->GetAliasVarMap(); + } + + MapleVector &GetSubScopes() { + return subScopes; + } + + void AddTuple(SrcPosition pos, SrcPosition posB, SrcPosition posE) { + if (pos.LineNum() == 0 || posB.LineNum() == 0 || posE.LineNum() == 0) { + return; + } + std::tuple srcPos(pos, posB, posE); + blkSrcPos.push_back(srcPos); + } + + SrcPosition GetScopeEndPos(const SrcPosition &pos); + bool AddScope(MIRScope *scope); + + void SetTypeAliasMap(GStrIdx gStrIdx, TyIdx tyIdx) { + typeAlias->SetTypeAliasMap(gStrIdx, tyIdx); + } + + MIRTypeAlias *GetTypeAlias() { + return typeAlias; + } + + const MIRTypeAlias *GetTypeAlias() const { + return typeAlias; + } + + void SetIsLocal(bool b) { + isLocal = b; + } + + bool IsLocal() const { + return isLocal; + } + + void DumpTypedef(int32 indent) const; + void Dump(int32 indent) const; + void Dump() const; + + private: + MIRModule *module; + MIRFunction *func; + unsigned id; + bool isLocal; + std::pair range; + MIRAlias *alias = nullptr; + MIRTypeAlias *typeAlias = nullptr; + // subscopes' range should be disjoint + MapleVector subScopes { module->GetMPAllocator().Adapter() }; + MapleVector> blkSrcPos { module->GetMPAllocator().Adapter() }; +}; +} // namespace maple +#endif // MAPLE_IR_INCLUDE_MIR_SCOPE_H diff --git a/src/mapleall/maple_ir/include/mir_symbol.h b/src/mapleall/maple_ir/include/mir_symbol.h new file mode 100644 index 0000000000000000000000000000000000000000..7c6324ac53b80c1b602b474e2ee61f6e01462679 --- /dev/null +++ b/src/mapleall/maple_ir/include/mir_symbol.h @@ -0,0 +1,718 @@ +/* + * Copyright (c) [2019-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MAPLE_IR_INCLUDE_MIR_SYMBOL_H +#define MAPLE_IR_INCLUDE_MIR_SYMBOL_H +#include +#include "mir_const.h" +#include "mir_preg.h" +#include "src_position.h" + +constexpr int kScopeLocal = 2; // the default scope level for function variables +constexpr int kScopeGlobal = 1; // the scope level for global variables + +namespace maple { +enum MIRSymKind { + kStInvalid, + kStVar, + kStFunc, + kStConst, + kStJavaClass, + kStJavaInterface, + kStPreg +}; + +enum MIRStorageClass : uint8 { + kScInvalid, + kScAuto, + kScAliased, + kScFormal, + kScExtern, + kScGlobal, + kScPstatic, // PU-static + kScFstatic, // file-static + kScText, + kScTypeInfo, // used for eh type st + kScTypeInfoName, // used for eh type st name + kScTypeCxxAbi, // used for eh inherited from c++ __cxxabiv1 + kScEHRegionSupp, // used for tables that control C++ exception handling + kScUnused +}; + +// to represent a single symbol +class MIRSymbol { + public: + union SymbolType { // a symbol can either be a const or a function or a preg which currently used for formal + MIRConst *konst; + MIRFunction *mirFunc; + MIRPreg *preg; // the MIRSymKind must be kStPreg + }; + + MIRSymbol() = default; + MIRSymbol(uint32 idx, uint8 scp) : stIdx(scp, idx) {} + ~MIRSymbol() = default; + + void SetIsTmp(bool temp) { + isTmp = temp; + } + + bool GetIsTmp() const { + return isTmp; + } + + void SetNeedForwDecl() { + needForwDecl = true; + } + + bool IsNeedForwDecl() const { + return needForwDecl; + } + + void SetInstrumented() { + instrumented = true; + } + + bool IsInstrumented() const { + return instrumented; + } + + void SetIsImported(bool imported) { + isImported = imported; + } + + bool GetIsImported() const { + return isImported; + } + + void SetWPOFakeParm() { + wpoFakeParam = true; + } + + bool IsWpoFakeParm() const { + return wpoFakeParam; + } + + bool IsWpoFakeRet() const { + return wpoFakeRet; + } + + void SetWPOFakeRet() { + wpoFakeRet = true; + } + + void SetIsTmpUnused(bool unused) { + isTmpUnused = unused; + } + + void SetIsImportedDecl(bool imported) { + isImportedDecl = imported; + } + + bool GetIsImportedDecl() const { + return isImportedDecl; + } + + bool IsTmpUnused() const { + return isTmpUnused; + } + + void SetAppearsInCode(bool appears) { + appearsInCode = appears; + } + + bool GetAppearsInCode() const { + return appearsInCode; + } + + void SetTyIdx(const TyIdx &newTyIdx) { + this->tyIdx = newTyIdx; + } + + TyIdx GetTyIdx() const { + return tyIdx; + } + + void SetInferredTyIdx(const TyIdx &newInferredTyIdx) { + this->inferredTyIdx = newInferredTyIdx; + } + + TyIdx GetInferredTyIdx() const { + return inferredTyIdx; + } + + void SetStIdx(const StIdx &newStIdx) { + this->stIdx = newStIdx; + } + + StIdx GetStIdx() const { + return stIdx; + } + + void SetSKind(MIRSymKind m) { + sKind = m; + } + + MIRSymKind GetSKind() const { + return sKind; + } + + uint32 GetScopeIdx() const { + return stIdx.Scope(); + } + + uint32 GetStIndex() const { + return stIdx.Idx(); + } + + bool IsLocal() const { + return stIdx.Islocal(); + } + + bool IsGlobal() const { + return stIdx.IsGlobal(); + } + + const TypeAttrs &GetAttrs() const { + return typeAttrs; + } + + TypeAttrs &GetAttrs() { + return typeAttrs; + } + + void SetAttrs(TypeAttrs attr) { + typeAttrs = attr; + } + + // AddAttrs adds more attributes instead of overrides the current one + void AddAttrs(TypeAttrs attr) { + typeAttrs.SetAttrFlag(typeAttrs.GetAttrFlag() | attr.GetAttrFlag()); + typeAttrs.AddAttrBoundary(attr.GetAttrBoundary()); + } + + bool GetAttr(AttrKind attrKind) 
const { + return typeAttrs.GetAttr(attrKind); + } + + void SetAttr(AttrKind attrKind) { + typeAttrs.SetAttr(attrKind); + } + + void ResetAttr(AttrKind attrKind) { + typeAttrs.ResetAttr(attrKind); + } + + bool IsVolatile() const { + return typeAttrs.GetAttr(ATTR_volatile); + } + + bool IsTypeVolatile(int fieldID) const; + + bool NeedPIC() const; + + bool IsThreadLocal() const { + return typeAttrs.GetAttr(ATTR_tls_static) || typeAttrs.GetAttr(ATTR_tls_dynamic); + } + + bool IsStatic() const { + return typeAttrs.GetAttr(ATTR_static); + } + + bool IsPUStatic() const { + return GetStorageClass() == kScPstatic; + } + + bool IsFinal() const { + return ((typeAttrs.GetAttr(ATTR_final) || typeAttrs.GetAttr(ATTR_readonly)) && + staticFinalBlackList.find(GetName()) == staticFinalBlackList.end()) || + IsLiteral() || IsLiteralPtr(); + } + + bool IsWeak() const { + return typeAttrs.GetAttr(ATTR_weak); + } + + bool IsPrivate() const { + return typeAttrs.GetAttr(ATTR_private); + } + + bool IsRefType() const { + return typeAttrs.GetAttr(ATTR_localrefvar); + } + + void SetNameStrIdx(GStrIdx strIdx) { + nameStrIdx = strIdx; + } + + void SetNameStrIdx(const std::string &name); + + GStrIdx GetNameStrIdx() const { + return nameStrIdx; + } + + MIRStorageClass GetStorageClass() const { + return storageClass; + } + + void SetStorageClass(MIRStorageClass cl) { + storageClass = cl; + } + + bool IsReadOnly() const { + return kScFstatic == storageClass && kStConst == sKind; + } + + bool IsConst() const { + return sKind == kStConst || (sKind == kStVar && value.konst != nullptr); + } + + MIRType *GetType() const; + + const std::string &GetName() const { + return GlobalTables::GetStrTable().GetStringFromStrIdx(nameStrIdx); + } + + MIRConst *GetKonst() const { + ASSERT((sKind == kStConst || sKind == kStVar), "must be const symbol"); + return value.konst; + } + + void SetKonst(MIRConst *mirconst) { + ASSERT((sKind == kStConst || sKind == kStVar), "must be const symbol"); + value.konst = mirconst; + } + + void SetIsDeleted() { + isDeleted = true; + } + + void ResetIsDeleted() { + isDeleted = false; + } + + bool IsDeleted() const { + return isDeleted; + } + + bool IsVar() const { + return sKind == kStVar; + } + + bool IsFunction() const { + return sKind == kStFunc; + } + + bool IsPreg() const { + return sKind == kStPreg; + } + + bool IsJavaClassInterface() const { + return sKind == kStJavaClass || sKind == kStJavaInterface; + } + + SymbolType GetValue() const { + return value; + } + + void SetValue(SymbolType newValue) { + this->value = newValue; + } + + SrcPosition &GetSrcPosition() { + return srcPosition; + } + + const SrcPosition &GetSrcPosition() const { + return srcPosition; + } + + void SetSrcPosition(const SrcPosition &position) { + srcPosition = position; + } + + MIRPreg *GetPreg() { + ASSERT(IsPreg(), "must be Preg"); + return value.preg; + } + + const MIRPreg *GetPreg() const { + CHECK_FATAL(IsPreg(), "must be Preg"); + return value.preg; + } + + void SetPreg(MIRPreg *preg) { + CHECK_FATAL(IsPreg(), "must be Preg"); + value.preg = preg; + } + + bool CanBeIgnored() const { + return isDeleted; + } + + void SetLocalRefVar() { + SetAttr(ATTR_localrefvar); + } + + void ResetLocalRefVar() { + ResetAttr(ATTR_localrefvar); + } + + MIRFunction *GetFunction() const { + ASSERT(sKind == kStFunc, "must be function symbol"); + return value.mirFunc; + } + + void SetFunction(MIRFunction *func) { + ASSERT(sKind == kStFunc, "must be function symbol"); + value.mirFunc = func; + } + + bool IsEhIndex() const { + return GetName() == 
"__eh_index__"; + } + + bool HasAddrOfValues() const; + bool IsLiteral() const; + bool IsLiteralPtr() const; + bool PointsToConstString() const; + bool IsConstString() const; + bool IsClassInitBridge() const; + bool IsReflectionStrTab() const; + bool IsReflectionHashTabBucket() const; + bool IsReflectionInfo() const; + bool IsReflectionFieldsInfo() const; + bool IsReflectionFieldsInfoCompact() const; + bool IsReflectionSuperclassInfo() const; + bool IsReflectionFieldOffsetData() const; + bool IsReflectionMethodAddrData() const; + bool IsReflectionMethodSignature() const; + bool IsReflectionClassInfo() const; + bool IsReflectionArrayClassInfo() const; + bool IsReflectionClassInfoPtr() const; + bool IsReflectionClassInfoRO() const; + bool IsITabConflictInfo() const; + bool IsVTabInfo() const; + bool IsITabInfo() const; + bool IsReflectionPrimitiveClassInfo() const; + bool IsReflectionMethodsInfo() const; + bool IsReflectionMethodsInfoCompact() const; + bool IsRegJNITab() const; + bool IsRegJNIFuncTab() const; + bool IsMuidTab() const; + bool IsMuidRoTab() const; + bool IsCodeLayoutInfo() const; + std::string GetMuidTabName() const; + bool IsMuidFuncDefTab() const; + bool IsMuidFuncDefOrigTab() const; + bool IsMuidFuncInfTab() const; + bool IsMuidFuncUndefTab() const; + bool IsMuidDataDefTab() const; + bool IsMuidDataDefOrigTab() const; + bool IsMuidDataUndefTab() const; + bool IsMuidFuncDefMuidTab() const; + bool IsMuidFuncUndefMuidTab() const; + bool IsMuidDataDefMuidTab() const; + bool IsMuidDataUndefMuidTab() const; + bool IsMuidFuncMuidIdxMuidTab() const; + bool IsMuidRangeTab() const; + bool IsArrayClassCache() const; + bool IsArrayClassCacheName() const; + bool IsForcedGlobalFunc() const; + bool IsForcedGlobalClassinfo() const; + bool IsGctibSym() const; + bool IsPrimordialObject() const; + bool IgnoreRC() const; + void Dump(bool isLocal, int32 indent, bool suppressInit = false, const MIRSymbolTable *localsymtab = nullptr) const; + void DumpAsLiteralVar() const; + bool operator==(const MIRSymbol &msym) const { + return nameStrIdx == msym.nameStrIdx; + } + + bool operator!=(const MIRSymbol &msym) const { + return nameStrIdx != msym.nameStrIdx; + } + + bool operator<(const MIRSymbol &msym) const { + return nameStrIdx < msym.nameStrIdx; + } + + static uint32 &LastPrintedLineNumRef() { + return lastPrintedLineNum; + } + + static uint16 &LastPrintedColumnNumRef() { + return lastPrintedColumnNum; + } + + bool HasPotentialAssignment() const { + return hasPotentialAssignment; + } + + void SetHasPotentialAssignment() { + hasPotentialAssignment = true; + } + + void SetAsmAttr(const UStrIdx &idx) { + asmAttr = idx; + } + + const UStrIdx &GetAsmAttr() const { + return asmAttr; + } + + void SetWeakrefAttr(const std::pair &idx) { + weakrefAttr = idx; + } + + const std::pair &GetWeakrefAttr() const { + return weakrefAttr; + } + + bool IsFormal() const { + return storageClass == kScFormal; + } + + bool LMBCAllocateOffSpecialReg() const { + if (isDeleted) { + return false; + } + switch (storageClass) { + case kScAuto: + return true; + case kScPstatic: + case kScFstatic: + return value.konst == nullptr && !hasPotentialAssignment; + default: + return false; + } + } + UStrIdx asmAttr { 0 }; // if not 0, the string for the name in C's asm attribute + UStrIdx sectionAttr { 0 }; // if not 0, the string for the name in C's section attribute + + // Please keep order of the fields, avoid paddings. 
+ private: + TyIdx tyIdx{ 0 }; + TyIdx inferredTyIdx{ kInitTyIdx }; + MIRStorageClass storageClass{ kScInvalid }; + MIRSymKind sKind{ kStInvalid }; + bool isTmp = false; + bool needForwDecl = false; // addrof of this symbol used in initialization, NOT serialized + bool wpoFakeParam = false; // fake symbol introduced in wpo phase for a parameter, NOT serialized + bool wpoFakeRet = false; // fake symbol introduced in wpo phase for return value, NOT serialized + bool isDeleted = false; // tell if it is deleted, NOT serialized + bool instrumented = false; // a local ref pointer instrumented by RC opt, NOT serialized + bool isImported = false; + bool isImportedDecl = false; + bool isTmpUnused = false; // when parse the mplt_inline file, mark all the new symbol as tmpunused + bool appearsInCode = false; // only used for kStFunc + bool hasPotentialAssignment = false; // for global static vars, init as false and will be set true + // if assigned by stmt or the address of itself is taken + StIdx stIdx { 0, 0 }; + TypeAttrs typeAttrs; + GStrIdx nameStrIdx{ 0 }; + std::pair weakrefAttr { false, 0 }; + SymbolType value = { nullptr }; + SrcPosition srcPosition; // where the symbol is defined + // following cannot be assumed final even though they are declared final + static const std::set staticFinalBlackList; + static GStrIdx reflectClassNameIdx; + static GStrIdx reflectMethodNameIdx; + static GStrIdx reflectFieldNameIdx; + static uint32 lastPrintedLineNum; // used during printing ascii output + static uint16 lastPrintedColumnNum; +}; + +class MIRSymbolTable { + public: + explicit MIRSymbolTable(const MapleAllocator &allocator) + : mAllocator(allocator), + strIdxToStIdxMap(mAllocator.Adapter()), + symbolTable({ nullptr }, mAllocator.Adapter()) {} + + ~MIRSymbolTable() = default; + + bool IsValidIdx(uint32 idx) const { + return idx < symbolTable.size(); + } + + MIRSymbol *GetSymbolFromStIdx(uint32 idx, bool checkFirst = false) const { + if (checkFirst && idx >= symbolTable.size()) { + return nullptr; + } + CHECK_FATAL(IsValidIdx(idx), "symbol table index out of range"); + return symbolTable[idx]; + } + + MIRSymbol *CreateSymbol(uint8 scopeID) { + auto *st = mAllocator.GetMemPool()->New(symbolTable.size(), scopeID); + symbolTable.push_back(st); + return st; + } + + void PushNullSymbol() { + symbolTable.push_back(nullptr); + } + + // add sym from other symbol table, happens in inline + bool AddStOutside(MIRSymbol *sym) { + if (sym == nullptr) { + return false; + } + sym->SetStIdx(StIdx(sym->GetScopeIdx(), symbolTable.size())); + symbolTable.push_back(sym); + return AddToStringSymbolMap(*sym); + } + + bool AddToStringSymbolMap(const MIRSymbol &st) { + GStrIdx strIdx = st.GetNameStrIdx(); + if (strIdxToStIdxMap[strIdx].FullIdx() != 0) { + return false; + } + strIdxToStIdxMap[strIdx] = st.GetStIdx(); + return true; + } + + StIdx GetStIdxFromStrIdx(GStrIdx idx) const { + auto it = strIdxToStIdxMap.find(idx); + return (it == strIdxToStIdxMap.end()) ? 
StIdx() : it->second; + } + + MIRSymbol *GetSymbolFromStrIdx(const GStrIdx &idx, bool checkFirst = false) const { + return GetSymbolFromStIdx(GetStIdxFromStrIdx(idx).Idx(), checkFirst); + } + + void Dump(bool isLocal, int32 indent = 0, bool printDeleted = false, + MIRFlavor flavor = kFlavorUnknown) const; + size_t GetSymbolTableSize() const { + return symbolTable.size(); + } + + void Clear() { + symbolTable.clear(); + strIdxToStIdxMap.clear(); + } + + MIRSymbol *CloneLocalSymbol(const MIRSymbol &oldSym) const { + auto *memPool = mAllocator.GetMemPool(); + auto *newSym = memPool->New(oldSym); + if (oldSym.GetSKind() == kStConst) { + newSym->SetKonst(oldSym.GetKonst()->Clone(*memPool)); + } else if (oldSym.GetSKind() == kStPreg) { + newSym->SetPreg(memPool->New(*oldSym.GetPreg())); + } else if (oldSym.GetSKind() == kStFunc) { + CHECK_FATAL(false, "%s has unexpected local func symbol", oldSym.GetName().c_str()); + } + return newSym; + } + + private: + MapleAllocator mAllocator; + // hash table mapping string index to st index + MapleMap strIdxToStIdxMap; + // map symbol idx to symbol node + MapleVector symbolTable; +}; + +class MIRLabelTable { + public: + explicit MIRLabelTable(MapleAllocator &allocator) + : addrTakenLabels(allocator.Adapter()), + caseLabelSet(allocator.Adapter()), + mAllocator(allocator), + strIdxToLabIdxMap(std::less(), mAllocator.Adapter()), + labelTable(mAllocator.Adapter()) { + labelTable.push_back(GStrIdx(kDummyLabel)); // push dummy label index 0 + } + + ~MIRLabelTable() = default; + + LabelIdx CreateLabel() { + LabelIdx labelIdx = labelTable.size(); + GStrIdx strIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(std::to_string(labelIdx)); + labelTable.push_back(strIdx); + return labelIdx; + } + + LabelIdx CreateLabelWithPrefix(char c); + + LabelIdx AddLabel(GStrIdx nameIdx) { + LabelIdx labelIdx = labelTable.size(); + labelTable.push_back(nameIdx); + strIdxToLabIdxMap[nameIdx] = labelIdx; + return labelIdx; + } + + LabelIdx GetLabelIdxFromStrIdx(GStrIdx idx) const { + auto it = strIdxToLabIdxMap.find(idx); + if (it == strIdxToLabIdxMap.end()) { + return LabelIdx(); + } + return it->second; + } + + void AddToStringLabelMap(LabelIdx labelIdx); + size_t GetLabelTableSize() const { + return labelTable.size(); + } + + const std::string &GetName(LabelIdx labelIdx) const; + + size_t Size() const { + return labelTable.size(); + } + + static uint32 GetDummyLabel() { + return kDummyLabel; + } + + GStrIdx GetSymbolFromStIdx(LabelIdx idx) const { + CHECK_FATAL(idx < labelTable.size(), "label table index out of range"); + return labelTable[idx]; + } + + void SetSymbolFromStIdx(LabelIdx idx, GStrIdx strIdx) { + CHECK_FATAL(idx < labelTable.size(), "label table index out of range"); + labelTable[idx] = strIdx; + } + + MapleVector GetLabelTable() { + return labelTable; + } + + const MapleUnorderedSet &GetAddrTakenLabels() const { + return addrTakenLabels; + } + + MapleUnorderedSet &GetAddrTakenLabels() { + return addrTakenLabels; + } + + const MapleMap &GetStrIdxToLabelIdxMap() const { + return strIdxToLabIdxMap; + } + void EraseStrIdxToLabelIdxElem(GStrIdx idx) { + strIdxToLabIdxMap.erase(idx); + } + + MapleUnorderedSet addrTakenLabels; // those appeared in addroflabel or MIRLblConst + MapleUnorderedSet caseLabelSet; // labels marking starts of switch cases + + private: + static constexpr uint32 kDummyLabel = 0; + MapleAllocator mAllocator; + MapleMap strIdxToLabIdxMap; + MapleVector labelTable; // map label idx to label name +}; +} // namespace maple +#endif // 
MAPLE_IR_INCLUDE_MIR_SYMBOL_H diff --git a/src/mapleall/maple_ir/include/mir_symbol_builder.h b/src/mapleall/maple_ir/include/mir_symbol_builder.h new file mode 100644 index 0000000000000000000000000000000000000000..866d2e2e3090f581b8d0bb3a2e714d3613b083ef --- /dev/null +++ b/src/mapleall/maple_ir/include/mir_symbol_builder.h @@ -0,0 +1,64 @@ +/* + * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEIR_INCLUDE_MIRSYMBOLBUILDER_H +#define MAPLEIR_INCLUDE_MIRSYMBOLBUILDER_H +#include +#include +#include +#include +#include "opcodes.h" +#include "prim_types.h" +#include "mir_type.h" +#include "mir_const.h" +#include "mir_symbol.h" +#include "mir_nodes.h" +#include "mir_module.h" +#include "mir_preg.h" +#include "mir_function.h" +#include "printing.h" +#include "intrinsic_op.h" +#include "opcode_info.h" +#include "global_tables.h" + +namespace maple { +class MIRSymbolBuilder { + public: + static MIRSymbolBuilder &Instance() { + static MIRSymbolBuilder builder; + return builder; + } + + MIRSymbol *GetLocalDecl(const MIRSymbolTable &symbolTable, const GStrIdx &strIdx) const; + MIRSymbol *CreateLocalDecl(MIRSymbolTable &symbolTable, GStrIdx strIdx, const MIRType &type) const; + MIRSymbol *GetGlobalDecl(GStrIdx strIdx) const; + MIRSymbol *CreateGlobalDecl(GStrIdx strIdx, const MIRType &type, MIRStorageClass sc) const; + MIRSymbol *GetSymbol(TyIdx tyIdx, GStrIdx strIdx, MIRSymKind mClass, MIRStorageClass sClass, + bool sameType = false) const; + MIRSymbol *CreateSymbol(TyIdx tyIdx, GStrIdx strIdx, MIRSymKind mClass, MIRStorageClass sClass, + MIRFunction *func, uint8 scpID) const; + MIRSymbol *CreatePregFormalSymbol(TyIdx tyIdx, PregIdx pRegIdx, MIRFunction &func) const; + size_t GetSymbolTableSize(const MIRFunction *func = nullptr) const; + const MIRSymbol *GetSymbolFromStIdx(uint32 idx, const MIRFunction *func = nullptr) const; + + private: + MIRSymbolBuilder() = default; + ~MIRSymbolBuilder() = default; + MIRSymbolBuilder(const MIRSymbolBuilder&) = delete; + MIRSymbolBuilder(const MIRSymbolBuilder&&) = delete; + MIRSymbolBuilder &operator=(const MIRSymbolBuilder&) = delete; + MIRSymbolBuilder &operator=(const MIRSymbolBuilder&&) = delete; +}; +} // maple +#endif // MAPLEIR_INCLUDE_MIRSYMBOLBUILDER_H diff --git a/src/mapleall/maple_ir/include/mir_type.h b/src/mapleall/maple_ir/include/mir_type.h new file mode 100644 index 0000000000000000000000000000000000000000..048d56b4c2a18cc5d6fc7b917adcc4a216f2c883 --- /dev/null +++ b/src/mapleall/maple_ir/include/mir_type.h @@ -0,0 +1,2184 @@ +/* + * Copyright (c) [2019-2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLE_IR_INCLUDE_MIR_TYPE_H +#define MAPLE_IR_INCLUDE_MIR_TYPE_H +#include +#include +#include "prim_types.h" +#include "mir_pragma.h" +#include "mpl_logging.h" +#if MIR_FEATURE_FULL +#include "mempool.h" +#include "mempool_allocator.h" +#endif // MIR_FEATURE_FULL + +namespace maple { +constexpr uint32 kTypeHashLength = 12289; // hash length for mirtype, ref: planetmath.org/goodhashtableprimes +const std::string kRenameKeyWord = "_MNO"; // A static symbol name will be renamed as oriname_MNOxxx. + +class FieldAttrs; // circular dependency exists, no other choice +class MIRAlias; +using TyIdxFieldAttrPair = std::pair; +using FieldPair = std::pair; +using FieldVector = std::vector; +using MIRTypePtr = MIRType*; + +constexpr size_t kMaxArrayDim = 20; +const std::string kJstrTypeName = "constStr"; +constexpr uint32 kInvalidFieldNum = UINT32_MAX; +constexpr size_t kInvalidSize = UINT64_MAX; +#if MIR_FEATURE_FULL +extern bool VerifyPrimType(PrimType primType1, PrimType primType2); // verify if primType1 and primType2 match +extern PrimType GetExactPtrPrimType(); // return either PTY_a64 or PTY_a32 +extern uint32 GetPrimTypeSize(PrimType primType); // answer in bytes; 0 if unknown +extern uint32 GetPrimTypeP2Size(PrimType primType); // answer in bytes in power-of-two. +extern PrimType GetSignedPrimType(PrimType pty); // return signed version +extern PrimType GetUnsignedPrimType(PrimType pty); // return unsigned version +extern uint32 GetVecEleSize(PrimType primType); // element size of each lane in vector +extern uint32 GetVecLanes(PrimType primType); // lane size if vector +extern const char *GetPrimTypeName(PrimType primType); +extern const char *GetPrimTypeJavaName(PrimType primType); +extern int64 MinValOfSignedInteger(PrimType primType); +extern PrimType GetVecElemPrimType(PrimType primType); +constexpr uint32 k0BitSize = 0; +constexpr uint32 k1BitSize = 1; +constexpr uint32 k2BitSize = 2; +constexpr uint32 k3BitSize = 3; +constexpr uint32 k4BitSize = 4; +constexpr uint32 k5BitSize = 5; +constexpr uint32 k8BitSize = 8; +constexpr uint32 k9BitSize = 9; +constexpr uint32 k10BitSize = 10; +constexpr uint32 k16BitSize = 16; +constexpr uint32 k32BitSize = 32; +constexpr uint32 k64BitSize = 64; + +inline const std::string kDbgLong = "long."; +inline const std::string kDbgULong = "Ulong."; +inline const std::string kDbgLongDouble = "LongDouble."; + +inline uint32 GetPrimTypeBitSize(PrimType primType) { + // 1 byte = 8 bits = 2^3 bits + return GetPrimTypeSize(primType) << 3; +} + +inline uint32 GetAlignedPrimTypeBitSize(PrimType primType) { + auto size = GetPrimTypeBitSize(primType); + return size <= k32BitSize ? 
k32BitSize : k64BitSize; +} + +inline uint32 GetPrimTypeActualBitSize(PrimType primType) { + // GetPrimTypeSize(PTY_u1) will return 1, so we take it as a special case + if (primType == PTY_u1) { + return 1; + } + // 1 byte = 8 bits = 2^3 bits + return GetPrimTypeSize(primType) << 3; +} + +#endif // MIR_FEATURE_FULL +// return the same type with size increased to register size +PrimType GetRegPrimType(PrimType primType); +PrimType GetDynType(PrimType primType); +PrimType GetReg64PrimType(PrimType primType); +PrimType GetNonDynType(PrimType primType); +PrimType GetIntegerPrimTypeBySizeAndSign(size_t sizeBit, bool isSign); + +inline bool IsAddress(PrimType type) { + PrimitiveType primitiveType(type); + return primitiveType.IsAddress(); +} + +inline bool IsPossible64BitAddress(PrimType tp) { + return (tp == PTY_ptr || tp == PTY_ref || tp == PTY_u64 || tp == PTY_a64); +} + +inline bool IsPossible32BitAddress(PrimType tp) { + return (tp == PTY_ptr || tp == PTY_ref || tp == PTY_u32 || tp == PTY_a32); +} + +inline bool MustBeAddress(PrimType tp) { + return (tp == PTY_ptr || tp == PTY_ref || tp == PTY_a64 || tp == PTY_a32); +} + +inline bool IsPrimitivePureScalar(PrimType type) { + PrimitiveType primitiveType(type); + return primitiveType.IsInteger() && !primitiveType.IsAddress() && + !primitiveType.IsDynamic() && !primitiveType.IsVector(); +} + +inline bool IsPrimitiveUnsigned(PrimType type) { + PrimitiveType primitiveType(type); + return primitiveType.IsUnsigned(); +} + +inline bool IsUnsignedInteger(PrimType type) { + PrimitiveType primitiveType(type); + return IsPrimitiveUnsigned(type) && primitiveType.IsInteger() && !primitiveType.IsDynamic(); +} + +inline bool IsSignedInteger(PrimType type) { + PrimitiveType primitiveType(type); + return !primitiveType.IsUnsigned() && primitiveType.IsInteger() && !primitiveType.IsDynamic(); +} + +inline bool IsPrimitiveInteger(PrimType type) { + PrimitiveType primitiveType(type); + return primitiveType.IsInteger() && !primitiveType.IsDynamic() && !primitiveType.IsVector(); +} + +inline bool IsPrimitiveDynType(PrimType type) { + PrimitiveType primitiveType(type); + return primitiveType.IsDynamic(); +} + +inline bool IsPrimitiveDynInteger(PrimType type) { + PrimitiveType primitiveType(type); + return primitiveType.IsDynamic() && primitiveType.IsInteger(); +} + +inline bool IsPrimitiveDynFloat(PrimType type) { + PrimitiveType primitiveType(type); + return primitiveType.IsDynamic() && primitiveType.IsFloat(); +} + +inline bool IsPrimitiveFloat(PrimType type) { + PrimitiveType primitiveType(type); + return primitiveType.IsFloat() && !primitiveType.IsDynamic() && !primitiveType.IsVector(); +} + +inline bool IsPrimitiveScalar(PrimType type) { + PrimitiveType primitiveType(type); + return primitiveType.IsInteger() || primitiveType.IsFloat() || + (primitiveType.IsDynamic() && !primitiveType.IsDynamicNone()) || + primitiveType.IsSimple(); +} + +inline bool IsPrimitiveValid(PrimType type) { + PrimitiveType primitiveType(type); + return IsPrimitiveScalar(type) && !primitiveType.IsDynamicAny(); +} + +inline bool IsPrimitivePoint(PrimType type) { + PrimitiveType primitiveType(type); + return primitiveType.IsPointer(); +} + +inline bool IsPrimitiveVector(PrimType type) { + PrimitiveType primitiveType(type); + return primitiveType.IsVector(); +} + +inline bool IsPrimitiveVectorFloat(PrimType type) { + PrimitiveType primitiveType(type); + return primitiveType.IsVector() && primitiveType.IsFloat(); +} + +inline bool IsPrimitiveVectorInteger(PrimType type) { + PrimitiveType 
primitiveType(type); + return primitiveType.IsVector() && primitiveType.IsInteger(); +} + +inline bool IsPrimitiveUnSignedVector(PrimType type) { + PrimitiveType primitiveType(type); + return primitiveType.IsUnsigned() && primitiveType.IsVector(); +} + +bool IsNoCvtNeeded(PrimType toType, PrimType fromType); +bool NeedCvtOrRetype(PrimType origin, PrimType compared); + +uint8 GetPointerSize(); +uint8 GetP2Size(); +PrimType GetLoweredPtrType(); + +inline bool IsRefOrPtrAssign(PrimType toType, PrimType fromType) { + return (toType == PTY_ref && fromType == PTY_ptr) || (toType == PTY_ptr && fromType == PTY_ref); +} + +enum MIRTypeKind : std::uint8_t { + kTypeInvalid, + kTypeUnknown, + kTypeScalar, + kTypeBitField, + kTypeArray, + kTypeFArray, + kTypeJArray, + kTypeStruct, + kTypeUnion, + kTypeClass, + kTypeInterface, + kTypeStructIncomplete, + kTypeClassIncomplete, + kTypeConstString, + kTypeInterfaceIncomplete, + kTypePointer, + kTypeFunction, + kTypeVoid, + kTypeByName, // type definition not yet seen + kTypeParam, // to support java generics + kTypeInstantVector, // represent a vector of instantiation pairs + kTypeGenericInstant, // type to be formed by instantiation of a generic type +}; + +enum AttrKind : unsigned { +#define TYPE_ATTR +#define ATTR(STR) ATTR_##STR, +#include "all_attributes.def" +#undef ATTR +#undef TYPE_ATTR +}; + +class AttrBoundary { + public: + AttrBoundary() = default; + ~AttrBoundary() = default; + + bool operator==(const AttrBoundary &tA) const { + return lenExprHash == tA.lenExprHash && lenParamIdx == tA.lenParamIdx && isBytedLen == tA.isBytedLen; + } + + bool operator!=(const AttrBoundary &tA) const { + return !(*this == tA); + } + + bool operator<(const AttrBoundary &tA) const { + return lenExprHash < tA.lenExprHash && lenParamIdx < tA.lenParamIdx && + static_cast(isBytedLen) < static_cast(tA.isBytedLen); + } + + void SetLenExprHash(uint32 val) { + lenExprHash = val; + } + + uint32 GetLenExprHash() const { + return lenExprHash; + } + + void SetLenParamIdx(int8 idx) { + lenParamIdx = idx; + } + + int8 GetLenParamIdx() const { + return lenParamIdx; + } + + void SetIsBytedLen(bool flag) { + isBytedLen = flag; + } + + bool IsBytedLen() const { + return isBytedLen; + } + + void Clear() { + lenExprHash = 0; + lenParamIdx = -1; + isBytedLen = false; + } + + private: + bool isBytedLen = false; + int8 lenParamIdx = -1; + uint32 lenExprHash = 0; +}; + +class TypeAttrs { + public: + TypeAttrs() = default; + TypeAttrs(const TypeAttrs &ta) = default; + TypeAttrs &operator=(const TypeAttrs &t) = default; + ~TypeAttrs() = default; + + void SetAlignValue(uint8 align) { + attrAlign = align; + } + + uint8 GetAlignValue() const { + return attrAlign; + } + + void SetAttrFlag(uint64 flag) { + attrFlag = flag; + } + + uint64 GetAttrFlag() const { + return attrFlag; + } + + void SetAttr(AttrKind x) { + attrFlag |= (1ULL << static_cast(x)); + } + + void ResetAttr(AttrKind x) { + attrFlag &= ~(1ULL << static_cast(x)); + } + + bool GetAttr(AttrKind x) const { + return (attrFlag & (1ULL << static_cast(x))) != 0; + } + + void SetAlign(uint32 x) { + ASSERT((~(x - 1) & x) == x, "SetAlign called with non-power-of-2"); + attrAlign = 0; + while (x != 1) { + x >>= 1; + ++attrAlign; + } + } + + uint32 GetAlign() const { + if (attrAlign == 0) { + return 1; + } + uint32 res = 1; + uint32 exp = attrAlign; + do { + --exp; + res *= 2; + } while (exp != 0); + return res; + } + + bool operator==(const TypeAttrs &tA) const { + return attrFlag == tA.attrFlag && attrAlign == tA.attrAlign && attrBoundary == 
tA.attrBoundary; + } + + bool operator!=(const TypeAttrs &tA) const { + return !(*this == tA); + } + + void DumpAttributes() const; + + const AttrBoundary &GetAttrBoundary() const { + return attrBoundary; + } + + AttrBoundary &GetAttrBoundary() { + return attrBoundary; + } + + void AddAttrBoundary(const AttrBoundary &attr) { + if (attr.GetLenExprHash() != 0) { + attrBoundary.SetLenExprHash(attr.GetLenExprHash()); + } + if (attr.GetLenParamIdx() != -1) { + attrBoundary.SetLenParamIdx(attr.GetLenParamIdx()); + } + if (attr.IsBytedLen()) { + attrBoundary.SetIsBytedLen(attr.IsBytedLen()); + } + } + + void SetPack(uint32 pack) { + attrPack = pack; + } + + uint32 GetPack() const { + return attrPack; + } + + bool IsPacked() const { + return GetAttr(ATTR_pack); + } + + private: + uint64 attrFlag = 0; + uint8 attrAlign = 0; // alignment in bytes is 2 to the power of attrAlign + uint32 attrPack = -1; // -1 means inactive + AttrBoundary attrBoundary; // boundary attr for EnhanceC +}; + +enum FieldAttrKind { +#define FIELD_ATTR +#define ATTR(STR) FLDATTR_##STR, +#include "all_attributes.def" +#undef ATTR +#undef FIELD_ATTR +}; + +class FieldAttrs { + public: + FieldAttrs() = default; + FieldAttrs(const FieldAttrs &ta) = default; + FieldAttrs &operator=(const FieldAttrs &p) = default; + ~FieldAttrs() = default; + + void SetAlignValue(uint8 align) { + attrAlign = align; + } + + uint8 GetAlignValue() const { + return attrAlign; + } + + void SetAttrFlag(uint32 flag) { + attrFlag = flag; + } + + uint32 GetAttrFlag() const { + return attrFlag; + } + + void SetAttr(FieldAttrKind x) { + attrFlag |= (1u << static_cast(x)); + } + + bool GetAttr(FieldAttrKind x) const { + return (attrFlag & (1u << static_cast(x))) != 0; + } + + void SetAlign(uint32 x) { + ASSERT((~(x - 1) & x) == x, "SetAlign called with non-power-of-2"); + attrAlign = 0; + while (x != 1) { + x >>= 1; + ++attrAlign; + } + } + + uint32 GetAlign() const { + return 1U << attrAlign; + } + + bool operator==(const FieldAttrs &tA) const { + return attrFlag == tA.attrFlag && attrAlign == tA.attrAlign && attrBoundary == tA.attrBoundary; + } + + bool operator!=(const FieldAttrs &tA) const { + return !(*this == tA); + } + + bool operator<(const FieldAttrs &tA) const { + return attrFlag < tA.attrFlag && attrAlign < tA.attrAlign && attrBoundary < tA.attrBoundary; + } + + void Clear() { + attrFlag = 0; + attrAlign = 0; + attrBoundary.Clear(); + } + + void DumpAttributes() const; + TypeAttrs ConvertToTypeAttrs() const; + + const AttrBoundary &GetAttrBoundary() const { + return attrBoundary; + } + + AttrBoundary &GetAttrBoundary() { + return attrBoundary; + } + + bool IsPacked() const { + return GetAttr(FLDATTR_pack); + } + + private: + uint8 attrAlign = 0; // alignment in bytes is 2 to the power of attrAlign + uint32 attrFlag = 0; + AttrBoundary attrBoundary; +}; + +enum StmtAttrKind : unsigned { +#define STMT_ATTR +#define ATTR(STR) STMTATTR_##STR, +#include "all_attributes.def" +#undef ATTR +#undef STMT_ATTR +}; + +class StmtAttrs { + public: + StmtAttrs() = default; + StmtAttrs(const StmtAttrs &ta) = default; + StmtAttrs &operator=(const StmtAttrs &p) = default; + ~StmtAttrs() = default; + + void SetAttr(StmtAttrKind x) { + attrFlag |= (1u << static_cast(x)); + } + + bool GetAttr(StmtAttrKind x) const { + return (attrFlag & (1u << static_cast(x))) != 0; + } + + uint32 GetTargetAttrFlag(StmtAttrKind x) const { + return attrFlag & (1u << static_cast(x)); + } + + uint32 GetAttrFlag() const { + return attrFlag; + } + + void AppendAttr(uint32 flag) { + attrFlag 
|= flag; + } + + void Clear() { + attrFlag = 0; + } + + void DumpAttributes() const; + + private: + uint32 attrFlag = 0; +}; + +enum FuncAttrKind : unsigned { +#define FUNC_ATTR +#define ATTR(STR) FUNCATTR_##STR, +#include "all_attributes.def" +#undef ATTR +#undef FUNC_ATTR +}; + +class FuncAttrs { + public: + FuncAttrs() = default; + FuncAttrs(const FuncAttrs &ta) = default; + FuncAttrs &operator=(const FuncAttrs &p) = default; + ~FuncAttrs() = default; + + void SetAttr(FuncAttrKind x, bool unSet = false) { + if (!unSet) { + attrFlag |= (1ULL << x); + } else { + attrFlag &= ~(1ULL << x); + } + } + + void SetAliasFuncName(const std::string &name) { + aliasFuncName = name; + } + + const std::string &GetAliasFuncName() const { + return aliasFuncName; + } + + void SetPrefixSectionName(const std::string &name) { + prefixSectionName = name; + } + + const std::string &GetPrefixSectionName() const { + return prefixSectionName; + } + + void SetAttrFlag(uint64 flag) { + attrFlag = flag; + } + + uint64 GetAttrFlag() const { + return attrFlag; + } + + bool GetAttr(FuncAttrKind x) const { + return (attrFlag & (1ULL << x)) != 0; + } + + bool operator==(const FuncAttrs &tA) const { + return attrFlag == tA.attrFlag; + } + + bool operator!=(const FuncAttrs &tA) const { + return !(*this == tA); + } + + void DumpAttributes() const; + + const AttrBoundary &GetAttrBoundary() const { + return attrBoundary; + } + + AttrBoundary &GetAttrBoundary() { + return attrBoundary; + } + + void SetConstructorPriority(int priority) { + constructorPriority = priority; + } + + int GetConstructorPriority() const { + return constructorPriority; + } + + void SetDestructorPriority(int priority) { + destructorPriority = priority; + } + + int GetDestructorPriority() const { + return destructorPriority; + } + + private: + uint64 attrFlag = 0; + std::string aliasFuncName; + std::string prefixSectionName; + AttrBoundary attrBoundary; // ret boundary for EnhanceC + int constructorPriority = -1; // 0~65535, -1 means inactive + int destructorPriority = -1; // 0~65535, -1 means inactive +}; + +#if MIR_FEATURE_FULL +constexpr size_t kShiftNumOfTypeKind = 8; +constexpr size_t kShiftNumOfNameStrIdx = 6; +constexpr int32 kOffsetUnknown = INT_MAX; +constexpr int32 kOffsetMax = (INT_MAX - 1); +constexpr int32 kOffsetMin = INT_MIN; +struct OffsetType { + explicit OffsetType(int64 offset) { + Set(offset); + } + + OffsetType(const OffsetType &other) : val(other.val) {} + + ~OffsetType() = default; + + void Set(int64 offsetVal) { + val = (offsetVal >= kOffsetMin && offsetVal <= kOffsetMax) ? 
static_cast(offsetVal) + : kOffsetUnknown; + } + + bool IsInvalid() const { + return val == kOffsetUnknown; + } + + OffsetType &operator=(const OffsetType &other) { + val = other.val; + return *this; + } + + OffsetType operator+(int64 offset) const { + if (this->IsInvalid() || OffsetType(offset).IsInvalid()) { + return InvalidOffset(); + } + return OffsetType(val + offset); + } + + OffsetType operator+(OffsetType other) const { + return other + val; + } + + void operator+=(int64 offset) { + if (this->IsInvalid() || OffsetType(offset).IsInvalid()) { + val = kOffsetUnknown; + return; + } + Set(offset + val); + } + + void operator+=(OffsetType other) { + this->operator+=(other.val); + } + + OffsetType operator-() const { + if (this->IsInvalid()) { + return *this; + } + return OffsetType(-val); + } + + bool operator<(OffsetType other) const { + return val < other.val; + } + + bool operator==(OffsetType other) const { + return val == other.val; + } + + bool operator!=(OffsetType other) const { + return val != other.val; + } + + static OffsetType InvalidOffset() { + return OffsetType(kOffsetUnknown); + } + + int32 val = kOffsetUnknown; +}; + +class MIRStructType; // circular dependency exists, no other choice +class MIRFuncType; + +class MIRType { + public: + MIRType(MIRTypeKind kind, PrimType pType) : typeKind(kind), primType(pType) {} + + MIRType(MIRTypeKind kind, PrimType pType, GStrIdx strIdx) + : typeKind(kind), primType(pType), nameStrIdx(strIdx) {} + + virtual ~MIRType() = default; + + virtual void Dump(int indent, bool dontUseName = false) const; + virtual void DumpAsCxx(int indent) const; + virtual bool EqualTo(const MIRType &mirType) const; + virtual bool IsStructType() const { + return false; + } + + virtual MIRType *CopyMIRTypeNode() const { + return new MIRType(*this); + } + + PrimType GetPrimType() const { + return primType; + } + void SetPrimType(const PrimType pt) { + primType = pt; + } + + TyIdx GetTypeIndex() const { + return tyIdx; + } + void SetTypeIndex(TyIdx idx) { + tyIdx = idx; + } + + MIRTypeKind GetKind() const { + return typeKind; + } + void SetMIRTypeKind(MIRTypeKind kind) { + typeKind = kind; + } + + bool IsNameIsLocal() const { + return nameIsLocal; + } + void SetNameIsLocal(bool flag) { + nameIsLocal = flag; + } + + GStrIdx GetNameStrIdx() const { + return nameStrIdx; + } + void SetNameStrIdx(GStrIdx strIdx) { + nameStrIdx = strIdx; + } + void SetNameStrIdxItem(uint32 idx) { + nameStrIdx.reset(idx); + } + + virtual size_t GetSize() const { + return GetPrimTypeSize(primType); + } + + virtual uint32 GetAlign() const { + return GetPrimTypeSize(primType); + } + + virtual bool HasVolatileField() const { + return false; + } + + virtual bool HasTypeParam() const { + return false; + } + + virtual bool IsIncomplete() const { + return typeKind == kTypeStructIncomplete || typeKind == kTypeClassIncomplete || + typeKind == kTypeInterfaceIncomplete; + } + + bool IsVolatile(int fieldID) const; + + bool IsMIRPtrType() const { + return typeKind == kTypePointer; + } + + bool IsMIRStructType() const { + return (typeKind == kTypeStruct) || (typeKind == kTypeStructIncomplete); + } + + bool IsMIRIncompleteStructType() const { + return typeKind == kTypeStructIncomplete; + } + + bool IsMIRUnionType() const { + return typeKind == kTypeUnion; + } + + bool IsMIRClassType() const { + return (typeKind == kTypeClass) || (typeKind == kTypeClassIncomplete); + } + + bool IsMIRInterfaceType() const { + return (typeKind == kTypeInterface) || (typeKind == kTypeInterfaceIncomplete); + } + + bool 
IsInstanceOfMIRStructType() const { + return IsMIRStructType() || IsMIRClassType() || IsMIRInterfaceType(); + } + + bool IsMIRJarrayType() const { + return typeKind == kTypeJArray; + } + + bool IsMIRArrayType() const { + return typeKind == kTypeArray; + } + + bool IsMIRFuncType() const { + return typeKind == kTypeFunction; + } + + bool IsScalarType() const { + return typeKind == kTypeScalar; + } + + bool IsMIRTypeByName() const { + return typeKind == kTypeByName; + } + + bool IsMIRBitFieldType() const { + return typeKind == kTypeBitField; + } + + virtual bool IsUnsafeType() const { + return false; + } + virtual bool IsVoidPointer() const { + return false; + } + + bool ValidateClassOrInterface(const std::string &className, bool noWarning) const; + bool IsOfSameType(MIRType &type); + const std::string &GetName() const; + virtual std::string GetMplTypeName() const; + virtual std::string GetCompactMplTypeName() const; + virtual bool PointsToConstString() const; + virtual size_t GetHashIndex() const { + constexpr uint8 idxShift = 2; + return ((static_cast(primType) << idxShift) + (typeKind << kShiftNumOfTypeKind)) % kTypeHashLength; + } + + virtual bool HasFields() const { return false; } + // total number of field IDs the type is consisted of, excluding its own field ID + virtual uint32 NumberOfFieldIDs() const { return 0; } + // return any struct type directly embedded in this type + virtual MIRStructType *EmbeddedStructType() { return nullptr; } + + virtual int64 GetBitOffsetFromBaseAddr(FieldID fieldID) const { + (void)fieldID; + return 0; + } + + protected: + MIRTypeKind typeKind; + PrimType primType; + bool nameIsLocal = false; // needed when printing the type name + TyIdx tyIdx{ 0 }; + GStrIdx nameStrIdx{ 0 }; // name in global string table +}; + +class MIRPtrType : public MIRType { + public: + explicit MIRPtrType(TyIdx pTyIdx) : MIRType(kTypePointer, PTY_ptr), pointedTyIdx(pTyIdx) {} + + MIRPtrType(TyIdx pTyIdx, PrimType pty) : MIRType(kTypePointer, pty), pointedTyIdx(pTyIdx) {} + + MIRPtrType(PrimType primType, GStrIdx strIdx) : MIRType(kTypePointer, primType, strIdx), pointedTyIdx(0) {} + + ~MIRPtrType() override = default; + + MIRType *CopyMIRTypeNode() const override { + return new MIRPtrType(*this); + } + + MIRType *GetPointedType() const; + + TyIdx GetPointedTyIdx() const { + return pointedTyIdx; + } + void SetPointedTyIdx(TyIdx idx) { + pointedTyIdx = idx; + } + + TypeAttrs &GetTypeAttrs() { + return typeAttrs; + } + + const TypeAttrs &GetTypeAttrs() const { + return typeAttrs; + } + + void SetTypeAttrs(const TypeAttrs &attrs) { + typeAttrs = attrs; + } + + bool EqualTo(const MIRType &type) const override; + + bool HasTypeParam() const override; + bool IsPointedTypeVolatile(int fieldID) const; + bool IsUnsafeType() const override; + bool IsVoidPointer() const override; + + void Dump(int indent, bool dontUseName = false) const override; + size_t GetSize() const override; + uint32 GetAlign() const override; + TyIdxFieldAttrPair GetPointedTyIdxFldAttrPairWithFieldID(FieldID fldId) const; + TyIdx GetPointedTyIdxWithFieldID(FieldID fieldID) const; + size_t GetHashIndex() const override { + constexpr uint8 idxShift = 4; + constexpr uint8 attrShift = 3; + size_t hIdx = (static_cast(pointedTyIdx) << idxShift) + (typeKind << kShiftNumOfTypeKind); + hIdx += (typeAttrs.GetAttrFlag() << attrShift) + typeAttrs.GetAlignValue(); + return hIdx % kTypeHashLength; + } + bool IsFunctionPtr() const { + MIRType *pointedType = GetPointedType(); + if (pointedType->GetKind() == kTypeFunction) { + return 
true; + } + if (pointedType->GetKind() == kTypePointer) { + MIRPtrType *pointedPtrType = static_cast(pointedType); + return pointedPtrType->GetPointedType()->GetKind() == kTypeFunction; + } + return false; + } + + MIRFuncType *GetPointedFuncType() const; + + bool PointsToConstString() const override; + + std::string GetMplTypeName() const override; + + std::string GetCompactMplTypeName() const override; + private: + TyIdx pointedTyIdx; + TypeAttrs typeAttrs; +}; + +class MIRArrayType : public MIRType { + public: + MIRArrayType() : MIRType(kTypeArray, PTY_agg) {} + explicit MIRArrayType(GStrIdx strIdx) : MIRType(kTypeArray, PTY_agg, strIdx) {} + + MIRArrayType(TyIdx eTyIdx, const std::vector &sizeArray) + : MIRType(kTypeArray, PTY_agg), + eTyIdx(eTyIdx), + dim(sizeArray.size()) { + for (size_t i = 0; i < kMaxArrayDim; ++i) { + this->sizeArray[i] = (i < dim) ? sizeArray[i] : 0; + } + } + + MIRArrayType(const MIRArrayType &pat) = default; + MIRArrayType &operator=(const MIRArrayType &p) = default; + ~MIRArrayType() override = default; + + TyIdx GetElemTyIdx() const { + return eTyIdx; + } + void SetElemTyIdx(TyIdx idx) { + eTyIdx = idx; + } + + uint32 GetSizeArrayItem(uint32 n) const { + CHECK_FATAL((n >= 0 && n < kMaxArrayDim), "out of bound of array!"); + return sizeArray[n]; + } + void SetSizeArrayItem(uint32 idx, uint32 value) { + CHECK_FATAL((idx >= 0 && idx < kMaxArrayDim), "out of bound of array!"); + sizeArray[idx] = value; + } + + bool IsIncompleteArray() const { + return typeAttrs.GetAttr(ATTR_incomplete_array); + } + + bool EqualTo(const MIRType &type) const override; + + uint16 GetDim() const { + return dim; + } + void SetDim(uint16 newDim) { + this->dim = newDim; + } + + const TypeAttrs &GetTypeAttrs() const { + return typeAttrs; + } + + TypeAttrs &GetTypeAttrs() { + return typeAttrs; + } + + void SetTypeAttrs(const TypeAttrs &attrs) { + typeAttrs = attrs; + } + + MIRType *GetElemType() const; + + MIRType *CopyMIRTypeNode() const override { + return new MIRArrayType(*this); + } + + bool HasTypeParam() const override { + return GetElemType()->HasTypeParam(); + } + + void Dump(int indent, bool dontUseName) const override; + + size_t GetSize() const override; + uint32 GetAlign() const override; + + size_t GetHashIndex() const override { + constexpr uint8 idxShift = 2; + size_t hIdx = (static_cast(eTyIdx) << idxShift) + (typeKind << kShiftNumOfTypeKind); + for (size_t i = 0; i < dim; ++i) { + CHECK_FATAL(i < kMaxArrayDim, "array index out of range"); + hIdx += (sizeArray[i] << i); + } + constexpr uint8 attrShift = 3; + hIdx += (typeAttrs.GetAttrFlag() << attrShift) + typeAttrs.GetAlignValue(); + return hIdx % kTypeHashLength; + } + + int64 GetBitOffsetFromBaseAddr(FieldID fieldID) const override { + (void)fieldID; + return kOffsetUnknown; + } + int64 GetBitOffsetFromArrayAddress(std::vector &indexArray); + + std::string GetMplTypeName() const override; + std::string GetCompactMplTypeName() const override; + bool HasFields() const override; + uint32 NumberOfFieldIDs() const override; + MIRStructType *EmbeddedStructType() override; + size_t ElemNumber(); + + private: + TyIdx eTyIdx{ 0 }; + uint16 dim = 0; + std::array sizeArray{ {0} }; + TypeAttrs typeAttrs; + mutable uint32 fieldsNum = kInvalidFieldNum; + mutable size_t size = kInvalidSize; +}; + +// flexible array type, must be last field of a top-level struct +class MIRFarrayType : public MIRType { + public: + MIRFarrayType() : MIRType(kTypeFArray, PTY_agg), elemTyIdx(TyIdx(0)) {} + + explicit MIRFarrayType(TyIdx elemTyIdx) : 
MIRType(kTypeFArray, PTY_agg), elemTyIdx(elemTyIdx) {} + + explicit MIRFarrayType(GStrIdx strIdx) : MIRType(kTypeFArray, PTY_agg, strIdx), elemTyIdx(TyIdx(0)) {} + + ~MIRFarrayType() override = default; + + MIRType *CopyMIRTypeNode() const override { + return new MIRFarrayType(*this); + }; + + MIRType *GetElemType() const; + + bool HasTypeParam() const override { + return GetElemType()->HasTypeParam(); + } + + TyIdx GetElemTyIdx() const { + return elemTyIdx; + } + void SetElemtTyIdx(TyIdx idx) { + elemTyIdx = idx; + } + + bool EqualTo(const MIRType &type) const override; + void Dump(int indent, bool dontUseName = false) const override; + + size_t GetHashIndex() const override { + constexpr uint8 idxShift = 5; + return ((static_cast(elemTyIdx) << idxShift) + (typeKind << kShiftNumOfTypeKind)) % kTypeHashLength; + } + + std::string GetMplTypeName() const override; + std::string GetCompactMplTypeName() const override; + + bool HasFields() const override; + uint32 NumberOfFieldIDs() const override; + MIRStructType *EmbeddedStructType() override; + + int64 GetBitOffsetFromBaseAddr(FieldID fieldID) const override { + (void)fieldID; + return kOffsetUnknown; + } + + int64 GetBitOffsetFromArrayAddress(int64 arrayIndex) const; + + private: + TyIdx elemTyIdx; + mutable uint32 fieldsNum = kInvalidFieldNum; +}; + +using TyidxFuncAttrPair = std::pair; +using MethodPair = std::pair; +using MethodVector = std::vector; +using MethodPtrVector = std::vector; +using MIREncodedArray = std::vector; +class GenericDeclare; +class AnnotationType; +class GenericType; +// used by kTypeStruct, kTypeStructIncomplete, kTypeUnion +class MIRStructType : public MIRType { + public: + explicit MIRStructType(MIRTypeKind typeKind) : MIRType(typeKind, PTY_agg) {} + + MIRStructType(MIRTypeKind typeKind, GStrIdx strIdx) : MIRType(typeKind, PTY_agg, strIdx) {} + + ~MIRStructType() override = default; + + bool IsStructType() const override { + return true; + } + + FieldVector &GetFields() { + return fields; + } + const FieldVector &GetFields() const { + return fields; + } + void SetFields(const FieldVector &newFields) { + this->fields = newFields; + } + + const FieldPair &GetFieldsElemt(size_t n) const { + ASSERT(n < fields.size(), "array index out of range"); + return fields.at(n); + } + + FieldPair &GetFieldsElemt(size_t n) { + ASSERT(n < fields.size(), "array index out of range"); + return fields.at(n); + } + + size_t GetFieldsSize() const { + return fields.size(); + } + + const std::vector &GetFieldInferredTyIdx() const { + return fieldInferredTyIdx; + } + + FieldVector &GetStaticFields() { + return staticFields; + } + const FieldVector &GetStaticFields() const { + return staticFields; + } + + const FieldPair &GetStaticFieldsPair(size_t i) const { + return staticFields.at(i); + } + + GStrIdx GetStaticFieldsGStrIdx(size_t i) const { + return staticFields.at(i).first; + } + + FieldVector &GetParentFields() { + return parentFields; + } + void SetParentFields(const FieldVector &newParentFields) { + this->parentFields = newParentFields; + } + const FieldVector &GetParentFields() const { + return parentFields; + } + const FieldPair &GetParentFieldsElemt(size_t n) const { + ASSERT(n < parentFields.size(), "array index out of range"); + return parentFields.at(n); + } + size_t GetParentFieldsSize() const { + return parentFields.size(); + } + + MethodVector &GetMethods() { + return methods; + } + const MethodVector &GetMethods() const { + return methods; + } + + const MethodPair &GetMethodsElement(size_t n) const { + ASSERT(n < 
methods.size(), "array index out of range"); + return methods.at(n); + } + + MethodPtrVector &GetVTableMethods() { + return vTableMethods; + } + + const MethodPair *GetVTableMethodsElemt(size_t n) const { + ASSERT(n < vTableMethods.size(), "array index out of range"); + return vTableMethods.at(n); + } + + size_t GetVTableMethodsSize() const { + return vTableMethods.size(); + } + + const MethodPtrVector &GetItableMethods() const { + return iTableMethods; + } + + bool IsImported() const { + return isImported; + } + + void SetIsImported(bool flag) { + isImported = flag; + } + + bool IsUsed() const { + return isUsed; + } + + void SetIsUsed(bool flag) { + isUsed = flag; + } + + bool IsCPlusPlus() const { + return isCPlusPlus; + } + + void SetIsCPlusPlus(bool flag) { + isCPlusPlus = flag; + } + + GStrIdx GetFieldGStrIdx(FieldID id) const { + const FieldPair &fieldPair = TraverseToField(id); + return fieldPair.first; + } + + const TyIdxFieldAttrPair GetFieldTyIdxAttrPair(FieldID id) const { + return TraverseToField(id).second; + } + + TyIdxFieldAttrPair GetTyidxFieldAttrPair(size_t n) const { + return fields.at(n).second; + } + + TyIdx GetFieldTyIdx(FieldID id) const { + const FieldPair &fieldPair = TraverseToField(id); + return fieldPair.second.first; + } + + FieldAttrs GetFieldAttrs(FieldID id) const { + const FieldPair &fieldPair = TraverseToField(id); + return fieldPair.second.second; + } + + FieldAttrs GetFieldAttrs(GStrIdx fieldStrIdx) const { + const FieldPair &fieldPair = TraverseToField(fieldStrIdx); + return fieldPair.second.second; + } + + bool IsFieldVolatile(FieldID id) const { + const FieldPair &fieldPair = TraverseToField(id); + return fieldPair.second.second.GetAttr(FLDATTR_volatile); + } + + bool IsFieldFinal(FieldID id) const { + const FieldPair &fieldPair = TraverseToField(id); + return fieldPair.second.second.GetAttr(FLDATTR_final); + } + + bool IsFieldRCUnownedRef(FieldID id) const { + const FieldPair &fieldPair = TraverseToField(id); + return fieldPair.second.second.GetAttr(FLDATTR_rcunowned); + } + + bool IsFieldRCWeak(FieldID id) const { + const FieldPair &fieldPair = TraverseToField(id); + return fieldPair.second.second.GetAttr(FLDATTR_rcweak); + } + + bool IsFieldRestrict(FieldID id) const { + const FieldPair &fieldPair = TraverseToField(id); + return fieldPair.second.second.GetAttr(FLDATTR_restrict); + } + + bool IsOwnField(FieldID id) const { + const FieldPair &fieldPair = TraverseToField(id); + return std::find(fields.begin(), fields.end(), fieldPair) != fields.end(); + } + + TypeAttrs &GetTypeAttrs() { + return typeAttrs; + } + + const TypeAttrs &GetTypeAttrs() const { + return typeAttrs; + } + + void SetTypeAttrs(const TypeAttrs &attrs) { + typeAttrs = attrs; + } + + bool HasVolatileField() const override; + bool HasTypeParam() const override; + bool EqualTo(const MIRType &type) const override; + MIRType *CopyMIRTypeNode() const override { + return new MIRStructType(*this); + } + + TyIdx GetElemTyIdx(size_t n) const { + ASSERT(n < fields.size(), "array index out of range"); + return fields.at(n).second.first; + } + + void SetElemtTyIdxSimple(size_t n, TyIdx tyIdx) { + ASSERT(n < fields.size(), "array index out of range"); + fields.at(n).second.first = tyIdx; + } + + TyIdx GetStaticElemtTyIdx(size_t n) const { + ASSERT(n < staticFields.size(), "array index out of range"); + return staticFields.at(n).second.first; + } + + void SetStaticElemtTyIdx(size_t n, TyIdx tyIdx) { + staticFields.at(n).second.first = tyIdx; + } + + void SetMethodTyIdx(size_t n, TyIdx tyIdx) { + 
ASSERT(n < methods.size(), "array index out of range"); + methods.at(n).second.first = tyIdx; + } + + MIRType *GetElemType(uint32 n) const; + + MIRType *GetFieldType(FieldID fieldID); + + void SetElemtTyIdx(size_t n, TyIdx tyIdx) { + ASSERT(n < fields.size(), "array index out of range"); + fields.at(n).second = TyIdxFieldAttrPair(tyIdx, FieldAttrs()); + } + + GStrIdx GetElemStrIdx(size_t n) const { + ASSERT(n < fields.size(), "array index out of range"); + return fields.at(n).first; + } + + void SetElemStrIdx(size_t n, GStrIdx idx) { + ASSERT(n < fields.size(), "array index out of range"); + fields.at(n).first = idx; + } + + void SetElemInferredTyIdx(size_t n, TyIdx tyIdx) { + if (n >= fieldInferredTyIdx.size()) { + (void)fieldInferredTyIdx.insert(fieldInferredTyIdx.end(), n + 1 - fieldInferredTyIdx.size(), kInitTyIdx); + } + ASSERT(n < fieldInferredTyIdx.size(), "array index out of range"); + fieldInferredTyIdx.at(n) = tyIdx; + } + + TyIdx GetElemInferredTyIdx(size_t n) { + if (n >= fieldInferredTyIdx.size()) { + (void)fieldInferredTyIdx.insert(fieldInferredTyIdx.end(), n + 1 - fieldInferredTyIdx.size(), kInitTyIdx); + } + ASSERT(n < fieldInferredTyIdx.size(), "array index out of range"); + return fieldInferredTyIdx.at(n); + } + + void DumpFieldsAndMethods(int indent, bool hasMethod) const; + void Dump(int indent, bool dontUseName = false) const override; + + virtual void SetComplete() { + typeKind = (typeKind == kTypeUnion) ? typeKind : kTypeStruct; + } + + // only meaningful for MIRClassType and MIRInterface types + bool IsLocal() const; + + size_t GetSize() const override; + uint32 GetAlign() const override; + + size_t GetHashIndex() const override { + constexpr uint8 attrShift = 3; + return ((static_cast(nameStrIdx) << kShiftNumOfNameStrIdx) + (typeKind << kShiftNumOfTypeKind) + + ((typeAttrs.GetAttrFlag() << attrShift) + typeAttrs.GetAlignValue())) % kTypeHashLength; + } + + virtual void ClearContents() { + fields.clear(); + staticFields.clear(); + parentFields.clear(); + methods.clear(); + vTableMethods.clear(); + iTableMethods.clear(); + isImported = false; + isUsed = false; + hasVolatileField = false; + hasVolatileFieldSet = false; + } + + virtual const std::vector &GetInfo() const { + CHECK_FATAL(false, "can not use GetInfo"); + } + + virtual const MIRInfoPair &GetInfoElemt(size_t) const { + CHECK_FATAL(false, "can not use GetInfoElemt"); + } + + virtual const std::vector &GetInfoIsString() const { + CHECK_FATAL(false, "can not use GetInfoIsString"); + } + + virtual bool GetInfoIsStringElemt(size_t) const { + CHECK_FATAL(false, "can not use GetInfoIsStringElemt"); + } + + virtual const std::vector &GetPragmaVec() const { + CHECK_FATAL(false, "can not use GetPragmaVec"); + } + + virtual std::vector &GetPragmaVec() { + CHECK_FATAL(false, "can not use GetPragmaVec"); + } + + std::vector& GetGenericDeclare() { + return genericDeclare; + } + + void AddClassGenericDeclare(GenericDeclare *gd) { + genericDeclare.push_back(gd); + } + + void AddFieldGenericDeclare(const GStrIdx &g, AnnotationType *a) { + if (fieldGenericDeclare.find(g) != fieldGenericDeclare.end()) { + CHECK_FATAL(fieldGenericDeclare[g] == a, "MUST BE"); + } + fieldGenericDeclare[g] = a; + } + + AnnotationType *GetFieldGenericDeclare(const GStrIdx &g) { + if (fieldGenericDeclare.find(g) == fieldGenericDeclare.end()) { + return nullptr; + } + return fieldGenericDeclare[g]; + } + + void AddInheritaceGeneric(GenericType *a) { + inheritanceGeneric.push_back(a); + } + + std::vector &GetInheritanceGeneric() { + return 
inheritanceGeneric; + } + + virtual const MIREncodedArray &GetStaticValue() const { + CHECK_FATAL(false, "can not use GetStaticValue"); + } + + virtual void PushbackMIRInfo(const MIRInfoPair&) { + CHECK_FATAL(false, "can not use PushbackMIRInfo"); + } + + virtual void PushbackPragma(MIRPragma*) { + CHECK_FATAL(false, "can not use PushbackPragma"); + } + + virtual void PushbackStaticValue(EncodedValue&) { + CHECK_FATAL(false, "can not use PushbackStaticValue"); + } + + virtual void PushbackIsString(bool) { + CHECK_FATAL(false, "can not use PushbackIsString"); + } + + bool HasFields() const override { return true; } + uint32 NumberOfFieldIDs() const override; + MIRStructType *EmbeddedStructType() override { return this; } + + virtual FieldPair TraverseToFieldRef(FieldID &fieldID) const; + std::string GetMplTypeName() const override; + std::string GetCompactMplTypeName() const override; + FieldPair TraverseToField(FieldID fieldID) const ; + + int64 GetBitOffsetFromBaseAddr(FieldID fieldID) const override; + + bool HasPadding() const; + + void SetAlias(MIRAlias *mirAlias) { + alias = mirAlias; + } + MIRAlias *GetAlias() const { + return alias; + } + + protected: + FieldVector fields{}; + std::vector fieldInferredTyIdx{}; + FieldVector staticFields{}; + FieldVector parentFields{}; // fields belong to the ancestors not fully defined + MethodVector methods{}; // for the list of member function prototypes + MethodPtrVector vTableMethods{}; // the list of implmentation for all virtual functions for this type + MethodPtrVector iTableMethods{}; // the list of all interface functions for this type; For classes, they are + // implementation functions, For interfaces, they are abstact functions. + // Weak indicates the actual definition is in another module. + bool isImported = false; + bool isUsed = false; + bool isCPlusPlus = false; // empty struct in C++ has size 1 byte + mutable bool hasVolatileField = false; // for caching computed value + mutable bool hasVolatileFieldSet = false; // if true, just read hasVolatileField; + // otherwise compute to initialize hasVolatileField + std::vector genericDeclare; + std::map fieldGenericDeclare; + std::vector inheritanceGeneric; + TypeAttrs typeAttrs; + mutable uint32 fieldsNum = kInvalidFieldNum; + mutable size_t size = kInvalidSize; + + private: + FieldPair TraverseToField(GStrIdx fieldStrIdx) const ; + bool HasVolatileFieldInFields(const FieldVector &fieldsOfStruct) const; + bool HasTypeParamInFields(const FieldVector &fieldsOfStruct) const; + int64 GetBitOffsetFromUnionBaseAddr(FieldID fieldID) const; + int64 GetBitOffsetFromStructBaseAddr(FieldID fieldID) const; + MIRAlias *alias = nullptr; +}; + +// java array type, must not be nested inside another aggregate +class MIRJarrayType : public MIRFarrayType { + public: + MIRJarrayType() { + typeKind = kTypeJArray; + }; + + explicit MIRJarrayType(TyIdx elemTyIdx) : MIRFarrayType(elemTyIdx) { + typeKind = kTypeJArray; + } + + explicit MIRJarrayType(GStrIdx strIdx) : MIRFarrayType(strIdx) { + typeKind = kTypeJArray; + } + + ~MIRJarrayType() override = default; + + MIRType *CopyMIRTypeNode() const override { + return new MIRJarrayType(*this); + } + + MIRStructType *GetParentType(); + const std::string &GetJavaName(); + + bool IsPrimitiveArray() { + if (javaNameStrIdx == 0u) { + DetermineName(); + } + return fromPrimitive; + } + + int GetDim() { + if (javaNameStrIdx == 0u) { + DetermineName(); + } + return dim; + } + + size_t GetHashIndex() const override { + constexpr uint8 idxShift = 5; + return 
((static_cast(GetElemTyIdx()) << idxShift) + (typeKind << kShiftNumOfTypeKind)) % kTypeHashLength; + } + + private: + void DetermineName(); // determine the internal name of this type + TyIdx parentTyIdx{ 0 }; // since Jarray is also an object, this is java.lang.Object + GStrIdx javaNameStrIdx{ 0 }; // for internal java name of Jarray. nameStrIdx is used for other purpose + bool fromPrimitive = false; // the lowest dimension is primitive type + int dim = 0; // the dimension if decidable at compile time. otherwise 0 +}; + +// used by kTypeClass, kTypeClassIncomplete +class MIRClassType : public MIRStructType { + public: + explicit MIRClassType(MIRTypeKind tKind) : MIRStructType(tKind) {} + MIRClassType(MIRTypeKind tKind, GStrIdx strIdx) : MIRStructType(tKind, strIdx) {} + ~MIRClassType() override = default; + + bool EqualTo(const MIRType &type) const override; + + MIRType *CopyMIRTypeNode() const override { + return new MIRClassType(*this); + } + + const std::vector &GetInfo() const override { + return info; + } + void PushbackMIRInfo(const MIRInfoPair &pair) override { + info.push_back(pair); + } + uint32 GetInfo(const std::string &infoStr) const; + uint32 GetInfo(GStrIdx strIdx) const; + size_t GetInfoSize() const { + return info.size(); + } + + const MIRInfoPair &GetInfoElemt(size_t n) const override { + ASSERT(n < info.size(), "array index out of range"); + return info.at(n); + } + + const std::vector &GetInfoIsString() const override { + return infoIsString; + } + + void PushbackIsString(bool isString) override { + infoIsString.push_back(isString); + } + + size_t GetInfoIsStringSize() const { + return infoIsString.size(); + } + + bool GetInfoIsStringElemt(size_t n) const override { + ASSERT(n < infoIsString.size(), "array index out of range"); + return infoIsString.at(n); + } + + std::vector &GetPragmaVec() override { + return pragmaVec; + } + const std::vector &GetPragmaVec() const override { + return pragmaVec; + } + void PushbackPragma(MIRPragma *pragma) override { + pragmaVec.push_back(pragma); + } + + const MIREncodedArray &GetStaticValue() const override { + return staticValue; + } + void PushbackStaticValue(EncodedValue &encodedValue) override { + staticValue.push_back(encodedValue); + } + + TyIdx GetParentTyIdx() const { + return parentTyIdx; + } + void SetParentTyIdx(TyIdx idx) { + parentTyIdx = idx; + } + + std::vector &GetInterfaceImplemented() { + return interfacesImplemented; + } + const std::vector &GetInterfaceImplemented() const { + return interfacesImplemented; + } + TyIdx GetNthInterfaceImplemented(size_t i) const { + ASSERT(i < interfacesImplemented.size(), "array index out of range"); + return interfacesImplemented.at(i); + } + + void SetNthInterfaceImplemented(size_t i, TyIdx tyIdx) { + ASSERT(i < interfacesImplemented.size(), "array index out of range"); + interfacesImplemented.at(i) = tyIdx; + } + void PushbackInterfaceImplemented(TyIdx idx) { + interfacesImplemented.push_back(idx); + } + + void Dump(int indent, bool dontUseName = false) const override; + void DumpAsCxx(int indent) const override; + void SetComplete() override { + typeKind = kTypeClass; + } + + bool IsFinal() const; + bool IsAbstract() const; + bool IsInner() const; + bool HasVolatileField() const override; + bool HasTypeParam() const override; + FieldPair TraverseToFieldRef(FieldID &fieldID) const override; + size_t GetSize() const override; + + FieldID GetLastFieldID() const; + FieldID GetFirstFieldID() const { + return GetLastFieldID() - fields.size() + 1; + } + + FieldID 
GetFirstLocalFieldID() const; + // return class id or superclass id accroding to input string + MIRClassType *GetExceptionRootType(); + const MIRClassType *GetExceptionRootType() const; + bool IsExceptionType() const; + void AddImplementedInterface(TyIdx interfaceTyIdx) { + if (std::find(interfacesImplemented.begin(), interfacesImplemented.end(), interfaceTyIdx) == + interfacesImplemented.end()) { + interfacesImplemented.push_back(interfaceTyIdx); + } + } + + void ClearContents() override { + MIRStructType::ClearContents(); + parentTyIdx = TyIdx(0); + interfacesImplemented.clear(); // for the list of interfaces the class implements + info.clear(); + infoIsString.clear(); + pragmaVec.clear(); + staticValue.clear(); + } + + size_t GetHashIndex() const override { + return ((static_cast(nameStrIdx) << kShiftNumOfNameStrIdx) + (typeKind << kShiftNumOfTypeKind)) % + kTypeHashLength; + } + + uint32 NumberOfFieldIDs() const override; + + private: + TyIdx parentTyIdx{ 0 }; + std::vector interfacesImplemented{}; // for the list of interfaces the class implements + std::vector info{}; + std::vector infoIsString{}; + std::vector pragmaVec{}; + MIREncodedArray staticValue{}; // DELETE THIS +}; + +// used by kTypeInterface, kTypeInterfaceIncomplete +class MIRInterfaceType : public MIRStructType { + public: + explicit MIRInterfaceType(MIRTypeKind tKind) : MIRStructType(tKind) {} + MIRInterfaceType(MIRTypeKind tKind, GStrIdx strIdx) : MIRStructType(tKind, strIdx) {} + ~MIRInterfaceType() override = default; + + bool EqualTo(const MIRType &type) const override; + + MIRType *CopyMIRTypeNode() const override { + return new MIRInterfaceType(*this); + } + + const std::vector &GetInfo() const override { + return info; + } + void PushbackMIRInfo(const MIRInfoPair &pair) override { + info.push_back(pair); + } + uint32 GetInfo(const std::string &infoStr) const; + uint32 GetInfo(GStrIdx strIdx) const; + size_t GetInfoSize() const { + return info.size(); + } + + const MIRInfoPair &GetInfoElemt(size_t n) const override { + ASSERT(n < info.size(), "array index out of range"); + return info.at(n); + } + + const std::vector &GetInfoIsString() const override { + return infoIsString; + } + void PushbackIsString(bool isString) override { + infoIsString.push_back(isString); + } + size_t GetInfoIsStringSize() const { + return infoIsString.size(); + } + bool GetInfoIsStringElemt(size_t n) const override { + ASSERT(n < infoIsString.size(), "array index out of range"); + return infoIsString.at(n); + } + + std::vector &GetPragmaVec() override { + return pragmaVec; + } + const std::vector &GetPragmaVec() const override { + return pragmaVec; + } + void PushbackPragma(MIRPragma *pragma) override { + pragmaVec.push_back(pragma); + } + + const MIREncodedArray &GetStaticValue() const override { + return staticValue; + } + void PushbackStaticValue(EncodedValue &encodedValue) override { + staticValue.push_back(encodedValue); + } + + std::vector &GetParentsTyIdx() { + return parentsTyIdx; + } + void SetParentsTyIdx(const std::vector &parents) { + parentsTyIdx = parents; + } + const std::vector &GetParentsTyIdx() const { + return parentsTyIdx; + } + + TyIdx GetParentsElementTyIdx(size_t i) const { + ASSERT(i < parentsTyIdx.size(), "array index out of range"); + return parentsTyIdx[i]; + } + + void SetParentsElementTyIdx(size_t i, TyIdx tyIdx) { + ASSERT(i < parentsTyIdx.size(), "array index out of range"); + parentsTyIdx[i] = tyIdx; + } + + void Dump(int indent, bool dontUseName = false) const override; + bool HasVolatileField() const 
override; + bool HasTypeParam() const override; + FieldPair TraverseToFieldRef(FieldID &fieldID) const override; + void SetComplete() override { + typeKind = kTypeInterface; + } + + size_t GetSize() const override; + + void ClearContents() override { + MIRStructType::ClearContents(); + parentsTyIdx.clear(); + info.clear(); + infoIsString.clear(); + pragmaVec.clear(); + staticValue.clear(); + } + + size_t GetHashIndex() const override { + return ((static_cast(nameStrIdx) << kShiftNumOfNameStrIdx) + (typeKind << kShiftNumOfTypeKind)) % + kTypeHashLength; + } + + bool HasFields() const override { return false; } + uint32 NumberOfFieldIDs() const override { return 0; } + MIRStructType *EmbeddedStructType() override { return nullptr; } + + private: + std::vector parentsTyIdx{}; // multiple inheritence + std::vector info{}; + std::vector infoIsString{}; + std::vector pragmaVec{}; + MIREncodedArray staticValue{}; // DELETE THIS +}; + + +class MIRBitFieldType : public MIRType { + public: + MIRBitFieldType(uint8 field, PrimType pt) : MIRType(kTypeBitField, pt), fieldSize(field) {} + MIRBitFieldType(uint8 field, PrimType pt, GStrIdx strIdx) : MIRType(kTypeBitField, pt, strIdx), fieldSize(field) {} + ~MIRBitFieldType() override = default; + + uint8 GetFieldSize() const { + return fieldSize; + } + + bool EqualTo(const MIRType &type) const override; + void Dump(int indent, bool dontUseName = false) const override; + MIRType *CopyMIRTypeNode() const override { + return new MIRBitFieldType(*this); + } + + size_t GetSize() const override { + if (fieldSize == 0) { + return 0; + } else if (fieldSize <= 8) { + return 1; + } else { + return (fieldSize + 7) / 8; + } + } // size not be in bytes + + uint32 GetAlign() const override { + return 0; + } // align not be in bytes + + size_t GetHashIndex() const override { + return ((static_cast(primType) << fieldSize) + (typeKind << kShiftNumOfTypeKind)) % kTypeHashLength; + } + + private: + uint8 fieldSize; +}; + +class MIRFuncType : public MIRType { + public: + MIRFuncType() : MIRType(kTypeFunction, PTY_ptr) {} + + explicit MIRFuncType(const GStrIdx &strIdx) + : MIRType(kTypeFunction, PTY_ptr, strIdx) {} + + MIRFuncType(const TyIdx &retTyIdx, const std::vector &vecTy, const std::vector &vecAt, + const TypeAttrs &retAttrsIn) + : MIRType(kTypeFunction, PTY_ptr), + retTyIdx(retTyIdx), + paramTypeList(vecTy), + paramAttrsList(vecAt), + retAttrs(retAttrsIn) {} + + ~MIRFuncType() override = default; + + bool EqualTo(const MIRType &type) const override; + bool CompatibleWith(const MIRType &type) const; + MIRType *CopyMIRTypeNode() const override { + return new MIRFuncType(*this); + } + + void Dump(int indent, bool dontUseName = false) const override; + size_t GetSize() const override { + return 0; + } // size unknown + + TyIdx GetRetTyIdx() const { + return retTyIdx; + } + + void SetRetTyIdx(TyIdx idx) { + retTyIdx = idx; + } + + const std::vector &GetParamTypeList() const { + return paramTypeList; + } + + std::vector &GetParamTypeList() { + return paramTypeList; + } + + TyIdx GetNthParamType(size_t i) const { + ASSERT(i < paramTypeList.size(), "array index out of range"); + return paramTypeList[i]; + } + + void SetParamTypeList(const std::vector &list) { + paramTypeList.clear(); + (void)paramTypeList.insert(paramTypeList.begin(), list.begin(), list.end()); + } + + const std::vector &GetParamAttrsList() const { + return paramAttrsList; + } + + std::vector &GetParamAttrsList() { + return paramAttrsList; + } + + const TypeAttrs &GetNthParamAttrs(size_t i) const { + ASSERT(i 
< paramAttrsList.size(), "array index out of range"); + return paramAttrsList[i]; + } + + TypeAttrs &GetNthParamAttrs(size_t i) { + ASSERT(i < paramAttrsList.size(), "array index out of range"); + return paramAttrsList[i]; + } + + void SetParamAttrsList(const std::vector &list) { + paramAttrsList.clear(); + (void)paramAttrsList.insert(paramAttrsList.begin(), list.begin(), list.end()); + } + + void SetNthParamAttrs(size_t i, const TypeAttrs &attrs) { + ASSERT(i < paramAttrsList.size(), "array index out of range"); + paramAttrsList[i] = attrs; + } + + bool IsVarargs() const { + return funcAttrs.GetAttr(FUNCATTR_varargs); + } + + void SetVarArgs() { + funcAttrs.SetAttr(FUNCATTR_varargs); + } + + bool FirstArgReturn() const { + return funcAttrs.GetAttr(FUNCATTR_firstarg_return); + } + + void SetFirstArgReturn() { + funcAttrs.SetAttr(FUNCATTR_firstarg_return); + } + + const TypeAttrs &GetRetAttrs() const { + return retAttrs; + } + + TypeAttrs &GetRetAttrs() { + return retAttrs; + } + + void SetRetAttrs(const TypeAttrs &attrs) { + retAttrs = attrs; + } + + size_t GetHashIndex() const override { + constexpr uint8 idxShift = 6; + size_t hIdx = (static_cast(retTyIdx) << idxShift) + (typeKind << kShiftNumOfTypeKind); + size_t size = paramTypeList.size(); + hIdx += (size != 0 ? (static_cast(paramTypeList[0]) + size) : 0) << 4; // shift bit is 4 + return hIdx % kTypeHashLength; + } + FuncAttrs funcAttrs; + private: + TyIdx retTyIdx{ 0 }; + std::vector paramTypeList; + std::vector paramAttrsList; + TypeAttrs retAttrs; +}; + +class MIRTypeByName : public MIRType { + // use nameStrIdx to store the name for both local and global + public: + explicit MIRTypeByName(GStrIdx gStrIdx) : MIRType(kTypeByName, PTY_void) { + nameStrIdx = gStrIdx; + } + + ~MIRTypeByName() override = default; + + MIRType *CopyMIRTypeNode() const override { + return new MIRTypeByName(*this); + } + + bool EqualTo(const MIRType &type) const override; + + void Dump(int indent, bool dontUseName = false) const override; + size_t GetSize() const override { + return 0; + } // size unknown + + size_t GetHashIndex() const override { + constexpr uint8 idxShift = 2; + uint8 nameIsLocalValue = nameIsLocal ? 
1 : 0; + return ((static_cast(nameStrIdx) << idxShift) + nameIsLocalValue + (typeKind << kShiftNumOfTypeKind)) % + kTypeHashLength; + } +}; + +class MIRTypeParam : public MIRType { + // use nameStrIdx to store the name + public: + explicit MIRTypeParam(GStrIdx gStrIdx) : MIRType(kTypeParam, PTY_gen) { + nameStrIdx = gStrIdx; + } + + ~MIRTypeParam() override = default; + + MIRType *CopyMIRTypeNode() const override { + return new MIRTypeParam(*this); + } + + bool EqualTo(const MIRType &type) const override; + void Dump(int indent, bool dontUseName = false) const override; + size_t GetSize() const override { + return 0; + } // size unknown + + bool HasTypeParam() const override { + return true; + } + + size_t GetHashIndex() const override { + constexpr uint8 idxShift = 3; + return ((static_cast(nameStrIdx) << idxShift) + (typeKind << kShiftNumOfTypeKind)) % kTypeHashLength; + } +}; + +using TypePair = std::pair; +using GenericInstantVector = std::vector; +class MIRInstantVectorType : public MIRType { + public: + MIRInstantVectorType() : MIRType(kTypeInstantVector, PTY_agg) {} + + explicit MIRInstantVectorType(MIRTypeKind kind) : MIRType(kind, PTY_agg) {} + + MIRInstantVectorType(MIRTypeKind kind, GStrIdx strIdx) : MIRType(kind, PTY_agg, strIdx) {} + + ~MIRInstantVectorType() override = default; + + MIRType *CopyMIRTypeNode() const override { + return new MIRInstantVectorType(*this); + } + + bool EqualTo(const MIRType &type) const override; + void Dump(int indent, bool dontUseName = false) const override; + size_t GetSize() const override { + return 0; + } // size unknown + + const GenericInstantVector &GetInstantVec() const { + return instantVec; + } + + GenericInstantVector &GetInstantVec() { + return instantVec; + } + + void AddInstant(TypePair typePair) { + instantVec.push_back(typePair); + } + + size_t GetHashIndex() const override { + constexpr uint8 idxShift = 3; + uint32 hIdx = typeKind << kShiftNumOfTypeKind; + for (const TypePair &typePair : instantVec) { + hIdx += static_cast(typePair.first + typePair.second) << idxShift; + } + return hIdx % kTypeHashLength; + } + + protected: + GenericInstantVector instantVec{}; // in each pair, first is generic type, second is real type +}; + +class MIRGenericInstantType : public MIRInstantVectorType { + public: + explicit MIRGenericInstantType(TyIdx genTyIdx) + : MIRInstantVectorType(kTypeGenericInstant), genericTyIdx(genTyIdx) {} + + explicit MIRGenericInstantType(GStrIdx strIdx) + : MIRInstantVectorType(kTypeGenericInstant, strIdx), genericTyIdx(0) {} + + ~MIRGenericInstantType() override = default; + + MIRType *CopyMIRTypeNode() const override { + return new MIRGenericInstantType(*this); + } + + bool EqualTo(const MIRType &type) const override; + void Dump(int indent, bool dontUseName = false) const override; + + size_t GetSize() const override { + return 0; + } // size unknown + + TyIdx GetGenericTyIdx() const { + return genericTyIdx; + } + void SetGenericTyIdx(TyIdx idx) { + genericTyIdx = idx; + } + + size_t GetHashIndex() const override { + constexpr uint8 idxShift = 2; + uint32 hIdx = (static_cast(genericTyIdx) << idxShift) + (typeKind << kShiftNumOfTypeKind); + for (const TypePair &typePair : instantVec) { + hIdx += static_cast(typePair.first + typePair.second) << 3; // shift bit is 3 + } + return hIdx % kTypeHashLength; + } + + private: + TyIdx genericTyIdx; // the generic type to be instantiated +}; + +MIRType *GetElemType(const MIRType &arrayType); +#endif // MIR_FEATURE_FULL +} // namespace maple + +#define 
LOAD_SAFE_CAST_FOR_MIR_TYPE +#include "ir_safe_cast_traits.def" + +#endif // MAPLE_IR_INCLUDE_MIR_TYPE_H diff --git a/src/mapleall/maple_ir/include/mpl2mpl_options.h b/src/mapleall/maple_ir/include/mpl2mpl_options.h new file mode 100644 index 0000000000000000000000000000000000000000..9615ec85f9a99e49cf7f5cf3b691020242419c0c --- /dev/null +++ b/src/mapleall/maple_ir/include/mpl2mpl_options.h @@ -0,0 +1,127 @@ +/* + * Copyright (c) [2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ + +#ifndef MAPLE_IR_INCLUDE_MPL2MPL_OPTION_H +#define MAPLE_IR_INCLUDE_MPL2MPL_OPTION_H + +#include "cl_option.h" +#include "cl_parser.h" + +#include +#include +#include + +namespace opts::mpl2mpl { + +extern maplecl::Option dumpPhase; +extern maplecl::Option skipPhase; +extern maplecl::Option skipFrom; +extern maplecl::Option skipAfter; +extern maplecl::Option dumpFunc; +extern maplecl::Option quiet; +extern maplecl::Option mapleLinker; +extern maplecl::Option regNativeFunc; + +extern maplecl::Option inlineWithProfile; +extern maplecl::Option inlineOpt; +extern maplecl::Option ipaClone; +extern maplecl::Option ginlineOpt; +extern maplecl::Option noInlineFunc; +extern maplecl::Option importFileList; +extern maplecl::Option crossModuleInline; +extern maplecl::Option inlineSmallFunctionThreshold; +extern maplecl::Option inlineHotFunctionThreshold; +extern maplecl::Option inlineRecursiveFunctionThreshold; +extern maplecl::Option inlineDepth; +extern maplecl::Option inlineModuleGrow; +extern maplecl::Option inlineColdFuncThresh; +extern maplecl::Option respectAlwaysInline; +extern maplecl::Option ginlineMaxNondeclaredInlineCallee; +extern maplecl::Option ginlineMaxDepthIgnoreGrowthLimit; +extern maplecl::Option ginlineSmallFunc; +extern maplecl::Option ginlineRelaxSmallFuncDecalredInline; +extern maplecl::Option ginlineRelaxSmallFuncCanbeRemoved; +extern maplecl::Option callsiteProfilePath; +extern maplecl::Option inlineToAllCallers; +extern maplecl::Option ginlineAllowNondeclaredInlineSizeGrow; +extern maplecl::Option ginlineAllowIgnoreGrowthLimit; + +extern maplecl::Option profileHotCount; +extern maplecl::Option profileColdCount; +extern maplecl::Option profileHotRate; +extern maplecl::Option profileColdRate; +extern maplecl::Option nativeWrapper; +extern maplecl::Option regNativeDynamicOnly; +extern maplecl::Option staticBindingList; +extern maplecl::Option dumpBefore; +extern maplecl::Option dumpAfter; +extern maplecl::Option dumpMuid; +extern maplecl::Option emitVtableImpl; + +#if MIR_JAVA +extern maplecl::Option skipVirtual; +#endif + +extern maplecl::Option userc; +extern maplecl::Option strictNaiveRc; +extern maplecl::Option rcOpt1; +extern maplecl::Option nativeOpt; +extern maplecl::Option o0; +extern maplecl::Option o2; +extern maplecl::Option os; +extern maplecl::Option criticalNative; +extern maplecl::Option fastNative; +extern maplecl::Option noDot; +extern maplecl::Option genIrProfile; +extern maplecl::Option proFileTest; +extern maplecl::Option barrier; +extern maplecl::Option nativeFuncPropertyFile; 
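These option externs are the command-line mirror of the static members on maple::Options declared in option.h later in this patch (for example opts::mpl2mpl::quiet alongside Options::quiet). A minimal sketch of copying a parsed option into that singleton follows; the IsEnabledByUser()/GetValue() accessors are assumptions about the maplecl::Option interface, not declarations taken from this header.

#include "mpl2mpl_options.h"
#include "option.h"

// Hypothetical wiring helper in the spirit of Options::SolveOptions(); the maplecl
// accessors used here (IsEnabledByUser, GetValue) are assumed, not defined in this patch.
static void ApplyParsedMpl2MplOptions() {
  if (opts::mpl2mpl::quiet.IsEnabledByUser()) {
    maple::Options::quiet = true;  // static member declared in option.h
  }
  if (opts::mpl2mpl::inlineSmallFunctionThreshold.IsEnabledByUser()) {
    maple::Options::inlineSmallFunctionThreshold =
        opts::mpl2mpl::inlineSmallFunctionThreshold.GetValue();
  }
}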
+extern maplecl::Option mapleLinkerNolocal; +extern maplecl::Option buildApp; +extern maplecl::Option partialAot; +extern maplecl::Option decoupleInit; +extern maplecl::Option sourceMuid; +extern maplecl::Option deferredVisit; +extern maplecl::Option deferredVisit2; +extern maplecl::Option decoupleSuper; +extern maplecl::Option genDecoupleVtab; +extern maplecl::Option profileFunc; +extern maplecl::Option dumpDevirtual; +extern maplecl::Option readDevirtual; +extern maplecl::Option useWhiteClass; +extern maplecl::Option appPackageName; +extern maplecl::Option checkClInvocation; +extern maplecl::Option dumpClInvocation; +extern maplecl::Option warning; +extern maplecl::Option lazyBinding; +extern maplecl::Option hotFix; +extern maplecl::Option compactMeta; +extern maplecl::Option genPGOReport; +extern maplecl::Option inlineCache; +extern maplecl::Option noComment; +extern maplecl::Option rmNouseFunc; +extern maplecl::Option sideEffect; +extern maplecl::Option dumpIPA; +extern maplecl::Option wpaa; +extern maplecl::Option numOfCloneVersions; +extern maplecl::Option numOfImpExprLowBound; +extern maplecl::Option numOfImpExprHighBound; +extern maplecl::Option numOfCallSiteLowBound; +extern maplecl::Option numOfCallSiteUpBound; +extern maplecl::Option numOfConstpropValue; +extern maplecl::Option outlineThreshold; +} + +#endif /* MAPLE_IR_INCLUDE_MPL2MPL_OPTION_H */ diff --git a/src/mapleall/maple_ir/include/opcode_info.h b/src/mapleall/maple_ir/include/opcode_info.h new file mode 100644 index 0000000000000000000000000000000000000000..5a82646b11d295cbece3c26f3b1a8c4c44c1f09c --- /dev/null +++ b/src/mapleall/maple_ir/include/opcode_info.h @@ -0,0 +1,200 @@ +/* + * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MAPLE_IR_INCLUDE_OPCODE_INFO_H +#define MAPLE_IR_INCLUDE_OPCODE_INFO_H +#include "types_def.h" +#include "opcodes.h" +#include "mpl_logging.h" + +namespace maple { +enum OpcodeProp { + kOpcodePropNone, + kOpcodePropIsStmt, // The instruction is a stmt, so has 2 stmt pointers + kOpcodePropIsVarSize, // The instruction size is not fixed + kOpcodePropNotMMPL, // The instruction is not allowed in Machine Maple IR + kOpcodePropIsCompare, // The instruction is one of the 6 comparison ops + kOpcodePropIsTypeCvt, // The instruction is a type conversion op + kOpcodePropHasSSAUse, // The instruction may incur a use in SSA form + kOpcodePropHasSSADef, // The instruction may incur a def in SSA form + kOpcodePropIsCall, // The instruction is among the call instructions + kOpcodePropIsCallAssigned, // The instruction is among the call instructions with implicit assignments of the + // returned values + kOpcodePropNotPure, // The operation does not return same result with idential operands + kOpcodePropMayThrowException, + kOpcodePropIsAssertNonnull, // The operation check nonnnull + kOpcodePropIsAssertUpperBoundary, // The operation check upper boundary + kOpcodePropIsAssertLowerBoundary, // The operation check lower boundary +}; + +constexpr unsigned long OPCODEISSTMT = 1ULL << kOpcodePropIsStmt; +constexpr unsigned long OPCODEISVARSIZE = 1ULL << kOpcodePropIsVarSize; +constexpr unsigned long OPCODENOTMMPL = 1ULL << kOpcodePropNotMMPL; +constexpr unsigned long OPCODEISCOMPARE = 1ULL << kOpcodePropIsCompare; +constexpr unsigned long OPCODEISTYPECVT = 1ULL << kOpcodePropIsTypeCvt; +constexpr unsigned long OPCODEHASSSAUSE = 1ULL << kOpcodePropHasSSAUse; +constexpr unsigned long OPCODEHASSSADEF = 1ULL << kOpcodePropHasSSADef; +constexpr unsigned long OPCODEISCALL = 1ULL << kOpcodePropIsCall; +constexpr unsigned long OPCODEISCALLASSIGNED = 1ULL << kOpcodePropIsCallAssigned; +constexpr unsigned long OPCODENOTPURE = 1ULL << kOpcodePropNotPure; +constexpr unsigned long OPCODEMAYTHROWEXCEPTION = 1ULL << kOpcodePropMayThrowException; +constexpr unsigned long OPCODEASSERTNONNULL = 1ULL << kOpcodePropIsAssertNonnull; +constexpr unsigned long OPCODEASSERTUPPERBOUNDARY = 1ULL << kOpcodePropIsAssertUpperBoundary; +constexpr unsigned long OPCODEASSERTLOWERBOUNDARY = 1ULL << kOpcodePropIsAssertLowerBoundary; + +struct OpcodeDesc { + uint8 instrucSize; // size of instruction in bytes + uint16 flag; // stores the opcode property flags + std::string name; +}; + +class OpcodeTable { + public: + OpcodeTable(); + ~OpcodeTable() = default; + + OpcodeDesc GetTableItemAt(Opcode o) const { + ASSERT(o < OP_last, "invalid opcode"); + return table[o]; + } + + bool IsStmt(Opcode o) const { + ASSERT(o < OP_last, "invalid opcode"); + return table[o].flag & OPCODEISSTMT; + } + + bool IsVarSize(Opcode o) const { + ASSERT(o < OP_last, "invalid opcode"); + return table[o].flag & OPCODEISVARSIZE; + } + + bool NotMMPL(Opcode o) const { + ASSERT(o < OP_last, "invalid opcode"); + return table[o].flag & OPCODENOTMMPL; + } + + bool IsCompare(Opcode o) const { + ASSERT(o < OP_last, "invalid opcode"); + return table[o].flag & OPCODEISCOMPARE; + } + + bool IsTypeCvt(Opcode o) const { + ASSERT(o < OP_last, "invalid opcode"); + return table[o].flag & OPCODEISTYPECVT; + } + + bool HasSSAUse(Opcode o) const { + ASSERT(o < OP_last, "invalid opcode"); + return table[o].flag & OPCODEHASSSAUSE; + } + + bool HasSSADef(Opcode o) const { + ASSERT(o < OP_last, "invalid opcode"); + return table[o].flag & OPCODEHASSSADEF; + } + + bool 
IsCall(Opcode o) const { + ASSERT(o < OP_last, "invalid opcode"); + return table[o].flag & OPCODEISCALL; + } + + bool IsCallAssigned(Opcode o) const { + ASSERT(o < OP_last, "invalid opcode"); + return table[o].flag & OPCODEISCALLASSIGNED; + } + + bool IsICall(Opcode o) const { + ASSERT(o < OP_last, "invalid opcode"); + return o == OP_icall || o == OP_icallassigned || + o == OP_icallproto || o == OP_icallprotoassigned || + o == OP_virtualicall || o == OP_virtualicallassigned || + o == OP_interfaceicall || o == OP_interfaceicallassigned; + } + + bool NotPure(Opcode o) const { + ASSERT(o < OP_last, "invalid opcode"); + return table[o].flag & OPCODENOTPURE; + } + + bool MayThrowException(Opcode o) const { + ASSERT(o < OP_last, "invalid opcode"); + return table[o].flag & OPCODEMAYTHROWEXCEPTION; + } + + bool HasSideEffect(Opcode o) const { + ASSERT(o < OP_last, "invalid opcode"); + return MayThrowException(o); + } + + const std::string &GetName(Opcode o) const { + ASSERT(o < OP_last, "invalid opcode"); + return table[o].name; + } + + bool IsCondBr(Opcode o) const { + ASSERT(o < OP_last, "invalid opcode"); + return o == OP_brtrue || o == OP_brfalse; + } + + bool AssignActualVar(Opcode o) const { + ASSERT(o < OP_last, "invalid opcode"); + return o == OP_dassign || o == OP_regassign; + } + + bool IsAssertNonnull(Opcode o) const { + ASSERT(o < OP_last, "invalid opcode"); + return table[o].flag & OPCODEASSERTNONNULL; + } + + bool IsCallAssertNonnull(Opcode o) const { + ASSERT(o < OP_last, "invalid opcode"); + return o == OP_callassertnonnull; + } + + bool IsAssertBoundary(Opcode o) const { + ASSERT(o < OP_last, "invalid opcode"); + return table[o].flag & (OPCODEASSERTUPPERBOUNDARY | OPCODEASSERTLOWERBOUNDARY); + } + + bool IsAssertUpperBoundary(Opcode o) const { + ASSERT(o < OP_last, "invalid opcode"); + return table[o].flag & OPCODEASSERTUPPERBOUNDARY; + } + + bool IsAssertLowerBoundary(Opcode o) const { + ASSERT(o < OP_last, "invalid opcode"); + return table[o].flag & OPCODEASSERTLOWERBOUNDARY; + } + + bool IsCallAssertBoundary(Opcode o) const { + ASSERT(o < OP_last, "invalid opcode"); + return o == OP_callassertle; + } + + bool IsAssertLeBoundary(Opcode o) const { + ASSERT(o < OP_last, "invalid opcode"); + return (o == OP_callassertle || o == OP_returnassertle || o == OP_assignassertle); + } + + bool IsCalcAssertBoundary(Opcode o) const { + ASSERT(o < OP_last, "invalid opcode"); + return (o == OP_calcassertlt || o == OP_calcassertge); + } + + private: + OpcodeDesc table[OP_last]; +}; +extern const OpcodeTable kOpcodeInfo; +} // namespace maple +#endif // MAPLE_IR_INCLUDE_OPCODE_INFO_H diff --git a/src/mapleall/maple_ir/include/opcodes.def b/src/mapleall/maple_ir/include/opcodes.def new file mode 100644 index 0000000000000000000000000000000000000000..a038adf61f3e68795299ea30db04d79d08c5b76e --- /dev/null +++ b/src/mapleall/maple_ir/include/opcodes.def @@ -0,0 +1,224 @@ +/* + * Copyright (c) [2019-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan PSL v1. + * You can use this software according to the terms and conditions of the Mulan PSL v1. + * You may obtain a copy of Mulan PSL v1 at: + * + * http://license.coscl.org.cn/MulanPSL + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v1 for more details. 
+ */ +// Stmt & Notmmpl + // storage access opcodes + OPCODE(dassign, DassignNode, (OPCODEISSTMT | OPCODENOTMMPL | OPCODEHASSSADEF), 8) + OPCODE(piassign, PiassignNode, (OPCODEISSTMT | OPCODENOTMMPL | OPCODEHASSSADEF), 8) + OPCODE(maydassign, DassignNode, (OPCODEISSTMT | OPCODENOTMMPL | OPCODEHASSSADEF), 8) + OPCODE(iassign, IassignNode, (OPCODEISSTMT | OPCODENOTMMPL | OPCODEHASSSADEF), 12) + // hierarchical control flow opcodes + OPCODE(block, BlockNode, (OPCODEISSTMT | OPCODENOTMMPL), 0) + OPCODE(doloop, DoloopNode, (OPCODEISSTMT | OPCODENOTMMPL), 0) + OPCODE(dowhile, WhileStmtNode, (OPCODEISSTMT | OPCODENOTMMPL), 0) + OPCODE(if, IfStmtNode, (OPCODEISSTMT | OPCODENOTMMPL), 0) + OPCODE(while, WhileStmtNode, (OPCODEISSTMT | OPCODENOTMMPL), 0) + OPCODE(switch, SwitchNode, (OPCODEISSTMT | OPCODENOTMMPL), 8) + OPCODE(multiway, MultiwayNode, (OPCODEISSTMT | OPCODENOTMMPL), 8) + OPCODE(foreachelem, ForeachelemNode, (OPCODEISSTMT | OPCODENOTMMPL), 0) + // other opcodes + OPCODE(comment, CommentNode, (OPCODEISSTMT | OPCODENOTMMPL), 0) + OPCODE(eval, UnaryStmtNode, (OPCODEISSTMT | OPCODENOTMMPL), 8) + OPCODE(free, UnaryStmtNode, (OPCODEISSTMT | OPCODENOTMMPL), 8) + OPCODE(calcassertge, BinaryStmtNode, (OPCODEISSTMT | OPCODENOTMMPL | OPCODEASSERTLOWERBOUNDARY), 8) + OPCODE(calcassertlt, BinaryStmtNode, (OPCODEISSTMT | OPCODENOTMMPL | OPCODEASSERTUPPERBOUNDARY), 8) + OPCODE(assertge, AssertBoundaryStmtNode, (OPCODEISSTMT | OPCODENOTMMPL | OPCODEASSERTLOWERBOUNDARY), 8) + OPCODE(assertlt, AssertBoundaryStmtNode, (OPCODEISSTMT | OPCODENOTMMPL | OPCODEASSERTUPPERBOUNDARY), 8) + OPCODE(callassertle, CallAssertBoundaryStmtNode, (OPCODEISSTMT | OPCODENOTMMPL | OPCODEASSERTUPPERBOUNDARY), 8) + OPCODE(returnassertle, AssertBoundaryStmtNode, (OPCODEISSTMT | OPCODENOTMMPL | OPCODEASSERTUPPERBOUNDARY), 8) + OPCODE(assignassertle, AssertBoundaryStmtNode, (OPCODEISSTMT | OPCODENOTMMPL | OPCODEASSERTUPPERBOUNDARY), 8) + OPCODE(abort, UnaryStmtNode, (OPCODEISSTMT | OPCODENOTMMPL), 8) + OPCODE(assertnonnull, UnaryStmtNode, (OPCODEISSTMT | OPCODENOTMMPL | OPCODEASSERTNONNULL), 8) + OPCODE(assignassertnonnull, AssignAssertNonnullStmtNode, (OPCODEISSTMT | OPCODENOTMMPL | OPCODEASSERTNONNULL), 8) + OPCODE(callassertnonnull, CallAssertNonnullStmtNode, (OPCODEISSTMT | OPCODENOTMMPL | OPCODEASSERTNONNULL), 8) + OPCODE(returnassertnonnull, ReturnAssertNonnullStmtNode, (OPCODEISSTMT | OPCODENOTMMPL | OPCODEASSERTNONNULL), 8) +// Expr & Notmmpl + // storage access opcodes + OPCODE(dread, AddrofNode, (OPCODENOTMMPL | OPCODEHASSSAUSE), 12) + OPCODE(iread, IreadNode, (OPCODENOTMMPL | OPCODEHASSSAUSE), 12) + // leaf opcodes + OPCODE(addrof, AddrofNode, OPCODENOTMMPL, 12) + OPCODE(iaddrof, IreadNode, OPCODENOTMMPL, 12) + OPCODE(sizeoftype, SizeoftypeNode, OPCODENOTMMPL, 8) + OPCODE(fieldsdist, FieldsDistNode, OPCODENOTMMPL, 8) + // N-ary expression opcodes + OPCODE(array, ArrayNode, (OPCODEISVARSIZE | OPCODENOTMMPL | OPCODEMAYTHROWEXCEPTION), 8) +// Stmt + // storage access opcodes + OPCODE(iassignoff, IassignoffNode, OPCODEISSTMT, 8) + OPCODE(iassignfpoff, IassignFPoffNode, OPCODEISSTMT, 8) + OPCODE(regassign, RegassignNode, (OPCODEISSTMT | OPCODEHASSSADEF), 8) + // flat control flow opcodes + OPCODE(goto, GotoNode, OPCODEISSTMT, 8) + OPCODE(brfalse, CondGotoNode, OPCODEISSTMT, 8) + OPCODE(brtrue, CondGotoNode, OPCODEISSTMT, 8) + OPCODE(return, NaryStmtNode, (OPCODEISSTMT | OPCODEISVARSIZE | OPCODEHASSSAUSE), 0) + OPCODE(rangegoto, RangeGotoNode, OPCODEISSTMT, 8) + // call opcodes + OPCODE(call, CallNode, (OPCODEISSTMT | 
OPCODEISVARSIZE | OPCODEHASSSAUSE | OPCODEHASSSADEF | OPCODEISCALL), 8) + OPCODE(virtualcall, CallNode, (OPCODEISSTMT | OPCODEISVARSIZE | OPCODEHASSSAUSE | OPCODEHASSSADEF | OPCODEISCALL), 8) + OPCODE(superclasscall, CallNode, (OPCODEISSTMT | OPCODEISVARSIZE | OPCODEHASSSAUSE | OPCODEHASSSADEF | OPCODEISCALL), 8) + OPCODE(interfacecall, CallNode, (OPCODEISSTMT | OPCODEISVARSIZE | OPCODEHASSSAUSE | OPCODEHASSSADEF | OPCODEISCALL), 8) + OPCODE(customcall, CallNode, (OPCODEISSTMT | OPCODEISVARSIZE | OPCODEHASSSAUSE | OPCODEHASSSADEF | OPCODEISCALL), 8) + OPCODE(polymorphiccall, CallNode, (OPCODEISSTMT | OPCODEISVARSIZE | OPCODEHASSSAUSE | OPCODEHASSSADEF | OPCODEISCALL), 12) + OPCODE(icall, IcallNode, (OPCODEISSTMT | OPCODEISVARSIZE | OPCODEHASSSAUSE | OPCODEHASSSADEF | OPCODEISCALL), 8) + OPCODE(interfaceicall, CallNode, (OPCODEISSTMT | OPCODEISVARSIZE | OPCODEHASSSAUSE | OPCODEHASSSADEF | OPCODEISCALL), 8) + OPCODE(virtualicall, CallNode, (OPCODEISSTMT | OPCODEISVARSIZE | OPCODEHASSSAUSE | OPCODEHASSSADEF | OPCODEISCALL), 8) + OPCODE(intrinsiccall, IntrinsiccallNode, (OPCODEISSTMT | OPCODEISVARSIZE | OPCODEHASSSAUSE | OPCODEHASSSADEF | OPCODEISCALL), 8) + OPCODE(intrinsiccallwithtype, IntrinsiccallNode, (OPCODEISSTMT | OPCODEISVARSIZE | OPCODEHASSSAUSE | OPCODEHASSSADEF | OPCODEISCALL), 12) + OPCODE(xintrinsiccall, IntrinsiccallNode, (OPCODEISSTMT | OPCODEISVARSIZE | OPCODEHASSSAUSE | OPCODEHASSSADEF | OPCODEISCALL), 8) + OPCODE(callassigned, CallNode, (OPCODEISSTMT | OPCODEISVARSIZE | OPCODEHASSSAUSE | OPCODEHASSSADEF | OPCODEISCALL | OPCODEISCALLASSIGNED), 0) + OPCODE(virtualcallassigned, CallNode, (OPCODEISSTMT | OPCODEISVARSIZE | OPCODEHASSSAUSE | OPCODEHASSSADEF | OPCODEISCALL | OPCODEISCALLASSIGNED), 0) + OPCODE(superclasscallassigned, CallNode, (OPCODEISSTMT | OPCODEISVARSIZE | OPCODEHASSSAUSE | OPCODEHASSSADEF | OPCODEISCALL | OPCODEISCALLASSIGNED), 0) + OPCODE(interfacecallassigned, CallNode, (OPCODEISSTMT | OPCODEISVARSIZE | OPCODEHASSSAUSE | OPCODEHASSSADEF | OPCODEISCALL | OPCODEISCALLASSIGNED), 0) + OPCODE(customcallassigned, CallNode, (OPCODEISSTMT | OPCODEISVARSIZE | OPCODEHASSSAUSE | OPCODEHASSSADEF | OPCODEISCALL | OPCODEISCALLASSIGNED), 0) + OPCODE(polymorphiccallassigned, CallNode, (OPCODEISSTMT | OPCODEISVARSIZE | OPCODEHASSSAUSE | OPCODEHASSSADEF | OPCODEISCALL | OPCODEISCALLASSIGNED), 0) + OPCODE(icallassigned, IcallNode, (OPCODEISSTMT | OPCODEISVARSIZE | OPCODEHASSSAUSE | OPCODEHASSSADEF | OPCODEISCALL | OPCODEISCALLASSIGNED), 0) + OPCODE(interfaceicallassigned, CallNode, (OPCODEISSTMT | OPCODEISVARSIZE | OPCODEHASSSAUSE | OPCODEHASSSADEF | OPCODEISCALL | OPCODEISCALLASSIGNED), 0) + OPCODE(virtualicallassigned, CallNode, (OPCODEISSTMT | OPCODEISVARSIZE | OPCODEHASSSAUSE | OPCODEHASSSADEF | OPCODEISCALL | OPCODEISCALLASSIGNED), 0) + OPCODE(intrinsiccallassigned, IntrinsiccallNode, (OPCODEISSTMT | OPCODEISVARSIZE | OPCODEHASSSAUSE | OPCODEHASSSADEF | OPCODEISCALL | OPCODEISCALLASSIGNED), 0) + OPCODE(intrinsiccallwithtypeassigned, IntrinsiccallNode, (OPCODEISSTMT | OPCODEISVARSIZE | OPCODEHASSSAUSE | OPCODEHASSSADEF | OPCODEISCALL | OPCODEISCALLASSIGNED), 0) + OPCODE(xintrinsiccallassigned, IntrinsiccallNode, (OPCODEISSTMT | OPCODEISVARSIZE | OPCODEHASSSAUSE | OPCODEHASSSADEF | OPCODEISCALL | OPCODEISCALLASSIGNED), 0) + // call with generic instantiation opcodes + OPCODE(callinstant, CallinstantNode, (OPCODEISSTMT | OPCODEISVARSIZE | OPCODEHASSSAUSE | OPCODEHASSSADEF | OPCODEISCALL), 0) + OPCODE(callinstantassigned, CallinstantNode, (OPCODEISSTMT | OPCODEISVARSIZE | 
OPCODEHASSSAUSE | OPCODEHASSSADEF | OPCODEISCALL | OPCODEISCALLASSIGNED), 0) + OPCODE(virtualcallinstant, CallinstantNode, (OPCODEISSTMT | OPCODEISVARSIZE | OPCODEHASSSAUSE | OPCODEHASSSADEF | OPCODEISCALL), 0) + OPCODE(virtualcallinstantassigned, CallinstantNode, (OPCODEISSTMT | OPCODEISVARSIZE | OPCODEHASSSAUSE | OPCODEHASSSADEF | OPCODEISCALL | OPCODEISCALLASSIGNED), 0) + OPCODE(superclasscallinstant, CallinstantNode, (OPCODEISSTMT | OPCODEISVARSIZE | OPCODEHASSSAUSE | OPCODEHASSSADEF | OPCODEISCALL), 0) + OPCODE(superclasscallinstantassigned, CallinstantNode, (OPCODEISSTMT | OPCODEISVARSIZE | OPCODEHASSSAUSE | OPCODEHASSSADEF | OPCODEISCALL | OPCODEISCALLASSIGNED), 0) + OPCODE(interfacecallinstant, CallinstantNode, (OPCODEISSTMT | OPCODEISVARSIZE | OPCODEHASSSAUSE | OPCODEHASSSADEF | OPCODEISCALL), 0) + OPCODE(interfacecallinstantassigned, CallinstantNode, (OPCODEISSTMT | OPCODEISVARSIZE | OPCODEHASSSAUSE | OPCODEHASSSADEF | OPCODEISCALL | OPCODEISCALLASSIGNED), 0) + // exception handling + OPCODE(jstry, JsTryNode, OPCODEISSTMT, 8) + OPCODE(try, TryNode, (OPCODEISSTMT | OPCODENOTMMPL), 8) + OPCODE(cpptry, TryNode, (OPCODEISSTMT | OPCODENOTMMPL), 8) + + OPCODE(throw, UnaryStmtNode, (OPCODEISSTMT | OPCODEHASSSAUSE), 0) + + OPCODE(jscatch, StmtNode, OPCODEISSTMT, 4) + OPCODE(catch, CatchNode, OPCODEISSTMT, 8) + OPCODE(cppcatch, CppCatchNode, OPCODEISSTMT, 8) + + OPCODE(finally, StmtNode, OPCODEISSTMT, 6) + OPCODE(cleanuptry, StmtNode, OPCODEISSTMT, 6) + OPCODE(endtry, StmtNode, OPCODEISSTMT, 6) + OPCODE(safe, StmtNode, OPCODEISSTMT, 6) + OPCODE(endsafe, StmtNode, OPCODEISSTMT, 6) + OPCODE(unsafe, StmtNode, OPCODEISSTMT, 6) + OPCODE(endunsafe, StmtNode, OPCODEISSTMT, 6) + OPCODE(gosub, GotoNode, (OPCODEISSTMT | OPCODEHASSSAUSE), 8) + OPCODE(retsub, StmtNode, (OPCODEISSTMT | OPCODEHASSSAUSE), 6) + // synchronizaion + OPCODE(syncenter, NaryStmtNode, (OPCODEISSTMT | OPCODEHASSSADEF | OPCODEHASSSAUSE), 0) + OPCODE(syncexit, NaryStmtNode, (OPCODEISSTMT | OPCODEHASSSADEF | OPCODEHASSSAUSE), 0) + OPCODE(decref, UnaryStmtNode, (OPCODEISSTMT | OPCODEHASSSAUSE | OPCODENOTMMPL), 0) + OPCODE(incref, UnaryStmtNode, (OPCODEISSTMT | OPCODEHASSSAUSE | OPCODENOTMMPL), 0) + OPCODE(decrefreset, UnaryStmtNode, (OPCODEISSTMT | OPCODEHASSSAUSE | OPCODENOTMMPL), 0) + // barriers + OPCODE(membaracquire, StmtNode, OPCODEISSTMT, 6) + OPCODE(membarrelease, StmtNode, OPCODEISSTMT, 6) + OPCODE(membarstoreload, StmtNode, OPCODEISSTMT, 6) + OPCODE(membarstorestore, StmtNode, OPCODEISSTMT, 6) + // other opcodes + OPCODE(label, LabelNode, OPCODEISSTMT, 8) +// Expr + // storage access opcodes + OPCODE(ireadoff, IreadoffNode, 0, 8) + OPCODE(ireadfpoff, IreadFPoffNode, 0, 8) + OPCODE(regread, RegreadNode, OPCODEHASSSAUSE, 8) + // leaf opcodes + OPCODE(addroffunc, AddroffuncNode, 0, 8) + OPCODE(addroflabel, AddroflabelNode, 0, 8) + OPCODE(constval, ConstvalNode, 0, 8) + OPCODE(conststr, ConststrNode, OPCODENOTMMPL, 8) + OPCODE(conststr16, Conststr16Node, OPCODENOTMMPL, 8) + // type conversion expression opcodes + OPCODE(ceil, TypeCvtNode, OPCODEISTYPECVT, 8) + OPCODE(cvt, TypeCvtNode, OPCODEISTYPECVT, 8) + OPCODE(floor, TypeCvtNode, OPCODEISTYPECVT, 8) + OPCODE(retype, RetypeNode, OPCODEISTYPECVT, 8) + OPCODE(round, TypeCvtNode, OPCODEISTYPECVT, 8) + OPCODE(trunc, TypeCvtNode, OPCODEISTYPECVT, 8) + // unary expression opcodes + OPCODE(abs, UnaryNode, 0, 0) + OPCODE(bnot, UnaryNode, 0, 0) + OPCODE(lnot, UnaryNode, 0, 0) + OPCODE(neg, UnaryNode, 0, 0) + OPCODE(recip, UnaryNode, 0, 0) + OPCODE(sqrt, UnaryNode, 0, 0) + 
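These OPCODE(...) rows form an X-macro table: each consumer of opcodes.def defines OPCODE to pick out the columns it needs before including the file, as opcodes.h does further down in this patch to build the Opcode enum. The sketch below shows that pattern with two consumers; the second (flag-table) expansion is illustrative only and is not the actual OpcodeTable constructor, and it assumes the OPCODEIS* masks from opcode_info.h and uint8 from types_def.h are visible.

// Consumer A: expand only the mnemonic column to enumerate opcodes
// (this mirrors the enum in opcodes.h later in this patch).
enum Opcode : uint8 {
  OP_undef,
#define OPCODE(STR, YY, ZZ, SS) OP_##STR,
#include "opcodes.def"
#undef OPCODE
  OP_last,
};

// Consumer B (hypothetical): expand only the property-flag column into a parallel
// array indexed by Opcode, so kOpcodeFlags[OP_dassign] yields
// (OPCODEISSTMT | OPCODENOTMMPL | OPCODEHASSSADEF).
static const unsigned long kOpcodeFlags[] = {
  0UL,  // OP_undef
#define OPCODE(STR, YY, ZZ, SS) static_cast<unsigned long>(ZZ),
#include "opcodes.def"
#undef OPCODE
};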
OPCODE(sext, ExtractbitsNode, 0, 8) + OPCODE(zext, ExtractbitsNode, 0, 8) + OPCODE(alloca, UnaryNode, OPCODENOTPURE, 0) + OPCODE(malloc, UnaryNode, OPCODENOTPURE, 0) + OPCODE(gcmalloc, GCMallocNode, OPCODENOTPURE, 8) + OPCODE(gcpermalloc, GCMallocNode, OPCODENOTPURE, 8) + OPCODE(stackmalloc, GCMallocNode, OPCODENOTPURE, 8) + OPCODE(gcmallocjarray, JarrayMallocNode, OPCODENOTPURE, 12) + OPCODE(gcpermallocjarray, JarrayMallocNode, OPCODENOTPURE, 12) + OPCODE(stackmallocjarray, JarrayMallocNode, OPCODENOTPURE, 12) + OPCODE(resolveinterfacefunc, ResolveFuncNode, 0, 8) + OPCODE(resolvevirtualfunc, ResolveFuncNode, 0, 8) + // binary expression opcodes + OPCODE(add, BinaryNode, 0, 0) + OPCODE(sub, BinaryNode, 0, 0) + OPCODE(mul, BinaryNode, 0, 0) + OPCODE(div, BinaryNode, OPCODEMAYTHROWEXCEPTION, 0) + OPCODE(rem, BinaryNode, OPCODEMAYTHROWEXCEPTION, 0) + OPCODE(ashr, BinaryNode, 0, 0) + OPCODE(lshr, BinaryNode, 0, 0) + OPCODE(shl, BinaryNode, 0, 0) + OPCODE(ror, BinaryNode, 0, 0) + OPCODE(max, BinaryNode, 0, 0) + OPCODE(min, BinaryNode, 0, 0) + OPCODE(band, BinaryNode, 0, 0) + OPCODE(bior, BinaryNode, 0, 0) + OPCODE(bxor, BinaryNode, 0, 0) + OPCODE(CG_array_elem_add, BinaryNode, 0, 0) + OPCODE(eq, CompareNode, OPCODEISCOMPARE, 8) + OPCODE(ge, CompareNode, OPCODEISCOMPARE, 8) + OPCODE(gt, CompareNode, OPCODEISCOMPARE, 8) + OPCODE(le, CompareNode, OPCODEISCOMPARE, 8) + OPCODE(lt, CompareNode, OPCODEISCOMPARE, 8) + OPCODE(ne, CompareNode, OPCODEISCOMPARE, 8) + OPCODE(cmp, CompareNode, OPCODEISCOMPARE, 8) + OPCODE(cmpl, CompareNode, OPCODEISCOMPARE, 8) + OPCODE(cmpg, CompareNode, OPCODEISCOMPARE, 8) + OPCODE(land, BinaryNode, 0, 0) + OPCODE(lior, BinaryNode, 0, 0) + OPCODE(cand, BinaryNode, OPCODENOTMMPL, 0) + OPCODE(cior, BinaryNode, OPCODENOTMMPL, 0) + // ternary expression opcodes + OPCODE(select, TernaryNode, 0, 0) + // N-ary expression opcodes + OPCODE(intrinsicop, IntrinsicopNode, OPCODEISVARSIZE, 8) + OPCODE(intrinsicopwithtype, IntrinsicopNode, OPCODEISVARSIZE, 12) + // Other expression opcodes + OPCODE(extractbits, ExtractbitsNode, 0, 8) + OPCODE(depositbits, DepositbitsNode, 0, 8) + // storage access + OPCODE(iassignpcoff, IassignPCoffNode, OPCODEISSTMT, 0) + OPCODE(ireadpcoff, IreadPCoffNode, 0, 0) + // barrier + OPCODE(checkpoint, StmtNode, OPCODEISSTMT, 0) + // leaf node + OPCODE(addroffpc, AddroffPCNode, 0, 0) + OPCODE(igoto, UnaryStmtNode, OPCODEISSTMT, 0) + OPCODE(asm, AsmNode, (OPCODEISSTMT | OPCODEHASSSAUSE | OPCODEHASSSADEF | OPCODEISCALLASSIGNED), 0) + OPCODE(dreadoff, dreadoffNode, OPCODEHASSSAUSE, 12) + OPCODE(addrofoff, addrofoffNode, 0, 12) + OPCODE(dassignoff, DassignoffNode, (OPCODEISSTMT | OPCODEHASSSADEF), 8) + OPCODE(iassignspoff, IassignFPoffNode, OPCODEISSTMT, 8) + OPCODE(blkassignoff, BlkassignoffNode, OPCODEISSTMT, 8) + OPCODE(icallproto, IcallNode, (OPCODEISSTMT | OPCODEISVARSIZE | OPCODEHASSSAUSE | OPCODEHASSSADEF | OPCODEISCALL), 8) + OPCODE(icallprotoassigned, IcallNode, (OPCODEISSTMT | OPCODEISVARSIZE | OPCODEHASSSAUSE | OPCODEHASSSADEF | OPCODEISCALL | OPCODEISCALLASSIGNED), 8) diff --git a/src/mapleall/maple_ir/include/opcodes.h b/src/mapleall/maple_ir/include/opcodes.h new file mode 100644 index 0000000000000000000000000000000000000000..a1186234a0da79e731816aaafe20c34b8911e77c --- /dev/null +++ b/src/mapleall/maple_ir/include/opcodes.h @@ -0,0 +1,236 @@ +/* + * Copyright (c) [2019-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. 
+ * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLE_IR_INCLUDE_OPCODES_H +#define MAPLE_IR_INCLUDE_OPCODES_H +#include "types_def.h" +#include "mpl_logging.h" + +namespace maple { +enum Opcode : uint8 { + OP_undef, +#define OPCODE(STR, YY, ZZ, SS) OP_##STR, +#include "opcodes.def" +#undef OPCODE + OP_last, +}; + +#define CASE_OP_ASSERT_NONNULL \ + case OP_assertnonnull: \ + case OP_assignassertnonnull: \ + case OP_callassertnonnull: \ + case OP_returnassertnonnull: + +#define CASE_OP_ASSERT_BOUNDARY \ + case OP_assertge: \ + case OP_assertlt: \ + case OP_calcassertge: \ + case OP_calcassertlt: \ + case OP_callassertle: \ + case OP_returnassertle: \ + case OP_assignassertle: + +inline constexpr bool IsDAssign(Opcode code) { + return (code == OP_dassign || code == OP_maydassign); +} + +inline constexpr bool IsCallAssigned(Opcode code) { + return (code == OP_callassigned || code == OP_virtualcallassigned || + code == OP_virtualicallassigned || code == OP_superclasscallassigned || + code == OP_interfacecallassigned || code == OP_interfaceicallassigned || + code == OP_customcallassigned || code == OP_polymorphiccallassigned || + code == OP_icallassigned || code == OP_icallprotoassigned || code == OP_intrinsiccallassigned || + code == OP_xintrinsiccallassigned || code == OP_intrinsiccallwithtypeassigned); +} + +inline constexpr bool IsBranch(Opcode opcode) { + return (opcode == OP_goto || opcode == OP_brtrue || opcode == OP_brfalse || opcode == OP_switch || + opcode == OP_igoto); +} + +inline constexpr bool IsLogicalShift(Opcode opcode) { + return (opcode == OP_lshr || opcode == OP_shl); +} + +constexpr bool IsCommutative(Opcode opcode) { + switch (opcode) { + case OP_add: + case OP_mul: + case OP_max: + case OP_min: + case OP_band: + case OP_bior: + case OP_bxor: + case OP_eq: + case OP_ne: + case OP_land: + case OP_lior: + return true; + default: + return false; + } +} + +constexpr bool IsStmtMustRequire(Opcode opcode) { + switch (opcode) { + case OP_jstry: + case OP_throw: + case OP_try: + case OP_catch: + case OP_jscatch: + case OP_finally: + case OP_endtry: + case OP_cleanuptry: + case OP_gosub: + case OP_retsub: + case OP_return: + case OP_call: + case OP_virtualcall: + case OP_virtualicall: + case OP_superclasscall: + case OP_interfacecall: + case OP_interfaceicall: + case OP_customcall: + case OP_polymorphiccall: + case OP_callassigned: + case OP_virtualcallassigned: + case OP_virtualicallassigned: + case OP_superclasscallassigned: + case OP_interfacecallassigned: + case OP_interfaceicallassigned: + case OP_customcallassigned: + case OP_polymorphiccallassigned: + case OP_icall: + case OP_icallassigned: + case OP_icallproto: + case OP_icallprotoassigned: + case OP_xintrinsiccall: + case OP_intrinsiccallassigned: + case OP_xintrinsiccallassigned: + case OP_intrinsiccallwithtype: + case OP_intrinsiccallwithtypeassigned: + case OP_asm: + case OP_syncenter: + case OP_syncexit: + case OP_membaracquire: + case OP_membarrelease: + case OP_membarstoreload: + case OP_membarstorestore: + CASE_OP_ASSERT_NONNULL + CASE_OP_ASSERT_BOUNDARY + case OP_free: + case OP_incref: + case OP_decref: + case 
OP_decrefreset: { + return true; + } + default: + return false; + } +} + +// the result of these op is actually u1(may be set as other type, but its return value can only be zero or one) +// different from kOpcodeInfo.IsCompare(op) : cmp/cmpg/cmpl have no reverse op, and may return -1/0/1 +constexpr bool IsCompareHasReverseOp(Opcode op) { + if (op == OP_eq || op == OP_ne || op == OP_ge || op == OP_gt || op == OP_le || op == OP_lt) { + return true; + } + return false; +} + +constexpr Opcode GetSwapCmpOp(Opcode op) { + switch (op) { + case OP_eq: + return OP_eq; + case OP_ne: + return OP_ne; + case OP_ge: + return OP_le; + case OP_gt: + return OP_lt; + case OP_le: + return OP_ge; + case OP_lt: + return OP_gt; + default: + CHECK_FATAL(false, "can't swap op"); + return op; + } +} + +constexpr Opcode GetReverseCmpOp(Opcode op) { + switch (op) { + case OP_eq: + return OP_ne; + case OP_ne: + return OP_eq; + case OP_ge: + return OP_lt; + case OP_gt: + return OP_le; + case OP_le: + return OP_gt; + case OP_lt: + return OP_ge; + default: + CHECK_FATAL(false, "opcode has no reverse op"); + return op; + } +} + +constexpr bool IsSupportedOpForCopyInPhasesLoopUnrollAndVRP(Opcode op, bool supportSwitch = false) { + switch (op) { + case OP_return: + case OP_comment: + case OP_goto: + case OP_dassign: + case OP_regassign: + case OP_brfalse: + case OP_brtrue: + case OP_maydassign: + case OP_iassign: + CASE_OP_ASSERT_NONNULL + CASE_OP_ASSERT_BOUNDARY + case OP_call: + case OP_callassigned: + case OP_virtualcallassigned: + case OP_virtualicallassigned: + case OP_interfaceicallassigned: + case OP_interfacecallassigned: + case OP_intrinsiccall: + case OP_intrinsiccallassigned: + case OP_intrinsiccallwithtype: + case OP_intrinsiccallwithtypeassigned: + case OP_membaracquire: + case OP_membarstorestore: + case OP_membarstoreload: + case OP_membarrelease: + case OP_icall: + case OP_icallassigned: + case OP_icallproto: + case OP_icallprotoassigned: + case OP_asm: + case OP_eval: + case OP_incref: + case OP_decref: + case OP_decrefreset: + return true; + case OP_switch: + return supportSwitch; + default: + return false; + } +} +} // namespace maple +#endif // MAPLE_IR_INCLUDE_OPCODES_H diff --git a/src/mapleall/maple_ir/include/option.h b/src/mapleall/maple_ir/include/option.h new file mode 100644 index 0000000000000000000000000000000000000000..e31b36d0c5f11c10a0e43e84a77c71cd3d253c70 --- /dev/null +++ b/src/mapleall/maple_ir/include/option.h @@ -0,0 +1,206 @@ +/* + * Copyright (c) [2019-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MAPLE_IR_INCLUDE_OPTION_H +#define MAPLE_IR_INCLUDE_OPTION_H +#include +#include + +#include "mempool.h" +#include "mempool_allocator.h" +#include "parser_opt.h" +#include "types_def.h" + +namespace maple { +class Options { + public: + static Options &GetInstance(); + + bool ParseOptions(int argc, char **argv, std::string &fileName) const; + + bool SolveOptions(bool isDebug) const; + ~Options() = default; + + void DumpOptions() const; + const std::vector &GetSequence() const { + return phaseSeq; + } + + std::string LastPhaseName() const { + return phaseSeq.empty() ? "noopt" : phaseSeq[phaseSeq.size() - 1]; + } + + enum Level { + kMpl2MplLevelZero = 0, + kMpl2MplLevelOne = 1, + kMpl2MplLevelTwo = 2 + }; + enum DecoupleLevel { + kNoDecouple = 0, + kConservativeDecouple = 1, + kAggressiveDecouple = 2, + kDecoupleAndLazy = 3 + }; + + static bool DumpPhase(const std::string &phase) { + if (phase == "") { + return false; + } + return dumpPhase == "*" || dumpPhase == phase; + } + + static bool IsSkipPhase(const std::string &phaseName) { + return skipPhase == phaseName; + } + + static bool DumpFunc() { + return dumpFunc != "*" && dumpFunc != ""; + } + static bool IsBigEndian() { + return bigEndian; + } + + static bool dumpBefore; + static bool dumpAfter; + static std::string dumpPhase; + static std::string skipPhase; + static std::string skipFrom; + static std::string skipAfter; + static std::string dumpFunc; + static bool quiet; + static bool regNativeFunc; + static bool regNativeDynamicOnly; + static bool nativeWrapper; + static bool inlineWithProfile; + static bool useInline; + static bool enableIPAClone; + static bool enableGInline; + static std::string noInlineFuncList; + static std::string importFileList; + static bool useCrossModuleInline; + static uint32 numOfCloneVersions; + static uint32 numOfImpExprLowBound; + static uint32 numOfImpExprHighBound; + static uint32 numOfCallSiteLowBound; + static uint32 numOfCallSiteUpBound; + static uint32 numOfConstpropValue; + static uint32 inlineSmallFunctionThreshold; + static uint32 inlineHotFunctionThreshold; + static uint32 inlineRecursiveFunctionThreshold; + static uint32 inlineDepth; + static uint32 inlineModuleGrowth; + static uint32 inlineColdFunctionThreshold; + static bool respectAlwaysInline; + static bool inlineToAllCallers; + static uint32 ginlineMaxNondeclaredInlineCallee; + static bool ginlineAllowNondeclaredInlineSizeGrow; + static bool ginlineAllowIgnoreGrowthLimit; + static uint32 ginlineMaxDepthIgnoreGrowthLimit; + static uint32 ginlineSmallFunc; + static uint32 ginlineRelaxSmallFuncDecalredInline; + static uint32 ginlineRelaxSmallFuncCanbeRemoved; + static std::string callsiteProfilePath; + + static uint32 profileHotCount; + static uint32 profileColdCount; + static bool profileHotCountSeted; + static bool profileColdCountSeted; + static uint32 profileHotRate; + static uint32 profileColdRate; + static std::string staticBindingList; + static bool usePreg; + static bool mapleLinker; + static bool dumpMuidFile; + static bool emitVtableImpl; +#if MIR_JAVA + static bool skipVirtualMethod; +#endif + // Ready to be deleted. 
+ static bool noRC; + static bool analyzeCtor; + static bool strictNaiveRC; + static bool gcOnly; + static bool bigEndian; + static bool rcOpt1; + static std::string classMetaProFile; + static std::string methodMetaProfile; + static std::string fieldMetaProFile; + static std::string reflectStringProFile; + static bool nativeOpt; + static bool optForSize; + static bool O2; + static bool noDot; + static bool decoupleStatic; + static std::string criticalNativeFile; + static std::string fastNativeFile; + static bool barrier; + static std::string nativeFuncPropertyFile; + static bool mapleLinkerTransformLocal; + static uint32 buildApp; + static bool partialAot; + static uint32 decoupleInit; + static std::string sourceMuid; + static bool decoupleSuper; + static bool deferredVisit; + static bool deferredVisit2; + static bool genVtabAndItabForDecouple; + static bool profileFunc; + static uint32 parserOpt; + static std::string dumpDevirtualList; + static std::string readDevirtualList; + static bool usePreloadedClass; + static std::string profile; + static bool profileGen; + static bool profileUse; + static bool stackProtectorStrong; + static bool stackProtectorAll; + static std::string appPackageName; + static std::string proFileData; + static std::string proFileFuncData; + static std::string proFileClassData; + static bool profileStaticFields; + static bool genIRProfile; + static bool profileTest; + static std::string classLoaderInvocationList; + static bool dumpClassLoaderInvocation; + static unsigned int warningLevel; + static bool lazyBinding; + static bool hotFix; + static bool compactMeta; + static bool genPGOReport; + static bool verify; + static uint32 inlineCache; + static bool checkArrayStore; + static bool noComment; + static bool rmNoUseFunc; + static bool sideEffect; + static bool dumpIPA; + static bool wpaa; + static bool genLMBC; + static bool doOutline; + static size_t outlineThreshold; + + private: + void DecideMpl2MplRealLevel() const; + std::vector phaseSeq; +}; +} // namespace maple +#ifndef TRACE_PHASE +#define TRACE_PHASE (Options::dumpPhase.compare(PhaseName()) == 0) +#endif + +#ifndef TRACE_MAPLE_PHASE +#define TRACE_MAPLE_PHASE (Options::dumpPhase.compare(PhaseName()) == 0) +#endif +#endif // MAPLE_IR_INCLUDE_OPTION_H diff --git a/src/mapleall/maple_ir/include/parser_opt.h b/src/mapleall/maple_ir/include/parser_opt.h new file mode 100644 index 0000000000000000000000000000000000000000..fa470b82d0a8932d93e9ab57b8cf2122e64fb1d6 --- /dev/null +++ b/src/mapleall/maple_ir/include/parser_opt.h @@ -0,0 +1,30 @@ +/* + * Copyright (c) [2019-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MAPLE_IR_INCLUDE_PARSER_OPT_H +#define MAPLE_IR_INCLUDE_PARSER_OPT_H +#include "types_def.h" + +namespace maple { +// option bits passed into ParseMIR +enum ParserOptions : uint8 { + kInvalidOption = 0x0, + kWithDbgInfo = 0x1, // collect dbginfo + kKeepFirst = 0x2, // ignore second type def, not emit error + kWithProfileInfo = 0x4, + kParseOptFunc = 0x08, // parse optimized function mpl file + kParseInlineFuncBody = 0x10 // parse to-be-inlined function bodies +}; +} // namespace maple +#endif // MAPLE_IR_INCLUDE_PARSER_OPT_H diff --git a/src/mapleall/maple_ir/include/prim_types.def b/src/mapleall/maple_ir/include/prim_types.def new file mode 100644 index 0000000000000000000000000000000000000000..09f95fa2fbfe5cb50284f6ffdabd72513426b607 --- /dev/null +++ b/src/mapleall/maple_ir/include/prim_types.def @@ -0,0 +1,490 @@ +/* + * Copyright (c) [2019] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan PSL v1. + * You can use this software according to the terms and conditions of the Mulan PSL v1. + * You may obtain a copy of Mulan PSL v1 at: + * + * http://license.coscl.org.cn/MulanPSL + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v1 for more details. + */ +#ifdef LOAD_ALGO_PRIMARY_TYPE +#undef LOAD_ALGO_PRIMARY_TYPE +// NOTE: this ordering needs to be in sync with ptypesizetable[] in maplevm/src/vmfunc.cpp + PRIMTYPE(void) + PRIMTYPE(i8) + PRIMTYPE(i16) + PRIMTYPE(i32) + PRIMTYPE(i64) + PRIMTYPE(i128) + PRIMTYPE(u8) + PRIMTYPE(u16) + PRIMTYPE(u32) + PRIMTYPE(u64) + PRIMTYPE(u128) + PRIMTYPE(u1) + PRIMTYPE(ptr) + PRIMTYPE(ref) + PRIMTYPE(a32) + PRIMTYPE(a64) + PRIMTYPE(f32) + PRIMTYPE(f64) + PRIMTYPE(f128) + PRIMTYPE(c64) + PRIMTYPE(c128) +#ifdef DYNAMICLANG + PRIMTYPE(simplestr) + PRIMTYPE(simpleobj) + PRIMTYPE(dynany) + PRIMTYPE(dynundef) + PRIMTYPE(dynnull) + PRIMTYPE(dynbool) + PRIMTYPE(dyni32) + PRIMTYPE(dynstr) + PRIMTYPE(dynobj) + PRIMTYPE(dynf64) + PRIMTYPE(dynf32) + PRIMTYPE(dynnone) +#endif + PRIMTYPE(constStr) + PRIMTYPE(gen) + PRIMTYPE(agg) + PRIMTYPE(v2i64) + PRIMTYPE(v4i32) + PRIMTYPE(v8i16) + PRIMTYPE(v16i8) + PRIMTYPE(v2u64) + PRIMTYPE(v4u32) + PRIMTYPE(v8u16) + PRIMTYPE(v16u8) + PRIMTYPE(v2f64) + PRIMTYPE(v4f32) + PRIMTYPE(v2i32) + PRIMTYPE(v4i16) + PRIMTYPE(v8i8) + PRIMTYPE(v2u32) + PRIMTYPE(v4u16) + PRIMTYPE(v8u8) + PRIMTYPE(v2f32) + PRIMTYPE(reservedpty1) + PRIMTYPE(reservedpty2) + PRIMTYPE(reservedpty3) + PRIMTYPE(reservedpty4) + PRIMTYPE(reservedpty5) + PRIMTYPE(reservedpty6) + PRIMTYPE(reservedpty7) + PRIMTYPE(reservedpty8) + PRIMTYPE(reservedpty9) + PRIMTYPE(reservedpty10) + PRIMTYPE(unknown) +#endif // ~LOAD_ALGO_PRIMARY_TYPE + + +#ifdef LOAD_PRIMARY_TYPE_PROPERTY +#undef LOAD_PRIMARY_TYPE_PROPERTY + +static const PrimitiveTypeProperty PTProperty_begin = { + /*type=*/PTY_begin, /*isInteger=*/false, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +static const PrimitiveTypeProperty PTProperty_void = { + /*type=*/PTY_void, /*isInteger=*/false, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +static const 
PrimitiveTypeProperty PTProperty_i8 = { + /*type=*/PTY_i8, /*isInteger=*/true, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +static const PrimitiveTypeProperty PTProperty_i16 = { + /*type=*/PTY_i16, /*isInteger=*/true, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +static const PrimitiveTypeProperty PTProperty_i32 = { + /*type=*/PTY_i32, /*isInteger=*/true, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +static const PrimitiveTypeProperty PTProperty_i64 = { + /*type=*/PTY_i64, /*isInteger=*/true, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +static const PrimitiveTypeProperty PTProperty_i128 = { + /*type=*/PTY_i128, /*isInteger=*/true, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +static const PrimitiveTypeProperty PTProperty_u8 = { + /*type=*/PTY_u8, /*isInteger=*/true, /*isUnsigned=*/true, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +static const PrimitiveTypeProperty PTProperty_u16 = { + /*type=*/PTY_u16, /*isInteger=*/true, /*isUnsigned=*/true, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +/* isAddress and isPointer are overloaded in getter method for PTProperty_u32 */ +static const PrimitiveTypeProperty PTProperty_u32 = { + /*type=*/PTY_u32, /*isInteger=*/true, /*isUnsigned=*/true, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +/* isAddress and isPointer are overloaded in getter method for PTProperty_64 */ +static const PrimitiveTypeProperty PTProperty_u64 = { + /*type=*/PTY_u64, /*isInteger=*/true, /*isUnsigned=*/true, /*isAddress=*/true, /*isFloat=*/false, + /*isPointer=*/true, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +static const PrimitiveTypeProperty PTProperty_u128 = { + /*type=*/PTY_u128, /*isInteger=*/true, /*isUnsigned=*/true, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +static const PrimitiveTypeProperty PTProperty_u1 = { + /*type=*/PTY_u1, /*isInteger=*/true, /*isUnsigned=*/true, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +static const PrimitiveTypeProperty PTProperty_ptr = { + /*type=*/PTY_ptr, /*isInteger=*/true, /*isUnsigned=*/true, /*isAddress=*/true, /*isFloat=*/false, + 
/*isPointer=*/true, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +static const PrimitiveTypeProperty PTProperty_ref = { + /*type=*/PTY_ref, /*isInteger=*/true, /*isUnsigned=*/true, /*isAddress=*/true, /*isFloat=*/false, + /*isPointer=*/true, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +static const PrimitiveTypeProperty PTProperty_a32 = { + /*type=*/PTY_a32, /*isInteger=*/true, /*isUnsigned=*/true, /*isAddress=*/true, /*isFloat=*/false, + /*isPointer=*/true, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +static const PrimitiveTypeProperty PTProperty_a64 = { + /*type=*/PTY_a64, /*isInteger=*/true, /*isUnsigned=*/true, /*isAddress=*/true, /*isFloat=*/false, + /*isPointer=*/true, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +static const PrimitiveTypeProperty PTProperty_f32 = { + /*type=*/PTY_f32, /*isInteger=*/false, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/true, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +static const PrimitiveTypeProperty PTProperty_f64 = { + /*type=*/PTY_f64, /*isInteger=*/false, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/true, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +static const PrimitiveTypeProperty PTProperty_f128 = { + /*type=*/PTY_f128, /*isInteger=*/false, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/true, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +static const PrimitiveTypeProperty PTProperty_c64 = { + /*type=*/PTY_c64, /*isInteger=*/false, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +static const PrimitiveTypeProperty PTProperty_c128 = { + /*type=*/PTY_c128, /*isInteger=*/false, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +#ifdef DYNAMICLANG +static const PrimitiveTypeProperty PTProperty_simplestr = { + /*type=*/PTY_simplestr, /*isInteger=*/false, /*isUnsigned=*/false, /*isAddress=*/true, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/true, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +static const PrimitiveTypeProperty PTProperty_simpleobj = { + /*type=*/PTY_simpleobj, /*isInteger=*/false, /*isUnsigned=*/false, /*isAddress=*/true, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/true, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +static const PrimitiveTypeProperty PTProperty_dynany = { + /*type=*/PTY_dynany, /*isInteger=*/false, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/true, /*isDynamicAny=*/true, /*isDynamicNone=*/false, + /*isVector*/false +}; + +static const PrimitiveTypeProperty PTProperty_dynundef = { + /*type=*/PTY_dynundef, /*isInteger=*/false, 
/*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/true, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +static const PrimitiveTypeProperty PTProperty_dynnull = { + /*type=*/PTY_dynnull, /*isInteger=*/false, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/true, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +static const PrimitiveTypeProperty PTProperty_dynbool = { + /*type=*/PTY_dynbool, /*isInteger=*/true, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/true, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +static const PrimitiveTypeProperty PTProperty_dyni32 = { + /*type=*/PTY_dyni32, /*isInteger=*/true, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/true, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +static const PrimitiveTypeProperty PTProperty_dynstr = { + /*type=*/PTY_dynstr, /*isInteger=*/false, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/true, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +static const PrimitiveTypeProperty PTProperty_dynobj = { + /*type=*/PTY_dynobj, /*isInteger=*/false, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/true, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +static const PrimitiveTypeProperty PTProperty_dynf64 = { + /*type=*/PTY_dynf64, /*isInteger=*/false, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/true, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/true, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +static const PrimitiveTypeProperty PTProperty_dynf32 = { + /*type=*/PTY_dynf32, /*isInteger=*/false, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/true, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/true, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +static const PrimitiveTypeProperty PTProperty_dynnone = { + /*type=*/PTY_dynnone, /*isInteger=*/false, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/true, /*isDynamicAny=*/false, /*isDynamicNone=*/true, + /*isVector*/false +}; +#endif // ~DYNAMICLANG + +static const PrimitiveTypeProperty PTProperty_constStr = { + /*type=*/PTY_constStr, /*isInteger=*/false, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/true, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +static const PrimitiveTypeProperty PTProperty_gen = { + /*type=*/PTY_gen, /*isInteger=*/false, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +static const PrimitiveTypeProperty PTProperty_agg = { + /*type=*/PTY_agg, /*isInteger=*/false, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +static const 
PrimitiveTypeProperty PTProperty_v2i64 = { + /*type=*/PTY_v2i64, /*isInteger=*/true, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/true +}; + +static const PrimitiveTypeProperty PTProperty_v4i32 = { + /*type=*/PTY_v4i32, /*isInteger=*/true, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/true +}; + +static const PrimitiveTypeProperty PTProperty_v8i16 = { + /*type=*/PTY_v8i16, /*isInteger=*/true, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/true +}; + +static const PrimitiveTypeProperty PTProperty_v16i8 = { + /*type=*/PTY_v16i8, /*isInteger=*/true, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/true +}; + +static const PrimitiveTypeProperty PTProperty_v2u64 = { + /*type=*/PTY_v2u64, /*isInteger=*/true, /*isUnsigned=*/true, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/true +}; + +static const PrimitiveTypeProperty PTProperty_v4u32 = { + /*type=*/PTY_v4u32, /*isInteger=*/true, /*isUnsigned=*/true, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/true +}; + +static const PrimitiveTypeProperty PTProperty_v8u16 = { + /*type=*/PTY_v8u16, /*isInteger=*/true, /*isUnsigned=*/true, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/true +}; + +static const PrimitiveTypeProperty PTProperty_v16u8 = { + /*type=*/PTY_v16u8, /*isInteger=*/true, /*isUnsigned=*/true, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/true +}; + +static const PrimitiveTypeProperty PTProperty_v2f64 = { + /*type=*/PTY_v2f64, /*isInteger=*/false, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/true, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/true +}; + +static const PrimitiveTypeProperty PTProperty_v4f32 = { + /*type=*/PTY_v4f32, /*isInteger=*/false, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/true, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/true +}; + +static const PrimitiveTypeProperty PTProperty_v2i32 = { + /*type=*/PTY_v2i32, /*isInteger=*/true, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/true +}; + +static const PrimitiveTypeProperty PTProperty_v4i16 = { + /*type=*/PTY_v4i16, /*isInteger=*/true, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + 
/*isVector*/true +}; + +static const PrimitiveTypeProperty PTProperty_v8i8 = { + /*type=*/PTY_v8i8, /*isInteger=*/true, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/true +}; + +static const PrimitiveTypeProperty PTProperty_v2u32 = { + /*type=*/PTY_v2u32, /*isInteger=*/true, /*isUnsigned=*/true, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/true +}; + +static const PrimitiveTypeProperty PTProperty_v4u16 = { + /*type=*/PTY_v4u16, /*isInteger=*/true, /*isUnsigned=*/true, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/true +}; + +static const PrimitiveTypeProperty PTProperty_v8u8 = { + /*type=*/PTY_v8u8, /*isInteger=*/true, /*isUnsigned=*/true, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/true +}; + +static const PrimitiveTypeProperty PTProperty_v2f32 = { + /*type=*/PTY_v2f32, /*isInteger=*/false, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/true, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/true +}; + +static const PrimitiveTypeProperty PTProperty_reservedpty1 = { + /*type=*/PTY_reservedpty1, /*isInteger=*/false, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +static const PrimitiveTypeProperty PTProperty_reservedpty2 = { + /*type=*/PTY_reservedpty2, /*isInteger=*/false, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +static const PrimitiveTypeProperty PTProperty_reservedpty3 = { + /*type=*/PTY_reservedpty3, /*isInteger=*/false, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +static const PrimitiveTypeProperty PTProperty_reservedpty4 = { + /*type=*/PTY_reservedpty4, /*isInteger=*/false, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +static const PrimitiveTypeProperty PTProperty_reservedpty5 = { + /*type=*/PTY_reservedpty5, /*isInteger=*/false, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +static const PrimitiveTypeProperty PTProperty_reservedpty6 = { + /*type=*/PTY_reservedpty6, /*isInteger=*/false, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +static const PrimitiveTypeProperty PTProperty_reservedpty7 = { + /*type=*/PTY_reservedpty7, /*isInteger=*/false, /*isUnsigned=*/false, 
/*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +static const PrimitiveTypeProperty PTProperty_reservedpty8 = { + /*type=*/PTY_reservedpty8, /*isInteger=*/false, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +static const PrimitiveTypeProperty PTProperty_reservedpty9 = { + /*type=*/PTY_reservedpty9, /*isInteger=*/false, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +static const PrimitiveTypeProperty PTProperty_reservedpty10 = { + /*type=*/PTY_reservedpty10, /*isInteger=*/false, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +static const PrimitiveTypeProperty PTProperty_unknown = { + /*type=*/PTY_unknown, /*isInteger=*/false, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/false +}; + +static const PrimitiveTypeProperty PTProperty_end = { + /*type=*/PTY_end, /*isInteger=*/false, /*isUnsigned=*/false, /*isAddress=*/false, /*isFloat=*/false, + /*isPointer=*/false, /*isSimple=*/false, /*isDynamic=*/false, /*isDynamicAny=*/false, /*isDynamicNone=*/false, + /*isVector*/true +}; + +#endif // ~LOAD_PRIMARY_TYPE_PROPERTY diff --git a/src/mapleall/maple_ir/include/prim_types.h b/src/mapleall/maple_ir/include/prim_types.h new file mode 100644 index 0000000000000000000000000000000000000000..c5cb29bbc7a1665c38ed350d45457be19b408acd --- /dev/null +++ b/src/mapleall/maple_ir/include/prim_types.h @@ -0,0 +1,65 @@ +/* + * Copyright (c) [2019] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
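Note: prim_types.def is an X-macro file; a consumer defines LOAD_ALGO_PRIMARY_TYPE (or LOAD_PRIMARY_TYPE_PROPERTY) plus the PRIMTYPE macro before including it. A sketch of the assumed expansion pattern follows; the real PrimType enum is defined elsewhere in maple_ir and is not part of this excerpt:

// Sketch only: expand the PRIMTYPE list into an enum.
enum PrimType {
  PTY_begin,
#define LOAD_ALGO_PRIMARY_TYPE
#define PRIMTYPE(P) PTY_##P,
#include "prim_types.def"
#undef PRIMTYPE
  PTY_end
};

The .def itself #undefs each guard after use, so one inclusion expands exactly one of the two guarded sections.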
+ */ +#ifndef MAPLE_IR_INCLUDE_PRIM_TYPES_H +#define MAPLE_IR_INCLUDE_PRIM_TYPES_H +#include "types_def.h" +#include "cfg_primitive_types.h" + +namespace maple { +class PrimitiveType { + public: + explicit PrimitiveType(PrimType type) : property(GetPrimitiveTypeProperty(type)) {} + ~PrimitiveType() = default; + + PrimType GetType() const { + return property.type; + } + + bool IsInteger() const { + return property.IsInteger(); + } + bool IsUnsigned() const { + return property.IsUnsigned(); + } + bool IsAddress() const { + return property.IsAddress(); + } + bool IsFloat() const { + return property.IsFloat(); + } + bool IsPointer() const { + return property.IsPointer(); + } + bool IsDynamic() const { + return property.IsDynamic(); + } + bool IsSimple() const { + return property.IsSimple(); + } + bool IsDynamicAny() const { + return property.IsDynamicAny(); + } + bool IsDynamicNone() const { + return property.IsDynamicNone(); + } + bool IsVector() const { + return property.IsVector(); + } + + private: + const PrimitiveTypeProperty &property; +}; +} // namespace maple +#endif // MAPLE_IR_INCLUDE_PRIM_TYPES_H diff --git a/src/mapleall/maple_ir/include/printing.h b/src/mapleall/maple_ir/include/printing.h new file mode 100644 index 0000000000000000000000000000000000000000..305932b74732e6c3ace2a2df665d169b65d216e5 --- /dev/null +++ b/src/mapleall/maple_ir/include/printing.h @@ -0,0 +1,24 @@ +/* + * Copyright (c) [2019] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLE_IR_INCLUDE_PRINTING_H +#define MAPLE_IR_INCLUDE_PRINTING_H +#include +#include "types_def.h" + +namespace maple { +void PrintIndentation(int32 indent); +void PrintString(const std::string &str); +} // namespace maple +#endif // MAPLE_IR_INCLUDE_PRINTING_H diff --git a/src/mapleall/maple_ir/include/simplifyintrinsics.def b/src/mapleall/maple_ir/include/simplifyintrinsics.def new file mode 100644 index 0000000000000000000000000000000000000000..f6078c25fb2e7d2bbf5a742d020d0b6c3899c79b --- /dev/null +++ b/src/mapleall/maple_ir/include/simplifyintrinsics.def @@ -0,0 +1,31 @@ +/* + * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan PSL v1. + * You can use this software according to the terms and conditions of the Mulan PSL v1. + * You may obtain a copy of Mulan PSL v1 at: + * + * http://license.coscl.org.cn/MulanPSL + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v1 for more details. 
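Note: PrimitiveType above is a thin wrapper that forwards every query to the shared PrimitiveTypeProperty table. A minimal usage sketch, assuming PTY_i32 is visible through the PRIMTYPE expansion and GetPrimitiveTypeProperty comes in via cfg_primitive_types.h:

// Sketch only: classify a primitive type through the wrapper.
maple::PrimitiveType ty(maple::PTY_i32);
if (ty.IsInteger() && !ty.IsUnsigned() && !ty.IsVector()) {
  // handle a signed scalar integer
}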
+ */ + +/* INTRINSIC(STR, NAME) */ +DEF_MIR_INTRINSIC(GET_AND_ADDI, "Lsun_2Fmisc_2FUnsafe_3B_7CgetAndAddInt_7C_28Ljava_2Flang_2FObject_3BJI_29I",\ + DEFAULT_NUM_INSN, INTRNISJAVA | INTRNISSPECIAL, kArgTyI32, kArgTyRef, kArgTyRef, kArgTyI64, kArgTyI32) +DEF_MIR_INTRINSIC(GET_AND_ADDL, "Lsun_2Fmisc_2FUnsafe_3B_7CgetAndAddLong_7C_28Ljava_2Flang_2FObject_3BJJ_29J",\ + DEFAULT_NUM_INSN, INTRNISJAVA | INTRNISSPECIAL, kArgTyI64, kArgTyRef, kArgTyRef, kArgTyI64, kArgTyI64) +DEF_MIR_INTRINSIC(GET_AND_SETI, "Lsun_2Fmisc_2FUnsafe_3B_7CgetAndSetInt_7C_28Ljava_2Flang_2FObject_3BJI_29I",\ + DEFAULT_NUM_INSN, INTRNISJAVA | INTRNISSPECIAL, kArgTyI32, kArgTyRef, kArgTyRef, kArgTyI64, kArgTyI32) +DEF_MIR_INTRINSIC(GET_AND_SETL, "Lsun_2Fmisc_2FUnsafe_3B_7CgetAndSetLong_7C_28Ljava_2Flang_2FObject_3BJJ_29J",\ + DEFAULT_NUM_INSN, INTRNISJAVA | INTRNISSPECIAL, kArgTyI64, kArgTyRef, kArgTyRef, kArgTyI64, kArgTyI64) +DEF_MIR_INTRINSIC(COMP_AND_SWAPI, "Lsun_2Fmisc_2FUnsafe_3B_7CcompareAndSwapInt_7C_28Ljava_2Flang_2FObject_3BJII_29Z",\ + DEFAULT_NUM_INSN, INTRNISJAVA | INTRNISSPECIAL, kArgTyU1, kArgTyRef, kArgTyRef, kArgTyI64, kArgTyI32, kArgTyI32) +DEF_MIR_INTRINSIC(COMP_AND_SWAPL, "Lsun_2Fmisc_2FUnsafe_3B_7CcompareAndSwapLong_7C_28Ljava_2Flang_2FObject_3BJJJ_29Z",\ + DEFAULT_NUM_INSN, INTRNISJAVA | INTRNISSPECIAL, kArgTyU1, kArgTyRef, kArgTyRef, kArgTyI64, kArgTyI64, kArgTyI64) +DEF_MIR_INTRINSIC(STR_INDEXOF, "Ljava_2Flang_2FString_3B_7CindexOf_7C_28Ljava_2Flang_2FString_3B_29I",\ + DEFAULT_NUM_INSN, INTRNISJAVA | INTRNISSPECIAL, kArgTyI32, kArgTyRef, kArgTyRef) + diff --git a/src/mapleall/maple_ir/include/src_position.h b/src/mapleall/maple_ir/include/src_position.h new file mode 100644 index 0000000000000000000000000000000000000000..49804960bba4f8f24c545f6d98689949d61925d7 --- /dev/null +++ b/src/mapleall/maple_ir/include/src_position.h @@ -0,0 +1,200 @@ +/* + * Copyright (c) [2021-2022] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
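Note: the DEF_MIR_INTRINSIC entries above are expanded by whoever defines the macro before including the .def. A hypothetical consumer for illustration only (the array name and choice of fields kept are made up):

// Sketch only: collect the mangled Java signatures from the .def entries.
static const char *kSimplifiedIntrinsicNames[] = {
#define DEF_MIR_INTRINSIC(NAME, JAVANAME, NUMINSN, FLAGS, ...) JAVANAME,
#include "simplifyintrinsics.def"
#undef DEF_MIR_INTRINSIC
};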
+ */ +#ifndef MAPLE_IR_INCLUDE_SRC_POSITION_H +#define MAPLE_IR_INCLUDE_SRC_POSITION_H +#include "mpl_logging.h" + +namespace maple { +// to store source position information +class SrcPosition { + public: + SrcPosition() : lineNum(0), mplLineNum(0) { + u.fileColumn.fileNum = 0; + u.fileColumn.column = 0; + u.word0 = 0; + } + SrcPosition(uint16 fnum, uint32 lnum, uint16 cnum, uint32 mlnum) + : lineNum(lnum), + mplLineNum(mlnum) { + u.fileColumn.fileNum = fnum; + u.fileColumn.column = cnum; + } + + virtual ~SrcPosition() = default; + + uint32 RawData() const { + return u.word0; + } + + uint16 FileNum() const { + return u.fileColumn.fileNum; + } + + uint16 Column() const { + return u.fileColumn.column; + } + + uint32 LineNum() const { + return lineNum; + } + + uint32 MplLineNum() const { + return mplLineNum; + } + + void SetFileNum(uint16 n) { + u.fileColumn.fileNum = n; + } + + void SetColumn(uint16 n) { + u.fileColumn.column = n; + } + + void SetLineNum(uint32 n) { + lineNum = n; + } + + void SetRawData(uint32 n) { + u.word0 = n; + } + + void SetMplLineNum(uint32 n) { + mplLineNum = n; + } + + void CondSetLineNum(uint32 n) { + lineNum = lineNum != 0 ? lineNum : n; + } + + void CondSetFileNum(uint16 n) { + uint16 i = u.fileColumn.fileNum; + u.fileColumn.fileNum = i != 0 ? i : n; + } + + void UpdateWith(const SrcPosition pos) { + u.fileColumn.fileNum = pos.FileNum(); + u.fileColumn.column = pos.Column(); + lineNum = pos.LineNum(); + mplLineNum = pos.MplLineNum(); + } + + bool IsSameFile(const SrcPosition &pos) const { + return pos.FileNum() == FileNum(); + } + + // as you read: pos0->IsBf(pos) "pos0 Is Before pos" + bool IsBf(const SrcPosition &pos) const { + return (pos.FileNum() == FileNum() && + ((LineNum() < pos.LineNum()) || + ((LineNum() == pos.LineNum()) && (Column() < pos.Column())))); + } + + bool IsBfMpl(const SrcPosition &pos) const { + return (pos.FileNum() == FileNum() && + ((MplLineNum() < pos.MplLineNum()) || + ((MplLineNum() == pos.MplLineNum()) && (Column() < pos.Column())))); + } + + bool IsEq(const SrcPosition &pos) const { + return FileNum() == pos.FileNum() && LineNum() == pos.LineNum() && Column() == pos.Column(); + } + + bool IsBfOrEq(const SrcPosition &pos) const { + return IsBf(pos) || IsEq(pos); + } + + bool IsEqMpl(const SrcPosition &pos) const { + return MplLineNum() == pos.MplLineNum(); + } + + bool IsValid() const { + return FileNum() != 0; + } + + void DumpLoc(uint32 &lastLineNum, uint16 &lastColumnNum) const { + if (FileNum() != 0 && LineNum() != 0) { + if (Column() != 0 && (LineNum() != lastLineNum || Column() != lastColumnNum)) { + Dump(); + lastLineNum = LineNum(); + lastColumnNum = Column(); + } else if (LineNum() != lastLineNum) { + DumpLine(); + lastLineNum = LineNum(); + } + } + } + + void DumpLine() const { + LogInfo::MapleLogger() << "LOC " << FileNum() << " " << LineNum() << '\n'; + } + + void Dump() const { + LogInfo::MapleLogger() << "LOC " << FileNum() << " " << LineNum() << " " << Column() << '\n'; + } + + std::string DumpLocWithColToString() const { + std::stringstream ss; + ss << "LOC " << FileNum() << " " << LineNum() << " " << Column(); + return ss.str(); + } + + Loc GetSrcLoc() const { + Loc loc(static_cast(FileNum()), static_cast(LineNum()), static_cast(Column())); + return loc; + } + + void SetInlinedLineNum(uint32 line) { + inlinedLineNum = line; + } + + void SetInlinedFileNum(uint32 line) { + inlinedFileNum = line; + } + + void SetInlinedFuncStrIdx(const GStrIdx &idx) { + strIdx = idx; + } + + uint32 GetInlinedLineNum() const { + return 
inlinedLineNum; + } + + uint32 GetInlinedFileNum() const { + return inlinedFileNum; + } + + const GStrIdx &GetInlinedFuncStrIdx() const { + return strIdx; + } + + private: + union { + struct { + uint16 fileNum; + uint16 column : 12; + uint16 stmtBegin : 1; + uint16 bbBegin : 1; + uint16 unused : 2; + } fileColumn; + uint32 word0; + } u; + uint32 lineNum; // line number of original src file, like foo.java + uint32 mplLineNum; // line number of mpl file + uint32 inlinedLineNum = 0; + uint32 inlinedFileNum = 0; + GStrIdx strIdx; +}; +} // namespace maple +#endif // MAPLE_IR_INCLUDE_SRC_POSITION_H diff --git a/src/mapleall/maple_ir/include/tokens.h b/src/mapleall/maple_ir/include/tokens.h new file mode 100644 index 0000000000000000000000000000000000000000..f78767e13b7191c58815a085a0e9d38f4d4ab251 --- /dev/null +++ b/src/mapleall/maple_ir/include/tokens.h @@ -0,0 +1,63 @@ +/* + * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLE_IR_INCLUDE_TOKENS_H +#define MAPLE_IR_INCLUDE_TOKENS_H + +namespace maple { +enum TokenKind { + TK_invalid, +// keywords from this file +#define KEYWORD(STR) TK_##STR, +#include "keywords.def" +#undef KEYWORD + // non-keywords starting here + // constants + TK_intconst, + TK_floatconst, + TK_doubleconst, + // local name + TK_lname, + // global name + TK_gname, + // function name + TK_fname, + // pseudo register + TK_preg, + // special register + TK_specialreg, + // parent field + TK_prntfield, + // type parameter name + TK_typeparam, + // misc. + TK_newline, + TK_lparen, // ( + TK_rparen, // ) + TK_lbrace, // { + TK_rbrace, // } + TK_lbrack, // [ + TK_rbrack, // ] + TK_langle, // < + TK_rangle, // > + TK_eqsign, // = + TK_coma, // , + TK_dotdotdot, // ... + TK_colon, // : + TK_asterisk, // * + TK_string, // a literal string enclosed between " + TK_eof +}; +} // namespace maple +#endif // MAPLE_IR_INCLUDE_TOKENS_H diff --git a/src/mapleall/maple_ir/include/types_def.h b/src/mapleall/maple_ir/include/types_def.h new file mode 100644 index 0000000000000000000000000000000000000000..fa5f01ade2afd3bbc4272ecf4e21c636cd15393d --- /dev/null +++ b/src/mapleall/maple_ir/include/types_def.h @@ -0,0 +1,158 @@ +/* + * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLE_IR_INCLUDE_TYPES_DEF_H +#define MAPLE_IR_INCLUDE_TYPES_DEF_H + +// NOTE: Since we already committed to -std=c++0x, we should eventually use the +// standard definitions in the and headers rather than +// reinventing our own primitive types. 
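Note: a small sketch of the SrcPosition ordering helpers defined above; the file, line, and column values are made up:

// Sketch only: IsBf() orders positions within the same file.
maple::SrcPosition a(/*fnum=*/1, /*lnum=*/10, /*cnum=*/4, /*mlnum=*/0);
maple::SrcPosition b(/*fnum=*/1, /*lnum=*/10, /*cnum=*/9, /*mlnum=*/0);
bool before = a.IsBf(b);    // true: same file and line, column 4 < 9
bool same = a.IsEq(b);      // false: columns differ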
+#include +#include +#include +#include "mpl_number.h" + +namespace maple { +// Let's keep the following definitions so that existing code will continue to work. +using int8 = std::int8_t; +using int16 = std::int16_t; +using int32 = std::int32_t; +using int64 = std::int64_t; +using uint8 = std::uint8_t; +using uint16 = std::uint16_t; +using uint32 = std::uint32_t; +using uint64 = std::uint64_t; +class StIdx { // scope nesting level + symbol table index + public: + union un { + struct { + uint32 idx : 24; + uint8 scope; // scope level, with the global scope is at level 1 + } scopeIdx; + + uint32 fullIdx; + }; + + StIdx() { + u.fullIdx = 0; + } + + StIdx(uint32 level, uint32 i) { + u.scopeIdx.scope = level; + u.scopeIdx.idx = i; + } + + explicit StIdx(uint32 fidx) { + u.fullIdx = fidx; + } + + ~StIdx() = default; + + uint32 Idx() const { + return u.scopeIdx.idx; + } + + void SetIdx(uint32 idx) { + u.scopeIdx.idx = idx; + } + + uint32 Scope() const { + return u.scopeIdx.scope; + } + + void SetScope(uint32 scpe) { + u.scopeIdx.scope = static_cast(scpe); + } + + uint32 FullIdx() const { + return u.fullIdx; + } + + void SetFullIdx(uint32 idx) { + u.fullIdx = idx; + } + + bool Islocal() const { + return u.scopeIdx.scope > 1; + } + + bool IsGlobal() const { + return u.scopeIdx.scope == 1; + } + + bool operator==(const StIdx &x) const { + return u.fullIdx == x.u.fullIdx; + } + + bool operator!=(const StIdx &x) const { + return !(*this == x); + } + + bool operator<(const StIdx &x) const { + return u.fullIdx < x.u.fullIdx; + } + + private: + un u; +}; + +using LabelIdx = uint32; +using phyRegIdx = uint64; +using OfstRegIdx = uint64; +using LabelIDOrder = uint32; +using PUIdx = uint32; +using PregIdx = int32; +using ExprIdx = int32; +using FieldID = int32; + +class TypeTag; +using TyIdx = utils::Index; // global type table index + +class GStrTag; +using GStrIdx = utils::Index; // global string table index + +class UStrTag; +using UStrIdx = utils::Index; // user string table index (from the conststr opcode) + +class U16StrTag; +using U16StrIdx = utils::Index; // user string table index (from the conststr opcode) + +const TyIdx kInitTyIdx = TyIdx(0); +const TyIdx kNoneTyIdx = TyIdx(UINT32_MAX); + +enum SSALevel : uint8 { + kSSAInvalid = 0x00, + kSSATopLevel = 0x01, // ssa only for local top-level is valid + kSSAAddrTaken = 0x02, // ssa only for addr-taken is valid + kSSAMemory = kSSATopLevel | kSSAAddrTaken, // ssa for both top-level and addr-taken is valid + kSSAHSSA = 0x04, // hssa is valid +}; + +constexpr uint8 kOperandNumUnary = 1; +constexpr uint8 kOperandNumBinary = 2; +constexpr uint8 kOperandNumTernary = 3; +} // namespace maple +namespace std { +template<> // function-template-specialization +class hash { + public: + size_t operator()(const maple::StIdx &x) const { + std::size_t seed = 0; + hash_combine(seed, x.Scope()); + hash_combine(seed, x.Idx()); + return seed; + } +}; +} +#endif // MAPLE_IR_INCLUDE_TYPES_DEF_H diff --git a/src/mapleall/maple_ir/include/unary_op.def b/src/mapleall/maple_ir/include/unary_op.def new file mode 100644 index 0000000000000000000000000000000000000000..7c7b072e1c4d86b6fa8e5a48320f2fb7592d09bf --- /dev/null +++ b/src/mapleall/maple_ir/include/unary_op.def @@ -0,0 +1,27 @@ +/* + * Copyright (c) [2019] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan PSL v1. + * You can use this software according to the terms and conditions of the Mulan PSL v1. 
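Note: StIdx above packs an 8-bit scope level together with a 24-bit table index; scope 1 is the global scope and anything deeper is function-local. A minimal sketch with arbitrary index values:

// Sketch only: build and classify symbol-table indices.
maple::StIdx globalSym(1, 42);   // global symbol table, slot 42
maple::StIdx localSym(2, 7);     // local scope, slot 7
bool g = globalSym.IsGlobal();   // true
bool l = localSym.Islocal();     // true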
+ * You may obtain a copy of Mulan PSL v1 at: + * + * http://license.coscl.org.cn/MulanPSL + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v1 for more details. + */ +UNARYOP(abs) +UNARYOP(bnot) +UNARYOP(lnot) +UNARYOP(neg) +UNARYOP(recip) +UNARYOP(sqrt) +UNARYOP(sext) +UNARYOP(zext) +UNARYOP(extractbits) +UNARYOP(alloca) +UNARYOP(malloc) +UNARYOP(gcmallocjarray) +UNARYOP(gcpermallocjarray) diff --git a/src/mapleall/maple_ir/include/verification.h b/src/mapleall/maple_ir/include/verification.h new file mode 100644 index 0000000000000000000000000000000000000000..3c0c3eff813f5c1d566bbe389455942e8f623535 --- /dev/null +++ b/src/mapleall/maple_ir/include/verification.h @@ -0,0 +1,127 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEIR_VERIFICATION_PHASE_H +#define MAPLEIR_VERIFICATION_PHASE_H +#include "class_hierarchy.h" +#include "verify_pragma_info.h" + +namespace maple { +using ClassVerifyPragmas = MapleUnorderedMap>; + +class VerifyResult { + public: + VerifyResult(const MIRModule &module, const KlassHierarchy &klassHierarchy, MemPool &memPool) + : module(module), + klassHierarchy(klassHierarchy), + allocator(&memPool), + classesCorrectness(allocator.Adapter()), + classesPragma(allocator.Adapter()) {} + + ~VerifyResult() = default; + + const KlassHierarchy &GetKlassHierarchy() const { + return klassHierarchy; + } + + const MIRModule &GetMIRModule() const { + return module; + } + + const MIRFunction *GetCurrentFunction() const { + return module.GetFunctionList().front(); + } + + const std::string &GetCurrentClassName() const { + return GetCurrentFunction()->GetClassType()->GetName(); + } + + const ClassVerifyPragmas &GetDeferredClassesPragma() const { + return classesPragma; + } + + void AddPragmaVerifyError(const std::string &className, std::string errMsg); + void AddPragmaAssignableCheck(const std::string &className, std::string fromType, std::string toType); + void AddPragmaExtendFinalCheck(const std::string &className); + void AddPragmaOverrideFinalCheck(const std::string &className); + + const MapleUnorderedMap &GetResultMap() const { + return classesCorrectness; + } + void SetClassCorrectness(const std::string &className, bool result) { + classesCorrectness[className] = result; + } + + bool HasErrorNotDeferred() const { + for (auto &classResult : classesCorrectness) { + if (!classResult.second) { + if (classesPragma.find(classResult.first) == classesPragma.end()) { + // Verify result is not OK, but has no deferred check or verify error in runtime + return true; + } + } + } + return false; + } + + private: + bool HasVerifyError(const std::vector &pragmaInfoPtrVec) const; + bool HasSamePragmaInfo(const std::vector &pragmaInfoPtrVec, + const VerifyPragmaInfo &verifyPragmaInfo) const; + + const MIRModule &module; + const KlassHierarchy &klassHierarchy; + 
MapleAllocator allocator; + // classesCorrectness, correctness is true only if the class is verified OK + MapleUnorderedMap classesCorrectness; + // classesPragma + ClassVerifyPragmas classesPragma; +}; + +class VerificationPhaseResult : public AnalysisResult { + public: + VerificationPhaseResult(MemPool &mp, const VerifyResult &verifyResult) + : AnalysisResult(&mp), verifyResult(verifyResult) {} + ~VerificationPhaseResult() = default; + + const ClassVerifyPragmas &GetDeferredClassesPragma() const { + return verifyResult.GetDeferredClassesPragma(); + } + + private: + const VerifyResult &verifyResult; +}; + +#ifdef NOT_USED +class DoVerification : public ModulePhase { + public: + explicit DoVerification(ModulePhaseID id) : ModulePhase(id) {} + + AnalysisResult *Run(MIRModule *module, ModuleResultMgr *mgr) override; + std::string PhaseName() const override { + return "verification"; + } + + ~DoVerification() = default; + + private: + void VerifyModule(MIRModule &module, VerifyResult &result) const; + void DeferredCheckFinalClassAndMethod(VerifyResult &result) const; + bool IsLazyBindingOrDecouple(const KlassHierarchy &klassHierarchy) const; + bool NeedRuntimeFinalCheck(const KlassHierarchy &klassHierarchy, const std::string &className) const; + void CheckExtendFinalClass(VerifyResult &result) const; +}; +#endif +} // namespace maple +#endif // MAPLEIR_VERIFICATION_PHASE_H diff --git a/src/mapleall/maple_ir/include/verify_annotation.h b/src/mapleall/maple_ir/include/verify_annotation.h new file mode 100644 index 0000000000000000000000000000000000000000..ac19180d4801351b8255db63885113742cecda2a --- /dev/null +++ b/src/mapleall/maple_ir/include/verify_annotation.h @@ -0,0 +1,29 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEIR_VERIFY_ANNOTATION_H +#define MAPLEIR_VERIFY_ANNOTATION_H +#include "mir_module.h" +#include "mir_type.h" +#include "verify_pragma_info.h" + +namespace maple { +void AddVerfAnnoThrowVerifyError(MIRModule &md, const ThrowVerifyErrorPragma &info, MIRStructType &clsType); +void AddVerfAnnoAssignableCheck(MIRModule &md, + std::vector &info, + MIRStructType &clsType); +void AddVerfAnnoExtendFinalCheck(MIRModule &md, MIRStructType &clsType); +void AddVerfAnnoOverrideFinalCheck(MIRModule &md, MIRStructType &clsType); +} // namespace maple +#endif // MAPLEALL_VERIFY_ANNOTATION_H \ No newline at end of file diff --git a/src/mapleall/maple_ir/include/verify_mark.h b/src/mapleall/maple_ir/include/verify_mark.h new file mode 100644 index 0000000000000000000000000000000000000000..6fb72a498e3f0cb0b53c52faf056f53e526d8c72 --- /dev/null +++ b/src/mapleall/maple_ir/include/verify_mark.h @@ -0,0 +1,39 @@ +/* + * Copyright (c) [2020-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
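Note: a hedged sketch of the intent behind the VerifyResult bookkeeping above. It assumes a VerifyResult named result built by the surrounding phase, and that AddPragmaExtendFinalCheck registers the class in the deferred-pragma map (its implementation is not in this header):

// Sketch only: a failed class with a deferred pragma is not a hard error.
result.SetClassCorrectness("LMyClass_3B", false);
result.AddPragmaExtendFinalCheck("LMyClass_3B");
bool mustAbort = result.HasErrorNotDeferred();  // false: check deferred to runtime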
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#ifndef MAPLEALL_VERIFY_MARK_H +#define MAPLEALL_VERIFY_MARK_H +#include "class_hierarchy.h" +#include "verify_pragma_info.h" + +namespace maple { +#ifdef NOT_USED +class DoVerifyMark : public ModulePhase { + public: + explicit DoVerifyMark(ModulePhaseID id) : ModulePhase(id) {} + + AnalysisResult *Run(MIRModule *module, ModuleResultMgr *mgr) override; + + std::string PhaseName() const override { + return "verifymark"; + } + + ~DoVerifyMark() override = default; + + private: + void AddAnnotations(MIRModule &module, const Klass &klass, const std::vector &pragmaInfoVec); +}; +#endif +} // namespace maple +#endif // MAPLEALL_VERIFY_MARK_H \ No newline at end of file diff --git a/src/mapleall/maple_ir/include/verify_pragma_info.h b/src/mapleall/maple_ir/include/verify_pragma_info.h new file mode 100644 index 0000000000000000000000000000000000000000..7fe43caf005d35f8f0457cc1e62b944ab2f7f9c8 --- /dev/null +++ b/src/mapleall/maple_ir/include/verify_pragma_info.h @@ -0,0 +1,119 @@ +/* + * Copyright (c) [2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#ifndef MAPLEIR_VERIFY_PRAGMA_INFO_H +#define MAPLEIR_VERIFY_PRAGMA_INFO_H +#include +#include + +namespace maple { +enum PragmaInfoType { + kThrowVerifyError, + kAssignableCheck, + kExtendFinalCheck, + kOverrideFinalCheck +}; + +class VerifyPragmaInfo { + public: + VerifyPragmaInfo() = default;; + virtual ~VerifyPragmaInfo() = default; + + virtual PragmaInfoType GetPragmaType() const = 0; + bool IsEqualTo(const VerifyPragmaInfo &pragmaInfo) const { + return GetPragmaType() == pragmaInfo.GetPragmaType(); + } + bool IsVerifyError() const { + return GetPragmaType() == kThrowVerifyError; + } + bool IsAssignableCheck() const { + return GetPragmaType() == kAssignableCheck; + } + bool IsExtendFinalCheck() const { + return GetPragmaType() == kExtendFinalCheck; + } + bool IsOverrideFinalCheck() const { + return GetPragmaType() == kOverrideFinalCheck; + } +}; + +class ThrowVerifyErrorPragma : public VerifyPragmaInfo { + public: + explicit ThrowVerifyErrorPragma(std::string errorMessage) + : VerifyPragmaInfo(), + errorMessage(std::move(errorMessage)) {} + ~ThrowVerifyErrorPragma() = default; + + PragmaInfoType GetPragmaType() const override { + return kThrowVerifyError; + } + + const std::string &GetMessage() const { + return errorMessage; + } + + private: + std::string errorMessage; +}; + +class AssignableCheckPragma : public VerifyPragmaInfo { + public: + AssignableCheckPragma(std::string fromType, std::string toType) + : VerifyPragmaInfo(), + fromType(std::move(fromType)), + toType(std::move(toType)) {} + ~AssignableCheckPragma() = default; + + PragmaInfoType GetPragmaType() const override { + return kAssignableCheck; + } + + bool IsEqualTo(const AssignableCheckPragma &pragma) const { + return fromType == pragma.GetFromType() && toType == pragma.GetToType(); + } + + const std::string &GetFromType() const { + return fromType; + } + + const std::string &GetToType() const { + return toType; + } + + private: + std::string fromType; + std::string toType; +}; + +class ExtendFinalCheckPragma : public VerifyPragmaInfo { + public: + ExtendFinalCheckPragma() : VerifyPragmaInfo() {} + ~ExtendFinalCheckPragma() = default; + + PragmaInfoType GetPragmaType() const override { + return kExtendFinalCheck; + } +}; + +class OverrideFinalCheckPragma : public VerifyPragmaInfo { + public: + OverrideFinalCheckPragma() : VerifyPragmaInfo() {} + ~OverrideFinalCheckPragma() = default; + + PragmaInfoType GetPragmaType() const override { + return kOverrideFinalCheck; + } +}; +} // namespace maple +#endif // MAPLEIR_VERIFY_PRAGMA_INFO_H diff --git a/src/mapleall/maple_ir/src/bin_func_export.cpp b/src/mapleall/maple_ir/src/bin_func_export.cpp new file mode 100644 index 0000000000000000000000000000000000000000..7e7f10620275cf6d8e3a07a865444f7bb70dff5a --- /dev/null +++ b/src/mapleall/maple_ir/src/bin_func_export.cpp @@ -0,0 +1,744 @@ +/* + * Copyright (c) [2021] Huawei Technologies Co., Ltd. All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan Permissive Software License v2. + * You can use this software according to the terms and conditions of the MulanPSL - 2.0. + * You may obtain a copy of MulanPSL - 2.0 at: + * + * https://opensource.org/licenses/MulanPSL-2.0 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the MulanPSL - 2.0 for more details. 
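Note: one way to use the pragma-info hierarchy above is to branch on the kind queries before downcasting. A minimal sketch; the ownership style and the type names in the strings are illustrative:

// Sketch only: narrow a base pointer after checking its kind.
#include <memory>
std::unique_ptr<maple::VerifyPragmaInfo> info =
    std::make_unique<maple::AssignableCheckPragma>("LFrom_3B", "LTo_3B");
if (info->IsAssignableCheck()) {
  auto *check = static_cast<maple::AssignableCheckPragma*>(info.get());
  // check->GetFromType() == "LFrom_3B", check->GetToType() == "LTo_3B"
}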
+ */ + +#include +#include +#include "mir_function.h" +#include "opcode_info.h" +#include "mir_pragma.h" +#include "mir_builder.h" +#include "bin_mplt.h" + +using namespace std; +namespace maple { +void BinaryMplExport::OutputInfoVector(const MIRInfoVector &infoVector, const MapleVector &infoVectorIsString) { + if (!mod.IsWithDbgInfo()) { + Write(0); + return; + } + WriteNum(infoVector.size()); + for (uint32 i = 0; i < infoVector.size(); i++) { + OutputStr(infoVector[i].first); + WriteNum(infoVectorIsString[i] ? 1 : 0); + if (!infoVectorIsString[i]) { + WriteNum(infoVector[i].second); + } else { + OutputStr(GStrIdx(infoVector[i].second)); + } + } +} + +void BinaryMplExport::OutputFuncIdInfo(MIRFunction *func) { + WriteNum(kBinFuncIdInfoStart); + WriteNum(func->GetPuidxOrigin()); // the funcid + OutputInfoVector(func->GetInfoVector(), func->InfoIsString()); + if (mod.GetFlavor() == kFlavorLmbc) { + WriteNum(func->GetFrameSize()); + } +} + +void BinaryMplExport::OutputBaseNode(const BaseNode *b) { + Write(static_cast(b->GetOpCode())); + Write(static_cast(b->GetPrimType())); +} + +void BinaryMplExport::OutputLocalSymbol(MIRSymbol *sym) { + std::unordered_map::iterator it = localSymMark.find(sym); + if (it != localSymMark.end()) { + WriteNum(-(it->second)); + return; + } + + CHECK_NULL_FATAL(sym); + WriteNum(kBinSymbol); + OutputStr(sym->GetNameStrIdx()); + WriteNum(sym->GetSKind()); + WriteNum(sym->GetStorageClass()); + size_t mark = localSymMark.size(); + localSymMark[sym] = static_cast(mark); + OutputTypeAttrs(sym->GetAttrs()); + WriteNum(static_cast(sym->GetIsTmp())); + if (sym->GetSKind() == kStVar || sym->GetSKind() == kStFunc) { + OutputSrcPos(sym->GetSrcPosition()); + } + OutputType(sym->GetTyIdx()); + if (sym->GetSKind() == kStPreg) { + OutputPreg(sym->GetPreg()); + } else if (sym->GetSKind() == kStConst || sym->GetSKind() == kStVar) { + OutputConst(sym->GetKonst()); + } else if (sym->GetSKind() == kStFunc) { + OutputFuncViaSym(sym->GetFunction()->GetPuidx()); + } else { + CHECK_FATAL(false, "should not used"); + } +} + +void BinaryMplExport::OutputPreg(MIRPreg *preg) { + if (preg->GetPregNo() < 0) { + WriteNum(kBinSpecialReg); + Write(static_cast(-preg->GetPregNo())); + return; + } + std::unordered_map::iterator it = localPregMark.find(preg); + if (it != localPregMark.end()) { + WriteNum(-(it->second)); + return; + } + + WriteNum(kBinPreg); + Write(static_cast(preg->GetPrimType())); + size_t mark = localPregMark.size(); + localPregMark[preg] = static_cast(mark); +} + +void BinaryMplExport::OutputLabel(LabelIdx lidx) { + std::unordered_map::iterator it = labelMark.find(lidx); + if (it != labelMark.end()) { + WriteNum(-(it->second)); + return; + } + + WriteNum(kBinLabel); + size_t mark = labelMark.size(); + labelMark[lidx] = static_cast(mark); +} + +void BinaryMplExport::OutputLocalTypeNameTab(const MIRTypeNameTable *typeNameTab) { + WriteNum(kBinTypenameStart); + WriteNum(static_cast(typeNameTab->Size())); + for (std::pair it : typeNameTab->GetGStrIdxToTyIdxMap()) { + OutputStr(it.first); + OutputType(it.second); + } +} + +void BinaryMplExport::OutputFormalsStIdx(MIRFunction *func) { + WriteNum(kBinFormalStart); + WriteNum(func->GetFormalDefVec().size()); + for (FormalDef formalDef : func->GetFormalDefVec()) { + OutputLocalSymbol(formalDef.formalSym); + } +} + +void BinaryMplExport::OutputAliasMap(MapleMap &aliasVarMap) { + WriteNum(kBinAliasMapStart); + WriteInt(static_cast(aliasVarMap.size())); + for (std::pair it : aliasVarMap) { + OutputStr(it.first); + 
OutputStr(it.second.mplStrIdx); + WriteNum(static_cast(it.second.atk)); + WriteNum(it.second.index); + OutputStr(it.second.sigStrIdx); + } +} + +void BinaryMplExport::OutputFuncViaSym(PUIdx puIdx) { + MIRFunction *func = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(puIdx); + MIRSymbol *funcSt = GlobalTables::GetGsymTable().GetSymbolFromStidx(func->GetStIdx().Idx()); + OutputSymbol(funcSt); +} + +void BinaryMplExport::OutputExpression(BaseNode *e) { + OutputBaseNode(e); + switch (e->GetOpCode()) { + // leaf + case OP_constval: { + MIRConst *constVal = static_cast(e)->GetConstVal(); + OutputConst(constVal); + return; + } + case OP_conststr: { + UStrIdx strIdx = static_cast(e)->GetStrIdx(); + OutputUsrStr(strIdx); + return; + } + case OP_addroflabel: { + AddroflabelNode *lNode = static_cast(e); + OutputLabel(lNode->GetOffset()); + return; + } + case OP_addroffunc: { + AddroffuncNode *addrNode = static_cast(e); + OutputFuncViaSym(addrNode->GetPUIdx()); + return; + } + case OP_sizeoftype: { + SizeoftypeNode *sot = static_cast(e); + OutputType(sot->GetTyIdx()); + return; + } + case OP_addrof: + case OP_addrofoff: + case OP_dread: + case OP_dreadoff: { + StIdx stIdx; + if (e->GetOpCode() == OP_addrof || e->GetOpCode() == OP_dread) { + AddrofNode *drNode = static_cast(e); + WriteNum(drNode->GetFieldID()); + stIdx = drNode->GetStIdx(); + } else { + DreadoffNode *droff = static_cast(e); + WriteNum(droff->offset); + stIdx = droff->stIdx; + } + WriteNum(stIdx.Scope()); + if (stIdx.Islocal()) { + OutputLocalSymbol(curFunc->GetLocalOrGlobalSymbol(stIdx)); + } else { + OutputSymbol(curFunc->GetLocalOrGlobalSymbol(stIdx)); + } + return; + } + case OP_regread: { + RegreadNode *regreadNode = static_cast(e); + MIRPreg *preg = curFunc->GetPregTab()->PregFromPregIdx(regreadNode->GetRegIdx()); + OutputPreg(preg); + return; + } + case OP_gcmalloc: + case OP_gcpermalloc: + case OP_stackmalloc: { + GCMallocNode *gcNode = static_cast(e); + OutputType(gcNode->GetTyIdx()); + return; + } + // unary + case OP_ceil: + case OP_cvt: + case OP_floor: + case OP_trunc: { + TypeCvtNode *typecvtNode = static_cast(e); + Write(static_cast(typecvtNode->FromType())); + break; + } + case OP_retype: { + RetypeNode *retypeNode = static_cast(e); + OutputType(retypeNode->GetTyIdx()); + break; + } + case OP_iread: + case OP_iaddrof: { + IreadNode *irNode = static_cast(e); + OutputType(irNode->GetTyIdx()); + WriteNum(irNode->GetFieldID()); + break; + } + case OP_ireadoff: { + IreadoffNode *irNode = static_cast(e); + WriteNum(irNode->GetOffset()); + break; + } + case OP_ireadfpoff: { + IreadFPoffNode *irNode = static_cast(e); + WriteNum(irNode->GetOffset()); + break; + } + case OP_sext: + case OP_zext: + case OP_extractbits: { + ExtractbitsNode *extNode = static_cast(e); + Write(extNode->GetBitsOffset()); + Write(extNode->GetBitsSize()); + break; + } + case OP_depositbits: { + DepositbitsNode *dbNode = static_cast(e); + Write(dbNode->GetBitsOffset()); + Write(dbNode->GetBitsSize()); + break; + } + case OP_gcmallocjarray: + case OP_gcpermallocjarray: { + JarrayMallocNode *gcNode = static_cast(e); + OutputType(gcNode->GetTyIdx()); + break; + } + // binary + case OP_sub: + case OP_mul: + case OP_div: + case OP_rem: + case OP_ashr: + case OP_lshr: + case OP_shl: + case OP_max: + case OP_min: + case OP_band: + case OP_bior: + case OP_bxor: + case OP_cand: + case OP_cior: + case OP_land: + case OP_lior: + case OP_add: { + break; + } + case OP_eq: + case OP_ne: + case OP_lt: + case OP_gt: + case OP_le: + case OP_ge: + case OP_cmpg: + 
case OP_cmpl: + case OP_cmp: { + CompareNode *cmpNode = static_cast(e); + Write(static_cast(cmpNode->GetOpndType())); + break; + } + case OP_resolveinterfacefunc: + case OP_resolvevirtualfunc: { + ResolveFuncNode *rsNode = static_cast(e); + OutputFuncViaSym(rsNode->GetPuIdx()); + break; + } + // ternary + case OP_select: { + break; + } + // nary + case OP_array: { + ArrayNode *arrNode = static_cast(e); + OutputType(arrNode->GetTyIdx()); + Write(static_cast(arrNode->GetBoundsCheck())); + WriteNum(static_cast(arrNode->NumOpnds())); + break; + } + case OP_intrinsicop: { + IntrinsicopNode *intrnNode = static_cast(e); + WriteNum(intrnNode->GetIntrinsic()); + WriteNum(static_cast(intrnNode->NumOpnds())); + break; + } + case OP_intrinsicopwithtype: { + IntrinsicopNode *intrnNode = static_cast(e); + WriteNum(intrnNode->GetIntrinsic()); + OutputType(intrnNode->GetTyIdx()); + WriteNum(static_cast(intrnNode->NumOpnds())); + break; + } + default: + break; + } + for (uint32 i = 0; i < e->NumOpnds(); ++i) { + OutputExpression(e->Opnd(i)); + } +} + +static SrcPosition lastOutputSrcPosition; + +void BinaryMplExport::OutputSrcPos(const SrcPosition &pos) { + if (!mod.IsWithDbgInfo()) { + return; + } + if (pos.FileNum() == 0 || pos.LineNum() == 0) { // error case, so output 0 + WriteNum(lastOutputSrcPosition.RawData()); + WriteNum(lastOutputSrcPosition.LineNum()); + return; + } + WriteNum(pos.RawData()); + WriteNum(pos.LineNum()); + lastOutputSrcPosition = pos; +} + +void BinaryMplExport::OutputReturnValues(const CallReturnVector *retv) { + WriteNum(kBinReturnvals); + WriteNum(static_cast(retv->size())); + for (uint32 i = 0; i < retv->size(); i++) { + RegFieldPair rfp = (*retv)[i].second; + if (rfp.IsReg()) { + MIRPreg *preg = curFunc->GetPregTab()->PregFromPregIdx(rfp.GetPregIdx()); + OutputPreg(preg); + } else { + WriteNum(0); + WriteNum((rfp.GetFieldID())); + OutputLocalSymbol(curFunc->GetLocalOrGlobalSymbol((*retv)[i].first)); + } + } +} + +void BinaryMplExport::OutputBlockNode(BlockNode *block) { + WriteNum(kBinNodeBlock); + if (!block->GetStmtNodes().empty()) { + OutputSrcPos(block->GetSrcPos()); + } else { + OutputSrcPos(SrcPosition()); // output 0 + } + uint32 num = 0; + uint64 idx = buf.size(); + ExpandFourBuffSize(); // place holder, Fixup later + for (StmtNode *s = block->GetFirst(); s; s = s->GetNext()) { + bool doneWithOpnds = false; + OutputSrcPos(s->GetSrcPos()); + WriteNum(s->GetOpCode()); + switch (s->GetOpCode()) { + case OP_dassign: + case OP_dassignoff: { + StIdx stIdx; + if (s->GetOpCode() == OP_dassign) { + DassignNode *dass = static_cast(s); + WriteNum(dass->GetFieldID()); + stIdx = dass->GetStIdx(); + } else { + DassignoffNode *dassoff = static_cast(s); + WriteNum(dassoff->GetPrimType()); + WriteNum(dassoff->offset); + stIdx = dassoff->stIdx; + } + WriteNum(stIdx.Scope()); + if (stIdx.Islocal()) { + OutputLocalSymbol(curFunc->GetLocalOrGlobalSymbol(stIdx)); + } else { + OutputSymbol(curFunc->GetLocalOrGlobalSymbol(stIdx)); + } + break; + } + case OP_regassign: { + RegassignNode *rass = static_cast(s); + Write(static_cast(rass->GetPrimType())); + MIRPreg *preg = curFunc->GetPregTab()->PregFromPregIdx(rass->GetRegIdx()); + OutputPreg(preg); + break; + } + case OP_iassign: { + IassignNode *iass = static_cast(s); + OutputType(iass->GetTyIdx()); + WriteNum(iass->GetFieldID()); + break; + } + case OP_iassignoff: { + IassignoffNode *iassoff = static_cast(s); + Write(static_cast(iassoff->GetPrimType())); + WriteNum(iassoff->GetOffset()); + break; + } + case OP_iassignspoff: + case 
OP_iassignfpoff: { + IassignFPoffNode *iassfpoff = static_cast(s); + Write(static_cast(iassfpoff->GetPrimType())); + WriteNum(iassfpoff->GetOffset()); + break; + } + case OP_blkassignoff: { + BlkassignoffNode *bass = static_cast(s); + int32 offsetAlign = (bass->offset << 4) | bass->alignLog2; + WriteNum(offsetAlign); + WriteNum(bass->blockSize); + break; + } + case OP_call: + case OP_virtualcall: + case OP_virtualicall: + case OP_superclasscall: + case OP_interfacecall: + case OP_interfaceicall: + case OP_customcall: + case OP_polymorphiccall: { + CallNode *callnode = static_cast(s); + OutputFuncViaSym(callnode->GetPUIdx()); + if (s->GetOpCode() == OP_polymorphiccall) { + OutputType(static_cast(callnode)->GetTyIdx()); + } + WriteNum(static_cast(s->NumOpnds())); + break; + } + case OP_callassigned: + case OP_virtualcallassigned: + case OP_virtualicallassigned: + case OP_superclasscallassigned: + case OP_interfacecallassigned: + case OP_interfaceicallassigned: + case OP_customcallassigned: { + CallNode *callnode = static_cast(s); + OutputFuncViaSym(callnode->GetPUIdx()); + OutputReturnValues(&callnode->GetReturnVec()); + WriteNum(static_cast(s->NumOpnds())); + break; + } + case OP_polymorphiccallassigned: { + CallNode *callnode = static_cast(s); + OutputFuncViaSym(callnode->GetPUIdx()); + OutputType(callnode->GetTyIdx()); + OutputReturnValues(&callnode->GetReturnVec()); + WriteNum(static_cast(s->NumOpnds())); + break; + } + case OP_icallproto: + case OP_icall: { + IcallNode *icallnode = static_cast(s); + OutputType(icallnode->GetRetTyIdx()); + WriteNum(static_cast(s->NumOpnds())); + break; + } + case OP_icallprotoassigned: + case OP_icallassigned: { + IcallNode *icallnode = static_cast(s); + OutputType(icallnode->GetRetTyIdx()); + OutputReturnValues(&icallnode->GetReturnVec()); + WriteNum(static_cast(s->NumOpnds())); + break; + } + case OP_intrinsiccall: + case OP_xintrinsiccall: { + IntrinsiccallNode *intrnNode = static_cast(s); + WriteNum(intrnNode->GetIntrinsic()); + WriteNum(static_cast(s->NumOpnds())); + break; + } + case OP_intrinsiccallassigned: + case OP_xintrinsiccallassigned: { + IntrinsiccallNode *intrnNode = static_cast(s); + WriteNum(intrnNode->GetIntrinsic()); + OutputReturnValues(&intrnNode->GetReturnVec()); + WriteNum(static_cast(s->NumOpnds())); + break; + } + case OP_intrinsiccallwithtype: { + IntrinsiccallNode *intrnNode = static_cast(s); + WriteNum(intrnNode->GetIntrinsic()); + OutputType(intrnNode->GetTyIdx()); + WriteNum(static_cast(s->NumOpnds())); + break; + } + case OP_intrinsiccallwithtypeassigned: { + IntrinsiccallNode *intrnNode = static_cast(s); + WriteNum(intrnNode->GetIntrinsic()); + OutputType(intrnNode->GetTyIdx()); + OutputReturnValues(&intrnNode->GetReturnVec()); + WriteNum(static_cast(s->NumOpnds())); + break; + } + case OP_syncenter: + case OP_syncexit: + case OP_return: { + WriteNum(static_cast(s->NumOpnds())); + break; + } + case OP_jscatch: + case OP_cppcatch: + case OP_finally: + case OP_endtry: + case OP_cleanuptry: + case OP_retsub: + case OP_membaracquire: + case OP_membarrelease: + case OP_membarstorestore: + case OP_membarstoreload: { + break; + } + case OP_eval: + case OP_throw: + case OP_free: + case OP_decref: + case OP_incref: + case OP_decrefreset: + CASE_OP_ASSERT_NONNULL + case OP_igoto: { + break; + } + case OP_label: { + LabelNode *lNode = static_cast(s); + OutputLabel(lNode->GetLabelIdx()); + break; + } + case OP_goto: + case OP_gosub: { + GotoNode *gtoNode = static_cast(s); + OutputLabel(gtoNode->GetOffset()); + break; + } + case 
OP_brfalse: + case OP_brtrue: { + CondGotoNode *cgotoNode = static_cast(s); + OutputLabel(cgotoNode->GetOffset()); + break; + } + case OP_switch: { + SwitchNode *swNode = static_cast(s); + OutputLabel(swNode->GetDefaultLabel()); + WriteNum(static_cast(swNode->GetSwitchTable().size())); + for (CasePair cpair : swNode->GetSwitchTable()) { + WriteNum(cpair.first); + OutputLabel(cpair.second); + } + break; + } + case OP_rangegoto: { + RangeGotoNode *rgoto = static_cast(s); + WriteNum(rgoto->GetTagOffset()); + WriteNum(static_cast(rgoto->GetRangeGotoTable().size())); + for (SmallCasePair cpair : rgoto->GetRangeGotoTable()) { + WriteNum(cpair.first); + OutputLabel(cpair.second); + } + break; + } + case OP_jstry: { + JsTryNode *tryNode = static_cast(s); + OutputLabel(tryNode->GetCatchOffset()); + OutputLabel(tryNode->GetFinallyOffset()); + break; + } + case OP_cpptry: + case OP_try: { + TryNode *tryNode = static_cast(s); + WriteNum(static_cast(tryNode->GetOffsetsCount())); + for (LabelIdx lidx : tryNode->GetOffsets()) { + OutputLabel(lidx); + } + break; + } + case OP_catch: { + CatchNode *catchNode = static_cast(s); + WriteNum(static_cast(catchNode->GetExceptionTyIdxVec().size())); + for (TyIdx tidx : catchNode->GetExceptionTyIdxVec()) { + OutputType(tidx); + } + break; + } + case OP_comment: { + string str(static_cast(s)->GetComment().c_str()); + WriteAsciiStr(str); + break; + } + case OP_dowhile: + case OP_while: { + WhileStmtNode *whileNode = static_cast(s); + OutputBlockNode(whileNode->GetBody()); + OutputExpression(whileNode->Opnd()); + doneWithOpnds = true; + break; + } + case OP_if: { + IfStmtNode *ifNode = static_cast(s); + bool hasElsePart = ifNode->GetElsePart() != nullptr; + WriteNum(static_cast(hasElsePart)); + OutputBlockNode(ifNode->GetThenPart()); + if (hasElsePart) { + OutputBlockNode(ifNode->GetElsePart()); + } + OutputExpression(ifNode->Opnd()); + doneWithOpnds = true; + break; + } + case OP_block: { + BlockNode *blockNode = static_cast(s); + OutputBlockNode(blockNode); + doneWithOpnds = true; + break; + } + case OP_asm: { + AsmNode *asmNode = static_cast(s); + WriteNum(asmNode->qualifiers); + string str(asmNode->asmString.c_str()); + WriteAsciiStr(str); + // the outputs + size_t count = asmNode->asmOutputs.size(); + WriteNum(static_cast(count)); + for (size_t i = 0; i < count; ++i) { + OutputUsrStr(asmNode->outputConstraints[i]); + } + OutputReturnValues(&asmNode->asmOutputs); + // the clobber list + count = asmNode->clobberList.size(); + WriteNum(static_cast(count)); + for (size_t i = 0; i < count; ++i) { + OutputUsrStr(asmNode->clobberList[i]); + } + // the labels + count = asmNode->gotoLabels.size(); + WriteNum(static_cast(count)); + for (size_t i = 0; i < count; ++i) { + OutputLabel(asmNode->gotoLabels[i]); + } + // the inputs + WriteNum(asmNode->NumOpnds()); + for (uint8 i = 0; i < asmNode->numOpnds; ++i) { + OutputUsrStr(asmNode->inputConstraints[i]); + } + break; + } + default: + CHECK_FATAL(false, "Unhandled opcode %d", s->GetOpCode()); + break; + } + num++; + if (!doneWithOpnds) { + for (uint32 i = 0; i < s->NumOpnds(); ++i) { + OutputExpression(s->Opnd(i)); + } + } + } + Fixup(idx, num); +} + +void BinaryMplExport::WriteFunctionBodyField(uint64 contentIdx, std::unordered_set *dumpFuncSet) { + Fixup(contentIdx, static_cast(buf.size())); + // LogInfo::MapleLogger() << "Write FunctionBody Field " << std::endl; + WriteNum(kBinFunctionBodyStart); + uint64 totalSizeIdx = buf.size(); + ExpandFourBuffSize(); /// total size of this field to ~BIN_FUNCTIONBODY_START + uint64 
outFunctionBodySizeIdx = buf.size(); + ExpandFourBuffSize(); /// size of outFunctionBody + uint32 size = 0; + + if (not2mplt) { + for (MIRFunction *func : GetMIRModule().GetFunctionList()) { + curFunc = func; + if (func->GetAttr(FUNCATTR_optimized)) { + continue; + } + if (func->GetCodeMemPool() == nullptr || func->GetBody() == nullptr) { + continue; + } + if (dumpFuncSet != nullptr && !dumpFuncSet->empty()) { + // output only if this func matches any name in *dumpFuncSet + const std::string &name = func->GetName(); + bool matched = false; + for (std::string elem : *dumpFuncSet) { + if (name.find(elem.c_str()) != string::npos) { + matched = true; + break; + } + } + if (!matched) { + continue; + } + } + localSymMark.clear(); + localSymMark[nullptr] = 0; + localPregMark.clear(); + localPregMark[nullptr] = 0; + labelMark.clear(); + labelMark[0] = 0; + OutputFunction(func->GetPuidx()); + CHECK_FATAL(func->GetBody() != nullptr, "WriteFunctionBodyField: no function body"); + OutputFuncIdInfo(func); + OutputLocalTypeNameTab(func->GetTypeNameTab()); + OutputFormalsStIdx(func); + if (mod.GetFlavor() < kMmpl) { + OutputAliasMap(func->GetAliasVarMap()); + } + lastOutputSrcPosition = SrcPosition(); + OutputBlockNode(func->GetBody()); + size++; + } + } + + Fixup(totalSizeIdx, static_cast(buf.size() - totalSizeIdx)); + Fixup(outFunctionBodySizeIdx, size); + return; +} +} // namespace maple diff --git a/src/mapleall/maple_ir/src/bin_func_import.cpp b/src/mapleall/maple_ir/src/bin_func_import.cpp new file mode 100644 index 0000000000000000000000000000000000000000..58f792bfa4d714d0a03dbfc1f537c7267823224f --- /dev/null +++ b/src/mapleall/maple_ir/src/bin_func_import.cpp @@ -0,0 +1,947 @@ +/* + * Copyright (c) [2021] Huawei Technologies Co., Ltd. All rights reserved. + * + * OpenArkCompiler is licensed under the Mulan Permissive Software License v2. + * You can use this software according to the terms and conditions of the MulanPSL - 2.0. + * You may obtain a copy of MulanPSL - 2.0 at: + * + * https://opensource.org/licenses/MulanPSL-2.0 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the MulanPSL - 2.0 for more details. 
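// OutputBlockNode and WriteFunctionBodyField above reserve a 4-byte slot with
// ExpandFourBuffSize() before the statement count or field size is known, then patch it
// with Fixup() once emission is done. The standalone sketch below shows that
// reserve-then-patch idiom on a plain little-endian byte buffer; ReserveU32, PatchU32 and
// EmitSection are illustrative names for this sketch only, not helpers from this patch.
#include <cstddef>
#include <cstdint>
#include <vector>

static size_t ReserveU32(std::vector<uint8_t> &buf) {
  size_t slot = buf.size();
  buf.insert(buf.end(), 4, 0);  // 4-byte little-endian placeholder, like ExpandFourBuffSize()
  return slot;
}

static void PatchU32(std::vector<uint8_t> &buf, size_t slot, uint32_t value) {
  for (int i = 0; i < 4; ++i) {
    buf[slot + i] = static_cast<uint8_t>((value >> (8 * i)) & 0xFF);  // like Fixup()
  }
}

// Usage mirroring the writer above: reserve the size slot, emit the payload, patch the size.
static void EmitSection(std::vector<uint8_t> &buf, const std::vector<uint8_t> &payload) {
  size_t sizeSlot = ReserveU32(buf);
  buf.insert(buf.end(), payload.begin(), payload.end());
  PatchU32(buf, sizeSlot, static_cast<uint32_t>(buf.size() - sizeSlot));
}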
+ */ + +#include "bin_mpl_export.h" +#include "bin_mpl_import.h" +#include "mir_function.h" +#include "opcode_info.h" +#include "mir_pragma.h" +#include "mir_builder.h" +using namespace std; + +namespace maple { +constexpr uint32 kOffset4bit = 4; +void BinaryMplImport::ImportInfoVector(MIRInfoVector &infoVector, MapleVector &infoVectorIsString) { + int64 size = ReadNum(); + for (int64 i = 0; i < size; ++i) { + GStrIdx gStrIdx = ImportStr(); + bool isstring = (ReadNum() != 0); + infoVectorIsString.push_back(isstring); + if (isstring) { + GStrIdx fieldval = ImportStr(); + infoVector.emplace_back(MIRInfoPair(gStrIdx, fieldval.GetIdx())); + } else { + auto fieldval = static_cast(ReadNum()); + infoVector.emplace_back(MIRInfoPair(gStrIdx, fieldval)); + } + } +} + +void BinaryMplImport::ImportFuncIdInfo(MIRFunction *func) { + int64 tag = ReadNum(); + CHECK_FATAL(tag == kBinFuncIdInfoStart, "kBinFuncIdInfoStart expected"); + func->SetPuidxOrigin(static_cast(ReadNum())); + ImportInfoVector(func->GetInfoVector(), func->InfoIsString()); + if (mod.GetFlavor() == kFlavorLmbc) { + func->SetFrameSize(static_cast(ReadNum())); + } +} + +void BinaryMplImport::ImportBaseNode(Opcode &o, PrimType &typ) { + o = static_cast(Read()); + typ = static_cast(Read()); +} + +MIRSymbol *BinaryMplImport::ImportLocalSymbol(MIRFunction *func) { + int64 tag = ReadNum(); + if (tag == 0) { + return nullptr; + } + if (tag < 0) { + CHECK_FATAL(static_cast(-tag) < localSymTab.size(), "index out of bounds"); + return localSymTab.at(static_cast(-tag)); + } + CHECK_FATAL(tag == kBinSymbol, "expecting kBinSymbol in ImportLocalSymbol()"); + MIRSymbol *sym = func->GetSymTab()->CreateSymbol(kScopeLocal); + localSymTab.push_back(sym); + sym->SetNameStrIdx(ImportStr()); + (void)func->GetSymTab()->AddToStringSymbolMap(*sym); + sym->SetSKind(static_cast(ReadNum())); + sym->SetStorageClass(static_cast(ReadNum())); + sym->SetAttrs(ImportTypeAttrs()); + sym->SetIsTmp(ReadNum() != 0); + if (sym->GetSKind() == kStVar || sym->GetSKind() == kStFunc) { + ImportSrcPos(sym->GetSrcPosition()); + } + sym->SetTyIdx(ImportType()); + if (sym->GetSKind() == kStPreg) { + PregIdx pregidx = ImportPreg(func); + MIRPreg *preg = func->GetPregTab()->PregFromPregIdx(pregidx); + sym->SetPreg(preg); + } else if (sym->GetSKind() == kStConst || sym->GetSKind() == kStVar) { + sym->SetKonst(ImportConst(func)); + } else if (sym->GetSKind() == kStFunc) { + PUIdx puIdx = ImportFuncViaSym(func); + sym->SetFunction(GlobalTables::GetFunctionTable().GetFunctionFromPuidx(puIdx)); + } + return sym; +} + +PregIdx BinaryMplImport::ImportPreg(MIRFunction *func) { + int64 tag = ReadNum(); + if (tag == 0) { + return 0; + } + if (tag == kBinSpecialReg) { + return -Read(); + } + if (tag < 0) { + CHECK_FATAL(static_cast(-tag) < localPregTab.size(), "index out of bounds"); + return localPregTab.at(static_cast(-tag)); + } + CHECK_FATAL(tag == kBinPreg, "expecting kBinPreg in ImportPreg()"); + + PrimType primType = static_cast(Read()); + PregIdx pidx = func->GetPregTab()->CreatePreg(primType); + localPregTab.push_back(pidx); + return pidx; +} + +LabelIdx BinaryMplImport::ImportLabel(MIRFunction *func) { + int64 tag = ReadNum(); + if (tag == 0) { + return 0; + } + if (tag < 0) { + CHECK_FATAL(static_cast(-tag) < localLabelTab.size(), "index out of bounds"); + return localLabelTab.at(static_cast(-tag)); + } + CHECK_FATAL(tag == kBinLabel, "kBinLabel expected in ImportLabel()"); + + LabelIdx lidx = func->GetLabelTab()->CreateLabel(); + localLabelTab.push_back(lidx); + return lidx; +} + +void 
BinaryMplImport::ImportLocalTypeNameTable(MIRTypeNameTable *typeNameTab) { + int64 tag = ReadNum(); + CHECK_FATAL(tag == kBinTypenameStart, "kBinTypenameStart expected in ImportLocalTypeNameTable()"); + int64 size = ReadNum(); + for (int64 i = 0; i < size; ++i) { + GStrIdx strIdx = ImportStr(); + TyIdx tyIdx = ImportType(); + typeNameTab->SetGStrIdxToTyIdx(strIdx, tyIdx); + } +} + +void BinaryMplImport::ImportFormalsStIdx(MIRFunction *func) { + auto tag = ReadNum(); + CHECK_FATAL(tag == kBinFormalStart, "kBinFormalStart expected in ImportFormalsStIdx()"); + auto size = ReadNum(); + for (int64 i = 0; i < size; ++i) { + func->GetFormalDefVec()[static_cast(i)].formalSym = ImportLocalSymbol(func); + } +} + +void BinaryMplImport::ImportAliasMap(MIRFunction *func) { + int64 tag = ReadNum(); + CHECK_FATAL(tag == kBinAliasMapStart, "kBinAliasMapStart expected in ImportAliasMap()"); + int32 size = ReadInt(); + for (int32 i = 0; i < size; ++i) { + MIRAliasVars aliasvars; + GStrIdx strIdx = ImportStr(); + aliasvars.mplStrIdx = ImportStr(); + aliasvars.atk = static_cast(ReadNum()); + aliasvars.index = static_cast(ReadNum()); + (void)ImportStr(); // not assigning to mimic parser + func->GetAliasVarMap()[strIdx] = aliasvars; + } +} + +PUIdx BinaryMplImport::ImportFuncViaSym(MIRFunction *func) { + MIRSymbol *sym = InSymbol(func); + MIRFunction *f = sym->GetFunction(); + return f->GetPuidx(); +} + +BaseNode *BinaryMplImport::ImportExpression(MIRFunction *func) { + Opcode op; + PrimType typ; + ImportBaseNode(op, typ); + switch (op) { + // leaf + case OP_constval: { + MIRConst *constv = ImportConst(func); + ConstvalNode *constNode = mod.CurFuncCodeMemPool()->New(constv); + constNode->SetPrimType(typ); + return constNode; + } + case OP_conststr: { + UStrIdx strIdx = ImportUsrStr(); + ConststrNode *constNode = mod.CurFuncCodeMemPool()->New(typ, strIdx); + constNode->SetPrimType(typ); + return constNode; + } + case OP_addroflabel: { + AddroflabelNode *alabNode = mod.CurFuncCodeMemPool()->New(); + alabNode->SetOffset(ImportLabel(func)); + alabNode->SetPrimType(typ); + (void)func->GetLabelTab()->addrTakenLabels.insert(alabNode->GetOffset()); + return alabNode; + } + case OP_addroffunc: { + PUIdx puIdx = ImportFuncViaSym(func); + MIRFunction *f = GlobalTables::GetFunctionTable().GetFuncTable()[puIdx]; + CHECK_NULL_FATAL(f); + f->GetFuncSymbol()->SetAppearsInCode(true); + AddroffuncNode *addrNode = mod.CurFuncCodeMemPool()->New(typ, puIdx); + return addrNode; + } + case OP_sizeoftype: { + TyIdx tidx = ImportType(); + SizeoftypeNode *sot = mod.CurFuncCodeMemPool()->New(tidx); + return sot; + } + case OP_addrof: + case OP_addrofoff: + case OP_dread: + case OP_dreadoff: { + int32 num = static_cast(ReadNum()); + StIdx stIdx; + stIdx.SetScope(static_cast(ReadNum())); + MIRSymbol *sym = nullptr; + if (stIdx.Islocal()) { + sym = ImportLocalSymbol(func); + CHECK_FATAL(sym != nullptr, "null ptr check"); + } else { + sym = InSymbol(nullptr); + CHECK_FATAL(sym != nullptr, "null ptr check"); + if (op == OP_addrof) { + sym->SetHasPotentialAssignment(); + } + } + stIdx.SetIdx(sym->GetStIdx().Idx()); + if (op == OP_addrof || op == OP_dread) { + AddrofNode *drNode = mod.CurFuncCodeMemPool()->New(op); + drNode->SetPrimType(typ); + drNode->SetStIdx(stIdx); + drNode->SetFieldID(num); + return drNode; + } else { + DreadoffNode *dreadoff = mod.CurFuncCodeMemPool()->New(op); + dreadoff->SetPrimType(typ); + dreadoff->stIdx = stIdx; + dreadoff->offset = num; + return dreadoff; + } + } + case OP_regread: { + RegreadNode *regreadNode = 
mod.CurFuncCodeMemPool()->New(); + regreadNode->SetRegIdx(ImportPreg(func)); + regreadNode->SetPrimType(typ); + return regreadNode; + } + case OP_gcmalloc: + case OP_gcpermalloc: + case OP_stackmalloc: { + TyIdx tyIdx = ImportType(); + GCMallocNode *gcNode = mod.CurFuncCodeMemPool()->New(op, typ, tyIdx); + return gcNode; + } + // unary + case OP_abs: + case OP_bnot: + case OP_lnot: + case OP_neg: + case OP_recip: + case OP_sqrt: + case OP_alloca: + case OP_malloc: { + UnaryNode *unNode = mod.CurFuncCodeMemPool()->New(op, typ); + unNode->SetOpnd(ImportExpression(func), kFirstOpnd); + return unNode; + } + case OP_ceil: + case OP_cvt: + case OP_floor: + case OP_trunc: { + TypeCvtNode *typecvtNode = mod.CurFuncCodeMemPool()->New(op, typ); + typecvtNode->SetFromType(static_cast(Read())); + typecvtNode->SetOpnd(ImportExpression(func), kFirstOpnd); + return typecvtNode; + } + case OP_retype: { + RetypeNode *retypeNode = mod.CurFuncCodeMemPool()->New(typ); + retypeNode->SetTyIdx(ImportType()); + retypeNode->SetOpnd(ImportExpression(func), kFirstOpnd); + return retypeNode; + } + case OP_iread: + case OP_iaddrof: { + IreadNode *irNode = mod.CurFuncCodeMemPool()->New(op, typ); + irNode->SetTyIdx(ImportType()); + irNode->SetFieldID(static_cast(ReadNum())); + irNode->SetOpnd(ImportExpression(func), kFirstOpnd); + return irNode; + } + case OP_ireadoff: { + int32 ofst = static_cast(ReadNum()); + IreadoffNode *irNode = mod.CurFuncCodeMemPool()->New(typ, ofst); + irNode->SetOpnd(ImportExpression(func), kFirstOpnd); + return irNode; + } + case OP_ireadfpoff: { + int32 ofst = static_cast(ReadNum()); + IreadFPoffNode *irNode = mod.CurFuncCodeMemPool()->New(typ, ofst); + return irNode; + } + case OP_sext: + case OP_zext: + case OP_extractbits: { + ExtractbitsNode *extNode = mod.CurFuncCodeMemPool()->New(op, typ); + extNode->SetBitsOffset(Read()); + extNode->SetBitsSize(Read()); + extNode->SetOpnd(ImportExpression(func), kFirstOpnd); + return extNode; + } + case OP_depositbits: { + DepositbitsNode *dbNode = mod.CurFuncCodeMemPool()->New(op, typ); + dbNode->SetBitsOffset(static_cast(ReadNum())); + dbNode->SetBitsSize(static_cast(ReadNum())); + dbNode->SetOpnd(ImportExpression(func), kFirstOpnd); + dbNode->SetOpnd(ImportExpression(func), kSecondOpnd); + return dbNode; + } + case OP_gcmallocjarray: + case OP_gcpermallocjarray: { + JarrayMallocNode *gcNode = mod.CurFuncCodeMemPool()->New(op, typ); + gcNode->SetTyIdx(ImportType()); + gcNode->SetOpnd(ImportExpression(func), kFirstOpnd); + return gcNode; + } + // binary + case OP_sub: + case OP_mul: + case OP_div: + case OP_rem: + case OP_ashr: + case OP_lshr: + case OP_shl: + case OP_max: + case OP_min: + case OP_band: + case OP_bior: + case OP_bxor: + case OP_cand: + case OP_cior: + case OP_land: + case OP_lior: + case OP_add: { + BinaryNode *binNode = mod.CurFuncCodeMemPool()->New(op, typ); + binNode->SetOpnd(ImportExpression(func), kFirstOpnd); + binNode->SetOpnd(ImportExpression(func), kSecondOpnd); + return binNode; + } + case OP_eq: + case OP_ne: + case OP_lt: + case OP_gt: + case OP_le: + case OP_ge: + case OP_cmpg: + case OP_cmpl: + case OP_cmp: { + CompareNode *cmpNode = mod.CurFuncCodeMemPool()->New(op, typ); + cmpNode->SetOpndType(static_cast(Read())); + cmpNode->SetOpnd(ImportExpression(func), kFirstOpnd); + cmpNode->SetOpnd(ImportExpression(func), kSecondOpnd); + return cmpNode; + } + case OP_resolveinterfacefunc: + case OP_resolvevirtualfunc: { + ResolveFuncNode *rsNode = mod.CurFuncCodeMemPool()->New(op, typ); + 
rsNode->SetPUIdx(ImportFuncViaSym(func)); + rsNode->SetOpnd(ImportExpression(func), kFirstOpnd); + rsNode->SetOpnd(ImportExpression(func), kSecondOpnd); + return rsNode; + } + // ternary + case OP_select: { + TernaryNode *tNode = mod.CurFuncCodeMemPool()->New(op, typ); + tNode->SetOpnd(ImportExpression(func), kFirstOpnd); + tNode->SetOpnd(ImportExpression(func), kSecondOpnd); + tNode->SetOpnd(ImportExpression(func), kThirdOpnd); + return tNode; + } + // nary + case OP_array: { + TyIdx tidx = ImportType(); + bool boundsCheck = static_cast(Read()); + ArrayNode *arrNode = + mod.CurFuncCodeMemPool()->New(func->GetCodeMPAllocator(), typ, tidx, boundsCheck); + auto n = static_cast(ReadNum()); + for (uint32 i = 0; i < n; ++i) { + arrNode->GetNopnd().push_back(ImportExpression(func)); + } + arrNode->SetNumOpnds(static_cast(arrNode->GetNopnd().size())); + return arrNode; + } + case OP_intrinsicop: { + IntrinsicopNode *intrnNode = mod.CurFuncCodeMemPool()->New(func->GetCodeMPAllocator(), op, typ); + intrnNode->SetIntrinsic(static_cast(ReadNum())); + auto n = static_cast(ReadNum()); + for (uint32 i = 0; i < n; ++i) { + intrnNode->GetNopnd().push_back(ImportExpression(func)); + } + intrnNode->SetNumOpnds(static_cast(intrnNode->GetNopnd().size())); + return intrnNode; + } + case OP_intrinsicopwithtype: { + IntrinsicopNode *intrnNode = + mod.CurFuncCodeMemPool()->New(func->GetCodeMPAllocator(), OP_intrinsicopwithtype, typ); + intrnNode->SetIntrinsic((MIRIntrinsicID)ReadNum()); + intrnNode->SetTyIdx(ImportType()); + auto n = static_cast(ReadNum()); + for (uint32 i = 0; i < n; ++i) { + intrnNode->GetNopnd().push_back(ImportExpression(func)); + } + intrnNode->SetNumOpnds(static_cast(intrnNode->GetNopnd().size())); + return intrnNode; + } + default: + CHECK_FATAL(false, "Unhandled op %d", op); + break; + } +} + +void BinaryMplImport::ImportSrcPos(SrcPosition &pos) { + if (!mod.IsWithDbgInfo()) { + return; + } + pos.SetRawData(static_cast(ReadNum())); + pos.SetLineNum(static_cast(ReadNum())); +} + +void BinaryMplImport::ImportReturnValues(MIRFunction *func, CallReturnVector *retv) { + int64 tag = ReadNum(); + CHECK_FATAL(tag == kBinReturnvals, "expecting return values"); + auto size = static_cast(ReadNum()); + for (uint32 i = 0; i < size; ++i) { + RegFieldPair rfp; + rfp.SetPregIdx(ImportPreg(func)); + if (rfp.IsReg()) { + retv->push_back(std::make_pair(StIdx(), rfp)); + continue; + } + rfp.SetFieldID(static_cast(ReadNum())); + MIRSymbol *lsym = ImportLocalSymbol(func); + CHECK_FATAL(lsym != nullptr, "null ptr check"); + retv->push_back(std::make_pair(lsym->GetStIdx(), rfp)); + if (lsym->GetName().find("L_STR") == 0) { + MIRType *ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(lsym->GetTyIdx()); + CHECK_FATAL(ty->GetKind() == kTypePointer, "Pointer type expected for L_STR prefix"); + MIRPtrType tempType(static_cast(ty)->GetPointedTyIdx(), PTY_ptr); + TyIdx newTyidx = GlobalTables::GetTypeTable().GetOrCreateMIRType(&tempType); + lsym->SetTyIdx(newTyidx); + } + } +} + +BlockNode *BinaryMplImport::ImportBlockNode(MIRFunction *func) { + int64 tag = ReadNum(); + ASSERT(tag == kBinNodeBlock, "expecting a BlockNode"); + + BlockNode *block = func->GetCodeMemPool()->New(); + Opcode op; + uint8 numOpr; + ImportSrcPos(block->GetSrcPos()); + int32 size = ReadInt(); + for (int32 k = 0; k < size; ++k) { + SrcPosition thesrcPosition; + ImportSrcPos(thesrcPosition); + op = static_cast(ReadNum()); + StmtNode *stmt = nullptr; + switch (op) { + case OP_dassign: + case OP_dassignoff: { + PrimType primType = PTY_void; + if 
(op == OP_dassignoff) { + primType = static_cast(ReadNum()); + } + int32 num = static_cast(ReadNum()); + StIdx stIdx; + stIdx.SetScope(static_cast(ReadNum())); + MIRSymbol *sym = nullptr; + if (stIdx.Islocal()) { + sym = ImportLocalSymbol(func); + CHECK_FATAL(sym != nullptr, "null ptr check"); + } else { + sym = InSymbol(nullptr); + CHECK_FATAL(sym != nullptr, "null ptr check"); + sym->SetHasPotentialAssignment(); + } + stIdx.SetIdx(sym->GetStIdx().Idx()); + if (op == OP_dassign) { + DassignNode *s = func->GetCodeMemPool()->New(); + s->SetStIdx(stIdx); + s->SetFieldID(num); + s->SetOpnd(ImportExpression(func), kFirstOpnd); + stmt = s; + } else { + DassignoffNode *s = func->GetCodeMemPool()->New(); + s->SetPrimType(primType); + s->stIdx = stIdx; + s->offset = num; + s->SetOpnd(ImportExpression(func), kFirstOpnd); + stmt = s; + } + break; + } + case OP_regassign: { + RegassignNode *s = func->GetCodeMemPool()->New(); + s->SetPrimType(static_cast(Read())); + s->SetRegIdx(ImportPreg(func)); + s->SetOpnd(ImportExpression(func), kFirstOpnd); + stmt = s; + break; + } + case OP_iassign: { + IassignNode *s = func->GetCodeMemPool()->New(); + s->SetTyIdx(ImportType()); + s->SetFieldID(static_cast(ReadNum())); + s->SetAddrExpr(ImportExpression(func)); + s->SetRHS(ImportExpression(func)); + stmt = s; + break; + } + case OP_iassignoff: { + IassignoffNode *s = func->GetCodeMemPool()->New(); + s->SetPrimType((PrimType)Read()); + s->SetOffset(static_cast(ReadNum())); + s->SetOpnd(ImportExpression(func), kFirstOpnd); + s->SetOpnd(ImportExpression(func), kSecondOpnd); + stmt = s; + break; + } + case OP_iassignspoff: + case OP_iassignfpoff: { + IassignFPoffNode *s = func->GetCodeMemPool()->New(op); + s->SetPrimType(static_cast(Read())); + s->SetOffset(static_cast(ReadNum())); + s->SetOpnd(ImportExpression(func), kFirstOpnd); + stmt = s; + break; + } + case OP_blkassignoff: { + BlkassignoffNode *s = func->GetCodeMemPool()->New(); + int32 offsetAlign = static_cast(ReadNum()); + s->offset = offsetAlign >> kOffset4bit; + s->alignLog2 = offsetAlign & 0xf; + s->blockSize = static_cast(ReadNum()); + s->SetOpnd(ImportExpression(func), kFirstOpnd); + s->SetOpnd(ImportExpression(func), kSecondOpnd); + stmt = s; + break; + } + case OP_call: + case OP_virtualcall: + case OP_virtualicall: + case OP_superclasscall: + case OP_interfacecall: + case OP_interfaceicall: + case OP_customcall: { + CallNode *s = func->GetCodeMemPool()->New(mod, op); + CHECK_NULL_FATAL(s); + s->SetPUIdx(ImportFuncViaSym(func)); + MIRFunction *f = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(s->GetPUIdx()); + CHECK_NULL_FATAL(f); + f->GetFuncSymbol()->SetAppearsInCode(true); + numOpr = static_cast(ReadNum()); + s->SetNumOpnds(numOpr); + for (int32 i = 0; i < numOpr; ++i) { + s->GetNopnd().push_back(ImportExpression(func)); + } + stmt = s; + break; + } + case OP_callassigned: + case OP_virtualcallassigned: + case OP_virtualicallassigned: + case OP_superclasscallassigned: + case OP_interfacecallassigned: + case OP_interfaceicallassigned: + case OP_customcallassigned: { + CallNode *s = func->GetCodeMemPool()->New(mod, op); + s->SetPUIdx(ImportFuncViaSym(func)); + MIRFunction *f = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(s->GetPUIdx()); + CHECK_NULL_FATAL(f); + f->GetFuncSymbol()->SetAppearsInCode(true); + ImportReturnValues(func, &s->GetReturnVec()); + numOpr = static_cast(ReadNum()); + s->SetNumOpnds(numOpr); + const auto &calleeName = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(s->GetPUIdx())->GetName(); + if 
(calleeName == "setjmp") { + func->SetHasSetjmp(); + } + for (int32 i = 0; i < numOpr; ++i) { + s->GetNopnd().push_back(ImportExpression(func)); + } + stmt = s; + break; + } + case OP_polymorphiccall: { + CallNode *s = func->GetCodeMemPool()->New(mod, op); + CHECK_NULL_FATAL(s); + s->SetPUIdx(ImportFuncViaSym(func)); + MIRFunction *f = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(s->GetPUIdx()); + CHECK_NULL_FATAL(f); + f->GetFuncSymbol()->SetAppearsInCode(true); + s->SetTyIdx(ImportType()); + numOpr = static_cast(ReadNum()); + s->SetNumOpnds(numOpr); + for (int32 i = 0; i < numOpr; ++i) { + s->GetNopnd().push_back(ImportExpression(func)); + } + stmt = s; + break; + } + case OP_polymorphiccallassigned: { + CallNode *s = func->GetCodeMemPool()->New(mod, op); + CHECK_NULL_FATAL(s); + s->SetPUIdx(ImportFuncViaSym(func)); + MIRFunction *f = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(s->GetPUIdx()); + CHECK_NULL_FATAL(f); + f->GetFuncSymbol()->SetAppearsInCode(true); + s->SetTyIdx(ImportType()); + ImportReturnValues(func, &s->GetReturnVec()); + numOpr = static_cast(ReadNum()); + s->SetNumOpnds(numOpr); + for (int32 i = 0; i < numOpr; ++i) { + s->GetNopnd().push_back(ImportExpression(func)); + } + stmt = s; + break; + } + case OP_icallproto: + case OP_icall: { + IcallNode *s = func->GetCodeMemPool()->New(mod, op); + s->SetRetTyIdx(ImportType()); + numOpr = static_cast(ReadNum()); + s->SetNumOpnds(numOpr); + for (int32 i = 0; i < numOpr; ++i) { + s->GetNopnd().push_back(ImportExpression(func)); + } + stmt = s; + break; + } + case OP_icallprotoassigned: + case OP_icallassigned: { + IcallNode *s = func->GetCodeMemPool()->New(mod, op); + s->SetRetTyIdx(ImportType()); + ImportReturnValues(func, &s->GetReturnVec()); + numOpr = static_cast(ReadNum()); + s->SetNumOpnds(numOpr); + for (int32 i = 0; i < numOpr; ++i) { + s->GetNopnd().push_back(ImportExpression(func)); + } + stmt = s; + break; + } + case OP_intrinsiccall: + case OP_xintrinsiccall: { + IntrinsiccallNode *s = func->GetCodeMemPool()->New(mod, op); + s->SetIntrinsic(static_cast(ReadNum())); + numOpr = static_cast(ReadNum()); + s->SetNumOpnds(numOpr); + for (int32 i = 0; i < numOpr; ++i) { + s->GetNopnd().push_back(ImportExpression(func)); + } + stmt = s; + break; + } + case OP_intrinsiccallassigned: + case OP_xintrinsiccallassigned: { + IntrinsiccallNode *s = func->GetCodeMemPool()->New(mod, op); + s->SetIntrinsic((MIRIntrinsicID)ReadNum()); + ImportReturnValues(func, &s->GetReturnVec()); + numOpr = static_cast(ReadNum()); + s->SetNumOpnds(numOpr); + for (int32 i = 0; i < numOpr; ++i) { + s->GetNopnd().push_back(ImportExpression(func)); + } + if (s->GetReturnVec().size() == 1 && s->GetReturnVec()[0].first.Idx() != 0) { + MIRSymbol *retsymbol = func->GetSymTab()->GetSymbolFromStIdx(s->GetReturnVec()[0].first.Idx()); + MIRType *rettype = GlobalTables::GetTypeTable().GetTypeFromTyIdx(retsymbol->GetTyIdx()); + CHECK_FATAL(rettype != nullptr, "rettype is null in MIRParser::ParseStmtIntrinsiccallAssigned"); + s->SetPrimType(rettype->GetPrimType()); + } + stmt = s; + break; + } + case OP_intrinsiccallwithtype: { + IntrinsiccallNode *s = func->GetCodeMemPool()->New(mod, op); + s->SetIntrinsic((MIRIntrinsicID)ReadNum()); + s->SetTyIdx(ImportType()); + numOpr = static_cast(ReadNum()); + s->SetNumOpnds(numOpr); + for (int32 i = 0; i < numOpr; ++i) { + s->GetNopnd().push_back(ImportExpression(func)); + } + stmt = s; + break; + } + case OP_intrinsiccallwithtypeassigned: { + IntrinsiccallNode *s = func->GetCodeMemPool()->New(mod, op); + 
s->SetIntrinsic((MIRIntrinsicID)ReadNum()); + s->SetTyIdx(ImportType()); + ImportReturnValues(func, &s->GetReturnVec()); + numOpr = static_cast(ReadNum()); + s->SetNumOpnds(numOpr); + for (int32 i = 0; i < numOpr; ++i) { + s->GetNopnd().push_back(ImportExpression(func)); + } + if (s->GetReturnVec().size() == 1 && s->GetReturnVec()[0].first.Idx() != 0) { + MIRSymbol *retsymbol = func->GetSymTab()->GetSymbolFromStIdx(s->GetReturnVec()[0].first.Idx()); + MIRType *rettype = GlobalTables::GetTypeTable().GetTypeFromTyIdx(retsymbol->GetTyIdx()); + CHECK_FATAL(rettype != nullptr, "rettype is null in MIRParser::ParseStmtIntrinsiccallAssigned"); + s->SetPrimType(rettype->GetPrimType()); + } + stmt = s; + break; + } + case OP_syncenter: + case OP_syncexit: + case OP_return: { + NaryStmtNode *s = func->GetCodeMemPool()->New(mod, op); + numOpr = static_cast(ReadNum()); + s->SetNumOpnds(numOpr); + for (int32 i = 0; i < numOpr; ++i) { + s->GetNopnd().push_back(ImportExpression(func)); + } + stmt = s; + break; + } + case OP_jscatch: + case OP_cppcatch: + case OP_finally: + case OP_endtry: + case OP_cleanuptry: + case OP_retsub: + case OP_membaracquire: + case OP_membarrelease: + case OP_membarstorestore: + case OP_membarstoreload: { + stmt = mod.CurFuncCodeMemPool()->New(op); + break; + } + case OP_eval: + case OP_throw: + case OP_free: + case OP_decref: + case OP_incref: + case OP_decrefreset: + CASE_OP_ASSERT_NONNULL + case OP_igoto: { + UnaryStmtNode *s = mod.CurFuncCodeMemPool()->New(op); + s->SetOpnd(ImportExpression(func), kFirstOpnd); + stmt = s; + break; + } + case OP_label: { + LabelNode *s = mod.CurFuncCodeMemPool()->New(); + s->SetLabelIdx(ImportLabel(func)); + stmt = s; + break; + } + case OP_goto: + case OP_gosub: { + GotoNode *s = mod.CurFuncCodeMemPool()->New(op); + s->SetOffset(ImportLabel(func)); + stmt = s; + break; + } + case OP_brfalse: + case OP_brtrue: { + CondGotoNode *s = mod.CurFuncCodeMemPool()->New(op); + s->SetOffset(ImportLabel(func)); + s->SetOpnd(ImportExpression(func), kFirstOpnd); + stmt = s; + break; + } + case OP_switch: { + SwitchNode *s = mod.CurFuncCodeMemPool()->New(mod); + s->SetDefaultLabel(ImportLabel(func)); + auto tagSize = static_cast(ReadNum()); + for (uint32 i = 0; i < tagSize; ++i) { + int64 casetag = ReadNum(); + LabelIdx lidx = ImportLabel(func); + CasePair cpair = std::make_pair(casetag, lidx); + s->GetSwitchTable().push_back(cpair); + } + s->SetSwitchOpnd(ImportExpression(func)); + stmt = s; + break; + } + case OP_rangegoto: { + RangeGotoNode *s = mod.CurFuncCodeMemPool()->New(mod); + s->SetTagOffset(static_cast(ReadNum())); + uint32 tagSize = static_cast(ReadNum()); + for (uint32 i = 0; i < tagSize; ++i) { + uint16 casetag = static_cast(ReadNum()); + LabelIdx lidx = ImportLabel(func); + s->AddRangeGoto(casetag, lidx); + } + s->SetOpnd(ImportExpression(func), kFirstOpnd); + stmt = s; + break; + } + case OP_jstry: { + JsTryNode *s = mod.CurFuncCodeMemPool()->New(); + s->SetCatchOffset(ImportLabel(func)); + s->SetFinallyOffset(ImportLabel(func)); + stmt = s; + break; + } + case OP_cpptry: + case OP_try: { + TryNode *s = mod.CurFuncCodeMemPool()->New(mod); + auto numLabels = static_cast(ReadNum()); + for (uint32 i = 0; i < numLabels; ++i) { + s->GetOffsets().push_back(ImportLabel(func)); + } + stmt = s; + break; + } + case OP_catch: { + CatchNode *s = mod.CurFuncCodeMemPool()->New(mod); + auto numTys = static_cast(ReadNum()); + for (uint32 i = 0; i < numTys; ++i) { + s->PushBack(ImportType()); + } + stmt = s; + break; + } + case OP_comment: { + CommentNode 
*s = mod.CurFuncCodeMemPool()->New(mod); + string str; + ReadAsciiStr(str); + s->SetComment(str); + stmt = s; + break; + } + case OP_dowhile: + case OP_while: { + WhileStmtNode *s = mod.CurFuncCodeMemPool()->New(op); + s->SetBody(ImportBlockNode(func)); + s->SetOpnd(ImportExpression(func), kFirstOpnd); + stmt = s; + break; + } + case OP_if: { + IfStmtNode *s = mod.CurFuncCodeMemPool()->New(); + bool hasElsePart = (static_cast(ReadNum()) != kFirstOpnd); + s->SetThenPart(ImportBlockNode(func)); + if (hasElsePart) { + s->SetElsePart(ImportBlockNode(func)); + s->SetNumOpnds(kOperandNumTernary); + } + s->SetOpnd(ImportExpression(func), kFirstOpnd); + stmt = s; + break; + } + case OP_block: { + stmt = ImportBlockNode(func); + break; + } + case OP_asm: { + AsmNode *s = mod.CurFuncCodeMemPool()->New(&mod.GetCurFuncCodeMPAllocator()); + mod.CurFunction()->SetHasAsm(); + s->qualifiers = static_cast(ReadNum()); + string str; + ReadAsciiStr(str); + s->asmString = str; + // the outputs + auto count = static_cast(ReadNum()); + UStrIdx strIdx; + for (size_t i = 0; i < count; ++i) { + strIdx = ImportUsrStr(); + s->outputConstraints.push_back(strIdx); + } + ImportReturnValues(func, &s->asmOutputs); + // the clobber list + count = static_cast(ReadNum()); + for (size_t i = 0; i < count; ++i) { + strIdx = ImportUsrStr(); + s->clobberList.push_back(strIdx); + } + // the labels + count = static_cast(ReadNum()); + for (size_t i = 0; i < count; ++i) { + LabelIdx lidx = ImportLabel(func); + s->gotoLabels.push_back(lidx); + } + // the inputs + numOpr = static_cast(ReadNum()); + s->SetNumOpnds(numOpr); + for (int32 i = 0; i < numOpr; ++i) { + strIdx = ImportUsrStr(); + s->inputConstraints.push_back(strIdx); + const std::string &inStr = GlobalTables::GetUStrTable().GetStringFromStrIdx(strIdx); + if (inStr[0] == '+') { + s->SetHasWriteInputs(); + } + } + for (int32 i = 0; i < numOpr; ++i) { + s->GetNopnd().push_back(ImportExpression(func)); + } + stmt = s; + break; + } + default: + CHECK_FATAL(false, "Unhandled opcode tag %d", tag); + break; + } + stmt->SetSrcPos(thesrcPosition); + block->AddStatement(stmt); + } + if (func != nullptr) { + func->SetBody(block); + } + return block; +} + +void BinaryMplImport::ReadFunctionBodyField() { + (void)ReadInt(); /// skip total size + int32 size = ReadInt(); + for (int64 i = 0; i < size; ++i) { + PUIdx puIdx = ImportFunction(); + MIRFunction *fn = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(puIdx); + CHECK_NULL_FATAL(fn); + mod.SetCurFunction(fn); + fn->GetFuncSymbol()->SetAppearsInCode(true); + localSymTab.clear(); + localSymTab.push_back(nullptr); + localPregTab.clear(); + localPregTab.push_back(0); + localLabelTab.clear(); + localLabelTab.push_back(0); + + fn->AllocSymTab(); + fn->AllocPregTab(); + fn->AllocTypeNameTab(); + fn->AllocLabelTab(); + + ImportFuncIdInfo(fn); + ImportLocalTypeNameTable(fn->GetTypeNameTab()); + ImportFormalsStIdx(fn); + if (mod.GetFlavor() < kMmpl) { + ImportAliasMap(fn); + } + (void)ImportBlockNode(fn); + mod.AddFunction(fn); + } + return; +} +} // namespace maple diff --git a/src/mapleall/maple_ir/src/bin_mpl_export.cpp b/src/mapleall/maple_ir/src/bin_mpl_export.cpp new file mode 100644 index 0000000000000000000000000000000000000000..0054948e965286fba38cee355b3b93e81e8f2c40 --- /dev/null +++ b/src/mapleall/maple_ir/src/bin_mpl_export.cpp @@ -0,0 +1,1368 @@ +/* + * Copyright (c) [2019-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. 
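// The local-table importers above (ImportLocalSymbol, ImportPreg, ImportLabel) and the
// exporter's mark maps (localSymMark, localPregMark, labelMark, gStrMark, symMark) share
// one back-reference convention: 0 means "none", a negative value is an index into the
// table of entities already materialized in the current scope, and a positive kBin* tag
// introduces a new entity whose payload follows and which is appended to the table.
// Below is a minimal, self-contained sketch of the import side of that convention over a
// toy number stream; ToyImporter and ImportName are illustrative names for this sketch
// only, not types or functions from this patch.
#include <cstddef>
#include <cstdint>
#include <string>
#include <vector>

struct ToyImporter {
  std::vector<int64_t> nums;              // pre-decoded numbers, in read order
  size_t pos = 0;
  std::vector<std::string> localTab{""};  // slot 0 reserved, like localSymTab/localPregTab

  int64_t ReadNum() { return nums[pos++]; }

  // Mirrors the ImportLocalSymbol/ImportPreg control flow: 0 -> none, negative ->
  // back-reference to an earlier entry, positive tag -> read the payload and append it.
  std::string ImportName() {
    int64_t tag = ReadNum();
    if (tag == 0) {
      return "";
    }
    if (tag < 0) {
      return localTab.at(static_cast<size_t>(-tag));
    }
    std::string fresh = "name" + std::to_string(ReadNum());  // payload of the new entry
    localTab.push_back(fresh);
    return fresh;
  }
};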
+ * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "bin_mpl_export.h" +#include +#include +#include "mir_function.h" +#include "namemangler.h" +#include "opcode_info.h" +#include "mir_pragma.h" +#include "bin_mplt.h" +#include "factory.h" + +namespace { +using namespace maple; +/* Storage location of field */ +constexpr uint32 kFirstField = 0; +constexpr uint32 kSecondField = 1; +constexpr uint32 kThirdField = 2; +constexpr uint32 kFourthField = 3; +constexpr int32 kFourthFieldInt = 3; +constexpr uint32 kFifthField = 4; +constexpr int32 kSixthFieldInt = 5; + +using OutputConstFactory = FunctionFactory; +using OutputTypeFactory = FunctionFactory; + +void OutputConstInt(const MIRConst &constVal, BinaryMplExport &mplExport) { + mplExport.WriteNum(kBinKindConstInt); + mplExport.OutputConstBase(constVal); + mplExport.WriteNum(static_cast(constVal).GetExtValue()); +} + +void OutputConstAddrof(const MIRConst &constVal, BinaryMplExport &mplExport) { + const MIRAddrofConst &addrof = static_cast(constVal); + if (addrof.GetSymbolIndex().IsGlobal()) { + mplExport.WriteNum(kBinKindConstAddrof); + } else { + mplExport.WriteNum(kBinKindConstAddrofLocal); + } + mplExport.OutputConstBase(constVal); + if (addrof.GetSymbolIndex().IsGlobal()) { + mplExport.OutputSymbol(mplExport.GetMIRModule().CurFunction()->GetLocalOrGlobalSymbol(addrof.GetSymbolIndex())); + } else { + mplExport.OutputLocalSymbol(mplExport.curFunc->GetLocalOrGlobalSymbol(addrof.GetSymbolIndex())); + } + mplExport.WriteNum(addrof.GetFieldID()); + mplExport.WriteNum(addrof.GetOffset()); +} + +void OutputConstAddrofFunc(const MIRConst &constVal, BinaryMplExport &mplExport) { + mplExport.WriteNum(kBinKindConstAddrofFunc); + mplExport.OutputConstBase(constVal); + const auto &newConst = static_cast(constVal); + mplExport.OutputFunction(newConst.GetValue()); +} + +void OutputConstLbl(const MIRConst &constVal, BinaryMplExport &mplExport) { + mplExport.WriteNum(kBinKindConstAddrofLabel); + mplExport.OutputConstBase(constVal); + const MIRLblConst &lblConst = static_cast(constVal); + mplExport.OutputLabel(lblConst.GetValue()); +} + +void OutputConstStr(const MIRConst &constVal, BinaryMplExport &mplExport) { + mplExport.WriteNum(kBinKindConstStr); + mplExport.OutputConstBase(constVal); + const auto &newConst = static_cast(constVal); + mplExport.OutputUsrStr(newConst.GetValue()); +} + +void OutputConstStr16(const MIRConst &constVal, BinaryMplExport &mplExport) { + mplExport.WriteNum(kBinKindConstStr16); + mplExport.OutputConstBase(constVal); + const auto &mirStr16 = static_cast(constVal); + std::u16string str16 = GlobalTables::GetU16StrTable().GetStringFromStrIdx(mirStr16.GetValue()); + std::string str; + (void)namemangler::UTF16ToUTF8(str, str16); + mplExport.WriteNum(str.length()); + for (char c : str) { + mplExport.Write(static_cast(c)); + } +} + +void OutputConstFloat(const MIRConst &constVal, BinaryMplExport &mplExport) { + mplExport.WriteNum(kBinKindConstFloat); + mplExport.OutputConstBase(constVal); + const auto &newConst = static_cast(constVal); + mplExport.WriteNum(newConst.GetIntValue()); +} + +void OutputConstDouble(const MIRConst &constVal, 
BinaryMplExport &mplExport) { + mplExport.WriteNum(kBinKindConstDouble); + mplExport.OutputConstBase(constVal); + const auto &newConst = static_cast(constVal); + mplExport.WriteNum(newConst.GetIntValue()); +} + +void OutputConstAgg(const MIRConst &constVal, BinaryMplExport &mplExport) { + mplExport.WriteNum(kBinKindConstAgg); + mplExport.OutputConstBase(constVal); + const auto &aggConst = static_cast(constVal); + size_t size = aggConst.GetConstVec().size(); + mplExport.WriteNum(size); + for (size_t i = 0; i < size; ++i) { + mplExport.WriteNum(aggConst.GetFieldIdItem(i)); + mplExport.OutputConst(aggConst.GetConstVecItem(i)); + } +} + +void OutputConstSt(MIRConst &constVal, BinaryMplExport &mplExport) { + mplExport.WriteNum(kBinKindConstSt); + mplExport.OutputConstBase(constVal); + auto &stConst = static_cast(constVal); + size_t size = stConst.GetStVec().size(); + mplExport.WriteNum(size); + for (size_t i = 0; i < size; ++i) { + mplExport.OutputSymbol(stConst.GetStVecItem(i)); + } + size = stConst.GetStOffsetVec().size(); + mplExport.WriteNum(size); + for (size_t i = 0; i < size; ++i) { + mplExport.WriteNum(stConst.GetStOffsetVecItem(i)); + } +} + +static bool InitOutputConstFactory() { + RegisterFactoryFunction(kConstInt, OutputConstInt); + RegisterFactoryFunction(kConstAddrof, OutputConstAddrof); + RegisterFactoryFunction(kConstAddrofFunc, OutputConstAddrofFunc); + RegisterFactoryFunction(kConstLblConst, OutputConstLbl); + RegisterFactoryFunction(kConstStrConst, OutputConstStr); + RegisterFactoryFunction(kConstStr16Const, OutputConstStr16); + RegisterFactoryFunction(kConstFloatConst, OutputConstFloat); + RegisterFactoryFunction(kConstDoubleConst, OutputConstDouble); + RegisterFactoryFunction(kConstAggConst, OutputConstAgg); + RegisterFactoryFunction(kConstStConst, OutputConstSt); + return true; +} + +void OutputTypeScalar(const MIRType &ty, BinaryMplExport &mplExport) { + mplExport.WriteNum(kBinKindTypeScalar); + mplExport.OutputTypeBase(ty); +} + +void OutputTypePointer(const MIRType &ty, BinaryMplExport &mplExport) { + const auto &type = static_cast(ty); + mplExport.WriteNum(kBinKindTypePointer); + mplExport.OutputTypeBase(type); + mplExport.OutputTypeAttrs(type.GetTypeAttrs()); + mplExport.OutputType(type.GetPointedTyIdx()); +} + +void OutputTypeByName(const MIRType &ty, BinaryMplExport &mplExport) { + mplExport.WriteNum(kBinKindTypeByName); + mplExport.OutputTypeBase(ty); +} + +void OutputTypeFArray(const MIRType &ty, BinaryMplExport &mplExport) { + const auto &type = static_cast(ty); + mplExport.WriteNum(kBinKindTypeFArray); + mplExport.OutputTypeBase(type); + mplExport.OutputType(type.GetElemTyIdx()); +} + +void OutputTypeJArray(const MIRType &ty, BinaryMplExport &mplExport) { + const auto &type = static_cast(ty); + mplExport.WriteNum(kBinKindTypeJarray); + mplExport.OutputTypeBase(type); + mplExport.OutputType(type.GetElemTyIdx()); +} + +void OutputTypeArray(const MIRType &ty, BinaryMplExport &mplExport) { + const auto &type = static_cast(ty); + mplExport.WriteNum(kBinKindTypeArray); + mplExport.OutputTypeBase(type); + mplExport.WriteNum(type.GetDim()); + for (uint16 i = 0; i < type.GetDim(); ++i) { + mplExport.WriteNum(type.GetSizeArrayItem(i)); + } + mplExport.OutputType(type.GetElemTyIdx()); + mplExport.OutputTypeAttrs(type.GetTypeAttrs()); +} + +void OutputTypeFunction(const MIRType &ty, BinaryMplExport &mplExport) { + const auto &type = static_cast(ty); + mplExport.WriteNum(kBinKindTypeFunction); + mplExport.OutputTypeBase(type); + mplExport.OutputType(type.GetRetTyIdx()); + 
mplExport.WriteNum(static_cast(type.funcAttrs.GetAttrFlag())); + size_t size = type.GetParamTypeList().size(); + mplExport.WriteNum(size); + for (size_t i = 0; i < size; ++i) { + mplExport.OutputType(type.GetNthParamType(i)); + } + size = type.GetParamAttrsList().size(); + mplExport.WriteNum(size); + for (size_t i = 0; i < size; ++i) { + mplExport.OutputTypeAttrs(type.GetNthParamAttrs(i)); + } +} + +void OutputTypeParam(const MIRType &ty, BinaryMplExport &mplExport) { + const auto &type = static_cast(ty); + mplExport.WriteNum(kBinKindTypeParam); + mplExport.OutputTypeBase(type); +} + +void OutputTypeInstantVector(const MIRType &ty, BinaryMplExport &mplExport) { + const auto &type = static_cast(ty); + mplExport.WriteNum(kBinKindTypeInstantVector); + mplExport.OutputTypeBase(type); + mplExport.WriteNum(ty.GetKind()); + mplExport.OutputTypePairs(type); +} + +void OutputTypeGenericInstant(const MIRType &ty, BinaryMplExport &mplExport) { + const auto &type = static_cast(ty); + mplExport.WriteNum(kBinKindTypeGenericInstant); + mplExport.OutputTypeBase(type); + mplExport.OutputTypePairs(type); + mplExport.OutputType(type.GetGenericTyIdx()); +} + +void OutputTypeBitField(const MIRType &ty, BinaryMplExport &mplExport) { + const auto &type = static_cast(ty); + mplExport.WriteNum(kBinKindTypeBitField); + mplExport.OutputTypeBase(type); + mplExport.WriteNum(type.GetFieldSize()); +} + +// for Struct/StructIncomplete/Union +void OutputTypeStruct(const MIRType &ty, BinaryMplExport &mplExport) { + const auto &type = static_cast(ty); + mplExport.WriteNum(kBinKindTypeStruct); + mplExport.OutputTypeBase(type); + MIRTypeKind kind = ty.GetKind(); + if (type.IsImported()) { + CHECK_FATAL(ty.GetKind() != kTypeUnion, "Must be."); + kind = kTypeStructIncomplete; + } + mplExport.WriteNum(kind); + mplExport.OutputTypeAttrs(type.GetTypeAttrs()); + if (kind != kTypeStructIncomplete) { + mplExport.OutputStructTypeData(type); + } +} + +void OutputTypeClass(const MIRType &ty, BinaryMplExport &mplExport) { + const auto &type = static_cast(ty); + mplExport.WriteNum(kBinKindTypeClass); + mplExport.OutputTypeBase(type); + MIRTypeKind kind = ty.GetKind(); + if (type.IsImported()) { + kind = kTypeClassIncomplete; + } + mplExport.WriteNum(kind); + if (kind != kTypeClassIncomplete) { + mplExport.OutputStructTypeData(type); + mplExport.OutputClassTypeData(type); + } +} + +void OutputTypeInterface(const MIRType &ty, BinaryMplExport &mplExport) { + const auto &type = static_cast(ty); + mplExport.WriteNum(kBinKindTypeInterface); + mplExport.OutputTypeBase(type); + MIRTypeKind kind = ty.GetKind(); + if (type.IsImported()) { + kind = kTypeInterfaceIncomplete; + } + mplExport.WriteNum(kind); + if (kind != kTypeInterfaceIncomplete) { + mplExport.OutputStructTypeData(type); + mplExport.OutputInterfaceTypeData(type); + } +} + +void OutputTypeConstString(const MIRType &ty, BinaryMplExport&) { + ASSERT(false, "Type's kind not yet implemented: %d", ty.GetKind()); + (void)ty; +} + +static bool InitOutputTypeFactory() { + RegisterFactoryFunction(kTypeScalar, OutputTypeScalar); + RegisterFactoryFunction(kTypePointer, OutputTypePointer); + RegisterFactoryFunction(kTypeByName, OutputTypeByName); + RegisterFactoryFunction(kTypeFArray, OutputTypeFArray); + RegisterFactoryFunction(kTypeJArray, OutputTypeJArray); + RegisterFactoryFunction(kTypeArray, OutputTypeArray); + RegisterFactoryFunction(kTypeFunction, OutputTypeFunction); + RegisterFactoryFunction(kTypeParam, OutputTypeParam); + RegisterFactoryFunction(kTypeInstantVector, 
OutputTypeInstantVector); + RegisterFactoryFunction(kTypeGenericInstant, OutputTypeGenericInstant); + RegisterFactoryFunction(kTypeBitField, OutputTypeBitField); + RegisterFactoryFunction(kTypeStruct, OutputTypeStruct); + RegisterFactoryFunction(kTypeStructIncomplete, OutputTypeStruct); + RegisterFactoryFunction(kTypeUnion, OutputTypeStruct); + RegisterFactoryFunction(kTypeClass, OutputTypeClass); + RegisterFactoryFunction(kTypeClassIncomplete, OutputTypeClass); + RegisterFactoryFunction(kTypeInterface, OutputTypeInterface); + RegisterFactoryFunction(kTypeInterfaceIncomplete, OutputTypeInterface); + RegisterFactoryFunction(kTypeConstString, OutputTypeConstString); + return true; +} +}; // namespace + +namespace maple { +int BinaryMplExport::typeMarkOffset = 0; + +BinaryMplExport::BinaryMplExport(MIRModule &md) : mod(md) { + bufI = 0; + Init(); + (void)InitOutputConstFactory(); + (void)InitOutputTypeFactory(); + not2mplt = false; +} + +uint8 BinaryMplExport::Read() { + CHECK_FATAL(bufI < buf.size(), "Index out of bound in BinaryMplImport::Read()"); + return buf[bufI++]; +} + +// Little endian +int32 BinaryMplExport::ReadInt() { + uint32 x0 = static_cast(Read()); + uint32 x1 = static_cast(Read()); + uint32 x2 = static_cast(Read()); + uint32 x3 = static_cast(Read()); + int32 x = static_cast((((((x3 << 8) + x2) << 8) + x1) << 8) + x0); + return x; +} + +void BinaryMplExport::Write(uint8 b) { + buf.push_back(b); +} + +// Little endian +void BinaryMplExport::WriteInt(int32 x) { + Write(static_cast(static_cast(x) & 0xFF)); + Write(static_cast((static_cast(x) >> 8) & 0xFF)); + Write(static_cast((static_cast(x) >> 16) & 0xFF)); + Write(static_cast((static_cast(x) >> 24) & 0xFF)); +} + +void BinaryMplExport::ExpandFourBuffSize() { + WriteInt(0); +} + +void BinaryMplExport::Fixup(size_t i, uint32 x) { + constexpr int fixupCount = 4; + CHECK(i <= buf.size() - fixupCount, "Index out of bound in BinaryMplImport::Fixup()"); + buf[i] = static_cast(x & 0xFF); + buf[i + 1] = static_cast((x >> 8) & 0xFF); + buf[i + 2] = static_cast((x >> 16) & 0xFF); + buf[i + 3] = static_cast((x >> 24) & 0xFF); +} + +void BinaryMplExport::WriteInt64(int64 x) { + WriteInt(static_cast(static_cast(x) & 0xFFFFFFFF)); + WriteInt(static_cast((static_cast(x) >> 32) & 0xFFFFFFFF)); +} + +// LEB128 +void BinaryMplExport::WriteNum(int64 x) { + while (x < -0x40 || x >= 0x40) { + Write(static_cast((static_cast(x) & 0x7F) + 0x80)); + x = x >> 7; // This is a compress algorithm, do not cast int64 to uint64. If do so, small negtivate number like -3 + // will occupy 9 bits and we will not get the compressed benefit. 
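// WriteNum above is a signed, LEB128-style variable-length encoding: 7 payload bits per
// byte, the high bit marks "more bytes follow", and the arithmetic right shift preserves
// the sign so that small negative numbers stay short, which is the point of the comment
// in the loop. Below is a self-contained encoder/decoder pair in the same format; the
// decoder is written to match this encoder and is presumably equivalent to the importer's
// ReadNum, but that function is defined elsewhere, so treat this as a sketch only.
#include <cstddef>
#include <cstdint>
#include <vector>

void EncodeSleb(int64_t x, std::vector<uint8_t> &out) {
  while (x < -0x40 || x >= 0x40) {
    out.push_back(static_cast<uint8_t>((x & 0x7F) | 0x80));  // 7 bits + continuation flag
    x >>= 7;  // arithmetic shift: keeps the sign, same as WriteNum
  }
  out.push_back(static_cast<uint8_t>(x & 0x7F));  // final group, continuation bit clear
}

int64_t DecodeSleb(const std::vector<uint8_t> &in, size_t &pos) {
  uint64_t result = 0;
  int shift = 0;
  uint8_t byte = 0;
  do {
    byte = in[pos++];
    result |= static_cast<uint64_t>(byte & 0x7F) << shift;
    shift += 7;
  } while ((byte & 0x80) != 0);
  if (shift < 64 && (byte & 0x40) != 0) {
    result |= ~uint64_t{0} << shift;  // sign-extend from the last group's sign bit
  }
  return static_cast<int64_t>(result);
}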
+ } + Write(static_cast(static_cast(x) & 0x7F)); +} + +void BinaryMplExport::WriteAsciiStr(const std::string &str) { + WriteNum(static_cast(str.size())); + for (size_t i = 0; i < str.size(); ++i) { + Write(static_cast(str[i])); + } +} + +void BinaryMplExport::DumpBuf(const std::string &name) { + FILE *f = fopen(name.c_str(), "wb"); + if (f == nullptr) { + LogInfo::MapleLogger(kLlErr) << "Error while creating the binary file: " << name << '\n'; + FATAL(kLncFatal, "Error while creating the binary file: %s\n", name.c_str()); + } + size_t size = buf.size(); + size_t k = fwrite(&buf[0], sizeof(uint8), size, f); + fclose(f); + if (k != size) { + LogInfo::MapleLogger(kLlErr) << "Error while writing the binary file: " << name << '\n'; + } +} + +void BinaryMplExport::OutputConstBase(const MIRConst &constVal) { + WriteNum(constVal.GetKind()); + OutputType(constVal.GetType().GetTypeIndex()); +} + +void BinaryMplExport::OutputConst(MIRConst *constVal) { + if (constVal == nullptr) { + WriteNum(0); + } else { + auto func = CreateProductFunction(constVal->GetKind()); + if (func != nullptr) { + func(*constVal, *this); + } + } +} + +void BinaryMplExport::OutputStr(const GStrIdx &gstr) { + if (gstr == 0u) { + WriteNum(0); + return; + } + + auto it = gStrMark.find(gstr); + if (it != gStrMark.end()) { + WriteNum(-(it->second)); + return; + } + + size_t mark = gStrMark.size(); + gStrMark[gstr] = mark; + WriteNum(kBinString); + ASSERT(GlobalTables::GetStrTable().StringTableSize() != 0, "Container check"); + WriteAsciiStr(GlobalTables::GetStrTable().GetStringFromStrIdx(gstr)); +} + +void BinaryMplExport::OutputUsrStr(UStrIdx ustr) { + if (ustr == 0u) { + WriteNum(0); + return; + } + + auto it = uStrMark.find(ustr); + if (it != uStrMark.end()) { + WriteNum(-(it->second)); + return; + } + + size_t mark = uStrMark.size(); + uStrMark[ustr] = mark; + WriteNum(kBinUsrString); + WriteAsciiStr(GlobalTables::GetUStrTable().GetStringFromStrIdx(ustr)); +} + +void BinaryMplExport::OutputPragmaElement(const MIRPragmaElement &e) { + OutputStr(e.GetNameStrIdx()); + OutputStr(e.GetTypeStrIdx()); + WriteNum(e.GetType()); + + if (e.GetType() == kValueString || e.GetType() == kValueType || e.GetType() == kValueField || + e.GetType() == kValueMethod || e.GetType() == kValueEnum) { + OutputStr(GStrIdx(e.GetI32Val())); + } else { + WriteInt64(e.GetU64Val()); + } + size_t size = e.GetSubElemVec().size(); + WriteNum(size); + for (size_t i = 0; i < size; ++i) { + OutputPragmaElement(*(e.GetSubElement(i))); + } +} + +void BinaryMplExport::OutputPragma(const MIRPragma &p) { + WriteNum(p.GetKind()); + WriteNum(p.GetVisibility()); + OutputStr(p.GetStrIdx()); + OutputType(p.GetTyIdx()); + OutputType(p.GetTyIdxEx()); + WriteNum(p.GetParamNum()); + size_t size = p.GetElementVector().size(); + WriteNum(size); + for (size_t i = 0; i < size; ++i) { + OutputPragmaElement(*(p.GetNthElement(i))); + } +} + +void BinaryMplExport::OutputTypeBase(const MIRType &type) { + WriteNum(type.GetPrimType()); + OutputStr(type.GetNameStrIdx()); + WriteNum(type.IsNameIsLocal()); +} + +void BinaryMplExport::OutputFieldPair(const FieldPair &fp) { + OutputStr(fp.first); // GStrIdx + OutputType(fp.second.first); // TyIdx + FieldAttrs fa = fp.second.second; + WriteNum(fa.GetAttrFlag()); + WriteNum(fa.GetAlignValue()); + if (fa.GetAttr(FLDATTR_static) && fa.GetAttr(FLDATTR_final) && + (fa.GetAttr(FLDATTR_public) || fa.GetAttr(FLDATTR_protected))) { + const std::string &fieldName = GlobalTables::GetStrTable().GetStringFromStrIdx(fp.first); + MIRSymbol *fieldVar = 
mod.GetMIRBuilder()->GetGlobalDecl(fieldName); + if ((fieldVar != nullptr) && (fieldVar->GetKonst() != nullptr) && + (fieldVar->GetKonst()->GetKind() == kConstStr16Const)) { + WriteNum(kBinInitConst); + OutputConst(fieldVar->GetKonst()); + } else { + WriteNum(0); + } + } +} + +void BinaryMplExport::OutputMethodPair(const MethodPair &memPool) { + // use GStrIdx instead, StIdx will be created by ImportMethodPair + MIRSymbol *funcSt = GlobalTables::GetGsymTable().GetSymbolFromStidx(memPool.first.Idx()); + CHECK_FATAL(funcSt != nullptr, "Pointer funcSt is nullptr, can't get symbol! Check it!"); + WriteAsciiStr(GlobalTables::GetStrTable().GetStringFromStrIdx(funcSt->GetNameStrIdx())); + OutputType(memPool.second.first); // TyIdx + WriteNum(memPool.second.second.GetAttrFlag()); // FuncAttrs +} + +void BinaryMplExport::OutputFieldsOfStruct(const FieldVector &fields) { + WriteNum(fields.size()); + for (const FieldPair &fp : fields) { + OutputFieldPair(fp); + } +} + +void BinaryMplExport::OutputMethodsOfStruct(const MethodVector &methods) { + WriteNum(methods.size()); + for (const MethodPair &memPool : methods) { + OutputMethodPair(memPool); + } +} + +void BinaryMplExport::OutputStructTypeData(const MIRStructType &type) { + OutputFieldsOfStruct(type.GetFields()); + OutputFieldsOfStruct(type.GetStaticFields()); + OutputFieldsOfStruct(type.GetParentFields()); + OutputMethodsOfStruct(type.GetMethods()); +} + +void BinaryMplExport::OutputImplementedInterfaces(const std::vector &interfaces) { + WriteNum(interfaces.size()); + for (const TyIdx &tyIdx : interfaces) { + OutputType(tyIdx); + } +} + +void BinaryMplExport::OutputInfoIsString(const std::vector &infoIsString) { + WriteNum(infoIsString.size()); + for (bool isString : infoIsString) { + WriteNum(static_cast(isString)); + } +} + +void BinaryMplExport::OutputInfo(const std::vector &info, const std::vector &infoIsString) { + size_t size = info.size(); + WriteNum(size); + for (size_t i = 0; i < size; ++i) { + OutputStr(info[i].first); // GStrIdx + if (infoIsString[i]) { + OutputStr(GStrIdx(info[i].second)); + } else { + WriteNum(info[i].second); + } + } +} + +void BinaryMplExport::OutputPragmaVec(const std::vector &pragmaVec) { + WriteNum(pragmaVec.size()); + for (MIRPragma *pragma : pragmaVec) { + OutputPragma(*pragma); + } +} + +void BinaryMplExport::OutputClassTypeData(const MIRClassType &type) { + OutputType(type.GetParentTyIdx()); + OutputImplementedInterfaces(type.GetInterfaceImplemented()); + OutputInfoIsString(type.GetInfoIsString()); + if (!inIPA) { + OutputInfo(type.GetInfo(), type.GetInfoIsString()); + OutputPragmaVec(type.GetPragmaVec()); + } +} + +void BinaryMplExport::OutputInterfaceTypeData(const MIRInterfaceType &type) { + OutputImplementedInterfaces(type.GetParentsTyIdx()); + OutputInfoIsString(type.GetInfoIsString()); + if (!inIPA) { + OutputInfo(type.GetInfo(), type.GetInfoIsString()); + OutputPragmaVec(type.GetPragmaVec()); + } +} + +void BinaryMplExport::Init() { + BinaryMplExport::typeMarkOffset = 0; + gStrMark.clear(); + uStrMark.clear(); + symMark.clear(); + funcMark.clear(); + typMark.clear(); + gStrMark[GStrIdx(0)] = 0; + uStrMark[UStrIdx(0)] = 0; + symMark[nullptr] = 0; + funcMark[nullptr] = 0; + eaNodeMark[nullptr] = 0; + curFunc = nullptr; + for (uint32 pti = static_cast(PTY_begin); pti < static_cast(PTY_end); ++pti) { + typMark[GlobalTables::GetTypeTable().GetTypeFromTyIdx(TyIdx(pti))] = pti; + } +} + +void BinaryMplExport::OutputSymbol(MIRSymbol *sym) { + if (sym == nullptr) { + WriteNum(0); + return; + } + + 
std::unordered_map::iterator it = symMark.find(sym); + if (it != symMark.end()) { + WriteNum(-(it->second)); + return; + } + + WriteNum(kBinSymbol); + WriteNum(sym->GetScopeIdx()); + OutputStr(sym->GetNameStrIdx()); + OutputUsrStr(sym->sectionAttr); + OutputUsrStr(sym->GetAsmAttr()); + WriteNum(sym->GetSKind()); + WriteNum(sym->GetStorageClass()); + size_t mark = symMark.size(); + symMark[sym] = mark; + OutputTypeAttrs(sym->GetAttrs()); + WriteNum(sym->GetIsTmp() ? 1 : 0); + if (sym->GetSKind() == kStPreg) { + WriteNum(sym->GetPreg()->GetPregNo()); + } else if (sym->GetSKind() == kStConst || sym->GetSKind() == kStVar) { + if (sym->GetKonst() != nullptr) { + sym->GetKonst()->SetType(*sym->GetType()); + } + OutputConst(sym->GetKonst()); + } else if (sym->GetSKind() == kStFunc) { + OutputFunction(sym->GetFunction()->GetPuidx()); + } else if (sym->GetSKind() == kStJavaClass || sym->GetSKind() == kStJavaInterface) { + } else { + CHECK_FATAL(false, "should not used"); + } + if (sym->GetSKind() == kStVar || sym->GetSKind() == kStFunc) { + OutputSrcPos(sym->GetSrcPosition()); + } + OutputType(sym->GetTyIdx()); +} + +void BinaryMplExport::OutputFunction(PUIdx puIdx) { + if (puIdx == 0) { + WriteNum(0); + mod.SetCurFunction(nullptr); + return; + } + MIRFunction *func = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(puIdx); + CHECK_FATAL(func != nullptr, "Cannot get MIRFunction."); + auto it = funcMark.find(func); + if (it != funcMark.end()) { + WriteNum(-it->second); + mod.SetCurFunction(func); + return; + } + size_t mark = funcMark.size(); + funcMark[func] = mark; + MIRFunction *savedFunc = mod.CurFunction(); + mod.SetCurFunction(func); + + WriteNum(kBinFunction); + MIRSymbol *funcSt = GlobalTables::GetGsymTable().GetSymbolFromStidx(func->GetStIdx().Idx()); + CHECK_FATAL(funcSt != nullptr, "Pointer funcSt is nullptr, cannot get symbol! Check it!"); + OutputSymbol(funcSt); + OutputType(func->GetMIRFuncType()->GetTypeIndex()); + WriteNum(func->GetFuncAttrs().GetAttrFlag()); + + auto &attributes = func->GetFuncAttrs(); + if (attributes.GetAttr(FUNCATTR_constructor_priority)) { + WriteNum(attributes.GetConstructorPriority()); + } + + if (attributes.GetAttr(FUNCATTR_destructor_priority)) { + WriteNum(attributes.GetDestructorPriority()); + } + + WriteNum(func->GetFlag()); + OutputType(func->GetClassTyIdx()); + // output formal parameter information + WriteNum(static_cast(func->GetFormalDefVec().size())); + for (FormalDef formalDef : func->GetFormalDefVec()) { + OutputStr(formalDef.formalStrIdx); + OutputType(formalDef.formalTyIdx); + WriteNum(static_cast(formalDef.formalAttrs.GetAttrFlag())); + } + // store Side Effect for each func + if (func2SEMap) { + uint32 isSee = func->IsIpaSeen() == true ? 1 : 0; + uint32 isPure = func->IsPure() == true ? 1 : 0; + uint32 noDefArg = func->IsNoDefArgEffect() == true ? 1 : 0; + uint32 noDef = func->IsNoDefEffect() == true ? 1 : 0; + uint32 noRetGlobal = func->IsNoRetGlobal() == true ? 1 : 0; + uint32 noThr = func->IsNoThrowException() == true ? 1 : 0; + uint32 noRetArg = func->IsNoRetArg() == true ? 1 : 0; + uint32 noPriDef = func->IsNoPrivateDefEffect() == true ? 
1 : 0; + uint32 i = 0; + uint8 se = noThr << i++; + se |= noRetGlobal << i++; + se |= noDef << i++; + se |= noDefArg << i++; + se |= isPure << i++; + se |= isSee << i++; + se |= noRetArg << i++; + se |= noPriDef << i; + if ((*func2SEMap).find(func->GetNameStrIdx()) == (*func2SEMap).end()) { + (*func2SEMap)[func->GetNameStrIdx()] = se; + } else if ((*func2SEMap)[func->GetNameStrIdx()] != se) { + FATAL(kLncFatal, "It is a bug."); + } + } + mod.SetCurFunction(savedFunc); +} + +void BinaryMplExport::WriteStrField(uint64 contentIdx) { + Fixup(contentIdx, buf.size()); + WriteNum(kBinStrStart); + size_t totalSizeIdx = buf.size(); + ExpandFourBuffSize(); // total size of this field to ~BIN_STR_START + size_t outStrSizeIdx = buf.size(); + ExpandFourBuffSize(); // size of OutputStr + + uint32 size = 0; + for (const auto &entity : GlobalTables::GetConstPool().GetConstU16StringPool()) { + MIRSymbol *sym = entity.second; + if (sym->IsLiteral()) { + OutputStr(sym->GetNameStrIdx()); + ++size; + } + } + Fixup(totalSizeIdx, static_cast(buf.size() - totalSizeIdx)); + Fixup(outStrSizeIdx, size); + WriteNum(~kBinStrStart); +} + +void BinaryMplExport::WriteHeaderField(uint64 contentIdx) { + Fixup(contentIdx, buf.size()); + WriteNum(kBinHeaderStart); + size_t totalSizeIdx = buf.size(); + ExpandFourBuffSize(); // total size of this field to ~BIN_IMPORT_START + WriteNum(mod.GetFlavor()); + WriteNum(mod.GetSrcLang()); + WriteNum(mod.GetID()); + if (mod.GetFlavor() == kFlavorLmbc) { + WriteNum(mod.GetGlobalMemSize()); + int64 dbgInfo = mod.IsWithDbgInfo() ? 1 : 0; + WriteNum(dbgInfo); + } + WriteNum(mod.GetNumFuncs()); + WriteAsciiStr(mod.GetEntryFuncName()); + OutputInfoVector(mod.GetFileInfo(), mod.GetFileInfoIsString()); + + if (mod.IsWithDbgInfo()) { + WriteNum(static_cast(mod.GetSrcFileInfo().size())); + for (uint32 i = 0; i < mod.GetSrcFileInfo().size(); i++) { + OutputStr(mod.GetSrcFileInfo()[i].first); + WriteNum(mod.GetSrcFileInfo()[i].second); + } + } else { + Write(0); + } + + WriteNum(static_cast(mod.GetImportFiles().size())); + for (GStrIdx strIdx : mod.GetImportFiles()) { + OutputStr(strIdx); + } + + WriteNum(static_cast(mod.GetAsmDecls().size())); + for (MapleString mapleStr : mod.GetAsmDecls()) { + std::string str(mapleStr.c_str()); + WriteAsciiStr(str); + } + + Fixup(totalSizeIdx, buf.size() - totalSizeIdx); + WriteNum(~kBinHeaderStart); + return; +} + +void BinaryMplExport::WriteTypeField(uint64 contentIdx, bool useClassList) { + Fixup(contentIdx, buf.size()); + WriteNum(kBinTypeStart); + size_t totalSizeIdx = buf.size(); + ExpandFourBuffSize(); // total size of this field to ~BIN_TYPE_START + size_t outTypeSizeIdx = buf.size(); + ExpandFourBuffSize(); // size of OutputType + uint32 size = 0; + if (useClassList) { + for (uint32 tyIdx : mod.GetClassList()) { + TyIdx curTyidx(tyIdx); + MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(curTyidx); + CHECK_FATAL(type != nullptr, "Pointer type is nullptr, cannot get type, check it!"); + if (type->GetKind() == kTypeClass || type->GetKind() == kTypeInterface) { + auto *structType = static_cast(type); + // skip imported class/interface and incomplete types + if (!structType->IsImported() && !structType->IsIncomplete()) { + OutputType(curTyidx); + ++size; + } + } + } + } else { + uint32 idx = GlobalTables::GetTypeTable().lastDefaultTyIdx.GetIdx(); + for (idx = idx + 1; idx < GlobalTables::GetTypeTable().GetTypeTableSize(); idx++) { + OutputType(TyIdx(idx)); + size++; + } + } + Fixup(totalSizeIdx, buf.size() - totalSizeIdx); + 
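// back-patch the reserved count slot with the number of types actually emitted,
// then close the field with the complemented start tag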
Fixup(outTypeSizeIdx, size); + WriteNum(~kBinTypeStart); +} + +void BinaryMplExport::OutputCallInfo(CallInfo &callInfo) { + auto it = callInfoMark.find(callInfo.GetID()); + if (it != callInfoMark.end()) { + WriteNum(-(it->second)); + return; + } + WriteNum(kBinCallinfo); + size_t mark = callInfoMark.size(); + callInfoMark[callInfo.GetID()] = mark; + WriteNum(callInfo.GetCallType()); // call type + WriteInt(callInfo.GetLoopDepth()); + WriteInt(callInfo.GetID()); + callInfo.AreAllArgsLocal() ? Write(1) : Write(0); // All args are local variables or not. + OutputSymbol(callInfo.GetFunc()->GetFuncSymbol()); +} + +void BinaryMplExport::WriteCgField(uint64 contentIdx, const CallGraph *cg) { + if (contentIdx != 0) { + Fixup(contentIdx, buf.size()); + } + WriteNum(kBinCgStart); + size_t totalSizeIdx = buf.size(); + ExpandFourBuffSize(); // total size of this field to ~BIN_CG_START + size_t outcgSizeIdx = buf.size(); + ExpandFourBuffSize(); // size of OutCG + uint32 size = 0; + if (cg != nullptr) { + for (auto entry : cg->GetNodesMap()) { + MIRSymbol *methodSym = entry.first->GetFuncSymbol(); + WriteNum(kStartMethod); + OutputSymbol(methodSym); + size_t targetTyIdx = buf.size(); + ExpandFourBuffSize(); + int32 targSize = 0; + callInfoMark.clear(); + callInfoMark[0xffffffff] = 0; + for (const auto &callSite : entry.second->GetCallee()) { + OutputCallInfo(*(callSite.first)); + ++targSize; + } + Fixup(targetTyIdx, static_cast(targSize)); + WriteNum(~kStartMethod); + ++size; + } + } + + ASSERT((buf.size() - totalSizeIdx) <= 0xffffffff, "Integer overflow."); + Fixup(totalSizeIdx, static_cast(buf.size() - totalSizeIdx)); + Fixup(outcgSizeIdx, size); + WriteNum(~kBinCgStart); +} + +void BinaryMplExport::WriteSeField() { + ASSERT(func2SEMap != nullptr, "Expecting a func2SE map"); + WriteNum(kBinSeStart); + size_t totalSizeIdx = buf.size(); + ExpandFourBuffSize(); // total size of this field to ~BIN_SYM_START + size_t outseSizeIdx = buf.size(); + ExpandFourBuffSize(); // size of OutSym + uint32 size = 0; + + for (const auto &func2SE : *func2SEMap) { + uint8 se = func2SE.second; + if (se != 0) { + OutputStr(func2SE.first); + Write(se); + if ((se & kPureFunc) == kPureFunc) { + const std::string &funcStr = GlobalTables::GetStrTable().GetStringFromStrIdx(func2SE.first); + auto *funcSymbol = + GlobalTables::GetGsymTable().GetSymbolFromStrIdx(GlobalTables::GetStrTable().GetStrIdxFromName(funcStr)); + MIRFunction *func = (funcSymbol != nullptr) ? 
GetMIRModule().GetMIRBuilder()->GetFunctionFromSymbol(*funcSymbol) + : nullptr; + OutputType(func->GetReturnTyIdx()); + } + ++size; + } + } + Fixup(totalSizeIdx, buf.size() - totalSizeIdx); + Fixup(outseSizeIdx, size); + WriteNum(~kBinSeStart); +} + +void BinaryMplExport::OutEaCgBaseNode(const EACGBaseNode &node, bool firstPart) { + if (firstPart) { + WriteNum(node.eaStatus); + WriteInt(static_cast(node.id)); + } else { + // in and out set in base node is not necessary to be outed + // start to out point-to set + size_t outP2SizeIdx = buf.size(); + WriteInt(0); + uint32 size = 0; + for (EACGBaseNode *outNode : node.GetPointsToSet()) { + OutEaCgNode(*outNode); + ++size; + } + Fixup(outP2SizeIdx, size); + // start to out in set + outP2SizeIdx = buf.size(); + WriteInt(0); + size = 0; + for (EACGBaseNode *outNode : node.GetInSet()) { + OutEaCgNode(*outNode); + ++size; + } + Fixup(outP2SizeIdx, size); + // start to out out set + outP2SizeIdx = buf.size(); + WriteInt(0); + size = 0; + for (EACGBaseNode *outNode : node.GetOutSet()) { + OutEaCgNode(*outNode); + ++size; + } + Fixup(outP2SizeIdx, size); + } +} + +void BinaryMplExport::OutEaCgObjNode(EACGObjectNode &obj) { + Write(uint8(obj.isPhantom)); + size_t outFieldSizeIdx = buf.size(); + WriteInt(0); + uint32 size = 0; + for (const auto &fieldNodePair : obj.fieldNodes) { + EACGBaseNode *fieldNode = fieldNodePair.second; + ASSERT(fieldNodePair.first == static_cast(fieldNode)->GetFieldID(), "Must be."); + OutEaCgNode(*fieldNode); + ++size; + } + Fixup(outFieldSizeIdx, size); + // start to out point by + outFieldSizeIdx = buf.size(); + WriteInt(0); + size = 0; + for (EACGBaseNode *node : obj.pointsBy) { + OutEaCgNode(*node); + ++size; + } + Fixup(outFieldSizeIdx, size); +} + +void BinaryMplExport::OutEaCgRefNode(const EACGRefNode &ref) { + Write(uint8(ref.isStaticField)); +} + +void BinaryMplExport::OutEaCgFieldNode(EACGFieldNode &field) { + WriteInt(field.GetFieldID()); + uint32 size = 0; + size_t outFieldSizeIdx = buf.size(); + WriteInt(0); + for (EACGBaseNode *obj : field.belongsTo) { + OutEaCgNode(*obj); + ++size; + } + Fixup(outFieldSizeIdx, size); + Write(uint8(field.isPhantom)); +} + +void BinaryMplExport::OutEaCgActNode(const EACGActualNode &act) { + Write(uint8(act.isPhantom)); + Write(uint8(act.isReturn)); + Write(act.argIdx); + WriteInt(act.callSiteInfo); +} + +void BinaryMplExport::OutEaCgNode(EACGBaseNode &node) { + auto it = eaNodeMark.find(&node); + if (it != eaNodeMark.end()) { + WriteNum(-it->second); + return; + } + size_t mark = eaNodeMark.size(); + eaNodeMark[&node] = mark; + WriteNum(kBinEaCgNode); + WriteNum(node.kind); + OutEaCgBaseNode(node, true); + if (node.IsActualNode()) { + WriteNum(kBinEaCgActNode); + OutEaCgActNode(static_cast(node)); + } else if (node.IsFieldNode()) { + WriteNum(kBinEaCgFieldNode); + OutEaCgFieldNode(static_cast(node)); + } else if (node.IsObjectNode()) { + WriteNum(kBinEaCgObjNode); + OutEaCgObjNode(static_cast(node)); + } else if (node.IsReferenceNode()) { + WriteNum(kBinEaCgRefNode); + OutEaCgRefNode(static_cast(node)); + } else { + ASSERT(false, "Must be."); + } + OutEaCgBaseNode(node, false); + WriteNum(~kBinEaCgNode); +} + +void BinaryMplExport::WriteEaField(const CallGraph &cg) { + WriteNum(kBinEaStart); + uint64 totalSizeIdx = buf.size(); + WriteInt(0); + uint64 outeaSizeIdx = buf.size(); + WriteInt(0); + uint32 size = 0; + for (auto cgNodePair : cg.GetNodesMap()) { + MIRFunction *func = cgNodePair.first; + if (func->GetEACG() == nullptr) { + continue; + } + EAConnectionGraph *eacg = 
func->GetEACG(); + ASSERT(eacg != nullptr, "Must be."); + OutputStr(eacg->GetFuncNameStrIdx()); + WriteInt(eacg->GetNodes().size()); + OutEaCgNode(*eacg->GetGlobalObject()); + uint64 outFunceaIdx = buf.size(); + WriteInt(0); + size_t funceaSize = 0; + for (EACGBaseNode *node : eacg->GetFuncArgNodes()) { + OutEaCgNode(*node); + ++funceaSize; + } + Fixup(outFunceaIdx, funceaSize); + ++size; + } + Fixup(totalSizeIdx, buf.size() - totalSizeIdx); + Fixup(outeaSizeIdx, size); + WriteNum(~kBinEaStart); +} + +void BinaryMplExport::WriteEaCgField(EAConnectionGraph *eaCg) { + if (eaCg == nullptr) { + WriteNum(~kBinEaCgStart); + return; + } + WriteNum(kBinEaCgStart); + size_t totalSizeIdx = buf.size(); + WriteInt(0); + // out this function's arg list + OutputStr(eaCg->GetFuncNameStrIdx()); + WriteInt(static_cast(eaCg->GetNodes().size())); + OutEaCgNode(*eaCg->GetGlobalObject()); + size_t outNodeSizeIdx = buf.size(); + WriteInt(0); + size_t argNodeSize = 0; + for (EACGBaseNode *node : eaCg->GetFuncArgNodes()) { + OutEaCgNode(*node); + ++argNodeSize; + } + Fixup(outNodeSizeIdx, argNodeSize); + // out this function's call site's arg list + outNodeSizeIdx = buf.size(); + WriteInt(0); + size_t callSiteSize = 0; + for (auto nodePair : eaCg->GetCallSite2Nodes()) { + uint32 id = nodePair.first; + MapleVector *calleeArgNode = nodePair.second; + WriteInt(id); + size_t outCalleeArgSizeIdx = buf.size(); + WriteInt(0); + size_t calleeArgSize = 0; + for (EACGBaseNode *node : *calleeArgNode) { + OutEaCgNode(*node); + ++calleeArgSize; + } + Fixup(outCalleeArgSizeIdx, calleeArgSize); + ++callSiteSize; + } + Fixup(outNodeSizeIdx, callSiteSize); + + Fixup(totalSizeIdx, buf.size() - totalSizeIdx); + WriteNum(~kBinEaCgStart); +} + +void BinaryMplExport::WriteEnumField(uint64 contentIdx) { + if (GlobalTables::GetEnumTable().enumTable.empty()) { + return; + } + Fixup(contentIdx, static_cast(buf.size())); + WriteNum(kBinEnumStart); + uint64 totalSizeIdx = buf.size(); + ExpandFourBuffSize(); // total size of this field to ~BIN_SYM_START + uint64 outEnumSizeIdx = buf.size(); + ExpandFourBuffSize(); // size of OutEnum + uint32 size = 0; + for (MIREnum *mirEnum : GlobalTables::GetEnumTable().enumTable) { + OutputEnumeration(mirEnum); + size++; + } + Fixup(totalSizeIdx, buf.size() - totalSizeIdx); + Fixup(outEnumSizeIdx, size); + WriteNum(~kBinEnumStart); + return; +} + +void BinaryMplExport::WriteSymField(uint64 contentIdx) { + Fixup(contentIdx, buf.size()); + WriteNum(kBinSymStart); + uint64 totalSizeIdx = buf.size(); + ExpandFourBuffSize(); // total size of this field to ~BIN_SYM_START + uint64 outsymSizeIdx = buf.size(); + ExpandFourBuffSize(); // size of OutSym + uint32 size = 0; + + if (not2mplt) { + for (auto sit = GetMIRModule().GetSymbolDefOrder().begin(); + sit != GetMIRModule().GetSymbolDefOrder().end(); ++sit) { + MIRSymbol *s = GlobalTables::GetGsymTable().GetSymbolFromStidx(sit->Idx()); + ASSERT(s != nullptr, "null ptr check"); + // Verify: all wpofake variables should have been deleted from globaltable + ASSERT(!(s->IsWpoFakeParm() || s->IsWpoFakeRet()) || s->IsDeleted(), "wpofake var not deleted"); + MIRStorageClass storageClass = s->GetStorageClass(); + MIRSymKind sKind = s->GetSKind(); + if (s->IsDeleted() || storageClass == kScUnused || + (s->GetIsImported() && !s->GetAppearsInCode()) || + (sKind == kStFunc && + ((storageClass == kScExtern && !s->GetFunction()->GetAttr(FUNCATTR_used)) || + !s->GetAppearsInCode()))) { + continue; + } + OutputSymbol(s); + size++; + } + } + Fixup(totalSizeIdx, buf.size() - 
totalSizeIdx); + Fixup(outsymSizeIdx, size); + WriteNum(~kBinSymStart); + return; +} + +void BinaryMplExport::WriteContentField4mplt(int fieldNum, uint64 *fieldStartP) { + WriteNum(kBinContentStart); + size_t totalSizeIdx = buf.size(); + ExpandFourBuffSize(); // total size of this field to ~BIN_SYM_START + + WriteInt(fieldNum); // size of Content item + + WriteNum(kBinStrStart); + fieldStartP[0] = buf.size(); + ExpandFourBuffSize(); + + WriteNum(kBinTypeStart); + fieldStartP[1] = buf.size(); + ExpandFourBuffSize(); + + WriteNum(kBinCgStart); + fieldStartP[2] = buf.size(); + ExpandFourBuffSize(); + + Fixup(totalSizeIdx, buf.size() - totalSizeIdx); + WriteNum(~kBinContentStart); +} + +void BinaryMplExport::WriteContentField4nonmplt(int fieldNum, uint64 *fieldStartP) { + CHECK_FATAL(fieldStartP != nullptr, "fieldStartP is null."); + WriteNum(kBinContentStart); + size_t totalSizeIdx = buf.size(); + ExpandFourBuffSize(); // total size of this field to ~BIN_SYM_START + + WriteInt(fieldNum); // size of Content item + + WriteNum(kBinHeaderStart); + fieldStartP[kFirstField] = buf.size(); + ExpandFourBuffSize(); + + WriteNum(kBinSymStart); + fieldStartP[kSecondField] = buf.size(); + ExpandFourBuffSize(); + + WriteNum(kBinFunctionBodyStart); + fieldStartP[kThirdField] = buf.size(); + ExpandFourBuffSize(); + + Fixup(totalSizeIdx, buf.size() - totalSizeIdx); + WriteNum(~kBinContentStart); +} + +void BinaryMplExport::WriteContentField4nonJava(int fieldNum, uint64 *fieldStartP) { + CHECK_FATAL(fieldStartP != nullptr, "fieldStartP is null."); + WriteNum(kBinContentStart); + size_t totalSizeIdx = buf.size(); + ExpandFourBuffSize(); // total size of this field to ~BIN_SYM_START + + WriteInt(fieldNum); // size of Content item + + WriteNum(kBinHeaderStart); + fieldStartP[kFirstField] = buf.size(); + ExpandFourBuffSize(); + + WriteNum(kBinStrStart); + fieldStartP[kSecondField] = buf.size(); + ExpandFourBuffSize(); + + WriteNum(kBinEnumStart); + fieldStartP[kThirdField] = buf.size(); + ExpandFourBuffSize(); + + WriteNum(kBinSymStart); + fieldStartP[kFourthField] = buf.size(); + ExpandFourBuffSize(); + + WriteNum(kBinFunctionBodyStart); + fieldStartP[kFifthField] = buf.size(); + ExpandFourBuffSize(); + + Fixup(totalSizeIdx, buf.size() - totalSizeIdx); + WriteNum(~kBinContentStart); +} + +void BinaryMplExport::Export(const std::string &fname, std::unordered_set *dumpFuncSet) { + uint64 fieldStartPoint[5]; + if (!not2mplt) { + WriteInt(kMpltMagicNumber); + WriteContentField4mplt(kFourthFieldInt, fieldStartPoint); + WriteStrField(fieldStartPoint[kFirstField]); + WriteTypeField(fieldStartPoint[kSecondField]); + WriteCgField(fieldStartPoint[kThirdField], nullptr); + importFileName = fname; + } else { + WriteInt(kMpltMagicNumber + 0x10); + if (mod.IsJavaModule()) { + WriteContentField4nonmplt(kFourthFieldInt, fieldStartPoint); + WriteHeaderField(fieldStartPoint[kFirstField]); + WriteSymField(fieldStartPoint[kSecondField]); + WriteFunctionBodyField(fieldStartPoint[kThirdField], dumpFuncSet); + } else { + WriteContentField4nonJava(kSixthFieldInt, fieldStartPoint); + WriteHeaderField(fieldStartPoint[kFirstField]); + WriteEnumField(fieldStartPoint[kThirdField]); + WriteSymField(fieldStartPoint[kFourthField]); + WriteFunctionBodyField(fieldStartPoint[kFifthField], dumpFuncSet); + } + } + WriteNum(kBinFinish); + DumpBuf(fname); +} + +void BinaryMplExport::AppendAt(const std::string &name, int32 offset) { + FILE *f = fopen(name.c_str(), "r+b"); + if (f == nullptr) { + LogInfo::MapleLogger(kLlErr) << "Error while opening the 
binary file: " << name << '\n'; + FATAL(kLncFatal, "Error while creating the binary file: %s\n", name.c_str()); + } + int seekRet = fseek(f, static_cast(offset), SEEK_SET); + CHECK_FATAL(seekRet == 0, "Call fseek failed."); + size_t size = buf.size(); + size_t k = fwrite(&buf[0], sizeof(uint8), size, f); + fclose(f); + if (k != size) { + LogInfo::MapleLogger(kLlErr) << "Error while writing the binary file: " << name << '\n'; + } +} + +void BinaryMplExport::OutputTypePairs(const MIRInstantVectorType &type) { + size_t size = type.GetInstantVec().size(); + WriteNum(size); + for (const TypePair &typePair : type.GetInstantVec()) { + OutputType(typePair.first); + OutputType(typePair.second); + } +} + +void BinaryMplExport::OutputTypeAttrs(const TypeAttrs &ta) { + WriteNum(ta.GetAttrFlag()); + WriteNum(ta.GetAlignValue()); + WriteNum(ta.GetPack()); +} + +void BinaryMplExport::OutputType(const TyIdx &tyIdx) { + if (tyIdx == 0u) { + WriteNum(0); + return; + } + MIRType *ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx); + CHECK_FATAL(ty != nullptr, "If gets nulltype, should have been returned!"); + auto it = typMark.find(ty); + if (it != typMark.end()) { + if (ty->GetKind() != kTypeFunction) { + WriteNum(-(it->second)); + return; + } + ++BinaryMplExport::typeMarkOffset; + } else { + size_t mark = typMark.size() + BinaryMplExport::typeMarkOffset; + typMark[ty] = mark; + } + + auto func = CreateProductFunction(ty->GetKind()); + if (func != nullptr) { + func(*ty, *this); + } else { + ASSERT(false, "Type's kind not yet implemented: %d", ty->GetKind()); + } +} + +void BinaryMplExport::OutputEnumeration(MIREnum *mirEnum) { + WriteNum(kBinEnumeration); + Write(static_cast(mirEnum->GetPrimType())); + OutputStr(mirEnum->GetNameIdx()); + WriteNum(static_cast(mirEnum->GetElements().size())); + for (size_t i = 0; i < mirEnum->GetElements().size(); ++i) { + OutputStr(mirEnum->GetElements()[i].first); + WriteNum(mirEnum->GetElements()[i].second.GetSXTValue()); + } +} + +void UpdateMplt::UpdateCgField(BinaryMplt &binMplt, const CallGraph &cg) const { + BinaryMplImport &binImport = binMplt.GetBinImport(); + BinaryMplExport &binExport = binMplt.GetBinExport(); + binImport.SetBufI(0); + if (binImport.IsBufEmpty() || binImport.ReadInt() != kMpltMagicNumber) { + INFO(kLncInfo, " This Module depends on nothing"); + return; + } + int64 cgStart = binImport.GetContent(kBinCgStart); + ASSERT(cgStart != 0, "Should be updated in import processing."); + binImport.SetBufI(cgStart); + int64 checkReadNum = binImport.ReadNum(); + ASSERT(checkReadNum == kBinCgStart, "Should be cg start point."); + int32 totalSize = binImport.ReadInt(); + constexpr int32 headLen = 4; + binImport.SetBufI(binImport.GetBufI() + totalSize - headLen); + checkReadNum = binImport.ReadNum(); + ASSERT(checkReadNum == ~kBinCgStart, "Should be end of cg."); + binExport.Init(); + std::map tmp; + binExport.func2SEMap = &tmp; + binExport.inIPA = true; + binExport.WriteCgField(0, &cg); + binExport.Init(); + binExport.WriteSeField(); + binExport.eaNodeMark.clear(); + binExport.eaNodeMark[nullptr] = 0; + binExport.gStrMark.clear(); + binExport.gStrMark[GStrIdx(0)] = 0; + binExport.WriteEaField(cg); + binExport.WriteNum(kBinFinish); + std::string filename(binMplt.GetImportFileName()); + binExport.AppendAt(filename, cgStart); +} + +} // namespace maple diff --git a/src/mapleall/maple_ir/src/bin_mpl_import.cpp b/src/mapleall/maple_ir/src/bin_mpl_import.cpp new file mode 100644 index 
0000000000000000000000000000000000000000..7cf1827eb5b4dc800e2f9063c38d2b86fb02ca40 --- /dev/null +++ b/src/mapleall/maple_ir/src/bin_mpl_import.cpp @@ -0,0 +1,1700 @@ +/* + * Copyright (c) [2019-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "bin_mpl_import.h" +#include +#include +#include +#include +#include "bin_mplt.h" +#include "mir_function.h" +#include "namemangler.h" +#include "opcode_info.h" +#include "mir_pragma.h" +#include "mir_builder.h" + +namespace maple { +uint8 BinaryMplImport::Read() { + CHECK_FATAL(bufI < buf.size(), "Index out of bound in BinaryMplImport::Read()"); + return buf[bufI++]; +} + +// Little endian +int32 BinaryMplImport::ReadInt() { + uint32 x0 = static_cast(Read()); + uint32 x1 = static_cast(Read()); + uint32 x2 = static_cast(Read()); + uint32 x3 = static_cast(Read()); + return (((((x3 << 8u) + x2) << 8u) + x1) << 8u) + x0; +} + +int64 BinaryMplImport::ReadInt64() { + // casts to avoid sign extension + uint32 x0 = static_cast(ReadInt()); + uint64 x1 = static_cast(ReadInt()); + return static_cast((x1 << 32) + x0); +} + +// LEB128 +int64 BinaryMplImport::ReadNum() { + uint64 n = 0; + int64 y = 0; + uint64 b = static_cast(Read()); + while (b >= 0x80) { + y += ((b - 0x80) << n); + n += 7; + b = static_cast(Read()); + } + b = (b & 0x3F) - (b & 0x40); + return y + (b << n); +} + +void BinaryMplImport::ReadAsciiStr(std::string &str) { + int64 n = ReadNum(); + for (int64 i = 0; i < n; i++) { + uint8 ch = Read(); + str.push_back(static_cast(ch)); + } +} + +void BinaryMplImport::ReadFileAt(const std::string &name, int32 offset) { + FILE *f = fopen(name.c_str(), "rb"); + CHECK_FATAL(f != nullptr, "Error while reading the binary file: %s", name.c_str()); + + int seekRet = fseek(f, 0, SEEK_END); + CHECK_FATAL(seekRet == 0, "call fseek failed"); + + long size = ftell(f); + size -= offset; + + CHECK_FATAL(size >= 0, "should not be negative"); + + seekRet = fseek(f, offset, SEEK_SET); + CHECK_FATAL(seekRet == 0, "call fseek failed"); + buf.resize(size); + + size_t result = fread(&buf[0], sizeof(uint8), static_cast(size), f); + fclose(f); + CHECK_FATAL(result == static_cast(size), "Error while reading the binary file: %s", name.c_str()); +} + +void BinaryMplImport::ImportConstBase(MIRConstKind &kind, MIRTypePtr &type) { + kind = static_cast(ReadNum()); + TyIdx tyidx = mod.IsJavaModule() ? 
ImportType() : ImportTypeNonJava(); + type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyidx); +} + +MIRConst *BinaryMplImport::ImportConst(MIRFunction *func) { + int64 tag = ReadNum(); + if (tag == 0) { + return nullptr; + } + + MIRConstKind kind; + MIRType *type = nullptr; + MemPool *memPool = mod.GetMemPool(); + + ImportConstBase(kind, type); + switch (tag) { + case kBinKindConstInt: + return GlobalTables::GetIntConstTable().GetOrCreateIntConst(static_cast(ReadNum()), *type); + case kBinKindConstAddrof: { + MIRSymbol *sym = InSymbol(func); + CHECK_FATAL(sym != nullptr, "null ptr check"); + FieldID fi = ReadNum(); + int32 ofst = static_cast(ReadNum()); + // do not use "type"; instead, get exprTy from sym + TyIdx ptyIdx = sym->GetTyIdx(); + MIRPtrType ptrType(ptyIdx, (mod.IsJavaModule() ? PTY_ref : GetExactPtrPrimType())); + ptyIdx = GlobalTables::GetTypeTable().GetOrCreateMIRType(&ptrType); + MIRType *exprTy = GlobalTables::GetTypeTable().GetTypeFromTyIdx(ptyIdx); + return memPool->New(sym->GetStIdx(), fi, *exprTy, ofst); + } + case kBinKindConstAddrofLocal: { + MIRSymbol *sym = ImportLocalSymbol(func); + FieldID fi = static_cast(ReadNum()); + int32 ofst = static_cast(ReadNum()); + return memPool->New(sym->GetStIdx(), fi, *type, ofst); + } + case kBinKindConstAddrofFunc: { + PUIdx puIdx = ImportFunction(); + MIRFunction *f = GlobalTables::GetFunctionTable().GetFuncTable()[puIdx]; + CHECK_NULL_FATAL(f); + f->GetFuncSymbol()->SetAppearsInCode(true); + mod.SetCurFunction(func); + return memPool->New(puIdx, *type); + } + case kBinKindConstAddrofLabel: { + LabelIdx lidx = ImportLabel(func); + PUIdx puIdx = func->GetPuidx(); + MIRLblConst *lblConst = memPool->New(lidx, puIdx, *type); + (void)func->GetLabelTab()->addrTakenLabels.insert(lidx); + return lblConst; + } + case kBinKindConstStr: { + UStrIdx ustr = ImportUsrStr(); + return memPool->New(ustr, *type); + } + case kBinKindConstStr16: { + Conststr16Node *cs = memPool->New(); + cs->SetPrimType(type->GetPrimType()); + int64 len = ReadNum(); + std::ostringstream ostr; + for (int64 i = 0; i < len; ++i) { + ostr << Read(); + } + std::u16string str16; + (void)namemangler::UTF8ToUTF16(str16, ostr.str()); + cs->SetStrIdx(GlobalTables::GetU16StrTable().GetOrCreateStrIdxFromName(str16)); + return memPool->New(cs->GetStrIdx(), *type); + } + case kBinKindConstFloat: { + union { + float fvalue; + int32 ivalue; + } value; + + value.ivalue = ReadNum(); + return GlobalTables::GetFpConstTable().GetOrCreateFloatConst(value.fvalue); + } + case kBinKindConstDouble: { + union { + double dvalue; + int64 ivalue; + } value; + + value.ivalue = ReadNum(); + return GlobalTables::GetFpConstTable().GetOrCreateDoubleConst(value.dvalue); + } + case kBinKindConstAgg: { + MIRAggConst *aggConst = mod.GetMemPool()->New(mod, *type); + int64 size = ReadNum(); + for (int64 i = 0; i < size; ++i) { + auto fieldId = static_cast(ReadNum()); + auto fieldConst = ImportConst(func); + aggConst->AddItem(fieldConst, fieldId); + } + return aggConst; + } + case kBinKindConstSt: { + MIRStConst *stConst = mod.GetMemPool()->New(mod, *type); + int64 size = ReadNum(); + for (int64 i = 0; i < size; ++i) { + stConst->PushbackSymbolToSt(InSymbol(func)); + } + size = ReadNum(); + for (int64 i = 0; i < size; ++i) { + stConst->PushbackOffsetToSt(ReadNum()); + } + return stConst; + } + default: + CHECK_FATAL(false, "Unhandled const type"); + } +} + +GStrIdx BinaryMplImport::ImportStr() { + int64 tag = ReadNum(); + if (tag == 0) { + return GStrIdx(0); + } + if (tag < 0) { + CHECK_FATAL(-tag < 
static_cast(gStrTab.size()), "index out of range in BinaryMplt::ImportStr"); + return gStrTab[-tag]; + } + CHECK_FATAL(tag == kBinString, "expecting kBinString"); + std::string str; + ReadAsciiStr(str); + GStrIdx strIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(str); + gStrTab.push_back(strIdx); + return strIdx; +} + +UStrIdx BinaryMplImport::ImportUsrStr() { + int64 tag = ReadNum(); + if (tag == 0) { + return UStrIdx(0); + } + if (tag < 0) { + CHECK_FATAL(-tag < static_cast(uStrTab.size()), "index out of range in BinaryMplt::InUsrStr"); + return uStrTab[-tag]; + } + CHECK_FATAL(tag == kBinUsrString, "expecting kBinUsrString"); + std::string str; + ReadAsciiStr(str); + UStrIdx strIdx = GlobalTables::GetUStrTable().GetOrCreateStrIdxFromName(str); + uStrTab.push_back(strIdx); + return strIdx; +} + +MIRPragmaElement *BinaryMplImport::ImportPragmaElement() { + MIRPragmaElement *element = mod.GetPragmaMemPool()->New(mod); + element->SetNameStrIdx(ImportStr()); + element->SetTypeStrIdx(ImportStr()); + element->SetType(static_cast(ReadNum())); + if (element->GetType() == kValueString || element->GetType() == kValueType || element->GetType() == kValueField || + element->GetType() == kValueMethod || element->GetType() == kValueEnum) { + element->SetI32Val(static_cast(ImportStr())); + } else { + element->SetU64Val(static_cast(ReadInt64())); + } + int64 size = ReadNum(); + for (int64 i = 0; i < size; ++i) { + element->SubElemVecPushBack(ImportPragmaElement()); + } + return element; +} + +MIRPragma *BinaryMplImport::ImportPragma() { + MIRPragma *p = mod.GetPragmaMemPool()->New(mod); + p->SetKind(static_cast(ReadNum())); + p->SetVisibility(ReadNum()); + p->SetStrIdx(ImportStr()); + if (mod.IsJavaModule()) { + p->SetTyIdx(ImportType()); + p->SetTyIdxEx(ImportType()); + } else { + p->SetTyIdx(ImportTypeNonJava()); + p->SetTyIdxEx(ImportTypeNonJava()); + } + p->SetParamNum(ReadNum()); + int64 size = ReadNum(); + for (int64 i = 0; i < size; ++i) { + p->PushElementVector(ImportPragmaElement()); + } + return p; +} + +void BinaryMplImport::ImportFieldPair(FieldPair &fp) { + fp.first = ImportStr(); + fp.second.first = mod.IsJavaModule() ? 
ImportType() : ImportTypeNonJava(); + fp.second.second.SetAttrFlag(ReadNum()); + fp.second.second.SetAlignValue(ReadNum()); + FieldAttrs fa = fp.second.second; + if (fa.GetAttr(FLDATTR_static) && fa.GetAttr(FLDATTR_final) && + (fa.GetAttr(FLDATTR_public) || fa.GetAttr(FLDATTR_protected))) { + int64 tag = ReadNum(); + if (tag == kBinInitConst) { + GlobalTables::GetConstPool().InsertConstPool(fp.first, ImportConst(nullptr)); + } + } +} + +void BinaryMplImport::ImportMethodPair(MethodPair &memPool) { + std::string funcName; + ReadAsciiStr(funcName); + TyIdx funcTyIdx = ImportType(); + int64 x = ReadNum(); + CHECK_FATAL(x >= 0, "ReadNum error, x: %d", x); + auto attrFlag = static_cast(x); + + GStrIdx strIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(funcName); + MIRSymbol *prevFuncSt = GlobalTables::GetGsymTable().GetSymbolFromStrIdx(strIdx); + MIRSymbol *funcSt = nullptr; + MIRFunction *fn = nullptr; + + if (prevFuncSt != nullptr && (prevFuncSt->GetStorageClass() == kScText && prevFuncSt->GetSKind() == kStFunc)) { + funcSt = prevFuncSt; + fn = funcSt->GetFunction(); + } else { + funcSt = GlobalTables::GetGsymTable().CreateSymbol(kScopeGlobal); + funcSt->SetNameStrIdx(strIdx); + GlobalTables::GetGsymTable().AddToStringSymbolMap(*funcSt); + funcSt->SetStorageClass(kScText); + funcSt->SetSKind(kStFunc); + funcSt->SetTyIdx(funcTyIdx); + funcSt->SetIsImported(imported); + funcSt->SetIsImportedDecl(imported); + methodSymbols.push_back(funcSt); + + fn = mod.GetMemPool()->New(&mod, funcSt->GetStIdx()); + fn->SetPuidx(GlobalTables::GetFunctionTable().GetFuncTable().size()); + GlobalTables::GetFunctionTable().GetFuncTable().push_back(fn); + funcSt->SetFunction(fn); + auto *funcType = static_cast(funcSt->GetType()); + fn->SetMIRFuncType(funcType); + fn->SetFileIndex(0); + fn->SetBaseClassFuncNames(funcSt->GetNameStrIdx()); + fn->SetFuncAttrs(attrFlag); + } + memPool.first.SetFullIdx(funcSt->GetStIdx().FullIdx()); + memPool.second.first.reset(funcTyIdx); + memPool.second.second.SetAttrFlag(attrFlag); +} + +void BinaryMplImport::UpdateMethodSymbols() { + for (auto sym : methodSymbols) { + MIRFunction *fn = sym->GetFunction(); + CHECK_FATAL(fn != nullptr, "fn is null"); + auto *funcType = static_cast(GlobalTables::GetTypeTable().GetTypeFromTyIdx(sym->GetTyIdx())); + fn->SetMIRFuncType(funcType); + fn->SetReturnStruct(*GlobalTables::GetTypeTable().GetTypeFromTyIdx(funcType->GetRetTyIdx())); + if (fn->GetFormalDefVec().size() != 0) { + continue; // already updated in ImportFunction() + } + for (size_t i = 0; i < funcType->GetParamTypeList().size(); ++i) { + FormalDef formalDef(nullptr, funcType->GetParamTypeList()[i], funcType->GetParamAttrsList()[i]); + fn->GetFormalDefVec().push_back(formalDef); + } + } +} + +void BinaryMplImport::ImportFieldsOfStructType(FieldVector &fields, uint32 methodSize) { + int64 size = ReadNum(); + int64 initSize = fields.size() + methodSize; + for (int64 i = 0; i < size; ++i) { + FieldPair fp; + ImportFieldPair(fp); + if (initSize == 0) { + fields.push_back(fp); + } + } +} + +void BinaryMplImport::ImportMethodsOfStructType(MethodVector &methods) { + int64 size = ReadNum(); + bool isEmpty = methods.empty(); + for (int64 i = 0; i < size; ++i) { + MethodPair memPool; + ImportMethodPair(memPool); + if (isEmpty) { + methods.push_back(memPool); + } + } +} + +void BinaryMplImport::ImportStructTypeData(MIRStructType &type) { + uint32 methodSize = type.GetMethods().size(); + ImportFieldsOfStructType(type.GetFields(), methodSize); + 
ImportFieldsOfStructType(type.GetStaticFields(), methodSize); + ImportFieldsOfStructType(type.GetParentFields(), methodSize); + ImportMethodsOfStructType(type.GetMethods()); + type.SetIsImported(imported); +} + +void BinaryMplImport::ImportInterfacesOfClassType(std::vector &interfaces) { + int64 size = ReadNum(); + bool isEmpty = interfaces.empty(); + for (int64 i = 0; i < size; ++i) { + TyIdx idx = ImportType(); + if (isEmpty) { + interfaces.push_back(idx); + } + } +} + +void BinaryMplImport::ImportInfoIsStringOfStructType(MIRStructType &type) { + int64 size = ReadNum(); + bool isEmpty = type.GetInfoIsString().empty(); + + for (int64 i = 0; i < size; ++i) { + auto isString = static_cast(ReadNum()); + + if (isEmpty) { + type.PushbackIsString(isString); + } + } +} + +void BinaryMplImport::ImportInfoOfStructType(MIRStructType &type) { + uint64 size = static_cast(ReadNum()); + bool isEmpty = type.GetInfo().empty(); + for (size_t i = 0; i < size; ++i) { + GStrIdx idx = ImportStr(); + int64 x = (type.GetInfoIsStringElemt(i)) ? static_cast(ImportStr()) : ReadNum(); + CHECK_FATAL(x >= 0, "ReadNum nagative, x: %d", x); + CHECK_FATAL(x <= std::numeric_limits::max(), "ReadNum too large, x: %d", x); + if (isEmpty) { + type.PushbackMIRInfo(MIRInfoPair(idx, static_cast(x))); + } + } +} + +void BinaryMplImport::ImportPragmaOfStructType(MIRStructType &type) { + int64 size = ReadNum(); + bool isEmpty = type.GetPragmaVec().empty(); + for (int64 i = 0; i < size; ++i) { + MIRPragma *pragma = ImportPragma(); + if (isEmpty) { + type.PushbackPragma(pragma); + } + } +} + +void BinaryMplImport::SetClassTyidxOfMethods(MIRStructType &type) const { + if (type.GetTypeIndex() != 0u) { + // set up classTyIdx for methods + for (size_t i = 0; i < type.GetMethods().size(); ++i) { + StIdx stidx = type.GetMethodsElement(i).first; + MIRSymbol *st = GlobalTables::GetGsymTable().GetSymbolFromStidx(stidx.Idx()); + CHECK_FATAL(st != nullptr, "st is null"); + CHECK_FATAL(st->GetSKind() == kStFunc, "unexpected st->sKind"); + st->GetFunction()->SetClassTyIdx(type.GetTypeIndex()); + } + } +} + +void BinaryMplImport::ImportClassTypeData(MIRClassType &type) { + TyIdx tempType = ImportType(); + // Keep the parent_tyidx we first met. 
+ if (type.GetParentTyIdx() == 0u) { + type.SetParentTyIdx(tempType); + } + ImportInterfacesOfClassType(type.GetInterfaceImplemented()); + ImportInfoIsStringOfStructType(type); + if (!inIPA) { + ImportInfoOfStructType(type); + ImportPragmaOfStructType(type); + } + SetClassTyidxOfMethods(type); +} + +void BinaryMplImport::ImportInterfaceTypeData(MIRInterfaceType &type) { + ImportInterfacesOfClassType(type.GetParentsTyIdx()); + ImportInfoIsStringOfStructType(type); + if (!inIPA) { + ImportInfoOfStructType(type); + ImportPragmaOfStructType(type); + } + SetClassTyidxOfMethods(type); +} + +void BinaryMplImport::Reset() { + buf.clear(); + bufI = 0; + gStrTab.clear(); + uStrTab.clear(); + typTab.clear(); + funcTab.clear(); + symTab.clear(); + methodSymbols.clear(); + definedLabels.clear(); + gStrTab.push_back(GStrIdx(0)); // Dummy + uStrTab.push_back(UStrIdx(0)); // Dummy + symTab.push_back(nullptr); // Dummy + funcTab.push_back(nullptr); // Dummy + eaCgTab.push_back(nullptr); + for (int32 pti = static_cast(PTY_begin); pti < static_cast(PTY_end); ++pti) { + typTab.push_back(TyIdx(pti)); + } +} + +TypeAttrs BinaryMplImport::ImportTypeAttrs() { + TypeAttrs ta; + ta.SetAttrFlag(static_cast(ReadNum())); + ta.SetAlignValue(static_cast(ReadNum())); + ta.SetPack(static_cast(ReadNum())); + return ta; +} + +void BinaryMplImport::ImportTypePairs(std::vector &insVecType) { + int64 size = ReadNum(); + for (int64 i = 0; i < size; ++i) { + TyIdx t0 = ImportType(); + TyIdx t1 = ImportType(); + TypePair tp(t0, t1); + insVecType.push_back(tp); + } +} + +void BinaryMplImport::CompleteAggInfo(TyIdx tyIdx) { + MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx); + CHECK_FATAL(type != nullptr, "MIRType is null"); + if (type->GetKind() == kTypeInterface) { + auto *interfaceType = static_cast(type); + ImportStructTypeData(*interfaceType); + ImportInterfaceTypeData(*interfaceType); + } else if (type->GetKind() == kTypeClass) { + auto *classType = static_cast(type); + ImportStructTypeData(*classType); + ImportClassTypeData(*classType); + } else if (type->GetKind() == kTypeStruct || type->GetKind() == kTypeUnion) { + auto *structType = static_cast(type); + ImportStructTypeData(*structType); + } else { + ERR(kLncErr, "in BinaryMplImport::CompleteAggInfo, MIRType error"); + } +} + +inline static bool IsIncomplete(const MIRType &type) { + return (type.GetKind() == kTypeInterfaceIncomplete || type.GetKind() == kTypeClassIncomplete || + type.GetKind() == kTypeStructIncomplete); +} + +TyIdx BinaryMplImport::ImportType(bool forPointedType) { + int64 tag = ReadNum(); + static MIRType *typeNeedsComplete = nullptr; + static int ptrLev = 0; + if (tag == 0) { + return TyIdx(0); + } + if (tag < 0) { + CHECK_FATAL(static_cast(-tag) < typTab.size(), "index out of bounds"); + return typTab.at(static_cast(-tag)); + } + PrimType primType = static_cast(0); + GStrIdx strIdx(0); + bool nameIsLocal = false; + ImportTypeBase(primType, strIdx, nameIsLocal); + + switch (tag) { + case kBinKindTypeScalar: + return TyIdx(primType); + case kBinKindTypePointer: { + MIRPtrType type(primType, strIdx); + type.SetNameIsLocal(nameIsLocal); + size_t idx = typTab.size(); + typTab.push_back(TyIdx(0)); + type.SetTypeAttrs(ImportTypeAttrs()); + ++ptrLev; + type.SetPointedTyIdx(ImportType(true)); + --ptrLev; + MIRType *origType = &InsertInTypeTables(type); + typTab[idx] = origType->GetTypeIndex(); + if (typeNeedsComplete != nullptr && ptrLev == 0) { + TyIdx tyIdxNeedsComplete = typeNeedsComplete->GetTypeIndex(); + typeNeedsComplete = nullptr; + 
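// the pointed-to aggregate was only header-imported during the pointer recursion;
// now that the outermost pointer level has unwound, pull in its deferred field and method data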
CompleteAggInfo(tyIdxNeedsComplete); + } + return origType->GetTypeIndex(); + } + case kBinKindTypeFArray: { + MIRFarrayType type(strIdx); + type.SetNameIsLocal(nameIsLocal); + size_t idx = typTab.size(); + typTab.push_back(TyIdx(0)); + type.SetElemtTyIdx(ImportType(forPointedType)); + MIRType *origType = &InsertInTypeTables(type); + typTab[idx] = origType->GetTypeIndex(); + return origType->GetTypeIndex(); + } + case kBinKindTypeJarray: { + MIRJarrayType type(strIdx); + type.SetNameIsLocal(nameIsLocal); + size_t idx = typTab.size(); + typTab.push_back(TyIdx(0)); + type.SetElemtTyIdx(ImportType(forPointedType)); + MIRType *origType = &InsertInTypeTables(type); + typTab[idx] = origType->GetTypeIndex(); + return origType->GetTypeIndex(); + } + case kBinKindTypeArray: { + MIRArrayType type(strIdx); + type.SetNameIsLocal(nameIsLocal); + type.SetDim(ReadNum()); + CHECK_FATAL(type.GetDim() < kMaxArrayDim, "array index out of range"); + for (uint16 i = 0; i < type.GetDim(); ++i) { + type.SetSizeArrayItem(i, ReadNum()); + } + size_t idx = typTab.size(); + typTab.push_back(TyIdx(0)); + type.SetElemTyIdx(ImportType(forPointedType)); + type.SetTypeAttrs(ImportTypeAttrs()); + MIRType *origType = &InsertInTypeTables(type); + typTab[idx] = origType->GetTypeIndex(); + return origType->GetTypeIndex(); + } + case kBinKindTypeFunction: { + MIRFuncType type(strIdx); + type.SetNameIsLocal(nameIsLocal); + size_t idx = typTab.size(); + typTab.push_back(TyIdx(0)); + type.SetRetTyIdx(ImportType()); + type.funcAttrs.SetAttrFlag(static_cast(ReadNum())); + int64 size = ReadNum(); + for (int64 i = 0; i < size; ++i) { + type.GetParamTypeList().push_back(ImportType()); + } + size = ReadNum(); + for (int64 i = 0; i < size; ++i) { + type.GetParamAttrsList().push_back(ImportTypeAttrs()); + } + MIRType *origType = &InsertInTypeTables(type); + typTab[idx] = origType->GetTypeIndex(); + return origType->GetTypeIndex(); + } + case kBinKindTypeParam: { + MIRTypeParam type(strIdx); + type.SetNameIsLocal(nameIsLocal); + MIRType *origType = &InsertInTypeTables(type); + typTab.push_back(origType->GetTypeIndex()); + return origType->GetTypeIndex(); + } + case kBinKindTypeInstantVector: { + auto kind = static_cast(ReadNum()); + MIRInstantVectorType type(kind, strIdx); + type.SetNameIsLocal(nameIsLocal); + auto *origType = static_cast(&InsertInTypeTables(type)); + typTab.push_back(origType->GetTypeIndex()); + ImportTypePairs(origType->GetInstantVec()); + return origType->GetTypeIndex(); + } + case kBinKindTypeGenericInstant: { + MIRGenericInstantType type(strIdx); + type.SetNameIsLocal(nameIsLocal); + auto *origType = static_cast(&InsertInTypeTables(type)); + typTab.push_back(origType->GetTypeIndex()); + ImportTypePairs(origType->GetInstantVec()); + origType->SetGenericTyIdx(ImportType()); + return origType->GetTypeIndex(); + } + case kBinKindTypeBitField: { + uint8 fieldSize = ReadNum(); + MIRBitFieldType type(fieldSize, primType, strIdx); + type.SetNameIsLocal(nameIsLocal); + MIRType *origType = &InsertInTypeTables(type); + typTab.push_back(origType->GetTypeIndex()); + return origType->GetTypeIndex(); + } + case kBinKindTypeStruct: { + auto kind = static_cast(ReadNum()); + MIRStructType type(kind, strIdx); + type.SetNameIsLocal(nameIsLocal); + type.SetTypeAttrs(ImportTypeAttrs()); + MIRStructType &origType = static_cast(InsertInTypeTables(type)); + typTab.push_back(origType.GetTypeIndex()); + if (kind != kTypeStructIncomplete) { + if (forPointedType) { + typeNeedsComplete = &origType; + } else { + ImportStructTypeData(origType); + } 
+ } + return origType.GetTypeIndex(); + } + case kBinKindTypeClass: { + auto kind = static_cast(ReadNum()); + MIRClassType type(kind, strIdx); + type.SetNameIsLocal(nameIsLocal); + auto &origType = static_cast(InsertInTypeTables(type)); + typTab.push_back(origType.GetTypeIndex()); + if (kind != kTypeClassIncomplete) { + if (forPointedType) { + typeNeedsComplete = &origType; + } else { + ImportStructTypeData(origType); + ImportClassTypeData(origType); + } + } + return origType.GetTypeIndex(); + } + case kBinKindTypeInterface: { + auto kind = static_cast(ReadNum()); + MIRInterfaceType type(kind, strIdx); + type.SetNameIsLocal(nameIsLocal); + auto &origType = static_cast(InsertInTypeTables(type)); + typTab.push_back(origType.GetTypeIndex()); + if (kind != kTypeInterfaceIncomplete) { + if (forPointedType) { + typeNeedsComplete = &origType; + } else { + ImportStructTypeData(origType); + ImportInterfaceTypeData(origType); + } + } + return origType.GetTypeIndex(); + } + default: + CHECK_FATAL(false, "Unexpected binary kind"); + } +} + +TyIdx BinaryMplImport::ImportTypeNonJava() { + int64 tag = ReadNum(); + if (tag == 0) { + return TyIdx(0); + } + if (tag < 0) { + CHECK_FATAL(static_cast(-tag) < typTab.size(), "index out of bounds"); + return typTab[static_cast(-tag)]; + } + PrimType primType = static_cast(0); + GStrIdx strIdx(0); + bool nameIsLocal = false; + ImportTypeBase(primType, strIdx, nameIsLocal); + TyIdx tyIdxUsed(GlobalTables::GetTypeTable().GetTypeTableSize()); + if (tag != kBinKindTypeScalar) { + GlobalTables::GetTypeTable().PushNull(); + typTab.push_back(tyIdxUsed); + } + + switch (tag) { + case kBinKindTypeScalar: + return TyIdx(primType); + case kBinKindTypePointer: { + MIRPtrType type(primType, strIdx); + type.SetNameIsLocal(nameIsLocal); + type.SetTypeAttrs(ImportTypeAttrs()); + type.SetPointedTyIdx(ImportTypeNonJava()); + GlobalTables::GetTypeTable().CreateMirTypeNodeAt(type, tyIdxUsed, &mod, false, false); + return tyIdxUsed; + } + case kBinKindTypeFArray: { + MIRFarrayType type(strIdx); + type.SetNameIsLocal(nameIsLocal); + type.SetElemtTyIdx(ImportTypeNonJava()); + GlobalTables::GetTypeTable().CreateMirTypeNodeAt(type, tyIdxUsed, &mod, false, false); + return tyIdxUsed; + } + case kBinKindTypeJarray: { + MIRJarrayType type(strIdx); + type.SetNameIsLocal(nameIsLocal); + type.SetElemtTyIdx(ImportTypeNonJava()); + GlobalTables::GetTypeTable().CreateMirTypeNodeAt(type, tyIdxUsed, &mod, false, false); + return tyIdxUsed; + } + case kBinKindTypeArray: { + MIRArrayType type(strIdx); + type.SetNameIsLocal(nameIsLocal); + type.SetDim(ReadNum()); + CHECK_FATAL(type.GetDim() < kMaxArrayDim, "array index out of range"); + for (uint16 i = 0; i < type.GetDim(); ++i) { + type.SetSizeArrayItem(i, ReadNum()); + } + type.SetElemTyIdx(ImportTypeNonJava()); + type.SetTypeAttrs(ImportTypeAttrs()); + GlobalTables::GetTypeTable().CreateMirTypeNodeAt(type, tyIdxUsed, &mod, false, false); + return tyIdxUsed; + } + case kBinKindTypeFunction: { + MIRFuncType type(strIdx); + type.SetNameIsLocal(nameIsLocal); + type.SetRetTyIdx(ImportTypeNonJava()); + type.funcAttrs.SetAttrFlag(static_cast(ReadNum())); + int64 size = ReadNum(); + for (int64 i = 0; i < size; ++i) { + type.GetParamTypeList().push_back(ImportTypeNonJava()); + } + size = ReadNum(); + for (int64 i = 0; i < size; ++i) { + type.GetParamAttrsList().push_back(ImportTypeAttrs()); + } + GlobalTables::GetTypeTable().CreateMirTypeNodeAt(type, tyIdxUsed, &mod, false, false); + return tyIdxUsed; + } + case kBinKindTypeParam: { + MIRTypeParam 
type(strIdx); + type.SetNameIsLocal(nameIsLocal); + GlobalTables::GetTypeTable().CreateMirTypeNodeAt(type, tyIdxUsed, &mod, false, false); + return tyIdxUsed; + } + case kBinKindTypeInstantVector: { + auto kind = static_cast(ReadNum()); + MIRInstantVectorType type(kind, strIdx); + type.SetNameIsLocal(nameIsLocal); + ImportTypePairs(type.GetInstantVec()); + GlobalTables::GetTypeTable().CreateMirTypeNodeAt(type, tyIdxUsed, &mod, false, false); + return tyIdxUsed; + } + case kBinKindTypeGenericInstant: { + MIRGenericInstantType type(strIdx); + type.SetNameIsLocal(nameIsLocal); + ImportTypePairs(type.GetInstantVec()); + type.SetGenericTyIdx(ImportTypeNonJava()); + GlobalTables::GetTypeTable().CreateMirTypeNodeAt(type, tyIdxUsed, &mod, false, false); + return tyIdxUsed; + } + case kBinKindTypeBitField: { + uint8 fieldSize = ReadNum(); + MIRBitFieldType type(fieldSize, primType, strIdx); + type.SetNameIsLocal(nameIsLocal); + GlobalTables::GetTypeTable().CreateMirTypeNodeAt(type, tyIdxUsed, &mod, false, false); + return tyIdxUsed; + } + case kBinKindTypeStruct: { + auto kind = static_cast(ReadNum()); + MIRStructType type(kind, strIdx); + type.SetNameIsLocal(nameIsLocal); + type.SetTypeAttrs(ImportTypeAttrs()); + if (kind != kTypeStructIncomplete) { + ImportStructTypeData(type); + } + GlobalTables::GetTypeTable().CreateMirTypeNodeAt(type, tyIdxUsed, &mod, false, IsIncomplete(type)); + return tyIdxUsed; + } + case kBinKindTypeClass: { + auto kind = static_cast(ReadNum()); + MIRClassType type(kind, strIdx); + type.SetNameIsLocal(nameIsLocal); + if (kind != kTypeClassIncomplete) { + ImportStructTypeData(type); + ImportClassTypeData(type); + } + GlobalTables::GetTypeTable().CreateMirTypeNodeAt(type, tyIdxUsed, &mod, true, IsIncomplete(type)); + return tyIdxUsed; + } + case kBinKindTypeInterface: { + auto kind = static_cast(ReadNum()); + MIRInterfaceType type(kind, strIdx); + type.SetNameIsLocal(nameIsLocal); + if (kind != kTypeInterfaceIncomplete) { + ImportStructTypeData(type); + ImportInterfaceTypeData(type); + } + GlobalTables::GetTypeTable().CreateMirTypeNodeAt(type, tyIdxUsed, &mod, true, IsIncomplete(type)); + return tyIdxUsed; + } + default: + CHECK_FATAL(false, "Unexpected binary kind"); + } +} + +void BinaryMplImport::ImportTypeBase(PrimType &primType, GStrIdx &strIdx, bool &nameIsLocal) { + primType = static_cast(ReadNum()); + strIdx = ImportStr(); + nameIsLocal = ReadNum(); +} + +inline static bool IsObject(const MIRType &type) { + return (type.GetKind() == kTypeClass || type.GetKind() == kTypeClassIncomplete || + type.GetKind() == kTypeInterface || type.GetKind() == kTypeInterfaceIncomplete); +} + +MIRType &BinaryMplImport::InsertInTypeTables(MIRType &type) { + MIRType *resultTypePtr = &type; + TyIdx prevTyIdx = mod.GetTypeNameTab()->GetTyIdxFromGStrIdx(type.GetNameStrIdx()); + if (prevTyIdx != 0u && !type.IsNameIsLocal()) { + MIRType *prevType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(prevTyIdx); + if (!prevType->IsMIRTypeByName() && + ((IsIncomplete(*prevType) && IsIncomplete(type)) || + (!IsIncomplete(*prevType) && !IsIncomplete(type)) || + (!IsIncomplete(*prevType) && IsIncomplete(type)))) { + resultTypePtr = prevType->CopyMIRTypeNode(); + if (resultTypePtr->GetKind() == kTypeStruct || resultTypePtr->GetKind() == kTypeUnion || + resultTypePtr->GetKind() == kTypeStructIncomplete) { + tmpStruct.push_back(static_cast(resultTypePtr)); + } else if (resultTypePtr->GetKind() == kTypeClass || resultTypePtr->GetKind() == kTypeClassIncomplete) { + 
tmpClass.push_back(static_cast(resultTypePtr)); + } else if (resultTypePtr->GetKind() == kTypeInterface || resultTypePtr->GetKind() == kTypeInterfaceIncomplete) { + tmpInterface.push_back(static_cast(resultTypePtr)); + } + } else { + // New definition wins + type.SetTypeIndex(prevTyIdx); + CHECK_FATAL(GlobalTables::GetTypeTable().GetTypeTable().empty() == false, "container check"); + GlobalTables::GetTypeTable().SetTypeWithTyIdx(prevTyIdx, *type.CopyMIRTypeNode()); + resultTypePtr = GlobalTables::GetTypeTable().GetTypeFromTyIdx(prevTyIdx); + if (!IsIncomplete(*resultTypePtr)) { + GlobalTables::GetTypeNameTable().SetGStrIdxToTyIdx(resultTypePtr->GetNameStrIdx(), + resultTypePtr->GetTypeIndex()); + } + } + } else { + // New type, no previous definition or anonymous type + TyIdx tyIdx = GlobalTables::GetTypeTable().GetOrCreateMIRType(&type); + resultTypePtr = GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx); + if (tyIdx + 1 == GlobalTables::GetTypeTable().GetTypeTable().size() && !resultTypePtr->IsNameIsLocal()) { + GStrIdx stridx = resultTypePtr->GetNameStrIdx(); + if (stridx != 0) { + mod.GetTypeNameTab()->SetGStrIdxToTyIdx(stridx, tyIdx); + mod.PushbackTypeDefOrder(stridx); + if (IsObject(*resultTypePtr)) { + mod.AddClass(tyIdx); + if (!IsIncomplete(*resultTypePtr)) { + GlobalTables::GetTypeNameTable().SetGStrIdxToTyIdx(stridx, tyIdx); + } + } + } + } + } + return *resultTypePtr; +} + +void BinaryMplImport::SetupEHRootType() { + // setup eh root type with most recent Ljava_2Flang_2FObject_3B + GStrIdx gStrIdx = GlobalTables::GetStrTable().GetStrIdxFromName(namemangler::kJavaLangObjectStr); + if (gStrIdx == 0u) { + return; + } + + TyIdx tyIdx = GlobalTables::GetTypeNameTable().GetTyIdxFromGStrIdx(gStrIdx); + if (tyIdx != 0u) { + mod.SetThrowableTyIdx(tyIdx); + } +} + +MIRSymbol *BinaryMplImport::GetOrCreateSymbol(TyIdx tyIdx, GStrIdx strIdx, MIRSymKind mclass, + MIRStorageClass sclass, MIRFunction *func, uint8 scpID) { + MIRSymbol *st = GlobalTables::GetGsymTable().GetSymbolFromStrIdx(strIdx); + if (st != nullptr && st->GetStorageClass() == sclass && st->GetSKind() == mclass && scpID == kScopeGlobal) { + return st; + } + return mirBuilder.CreateSymbol(tyIdx, strIdx, mclass, sclass, func, scpID); +} + +MIRSymbol *BinaryMplImport::InSymbol(MIRFunction *func) { + int64 tag = ReadNum(); + if (tag == 0) { + return nullptr; + } else if (tag < 0) { + CHECK_FATAL(static_cast(-tag) < symTab.size(), "index out of bounds"); + return symTab.at(-tag); + } else { + CHECK_FATAL(tag == kBinSymbol, "expecting kBinSymbol"); + int64 scope = ReadNum(); + GStrIdx stridx = ImportStr(); + UStrIdx secAttr = ImportUsrStr(); + UStrIdx asmAttr = ImportUsrStr(); + auto skind = static_cast(ReadNum()); + auto sclass = static_cast(ReadNum()); + TyIdx tyTmp(0); + MIRSymbol *sym = GetOrCreateSymbol(tyTmp, stridx, skind, sclass, func, scope); + if (secAttr != 0) { + sym->sectionAttr = secAttr; + } + if (asmAttr != 0) { + sym->SetAsmAttr(asmAttr); + } + symTab.push_back(sym); + sym->SetAttrs(ImportTypeAttrs()); + sym->SetIsTmp(ReadNum() != 0); + sym->SetIsImported(imported); + uint32 thepregno = 0; + if (skind == kStPreg) { + CHECK_FATAL(scope == kScopeLocal && func != nullptr, "Expecting kScopeLocal"); + thepregno = static_cast(ReadNum()); + } else if (skind == kStConst || skind == kStVar) { + sym->SetKonst(ImportConst(func)); + } else if (skind == kStFunc) { + PUIdx puidx = ImportFunction(); + mod.SetCurFunction(func); + if (puidx != 0) { + sym->SetFunction(GlobalTables::GetFunctionTable().GetFunctionFromPuidx(puidx)); 
+ } + } + if (skind == kStVar || skind == kStFunc) { + ImportSrcPos(sym->GetSrcPosition()); + } + TyIdx tyIdx = mod.IsJavaModule() ? ImportType() : ImportTypeNonJava(); + sym->SetTyIdx(tyIdx); + if (skind == kStPreg) { + MIRType *mirType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(sym->GetTyIdx()); + PregIdx pregidx = func->GetPregTab()->EnterPregNo(thepregno, mirType->GetPrimType(), mirType); + MIRPregTable *pregTab = func->GetPregTab(); + MIRPreg *preg = pregTab->PregFromPregIdx(pregidx); + preg->SetPrimType(mirType->GetPrimType()); + sym->SetPreg(preg); + } + return sym; + } +} + +PUIdx BinaryMplImport::ImportFunction() { + int64 tag = ReadNum(); + if (tag == 0) { + mod.SetCurFunction(nullptr); + return 0; + } else if (tag < 0) { + CHECK_FATAL(static_cast(-tag) <= funcTab.size(), "index out of bounds"); + if (static_cast(-tag) == funcTab.size()) { // function was exported before its symbol + return static_cast(0); + } + PUIdx puIdx = funcTab[static_cast(-tag)]->GetPuidx(); + mod.SetCurFunction(GlobalTables::GetFunctionTable().GetFunctionFromPuidx(puIdx)); + return puIdx; + } + CHECK_FATAL(tag == kBinFunction, "expecting kBinFunction"); + MIRSymbol *funcSt = InSymbol(nullptr); + CHECK_FATAL(funcSt != nullptr, "null ptr check"); + MIRFunction *func = nullptr; + if (funcSt->GetFunction() == nullptr) { + maple::MIRBuilder builder(&mod); + func = builder.CreateFunction(funcSt->GetStIdx()); + funcTab.push_back(func); + } else { + func = funcSt->GetFunction(); + funcTab.push_back(func); + } + funcSt->SetFunction(func); + methodSymbols.push_back(funcSt); + if (mod.IsJavaModule()) { + func->SetBaseClassFuncNames(funcSt->GetNameStrIdx()); + } + TyIdx funcTyIdx = mod.IsJavaModule() ? ImportType() : ImportTypeNonJava(); + func->SetMIRFuncType(static_cast(GlobalTables::GetTypeTable().GetTypeFromTyIdx(funcTyIdx))); + + func->SetStIdx(funcSt->GetStIdx()); + if (!inCG) { + func->SetFuncAttrs(ReadNum()); // merge side effect + } else { + if (!func->IsDirty()) { + func->SetDirty(true); + func->SetFuncAttrs(ReadNum()); // merge side effect + } else { + FuncAttrs tmp; + tmp.SetAttrFlag(ReadNum()); + if (func->IsNoDefArgEffect() != tmp.GetAttr(FUNCATTR_nodefargeffect)) { + tmp.SetAttr(FUNCATTR_nodefargeffect, true); + } + if (func->IsNoDefEffect() != tmp.GetAttr(FUNCATTR_nodefeffect)) { + tmp.SetAttr(FUNCATTR_nodefeffect, true); + } + if (func->IsNoRetGlobal() != tmp.GetAttr(FUNCATTR_noretglobal)) { + tmp.SetAttr(FUNCATTR_noretglobal, true); + } + if (func->IsNoThrowException() != tmp.GetAttr(FUNCATTR_nothrow_exception)) { + tmp.SetAttr(FUNCATTR_nothrow_exception, true); + } + if (func->IsIpaSeen() != tmp.GetAttr(FUNCATTR_ipaseen)) { + tmp.SetAttr(FUNCATTR_ipaseen); + } + if (func->IsPure() != tmp.GetAttr(FUNCATTR_pure)) { + tmp.SetAttr(FUNCATTR_pure, true); + } + if (func->IsNoRetArg() != tmp.GetAttr(FUNCATTR_noretarg)) { + tmp.SetAttr(FUNCATTR_noretarg, true); + } + if (func->IsNoPrivateDefEffect() != tmp.GetAttr(FUNCATTR_noprivate_defeffect)) { + tmp.SetAttr(FUNCATTR_noprivate_defeffect, true); + } + func->SetFuncAttrs(tmp); + } + } + + auto &attributes = func->GetFuncAttrs(); + if (attributes.GetAttr(FUNCATTR_constructor_priority)) { + attributes.SetConstructorPriority(static_cast(ReadNum())); + } + if (attributes.GetAttr(FUNCATTR_destructor_priority)) { + attributes.SetDestructorPriority(static_cast(ReadNum())); + } + + func->SetFlag(ReadNum()); + if (mod.IsJavaModule()) { + (void)ImportType(); // not set the field to mimic parser + } else { + (void)ImportTypeNonJava(); // not set the field to 
mimic parser + } + size_t size = static_cast(ReadNum()); + if (func->GetFormalDefVec().size() == 0) { + for (size_t i = 0; i < size; i++) { + GStrIdx strIdx = ImportStr(); + TyIdx tyIdx = mod.IsJavaModule() ? ImportType() : ImportTypeNonJava(); + FormalDef formalDef(strIdx, nullptr, tyIdx, TypeAttrs()); + formalDef.formalAttrs.SetAttrFlag(static_cast(ReadNum())); + func->GetFormalDefVec().push_back(formalDef); + } + } else { + CHECK_FATAL(func->GetFormalDefVec().size() >= size, "ImportFunction: inconsistent number of formals"); + for (size_t i = 0; i < size; i++) { + func->GetFormalDefVec()[i].formalStrIdx = ImportStr(); + func->GetFormalDefVec()[i].formalTyIdx = mod.IsJavaModule() ? ImportType() : ImportTypeNonJava(); + func->GetFormalDefVec()[i].formalAttrs.SetAttrFlag(static_cast(ReadNum())); + } + } + + mod.SetCurFunction(func); + return func->GetPuidx(); +} + +inline void BinaryMplImport::SkipTotalSize() { + ReadInt(); +} + +void BinaryMplImport::ReadStrField() { + SkipTotalSize(); + + int32 size = ReadInt(); + for (int64 i = 0; i < size; ++i) { + GStrIdx stridx = ImportStr(); + GlobalTables::GetConstPool().PutLiteralNameAsImported(stridx); + } + int64 tag = 0; + tag = ReadNum(); + CHECK_FATAL(tag == ~kBinStrStart, "pattern mismatch in Read STR"); +} + +void BinaryMplImport::ReadHeaderField() { + SkipTotalSize(); + mod.SetFlavor(static_cast(ReadNum())); + mod.SetSrcLang(static_cast(ReadNum())); + mod.SetID(static_cast(ReadNum())); + if (mod.GetFlavor() == kFlavorLmbc) { + mod.SetGlobalMemSize(static_cast(ReadNum())); + mod.SetWithDbgInfo(ReadNum() != 0); + } + mod.SetNumFuncs(static_cast(ReadNum())); + std::string inStr; + ReadAsciiStr(inStr); + mod.SetEntryFuncName(inStr); + ImportInfoVector(mod.GetFileInfo(), mod.GetFileInfoIsString()); + + int32 size = static_cast(ReadNum()); + MIRInfoPair infopair; + for (int32 i = 0; i < size; i++) { + infopair.first = ImportStr(); + infopair.second = static_cast(ReadNum()); + mod.PushbackFileInfo(infopair); + } + + size = static_cast(ReadNum()); + for (int32 i = 0; i < size; i++) { + GStrIdx gStrIdx = ImportStr(); + mod.GetImportFiles().push_back(gStrIdx); + std::string importfilename = GlobalTables::GetStrTable().GetStringFromStrIdx(gStrIdx); + // record the imported file for later reading summary info, if exists + mod.PushbackImportedMplt(importfilename); + BinaryMplt *binMplt = new BinaryMplt(mod); + binMplt->GetBinImport().imported = true; + + INFO(kLncInfo, "importing %s", importfilename.c_str()); + if (!binMplt->GetBinImport().Import(importfilename, false)) { // not a binary mplt + FATAL(kLncFatal, "cannot open binary MPLT file: %s\n", importfilename.c_str()); + } else { + INFO(kLncInfo, "finished import of %s", importfilename.c_str()); + } + if (i == 0) { + binMplt->SetImportFileName(importfilename); + mod.SetBinMplt(binMplt); + } else { + delete binMplt; + } + } + + size = static_cast(ReadNum()); + for (int32 i = 0; i < size; i++) { + std::string str; + ReadAsciiStr(str); + mod.GetAsmDecls().emplace_back(MapleString(str, mod.GetMemPool())); + } + + int32 tag = static_cast(ReadNum()); + CHECK_FATAL(tag == ~kBinHeaderStart, "pattern mismatch in Read Import"); + return; +} + +void BinaryMplImport::ReadTypeField() { + SkipTotalSize(); + + int32 size = ReadInt(); + if (mod.IsJavaModule()) { + for (int64 i = 0; i < size; ++i) { + ImportType(); + } + } else { + for (int64 i = 0; i < size; ++i) { + (void)ImportTypeNonJava(); + } + } + int64 tag = 0; + tag = ReadNum(); + CHECK_FATAL(tag == ~kBinTypeStart, "pattern mismatch in Read TYPE"); +} + 
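The importer routines in this file (InSymbol, ImportFunction, ImportCallInfo, and the InEaCg* readers further down) all share one record encoding: a record opens with a signed tag from ReadNum(), where zero means "no item", a negative value is a back-reference whose negation indexes the table of items already imported from this buffer, and a positive value must equal the section's kBin* marker before a fresh item is decoded and appended to that table. The stand-alone sketch below only illustrates that convention; ImportRecord and its decodeNew callback are invented names and are not part of this patch.

#include <cstdint>
#include <functional>
#include <vector>

// Sketch of the tag convention used by the Import*/In* routines:
//   tag == 0 -> no item
//   tag <  0 -> back-reference: -tag indexes the already-imported table
//   tag  > 0 -> must match the expected kBin* marker, then decode a new item
template <typename T>
T *ImportRecord(int64_t tag, int64_t expectedMarker, std::vector<T *> &table,
                const std::function<T *()> &decodeNew) {
  if (tag == 0) {
    return nullptr;
  }
  if (tag < 0) {
    return table.at(static_cast<size_t>(-tag));  // reuse an item imported earlier
  }
  if (tag != expectedMarker) {
    return nullptr;  // the real importer aborts here with CHECK_FATAL
  }
  T *item = decodeNew();   // e.g. read the fields that follow the marker
  table.push_back(item);   // later records can refer to it as -(its table index)
  return item;
}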
+void BinaryMplImport::ImportEnumeration() { + int64 tag = ReadNum(); + CHECK_FATAL(tag == kBinEnumeration, "expecting kBinEnumeration"); + PrimType ptyp = static_cast(Read()); + GStrIdx gStrIdx = ImportStr(); + MIREnum *mirEnum = new MIREnum(ptyp, gStrIdx); + size_t siz = static_cast(ReadNum()); + for (size_t i = 0; i < siz; i++) { + gStrIdx = ImportStr(); + IntVal intVal(ReadNum(), ptyp); + mirEnum->NewElement(gStrIdx, intVal); + } + GlobalTables::GetEnumTable().enumTable.push_back(mirEnum); +} + +void BinaryMplImport::ReadEnumField() { + SkipTotalSize(); + + int32 size = ReadInt(); + for (int64 i = 0; i < size; ++i) { + ImportEnumeration(); + } + int64 tag = ReadNum(); + CHECK_FATAL(tag == ~kBinEnumStart, "pattern mismatch in Reading ENUM"); +} + +CallInfo *BinaryMplImport::ImportCallInfo() { + int64 tag = ReadNum(); + if (tag < 0) { + CHECK_FATAL(static_cast(-tag) < callInfoTab.size(), "index out of bounds"); + return callInfoTab.at(-tag); + } + CHECK_FATAL(tag == kBinCallinfo, "expecting kBinCallinfo"); + CallType ctype = static_cast(ReadNum()); // call type + uint32 loopDepth = static_cast(ReadInt()); + uint32 id = static_cast(ReadInt()); + bool argLocal = Read() == 1; + MIRSymbol *funcSym = InSymbol(nullptr); + CHECK_FATAL(funcSym != nullptr, "func_sym is null in BinaryMplImport::InCallInfo"); + CallInfo *ret = mod.GetMemPool()->New(ctype, funcSym->GetFunction(), + static_cast(nullptr), loopDepth, id, argLocal); + callInfoTab.push_back(ret); + return ret; +} + +void BinaryMplImport::MergeDuplicated(PUIdx methodPuidx, std::vector &targetSet, + std::vector &newSet) { + if (targetSet.empty()) { + (void)targetSet.insert(targetSet.begin(), newSet.begin(), newSet.end()); + std::unordered_set tmp; + mod.AddValueToMethod2TargetHash(methodPuidx, tmp); + for (size_t i = 0; i < newSet.size(); ++i) { + mod.InsertTargetHash(methodPuidx, newSet[i]->GetID()); + } + } else { + for (size_t i = 0; i < newSet.size(); ++i) { + CallInfo *newItem = newSet[i]; + if (!mod.HasTargetHash(methodPuidx, newItem->GetID())) { + targetSet.push_back(newItem); + mod.InsertTargetHash(methodPuidx, newItem->GetID()); + } + } + } +} + +void BinaryMplImport::ReadCgField() { + SkipTotalSize(); + + int32 size = ReadInt(); + int64 tag = 0; + + for (int i = 0; i < size; ++i) { + tag = ReadNum(); + CHECK_FATAL(tag == kStartMethod, " should be start point of method"); + MIRSymbol *tmpInSymbol = InSymbol(nullptr); + CHECK_FATAL(tmpInSymbol != nullptr, "null ptr check"); + PUIdx methodPuidx = tmpInSymbol->GetFunction()->GetPuidx(); + CHECK_FATAL(methodPuidx, "should not be 0"); + if (mod.GetMethod2TargetMap().find(methodPuidx) == mod.GetMethod2TargetMap().end()) { + std::vector targetSetTmp; + mod.AddMemToMethod2TargetMap(methodPuidx, targetSetTmp); + } + int32 targSize = ReadInt(); + std::vector targetSet; + callInfoTab.clear(); + callInfoTab.push_back(nullptr); + for (int32 j = 0; j < targSize; ++j) { + CallInfo *callInfo = ImportCallInfo(); + targetSet.push_back(callInfo); + } + MergeDuplicated(methodPuidx, mod.GetMemFromMethod2TargetMap(methodPuidx), targetSet); + tag = ReadNum(); + CHECK_FATAL(tag == ~kStartMethod, " should be start point of method"); + } + tag = ReadNum(); + CHECK_FATAL(tag == ~kBinCgStart, "pattern mismatch in Read CG"); +} + +void BinaryMplImport::ReadEaField() { + ReadInt(); + int size = ReadInt(); + for (int i = 0; i < size; ++i) { + GStrIdx funcName = ImportStr(); + int nodesSize = ReadInt(); + EAConnectionGraph *newEaCg = mod.GetMemPool()->New(&mod, &mod.GetMPAllocator(), funcName, true); + 
newEaCg->ResizeNodes(nodesSize, nullptr); + InEaCgNode(*newEaCg); + int eaSize = ReadInt(); + for (int j = 0; j < eaSize; ++j) { + EACGBaseNode *node = &InEaCgNode(*newEaCg); + newEaCg->funcArgNodes.push_back(node); + } + mod.SetEAConnectionGraph(funcName, newEaCg); + } + CHECK_FATAL(ReadNum() == ~kBinEaStart, "pattern mismatch in Read EA"); +} + +void BinaryMplImport::ReadSeField() { + SkipTotalSize(); + + int32 size = ReadInt(); +#ifdef MPLT_DEBUG + LogInfo::MapleLogger() << "SE SIZE : " << size << '\n'; +#endif + for (int32 i = 0; i < size; ++i) { + GStrIdx funcName = ImportStr(); + uint8 specialEffect = Read(); + TyIdx tyIdx = kInitTyIdx; + if ((specialEffect & kPureFunc) == kPureFunc) { + tyIdx = ImportType(); + } + const std::string &funcStr = GlobalTables::GetStrTable().GetStringFromStrIdx(funcName); + if (funcStr == "Ljava_2Flang_2FObject_3B_7Cwait_7C_28_29V") { + specialEffect = 0; + } + auto *funcSymbol = + GlobalTables::GetGsymTable().GetSymbolFromStrIdx(GlobalTables::GetStrTable().GetStrIdxFromName(funcStr)); + MIRFunction *func = funcSymbol != nullptr ? mirBuilder.GetFunctionFromSymbol(*funcSymbol) : nullptr; + if (func != nullptr) { + func->SetAttrsFromSe(specialEffect); + } else if ((specialEffect & kPureFunc) == kPureFunc) { + func = mirBuilder.GetOrCreateFunction(funcStr, tyIdx); + func->SetAttrsFromSe(specialEffect); + } + } + int64 tag = ReadNum(); + CHECK_FATAL(tag == ~kBinSeStart, "pattern mismatch in Read TYPE"); +} + +void BinaryMplImport::InEaCgBaseNode(EACGBaseNode &base, EAConnectionGraph &newEaCg, bool firstPart) { + if (firstPart) { + base.SetEAStatus(static_cast(ReadNum())); + base.SetID(ReadInt()); + } else { + // start to in points to + int size = ReadInt(); + for (int i = 0; i < size; ++i) { + EACGBaseNode *point2Node = &InEaCgNode(newEaCg); + CHECK_FATAL(point2Node->IsObjectNode(), "must be"); + (void)base.pointsTo.insert(static_cast(point2Node)); + } + // start to in in + size = ReadInt(); + for (int i = 0; i < size; ++i) { + EACGBaseNode *point2Node = &InEaCgNode(newEaCg); + base.InsertInSet(point2Node); + } + // start to in out + size = ReadInt(); + for (int i = 0; i < size; ++i) { + EACGBaseNode *point2Node = &InEaCgNode(newEaCg); + base.InsertOutSet(point2Node); + } + } +} + +void BinaryMplImport::InEaCgActNode(EACGActualNode &actual) { + actual.isPhantom = Read() == 1; + actual.isReturn = Read() == 1; + actual.argIdx = Read(); + actual.callSiteInfo = static_cast(ReadInt()); +} + +void BinaryMplImport::InEaCgFieldNode(EACGFieldNode &field, EAConnectionGraph &newEaCg) { + field.SetFieldID(ReadInt()); + int size = ReadInt(); + for (int i = 0; i < size; ++i) { + EACGBaseNode* node = &InEaCgNode(newEaCg); + CHECK_FATAL(node->IsObjectNode(), "must be"); + (void)field.belongsTo.insert(static_cast(node)); + } + field.isPhantom = Read() == 1; +} + +void BinaryMplImport::InEaCgObjNode(EACGObjectNode &obj, EAConnectionGraph &newEaCg) { + Read(); + obj.isPhantom = true; + int size = ReadInt(); + for (int i = 0; i < size; ++i) { + EACGBaseNode *node = &InEaCgNode(newEaCg); + CHECK_FATAL(node->IsFieldNode(), "must be"); + auto *field = static_cast(node); + obj.fieldNodes[static_cast(field)->GetFieldID()] = field; + } + // start to in point by + size = ReadInt(); + for (int i = 0; i < size; ++i) { + EACGBaseNode *point2Node = &InEaCgNode(newEaCg); + (void)obj.pointsBy.insert(point2Node); + } +} + +void BinaryMplImport::InEaCgRefNode(EACGRefNode &ref) { + ref.isStaticField = Read() == 1 ? 
true : false; +} + +EACGBaseNode &BinaryMplImport::InEaCgNode(EAConnectionGraph &newEaCg) { + int64 tag = ReadNum(); + if (tag < 0) { + CHECK_FATAL(static_cast(-tag) < eaCgTab.size(), "index out of bounds"); + return *eaCgTab[-tag]; + } + CHECK_FATAL(tag == kBinEaCgNode, "must be"); + NodeKind kind = static_cast(ReadNum()); + EACGBaseNode *node = nullptr; + switch (kind) { + case kObejectNode: + node = new EACGObjectNode(&mod, &mod.GetMPAllocator(), &newEaCg); + break; + case kReferenceNode: + node = new EACGRefNode(&mod, &mod.GetMPAllocator(), &newEaCg); + break; + case kFieldNode: + node = new EACGFieldNode(&mod, &mod.GetMPAllocator(), &newEaCg); + break; + case kActualNode: + node = new EACGActualNode(&mod, &mod.GetMPAllocator(), &newEaCg); + break; + default: + CHECK_FATAL(false, "impossible"); + } + node->SetEACG(&newEaCg); + eaCgTab.push_back(node); + InEaCgBaseNode(*node, newEaCg, true); + newEaCg.SetNodeAt(node->id - 1, node); + if (node->IsActualNode()) { + CHECK_FATAL(ReadNum() == kBinEaCgActNode, "must be"); + InEaCgActNode(static_cast(*node)); + } else if (node->IsFieldNode()) { + CHECK_FATAL(ReadNum() == kBinEaCgFieldNode, "must be"); + InEaCgFieldNode(static_cast(*node), newEaCg); + } else if (node->IsObjectNode()) { + CHECK_FATAL(ReadNum() == kBinEaCgObjNode, "must be"); + InEaCgObjNode(static_cast(*node), newEaCg); + } else if (node->IsReferenceNode()) { + CHECK_FATAL(ReadNum() == kBinEaCgRefNode, "must be"); + InEaCgRefNode(static_cast(*node)); + } + InEaCgBaseNode(*node, newEaCg, false); + CHECK_FATAL(ReadNum() == ~kBinEaCgNode, "must be"); + return *node; +} + +EAConnectionGraph* BinaryMplImport::ReadEaCgField() { + if (ReadNum() == ~kBinEaCgStart) { + return nullptr; + } + ReadInt(); + GStrIdx funcStr = ImportStr(); + int nodesSize = ReadInt(); + EAConnectionGraph *newEaCg = mod.GetMemPool()->New(&mod, &mod.GetMPAllocator(), funcStr, true); + newEaCg->ResizeNodes(nodesSize, nullptr); + InEaCgNode(*newEaCg); + CHECK_FATAL(newEaCg->GetNode(0)->IsObjectNode(), "must be"); + CHECK_FATAL(newEaCg->GetNode(1)->IsReferenceNode(), "must be"); + CHECK_FATAL(newEaCg->GetNode(2)->IsFieldNode(), "must be"); + newEaCg->globalField = static_cast(newEaCg->GetNode(2)); + newEaCg->globalObj = static_cast(newEaCg->GetNode(0)); + newEaCg->globalRef = static_cast(newEaCg->GetNode(1)); + CHECK_FATAL(newEaCg->globalField && newEaCg->globalObj && newEaCg->globalRef, "must be"); + int32 nodeSize = ReadInt(); + for (int j = 0; j < nodeSize; ++j) { + EACGBaseNode *node = &InEaCgNode(*newEaCg); + newEaCg->funcArgNodes.push_back(node); + } + + int32 callSitesize = ReadInt(); + for (int i = 0; i < callSitesize; ++i) { + uint32 id = static_cast(ReadInt()); + newEaCg->callSite2Nodes[id] = mod.GetMemPool()->New>(mod.GetMPAllocator().Adapter()); + int32 calleeArgSize = ReadInt(); + for (int j = 0; j < calleeArgSize; ++j) { + EACGBaseNode *node = &InEaCgNode(*newEaCg); + newEaCg->callSite2Nodes[id]->push_back(node); + } + } + +#ifdef DEBUG + for (EACGBaseNode *node : newEaCg->GetNodes()) { + if (node == nullptr) { + continue; + } + node->CheckAllConnectionInNodes(); + } +#endif + CHECK_FATAL(ReadNum() == ~kBinEaCgStart, "pattern mismatch in Read EACG"); + return newEaCg; +} + +void BinaryMplImport::ReadSymField() { + SkipTotalSize(); + int32 size = ReadInt(); + for (int64 i = 0; i < size; i++) { + (void)InSymbol(nullptr); + } + int64 tag = ReadNum(); + CHECK_FATAL(tag == ~kBinSymStart, "pattern mismatch in Read SYM"); + return; +} + +void BinaryMplImport::ReadSymTabField() { + SkipTotalSize(); + int32 
size = ReadInt(); + for (int64 i = 0; i < size; i++) { + std::string str; + ReadAsciiStr(str); + } + int64 tag = ReadNum(); + CHECK_FATAL(tag == ~kBinSymTabStart, "pattern mismatch in Read TYPE"); + return; +} + +void BinaryMplImport::ReadContentField() { + SkipTotalSize(); + + int32 size = ReadInt(); + int64 item; + int32 offset; + for (int32 i = 0; i < size; ++i) { + item = ReadNum(); + offset = ReadInt(); + content[item] = offset; + } + CHECK_FATAL(ReadNum() == ~kBinContentStart, "pattern mismatch in Read CONTENT"); +} + +void BinaryMplImport::Jump2NextField() { + uint32 totalSize = static_cast(ReadInt()); + bufI += (totalSize - sizeof(uint32)); + ReadNum(); // skip end tag for this field +} + +bool BinaryMplImport::ImportForSrcLang(const std::string &fname, MIRSrcLang &srcLang) { + Reset(); + ReadFileAt(fname, 0); + int32 magic = ReadInt(); + if (kMpltMagicNumber != magic && (kMpltMagicNumber + 0x10) != magic) { + buf.clear(); + return false; + } + importingFromMplt = kMpltMagicNumber == magic; + int64 fieldID = ReadNum(); + while (fieldID != kBinFinish) { + switch (fieldID) { + case kBinHeaderStart: { + SkipTotalSize(); + (void)ReadNum(); // skip flavor + srcLang = static_cast(ReadNum()); + return true; + } + default: { + Jump2NextField(); + break; + } + } + fieldID = ReadNum(); + } + return false; +} + +bool BinaryMplImport::Import(const std::string &fname, bool readSymbols, bool readSe) { + Reset(); + ReadFileAt(fname, 0); + int32 magic = ReadInt(); + if (kMpltMagicNumber != magic && (kMpltMagicNumber + 0x10) != magic) { + buf.clear(); + return false; + } + importingFromMplt = kMpltMagicNumber == magic; + int64 fieldID = ReadNum(); + if (readSe) { + while (fieldID != kBinFinish) { + if (fieldID == kBinSeStart) { +#ifdef MPLT_DEBUG + LogInfo::MapleLogger() << "read SE of : " << fname << '\n'; +#endif + BinaryMplImport tmp(mod); + tmp.Reset(); + tmp.buf = buf; + tmp.bufI = bufI; + tmp.importFileName = fname; + tmp.ReadSeField(); + Jump2NextField(); + } else if (fieldID == kBinEaStart) { + BinaryMplImport tmp(mod); + tmp.Reset(); + tmp.buf = buf; + tmp.bufI = bufI; + tmp.importFileName = fname; + tmp.ReadEaField(); + Jump2NextField(); + } else { + Jump2NextField(); + } + fieldID = ReadNum(); + } + return true; + } + while (fieldID != kBinFinish) { + switch (fieldID) { + case kBinContentStart: { + ReadContentField(); + break; + } + case kBinStrStart: { + ReadStrField(); + break; + } + case kBinHeaderStart: { + ReadHeaderField(); + break; + } + case kBinTypeStart: { + ReadTypeField(); + break; + } + case kBinSymStart: { + if (readSymbols) { + ReadSymField(); + } else { + Jump2NextField(); + } + break; + } + case kBinSymTabStart: { + ReadSymTabField(); + break; + } + case kBinCgStart: { + if (readSymbols) { +#ifdef MPLT_DEBUG + LogInfo::MapleLogger() << "read CG of : " << fname << '\n'; +#endif + BinaryMplImport tmp(mod); + tmp.Reset(); + tmp.inIPA = true; + tmp.buf = buf; + tmp.bufI = bufI; + tmp.importFileName = fname; + tmp.ReadCgField(); + tmp.UpdateMethodSymbols(); + Jump2NextField(); + } else { + Jump2NextField(); + } + break; + } + case kBinSeStart: { + Jump2NextField(); + break; + } + case kBinEaStart: { + if (readSymbols) { +#ifdef MPLT_DEBUG + LogInfo::MapleLogger() << "read EA of : " << fname << '\n'; +#endif + BinaryMplImport tmp(mod); + tmp.Reset(); + tmp.buf = buf; + tmp.bufI = bufI; + tmp.importFileName = fname; + tmp.ReadEaField(); + Jump2NextField(); + } else { + Jump2NextField(); + } + break; + } + case kBinFunctionBodyStart: { + ReadFunctionBodyField(); + break; + } + 
case kBinEnumStart: { + ReadEnumField(); + break; + } + default: + CHECK_FATAL(false, "should not run here"); + } + fieldID = ReadNum(); + } + UpdateMethodSymbols(); + SetupEHRootType(); + return true; +} +} // namespace maple diff --git a/src/mapleall/maple_ir/src/debug_info.cpp b/src/mapleall/maple_ir/src/debug_info.cpp new file mode 100644 index 0000000000000000000000000000000000000000..91857a69482951655fc1344e2217ec8c43d3c4a3 --- /dev/null +++ b/src/mapleall/maple_ir/src/debug_info.cpp @@ -0,0 +1,1972 @@ +/* + * Copyright (C) [2021-2022] Futurewei Technologies, Inc. All rights reverved. + * + * OpenArkCompiler is licensed under the Mulan Permissive Software License v2. + * You can use this software according to the terms and conditions of the MulanPSL - 2.0. + * You may obtain a copy of MulanPSL - 2.0 at: + * + * https://opensource.org/licenses/MulanPSL-2.0 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the MulanPSL - 2.0 for more details. + */ +#include "debug_info.h" +#include +#include "mir_builder.h" +#include "printing.h" +#include "maple_string.h" +#include "global_tables.h" +#include "mir_type.h" +#include "securec.h" +#include "mpl_logging.h" +#include "version.h" +#include "triple.h" + +namespace maple { +extern const char *GetDwTagName(unsigned n); +extern const char *GetDwFormName(unsigned n); +extern const char *GetDwAtName(unsigned n); +extern const char *GetDwOpName(unsigned n); +extern const char *GetDwAteName(unsigned n); +extern const char *GetDwCfaName(unsigned n); +extern DwAte GetAteFromPTY(PrimType pty); + +constexpr uint32 kIndx2 = 2; +constexpr uint32 kStructDBGSize = 8888; + +// DBGDie methods +DBGDie::DBGDie(MIRModule *m, DwTag tag) + : module(m), + tag(tag), + id(m->GetDbgInfo()->GetMaxId()), + withChildren(false), + keep(true), + sibling(nullptr), + firstChild(nullptr), + abbrevId(0), + tyIdx(0), + offset(0), + size(0), + attrVec(m->GetMPAllocator().Adapter()), + subDieVec(m->GetMPAllocator().Adapter()) { + if (module->GetDbgInfo()->GetParentDieSize() != 0) { + parent = module->GetDbgInfo()->GetParentDie(); + } else { + parent = nullptr; + } + m->GetDbgInfo()->SetIdDieMap(m->GetDbgInfo()->GetIncMaxId(), this); + attrVec.clear(); + subDieVec.clear(); +} + +void DBGDie::ResetParentDie() const { + module->GetDbgInfo()->ResetParentDie(); +} + +void DBGDie::AddAttr(DwAt at, DwForm form, uint64 val, bool keepFlag) { + // collect strps which need label + if (form == DW_FORM_strp) { + module->GetDbgInfo()->AddStrps(static_cast(val)); + } + DBGDieAttr *attr = module->GetDbgInfo()->CreateAttr(at, form, val); + attr->SetKeep(keepFlag); + AddAttr(attr); +} + +void DBGDie::AddSimpLocAttr(DwAt at, DwForm form, DwOp op, uint64 val) { + DBGExprLoc *p = module->GetMemPool()->New(module, op); + if (val != kDbgDefaultVal) { + p->AddSimpLocOpnd(val); + } + DBGDieAttr *attr = module->GetDbgInfo()->CreateAttr(at, form, reinterpret_cast(p)); + AddAttr(attr); +} + +void DBGDie::AddGlobalLocAttr(DwAt at, DwForm form, uint64 val) { + DBGExprLoc *p = module->GetMemPool()->New(module, DW_OP_addr); + p->SetGvarStridx(static_cast(val)); + DBGDieAttr *attr = module->GetDbgInfo()->CreateAttr(at, form, reinterpret_cast(p)); + AddAttr(attr); +} + +void DBGDie::AddFrmBaseAttr(DwAt at, DwForm form) { + DBGExprLoc *p = module->GetMemPool()->New(module, DW_OP_call_frame_cfa); + DBGDieAttr *attr = 
module->GetDbgInfo()->CreateAttr(at, form, reinterpret_cast(p)); + AddAttr(attr); +} + +DBGExprLoc *DBGDie::GetExprLoc() { + for (auto it : attrVec) { + if (it->GetDwAt() == DW_AT_location) { + return it->GetPtr(); + } + } + return nullptr; +} + +bool DBGDie::SetAttr(DwAt attr, uint64 val) { + for (auto it : attrVec) { + if (it->GetDwAt() == attr) { + it->SetU(val); + return true; + } + } + return false; +} + +bool DBGDie::SetAttr(DwAt attr, int32 val) { + for (auto it : attrVec) { + if (it->GetDwAt() == attr) { + it->SetI(val); + return true; + } + } + return false; +} + +bool DBGDie::SetAttr(DwAt attr, uint32 val) { + for (auto it : attrVec) { + if (it->GetDwAt() == attr) { + it->SetId(val); + return true; + } + } + return false; +} + +bool DBGDie::SetAttr(DwAt attr, int64 val) { + for (auto it : attrVec) { + if (it->GetDwAt() == attr) { + it->SetJ(val); + return true; + } + } + return false; +} + +bool DBGDie::SetAttr(DwAt attr, float val) { + for (auto it : attrVec) { + if (it->GetDwAt() == attr) { + it->SetF(val); + return true; + } + } + return false; +} + +bool DBGDie::SetAttr(DwAt attr, double val) { + for (auto it : attrVec) { + if (it->GetDwAt() == attr) { + it->SetD(val); + return true; + } + } + return false; +} + +bool DBGDie::SetAttr(DwAt attr, DBGExprLoc *ptr) { + for (auto it : attrVec) { + if (it->GetDwAt() == attr) { + it->SetPtr(ptr); + return true; + } + } + return false; +} + +void DBGDie::AddAttr(DBGDieAttr *attr) { + for (auto it : attrVec) { + if (it->GetDwAt() == attr->GetDwAt()) { + return; + } + } + attrVec.push_back(attr); +} + +void DBGDie::AddSubVec(DBGDie *die) { + if (!die) { + return; + } + for (auto it : subDieVec) { + if (it->GetId() == die->GetId()) { + return; + } + } + subDieVec.push_back(die); + die->parent = this; +} + +// DBGAbbrevEntry methods +DBGAbbrevEntry::DBGAbbrevEntry(MIRModule *m, DBGDie *die) : attrPairs(m->GetMPAllocator().Adapter()) { + tag = die->GetTag(); + abbrevId = 0; + withChildren = die->GetWithChildren(); + for (auto it : die->GetAttrVec()) { + if (!it->GetKeep()) { + continue; + } + attrPairs.push_back(it->GetDwAt()); + attrPairs.push_back(it->GetDwForm()); + } +} + +bool DBGAbbrevEntry::Equalto(DBGAbbrevEntry *entry) { + if (attrPairs.size() != entry->attrPairs.size()) { + return false; + } + if (withChildren != entry->GetWithChildren()) { + return false; + } + for (uint32 i = 0; i < attrPairs.size(); i++) { + if (attrPairs[i] != entry->attrPairs[i]) { + return false; + } + } + return true; +} + +// DebugInfo methods +void DebugInfo::Init() { + mplSrcIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(module->GetFileName()); + compUnit = module->GetMemPool()->New(module, DW_TAG_compile_unit); + module->SetWithDbgInfo(true); + ResetParentDie(); + if (module->GetSrcLang() == kSrcLangC) { + varPtrPrefix = ""; + } + InitBaseTypeMap(); +} + +GStrIdx DebugInfo::GetPrimTypeCName(PrimType pty) { + GStrIdx strIdx = GStrIdx(0); + switch (pty) { +#define TYPECNAME(p, n) \ + case PTY_##p: \ + strIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(n); \ + break + + TYPECNAME(i8, "char"); + TYPECNAME(i16, "short"); + TYPECNAME(i32, "int"); + TYPECNAME(i64, "long long"); + TYPECNAME(i128, "int128"); + TYPECNAME(u8, "unsigned char"); + TYPECNAME(u16, "unsigned short"); + TYPECNAME(u32, "unsigned int"); + TYPECNAME(u64, "unsigned long long"); + TYPECNAME(u128, "uint128"); + TYPECNAME(u1, "bool"); + TYPECNAME(f32, "float"); + TYPECNAME(f64, "double"); + TYPECNAME(f128, "float128"); + TYPECNAME(c64, "complex"); + TYPECNAME(c128, 
"double complex"); + default: + break; + } + return strIdx; +} + +void DebugInfo::InsertBaseTypeMap(const std::string &inputName, const std::string &outpuName, PrimType type) { + baseTypeMap[GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(inputName)] = std::make_pair( + GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(outpuName), type); +} + +void DebugInfo::InitBaseTypeMap() { + InsertBaseTypeMap(kDbgLong, "long", Triple::GetTriple().GetEnvironment() == Triple::GNUILP32 ? PTY_i32 : PTY_i64); + InsertBaseTypeMap(kDbgULong, "unsigned long", + Triple::GetTriple().GetEnvironment() == Triple::GNUILP32 ? PTY_u32 : PTY_u64); + InsertBaseTypeMap(kDbgLongDouble, "long double", PTY_f64); +} + +void DebugInfo::SetupCU() { + compUnit->SetWithChildren(true); + /* Add the Producer (Compiler) Information */ + const char *producer = strdup((std::string("Maple Version ") + Version::GetVersionStr()).c_str()); + GStrIdx strIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(producer); + delete producer; + producer = nullptr; + compUnit->AddAttr(DW_AT_producer, DW_FORM_strp, strIdx.GetIdx()); + + /* Source Languate */ + compUnit->AddAttr(DW_AT_language, DW_FORM_data4, DW_LANG_C99); + + /* Add the compiled source file information */ + compUnit->AddAttr(DW_AT_name, DW_FORM_strp, mplSrcIdx.GetIdx()); + compUnit->AddAttr(DW_AT_comp_dir, DW_FORM_strp, 0); + + compUnit->AddAttr(DW_AT_low_pc, DW_FORM_addr, kDbgDefaultVal); + compUnit->AddAttr(DW_AT_high_pc, DW_FORM_data8, kDbgDefaultVal); + + compUnit->AddAttr(DW_AT_stmt_list, DW_FORM_sec_offset, kDbgDefaultVal); +} + +void DebugInfo::AddScopeDie(MIRScope *scope) { + if (!scope || scope->IsEmpty()) { + return; + } + + bool isLocal = scope->IsLocal(); + MIRFunction *func = GetCurFunction(); + // for non-function local scope, add a lexical block + bool createBlock = isLocal && (scope != func->GetScope()); + if (createBlock) { + DBGDie *die = module->GetMemPool()->New(module, DW_TAG_lexical_block); + die->AddAttr(DW_AT_low_pc, DW_FORM_addr, scope->GetId()); + die->AddAttr(DW_AT_high_pc, DW_FORM_data8, scope->GetId()); + + // add die to parent + GetParentDie()->AddSubVec(die); + + PushParentDie(die); + } + + // process type alias + HandleTypeAlias(*scope); + + // process alias + AddAliasDies(*scope, isLocal); + + if (scope->GetSubScopes().size() > 0) { + // process subScopes + for (auto it : scope->GetSubScopes()) { + AddScopeDie(it); + } + } + + if (createBlock) { + PopParentDie(); + } +} + +DBGDie *DebugInfo::GetAliasVarTypeDie(const MIRAliasVars &aliasVar, TyIdx tyidx) { + DBGDie *typeDie = nullptr; + uint32 index = aliasVar.index; + switch (aliasVar.atk) { + case kATKType: + typeDie = GetOrCreateTypeDie(TyIdx(index)); + break; + case kATKString: + typeDie = GetOrCreateTypedefDie(GStrIdx(index), tyidx); + break; + case kATKEnum: + typeDie = GetOrCreateEnumTypeDie(index); + break; + default: + ASSERT(false, "unknown alias type kind"); + break; + } + + ASSERT(typeDie, "null alias type DIE"); + return GetOrCreateTypeDieWithAttr(aliasVar.attrs, typeDie); +} + +DBGDie *DebugInfo::GetOrCreateTypeByNameDie(const MIRType &type) { + ASSERT(type.GetKind() == kTypeByName, "must be a typename"); + DBGDie *die = GetOrCreateBaseTypeDie(&type); + if (die != nullptr) { + return die; + } + // look for the enum first + for (auto &it : GlobalTables::GetEnumTable().enumTable) { + if (it->GetNameIdx() == type.GetNameStrIdx()) { + die = GetOrCreateEnumTypeDie(it); + break; + } + } + if (!die) { + // look for the typedef + TyIdx undlyingTypeIdx = 
module->GetScope()->GetTypeAlias()->GetTyIdxFromMap(type.GetNameStrIdx()); + CHECK_FATAL(undlyingTypeIdx != TyIdx(0), "typedef not found in TypeAliasTable"); + die = GetOrCreateTypedefDie(type.GetNameStrIdx(), undlyingTypeIdx); + } + return die; +} + +void DebugInfo::HandleTypeAlias(MIRScope &scope) { + const MIRTypeAlias *typeAlias = scope.GetTypeAlias(); + if (typeAlias == nullptr) { + return; + } + + if (scope.IsLocal()) { + for (auto &i : typeAlias->GetTypeAliasMap()) { + uint32 tid = i.second.GetIdx(); + MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(tid); + CHECK_NULL_FATAL(type); + DBGDie *die = nullptr; + if (type->GetKind() == kTypeByName) { + die = GetOrCreateTypeByNameDie(*type); + } else if (tyIdxDieIdMap.find(tid) != tyIdxDieIdMap.end()) { + uint32 id = tyIdxDieIdMap[tid]; + die = idDieMap[id]; + } else { + ASSERT(false, "type alias type not in tyIdxDieIdMap"); + continue; + } + (void)die->SetAttr(DW_AT_name, i.first.GetIdx()); + AddStrps(i.first.GetIdx()); + } + } else { + for (auto it : typeAlias->GetTypeAliasMap()) { + globalTypeAliasMap[it.first.GetIdx()] = it.second.GetIdx(); + } + + for (auto it : globalTypeAliasMap) { + (void)GetOrCreateTypeDie(TyIdx(it.second)); + DBGDie *die = GetOrCreateTypedefDie(GStrIdx(it.first), TyIdx(it.second)); + compUnit->AddSubVec(die); + // associate typedef's type with die + for (auto i : module->GetTypeNameTab()->GetGStrIdxToTyIdxMap()) { + if (i.first.GetIdx() == it.first) { + tyIdxDieIdMap[i.second.GetIdx()] = die->GetId(); + break; + } + } + } + } +} + +void DebugInfo::AddAliasDies(MIRScope &scope, bool isLocal) { + MIRFunction *func = GetCurFunction(); + for (auto &i : scope.GetAliasVarMap()) { + // maple var and die + MIRSymbol *mplVar = nullptr; + DBGDie *mplDie = nullptr; + GStrIdx mplIdx = i.second.mplStrIdx; + if (i.second.isLocal) { + mplVar = func->GetSymTab()->GetSymbolFromStrIdx(mplIdx); + mplDie = GetLocalDie(mplIdx); + } else { + mplVar = GlobalTables::GetGsymTable().GetSymbolFromStrIdx(mplIdx); + mplDie = GetGlobalDie(mplIdx); + } + // some global vars are introduced by system, and + // some local vars are discarded in O2, skip them + if (mplVar == nullptr || mplDie == nullptr) { + continue; + } + + // for local scope, create alias die using maple var except name and type + // for global scope, update type only if needed + bool updateOnly = (isLocal == i.second.isLocal && i.first == mplIdx); + DBGDie *vdie = updateOnly ? 
mplDie : CreateVarDie(mplVar, i.first); + + // get type from alias + DBGDie *typeDie = GetAliasVarTypeDie(i.second, mplVar->GetTyIdx()); + vdie->SetAttr(DW_AT_type, typeDie->GetId()); + + // for new var + if (!updateOnly) { + // link vdie's ExprLoc to mplDie's + vdie->LinkExprLoc(mplDie); + + GetParentDie()->AddSubVec(vdie); + + // add alias var name to debug_str section + strps.insert(i.first.GetIdx()); + } + } +} + +void DebugInfo::CollectScopePos(MIRFunction *func, MIRScope *scope) { + if (scope != func->GetScope()) { + ScopePos plow; + plow.id = scope->GetId(); + plow.pos = scope->GetRangeLow(); + funcScopeLows[func].push_back(plow); + + ScopePos phigh; + phigh.id = scope->GetId(); + phigh.pos = scope->GetRangeHigh(); + funcScopeHighs[func].push_back(phigh); + } + + if (scope->GetSubScopes().size() > 0) { + for (auto it : scope->GetSubScopes()) { + CollectScopePos(func, it); + } + } +} + +// result is in idSet, +// a set of scope ids which are crossed from oldSrcPos to newSrcPos +void DebugInfo::GetCrossScopeId(MIRFunction *func, + std::unordered_set &idSet, + bool isLow, + const SrcPosition &oldSrcPos, + const SrcPosition &newSrcPos) { + if (isLow) { + for (auto &it : funcScopeLows[func]) { + if (oldSrcPos.IsBf(it.pos) && (it.pos).IsBfOrEq(newSrcPos)) { + idSet.insert(it.id); + } + } + } else { + if (oldSrcPos.IsEq(newSrcPos)) { + return; + } + for (auto &it : funcScopeHighs[func]) { + if (oldSrcPos.IsBfOrEq(it.pos) && (it.pos).IsBf(newSrcPos)) { + idSet.insert(it.id); + } + } + } +} + +void DebugInfo::Finish() { + SetupCU(); + // build tree from root DIE compUnit + BuildDieTree(); + BuildAbbrev(); + ComputeSizeAndOffsets(); +} + +void DebugInfo::BuildDebugInfoEnums() { + auto size = GlobalTables::GetEnumTable().enumTable.size(); + for (size_t i = 0; i < size; ++i) { + DBGDie *die = GetOrCreateEnumTypeDie(i); + compUnit->AddSubVec(die); + } +} + +void DebugInfo::BuildDebugInfoContainers() { + for (auto it : module->GetTypeNameTab()->GetGStrIdxToTyIdxMap()) { + TyIdx tyIdx = it.second; + MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx.GetIdx()); + + switch (type->GetKind()) { + case kTypeClass: + case kTypeClassIncomplete: + case kTypeInterface: + case kTypeInterfaceIncomplete: + case kTypeStruct: + case kTypeStructIncomplete: + case kTypeUnion: + (void)GetOrCreateStructTypeDie(type); + break; + case kTypeByName: + if (globalTypeAliasMap.find(it.first.GetIdx()) == globalTypeAliasMap.end()) { +#ifdef DEBUG + // not typedef + LogInfo::MapleLogger() << "named type " + << GlobalTables::GetStrTable().GetStringFromStrIdx(it.first).c_str() + << "\n"; +#endif /* DEBUG */ + } + break; + default: + ASSERT(false, "unknown case in BuildDebugInfoContainers()"); + break; + } + } +} + +void DebugInfo::BuildDebugInfoGlobalSymbols() { + for (size_t i = 0; i < GlobalTables::GetGsymTable().GetSymbolTableSize(); ++i) { + MIRSymbol *mirSymbol = GlobalTables::GetGsymTable().GetSymbolFromStidx(static_cast(i)); + if (mirSymbol == nullptr || mirSymbol->IsDeleted() || mirSymbol->GetStorageClass() == kScUnused || + mirSymbol->GetStorageClass() == kScExtern) { + continue; + } + if (module->IsCModule() && mirSymbol->IsGlobal() && mirSymbol->IsVar()) { + DBGDie *vdie = CreateVarDie(mirSymbol); + compUnit->AddSubVec(vdie); + } + } +} + +void DebugInfo::BuildDebugInfoFunctions() { + for (auto func : GlobalTables::GetFunctionTable().GetFuncTable()) { + // the first one in funcTable is nullptr + if (!func) { + continue; + } + SetCurFunction(func); + // function decl + if 
(stridxDieIdMap.find(func->GetNameStrIdx().GetIdx()) == stridxDieIdMap.end()) { + DBGDie *funcDie = GetOrCreateFuncDeclDie(func); + if (!func->GetClassTyIdx().GetIdx() && func->GetBody()) { + compUnit->AddSubVec(funcDie); + } + } + // function def + unsigned idx = func->GetNameStrIdx().GetIdx(); + if (func->GetBody() && funcDefStrIdxDieIdMap.find(idx) == funcDefStrIdxDieIdMap.end()) { + DBGDie *funcDie = GetOrCreateFuncDefDie(func); + if (!func->GetClassTyIdx().GetIdx()) { + compUnit->AddSubVec(funcDie); + } + } + } +} + +void DebugInfo::BuildDebugInfo() { + ASSERT(module->GetDbgInfo(), "null dbgInfo"); + + Init(); + + // setup debug info for enum types + BuildDebugInfoEnums(); + + // setup debug info for global type alias + HandleTypeAlias(*module->GetScope()); + + // containner types + BuildDebugInfoContainers(); + + // setup debug info for global symbols + BuildDebugInfoGlobalSymbols(); + + // handle global scope + AddScopeDie(module->GetScope()); + + // setup debug info for functions + BuildDebugInfoFunctions(); + + // finalize debug info + Finish(); +} + +DBGDieAttr *DebugInfo::CreateAttr(DwAt at, DwForm form, uint64 val) const { + DBGDieAttr *attr = module->GetMemPool()->New(kDwAt); + attr->SetDwAt(at); + attr->SetDwForm(form); + attr->SetU(val); + return attr; +} + +void DebugInfo::SetLocalDie(MIRFunction *func, GStrIdx strIdx, const DBGDie *die) { + (funcLstrIdxDieIdMap[func])[strIdx.GetIdx()] = die->GetId(); +} + +DBGDie *DebugInfo::GetGlobalDie(const GStrIdx &strIdx) { + unsigned idx = strIdx.GetIdx(); + auto it = globalStridxDieIdMap.find(idx); + if (it != globalStridxDieIdMap.end()) { + return idDieMap[it->second]; + } + return nullptr; +} + +DBGDie *DebugInfo::GetLocalDie(MIRFunction *func, GStrIdx strIdx) { + uint32 id = (funcLstrIdxDieIdMap[func])[strIdx.GetIdx()]; + auto it = idDieMap.find(id); + if (it != idDieMap.end()) { + return it->second; + } + return nullptr; +} + +void DebugInfo::SetLocalDie(GStrIdx strIdx, const DBGDie *die) { + (funcLstrIdxDieIdMap[GetCurFunction()])[strIdx.GetIdx()] = die->GetId(); +} + +DBGDie *DebugInfo::GetLocalDie(GStrIdx strIdx) { + return GetLocalDie(GetCurFunction(), strIdx); +} + +void DebugInfo::SetLabelIdx(MIRFunction *func, const GStrIdx &strIdx, LabelIdx labIdx) { + (funcLstrIdxLabIdxMap[func])[strIdx.GetIdx()] = labIdx; +} + +LabelIdx DebugInfo::GetLabelIdx(MIRFunction *func, GStrIdx strIdx) { + LabelIdx labidx = (funcLstrIdxLabIdxMap[func])[strIdx.GetIdx()]; + return labidx; +} + +void DebugInfo::SetLabelIdx(const GStrIdx &strIdx, LabelIdx labIdx) { + (funcLstrIdxLabIdxMap[GetCurFunction()])[strIdx.GetIdx()] = labIdx; +} + +LabelIdx DebugInfo::GetLabelIdx(GStrIdx strIdx) { + LabelIdx labidx = (funcLstrIdxLabIdxMap[GetCurFunction()])[strIdx.GetIdx()]; + return labidx; +} + +static DwOp GetBreg(unsigned i) { + constexpr DwOp baseOpRegs[] = { + DW_OP_breg0, + DW_OP_breg1, + DW_OP_breg2, + DW_OP_breg3, + DW_OP_breg4, + DW_OP_breg5, + DW_OP_breg6, + DW_OP_breg7, + }; + + // struct parameters size larger than 8 are converted into pointers + constexpr uint32 struct2PtrSize = 8; + if (i < struct2PtrSize) { + return baseOpRegs[i]; + } + return DW_OP_breg0; +} + +DBGDie *DebugInfo::CreateFormalParaDie(MIRFunction *func, uint32 idx, bool isDef) { + DBGDie *die = module->GetMemPool()->New(module, DW_TAG_formal_parameter); + + TyIdx tyIdx = func->GetFormalDefAt(idx).formalTyIdx; + MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx); + DBGDie *typeDie = GetOrCreateTypeDie(type); + die->AddAttr(DW_AT_type, DW_FORM_ref4, 
typeDie->GetId()); + + /* var */ + MIRSymbol *sym = func->GetFormalDefAt(idx).formalSym; + if (isDef && sym) { + die->AddAttr(DW_AT_name, DW_FORM_strp, sym->GetNameStrIdx().GetIdx()); + die->AddAttr(DW_AT_decl_file, DW_FORM_data4, sym->GetSrcPosition().FileNum()); + die->AddAttr(DW_AT_decl_line, DW_FORM_data4, sym->GetSrcPosition().LineNum()); + die->AddAttr(DW_AT_decl_column, DW_FORM_data4, sym->GetSrcPosition().Column()); + DwOp op = DW_OP_fbreg; + if (type->IsStructType() && (static_cast(type)->GetSize() > k64BitSize)) { + op = GetBreg(idx); + } + die->AddSimpLocAttr(DW_AT_location, DW_FORM_exprloc, op, kDbgDefaultVal); + SetLocalDie(func, sym->GetNameStrIdx(), die); + } + return die; +} + +DBGDie *DebugInfo::GetOrCreateLabelDie(LabelIdx labid) { + MIRFunction *func = GetCurFunction(); + CHECK(labid < func->GetLabelTab()->GetLabelTableSize(), "index out of range in DebugInfo::GetOrCreateLabelDie"); + GStrIdx strid = func->GetLabelTab()->GetSymbolFromStIdx(labid); + if ((funcLstrIdxDieIdMap[func]).size() && + (funcLstrIdxDieIdMap[func]).find(strid.GetIdx()) != (funcLstrIdxDieIdMap[func]).end()) { + return GetLocalDie(strid); + } + + DBGDie *die = module->GetMemPool()->New(module, DW_TAG_label); + die->AddAttr(DW_AT_name, DW_FORM_strp, strid.GetIdx()); + die->AddAttr(DW_AT_decl_file, DW_FORM_data4, mplSrcIdx.GetIdx()); + die->AddAttr(DW_AT_decl_line, DW_FORM_data4, lexer->GetLineNum()); + die->AddAttr(DW_AT_low_pc, DW_FORM_addr, kDbgDefaultVal); + GetParentDie()->AddSubVec(die); + SetLocalDie(strid, die); + SetLabelIdx(strid, labid); + return die; +} + +DBGDie *DebugInfo::CreateVarDie(MIRSymbol *sym) { + // filter vtab + if (sym->GetName().find(VTAB_PREFIX_STR) == 0) { + return nullptr; + } + + if (sym->GetName().find(GCTIB_PREFIX_STR) == 0) { + return nullptr; + } + + if (sym->GetStorageClass() == kScFormal) { + return nullptr; + } + + bool isLocal = sym->IsLocal(); + GStrIdx strIdx = sym->GetNameStrIdx(); + + if (isLocal) { + MIRFunction *func = GetCurFunction(); + if ((funcLstrIdxDieIdMap[func]).size() && + (funcLstrIdxDieIdMap[func]).find(strIdx.GetIdx()) != (funcLstrIdxDieIdMap[func]).end()) { + return GetLocalDie(strIdx); + } + } else { + if (globalStridxDieIdMap.find(strIdx.GetIdx()) != globalStridxDieIdMap.end()) { + uint32 id = globalStridxDieIdMap[strIdx.GetIdx()]; + return idDieMap[id]; + } + } + + DBGDie *die = CreateVarDie(sym, strIdx); + + GetParentDie()->AddSubVec(die); + if (isLocal) { + SetLocalDie(strIdx, die); + } else { + globalStridxDieIdMap[strIdx.GetIdx()] = die->GetId(); + } + + return die; +} + +DBGDie *DebugInfo::CreateVarDie(MIRSymbol *sym, const GStrIdx &strIdx) { + DBGDie *die = module->GetMemPool()->New(module, DW_TAG_variable); + + /* var Name */ + die->AddAttr(DW_AT_name, DW_FORM_strp, strIdx.GetIdx()); + die->AddAttr(DW_AT_decl_file, DW_FORM_data4, sym->GetSrcPosition().FileNum()); + die->AddAttr(DW_AT_decl_line, DW_FORM_data4, sym->GetSrcPosition().LineNum()); + die->AddAttr(DW_AT_decl_column, DW_FORM_data4, sym->GetSrcPosition().Column()); + + bool isLocal = sym->IsLocal(); + if (isLocal) { + if (sym->IsPUStatic()) { + // Use actual internal sym by cg + PUIdx pIdx = GetCurFunction()->GetPuidx(); + std::string ptrName = sym->GetName() + std::to_string(pIdx); + uint64 idx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(ptrName).GetIdx(); + die->AddGlobalLocAttr(DW_AT_location, DW_FORM_exprloc, idx); + } else { + die->AddSimpLocAttr(DW_AT_location, DW_FORM_exprloc, DW_OP_fbreg, kDbgDefaultVal); + } + } else { + // global var just use its name as 
address in .s + uint64 idx = strIdx.GetIdx(); + if ((sym->IsReflectionClassInfo() && !sym->IsReflectionArrayClassInfo()) || sym->IsStatic()) { + std::string ptrName = varPtrPrefix + sym->GetName(); + idx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(ptrName).GetIdx(); + } + die->AddGlobalLocAttr(DW_AT_location, DW_FORM_exprloc, idx); + } + + MIRType *type = sym->GetType(); + DBGDie *typeDie = GetOrCreateTypeDie(type); + DBGDie *newDie = GetOrCreateTypeDieWithAttr(sym->GetAttrs(), typeDie); + die->AddAttr(DW_AT_type, DW_FORM_ref4, newDie->GetId()); + + return die; +} + +DBGDie *DebugInfo::GetOrCreateFuncDeclDie(MIRFunction *func) { + uint32 funcnameidx = func->GetNameStrIdx().GetIdx(); + if (stridxDieIdMap.find(funcnameidx) != stridxDieIdMap.end()) { + uint32 id = stridxDieIdMap[funcnameidx]; + return idDieMap[id]; + } + + DBGDie *die = module->GetMemPool()->New(module, DW_TAG_subprogram); + stridxDieIdMap[funcnameidx] = die->GetId(); + + die->AddAttr(DW_AT_external, DW_FORM_flag_present, 1); + + // Function Name + MIRSymbol *sym = GlobalTables::GetGsymTable().GetSymbolFromStidx(func->GetStIdx().Idx()); + + die->AddAttr(DW_AT_name, DW_FORM_strp, funcnameidx); + die->AddAttr(DW_AT_decl_file, DW_FORM_data4, sym->GetSrcPosition().FileNum()); + die->AddAttr(DW_AT_decl_line, DW_FORM_data4, sym->GetSrcPosition().LineNum()); + die->AddAttr(DW_AT_decl_column, DW_FORM_data4, sym->GetSrcPosition().Column()); + + // Attributes for DW_AT_accessibility + uint32 access = 0; + if (func->IsPublic()) { + access = DW_ACCESS_public; + } else if (func->IsPrivate()) { + access = DW_ACCESS_private; + } else if (func->IsProtected()) { + access = DW_ACCESS_protected; + } + if (access != 0) { + die->AddAttr(DW_AT_accessibility, DW_FORM_data4, access); + } + + die->AddAttr(DW_AT_GNU_all_tail_call_sites, DW_FORM_flag_present, kDbgDefaultVal); + + PushParentDie(die); + + // formal parameter + for (uint32 i = 0; i < func->GetFormalCount(); i++) { + DBGDie *param = CreateFormalParaDie(func, i, false); + die->AddSubVec(param); + } + + if (func->IsVarargs()) { + DBGDie *varargDie = module->GetMemPool()->New(module, DW_TAG_unspecified_parameters); + die->AddSubVec(varargDie); + } + + PopParentDie(); + + return die; +} + +bool LIsCompilerGenerated(const MIRFunction *func) { + return ((func->GetName().c_str())[0] != 'L'); +} + +void DebugInfo::CreateFuncLocalSymbolsDies(MIRFunction *func, DBGDie *die) { + if (func->GetSymTab()) { + // local variables, start from 1 + for (uint32 i = 1; i < func->GetSymTab()->GetSymbolTableSize(); i++) { + MIRSymbol *var = func->GetSymTab()->GetSymbolFromStIdx(i); + DBGDie *vdie = CreateVarDie(var); + if (vdie == nullptr) { + continue; + } + die->AddSubVec(vdie); + // for C, source variable names will be used instead of mangloed maple variables + if (module->IsCModule()) { + vdie->SetKeep(false); + } + } + } +} + +DBGDie *DebugInfo::GetOrCreateFuncDefDie(MIRFunction *func) { + uint32 funcnameidx = func->GetNameStrIdx().GetIdx(); + if (funcDefStrIdxDieIdMap.find(funcnameidx) != funcDefStrIdxDieIdMap.end()) { + uint32 id = funcDefStrIdxDieIdMap[funcnameidx]; + return idDieMap[id]; + } + + DBGDie *funcdecldie = GetOrCreateFuncDeclDie(func); + DBGDie *die = module->GetMemPool()->New(module, DW_TAG_subprogram); + // update funcDefStrIdxDieIdMap and leave stridxDieIdMap for the func decl + funcDefStrIdxDieIdMap[funcnameidx] = die->GetId(); + + die->AddAttr(DW_AT_specification, DW_FORM_ref4, funcdecldie->GetId()); + MIRSymbol *sym = 
GlobalTables::GetGsymTable().GetSymbolFromStidx(func->GetStIdx().Idx()); + die->AddAttr(DW_AT_decl_line, DW_FORM_data4, sym->GetSrcPosition().LineNum()); + + if (!func->IsReturnVoid()) { + auto returnType = func->GetReturnType(); + DBGDie *typeDie = GetOrCreateTypeDie(returnType); + die->AddAttr(DW_AT_type, DW_FORM_ref4, typeDie->GetId()); + } + + die->AddAttr(DW_AT_low_pc, DW_FORM_addr, kDbgDefaultVal); + die->AddAttr(DW_AT_high_pc, DW_FORM_data8, kDbgDefaultVal); + die->AddFrmBaseAttr(DW_AT_frame_base, DW_FORM_exprloc); + if (!func->IsStatic() && !LIsCompilerGenerated(func)) { + die->AddAttr(DW_AT_object_pointer, DW_FORM_ref4, kDbgDefaultVal); + } + die->AddAttr(DW_AT_GNU_all_tail_call_sites, DW_FORM_flag_present, kDbgDefaultVal); + + PushParentDie(die); + + // formal parameter + for (uint32 i = 0; i < func->GetFormalCount(); i++) { + DBGDie *pdie = CreateFormalParaDie(func, i, true); + die->AddSubVec(pdie); + } + + CreateFuncLocalSymbolsDies(func, die); + + // add scope die + AddScopeDie(func->GetScope()); + CollectScopePos(func, func->GetScope()); + + PopParentDie(); + + return die; +} + +DBGDie *DebugInfo::GetOrCreatePrimTypeDie(MIRType *ty) { + PrimType pty = ty->GetPrimType(); + uint32 tid = static_cast(pty); + if (tyIdxDieIdMap.find(tid) != tyIdxDieIdMap.end()) { + uint32 id = tyIdxDieIdMap[tid]; + return idDieMap[id]; + } + + DBGDie *die = module->GetMemPool()->New(module, DW_TAG_base_type); + die->SetTyIdx(static_cast(pty)); + + GStrIdx strIdx = ty->GetNameStrIdx(); + if (strIdx.GetIdx() == 0) { + std::string pname = std::string(GetPrimTypeName(ty->GetPrimType())); + strIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(pname); + ty->SetNameStrIdx(strIdx); + } + + die->AddAttr(DW_AT_byte_size, DW_FORM_data4, GetPrimTypeSize(pty)); + die->AddAttr(DW_AT_encoding, DW_FORM_data4, GetAteFromPTY(pty)); + + // use C type name, int for i32 etc + if (module->IsCModule()) { + GStrIdx idx = GetPrimTypeCName(ty->GetPrimType()); + if (idx.GetIdx() != 0) { + strIdx = idx; + } + } + die->AddAttr(DW_AT_name, DW_FORM_strp, strIdx.GetIdx()); + + compUnit->AddSubVec(die); + tyIdxDieIdMap[static_cast(pty)] = die->GetId(); + return die; +} + +// At present, in order to solve the inaccurate expression of GetOrCreatePrimTypeDie for base types, +// e.g. long or long long. We can consider using this interface uniformly for base types in the future. 
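// For illustration (editor's note, not from the original sources): with the mappings
// installed by InitBaseTypeMap, "long" resolves to PTY_i32 under Triple::GNUILP32 and
// to PTY_i64 otherwise, so the DIE built below carries DW_AT_byte_size 4 or 8 and the
// matching DW_AT_encoding, rather than the fixed spelling GetOrCreatePrimTypeDie emits.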
+DBGDie *DebugInfo::GetOrCreateBaseTypeDie(const MIRType *type) { + uint32 tid = type->GetTypeIndex().GetIdx(); + if (tyIdxDieIdMap.find(tid) != tyIdxDieIdMap.end()) { + uint32 id = tyIdxDieIdMap[tid]; + return idDieMap[id]; + } + + auto iter = baseTypeMap.find(type->GetNameStrIdx()); + if (iter == baseTypeMap.cend()) { + return nullptr; + } + DBGDie *die = module->GetMemPool()->New(module, DW_TAG_base_type); + die->AddAttr(DW_AT_name, DW_FORM_strp, iter->second.first.GetIdx()); + die->AddAttr(DW_AT_byte_size, DW_FORM_data4, GetPrimTypeSize(iter->second.second)); + die->AddAttr(DW_AT_encoding, DW_FORM_data4, GetAteFromPTY(iter->second.second)); + + compUnit->AddSubVec(die); + tyIdxDieIdMap[type->GetTypeIndex().GetIdx()] = die->GetId(); + return die; +} + +DBGDie *DebugInfo::CreatePointedFuncTypeDie(MIRFuncType *fType) { + DBGDie *die = module->GetMemPool()->New(module, DW_TAG_subroutine_type); + + die->AddAttr(DW_AT_prototyped, DW_FORM_data4, static_cast(fType->GetParamTypeList().size() > 0)); + MIRType *retType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(fType->GetRetTyIdx()); + DBGDie *retTypeDie = GetOrCreateTypeDie(retType); + retTypeDie = GetOrCreateTypeDieWithAttr(fType->GetRetAttrs(), retTypeDie); + die->AddAttr(DW_AT_type, DW_FORM_ref4, retTypeDie->GetId()); + + compUnit->AddSubVec(die); + + for (uint32 i = 0; i < fType->GetParamTypeList().size(); i++) { + DBGDie *paramDie = module->GetMemPool()->New(module, DW_TAG_formal_parameter); + MIRType *paramType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(fType->GetNthParamType(i)); + DBGDie *paramTypeDie = GetOrCreateTypeDie(paramType); + paramDie->AddAttr(DW_AT_type, DW_FORM_ref4, paramTypeDie->GetId()); + die->AddSubVec(paramDie); + } + + tyIdxDieIdMap[fType->GetTypeIndex().GetIdx()] = die->GetId(); + return die; +} + +DBGDie *DebugInfo::GetOrCreateTypeDieWithAttr(AttrKind attr, DBGDie *typeDie) { + DBGDie *die = typeDie; + uint32 dieId = typeDie->GetId(); + uint32 newId = 0; + switch (attr) { + case ATTR_const: + if (constTypeDieMap.find(dieId) != constTypeDieMap.end()) { + newId = constTypeDieMap[dieId]; + die = idDieMap[newId]; + } else { + die = module->GetMemPool()->New(module, DW_TAG_const_type); + die->AddAttr(DW_AT_type, DW_FORM_ref4, dieId); + compUnit->AddSubVec(die); + newId = die->GetId(); + constTypeDieMap[dieId] = newId; + } + break; + case ATTR_volatile: + if (volatileTypeDieMap.find(dieId) != volatileTypeDieMap.end()) { + newId = volatileTypeDieMap[dieId]; + die = idDieMap[newId]; + } else { + die = module->GetMemPool()->New(module, DW_TAG_volatile_type); + die->AddAttr(DW_AT_type, DW_FORM_ref4, dieId); + compUnit->AddSubVec(die); + newId = die->GetId(); + volatileTypeDieMap[dieId] = newId; + } + break; + default: + break; + } + return die; +} + +DBGDie *DebugInfo::GetOrCreateTypeDieWithAttr(TypeAttrs attrs, DBGDie *typeDie) { + if (attrs.GetAttr(ATTR_const)) { + typeDie = GetOrCreateTypeDieWithAttr(ATTR_const, typeDie); + } + if (attrs.GetAttr(ATTR_volatile)) { + typeDie = GetOrCreateTypeDieWithAttr(ATTR_volatile, typeDie); + } + return typeDie; +} + +DBGDie *DebugInfo::GetOrCreateTypeDie(TyIdx tyidx) { + MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyidx); + return GetOrCreateTypeDie(type); +} + +DBGDie *DebugInfo::GetOrCreateTypeDie(MIRType *type) { + if (type == nullptr) { + return nullptr; + } + + uint32 tid = type->GetTypeIndex().GetIdx(); + if (tyIdxDieIdMap.find(tid) != tyIdxDieIdMap.end()) { + uint32 id = tyIdxDieIdMap[tid]; + return idDieMap[id]; + } + + uint32 sid = 
type->GetNameStrIdx().GetIdx(); + if (sid != 0 && stridxDieIdMap.find(sid) != stridxDieIdMap.end()) { + uint32 id = stridxDieIdMap[sid]; + return idDieMap[id]; + } + + if (type->GetTypeIndex() == static_cast(type->GetPrimType())) { + return GetOrCreatePrimTypeDie(type); + } + + DBGDie *die = nullptr; + switch (type->GetKind()) { + case kTypePointer: + die = GetOrCreatePointTypeDie(static_cast(type)); + break; + case kTypeFunction: + die = CreatePointedFuncTypeDie(static_cast(type)); + break; + case kTypeArray: + case kTypeFArray: + case kTypeJArray: + die = GetOrCreateArrayTypeDie(static_cast(type)); + break; + case kTypeUnion: + case kTypeStruct: + case kTypeStructIncomplete: + case kTypeClass: + case kTypeClassIncomplete: + case kTypeInterface: + case kTypeInterfaceIncomplete: { + die = GetOrCreateStructTypeDie(type); + break; + } + case kTypeBitField: + break; + case kTypeByName: { + die = GetOrCreateTypeByNameDie(*type); + break; + } + default: + CHECK_FATAL(false, "TODO: support type"); + break; + } + + if (die) { + tyIdxDieIdMap[tid] = die->GetId(); + } + + return die; +} + +DBGDie *DebugInfo::GetOrCreateTypedefDie(GStrIdx stridx, TyIdx tyidx) { + uint32 sid = stridx.GetIdx(); + auto it = stridxDieIdMap.find(sid); + if (it != stridxDieIdMap.end()) { + return idDieMap[it->second]; + } + + DBGDie *die = module->GetMemPool()->New(module, DW_TAG_typedef); + compUnit->AddSubVec(die); + + die->AddAttr(DW_AT_name, DW_FORM_strp, sid); + die->AddAttr(DW_AT_decl_file, DW_FORM_data1, 0); + die->AddAttr(DW_AT_decl_line, DW_FORM_data1, 0); + die->AddAttr(DW_AT_decl_column, DW_FORM_data1, 0); + + DBGDie *typeDie = GetOrCreateTypeDie(tyidx); + die->AddAttr(DW_AT_type, DW_FORM_ref4, typeDie->GetId()); + + stridxDieIdMap[sid] = die->GetId(); + return die; +} + +DBGDie *DebugInfo::GetOrCreateEnumTypeDie(unsigned idx) { + MIREnum *mirEnum = GlobalTables::GetEnumTable().enumTable[idx]; + return GetOrCreateEnumTypeDie(mirEnum); +} + +DBGDie *DebugInfo::GetOrCreateEnumTypeDie(MIREnum *mirEnum) { + uint32 sid = mirEnum->GetNameIdx().GetIdx(); + auto it = stridxDieIdMap.find(sid); + if (it != stridxDieIdMap.end()) { + return idDieMap[it->second]; + } + + DBGDie *die = module->GetMemPool()->New(module, DW_TAG_enumeration_type); + + PrimType pty = mirEnum->GetPrimType(); + // check if it is an anonymous enum + const std::string &name = GlobalTables::GetStrTable().GetStringFromStrIdx(mirEnum->GetNameIdx()); + bool keep = (name.find("unnamed.") == std::string::npos); + + die->AddAttr(DW_AT_name, DW_FORM_strp, sid, keep); + die->AddAttr(DW_AT_encoding, DW_FORM_data4, GetAteFromPTY(pty)); + die->AddAttr(DW_AT_byte_size, DW_FORM_data1, GetPrimTypeSize(pty)); + MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(pty); + DBGDie *typeDie = GetOrCreateTypeDie(type); + die->AddAttr(DW_AT_type, DW_FORM_ref4, typeDie->GetId()); + + for (auto &elemIt : mirEnum->GetElements()) { + DBGDie *elem = module->GetMemPool()->New(module, DW_TAG_enumerator); + elem->AddAttr(DW_AT_name, DW_FORM_strp, elemIt.first.GetIdx()); + elem->AddAttr(DW_AT_const_value, DW_FORM_data8, elemIt.second.GetExtValue()); + die->AddSubVec(elem); + } + + stridxDieIdMap[sid] = die->GetId(); + return die; +} + +DBGDie *DebugInfo::GetOrCreatePointTypeDie(const MIRPtrType *ptrType) { + uint32 tid = ptrType->GetTypeIndex().GetIdx(); + if (tyIdxDieIdMap.find(tid) != tyIdxDieIdMap.end()) { + uint32 id = tyIdxDieIdMap[tid]; + return idDieMap[id]; + } + + MIRType *type = ptrType->GetPointedType(); + DBGDie *typeDie = GetOrCreateTypeDie(type); + typeDie 
= GetOrCreateTypeDieWithAttr(ptrType->GetTypeAttrs(), typeDie); + // for <* void> and + if ((type != nullptr) && + (type->GetPrimType() == PTY_void || type->GetKind() == kTypeFunction)) { + DBGDie *die = nullptr; + if (type->GetKind() == kTypeFunction) { + // for maple's function pointer type, function type should be used in dwarf + die = typeDie; + tyIdxDieIdMap[type->GetTypeIndex().GetIdx()] = die->GetId(); + } else { + die = module->GetMemPool()->New(module, DW_TAG_pointer_type); + die->AddAttr(DW_AT_byte_size, DW_FORM_data4, k8BitSize); + } + die->AddAttr(DW_AT_type, DW_FORM_ref4, typeDie->GetId()); + tyIdxDieIdMap[ptrType->GetTypeIndex().GetIdx()] = die->GetId(); + compUnit->AddSubVec(die); + return die; + } + + if (typeDefTyIdxMap.find(type->GetTypeIndex().GetIdx()) != typeDefTyIdxMap.end()) { + uint32 tyIdx = typeDefTyIdxMap[type->GetTypeIndex().GetIdx()]; + if (pointedPointerMap.find(tyIdx) != pointedPointerMap.end()) { + uint32 tyid = pointedPointerMap[tyIdx]; + if (tyIdxDieIdMap.find(tyid) != tyIdxDieIdMap.end()) { + uint32 dieid = tyIdxDieIdMap[tyid]; + DBGDie *die = idDieMap[dieid]; + return die; + } + } + } + + // update incomplete type from stridxDieIdMap to tyIdxDieIdMap + MIRStructType *stype = static_cast(type); + if ((stype != nullptr) && stype->IsIncomplete()) { + uint32 sid = stype->GetNameStrIdx().GetIdx(); + if (stridxDieIdMap.find(sid) != stridxDieIdMap.end()) { + uint32 dieid = stridxDieIdMap[sid]; + if (dieid != 0) { + tyIdxDieIdMap[stype->GetTypeIndex().GetIdx()] = dieid; + } + } + } + + DBGDie *die = module->GetMemPool()->New(module, DW_TAG_pointer_type); + die->AddAttr(DW_AT_byte_size, DW_FORM_data4, k8BitSize); + die->AddAttr(DW_AT_type, DW_FORM_ref4, typeDie->GetId()); + tyIdxDieIdMap[ptrType->GetTypeIndex().GetIdx()] = die->GetId(); + + compUnit->AddSubVec(die); + + return die; +} + +DBGDie *DebugInfo::GetOrCreateArrayTypeDie(const MIRArrayType *arrayType) { + uint32 tid = arrayType->GetTypeIndex().GetIdx(); + if (tyIdxDieIdMap.find(tid) != tyIdxDieIdMap.end()) { + uint32 id = tyIdxDieIdMap[tid]; + return idDieMap[id]; + } + + MIRType *type = arrayType->GetElemType(); + DBGDie *typeDie = GetOrCreateTypeDie(type); + typeDie = GetOrCreateTypeDieWithAttr(arrayType->GetTypeAttrs(), typeDie); + + DBGDie *die = module->GetMemPool()->New(module, DW_TAG_array_type); + die->AddAttr(DW_AT_byte_size, DW_FORM_data4, k8BitSize); + die->AddAttr(DW_AT_type, DW_FORM_ref4, typeDie->GetId()); + tyIdxDieIdMap[arrayType->GetTypeIndex().GetIdx()] = die->GetId(); + + compUnit->AddSubVec(die); + + // maple uses array of 1D array to represent 2D array + // so only one DW_TAG_subrange_type entry is needed default + uint16 dim = 1; + if (theMIRModule->IsCModule() || theMIRModule->IsJavaModule()) { + dim = arrayType->GetDim(); + } + typeDie = GetOrCreateTypeDie(TyIdx(PTY_u32)); + bool keep = !arrayType->IsIncompleteArray(); + for (auto i = 0; i < dim; ++i) { + DBGDie *rangeDie = module->GetMemPool()->New(module, DW_TAG_subrange_type); + rangeDie->AddAttr(DW_AT_type, DW_FORM_ref4, typeDie->GetId(), keep); + // The default lower bound value for C, C++, or Java is 0 + rangeDie->AddAttr(DW_AT_upper_bound, DW_FORM_data4, arrayType->GetSizeArrayItem(i) - 1, keep); + die->AddSubVec(rangeDie); + } + + return die; +} + +DBGDie *DebugInfo::CreateFieldDie(maple::FieldPair pair) { + DBGDie *die = module->GetMemPool()->New(module, DW_TAG_member); + + const std::string &name = GlobalTables::GetStrTable().GetStringFromStrIdx(pair.first); + bool keep = (name.find("unnamed.") == std::string::npos); + 
+  die->AddAttr(DW_AT_name, DW_FORM_strp, pair.first.GetIdx(), keep);
+  die->AddAttr(DW_AT_decl_file, DW_FORM_data4, mplSrcIdx.GetIdx(), keep);
+
+  MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(pair.second.first);
+  DBGDie *typeDie = GetOrCreateTypeDie(type);
+  die->AddAttr(DW_AT_type, DW_FORM_ref4, typeDie->GetId());
+
+  die->AddAttr(DW_AT_data_member_location, DW_FORM_data4, kDbgDefaultVal);
+
+  return die;
+}
+
+DBGDie *DebugInfo::CreateBitfieldDie(const MIRBitFieldType *type, const GStrIdx &sidx, uint32 &prevBits) {
+  DBGDie *die = module->GetMemPool()->New<DBGDie>(module, DW_TAG_member);
+
+  die->AddAttr(DW_AT_name, DW_FORM_strp, sidx.GetIdx());
+  die->AddAttr(DW_AT_decl_file, DW_FORM_data4, mplSrcIdx.GetIdx());
+
+  MIRType *ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(type->GetPrimType());
+  DBGDie *typeDie = GetOrCreateTypeDie(ty);
+  die->AddAttr(DW_AT_type, DW_FORM_ref4, typeDie->GetId());
+
+  die->AddAttr(DW_AT_byte_size, DW_FORM_data4, GetPrimTypeSize(type->GetPrimType()));
+  die->AddAttr(DW_AT_bit_size, DW_FORM_data4, type->GetFieldSize());
+
+  uint32 typeSize = GetPrimTypeSize(type->GetPrimType()) * k8BitSize;
+  prevBits = type->GetFieldSize() + prevBits > typeSize ? type->GetFieldSize() : type->GetFieldSize() + prevBits;
+  uint32 offset = typeSize - prevBits;
+
+  die->AddAttr(DW_AT_bit_offset, DW_FORM_data4, offset);
+  die->AddAttr(DW_AT_data_member_location, DW_FORM_data4, 0);
+
+  return die;
+}
+
+DBGDie *DebugInfo::GetOrCreateStructTypeDie(const MIRType *type) {
+  ASSERT(type, "null structure type");
+  GStrIdx strIdx = type->GetNameStrIdx();
+  ASSERT(strIdx.GetIdx(), "structure type missing name");
+
+  uint32 tid = type->GetTypeIndex().GetIdx();
+  if (tyIdxDieIdMap.find(tid) != tyIdxDieIdMap.end()) {
+    uint32 id = tyIdxDieIdMap[tid];
+    return idDieMap[id];
+  }
+
+  DBGDie *die = nullptr;
+  switch (type->GetKind()) {
+    case kTypeClass:
+    case kTypeClassIncomplete:
+      die = CreateClassTypeDie(strIdx, static_cast<const MIRClassType *>(type));
+      break;
+    case kTypeInterface:
+    case kTypeInterfaceIncomplete:
+      die = CreateInterfaceTypeDie(strIdx, static_cast<const MIRInterfaceType *>(type));
+      break;
+    case kTypeStruct:
+    case kTypeStructIncomplete:
+    case kTypeUnion:
+      die = CreateStructTypeDie(strIdx, static_cast<const MIRStructType *>(type), false);
+      break;
+    default:
+#ifdef DEBUG
+      LogInfo::MapleLogger() << "named type "
+                             << GlobalTables::GetStrTable().GetStringFromStrIdx(strIdx).c_str()
+                             << "\n";
+#endif /* DEBUG */
+      break;
+  }
+
+  GlobalTables::GetTypeNameTable().SetGStrIdxToTyIdx(strIdx, type->GetTypeIndex());
+
+  if (die) {
+    tyIdxDieIdMap[type->GetTypeIndex().GetIdx()] = die->GetId();
+  }
+  return die;
+}
+
+void DebugInfo::CreateStructTypeFieldsDies(const MIRStructType *structType, DBGDie *die) {
+  uint32 prevBits = 0;
+  for (size_t i = 0; i < structType->GetFieldsSize(); i++) {
+    MIRType *elemType = structType->GetElemType(static_cast<uint32>(i));
+    FieldPair fp = structType->GetFieldsElemt(i);
+    if (elemType->IsMIRBitFieldType()) {
+      if (die->GetTag() == DW_TAG_union_type) {
+        prevBits = 0;
+      }
+      MIRBitFieldType *bitFieldType = static_cast<MIRBitFieldType *>(elemType);
+      DBGDie *bfDie = CreateBitfieldDie(bitFieldType, fp.first, prevBits);
+      die->AddSubVec(bfDie);
+    } else {
+      if (die->GetTag() != DW_TAG_union_type) {
+        prevBits = GetPrimTypeSize(elemType->GetPrimType()) * k8BitSize;
+      }
+      DBGDie *fieldDie = CreateFieldDie(fp);
+      die->AddSubVec(fieldDie);
+
+      // update field type with alias info
+      MIRAlias *alias = structType->GetAlias();
+      if (!alias) {
+        continue;
+      }
+
+      for (auto &aliasVar : alias->GetAliasVarMap()) {
+        if (aliasVar.first == fp.first) {
+          DBGDie *typeDie = GetAliasVarTypeDie(aliasVar.second, fp.second.first);
+          fieldDie->SetAttr(DW_AT_type, typeDie->GetId());
+          break;
+        }
+      }
+    }
+  }
+}
+
+void DebugInfo::CreateStructTypeParentFieldsDies(const MIRStructType *structType, DBGDie *die) {
+  for (size_t i = 0; i < structType->GetParentFieldsSize(); i++) {
+    FieldPair fp = structType->GetParentFieldsElemt(i);
+    DBGDie *fieldDie = CreateFieldDie(fp);
+    die->AddSubVec(fieldDie);
+  }
+}
+
+void DebugInfo::CreateStructTypeMethodsDies(const MIRStructType *structType, DBGDie *die) {
+  // member function declarations
+  for (auto fp : structType->GetMethods()) {
+    MIRSymbol *symbol = GlobalTables::GetGsymTable().GetSymbolFromStidx(fp.first.Idx());
+    ASSERT((symbol != nullptr) && symbol->GetSKind() == kStFunc, "member function symbol not exist");
+    MIRFunction *func = symbol->GetValue().mirFunc;
+    ASSERT(func, "member function not exist");
+    DBGDie *funDeclDie = GetOrCreateFuncDeclDie(func);
+    die->AddSubVec(funDeclDie);
+    if (func->GetBody()) {
+      // member function definitions; these dies are global
+      DBGDie *funDefDie = GetOrCreateFuncDefDie(func);
+      compUnit->AddSubVec(funDefDie);
+    }
+  }
+}
+
+// shared between struct and union, also used as part by class and interface
+DBGDie *DebugInfo::CreateStructTypeDie(GStrIdx strIdx, const MIRStructType *structType, bool update) {
+  DBGDie *die = nullptr;
+  uint32 tid = structType->GetTypeIndex().GetIdx();
+
+  if (update) {
+    ASSERT(tyIdxDieIdMap.find(tid) != tyIdxDieIdMap.end(), "update type die not exist");
+    uint32 id = tyIdxDieIdMap[tid];
+    die = idDieMap[id];
+    ASSERT(die, "update type die not exist");
+  } else {
+    DwTag tag = structType->GetKind() == kTypeStruct ? DW_TAG_structure_type : DW_TAG_union_type;
+    die = module->GetMemPool()->New<DBGDie>(module, tag);
+    tyIdxDieIdMap[tid] = die->GetId();
+  }
+  die = GetOrCreateTypeDieWithAttr(structType->GetTypeAttrs(), die);
+
+  if (strIdx.GetIdx() != 0) {
+    stridxDieIdMap[strIdx.GetIdx()] = die->GetId();
+  }
+
+  compUnit->AddSubVec(die);
+
+  const std::string &name = GlobalTables::GetStrTable().GetStringFromStrIdx(strIdx);
+  bool keep = (name.find("unnamed.") == std::string::npos);
+
+  die->AddAttr(DW_AT_decl_line, DW_FORM_data4, kStructDBGSize);
+  die->AddAttr(DW_AT_name, DW_FORM_strp, strIdx.GetIdx(), keep);
+  die->AddAttr(DW_AT_byte_size, DW_FORM_data4, kDbgDefaultVal);
+  die->AddAttr(DW_AT_decl_file, DW_FORM_data4, mplSrcIdx.GetIdx());
+  // store tid for cg emitter
+  die->AddAttr(DW_AT_type, DW_FORM_data4, tid, false);
+
+  PushParentDie(die);
+
+  // fields
+  CreateStructTypeFieldsDies(structType, die);
+
+  // parentFields
+  CreateStructTypeParentFieldsDies(structType, die);
+
+  // member functions
+  CreateStructTypeMethodsDies(structType, die);
+
+  PopParentDie();
+
+  return die;
+}
+
+DBGDie *DebugInfo::CreateClassTypeDie(const GStrIdx &strIdx, const MIRClassType *classType) {
+  DBGDie *die = module->GetMemPool()->New<DBGDie>(module, DW_TAG_class_type);
+
+  PushParentDie(die);
+
+  // parent
+  uint32 ptid = classType->GetParentTyIdx().GetIdx();
+  if (ptid != 0) {
+    MIRType *parenttype = GlobalTables::GetTypeTable().GetTypeFromTyIdx(classType->GetParentTyIdx());
+    DBGDie *parentDie = GetOrCreateTypeDie(parenttype);
+    if (parentDie) {
+      parentDie = module->GetMemPool()->New<DBGDie>(module, DW_TAG_inheritance);
+      parentDie->AddAttr(DW_AT_name, DW_FORM_strp, parenttype->GetNameStrIdx().GetIdx());
+      DBGDie *typeDie = GetOrCreateStructTypeDie(classType);
+      parentDie->AddAttr(DW_AT_type, DW_FORM_ref4, typeDie->GetId());
+
+      // set to DW_ACCESS_public for now
+
parentDie->AddAttr(DW_AT_accessibility, DW_FORM_data4, DW_ACCESS_public); + die->AddSubVec(parentDie); + } + } + + PopParentDie(); + + // update common fields + tyIdxDieIdMap[classType->GetTypeIndex().GetIdx()] = die->GetId(); + DBGDie *die1 = CreateStructTypeDie(strIdx, classType, true); + ASSERT(die == die1, "ClassTypeDie update wrong die"); + + return die1; +} + +DBGDie *DebugInfo::CreateInterfaceTypeDie(const GStrIdx &strIdx, const MIRInterfaceType *interfaceType) { + DBGDie *die = module->GetMemPool()->New(module, DW_TAG_interface_type); + + PushParentDie(die); + + // parents + for (auto it : interfaceType->GetParentsTyIdx()) { + MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(it); + DBGDie *parentDie = GetOrCreateTypeDie(type); + if (parentDie) { + continue; + } + parentDie = module->GetMemPool()->New(module, DW_TAG_inheritance); + parentDie->AddAttr(DW_AT_name, DW_FORM_strp, type->GetNameStrIdx().GetIdx()); + DBGDie *typeDie = GetOrCreateStructTypeDie(interfaceType); + parentDie->AddAttr(DW_AT_type, DW_FORM_ref4, typeDie->GetId()); + parentDie->AddAttr(DW_AT_data_member_location, DW_FORM_data4, kDbgDefaultVal); + + // set to DW_ACCESS_public for now + parentDie->AddAttr(DW_AT_accessibility, DW_FORM_data4, DW_ACCESS_public); + die->AddSubVec(parentDie); + } + + PopParentDie(); + + // update common fields + tyIdxDieIdMap[interfaceType->GetTypeIndex().GetIdx()] = die->GetId(); + DBGDie *die1 = CreateStructTypeDie(strIdx, interfaceType, true); + ASSERT(die == die1, "InterfaceTypeDie update wrong die"); + + return die1; +} + +uint32 DebugInfo::GetAbbrevId(DBGAbbrevEntryVec *vec, DBGAbbrevEntry *entry) const { + for (auto it : vec->GetEntryvec()) { + if (it->Equalto(entry)) { + return it->GetAbbrevId(); + } + } + return 0; +} + +void DebugInfo::BuildAbbrev() { + uint32 abbrevid = 1; + for (uint32 i = 1; i < maxId; i++) { + DBGDie *die = idDieMap[i]; + DBGAbbrevEntry *entry = module->GetMemPool()->New(module, die); + + if (!tagAbbrevMap[die->GetTag()]) { + tagAbbrevMap[die->GetTag()] = module->GetMemPool()->New(module, die->GetTag()); + } + + uint32 id = GetAbbrevId(tagAbbrevMap[die->GetTag()], entry); + if (id != 0) { + // using existing abbrev id + die->SetAbbrevId(id); + } else { + // add entry to vector + entry->SetAbbrevId(abbrevid++); + tagAbbrevMap[die->GetTag()]->GetEntryvec().push_back(entry); + abbrevVec.push_back(entry); + // update abbrevid in die + die->SetAbbrevId(entry->GetAbbrevId()); + } + } + for (uint32 i = 1; i < maxId; i++) { + DBGDie *die = idDieMap[i]; + if (die->GetAbbrevId() == 0) { + LogInfo::MapleLogger() << "0 abbrevId i = " << i << " die->id = " << die->GetId() << std::endl; + } + } +} + +void DebugInfo::BuildDieTree() { + for (auto it : idDieMap) { + if (!it.first) { + continue; + } + DBGDie *die = it.second; + uint32 size = die->GetSubDieVecSize(); + die->SetWithChildren(size > 0); + if (size != 0) { + die->SetFirstChild(die->GetSubDieVecAt(0)); + for (uint32 i = 0; i < size - 1; i++) { + DBGDie *it0 = die->GetSubDieVecAt(i); + DBGDie *it1 = die->GetSubDieVecAt(i + 1); + if (it0->GetSubDieVecSize() != 0) { + it0->SetSibling(it1); + it0->AddAttr(DW_AT_sibling, DW_FORM_ref4, it1->GetId()); + } + } + } + } +} + +DBGDie *DebugInfo::GetDie(const MIRFunction *func) { + uint32 id = stridxDieIdMap[func->GetNameStrIdx().GetIdx()]; + if (id != 0) { + return idDieMap[id]; + } + return nullptr; +} + +// Methods for calculating Offset and Size of DW_AT_xxx +size_t DBGDieAttr::SizeOf(DBGDieAttr *attr) const { + DwForm form = attr->dwForm; + switch (form) { + 
// case DW_FORM_implicitconst: + case DW_FORM_flag_present: + return 0; // Not handled yet. + case DW_FORM_flag: + case DW_FORM_ref1: + case DW_FORM_data1: + return sizeof(int8); + case DW_FORM_ref2: + case DW_FORM_data2: + return sizeof(int16); + case DW_FORM_ref4: + case DW_FORM_data4: + return sizeof(int32); + case DW_FORM_ref8: + case DW_FORM_ref_sig8: + case DW_FORM_data8: + return sizeof(int64); + case DW_FORM_addr: + return sizeof(int64); + case DW_FORM_sec_offset: + case DW_FORM_ref_addr: + case DW_FORM_strp: + case DW_FORM_GNU_ref_alt: + // case DW_FORM_codeLinestrp: + // case DW_FORM_strp_sup: + // case DW_FORM_ref_sup: + return k4BitSize; // DWARF32, 8 if DWARF64 + + case DW_FORM_string: { + GStrIdx stridx(attr->value.id); + const std::string &str = GlobalTables::GetStrTable().GetStringFromStrIdx(stridx); + return str.length() + 1; /* terminal null byte */ + } + case DW_FORM_exprloc: { + DBGExprLoc *ptr = attr->value.ptr; + CHECK_FATAL(ptr != (DBGExprLoc*)(0xdeadbeef), "wrong ptr"); + switch (ptr->GetOp()) { + case DW_OP_call_frame_cfa: + return k2BitSize; // size 1 byte + DW_OP_call_frame_cfa 1 byte + case DW_OP_fbreg: { + // DW_OP_fbreg 1 byte + size_t size = 1 + namemangler::GetSleb128Size(ptr->GetFboffset()); + return size + namemangler::GetUleb128Size(size); + } + case DW_OP_breg0: + case DW_OP_breg1: + case DW_OP_breg2: + case DW_OP_breg3: + case DW_OP_breg4: + case DW_OP_breg5: + case DW_OP_breg6: + case DW_OP_breg7: + return k3BitSize; + case DW_OP_addr: { + return namemangler::GetUleb128Size(k9BitSize) + k9BitSize; + } + default: + return k4BitSize; + } + } + default: + LogInfo::MapleLogger() << "unhandled SizeOf: " << maple::GetDwFormName(form) << std::endl; + CHECK_FATAL(maple::GetDwFormName(form) != nullptr, "null GetDwFormName(form)"); + return 0; + } +} + +void DebugInfo::ComputeSizeAndOffsets() { + // CU-relative offset is reset to 0 here. + uint32 cuOffset = sizeof(int32_t) + // Length of Unit Info + sizeof(int16) + // DWARF version number : 0x0004 + sizeof(int32) + // Offset into Abbrev. Section : 0x0000 + sizeof(int8); // Pointer Size (in bytes) : 0x08 + + // After returning from this function, the length value is the size + // of the .debug_info section + ComputeSizeAndOffset(compUnit, cuOffset); + debugInfoLength = cuOffset - sizeof(int32_t); +} + +// Compute the size and offset of a DIE. The Offset is relative to start of the CU. +// It returns the offset after laying out the DIE. +void DebugInfo::ComputeSizeAndOffset(DBGDie *die, uint32 &cuOffset) { + if (!die->GetKeep()) { + return; + } + uint32 cuOffsetOrg = cuOffset; + die->SetOffset(cuOffset); + + // Add the byte size of the abbreviation code + cuOffset += static_cast(namemangler::GetUleb128Size(uint64_t(die->GetAbbrevId()))); + + // Add the byte size of all the DIE attributes. + for (const auto &attr : die->GetAttrVec()) { + if (!attr->GetKeep()) { + continue; + } + cuOffset += static_cast(attr->SizeOf(attr)); + } + + die->SetSize(cuOffset - cuOffsetOrg); + + // Let the children compute their offsets. + if (die->GetWithChildren()) { + uint32 size = die->GetSubDieVecSize(); + + for (uint32 i = 0; i < size; i++) { + DBGDie *childDie = die->GetSubDieVecAt(i); + ComputeSizeAndOffset(childDie, cuOffset); + } + + // Each child chain is terminated with a zero byte, adjust the offset. 
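+    // (assumed reading of the DWARF layout: the terminator is the null DIE with abbreviation code 0,
+    //  a single ULEB128 zero byte, which is why one extra byte is counted per parent that has children)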
+ cuOffset += sizeof(int8); + } +} + +bool DebugInfo::IsScopeIdEmited(MIRFunction *func, uint32 scopeId) { + auto it = funcScopeIdStatus.find(func); + if (it == funcScopeIdStatus.end()) { + return false; + } + + auto scopeIdIt = it->second.find(scopeId); + if (scopeIdIt == it->second.end()) { + return false; + } + + if (scopeIdIt->second != kAllEmited) { + return false; + } + return true; +} + +void DebugInfo::ClearDebugInfo() { + for (auto &funcLstrIdxDieId : funcLstrIdxDieIdMap) { + funcLstrIdxDieId.second.clear(); + } + for (auto &funcLstrIdxLabIdx : funcLstrIdxLabIdxMap) { + funcLstrIdxLabIdx.second.clear(); + } + for (auto &funcScopeLow : funcScopeLows) { + funcScopeLow.second.clear(); + funcScopeLow.second.shrink_to_fit(); + } + for (auto &funcScopeHigh : funcScopeHighs) { + funcScopeHigh.second.clear(); + funcScopeHigh.second.shrink_to_fit(); + } + for (auto &status : funcScopeIdStatus) { + status.second.clear(); + } +} + +/* /////////////// + * Dumps + * /////////////// + */ +void DebugInfo::Dump(int indent) { + LogInfo::MapleLogger() << "\n" << std::endl; + LogInfo::MapleLogger() << "maple_debug_information {" + << " Length: " << HEX(debugInfoLength) << std::endl; + compUnit->Dump(indent + 1); + LogInfo::MapleLogger() << "}\n" << std::endl; + LogInfo::MapleLogger() << "maple_debug_abbrev {" << std::endl; + for (uint32 i = 1; i < abbrevVec.size(); i++) { + abbrevVec[i]->Dump(indent + 1); + } + LogInfo::MapleLogger() << "}" << std::endl; + return; +} + +void DBGExprLoc::Dump() const { + LogInfo::MapleLogger() << " " << HEX(GetOp()); + for (auto it : simpLoc->GetOpnd()) { + LogInfo::MapleLogger() << " " << HEX(it); + } +} + +void DBGDieAttr::Dump(int indent) { + PrintIndentation(indent); + CHECK_FATAL(GetDwFormName(dwForm) && GetDwAtName(dwAttr), "null ptr check"); + LogInfo::MapleLogger() << GetDwAtName(dwAttr) << " " << GetDwFormName(dwForm); + if (dwForm == DW_FORM_string || dwForm == DW_FORM_strp) { + GStrIdx idx(value.id); + LogInfo::MapleLogger() << " 0x" << std::hex << value.u << std::dec; + LogInfo::MapleLogger() << " \"" << GlobalTables::GetStrTable().GetStringFromStrIdx(idx).c_str() << "\""; + } else if (dwForm == DW_FORM_ref4) { + LogInfo::MapleLogger() << " <" << HEX(value.id) << ">"; + } else if (dwAttr == DW_AT_encoding) { + CHECK_FATAL(GetDwAteName(static_cast(value.u)), "null ptr check"); + LogInfo::MapleLogger() << " " << GetDwAteName(static_cast(value.u)); + } else if (dwAttr == DW_AT_location) { + value.ptr->Dump(); + } else { + LogInfo::MapleLogger() << " 0x" << std::hex << value.u << std::dec; + } + LogInfo::MapleLogger() << std::endl; +} + +void DBGDie::Dump(int indent) { + PrintIndentation(indent); + LogInfo::MapleLogger() << "<" << HEX(id) << "><" << HEX(offset); + LogInfo::MapleLogger() << "><" << HEX(size) << "><" + << "> abbrev id: " << HEX(abbrevId); + CHECK_FATAL(GetDwTagName(tag), "null ptr check"); + LogInfo::MapleLogger() << " (" << GetDwTagName(tag) << ") "; + if (parent) { + LogInfo::MapleLogger() << "parent <" << HEX(parent->GetId()); + } + LogInfo::MapleLogger() << "> {"; + if (tyIdx != 0) { + MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(TyIdx(tyIdx)); + if (type->GetKind() == kTypeStruct || type->GetKind() == kTypeClass || type->GetKind() == kTypeInterface) { + MIRStructType *stype = static_cast(type); + LogInfo::MapleLogger() << " # " << stype->GetName(); + } else { + LogInfo::MapleLogger() << " # " << GetPrimTypeName(type->GetPrimType()); + } + } + LogInfo::MapleLogger() << std::endl; + for (auto it : attrVec) { + 
it->Dump(indent + 1); + } + PrintIndentation(indent); + LogInfo::MapleLogger() << "} "; + if (subDieVec.size() != 0) { + LogInfo::MapleLogger() << " {" << std::endl; + for (auto it : subDieVec) { + it->Dump(indent + 1); + } + PrintIndentation(indent); + LogInfo::MapleLogger() << "}"; + } + LogInfo::MapleLogger() << std::endl; + return; +} + +void DBGAbbrevEntry::Dump(int indent) { + PrintIndentation(indent); + CHECK_FATAL(GetDwTagName(tag), "null ptr check "); + LogInfo::MapleLogger() << "<" << HEX(abbrevId) << "> " << GetDwTagName(tag); + if (GetWithChildren()) { + LogInfo::MapleLogger() << " [with children] {" << std::endl; + } else { + LogInfo::MapleLogger() << " [no children] {" << std::endl; + } + for (uint32 i = 0; i < attrPairs.size(); i += k2BitSize) { + PrintIndentation(indent + 1); + CHECK_FATAL(GetDwAtName(attrPairs[i]) && GetDwFormName(attrPairs[i + 1]), "NULLPTR CHECK"); + + LogInfo::MapleLogger() << " " << GetDwAtName(attrPairs[i]) << " " << GetDwFormName(attrPairs[i + 1]) + << " " << std::endl; + } + PrintIndentation(indent); + LogInfo::MapleLogger() << "}" << std::endl; + return; +} + +void DBGAbbrevEntryVec::Dump(int indent) { + for (auto it : entryVec) { + PrintIndentation(indent); + it->Dump(indent); + } + return; +} + +// DBGCompileMsgInfo methods +void DBGCompileMsgInfo::ClearLine(uint32 n) { + errno_t eNum = memset_s(codeLine[n], MAXLINELEN, 0, MAXLINELEN); + if (eNum != 0) { + FATAL(kLncFatal, "memset_s failed"); + } +} + +DBGCompileMsgInfo::DBGCompileMsgInfo() : startLine(0), errPos(0) { + lineNum[0] = 0; + lineNum[1] = 0; + lineNum[kIndx2] = 0; + ClearLine(0); + ClearLine(1); + ClearLine(kIndx2); + errLNum = 0; + errCNum = 0; +} + +void DBGCompileMsgInfo::SetErrPos(uint32 lnum, uint32 cnum) { + errLNum = lnum; + errCNum = cnum; +} + +void DBGCompileMsgInfo::UpdateMsg(uint32 lnum, const char *line) { + size_t size = strlen(line); + if (size > MAXLINELEN - 1) { + size = MAXLINELEN - 1; + } + startLine = (startLine + k2BitSize) % k3BitSize; + ClearLine(startLine); + errno_t eNum = memcpy_s(codeLine[startLine], MAXLINELEN, line, size); + if (eNum != 0) { + FATAL(kLncFatal, "memcpy_s failed"); + } + codeLine[startLine][size] = '\0'; + lineNum[startLine] = lnum; +} + +void DBGCompileMsgInfo::EmitMsg() { + char str[MAXLINELEN + 1]; + + errPos = errCNum; + errPos = (errPos < k2BitSize) ? k2BitSize : errPos; + errPos = (errPos > MAXLINELEN) ? 
MAXLINELEN : errPos; + for (uint32 i = 0; i < errPos - 1; i++) { + str[i] = ' '; + } + str[errPos - 1] = '^'; + str[errPos] = '\0'; + + fprintf(stderr, "\n===================================================================\n"); + fprintf(stderr, "=================="); + fprintf(stderr, BOLD YEL " Compilation Error Diagnosis " RESET); + fprintf(stderr, "==================\n"); + fprintf(stderr, "===================================================================\n"); + fprintf(stderr, "line %4u %s\n", lineNum[(startLine + k2BitSize) % k3BitSize], + reinterpret_cast(codeLine[(startLine + k2BitSize) % k3BitSize])); + fprintf(stderr, "line %4u %s\n", lineNum[(startLine + 1) % k3BitSize], + reinterpret_cast(codeLine[(startLine + 1) % k3BitSize])); + fprintf(stderr, "line %4u %s\n", lineNum[(startLine) % k3BitSize], + reinterpret_cast(codeLine[(startLine) % k3BitSize])); + fprintf(stderr, BOLD RED " %s\n" RESET, str); + fprintf(stderr, "===================================================================\n"); +} +} // namespace maple diff --git a/src/mapleall/maple_ir/src/debug_info_util.cpp b/src/mapleall/maple_ir/src/debug_info_util.cpp new file mode 100644 index 0000000000000000000000000000000000000000..3fa5d7bc1cb3067d87445bac5ea5999a7b4146e2 --- /dev/null +++ b/src/mapleall/maple_ir/src/debug_info_util.cpp @@ -0,0 +1,136 @@ +/* + * Copyright (C) [2021] Futurewei Technologies, Inc. All rights reverved. + * + * OpenArkCompiler is licensed under the Mulan Permissive Software License v2. + * You can use this software according to the terms and conditions of the MulanPSL - 2.0. + * You may obtain a copy of MulanPSL - 2.0 at: + * + * https://opensource.org/licenses/MulanPSL-2.0 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the MulanPSL - 2.0 for more details. + */ + +#include +#include "mir_builder.h" +#include "printing.h" +#include "maple_string.h" +#include "namemangler.h" +#include "debug_info.h" +#include "global_tables.h" +#include "mir_type.h" +#include "securec.h" +#include "mpl_logging.h" + +namespace maple { +#define TOSTR(s) #s +// utility functions to get the string from tag value etc. 
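+// Note on the lookups below: each helper expands Dwarf.def through X-macros, so every HANDLE_DW_* entry
+// becomes a case returning the stringified enumerator; values outside Dwarf.def (lo_user/hi_user and vendor
+// extensions) are listed explicitly, and unknown values return nullptr, which callers are expected to check.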
+// GetDwTagName(unsigned n) +const char *GetDwTagName(unsigned n) { + switch (n) { +#define HANDLE_DW_TAG(ID, NAME, VERSION, VENDOR, KIND) case DW_TAG_##NAME: return TOSTR(DW_TAG_##NAME); +#include "Dwarf.def" + case DW_TAG_lo_user: return "DW_TAG_lo_user"; + case DW_TAG_hi_user: return "DW_TAG_hi_user"; + case DW_TAG_user_base: return "DW_TAG_user_base"; + default: return nullptr; + } +} + +// GetDwFormName(unsigned n) +const char *GetDwFormName(unsigned n) { + switch (n) { +#define HANDLE_DW_FORM(ID, NAME, VERSION, VENDOR) case DW_FORM_##NAME: return TOSTR(DW_FORM_##NAME); +#include "Dwarf.def" + case DW_FORM_lo_user: return "DW_FORM_lo_user"; + default: return nullptr; + } +} + +// GetDwAtName(unsigned n) +const char *GetDwAtName(unsigned n) { + switch (n) { +#define HANDLE_DW_AT(ID, NAME, VERSION, VENDOR) case DW_AT_##NAME: return TOSTR(DW_AT_##NAME); +#include "Dwarf.def" + case DW_AT_lo_user: return "DW_AT_lo_user"; + default: return nullptr; + } +} + +// GetDwOpName(unsigned n) +const char *GetDwOpName(unsigned n) { + switch (n) { +#define HANDLE_DW_OP(ID, NAME, VERSION, VENDOR) case DW_OP_##NAME: return TOSTR(DW_OP_##NAME); +#include "Dwarf.def" + case DW_OP_hi_user: return "DW_OP_hi_user"; + case DW_OP_LLVM_fragment: return "DW_OP_LLVM_fragment"; + case DW_OP_LLVM_convert: return "DW_OP_LLVM_convert"; + case DW_OP_LLVM_tag_offset: return "DW_OP_LLVM_tag_offset"; + case DW_OP_LLVM_entry_value: return "DW_OP_LLVM_entry_value"; + default: return nullptr; + } +} + +#define DW_ATE_void 0x20 +// GetDwAteName(unsigned n) +const char *GetDwAteName(unsigned n) { + switch (n) { +#define HANDLE_DW_ATE(ID, NAME, VERSION, VENDOR) case DW_ATE_##NAME: return TOSTR(DW_ATE_##NAME); +#include "Dwarf.def" + case DW_ATE_lo_user: return "DW_ATE_lo_user"; + case DW_ATE_hi_user: return "DW_ATE_hi_user"; + case DW_ATE_void: return "DW_ATE_void"; + default: return nullptr; + } +} + +// GetDwCfaName(unsigned n) +const char *GetDwCfaName(unsigned n) { + switch (n) { +#define HANDLE_DW_CFA(ID, NAME) case DW_CFA_##NAME: return TOSTR(DW_CFA_##NAME); +#define HANDLE_DW_CFA_PRED(ID, NAME, ARCH) case DW_CFA_##NAME: return TOSTR(DW_CFA_##NAME); +#include "Dwarf.def" + case DW_CFA_lo_user: return "DW_CFA_lo_user"; + case DW_CFA_hi_user: return "DW_CFA_hi_user"; + default: return nullptr; + } +} + +DwAte GetAteFromPTY(PrimType pty) { + switch (pty) { + case PTY_u1: + return DW_ATE_boolean; + case PTY_u8: + return DW_ATE_unsigned_char; + case PTY_u16: + case PTY_u32: + case PTY_u64: + return DW_ATE_unsigned; + case PTY_i8: + return DW_ATE_signed_char; + case PTY_i16: + case PTY_i32: + case PTY_i64: + return DW_ATE_signed; + case PTY_f32: + case PTY_f64: + case PTY_f128: + return DW_ATE_float; + case PTY_agg: + case PTY_ref: + case PTY_ptr: + case PTY_a32: + case PTY_a64: + return DW_ATE_address; + case PTY_c64: + case PTY_c128: + return DW_ATE_complex_float; + case PTY_void: + return DW_ATE_void; + default: + return DW_ATE_void; + } +} +} // namespace maple diff --git a/src/mapleall/maple_ir/src/driver.cpp b/src/mapleall/maple_ir/src/driver.cpp new file mode 100644 index 0000000000000000000000000000000000000000..6f0c33f4b3a7e2ae4c8e1e1b2fb18ad186c01331 --- /dev/null +++ b/src/mapleall/maple_ir/src/driver.cpp @@ -0,0 +1,125 @@ +/* + * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include +#include +#include + +#include "bin_mplt.h" +#include "constantfold.h" +#include "mir_function.h" +#include "mir_parser.h" +#include "mir_type.h" +#include "mpl_sighandler.h" +#include "opcode_info.h" + +using namespace maple; + +std::unordered_set dumpFuncSet = {}; + +#if MIR_FEATURE_FULL + +int main(int argc, char **argv) { + SigHandler::EnableAll(); + + constexpr int judgeNumber = 2; + constexpr uint32 k2Argv = 2; + constexpr uint32 k10Argv = 10; + constexpr uint32 kNlSize = 5; + if (argc < judgeNumber) { + (void)MIR_PRINTF( + "usage: ./irbuild [-b] [-dumpfunc=] [-srclang=] \n" + " By default, the files are converted to corresponding ascii format.\n" + " If -b is specified, output is binary format instead.\n" + " If -dumpfunc= is specified, only functions with name containing the string is output.\n" + " -dumpfunc= can be specified multiple times to give multiple strings.\n" + " -srclang specifies the source language that produces the mpl file. \n" + " Each output file has .irb added after its file stem.\n"); + exit(1); + } + + std::vector themodule(argc, nullptr); + bool useBinary = false; + MIRSrcLang srcLang = kSrcLangUnknown; + // process the options which must come first + maple::uint32 i = 1; + while (argv[i][0] == '-') { + if (argv[i][1] == 'b' && argv[i][k2Argv] == '\0') { + useBinary = true; + } else if (strncmp(argv[i], "-dumpfunc=", k10Argv) == 0 && strlen(argv[i]) > k10Argv) { + std::string funcName(&argv[i][k10Argv]); + dumpFuncSet.insert(funcName); + } else if (strcmp(argv[i], "-srclang=java") == 0) { + srcLang = kSrcLangJava; + } else if (strcmp(argv[i], "-srclang=c") == 0) { + srcLang = kSrcLangC; + } else if (strcmp(argv[i], "-srclang=c++") == 0) { + srcLang = kSrcLangCPlusPlus; + } else { + ERR(kLncErr, "irbuild: unrecognized command line option"); + return 1; + } + ++i; + } + // process the input files + while (i < static_cast(argc)) { + themodule[i] = new maple::MIRModule(argv[i]); + themodule[i]->SetSrcLang(srcLang); + std::string::size_type lastdot = themodule[i]->GetFileName().find_last_of("."); + bool ismplt = themodule[i]->GetFileName().compare(lastdot, kNlSize, ".mplt") == 0; + bool istmpl = themodule[i]->GetFileName().compare(lastdot, kNlSize, ".tmpl") == 0; + bool ismpl = themodule[i]->GetFileName().compare(lastdot, kNlSize, ".mpl") == 0; + bool isbpl = themodule[i]->GetFileName().compare(lastdot, kNlSize, ".bpl") == 0; + bool ismbc = themodule[i]->GetFileName().compare(lastdot, kNlSize, ".mbc") == 0; + bool islmbc = themodule[i]->GetFileName().compare(lastdot, kNlSize, ".lmbc") == 0; + if (!ismplt && !istmpl && !ismpl && !isbpl && !ismbc && !islmbc) { + ERR(kLncErr, "irbuild: input must be .mplt or .mpl or .bpl or .mbc or .lmbc or .tmpl file"); + return 1; + } + // input the file + if (ismpl || istmpl) { + maple::MIRParser theparser(*themodule[i]); + if (!theparser.ParseMIR()) { + theparser.EmitError(themodule[i]->GetFileName().c_str()); + return 1; + } + } else { + BinaryMplImport binMplt(*themodule[i]); + binMplt.SetImported(false); + std::string modid = themodule[i]->GetFileName(); + if (!binMplt.Import(modid, true)) { + ERR(kLncErr, "irbuild: cannot open .mplt or .bpl or .mbc or .lmbc file: 
%s", modid.c_str()); + return 1; + } + } + + // output the file + if (!useBinary) { + themodule[i]->OutputAsciiMpl( + ".irb", (ismpl || isbpl || ismbc || islmbc) ? ".mpl" : ".tmpl", &dumpFuncSet, true, false); + } else { + BinaryMplt binMplt(*themodule[i]); + std::string modid = themodule[i]->GetFileName(); + binMplt.GetBinExport().not2mplt = ismpl || isbpl || ismbc || islmbc; + std::string filestem = modid.substr(0, lastdot); + binMplt.Export(filestem + ((ismpl || isbpl || ismbc || islmbc) ? ".irb.bpl" : ".irb.mplt"), &dumpFuncSet); + } + ++i; + } + return 0; +} +#else +#warning "this module is compiled without MIR_FEATURE_FULL=1 defined" +#endif // MIR_FEATURE_FULL diff --git a/src/mapleall/maple_ir/src/global_tables.cpp b/src/mapleall/maple_ir/src/global_tables.cpp new file mode 100644 index 0000000000000000000000000000000000000000..a519918ce4db559f45493c7d5a5566a18dc1142b --- /dev/null +++ b/src/mapleall/maple_ir/src/global_tables.cpp @@ -0,0 +1,496 @@ +/* + * Copyright (c) [2019-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "global_tables.h" +#include "mir_type.h" +#include "mir_symbol.h" + +#if MIR_FEATURE_FULL +namespace maple { +MIRType *TypeTable::CreateMirType(uint32 primTypeIdx) const { + MIRTypeKind defaultKind = (primTypeIdx == PTY_constStr ? 
kTypeConstString : kTypeScalar); + auto primType = static_cast(primTypeIdx); + auto *mirType = new MIRType(defaultKind, primType); + return mirType; +} + +TypeTable::TypeTable() { + // enter the primitve types in type_table_ + typeTable.push_back(static_cast(nullptr)); + ASSERT(typeTable.size() == static_cast(PTY_void), "use PTY_void as the first index to type table"); + uint32 primTypeIdx; + for (primTypeIdx = static_cast(PTY_begin) + 1; primTypeIdx <= static_cast(PTY_end); ++primTypeIdx) { + MIRType *type = CreateMirType(primTypeIdx); + type->SetTypeIndex(TyIdx{ primTypeIdx }); + typeTable.push_back(type); + PutToHashTable(type); + } + if (voidPtrType == nullptr) { + voidPtrType = GetOrCreatePointerType(*GetVoid(), PTY_ptr); + } + lastDefaultTyIdx.SetIdx(primTypeIdx); +} + +void TypeTable::SetTypeWithTyIdx(const TyIdx &tyIdx, MIRType &type) { + CHECK_FATAL(tyIdx < typeTable.size(), "array index out of range"); + MIRType *oldType = typeTable.at(tyIdx); + typeTable.at(tyIdx) = &type; + if (oldType != nullptr && oldType != &type) { + (void)typeHashTable.erase(oldType); + (void)typeHashTable.insert(&type); + delete oldType; + } +} + +TypeTable::~TypeTable() { + for (auto index = static_cast(PTY_void); index < typeTable.size(); ++index) { + delete typeTable[index]; + typeTable[index] = nullptr; + } +} + +void TypeTable::PutToHashTable(MIRType *mirType) { + (void)typeHashTable.insert(mirType); +} + +void TypeTable::UpdateMIRType(const MIRType &pType, const TyIdx tyIdx) { + MIRType *nType = pType.CopyMIRTypeNode(); + nType->SetTypeIndex(tyIdx); + SetTypeWithTyIdx(tyIdx, *nType); +} + +// used only by bin_mpl_import +void TypeTable::CreateMirTypeNodeAt(MIRType &pType, TyIdx tyIdxUsed, MIRModule *module, + bool isObject, bool isIncomplete) { + MIRType *nType = pType.CopyMIRTypeNode(); + nType->SetTypeIndex(tyIdxUsed); + typeTable[tyIdxUsed] = nType; + + if (pType.IsMIRPtrType()) { + auto &pty = static_cast(pType); + if (pty.GetTypeAttrs() == TypeAttrs()) { + if (pty.GetPrimType() != PTY_ref) { + ptrTypeMap[pty.GetPointedTyIdx()] = nType->GetTypeIndex(); + } else { + refTypeMap[pty.GetPointedTyIdx()] = nType->GetTypeIndex(); + } + } else { + (void)typeHashTable.insert(nType); + } + } else { + (void)typeHashTable.insert(nType); + } + + GStrIdx stridx = pType.GetNameStrIdx(); + if (stridx != 0) { + module->GetTypeNameTab()->SetGStrIdxToTyIdx(stridx, tyIdxUsed); + module->PushbackTypeDefOrder(stridx); + if (isObject) { + module->AddClass(tyIdxUsed); + if (!isIncomplete) { + GlobalTables::GetTypeNameTable().SetGStrIdxToTyIdx(stridx, tyIdxUsed); + } + } + } +} + +MIRType *TypeTable::CreateAndUpdateMirTypeNode(MIRType &pType) { + MIRType *nType = pType.CopyMIRTypeNode(); + nType->SetTypeIndex(TyIdx(typeTable.size())); + typeTable.push_back(nType); + + if (pType.IsMIRPtrType()) { + auto &pty = static_cast(pType); + if (pty.GetTypeAttrs() == TypeAttrs()) { + if (pty.GetPrimType() != PTY_ref) { + ptrTypeMap[pty.GetPointedTyIdx()] = nType->GetTypeIndex(); + } else { + refTypeMap[pty.GetPointedTyIdx()] = nType->GetTypeIndex(); + } + } else { + (void)typeHashTable.insert(nType); + } + } else { + (void)typeHashTable.insert(nType); + } + return nType; +} + +MIRType* TypeTable::GetOrCreateMIRTypeNode(MIRType &pType) { + if (pType.IsMIRPtrType()) { + auto &type = static_cast(pType); + if (type.GetTypeAttrs() == TypeAttrs()) { + auto *pMap = (type.GetPrimType() != PTY_ref ? &ptrTypeMap : &refTypeMap); + auto *otherPMap = (type.GetPrimType() == PTY_ref ? 
&ptrTypeMap : &refTypeMap); + { + std::shared_lock lock(mtx); + const auto it = pMap->find(type.GetPointedTyIdx()); + if (it != pMap->end()) { + return GetTypeFromTyIdx(it->second); + } + } + std::unique_lock lock(mtx); + CHECK_FATAL(!(type.GetPointedTyIdx().GetIdx() >= kPtyDerived && type.GetPrimType() == PTY_ref && + otherPMap->find(type.GetPointedTyIdx()) != otherPMap->end()), + "GetOrCreateMIRType: ref pointed-to type %d has previous ptr occurrence", + type.GetPointedTyIdx().GetIdx()); + return CreateAndUpdateMirTypeNode(pType); + } + } + { + std::shared_lock lock(mtx); + const auto it = typeHashTable.find(&pType); + if (it != typeHashTable.end()) { + return *it; + } + } + std::unique_lock lock(mtx); + return CreateAndUpdateMirTypeNode(pType); +} + +MIRType *TypeTable::voidPtrType = nullptr; +// get or create a type that pointing to pointedTyIdx +MIRType *TypeTable::GetOrCreatePointerType(const TyIdx &pointedTyIdx, PrimType primType, const TypeAttrs &attrs) { + MIRPtrType type(pointedTyIdx, primType); + type.SetTypeAttrs(attrs); + TyIdx tyIdx = GetOrCreateMIRType(&type); + ASSERT(tyIdx < typeTable.size(), "index out of range in TypeTable::GetOrCreatePointerType"); + return typeTable.at(tyIdx); +} + +MIRType *TypeTable::GetOrCreatePointerType(const MIRType &pointTo, PrimType primType, const TypeAttrs &attrs) { + if (pointTo.GetPrimType() == PTY_constStr) { + primType = PTY_ptr; + } + return GetOrCreatePointerType(pointTo.GetTypeIndex(), primType, attrs); +} + +const MIRType *TypeTable::GetPointedTypeIfApplicable(MIRType &type) const { + if (type.GetKind() != kTypePointer) { + return &type; + } + auto &ptrType = static_cast(type); + return GetTypeFromTyIdx(ptrType.GetPointedTyIdx()); +} +MIRType *TypeTable::GetPointedTypeIfApplicable(MIRType &type) { + return const_cast(const_cast(this)->GetPointedTypeIfApplicable(type)); +} + +MIRArrayType *TypeTable::GetOrCreateArrayType(const MIRType &elem, uint8 dim, const uint32 *sizeArray, + const TypeAttrs &attrs) { + std::vector sizeVector; + for (size_t i = 0; i < dim; ++i) { + sizeVector.push_back(sizeArray != nullptr ? 
sizeArray[i] : 0); + } + MIRArrayType arrayType(elem.GetTypeIndex(), sizeVector); + arrayType.SetTypeAttrs(attrs); + TyIdx tyIdx = GetOrCreateMIRType(&arrayType); + return static_cast(typeTable[tyIdx]); +} + +// For one dimension array +MIRArrayType *TypeTable::GetOrCreateArrayType(const MIRType &elem, uint32 size, const TypeAttrs &attrs) { + return GetOrCreateArrayType(elem, 1, &size, attrs); +} + +MIRType *TypeTable::GetOrCreateFarrayType(const MIRType &elem) { + MIRFarrayType type; + type.SetElemtTyIdx(elem.GetTypeIndex()); + TyIdx tyIdx = GetOrCreateMIRType(&type); + ASSERT(tyIdx < typeTable.size(), "index out of range in TypeTable::GetOrCreateFarrayType"); + return typeTable.at(tyIdx); +} + +MIRType *TypeTable::GetOrCreateJarrayType(const MIRType &elem) { + MIRJarrayType type; + type.SetElemtTyIdx(elem.GetTypeIndex()); + TyIdx tyIdx = GetOrCreateMIRType(&type); + ASSERT(tyIdx < typeTable.size(), "index out of range in TypeTable::GetOrCreateJarrayType"); + return typeTable.at(tyIdx); +} + +MIRType *TypeTable::GetOrCreateFunctionType(const TyIdx &retTyIdx, const std::vector &vecType, + const std::vector &vecAttrs, bool isVarg, + const TypeAttrs &retAttrs) { + MIRFuncType funcType(retTyIdx, vecType, vecAttrs, retAttrs); + if (isVarg) { + funcType.SetVarArgs(); + } + TyIdx tyIdx = GetOrCreateMIRType(&funcType); + ASSERT(tyIdx < typeTable.size(), "index out of range in TypeTable::GetOrCreateFunctionType"); + return typeTable.at(tyIdx); +} + +MIRType *TypeTable::GetOrCreateStructOrUnion(const std::string &name, const FieldVector &fields, + const FieldVector &parentFields, MIRModule &module, bool forStruct, + const TypeAttrs &attrs) { + GStrIdx strIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(name); + MIRStructType type(forStruct ? kTypeStruct : kTypeUnion, strIdx); + type.SetFields(fields); + type.SetParentFields(parentFields); + type.SetTypeAttrs(attrs); + + TyIdx tyIdx = GetOrCreateMIRType(&type); + // Global? 
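+  // register the new struct/union name in the module-level type-name table and record its definition order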
+ module.GetTypeNameTab()->SetGStrIdxToTyIdx(strIdx, tyIdx); + module.PushbackTypeDefOrder(strIdx); + ASSERT(tyIdx < typeTable.size(), "index out of range in TypeTable::GetOrCreateStructOrUnion"); + return typeTable.at(tyIdx); +} + +void TypeTable::PushIntoFieldVector(FieldVector &fields, const std::string &name, const MIRType &type) const { + GStrIdx strIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(name); + fields.push_back(FieldPair(strIdx, TyIdxFieldAttrPair(type.GetTypeIndex(), FieldAttrs()))); +} + +MIRType *TypeTable::GetOrCreateClassOrInterface(const std::string &name, MIRModule &module, bool forClass) { + GStrIdx strIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(name); + TyIdx tyIdx = module.GetTypeNameTab()->GetTyIdxFromGStrIdx(strIdx); + if (!tyIdx) { + if (forClass) { + MIRClassType type(kTypeClassIncomplete, strIdx); // for class type + tyIdx = GetOrCreateMIRType(&type); + } else { + MIRInterfaceType type(kTypeInterfaceIncomplete, strIdx); // for interface type + tyIdx = GetOrCreateMIRType(&type); + } + module.PushbackTypeDefOrder(strIdx); + module.GetTypeNameTab()->SetGStrIdxToTyIdx(strIdx, tyIdx); + if (typeTable[tyIdx]->GetNameStrIdx() == 0u) { + typeTable[tyIdx]->SetNameStrIdx(strIdx); + } + } + ASSERT(tyIdx < typeTable.size(), "index out of range in TypeTable::GetOrCreateClassOrInterface"); + return typeTable.at(tyIdx); +} + +void TypeTable::AddFieldToStructType(MIRStructType &structType, const std::string &fieldName, + const MIRType &fieldType) const { + GStrIdx strIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(fieldName); + FieldAttrs fieldAttrs; + fieldAttrs.SetAttr(FLDATTR_final); // Mark compiler-generated struct fields as final to improve AliasAnalysis + structType.GetFields().push_back(FieldPair(strIdx, TyIdxFieldAttrPair(fieldType.GetTypeIndex(), fieldAttrs))); +} + +void FPConstTable::PostInit() { + MIRType &typeFloat = *GlobalTables::GetTypeTable().GetPrimType(PTY_f32); + nanFloatConst = new MIRFloatConst(NAN, typeFloat); + infFloatConst = new MIRFloatConst(INFINITY, typeFloat); + minusInfFloatConst = new MIRFloatConst(-INFINITY, typeFloat); + minusZeroFloatConst = new MIRFloatConst(-0.0, typeFloat); + MIRType &typeDouble = *GlobalTables::GetTypeTable().GetPrimType(PTY_f64); + nanDoubleConst = new MIRDoubleConst(NAN, typeDouble); + infDoubleConst = new MIRDoubleConst(INFINITY, typeDouble); + minusInfDoubleConst = new MIRDoubleConst(-INFINITY, typeDouble); + minusZeroDoubleConst = new MIRDoubleConst(-0.0, typeDouble); +} + +MIRIntConst *IntConstTable::GetOrCreateIntConst(const IntVal &val, MIRType &type) { + if (ThreadEnv::IsMeParallel()) { + return DoGetOrCreateIntConstTreadSafe(static_cast(val.GetExtValue()), type); + } + return DoGetOrCreateIntConst(static_cast(val.GetExtValue()), type); +} + +MIRIntConst *IntConstTable::GetOrCreateIntConst(uint64 val, MIRType &type) { + if (ThreadEnv::IsMeParallel()) { + return DoGetOrCreateIntConstTreadSafe(val, type); + } + return DoGetOrCreateIntConst(val, type); +} + +MIRIntConst *IntConstTable::DoGetOrCreateIntConst(uint64 val, MIRType &type) { + IntConstKey key(val, type.GetTypeIndex()); + if (intConstTable.find(key) != intConstTable.end()) { + return intConstTable[key]; + } + intConstTable[key] = new MIRIntConst(val, type); + return intConstTable[key]; +} + +MIRIntConst *IntConstTable::DoGetOrCreateIntConstTreadSafe(uint64 val, MIRType &type) { + IntConstKey key(val, type.GetTypeIndex()); + { + std::shared_lock lock(mtx); + if (intConstTable.find(key) != intConstTable.end()) { + 
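+      // fast path: the constant is already interned and was found while holding only the shared (reader) lock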
return intConstTable[key]; + } + } + std::unique_lock lock(mtx); + intConstTable[key] = new MIRIntConst(val, type); + return intConstTable[key]; +} + +IntConstTable::~IntConstTable() { + for (auto pair : intConstTable) { + delete pair.second; + } +} + +MIRFloatConst *FPConstTable::GetOrCreateFloatConst(float floatVal) { + if (std::isnan(floatVal)) { + return nanFloatConst; + } + if (std::isinf(floatVal)) { + return (floatVal < 0) ? minusInfFloatConst : infFloatConst; + } + if (floatVal == 0.0 && std::signbit(floatVal)) { + return minusZeroFloatConst; + } + if (ThreadEnv::IsMeParallel()) { + return DoGetOrCreateFloatConstThreadSafe(floatVal); + } + return DoGetOrCreateFloatConst(floatVal); +} + +MIRFloatConst *FPConstTable::DoGetOrCreateFloatConst(float floatVal) { + const auto it = floatConstTable.find(floatVal); + if (it != floatConstTable.cend()) { + return it->second; + } + // create a new one + auto *floatConst = + new MIRFloatConst(floatVal, *GlobalTables::GetTypeTable().GetTypeFromTyIdx(TyIdx{ PTY_f32 })); + floatConstTable[floatVal] = floatConst; + return floatConst; +} + +MIRFloatConst *FPConstTable::DoGetOrCreateFloatConstThreadSafe(float floatVal) { + { + std::shared_lock lock(floatMtx); + const auto it = floatConstTable.find(floatVal); + if (it != floatConstTable.cend()) { + return it->second; + } + } + // create a new one + std::unique_lock lock(floatMtx); + auto *floatConst = + new MIRFloatConst(floatVal, *GlobalTables::GetTypeTable().GetTypeFromTyIdx(TyIdx{ PTY_f32 })); + floatConstTable[floatVal] = floatConst; + return floatConst; +} + +MIRDoubleConst *FPConstTable::GetOrCreateDoubleConst(double doubleVal) { + if (std::isnan(doubleVal)) { + return nanDoubleConst; + } + if (std::isinf(doubleVal)) { + return (doubleVal < 0) ? minusInfDoubleConst : infDoubleConst; + } + if (doubleVal == 0.0 && std::signbit(doubleVal)) { + return minusZeroDoubleConst; + } + if (ThreadEnv::IsMeParallel()) { + return DoGetOrCreateDoubleConstThreadSafe(doubleVal); + } + return DoGetOrCreateDoubleConst(doubleVal); +} + +MIRDoubleConst *FPConstTable::DoGetOrCreateDoubleConst(double doubleVal) { + const auto it = doubleConstTable.find(doubleVal); + if (it != doubleConstTable.cend()) { + return it->second; + } + // create a new one + auto *doubleConst = new MIRDoubleConst( + doubleVal, *GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_f64))); + doubleConstTable[doubleVal] = doubleConst; + return doubleConst; +} + +MIRDoubleConst *FPConstTable::DoGetOrCreateDoubleConstThreadSafe(double doubleVal) { + { + std::shared_lock lock(doubleMtx); + const auto it = doubleConstTable.find(doubleVal); + if (it != doubleConstTable.cend()) { + return it->second; + } + } + // create a new one + std::unique_lock lock(doubleMtx); + auto *doubleConst = new MIRDoubleConst( + doubleVal, *GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_f64))); + doubleConstTable[doubleVal] = doubleConst; + return doubleConst; +} + +FPConstTable::~FPConstTable() { + delete nanFloatConst; + delete infFloatConst; + delete minusInfFloatConst; + delete minusZeroFloatConst; + delete nanDoubleConst; + delete infDoubleConst; + delete minusInfDoubleConst; + delete minusZeroDoubleConst; + for (const auto &floatConst : floatConstTable) { + delete floatConst.second; + } + for (const auto &doubleConst : doubleConstTable) { + delete doubleConst.second; + } +} + +GSymbolTable::GSymbolTable() { + symbolTable.push_back(static_cast(nullptr)); +} + +GSymbolTable::~GSymbolTable() { + for (MIRSymbol *symbol : symbolTable) { + delete 
symbol; + } +} + +MIRSymbol *GSymbolTable::CreateSymbol(uint8 scopeID) { + auto *st = new MIRSymbol(symbolTable.size(), scopeID); + CHECK_FATAL(st != nullptr, "CreateSymbol failure"); + symbolTable.push_back(st); + module->AddSymbol(st); + return st; +} + +bool GSymbolTable::AddToStringSymbolMap(const MIRSymbol &st) { + GStrIdx strIdx = st.GetNameStrIdx(); + if (strIdxToStIdxMap[strIdx].FullIdx() != 0) { + return false; + } + strIdxToStIdxMap[strIdx] = st.GetStIdx(); + return true; +} + +bool GSymbolTable::RemoveFromStringSymbolMap(const MIRSymbol &st) { + const auto it = strIdxToStIdxMap.find(st.GetNameStrIdx()); + if (it != strIdxToStIdxMap.cend()) { + strIdxToStIdxMap.erase(it); + return true; + } + return false; +} + +void GSymbolTable::Dump(bool isLocal, int32 indent) const { + for (size_t i = 1; i < symbolTable.size(); ++i) { + const MIRSymbol *symbol = symbolTable[i]; + if (symbol != nullptr) { + symbol->Dump(isLocal, indent); + } + } +} + +GlobalTables GlobalTables::globalTables; +GlobalTables &GlobalTables::GetGlobalTables() { + return globalTables; +} +} // namespace maple +#endif // MIR_FEATURE_FULL diff --git a/src/mapleall/maple_ir/src/intrinsics.cpp b/src/mapleall/maple_ir/src/intrinsics.cpp new file mode 100644 index 0000000000000000000000000000000000000000..65d95165957ab1b7cca77c0ccee8c354f86b3724 --- /dev/null +++ b/src/mapleall/maple_ir/src/intrinsics.cpp @@ -0,0 +1,234 @@ +/* + * Copyright (c) [2019] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "intrinsics.h" +#include "mir_module.h" +#include "mir_type.h" +#include "mir_builder.h" + +namespace maple { +MIRType *IntrinDesc::jsValueType = nullptr; +MIRModule *IntrinDesc::mirModule = nullptr; +IntrinDesc IntrinDesc::intrinTable[INTRN_LAST + 1] = { +#define DEF_MIR_INTRINSIC(X, NAME, NUM_INSN, INTRN_CLASS, RETURN_TYPE, ...) 
\ + { (NAME), (NUM_INSN), (INTRN_CLASS), { (RETURN_TYPE), ##__VA_ARGS__ } }, +#include "intrinsics.def" +#undef DEF_MIR_INTRINSIC +}; +MIRType *IntrinDesc::GetOrCreateJSValueType() { + if (jsValueType != nullptr) { + return jsValueType; + } + MIRBuilder *jsBuilder = mirModule->GetMIRBuilder(); + FieldVector payloadFields; + GStrIdx i32 = jsBuilder->GetOrCreateStringIndex("i32"); + GStrIdx u32 = jsBuilder->GetOrCreateStringIndex("u32"); + GStrIdx boo = jsBuilder->GetOrCreateStringIndex("boo"); + GStrIdx ptr = jsBuilder->GetOrCreateStringIndex("ptr"); + payloadFields.push_back( + FieldPair(i32, TyIdxFieldAttrPair(GlobalTables::GetTypeTable().GetInt32()->GetTypeIndex(), FieldAttrs()))); + payloadFields.push_back( + FieldPair(u32, TyIdxFieldAttrPair(GlobalTables::GetTypeTable().GetUInt32()->GetTypeIndex(), FieldAttrs()))); + payloadFields.push_back( + FieldPair(boo, TyIdxFieldAttrPair(GlobalTables::GetTypeTable().GetUInt32()->GetTypeIndex(), FieldAttrs()))); + payloadFields.push_back( + FieldPair(ptr, TyIdxFieldAttrPair(GlobalTables::GetTypeTable().GetVoidPtr()->GetTypeIndex(), FieldAttrs()))); + FieldVector parentFields; + MIRType *payloadType = GlobalTables::GetTypeTable().GetOrCreateUnionType("payload_type", payloadFields, + parentFields, *mirModule); + FieldVector sFields; + GStrIdx payload = jsBuilder->GetOrCreateStringIndex("payload"); + GStrIdx tag = jsBuilder->GetOrCreateStringIndex("tag"); + sFields.push_back(FieldPair(payload, TyIdxFieldAttrPair(payloadType->GetTypeIndex(), FieldAttrs()))); + sFields.push_back( + FieldPair(tag, TyIdxFieldAttrPair(GlobalTables::GetTypeTable().GetUInt32()->GetTypeIndex(), FieldAttrs()))); + MIRType *sType = GlobalTables::GetTypeTable().GetOrCreateStructType("s_type", sFields, parentFields, *mirModule); + CHECK_FATAL(sType != nullptr, "can't get struct type, check it!"); + FieldVector jsValLayoutFields; + GStrIdx asBits = jsBuilder->GetOrCreateStringIndex("asBits"); + GStrIdx s = jsBuilder->GetOrCreateStringIndex("s"); + GStrIdx asDouble = jsBuilder->GetOrCreateStringIndex("asDouble"); + GStrIdx asPtr = jsBuilder->GetOrCreateStringIndex("asPtr"); + jsValLayoutFields.push_back( + FieldPair(asBits, TyIdxFieldAttrPair(GlobalTables::GetTypeTable().GetUInt64()->GetTypeIndex(), FieldAttrs()))); + jsValLayoutFields.push_back(FieldPair(s, TyIdxFieldAttrPair(sType->GetTypeIndex(), FieldAttrs()))); + jsValLayoutFields.push_back( + FieldPair(asDouble, TyIdxFieldAttrPair(GlobalTables::GetTypeTable().GetDouble()->GetTypeIndex(), FieldAttrs()))); + jsValLayoutFields.push_back( + FieldPair(asPtr, TyIdxFieldAttrPair(GlobalTables::GetTypeTable().GetVoidPtr()->GetTypeIndex(), FieldAttrs()))); + MIRType *jsValLayoutType = GlobalTables::GetTypeTable().GetOrCreateUnionType("jsval_layout_type", + jsValLayoutFields, + parentFields, *mirModule); + return jsValLayoutType; +} + +void IntrinDesc::InitMIRModule(MIRModule *mod) { + mirModule = mod; +} + +MIRType *IntrinDesc::GetTypeFromArgTy(IntrinArgType argType) const { + switch (argType) { + case kArgTyVoid: + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_void)); + case kArgTyI8: + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_i8)); + case kArgTyI16: + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_i16)); + case kArgTyI32: + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_i32)); + case kArgTyI64: + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_i64)); + case kArgTyU8: + return 
GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_u8)); + case kArgTyU16: + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_u16)); + case kArgTyU32: + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_u32)); + case kArgTyU64: + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_u64)); + case kArgTyU1: + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_u1)); + case kArgTyPtr: + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_ptr)); + case kArgTyRef: + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_ref)); + case kArgTyA32: + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_a32)); + case kArgTyA64: + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_a64)); + case kArgTyF32: + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_f32)); + case kArgTyF64: + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_f64)); + case kArgTyF128: + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_f128)); + case kArgTyC64: + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_c64)); + case kArgTyC128: + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_c128)); + case kArgTyAgg: + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_agg)); + case kArgTyV2I64: + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_v2i64)); + case kArgTyV4I32: + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_v4i32)); + case kArgTyV8I16: + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_v8i16)); + case kArgTyV16I8: + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_v16i8)); + case kArgTyV2U64: + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_v2u64)); + case kArgTyV4U32: + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_v4u32)); + case kArgTyV8U16: + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_v8u16)); + case kArgTyV16U8: + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_v16u8)); + case kArgTyV2F64: + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_v2f64)); + case kArgTyV4F32: + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_v4f32)); + case kArgTyV1I64: + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_i64)); + case kArgTyV2I32: + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_v2i32)); + case kArgTyV4I16: + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_v4i16)); + case kArgTyV8I8: + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_v8i8)); + case kArgTyV1U64: + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_u64)); + case kArgTyV2U32: + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_v2u32)); + case kArgTyV4U16: + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_v4u16)); + case kArgTyV8U8: + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_v8u8)); + case kArgTyV1F64: + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_f64)); + case kArgTyV2F32: + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_v2f32)); +#ifdef DYNAMICLANG + case kArgTySimplestr: + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_simplestr)); + case 
kArgTySimpleobj: + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_simpleobj)); + case kArgTyDynany: + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_dynany)); + case kArgTyDyni32: + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_dyni32)); +#endif + default: + return nullptr; + } +} + +MIRType *IntrinDesc::GetArgType(uint32 index) const { + // 0 is the arg of return type + CHECK_FATAL(index < kMaxArgsNum, "index out of range"); + return GetTypeFromArgTy(argTypes[index + 1]); +} + +MIRType *IntrinDesc::GetReturnType() const { + return GetTypeFromArgTy(argTypes[0]); +} + +bool IntrinDesc::IsNthOpndMarkedToWrite(uint32 opndIdx) const { + CHECK_FATAL(opndIdx < 6, "intrinsic has <= 6 arguments."); + constexpr uint32 kIntrnWriteFlags[] = { + INTRNWRITEFIRSTOPND, INTRNWRITESECONDOPND, INTRNWRITETHIRDOPND, + INTRNWRITEFOURTHOPND, INTRNWRITEFIFTHOPND, INTRNWRITESIXTHOPND + }; + return (properties & kIntrnWriteFlags[opndIdx]) != 0; +} + +bool IntrinDesc::IsNthOpndMarkedToRead(uint32 opndIdx) const { + CHECK_FATAL(opndIdx < 6, "intrinsic has <= 6 arguments."); + constexpr uint32 kIntrnReadFlags[] = { + INTRNREADFIRSTOPND, INTRNREADSECONDOPND, INTRNREADTHIRDOPND, + INTRNREADFOURTHOPND, INTRNREADFIFTHOPND, INTRNREADSIXTHOPND + }; + return (properties & kIntrnReadFlags[opndIdx]) != 0; +} + +bool IntrinDesc::WriteNthOpnd(uint32 opndIdx) const { + // when to write the argument: + // 1. has side effect, and + // 2. marked with WRITE or marked with nothing + if (HasNoSideEffect()) { + return false; // read only + } + if (IsNthOpndMarkedToWrite(opndIdx)) { + return true; + } + // marked with nothing --> by default: write the argument + return !IsNthOpndMarkedToRead(opndIdx); +} + +bool IntrinDesc::ReadNthOpnd(uint32 opndIdx) const { + // when to read the argument: + // 1. has no side effect, or + // 2. has side effect but marked with READ, or + // 3. has side effect but marked with nothing + if (HasNoSideEffect()) { + return true; + } + if (IsNthOpndMarkedToRead(opndIdx)) { + return true; + } + // marked with nothing --> by default: read the argument + return !IsNthOpndMarkedToWrite(opndIdx); +} + +} // namespace maple diff --git a/src/mapleall/maple_ir/src/lexer.cpp b/src/mapleall/maple_ir/src/lexer.cpp new file mode 100644 index 0000000000000000000000000000000000000000..719bb349c604afad93f8e5309ee1dbe3923e0c17 --- /dev/null +++ b/src/mapleall/maple_ir/src/lexer.cpp @@ -0,0 +1,736 @@ +/* + * Copyright (c) [2019-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "lexer.h" +#include +#include +#include +#include "mpl_logging.h" +#include "debug_info.h" +#include "mir_module.h" +#include "securec.h" +#include "utils.h" + +namespace maple { +int32 HexCharToDigit(char c) { + int32 ret = utils::ToDigit<16, int32>(c); + return (ret != INT32_MAX ? 
ret : 0); +} + +static uint8 Char2num(char c) { + uint8 ret = utils::ToDigit<16>(c); + ASSERT(ret != UINT8_MAX, "not a hex value"); + return ret; +} + +// Read (next) line from the MIR (text) file, and return the read +// number of chars. +// if the line is empty (nothing but a newline), returns 0. +// if EOF, return -1. +// The trailing new-line character has been removed. +int MIRLexer::ReadALine() { + if (airFile == nullptr) { + line = ""; + return -1; + } + + curIdx = 0; + if (!std::getline(*airFile, line)) { // EOF + line = ""; + airFile = nullptr; + currentLineSize = 0; + return -1; + } + + RemoveReturnInline(line); + currentLineSize = line.length(); + return currentLineSize; +} + +MIRLexer::MIRLexer(MIRModule &mod) + : module(mod), + seenComments(mod.GetMPAllocator().Adapter()), + keywordMap(mod.GetMPAllocator().Adapter()) { + // initialize keywordMap + keywordMap.clear(); +#define KEYWORD(STR) \ + { \ + std::string str; \ + str = #STR; \ + keywordMap[str] = TK_##STR; \ + } +#include "keywords.def" +#undef KEYWORD +} + +void MIRLexer::PrepareForFile(const std::string &filename) { + // open MIR file + airFileInternal.open(filename); + CHECK_FATAL(airFileInternal.is_open(), "cannot open MIR file %s\n", &filename); + + airFile = &airFileInternal; + // try to read the first line + if (ReadALine() < 0) { + lineNum = 0; + } else { + lineNum = 1; + } + module.GetDbgInfo()->UpdateMsg(lineNum, line.c_str()); + kind = TK_invalid; +} + +void MIRLexer::PrepareForString(const std::string &src) { + line = src; + RemoveReturnInline(line); + currentLineSize = line.size(); + curIdx = 0; + NextToken(); +} + +void MIRLexer::GenName() { + uint32 startIdx = curIdx; + char c = GetNextCurrentCharWithUpperCheck(); + char cp = GetCharAt(curIdx - 1); + if (c == '@' && (cp == 'h' || cp == 'f')) { + // special pattern for exception handling labels: catch or finally + c = GetNextCurrentCharWithUpperCheck(); + } + while (utils::IsAlnum(c) || c < 0 || c == '_' || c == '$' || c == ';' || + c == '/' || c == '|' || c == '.' || c == '?' || + c == '@') { + c = GetNextCurrentCharWithUpperCheck(); + } + name = line.substr(startIdx, curIdx - startIdx); +} + +// get the constant value +TokenKind MIRLexer::GetConstVal() { + bool negative = false; + int valStart = curIdx; + char c = GetCharAtWithUpperCheck(curIdx); + if (c == '-') { + c = GetNextCurrentCharWithUpperCheck(); + TokenKind tk = GetSpecialFloatConst(); + if (tk != TK_invalid) { + return tk; + } + negative = true; + } + const uint32 lenHexPrefix = 2; + if (line.compare(curIdx, lenHexPrefix, "0x") == 0) { + curIdx += lenHexPrefix; + return GetHexConst(valStart, negative); + } + uint32 startIdx = curIdx; + while (isdigit(c)) { + c = GetNextCurrentCharWithUpperCheck(); + } + char cs = GetCharAtWithUpperCheck(startIdx); + if (!isdigit(cs) && c != '.') { + return TK_invalid; + } + if (c != '.' 
&& c != 'f' && c != 'F' && c != 'e' && c != 'E') { + curIdx = startIdx; + return GetIntConst(valStart, negative); + } + return GetFloatConst(valStart, startIdx, negative); +} + +TokenKind MIRLexer::GetSpecialFloatConst() { + constexpr uint32 lenSpecFloat = 4; + constexpr uint32 lenSpecDouble = 3; + if (line.compare(curIdx, lenSpecFloat, "inff") == 0 && + !utils::IsAlnum(GetCharAtWithUpperCheck(curIdx + lenSpecFloat))) { + curIdx += lenSpecFloat; + theFloatVal = -INFINITY; + return TK_floatconst; + } + if (line.compare(curIdx, lenSpecDouble, "inf") == 0 && + !utils::IsAlnum(GetCharAtWithUpperCheck(curIdx + lenSpecDouble))) { + curIdx += lenSpecDouble; + theDoubleVal = -INFINITY; + return TK_doubleconst; + } + if (line.compare(curIdx, lenSpecFloat, "nanf") == 0 && + !utils::IsAlnum(GetCharAtWithUpperCheck(curIdx + lenSpecFloat))) { + curIdx += lenSpecFloat; + theFloatVal = -NAN; + return TK_floatconst; + } + if (line.compare(curIdx, lenSpecDouble, "nan") == 0 && + !utils::IsAlnum(GetCharAtWithUpperCheck(curIdx + lenSpecDouble))) { + curIdx += lenSpecDouble; + theDoubleVal = -NAN; + return TK_doubleconst; + } + return TK_invalid; +} + +TokenKind MIRLexer::GetHexConst(uint32 valStart, bool negative) { + char c = GetCharAtWithUpperCheck(curIdx); + if (!isxdigit(c)) { + name = line.substr(valStart, curIdx - valStart); + return TK_invalid; + } + uint64 tmp = static_cast(HexCharToDigit(c)); + c = GetNextCurrentCharWithUpperCheck(); + while (isxdigit(c)) { + tmp = (tmp << 4) + static_cast(HexCharToDigit(c)); + c = GetNextCurrentCharWithUpperCheck(); + } + theIntVal = tmp; + if (negative) { + theIntVal = -theIntVal; + } + theFloatVal = static_cast(theIntVal); + theDoubleVal = static_cast(theIntVal); + if (negative && theIntVal == 0) { + theFloatVal = -theFloatVal; + theDoubleVal = -theDoubleVal; + } + name = line.substr(valStart, curIdx - valStart); + return TK_intconst; +} + +TokenKind MIRLexer::GetIntConst(uint32 valStart, bool negative) { + auto negOrSelf = [negative](uint64 val) { return negative ? ~val + 1 : val; }; + + theIntVal = static_cast(HexCharToDigit(GetCharAtWithUpperCheck(curIdx))); + + uint64 radix = theIntVal == 0 ? 
8 : 10; + + char c = GetNextCurrentCharWithUpperCheck(); + + for (theIntVal = negOrSelf(theIntVal); isdigit(c); c = GetNextCurrentCharWithUpperCheck()) { + theIntVal = (theIntVal * radix) + negOrSelf(static_cast(HexCharToDigit(c))); + } + + if (c == 'u' || c == 'U') { // skip 'u' or 'U' + c = GetNextCurrentCharWithUpperCheck(); + if (c == 'l' || c == 'L') { + c = GetNextCurrentCharWithUpperCheck(); + } + } + + if (c == 'l' || c == 'L') { + c = GetNextCurrentCharWithUpperCheck(); + if (c == 'l' || c == 'L' || c == 'u' || c == 'U') { + ++curIdx; + } + } + + name = line.substr(valStart, curIdx - valStart); + + if (negative) { + theFloatVal = static_cast(static_cast(theIntVal)); + theDoubleVal = static_cast(static_cast(theIntVal)); + + if (theIntVal == 0) { + theFloatVal = -theFloatVal; + theDoubleVal = -theDoubleVal; + } + } else { + theFloatVal = static_cast(theIntVal); + theDoubleVal = static_cast(theIntVal); + } + + return TK_intconst; +} + +TokenKind MIRLexer::GetFloatConst(uint32 valStart, uint32 startIdx, bool negative) { + char c = GetCharAtWithUpperCheck(curIdx); + if (c == '.') { + c = GetNextCurrentCharWithUpperCheck(); + } + while (isdigit(c)) { + c = GetNextCurrentCharWithUpperCheck(); + } + bool doublePrec = true; + if (c == 'e' || c == 'E') { + c = GetNextCurrentCharWithUpperCheck(); + if (!isdigit(c) && c != '-' && c != '+') { + name = line.substr(valStart, curIdx - valStart); + return TK_invalid; + } + if (c == '-' || c == '+') { + c = GetNextCurrentCharWithUpperCheck(); + } + while (isdigit(c)) { + c = GetNextCurrentCharWithUpperCheck(); + } + } + if (c == 'f' || c == 'F') { + doublePrec = false; + c = GetNextCurrentCharWithUpperCheck(); + } + if (c == 'l' || c == 'L') { + MIR_ERROR("warning: not yet support long double\n"); + ++curIdx; + } + + std::string floatStr = line.substr(startIdx, curIdx - startIdx); + // get the float constant value + if (!doublePrec) { + int eNum = sscanf_s(floatStr.c_str(), "%e", &theFloatVal); + CHECK_FATAL(eNum == 1, "sscanf_s failed"); + + if (negative) { + theFloatVal = -theFloatVal; + } + theIntVal = static_cast(theFloatVal); + theDoubleVal = static_cast(theFloatVal); + if (negative && fabs(theFloatVal) <= 1e-6) { + theDoubleVal = -theDoubleVal; + } + name = line.substr(valStart, curIdx - valStart); + return TK_floatconst; + } else { + int eNum = sscanf_s(floatStr.c_str(), "%le", &theDoubleVal); + CHECK_FATAL(eNum == 1, "sscanf_s failed"); + + if (negative) { + theDoubleVal = -theDoubleVal; + } + theIntVal = static_cast(theDoubleVal); + theFloatVal = static_cast(theDoubleVal); + if (negative && fabs(theDoubleVal) <= 1e-15) { + theFloatVal = -theFloatVal; + } + name = line.substr(valStart, curIdx - valStart); + return TK_doubleconst; + } +} + +TokenKind MIRLexer::GetTokenWithPrefixDollar() { + // token with prefix '$' + char c = GetCharAtWithUpperCheck(curIdx); + if (utils::IsAlpha(c) || c == '_' || c == '$') { + GenName(); + return TK_gname; + } else { + // for error reporting. 
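+      // name keeps the '$' prefix together with the character that followed it
+      // (printLength = 2), so the diagnostic shows exactly what was scanned,
+      // e.g. "$3" for a global name that illegally starts with a digit.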
+ const uint32 printLength = 2; + name = line.substr(curIdx - 1, printLength); + return TK_invalid; + } +} + +TokenKind MIRLexer::GetTokenWithPrefixPercent() { + // token with prefix '%' + char c = GetCharAtWithUpperCheck(curIdx); + if (isdigit(c)) { + int valStart = curIdx - 1; + theIntVal = static_cast(HexCharToDigit(c)); + c = GetNextCurrentCharWithUpperCheck(); + while (isdigit(c)) { + theIntVal = (theIntVal * 10) + static_cast(HexCharToDigit(c)); + ASSERT(theIntVal >= 0, "int value overflow"); + c = GetNextCurrentCharWithUpperCheck(); + } + name = line.substr(valStart, curIdx - valStart); + return TK_preg; + } + if (utils::IsAlpha(c) || c == '_' || c == '$') { + GenName(); + return TK_lname; + } + if (c == '%' && utils::IsAlpha(GetCharAtWithUpperCheck(curIdx + 1))) { + ++curIdx; + GenName(); + return TK_specialreg; + } + return TK_invalid; +} + +TokenKind MIRLexer::GetTokenWithPrefixAmpersand() { + // token with prefix '&' + char c = GetCurrentCharWithUpperCheck(); + if (utils::IsAlpha(c) || c == '_') { + GenName(); + return TK_fname; + } + // for error reporting. + constexpr uint32 printLength = 2; + name = line.substr(curIdx - 1, printLength); + return TK_invalid; +} + +TokenKind MIRLexer::GetTokenWithPrefixAtOrCircumflex(char prefix) { + // token with prefix '@' or `^` + char c = GetCurrentCharWithUpperCheck(); + if (utils::IsAlnum(c) || c < 0 || c == '_' || c == '@' || c == '$' || c == '|') { + GenName(); + if (prefix == '@') { + return TK_label; + } + return TK_prntfield; + } + return TK_invalid; +} + +TokenKind MIRLexer::GetTokenWithPrefixExclamation() { + // token with prefix '!' + char c = GetCurrentCharWithUpperCheck(); + if (utils::IsAlpha(c)) { + GenName(); + return TK_typeparam; + } + // for error reporting. + const uint32 printLength = 2; + name = line.substr(curIdx - 1, printLength); + return TK_invalid; +} + +TokenKind MIRLexer::GetTokenWithPrefixQuotation() { + if (GetCharAtWithUpperCheck(curIdx + 1) == '\'') { + theIntVal = static_cast(GetCharAtWithUpperCheck(curIdx)); + curIdx += 2; + return TK_intconst; + } + return TK_invalid; +} + +TokenKind MIRLexer::GetTokenWithPrefixDoubleQuotation() { + uint32 startIdx = curIdx; + uint32 shift = 0; + // for \", skip the \ to leave " only internally + // and also for the pair of chars \ and n become '\n' etc. 
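+  // Decoding happens in place: `shift` counts how many source characters have been
+  // consumed by escapes so far, and each decoded byte is written `shift` positions to
+  // the left in `line`; e.g. the four input characters a \ n b collapse to the three
+  // bytes 'a', 0x0A, 'b' before the final substr below.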
+ char c = GetCurrentCharWithUpperCheck(); + while ((c != 0) && (c != '\"' || GetCharAtWithLowerCheck(curIdx - 1) == '\\')) { + if (GetCharAtWithLowerCheck(curIdx - 1) == '\\') { + shift++; + switch (c) { + case '"': + line[curIdx - shift] = c; + break; + case '\\': + line[curIdx - shift] = c; + // avoid 3rd \ in \\\ being treated as an escaped one + line[curIdx] = 0; + break; + case 'a': + line[curIdx - shift] = '\a'; + break; + case 'b': + line[curIdx - shift] = '\b'; + break; + case 't': + line[curIdx - shift] = '\t'; + break; + case 'n': + line[curIdx - shift] = '\n'; + break; + case 'v': + line[curIdx - shift] = '\v'; + break; + case 'f': + line[curIdx - shift] = '\f'; + break; + case 'r': + line[curIdx - shift] = '\r'; + break; + // support hex value \xNN + case 'x': { + const uint32 hexShift = 4; + const uint32 hexLength = 2; + uint8 c1 = Char2num(GetCharAtWithLowerCheck(curIdx + 1)); + uint8 c2 = Char2num(GetCharAtWithLowerCheck(curIdx + 2)); + uint32 cNew = static_cast(c1 << hexShift) + c2; + line[curIdx - shift] = cNew; + curIdx += hexLength; + shift += hexLength; + break; + } + // support oct value \NNN + case '0': + case '1': + case '2': + case '3': + case '4': + case '5': + case '6': + case '7': + case '8': + case '9': { + const uint32 octShift1 = 3; + const uint32 octShift2 = 6; + const uint32 octLength = 3; + ASSERT(curIdx + octLength < line.size(), "index out of range"); + uint32 cNew = + static_cast(static_cast(GetCharAtWithLowerCheck(curIdx + 1) - '0') << octShift2) + + static_cast(static_cast(GetCharAtWithLowerCheck(curIdx + 2) - '0') << octShift1) + + static_cast(GetCharAtWithLowerCheck(curIdx + 3) - '0'); + line[curIdx - shift] = cNew; + curIdx += octLength; + shift += octLength; + break; + } + default: + line[curIdx - shift] = '\\'; + --shift; + line[curIdx - shift] = c; + break; + } + } else if (shift != 0) { + line[curIdx - shift] = c; + } + c = GetNextCurrentCharWithUpperCheck(); + } + if (c != '\"') { + return TK_invalid; + } + // for empty string + if (startIdx == curIdx) { + name = ""; + } else { + name = line.substr(startIdx, curIdx - startIdx - shift); + } + ++curIdx; + return TK_string; +} + +TokenKind MIRLexer::GetTokenSpecial() { + --curIdx; + char c = GetCharAtWithLowerCheck(curIdx); + if (utils::IsAlpha(c) || c < 0 || c == '_') { + GenName(); + TokenKind tk = keywordMap[name]; + switch (tk) { + case TK_nanf: + theFloatVal = NAN; + return TK_floatconst; + case TK_nan: + theDoubleVal = NAN; + return TK_doubleconst; + case TK_inff: + theFloatVal = INFINITY; + return TK_floatconst; + case TK_inf: + theDoubleVal = INFINITY; + return TK_doubleconst; + default: + return tk; + } + } + MIR_ERROR("error in input file\n"); + return TK_eof; +} + +TokenKind MIRLexer::LexToken() { + // skip spaces + char c = GetCurrentCharWithUpperCheck(); + while (c == ' ' || c == '\t') { + c = GetNextCurrentCharWithUpperCheck(); + } + // check end of line + while (c == 0 || c == '#') { + if (c == '#') { // process comment contents + seenComments.push_back(line.substr(curIdx + 1, currentLineSize - curIdx - 1)); + } + if (ReadALine() < 0) { + return TK_eof; + } + ++lineNum; // a new line read. 
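+    // keep the debug info's line message in sync with the lexer so diagnostics
+    // emitted later refer to the MIR text line currently being scanned.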
+ module.GetDbgInfo()->UpdateMsg(lineNum, line.c_str()); + // skip spaces + c = GetCurrentCharWithUpperCheck(); + while (c == ' ' || c == '\t') { + c = GetNextCurrentCharWithUpperCheck(); + } + } + char curChar = c; + ++curIdx; + switch (curChar) { + case '\n': + return TK_newline; + case '(': + return TK_lparen; + case ')': + return TK_rparen; + case '{': + return TK_lbrace; + case '}': + return TK_rbrace; + case '[': + return TK_lbrack; + case ']': + return TK_rbrack; + case '<': + return TK_langle; + case '>': + return TK_rangle; + case '=': + return TK_eqsign; + case ',': + return TK_coma; + case ':': + return TK_colon; + case '*': + return TK_asterisk; + case '.': + if (GetCharAtWithUpperCheck(curIdx) == '.') { + const uint32 lenDotdot = 2; + curIdx += lenDotdot; + return TK_dotdotdot; + } + // fall thru for .9100 == 0.9100 + [[clang::fallthrough]]; + case '0': + case '1': + case '2': + case '3': + case '4': + case '5': + case '6': + case '7': + case '8': + case '9': + case '-': + --curIdx; + return GetConstVal(); + case '$': + return GetTokenWithPrefixDollar(); + case '%': + return GetTokenWithPrefixPercent(); + case '&': + return GetTokenWithPrefixAmpersand(); + case '@': + case '^': + return GetTokenWithPrefixAtOrCircumflex(curChar); + case '!': + return GetTokenWithPrefixExclamation(); + case '\'': + return GetTokenWithPrefixQuotation(); + case '\"': + return GetTokenWithPrefixDoubleQuotation(); + default: + return GetTokenSpecial(); + } +} + +TokenKind MIRLexer::NextToken() { + kind = LexToken(); + return kind; +} + +std::string MIRLexer::GetTokenString() const { + std::string temp; + switch (kind) { + case TK_gname: { + temp = "$"; + temp.append(name); + return temp; + } + case TK_lname: + case TK_preg: { + temp = "%"; + temp.append(name); + return temp; + } + case TK_specialreg: { + temp = "%%"; + temp.append(name); + return temp; + } + case TK_label: { + temp = "@"; + temp.append(name); + return temp; + } + case TK_prntfield: { + temp = "^"; + temp.append(name); + return temp; + } + case TK_intconst: { + temp = std::to_string(theIntVal); + return temp; + } + case TK_floatconst: { + temp = std::to_string(theFloatVal); + return temp; + } + case TK_doubleconst: { + temp = std::to_string(theDoubleVal); + return temp; + } + // misc. 
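+    // the punctuation tokens below print as their literal spelling; newline is
+    // rendered escaped ("\n") so the dumped token stays on a single line.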
+ case TK_newline: { + temp = "\\n"; + return temp; + } + case TK_lparen: { + temp = "("; + return temp; + } + case TK_rparen: { + temp = ")"; + return temp; + } + case TK_lbrace: { + temp = "{"; + return temp; + } + case TK_rbrace: { + temp = "}"; + return temp; + } + case TK_lbrack: { + temp = "["; + return temp; + } + case TK_rbrack: { + temp = "]"; + return temp; + } + case TK_langle: { + temp = "<"; + return temp; + } + case TK_rangle: { + temp = ">"; + return temp; + } + case TK_eqsign: { + temp = "="; + return temp; + } + case TK_coma: { + temp = ","; + return temp; + } + case TK_dotdotdot: { + temp = "..."; + return temp; + } + case TK_colon: { + temp = ":"; + return temp; + } + case TK_asterisk: { + temp = "*"; + return temp; + } + case TK_string: { + temp = "\""; + temp.append(name); + temp.append("\""); + return temp; + } + default: + temp = "invalid token"; + return temp; + } +} +} // namespace maple diff --git a/src/mapleall/maple_ir/src/mir_builder.cpp b/src/mapleall/maple_ir/src/mir_builder.cpp new file mode 100644 index 0000000000000000000000000000000000000000..bff9422280f5b92488226783dac4a095de9c3429 --- /dev/null +++ b/src/mapleall/maple_ir/src/mir_builder.cpp @@ -0,0 +1,1182 @@ +/* + * Copyright (c) [2019-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "mir_builder.h" +#include "mir_symbol_builder.h" + +namespace maple { +// This is for compiler-generated metadata 1-level struct +void MIRBuilder::AddIntFieldConst(const MIRStructType &sType, MIRAggConst &newConst, + uint32 fieldID, int64 constValue) const { + auto *fieldConst = GlobalTables::GetIntConstTable().GetOrCreateIntConst( + static_cast(constValue), *sType.GetElemType(fieldID - 1)); + newConst.AddItem(fieldConst, fieldID); +} + +// This is for compiler-generated metadata 1-level struct +void MIRBuilder::AddAddrofFieldConst(const MIRStructType &structType, MIRAggConst &newConst, uint32 fieldID, + const MIRSymbol &fieldSymbol) { + AddrofNode *fieldExpr = CreateExprAddrof(0, fieldSymbol, mirModule->GetMemPool()); + auto *fieldConst = mirModule->GetMemPool()->New(fieldExpr->GetStIdx(), fieldExpr->GetFieldID(), + *structType.GetElemType(fieldID - 1)); + newConst.AddItem(fieldConst, fieldID); +} + +// This is for compiler-generated metadata 1-level struct +void MIRBuilder::AddAddroffuncFieldConst(const MIRStructType &structType, MIRAggConst &newConst, uint32 fieldID, + const MIRSymbol &funcSymbol) { + MIRConst *fieldConst = nullptr; + MIRFunction *vMethod = funcSymbol.GetFunction(); + if (vMethod->IsAbstract()) { + fieldConst = GlobalTables::GetIntConstTable().GetOrCreateIntConst(0, *structType.GetElemType(fieldID - 1)); + } else { + AddroffuncNode *addrofFuncExpr = + CreateExprAddroffunc(funcSymbol.GetFunction()->GetPuidx(), mirModule->GetMemPool()); + fieldConst = mirModule->GetMemPool()->New(addrofFuncExpr->GetPUIdx(), + *structType.GetElemType(fieldID - 1)); + } + newConst.AddItem(fieldConst, fieldID); +} + +// fieldID is continuously being updated during traversal; +// when the field is found, its field id is returned via fieldID +bool MIRBuilder::TraverseToNamedField(MIRStructType &structType, GStrIdx nameIdx, uint32 &fieldID) { + TyIdx tid(0); + return TraverseToNamedFieldWithTypeAndMatchStyle(structType, nameIdx, tid, fieldID, kMatchAnyField); +} + +// traverse parent first but match self first. 
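+// Field IDs follow a pre-order numbering: the parent sub-object (and, recursively, its
+// fields) is counted before this type's own fields, and struct-typed members are
+// expanded in place. The own-field match runs after the parent recursion and overwrites
+// `idx`, so a name defined in both parent and child resolves to the child's field.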
+void MIRBuilder::TraverseToNamedFieldWithType(MIRStructType &structType, GStrIdx nameIdx, TyIdx typeIdx, + uint32 &fieldID, uint32 &idx) { + if (structType.IsIncomplete()) { + (void)incompleteTypeRefedSet.insert(structType.GetTypeIndex()); + } + // process parent + if (structType.GetKind() == kTypeClass || structType.GetKind() == kTypeClassIncomplete) { + auto &classType = static_cast(structType); + MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(classType.GetParentTyIdx()); + auto *parentType = static_cast(type); + if (parentType != nullptr) { + ++fieldID; + TraverseToNamedFieldWithType(*parentType, nameIdx, typeIdx, fieldID, idx); + } + } + for (uint32 fieldIdx = 0; fieldIdx < structType.GetFieldsSize(); ++fieldIdx) { + ++fieldID; + TyIdx fieldTyIdx = structType.GetFieldsElemt(fieldIdx).second.first; + MIRType *fieldType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(fieldTyIdx); + if (structType.GetFieldsElemt(fieldIdx).first == nameIdx) { + if (typeIdx == 0u || fieldTyIdx == typeIdx) { + idx = fieldID; + continue; + } + // for pointer type, check their pointed type + MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(typeIdx); + if (type->IsOfSameType(*fieldType)) { + idx = fieldID; + } + } + + if (fieldType->IsStructType()) { + auto *subStructType = static_cast(fieldType); + TraverseToNamedFieldWithType(*subStructType, nameIdx, typeIdx, fieldID, idx); + } + } +} + +// fieldID is continuously being updated during traversal; +// when the field is found, its field id is returned via fieldID +// typeidx: TyIdx(0) means do not check types. +// matchstyle: 0: do not match but traverse to update fieldID +// 1: match top level field only +// 2: match any field +// 4: traverse parent first +// 0xc: do not match but traverse to update fieldID, traverse parent first, found in child +bool MIRBuilder::TraverseToNamedFieldWithTypeAndMatchStyle(MIRStructType &structType, GStrIdx nameIdx, TyIdx typeIdx, + uint32 &fieldID, unsigned int matchStyle) { + if (structType.IsIncomplete()) { + (void)incompleteTypeRefedSet.insert(structType.GetTypeIndex()); + } + if ((matchStyle & kParentFirst) != 0) { + // process parent + if ((structType.GetKind() != kTypeClass) && (structType.GetKind() != kTypeClassIncomplete)) { + return false; + } + + auto &classType = static_cast(structType); + MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(classType.GetParentTyIdx()); + auto *parentType = static_cast(type); + if (parentType != nullptr) { + ++fieldID; + if (matchStyle == (kFoundInChild | kParentFirst | kUpdateFieldID)) { + matchStyle = kParentFirst; + uint32 idxBackup = nameIdx; + nameIdx.reset(); + // do not match but traverse to update fieldID, traverse parent first + TraverseToNamedFieldWithTypeAndMatchStyle(*parentType, nameIdx, typeIdx, fieldID, matchStyle); + nameIdx.reset(idxBackup); + } else if (TraverseToNamedFieldWithTypeAndMatchStyle(*parentType, nameIdx, typeIdx, fieldID, matchStyle)) { + return true; + } + } + } + for (uint32 fieldIdx = 0; fieldIdx < structType.GetFieldsSize(); ++fieldIdx) { + ++fieldID; + TyIdx fieldTyIdx = structType.GetFieldsElemt(fieldIdx).second.first; + MIRType *fieldType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(fieldTyIdx); + ASSERT(fieldType != nullptr, "fieldType is null"); + if (matchStyle && structType.GetFieldsElemt(fieldIdx).first == nameIdx) { + if (typeIdx == 0u || fieldTyIdx == typeIdx || + fieldType->IsOfSameType(*GlobalTables::GetTypeTable().GetTypeFromTyIdx(typeIdx))) { + return true; + } + } + unsigned int style = 
matchStyle & kMatchAnyField; + if (fieldType->IsStructType()) { + auto *subStructType = static_cast(fieldType); + if (TraverseToNamedFieldWithTypeAndMatchStyle(*subStructType, nameIdx, typeIdx, fieldID, style)) { + return true; + } + } + } + return false; +} + +FieldID MIRBuilder::GetStructFieldIDFromNameAndType(MIRType &type, const std::string &name, TyIdx idx, + unsigned int matchStyle) { + auto &structType = static_cast(type); + uint32 fieldID = 0; + GStrIdx strIdx = GetStringIndex(name); + if (TraverseToNamedFieldWithTypeAndMatchStyle(structType, strIdx, idx, fieldID, matchStyle)) { + return fieldID; + } + return 0; +} + +FieldID MIRBuilder::GetStructFieldIDFromNameAndType(MIRType &type, const std::string &name, TyIdx idx) { + return GetStructFieldIDFromNameAndType(type, name, idx, kMatchAnyField); +} + +FieldID MIRBuilder::GetStructFieldIDFromNameAndTypeParentFirst(MIRType &type, const std::string &name, TyIdx idx) { + return GetStructFieldIDFromNameAndType(type, name, idx, kParentFirst); +} + +FieldID MIRBuilder::GetStructFieldIDFromNameAndTypeParentFirstFoundInChild(MIRType &type, const std::string &name, + TyIdx idx) { + // do not match but traverse to update fieldID, traverse parent first, found in child + return GetStructFieldIDFromNameAndType(type, name, idx, kFoundInChild | kParentFirst | kUpdateFieldID); +} + +FieldID MIRBuilder::GetStructFieldIDFromFieldName(MIRType &type, const std::string &name) { + return GetStructFieldIDFromNameAndType(type, name, TyIdx(0), kMatchAnyField); +} + +FieldID MIRBuilder::GetStructFieldIDFromFieldNameParentFirst(MIRType *type, const std::string &name) { + if (type == nullptr) { + return 0; + } + return GetStructFieldIDFromNameAndType(*type, name, TyIdx(0), kParentFirst); +} + +void MIRBuilder::SetStructFieldIDFromFieldName(MIRStructType &structType, const std::string &name, GStrIdx newStrIdx, + const MIRType &newFieldType) const { + uint32 fieldID = 0; + GStrIdx strIdx = GetStringIndex(name); + while (true) { + if (structType.GetElemStrIdx(fieldID) == strIdx) { + if (newStrIdx != 0u) { + structType.SetElemStrIdx(fieldID, newStrIdx); + } + structType.SetElemtTyIdx(fieldID, newFieldType.GetTypeIndex()); + return; + } + ++fieldID; + } +} + +// create a function named str +MIRFunction *MIRBuilder::GetOrCreateFunction(const std::string &str, TyIdx retTyIdx) { + GStrIdx strIdx = GetStringIndex(str); + MIRSymbol *funcSt = nullptr; + if (strIdx != 0u) { + funcSt = GlobalTables::GetGsymTable().GetSymbolFromStrIdx(strIdx); + if (funcSt == nullptr) { + funcSt = CreateSymbol(TyIdx(0), strIdx, kStFunc, kScText, nullptr, kScopeGlobal); + } else { + ASSERT(funcSt->GetSKind() == kStFunc, "runtime check error"); + return funcSt->GetFunction(); + } + } else { + strIdx = GetOrCreateStringIndex(str); + funcSt = CreateSymbol(TyIdx(0), strIdx, kStFunc, kScText, nullptr, kScopeGlobal); + } + auto *fn = mirModule->GetMemPool()->New(mirModule, funcSt->GetStIdx()); + fn->SetPuidx(GlobalTables::GetFunctionTable().GetFuncTable().size()); + MIRFuncType funcType; + funcType.SetRetTyIdx(retTyIdx); + auto funcTyIdx = GlobalTables::GetTypeTable().GetOrCreateMIRType(&funcType); + auto *funcTypeInTypeTable = static_cast(GlobalTables::GetTypeTable().GetTypeFromTyIdx(funcTyIdx)); + fn->SetMIRFuncType(funcTypeInTypeTable); + fn->SetReturnTyIdx(retTyIdx); + GlobalTables::GetFunctionTable().GetFuncTable().push_back(fn); + funcSt->SetFunction(fn); + funcSt->SetTyIdx(funcTyIdx); + return fn; +} + +MIRFunction *MIRBuilder::GetFunctionFromSymbol(const MIRSymbol &funcSymbol) const { + 
ASSERT(funcSymbol.GetSKind() == kStFunc, "Symbol %s is not a function symbol", funcSymbol.GetName().c_str()); + return funcSymbol.GetFunction(); +} + +MIRFunction *MIRBuilder::GetFunctionFromName(const std::string &str) { + auto *funcSymbol = + GlobalTables::GetGsymTable().GetSymbolFromStrIdx(GlobalTables::GetStrTable().GetStrIdxFromName(str)); + return funcSymbol != nullptr ? GetFunctionFromSymbol(*funcSymbol) : nullptr; +} + +MIRFunction *MIRBuilder::GetFunctionFromStidx(StIdx stIdx) { + auto *funcSymbol = GlobalTables::GetGsymTable().GetSymbolFromStidx(stIdx.Idx()); + return funcSymbol != nullptr ? GetFunctionFromSymbol(*funcSymbol) : nullptr; +} + +MIRFunction *MIRBuilder::CreateFunction(const std::string &name, const MIRType &returnType, const ArgVector &arguments, + bool isVarg, bool createBody) const { + MIRSymbol *funcSymbol = GlobalTables::GetGsymTable().CreateSymbol(kScopeGlobal); + GStrIdx strIdx = GetOrCreateStringIndex(name); + funcSymbol->SetNameStrIdx(strIdx); + if (!GlobalTables::GetGsymTable().AddToStringSymbolMap(*funcSymbol)) { + return nullptr; + } + funcSymbol->SetStorageClass(kScText); + funcSymbol->SetSKind(kStFunc); + auto *fn = mirModule->GetMemPool()->New(mirModule, funcSymbol->GetStIdx()); + fn->SetPuidx(GlobalTables::GetFunctionTable().GetFuncTable().size()); + GlobalTables::GetFunctionTable().GetFuncTable().push_back(fn); + std::vector funcVecType; + std::vector funcVecAttrs; + for (size_t i = 0; i < arguments.size(); ++i) { + MIRType *ty = arguments[i].second; + FormalDef formalDef(GetOrCreateStringIndex(arguments[i].first.c_str()), nullptr, ty->GetTypeIndex(), TypeAttrs()); + fn->GetFormalDefVec().push_back(formalDef); + funcVecType.push_back(ty->GetTypeIndex()); + funcVecAttrs.push_back(TypeAttrs()); + if (fn->GetSymTab() != nullptr && formalDef.formalSym != nullptr) { + (void)fn->GetSymTab()->AddToStringSymbolMap(*formalDef.formalSym); + } + } + funcSymbol->SetTyIdx(GlobalTables::GetTypeTable().GetOrCreateFunctionType( + returnType.GetTypeIndex(), funcVecType, funcVecAttrs, isVarg)->GetTypeIndex()); + auto *funcType = static_cast(funcSymbol->GetType()); + fn->SetMIRFuncType(funcType); + funcSymbol->SetFunction(fn); + if (createBody) { + fn->NewBody(); + } + return fn; +} + +MIRFunction *MIRBuilder::CreateFunction(StIdx stIdx, bool addToTable) const { + auto *fn = mirModule->GetMemPool()->New(mirModule, stIdx); + fn->SetPuidx(GlobalTables::GetFunctionTable().GetFuncTable().size()); + if (addToTable) { + GlobalTables::GetFunctionTable().GetFuncTable().push_back(fn); + } + + auto *funcType = mirModule->GetMemPool()->New(); + fn->SetMIRFuncType(funcType); + return fn; +} + +MIRSymbol *MIRBuilder::GetOrCreateGlobalDecl(const std::string &str, TyIdx tyIdx, bool &created) const { + GStrIdx strIdx = GetStringIndex(str); + if (strIdx != 0u) { + StIdx stIdx = GlobalTables::GetGsymTable().GetStIdxFromStrIdx(strIdx); + if (stIdx.Idx() != 0) { + created = false; + return GlobalTables::GetGsymTable().GetSymbolFromStidx(stIdx.Idx()); + } + } + created = true; + strIdx = GetOrCreateStringIndex(str); + MIRSymbol *st = GlobalTables::GetGsymTable().CreateSymbol(kScopeGlobal); + st->SetNameStrIdx(strIdx); + st->SetTyIdx(tyIdx); + (void)GlobalTables::GetGsymTable().AddToStringSymbolMap(*st); + return st; +} + +MIRSymbol *MIRBuilder::GetOrCreateLocalDecl(const std::string &str, TyIdx tyIdx, MIRSymbolTable &symbolTable, + bool &created) const { + GStrIdx strIdx = GetStringIndex(str); + if (strIdx != 0u) { + StIdx stIdx = symbolTable.GetStIdxFromStrIdx(strIdx); + if (stIdx.Idx() 
!= 0) { + created = false; + return symbolTable.GetSymbolFromStIdx(stIdx.Idx()); + } + } + created = true; + strIdx = GetOrCreateStringIndex(str); + MIRSymbol *st = symbolTable.CreateSymbol(kScopeLocal); + ASSERT(st != nullptr, "null ptr check"); + st->SetNameStrIdx(strIdx); + st->SetTyIdx(tyIdx); + (void)symbolTable.AddToStringSymbolMap(*st); + return st; +} + +MIRSymbol *MIRBuilder::GetOrCreateDeclInFunc(const std::string &str, const MIRType &type, MIRFunction &func) const { + MIRSymbolTable *symbolTable = func.GetSymTab(); + ASSERT(symbolTable != nullptr, "symbol_table is null"); + bool isCreated = false; + MIRSymbol *st = GetOrCreateLocalDecl(str, type.GetTypeIndex(), *symbolTable, isCreated); + if (isCreated) { + st->SetStorageClass(kScAuto); + st->SetSKind(kStVar); + } + return st; +} + +MIRSymbol *MIRBuilder::GetOrCreateLocalDecl(const std::string &str, const MIRType &type) { + MIRFunction *currentFunc = GetCurrentFunction(); + CHECK_FATAL(currentFunc != nullptr, "null ptr check"); + return GetOrCreateDeclInFunc(str, type, *currentFunc); +} + +MIRSymbol *MIRBuilder::CreateLocalDecl(const std::string &str, const MIRType &type) const { + MIRFunction *currentFunctionInner = GetCurrentFunctionNotNull(); + return MIRSymbolBuilder::Instance().CreateLocalDecl(*currentFunctionInner->GetSymTab(), + GetOrCreateStringIndex(str), type); +} + +MIRSymbol *MIRBuilder::GetGlobalDecl(const std::string &str) const { + return MIRSymbolBuilder::Instance().GetGlobalDecl(GetStringIndex(str)); +} + +MIRSymbol *MIRBuilder::GetLocalDecl(const std::string &str) const { + MIRFunction *currentFunctionInner = GetCurrentFunctionNotNull(); + return MIRSymbolBuilder::Instance().GetLocalDecl(*currentFunctionInner->GetSymTab(), GetStringIndex(str)); +} + +// search the scope hierarchy +MIRSymbol *MIRBuilder::GetDecl(const std::string &str) const { + GStrIdx strIdx = GetStringIndex(str); + MIRSymbol *sym = nullptr; + if (strIdx != 0u) { + // try to find the decl in local scope first + MIRFunction *currentFunctionInner = GetCurrentFunction(); + if (currentFunctionInner != nullptr) { + sym = currentFunctionInner->GetSymTab()->GetSymbolFromStrIdx(strIdx); + } + if (sym == nullptr) { + sym = GlobalTables::GetGsymTable().GetSymbolFromStrIdx(strIdx); + } + } + return sym; +} + +MIRSymbol *MIRBuilder::CreateGlobalDecl(const std::string &str, const MIRType &type, MIRStorageClass sc) const { + return MIRSymbolBuilder::Instance().CreateGlobalDecl(GetOrCreateStringIndex(str), type, sc); +} + +MIRSymbol *MIRBuilder::GetOrCreateGlobalDecl(const std::string &str, const MIRType &type) const { + bool isCreated = false; + MIRSymbol *st = GetOrCreateGlobalDecl(str, type.GetTypeIndex(), isCreated); + ASSERT(st != nullptr, "null ptr check"); + if (isCreated) { + st->SetStorageClass(kScGlobal); + st->SetSKind(kStVar); + } else { + // Existing symbol may come from anther module. We need to register it + // in the current module so that per-module mpl file is self-sustained. 
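+    // (the symbol itself already lives in the global symbol table; this call only
+    // records the reference in the current module, as explained above)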
+ mirModule->AddSymbol(st); + } + MIRConst *cst = GlobalTables::GetConstPool().GetConstFromPool(st->GetNameStrIdx()); + if (cst != nullptr) { + st->SetKonst(cst); + } + return st; +} + +MIRSymbol *MIRBuilder::GetSymbolFromEnclosingScope(StIdx stIdx) const { + if (stIdx.FullIdx() == 0) { + return nullptr; + } + if (stIdx.Islocal()) { + MIRFunction *fun = GetCurrentFunctionNotNull(); + MIRSymbol *st = fun->GetSymTab()->GetSymbolFromStIdx(stIdx.Idx()); + if (st != nullptr) { + return st; + } + } + return GlobalTables::GetGsymTable().GetSymbolFromStidx(stIdx.Idx()); +} + +MIRSymbol *MIRBuilder::GetSymbol(TyIdx tyIdx, const std::string &name, MIRSymKind mClass, MIRStorageClass sClass, + uint8 scpID, bool sameType = false) const { + return GetSymbol(tyIdx, GetOrCreateStringIndex(name), mClass, sClass, scpID, sameType); +} + +// when sametype is true, it means match everything the of the symbol +MIRSymbol *MIRBuilder::GetSymbol(TyIdx tyIdx, GStrIdx strIdx, MIRSymKind mClass, MIRStorageClass sClass, + uint8 scpID, bool sameType = false) const { + if (scpID != kScopeGlobal) { + ERR(kLncErr, "not yet implemented"); + return nullptr; + } + return MIRSymbolBuilder::Instance().GetSymbol(tyIdx, strIdx, mClass, sClass, sameType); +} + +MIRSymbol *MIRBuilder::GetOrCreateSymbol(TyIdx tyIdx, const std::string &name, MIRSymKind mClass, + MIRStorageClass sClass, MIRFunction *func, uint8 scpID, + bool sametype = false) const { + return GetOrCreateSymbol(tyIdx, GetOrCreateStringIndex(name), mClass, sClass, func, scpID, sametype); +} + +MIRSymbol *MIRBuilder::GetOrCreateSymbol(TyIdx tyIdx, GStrIdx strIdx, MIRSymKind mClass, MIRStorageClass sClass, + MIRFunction *func, uint8 scpID, bool sameType = false) const { + if (MIRSymbol *st = GetSymbol(tyIdx, strIdx, mClass, sClass, scpID, sameType)) { + return st; + } + return CreateSymbol(tyIdx, strIdx, mClass, sClass, func, scpID); +} + +// when func is null, create global symbol, otherwise create local symbol +MIRSymbol *MIRBuilder::CreateSymbol(TyIdx tyIdx, const std::string &name, MIRSymKind mClass, MIRStorageClass sClass, + MIRFunction *func, uint8 scpID) const { + return CreateSymbol(tyIdx, GetOrCreateStringIndex(name), mClass, sClass, func, scpID); +} + +// when func is null, create global symbol, otherwise create local symbol +MIRSymbol *MIRBuilder::CreateSymbol(TyIdx tyIdx, GStrIdx strIdx, MIRSymKind mClass, MIRStorageClass sClass, + MIRFunction *func, uint8 scpID) const { + return MIRSymbolBuilder::Instance().CreateSymbol(tyIdx, strIdx, mClass, sClass, func, scpID); +} + +MIRSymbol *MIRBuilder::CreateConstStringSymbol(const std::string &symbolName, const std::string &content) { + auto elemPrimType = PTY_u8; + MIRType *type = GlobalTables::GetTypeTable().GetPrimType(elemPrimType); + uint32 sizeIn = static_cast(content.length()); + MIRType *arrayTypeWithSize = GlobalTables::GetTypeTable().GetOrCreateArrayType( + *GlobalTables::GetTypeTable().GetPrimType(elemPrimType), 1, &sizeIn); + + if (GetLocalDecl(symbolName)) { + return GetLocalDecl(symbolName); + } + MIRSymbol *arrayVar = GetOrCreateGlobalDecl(symbolName, *arrayTypeWithSize); + arrayVar->SetAttr(ATTR_readonly); + arrayVar->SetStorageClass(kScFstatic); + MIRAggConst *val = mirModule->GetMemPool()->New(*mirModule, *arrayTypeWithSize); + for (uint32 i = 0; i < sizeIn; ++i) { + MIRConst *cst = mirModule->GetMemPool()->New(content[i], *type); + val->PushBack(cst); + } + // This interface is only for string literal, 0 is added to the end of the string. 
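+  // e.g. content "abc" produces the aggregate { 'a', 'b', 'c', 0 }: one element per
+  // character plus the terminating NUL appended below.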
+ MIRConst *cst0 = mirModule->GetMemPool()->New(0, *type); + val->PushBack(cst0); + arrayVar->SetKonst(val); + return arrayVar; +} + +MIRSymbol *MIRBuilder::CreatePregFormalSymbol(TyIdx tyIdx, PregIdx pRegIdx, MIRFunction &func) const { + return MIRSymbolBuilder::Instance().CreatePregFormalSymbol(tyIdx, pRegIdx, func); +} + +ConstvalNode *MIRBuilder::CreateConstval(MIRConst *mirConst) { + return GetCurrentFuncCodeMp()->New(mirConst->GetType().GetPrimType(), mirConst); +} + +ConstvalNode *MIRBuilder::CreateIntConst(uint64 val, PrimType pty) { + auto *mirConst = + GlobalTables::GetIntConstTable().GetOrCreateIntConst(val, *GlobalTables::GetTypeTable().GetPrimType(pty)); + return GetCurrentFuncCodeMp()->New(pty, mirConst); +} + +ConstvalNode *MIRBuilder::CreateFloatConst(float val) { + auto *mirConst = GetCurrentFuncDataMp()->New( + val, *GlobalTables::GetTypeTable().GetPrimType(PTY_f32)); + return GetCurrentFuncCodeMp()->New(PTY_f32, mirConst); +} + +ConstvalNode *MIRBuilder::CreateDoubleConst(double val) { + auto *mirConst = GetCurrentFuncDataMp()->New( + val, *GlobalTables::GetTypeTable().GetPrimType(PTY_f64)); + return GetCurrentFuncCodeMp()->New(PTY_f64, mirConst); +} + +ConstvalNode *MIRBuilder::CreateFloat128Const(const uint64 *val) { + auto *mirConst = GetCurrentFuncDataMp()->New( + *val, *GlobalTables::GetTypeTable().GetPrimType(PTY_f128)); + return GetCurrentFuncCodeMp()->New(PTY_f128, mirConst); +} + +ConstvalNode *MIRBuilder::GetConstInt(MemPool &memPool, int val) const { + auto *mirConst = GlobalTables::GetIntConstTable().GetOrCreateIntConst( + static_cast(val), *GlobalTables::GetTypeTable().GetInt64()); + return memPool.New(PTY_i32, mirConst); +} + +ConstvalNode *MIRBuilder::CreateAddrofConst(BaseNode &node) { + ASSERT(node.GetOpCode() == OP_addrof, "illegal op for addrof const"); + MIRFunction *currentFunctionInner = GetCurrentFunctionNotNull(); + + // determine the type of 'node' and create a pointer type, accordingly + auto &aNode = static_cast(node); + const MIRSymbol *var = currentFunctionInner->GetLocalOrGlobalSymbol(aNode.GetStIdx()); + TyIdx ptyIdx = var->GetTyIdx(); + MIRPtrType ptrType(ptyIdx); + ptyIdx = GlobalTables::GetTypeTable().GetOrCreateMIRType(&ptrType); + MIRType &exprType = *GlobalTables::GetTypeTable().GetTypeFromTyIdx(ptyIdx); + auto *temp = mirModule->GetMemPool()->New(aNode.GetStIdx(), aNode.GetFieldID(), exprType); + return GetCurrentFuncCodeMp()->New(PTY_ptr, temp); +} + +ConstvalNode *MIRBuilder::CreateAddroffuncConst(const BaseNode &node) { + ASSERT(node.GetOpCode() == OP_addroffunc, "illegal op for addroffunc const"); + + const auto &aNode = static_cast(node); + MIRFunction *f = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(aNode.GetPUIdx()); + TyIdx ptyIdx = f->GetFuncSymbol()->GetTyIdx(); + MIRPtrType ptrType(ptyIdx); + ptyIdx = GlobalTables::GetTypeTable().GetOrCreateMIRType(&ptrType); + MIRType *exprType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(ptyIdx); + auto *mirConst = mirModule->GetMemPool()->New(aNode.GetPUIdx(), *exprType); + return GetCurrentFuncCodeMp()->New(PTY_ptr, mirConst); +} + +ConstvalNode *MIRBuilder::CreateStrConst(const BaseNode &node) { + ASSERT(node.GetOpCode() == OP_conststr, "illegal op for conststr const"); + UStrIdx strIdx = static_cast(node).GetStrIdx(); + CHECK_FATAL(PTY_u8 < GlobalTables::GetTypeTable().GetTypeTable().size(), + "index is out of range in MIRBuilder::CreateStrConst"); + TyIdx tyIdx = GlobalTables::GetTypeTable().GetTypeFromTyIdx(TyIdx(PTY_u8))->GetTypeIndex(); + MIRPtrType ptrType(tyIdx); + 
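+  // the conststr constant is typed as a pointer to the u8 element type, and the
+  // returned constval node carries PTY_ptr.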
tyIdx = GlobalTables::GetTypeTable().GetOrCreateMIRType(&ptrType); + MIRType *exprType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx); + auto *mirConst = mirModule->GetMemPool()->New(strIdx, *exprType); + return GetCurrentFuncCodeMp()->New(PTY_ptr, mirConst); +} + +ConstvalNode *MIRBuilder::CreateStr16Const(const BaseNode &node) { + ASSERT(node.GetOpCode() == OP_conststr16, "illegal op for conststr16 const"); + U16StrIdx strIdx = static_cast(node).GetStrIdx(); + CHECK_FATAL(PTY_u16 < GlobalTables::GetTypeTable().GetTypeTable().size(), + "index out of range in MIRBuilder::CreateStr16Const"); + TyIdx ptyIdx = GlobalTables::GetTypeTable().GetTypeFromTyIdx(TyIdx(PTY_u16))->GetTypeIndex(); + MIRPtrType ptrType(ptyIdx); + ptyIdx = GlobalTables::GetTypeTable().GetOrCreateMIRType(&ptrType); + MIRType *exprType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(ptyIdx); + auto *mirConst = mirModule->GetMemPool()->New(strIdx, *exprType); + return GetCurrentFuncCodeMp()->New(PTY_ptr, mirConst); +} + +SizeoftypeNode *MIRBuilder::CreateExprSizeoftype(const MIRType &type) { + return GetCurrentFuncCodeMp()->New(PTY_u32, type.GetTypeIndex()); +} + +FieldsDistNode *MIRBuilder::CreateExprFieldsDist(const MIRType &type, FieldID field1, FieldID field2) { + return GetCurrentFuncCodeMp()->New(PTY_i32, type.GetTypeIndex(), field1, field2); +} + +AddrofNode *MIRBuilder::CreateExprAddrof(FieldID fieldID, const MIRSymbol &symbol, MemPool *memPool) { + return CreateExprAddrof(fieldID, symbol.GetStIdx(), memPool); +} + +AddrofNode *MIRBuilder::CreateExprAddrof(FieldID fieldID, StIdx symbolStIdx, MemPool *memPool) { + if (memPool == nullptr) { + memPool = GetCurrentFuncCodeMp(); + } + return memPool->New(OP_addrof, PTY_ptr, symbolStIdx, fieldID); +} + +AddroffuncNode *MIRBuilder::CreateExprAddroffunc(PUIdx puIdx, MemPool *memPool) { + if (memPool == nullptr) { + memPool = GetCurrentFuncCodeMp(); + } + return memPool->New(PTY_ptr, puIdx); +} + +AddrofNode *MIRBuilder::CreateExprDread(const MIRType &type, FieldID fieldID, const MIRSymbol &symbol) { + return CreateExprDread(type.GetPrimType(), fieldID, symbol); +} + +AddrofNode *MIRBuilder::CreateExprDread(PrimType ptyp, FieldID fieldID, const MIRSymbol &symbol) { + auto *node = GetCurrentFuncCodeMp()->New(OP_dread, kPtyInvalid, symbol.GetStIdx(), fieldID); + node->SetPrimType(GetRegPrimType(ptyp)); + return node; +} + +RegreadNode *MIRBuilder::CreateExprRegread(PrimType pty, PregIdx regIdx) { + return GetCurrentFuncCodeMp()->New(pty, regIdx); +} + +AddrofNode *MIRBuilder::CreateExprDread(MIRType &type, MIRSymbol &symbol) { + return CreateExprDread(type, 0, symbol); +} + +AddrofNode *MIRBuilder::CreateExprDread(MIRSymbol &symbol, uint16 fieldID) { + if (fieldID == 0) { + return CreateExprDread(symbol); + } + ASSERT(false, "NYI"); + return nullptr; +} + +AddrofNode *MIRBuilder::CreateExprDread(MIRSymbol &symbol) { + return CreateExprDread(*symbol.GetType(), 0, symbol); +} + +AddrofNode *MIRBuilder::CreateExprDread(PregIdx pregID, PrimType pty) { + auto *dread = GetCurrentFuncCodeMp()->New(OP_dread, pty); + dread->SetStFullIdx(pregID); + return dread; +} + +DreadoffNode *MIRBuilder::CreateExprDreadoff(Opcode op, PrimType pty, const MIRSymbol &symbol, int32 offset) { + DreadoffNode *node = GetCurrentFuncCodeMp()->New(op, pty); + node->stIdx = symbol.GetStIdx(); + node->offset = offset; + return node; +} + +IreadNode *MIRBuilder::CreateExprIread(PrimType primType, TyIdx ptrTypeIdx, FieldID fieldID, BaseNode *addr) { + return GetCurrentFuncCodeMp()->New(OP_iread, 
GetRegPrimType(primType), ptrTypeIdx, fieldID, addr); +} + +IreadNode *MIRBuilder::CreateExprIread(const MIRType &returnType, const MIRType &ptrType, FieldID fieldID, + BaseNode *addr) { + TyIdx returnTypeIdx = returnType.GetTypeIndex(); + CHECK(returnTypeIdx < GlobalTables::GetTypeTable().GetTypeTable().size(), + "index out of range in MIRBuilder::CreateExprIread"); + ASSERT(fieldID != 0 || ptrType.GetPrimType() != PTY_agg, + "Error: Fieldid should not be 0 when trying to iread a field from type "); + return CreateExprIread(returnType.GetPrimType(), ptrType.GetTypeIndex(), fieldID, addr); +} + +IreadoffNode *MIRBuilder::CreateExprIreadoff(PrimType pty, int32 offset, BaseNode *opnd0) { + return GetCurrentFuncCodeMp()->New(pty, opnd0, offset); +} + +IreadFPoffNode *MIRBuilder::CreateExprIreadFPoff(PrimType pty, int32 offset) { + return GetCurrentFuncCodeMp()->New(pty, offset); +} + +IaddrofNode *MIRBuilder::CreateExprIaddrof(const MIRType &returnType, const MIRType &ptrType, FieldID fieldID, + BaseNode *addr) { + IaddrofNode *iAddrOfNode = CreateExprIread(returnType, ptrType, fieldID, addr); + iAddrOfNode->SetOpCode(OP_iaddrof); + return iAddrOfNode; +} + +IaddrofNode *MIRBuilder::CreateExprIaddrof(PrimType returnTypePty, TyIdx ptrTypeIdx, FieldID fieldID, BaseNode *addr) { + return GetCurrentFuncCodeMp()->New(OP_iaddrof, returnTypePty, ptrTypeIdx, fieldID, addr); +} + +UnaryNode *MIRBuilder::CreateExprUnary(Opcode opcode, const MIRType &type, BaseNode *opnd) { + return GetCurrentFuncCodeMp()->New(opcode, type.GetPrimType(), opnd); +} + +GCMallocNode *MIRBuilder::CreateExprGCMalloc(Opcode opcode, const MIRType &pType, const MIRType &type) { + return GetCurrentFuncCodeMp()->New(opcode, pType.GetPrimType(), type.GetTypeIndex()); +} + +JarrayMallocNode *MIRBuilder::CreateExprJarrayMalloc(Opcode opcode, const MIRType &pType, const MIRType &type, + BaseNode *opnd) { + return GetCurrentFuncCodeMp()->New(opcode, pType.GetPrimType(), type.GetTypeIndex(), opnd); +} + +TypeCvtNode *MIRBuilder::CreateExprTypeCvt(Opcode o, PrimType toPrimType, PrimType fromPrimType, BaseNode &opnd) { + return GetCurrentFuncCodeMp()->New(o, toPrimType, fromPrimType, &opnd); +} + +TypeCvtNode *MIRBuilder::CreateExprTypeCvt(Opcode o, const MIRType &type, const MIRType &fromType, BaseNode *opnd) { + return CreateExprTypeCvt(o, type.GetPrimType(), fromType.GetPrimType(), *opnd); +} + +ExtractbitsNode *MIRBuilder::CreateExprExtractbits(Opcode o, const MIRType &type, uint32 bOffset, uint32 bSize, + BaseNode *opnd) { + return CreateExprExtractbits(o, type.GetPrimType(), bOffset, bSize, opnd); +} + +ExtractbitsNode *MIRBuilder::CreateExprExtractbits(Opcode o, PrimType type, uint32 bOffset, uint32 bSize, + BaseNode *opnd) { + return GetCurrentFuncCodeMp()->New(o, type, bOffset, bSize, opnd); +} + +DepositbitsNode *MIRBuilder::CreateExprDepositbits(Opcode o, PrimType type, uint32 bOffset, uint32 bSize, + BaseNode *leftOpnd, BaseNode* rightOpnd) { + return GetCurrentFuncCodeMp()->New(o, type, bOffset, bSize, leftOpnd, rightOpnd); +} + +RetypeNode *MIRBuilder::CreateExprRetype(const MIRType &type, const MIRType &fromType, BaseNode *opnd) { + return CreateExprRetype(type, fromType.GetPrimType(), opnd); +} + +RetypeNode *MIRBuilder::CreateExprRetype(const MIRType &type, PrimType fromType, BaseNode *opnd) { + return GetCurrentFuncCodeMp()->New(type.GetPrimType(), fromType, type.GetTypeIndex(), opnd); +} + +BinaryNode *MIRBuilder::CreateExprBinary(Opcode opcode, const MIRType &type, BaseNode *opnd0, BaseNode *opnd1) { + return 
GetCurrentFuncCodeMp()->New(opcode, type.GetPrimType(), opnd0, opnd1); +} + +TernaryNode *MIRBuilder::CreateExprTernary(Opcode opcode, const MIRType &type, BaseNode *opnd0, BaseNode *opnd1, + BaseNode *opnd2) { + return GetCurrentFuncCodeMp()->New(opcode, type.GetPrimType(), opnd0, opnd1, opnd2); +} + +CompareNode *MIRBuilder::CreateExprCompare(Opcode opcode, const MIRType &type, const MIRType &opndType, BaseNode *opnd0, + BaseNode *opnd1) { + return GetCurrentFuncCodeMp()->New(opcode, type.GetPrimType(), opndType.GetPrimType(), opnd0, opnd1); +} + +ArrayNode *MIRBuilder::CreateExprArray(const MIRType &arrayType) { + MIRType *addrType = GlobalTables::GetTypeTable().GetOrCreatePointerType(arrayType); + ASSERT(addrType != nullptr, "addrType is null"); + auto *arrayNode = GetCurrentFuncCodeMp()->New(*GetCurrentFuncCodeMpAllocator(), + addrType->GetPrimType(), addrType->GetTypeIndex()); + arrayNode->SetNumOpnds(0); + return arrayNode; +} + +ArrayNode *MIRBuilder::CreateExprArray(const MIRType &arrayType, BaseNode *op) { + ArrayNode *arrayNode = CreateExprArray(arrayType); + arrayNode->GetNopnd().push_back(op); + arrayNode->SetNumOpnds(1); + return arrayNode; +} + +ArrayNode *MIRBuilder::CreateExprArray(const MIRType &arrayType, BaseNode *op1, BaseNode *op2) { + ArrayNode *arrayNode = CreateExprArray(arrayType, op1); + arrayNode->GetNopnd().push_back(op2); + arrayNode->SetNumOpnds(2); + return arrayNode; +} + +ArrayNode *MIRBuilder::CreateExprArray(const MIRType &arrayType, std::vector ops) { + MIRType *addrType = GlobalTables::GetTypeTable().GetOrCreatePointerType(arrayType); + ASSERT(addrType != nullptr, "addrType is null"); + auto *arrayNode = GetCurrentFuncCodeMp()->New(*GetCurrentFuncCodeMpAllocator(), + addrType->GetPrimType(), addrType->GetTypeIndex()); + arrayNode->GetNopnd().insert(arrayNode->GetNopnd().begin(), ops.begin(), ops.end()); + arrayNode->SetNumOpnds(static_cast(ops.size())); + return arrayNode; +} + +IntrinsicopNode *MIRBuilder::CreateExprIntrinsicop(MIRIntrinsicID id, Opcode op, PrimType primType, const TyIdx &tyIdx, + const MapleVector &ops) { + auto *expr = GetCurrentFuncCodeMp()->New(*GetCurrentFuncCodeMpAllocator(), op, primType); + expr->SetIntrinsic(id); + expr->SetNOpnd(ops); + expr->SetNumOpnds(ops.size()); + if (op == OP_intrinsicopwithtype) { + expr->SetTyIdx(tyIdx); + } + return expr; +} + +IntrinsicopNode *MIRBuilder::CreateExprIntrinsicop(MIRIntrinsicID idx, Opcode opCode, const MIRType &type, + const MapleVector &ops) { + return CreateExprIntrinsicop(idx, opCode, type.GetPrimType(), type.GetTypeIndex(), ops); +} + +DassignNode *MIRBuilder::CreateStmtDassign(const MIRSymbol &symbol, FieldID fieldID, BaseNode *src) { + return GetCurrentFuncCodeMp()->New(src, symbol.GetStIdx(), fieldID); +} + +RegassignNode *MIRBuilder::CreateStmtRegassign(PrimType pty, PregIdx regIdx, BaseNode *src) { + return GetCurrentFuncCodeMp()->New(pty, regIdx, src); +} + +DassignNode *MIRBuilder::CreateStmtDassign(StIdx sIdx, FieldID fieldID, BaseNode *src) { + return GetCurrentFuncCodeMp()->New(src, sIdx, fieldID); +} + +IassignNode *MIRBuilder::CreateStmtIassign(const MIRType &type, FieldID fieldID, BaseNode *addr, BaseNode *src) { + return GetCurrentFuncCodeMp()->New(type.GetTypeIndex(), fieldID, addr, src); +} + +IassignoffNode *MIRBuilder::CreateStmtIassignoff(PrimType pty, int32 offset, BaseNode *addr, BaseNode *src) { + return GetCurrentFuncCodeMp()->New(pty, offset, addr, src); +} + +IassignFPoffNode *MIRBuilder::CreateStmtIassignFPoff(Opcode op, PrimType pty, + int32 offset, 
BaseNode *src) { + return GetCurrentFuncCodeMp()->New(op, pty, offset, src); +} + +CallNode *MIRBuilder::CreateStmtCall(PUIdx puIdx, const MapleVector &args, Opcode opCode) { + auto *stmt = GetCurrentFuncCodeMp()->New(*GetCurrentFuncCodeMpAllocator(), opCode, puIdx, TyIdx()); + stmt->SetNOpnd(args); + stmt->SetNumOpnds(args.size()); + return stmt; +} + +CallNode *MIRBuilder::CreateStmtCall(const std::string &callee, const MapleVector &args) { + GStrIdx strIdx = GlobalTables::GetStrTable().GetStrIdxFromName(callee); + StIdx stIdx = GlobalTables::GetGsymTable().GetStIdxFromStrIdx(strIdx); + MIRSymbol *st = GlobalTables::GetGsymTable().GetSymbolFromStidx(stIdx.Idx()); + ASSERT(st != nullptr, "MIRSymbol st is null"); + MIRFunction *func = st->GetFunction(); + return CreateStmtCall(func->GetPuidx(), args, OP_call); +} + +IcallNode *MIRBuilder::CreateStmtIcall(const MapleVector &args) { + auto *stmt = GetCurrentFuncCodeMp()->New(*GetCurrentFuncCodeMpAllocator(), OP_icall); + ASSERT(stmt != nullptr, "stmt is null"); + stmt->SetOpnds(args); + return stmt; +} + +IcallNode *MIRBuilder::CreateStmtIcallproto(const MapleVector &args, const TyIdx &prototypeIdx) { + auto *stmt = GetCurrentFuncCodeMp()->New(*GetCurrentFuncCodeMpAllocator(), OP_icallproto); + ASSERT(stmt != nullptr, "stmt is null"); + stmt->SetOpnds(args); + stmt->SetRetTyIdx(prototypeIdx); + return stmt; +} + +IcallNode *MIRBuilder::CreateStmtIcallAssigned(const MapleVector &args, const MIRSymbol &ret) { + auto *stmt = GetCurrentFuncCodeMp()->New(*GetCurrentFuncCodeMpAllocator(), OP_icallassigned); + CallReturnVector nrets(GetCurrentFuncCodeMpAllocator()->Adapter()); + CHECK_FATAL((ret.GetStorageClass() == kScAuto || ret.GetStorageClass() == kScFormal || + ret.GetStorageClass() == kScExtern || ret.GetStorageClass() == kScGlobal), + "unknown classtype! check it!"); + nrets.emplace_back(CallReturnPair(ret.GetStIdx(), RegFieldPair(0, 0))); + stmt->SetNumOpnds(args.size()); + stmt->GetNopnd().resize(stmt->GetNumOpnds()); + stmt->SetReturnVec(nrets); + for (size_t i = 0; i < stmt->GetNopndSize(); ++i) { + stmt->SetNOpndAt(i, args.at(i)); + } + stmt->SetRetTyIdx(ret.GetTyIdx()); + return stmt; +} + +IcallNode *MIRBuilder::CreateStmtIcallprotoAssigned(const MapleVector &args, const MIRSymbol &ret, + const TyIdx &prototypeIdx) { + auto *stmt = GetCurrentFuncCodeMp()->New(*GetCurrentFuncCodeMpAllocator(), OP_icallprotoassigned); + CallReturnVector nrets(GetCurrentFuncCodeMpAllocator()->Adapter()); + CHECK_FATAL((ret.GetStorageClass() == kScAuto || ret.GetStorageClass() == kScFormal || + ret.GetStorageClass() == kScExtern || ret.GetStorageClass() == kScGlobal), + "unknown classtype! check it!"); + nrets.emplace_back(CallReturnPair(ret.GetStIdx(), RegFieldPair(0, 0))); + stmt->SetNumOpnds(args.size()); + stmt->GetNopnd().resize(stmt->GetNumOpnds()); + stmt->SetReturnVec(nrets); + for (size_t i = 0; i < stmt->GetNopndSize(); ++i) { + stmt->SetNOpndAt(i, args.at(i)); + } + stmt->SetRetTyIdx(prototypeIdx); + return stmt; +} + +IntrinsiccallNode *MIRBuilder::CreateStmtIntrinsicCall(MIRIntrinsicID idx, const MapleVector &arguments, + TyIdx tyIdx) { + auto *stmt = GetCurrentFuncCodeMp()->New( + *GetCurrentFuncCodeMpAllocator(), tyIdx == 0u ? 
OP_intrinsiccall : OP_intrinsiccallwithtype, idx); + stmt->SetTyIdx(tyIdx); + stmt->SetOpnds(arguments); + return stmt; +} + +IntrinsiccallNode *MIRBuilder::CreateStmtXintrinsicCall(MIRIntrinsicID idx, const MapleVector &arguments) { + auto *stmt = + GetCurrentFuncCodeMp()->New(*GetCurrentFuncCodeMpAllocator(), OP_xintrinsiccall, idx); + ASSERT(stmt != nullptr, "stmt is null"); + stmt->SetOpnds(arguments); + return stmt; +} + +CallNode *MIRBuilder::CreateStmtCallAssigned(PUIdx puIdx, const MIRSymbol *ret, Opcode op) { + auto *stmt = GetCurrentFuncCodeMp()->New(*GetCurrentFuncCodeMpAllocator(), op, puIdx); + if (ret) { + ASSERT(ret->IsLocal(), "Not Excepted ret"); + stmt->GetReturnVec().push_back(CallReturnPair(ret->GetStIdx(), RegFieldPair(0, 0))); + } + return stmt; +} + +CallNode *MIRBuilder::CreateStmtCallAssigned(PUIdx puIdx, const MapleVector &args, const MIRSymbol *ret, + Opcode opcode, TyIdx tyIdx) { + auto *stmt = GetCurrentFuncCodeMp()->New(*GetCurrentFuncCodeMpAllocator(), opcode, puIdx, tyIdx); + ASSERT(stmt != nullptr, "stmt is null"); + stmt->SetOpnds(args); + if (ret != nullptr) { + ASSERT(ret->IsLocal(), "Not Excepted ret"); + stmt->GetReturnVec().push_back(CallReturnPair(ret->GetStIdx(), RegFieldPair(0, 0))); + } + return stmt; +} + +CallNode *MIRBuilder::CreateStmtCallRegassigned(PUIdx puIdx, PregIdx pRegIdx, Opcode opcode, BaseNode *arg) { + auto *stmt = GetCurrentFuncCodeMp()->New(*GetCurrentFuncCodeMpAllocator(), opcode, puIdx); + stmt->GetNopnd().push_back(arg); + stmt->SetNumOpnds(stmt->GetNopndSize()); + if (pRegIdx > 0) { + stmt->GetReturnVec().push_back(CallReturnPair(StIdx(), RegFieldPair(0, pRegIdx))); + } + return stmt; +} + +CallNode *MIRBuilder::CreateStmtCallRegassigned(PUIdx puIdx, const MapleVector &args, PregIdx pRegIdx, + Opcode opcode) { + auto *stmt = GetCurrentFuncCodeMp()->New(*GetCurrentFuncCodeMpAllocator(), opcode, puIdx); + ASSERT(stmt != nullptr, "stmt is null"); + stmt->SetOpnds(args); + if (pRegIdx > 0) { + stmt->GetReturnVec().push_back(CallReturnPair(StIdx(), RegFieldPair(0, pRegIdx))); + } + return stmt; +} + +IntrinsiccallNode *MIRBuilder::CreateStmtIntrinsicCallAssigned(MIRIntrinsicID idx, const MapleVector &args, + PregIdx retPregIdx) { + auto *stmt = + GetCurrentFuncCodeMp()->New(*GetCurrentFuncCodeMpAllocator(), OP_intrinsiccallassigned, idx); + ASSERT(stmt != nullptr, "stmt is null"); + stmt->SetOpnds(args); + if (retPregIdx > 0) { + stmt->GetReturnVec().push_back(CallReturnPair(StIdx(), RegFieldPair(0, retPregIdx))); + } + return stmt; +} + +IntrinsiccallNode *MIRBuilder::CreateStmtIntrinsicCallAssigned(MIRIntrinsicID idx, const MapleVector &args, + const MIRSymbol *ret, TyIdx tyIdx) { + auto *stmt = GetCurrentFuncCodeMp()->New( + *GetCurrentFuncCodeMpAllocator(), tyIdx == 0u ? 
OP_intrinsiccallassigned : OP_intrinsiccallwithtypeassigned, idx); + stmt->SetTyIdx(tyIdx); + stmt->SetOpnds(args); + CallReturnVector nrets(GetCurrentFuncCodeMpAllocator()->Adapter()); + if (ret != nullptr) { + ASSERT(ret->IsLocal(), "Not Excepted ret"); + nrets.push_back(CallReturnPair(ret->GetStIdx(), RegFieldPair(0, 0))); + } + stmt->SetReturnVec(nrets); + return stmt; +} + +IntrinsiccallNode *MIRBuilder::CreateStmtXintrinsicCallAssigned(MIRIntrinsicID idx, const MapleVector &args, + const MIRSymbol *ret) { + auto *stmt = + GetCurrentFuncCodeMp()->New(*GetCurrentFuncCodeMpAllocator(), OP_xintrinsiccallassigned, idx); + ASSERT(stmt != nullptr, "stmt is null"); + stmt->SetOpnds(args); + CallReturnVector nrets(GetCurrentFuncCodeMpAllocator()->Adapter()); + if (ret != nullptr) { + ASSERT(ret->IsLocal(), "Not Excepted ret"); + nrets.push_back(CallReturnPair(ret->GetStIdx(), RegFieldPair(0, 0))); + } + stmt->SetReturnVec(nrets); + return stmt; +} + +NaryStmtNode *MIRBuilder::CreateStmtReturn(BaseNode *rVal) { + return CreateStmtNary(OP_return, rVal); +} + +NaryStmtNode *MIRBuilder::CreateStmtNary(Opcode op, const MapleVector &rVals) { + auto *stmt = GetCurrentFuncCodeMp()->New(*GetCurrentFuncCodeMpAllocator(), op); + ASSERT(stmt != nullptr, "stmt is null"); + stmt->SetOpnds(rVals); + return stmt; +} + +CallAssertBoundaryStmtNode *MIRBuilder::CreateStmtCallAssertBoundary(Opcode op, const MapleVector &rVals, + GStrIdx funcNameIdx, size_t paramIndex, + GStrIdx stmtFuncNameIdx) { + auto *stmt = GetCurrentFuncCodeMp()->New(*GetCurrentFuncCodeMpAllocator(), op, + funcNameIdx, paramIndex, stmtFuncNameIdx); + ASSERT(stmt != nullptr, "stmt is null"); + stmt->SetOpnds(rVals); + return stmt; +} + +NaryStmtNode *MIRBuilder::CreateStmtNary(Opcode op, BaseNode *rVal) { + auto *stmt = GetCurrentFuncCodeMp()->New(*GetCurrentFuncCodeMpAllocator(), op); + ASSERT(stmt != nullptr, "stmt is null"); + stmt->PushOpnd(rVal); + return stmt; +} + +AssertNonnullStmtNode *MIRBuilder::CreateStmtAssertNonnull(Opcode op, BaseNode* rVal, GStrIdx funcNameIdx) { + auto *stmt = GetCurrentFuncCodeMp()->New(op, funcNameIdx); + ASSERT(stmt != nullptr, "stmt is null"); + stmt->SetRHS(rVal); + return stmt; +} + +AssertBoundaryStmtNode *MIRBuilder::CreateStmtAssertBoundary(Opcode op, const MapleVector &rVals, + GStrIdx funcNameIdx) { + auto *stmt = GetCurrentFuncCodeMp()->New(*GetCurrentFuncCodeMpAllocator(), op, funcNameIdx); + ASSERT(stmt != nullptr, "stmt is null"); + stmt->SetOpnds(rVals); + return stmt; +} + +CallAssertNonnullStmtNode *MIRBuilder::CreateStmtCallAssertNonnull(Opcode op, BaseNode* rVal, GStrIdx callFuncNameIdx, + size_t paramIndex, GStrIdx stmtFuncNameIdx) { + auto *stmt = GetCurrentFuncCodeMp()->New(op, callFuncNameIdx, paramIndex, stmtFuncNameIdx); + ASSERT(stmt != nullptr, "stmt is null"); + stmt->SetRHS(rVal); + return stmt; +} + +UnaryStmtNode *MIRBuilder::CreateStmtUnary(Opcode op, BaseNode *rVal) { + return GetCurrentFuncCodeMp()->New(op, kPtyInvalid, rVal); +} + +UnaryStmtNode *MIRBuilder::CreateStmtThrow(BaseNode *rVal) { + return CreateStmtUnary(OP_throw, rVal); +} + +IfStmtNode *MIRBuilder::CreateStmtIf(BaseNode *cond) { + auto *ifStmt = GetCurrentFuncCodeMp()->New(); + ifStmt->SetOpnd(cond, 0); + BlockNode *thenBlock = GetCurrentFuncCodeMp()->New(); + ifStmt->SetThenPart(thenBlock); + return ifStmt; +} + +IfStmtNode *MIRBuilder::CreateStmtIfThenElse(BaseNode *cond) { + auto *ifStmt = GetCurrentFuncCodeMp()->New(); + ifStmt->SetOpnd(cond, 0); + auto *thenBlock = GetCurrentFuncCodeMp()->New(); + 
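+  // Wire up both branches: the then-block created above and an empty else-block created
+  // below, so the if node ends up with three operands (condition, then-part, else-part).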
ifStmt->SetThenPart(thenBlock); + auto *elseBlock = GetCurrentFuncCodeMp()->New(); + ifStmt->SetElsePart(elseBlock); + ifStmt->SetNumOpnds(3); + return ifStmt; +} + +DoloopNode *MIRBuilder::CreateStmtDoloop(StIdx doVarStIdx, bool isPReg, BaseNode *startExp, BaseNode *contExp, + BaseNode *incrExp) { + return GetCurrentFuncCodeMp()->New(doVarStIdx, isPReg, startExp, contExp, incrExp, + GetCurrentFuncCodeMp()->New()); +} + +SwitchNode *MIRBuilder::CreateStmtSwitch(BaseNode *opnd, LabelIdx defaultLabel, const CaseVector &switchTable) { + auto *switchNode = GetCurrentFuncCodeMp()->New(*GetCurrentFuncCodeMpAllocator(), + defaultLabel, opnd); + switchNode->SetSwitchTable(switchTable); + return switchNode; +} + +GotoNode *MIRBuilder::CreateStmtGoto(Opcode o, LabelIdx labIdx) { + return GetCurrentFuncCodeMp()->New(o, labIdx); +} + +JsTryNode *MIRBuilder::CreateStmtJsTry(LabelIdx cLabIdx, LabelIdx fLabIdx) { + return GetCurrentFuncCodeMp()->New(static_cast(cLabIdx), static_cast(fLabIdx)); +} + +TryNode *MIRBuilder::CreateStmtTry(const MapleVector &cLabIdxs) { + return GetCurrentFuncCodeMp()->New(cLabIdxs); +} + +CatchNode *MIRBuilder::CreateStmtCatch(const MapleVector &tyIdxVec) { + return GetCurrentFuncCodeMp()->New(tyIdxVec); +} + +LabelNode *MIRBuilder::CreateStmtLabel(LabelIdx labIdx) { + return GetCurrentFuncCodeMp()->New(labIdx); +} + +StmtNode *MIRBuilder::CreateStmtComment(const std::string &cmnt) { + return GetCurrentFuncCodeMp()->New(*GetCurrentFuncCodeMpAllocator(), cmnt); +} + +AddrofNode *MIRBuilder::CreateAddrof(const MIRSymbol &st, PrimType pty) { + return GetCurrentFuncCodeMp()->New(OP_addrof, pty, st.GetStIdx(), 0); +} + +AddrofNode *MIRBuilder::CreateDread(const MIRSymbol &st, PrimType pty) { + return GetCurrentFuncCodeMp()->New(OP_dread, pty, st.GetStIdx(), 0); +} + +CondGotoNode *MIRBuilder::CreateStmtCondGoto(BaseNode *cond, Opcode op, LabelIdx labIdx) { + return GetCurrentFuncCodeMp()->New(op, labIdx, cond); +} + +LabelIdx MIRBuilder::GetOrCreateMIRLabel(const std::string &name) const { + GStrIdx strIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(name); + MIRFunction *currentFunctionInner = GetCurrentFunctionNotNull(); + LabelIdx lableIdx = currentFunctionInner->GetLabelTab()->GetLabelIdxFromStrIdx(strIdx); + if (lableIdx == 0) { + lableIdx = currentFunctionInner->GetLabelTab()->CreateLabel(); + currentFunctionInner->GetLabelTab()->SetSymbolFromStIdx(lableIdx, strIdx); + currentFunctionInner->GetLabelTab()->AddToStringLabelMap(lableIdx); + } + return lableIdx; +} + +LabelIdx MIRBuilder::CreateLabIdx(MIRFunction &mirFunc) const { + LabelIdx lableIdx = mirFunc.GetLabelTab()->CreateLabel(); + mirFunc.GetLabelTab()->AddToStringLabelMap(lableIdx); + return lableIdx; +} + +void MIRBuilder::AddStmtInCurrentFunctionBody(StmtNode &stmt) const { + MIRFunction *fun = GetCurrentFunctionNotNull(); + stmt.GetSrcPos().CondSetLineNum(lineNum); + fun->GetBody()->AddStatement(&stmt); +} + +MemPool *MIRBuilder::GetCurrentFuncCodeMp() { + if (MIRFunction *curFunction = GetCurrentFunction()) { + return curFunction->GetCodeMemPool(); + } + return mirModule->GetMemPool(); +} + +MapleAllocator *MIRBuilder::GetCurrentFuncCodeMpAllocator() { + if (MIRFunction *curFunction = GetCurrentFunction()) { + return &curFunction->GetCodeMPAllocator(); + } + return &mirModule->GetMPAllocator(); +} + +MemPool *MIRBuilder::GetCurrentFuncDataMp() { + if (MIRFunction *curFunction = GetCurrentFunction()) { + return curFunction->GetDataMemPool(); + } + return mirModule->GetMemPool(); +} + 
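+// Illustrative sketch (not part of the original sources): how the statement builders above
+// are typically composed. `builder` and `cond` are assumed to already exist; the calls shown
+// (CreateStmtIf, CreateStmtComment, AddStmtInCurrentFunctionBody) are the ones defined in
+// this file, and AddStmtInCurrentFunctionBody appends the statement to the current function's
+// body after recording the builder's line number.
+//
+//   IfStmtNode *ifStmt = builder->CreateStmtIf(cond);   // cond is any BaseNode* condition
+//   ifStmt->GetThenPart()->AddStatement(builder->CreateStmtComment("then branch"));
+//   builder->AddStmtInCurrentFunctionBody(*ifStmt);
+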
+MIRBuilderExt::MIRBuilderExt(MIRModule *module, pthread_mutex_t *mutex) : MIRBuilder(module), mutex(mutex) {} + +MemPool *MIRBuilderExt::GetCurrentFuncCodeMp() { + ASSERT(curFunction, "curFunction is null"); + return curFunction->GetCodeMemPool(); +} + +MapleAllocator *MIRBuilderExt::GetCurrentFuncCodeMpAllocator() { + ASSERT(curFunction, "curFunction is null"); + return &curFunction->GetCodeMemPoolAllocator(); +} + +void MIRBuilderExt::GlobalLock() { + if (mutex) { + ASSERT(pthread_mutex_lock(mutex) == 0, "lock failed"); + } +} + +void MIRBuilderExt::GlobalUnlock() { + if (mutex) { + ASSERT(pthread_mutex_unlock(mutex) == 0, "unlock failed"); + } +} +} // namespace maple diff --git a/src/mapleall/maple_ir/src/mir_const.cpp b/src/mapleall/maple_ir/src/mir_const.cpp new file mode 100644 index 0000000000000000000000000000000000000000..53724ffaa59087ad7c72dcc40353022cec6d56d6 --- /dev/null +++ b/src/mapleall/maple_ir/src/mir_const.cpp @@ -0,0 +1,281 @@ +/* + * Copyright (c) [2019-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "mir_const.h" +#include "mir_function.h" +#include "global_tables.h" +#include "printing.h" +#if MIR_FEATURE_FULL + +namespace maple { +void MIRIntConst::Dump(const MIRSymbolTable *localSymTab [[maybe_unused]]) const { + LogInfo::MapleLogger() << value; +} + +bool MIRIntConst::operator==(const MIRConst &rhs) const { + if (&rhs == this) { + return true; + } + if (GetKind() != rhs.GetKind()) { + return false; + } + const auto &intConst = static_cast(rhs); + return ((&intConst.GetType() == &GetType()) && (intConst.value == value)); +} + +uint8 MIRIntConst::GetActualBitWidth() const { + if (value == 0) { + return 1; + } + + int64 val = GetExtValue(); + uint64 tmp = static_cast(val < 0 ? -(val + 1) : val); + + uint8 width = 0; + while (tmp != 0) { + ++width; + tmp = tmp >> 1u; + } + + return width; +} + +void MIRAddrofConst::Dump(const MIRSymbolTable *localSymTab) const { + LogInfo::MapleLogger() << "addrof " << GetPrimTypeName(PTY_ptr); + const MIRSymbol *sym = stIdx.IsGlobal() ? GlobalTables::GetGsymTable().GetSymbolFromStidx(stIdx.Idx()) + : localSymTab->GetSymbolFromStIdx(stIdx.Idx()); + CHECK_NULL_FATAL(sym); + ASSERT(stIdx.IsGlobal() || sym->GetStorageClass() == kScPstatic || sym->GetStorageClass() == kScFstatic, + "MIRAddrofConst can only point to a global symbol"); + LogInfo::MapleLogger() << (stIdx.IsGlobal() ? 
" $" : " %") << sym->GetName(); + if (fldID > 0) { + LogInfo::MapleLogger() << " " << fldID; + } + if (offset != 0) { + LogInfo::MapleLogger() << " (" << offset << ")"; + } +} + +bool MIRAddrofConst::operator==(const MIRConst &rhs) const { + if (&rhs == this) { + return true; + } + if (GetKind() != rhs.GetKind()) { + return false; + } + const auto &rhsA = static_cast(rhs); + if (&GetType() != &rhs.GetType()) { + return false; + } + return (stIdx == rhsA.stIdx) && (fldID == rhsA.fldID); +} + +void MIRAddroffuncConst::Dump(const MIRSymbolTable *localSymTab [[maybe_unused]]) const { + LogInfo::MapleLogger() << "addroffunc " << GetPrimTypeName(PTY_ptr); + MIRFunction *func = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(puIdx); + CHECK_NULL_FATAL(func); + MIRSymbol *sym = GlobalTables::GetGsymTable().GetSymbolFromStidx(func->GetStIdx().Idx()); + CHECK_NULL_FATAL(sym); + LogInfo::MapleLogger() << " &" << sym->GetName(); +} + +bool MIRAddroffuncConst::operator==(const MIRConst &rhs) const { + if (&rhs == this) { + return true; + } + if (GetKind() != rhs.GetKind()) { + return false; + } + const auto &rhsAf = static_cast(rhs); + return (&GetType() == &rhs.GetType()) && (puIdx == rhsAf.puIdx); +} + +void MIRLblConst::Dump(const MIRSymbolTable *localSymTab [[maybe_unused]]) const { + LogInfo::MapleLogger() << "addroflabel " << GetPrimTypeName(PTY_ptr); + MIRFunction *func = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(puIdx); + LogInfo::MapleLogger() << " @" << func->GetLabelName(value); +} + +bool MIRLblConst::operator==(const MIRConst &rhs) const { + if (&rhs == this) { + return true; + } + if (GetKind() != rhs.GetKind()) { + return false; + } + const auto &lblConst = static_cast(rhs); + return (lblConst.value == value); +} + +bool MIRFloatConst::operator==(const MIRConst &rhs) const { + if (&rhs == this) { + return true; + } + if (GetKind() != rhs.GetKind()) { + return false; + } + const auto &floatConst = static_cast(rhs); + if (std::isnan(floatConst.value.floatValue)) { + return std::isnan(value.floatValue); + } + if (std::isnan(value.floatValue)) { + return std::isnan(floatConst.value.floatValue); + } + if (floatConst.value.floatValue == 0.0 && value.floatValue == 0.0) { + return floatConst.IsNeg() == IsNeg(); + } + // Use bitwise comparison instead of approximate comparison for FP to avoid treating 0.0 and FLT_MIN as equal + return (floatConst.value.intValue == value.intValue); +} + +bool MIRDoubleConst::operator==(const MIRConst &rhs) const { + if (&rhs == this) { + return true; + } + if (GetKind() != rhs.GetKind()) { + return false; + } + const auto &floatConst = static_cast(rhs); + if (std::isnan(floatConst.value.dValue)) { + return std::isnan(value.dValue); + } + if (std::isnan(value.dValue)) { + return std::isnan(floatConst.value.dValue); + } + if (floatConst.value.dValue == 0.0 && value.dValue == 0.0) { + return floatConst.IsNeg() == IsNeg(); + } + // Use bitwise comparison instead of approximate comparison for FP to avoid treating 0.0 and DBL_MIN as equal + return (floatConst.value.intValue == value.intValue); +} + +bool MIRFloat128Const::operator==(const MIRConst &rhs) const { + if (&rhs == this) { + return true; + } + if (GetKind() != rhs.GetKind()) { + return false; + } + const auto &floatConst = static_cast(rhs); + if ((value[0] == floatConst.value[0]) && (value[1] == floatConst.value[1])) { + return true; + } + return false; +} + +bool MIRAggConst::operator==(const MIRConst &rhs) const { + if (&rhs == this) { + return true; + } + if (GetKind() != rhs.GetKind()) { 
+ return false; + } + const auto &aggregateConst = static_cast(rhs); + if (aggregateConst.constVec.size() != constVec.size()) { + return false; + } + for (size_t i = 0; i < constVec.size(); ++i) { + if (!(*aggregateConst.constVec[i] == *constVec[i])) { + return false; + } + } + return true; +} + +void MIRFloatConst::Dump(const MIRSymbolTable *localSymTab [[maybe_unused]]) const { + LogInfo::MapleLogger() << std::setprecision(std::numeric_limits::max_digits10) << value.floatValue << "f"; +} + +void MIRDoubleConst::Dump(const MIRSymbolTable *localSymTab [[maybe_unused]]) const { + LogInfo::MapleLogger() << std::setprecision(std::numeric_limits::max_digits10) << value.dValue; +} + +void MIRFloat128Const::Dump(const MIRSymbolTable *localSymTab [[maybe_unused]]) const { + constexpr int fieldWidth = 16; + std::ios::fmtflags f(LogInfo::MapleLogger().flags()); + LogInfo::MapleLogger().setf(std::ios::uppercase); + LogInfo::MapleLogger() << "0xL" << std::hex << std::setfill('0') << std::setw(fieldWidth) << value[0] + << std::setfill('0') << std::setw(fieldWidth) << value[1]; + LogInfo::MapleLogger().flags(f); +} + +void MIRAggConst::Dump(const MIRSymbolTable *localSymTab) const { + LogInfo::MapleLogger() << "["; + size_t size = constVec.size(); + for (size_t i = 0; i < size; ++i) { + if (fieldIdVec[i] != 0) { + LogInfo::MapleLogger() << fieldIdVec[i] << "= "; + } + constVec[i]->Dump(localSymTab); + if (i != size - 1) { + LogInfo::MapleLogger() << ", "; + } + } + LogInfo::MapleLogger() << "]"; +} + +MIRStrConst::MIRStrConst(const std::string &str, MIRType &type) + : MIRConst(type, kConstStrConst), value(GlobalTables::GetUStrTable().GetOrCreateStrIdxFromName(str)) {} + +void MIRStrConst::Dump(const MIRSymbolTable *localSymTab [[maybe_unused]]) const { + LogInfo::MapleLogger() << "conststr " << GetPrimTypeName(GetType().GetPrimType()); + const std::string &dumpStr = GlobalTables::GetUStrTable().GetStringFromStrIdx(value); + PrintString(dumpStr); +} + +bool MIRStrConst::operator==(const MIRConst &rhs) const { + if (&rhs == this) { + return true; + } + if (GetKind() != rhs.GetKind()) { + return false; + } + const auto &rhsCs = static_cast(rhs); + return (&rhs.GetType() == &GetType()) && (value == rhsCs.value); +} + +MIRStr16Const::MIRStr16Const(const std::u16string &str, MIRType &type) + : MIRConst(type, kConstStr16Const), + value(GlobalTables::GetU16StrTable().GetOrCreateStrIdxFromName(str)) {} + +void MIRStr16Const::Dump(const MIRSymbolTable *localSymTab [[maybe_unused]]) const { + LogInfo::MapleLogger() << "conststr16 " << GetPrimTypeName(GetType().GetPrimType()); + std::u16string str16 = GlobalTables::GetU16StrTable().GetStringFromStrIdx(value); + // UTF-16 string are dumped as UTF-8 string in mpl to keep the printable chars in ascii form + std::string str; + (void)namemangler::UTF16ToUTF8(str, str16); + PrintString(str); +} + +bool MIRStr16Const::operator==(const MIRConst &rhs) const { + if (&rhs == this) { + return true; + } + if (GetKind() != rhs.GetKind()) { + return false; + } + const auto &rhsCs = static_cast(rhs); + return (&GetType() == &rhs.GetType()) && (value == rhsCs.value); +} + +bool IsDivSafe(const MIRIntConst ÷nd, const MIRIntConst &divisor, PrimType pType) { + if (IsUnsignedInteger(pType)) { + return divisor.GetValue() != 0; + } + + return divisor.GetValue() != 0 && (!dividend.GetValue().IsMinValue() || !divisor.GetValue().AreAllBitsOne()); +} + +} // namespace maple +#endif // MIR_FEATURE_FULL diff --git a/src/mapleall/maple_ir/src/mir_enum.cpp 
b/src/mapleall/maple_ir/src/mir_enum.cpp new file mode 100644 index 0000000000000000000000000000000000000000..2fc4bb915d9bc1ba609bef6eed050a19bc08d6c3 --- /dev/null +++ b/src/mapleall/maple_ir/src/mir_enum.cpp @@ -0,0 +1,52 @@ +/* + * Copyright (c) [2022] Futurewei Technologies, Inc. All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ + +#include "global_tables.h" +#include "printing.h" +#include "mir_enum.h" + +// The syntax of enumerated type in Maple IR is: +// ENUMERATION { = , ... } +// If '=' is not specified, will adopt the last value plus 1. +// The default starting is 0. + +namespace maple { + +const std::string &MIREnum::GetName() const { + return GlobalTables::GetStrTable().GetStringFromStrIdx(nameStrIdx); +} + +void MIREnum::Dump() const { + LogInfo::MapleLogger() << "ENUMERATION $" << GetName() << " " << GetPrimTypeName(primType) << " {"; + if (elements.empty()) { + LogInfo::MapleLogger() << " }\n"; + return; + } + IntVal lastValue(elements.front().second - k9BitSize, primType); // so as to fail first check + for (size_t i = 0; i < elements.size(); ++i) { + EnumElem curElem = elements[i]; + LogInfo::MapleLogger() << " $" << GlobalTables::GetStrTable().GetStringFromStrIdx(curElem.first); + if (curElem.second != lastValue + 1) { + LogInfo::MapleLogger() << " = " << curElem.second; + } + lastValue = curElem.second; + if (i + 1 != elements.size()) { + LogInfo::MapleLogger() << ","; + } + } + LogInfo::MapleLogger() << " }\n"; +} + +} // namespace maple diff --git a/src/mapleall/maple_ir/src/mir_function.cpp b/src/mapleall/maple_ir/src/mir_function.cpp new file mode 100644 index 0000000000000000000000000000000000000000..e97aac7e0c762a6d12c56e0640b12cac23a836a4 --- /dev/null +++ b/src/mapleall/maple_ir/src/mir_function.cpp @@ -0,0 +1,751 @@ +/* + * Copyright (c) [2019-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "mir_function.h" +#include +#include +#include "mir_nodes.h" +#include "printing.h" +#include "string_utils.h" +#include "ipa_side_effect.h" +#include "inline_summary.h" + +namespace { +using namespace maple; +enum FuncProp : uint32_t { + kFuncPropHasCall = 1U, // the function has call + kFuncPropRetStruct = 1U << 1, // the function returns struct + kFuncPropUserFunc = 1U << 2, // the function is a user func + kFuncPropInfoPrinted = 1U << 3, // to avoid printing frameSize/moduleid/funcSize info more + // than once per function since they + // can only be printed at the beginning of a block + kFuncPropNeverReturn = 1U << 4, // the function when called never returns + kFuncPropHasSetjmp = 1U << 5, // the function contains call to setjmp + kFuncPropHasAsm = 1U << 6, // the function has use of inline asm + kFuncPropStructReturnedInRegs = 1U << 7, // the function returns struct in registers +}; +} // namespace + +namespace maple { +const MIRSymbol *MIRFunction::GetFuncSymbol() const { + return GlobalTables::GetGsymTable().GetSymbolFromStidx(symbolTableIdx.Idx()); +} +MIRSymbol *MIRFunction::GetFuncSymbol() { + const MIRFunction *mirFunc = const_cast(this); + ASSERT(mirFunc != nullptr, "null ptr check"); + return const_cast(mirFunc->GetFuncSymbol()); +} + +const std::string &MIRFunction::GetName() const { + MIRSymbol *mirSymbol = GlobalTables::GetGsymTable().GetSymbolFromStidx(symbolTableIdx.Idx()); + ASSERT(mirSymbol != nullptr, "null ptr check"); + return mirSymbol->GetName(); +} + +GStrIdx MIRFunction::GetNameStrIdx() const { + MIRSymbol *mirSymbol = GlobalTables::GetGsymTable().GetSymbolFromStidx(symbolTableIdx.Idx()); + ASSERT(mirSymbol != nullptr, "null ptr check"); + return mirSymbol->GetNameStrIdx(); +} + +const std::string &MIRFunction::GetBaseClassName() const { + return GlobalTables::GetStrTable().GetStringFromStrIdx(baseClassStrIdx); +} + +const std::string &MIRFunction::GetBaseFuncName() const { + return GlobalTables::GetStrTable().GetStringFromStrIdx(baseFuncStrIdx); +} + +const std::string &MIRFunction::GetBaseFuncNameWithType() const { + return GlobalTables::GetStrTable().GetStringFromStrIdx(baseFuncWithTypeStrIdx); +} + +const std::string &MIRFunction::GetBaseFuncSig() const { + return GlobalTables::GetStrTable().GetStringFromStrIdx(baseFuncSigStrIdx); +} + +const std::string &MIRFunction::GetSignature() const { + return GlobalTables::GetStrTable().GetStringFromStrIdx(signatureStrIdx); +} + +const MIRType *MIRFunction::GetReturnType() const { + CHECK_FATAL(funcType != nullptr, "funcType should not be nullptr"); + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(funcType->GetRetTyIdx()); +} +MIRType *MIRFunction::GetReturnType() { + return const_cast(const_cast(this)->GetReturnType()); +} +const MIRType *MIRFunction::GetClassType() const { + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(classTyIdx); +} +const MIRType *MIRFunction::GetNthParamType(size_t i) const { + CHECK_FATAL(funcType != nullptr, "funcType should not be nullptr"); + ASSERT(i < funcType->GetParamTypeList().size(), "array index out of range"); + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(funcType->GetParamTypeList()[i]); +} +MIRType *MIRFunction::GetNthParamType(size_t i) { + return const_cast(const_cast(this)->GetNthParamType(i)); +} + +// reconstruct formals, and return a new MIRFuncType +MIRFuncType *MIRFunction::ReconstructFormals(const std::vector &symbols, bool clearOldArgs) { + auto *newFuncType = static_cast(funcType->CopyMIRTypeNode()); + if (clearOldArgs) { + 
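+    // Drop the current formals and the copied function type's parameter type/attribute
+    // lists before rebuilding both from `symbols` below.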
formalDefVec.clear(); + newFuncType->GetParamTypeList().clear(); + newFuncType->GetParamAttrsList().clear(); + } + for (auto *symbol : symbols) { + FormalDef formalDef(symbol->GetNameStrIdx(), symbol, symbol->GetTyIdx(), symbol->GetAttrs()); + formalDefVec.push_back(formalDef); + newFuncType->GetParamTypeList().push_back(symbol->GetTyIdx()); + newFuncType->GetParamAttrsList().push_back(symbol->GetAttrs()); + } + return newFuncType; +} + +void MIRFunction::UpdateFuncTypeAndFormals(const std::vector &symbols, bool clearOldArgs) { + auto *newFuncType = ReconstructFormals(symbols, clearOldArgs); + auto newFuncTypeIdx = GlobalTables::GetTypeTable().GetOrCreateMIRType(newFuncType); + funcType = static_cast(GlobalTables::GetTypeTable().GetTypeFromTyIdx(newFuncTypeIdx)); + delete newFuncType; +} + +void MIRFunction::UpdateFuncTypeAndFormalsAndReturnType(const std::vector &symbols, const TyIdx &retTyIdx, + bool clearOldArgs) { + auto *newFuncType = ReconstructFormals(symbols, clearOldArgs); + newFuncType->SetRetTyIdx(retTyIdx); + auto newFuncTypeIdx = GlobalTables::GetTypeTable().GetOrCreateMIRType(newFuncType); + funcType = static_cast(GlobalTables::GetTypeTable().GetTypeFromTyIdx(newFuncTypeIdx)); + delete newFuncType; +} + +LabelIdx MIRFunction::GetOrCreateLableIdxFromName(const std::string &name) { + GStrIdx strIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(name); + LabelIdx labelIdx = GetLabelTab()->GetLabelIdxFromStrIdx(strIdx); + if (labelIdx == 0) { + labelIdx = GetLabelTab()->CreateLabel(); + GetLabelTab()->SetSymbolFromStIdx(labelIdx, strIdx); + GetLabelTab()->AddToStringLabelMap(labelIdx); + } + return labelIdx; +} + +bool MIRFunction::HasCall() const { + return flag & kFuncPropHasCall; +} +void MIRFunction::SetHasCall() { + flag |= kFuncPropHasCall; +} + +bool MIRFunction::IsReturnStruct() const { + return flag & kFuncPropRetStruct; +} +void MIRFunction::SetReturnStruct() { + flag |= kFuncPropRetStruct; +} +void MIRFunction::SetReturnStruct(const MIRType &retType) { + if (retType.IsStructType()) { + flag |= kFuncPropRetStruct; + } +} +void MIRFunction::SetReturnStruct(const MIRType *retType) { + switch (retType->GetKind()) { + case kTypeUnion: + case kTypeStruct: + case kTypeStructIncomplete: + case kTypeClass: + case kTypeClassIncomplete: + case kTypeInterface: + case kTypeInterfaceIncomplete: + flag |= kFuncPropRetStruct; + break; + default:; + } +} + +bool MIRFunction::IsUserFunc() const { + return flag & kFuncPropUserFunc; +} +void MIRFunction::SetUserFunc() { + flag |= kFuncPropUserFunc; +} + +bool MIRFunction::IsInfoPrinted() const { + return flag & kFuncPropInfoPrinted; +} +void MIRFunction::SetInfoPrinted() { + flag |= kFuncPropInfoPrinted; +} +void MIRFunction::ResetInfoPrinted() { + flag &= ~kFuncPropInfoPrinted; +} + +void MIRFunction::SetNoReturn() { + flag |= kFuncPropNeverReturn; +} +bool MIRFunction::NeverReturns() const { + return flag & kFuncPropNeverReturn; +} + +void MIRFunction::SetHasSetjmp() { + flag |= kFuncPropHasSetjmp; +} + +bool MIRFunction::HasSetjmp() const { + return ((flag & kFuncPropHasSetjmp) != kTypeflagZero); +} + +void MIRFunction::SetHasAsm() { + flag |= kFuncPropHasAsm; +} + +bool MIRFunction::HasAsm() const { + return ((flag & kFuncPropHasAsm) != kTypeflagZero); +} + +void MIRFunction::SetStructReturnedInRegs() { + flag |= kFuncPropStructReturnedInRegs; +} + +bool MIRFunction::StructReturnedInRegs() const { + return ((flag & kFuncPropStructReturnedInRegs) != kTypeflagZero); +} + +void MIRFunction::SetAttrsFromSe(uint8 specialEffect) { 
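+  // Each bit set in the side-effect summary byte maps to one function attribute below.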
+ // NoPrivateDefEffect + if ((specialEffect & kDefEffect) == kDefEffect) { + funcAttrs.SetAttr(FUNCATTR_noprivate_defeffect); + } + // NoPrivateUseEffect + if ((specialEffect & kUseEffect) == kUseEffect) { + funcAttrs.SetAttr(FUNCATTR_noretarg); + } + // IpaSeen + if ((specialEffect & kIpaSeen) == kIpaSeen) { + funcAttrs.SetAttr(FUNCATTR_ipaseen); + } + // Pure + if ((specialEffect & kPureFunc) == kPureFunc) { + funcAttrs.SetAttr(FUNCATTR_pure); + } + // NoDefArgEffect + if ((specialEffect & kNoDefArgEffect) == kNoDefArgEffect) { + funcAttrs.SetAttr(FUNCATTR_nodefargeffect); + } + // NoDefEffect + if ((specialEffect & kNoDefEffect) == kNoDefEffect) { + funcAttrs.SetAttr(FUNCATTR_nodefeffect); + } + // NoRetNewlyAllocObj + if ((specialEffect & kNoRetNewlyAllocObj) == kNoRetNewlyAllocObj) { + funcAttrs.SetAttr(FUNCATTR_noretglobal); + } + // NoThrowException + if ((specialEffect & kNoThrowException) == kNoThrowException) { + funcAttrs.SetAttr(FUNCATTR_nothrow_exception); + } +} + +void FuncAttrs::DumpAttributes() const { +// parse no content of attr +#define STRING(s) #s +#define FUNC_ATTR +#define NOCONTENT_ATTR +#define ATTR(AT) \ + if (GetAttr(FUNCATTR_##AT)) { \ + LogInfo::MapleLogger() << " " << STRING(AT); \ + } +#include "all_attributes.def" +#undef ATTR +#undef NOCONTENT_ATTR +#undef FUNC_ATTR +// parse content of attr + if (GetAttr(FUNCATTR_alias) && !GetAliasFuncName().empty()) { + LogInfo::MapleLogger() << " alias ( \"" << GetAliasFuncName() << "\" )"; + } + if (GetAttr(FUNCATTR_constructor_priority) && GetConstructorPriority() != -1) { + LogInfo::MapleLogger() << " constructor_priority ( " << GetConstructorPriority() << " )"; + } + if (GetAttr(FUNCATTR_destructor_priority) && GetDestructorPriority() != -1) { + LogInfo::MapleLogger() << " destructor_priority ( " << GetDestructorPriority() << " )"; + } +} + +void MIRFunction::DumpFlavorLoweredThanMmpl() const { + LogInfo::MapleLogger() << " ("; + + // Dump arguments + bool hasPrintedFormal = false; + for (uint32 i = 0; i < formalDefVec.size(); i++) { + MIRSymbol *symbol = formalDefVec[i].formalSym; + if (symbol == nullptr && + (formalDefVec[i].formalStrIdx.GetIdx() == 0 || + GlobalTables::GetStrTable().GetStringFromStrIdx(formalDefVec[i].formalStrIdx).empty())) { + break; + } + hasPrintedFormal = true; + if (symbol == nullptr) { + LogInfo::MapleLogger() << "var %" + << GlobalTables::GetStrTable().GetStringFromStrIdx(formalDefVec[i].formalStrIdx) + << " "; + } else { + if (symbol->GetSKind() != kStPreg) { + LogInfo::MapleLogger() << "var %" << symbol->GetName() << " "; + } else { + LogInfo::MapleLogger() << "reg %" << symbol->GetPreg()->GetPregNo() << " "; + } + } + MIRType *ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(formalDefVec[i].formalTyIdx); + constexpr uint8 indent = 2; + ty->Dump(indent); + if (symbol != nullptr) { + symbol->GetAttrs().DumpAttributes(); + } else { + formalDefVec[i].formalAttrs.DumpAttributes(); + } + if (i != (formalDefVec.size() - 1)) { + LogInfo::MapleLogger() << ", "; + } + } + if (IsVarargs()) { + if (!hasPrintedFormal) { + LogInfo::MapleLogger() << "..."; + } else { + LogInfo::MapleLogger() << ", ..."; + } + } + + LogInfo::MapleLogger() << ") "; + GetReturnType()->Dump(1); +} + +void MIRFunction::Dump(bool withoutBody) { + // skip the functions that are added during process methods in + // class and interface decls. these has nothing in formals + // they do have paramtypelist_. 
this can not skip ones without args + // but for them at least the func decls are valid + if ((module->IsJavaModule() && GetParamSize() != formalDefVec.size()) || + GetAttr(FUNCATTR_optimized)) { + return; + } + + // save the module's curFunction and set it to the one currently Dump()ing + MIRFunction *savedFunc = module->CurFunction(); + module->SetCurFunction(this); + + MIRSymbol *symbol = GlobalTables::GetGsymTable().GetSymbolFromStidx(symbolTableIdx.Idx()); + ASSERT(symbol != nullptr, "symbol MIRSymbol is null"); + if (!withoutBody) { + symbol->GetSrcPosition().DumpLoc(MIRSymbol::LastPrintedLineNumRef(), MIRSymbol::LastPrintedColumnNumRef()); + } + LogInfo::MapleLogger() << "func " << "&" << symbol->GetName(); + theMIRModule = module; + funcAttrs.DumpAttributes(); + + if (symbol->GetWeakrefAttr().first) { + LogInfo::MapleLogger() << " weakref"; + if (symbol->GetWeakrefAttr().second != UStrIdx(0)) { + LogInfo::MapleLogger() << " ("; + PrintString(GlobalTables::GetUStrTable().GetStringFromStrIdx(symbol->GetWeakrefAttr().second)); + LogInfo::MapleLogger() << " )"; + } + } + + if (symbol->sectionAttr != UStrIdx(0)) { + LogInfo::MapleLogger() << " section ("; + PrintString(GlobalTables::GetUStrTable().GetStringFromStrIdx(symbol->sectionAttr)); + LogInfo::MapleLogger() << " )"; + } + + if (module->GetFlavor() != kMmpl) { + DumpFlavorLoweredThanMmpl(); + } + + // codeMemPool is nullptr, means maple_ir has been released for memory's sake + if (codeMemPool == nullptr) { + LogInfo::MapleLogger() << '\n'; + } else if (GetBody() != nullptr && !withoutBody && symbol->GetStorageClass() != kScExtern) { + StmtNode::lastPrintedLineNum = 0; + StmtNode::lastPrintedColumnNum = 0; + ResetInfoPrinted(); // this ensures funcinfo will be printed + GetBody()->Dump(0, module->GetFlavor() == kMmpl ? nullptr : GetSymTab(), + module->GetFlavor() < kMmpl ? 
GetPregTab() : nullptr, false, + true, module->GetFlavor()); // Dump body + } else { + LogInfo::MapleLogger() << '\n'; + } + + // restore the curFunction + module->SetCurFunction(savedFunc); +} + +void MIRFunction::DumpUpFormal(int32 indent) const { + PrintIndentation(indent + 1); + + LogInfo::MapleLogger() << "upformalsize " << GetUpFormalSize() << '\n'; + if (localWordsTypeTagged != nullptr) { + PrintIndentation(indent + 1); + LogInfo::MapleLogger() << "formalWordsTypeTagged = [ "; + const auto *p = reinterpret_cast(localWordsTypeTagged); + LogInfo::MapleLogger() << std::hex; + while (p < reinterpret_cast(localWordsTypeTagged + BlockSize2BitVectorSize(GetUpFormalSize()))) { + LogInfo::MapleLogger() << std::hex << "0x" << *p << " "; + ++p; + } + LogInfo::MapleLogger() << std::dec << "]\n"; + } + + if (formalWordsRefCounted != nullptr) { + PrintIndentation(indent + 1); + LogInfo::MapleLogger() << "formalWordsRefCounted = [ "; + const uint32 *p = reinterpret_cast(formalWordsRefCounted); + LogInfo::MapleLogger() << std::hex; + while (p < reinterpret_cast(formalWordsRefCounted + BlockSize2BitVectorSize(GetUpFormalSize()))) { + LogInfo::MapleLogger() << std::hex << "0x" << *p << " "; + ++p; + } + LogInfo::MapleLogger() << std::dec << "]\n"; + } +} + +void MIRFunction::DumpFrame(int32 indent) const { + PrintIndentation(indent + 1); + + LogInfo::MapleLogger() << "framesize " << static_cast(GetFrameSize()) << '\n'; + if (localWordsTypeTagged != nullptr) { + PrintIndentation(indent + 1); + LogInfo::MapleLogger() << "localWordsTypeTagged = [ "; + const uint32 *p = reinterpret_cast(localWordsTypeTagged); + LogInfo::MapleLogger() << std::hex; + while (p < reinterpret_cast(localWordsTypeTagged + BlockSize2BitVectorSize(GetFrameSize()))) { + LogInfo::MapleLogger() << std::hex << "0x" << *p << " "; + ++p; + } + LogInfo::MapleLogger() << std::dec << "]\n"; + } + + if (localWordsRefCounted != nullptr) { + PrintIndentation(indent + 1); + LogInfo::MapleLogger() << "localWordsRefCounted = [ "; + const uint32 *p = reinterpret_cast(localWordsRefCounted); + LogInfo::MapleLogger() << std::hex; + while (p < reinterpret_cast(localWordsRefCounted + BlockSize2BitVectorSize(GetFrameSize()))) { + LogInfo::MapleLogger() << std::hex << "0x" << *p << " "; + ++p; + } + LogInfo::MapleLogger() << std::dec << "]\n"; + } +} + +void MIRFunction::DumpScope() const { + scope->Dump(0); +} + +void MIRFunction::DumpFuncBody(int32 indent) { + LogInfo::MapleLogger() << " funcid " << GetPuidxOrigin() << '\n'; + + if (IsInfoPrinted()) { + return; + } + + SetInfoPrinted(); + + if (GetUpFormalSize() > 0) { + DumpUpFormal(indent); + } + + if (GetFrameSize() > 0) { + DumpFrame(indent); + } + + if (GetOutParmSize() > 0) { + PrintIndentation(indent + 1); + LogInfo::MapleLogger() << "outparmsize " << GetOutParmSize() << '\n'; + } + + if (GetModuleId() > 0) { + PrintIndentation(indent + 1); + LogInfo::MapleLogger() << "moduleID " << static_cast(GetModuleId()) << '\n'; + } + + if (GetFuncSize() > 0) { + PrintIndentation(indent + 1); + LogInfo::MapleLogger() << "funcSize " << GetFuncSize() << '\n'; + } + + if (GetInfoVector().empty()) { + return; + } + + const MIRInfoVector &funcInfo = GetInfoVector(); + const MapleVector &funcInfoIsString = InfoIsString(); + PrintIndentation(indent + 1); + LogInfo::MapleLogger() << "funcinfo {\n"; + size_t size = funcInfo.size(); + constexpr int kIndentOffset = 2; + for (size_t i = 0; i < size; ++i) { + PrintIndentation(indent + kIndentOffset); + LogInfo::MapleLogger() << "@" << 
GlobalTables::GetStrTable().GetStringFromStrIdx(funcInfo[i].first) << " "; + if (!funcInfoIsString[i]) { + LogInfo::MapleLogger() << funcInfo[i].second; + } else { + LogInfo::MapleLogger() << "\"" + << GlobalTables::GetStrTable().GetStringFromStrIdx(GStrIdx(funcInfo[i].second)) + << "\""; + } + if (i < size - 1) { + LogInfo::MapleLogger() << ",\n"; + } else { + LogInfo::MapleLogger() << "}\n"; + } + } + LogInfo::MapleLogger() << '\n'; +} + +bool MIRFunction::IsEmpty() const { + return (body == nullptr || body->IsEmpty()); +} + +bool MIRFunction::IsClinit() const { + const std::string clinitPostfix = "_7C_3Cclinit_3E_7C_28_29V"; + const std::string &funcName = this->GetName(); + // this does not work for smali files like art/test/511-clinit-interface/smali/BogusInterface.smali, + // which is decorated without "constructor". + return StringUtils::EndsWith(funcName, clinitPostfix); +} + +uint32 MIRFunction::GetInfo(GStrIdx strIdx) const { + for (const auto &item : info) { + if (item.first == strIdx) { + return item.second; + } + } + ASSERT(false, "get info error"); + return 0; +} + +uint32 MIRFunction::GetInfo(const std::string &string) const { + GStrIdx strIdx = GlobalTables::GetStrTable().GetStrIdxFromName(string); + return GetInfo(strIdx); +} + +void MIRFunction::OverrideBaseClassFuncNames(GStrIdx strIdx) { + baseClassStrIdx.reset(); + baseFuncStrIdx.reset(); + SetBaseClassFuncNames(strIdx); +} + +// there are two ways to represent the delimiter: '|' or "_7C" +// where 7C is the ascii value of char '|' in hex +void MIRFunction::SetBaseClassFuncNames(GStrIdx strIdx) { + if (baseClassStrIdx != 0u || baseFuncStrIdx != 0u) { + return; + } + const std::string name = GlobalTables::GetStrTable().GetStringFromStrIdx(strIdx); + std::string delimiter = "|"; + uint32 width = 1; // delimiter width + size_t pos = name.find(delimiter); + if (pos == std::string::npos) { + delimiter = namemangler::kNameSplitterStr; + width = 3; // delimiter width + pos = name.find(delimiter); + // make sure it is not __7C, but ___7C ok + while (pos != std::string::npos && (name[pos - 1] == '_' && name[pos - 2] != '_')) { + pos = name.find(delimiter, pos + width); + } + } + if (pos != std::string::npos && pos > 0) { + const std::string className = name.substr(0, pos); + baseClassStrIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(className); + std::string funcNameWithType = name.substr(pos + width, name.length() - pos - width); + baseFuncWithTypeStrIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(funcNameWithType); + size_t index = name.find(namemangler::kRightBracketStr); + if (index != std::string::npos) { + size_t posEnd = index + (std::string(namemangler::kRightBracketStr)).length(); + funcNameWithType = name.substr(pos + width, posEnd - pos - width); + } + baseFuncSigStrIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(funcNameWithType); + size_t newPos = name.find(delimiter, pos + width); + while (newPos != std::string::npos && (name[newPos - 1] == '_' && name[newPos - 2] != '_')) { + newPos = name.find(delimiter, newPos + width); + } + if (newPos != 0) { + std::string funcName = name.substr(pos + width, newPos - pos - width); + baseFuncStrIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(funcName); + std::string signature = name.substr(newPos + width, name.length() - newPos - width); + signatureStrIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(signature); + } + return; + } + baseFuncStrIdx = strIdx; +} + +const MIRSymbol 
*MIRFunction::GetLocalOrGlobalSymbol(const StIdx &idx, bool checkFirst) const { + return idx.Islocal() ? GetSymbolTabItem(idx.Idx(), checkFirst) + : GlobalTables::GetGsymTable().GetSymbolFromStidx(idx.Idx(), checkFirst); +} +MIRSymbol *MIRFunction::GetLocalOrGlobalSymbol(const StIdx &idx, bool checkFirst) { + return const_cast(const_cast(this)->GetLocalOrGlobalSymbol(idx, checkFirst)); +} + +const MIRType *MIRFunction::GetNodeType(const BaseNode &node) const { + if (node.GetOpCode() == OP_dread) { + const MIRSymbol *sym = GetLocalOrGlobalSymbol(static_cast(node).GetStIdx()); + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(sym->GetTyIdx()); + } + if (node.GetOpCode() == OP_regread) { + const auto &nodeReg = static_cast(node); + const MIRPreg *pReg = GetPregTab()->PregFromPregIdx(nodeReg.GetRegIdx()); + if (pReg->GetPrimType() == PTY_ref) { + return pReg->GetMIRType(); + } + } + return nullptr; +} + +void MIRFunction::EnterFormals() { + for (auto &formalDef : formalDefVec) { + formalDef.formalSym = symTab->CreateSymbol(kScopeLocal); + formalDef.formalSym->SetStorageClass(kScFormal); + formalDef.formalSym->SetNameStrIdx(formalDef.formalStrIdx); + formalDef.formalSym->SetTyIdx(formalDef.formalTyIdx); + formalDef.formalSym->SetAttrs(formalDef.formalAttrs); + const std::string &formalName = GlobalTables::GetStrTable().GetStringFromStrIdx(formalDef.formalStrIdx); + if (!isdigit(formalName.front())) { + formalDef.formalSym->SetSKind(kStVar); + (void)symTab->AddToStringSymbolMap(*formalDef.formalSym); + } else { + formalDef.formalSym->SetSKind(kStPreg); + uint32 thepregno = static_cast(std::stoi(formalName)); + MIRType *mirType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(formalDef.formalTyIdx); + PrimType pType = mirType->GetPrimType(); + // if mirType info is not needed, set mirType to nullptr + if (pType != PTY_ref && pType != PTY_ptr) { + mirType = nullptr; + } else if (pType == PTY_ptr && mirType->IsMIRPtrType()) { + MIRType *pointedType = static_cast(mirType)->GetPointedType(); + if (pointedType == nullptr || pointedType->GetKind() != kTypeFunction) { + mirType = nullptr; + } + } + PregIdx pregIdx = pregTab->EnterPregNo(thepregno, pType, mirType); + MIRPreg *preg = pregTab->PregFromPregIdx(pregIdx); + formalDef.formalSym->SetPreg(preg); + } + } +} + +// InlineSummary constructor is dependent on the following Predicate static member functions and it +// will be called by MIRFunction::GetOrCreateInlineSummary(). So we put the implemetation code here. +// True predicate asserts: { 0 } +// TruePredicate object is globally unique, so we allocate it in inlineSummaryAlloc. +// DO NOT call this function after inlineSummaryAlloc is released. +const Predicate *Predicate::TruePredicate() { + auto &summaryAlloc = theMIRModule->GetInlineSummaryAlloc(); + static const auto * const truePredicate = + summaryAlloc.New(std::initializer_list{ 0 }, summaryAlloc); + return truePredicate; +} + +// False predicate asserts: { 1 } +// FalsePredicate object is globally unique, so we allocate it in inlineSummaryAlloc. +// DO NOT call this function after inlineSummaryAlloc is released. +const Predicate *Predicate::FalsePredicate() { + auto &summaryAlloc = theMIRModule->GetInlineSummaryAlloc(); + static const auto * const falsePredicate = + summaryAlloc.New(std::initializer_list{ 1 }, summaryAlloc); + return falsePredicate; +} + +// NotInline predicate asserts: { 2 } +// NotInlinePredicate object is globally unique, so we allocate it in inlineSummaryAlloc. 
+// DO NOT call this function after inlineSummaryAlloc is released. +const Predicate *Predicate::NotInlinePredicate() { + auto &summaryAlloc = theMIRModule->GetInlineSummaryAlloc(); + static const auto * const notInlinePredicate = + summaryAlloc.New(std::initializer_list{ 2 }, summaryAlloc); + return notInlinePredicate; +} + +InlineSummary *MIRFunction::GetOrCreateInlineSummary() { + if (inlineSummary == nullptr) { + auto &inlineSummaryAlloc = module->GetInlineSummaryAlloc(); + CHECK_FATAL(inlineSummaryAlloc.GetMemPool() != nullptr, "inline summary alloc has been released?"); + inlineSummary = inlineSummaryAlloc.New(inlineSummaryAlloc, this); + } + return GetInlineSummary(); +} + +void MIRFunction::NewBody() { + SetBody(GetCodeMemPool()->New()); + // If mir_function.has been seen as a declaration, its symtab has to be moved + // from module mempool to function mempool. + MIRSymbolTable *oldSymTable = GetSymTab(); + MIRPregTable *oldPregTable = GetPregTab(); + MIRTypeNameTable *oldTypeNameTable = typeNameTab; + MIRLabelTable *oldLabelTable = GetLabelTab(); + symTab = module->GetMemPool()->New(module->GetMPAllocator()); + pregTab = module->GetMemPool()->New(&module->GetMPAllocator()); + typeNameTab = module->GetMemPool()->New(module->GetMPAllocator()); + labelTab = module->GetMemPool()->New(module->GetMPAllocator()); + + if (oldSymTable == nullptr) { + // formals not yet entered into symTab; enter them now + EnterFormals(); + } else { + for (size_t i = 1; i < oldSymTable->GetSymbolTableSize(); ++i) { + (void)GetSymTab()->AddStOutside(oldSymTable->GetSymbolFromStIdx(i)); + } + } + if (oldPregTable != nullptr) { + for (size_t i = 1; i < oldPregTable->Size(); ++i) { + (void)GetPregTab()->AddPreg(*oldPregTable->PregFromPregIdx(static_cast(i))); + } + } + if (oldTypeNameTable != nullptr) { + ASSERT(oldTypeNameTable->Size() == typeNameTab->Size(), + "Does not expect to process typeNameTab in MIRFunction::NewBody"); + } + if (oldLabelTable != nullptr) { + ASSERT(oldLabelTable->Size() == GetLabelTab()->Size(), + "Does not expect to process labelTab in MIRFunction::NewBody"); + } +} + +MIRFunction *MIRFunction::GetFuncAlias() { + if (GetAttr(FUNCATTR_weakref)) { + auto aliasFunc = funcAttrs.GetAliasFuncName(); + auto builder = module->GetMIRBuilder(); + return builder->GetOrCreateFunction(aliasFunc, funcType->GetRetTyIdx()); + } else { + return this; + } +} + +#ifdef DEBUGME +void MIRFunction::SetUpGDBEnv() { + if (codeMemPool != nullptr) { + delete codeMemPool; + } + codeMemPool = new ThreadLocalMemPool(memPoolCtrler, "tmp debug"); + codeMemPoolAllocator.SetMemPool(codeMemPool); +} + +void MIRFunction::ResetGDBEnv() { + delete codeMemPool; + codeMemPool = nullptr; +} +#endif +} // namespace maple diff --git a/src/mapleall/maple_ir/src/mir_lower.cpp b/src/mapleall/maple_ir/src/mir_lower.cpp new file mode 100644 index 0000000000000000000000000000000000000000..389dc6fc87adbacd1a42a9d3e5263498649c2fbc --- /dev/null +++ b/src/mapleall/maple_ir/src/mir_lower.cpp @@ -0,0 +1,1122 @@ +/* + * Copyright (c) [2019-2020] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. 
+ * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "mir_lower.h" +#include "constantfold.h" +#include "ext_constantfold.h" + +#define DO_LT_0_CHECK 1 + +namespace maple { + +static constexpr uint64 RoundUpConst(uint64 offset, uint32 align) { + return (-align) & (offset + align - 1); +} + +static inline uint64 RoundUp(uint64 offset, uint32 align) { + if (align == 0) { + return offset; + } + return RoundUpConst(offset, align); +} + +// Remove intrinsicop __builtin_expect and record likely info to brStmt +// Target condExpr example: +// ne u1 i64 ( +// intrinsicop i64 C___builtin_expect ( +// cvt i64 i32 (dread i32 %levVar_9354), cvt i64 i32 (constval i32 0)), +// constval i64 0) +void LowerCondGotoStmtWithBuiltinExpect(CondGotoNode &brStmt) { + BaseNode *condExpr = brStmt.Opnd(0); + // Poke ne for dread shortCircuit + // Example: + // dassign %shortCircuit 0 (ne u1 i64 ( + // intrinsicop i64 C___builtin_expect ( + // cvt i64 i32 (dread i32 %levVar_32349), + // cvt i64 i32 (constval i32 0)), + // constval i64 0)) + // dassign %shortCircuit 0 (ne u1 u32 (dread u32 %shortCircuit, constval u1 0)) + if (condExpr->GetOpCode() == OP_ne && condExpr->Opnd(0)->GetOpCode() == OP_dread && + condExpr->Opnd(1)->GetOpCode() == OP_constval) { + auto *constVal = static_cast(condExpr->Opnd(1))->GetConstVal(); + if (constVal->GetKind() == kConstInt && static_cast(constVal)->GetValue() == 0) { + condExpr = condExpr->Opnd(0); + } + } + if (condExpr->GetOpCode() == OP_dread) { + // Example: + // dassign %shortCircuit 0 (ne u1 i64 ( + // intrinsicop i64 C___builtin_expect ( + // cvt i64 i32 (dread i32 %levVar_9488), + // cvt i64 i32 (constval i32 1)), + // constval i64 0)) + // brfalse @shortCircuit_label_13351 (dread u32 %shortCircuit) + StIdx stIdx = static_cast(condExpr)->GetStIdx(); + FieldID fieldId = static_cast(condExpr)->GetFieldID(); + if (fieldId != 0) { + return; + } + if (brStmt.GetPrev() == nullptr || brStmt.GetPrev()->GetOpCode() != OP_dassign) { + return; // prev stmt may be a label, we skip it too + } + auto *dassign = static_cast(brStmt.GetPrev()); + if (stIdx != dassign->GetStIdx() || dassign->GetFieldID() != 0) { + return; + } + condExpr = dassign->GetRHS(); + } + if (condExpr->GetOpCode() == OP_ne) { + // opnd1 must be int const 0 + BaseNode *opnd1 = condExpr->Opnd(1); + if (opnd1->GetOpCode() != OP_constval) { + return; + } + auto *constVal = static_cast(opnd1)->GetConstVal(); + if (constVal->GetKind() != kConstInt || static_cast(constVal)->GetValue() != 0) { + return; + } + // opnd0 must be intrinsicop C___builtin_expect + BaseNode *opnd0 = condExpr->Opnd(0); + if (opnd0->GetOpCode() != OP_intrinsicop || + static_cast(opnd0)->GetIntrinsic() != INTRN_C___builtin_expect) { + return; + } + // We trust constant fold + auto *expectedConstExpr = opnd0->Opnd(1); + if (expectedConstExpr->GetOpCode() == OP_cvt) { + expectedConstExpr = expectedConstExpr->Opnd(0); + } + if (expectedConstExpr->GetOpCode() != OP_constval) { + return; + } + auto *expectedConstNode = static_cast(expectedConstExpr)->GetConstVal(); + CHECK_FATAL(expectedConstNode->GetKind() == kConstInt, "must be"); + auto expectedVal = static_cast(expectedConstNode)->GetValue(); + if (expectedVal != 0 && expectedVal != 1) { + 
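+      // Only expectation values of 0 or 1 carry a branch hint; anything else leaves the
+      // branch probability untouched.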
return; + } + bool likelyTrue = (expectedVal == 1); // The condition is likely to be true + bool likelyBranch = (brStmt.GetOpCode() == OP_brtrue ? likelyTrue : !likelyTrue); // High probability jump + if (likelyBranch) { + brStmt.SetBranchProb(kProbLikely); + } else { + brStmt.SetBranchProb(kProbUnlikely); + } + // Remove __builtin_expect + condExpr->SetOpnd(opnd0->Opnd(0), 0); + } +} + +void MIRLower::LowerBuiltinExpect(BlockNode &block) const { + auto *stmt = block.GetFirst(); + auto *last = block.GetLast(); + while (stmt != last) { + if (stmt->GetOpCode() == OP_brtrue || stmt->GetOpCode() == OP_brfalse) { + LowerCondGotoStmtWithBuiltinExpect(*static_cast(stmt)); + } + stmt = stmt->GetNext(); + } +} + +LabelIdx MIRLower::CreateCondGotoStmt(Opcode op, BlockNode &blk, const IfStmtNode &ifStmt) { + auto *brStmt = mirModule.CurFuncCodeMemPool()->New(op); + brStmt->SetOpnd(ifStmt.Opnd(), 0); + brStmt->SetSrcPos(ifStmt.GetSrcPos()); + LabelIdx lableIdx = mirModule.CurFunction()->GetLabelTab()->CreateLabel(); + mirModule.CurFunction()->GetLabelTab()->AddToStringLabelMap(lableIdx); + brStmt->SetOffset(lableIdx); + blk.AddStatement(brStmt); + if (GetFuncProfData()) { + GetFuncProfData()->CopyStmtFreq(brStmt->GetStmtID(), ifStmt.GetStmtID()); + } + bool thenEmpty = (ifStmt.GetThenPart() == nullptr) || (ifStmt.GetThenPart()->GetFirst() == nullptr); + if (thenEmpty) { + blk.AppendStatementsFromBlock(*ifStmt.GetElsePart()); + } else { + blk.AppendStatementsFromBlock(*ifStmt.GetThenPart()); + } + return lableIdx; +} + +void MIRLower::CreateBrFalseStmt(BlockNode &blk, const IfStmtNode &ifStmt) { + LabelIdx labelIdx = CreateCondGotoStmt(OP_brfalse, blk, ifStmt); + auto *lableStmt = mirModule.CurFuncCodeMemPool()->New(); + lableStmt->SetLabelIdx(labelIdx); + blk.AddStatement(lableStmt); + // set stmtfreqs + if (GetFuncProfData()) { + ASSERT(GetFuncProfData()->GetStmtFreq(ifStmt.GetThenPart()->GetStmtID()) >= 0, "sanity check"); + int64_t freq = static_cast(GetFuncProfData()->GetStmtFreq(ifStmt.GetStmtID()) - + GetFuncProfData()->GetStmtFreq(ifStmt.GetThenPart()->GetStmtID())); + GetFuncProfData()->SetStmtFreq(lableStmt->GetStmtID(), static_cast(freq)); + } +} + +void MIRLower::CreateBrTrueStmt(BlockNode &blk, const IfStmtNode &ifStmt) { + LabelIdx labelIdx = CreateCondGotoStmt(OP_brtrue, blk, ifStmt); + auto *lableStmt = mirModule.CurFuncCodeMemPool()->New(); + lableStmt->SetLabelIdx(labelIdx); + blk.AddStatement(lableStmt); + // set stmtfreqs + if (GetFuncProfData()) { + ASSERT(GetFuncProfData()->GetStmtFreq(ifStmt.GetElsePart()->GetStmtID()) >= 0, "sanity check"); + int64_t freq = static_cast(GetFuncProfData()->GetStmtFreq(ifStmt.GetStmtID()) - + GetFuncProfData()->GetStmtFreq(ifStmt.GetElsePart()->GetStmtID())); + GetFuncProfData()->SetStmtFreq(lableStmt->GetStmtID(), static_cast(freq)); + } +} + + +void MIRLower::CreateBrFalseAndGotoStmt(BlockNode &blk, const IfStmtNode &ifStmt) { + LabelIdx labelIdx = CreateCondGotoStmt(OP_brfalse, blk, ifStmt); + bool fallThroughFromThen = !IfStmtNoFallThrough(ifStmt); + LabelIdx gotoLableIdx = 0; + if (fallThroughFromThen) { + auto *gotoStmt = mirModule.CurFuncCodeMemPool()->New(OP_goto); + gotoLableIdx = mirModule.CurFunction()->GetLabelTab()->CreateLabel(); + mirModule.CurFunction()->GetLabelTab()->AddToStringLabelMap(gotoLableIdx); + gotoStmt->SetOffset(gotoLableIdx); + blk.AddStatement(gotoStmt); + // set stmtfreqs + if (GetFuncProfData()) { + GetFuncProfData()->CopyStmtFreq(gotoStmt->GetStmtID(), ifStmt.GetThenPart()->GetStmtID()); + } + } + auto *lableStmt = 
mirModule.CurFuncCodeMemPool()->New(); + lableStmt->SetLabelIdx(labelIdx); + blk.AddStatement(lableStmt); + blk.AppendStatementsFromBlock(*ifStmt.GetElsePart()); + // set stmtfreqs + if (GetFuncProfData()) { + GetFuncProfData()->CopyStmtFreq(lableStmt->GetStmtID(), ifStmt.GetElsePart()->GetStmtID()); + } + if (fallThroughFromThen) { + lableStmt = mirModule.CurFuncCodeMemPool()->New(); + lableStmt->SetLabelIdx(gotoLableIdx); + blk.AddStatement(lableStmt); + // set endlabel stmtfreqs + if (GetFuncProfData()) { + GetFuncProfData()->CopyStmtFreq(lableStmt->GetStmtID(), ifStmt.GetStmtID()); + } + } +} + +BlockNode *MIRLower::LowerIfStmt(IfStmtNode &ifStmt, bool recursive) { + bool thenEmpty = (ifStmt.GetThenPart() == nullptr) || (ifStmt.GetThenPart()->GetFirst() == nullptr); + bool elseEmpty = (ifStmt.GetElsePart() == nullptr) || (ifStmt.GetElsePart()->GetFirst() == nullptr); + if (recursive) { + if (!thenEmpty) { + ifStmt.SetThenPart(LowerBlock(*ifStmt.GetThenPart())); + } + if (!elseEmpty) { + ifStmt.SetElsePart(LowerBlock(*ifStmt.GetElsePart())); + } + } + auto *blk = mirModule.CurFuncCodeMemPool()->New(); + if (thenEmpty && elseEmpty) { + // generate EVAL statement + auto *evalStmt = mirModule.CurFuncCodeMemPool()->New(OP_eval); + evalStmt->SetOpnd(ifStmt.Opnd(), 0); + evalStmt->SetSrcPos(ifStmt.GetSrcPos()); + blk->AddStatement(evalStmt); + if (GetFuncProfData()) { + GetFuncProfData()->CopyStmtFreq(evalStmt->GetStmtID(), ifStmt.GetStmtID()); + } + } else if (elseEmpty) { + // brfalse + // + // label + CreateBrFalseStmt(*blk, ifStmt); + } else if (thenEmpty) { + // brtrue + // + // label + CreateBrTrueStmt(*blk, ifStmt); + } else { + // brfalse + // + // goto + // label + // + // label + CreateBrFalseAndGotoStmt(*blk, ifStmt); + } + return blk; +} + +static bool ConsecutiveCaseValsAndSameTarget(const CaseVector *switchTable) { + size_t caseNum = switchTable->size(); + int lastVal = static_cast((*switchTable)[0].first); + LabelIdx lblIdx = (*switchTable)[0].second; + for (size_t id = 1; id < caseNum; id++) { + lastVal++; + if (lastVal != (*switchTable)[id].first) { + return false; + } + if (lblIdx != (*switchTable)[id].second) { + return false; + } + } + return true; +} + +// if there is only 1 case branch, replace with conditional branch(es) and +// return the optimized multiple statements; otherwise, return nullptr +BlockNode *MIRLower::LowerSwitchStmt(SwitchNode *switchNode) { + CaseVector *switchTable = &switchNode->GetSwitchTable(); + if (switchNode->GetDefaultLabel() == 0) { + return nullptr; + } + if (switchTable->empty()) { // goto @defaultLabel + BlockNode *blk = mirModule.CurFuncCodeMemPool()->New(); + LabelIdx defaultLabel = switchNode->GetDefaultLabel(); + MIRBuilder *builder = mirModule.GetMIRBuilder(); + GotoNode *gotoStmt = builder->CreateStmtGoto(OP_goto, defaultLabel); + blk->AddStatement(gotoStmt); + return blk; + } + if (!ConsecutiveCaseValsAndSameTarget(switchTable)) { + return nullptr; + } + BlockNode *blk = mirModule.CurFuncCodeMemPool()->New(); + LabelIdx caseGotoLabel = switchTable->front().second; + LabelIdx defaultLabel = switchNode->GetDefaultLabel(); + int64 minCaseVal = switchTable->front().first; + int64 maxCaseVal = switchTable->back().first; + BaseNode *switchOpnd = switchNode->Opnd(0); + MIRBuilder *builder = mirModule.GetMIRBuilder(); + ConstvalNode *minCaseNode = builder->CreateIntConst(static_cast(minCaseVal), switchOpnd->GetPrimType()); + ConstvalNode *maxCaseNode = builder->CreateIntConst(static_cast(maxCaseVal), switchOpnd->GetPrimType()); + if 
(minCaseVal == maxCaseVal) { + // brtrue (x == minCaseVal) @case_goto_label + // goto @default_label + CompareNode *eqNode = builder->CreateExprCompare(OP_eq, *GlobalTables::GetTypeTable().GetInt32(), + *GlobalTables::GetTypeTable().GetTypeFromTyIdx(TyIdx(switchOpnd->GetPrimType())), switchOpnd, minCaseNode); + CondGotoNode *condGoto = builder->CreateStmtCondGoto(eqNode, OP_brtrue, caseGotoLabel); + blk->AddStatement(condGoto); + GotoNode *gotoStmt = builder->CreateStmtGoto(OP_goto, defaultLabel); + blk->AddStatement(gotoStmt); + } else { + // brtrue (x < minCaseVal) @default_label + // brtrue (x > maxCaseVal) @default_label + // goto @case_goto_label + CompareNode *ltNode = builder->CreateExprCompare(OP_lt, *GlobalTables::GetTypeTable().GetInt32(), + *GlobalTables::GetTypeTable().GetTypeFromTyIdx(TyIdx(switchOpnd->GetPrimType())), switchOpnd, minCaseNode); + CondGotoNode *condGoto = builder->CreateStmtCondGoto(ltNode, OP_brtrue, defaultLabel); + blk->AddStatement(condGoto); + CompareNode *gtNode = builder->CreateExprCompare(OP_gt, *GlobalTables::GetTypeTable().GetInt32(), + *GlobalTables::GetTypeTable().GetTypeFromTyIdx(TyIdx(switchOpnd->GetPrimType())), switchOpnd, maxCaseNode); + condGoto = builder->CreateStmtCondGoto(gtNode, OP_brtrue, defaultLabel); + blk->AddStatement(condGoto); + GotoNode *gotoStmt = builder->CreateStmtGoto(OP_goto, caseGotoLabel); + blk->AddStatement(gotoStmt); + } + return blk; +} + +// while +// is lowered to: +// brfalse +// label +// +// brtrue +// label +BlockNode *MIRLower::LowerWhileStmt(WhileStmtNode &whileStmt) { + ASSERT(whileStmt.GetBody() != nullptr, "nullptr check"); + whileStmt.SetBody(LowerBlock(*whileStmt.GetBody())); + auto *blk = mirModule.CurFuncCodeMemPool()->New(); + auto *brFalseStmt = mirModule.CurFuncCodeMemPool()->New(OP_brfalse); + brFalseStmt->SetOpnd(whileStmt.Opnd(0), 0); + brFalseStmt->SetSrcPos(whileStmt.GetSrcPos()); + LabelIdx lalbeIdx = mirModule.CurFunction()->GetLabelTab()->CreateLabel(); + mirModule.CurFunction()->GetLabelTab()->AddToStringLabelMap(lalbeIdx); + brFalseStmt->SetOffset(lalbeIdx); + blk->AddStatement(brFalseStmt); + blk->AppendStatementsFromBlock(*whileStmt.GetBody()); + if (MeOption::optForSize) { + // still keep while-do format to avoid coping too much condition-related stmt + LabelIdx whileLalbeIdx = mirModule.CurFunction()->GetLabelTab()->CreateLabel(); + mirModule.CurFunction()->GetLabelTab()->AddToStringLabelMap(whileLalbeIdx); + auto *lableStmt = mirModule.CurFuncCodeMemPool()->New(); + lableStmt->SetLabelIdx(whileLalbeIdx); + blk->InsertBefore(brFalseStmt, lableStmt); + auto *whilegotonode = mirModule.CurFuncCodeMemPool()->New(OP_goto, whileLalbeIdx); + if (GetFuncProfData() && blk->GetLast()) { + GetFuncProfData()->CopyStmtFreq(whilegotonode->GetStmtID(), blk->GetLast()->GetStmtID()); + } + blk->AddStatement(whilegotonode); + } else { + LabelIdx bodyLableIdx = mirModule.CurFunction()->GetLabelTab()->CreateLabel(); + mirModule.CurFunction()->GetLabelTab()->AddToStringLabelMap(bodyLableIdx); + auto *lableStmt = mirModule.CurFuncCodeMemPool()->New(); + lableStmt->SetLabelIdx(bodyLableIdx); + blk->InsertAfter(brFalseStmt, lableStmt); + // update frequency + if (GetFuncProfData()) { + GetFuncProfData()->CopyStmtFreq(lableStmt->GetStmtID(), whileStmt.GetStmtID()); + GetFuncProfData()->CopyStmtFreq(brFalseStmt->GetStmtID(), whileStmt.GetStmtID()); + } + auto *brTrueStmt = mirModule.CurFuncCodeMemPool()->New(OP_brtrue); + brTrueStmt->SetOpnd(whileStmt.Opnd(0)->CloneTree(mirModule.GetCurFuncCodeMPAllocator()), 0); 
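+    // The loop-closing brtrue re-evaluates a cloned copy of the condition and jumps back to
+    // the body label, completing the brfalse/label/body/brtrue/label shape described above.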
+ brTrueStmt->SetOffset(bodyLableIdx); + if (GetFuncProfData() && blk->GetLast()) { + GetFuncProfData()->CopyStmtFreq(brTrueStmt->GetStmtID(), whileStmt.GetBody()->GetStmtID()); + } + blk->AddStatement(brTrueStmt); + } + auto *lableStmt = mirModule.CurFuncCodeMemPool()->New(); + lableStmt->SetLabelIdx(lalbeIdx); + blk->AddStatement(lableStmt); + if (GetFuncProfData()) { + int64_t freq = static_cast(GetFuncProfData()->GetStmtFreq(whileStmt.GetStmtID()) - + GetFuncProfData()->GetStmtFreq(blk->GetLast()->GetStmtID())); + ASSERT(freq >= 0, "sanity check"); + GetFuncProfData()->SetStmtFreq(lableStmt->GetStmtID(), static_cast(freq)); + } + return blk; +} + +// doloop (,,) {} +// is lowered to: +// dassign () +// brfalse +// label +// +// dassign () +// brtrue +// label +BlockNode *MIRLower::LowerDoloopStmt(DoloopNode &doloop) { + ASSERT(doloop.GetDoBody() != nullptr, "nullptr check"); + doloop.SetDoBody(LowerBlock(*doloop.GetDoBody())); + int64_t doloopnodeFreq = 0; + int64_t bodynodeFreq = 0; + if (GetFuncProfData()) { + doloopnodeFreq = static_cast(GetFuncProfData()->GetStmtFreq(doloop.GetStmtID())); + bodynodeFreq = static_cast(GetFuncProfData()->GetStmtFreq(doloop.GetDoBody()->GetStmtID())); + } + auto *blk = mirModule.CurFuncCodeMemPool()->New(); + if (doloop.IsPreg()) { + PregIdx regIdx = static_cast(doloop.GetDoVarStIdx().FullIdx()); + MIRPreg *mirPreg = mirModule.CurFunction()->GetPregTab()->PregFromPregIdx(regIdx); + PrimType primType = mirPreg->GetPrimType(); + ASSERT(primType != kPtyInvalid, "runtime check error"); + auto *startRegassign = mirModule.CurFuncCodeMemPool()->New(); + startRegassign->SetRegIdx(regIdx); + startRegassign->SetPrimType(primType); + startRegassign->SetOpnd(doloop.GetStartExpr(), 0); + startRegassign->SetSrcPos(doloop.GetSrcPos()); + blk->AddStatement(startRegassign); + } else { + auto *startDassign = mirModule.CurFuncCodeMemPool()->New(); + startDassign->SetStIdx(doloop.GetDoVarStIdx()); + startDassign->SetRHS(doloop.GetStartExpr()); + startDassign->SetSrcPos(doloop.GetSrcPos()); + blk->AddStatement(startDassign); + } + if (GetFuncProfData()) { + GetFuncProfData()->SetStmtFreq(blk->GetLast()->GetStmtID(), static_cast(doloopnodeFreq - bodynodeFreq)); + } + auto *brFalseStmt = mirModule.CurFuncCodeMemPool()->New(OP_brfalse); + brFalseStmt->SetOpnd(doloop.GetCondExpr(), 0); + LabelIdx lIdx = mirModule.CurFunction()->GetLabelTab()->CreateLabel(); + mirModule.CurFunction()->GetLabelTab()->AddToStringLabelMap(lIdx); + brFalseStmt->SetOffset(lIdx); + blk->AddStatement(brFalseStmt); + // udpate stmtFreq + if (GetFuncProfData()) { + GetFuncProfData()->SetStmtFreq(brFalseStmt->GetStmtID(), static_cast(doloopnodeFreq - bodynodeFreq)); + } + LabelIdx bodyLabelIdx = mirModule.CurFunction()->GetLabelTab()->CreateLabel(); + mirModule.CurFunction()->GetLabelTab()->AddToStringLabelMap(bodyLabelIdx); + auto *labelStmt = mirModule.CurFuncCodeMemPool()->New(); + labelStmt->SetLabelIdx(bodyLabelIdx); + blk->AddStatement(labelStmt); + // udpate stmtFreq + if (GetFuncProfData()) { + GetFuncProfData()->SetStmtFreq(labelStmt->GetStmtID(), static_cast(bodynodeFreq)); + } + blk->AppendStatementsFromBlock(*doloop.GetDoBody()); + if (doloop.IsPreg()) { + PregIdx regIdx = (PregIdx)doloop.GetDoVarStIdx().FullIdx(); + MIRPreg *mirPreg = mirModule.CurFunction()->GetPregTab()->PregFromPregIdx(regIdx); + PrimType doVarPType = mirPreg->GetPrimType(); + ASSERT(doVarPType != kPtyInvalid, "runtime check error"); + auto *readDoVar = mirModule.CurFuncCodeMemPool()->New(); + 
readDoVar->SetRegIdx(regIdx); + readDoVar->SetPrimType(doVarPType); + auto *add = + mirModule.CurFuncCodeMemPool()->New(OP_add, doVarPType, doloop.GetIncrExpr(), readDoVar); + auto *endRegassign = mirModule.CurFuncCodeMemPool()->New(); + endRegassign->SetRegIdx(regIdx); + endRegassign->SetPrimType(doVarPType); + endRegassign->SetOpnd(add, 0); + blk->AddStatement(endRegassign); + } else { + const MIRSymbol *doVarSym = mirModule.CurFunction()->GetLocalOrGlobalSymbol(doloop.GetDoVarStIdx()); + PrimType doVarPType = doVarSym->GetType()->GetPrimType(); + auto *readDovar = + mirModule.CurFuncCodeMemPool()->New(OP_dread, doVarPType, doloop.GetDoVarStIdx(), 0); + auto *add = + mirModule.CurFuncCodeMemPool()->New(OP_add, doVarPType, readDovar, doloop.GetIncrExpr()); + auto *endDassign = mirModule.CurFuncCodeMemPool()->New(); + endDassign->SetStIdx(doloop.GetDoVarStIdx()); + endDassign->SetRHS(add); + blk->AddStatement(endDassign); + } + auto *brTrueStmt = mirModule.CurFuncCodeMemPool()->New(OP_brtrue); + brTrueStmt->SetOpnd(doloop.GetCondExpr()->CloneTree(mirModule.GetCurFuncCodeMPAllocator()), 0); + brTrueStmt->SetOffset(bodyLabelIdx); + blk->AddStatement(brTrueStmt); + // udpate stmtFreq + if (GetFuncProfData()) { + GetFuncProfData()->SetStmtFreq(brTrueStmt->GetStmtID(), static_cast(bodynodeFreq)); + } + labelStmt = mirModule.CurFuncCodeMemPool()->New(); + labelStmt->SetLabelIdx(lIdx); + blk->AddStatement(labelStmt); + // udpate stmtFreq + if (GetFuncProfData()) { + GetFuncProfData()->SetStmtFreq(labelStmt->GetStmtID(), static_cast(doloopnodeFreq - bodynodeFreq)); + } + return blk; +} + +// dowhile +// is lowered to: +// label +// +// brtrue +BlockNode *MIRLower::LowerDowhileStmt(WhileStmtNode &doWhileStmt) { + ASSERT(doWhileStmt.GetBody() != nullptr, "nullptr check"); + doWhileStmt.SetBody(LowerBlock(*doWhileStmt.GetBody())); + auto *blk = mirModule.CurFuncCodeMemPool()->New(); + LabelIdx lIdx = mirModule.CurFunction()->GetLabelTab()->CreateLabel(); + mirModule.CurFunction()->GetLabelTab()->AddToStringLabelMap(lIdx); + auto *labelStmt = mirModule.CurFuncCodeMemPool()->New(); + labelStmt->SetLabelIdx(lIdx); + blk->AddStatement(labelStmt); + blk->AppendStatementsFromBlock(*doWhileStmt.GetBody()); + auto *brTrueStmt = mirModule.CurFuncCodeMemPool()->New(OP_brtrue); + brTrueStmt->SetOpnd(doWhileStmt.Opnd(0), 0); + brTrueStmt->SetOffset(lIdx); + blk->AddStatement(brTrueStmt); + return blk; +} + +BlockNode *MIRLower::LowerBlock(BlockNode &block) { + auto *newBlock = mirModule.CurFuncCodeMemPool()->New(); + newBlock->SetSrcPos(block.GetSrcPos()); + BlockNode *tmp = nullptr; + if (block.GetFirst() == nullptr) { + newBlock->SetStmtID(block.GetStmtID()); // keep original block stmtid + return newBlock; + } + StmtNode *nextStmt = block.GetFirst(); + ASSERT(nextStmt != nullptr, "nullptr check"); + do { + StmtNode *stmt = nextStmt; + nextStmt = stmt->GetNext(); + switch (stmt->GetOpCode()) { + case OP_if: + tmp = LowerIfStmt(static_cast(*stmt), true); + newBlock->AppendStatementsFromBlock(*tmp); + break; + case OP_switch: + tmp = LowerSwitchStmt(static_cast(stmt)); + if (tmp != nullptr) { + newBlock->AppendStatementsFromBlock(*tmp); + } else { + newBlock->AddStatement(stmt); + } + break; + case OP_while: + newBlock->AppendStatementsFromBlock(*LowerWhileStmt(static_cast(*stmt))); + break; + case OP_dowhile: + newBlock->AppendStatementsFromBlock(*LowerDowhileStmt(static_cast(*stmt))); + break; + case OP_doloop: + newBlock->AppendStatementsFromBlock(*LowerDoloopStmt(static_cast(*stmt))); + break; + case 
OP_icallassigned: + case OP_icall: { + if (mirModule.IsCModule()) { + // convert to icallproto/icallprotoassigned + IcallNode *ic = static_cast(stmt); + ic->SetOpCode(stmt->GetOpCode() == OP_icall ? OP_icallproto : OP_icallprotoassigned); + MIRFuncType *funcType = FuncTypeFromFuncPtrExpr(stmt->Opnd(0)); + CHECK_FATAL(funcType != nullptr, "MIRLower::LowerBlock: cannot find prototype for icall"); + ic->SetRetTyIdx(funcType->GetTypeIndex()); + MIRType *retType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(funcType->GetRetTyIdx()); + if (retType->GetPrimType() == PTY_agg && retType->GetSize() > k16BitSize) { + funcType->funcAttrs.SetAttr(FUNCATTR_firstarg_return); + } + } + newBlock->AddStatement(stmt); + break; + } + case OP_block: + tmp = LowerBlock(static_cast(*stmt)); + newBlock->AppendStatementsFromBlock(*tmp); + break; + default: + newBlock->AddStatement(stmt); + break; + } + } while (nextStmt != nullptr); + newBlock->SetStmtID(block.GetStmtID()); // keep original block stmtid + return newBlock; +} + +// for lowering OP_cand and OP_cior embedded in the expression x which belongs +// to curstmt +BaseNode* MIRLower::LowerEmbeddedCandCior(BaseNode *x, StmtNode *curstmt, BlockNode *blk) { + if (x->GetOpCode() == OP_cand || x->GetOpCode() == OP_cior) { + MIRBuilder *builder = mirModule.GetMIRBuilder(); + BinaryNode *bnode = static_cast(x); + bnode->SetOpnd(LowerEmbeddedCandCior(bnode->Opnd(0), curstmt, blk), 0); + PregIdx pregIdx = mirFunc->GetPregTab()->CreatePreg(x->GetPrimType()); + RegassignNode *regass = builder->CreateStmtRegassign(x->GetPrimType(), pregIdx, bnode->Opnd(0)); + blk->InsertBefore(curstmt, regass); + LabelIdx labIdx = mirFunc->GetLabelTab()->CreateLabel(); + mirFunc->GetLabelTab()->AddToStringLabelMap(labIdx); + BaseNode *cond = builder->CreateExprRegread(x->GetPrimType(), pregIdx); + CondGotoNode *cgoto = mirFunc->GetCodeMempool()->New( + x->GetOpCode() == OP_cior ? 
OP_brtrue : OP_brfalse); + cgoto->SetOpnd(cond, 0); + cgoto->SetOffset(labIdx); + blk->InsertBefore(curstmt, cgoto); + + bnode->SetOpnd(LowerEmbeddedCandCior(bnode->Opnd(1), curstmt, blk), 1); + regass = builder->CreateStmtRegassign(x->GetPrimType(), pregIdx, bnode->Opnd(1)); + blk->InsertBefore(curstmt, regass); + LabelNode *lbl = mirFunc->GetCodeMempool()->New(); + lbl->SetLabelIdx(labIdx); + blk->InsertBefore(curstmt, lbl); + return builder->CreateExprRegread(x->GetPrimType(), pregIdx); + } else { + for (size_t i = 0; i < x->GetNumOpnds(); i++) { + x->SetOpnd(LowerEmbeddedCandCior(x->Opnd(i), curstmt, blk), i); + } + return x; + } +} + +// for lowering all appearances of OP_cand and OP_cior associated with condional +// branches in the block +void MIRLower::LowerCandCior(BlockNode &block) { +if (block.GetFirst() == nullptr) { + return; +} +StmtNode *nextStmt = block.GetFirst(); +do { + StmtNode *stmt = nextStmt; + nextStmt = stmt->GetNext(); + if (stmt->IsCondBr() && + (stmt->Opnd(0)->GetOpCode() == OP_cand || stmt->Opnd(0)->GetOpCode() == OP_cior)) { + CondGotoNode *condGoto = static_cast(stmt); + BinaryNode *cond = static_cast(condGoto->Opnd(0)); + if ((stmt->GetOpCode() == OP_brfalse && cond->GetOpCode() == OP_cand) || + (stmt->GetOpCode() == OP_brtrue && cond->GetOpCode() == OP_cior)) { + // short-circuit target label is same as original condGoto stmt + condGoto->SetOpnd(cond->GetBOpnd(0), 0); + auto *newCondGoto = mirModule.CurFuncCodeMemPool()->New(Opcode(stmt->GetOpCode())); + newCondGoto->SetOpnd(cond->GetBOpnd(1), 0); + newCondGoto->SetOffset(condGoto->GetOffset()); + block.InsertAfter(condGoto, newCondGoto); + nextStmt = stmt; // so it will be re-processed if another cand/cior + } else { // short-circuit target is next statement + LabelIdx lIdx; + LabelNode *labelStmt = nullptr; + if (nextStmt->GetOpCode() == OP_label) { + labelStmt = static_cast(nextStmt); + lIdx = labelStmt->GetLabelIdx(); + } else { + lIdx = mirModule.CurFunction()->GetLabelTab()->CreateLabel(); + mirModule.CurFunction()->GetLabelTab()->AddToStringLabelMap(lIdx); + labelStmt = mirModule.CurFuncCodeMemPool()->New(); + labelStmt->SetLabelIdx(lIdx); + block.InsertAfter(condGoto, labelStmt); + } + auto *newCondGoto = mirModule.CurFuncCodeMemPool()->New( + stmt->GetOpCode() == OP_brfalse ? 
OP_brtrue : OP_brfalse); + newCondGoto->SetOpnd(cond->GetBOpnd(0), 0); + newCondGoto->SetOffset(lIdx); + block.InsertBefore(condGoto, newCondGoto); + condGoto->SetOpnd(cond->GetBOpnd(1), 0); + nextStmt = newCondGoto; // so it will be re-processed if another cand/cior + } + } else { // call LowerEmbeddedCandCior() for all the expression operands + for (size_t i = 0; i < stmt->GetNumOpnds(); i++) { + stmt->SetOpnd(LowerEmbeddedCandCior(stmt->Opnd(i), stmt, &block), i); + } + } + } while (nextStmt != nullptr); +} + +void MIRLower::LowerFunc(MIRFunction &func) { + if (GetOptLevel() > 0) { + ExtConstantFold ecf(func.GetModule()); + (void)ecf.ExtSimplify(func.GetBody()); + } + + mirModule.SetCurFunction(&func); + if (IsLowerExpandArray()) { + ExpandArrayMrt(func); + } + BlockNode *origBody = func.GetBody(); + ASSERT(origBody != nullptr, "nullptr check"); + BlockNode *newBody = LowerBlock(*origBody); + ASSERT(newBody != nullptr, "nullptr check"); + LowerBuiltinExpect(*newBody); + if (!InLFO()) { + LowerCandCior(*newBody); + } + func.SetBody(newBody); +} + +BaseNode *MIRLower::LowerFarray(ArrayNode *array) { + auto *farrayType = static_cast(array->GetArrayType(GlobalTables::GetTypeTable())); + size_t eSize = GlobalTables::GetTypeTable().GetTypeFromTyIdx(farrayType->GetElemTyIdx())->GetSize(); + MIRType &arrayType = *GlobalTables::GetTypeTable().GetTypeFromTyIdx(TyIdx(array->GetPrimType())); + /* how about multi-dimension array? */ + if (array->GetIndex(0)->GetOpCode() == OP_constval) { + const ConstvalNode *constvalNode = static_cast(array->GetIndex(0)); + if (constvalNode->GetConstVal()->GetKind() == kConstInt) { + const MIRIntConst *pIntConst = static_cast(constvalNode->GetConstVal()); + CHECK_FATAL(mirModule.IsJavaModule() || !pIntConst->IsNegative(), "Array index should >= 0."); + int64 eleOffset = pIntConst->GetExtValue() * static_cast(eSize); + + BaseNode *baseNode = array->GetBase(); + if (eleOffset == 0) { + return baseNode; + } + + MIRIntConst *eleConst = + GlobalTables::GetIntConstTable().GetOrCreateIntConst(static_cast(eleOffset), arrayType); + BaseNode *offsetNode = mirModule.CurFuncCodeMemPool()->New(eleConst); + offsetNode->SetPrimType(array->GetPrimType()); + + BaseNode *rAdd = mirModule.CurFuncCodeMemPool()->New(OP_add); + rAdd->SetPrimType(array->GetPrimType()); + rAdd->SetOpnd(baseNode, 0); + rAdd->SetOpnd(offsetNode, 1); + return rAdd; + } + } + + BaseNode *rMul = nullptr; + + BaseNode *baseNode = array->GetBase(); + + BaseNode *rAdd = mirModule.CurFuncCodeMemPool()->New(OP_add); + rAdd->SetPrimType(array->GetPrimType()); + rAdd->SetOpnd(baseNode, 0); + rAdd->SetOpnd(rMul, 1); + auto *newAdd = ConstantFold(mirModule).Fold(rAdd); + rAdd = (newAdd != nullptr ? newAdd : rAdd); + return rAdd; +} + +BaseNode *MIRLower::LowerCArray(ArrayNode *array) { + MIRType *aType = array->GetArrayType(GlobalTables::GetTypeTable()); + if (aType->GetKind() == kTypeJArray) { + return array; + } + if (aType->GetKind() == kTypeFArray) { + return LowerFarray(array); + } + + MIRArrayType *arrayType = static_cast(aType); + /* There are two cases where dimension > 1. + * 1) arrayType->dim > 1. Process the current arrayType. (nestedArray = false) + * 2) arrayType->dim == 1, but arraytype->eTyIdx is another array. (nestedArray = true) + * Assume at this time 1) and 2) cannot mix. + * Along with the array dimension, there is the array indexing. + * It is allowed to index arrays less than the dimension. + * This is dictated by the number of indexes. 
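+   * As a worked example (illustrative only): for an array declared as
+   * T a[d0][d1][d2], the reference a[i][j][k] is flattened by the code below
+   * into the byte offset ((i * d1 + j) * d2 + k) * sizeof(T); the per-index
+   * multipliers are the mpyDim values, and the final multiply by the element
+   * size happens after the index loop.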
+ */ + bool nestedArray = false; + uint64 dim = arrayType->GetDim(); + MIRType *innerType = nullptr; + MIRArrayType *innerArrayType = nullptr; + uint64 elemSize = 0; + if (dim == 1) { + innerType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(arrayType->GetElemTyIdx()); + if (innerType->GetKind() == kTypeArray) { + nestedArray = true; + do { + innerArrayType = static_cast(innerType); + elemSize = RoundUp(innerArrayType->GetElemType()->GetSize(), + arrayType->GetElemType()->GetAlign()); + dim++; + innerType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(innerArrayType->GetElemTyIdx()); + } while (innerType->GetKind() == kTypeArray); + } + } + + size_t numIndex = array->NumOpnds() - 1; + MIRArrayType *curArrayType = arrayType; + BaseNode *resNode = array->GetIndex(0); + if (dim > 1) { + BaseNode *prevNode = nullptr; + for (size_t i = 0; (i < dim) && (i < numIndex); ++i) { + uint32 mpyDim = 1; + if (nestedArray) { + CHECK_FATAL(arrayType->GetSizeArrayItem(0) > 0, "Zero size array dimension"); + innerType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(curArrayType->GetElemTyIdx()); + curArrayType = static_cast(innerType); + while (innerType->GetKind() == kTypeArray) { + innerArrayType = static_cast(innerType); + mpyDim *= innerArrayType->GetSizeArrayItem(0); + innerType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(innerArrayType->GetElemTyIdx()); + } + } else { + CHECK_FATAL(arrayType->GetSizeArrayItem(static_cast(i)) > 0, "Zero size array dimension"); + for (size_t j = i + 1; j < dim; ++j) { + mpyDim *= arrayType->GetSizeArrayItem(static_cast(j)); + } + } + + BaseNode *index = static_cast(array->GetIndex(i)); + bool isConst = false; + uint64 indexVal = 0; + if (index->op == OP_constval) { + ConstvalNode *constNode = static_cast(index); + indexVal = static_cast((static_cast(constNode->GetConstVal()))->GetExtValue()); + isConst = true; + MIRIntConst *newConstNode = mirModule.GetMemPool()->New( + indexVal * mpyDim, *GlobalTables::GetTypeTable().GetTypeFromTyIdx(TyIdx(array->GetPrimType()))); + BaseNode *newValNode = mirModule.CurFuncCodeMemPool()->New(newConstNode); + newValNode->SetPrimType(array->GetPrimType()); + if (i == 0) { + prevNode = newValNode; + continue; + } else { + resNode = newValNode; + } + } + if (i > 0 && isConst == false) { + resNode = array->GetIndex(i); + } + + BaseNode *mpyNode; + if (isConst) { + MIRIntConst *mulConst = mirModule.GetMemPool()->New( + static_cast(mpyDim) * indexVal, + *GlobalTables::GetTypeTable().GetTypeFromTyIdx(TyIdx(array->GetPrimType()))); + BaseNode *mulSize = mirModule.CurFuncCodeMemPool()->New(mulConst); + mulSize->SetPrimType(array->GetPrimType()); + mpyNode = mulSize; + } else if (mpyDim == 1 && prevNode) { + mpyNode = prevNode; + prevNode = resNode; + } else { + mpyNode = mirModule.CurFuncCodeMemPool()->New(OP_mul); + mpyNode->SetPrimType(array->GetPrimType()); + MIRIntConst *mulConst = mirModule.GetMemPool()->New( + mpyDim, *GlobalTables::GetTypeTable().GetTypeFromTyIdx(TyIdx(array->GetPrimType()))); + BaseNode *mulSize = mirModule.CurFuncCodeMemPool()->New(mulConst); + mulSize->SetPrimType(array->GetPrimType()); + mpyNode->SetOpnd(mulSize, 1); + PrimType signedInt4AddressCompute = GetSignedPrimType(array->GetPrimType()); + if (!IsPrimitiveInteger(resNode->GetPrimType())) { + resNode = mirModule.CurFuncCodeMemPool()->New(OP_cvt, signedInt4AddressCompute, + resNode->GetPrimType(), resNode); + } else if (GetPrimTypeSize(resNode->GetPrimType()) != GetPrimTypeSize(array->GetPrimType())) { + resNode = mirModule.CurFuncCodeMemPool()->New(OP_cvt, 
array->GetPrimType(), + GetRegPrimType(resNode->GetPrimType()), resNode); + } + mpyNode->SetOpnd(resNode, 0); + } + if (i == 0) { + prevNode = mpyNode; + continue; + } + BaseNode *newResNode = mirModule.CurFuncCodeMemPool()->New(OP_add); + newResNode->SetPrimType(array->GetPrimType()); + newResNode->SetOpnd(mpyNode, 0); + if (NeedCvtOrRetype(prevNode->GetPrimType(), array->GetPrimType())) { + prevNode = mirModule.CurFuncCodeMemPool()->New(OP_cvt, array->GetPrimType(), + GetRegPrimType(prevNode->GetPrimType()), prevNode); + } + newResNode->SetOpnd(prevNode, 1); + prevNode = newResNode; + } + resNode = prevNode; + } + + BaseNode *rMul = nullptr; + // esize is the size of the array element (eg. int = 4 long = 8) + uint64 esize; + if (nestedArray) { + esize = elemSize; + } else { + esize = arrayType->GetElemType()->GetSize(); + } + Opcode opadd = OP_add; + MIRIntConst *econst = mirModule.GetMemPool()->New(esize, + *GlobalTables::GetTypeTable().GetTypeFromTyIdx(TyIdx(array->GetPrimType()))); + BaseNode *eSize = mirModule.CurFuncCodeMemPool()->New(econst); + eSize->SetPrimType(array->GetPrimType()); + rMul = mirModule.CurFuncCodeMemPool()->New(OP_mul); + PrimType signedInt4AddressCompute = GetSignedPrimType(array->GetPrimType()); + if (!IsPrimitiveInteger(resNode->GetPrimType())) { + resNode = mirModule.CurFuncCodeMemPool()->New(OP_cvt, signedInt4AddressCompute, + resNode->GetPrimType(), resNode); + } else if (GetPrimTypeSize(resNode->GetPrimType()) != GetPrimTypeSize(array->GetPrimType())) { + resNode = mirModule.CurFuncCodeMemPool()->New(OP_cvt, array->GetPrimType(), + GetRegPrimType(resNode->GetPrimType()), resNode); + } + rMul->SetPrimType(resNode->GetPrimType()); + rMul->SetOpnd(resNode, 0); + rMul->SetOpnd(eSize, 1); + BaseNode *baseNode = array->GetBase(); + BaseNode *rAdd = mirModule.CurFuncCodeMemPool()->New(opadd); + rAdd->SetPrimType(array->GetPrimType()); + rAdd->SetOpnd(baseNode, 0); + rAdd->SetOpnd(rMul, 1); + auto *newAdd = ConstantFold(mirModule).Fold(rAdd); + rAdd = (newAdd != nullptr ? 
newAdd : rAdd); + return rAdd; +} + +IfStmtNode *MIRLower::ExpandArrayMrtIfBlock(IfStmtNode &node) { + if (node.GetThenPart() != nullptr) { + node.SetThenPart(ExpandArrayMrtBlock(*node.GetThenPart())); + } + if (node.GetElsePart() != nullptr) { + node.SetElsePart(ExpandArrayMrtBlock(*node.GetElsePart())); + } + return &node; +} + +WhileStmtNode *MIRLower::ExpandArrayMrtWhileBlock(WhileStmtNode &node) { + if (node.GetBody() != nullptr) { + node.SetBody(ExpandArrayMrtBlock(*node.GetBody())); + } + return &node; +} + +DoloopNode *MIRLower::ExpandArrayMrtDoloopBlock(DoloopNode &node) { + if (node.GetDoBody() != nullptr) { + node.SetDoBody(ExpandArrayMrtBlock(*node.GetDoBody())); + } + return &node; +} + +ForeachelemNode *MIRLower::ExpandArrayMrtForeachelemBlock(ForeachelemNode &node) { + if (node.GetLoopBody() != nullptr) { + node.SetLoopBody(ExpandArrayMrtBlock(*node.GetLoopBody())); + } + return &node; +} + +void MIRLower::AddArrayMrtMpl(BaseNode &exp, BlockNode &newBlock) { + MIRModule &mod = mirModule; + MIRBuilder *builder = mod.GetMIRBuilder(); + for (size_t i = 0; i < exp.NumOpnds(); ++i) { + ASSERT(exp.Opnd(i) != nullptr, "nullptr check"); + AddArrayMrtMpl(*exp.Opnd(i), newBlock); + } + if (exp.GetOpCode() == OP_array) { + auto &arrayNode = static_cast(exp); + if (arrayNode.GetBoundsCheck()) { + BaseNode *arrAddr = arrayNode.Opnd(0); + BaseNode *index = arrayNode.Opnd(1); + ASSERT(index != nullptr, "null ptr check"); + MIRType *indexType = GlobalTables::GetTypeTable().GetPrimType(index->GetPrimType()); + UnaryStmtNode *nullCheck = builder->CreateStmtUnary(OP_assertnonnull, arrAddr); + newBlock.AddStatement(nullCheck); +#if DO_LT_0_CHECK + ConstvalNode *indexZero = builder->GetConstUInt32(0); + CompareNode *lessZero = builder->CreateExprCompare(OP_lt, *GlobalTables::GetTypeTable().GetUInt1(), + *GlobalTables::GetTypeTable().GetUInt32(), index, indexZero); +#endif + MIRType *infoLenType = GlobalTables::GetTypeTable().GetInt32(); + MapleVector arguments(builder->GetCurrentFuncCodeMpAllocator()->Adapter()); + arguments.push_back(arrAddr); + BaseNode *arrLen = builder->CreateExprIntrinsicop(INTRN_JAVA_ARRAY_LENGTH, OP_intrinsicop, + *infoLenType, arguments); + BaseNode *cpmIndex = index; + if (arrLen->GetPrimType() != index->GetPrimType()) { + cpmIndex = builder->CreateExprTypeCvt(OP_cvt, *infoLenType, *indexType, index); + } + CompareNode *largeLen = builder->CreateExprCompare(OP_ge, *GlobalTables::GetTypeTable().GetUInt1(), + *GlobalTables::GetTypeTable().GetUInt32(), cpmIndex, arrLen); + // maybe should use cior +#if DO_LT_0_CHECK + BinaryNode *indexCon = + builder->CreateExprBinary(OP_lior, *GlobalTables::GetTypeTable().GetUInt1(), lessZero, largeLen); +#endif + MapleVector args(builder->GetCurrentFuncCodeMpAllocator()->Adapter()); +#if DO_LT_0_CHECK + args.push_back(indexCon); + IntrinsiccallNode *boundaryTrinsicCall = builder->CreateStmtIntrinsicCall(INTRN_MPL_BOUNDARY_CHECK, args); +#else + args.push_back(largeLen); + IntrinsiccallNode *boundaryTrinsicCall = builder->CreateStmtIntrinsicCall(INTRN_MPL_BOUNDARY_CHECK, args); +#endif + newBlock.AddStatement(boundaryTrinsicCall); + } + } +} + +BlockNode *MIRLower::ExpandArrayMrtBlock(BlockNode &block) { + auto *newBlock = mirModule.CurFuncCodeMemPool()->New(); + if (block.GetFirst() == nullptr) { + return newBlock; + } + StmtNode *nextStmt = block.GetFirst(); + do { + StmtNode *stmt = nextStmt; + ASSERT(stmt != nullptr, "nullptr check"); + nextStmt = stmt->GetNext(); + switch (stmt->GetOpCode()) { + case OP_if: + 
newBlock->AddStatement(ExpandArrayMrtIfBlock(static_cast(*stmt))); + break; + case OP_while: + newBlock->AddStatement(ExpandArrayMrtWhileBlock(static_cast(*stmt))); + break; + case OP_dowhile: + newBlock->AddStatement(ExpandArrayMrtWhileBlock(static_cast(*stmt))); + break; + case OP_doloop: + newBlock->AddStatement(ExpandArrayMrtDoloopBlock(static_cast(*stmt))); + break; + case OP_foreachelem: + newBlock->AddStatement(ExpandArrayMrtForeachelemBlock(static_cast(*stmt))); + break; + case OP_block: + newBlock->AddStatement(ExpandArrayMrtBlock(static_cast(*stmt))); + break; + default: + AddArrayMrtMpl(*stmt, *newBlock); + newBlock->AddStatement(stmt); + break; + } + } while (nextStmt != nullptr); + return newBlock; +} + +void MIRLower::ExpandArrayMrt(MIRFunction &func) { + if (ShouldOptArrayMrt(func)) { + BlockNode *origBody = func.GetBody(); + ASSERT(origBody != nullptr, "nullptr check"); + BlockNode *newBody = ExpandArrayMrtBlock(*origBody); + func.SetBody(newBody); + } +} + +MIRFuncType *MIRLower::FuncTypeFromFuncPtrExpr(BaseNode *x) { + MIRFuncType *res = nullptr; + MIRFunction *func = mirModule.CurFunction(); + switch (x->GetOpCode()) { + case OP_regread: { + RegreadNode *regread = static_cast(x); + MIRPreg *preg = func->GetPregTab()->PregFromPregIdx(regread->GetRegIdx()); + // see if it is promoted from a symbol + if (preg->GetOp() == OP_dread) { + const MIRSymbol *symbol = preg->rematInfo.sym; + MIRType *mirType = symbol->GetType(); + if (preg->fieldID != 0) { + MIRStructType *structty = static_cast(mirType); + FieldPair thepair = structty->TraverseToField(preg->fieldID); + mirType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(thepair.second.first); + } + + if (mirType->GetKind() == kTypePointer) { + res = static_cast(mirType)->GetPointedFuncType(); + } + if (res != nullptr) { + break; + } + } + // check if a formal promoted to preg + for (FormalDef &formalDef : func->GetFormalDefVec()) { + if (!formalDef.formalSym->IsPreg()) { + continue; + } + if (formalDef.formalSym->GetPreg() == preg) { + MIRType *mirType = formalDef.formalSym->GetType(); + if (mirType->GetKind() == kTypePointer) { + res = static_cast(mirType)->GetPointedFuncType(); + } + break; + } + } + break; + } + case OP_dread: { + DreadNode *dread = static_cast(x); + MIRSymbol *symbol = func->GetLocalOrGlobalSymbol(dread->GetStIdx()); + MIRType *mirType = symbol->GetType(); + if (dread->GetFieldID() != 0) { + MIRStructType *structty = static_cast(mirType); + FieldPair thepair = structty->TraverseToField(dread->GetFieldID()); + mirType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(thepair.second.first); + } + if (mirType->GetKind() == kTypePointer) { + res = static_cast(mirType)->GetPointedFuncType(); + } + break; + } + case OP_iread: { + IreadNode *iread = static_cast(x); + MIRPtrType *ptrType = static_cast(iread->GetType()); + MIRType *mirType = ptrType->GetPointedType(); + if (mirType->GetKind() == kTypeFunction) { + res = static_cast(mirType); + } else if (mirType->GetKind() == kTypePointer) { + res = static_cast(mirType)->GetPointedFuncType(); + } + break; + } + case OP_addroffunc: { + AddroffuncNode *addrofFunc = static_cast(x); + PUIdx puIdx = addrofFunc->GetPUIdx(); + MIRFunction *f = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(puIdx); + res = f->GetMIRFuncType(); + break; + } + case OP_retype: { + MIRType *mirType = GlobalTables::GetTypeTable().GetTypeFromTyIdx( + static_cast(x)->GetTyIdx()); + if (mirType->GetKind() == kTypePointer) { + res = static_cast(mirType)->GetPointedFuncType(); + } + if (res == 
nullptr) { + res = FuncTypeFromFuncPtrExpr(x->Opnd(kNodeFirstOpnd)); + } + break; + } + case OP_select: { + res = FuncTypeFromFuncPtrExpr(x->Opnd(kNodeSecondOpnd)); + if (res == nullptr) { + res = FuncTypeFromFuncPtrExpr(x->Opnd(kNodeThirdOpnd)); + } + break; + } + default: CHECK_FATAL(false, "LMBCLowerer::FuncTypeFromFuncPtrExpr: NYI"); + } + return res; +} + +const std::set MIRLower::kSetArrayHotFunc = {}; + +bool MIRLower::ShouldOptArrayMrt(const MIRFunction &func) { + return (MIRLower::kSetArrayHotFunc.find(func.GetName()) != MIRLower::kSetArrayHotFunc.end()); +} +} // namespace maple diff --git a/src/mapleall/maple_ir/src/mir_module.cpp b/src/mapleall/maple_ir/src/mir_module.cpp new file mode 100644 index 0000000000000000000000000000000000000000..334216779e569324ef8089266952982f60ae6564 --- /dev/null +++ b/src/mapleall/maple_ir/src/mir_module.cpp @@ -0,0 +1,803 @@ +/* + * Copyright (c) [2019-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "mir_module.h" +#include "mir_const.h" +#include "mir_preg.h" +#include "mir_function.h" +#include "mir_builder.h" +#include "debug_info.h" +#include "intrinsics.h" +#include "bin_mplt.h" + +namespace maple { +#if MIR_FEATURE_FULL // to avoid compilation error when MIR_FEATURE_FULL=0 +MIRModule::MIRModule(const std::string &fn) + : memPool(new ThreadShareMemPool(memPoolCtrler, "maple_ir mempool")), + pragmaMemPool(memPoolCtrler.NewMemPool("pragma mempool", false /* isLcalPool */)), + memPoolAllocator(memPool), + pragmaMemPoolAllocator(pragmaMemPool), + inlineSummaryAlloc(memPoolCtrler.NewMemPool("inline summary mempool", false)), + functionList(memPoolAllocator.Adapter()), + importedMplt(memPoolAllocator.Adapter()), + typeDefOrder(memPoolAllocator.Adapter()), + externStructTypeSet(std::less(), memPoolAllocator.Adapter()), + symbolSet(std::less(), memPoolAllocator.Adapter()), + symbolDefOrder(memPoolAllocator.Adapter()), + out(LogInfo::MapleLogger()), + fileName(fn), + fileInfo(memPoolAllocator.Adapter()), + fileInfoIsString(memPoolAllocator.Adapter()), + fileData(memPoolAllocator.Adapter()), + srcFileInfo(memPoolAllocator.Adapter()), + importFiles(memPoolAllocator.Adapter()), + importPaths(memPoolAllocator.Adapter()), + asmDecls(memPoolAllocator.Adapter()), + classList(memPoolAllocator.Adapter()), + optimizedFuncs(memPoolAllocator.Adapter()), + optimizedFuncsType(memPoolAllocator.Adapter()), + puIdxFieldInitializedMap(std::less(), memPoolAllocator.Adapter()), + inliningGlobals(memPoolAllocator.Adapter()), + partO2FuncList(memPoolAllocator.Adapter()), + safetyWarningMap(memPoolAllocator.Adapter()) { + GlobalTables::GetGsymTable().SetModule(this); + typeNameTab = memPool->New(memPoolAllocator); + mirBuilder = memPool->New(this); + dbgInfo = memPool->New(this); + scope = memPool->New(this, nullptr); + scope->SetIsLocal(false); + IntrinDesc::InitMIRModule(this); +} + +MIRModule::~MIRModule() { + for (MIRFunction *mirFunc : functionList) { + mirFunc->ReleaseCodeMemory(); + } + ReleasePragmaMemPool(); + delete memPool; + delete binMplt; + + // 
inlineSummaryAlloc is supposed to be released just after inlining. + // The following code is to ensure unexpected memory leak. + ReleaseInlineSummaryAlloc(); +} + +MemPool *MIRModule::CurFuncCodeMemPool() const { + if (useFuncCodeMemPoolTmp) { + return CurFunction()->GetCodeMemPoolTmp(); + } + return CurFunction()->GetCodeMemPool(); +} + +MapleAllocator *MIRModule::CurFuncCodeMemPoolAllocator() const { + MIRFunction *curFunc = CurFunction(); + CHECK_FATAL(curFunc != nullptr, "curFunction is null"); + return &curFunc->GetCodeMempoolAllocator(); +} + +MapleAllocator &MIRModule::GetCurFuncCodeMPAllocator() const { + MIRFunction *curFunc = CurFunction(); + CHECK_FATAL(curFunc != nullptr, "curFunction is null"); + return curFunc->GetCodeMPAllocator(); +} + +void MIRModule::AddExternStructType(TyIdx tyIdx) { + (void)externStructTypeSet.insert(tyIdx); +} + +void MIRModule::AddExternStructType(const MIRType *t) { + ASSERT(t != nullptr, "MIRType is null"); + (void)externStructTypeSet.insert(t->GetTypeIndex()); +} + +void MIRModule::AddSymbol(StIdx stIdx) { + auto it = symbolSet.find(stIdx); + if (it == symbolSet.end()) { + symbolDefOrder.push_back(stIdx); + } + (void)symbolSet.insert(stIdx); +} + +void MIRModule::AddSymbol(const MIRSymbol *s) { + ASSERT(s != nullptr, "s is null"); + AddSymbol(s->GetStIdx()); +} + +void MIRModule::DumpGlobals(bool emitStructureType) const { + if (flavor != kFlavorUnknown) { + LogInfo::MapleLogger() << "flavor " << flavor << '\n'; + } + if (srcLang != kSrcLangUnknown) { + LogInfo::MapleLogger() << "srclang " << srcLang << '\n'; + } + LogInfo::MapleLogger() << "id " << id << '\n'; + if (globalMemSize != 0) { + LogInfo::MapleLogger() << "globalmemsize " << globalMemSize << '\n'; + } + if (globalBlkMap != nullptr) { + LogInfo::MapleLogger() << "globalmemmap = [ "; + auto *p = reinterpret_cast(globalBlkMap); + LogInfo::MapleLogger() << std::hex; + while (p < reinterpret_cast(globalBlkMap + globalMemSize)) { + LogInfo::MapleLogger() << std::hex << "0x" << *p << " "; + p++; + } + LogInfo::MapleLogger() << std::dec << "]\n"; + } + if (globalWordsTypeTagged != nullptr) { + LogInfo::MapleLogger() << "globalwordstypetagged = [ "; + auto *p = reinterpret_cast(globalWordsTypeTagged); + LogInfo::MapleLogger() << std::hex; + while (p < reinterpret_cast(globalWordsTypeTagged + BlockSize2BitVectorSize(globalMemSize))) { + LogInfo::MapleLogger() << std::hex << "0x" << *p << " "; + ++p; + } + LogInfo::MapleLogger() << std::dec << "]\n"; + } + if (globalWordsRefCounted != nullptr) { + LogInfo::MapleLogger() << "globalwordsrefcounted = [ "; + auto *p = reinterpret_cast(globalWordsRefCounted); + LogInfo::MapleLogger() << std::hex; + while (p < reinterpret_cast(globalWordsRefCounted + BlockSize2BitVectorSize(globalMemSize))) { + LogInfo::MapleLogger() << std::hex << "0x" << *p << " "; + ++p; + } + LogInfo::MapleLogger() << std::dec << "]\n"; + } + LogInfo::MapleLogger() << "numfuncs " << numFuncs << '\n'; + if (!importFiles.empty()) { + // Output current module's mplt on top, imported ones at below + for (auto it = importFiles.rbegin(); it != importFiles.rend(); ++it) { + LogInfo::MapleLogger() << "import \"" << GlobalTables::GetStrTable().GetStringFromStrIdx(*it) << "\"\n"; + } + } + if (!importPaths.empty()) { + size_t size = importPaths.size(); + for (size_t i = 0; i < size; ++i) { + LogInfo::MapleLogger() << "importpath \"" << GlobalTables::GetStrTable().GetStringFromStrIdx(importPaths[i]) + << "\"\n"; + } + } + if (!asmDecls.empty()) { + size_t size = asmDecls.size(); + for (size_t 
i = 0; i < size; ++i) { + LogInfo::MapleLogger() << "asmdecl "; + EmitStr(asmDecls[i]); + } + } + if (entryFuncName.length() != 0) { + LogInfo::MapleLogger() << "entryfunc &" << entryFuncName << '\n'; + } + if (!fileInfo.empty()) { + LogInfo::MapleLogger() << "fileinfo {\n"; + size_t size = fileInfo.size(); + for (size_t i = 0; i < size; ++i) { + LogInfo::MapleLogger() << " @" << GlobalTables::GetStrTable().GetStringFromStrIdx(fileInfo[i].first) << " "; + if (!fileInfoIsString[i]) { + LogInfo::MapleLogger() << "0x" << std::hex << fileInfo[i].second; + } else { + LogInfo::MapleLogger() << "\"" << GlobalTables::GetStrTable().GetStringFromStrIdx(GStrIdx(fileInfo[i].second)) + << "\""; + } + if (i < size - 1) { + LogInfo::MapleLogger() << ",\n"; + } else { + LogInfo::MapleLogger() << "}\n"; + } + } + LogInfo::MapleLogger() << std::dec; + } + if (!srcFileInfo.empty()) { + LogInfo::MapleLogger() << "srcfileinfo {\n"; + size_t size = srcFileInfo.size(); + size_t i = 0; + for (auto infoElem : srcFileInfo) { + LogInfo::MapleLogger() << " " << infoElem.second; + LogInfo::MapleLogger() << " \"" << GlobalTables::GetStrTable().GetStringFromStrIdx(infoElem.first) << "\""; + if (i++ < size - 1) { + LogInfo::MapleLogger() << ",\n"; + } else { + LogInfo::MapleLogger() << "}\n"; + } + } + } + if (!fileData.empty()) { + LogInfo::MapleLogger() << "filedata {\n"; + size_t size = fileData.size(); + for (size_t i = 0; i < size; ++i) { + LogInfo::MapleLogger() << " @" << GlobalTables::GetStrTable().GetStringFromStrIdx(fileData[i].first) << " "; + size_t dataSize = fileData[i].second.size(); + for (size_t j = 0; j < dataSize; ++j) { + uint8 data = fileData[i].second[j]; + LogInfo::MapleLogger() << "0x" << std::hex << static_cast(data); + if (j < dataSize - 1) { + LogInfo::MapleLogger() << ' '; + } + } + if (i < size - 1) { + LogInfo::MapleLogger() << ",\n"; + } else { + LogInfo::MapleLogger() << "}\n"; + } + } + LogInfo::MapleLogger() << std::dec; + } + GlobalTables::GetEnumTable().Dump(); + if (flavor < kMmpl || flavor == kFlavorLmbc) { + for (auto it = typeDefOrder.begin(); it != typeDefOrder.end(); ++it) { + TyIdx tyIdx = typeNameTab->GetTyIdxFromGStrIdx(*it); + const std::string &name = GlobalTables::GetStrTable().GetStringFromStrIdx(*it); + MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx); + ASSERT(type != nullptr, "type should not be nullptr here"); + bool isStructType = type->IsStructType(); + if (isStructType) { + auto *structType = static_cast(type); + // still emit what in extern_structtype_set_ + if (!emitStructureType && externStructTypeSet.find(structType->GetTypeIndex()) == externStructTypeSet.end()) { + continue; + } + if (structType->IsImported()) { + continue; + } + } + + LogInfo::MapleLogger() << "type $" << name << " "; + if (type->GetKind() == kTypeByName) { + LogInfo::MapleLogger() << "void"; + } else if (type->GetNameStrIdx() == *it) { + type->Dump(1, true); + } else { + type->Dump(1); + } + LogInfo::MapleLogger() << '\n'; + } + if (someSymbolNeedForwDecl) { + // an extra pass thru the global symbol table to print forward decl + for (auto sit = symbolSet.begin(); sit != symbolSet.end(); ++sit) { + MIRSymbol *s = GlobalTables::GetGsymTable().GetSymbolFromStidx((*sit).Idx()); + if (s->IsNeedForwDecl()) { + s->Dump(false, 0, true); + } + } + } + // dump javaclass and javainterface first + for (auto sit = symbolDefOrder.begin(); sit != symbolDefOrder.end(); ++sit) { + MIRSymbol *s = GlobalTables::GetGsymTable().GetSymbolFromStidx((*sit).Idx()); + ASSERT(s != nullptr, "null ptr 
check"); + if (!s->IsJavaClassInterface()) { + continue; + } + // Verify: all wpofake variables should have been deleted from globaltable + if (!s->IsDeleted()) { + s->Dump(false, 0); + } + } + for (auto sit = symbolDefOrder.begin(); sit != symbolDefOrder.end(); ++sit) { + MIRSymbol *s = GlobalTables::GetGsymTable().GetSymbolFromStidx((*sit).Idx()); + CHECK_FATAL(s != nullptr, "nullptr check"); + if (s->IsJavaClassInterface()) { + continue; + } + if (!s->IsDeleted() && !s->GetIsImported() && !s->GetIsImportedDecl()) { + s->Dump(false, 0); + } + } + if (scope && !scope->IsEmpty()) { + scope->Dump(0); + } + } +} + +void MIRModule::Dump(bool emitStructureType, const std::unordered_set *dumpFuncSet) const { + DumpGlobals(emitStructureType); + DumpFunctionList(dumpFuncSet); +} + +void MIRModule::DumpGlobalArraySymbol() const { + for (StIdx stIdx : symbolSet) { + MIRSymbol *symbol = GlobalTables::GetGsymTable().GetSymbolFromStidx(stIdx.Idx()); + ASSERT(symbol != nullptr, "null ptr check"); + MIRType *symbolType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(symbol->GetTyIdx()); + ASSERT(symbolType != nullptr, "null ptr check"); + if (symbolType == nullptr || symbolType->GetKind() != kTypeArray) { + continue; + } + symbol->Dump(false, 0); + } +} + +void MIRModule::Emit(const std::string &outFileName) const { + std::ofstream file; + // Change cout's buffer to file. + std::streambuf *backup = LogInfo::MapleLogger().rdbuf(); + LogInfo::MapleLogger().rdbuf(file.rdbuf()); + file.open(outFileName, std::ios::trunc); + DumpGlobals(); + for (MIRFunction *mirFunc : functionList) { + mirFunc->Dump(); + } + // Restore cout's buffer. + LogInfo::MapleLogger().rdbuf(backup); + file.close(); +} + +void MIRModule::DumpFunctionList(const std::unordered_set *dumpFuncSet) const { + MIRSymbol::LastPrintedLineNumRef() = 0; + MIRSymbol::LastPrintedColumnNumRef() = 0; + for (MIRFunction *mirFunc : functionList) { + if (dumpFuncSet == nullptr || dumpFuncSet->empty()) { + mirFunc->Dump(); + } else { // dump only if this func matches any name in *dumpFuncSet + const std::string &name = mirFunc->GetName(); + bool matched = false; + for (std::string elem : *dumpFuncSet) { + if (name.find(elem.c_str()) != std::string::npos) { + matched = true; + break; + } + } + if (matched) { + mirFunc->Dump(); + } + } + } +} + +void MIRModule::OutputFunctionListAsciiMpl(const std::string &phaseName) { + std::string fileStem; + std::string::size_type lastDot = fileName.find_last_of('.'); + if (lastDot == std::string::npos) { + fileStem = fileName.append(phaseName); + } else { + fileStem = fileName.substr(0, lastDot).append(phaseName); + } + std::string outfileName; + if (flavor >= kMmpl) { + outfileName = fileStem.append(".mmpl"); + } else { + outfileName = fileStem.append(".mpl"); + } + std::ofstream mplFile; + mplFile.open(outfileName, std::ios::app); + std::streambuf *backup = LogInfo::MapleLogger().rdbuf(); + LogInfo::MapleLogger().rdbuf(mplFile.rdbuf()); // change cout's buffer to that of file + DumpGlobalArraySymbol(); + DumpFunctionList(nullptr); + LogInfo::MapleLogger().rdbuf(backup); // restore cout's buffer + mplFile.close(); +} + +void MIRModule::DumpToFile(const std::string &fileNameStr, bool emitStructureType) const { + std::ofstream file; + file.open(fileNameStr, std::ios::trunc); + if (!file.is_open()) { + ERR(kLncErr, "Cannot open %s", fileNameStr.c_str()); + return; + } + // Change cout's buffer to file. 
+ std::streambuf *backup = LogInfo::MapleLogger().rdbuf(); + LogInfo::MapleLogger().rdbuf(file.rdbuf()); + Dump(emitStructureType); + // Restore cout's buffer. + LogInfo::MapleLogger().rdbuf(backup); + file.close(); +} + +void MIRModule::DumpDefType() { + for (auto it = typeDefOrder.begin(); it != typeDefOrder.end(); ++it) { + TyIdx tyIdx = typeNameTab->GetTyIdxFromGStrIdx(*it); + const std::string &name = GlobalTables::GetStrTable().GetStringFromStrIdx(*it); + MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx); + ASSERT(type != nullptr, "type should not be nullptr here"); + bool isStructType = type->IsStructType(); + if (isStructType) { + auto *structType = static_cast(type); + if (structType->IsImported()) { + continue; + } + } + LogInfo::MapleLogger() << "type $" << name << " "; + if (type->GetKind() == kTypeByName) { + LogInfo::MapleLogger() << "void"; + } else if (type->GetNameStrIdx() == *it) { + type->Dump(1, true); + } else { + type->Dump(1); + } + LogInfo::MapleLogger() << '\n'; + } +} + +void MIRModule::DumpInlineCandidateToFile(const std::string &fileNameStr) { + if (optimizedFuncs.empty()) { + return; + } + std::ofstream file; + // Change cout's buffer to file. + std::streambuf *backup = LogInfo::MapleLogger().rdbuf(); + LogInfo::MapleLogger().rdbuf(file.rdbuf()); + file.open(fileNameStr, std::ios::trunc); + if (IsCModule()) { + DumpDefType(); + } + // dump global variables needed for inlining file + for (auto symbolIdx : inliningGlobals) { + MIRSymbol *s = GlobalTables::GetGsymTable().GetSymbolFromStidx(symbolIdx); + ASSERT(s != nullptr, "null ptr check"); + if (s->GetStorageClass() == kScFstatic) { + if (s->IsNeedForwDecl()) { + // const string, including initialization + s->Dump(false, 0, false); + } + } + } + for (auto symbolIdx : inliningGlobals) { + MIRSymbol *s = GlobalTables::GetGsymTable().GetSymbolFromStidx(symbolIdx); + ASSERT(s != nullptr, "null ptr check"); + MIRStorageClass sc = s->GetStorageClass(); + if (s->GetStorageClass() == kScFstatic) { + if (!s->IsNeedForwDecl()) { + // const string, including initialization + s->Dump(false, 0, false); + } + } else if (s->GetSKind() == kStFunc) { + s->GetFunction()->Dump(true); + } else { + // static fields as extern + s->SetStorageClass(kScExtern); + s->Dump(false, 0, true); + } + s->SetStorageClass(sc); + } + for (auto *func : optimizedFuncs) { + func->SetWithLocInfo(false); + func->Dump(); + } + // Restore cout's buffer. + LogInfo::MapleLogger().rdbuf(backup); + file.close(); +} + +// This is not efficient. Only used in debug mode for now. +const std::string &MIRModule::GetFileNameFromFileNum(uint32 fileNum) const { + GStrIdx nameIdx(0); + for (auto &info : srcFileInfo) { + if (info.second == fileNum) { + nameIdx = info.first; + break; + } + } + return GlobalTables::GetStrTable().GetStringFromStrIdx(nameIdx); +} + +void MIRModule::DumpToHeaderFile(bool binaryMplt, const std::string &outputName) { + std::string outfileName; + std::string fileNameLocal = !outputName.empty() ? 
outputName : fileName; + std::string::size_type lastDot = fileNameLocal.find_last_of('.'); + if (lastDot == std::string::npos) { + outfileName = fileNameLocal.append(".mplt"); + } else { + outfileName = fileNameLocal.substr(0, lastDot).append(".mplt"); + } + if (binaryMplt) { + BinaryMplt binaryMpltTmp(*this); + binaryMpltTmp.Export(outfileName); + } else { + std::ofstream mpltFile; + mpltFile.open(outfileName, std::ios::trunc); + std::streambuf *backup = LogInfo::MapleLogger().rdbuf(); + LogInfo::MapleLogger().rdbuf(mpltFile.rdbuf()); // change cout's buffer to that of file + for (std::pair entity : GlobalTables::GetConstPool().GetConstU16StringPool()) { + LogInfo::MapleLogger() << "var $"; + entity.second->DumpAsLiteralVar(); + LogInfo::MapleLogger() << '\n'; + } + for (auto it = classList.begin(); it != classList.end(); ++it) { + TyIdx curTyIdx(*it); + MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(curTyIdx); + const std::string &name = GlobalTables::GetStrTable().GetStringFromStrIdx(type->GetNameStrIdx()); + if (type->GetKind() == kTypeClass || type->GetKind() == kTypeInterface) { + auto *structType = static_cast(type); + // skip imported class/interface and incomplete types + if (!structType->IsImported() && !structType->IsIncomplete()) { + LogInfo::MapleLogger() << "type $" << name << " "; + type->Dump(1, true); + LogInfo::MapleLogger() << '\n'; + } + } + } + /* restore cout */ + LogInfo::MapleLogger().rdbuf(backup); + mpltFile.close(); + } +} + +/* + We use MIRStructType (kTypeStruct) to represent C/C++ structs + as well as C++ classes. + + We use MIRClassType (kTypeClass) to represent Java classes, specifically. + MIRClassType has parents which encode Java class's parent (exploiting + the fact Java classes have at most one parent class. 
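+ For example, a Java declaration "class B extends A" becomes a MIRClassType
+ for B whose parent type index (the GetParentTyIdx() link walked below) refers
+ to the MIRClassType of A, whereas a plain C struct is a MIRStructType that
+ carries no such parent link.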
+ */ +void MIRModule::DumpTypeTreeToCxxHeaderFile(MIRType &ty, std::unordered_set &dumpedClasses) const { + if (dumpedClasses.find(&ty) != dumpedClasses.end()) { + return; + } + // first, insert ty to the dumped_classes to prevent infinite recursion + (void)dumpedClasses.insert(&ty); + ASSERT(ty.GetKind() == kTypeClass || ty.GetKind() == kTypeStruct || ty.GetKind() == kTypeUnion || + ty.GetKind() == kTypeInterface, + "Unexpected MIRType."); + /* No need to emit interfaces; because "interface variables are + final and static by default and methods are public and abstract" + */ + if (ty.GetKind() == kTypeInterface) { + return; + } + // dump all of its parents + if (IsJavaModule()) { + ASSERT(ty.GetKind() != kTypeStruct, "type is not supposed to be struct"); + ASSERT(ty.GetKind() != kTypeUnion, "type is not supposed to be union"); + ASSERT(ty.GetKind() != kTypeInterface, "type is not supposed to be interface"); + } else if (srcLang == kSrcLangC || srcLang == kSrcLangCPlusPlus) { + ASSERT((ty.GetKind() == kTypeStruct || ty.GetKind() == kTypeUnion), "type should be either struct or union"); + } else { + ASSERT(false, "source languages other than DEX/C/C++ are not supported yet"); + } + const std::string &name = GlobalTables::GetStrTable().GetStringFromStrIdx(ty.GetNameStrIdx()); + if (IsJavaModule()) { + // Java class has at most one parent + auto &classType = static_cast(ty); + MIRClassType *parentType = nullptr; + // find parent and generate its type as well as those of its ancestors + if (classType.GetParentTyIdx() != 0u /* invalid type idx */) { + parentType = static_cast( + GlobalTables::GetTypeTable().GetTypeFromTyIdx(classType.GetParentTyIdx())); + CHECK_FATAL(parentType != nullptr, "nullptr check"); + DumpTypeTreeToCxxHeaderFile(*parentType, dumpedClasses); + } + LogInfo::MapleLogger() << "struct " << name << " "; + if (parentType != nullptr) { + LogInfo::MapleLogger() << ": " << parentType->GetName() << " "; + } + if (!classType.IsIncomplete()) { + /* dump class type; it will dump as '{ ... }' */ + classType.DumpAsCxx(1); + LogInfo::MapleLogger() << ";\n"; + } else { + LogInfo::MapleLogger() << " /* incomplete type */\n"; + } + } else if (srcLang == kSrcLangC || srcLang == kSrcLangCPlusPlus) { + // how to access parent fields???? 
+ ASSERT(false, "not yet implemented"); + } +} + +void MIRModule::DumpToCxxHeaderFile(std::set &leafClasses, const std::string &pathToOutf) const { + std::ofstream mpltFile; + mpltFile.open(pathToOutf, std::ios::trunc); + std::streambuf *backup = LogInfo::MapleLogger().rdbuf(); + LogInfo::MapleLogger().rdbuf(mpltFile.rdbuf()); // change cout's buffer to that of file + char *headerGuard = strdup(pathToOutf.c_str()); + CHECK_FATAL(headerGuard != nullptr, "strdup failed"); + for (char *p = headerGuard; p != nullptr && *p != 0; ++p) { + if (!isalnum(*p)) { + *p = '_'; + } else if (isalpha(*p) && islower(*p)) { + *p = toupper(*p); + } + } + // define a hash table + std::unordered_set dumpedClasses; + const char *prefix = "__SRCLANG_UNKNOWN_"; + if (IsJavaModule()) { + prefix = "__SRCLANG_JAVA_"; + } else if (srcLang == kSrcLangC || srcLang == kSrcLangCPlusPlus) { + prefix = "__SRCLANG_CXX_"; + } + LogInfo::MapleLogger() << "#ifndef " << prefix << headerGuard << "__\n"; + LogInfo::MapleLogger() << "#define " << prefix << headerGuard << "__\n"; + LogInfo::MapleLogger() << "/* this file is compiler-generated; do not edit */\n\n"; + LogInfo::MapleLogger() << "#include \n"; + LogInfo::MapleLogger() << "#include \n"; + for (auto &s : leafClasses) { + CHECK_FATAL(!s.empty(), "string is null"); + GStrIdx strIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(s); + TyIdx tyIdx = typeNameTab->GetTyIdxFromGStrIdx(strIdx); + MIRType *ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx); + if (ty == nullptr) { + continue; + } + ASSERT(ty->GetKind() == kTypeClass || ty->GetKind() == kTypeStruct || ty->GetKind() == kTypeUnion || + ty->GetKind() == kTypeInterface, + ""); + DumpTypeTreeToCxxHeaderFile(*ty, dumpedClasses); + } + LogInfo::MapleLogger() << "#endif /* " << prefix << headerGuard << "__ */\n"; + /* restore cout */ + LogInfo::MapleLogger().rdbuf(backup); + free(headerGuard); + headerGuard = nullptr; + mpltFile.close(); +} + +void MIRModule::DumpClassToFile(const std::string &path) const { + std::string strPath(path); + strPath.append("/"); + for (auto it : typeNameTab->GetGStrIdxToTyIdxMap()) { + const std::string &name = GlobalTables::GetStrTable().GetStringFromStrIdx(it.first); + MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(it.second); + std::string outClassFile(name); + /* replace class name / with - */ + std::replace(outClassFile.begin(), outClassFile.end(), '/', '-'); + (void)outClassFile.insert(0, strPath); + outClassFile.append(".mpl"); + std::ofstream mplFile; + mplFile.open(outClassFile, std::ios::trunc); + std::streambuf *backup = LogInfo::MapleLogger().rdbuf(); + LogInfo::MapleLogger().rdbuf(mplFile.rdbuf()); + /* dump class type */ + LogInfo::MapleLogger() << "type $" << name << " "; + if (type->GetNameStrIdx() == it.first && type->GetKind() != kTypeByName) { + type->Dump(1, true); + } else { + type->Dump(1); + } + LogInfo::MapleLogger() << '\n'; + /* restore cout */ + LogInfo::MapleLogger().rdbuf(backup); + mplFile.close();; + } +} + +MIRFunction *MIRModule::FindEntryFunction() { + for (MIRFunction *currFunc : functionList) { + if (currFunc->GetName() == entryFuncName) { + entryFunc = currFunc; + return currFunc; + } + } + return nullptr; +} + +// given the phase name (including '.' 
at beginning), output the program in the +// module to the file with given file suffix, and file stem from +// this->fileName appended with phaseName +void MIRModule::OutputAsciiMpl(const char *phaseName, const char *suffix, + const std::unordered_set *dumpFuncSet, + bool emitStructureType, bool binaryform) { + ASSERT(!(emitStructureType && binaryform), "Cannot emit type info in .bpl"); + std::string fileStem; + std::string::size_type lastDot = fileName.find_last_of('.'); + if (lastDot == std::string::npos) { + fileStem = fileName.append(phaseName); + } else { + fileStem = fileName.substr(0, lastDot).append(phaseName); + } + std::string outfileName = fileStem + suffix; + if (!binaryform) { + std::ofstream mplFile; + mplFile.open(outfileName, std::ios::trunc); + std::streambuf *backup = LogInfo::MapleLogger().rdbuf(); + LogInfo::MapleLogger().rdbuf(mplFile.rdbuf()); // change LogInfo::MapleLogger()'s buffer to that of file + Dump(emitStructureType, dumpFuncSet); + LogInfo::MapleLogger().rdbuf(backup); // restore LogInfo::MapleLogger()'s buffer + mplFile.close(); + } else { + BinaryMplt binaryMplt(*this); + binaryMplt.GetBinExport().not2mplt = true; + binaryMplt.Export(outfileName); + } + std::ofstream mplFile; + mplFile.open(outfileName, std::ios::trunc); + std::streambuf *backup = LogInfo::MapleLogger().rdbuf(); + LogInfo::MapleLogger().rdbuf(mplFile.rdbuf()); // change cout's buffer to that of file + Dump(emitStructureType); + if (withDbgInfo) { + dbgInfo->Dump(0); + } + LogInfo::MapleLogger().rdbuf(backup); // restore cout's buffer + mplFile.close(); +} + +uint32 MIRModule::GetFileinfo(GStrIdx strIdx) const { + for (auto &infoElem : fileInfo) { + if (infoElem.first == strIdx) { + return infoElem.second; + } + } + ASSERT(false, "should not be here"); + return 0; +} + +std::string MIRModule::GetFileNameAsPostfix() const { + std::string fileNameStr = namemangler::kFileNameSplitterStr; + if (!fileInfo.empty()) { + // option 1: file name in INFO + uint32 fileNameIdx = GetFileinfo(GlobalTables::GetStrTable().GetOrCreateStrIdxFromName("INFO_filename")); + fileNameStr += GlobalTables::GetStrTable().GetStringFromStrIdx(GStrIdx(fileNameIdx)); + } else { + // option 2: src file name removing ext name. 
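+    // Illustrative example: if the first srcfileinfo entry is "dir/hello.c",
+    // this branch appends "dir/hello", and the sanitizing loop at the end of
+    // this function then rewrites every character outside [A-Za-z0-9_$] to
+    // '_', leaving "dir_hello" after the separator prefix.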
+ if (GetSrcFileInfo().size() != 0) { + GStrIdx idx = GetSrcFileInfo()[0].first; + const std::string kStr = GlobalTables::GetStrTable().GetStringFromStrIdx(idx); + ASSERT(kStr.find_last_of('.') != kStr.npos, "not found ."); + fileNameStr += kStr.substr(0, kStr.find_last_of('.')); + } else { + ASSERT(0, "No fileinfo and no srcfileinfo in mpl file"); + } + } + for (char &c : fileNameStr) { + if (!isalpha(c) && !isdigit(c) && c != '_' && c != '$') { + c = '_'; + } + } + return fileNameStr; +} + +void MIRModule::AddClass(TyIdx tyIdx) { + (void)classList.insert(tyIdx); +} + +void MIRModule::RemoveClass(TyIdx tyIdx) { + (void)classList.erase(tyIdx); +} + +#endif // MIR_FEATURE_FULL +void MIRModule::ReleaseCurFuncMemPoolTmp() const { + CurFunction()->ReleaseMemory(); +} + +void MIRModule::SetFuncInfoPrinted() const { + CurFunction()->SetInfoPrinted(); +} + +void MIRModule::InitPartO2List(const std::string &list) { + if (list.empty()) { + return; + } + SetHasPartO2List(true); + std::ifstream infile(list); + if (!infile.is_open()) { + LogInfo::MapleLogger(kLlErr) << "Cannot open partO2 function list file " << list << '\n'; + return; + } + std::string str; + + while (getline(infile, str)) { + if (str.empty()) { + continue; + } + GStrIdx funcStrIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(str); + partO2FuncList.insert(funcStrIdx); + } + infile.close(); +} + +bool MIRModule::HasNotWarned(uint32 position, uint32 stmtOriginalID) { + auto warnedOp = safetyWarningMap.find(position); + if (warnedOp == safetyWarningMap.end()) { + MapleSet opSet(memPoolAllocator.Adapter()); + opSet.emplace(stmtOriginalID); + safetyWarningMap.emplace(std::pair>(position, std::move(opSet))); + return true; + } + if (warnedOp->second.find(stmtOriginalID) == warnedOp->second.end()) { + warnedOp->second.emplace(stmtOriginalID); + return true; + } + return false; +} +} // namespace maple diff --git a/src/mapleall/maple_ir/src/mir_nodes.cpp b/src/mapleall/maple_ir/src/mir_nodes.cpp new file mode 100644 index 0000000000000000000000000000000000000000..7a03023d8561fdb8f4ec9c78a7b808a8b74e3a5b --- /dev/null +++ b/src/mapleall/maple_ir/src/mir_nodes.cpp @@ -0,0 +1,2698 @@ +/* + * Copyright (c) [2019-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. + */ +#include "mir_nodes.h" + +#include +#include + +#include "maple_string.h" +#include "mir_function.h" +#include "namemangler.h" +#include "opcode_info.h" +#include "printing.h" +#include "utils.h" +#include "verification.h" + +namespace maple { +MIRModule *theMIRModule = nullptr; +std::list BlockCallBackMgr::callBackList; +std::atomic StmtNode::stmtIDNext(1); // 0 is reserved +uint32 StmtNode::lastPrintedLineNum = 0; +uint16 StmtNode::lastPrintedColumnNum = 0; +const int32 CondGotoNode::probAll = 10000; + +const char *GetIntrinsicName(MIRIntrinsicID intrn) { + switch (intrn) { + default: +#define DEF_MIR_INTRINSIC(STR, NAME, NUM_INSN, INTRN_CLASS, RETURN_TYPE, ...) 
\ + case INTRN_##STR: \ + return #STR; +#include "intrinsics.def" +#undef DEF_MIR_INTRINSIC + } +} + +const char *BaseNode::GetOpName() const { + return kOpcodeInfo.GetTableItemAt(GetOpCode()).name.c_str(); +} + +bool BaseNode::MayThrowException() const { + if (kOpcodeInfo.MayThrowException(GetOpCode())) { + if (GetOpCode() != OP_array) { + return true; + } + const ArrayNode *arry = static_cast(this); + if (arry->GetBoundsCheck()) { + return true; + } + } else if (GetOpCode() == OP_intrinsicop) { + const IntrinsicopNode *inNode = static_cast(this); + if (inNode->GetIntrinsic() == INTRN_JAVA_ARRAY_LENGTH) { + return true; + } + } + for (size_t i = 0; i < NumOpnds(); ++i) { + if (Opnd(i)->MayThrowException()) { + return true; + } + } + return false; +} + +bool AddrofNode::CheckNode(const MIRModule &mod) const { + const MIRSymbol *st = mod.CurFunction()->GetLocalOrGlobalSymbol(GetStIdx()); + ASSERT(st != nullptr, "null ptr check"); + MIRType *ty = st->GetType(); + switch (ty->GetKind()) { + case kTypeScalar: { +#ifdef DYNAMICLANG + if (GetPrimType() == PTY_dynany) { + return true; + } + return IsPrimitiveScalar(GetPrimType()); +#else + return IsPrimitiveScalar(GetPrimType()); +#endif + } + case kTypeArray: { + return GetPrimType() == PTY_agg; + } + case kTypeUnion: + case kTypeStruct: + case kTypeStructIncomplete: { + if (GetFieldID() == 0) { + return GetPrimType() == PTY_agg; + } + auto *structType = static_cast(ty); + TyIdx fTyIdx = structType->GetFieldTyIdx(fieldID); + MIRType *subType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(fTyIdx); + MIRTypeKind subKind = subType->GetKind(); + return (subKind == kTypeBitField && VerifyPrimType(subType->GetPrimType(), GetPrimType())) || + (subKind == kTypeScalar && IsPrimitiveScalar(GetPrimType())) || + (subKind == kTypePointer && IsPrimitivePoint(GetPrimType())) || + (subKind == kTypeStruct && GetPrimType() == PTY_agg) || (fTyIdx != 0u && GetPrimType() == PTY_agg); + } + case kTypeClass: + case kTypeClassIncomplete: { + if (fieldID == 0) { + return GetPrimType() == PTY_agg; + } + auto *classType = static_cast(ty); + MIRType *subType = classType->GetFieldType(fieldID); + MIRTypeKind subKind = subType->GetKind(); + return (subKind == kTypeBitField && VerifyPrimType(subType->GetPrimType(), GetPrimType())) || + (subKind == kTypeScalar && IsPrimitiveScalar(GetPrimType())) || + (subKind == kTypePointer && IsPrimitivePoint(GetPrimType())) || + (subKind == kTypeStruct && GetPrimType() == PTY_agg); + } + case kTypeInterface: + case kTypeInterfaceIncomplete: { + if (fieldID == 0) { + return GetPrimType() == PTY_agg; + } + auto *interfaceType = static_cast(ty); + MIRType *subType = interfaceType->GetFieldType(fieldID); + MIRTypeKind subKind = subType->GetKind(); + return (subKind == kTypeBitField && VerifyPrimType(subType->GetPrimType(), GetPrimType())) || + (subKind == kTypeScalar && IsPrimitiveScalar(GetPrimType())) || + (subKind == kTypePointer && IsPrimitivePoint(GetPrimType())) || + (subKind == kTypeStruct && GetPrimType() == PTY_agg); + } + case kTypePointer: + return IsPrimitivePoint(GetPrimType()); + case kTypeParam: + case kTypeGenericInstant: + return true; + default: + return false; + } +} + +MIRType *IreadNode::GetType() const { + MIRPtrType *ptrtype = static_cast(GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx)); + if (fieldID == 0) { + return ptrtype->GetPointedType(); + } + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(ptrtype->GetPointedTyIdxWithFieldID(fieldID)); +} + +bool IreadNode::IsVolatile() const { + MIRType *type = 
GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx);
+  ASSERT(type != nullptr, "null ptr check");
+  ASSERT(type->IsMIRPtrType(), "type of iread should be pointer type");
+  return static_cast<MIRPtrType*>(type)->IsPointedTypeVolatile(fieldID);
+}
+
+bool AddrofNode::IsVolatile(const MIRModule &mod) const {
+  auto *symbol = mod.CurFunction()->GetLocalOrGlobalSymbol(stIdx);
+  ASSERT(symbol != nullptr, "null ptr check on symbol");
+  return symbol->IsVolatile();
+}
+
+bool DreadoffNode::IsVolatile(const MIRModule &mod) const {
+  auto *symbol = mod.CurFunction()->GetLocalOrGlobalSymbol(stIdx);
+  ASSERT(symbol != nullptr, "null ptr check on symbol");
+  return symbol->IsVolatile();
+}
+
+bool DassignNode::AssigningVolatile(const MIRModule &mod) const {
+  auto *symbol = mod.CurFunction()->GetLocalOrGlobalSymbol(stIdx);
+  ASSERT(symbol != nullptr, "null ptr check on symbol");
+  return symbol->IsVolatile();
+}
+
+bool IassignNode::AssigningVolatile() const {
+  MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx);
+  ASSERT(type != nullptr, "null ptr check");
+  ASSERT(type->IsMIRPtrType(), "type of iassign should be pointer type");
+  return static_cast<MIRPtrType*>(type)->IsPointedTypeVolatile(fieldID);
+}
+
+void BlockNode::AddStatement(StmtNode *stmt) {
+  ASSERT(stmt != nullptr, "null ptr check");
+  stmtNodeList.push_back(stmt);
+}
+
+void BlockNode::AppendStatementsFromBlock(BlockNode &blk) {
+  if (blk.GetStmtNodes().empty()) {
+    return;
+  }
+  stmtNodeList.splice(stmtNodeList.end(), blk.GetStmtNodes());
+}
+
+/// Insert stmt as the first
+void BlockNode::InsertFirst(StmtNode *stmt) {
+  ASSERT(stmt != nullptr, "stmt is null");
+  stmtNodeList.push_front(stmt);
+}
+
+/// Insert stmt as the last
+void BlockNode::InsertLast(StmtNode *stmt) {
+  ASSERT(stmt != nullptr, "stmt is null");
+  stmtNodeList.push_back(stmt);
+}
+
+void BlockNode::ReplaceStmtWithBlock(StmtNode &stmtNode, BlockNode &blk) {
+  stmtNodeList.splice(&stmtNode, blk.GetStmtNodes());
+  stmtNodeList.erase(&stmtNode);
+  stmtNode.SetNext(blk.GetLast()->GetNext());
+}
+
+void BlockNode::ReplaceStmt1WithStmt2(const StmtNode *stmtNode1, StmtNode *stmtNode2) {
+  if (stmtNode2 == stmtNode1) {
+    // do nothing
+  } else if (stmtNode2 == nullptr) {
+    // delete stmtNode1
+    stmtNodeList.erase(stmtNode1);
+  } else {
+    // replace stmtNode1 with stmtNode2
+    stmtNodeList.insert(stmtNode1, stmtNode2);
+    (void)stmtNodeList.erase(stmtNode1);
+  }
+}
+
+// remove stmtNode1 from block
+void BlockNode::RemoveStmt(const StmtNode *stmtNode1) {
+  ASSERT(stmtNode1 != nullptr, "delete a null statement");
+  (void)stmtNodeList.erase(stmtNode1);
+}
+
+/// Insert stmtNode2 before stmtNode1 in current block.
+void BlockNode::InsertBefore(const StmtNode *stmtNode1, StmtNode *stmtNode2) {
+  stmtNodeList.insert(stmtNode1, stmtNode2);
+}
+
+/// Insert stmtNode2 after stmtNode1 in current block.
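+/// Note: stmtNode1 is expected to already belong to this block; the call simply delegates to the stmt list's insertAfter.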
+void BlockNode::InsertAfter(const StmtNode *stmtNode1, StmtNode *stmtNode2) { + stmtNodeList.insertAfter(stmtNode1, stmtNode2); +} + +// insert all the stmts in inblock to the current block after stmt1 +void BlockNode::InsertBlockAfter(BlockNode &inblock, const StmtNode *stmt1) { + ASSERT(stmt1 != nullptr, "null ptr check"); + ASSERT(!inblock.IsEmpty(), "NYI"); + stmtNodeList.splice(stmt1, inblock.GetStmtNodes()); +} + +BlockNode *BlockNode::CloneTree(MapleAllocator &allocator) const { + auto *blk = allocator.GetMemPool()->New(); + blk->SetStmtID(stmtIDNext++); + for (auto &stmt : stmtNodeList) { + StmtNode *newStmt = stmt.CloneTree(allocator); + ASSERT(newStmt != nullptr, "null ptr check"); + newStmt->SetMeStmtID(stmt.GetMeStmtID()); + newStmt->SetPrev(nullptr); + newStmt->SetNext(nullptr); + blk->AddStatement(newStmt); + BlockCallBackMgr::InvokeCallBacks(*this, *blk, stmt, *newStmt); + } + return blk; +} + +BlockNode *BlockNode::CloneTreeWithSrcPosition(const MIRModule &mod, const GStrIdx &idx, bool setInlinedPos, + const SrcPosition & inlinedPosition) { + MapleAllocator &allocator = mod.GetCurFuncCodeMPAllocator(); + auto *blk = allocator.GetMemPool()->New(); + blk->SetStmtID(stmtIDNext++); + for (auto &stmt : stmtNodeList) { + StmtNode *newStmt = stmt.CloneTree(allocator); + ASSERT(newStmt != nullptr, "null ptr check"); + newStmt->SetSrcPos(stmt.GetSrcPos()); + if (setInlinedPos) { + newStmt->SetInlinedSrcPos(inlinedPosition.LineNum(), inlinedPosition.FileNum(), idx); + } + newStmt->SetPrev(nullptr); + newStmt->SetNext(nullptr); + blk->AddStatement(newStmt); + BlockCallBackMgr::InvokeCallBacks(*this, *blk, stmt, *newStmt); + } + return blk; +} + +BlockNode *BlockNode::CloneTreeWithFreqs(MapleAllocator &allocator, + std::unordered_map& toFreqs, + std::unordered_map& fromFreqs, + uint64_t numer, uint64_t denom, uint32_t updateOp) { + auto *nnode = allocator.GetMemPool()->New(); + nnode->SetStmtID(stmtIDNext++); + if (fromFreqs.count(GetStmtID()) > 0) { + uint64_t oldFreq = fromFreqs[GetStmtID()]; + uint64_t newFreq; + if (updateOp & kUpdateUnrollRemainderFreq) { + newFreq = denom > 0 ? (oldFreq * numer % denom) : oldFreq; + } else { + newFreq = numer == 0 ? 0 : (denom > 0 ? (oldFreq * numer / denom) : oldFreq); + } + toFreqs[nnode->GetStmtID()] = (newFreq > 0 || (numer == 0)) ? newFreq : 1; + if ((updateOp & kUpdateOrigFreq) != 0) { // upateOp & 1 : update from + int64_t left = static_cast(((oldFreq - newFreq) > 0 || (oldFreq == 0)) ? (oldFreq - newFreq) : 1); + fromFreqs[GetStmtID()] = static_cast(left); + } + } + for (auto &stmt : stmtNodeList) { + StmtNode *newStmt; + if (stmt.GetOpCode() == OP_block) { + newStmt = static_cast( + (static_cast(&stmt))->CloneTreeWithFreqs(allocator, toFreqs, fromFreqs, numer, denom, updateOp)); + } else if (stmt.GetOpCode() == OP_if) { + newStmt = static_cast( + (static_cast(&stmt))->CloneTreeWithFreqs(allocator, toFreqs, fromFreqs, numer, denom, updateOp)); + } else if (stmt.GetOpCode() == OP_while) { + newStmt = static_cast( + (static_cast(&stmt))->CloneTreeWithFreqs(allocator, + toFreqs, fromFreqs, numer, denom, updateOp)); + } else if (stmt.GetOpCode() == OP_doloop) { + newStmt = static_cast( + (static_cast(&stmt))->CloneTreeWithFreqs(allocator, toFreqs, fromFreqs, numer, denom, updateOp)); + } else { + newStmt = static_cast(stmt.CloneTree(allocator)); + if (fromFreqs.count(stmt.GetStmtID()) > 0) { + uint64_t oldFreq = fromFreqs[stmt.GetStmtID()]; + uint64_t newFreq; + if ((updateOp & kUpdateUnrollRemainderFreq) != 0) { + newFreq = denom > 0 ? 
(oldFreq * numer % denom) : oldFreq; + } else { + newFreq = numer == 0 ? 0 : (denom > 0 ? (oldFreq * numer / denom) : oldFreq); + } + toFreqs[newStmt->GetStmtID()] = + (newFreq > 0 || oldFreq == 0 || numer == 0) ? static_cast(newFreq) : 1; + if ((updateOp & kUpdateOrigFreq) != 0) { + int64_t left = static_cast(((oldFreq - newFreq) > 0 || oldFreq == 0) ? (oldFreq - newFreq) : 1); + fromFreqs[stmt.GetStmtID()] = static_cast(left); + } + } + } + ASSERT(newStmt != nullptr, "null ptr check"); + newStmt->SetSrcPos(stmt.GetSrcPos()); + newStmt->SetMeStmtID(stmt.GetMeStmtID()); + newStmt->SetPrev(nullptr); + newStmt->SetNext(nullptr); + nnode->AddStatement(newStmt); + BlockCallBackMgr::InvokeCallBacks(*this, *nnode, stmt, *newStmt); + } + return nnode; +} + +void BaseNode::DumpBase(int32 indent) const { + PrintIndentation(indent); + LogInfo::MapleLogger() << kOpcodeInfo.GetTableItemAt(GetOpCode()).name << " " << GetPrimTypeName(GetPrimType()); +} + +void CatchNode::Dump(int32 indent) const { + PrintIndentation(indent); + LogInfo::MapleLogger() << kOpcodeInfo.GetTableItemAt(GetOpCode()).name << " {"; + size_t size = exceptionTyIdxVec.size(); + for (size_t i = 0; i < size; ++i) { + LogInfo::MapleLogger() << " "; + GlobalTables::GetTypeTable().GetTypeFromTyIdx(exceptionTyIdxVec[i])->Dump(indent + 1); + } + LogInfo::MapleLogger() << " }\n"; +} + +void CppCatchNode::Dump(int32 indent) const { + PrintIndentation(indent); + LogInfo::MapleLogger() << kOpcodeInfo.GetName(op); + if (exceptionTyIdx.GetIdx() != 0) { + LogInfo::MapleLogger() << " { "; + GlobalTables::GetTypeTable().GetTypeFromTyIdx(exceptionTyIdx)->Dump(indent + 1); + LogInfo::MapleLogger() << " }"; + } + LogInfo::MapleLogger() << std::endl; +} + +void UnaryNode::DumpOpnd(const MIRModule &mod [[maybe_unused]], int32 indent) const { + DumpOpnd(indent); +} + +void UnaryNode::DumpOpnd(int32 indent) const { + LogInfo::MapleLogger() << " ("; + if (uOpnd != nullptr) { + uOpnd->Dump(indent); + } + LogInfo::MapleLogger() << ")"; +} + +void UnaryNode::Dump(int32 indent) const { + BaseNode::DumpBase(0); + DumpOpnd(*theMIRModule, indent); +} + +void TypeCvtNode::Dump(int32 indent) const { + LogInfo::MapleLogger() << kOpcodeInfo.GetTableItemAt(GetOpCode()).name << " "; + LogInfo::MapleLogger() << GetPrimTypeName(GetPrimType()) << " " << GetPrimTypeName(FromType()); + DumpOpnd(*theMIRModule, indent); +} + +void RetypeNode::Dump(int32 indent) const { + LogInfo::MapleLogger() << kOpcodeInfo.GetTableItemAt(GetOpCode()).name << " "; + LogInfo::MapleLogger() << GetPrimTypeName(GetPrimType()) << " "; + MIRType *ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx); + if (ty->GetKind() == kTypeScalar) { + LogInfo::MapleLogger() << "<"; + ty->Dump(indent + 1); + LogInfo::MapleLogger() << ">"; + } else { + ty->Dump(indent + 1); + } + DumpOpnd(*theMIRModule, indent); +} + +void ExtractbitsNode::Dump(int32 indent) const { + LogInfo::MapleLogger() << kOpcodeInfo.GetTableItemAt(GetOpCode()).name << " " << GetPrimTypeName(GetPrimType()); + if (GetOpCode() == OP_extractbits) { + LogInfo::MapleLogger() << " " << static_cast(bitsOffset) << " " << static_cast(bitsSize); + } else { + LogInfo::MapleLogger() << " " << static_cast(bitsSize); + } + DumpOpnd(*theMIRModule, indent); +} + +void IreadNode::Dump(int32 indent) const { + BaseNode::DumpBase(0); + LogInfo::MapleLogger() << " "; + GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx)->Dump(0); + LogInfo::MapleLogger() << " " << fieldID; + DumpOpnd(*theMIRModule, indent); +} + +void IreadoffNode::Dump(int32 indent) 
const { + LogInfo::MapleLogger() << kOpcodeInfo.GetTableItemAt(GetOpCode()).name << " " << GetPrimTypeName(GetPrimType()); + LogInfo::MapleLogger() << " " << offset; + DumpOpnd(*theMIRModule, indent); +} + +void IreadFPoffNode::Dump(int32 indent [[maybe_unused]]) const { + LogInfo::MapleLogger() << kOpcodeInfo.GetTableItemAt(GetOpCode()).name << " " << GetPrimTypeName(GetPrimType()); + LogInfo::MapleLogger() << " " << offset; +} + +void BinaryNode::Dump(int32 indent) const { + BaseNode::DumpBase(0); + BinaryOpnds::Dump(indent); +} + +void BinaryOpnds::Dump(int32 indent) const { + LogInfo::MapleLogger() << " ("; + if (bOpnd[0]->IsLeaf() && bOpnd[1]->IsLeaf()) { + bOpnd[0]->Dump(0); + LogInfo::MapleLogger() << ", "; + bOpnd[1]->Dump(0); + } else { + LogInfo::MapleLogger() << '\n'; + PrintIndentation(indent + 1); + bOpnd[0]->Dump(indent + 1); + LogInfo::MapleLogger() << ",\n"; + PrintIndentation(indent + 1); + bOpnd[1]->Dump(indent + 1); + } + LogInfo::MapleLogger() << ")"; +} + +void ResolveFuncNode::Dump(int32 indent) const { + BaseNode::DumpBase(0); + MIRFunction *func = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(puIdx); + LogInfo::MapleLogger() << " &" << func->GetName(); + BinaryOpnds::Dump(indent); +} + +void CompareNode::Dump(int32 indent) const { + LogInfo::MapleLogger() << kOpcodeInfo.GetTableItemAt(GetOpCode()).name << " " << GetPrimTypeName(GetPrimType()); + LogInfo::MapleLogger() << " " << GetPrimTypeName(opndType); + BinaryOpnds::Dump(indent); +} + +void DepositbitsNode::Dump(int32 indent) const { + BaseNode::DumpBase(0); + LogInfo::MapleLogger() << " " << static_cast(bitsOffset) << " " << static_cast(bitsSize) << " ("; + if (GetBOpnd(0)->IsLeaf() && GetBOpnd(1)->IsLeaf()) { + GetBOpnd(0)->Dump(0); + LogInfo::MapleLogger() << ", "; + GetBOpnd(1)->Dump(0); + } else { + LogInfo::MapleLogger() << '\n'; + PrintIndentation(indent + 1); + GetBOpnd(0)->Dump(indent + 1); + LogInfo::MapleLogger() << ",\n"; + PrintIndentation(indent + 1); + GetBOpnd(1)->Dump(indent + 1); + } + LogInfo::MapleLogger() << ")"; +} + +void TernaryNode::Dump(int32 indent) const { + BaseNode::DumpBase(0); + LogInfo::MapleLogger() << " ("; + if (topnd[kFirstOpnd]->IsLeaf() && topnd[kSecondOpnd]->IsLeaf() && topnd[kThirdOpnd]->IsLeaf()) { + topnd[kFirstOpnd]->Dump(0); + LogInfo::MapleLogger() << ", "; + topnd[kSecondOpnd]->Dump(0); + LogInfo::MapleLogger() << ", "; + topnd[kThirdOpnd]->Dump(0); + } else { + LogInfo::MapleLogger() << '\n'; + PrintIndentation(indent + 1); + topnd[kFirstOpnd]->Dump(indent + 1); + LogInfo::MapleLogger() << ",\n"; + PrintIndentation(indent + 1); + topnd[kSecondOpnd]->Dump(indent + 1); + LogInfo::MapleLogger() << ",\n"; + PrintIndentation(indent + 1); + topnd[kThirdOpnd]->Dump(indent + 1); + } + LogInfo::MapleLogger() << ")"; +} + +void NaryOpnds::Dump(int32 indent) const { + LogInfo::MapleLogger() << " ("; + if (GetNopndSize() == 0) { + LogInfo::MapleLogger() << ")"; + return; + } + if (GetNopndSize() == 1) { + GetNopndAt(0)->Dump(indent); + } else { + bool allisLeaf = true; + for (size_t i = 0; i < GetNopndSize(); ++i) { + if (!GetNopndAt(i)->IsLeaf()) { + allisLeaf = false; + break; + } + } + if (allisLeaf) { + GetNopndAt(0)->Dump(0); + for (size_t i = 1; i < GetNopndSize(); ++i) { + LogInfo::MapleLogger() << ", "; + GetNopndAt(i)->Dump(0); + } + } else { + LogInfo::MapleLogger() << '\n'; + PrintIndentation(indent + 1); + GetNopndAt(0)->Dump(indent + 1); + for (size_t i = 1; i < GetNopndSize(); ++i) { + LogInfo::MapleLogger() << ",\n"; + PrintIndentation(indent + 1); + 
GetNopndAt(i)->Dump(indent + 1); + } + } + } + LogInfo::MapleLogger() << ")"; +} + +bool NaryOpnds::VerifyOpnds() const { + bool nOpndsVerify = true; + for (size_t i = 0; i < GetNopndSize(); ++i) { + if (!GetNopndAt(i)->Verify()) { + nOpndsVerify = false; + break; + } + } + return nOpndsVerify; +} + +void NaryNode::Dump(int32 indent) const { + BaseNode::DumpBase(0); + NaryOpnds::Dump(indent); +} + +const MIRType *ArrayNode::GetArrayType(const TypeTable &tt) const { + const MIRType *type = tt.GetTypeFromTyIdx(tyIdx); + CHECK_FATAL(type->GetKind() == kTypePointer, "expect array type pointer"); + const auto *pointType = static_cast(type); + return tt.GetTypeFromTyIdx(pointType->GetPointedTyIdx()); +} +MIRType *ArrayNode::GetArrayType(const TypeTable &tt) { + return const_cast(const_cast(this)->GetArrayType(tt)); +} + +const BaseNode *ArrayNode::GetDim(const MIRModule &mod, TypeTable &tt, int i) const { + const auto *arrayType = static_cast(GetArrayType(tt)); + auto *mirConst = GlobalTables::GetIntConstTable().GetOrCreateIntConst( + static_cast(i), *tt.GetTypeFromTyIdx(arrayType->GetElemTyIdx())); + return mod.CurFuncCodeMemPool()->New(mirConst); +} +BaseNode *ArrayNode::GetDim(const MIRModule &mod, TypeTable &tt, int i) { + return const_cast(const_cast(this)->GetDim(mod, tt, i)); +} + +void ArrayNode::Dump(int32 indent) const { + PrintIndentation(0); + LogInfo::MapleLogger() << kOpcodeInfo.GetTableItemAt(GetOpCode()).name << " "; + if (boundsCheck) { + LogInfo::MapleLogger() << "1 "; + } else { + LogInfo::MapleLogger() << "0 "; + } + LogInfo::MapleLogger() << GetPrimTypeName(GetPrimType()); + LogInfo::MapleLogger() << " "; + GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx)->Dump(0); + NaryOpnds::Dump(indent); +} + +bool ArrayNode::IsSameBase(ArrayNode *arry) { + ASSERT(arry != nullptr, "null ptr check"); + if (arry == this) { + return true; + } + BaseNode *curBase = this->GetBase(); + BaseNode *otherBase = arry->GetBase(); + if (curBase->GetOpCode() != OP_addrof || otherBase->GetOpCode() != OP_addrof) { + return false; + } + return static_cast(curBase)->GetStIdx() == static_cast(otherBase)->GetStIdx(); +} + +void IntrinsicopNode::Dump(int32 indent) const { + LogInfo::MapleLogger() << kOpcodeInfo.GetTableItemAt(GetOpCode()).name << " " << GetPrimTypeName(GetPrimType()); + if (GetOpCode() == OP_intrinsicopwithtype) { + LogInfo::MapleLogger() << " "; + GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx)->Dump(indent + 1); + } + LogInfo::MapleLogger() << " " << GetIntrinsicName(GetIntrinsic()); + NaryOpnds::Dump(indent); +} + +void ConstvalNode::Dump(int32 indent [[maybe_unused]]) const { + if (GetConstVal()->GetType().GetKind() != kTypePointer) { + BaseNode::DumpBase(0); + LogInfo::MapleLogger() << " "; + } + GetConstVal()->Dump(); +} + +void ConststrNode::Dump(int32 indent [[maybe_unused]]) const { + BaseNode::DumpBase(0); + const std::string kStr = GlobalTables::GetUStrTable().GetStringFromStrIdx(UStrIdx(strIdx)); + PrintString(kStr); +} + +void Conststr16Node::Dump(int32 indent [[maybe_unused]]) const { + BaseNode::DumpBase(0); + const std::u16string kStr16 = GlobalTables::GetU16StrTable().GetStringFromStrIdx(U16StrIdx(strIdx)); + // UTF-16 string are dumped as UTF-8 string in mpl to keep the printable chars in ascii form + std::string str; + (void)namemangler::UTF16ToUTF8(str, kStr16); + PrintString(str); +} + +void SizeoftypeNode::Dump(int32 indent [[maybe_unused]]) const { + BaseNode::DumpBase(0); + LogInfo::MapleLogger() << " "; + 
GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx)->Dump(0); +} + +void FieldsDistNode::Dump(int32 indent [[maybe_unused]]) const { + BaseNode::DumpBase(0); + LogInfo::MapleLogger() << " "; + GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx)->Dump(0); + LogInfo::MapleLogger() << " " << fieldID1 << " " << fieldID2; +} + +void AddrofNode::Dump(int32 indent [[maybe_unused]]) const { + LogInfo::MapleLogger() << kOpcodeInfo.GetTableItemAt(GetOpCode()).name << " " << GetPrimTypeName(GetPrimType()); + const MIRSymbol *st = theMIRModule->CurFunction()->GetLocalOrGlobalSymbol(GetStIdx()); + LogInfo::MapleLogger() << (GetStIdx().Islocal() ? " %" : " $"); + ASSERT(st != nullptr, "null ptr check"); + LogInfo::MapleLogger() << st->GetName(); + if (fieldID != 0) { + LogInfo::MapleLogger() << " " << fieldID; + } +} + +void DreadoffNode::Dump(int32 indent [[maybe_unused]]) const { + LogInfo::MapleLogger() << kOpcodeInfo.GetTableItemAt(GetOpCode()).name << " " << GetPrimTypeName(GetPrimType()); + const MIRSymbol *st = theMIRModule->CurFunction()->GetLocalOrGlobalSymbol(stIdx); + LogInfo::MapleLogger() << (stIdx.Islocal() ? " %" : " $"); + ASSERT(st != nullptr, "null ptr check"); + LogInfo::MapleLogger() << st->GetName(); + LogInfo::MapleLogger() << " " << offset; +} + +void RegreadNode::Dump(int32 indent [[maybe_unused]]) const { + LogInfo::MapleLogger() << kOpcodeInfo.GetTableItemAt(GetOpCode()).name << " " << GetPrimTypeName(GetPrimType()); + if (regIdx >= 0) { + LogInfo::MapleLogger() << " %" << theMIRModule->CurFunction()->GetPregTab()->PregFromPregIdx(regIdx)->GetPregNo(); + return; + } + LogInfo::MapleLogger() << " %%"; + switch (regIdx) { + case -kSregSp: + LogInfo::MapleLogger() << "SP"; + break; + case -kSregFp: + LogInfo::MapleLogger() << "FP"; + break; + case -kSregGp: + LogInfo::MapleLogger() << "GP"; + break; + case -kSregThrownval: + LogInfo::MapleLogger() << "thrownval"; + break; + case -kSregMethodhdl: + LogInfo::MapleLogger() << "methodhdl"; + break; + default: + int32 retValIdx = (-regIdx) - kSregRetval0; + LogInfo::MapleLogger() << "retval" << retValIdx; + break; + } +} + +void AddroffuncNode::Dump(int32 indent [[maybe_unused]]) const { + LogInfo::MapleLogger() << kOpcodeInfo.GetTableItemAt(GetOpCode()).name << " " << GetPrimTypeName(GetPrimType()); + MIRFunction *func = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(puIdx); + CHECK_NULL_FATAL(func); + MIRSymbol *sym = GlobalTables::GetGsymTable().GetSymbolFromStidx(func->GetStIdx().Idx()); + CHECK_NULL_FATAL(sym); + LogInfo::MapleLogger() << " &" << sym->GetName(); +} + +void AddroflabelNode::Dump(int32 indent [[maybe_unused]]) const { + LogInfo::MapleLogger() << kOpcodeInfo.GetTableItemAt(GetOpCode()).name << " " << GetPrimTypeName(GetPrimType()); + LogInfo::MapleLogger() << " @" << theMIRModule->CurFunction()->GetLabelName(static_cast(offset)); +} + +void StmtNode::DumpBase(int32 indent) const { + srcPosition.DumpLoc(lastPrintedLineNum, lastPrintedColumnNum); + // dump stmtFreqs + if (Options::profileUse && theMIRModule->CurFunction()->GetFuncProfData()) { + int64_t freq = static_cast(theMIRModule->CurFunction()->GetFuncProfData()->GetStmtFreq(GetStmtID())); + if (freq >= 0) { + LogInfo::MapleLogger() << "stmtID " << GetStmtID() << " freq " << freq << "\n"; + } + } + PrintIndentation(indent); + LogInfo::MapleLogger() << kOpcodeInfo.GetTableItemAt(GetOpCode()).name; +} + +void StmtNode::Dump(int32 indent) const { + StmtNode::DumpBase(indent); + LogInfo::MapleLogger() << '\n'; +} + +// Get the next stmt skip the comment 
stmt. +StmtNode *StmtNode::GetRealNext() const { + StmtNode *stmt = this->GetNext(); + while (stmt != nullptr) { + if (stmt->GetOpCode() != OP_comment) { + break; + } + stmt = stmt->GetNext(); + } + return stmt; +} + +// insert this before pos +void StmtNode::InsertAfterThis(StmtNode &pos) { + this->SetNext(&pos); + if (pos.GetPrev()) { + this->SetPrev(pos.GetPrev()); + pos.GetPrev()->SetNext(this); + } + pos.SetPrev(this); +} + +// insert stmtnode after pos +void StmtNode::InsertBeforeThis(StmtNode &pos) { + this->SetPrev(&pos); + if (pos.GetNext()) { + this->SetNext(pos.GetNext()); + pos.GetNext()->SetPrev(this); + } + pos.SetNext(this); +} + +void DassignNode::Dump(int32 indent) const { + StmtNode::DumpBase(indent); + const MIRSymbol *st = theMIRModule->CurFunction()->GetLocalOrGlobalSymbol(stIdx); + ASSERT(st != nullptr, "null ptr check"); + LogInfo::MapleLogger() << (st->IsLocal() ? " %" : " $"); + LogInfo::MapleLogger() << st->GetName() << " " << fieldID; + LogInfo::MapleLogger() << " ("; + if (GetRHS() != nullptr) { + GetRHS()->Dump(indent + 1); + } else { + LogInfo::MapleLogger() << "/*empty-rhs*/"; + } + LogInfo::MapleLogger() << ")\n"; +} + +void DassignoffNode::Dump(int32 indent) const { + StmtNode::DumpBase(indent); + LogInfo::MapleLogger() << " " << GetPrimTypeName(GetPrimType()); + const MIRSymbol *st = theMIRModule->CurFunction()->GetLocalOrGlobalSymbol(stIdx); + ASSERT(st != nullptr, "null ptr check"); + LogInfo::MapleLogger() << (st->IsLocal() ? " %" : " $"); + LogInfo::MapleLogger() << st->GetName() << " " << offset; + LogInfo::MapleLogger() << " ("; + if (GetRHS() != nullptr) { + GetRHS()->Dump(indent + 1); + } else { + LogInfo::MapleLogger() << "/*empty-rhs*/"; + } + LogInfo::MapleLogger() << ")\n"; +} + +void RegassignNode::Dump(int32 indent) const { + StmtNode::DumpBase(indent); + LogInfo::MapleLogger() << " " << GetPrimTypeName(GetPrimType()); + if (regIdx >= 0) { + LogInfo::MapleLogger() << " %" << theMIRModule->CurFunction()->GetPregTab()->PregFromPregIdx(regIdx)->GetPregNo(); + } else { + LogInfo::MapleLogger() << " %%"; + switch (regIdx) { + case -kSregSp: + LogInfo::MapleLogger() << "SP"; + break; + case -kSregFp: + LogInfo::MapleLogger() << "FP"; + break; + case -kSregGp: + LogInfo::MapleLogger() << "GP"; + break; + case -kSregThrownval: + LogInfo::MapleLogger() << "thrownval"; + break; + case -kSregMethodhdl: + LogInfo::MapleLogger() << "methodhdl"; + break; + case -kSregRetval0: + LogInfo::MapleLogger() << "retval0"; + break; + // no default + default: + break; + } + } + LogInfo::MapleLogger() << " ("; + UnaryStmtNode::Opnd(0)->Dump(indent + 1); + LogInfo::MapleLogger() << ")\n"; +} + +void IassignNode::Dump(int32 indent) const { + StmtNode::DumpBase(indent); + LogInfo::MapleLogger() << " "; + GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx)->Dump(0); + LogInfo::MapleLogger() << " " << fieldID; + LogInfo::MapleLogger() << " ("; + if (addrExpr->IsLeaf() && rhs->IsLeaf()) { + addrExpr->Dump(0); + LogInfo::MapleLogger() << ", "; + rhs->Dump(0); + } else { + LogInfo::MapleLogger() << '\n'; + PrintIndentation(indent + 1); + addrExpr->Dump(indent + 1); + LogInfo::MapleLogger() << ", \n"; + PrintIndentation(indent + 1); + rhs->Dump(indent + 1); + } + LogInfo::MapleLogger() << ")\n"; +} + +void IassignoffNode::Dump(int32 indent) const { + StmtNode::DumpBase(indent); + LogInfo::MapleLogger() << " " << GetPrimTypeName(GetPrimType()) << " " << offset; + BinaryOpnds::Dump(indent); + LogInfo::MapleLogger() << '\n'; +} + +void IassignFPoffNode::Dump(int32 indent) const 
{ + StmtNode::DumpBase(indent); + LogInfo::MapleLogger() << " " << GetPrimTypeName(GetPrimType()) << " " << offset; + DumpOpnd(*theMIRModule, indent); + LogInfo::MapleLogger() << '\n'; +} + +void BlkassignoffNode::Dump(int32 indent) const { + StmtNode::DumpBase(indent); + LogInfo::MapleLogger() << " " << offset << " " << GetAlign() << " " << blockSize; + BinaryOpnds::Dump(indent); + LogInfo::MapleLogger() << '\n'; +} + +void GotoNode::Dump(int32 indent) const { + StmtNode::DumpBase(indent); + if (offset == 0) { + LogInfo::MapleLogger() << '\n'; + } else { + LogInfo::MapleLogger() << " @" << theMIRModule->CurFunction()->GetLabelName(static_cast(offset)) << '\n'; + } +} + +void JsTryNode::Dump(int32 indent) const { + StmtNode::DumpBase(indent); + if (catchOffset == 0) { + LogInfo::MapleLogger() << " 0"; + } else { + LogInfo::MapleLogger() << " @" << theMIRModule->CurFunction()->GetLabelName(static_cast(catchOffset)); + } + if (finallyOffset == 0) { + LogInfo::MapleLogger() << " 0\n"; + } else { + LogInfo::MapleLogger() << " @" << theMIRModule->CurFunction()->GetLabelName(static_cast(finallyOffset)) + << '\n'; + } +} + +void TryNode::Dump(int32 indent) const { + StmtNode::DumpBase(indent); + LogInfo::MapleLogger() << " {"; + for (size_t i = 0; i < offsets.size(); ++i) { + uint32 offset = offsets[i]; + LogInfo::MapleLogger() << " @" << theMIRModule->CurFunction()->GetLabelName((LabelIdx)offset); + } + LogInfo::MapleLogger() << " }\n"; +} + +void CondGotoNode::Dump(int32 indent) const { + StmtNode::DumpBase(indent); + LogInfo::MapleLogger() << " @" << theMIRModule->CurFunction()->GetLabelName(static_cast(offset)); + LogInfo::MapleLogger() << " ("; + Opnd(0)->Dump(indent); + LogInfo::MapleLogger() << ")\n"; +} + +void SwitchNode::Dump(int32 indent) const { + StmtNode::DumpBase(indent); + LogInfo::MapleLogger() << " ("; + switchOpnd->Dump(indent); + if (defaultLabel == 0) { + LogInfo::MapleLogger() << ") 0 {"; + } else { + LogInfo::MapleLogger() << ") @" << theMIRModule->CurFunction()->GetLabelName(defaultLabel) << " {"; + } + for (auto it = switchTable.begin(); it != switchTable.end(); it++) { + LogInfo::MapleLogger() << '\n'; + PrintIndentation(indent + 1); + LogInfo::MapleLogger() << std::hex << "0x" << it->first << std::dec; + LogInfo::MapleLogger() << ": goto @" << theMIRModule->CurFunction()->GetLabelName(it->second); + } + LogInfo::MapleLogger() << " }\n"; +} + +void RangeGotoNode::Dump(int32 indent) const { + StmtNode::DumpBase(indent); + LogInfo::MapleLogger() << " ("; + Opnd(0)->Dump(indent); + LogInfo::MapleLogger() << ") " << tagOffset << " {"; + for (auto it = rangegotoTable.begin(); it != rangegotoTable.end(); it++) { + LogInfo::MapleLogger() << '\n'; + PrintIndentation(indent + 1); + LogInfo::MapleLogger() << std::hex << "0x" << it->first << std::dec; + LogInfo::MapleLogger() << ": goto @" << theMIRModule->CurFunction()->GetLabelName(it->second); + } + LogInfo::MapleLogger() << " }\n"; +} + +void MultiwayNode::Dump(int32 indent) const { + StmtNode::DumpBase(indent); + LogInfo::MapleLogger() << " ("; + multiWayOpnd->Dump(indent); + if (defaultLabel == 0) { + LogInfo::MapleLogger() << ") 0 {"; + } else { + LogInfo::MapleLogger() << ") @" << theMIRModule->CurFunction()->GetLabelName(defaultLabel) << " {"; + } + for (auto it = multiWayTable.begin(); it != multiWayTable.end(); it++) { + LogInfo::MapleLogger() << '\n'; + PrintIndentation(indent); + LogInfo::MapleLogger() << " ("; + it->first->Dump(indent + 1); + LogInfo::MapleLogger() << "): goto @" << 
theMIRModule->CurFunction()->GetLabelName(it->second); + } + LogInfo::MapleLogger() << " }\n"; +} + +void UnaryStmtNode::DumpOpnd(const MIRModule &mod [[maybe_unused]], int32 indent) const { + DumpOpnd(indent); +} + +void UnaryStmtNode::DumpOpnd(int32 indent) const { + LogInfo::MapleLogger() << " ("; + if (uOpnd != nullptr) { + uOpnd->Dump(indent); + } + LogInfo::MapleLogger() << ")\n"; +} + +void UnaryStmtNode::Dump(int32 indent) const { + StmtNode::DumpBase(indent); + DumpOpnd(indent); +} + +void GCMallocNode::Dump(int32 indent [[maybe_unused]]) const { + BaseNode::DumpBase(0); + LogInfo::MapleLogger() << " "; + GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx)->Dump(0); +} + +void JarrayMallocNode::Dump(int32 indent) const { + BaseNode::DumpBase(0); + LogInfo::MapleLogger() << " "; + GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx)->Dump(0, false); + DumpOpnd(*theMIRModule, indent); +} + +void IfStmtNode::Dump(int32 indent) const { + StmtNode::DumpBase(indent); + LogInfo::MapleLogger() << " ("; + Opnd()->Dump(indent); + LogInfo::MapleLogger() << ")"; + thenPart->Dump(indent); + if (elsePart) { + PrintIndentation(indent); + LogInfo::MapleLogger() << "else {\n"; + for (auto &stmt : elsePart->GetStmtNodes()) { + stmt.Dump(indent + 1); + } + PrintIndentation(indent); + LogInfo::MapleLogger() << "}\n"; + } +} + +void WhileStmtNode::Dump(int32 indent) const { + StmtNode::DumpBase(indent); + if (GetOpCode() == OP_while) { + LogInfo::MapleLogger() << " ("; + Opnd(0)->Dump(indent); + LogInfo::MapleLogger() << ")"; + body->Dump(indent); + } else { // OP_dowhile + LogInfo::MapleLogger() << " {\n"; + for (auto &stmt : body->GetStmtNodes()) { + stmt.Dump(indent + 1); + } + PrintIndentation(indent); + LogInfo::MapleLogger() << "} ("; + Opnd(0)->Dump(indent); + LogInfo::MapleLogger() << ")\n"; + } +} + +void DoloopNode::DumpDoVar(const MIRModule &mod) const { + if (isPreg) { + LogInfo::MapleLogger() << " %" + << mod.CurFunction()->GetPregTab()->PregFromPregIdx(doVarStIdx.FullIdx())->GetPregNo() + << " (\n"; + } else { + const MIRSymbol *st = mod.CurFunction()->GetLocalOrGlobalSymbol(doVarStIdx); + CHECK_NULL_FATAL(st); + LogInfo::MapleLogger() << " %" << st->GetName() << " (\n"; + } +} + +void DoloopNode::Dump(int32 indent) const { + StmtNode::DumpBase(indent); + DumpDoVar(*theMIRModule); + PrintIndentation(indent + 1); + startExpr->Dump(indent + 1); + LogInfo::MapleLogger() << ",\n"; + PrintIndentation(indent + 1); + condExpr->Dump(indent + 1); + LogInfo::MapleLogger() << ",\n"; + PrintIndentation(indent + 1); + incrExpr->Dump(indent + 1); + LogInfo::MapleLogger() << ")"; + doBody->Dump(indent + 1); +} + +void ForeachelemNode::Dump(int32 indent) const { + StmtNode::DumpBase(indent); + const MIRSymbol *st = theMIRModule->CurFunction()->GetLocalOrGlobalSymbol(elemStIdx); + ASSERT(st != nullptr, "null ptr check"); + LogInfo::MapleLogger() << " %" << st->GetName(); + st = theMIRModule->CurFunction()->GetLocalOrGlobalSymbol(arrayStIdx); + ASSERT(st != nullptr, "null ptr check"); + LogInfo::MapleLogger() << (arrayStIdx.Islocal() ? 
" %" : " $"); + LogInfo::MapleLogger() << st->GetName(); + loopBody->Dump(indent + 1); +} + +void BinaryStmtNode::Dump(int32 indent) const { + StmtNode::DumpBase(indent); + BinaryOpnds::Dump(indent); + LogInfo::MapleLogger() << '\n'; +} + +void NaryStmtNode::Dump(int32 indent) const { + StmtNode::DumpBase(indent); + NaryOpnds::Dump(indent); + LogInfo::MapleLogger() << '\n'; +} + +void CallAssertNonnullStmtNode::Dump(int32 indent) const { + StmtNode::DumpBase(indent); + SafetyCallCheckStmtNode::Dump(); + UnaryStmtNode::DumpOpnd(indent); +} + +void AssertNonnullStmtNode::Dump(int32 indent) const { + StmtNode::DumpBase(indent); + if (theMIRModule->IsCModule()) { + SafetyCheckStmtNode::Dump(); + } + UnaryStmtNode::DumpOpnd(indent); +} + +void AssertBoundaryStmtNode::Dump(int32 indent) const { + StmtNode::DumpBase(indent); + SafetyCheckStmtNode::Dump(); + NaryOpnds::Dump(indent); + LogInfo::MapleLogger() << '\n'; +} + +void CallAssertBoundaryStmtNode::Dump(int32 indent) const { + StmtNode::DumpBase(indent); + SafetyCallCheckStmtNode::Dump(); + NaryOpnds::Dump(indent); + LogInfo::MapleLogger() << '\n'; +} + +void DumpCallReturns(const MIRModule &mod, CallReturnVector nrets, int32 indent) { + const MIRFunction *mirFunc = mod.CurFunction(); + if (nrets.empty()) { + LogInfo::MapleLogger() << " {}\n"; + return; + } else if (nrets.size() == 1) { + StIdx stIdx = nrets.begin()->first; + RegFieldPair regFieldPair = nrets.begin()->second; + if (!regFieldPair.IsReg()) { + const MIRSymbol *st = mirFunc->GetLocalOrGlobalSymbol(stIdx); + ASSERT(st != nullptr, "st is null"); + FieldID fieldID = regFieldPair.GetFieldID(); + LogInfo::MapleLogger() << " { dassign "; + LogInfo::MapleLogger() << (stIdx.Islocal() ? "%" : "$"); + LogInfo::MapleLogger() << st->GetName() << " " << fieldID << " }\n"; + return; + } else { + PregIdx regIdx = regFieldPair.GetPregIdx(); + const MIRPreg *mirPreg = mirFunc->GetPregItem(static_cast(regIdx)); + ASSERT(mirPreg != nullptr, "mirPreg is null"); + LogInfo::MapleLogger() << " { regassign"; + LogInfo::MapleLogger() << " " << GetPrimTypeName(mirPreg->GetPrimType()); + LogInfo::MapleLogger() << " %" << mirPreg->GetPregNo() << "}\n"; + return; + } + } + LogInfo::MapleLogger() << " {\n"; + for (auto it = nrets.begin(); it != nrets.end(); it++) { + PrintIndentation(indent + 2); + StIdx stIdx = (it)->first; + RegFieldPair regFieldPair = it->second; + if (!regFieldPair.IsReg()) { + FieldID fieldID = regFieldPair.GetFieldID(); + LogInfo::MapleLogger() << "dassign"; + const MIRSymbol *st = mirFunc->GetLocalOrGlobalSymbol(stIdx); + ASSERT(st != nullptr, "st is null"); + LogInfo::MapleLogger() << (stIdx.Islocal() ? 
" %" : " $"); + LogInfo::MapleLogger() << st->GetName() << " " << fieldID << '\n'; + } else { + PregIdx regIdx = regFieldPair.GetPregIdx(); + const MIRPreg *mirPreg = mirFunc->GetPregItem(static_cast(regIdx)); + ASSERT(mirPreg != nullptr, "mirPreg is null"); + LogInfo::MapleLogger() << "regassign" + << " " << GetPrimTypeName(mirPreg->GetPrimType()); + LogInfo::MapleLogger() << " %" << mirPreg->GetPregNo() << '\n'; + } + } + PrintIndentation(indent + 1); + LogInfo::MapleLogger() << "}\n"; +} + +// iread expr has sideeffect, may cause derefference error +bool HasIreadExpr(const BaseNode *expr) { + if (expr->GetOpCode() == OP_iread) { + return true; + } + for (size_t i = 0; i < expr->GetNumOpnds(); ++i) { + if (HasIreadExpr(expr->Opnd(i))) { + return true; + } + } + return false; +} + +// layer to leaf node +size_t MaxDepth(const BaseNode *expr) { + if (expr->IsLeaf()) { + return 1; + } + size_t maxSubDepth = 0; + for (size_t i = 0; i < expr->GetNumOpnds(); ++i) { + size_t depth = MaxDepth(expr->Opnd(i)); + maxSubDepth = (depth > maxSubDepth) ? depth : maxSubDepth; + } + return maxSubDepth + 1; // expr itself +} + +MIRType *CallNode::GetCallReturnType() { + if (!kOpcodeInfo.IsCallAssigned(GetOpCode())) { + return nullptr; + } + ASSERT(GlobalTables::GetFunctionTable().GetFuncTable().empty() == false, "container check"); + MIRFunction *mirFunc = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(puIdx); + return mirFunc->GetReturnType(); +} + +const MIRSymbol *CallNode::GetCallReturnSymbol(const MIRModule &mod) const { + if (!kOpcodeInfo.IsCallAssigned(GetOpCode())) { + return nullptr; + } + const CallReturnVector &nRets = this->GetReturnVec(); + if (nRets.size() == 1) { + StIdx stIdx = nRets.begin()->first; + RegFieldPair regFieldPair = nRets.begin()->second; + if (!regFieldPair.IsReg()) { + const MIRFunction *mirFunc = mod.CurFunction(); + const MIRSymbol *st = mirFunc->GetLocalOrGlobalSymbol(stIdx); + return st; + } + } + return nullptr; +} + +void CallNode::Dump(int32 indent, bool newline) const { + StmtNode::DumpBase(indent); + if (tyIdx != 0u) { + LogInfo::MapleLogger() << " "; + GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx)->Dump(indent + 1); + } + CHECK(puIdx < GlobalTables::GetFunctionTable().GetFuncTable().size(), "index out of range in CallNode::Dump"); + MIRFunction *func = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(puIdx); + LogInfo::MapleLogger() << " &" << func->GetName(); + NaryOpnds::Dump(indent); + if (kOpcodeInfo.IsCallAssigned(GetOpCode())) { + DumpCallReturns(*theMIRModule, this->GetReturnVec(), indent); + } else if (newline) { + LogInfo::MapleLogger() << '\n'; + } +} + +MIRType *IcallNode::GetCallReturnType() { + if (op == OP_icall || op == OP_icallassigned) { + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(retTyIdx); + } + // icallproto or icallprotoassigned + MIRType *retType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(retTyIdx); + MIRFuncType *funcType = nullptr; + if (retType->IsMIRPtrType()) { + funcType = static_cast(retType)->GetPointedFuncType(); + } else if (retType->IsMIRFuncType()) { + funcType = static_cast(retType); + } + CHECK_FATAL(funcType != nullptr, "cannot find prototype for icall"); + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(funcType->GetRetTyIdx()); +} + +const MIRSymbol *IcallNode::GetCallReturnSymbol(const MIRModule &mod) const { + if (!kOpcodeInfo.IsCallAssigned(GetOpCode())) { + return nullptr; + } + const CallReturnVector &nRets = this->GetReturnVec(); + if (nRets.size() == 1) { + StIdx stIdx = 
nRets.begin()->first; + RegFieldPair regFieldPair = nRets.begin()->second; + if (!regFieldPair.IsReg()) { + const MIRFunction *mirFunc = mod.CurFunction(); + const MIRSymbol *st = mirFunc->GetLocalOrGlobalSymbol(stIdx); + return st; + } + } + return nullptr; +} + +void IcallNode::Dump(int32 indent, bool newline) const { + StmtNode::DumpBase(indent); + if (op == OP_icallproto || op == OP_icallprotoassigned) { + LogInfo::MapleLogger() << " "; + GlobalTables::GetTypeTable().GetTypeFromTyIdx(retTyIdx)->Dump(indent + 1); + } + NaryOpnds::Dump(indent); + if (kOpcodeInfo.IsCallAssigned(GetOpCode())) { + DumpCallReturns(*theMIRModule, this->returnValues, indent); + } else if (newline) { + LogInfo::MapleLogger() << '\n'; + } +} + +MIRType *IntrinsiccallNode::GetCallReturnType() { + CHECK_FATAL(intrinsic < INTRN_LAST, "Index out of bound in IntrinsiccallNode::GetCallReturnType"); + IntrinDesc *intrinDesc = &IntrinDesc::intrinTable[intrinsic]; + return intrinDesc->GetReturnType(); +} + +void IntrinsiccallNode::Dump(int32 indent, bool newline) const { + StmtNode::DumpBase(indent); + if (tyIdx != 0u) { + LogInfo::MapleLogger() << " "; + GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx)->Dump(indent + 1); + } + if (GetOpCode() == OP_intrinsiccall || GetOpCode() == OP_intrinsiccallassigned || + GetOpCode() == OP_intrinsiccallwithtype || GetOpCode() == OP_intrinsiccallwithtypeassigned) { + LogInfo::MapleLogger() << " " << GetIntrinsicName(intrinsic); + } else { + LogInfo::MapleLogger() << " " << intrinsic; + } + NaryOpnds::Dump(indent); + if (kOpcodeInfo.IsCallAssigned(GetOpCode())) { + DumpCallReturns(*theMIRModule, this->GetReturnVec(), indent); + } else if (newline) { + LogInfo::MapleLogger() << '\n'; + } +} + +void CallinstantNode::Dump(int32 indent, bool newline) const { + StmtNode::DumpBase(indent); + MIRFunction *func = GlobalTables::GetFunctionTable().GetFunctionFromPuidx(GetPUIdx()); + LogInfo::MapleLogger() << " &" << func->GetName(); + MIRType *ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(instVecTyIdx); + LogInfo::MapleLogger() << "<"; + auto *instVecType = static_cast(ty); + instVecType->Dump(indent); + LogInfo::MapleLogger() << ">"; + NaryOpnds::Dump(indent); + if (kOpcodeInfo.IsCallAssigned(GetOpCode())) { + DumpCallReturns(*theMIRModule, this->GetReturnVec(), indent); + } else if (newline) { + LogInfo::MapleLogger() << '\n'; + } +} + +void BlockNode::Dump(int32 indent, const MIRSymbolTable *theSymTab, MIRPregTable *thePregTab, bool withInfo, + bool isFuncbody, MIRFlavor flavor) const { + if (!withInfo) { + LogInfo::MapleLogger() << " {\n"; + } + // output puid for debugging purpose + if (isFuncbody) { + theMIRModule->CurFunction()->DumpFuncBody(indent); + if (theSymTab != nullptr || thePregTab != nullptr) { + // print the locally declared type names + if (theMIRModule->CurFunction()->HaveTypeNameTab()) { + for (auto it : theMIRModule->CurFunction()->GetGStrIdxToTyIdxMap()) { + const std::string &name = GlobalTables::GetStrTable().GetStringFromStrIdx(it.first); + MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(it.second); + PrintIndentation(indent + 1); + LogInfo::MapleLogger() << "type %" << name << " "; + if (type->GetKind() != kTypeByName) { + type->Dump(indent + 2, true); + } else { + type->Dump(indent + 2); + } + LogInfo::MapleLogger() << '\n'; + } + } + // print the locally declared variables + theSymTab->Dump(true, indent + 1, false, flavor); /* first:isLocal, third:printDeleted */ + if (thePregTab != nullptr) { + thePregTab->DumpPregsWithTypes(indent + 1); + } 
+ } + LogInfo::MapleLogger() << '\n'; + if (theMIRModule->CurFunction()->NeedEmitAliasInfo()) { + theMIRModule->CurFunction()->DumpScope(); + } + } + srcPosition.DumpLoc(lastPrintedLineNum, lastPrintedColumnNum); + // dump stmtFreqs + if (Options::profileUse && theMIRModule->CurFunction()->GetFuncProfData()) { + int64_t freq = static_cast(theMIRModule->CurFunction()->GetFuncProfData()->GetStmtFreq(GetStmtID())); + if (freq >= 0) { + LogInfo::MapleLogger() << "stmtID " << GetStmtID() << " freq " << freq << "\n"; + } + } + for (auto &stmt : GetStmtNodes()) { + stmt.Dump(indent + 1); + } + PrintIndentation(indent); + LogInfo::MapleLogger() << "}\n"; +} + +void LabelNode::Dump(int32 indent [[maybe_unused]]) const { + if (theMIRModule->CurFunction()->WithLocInfo()) { + srcPosition.DumpLoc(lastPrintedLineNum, lastPrintedColumnNum); + } + // dump stmtFreqs + if (Options::profileUse && theMIRModule->CurFunction()->GetFuncProfData()) { + int64_t freq = static_cast(theMIRModule->CurFunction()->GetFuncProfData()->GetStmtFreq(GetStmtID())); + if (freq >= 0) { + LogInfo::MapleLogger() << "stmtID " << GetStmtID() << " freq " << freq << "\n"; + } + } + LogInfo::MapleLogger() << "@" << theMIRModule->CurFunction()->GetLabelName(labelIdx) << " "; +} + +void CommentNode::Dump(int32 indent) const { + srcPosition.DumpLoc(lastPrintedLineNum, lastPrintedColumnNum); + PrintIndentation(indent); + LogInfo::MapleLogger() << "#" << comment << '\n'; +} + +void EmitStr(const MapleString &mplStr) { + const char *str = mplStr.c_str(); + size_t len = mplStr.length(); + LogInfo::MapleLogger() << "\""; + + // don't expand special character; convert all \s to \\s in string + for (size_t i = 0; i < len; ++i) { + /* Referred to GNU AS: 3.6.1.1 Strings */ + constexpr int kBufSize = 5; + constexpr int kFirstChar = 0; + constexpr int kSecondChar = 1; + constexpr int kThirdChar = 2; + constexpr int kLastChar = 4; + char buf[kBufSize]; + if (isprint(*str)) { + buf[kFirstChar] = *str; + buf[kSecondChar] = 0; + if (*str == '\\' || *str == '\"') { + buf[kFirstChar] = '\\'; + buf[kSecondChar] = *str; + buf[kThirdChar] = 0; + } + LogInfo::MapleLogger() << buf; + } else if (*str == '\b') { + LogInfo::MapleLogger() << "\\b"; + } else if (*str == '\n') { + LogInfo::MapleLogger() << "\\n"; + } else if (*str == '\r') { + LogInfo::MapleLogger() << "\\r"; + } else if (*str == '\t') { + LogInfo::MapleLogger() << "\\t"; + } else if (*str == '\0') { + buf[kFirstChar] = '\\'; + buf[kSecondChar] = '0'; + buf[kThirdChar] = 0; + LogInfo::MapleLogger() << buf; + } else { + /* all others, print as number */ + int ret = snprintf_s(buf, sizeof(buf), kBufSize - 1, "\\%03o", static_cast(*str) & 0xFF); + if (ret < 0) { + FATAL(kLncFatal, "snprintf_s failed"); + } + buf[kLastChar] = '\0'; + LogInfo::MapleLogger() << buf; + } + str++; + } + + LogInfo::MapleLogger() << "\"\n"; +} + +bool AsmNode::IsSameContent(const BaseNode *node) const { + if (node->GetOpCode() != GetOpCode()) { + return false; + } + auto *asmNode = static_cast(node); + if (asmNode->NumOpnds() != NumOpnds()) { + return false; + } + if (asmNode->asmString != asmString) { + return false; + } + for (size_t i = 0; i < inputConstraints.size(); ++i) { + if (asmNode->inputConstraints[i] != inputConstraints[i]) { + return false; + } + } + for (size_t i = 0; i < outputConstraints.size(); ++i) { + if (asmNode->outputConstraints[i] != outputConstraints[i]) { + return false; + } + } + for (size_t i = 0; i < clobberList.size(); ++i) { + if (asmNode->clobberList[i] != clobberList[i]) { + return false; + } 
+ } + return true; +} + +AsmNode *AsmNode::CloneTree(MapleAllocator &allocator) const { + auto *node = allocator.GetMemPool()->New(allocator, *this); + for (size_t i = 0; i < GetNopndSize(); ++i) { + node->GetNopnd().push_back(GetNopndAt(i)->CloneTree(allocator)); + } + for (size_t i = 0; i < inputConstraints.size(); ++i) { + node->inputConstraints.push_back(inputConstraints[i]); + } + for (size_t i = 0; i < asmOutputs.size(); ++i) { + node->asmOutputs.push_back(asmOutputs[i]); + } + for (size_t i = 0; i < outputConstraints.size(); ++i) { + node->outputConstraints.push_back(outputConstraints[i]); + } + for (size_t i = 0; i < clobberList.size(); ++i) { + node->clobberList.push_back(clobberList[i]); + } + for (size_t i = 0; i < gotoLabels.size(); ++i) { + node->gotoLabels.push_back(gotoLabels[i]); + } + node->SetNumOpnds(static_cast(GetNopndSize())); + return node; +} + +void AsmNode::DumpOutputs(int32 indent, std::string &uStr) const { + PrintIndentation(indent + 1); + LogInfo::MapleLogger() << " :"; + size_t numOutputs = asmOutputs.size(); + + const MIRFunction *mirFunc = theMIRModule->CurFunction(); + if (numOutputs == 0) { + LogInfo::MapleLogger() << '\n'; + } else { + for (size_t i = 0; i < numOutputs; i++) { + if (i != 0) { + PrintIndentation(indent + 2); // Increase the indent by 2 bytes. + } + uStr = GlobalTables::GetUStrTable().GetStringFromStrIdx(outputConstraints[i]); + PrintString(uStr); + LogInfo::MapleLogger() << " "; + StIdx stIdx = asmOutputs[i].first; + RegFieldPair regFieldPair = asmOutputs[i].second; + if (!regFieldPair.IsReg()) { + FieldID fieldID = regFieldPair.GetFieldID(); + LogInfo::MapleLogger() << "dassign"; + const MIRSymbol *st = mirFunc->GetLocalOrGlobalSymbol(stIdx); + ASSERT(st != nullptr, "st is null"); + LogInfo::MapleLogger() << (stIdx.Islocal() ? " %" : " $"); + LogInfo::MapleLogger() << st->GetName() << " " << fieldID; + } else { + PregIdx regIdx = regFieldPair.GetPregIdx(); + const MIRPreg *mirPreg = mirFunc->GetPregItem(static_cast(regIdx)); + ASSERT(mirPreg != nullptr, "mirPreg is null"); + LogInfo::MapleLogger() << "regassign" + << " " << GetPrimTypeName(mirPreg->GetPrimType()); + LogInfo::MapleLogger() << " %" << mirPreg->GetPregNo(); + } + if (i != numOutputs - 1) { + LogInfo::MapleLogger() << ','; + } + LogInfo::MapleLogger() << '\n'; + } + } +} + +void AsmNode::DumpInputOperands(int32 indent, std::string &uStr) const { + PrintIndentation(indent + 1); + LogInfo::MapleLogger() << " :"; + if (numOpnds == 0) { + LogInfo::MapleLogger() << '\n'; + } else { + for (size_t i = 0; i < numOpnds; i++) { + if (i != 0) { + PrintIndentation(indent + 2); // Increase the indent by 2 bytes. + } + uStr = GlobalTables::GetUStrTable().GetStringFromStrIdx(inputConstraints[i]); + PrintString(uStr); + LogInfo::MapleLogger() << " ("; + GetNopndAt(i)->Dump(indent + 4); // Increase the indent by 4 bytes. 
+ LogInfo::MapleLogger() << ")"; + if (i != static_cast(static_cast(numOpnds - 1))) { + LogInfo::MapleLogger() << ','; + } + LogInfo::MapleLogger() << "\n"; + } + } +} + +void AsmNode::Dump(int32 indent) const { + srcPosition.DumpLoc(lastPrintedLineNum, lastPrintedColumnNum); + PrintIndentation(indent); + LogInfo::MapleLogger() << kOpcodeInfo.GetName(op); + if (GetQualifier(kASMvolatile)) { + LogInfo::MapleLogger() << " volatile"; + } + if (GetQualifier(kASMinline)) { + LogInfo::MapleLogger() << " inline"; + } + if (GetQualifier(kASMgoto)) { + LogInfo::MapleLogger() << " goto"; + } + LogInfo::MapleLogger() << " { "; + EmitStr(asmString); + // print outputs + std::string uStr; + DumpOutputs(indent, uStr); + // print input operands + DumpInputOperands(indent, uStr); + // print clobber list + PrintIndentation(indent + 1); + LogInfo::MapleLogger() << " :"; + for (size_t i = 0; i < clobberList.size(); i++) { + uStr = GlobalTables::GetUStrTable().GetStringFromStrIdx(clobberList[i]); + PrintString(uStr); + if (i != clobberList.size() - 1) { + LogInfo::MapleLogger() << ','; + } + } + LogInfo::MapleLogger() << '\n'; + // print labels + PrintIndentation(indent + 1); + LogInfo::MapleLogger() << " :"; + for (size_t i = 0; i < gotoLabels.size(); i++) { + LabelIdx offset = gotoLabels[i]; + LogInfo::MapleLogger() << " @" << theMIRModule->CurFunction()->GetLabelName(offset); + if (i != gotoLabels.size() - 1) { + LogInfo::MapleLogger() << ','; + } + } + LogInfo::MapleLogger() << " }\n"; +} + +inline bool IntTypeVerify(PrimType pTyp) { + return pTyp == PTY_i32 || pTyp == PTY_u32 || pTyp == PTY_i64 || pTyp == PTY_u64; +} + +inline bool UnaryTypeVerify0(PrimType pTyp) { + bool verifyResult = IntTypeVerify(pTyp); + if (!verifyResult) { + LogInfo::MapleLogger() << "\n#Error:result type of bnot,extractbits,sext,zext must be in [i32,u32,i64,u64]\n"; + } + return verifyResult; +} + +bool ArithResTypeVerify(PrimType pTyp) { + switch (pTyp) { + case PTY_i32: + case PTY_u32: + case PTY_i64: + case PTY_u64: + case PTY_f32: + case PTY_f64: + return true; + case PTY_a32: + case PTY_a64: + case PTY_ptr: + return theMIRModule->IsCModule(); + default: + break; + } + + // Arithmetic operations on all vector types are allowed + PrimitiveType pt(pTyp); + if (pt.IsVector()) { + return true; + } + return false; +} + +inline bool UnaryTypeVerify1(PrimType pType) { + bool verifyResult = ArithResTypeVerify(pType); + if (!verifyResult) { + LogInfo::MapleLogger() << "\n#Error:result type of abs,neg must be in [i32,u32,i64,u64,f32,f64]\n"; + } + return verifyResult; +} + +inline bool UnaryTypeVerify2(PrimType pType) { + bool verifyResult = IsPrimitiveFloat(pType); + if (!verifyResult) { + LogInfo::MapleLogger() << "\n#Error:result-type of recip,sqrt must be in [f32,f64]\n"; + } + return verifyResult; +} + +inline bool BinaryTypeVerify(PrimType pType) { + return ArithResTypeVerify(pType) || IsPrimitiveDynType(pType); +} + +inline bool BinaryGenericVerify(const BaseNode &bOpnd0, const BaseNode &bOpnd1) { + return bOpnd0.Verify() && bOpnd1.Verify(); +} + +inline bool CompareTypeVerify(PrimType pType) { + bool verifyResult = IsPrimitiveInteger(pType); + if (!verifyResult) { + LogInfo::MapleLogger() << "\n#Error:result type of eq,ge,gt,le,lt,ne must be primitive integer\n"; + } + return verifyResult; +} + +enum PTYGroup { + kPTYGi32u32a32, + kPTYGi32u32a32PtrRef, + kPTYGi64u64a64, + kPTYGPtrRef, + kPTYGDynall, + kPTYGu1, + kPTYGSimpleObj, + kPTYGSimpleStr, + kPTYGOthers +}; + +uint8 GetPTYGroup(PrimType primType) { + switch (primType) { + 
case PTY_i32: + case PTY_u32: + case PTY_a32: + return kPTYGi32u32a32; + case PTY_i64: + case PTY_u64: + case PTY_a64: + return kPTYGi64u64a64; + case PTY_ref: + case PTY_ptr: + return kPTYGPtrRef; + case PTY_dynany: + case PTY_dyni32: + case PTY_dynf64: + case PTY_dynstr: + case PTY_dynobj: + case PTY_dynundef: + case PTY_dynbool: + case PTY_dynf32: + case PTY_dynnone: + case PTY_dynnull: + return kPTYGDynall; + case PTY_u1: + return kPTYGu1; + case PTY_simpleobj: + return kPTYGSimpleObj; + case PTY_simplestr: + return kPTYGSimpleStr; + default: + return kPTYGOthers; + } +} + +uint8 GetCompGroupID(const BaseNode &opnd) { + return GetPTYGroup(opnd.GetPrimType()); +} + +/* + Refer to C11 Language Specification. + $ 6.3.1.8 Usual arithmetic conversions + */ +bool CompatibleTypeVerify(const BaseNode &opnd1, const BaseNode &opnd2) { + uint8 groupID1 = GetCompGroupID(opnd1); + uint8 groupID2 = GetCompGroupID(opnd2); + Opcode opCode2 = opnd2.GetOpCode(); + bool verifyResult = (groupID1 == groupID2); + if (opCode2 == OP_gcmallocjarray || opCode2 == OP_gcpermallocjarray) { + verifyResult = (groupID1 == kPTYGi32u32a32); + } + if (!verifyResult) { + LogInfo::MapleLogger() << "\n#Error:incompatible operand types :\n"; + opnd1.Dump(); + opnd2.Dump(); + } + return verifyResult; +} + +bool FloatIntCvtTypeVerify(PrimType resPType, PrimType opndPType) { + bool resTypeVerf = resPType == PTY_i32 || resPType == PTY_u32 || resPType == PTY_i64 || resPType == PTY_u64; + if (!resTypeVerf) { + LogInfo::MapleLogger() << "\n#Error:result-type of ceil,floor,round,trunc must be in [i32,u32,i64,u64]\n"; + } + bool opndTypeVerf = opndPType == PTY_f32 || opndPType == PTY_f64; + if (!opndTypeVerf) { + LogInfo::MapleLogger() << "\n#Error:oerand-type of ceil,floor,round,trunc must be in [f32,f64]\n"; + } + return resTypeVerf && opndTypeVerf; +} + +inline MIRTypeKind GetTypeKind(StIdx stIdx) { + const MIRSymbol *var = theMIRModule->CurFunction()->GetLocalOrGlobalSymbol(stIdx); + ASSERT(var != nullptr, "null ptr check"); + MIRType *type = var->GetType(); + ASSERT(type != nullptr, "null ptr check"); + return type->GetKind(); +} + +inline MIRTypeKind GetTypeKind(TyIdx tyIdx) { + MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx); + ASSERT(type != nullptr, "null ptr check"); + return type->GetKind(); +} + +inline MIRType *GetPointedMIRType(TyIdx tyIdx) { + MIRType *type = GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx); + CHECK_FATAL(type->GetKind() == kTypePointer, "TyIdx: %d is not pointer type", static_cast(tyIdx)); + auto *ptrType = static_cast(type); + return ptrType->GetPointedType(); +} + +inline MIRTypeKind GetPointedTypeKind(TyIdx tyIdx) { + MIRType *pointedType = GetPointedMIRType(tyIdx); + ASSERT(pointedType != nullptr, "null ptr check"); + return pointedType->GetKind(); +} + +MIRTypeKind GetFieldTypeKind(MIRStructType *structType, FieldID fieldId) { + TyIdx fieldTyIdx; + if (fieldId > 0) { + MIRType *mirType = structType->GetFieldType(fieldId); + fieldTyIdx = mirType->GetTypeIndex(); + } else { + ASSERT(static_cast(-fieldId) < structType->GetParentFieldsSize() + 1, "array index out of range"); + fieldTyIdx = structType->GetParentFieldsElemt(-fieldId - 1).second.first; + } + return GetTypeKind(fieldTyIdx); +} + +inline bool IsStructureTypeKind(MIRTypeKind kind) { + return kind == kTypeStruct || kind == kTypeStructIncomplete || kind == kTypeUnion || kind == kTypeClass || + kind == kTypeClassIncomplete || kind == kTypeInterface || kind == kTypeInterfaceIncomplete; +} + +inline bool 
IsStructureVerify(FieldID fieldID, StIdx stIdx) { + if ((fieldID != 0) && (!IsStructureTypeKind(GetTypeKind(stIdx)))) { + LogInfo::MapleLogger() << "\n#Error:if fieldID is not 0, the variable must be a structure\n"; + return false; + } + return true; +} + +inline bool IsStructureVerify(FieldID fieldID, TyIdx tyIdx) { + if ((fieldID != 0) && (!IsStructureTypeKind(GetTypeKind(tyIdx)))) { + LogInfo::MapleLogger() << "\n#Error:if fieldID is not 0, the variable must be a structure\n"; + return false; + } + return true; +} + +bool IsSignedType(const BaseNode *opnd) { + switch (opnd->GetPrimType()) { + case PTY_i32: + case PTY_i64: + case PTY_f32: + case PTY_f64: + case PTY_dyni32: + case PTY_dynf32: + case PTY_dynf64: + return true; + default: + break; + } + return false; +} + +inline bool BinaryStrictSignVerify0(const BaseNode *bOpnd0, const BaseNode *bOpnd1) { + ASSERT(bOpnd0 != nullptr, "bOpnd0 is null"); + ASSERT(bOpnd1 != nullptr, "bOpnd1 is null"); + bool isDynany = (bOpnd0->GetPrimType() == PTY_dynany || bOpnd1->GetPrimType() == PTY_dynany); + return isDynany || (IsSignedType(bOpnd0) && IsSignedType(bOpnd1)) || (!IsSignedType(bOpnd0) && !IsSignedType(bOpnd1)); +} + +bool BinaryStrictSignVerify1(const BaseNode *bOpnd0, const BaseNode *bOpnd1, const BaseNode *res) { + if (GetCompGroupID(*res) == kPTYGDynall) { + return BinaryStrictSignVerify0(bOpnd0, res) && BinaryStrictSignVerify0(bOpnd1, res) && + BinaryStrictSignVerify0(bOpnd0, bOpnd1); + } + return (IsSignedType(bOpnd0) && IsSignedType(bOpnd1) && IsSignedType(res)) || + (!IsSignedType(bOpnd0) && !IsSignedType(bOpnd1) && !IsSignedType(res)); +} + +bool UnaryNode::Verify() const { + bool resTypeVerf = true; + if (GetOpCode() == OP_bnot) { + resTypeVerf = UnaryTypeVerify0(GetPrimType()); + } else if (GetOpCode() == OP_lnot) { + if (!IsPrimitiveInteger(GetPrimType())) { + resTypeVerf = false; + LogInfo::MapleLogger() << "\n#Error:result-type of lnot must be primitive integer\n"; + } + } else if (GetOpCode() == OP_abs || GetOpCode() == OP_neg) { + resTypeVerf = UnaryTypeVerify1(GetPrimType()); + } else if (GetOpCode() == OP_recip || GetOpCode() == OP_sqrt) { + resTypeVerf = UnaryTypeVerify2(GetPrimType()); + } + + // When an opcode only specifies one type, check for compatibility + // between the operands and the result-type. 
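+  // e.g. a neg whose result prim-type is i32 but whose operand is a dread of f64 is rejected here,
+  // because f64 and i32 fall into different prim-type groups (see GetPTYGroup above)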
+ bool compVerf = true; + // op_alloca : return type is not compatible with operand, skip + if (GetOpCode() != OP_alloca) { + compVerf = CompatibleTypeVerify(*uOpnd, *this); + } + bool opndExprVerf = uOpnd->Verify(); + return resTypeVerf && compVerf && opndExprVerf; +} + +bool TypeCvtNode::Verify() const { + bool opndTypeVerf = true; + bool opndSizeVerf = true; + if (GetOpCode() == OP_ceil || GetOpCode() == OP_floor || GetOpCode() == OP_round || GetOpCode() == OP_trunc) { + opndTypeVerf = FloatIntCvtTypeVerify(GetPrimType(), Opnd(0)->GetPrimType()); + } else if (GetOpCode() == OP_retype) { + if (GetPrimTypeSize(GetPrimType()) != GetPrimTypeSize(Opnd(0)->GetPrimType())) { + opndSizeVerf = false; + LogInfo::MapleLogger() << "\n#Error:The size of opnd0 and prim-type must be the same\n"; + } + } + bool opndExprVerf = Opnd(0)->Verify(); + return opndTypeVerf && opndSizeVerf && opndExprVerf; +} + +void AddRuntimeVerifyError(std::string errMsg, VerifyResult &verifyResult) { + LogInfo::MapleLogger() << "\n#Error: " << errMsg << '\n'; + // Throw Verify Error + verifyResult.AddPragmaVerifyError(verifyResult.GetCurrentClassName(), std::move(errMsg)); +} + +bool RetypeNode::VerifyPrimTypesAndOpnd() const { + PrimType toPrimType = GetPrimType(); + PrimType fromPrimType = Opnd(0)->GetPrimType(); + if (GetPrimTypeSize(toPrimType) != GetPrimTypeSize(fromPrimType)) { + LogInfo::MapleLogger() << "\n#Error: The size of opnd0 and prim-type must be the same\n"; + return false; + } + + if (!IsPrimitivePoint(toPrimType) || !IsPrimitivePoint(fromPrimType)) { + LogInfo::MapleLogger() << "\n#Error: Wrong prim-type in retype node, should be ref or ptr\n"; + return false; + } + return Opnd(0)->Verify(); +} + +bool RetypeNode::CheckFromJarray(const MIRType &from, const MIRType &to, VerifyResult &verifyResult) const { + // Array types are subtypes of Object. + // The intent is also that array types are subtypes of Cloneable and java.io.Serializable. + if (IsInterfaceOrClass(to)) { + Klass &toKlass = utils::ToRef(verifyResult.GetKlassHierarchy().GetKlassFromStrIdx(to.GetNameStrIdx())); + const std::string &toKlassName = toKlass.GetKlassName(); + const std::string &javaLangObject = namemangler::kJavaLangObjectStr; + const std::string javaLangCloneable = "Ljava_2Flang_2FCloneable_3B"; + const std::string javaIoSerializable = "Ljava_2Fio_2FSerializable_3B"; + if (toKlassName == javaLangObject || toKlassName == javaIoSerializable || toKlassName == javaLangCloneable) { + return true; + } + } + + AddRuntimeVerifyError("Java array " + from.GetName() + " is not assignable to " + to.GetName(), verifyResult); + return false; +} + +bool RetypeNode::IsJavaAssignable(const MIRType &from, const MIRType &to, VerifyResult &verifyResult) const { + // isJavaAssignable(arrayOf(X), arrayOf(Y)) :- compound(X), compound(Y), isJavaAssignable(X, Y). + // arrayOf(X), arrayOf(Y) should already be X, Y here + if (from.IsMIRJarrayType()) { + return CheckFromJarray(from, to, verifyResult); + } + // isJavaAssignable(arrayOf(X), arrayOf(Y)) :- atom(X), atom(Y), X = Y. 
+ // This rule is not applicable to Maple IR + if (from.IsScalarType() && to.IsScalarType()) { + return true; + } + + if (IsInterfaceOrClass(from) && IsInterfaceOrClass(to)) { + const KlassHierarchy &klassHierarchy = verifyResult.GetKlassHierarchy(); + const std::string javaLangObject = namemangler::kJavaLangObjectStr; + Klass &fromKlass = utils::ToRef(klassHierarchy.GetKlassFromStrIdx(from.GetNameStrIdx())); + Klass &toKlass = utils::ToRef(klassHierarchy.GetKlassFromStrIdx(to.GetNameStrIdx())); + // We can cast everything to java.lang.Object, but interface isn't subclass of that, so we need this branch + if (toKlass.GetKlassName() == javaLangObject) { + return true; + } + // isJavaAssignable(class(_, _), class(To, L)) :- loadedClass(To, L, ToClass), classIsInterface(ToClass). + // isJavaAssignable(From, To) :- isJavaSubclassOf(From, To). + bool isAssignableKlass = klassHierarchy.IsSuperKlass(&toKlass, &fromKlass) || + klassHierarchy.IsSuperKlassForInterface(&toKlass, &fromKlass) || + klassHierarchy.IsInterfaceImplemented(&toKlass, &fromKlass); + if (isAssignableKlass) { + return true; + } + AddRuntimeVerifyError("Java type " + fromKlass.GetKlassName() + " is NOT assignable to " + toKlass.GetKlassName(), + verifyResult); + return false; + } + AddRuntimeVerifyError(from.GetName() + " is NOT assignable to " + to.GetName(), verifyResult); + return false; +} + +bool RetypeNode::VerifyCompleteMIRType(const MIRType &from, const MIRType &to, bool isJavaRefType, + VerifyResult &verifyResult) const { + if (from.IsScalarType() && to.IsScalarType() && !isJavaRefType) { + if (GetPTYGroup(from.GetPrimType()) == GetPTYGroup(to.GetPrimType())) { + return true; + } + LogInfo::MapleLogger() << "\n#Error: retype scalar type failed\n"; + return false; + } + if (!verifyResult.GetMIRModule().IsJavaModule()) { + return true; + } + isJavaRefType = (IsJavaRefType(from) && IsJavaRefType(to)) || isJavaRefType; + if (isJavaRefType) { + return IsJavaAssignable(from, to, verifyResult); + } + + if (from.GetKind() != to.GetKind()) { + if (from.GetPrimType() == PTY_void || to.GetPrimType() == PTY_void) { + return true; + } + LogInfo::MapleLogger() << "\n#Error: Retype different kind: from " << from.GetKind() << " to " << to.GetKind() + << "\n"; + return false; + } + return true; +} + +bool RetypeNode::VerifyJarrayDimention(const MIRJarrayType &from, const MIRJarrayType &to, + VerifyResult &verifyResult) const { + int fromDim = const_cast(from).GetDim(); + int toDim = const_cast(to).GetDim(); + if (fromDim == toDim) { + return true; + } else if (fromDim > toDim) { + const MIRType *toElemType = to.GetElemType(); + while (toElemType != nullptr && (toElemType->IsMIRJarrayType() || toElemType->IsMIRPtrType())) { + toElemType = toElemType->IsMIRJarrayType() ? 
                                              static_cast<const MIRJarrayType*>(toElemType)->GetElemType()
+                                            : static_cast<const MIRPtrType*>(toElemType)->GetPointedType();
+    }
+    if (toElemType != nullptr && CheckFromJarray(from, *toElemType, verifyResult)) {
+      return true;
+    }
+  }
+  Dump(0);
+  std::string errorMsg =
+      "Arrays have different dimensions: from " + std::to_string(fromDim) + " to " + std::to_string(toDim);
+  AddRuntimeVerifyError(std::move(errorMsg), verifyResult);
+  return false;
+}
+
+bool RetypeNode::Verify(VerifyResult &verifyResult) const {
+  // If RetypeNode::Verify returns false, dump this node to show the wrong IR
+  if (!VerifyPrimTypesAndOpnd()) {
+    Dump(0);
+    LogInfo::MapleLogger() << "\n#Error: Verify PrimTypes and Opnd failed in retype node\n";
+    return false;
+  }
+  bool isJavaRefType = false;
+  const MIRType *fromMIRType = verifyResult.GetCurrentFunction()->GetNodeType(*Opnd(0));
+  const MIRType *toMIRType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx);
+  while (fromMIRType != nullptr && toMIRType != nullptr && BothPointerOrJarray(*fromMIRType, *toMIRType)) {
+    if (fromMIRType->IsMIRJarrayType()) {
+      isJavaRefType = true;
+      if (!VerifyJarrayDimention(static_cast<const MIRJarrayType&>(*fromMIRType),
+                                 static_cast<const MIRJarrayType&>(*toMIRType), verifyResult)) {
+        return false;
+      }
+      fromMIRType = static_cast<const MIRJarrayType*>(fromMIRType)->GetElemType();
+      toMIRType = static_cast<const MIRJarrayType*>(toMIRType)->GetElemType();
+    } else {
+      fromMIRType = static_cast<const MIRPtrType*>(fromMIRType)->GetPointedType();
+      toMIRType = static_cast<const MIRPtrType*>(toMIRType)->GetPointedType();
+    }
+  }
+  if (fromMIRType == nullptr || toMIRType == nullptr) {
+    Dump(0);
+    LogInfo::MapleLogger() << "\n#Error: MIRType is nullptr in retype node\n";
+    return false;
+  }
+
+  if (fromMIRType->IsIncomplete() || toMIRType->IsIncomplete()) {
+    // Add Deferred Check
+    const std::string &currentClassName = verifyResult.GetCurrentClassName();
+    LogInfo::MapleLogger(kLlDbg) << "Add AssignableCheck from " << fromMIRType->GetName() << " to "
+                                 << toMIRType->GetName() << " in class " << currentClassName << '\n';
+    verifyResult.AddPragmaAssignableCheck(currentClassName, fromMIRType->GetName(), toMIRType->GetName());
+    // Deferred Assignable Check returns true because we should collect all the deferred checks for runtime
+    return true;
+  }
+
+  if (VerifyCompleteMIRType(*fromMIRType, *toMIRType, isJavaRefType, verifyResult)) {
+    return true;
+  }
+  Dump(0);
+  LogInfo::MapleLogger() << "\n#Error: Verify Complete MIRType failed in retype node\n";
+  return false;
+}
+
+bool UnaryStmtNode::VerifyThrowable(VerifyResult &verifyResult) const {
+  const BaseNode *rhs = GetRHS();
+  if (rhs == nullptr) {
+    return true;
+  }
+
+  const MIRType *mirType = verifyResult.GetCurrentFunction()->GetNodeType(*rhs);
+  if (mirType != nullptr && mirType->IsMIRPtrType()) {
+    mirType = static_cast<const MIRPtrType*>(mirType)->GetPointedType();
+  }
+  if (mirType != nullptr) {
+    if (mirType->GetPrimType() == PTY_void) {
+      return true;
+    }
+    if (mirType->IsIncomplete()) {
+      // Add Deferred Check
+      const std::string &currentClassName = verifyResult.GetCurrentClassName();
+      std::string throwableName = "Ljava_2Flang_2FThrowable_3B";
+      LogInfo::MapleLogger(kLlDbg) << "Add AssignableCheck from " << mirType->GetName() << " to " << throwableName
+                                   << " in class " << currentClassName << '\n';
+      verifyResult.AddPragmaAssignableCheck(currentClassName, mirType->GetName(), std::move(throwableName));
+      // Deferred Assignable Check returns true because we should collect all the deferred checks for runtime
+      return true;
+    }
+    if (mirType->IsMIRClassType() && static_cast<const MIRClassType*>(mirType)->IsExceptionType()) {
+      return true;
+    }
+  }
+  Dump(0);
+  std::string errMsg =
(mirType == nullptr ? "nullptr" : mirType->GetName()); + errMsg += " is NOT throwable."; + AddRuntimeVerifyError(std::move(errMsg), verifyResult); + return false; +} + +bool IntrinsicopNode::Verify(VerifyResult &verifyResult) const { + if (GetIntrinsic() == INTRN_JAVA_ARRAY_LENGTH && !VerifyJArrayLength(verifyResult)) { + return false; + } + return VerifyOpnds(); +} + +bool IntrinsicopNode::VerifyJArrayLength(VerifyResult &verifyResult) const { + BaseNode &val = utils::ToRef(Opnd(0)); + const MIRType *valType = verifyResult.GetCurrentFunction()->GetNodeType(val); + if (valType != nullptr && valType->IsMIRPtrType()) { + valType = static_cast(valType)->GetPointedType(); + if (valType != nullptr && !valType->IsMIRJarrayType()) { + Dump(0); + AddRuntimeVerifyError("Operand of array length is not array", verifyResult); + return false; + } + } + return true; +} + +bool IreadNode::Verify() const { + bool addrExprVerf = Opnd(0)->Verify(); + bool pTypeVerf = true; + bool structVerf = true; + if (GetTypeKind(tyIdx) != kTypePointer) { + LogInfo::MapleLogger() << "\n#Error: must be a pointer type\n"; + return false; + } + if (GetOpCode() == OP_iaddrof) { + pTypeVerf = IsAddress(GetPrimType()); + if (!pTypeVerf) { + LogInfo::MapleLogger() << "\n#Error:prim-type must be either ptr, ref, a32 or a64\n"; + } + } else { + if (fieldID == 0 && IsStructureTypeKind(GetPointedTypeKind(tyIdx))) { + if (GetPrimType() != PTY_agg) { + pTypeVerf = false; + LogInfo::MapleLogger() + << "\n#Error:If the content dereferenced is a structure, then should specify agg\n"; + } + } + } + if (fieldID != 0) { + if (!IsStructureTypeKind(GetPointedTypeKind(tyIdx))) { + structVerf = false; + LogInfo::MapleLogger() << "\n#Error:If field-id is not 0, then type must specify pointer to a structure\n"; + } else { + MIRType *type = GetPointedMIRType(tyIdx); + auto *stTy = static_cast(type); + if (GetOpCode() == OP_iread && stTy->GetFieldsSize() != 0) { + if (IsStructureTypeKind(GetFieldTypeKind(stTy, fieldID))) { + if (GetPrimType() != PTY_agg) { + pTypeVerf = false; + LogInfo::MapleLogger() << "\n#Error:If the field itself is a structure, prim-type should specify agg\n"; + } + } + } + } + } + return addrExprVerf && pTypeVerf && structVerf; +} + +bool RegreadNode::Verify() const { + return true; +} + +bool IreadoffNode::Verify() const { + return true; +} + +bool IreadFPoffNode::Verify() const { + return true; +} + +bool ExtractbitsNode::Verify() const { + bool opndExprVerf = Opnd(0)->Verify(); + bool compVerf = CompatibleTypeVerify(*Opnd(0), *this); + bool resTypeVerf = UnaryTypeVerify0(GetPrimType()); + constexpr int numBitsInByte = 8; + bool opnd0SizeVerf = (numBitsInByte * GetPrimTypeSize(Opnd(0)->GetPrimType()) >= bitsSize); + if (!opnd0SizeVerf) { + LogInfo::MapleLogger() + << "\n#Error: The operand of extractbits must be large enough to contain the specified bitfield\n"; + } + return opndExprVerf && compVerf && resTypeVerf && opnd0SizeVerf; +} + +bool BinaryNode::Verify() const { + bool opndsVerf = BinaryGenericVerify(*GetBOpnd(0), *GetBOpnd(1)); + bool resTypeVerf = BinaryTypeVerify(GetPrimType()); + if (!resTypeVerf && theMIRModule->IsCModule()) { + if ((IsAddress(GetBOpnd(0)->GetPrimType()) && !IsAddress(GetBOpnd(1)->GetPrimType())) || + (!IsAddress(GetBOpnd(0)->GetPrimType()) && IsAddress(GetBOpnd(1)->GetPrimType()))) { + resTypeVerf = true; // don't print the same kind of error message twice + if (GetOpCode() != OP_add && GetOpCode() != OP_sub && GetOpCode() != OP_CG_array_elem_add) { + LogInfo::MapleLogger() << "\n#Error: 
Only add and sub are allowed for pointer arithemetic\n"; + this->Dump(); + } else if (!IsAddress(GetPrimType())) { + LogInfo::MapleLogger() + << "\n#Error: Adding an offset to a pointer or subtracting one from a pointer should result in a pointer " + "value\n"; + this->Dump(); + } + } + } + if (!resTypeVerf) { + LogInfo::MapleLogger() + << "\n#Error:result type of [add,div,sub,mul,max,min] and [ashr,band,bior,bxor,land,lior,lshr,shl,rem] must " + "be in [i32,u32,i64,u64,f32,f64,dynamic-type]\n"; + this->Dump(); + } + bool comp0Verf = CompatibleTypeVerify(*GetBOpnd(0), *this); + bool comp1Verf = true; + // Shift operations do not require same-type operands + if (GetOpCode() < OP_ashr || GetOpCode() > OP_shl) { + comp1Verf = CompatibleTypeVerify(*GetBOpnd(1), *this); + } + bool signVerf = true; + bool typeVerf = resTypeVerf && comp0Verf && comp1Verf; + if (typeVerf) { + if (GetOpCode() == OP_div || GetOpCode() == OP_mul || GetOpCode() == OP_rem || GetOpCode() == OP_max || + GetOpCode() == OP_min) { + signVerf = BinaryStrictSignVerify1(GetBOpnd(0), GetBOpnd(1), this); + if (!signVerf) { + LogInfo::MapleLogger() + << "\n#Error:the result and operands of [div,mul,rem,max,min] must be of the same sign\n"; + } + } + } + return opndsVerf && typeVerf && signVerf; +} + +bool CompareNode::Verify() const { + bool opndsVerf = BinaryGenericVerify(*GetBOpnd(0), *GetBOpnd(1)); + bool compVerf = CompatibleTypeVerify(*GetBOpnd(0), *GetBOpnd(1)); + bool resTypeVerf = CompareTypeVerify(GetPrimType()); + if (!resTypeVerf) { + this->Dump(); + } + bool signVerf = true; + bool typeVerf = compVerf && resTypeVerf; + if (typeVerf && GetOpCode() != OP_eq && GetOpCode() != OP_ne) { + signVerf = BinaryStrictSignVerify0(GetBOpnd(0), GetBOpnd(1)); + if (!signVerf) { + LogInfo::MapleLogger() << "\n#Error:the operands of [ge,gt,le,lt] must be of the same sign\n"; + } + } + return opndsVerf && typeVerf && signVerf; +} + +bool DepositbitsNode::Verify() const { + bool opndsVerf = BinaryGenericVerify(*GetBOpnd(0), *GetBOpnd(1)); + bool resTypeVerf = IntTypeVerify(GetPrimType()); + constexpr int numBitsInByte = 8; + bool opnd0SizeVerf = (numBitsInByte * GetPrimTypeSize(GetBOpnd(0)->GetPrimType()) >= bitsSize); + if (!opnd0SizeVerf) { + LogInfo::MapleLogger() << "\n#Error:opnd0 of depositbits must be large enough to contain the specified bitfield\n"; + } + return opndsVerf && resTypeVerf && opnd0SizeVerf; +} + +bool IntrinsicopNode::Verify() const { + return VerifyOpnds(); +} + +bool TernaryNode::Verify() const { + bool comp1Verf = CompatibleTypeVerify(*topnd[kSecondOpnd], *this); + bool comp2Verf = CompatibleTypeVerify(*topnd[kThirdOpnd], *this); + bool opnd0TypeVerf = IsPrimitiveInteger(topnd[kFirstOpnd]->GetPrimType()); + if (!opnd0TypeVerf) { + LogInfo::MapleLogger() << "\n#Error:select-opnd0 must be of integer type\n"; + } + return comp1Verf && comp2Verf && opnd0TypeVerf; +} + +bool SizeoftypeNode::Verify() const { + return true; +} + +bool ArrayNode::Verify() const { + bool opndsVerf = VerifyOpnds(); + bool resTypeVerf = IsAddress(GetPrimType()); + bool opndsTypeVerf = true; + if (!resTypeVerf) { + LogInfo::MapleLogger() << "\n#Error:result-type of array must be in [ptr,ref,a32,a64]\n"; + } + bool opnd0TypeVerf = IsAddress(GetNopndAt(0)->GetPrimType()); + if (!opnd0TypeVerf) { + LogInfo::MapleLogger() << "\n#Error:result-type of array-opnd0 must be in [ptr,ref,a32,a64]\n"; + } + for (size_t i = 1; i < NumOpnds(); ++i) { + if (!IntTypeVerify(GetNopndAt(i)->GetPrimType())) { + opndsTypeVerf = false; + LogInfo::MapleLogger() 
<< "\n#Error:result of the array index operands must be in [i32,u32,i64,u64]\n"; + } + } + return opndsVerf && resTypeVerf && opnd0TypeVerf && opndsTypeVerf; +} + +bool DassignNode::Verify() const { + bool structVerf = IsStructureVerify(fieldID, stIdx); + bool rhsVerf = GetRHS()->Verify(); + return structVerf && rhsVerf; +} + +bool AddrofNode::Verify() const { + bool pTypeVerf = true; + bool structVerf = IsStructureVerify(fieldID, GetStIdx()); + if (GetOpCode() == OP_dread) { + if (fieldID == 0 && IsStructureTypeKind(GetTypeKind(GetStIdx()))) { + if (GetPrimType() != PTY_agg) { + pTypeVerf = false; + LogInfo::MapleLogger() << "\n#Error:if variable is a structure, prim-type should specify agg\n"; + } + } + if (fieldID != 0 && structVerf) { + const MIRSymbol *var = theMIRModule->CurFunction()->GetLocalOrGlobalSymbol(GetStIdx()); + ASSERT(var != nullptr, "null ptr check"); + MIRType *type = var->GetType(); + auto *stTy = static_cast(type); + if (IsStructureTypeKind(GetFieldTypeKind(stTy, fieldID))) { + if (GetPrimType() != PTY_agg) { + pTypeVerf = false; + LogInfo::MapleLogger() << "\n#Error:if the field itself is a structure, prim-type should specify agg\n"; + } + } + } + } else { + pTypeVerf = IsAddress(GetPrimType()); + if (!pTypeVerf) { + LogInfo::MapleLogger() + << "\n#Error:result-type of addrof,addroflabel,addroffunc,iaddrof must be in [ptr,ref,a32,a64]\n"; + } + } + return pTypeVerf && structVerf; +} + +bool AddroffuncNode::Verify() const { + bool addrTypeVerf = IsAddress(GetPrimType()); + if (!addrTypeVerf) { + LogInfo::MapleLogger() + << "\n#Error:result-type of addrof,addroflabel,addroffunc,iaddrof must be in [ptr,ref,a32,a64]\n"; + } + return addrTypeVerf; +} + +bool AddroflabelNode::Verify() const { + bool addrTypeVerf = IsAddress(GetPrimType()); + if (!addrTypeVerf) { + LogInfo::MapleLogger() + << "\n#Error:result-type of addrof,addroflabel,addroffunc,iaddrof must be in [ptr,ref,a32,a64]\n"; + } + return addrTypeVerf; +} + +bool IassignNode::Verify() const { + bool addrExpVerf = addrExpr->Verify(); + bool rhsVerf = rhs->Verify(); + bool structVerf = true; + if (GetTypeKind(tyIdx) != kTypePointer) { + LogInfo::MapleLogger() << "\n#Error: must be a pointer type\n"; + return false; + } + if (fieldID != 0) { + if (!IsStructureTypeKind(GetPointedTypeKind(tyIdx))) { + structVerf = false; + LogInfo::MapleLogger() << "\n#Error:If field-id is not 0, the computed address must correspond to a structure\n"; + } + } + return addrExpVerf && rhsVerf && structVerf; +} + +bool IassignoffNode::Verify() const { + bool addrVerf = GetBOpnd(0)->Verify(); + bool rhsVerf = GetBOpnd(1)->Verify(); + bool compVerf = CompatibleTypeVerify(*this, *GetBOpnd(1)); + return addrVerf && rhsVerf && compVerf; +} + +bool IassignFPoffNode::Verify() const { + bool rhsVerf = Opnd(0)->Verify(); + bool compVerf = CompatibleTypeVerify(*this, *Opnd(0)); + return rhsVerf && compVerf; +} + +bool RegassignNode::Verify() const { + bool rhsVerf = Opnd(0)->Verify(); + bool compVerf = CompatibleTypeVerify(*this, *Opnd(0)); + return rhsVerf && compVerf; +} + +bool CondGotoNode::Verify() const { + bool opndExprVerf = UnaryStmtNode::Opnd(0)->Verify(); + bool opndTypeVerf = true; + if (!IsPrimitiveInteger(UnaryStmtNode::Opnd(0)->GetPrimType())) { + opndTypeVerf = false; + LogInfo::MapleLogger() << "\n#Error:the operand of brfalse and trfalse must be primitive integer\n"; + } + return opndExprVerf && opndTypeVerf; +} + +bool SwitchNode::Verify() const { + bool opndExprVerf = switchOpnd->Verify(); + bool opndTypeVerf = 
IntTypeVerify(switchOpnd->GetPrimType()); + if (!opndTypeVerf) { + LogInfo::MapleLogger() << "\n#Error: the operand of switch must be in [i32,u32,i64,u64]\n"; + } + return opndExprVerf && opndTypeVerf; +} + +bool BinaryStmtNode::Verify() const { + return GetBOpnd(0)->Verify() && GetBOpnd(1)->Verify() && CompatibleTypeVerify(*GetBOpnd(0), *GetBOpnd(1)) && + BinaryStrictSignVerify0(GetBOpnd(0), GetBOpnd(1)); +} + +bool RangeGotoNode::Verify() const { + bool opndExprVerf = Opnd(0)->Verify(); + bool opndTypeVerf = IntTypeVerify(Opnd(0)->GetPrimType()); + if (!opndTypeVerf) { + LogInfo::MapleLogger() << "\n#Error: the operand of rangegoto must be in [i32,u32,i64,u64]\n"; + } + return opndExprVerf && opndTypeVerf; +} + +bool BlockNode::Verify() const { + for (auto &stmt : GetStmtNodes()) { + if (!stmt.Verify()) { + return false; + } + } + return true; +} + +bool BlockNode::Verify(VerifyResult &verifyResult) const { + auto &nodes = GetStmtNodes(); + return !std::any_of(nodes.begin(), nodes.end(), [&verifyResult](auto &stmt) { return !stmt.Verify(verifyResult); }); +} + +bool DoloopNode::Verify() const { + bool startVerf = startExpr->Verify(); + bool contVerf = condExpr->Verify(); + bool incrVerf = incrExpr->Verify(); + bool doBodyVerf = true; + if (doBody) { + doBodyVerf = doBody->Verify(); + } + return startVerf && contVerf && incrVerf && doBodyVerf; +} + +bool IfStmtNode::Verify() const { + bool condVerf = Opnd()->Verify(); + bool thenVerf = true; + bool elseVerf = true; + if (thenPart != nullptr) { + thenVerf = thenPart->Verify(); + } + if (elsePart != nullptr) { + elseVerf = elsePart->Verify(); + } + return condVerf && thenVerf && elseVerf; +} + +bool WhileStmtNode::Verify() const { + bool condVerf = Opnd(0)->Verify(); + bool bodyVerf = true; + if (body != nullptr) { + bodyVerf = body->Verify(); + } + return condVerf && bodyVerf; +} + +bool NaryStmtNode::Verify() const { + return VerifyOpnds(); +} + +bool CallNode::Verify() const { + return VerifyOpnds(); +} + +bool IcallNode::Verify() const { + bool nOpndsVerf = true; + for (size_t i = 0; i < NumOpnds(); ++i) { + if (!GetNopndAt(i)->Verify()) { + nOpndsVerf = false; + break; + } + } + return nOpndsVerf; +} + +bool IntrinsiccallNode::Verify() const { + return VerifyOpnds(); +} + +std::string SafetyCallCheckStmtNode::GetFuncName() const { + return GlobalTables::GetStrTable().GetStringFromStrIdx(callFuncNameIdx); +} + +std::string SafetyCallCheckStmtNode::GetStmtFuncName() const { + return GlobalTables::GetStrTable().GetStringFromStrIdx(stmtFuncNameIdx); +} + +std::string SafetyCheckStmtNode::GetFuncName() const { + return GlobalTables::GetStrTable().GetStringFromStrIdx(funcNameIdx); +} + +bool UnaryNode::IsSameContent(const BaseNode *node) const { + auto *unaryNode = dynamic_cast(node); + if ((this == unaryNode) || + (unaryNode != nullptr && (GetOpCode() == unaryNode->GetOpCode()) && (GetPrimType() == unaryNode->GetPrimType()) && + (uOpnd && unaryNode->Opnd(0) && uOpnd->IsSameContent(unaryNode->Opnd(0))))) { + return true; + } else { + return false; + } +} + +bool TypeCvtNode::IsSameContent(const BaseNode *node) const { + auto *tyCvtNode = dynamic_cast(node); + if ((this == tyCvtNode) || + (tyCvtNode != nullptr && (fromPrimType == tyCvtNode->FromType()) && UnaryNode::IsSameContent(tyCvtNode))) { + return true; + } else { + return false; + } +} + +bool IreadNode::IsSameContent(const BaseNode *node) const { + auto *ireadNode = dynamic_cast(node); + if ((this == ireadNode) || (ireadNode != nullptr && (tyIdx == ireadNode->GetTyIdx()) && + (fieldID 
== ireadNode->GetFieldID()) && UnaryNode::IsSameContent(ireadNode))) {
+    return true;
+  } else {
+    return false;
+  }
+}
+
+bool IreadoffNode::IsSameContent(const BaseNode *node) const {
+  auto *ireadoffNode = dynamic_cast<const IreadoffNode*>(node);
+  if ((this == ireadoffNode) || (ireadoffNode != nullptr && (GetOffset() == ireadoffNode->GetOffset()) &&
+                                 UnaryNode::IsSameContent(ireadoffNode))) {
+    return true;
+  } else {
+    return false;
+  }
+}
+
+bool IreadFPoffNode::IsSameContent(const BaseNode *node) const {
+  auto *ireadFPoffNode = dynamic_cast<const IreadFPoffNode*>(node);
+  if ((this == ireadFPoffNode) ||
+      (ireadFPoffNode != nullptr && (GetOpCode() == ireadFPoffNode->GetOpCode()) &&
+       (GetPrimType() == ireadFPoffNode->GetPrimType()) && (GetOffset() == ireadFPoffNode->GetOffset()))) {
+    return true;
+  } else {
+    return false;
+  }
+}
+
+bool BinaryOpnds::IsSameContent(const BaseNode *node) const {
+  auto *binaryOpnds = dynamic_cast<const BinaryOpnds*>(node);
+  if ((this == binaryOpnds) || (binaryOpnds != nullptr && GetBOpnd(0)->IsSameContent(binaryOpnds->GetBOpnd(0)) &&
+                                GetBOpnd(1)->IsSameContent(binaryOpnds->GetBOpnd(1)))) {
+    return true;
+  } else {
+    return false;
+  }
+}
+
+bool BinaryNode::IsSameContent(const BaseNode *node) const {
+  auto *binaryNode = dynamic_cast<const BinaryNode*>(node);
+  if ((this == binaryNode) ||
+      (binaryNode != nullptr && (GetOpCode() == binaryNode->GetOpCode()) &&
+       (GetPrimType() == binaryNode->GetPrimType()) && BinaryOpnds::IsSameContent(binaryNode))) {
+    return true;
+  } else {
+    return false;
+  }
+}
+
+bool ConstvalNode::IsSameContent(const BaseNode *node) const {
+  auto *constvalNode = dynamic_cast<const ConstvalNode*>(node);
+  if (this == constvalNode) {
+    return true;
+  }
+  if (constvalNode == nullptr) {
+    return false;
+  }
+  const MIRConst *mirConst = constvalNode->GetConstVal();
+  if (constVal == mirConst) {
+    return true;
+  }
+  if (constVal->GetKind() != mirConst->GetKind()) {
+    return false;
+  }
+  if (constVal->GetKind() == kConstInt) {
+    // integers may differ in prim-type, and they may be different MIRIntConst nodes
+    auto &thisValue = static_cast<const MIRIntConst*>(constVal)->GetValue();
+    auto &rightValue = static_cast<const MIRIntConst*>(mirConst)->GetValue();
+    return thisValue.Equal(rightValue, GetPrimType()) && thisValue.Equal(rightValue, node->GetPrimType());
+  } else {
+    return false;
+  }
+}
+
+bool ConststrNode::IsSameContent(const BaseNode *node) const {
+  if (node->GetOpCode() != OP_conststr) {
+    return false;
+  }
+  auto *cstrNode = static_cast<const ConststrNode*>(node);
+  return strIdx == cstrNode->strIdx;
+}
+
+bool Conststr16Node::IsSameContent(const BaseNode *node) const {
+  if (node->GetOpCode() != OP_conststr16) {
+    return false;
+  }
+  auto *cstr16Node = static_cast<const Conststr16Node*>(node);
+  return strIdx == cstr16Node->strIdx;
+}
+
+bool AddrofNode::IsSameContent(const BaseNode *node) const {
+  return (GetOpCode() == node->GetOpCode()) && (GetPrimType() == node->GetPrimType()) && MayAccessSameMemory(node);
+}
+
+// identify dread and addrof nodes that may access the same memory location
+bool AddrofNode::MayAccessSameMemory(const BaseNode *node) const {
+  auto *addrofNode = dynamic_cast<const AddrofNode*>(node);
+  if ((this == addrofNode) ||
+      (addrofNode != nullptr && (GetNumOpnds() == addrofNode->GetNumOpnds()) &&
+       (stIdx.FullIdx() == addrofNode->GetStIdx().FullIdx()) && (fieldID == addrofNode->GetFieldID()))) {
+    return true;
+  } else {
+    return false;
+  }
+}
+
+bool DreadoffNode::IsSameContent(const BaseNode *node) const {
+  auto *dreaddoffNode = dynamic_cast<const DreadoffNode*>(node);
+  if ((this == dreaddoffNode) || (dreaddoffNode != nullptr && (GetOpCode() == dreaddoffNode->GetOpCode()) &&
+                                  (GetPrimType() ==
dreaddoffNode->GetPrimType()) && (stIdx == dreaddoffNode->stIdx) && + (offset == dreaddoffNode->offset))) { + return true; + } else { + return false; + } +} + +bool RegreadNode::IsSameContent(const BaseNode *node) const { + auto *regreadNode = dynamic_cast(node); + if ((this == regreadNode) || + (regreadNode != nullptr && (GetOpCode() == regreadNode->GetOpCode()) && + (GetPrimType() == regreadNode->GetPrimType()) && (regIdx == regreadNode->GetRegIdx()))) { + return true; + } else { + return false; + } +} + +bool AddroffuncNode::IsSameContent(const BaseNode *node) const { + auto *addroffuncNode = dynamic_cast(node); + if ((this == addroffuncNode) || + (addroffuncNode != nullptr && (GetOpCode() == addroffuncNode->GetOpCode()) && + (GetPrimType() == addroffuncNode->GetPrimType()) && (puIdx == addroffuncNode->GetPUIdx()))) { + return true; + } else { + return false; + } +} + +bool AddroflabelNode::IsSameContent(const BaseNode *node) const { + auto *addroflabelNode = dynamic_cast(node); + if ((this == addroflabelNode) || + (addroflabelNode != nullptr && (GetOpCode() == addroflabelNode->GetOpCode()) && + (GetPrimType() == addroflabelNode->GetPrimType()) && (offset == addroflabelNode->GetOffset()))) { + return true; + } else { + return false; + } +} + +MIRType *IassignNode::GetLHSType() const { + MIRPtrType *ptrType = static_cast(GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyIdx)); + if (fieldID == 0) { + return ptrType->GetPointedType(); + } + return GlobalTables::GetTypeTable().GetTypeFromTyIdx(ptrType->GetPointedTyIdxWithFieldID(fieldID)); +} +} // namespace maple diff --git a/src/mapleall/maple_ir/src/mir_parser.cpp b/src/mapleall/maple_ir/src/mir_parser.cpp new file mode 100644 index 0000000000000000000000000000000000000000..a422fccbb02aa06772de86e0feb200dadde1c13e --- /dev/null +++ b/src/mapleall/maple_ir/src/mir_parser.cpp @@ -0,0 +1,3519 @@ +/* + * Copyright (c) [2019-2021] Huawei Technologies Co.,Ltd.All rights reserved. + * + * OpenArkCompiler is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR + * FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */ +#include "mir_parser.h" +#include "mir_function.h" +#include "opcode_info.h" + +namespace maple { +std::map MIRParser::funcPtrMapForParseExpr = + MIRParser::InitFuncPtrMapForParseExpr(); +std::map MIRParser::funcPtrMapForParseStmt = + MIRParser::InitFuncPtrMapForParseStmt(); +std::map MIRParser::funcPtrMapForParseStmtBlock = + MIRParser::InitFuncPtrMapForParseStmtBlock(); + +bool MIRParser::ParseStmtDassign(StmtNodePtr &stmt) { + if (lexer.GetTokenKind() != TK_dassign) { + Error("expect dassign but get "); + return false; + } + // parse %i + lexer.NextToken(); + StIdx stidx; + if (!ParseDeclaredSt(stidx)) { + return false; + } + if (stidx.FullIdx() == 0) { + Error("expect a symbol parsing ParseStmtDassign"); + return false; + } + if (stidx.IsGlobal()) { + MIRSymbol *sym = GlobalTables::GetGsymTable().GetSymbolFromStidx(stidx.Idx()); + ASSERT(sym != nullptr, "null ptr check"); + sym->SetHasPotentialAssignment(); + } + auto *assignStmt = mod.CurFuncCodeMemPool()->New(); + assignStmt->SetStIdx(stidx); + TokenKind nextToken = lexer.NextToken(); + // parse field id + if (nextToken == TK_intconst) { // may be a field id + assignStmt->SetFieldID(lexer.GetTheIntVal()); + (void)lexer.NextToken(); + } + // parse expression like (constval i32 0) + BaseNode *expr = nullptr; + if (!ParseExprOneOperand(expr)) { + return false; + } + assignStmt->SetRHS(expr); + stmt = assignStmt; + lexer.NextToken(); + return true; +} + +bool MIRParser::ParseStmtDassignoff(StmtNodePtr &stmt) { + if (lexer.GetTokenKind() != TK_dassignoff) { + Error("expect dassignoff but get "); + return false; + } + if (!IsPrimitiveType(lexer.NextToken())) { + Error("expect primitive type but get "); + return false; + } + PrimType primType = GetPrimitiveType(lexer.GetTokenKind()); + // parse %i + lexer.NextToken(); + StIdx stidx; + if (!ParseDeclaredSt(stidx)) { + return false; + } + if (stidx.FullIdx() == 0) { + Error("expect a symbol parsing ParseStmtDassign"); + return false; + } + if (stidx.IsGlobal()) { + MIRSymbol *sym = GlobalTables::GetGsymTable().GetSymbolFromStidx(stidx.Idx()); + ASSERT(sym != nullptr, "null ptr check"); + sym->SetHasPotentialAssignment(); + } + DassignoffNode *assignStmt = mod.CurFuncCodeMemPool()->New(); + assignStmt->SetPrimType(primType); + assignStmt->stIdx = stidx; + TokenKind nextToken = lexer.NextToken(); + // parse offset + if (nextToken == TK_intconst) { + assignStmt->offset = static_cast(lexer.GetTheIntVal()); + (void)lexer.NextToken(); + } else { + Error("expect integer offset but get "); + return false; + } + // parse expression like (constval i32 0) + BaseNode *expr = nullptr; + if (!ParseExprOneOperand(expr)) { + return false; + } + assignStmt->SetRHS(expr); + stmt = assignStmt; + lexer.NextToken(); + return true; +} + +bool MIRParser::ParseStmtRegassign(StmtNodePtr &stmt) { + if (!IsPrimitiveType(lexer.NextToken())) { + Error("expect type parsing binary operator but get "); + return false; + } + auto *regAssign = mod.CurFuncCodeMemPool()->New(); + regAssign->SetPrimType(GetPrimitiveType(lexer.GetTokenKind())); + lexer.NextToken(); + if (lexer.GetTokenKind() == TK_specialreg) { + PregIdx tempPregIdx = regAssign->GetRegIdx(); + bool isSuccess = ParseSpecialReg(tempPregIdx); + regAssign->SetRegIdx(tempPregIdx); + if (!isSuccess) { + return false; + } + } else if (lexer.GetTokenKind() == TK_preg) { + PregIdx tempPregIdx = regAssign->GetRegIdx(); + bool isSuccess = ParsePseudoReg(regAssign->GetPrimType(), tempPregIdx); + regAssign->SetRegIdx(tempPregIdx); + if (!isSuccess) { + return false; + } 
+ } else { + Error("expect special or pseudo register but get "); + return false; + } + BaseNode *expr = nullptr; + if (!ParseExprOneOperand(expr)) { + return false; + } + regAssign->SetOpnd(expr, 0); + if (regAssign->GetRegIdx() > 0) { // check type consistenency for the preg + MIRPreg *preg = mod.CurFunction()->GetPregTab()->PregFromPregIdx(regAssign->GetRegIdx()); + if (preg->GetPrimType() == kPtyInvalid) { + preg->SetPrimType(expr->GetPrimType()); + } else if (preg->GetPrimType() == PTY_dynany) { + if (!IsPrimitiveDynType(expr->GetPrimType())) { + Error("inconsistent preg primitive dynamic type at "); + return false; + } + } + } + stmt = regAssign; + lexer.NextToken(); + return true; +} + +bool MIRParser::ParseStmtIassign(StmtNodePtr &stmt) { + // iAssign <* [10] int> () + if (lexer.GetTokenKind() != TK_iassign) { + Error("expect iassign but get "); + return false; + } + // expect <> derived type + lexer.NextToken(); + TyIdx tyIdx(0); + if (!ParseDerivedType(tyIdx)) { + Error("ParseStmtIassign failed when parsing derived type"); + return false; + } + auto *iAssign = mod.CurFuncCodeMemPool()->New(); + iAssign->SetTyIdx(tyIdx); + if (lexer.GetTokenKind() == TK_intconst) { + iAssign->SetFieldID(lexer.theIntVal); + lexer.NextToken(); + } + BaseNode *addr = nullptr; + BaseNode *rhs = nullptr; + // parse 2 operands then, #1 is address, the other would be value + if (!ParseExprTwoOperand(addr, rhs)) { + return false; + } + iAssign->SetOpnd(addr, 0); + iAssign->SetRHS(rhs); + lexer.NextToken(); + stmt = iAssign; + return true; +} + +bool MIRParser::ParseStmtIassignoff(StmtNodePtr &stmt) { + if (!IsPrimitiveType(lexer.NextToken())) { + Error("expect type parsing binary operator but get "); + return false; + } + // iassign ( , ) + auto *iAssignOff = mod.CurFuncCodeMemPool()->New(); + iAssignOff->SetPrimType(GetPrimitiveType(lexer.GetTokenKind())); + if (lexer.NextToken() != TK_intconst) { + Error("expect offset but get "); + return false; + } + iAssignOff->SetOffset(lexer.GetTheIntVal()); + lexer.NextToken(); + BaseNode *addr = nullptr; + BaseNode *rhs = nullptr; + if (!ParseExprTwoOperand(addr, rhs)) { + return false; + } + iAssignOff->SetBOpnd(addr, 0); + iAssignOff->SetBOpnd(rhs, 1); + lexer.NextToken(); + stmt = iAssignOff; + return true; +} + +bool MIRParser::ParseStmtIassignFPoff(StmtNodePtr &stmt) { + Opcode op = lexer.GetTokenKind() == TK_iassignfpoff ? 
OP_iassignfpoff : OP_iassignspoff; + if (!IsPrimitiveType(lexer.NextToken())) { + Error("expect type parsing binary operator but get "); + return false; + } + // iassignfpoff ( ) + auto *iAssignOff = mod.CurFuncCodeMemPool()->New(op); + iAssignOff->SetPrimType(GetPrimitiveType(lexer.GetTokenKind())); + if (lexer.NextToken() != TK_intconst) { + Error("expect offset but get "); + return false; + } + iAssignOff->SetOffset(lexer.GetTheIntVal()); + lexer.NextToken(); + BaseNode *expr = nullptr; + if (!ParseExprOneOperand(expr)) { + return false; + } + iAssignOff->SetOpnd(expr, 0); + lexer.NextToken(); + stmt = iAssignOff; + return true; +} + +bool MIRParser::ParseStmtBlkassignoff(StmtNodePtr &stmt) { + // blkassignoff (, ) + BlkassignoffNode *bassignoff = mod.CurFuncCodeMemPool()->New(); + if (lexer.NextToken() != TK_intconst) { + Error("expect offset but get "); + return false; + } + bassignoff->offset = static_cast(lexer.GetTheIntVal()); + if (lexer.NextToken() != TK_intconst) { + Error("expect align but get "); + return false; + } + bassignoff->SetAlign(static_cast(lexer.GetTheIntVal())); + if (lexer.NextToken() != TK_intconst) { + Error("expect size but get "); + return false; + } + bassignoff->blockSize = static_cast(lexer.GetTheIntVal()); + lexer.NextToken(); + BaseNode *destAddr = nullptr; + BaseNode *srcAddr = nullptr; + // parse 2 operands, the dest address followed by src address + if (!ParseExprTwoOperand(destAddr, srcAddr)) { + return false; + } + bassignoff->SetOpnd(destAddr, 0); + bassignoff->SetOpnd(srcAddr, 1); + lexer.NextToken(); + stmt = bassignoff; + return true; +} + +bool MIRParser::ParseStmtDoloop(StmtNodePtr &stmt) { + // syntax: doloop (, , ) { + // } + auto *doLoopNode = mod.CurFuncCodeMemPool()->New(); + stmt = doLoopNode; + lexer.NextToken(); + if (lexer.GetTokenKind() == TK_preg) { + uint32 pregNo = static_cast(lexer.GetTheIntVal()); + MIRFunction *mirFunc = mod.CurFunction(); + PregIdx pregIdx = mirFunc->GetPregTab()->EnterPregNo(pregNo, kPtyInvalid); + doLoopNode->SetIsPreg(true); + doLoopNode->SetDoVarStFullIdx(pregIdx); + // let other appearances handle the preg primitive type + } else { + StIdx stIdx; + if (!ParseDeclaredSt(stIdx)) { + return false; + } + if (stIdx.FullIdx() == 0) { + Error("expect a symbol parsing ParseStmtDoloop"); + return false; + } + if (stIdx.IsGlobal()) { + Error("expect local variable for doloop var but get "); + return false; + } + doLoopNode->SetDoVarStIdx(stIdx); + } + // parse ( + if (lexer.NextToken() != TK_lparen) { + Error("expect ( but get "); + return false; + } + // parse start expression + lexer.NextToken(); + BaseNode *start = nullptr; + if (!ParseExpression(start)) { + Error("ParseStmtDoloop when parsing start expression"); + return false; + } + if (doLoopNode->IsPreg()) { + auto regIdx = static_cast(doLoopNode->GetDoVarStIdx().FullIdx()); + MIRPreg *mpReg = mod.CurFunction()->GetPregTab()->PregFromPregIdx(regIdx); + if (mpReg->GetPrimType() == kPtyInvalid) { + CHECK_FATAL(start != nullptr, "null ptr check"); + mpReg->SetPrimType(start->GetPrimType()); + } + } + if (lexer.GetTokenKind() != TK_coma) { + Error("expect , after start expression but get "); + return false; + } + doLoopNode->SetStartExpr(start); + // parse end expression + lexer.NextToken(); + BaseNode *end = nullptr; + if (!ParseExpression(end)) { // here should be a compare expression + Error("ParseStmtDoloop when parsing end expression"); + return false; + } + if (lexer.GetTokenKind() != TK_coma) { + Error("expect , after condition expression but get "); + 
return false; + } + doLoopNode->SetContExpr(end); + // parse renew induction expression + lexer.NextToken(); + BaseNode *induction = nullptr; + if (!ParseExpression(induction)) { + Error("ParseStmtDoloop when parsing induction"); + return false; + } + // parse ) + if (lexer.GetTokenKind() != TK_rparen) { + Error("expect ) parsing doloop but get "); + return false; + } + doLoopNode->SetIncrExpr(induction); + // parse body of the loop + lexer.NextToken(); + BlockNode *bodyStmt = nullptr; + if (!ParseStmtBlock(bodyStmt)) { + Error("ParseStmtDoloop when parsing body of the loop"); + return false; + } + doLoopNode->SetDoBody(bodyStmt); + return true; +} + +bool MIRParser::ParseStmtForeachelem(StmtNodePtr &stmt) { + // syntax: foreachelem { + // } + auto *forNode = mod.CurFuncCodeMemPool()->New(); + stmt = forNode; + lexer.NextToken(); // skip foreachelem token + StIdx stidx; + if (!ParseDeclaredSt(stidx)) { + return false; + } + if (stidx.FullIdx() == 0) { + Error("error parsing element variable of foreachelem in "); + return false; + } + if (stidx.IsGlobal()) { + Error("illegal global scope for element variable for foreachelem in "); + return false; + } + forNode->SetElemStIdx(stidx); + lexer.NextToken(); + if (!ParseDeclaredSt(stidx)) { + return false; + } + if (stidx.FullIdx() == 0) { + Error("error parsing array/collection variable of foreachelem in "); + return false; + } + forNode->SetArrayStIdx(stidx); + lexer.NextToken(); + // parse body of the loop + BlockNode *bodyStmt = nullptr; + if (!ParseStmtBlock(bodyStmt)) { + Error("error when parsing body of foreachelem loop in "); + return false; + } + forNode->SetLoopBody(bodyStmt); + return true; +} + +bool MIRParser::ParseStmtIf(StmtNodePtr &stmt) { + if (lexer.GetTokenKind() != TK_if) { + Error("expect if but get "); + return false; + } + auto *ifStmt = mod.CurFuncCodeMemPool()->New(); + lexer.NextToken(); + BaseNode *expr = nullptr; + if (!ParseExprOneOperand(expr)) { + return false; + } + ifStmt->SetOpnd(expr, 0); + if (lexer.NextToken() != TK_lbrace) { + Error("expect { begin if body but get "); + return false; + } + BlockNode *thenBlock = nullptr; + if (!ParseStmtBlock(thenBlock)) { + Error("ParseStmtIf failed when parsing then block"); + return false; + } + ifStmt->SetThenPart(thenBlock); + + BlockNode *elseBlock = nullptr; + if (lexer.GetTokenKind() == TK_else) { + // has else part + if (lexer.NextToken() != TK_lbrace) { + Error("expect { begin if body but get "); + return false; + } + if (!ParseStmtBlock(elseBlock)) { + Error("ParseStmtIf failed when parsing else block"); + return false; + } + ifStmt->SetElsePart(elseBlock); + } + stmt = ifStmt; + return true; +} + +bool MIRParser::ParseStmtWhile(StmtNodePtr &stmt) { + if (lexer.GetTokenKind() != TK_while) { + Error("expect while but get "); + return false; + } + auto *whileStmt = mod.CurFuncCodeMemPool()->New(OP_while); + lexer.NextToken(); + BaseNode *expr = nullptr; + if (!ParseExprOneOperand(expr)) { + return false; + } + whileStmt->SetOpnd(expr, 0); + if (lexer.NextToken() != TK_lbrace) { + Error("expect { begin if body but get "); + return false; + } + BlockNode *whileBody = nullptr; + if (!ParseStmtBlock(whileBody)) { + Error("ParseStmtWhile failed when parse while body"); + return false; + } + whileStmt->SetBody(whileBody); + stmt = whileStmt; + return true; +} + +bool MIRParser::ParseStmtDowhile(StmtNodePtr &stmt) { + if (lexer.GetTokenKind() != TK_dowhile) { + Error("expect while but get "); + return false; + } + auto *whileStmt = 
mod.CurFuncCodeMemPool()->New(OP_dowhile); + if (lexer.NextToken() != TK_lbrace) { + Error("expect { begin if body but get "); + return false; + } + BlockNode *doWhileBody = nullptr; + if (!ParseStmtBlock(doWhileBody)) { + Error("ParseStmtDowhile failed when trying to parsing do while body"); + return false; + } + whileStmt->SetBody(doWhileBody); + BaseNode *expr = nullptr; + if (!ParseExprOneOperand(expr)) { + return false; + } + whileStmt->SetOpnd(expr, 0); + lexer.NextToken(); + stmt = whileStmt; + return true; +} + +bool MIRParser::ParseStmtLabel(StmtNodePtr &stmt) { + GStrIdx strIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(lexer.GetName()); + LabelIdx labIdx = mod.CurFunction()->GetLabelTab()->GetLabelIdxFromStrIdx(strIdx); + if (labIdx == 0) { + labIdx = mod.CurFunction()->GetLabelTab()->CreateLabel(); + mod.CurFunction()->GetLabelTab()->SetSymbolFromStIdx(labIdx, strIdx); + mod.CurFunction()->GetLabelTab()->AddToStringLabelMap(labIdx); + } else { + if (definedLabels.size() > labIdx && definedLabels[labIdx]) { + Error("label multiply declared "); + return false; + } + } + if (definedLabels.size() <= labIdx) { + definedLabels.resize(labIdx + 1); + } + definedLabels[labIdx] = true; + auto *labNode = mod.CurFuncCodeMemPool()->New(); + labNode->SetLabelIdx(labIdx); + stmt = labNode; + lexer.NextToken(); + return true; +} + +bool MIRParser::ParseStmtGoto(StmtNodePtr &stmt) { + if (lexer.GetTokenKind() != TK_goto) { + Error("expect goto but get "); + return false; + } + if (lexer.NextToken() != TK_label) { + Error("expect label in goto but get "); + return false; + } + GStrIdx strIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(lexer.GetName()); + LabelIdx labIdx = mod.CurFunction()->GetLabelTab()->GetLabelIdxFromStrIdx(strIdx); + if (labIdx == 0) { + labIdx = mod.CurFunction()->GetLabelTab()->CreateLabel(); + mod.CurFunction()->GetLabelTab()->SetSymbolFromStIdx(labIdx, strIdx); + mod.CurFunction()->GetLabelTab()->AddToStringLabelMap(labIdx); + } + auto *gotoNode = mod.CurFuncCodeMemPool()->New(OP_goto); + gotoNode->SetOffset(labIdx); + stmt = gotoNode; + lexer.NextToken(); + return true; +} + +bool MIRParser::ParseStmtBr(StmtNodePtr &stmt) { + TokenKind tk = lexer.GetTokenKind(); + if (tk != TK_brtrue && tk != TK_brfalse) { + Error("expect brtrue/brfalse but get "); + return false; + } + if (lexer.NextToken() != TK_label) { + Error("expect label in goto but get "); + return false; + } + GStrIdx strIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(lexer.GetName()); + LabelIdx labIdx = mod.CurFunction()->GetLabelTab()->GetLabelIdxFromStrIdx(strIdx); + if (labIdx == 0) { + labIdx = mod.CurFunction()->GetLabelTab()->CreateLabel(); + mod.CurFunction()->GetLabelTab()->SetSymbolFromStIdx(labIdx, strIdx); + mod.CurFunction()->GetLabelTab()->AddToStringLabelMap(labIdx); + } + auto *condGoto = mod.CurFuncCodeMemPool()->New(tk == TK_brtrue ? 
OP_brtrue : OP_brfalse); + condGoto->SetOffset(labIdx); + lexer.NextToken(); + // parse () + BaseNode *expr = nullptr; + if (!ParseExprOneOperand(expr)) { + return false; + } + condGoto->SetOpnd(expr, 0); + stmt = condGoto; + lexer.NextToken(); + return true; +} + +bool MIRParser::ParseSwitchCase(int64 &constVal, LabelIdx &lblIdx) { + // syntax : goto + if (lexer.GetTokenKind() != TK_intconst) { + Error("expect intconst in switch but get "); + return false; + } + constVal = static_cast(lexer.GetTheIntVal()); + if (lexer.NextToken() != TK_colon) { + Error("expect : in switch but get "); + return false; + } + if (lexer.NextToken() != TK_goto) { + Error("expect goto in switch case but get "); + return false; + } + if (lexer.NextToken() != TK_label) { + Error("expect label in switch but get "); + return false; + } + lblIdx = mod.CurFunction()->GetOrCreateLableIdxFromName(lexer.GetName()); + lexer.NextToken(); + return true; +} + +bool MIRParser::ParseStmtSwitch(StmtNodePtr &stmt) { + auto *switchNode = mod.CurFuncCodeMemPool()->New(mod); + stmt = switchNode; + lexer.NextToken(); + BaseNode *expr = nullptr; + if (!ParseExprOneOperand(expr)) { + return false; + } + switchNode->SetSwitchOpnd(expr); + if (!IsPrimitiveInteger(expr->GetPrimType())) { + Error("expect expression return integer but get "); + return false; + } + if (lexer.NextToken() == TK_label) { + switchNode->SetDefaultLabel(mod.CurFunction()->GetOrCreateLableIdxFromName(lexer.GetName())); + } else if (lexer.GetTokenKind() == TK_intconst && lexer.GetTheIntVal() == 0) { + switchNode->SetDefaultLabel(0); + } else { + Error("expect label in switch but get "); + return false; + } + if (lexer.NextToken() != TK_lbrace) { + Error("expect { in switch but get "); + return false; + } + // : goto + // : goto + // ... + // : goto + TokenKind tk = lexer.NextToken(); + std::set casesSet; + while (tk != TK_rbrace) { + int64 constVal = 0; + LabelIdx lbl = 0; + if (!ParseSwitchCase(constVal, lbl)) { + Error("parse switch case failed "); + return false; + } + if (casesSet.find(constVal) != casesSet.end()) { + Error("duplicated switch case "); + return false; + } + switchNode->InsertCasePair(CasePair(constVal, lbl)); + (void)casesSet.insert(constVal); + tk = lexer.GetTokenKind(); + } + lexer.NextToken(); + return true; +} + +bool MIRParser::ParseStmtRangegoto(StmtNodePtr &stmt) { + auto *rangeGotoNode = mod.CurFuncCodeMemPool()->New(mod); + stmt = rangeGotoNode; + lexer.NextToken(); + BaseNode *expr = nullptr; + if (!ParseExprOneOperand(expr)) { + return false; + } + if (!IsPrimitiveInteger(expr->GetPrimType())) { + Error("expect expression return integer but get "); + return false; + } + rangeGotoNode->SetOpnd(expr, 0); + if (lexer.NextToken() == TK_intconst) { + rangeGotoNode->SetTagOffset(static_cast(lexer.GetTheIntVal())); + } else { + Error("expect tag offset in rangegoto but get "); + return false; + } + if (lexer.NextToken() != TK_lbrace) { + Error("expect { in switch but get "); + return false; + } + // : goto + // : goto + // ... 
+ // : goto + TokenKind tk = lexer.NextToken(); + std::set casesSet; + int32 minIdx = UINT16_MAX; + int32 maxIdx = 0; + while (tk != TK_rbrace) { + int64 constVal = 0; + LabelIdx lbl = 0; + if (!ParseSwitchCase(constVal, lbl)) { + Error("parse switch case failed "); + return false; + } + if (constVal > UINT16_MAX || constVal < 0) { + Error("rangegoto case tag not within unsigned 16 bits range "); + return false; + } + if (casesSet.find(constVal) != casesSet.end()) { + Error("duplicated switch case "); + return false; + } + if (constVal < minIdx) { + minIdx = static_cast(constVal); + } + if (constVal > maxIdx) { + maxIdx = static_cast(constVal); + } + rangeGotoNode->AddRangeGoto(static_cast(constVal), static_cast(lbl)); + (void)casesSet.insert(constVal); + tk = lexer.GetTokenKind(); + } + ASSERT(rangeGotoNode->GetNumOpnds() == 1, "Rangegoto is a UnaryOpnd; numOpnds must be 1"); + // check there is no gap + if (static_cast(static_cast(maxIdx - minIdx) + 1) != casesSet.size()) { + Error("gap not allowed in rangegoto case tags "); + return false; + } + lexer.NextToken(); + return true; +} + +bool MIRParser::ParseStmtMultiway(StmtNodePtr &stmt) { + auto *multiwayNode = mod.CurFuncCodeMemPool()->New(mod); + stmt = multiwayNode; + lexer.NextToken(); + BaseNode *expr = nullptr; + if (!ParseExprOneOperand(expr)) { + return false; + } + multiwayNode->SetMultiWayOpnd(expr); + if (lexer.NextToken() == TK_label) { + multiwayNode->SetDefaultlabel(mod.CurFunction()->GetOrCreateLableIdxFromName(lexer.GetName())); + } else { + Error("expect label in multiway but get "); + return false; + } + if (lexer.NextToken() != TK_lbrace) { + Error("expect { in switch but get "); + return false; + } + // (): goto + // (): goto + // ... + // (): goto + TokenKind tk = lexer.NextToken(); + while (tk != TK_rbrace) { + BaseNode *x = nullptr; + if (!ParseExprOneOperand(x)) { + return false; + } + if (lexer.NextToken() != TK_colon) { + Error("expect : parsing multiway case tag specification but get "); + return false; + } + if (lexer.NextToken() != TK_goto) { + Error("expect goto in multiway case expression but get "); + return false; + } + if (lexer.NextToken() != TK_label) { + Error("expect goto label after multiway case expression but get "); + return false; + } + LabelIdx lblIdx = mod.CurFunction()->GetOrCreateLableIdxFromName(lexer.GetName()); + lexer.NextToken(); + multiwayNode->AppendElemToMultiWayTable(MCasePair(static_cast(x), lblIdx)); + tk = lexer.GetTokenKind(); + } + const MapleVector &multiWayTable = multiwayNode->GetMultiWayTable(); + multiwayNode->SetNumOpnds(multiWayTable.size()); + lexer.NextToken(); + return true; +} + +// used only when parsing mmpl +PUIdx MIRParser::EnterUndeclaredFunction(bool isMcount) { + std::string funcName; + if (isMcount) { + funcName = "_mcount"; + } else { + funcName = lexer.GetName(); + } + GStrIdx strIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(funcName); + MIRSymbol *funcSt = GlobalTables::GetGsymTable().CreateSymbol(kScopeGlobal); + funcSt->SetNameStrIdx(strIdx); + (void)GlobalTables::GetGsymTable().AddToStringSymbolMap(*funcSt); + funcSt->SetStorageClass(kScText); + funcSt->SetSKind(kStFunc); + auto *fn = mod.GetMemPool()->New(&mod, funcSt->GetStIdx()); + fn->SetPuidx(GlobalTables::GetFunctionTable().GetFuncTable().size()); + GlobalTables::GetFunctionTable().GetFuncTable().push_back(fn); + funcSt->SetFunction(fn); + auto *funcType = mod.GetMemPool()->New(); + fn->SetMIRFuncType(funcType); + if (isMcount) { + MIRType *retType = 
GlobalTables::GetTypeTable().GetTypeFromTyIdx(TyIdx(PTY_void)); + fn->SetReturnTyIdx(retType->GetTypeIndex()); + } + return fn->GetPuidx(); +} + +bool MIRParser::ParseStmtCallMcount(StmtNodePtr &stmt) { + // syntax: call (, ..., ) + Opcode o = OP_call; + PUIdx pIdx = EnterUndeclaredFunction(true); + auto *callStmt = mod.CurFuncCodeMemPool()->New(mod, o); + callStmt->SetPUIdx(pIdx); + MapleVector opndsvec(mod.CurFuncCodeMemPoolAllocator()->Adapter()); + callStmt->SetNOpnd(opndsvec); + callStmt->SetNumOpnds(opndsvec.size()); + stmt = callStmt; + return true; +} + +bool MIRParser::ParseStmtCall(StmtNodePtr &stmt) { + // syntax: call (, ..., ) + TokenKind tk = lexer.GetTokenKind(); + Opcode o = GetOpFromToken(tk); + ASSERT(kOpcodeInfo.IsCall(o), "ParseStmtCall: not a call opcode"); + bool hasAssigned = kOpcodeInfo.IsCallAssigned(o); + bool hasInstant = false; + bool withType = false; + switch (tk) { + case TK_polymorphiccall: + case TK_polymorphiccallassigned: + withType = true; + break; + case TK_callinstant: + case TK_virtualcallinstant: + case TK_superclasscallinstant: + case TK_interfacecallinstant: + case TK_callinstantassigned: + case TK_virtualcallinstantassigned: + case TK_superclasscallinstantassigned: + case TK_interfacecallinstantassigned: + hasInstant = true; + break; + default: + break; + } + TyIdx polymophicTyidx(0); + if (o == OP_polymorphiccallassigned || o == OP_polymorphiccall) { + TokenKind nextTk = lexer.NextToken(); + if (nextTk == TK_langle) { + nextTk = lexer.NextToken(); + if (nextTk == TK_func) { + lexer.NextToken(); + if (!ParseFuncType(polymophicTyidx)) { + Error("error parsing functype in ParseStmtCall for polymorphiccallassigned at "); + return false; + } + } else { + Error("expect func in functype but get "); + return false; + } + } else { + Error("expect < in functype but get "); + return false; + } + } + TokenKind funcTk = lexer.NextToken(); + if (funcTk != TK_fname) { + Error("expect func name in call but get "); + return false; + } + PUIdx pIdx; + if (!ParseDeclaredFunc(pIdx)) { + if (mod.GetFlavor() < kMmpl) { + Error("expect .mmpl"); + return false; + } + pIdx = EnterUndeclaredFunction(); + } + lexer.NextToken(); + CallNode *callStmt = nullptr; + CallinstantNode *callInstantStmt = nullptr; + if (withType) { + callStmt = mod.CurFuncCodeMemPool()->New(mod, o); + callStmt->SetTyIdx(polymophicTyidx); + } else if (hasInstant) { + TokenKind langleTk = lexer.GetTokenKind(); + if (langleTk != TK_langle) { + Error("missing < in generic method instantiation at "); + return false; + } + TokenKind lbraceTk = lexer.NextToken(); + if (lbraceTk != TK_lbrace) { + Error("missing { in generic method instantiation at "); + return false; + } + MIRInstantVectorType instVecTy; + if (!ParseGenericInstantVector(instVecTy)) { + Error("error parsing generic method instantiation at "); + return false; + } + TokenKind rangleTk = lexer.GetTokenKind(); + if (rangleTk != TK_rangle) { + Error("missing > in generic method instantiation at "); + return false; + } + TyIdx tyIdx = GlobalTables::GetTypeTable().GetOrCreateMIRType(&instVecTy); + callInstantStmt = mod.CurFuncCodeMemPool()->New(mod, o, tyIdx); + callStmt = callInstantStmt; + lexer.NextToken(); // skip the > + } else { + callStmt = mod.CurFuncCodeMemPool()->New(mod, o); + } + callStmt->SetPUIdx(pIdx); + + MIRFunction *callee = GlobalTables::GetFunctionTable().GetFuncTable()[pIdx]; + CHECK_NULL_FATAL(callee); + callee->GetFuncSymbol()->SetAppearsInCode(true); + if (callee->GetName() == "setjmp") { + mod.CurFunction()->SetHasSetjmp(); 
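+    // a longjmp can transfer control back into this call site, so record that the current function calls setjmp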
+ } + + MapleVector opndsVec(mod.CurFuncCodeMemPoolAllocator()->Adapter()); + if (!ParseExprNaryOperand(opndsVec)) { + return false; + } + callStmt->SetNOpnd(opndsVec); + callStmt->SetNumOpnds(opndsVec.size()); + if (hasAssigned) { + CallReturnVector retsVec(mod.CurFuncCodeMemPoolAllocator()->Adapter()); + if (!ParseCallReturns(retsVec)) { + return false; + } + if (!hasInstant) { + ASSERT(callStmt != nullptr, "callstmt is null in MIRParser::ParseStmtCall"); + callStmt->SetReturnVec(retsVec); + } else { + ASSERT(callInstantStmt != nullptr, "callinstantstmt is null in MIRParser::ParseStmtCall"); + callInstantStmt->SetReturnVec(retsVec); + } + } + lexer.NextToken(); + stmt = callStmt; + return true; +} + +bool MIRParser::ParseStmtIcall(StmtNodePtr &stmt, Opcode op) { + // syntax: icall (, , ..., ) + // icallassigned (, ..., ) { + // dassign + // dassign + // . . . + // dassign } + // icallproto (, , ..., ) + // icallprotoassigned (, , ..., ) { + // dassign + // dassign + // . . . + // dassign } + IcallNode *iCallStmt = mod.CurFuncCodeMemPool()->New(mod, op); + lexer.NextToken(); + if (op == OP_icallproto || op == OP_icallprotoassigned) { + TyIdx tyIdx(0); + if (!ParseDerivedType(tyIdx)) { + Error("error parsing type in ParseStmtIcall for icallproto at "); + return false; + } + iCallStmt->SetRetTyIdx(tyIdx); + } + MapleVector opndsVec(mod.CurFuncCodeMemPoolAllocator()->Adapter()); + if (!ParseExprNaryOperand(opndsVec)) { + return false; + } + iCallStmt->SetNOpnd(opndsVec); + iCallStmt->SetNumOpnds(opndsVec.size()); + if (op == OP_icallassigned || op == OP_icallprotoassigned) { + CallReturnVector retsVec(mod.CurFuncCodeMemPoolAllocator()->Adapter()); + if (!ParseCallReturns(retsVec)) { + return false; + } + iCallStmt->SetReturnVec(retsVec); + } + lexer.NextToken(); + stmt = iCallStmt; + return true; +} + +bool MIRParser::ParseStmtIcall(StmtNodePtr &stmt) { + return ParseStmtIcall(stmt, OP_icall); +} + +bool MIRParser::ParseStmtIcallassigned(StmtNodePtr &stmt) { + return ParseStmtIcall(stmt, OP_icallassigned); +} + +bool MIRParser::ParseStmtIcallproto(StmtNodePtr &stmt) { + return ParseStmtIcall(stmt, OP_icallproto); +} + +bool MIRParser::ParseStmtIcallprotoassigned(StmtNodePtr &stmt) { + return ParseStmtIcall(stmt, OP_icallprotoassigned); +} + +bool MIRParser::ParseStmtIntrinsiccall(StmtNodePtr &stmt, bool isAssigned) { + Opcode o = !isAssigned ? (lexer.GetTokenKind() == TK_intrinsiccall ? OP_intrinsiccall : OP_xintrinsiccall) + : (lexer.GetTokenKind() == TK_intrinsiccallassigned ? OP_intrinsiccallassigned + : OP_xintrinsiccallassigned); + auto *intrnCallNode = mod.CurFuncCodeMemPool()->New(mod, o); + lexer.NextToken(); + if (o == (!isAssigned) ? 
OP_intrinsiccall : OP_intrinsiccallassigned) { + intrnCallNode->SetIntrinsic(GetIntrinsicID(lexer.GetTokenKind())); + } else { + intrnCallNode->SetIntrinsic(static_cast(lexer.GetTheIntVal())); + } + lexer.NextToken(); + MapleVector opndsVec(mod.CurFuncCodeMemPoolAllocator()->Adapter()); + if (!ParseExprNaryOperand(opndsVec)) { + return false; + } + intrnCallNode->SetNOpnd(opndsVec); + intrnCallNode->SetNumOpnds(opndsVec.size()); + if (isAssigned) { + CallReturnVector retsVec(mod.CurFuncCodeMemPoolAllocator()->Adapter()); + if (!ParseCallReturns(retsVec)) { + return false; + } + // store return type of IntrinsiccallNode + if (retsVec.size() == 1 && retsVec[0].first.Idx() != 0) { + MIRSymbol *retSymbol = curFunc->GetSymTab()->GetSymbolFromStIdx(retsVec[0].first.Idx()); + MIRType *retType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(retSymbol->GetTyIdx()); + CHECK_FATAL(retType != nullptr, "rettype is null in MIRParser::ParseStmtIntrinsiccallAssigned"); + intrnCallNode->SetPrimType(retType->GetPrimType()); + } + intrnCallNode->SetReturnVec(retsVec); + } + stmt = intrnCallNode; + lexer.NextToken(); + return true; +} + +bool MIRParser::ParseStmtIntrinsiccall(StmtNodePtr &stmt) { + return ParseStmtIntrinsiccall(stmt, false); +} + +bool MIRParser::ParseStmtIntrinsiccallassigned(StmtNodePtr &stmt) { + return ParseStmtIntrinsiccall(stmt, true); +} + +bool MIRParser::ParseStmtIntrinsiccallwithtype(StmtNodePtr &stmt, bool isAssigned) { + Opcode o = (!isAssigned) ? OP_intrinsiccallwithtype : OP_intrinsiccallwithtypeassigned; + IntrinsiccallNode *intrnCallNode = mod.CurFuncCodeMemPool()->New(mod, o); + TokenKind tk = lexer.NextToken(); + TyIdx tyIdx(0); + if (IsPrimitiveType(tk)) { + if (!ParsePrimType(tyIdx)) { + Error("expect primitive type in ParseStmtIntrinsiccallwithtype but get "); + return false; + } + } else if (!ParseDerivedType(tyIdx)) { + Error("error parsing type in ParseStmtIntrinsiccallwithtype at "); + return false; + } + intrnCallNode->SetTyIdx(tyIdx); + intrnCallNode->SetIntrinsic(GetIntrinsicID(lexer.GetTokenKind())); + lexer.NextToken(); + MapleVector opndsVec(mod.CurFuncCodeMemPoolAllocator()->Adapter()); + if (!ParseExprNaryOperand(opndsVec)) { + return false; + } + intrnCallNode->SetNOpnd(opndsVec); + intrnCallNode->SetNumOpnds(opndsVec.size()); + if (isAssigned) { + CallReturnVector retsVec(mod.CurFuncCodeMemPoolAllocator()->Adapter()); + if (!ParseCallReturns(retsVec)) { + return false; + } + // store return type of IntrinsiccallNode + if (retsVec.size() == 1 && retsVec[0].first.Idx() != 0) { + MIRSymbol *retSymbol = curFunc->GetSymTab()->GetSymbolFromStIdx(retsVec[0].first.Idx()); + MIRType *retType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(retSymbol->GetTyIdx()); + CHECK_FATAL(retType != nullptr, "rettype is null in MIRParser::ParseStmtIntrinsiccallwithtypeAssigned"); + intrnCallNode->SetPrimType(retType->GetPrimType()); + } + intrnCallNode->SetReturnVec(retsVec); + } + stmt = intrnCallNode; + lexer.NextToken(); + return true; +} + +bool MIRParser::ParseStmtIntrinsiccallwithtype(StmtNodePtr &stmt) { + return ParseStmtIntrinsiccallwithtype(stmt, false); +} + +bool MIRParser::ParseStmtIntrinsiccallwithtypeassigned(StmtNodePtr &stmt) { + return ParseStmtIntrinsiccallwithtype(stmt, true); +} + +bool MIRParser::ParseCallReturnPair(CallReturnPair &retpair) { + bool isst = (lexer.GetTokenKind() == TK_dassign); + if (isst) { + // parse %i + lexer.NextToken(); + StIdx stidx; + // How to use islocal?? 
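+    // Illustrative (assumed) textual form of the dassign return pair parsed in
+    // this branch, e.g. "dassign %retval 0": the declared symbol is resolved
+    // first, then an optional integer field id is consumed below.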
+ if (!ParseDeclaredSt(stidx)) { + return false; + } + if (lexer.GetTokenKind() == TK_lname) { + MIRSymbolTable *lSymTab = mod.CurFunction()->GetSymTab(); + MIRSymbol *lSym = lSymTab->GetSymbolFromStIdx(stidx.Idx(), 0); + ASSERT(lSym != nullptr, "lsym MIRSymbol is null"); + if (lSym->GetName().find("L_STR") == 0) { + MIRType *ty = GlobalTables::GetTypeTable().GetTypeFromTyIdx(lSym->GetTyIdx()); + auto *ptrTy = static_cast(ty->CopyMIRTypeNode()); + ASSERT(ptrTy != nullptr, "null ptr check"); + ptrTy->SetPrimType(GetExactPtrPrimType()); + TyIdx newTyidx = GlobalTables::GetTypeTable().GetOrCreateMIRType(ptrTy); + delete ptrTy; + lSym->SetTyIdx(newTyidx); + } + } + if (stidx.FullIdx() == 0) { + Error("expect a symbol parsing call return assignment but get"); + return false; + } + uint16 fieldId = 0; + TokenKind nextToken = lexer.NextToken(); + // parse field id + if (nextToken == TK_intconst) { + fieldId = lexer.GetTheIntVal(); + lexer.NextToken(); + } + RegFieldPair regFieldPair; + regFieldPair.SetFieldID(fieldId); + retpair = CallReturnPair(stidx, regFieldPair); + } else { + // parse type + lexer.NextToken(); + TyIdx tyidx(0); + // RegreadNode regreadexpr; + bool ret = ParsePrimType(tyidx); + if (ret != true) { + Error("call ParsePrimType failed in ParseCallReturns"); + return false; + } + if (tyidx == 0u) { + Error("expect primitive type but get "); + return false; + } + PrimType ptype = GlobalTables::GetTypeTable().GetPrimTypeFromTyIdx(tyidx); + PregIdx pregIdx; + if (lexer.GetTokenKind() == TK_specialreg) { + if (!ParseSpecialReg(pregIdx)) { + Error("expect specialreg parsing callassign CallReturnVector"); + return false; + } + } else if (lexer.GetTokenKind() == TK_preg) { + if (!ParsePseudoReg(ptype, pregIdx)) { + Error("expect pseudoreg parsing callassign CallReturnVector"); + return false; + } + } else { + Error("expect special or pseudo register but get "); + return false; + } + ASSERT(pregIdx > 0, "register number is zero"); + ASSERT(pregIdx <= 0xffff, "register number is over 16 bits"); + RegFieldPair regFieldPair; + regFieldPair.SetPregIdx(pregIdx); + retpair = CallReturnPair(StIdx(), regFieldPair); + } + return true; +} + +bool MIRParser::ParseCallReturns(CallReturnVector &retsvec) { + // { + // dassign + // dassign + // . . . + // dassign } + // OR + // { + // regassign + // regassign + // regassign + // } + if (lexer.NextToken() != TK_lbrace) { + Error("expect { parsing call return values. "); + return false; + } + TokenKind tk = lexer.NextToken(); + CallReturnPair retpair; + while (tk != TK_rbrace) { + if (lexer.GetTokenKind() != TK_dassign && lexer.GetTokenKind() != TK_regassign) { + Error("expect dassign/regassign but get "); + return false; + } + if (!ParseCallReturnPair(retpair)) { + Error("error parsing call returns. 
"); + return false; + } + retsvec.push_back(retpair); + tk = lexer.GetTokenKind(); + } + return true; +} + +bool MIRParser::ParseStmtAsm(StmtNodePtr &stmt) { + AsmNode *asmNode = mod.CurFuncCodeMemPool()->New(&mod.GetCurFuncCodeMPAllocator()); + mod.CurFunction()->SetHasAsm(); + lexer.NextToken(); + // parse qualifiers + while (lexer.GetTokenKind() == TK_volatile || + lexer.GetTokenKind() == TK_inline || + lexer.GetTokenKind() == TK_goto) { + AsmQualifierKind qual; + switch (lexer.GetTokenKind()) { + case TK_volatile: { + qual = kASMvolatile; + break; + } + case TK_inline: { + qual = kASMinline; + break; + } + case TK_goto: + default: { + qual = kASMgoto; + break; + } + } + asmNode->SetQualifier(qual); + lexer.NextToken(); + } + // parse open brace + if (lexer.GetTokenKind() != TK_lbrace) { + Error("Open brace not found parsing asm statement."); + return false; + } + lexer.NextToken(); + // parse asm string + if (lexer.GetTokenKind() != TK_string) { + Error("asm string not found parsing asm statement."); + return false; + } + asmNode->asmString = lexer.GetName(); + lexer.NextToken(); + // parse first colon + if (lexer.GetTokenKind() != TK_colon) { + Error("first colon not found parsing asm statement."); + return false; + } + lexer.NextToken(); + // parse outputs + UStrIdx uStrIdx; + CallReturnPair retpair; + while (lexer.GetTokenKind() == TK_string) { + // parse an output constraint string + uStrIdx = GlobalTables::GetUStrTable().GetOrCreateStrIdxFromName(lexer.GetName()); + lexer.NextToken(); + if (!ParseCallReturnPair(retpair)) { + Error("error parsing call returns. "); + return false; + } + asmNode->outputConstraints.push_back(uStrIdx); + asmNode->asmOutputs.push_back(retpair); + if (lexer.GetTokenKind() == TK_coma) { + lexer.NextToken(); + } + } + // parse second colon + if (lexer.GetTokenKind() != TK_colon) { + Error("second colon not found parsing asm statement."); + return false; + } + lexer.NextToken(); + // parse inputs + while (lexer.GetTokenKind() == TK_string) { + // parse an input constraint string + uStrIdx = GlobalTables::GetUStrTable().GetOrCreateStrIdxFromName(lexer.GetName()); + if (lexer.GetName()[0] == '+') { + asmNode->SetHasWriteInputs(); + } + if (lexer.NextToken() != TK_lparen) { + Error("expect ( but get "); + return false; + } + lexer.NextToken(); + BaseNode *expr = nullptr; + if (!ParseExpression(expr)) { + Error("ParseExpression failed"); + return false; + } + if (lexer.GetTokenKind() != TK_rparen) { + Error("expect ) but get "); + return false; + } + asmNode->inputConstraints.push_back(uStrIdx); + asmNode->GetNopnd().push_back(expr); + if (lexer.NextToken() == TK_coma) { + lexer.NextToken(); + } + } + asmNode->SetNumOpnds(static_cast(asmNode->GetNopndSize())); + // parse third colon + if (lexer.GetTokenKind() != TK_colon) { + Error("third colon not found parsing asm statement."); + return false; + } + lexer.NextToken(); + // parse clobber list + while (lexer.GetTokenKind() == TK_string) { + // parse an input constraint string + uStrIdx = GlobalTables::GetUStrTable().GetOrCreateStrIdxFromName(lexer.GetName()); + asmNode->clobberList.push_back(uStrIdx); + if (lexer.NextToken() == TK_coma) { + lexer.NextToken(); + } + } + // parse fourth colon + if (lexer.GetTokenKind() != TK_colon) { + Error("fourth colon not found parsing asm statement."); + return false; + } + lexer.NextToken(); + // parse labels + while (lexer.GetTokenKind() == TK_label) { + GStrIdx strIdx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(lexer.GetName()); + LabelIdx labIdx = 
mod.CurFunction()->GetLabelTab()->GetLabelIdxFromStrIdx(strIdx); + if (labIdx == 0) { + labIdx = mod.CurFunction()->GetLabelTab()->CreateLabel(); + mod.CurFunction()->GetLabelTab()->SetSymbolFromStIdx(labIdx, strIdx); + mod.CurFunction()->GetLabelTab()->AddToStringLabelMap(labIdx); + } + asmNode->gotoLabels.push_back(labIdx); + if (lexer.NextToken() == TK_coma) { + lexer.NextToken(); + } + } + // parse closing brace + if (lexer.GetTokenKind() != TK_rbrace) { + Error("Closing brace not found parsing asm statement."); + return false; + } + stmt = asmNode; + lexer.NextToken(); + return true; +} + +bool MIRParser::ParseStmtSafeRegion(StmtNodePtr &stmt) { + switch (lexer.GetTokenKind()) { + case TK_safe: + safeRegionFlag.push(true); + break; + case TK_unsafe: + safeRegionFlag.push(false); + break; + case TK_endsafe: + case TK_endunsafe: + safeRegionFlag.pop(); + break; + default: + Error("Only support safe/unsafe/endsafe/endunsafe."); + return false; + } + (void)stmt; + lexer.NextToken(); + return true; +} + +bool MIRParser::ParseStmtJsTry(StmtNodePtr &stmt) { + auto *tryNode = mod.CurFuncCodeMemPool()->New(); + lexer.NextToken(); + // parse handler label + if (lexer.GetTokenKind() == TK_intconst && lexer.GetTheIntVal() == 0) { + tryNode->SetCatchOffset(0); + } else { + if (lexer.GetTokenKind() != TK_label) { + Error("expect handler label in try but get "); + return false; + } + GStrIdx stridx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(lexer.GetName()); + LabelIdx labidx = mod.CurFunction()->GetLabelTab()->GetLabelIdxFromStrIdx(stridx); + if (labidx == 0) { + labidx = mod.CurFunction()->GetLabelTab()->CreateLabel(); + mod.CurFunction()->GetLabelTab()->SetSymbolFromStIdx(labidx, stridx); + mod.CurFunction()->GetLabelTab()->AddToStringLabelMap(labidx); + } + tryNode->SetCatchOffset(labidx); + } + lexer.NextToken(); + // parse finally label + if (lexer.GetTokenKind() == TK_intconst && lexer.GetTheIntVal() == 0) { + tryNode->SetFinallyOffset(0); + } else { + if (lexer.GetTokenKind() != TK_label) { + Error("expect finally label in try but get "); + return false; + } + GStrIdx stridx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(lexer.GetName()); + LabelIdx labidx = mod.CurFunction()->GetLabelTab()->GetLabelIdxFromStrIdx(stridx); + if (labidx == 0) { + labidx = mod.CurFunction()->GetLabelTab()->CreateLabel(); + mod.CurFunction()->GetLabelTab()->SetSymbolFromStIdx(labidx, stridx); + mod.CurFunction()->GetLabelTab()->AddToStringLabelMap(labidx); + } + tryNode->SetFinallyOffset(labidx); + } + stmt = tryNode; + lexer.NextToken(); + return true; +} + +bool MIRParser::ParseStmtTry(StmtNodePtr &stmt) { + auto *tryNode = mod.CurFuncCodeMemPool()->New(mod); + lexer.NextToken(); + ASSERT(lexer.GetTokenKind() == TK_lbrace, "expect left brace in try but get "); + lexer.NextToken(); + // parse handler label + while (lexer.GetTokenKind() != TK_rbrace) { + if (lexer.GetTokenKind() != TK_label) { + Error("expect handler label in try but get "); + return false; + } + GStrIdx stridx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(lexer.GetName()); + LabelIdx labidx = mod.CurFunction()->GetLabelTab()->GetLabelIdxFromStrIdx(stridx); + if (labidx == 0) { + labidx = mod.CurFunction()->GetLabelTab()->CreateLabel(); + mod.CurFunction()->GetLabelTab()->SetSymbolFromStIdx(labidx, stridx); + mod.CurFunction()->GetLabelTab()->AddToStringLabelMap(labidx); + } + tryNode->AddOffset(labidx); + lexer.NextToken(); + } + stmt = tryNode; + lexer.NextToken(); + return true; +} + +bool 
MIRParser::ParseStmtCatch(StmtNodePtr &stmt) { + auto *catchNode = mod.CurFuncCodeMemPool()->New(mod); + lexer.NextToken(); + ASSERT(lexer.GetTokenKind() == TK_lbrace, "expect left brace in catch but get "); + lexer.NextToken(); + while (lexer.GetTokenKind() != TK_rbrace) { + TyIdx tyidx(0); + if (!ParseType(tyidx)) { + Error("expect type parsing java catch statement"); + return false; + } + catchNode->PushBack(tyidx); + } + catchNode->SetNumOpnds(0); + stmt = catchNode; + lexer.NextToken(); + return true; +} + +bool MIRParser::ParseUnaryStmt(Opcode op, StmtNodePtr &stmt) { + lexer.NextToken(); + auto *throwStmt = mod.CurFuncCodeMemPool()->New(op); + stmt = throwStmt; + BaseNode *expr = nullptr; + if (!ParseExprOneOperand(expr)) { + return false; + } + throwStmt->SetOpnd(expr, 0); + lexer.NextToken(); + return true; +} + +bool MIRParser::ParseUnaryStmtThrow(StmtNodePtr &stmt) { + return ParseUnaryStmt(OP_throw, stmt); +} + +bool MIRParser::ParseUnaryStmtDecRef(StmtNodePtr &stmt) { + return ParseUnaryStmt(OP_decref, stmt); +} + +bool MIRParser::ParseUnaryStmtIncRef(StmtNodePtr &stmt) { + return ParseUnaryStmt(OP_incref, stmt); +} + +bool MIRParser::ParseUnaryStmtDecRefReset(StmtNodePtr &stmt) { + return ParseUnaryStmt(OP_decrefreset, stmt); +} + +bool MIRParser::ParseUnaryStmtIGoto(StmtNodePtr &stmt) { + return ParseUnaryStmt(OP_igoto, stmt); +} + +bool MIRParser::ParseUnaryStmtEval(StmtNodePtr &stmt) { + return ParseUnaryStmt(OP_eval, stmt); +} + +bool MIRParser::ParseUnaryStmtFree(StmtNodePtr &stmt) { + return ParseUnaryStmt(OP_free, stmt); +} + +bool MIRParser::ParseUnaryStmtCallAssertNonNull(StmtNodePtr &stmt) { + std::string funcName; + std::string stmtFuncName; + int index = 0; + if (!ParseCallAssertInfo(funcName, &index, stmtFuncName)) { + Error("ParseCallAssertInfo failed"); + return false; + } + lexer.NextToken(); + GStrIdx stridx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(funcName); + GStrIdx stmtstridx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(stmtFuncName); + stmt = mod.CurFuncCodeMemPool()->New(OP_callassertnonnull, stridx, index, stmtstridx); + BaseNode *expr = nullptr; + if (!ParseExprOneOperand(expr)) { + return false; + } + stmt->SetOpnd(expr, 0); + lexer.NextToken(); + return true; +} + +bool MIRParser::ParseAssertInfo(std::string &funcName) { + if (lexer.NextToken() != TK_langle) { + Error("expect < parsing safey assert check "); + return false; + } + if (lexer.NextToken() != TK_fname) { + Error("expect &funcname parsing parsing safey assert check "); + return false; + } + funcName = lexer.GetName(); + if (lexer.NextToken() != TK_rangle) { + Error("expect > parsing safey assert check "); + return false; + } + return true; +} + +bool MIRParser::ParseUnaryStmtAssertNonNullCheck(Opcode op, StmtNodePtr &stmt) { + std::string funcName; + if (!ParseAssertInfo(funcName)) { + Error("ParseAssertInfo failed"); + return false; + } + GStrIdx stridx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(funcName); + lexer.NextToken(); + stmt = mod.CurFuncCodeMemPool()->New(op, stridx); + BaseNode *expr = nullptr; + if (!ParseExprOneOperand(expr)) { + return false; + } + stmt->SetOpnd(expr, 0); + lexer.NextToken(); + return true; +} + +bool MIRParser::ParseUnaryStmtAssertNonNull(StmtNodePtr &stmt) { + if (mod.IsCModule()) { + return ParseUnaryStmtAssertNonNullCheck(OP_assertnonnull, stmt); + } else { + return ParseUnaryStmt(OP_assertnonnull, stmt); + } +} + +bool MIRParser::ParseUnaryStmtAssignAssertNonNull(StmtNodePtr &stmt) { + return 
ParseUnaryStmtAssertNonNullCheck(OP_assignassertnonnull, stmt); +} + +bool MIRParser::ParseUnaryStmtReturnAssertNonNull(StmtNodePtr &stmt) { + return ParseUnaryStmtAssertNonNullCheck(OP_returnassertnonnull, stmt); +} + +bool MIRParser::ParseStmtMarker(StmtNodePtr &stmt) { + Opcode op; + switch (paramTokenKindForStmt) { + case TK_jscatch: + op = OP_jscatch; + break; + case TK_finally: + op = OP_finally; + break; + case TK_cleanuptry: + op = OP_cleanuptry; + break; + case TK_endtry: + op = OP_endtry; + break; + case TK_retsub: + op = OP_retsub; + break; + case TK_membaracquire: + op = OP_membaracquire; + break; + case TK_membarrelease: + op = OP_membarrelease; + break; + case TK_membarstoreload: + op = OP_membarstoreload; + break; + case TK_membarstorestore: + op = OP_membarstorestore; + break; + default: + return false; + } + auto *stmtNode = mod.CurFuncCodeMemPool()->New(op); + stmt = stmtNode; + lexer.NextToken(); + return true; +} + +bool MIRParser::ParseStmtGosub(StmtNodePtr &stmt) { + if (lexer.NextToken() != TK_label) { + Error("expect finally label in gosub but get "); + return false; + } + GStrIdx stridx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(lexer.GetName()); + LabelIdx labidx = mod.CurFunction()->GetLabelTab()->GetLabelIdxFromStrIdx(stridx); + if (labidx == 0) { + labidx = mod.CurFunction()->GetLabelTab()->CreateLabel(); + mod.CurFunction()->GetLabelTab()->SetSymbolFromStIdx(labidx, stridx); + mod.CurFunction()->GetLabelTab()->AddToStringLabelMap(labidx); + } + auto *goSubNode = mod.CurFuncCodeMemPool()->New(OP_gosub); + goSubNode->SetOffset(labidx); + stmt = goSubNode; + lexer.NextToken(); + return true; +} + +bool MIRParser::ParseBinaryStmt(StmtNodePtr &stmt, Opcode op) { + auto *assStmt = mod.CurFuncCodeMemPool()->New(op); + lexer.NextToken(); + BaseNode *opnd0 = nullptr; + BaseNode *opnd1 = nullptr; + if (!ParseExprTwoOperand(opnd0, opnd1)) { + return false; + } + assStmt->SetBOpnd(opnd0, 0); + assStmt->SetBOpnd(opnd1, 1); + stmt = assStmt; + lexer.NextToken(); + return true; +} + +bool MIRParser::ParseNaryStmtAssert(StmtNodePtr &stmt, Opcode op) { + std::string funcName; + if (!ParseAssertInfo(funcName)) { + Error("ParseAssertInfo failed"); + return false; + } + GStrIdx stridx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(funcName); + auto *assStmt = mod.CurFuncCodeMemPool()->New(mod, op, stridx); + if (!ParseNaryExpr(*assStmt)) { + Error("ParseNaryStmtAssert failed"); + return false; + } + assStmt->SetNumOpnds(static_cast(assStmt->GetNopndSize())); + stmt = assStmt; + lexer.NextToken(); + return true; +} + +bool MIRParser::ParseNaryStmtAssertGE(StmtNodePtr &stmt) { + return ParseNaryStmtAssert(stmt, OP_assertge); +} + +bool MIRParser::ParseNaryStmtAssertLT(StmtNodePtr &stmt) { + return ParseNaryStmtAssert(stmt, OP_assertlt); +} + +bool MIRParser::ParseNaryStmtReturnAssertLE(StmtNodePtr &stmt) { + return ParseNaryStmtAssert(stmt, OP_returnassertle); +} + +bool MIRParser::ParseNaryStmtAssignAssertLE(StmtNodePtr &stmt) { + return ParseNaryStmtAssert(stmt, OP_assignassertle); +} + +bool MIRParser::ParseNaryStmtCalcassertGE(StmtNodePtr &stmt) { + return ParseNaryStmtAssert(stmt, OP_calcassertge); +} + +bool MIRParser::ParseNaryStmtCalcassertLT(StmtNodePtr &stmt) { + return ParseNaryStmtAssert(stmt, OP_calcassertlt); +} + +bool MIRParser::ParseCallAssertInfo(std::string &funcName, int *paramIndex, std::string &stmtFuncName) { + if (lexer.NextToken() != TK_langle) { + Error("expect < parsing safey call check "); + return false; + } + if (lexer.NextToken() 
!= TK_fname) { + Error("expect &funcname parsing parsing safey call check "); + return false; + } + funcName = lexer.GetName(); + if (lexer.NextToken() != TK_coma) { + Error("expect , parsing parsing safey call check "); + return false; + } + if (lexer.NextToken() != TK_intconst) { + Error("expect intconst parsing parsing safey call check "); + return false; + } + *paramIndex = static_cast(lexer.GetTheIntVal()); + if (lexer.NextToken() != TK_coma) { + Error("expect , parsing parsing safey call check "); + return false; + } + if (lexer.NextToken() != TK_fname) { + Error("expect &stmtfuncname parsing parsing safey call check "); + return false; + } + stmtFuncName = lexer.GetName(); + if (lexer.NextToken() != TK_rangle) { + Error("expect > parsing parsing safey call check "); + return false; + } + return true; +} + +bool MIRParser::ParseNaryStmtCallAssertLE(StmtNodePtr &stmt) { + std::string funcName; + std::string stmtFuncName; + int index = 0; + if (!ParseCallAssertInfo(funcName, &index, stmtFuncName)) { + Error("ParseCallAssertInfo failed"); + return false; + } + GStrIdx stridx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(funcName); + GStrIdx stmtstridx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(stmtFuncName); + auto *assStmt = mod.CurFuncCodeMemPool()->New(mod, OP_callassertle, stridx, index, + stmtstridx); + if (!ParseNaryExpr(*assStmt)) { + Error("ParseNaryExpr failed"); + return false; + } + assStmt->SetNumOpnds(static_cast(assStmt->GetNopndSize())); + stmt = assStmt; + lexer.NextToken(); + return true; +} + +bool MIRParser::ParseNaryExpr(NaryStmtNode &stmtNode) { + if (lexer.NextToken() != TK_lparen) { + Error("expect ( parsing NaryExpr "); + return false; + } + (void)lexer.NextToken(); // skip TK_lparen + while (lexer.GetTokenKind() != TK_rparen) { + BaseNode *expr = nullptr; + if (!ParseExpression(expr)) { + Error("ParseStmtReturn failed"); + return false; + } + stmtNode.GetNopnd().push_back(expr); + if (lexer.GetTokenKind() != TK_coma && lexer.GetTokenKind() != TK_rparen) { + Error("expect , or ) parsing NaryStmt"); + return false; + } + if (lexer.GetTokenKind() == TK_coma) { + lexer.NextToken(); + } + } + return true; +} + +bool MIRParser::ParseNaryStmt(StmtNodePtr &stmt, Opcode op) { + auto *stmtReturn = mod.CurFuncCodeMemPool()->New(mod, op); + if (op == OP_syncenter) { // old code reconstruct later + if (lexer.NextToken() != TK_lparen) { + Error("expect return with ( but get "); + return false; + } + lexer.NextToken(); + BaseNode *expr = nullptr; + if (!ParseExpression(expr)) { + Error("ParseStmtReturn failed"); + return false; + } + stmtReturn->GetNopnd().push_back(expr); + if (lexer.GetTokenKind() == TK_coma) { + lexer.NextToken(); + BaseNode *exprSync = nullptr; + if (!ParseExpression(exprSync)) { + Error("ParseStmtReturn failed"); + return false; + } + stmtReturn->GetNopnd().push_back(exprSync); + } else { + MIRType *intType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(static_cast(PTY_i32)); + // default 2 for __sync_enter_fast() + MIRIntConst *intConst = GlobalTables::GetIntConstTable().GetOrCreateIntConst(2, *intType); + ConstvalNode *exprConst = mod.GetMemPool()->New(); + exprConst->SetPrimType(PTY_i32); + exprConst->SetConstVal(intConst); + stmtReturn->GetNopnd().push_back(exprConst); + stmtReturn->SetNumOpnds(stmtReturn->GetNopndSize()); + } + + if (lexer.GetTokenKind() != TK_rparen) { + Error("expect ) parsing NaryStmt"); + return false; + } + } else if (!ParseNaryExpr(*stmtReturn)) { + Error("ParseNaryExpr failed"); + return false; + } + 
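+  // Rough sketch of the accepted textual forms (illustrative only):
+  //   return (dread i32 %x)                      -- generic n-ary path
+  //   syncenter (dread ref %obj, constval i32 2) -- explicit second operand
+  // When syncenter carries no second operand, the constant 2 built above
+  // stands in as the default __sync_enter_fast() variant.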
stmtReturn->SetNumOpnds(stmtReturn->GetNopndSize()); + stmt = stmtReturn; + lexer.NextToken(); + return true; +} + +bool MIRParser::ParseNaryStmtReturn(StmtNodePtr &stmt) { + return ParseNaryStmt(stmt, OP_return); +} + +bool MIRParser::ParseNaryStmtSyncEnter(StmtNodePtr &stmt) { + return ParseNaryStmt(stmt, OP_syncenter); +} + +bool MIRParser::ParseNaryStmtSyncExit(StmtNodePtr &stmt) { + return ParseNaryStmt(stmt, OP_syncexit); +} + +bool MIRParser::ParseLoc() { + if (lexer.NextToken() != TK_intconst) { + Error("expect intconst in LOC but get "); + return false; + } + lastFileNum = lexer.GetTheIntVal(); + if (lexer.NextToken() != TK_intconst) { + Error("expect intconst in LOC but get "); + return false; + } + lastLineNum = lexer.GetTheIntVal(); + if (firstLineNum == 0) { + firstLineNum = lastLineNum; + } + if (lexer.NextToken() == TK_intconst) { // optional column number + lastColumnNum = static_cast(lexer.GetTheIntVal()); + lexer.NextToken(); + } + return true; +} + +bool MIRParser::ParseLocStmt(StmtNodePtr&) { + return ParseLoc(); +} + +bool MIRParser::ParseStatement(StmtNodePtr &stmt) { + paramTokenKindForStmt = lexer.GetTokenKind(); + uint32 mplNum = lexer.GetLineNum(); + uint32 lnum = lastLineNum; + uint32 fnum = lastFileNum; + uint16 cnum = lastColumnNum; + std::map::iterator itFuncPtr = funcPtrMapForParseStmt.find(paramTokenKindForStmt); + if (itFuncPtr != funcPtrMapForParseStmt.end()) { + if (!(this->*(itFuncPtr->second))(stmt)) { + return false; + } + } else { + return false; + } + if (stmt && stmt->GetSrcPos().MplLineNum() == 0) { + stmt->GetSrcPos().SetFileNum(fnum); + stmt->GetSrcPos().SetLineNum(lnum); + stmt->GetSrcPos().SetColumn(cnum); + stmt->GetSrcPos().SetMplLineNum(mplNum); + if (safeRegionFlag.top()) { + stmt->SetInSafeRegion(); + } + } + return true; +} + +/* parse the statements enclosed by { and } + */ +bool MIRParser::ParseStmtBlock(BlockNodePtr &blk) { + bool retval = false; + if (lexer.GetTokenKind() != TK_lbrace) { + Error("expect { for func body but get "); + return false; + } + SrcPosition posB(lastFileNum, lastLineNum, lastColumnNum, lexer.GetLineNum()); + blk = mod.CurFuncCodeMemPool()->New(); + MIRFunction *fn = mod.CurFunction(); + paramCurrFuncForParseStmtBlock = fn; + lexer.NextToken(); + bool first = true; + // Insert _mcount for PI. 
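+  // With profile info enabled, a parameterless call to an undeclared mcount
+  // stub (built by ParseStmtCallMcount above) is prepended to the block before
+  // any user statements are parsed.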
+ if (mod.GetWithProfileInfo()) { + StmtNode *stmtt = nullptr; + if (!ParseStmtCallMcount(stmtt)) { + SrcPosition posE(lastFileNum, lastLineNum, lastColumnNum, lexer.GetLineNum()); + mod.CurFunction()->GetScope()->AddTuple(blk->GetSrcPos(), posB, posE); + return retval; + } + blk->AddStatement(stmtt); + } + while (true) { + TokenKind stmtTk = lexer.GetTokenKind(); + // calculate the mpl file line number mplNum here to get accurate result + uint32 mplNum = lexer.GetLineNum(); + if (IsStatement(stmtTk)) { + ParseStmtBlockForSeenComment(blk, mplNum); + StmtNode *stmt = nullptr; + if (!ParseStatement(stmt)) { + Error("ParseStmtBlock failed when parsing a statement"); + break; + } + if (stmt == nullptr) { // stmt is nullptr if it is a LOC + continue; + } + + blk->AddStatement(stmt); + // set blk src position + if (first) { + blk->SetSrcPos(stmt->GetSrcPos()); + first = false; + } + } else { + std::map::iterator itFuncPtr = funcPtrMapForParseStmtBlock.find(stmtTk); + if (itFuncPtr == funcPtrMapForParseStmtBlock.end()) { + if (stmtTk == TK_rbrace) { + ParseStmtBlockForSeenComment(blk, mplNum); + lexer.NextToken(); + retval = true; + } else { + Error("expect } or var or statement for func body but get "); + } + break; + } else { + if (!(this->*(itFuncPtr->second))()) { + break; + } + } + } + } + + SrcPosition posE(lastFileNum, lastLineNum, lastColumnNum, lexer.GetLineNum()); + mod.CurFunction()->GetScope()->AddTuple(blk->GetSrcPos(), posB, posE); + return retval; +} + +void MIRParser::ParseStmtBlockForSeenComment(BlockNodePtr blk, uint32 mplNum) { + if (Options::noComment) { + lexer.seenComments.clear(); + return; + } + // collect accumulated comments into comment statement nodes + if (!lexer.seenComments.empty()) { + for (size_t i = 0; i < lexer.seenComments.size(); ++i) { + auto *cmnt = mod.CurFuncCodeMemPool()->New(mod); + cmnt->SetComment(lexer.seenComments[i]); + SetSrcPos(cmnt->GetSrcPos(), mplNum); + blk->AddStatement(cmnt); + } + lexer.seenComments.clear(); + } +} + +bool MIRParser::ParseStmtBlockForVar(TokenKind stmtTK) { + MIRFunction *fn = paramCurrFuncForParseStmtBlock; + MIRSymbol *st = fn->GetSymTab()->CreateSymbol(kScopeLocal); + st->SetStorageClass(kScAuto); + st->SetSKind(kStVar); + SetSrcPos(st->GetSrcPosition(), lexer.GetLineNum()); + if (stmtTK == TK_tempvar) { + st->SetIsTmp(true); + } + if (!ParseDeclareVar(*st)) { + return false; + } + if (!fn->GetSymTab()->AddToStringSymbolMap(*st)) { + Error("duplicate declare symbol parse function "); + return false; + } + if (!ParseDeclareVarInitValue(*st)) { + return false; + } + return true; +} + +bool MIRParser::ParseStmtBlockForVar() { + return ParseStmtBlockForVar(TK_var); +} + +bool MIRParser::ParseStmtBlockForTempVar() { + return ParseStmtBlockForVar(TK_tempvar); +} + +bool MIRParser::ParseStmtBlockForReg() { + lexer.NextToken(); + if (lexer.GetTokenKind() != TK_preg) { + Error("expect %%preg after reg"); + return false; + } + PregIdx pregIdx; + if (!ParsePseudoReg(PTY_ref, pregIdx)) { + return false; + } + MIRPreg *preg = mod.CurFunction()->GetPregTab()->PregFromPregIdx(pregIdx); + TyIdx tyidx(0); + if (!ParseType(tyidx)) { + Error("ParseDeclareVar failed when parsing the type"); + return false; + } + ASSERT(tyidx > 0, "parse declare var failed "); + MIRType *mirType = GlobalTables::GetTypeTable().GetTypeFromTyIdx(tyidx); + preg->SetMIRType(mirType); + if (lexer.GetTokenKind() == TK_intconst) { + int64 theIntVal = static_cast(lexer.GetTheIntVal()); + if (theIntVal != 0 && theIntVal != 1) { + Error("parseDeclareReg failed"); + 
return false; + } + preg->SetNeedRC(theIntVal == 0 ? false : true); + } else { + Error("parseDeclareReg failed"); + return false; + } + lexer.NextToken(); + return true; +} + +bool MIRParser::ParseStmtBlockForType() { + paramParseLocalType = true; + if (!ParseTypeDefine()) { + return false; + } + return true; +} + +bool MIRParser::ParseStmtBlockForFrameSize() { + MIRFunction *fn = paramCurrFuncForParseStmtBlock; + lexer.NextToken(); + if (lexer.GetTokenKind() != TK_intconst) { + Error("expect integer after frameSize but get "); + return false; + } + fn->SetFrameSize(static_cast(lexer.GetTheIntVal())); + lexer.NextToken(); + return true; +} + +bool MIRParser::ParseStmtBlockForUpformalSize() { + MIRFunction *fn = paramCurrFuncForParseStmtBlock; + lexer.NextToken(); + if (lexer.GetTokenKind() != TK_intconst) { + Error("expect integer after upFormalSize but get "); + return false; + } + fn->SetUpFormalSize(static_cast(lexer.GetTheIntVal())); + lexer.NextToken(); + return true; +} + +bool MIRParser::ParseStmtBlockForModuleID() { + MIRFunction *fn = paramCurrFuncForParseStmtBlock; + lexer.NextToken(); + if (lexer.GetTokenKind() != TK_intconst) { + Error("expect integer after moduleid but get "); + return false; + } + fn->SetModuleID(lexer.GetTheIntVal()); + lexer.NextToken(); + return true; +} + +bool MIRParser::ParseStmtBlockForFuncSize() { + MIRFunction *fn = paramCurrFuncForParseStmtBlock; + lexer.NextToken(); + if (lexer.GetTokenKind() != TK_intconst) { + Error("expect integer after funcSize but get "); + return false; + } + fn->SetFuncSize(lexer.GetTheIntVal()); + lexer.NextToken(); + return true; +} + +bool MIRParser::ParseStmtBlockForFuncID() { + // funcid is for debugging purpose + MIRFunction *fn = paramCurrFuncForParseStmtBlock; + lexer.NextToken(); + if (lexer.GetTokenKind() != TK_intconst) { + Error("expect integer after funcid but get "); + return false; + } + fn->SetPuidxOrigin(lexer.GetTheIntVal()); + lexer.NextToken(); + return true; +} + +bool MIRParser::ParseStmtBlockForFormalWordsTypeTagged() { + MIRFunction *fn = paramCurrFuncForParseStmtBlock; + uint8 *addr = ParseWordsInfo(fn->GetUpFormalSize()); + if (addr == nullptr) { + Error("parser error for formalwordstypetagged"); + return false; + } + fn->SetFormalWordsTypeTagged(addr); + return true; +} + +bool MIRParser::ParseStmtBlockForLocalWordsTypeTagged() { + MIRFunction *fn = paramCurrFuncForParseStmtBlock; + uint8 *addr = ParseWordsInfo(fn->GetFrameSize()); + if (addr == nullptr) { + Error("parser error for localWordsTypeTagged"); + return false; + } + fn->SetLocalWordsTypeTagged(addr); + return true; +} + +bool MIRParser::ParseStmtBlockForFormalWordsRefCounted() { + MIRFunction *fn = paramCurrFuncForParseStmtBlock; + uint8 *addr = ParseWordsInfo(fn->GetUpFormalSize()); + if (addr == nullptr) { + Error("parser error for formalwordsrefcounted"); + return false; + } + fn->SetFormalWordsRefCounted(addr); + return true; +} + +bool MIRParser::ParseStmtBlockForLocalWordsRefCounted() { + MIRFunction *fn = paramCurrFuncForParseStmtBlock; + uint8 *addr = ParseWordsInfo(fn->GetFrameSize()); + if (addr == nullptr) { + Error("parser error for localwordsrefcounted"); + return false; + } + fn->SetLocalWordsRefCounted(addr); + return true; +} + +bool MIRParser::ParseStmtBlockForFuncInfo() { + lexer.NextToken(); + if (!ParseFuncInfo()) { + return false; + } + return true; +} + +/* exprparser */ +static Opcode GetUnaryOp(TokenKind tk) { + switch (tk) { +#define UNARYOP(P) \ + case TK_##P: \ + return OP_##P; +#include "unary_op.def" +#undef 
UNARYOP + default: + return OP_undef; + } +} + +static Opcode GetBinaryOp(TokenKind tk) { + switch (tk) { +#define BINARYOP(P) \ + case TK_##P: \ + return OP_##P; +#include "binary_op.def" +#undef BINARYOP + default: + return OP_undef; + } +} + +static Opcode GetConvertOp(TokenKind tk) { + switch (tk) { + case TK_ceil: + return OP_ceil; + case TK_cvt: + return OP_cvt; + case TK_floor: + return OP_floor; + case TK_round: + return OP_round; + case TK_trunc: + return OP_trunc; + default: + return OP_undef; + } +} + +bool MIRParser::ParseExprOneOperand(BaseNodePtr &expr) { + if (lexer.GetTokenKind() != TK_lparen) { + Error("expect ( parsing operand parsing unary "); + return false; + } + lexer.NextToken(); + if (!ParseExpression(expr)) { + Error("expect expression as openrand of unary expression "); + return false; + } + if (lexer.GetTokenKind() != TK_rparen) { + Error("expect ) parsing operand parsing unary "); + return false; + } + return true; +} + +bool MIRParser::ParseExprTwoOperand(BaseNodePtr &opnd0, BaseNodePtr &opnd1) { + if (lexer.GetTokenKind() != TK_lparen) { + Error("expect ( parsing operand parsing unary "); + return false; + } + lexer.NextToken(); + if (!ParseExpression(opnd0)) { + return false; + } + if (lexer.GetTokenKind() != TK_coma) { + Error("expect , between two operands but get "); + return false; + } + lexer.NextToken(); + if (!ParseExpression(opnd1)) { + return false; + } + if (lexer.GetTokenKind() != TK_rparen) { + Error("expect ) parsing operand parsing unary "); + return false; + } + return true; +} + +bool MIRParser::ParseExprNaryOperand(MapleVector &opndVec) { + if (lexer.GetTokenKind() != TK_lparen) { + Error("expect ( parsing operand parsing nary operands "); + return false; + } + TokenKind tk = lexer.NextToken(); + while (tk != TK_rparen) { + BaseNode *opnd = nullptr; + if (!ParseExpression(opnd)) { + Error("expect expression parsing nary operands "); + return false; + } + opndVec.push_back(opnd); + tk = lexer.GetTokenKind(); + if (tk == TK_coma) { + tk = lexer.NextToken(); + } + } + return true; +} + +bool MIRParser::ParseDeclaredSt(StIdx &stidx) { + TokenKind varTk = lexer.GetTokenKind(); + stidx.SetFullIdx(0); + GStrIdx stridx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(lexer.GetName()); + if (varTk == TK_gname) { + stidx = GlobalTables::GetGsymTable().GetStIdxFromStrIdx(stridx); + if (stidx.FullIdx() == 0) { + MIRSymbol *st = GlobalTables::GetGsymTable().CreateSymbol(kScopeGlobal); + st->SetNameStrIdx(stridx); + st->SetSKind(kStVar); + (void)GlobalTables::GetGsymTable().AddToStringSymbolMap(*st); + stidx = GlobalTables::GetGsymTable().GetStIdxFromStrIdx(stridx); + return true; + } + } else if (varTk == TK_lname) { + stidx = mod.CurFunction()->GetSymTab()->GetStIdxFromStrIdx(stridx); + if (stidx.FullIdx() == 0) { + Error("local symbol not declared "); + return false; + } + } else { + Error("expect global/local name but get "); + return false; + } + return true; +} + +void MIRParser::CreateFuncMIRSymbol(PUIdx &puidx, GStrIdx strIdx) { + MIRSymbol *funcSt = GlobalTables::GetGsymTable().CreateSymbol(kScopeGlobal); + funcSt->SetNameStrIdx(strIdx); + (void)GlobalTables::GetGsymTable().AddToStringSymbolMap(*funcSt); + funcSt->SetStorageClass(kScText); + funcSt->SetSKind(kStFunc); + funcSt->SetNeedForwDecl(); + auto *fn = mod.GetMemPool()->New(&mod, funcSt->GetStIdx()); + puidx = static_cast(GlobalTables::GetFunctionTable().GetFuncTable().size()); + fn->SetPuidx(puidx); + GlobalTables::GetFunctionTable().GetFuncTable().push_back(fn); + 
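+  // The new PUIdx is simply the next slot of the global function table, so a
+  // forward-referenced (not yet declared) function gets a stable index here.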
funcSt->SetFunction(fn); + if ((options & kParseInlineFuncBody) != 0) { + funcSt->SetIsTmpUnused(true); + } +} + +bool MIRParser::ParseDeclaredFunc(PUIdx &puidx) { + GStrIdx stridx = GlobalTables::GetStrTable().GetStrIdxFromName(lexer.GetName()); + if (stridx == 0u) { + stridx = GlobalTables::GetStrTable().GetOrCreateStrIdxFromName(lexer.GetName()); + } + StIdx stidx = GlobalTables::GetGsymTable().GetStIdxFromStrIdx(stridx); + if (stidx.FullIdx() == 0) { + CreateFuncMIRSymbol(puidx, stridx); + return true; + } + MIRSymbol *st = GlobalTables::GetGsymTable().GetSymbolFromStidx(stidx.Idx()); + ASSERT(st != nullptr, "null ptr check"); + if (st->GetSKind() != kStFunc) { + Error("function name not declared as function"); + return false; + } + MIRFunction *func = st->GetFunction(); + CHECK_NULL_FATAL(func); + puidx = func->GetPuidx(); + st->SetAppearsInCode(true); + return true; +} + +bool MIRParser::ParseExprDread(BaseNodePtr &expr) { + if (lexer.GetTokenKind() != TK_dread) { + Error("expect dread but get "); + return false; + } + AddrofNode *dexpr = mod.CurFuncCodeMemPool()->New(OP_dread); + expr = dexpr; + lexer.NextToken(); + TyIdx tyidx(0); + bool parseRet = ParsePrimType(tyidx); + if (tyidx == 0u || !parseRet) { + Error("expect primitive type but get "); + return false; + } + expr->SetPrimType(GlobalTables::GetTypeTable().GetPrimTypeFromTyIdx(tyidx)); + StIdx stidx; + if (!ParseDeclaredSt(stidx)) { + return false; + } + if (stidx.FullIdx() == 0) { + Error("expect a symbol ParseExprDread failed"); + return false; + } + dexpr->SetStIdx(stidx); + TokenKind endtk = lexer.NextToken(); + if (endtk == TK_intconst) { + dexpr->SetFieldID(lexer.GetTheIntVal()); + lexer.NextToken(); + } else if (!IsDelimitationTK(endtk)) { + Error("expect , or ) delimitation token but get "); + return false; + } else { + dexpr->SetFieldID(0); + } + if (!dexpr->CheckNode(mod)) { + Error("dread is not legal"); + return false; + } + return true; +} + +bool MIRParser::ParseExprDreadoff(BaseNodePtr &expr) { + if (lexer.GetTokenKind() != TK_dreadoff) { + Error("expect dreadoff but get "); + return false; + } + DreadoffNode *dexpr = mod.CurFuncCodeMemPool()->New(OP_dreadoff); + expr = dexpr; + lexer.NextToken(); + TyIdx tyidx(0); + bool parseRet = ParsePrimType(tyidx); + if (tyidx == 0u || !parseRet) { + Error("expect primitive type but get "); + return false; + } + expr->SetPrimType(GlobalTables::GetTypeTable().GetPrimTypeFromTyIdx(tyidx)); + StIdx stidx; + if (!ParseDeclaredSt(stidx)) { + return false; + } + if (stidx.FullIdx() == 0) { + Error("expect a symbol ParseExprDread failed"); + return false; + } + dexpr->stIdx = stidx; + TokenKind endtk = lexer.NextToken(); + if (endtk == TK_intconst) { + dexpr->offset = static_cast(lexer.GetTheIntVal()); + lexer.NextToken(); + } else { + Error("expect integer offset but get "); + return false; + } + return true; +} + +bool MIRParser::ParseExprRegread(BaseNodePtr &expr) { + auto *regRead = mod.CurFuncCodeMemPool()->New(); + expr = regRead; + lexer.NextToken(); + TyIdx tyidx(0); + if (!ParsePrimType(tyidx)) { + return false; + } + if (tyidx == 0u) { + Error("expect primitive type but get "); + return false; + } + expr->SetPrimType(GlobalTables::GetTypeTable().GetPrimTypeFromTyIdx(tyidx)); + if (lexer.GetTokenKind() == TK_specialreg) { + PregIdx tempPregIdx = regRead->GetRegIdx(); + bool isSuccess = ParseSpecialReg(tempPregIdx); + regRead->SetRegIdx(tempPregIdx); + return isSuccess; + } + if (lexer.GetTokenKind() == TK_preg) { + PregIdx tempPregIdx = regRead->GetRegIdx(); + bool 
isSuccess = ParsePseudoReg(regRead->GetPrimType(), tempPregIdx); + regRead->SetRegIdx(tempPregIdx); + return isSuccess; + } + Error("expect special or pseudo register but get "); + return false; +} + +bool MIRParser::ParseExprConstval(BaseNodePtr &expr) { + auto *exprConst = mod.CurFuncCodeMemPool()->New(); + TokenKind typeTk = lexer.NextToken(); + if (!IsPrimitiveType(typeTk)) { + Error("expect type for GetConstVal but get "); + return false; + } + exprConst->SetPrimType(GetPrimitiveType(typeTk)); + lexer.NextToken(); + MIRConst *constVal = nullptr; + if (!ParseScalarValue(constVal, *GlobalTables::GetTypeTable().GetPrimType(exprConst->GetPrimType()))) { + Error("expect scalar type but get "); + return false; + } + exprConst->SetConstVal(constVal); + expr = exprConst; + lexer.NextToken(); + return true; +} + +bool MIRParser::ParseExprConststr(BaseNodePtr &expr) { + auto *strConst = mod.CurFuncCodeMemPool()->New(); + TokenKind tk = lexer.NextToken(); + if (!IsPrimitiveType(tk)) { + Error("expect primitive type for conststr but get "); + return false; + } + strConst->SetPrimType(GetPrimitiveType(tk)); + if (!IsAddress(strConst->GetPrimType())) { + Error("expect primitive type for conststr but get "); + return false; + } + tk = lexer.NextToken(); + if (tk != TK_string) { + Error("expect string literal for conststr but get "); + return false; + } + strConst->SetStrIdx(GlobalTables::GetUStrTable().GetOrCreateStrIdxFromName(lexer.GetName())); + expr = strConst; + lexer.NextToken(); + return true; +} + +bool MIRParser::ParseExprConststr16(BaseNodePtr &expr) { + auto *str16Const = mod.CurFuncCodeMemPool()->New(); + TokenKind tk = lexer.NextToken(); + if (!IsPrimitiveType(tk)) { + Error("expect primitive type for conststr16 but get "); + return false; + } + str16Const->SetPrimType(GetPrimitiveType(tk)); + if (!IsAddress(str16Const->GetPrimType())) { + Error("expect primitive type for conststr16 but get "); + return false; + } + tk = lexer.NextToken(); + if (tk != TK_string) { + Error("expect string literal for conststr16 but get "); + return false; + } + // UTF-16 strings in mpl files are presented as UTF-8 strings + // to keep the printable chars in ascii form + // so we need to do a UTF8ToUTF16 conversion + std::string str = lexer.GetName(); + std::u16string str16; + (void)namemangler::UTF8ToUTF16(str16, str); + str16Const->SetStrIdx(GlobalTables::GetU16StrTable().GetOrCreateStrIdxFromName(str16)); + expr = str16Const; + lexer.NextToken(); + return true; +} + +bool MIRParser::ParseExprSizeoftype(BaseNodePtr &expr) { + auto *exprSizeOfType = mod.CurFuncCodeMemPool()->New(); + TokenKind typeTk = lexer.NextToken(); + if (!IsPrimitiveType(typeTk)) { + Error("expect type for GetConstVal but get "); + return false; + } + exprSizeOfType->SetPrimType(GetPrimitiveType(typeTk)); + lexer.NextToken(); + TyIdx tyidx(0); + if (!ParseType(tyidx)) { + Error("expect type parsing array but get "); + return false; + } + exprSizeOfType->SetTyIdx(tyidx); + expr = exprSizeOfType; + return true; +} + +bool MIRParser::ParseExprFieldsDist(BaseNodePtr &expr) { + TokenKind typeTk = lexer.NextToken(); + if (!IsPrimitiveType(typeTk)) { + Error("expect type for GetConstVal but get "); + return false; + } + auto *node = mod.CurFuncCodeMemPool()->New(); + node->SetPrimType(GetPrimitiveType(typeTk)); + lexer.NextToken(); + TyIdx tyIdx(0); + if (!ParseType(tyIdx)) { + Error("expect type parsing array but get "); + return false; + } + node->SetTyIdx(tyIdx); + TokenKind tk = lexer.GetTokenKind(); + if (tk != TK_intconst) { + 
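+    // The two trailing operands of fieldsdist must both be integer-constant
+    // field ids; any other token is rejected here.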
Error("expect type int but get"); + return false; + } + node->SetFiledID1(lexer.GetTheIntVal()); + tk = lexer.NextToken(); + if (tk != TK_intconst) { + Error("expect type int but get"); + return false; + } + node->SetFiledID2(lexer.GetTheIntVal()); + lexer.NextToken(); + expr = node; + return true; +} + +bool MIRParser::ParseExprBinary(BaseNodePtr &expr) { + Opcode opcode = GetBinaryOp(lexer.GetTokenKind()); + if (opcode == OP_undef) { + Error("expect add operator but get "); + return false; + } + auto *addExpr = mod.CurFuncCodeMemPool()->New(opcode); + if (!IsPrimitiveType(lexer.NextToken())) { + Error("expect type parsing binary operator but get "); + return false; + } + addExpr->SetPrimType(GetPrimitiveType(lexer.GetTokenKind())); + lexer.NextToken(); + BaseNode *opnd0 = nullptr; + BaseNode *opnd1 = nullptr; + if (!ParseExprTwoOperand(opnd0, opnd1)) { + return false; + } + addExpr->SetBOpnd(opnd0, 0); + addExpr->SetBOpnd(opnd1, 1); + expr = addExpr; + lexer.NextToken(); + return true; +} + +bool MIRParser::ParseExprCompare(BaseNodePtr &expr) { + Opcode opcode = GetBinaryOp(lexer.GetTokenKind()); + auto *addExpr = mod.CurFuncCodeMemPool()->New(opcode); + if (!IsPrimitiveType(lexer.NextToken())) { + Error("expect type parsing compare operator but get "); + return false; + } + addExpr->SetPrimType(GetPrimitiveType(lexer.GetTokenKind())); + if (!IsPrimitiveType(lexer.NextToken())) { + Error("expect operand type parsing compare operator but get "); + return false; + } + addExpr->SetOpndType(GetPrimitiveType(lexer.GetTokenKind())); + lexer.NextToken(); + BaseNode *opnd0 = nullptr; + BaseNode *opnd1 = nullptr; + if (!ParseExprTwoOperand(opnd0, opnd1)) { + return false; + } + addExpr->SetBOpnd(opnd0, 0); + addExpr->SetBOpnd(opnd1, 1); + expr = addExpr; + lexer.NextToken(); + return true; +} + +bool MIRParser::ParseExprDepositbits(BaseNodePtr &expr) { + // syntax: depositbits (, ) + if (lexer.GetTokenKind() != TK_depositbits) { + Error("expect depositbits but get "); + return false; + } + auto *dpsbNode = mod.CurFuncCodeMemPool()->New(); + expr = dpsbNode; + PrimType ptyp = GetPrimitiveType(lexer.NextToken()); + if (!IsPrimitiveInteger(ptyp)) { + Error("expect but get "); + return false; + } + dpsbNode->SetPrimType(ptyp); + if (lexer.NextToken() != TK_intconst) { + Error("expect bOffset but get "); + return false; + } + dpsbNode->SetBitsOffset(lexer.GetTheIntVal()); + if (lexer.NextToken() != TK_intconst) { + Error("expect bSize but get "); + return false; + } + dpsbNode->SetBitsSize(lexer.GetTheIntVal()); + lexer.NextToken(); + BaseNode *opnd0 = nullptr; + BaseNode *opnd1 = nullptr; + if (!ParseExprTwoOperand(opnd0, opnd1)) { + Error("ParseExprDepositbits when parsing two operand"); + return false; + } + dpsbNode->SetBOpnd(opnd0, 0); + dpsbNode->SetBOpnd(opnd1, 1); + lexer.NextToken(); + return true; +} + +bool MIRParser::ParseExprIreadIaddrof(IreadNode &expr) { + // syntax : iread/iaddrof () + if (!IsPrimitiveType(lexer.NextToken())) { + Error("expect primitive type but get "); + return false; + } + TyIdx tyidx(0); + if (!ParsePrimType(tyidx)) { + return false; + } + expr.SetPrimType(GlobalTables::GetTypeTable().GetPrimTypeFromTyIdx(tyidx)); + tyidx = TyIdx(0); + if (!ParseDerivedType(tyidx)) { + Error("ParseExprIreadIaddrof failed when paring derived type"); + return false; + } + expr.SetTyIdx(tyidx); + if (lexer.GetTokenKind() == TK_intconst) { + expr.SetFieldID(lexer.theIntVal); + lexer.NextToken(); + } + BaseNode *opnd0 = nullptr; + if (!ParseExprOneOperand(opnd0)) { + return false; + } + 
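+  // Illustrative (assumed) textual form handled by this shared iread/iaddrof
+  // helper:  iread i32 <* <$SomeStruct>> 2 (dread ptr %p)
+  // i.e. result prim type, pointed-to type, optional field id, then the single
+  // address operand parsed just above.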
expr.SetOpnd(opnd0, 0); + lexer.NextToken(); + return true; +} + +bool MIRParser::ParseExprIread(BaseNodePtr &expr) { + // syntax : iread () + auto *iExpr = mod.CurFuncCodeMemPool()->New(OP_iread); + if (!ParseExprIreadIaddrof(*iExpr)) { + Error("ParseExprIread failed when trying to parse addof"); + return false; + } + expr = iExpr; + return true; +} + +bool MIRParser::ParseExprIaddrof(BaseNodePtr &expr) { + // syntax : iaddrof () + auto *iExpr = mod.CurFuncCodeMemPool()->New(OP_iaddrof); + if (!ParseExprIreadIaddrof(*iExpr)) { + Error("ParseExprIaddrof failed when trying to parse addof"); + return false; + } + expr = iExpr; + return true; +} + +bool MIRParser::ParseExprIreadoff(BaseNodePtr &expr) { + // syntax : iread () + auto *iReadOff = mod.CurFuncCodeMemPool()->New(); + expr = iReadOff; + if (!IsPrimitiveType(lexer.NextToken())) { + Error("expect primitive type but get "); + return false; + } + TyIdx tyidx(0); + if (!ParsePrimType(tyidx)) { + return false; + } + iReadOff->SetPrimType(GlobalTables::GetTypeTable().GetPrimTypeFromTyIdx(tyidx)); + if (!IsPrimitiveScalar(iReadOff->GetPrimType())) { + Error("only scalar types allowed for ireadoff"); + return false; + } + if (lexer.GetTokenKind() != TK_intconst) { + Error("expect offset but get "); + return false; + } + iReadOff->SetOffset(lexer.GetTheIntVal()); + lexer.NextToken(); + BaseNode *opnd = nullptr; + if (!ParseExprOneOperand(opnd)) { + Error("ParseExprIreadoff when paring one operand"); + return false; + } + iReadOff->SetOpnd(opnd, 0); + lexer.NextToken(); + return true; +} + +bool MIRParser::ParseExprIreadFPoff(BaseNodePtr &expr) { + // syntax : iread + auto *iReadOff = mod.CurFuncCodeMemPool()->New(); + expr = iReadOff; + if (!IsPrimitiveType(lexer.NextToken())) { + Error("expect primitive type but get "); + return false; + } + TyIdx tyidx(0); + if (!ParsePrimType(tyidx)) { + return false; + } + iReadOff->SetPrimType(GlobalTables::GetTypeTable().GetPrimTypeFromTyIdx(tyidx)); + if (lexer.GetTokenKind() != TK_intconst) { + Error("expect offset but get "); + return false; + } + iReadOff->SetOffset(lexer.GetTheIntVal()); + lexer.NextToken(); + return true; +} + +bool MIRParser::ParseExprAddrof(BaseNodePtr &expr) { + // syntax: addrof + auto *addrofNode = mod.CurFuncCodeMemPool()->New(OP_addrof); + expr = addrofNode; + if (lexer.GetTokenKind() != TK_addrof) { + Error("expect addrof but get "); + return false; + } + lexer.NextToken(); + TyIdx tyidx(0); + if (!ParsePrimType(tyidx)) { + Error("expect primitive type but get "); + return false; + } + addrofNode->SetPrimType(GlobalTables::GetTypeTable().GetPrimTypeFromTyIdx(tyidx)); + StIdx stidx; + if (!ParseDeclaredSt(stidx)) { + return false; + } + if (stidx.FullIdx() == 0) { + Error("expect symbol ParseExprAddroffunc"); + return false; + } + if (stidx.IsGlobal()) { + MIRSymbol *sym = GlobalTables::GetGsymTable().GetSymbolFromStidx(stidx.Idx()); + ASSERT(sym != nullptr, "null ptr check"); + sym->SetHasPotentialAssignment(); + } + addrofNode->SetStIdx(stidx); + TokenKind tk = lexer.NextToken(); + if (IsDelimitationTK(tk)) { + addrofNode->SetFieldID(0); + } else if (tk == TK_intconst) { + addrofNode->SetFieldID(lexer.GetTheIntVal()); + lexer.NextToken(); + } else { + addrofNode->SetFieldID(0); + } + return true; +} + +bool MIRParser::ParseExprAddrofoff(BaseNodePtr &expr) { + // syntax: addrofoff + AddrofoffNode *addrofoffNode = mod.CurFuncCodeMemPool()->New(OP_addrofoff); + expr = addrofoffNode; + if (lexer.GetTokenKind() != TK_addrofoff) { + Error("expect addrofoff but get "); + return 
false; + } + lexer.NextToken(); + TyIdx tyidx(0); + if (!ParsePrimType(tyidx)) { + Error("expect primitive type but get "); + return false; + } + addrofoffNode->SetPrimType(GlobalTables::GetTypeTable().GetPrimTypeFromTyIdx(tyidx)); + StIdx stidx; + if (!ParseDeclaredSt(stidx)) { + return false; + } + if (stidx.FullIdx() == 0) { + Error("expect symbol ParseExprAddroffunc"); + return false; + } + if (stidx.IsGlobal()) { + MIRSymbol *sym = GlobalTables::GetGsymTable().GetSymbolFromStidx(stidx.Idx()); + ASSERT(sym != nullptr, "null ptr check"); + sym->SetHasPotentialAssignment(); + } + addrofoffNode->stIdx = stidx; + TokenKind tk = lexer.NextToken(); + if (tk == TK_intconst) { + addrofoffNode->offset = static_cast(lexer.GetTheIntVal()); + lexer.NextToken(); + } else { + Error("expect integer offset but get "); + return false; + } + return true; +} + +bool MIRParser::ParseExprAddroffunc(BaseNodePtr &expr) { + auto *addrOfFuncNode = mod.CurFuncCodeMemPool()->New(); + expr = addrOfFuncNode; + TokenKind tk = lexer.NextToken(); + if (tk != TK_a32 && tk != TK_a64 && tk != TK_ptr) { + Error("expect address primitive type but get "); + return false; + } + TyIdx tyidx(0); + if (!ParsePrimType(tyidx)) { + Error("ParseExprAddroffunc failed when parsing primitive type"); + return false; + } + addrOfFuncNode->SetPrimType(GlobalTables::GetTypeTable().GetPrimTypeFromTyIdx(tyidx)); + if (lexer.GetTokenKind() != TK_fname) { + Error("expect function name but get "); + return false; + } + PUIdx pidx; + if (!ParseDeclaredFunc(pidx)) { + if (mod.GetFlavor() < kMmpl) { + Error("expect .mmpl file"); + return false; + } + pidx = EnterUndeclaredFunction(); + } + addrOfFuncNode->SetPUIdx(pidx); + lexer.NextToken(); + return true; +} + +bool MIRParser::ParseExprAddroflabel(BaseNodePtr &expr) { + // syntax: addroflabel
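+  //   e.g. (illustrative) "addroflabel ptr @lab": an address prim type
+  //   followed by the label whose address is taken.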